tg3: Add tx BD budgeting code
drivers/net/tg3.c
/*
 * tg3.c: Broadcom Tigon3 ethernet driver.
 *
 * Copyright (C) 2001, 2002, 2003, 2004 David S. Miller (davem@redhat.com)
 * Copyright (C) 2001, 2002, 2003 Jeff Garzik (jgarzik@pobox.com)
 * Copyright (C) 2004 Sun Microsystems Inc.
 * Copyright (C) 2005-2011 Broadcom Corporation.
 *
 * Firmware is:
 *	Derived from proprietary unpublished source code,
 *	Copyright (C) 2000-2003 Broadcom Corporation.
 *
 *	Permission is hereby granted for the distribution of this firmware
 *	data in hexadecimal or equivalent format, provided this copyright
 *	notice is accompanying it.
 */
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/stringify.h>
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/compiler.h>
#include <linux/slab.h>
#include <linux/delay.h>
#include <linux/in.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/ioport.h>
#include <linux/pci.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/ethtool.h>
#include <linux/mdio.h>
#include <linux/mii.h>
#include <linux/phy.h>
#include <linux/brcmphy.h>
#include <linux/if_vlan.h>
#include <linux/ip.h>
#include <linux/tcp.h>
#include <linux/workqueue.h>
#include <linux/prefetch.h>
#include <linux/dma-mapping.h>
#include <linux/firmware.h>

#include <net/checksum.h>
#include <net/ip.h>

#include <asm/system.h>
#include <linux/io.h>
#include <asm/byteorder.h>
#include <linux/uaccess.h>

#ifdef CONFIG_SPARC
#include <asm/idprom.h>
#include <asm/prom.h>
#endif

#define BAR_0	0
#define BAR_2	2

#include "tg3.h"
/* Functions & macros to verify TG3_FLAGS types */

static inline int _tg3_flag(enum TG3_FLAGS flag, unsigned long *bits)
{
	return test_bit(flag, bits);
}

static inline void _tg3_flag_set(enum TG3_FLAGS flag, unsigned long *bits)
{
	set_bit(flag, bits);
}

static inline void _tg3_flag_clear(enum TG3_FLAGS flag, unsigned long *bits)
{
	clear_bit(flag, bits);
}

#define tg3_flag(tp, flag)		\
	_tg3_flag(TG3_FLAG_##flag, (tp)->tg3_flags)
#define tg3_flag_set(tp, flag)		\
	_tg3_flag_set(TG3_FLAG_##flag, (tp)->tg3_flags)
#define tg3_flag_clear(tp, flag)	\
	_tg3_flag_clear(TG3_FLAG_##flag, (tp)->tg3_flags)
#define DRV_MODULE_NAME		"tg3"
#define TG3_MAJ_NUM		3
#define TG3_MIN_NUM		119
#define DRV_MODULE_VERSION	\
	__stringify(TG3_MAJ_NUM) "." __stringify(TG3_MIN_NUM)
#define DRV_MODULE_RELDATE	"May 18, 2011"

#define TG3_DEF_MAC_MODE	0
#define TG3_DEF_RX_MODE		0
#define TG3_DEF_TX_MODE		0
#define TG3_DEF_MSG_ENABLE	  \
	(NETIF_MSG_DRV		| \
	 NETIF_MSG_PROBE	| \
	 NETIF_MSG_LINK		| \
	 NETIF_MSG_TIMER	| \
	 NETIF_MSG_IFDOWN	| \
	 NETIF_MSG_IFUP		| \
	 NETIF_MSG_RX_ERR	| \
	 NETIF_MSG_TX_ERR)
#define TG3_GRC_LCLCTL_PWRSW_DELAY	100

/* length of time before we decide the hardware is borked,
 * and dev->tx_timeout() should be called to fix the problem
 */

#define TG3_TX_TIMEOUT			(5 * HZ)

/* hardware minimum and maximum for a single frame's data payload */
#define TG3_MIN_MTU			60
#define TG3_MAX_MTU(tp)	\
	(tg3_flag(tp, JUMBO_CAPABLE) ? 9000 : 1500)

/* These numbers seem to be hard coded in the NIC firmware somehow.
 * You can't change the ring sizes, but you can change where you place
 * them in the NIC onboard memory.
 */
#define TG3_RX_STD_RING_SIZE(tp) \
	(tg3_flag(tp, LRG_PROD_RING_CAP) ? \
	 TG3_RX_STD_MAX_SIZE_5717 : TG3_RX_STD_MAX_SIZE_5700)
#define TG3_DEF_RX_RING_PENDING		200
#define TG3_RX_JMB_RING_SIZE(tp) \
	(tg3_flag(tp, LRG_PROD_RING_CAP) ? \
	 TG3_RX_JMB_MAX_SIZE_5717 : TG3_RX_JMB_MAX_SIZE_5700)
#define TG3_DEF_RX_JUMBO_RING_PENDING	100
#define TG3_RSS_INDIR_TBL_SIZE		128

/* Do not place this n-ring entries value into the tp struct itself,
 * we really want to expose these constants to GCC so that modulo et
 * al. operations are done with shifts and masks instead of with
 * hw multiply/modulo instructions.  Another solution would be to
 * replace things like '% foo' with '& (foo - 1)'.
 */

#define TG3_TX_RING_SIZE		512
#define TG3_DEF_TX_RING_PENDING		(TG3_TX_RING_SIZE - 1)

#define TG3_RX_STD_RING_BYTES(tp) \
	(sizeof(struct tg3_rx_buffer_desc) * TG3_RX_STD_RING_SIZE(tp))
#define TG3_RX_JMB_RING_BYTES(tp) \
	(sizeof(struct tg3_ext_rx_buffer_desc) * TG3_RX_JMB_RING_SIZE(tp))
#define TG3_RX_RCB_RING_BYTES(tp) \
	(sizeof(struct tg3_rx_buffer_desc) * (tp->rx_ret_ring_mask + 1))
#define TG3_TX_RING_BYTES	(sizeof(struct tg3_tx_buffer_desc) * \
				 TG3_TX_RING_SIZE)
#define NEXT_TX(N)		(((N) + 1) & (TG3_TX_RING_SIZE - 1))
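
/* Illustrative only: because TG3_TX_RING_SIZE is a power of two known at
 * compile time, the AND mask in NEXT_TX() above is equivalent to a modulo
 * but compiles to a single AND instead of a divide, e.g.:
 *
 *	(511 + 1) % 512       == 0
 *	(511 + 1) & (512 - 1) == 0
 *
 * A minimal self-contained check of the equivalence:
 */
static inline int tg3_next_tx_matches_modulo(u32 n)
{
	return NEXT_TX(n) == ((n + 1) % TG3_TX_RING_SIZE);
}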
#define TG3_DMA_BYTE_ENAB		64

#define TG3_RX_STD_DMA_SZ		1536
#define TG3_RX_JMB_DMA_SZ		9046

#define TG3_RX_DMA_TO_MAP_SZ(x)		((x) + TG3_DMA_BYTE_ENAB)

#define TG3_RX_STD_MAP_SZ		TG3_RX_DMA_TO_MAP_SZ(TG3_RX_STD_DMA_SZ)
#define TG3_RX_JMB_MAP_SZ		TG3_RX_DMA_TO_MAP_SZ(TG3_RX_JMB_DMA_SZ)

#define TG3_RX_STD_BUFF_RING_SIZE(tp) \
	(sizeof(struct ring_info) * TG3_RX_STD_RING_SIZE(tp))

#define TG3_RX_JMB_BUFF_RING_SIZE(tp) \
	(sizeof(struct ring_info) * TG3_RX_JMB_RING_SIZE(tp))

/* Due to a hardware bug, the 5701 can only DMA to memory addresses
 * that are at least dword aligned when used in PCIX mode.  The driver
 * works around this bug by double copying the packet.  This workaround
 * is built into the normal double copy length check for efficiency.
 *
 * However, the double copy is only necessary on those architectures
 * where unaligned memory accesses are inefficient.  For those architectures
 * where unaligned memory accesses incur little penalty, we can reintegrate
 * the 5701 in the normal rx path.  Doing so saves a device structure
 * dereference by hardcoding the double copy threshold in place.
 */
#define TG3_RX_COPY_THRESHOLD		256
#if NET_IP_ALIGN == 0 || defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS)
	#define TG3_RX_COPY_THRESH(tp)	TG3_RX_COPY_THRESHOLD
#else
	#define TG3_RX_COPY_THRESH(tp)	((tp)->rx_copy_thresh)
#endif
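
/* Illustrative only: how the rx path (later in this file) consults the
 * threshold -- frames at or under it are copied into a fresh skb so the
 * original DMA buffer can be recycled, longer frames are handed up as-is.
 * The helper name is hypothetical.
 */
static inline bool tg3_rx_would_copy(struct tg3 *tp, u32 len)
{
	return len <= TG3_RX_COPY_THRESH(tp);
}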
/* minimum number of free TX descriptors required to wake up TX process */
#define TG3_TX_WAKEUP_THRESH(tnapi)	((tnapi)->tx_pending / 4)
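
/* Illustrative only, in the spirit of the "tx BD budgeting" this commit
 * adds: before accepting an skb, the xmit path must verify that enough tx
 * buffer descriptors (BDs) are free for the linear head plus every page
 * fragment.  This is a simplified sketch, not the actual patch; the free
 * count is computed the same way as tg3_tx_avail() later in this file.
 */
static inline bool tg3_tx_budget_ok_sketch(struct tg3_napi *tnapi,
					   struct sk_buff *skb)
{
	/* one BD for the linear head + one per page fragment */
	u32 budget = skb_shinfo(skb)->nr_frags + 1;
	u32 avail = tnapi->tx_pending -
		    ((tnapi->tx_prod - tnapi->tx_cons) &
		     (TG3_TX_RING_SIZE - 1));

	return avail > budget;
}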
#define TG3_RAW_IP_ALIGN 2

#define TG3_FW_UPDATE_TIMEOUT_SEC	5

#define FIRMWARE_TG3		"tigon/tg3.bin"
#define FIRMWARE_TG3TSO		"tigon/tg3_tso.bin"
#define FIRMWARE_TG3TSO5	"tigon/tg3_tso5.bin"

static char version[] __devinitdata =
	DRV_MODULE_NAME ".c:v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")";

MODULE_AUTHOR("David S. Miller (davem@redhat.com) and Jeff Garzik (jgarzik@pobox.com)");
MODULE_DESCRIPTION("Broadcom Tigon3 ethernet driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_MODULE_VERSION);
MODULE_FIRMWARE(FIRMWARE_TG3);
MODULE_FIRMWARE(FIRMWARE_TG3TSO);
MODULE_FIRMWARE(FIRMWARE_TG3TSO5);

static int tg3_debug = -1;	/* -1 == use TG3_DEF_MSG_ENABLE as value */
module_param(tg3_debug, int, 0);
MODULE_PARM_DESC(tg3_debug, "Tigon3 bitmapped debugging message enable value");
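
/* Illustrative only: tg3_debug is the usual bitmapped netif_msg mask, so
 * loading the module as, for example,
 *
 *	modprobe tg3 tg3_debug=0x7fff
 *
 * enables every message class, while the default of -1 selects the
 * TG3_DEF_MSG_ENABLE set defined above.
 */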
static DEFINE_PCI_DEVICE_TABLE(tg3_pci_tbl) = {
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5700)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5701)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702FE)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705_2)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705M_2)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702X)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703X)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704S)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702A3)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703A3)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5782)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5788)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5789)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5901)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5901_2)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704S_2)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705F)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5721)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5722)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751F)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5752)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5752M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753F)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5754)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5754M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5755)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5755M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5756)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5786)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787F)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5714)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5714S)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5715)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5715S)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5780)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5780S)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5781)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5906)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5906M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5784)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5764)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5723)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5761)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5761E)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5761S)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5761SE)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5785_G)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5785_F)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57780)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57760)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57790)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57788)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5717)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5718)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57781)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57785)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57761)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57765)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57791)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57795)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5719)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5720)},
	{PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9DXX)},
	{PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9MXX)},
	{PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1000)},
	{PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1001)},
	{PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1003)},
	{PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC9100)},
	{PCI_DEVICE(PCI_VENDOR_ID_APPLE, PCI_DEVICE_ID_APPLE_TIGON3)},
	{PCI_DEVICE(0x10cf, 0x11a2)}, /* Fujitsu 1000base-SX with BCM5703SKHB */
	{}
};

MODULE_DEVICE_TABLE(pci, tg3_pci_tbl);
static const struct {
	const char string[ETH_GSTRING_LEN];
} ethtool_stats_keys[] = {
	{ "rx_octets" },
	{ "rx_fragments" },
	{ "rx_ucast_packets" },
	{ "rx_mcast_packets" },
	{ "rx_bcast_packets" },
	{ "rx_fcs_errors" },
	{ "rx_align_errors" },
	{ "rx_xon_pause_rcvd" },
	{ "rx_xoff_pause_rcvd" },
	{ "rx_mac_ctrl_rcvd" },
	{ "rx_xoff_entered" },
	{ "rx_frame_too_long_errors" },
	{ "rx_jabbers" },
	{ "rx_undersize_packets" },
	{ "rx_in_length_errors" },
	{ "rx_out_length_errors" },
	{ "rx_64_or_less_octet_packets" },
	{ "rx_65_to_127_octet_packets" },
	{ "rx_128_to_255_octet_packets" },
	{ "rx_256_to_511_octet_packets" },
	{ "rx_512_to_1023_octet_packets" },
	{ "rx_1024_to_1522_octet_packets" },
	{ "rx_1523_to_2047_octet_packets" },
	{ "rx_2048_to_4095_octet_packets" },
	{ "rx_4096_to_8191_octet_packets" },
	{ "rx_8192_to_9022_octet_packets" },

	{ "tx_octets" },
	{ "tx_collisions" },

	{ "tx_xon_sent" },
	{ "tx_xoff_sent" },
	{ "tx_flow_control" },
	{ "tx_mac_errors" },
	{ "tx_single_collisions" },
	{ "tx_mult_collisions" },
	{ "tx_deferred" },
	{ "tx_excessive_collisions" },
	{ "tx_late_collisions" },
	{ "tx_collide_2times" },
	{ "tx_collide_3times" },
	{ "tx_collide_4times" },
	{ "tx_collide_5times" },
	{ "tx_collide_6times" },
	{ "tx_collide_7times" },
	{ "tx_collide_8times" },
	{ "tx_collide_9times" },
	{ "tx_collide_10times" },
	{ "tx_collide_11times" },
	{ "tx_collide_12times" },
	{ "tx_collide_13times" },
	{ "tx_collide_14times" },
	{ "tx_collide_15times" },
	{ "tx_ucast_packets" },
	{ "tx_mcast_packets" },
	{ "tx_bcast_packets" },
	{ "tx_carrier_sense_errors" },
	{ "tx_discards" },
	{ "tx_errors" },

	{ "dma_writeq_full" },
	{ "dma_write_prioq_full" },
	{ "rxbds_empty" },
	{ "rx_discards" },
	{ "rx_errors" },
	{ "rx_threshold_hit" },

	{ "dma_readq_full" },
	{ "dma_read_prioq_full" },
	{ "tx_comp_queue_full" },

	{ "ring_set_send_prod_index" },
	{ "ring_status_update" },
	{ "nic_irqs" },
	{ "nic_avoided_irqs" },
	{ "nic_tx_threshold_hit" },

	{ "mbuf_lwm_thresh_hit" },
};

#define TG3_NUM_STATS	ARRAY_SIZE(ethtool_stats_keys)
static const struct {
	const char string[ETH_GSTRING_LEN];
} ethtool_test_keys[] = {
	{ "nvram test     (online) " },
	{ "link test      (online) " },
	{ "register test  (offline)" },
	{ "memory test    (offline)" },
	{ "loopback test  (offline)" },
	{ "interrupt test (offline)" },
};

#define TG3_NUM_TEST	ARRAY_SIZE(ethtool_test_keys)
static void tg3_write32(struct tg3 *tp, u32 off, u32 val)
{
	writel(val, tp->regs + off);
}

static u32 tg3_read32(struct tg3 *tp, u32 off)
{
	return readl(tp->regs + off);
}

static void tg3_ape_write32(struct tg3 *tp, u32 off, u32 val)
{
	writel(val, tp->aperegs + off);
}

static u32 tg3_ape_read32(struct tg3 *tp, u32 off)
{
	return readl(tp->aperegs + off);
}

static void tg3_write_indirect_reg32(struct tg3 *tp, u32 off, u32 val)
{
	unsigned long flags;

	spin_lock_irqsave(&tp->indirect_lock, flags);
	pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off);
	pci_write_config_dword(tp->pdev, TG3PCI_REG_DATA, val);
	spin_unlock_irqrestore(&tp->indirect_lock, flags);
}

static void tg3_write_flush_reg32(struct tg3 *tp, u32 off, u32 val)
{
	writel(val, tp->regs + off);
	readl(tp->regs + off);
}

static u32 tg3_read_indirect_reg32(struct tg3 *tp, u32 off)
{
	unsigned long flags;
	u32 val;

	spin_lock_irqsave(&tp->indirect_lock, flags);
	pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off);
	pci_read_config_dword(tp->pdev, TG3PCI_REG_DATA, &val);
	spin_unlock_irqrestore(&tp->indirect_lock, flags);
	return val;
}

static void tg3_write_indirect_mbox(struct tg3 *tp, u32 off, u32 val)
{
	unsigned long flags;

	if (off == (MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW)) {
		pci_write_config_dword(tp->pdev, TG3PCI_RCV_RET_RING_CON_IDX +
				       TG3_64BIT_REG_LOW, val);
		return;
	}
	if (off == TG3_RX_STD_PROD_IDX_REG) {
		pci_write_config_dword(tp->pdev, TG3PCI_STD_RING_PROD_IDX +
				       TG3_64BIT_REG_LOW, val);
		return;
	}

	spin_lock_irqsave(&tp->indirect_lock, flags);
	pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off + 0x5600);
	pci_write_config_dword(tp->pdev, TG3PCI_REG_DATA, val);
	spin_unlock_irqrestore(&tp->indirect_lock, flags);

	/* In indirect mode when disabling interrupts, we also need
	 * to clear the interrupt bit in the GRC local ctrl register.
	 */
	if ((off == (MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW)) &&
	    (val == 0x1)) {
		pci_write_config_dword(tp->pdev, TG3PCI_MISC_LOCAL_CTRL,
				       tp->grc_local_ctrl|GRC_LCLCTRL_CLEARINT);
	}
}

static u32 tg3_read_indirect_mbox(struct tg3 *tp, u32 off)
{
	unsigned long flags;
	u32 val;

	spin_lock_irqsave(&tp->indirect_lock, flags);
	pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off + 0x5600);
	pci_read_config_dword(tp->pdev, TG3PCI_REG_DATA, &val);
	spin_unlock_irqrestore(&tp->indirect_lock, flags);
	return val;
}

/* usec_wait specifies the wait time in usec when writing to certain registers
 * where it is unsafe to read back the register without some delay.
 * GRC_LOCAL_CTRL is one example if the GPIOs are toggled to switch power.
 * TG3PCI_CLOCK_CTRL is another example if the clock frequencies are changed.
 */
static void _tw32_flush(struct tg3 *tp, u32 off, u32 val, u32 usec_wait)
{
	if (tg3_flag(tp, PCIX_TARGET_HWBUG) || tg3_flag(tp, ICH_WORKAROUND))
		/* Non-posted methods */
		tp->write32(tp, off, val);
	else {
		/* Posted method */
		tg3_write32(tp, off, val);
		if (usec_wait)
			udelay(usec_wait);
		tp->read32(tp, off);
	}
	/* Wait again after the read for the posted method to guarantee that
	 * the wait time is met.
	 */
	if (usec_wait)
		udelay(usec_wait);
}

static inline void tw32_mailbox_flush(struct tg3 *tp, u32 off, u32 val)
{
	tp->write32_mbox(tp, off, val);
	if (!tg3_flag(tp, MBOX_WRITE_REORDER) && !tg3_flag(tp, ICH_WORKAROUND))
		tp->read32_mbox(tp, off);
}

static void tg3_write32_tx_mbox(struct tg3 *tp, u32 off, u32 val)
{
	void __iomem *mbox = tp->regs + off;
	writel(val, mbox);
	if (tg3_flag(tp, TXD_MBOX_HWBUG))
		writel(val, mbox);
	if (tg3_flag(tp, MBOX_WRITE_REORDER))
		readl(mbox);
}

static u32 tg3_read32_mbox_5906(struct tg3 *tp, u32 off)
{
	return readl(tp->regs + off + GRCMBOX_BASE);
}

static void tg3_write32_mbox_5906(struct tg3 *tp, u32 off, u32 val)
{
	writel(val, tp->regs + off + GRCMBOX_BASE);
}

#define tw32_mailbox(reg, val)		tp->write32_mbox(tp, reg, val)
#define tw32_mailbox_f(reg, val)	tw32_mailbox_flush(tp, (reg), (val))
#define tw32_rx_mbox(reg, val)		tp->write32_rx_mbox(tp, reg, val)
#define tw32_tx_mbox(reg, val)		tp->write32_tx_mbox(tp, reg, val)
#define tr32_mailbox(reg)		tp->read32_mbox(tp, reg)

#define tw32(reg, val)			tp->write32(tp, reg, val)
#define tw32_f(reg, val)		_tw32_flush(tp, (reg), (val), 0)
#define tw32_wait_f(reg, val, us)	_tw32_flush(tp, (reg), (val), (us))
#define tr32(reg)			tp->read32(tp, reg)
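
/* Illustrative only: the tw32()/tr32() wrappers assume a local variable
 * named 'tp' in the calling scope.  A typical flush-and-wait sequence,
 * mirroring uses later in this file:
 */
static inline void tg3_reg_access_sketch(struct tg3 *tp)
{
	u32 val = tr32(GRC_MISC_CFG);	/* read through tp->read32 */

	tw32_f(GRC_MISC_CFG, val);	/* write, then read back to flush */
	tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl,
		    TG3_GRC_LCLCTL_PWRSW_DELAY);	/* write + 100 usec settle */
}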
static void tg3_write_mem(struct tg3 *tp, u32 off, u32 val)
{
	unsigned long flags;

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906 &&
	    (off >= NIC_SRAM_STATS_BLK) && (off < NIC_SRAM_TX_BUFFER_DESC))
		return;

	spin_lock_irqsave(&tp->indirect_lock, flags);
	if (tg3_flag(tp, SRAM_USE_CONFIG)) {
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, off);
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);

		/* Always leave this as zero. */
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
	} else {
		tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, off);
		tw32_f(TG3PCI_MEM_WIN_DATA, val);

		/* Always leave this as zero. */
		tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, 0);
	}
	spin_unlock_irqrestore(&tp->indirect_lock, flags);
}

static void tg3_read_mem(struct tg3 *tp, u32 off, u32 *val)
{
	unsigned long flags;

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906 &&
	    (off >= NIC_SRAM_STATS_BLK) && (off < NIC_SRAM_TX_BUFFER_DESC)) {
		*val = 0;
		return;
	}

	spin_lock_irqsave(&tp->indirect_lock, flags);
	if (tg3_flag(tp, SRAM_USE_CONFIG)) {
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, off);
		pci_read_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);

		/* Always leave this as zero. */
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
	} else {
		tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, off);
		*val = tr32(TG3PCI_MEM_WIN_DATA);

		/* Always leave this as zero. */
		tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, 0);
	}
	spin_unlock_irqrestore(&tp->indirect_lock, flags);
}
static void tg3_ape_lock_init(struct tg3 *tp)
{
	int i;
	u32 regbase, bit;

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
		regbase = TG3_APE_LOCK_GRANT;
	else
		regbase = TG3_APE_PER_LOCK_GRANT;

	/* Make sure the driver hasn't any stale locks. */
	for (i = 0; i < 8; i++) {
		if (i == TG3_APE_LOCK_GPIO)
			continue;
		tg3_ape_write32(tp, regbase + 4 * i, APE_LOCK_GRANT_DRIVER);
	}

	/* Clear the correct bit of the GPIO lock too. */
	if (!tp->pci_fn)
		bit = APE_LOCK_GRANT_DRIVER;
	else
		bit = 1 << tp->pci_fn;

	tg3_ape_write32(tp, regbase + 4 * TG3_APE_LOCK_GPIO, bit);
}

static int tg3_ape_lock(struct tg3 *tp, int locknum)
{
	int i, off;
	int ret = 0;
	u32 status, req, gnt, bit;

	if (!tg3_flag(tp, ENABLE_APE))
		return 0;

	switch (locknum) {
	case TG3_APE_LOCK_GPIO:
		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
			return 0;
	case TG3_APE_LOCK_GRC:
	case TG3_APE_LOCK_MEM:
		break;
	default:
		return -EINVAL;
	}

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761) {
		req = TG3_APE_LOCK_REQ;
		gnt = TG3_APE_LOCK_GRANT;
	} else {
		req = TG3_APE_PER_LOCK_REQ;
		gnt = TG3_APE_PER_LOCK_GRANT;
	}

	off = 4 * locknum;

	if (locknum != TG3_APE_LOCK_GPIO || !tp->pci_fn)
		bit = APE_LOCK_REQ_DRIVER;
	else
		bit = 1 << tp->pci_fn;

	tg3_ape_write32(tp, req + off, bit);

	/* Wait for up to 1 millisecond to acquire lock. */
	for (i = 0; i < 100; i++) {
		status = tg3_ape_read32(tp, gnt + off);
		if (status == bit)
			break;
		udelay(10);
	}

	if (status != bit) {
		/* Revoke the lock request. */
		tg3_ape_write32(tp, gnt + off, bit);
		ret = -EBUSY;
	}

	return ret;
}

static void tg3_ape_unlock(struct tg3 *tp, int locknum)
{
	u32 gnt, bit;

	if (!tg3_flag(tp, ENABLE_APE))
		return;

	switch (locknum) {
	case TG3_APE_LOCK_GPIO:
		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
			return;
	case TG3_APE_LOCK_GRC:
	case TG3_APE_LOCK_MEM:
		break;
	default:
		return;
	}

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
		gnt = TG3_APE_LOCK_GRANT;
	else
		gnt = TG3_APE_PER_LOCK_GRANT;

	if (locknum != TG3_APE_LOCK_GPIO || !tp->pci_fn)
		bit = APE_LOCK_GRANT_DRIVER;
	else
		bit = 1 << tp->pci_fn;

	tg3_ape_write32(tp, gnt + 4 * locknum, bit);
}
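
/* Illustrative only: tg3_ape_lock()/tg3_ape_unlock() bracket accesses to
 * resources shared with the APE management firmware, e.g.:
 */
static inline void tg3_ape_lock_usage_sketch(struct tg3 *tp)
{
	if (tg3_ape_lock(tp, TG3_APE_LOCK_MEM))
		return;		/* firmware holds the lock; back off */

	/* ... touch APE shared memory here ... */

	tg3_ape_unlock(tp, TG3_APE_LOCK_MEM);
}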
static void tg3_disable_ints(struct tg3 *tp)
{
	int i;

	tw32(TG3PCI_MISC_HOST_CTRL,
	     (tp->misc_host_ctrl | MISC_HOST_CTRL_MASK_PCI_INT));
	for (i = 0; i < tp->irq_max; i++)
		tw32_mailbox_f(tp->napi[i].int_mbox, 0x00000001);
}

static void tg3_enable_ints(struct tg3 *tp)
{
	int i;

	tp->irq_sync = 0;
	wmb();

	tw32(TG3PCI_MISC_HOST_CTRL,
	     (tp->misc_host_ctrl & ~MISC_HOST_CTRL_MASK_PCI_INT));

	tp->coal_now = tp->coalesce_mode | HOSTCC_MODE_ENABLE;
	for (i = 0; i < tp->irq_cnt; i++) {
		struct tg3_napi *tnapi = &tp->napi[i];

		tw32_mailbox_f(tnapi->int_mbox, tnapi->last_tag << 24);
		if (tg3_flag(tp, 1SHOT_MSI))
			tw32_mailbox_f(tnapi->int_mbox, tnapi->last_tag << 24);

		tp->coal_now |= tnapi->coal_now;
	}

	/* Force an initial interrupt */
	if (!tg3_flag(tp, TAGGED_STATUS) &&
	    (tp->napi[0].hw_status->status & SD_STATUS_UPDATED))
		tw32(GRC_LOCAL_CTRL, tp->grc_local_ctrl | GRC_LCLCTRL_SETINT);
	else
		tw32(HOSTCC_MODE, tp->coal_now);

	tp->coal_now &= ~(tp->napi[0].coal_now | tp->napi[1].coal_now);
}

static inline unsigned int tg3_has_work(struct tg3_napi *tnapi)
{
	struct tg3 *tp = tnapi->tp;
	struct tg3_hw_status *sblk = tnapi->hw_status;
	unsigned int work_exists = 0;

	/* check for phy events */
	if (!(tg3_flag(tp, USE_LINKCHG_REG) || tg3_flag(tp, POLL_SERDES))) {
		if (sblk->status & SD_STATUS_LINK_CHG)
			work_exists = 1;
	}
	/* check for RX/TX work to do */
	if (sblk->idx[0].tx_consumer != tnapi->tx_cons ||
	    *(tnapi->rx_rcb_prod_idx) != tnapi->rx_rcb_ptr)
		work_exists = 1;

	return work_exists;
}

/* tg3_int_reenable
 *  similar to tg3_enable_ints, but it accurately determines whether there
 *  is new work pending and can return without flushing the PIO write
 *  which reenables interrupts
 */
static void tg3_int_reenable(struct tg3_napi *tnapi)
{
	struct tg3 *tp = tnapi->tp;

	tw32_mailbox(tnapi->int_mbox, tnapi->last_tag << 24);
	mmiowb();

	/* When doing tagged status, this work check is unnecessary.
	 * The last_tag we write above tells the chip which piece of
	 * work we've completed.
	 */
	if (!tg3_flag(tp, TAGGED_STATUS) && tg3_has_work(tnapi))
		tw32(HOSTCC_MODE, tp->coalesce_mode |
		     HOSTCC_MODE_ENABLE | tnapi->coal_now);
}
static void tg3_switch_clocks(struct tg3 *tp)
{
	u32 clock_ctrl;
	u32 orig_clock_ctrl;

	if (tg3_flag(tp, CPMU_PRESENT) || tg3_flag(tp, 5780_CLASS))
		return;

	clock_ctrl = tr32(TG3PCI_CLOCK_CTRL);

	orig_clock_ctrl = clock_ctrl;
	clock_ctrl &= (CLOCK_CTRL_FORCE_CLKRUN |
		       CLOCK_CTRL_CLKRUN_OENABLE |
		       0x1f);
	tp->pci_clock_ctrl = clock_ctrl;

	if (tg3_flag(tp, 5705_PLUS)) {
		if (orig_clock_ctrl & CLOCK_CTRL_625_CORE) {
			tw32_wait_f(TG3PCI_CLOCK_CTRL,
				    clock_ctrl | CLOCK_CTRL_625_CORE, 40);
		}
	} else if ((orig_clock_ctrl & CLOCK_CTRL_44MHZ_CORE) != 0) {
		tw32_wait_f(TG3PCI_CLOCK_CTRL,
			    clock_ctrl |
			    (CLOCK_CTRL_44MHZ_CORE | CLOCK_CTRL_ALTCLK),
			    40);
		tw32_wait_f(TG3PCI_CLOCK_CTRL,
			    clock_ctrl | (CLOCK_CTRL_ALTCLK),
			    40);
	}
	tw32_wait_f(TG3PCI_CLOCK_CTRL, clock_ctrl, 40);
}
#define PHY_BUSY_LOOPS	5000

static int tg3_readphy(struct tg3 *tp, int reg, u32 *val)
{
	u32 frame_val;
	unsigned int loops;
	int ret;

	if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
		tw32_f(MAC_MI_MODE,
		       (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
		udelay(80);
	}

	*val = 0x0;

	frame_val  = ((tp->phy_addr << MI_COM_PHY_ADDR_SHIFT) &
		      MI_COM_PHY_ADDR_MASK);
	frame_val |= ((reg << MI_COM_REG_ADDR_SHIFT) &
		      MI_COM_REG_ADDR_MASK);
	frame_val |= (MI_COM_CMD_READ | MI_COM_START);

	tw32_f(MAC_MI_COM, frame_val);

	loops = PHY_BUSY_LOOPS;
	while (loops != 0) {
		udelay(10);
		frame_val = tr32(MAC_MI_COM);

		if ((frame_val & MI_COM_BUSY) == 0) {
			udelay(5);
			frame_val = tr32(MAC_MI_COM);
			break;
		}
		loops -= 1;
	}

	ret = -EBUSY;
	if (loops != 0) {
		*val = frame_val & MI_COM_DATA_MASK;
		ret = 0;
	}

	if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
		tw32_f(MAC_MI_MODE, tp->mi_mode);
		udelay(80);
	}

	return ret;
}

static int tg3_writephy(struct tg3 *tp, int reg, u32 val)
{
	u32 frame_val;
	unsigned int loops;
	int ret;

	if ((tp->phy_flags & TG3_PHYFLG_IS_FET) &&
	    (reg == MII_CTRL1000 || reg == MII_TG3_AUX_CTRL))
		return 0;

	if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
		tw32_f(MAC_MI_MODE,
		       (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
		udelay(80);
	}

	frame_val  = ((tp->phy_addr << MI_COM_PHY_ADDR_SHIFT) &
		      MI_COM_PHY_ADDR_MASK);
	frame_val |= ((reg << MI_COM_REG_ADDR_SHIFT) &
		      MI_COM_REG_ADDR_MASK);
	frame_val |= (val & MI_COM_DATA_MASK);
	frame_val |= (MI_COM_CMD_WRITE | MI_COM_START);

	tw32_f(MAC_MI_COM, frame_val);

	loops = PHY_BUSY_LOOPS;
	while (loops != 0) {
		udelay(10);
		frame_val = tr32(MAC_MI_COM);
		if ((frame_val & MI_COM_BUSY) == 0) {
			udelay(5);
			frame_val = tr32(MAC_MI_COM);
			break;
		}
		loops -= 1;
	}

	ret = -EBUSY;
	if (loops != 0)
		ret = 0;

	if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
		tw32_f(MAC_MI_MODE, tp->mi_mode);
		udelay(80);
	}

	return ret;
}
static int tg3_phy_cl45_write(struct tg3 *tp, u32 devad, u32 addr, u32 val)
{
	int err;

	err = tg3_writephy(tp, MII_TG3_MMD_CTRL, devad);
	if (err)
		goto done;

	err = tg3_writephy(tp, MII_TG3_MMD_ADDRESS, addr);
	if (err)
		goto done;

	err = tg3_writephy(tp, MII_TG3_MMD_CTRL,
			   MII_TG3_MMD_CTRL_DATA_NOINC | devad);
	if (err)
		goto done;

	err = tg3_writephy(tp, MII_TG3_MMD_ADDRESS, val);

done:
	return err;
}

static int tg3_phy_cl45_read(struct tg3 *tp, u32 devad, u32 addr, u32 *val)
{
	int err;

	err = tg3_writephy(tp, MII_TG3_MMD_CTRL, devad);
	if (err)
		goto done;

	err = tg3_writephy(tp, MII_TG3_MMD_ADDRESS, addr);
	if (err)
		goto done;

	err = tg3_writephy(tp, MII_TG3_MMD_CTRL,
			   MII_TG3_MMD_CTRL_DATA_NOINC | devad);
	if (err)
		goto done;

	err = tg3_readphy(tp, MII_TG3_MMD_ADDRESS, val);

done:
	return err;
}

static int tg3_phydsp_read(struct tg3 *tp, u32 reg, u32 *val)
{
	int err;

	err = tg3_writephy(tp, MII_TG3_DSP_ADDRESS, reg);
	if (!err)
		err = tg3_readphy(tp, MII_TG3_DSP_RW_PORT, val);

	return err;
}

static int tg3_phydsp_write(struct tg3 *tp, u32 reg, u32 val)
{
	int err;

	err = tg3_writephy(tp, MII_TG3_DSP_ADDRESS, reg);
	if (!err)
		err = tg3_writephy(tp, MII_TG3_DSP_RW_PORT, val);

	return err;
}

static int tg3_phy_auxctl_read(struct tg3 *tp, int reg, u32 *val)
{
	int err;

	err = tg3_writephy(tp, MII_TG3_AUX_CTRL,
			   (reg << MII_TG3_AUXCTL_MISC_RDSEL_SHIFT) |
			   MII_TG3_AUXCTL_SHDWSEL_MISC);
	if (!err)
		err = tg3_readphy(tp, MII_TG3_AUX_CTRL, val);

	return err;
}

static int tg3_phy_auxctl_write(struct tg3 *tp, int reg, u32 set)
{
	if (reg == MII_TG3_AUXCTL_SHDWSEL_MISC)
		set |= MII_TG3_AUXCTL_MISC_WREN;

	return tg3_writephy(tp, MII_TG3_AUX_CTRL, set | reg);
}

#define TG3_PHY_AUXCTL_SMDSP_ENABLE(tp) \
	tg3_phy_auxctl_write((tp), MII_TG3_AUXCTL_SHDWSEL_AUXCTL, \
			     MII_TG3_AUXCTL_ACTL_SMDSP_ENA | \
			     MII_TG3_AUXCTL_ACTL_TX_6DB)

#define TG3_PHY_AUXCTL_SMDSP_DISABLE(tp) \
	tg3_phy_auxctl_write((tp), MII_TG3_AUXCTL_SHDWSEL_AUXCTL, \
			     MII_TG3_AUXCTL_ACTL_TX_6DB);
static int tg3_bmcr_reset(struct tg3 *tp)
{
	u32 phy_control;
	int limit, err;

	/* OK, reset it, and poll the BMCR_RESET bit until it
	 * clears or we time out.
	 */
	phy_control = BMCR_RESET;
	err = tg3_writephy(tp, MII_BMCR, phy_control);
	if (err != 0)
		return -EBUSY;

	limit = 5000;
	while (limit--) {
		err = tg3_readphy(tp, MII_BMCR, &phy_control);
		if (err != 0)
			return -EBUSY;

		if ((phy_control & BMCR_RESET) == 0) {
			udelay(40);
			break;
		}
		udelay(10);
	}
	if (limit < 0)
		return -EBUSY;

	return 0;
}

static int tg3_mdio_read(struct mii_bus *bp, int mii_id, int reg)
{
	struct tg3 *tp = bp->priv;
	u32 val;

	spin_lock_bh(&tp->lock);

	if (tg3_readphy(tp, reg, &val))
		val = -EIO;

	spin_unlock_bh(&tp->lock);

	return val;
}

static int tg3_mdio_write(struct mii_bus *bp, int mii_id, int reg, u16 val)
{
	struct tg3 *tp = bp->priv;
	u32 ret = 0;

	spin_lock_bh(&tp->lock);

	if (tg3_writephy(tp, reg, val))
		ret = -EIO;

	spin_unlock_bh(&tp->lock);

	return ret;
}

static int tg3_mdio_reset(struct mii_bus *bp)
{
	return 0;
}
static void tg3_mdio_config_5785(struct tg3 *tp)
{
	u32 val;
	struct phy_device *phydev;

	phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
	switch (phydev->drv->phy_id & phydev->drv->phy_id_mask) {
	case PHY_ID_BCM50610:
	case PHY_ID_BCM50610M:
		val = MAC_PHYCFG2_50610_LED_MODES;
		break;
	case PHY_ID_BCMAC131:
		val = MAC_PHYCFG2_AC131_LED_MODES;
		break;
	case PHY_ID_RTL8211C:
		val = MAC_PHYCFG2_RTL8211C_LED_MODES;
		break;
	case PHY_ID_RTL8201E:
		val = MAC_PHYCFG2_RTL8201E_LED_MODES;
		break;
	default:
		return;
	}

	if (phydev->interface != PHY_INTERFACE_MODE_RGMII) {
		tw32(MAC_PHYCFG2, val);

		val = tr32(MAC_PHYCFG1);
		val &= ~(MAC_PHYCFG1_RGMII_INT |
			 MAC_PHYCFG1_RXCLK_TO_MASK | MAC_PHYCFG1_TXCLK_TO_MASK);
		val |= MAC_PHYCFG1_RXCLK_TIMEOUT | MAC_PHYCFG1_TXCLK_TIMEOUT;
		tw32(MAC_PHYCFG1, val);

		return;
	}

	if (!tg3_flag(tp, RGMII_INBAND_DISABLE))
		val |= MAC_PHYCFG2_EMODE_MASK_MASK |
		       MAC_PHYCFG2_FMODE_MASK_MASK |
		       MAC_PHYCFG2_GMODE_MASK_MASK |
		       MAC_PHYCFG2_ACT_MASK_MASK |
		       MAC_PHYCFG2_QUAL_MASK_MASK |
		       MAC_PHYCFG2_INBAND_ENABLE;

	tw32(MAC_PHYCFG2, val);

	val = tr32(MAC_PHYCFG1);
	val &= ~(MAC_PHYCFG1_RXCLK_TO_MASK | MAC_PHYCFG1_TXCLK_TO_MASK |
		 MAC_PHYCFG1_RGMII_EXT_RX_DEC | MAC_PHYCFG1_RGMII_SND_STAT_EN);
	if (!tg3_flag(tp, RGMII_INBAND_DISABLE)) {
		if (tg3_flag(tp, RGMII_EXT_IBND_RX_EN))
			val |= MAC_PHYCFG1_RGMII_EXT_RX_DEC;
		if (tg3_flag(tp, RGMII_EXT_IBND_TX_EN))
			val |= MAC_PHYCFG1_RGMII_SND_STAT_EN;
	}
	val |= MAC_PHYCFG1_RXCLK_TIMEOUT | MAC_PHYCFG1_TXCLK_TIMEOUT |
	       MAC_PHYCFG1_RGMII_INT | MAC_PHYCFG1_TXC_DRV;
	tw32(MAC_PHYCFG1, val);

	val = tr32(MAC_EXT_RGMII_MODE);
	val &= ~(MAC_RGMII_MODE_RX_INT_B |
		 MAC_RGMII_MODE_RX_QUALITY |
		 MAC_RGMII_MODE_RX_ACTIVITY |
		 MAC_RGMII_MODE_RX_ENG_DET |
		 MAC_RGMII_MODE_TX_ENABLE |
		 MAC_RGMII_MODE_TX_LOWPWR |
		 MAC_RGMII_MODE_TX_RESET);
	if (!tg3_flag(tp, RGMII_INBAND_DISABLE)) {
		if (tg3_flag(tp, RGMII_EXT_IBND_RX_EN))
			val |= MAC_RGMII_MODE_RX_INT_B |
			       MAC_RGMII_MODE_RX_QUALITY |
			       MAC_RGMII_MODE_RX_ACTIVITY |
			       MAC_RGMII_MODE_RX_ENG_DET;
		if (tg3_flag(tp, RGMII_EXT_IBND_TX_EN))
			val |= MAC_RGMII_MODE_TX_ENABLE |
			       MAC_RGMII_MODE_TX_LOWPWR |
			       MAC_RGMII_MODE_TX_RESET;
	}
	tw32(MAC_EXT_RGMII_MODE, val);
}
static void tg3_mdio_start(struct tg3 *tp)
{
	tp->mi_mode &= ~MAC_MI_MODE_AUTO_POLL;
	tw32_f(MAC_MI_MODE, tp->mi_mode);
	udelay(80);

	if (tg3_flag(tp, MDIOBUS_INITED) &&
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
		tg3_mdio_config_5785(tp);
}

static int tg3_mdio_init(struct tg3 *tp)
{
	int i;
	u32 reg;
	struct phy_device *phydev;

	if (tg3_flag(tp, 5717_PLUS)) {
		u32 is_serdes;

		tp->phy_addr = tp->pci_fn + 1;

		if (tp->pci_chip_rev_id != CHIPREV_ID_5717_A0)
			is_serdes = tr32(SG_DIG_STATUS) & SG_DIG_IS_SERDES;
		else
			is_serdes = tr32(TG3_CPMU_PHY_STRAP) &
				    TG3_CPMU_PHY_STRAP_IS_SERDES;
		if (is_serdes)
			tp->phy_addr += 7;
	} else
		tp->phy_addr = TG3_PHY_MII_ADDR;

	tg3_mdio_start(tp);

	if (!tg3_flag(tp, USE_PHYLIB) || tg3_flag(tp, MDIOBUS_INITED))
		return 0;

	tp->mdio_bus = mdiobus_alloc();
	if (tp->mdio_bus == NULL)
		return -ENOMEM;

	tp->mdio_bus->name     = "tg3 mdio bus";
	snprintf(tp->mdio_bus->id, MII_BUS_ID_SIZE, "%x",
		 (tp->pdev->bus->number << 8) | tp->pdev->devfn);
	tp->mdio_bus->priv     = tp;
	tp->mdio_bus->parent   = &tp->pdev->dev;
	tp->mdio_bus->read     = &tg3_mdio_read;
	tp->mdio_bus->write    = &tg3_mdio_write;
	tp->mdio_bus->reset    = &tg3_mdio_reset;
	tp->mdio_bus->phy_mask = ~(1 << TG3_PHY_MII_ADDR);
	tp->mdio_bus->irq      = &tp->mdio_irq[0];

	for (i = 0; i < PHY_MAX_ADDR; i++)
		tp->mdio_bus->irq[i] = PHY_POLL;

	/* The bus registration will look for all the PHYs on the mdio bus.
	 * Unfortunately, it does not ensure the PHY is powered up before
	 * accessing the PHY ID registers.  A chip reset is the
	 * quickest way to bring the device back to an operational state..
	 */
	if (tg3_readphy(tp, MII_BMCR, &reg) || (reg & BMCR_PDOWN))
		tg3_bmcr_reset(tp);

	i = mdiobus_register(tp->mdio_bus);
	if (i) {
		dev_warn(&tp->pdev->dev, "mdiobus_reg failed (0x%x)\n", i);
		mdiobus_free(tp->mdio_bus);
		return i;
	}

	phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];

	if (!phydev || !phydev->drv) {
		dev_warn(&tp->pdev->dev, "No PHY devices\n");
		mdiobus_unregister(tp->mdio_bus);
		mdiobus_free(tp->mdio_bus);
		return -ENODEV;
	}

	switch (phydev->drv->phy_id & phydev->drv->phy_id_mask) {
	case PHY_ID_BCM57780:
		phydev->interface = PHY_INTERFACE_MODE_GMII;
		phydev->dev_flags |= PHY_BRCM_AUTO_PWRDWN_ENABLE;
		break;
	case PHY_ID_BCM50610:
	case PHY_ID_BCM50610M:
		phydev->dev_flags |= PHY_BRCM_CLEAR_RGMII_MODE |
				     PHY_BRCM_RX_REFCLK_UNUSED |
				     PHY_BRCM_DIS_TXCRXC_NOENRGY |
				     PHY_BRCM_AUTO_PWRDWN_ENABLE;
		if (tg3_flag(tp, RGMII_INBAND_DISABLE))
			phydev->dev_flags |= PHY_BRCM_STD_IBND_DISABLE;
		if (tg3_flag(tp, RGMII_EXT_IBND_RX_EN))
			phydev->dev_flags |= PHY_BRCM_EXT_IBND_RX_ENABLE;
		if (tg3_flag(tp, RGMII_EXT_IBND_TX_EN))
			phydev->dev_flags |= PHY_BRCM_EXT_IBND_TX_ENABLE;
		/* fallthru */
	case PHY_ID_RTL8211C:
		phydev->interface = PHY_INTERFACE_MODE_RGMII;
		break;
	case PHY_ID_RTL8201E:
	case PHY_ID_BCMAC131:
		phydev->interface = PHY_INTERFACE_MODE_MII;
		phydev->dev_flags |= PHY_BRCM_AUTO_PWRDWN_ENABLE;
		tp->phy_flags |= TG3_PHYFLG_IS_FET;
		break;
	}

	tg3_flag_set(tp, MDIOBUS_INITED);

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
		tg3_mdio_config_5785(tp);

	return 0;
}

static void tg3_mdio_fini(struct tg3 *tp)
{
	if (tg3_flag(tp, MDIOBUS_INITED)) {
		tg3_flag_clear(tp, MDIOBUS_INITED);
		mdiobus_unregister(tp->mdio_bus);
		mdiobus_free(tp->mdio_bus);
	}
}
/* tp->lock is held. */
static inline void tg3_generate_fw_event(struct tg3 *tp)
{
	u32 val;

	val = tr32(GRC_RX_CPU_EVENT);
	val |= GRC_RX_CPU_DRIVER_EVENT;
	tw32_f(GRC_RX_CPU_EVENT, val);

	tp->last_event_jiffies = jiffies;
}

#define TG3_FW_EVENT_TIMEOUT_USEC 2500

/* tp->lock is held. */
static void tg3_wait_for_event_ack(struct tg3 *tp)
{
	int i;
	unsigned int delay_cnt;
	long time_remain;

	/* If enough time has passed, no wait is necessary. */
	time_remain = (long)(tp->last_event_jiffies + 1 +
		      usecs_to_jiffies(TG3_FW_EVENT_TIMEOUT_USEC)) -
		      (long)jiffies;
	if (time_remain < 0)
		return;

	/* Check if we can shorten the wait time. */
	delay_cnt = jiffies_to_usecs(time_remain);
	if (delay_cnt > TG3_FW_EVENT_TIMEOUT_USEC)
		delay_cnt = TG3_FW_EVENT_TIMEOUT_USEC;
	delay_cnt = (delay_cnt >> 3) + 1;

	for (i = 0; i < delay_cnt; i++) {
		if (!(tr32(GRC_RX_CPU_EVENT) & GRC_RX_CPU_DRIVER_EVENT))
			break;
		udelay(8);
	}
}

/* tp->lock is held. */
static void tg3_ump_link_report(struct tg3 *tp)
{
	u32 reg;
	u32 val;

	if (!tg3_flag(tp, 5780_CLASS) || !tg3_flag(tp, ENABLE_ASF))
		return;

	tg3_wait_for_event_ack(tp);

	tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX, FWCMD_NICDRV_LINK_UPDATE);

	tg3_write_mem(tp, NIC_SRAM_FW_CMD_LEN_MBOX, 14);

	val = 0;
	if (!tg3_readphy(tp, MII_BMCR, &reg))
		val = reg << 16;
	if (!tg3_readphy(tp, MII_BMSR, &reg))
		val |= (reg & 0xffff);
	tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX, val);

	val = 0;
	if (!tg3_readphy(tp, MII_ADVERTISE, &reg))
		val = reg << 16;
	if (!tg3_readphy(tp, MII_LPA, &reg))
		val |= (reg & 0xffff);
	tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 4, val);

	val = 0;
	if (!(tp->phy_flags & TG3_PHYFLG_MII_SERDES)) {
		if (!tg3_readphy(tp, MII_CTRL1000, &reg))
			val = reg << 16;
		if (!tg3_readphy(tp, MII_STAT1000, &reg))
			val |= (reg & 0xffff);
	}
	tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 8, val);

	if (!tg3_readphy(tp, MII_PHYADDR, &reg))
		val = reg << 16;
	else
		val = 0;
	tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 12, val);

	tg3_generate_fw_event(tp);
}
static void tg3_link_report(struct tg3 *tp)
{
	if (!netif_carrier_ok(tp->dev)) {
		netif_info(tp, link, tp->dev, "Link is down\n");
		tg3_ump_link_report(tp);
	} else if (netif_msg_link(tp)) {
		netdev_info(tp->dev, "Link is up at %d Mbps, %s duplex\n",
			    (tp->link_config.active_speed == SPEED_1000 ?
			     1000 :
			     (tp->link_config.active_speed == SPEED_100 ?
			      100 : 10)),
			    (tp->link_config.active_duplex == DUPLEX_FULL ?
			     "full" : "half"));

		netdev_info(tp->dev, "Flow control is %s for TX and %s for RX\n",
			    (tp->link_config.active_flowctrl & FLOW_CTRL_TX) ?
			    "on" : "off",
			    (tp->link_config.active_flowctrl & FLOW_CTRL_RX) ?
			    "on" : "off");

		if (tp->phy_flags & TG3_PHYFLG_EEE_CAP)
			netdev_info(tp->dev, "EEE is %s\n",
				    tp->setlpicnt ? "enabled" : "disabled");

		tg3_ump_link_report(tp);
	}
}
static u16 tg3_advert_flowctrl_1000T(u8 flow_ctrl)
{
	u16 miireg;

	if ((flow_ctrl & FLOW_CTRL_TX) && (flow_ctrl & FLOW_CTRL_RX))
		miireg = ADVERTISE_PAUSE_CAP;
	else if (flow_ctrl & FLOW_CTRL_TX)
		miireg = ADVERTISE_PAUSE_ASYM;
	else if (flow_ctrl & FLOW_CTRL_RX)
		miireg = ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM;
	else
		miireg = 0;

	return miireg;
}

static u16 tg3_advert_flowctrl_1000X(u8 flow_ctrl)
{
	u16 miireg;

	if ((flow_ctrl & FLOW_CTRL_TX) && (flow_ctrl & FLOW_CTRL_RX))
		miireg = ADVERTISE_1000XPAUSE;
	else if (flow_ctrl & FLOW_CTRL_TX)
		miireg = ADVERTISE_1000XPSE_ASYM;
	else if (flow_ctrl & FLOW_CTRL_RX)
		miireg = ADVERTISE_1000XPAUSE | ADVERTISE_1000XPSE_ASYM;
	else
		miireg = 0;

	return miireg;
}

static u8 tg3_resolve_flowctrl_1000X(u16 lcladv, u16 rmtadv)
{
	u8 cap = 0;

	if (lcladv & ADVERTISE_1000XPAUSE) {
		if (lcladv & ADVERTISE_1000XPSE_ASYM) {
			if (rmtadv & LPA_1000XPAUSE)
				cap = FLOW_CTRL_TX | FLOW_CTRL_RX;
			else if (rmtadv & LPA_1000XPAUSE_ASYM)
				cap = FLOW_CTRL_RX;
		} else {
			if (rmtadv & LPA_1000XPAUSE)
				cap = FLOW_CTRL_TX | FLOW_CTRL_RX;
		}
	} else if (lcladv & ADVERTISE_1000XPSE_ASYM) {
		if ((rmtadv & LPA_1000XPAUSE) && (rmtadv & LPA_1000XPAUSE_ASYM))
			cap = FLOW_CTRL_TX;
	}

	return cap;
}
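
/* Illustrative summary only: the resolution above follows the standard
 * 802.3 pause negotiation matrix.  With PAUSE = ADVERTISE_1000XPAUSE and
 * ASYM = ADVERTISE_1000XPSE_ASYM (LPA_* equivalents on the remote side):
 *
 *	local		remote		resolved cap
 *	PAUSE		PAUSE		FLOW_CTRL_TX | FLOW_CTRL_RX
 *	PAUSE|ASYM	ASYM		FLOW_CTRL_RX
 *	ASYM		PAUSE|ASYM	FLOW_CTRL_TX
 *	(otherwise)			0
 */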
static void tg3_setup_flow_control(struct tg3 *tp, u32 lcladv, u32 rmtadv)
{
	u8 autoneg;
	u8 flowctrl = 0;
	u32 old_rx_mode = tp->rx_mode;
	u32 old_tx_mode = tp->tx_mode;

	if (tg3_flag(tp, USE_PHYLIB))
		autoneg = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]->autoneg;
	else
		autoneg = tp->link_config.autoneg;

	if (autoneg == AUTONEG_ENABLE && tg3_flag(tp, PAUSE_AUTONEG)) {
		if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES)
			flowctrl = tg3_resolve_flowctrl_1000X(lcladv, rmtadv);
		else
			flowctrl = mii_resolve_flowctrl_fdx(lcladv, rmtadv);
	} else
		flowctrl = tp->link_config.flowctrl;

	tp->link_config.active_flowctrl = flowctrl;

	if (flowctrl & FLOW_CTRL_RX)
		tp->rx_mode |= RX_MODE_FLOW_CTRL_ENABLE;
	else
		tp->rx_mode &= ~RX_MODE_FLOW_CTRL_ENABLE;

	if (old_rx_mode != tp->rx_mode)
		tw32_f(MAC_RX_MODE, tp->rx_mode);

	if (flowctrl & FLOW_CTRL_TX)
		tp->tx_mode |= TX_MODE_FLOW_CTRL_ENABLE;
	else
		tp->tx_mode &= ~TX_MODE_FLOW_CTRL_ENABLE;

	if (old_tx_mode != tp->tx_mode)
		tw32_f(MAC_TX_MODE, tp->tx_mode);
}
static void tg3_adjust_link(struct net_device *dev)
{
	u8 oldflowctrl, linkmesg = 0;
	u32 mac_mode, lcl_adv, rmt_adv;
	struct tg3 *tp = netdev_priv(dev);
	struct phy_device *phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];

	spin_lock_bh(&tp->lock);

	mac_mode = tp->mac_mode & ~(MAC_MODE_PORT_MODE_MASK |
				    MAC_MODE_HALF_DUPLEX);

	oldflowctrl = tp->link_config.active_flowctrl;

	if (phydev->link) {
		lcl_adv = 0;
		rmt_adv = 0;

		if (phydev->speed == SPEED_100 || phydev->speed == SPEED_10)
			mac_mode |= MAC_MODE_PORT_MODE_MII;
		else if (phydev->speed == SPEED_1000 ||
			 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5785)
			mac_mode |= MAC_MODE_PORT_MODE_GMII;
		else
			mac_mode |= MAC_MODE_PORT_MODE_MII;

		if (phydev->duplex == DUPLEX_HALF)
			mac_mode |= MAC_MODE_HALF_DUPLEX;
		else {
			lcl_adv = tg3_advert_flowctrl_1000T(
				  tp->link_config.flowctrl);

			if (phydev->pause)
				rmt_adv = LPA_PAUSE_CAP;
			if (phydev->asym_pause)
				rmt_adv |= LPA_PAUSE_ASYM;
		}

		tg3_setup_flow_control(tp, lcl_adv, rmt_adv);
	} else
		mac_mode |= MAC_MODE_PORT_MODE_GMII;

	if (mac_mode != tp->mac_mode) {
		tp->mac_mode = mac_mode;
		tw32_f(MAC_MODE, tp->mac_mode);
		udelay(40);
	}

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785) {
		if (phydev->speed == SPEED_10)
			tw32(MAC_MI_STAT,
			     MAC_MI_STAT_10MBPS_MODE |
			     MAC_MI_STAT_LNKSTAT_ATTN_ENAB);
		else
			tw32(MAC_MI_STAT, MAC_MI_STAT_LNKSTAT_ATTN_ENAB);
	}

	if (phydev->speed == SPEED_1000 && phydev->duplex == DUPLEX_HALF)
		tw32(MAC_TX_LENGTHS,
		     ((2 << TX_LENGTHS_IPG_CRS_SHIFT) |
		      (6 << TX_LENGTHS_IPG_SHIFT) |
		      (0xff << TX_LENGTHS_SLOT_TIME_SHIFT)));
	else
		tw32(MAC_TX_LENGTHS,
		     ((2 << TX_LENGTHS_IPG_CRS_SHIFT) |
		      (6 << TX_LENGTHS_IPG_SHIFT) |
		      (32 << TX_LENGTHS_SLOT_TIME_SHIFT)));

	if ((phydev->link && tp->link_config.active_speed == SPEED_INVALID) ||
	    (!phydev->link && tp->link_config.active_speed != SPEED_INVALID) ||
	    phydev->speed != tp->link_config.active_speed ||
	    phydev->duplex != tp->link_config.active_duplex ||
	    oldflowctrl != tp->link_config.active_flowctrl)
		linkmesg = 1;

	tp->link_config.active_speed = phydev->speed;
	tp->link_config.active_duplex = phydev->duplex;

	spin_unlock_bh(&tp->lock);

	if (linkmesg)
		tg3_link_report(tp);
}
static int tg3_phy_init(struct tg3 *tp)
{
	struct phy_device *phydev;

	if (tp->phy_flags & TG3_PHYFLG_IS_CONNECTED)
		return 0;

	/* Bring the PHY back to a known state. */
	tg3_bmcr_reset(tp);

	phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];

	/* Attach the MAC to the PHY. */
	phydev = phy_connect(tp->dev, dev_name(&phydev->dev), tg3_adjust_link,
			     phydev->dev_flags, phydev->interface);
	if (IS_ERR(phydev)) {
		dev_err(&tp->pdev->dev, "Could not attach to PHY\n");
		return PTR_ERR(phydev);
	}

	/* Mask with MAC supported features. */
	switch (phydev->interface) {
	case PHY_INTERFACE_MODE_GMII:
	case PHY_INTERFACE_MODE_RGMII:
		if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
			phydev->supported &= (PHY_GBIT_FEATURES |
					      SUPPORTED_Pause |
					      SUPPORTED_Asym_Pause);
			break;
		}
		/* fallthru */
	case PHY_INTERFACE_MODE_MII:
		phydev->supported &= (PHY_BASIC_FEATURES |
				      SUPPORTED_Pause |
				      SUPPORTED_Asym_Pause);
		break;
	default:
		phy_disconnect(tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]);
		return -EINVAL;
	}

	tp->phy_flags |= TG3_PHYFLG_IS_CONNECTED;

	phydev->advertising = phydev->supported;

	return 0;
}

static void tg3_phy_start(struct tg3 *tp)
{
	struct phy_device *phydev;

	if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
		return;

	phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];

	if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) {
		tp->phy_flags &= ~TG3_PHYFLG_IS_LOW_POWER;
		phydev->speed = tp->link_config.orig_speed;
		phydev->duplex = tp->link_config.orig_duplex;
		phydev->autoneg = tp->link_config.orig_autoneg;
		phydev->advertising = tp->link_config.orig_advertising;
	}

	phy_start(phydev);

	phy_start_aneg(phydev);
}

static void tg3_phy_stop(struct tg3 *tp)
{
	if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
		return;

	phy_stop(tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]);
}

static void tg3_phy_fini(struct tg3 *tp)
{
	if (tp->phy_flags & TG3_PHYFLG_IS_CONNECTED) {
		phy_disconnect(tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]);
		tp->phy_flags &= ~TG3_PHYFLG_IS_CONNECTED;
	}
}
static void tg3_phy_fet_toggle_apd(struct tg3 *tp, bool enable)
{
	u32 phytest;

	if (!tg3_readphy(tp, MII_TG3_FET_TEST, &phytest)) {
		u32 phy;

		tg3_writephy(tp, MII_TG3_FET_TEST,
			     phytest | MII_TG3_FET_SHADOW_EN);
		if (!tg3_readphy(tp, MII_TG3_FET_SHDW_AUXSTAT2, &phy)) {
			if (enable)
				phy |= MII_TG3_FET_SHDW_AUXSTAT2_APD;
			else
				phy &= ~MII_TG3_FET_SHDW_AUXSTAT2_APD;
			tg3_writephy(tp, MII_TG3_FET_SHDW_AUXSTAT2, phy);
		}
		tg3_writephy(tp, MII_TG3_FET_TEST, phytest);
	}
}

static void tg3_phy_toggle_apd(struct tg3 *tp, bool enable)
{
	u32 reg;

	if (!tg3_flag(tp, 5705_PLUS) ||
	    (tg3_flag(tp, 5717_PLUS) &&
	     (tp->phy_flags & TG3_PHYFLG_MII_SERDES)))
		return;

	if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
		tg3_phy_fet_toggle_apd(tp, enable);
		return;
	}

	reg = MII_TG3_MISC_SHDW_WREN |
	      MII_TG3_MISC_SHDW_SCR5_SEL |
	      MII_TG3_MISC_SHDW_SCR5_LPED |
	      MII_TG3_MISC_SHDW_SCR5_DLPTLM |
	      MII_TG3_MISC_SHDW_SCR5_SDTL |
	      MII_TG3_MISC_SHDW_SCR5_C125OE;
	if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5784 || !enable)
		reg |= MII_TG3_MISC_SHDW_SCR5_DLLAPD;

	tg3_writephy(tp, MII_TG3_MISC_SHDW, reg);


	reg = MII_TG3_MISC_SHDW_WREN |
	      MII_TG3_MISC_SHDW_APD_SEL |
	      MII_TG3_MISC_SHDW_APD_WKTM_84MS;
	if (enable)
		reg |= MII_TG3_MISC_SHDW_APD_ENABLE;

	tg3_writephy(tp, MII_TG3_MISC_SHDW, reg);
}

static void tg3_phy_toggle_automdix(struct tg3 *tp, int enable)
{
	u32 phy;

	if (!tg3_flag(tp, 5705_PLUS) ||
	    (tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
		return;

	if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
		u32 ephy;

		if (!tg3_readphy(tp, MII_TG3_FET_TEST, &ephy)) {
			u32 reg = MII_TG3_FET_SHDW_MISCCTRL;

			tg3_writephy(tp, MII_TG3_FET_TEST,
				     ephy | MII_TG3_FET_SHADOW_EN);
			if (!tg3_readphy(tp, reg, &phy)) {
				if (enable)
					phy |= MII_TG3_FET_SHDW_MISCCTRL_MDIX;
				else
					phy &= ~MII_TG3_FET_SHDW_MISCCTRL_MDIX;
				tg3_writephy(tp, reg, phy);
			}
			tg3_writephy(tp, MII_TG3_FET_TEST, ephy);
		}
	} else {
		int ret;

		ret = tg3_phy_auxctl_read(tp,
					  MII_TG3_AUXCTL_SHDWSEL_MISC, &phy);
		if (!ret) {
			if (enable)
				phy |= MII_TG3_AUXCTL_MISC_FORCE_AMDIX;
			else
				phy &= ~MII_TG3_AUXCTL_MISC_FORCE_AMDIX;
			tg3_phy_auxctl_write(tp,
					     MII_TG3_AUXCTL_SHDWSEL_MISC, phy);
		}
	}
}

static void tg3_phy_set_wirespeed(struct tg3 *tp)
{
	int ret;
	u32 val;

	if (tp->phy_flags & TG3_PHYFLG_NO_ETH_WIRE_SPEED)
		return;

	ret = tg3_phy_auxctl_read(tp, MII_TG3_AUXCTL_SHDWSEL_MISC, &val);
	if (!ret)
		tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_MISC,
				     val | MII_TG3_AUXCTL_MISC_WIRESPD_EN);
}
static void tg3_phy_apply_otp(struct tg3 *tp)
{
	u32 otp, phy;

	if (!tp->phy_otp)
		return;

	otp = tp->phy_otp;

	if (TG3_PHY_AUXCTL_SMDSP_ENABLE(tp))
		return;

	phy = ((otp & TG3_OTP_AGCTGT_MASK) >> TG3_OTP_AGCTGT_SHIFT);
	phy |= MII_TG3_DSP_TAP1_AGCTGT_DFLT;
	tg3_phydsp_write(tp, MII_TG3_DSP_TAP1, phy);

	phy = ((otp & TG3_OTP_HPFFLTR_MASK) >> TG3_OTP_HPFFLTR_SHIFT) |
	      ((otp & TG3_OTP_HPFOVER_MASK) >> TG3_OTP_HPFOVER_SHIFT);
	tg3_phydsp_write(tp, MII_TG3_DSP_AADJ1CH0, phy);

	phy = ((otp & TG3_OTP_LPFDIS_MASK) >> TG3_OTP_LPFDIS_SHIFT);
	phy |= MII_TG3_DSP_AADJ1CH3_ADCCKADJ;
	tg3_phydsp_write(tp, MII_TG3_DSP_AADJ1CH3, phy);

	phy = ((otp & TG3_OTP_VDAC_MASK) >> TG3_OTP_VDAC_SHIFT);
	tg3_phydsp_write(tp, MII_TG3_DSP_EXP75, phy);

	phy = ((otp & TG3_OTP_10BTAMP_MASK) >> TG3_OTP_10BTAMP_SHIFT);
	tg3_phydsp_write(tp, MII_TG3_DSP_EXP96, phy);

	phy = ((otp & TG3_OTP_ROFF_MASK) >> TG3_OTP_ROFF_SHIFT) |
	      ((otp & TG3_OTP_RCOFF_MASK) >> TG3_OTP_RCOFF_SHIFT);
	tg3_phydsp_write(tp, MII_TG3_DSP_EXP97, phy);

	TG3_PHY_AUXCTL_SMDSP_DISABLE(tp);
}

static void tg3_phy_eee_adjust(struct tg3 *tp, u32 current_link_up)
{
	u32 val;

	if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP))
		return;

	tp->setlpicnt = 0;

	if (tp->link_config.autoneg == AUTONEG_ENABLE &&
	    current_link_up == 1 &&
	    tp->link_config.active_duplex == DUPLEX_FULL &&
	    (tp->link_config.active_speed == SPEED_100 ||
	     tp->link_config.active_speed == SPEED_1000)) {
		u32 eeectl;

		if (tp->link_config.active_speed == SPEED_1000)
			eeectl = TG3_CPMU_EEE_CTRL_EXIT_16_5_US;
		else
			eeectl = TG3_CPMU_EEE_CTRL_EXIT_36_US;

		tw32(TG3_CPMU_EEE_CTRL, eeectl);

		tg3_phy_cl45_read(tp, MDIO_MMD_AN,
				  TG3_CL45_D7_EEERES_STAT, &val);

		if (val == TG3_CL45_D7_EEERES_STAT_LP_1000T ||
		    val == TG3_CL45_D7_EEERES_STAT_LP_100TX)
			tp->setlpicnt = 2;
	}

	if (!tp->setlpicnt) {
		if (current_link_up == 1 &&
		    !TG3_PHY_AUXCTL_SMDSP_ENABLE(tp)) {
			tg3_phydsp_write(tp, MII_TG3_DSP_TAP26, 0x0000);
			TG3_PHY_AUXCTL_SMDSP_DISABLE(tp);
		}

		val = tr32(TG3_CPMU_EEE_MODE);
		tw32(TG3_CPMU_EEE_MODE, val & ~TG3_CPMU_EEEMD_LPI_ENABLE);
	}
}

static void tg3_phy_eee_enable(struct tg3 *tp)
{
	u32 val;

	if (tp->link_config.active_speed == SPEED_1000 &&
	    (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
	     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
	     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765) &&
	    !TG3_PHY_AUXCTL_SMDSP_ENABLE(tp)) {
		val = MII_TG3_DSP_TAP26_ALNOKO |
		      MII_TG3_DSP_TAP26_RMRXSTO;
		tg3_phydsp_write(tp, MII_TG3_DSP_TAP26, val);
		TG3_PHY_AUXCTL_SMDSP_DISABLE(tp);
	}

	val = tr32(TG3_CPMU_EEE_MODE);
	tw32(TG3_CPMU_EEE_MODE, val | TG3_CPMU_EEEMD_LPI_ENABLE);
}
1891 static int tg3_wait_macro_done(struct tg3 *tp)
1893 int limit = 100;
1895 while (limit--) {
1896 u32 tmp32;
1898 if (!tg3_readphy(tp, MII_TG3_DSP_CONTROL, &tmp32)) {
1899 if ((tmp32 & 0x1000) == 0)
1900 break;
1903 if (limit < 0)
1904 return -EBUSY;
1906 return 0;
1909 static int tg3_phy_write_and_check_testpat(struct tg3 *tp, int *resetp)
1911 static const u32 test_pat[4][6] = {
1912 { 0x00005555, 0x00000005, 0x00002aaa, 0x0000000a, 0x00003456, 0x00000003 },
1913 { 0x00002aaa, 0x0000000a, 0x00003333, 0x00000003, 0x0000789a, 0x00000005 },
1914 { 0x00005a5a, 0x00000005, 0x00002a6a, 0x0000000a, 0x00001bcd, 0x00000003 },
1915 { 0x00002a5a, 0x0000000a, 0x000033c3, 0x00000003, 0x00002ef1, 0x00000005 }
1917 int chan;
1919 for (chan = 0; chan < 4; chan++) {
1920 int i;
1922 tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
1923 (chan * 0x2000) | 0x0200);
1924 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0002);
1926 for (i = 0; i < 6; i++)
1927 tg3_writephy(tp, MII_TG3_DSP_RW_PORT,
1928 test_pat[chan][i]);
1930 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0202);
1931 if (tg3_wait_macro_done(tp)) {
1932 *resetp = 1;
1933 return -EBUSY;
1936 tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
1937 (chan * 0x2000) | 0x0200);
1938 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0082);
1939 if (tg3_wait_macro_done(tp)) {
1940 *resetp = 1;
1941 return -EBUSY;
1944 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0802);
1945 if (tg3_wait_macro_done(tp)) {
1946 *resetp = 1;
1947 return -EBUSY;
1950 for (i = 0; i < 6; i += 2) {
1951 u32 low, high;
1953 if (tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &low) ||
1954 tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &high) ||
1955 tg3_wait_macro_done(tp)) {
1956 *resetp = 1;
1957 return -EBUSY;
1959 low &= 0x7fff;
1960 high &= 0x000f;
1961 if (low != test_pat[chan][i] ||
1962 high != test_pat[chan][i+1]) {
1963 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000b);
1964 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x4001);
1965 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x4005);
1967 return -EBUSY;
1972 return 0;
1975 static int tg3_phy_reset_chanpat(struct tg3 *tp)
1977 int chan;
1979 for (chan = 0; chan < 4; chan++) {
1980 int i;
1982 tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
1983 (chan * 0x2000) | 0x0200);
1984 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0002);
1985 for (i = 0; i < 6; i++)
1986 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x000);
1987 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0202);
1988 if (tg3_wait_macro_done(tp))
1989 return -EBUSY;
1992 return 0;
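/* PHY workaround for 5703/5704/5705: force 1000/full master mode,
 * run the DSP test-pattern check up to 10 times (resetting the PHY
 * whenever the check fails), then restore the original PHY state.
 */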
1995 static int tg3_phy_reset_5703_4_5(struct tg3 *tp)
1997 u32 reg32, phy9_orig;
1998 int retries, do_phy_reset, err;
2000 retries = 10;
2001 do_phy_reset = 1;
2002 do {
2003 if (do_phy_reset) {
2004 err = tg3_bmcr_reset(tp);
2005 if (err)
2006 return err;
2007 do_phy_reset = 0;
2010 /* Disable transmitter and interrupt. */
2011 if (tg3_readphy(tp, MII_TG3_EXT_CTRL, &reg32))
2012 continue;
2014 reg32 |= 0x3000;
2015 tg3_writephy(tp, MII_TG3_EXT_CTRL, reg32);
2017 /* Set full-duplex, 1000 Mbps. */
2018 tg3_writephy(tp, MII_BMCR,
2019 BMCR_FULLDPLX | BMCR_SPEED1000);
2021 /* Set to master mode. */
2022 if (tg3_readphy(tp, MII_CTRL1000, &phy9_orig))
2023 continue;
2025 tg3_writephy(tp, MII_CTRL1000,
2026 CTL1000_AS_MASTER | CTL1000_ENABLE_MASTER);
2028 err = TG3_PHY_AUXCTL_SMDSP_ENABLE(tp);
2029 if (err)
2030 return err;
2032 /* Block the PHY control access. */
2033 tg3_phydsp_write(tp, 0x8005, 0x0800);
2035 err = tg3_phy_write_and_check_testpat(tp, &do_phy_reset);
2036 if (!err)
2037 break;
2038 } while (--retries);
2040 err = tg3_phy_reset_chanpat(tp);
2041 if (err)
2042 return err;
2044 tg3_phydsp_write(tp, 0x8005, 0x0000);
2046 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8200);
2047 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0000);
2049 TG3_PHY_AUXCTL_SMDSP_DISABLE(tp);
2051 tg3_writephy(tp, MII_CTRL1000, phy9_orig);
2053 if (!tg3_readphy(tp, MII_TG3_EXT_CTRL, &reg32)) {
2054 reg32 &= ~0x3000;
2055 tg3_writephy(tp, MII_TG3_EXT_CTRL, reg32);
2056 } else if (!err)
2057 err = -EBUSY;
2059 return err;
2062 /* This will reset the tigon3 PHY if there is no valid
2063 * link, unless the caller's force_reset argument is non-zero.
2064 */
2065 static int tg3_phy_reset(struct tg3 *tp)
2067 u32 val, cpmuctrl;
2068 int err;
2070 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
2071 val = tr32(GRC_MISC_CFG);
2072 tw32_f(GRC_MISC_CFG, val & ~GRC_MISC_CFG_EPHY_IDDQ);
2073 udelay(40);
2075 err = tg3_readphy(tp, MII_BMSR, &val);
2076 err |= tg3_readphy(tp, MII_BMSR, &val);
2077 if (err != 0)
2078 return -EBUSY;
2080 if (netif_running(tp->dev) && netif_carrier_ok(tp->dev)) {
2081 netif_carrier_off(tp->dev);
2082 tg3_link_report(tp);
2085 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
2086 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
2087 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
2088 err = tg3_phy_reset_5703_4_5(tp);
2089 if (err)
2090 return err;
2091 goto out;
2094 cpmuctrl = 0;
2095 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 &&
2096 GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5784_AX) {
2097 cpmuctrl = tr32(TG3_CPMU_CTRL);
2098 if (cpmuctrl & CPMU_CTRL_GPHY_10MB_RXONLY)
2099 tw32(TG3_CPMU_CTRL,
2100 cpmuctrl & ~CPMU_CTRL_GPHY_10MB_RXONLY);
2103 err = tg3_bmcr_reset(tp);
2104 if (err)
2105 return err;
2107 if (cpmuctrl & CPMU_CTRL_GPHY_10MB_RXONLY) {
2108 val = MII_TG3_DSP_EXP8_AEDW | MII_TG3_DSP_EXP8_REJ2MHz;
2109 tg3_phydsp_write(tp, MII_TG3_DSP_EXP8, val);
2111 tw32(TG3_CPMU_CTRL, cpmuctrl);
2114 if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5784_AX ||
2115 GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5761_AX) {
2116 val = tr32(TG3_CPMU_LSPD_1000MB_CLK);
2117 if ((val & CPMU_LSPD_1000MB_MACCLK_MASK) ==
2118 CPMU_LSPD_1000MB_MACCLK_12_5) {
2119 val &= ~CPMU_LSPD_1000MB_MACCLK_MASK;
2120 udelay(40);
2121 tw32_f(TG3_CPMU_LSPD_1000MB_CLK, val);
2125 if (tg3_flag(tp, 5717_PLUS) &&
2126 (tp->phy_flags & TG3_PHYFLG_MII_SERDES))
2127 return 0;
2129 tg3_phy_apply_otp(tp);
2131 if (tp->phy_flags & TG3_PHYFLG_ENABLE_APD)
2132 tg3_phy_toggle_apd(tp, true);
2133 else
2134 tg3_phy_toggle_apd(tp, false);
2136 out:
2137 if ((tp->phy_flags & TG3_PHYFLG_ADC_BUG) &&
2138 !TG3_PHY_AUXCTL_SMDSP_ENABLE(tp)) {
2139 tg3_phydsp_write(tp, 0x201f, 0x2aaa);
2140 tg3_phydsp_write(tp, 0x000a, 0x0323);
2141 TG3_PHY_AUXCTL_SMDSP_DISABLE(tp);
2144 if (tp->phy_flags & TG3_PHYFLG_5704_A0_BUG) {
2145 tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8d68);
2146 tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8d68);
2149 if (tp->phy_flags & TG3_PHYFLG_BER_BUG) {
2150 if (!TG3_PHY_AUXCTL_SMDSP_ENABLE(tp)) {
2151 tg3_phydsp_write(tp, 0x000a, 0x310b);
2152 tg3_phydsp_write(tp, 0x201f, 0x9506);
2153 tg3_phydsp_write(tp, 0x401f, 0x14e2);
2154 TG3_PHY_AUXCTL_SMDSP_DISABLE(tp);
2156 } else if (tp->phy_flags & TG3_PHYFLG_JITTER_BUG) {
2157 if (!TG3_PHY_AUXCTL_SMDSP_ENABLE(tp)) {
2158 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000a);
2159 if (tp->phy_flags & TG3_PHYFLG_ADJUST_TRIM) {
2160 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x110b);
2161 tg3_writephy(tp, MII_TG3_TEST1,
2162 MII_TG3_TEST1_TRIM_EN | 0x4);
2163 } else
2164 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x010b);
2166 TG3_PHY_AUXCTL_SMDSP_DISABLE(tp);
2170 /* Set Extended packet length bit (bit 14) on all chips that */
2171 /* support jumbo frames */
2172 if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5401) {
2173 /* Cannot do read-modify-write on 5401 */
2174 tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_AUXCTL, 0x4c20);
2175 } else if (tg3_flag(tp, JUMBO_CAPABLE)) {
2176 /* Set bit 14 with read-modify-write to preserve other bits */
2177 err = tg3_phy_auxctl_read(tp,
2178 MII_TG3_AUXCTL_SHDWSEL_AUXCTL, &val);
2179 if (!err)
2180 tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_AUXCTL,
2181 val | MII_TG3_AUXCTL_ACTL_EXTPKTLEN);
2184 /* Set phy register 0x10 bit 0 to high fifo elasticity to support
2185 * jumbo frames transmission.
2186 */
2187 if (tg3_flag(tp, JUMBO_CAPABLE)) {
2188 if (!tg3_readphy(tp, MII_TG3_EXT_CTRL, &val))
2189 tg3_writephy(tp, MII_TG3_EXT_CTRL,
2190 val | MII_TG3_EXT_CTRL_FIFO_ELASTIC);
2193 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
2194 /* adjust output voltage */
2195 tg3_writephy(tp, MII_TG3_FET_PTEST, 0x12);
2198 tg3_phy_toggle_automdix(tp, 1);
2199 tg3_phy_set_wirespeed(tp);
2200 return 0;
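/* Each PCI function publishes its power-source needs in a 4-bit
 * field of a shared status word (the APE GPIO_MSG scratchpad on
 * 5717/5719, the CPMU driver-status register otherwise).  The
 * ALL_* masks below cover the fields of all four functions.
 */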
2203 #define TG3_GPIO_MSG_DRVR_PRES 0x00000001
2204 #define TG3_GPIO_MSG_NEED_VAUX 0x00000002
2205 #define TG3_GPIO_MSG_MASK (TG3_GPIO_MSG_DRVR_PRES | \
2206 TG3_GPIO_MSG_NEED_VAUX)
2207 #define TG3_GPIO_MSG_ALL_DRVR_PRES_MASK \
2208 ((TG3_GPIO_MSG_DRVR_PRES << 0) | \
2209 (TG3_GPIO_MSG_DRVR_PRES << 4) | \
2210 (TG3_GPIO_MSG_DRVR_PRES << 8) | \
2211 (TG3_GPIO_MSG_DRVR_PRES << 12))
2213 #define TG3_GPIO_MSG_ALL_NEED_VAUX_MASK \
2214 ((TG3_GPIO_MSG_NEED_VAUX << 0) | \
2215 (TG3_GPIO_MSG_NEED_VAUX << 4) | \
2216 (TG3_GPIO_MSG_NEED_VAUX << 8) | \
2217 (TG3_GPIO_MSG_NEED_VAUX << 12))
2219 static inline u32 tg3_set_function_status(struct tg3 *tp, u32 newstat)
2221 u32 status, shift;
2223 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
2224 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719)
2225 status = tg3_ape_read32(tp, TG3_APE_GPIO_MSG);
2226 else
2227 status = tr32(TG3_CPMU_DRV_STATUS);
2229 shift = TG3_APE_GPIO_MSG_SHIFT + 4 * tp->pci_fn;
2230 status &= ~(TG3_GPIO_MSG_MASK << shift);
2231 status |= (newstat << shift);
2233 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
2234 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719)
2235 tg3_ape_write32(tp, TG3_APE_GPIO_MSG, status);
2236 else
2237 tw32(TG3_CPMU_DRV_STATUS, status);
2239 return status >> TG3_APE_GPIO_MSG_SHIFT;
2242 static inline int tg3_pwrsrc_switch_to_vmain(struct tg3 *tp)
2244 if (!tg3_flag(tp, IS_NIC))
2245 return 0;
2247 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
2248 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
2249 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720) {
2250 if (tg3_ape_lock(tp, TG3_APE_LOCK_GPIO))
2251 return -EIO;
2253 tg3_set_function_status(tp, TG3_GPIO_MSG_DRVR_PRES);
2255 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl,
2256 TG3_GRC_LCLCTL_PWRSW_DELAY);
2258 tg3_ape_unlock(tp, TG3_APE_LOCK_GPIO);
2259 } else {
2260 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl,
2261 TG3_GRC_LCLCTL_PWRSW_DELAY);
2264 return 0;
2267 static void tg3_pwrsrc_die_with_vmain(struct tg3 *tp)
2269 u32 grc_local_ctrl;
2271 if (!tg3_flag(tp, IS_NIC) ||
2272 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
2273 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701)
2274 return;
2276 grc_local_ctrl = tp->grc_local_ctrl | GRC_LCLCTRL_GPIO_OE1;
2278 tw32_wait_f(GRC_LOCAL_CTRL,
2279 grc_local_ctrl | GRC_LCLCTRL_GPIO_OUTPUT1,
2280 TG3_GRC_LCLCTL_PWRSW_DELAY);
2282 tw32_wait_f(GRC_LOCAL_CTRL,
2283 grc_local_ctrl,
2284 TG3_GRC_LCLCTL_PWRSW_DELAY);
2286 tw32_wait_f(GRC_LOCAL_CTRL,
2287 grc_local_ctrl | GRC_LCLCTRL_GPIO_OUTPUT1,
2288 TG3_GRC_LCLCTL_PWRSW_DELAY);
2291 static void tg3_pwrsrc_switch_to_vaux(struct tg3 *tp)
2293 if (!tg3_flag(tp, IS_NIC))
2294 return;
2296 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
2297 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
2298 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
2299 (GRC_LCLCTRL_GPIO_OE0 |
2300 GRC_LCLCTRL_GPIO_OE1 |
2301 GRC_LCLCTRL_GPIO_OE2 |
2302 GRC_LCLCTRL_GPIO_OUTPUT0 |
2303 GRC_LCLCTRL_GPIO_OUTPUT1),
2304 TG3_GRC_LCLCTL_PWRSW_DELAY);
2305 } else if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761 ||
2306 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761S) {
2307 /* The 5761 non-e device swaps GPIO 0 and GPIO 2. */
2308 u32 grc_local_ctrl = GRC_LCLCTRL_GPIO_OE0 |
2309 GRC_LCLCTRL_GPIO_OE1 |
2310 GRC_LCLCTRL_GPIO_OE2 |
2311 GRC_LCLCTRL_GPIO_OUTPUT0 |
2312 GRC_LCLCTRL_GPIO_OUTPUT1 |
2313 tp->grc_local_ctrl;
2314 tw32_wait_f(GRC_LOCAL_CTRL, grc_local_ctrl,
2315 TG3_GRC_LCLCTL_PWRSW_DELAY);
2317 grc_local_ctrl |= GRC_LCLCTRL_GPIO_OUTPUT2;
2318 tw32_wait_f(GRC_LOCAL_CTRL, grc_local_ctrl,
2319 TG3_GRC_LCLCTL_PWRSW_DELAY);
2321 grc_local_ctrl &= ~GRC_LCLCTRL_GPIO_OUTPUT0;
2322 tw32_wait_f(GRC_LOCAL_CTRL, grc_local_ctrl,
2323 TG3_GRC_LCLCTL_PWRSW_DELAY);
2324 } else {
2325 u32 no_gpio2;
2326 u32 grc_local_ctrl = 0;
2328 /* Workaround to prevent overdrawing Amps. */
2329 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714) {
2330 grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE3;
2331 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
2332 grc_local_ctrl,
2333 TG3_GRC_LCLCTL_PWRSW_DELAY);
2336 /* On 5753 and variants, GPIO2 cannot be used. */
2337 no_gpio2 = tp->nic_sram_data_cfg &
2338 NIC_SRAM_DATA_CFG_NO_GPIO2;
2340 grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE0 |
2341 GRC_LCLCTRL_GPIO_OE1 |
2342 GRC_LCLCTRL_GPIO_OE2 |
2343 GRC_LCLCTRL_GPIO_OUTPUT1 |
2344 GRC_LCLCTRL_GPIO_OUTPUT2;
2345 if (no_gpio2) {
2346 grc_local_ctrl &= ~(GRC_LCLCTRL_GPIO_OE2 |
2347 GRC_LCLCTRL_GPIO_OUTPUT2);
2349 tw32_wait_f(GRC_LOCAL_CTRL,
2350 tp->grc_local_ctrl | grc_local_ctrl,
2351 TG3_GRC_LCLCTL_PWRSW_DELAY);
2353 grc_local_ctrl |= GRC_LCLCTRL_GPIO_OUTPUT0;
2355 tw32_wait_f(GRC_LOCAL_CTRL,
2356 tp->grc_local_ctrl | grc_local_ctrl,
2357 TG3_GRC_LCLCTL_PWRSW_DELAY);
2359 if (!no_gpio2) {
2360 grc_local_ctrl &= ~GRC_LCLCTRL_GPIO_OUTPUT2;
2361 tw32_wait_f(GRC_LOCAL_CTRL,
2362 tp->grc_local_ctrl | grc_local_ctrl,
2363 TG3_GRC_LCLCTL_PWRSW_DELAY);
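/* 5717-class aux power handling: publish this function's
 * Vaux/Vmain preference under the GPIO APE lock; the power source
 * is only switched once no other function's driver is present.
 */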
2368 static void tg3_frob_aux_power_5717(struct tg3 *tp, bool wol_enable)
2370 u32 msg = 0;
2372 /* Serialize power state transitions */
2373 if (tg3_ape_lock(tp, TG3_APE_LOCK_GPIO))
2374 return;
2376 if (tg3_flag(tp, ENABLE_ASF) || tg3_flag(tp, ENABLE_APE) || wol_enable)
2377 msg = TG3_GPIO_MSG_NEED_VAUX;
2379 msg = tg3_set_function_status(tp, msg);
2381 if (msg & TG3_GPIO_MSG_ALL_DRVR_PRES_MASK)
2382 goto done;
2384 if (msg & TG3_GPIO_MSG_ALL_NEED_VAUX_MASK)
2385 tg3_pwrsrc_switch_to_vaux(tp);
2386 else
2387 tg3_pwrsrc_die_with_vmain(tp);
2389 done:
2390 tg3_ape_unlock(tp, TG3_APE_LOCK_GPIO);
2393 static void tg3_frob_aux_power(struct tg3 *tp, bool include_wol)
2395 bool need_vaux = false;
2397 /* The GPIOs do something completely different on 57765. */
2398 if (!tg3_flag(tp, IS_NIC) ||
2399 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765)
2400 return;
2402 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
2403 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
2404 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720) {
2405 tg3_frob_aux_power_5717(tp, include_wol ?
2406 tg3_flag(tp, WOL_ENABLE) != 0 : 0);
2407 return;
2410 if (tp->pdev_peer && tp->pdev_peer != tp->pdev) {
2411 struct net_device *dev_peer;
2413 dev_peer = pci_get_drvdata(tp->pdev_peer);
2415 /* remove_one() may have been run on the peer. */
2416 if (dev_peer) {
2417 struct tg3 *tp_peer = netdev_priv(dev_peer);
2419 if (tg3_flag(tp_peer, INIT_COMPLETE))
2420 return;
2422 if ((include_wol && tg3_flag(tp_peer, WOL_ENABLE)) ||
2423 tg3_flag(tp_peer, ENABLE_ASF))
2424 need_vaux = true;
2428 if ((include_wol && tg3_flag(tp, WOL_ENABLE)) ||
2429 tg3_flag(tp, ENABLE_ASF))
2430 need_vaux = true;
2432 if (need_vaux)
2433 tg3_pwrsrc_switch_to_vaux(tp);
2434 else
2435 tg3_pwrsrc_die_with_vmain(tp);
2438 static int tg3_5700_link_polarity(struct tg3 *tp, u32 speed)
2440 if (tp->led_ctrl == LED_CTRL_MODE_PHY_2)
2441 return 1;
2442 else if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5411) {
2443 if (speed != SPEED_10)
2444 return 1;
2445 } else if (speed == SPEED_10)
2446 return 1;
2448 return 0;
2451 static int tg3_setup_phy(struct tg3 *, int);
2453 #define RESET_KIND_SHUTDOWN 0
2454 #define RESET_KIND_INIT 1
2455 #define RESET_KIND_SUSPEND 2
2457 static void tg3_write_sig_post_reset(struct tg3 *, int);
2458 static int tg3_halt_cpu(struct tg3 *, u32);
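/* Power down the PHY as far as the hardware allows.  SerDes and
 * FET parts take dedicated paths, and a few chips must keep the
 * PHY powered because of hardware bugs.
 */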
2460 static void tg3_power_down_phy(struct tg3 *tp, bool do_low_power)
2462 u32 val;
2464 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
2465 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
2466 u32 sg_dig_ctrl = tr32(SG_DIG_CTRL);
2467 u32 serdes_cfg = tr32(MAC_SERDES_CFG);
2469 sg_dig_ctrl |=
2470 SG_DIG_USING_HW_AUTONEG | SG_DIG_SOFT_RESET;
2471 tw32(SG_DIG_CTRL, sg_dig_ctrl);
2472 tw32(MAC_SERDES_CFG, serdes_cfg | (1 << 15));
2474 return;
2477 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
2478 tg3_bmcr_reset(tp);
2479 val = tr32(GRC_MISC_CFG);
2480 tw32_f(GRC_MISC_CFG, val | GRC_MISC_CFG_EPHY_IDDQ);
2481 udelay(40);
2482 return;
2483 } else if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
2484 u32 phytest;
2485 if (!tg3_readphy(tp, MII_TG3_FET_TEST, &phytest)) {
2486 u32 phy;
2488 tg3_writephy(tp, MII_ADVERTISE, 0);
2489 tg3_writephy(tp, MII_BMCR,
2490 BMCR_ANENABLE | BMCR_ANRESTART);
2492 tg3_writephy(tp, MII_TG3_FET_TEST,
2493 phytest | MII_TG3_FET_SHADOW_EN);
2494 if (!tg3_readphy(tp, MII_TG3_FET_SHDW_AUXMODE4, &phy)) {
2495 phy |= MII_TG3_FET_SHDW_AUXMODE4_SBPD;
2496 tg3_writephy(tp,
2497 MII_TG3_FET_SHDW_AUXMODE4,
2498 phy);
2500 tg3_writephy(tp, MII_TG3_FET_TEST, phytest);
2502 return;
2503 } else if (do_low_power) {
2504 tg3_writephy(tp, MII_TG3_EXT_CTRL,
2505 MII_TG3_EXT_CTRL_FORCE_LED_OFF);
2507 val = MII_TG3_AUXCTL_PCTL_100TX_LPWR |
2508 MII_TG3_AUXCTL_PCTL_SPR_ISOLATE |
2509 MII_TG3_AUXCTL_PCTL_VREG_11V;
2510 tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_PWRCTL, val);
2513 /* The PHY should not be powered down on some chips because
2514 * of bugs.
2515 */
2516 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
2517 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
2518 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780 &&
2519 (tp->phy_flags & TG3_PHYFLG_MII_SERDES)))
2520 return;
2522 if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5784_AX ||
2523 GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5761_AX) {
2524 val = tr32(TG3_CPMU_LSPD_1000MB_CLK);
2525 val &= ~CPMU_LSPD_1000MB_MACCLK_MASK;
2526 val |= CPMU_LSPD_1000MB_MACCLK_12_5;
2527 tw32_f(TG3_CPMU_LSPD_1000MB_CLK, val);
2530 tg3_writephy(tp, MII_BMCR, BMCR_PDOWN);
2533 /* tp->lock is held. */
2534 static int tg3_nvram_lock(struct tg3 *tp)
2536 if (tg3_flag(tp, NVRAM)) {
2537 int i;
2539 if (tp->nvram_lock_cnt == 0) {
2540 tw32(NVRAM_SWARB, SWARB_REQ_SET1);
2541 for (i = 0; i < 8000; i++) {
2542 if (tr32(NVRAM_SWARB) & SWARB_GNT1)
2543 break;
2544 udelay(20);
2546 if (i == 8000) {
2547 tw32(NVRAM_SWARB, SWARB_REQ_CLR1);
2548 return -ENODEV;
2551 tp->nvram_lock_cnt++;
2553 return 0;
2556 /* tp->lock is held. */
2557 static void tg3_nvram_unlock(struct tg3 *tp)
2559 if (tg3_flag(tp, NVRAM)) {
2560 if (tp->nvram_lock_cnt > 0)
2561 tp->nvram_lock_cnt--;
2562 if (tp->nvram_lock_cnt == 0)
2563 tw32_f(NVRAM_SWARB, SWARB_REQ_CLR1);
2567 /* tp->lock is held. */
2568 static void tg3_enable_nvram_access(struct tg3 *tp)
2570 if (tg3_flag(tp, 5750_PLUS) && !tg3_flag(tp, PROTECTED_NVRAM)) {
2571 u32 nvaccess = tr32(NVRAM_ACCESS);
2573 tw32(NVRAM_ACCESS, nvaccess | ACCESS_ENABLE);
2577 /* tp->lock is held. */
2578 static void tg3_disable_nvram_access(struct tg3 *tp)
2580 if (tg3_flag(tp, 5750_PLUS) && !tg3_flag(tp, PROTECTED_NVRAM)) {
2581 u32 nvaccess = tr32(NVRAM_ACCESS);
2583 tw32(NVRAM_ACCESS, nvaccess & ~ACCESS_ENABLE);
2587 static int tg3_nvram_read_using_eeprom(struct tg3 *tp,
2588 u32 offset, u32 *val)
2590 u32 tmp;
2591 int i;
2593 if (offset > EEPROM_ADDR_ADDR_MASK || (offset % 4) != 0)
2594 return -EINVAL;
2596 tmp = tr32(GRC_EEPROM_ADDR) & ~(EEPROM_ADDR_ADDR_MASK |
2597 EEPROM_ADDR_DEVID_MASK |
2598 EEPROM_ADDR_READ);
2599 tw32(GRC_EEPROM_ADDR,
2600 tmp |
2601 (0 << EEPROM_ADDR_DEVID_SHIFT) |
2602 ((offset << EEPROM_ADDR_ADDR_SHIFT) &
2603 EEPROM_ADDR_ADDR_MASK) |
2604 EEPROM_ADDR_READ | EEPROM_ADDR_START);
2606 for (i = 0; i < 1000; i++) {
2607 tmp = tr32(GRC_EEPROM_ADDR);
2609 if (tmp & EEPROM_ADDR_COMPLETE)
2610 break;
2611 msleep(1);
2613 if (!(tmp & EEPROM_ADDR_COMPLETE))
2614 return -EBUSY;
2616 tmp = tr32(GRC_EEPROM_DATA);
2618 /*
2619 * The data will always be opposite the native endian
2620 * format. Perform a blind byteswap to compensate.
2621 */
2622 *val = swab32(tmp);
2624 return 0;
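/* 10000 polls at 10 usec apiece bound each NVRAM command to
 * roughly 100 ms before tg3_nvram_exec_cmd() returns -EBUSY.
 */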
2627 #define NVRAM_CMD_TIMEOUT 10000
2629 static int tg3_nvram_exec_cmd(struct tg3 *tp, u32 nvram_cmd)
2631 int i;
2633 tw32(NVRAM_CMD, nvram_cmd);
2634 for (i = 0; i < NVRAM_CMD_TIMEOUT; i++) {
2635 udelay(10);
2636 if (tr32(NVRAM_CMD) & NVRAM_CMD_DONE) {
2637 udelay(10);
2638 break;
2642 if (i == NVRAM_CMD_TIMEOUT)
2643 return -EBUSY;
2645 return 0;
2648 static u32 tg3_nvram_phys_addr(struct tg3 *tp, u32 addr)
2650 if (tg3_flag(tp, NVRAM) &&
2651 tg3_flag(tp, NVRAM_BUFFERED) &&
2652 tg3_flag(tp, FLASH) &&
2653 !tg3_flag(tp, NO_NVRAM_ADDR_TRANS) &&
2654 (tp->nvram_jedecnum == JEDEC_ATMEL))
2656 addr = ((addr / tp->nvram_pagesize) <<
2657 ATMEL_AT45DB0X1B_PAGE_POS) +
2658 (addr % tp->nvram_pagesize);
2660 return addr;
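/* Worked example, assuming the 264-byte page size of these Atmel
 * parts: logical address 1000 is page 3 (1000 / 264) at offset 208
 * (1000 % 264), i.e. physical address (3 << 9) + 208 = 1744.
 * tg3_nvram_logical_addr() below performs the inverse mapping.
 */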
2663 static u32 tg3_nvram_logical_addr(struct tg3 *tp, u32 addr)
2665 if (tg3_flag(tp, NVRAM) &&
2666 tg3_flag(tp, NVRAM_BUFFERED) &&
2667 tg3_flag(tp, FLASH) &&
2668 !tg3_flag(tp, NO_NVRAM_ADDR_TRANS) &&
2669 (tp->nvram_jedecnum == JEDEC_ATMEL))
2671 addr = ((addr >> ATMEL_AT45DB0X1B_PAGE_POS) *
2672 tp->nvram_pagesize) +
2673 (addr & ((1 << ATMEL_AT45DB0X1B_PAGE_POS) - 1));
2675 return addr;
2678 /* NOTE: Data read in from NVRAM is byteswapped according to
2679 * the byteswapping settings for all other register accesses.
2680 * tg3 devices are BE devices, so on a BE machine, the data
2681 * returned will be exactly as it is seen in NVRAM. On a LE
2682 * machine, the 32-bit value will be byteswapped.
2683 */
2684 static int tg3_nvram_read(struct tg3 *tp, u32 offset, u32 *val)
2686 int ret;
2688 if (!tg3_flag(tp, NVRAM))
2689 return tg3_nvram_read_using_eeprom(tp, offset, val);
2691 offset = tg3_nvram_phys_addr(tp, offset);
2693 if (offset > NVRAM_ADDR_MSK)
2694 return -EINVAL;
2696 ret = tg3_nvram_lock(tp);
2697 if (ret)
2698 return ret;
2700 tg3_enable_nvram_access(tp);
2702 tw32(NVRAM_ADDR, offset);
2703 ret = tg3_nvram_exec_cmd(tp, NVRAM_CMD_RD | NVRAM_CMD_GO |
2704 NVRAM_CMD_FIRST | NVRAM_CMD_LAST | NVRAM_CMD_DONE);
2706 if (ret == 0)
2707 *val = tr32(NVRAM_RDDATA);
2709 tg3_disable_nvram_access(tp);
2711 tg3_nvram_unlock(tp);
2713 return ret;
2716 /* Ensures NVRAM data is in bytestream format. */
2717 static int tg3_nvram_read_be32(struct tg3 *tp, u32 offset, __be32 *val)
2719 u32 v;
2720 int res = tg3_nvram_read(tp, offset, &v);
2721 if (!res)
2722 *val = cpu_to_be32(v);
2723 return res;
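/* Program the station address into all four MAC address slots
 * (and the twelve extended slots on 5703/5704), then seed the
 * transmit backoff generator from the address bytes.
 */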
2726 /* tp->lock is held. */
2727 static void __tg3_set_mac_addr(struct tg3 *tp, int skip_mac_1)
2729 u32 addr_high, addr_low;
2730 int i;
2732 addr_high = ((tp->dev->dev_addr[0] << 8) |
2733 tp->dev->dev_addr[1]);
2734 addr_low = ((tp->dev->dev_addr[2] << 24) |
2735 (tp->dev->dev_addr[3] << 16) |
2736 (tp->dev->dev_addr[4] << 8) |
2737 (tp->dev->dev_addr[5] << 0));
2738 for (i = 0; i < 4; i++) {
2739 if (i == 1 && skip_mac_1)
2740 continue;
2741 tw32(MAC_ADDR_0_HIGH + (i * 8), addr_high);
2742 tw32(MAC_ADDR_0_LOW + (i * 8), addr_low);
2745 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
2746 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
2747 for (i = 0; i < 12; i++) {
2748 tw32(MAC_EXTADDR_0_HIGH + (i * 8), addr_high);
2749 tw32(MAC_EXTADDR_0_LOW + (i * 8), addr_low);
2753 addr_high = (tp->dev->dev_addr[0] +
2754 tp->dev->dev_addr[1] +
2755 tp->dev->dev_addr[2] +
2756 tp->dev->dev_addr[3] +
2757 tp->dev->dev_addr[4] +
2758 tp->dev->dev_addr[5]) &
2759 TX_BACKOFF_SEED_MASK;
2760 tw32(MAC_TX_BACKOFF_SEED, addr_high);
2763 static void tg3_enable_register_access(struct tg3 *tp)
2765 /*
2766 * Make sure register accesses (indirect or otherwise) will function
2767 * correctly.
2768 */
2769 pci_write_config_dword(tp->pdev,
2770 TG3PCI_MISC_HOST_CTRL, tp->misc_host_ctrl);
2773 static int tg3_power_up(struct tg3 *tp)
2775 int err;
2777 tg3_enable_register_access(tp);
2779 err = pci_set_power_state(tp->pdev, PCI_D0);
2780 if (!err) {
2781 /* Switch out of Vaux if it is a NIC */
2782 tg3_pwrsrc_switch_to_vmain(tp);
2783 } else {
2784 netdev_err(tp->dev, "Transition to D0 failed\n");
2787 return err;
2790 static int tg3_power_down_prepare(struct tg3 *tp)
2792 u32 misc_host_ctrl;
2793 bool device_should_wake, do_low_power;
2795 tg3_enable_register_access(tp);
2797 /* Restore the CLKREQ setting. */
2798 if (tg3_flag(tp, CLKREQ_BUG)) {
2799 u16 lnkctl;
2801 pci_read_config_word(tp->pdev,
2802 pci_pcie_cap(tp->pdev) + PCI_EXP_LNKCTL,
2803 &lnkctl);
2804 lnkctl |= PCI_EXP_LNKCTL_CLKREQ_EN;
2805 pci_write_config_word(tp->pdev,
2806 pci_pcie_cap(tp->pdev) + PCI_EXP_LNKCTL,
2807 lnkctl);
2810 misc_host_ctrl = tr32(TG3PCI_MISC_HOST_CTRL);
2811 tw32(TG3PCI_MISC_HOST_CTRL,
2812 misc_host_ctrl | MISC_HOST_CTRL_MASK_PCI_INT);
2814 device_should_wake = device_may_wakeup(&tp->pdev->dev) &&
2815 tg3_flag(tp, WOL_ENABLE);
2817 if (tg3_flag(tp, USE_PHYLIB)) {
2818 do_low_power = false;
2819 if ((tp->phy_flags & TG3_PHYFLG_IS_CONNECTED) &&
2820 !(tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)) {
2821 struct phy_device *phydev;
2822 u32 phyid, advertising;
2824 phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
2826 tp->phy_flags |= TG3_PHYFLG_IS_LOW_POWER;
2828 tp->link_config.orig_speed = phydev->speed;
2829 tp->link_config.orig_duplex = phydev->duplex;
2830 tp->link_config.orig_autoneg = phydev->autoneg;
2831 tp->link_config.orig_advertising = phydev->advertising;
2833 advertising = ADVERTISED_TP |
2834 ADVERTISED_Pause |
2835 ADVERTISED_Autoneg |
2836 ADVERTISED_10baseT_Half;
2838 if (tg3_flag(tp, ENABLE_ASF) || device_should_wake) {
2839 if (tg3_flag(tp, WOL_SPEED_100MB))
2840 advertising |=
2841 ADVERTISED_100baseT_Half |
2842 ADVERTISED_100baseT_Full |
2843 ADVERTISED_10baseT_Full;
2844 else
2845 advertising |= ADVERTISED_10baseT_Full;
2848 phydev->advertising = advertising;
2850 phy_start_aneg(phydev);
2852 phyid = phydev->drv->phy_id & phydev->drv->phy_id_mask;
2853 if (phyid != PHY_ID_BCMAC131) {
2854 phyid &= PHY_BCM_OUI_MASK;
2855 if (phyid == PHY_BCM_OUI_1 ||
2856 phyid == PHY_BCM_OUI_2 ||
2857 phyid == PHY_BCM_OUI_3)
2858 do_low_power = true;
2861 } else {
2862 do_low_power = true;
2864 if (!(tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)) {
2865 tp->phy_flags |= TG3_PHYFLG_IS_LOW_POWER;
2866 tp->link_config.orig_speed = tp->link_config.speed;
2867 tp->link_config.orig_duplex = tp->link_config.duplex;
2868 tp->link_config.orig_autoneg = tp->link_config.autoneg;
2871 if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES)) {
2872 tp->link_config.speed = SPEED_10;
2873 tp->link_config.duplex = DUPLEX_HALF;
2874 tp->link_config.autoneg = AUTONEG_ENABLE;
2875 tg3_setup_phy(tp, 0);
2879 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
2880 u32 val;
2882 val = tr32(GRC_VCPU_EXT_CTRL);
2883 tw32(GRC_VCPU_EXT_CTRL, val | GRC_VCPU_EXT_CTRL_DISABLE_WOL);
2884 } else if (!tg3_flag(tp, ENABLE_ASF)) {
2885 int i;
2886 u32 val;
2888 for (i = 0; i < 200; i++) {
2889 tg3_read_mem(tp, NIC_SRAM_FW_ASF_STATUS_MBOX, &val);
2890 if (val == ~NIC_SRAM_FIRMWARE_MBOX_MAGIC1)
2891 break;
2892 msleep(1);
2895 if (tg3_flag(tp, WOL_CAP))
2896 tg3_write_mem(tp, NIC_SRAM_WOL_MBOX, WOL_SIGNATURE |
2897 WOL_DRV_STATE_SHUTDOWN |
2898 WOL_DRV_WOL |
2899 WOL_SET_MAGIC_PKT);
2901 if (device_should_wake) {
2902 u32 mac_mode;
2904 if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES)) {
2905 if (do_low_power &&
2906 !(tp->phy_flags & TG3_PHYFLG_IS_FET)) {
2907 tg3_phy_auxctl_write(tp,
2908 MII_TG3_AUXCTL_SHDWSEL_PWRCTL,
2909 MII_TG3_AUXCTL_PCTL_WOL_EN |
2910 MII_TG3_AUXCTL_PCTL_100TX_LPWR |
2911 MII_TG3_AUXCTL_PCTL_CL_AB_TXDAC);
2912 udelay(40);
2915 if (tp->phy_flags & TG3_PHYFLG_MII_SERDES)
2916 mac_mode = MAC_MODE_PORT_MODE_GMII;
2917 else
2918 mac_mode = MAC_MODE_PORT_MODE_MII;
2920 mac_mode |= tp->mac_mode & MAC_MODE_LINK_POLARITY;
2921 if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
2922 ASIC_REV_5700) {
2923 u32 speed = tg3_flag(tp, WOL_SPEED_100MB) ?
2924 SPEED_100 : SPEED_10;
2925 if (tg3_5700_link_polarity(tp, speed))
2926 mac_mode |= MAC_MODE_LINK_POLARITY;
2927 else
2928 mac_mode &= ~MAC_MODE_LINK_POLARITY;
2930 } else {
2931 mac_mode = MAC_MODE_PORT_MODE_TBI;
2934 if (!tg3_flag(tp, 5750_PLUS))
2935 tw32(MAC_LED_CTRL, tp->led_ctrl);
2937 mac_mode |= MAC_MODE_MAGIC_PKT_ENABLE;
2938 if ((tg3_flag(tp, 5705_PLUS) && !tg3_flag(tp, 5780_CLASS)) &&
2939 (tg3_flag(tp, ENABLE_ASF) || tg3_flag(tp, ENABLE_APE)))
2940 mac_mode |= MAC_MODE_KEEP_FRAME_IN_WOL;
2942 if (tg3_flag(tp, ENABLE_APE))
2943 mac_mode |= MAC_MODE_APE_TX_EN |
2944 MAC_MODE_APE_RX_EN |
2945 MAC_MODE_TDE_ENABLE;
2947 tw32_f(MAC_MODE, mac_mode);
2948 udelay(100);
2950 tw32_f(MAC_RX_MODE, RX_MODE_ENABLE);
2951 udelay(10);
2954 if (!tg3_flag(tp, WOL_SPEED_100MB) &&
2955 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
2956 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701)) {
2957 u32 base_val;
2959 base_val = tp->pci_clock_ctrl;
2960 base_val |= (CLOCK_CTRL_RXCLK_DISABLE |
2961 CLOCK_CTRL_TXCLK_DISABLE);
2963 tw32_wait_f(TG3PCI_CLOCK_CTRL, base_val | CLOCK_CTRL_ALTCLK |
2964 CLOCK_CTRL_PWRDOWN_PLL133, 40);
2965 } else if (tg3_flag(tp, 5780_CLASS) ||
2966 tg3_flag(tp, CPMU_PRESENT) ||
2967 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
2968 /* do nothing */
2969 } else if (!(tg3_flag(tp, 5750_PLUS) && tg3_flag(tp, ENABLE_ASF))) {
2970 u32 newbits1, newbits2;
2972 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
2973 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
2974 newbits1 = (CLOCK_CTRL_RXCLK_DISABLE |
2975 CLOCK_CTRL_TXCLK_DISABLE |
2976 CLOCK_CTRL_ALTCLK);
2977 newbits2 = newbits1 | CLOCK_CTRL_44MHZ_CORE;
2978 } else if (tg3_flag(tp, 5705_PLUS)) {
2979 newbits1 = CLOCK_CTRL_625_CORE;
2980 newbits2 = newbits1 | CLOCK_CTRL_ALTCLK;
2981 } else {
2982 newbits1 = CLOCK_CTRL_ALTCLK;
2983 newbits2 = newbits1 | CLOCK_CTRL_44MHZ_CORE;
2986 tw32_wait_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl | newbits1,
2987 40);
2989 tw32_wait_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl | newbits2,
2990 40);
2992 if (!tg3_flag(tp, 5705_PLUS)) {
2993 u32 newbits3;
2995 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
2996 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
2997 newbits3 = (CLOCK_CTRL_RXCLK_DISABLE |
2998 CLOCK_CTRL_TXCLK_DISABLE |
2999 CLOCK_CTRL_44MHZ_CORE);
3000 } else {
3001 newbits3 = CLOCK_CTRL_44MHZ_CORE;
3004 tw32_wait_f(TG3PCI_CLOCK_CTRL,
3005 tp->pci_clock_ctrl | newbits3, 40);
3009 if (!(device_should_wake) && !tg3_flag(tp, ENABLE_ASF))
3010 tg3_power_down_phy(tp, do_low_power);
3012 tg3_frob_aux_power(tp, true);
3014 /* Workaround for unstable PLL clock */
3015 if ((GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_AX) ||
3016 (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_BX)) {
3017 u32 val = tr32(0x7d00);
3019 val &= ~((1 << 16) | (1 << 4) | (1 << 2) | (1 << 1) | 1);
3020 tw32(0x7d00, val);
3021 if (!tg3_flag(tp, ENABLE_ASF)) {
3022 int err;
3024 err = tg3_nvram_lock(tp);
3025 tg3_halt_cpu(tp, RX_CPU_BASE);
3026 if (!err)
3027 tg3_nvram_unlock(tp);
3031 tg3_write_sig_post_reset(tp, RESET_KIND_SHUTDOWN);
3033 return 0;
3036 static void tg3_power_down(struct tg3 *tp)
3038 tg3_power_down_prepare(tp);
3040 pci_wake_from_d3(tp->pdev, tg3_flag(tp, WOL_ENABLE));
3041 pci_set_power_state(tp->pdev, PCI_D3hot);
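/* Decode the PHY aux status register into speed/duplex.  FET
 * parts lack the 3-bit speed field and use separate 100/full
 * status bits instead.
 */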
3044 static void tg3_aux_stat_to_speed_duplex(struct tg3 *tp, u32 val, u16 *speed, u8 *duplex)
3046 switch (val & MII_TG3_AUX_STAT_SPDMASK) {
3047 case MII_TG3_AUX_STAT_10HALF:
3048 *speed = SPEED_10;
3049 *duplex = DUPLEX_HALF;
3050 break;
3052 case MII_TG3_AUX_STAT_10FULL:
3053 *speed = SPEED_10;
3054 *duplex = DUPLEX_FULL;
3055 break;
3057 case MII_TG3_AUX_STAT_100HALF:
3058 *speed = SPEED_100;
3059 *duplex = DUPLEX_HALF;
3060 break;
3062 case MII_TG3_AUX_STAT_100FULL:
3063 *speed = SPEED_100;
3064 *duplex = DUPLEX_FULL;
3065 break;
3067 case MII_TG3_AUX_STAT_1000HALF:
3068 *speed = SPEED_1000;
3069 *duplex = DUPLEX_HALF;
3070 break;
3072 case MII_TG3_AUX_STAT_1000FULL:
3073 *speed = SPEED_1000;
3074 *duplex = DUPLEX_FULL;
3075 break;
3077 default:
3078 if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
3079 *speed = (val & MII_TG3_AUX_STAT_100) ? SPEED_100 :
3080 SPEED_10;
3081 *duplex = (val & MII_TG3_AUX_STAT_FULL) ? DUPLEX_FULL :
3082 DUPLEX_HALF;
3083 break;
3085 *speed = SPEED_INVALID;
3086 *duplex = DUPLEX_INVALID;
3087 break;
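/* Translate ethtool ADVERTISED_* bits into the MII advertisement
 * registers: 10/100 modes into MII_ADVERTISE, gigabit modes into
 * MII_CTRL1000, and, for EEE-capable PHYs, EEE abilities into the
 * clause 45 MDIO_AN_EEE_ADV register.
 */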
3091 static int tg3_phy_autoneg_cfg(struct tg3 *tp, u32 advertise, u32 flowctrl)
3093 int err = 0;
3094 u32 val, new_adv;
3096 new_adv = ADVERTISE_CSMA;
3097 if (advertise & ADVERTISED_10baseT_Half)
3098 new_adv |= ADVERTISE_10HALF;
3099 if (advertise & ADVERTISED_10baseT_Full)
3100 new_adv |= ADVERTISE_10FULL;
3101 if (advertise & ADVERTISED_100baseT_Half)
3102 new_adv |= ADVERTISE_100HALF;
3103 if (advertise & ADVERTISED_100baseT_Full)
3104 new_adv |= ADVERTISE_100FULL;
3106 new_adv |= tg3_advert_flowctrl_1000T(flowctrl);
3108 err = tg3_writephy(tp, MII_ADVERTISE, new_adv);
3109 if (err)
3110 goto done;
3112 if (tp->phy_flags & TG3_PHYFLG_10_100_ONLY)
3113 goto done;
3115 new_adv = 0;
3116 if (advertise & ADVERTISED_1000baseT_Half)
3117 new_adv |= ADVERTISE_1000HALF;
3118 if (advertise & ADVERTISED_1000baseT_Full)
3119 new_adv |= ADVERTISE_1000FULL;
3121 if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
3122 tp->pci_chip_rev_id == CHIPREV_ID_5701_B0)
3123 new_adv |= CTL1000_AS_MASTER | CTL1000_ENABLE_MASTER;
3125 err = tg3_writephy(tp, MII_CTRL1000, new_adv);
3126 if (err)
3127 goto done;
3129 if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP))
3130 goto done;
3132 tw32(TG3_CPMU_EEE_MODE,
3133 tr32(TG3_CPMU_EEE_MODE) & ~TG3_CPMU_EEEMD_LPI_ENABLE);
3135 err = TG3_PHY_AUXCTL_SMDSP_ENABLE(tp);
3136 if (!err) {
3137 u32 err2;
3139 val = 0;
3140 /* Advertise 100-BaseTX EEE ability */
3141 if (advertise & ADVERTISED_100baseT_Full)
3142 val |= MDIO_AN_EEE_ADV_100TX;
3143 /* Advertise 1000-BaseT EEE ability */
3144 if (advertise & ADVERTISED_1000baseT_Full)
3145 val |= MDIO_AN_EEE_ADV_1000T;
3146 err = tg3_phy_cl45_write(tp, MDIO_MMD_AN, MDIO_AN_EEE_ADV, val);
3147 if (err)
3148 val = 0;
3150 switch (GET_ASIC_REV(tp->pci_chip_rev_id)) {
3151 case ASIC_REV_5717:
3152 case ASIC_REV_57765:
3153 case ASIC_REV_5719:
3154 /* If we advertised any EEE abilities above... */
3155 if (val)
3156 val = MII_TG3_DSP_TAP26_ALNOKO |
3157 MII_TG3_DSP_TAP26_RMRXSTO |
3158 MII_TG3_DSP_TAP26_OPCSINPT;
3159 tg3_phydsp_write(tp, MII_TG3_DSP_TAP26, val);
3160 /* Fall through */
3161 case ASIC_REV_5720:
3162 if (!tg3_phydsp_read(tp, MII_TG3_DSP_CH34TP2, &val))
3163 tg3_phydsp_write(tp, MII_TG3_DSP_CH34TP2, val |
3164 MII_TG3_DSP_CH34TP2_HIBW01);
3167 err2 = TG3_PHY_AUXCTL_SMDSP_DISABLE(tp);
3168 if (!err)
3169 err = err2;
3172 done:
3173 return err;
3176 static void tg3_phy_copper_begin(struct tg3 *tp)
3178 u32 new_adv;
3179 int i;
3181 if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) {
3182 new_adv = ADVERTISED_10baseT_Half |
3183 ADVERTISED_10baseT_Full;
3184 if (tg3_flag(tp, WOL_SPEED_100MB))
3185 new_adv |= ADVERTISED_100baseT_Half |
3186 ADVERTISED_100baseT_Full;
3188 tg3_phy_autoneg_cfg(tp, new_adv,
3189 FLOW_CTRL_TX | FLOW_CTRL_RX);
3190 } else if (tp->link_config.speed == SPEED_INVALID) {
3191 if (tp->phy_flags & TG3_PHYFLG_10_100_ONLY)
3192 tp->link_config.advertising &=
3193 ~(ADVERTISED_1000baseT_Half |
3194 ADVERTISED_1000baseT_Full);
3196 tg3_phy_autoneg_cfg(tp, tp->link_config.advertising,
3197 tp->link_config.flowctrl);
3198 } else {
3199 /* Asking for a specific link mode. */
3200 if (tp->link_config.speed == SPEED_1000) {
3201 if (tp->link_config.duplex == DUPLEX_FULL)
3202 new_adv = ADVERTISED_1000baseT_Full;
3203 else
3204 new_adv = ADVERTISED_1000baseT_Half;
3205 } else if (tp->link_config.speed == SPEED_100) {
3206 if (tp->link_config.duplex == DUPLEX_FULL)
3207 new_adv = ADVERTISED_100baseT_Full;
3208 else
3209 new_adv = ADVERTISED_100baseT_Half;
3210 } else {
3211 if (tp->link_config.duplex == DUPLEX_FULL)
3212 new_adv = ADVERTISED_10baseT_Full;
3213 else
3214 new_adv = ADVERTISED_10baseT_Half;
3217 tg3_phy_autoneg_cfg(tp, new_adv,
3218 tp->link_config.flowctrl);
3221 if (tp->link_config.autoneg == AUTONEG_DISABLE &&
3222 tp->link_config.speed != SPEED_INVALID) {
3223 u32 bmcr, orig_bmcr;
3225 tp->link_config.active_speed = tp->link_config.speed;
3226 tp->link_config.active_duplex = tp->link_config.duplex;
3228 bmcr = 0;
3229 switch (tp->link_config.speed) {
3230 default:
3231 case SPEED_10:
3232 break;
3234 case SPEED_100:
3235 bmcr |= BMCR_SPEED100;
3236 break;
3238 case SPEED_1000:
3239 bmcr |= BMCR_SPEED1000;
3240 break;
3243 if (tp->link_config.duplex == DUPLEX_FULL)
3244 bmcr |= BMCR_FULLDPLX;
3246 if (!tg3_readphy(tp, MII_BMCR, &orig_bmcr) &&
3247 (bmcr != orig_bmcr)) {
3248 tg3_writephy(tp, MII_BMCR, BMCR_LOOPBACK);
3249 for (i = 0; i < 1500; i++) {
3250 u32 tmp;
3252 udelay(10);
3253 if (tg3_readphy(tp, MII_BMSR, &tmp) ||
3254 tg3_readphy(tp, MII_BMSR, &tmp))
3255 continue;
3256 if (!(tmp & BMSR_LSTATUS)) {
3257 udelay(40);
3258 break;
3261 tg3_writephy(tp, MII_BMCR, bmcr);
3262 udelay(40);
3264 } else {
3265 tg3_writephy(tp, MII_BMCR,
3266 BMCR_ANENABLE | BMCR_ANRESTART);
3270 static int tg3_init_5401phy_dsp(struct tg3 *tp)
3272 int err;
3274 /* Turn off tap power management. */
3275 /* Set Extended packet length bit */
3276 err = tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_AUXCTL, 0x4c20);
3278 err |= tg3_phydsp_write(tp, 0x0012, 0x1804);
3279 err |= tg3_phydsp_write(tp, 0x0013, 0x1204);
3280 err |= tg3_phydsp_write(tp, 0x8006, 0x0132);
3281 err |= tg3_phydsp_write(tp, 0x8006, 0x0232);
3282 err |= tg3_phydsp_write(tp, 0x201f, 0x0a20);
3284 udelay(40);
3286 return err;
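/* Returns 1 only if every mode in @mask is currently advertised in
 * MII_ADVERTISE and, for gigabit-capable PHYs, MII_CTRL1000.
 */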
3289 static int tg3_copper_is_advertising_all(struct tg3 *tp, u32 mask)
3291 u32 adv_reg, all_mask = 0;
3293 if (mask & ADVERTISED_10baseT_Half)
3294 all_mask |= ADVERTISE_10HALF;
3295 if (mask & ADVERTISED_10baseT_Full)
3296 all_mask |= ADVERTISE_10FULL;
3297 if (mask & ADVERTISED_100baseT_Half)
3298 all_mask |= ADVERTISE_100HALF;
3299 if (mask & ADVERTISED_100baseT_Full)
3300 all_mask |= ADVERTISE_100FULL;
3302 if (tg3_readphy(tp, MII_ADVERTISE, &adv_reg))
3303 return 0;
3305 if ((adv_reg & all_mask) != all_mask)
3306 return 0;
3307 if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
3308 u32 tg3_ctrl;
3310 all_mask = 0;
3311 if (mask & ADVERTISED_1000baseT_Half)
3312 all_mask |= ADVERTISE_1000HALF;
3313 if (mask & ADVERTISED_1000baseT_Full)
3314 all_mask |= ADVERTISE_1000FULL;
3316 if (tg3_readphy(tp, MII_CTRL1000, &tg3_ctrl))
3317 return 0;
3319 if ((tg3_ctrl & all_mask) != all_mask)
3320 return 0;
3322 return 1;
3325 static int tg3_adv_1000T_flowctrl_ok(struct tg3 *tp, u32 *lcladv, u32 *rmtadv)
3327 u32 curadv, reqadv;
3329 if (tg3_readphy(tp, MII_ADVERTISE, lcladv))
3330 return 1;
3332 curadv = *lcladv & (ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM);
3333 reqadv = tg3_advert_flowctrl_1000T(tp->link_config.flowctrl);
3335 if (tp->link_config.active_duplex == DUPLEX_FULL) {
3336 if (curadv != reqadv)
3337 return 0;
3339 if (tg3_flag(tp, PAUSE_AUTONEG))
3340 tg3_readphy(tp, MII_LPA, rmtadv);
3341 } else {
3342 /* Reprogram the advertisement register, even if it
3343 * does not affect the current link. If the link
3344 * gets renegotiated in the future, we can save an
3345 * additional renegotiation cycle by advertising
3346 * it correctly in the first place.
3347 */
3348 if (curadv != reqadv) {
3349 *lcladv &= ~(ADVERTISE_PAUSE_CAP |
3350 ADVERTISE_PAUSE_ASYM);
3351 tg3_writephy(tp, MII_ADVERTISE, *lcladv | reqadv);
3355 return 1;
3358 static int tg3_setup_copper_phy(struct tg3 *tp, int force_reset)
3360 int current_link_up;
3361 u32 bmsr, val;
3362 u32 lcl_adv, rmt_adv;
3363 u16 current_speed;
3364 u8 current_duplex;
3365 int i, err;
3367 tw32(MAC_EVENT, 0);
3369 tw32_f(MAC_STATUS,
3370 (MAC_STATUS_SYNC_CHANGED |
3371 MAC_STATUS_CFG_CHANGED |
3372 MAC_STATUS_MI_COMPLETION |
3373 MAC_STATUS_LNKSTATE_CHANGED));
3374 udelay(40);
3376 if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
3377 tw32_f(MAC_MI_MODE,
3378 (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
3379 udelay(80);
3382 tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_PWRCTL, 0);
3384 /* Some third-party PHYs need to be reset on link going
3385 * down.
3386 */
3387 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
3388 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
3389 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) &&
3390 netif_carrier_ok(tp->dev)) {
3391 tg3_readphy(tp, MII_BMSR, &bmsr);
3392 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
3393 !(bmsr & BMSR_LSTATUS))
3394 force_reset = 1;
3396 if (force_reset)
3397 tg3_phy_reset(tp);
3399 if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5401) {
3400 tg3_readphy(tp, MII_BMSR, &bmsr);
3401 if (tg3_readphy(tp, MII_BMSR, &bmsr) ||
3402 !tg3_flag(tp, INIT_COMPLETE))
3403 bmsr = 0;
3405 if (!(bmsr & BMSR_LSTATUS)) {
3406 err = tg3_init_5401phy_dsp(tp);
3407 if (err)
3408 return err;
3410 tg3_readphy(tp, MII_BMSR, &bmsr);
3411 for (i = 0; i < 1000; i++) {
3412 udelay(10);
3413 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
3414 (bmsr & BMSR_LSTATUS)) {
3415 udelay(40);
3416 break;
3420 if ((tp->phy_id & TG3_PHY_ID_REV_MASK) ==
3421 TG3_PHY_REV_BCM5401_B0 &&
3422 !(bmsr & BMSR_LSTATUS) &&
3423 tp->link_config.active_speed == SPEED_1000) {
3424 err = tg3_phy_reset(tp);
3425 if (!err)
3426 err = tg3_init_5401phy_dsp(tp);
3427 if (err)
3428 return err;
3431 } else if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
3432 tp->pci_chip_rev_id == CHIPREV_ID_5701_B0) {
3433 /* 5701 {A0,B0} CRC bug workaround */
3434 tg3_writephy(tp, 0x15, 0x0a75);
3435 tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8c68);
3436 tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8d68);
3437 tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8c68);
3440 /* Clear pending interrupts... */
3441 tg3_readphy(tp, MII_TG3_ISTAT, &val);
3442 tg3_readphy(tp, MII_TG3_ISTAT, &val);
3444 if (tp->phy_flags & TG3_PHYFLG_USE_MI_INTERRUPT)
3445 tg3_writephy(tp, MII_TG3_IMASK, ~MII_TG3_INT_LINKCHG);
3446 else if (!(tp->phy_flags & TG3_PHYFLG_IS_FET))
3447 tg3_writephy(tp, MII_TG3_IMASK, ~0);
3449 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
3450 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
3451 if (tp->led_ctrl == LED_CTRL_MODE_PHY_1)
3452 tg3_writephy(tp, MII_TG3_EXT_CTRL,
3453 MII_TG3_EXT_CTRL_LNK3_LED_MODE);
3454 else
3455 tg3_writephy(tp, MII_TG3_EXT_CTRL, 0);
3458 current_link_up = 0;
3459 current_speed = SPEED_INVALID;
3460 current_duplex = DUPLEX_INVALID;
3462 if (tp->phy_flags & TG3_PHYFLG_CAPACITIVE_COUPLING) {
3463 err = tg3_phy_auxctl_read(tp,
3464 MII_TG3_AUXCTL_SHDWSEL_MISCTEST,
3465 &val);
3466 if (!err && !(val & (1 << 10))) {
3467 tg3_phy_auxctl_write(tp,
3468 MII_TG3_AUXCTL_SHDWSEL_MISCTEST,
3469 val | (1 << 10));
3470 goto relink;
3474 bmsr = 0;
3475 for (i = 0; i < 100; i++) {
3476 tg3_readphy(tp, MII_BMSR, &bmsr);
3477 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
3478 (bmsr & BMSR_LSTATUS))
3479 break;
3480 udelay(40);
3483 if (bmsr & BMSR_LSTATUS) {
3484 u32 aux_stat, bmcr;
3486 tg3_readphy(tp, MII_TG3_AUX_STAT, &aux_stat);
3487 for (i = 0; i < 2000; i++) {
3488 udelay(10);
3489 if (!tg3_readphy(tp, MII_TG3_AUX_STAT, &aux_stat) &&
3490 aux_stat)
3491 break;
3494 tg3_aux_stat_to_speed_duplex(tp, aux_stat,
3495 &current_speed,
3496 &current_duplex);
3498 bmcr = 0;
3499 for (i = 0; i < 200; i++) {
3500 tg3_readphy(tp, MII_BMCR, &bmcr);
3501 if (tg3_readphy(tp, MII_BMCR, &bmcr))
3502 continue;
3503 if (bmcr && bmcr != 0x7fff)
3504 break;
3505 udelay(10);
3508 lcl_adv = 0;
3509 rmt_adv = 0;
3511 tp->link_config.active_speed = current_speed;
3512 tp->link_config.active_duplex = current_duplex;
3514 if (tp->link_config.autoneg == AUTONEG_ENABLE) {
3515 if ((bmcr & BMCR_ANENABLE) &&
3516 tg3_copper_is_advertising_all(tp,
3517 tp->link_config.advertising)) {
3518 if (tg3_adv_1000T_flowctrl_ok(tp, &lcl_adv,
3519 &rmt_adv))
3520 current_link_up = 1;
3522 } else {
3523 if (!(bmcr & BMCR_ANENABLE) &&
3524 tp->link_config.speed == current_speed &&
3525 tp->link_config.duplex == current_duplex &&
3526 tp->link_config.flowctrl ==
3527 tp->link_config.active_flowctrl) {
3528 current_link_up = 1;
3532 if (current_link_up == 1 &&
3533 tp->link_config.active_duplex == DUPLEX_FULL)
3534 tg3_setup_flow_control(tp, lcl_adv, rmt_adv);
3537 relink:
3538 if (current_link_up == 0 || (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)) {
3539 tg3_phy_copper_begin(tp);
3541 tg3_readphy(tp, MII_BMSR, &bmsr);
3542 if ((!tg3_readphy(tp, MII_BMSR, &bmsr) && (bmsr & BMSR_LSTATUS)) ||
3543 (tp->mac_mode & MAC_MODE_PORT_INT_LPBACK))
3544 current_link_up = 1;
3547 tp->mac_mode &= ~MAC_MODE_PORT_MODE_MASK;
3548 if (current_link_up == 1) {
3549 if (tp->link_config.active_speed == SPEED_100 ||
3550 tp->link_config.active_speed == SPEED_10)
3551 tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
3552 else
3553 tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
3554 } else if (tp->phy_flags & TG3_PHYFLG_IS_FET)
3555 tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
3556 else
3557 tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
3559 tp->mac_mode &= ~MAC_MODE_HALF_DUPLEX;
3560 if (tp->link_config.active_duplex == DUPLEX_HALF)
3561 tp->mac_mode |= MAC_MODE_HALF_DUPLEX;
3563 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700) {
3564 if (current_link_up == 1 &&
3565 tg3_5700_link_polarity(tp, tp->link_config.active_speed))
3566 tp->mac_mode |= MAC_MODE_LINK_POLARITY;
3567 else
3568 tp->mac_mode &= ~MAC_MODE_LINK_POLARITY;
3571 /* ??? Without this setting Netgear GA302T PHY does not
3572 * ??? send/receive packets...
3573 */
3574 if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5411 &&
3575 tp->pci_chip_rev_id == CHIPREV_ID_5700_ALTIMA) {
3576 tp->mi_mode |= MAC_MI_MODE_AUTO_POLL;
3577 tw32_f(MAC_MI_MODE, tp->mi_mode);
3578 udelay(80);
3581 tw32_f(MAC_MODE, tp->mac_mode);
3582 udelay(40);
3584 tg3_phy_eee_adjust(tp, current_link_up);
3586 if (tg3_flag(tp, USE_LINKCHG_REG)) {
3587 /* Polled via timer. */
3588 tw32_f(MAC_EVENT, 0);
3589 } else {
3590 tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
3592 udelay(40);
3594 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 &&
3595 current_link_up == 1 &&
3596 tp->link_config.active_speed == SPEED_1000 &&
3597 (tg3_flag(tp, PCIX_MODE) || tg3_flag(tp, PCI_HIGH_SPEED))) {
3598 udelay(120);
3599 tw32_f(MAC_STATUS,
3600 (MAC_STATUS_SYNC_CHANGED |
3601 MAC_STATUS_CFG_CHANGED));
3602 udelay(40);
3603 tg3_write_mem(tp,
3604 NIC_SRAM_FIRMWARE_MBOX,
3605 NIC_SRAM_FIRMWARE_MBOX_MAGIC2);
3608 /* Prevent send BD corruption. */
3609 if (tg3_flag(tp, CLKREQ_BUG)) {
3610 u16 oldlnkctl, newlnkctl;
3612 pci_read_config_word(tp->pdev,
3613 pci_pcie_cap(tp->pdev) + PCI_EXP_LNKCTL,
3614 &oldlnkctl);
3615 if (tp->link_config.active_speed == SPEED_100 ||
3616 tp->link_config.active_speed == SPEED_10)
3617 newlnkctl = oldlnkctl & ~PCI_EXP_LNKCTL_CLKREQ_EN;
3618 else
3619 newlnkctl = oldlnkctl | PCI_EXP_LNKCTL_CLKREQ_EN;
3620 if (newlnkctl != oldlnkctl)
3621 pci_write_config_word(tp->pdev,
3622 pci_pcie_cap(tp->pdev) + PCI_EXP_LNKCTL,
3623 newlnkctl);
3626 if (current_link_up != netif_carrier_ok(tp->dev)) {
3627 if (current_link_up)
3628 netif_carrier_on(tp->dev);
3629 else
3630 netif_carrier_off(tp->dev);
3631 tg3_link_report(tp);
3634 return 0;
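/* Bookkeeping for the software autonegotiation state machine used
 * on fiber links (modeled on the 1000BASE-X clause 37 process);
 * the MR_* flags mirror the management-register bits it reports.
 */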
3637 struct tg3_fiber_aneginfo {
3638 int state;
3639 #define ANEG_STATE_UNKNOWN 0
3640 #define ANEG_STATE_AN_ENABLE 1
3641 #define ANEG_STATE_RESTART_INIT 2
3642 #define ANEG_STATE_RESTART 3
3643 #define ANEG_STATE_DISABLE_LINK_OK 4
3644 #define ANEG_STATE_ABILITY_DETECT_INIT 5
3645 #define ANEG_STATE_ABILITY_DETECT 6
3646 #define ANEG_STATE_ACK_DETECT_INIT 7
3647 #define ANEG_STATE_ACK_DETECT 8
3648 #define ANEG_STATE_COMPLETE_ACK_INIT 9
3649 #define ANEG_STATE_COMPLETE_ACK 10
3650 #define ANEG_STATE_IDLE_DETECT_INIT 11
3651 #define ANEG_STATE_IDLE_DETECT 12
3652 #define ANEG_STATE_LINK_OK 13
3653 #define ANEG_STATE_NEXT_PAGE_WAIT_INIT 14
3654 #define ANEG_STATE_NEXT_PAGE_WAIT 15
3656 u32 flags;
3657 #define MR_AN_ENABLE 0x00000001
3658 #define MR_RESTART_AN 0x00000002
3659 #define MR_AN_COMPLETE 0x00000004
3660 #define MR_PAGE_RX 0x00000008
3661 #define MR_NP_LOADED 0x00000010
3662 #define MR_TOGGLE_TX 0x00000020
3663 #define MR_LP_ADV_FULL_DUPLEX 0x00000040
3664 #define MR_LP_ADV_HALF_DUPLEX 0x00000080
3665 #define MR_LP_ADV_SYM_PAUSE 0x00000100
3666 #define MR_LP_ADV_ASYM_PAUSE 0x00000200
3667 #define MR_LP_ADV_REMOTE_FAULT1 0x00000400
3668 #define MR_LP_ADV_REMOTE_FAULT2 0x00000800
3669 #define MR_LP_ADV_NEXT_PAGE 0x00001000
3670 #define MR_TOGGLE_RX 0x00002000
3671 #define MR_NP_RX 0x00004000
3673 #define MR_LINK_OK 0x80000000
3675 unsigned long link_time, cur_time;
3677 u32 ability_match_cfg;
3678 int ability_match_count;
3680 char ability_match, idle_match, ack_match;
3682 u32 txconfig, rxconfig;
3683 #define ANEG_CFG_NP 0x00000080
3684 #define ANEG_CFG_ACK 0x00000040
3685 #define ANEG_CFG_RF2 0x00000020
3686 #define ANEG_CFG_RF1 0x00000010
3687 #define ANEG_CFG_PS2 0x00000001
3688 #define ANEG_CFG_PS1 0x00008000
3689 #define ANEG_CFG_HD 0x00004000
3690 #define ANEG_CFG_FD 0x00002000
3691 #define ANEG_CFG_INVAL 0x00001f06
3694 #define ANEG_OK 0
3695 #define ANEG_DONE 1
3696 #define ANEG_TIMER_ENAB 2
3697 #define ANEG_FAILED -1
3699 #define ANEG_STATE_SETTLE_TIME 10000
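/* ap->cur_time advances once per state-machine call, and
 * fiber_autoneg() below polls about once per microsecond, so the
 * 10000-tick settle time corresponds to roughly 10 ms.
 */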
3701 static int tg3_fiber_aneg_smachine(struct tg3 *tp,
3702 struct tg3_fiber_aneginfo *ap)
3704 u16 flowctrl;
3705 unsigned long delta;
3706 u32 rx_cfg_reg;
3707 int ret;
3709 if (ap->state == ANEG_STATE_UNKNOWN) {
3710 ap->rxconfig = 0;
3711 ap->link_time = 0;
3712 ap->cur_time = 0;
3713 ap->ability_match_cfg = 0;
3714 ap->ability_match_count = 0;
3715 ap->ability_match = 0;
3716 ap->idle_match = 0;
3717 ap->ack_match = 0;
3719 ap->cur_time++;
3721 if (tr32(MAC_STATUS) & MAC_STATUS_RCVD_CFG) {
3722 rx_cfg_reg = tr32(MAC_RX_AUTO_NEG);
3724 if (rx_cfg_reg != ap->ability_match_cfg) {
3725 ap->ability_match_cfg = rx_cfg_reg;
3726 ap->ability_match = 0;
3727 ap->ability_match_count = 0;
3728 } else {
3729 if (++ap->ability_match_count > 1) {
3730 ap->ability_match = 1;
3731 ap->ability_match_cfg = rx_cfg_reg;
3734 if (rx_cfg_reg & ANEG_CFG_ACK)
3735 ap->ack_match = 1;
3736 else
3737 ap->ack_match = 0;
3739 ap->idle_match = 0;
3740 } else {
3741 ap->idle_match = 1;
3742 ap->ability_match_cfg = 0;
3743 ap->ability_match_count = 0;
3744 ap->ability_match = 0;
3745 ap->ack_match = 0;
3747 rx_cfg_reg = 0;
3750 ap->rxconfig = rx_cfg_reg;
3751 ret = ANEG_OK;
3753 switch (ap->state) {
3754 case ANEG_STATE_UNKNOWN:
3755 if (ap->flags & (MR_AN_ENABLE | MR_RESTART_AN))
3756 ap->state = ANEG_STATE_AN_ENABLE;
3758 /* fallthru */
3759 case ANEG_STATE_AN_ENABLE:
3760 ap->flags &= ~(MR_AN_COMPLETE | MR_PAGE_RX);
3761 if (ap->flags & MR_AN_ENABLE) {
3762 ap->link_time = 0;
3763 ap->cur_time = 0;
3764 ap->ability_match_cfg = 0;
3765 ap->ability_match_count = 0;
3766 ap->ability_match = 0;
3767 ap->idle_match = 0;
3768 ap->ack_match = 0;
3770 ap->state = ANEG_STATE_RESTART_INIT;
3771 } else {
3772 ap->state = ANEG_STATE_DISABLE_LINK_OK;
3774 break;
3776 case ANEG_STATE_RESTART_INIT:
3777 ap->link_time = ap->cur_time;
3778 ap->flags &= ~(MR_NP_LOADED);
3779 ap->txconfig = 0;
3780 tw32(MAC_TX_AUTO_NEG, 0);
3781 tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
3782 tw32_f(MAC_MODE, tp->mac_mode);
3783 udelay(40);
3785 ret = ANEG_TIMER_ENAB;
3786 ap->state = ANEG_STATE_RESTART;
3788 /* fallthru */
3789 case ANEG_STATE_RESTART:
3790 delta = ap->cur_time - ap->link_time;
3791 if (delta > ANEG_STATE_SETTLE_TIME)
3792 ap->state = ANEG_STATE_ABILITY_DETECT_INIT;
3793 else
3794 ret = ANEG_TIMER_ENAB;
3795 break;
3797 case ANEG_STATE_DISABLE_LINK_OK:
3798 ret = ANEG_DONE;
3799 break;
3801 case ANEG_STATE_ABILITY_DETECT_INIT:
3802 ap->flags &= ~(MR_TOGGLE_TX);
3803 ap->txconfig = ANEG_CFG_FD;
3804 flowctrl = tg3_advert_flowctrl_1000X(tp->link_config.flowctrl);
3805 if (flowctrl & ADVERTISE_1000XPAUSE)
3806 ap->txconfig |= ANEG_CFG_PS1;
3807 if (flowctrl & ADVERTISE_1000XPSE_ASYM)
3808 ap->txconfig |= ANEG_CFG_PS2;
3809 tw32(MAC_TX_AUTO_NEG, ap->txconfig);
3810 tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
3811 tw32_f(MAC_MODE, tp->mac_mode);
3812 udelay(40);
3814 ap->state = ANEG_STATE_ABILITY_DETECT;
3815 break;
3817 case ANEG_STATE_ABILITY_DETECT:
3818 if (ap->ability_match != 0 && ap->rxconfig != 0)
3819 ap->state = ANEG_STATE_ACK_DETECT_INIT;
3820 break;
3822 case ANEG_STATE_ACK_DETECT_INIT:
3823 ap->txconfig |= ANEG_CFG_ACK;
3824 tw32(MAC_TX_AUTO_NEG, ap->txconfig);
3825 tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
3826 tw32_f(MAC_MODE, tp->mac_mode);
3827 udelay(40);
3829 ap->state = ANEG_STATE_ACK_DETECT;
3831 /* fallthru */
3832 case ANEG_STATE_ACK_DETECT:
3833 if (ap->ack_match != 0) {
3834 if ((ap->rxconfig & ~ANEG_CFG_ACK) ==
3835 (ap->ability_match_cfg & ~ANEG_CFG_ACK)) {
3836 ap->state = ANEG_STATE_COMPLETE_ACK_INIT;
3837 } else {
3838 ap->state = ANEG_STATE_AN_ENABLE;
3840 } else if (ap->ability_match != 0 &&
3841 ap->rxconfig == 0) {
3842 ap->state = ANEG_STATE_AN_ENABLE;
3844 break;
3846 case ANEG_STATE_COMPLETE_ACK_INIT:
3847 if (ap->rxconfig & ANEG_CFG_INVAL) {
3848 ret = ANEG_FAILED;
3849 break;
3851 ap->flags &= ~(MR_LP_ADV_FULL_DUPLEX |
3852 MR_LP_ADV_HALF_DUPLEX |
3853 MR_LP_ADV_SYM_PAUSE |
3854 MR_LP_ADV_ASYM_PAUSE |
3855 MR_LP_ADV_REMOTE_FAULT1 |
3856 MR_LP_ADV_REMOTE_FAULT2 |
3857 MR_LP_ADV_NEXT_PAGE |
3858 MR_TOGGLE_RX |
3859 MR_NP_RX);
3860 if (ap->rxconfig & ANEG_CFG_FD)
3861 ap->flags |= MR_LP_ADV_FULL_DUPLEX;
3862 if (ap->rxconfig & ANEG_CFG_HD)
3863 ap->flags |= MR_LP_ADV_HALF_DUPLEX;
3864 if (ap->rxconfig & ANEG_CFG_PS1)
3865 ap->flags |= MR_LP_ADV_SYM_PAUSE;
3866 if (ap->rxconfig & ANEG_CFG_PS2)
3867 ap->flags |= MR_LP_ADV_ASYM_PAUSE;
3868 if (ap->rxconfig & ANEG_CFG_RF1)
3869 ap->flags |= MR_LP_ADV_REMOTE_FAULT1;
3870 if (ap->rxconfig & ANEG_CFG_RF2)
3871 ap->flags |= MR_LP_ADV_REMOTE_FAULT2;
3872 if (ap->rxconfig & ANEG_CFG_NP)
3873 ap->flags |= MR_LP_ADV_NEXT_PAGE;
3875 ap->link_time = ap->cur_time;
3877 ap->flags ^= (MR_TOGGLE_TX);
3878 if (ap->rxconfig & 0x0008)
3879 ap->flags |= MR_TOGGLE_RX;
3880 if (ap->rxconfig & ANEG_CFG_NP)
3881 ap->flags |= MR_NP_RX;
3882 ap->flags |= MR_PAGE_RX;
3884 ap->state = ANEG_STATE_COMPLETE_ACK;
3885 ret = ANEG_TIMER_ENAB;
3886 break;
3888 case ANEG_STATE_COMPLETE_ACK:
3889 if (ap->ability_match != 0 &&
3890 ap->rxconfig == 0) {
3891 ap->state = ANEG_STATE_AN_ENABLE;
3892 break;
3894 delta = ap->cur_time - ap->link_time;
3895 if (delta > ANEG_STATE_SETTLE_TIME) {
3896 if (!(ap->flags & (MR_LP_ADV_NEXT_PAGE))) {
3897 ap->state = ANEG_STATE_IDLE_DETECT_INIT;
3898 } else {
3899 if ((ap->txconfig & ANEG_CFG_NP) == 0 &&
3900 !(ap->flags & MR_NP_RX)) {
3901 ap->state = ANEG_STATE_IDLE_DETECT_INIT;
3902 } else {
3903 ret = ANEG_FAILED;
3907 break;
3909 case ANEG_STATE_IDLE_DETECT_INIT:
3910 ap->link_time = ap->cur_time;
3911 tp->mac_mode &= ~MAC_MODE_SEND_CONFIGS;
3912 tw32_f(MAC_MODE, tp->mac_mode);
3913 udelay(40);
3915 ap->state = ANEG_STATE_IDLE_DETECT;
3916 ret = ANEG_TIMER_ENAB;
3917 break;
3919 case ANEG_STATE_IDLE_DETECT:
3920 if (ap->ability_match != 0 &&
3921 ap->rxconfig == 0) {
3922 ap->state = ANEG_STATE_AN_ENABLE;
3923 break;
3925 delta = ap->cur_time - ap->link_time;
3926 if (delta > ANEG_STATE_SETTLE_TIME) {
3927 /* XXX another gem from the Broadcom driver :( */
3928 ap->state = ANEG_STATE_LINK_OK;
3930 break;
3932 case ANEG_STATE_LINK_OK:
3933 ap->flags |= (MR_AN_COMPLETE | MR_LINK_OK);
3934 ret = ANEG_DONE;
3935 break;
3937 case ANEG_STATE_NEXT_PAGE_WAIT_INIT:
3938 /* ??? unimplemented */
3939 break;
3941 case ANEG_STATE_NEXT_PAGE_WAIT:
3942 /* ??? unimplemented */
3943 break;
3945 default:
3946 ret = ANEG_FAILED;
3947 break;
3950 return ret;
3953 static int fiber_autoneg(struct tg3 *tp, u32 *txflags, u32 *rxflags)
3955 int res = 0;
3956 struct tg3_fiber_aneginfo aninfo;
3957 int status = ANEG_FAILED;
3958 unsigned int tick;
3959 u32 tmp;
3961 tw32_f(MAC_TX_AUTO_NEG, 0);
3963 tmp = tp->mac_mode & ~MAC_MODE_PORT_MODE_MASK;
3964 tw32_f(MAC_MODE, tmp | MAC_MODE_PORT_MODE_GMII);
3965 udelay(40);
3967 tw32_f(MAC_MODE, tp->mac_mode | MAC_MODE_SEND_CONFIGS);
3968 udelay(40);
3970 memset(&aninfo, 0, sizeof(aninfo));
3971 aninfo.flags |= MR_AN_ENABLE;
3972 aninfo.state = ANEG_STATE_UNKNOWN;
3973 aninfo.cur_time = 0;
3974 tick = 0;
3975 while (++tick < 195000) {
3976 status = tg3_fiber_aneg_smachine(tp, &aninfo);
3977 if (status == ANEG_DONE || status == ANEG_FAILED)
3978 break;
3980 udelay(1);
3983 tp->mac_mode &= ~MAC_MODE_SEND_CONFIGS;
3984 tw32_f(MAC_MODE, tp->mac_mode);
3985 udelay(40);
3987 *txflags = aninfo.txconfig;
3988 *rxflags = aninfo.flags;
3990 if (status == ANEG_DONE &&
3991 (aninfo.flags & (MR_AN_COMPLETE | MR_LINK_OK |
3992 MR_LP_ADV_FULL_DUPLEX)))
3993 res = 1;
3995 return res;
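/* Bring-up sequence for the external BCM8002 SerDes PHY; the
 * register writes below appear to follow the vendor init sequence.
 */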
3998 static void tg3_init_bcm8002(struct tg3 *tp)
4000 u32 mac_status = tr32(MAC_STATUS);
4001 int i;
4003 /* Reset when initting for the first time or when we have a link. */
4004 if (tg3_flag(tp, INIT_COMPLETE) &&
4005 !(mac_status & MAC_STATUS_PCS_SYNCED))
4006 return;
4008 /* Set PLL lock range. */
4009 tg3_writephy(tp, 0x16, 0x8007);
4011 /* SW reset */
4012 tg3_writephy(tp, MII_BMCR, BMCR_RESET);
4014 /* Wait for reset to complete. */
4015 /* XXX schedule_timeout() ... */
4016 for (i = 0; i < 500; i++)
4017 udelay(10);
4019 /* Config mode; select PMA/Ch 1 regs. */
4020 tg3_writephy(tp, 0x10, 0x8411);
4022 /* Enable auto-lock and comdet, select txclk for tx. */
4023 tg3_writephy(tp, 0x11, 0x0a10);
4025 tg3_writephy(tp, 0x18, 0x00a0);
4026 tg3_writephy(tp, 0x16, 0x41ff);
4028 /* Assert and deassert POR. */
4029 tg3_writephy(tp, 0x13, 0x0400);
4030 udelay(40);
4031 tg3_writephy(tp, 0x13, 0x0000);
4033 tg3_writephy(tp, 0x11, 0x0a50);
4034 udelay(40);
4035 tg3_writephy(tp, 0x11, 0x0a10);
4037 /* Wait for signal to stabilize */
4038 /* XXX schedule_timeout() ... */
4039 for (i = 0; i < 15000; i++)
4040 udelay(10);
4042 /* Deselect the channel register so we can read the PHYID
4043 * later.
4044 */
4045 tg3_writephy(tp, 0x10, 0x8011);
4048 static int tg3_setup_fiber_hw_autoneg(struct tg3 *tp, u32 mac_status)
4050 u16 flowctrl;
4051 u32 sg_dig_ctrl, sg_dig_status;
4052 u32 serdes_cfg, expected_sg_dig_ctrl;
4053 int workaround, port_a;
4054 int current_link_up;
4056 serdes_cfg = 0;
4057 expected_sg_dig_ctrl = 0;
4058 workaround = 0;
4059 port_a = 1;
4060 current_link_up = 0;
4062 if (tp->pci_chip_rev_id != CHIPREV_ID_5704_A0 &&
4063 tp->pci_chip_rev_id != CHIPREV_ID_5704_A1) {
4064 workaround = 1;
4065 if (tr32(TG3PCI_DUAL_MAC_CTRL) & DUAL_MAC_CTRL_ID)
4066 port_a = 0;
4068 /* preserve bits 0-11,13,14 for signal pre-emphasis */
4069 /* preserve bits 20-23 for voltage regulator */
4070 serdes_cfg = tr32(MAC_SERDES_CFG) & 0x00f06fff;
4073 sg_dig_ctrl = tr32(SG_DIG_CTRL);
4075 if (tp->link_config.autoneg != AUTONEG_ENABLE) {
4076 if (sg_dig_ctrl & SG_DIG_USING_HW_AUTONEG) {
4077 if (workaround) {
4078 u32 val = serdes_cfg;
4080 if (port_a)
4081 val |= 0xc010000;
4082 else
4083 val |= 0x4010000;
4084 tw32_f(MAC_SERDES_CFG, val);
4087 tw32_f(SG_DIG_CTRL, SG_DIG_COMMON_SETUP);
4089 if (mac_status & MAC_STATUS_PCS_SYNCED) {
4090 tg3_setup_flow_control(tp, 0, 0);
4091 current_link_up = 1;
4093 goto out;
4096 /* Want auto-negotiation. */
4097 expected_sg_dig_ctrl = SG_DIG_USING_HW_AUTONEG | SG_DIG_COMMON_SETUP;
4099 flowctrl = tg3_advert_flowctrl_1000X(tp->link_config.flowctrl);
4100 if (flowctrl & ADVERTISE_1000XPAUSE)
4101 expected_sg_dig_ctrl |= SG_DIG_PAUSE_CAP;
4102 if (flowctrl & ADVERTISE_1000XPSE_ASYM)
4103 expected_sg_dig_ctrl |= SG_DIG_ASYM_PAUSE;
4105 if (sg_dig_ctrl != expected_sg_dig_ctrl) {
4106 if ((tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT) &&
4107 tp->serdes_counter &&
4108 ((mac_status & (MAC_STATUS_PCS_SYNCED |
4109 MAC_STATUS_RCVD_CFG)) ==
4110 MAC_STATUS_PCS_SYNCED)) {
4111 tp->serdes_counter--;
4112 current_link_up = 1;
4113 goto out;
4115 restart_autoneg:
4116 if (workaround)
4117 tw32_f(MAC_SERDES_CFG, serdes_cfg | 0xc011000);
4118 tw32_f(SG_DIG_CTRL, expected_sg_dig_ctrl | SG_DIG_SOFT_RESET);
4119 udelay(5);
4120 tw32_f(SG_DIG_CTRL, expected_sg_dig_ctrl);
4122 tp->serdes_counter = SERDES_AN_TIMEOUT_5704S;
4123 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
4124 } else if (mac_status & (MAC_STATUS_PCS_SYNCED |
4125 MAC_STATUS_SIGNAL_DET)) {
4126 sg_dig_status = tr32(SG_DIG_STATUS);
4127 mac_status = tr32(MAC_STATUS);
4129 if ((sg_dig_status & SG_DIG_AUTONEG_COMPLETE) &&
4130 (mac_status & MAC_STATUS_PCS_SYNCED)) {
4131 u32 local_adv = 0, remote_adv = 0;
4133 if (sg_dig_ctrl & SG_DIG_PAUSE_CAP)
4134 local_adv |= ADVERTISE_1000XPAUSE;
4135 if (sg_dig_ctrl & SG_DIG_ASYM_PAUSE)
4136 local_adv |= ADVERTISE_1000XPSE_ASYM;
4138 if (sg_dig_status & SG_DIG_PARTNER_PAUSE_CAPABLE)
4139 remote_adv |= LPA_1000XPAUSE;
4140 if (sg_dig_status & SG_DIG_PARTNER_ASYM_PAUSE)
4141 remote_adv |= LPA_1000XPAUSE_ASYM;
4143 tg3_setup_flow_control(tp, local_adv, remote_adv);
4144 current_link_up = 1;
4145 tp->serdes_counter = 0;
4146 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
4147 } else if (!(sg_dig_status & SG_DIG_AUTONEG_COMPLETE)) {
4148 if (tp->serdes_counter)
4149 tp->serdes_counter--;
4150 else {
4151 if (workaround) {
4152 u32 val = serdes_cfg;
4154 if (port_a)
4155 val |= 0xc010000;
4156 else
4157 val |= 0x4010000;
4159 tw32_f(MAC_SERDES_CFG, val);
4162 tw32_f(SG_DIG_CTRL, SG_DIG_COMMON_SETUP);
4163 udelay(40);
4165 /* Link parallel detection - link is up */
4166 /* only if we have PCS_SYNC and not */
4167 /* receiving config code words */
4168 mac_status = tr32(MAC_STATUS);
4169 if ((mac_status & MAC_STATUS_PCS_SYNCED) &&
4170 !(mac_status & MAC_STATUS_RCVD_CFG)) {
4171 tg3_setup_flow_control(tp, 0, 0);
4172 current_link_up = 1;
4173 tp->phy_flags |=
4174 TG3_PHYFLG_PARALLEL_DETECT;
4175 tp->serdes_counter =
4176 SERDES_PARALLEL_DET_TIMEOUT;
4177 } else
4178 goto restart_autoneg;
4181 } else {
4182 tp->serdes_counter = SERDES_AN_TIMEOUT_5704S;
4183 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
4186 out:
4187 return current_link_up;
4190 static int tg3_setup_fiber_by_hand(struct tg3 *tp, u32 mac_status)
4192 int current_link_up = 0;
4194 if (!(mac_status & MAC_STATUS_PCS_SYNCED))
4195 goto out;
4197 if (tp->link_config.autoneg == AUTONEG_ENABLE) {
4198 u32 txflags, rxflags;
4199 int i;
4201 if (fiber_autoneg(tp, &txflags, &rxflags)) {
4202 u32 local_adv = 0, remote_adv = 0;
4204 if (txflags & ANEG_CFG_PS1)
4205 local_adv |= ADVERTISE_1000XPAUSE;
4206 if (txflags & ANEG_CFG_PS2)
4207 local_adv |= ADVERTISE_1000XPSE_ASYM;
4209 if (rxflags & MR_LP_ADV_SYM_PAUSE)
4210 remote_adv |= LPA_1000XPAUSE;
4211 if (rxflags & MR_LP_ADV_ASYM_PAUSE)
4212 remote_adv |= LPA_1000XPAUSE_ASYM;
4214 tg3_setup_flow_control(tp, local_adv, remote_adv);
4216 current_link_up = 1;
4218 for (i = 0; i < 30; i++) {
4219 udelay(20);
4220 tw32_f(MAC_STATUS,
4221 (MAC_STATUS_SYNC_CHANGED |
4222 MAC_STATUS_CFG_CHANGED));
4223 udelay(40);
4224 if ((tr32(MAC_STATUS) &
4225 (MAC_STATUS_SYNC_CHANGED |
4226 MAC_STATUS_CFG_CHANGED)) == 0)
4227 break;
4230 mac_status = tr32(MAC_STATUS);
4231 if (current_link_up == 0 &&
4232 (mac_status & MAC_STATUS_PCS_SYNCED) &&
4233 !(mac_status & MAC_STATUS_RCVD_CFG))
4234 current_link_up = 1;
4235 } else {
4236 tg3_setup_flow_control(tp, 0, 0);
4238 /* Forcing 1000FD link up. */
4239 current_link_up = 1;
4241 tw32_f(MAC_MODE, (tp->mac_mode | MAC_MODE_SEND_CONFIGS));
4242 udelay(40);
4244 tw32_f(MAC_MODE, tp->mac_mode);
4245 udelay(40);
4248 out:
4249 return current_link_up;
4252 static int tg3_setup_fiber_phy(struct tg3 *tp, int force_reset)
4254 u32 orig_pause_cfg;
4255 u16 orig_active_speed;
4256 u8 orig_active_duplex;
4257 u32 mac_status;
4258 int current_link_up;
4259 int i;
4261 orig_pause_cfg = tp->link_config.active_flowctrl;
4262 orig_active_speed = tp->link_config.active_speed;
4263 orig_active_duplex = tp->link_config.active_duplex;
4265 if (!tg3_flag(tp, HW_AUTONEG) &&
4266 netif_carrier_ok(tp->dev) &&
4267 tg3_flag(tp, INIT_COMPLETE)) {
4268 mac_status = tr32(MAC_STATUS);
4269 mac_status &= (MAC_STATUS_PCS_SYNCED |
4270 MAC_STATUS_SIGNAL_DET |
4271 MAC_STATUS_CFG_CHANGED |
4272 MAC_STATUS_RCVD_CFG);
4273 if (mac_status == (MAC_STATUS_PCS_SYNCED |
4274 MAC_STATUS_SIGNAL_DET)) {
4275 tw32_f(MAC_STATUS, (MAC_STATUS_SYNC_CHANGED |
4276 MAC_STATUS_CFG_CHANGED));
4277 return 0;
4281 tw32_f(MAC_TX_AUTO_NEG, 0);
4283 tp->mac_mode &= ~(MAC_MODE_PORT_MODE_MASK | MAC_MODE_HALF_DUPLEX);
4284 tp->mac_mode |= MAC_MODE_PORT_MODE_TBI;
4285 tw32_f(MAC_MODE, tp->mac_mode);
4286 udelay(40);
4288 if (tp->phy_id == TG3_PHY_ID_BCM8002)
4289 tg3_init_bcm8002(tp);
4291 /* Enable link change event even when serdes polling. */
4292 tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
4293 udelay(40);
4295 current_link_up = 0;
4296 mac_status = tr32(MAC_STATUS);
4298 if (tg3_flag(tp, HW_AUTONEG))
4299 current_link_up = tg3_setup_fiber_hw_autoneg(tp, mac_status);
4300 else
4301 current_link_up = tg3_setup_fiber_by_hand(tp, mac_status);
4303 tp->napi[0].hw_status->status =
4304 (SD_STATUS_UPDATED |
4305 (tp->napi[0].hw_status->status & ~SD_STATUS_LINK_CHG));
4307 for (i = 0; i < 100; i++) {
4308 tw32_f(MAC_STATUS, (MAC_STATUS_SYNC_CHANGED |
4309 MAC_STATUS_CFG_CHANGED));
4310 udelay(5);
4311 if ((tr32(MAC_STATUS) & (MAC_STATUS_SYNC_CHANGED |
4312 MAC_STATUS_CFG_CHANGED |
4313 MAC_STATUS_LNKSTATE_CHANGED)) == 0)
4314 break;
4317 mac_status = tr32(MAC_STATUS);
4318 if ((mac_status & MAC_STATUS_PCS_SYNCED) == 0) {
4319 current_link_up = 0;
4320 if (tp->link_config.autoneg == AUTONEG_ENABLE &&
4321 tp->serdes_counter == 0) {
4322 tw32_f(MAC_MODE, (tp->mac_mode |
4323 MAC_MODE_SEND_CONFIGS));
4324 udelay(1);
4325 tw32_f(MAC_MODE, tp->mac_mode);
4329 if (current_link_up == 1) {
4330 tp->link_config.active_speed = SPEED_1000;
4331 tp->link_config.active_duplex = DUPLEX_FULL;
4332 tw32(MAC_LED_CTRL, (tp->led_ctrl |
4333 LED_CTRL_LNKLED_OVERRIDE |
4334 LED_CTRL_1000MBPS_ON));
4335 } else {
4336 tp->link_config.active_speed = SPEED_INVALID;
4337 tp->link_config.active_duplex = DUPLEX_INVALID;
4338 tw32(MAC_LED_CTRL, (tp->led_ctrl |
4339 LED_CTRL_LNKLED_OVERRIDE |
4340 LED_CTRL_TRAFFIC_OVERRIDE));
4343 if (current_link_up != netif_carrier_ok(tp->dev)) {
4344 if (current_link_up)
4345 netif_carrier_on(tp->dev);
4346 else
4347 netif_carrier_off(tp->dev);
4348 tg3_link_report(tp);
4349 } else {
4350 u32 now_pause_cfg = tp->link_config.active_flowctrl;
4351 if (orig_pause_cfg != now_pause_cfg ||
4352 orig_active_speed != tp->link_config.active_speed ||
4353 orig_active_duplex != tp->link_config.active_duplex)
4354 tg3_link_report(tp);
4357 return 0;
4360 static int tg3_setup_fiber_mii_phy(struct tg3 *tp, int force_reset)
4362 int current_link_up, err = 0;
4363 u32 bmsr, bmcr;
4364 u16 current_speed;
4365 u8 current_duplex;
4366 u32 local_adv, remote_adv;
4368 tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
4369 tw32_f(MAC_MODE, tp->mac_mode);
4370 udelay(40);
4372 tw32(MAC_EVENT, 0);
4374 tw32_f(MAC_STATUS,
4375 (MAC_STATUS_SYNC_CHANGED |
4376 MAC_STATUS_CFG_CHANGED |
4377 MAC_STATUS_MI_COMPLETION |
4378 MAC_STATUS_LNKSTATE_CHANGED));
4379 udelay(40);
4381 if (force_reset)
4382 tg3_phy_reset(tp);
4384 current_link_up = 0;
4385 current_speed = SPEED_INVALID;
4386 current_duplex = DUPLEX_INVALID;
4388 err |= tg3_readphy(tp, MII_BMSR, &bmsr);
4389 err |= tg3_readphy(tp, MII_BMSR, &bmsr);
4390 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714) {
4391 if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP)
4392 bmsr |= BMSR_LSTATUS;
4393 else
4394 bmsr &= ~BMSR_LSTATUS;
4397 err |= tg3_readphy(tp, MII_BMCR, &bmcr);
4399 if ((tp->link_config.autoneg == AUTONEG_ENABLE) && !force_reset &&
4400 (tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT)) {
4401 /* do nothing, just check for link up at the end */
4402 } else if (tp->link_config.autoneg == AUTONEG_ENABLE) {
4403 u32 adv, new_adv;
4405 err |= tg3_readphy(tp, MII_ADVERTISE, &adv);
4406 new_adv = adv & ~(ADVERTISE_1000XFULL | ADVERTISE_1000XHALF |
4407 ADVERTISE_1000XPAUSE |
4408 ADVERTISE_1000XPSE_ASYM |
4409 ADVERTISE_SLCT);
4411 new_adv |= tg3_advert_flowctrl_1000X(tp->link_config.flowctrl);
4413 if (tp->link_config.advertising & ADVERTISED_1000baseT_Half)
4414 new_adv |= ADVERTISE_1000XHALF;
4415 if (tp->link_config.advertising & ADVERTISED_1000baseT_Full)
4416 new_adv |= ADVERTISE_1000XFULL;
4418 if ((new_adv != adv) || !(bmcr & BMCR_ANENABLE)) {
4419 tg3_writephy(tp, MII_ADVERTISE, new_adv);
4420 bmcr |= BMCR_ANENABLE | BMCR_ANRESTART;
4421 tg3_writephy(tp, MII_BMCR, bmcr);
4423 tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
4424 tp->serdes_counter = SERDES_AN_TIMEOUT_5714S;
4425 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
4427 return err;
4429 } else {
4430 u32 new_bmcr;
4432 bmcr &= ~BMCR_SPEED1000;
4433 new_bmcr = bmcr & ~(BMCR_ANENABLE | BMCR_FULLDPLX);
4435 if (tp->link_config.duplex == DUPLEX_FULL)
4436 new_bmcr |= BMCR_FULLDPLX;
4438 if (new_bmcr != bmcr) {
4439 /* BMCR_SPEED1000 is a reserved bit that needs
4440 * to be set on write.
4442 new_bmcr |= BMCR_SPEED1000;
4444 /* Force a linkdown */
4445 if (netif_carrier_ok(tp->dev)) {
4446 u32 adv;
4448 err |= tg3_readphy(tp, MII_ADVERTISE, &adv);
4449 adv &= ~(ADVERTISE_1000XFULL |
4450 ADVERTISE_1000XHALF |
4451 ADVERTISE_SLCT);
4452 tg3_writephy(tp, MII_ADVERTISE, adv);
4453 tg3_writephy(tp, MII_BMCR, bmcr |
4454 BMCR_ANRESTART |
4455 BMCR_ANENABLE);
4456 udelay(10);
4457 netif_carrier_off(tp->dev);
4459 tg3_writephy(tp, MII_BMCR, new_bmcr);
4460 bmcr = new_bmcr;
4461 err |= tg3_readphy(tp, MII_BMSR, &bmsr);
4462 err |= tg3_readphy(tp, MII_BMSR, &bmsr);
4463 if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
4464 ASIC_REV_5714) {
4465 if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP)
4466 bmsr |= BMSR_LSTATUS;
4467 else
4468 bmsr &= ~BMSR_LSTATUS;
4470 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
4474 if (bmsr & BMSR_LSTATUS) {
4475 current_speed = SPEED_1000;
4476 current_link_up = 1;
4477 if (bmcr & BMCR_FULLDPLX)
4478 current_duplex = DUPLEX_FULL;
4479 else
4480 current_duplex = DUPLEX_HALF;
4482 local_adv = 0;
4483 remote_adv = 0;
4485 if (bmcr & BMCR_ANENABLE) {
4486 u32 common;
4488 err |= tg3_readphy(tp, MII_ADVERTISE, &local_adv);
4489 err |= tg3_readphy(tp, MII_LPA, &remote_adv);
4490 common = local_adv & remote_adv;
4491 if (common & (ADVERTISE_1000XHALF |
4492 ADVERTISE_1000XFULL)) {
4493 if (common & ADVERTISE_1000XFULL)
4494 current_duplex = DUPLEX_FULL;
4495 else
4496 current_duplex = DUPLEX_HALF;
4497 } else if (!tg3_flag(tp, 5780_CLASS)) {
4498 /* Link is up via parallel detect */
4499 } else {
4500 current_link_up = 0;
4505 if (current_link_up == 1 && current_duplex == DUPLEX_FULL)
4506 tg3_setup_flow_control(tp, local_adv, remote_adv);
4508 tp->mac_mode &= ~MAC_MODE_HALF_DUPLEX;
4509 if (tp->link_config.active_duplex == DUPLEX_HALF)
4510 tp->mac_mode |= MAC_MODE_HALF_DUPLEX;
4512 tw32_f(MAC_MODE, tp->mac_mode);
4513 udelay(40);
4515 tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
4517 tp->link_config.active_speed = current_speed;
4518 tp->link_config.active_duplex = current_duplex;
4520 if (current_link_up != netif_carrier_ok(tp->dev)) {
4521 if (current_link_up)
4522 netif_carrier_on(tp->dev);
4523 else {
4524 netif_carrier_off(tp->dev);
4525 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
4527 tg3_link_report(tp);
4529 return err;
4532 static void tg3_serdes_parallel_detect(struct tg3 *tp)
4534 if (tp->serdes_counter) {
4535 /* Give autoneg time to complete. */
4536 tp->serdes_counter--;
4537 return;
4540 if (!netif_carrier_ok(tp->dev) &&
4541 (tp->link_config.autoneg == AUTONEG_ENABLE)) {
4542 u32 bmcr;
4544 tg3_readphy(tp, MII_BMCR, &bmcr);
4545 if (bmcr & BMCR_ANENABLE) {
4546 u32 phy1, phy2;
4548 /* Select shadow register 0x1f */
4549 tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x7c00);
4550 tg3_readphy(tp, MII_TG3_MISC_SHDW, &phy1);
4552 /* Select expansion interrupt status register */
4553 tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
4554 MII_TG3_DSP_EXP1_INT_STAT);
4555 tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &phy2);
4556 tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &phy2);
4558 if ((phy1 & 0x10) && !(phy2 & 0x20)) {
4559 /* We have signal detect and not receiving
4560 * config code words, link is up by parallel
4561 * detection.
4564 bmcr &= ~BMCR_ANENABLE;
4565 bmcr |= BMCR_SPEED1000 | BMCR_FULLDPLX;
4566 tg3_writephy(tp, MII_BMCR, bmcr);
4567 tp->phy_flags |= TG3_PHYFLG_PARALLEL_DETECT;
4570 } else if (netif_carrier_ok(tp->dev) &&
4571 (tp->link_config.autoneg == AUTONEG_ENABLE) &&
4572 (tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT)) {
4573 u32 phy2;
4575 /* Select expansion interrupt status register */
4576 tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
4577 MII_TG3_DSP_EXP1_INT_STAT);
4578 tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &phy2);
4579 if (phy2 & 0x20) {
4580 u32 bmcr;
4582 /* Config code words received, turn on autoneg. */
4583 tg3_readphy(tp, MII_BMCR, &bmcr);
4584 tg3_writephy(tp, MII_BMCR, bmcr | BMCR_ANENABLE);
4586 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
4592 static int tg3_setup_phy(struct tg3 *tp, int force_reset)
4594 u32 val;
4595 int err;
4597 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
4598 err = tg3_setup_fiber_phy(tp, force_reset);
4599 else if (tp->phy_flags & TG3_PHYFLG_MII_SERDES)
4600 err = tg3_setup_fiber_mii_phy(tp, force_reset);
4601 else
4602 err = tg3_setup_copper_phy(tp, force_reset);
4604 if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5784_AX) {
4605 u32 scale;
4607 val = tr32(TG3_CPMU_CLCK_STAT) & CPMU_CLCK_STAT_MAC_CLCK_MASK;
4608 if (val == CPMU_CLCK_STAT_MAC_CLCK_62_5)
4609 scale = 65;
4610 else if (val == CPMU_CLCK_STAT_MAC_CLCK_6_25)
4611 scale = 6;
4612 else
4613 scale = 12;
4615 val = tr32(GRC_MISC_CFG) & ~GRC_MISC_CFG_PRESCALAR_MASK;
4616 val |= (scale << GRC_MISC_CFG_PRESCALAR_SHIFT);
4617 tw32(GRC_MISC_CFG, val);
4620 val = (2 << TX_LENGTHS_IPG_CRS_SHIFT) |
4621 (6 << TX_LENGTHS_IPG_SHIFT);
4622 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720)
4623 val |= tr32(MAC_TX_LENGTHS) &
4624 (TX_LENGTHS_JMB_FRM_LEN_MSK |
4625 TX_LENGTHS_CNT_DWN_VAL_MSK);
4627 if (tp->link_config.active_speed == SPEED_1000 &&
4628 tp->link_config.active_duplex == DUPLEX_HALF)
4629 tw32(MAC_TX_LENGTHS, val |
4630 (0xff << TX_LENGTHS_SLOT_TIME_SHIFT));
4631 else
4632 tw32(MAC_TX_LENGTHS, val |
4633 (32 << TX_LENGTHS_SLOT_TIME_SHIFT));
4635 if (!tg3_flag(tp, 5705_PLUS)) {
4636 if (netif_carrier_ok(tp->dev)) {
4637 tw32(HOSTCC_STAT_COAL_TICKS,
4638 tp->coal.stats_block_coalesce_usecs);
4639 } else {
4640 tw32(HOSTCC_STAT_COAL_TICKS, 0);
4644 if (tg3_flag(tp, ASPM_WORKAROUND)) {
4645 val = tr32(PCIE_PWR_MGMT_THRESH);
4646 if (!netif_carrier_ok(tp->dev))
4647 val = (val & ~PCIE_PWR_MGMT_L1_THRESH_MSK) |
4648 tp->pwrmgmt_thresh;
4649 else
4650 val |= PCIE_PWR_MGMT_L1_THRESH_MSK;
4651 tw32(PCIE_PWR_MGMT_THRESH, val);
4654 return err;
4657 static inline int tg3_irq_sync(struct tg3 *tp)
4659 return tp->irq_sync;
4662 static inline void tg3_rd32_loop(struct tg3 *tp, u32 *dst, u32 off, u32 len)
4664 int i;
4666 dst = (u32 *)((u8 *)dst + off);
4667 for (i = 0; i < len; i += sizeof(u32))
4668 *dst++ = tr32(off + i);
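/* Illustrative usage (arguments taken from tg3_dump_legacy_regs() below):
 * tg3_rd32_loop(tp, regs, MAC_MODE, 0x4f0) reads the 0x4f0 bytes of
 * registers starting at MAC_MODE, one 32-bit register per iteration,
 * storing each at the same byte offset it occupies in register space.
 */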
4671 static void tg3_dump_legacy_regs(struct tg3 *tp, u32 *regs)
4673 tg3_rd32_loop(tp, regs, TG3PCI_VENDOR, 0xb0);
4674 tg3_rd32_loop(tp, regs, MAILBOX_INTERRUPT_0, 0x200);
4675 tg3_rd32_loop(tp, regs, MAC_MODE, 0x4f0);
4676 tg3_rd32_loop(tp, regs, SNDDATAI_MODE, 0xe0);
4677 tg3_rd32_loop(tp, regs, SNDDATAC_MODE, 0x04);
4678 tg3_rd32_loop(tp, regs, SNDBDS_MODE, 0x80);
4679 tg3_rd32_loop(tp, regs, SNDBDI_MODE, 0x48);
4680 tg3_rd32_loop(tp, regs, SNDBDC_MODE, 0x04);
4681 tg3_rd32_loop(tp, regs, RCVLPC_MODE, 0x20);
4682 tg3_rd32_loop(tp, regs, RCVLPC_SELLST_BASE, 0x15c);
4683 tg3_rd32_loop(tp, regs, RCVDBDI_MODE, 0x0c);
4684 tg3_rd32_loop(tp, regs, RCVDBDI_JUMBO_BD, 0x3c);
4685 tg3_rd32_loop(tp, regs, RCVDBDI_BD_PROD_IDX_0, 0x44);
4686 tg3_rd32_loop(tp, regs, RCVDCC_MODE, 0x04);
4687 tg3_rd32_loop(tp, regs, RCVBDI_MODE, 0x20);
4688 tg3_rd32_loop(tp, regs, RCVCC_MODE, 0x14);
4689 tg3_rd32_loop(tp, regs, RCVLSC_MODE, 0x08);
4690 tg3_rd32_loop(tp, regs, MBFREE_MODE, 0x08);
4691 tg3_rd32_loop(tp, regs, HOSTCC_MODE, 0x100);
4693 if (tg3_flag(tp, SUPPORT_MSIX))
4694 tg3_rd32_loop(tp, regs, HOSTCC_RXCOL_TICKS_VEC1, 0x180);
4696 tg3_rd32_loop(tp, regs, MEMARB_MODE, 0x10);
4697 tg3_rd32_loop(tp, regs, BUFMGR_MODE, 0x58);
4698 tg3_rd32_loop(tp, regs, RDMAC_MODE, 0x08);
4699 tg3_rd32_loop(tp, regs, WDMAC_MODE, 0x08);
4700 tg3_rd32_loop(tp, regs, RX_CPU_MODE, 0x04);
4701 tg3_rd32_loop(tp, regs, RX_CPU_STATE, 0x04);
4702 tg3_rd32_loop(tp, regs, RX_CPU_PGMCTR, 0x04);
4703 tg3_rd32_loop(tp, regs, RX_CPU_HWBKPT, 0x04);
4705 if (!tg3_flag(tp, 5705_PLUS)) {
4706 tg3_rd32_loop(tp, regs, TX_CPU_MODE, 0x04);
4707 tg3_rd32_loop(tp, regs, TX_CPU_STATE, 0x04);
4708 tg3_rd32_loop(tp, regs, TX_CPU_PGMCTR, 0x04);
4711 tg3_rd32_loop(tp, regs, GRCMBOX_INTERRUPT_0, 0x110);
4712 tg3_rd32_loop(tp, regs, FTQ_RESET, 0x120);
4713 tg3_rd32_loop(tp, regs, MSGINT_MODE, 0x0c);
4714 tg3_rd32_loop(tp, regs, DMAC_MODE, 0x04);
4715 tg3_rd32_loop(tp, regs, GRC_MODE, 0x4c);
4717 if (tg3_flag(tp, NVRAM))
4718 tg3_rd32_loop(tp, regs, NVRAM_CMD, 0x24);
4721 static void tg3_dump_state(struct tg3 *tp)
4723 int i;
4724 u32 *regs;
4726 regs = kzalloc(TG3_REG_BLK_SIZE, GFP_ATOMIC);
4727 if (!regs) {
4728 netdev_err(tp->dev, "Failed allocating register dump buffer\n");
4729 return;
4732 if (tg3_flag(tp, PCI_EXPRESS)) {
4733 /* Read up to but not including private PCI registers */
4734 for (i = 0; i < TG3_PCIE_TLDLPL_PORT; i += sizeof(u32))
4735 regs[i / sizeof(u32)] = tr32(i);
4736 } else
4737 tg3_dump_legacy_regs(tp, regs);
4739 for (i = 0; i < TG3_REG_BLK_SIZE / sizeof(u32); i += 4) {
4740 if (!regs[i + 0] && !regs[i + 1] &&
4741 !regs[i + 2] && !regs[i + 3])
4742 continue;
4744 netdev_err(tp->dev, "0x%08x: 0x%08x, 0x%08x, 0x%08x, 0x%08x\n",
4745 i * 4,
4746 regs[i + 0], regs[i + 1], regs[i + 2], regs[i + 3]);
4749 kfree(regs);
4751 for (i = 0; i < tp->irq_cnt; i++) {
4752 struct tg3_napi *tnapi = &tp->napi[i];
4754 /* SW status block */
4755 netdev_err(tp->dev,
4756 "%d: Host status block [%08x:%08x:(%04x:%04x:%04x):(%04x:%04x)]\n",
4758 tnapi->hw_status->status,
4759 tnapi->hw_status->status_tag,
4760 tnapi->hw_status->rx_jumbo_consumer,
4761 tnapi->hw_status->rx_consumer,
4762 tnapi->hw_status->rx_mini_consumer,
4763 tnapi->hw_status->idx[0].rx_producer,
4764 tnapi->hw_status->idx[0].tx_consumer);
4766 netdev_err(tp->dev,
4767 "%d: NAPI info [%08x:%08x:(%04x:%04x:%04x):%04x:(%04x:%04x:%04x:%04x)]\n",
4769 tnapi->last_tag, tnapi->last_irq_tag,
4770 tnapi->tx_prod, tnapi->tx_cons, tnapi->tx_pending,
4771 tnapi->rx_rcb_ptr,
4772 tnapi->prodring.rx_std_prod_idx,
4773 tnapi->prodring.rx_std_cons_idx,
4774 tnapi->prodring.rx_jmb_prod_idx,
4775 tnapi->prodring.rx_jmb_cons_idx);
4779 /* This is called whenever we suspect that the system chipset is re-
4780 * ordering the sequence of MMIO to the tx send mailbox. The symptom
4781 * is bogus tx completions. We try to recover by setting the
4782 * TG3_FLAG_MBOX_WRITE_REORDER flag and resetting the chip later
4783 * in the workqueue.
4785 static void tg3_tx_recover(struct tg3 *tp)
4787 BUG_ON(tg3_flag(tp, MBOX_WRITE_REORDER) ||
4788 tp->write32_tx_mbox == tg3_write_indirect_mbox);
4790 netdev_warn(tp->dev,
4791 "The system may be re-ordering memory-mapped I/O "
4792 "cycles to the network device, attempting to recover. "
4793 "Please report the problem to the driver maintainer "
4794 "and include system chipset information.\n");
4796 spin_lock(&tp->lock);
4797 tg3_flag_set(tp, TX_RECOVERY_PENDING);
4798 spin_unlock(&tp->lock);
4801 static inline u32 tg3_tx_avail(struct tg3_napi *tnapi)
4803 /* Tell compiler to fetch tx indices from memory. */
4804 barrier();
4805 return tnapi->tx_pending -
4806 ((tnapi->tx_prod - tnapi->tx_cons) & (TG3_TX_RING_SIZE - 1));
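/* Worked example (values assumed for illustration): with a 512-entry tx
 * ring, tx_pending == 511, tx_prod == 5 and tx_cons == 510, the in-flight
 * count is (5 - 510) & 511 == 7, leaving 504 descriptors available.  The
 * mask keeps the subtraction correct after tx_prod wraps past the end of
 * the ring.
 */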
4809 /* Tigon3 never reports partial packet sends. So we do not
4810 * need special logic to handle SKBs that have not had all
4811 * of their frags sent yet, like SunGEM does.
4813 static void tg3_tx(struct tg3_napi *tnapi)
4815 struct tg3 *tp = tnapi->tp;
4816 u32 hw_idx = tnapi->hw_status->idx[0].tx_consumer;
4817 u32 sw_idx = tnapi->tx_cons;
4818 struct netdev_queue *txq;
4819 int index = tnapi - tp->napi;
4821 if (tg3_flag(tp, ENABLE_TSS))
4822 index--;
4824 txq = netdev_get_tx_queue(tp->dev, index);
4826 while (sw_idx != hw_idx) {
4827 struct tg3_tx_ring_info *ri = &tnapi->tx_buffers[sw_idx];
4828 struct sk_buff *skb = ri->skb;
4829 int i, tx_bug = 0;
4831 if (unlikely(skb == NULL)) {
4832 tg3_tx_recover(tp);
4833 return;
4836 pci_unmap_single(tp->pdev,
4837 dma_unmap_addr(ri, mapping),
4838 skb_headlen(skb),
4839 PCI_DMA_TODEVICE);
4841 ri->skb = NULL;
4843 while (ri->fragmented) {
4844 ri->fragmented = false;
4845 sw_idx = NEXT_TX(sw_idx);
4846 ri = &tnapi->tx_buffers[sw_idx];
4849 sw_idx = NEXT_TX(sw_idx);
4851 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
4852 ri = &tnapi->tx_buffers[sw_idx];
4853 if (unlikely(ri->skb != NULL || sw_idx == hw_idx))
4854 tx_bug = 1;
4856 pci_unmap_page(tp->pdev,
4857 dma_unmap_addr(ri, mapping),
4858 skb_shinfo(skb)->frags[i].size,
4859 PCI_DMA_TODEVICE);
4861 while (ri->fragmented) {
4862 ri->fragmented = false;
4863 sw_idx = NEXT_TX(sw_idx);
4864 ri = &tnapi->tx_buffers[sw_idx];
4867 sw_idx = NEXT_TX(sw_idx);
4870 dev_kfree_skb(skb);
4872 if (unlikely(tx_bug)) {
4873 tg3_tx_recover(tp);
4874 return;
4878 tnapi->tx_cons = sw_idx;
4880 /* Need to make the tx_cons update visible to tg3_start_xmit()
4881 * before checking for netif_queue_stopped(). Without the
4882 * memory barrier, there is a small possibility that tg3_start_xmit()
4883 * will miss it and cause the queue to be stopped forever.
4885 smp_mb();
4887 if (unlikely(netif_tx_queue_stopped(txq) &&
4888 (tg3_tx_avail(tnapi) > TG3_TX_WAKEUP_THRESH(tnapi)))) {
4889 __netif_tx_lock(txq, smp_processor_id());
4890 if (netif_tx_queue_stopped(txq) &&
4891 (tg3_tx_avail(tnapi) > TG3_TX_WAKEUP_THRESH(tnapi)))
4892 netif_tx_wake_queue(txq);
4893 __netif_tx_unlock(txq);
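/* The smp_mb() in tg3_tx() above pairs with the barrier in
 * tg3_start_xmit(): the producer stops the queue and then re-checks
 * tg3_tx_avail(), while the consumer updates tx_cons and then re-checks
 * netif_tx_queue_stopped(), so at least one side always observes the
 * other's most recent update.
 */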
4897 static void tg3_rx_skb_free(struct tg3 *tp, struct ring_info *ri, u32 map_sz)
4899 if (!ri->skb)
4900 return;
4902 pci_unmap_single(tp->pdev, dma_unmap_addr(ri, mapping),
4903 map_sz, PCI_DMA_FROMDEVICE);
4904 dev_kfree_skb_any(ri->skb);
4905 ri->skb = NULL;
4908 /* Returns size of skb allocated or < 0 on error.
4910 * We only need to fill in the address because the other members
4911 * of the RX descriptor are invariant, see tg3_init_rings.
4913 * Note the purposeful asymmetry of cpu vs. chip accesses. For
4914 * posting buffers we only dirty the first cache line of the RX
4915 * descriptor (containing the address). Whereas for the RX status
4916 * buffers the cpu only reads the last cacheline of the RX descriptor
4917 * (to fetch the error flags, vlan tag, checksum, and opaque cookie).
4919 static int tg3_alloc_rx_skb(struct tg3 *tp, struct tg3_rx_prodring_set *tpr,
4920 u32 opaque_key, u32 dest_idx_unmasked)
4922 struct tg3_rx_buffer_desc *desc;
4923 struct ring_info *map;
4924 struct sk_buff *skb;
4925 dma_addr_t mapping;
4926 int skb_size, dest_idx;
4928 switch (opaque_key) {
4929 case RXD_OPAQUE_RING_STD:
4930 dest_idx = dest_idx_unmasked & tp->rx_std_ring_mask;
4931 desc = &tpr->rx_std[dest_idx];
4932 map = &tpr->rx_std_buffers[dest_idx];
4933 skb_size = tp->rx_pkt_map_sz;
4934 break;
4936 case RXD_OPAQUE_RING_JUMBO:
4937 dest_idx = dest_idx_unmasked & tp->rx_jmb_ring_mask;
4938 desc = &tpr->rx_jmb[dest_idx].std;
4939 map = &tpr->rx_jmb_buffers[dest_idx];
4940 skb_size = TG3_RX_JMB_MAP_SZ;
4941 break;
4943 default:
4944 return -EINVAL;
4947 /* Do not overwrite any of the map or rp information
4948 * until we are sure we can commit to a new buffer.
4950 * Callers depend upon this behavior and assume that
4951 * we leave everything unchanged if we fail.
4953 skb = netdev_alloc_skb(tp->dev, skb_size + tp->rx_offset);
4954 if (skb == NULL)
4955 return -ENOMEM;
4957 skb_reserve(skb, tp->rx_offset);
4959 mapping = pci_map_single(tp->pdev, skb->data, skb_size,
4960 PCI_DMA_FROMDEVICE);
4961 if (pci_dma_mapping_error(tp->pdev, mapping)) {
4962 dev_kfree_skb(skb);
4963 return -EIO;
4966 map->skb = skb;
4967 dma_unmap_addr_set(map, mapping, mapping);
4969 desc->addr_hi = ((u64)mapping >> 32);
4970 desc->addr_lo = ((u64)mapping & 0xffffffff);
4972 return skb_size;
4975 /* We only need to move the address over because the other
4976 * members of the RX descriptor are invariant. See notes above
4977 * tg3_alloc_rx_skb for full details.
4979 static void tg3_recycle_rx(struct tg3_napi *tnapi,
4980 struct tg3_rx_prodring_set *dpr,
4981 u32 opaque_key, int src_idx,
4982 u32 dest_idx_unmasked)
4984 struct tg3 *tp = tnapi->tp;
4985 struct tg3_rx_buffer_desc *src_desc, *dest_desc;
4986 struct ring_info *src_map, *dest_map;
4987 struct tg3_rx_prodring_set *spr = &tp->napi[0].prodring;
4988 int dest_idx;
4990 switch (opaque_key) {
4991 case RXD_OPAQUE_RING_STD:
4992 dest_idx = dest_idx_unmasked & tp->rx_std_ring_mask;
4993 dest_desc = &dpr->rx_std[dest_idx];
4994 dest_map = &dpr->rx_std_buffers[dest_idx];
4995 src_desc = &spr->rx_std[src_idx];
4996 src_map = &spr->rx_std_buffers[src_idx];
4997 break;
4999 case RXD_OPAQUE_RING_JUMBO:
5000 dest_idx = dest_idx_unmasked & tp->rx_jmb_ring_mask;
5001 dest_desc = &dpr->rx_jmb[dest_idx].std;
5002 dest_map = &dpr->rx_jmb_buffers[dest_idx];
5003 src_desc = &spr->rx_jmb[src_idx].std;
5004 src_map = &spr->rx_jmb_buffers[src_idx];
5005 break;
5007 default:
5008 return;
5011 dest_map->skb = src_map->skb;
5012 dma_unmap_addr_set(dest_map, mapping,
5013 dma_unmap_addr(src_map, mapping));
5014 dest_desc->addr_hi = src_desc->addr_hi;
5015 dest_desc->addr_lo = src_desc->addr_lo;
5017 /* Ensure that the update to the skb happens after the physical
5018 * addresses have been transferred to the new BD location.
5020 smp_wmb();
5022 src_map->skb = NULL;
5025 /* The RX ring scheme is composed of multiple rings which post fresh
5026 * buffers to the chip, and one special ring the chip uses to report
5027 * status back to the host.
5029 * The special ring reports the status of received packets to the
5030 * host. The chip does not write into the original descriptor the
5031 * RX buffer was obtained from. The chip simply takes the original
5032 * descriptor as provided by the host, updates the status and length
5033 * field, then writes this into the next status ring entry.
5035 * Each ring the host uses to post buffers to the chip is described
5036 * by a TG3_BDINFO entry in the chips SRAM area. When a packet arrives,
5037 * it is first placed into the on-chip ram. When the packet's length
5038 * is known, it walks down the TG3_BDINFO entries to select the ring.
5039 * Each TG3_BDINFO specifies a MAXLEN field and the first TG3_BDINFO
5040 * which is within the range of the new packet's length is chosen.
5042 * The "separate ring for rx status" scheme may sound queer, but it makes
5043 * sense from a cache coherency perspective. If only the host writes
5044 * to the buffer post rings, and only the chip writes to the rx status
5045 * rings, then cache lines never move beyond shared-modified state.
5046 * If both the host and chip were to write into the same ring, cache line
5047 * eviction could occur since both entities want it in an exclusive state.
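/* A concrete illustration (ring sizing assumed): if the standard ring's
 * TG3_BDINFO MAXLEN covers ~1500-byte frames and the jumbo ring's covers
 * 9000-byte frames, a 6000-byte arrival walks past the standard entry
 * and lands in a buffer that was posted on the jumbo ring.
 */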
5049 static int tg3_rx(struct tg3_napi *tnapi, int budget)
5051 struct tg3 *tp = tnapi->tp;
5052 u32 work_mask, rx_std_posted = 0;
5053 u32 std_prod_idx, jmb_prod_idx;
5054 u32 sw_idx = tnapi->rx_rcb_ptr;
5055 u16 hw_idx;
5056 int received;
5057 struct tg3_rx_prodring_set *tpr = &tnapi->prodring;
5059 hw_idx = *(tnapi->rx_rcb_prod_idx);
5061 * We need to order the read of hw_idx and the read of
5062 * the opaque cookie.
5064 rmb();
5065 work_mask = 0;
5066 received = 0;
5067 std_prod_idx = tpr->rx_std_prod_idx;
5068 jmb_prod_idx = tpr->rx_jmb_prod_idx;
5069 while (sw_idx != hw_idx && budget > 0) {
5070 struct ring_info *ri;
5071 struct tg3_rx_buffer_desc *desc = &tnapi->rx_rcb[sw_idx];
5072 unsigned int len;
5073 struct sk_buff *skb;
5074 dma_addr_t dma_addr;
5075 u32 opaque_key, desc_idx, *post_ptr;
5077 desc_idx = desc->opaque & RXD_OPAQUE_INDEX_MASK;
5078 opaque_key = desc->opaque & RXD_OPAQUE_RING_MASK;
5079 if (opaque_key == RXD_OPAQUE_RING_STD) {
5080 ri = &tp->napi[0].prodring.rx_std_buffers[desc_idx];
5081 dma_addr = dma_unmap_addr(ri, mapping);
5082 skb = ri->skb;
5083 post_ptr = &std_prod_idx;
5084 rx_std_posted++;
5085 } else if (opaque_key == RXD_OPAQUE_RING_JUMBO) {
5086 ri = &tp->napi[0].prodring.rx_jmb_buffers[desc_idx];
5087 dma_addr = dma_unmap_addr(ri, mapping);
5088 skb = ri->skb;
5089 post_ptr = &jmb_prod_idx;
5090 } else
5091 goto next_pkt_nopost;
5093 work_mask |= opaque_key;
5095 if ((desc->err_vlan & RXD_ERR_MASK) != 0 &&
5096 (desc->err_vlan != RXD_ERR_ODD_NIBBLE_RCVD_MII)) {
5097 drop_it:
5098 tg3_recycle_rx(tnapi, tpr, opaque_key,
5099 desc_idx, *post_ptr);
5100 drop_it_no_recycle:
5101 /* Other statistics kept track of by card. */
5102 tp->rx_dropped++;
5103 goto next_pkt;
5106 len = ((desc->idx_len & RXD_LEN_MASK) >> RXD_LEN_SHIFT) -
5107 ETH_FCS_LEN;
5109 if (len > TG3_RX_COPY_THRESH(tp)) {
5110 int skb_size;
5112 skb_size = tg3_alloc_rx_skb(tp, tpr, opaque_key,
5113 *post_ptr);
5114 if (skb_size < 0)
5115 goto drop_it;
5117 pci_unmap_single(tp->pdev, dma_addr, skb_size,
5118 PCI_DMA_FROMDEVICE);
5120 /* Ensure that the update to the skb happens
5121 * after the usage of the old DMA mapping.
5123 smp_wmb();
5125 ri->skb = NULL;
5127 skb_put(skb, len);
5128 } else {
5129 struct sk_buff *copy_skb;
5131 tg3_recycle_rx(tnapi, tpr, opaque_key,
5132 desc_idx, *post_ptr);
5134 copy_skb = netdev_alloc_skb(tp->dev, len +
5135 TG3_RAW_IP_ALIGN);
5136 if (copy_skb == NULL)
5137 goto drop_it_no_recycle;
5139 skb_reserve(copy_skb, TG3_RAW_IP_ALIGN);
5140 skb_put(copy_skb, len);
5141 pci_dma_sync_single_for_cpu(tp->pdev, dma_addr, len, PCI_DMA_FROMDEVICE);
5142 skb_copy_from_linear_data(skb, copy_skb->data, len);
5143 pci_dma_sync_single_for_device(tp->pdev, dma_addr, len, PCI_DMA_FROMDEVICE);
5145 /* We'll reuse the original ring buffer. */
5146 skb = copy_skb;
5149 if ((tp->dev->features & NETIF_F_RXCSUM) &&
5150 (desc->type_flags & RXD_FLAG_TCPUDP_CSUM) &&
5151 (((desc->ip_tcp_csum & RXD_TCPCSUM_MASK)
5152 >> RXD_TCPCSUM_SHIFT) == 0xffff))
5153 skb->ip_summed = CHECKSUM_UNNECESSARY;
5154 else
5155 skb_checksum_none_assert(skb);
5157 skb->protocol = eth_type_trans(skb, tp->dev);
5159 if (len > (tp->dev->mtu + ETH_HLEN) &&
5160 skb->protocol != htons(ETH_P_8021Q)) {
5161 dev_kfree_skb(skb);
5162 goto drop_it_no_recycle;
5165 if (desc->type_flags & RXD_FLAG_VLAN &&
5166 !(tp->rx_mode & RX_MODE_KEEP_VLAN_TAG))
5167 __vlan_hwaccel_put_tag(skb,
5168 desc->err_vlan & RXD_VLAN_MASK);
5170 napi_gro_receive(&tnapi->napi, skb);
5172 received++;
5173 budget--;
5175 next_pkt:
5176 (*post_ptr)++;
5178 if (unlikely(rx_std_posted >= tp->rx_std_max_post)) {
5179 tpr->rx_std_prod_idx = std_prod_idx &
5180 tp->rx_std_ring_mask;
5181 tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG,
5182 tpr->rx_std_prod_idx);
5183 work_mask &= ~RXD_OPAQUE_RING_STD;
5184 rx_std_posted = 0;
5186 next_pkt_nopost:
5187 sw_idx++;
5188 sw_idx &= tp->rx_ret_ring_mask;
5190 /* Refresh hw_idx to see if there is new work */
5191 if (sw_idx == hw_idx) {
5192 hw_idx = *(tnapi->rx_rcb_prod_idx);
5193 rmb();
5197 /* ACK the status ring. */
5198 tnapi->rx_rcb_ptr = sw_idx;
5199 tw32_rx_mbox(tnapi->consmbox, sw_idx);
5201 /* Refill RX ring(s). */
5202 if (!tg3_flag(tp, ENABLE_RSS)) {
5203 if (work_mask & RXD_OPAQUE_RING_STD) {
5204 tpr->rx_std_prod_idx = std_prod_idx &
5205 tp->rx_std_ring_mask;
5206 tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG,
5207 tpr->rx_std_prod_idx);
5209 if (work_mask & RXD_OPAQUE_RING_JUMBO) {
5210 tpr->rx_jmb_prod_idx = jmb_prod_idx &
5211 tp->rx_jmb_ring_mask;
5212 tw32_rx_mbox(TG3_RX_JMB_PROD_IDX_REG,
5213 tpr->rx_jmb_prod_idx);
5215 mmiowb();
5216 } else if (work_mask) {
5217 /* rx_std_buffers[] and rx_jmb_buffers[] entries must be
5218 * updated before the producer indices can be updated.
5220 smp_wmb();
5222 tpr->rx_std_prod_idx = std_prod_idx & tp->rx_std_ring_mask;
5223 tpr->rx_jmb_prod_idx = jmb_prod_idx & tp->rx_jmb_ring_mask;
5225 if (tnapi != &tp->napi[1])
5226 napi_schedule(&tp->napi[1].napi);
5229 return received;
5232 static void tg3_poll_link(struct tg3 *tp)
5234 /* handle link change and other phy events */
5235 if (!(tg3_flag(tp, USE_LINKCHG_REG) || tg3_flag(tp, POLL_SERDES))) {
5236 struct tg3_hw_status *sblk = tp->napi[0].hw_status;
5238 if (sblk->status & SD_STATUS_LINK_CHG) {
5239 sblk->status = SD_STATUS_UPDATED |
5240 (sblk->status & ~SD_STATUS_LINK_CHG);
5241 spin_lock(&tp->lock);
5242 if (tg3_flag(tp, USE_PHYLIB)) {
5243 tw32_f(MAC_STATUS,
5244 (MAC_STATUS_SYNC_CHANGED |
5245 MAC_STATUS_CFG_CHANGED |
5246 MAC_STATUS_MI_COMPLETION |
5247 MAC_STATUS_LNKSTATE_CHANGED));
5248 udelay(40);
5249 } else
5250 tg3_setup_phy(tp, 0);
5251 spin_unlock(&tp->lock);
5256 static int tg3_rx_prodring_xfer(struct tg3 *tp,
5257 struct tg3_rx_prodring_set *dpr,
5258 struct tg3_rx_prodring_set *spr)
5260 u32 si, di, cpycnt, src_prod_idx;
5261 int i, err = 0;
5263 while (1) {
5264 src_prod_idx = spr->rx_std_prod_idx;
5266 /* Make sure updates to the rx_std_buffers[] entries and the
5267 * standard producer index are seen in the correct order.
5269 smp_rmb();
5271 if (spr->rx_std_cons_idx == src_prod_idx)
5272 break;
5274 if (spr->rx_std_cons_idx < src_prod_idx)
5275 cpycnt = src_prod_idx - spr->rx_std_cons_idx;
5276 else
5277 cpycnt = tp->rx_std_ring_mask + 1 -
5278 spr->rx_std_cons_idx;
5280 cpycnt = min(cpycnt,
5281 tp->rx_std_ring_mask + 1 - dpr->rx_std_prod_idx);
5283 si = spr->rx_std_cons_idx;
5284 di = dpr->rx_std_prod_idx;
5286 for (i = di; i < di + cpycnt; i++) {
5287 if (dpr->rx_std_buffers[i].skb) {
5288 cpycnt = i - di;
5289 err = -ENOSPC;
5290 break;
5294 if (!cpycnt)
5295 break;
5297 /* Ensure that updates to the rx_std_buffers ring and the
5298 * shadowed hardware producer ring from tg3_recycle_skb() are
5299 * ordered correctly WRT the skb check above.
5301 smp_rmb();
5303 memcpy(&dpr->rx_std_buffers[di],
5304 &spr->rx_std_buffers[si],
5305 cpycnt * sizeof(struct ring_info));
5307 for (i = 0; i < cpycnt; i++, di++, si++) {
5308 struct tg3_rx_buffer_desc *sbd, *dbd;
5309 sbd = &spr->rx_std[si];
5310 dbd = &dpr->rx_std[di];
5311 dbd->addr_hi = sbd->addr_hi;
5312 dbd->addr_lo = sbd->addr_lo;
5315 spr->rx_std_cons_idx = (spr->rx_std_cons_idx + cpycnt) &
5316 tp->rx_std_ring_mask;
5317 dpr->rx_std_prod_idx = (dpr->rx_std_prod_idx + cpycnt) &
5318 tp->rx_std_ring_mask;
5321 while (1) {
5322 src_prod_idx = spr->rx_jmb_prod_idx;
5324 /* Make sure updates to the rx_jmb_buffers[] entries and
5325 * the jumbo producer index are seen in the correct order.
5327 smp_rmb();
5329 if (spr->rx_jmb_cons_idx == src_prod_idx)
5330 break;
5332 if (spr->rx_jmb_cons_idx < src_prod_idx)
5333 cpycnt = src_prod_idx - spr->rx_jmb_cons_idx;
5334 else
5335 cpycnt = tp->rx_jmb_ring_mask + 1 -
5336 spr->rx_jmb_cons_idx;
5338 cpycnt = min(cpycnt,
5339 tp->rx_jmb_ring_mask + 1 - dpr->rx_jmb_prod_idx);
5341 si = spr->rx_jmb_cons_idx;
5342 di = dpr->rx_jmb_prod_idx;
5344 for (i = di; i < di + cpycnt; i++) {
5345 if (dpr->rx_jmb_buffers[i].skb) {
5346 cpycnt = i - di;
5347 err = -ENOSPC;
5348 break;
5352 if (!cpycnt)
5353 break;
5355 /* Ensure that updates to the rx_jmb_buffers ring and the
5356 * shadowed hardware producer ring from tg3_recycle_skb() are
5357 * ordered correctly WRT the skb check above.
5359 smp_rmb();
5361 memcpy(&dpr->rx_jmb_buffers[di],
5362 &spr->rx_jmb_buffers[si],
5363 cpycnt * sizeof(struct ring_info));
5365 for (i = 0; i < cpycnt; i++, di++, si++) {
5366 struct tg3_rx_buffer_desc *sbd, *dbd;
5367 sbd = &spr->rx_jmb[si].std;
5368 dbd = &dpr->rx_jmb[di].std;
5369 dbd->addr_hi = sbd->addr_hi;
5370 dbd->addr_lo = sbd->addr_lo;
5373 spr->rx_jmb_cons_idx = (spr->rx_jmb_cons_idx + cpycnt) &
5374 tp->rx_jmb_ring_mask;
5375 dpr->rx_jmb_prod_idx = (dpr->rx_jmb_prod_idx + cpycnt) &
5376 tp->rx_jmb_ring_mask;
5379 return err;
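/* Worked example of the copy-count clamping above (a 512-entry standard
 * ring is assumed, so the mask is 511): with source cons == 500 and
 * prod == 10, the first pass copies 512 - 500 == 12 entries up to the
 * end of the ring; a destination prod of 505 then clamps cpycnt to
 * min(12, 512 - 505) == 7 so the destination index never wraps
 * mid-memcpy.  The remainder is picked up on later loop iterations.
 */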
5382 static int tg3_poll_work(struct tg3_napi *tnapi, int work_done, int budget)
5384 struct tg3 *tp = tnapi->tp;
5386 /* run TX completion thread */
5387 if (tnapi->hw_status->idx[0].tx_consumer != tnapi->tx_cons) {
5388 tg3_tx(tnapi);
5389 if (unlikely(tg3_flag(tp, TX_RECOVERY_PENDING)))
5390 return work_done;
5393 /* run RX thread, within the bounds set by NAPI.
5394 * All RX "locking" is done by ensuring outside
5395 * code synchronizes with tg3->napi.poll()
5397 if (*(tnapi->rx_rcb_prod_idx) != tnapi->rx_rcb_ptr)
5398 work_done += tg3_rx(tnapi, budget - work_done);
5400 if (tg3_flag(tp, ENABLE_RSS) && tnapi == &tp->napi[1]) {
5401 struct tg3_rx_prodring_set *dpr = &tp->napi[0].prodring;
5402 int i, err = 0;
5403 u32 std_prod_idx = dpr->rx_std_prod_idx;
5404 u32 jmb_prod_idx = dpr->rx_jmb_prod_idx;
5406 for (i = 1; i < tp->irq_cnt; i++)
5407 err |= tg3_rx_prodring_xfer(tp, dpr,
5408 &tp->napi[i].prodring);
5410 wmb();
5412 if (std_prod_idx != dpr->rx_std_prod_idx)
5413 tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG,
5414 dpr->rx_std_prod_idx);
5416 if (jmb_prod_idx != dpr->rx_jmb_prod_idx)
5417 tw32_rx_mbox(TG3_RX_JMB_PROD_IDX_REG,
5418 dpr->rx_jmb_prod_idx);
5420 mmiowb();
5422 if (err)
5423 tw32_f(HOSTCC_MODE, tp->coal_now);
5426 return work_done;
5429 static int tg3_poll_msix(struct napi_struct *napi, int budget)
5431 struct tg3_napi *tnapi = container_of(napi, struct tg3_napi, napi);
5432 struct tg3 *tp = tnapi->tp;
5433 int work_done = 0;
5434 struct tg3_hw_status *sblk = tnapi->hw_status;
5436 while (1) {
5437 work_done = tg3_poll_work(tnapi, work_done, budget);
5439 if (unlikely(tg3_flag(tp, TX_RECOVERY_PENDING)))
5440 goto tx_recovery;
5442 if (unlikely(work_done >= budget))
5443 break;
5445 /* tp->last_tag is used in tg3_int_reenable() below
5446 * to tell the hw how much work has been processed,
5447 * so we must read it before checking for more work.
5449 tnapi->last_tag = sblk->status_tag;
5450 tnapi->last_irq_tag = tnapi->last_tag;
5451 rmb();
5453 /* check for RX/TX work to do */
5454 if (likely(sblk->idx[0].tx_consumer == tnapi->tx_cons &&
5455 *(tnapi->rx_rcb_prod_idx) == tnapi->rx_rcb_ptr)) {
5456 napi_complete(napi);
5457 /* Reenable interrupts. */
5458 tw32_mailbox(tnapi->int_mbox, tnapi->last_tag << 24);
5459 mmiowb();
5460 break;
5464 return work_done;
5466 tx_recovery:
5467 /* work_done is guaranteed to be less than budget. */
5468 napi_complete(napi);
5469 schedule_work(&tp->reset_task);
5470 return work_done;
5473 static void tg3_process_error(struct tg3 *tp)
5475 u32 val;
5476 bool real_error = false;
5478 if (tg3_flag(tp, ERROR_PROCESSED))
5479 return;
5481 /* Check Flow Attention register */
5482 val = tr32(HOSTCC_FLOW_ATTN);
5483 if (val & ~HOSTCC_FLOW_ATTN_MBUF_LWM) {
5484 netdev_err(tp->dev, "FLOW Attention error. Resetting chip.\n");
5485 real_error = true;
5488 if (tr32(MSGINT_STATUS) & ~MSGINT_STATUS_MSI_REQ) {
5489 netdev_err(tp->dev, "MSI Status error. Resetting chip.\n");
5490 real_error = true;
5493 if (tr32(RDMAC_STATUS) || tr32(WDMAC_STATUS)) {
5494 netdev_err(tp->dev, "DMA Status error. Resetting chip.\n");
5495 real_error = true;
5498 if (!real_error)
5499 return;
5501 tg3_dump_state(tp);
5503 tg3_flag_set(tp, ERROR_PROCESSED);
5504 schedule_work(&tp->reset_task);
5507 static int tg3_poll(struct napi_struct *napi, int budget)
5509 struct tg3_napi *tnapi = container_of(napi, struct tg3_napi, napi);
5510 struct tg3 *tp = tnapi->tp;
5511 int work_done = 0;
5512 struct tg3_hw_status *sblk = tnapi->hw_status;
5514 while (1) {
5515 if (sblk->status & SD_STATUS_ERROR)
5516 tg3_process_error(tp);
5518 tg3_poll_link(tp);
5520 work_done = tg3_poll_work(tnapi, work_done, budget);
5522 if (unlikely(tg3_flag(tp, TX_RECOVERY_PENDING)))
5523 goto tx_recovery;
5525 if (unlikely(work_done >= budget))
5526 break;
5528 if (tg3_flag(tp, TAGGED_STATUS)) {
5529 /* tp->last_tag is used in tg3_int_reenable() below
5530 * to tell the hw how much work has been processed,
5531 * so we must read it before checking for more work.
5533 tnapi->last_tag = sblk->status_tag;
5534 tnapi->last_irq_tag = tnapi->last_tag;
5535 rmb();
5536 } else
5537 sblk->status &= ~SD_STATUS_UPDATED;
5539 if (likely(!tg3_has_work(tnapi))) {
5540 napi_complete(napi);
5541 tg3_int_reenable(tnapi);
5542 break;
5546 return work_done;
5548 tx_recovery:
5549 /* work_done is guaranteed to be less than budget. */
5550 napi_complete(napi);
5551 schedule_work(&tp->reset_task);
5552 return work_done;
5555 static void tg3_napi_disable(struct tg3 *tp)
5557 int i;
5559 for (i = tp->irq_cnt - 1; i >= 0; i--)
5560 napi_disable(&tp->napi[i].napi);
5563 static void tg3_napi_enable(struct tg3 *tp)
5565 int i;
5567 for (i = 0; i < tp->irq_cnt; i++)
5568 napi_enable(&tp->napi[i].napi);
5571 static void tg3_napi_init(struct tg3 *tp)
5573 int i;
5575 netif_napi_add(tp->dev, &tp->napi[0].napi, tg3_poll, 64);
5576 for (i = 1; i < tp->irq_cnt; i++)
5577 netif_napi_add(tp->dev, &tp->napi[i].napi, tg3_poll_msix, 64);
5580 static void tg3_napi_fini(struct tg3 *tp)
5582 int i;
5584 for (i = 0; i < tp->irq_cnt; i++)
5585 netif_napi_del(&tp->napi[i].napi);
5588 static inline void tg3_netif_stop(struct tg3 *tp)
5590 tp->dev->trans_start = jiffies; /* prevent tx timeout */
5591 tg3_napi_disable(tp);
5592 netif_tx_disable(tp->dev);
5595 static inline void tg3_netif_start(struct tg3 *tp)
5597 /* NOTE: unconditional netif_tx_wake_all_queues is only
5598 * appropriate so long as all callers are assured to
5599 * have free tx slots (such as after tg3_init_hw)
5601 netif_tx_wake_all_queues(tp->dev);
5603 tg3_napi_enable(tp);
5604 tp->napi[0].hw_status->status |= SD_STATUS_UPDATED;
5605 tg3_enable_ints(tp);
5608 static void tg3_irq_quiesce(struct tg3 *tp)
5610 int i;
5612 BUG_ON(tp->irq_sync);
5614 tp->irq_sync = 1;
5615 smp_mb();
5617 for (i = 0; i < tp->irq_cnt; i++)
5618 synchronize_irq(tp->napi[i].irq_vec);
5621 /* Fully shut down all tg3 driver activity elsewhere in the system.
5622 * If irq_sync is non-zero, the IRQ handlers are quiesced as well.
5623 * Most of the time this is not necessary, except when
5624 * shutting down the device.
5626 static inline void tg3_full_lock(struct tg3 *tp, int irq_sync)
5628 spin_lock_bh(&tp->lock);
5629 if (irq_sync)
5630 tg3_irq_quiesce(tp);
5633 static inline void tg3_full_unlock(struct tg3 *tp)
5635 spin_unlock_bh(&tp->lock);
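/* Typical calling pattern (a sketch; see tg3_reset_task() below):
 *
 *	tg3_full_lock(tp, 1);	(quiesce IRQ handlers before halting)
 *	tg3_halt(tp, RESET_KIND_SHUTDOWN, 0);
 *	err = tg3_init_hw(tp, 1);
 *	tg3_full_unlock(tp);
 *
 * Pass irq_sync as 0 for state changes that only need the spinlock.
 */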
5638 /* One-shot MSI handler - Chip automatically disables interrupt
5639 * after sending MSI so driver doesn't have to do it.
5641 static irqreturn_t tg3_msi_1shot(int irq, void *dev_id)
5643 struct tg3_napi *tnapi = dev_id;
5644 struct tg3 *tp = tnapi->tp;
5646 prefetch(tnapi->hw_status);
5647 if (tnapi->rx_rcb)
5648 prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);
5650 if (likely(!tg3_irq_sync(tp)))
5651 napi_schedule(&tnapi->napi);
5653 return IRQ_HANDLED;
5656 /* MSI ISR - No need to check for interrupt sharing and no need to
5657 * flush status block and interrupt mailbox. PCI ordering rules
5658 * guarantee that MSI will arrive after the status block.
5660 static irqreturn_t tg3_msi(int irq, void *dev_id)
5662 struct tg3_napi *tnapi = dev_id;
5663 struct tg3 *tp = tnapi->tp;
5665 prefetch(tnapi->hw_status);
5666 if (tnapi->rx_rcb)
5667 prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);
5669 * Writing any value to intr-mbox-0 clears PCI INTA# and
5670 * chip-internal interrupt pending events.
5671 * Writing non-zero to intr-mbox-0 additionally tells the
5672 * NIC to stop sending us irqs, engaging "in-intr-handler"
5673 * event coalescing.
5675 tw32_mailbox(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);
5676 if (likely(!tg3_irq_sync(tp)))
5677 napi_schedule(&tnapi->napi);
5679 return IRQ_RETVAL(1);
5682 static irqreturn_t tg3_interrupt(int irq, void *dev_id)
5684 struct tg3_napi *tnapi = dev_id;
5685 struct tg3 *tp = tnapi->tp;
5686 struct tg3_hw_status *sblk = tnapi->hw_status;
5687 unsigned int handled = 1;
5689 /* In INTx mode, it is possible for the interrupt to arrive at
5690 * the CPU before the status block posted prior to the interrupt.
5691 * Reading the PCI State register will confirm whether the
5692 * interrupt is ours and will flush the status block.
5694 if (unlikely(!(sblk->status & SD_STATUS_UPDATED))) {
5695 if (tg3_flag(tp, CHIP_RESETTING) ||
5696 (tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
5697 handled = 0;
5698 goto out;
5703 * Writing any value to intr-mbox-0 clears PCI INTA# and
5704 * chip-internal interrupt pending events.
5705 * Writing non-zero to intr-mbox-0 additionally tells the
5706 * NIC to stop sending us irqs, engaging "in-intr-handler"
5707 * event coalescing.
5709 * Flush the mailbox to de-assert the IRQ immediately to prevent
5710 * spurious interrupts. The flush impacts performance but
5711 * excessive spurious interrupts can be worse in some cases.
5713 tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);
5714 if (tg3_irq_sync(tp))
5715 goto out;
5716 sblk->status &= ~SD_STATUS_UPDATED;
5717 if (likely(tg3_has_work(tnapi))) {
5718 prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);
5719 napi_schedule(&tnapi->napi);
5720 } else {
5721 /* No work, shared interrupt perhaps? re-enable
5722 * interrupts, and flush that PCI write
5724 tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
5725 0x00000000);
5727 out:
5728 return IRQ_RETVAL(handled);
5731 static irqreturn_t tg3_interrupt_tagged(int irq, void *dev_id)
5733 struct tg3_napi *tnapi = dev_id;
5734 struct tg3 *tp = tnapi->tp;
5735 struct tg3_hw_status *sblk = tnapi->hw_status;
5736 unsigned int handled = 1;
5738 /* In INTx mode, it is possible for the interrupt to arrive at
5739 * the CPU before the status block posted prior to the interrupt.
5740 * Reading the PCI State register will confirm whether the
5741 * interrupt is ours and will flush the status block.
5743 if (unlikely(sblk->status_tag == tnapi->last_irq_tag)) {
5744 if (tg3_flag(tp, CHIP_RESETTING) ||
5745 (tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
5746 handled = 0;
5747 goto out;
5752 * writing any value to intr-mbox-0 clears PCI INTA# and
5753 * chip-internal interrupt pending events.
5754 * writing non-zero to intr-mbox-0 additionally tells the
5755 * NIC to stop sending us irqs, engaging "in-intr-handler"
5756 * event coalescing.
5758 * Flush the mailbox to de-assert the IRQ immediately to prevent
5759 * spurious interrupts. The flush impacts performance but
5760 * excessive spurious interrupts can be worse in some cases.
5762 tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);
5765 * In a shared interrupt configuration, sometimes other devices'
5766 * interrupts will scream. We record the current status tag here
5767 * so that the above check can report that the screaming interrupts
5768 * are unhandled. Eventually they will be silenced.
5770 tnapi->last_irq_tag = sblk->status_tag;
5772 if (tg3_irq_sync(tp))
5773 goto out;
5775 prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);
5777 napi_schedule(&tnapi->napi);
5779 out:
5780 return IRQ_RETVAL(handled);
5783 /* ISR for interrupt test */
5784 static irqreturn_t tg3_test_isr(int irq, void *dev_id)
5786 struct tg3_napi *tnapi = dev_id;
5787 struct tg3 *tp = tnapi->tp;
5788 struct tg3_hw_status *sblk = tnapi->hw_status;
5790 if ((sblk->status & SD_STATUS_UPDATED) ||
5791 !(tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
5792 tg3_disable_ints(tp);
5793 return IRQ_RETVAL(1);
5795 return IRQ_RETVAL(0);
5798 static int tg3_init_hw(struct tg3 *, int);
5799 static int tg3_halt(struct tg3 *, int, int);
5801 /* Restart hardware after configuration changes, self-test, etc.
5802 * Invoked with tp->lock held.
5804 static int tg3_restart_hw(struct tg3 *tp, int reset_phy)
5805 __releases(tp->lock)
5806 __acquires(tp->lock)
5808 int err;
5810 err = tg3_init_hw(tp, reset_phy);
5811 if (err) {
5812 netdev_err(tp->dev,
5813 "Failed to re-initialize device, aborting\n");
5814 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
5815 tg3_full_unlock(tp);
5816 del_timer_sync(&tp->timer);
5817 tp->irq_sync = 0;
5818 tg3_napi_enable(tp);
5819 dev_close(tp->dev);
5820 tg3_full_lock(tp, 0);
5822 return err;
5825 #ifdef CONFIG_NET_POLL_CONTROLLER
5826 static void tg3_poll_controller(struct net_device *dev)
5828 int i;
5829 struct tg3 *tp = netdev_priv(dev);
5831 for (i = 0; i < tp->irq_cnt; i++)
5832 tg3_interrupt(tp->napi[i].irq_vec, &tp->napi[i]);
5834 #endif
5836 static void tg3_reset_task(struct work_struct *work)
5838 struct tg3 *tp = container_of(work, struct tg3, reset_task);
5839 int err;
5840 unsigned int restart_timer;
5842 tg3_full_lock(tp, 0);
5844 if (!netif_running(tp->dev)) {
5845 tg3_full_unlock(tp);
5846 return;
5849 tg3_full_unlock(tp);
5851 tg3_phy_stop(tp);
5853 tg3_netif_stop(tp);
5855 tg3_full_lock(tp, 1);
5857 restart_timer = tg3_flag(tp, RESTART_TIMER);
5858 tg3_flag_clear(tp, RESTART_TIMER);
5860 if (tg3_flag(tp, TX_RECOVERY_PENDING)) {
5861 tp->write32_tx_mbox = tg3_write32_tx_mbox;
5862 tp->write32_rx_mbox = tg3_write_flush_reg32;
5863 tg3_flag_set(tp, MBOX_WRITE_REORDER);
5864 tg3_flag_clear(tp, TX_RECOVERY_PENDING);
5867 tg3_halt(tp, RESET_KIND_SHUTDOWN, 0);
5868 err = tg3_init_hw(tp, 1);
5869 if (err)
5870 goto out;
5872 tg3_netif_start(tp);
5874 if (restart_timer)
5875 mod_timer(&tp->timer, jiffies + 1);
5877 out:
5878 tg3_full_unlock(tp);
5880 if (!err)
5881 tg3_phy_start(tp);
5884 static void tg3_tx_timeout(struct net_device *dev)
5886 struct tg3 *tp = netdev_priv(dev);
5888 if (netif_msg_tx_err(tp)) {
5889 netdev_err(dev, "transmit timed out, resetting\n");
5890 tg3_dump_state(tp);
5893 schedule_work(&tp->reset_task);
5896 /* Test for DMA buffers crossing any 4GB boundaries: 4G, 8G, etc */
5897 static inline int tg3_4g_overflow_test(dma_addr_t mapping, int len)
5899 u32 base = (u32) mapping & 0xffffffff;
5901 return (base > 0xffffdcc0) && (base + len + 8 < base);
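/* Illustrative check: a mapping whose low 32 bits are 0xffffff00 with
 * len == 512 gives base + len + 8 == 0x100000108, which truncates to
 * 0x108 < base, so the buffer would straddle a 4GB boundary and the
 * test returns true.  The 0xffffdcc0 cutoff and the +8 slack are
 * hardware-workaround constants inherited as-is.
 */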
5904 /* Test for DMA addresses > 40-bit */
5905 static inline int tg3_40bit_overflow_test(struct tg3 *tp, dma_addr_t mapping,
5906 int len)
5908 #if defined(CONFIG_HIGHMEM) && (BITS_PER_LONG == 64)
5909 if (tg3_flag(tp, 40BIT_DMA_BUG))
5910 return ((u64) mapping + len) > DMA_BIT_MASK(40);
5911 return 0;
5912 #else
5913 return 0;
5914 #endif
5917 static inline void tg3_tx_set_bd(struct tg3_tx_buffer_desc *txbd,
5918 dma_addr_t mapping, u32 len, u32 flags,
5919 u32 mss, u32 vlan)
5921 txbd->addr_hi = ((u64) mapping >> 32);
5922 txbd->addr_lo = ((u64) mapping & 0xffffffff);
5923 txbd->len_flags = (len << TXD_LEN_SHIFT) | (flags & 0x0000ffff);
5924 txbd->vlan_tag = (mss << TXD_MSS_SHIFT) | (vlan << TXD_VLAN_TAG_SHIFT);
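/* Example of the packing above (shift values assumed from tg3.h): a
 * 1514-byte frame carrying TXD_FLAG_END with no mss or vlan yields
 * len_flags == (1514 << TXD_LEN_SHIFT) | TXD_FLAG_END, i.e. the length
 * in the high half-word and the flags in the low half-word, with
 * vlan_tag == 0.
 */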
5927 static bool tg3_tx_frag_set(struct tg3_napi *tnapi, u32 *entry, u32 *budget,
5928 dma_addr_t map, u32 len, u32 flags,
5929 u32 mss, u32 vlan)
5931 struct tg3 *tp = tnapi->tp;
5932 bool hwbug = false;
5934 if (tg3_flag(tp, SHORT_DMA_BUG) && len <= 8)
5935 hwbug = true;
5937 if (tg3_4g_overflow_test(map, len))
5938 hwbug = true;
5940 if (tg3_40bit_overflow_test(tp, map, len))
5941 hwbug = true;
5943 if (*budget) {
5944 tg3_tx_set_bd(&tnapi->tx_ring[*entry], map,
5945 len, flags, mss, vlan);
5946 (*budget)--;
5947 } else
5948 hwbug = true;
5950 *entry = NEXT_TX(*entry);
5952 return hwbug;
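/* This is the tx BD budgeting: every descriptor written consumes one
 * unit of *budget (the caller seeds it from tg3_tx_avail()), and an
 * exhausted budget is reported exactly like a DMA-boundary hwbug so
 * that tg3_start_xmit() falls back to tigon3_dma_hwbug_workaround().
 */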
5955 static void tg3_tx_skb_unmap(struct tg3_napi *tnapi, u32 entry, int last)
5957 int i;
5958 struct sk_buff *skb;
5959 struct tg3_tx_ring_info *txb = &tnapi->tx_buffers[entry];
5961 skb = txb->skb;
5962 txb->skb = NULL;
5964 pci_unmap_single(tnapi->tp->pdev,
5965 dma_unmap_addr(txb, mapping),
5966 skb_headlen(skb),
5967 PCI_DMA_TODEVICE);
5969 while (txb->fragmented) {
5970 txb->fragmented = false;
5971 entry = NEXT_TX(entry);
5972 txb = &tnapi->tx_buffers[entry];
5975 for (i = 0; i < last; i++) {
5976 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
5978 entry = NEXT_TX(entry);
5979 txb = &tnapi->tx_buffers[entry];
5981 pci_unmap_page(tnapi->tp->pdev,
5982 dma_unmap_addr(txb, mapping),
5983 frag->size, PCI_DMA_TODEVICE);
5985 while (txb->fragmented) {
5986 txb->fragmented = false;
5987 entry = NEXT_TX(entry);
5988 txb = &tnapi->tx_buffers[entry];
5993 /* Workaround 4GB and 40-bit hardware DMA bugs. */
5994 static int tigon3_dma_hwbug_workaround(struct tg3_napi *tnapi,
5995 struct sk_buff *skb,
5996 u32 *entry, u32 *budget,
5997 u32 base_flags, u32 mss, u32 vlan)
5999 struct tg3 *tp = tnapi->tp;
6000 struct sk_buff *new_skb;
6001 dma_addr_t new_addr = 0;
6002 int ret = 0;
6004 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701)
6005 new_skb = skb_copy(skb, GFP_ATOMIC);
6006 else {
6007 int more_headroom = 4 - ((unsigned long)skb->data & 3);
6009 new_skb = skb_copy_expand(skb,
6010 skb_headroom(skb) + more_headroom,
6011 skb_tailroom(skb), GFP_ATOMIC);
6014 if (!new_skb) {
6015 ret = -1;
6016 } else {
6017 /* New SKB is guaranteed to be linear. */
6018 new_addr = pci_map_single(tp->pdev, new_skb->data, new_skb->len,
6019 PCI_DMA_TODEVICE);
6020 /* Make sure the mapping succeeded */
6021 if (pci_dma_mapping_error(tp->pdev, new_addr)) {
6022 dev_kfree_skb(new_skb);
6023 ret = -1;
6024 } else {
6025 base_flags |= TXD_FLAG_END;
6027 tnapi->tx_buffers[*entry].skb = new_skb;
6028 dma_unmap_addr_set(&tnapi->tx_buffers[*entry],
6029 mapping, new_addr);
6031 if (tg3_tx_frag_set(tnapi, entry, budget, new_addr,
6032 new_skb->len, base_flags,
6033 mss, vlan)) {
6034 tg3_tx_skb_unmap(tnapi, *entry, 0);
6035 dev_kfree_skb(new_skb);
6036 ret = -1;
6041 dev_kfree_skb(skb);
6043 return ret;
6046 static netdev_tx_t tg3_start_xmit(struct sk_buff *, struct net_device *);
6048 /* Use GSO to work around a rare TSO bug that may be triggered when the
6049 * TSO header is greater than 80 bytes.
6051 static int tg3_tso_bug(struct tg3 *tp, struct sk_buff *skb)
6053 struct sk_buff *segs, *nskb;
6054 u32 frag_cnt_est = skb_shinfo(skb)->gso_segs * 3;
6056 /* Estimate the number of fragments in the worst case */
6057 if (unlikely(tg3_tx_avail(&tp->napi[0]) <= frag_cnt_est)) {
6058 netif_stop_queue(tp->dev);
6060 /* netif_tx_stop_queue() must be done before checking
6061 * the tx index in tg3_tx_avail() below, because in
6062 * tg3_tx(), we update tx index before checking for
6063 * netif_tx_queue_stopped().
6065 smp_mb();
6066 if (tg3_tx_avail(&tp->napi[0]) <= frag_cnt_est)
6067 return NETDEV_TX_BUSY;
6069 netif_wake_queue(tp->dev);
6072 segs = skb_gso_segment(skb, tp->dev->features & ~NETIF_F_TSO);
6073 if (IS_ERR(segs))
6074 goto tg3_tso_bug_end;
6076 do {
6077 nskb = segs;
6078 segs = segs->next;
6079 nskb->next = NULL;
6080 tg3_start_xmit(nskb, tp->dev);
6081 } while (segs);
6083 tg3_tso_bug_end:
6084 dev_kfree_skb(skb);
6086 return NETDEV_TX_OK;
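/* frag_cnt_est above budgets up to three tx BDs per GSO segment as a
 * worst-case estimate; if even that cannot be guaranteed, the queue is
 * stopped and NETDEV_TX_BUSY tells the stack to requeue the skb.
 */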
6089 /* hard_start_xmit for devices that have the 4G bug and/or 40-bit bug and
6090 * support TG3_FLAG_HW_TSO_1 or firmware TSO only.
6092 static netdev_tx_t tg3_start_xmit(struct sk_buff *skb, struct net_device *dev)
6094 struct tg3 *tp = netdev_priv(dev);
6095 u32 len, entry, base_flags, mss, vlan = 0;
6096 u32 budget;
6097 int i = -1, would_hit_hwbug;
6098 dma_addr_t mapping;
6099 struct tg3_napi *tnapi;
6100 struct netdev_queue *txq;
6101 unsigned int last;
6103 txq = netdev_get_tx_queue(dev, skb_get_queue_mapping(skb));
6104 tnapi = &tp->napi[skb_get_queue_mapping(skb)];
6105 if (tg3_flag(tp, ENABLE_TSS))
6106 tnapi++;
6108 budget = tg3_tx_avail(tnapi);
6110 /* We are running in BH disabled context with netif_tx_lock
6111 * and TX reclaim runs via tp->napi.poll inside of a software
6112 * interrupt. Furthermore, IRQ processing runs lockless so we have
6113 * no IRQ context deadlocks to worry about either. Rejoice!
6115 if (unlikely(budget <= (skb_shinfo(skb)->nr_frags + 1))) {
6116 if (!netif_tx_queue_stopped(txq)) {
6117 netif_tx_stop_queue(txq);
6119 /* This is a hard error, log it. */
6120 netdev_err(dev,
6121 "BUG! Tx Ring full when queue awake!\n");
6123 return NETDEV_TX_BUSY;
6126 entry = tnapi->tx_prod;
6127 base_flags = 0;
6128 if (skb->ip_summed == CHECKSUM_PARTIAL)
6129 base_flags |= TXD_FLAG_TCPUDP_CSUM;
6131 mss = skb_shinfo(skb)->gso_size;
6132 if (mss) {
6133 struct iphdr *iph;
6134 u32 tcp_opt_len, hdr_len;
6136 if (skb_header_cloned(skb) &&
6137 pskb_expand_head(skb, 0, 0, GFP_ATOMIC)) {
6138 dev_kfree_skb(skb);
6139 goto out_unlock;
6142 iph = ip_hdr(skb);
6143 tcp_opt_len = tcp_optlen(skb);
6145 if (skb_is_gso_v6(skb)) {
6146 hdr_len = skb_headlen(skb) - ETH_HLEN;
6147 } else {
6148 u32 ip_tcp_len;
6150 ip_tcp_len = ip_hdrlen(skb) + sizeof(struct tcphdr);
6151 hdr_len = ip_tcp_len + tcp_opt_len;
6153 iph->check = 0;
6154 iph->tot_len = htons(mss + hdr_len);
6157 if (unlikely((ETH_HLEN + hdr_len) > 80) &&
6158 tg3_flag(tp, TSO_BUG))
6159 return tg3_tso_bug(tp, skb);
6161 base_flags |= (TXD_FLAG_CPU_PRE_DMA |
6162 TXD_FLAG_CPU_POST_DMA);
6164 if (tg3_flag(tp, HW_TSO_1) ||
6165 tg3_flag(tp, HW_TSO_2) ||
6166 tg3_flag(tp, HW_TSO_3)) {
6167 tcp_hdr(skb)->check = 0;
6168 base_flags &= ~TXD_FLAG_TCPUDP_CSUM;
6169 } else
6170 tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr,
6171 iph->daddr, 0,
6172 IPPROTO_TCP,
6175 if (tg3_flag(tp, HW_TSO_3)) {
6176 mss |= (hdr_len & 0xc) << 12;
6177 if (hdr_len & 0x10)
6178 base_flags |= 0x00000010;
6179 base_flags |= (hdr_len & 0x3e0) << 5;
6180 } else if (tg3_flag(tp, HW_TSO_2))
6181 mss |= hdr_len << 9;
6182 else if (tg3_flag(tp, HW_TSO_1) ||
6183 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
6184 if (tcp_opt_len || iph->ihl > 5) {
6185 int tsflags;
6187 tsflags = (iph->ihl - 5) + (tcp_opt_len >> 2);
6188 mss |= (tsflags << 11);
6190 } else {
6191 if (tcp_opt_len || iph->ihl > 5) {
6192 int tsflags;
6194 tsflags = (iph->ihl - 5) + (tcp_opt_len >> 2);
6195 base_flags |= tsflags << 12;
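/* Encoding summary for the header-length plumbing above, derived from
 * the bit arithmetic rather than vendor documentation: HW_TSO_2 passes
 * hdr_len in the upper bits of mss (<< 9); HW_TSO_3 scatters hdr_len
 * across mss bits 14-15, base_flags bit 4 and base_flags bits 10-14;
 * older parts instead pass the IP/TCP option length, in 32-bit words,
 * through tsflags. */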
6200 #ifdef BCM_KERNEL_SUPPORTS_8021Q
6201 if (vlan_tx_tag_present(skb)) {
6202 base_flags |= TXD_FLAG_VLAN;
6203 vlan = vlan_tx_tag_get(skb);
6205 #endif
6207 if (tg3_flag(tp, USE_JUMBO_BDFLAG) &&
6208 !mss && skb->len > VLAN_ETH_FRAME_LEN)
6209 base_flags |= TXD_FLAG_JMB_PKT;
6211 len = skb_headlen(skb);
6213 mapping = pci_map_single(tp->pdev, skb->data, len, PCI_DMA_TODEVICE);
6214 if (pci_dma_mapping_error(tp->pdev, mapping)) {
6215 dev_kfree_skb(skb);
6216 goto out_unlock;
6219 tnapi->tx_buffers[entry].skb = skb;
6220 dma_unmap_addr_set(&tnapi->tx_buffers[entry], mapping, mapping);
6222 would_hit_hwbug = 0;
6224 if (tg3_flag(tp, 5701_DMA_BUG))
6225 would_hit_hwbug = 1;
6227 if (tg3_tx_frag_set(tnapi, &entry, &budget, mapping, len, base_flags |
6228 ((skb_shinfo(skb)->nr_frags == 0) ? TXD_FLAG_END : 0),
6229 mss, vlan))
6230 would_hit_hwbug = 1;
6232 /* Now loop through additional data fragments, and queue them. */
6233 if (skb_shinfo(skb)->nr_frags > 0) {
6234 u32 tmp_mss = mss;
6236 if (!tg3_flag(tp, HW_TSO_1) &&
6237 !tg3_flag(tp, HW_TSO_2) &&
6238 !tg3_flag(tp, HW_TSO_3))
6239 tmp_mss = 0;
6241 last = skb_shinfo(skb)->nr_frags - 1;
6242 for (i = 0; i <= last; i++) {
6243 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
6245 len = frag->size;
6246 mapping = pci_map_page(tp->pdev,
6247 frag->page,
6248 frag->page_offset,
6249 len, PCI_DMA_TODEVICE);
6251 tnapi->tx_buffers[entry].skb = NULL;
6252 dma_unmap_addr_set(&tnapi->tx_buffers[entry], mapping,
6253 mapping);
6254 if (pci_dma_mapping_error(tp->pdev, mapping))
6255 goto dma_error;
6257 if (tg3_tx_frag_set(tnapi, &entry, &budget, mapping,
6258 len, base_flags |
6259 ((i == last) ? TXD_FLAG_END : 0),
6260 tmp_mss, vlan))
6261 would_hit_hwbug = 1;
6265 if (would_hit_hwbug) {
6266 tg3_tx_skb_unmap(tnapi, tnapi->tx_prod, i);
6268 /* If the workaround fails due to memory/mapping
6269 * failure, silently drop this packet.
6271 entry = tnapi->tx_prod;
6272 budget = tg3_tx_avail(tnapi);
6273 if (tigon3_dma_hwbug_workaround(tnapi, skb, &entry, &budget,
6274 base_flags, mss, vlan))
6275 goto out_unlock;
6278 skb_tx_timestamp(skb);
6280 /* Packets are ready, update Tx producer idx local and on card. */
6281 tw32_tx_mbox(tnapi->prodmbox, entry);
6283 tnapi->tx_prod = entry;
6284 if (unlikely(tg3_tx_avail(tnapi) <= (MAX_SKB_FRAGS + 1))) {
6285 netif_tx_stop_queue(txq);
6287 /* netif_tx_stop_queue() must be done before checking
6288 * tx index in tg3_tx_avail() below, because in
6289 * tg3_tx(), we update tx index before checking for
6290 * netif_tx_queue_stopped().
6292 smp_mb();
6293 if (tg3_tx_avail(tnapi) > TG3_TX_WAKEUP_THRESH(tnapi))
6294 netif_tx_wake_queue(txq);
6297 out_unlock:
6298 mmiowb();
6300 return NETDEV_TX_OK;
6302 dma_error:
6303 tg3_tx_skb_unmap(tnapi, tnapi->tx_prod, i);
6304 dev_kfree_skb(skb);
6305 tnapi->tx_buffers[tnapi->tx_prod].skb = NULL;
6306 return NETDEV_TX_OK;
6309 static void tg3_set_loopback(struct net_device *dev, u32 features)
6311 struct tg3 *tp = netdev_priv(dev);
6313 if (features & NETIF_F_LOOPBACK) {
6314 if (tp->mac_mode & MAC_MODE_PORT_INT_LPBACK)
6315 return;
6318 /* Clear MAC_MODE_HALF_DUPLEX or you won't get packets back in
6319 * loopback mode if Half-Duplex mode was negotiated earlier.
6321 tp->mac_mode &= ~MAC_MODE_HALF_DUPLEX;
6323 /* Enable internal MAC loopback mode */
6324 tp->mac_mode |= MAC_MODE_PORT_INT_LPBACK;
6325 spin_lock_bh(&tp->lock);
6326 tw32(MAC_MODE, tp->mac_mode);
6327 netif_carrier_on(tp->dev);
6328 spin_unlock_bh(&tp->lock);
6329 netdev_info(dev, "Internal MAC loopback mode enabled.\n");
6330 } else {
6331 if (!(tp->mac_mode & MAC_MODE_PORT_INT_LPBACK))
6332 return;
6334 /* Disable internal MAC loopback mode */
6335 tp->mac_mode &= ~MAC_MODE_PORT_INT_LPBACK;
6336 spin_lock_bh(&tp->lock);
6337 tw32(MAC_MODE, tp->mac_mode);
6338 /* Force link status check */
6339 tg3_setup_phy(tp, 1);
6340 spin_unlock_bh(&tp->lock);
6341 netdev_info(dev, "Internal MAC loopback mode disabled.\n");
6345 static u32 tg3_fix_features(struct net_device *dev, u32 features)
6347 struct tg3 *tp = netdev_priv(dev);
6349 if (dev->mtu > ETH_DATA_LEN && tg3_flag(tp, 5780_CLASS))
6350 features &= ~NETIF_F_ALL_TSO;
6352 return features;
6355 static int tg3_set_features(struct net_device *dev, u32 features)
6357 u32 changed = dev->features ^ features;
6359 if ((changed & NETIF_F_LOOPBACK) && netif_running(dev))
6360 tg3_set_loopback(dev, features);
6362 return 0;
6365 static inline void tg3_set_mtu(struct net_device *dev, struct tg3 *tp,
6366 int new_mtu)
6368 dev->mtu = new_mtu;
6370 if (new_mtu > ETH_DATA_LEN) {
6371 if (tg3_flag(tp, 5780_CLASS)) {
6372 netdev_update_features(dev);
6373 tg3_flag_clear(tp, TSO_CAPABLE);
6374 } else {
6375 tg3_flag_set(tp, JUMBO_RING_ENABLE);
6377 } else {
6378 if (tg3_flag(tp, 5780_CLASS)) {
6379 tg3_flag_set(tp, TSO_CAPABLE);
6380 netdev_update_features(dev);
6382 tg3_flag_clear(tp, JUMBO_RING_ENABLE);
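/* On 5780-class parts TSO and jumbo frames are mutually exclusive (see
 * tg3_fix_features() above), so an MTU change toggles TSO_CAPABLE and
 * lets netdev_update_features() re-evaluate the feature set instead of
 * enabling the jumbo ring. */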
6386 static int tg3_change_mtu(struct net_device *dev, int new_mtu)
6388 struct tg3 *tp = netdev_priv(dev);
6389 int err;
6391 if (new_mtu < TG3_MIN_MTU || new_mtu > TG3_MAX_MTU(tp))
6392 return -EINVAL;
6394 if (!netif_running(dev)) {
6395 /* We'll just catch it later when the
6396 * device is brought up.
6398 tg3_set_mtu(dev, tp, new_mtu);
6399 return 0;
6402 tg3_phy_stop(tp);
6404 tg3_netif_stop(tp);
6406 tg3_full_lock(tp, 1);
6408 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
6410 tg3_set_mtu(dev, tp, new_mtu);
6412 err = tg3_restart_hw(tp, 0);
6414 if (!err)
6415 tg3_netif_start(tp);
6417 tg3_full_unlock(tp);
6419 if (!err)
6420 tg3_phy_start(tp);
6422 return err;
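/* The shape above (phy stop, netif stop, full lock, halt, reconfigure,
 * restart) is the driver's standard pattern for changes that need a
 * chip reset; the new MTU presumably reaches the rx rings through the
 * buffer sizing in tg3_rx_prodring_alloc() when the hardware restarts. */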
6425 static void tg3_rx_prodring_free(struct tg3 *tp,
6426 struct tg3_rx_prodring_set *tpr)
6428 int i;
6430 if (tpr != &tp->napi[0].prodring) {
6431 for (i = tpr->rx_std_cons_idx; i != tpr->rx_std_prod_idx;
6432 i = (i + 1) & tp->rx_std_ring_mask)
6433 tg3_rx_skb_free(tp, &tpr->rx_std_buffers[i],
6434 tp->rx_pkt_map_sz);
6436 if (tg3_flag(tp, JUMBO_CAPABLE)) {
6437 for (i = tpr->rx_jmb_cons_idx;
6438 i != tpr->rx_jmb_prod_idx;
6439 i = (i + 1) & tp->rx_jmb_ring_mask) {
6440 tg3_rx_skb_free(tp, &tpr->rx_jmb_buffers[i],
6441 TG3_RX_JMB_MAP_SZ);
6445 return;
6448 for (i = 0; i <= tp->rx_std_ring_mask; i++)
6449 tg3_rx_skb_free(tp, &tpr->rx_std_buffers[i],
6450 tp->rx_pkt_map_sz);
6452 if (tg3_flag(tp, JUMBO_CAPABLE) && !tg3_flag(tp, 5780_CLASS)) {
6453 for (i = 0; i <= tp->rx_jmb_ring_mask; i++)
6454 tg3_rx_skb_free(tp, &tpr->rx_jmb_buffers[i],
6455 TG3_RX_JMB_MAP_SZ);
6459 /* Initialize rx rings for packet processing.
6461 * The chip has been shut down and the driver detached from
6462 * the networking stack, so no interrupts or new tx packets will
6463 * end up in the driver. tp->{tx,}lock are held and thus
6464 * we may not sleep.
6466 static int tg3_rx_prodring_alloc(struct tg3 *tp,
6467 struct tg3_rx_prodring_set *tpr)
6469 u32 i, rx_pkt_dma_sz;
6471 tpr->rx_std_cons_idx = 0;
6472 tpr->rx_std_prod_idx = 0;
6473 tpr->rx_jmb_cons_idx = 0;
6474 tpr->rx_jmb_prod_idx = 0;
6476 if (tpr != &tp->napi[0].prodring) {
6477 memset(&tpr->rx_std_buffers[0], 0,
6478 TG3_RX_STD_BUFF_RING_SIZE(tp));
6479 if (tpr->rx_jmb_buffers)
6480 memset(&tpr->rx_jmb_buffers[0], 0,
6481 TG3_RX_JMB_BUFF_RING_SIZE(tp));
6482 goto done;
6485 /* Zero out all descriptors. */
6486 memset(tpr->rx_std, 0, TG3_RX_STD_RING_BYTES(tp));
6488 rx_pkt_dma_sz = TG3_RX_STD_DMA_SZ;
6489 if (tg3_flag(tp, 5780_CLASS) &&
6490 tp->dev->mtu > ETH_DATA_LEN)
6491 rx_pkt_dma_sz = TG3_RX_JMB_DMA_SZ;
6492 tp->rx_pkt_map_sz = TG3_RX_DMA_TO_MAP_SZ(rx_pkt_dma_sz);
6494 /* Initialize invariants of the rings, we only set this
6495 * stuff once. This works because the card does not
6496 * write into the rx buffer posting rings.
6498 for (i = 0; i <= tp->rx_std_ring_mask; i++) {
6499 struct tg3_rx_buffer_desc *rxd;
6501 rxd = &tpr->rx_std[i];
6502 rxd->idx_len = rx_pkt_dma_sz << RXD_LEN_SHIFT;
6503 rxd->type_flags = (RXD_FLAG_END << RXD_FLAGS_SHIFT);
6504 rxd->opaque = (RXD_OPAQUE_RING_STD |
6505 (i << RXD_OPAQUE_INDEX_SHIFT));
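/* The opaque cookie written above (ring id | buffer index) is echoed
 * back by the chip in the rx return ring, letting the receive path
 * locate the original buffer without a search. */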
6508 /* Now allocate fresh SKBs for each rx ring. */
6509 for (i = 0; i < tp->rx_pending; i++) {
6510 if (tg3_alloc_rx_skb(tp, tpr, RXD_OPAQUE_RING_STD, i) < 0) {
6511 netdev_warn(tp->dev,
6512 "Using a smaller RX standard ring. Only "
6513 "%d out of %d buffers were allocated "
6514 "successfully\n", i, tp->rx_pending);
6515 if (i == 0)
6516 goto initfail;
6517 tp->rx_pending = i;
6518 break;
6522 if (!tg3_flag(tp, JUMBO_CAPABLE) || tg3_flag(tp, 5780_CLASS))
6523 goto done;
6525 memset(tpr->rx_jmb, 0, TG3_RX_JMB_RING_BYTES(tp));
6527 if (!tg3_flag(tp, JUMBO_RING_ENABLE))
6528 goto done;
6530 for (i = 0; i <= tp->rx_jmb_ring_mask; i++) {
6531 struct tg3_rx_buffer_desc *rxd;
6533 rxd = &tpr->rx_jmb[i].std;
6534 rxd->idx_len = TG3_RX_JMB_DMA_SZ << RXD_LEN_SHIFT;
6535 rxd->type_flags = (RXD_FLAG_END << RXD_FLAGS_SHIFT) |
6536 RXD_FLAG_JUMBO;
6537 rxd->opaque = (RXD_OPAQUE_RING_JUMBO |
6538 (i << RXD_OPAQUE_INDEX_SHIFT));
6541 for (i = 0; i < tp->rx_jumbo_pending; i++) {
6542 if (tg3_alloc_rx_skb(tp, tpr, RXD_OPAQUE_RING_JUMBO, i) < 0) {
6543 netdev_warn(tp->dev,
6544 "Using a smaller RX jumbo ring. Only %d "
6545 "out of %d buffers were allocated "
6546 "successfully\n", i, tp->rx_jumbo_pending);
6547 if (i == 0)
6548 goto initfail;
6549 tp->rx_jumbo_pending = i;
6550 break;
6554 done:
6555 return 0;
6557 initfail:
6558 tg3_rx_prodring_free(tp, tpr);
6559 return -ENOMEM;
6562 static void tg3_rx_prodring_fini(struct tg3 *tp,
6563 struct tg3_rx_prodring_set *tpr)
6565 kfree(tpr->rx_std_buffers);
6566 tpr->rx_std_buffers = NULL;
6567 kfree(tpr->rx_jmb_buffers);
6568 tpr->rx_jmb_buffers = NULL;
6569 if (tpr->rx_std) {
6570 dma_free_coherent(&tp->pdev->dev, TG3_RX_STD_RING_BYTES(tp),
6571 tpr->rx_std, tpr->rx_std_mapping);
6572 tpr->rx_std = NULL;
6574 if (tpr->rx_jmb) {
6575 dma_free_coherent(&tp->pdev->dev, TG3_RX_JMB_RING_BYTES(tp),
6576 tpr->rx_jmb, tpr->rx_jmb_mapping);
6577 tpr->rx_jmb = NULL;
6581 static int tg3_rx_prodring_init(struct tg3 *tp,
6582 struct tg3_rx_prodring_set *tpr)
6584 tpr->rx_std_buffers = kzalloc(TG3_RX_STD_BUFF_RING_SIZE(tp),
6585 GFP_KERNEL);
6586 if (!tpr->rx_std_buffers)
6587 return -ENOMEM;
6589 tpr->rx_std = dma_alloc_coherent(&tp->pdev->dev,
6590 TG3_RX_STD_RING_BYTES(tp),
6591 &tpr->rx_std_mapping,
6592 GFP_KERNEL);
6593 if (!tpr->rx_std)
6594 goto err_out;
6596 if (tg3_flag(tp, JUMBO_CAPABLE) && !tg3_flag(tp, 5780_CLASS)) {
6597 tpr->rx_jmb_buffers = kzalloc(TG3_RX_JMB_BUFF_RING_SIZE(tp),
6598 GFP_KERNEL);
6599 if (!tpr->rx_jmb_buffers)
6600 goto err_out;
6602 tpr->rx_jmb = dma_alloc_coherent(&tp->pdev->dev,
6603 TG3_RX_JMB_RING_BYTES(tp),
6604 &tpr->rx_jmb_mapping,
6605 GFP_KERNEL);
6606 if (!tpr->rx_jmb)
6607 goto err_out;
6610 return 0;
6612 err_out:
6613 tg3_rx_prodring_fini(tp, tpr);
6614 return -ENOMEM;
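/* Note the allocation split above: rx_std_buffers/rx_jmb_buffers are
 * host-only shadow arrays (plain kzalloc), while rx_std/rx_jmb are the
 * descriptor rings the NIC itself reads, hence dma_alloc_coherent(). */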
6617 /* Free up pending packets in all rx/tx rings.
6619 * The chip has been shut down and the driver detached from
6620 * the networking stack, so no interrupts or new tx packets will
6621 * end up in the driver. tp->{tx,}lock is not held and we are not
6622 * in an interrupt context and thus may sleep.
6624 static void tg3_free_rings(struct tg3 *tp)
6626 int i, j;
6628 for (j = 0; j < tp->irq_cnt; j++) {
6629 struct tg3_napi *tnapi = &tp->napi[j];
6631 tg3_rx_prodring_free(tp, &tnapi->prodring);
6633 if (!tnapi->tx_buffers)
6634 continue;
6636 for (i = 0; i < TG3_TX_RING_SIZE; i++) {
6637 struct sk_buff *skb = tnapi->tx_buffers[i].skb;
6639 if (!skb)
6640 continue;
6642 tg3_tx_skb_unmap(tnapi, i, skb_shinfo(skb)->nr_frags);
6644 dev_kfree_skb_any(skb);
6649 /* Initialize tx/rx rings for packet processing.
6651 * The chip has been shut down and the driver detached from
6652 * the networking stack, so no interrupts or new tx packets will
6653 * end up in the driver. tp->{tx,}lock are held and thus
6654 * we may not sleep.
6656 static int tg3_init_rings(struct tg3 *tp)
6658 int i;
6660 /* Free up all the SKBs. */
6661 tg3_free_rings(tp);
6663 for (i = 0; i < tp->irq_cnt; i++) {
6664 struct tg3_napi *tnapi = &tp->napi[i];
6666 tnapi->last_tag = 0;
6667 tnapi->last_irq_tag = 0;
6668 tnapi->hw_status->status = 0;
6669 tnapi->hw_status->status_tag = 0;
6670 memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
6672 tnapi->tx_prod = 0;
6673 tnapi->tx_cons = 0;
6674 if (tnapi->tx_ring)
6675 memset(tnapi->tx_ring, 0, TG3_TX_RING_BYTES);
6677 tnapi->rx_rcb_ptr = 0;
6678 if (tnapi->rx_rcb)
6679 memset(tnapi->rx_rcb, 0, TG3_RX_RCB_RING_BYTES(tp));
6681 if (tg3_rx_prodring_alloc(tp, &tnapi->prodring)) {
6682 tg3_free_rings(tp);
6683 return -ENOMEM;
6687 return 0;
6691 /* Must not be invoked with interrupt sources disabled and
6692 * the hardware shut down.
6694 static void tg3_free_consistent(struct tg3 *tp)
6696 int i;
6698 for (i = 0; i < tp->irq_cnt; i++) {
6699 struct tg3_napi *tnapi = &tp->napi[i];
6701 if (tnapi->tx_ring) {
6702 dma_free_coherent(&tp->pdev->dev, TG3_TX_RING_BYTES,
6703 tnapi->tx_ring, tnapi->tx_desc_mapping);
6704 tnapi->tx_ring = NULL;
6707 kfree(tnapi->tx_buffers);
6708 tnapi->tx_buffers = NULL;
6710 if (tnapi->rx_rcb) {
6711 dma_free_coherent(&tp->pdev->dev,
6712 TG3_RX_RCB_RING_BYTES(tp),
6713 tnapi->rx_rcb,
6714 tnapi->rx_rcb_mapping);
6715 tnapi->rx_rcb = NULL;
6718 tg3_rx_prodring_fini(tp, &tnapi->prodring);
6720 if (tnapi->hw_status) {
6721 dma_free_coherent(&tp->pdev->dev, TG3_HW_STATUS_SIZE,
6722 tnapi->hw_status,
6723 tnapi->status_mapping);
6724 tnapi->hw_status = NULL;
6728 if (tp->hw_stats) {
6729 dma_free_coherent(&tp->pdev->dev, sizeof(struct tg3_hw_stats),
6730 tp->hw_stats, tp->stats_mapping);
6731 tp->hw_stats = NULL;
6736 /* Must not be invoked with interrupt sources disabled and
6737 * the hardware shut down. Can sleep.
6739 static int tg3_alloc_consistent(struct tg3 *tp)
6741 int i;
6743 tp->hw_stats = dma_alloc_coherent(&tp->pdev->dev,
6744 sizeof(struct tg3_hw_stats),
6745 &tp->stats_mapping,
6746 GFP_KERNEL);
6747 if (!tp->hw_stats)
6748 goto err_out;
6750 memset(tp->hw_stats, 0, sizeof(struct tg3_hw_stats));
6752 for (i = 0; i < tp->irq_cnt; i++) {
6753 struct tg3_napi *tnapi = &tp->napi[i];
6754 struct tg3_hw_status *sblk;
6756 tnapi->hw_status = dma_alloc_coherent(&tp->pdev->dev,
6757 TG3_HW_STATUS_SIZE,
6758 &tnapi->status_mapping,
6759 GFP_KERNEL);
6760 if (!tnapi->hw_status)
6761 goto err_out;
6763 memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
6764 sblk = tnapi->hw_status;
6766 if (tg3_rx_prodring_init(tp, &tnapi->prodring))
6767 goto err_out;
6769 /* If multivector TSS is enabled, vector 0 does not handle
6770 * tx interrupts. Don't allocate any resources for it.
6772 if ((!i && !tg3_flag(tp, ENABLE_TSS)) ||
6773 (i && tg3_flag(tp, ENABLE_TSS))) {
6774 tnapi->tx_buffers = kzalloc(
6775 sizeof(struct tg3_tx_ring_info) *
6776 TG3_TX_RING_SIZE, GFP_KERNEL);
6777 if (!tnapi->tx_buffers)
6778 goto err_out;
6780 tnapi->tx_ring = dma_alloc_coherent(&tp->pdev->dev,
6781 TG3_TX_RING_BYTES,
6782 &tnapi->tx_desc_mapping,
6783 GFP_KERNEL);
6784 if (!tnapi->tx_ring)
6785 goto err_out;
6789 /* When RSS is enabled, the status block format changes
6790 * slightly. The "rx_jumbo_consumer", "reserved",
6791 * and "rx_mini_consumer" members get mapped to the
6792 * other three rx return ring producer indexes.
6794 switch (i) {
6795 default:
6796 tnapi->rx_rcb_prod_idx = &sblk->idx[0].rx_producer;
6797 break;
6798 case 2:
6799 tnapi->rx_rcb_prod_idx = &sblk->rx_jumbo_consumer;
6800 break;
6801 case 3:
6802 tnapi->rx_rcb_prod_idx = &sblk->reserved;
6803 break;
6804 case 4:
6805 tnapi->rx_rcb_prod_idx = &sblk->rx_mini_consumer;
6806 break;
6810 /* If multivector RSS is enabled, vector 0 does not handle
6811 * rx or tx interrupts. Don't allocate any resources for it.
6813 if (!i && tg3_flag(tp, ENABLE_RSS))
6814 continue;
6816 tnapi->rx_rcb = dma_alloc_coherent(&tp->pdev->dev,
6817 TG3_RX_RCB_RING_BYTES(tp),
6818 &tnapi->rx_rcb_mapping,
6819 GFP_KERNEL);
6820 if (!tnapi->rx_rcb)
6821 goto err_out;
6823 memset(tnapi->rx_rcb, 0, TG3_RX_RCB_RING_BYTES(tp));
6826 return 0;
6828 err_out:
6829 tg3_free_consistent(tp);
6830 return -ENOMEM;
6833 #define MAX_WAIT_CNT 1000
6835 /* To stop a block, clear the enable bit and poll till it
6836 * clears. tp->lock is held.
6838 static int tg3_stop_block(struct tg3 *tp, unsigned long ofs, u32 enable_bit, int silent)
6840 unsigned int i;
6841 u32 val;
6843 if (tg3_flag(tp, 5705_PLUS)) {
6844 switch (ofs) {
6845 case RCVLSC_MODE:
6846 case DMAC_MODE:
6847 case MBFREE_MODE:
6848 case BUFMGR_MODE:
6849 case MEMARB_MODE:
6850 /* We can't enable/disable these bits of the
6851 * 5705/5750, just say success.
6853 return 0;
6855 default:
6856 break;
6860 val = tr32(ofs);
6861 val &= ~enable_bit;
6862 tw32_f(ofs, val);
6864 for (i = 0; i < MAX_WAIT_CNT; i++) {
6865 udelay(100);
6866 val = tr32(ofs);
6867 if ((val & enable_bit) == 0)
6868 break;
6871 if (i == MAX_WAIT_CNT && !silent) {
6872 dev_err(&tp->pdev->dev,
6873 "tg3_stop_block timed out, ofs=%lx enable_bit=%x\n",
6874 ofs, enable_bit);
6875 return -ENODEV;
6878 return 0;
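/* Worst-case wait above: MAX_WAIT_CNT (1000) polls of 100us each,
 * i.e. roughly 100ms before a block is declared stuck. */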
6881 /* tp->lock is held. */
6882 static int tg3_abort_hw(struct tg3 *tp, int silent)
6884 int i, err;
6886 tg3_disable_ints(tp);
6888 tp->rx_mode &= ~RX_MODE_ENABLE;
6889 tw32_f(MAC_RX_MODE, tp->rx_mode);
6890 udelay(10);
6892 err = tg3_stop_block(tp, RCVBDI_MODE, RCVBDI_MODE_ENABLE, silent);
6893 err |= tg3_stop_block(tp, RCVLPC_MODE, RCVLPC_MODE_ENABLE, silent);
6894 err |= tg3_stop_block(tp, RCVLSC_MODE, RCVLSC_MODE_ENABLE, silent);
6895 err |= tg3_stop_block(tp, RCVDBDI_MODE, RCVDBDI_MODE_ENABLE, silent);
6896 err |= tg3_stop_block(tp, RCVDCC_MODE, RCVDCC_MODE_ENABLE, silent);
6897 err |= tg3_stop_block(tp, RCVCC_MODE, RCVCC_MODE_ENABLE, silent);
6899 err |= tg3_stop_block(tp, SNDBDS_MODE, SNDBDS_MODE_ENABLE, silent);
6900 err |= tg3_stop_block(tp, SNDBDI_MODE, SNDBDI_MODE_ENABLE, silent);
6901 err |= tg3_stop_block(tp, SNDDATAI_MODE, SNDDATAI_MODE_ENABLE, silent);
6902 err |= tg3_stop_block(tp, RDMAC_MODE, RDMAC_MODE_ENABLE, silent);
6903 err |= tg3_stop_block(tp, SNDDATAC_MODE, SNDDATAC_MODE_ENABLE, silent);
6904 err |= tg3_stop_block(tp, DMAC_MODE, DMAC_MODE_ENABLE, silent);
6905 err |= tg3_stop_block(tp, SNDBDC_MODE, SNDBDC_MODE_ENABLE, silent);
6907 tp->mac_mode &= ~MAC_MODE_TDE_ENABLE;
6908 tw32_f(MAC_MODE, tp->mac_mode);
6909 udelay(40);
6911 tp->tx_mode &= ~TX_MODE_ENABLE;
6912 tw32_f(MAC_TX_MODE, tp->tx_mode);
6914 for (i = 0; i < MAX_WAIT_CNT; i++) {
6915 udelay(100);
6916 if (!(tr32(MAC_TX_MODE) & TX_MODE_ENABLE))
6917 break;
6919 if (i >= MAX_WAIT_CNT) {
6920 dev_err(&tp->pdev->dev,
6921 "%s timed out, TX_MODE_ENABLE will not clear "
6922 "MAC_TX_MODE=%08x\n", __func__, tr32(MAC_TX_MODE));
6923 err |= -ENODEV;
6926 err |= tg3_stop_block(tp, HOSTCC_MODE, HOSTCC_MODE_ENABLE, silent);
6927 err |= tg3_stop_block(tp, WDMAC_MODE, WDMAC_MODE_ENABLE, silent);
6928 err |= tg3_stop_block(tp, MBFREE_MODE, MBFREE_MODE_ENABLE, silent);
6930 tw32(FTQ_RESET, 0xffffffff);
6931 tw32(FTQ_RESET, 0x00000000);
6933 err |= tg3_stop_block(tp, BUFMGR_MODE, BUFMGR_MODE_ENABLE, silent);
6934 err |= tg3_stop_block(tp, MEMARB_MODE, MEMARB_MODE_ENABLE, silent);
6936 for (i = 0; i < tp->irq_cnt; i++) {
6937 struct tg3_napi *tnapi = &tp->napi[i];
6938 if (tnapi->hw_status)
6939 memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
6941 if (tp->hw_stats)
6942 memset(tp->hw_stats, 0, sizeof(struct tg3_hw_stats));
6944 return err;
6947 static void tg3_ape_send_event(struct tg3 *tp, u32 event)
6949 int i;
6950 u32 apedata;
6952 /* NCSI does not support APE events */
6953 if (tg3_flag(tp, APE_HAS_NCSI))
6954 return;
6956 apedata = tg3_ape_read32(tp, TG3_APE_SEG_SIG);
6957 if (apedata != APE_SEG_SIG_MAGIC)
6958 return;
6960 apedata = tg3_ape_read32(tp, TG3_APE_FW_STATUS);
6961 if (!(apedata & APE_FW_STATUS_READY))
6962 return;
6964 /* Wait for up to 1 millisecond for APE to service previous event. */
6965 for (i = 0; i < 10; i++) {
6966 if (tg3_ape_lock(tp, TG3_APE_LOCK_MEM))
6967 return;
6969 apedata = tg3_ape_read32(tp, TG3_APE_EVENT_STATUS);
6971 if (!(apedata & APE_EVENT_STATUS_EVENT_PENDING))
6972 tg3_ape_write32(tp, TG3_APE_EVENT_STATUS,
6973 event | APE_EVENT_STATUS_EVENT_PENDING);
6975 tg3_ape_unlock(tp, TG3_APE_LOCK_MEM);
6977 if (!(apedata & APE_EVENT_STATUS_EVENT_PENDING))
6978 break;
6980 udelay(100);
6983 if (!(apedata & APE_EVENT_STATUS_EVENT_PENDING))
6984 tg3_ape_write32(tp, TG3_APE_EVENT, APE_EVENT_1);
6987 static void tg3_ape_driver_state_change(struct tg3 *tp, int kind)
6989 u32 event;
6990 u32 apedata;
6992 if (!tg3_flag(tp, ENABLE_APE))
6993 return;
6995 switch (kind) {
6996 case RESET_KIND_INIT:
6997 tg3_ape_write32(tp, TG3_APE_HOST_SEG_SIG,
6998 APE_HOST_SEG_SIG_MAGIC);
6999 tg3_ape_write32(tp, TG3_APE_HOST_SEG_LEN,
7000 APE_HOST_SEG_LEN_MAGIC);
7001 apedata = tg3_ape_read32(tp, TG3_APE_HOST_INIT_COUNT);
7002 tg3_ape_write32(tp, TG3_APE_HOST_INIT_COUNT, ++apedata);
7003 tg3_ape_write32(tp, TG3_APE_HOST_DRIVER_ID,
7004 APE_HOST_DRIVER_ID_MAGIC(TG3_MAJ_NUM, TG3_MIN_NUM));
7005 tg3_ape_write32(tp, TG3_APE_HOST_BEHAVIOR,
7006 APE_HOST_BEHAV_NO_PHYLOCK);
7007 tg3_ape_write32(tp, TG3_APE_HOST_DRVR_STATE,
7008 TG3_APE_HOST_DRVR_STATE_START);
7010 event = APE_EVENT_STATUS_STATE_START;
7011 break;
7012 case RESET_KIND_SHUTDOWN:
7013 /* With the interface we are currently using,
7014 * APE does not track driver state. Wiping
7015 * out the HOST SEGMENT SIGNATURE forces
7016 * the APE to assume OS absent status.
7018 tg3_ape_write32(tp, TG3_APE_HOST_SEG_SIG, 0x0);
7020 if (device_may_wakeup(&tp->pdev->dev) &&
7021 tg3_flag(tp, WOL_ENABLE)) {
7022 tg3_ape_write32(tp, TG3_APE_HOST_WOL_SPEED,
7023 TG3_APE_HOST_WOL_SPEED_AUTO);
7024 apedata = TG3_APE_HOST_DRVR_STATE_WOL;
7025 } else
7026 apedata = TG3_APE_HOST_DRVR_STATE_UNLOAD;
7028 tg3_ape_write32(tp, TG3_APE_HOST_DRVR_STATE, apedata);
7030 event = APE_EVENT_STATUS_STATE_UNLOAD;
7031 break;
7032 case RESET_KIND_SUSPEND:
7033 event = APE_EVENT_STATUS_STATE_SUSPEND;
7034 break;
7035 default:
7036 return;
7039 event |= APE_EVENT_STATUS_DRIVER_EVNT | APE_EVENT_STATUS_STATE_CHNGE;
7041 tg3_ape_send_event(tp, event);
7044 /* tp->lock is held. */
7045 static void tg3_write_sig_pre_reset(struct tg3 *tp, int kind)
7047 tg3_write_mem(tp, NIC_SRAM_FIRMWARE_MBOX,
7048 NIC_SRAM_FIRMWARE_MBOX_MAGIC1);
7050 if (tg3_flag(tp, ASF_NEW_HANDSHAKE)) {
7051 switch (kind) {
7052 case RESET_KIND_INIT:
7053 tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
7054 DRV_STATE_START);
7055 break;
7057 case RESET_KIND_SHUTDOWN:
7058 tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
7059 DRV_STATE_UNLOAD);
7060 break;
7062 case RESET_KIND_SUSPEND:
7063 tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
7064 DRV_STATE_SUSPEND);
7065 break;
7067 default:
7068 break;
7072 if (kind == RESET_KIND_INIT ||
7073 kind == RESET_KIND_SUSPEND)
7074 tg3_ape_driver_state_change(tp, kind);
7077 /* tp->lock is held. */
7078 static void tg3_write_sig_post_reset(struct tg3 *tp, int kind)
7080 if (tg3_flag(tp, ASF_NEW_HANDSHAKE)) {
7081 switch (kind) {
7082 case RESET_KIND_INIT:
7083 tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
7084 DRV_STATE_START_DONE);
7085 break;
7087 case RESET_KIND_SHUTDOWN:
7088 tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
7089 DRV_STATE_UNLOAD_DONE);
7090 break;
7092 default:
7093 break;
7097 if (kind == RESET_KIND_SHUTDOWN)
7098 tg3_ape_driver_state_change(tp, kind);
7101 /* tp->lock is held. */
7102 static void tg3_write_sig_legacy(struct tg3 *tp, int kind)
7104 if (tg3_flag(tp, ENABLE_ASF)) {
7105 switch (kind) {
7106 case RESET_KIND_INIT:
7107 tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
7108 DRV_STATE_START);
7109 break;
7111 case RESET_KIND_SHUTDOWN:
7112 tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
7113 DRV_STATE_UNLOAD);
7114 break;
7116 case RESET_KIND_SUSPEND:
7117 tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
7118 DRV_STATE_SUSPEND);
7119 break;
7121 default:
7122 break;
7127 static int tg3_poll_fw(struct tg3 *tp)
7129 int i;
7130 u32 val;
7132 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
7133 /* Wait up to 20ms for init done. */
7134 for (i = 0; i < 200; i++) {
7135 if (tr32(VCPU_STATUS) & VCPU_STATUS_INIT_DONE)
7136 return 0;
7137 udelay(100);
7139 return -ENODEV;
7142 /* Wait for firmware initialization to complete. */
7143 for (i = 0; i < 100000; i++) {
7144 tg3_read_mem(tp, NIC_SRAM_FIRMWARE_MBOX, &val);
7145 if (val == ~NIC_SRAM_FIRMWARE_MBOX_MAGIC1)
7146 break;
7147 udelay(10);
7150 /* Chip might not be fitted with firmware. Some Sun onboard
7151 * parts are configured like that. So don't signal the timeout
7152 * of the above loop as an error, but do report the lack of
7153 * running firmware once.
7155 if (i >= 100000 && !tg3_flag(tp, NO_FWARE_REPORTED)) {
7156 tg3_flag_set(tp, NO_FWARE_REPORTED);
7158 netdev_info(tp->dev, "No firmware running\n");
7161 if (tp->pci_chip_rev_id == CHIPREV_ID_57765_A0) {
7162 /* The 57765 A0 needs a little more
7163 * time to do some important work.
7165 mdelay(10);
7168 return 0;
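/* Timing of the waits above: the 5906 VCPU path polls up to
 * 200 * 100us = 20ms; the firmware mailbox path polls up to
 * 100000 * 10us = 1s before concluding that no firmware is running. */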
7171 /* Save PCI command register before chip reset */
7172 static void tg3_save_pci_state(struct tg3 *tp)
7174 pci_read_config_word(tp->pdev, PCI_COMMAND, &tp->pci_cmd);
7177 /* Restore PCI state after chip reset */
7178 static void tg3_restore_pci_state(struct tg3 *tp)
7180 u32 val;
7182 /* Re-enable indirect register accesses. */
7183 pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
7184 tp->misc_host_ctrl);
7186 /* Set MAX PCI retry to zero. */
7187 val = (PCISTATE_ROM_ENABLE | PCISTATE_ROM_RETRY_ENABLE);
7188 if (tp->pci_chip_rev_id == CHIPREV_ID_5704_A0 &&
7189 tg3_flag(tp, PCIX_MODE))
7190 val |= PCISTATE_RETRY_SAME_DMA;
7191 /* Allow reads and writes to the APE register and memory space. */
7192 if (tg3_flag(tp, ENABLE_APE))
7193 val |= PCISTATE_ALLOW_APE_CTLSPC_WR |
7194 PCISTATE_ALLOW_APE_SHMEM_WR |
7195 PCISTATE_ALLOW_APE_PSPACE_WR;
7196 pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE, val);
7198 pci_write_config_word(tp->pdev, PCI_COMMAND, tp->pci_cmd);
7200 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5785) {
7201 if (tg3_flag(tp, PCI_EXPRESS))
7202 pcie_set_readrq(tp->pdev, tp->pcie_readrq);
7203 else {
7204 pci_write_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE,
7205 tp->pci_cacheline_sz);
7206 pci_write_config_byte(tp->pdev, PCI_LATENCY_TIMER,
7207 tp->pci_lat_timer);
7211 /* Make sure PCI-X relaxed ordering bit is clear. */
7212 if (tg3_flag(tp, PCIX_MODE)) {
7213 u16 pcix_cmd;
7215 pci_read_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
7216 &pcix_cmd);
7217 pcix_cmd &= ~PCI_X_CMD_ERO;
7218 pci_write_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
7219 pcix_cmd);
7222 if (tg3_flag(tp, 5780_CLASS)) {
7224 /* Chip reset on 5780 will reset MSI enable bit,
7225 * so need to restore it.
7227 if (tg3_flag(tp, USING_MSI)) {
7228 u16 ctrl;
7230 pci_read_config_word(tp->pdev,
7231 tp->msi_cap + PCI_MSI_FLAGS,
7232 &ctrl);
7233 pci_write_config_word(tp->pdev,
7234 tp->msi_cap + PCI_MSI_FLAGS,
7235 ctrl | PCI_MSI_FLAGS_ENABLE);
7236 val = tr32(MSGINT_MODE);
7237 tw32(MSGINT_MODE, val | MSGINT_MODE_ENABLE);
7242 static void tg3_stop_fw(struct tg3 *);
7244 /* tp->lock is held. */
7245 static int tg3_chip_reset(struct tg3 *tp)
7247 u32 val;
7248 void (*write_op)(struct tg3 *, u32, u32);
7249 int i, err;
7251 tg3_nvram_lock(tp);
7253 tg3_ape_lock(tp, TG3_APE_LOCK_GRC);
7255 /* No matching tg3_nvram_unlock() after this because
7256 * chip reset below will undo the nvram lock.
7258 tp->nvram_lock_cnt = 0;
7260 /* GRC_MISC_CFG core clock reset will clear the memory
7261 * enable bit in PCI register 4 and the MSI enable bit
7262 * on some chips, so we save relevant registers here.
7264 tg3_save_pci_state(tp);
7266 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752 ||
7267 tg3_flag(tp, 5755_PLUS))
7268 tw32(GRC_FASTBOOT_PC, 0);
7271 /* We must avoid the readl() that normally takes place.
7272 * It locks machines, causes machine checks, and other
7273 * fun things. So, temporarily disable the 5701
7274 * hardware workaround, while we do the reset.
7276 write_op = tp->write32;
7277 if (write_op == tg3_write_flush_reg32)
7278 tp->write32 = tg3_write32;
7280 /* Prevent the irq handler from reading or writing PCI registers
7281 * during chip reset when the memory enable bit in the PCI command
7282 * register may be cleared. The chip does not generate interrupt
7283 * at this time, but the irq handler may still be called due to irq
7284 * sharing or irqpoll.
7286 tg3_flag_set(tp, CHIP_RESETTING);
7287 for (i = 0; i < tp->irq_cnt; i++) {
7288 struct tg3_napi *tnapi = &tp->napi[i];
7289 if (tnapi->hw_status) {
7290 tnapi->hw_status->status = 0;
7291 tnapi->hw_status->status_tag = 0;
7293 tnapi->last_tag = 0;
7294 tnapi->last_irq_tag = 0;
7296 smp_mb();
7298 for (i = 0; i < tp->irq_cnt; i++)
7299 synchronize_irq(tp->napi[i].irq_vec);
7301 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780) {
7302 val = tr32(TG3_PCIE_LNKCTL) & ~TG3_PCIE_LNKCTL_L1_PLL_PD_EN;
7303 tw32(TG3_PCIE_LNKCTL, val | TG3_PCIE_LNKCTL_L1_PLL_PD_DIS);
7306 /* do the reset */
7307 val = GRC_MISC_CFG_CORECLK_RESET;
7309 if (tg3_flag(tp, PCI_EXPRESS)) {
7310 /* Force PCIe 1.0a mode */
7311 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5785 &&
7312 !tg3_flag(tp, 57765_PLUS) &&
7313 tr32(TG3_PCIE_PHY_TSTCTL) ==
7314 (TG3_PCIE_PHY_TSTCTL_PCIE10 | TG3_PCIE_PHY_TSTCTL_PSCRAM))
7315 tw32(TG3_PCIE_PHY_TSTCTL, TG3_PCIE_PHY_TSTCTL_PSCRAM);
7317 if (tp->pci_chip_rev_id != CHIPREV_ID_5750_A0) {
7318 tw32(GRC_MISC_CFG, (1 << 29));
7319 val |= (1 << 29);
7323 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
7324 tw32(VCPU_STATUS, tr32(VCPU_STATUS) | VCPU_STATUS_DRV_RESET);
7325 tw32(GRC_VCPU_EXT_CTRL,
7326 tr32(GRC_VCPU_EXT_CTRL) & ~GRC_VCPU_EXT_CTRL_HALT_CPU);
7329 /* Manage gphy power for all CPMU absent PCIe devices. */
7330 if (tg3_flag(tp, 5705_PLUS) && !tg3_flag(tp, CPMU_PRESENT))
7331 val |= GRC_MISC_CFG_KEEP_GPHY_POWER;
7333 tw32(GRC_MISC_CFG, val);
7335 /* restore 5701 hardware bug workaround write method */
7336 tp->write32 = write_op;
7338 /* Unfortunately, we have to delay before the PCI read back.
7339 * Some 575X chips will not even respond to a PCI cfg access
7340 * when the reset command is given to the chip.
7342 * How do these hardware designers expect things to work
7343 * properly if the PCI write is posted for a long period
7344 * of time? It is always necessary to have some method by
7345 * which a register read back can occur to push the write
7346 * out which does the reset.
7348 * For most tg3 variants the trick below was working.
7349 * Ho hum...
7351 udelay(120);
7353 /* Flush PCI posted writes. The normal MMIO registers
7354 * are inaccessible at this time so this is the only
7355 * way to do this reliably (actually, this is no longer
7356 * the case, see above). I tried to use indirect
7357 * register read/write but this upset some 5701 variants.
7359 pci_read_config_dword(tp->pdev, PCI_COMMAND, &val);
7361 udelay(120);
7363 if (tg3_flag(tp, PCI_EXPRESS) && pci_pcie_cap(tp->pdev)) {
7364 u16 val16;
7366 if (tp->pci_chip_rev_id == CHIPREV_ID_5750_A0) {
7367 int i;
7368 u32 cfg_val;
7370 /* Wait for link training to complete. */
7371 for (i = 0; i < 5000; i++)
7372 udelay(100);
7374 pci_read_config_dword(tp->pdev, 0xc4, &cfg_val);
7375 pci_write_config_dword(tp->pdev, 0xc4,
7376 cfg_val | (1 << 15));
7379 /* Clear the "no snoop" and "relaxed ordering" bits. */
7380 pci_read_config_word(tp->pdev,
7381 pci_pcie_cap(tp->pdev) + PCI_EXP_DEVCTL,
7382 &val16);
7383 val16 &= ~(PCI_EXP_DEVCTL_RELAX_EN |
7384 PCI_EXP_DEVCTL_NOSNOOP_EN);
7386 /* Older PCIe devices only support the 128 byte
7387 * MPS setting. Enforce the restriction.
7389 if (!tg3_flag(tp, CPMU_PRESENT))
7390 val16 &= ~PCI_EXP_DEVCTL_PAYLOAD;
7391 pci_write_config_word(tp->pdev,
7392 pci_pcie_cap(tp->pdev) + PCI_EXP_DEVCTL,
7393 val16);
7395 pcie_set_readrq(tp->pdev, tp->pcie_readrq);
7397 /* Clear error status */
7398 pci_write_config_word(tp->pdev,
7399 pci_pcie_cap(tp->pdev) + PCI_EXP_DEVSTA,
7400 PCI_EXP_DEVSTA_CED |
7401 PCI_EXP_DEVSTA_NFED |
7402 PCI_EXP_DEVSTA_FED |
7403 PCI_EXP_DEVSTA_URD);
7406 tg3_restore_pci_state(tp);
7408 tg3_flag_clear(tp, CHIP_RESETTING);
7409 tg3_flag_clear(tp, ERROR_PROCESSED);
7411 val = 0;
7412 if (tg3_flag(tp, 5780_CLASS))
7413 val = tr32(MEMARB_MODE);
7414 tw32(MEMARB_MODE, val | MEMARB_MODE_ENABLE);
7416 if (tp->pci_chip_rev_id == CHIPREV_ID_5750_A3) {
7417 tg3_stop_fw(tp);
7418 tw32(0x5000, 0x400);
7421 tw32(GRC_MODE, tp->grc_mode);
7423 if (tp->pci_chip_rev_id == CHIPREV_ID_5705_A0) {
7424 val = tr32(0xc4);
7426 tw32(0xc4, val | (1 << 15));
7429 if ((tp->nic_sram_data_cfg & NIC_SRAM_DATA_CFG_MINI_PCI) != 0 &&
7430 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
7431 tp->pci_clock_ctrl |= CLOCK_CTRL_CLKRUN_OENABLE;
7432 if (tp->pci_chip_rev_id == CHIPREV_ID_5705_A0)
7433 tp->pci_clock_ctrl |= CLOCK_CTRL_FORCE_CLKRUN;
7434 tw32(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl);
7437 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
7438 tp->mac_mode = MAC_MODE_PORT_MODE_TBI;
7439 val = tp->mac_mode;
7440 } else if (tp->phy_flags & TG3_PHYFLG_MII_SERDES) {
7441 tp->mac_mode = MAC_MODE_PORT_MODE_GMII;
7442 val = tp->mac_mode;
7443 } else
7444 val = 0;
7446 tw32_f(MAC_MODE, val);
7447 udelay(40);
7449 tg3_ape_unlock(tp, TG3_APE_LOCK_GRC);
7451 err = tg3_poll_fw(tp);
7452 if (err)
7453 return err;
7455 tg3_mdio_start(tp);
7457 if (tg3_flag(tp, PCI_EXPRESS) &&
7458 tp->pci_chip_rev_id != CHIPREV_ID_5750_A0 &&
7459 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5785 &&
7460 !tg3_flag(tp, 57765_PLUS)) {
7461 val = tr32(0x7c00);
7463 tw32(0x7c00, val | (1 << 25));
7466 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720) {
7467 val = tr32(TG3_CPMU_CLCK_ORIDE);
7468 tw32(TG3_CPMU_CLCK_ORIDE, val & ~CPMU_CLCK_ORIDE_MAC_ORIDE_EN);
7471 /* Reprobe ASF enable state. */
7472 tg3_flag_clear(tp, ENABLE_ASF);
7473 tg3_flag_clear(tp, ASF_NEW_HANDSHAKE);
7474 tg3_read_mem(tp, NIC_SRAM_DATA_SIG, &val);
7475 if (val == NIC_SRAM_DATA_SIG_MAGIC) {
7476 u32 nic_cfg;
7478 tg3_read_mem(tp, NIC_SRAM_DATA_CFG, &nic_cfg);
7479 if (nic_cfg & NIC_SRAM_DATA_CFG_ASF_ENABLE) {
7480 tg3_flag_set(tp, ENABLE_ASF);
7481 tp->last_event_jiffies = jiffies;
7482 if (tg3_flag(tp, 5750_PLUS))
7483 tg3_flag_set(tp, ASF_NEW_HANDSHAKE);
7487 return 0;
7490 /* tp->lock is held. */
7491 static void tg3_stop_fw(struct tg3 *tp)
7493 if (tg3_flag(tp, ENABLE_ASF) && !tg3_flag(tp, ENABLE_APE)) {
7494 /* Wait for RX cpu to ACK the previous event. */
7495 tg3_wait_for_event_ack(tp);
7497 tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX, FWCMD_NICDRV_PAUSE_FW);
7499 tg3_generate_fw_event(tp);
7501 /* Wait for RX cpu to ACK this event. */
7502 tg3_wait_for_event_ack(tp);
7506 /* tp->lock is held. */
7507 static int tg3_halt(struct tg3 *tp, int kind, int silent)
7509 int err;
7511 tg3_stop_fw(tp);
7513 tg3_write_sig_pre_reset(tp, kind);
7515 tg3_abort_hw(tp, silent);
7516 err = tg3_chip_reset(tp);
7518 __tg3_set_mac_addr(tp, 0);
7520 tg3_write_sig_legacy(tp, kind);
7521 tg3_write_sig_post_reset(tp, kind);
7523 if (err)
7524 return err;
7526 return 0;
7529 #define RX_CPU_SCRATCH_BASE 0x30000
7530 #define RX_CPU_SCRATCH_SIZE 0x04000
7531 #define TX_CPU_SCRATCH_BASE 0x34000
7532 #define TX_CPU_SCRATCH_SIZE 0x04000
7534 /* tp->lock is held. */
7535 static int tg3_halt_cpu(struct tg3 *tp, u32 offset)
7537 int i;
7539 BUG_ON(offset == TX_CPU_BASE && tg3_flag(tp, 5705_PLUS));
7541 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
7542 u32 val = tr32(GRC_VCPU_EXT_CTRL);
7544 tw32(GRC_VCPU_EXT_CTRL, val | GRC_VCPU_EXT_CTRL_HALT_CPU);
7545 return 0;
7547 if (offset == RX_CPU_BASE) {
7548 for (i = 0; i < 10000; i++) {
7549 tw32(offset + CPU_STATE, 0xffffffff);
7550 tw32(offset + CPU_MODE, CPU_MODE_HALT);
7551 if (tr32(offset + CPU_MODE) & CPU_MODE_HALT)
7552 break;
7555 tw32(offset + CPU_STATE, 0xffffffff);
7556 tw32_f(offset + CPU_MODE, CPU_MODE_HALT);
7557 udelay(10);
7558 } else {
7559 for (i = 0; i < 10000; i++) {
7560 tw32(offset + CPU_STATE, 0xffffffff);
7561 tw32(offset + CPU_MODE, CPU_MODE_HALT);
7562 if (tr32(offset + CPU_MODE) & CPU_MODE_HALT)
7563 break;
7567 if (i >= 10000) {
7568 netdev_err(tp->dev, "%s timed out, %s CPU\n",
7569 __func__, offset == RX_CPU_BASE ? "RX" : "TX");
7570 return -ENODEV;
7573 /* Clear firmware's nvram arbitration. */
7574 if (tg3_flag(tp, NVRAM))
7575 tw32(NVRAM_SWARB, SWARB_REQ_CLR0);
7576 return 0;
7579 struct fw_info {
7580 unsigned int fw_base;
7581 unsigned int fw_len;
7582 const __be32 *fw_data;
7585 /* tp->lock is held. */
7586 static int tg3_load_firmware_cpu(struct tg3 *tp, u32 cpu_base, u32 cpu_scratch_base,
7587 int cpu_scratch_size, struct fw_info *info)
7589 int err, lock_err, i;
7590 void (*write_op)(struct tg3 *, u32, u32);
7592 if (cpu_base == TX_CPU_BASE && tg3_flag(tp, 5705_PLUS)) {
7593 netdev_err(tp->dev,
7594 "%s: Trying to load TX cpu firmware on a 5705-class chip\n",
7595 __func__);
7596 return -EINVAL;
7599 if (tg3_flag(tp, 5705_PLUS))
7600 write_op = tg3_write_mem;
7601 else
7602 write_op = tg3_write_indirect_reg32;
7604 /* It is possible that bootcode is still loading at this point.
7605 * Get the nvram lock before halting the cpu.
7607 lock_err = tg3_nvram_lock(tp);
7608 err = tg3_halt_cpu(tp, cpu_base);
7609 if (!lock_err)
7610 tg3_nvram_unlock(tp);
7611 if (err)
7612 goto out;
7614 for (i = 0; i < cpu_scratch_size; i += sizeof(u32))
7615 write_op(tp, cpu_scratch_base + i, 0);
7616 tw32(cpu_base + CPU_STATE, 0xffffffff);
7617 tw32(cpu_base + CPU_MODE, tr32(cpu_base+CPU_MODE)|CPU_MODE_HALT);
7618 for (i = 0; i < (info->fw_len / sizeof(u32)); i++)
7619 write_op(tp, (cpu_scratch_base +
7620 (info->fw_base & 0xffff) +
7621 (i * sizeof(u32))),
7622 be32_to_cpu(info->fw_data[i]));
7624 err = 0;
7626 out:
7627 return err;
7630 /* tp->lock is held. */
7631 static int tg3_load_5701_a0_firmware_fix(struct tg3 *tp)
7633 struct fw_info info;
7634 const __be32 *fw_data;
7635 int err, i;
7637 fw_data = (void *)tp->fw->data;
7639 /* Firmware blob starts with version numbers, followed by
7640 start address and length. We are setting complete length.
7641 length = end_address_of_bss - start_address_of_text.
7642 Remainder is the blob to be loaded contiguously
7643 from start address. */
7645 info.fw_base = be32_to_cpu(fw_data[1]);
7646 info.fw_len = tp->fw->size - 12;
7647 info.fw_data = &fw_data[3];
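/* Implied header layout, inferred from the indexing above (12 bytes,
 * three be32 words): fw_data[0] version, fw_data[1] load address,
 * fw_data[2] image length, fw_data[3]... the image itself. */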
7649 err = tg3_load_firmware_cpu(tp, RX_CPU_BASE,
7650 RX_CPU_SCRATCH_BASE, RX_CPU_SCRATCH_SIZE,
7651 &info);
7652 if (err)
7653 return err;
7655 err = tg3_load_firmware_cpu(tp, TX_CPU_BASE,
7656 TX_CPU_SCRATCH_BASE, TX_CPU_SCRATCH_SIZE,
7657 &info);
7658 if (err)
7659 return err;
7661 /* Now start up only the RX cpu. */
7662 tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff);
7663 tw32_f(RX_CPU_BASE + CPU_PC, info.fw_base);
7665 for (i = 0; i < 5; i++) {
7666 if (tr32(RX_CPU_BASE + CPU_PC) == info.fw_base)
7667 break;
7668 tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff);
7669 tw32(RX_CPU_BASE + CPU_MODE, CPU_MODE_HALT);
7670 tw32_f(RX_CPU_BASE + CPU_PC, info.fw_base);
7671 udelay(1000);
7673 if (i >= 5) {
7674 netdev_err(tp->dev, "%s fails to set RX CPU PC, is %08x "
7675 "should be %08x\n", __func__,
7676 tr32(RX_CPU_BASE + CPU_PC), info.fw_base);
7677 return -ENODEV;
7679 tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff);
7680 tw32_f(RX_CPU_BASE + CPU_MODE, 0x00000000);
7682 return 0;
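/* CPU start protocol used above (and again in tg3_load_tso_firmware()):
 * write the entry point to CPU_PC, read it back to verify, and retry up
 * to five times with a re-halt in between; writing 0 to CPU_MODE then
 * releases the CPU to run from the loaded image. */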
7685 /* tp->lock is held. */
7686 static int tg3_load_tso_firmware(struct tg3 *tp)
7688 struct fw_info info;
7689 const __be32 *fw_data;
7690 unsigned long cpu_base, cpu_scratch_base, cpu_scratch_size;
7691 int err, i;
7693 if (tg3_flag(tp, HW_TSO_1) ||
7694 tg3_flag(tp, HW_TSO_2) ||
7695 tg3_flag(tp, HW_TSO_3))
7696 return 0;
7698 fw_data = (void *)tp->fw->data;
7700 /* Firmware blob starts with version numbers, followed by
7701 start address and length. We are setting complete length.
7702 length = end_address_of_bss - start_address_of_text.
7703 Remainder is the blob to be loaded contiguously
7704 from start address. */
7706 info.fw_base = be32_to_cpu(fw_data[1]);
7707 cpu_scratch_size = tp->fw_len;
7708 info.fw_len = tp->fw->size - 12;
7709 info.fw_data = &fw_data[3];
7711 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
7712 cpu_base = RX_CPU_BASE;
7713 cpu_scratch_base = NIC_SRAM_MBUF_POOL_BASE5705;
7714 } else {
7715 cpu_base = TX_CPU_BASE;
7716 cpu_scratch_base = TX_CPU_SCRATCH_BASE;
7717 cpu_scratch_size = TX_CPU_SCRATCH_SIZE;
7720 err = tg3_load_firmware_cpu(tp, cpu_base,
7721 cpu_scratch_base, cpu_scratch_size,
7722 &info);
7723 if (err)
7724 return err;
7726 /* Now start up the cpu. */
7727 tw32(cpu_base + CPU_STATE, 0xffffffff);
7728 tw32_f(cpu_base + CPU_PC, info.fw_base);
7730 for (i = 0; i < 5; i++) {
7731 if (tr32(cpu_base + CPU_PC) == info.fw_base)
7732 break;
7733 tw32(cpu_base + CPU_STATE, 0xffffffff);
7734 tw32(cpu_base + CPU_MODE, CPU_MODE_HALT);
7735 tw32_f(cpu_base + CPU_PC, info.fw_base);
7736 udelay(1000);
7738 if (i >= 5) {
7739 netdev_err(tp->dev,
7740 "%s fails to set CPU PC, is %08x should be %08x\n",
7741 __func__, tr32(cpu_base + CPU_PC), info.fw_base);
7742 return -ENODEV;
7744 tw32(cpu_base + CPU_STATE, 0xffffffff);
7745 tw32_f(cpu_base + CPU_MODE, 0x00000000);
7746 return 0;
7750 static int tg3_set_mac_addr(struct net_device *dev, void *p)
7752 struct tg3 *tp = netdev_priv(dev);
7753 struct sockaddr *addr = p;
7754 int err = 0, skip_mac_1 = 0;
7756 if (!is_valid_ether_addr(addr->sa_data))
7757 return -EINVAL;
7759 memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
7761 if (!netif_running(dev))
7762 return 0;
7764 if (tg3_flag(tp, ENABLE_ASF)) {
7765 u32 addr0_high, addr0_low, addr1_high, addr1_low;
7767 addr0_high = tr32(MAC_ADDR_0_HIGH);
7768 addr0_low = tr32(MAC_ADDR_0_LOW);
7769 addr1_high = tr32(MAC_ADDR_1_HIGH);
7770 addr1_low = tr32(MAC_ADDR_1_LOW);
7772 /* Skip MAC addr 1 if ASF is using it. */
7773 if ((addr0_high != addr1_high || addr0_low != addr1_low) &&
7774 !(addr1_high == 0 && addr1_low == 0))
7775 skip_mac_1 = 1;
7777 spin_lock_bh(&tp->lock);
7778 __tg3_set_mac_addr(tp, skip_mac_1);
7779 spin_unlock_bh(&tp->lock);
7781 return err;
7784 /* tp->lock is held. */
7785 static void tg3_set_bdinfo(struct tg3 *tp, u32 bdinfo_addr,
7786 dma_addr_t mapping, u32 maxlen_flags,
7787 u32 nic_addr)
7789 tg3_write_mem(tp,
7790 (bdinfo_addr + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH),
7791 ((u64) mapping >> 32));
7792 tg3_write_mem(tp,
7793 (bdinfo_addr + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW),
7794 ((u64) mapping & 0xffffffff));
7795 tg3_write_mem(tp,
7796 (bdinfo_addr + TG3_BDINFO_MAXLEN_FLAGS),
7797 maxlen_flags);
7799 if (!tg3_flag(tp, 5705_PLUS))
7800 tg3_write_mem(tp,
7801 (bdinfo_addr + TG3_BDINFO_NIC_ADDR),
7802 nic_addr);
7805 static void __tg3_set_rx_mode(struct net_device *);
7806 static void __tg3_set_coalesce(struct tg3 *tp, struct ethtool_coalesce *ec)
7808 int i;
7810 if (!tg3_flag(tp, ENABLE_TSS)) {
7811 tw32(HOSTCC_TXCOL_TICKS, ec->tx_coalesce_usecs);
7812 tw32(HOSTCC_TXMAX_FRAMES, ec->tx_max_coalesced_frames);
7813 tw32(HOSTCC_TXCOAL_MAXF_INT, ec->tx_max_coalesced_frames_irq);
7814 } else {
7815 tw32(HOSTCC_TXCOL_TICKS, 0);
7816 tw32(HOSTCC_TXMAX_FRAMES, 0);
7817 tw32(HOSTCC_TXCOAL_MAXF_INT, 0);
7820 if (!tg3_flag(tp, ENABLE_RSS)) {
7821 tw32(HOSTCC_RXCOL_TICKS, ec->rx_coalesce_usecs);
7822 tw32(HOSTCC_RXMAX_FRAMES, ec->rx_max_coalesced_frames);
7823 tw32(HOSTCC_RXCOAL_MAXF_INT, ec->rx_max_coalesced_frames_irq);
7824 } else {
7825 tw32(HOSTCC_RXCOL_TICKS, 0);
7826 tw32(HOSTCC_RXMAX_FRAMES, 0);
7827 tw32(HOSTCC_RXCOAL_MAXF_INT, 0);
7830 if (!tg3_flag(tp, 5705_PLUS)) {
7831 u32 val = ec->stats_block_coalesce_usecs;
7833 tw32(HOSTCC_RXCOAL_TICK_INT, ec->rx_coalesce_usecs_irq);
7834 tw32(HOSTCC_TXCOAL_TICK_INT, ec->tx_coalesce_usecs_irq);
7836 if (!netif_carrier_ok(tp->dev))
7837 val = 0;
7839 tw32(HOSTCC_STAT_COAL_TICKS, val);
7842 for (i = 0; i < tp->irq_cnt - 1; i++) {
7843 u32 reg;
7845 reg = HOSTCC_RXCOL_TICKS_VEC1 + i * 0x18;
7846 tw32(reg, ec->rx_coalesce_usecs);
7847 reg = HOSTCC_RXMAX_FRAMES_VEC1 + i * 0x18;
7848 tw32(reg, ec->rx_max_coalesced_frames);
7849 reg = HOSTCC_RXCOAL_MAXF_INT_VEC1 + i * 0x18;
7850 tw32(reg, ec->rx_max_coalesced_frames_irq);
7852 if (tg3_flag(tp, ENABLE_TSS)) {
7853 reg = HOSTCC_TXCOL_TICKS_VEC1 + i * 0x18;
7854 tw32(reg, ec->tx_coalesce_usecs);
7855 reg = HOSTCC_TXMAX_FRAMES_VEC1 + i * 0x18;
7856 tw32(reg, ec->tx_max_coalesced_frames);
7857 reg = HOSTCC_TXCOAL_MAXF_INT_VEC1 + i * 0x18;
7858 tw32(reg, ec->tx_max_coalesced_frames_irq);
7862 for (; i < tp->irq_max - 1; i++) {
7863 tw32(HOSTCC_RXCOL_TICKS_VEC1 + i * 0x18, 0);
7864 tw32(HOSTCC_RXMAX_FRAMES_VEC1 + i * 0x18, 0);
7865 tw32(HOSTCC_RXCOAL_MAXF_INT_VEC1 + i * 0x18, 0);
7867 if (tg3_flag(tp, ENABLE_TSS)) {
7868 tw32(HOSTCC_TXCOL_TICKS_VEC1 + i * 0x18, 0);
7869 tw32(HOSTCC_TXMAX_FRAMES_VEC1 + i * 0x18, 0);
7870 tw32(HOSTCC_TXCOAL_MAXF_INT_VEC1 + i * 0x18, 0);
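/* The per-vector host coalescing registers sit at a fixed 0x18-byte
 * stride from their VEC1 base, which is what the "+ i * 0x18"
 * arithmetic in the two loops above walks. */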
7875 /* tp->lock is held. */
7876 static void tg3_rings_reset(struct tg3 *tp)
7878 int i;
7879 u32 stblk, txrcb, rxrcb, limit;
7880 struct tg3_napi *tnapi = &tp->napi[0];
7882 /* Disable all transmit rings but the first. */
7883 if (!tg3_flag(tp, 5705_PLUS))
7884 limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE * 16;
7885 else if (tg3_flag(tp, 5717_PLUS))
7886 limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE * 4;
7887 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765)
7888 limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE * 2;
7889 else
7890 limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE;
7892 for (txrcb = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE;
7893 txrcb < limit; txrcb += TG3_BDINFO_SIZE)
7894 tg3_write_mem(tp, txrcb + TG3_BDINFO_MAXLEN_FLAGS,
7895 BDINFO_FLAGS_DISABLED);
7898 /* Disable all receive return rings but the first. */
7899 if (tg3_flag(tp, 5717_PLUS))
7900 limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE * 17;
7901 else if (!tg3_flag(tp, 5705_PLUS))
7902 limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE * 16;
7903 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
7904 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765)
7905 limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE * 4;
7906 else
7907 limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE;
7909 for (rxrcb = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE;
7910 rxrcb < limit; rxrcb += TG3_BDINFO_SIZE)
7911 tg3_write_mem(tp, rxrcb + TG3_BDINFO_MAXLEN_FLAGS,
7912 BDINFO_FLAGS_DISABLED);
7914 /* Disable interrupts */
7915 tw32_mailbox_f(tp->napi[0].int_mbox, 1);
7916 tp->napi[0].chk_msi_cnt = 0;
7917 tp->napi[0].last_rx_cons = 0;
7918 tp->napi[0].last_tx_cons = 0;
7920 /* Zero mailbox registers. */
7921 if (tg3_flag(tp, SUPPORT_MSIX)) {
7922 for (i = 1; i < tp->irq_max; i++) {
7923 tp->napi[i].tx_prod = 0;
7924 tp->napi[i].tx_cons = 0;
7925 if (tg3_flag(tp, ENABLE_TSS))
7926 tw32_mailbox(tp->napi[i].prodmbox, 0);
7927 tw32_rx_mbox(tp->napi[i].consmbox, 0);
7928 tw32_mailbox_f(tp->napi[i].int_mbox, 1);
7929 tp->napi[i].chk_msi_cnt = 0;
7930 tp->napi[i].last_rx_cons = 0;
7931 tp->napi[i].last_tx_cons = 0;
7933 if (!tg3_flag(tp, ENABLE_TSS))
7934 tw32_mailbox(tp->napi[0].prodmbox, 0);
7935 } else {
7936 tp->napi[0].tx_prod = 0;
7937 tp->napi[0].tx_cons = 0;
7938 tw32_mailbox(tp->napi[0].prodmbox, 0);
7939 tw32_rx_mbox(tp->napi[0].consmbox, 0);
7942 /* Make sure the NIC-based send BD rings are disabled. */
7943 if (!tg3_flag(tp, 5705_PLUS)) {
7944 u32 mbox = MAILBOX_SNDNIC_PROD_IDX_0 + TG3_64BIT_REG_LOW;
7945 for (i = 0; i < 16; i++)
7946 tw32_tx_mbox(mbox + i * 8, 0);
7949 txrcb = NIC_SRAM_SEND_RCB;
7950 rxrcb = NIC_SRAM_RCV_RET_RCB;
7952 /* Clear status block in ram. */
7953 memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
7955 /* Set status block DMA address */
7956 tw32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH,
7957 ((u64) tnapi->status_mapping >> 32));
7958 tw32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW,
7959 ((u64) tnapi->status_mapping & 0xffffffff));
7961 if (tnapi->tx_ring) {
7962 tg3_set_bdinfo(tp, txrcb, tnapi->tx_desc_mapping,
7963 (TG3_TX_RING_SIZE <<
7964 BDINFO_FLAGS_MAXLEN_SHIFT),
7965 NIC_SRAM_TX_BUFFER_DESC);
7966 txrcb += TG3_BDINFO_SIZE;
7969 if (tnapi->rx_rcb) {
7970 tg3_set_bdinfo(tp, rxrcb, tnapi->rx_rcb_mapping,
7971 (tp->rx_ret_ring_mask + 1) <<
7972 BDINFO_FLAGS_MAXLEN_SHIFT, 0);
7973 rxrcb += TG3_BDINFO_SIZE;
7976 stblk = HOSTCC_STATBLCK_RING1;
7978 for (i = 1, tnapi++; i < tp->irq_cnt; i++, tnapi++) {
7979 u64 mapping = (u64)tnapi->status_mapping;
7980 tw32(stblk + TG3_64BIT_REG_HIGH, mapping >> 32);
7981 tw32(stblk + TG3_64BIT_REG_LOW, mapping & 0xffffffff);
7983 /* Clear status block in ram. */
7984 memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
7986 if (tnapi->tx_ring) {
7987 tg3_set_bdinfo(tp, txrcb, tnapi->tx_desc_mapping,
7988 (TG3_TX_RING_SIZE <<
7989 BDINFO_FLAGS_MAXLEN_SHIFT),
7990 NIC_SRAM_TX_BUFFER_DESC);
7991 txrcb += TG3_BDINFO_SIZE;
7994 tg3_set_bdinfo(tp, rxrcb, tnapi->rx_rcb_mapping,
7995 ((tp->rx_ret_ring_mask + 1) <<
7996 BDINFO_FLAGS_MAXLEN_SHIFT), 0);
7998 stblk += 8;
7999 rxrcb += TG3_BDINFO_SIZE;
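/* Each extra vector's status block address registers are one 64-bit
 * high/low pair (8 bytes) past the previous vector's, hence the
 * stblk += 8 stepping above. */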
8003 static void tg3_setup_rxbd_thresholds(struct tg3 *tp)
8005 u32 val, bdcache_maxcnt, host_rep_thresh, nic_rep_thresh;
8007 if (!tg3_flag(tp, 5750_PLUS) ||
8008 tg3_flag(tp, 5780_CLASS) ||
8009 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750 ||
8010 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752)
8011 bdcache_maxcnt = TG3_SRAM_RX_STD_BDCACHE_SIZE_5700;
8012 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
8013 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787)
8014 bdcache_maxcnt = TG3_SRAM_RX_STD_BDCACHE_SIZE_5755;
8015 else
8016 bdcache_maxcnt = TG3_SRAM_RX_STD_BDCACHE_SIZE_5906;
8018 nic_rep_thresh = min(bdcache_maxcnt / 2, tp->rx_std_max_post);
8019 host_rep_thresh = max_t(u32, tp->rx_pending / 8, 1);
8021 val = min(nic_rep_thresh, host_rep_thresh);
8022 tw32(RCVBDI_STD_THRESH, val);
8024 if (tg3_flag(tp, 57765_PLUS))
8025 tw32(STD_REPLENISH_LWM, bdcache_maxcnt);
8027 if (!tg3_flag(tp, JUMBO_CAPABLE) || tg3_flag(tp, 5780_CLASS))
8028 return;
8030 if (!tg3_flag(tp, 5705_PLUS))
8031 bdcache_maxcnt = TG3_SRAM_RX_JMB_BDCACHE_SIZE_5700;
8032 else
8033 bdcache_maxcnt = TG3_SRAM_RX_JMB_BDCACHE_SIZE_5717;
8035 host_rep_thresh = max_t(u32, tp->rx_jumbo_pending / 8, 1);
8037 val = min(bdcache_maxcnt / 2, host_rep_thresh);
8038 tw32(RCVBDI_JUMBO_THRESH, val);
8040 if (tg3_flag(tp, 57765_PLUS))
8041 tw32(JMB_REPLENISH_LWM, bdcache_maxcnt);
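/* The programmed standard-ring threshold works out to
 * min(bdcache_maxcnt / 2, tp->rx_std_max_post, max(rx_pending / 8, 1)):
 * low enough that the on-chip BD cache is refilled before it drains,
 * high enough that replenish DMA stays batched.  The rationale is
 * inferred; only the arithmetic is in the code. */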
8044 /* tp->lock is held. */
8045 static int tg3_reset_hw(struct tg3 *tp, int reset_phy)
8047 u32 val, rdmac_mode;
8048 int i, err, limit;
8049 struct tg3_rx_prodring_set *tpr = &tp->napi[0].prodring;
8051 tg3_disable_ints(tp);
8053 tg3_stop_fw(tp);
8055 tg3_write_sig_pre_reset(tp, RESET_KIND_INIT);
8057 if (tg3_flag(tp, INIT_COMPLETE))
8058 tg3_abort_hw(tp, 1);
8060 /* Enable MAC control of LPI */
8061 if (tp->phy_flags & TG3_PHYFLG_EEE_CAP) {
8062 tw32_f(TG3_CPMU_EEE_LNKIDL_CTRL,
8063 TG3_CPMU_EEE_LNKIDL_PCIE_NL0 |
8064 TG3_CPMU_EEE_LNKIDL_UART_IDL);
8066 tw32_f(TG3_CPMU_EEE_CTRL,
8067 TG3_CPMU_EEE_CTRL_EXIT_20_1_US);
8069 val = TG3_CPMU_EEEMD_ERLY_L1_XIT_DET |
8070 TG3_CPMU_EEEMD_LPI_IN_TX |
8071 TG3_CPMU_EEEMD_LPI_IN_RX |
8072 TG3_CPMU_EEEMD_EEE_ENABLE;
8074 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5717)
8075 val |= TG3_CPMU_EEEMD_SND_IDX_DET_EN;
8077 if (tg3_flag(tp, ENABLE_APE))
8078 val |= TG3_CPMU_EEEMD_APE_TX_DET_EN;
8080 tw32_f(TG3_CPMU_EEE_MODE, val);
8082 tw32_f(TG3_CPMU_EEE_DBTMR1,
8083 TG3_CPMU_DBTMR1_PCIEXIT_2047US |
8084 TG3_CPMU_DBTMR1_LNKIDLE_2047US);
8086 tw32_f(TG3_CPMU_EEE_DBTMR2,
8087 TG3_CPMU_DBTMR2_APE_TX_2047US |
8088 TG3_CPMU_DBTMR2_TXIDXEQ_2047US);
8091 if (reset_phy)
8092 tg3_phy_reset(tp);
8094 err = tg3_chip_reset(tp);
8095 if (err)
8096 return err;
8098 tg3_write_sig_legacy(tp, RESET_KIND_INIT);
8100 if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5784_AX) {
8101 val = tr32(TG3_CPMU_CTRL);
8102 val &= ~(CPMU_CTRL_LINK_AWARE_MODE | CPMU_CTRL_LINK_IDLE_MODE);
8103 tw32(TG3_CPMU_CTRL, val);
8105 val = tr32(TG3_CPMU_LSPD_10MB_CLK);
8106 val &= ~CPMU_LSPD_10MB_MACCLK_MASK;
8107 val |= CPMU_LSPD_10MB_MACCLK_6_25;
8108 tw32(TG3_CPMU_LSPD_10MB_CLK, val);
8110 val = tr32(TG3_CPMU_LNK_AWARE_PWRMD);
8111 val &= ~CPMU_LNK_AWARE_MACCLK_MASK;
8112 val |= CPMU_LNK_AWARE_MACCLK_6_25;
8113 tw32(TG3_CPMU_LNK_AWARE_PWRMD, val);
8115 val = tr32(TG3_CPMU_HST_ACC);
8116 val &= ~CPMU_HST_ACC_MACCLK_MASK;
8117 val |= CPMU_HST_ACC_MACCLK_6_25;
8118 tw32(TG3_CPMU_HST_ACC, val);
8121 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780) {
8122 val = tr32(PCIE_PWR_MGMT_THRESH) & ~PCIE_PWR_MGMT_L1_THRESH_MSK;
8123 val |= PCIE_PWR_MGMT_EXT_ASPM_TMR_EN |
8124 PCIE_PWR_MGMT_L1_THRESH_4MS;
8125 tw32(PCIE_PWR_MGMT_THRESH, val);
8127 val = tr32(TG3_PCIE_EIDLE_DELAY) & ~TG3_PCIE_EIDLE_DELAY_MASK;
8128 tw32(TG3_PCIE_EIDLE_DELAY, val | TG3_PCIE_EIDLE_DELAY_13_CLKS);
8130 tw32(TG3_CORR_ERR_STAT, TG3_CORR_ERR_STAT_CLEAR);
8132 val = tr32(TG3_PCIE_LNKCTL) & ~TG3_PCIE_LNKCTL_L1_PLL_PD_EN;
8133 tw32(TG3_PCIE_LNKCTL, val | TG3_PCIE_LNKCTL_L1_PLL_PD_DIS);
8136 if (tg3_flag(tp, L1PLLPD_EN)) {
8137 u32 grc_mode = tr32(GRC_MODE);
8139 /* Access the lower 1K of PL PCIE block registers. */
8140 val = grc_mode & ~GRC_MODE_PCIE_PORT_MASK;
8141 tw32(GRC_MODE, val | GRC_MODE_PCIE_PL_SEL);
8143 val = tr32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_PL_LO_PHYCTL1);
8144 tw32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_PL_LO_PHYCTL1,
8145 val | TG3_PCIE_PL_LO_PHYCTL1_L1PLLPD_EN);
8147 tw32(GRC_MODE, grc_mode);
8150 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765) {
8151 if (tp->pci_chip_rev_id == CHIPREV_ID_57765_A0) {
8152 u32 grc_mode = tr32(GRC_MODE);
8154 /* Access the lower 1K of PL PCIE block registers. */
8155 val = grc_mode & ~GRC_MODE_PCIE_PORT_MASK;
8156 tw32(GRC_MODE, val | GRC_MODE_PCIE_PL_SEL);
8158 val = tr32(TG3_PCIE_TLDLPL_PORT +
8159 TG3_PCIE_PL_LO_PHYCTL5);
8160 tw32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_PL_LO_PHYCTL5,
8161 val | TG3_PCIE_PL_LO_PHYCTL5_DIS_L2CLKREQ);
8163 tw32(GRC_MODE, grc_mode);
8166 if (GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_57765_AX) {
8167 u32 grc_mode = tr32(GRC_MODE);
8169 /* Access the lower 1K of DL PCIE block registers. */
8170 val = grc_mode & ~GRC_MODE_PCIE_PORT_MASK;
8171 tw32(GRC_MODE, val | GRC_MODE_PCIE_DL_SEL);
8173 val = tr32(TG3_PCIE_TLDLPL_PORT +
8174 TG3_PCIE_DL_LO_FTSMAX);
8175 val &= ~TG3_PCIE_DL_LO_FTSMAX_MSK;
8176 tw32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_DL_LO_FTSMAX,
8177 val | TG3_PCIE_DL_LO_FTSMAX_VAL);
8179 tw32(GRC_MODE, grc_mode);
8182 val = tr32(TG3_CPMU_LSPD_10MB_CLK);
8183 val &= ~CPMU_LSPD_10MB_MACCLK_MASK;
8184 val |= CPMU_LSPD_10MB_MACCLK_6_25;
8185 tw32(TG3_CPMU_LSPD_10MB_CLK, val);
8188 /* This works around an issue with Athlon chipsets on
8189 * B3 tigon3 silicon. This bit has no effect on any
8190 * other revision. But do not set this on PCI Express
8191 * chips and don't even touch the clocks if the CPMU is present.
8193 if (!tg3_flag(tp, CPMU_PRESENT)) {
8194 if (!tg3_flag(tp, PCI_EXPRESS))
8195 tp->pci_clock_ctrl |= CLOCK_CTRL_DELAY_PCI_GRANT;
8196 tw32_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl);
8199 if (tp->pci_chip_rev_id == CHIPREV_ID_5704_A0 &&
8200 tg3_flag(tp, PCIX_MODE)) {
8201 val = tr32(TG3PCI_PCISTATE);
8202 val |= PCISTATE_RETRY_SAME_DMA;
8203 tw32(TG3PCI_PCISTATE, val);
8206 if (tg3_flag(tp, ENABLE_APE)) {
8207 /* Allow reads and writes to the
8208 * APE register and memory space.
8210 val = tr32(TG3PCI_PCISTATE);
8211 val |= PCISTATE_ALLOW_APE_CTLSPC_WR |
8212 PCISTATE_ALLOW_APE_SHMEM_WR |
8213 PCISTATE_ALLOW_APE_PSPACE_WR;
8214 tw32(TG3PCI_PCISTATE, val);
8217 if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5704_BX) {
8218 /* Enable some hw fixes. */
8219 val = tr32(TG3PCI_MSI_DATA);
8220 val |= (1 << 26) | (1 << 28) | (1 << 29);
8221 tw32(TG3PCI_MSI_DATA, val);
8224 /* Descriptor ring init may make accesses to the
8225 * NIC SRAM area to set up the TX descriptors, so we
8226 * can only do this after the hardware has been
8227 * successfully reset.
8229 err = tg3_init_rings(tp);
8230 if (err)
8231 return err;
8233 if (tg3_flag(tp, 57765_PLUS)) {
8234 val = tr32(TG3PCI_DMA_RW_CTRL) &
8235 ~DMA_RWCTRL_DIS_CACHE_ALIGNMENT;
8236 if (tp->pci_chip_rev_id == CHIPREV_ID_57765_A0)
8237 val &= ~DMA_RWCTRL_CRDRDR_RDMA_MRRS_MSK;
8238 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_57765 &&
8239 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5717)
8240 val |= DMA_RWCTRL_TAGGED_STAT_WA;
8241 tw32(TG3PCI_DMA_RW_CTRL, val | tp->dma_rwctrl);
8242 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5784 &&
8243 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5761) {
8244 /* This value is determined during the probe time DMA
8245 * engine test, tg3_test_dma.
8247 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
8250 tp->grc_mode &= ~(GRC_MODE_HOST_SENDBDS |
8251 GRC_MODE_4X_NIC_SEND_RINGS |
8252 GRC_MODE_NO_TX_PHDR_CSUM |
8253 GRC_MODE_NO_RX_PHDR_CSUM);
8254 tp->grc_mode |= GRC_MODE_HOST_SENDBDS;
8256 /* Pseudo-header checksum is done by hardware logic and not
8257 * the offload processors, so make the chip do the pseudo-
8258 * header checksums on receive. For transmit it is more
8259 * convenient to do the pseudo-header checksum in software
8260 * as Linux does that on transmit for us in all cases.
8262 tp->grc_mode |= GRC_MODE_NO_TX_PHDR_CSUM;
8264 tw32(GRC_MODE,
8265 tp->grc_mode |
8266 (GRC_MODE_IRQ_ON_MAC_ATTN | GRC_MODE_HOST_STACKUP));
8268 /* Set up the timer prescaler register.  The clock is always 66 MHz,
 * so a prescaler value of 65 yields a 1 MHz (1 us) timer tick.
 */
8269 val = tr32(GRC_MISC_CFG);
8270 val &= ~0xff;
8271 val |= (65 << GRC_MISC_CFG_PRESCALAR_SHIFT);
8272 tw32(GRC_MISC_CFG, val);
8274 /* Initialize MBUF/DESC pool. */
8275 if (tg3_flag(tp, 5750_PLUS)) {
8276 /* Do nothing. */
8277 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5705) {
8278 tw32(BUFMGR_MB_POOL_ADDR, NIC_SRAM_MBUF_POOL_BASE);
8279 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704)
8280 tw32(BUFMGR_MB_POOL_SIZE, NIC_SRAM_MBUF_POOL_SIZE64);
8281 else
8282 tw32(BUFMGR_MB_POOL_SIZE, NIC_SRAM_MBUF_POOL_SIZE96);
8283 tw32(BUFMGR_DMA_DESC_POOL_ADDR, NIC_SRAM_DMA_DESC_POOL_BASE);
8284 tw32(BUFMGR_DMA_DESC_POOL_SIZE, NIC_SRAM_DMA_DESC_POOL_SIZE);
8285 } else if (tg3_flag(tp, TSO_CAPABLE)) {
8286 int fw_len;
8288 fw_len = tp->fw_len;
8289 fw_len = (fw_len + (0x80 - 1)) & ~(0x80 - 1);
8290 tw32(BUFMGR_MB_POOL_ADDR,
8291 NIC_SRAM_MBUF_POOL_BASE5705 + fw_len);
8292 tw32(BUFMGR_MB_POOL_SIZE,
8293 NIC_SRAM_MBUF_POOL_SIZE5705 - fw_len - 0xa00);
8296 if (tp->dev->mtu <= ETH_DATA_LEN) {
8297 tw32(BUFMGR_MB_RDMA_LOW_WATER,
8298 tp->bufmgr_config.mbuf_read_dma_low_water);
8299 tw32(BUFMGR_MB_MACRX_LOW_WATER,
8300 tp->bufmgr_config.mbuf_mac_rx_low_water);
8301 tw32(BUFMGR_MB_HIGH_WATER,
8302 tp->bufmgr_config.mbuf_high_water);
8303 } else {
8304 tw32(BUFMGR_MB_RDMA_LOW_WATER,
8305 tp->bufmgr_config.mbuf_read_dma_low_water_jumbo);
8306 tw32(BUFMGR_MB_MACRX_LOW_WATER,
8307 tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo);
8308 tw32(BUFMGR_MB_HIGH_WATER,
8309 tp->bufmgr_config.mbuf_high_water_jumbo);
8311 tw32(BUFMGR_DMA_LOW_WATER,
8312 tp->bufmgr_config.dma_low_water);
8313 tw32(BUFMGR_DMA_HIGH_WATER,
8314 tp->bufmgr_config.dma_high_water);
8316 val = BUFMGR_MODE_ENABLE | BUFMGR_MODE_ATTN_ENABLE;
8317 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719)
8318 val |= BUFMGR_MODE_NO_TX_UNDERRUN;
8319 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
8320 tp->pci_chip_rev_id == CHIPREV_ID_5719_A0 ||
8321 tp->pci_chip_rev_id == CHIPREV_ID_5720_A0)
8322 val |= BUFMGR_MODE_MBLOW_ATTN_ENAB;
8323 tw32(BUFMGR_MODE, val);
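/* Poll for up to 20 ms (2000 * 10 us) for the buffer manager to
 * report itself enabled before giving up.
 */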
8324 for (i = 0; i < 2000; i++) {
8325 if (tr32(BUFMGR_MODE) & BUFMGR_MODE_ENABLE)
8326 break;
8327 udelay(10);
8329 if (i >= 2000) {
8330 netdev_err(tp->dev, "%s cannot enable BUFMGR\n", __func__);
8331 return -ENODEV;
8334 if (tp->pci_chip_rev_id == CHIPREV_ID_5906_A1)
8335 tw32(ISO_PKT_TX, (tr32(ISO_PKT_TX) & ~0x3) | 0x2);
8337 tg3_setup_rxbd_thresholds(tp);
8339 /* Initialize TG3_BDINFO's at:
8340 * RCVDBDI_STD_BD: standard eth size rx ring
8341 * RCVDBDI_JUMBO_BD: jumbo frame rx ring
8342 * RCVDBDI_MINI_BD: small frame rx ring (??? does not work)
8344 * like so:
8345 * TG3_BDINFO_HOST_ADDR: high/low parts of DMA address of ring
8346 * TG3_BDINFO_MAXLEN_FLAGS: (rx max buffer size << 16) |
8347 * ring attribute flags
8348 * TG3_BDINFO_NIC_ADDR: location of descriptors in nic SRAM
8350 * Standard receive ring @ NIC_SRAM_RX_BUFFER_DESC, 512 entries.
8351 * Jumbo receive ring @ NIC_SRAM_RX_JUMBO_BUFFER_DESC, 256 entries.
8353 * The size of each ring is fixed in the firmware, but the location is
8354 * configurable.
8356 tw32(RCVDBDI_STD_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH,
8357 ((u64) tpr->rx_std_mapping >> 32));
8358 tw32(RCVDBDI_STD_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW,
8359 ((u64) tpr->rx_std_mapping & 0xffffffff));
8360 if (!tg3_flag(tp, 5717_PLUS))
8361 tw32(RCVDBDI_STD_BD + TG3_BDINFO_NIC_ADDR,
8362 NIC_SRAM_RX_BUFFER_DESC);
8364 /* Disable the mini ring */
8365 if (!tg3_flag(tp, 5705_PLUS))
8366 tw32(RCVDBDI_MINI_BD + TG3_BDINFO_MAXLEN_FLAGS,
8367 BDINFO_FLAGS_DISABLED);
8369 /* Program the jumbo buffer descriptor ring control
8370 * blocks on those devices that have them.
8372 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
8373 (tg3_flag(tp, JUMBO_CAPABLE) && !tg3_flag(tp, 5780_CLASS))) {
8375 if (tg3_flag(tp, JUMBO_RING_ENABLE)) {
8376 tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH,
8377 ((u64) tpr->rx_jmb_mapping >> 32));
8378 tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW,
8379 ((u64) tpr->rx_jmb_mapping & 0xffffffff));
8380 val = TG3_RX_JMB_RING_SIZE(tp) <<
8381 BDINFO_FLAGS_MAXLEN_SHIFT;
8382 tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_MAXLEN_FLAGS,
8383 val | BDINFO_FLAGS_USE_EXT_RECV);
8384 if (!tg3_flag(tp, USE_JUMBO_BDFLAG) ||
8385 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765)
8386 tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_NIC_ADDR,
8387 NIC_SRAM_RX_JUMBO_BUFFER_DESC);
8388 } else {
8389 tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_MAXLEN_FLAGS,
8390 BDINFO_FLAGS_DISABLED);
8393 if (tg3_flag(tp, 57765_PLUS)) {
8394 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765)
8395 val = TG3_RX_STD_MAX_SIZE_5700;
8396 else
8397 val = TG3_RX_STD_MAX_SIZE_5717;
8398 val <<= BDINFO_FLAGS_MAXLEN_SHIFT;
8399 val |= (TG3_RX_STD_DMA_SZ << 2);
8400 } else
8401 val = TG3_RX_STD_DMA_SZ << BDINFO_FLAGS_MAXLEN_SHIFT;
8402 } else
8403 val = TG3_RX_STD_MAX_SIZE_5700 << BDINFO_FLAGS_MAXLEN_SHIFT;
8405 tw32(RCVDBDI_STD_BD + TG3_BDINFO_MAXLEN_FLAGS, val);
8407 tpr->rx_std_prod_idx = tp->rx_pending;
8408 tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG, tpr->rx_std_prod_idx);
8410 tpr->rx_jmb_prod_idx =
8411 tg3_flag(tp, JUMBO_RING_ENABLE) ? tp->rx_jumbo_pending : 0;
8412 tw32_rx_mbox(TG3_RX_JMB_PROD_IDX_REG, tpr->rx_jmb_prod_idx);
8414 tg3_rings_reset(tp);
8416 /* Initialize MAC address and backoff seed. */
8417 __tg3_set_mac_addr(tp, 0);
8419 /* MTU + ethernet header + FCS + optional VLAN tag */
8420 tw32(MAC_RX_MTU_SIZE,
8421 tp->dev->mtu + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN);
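/* e.g. with a 1500-byte MTU: 1500 + 14 (ETH_HLEN) + 4 (FCS) +
 * 4 (VLAN tag) = 1522 bytes.
 */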
8423 /* The slot time is changed by tg3_setup_phy if we
8424 * run at gigabit with half duplex.
8426 val = (2 << TX_LENGTHS_IPG_CRS_SHIFT) |
8427 (6 << TX_LENGTHS_IPG_SHIFT) |
8428 (32 << TX_LENGTHS_SLOT_TIME_SHIFT);
8430 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720)
8431 val |= tr32(MAC_TX_LENGTHS) &
8432 (TX_LENGTHS_JMB_FRM_LEN_MSK |
8433 TX_LENGTHS_CNT_DWN_VAL_MSK);
8435 tw32(MAC_TX_LENGTHS, val);
8437 /* Receive rules. */
8438 tw32(MAC_RCV_RULE_CFG, RCV_RULE_CFG_DEFAULT_CLASS);
8439 tw32(RCVLPC_CONFIG, 0x0181);
8441 /* Calculate the RDMAC_MODE setting early, as we need it to determine
8442 * the RCVLPC_STATE_ENABLE mask.
8444 rdmac_mode = (RDMAC_MODE_ENABLE | RDMAC_MODE_TGTABORT_ENAB |
8445 RDMAC_MODE_MSTABORT_ENAB | RDMAC_MODE_PARITYERR_ENAB |
8446 RDMAC_MODE_ADDROFLOW_ENAB | RDMAC_MODE_FIFOOFLOW_ENAB |
8447 RDMAC_MODE_FIFOURUN_ENAB | RDMAC_MODE_FIFOOREAD_ENAB |
8448 RDMAC_MODE_LNGREAD_ENAB);
8450 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717)
8451 rdmac_mode |= RDMAC_MODE_MULT_DMA_RD_DIS;
8453 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
8454 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
8455 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780)
8456 rdmac_mode |= RDMAC_MODE_BD_SBD_CRPT_ENAB |
8457 RDMAC_MODE_MBUF_RBD_CRPT_ENAB |
8458 RDMAC_MODE_MBUF_SBD_CRPT_ENAB;
8460 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
8461 tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) {
8462 if (tg3_flag(tp, TSO_CAPABLE) &&
8463 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
8464 rdmac_mode |= RDMAC_MODE_FIFO_SIZE_128;
8465 } else if (!(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH) &&
8466 !tg3_flag(tp, IS_5788)) {
8467 rdmac_mode |= RDMAC_MODE_FIFO_LONG_BURST;
8471 if (tg3_flag(tp, PCI_EXPRESS))
8472 rdmac_mode |= RDMAC_MODE_FIFO_LONG_BURST;
8474 if (tg3_flag(tp, HW_TSO_1) ||
8475 tg3_flag(tp, HW_TSO_2) ||
8476 tg3_flag(tp, HW_TSO_3))
8477 rdmac_mode |= RDMAC_MODE_IPV4_LSO_EN;
8479 if (tg3_flag(tp, 57765_PLUS) ||
8480 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
8481 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780)
8482 rdmac_mode |= RDMAC_MODE_IPV6_LSO_EN;
8484 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720)
8485 rdmac_mode |= tr32(RDMAC_MODE) & RDMAC_MODE_H2BNC_VLAN_DET;
8487 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
8488 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
8489 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
8490 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780 ||
8491 tg3_flag(tp, 57765_PLUS)) {
8492 val = tr32(TG3_RDMA_RSRVCTRL_REG);
8493 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
8494 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720) {
8495 val &= ~(TG3_RDMA_RSRVCTRL_TXMRGN_MASK |
8496 TG3_RDMA_RSRVCTRL_FIFO_LWM_MASK |
8497 TG3_RDMA_RSRVCTRL_FIFO_HWM_MASK);
8498 val |= TG3_RDMA_RSRVCTRL_TXMRGN_320B |
8499 TG3_RDMA_RSRVCTRL_FIFO_LWM_1_5K |
8500 TG3_RDMA_RSRVCTRL_FIFO_HWM_1_5K;
8502 tw32(TG3_RDMA_RSRVCTRL_REG,
8503 val | TG3_RDMA_RSRVCTRL_FIFO_OFLW_FIX);
8506 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
8507 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720) {
8508 val = tr32(TG3_LSO_RD_DMA_CRPTEN_CTRL);
8509 tw32(TG3_LSO_RD_DMA_CRPTEN_CTRL, val |
8510 TG3_LSO_RD_DMA_CRPTEN_CTRL_BLEN_BD_4K |
8511 TG3_LSO_RD_DMA_CRPTEN_CTRL_BLEN_LSO_4K);
8514 /* Receive/send statistics. */
8515 if (tg3_flag(tp, 5750_PLUS)) {
8516 val = tr32(RCVLPC_STATS_ENABLE);
8517 val &= ~RCVLPC_STATSENAB_DACK_FIX;
8518 tw32(RCVLPC_STATS_ENABLE, val);
8519 } else if ((rdmac_mode & RDMAC_MODE_FIFO_SIZE_128) &&
8520 tg3_flag(tp, TSO_CAPABLE)) {
8521 val = tr32(RCVLPC_STATS_ENABLE);
8522 val &= ~RCVLPC_STATSENAB_LNGBRST_RFIX;
8523 tw32(RCVLPC_STATS_ENABLE, val);
8524 } else {
8525 tw32(RCVLPC_STATS_ENABLE, 0xffffff);
8527 tw32(RCVLPC_STATSCTRL, RCVLPC_STATSCTRL_ENABLE);
8528 tw32(SNDDATAI_STATSENAB, 0xffffff);
8529 tw32(SNDDATAI_STATSCTRL,
8530 (SNDDATAI_SCTRL_ENABLE |
8531 SNDDATAI_SCTRL_FASTUPD));
8533 /* Setup host coalescing engine. */
8534 tw32(HOSTCC_MODE, 0);
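/* Writing 0 disables the host coalescing engine; poll for up to
 * 20 ms for the enable bit to clear before reprogramming it below.
 */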
8535 for (i = 0; i < 2000; i++) {
8536 if (!(tr32(HOSTCC_MODE) & HOSTCC_MODE_ENABLE))
8537 break;
8538 udelay(10);
8541 __tg3_set_coalesce(tp, &tp->coal);
8543 if (!tg3_flag(tp, 5705_PLUS)) {
8544 /* Status/statistics block address. See tg3_timer,
8545 * the tg3_periodic_fetch_stats call there, and
8546 * tg3_get_stats to see how this works for 5705/5750 chips.
8548 tw32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH,
8549 ((u64) tp->stats_mapping >> 32));
8550 tw32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW,
8551 ((u64) tp->stats_mapping & 0xffffffff));
8552 tw32(HOSTCC_STATS_BLK_NIC_ADDR, NIC_SRAM_STATS_BLK);
8554 tw32(HOSTCC_STATUS_BLK_NIC_ADDR, NIC_SRAM_STATUS_BLK);
8556 /* Clear statistics and status block memory areas */
8557 for (i = NIC_SRAM_STATS_BLK;
8558 i < NIC_SRAM_STATUS_BLK + TG3_HW_STATUS_SIZE;
8559 i += sizeof(u32)) {
8560 tg3_write_mem(tp, i, 0);
8561 udelay(40);
8565 tw32(HOSTCC_MODE, HOSTCC_MODE_ENABLE | tp->coalesce_mode);
8567 tw32(RCVCC_MODE, RCVCC_MODE_ENABLE | RCVCC_MODE_ATTN_ENABLE);
8568 tw32(RCVLPC_MODE, RCVLPC_MODE_ENABLE);
8569 if (!tg3_flag(tp, 5705_PLUS))
8570 tw32(RCVLSC_MODE, RCVLSC_MODE_ENABLE | RCVLSC_MODE_ATTN_ENABLE);
8572 if (tp->phy_flags & TG3_PHYFLG_MII_SERDES) {
8573 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
8574 /* reset to prevent losing the 1st rx packet intermittently */
8575 tw32_f(MAC_RX_MODE, RX_MODE_RESET);
8576 udelay(10);
8579 tp->mac_mode |= MAC_MODE_TXSTAT_ENABLE | MAC_MODE_RXSTAT_ENABLE |
8580 MAC_MODE_TDE_ENABLE | MAC_MODE_RDE_ENABLE |
8581 MAC_MODE_FHDE_ENABLE;
8582 if (tg3_flag(tp, ENABLE_APE))
8583 tp->mac_mode |= MAC_MODE_APE_TX_EN | MAC_MODE_APE_RX_EN;
8584 if (!tg3_flag(tp, 5705_PLUS) &&
8585 !(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
8586 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700)
8587 tp->mac_mode |= MAC_MODE_LINK_POLARITY;
8588 tw32_f(MAC_MODE, tp->mac_mode | MAC_MODE_RXSTAT_CLEAR | MAC_MODE_TXSTAT_CLEAR);
8589 udelay(40);
8591 /* tp->grc_local_ctrl is partially set up during tg3_get_invariants().
8592 * If TG3_FLAG_IS_NIC is zero, we should read the
8593 * register to preserve the GPIO settings for LOMs. The GPIOs,
8594 * whether used as inputs or outputs, are set by boot code after
8595 * reset.
8597 if (!tg3_flag(tp, IS_NIC)) {
8598 u32 gpio_mask;
8600 gpio_mask = GRC_LCLCTRL_GPIO_OE0 | GRC_LCLCTRL_GPIO_OE1 |
8601 GRC_LCLCTRL_GPIO_OE2 | GRC_LCLCTRL_GPIO_OUTPUT0 |
8602 GRC_LCLCTRL_GPIO_OUTPUT1 | GRC_LCLCTRL_GPIO_OUTPUT2;
8604 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752)
8605 gpio_mask |= GRC_LCLCTRL_GPIO_OE3 |
8606 GRC_LCLCTRL_GPIO_OUTPUT3;
8608 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755)
8609 gpio_mask |= GRC_LCLCTRL_GPIO_UART_SEL;
8611 tp->grc_local_ctrl &= ~gpio_mask;
8612 tp->grc_local_ctrl |= tr32(GRC_LOCAL_CTRL) & gpio_mask;
8614 /* GPIO1 must be driven high for eeprom write protect */
8615 if (tg3_flag(tp, EEPROM_WRITE_PROT))
8616 tp->grc_local_ctrl |= (GRC_LCLCTRL_GPIO_OE1 |
8617 GRC_LCLCTRL_GPIO_OUTPUT1);
8619 tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
8620 udelay(100);
8622 if (tg3_flag(tp, USING_MSIX) && tp->irq_cnt > 1) {
8623 val = tr32(MSGINT_MODE);
8624 val |= MSGINT_MODE_MULTIVEC_EN | MSGINT_MODE_ENABLE;
8625 tw32(MSGINT_MODE, val);
8628 if (!tg3_flag(tp, 5705_PLUS)) {
8629 tw32_f(DMAC_MODE, DMAC_MODE_ENABLE);
8630 udelay(40);
8633 val = (WDMAC_MODE_ENABLE | WDMAC_MODE_TGTABORT_ENAB |
8634 WDMAC_MODE_MSTABORT_ENAB | WDMAC_MODE_PARITYERR_ENAB |
8635 WDMAC_MODE_ADDROFLOW_ENAB | WDMAC_MODE_FIFOOFLOW_ENAB |
8636 WDMAC_MODE_FIFOURUN_ENAB | WDMAC_MODE_FIFOOREAD_ENAB |
8637 WDMAC_MODE_LNGREAD_ENAB);
8639 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
8640 tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) {
8641 if (tg3_flag(tp, TSO_CAPABLE) &&
8642 (tp->pci_chip_rev_id == CHIPREV_ID_5705_A1 ||
8643 tp->pci_chip_rev_id == CHIPREV_ID_5705_A2)) {
8644 /* nothing */
8645 } else if (!(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH) &&
8646 !tg3_flag(tp, IS_5788)) {
8647 val |= WDMAC_MODE_RX_ACCEL;
8651 /* Enable host coalescing bug fix */
8652 if (tg3_flag(tp, 5755_PLUS))
8653 val |= WDMAC_MODE_STATUS_TAG_FIX;
8655 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
8656 val |= WDMAC_MODE_BURST_ALL_DATA;
8658 tw32_f(WDMAC_MODE, val);
8659 udelay(40);
8661 if (tg3_flag(tp, PCIX_MODE)) {
8662 u16 pcix_cmd;
8664 pci_read_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
8665 &pcix_cmd);
8666 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703) {
8667 pcix_cmd &= ~PCI_X_CMD_MAX_READ;
8668 pcix_cmd |= PCI_X_CMD_READ_2K;
8669 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
8670 pcix_cmd &= ~(PCI_X_CMD_MAX_SPLIT | PCI_X_CMD_MAX_READ);
8671 pcix_cmd |= PCI_X_CMD_READ_2K;
8673 pci_write_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
8674 pcix_cmd);
8677 tw32_f(RDMAC_MODE, rdmac_mode);
8678 udelay(40);
8680 tw32(RCVDCC_MODE, RCVDCC_MODE_ENABLE | RCVDCC_MODE_ATTN_ENABLE);
8681 if (!tg3_flag(tp, 5705_PLUS))
8682 tw32(MBFREE_MODE, MBFREE_MODE_ENABLE);
8684 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
8685 tw32(SNDDATAC_MODE,
8686 SNDDATAC_MODE_ENABLE | SNDDATAC_MODE_CDELAY);
8687 else
8688 tw32(SNDDATAC_MODE, SNDDATAC_MODE_ENABLE);
8690 tw32(SNDBDC_MODE, SNDBDC_MODE_ENABLE | SNDBDC_MODE_ATTN_ENABLE);
8691 tw32(RCVBDI_MODE, RCVBDI_MODE_ENABLE | RCVBDI_MODE_RCB_ATTN_ENAB);
8692 val = RCVDBDI_MODE_ENABLE | RCVDBDI_MODE_INV_RING_SZ;
8693 if (tg3_flag(tp, LRG_PROD_RING_CAP))
8694 val |= RCVDBDI_MODE_LRG_RING_SZ;
8695 tw32(RCVDBDI_MODE, val);
8696 tw32(SNDDATAI_MODE, SNDDATAI_MODE_ENABLE);
8697 if (tg3_flag(tp, HW_TSO_1) ||
8698 tg3_flag(tp, HW_TSO_2) ||
8699 tg3_flag(tp, HW_TSO_3))
8700 tw32(SNDDATAI_MODE, SNDDATAI_MODE_ENABLE | 0x8);
8701 val = SNDBDI_MODE_ENABLE | SNDBDI_MODE_ATTN_ENABLE;
8702 if (tg3_flag(tp, ENABLE_TSS))
8703 val |= SNDBDI_MODE_MULTI_TXQ_EN;
8704 tw32(SNDBDI_MODE, val);
8705 tw32(SNDBDS_MODE, SNDBDS_MODE_ENABLE | SNDBDS_MODE_ATTN_ENABLE);
8707 if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0) {
8708 err = tg3_load_5701_a0_firmware_fix(tp);
8709 if (err)
8710 return err;
8713 if (tg3_flag(tp, TSO_CAPABLE)) {
8714 err = tg3_load_tso_firmware(tp);
8715 if (err)
8716 return err;
8719 tp->tx_mode = TX_MODE_ENABLE;
8721 if (tg3_flag(tp, 5755_PLUS) ||
8722 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
8723 tp->tx_mode |= TX_MODE_MBUF_LOCKUP_FIX;
8725 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720) {
8726 val = TX_MODE_JMB_FRM_LEN | TX_MODE_CNT_DN_MODE;
8727 tp->tx_mode &= ~val;
8728 tp->tx_mode |= tr32(MAC_TX_MODE) & val;
8731 tw32_f(MAC_TX_MODE, tp->tx_mode);
8732 udelay(100);
8734 if (tg3_flag(tp, ENABLE_RSS)) {
8735 int i = 0;
8736 u32 reg = MAC_RSS_INDIR_TBL_0;
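/* Each 32-bit indirection table register packs eight 4-bit rx ring
 * indices.  With a single rx ring (irq_cnt == 2, vector 0 handles
 * link interrupts) the whole table is zeroed; otherwise entries are
 * assigned round-robin across the irq_cnt - 1 rx rings.
 */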
8738 if (tp->irq_cnt == 2) {
8739 for (i = 0; i < TG3_RSS_INDIR_TBL_SIZE; i += 8) {
8740 tw32(reg, 0x0);
8741 reg += 4;
8743 } else {
8744 u32 val;
8746 while (i < TG3_RSS_INDIR_TBL_SIZE) {
8747 val = i % (tp->irq_cnt - 1);
8748 i++;
8749 for (; i % 8; i++) {
8750 val <<= 4;
8751 val |= (i % (tp->irq_cnt - 1));
8753 tw32(reg, val);
8754 reg += 4;
8758 /* Set up the "secret" hash key. */
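/* The ten 32-bit registers below hold the 40-byte RSS hash key. */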
8759 tw32(MAC_RSS_HASH_KEY_0, 0x5f865437);
8760 tw32(MAC_RSS_HASH_KEY_1, 0xe4ac62cc);
8761 tw32(MAC_RSS_HASH_KEY_2, 0x50103a45);
8762 tw32(MAC_RSS_HASH_KEY_3, 0x36621985);
8763 tw32(MAC_RSS_HASH_KEY_4, 0xbf14c0e8);
8764 tw32(MAC_RSS_HASH_KEY_5, 0x1bc27a1e);
8765 tw32(MAC_RSS_HASH_KEY_6, 0x84f4b556);
8766 tw32(MAC_RSS_HASH_KEY_7, 0x094ea6fe);
8767 tw32(MAC_RSS_HASH_KEY_8, 0x7dda01e7);
8768 tw32(MAC_RSS_HASH_KEY_9, 0xc04d7481);
8771 tp->rx_mode = RX_MODE_ENABLE;
8772 if (tg3_flag(tp, 5755_PLUS))
8773 tp->rx_mode |= RX_MODE_IPV6_CSUM_ENABLE;
8775 if (tg3_flag(tp, ENABLE_RSS))
8776 tp->rx_mode |= RX_MODE_RSS_ENABLE |
8777 RX_MODE_RSS_ITBL_HASH_BITS_7 |
8778 RX_MODE_RSS_IPV6_HASH_EN |
8779 RX_MODE_RSS_TCP_IPV6_HASH_EN |
8780 RX_MODE_RSS_IPV4_HASH_EN |
8781 RX_MODE_RSS_TCP_IPV4_HASH_EN;
8783 tw32_f(MAC_RX_MODE, tp->rx_mode);
8784 udelay(10);
8786 tw32(MAC_LED_CTRL, tp->led_ctrl);
8788 tw32(MAC_MI_STAT, MAC_MI_STAT_LNKSTAT_ATTN_ENAB);
8789 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
8790 tw32_f(MAC_RX_MODE, RX_MODE_RESET);
8791 udelay(10);
8793 tw32_f(MAC_RX_MODE, tp->rx_mode);
8794 udelay(10);
8796 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
8797 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) &&
8798 !(tp->phy_flags & TG3_PHYFLG_SERDES_PREEMPHASIS)) {
8799 /* Set drive transmission level to 1.2V */
8800 /* only if the signal pre-emphasis bit is not set */
8801 val = tr32(MAC_SERDES_CFG);
8802 val &= 0xfffff000;
8803 val |= 0x880;
8804 tw32(MAC_SERDES_CFG, val);
8806 if (tp->pci_chip_rev_id == CHIPREV_ID_5703_A1)
8807 tw32(MAC_SERDES_CFG, 0x616000);
8810 /* Prevent chip from dropping frames when flow control
8811 * is enabled.
8813 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765)
8814 val = 1;
8815 else
8816 val = 2;
8817 tw32_f(MAC_LOW_WMARK_MAX_RX_FRAME, val);
8819 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 &&
8820 (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)) {
8821 /* Use hardware link auto-negotiation */
8822 tg3_flag_set(tp, HW_AUTONEG);
8825 if ((tp->phy_flags & TG3_PHYFLG_MII_SERDES) &&
8826 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714) {
8827 u32 tmp;
8829 tmp = tr32(SERDES_RX_CTRL);
8830 tw32(SERDES_RX_CTRL, tmp | SERDES_RX_SIG_DETECT);
8831 tp->grc_local_ctrl &= ~GRC_LCLCTRL_USE_EXT_SIG_DETECT;
8832 tp->grc_local_ctrl |= GRC_LCLCTRL_USE_SIG_DETECT;
8833 tw32(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
8836 if (!tg3_flag(tp, USE_PHYLIB)) {
8837 if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) {
8838 tp->phy_flags &= ~TG3_PHYFLG_IS_LOW_POWER;
8839 tp->link_config.speed = tp->link_config.orig_speed;
8840 tp->link_config.duplex = tp->link_config.orig_duplex;
8841 tp->link_config.autoneg = tp->link_config.orig_autoneg;
8844 err = tg3_setup_phy(tp, 0);
8845 if (err)
8846 return err;
8848 if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
8849 !(tp->phy_flags & TG3_PHYFLG_IS_FET)) {
8850 u32 tmp;
8852 /* Clear CRC stats. */
8853 if (!tg3_readphy(tp, MII_TG3_TEST1, &tmp)) {
8854 tg3_writephy(tp, MII_TG3_TEST1,
8855 tmp | MII_TG3_TEST1_CRC_EN);
8856 tg3_readphy(tp, MII_TG3_RXR_COUNTERS, &tmp);
8861 __tg3_set_rx_mode(tp->dev);
8863 /* Initialize receive rules. */
8864 tw32(MAC_RCV_RULE_0, 0xc2000000 & RCV_RULE_DISABLE_MASK);
8865 tw32(MAC_RCV_VALUE_0, 0xffffffff & RCV_RULE_DISABLE_MASK);
8866 tw32(MAC_RCV_RULE_1, 0x86000004 & RCV_RULE_DISABLE_MASK);
8867 tw32(MAC_RCV_VALUE_1, 0xffffffff & RCV_RULE_DISABLE_MASK);
8869 if (tg3_flag(tp, 5705_PLUS) && !tg3_flag(tp, 5780_CLASS))
8870 limit = 8;
8871 else
8872 limit = 16;
8873 if (tg3_flag(tp, ENABLE_ASF))
8874 limit -= 4;
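/* Each case below intentionally falls through, so every rule from
 * limit - 1 down to 4 is cleared.  Rules 0 and 1 were programmed
 * above; the clears for rules 3 and 2 are deliberately commented out.
 */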
8875 switch (limit) {
8876 case 16:
8877 tw32(MAC_RCV_RULE_15, 0); tw32(MAC_RCV_VALUE_15, 0);
8878 case 15:
8879 tw32(MAC_RCV_RULE_14, 0); tw32(MAC_RCV_VALUE_14, 0);
8880 case 14:
8881 tw32(MAC_RCV_RULE_13, 0); tw32(MAC_RCV_VALUE_13, 0);
8882 case 13:
8883 tw32(MAC_RCV_RULE_12, 0); tw32(MAC_RCV_VALUE_12, 0);
8884 case 12:
8885 tw32(MAC_RCV_RULE_11, 0); tw32(MAC_RCV_VALUE_11, 0);
8886 case 11:
8887 tw32(MAC_RCV_RULE_10, 0); tw32(MAC_RCV_VALUE_10, 0);
8888 case 10:
8889 tw32(MAC_RCV_RULE_9, 0); tw32(MAC_RCV_VALUE_9, 0);
8890 case 9:
8891 tw32(MAC_RCV_RULE_8, 0); tw32(MAC_RCV_VALUE_8, 0);
8892 case 8:
8893 tw32(MAC_RCV_RULE_7, 0); tw32(MAC_RCV_VALUE_7, 0);
8894 case 7:
8895 tw32(MAC_RCV_RULE_6, 0); tw32(MAC_RCV_VALUE_6, 0);
8896 case 6:
8897 tw32(MAC_RCV_RULE_5, 0); tw32(MAC_RCV_VALUE_5, 0);
8898 case 5:
8899 tw32(MAC_RCV_RULE_4, 0); tw32(MAC_RCV_VALUE_4, 0);
8900 case 4:
8901 /* tw32(MAC_RCV_RULE_3, 0); tw32(MAC_RCV_VALUE_3, 0); */
8902 case 3:
8903 /* tw32(MAC_RCV_RULE_2, 0); tw32(MAC_RCV_VALUE_2, 0); */
8904 case 2:
8905 case 1:
8907 default:
8908 break;
8911 if (tg3_flag(tp, ENABLE_APE))
8912 /* Write our heartbeat update interval to APE. */
8913 tg3_ape_write32(tp, TG3_APE_HOST_HEARTBEAT_INT_MS,
8914 APE_HOST_HEARTBEAT_INT_DISABLE);
8916 tg3_write_sig_post_reset(tp, RESET_KIND_INIT);
8918 return 0;
8921 /* Called at device open time to get the chip ready for
8922 * packet processing. Invoked with tp->lock held.
8924 static int tg3_init_hw(struct tg3 *tp, int reset_phy)
8926 tg3_switch_clocks(tp);
8928 tw32(TG3PCI_MEM_WIN_BASE_ADDR, 0);
8930 return tg3_reset_hw(tp, reset_phy);
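/* Fold a 32-bit (clear-on-read) hardware counter into a 64-bit
 * high/low software counter; unsigned wraparound of ->low after the
 * addition signals a carry into ->high.
 */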
8933 #define TG3_STAT_ADD32(PSTAT, REG) \
8934 do { u32 __val = tr32(REG); \
8935 (PSTAT)->low += __val; \
8936 if ((PSTAT)->low < __val) \
8937 (PSTAT)->high += 1; \
8938 } while (0)
8940 static void tg3_periodic_fetch_stats(struct tg3 *tp)
8942 struct tg3_hw_stats *sp = tp->hw_stats;
8944 if (!netif_carrier_ok(tp->dev))
8945 return;
8947 TG3_STAT_ADD32(&sp->tx_octets, MAC_TX_STATS_OCTETS);
8948 TG3_STAT_ADD32(&sp->tx_collisions, MAC_TX_STATS_COLLISIONS);
8949 TG3_STAT_ADD32(&sp->tx_xon_sent, MAC_TX_STATS_XON_SENT);
8950 TG3_STAT_ADD32(&sp->tx_xoff_sent, MAC_TX_STATS_XOFF_SENT);
8951 TG3_STAT_ADD32(&sp->tx_mac_errors, MAC_TX_STATS_MAC_ERRORS);
8952 TG3_STAT_ADD32(&sp->tx_single_collisions, MAC_TX_STATS_SINGLE_COLLISIONS);
8953 TG3_STAT_ADD32(&sp->tx_mult_collisions, MAC_TX_STATS_MULT_COLLISIONS);
8954 TG3_STAT_ADD32(&sp->tx_deferred, MAC_TX_STATS_DEFERRED);
8955 TG3_STAT_ADD32(&sp->tx_excessive_collisions, MAC_TX_STATS_EXCESSIVE_COL);
8956 TG3_STAT_ADD32(&sp->tx_late_collisions, MAC_TX_STATS_LATE_COL);
8957 TG3_STAT_ADD32(&sp->tx_ucast_packets, MAC_TX_STATS_UCAST);
8958 TG3_STAT_ADD32(&sp->tx_mcast_packets, MAC_TX_STATS_MCAST);
8959 TG3_STAT_ADD32(&sp->tx_bcast_packets, MAC_TX_STATS_BCAST);
8961 TG3_STAT_ADD32(&sp->rx_octets, MAC_RX_STATS_OCTETS);
8962 TG3_STAT_ADD32(&sp->rx_fragments, MAC_RX_STATS_FRAGMENTS);
8963 TG3_STAT_ADD32(&sp->rx_ucast_packets, MAC_RX_STATS_UCAST);
8964 TG3_STAT_ADD32(&sp->rx_mcast_packets, MAC_RX_STATS_MCAST);
8965 TG3_STAT_ADD32(&sp->rx_bcast_packets, MAC_RX_STATS_BCAST);
8966 TG3_STAT_ADD32(&sp->rx_fcs_errors, MAC_RX_STATS_FCS_ERRORS);
8967 TG3_STAT_ADD32(&sp->rx_align_errors, MAC_RX_STATS_ALIGN_ERRORS);
8968 TG3_STAT_ADD32(&sp->rx_xon_pause_rcvd, MAC_RX_STATS_XON_PAUSE_RECVD);
8969 TG3_STAT_ADD32(&sp->rx_xoff_pause_rcvd, MAC_RX_STATS_XOFF_PAUSE_RECVD);
8970 TG3_STAT_ADD32(&sp->rx_mac_ctrl_rcvd, MAC_RX_STATS_MAC_CTRL_RECVD);
8971 TG3_STAT_ADD32(&sp->rx_xoff_entered, MAC_RX_STATS_XOFF_ENTERED);
8972 TG3_STAT_ADD32(&sp->rx_frame_too_long_errors, MAC_RX_STATS_FRAME_TOO_LONG);
8973 TG3_STAT_ADD32(&sp->rx_jabbers, MAC_RX_STATS_JABBERS);
8974 TG3_STAT_ADD32(&sp->rx_undersize_packets, MAC_RX_STATS_UNDERSIZE);
8976 TG3_STAT_ADD32(&sp->rxbds_empty, RCVLPC_NO_RCV_BD_CNT);
8977 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5717 &&
8978 tp->pci_chip_rev_id != CHIPREV_ID_5719_A0 &&
8979 tp->pci_chip_rev_id != CHIPREV_ID_5720_A0) {
8980 TG3_STAT_ADD32(&sp->rx_discards, RCVLPC_IN_DISCARDS_CNT);
8981 } else {
8982 u32 val = tr32(HOSTCC_FLOW_ATTN);
8983 val = (val & HOSTCC_FLOW_ATTN_MBUF_LWM) ? 1 : 0;
8984 if (val) {
8985 tw32(HOSTCC_FLOW_ATTN, HOSTCC_FLOW_ATTN_MBUF_LWM);
8986 sp->rx_discards.low += val;
8987 if (sp->rx_discards.low < val)
8988 sp->rx_discards.high += 1;
8990 sp->mbuf_lwm_thresh_hit = sp->rx_discards;
8992 TG3_STAT_ADD32(&sp->rx_errors, RCVLPC_IN_ERRORS_CNT);
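/* 5717/57765-class chips can occasionally miss an MSI.  If a ring
 * still has work pending but has made no rx/tx progress since the
 * last timer tick, rewrite the interrupt mailbox to re-trigger the
 * vector.
 */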
8995 static void tg3_chk_missed_msi(struct tg3 *tp)
8997 u32 i;
8999 for (i = 0; i < tp->irq_cnt; i++) {
9000 struct tg3_napi *tnapi = &tp->napi[i];
9002 if (tg3_has_work(tnapi)) {
9003 if (tnapi->last_rx_cons == tnapi->rx_rcb_ptr &&
9004 tnapi->last_tx_cons == tnapi->tx_cons) {
9005 if (tnapi->chk_msi_cnt < 1) {
9006 tnapi->chk_msi_cnt++;
9007 return;
9009 tw32_mailbox(tnapi->int_mbox,
9010 tnapi->last_tag << 24);
9013 tnapi->chk_msi_cnt = 0;
9014 tnapi->last_rx_cons = tnapi->rx_rcb_ptr;
9015 tnapi->last_tx_cons = tnapi->tx_cons;
9019 static void tg3_timer(unsigned long __opaque)
9021 struct tg3 *tp = (struct tg3 *) __opaque;
9023 if (tp->irq_sync)
9024 goto restart_timer;
9026 spin_lock(&tp->lock);
9028 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
9029 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765)
9030 tg3_chk_missed_msi(tp);
9032 if (!tg3_flag(tp, TAGGED_STATUS)) {
9033 /* All of this garbage is needed because, when using non-tagged
9034 * IRQ status, the mailbox/status_block protocol the chip
9035 * uses with the CPU is race prone.
9037 if (tp->napi[0].hw_status->status & SD_STATUS_UPDATED) {
9038 tw32(GRC_LOCAL_CTRL,
9039 tp->grc_local_ctrl | GRC_LCLCTRL_SETINT);
9040 } else {
9041 tw32(HOSTCC_MODE, tp->coalesce_mode |
9042 HOSTCC_MODE_ENABLE | HOSTCC_MODE_NOW);
9045 if (!(tr32(WDMAC_MODE) & WDMAC_MODE_ENABLE)) {
9046 tg3_flag_set(tp, RESTART_TIMER);
9047 spin_unlock(&tp->lock);
9048 schedule_work(&tp->reset_task);
9049 return;
9053 /* This part only runs once per second. */
9054 if (!--tp->timer_counter) {
9055 if (tg3_flag(tp, 5705_PLUS))
9056 tg3_periodic_fetch_stats(tp);
9058 if (tp->setlpicnt && !--tp->setlpicnt)
9059 tg3_phy_eee_enable(tp);
9061 if (tg3_flag(tp, USE_LINKCHG_REG)) {
9062 u32 mac_stat;
9063 int phy_event;
9065 mac_stat = tr32(MAC_STATUS);
9067 phy_event = 0;
9068 if (tp->phy_flags & TG3_PHYFLG_USE_MI_INTERRUPT) {
9069 if (mac_stat & MAC_STATUS_MI_INTERRUPT)
9070 phy_event = 1;
9071 } else if (mac_stat & MAC_STATUS_LNKSTATE_CHANGED)
9072 phy_event = 1;
9074 if (phy_event)
9075 tg3_setup_phy(tp, 0);
9076 } else if (tg3_flag(tp, POLL_SERDES)) {
9077 u32 mac_stat = tr32(MAC_STATUS);
9078 int need_setup = 0;
9080 if (netif_carrier_ok(tp->dev) &&
9081 (mac_stat & MAC_STATUS_LNKSTATE_CHANGED)) {
9082 need_setup = 1;
9084 if (!netif_carrier_ok(tp->dev) &&
9085 (mac_stat & (MAC_STATUS_PCS_SYNCED |
9086 MAC_STATUS_SIGNAL_DET))) {
9087 need_setup = 1;
9089 if (need_setup) {
9090 if (!tp->serdes_counter) {
9091 tw32_f(MAC_MODE,
9092 (tp->mac_mode &
9093 ~MAC_MODE_PORT_MODE_MASK));
9094 udelay(40);
9095 tw32_f(MAC_MODE, tp->mac_mode);
9096 udelay(40);
9098 tg3_setup_phy(tp, 0);
9100 } else if ((tp->phy_flags & TG3_PHYFLG_MII_SERDES) &&
9101 tg3_flag(tp, 5780_CLASS)) {
9102 tg3_serdes_parallel_detect(tp);
9105 tp->timer_counter = tp->timer_multiplier;
9108 /* Heartbeat is only sent once every 2 seconds.
9110 * The heartbeat is to tell the ASF firmware that the host
9111 * driver is still alive. In the event that the OS crashes,
9112 * ASF needs to reset the hardware to free up the FIFO space
9113 * that may be filled with rx packets destined for the host.
9114 * If the FIFO is full, ASF will no longer function properly.
9116 * Unintended resets have been reported on real time kernels
9117 * where the timer doesn't run on time. Netpoll will also have
9118 * the same problem.
9120 * The new FWCMD_NICDRV_ALIVE3 command tells the ASF firmware
9121 * to check the ring condition when the heartbeat is expiring
9122 * before doing the reset. This will prevent most unintended
9123 * resets.
9125 if (!--tp->asf_counter) {
9126 if (tg3_flag(tp, ENABLE_ASF) && !tg3_flag(tp, ENABLE_APE)) {
9127 tg3_wait_for_event_ack(tp);
9129 tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX,
9130 FWCMD_NICDRV_ALIVE3);
9131 tg3_write_mem(tp, NIC_SRAM_FW_CMD_LEN_MBOX, 4);
9132 tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX,
9133 TG3_FW_UPDATE_TIMEOUT_SEC);
9135 tg3_generate_fw_event(tp);
9137 tp->asf_counter = tp->asf_multiplier;
9140 spin_unlock(&tp->lock);
9142 restart_timer:
9143 tp->timer.expires = jiffies + tp->timer_offset;
9144 add_timer(&tp->timer);
9147 static int tg3_request_irq(struct tg3 *tp, int irq_num)
9149 irq_handler_t fn;
9150 unsigned long flags;
9151 char *name;
9152 struct tg3_napi *tnapi = &tp->napi[irq_num];
9154 if (tp->irq_cnt == 1)
9155 name = tp->dev->name;
9156 else {
9157 name = &tnapi->irq_lbl[0];
9158 snprintf(name, IFNAMSIZ, "%s-%d", tp->dev->name, irq_num);
9159 name[IFNAMSIZ-1] = 0;
9162 if (tg3_flag(tp, USING_MSI) || tg3_flag(tp, USING_MSIX)) {
9163 fn = tg3_msi;
9164 if (tg3_flag(tp, 1SHOT_MSI))
9165 fn = tg3_msi_1shot;
9166 flags = 0;
9167 } else {
9168 fn = tg3_interrupt;
9169 if (tg3_flag(tp, TAGGED_STATUS))
9170 fn = tg3_interrupt_tagged;
9171 flags = IRQF_SHARED;
9174 return request_irq(tnapi->irq_vec, fn, flags, name, tnapi);
9177 static int tg3_test_interrupt(struct tg3 *tp)
9179 struct tg3_napi *tnapi = &tp->napi[0];
9180 struct net_device *dev = tp->dev;
9181 int err, i, intr_ok = 0;
9182 u32 val;
9184 if (!netif_running(dev))
9185 return -ENODEV;
9187 tg3_disable_ints(tp);
9189 free_irq(tnapi->irq_vec, tnapi);
9192 * Turn off MSI one shot mode. Otherwise this test has no
9193 * observable way to know whether the interrupt was delivered.
9195 if (tg3_flag(tp, 57765_PLUS)) {
9196 val = tr32(MSGINT_MODE) | MSGINT_MODE_ONE_SHOT_DISABLE;
9197 tw32(MSGINT_MODE, val);
9200 err = request_irq(tnapi->irq_vec, tg3_test_isr,
9201 IRQF_SHARED | IRQF_SAMPLE_RANDOM, dev->name, tnapi);
9202 if (err)
9203 return err;
9205 tnapi->hw_status->status &= ~SD_STATUS_UPDATED;
9206 tg3_enable_ints(tp);
9208 tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
9209 tnapi->coal_now);
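/* Poll for up to ~50 ms (5 * 10 ms) for the test interrupt to show
 * up either in the interrupt mailbox or as a masked PCI INTx.
 */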
9211 for (i = 0; i < 5; i++) {
9212 u32 int_mbox, misc_host_ctrl;
9214 int_mbox = tr32_mailbox(tnapi->int_mbox);
9215 misc_host_ctrl = tr32(TG3PCI_MISC_HOST_CTRL);
9217 if ((int_mbox != 0) ||
9218 (misc_host_ctrl & MISC_HOST_CTRL_MASK_PCI_INT)) {
9219 intr_ok = 1;
9220 break;
9223 if (tg3_flag(tp, 57765_PLUS) &&
9224 tnapi->hw_status->status_tag != tnapi->last_tag)
9225 tw32_mailbox_f(tnapi->int_mbox, tnapi->last_tag << 24);
9227 msleep(10);
9230 tg3_disable_ints(tp);
9232 free_irq(tnapi->irq_vec, tnapi);
9234 err = tg3_request_irq(tp, 0);
9236 if (err)
9237 return err;
9239 if (intr_ok) {
9240 /* Reenable MSI one shot mode. */
9241 if (tg3_flag(tp, 57765_PLUS)) {
9242 val = tr32(MSGINT_MODE) & ~MSGINT_MODE_ONE_SHOT_DISABLE;
9243 tw32(MSGINT_MODE, val);
9245 return 0;
9248 return -EIO;
9251 /* Returns 0 if the MSI test succeeds, or if the test fails but INTx
9252 * mode is successfully restored.
9254 static int tg3_test_msi(struct tg3 *tp)
9256 int err;
9257 u16 pci_cmd;
9259 if (!tg3_flag(tp, USING_MSI))
9260 return 0;
9262 /* Turn off SERR reporting in case MSI terminates with Master
9263 * Abort.
9265 pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
9266 pci_write_config_word(tp->pdev, PCI_COMMAND,
9267 pci_cmd & ~PCI_COMMAND_SERR);
9269 err = tg3_test_interrupt(tp);
9271 pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
9273 if (!err)
9274 return 0;
9276 /* other failures */
9277 if (err != -EIO)
9278 return err;
9280 /* MSI test failed, go back to INTx mode */
9281 netdev_warn(tp->dev, "No interrupt was generated using MSI. Switching "
9282 "to INTx mode. Please report this failure to the PCI "
9283 "maintainer and include system chipset information\n");
9285 free_irq(tp->napi[0].irq_vec, &tp->napi[0]);
9287 pci_disable_msi(tp->pdev);
9289 tg3_flag_clear(tp, USING_MSI);
9290 tp->napi[0].irq_vec = tp->pdev->irq;
9292 err = tg3_request_irq(tp, 0);
9293 if (err)
9294 return err;
9296 /* Need to reset the chip because the MSI cycle may have terminated
9297 * with Master Abort.
9299 tg3_full_lock(tp, 1);
9301 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
9302 err = tg3_init_hw(tp, 1);
9304 tg3_full_unlock(tp);
9306 if (err)
9307 free_irq(tp->napi[0].irq_vec, &tp->napi[0]);
9309 return err;
9312 static int tg3_request_firmware(struct tg3 *tp)
9314 const __be32 *fw_data;
9316 if (request_firmware(&tp->fw, tp->fw_needed, &tp->pdev->dev)) {
9317 netdev_err(tp->dev, "Failed to load firmware \"%s\"\n",
9318 tp->fw_needed);
9319 return -ENOENT;
9322 fw_data = (void *)tp->fw->data;
9324 /* Firmware blob starts with version numbers, followed by
9325 * start address and _full_ length including BSS sections
9326 * (which must be longer than the actual data, of course).
9329 tp->fw_len = be32_to_cpu(fw_data[2]); /* includes bss */
9330 if (tp->fw_len < (tp->fw->size - 12)) {
9331 netdev_err(tp->dev, "bogus length %d in \"%s\"\n",
9332 tp->fw_len, tp->fw_needed);
9333 release_firmware(tp->fw);
9334 tp->fw = NULL;
9335 return -EINVAL;
9338 /* We no longer need firmware; we have it. */
9339 tp->fw_needed = NULL;
9340 return 0;
9343 static bool tg3_enable_msix(struct tg3 *tp)
9345 int i, rc, cpus = num_online_cpus();
9346 struct msix_entry msix_ent[tp->irq_max];
9348 if (cpus == 1)
9349 /* Just fallback to the simpler MSI mode. */
9350 return false;
9353 * We want as many rx rings enabled as there are cpus.
9354 * The first MSIX vector only deals with link interrupts, etc,
9355 * so we add one to the number of vectors we are requesting.
9357 tp->irq_cnt = min_t(unsigned, cpus + 1, tp->irq_max);
9359 for (i = 0; i < tp->irq_max; i++) {
9360 msix_ent[i].entry = i;
9361 msix_ent[i].vector = 0;
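/* pci_enable_msix() returns 0 on success, a negative errno on
 * failure, or the number of vectors actually available when fewer
 * than requested were allocated; in the last case, retry with that
 * count.
 */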
9364 rc = pci_enable_msix(tp->pdev, msix_ent, tp->irq_cnt);
9365 if (rc < 0) {
9366 return false;
9367 } else if (rc != 0) {
9368 if (pci_enable_msix(tp->pdev, msix_ent, rc))
9369 return false;
9370 netdev_notice(tp->dev, "Requested %d MSI-X vectors, received %d\n",
9371 tp->irq_cnt, rc);
9372 tp->irq_cnt = rc;
9375 for (i = 0; i < tp->irq_max; i++)
9376 tp->napi[i].irq_vec = msix_ent[i].vector;
9378 netif_set_real_num_tx_queues(tp->dev, 1);
9379 rc = tp->irq_cnt > 1 ? tp->irq_cnt - 1 : 1;
9380 if (netif_set_real_num_rx_queues(tp->dev, rc)) {
9381 pci_disable_msix(tp->pdev);
9382 return false;
9385 if (tp->irq_cnt > 1) {
9386 tg3_flag_set(tp, ENABLE_RSS);
9388 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
9389 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720) {
9390 tg3_flag_set(tp, ENABLE_TSS);
9391 netif_set_real_num_tx_queues(tp->dev, tp->irq_cnt - 1);
9395 return true;
9398 static void tg3_ints_init(struct tg3 *tp)
9400 if ((tg3_flag(tp, SUPPORT_MSI) || tg3_flag(tp, SUPPORT_MSIX)) &&
9401 !tg3_flag(tp, TAGGED_STATUS)) {
9402 /* All MSI-supporting chips should support tagged
9403 * status.  Warn and skip MSI if that is not the case.
9405 netdev_warn(tp->dev,
9406 "MSI without TAGGED_STATUS? Not using MSI\n");
9407 goto defcfg;
9410 if (tg3_flag(tp, SUPPORT_MSIX) && tg3_enable_msix(tp))
9411 tg3_flag_set(tp, USING_MSIX);
9412 else if (tg3_flag(tp, SUPPORT_MSI) && pci_enable_msi(tp->pdev) == 0)
9413 tg3_flag_set(tp, USING_MSI);
9415 if (tg3_flag(tp, USING_MSI) || tg3_flag(tp, USING_MSIX)) {
9416 u32 msi_mode = tr32(MSGINT_MODE);
9417 if (tg3_flag(tp, USING_MSIX) && tp->irq_cnt > 1)
9418 msi_mode |= MSGINT_MODE_MULTIVEC_EN;
9419 tw32(MSGINT_MODE, msi_mode | MSGINT_MODE_ENABLE);
9421 defcfg:
9422 if (!tg3_flag(tp, USING_MSIX)) {
9423 tp->irq_cnt = 1;
9424 tp->napi[0].irq_vec = tp->pdev->irq;
9425 netif_set_real_num_tx_queues(tp->dev, 1);
9426 netif_set_real_num_rx_queues(tp->dev, 1);
9430 static void tg3_ints_fini(struct tg3 *tp)
9432 if (tg3_flag(tp, USING_MSIX))
9433 pci_disable_msix(tp->pdev);
9434 else if (tg3_flag(tp, USING_MSI))
9435 pci_disable_msi(tp->pdev);
9436 tg3_flag_clear(tp, USING_MSI);
9437 tg3_flag_clear(tp, USING_MSIX);
9438 tg3_flag_clear(tp, ENABLE_RSS);
9439 tg3_flag_clear(tp, ENABLE_TSS);
9442 static int tg3_open(struct net_device *dev)
9444 struct tg3 *tp = netdev_priv(dev);
9445 int i, err;
9447 if (tp->fw_needed) {
9448 err = tg3_request_firmware(tp);
9449 if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0) {
9450 if (err)
9451 return err;
9452 } else if (err) {
9453 netdev_warn(tp->dev, "TSO capability disabled\n");
9454 tg3_flag_clear(tp, TSO_CAPABLE);
9455 } else if (!tg3_flag(tp, TSO_CAPABLE)) {
9456 netdev_notice(tp->dev, "TSO capability restored\n");
9457 tg3_flag_set(tp, TSO_CAPABLE);
9461 netif_carrier_off(tp->dev);
9463 err = tg3_power_up(tp);
9464 if (err)
9465 return err;
9467 tg3_full_lock(tp, 0);
9469 tg3_disable_ints(tp);
9470 tg3_flag_clear(tp, INIT_COMPLETE);
9472 tg3_full_unlock(tp);
9475 * Set up interrupts first so we know how
9476 * many NAPI resources to allocate
9478 tg3_ints_init(tp);
9480 /* The placement of this call is tied
9481 * to the setup and use of Host TX descriptors.
9483 err = tg3_alloc_consistent(tp);
9484 if (err)
9485 goto err_out1;
9487 tg3_napi_init(tp);
9489 tg3_napi_enable(tp);
9491 for (i = 0; i < tp->irq_cnt; i++) {
9492 struct tg3_napi *tnapi = &tp->napi[i];
9493 err = tg3_request_irq(tp, i);
9494 if (err) {
9495 for (i--; i >= 0; i--)
9496 free_irq(tnapi->irq_vec, tnapi);
9497 break;
9501 if (err)
9502 goto err_out2;
9504 tg3_full_lock(tp, 0);
9506 err = tg3_init_hw(tp, 1);
9507 if (err) {
9508 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
9509 tg3_free_rings(tp);
9510 } else {
9511 if (tg3_flag(tp, TAGGED_STATUS) &&
9512 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5717 &&
9513 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_57765)
9514 tp->timer_offset = HZ;
9515 else
9516 tp->timer_offset = HZ / 10;
9518 BUG_ON(tp->timer_offset > HZ);
9519 tp->timer_counter = tp->timer_multiplier =
9520 (HZ / tp->timer_offset);
9521 tp->asf_counter = tp->asf_multiplier =
9522 ((HZ / tp->timer_offset) * 2);
9524 init_timer(&tp->timer);
9525 tp->timer.expires = jiffies + tp->timer_offset;
9526 tp->timer.data = (unsigned long) tp;
9527 tp->timer.function = tg3_timer;
9530 tg3_full_unlock(tp);
9532 if (err)
9533 goto err_out3;
9535 if (tg3_flag(tp, USING_MSI)) {
9536 err = tg3_test_msi(tp);
9538 if (err) {
9539 tg3_full_lock(tp, 0);
9540 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
9541 tg3_free_rings(tp);
9542 tg3_full_unlock(tp);
9544 goto err_out2;
9547 if (!tg3_flag(tp, 57765_PLUS) && tg3_flag(tp, USING_MSI)) {
9548 u32 val = tr32(PCIE_TRANSACTION_CFG);
9550 tw32(PCIE_TRANSACTION_CFG,
9551 val | PCIE_TRANS_CFG_1SHOT_MSI);
9555 tg3_phy_start(tp);
9557 tg3_full_lock(tp, 0);
9559 add_timer(&tp->timer);
9560 tg3_flag_set(tp, INIT_COMPLETE);
9561 tg3_enable_ints(tp);
9563 tg3_full_unlock(tp);
9565 netif_tx_start_all_queues(dev);
9568 * Reset the loopback feature if it was turned on while the device was
9569 * down, to make sure that it is installed properly now.
9571 if (dev->features & NETIF_F_LOOPBACK)
9572 tg3_set_loopback(dev, dev->features);
9574 return 0;
9576 err_out3:
9577 for (i = tp->irq_cnt - 1; i >= 0; i--) {
9578 struct tg3_napi *tnapi = &tp->napi[i];
9579 free_irq(tnapi->irq_vec, tnapi);
9582 err_out2:
9583 tg3_napi_disable(tp);
9584 tg3_napi_fini(tp);
9585 tg3_free_consistent(tp);
9587 err_out1:
9588 tg3_ints_fini(tp);
9589 tg3_frob_aux_power(tp, false);
9590 pci_set_power_state(tp->pdev, PCI_D3hot);
9591 return err;
9594 static struct rtnl_link_stats64 *tg3_get_stats64(struct net_device *,
9595 struct rtnl_link_stats64 *);
9596 static struct tg3_ethtool_stats *tg3_get_estats(struct tg3 *);
9598 static int tg3_close(struct net_device *dev)
9600 int i;
9601 struct tg3 *tp = netdev_priv(dev);
9603 tg3_napi_disable(tp);
9604 cancel_work_sync(&tp->reset_task);
9606 netif_tx_stop_all_queues(dev);
9608 del_timer_sync(&tp->timer);
9610 tg3_phy_stop(tp);
9612 tg3_full_lock(tp, 1);
9614 tg3_disable_ints(tp);
9616 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
9617 tg3_free_rings(tp);
9618 tg3_flag_clear(tp, INIT_COMPLETE);
9620 tg3_full_unlock(tp);
9622 for (i = tp->irq_cnt - 1; i >= 0; i--) {
9623 struct tg3_napi *tnapi = &tp->napi[i];
9624 free_irq(tnapi->irq_vec, tnapi);
9627 tg3_ints_fini(tp);
9629 tg3_get_stats64(tp->dev, &tp->net_stats_prev);
9631 memcpy(&tp->estats_prev, tg3_get_estats(tp),
9632 sizeof(tp->estats_prev));
9634 tg3_napi_fini(tp);
9636 tg3_free_consistent(tp);
9638 tg3_power_down(tp);
9640 netif_carrier_off(tp->dev);
9642 return 0;
9645 static inline u64 get_stat64(tg3_stat64_t *val)
9647 return ((u64)val->high << 32) | ((u64)val->low);
9650 static u64 calc_crc_errors(struct tg3 *tp)
9652 struct tg3_hw_stats *hw_stats = tp->hw_stats;
9654 if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
9655 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
9656 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701)) {
9657 u32 val;
9659 spin_lock_bh(&tp->lock);
9660 if (!tg3_readphy(tp, MII_TG3_TEST1, &val)) {
9661 tg3_writephy(tp, MII_TG3_TEST1,
9662 val | MII_TG3_TEST1_CRC_EN);
9663 tg3_readphy(tp, MII_TG3_RXR_COUNTERS, &val);
9664 } else
9665 val = 0;
9666 spin_unlock_bh(&tp->lock);
9668 tp->phy_crc_errors += val;
9670 return tp->phy_crc_errors;
9673 return get_stat64(&hw_stats->rx_fcs_errors);
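/* Add the live 64-bit hardware counter to the snapshot saved at the
 * last device close, so the ethtool stats stay monotonic across
 * down/up cycles.
 */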
9676 #define ESTAT_ADD(member) \
9677 estats->member = old_estats->member + \
9678 get_stat64(&hw_stats->member)
9680 static struct tg3_ethtool_stats *tg3_get_estats(struct tg3 *tp)
9682 struct tg3_ethtool_stats *estats = &tp->estats;
9683 struct tg3_ethtool_stats *old_estats = &tp->estats_prev;
9684 struct tg3_hw_stats *hw_stats = tp->hw_stats;
9686 if (!hw_stats)
9687 return old_estats;
9689 ESTAT_ADD(rx_octets);
9690 ESTAT_ADD(rx_fragments);
9691 ESTAT_ADD(rx_ucast_packets);
9692 ESTAT_ADD(rx_mcast_packets);
9693 ESTAT_ADD(rx_bcast_packets);
9694 ESTAT_ADD(rx_fcs_errors);
9695 ESTAT_ADD(rx_align_errors);
9696 ESTAT_ADD(rx_xon_pause_rcvd);
9697 ESTAT_ADD(rx_xoff_pause_rcvd);
9698 ESTAT_ADD(rx_mac_ctrl_rcvd);
9699 ESTAT_ADD(rx_xoff_entered);
9700 ESTAT_ADD(rx_frame_too_long_errors);
9701 ESTAT_ADD(rx_jabbers);
9702 ESTAT_ADD(rx_undersize_packets);
9703 ESTAT_ADD(rx_in_length_errors);
9704 ESTAT_ADD(rx_out_length_errors);
9705 ESTAT_ADD(rx_64_or_less_octet_packets);
9706 ESTAT_ADD(rx_65_to_127_octet_packets);
9707 ESTAT_ADD(rx_128_to_255_octet_packets);
9708 ESTAT_ADD(rx_256_to_511_octet_packets);
9709 ESTAT_ADD(rx_512_to_1023_octet_packets);
9710 ESTAT_ADD(rx_1024_to_1522_octet_packets);
9711 ESTAT_ADD(rx_1523_to_2047_octet_packets);
9712 ESTAT_ADD(rx_2048_to_4095_octet_packets);
9713 ESTAT_ADD(rx_4096_to_8191_octet_packets);
9714 ESTAT_ADD(rx_8192_to_9022_octet_packets);
9716 ESTAT_ADD(tx_octets);
9717 ESTAT_ADD(tx_collisions);
9718 ESTAT_ADD(tx_xon_sent);
9719 ESTAT_ADD(tx_xoff_sent);
9720 ESTAT_ADD(tx_flow_control);
9721 ESTAT_ADD(tx_mac_errors);
9722 ESTAT_ADD(tx_single_collisions);
9723 ESTAT_ADD(tx_mult_collisions);
9724 ESTAT_ADD(tx_deferred);
9725 ESTAT_ADD(tx_excessive_collisions);
9726 ESTAT_ADD(tx_late_collisions);
9727 ESTAT_ADD(tx_collide_2times);
9728 ESTAT_ADD(tx_collide_3times);
9729 ESTAT_ADD(tx_collide_4times);
9730 ESTAT_ADD(tx_collide_5times);
9731 ESTAT_ADD(tx_collide_6times);
9732 ESTAT_ADD(tx_collide_7times);
9733 ESTAT_ADD(tx_collide_8times);
9734 ESTAT_ADD(tx_collide_9times);
9735 ESTAT_ADD(tx_collide_10times);
9736 ESTAT_ADD(tx_collide_11times);
9737 ESTAT_ADD(tx_collide_12times);
9738 ESTAT_ADD(tx_collide_13times);
9739 ESTAT_ADD(tx_collide_14times);
9740 ESTAT_ADD(tx_collide_15times);
9741 ESTAT_ADD(tx_ucast_packets);
9742 ESTAT_ADD(tx_mcast_packets);
9743 ESTAT_ADD(tx_bcast_packets);
9744 ESTAT_ADD(tx_carrier_sense_errors);
9745 ESTAT_ADD(tx_discards);
9746 ESTAT_ADD(tx_errors);
9748 ESTAT_ADD(dma_writeq_full);
9749 ESTAT_ADD(dma_write_prioq_full);
9750 ESTAT_ADD(rxbds_empty);
9751 ESTAT_ADD(rx_discards);
9752 ESTAT_ADD(rx_errors);
9753 ESTAT_ADD(rx_threshold_hit);
9755 ESTAT_ADD(dma_readq_full);
9756 ESTAT_ADD(dma_read_prioq_full);
9757 ESTAT_ADD(tx_comp_queue_full);
9759 ESTAT_ADD(ring_set_send_prod_index);
9760 ESTAT_ADD(ring_status_update);
9761 ESTAT_ADD(nic_irqs);
9762 ESTAT_ADD(nic_avoided_irqs);
9763 ESTAT_ADD(nic_tx_threshold_hit);
9765 ESTAT_ADD(mbuf_lwm_thresh_hit);
9767 return estats;
9770 static struct rtnl_link_stats64 *tg3_get_stats64(struct net_device *dev,
9771 struct rtnl_link_stats64 *stats)
9773 struct tg3 *tp = netdev_priv(dev);
9774 struct rtnl_link_stats64 *old_stats = &tp->net_stats_prev;
9775 struct tg3_hw_stats *hw_stats = tp->hw_stats;
9777 if (!hw_stats)
9778 return old_stats;
9780 stats->rx_packets = old_stats->rx_packets +
9781 get_stat64(&hw_stats->rx_ucast_packets) +
9782 get_stat64(&hw_stats->rx_mcast_packets) +
9783 get_stat64(&hw_stats->rx_bcast_packets);
9785 stats->tx_packets = old_stats->tx_packets +
9786 get_stat64(&hw_stats->tx_ucast_packets) +
9787 get_stat64(&hw_stats->tx_mcast_packets) +
9788 get_stat64(&hw_stats->tx_bcast_packets);
9790 stats->rx_bytes = old_stats->rx_bytes +
9791 get_stat64(&hw_stats->rx_octets);
9792 stats->tx_bytes = old_stats->tx_bytes +
9793 get_stat64(&hw_stats->tx_octets);
9795 stats->rx_errors = old_stats->rx_errors +
9796 get_stat64(&hw_stats->rx_errors);
9797 stats->tx_errors = old_stats->tx_errors +
9798 get_stat64(&hw_stats->tx_errors) +
9799 get_stat64(&hw_stats->tx_mac_errors) +
9800 get_stat64(&hw_stats->tx_carrier_sense_errors) +
9801 get_stat64(&hw_stats->tx_discards);
9803 stats->multicast = old_stats->multicast +
9804 get_stat64(&hw_stats->rx_mcast_packets);
9805 stats->collisions = old_stats->collisions +
9806 get_stat64(&hw_stats->tx_collisions);
9808 stats->rx_length_errors = old_stats->rx_length_errors +
9809 get_stat64(&hw_stats->rx_frame_too_long_errors) +
9810 get_stat64(&hw_stats->rx_undersize_packets);
9812 stats->rx_over_errors = old_stats->rx_over_errors +
9813 get_stat64(&hw_stats->rxbds_empty);
9814 stats->rx_frame_errors = old_stats->rx_frame_errors +
9815 get_stat64(&hw_stats->rx_align_errors);
9816 stats->tx_aborted_errors = old_stats->tx_aborted_errors +
9817 get_stat64(&hw_stats->tx_discards);
9818 stats->tx_carrier_errors = old_stats->tx_carrier_errors +
9819 get_stat64(&hw_stats->tx_carrier_sense_errors);
9821 stats->rx_crc_errors = old_stats->rx_crc_errors +
9822 calc_crc_errors(tp);
9824 stats->rx_missed_errors = old_stats->rx_missed_errors +
9825 get_stat64(&hw_stats->rx_discards);
9827 stats->rx_dropped = tp->rx_dropped;
9829 return stats;
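/* Bit-serial CRC-32 over the buffer, using the reflected Ethernet
 * polynomial 0xedb88320; used below to hash multicast addresses for
 * the hardware filter.
 */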
9832 static inline u32 calc_crc(unsigned char *buf, int len)
9834 u32 reg;
9835 u32 tmp;
9836 int j, k;
9838 reg = 0xffffffff;
9840 for (j = 0; j < len; j++) {
9841 reg ^= buf[j];
9843 for (k = 0; k < 8; k++) {
9844 tmp = reg & 0x01;
9846 reg >>= 1;
9848 if (tmp)
9849 reg ^= 0xedb88320;
9853 return ~reg;
9856 static void tg3_set_multi(struct tg3 *tp, unsigned int accept_all)
9858 /* accept or reject all multicast frames */
9859 tw32(MAC_HASH_REG_0, accept_all ? 0xffffffff : 0);
9860 tw32(MAC_HASH_REG_1, accept_all ? 0xffffffff : 0);
9861 tw32(MAC_HASH_REG_2, accept_all ? 0xffffffff : 0);
9862 tw32(MAC_HASH_REG_3, accept_all ? 0xffffffff : 0);
9865 static void __tg3_set_rx_mode(struct net_device *dev)
9867 struct tg3 *tp = netdev_priv(dev);
9868 u32 rx_mode;
9870 rx_mode = tp->rx_mode & ~(RX_MODE_PROMISC |
9871 RX_MODE_KEEP_VLAN_TAG);
9873 #if !defined(CONFIG_VLAN_8021Q) && !defined(CONFIG_VLAN_8021Q_MODULE)
9874 /* When ASF is in use, we always keep the RX_MODE_KEEP_VLAN_TAG
9875 * flag clear.
9877 if (!tg3_flag(tp, ENABLE_ASF))
9878 rx_mode |= RX_MODE_KEEP_VLAN_TAG;
9879 #endif
9881 if (dev->flags & IFF_PROMISC) {
9882 /* Promiscuous mode. */
9883 rx_mode |= RX_MODE_PROMISC;
9884 } else if (dev->flags & IFF_ALLMULTI) {
9885 /* Accept all multicast. */
9886 tg3_set_multi(tp, 1);
9887 } else if (netdev_mc_empty(dev)) {
9888 /* Reject all multicast. */
9889 tg3_set_multi(tp, 0);
9890 } else {
9891 /* Accept one or more multicast(s). */
9892 struct netdev_hw_addr *ha;
9893 u32 mc_filter[4] = { 0, };
9894 u32 regidx;
9895 u32 bit;
9896 u32 crc;
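/* Hash each address into a 128-bit filter: the low 7 bits of the
 * inverted CRC pick one of 128 bits spread across the four 32-bit
 * hash registers.
 */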
9898 netdev_for_each_mc_addr(ha, dev) {
9899 crc = calc_crc(ha->addr, ETH_ALEN);
9900 bit = ~crc & 0x7f;
9901 regidx = (bit & 0x60) >> 5;
9902 bit &= 0x1f;
9903 mc_filter[regidx] |= (1 << bit);
9906 tw32(MAC_HASH_REG_0, mc_filter[0]);
9907 tw32(MAC_HASH_REG_1, mc_filter[1]);
9908 tw32(MAC_HASH_REG_2, mc_filter[2]);
9909 tw32(MAC_HASH_REG_3, mc_filter[3]);
9912 if (rx_mode != tp->rx_mode) {
9913 tp->rx_mode = rx_mode;
9914 tw32_f(MAC_RX_MODE, rx_mode);
9915 udelay(10);
9919 static void tg3_set_rx_mode(struct net_device *dev)
9921 struct tg3 *tp = netdev_priv(dev);
9923 if (!netif_running(dev))
9924 return;
9926 tg3_full_lock(tp, 0);
9927 __tg3_set_rx_mode(dev);
9928 tg3_full_unlock(tp);
9931 static int tg3_get_regs_len(struct net_device *dev)
9933 return TG3_REG_BLK_SIZE;
9936 static void tg3_get_regs(struct net_device *dev,
9937 struct ethtool_regs *regs, void *_p)
9939 struct tg3 *tp = netdev_priv(dev);
9941 regs->version = 0;
9943 memset(_p, 0, TG3_REG_BLK_SIZE);
9945 if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
9946 return;
9948 tg3_full_lock(tp, 0);
9950 tg3_dump_legacy_regs(tp, (u32 *)_p);
9952 tg3_full_unlock(tp);
9955 static int tg3_get_eeprom_len(struct net_device *dev)
9957 struct tg3 *tp = netdev_priv(dev);
9959 return tp->nvram_size;
9962 static int tg3_get_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom, u8 *data)
9964 struct tg3 *tp = netdev_priv(dev);
9965 int ret;
9966 u8 *pd;
9967 u32 i, offset, len, b_offset, b_count;
9968 __be32 val;
9970 if (tg3_flag(tp, NO_NVRAM))
9971 return -EINVAL;
9973 if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
9974 return -EAGAIN;
9976 offset = eeprom->offset;
9977 len = eeprom->len;
9978 eeprom->len = 0;
9980 eeprom->magic = TG3_EEPROM_MAGIC;
9982 if (offset & 3) {
9983 /* adjustments to start on required 4 byte boundary */
9984 b_offset = offset & 3;
9985 b_count = 4 - b_offset;
9986 if (b_count > len) {
9987 /* i.e. offset=1 len=2 */
9988 b_count = len;
9990 ret = tg3_nvram_read_be32(tp, offset-b_offset, &val);
9991 if (ret)
9992 return ret;
9993 memcpy(data, ((char *)&val) + b_offset, b_count);
9994 len -= b_count;
9995 offset += b_count;
9996 eeprom->len += b_count;
9999 /* read bytes up to the last 4 byte boundary */
10000 pd = &data[eeprom->len];
10001 for (i = 0; i < (len - (len & 3)); i += 4) {
10002 ret = tg3_nvram_read_be32(tp, offset + i, &val);
10003 if (ret) {
10004 eeprom->len += i;
10005 return ret;
10007 memcpy(pd + i, &val, 4);
10009 eeprom->len += i;
10011 if (len & 3) {
10012 /* read last bytes not ending on 4 byte boundary */
10013 pd = &data[eeprom->len];
10014 b_count = len & 3;
10015 b_offset = offset + len - b_count;
10016 ret = tg3_nvram_read_be32(tp, b_offset, &val);
10017 if (ret)
10018 return ret;
10019 memcpy(pd, &val, b_count);
10020 eeprom->len += b_count;
10022 return 0;
10025 static int tg3_nvram_write_block(struct tg3 *tp, u32 offset, u32 len, u8 *buf);
10027 static int tg3_set_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom, u8 *data)
10029 struct tg3 *tp = netdev_priv(dev);
10030 int ret;
10031 u32 offset, len, b_offset, odd_len;
10032 u8 *buf;
10033 __be32 start, end;
10035 if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
10036 return -EAGAIN;
10038 if (tg3_flag(tp, NO_NVRAM) ||
10039 eeprom->magic != TG3_EEPROM_MAGIC)
10040 return -EINVAL;
10042 offset = eeprom->offset;
10043 len = eeprom->len;
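/* NVRAM accesses are 32-bit.  For a write that starts or ends off a
 * 4-byte boundary, read back the boundary words first so the partial
 * words can be merged in the bounce buffer below.
 */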
10045 if ((b_offset = (offset & 3))) {
10046 /* adjustments to start on required 4 byte boundary */
10047 ret = tg3_nvram_read_be32(tp, offset-b_offset, &start);
10048 if (ret)
10049 return ret;
10050 len += b_offset;
10051 offset &= ~3;
10052 if (len < 4)
10053 len = 4;
10056 odd_len = 0;
10057 if (len & 3) {
10058 /* adjustments to end on required 4 byte boundary */
10059 odd_len = 1;
10060 len = (len + 3) & ~3;
10061 ret = tg3_nvram_read_be32(tp, offset+len-4, &end);
10062 if (ret)
10063 return ret;
10066 buf = data;
10067 if (b_offset || odd_len) {
10068 buf = kmalloc(len, GFP_KERNEL);
10069 if (!buf)
10070 return -ENOMEM;
10071 if (b_offset)
10072 memcpy(buf, &start, 4);
10073 if (odd_len)
10074 memcpy(buf+len-4, &end, 4);
10075 memcpy(buf + b_offset, data, eeprom->len);
10078 ret = tg3_nvram_write_block(tp, offset, len, buf);
10080 if (buf != data)
10081 kfree(buf);
10083 return ret;
10086 static int tg3_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
10088 struct tg3 *tp = netdev_priv(dev);
10090 if (tg3_flag(tp, USE_PHYLIB)) {
10091 struct phy_device *phydev;
10092 if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
10093 return -EAGAIN;
10094 phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
10095 return phy_ethtool_gset(phydev, cmd);
10098 cmd->supported = (SUPPORTED_Autoneg);
10100 if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY))
10101 cmd->supported |= (SUPPORTED_1000baseT_Half |
10102 SUPPORTED_1000baseT_Full);
10104 if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES)) {
10105 cmd->supported |= (SUPPORTED_100baseT_Half |
10106 SUPPORTED_100baseT_Full |
10107 SUPPORTED_10baseT_Half |
10108 SUPPORTED_10baseT_Full |
10109 SUPPORTED_TP);
10110 cmd->port = PORT_TP;
10111 } else {
10112 cmd->supported |= SUPPORTED_FIBRE;
10113 cmd->port = PORT_FIBRE;
10116 cmd->advertising = tp->link_config.advertising;
10117 if (tg3_flag(tp, PAUSE_AUTONEG)) {
10118 if (tp->link_config.flowctrl & FLOW_CTRL_RX) {
10119 if (tp->link_config.flowctrl & FLOW_CTRL_TX) {
10120 cmd->advertising |= ADVERTISED_Pause;
10121 } else {
10122 cmd->advertising |= ADVERTISED_Pause |
10123 ADVERTISED_Asym_Pause;
10125 } else if (tp->link_config.flowctrl & FLOW_CTRL_TX) {
10126 cmd->advertising |= ADVERTISED_Asym_Pause;
10129 if (netif_running(dev)) {
10130 ethtool_cmd_speed_set(cmd, tp->link_config.active_speed);
10131 cmd->duplex = tp->link_config.active_duplex;
10132 } else {
10133 ethtool_cmd_speed_set(cmd, SPEED_INVALID);
10134 cmd->duplex = DUPLEX_INVALID;
10136 cmd->phy_address = tp->phy_addr;
10137 cmd->transceiver = XCVR_INTERNAL;
10138 cmd->autoneg = tp->link_config.autoneg;
10139 cmd->maxtxpkt = 0;
10140 cmd->maxrxpkt = 0;
10141 return 0;
10144 static int tg3_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
10146 struct tg3 *tp = netdev_priv(dev);
10147 u32 speed = ethtool_cmd_speed(cmd);
10149 if (tg3_flag(tp, USE_PHYLIB)) {
10150 struct phy_device *phydev;
10151 if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
10152 return -EAGAIN;
10153 phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
10154 return phy_ethtool_sset(phydev, cmd);
10157 if (cmd->autoneg != AUTONEG_ENABLE &&
10158 cmd->autoneg != AUTONEG_DISABLE)
10159 return -EINVAL;
10161 if (cmd->autoneg == AUTONEG_DISABLE &&
10162 cmd->duplex != DUPLEX_FULL &&
10163 cmd->duplex != DUPLEX_HALF)
10164 return -EINVAL;
10166 if (cmd->autoneg == AUTONEG_ENABLE) {
10167 u32 mask = ADVERTISED_Autoneg |
10168 ADVERTISED_Pause |
10169 ADVERTISED_Asym_Pause;
10171 if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY))
10172 mask |= ADVERTISED_1000baseT_Half |
10173 ADVERTISED_1000baseT_Full;
10175 if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
10176 mask |= ADVERTISED_100baseT_Half |
10177 ADVERTISED_100baseT_Full |
10178 ADVERTISED_10baseT_Half |
10179 ADVERTISED_10baseT_Full |
10180 ADVERTISED_TP;
10181 else
10182 mask |= ADVERTISED_FIBRE;
10184 if (cmd->advertising & ~mask)
10185 return -EINVAL;
10187 mask &= (ADVERTISED_1000baseT_Half |
10188 ADVERTISED_1000baseT_Full |
10189 ADVERTISED_100baseT_Half |
10190 ADVERTISED_100baseT_Full |
10191 ADVERTISED_10baseT_Half |
10192 ADVERTISED_10baseT_Full);
10194 cmd->advertising &= mask;
10195 } else {
10196 if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES) {
10197 if (speed != SPEED_1000)
10198 return -EINVAL;
10200 if (cmd->duplex != DUPLEX_FULL)
10201 return -EINVAL;
10202 } else {
10203 if (speed != SPEED_100 &&
10204 speed != SPEED_10)
10205 return -EINVAL;
10209 tg3_full_lock(tp, 0);
10211 tp->link_config.autoneg = cmd->autoneg;
10212 if (cmd->autoneg == AUTONEG_ENABLE) {
10213 tp->link_config.advertising = (cmd->advertising |
10214 ADVERTISED_Autoneg);
10215 tp->link_config.speed = SPEED_INVALID;
10216 tp->link_config.duplex = DUPLEX_INVALID;
10217 } else {
10218 tp->link_config.advertising = 0;
10219 tp->link_config.speed = speed;
10220 tp->link_config.duplex = cmd->duplex;
10223 tp->link_config.orig_speed = tp->link_config.speed;
10224 tp->link_config.orig_duplex = tp->link_config.duplex;
10225 tp->link_config.orig_autoneg = tp->link_config.autoneg;
10227 if (netif_running(dev))
10228 tg3_setup_phy(tp, 1);
10230 tg3_full_unlock(tp);
10232 return 0;
10233 }
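/*
 * Editor's note (illustrative): in userspace terms, the validation above
 * accepts e.g. "ethtool -s ethX speed 100 duplex full autoneg off" on
 * copper, rejects any forced gigabit there (only 10/100 may be forced),
 * and on SERDES parts allows forcing only 1000/full.
 */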
10235 static void tg3_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
10237 struct tg3 *tp = netdev_priv(dev);
10239 strcpy(info->driver, DRV_MODULE_NAME);
10240 strcpy(info->version, DRV_MODULE_VERSION);
10241 strcpy(info->fw_version, tp->fw_ver);
10242 strcpy(info->bus_info, pci_name(tp->pdev));
10245 static void tg3_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
10247 struct tg3 *tp = netdev_priv(dev);
10249 if (tg3_flag(tp, WOL_CAP) && device_can_wakeup(&tp->pdev->dev))
10250 wol->supported = WAKE_MAGIC;
10251 else
10252 wol->supported = 0;
10253 wol->wolopts = 0;
10254 if (tg3_flag(tp, WOL_ENABLE) && device_can_wakeup(&tp->pdev->dev))
10255 wol->wolopts = WAKE_MAGIC;
10256 memset(&wol->sopass, 0, sizeof(wol->sopass));
10259 static int tg3_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
10261 struct tg3 *tp = netdev_priv(dev);
10262 struct device *dp = &tp->pdev->dev;
10264 if (wol->wolopts & ~WAKE_MAGIC)
10265 return -EINVAL;
10266 if ((wol->wolopts & WAKE_MAGIC) &&
10267 !(tg3_flag(tp, WOL_CAP) && device_can_wakeup(dp)))
10268 return -EINVAL;
10270 device_set_wakeup_enable(dp, wol->wolopts & WAKE_MAGIC);
10272 spin_lock_bh(&tp->lock);
10273 if (device_may_wakeup(dp))
10274 tg3_flag_set(tp, WOL_ENABLE);
10275 else
10276 tg3_flag_clear(tp, WOL_ENABLE);
10277 spin_unlock_bh(&tp->lock);
10279 return 0;
10282 static u32 tg3_get_msglevel(struct net_device *dev)
10284 struct tg3 *tp = netdev_priv(dev);
10285 return tp->msg_enable;
10288 static void tg3_set_msglevel(struct net_device *dev, u32 value)
10290 struct tg3 *tp = netdev_priv(dev);
10291 tp->msg_enable = value;
10294 static int tg3_nway_reset(struct net_device *dev)
10296 struct tg3 *tp = netdev_priv(dev);
10297 int r;
10299 if (!netif_running(dev))
10300 return -EAGAIN;
10302 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
10303 return -EINVAL;
10305 if (tg3_flag(tp, USE_PHYLIB)) {
10306 if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
10307 return -EAGAIN;
10308 r = phy_start_aneg(tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]);
10309 } else {
10310 u32 bmcr;
10312 spin_lock_bh(&tp->lock);
10313 r = -EINVAL;
10314 tg3_readphy(tp, MII_BMCR, &bmcr);
10315 if (!tg3_readphy(tp, MII_BMCR, &bmcr) &&
10316 ((bmcr & BMCR_ANENABLE) ||
10317 (tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT))) {
10318 tg3_writephy(tp, MII_BMCR, bmcr | BMCR_ANRESTART |
10319 BMCR_ANENABLE);
10320 r = 0;
10322 spin_unlock_bh(&tp->lock);
10325 return r;
10326 }
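/*
 * Editor's note (illustrative): the non-phylib branch above restarts
 * autonegotiation by writing BMCR with BMCR_ANRESTART while keeping
 * BMCR_ANENABLE set; per the 802.3 MII register definition the restart
 * bit is self-clearing once renegotiation begins.
 */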
10328 static void tg3_get_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
10330 struct tg3 *tp = netdev_priv(dev);
10332 ering->rx_max_pending = tp->rx_std_ring_mask;
10333 ering->rx_mini_max_pending = 0;
10334 if (tg3_flag(tp, JUMBO_RING_ENABLE))
10335 ering->rx_jumbo_max_pending = tp->rx_jmb_ring_mask;
10336 else
10337 ering->rx_jumbo_max_pending = 0;
10339 ering->tx_max_pending = TG3_TX_RING_SIZE - 1;
10341 ering->rx_pending = tp->rx_pending;
10342 ering->rx_mini_pending = 0;
10343 if (tg3_flag(tp, JUMBO_RING_ENABLE))
10344 ering->rx_jumbo_pending = tp->rx_jumbo_pending;
10345 else
10346 ering->rx_jumbo_pending = 0;
10348 ering->tx_pending = tp->napi[0].tx_pending;
10351 static int tg3_set_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
10353 struct tg3 *tp = netdev_priv(dev);
10354 int i, irq_sync = 0, err = 0;
10356 if ((ering->rx_pending > tp->rx_std_ring_mask) ||
10357 (ering->rx_jumbo_pending > tp->rx_jmb_ring_mask) ||
10358 (ering->tx_pending > TG3_TX_RING_SIZE - 1) ||
10359 (ering->tx_pending <= MAX_SKB_FRAGS) ||
10360 (tg3_flag(tp, TSO_BUG) &&
10361 (ering->tx_pending <= (MAX_SKB_FRAGS * 3))))
10362 return -EINVAL;
10364 if (netif_running(dev)) {
10365 tg3_phy_stop(tp);
10366 tg3_netif_stop(tp);
10367 irq_sync = 1;
10370 tg3_full_lock(tp, irq_sync);
10372 tp->rx_pending = ering->rx_pending;
10374 if (tg3_flag(tp, MAX_RXPEND_64) &&
10375 tp->rx_pending > 63)
10376 tp->rx_pending = 63;
10377 tp->rx_jumbo_pending = ering->rx_jumbo_pending;
10379 for (i = 0; i < tp->irq_max; i++)
10380 tp->napi[i].tx_pending = ering->tx_pending;
10382 if (netif_running(dev)) {
10383 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
10384 err = tg3_restart_hw(tp, 1);
10385 if (!err)
10386 tg3_netif_start(tp);
10389 tg3_full_unlock(tp);
10391 if (irq_sync && !err)
10392 tg3_phy_start(tp);
10394 return err;
10395 }
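/*
 * Editor's sketch (not part of the driver): the tx ring lower bound
 * enforced in tg3_set_ringparam() above. A maximally fragmented skb can
 * consume one BD per fragment plus one for the linear data, so the ring
 * must be strictly larger than MAX_SKB_FRAGS, and the TSO_BUG workaround
 * needs roughly three times that headroom. The constant is illustrative
 * (18 matches MAX_SKB_FRAGS on 4 KiB pages).
 */
#define SK_MAX_SKB_FRAGS	18

static int sk_tx_ring_size_valid(unsigned int tx_pending, int tso_bug)
{
	if (tx_pending <= SK_MAX_SKB_FRAGS)
		return 0;	/* one worst-case skb could never fit */
	if (tso_bug && tx_pending <= SK_MAX_SKB_FRAGS * 3)
		return 0;	/* too small for the TSO workaround */
	return 1;
}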
10397 static void tg3_get_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
10399 struct tg3 *tp = netdev_priv(dev);
10401 epause->autoneg = !!tg3_flag(tp, PAUSE_AUTONEG);
10403 if (tp->link_config.active_flowctrl & FLOW_CTRL_RX)
10404 epause->rx_pause = 1;
10405 else
10406 epause->rx_pause = 0;
10408 if (tp->link_config.active_flowctrl & FLOW_CTRL_TX)
10409 epause->tx_pause = 1;
10410 else
10411 epause->tx_pause = 0;
10414 static int tg3_set_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
10416 struct tg3 *tp = netdev_priv(dev);
10417 int err = 0;
10419 if (tg3_flag(tp, USE_PHYLIB)) {
10420 u32 newadv;
10421 struct phy_device *phydev;
10423 phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
10425 if (!(phydev->supported & SUPPORTED_Pause) ||
10426 (!(phydev->supported & SUPPORTED_Asym_Pause) &&
10427 (epause->rx_pause != epause->tx_pause)))
10428 return -EINVAL;
10430 tp->link_config.flowctrl = 0;
10431 if (epause->rx_pause) {
10432 tp->link_config.flowctrl |= FLOW_CTRL_RX;
10434 if (epause->tx_pause) {
10435 tp->link_config.flowctrl |= FLOW_CTRL_TX;
10436 newadv = ADVERTISED_Pause;
10437 } else
10438 newadv = ADVERTISED_Pause |
10439 ADVERTISED_Asym_Pause;
10440 } else if (epause->tx_pause) {
10441 tp->link_config.flowctrl |= FLOW_CTRL_TX;
10442 newadv = ADVERTISED_Asym_Pause;
10443 } else
10444 newadv = 0;
10446 if (epause->autoneg)
10447 tg3_flag_set(tp, PAUSE_AUTONEG);
10448 else
10449 tg3_flag_clear(tp, PAUSE_AUTONEG);
10451 if (tp->phy_flags & TG3_PHYFLG_IS_CONNECTED) {
10452 u32 oldadv = phydev->advertising &
10453 (ADVERTISED_Pause | ADVERTISED_Asym_Pause);
10454 if (oldadv != newadv) {
10455 phydev->advertising &=
10456 ~(ADVERTISED_Pause |
10457 ADVERTISED_Asym_Pause);
10458 phydev->advertising |= newadv;
10459 if (phydev->autoneg) {
10460 /*
10461 * Always renegotiate the link to
10462 * inform our link partner of our
10463 * flow control settings, even if the
10464 * flow control is forced. Let
10465 * tg3_adjust_link() do the final
10466 * flow control setup.
10467 */
10468 return phy_start_aneg(phydev);
10472 if (!epause->autoneg)
10473 tg3_setup_flow_control(tp, 0, 0);
10474 } else {
10475 tp->link_config.orig_advertising &=
10476 ~(ADVERTISED_Pause |
10477 ADVERTISED_Asym_Pause);
10478 tp->link_config.orig_advertising |= newadv;
10480 } else {
10481 int irq_sync = 0;
10483 if (netif_running(dev)) {
10484 tg3_netif_stop(tp);
10485 irq_sync = 1;
10488 tg3_full_lock(tp, irq_sync);
10490 if (epause->autoneg)
10491 tg3_flag_set(tp, PAUSE_AUTONEG);
10492 else
10493 tg3_flag_clear(tp, PAUSE_AUTONEG);
10494 if (epause->rx_pause)
10495 tp->link_config.flowctrl |= FLOW_CTRL_RX;
10496 else
10497 tp->link_config.flowctrl &= ~FLOW_CTRL_RX;
10498 if (epause->tx_pause)
10499 tp->link_config.flowctrl |= FLOW_CTRL_TX;
10500 else
10501 tp->link_config.flowctrl &= ~FLOW_CTRL_TX;
10503 if (netif_running(dev)) {
10504 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
10505 err = tg3_restart_hw(tp, 1);
10506 if (!err)
10507 tg3_netif_start(tp);
10510 tg3_full_unlock(tp);
10513 return err;
10516 static int tg3_get_sset_count(struct net_device *dev, int sset)
10518 switch (sset) {
10519 case ETH_SS_TEST:
10520 return TG3_NUM_TEST;
10521 case ETH_SS_STATS:
10522 return TG3_NUM_STATS;
10523 default:
10524 return -EOPNOTSUPP;
10528 static void tg3_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
10530 switch (stringset) {
10531 case ETH_SS_STATS:
10532 memcpy(buf, &ethtool_stats_keys, sizeof(ethtool_stats_keys));
10533 break;
10534 case ETH_SS_TEST:
10535 memcpy(buf, &ethtool_test_keys, sizeof(ethtool_test_keys));
10536 break;
10537 default:
10538 WARN_ON(1); /* we need a WARN() */
10539 break;
10543 static int tg3_set_phys_id(struct net_device *dev,
10544 enum ethtool_phys_id_state state)
10546 struct tg3 *tp = netdev_priv(dev);
10548 if (!netif_running(tp->dev))
10549 return -EAGAIN;
10551 switch (state) {
10552 case ETHTOOL_ID_ACTIVE:
10553 return 1; /* cycle on/off once per second */
10555 case ETHTOOL_ID_ON:
10556 tw32(MAC_LED_CTRL, LED_CTRL_LNKLED_OVERRIDE |
10557 LED_CTRL_1000MBPS_ON |
10558 LED_CTRL_100MBPS_ON |
10559 LED_CTRL_10MBPS_ON |
10560 LED_CTRL_TRAFFIC_OVERRIDE |
10561 LED_CTRL_TRAFFIC_BLINK |
10562 LED_CTRL_TRAFFIC_LED);
10563 break;
10565 case ETHTOOL_ID_OFF:
10566 tw32(MAC_LED_CTRL, LED_CTRL_LNKLED_OVERRIDE |
10567 LED_CTRL_TRAFFIC_OVERRIDE);
10568 break;
10570 case ETHTOOL_ID_INACTIVE:
10571 tw32(MAC_LED_CTRL, tp->led_ctrl);
10572 break;
10575 return 0;
10576 }
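/*
 * Editor's note (illustrative): returning 1 from the ETHTOOL_ID_ACTIVE
 * case asks the ethtool core to run the blink loop itself, calling back
 * with ETHTOOL_ID_ON/_OFF once per second for the duration of
 * "ethtool -p ethX [N]", and finally with ETHTOOL_ID_INACTIVE so
 * MAC_LED_CTRL can be restored from tp->led_ctrl.
 */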
10578 static void tg3_get_ethtool_stats(struct net_device *dev,
10579 struct ethtool_stats *estats, u64 *tmp_stats)
10581 struct tg3 *tp = netdev_priv(dev);
10582 memcpy(tmp_stats, tg3_get_estats(tp), sizeof(tp->estats));
10585 static __be32 *tg3_vpd_readblock(struct tg3 *tp, u32 *vpdlen)
10587 int i;
10588 __be32 *buf;
10589 u32 offset = 0, len = 0;
10590 u32 magic, val;
10592 if (tg3_flag(tp, NO_NVRAM) || tg3_nvram_read(tp, 0, &magic))
10593 return NULL;
10595 if (magic == TG3_EEPROM_MAGIC) {
10596 for (offset = TG3_NVM_DIR_START;
10597 offset < TG3_NVM_DIR_END;
10598 offset += TG3_NVM_DIRENT_SIZE) {
10599 if (tg3_nvram_read(tp, offset, &val))
10600 return NULL;
10602 if ((val >> TG3_NVM_DIRTYPE_SHIFT) ==
10603 TG3_NVM_DIRTYPE_EXTVPD)
10604 break;
10607 if (offset != TG3_NVM_DIR_END) {
10608 len = (val & TG3_NVM_DIRTYPE_LENMSK) * 4;
10609 if (tg3_nvram_read(tp, offset + 4, &offset))
10610 return NULL;
10612 offset = tg3_nvram_logical_addr(tp, offset);
10616 if (!offset || !len) {
10617 offset = TG3_NVM_VPD_OFF;
10618 len = TG3_NVM_VPD_LEN;
10621 buf = kmalloc(len, GFP_KERNEL);
10622 if (buf == NULL)
10623 return NULL;
10625 if (magic == TG3_EEPROM_MAGIC) {
10626 for (i = 0; i < len; i += 4) {
10627 /* The data is in little-endian format in NVRAM.
10628 * Use the big-endian read routines to preserve
10629 * the byte order as it exists in NVRAM.
10630 */
10631 if (tg3_nvram_read_be32(tp, offset + i, &buf[i/4]))
10632 goto error;
10634 } else {
10635 u8 *ptr;
10636 ssize_t cnt;
10637 unsigned int pos = 0;
10639 ptr = (u8 *)&buf[0];
10640 for (i = 0; pos < len && i < 3; i++, pos += cnt, ptr += cnt) {
10641 cnt = pci_read_vpd(tp->pdev, pos,
10642 len - pos, ptr);
10643 if (cnt == -ETIMEDOUT || cnt == -EINTR)
10644 cnt = 0;
10645 else if (cnt < 0)
10646 goto error;
10648 if (pos != len)
10649 goto error;
10652 *vpdlen = len;
10654 return buf;
10656 error:
10657 kfree(buf);
10658 return NULL;
10661 #define NVRAM_TEST_SIZE 0x100
10662 #define NVRAM_SELFBOOT_FORMAT1_0_SIZE 0x14
10663 #define NVRAM_SELFBOOT_FORMAT1_2_SIZE 0x18
10664 #define NVRAM_SELFBOOT_FORMAT1_3_SIZE 0x1c
10665 #define NVRAM_SELFBOOT_FORMAT1_4_SIZE 0x20
10666 #define NVRAM_SELFBOOT_FORMAT1_5_SIZE 0x24
10667 #define NVRAM_SELFBOOT_FORMAT1_6_SIZE 0x50
10668 #define NVRAM_SELFBOOT_HW_SIZE 0x20
10669 #define NVRAM_SELFBOOT_DATA_SIZE 0x1c
10671 static int tg3_test_nvram(struct tg3 *tp)
10673 u32 csum, magic, len;
10674 __be32 *buf;
10675 int i, j, k, err = 0, size;
10677 if (tg3_flag(tp, NO_NVRAM))
10678 return 0;
10680 if (tg3_nvram_read(tp, 0, &magic) != 0)
10681 return -EIO;
10683 if (magic == TG3_EEPROM_MAGIC)
10684 size = NVRAM_TEST_SIZE;
10685 else if ((magic & TG3_EEPROM_MAGIC_FW_MSK) == TG3_EEPROM_MAGIC_FW) {
10686 if ((magic & TG3_EEPROM_SB_FORMAT_MASK) ==
10687 TG3_EEPROM_SB_FORMAT_1) {
10688 switch (magic & TG3_EEPROM_SB_REVISION_MASK) {
10689 case TG3_EEPROM_SB_REVISION_0:
10690 size = NVRAM_SELFBOOT_FORMAT1_0_SIZE;
10691 break;
10692 case TG3_EEPROM_SB_REVISION_2:
10693 size = NVRAM_SELFBOOT_FORMAT1_2_SIZE;
10694 break;
10695 case TG3_EEPROM_SB_REVISION_3:
10696 size = NVRAM_SELFBOOT_FORMAT1_3_SIZE;
10697 break;
10698 case TG3_EEPROM_SB_REVISION_4:
10699 size = NVRAM_SELFBOOT_FORMAT1_4_SIZE;
10700 break;
10701 case TG3_EEPROM_SB_REVISION_5:
10702 size = NVRAM_SELFBOOT_FORMAT1_5_SIZE;
10703 break;
10704 case TG3_EEPROM_SB_REVISION_6:
10705 size = NVRAM_SELFBOOT_FORMAT1_6_SIZE;
10706 break;
10707 default:
10708 return -EIO;
10710 } else
10711 return 0;
10712 } else if ((magic & TG3_EEPROM_MAGIC_HW_MSK) == TG3_EEPROM_MAGIC_HW)
10713 size = NVRAM_SELFBOOT_HW_SIZE;
10714 else
10715 return -EIO;
10717 buf = kmalloc(size, GFP_KERNEL);
10718 if (buf == NULL)
10719 return -ENOMEM;
10721 err = -EIO;
10722 for (i = 0, j = 0; i < size; i += 4, j++) {
10723 err = tg3_nvram_read_be32(tp, i, &buf[j]);
10724 if (err)
10725 break;
10727 if (i < size)
10728 goto out;
10730 /* Selfboot format */
10731 magic = be32_to_cpu(buf[0]);
10732 if ((magic & TG3_EEPROM_MAGIC_FW_MSK) ==
10733 TG3_EEPROM_MAGIC_FW) {
10734 u8 *buf8 = (u8 *) buf, csum8 = 0;
10736 if ((magic & TG3_EEPROM_SB_REVISION_MASK) ==
10737 TG3_EEPROM_SB_REVISION_2) {
10738 /* For rev 2, the csum doesn't include the MBA. */
10739 for (i = 0; i < TG3_EEPROM_SB_F1R2_MBA_OFF; i++)
10740 csum8 += buf8[i];
10741 for (i = TG3_EEPROM_SB_F1R2_MBA_OFF + 4; i < size; i++)
10742 csum8 += buf8[i];
10743 } else {
10744 for (i = 0; i < size; i++)
10745 csum8 += buf8[i];
10748 if (csum8 == 0) {
10749 err = 0;
10750 goto out;
10753 err = -EIO;
10754 goto out;
10757 if ((magic & TG3_EEPROM_MAGIC_HW_MSK) ==
10758 TG3_EEPROM_MAGIC_HW) {
10759 u8 data[NVRAM_SELFBOOT_DATA_SIZE];
10760 u8 parity[NVRAM_SELFBOOT_DATA_SIZE];
10761 u8 *buf8 = (u8 *) buf;
10763 /* Separate the parity bits and the data bytes. */
10764 for (i = 0, j = 0, k = 0; i < NVRAM_SELFBOOT_HW_SIZE; i++) {
10765 if ((i == 0) || (i == 8)) {
10766 int l;
10767 u8 msk;
10769 for (l = 0, msk = 0x80; l < 7; l++, msk >>= 1)
10770 parity[k++] = buf8[i] & msk;
10771 i++;
10772 } else if (i == 16) {
10773 int l;
10774 u8 msk;
10776 for (l = 0, msk = 0x20; l < 6; l++, msk >>= 1)
10777 parity[k++] = buf8[i] & msk;
10778 i++;
10780 for (l = 0, msk = 0x80; l < 8; l++, msk >>= 1)
10781 parity[k++] = buf8[i] & msk;
10782 i++;
10784 data[j++] = buf8[i];
10787 err = -EIO;
10788 for (i = 0; i < NVRAM_SELFBOOT_DATA_SIZE; i++) {
10789 u8 hw8 = hweight8(data[i]);
10791 if ((hw8 & 0x1) && parity[i])
10792 goto out;
10793 else if (!(hw8 & 0x1) && !parity[i])
10794 goto out;
10796 err = 0;
10797 goto out;
10800 err = -EIO;
10802 /* Bootstrap checksum at offset 0x10 */
10803 csum = calc_crc((unsigned char *) buf, 0x10);
10804 if (csum != le32_to_cpu(buf[0x10/4]))
10805 goto out;
10807 /* Manufacturing block starts at offset 0x74, checksum at 0xfc */
10808 csum = calc_crc((unsigned char *) &buf[0x74/4], 0x88);
10809 if (csum != le32_to_cpu(buf[0xfc/4]))
10810 goto out;
10812 kfree(buf);
10814 buf = tg3_vpd_readblock(tp, &len);
10815 if (!buf)
10816 return -ENOMEM;
10818 i = pci_vpd_find_tag((u8 *)buf, 0, len, PCI_VPD_LRDT_RO_DATA);
10819 if (i > 0) {
10820 j = pci_vpd_lrdt_size(&((u8 *)buf)[i]);
10821 if (j < 0)
10822 goto out;
10824 if (i + PCI_VPD_LRDT_TAG_SIZE + j > len)
10825 goto out;
10827 i += PCI_VPD_LRDT_TAG_SIZE;
10828 j = pci_vpd_find_info_keyword((u8 *)buf, i, j,
10829 PCI_VPD_RO_KEYWORD_CHKSUM);
10830 if (j > 0) {
10831 u8 csum8 = 0;
10833 j += PCI_VPD_INFO_FLD_HDR_SIZE;
10835 for (i = 0; i <= j; i++)
10836 csum8 += ((u8 *)buf)[i];
10838 if (csum8)
10839 goto out;
10843 err = 0;
10845 out:
10846 kfree(buf);
10847 return err;
10848 }
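/*
 * Editor's sketch (not part of the driver): the 8-bit additive checksum
 * applied to the selfboot images above. An image verifies when the
 * byte-wise sum over the covered region, including the stored checksum
 * byte, is zero modulo 256.
 */
#include <stddef.h>
#include <stdint.h>

static uint8_t sk_csum8(const uint8_t *buf, size_t len)
{
	uint8_t sum = 0;
	size_t i;

	for (i = 0; i < len; i++)
		sum += buf[i];
	return sum;	/* 0 => image checksums correctly */
}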
10850 #define TG3_SERDES_TIMEOUT_SEC 2
10851 #define TG3_COPPER_TIMEOUT_SEC 6
10853 static int tg3_test_link(struct tg3 *tp)
10855 int i, max;
10857 if (!netif_running(tp->dev))
10858 return -ENODEV;
10860 if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES)
10861 max = TG3_SERDES_TIMEOUT_SEC;
10862 else
10863 max = TG3_COPPER_TIMEOUT_SEC;
10865 for (i = 0; i < max; i++) {
10866 if (netif_carrier_ok(tp->dev))
10867 return 0;
10869 if (msleep_interruptible(1000))
10870 break;
10873 return -EIO;
10876 /* Only test the commonly used registers */
10877 static int tg3_test_registers(struct tg3 *tp)
10879 int i, is_5705, is_5750;
10880 u32 offset, read_mask, write_mask, val, save_val, read_val;
10881 static struct {
10882 u16 offset;
10883 u16 flags;
10884 #define TG3_FL_5705 0x1
10885 #define TG3_FL_NOT_5705 0x2
10886 #define TG3_FL_NOT_5788 0x4
10887 #define TG3_FL_NOT_5750 0x8
10888 u32 read_mask;
10889 u32 write_mask;
10890 } reg_tbl[] = {
10891 /* MAC Control Registers */
10892 { MAC_MODE, TG3_FL_NOT_5705,
10893 0x00000000, 0x00ef6f8c },
10894 { MAC_MODE, TG3_FL_5705,
10895 0x00000000, 0x01ef6b8c },
10896 { MAC_STATUS, TG3_FL_NOT_5705,
10897 0x03800107, 0x00000000 },
10898 { MAC_STATUS, TG3_FL_5705,
10899 0x03800100, 0x00000000 },
10900 { MAC_ADDR_0_HIGH, 0x0000,
10901 0x00000000, 0x0000ffff },
10902 { MAC_ADDR_0_LOW, 0x0000,
10903 0x00000000, 0xffffffff },
10904 { MAC_RX_MTU_SIZE, 0x0000,
10905 0x00000000, 0x0000ffff },
10906 { MAC_TX_MODE, 0x0000,
10907 0x00000000, 0x00000070 },
10908 { MAC_TX_LENGTHS, 0x0000,
10909 0x00000000, 0x00003fff },
10910 { MAC_RX_MODE, TG3_FL_NOT_5705,
10911 0x00000000, 0x000007fc },
10912 { MAC_RX_MODE, TG3_FL_5705,
10913 0x00000000, 0x000007dc },
10914 { MAC_HASH_REG_0, 0x0000,
10915 0x00000000, 0xffffffff },
10916 { MAC_HASH_REG_1, 0x0000,
10917 0x00000000, 0xffffffff },
10918 { MAC_HASH_REG_2, 0x0000,
10919 0x00000000, 0xffffffff },
10920 { MAC_HASH_REG_3, 0x0000,
10921 0x00000000, 0xffffffff },
10923 /* Receive Data and Receive BD Initiator Control Registers. */
10924 { RCVDBDI_JUMBO_BD+0, TG3_FL_NOT_5705,
10925 0x00000000, 0xffffffff },
10926 { RCVDBDI_JUMBO_BD+4, TG3_FL_NOT_5705,
10927 0x00000000, 0xffffffff },
10928 { RCVDBDI_JUMBO_BD+8, TG3_FL_NOT_5705,
10929 0x00000000, 0x00000003 },
10930 { RCVDBDI_JUMBO_BD+0xc, TG3_FL_NOT_5705,
10931 0x00000000, 0xffffffff },
10932 { RCVDBDI_STD_BD+0, 0x0000,
10933 0x00000000, 0xffffffff },
10934 { RCVDBDI_STD_BD+4, 0x0000,
10935 0x00000000, 0xffffffff },
10936 { RCVDBDI_STD_BD+8, 0x0000,
10937 0x00000000, 0xffff0002 },
10938 { RCVDBDI_STD_BD+0xc, 0x0000,
10939 0x00000000, 0xffffffff },
10941 /* Receive BD Initiator Control Registers. */
10942 { RCVBDI_STD_THRESH, TG3_FL_NOT_5705,
10943 0x00000000, 0xffffffff },
10944 { RCVBDI_STD_THRESH, TG3_FL_5705,
10945 0x00000000, 0x000003ff },
10946 { RCVBDI_JUMBO_THRESH, TG3_FL_NOT_5705,
10947 0x00000000, 0xffffffff },
10949 /* Host Coalescing Control Registers. */
10950 { HOSTCC_MODE, TG3_FL_NOT_5705,
10951 0x00000000, 0x00000004 },
10952 { HOSTCC_MODE, TG3_FL_5705,
10953 0x00000000, 0x000000f6 },
10954 { HOSTCC_RXCOL_TICKS, TG3_FL_NOT_5705,
10955 0x00000000, 0xffffffff },
10956 { HOSTCC_RXCOL_TICKS, TG3_FL_5705,
10957 0x00000000, 0x000003ff },
10958 { HOSTCC_TXCOL_TICKS, TG3_FL_NOT_5705,
10959 0x00000000, 0xffffffff },
10960 { HOSTCC_TXCOL_TICKS, TG3_FL_5705,
10961 0x00000000, 0x000003ff },
10962 { HOSTCC_RXMAX_FRAMES, TG3_FL_NOT_5705,
10963 0x00000000, 0xffffffff },
10964 { HOSTCC_RXMAX_FRAMES, TG3_FL_5705 | TG3_FL_NOT_5788,
10965 0x00000000, 0x000000ff },
10966 { HOSTCC_TXMAX_FRAMES, TG3_FL_NOT_5705,
10967 0x00000000, 0xffffffff },
10968 { HOSTCC_TXMAX_FRAMES, TG3_FL_5705 | TG3_FL_NOT_5788,
10969 0x00000000, 0x000000ff },
10970 { HOSTCC_RXCOAL_TICK_INT, TG3_FL_NOT_5705,
10971 0x00000000, 0xffffffff },
10972 { HOSTCC_TXCOAL_TICK_INT, TG3_FL_NOT_5705,
10973 0x00000000, 0xffffffff },
10974 { HOSTCC_RXCOAL_MAXF_INT, TG3_FL_NOT_5705,
10975 0x00000000, 0xffffffff },
10976 { HOSTCC_RXCOAL_MAXF_INT, TG3_FL_5705 | TG3_FL_NOT_5788,
10977 0x00000000, 0x000000ff },
10978 { HOSTCC_TXCOAL_MAXF_INT, TG3_FL_NOT_5705,
10979 0x00000000, 0xffffffff },
10980 { HOSTCC_TXCOAL_MAXF_INT, TG3_FL_5705 | TG3_FL_NOT_5788,
10981 0x00000000, 0x000000ff },
10982 { HOSTCC_STAT_COAL_TICKS, TG3_FL_NOT_5705,
10983 0x00000000, 0xffffffff },
10984 { HOSTCC_STATS_BLK_HOST_ADDR, TG3_FL_NOT_5705,
10985 0x00000000, 0xffffffff },
10986 { HOSTCC_STATS_BLK_HOST_ADDR+4, TG3_FL_NOT_5705,
10987 0x00000000, 0xffffffff },
10988 { HOSTCC_STATUS_BLK_HOST_ADDR, 0x0000,
10989 0x00000000, 0xffffffff },
10990 { HOSTCC_STATUS_BLK_HOST_ADDR+4, 0x0000,
10991 0x00000000, 0xffffffff },
10992 { HOSTCC_STATS_BLK_NIC_ADDR, 0x0000,
10993 0xffffffff, 0x00000000 },
10994 { HOSTCC_STATUS_BLK_NIC_ADDR, 0x0000,
10995 0xffffffff, 0x00000000 },
10997 /* Buffer Manager Control Registers. */
10998 { BUFMGR_MB_POOL_ADDR, TG3_FL_NOT_5750,
10999 0x00000000, 0x007fff80 },
11000 { BUFMGR_MB_POOL_SIZE, TG3_FL_NOT_5750,
11001 0x00000000, 0x007fffff },
11002 { BUFMGR_MB_RDMA_LOW_WATER, 0x0000,
11003 0x00000000, 0x0000003f },
11004 { BUFMGR_MB_MACRX_LOW_WATER, 0x0000,
11005 0x00000000, 0x000001ff },
11006 { BUFMGR_MB_HIGH_WATER, 0x0000,
11007 0x00000000, 0x000001ff },
11008 { BUFMGR_DMA_DESC_POOL_ADDR, TG3_FL_NOT_5705,
11009 0xffffffff, 0x00000000 },
11010 { BUFMGR_DMA_DESC_POOL_SIZE, TG3_FL_NOT_5705,
11011 0xffffffff, 0x00000000 },
11013 /* Mailbox Registers */
11014 { GRCMBOX_RCVSTD_PROD_IDX+4, 0x0000,
11015 0x00000000, 0x000001ff },
11016 { GRCMBOX_RCVJUMBO_PROD_IDX+4, TG3_FL_NOT_5705,
11017 0x00000000, 0x000001ff },
11018 { GRCMBOX_RCVRET_CON_IDX_0+4, 0x0000,
11019 0x00000000, 0x000007ff },
11020 { GRCMBOX_SNDHOST_PROD_IDX_0+4, 0x0000,
11021 0x00000000, 0x000001ff },
11023 { 0xffff, 0x0000, 0x00000000, 0x00000000 },
11026 is_5705 = is_5750 = 0;
11027 if (tg3_flag(tp, 5705_PLUS)) {
11028 is_5705 = 1;
11029 if (tg3_flag(tp, 5750_PLUS))
11030 is_5750 = 1;
11033 for (i = 0; reg_tbl[i].offset != 0xffff; i++) {
11034 if (is_5705 && (reg_tbl[i].flags & TG3_FL_NOT_5705))
11035 continue;
11037 if (!is_5705 && (reg_tbl[i].flags & TG3_FL_5705))
11038 continue;
11040 if (tg3_flag(tp, IS_5788) &&
11041 (reg_tbl[i].flags & TG3_FL_NOT_5788))
11042 continue;
11044 if (is_5750 && (reg_tbl[i].flags & TG3_FL_NOT_5750))
11045 continue;
11047 offset = (u32) reg_tbl[i].offset;
11048 read_mask = reg_tbl[i].read_mask;
11049 write_mask = reg_tbl[i].write_mask;
11051 /* Save the original register content */
11052 save_val = tr32(offset);
11054 /* Determine the read-only value. */
11055 read_val = save_val & read_mask;
11057 /* Write zero to the register, then make sure the read-only bits
11058 * are not changed and the read/write bits are all zeros.
11059 */
11060 tw32(offset, 0);
11062 val = tr32(offset);
11064 /* Test the read-only and read/write bits. */
11065 if (((val & read_mask) != read_val) || (val & write_mask))
11066 goto out;
11068 /* Write ones to all the bits defined by RdMask and WrMask, then
11069 * make sure the read-only bits are not changed and the
11070 * read/write bits are all ones.
11071 */
11072 tw32(offset, read_mask | write_mask);
11074 val = tr32(offset);
11076 /* Test the read-only bits. */
11077 if ((val & read_mask) != read_val)
11078 goto out;
11080 /* Test the read/write bits. */
11081 if ((val & write_mask) != write_mask)
11082 goto out;
11084 tw32(offset, save_val);
11087 return 0;
11089 out:
11090 if (netif_msg_hw(tp))
11091 netdev_err(tp->dev,
11092 "Register test failed at offset %x\n", offset);
11093 tw32(offset, save_val);
11094 return -EIO;
11095 }
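/*
 * Editor's sketch (not part of the driver): the mask-driven register test
 * performed above, abstracted over the register accessors. read_mask
 * selects read-only bits that must hold their value; write_mask selects
 * read/write bits that must accept both all-zeros and all-ones.
 */
#include <stdint.h>

static int sk_test_reg(uint32_t (*rd)(uint32_t off),
		       void (*wr)(uint32_t off, uint32_t val),
		       uint32_t off, uint32_t read_mask, uint32_t write_mask)
{
	uint32_t save = rd(off);
	uint32_t ro = save & read_mask;
	uint32_t val;
	int ok;

	wr(off, 0);
	val = rd(off);
	ok = (val & read_mask) == ro && !(val & write_mask);

	wr(off, read_mask | write_mask);
	val = rd(off);
	ok = ok && (val & read_mask) == ro &&
	     (val & write_mask) == write_mask;

	wr(off, save);	/* restore the original contents either way */
	return ok ? 0 : -1;
}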
11097 static int tg3_do_mem_test(struct tg3 *tp, u32 offset, u32 len)
11099 static const u32 test_pattern[] = { 0x00000000, 0xffffffff, 0xaa55a55a };
11100 int i;
11101 u32 j;
11103 for (i = 0; i < ARRAY_SIZE(test_pattern); i++) {
11104 for (j = 0; j < len; j += 4) {
11105 u32 val;
11107 tg3_write_mem(tp, offset + j, test_pattern[i]);
11108 tg3_read_mem(tp, offset + j, &val);
11109 if (val != test_pattern[i])
11110 return -EIO;
11113 return 0;
11114 }
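/*
 * Editor's note (illustrative): the three patterns drive every memory cell
 * to all-zeros, all-ones, and a mixed 0xaa55a55a value, catching bits stuck
 * in either state as well as simple coupling between adjacent bits.
 */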
11116 static int tg3_test_memory(struct tg3 *tp)
11118 static struct mem_entry {
11119 u32 offset;
11120 u32 len;
11121 } mem_tbl_570x[] = {
11122 { 0x00000000, 0x00b50},
11123 { 0x00002000, 0x1c000},
11124 { 0xffffffff, 0x00000}
11125 }, mem_tbl_5705[] = {
11126 { 0x00000100, 0x0000c},
11127 { 0x00000200, 0x00008},
11128 { 0x00004000, 0x00800},
11129 { 0x00006000, 0x01000},
11130 { 0x00008000, 0x02000},
11131 { 0x00010000, 0x0e000},
11132 { 0xffffffff, 0x00000}
11133 }, mem_tbl_5755[] = {
11134 { 0x00000200, 0x00008},
11135 { 0x00004000, 0x00800},
11136 { 0x00006000, 0x00800},
11137 { 0x00008000, 0x02000},
11138 { 0x00010000, 0x0c000},
11139 { 0xffffffff, 0x00000}
11140 }, mem_tbl_5906[] = {
11141 { 0x00000200, 0x00008},
11142 { 0x00004000, 0x00400},
11143 { 0x00006000, 0x00400},
11144 { 0x00008000, 0x01000},
11145 { 0x00010000, 0x01000},
11146 { 0xffffffff, 0x00000}
11147 }, mem_tbl_5717[] = {
11148 { 0x00000200, 0x00008},
11149 { 0x00010000, 0x0a000},
11150 { 0x00020000, 0x13c00},
11151 { 0xffffffff, 0x00000}
11152 }, mem_tbl_57765[] = {
11153 { 0x00000200, 0x00008},
11154 { 0x00004000, 0x00800},
11155 { 0x00006000, 0x09800},
11156 { 0x00010000, 0x0a000},
11157 { 0xffffffff, 0x00000}
11159 struct mem_entry *mem_tbl;
11160 int err = 0;
11161 int i;
11163 if (tg3_flag(tp, 5717_PLUS))
11164 mem_tbl = mem_tbl_5717;
11165 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765)
11166 mem_tbl = mem_tbl_57765;
11167 else if (tg3_flag(tp, 5755_PLUS))
11168 mem_tbl = mem_tbl_5755;
11169 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
11170 mem_tbl = mem_tbl_5906;
11171 else if (tg3_flag(tp, 5705_PLUS))
11172 mem_tbl = mem_tbl_5705;
11173 else
11174 mem_tbl = mem_tbl_570x;
11176 for (i = 0; mem_tbl[i].offset != 0xffffffff; i++) {
11177 err = tg3_do_mem_test(tp, mem_tbl[i].offset, mem_tbl[i].len);
11178 if (err)
11179 break;
11182 return err;
11185 #define TG3_MAC_LOOPBACK 0
11186 #define TG3_PHY_LOOPBACK 1
11187 #define TG3_TSO_LOOPBACK 2
11189 #define TG3_TSO_MSS 500
11191 #define TG3_TSO_IP_HDR_LEN 20
11192 #define TG3_TSO_TCP_HDR_LEN 20
11193 #define TG3_TSO_TCP_OPT_LEN 12
11195 static const u8 tg3_tso_header[] = {
11196 0x08, 0x00,
11197 0x45, 0x00, 0x00, 0x00,
11198 0x00, 0x00, 0x40, 0x00,
11199 0x40, 0x06, 0x00, 0x00,
11200 0x0a, 0x00, 0x00, 0x01,
11201 0x0a, 0x00, 0x00, 0x02,
11202 0x0d, 0x00, 0xe0, 0x00,
11203 0x00, 0x00, 0x01, 0x00,
11204 0x00, 0x00, 0x02, 0x00,
11205 0x80, 0x10, 0x10, 0x00,
11206 0x14, 0x09, 0x00, 0x00,
11207 0x01, 0x01, 0x08, 0x0a,
11208 0x11, 0x11, 0x11, 0x11,
11209 0x11, 0x11, 0x11, 0x11,
11210 };
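/*
 * Editor's note (illustrative): decoded, the template above is an
 * Ethertype (0x0800, IPv4), a 20-byte IPv4 header (10.0.0.1 -> 10.0.0.2,
 * protocol 6/TCP, tot_len patched in later), and a 32-byte TCP header
 * (data offset 8 words: 20 bytes plus a 12-byte options area holding
 * NOP, NOP and a timestamp option), matching TG3_TSO_IP_HDR_LEN,
 * TG3_TSO_TCP_HDR_LEN and TG3_TSO_TCP_OPT_LEN.
 */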
11212 static int tg3_run_loopback(struct tg3 *tp, u32 pktsz, int loopback_mode)
11214 u32 mac_mode, rx_start_idx, rx_idx, tx_idx, opaque_key;
11215 u32 base_flags = 0, mss = 0, desc_idx, coal_now, data_off, val;
11216 u32 budget;
11217 struct sk_buff *skb, *rx_skb;
11218 u8 *tx_data;
11219 dma_addr_t map;
11220 int num_pkts, tx_len, rx_len, i, err;
11221 struct tg3_rx_buffer_desc *desc;
11222 struct tg3_napi *tnapi, *rnapi;
11223 struct tg3_rx_prodring_set *tpr = &tp->napi[0].prodring;
11225 tnapi = &tp->napi[0];
11226 rnapi = &tp->napi[0];
11227 if (tp->irq_cnt > 1) {
11228 if (tg3_flag(tp, ENABLE_RSS))
11229 rnapi = &tp->napi[1];
11230 if (tg3_flag(tp, ENABLE_TSS))
11231 tnapi = &tp->napi[1];
11233 coal_now = tnapi->coal_now | rnapi->coal_now;
11235 if (loopback_mode == TG3_MAC_LOOPBACK) {
11236 /* HW errata - mac loopback fails in some cases on 5780.
11237 * Normal traffic and PHY loopback are not affected by
11238 * errata. Also, the MAC loopback test is deprecated for
11239 * all newer ASIC revisions.
11240 */
11241 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780 ||
11242 tg3_flag(tp, CPMU_PRESENT))
11243 return 0;
11245 mac_mode = tp->mac_mode &
11246 ~(MAC_MODE_PORT_MODE_MASK | MAC_MODE_HALF_DUPLEX);
11247 mac_mode |= MAC_MODE_PORT_INT_LPBACK;
11248 if (!tg3_flag(tp, 5705_PLUS))
11249 mac_mode |= MAC_MODE_LINK_POLARITY;
11250 if (tp->phy_flags & TG3_PHYFLG_10_100_ONLY)
11251 mac_mode |= MAC_MODE_PORT_MODE_MII;
11252 else
11253 mac_mode |= MAC_MODE_PORT_MODE_GMII;
11254 tw32(MAC_MODE, mac_mode);
11255 } else {
11256 if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
11257 tg3_phy_fet_toggle_apd(tp, false);
11258 val = BMCR_LOOPBACK | BMCR_FULLDPLX | BMCR_SPEED100;
11259 } else
11260 val = BMCR_LOOPBACK | BMCR_FULLDPLX | BMCR_SPEED1000;
11262 tg3_phy_toggle_automdix(tp, 0);
11264 tg3_writephy(tp, MII_BMCR, val);
11265 udelay(40);
11267 mac_mode = tp->mac_mode &
11268 ~(MAC_MODE_PORT_MODE_MASK | MAC_MODE_HALF_DUPLEX);
11269 if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
11270 tg3_writephy(tp, MII_TG3_FET_PTEST,
11271 MII_TG3_FET_PTEST_FRC_TX_LINK |
11272 MII_TG3_FET_PTEST_FRC_TX_LOCK);
11273 /* The write needs to be flushed for the AC131 */
11274 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
11275 tg3_readphy(tp, MII_TG3_FET_PTEST, &val);
11276 mac_mode |= MAC_MODE_PORT_MODE_MII;
11277 } else
11278 mac_mode |= MAC_MODE_PORT_MODE_GMII;
11280 /* reset to prevent losing 1st rx packet intermittently */
11281 if (tp->phy_flags & TG3_PHYFLG_MII_SERDES) {
11282 tw32_f(MAC_RX_MODE, RX_MODE_RESET);
11283 udelay(10);
11284 tw32_f(MAC_RX_MODE, tp->rx_mode);
11286 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700) {
11287 u32 masked_phy_id = tp->phy_id & TG3_PHY_ID_MASK;
11288 if (masked_phy_id == TG3_PHY_ID_BCM5401)
11289 mac_mode &= ~MAC_MODE_LINK_POLARITY;
11290 else if (masked_phy_id == TG3_PHY_ID_BCM5411)
11291 mac_mode |= MAC_MODE_LINK_POLARITY;
11292 tg3_writephy(tp, MII_TG3_EXT_CTRL,
11293 MII_TG3_EXT_CTRL_LNK3_LED_MODE);
11295 tw32(MAC_MODE, mac_mode);
11297 /* Wait for link */
11298 for (i = 0; i < 100; i++) {
11299 if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP)
11300 break;
11301 mdelay(1);
11305 err = -EIO;
11307 tx_len = pktsz;
11308 skb = netdev_alloc_skb(tp->dev, tx_len);
11309 if (!skb)
11310 return -ENOMEM;
11312 tx_data = skb_put(skb, tx_len);
11313 memcpy(tx_data, tp->dev->dev_addr, 6);
11314 memset(tx_data + 6, 0x0, 8);
11316 tw32(MAC_RX_MTU_SIZE, tx_len + ETH_FCS_LEN);
11318 if (loopback_mode == TG3_TSO_LOOPBACK) {
11319 struct iphdr *iph = (struct iphdr *)&tx_data[ETH_HLEN];
11321 u32 hdr_len = TG3_TSO_IP_HDR_LEN + TG3_TSO_TCP_HDR_LEN +
11322 TG3_TSO_TCP_OPT_LEN;
11324 memcpy(tx_data + ETH_ALEN * 2, tg3_tso_header,
11325 sizeof(tg3_tso_header));
11326 mss = TG3_TSO_MSS;
11328 val = tx_len - ETH_ALEN * 2 - sizeof(tg3_tso_header);
11329 num_pkts = DIV_ROUND_UP(val, TG3_TSO_MSS);
11331 /* Set the total length field in the IP header */
11332 iph->tot_len = htons((u16)(mss + hdr_len));
11334 base_flags = (TXD_FLAG_CPU_PRE_DMA |
11335 TXD_FLAG_CPU_POST_DMA);
11337 if (tg3_flag(tp, HW_TSO_1) ||
11338 tg3_flag(tp, HW_TSO_2) ||
11339 tg3_flag(tp, HW_TSO_3)) {
11340 struct tcphdr *th;
11341 val = ETH_HLEN + TG3_TSO_IP_HDR_LEN;
11342 th = (struct tcphdr *)&tx_data[val];
11343 th->check = 0;
11344 } else
11345 base_flags |= TXD_FLAG_TCPUDP_CSUM;
11347 if (tg3_flag(tp, HW_TSO_3)) {
11348 mss |= (hdr_len & 0xc) << 12;
11349 if (hdr_len & 0x10)
11350 base_flags |= 0x00000010;
11351 base_flags |= (hdr_len & 0x3e0) << 5;
11352 } else if (tg3_flag(tp, HW_TSO_2))
11353 mss |= hdr_len << 9;
11354 else if (tg3_flag(tp, HW_TSO_1) ||
11355 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
11356 mss |= (TG3_TSO_TCP_OPT_LEN << 9);
11357 } else {
11358 base_flags |= (TG3_TSO_TCP_OPT_LEN << 10);
11361 data_off = ETH_ALEN * 2 + sizeof(tg3_tso_header);
11362 } else {
11363 num_pkts = 1;
11364 data_off = ETH_HLEN;
11367 for (i = data_off; i < tx_len; i++)
11368 tx_data[i] = (u8) (i & 0xff);
11370 map = pci_map_single(tp->pdev, skb->data, tx_len, PCI_DMA_TODEVICE);
11371 if (pci_dma_mapping_error(tp->pdev, map)) {
11372 dev_kfree_skb(skb);
11373 return -EIO;
11376 val = tnapi->tx_prod;
11377 tnapi->tx_buffers[val].skb = skb;
11378 dma_unmap_addr_set(&tnapi->tx_buffers[val], mapping, map);
11380 tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
11381 rnapi->coal_now);
11383 udelay(10);
11385 rx_start_idx = rnapi->hw_status->idx[0].rx_producer;
11387 budget = tg3_tx_avail(tnapi);
11388 if (tg3_tx_frag_set(tnapi, &val, &budget, map, tx_len,
11389 base_flags | TXD_FLAG_END, mss, 0)) {
11390 tnapi->tx_buffers[val].skb = NULL;
11391 dev_kfree_skb(skb);
11392 return -EIO;
11395 tnapi->tx_prod++;
11397 tw32_tx_mbox(tnapi->prodmbox, tnapi->tx_prod);
11398 tr32_mailbox(tnapi->prodmbox);
11400 udelay(10);
11402 /* 350 usec to allow enough time on some 10/100 Mbps devices. */
11403 for (i = 0; i < 35; i++) {
11404 tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
11405 coal_now);
11407 udelay(10);
11409 tx_idx = tnapi->hw_status->idx[0].tx_consumer;
11410 rx_idx = rnapi->hw_status->idx[0].rx_producer;
11411 if ((tx_idx == tnapi->tx_prod) &&
11412 (rx_idx == (rx_start_idx + num_pkts)))
11413 break;
11416 tg3_tx_skb_unmap(tnapi, tnapi->tx_prod - 1, 0);
11417 dev_kfree_skb(skb);
11419 if (tx_idx != tnapi->tx_prod)
11420 goto out;
11422 if (rx_idx != rx_start_idx + num_pkts)
11423 goto out;
11425 val = data_off;
11426 while (rx_idx != rx_start_idx) {
11427 desc = &rnapi->rx_rcb[rx_start_idx++];
11428 desc_idx = desc->opaque & RXD_OPAQUE_INDEX_MASK;
11429 opaque_key = desc->opaque & RXD_OPAQUE_RING_MASK;
11431 if ((desc->err_vlan & RXD_ERR_MASK) != 0 &&
11432 (desc->err_vlan != RXD_ERR_ODD_NIBBLE_RCVD_MII))
11433 goto out;
11435 rx_len = ((desc->idx_len & RXD_LEN_MASK) >> RXD_LEN_SHIFT)
11436 - ETH_FCS_LEN;
11438 if (loopback_mode != TG3_TSO_LOOPBACK) {
11439 if (rx_len != tx_len)
11440 goto out;
11442 if (pktsz <= TG3_RX_STD_DMA_SZ - ETH_FCS_LEN) {
11443 if (opaque_key != RXD_OPAQUE_RING_STD)
11444 goto out;
11445 } else {
11446 if (opaque_key != RXD_OPAQUE_RING_JUMBO)
11447 goto out;
11449 } else if ((desc->type_flags & RXD_FLAG_TCPUDP_CSUM) &&
11450 (desc->ip_tcp_csum & RXD_TCPCSUM_MASK)
11451 >> RXD_TCPCSUM_SHIFT != 0xffff) {
11452 goto out;
11455 if (opaque_key == RXD_OPAQUE_RING_STD) {
11456 rx_skb = tpr->rx_std_buffers[desc_idx].skb;
11457 map = dma_unmap_addr(&tpr->rx_std_buffers[desc_idx],
11458 mapping);
11459 } else if (opaque_key == RXD_OPAQUE_RING_JUMBO) {
11460 rx_skb = tpr->rx_jmb_buffers[desc_idx].skb;
11461 map = dma_unmap_addr(&tpr->rx_jmb_buffers[desc_idx],
11462 mapping);
11463 } else
11464 goto out;
11466 pci_dma_sync_single_for_cpu(tp->pdev, map, rx_len,
11467 PCI_DMA_FROMDEVICE);
11469 for (i = data_off; i < rx_len; i++, val++) {
11470 if (*(rx_skb->data + i) != (u8) (val & 0xff))
11471 goto out;
11475 err = 0;
11477 /* tg3_free_rings will unmap and free the rx_skb */
11478 out:
11479 return err;
11482 #define TG3_STD_LOOPBACK_FAILED 1
11483 #define TG3_JMB_LOOPBACK_FAILED 2
11484 #define TG3_TSO_LOOPBACK_FAILED 4
11486 #define TG3_MAC_LOOPBACK_SHIFT 0
11487 #define TG3_PHY_LOOPBACK_SHIFT 4
11488 #define TG3_LOOPBACK_FAILED 0x00000077
11490 static int tg3_test_loopback(struct tg3 *tp)
11492 int err = 0;
11493 u32 eee_cap, cpmuctrl = 0;
11495 if (!netif_running(tp->dev))
11496 return TG3_LOOPBACK_FAILED;
11498 eee_cap = tp->phy_flags & TG3_PHYFLG_EEE_CAP;
11499 tp->phy_flags &= ~TG3_PHYFLG_EEE_CAP;
11501 err = tg3_reset_hw(tp, 1);
11502 if (err) {
11503 err = TG3_LOOPBACK_FAILED;
11504 goto done;
11507 if (tg3_flag(tp, ENABLE_RSS)) {
11508 int i;
11510 /* Reroute all rx packets to the 1st queue */
11511 for (i = MAC_RSS_INDIR_TBL_0;
11512 i < MAC_RSS_INDIR_TBL_0 + TG3_RSS_INDIR_TBL_SIZE; i += 4)
11513 tw32(i, 0x0);
11516 /* Turn off gphy autopowerdown. */
11517 if (tp->phy_flags & TG3_PHYFLG_ENABLE_APD)
11518 tg3_phy_toggle_apd(tp, false);
11520 if (tg3_flag(tp, CPMU_PRESENT)) {
11521 int i;
11522 u32 status;
11524 tw32(TG3_CPMU_MUTEX_REQ, CPMU_MUTEX_REQ_DRIVER);
11526 /* Wait for up to 40 microseconds to acquire lock. */
11527 for (i = 0; i < 4; i++) {
11528 status = tr32(TG3_CPMU_MUTEX_GNT);
11529 if (status == CPMU_MUTEX_GNT_DRIVER)
11530 break;
11531 udelay(10);
11534 if (status != CPMU_MUTEX_GNT_DRIVER) {
11535 err = TG3_LOOPBACK_FAILED;
11536 goto done;
11539 /* Turn off link-based power management. */
11540 cpmuctrl = tr32(TG3_CPMU_CTRL);
11541 tw32(TG3_CPMU_CTRL,
11542 cpmuctrl & ~(CPMU_CTRL_LINK_SPEED_MODE |
11543 CPMU_CTRL_LINK_AWARE_MODE));
11546 if (tg3_run_loopback(tp, ETH_FRAME_LEN, TG3_MAC_LOOPBACK))
11547 err |= TG3_STD_LOOPBACK_FAILED << TG3_MAC_LOOPBACK_SHIFT;
11549 if (tg3_flag(tp, JUMBO_RING_ENABLE) &&
11550 tg3_run_loopback(tp, 9000 + ETH_HLEN, TG3_MAC_LOOPBACK))
11551 err |= TG3_JMB_LOOPBACK_FAILED << TG3_MAC_LOOPBACK_SHIFT;
11553 if (tg3_flag(tp, CPMU_PRESENT)) {
11554 tw32(TG3_CPMU_CTRL, cpmuctrl);
11556 /* Release the mutex */
11557 tw32(TG3_CPMU_MUTEX_GNT, CPMU_MUTEX_GNT_DRIVER);
11560 if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
11561 !tg3_flag(tp, USE_PHYLIB)) {
11562 if (tg3_run_loopback(tp, ETH_FRAME_LEN, TG3_PHY_LOOPBACK))
11563 err |= TG3_STD_LOOPBACK_FAILED <<
11564 TG3_PHY_LOOPBACK_SHIFT;
11565 if (tg3_flag(tp, TSO_CAPABLE) &&
11566 tg3_run_loopback(tp, ETH_FRAME_LEN, TG3_TSO_LOOPBACK))
11567 err |= TG3_TSO_LOOPBACK_FAILED <<
11568 TG3_PHY_LOOPBACK_SHIFT;
11569 if (tg3_flag(tp, JUMBO_RING_ENABLE) &&
11570 tg3_run_loopback(tp, 9000 + ETH_HLEN, TG3_PHY_LOOPBACK))
11571 err |= TG3_JMB_LOOPBACK_FAILED <<
11572 TG3_PHY_LOOPBACK_SHIFT;
11575 /* Re-enable gphy autopowerdown. */
11576 if (tp->phy_flags & TG3_PHYFLG_ENABLE_APD)
11577 tg3_phy_toggle_apd(tp, true);
11579 done:
11580 tp->phy_flags |= eee_cap;
11582 return err;
11583 }
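/*
 * Editor's note (illustrative): the return value is a small bitmask.
 * Bits at TG3_MAC_LOOPBACK_SHIFT report the MAC loopback std/jumbo
 * sub-tests and bits at TG3_PHY_LOOPBACK_SHIFT the PHY std/TSO/jumbo
 * ones, each encoded with TG3_STD/TG3_TSO/TG3_JMB_LOOPBACK_FAILED;
 * 0x77 (TG3_LOOPBACK_FAILED) therefore marks every sub-test as failed.
 */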
11585 static void tg3_self_test(struct net_device *dev, struct ethtool_test *etest,
11586 u64 *data)
11588 struct tg3 *tp = netdev_priv(dev);
11590 if ((tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) &&
11591 tg3_power_up(tp)) {
11592 etest->flags |= ETH_TEST_FL_FAILED;
11593 memset(data, 1, sizeof(u64) * TG3_NUM_TEST);
11594 return;
11597 memset(data, 0, sizeof(u64) * TG3_NUM_TEST);
11599 if (tg3_test_nvram(tp) != 0) {
11600 etest->flags |= ETH_TEST_FL_FAILED;
11601 data[0] = 1;
11603 if (tg3_test_link(tp) != 0) {
11604 etest->flags |= ETH_TEST_FL_FAILED;
11605 data[1] = 1;
11607 if (etest->flags & ETH_TEST_FL_OFFLINE) {
11608 int err, err2 = 0, irq_sync = 0;
11610 if (netif_running(dev)) {
11611 tg3_phy_stop(tp);
11612 tg3_netif_stop(tp);
11613 irq_sync = 1;
11616 tg3_full_lock(tp, irq_sync);
11618 tg3_halt(tp, RESET_KIND_SUSPEND, 1);
11619 err = tg3_nvram_lock(tp);
11620 tg3_halt_cpu(tp, RX_CPU_BASE);
11621 if (!tg3_flag(tp, 5705_PLUS))
11622 tg3_halt_cpu(tp, TX_CPU_BASE);
11623 if (!err)
11624 tg3_nvram_unlock(tp);
11626 if (tp->phy_flags & TG3_PHYFLG_MII_SERDES)
11627 tg3_phy_reset(tp);
11629 if (tg3_test_registers(tp) != 0) {
11630 etest->flags |= ETH_TEST_FL_FAILED;
11631 data[2] = 1;
11633 if (tg3_test_memory(tp) != 0) {
11634 etest->flags |= ETH_TEST_FL_FAILED;
11635 data[3] = 1;
11637 if ((data[4] = tg3_test_loopback(tp)) != 0)
11638 etest->flags |= ETH_TEST_FL_FAILED;
11640 tg3_full_unlock(tp);
11642 if (tg3_test_interrupt(tp) != 0) {
11643 etest->flags |= ETH_TEST_FL_FAILED;
11644 data[5] = 1;
11647 tg3_full_lock(tp, 0);
11649 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
11650 if (netif_running(dev)) {
11651 tg3_flag_set(tp, INIT_COMPLETE);
11652 err2 = tg3_restart_hw(tp, 1);
11653 if (!err2)
11654 tg3_netif_start(tp);
11657 tg3_full_unlock(tp);
11659 if (irq_sync && !err2)
11660 tg3_phy_start(tp);
11662 if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
11663 tg3_power_down(tp);
11665 }
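/*
 * Editor's note (illustrative): the u64 results filled in above map as
 * data[0] nvram, data[1] link, data[2] registers, data[3] memory,
 * data[4] loopback bitmask, data[5] interrupt; the ordering must stay in
 * sync with ethtool_test_keys[] reported through get_strings(ETH_SS_TEST).
 */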
11667 static int tg3_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
11669 struct mii_ioctl_data *data = if_mii(ifr);
11670 struct tg3 *tp = netdev_priv(dev);
11671 int err;
11673 if (tg3_flag(tp, USE_PHYLIB)) {
11674 struct phy_device *phydev;
11675 if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
11676 return -EAGAIN;
11677 phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
11678 return phy_mii_ioctl(phydev, ifr, cmd);
11681 switch (cmd) {
11682 case SIOCGMIIPHY:
11683 data->phy_id = tp->phy_addr;
11685 /* fallthru */
11686 case SIOCGMIIREG: {
11687 u32 mii_regval;
11689 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
11690 break; /* We have no PHY */
11692 if (!netif_running(dev))
11693 return -EAGAIN;
11695 spin_lock_bh(&tp->lock);
11696 err = tg3_readphy(tp, data->reg_num & 0x1f, &mii_regval);
11697 spin_unlock_bh(&tp->lock);
11699 data->val_out = mii_regval;
11701 return err;
11704 case SIOCSMIIREG:
11705 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
11706 break; /* We have no PHY */
11708 if (!netif_running(dev))
11709 return -EAGAIN;
11711 spin_lock_bh(&tp->lock);
11712 err = tg3_writephy(tp, data->reg_num & 0x1f, data->val_in);
11713 spin_unlock_bh(&tp->lock);
11715 return err;
11717 default:
11718 /* do nothing */
11719 break;
11721 return -EOPNOTSUPP;
11722 }
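/*
 * Editor's sketch (not part of the driver): how userspace typically
 * reaches the SIOCGMIIPHY/SIOCGMIIREG paths above. Error handling is
 * trimmed and "eth0" would be a placeholder interface name.
 */
#include <string.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <linux/mii.h>
#include <linux/sockios.h>
#include <net/if.h>
#include <unistd.h>

static int sk_read_mii_reg(const char *ifname, int reg)
{
	struct ifreq ifr;
	struct mii_ioctl_data *mii = (struct mii_ioctl_data *)&ifr.ifr_data;
	int fd = socket(AF_INET, SOCK_DGRAM, 0);

	memset(&ifr, 0, sizeof(ifr));
	strncpy(ifr.ifr_name, ifname, IFNAMSIZ - 1);
	ioctl(fd, SIOCGMIIPHY, &ifr);	/* driver fills in mii->phy_id */
	mii->reg_num = reg;
	ioctl(fd, SIOCGMIIREG, &ifr);	/* driver fills in mii->val_out */
	close(fd);
	return mii->val_out;
}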
11724 static int tg3_get_coalesce(struct net_device *dev, struct ethtool_coalesce *ec)
11726 struct tg3 *tp = netdev_priv(dev);
11728 memcpy(ec, &tp->coal, sizeof(*ec));
11729 return 0;
11732 static int tg3_set_coalesce(struct net_device *dev, struct ethtool_coalesce *ec)
11734 struct tg3 *tp = netdev_priv(dev);
11735 u32 max_rxcoal_tick_int = 0, max_txcoal_tick_int = 0;
11736 u32 max_stat_coal_ticks = 0, min_stat_coal_ticks = 0;
11738 if (!tg3_flag(tp, 5705_PLUS)) {
11739 max_rxcoal_tick_int = MAX_RXCOAL_TICK_INT;
11740 max_txcoal_tick_int = MAX_TXCOAL_TICK_INT;
11741 max_stat_coal_ticks = MAX_STAT_COAL_TICKS;
11742 min_stat_coal_ticks = MIN_STAT_COAL_TICKS;
11745 if ((ec->rx_coalesce_usecs > MAX_RXCOL_TICKS) ||
11746 (ec->tx_coalesce_usecs > MAX_TXCOL_TICKS) ||
11747 (ec->rx_max_coalesced_frames > MAX_RXMAX_FRAMES) ||
11748 (ec->tx_max_coalesced_frames > MAX_TXMAX_FRAMES) ||
11749 (ec->rx_coalesce_usecs_irq > max_rxcoal_tick_int) ||
11750 (ec->tx_coalesce_usecs_irq > max_txcoal_tick_int) ||
11751 (ec->rx_max_coalesced_frames_irq > MAX_RXCOAL_MAXF_INT) ||
11752 (ec->tx_max_coalesced_frames_irq > MAX_TXCOAL_MAXF_INT) ||
11753 (ec->stats_block_coalesce_usecs > max_stat_coal_ticks) ||
11754 (ec->stats_block_coalesce_usecs < min_stat_coal_ticks))
11755 return -EINVAL;
11757 /* No rx interrupts will be generated if both are zero */
11758 if ((ec->rx_coalesce_usecs == 0) &&
11759 (ec->rx_max_coalesced_frames == 0))
11760 return -EINVAL;
11762 /* No tx interrupts will be generated if both are zero */
11763 if ((ec->tx_coalesce_usecs == 0) &&
11764 (ec->tx_max_coalesced_frames == 0))
11765 return -EINVAL;
11767 /* Only copy relevant parameters, ignore all others. */
11768 tp->coal.rx_coalesce_usecs = ec->rx_coalesce_usecs;
11769 tp->coal.tx_coalesce_usecs = ec->tx_coalesce_usecs;
11770 tp->coal.rx_max_coalesced_frames = ec->rx_max_coalesced_frames;
11771 tp->coal.tx_max_coalesced_frames = ec->tx_max_coalesced_frames;
11772 tp->coal.rx_coalesce_usecs_irq = ec->rx_coalesce_usecs_irq;
11773 tp->coal.tx_coalesce_usecs_irq = ec->tx_coalesce_usecs_irq;
11774 tp->coal.rx_max_coalesced_frames_irq = ec->rx_max_coalesced_frames_irq;
11775 tp->coal.tx_max_coalesced_frames_irq = ec->tx_max_coalesced_frames_irq;
11776 tp->coal.stats_block_coalesce_usecs = ec->stats_block_coalesce_usecs;
11778 if (netif_running(dev)) {
11779 tg3_full_lock(tp, 0);
11780 __tg3_set_coalesce(tp, &tp->coal);
11781 tg3_full_unlock(tp);
11783 return 0;
11784 }
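/*
 * Editor's note (illustrative): these fields correspond to
 * "ethtool -C ethX rx-usecs N rx-frames N tx-usecs N tx-frames N ...".
 * The two -EINVAL checks above mean at least one of the usecs/frames pair
 * must stay nonzero per direction, since zeroing both would leave that
 * direction with no interrupt source at all.
 */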
11786 static const struct ethtool_ops tg3_ethtool_ops = {
11787 .get_settings = tg3_get_settings,
11788 .set_settings = tg3_set_settings,
11789 .get_drvinfo = tg3_get_drvinfo,
11790 .get_regs_len = tg3_get_regs_len,
11791 .get_regs = tg3_get_regs,
11792 .get_wol = tg3_get_wol,
11793 .set_wol = tg3_set_wol,
11794 .get_msglevel = tg3_get_msglevel,
11795 .set_msglevel = tg3_set_msglevel,
11796 .nway_reset = tg3_nway_reset,
11797 .get_link = ethtool_op_get_link,
11798 .get_eeprom_len = tg3_get_eeprom_len,
11799 .get_eeprom = tg3_get_eeprom,
11800 .set_eeprom = tg3_set_eeprom,
11801 .get_ringparam = tg3_get_ringparam,
11802 .set_ringparam = tg3_set_ringparam,
11803 .get_pauseparam = tg3_get_pauseparam,
11804 .set_pauseparam = tg3_set_pauseparam,
11805 .self_test = tg3_self_test,
11806 .get_strings = tg3_get_strings,
11807 .set_phys_id = tg3_set_phys_id,
11808 .get_ethtool_stats = tg3_get_ethtool_stats,
11809 .get_coalesce = tg3_get_coalesce,
11810 .set_coalesce = tg3_set_coalesce,
11811 .get_sset_count = tg3_get_sset_count,
11812 };
11814 static void __devinit tg3_get_eeprom_size(struct tg3 *tp)
11816 u32 cursize, val, magic;
11818 tp->nvram_size = EEPROM_CHIP_SIZE;
11820 if (tg3_nvram_read(tp, 0, &magic) != 0)
11821 return;
11823 if ((magic != TG3_EEPROM_MAGIC) &&
11824 ((magic & TG3_EEPROM_MAGIC_FW_MSK) != TG3_EEPROM_MAGIC_FW) &&
11825 ((magic & TG3_EEPROM_MAGIC_HW_MSK) != TG3_EEPROM_MAGIC_HW))
11826 return;
11828 /*
11829 * Size the chip by reading offsets at increasing powers of two.
11830 * When we encounter our validation signature, we know the addressing
11831 * has wrapped around, and thus have our chip size.
11832 */
11833 cursize = 0x10;
11835 while (cursize < tp->nvram_size) {
11836 if (tg3_nvram_read(tp, cursize, &val) != 0)
11837 return;
11839 if (val == magic)
11840 break;
11842 cursize <<= 1;
11845 tp->nvram_size = cursize;
11846 }
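/*
 * Editor's note (illustrative): small EEPROM parts alias their address
 * space, so once cursize reaches the device size the read wraps back to
 * offset 0 and returns the magic value again; the first offset where that
 * happens is taken as the chip size.
 */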
11848 static void __devinit tg3_get_nvram_size(struct tg3 *tp)
11850 u32 val;
11852 if (tg3_flag(tp, NO_NVRAM) || tg3_nvram_read(tp, 0, &val) != 0)
11853 return;
11855 /* Selfboot format */
11856 if (val != TG3_EEPROM_MAGIC) {
11857 tg3_get_eeprom_size(tp);
11858 return;
11861 if (tg3_nvram_read(tp, 0xf0, &val) == 0) {
11862 if (val != 0) {
11863 /* This is confusing. We want to operate on the
11864 * 16-bit value at offset 0xf2. The tg3_nvram_read()
11865 * call will read from NVRAM and byteswap the data
11866 * according to the byteswapping settings for all
11867 * other register accesses. This ensures the data we
11868 * want will always reside in the lower 16-bits.
11869 * However, the data in NVRAM is in LE format, which
11870 * means the data from the NVRAM read will always be
11871 * opposite the endianness of the CPU. The 16-bit
11872 * byteswap then brings the data to CPU endianness.
11873 */
11874 tp->nvram_size = swab16((u16)(val & 0x0000ffff)) * 1024;
11875 return;
11876 }
11877 }
11878 tp->nvram_size = TG3_NVRAM_SIZE_512KB;
11879 }
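/*
 * Editor's sketch (not part of the driver): the 16-bit endianness fix-up
 * described in the comment above, demonstrated standalone. The byte-order-
 * preserving NVRAM read leaves the halfword opposite the CPU's endianness,
 * so a swab16() recovers the real size-in-KB value.
 */
#include <stdint.h>

static uint16_t sk_swab16(uint16_t v)
{
	return (uint16_t)((v << 8) | (v >> 8));
}

/* Example: a 512 KB part stores 512 (0x0200); the raw read yields the
 * byte-swapped 0x0002, and sk_swab16(0x0002) * 1024 == 512 KB. */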
11881 static void __devinit tg3_get_nvram_info(struct tg3 *tp)
11883 u32 nvcfg1;
11885 nvcfg1 = tr32(NVRAM_CFG1);
11886 if (nvcfg1 & NVRAM_CFG1_FLASHIF_ENAB) {
11887 tg3_flag_set(tp, FLASH);
11888 } else {
11889 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
11890 tw32(NVRAM_CFG1, nvcfg1);
11893 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750 ||
11894 tg3_flag(tp, 5780_CLASS)) {
11895 switch (nvcfg1 & NVRAM_CFG1_VENDOR_MASK) {
11896 case FLASH_VENDOR_ATMEL_FLASH_BUFFERED:
11897 tp->nvram_jedecnum = JEDEC_ATMEL;
11898 tp->nvram_pagesize = ATMEL_AT45DB0X1B_PAGE_SIZE;
11899 tg3_flag_set(tp, NVRAM_BUFFERED);
11900 break;
11901 case FLASH_VENDOR_ATMEL_FLASH_UNBUFFERED:
11902 tp->nvram_jedecnum = JEDEC_ATMEL;
11903 tp->nvram_pagesize = ATMEL_AT25F512_PAGE_SIZE;
11904 break;
11905 case FLASH_VENDOR_ATMEL_EEPROM:
11906 tp->nvram_jedecnum = JEDEC_ATMEL;
11907 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
11908 tg3_flag_set(tp, NVRAM_BUFFERED);
11909 break;
11910 case FLASH_VENDOR_ST:
11911 tp->nvram_jedecnum = JEDEC_ST;
11912 tp->nvram_pagesize = ST_M45PEX0_PAGE_SIZE;
11913 tg3_flag_set(tp, NVRAM_BUFFERED);
11914 break;
11915 case FLASH_VENDOR_SAIFUN:
11916 tp->nvram_jedecnum = JEDEC_SAIFUN;
11917 tp->nvram_pagesize = SAIFUN_SA25F0XX_PAGE_SIZE;
11918 break;
11919 case FLASH_VENDOR_SST_SMALL:
11920 case FLASH_VENDOR_SST_LARGE:
11921 tp->nvram_jedecnum = JEDEC_SST;
11922 tp->nvram_pagesize = SST_25VF0X0_PAGE_SIZE;
11923 break;
11925 } else {
11926 tp->nvram_jedecnum = JEDEC_ATMEL;
11927 tp->nvram_pagesize = ATMEL_AT45DB0X1B_PAGE_SIZE;
11928 tg3_flag_set(tp, NVRAM_BUFFERED);
11932 static void __devinit tg3_nvram_get_pagesize(struct tg3 *tp, u32 nvmcfg1)
11934 switch (nvmcfg1 & NVRAM_CFG1_5752PAGE_SIZE_MASK) {
11935 case FLASH_5752PAGE_SIZE_256:
11936 tp->nvram_pagesize = 256;
11937 break;
11938 case FLASH_5752PAGE_SIZE_512:
11939 tp->nvram_pagesize = 512;
11940 break;
11941 case FLASH_5752PAGE_SIZE_1K:
11942 tp->nvram_pagesize = 1024;
11943 break;
11944 case FLASH_5752PAGE_SIZE_2K:
11945 tp->nvram_pagesize = 2048;
11946 break;
11947 case FLASH_5752PAGE_SIZE_4K:
11948 tp->nvram_pagesize = 4096;
11949 break;
11950 case FLASH_5752PAGE_SIZE_264:
11951 tp->nvram_pagesize = 264;
11952 break;
11953 case FLASH_5752PAGE_SIZE_528:
11954 tp->nvram_pagesize = 528;
11955 break;
11959 static void __devinit tg3_get_5752_nvram_info(struct tg3 *tp)
11961 u32 nvcfg1;
11963 nvcfg1 = tr32(NVRAM_CFG1);
11965 /* NVRAM protection for TPM */
11966 if (nvcfg1 & (1 << 27))
11967 tg3_flag_set(tp, PROTECTED_NVRAM);
11969 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
11970 case FLASH_5752VENDOR_ATMEL_EEPROM_64KHZ:
11971 case FLASH_5752VENDOR_ATMEL_EEPROM_376KHZ:
11972 tp->nvram_jedecnum = JEDEC_ATMEL;
11973 tg3_flag_set(tp, NVRAM_BUFFERED);
11974 break;
11975 case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
11976 tp->nvram_jedecnum = JEDEC_ATMEL;
11977 tg3_flag_set(tp, NVRAM_BUFFERED);
11978 tg3_flag_set(tp, FLASH);
11979 break;
11980 case FLASH_5752VENDOR_ST_M45PE10:
11981 case FLASH_5752VENDOR_ST_M45PE20:
11982 case FLASH_5752VENDOR_ST_M45PE40:
11983 tp->nvram_jedecnum = JEDEC_ST;
11984 tg3_flag_set(tp, NVRAM_BUFFERED);
11985 tg3_flag_set(tp, FLASH);
11986 break;
11989 if (tg3_flag(tp, FLASH)) {
11990 tg3_nvram_get_pagesize(tp, nvcfg1);
11991 } else {
11992 /* For eeprom, set pagesize to maximum eeprom size */
11993 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
11995 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
11996 tw32(NVRAM_CFG1, nvcfg1);
12000 static void __devinit tg3_get_5755_nvram_info(struct tg3 *tp)
12002 u32 nvcfg1, protect = 0;
12004 nvcfg1 = tr32(NVRAM_CFG1);
12006 /* NVRAM protection for TPM */
12007 if (nvcfg1 & (1 << 27)) {
12008 tg3_flag_set(tp, PROTECTED_NVRAM);
12009 protect = 1;
12012 nvcfg1 &= NVRAM_CFG1_5752VENDOR_MASK;
12013 switch (nvcfg1) {
12014 case FLASH_5755VENDOR_ATMEL_FLASH_1:
12015 case FLASH_5755VENDOR_ATMEL_FLASH_2:
12016 case FLASH_5755VENDOR_ATMEL_FLASH_3:
12017 case FLASH_5755VENDOR_ATMEL_FLASH_5:
12018 tp->nvram_jedecnum = JEDEC_ATMEL;
12019 tg3_flag_set(tp, NVRAM_BUFFERED);
12020 tg3_flag_set(tp, FLASH);
12021 tp->nvram_pagesize = 264;
12022 if (nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_1 ||
12023 nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_5)
12024 tp->nvram_size = (protect ? 0x3e200 :
12025 TG3_NVRAM_SIZE_512KB);
12026 else if (nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_2)
12027 tp->nvram_size = (protect ? 0x1f200 :
12028 TG3_NVRAM_SIZE_256KB);
12029 else
12030 tp->nvram_size = (protect ? 0x1f200 :
12031 TG3_NVRAM_SIZE_128KB);
12032 break;
12033 case FLASH_5752VENDOR_ST_M45PE10:
12034 case FLASH_5752VENDOR_ST_M45PE20:
12035 case FLASH_5752VENDOR_ST_M45PE40:
12036 tp->nvram_jedecnum = JEDEC_ST;
12037 tg3_flag_set(tp, NVRAM_BUFFERED);
12038 tg3_flag_set(tp, FLASH);
12039 tp->nvram_pagesize = 256;
12040 if (nvcfg1 == FLASH_5752VENDOR_ST_M45PE10)
12041 tp->nvram_size = (protect ?
12042 TG3_NVRAM_SIZE_64KB :
12043 TG3_NVRAM_SIZE_128KB);
12044 else if (nvcfg1 == FLASH_5752VENDOR_ST_M45PE20)
12045 tp->nvram_size = (protect ?
12046 TG3_NVRAM_SIZE_64KB :
12047 TG3_NVRAM_SIZE_256KB);
12048 else
12049 tp->nvram_size = (protect ?
12050 TG3_NVRAM_SIZE_128KB :
12051 TG3_NVRAM_SIZE_512KB);
12052 break;
12056 static void __devinit tg3_get_5787_nvram_info(struct tg3 *tp)
12058 u32 nvcfg1;
12060 nvcfg1 = tr32(NVRAM_CFG1);
12062 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
12063 case FLASH_5787VENDOR_ATMEL_EEPROM_64KHZ:
12064 case FLASH_5787VENDOR_ATMEL_EEPROM_376KHZ:
12065 case FLASH_5787VENDOR_MICRO_EEPROM_64KHZ:
12066 case FLASH_5787VENDOR_MICRO_EEPROM_376KHZ:
12067 tp->nvram_jedecnum = JEDEC_ATMEL;
12068 tg3_flag_set(tp, NVRAM_BUFFERED);
12069 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
12071 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
12072 tw32(NVRAM_CFG1, nvcfg1);
12073 break;
12074 case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
12075 case FLASH_5755VENDOR_ATMEL_FLASH_1:
12076 case FLASH_5755VENDOR_ATMEL_FLASH_2:
12077 case FLASH_5755VENDOR_ATMEL_FLASH_3:
12078 tp->nvram_jedecnum = JEDEC_ATMEL;
12079 tg3_flag_set(tp, NVRAM_BUFFERED);
12080 tg3_flag_set(tp, FLASH);
12081 tp->nvram_pagesize = 264;
12082 break;
12083 case FLASH_5752VENDOR_ST_M45PE10:
12084 case FLASH_5752VENDOR_ST_M45PE20:
12085 case FLASH_5752VENDOR_ST_M45PE40:
12086 tp->nvram_jedecnum = JEDEC_ST;
12087 tg3_flag_set(tp, NVRAM_BUFFERED);
12088 tg3_flag_set(tp, FLASH);
12089 tp->nvram_pagesize = 256;
12090 break;
12094 static void __devinit tg3_get_5761_nvram_info(struct tg3 *tp)
12096 u32 nvcfg1, protect = 0;
12098 nvcfg1 = tr32(NVRAM_CFG1);
12100 /* NVRAM protection for TPM */
12101 if (nvcfg1 & (1 << 27)) {
12102 tg3_flag_set(tp, PROTECTED_NVRAM);
12103 protect = 1;
12106 nvcfg1 &= NVRAM_CFG1_5752VENDOR_MASK;
12107 switch (nvcfg1) {
12108 case FLASH_5761VENDOR_ATMEL_ADB021D:
12109 case FLASH_5761VENDOR_ATMEL_ADB041D:
12110 case FLASH_5761VENDOR_ATMEL_ADB081D:
12111 case FLASH_5761VENDOR_ATMEL_ADB161D:
12112 case FLASH_5761VENDOR_ATMEL_MDB021D:
12113 case FLASH_5761VENDOR_ATMEL_MDB041D:
12114 case FLASH_5761VENDOR_ATMEL_MDB081D:
12115 case FLASH_5761VENDOR_ATMEL_MDB161D:
12116 tp->nvram_jedecnum = JEDEC_ATMEL;
12117 tg3_flag_set(tp, NVRAM_BUFFERED);
12118 tg3_flag_set(tp, FLASH);
12119 tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS);
12120 tp->nvram_pagesize = 256;
12121 break;
12122 case FLASH_5761VENDOR_ST_A_M45PE20:
12123 case FLASH_5761VENDOR_ST_A_M45PE40:
12124 case FLASH_5761VENDOR_ST_A_M45PE80:
12125 case FLASH_5761VENDOR_ST_A_M45PE16:
12126 case FLASH_5761VENDOR_ST_M_M45PE20:
12127 case FLASH_5761VENDOR_ST_M_M45PE40:
12128 case FLASH_5761VENDOR_ST_M_M45PE80:
12129 case FLASH_5761VENDOR_ST_M_M45PE16:
12130 tp->nvram_jedecnum = JEDEC_ST;
12131 tg3_flag_set(tp, NVRAM_BUFFERED);
12132 tg3_flag_set(tp, FLASH);
12133 tp->nvram_pagesize = 256;
12134 break;
12137 if (protect) {
12138 tp->nvram_size = tr32(NVRAM_ADDR_LOCKOUT);
12139 } else {
12140 switch (nvcfg1) {
12141 case FLASH_5761VENDOR_ATMEL_ADB161D:
12142 case FLASH_5761VENDOR_ATMEL_MDB161D:
12143 case FLASH_5761VENDOR_ST_A_M45PE16:
12144 case FLASH_5761VENDOR_ST_M_M45PE16:
12145 tp->nvram_size = TG3_NVRAM_SIZE_2MB;
12146 break;
12147 case FLASH_5761VENDOR_ATMEL_ADB081D:
12148 case FLASH_5761VENDOR_ATMEL_MDB081D:
12149 case FLASH_5761VENDOR_ST_A_M45PE80:
12150 case FLASH_5761VENDOR_ST_M_M45PE80:
12151 tp->nvram_size = TG3_NVRAM_SIZE_1MB;
12152 break;
12153 case FLASH_5761VENDOR_ATMEL_ADB041D:
12154 case FLASH_5761VENDOR_ATMEL_MDB041D:
12155 case FLASH_5761VENDOR_ST_A_M45PE40:
12156 case FLASH_5761VENDOR_ST_M_M45PE40:
12157 tp->nvram_size = TG3_NVRAM_SIZE_512KB;
12158 break;
12159 case FLASH_5761VENDOR_ATMEL_ADB021D:
12160 case FLASH_5761VENDOR_ATMEL_MDB021D:
12161 case FLASH_5761VENDOR_ST_A_M45PE20:
12162 case FLASH_5761VENDOR_ST_M_M45PE20:
12163 tp->nvram_size = TG3_NVRAM_SIZE_256KB;
12164 break;
12169 static void __devinit tg3_get_5906_nvram_info(struct tg3 *tp)
12171 tp->nvram_jedecnum = JEDEC_ATMEL;
12172 tg3_flag_set(tp, NVRAM_BUFFERED);
12173 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
12176 static void __devinit tg3_get_57780_nvram_info(struct tg3 *tp)
12178 u32 nvcfg1;
12180 nvcfg1 = tr32(NVRAM_CFG1);
12182 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
12183 case FLASH_5787VENDOR_ATMEL_EEPROM_376KHZ:
12184 case FLASH_5787VENDOR_MICRO_EEPROM_376KHZ:
12185 tp->nvram_jedecnum = JEDEC_ATMEL;
12186 tg3_flag_set(tp, NVRAM_BUFFERED);
12187 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
12189 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
12190 tw32(NVRAM_CFG1, nvcfg1);
12191 return;
12192 case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
12193 case FLASH_57780VENDOR_ATMEL_AT45DB011D:
12194 case FLASH_57780VENDOR_ATMEL_AT45DB011B:
12195 case FLASH_57780VENDOR_ATMEL_AT45DB021D:
12196 case FLASH_57780VENDOR_ATMEL_AT45DB021B:
12197 case FLASH_57780VENDOR_ATMEL_AT45DB041D:
12198 case FLASH_57780VENDOR_ATMEL_AT45DB041B:
12199 tp->nvram_jedecnum = JEDEC_ATMEL;
12200 tg3_flag_set(tp, NVRAM_BUFFERED);
12201 tg3_flag_set(tp, FLASH);
12203 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
12204 case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
12205 case FLASH_57780VENDOR_ATMEL_AT45DB011D:
12206 case FLASH_57780VENDOR_ATMEL_AT45DB011B:
12207 tp->nvram_size = TG3_NVRAM_SIZE_128KB;
12208 break;
12209 case FLASH_57780VENDOR_ATMEL_AT45DB021D:
12210 case FLASH_57780VENDOR_ATMEL_AT45DB021B:
12211 tp->nvram_size = TG3_NVRAM_SIZE_256KB;
12212 break;
12213 case FLASH_57780VENDOR_ATMEL_AT45DB041D:
12214 case FLASH_57780VENDOR_ATMEL_AT45DB041B:
12215 tp->nvram_size = TG3_NVRAM_SIZE_512KB;
12216 break;
12218 break;
12219 case FLASH_5752VENDOR_ST_M45PE10:
12220 case FLASH_5752VENDOR_ST_M45PE20:
12221 case FLASH_5752VENDOR_ST_M45PE40:
12222 tp->nvram_jedecnum = JEDEC_ST;
12223 tg3_flag_set(tp, NVRAM_BUFFERED);
12224 tg3_flag_set(tp, FLASH);
12226 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
12227 case FLASH_5752VENDOR_ST_M45PE10:
12228 tp->nvram_size = TG3_NVRAM_SIZE_128KB;
12229 break;
12230 case FLASH_5752VENDOR_ST_M45PE20:
12231 tp->nvram_size = TG3_NVRAM_SIZE_256KB;
12232 break;
12233 case FLASH_5752VENDOR_ST_M45PE40:
12234 tp->nvram_size = TG3_NVRAM_SIZE_512KB;
12235 break;
12237 break;
12238 default:
12239 tg3_flag_set(tp, NO_NVRAM);
12240 return;
12243 tg3_nvram_get_pagesize(tp, nvcfg1);
12244 if (tp->nvram_pagesize != 264 && tp->nvram_pagesize != 528)
12245 tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS);
12249 static void __devinit tg3_get_5717_nvram_info(struct tg3 *tp)
12251 u32 nvcfg1;
12253 nvcfg1 = tr32(NVRAM_CFG1);
12255 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
12256 case FLASH_5717VENDOR_ATMEL_EEPROM:
12257 case FLASH_5717VENDOR_MICRO_EEPROM:
12258 tp->nvram_jedecnum = JEDEC_ATMEL;
12259 tg3_flag_set(tp, NVRAM_BUFFERED);
12260 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
12262 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
12263 tw32(NVRAM_CFG1, nvcfg1);
12264 return;
12265 case FLASH_5717VENDOR_ATMEL_MDB011D:
12266 case FLASH_5717VENDOR_ATMEL_ADB011B:
12267 case FLASH_5717VENDOR_ATMEL_ADB011D:
12268 case FLASH_5717VENDOR_ATMEL_MDB021D:
12269 case FLASH_5717VENDOR_ATMEL_ADB021B:
12270 case FLASH_5717VENDOR_ATMEL_ADB021D:
12271 case FLASH_5717VENDOR_ATMEL_45USPT:
12272 tp->nvram_jedecnum = JEDEC_ATMEL;
12273 tg3_flag_set(tp, NVRAM_BUFFERED);
12274 tg3_flag_set(tp, FLASH);
12276 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
12277 case FLASH_5717VENDOR_ATMEL_MDB021D:
12278 /* Detect size with tg3_nvram_get_size() */
12279 break;
12280 case FLASH_5717VENDOR_ATMEL_ADB021B:
12281 case FLASH_5717VENDOR_ATMEL_ADB021D:
12282 tp->nvram_size = TG3_NVRAM_SIZE_256KB;
12283 break;
12284 default:
12285 tp->nvram_size = TG3_NVRAM_SIZE_128KB;
12286 break;
12288 break;
12289 case FLASH_5717VENDOR_ST_M_M25PE10:
12290 case FLASH_5717VENDOR_ST_A_M25PE10:
12291 case FLASH_5717VENDOR_ST_M_M45PE10:
12292 case FLASH_5717VENDOR_ST_A_M45PE10:
12293 case FLASH_5717VENDOR_ST_M_M25PE20:
12294 case FLASH_5717VENDOR_ST_A_M25PE20:
12295 case FLASH_5717VENDOR_ST_M_M45PE20:
12296 case FLASH_5717VENDOR_ST_A_M45PE20:
12297 case FLASH_5717VENDOR_ST_25USPT:
12298 case FLASH_5717VENDOR_ST_45USPT:
12299 tp->nvram_jedecnum = JEDEC_ST;
12300 tg3_flag_set(tp, NVRAM_BUFFERED);
12301 tg3_flag_set(tp, FLASH);
12303 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
12304 case FLASH_5717VENDOR_ST_M_M25PE20:
12305 case FLASH_5717VENDOR_ST_M_M45PE20:
12306 /* Detect size with tg3_nvram_get_size() */
12307 break;
12308 case FLASH_5717VENDOR_ST_A_M25PE20:
12309 case FLASH_5717VENDOR_ST_A_M45PE20:
12310 tp->nvram_size = TG3_NVRAM_SIZE_256KB;
12311 break;
12312 default:
12313 tp->nvram_size = TG3_NVRAM_SIZE_128KB;
12314 break;
12316 break;
12317 default:
12318 tg3_flag_set(tp, NO_NVRAM);
12319 return;
12322 tg3_nvram_get_pagesize(tp, nvcfg1);
12323 if (tp->nvram_pagesize != 264 && tp->nvram_pagesize != 528)
12324 tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS);
12327 static void __devinit tg3_get_5720_nvram_info(struct tg3 *tp)
12329 u32 nvcfg1, nvmpinstrp;
12331 nvcfg1 = tr32(NVRAM_CFG1);
12332 nvmpinstrp = nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK;
12334 switch (nvmpinstrp) {
12335 case FLASH_5720_EEPROM_HD:
12336 case FLASH_5720_EEPROM_LD:
12337 tp->nvram_jedecnum = JEDEC_ATMEL;
12338 tg3_flag_set(tp, NVRAM_BUFFERED);
12340 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
12341 tw32(NVRAM_CFG1, nvcfg1);
12342 if (nvmpinstrp == FLASH_5720_EEPROM_HD)
12343 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
12344 else
12345 tp->nvram_pagesize = ATMEL_AT24C02_CHIP_SIZE;
12346 return;
12347 case FLASH_5720VENDOR_M_ATMEL_DB011D:
12348 case FLASH_5720VENDOR_A_ATMEL_DB011B:
12349 case FLASH_5720VENDOR_A_ATMEL_DB011D:
12350 case FLASH_5720VENDOR_M_ATMEL_DB021D:
12351 case FLASH_5720VENDOR_A_ATMEL_DB021B:
12352 case FLASH_5720VENDOR_A_ATMEL_DB021D:
12353 case FLASH_5720VENDOR_M_ATMEL_DB041D:
12354 case FLASH_5720VENDOR_A_ATMEL_DB041B:
12355 case FLASH_5720VENDOR_A_ATMEL_DB041D:
12356 case FLASH_5720VENDOR_M_ATMEL_DB081D:
12357 case FLASH_5720VENDOR_A_ATMEL_DB081D:
12358 case FLASH_5720VENDOR_ATMEL_45USPT:
12359 tp->nvram_jedecnum = JEDEC_ATMEL;
12360 tg3_flag_set(tp, NVRAM_BUFFERED);
12361 tg3_flag_set(tp, FLASH);
12363 switch (nvmpinstrp) {
12364 case FLASH_5720VENDOR_M_ATMEL_DB021D:
12365 case FLASH_5720VENDOR_A_ATMEL_DB021B:
12366 case FLASH_5720VENDOR_A_ATMEL_DB021D:
12367 tp->nvram_size = TG3_NVRAM_SIZE_256KB;
12368 break;
12369 case FLASH_5720VENDOR_M_ATMEL_DB041D:
12370 case FLASH_5720VENDOR_A_ATMEL_DB041B:
12371 case FLASH_5720VENDOR_A_ATMEL_DB041D:
12372 tp->nvram_size = TG3_NVRAM_SIZE_512KB;
12373 break;
12374 case FLASH_5720VENDOR_M_ATMEL_DB081D:
12375 case FLASH_5720VENDOR_A_ATMEL_DB081D:
12376 tp->nvram_size = TG3_NVRAM_SIZE_1MB;
12377 break;
12378 default:
12379 tp->nvram_size = TG3_NVRAM_SIZE_128KB;
12380 break;
12382 break;
12383 case FLASH_5720VENDOR_M_ST_M25PE10:
12384 case FLASH_5720VENDOR_M_ST_M45PE10:
12385 case FLASH_5720VENDOR_A_ST_M25PE10:
12386 case FLASH_5720VENDOR_A_ST_M45PE10:
12387 case FLASH_5720VENDOR_M_ST_M25PE20:
12388 case FLASH_5720VENDOR_M_ST_M45PE20:
12389 case FLASH_5720VENDOR_A_ST_M25PE20:
12390 case FLASH_5720VENDOR_A_ST_M45PE20:
12391 case FLASH_5720VENDOR_M_ST_M25PE40:
12392 case FLASH_5720VENDOR_M_ST_M45PE40:
12393 case FLASH_5720VENDOR_A_ST_M25PE40:
12394 case FLASH_5720VENDOR_A_ST_M45PE40:
12395 case FLASH_5720VENDOR_M_ST_M25PE80:
12396 case FLASH_5720VENDOR_M_ST_M45PE80:
12397 case FLASH_5720VENDOR_A_ST_M25PE80:
12398 case FLASH_5720VENDOR_A_ST_M45PE80:
12399 case FLASH_5720VENDOR_ST_25USPT:
12400 case FLASH_5720VENDOR_ST_45USPT:
12401 tp->nvram_jedecnum = JEDEC_ST;
12402 tg3_flag_set(tp, NVRAM_BUFFERED);
12403 tg3_flag_set(tp, FLASH);
12405 switch (nvmpinstrp) {
12406 case FLASH_5720VENDOR_M_ST_M25PE20:
12407 case FLASH_5720VENDOR_M_ST_M45PE20:
12408 case FLASH_5720VENDOR_A_ST_M25PE20:
12409 case FLASH_5720VENDOR_A_ST_M45PE20:
12410 tp->nvram_size = TG3_NVRAM_SIZE_256KB;
12411 break;
12412 case FLASH_5720VENDOR_M_ST_M25PE40:
12413 case FLASH_5720VENDOR_M_ST_M45PE40:
12414 case FLASH_5720VENDOR_A_ST_M25PE40:
12415 case FLASH_5720VENDOR_A_ST_M45PE40:
12416 tp->nvram_size = TG3_NVRAM_SIZE_512KB;
12417 break;
12418 case FLASH_5720VENDOR_M_ST_M25PE80:
12419 case FLASH_5720VENDOR_M_ST_M45PE80:
12420 case FLASH_5720VENDOR_A_ST_M25PE80:
12421 case FLASH_5720VENDOR_A_ST_M45PE80:
12422 tp->nvram_size = TG3_NVRAM_SIZE_1MB;
12423 break;
12424 default:
12425 tp->nvram_size = TG3_NVRAM_SIZE_128KB;
12426 break;
12428 break;
12429 default:
12430 tg3_flag_set(tp, NO_NVRAM);
12431 return;
12434 tg3_nvram_get_pagesize(tp, nvcfg1);
12435 if (tp->nvram_pagesize != 264 && tp->nvram_pagesize != 528)
12436 tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS);
12439 /* Chips other than 5700/5701 use the NVRAM for fetching info. */
12440 static void __devinit tg3_nvram_init(struct tg3 *tp)
12442 tw32_f(GRC_EEPROM_ADDR,
12443 (EEPROM_ADDR_FSM_RESET |
12444 (EEPROM_DEFAULT_CLOCK_PERIOD <<
12445 EEPROM_ADDR_CLKPERD_SHIFT)));
12447 msleep(1);
12449 /* Enable seeprom accesses. */
12450 tw32_f(GRC_LOCAL_CTRL,
12451 tr32(GRC_LOCAL_CTRL) | GRC_LCLCTRL_AUTO_SEEPROM);
12452 udelay(100);
12454 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
12455 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701) {
12456 tg3_flag_set(tp, NVRAM);
12458 if (tg3_nvram_lock(tp)) {
12459 netdev_warn(tp->dev,
12460 "Cannot get nvram lock, %s failed\n",
12461 __func__);
12462 return;
12464 tg3_enable_nvram_access(tp);
12466 tp->nvram_size = 0;
12468 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752)
12469 tg3_get_5752_nvram_info(tp);
12470 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755)
12471 tg3_get_5755_nvram_info(tp);
12472 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 ||
12473 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
12474 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
12475 tg3_get_5787_nvram_info(tp);
12476 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
12477 tg3_get_5761_nvram_info(tp);
12478 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
12479 tg3_get_5906_nvram_info(tp);
12480 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780 ||
12481 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765)
12482 tg3_get_57780_nvram_info(tp);
12483 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
12484 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719)
12485 tg3_get_5717_nvram_info(tp);
12486 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720)
12487 tg3_get_5720_nvram_info(tp);
12488 else
12489 tg3_get_nvram_info(tp);
12491 if (tp->nvram_size == 0)
12492 tg3_get_nvram_size(tp);
12494 tg3_disable_nvram_access(tp);
12495 tg3_nvram_unlock(tp);
12497 } else {
12498 tg3_flag_clear(tp, NVRAM);
12499 tg3_flag_clear(tp, NVRAM_BUFFERED);
12501 tg3_get_eeprom_size(tp);
12505 static int tg3_nvram_write_block_using_eeprom(struct tg3 *tp,
12506 u32 offset, u32 len, u8 *buf)
12508 int i, j, rc = 0;
12509 u32 val;
12511 for (i = 0; i < len; i += 4) {
12512 u32 addr;
12513 __be32 data;
12515 addr = offset + i;
12517 memcpy(&data, buf + i, 4);
12519 /*
12520 * The SEEPROM interface expects the data to always be opposite
12521 * the native endian format. We accomplish this by reversing
12522 * all the operations that would have been performed on the
12523 * data from a call to tg3_nvram_read_be32().
12524 */
12525 tw32(GRC_EEPROM_DATA, swab32(be32_to_cpu(data)));
12527 val = tr32(GRC_EEPROM_ADDR);
12528 tw32(GRC_EEPROM_ADDR, val | EEPROM_ADDR_COMPLETE);
12530 val &= ~(EEPROM_ADDR_ADDR_MASK | EEPROM_ADDR_DEVID_MASK |
12531 EEPROM_ADDR_READ);
12532 tw32(GRC_EEPROM_ADDR, val |
12533 (0 << EEPROM_ADDR_DEVID_SHIFT) |
12534 (addr & EEPROM_ADDR_ADDR_MASK) |
12535 EEPROM_ADDR_START |
12536 EEPROM_ADDR_WRITE);
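/* The loop below polls for up to ~1 second (1000 x 1 ms) for the
 * controller to assert EEPROM_ADDR_COMPLETE before the write is
 * declared failed with -EBUSY.
 */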
12538 for (j = 0; j < 1000; j++) {
12539 val = tr32(GRC_EEPROM_ADDR);
12541 if (val & EEPROM_ADDR_COMPLETE)
12542 break;
12543 msleep(1);
12545 if (!(val & EEPROM_ADDR_COMPLETE)) {
12546 rc = -EBUSY;
12547 break;
12551 return rc;
12554 /* offset and length are dword aligned */
12555 static int tg3_nvram_write_block_unbuffered(struct tg3 *tp, u32 offset, u32 len,
12556 u8 *buf)
12558 int ret = 0;
12559 u32 pagesize = tp->nvram_pagesize;
12560 u32 pagemask = pagesize - 1;
12561 u32 nvram_cmd;
12562 u8 *tmp;
12564 tmp = kmalloc(pagesize, GFP_KERNEL);
12565 if (tmp == NULL)
12566 return -ENOMEM;
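/* Each pass of the loop below is a read-modify-write of one whole
 * flash page: read the page into tmp, merge in the caller's bytes,
 * issue a write enable, erase the page, then rewrite it dword by
 * dword with FIRST/LAST framing at the page boundaries.
 */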
12568 while (len) {
12569 int j;
12570 u32 phy_addr, page_off, size;
12572 phy_addr = offset & ~pagemask;
12574 for (j = 0; j < pagesize; j += 4) {
12575 ret = tg3_nvram_read_be32(tp, phy_addr + j,
12576 (__be32 *) (tmp + j));
12577 if (ret)
12578 break;
12580 if (ret)
12581 break;
12583 page_off = offset & pagemask;
12584 size = pagesize;
12585 if (len < size)
12586 size = len;
12588 len -= size;
12590 memcpy(tmp + page_off, buf, size);
12592 offset = offset + (pagesize - page_off);
12594 tg3_enable_nvram_access(tp);
12596 /*
12597 * Before we can erase the flash page, we need
12598 * to issue a special "write enable" command.
12599 */
12600 nvram_cmd = NVRAM_CMD_WREN | NVRAM_CMD_GO | NVRAM_CMD_DONE;
12602 if (tg3_nvram_exec_cmd(tp, nvram_cmd))
12603 break;
12605 /* Erase the target page */
12606 tw32(NVRAM_ADDR, phy_addr);
12608 nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE | NVRAM_CMD_WR |
12609 NVRAM_CMD_FIRST | NVRAM_CMD_LAST | NVRAM_CMD_ERASE;
12611 if (tg3_nvram_exec_cmd(tp, nvram_cmd))
12612 break;
12614 /* Issue another write enable to start the write. */
12615 nvram_cmd = NVRAM_CMD_WREN | NVRAM_CMD_GO | NVRAM_CMD_DONE;
12617 if (tg3_nvram_exec_cmd(tp, nvram_cmd))
12618 break;
12620 for (j = 0; j < pagesize; j += 4) {
12621 __be32 data;
12623 data = *((__be32 *) (tmp + j));
12625 tw32(NVRAM_WRDATA, be32_to_cpu(data));
12627 tw32(NVRAM_ADDR, phy_addr + j);
12629 nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE |
12630 NVRAM_CMD_WR;
12632 if (j == 0)
12633 nvram_cmd |= NVRAM_CMD_FIRST;
12634 else if (j == (pagesize - 4))
12635 nvram_cmd |= NVRAM_CMD_LAST;
12637 if ((ret = tg3_nvram_exec_cmd(tp, nvram_cmd)))
12638 break;
12640 if (ret)
12641 break;
12644 nvram_cmd = NVRAM_CMD_WRDI | NVRAM_CMD_GO | NVRAM_CMD_DONE;
12645 tg3_nvram_exec_cmd(tp, nvram_cmd);
12647 kfree(tmp);
12649 return ret;
12652 /* offset and length are dword aligned */
12653 static int tg3_nvram_write_block_buffered(struct tg3 *tp, u32 offset, u32 len,
12654 u8 *buf)
12656 int i, ret = 0;
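/* Unlike the unbuffered path above, no explicit erase cycle is issued
 * here; each write is simply framed with NVRAM_CMD_FIRST on the first
 * dword of a page (or of the transfer) and NVRAM_CMD_LAST on the last.
 */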
12658 for (i = 0; i < len; i += 4, offset += 4) {
12659 u32 page_off, phy_addr, nvram_cmd;
12660 __be32 data;
12662 memcpy(&data, buf + i, 4);
12663 tw32(NVRAM_WRDATA, be32_to_cpu(data));
12665 page_off = offset % tp->nvram_pagesize;
12667 phy_addr = tg3_nvram_phys_addr(tp, offset);
12669 tw32(NVRAM_ADDR, phy_addr);
12671 nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE | NVRAM_CMD_WR;
12673 if (page_off == 0 || i == 0)
12674 nvram_cmd |= NVRAM_CMD_FIRST;
12675 if (page_off == (tp->nvram_pagesize - 4))
12676 nvram_cmd |= NVRAM_CMD_LAST;
12678 if (i == (len - 4))
12679 nvram_cmd |= NVRAM_CMD_LAST;
12681 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5752 &&
12682 !tg3_flag(tp, 5755_PLUS) &&
12683 (tp->nvram_jedecnum == JEDEC_ST) &&
12684 (nvram_cmd & NVRAM_CMD_FIRST)) {
12686 if ((ret = tg3_nvram_exec_cmd(tp,
12687 NVRAM_CMD_WREN | NVRAM_CMD_GO |
12688 NVRAM_CMD_DONE)))
12690 break;
12692 if (!tg3_flag(tp, FLASH)) {
12693 /* We always do complete word writes to eeprom. */
12694 nvram_cmd |= (NVRAM_CMD_FIRST | NVRAM_CMD_LAST);
12697 if ((ret = tg3_nvram_exec_cmd(tp, nvram_cmd)))
12698 break;
12700 return ret;
12703 /* offset and length are dword aligned */
12704 static int tg3_nvram_write_block(struct tg3 *tp, u32 offset, u32 len, u8 *buf)
12706 int ret;
12708 if (tg3_flag(tp, EEPROM_WRITE_PROT)) {
12709 tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl &
12710 ~GRC_LCLCTRL_GPIO_OUTPUT1);
12711 udelay(40);
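/* As noted in tg3_get_invariants(), GPIO1 doubles as the eeprom write
 * protect line on LOMs; it was dropped above for the duration of the
 * write and is restored from tp->grc_local_ctrl before returning.
 */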
12714 if (!tg3_flag(tp, NVRAM)) {
12715 ret = tg3_nvram_write_block_using_eeprom(tp, offset, len, buf);
12716 } else {
12717 u32 grc_mode;
12719 ret = tg3_nvram_lock(tp);
12720 if (ret)
12721 return ret;
12723 tg3_enable_nvram_access(tp);
12724 if (tg3_flag(tp, 5750_PLUS) && !tg3_flag(tp, PROTECTED_NVRAM))
12725 tw32(NVRAM_WRITE1, 0x406);
12727 grc_mode = tr32(GRC_MODE);
12728 tw32(GRC_MODE, grc_mode | GRC_MODE_NVRAM_WR_ENABLE);
12730 if (tg3_flag(tp, NVRAM_BUFFERED) || !tg3_flag(tp, FLASH)) {
12731 ret = tg3_nvram_write_block_buffered(tp, offset, len,
12732 buf);
12733 } else {
12734 ret = tg3_nvram_write_block_unbuffered(tp, offset, len,
12735 buf);
12738 grc_mode = tr32(GRC_MODE);
12739 tw32(GRC_MODE, grc_mode & ~GRC_MODE_NVRAM_WR_ENABLE);
12741 tg3_disable_nvram_access(tp);
12742 tg3_nvram_unlock(tp);
12745 if (tg3_flag(tp, EEPROM_WRITE_PROT)) {
12746 tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
12747 udelay(40);
12750 return ret;
12753 struct subsys_tbl_ent {
12754 u16 subsys_vendor, subsys_devid;
12755 u32 phy_id;
12758 static struct subsys_tbl_ent subsys_id_to_phy_id[] __devinitdata = {
12759 /* Broadcom boards. */
12760 { TG3PCI_SUBVENDOR_ID_BROADCOM,
12761 TG3PCI_SUBDEVICE_ID_BROADCOM_95700A6, TG3_PHY_ID_BCM5401 },
12762 { TG3PCI_SUBVENDOR_ID_BROADCOM,
12763 TG3PCI_SUBDEVICE_ID_BROADCOM_95701A5, TG3_PHY_ID_BCM5701 },
12764 { TG3PCI_SUBVENDOR_ID_BROADCOM,
12765 TG3PCI_SUBDEVICE_ID_BROADCOM_95700T6, TG3_PHY_ID_BCM8002 },
12766 { TG3PCI_SUBVENDOR_ID_BROADCOM,
12767 TG3PCI_SUBDEVICE_ID_BROADCOM_95700A9, 0 },
12768 { TG3PCI_SUBVENDOR_ID_BROADCOM,
12769 TG3PCI_SUBDEVICE_ID_BROADCOM_95701T1, TG3_PHY_ID_BCM5701 },
12770 { TG3PCI_SUBVENDOR_ID_BROADCOM,
12771 TG3PCI_SUBDEVICE_ID_BROADCOM_95701T8, TG3_PHY_ID_BCM5701 },
12772 { TG3PCI_SUBVENDOR_ID_BROADCOM,
12773 TG3PCI_SUBDEVICE_ID_BROADCOM_95701A7, 0 },
12774 { TG3PCI_SUBVENDOR_ID_BROADCOM,
12775 TG3PCI_SUBDEVICE_ID_BROADCOM_95701A10, TG3_PHY_ID_BCM5701 },
12776 { TG3PCI_SUBVENDOR_ID_BROADCOM,
12777 TG3PCI_SUBDEVICE_ID_BROADCOM_95701A12, TG3_PHY_ID_BCM5701 },
12778 { TG3PCI_SUBVENDOR_ID_BROADCOM,
12779 TG3PCI_SUBDEVICE_ID_BROADCOM_95703AX1, TG3_PHY_ID_BCM5703 },
12780 { TG3PCI_SUBVENDOR_ID_BROADCOM,
12781 TG3PCI_SUBDEVICE_ID_BROADCOM_95703AX2, TG3_PHY_ID_BCM5703 },
12783 /* 3com boards. */
12784 { TG3PCI_SUBVENDOR_ID_3COM,
12785 TG3PCI_SUBDEVICE_ID_3COM_3C996T, TG3_PHY_ID_BCM5401 },
12786 { TG3PCI_SUBVENDOR_ID_3COM,
12787 TG3PCI_SUBDEVICE_ID_3COM_3C996BT, TG3_PHY_ID_BCM5701 },
12788 { TG3PCI_SUBVENDOR_ID_3COM,
12789 TG3PCI_SUBDEVICE_ID_3COM_3C996SX, 0 },
12790 { TG3PCI_SUBVENDOR_ID_3COM,
12791 TG3PCI_SUBDEVICE_ID_3COM_3C1000T, TG3_PHY_ID_BCM5701 },
12792 { TG3PCI_SUBVENDOR_ID_3COM,
12793 TG3PCI_SUBDEVICE_ID_3COM_3C940BR01, TG3_PHY_ID_BCM5701 },
12795 /* DELL boards. */
12796 { TG3PCI_SUBVENDOR_ID_DELL,
12797 TG3PCI_SUBDEVICE_ID_DELL_VIPER, TG3_PHY_ID_BCM5401 },
12798 { TG3PCI_SUBVENDOR_ID_DELL,
12799 TG3PCI_SUBDEVICE_ID_DELL_JAGUAR, TG3_PHY_ID_BCM5401 },
12800 { TG3PCI_SUBVENDOR_ID_DELL,
12801 TG3PCI_SUBDEVICE_ID_DELL_MERLOT, TG3_PHY_ID_BCM5411 },
12802 { TG3PCI_SUBVENDOR_ID_DELL,
12803 TG3PCI_SUBDEVICE_ID_DELL_SLIM_MERLOT, TG3_PHY_ID_BCM5411 },
12805 /* Compaq boards. */
12806 { TG3PCI_SUBVENDOR_ID_COMPAQ,
12807 TG3PCI_SUBDEVICE_ID_COMPAQ_BANSHEE, TG3_PHY_ID_BCM5701 },
12808 { TG3PCI_SUBVENDOR_ID_COMPAQ,
12809 TG3PCI_SUBDEVICE_ID_COMPAQ_BANSHEE_2, TG3_PHY_ID_BCM5701 },
12810 { TG3PCI_SUBVENDOR_ID_COMPAQ,
12811 TG3PCI_SUBDEVICE_ID_COMPAQ_CHANGELING, 0 },
12812 { TG3PCI_SUBVENDOR_ID_COMPAQ,
12813 TG3PCI_SUBDEVICE_ID_COMPAQ_NC7780, TG3_PHY_ID_BCM5701 },
12814 { TG3PCI_SUBVENDOR_ID_COMPAQ,
12815 TG3PCI_SUBDEVICE_ID_COMPAQ_NC7780_2, TG3_PHY_ID_BCM5701 },
12817 /* IBM boards. */
12818 { TG3PCI_SUBVENDOR_ID_IBM,
12819 TG3PCI_SUBDEVICE_ID_IBM_5703SAX2, 0 }
12822 static struct subsys_tbl_ent * __devinit tg3_lookup_by_subsys(struct tg3 *tp)
12824 int i;
12826 for (i = 0; i < ARRAY_SIZE(subsys_id_to_phy_id); i++) {
12827 if ((subsys_id_to_phy_id[i].subsys_vendor ==
12828 tp->pdev->subsystem_vendor) &&
12829 (subsys_id_to_phy_id[i].subsys_devid ==
12830 tp->pdev->subsystem_device))
12831 return &subsys_id_to_phy_id[i];
12833 return NULL;
12836 static void __devinit tg3_get_eeprom_hw_cfg(struct tg3 *tp)
12838 u32 val;
12840 tp->phy_id = TG3_PHY_ID_INVALID;
12841 tp->led_ctrl = LED_CTRL_MODE_PHY_1;
12843 /* Assume an onboard device and WOL capable by default. */
12844 tg3_flag_set(tp, EEPROM_WRITE_PROT);
12845 tg3_flag_set(tp, WOL_CAP);
12847 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
12848 if (!(tr32(PCIE_TRANSACTION_CFG) & PCIE_TRANS_CFG_LOM)) {
12849 tg3_flag_clear(tp, EEPROM_WRITE_PROT);
12850 tg3_flag_set(tp, IS_NIC);
12852 val = tr32(VCPU_CFGSHDW);
12853 if (val & VCPU_CFGSHDW_ASPM_DBNC)
12854 tg3_flag_set(tp, ASPM_WORKAROUND);
12855 if ((val & VCPU_CFGSHDW_WOL_ENABLE) &&
12856 (val & VCPU_CFGSHDW_WOL_MAGPKT)) {
12857 tg3_flag_set(tp, WOL_ENABLE);
12858 device_set_wakeup_enable(&tp->pdev->dev, true);
12860 goto done;
12863 tg3_read_mem(tp, NIC_SRAM_DATA_SIG, &val);
12864 if (val == NIC_SRAM_DATA_SIG_MAGIC) {
12865 u32 nic_cfg, led_cfg;
12866 u32 nic_phy_id, ver, cfg2 = 0, cfg4 = 0, eeprom_phy_id;
12867 int eeprom_phy_serdes = 0;
12869 tg3_read_mem(tp, NIC_SRAM_DATA_CFG, &nic_cfg);
12870 tp->nic_sram_data_cfg = nic_cfg;
12872 tg3_read_mem(tp, NIC_SRAM_DATA_VER, &ver);
12873 ver >>= NIC_SRAM_DATA_VER_SHIFT;
12874 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
12875 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701 &&
12876 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5703 &&
12877 (ver > 0) && (ver < 0x100))
12878 tg3_read_mem(tp, NIC_SRAM_DATA_CFG_2, &cfg2);
12880 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
12881 tg3_read_mem(tp, NIC_SRAM_DATA_CFG_4, &cfg4);
12883 if ((nic_cfg & NIC_SRAM_DATA_CFG_PHY_TYPE_MASK) ==
12884 NIC_SRAM_DATA_CFG_PHY_TYPE_FIBER)
12885 eeprom_phy_serdes = 1;
12887 tg3_read_mem(tp, NIC_SRAM_DATA_PHY_ID, &nic_phy_id);
12888 if (nic_phy_id != 0) {
12889 u32 id1 = nic_phy_id & NIC_SRAM_DATA_PHY_ID1_MASK;
12890 u32 id2 = nic_phy_id & NIC_SRAM_DATA_PHY_ID2_MASK;
12892 eeprom_phy_id = (id1 >> 16) << 10;
12893 eeprom_phy_id |= (id2 & 0xfc00) << 16;
12894 eeprom_phy_id |= (id2 & 0x03ff) << 0;
12895 } else
12896 eeprom_phy_id = 0;
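/* The packing above folds the two 16-bit SRAM ID words into tg3's
 * single-word PHY ID format; tg3_phy_probe() applies the same packing
 * to MII_PHYSID1/MII_PHYSID2, so the two values compare directly.
 */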
12898 tp->phy_id = eeprom_phy_id;
12899 if (eeprom_phy_serdes) {
12900 if (!tg3_flag(tp, 5705_PLUS))
12901 tp->phy_flags |= TG3_PHYFLG_PHY_SERDES;
12902 else
12903 tp->phy_flags |= TG3_PHYFLG_MII_SERDES;
12906 if (tg3_flag(tp, 5750_PLUS))
12907 led_cfg = cfg2 & (NIC_SRAM_DATA_CFG_LED_MODE_MASK |
12908 SHASTA_EXT_LED_MODE_MASK);
12909 else
12910 led_cfg = nic_cfg & NIC_SRAM_DATA_CFG_LED_MODE_MASK;
12912 switch (led_cfg) {
12913 default:
12914 case NIC_SRAM_DATA_CFG_LED_MODE_PHY_1:
12915 tp->led_ctrl = LED_CTRL_MODE_PHY_1;
12916 break;
12918 case NIC_SRAM_DATA_CFG_LED_MODE_PHY_2:
12919 tp->led_ctrl = LED_CTRL_MODE_PHY_2;
12920 break;
12922 case NIC_SRAM_DATA_CFG_LED_MODE_MAC:
12923 tp->led_ctrl = LED_CTRL_MODE_MAC;
12925 /* Default to PHY_1_MODE if 0 (MAC_MODE) is
12926 * read on some older 5700/5701 bootcode.
12927 */
12928 if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
12929 ASIC_REV_5700 ||
12930 GET_ASIC_REV(tp->pci_chip_rev_id) ==
12931 ASIC_REV_5701)
12932 tp->led_ctrl = LED_CTRL_MODE_PHY_1;
12934 break;
12936 case SHASTA_EXT_LED_SHARED:
12937 tp->led_ctrl = LED_CTRL_MODE_SHARED;
12938 if (tp->pci_chip_rev_id != CHIPREV_ID_5750_A0 &&
12939 tp->pci_chip_rev_id != CHIPREV_ID_5750_A1)
12940 tp->led_ctrl |= (LED_CTRL_MODE_PHY_1 |
12941 LED_CTRL_MODE_PHY_2);
12942 break;
12944 case SHASTA_EXT_LED_MAC:
12945 tp->led_ctrl = LED_CTRL_MODE_SHASTA_MAC;
12946 break;
12948 case SHASTA_EXT_LED_COMBO:
12949 tp->led_ctrl = LED_CTRL_MODE_COMBO;
12950 if (tp->pci_chip_rev_id != CHIPREV_ID_5750_A0)
12951 tp->led_ctrl |= (LED_CTRL_MODE_PHY_1 |
12952 LED_CTRL_MODE_PHY_2);
12953 break;
12957 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
12958 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) &&
12959 tp->pdev->subsystem_vendor == PCI_VENDOR_ID_DELL)
12960 tp->led_ctrl = LED_CTRL_MODE_PHY_2;
12962 if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5784_AX)
12963 tp->led_ctrl = LED_CTRL_MODE_PHY_1;
12965 if (nic_cfg & NIC_SRAM_DATA_CFG_EEPROM_WP) {
12966 tg3_flag_set(tp, EEPROM_WRITE_PROT);
12967 if ((tp->pdev->subsystem_vendor ==
12968 PCI_VENDOR_ID_ARIMA) &&
12969 (tp->pdev->subsystem_device == 0x205a ||
12970 tp->pdev->subsystem_device == 0x2063))
12971 tg3_flag_clear(tp, EEPROM_WRITE_PROT);
12972 } else {
12973 tg3_flag_clear(tp, EEPROM_WRITE_PROT);
12974 tg3_flag_set(tp, IS_NIC);
12977 if (nic_cfg & NIC_SRAM_DATA_CFG_ASF_ENABLE) {
12978 tg3_flag_set(tp, ENABLE_ASF);
12979 if (tg3_flag(tp, 5750_PLUS))
12980 tg3_flag_set(tp, ASF_NEW_HANDSHAKE);
12983 if ((nic_cfg & NIC_SRAM_DATA_CFG_APE_ENABLE) &&
12984 tg3_flag(tp, 5750_PLUS))
12985 tg3_flag_set(tp, ENABLE_APE);
12987 if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES &&
12988 !(nic_cfg & NIC_SRAM_DATA_CFG_FIBER_WOL))
12989 tg3_flag_clear(tp, WOL_CAP);
12991 if (tg3_flag(tp, WOL_CAP) &&
12992 (nic_cfg & NIC_SRAM_DATA_CFG_WOL_ENABLE)) {
12993 tg3_flag_set(tp, WOL_ENABLE);
12994 device_set_wakeup_enable(&tp->pdev->dev, true);
12997 if (cfg2 & (1 << 17))
12998 tp->phy_flags |= TG3_PHYFLG_CAPACITIVE_COUPLING;
13000 /* serdes signal pre-emphasis in register 0x590 is set by */
13001 /* the bootcode if bit 18 is set */
13002 if (cfg2 & (1 << 18))
13003 tp->phy_flags |= TG3_PHYFLG_SERDES_PREEMPHASIS;
13005 if ((tg3_flag(tp, 57765_PLUS) ||
13006 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 &&
13007 GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5784_AX)) &&
13008 (cfg2 & NIC_SRAM_DATA_CFG_2_APD_EN))
13009 tp->phy_flags |= TG3_PHYFLG_ENABLE_APD;
13011 if (tg3_flag(tp, PCI_EXPRESS) &&
13012 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5785 &&
13013 !tg3_flag(tp, 57765_PLUS)) {
13014 u32 cfg3;
13016 tg3_read_mem(tp, NIC_SRAM_DATA_CFG_3, &cfg3);
13017 if (cfg3 & NIC_SRAM_ASPM_DEBOUNCE)
13018 tg3_flag_set(tp, ASPM_WORKAROUND);
13021 if (cfg4 & NIC_SRAM_RGMII_INBAND_DISABLE)
13022 tg3_flag_set(tp, RGMII_INBAND_DISABLE);
13023 if (cfg4 & NIC_SRAM_RGMII_EXT_IBND_RX_EN)
13024 tg3_flag_set(tp, RGMII_EXT_IBND_RX_EN);
13025 if (cfg4 & NIC_SRAM_RGMII_EXT_IBND_TX_EN)
13026 tg3_flag_set(tp, RGMII_EXT_IBND_TX_EN);
13028 done:
13029 if (tg3_flag(tp, WOL_CAP))
13030 device_set_wakeup_enable(&tp->pdev->dev,
13031 tg3_flag(tp, WOL_ENABLE));
13032 else
13033 device_set_wakeup_capable(&tp->pdev->dev, false);
13036 static int __devinit tg3_issue_otp_command(struct tg3 *tp, u32 cmd)
13038 int i;
13039 u32 val;
13041 tw32(OTP_CTRL, cmd | OTP_CTRL_OTP_CMD_START);
13042 tw32(OTP_CTRL, cmd);
13044 /* Wait for up to 1 ms for command to execute. */
13045 for (i = 0; i < 100; i++) {
13046 val = tr32(OTP_STATUS);
13047 if (val & OTP_STATUS_CMD_DONE)
13048 break;
13049 udelay(10);
13052 return (val & OTP_STATUS_CMD_DONE) ? 0 : -EBUSY;
13055 /* Read the gphy configuration from the OTP region of the chip. The gphy
13056 * configuration is a 32-bit value that straddles the alignment boundary.
13057 * We do two 32-bit reads and then shift and merge the results.
13058 */
13059 static u32 __devinit tg3_read_otp_phycfg(struct tg3 *tp)
13061 u32 bhalf_otp, thalf_otp;
13063 tw32(OTP_MODE, OTP_MODE_OTP_THRU_GRC);
13065 if (tg3_issue_otp_command(tp, OTP_CTRL_OTP_CMD_INIT))
13066 return 0;
13068 tw32(OTP_ADDRESS, OTP_ADDRESS_MAGIC1);
13070 if (tg3_issue_otp_command(tp, OTP_CTRL_OTP_CMD_READ))
13071 return 0;
13073 thalf_otp = tr32(OTP_READ_DATA);
13075 tw32(OTP_ADDRESS, OTP_ADDRESS_MAGIC2);
13077 if (tg3_issue_otp_command(tp, OTP_CTRL_OTP_CMD_READ))
13078 return 0;
13080 bhalf_otp = tr32(OTP_READ_DATA);
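/* The 32-bit gphy config word straddles the two OTP words just read:
 * its upper half is the low 16 bits of the word at MAGIC1 and its
 * lower half is the high 16 bits of the word at MAGIC2.
 */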
13082 return ((thalf_otp & 0x0000ffff) << 16) | (bhalf_otp >> 16);
13085 static void __devinit tg3_phy_init_link_config(struct tg3 *tp)
13087 u32 adv = ADVERTISED_Autoneg |
13088 ADVERTISED_Pause;
13090 if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY))
13091 adv |= ADVERTISED_1000baseT_Half |
13092 ADVERTISED_1000baseT_Full;
13094 if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
13095 adv |= ADVERTISED_100baseT_Half |
13096 ADVERTISED_100baseT_Full |
13097 ADVERTISED_10baseT_Half |
13098 ADVERTISED_10baseT_Full |
13099 ADVERTISED_TP;
13100 else
13101 adv |= ADVERTISED_FIBRE;
13103 tp->link_config.advertising = adv;
13104 tp->link_config.speed = SPEED_INVALID;
13105 tp->link_config.duplex = DUPLEX_INVALID;
13106 tp->link_config.autoneg = AUTONEG_ENABLE;
13107 tp->link_config.active_speed = SPEED_INVALID;
13108 tp->link_config.active_duplex = DUPLEX_INVALID;
13109 tp->link_config.orig_speed = SPEED_INVALID;
13110 tp->link_config.orig_duplex = DUPLEX_INVALID;
13111 tp->link_config.orig_autoneg = AUTONEG_INVALID;
13114 static int __devinit tg3_phy_probe(struct tg3 *tp)
13116 u32 hw_phy_id_1, hw_phy_id_2;
13117 u32 hw_phy_id, hw_phy_id_masked;
13118 int err;
13120 /* flow control autonegotiation is default behavior */
13121 tg3_flag_set(tp, PAUSE_AUTONEG);
13122 tp->link_config.flowctrl = FLOW_CTRL_TX | FLOW_CTRL_RX;
13124 if (tg3_flag(tp, USE_PHYLIB))
13125 return tg3_phy_init(tp);
13127 /* Reading the PHY ID register can conflict with ASF
13128 * firmware access to the PHY hardware.
13129 */
13130 err = 0;
13131 if (tg3_flag(tp, ENABLE_ASF) || tg3_flag(tp, ENABLE_APE)) {
13132 hw_phy_id = hw_phy_id_masked = TG3_PHY_ID_INVALID;
13133 } else {
13134 /* Now read the physical PHY_ID from the chip and verify
13135 * that it is sane. If it doesn't look good, we fall back
13136 * to either the hard-coded, table-based PHY_ID or, failing
13137 * that, the value found in the eeprom area.
13138 */
13139 err |= tg3_readphy(tp, MII_PHYSID1, &hw_phy_id_1);
13140 err |= tg3_readphy(tp, MII_PHYSID2, &hw_phy_id_2);
13142 hw_phy_id = (hw_phy_id_1 & 0xffff) << 10;
13143 hw_phy_id |= (hw_phy_id_2 & 0xfc00) << 16;
13144 hw_phy_id |= (hw_phy_id_2 & 0x03ff) << 0;
13146 hw_phy_id_masked = hw_phy_id & TG3_PHY_ID_MASK;
13149 if (!err && TG3_KNOWN_PHY_ID(hw_phy_id_masked)) {
13150 tp->phy_id = hw_phy_id;
13151 if (hw_phy_id_masked == TG3_PHY_ID_BCM8002)
13152 tp->phy_flags |= TG3_PHYFLG_PHY_SERDES;
13153 else
13154 tp->phy_flags &= ~TG3_PHYFLG_PHY_SERDES;
13155 } else {
13156 if (tp->phy_id != TG3_PHY_ID_INVALID) {
13157 /* Do nothing, phy ID already set up in
13158 * tg3_get_eeprom_hw_cfg().
13159 */
13160 } else {
13161 struct subsys_tbl_ent *p;
13163 /* No eeprom signature? Try the hardcoded
13164 * subsys device table.
13165 */
13166 p = tg3_lookup_by_subsys(tp);
13167 if (!p)
13168 return -ENODEV;
13170 tp->phy_id = p->phy_id;
13171 if (!tp->phy_id ||
13172 tp->phy_id == TG3_PHY_ID_BCM8002)
13173 tp->phy_flags |= TG3_PHYFLG_PHY_SERDES;
13177 if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES) &&
13178 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
13179 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720 ||
13180 (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5718 &&
13181 tp->pci_chip_rev_id != CHIPREV_ID_5717_A0) ||
13182 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765 &&
13183 tp->pci_chip_rev_id != CHIPREV_ID_57765_A0)))
13184 tp->phy_flags |= TG3_PHYFLG_EEE_CAP;
13186 tg3_phy_init_link_config(tp);
13188 if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES) &&
13189 !tg3_flag(tp, ENABLE_APE) &&
13190 !tg3_flag(tp, ENABLE_ASF)) {
13191 u32 bmsr, mask;
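/* BMSR latches link failures, so it is read twice: the first read
 * clears any stale latched status, the second reflects the current
 * link state.
 */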
13193 tg3_readphy(tp, MII_BMSR, &bmsr);
13194 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
13195 (bmsr & BMSR_LSTATUS))
13196 goto skip_phy_reset;
13198 err = tg3_phy_reset(tp);
13199 if (err)
13200 return err;
13202 tg3_phy_set_wirespeed(tp);
13204 mask = (ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full |
13205 ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full |
13206 ADVERTISED_1000baseT_Half | ADVERTISED_1000baseT_Full);
13207 if (!tg3_copper_is_advertising_all(tp, mask)) {
13208 tg3_phy_autoneg_cfg(tp, tp->link_config.advertising,
13209 tp->link_config.flowctrl);
13211 tg3_writephy(tp, MII_BMCR,
13212 BMCR_ANENABLE | BMCR_ANRESTART);
13216 skip_phy_reset:
13217 if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5401) {
13218 err = tg3_init_5401phy_dsp(tp);
13219 if (err)
13220 return err;
13222 err = tg3_init_5401phy_dsp(tp);
13225 return err;
13228 static void __devinit tg3_read_vpd(struct tg3 *tp)
13230 u8 *vpd_data;
13231 unsigned int block_end, rosize, len;
13232 u32 vpdlen;
13233 int j, i = 0;
13235 vpd_data = (u8 *)tg3_vpd_readblock(tp, &vpdlen);
13236 if (!vpd_data)
13237 goto out_no_vpd;
13239 i = pci_vpd_find_tag(vpd_data, 0, vpdlen, PCI_VPD_LRDT_RO_DATA);
13240 if (i < 0)
13241 goto out_not_found;
13243 rosize = pci_vpd_lrdt_size(&vpd_data[i]);
13244 block_end = i + PCI_VPD_LRDT_TAG_SIZE + rosize;
13245 i += PCI_VPD_LRDT_TAG_SIZE;
13247 if (block_end > vpdlen)
13248 goto out_not_found;
13250 j = pci_vpd_find_info_keyword(vpd_data, i, rosize,
13251 PCI_VPD_RO_KEYWORD_MFR_ID);
13252 if (j > 0) {
13253 len = pci_vpd_info_field_size(&vpd_data[j]);
13255 j += PCI_VPD_INFO_FLD_HDR_SIZE;
13256 if (j + len > block_end || len != 4 ||
13257 memcmp(&vpd_data[j], "1028", 4))
13258 goto partno;
13260 j = pci_vpd_find_info_keyword(vpd_data, i, rosize,
13261 PCI_VPD_RO_KEYWORD_VENDOR0);
13262 if (j < 0)
13263 goto partno;
13265 len = pci_vpd_info_field_size(&vpd_data[j]);
13267 j += PCI_VPD_INFO_FLD_HDR_SIZE;
13268 if (j + len > block_end)
13269 goto partno;
13271 memcpy(tp->fw_ver, &vpd_data[j], len);
13272 strncat(tp->fw_ver, " bc ", vpdlen - len - 1);
13275 partno:
13276 i = pci_vpd_find_info_keyword(vpd_data, i, rosize,
13277 PCI_VPD_RO_KEYWORD_PARTNO);
13278 if (i < 0)
13279 goto out_not_found;
13281 len = pci_vpd_info_field_size(&vpd_data[i]);
13283 i += PCI_VPD_INFO_FLD_HDR_SIZE;
13284 if (len > TG3_BPN_SIZE ||
13285 (len + i) > vpdlen)
13286 goto out_not_found;
13288 memcpy(tp->board_part_number, &vpd_data[i], len);
13290 out_not_found:
13291 kfree(vpd_data);
13292 if (tp->board_part_number[0])
13293 return;
13295 out_no_vpd:
13296 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717) {
13297 if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717)
13298 strcpy(tp->board_part_number, "BCM5717");
13299 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5718)
13300 strcpy(tp->board_part_number, "BCM5718");
13301 else
13302 goto nomatch;
13303 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780) {
13304 if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57780)
13305 strcpy(tp->board_part_number, "BCM57780");
13306 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57760)
13307 strcpy(tp->board_part_number, "BCM57760");
13308 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57790)
13309 strcpy(tp->board_part_number, "BCM57790");
13310 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57788)
13311 strcpy(tp->board_part_number, "BCM57788");
13312 else
13313 goto nomatch;
13314 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765) {
13315 if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57761)
13316 strcpy(tp->board_part_number, "BCM57761");
13317 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57765)
13318 strcpy(tp->board_part_number, "BCM57765");
13319 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57781)
13320 strcpy(tp->board_part_number, "BCM57781");
13321 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57785)
13322 strcpy(tp->board_part_number, "BCM57785");
13323 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57791)
13324 strcpy(tp->board_part_number, "BCM57791");
13325 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57795)
13326 strcpy(tp->board_part_number, "BCM57795");
13327 else
13328 goto nomatch;
13329 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
13330 strcpy(tp->board_part_number, "BCM95906");
13331 } else {
13332 nomatch:
13333 strcpy(tp->board_part_number, "none");
13337 static int __devinit tg3_fw_img_is_valid(struct tg3 *tp, u32 offset)
13339 u32 val;
13341 if (tg3_nvram_read(tp, offset, &val) ||
13342 (val & 0xfc000000) != 0x0c000000 ||
13343 tg3_nvram_read(tp, offset + 4, &val) ||
13344 val != 0)
13345 return 0;
13347 return 1;
13350 static void __devinit tg3_read_bc_ver(struct tg3 *tp)
13352 u32 val, offset, start, ver_offset;
13353 int i, dst_off;
13354 bool newver = false;
13356 if (tg3_nvram_read(tp, 0xc, &offset) ||
13357 tg3_nvram_read(tp, 0x4, &start))
13358 return;
13360 offset = tg3_nvram_logical_addr(tp, offset);
13362 if (tg3_nvram_read(tp, offset, &val))
13363 return;
13365 if ((val & 0xfc000000) == 0x0c000000) {
13366 if (tg3_nvram_read(tp, offset + 4, &val))
13367 return;
13369 if (val == 0)
13370 newver = true;
13373 dst_off = strlen(tp->fw_ver);
13375 if (newver) {
13376 if (TG3_VER_SIZE - dst_off < 16 ||
13377 tg3_nvram_read(tp, offset + 8, &ver_offset))
13378 return;
13380 offset = offset + ver_offset - start;
13381 for (i = 0; i < 16; i += 4) {
13382 __be32 v;
13383 if (tg3_nvram_read_be32(tp, offset + i, &v))
13384 return;
13386 memcpy(tp->fw_ver + dst_off + i, &v, sizeof(v));
13388 } else {
13389 u32 major, minor;
13391 if (tg3_nvram_read(tp, TG3_NVM_PTREV_BCVER, &ver_offset))
13392 return;
13394 major = (ver_offset & TG3_NVM_BCVER_MAJMSK) >>
13395 TG3_NVM_BCVER_MAJSFT;
13396 minor = ver_offset & TG3_NVM_BCVER_MINMSK;
13397 snprintf(&tp->fw_ver[dst_off], TG3_VER_SIZE - dst_off,
13398 "v%d.%02d", major, minor);
13402 static void __devinit tg3_read_hwsb_ver(struct tg3 *tp)
13404 u32 val, major, minor;
13406 /* Use native endian representation */
13407 if (tg3_nvram_read(tp, TG3_NVM_HWSB_CFG1, &val))
13408 return;
13410 major = (val & TG3_NVM_HWSB_CFG1_MAJMSK) >>
13411 TG3_NVM_HWSB_CFG1_MAJSFT;
13412 minor = (val & TG3_NVM_HWSB_CFG1_MINMSK) >>
13413 TG3_NVM_HWSB_CFG1_MINSFT;
13415 snprintf(&tp->fw_ver[0], 32, "sb v%d.%02d", major, minor);
13418 static void __devinit tg3_read_sb_ver(struct tg3 *tp, u32 val)
13420 u32 offset, major, minor, build;
13422 strncat(tp->fw_ver, "sb", TG3_VER_SIZE - strlen(tp->fw_ver) - 1);
13424 if ((val & TG3_EEPROM_SB_FORMAT_MASK) != TG3_EEPROM_SB_FORMAT_1)
13425 return;
13427 switch (val & TG3_EEPROM_SB_REVISION_MASK) {
13428 case TG3_EEPROM_SB_REVISION_0:
13429 offset = TG3_EEPROM_SB_F1R0_EDH_OFF;
13430 break;
13431 case TG3_EEPROM_SB_REVISION_2:
13432 offset = TG3_EEPROM_SB_F1R2_EDH_OFF;
13433 break;
13434 case TG3_EEPROM_SB_REVISION_3:
13435 offset = TG3_EEPROM_SB_F1R3_EDH_OFF;
13436 break;
13437 case TG3_EEPROM_SB_REVISION_4:
13438 offset = TG3_EEPROM_SB_F1R4_EDH_OFF;
13439 break;
13440 case TG3_EEPROM_SB_REVISION_5:
13441 offset = TG3_EEPROM_SB_F1R5_EDH_OFF;
13442 break;
13443 case TG3_EEPROM_SB_REVISION_6:
13444 offset = TG3_EEPROM_SB_F1R6_EDH_OFF;
13445 break;
13446 default:
13447 return;
13450 if (tg3_nvram_read(tp, offset, &val))
13451 return;
13453 build = (val & TG3_EEPROM_SB_EDH_BLD_MASK) >>
13454 TG3_EEPROM_SB_EDH_BLD_SHFT;
13455 major = (val & TG3_EEPROM_SB_EDH_MAJ_MASK) >>
13456 TG3_EEPROM_SB_EDH_MAJ_SHFT;
13457 minor = val & TG3_EEPROM_SB_EDH_MIN_MASK;
13459 if (minor > 99 || build > 26)
13460 return;
13462 offset = strlen(tp->fw_ver);
13463 snprintf(&tp->fw_ver[offset], TG3_VER_SIZE - offset,
13464 " v%d.%02d", major, minor);
13466 if (build > 0) {
13467 offset = strlen(tp->fw_ver);
13468 if (offset < TG3_VER_SIZE - 1)
13469 tp->fw_ver[offset] = 'a' + build - 1;
13473 static void __devinit tg3_read_mgmtfw_ver(struct tg3 *tp)
13475 u32 val, offset, start;
13476 int i, vlen;
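/* Scan the fixed-size NVRAM directory for an ASF_INIT entry; each
 * slot is TG3_NVM_DIRENT_SIZE bytes and the entry type lives in the
 * upper bits of the first dword.
 */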
13478 for (offset = TG3_NVM_DIR_START;
13479 offset < TG3_NVM_DIR_END;
13480 offset += TG3_NVM_DIRENT_SIZE) {
13481 if (tg3_nvram_read(tp, offset, &val))
13482 return;
13484 if ((val >> TG3_NVM_DIRTYPE_SHIFT) == TG3_NVM_DIRTYPE_ASFINI)
13485 break;
13488 if (offset == TG3_NVM_DIR_END)
13489 return;
13491 if (!tg3_flag(tp, 5705_PLUS))
13492 start = 0x08000000;
13493 else if (tg3_nvram_read(tp, offset - 4, &start))
13494 return;
13496 if (tg3_nvram_read(tp, offset + 4, &offset) ||
13497 !tg3_fw_img_is_valid(tp, offset) ||
13498 tg3_nvram_read(tp, offset + 8, &val))
13499 return;
13501 offset += val - start;
13503 vlen = strlen(tp->fw_ver);
13505 tp->fw_ver[vlen++] = ',';
13506 tp->fw_ver[vlen++] = ' ';
13508 for (i = 0; i < 4; i++) {
13509 __be32 v;
13510 if (tg3_nvram_read_be32(tp, offset, &v))
13511 return;
13513 offset += sizeof(v);
13515 if (vlen > TG3_VER_SIZE - sizeof(v)) {
13516 memcpy(&tp->fw_ver[vlen], &v, TG3_VER_SIZE - vlen);
13517 break;
13520 memcpy(&tp->fw_ver[vlen], &v, sizeof(v));
13521 vlen += sizeof(v);
13525 static void __devinit tg3_read_dash_ver(struct tg3 *tp)
13527 int vlen;
13528 u32 apedata;
13529 char *fwtype;
13531 if (!tg3_flag(tp, ENABLE_APE) || !tg3_flag(tp, ENABLE_ASF))
13532 return;
13534 apedata = tg3_ape_read32(tp, TG3_APE_SEG_SIG);
13535 if (apedata != APE_SEG_SIG_MAGIC)
13536 return;
13538 apedata = tg3_ape_read32(tp, TG3_APE_FW_STATUS);
13539 if (!(apedata & APE_FW_STATUS_READY))
13540 return;
13542 apedata = tg3_ape_read32(tp, TG3_APE_FW_VERSION);
13544 if (tg3_ape_read32(tp, TG3_APE_FW_FEATURES) & TG3_APE_FW_FEATURE_NCSI) {
13545 tg3_flag_set(tp, APE_HAS_NCSI);
13546 fwtype = "NCSI";
13547 } else {
13548 fwtype = "DASH";
13551 vlen = strlen(tp->fw_ver);
13553 snprintf(&tp->fw_ver[vlen], TG3_VER_SIZE - vlen, " %s v%d.%d.%d.%d",
13554 fwtype,
13555 (apedata & APE_FW_VERSION_MAJMSK) >> APE_FW_VERSION_MAJSFT,
13556 (apedata & APE_FW_VERSION_MINMSK) >> APE_FW_VERSION_MINSFT,
13557 (apedata & APE_FW_VERSION_REVMSK) >> APE_FW_VERSION_REVSFT,
13558 (apedata & APE_FW_VERSION_BLDMSK));
13561 static void __devinit tg3_read_fw_ver(struct tg3 *tp)
13563 u32 val;
13564 bool vpd_vers = false;
13566 if (tp->fw_ver[0] != 0)
13567 vpd_vers = true;
13569 if (tg3_flag(tp, NO_NVRAM)) {
13570 strcat(tp->fw_ver, "sb");
13571 return;
13574 if (tg3_nvram_read(tp, 0, &val))
13575 return;
13577 if (val == TG3_EEPROM_MAGIC)
13578 tg3_read_bc_ver(tp);
13579 else if ((val & TG3_EEPROM_MAGIC_FW_MSK) == TG3_EEPROM_MAGIC_FW)
13580 tg3_read_sb_ver(tp, val);
13581 else if ((val & TG3_EEPROM_MAGIC_HW_MSK) == TG3_EEPROM_MAGIC_HW)
13582 tg3_read_hwsb_ver(tp);
13583 else
13584 return;
13586 if (vpd_vers)
13587 goto done;
13589 if (tg3_flag(tp, ENABLE_APE)) {
13590 if (tg3_flag(tp, ENABLE_ASF))
13591 tg3_read_dash_ver(tp);
13592 } else if (tg3_flag(tp, ENABLE_ASF)) {
13593 tg3_read_mgmtfw_ver(tp);
13596 done:
13597 tp->fw_ver[TG3_VER_SIZE - 1] = 0;
13600 static struct pci_dev * __devinit tg3_find_peer(struct tg3 *);
13602 static inline u32 tg3_rx_ret_ring_size(struct tg3 *tp)
13604 if (tg3_flag(tp, LRG_PROD_RING_CAP))
13605 return TG3_RX_RET_MAX_SIZE_5717;
13606 else if (tg3_flag(tp, JUMBO_CAPABLE) && !tg3_flag(tp, 5780_CLASS))
13607 return TG3_RX_RET_MAX_SIZE_5700;
13608 else
13609 return TG3_RX_RET_MAX_SIZE_5705;
13612 static DEFINE_PCI_DEVICE_TABLE(tg3_write_reorder_chipsets) = {
13613 { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_FE_GATE_700C) },
13614 { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_8131_BRIDGE) },
13615 { PCI_DEVICE(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_8385_0) },
13616 { },
13619 static int __devinit tg3_get_invariants(struct tg3 *tp)
13621 u32 misc_ctrl_reg;
13622 u32 pci_state_reg, grc_misc_cfg;
13623 u32 val;
13624 u16 pci_cmd;
13625 int err;
13627 /* Force memory write invalidate off. If we leave it on,
13628 * then on 5700_BX chips we have to enable a workaround.
13629 * The workaround is to set the TG3PCI_DMA_RW_CTRL boundary
13630 * to match the cacheline size. The Broadcom driver has this
13631 * workaround but turns MWI off all the time, so it never uses
13632 * it. This seems to suggest that the workaround is insufficient.
13633 */
13634 pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
13635 pci_cmd &= ~PCI_COMMAND_INVALIDATE;
13636 pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
13638 /* Important! -- Make sure register accesses are byteswapped
13639 * correctly. Also, for those chips that require it, make
13640 * sure that indirect register accesses are enabled before
13641 * the first operation.
13642 */
13643 pci_read_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
13644 &misc_ctrl_reg);
13645 tp->misc_host_ctrl |= (misc_ctrl_reg &
13646 MISC_HOST_CTRL_CHIPREV);
13647 pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
13648 tp->misc_host_ctrl);
13650 tp->pci_chip_rev_id = (misc_ctrl_reg >>
13651 MISC_HOST_CTRL_CHIPREV_SHIFT);
13652 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_USE_PROD_ID_REG) {
13653 u32 prod_id_asic_rev;
13655 if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717 ||
13656 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5718 ||
13657 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5719 ||
13658 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5720)
13659 pci_read_config_dword(tp->pdev,
13660 TG3PCI_GEN2_PRODID_ASICREV,
13661 &prod_id_asic_rev);
13662 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57781 ||
13663 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57785 ||
13664 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57761 ||
13665 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57765 ||
13666 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57791 ||
13667 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57795)
13668 pci_read_config_dword(tp->pdev,
13669 TG3PCI_GEN15_PRODID_ASICREV,
13670 &prod_id_asic_rev);
13671 else
13672 pci_read_config_dword(tp->pdev, TG3PCI_PRODID_ASICREV,
13673 &prod_id_asic_rev);
13675 tp->pci_chip_rev_id = prod_id_asic_rev;
13678 /* Wrong chip ID in 5752 A0. This code can be removed later
13679 * as A0 is not in production.
13680 */
13681 if (tp->pci_chip_rev_id == CHIPREV_ID_5752_A0_HW)
13682 tp->pci_chip_rev_id = CHIPREV_ID_5752_A0;
13684 /* If we have 5702/03 A1 or A2 on certain ICH chipsets,
13685 * we need to disable memory and use config. cycles
13686 * only to access all registers. The 5702/03 chips
13687 * can mistakenly decode the special cycles from the
13688 * ICH chipsets as memory write cycles, causing corruption
13689 * of register and memory space. Only certain ICH bridges
13690 * will drive special cycles with non-zero data during the
13691 * address phase which can fall within the 5703's address
13692 * range. This is not an ICH bug as the PCI spec allows
13693 * non-zero address during special cycles. However, only
13694 * these ICH bridges are known to drive non-zero addresses
13695 * during special cycles.
13696 *
13697 * Since special cycles do not cross PCI bridges, we only
13698 * enable this workaround if the 5703 is on the secondary
13699 * bus of these ICH bridges.
13700 */
13701 if ((tp->pci_chip_rev_id == CHIPREV_ID_5703_A1) ||
13702 (tp->pci_chip_rev_id == CHIPREV_ID_5703_A2)) {
13703 static struct tg3_dev_id {
13704 u32 vendor;
13705 u32 device;
13706 u32 rev;
13707 } ich_chipsets[] = {
13708 { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801AA_8,
13709 PCI_ANY_ID },
13710 { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801AB_8,
13711 PCI_ANY_ID },
13712 { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801BA_11,
13713 0xa },
13714 { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801BA_6,
13715 PCI_ANY_ID },
13716 { },
13718 struct tg3_dev_id *pci_id = &ich_chipsets[0];
13719 struct pci_dev *bridge = NULL;
13721 while (pci_id->vendor != 0) {
13722 bridge = pci_get_device(pci_id->vendor, pci_id->device,
13723 bridge);
13724 if (!bridge) {
13725 pci_id++;
13726 continue;
13728 if (pci_id->rev != PCI_ANY_ID) {
13729 if (bridge->revision > pci_id->rev)
13730 continue;
13732 if (bridge->subordinate &&
13733 (bridge->subordinate->number ==
13734 tp->pdev->bus->number)) {
13735 tg3_flag_set(tp, ICH_WORKAROUND);
13736 pci_dev_put(bridge);
13737 break;
13742 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
13743 static struct tg3_dev_id {
13744 u32 vendor;
13745 u32 device;
13746 } bridge_chipsets[] = {
13747 { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_PXH_0 },
13748 { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_PXH_1 },
13749 { },
13751 struct tg3_dev_id *pci_id = &bridge_chipsets[0];
13752 struct pci_dev *bridge = NULL;
13754 while (pci_id->vendor != 0) {
13755 bridge = pci_get_device(pci_id->vendor,
13756 pci_id->device,
13757 bridge);
13758 if (!bridge) {
13759 pci_id++;
13760 continue;
13762 if (bridge->subordinate &&
13763 (bridge->subordinate->number <=
13764 tp->pdev->bus->number) &&
13765 (bridge->subordinate->subordinate >=
13766 tp->pdev->bus->number)) {
13767 tg3_flag_set(tp, 5701_DMA_BUG);
13768 pci_dev_put(bridge);
13769 break;
13774 /* The EPB bridge inside 5714, 5715, and 5780 cannot support
13775 * DMA addresses > 40-bit. This bridge may have other additional
13776 * 57xx devices behind it in some 4-port NIC designs for example.
13777 * Any tg3 device found behind the bridge will also need the 40-bit
13778 * DMA workaround.
13779 */
13780 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780 ||
13781 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714) {
13782 tg3_flag_set(tp, 5780_CLASS);
13783 tg3_flag_set(tp, 40BIT_DMA_BUG);
13784 tp->msi_cap = pci_find_capability(tp->pdev, PCI_CAP_ID_MSI);
13785 } else {
13786 struct pci_dev *bridge = NULL;
13788 do {
13789 bridge = pci_get_device(PCI_VENDOR_ID_SERVERWORKS,
13790 PCI_DEVICE_ID_SERVERWORKS_EPB,
13791 bridge);
13792 if (bridge && bridge->subordinate &&
13793 (bridge->subordinate->number <=
13794 tp->pdev->bus->number) &&
13795 (bridge->subordinate->subordinate >=
13796 tp->pdev->bus->number)) {
13797 tg3_flag_set(tp, 40BIT_DMA_BUG);
13798 pci_dev_put(bridge);
13799 break;
13801 } while (bridge);
13804 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
13805 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714)
13806 tp->pdev_peer = tg3_find_peer(tp);
13808 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
13809 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
13810 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720)
13811 tg3_flag_set(tp, 5717_PLUS);
13813 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765 ||
13814 tg3_flag(tp, 5717_PLUS))
13815 tg3_flag_set(tp, 57765_PLUS);
13817 /* Intentionally exclude ASIC_REV_5906 */
13818 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
13819 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 ||
13820 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
13821 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
13822 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
13823 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780 ||
13824 tg3_flag(tp, 57765_PLUS))
13825 tg3_flag_set(tp, 5755_PLUS);
13827 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750 ||
13828 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752 ||
13829 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906 ||
13830 tg3_flag(tp, 5755_PLUS) ||
13831 tg3_flag(tp, 5780_CLASS))
13832 tg3_flag_set(tp, 5750_PLUS);
13834 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 ||
13835 tg3_flag(tp, 5750_PLUS))
13836 tg3_flag_set(tp, 5705_PLUS);
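/* The assignments above form a strict hierarchy: 5717_PLUS implies
 * 57765_PLUS, which implies 5755_PLUS, which implies 5750_PLUS, which
 * implies 5705_PLUS. A test against any one "_PLUS" flag therefore
 * also covers every newer generation.
 */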
13838 /* Determine TSO capabilities */
13839 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719)
13840 ; /* Do nothing. HW bug. */
13841 else if (tg3_flag(tp, 57765_PLUS))
13842 tg3_flag_set(tp, HW_TSO_3);
13843 else if (tg3_flag(tp, 5755_PLUS) ||
13844 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
13845 tg3_flag_set(tp, HW_TSO_2);
13846 else if (tg3_flag(tp, 5750_PLUS)) {
13847 tg3_flag_set(tp, HW_TSO_1);
13848 tg3_flag_set(tp, TSO_BUG);
13849 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750 &&
13850 tp->pci_chip_rev_id >= CHIPREV_ID_5750_C2)
13851 tg3_flag_clear(tp, TSO_BUG);
13852 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
13853 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701 &&
13854 tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) {
13855 tg3_flag_set(tp, TSO_BUG);
13856 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705)
13857 tp->fw_needed = FIRMWARE_TG3TSO5;
13858 else
13859 tp->fw_needed = FIRMWARE_TG3TSO;
13862 /* Selectively allow TSO based on operating conditions */
13863 if (tg3_flag(tp, HW_TSO_1) ||
13864 tg3_flag(tp, HW_TSO_2) ||
13865 tg3_flag(tp, HW_TSO_3) ||
13866 (tp->fw_needed && !tg3_flag(tp, ENABLE_ASF)))
13867 tg3_flag_set(tp, TSO_CAPABLE);
13868 else {
13869 tg3_flag_clear(tp, TSO_CAPABLE);
13870 tg3_flag_clear(tp, TSO_BUG);
13871 tp->fw_needed = NULL;
13874 if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0)
13875 tp->fw_needed = FIRMWARE_TG3;
13877 tp->irq_max = 1;
13879 if (tg3_flag(tp, 5750_PLUS)) {
13880 tg3_flag_set(tp, SUPPORT_MSI);
13881 if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_AX ||
13882 GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_BX ||
13883 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714 &&
13884 tp->pci_chip_rev_id <= CHIPREV_ID_5714_A2 &&
13885 tp->pdev_peer == tp->pdev))
13886 tg3_flag_clear(tp, SUPPORT_MSI);
13888 if (tg3_flag(tp, 5755_PLUS) ||
13889 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
13890 tg3_flag_set(tp, 1SHOT_MSI);
13893 if (tg3_flag(tp, 57765_PLUS)) {
13894 tg3_flag_set(tp, SUPPORT_MSIX);
13895 tp->irq_max = TG3_IRQ_MAX_VECS;
13899 if (tg3_flag(tp, 5755_PLUS))
13900 tg3_flag_set(tp, SHORT_DMA_BUG);
13902 if (tg3_flag(tp, 5717_PLUS))
13903 tg3_flag_set(tp, LRG_PROD_RING_CAP);
13905 if (tg3_flag(tp, 57765_PLUS) &&
13906 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5719)
13907 tg3_flag_set(tp, USE_JUMBO_BDFLAG);
13909 if (!tg3_flag(tp, 5705_PLUS) ||
13910 tg3_flag(tp, 5780_CLASS) ||
13911 tg3_flag(tp, USE_JUMBO_BDFLAG))
13912 tg3_flag_set(tp, JUMBO_CAPABLE);
13914 pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE,
13915 &pci_state_reg);
13917 if (pci_is_pcie(tp->pdev)) {
13918 u16 lnkctl;
13920 tg3_flag_set(tp, PCI_EXPRESS);
13922 tp->pcie_readrq = 4096;
13923 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
13924 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720)
13925 tp->pcie_readrq = 2048;
13927 pcie_set_readrq(tp->pdev, tp->pcie_readrq);
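/* The lower 2048-byte read request cap on the 5719/5720 presumably
 * works around a DMA limitation on those ASICs; all other PCIe chips
 * get the full 4096-byte maximum.
 */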
13929 pci_read_config_word(tp->pdev,
13930 pci_pcie_cap(tp->pdev) + PCI_EXP_LNKCTL,
13931 &lnkctl);
13932 if (lnkctl & PCI_EXP_LNKCTL_CLKREQ_EN) {
13933 if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
13934 ASIC_REV_5906) {
13935 tg3_flag_clear(tp, HW_TSO_2);
13936 tg3_flag_clear(tp, TSO_CAPABLE);
13938 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
13939 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
13940 tp->pci_chip_rev_id == CHIPREV_ID_57780_A0 ||
13941 tp->pci_chip_rev_id == CHIPREV_ID_57780_A1)
13942 tg3_flag_set(tp, CLKREQ_BUG);
13943 } else if (tp->pci_chip_rev_id == CHIPREV_ID_5717_A0) {
13944 tg3_flag_set(tp, L1PLLPD_EN);
13946 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785) {
13947 /* BCM5785 devices are effectively PCIe devices, and should
13948 * follow PCIe codepaths, but do not have a PCIe capabilities
13949 * section.
13950 */
13951 tg3_flag_set(tp, PCI_EXPRESS);
13952 } else if (!tg3_flag(tp, 5705_PLUS) ||
13953 tg3_flag(tp, 5780_CLASS)) {
13954 tp->pcix_cap = pci_find_capability(tp->pdev, PCI_CAP_ID_PCIX);
13955 if (!tp->pcix_cap) {
13956 dev_err(&tp->pdev->dev,
13957 "Cannot find PCI-X capability, aborting\n");
13958 return -EIO;
13961 if (!(pci_state_reg & PCISTATE_CONV_PCI_MODE))
13962 tg3_flag_set(tp, PCIX_MODE);
13965 /* If we have an AMD 762 or VIA K8T800 chipset, write
13966 * reordering to the mailbox registers done by the host
13967 * controller can cause major troubles. We read back from
13968 * every mailbox register write to force the writes to be
13969 * posted to the chip in order.
13970 */
13971 if (pci_dev_present(tg3_write_reorder_chipsets) &&
13972 !tg3_flag(tp, PCI_EXPRESS))
13973 tg3_flag_set(tp, MBOX_WRITE_REORDER);
13975 pci_read_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE,
13976 &tp->pci_cacheline_sz);
13977 pci_read_config_byte(tp->pdev, PCI_LATENCY_TIMER,
13978 &tp->pci_lat_timer);
13979 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 &&
13980 tp->pci_lat_timer < 64) {
13981 tp->pci_lat_timer = 64;
13982 pci_write_config_byte(tp->pdev, PCI_LATENCY_TIMER,
13983 tp->pci_lat_timer);
13986 /* Important! -- It is critical that the PCI-X hw workaround
13987 * situation is decided before the first MMIO register access.
13988 */
13989 if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5700_BX) {
13990 /* 5700 BX chips need to have their TX producer index
13991 * mailboxes written twice to work around a bug.
13992 */
13993 tg3_flag_set(tp, TXD_MBOX_HWBUG);
13995 /* If we are in PCI-X mode, enable register write workaround.
13996 *
13997 * The workaround is to use indirect register accesses
13998 * for all chip writes not to mailbox registers.
13999 */
14000 if (tg3_flag(tp, PCIX_MODE)) {
14001 u32 pm_reg;
14003 tg3_flag_set(tp, PCIX_TARGET_HWBUG);
14005 /* The chip can have its power management PCI config
14006 * space registers clobbered due to this bug.
14007 * So explicitly force the chip into D0 here.
14008 */
14009 pci_read_config_dword(tp->pdev,
14010 tp->pm_cap + PCI_PM_CTRL,
14011 &pm_reg);
14012 pm_reg &= ~PCI_PM_CTRL_STATE_MASK;
14013 pm_reg |= PCI_PM_CTRL_PME_ENABLE | 0 /* D0 */;
14014 pci_write_config_dword(tp->pdev,
14015 tp->pm_cap + PCI_PM_CTRL,
14016 pm_reg);
14018 /* Also, force SERR#/PERR# in PCI command. */
14019 pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
14020 pci_cmd |= PCI_COMMAND_PARITY | PCI_COMMAND_SERR;
14021 pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
14025 if ((pci_state_reg & PCISTATE_BUS_SPEED_HIGH) != 0)
14026 tg3_flag_set(tp, PCI_HIGH_SPEED);
14027 if ((pci_state_reg & PCISTATE_BUS_32BIT) != 0)
14028 tg3_flag_set(tp, PCI_32BIT);
14030 /* Chip-specific fixup from Broadcom driver */
14031 if ((tp->pci_chip_rev_id == CHIPREV_ID_5704_A0) &&
14032 (!(pci_state_reg & PCISTATE_RETRY_SAME_DMA))) {
14033 pci_state_reg |= PCISTATE_RETRY_SAME_DMA;
14034 pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE, pci_state_reg);
14037 /* Default fast path register access methods */
14038 tp->read32 = tg3_read32;
14039 tp->write32 = tg3_write32;
14040 tp->read32_mbox = tg3_read32;
14041 tp->write32_mbox = tg3_write32;
14042 tp->write32_tx_mbox = tg3_write32;
14043 tp->write32_rx_mbox = tg3_write32;
14045 /* Various workaround register access methods */
14046 if (tg3_flag(tp, PCIX_TARGET_HWBUG))
14047 tp->write32 = tg3_write_indirect_reg32;
14048 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701 ||
14049 (tg3_flag(tp, PCI_EXPRESS) &&
14050 tp->pci_chip_rev_id == CHIPREV_ID_5750_A0)) {
14051 /*
14052 * Back to back register writes can cause problems on these
14053 * chips; the workaround is to read back all reg writes
14054 * except those to mailbox regs.
14055 *
14056 * See tg3_write_indirect_reg32().
14057 */
14058 tp->write32 = tg3_write_flush_reg32;
14061 if (tg3_flag(tp, TXD_MBOX_HWBUG) || tg3_flag(tp, MBOX_WRITE_REORDER)) {
14062 tp->write32_tx_mbox = tg3_write32_tx_mbox;
14063 if (tg3_flag(tp, MBOX_WRITE_REORDER))
14064 tp->write32_rx_mbox = tg3_write_flush_reg32;
14067 if (tg3_flag(tp, ICH_WORKAROUND)) {
14068 tp->read32 = tg3_read_indirect_reg32;
14069 tp->write32 = tg3_write_indirect_reg32;
14070 tp->read32_mbox = tg3_read_indirect_mbox;
14071 tp->write32_mbox = tg3_write_indirect_mbox;
14072 tp->write32_tx_mbox = tg3_write_indirect_mbox;
14073 tp->write32_rx_mbox = tg3_write_indirect_mbox;
14075 iounmap(tp->regs);
14076 tp->regs = NULL;
14078 pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
14079 pci_cmd &= ~PCI_COMMAND_MEMORY;
14080 pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
14082 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
14083 tp->read32_mbox = tg3_read32_mbox_5906;
14084 tp->write32_mbox = tg3_write32_mbox_5906;
14085 tp->write32_tx_mbox = tg3_write32_mbox_5906;
14086 tp->write32_rx_mbox = tg3_write32_mbox_5906;
14089 if (tp->write32 == tg3_write_indirect_reg32 ||
14090 (tg3_flag(tp, PCIX_MODE) &&
14091 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
14092 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701)))
14093 tg3_flag_set(tp, SRAM_USE_CONFIG);
14095 /* The memory arbiter has to be enabled in order for SRAM accesses
14096 * to succeed. Normally on powerup the tg3 chip firmware will make
14097 * sure it is enabled, but other entities such as system netboot
14098 * code might disable it.
14099 */
14100 val = tr32(MEMARB_MODE);
14101 tw32(MEMARB_MODE, val | MEMARB_MODE_ENABLE);
14103 if (tg3_flag(tp, PCIX_MODE)) {
14104 pci_read_config_dword(tp->pdev,
14105 tp->pcix_cap + PCI_X_STATUS, &val);
14106 tp->pci_fn = val & 0x7;
14107 } else {
14108 tp->pci_fn = PCI_FUNC(tp->pdev->devfn) & 3;
14111 /* Get eeprom hw config before calling tg3_set_power_state().
14112 * In particular, the TG3_FLAG_IS_NIC flag must be
14113 * determined before calling tg3_set_power_state() so that
14114 * we know whether or not to switch out of Vaux power.
14115 * When the flag is set, it means that GPIO1 is used for eeprom
14116 * write protect and also implies that it is a LOM where GPIOs
14117 * are not used to switch power.
14118 */
14119 tg3_get_eeprom_hw_cfg(tp);
14121 if (tg3_flag(tp, ENABLE_APE)) {
14122 /* Allow reads and writes to the
14123 * APE register and memory space.
14124 */
14125 pci_state_reg |= PCISTATE_ALLOW_APE_CTLSPC_WR |
14126 PCISTATE_ALLOW_APE_SHMEM_WR |
14127 PCISTATE_ALLOW_APE_PSPACE_WR;
14128 pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE,
14129 pci_state_reg);
14131 tg3_ape_lock_init(tp);
14132 }
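/* The APE is the management processor found on newer tg3 devices;
 * the PCISTATE bits set above grant the host access to its control
 * registers and shared memory.
 */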
14134 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
14135 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
14136 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
14137 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780 ||
14138 tg3_flag(tp, 57765_PLUS))
14139 tg3_flag_set(tp, CPMU_PRESENT);
14141 /* Set up tp->grc_local_ctrl before calling
14142 * tg3_pwrsrc_switch_to_vmain(). GPIO1 driven high
14143 * will bring 5700's external PHY out of reset.
14144 * It is also used as eeprom write protect on LOMs.
14145 */
14146 tp->grc_local_ctrl = GRC_LCLCTRL_INT_ON_ATTN | GRC_LCLCTRL_AUTO_SEEPROM;
14147 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
14148 tg3_flag(tp, EEPROM_WRITE_PROT))
14149 tp->grc_local_ctrl |= (GRC_LCLCTRL_GPIO_OE1 |
14150 GRC_LCLCTRL_GPIO_OUTPUT1);
14151 /* Unused GPIO3 must be driven as output on 5752 because there
14152 * are no pull-up resistors on unused GPIO pins.
14153 */
14154 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752)
14155 tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE3;
14157 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
14158 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780 ||
14159 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765)
14160 tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_UART_SEL;
14162 if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761 ||
14163 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761S) {
14164 /* Turn off the debug UART. */
14165 tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_UART_SEL;
14166 if (tg3_flag(tp, IS_NIC))
14167 /* Keep VMain power. */
14168 tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE0 |
14169 GRC_LCLCTRL_GPIO_OUTPUT0;
14170 }
14172 /* Switch out of Vaux if it is a NIC */
14173 tg3_pwrsrc_switch_to_vmain(tp);
14175 /* Derive initial jumbo mode from MTU assigned in
14176 * ether_setup() via the alloc_etherdev() call
14177 */
14178 if (tp->dev->mtu > ETH_DATA_LEN && !tg3_flag(tp, 5780_CLASS))
14179 tg3_flag_set(tp, JUMBO_RING_ENABLE);
14181 /* Determine WakeOnLan speed to use. */
14182 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
14183 tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
14184 tp->pci_chip_rev_id == CHIPREV_ID_5701_B0 ||
14185 tp->pci_chip_rev_id == CHIPREV_ID_5701_B2) {
14186 tg3_flag_clear(tp, WOL_SPEED_100MB);
14187 } else {
14188 tg3_flag_set(tp, WOL_SPEED_100MB);
14189 }
14191 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
14192 tp->phy_flags |= TG3_PHYFLG_IS_FET;
14194 /* A few boards don't want Ethernet@WireSpeed phy feature */
14195 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
14196 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
14197 (tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) &&
14198 (tp->pci_chip_rev_id != CHIPREV_ID_5705_A1)) ||
14199 (tp->phy_flags & TG3_PHYFLG_IS_FET) ||
14200 (tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
14201 tp->phy_flags |= TG3_PHYFLG_NO_ETH_WIRE_SPEED;
14203 if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5703_AX ||
14204 GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5704_AX)
14205 tp->phy_flags |= TG3_PHYFLG_ADC_BUG;
14206 if (tp->pci_chip_rev_id == CHIPREV_ID_5704_A0)
14207 tp->phy_flags |= TG3_PHYFLG_5704_A0_BUG;
14209 if (tg3_flag(tp, 5705_PLUS) &&
14210 !(tp->phy_flags & TG3_PHYFLG_IS_FET) &&
14211 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5785 &&
14212 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_57780 &&
14213 !tg3_flag(tp, 57765_PLUS)) {
14214 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
14215 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 ||
14216 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
14217 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761) {
14218 if (tp->pdev->device != PCI_DEVICE_ID_TIGON3_5756 &&
14219 tp->pdev->device != PCI_DEVICE_ID_TIGON3_5722)
14220 tp->phy_flags |= TG3_PHYFLG_JITTER_BUG;
14221 if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5755M)
14222 tp->phy_flags |= TG3_PHYFLG_ADJUST_TRIM;
14223 } else
14224 tp->phy_flags |= TG3_PHYFLG_BER_BUG;
14225 }
14227 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 &&
14228 GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5784_AX) {
14229 tp->phy_otp = tg3_read_otp_phycfg(tp);
14230 if (tp->phy_otp == 0)
14231 tp->phy_otp = TG3_OTP_DEFAULT;
14232 }
14234 if (tg3_flag(tp, CPMU_PRESENT))
14235 tp->mi_mode = MAC_MI_MODE_500KHZ_CONST;
14236 else
14237 tp->mi_mode = MAC_MI_MODE_BASE;
14239 tp->coalesce_mode = 0;
14240 if (GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5700_AX &&
14241 GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5700_BX)
14242 tp->coalesce_mode |= HOSTCC_MODE_32BYTE;
14244 /* Set these bits to enable statistics workaround. */
14245 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
14246 tp->pci_chip_rev_id == CHIPREV_ID_5719_A0 ||
14247 tp->pci_chip_rev_id == CHIPREV_ID_5720_A0) {
14248 tp->coalesce_mode |= HOSTCC_MODE_ATTN;
14249 tp->grc_mode |= GRC_MODE_IRQ_ON_FLOW_ATTN;
14250 }
14252 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
14253 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780)
14254 tg3_flag_set(tp, USE_PHYLIB);
14256 err = tg3_mdio_init(tp);
14257 if (err)
14258 return err;
14260 /* Initialize data/descriptor byte/word swapping. */
14261 val = tr32(GRC_MODE);
14262 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720)
14263 val &= (GRC_MODE_BYTE_SWAP_B2HRX_DATA |
14264 GRC_MODE_WORD_SWAP_B2HRX_DATA |
14265 GRC_MODE_B2HRX_ENABLE |
14266 GRC_MODE_HTX2B_ENABLE |
14267 GRC_MODE_HOST_STACKUP);
14268 else
14269 val &= GRC_MODE_HOST_STACKUP;
14271 tw32(GRC_MODE, val | tp->grc_mode);
14273 tg3_switch_clocks(tp);
14275 /* Clear this out for sanity. */
14276 tw32(TG3PCI_MEM_WIN_BASE_ADDR, 0);
14278 pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE,
14279 &pci_state_reg);
14280 if ((pci_state_reg & PCISTATE_CONV_PCI_MODE) == 0 &&
14281 !tg3_flag(tp, PCIX_TARGET_HWBUG)) {
14282 u32 chiprevid = GET_CHIP_REV_ID(tp->misc_host_ctrl);
14284 if (chiprevid == CHIPREV_ID_5701_A0 ||
14285 chiprevid == CHIPREV_ID_5701_B0 ||
14286 chiprevid == CHIPREV_ID_5701_B2 ||
14287 chiprevid == CHIPREV_ID_5701_B5) {
14288 void __iomem *sram_base;
14290 /* Write some dummy words into the SRAM status block
14291 * area, see if it reads back correctly. If the return
14292 * value is bad, force enable the PCIX workaround.
14293 */
14294 sram_base = tp->regs + NIC_SRAM_WIN_BASE + NIC_SRAM_STATS_BLK;
14296 writel(0x00000000, sram_base);
14297 writel(0x00000000, sram_base + 4);
14298 writel(0xffffffff, sram_base + 4);
14299 if (readl(sram_base) != 0x00000000)
14300 tg3_flag_set(tp, PCIX_TARGET_HWBUG);
14301 }
14302 }
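/* If the 0xffffffff written at offset 4 bled into offset 0, the
 * readback above is nonzero and the PCI-X target workaround is
 * force-enabled.
 */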
14304 udelay(50);
14305 tg3_nvram_init(tp);
14307 grc_misc_cfg = tr32(GRC_MISC_CFG);
14308 grc_misc_cfg &= GRC_MISC_CFG_BOARD_ID_MASK;
14310 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
14311 (grc_misc_cfg == GRC_MISC_CFG_BOARD_ID_5788 ||
14312 grc_misc_cfg == GRC_MISC_CFG_BOARD_ID_5788M))
14313 tg3_flag_set(tp, IS_5788);
14315 if (!tg3_flag(tp, IS_5788) &&
14316 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700)
14317 tg3_flag_set(tp, TAGGED_STATUS);
14318 if (tg3_flag(tp, TAGGED_STATUS)) {
14319 tp->coalesce_mode |= (HOSTCC_MODE_CLRTICK_RXBD |
14320 HOSTCC_MODE_CLRTICK_TXBD);
14322 tp->misc_host_ctrl |= MISC_HOST_CTRL_TAGGED_STATUS;
14323 pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
14324 tp->misc_host_ctrl);
14325 }
14327 /* Preserve the APE MAC_MODE bits */
14328 if (tg3_flag(tp, ENABLE_APE))
14329 tp->mac_mode = MAC_MODE_APE_TX_EN | MAC_MODE_APE_RX_EN;
14330 else
14331 tp->mac_mode = TG3_DEF_MAC_MODE;
14333 /* these are limited to 10/100 only */
14334 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 &&
14335 (grc_misc_cfg == 0x8000 || grc_misc_cfg == 0x4000)) ||
14336 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
14337 tp->pdev->vendor == PCI_VENDOR_ID_BROADCOM &&
14338 (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5901 ||
14339 tp->pdev->device == PCI_DEVICE_ID_TIGON3_5901_2 ||
14340 tp->pdev->device == PCI_DEVICE_ID_TIGON3_5705F)) ||
14341 (tp->pdev->vendor == PCI_VENDOR_ID_BROADCOM &&
14342 (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5751F ||
14343 tp->pdev->device == PCI_DEVICE_ID_TIGON3_5753F ||
14344 tp->pdev->device == PCI_DEVICE_ID_TIGON3_5787F)) ||
14345 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57790 ||
14346 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57791 ||
14347 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57795 ||
14348 (tp->phy_flags & TG3_PHYFLG_IS_FET))
14349 tp->phy_flags |= TG3_PHYFLG_10_100_ONLY;
14351 err = tg3_phy_probe(tp);
14352 if (err) {
14353 dev_err(&tp->pdev->dev, "phy probe failed, err %d\n", err);
14354 /* ... but do not return immediately ... */
14355 tg3_mdio_fini(tp);
14356 }
14358 tg3_read_vpd(tp);
14359 tg3_read_fw_ver(tp);
14361 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
14362 tp->phy_flags &= ~TG3_PHYFLG_USE_MI_INTERRUPT;
14363 } else {
14364 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700)
14365 tp->phy_flags |= TG3_PHYFLG_USE_MI_INTERRUPT;
14366 else
14367 tp->phy_flags &= ~TG3_PHYFLG_USE_MI_INTERRUPT;
14368 }
14370 /* 5700 {AX,BX} chips have a broken status block link
14371 * change bit implementation, so we must use the
14372 * status register in those cases.
14373 */
14374 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700)
14375 tg3_flag_set(tp, USE_LINKCHG_REG);
14376 else
14377 tg3_flag_clear(tp, USE_LINKCHG_REG);
14379 /* The led_ctrl is set during tg3_phy_probe, here we might
14380 * have to force the link status polling mechanism based
14381 * upon subsystem IDs.
14382 */
14383 if (tp->pdev->subsystem_vendor == PCI_VENDOR_ID_DELL &&
14384 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701 &&
14385 !(tp->phy_flags & TG3_PHYFLG_PHY_SERDES)) {
14386 tp->phy_flags |= TG3_PHYFLG_USE_MI_INTERRUPT;
14387 tg3_flag_set(tp, USE_LINKCHG_REG);
14388 }
14390 /* For all SERDES we poll the MAC status register. */
14391 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
14392 tg3_flag_set(tp, POLL_SERDES);
14393 else
14394 tg3_flag_clear(tp, POLL_SERDES);
14396 tp->rx_offset = NET_IP_ALIGN;
14397 tp->rx_copy_thresh = TG3_RX_COPY_THRESHOLD;
14398 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701 &&
14399 tg3_flag(tp, PCIX_MODE)) {
14400 tp->rx_offset = 0;
14401 #ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
14402 tp->rx_copy_thresh = ~(u16)0;
14403 #endif
14404 }
14406 tp->rx_std_ring_mask = TG3_RX_STD_RING_SIZE(tp) - 1;
14407 tp->rx_jmb_ring_mask = TG3_RX_JMB_RING_SIZE(tp) - 1;
14408 tp->rx_ret_ring_mask = tg3_rx_ret_ring_size(tp) - 1;
14410 tp->rx_std_max_post = tp->rx_std_ring_mask + 1;
14412 /* Increment the rx prod index on the rx std ring by at most
14413 * 8 for these chips to work around hw errata.
14414 */
14415 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750 ||
14416 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752 ||
14417 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755)
14418 tp->rx_std_max_post = 8;
14420 if (tg3_flag(tp, ASPM_WORKAROUND))
14421 tp->pwrmgmt_thresh = tr32(PCIE_PWR_MGMT_THRESH) &
14422 PCIE_PWR_MGMT_L1_THRESH_MSK;
14424 return err;
14425 }
14427 #ifdef CONFIG_SPARC
14428 static int __devinit tg3_get_macaddr_sparc(struct tg3 *tp)
14429 {
14430 struct net_device *dev = tp->dev;
14431 struct pci_dev *pdev = tp->pdev;
14432 struct device_node *dp = pci_device_to_OF_node(pdev);
14433 const unsigned char *addr;
14434 int len;
14436 addr = of_get_property(dp, "local-mac-address", &len);
14437 if (addr && len == 6) {
14438 memcpy(dev->dev_addr, addr, 6);
14439 memcpy(dev->perm_addr, dev->dev_addr, 6);
14440 return 0;
14441 }
14442 return -ENODEV;
14443 }
14445 static int __devinit tg3_get_default_macaddr_sparc(struct tg3 *tp)
14446 {
14447 struct net_device *dev = tp->dev;
14449 memcpy(dev->dev_addr, idprom->id_ethaddr, 6);
14450 memcpy(dev->perm_addr, idprom->id_ethaddr, 6);
14451 return 0;
14452 }
14453 #endif
14455 static int __devinit tg3_get_device_address(struct tg3 *tp)
14456 {
14457 struct net_device *dev = tp->dev;
14458 u32 hi, lo, mac_offset;
14459 int addr_ok = 0;
14461 #ifdef CONFIG_SPARC
14462 if (!tg3_get_macaddr_sparc(tp))
14463 return 0;
14464 #endif
14466 mac_offset = 0x7c;
14467 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
14468 tg3_flag(tp, 5780_CLASS)) {
14469 if (tr32(TG3PCI_DUAL_MAC_CTRL) & DUAL_MAC_CTRL_ID)
14470 mac_offset = 0xcc;
14471 if (tg3_nvram_lock(tp))
14472 tw32_f(NVRAM_CMD, NVRAM_CMD_RESET);
14473 else
14474 tg3_nvram_unlock(tp);
14475 } else if (tg3_flag(tp, 5717_PLUS)) {
14476 if (tp->pci_fn & 1)
14477 mac_offset = 0xcc;
14478 if (tp->pci_fn > 1)
14479 mac_offset += 0x18c;
14480 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
14481 mac_offset = 0x10;
14483 /* First try to get it from MAC address mailbox. */
14484 tg3_read_mem(tp, NIC_SRAM_MAC_ADDR_HIGH_MBOX, &hi);
14485 if ((hi >> 16) == 0x484b) {
14486 dev->dev_addr[0] = (hi >> 8) & 0xff;
14487 dev->dev_addr[1] = (hi >> 0) & 0xff;
14489 tg3_read_mem(tp, NIC_SRAM_MAC_ADDR_LOW_MBOX, &lo);
14490 dev->dev_addr[2] = (lo >> 24) & 0xff;
14491 dev->dev_addr[3] = (lo >> 16) & 0xff;
14492 dev->dev_addr[4] = (lo >> 8) & 0xff;
14493 dev->dev_addr[5] = (lo >> 0) & 0xff;
14494 }
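/* The upper half of the HIGH mailbox word apparently serves as a
 * 0x484b validity signature from the bootcode; its lower half holds
 * the first two MAC address bytes and the LOW mailbox the other four.
 */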
14495 /* Some old bootcode may report a 0 MAC address in SRAM */
14496 addr_ok = is_valid_ether_addr(&dev->dev_addr[0]);
14498 if (!addr_ok) {
14499 /* Next, try NVRAM. */
14500 if (!tg3_flag(tp, NO_NVRAM) &&
14501 !tg3_nvram_read_be32(tp, mac_offset + 0, &hi) &&
14502 !tg3_nvram_read_be32(tp, mac_offset + 4, &lo)) {
14503 memcpy(&dev->dev_addr[0], ((char *)&hi) + 2, 2);
14504 memcpy(&dev->dev_addr[2], (char *)&lo, sizeof(lo));
14505 }
14506 /* Finally just fetch it out of the MAC control regs. */
14507 else {
14508 hi = tr32(MAC_ADDR_0_HIGH);
14509 lo = tr32(MAC_ADDR_0_LOW);
14511 dev->dev_addr[5] = lo & 0xff;
14512 dev->dev_addr[4] = (lo >> 8) & 0xff;
14513 dev->dev_addr[3] = (lo >> 16) & 0xff;
14514 dev->dev_addr[2] = (lo >> 24) & 0xff;
14515 dev->dev_addr[1] = hi & 0xff;
14516 dev->dev_addr[0] = (hi >> 8) & 0xff;
14517 }
14518 }
14520 if (!is_valid_ether_addr(&dev->dev_addr[0])) {
14521 #ifdef CONFIG_SPARC
14522 if (!tg3_get_default_macaddr_sparc(tp))
14523 return 0;
14524 #endif
14525 return -EINVAL;
14526 }
14527 memcpy(dev->perm_addr, dev->dev_addr, dev->addr_len);
14528 return 0;
14529 }
14531 #define BOUNDARY_SINGLE_CACHELINE 1
14532 #define BOUNDARY_MULTI_CACHELINE 2
14534 static u32 __devinit tg3_calc_dma_bndry(struct tg3 *tp, u32 val)
14535 {
14536 int cacheline_size;
14537 u8 byte;
14538 int goal;
14540 pci_read_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE, &byte);
14541 if (byte == 0)
14542 cacheline_size = 1024;
14543 else
14544 cacheline_size = (int) byte * 4;
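/* PCI_CACHE_LINE_SIZE is in units of 32-bit words, hence the
 * multiply by four; a value of zero is treated as the 1024-byte
 * maximum.
 */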
14546 /* On 5703 and later chips, the boundary bits have no
14547 * effect.
14548 */
14549 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
14550 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701 &&
14551 !tg3_flag(tp, PCI_EXPRESS))
14552 goto out;
14554 #if defined(CONFIG_PPC64) || defined(CONFIG_IA64) || defined(CONFIG_PARISC)
14555 goal = BOUNDARY_MULTI_CACHELINE;
14556 #else
14557 #if defined(CONFIG_SPARC64) || defined(CONFIG_ALPHA)
14558 goal = BOUNDARY_SINGLE_CACHELINE;
14559 #else
14560 goal = 0;
14561 #endif
14562 #endif
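/* The boundary goal is a per-architecture choice: PPC64/IA64/PARISC
 * get multi-cacheline boundaries, SPARC64/Alpha single-cacheline,
 * and everything else leaves the boundary bits untouched.
 */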
14564 if (tg3_flag(tp, 57765_PLUS)) {
14565 val = goal ? 0 : DMA_RWCTRL_DIS_CACHE_ALIGNMENT;
14566 goto out;
14567 }
14569 if (!goal)
14570 goto out;
14572 /* PCI controllers on most RISC systems tend to disconnect
14573 * when a device tries to burst across a cache-line boundary.
14574 * Therefore, letting tg3 do so just wastes PCI bandwidth.
14576 * Unfortunately, for PCI-E there are only limited
14577 * write-side controls for this, and thus for reads
14578 * we will still get the disconnects. We'll also waste
14579 * these PCI cycles for both read and write for chips
14580 * other than 5700 and 5701 which do not implement the
14581 * boundary bits.
14582 */
14583 if (tg3_flag(tp, PCIX_MODE) && !tg3_flag(tp, PCI_EXPRESS)) {
14584 switch (cacheline_size) {
14585 case 16:
14586 case 32:
14587 case 64:
14588 case 128:
14589 if (goal == BOUNDARY_SINGLE_CACHELINE) {
14590 val |= (DMA_RWCTRL_READ_BNDRY_128_PCIX |
14591 DMA_RWCTRL_WRITE_BNDRY_128_PCIX);
14592 } else {
14593 val |= (DMA_RWCTRL_READ_BNDRY_384_PCIX |
14594 DMA_RWCTRL_WRITE_BNDRY_384_PCIX);
14595 }
14596 break;
14598 case 256:
14599 val |= (DMA_RWCTRL_READ_BNDRY_256_PCIX |
14600 DMA_RWCTRL_WRITE_BNDRY_256_PCIX);
14601 break;
14603 default:
14604 val |= (DMA_RWCTRL_READ_BNDRY_384_PCIX |
14605 DMA_RWCTRL_WRITE_BNDRY_384_PCIX);
14606 break;
14607 }
14608 } else if (tg3_flag(tp, PCI_EXPRESS)) {
14609 switch (cacheline_size) {
14610 case 16:
14611 case 32:
14612 case 64:
14613 if (goal == BOUNDARY_SINGLE_CACHELINE) {
14614 val &= ~DMA_RWCTRL_WRITE_BNDRY_DISAB_PCIE;
14615 val |= DMA_RWCTRL_WRITE_BNDRY_64_PCIE;
14616 break;
14617 }
14618 /* fallthrough */
14619 case 128:
14620 default:
14621 val &= ~DMA_RWCTRL_WRITE_BNDRY_DISAB_PCIE;
14622 val |= DMA_RWCTRL_WRITE_BNDRY_128_PCIE;
14623 break;
14624 }
14625 } else {
14626 switch (cacheline_size) {
14627 case 16:
14628 if (goal == BOUNDARY_SINGLE_CACHELINE) {
14629 val |= (DMA_RWCTRL_READ_BNDRY_16 |
14630 DMA_RWCTRL_WRITE_BNDRY_16);
14631 break;
14632 }
14633 /* fallthrough */
14634 case 32:
14635 if (goal == BOUNDARY_SINGLE_CACHELINE) {
14636 val |= (DMA_RWCTRL_READ_BNDRY_32 |
14637 DMA_RWCTRL_WRITE_BNDRY_32);
14638 break;
14639 }
14640 /* fallthrough */
14641 case 64:
14642 if (goal == BOUNDARY_SINGLE_CACHELINE) {
14643 val |= (DMA_RWCTRL_READ_BNDRY_64 |
14644 DMA_RWCTRL_WRITE_BNDRY_64);
14645 break;
14646 }
14647 /* fallthrough */
14648 case 128:
14649 if (goal == BOUNDARY_SINGLE_CACHELINE) {
14650 val |= (DMA_RWCTRL_READ_BNDRY_128 |
14651 DMA_RWCTRL_WRITE_BNDRY_128);
14652 break;
14653 }
14654 /* fallthrough */
14655 case 256:
14656 val |= (DMA_RWCTRL_READ_BNDRY_256 |
14657 DMA_RWCTRL_WRITE_BNDRY_256);
14658 break;
14659 case 512:
14660 val |= (DMA_RWCTRL_READ_BNDRY_512 |
14661 DMA_RWCTRL_WRITE_BNDRY_512);
14662 break;
14663 case 1024:
14664 default:
14665 val |= (DMA_RWCTRL_READ_BNDRY_1024 |
14666 DMA_RWCTRL_WRITE_BNDRY_1024);
14667 break;
14668 }
14669 }
14671 out:
14672 return val;
14673 }
14675 static int __devinit tg3_do_test_dma(struct tg3 *tp, u32 *buf, dma_addr_t buf_dma, int size, int to_device)
14676 {
14677 struct tg3_internal_buffer_desc test_desc;
14678 u32 sram_dma_descs;
14679 int i, ret;
14681 sram_dma_descs = NIC_SRAM_DMA_DESC_POOL_BASE;
14683 tw32(FTQ_RCVBD_COMP_FIFO_ENQDEQ, 0);
14684 tw32(FTQ_RCVDATA_COMP_FIFO_ENQDEQ, 0);
14685 tw32(RDMAC_STATUS, 0);
14686 tw32(WDMAC_STATUS, 0);
14688 tw32(BUFMGR_MODE, 0);
14689 tw32(FTQ_RESET, 0);
14691 test_desc.addr_hi = ((u64) buf_dma) >> 32;
14692 test_desc.addr_lo = buf_dma & 0xffffffff;
14693 test_desc.nic_mbuf = 0x00002100;
14694 test_desc.len = size;
14696 /*
14697 * HP ZX1 was seeing test failures for 5701 cards running at 33MHz
14698 * the *second* time the tg3 driver was getting loaded after an
14699 * initial scan.
14701 * Broadcom tells me:
14702 * ...the DMA engine is connected to the GRC block and a DMA
14703 * reset may affect the GRC block in some unpredictable way...
14704 * The behavior of resets to individual blocks has not been tested.
14706 * Broadcom noted the GRC reset will also reset all sub-components.
14707 */
14708 if (to_device) {
14709 test_desc.cqid_sqid = (13 << 8) | 2;
14711 tw32_f(RDMAC_MODE, RDMAC_MODE_ENABLE);
14712 udelay(40);
14713 } else {
14714 test_desc.cqid_sqid = (16 << 8) | 7;
14716 tw32_f(WDMAC_MODE, WDMAC_MODE_ENABLE);
14717 udelay(40);
14718 }
14719 test_desc.flags = 0x00000005;
14721 for (i = 0; i < (sizeof(test_desc) / sizeof(u32)); i++) {
14722 u32 val;
14724 val = *(((u32 *)&test_desc) + i);
14725 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR,
14726 sram_dma_descs + (i * sizeof(u32)));
14727 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);
14728 }
14729 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
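/* The test descriptor is copied word-by-word into NIC SRAM through
 * the config-space memory window, after which the window base is
 * restored to zero.
 */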
14731 if (to_device)
14732 tw32(FTQ_DMA_HIGH_READ_FIFO_ENQDEQ, sram_dma_descs);
14733 else
14734 tw32(FTQ_DMA_HIGH_WRITE_FIFO_ENQDEQ, sram_dma_descs);
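/* Kick the DMA engine by enqueueing the descriptor address on the
 * appropriate FIFO, then poll the matching completion FIFO for up
 * to 40 * 100us for the descriptor to come back.
 */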
14736 ret = -ENODEV;
14737 for (i = 0; i < 40; i++) {
14738 u32 val;
14740 if (to_device)
14741 val = tr32(FTQ_RCVBD_COMP_FIFO_ENQDEQ);
14742 else
14743 val = tr32(FTQ_RCVDATA_COMP_FIFO_ENQDEQ);
14744 if ((val & 0xffff) == sram_dma_descs) {
14745 ret = 0;
14746 break;
14747 }
14749 udelay(100);
14750 }
14752 return ret;
14753 }
14755 #define TEST_BUFFER_SIZE 0x2000
14757 static DEFINE_PCI_DEVICE_TABLE(tg3_dma_wait_state_chipsets) = {
14758 { PCI_DEVICE(PCI_VENDOR_ID_APPLE, PCI_DEVICE_ID_APPLE_UNI_N_PCI15) },
14759 { },
14760 };
14762 static int __devinit tg3_test_dma(struct tg3 *tp)
14763 {
14764 dma_addr_t buf_dma;
14765 u32 *buf, saved_dma_rwctrl;
14766 int ret = 0;
14768 buf = dma_alloc_coherent(&tp->pdev->dev, TEST_BUFFER_SIZE,
14769 &buf_dma, GFP_KERNEL);
14770 if (!buf) {
14771 ret = -ENOMEM;
14772 goto out_nofree;
14773 }
14775 tp->dma_rwctrl = ((0x7 << DMA_RWCTRL_PCI_WRITE_CMD_SHIFT) |
14776 (0x6 << DMA_RWCTRL_PCI_READ_CMD_SHIFT));
14778 tp->dma_rwctrl = tg3_calc_dma_bndry(tp, tp->dma_rwctrl);
14780 if (tg3_flag(tp, 57765_PLUS))
14781 goto out;
14783 if (tg3_flag(tp, PCI_EXPRESS)) {
14784 /* DMA read watermark not used on PCIE */
14785 tp->dma_rwctrl |= 0x00180000;
14786 } else if (!tg3_flag(tp, PCIX_MODE)) {
14787 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 ||
14788 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750)
14789 tp->dma_rwctrl |= 0x003f0000;
14790 else
14791 tp->dma_rwctrl |= 0x003f000f;
14792 } else {
14793 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
14794 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
14795 u32 ccval = (tr32(TG3PCI_CLOCK_CTRL) & 0x1f);
14796 u32 read_water = 0x7;
14798 /* If the 5704 is behind the EPB bridge, we can
14799 * do the less restrictive ONE_DMA workaround for
14800 * better performance.
14801 */
14802 if (tg3_flag(tp, 40BIT_DMA_BUG) &&
14803 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704)
14804 tp->dma_rwctrl |= 0x8000;
14805 else if (ccval == 0x6 || ccval == 0x7)
14806 tp->dma_rwctrl |= DMA_RWCTRL_ONE_DMA;
14808 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703)
14809 read_water = 4;
14810 /* Set bit 23 to enable PCIX hw bug fix */
14811 tp->dma_rwctrl |=
14812 (read_water << DMA_RWCTRL_READ_WATER_SHIFT) |
14813 (0x3 << DMA_RWCTRL_WRITE_WATER_SHIFT) |
14814 (1 << 23);
14815 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780) {
14816 /* 5780 always in PCIX mode */
14817 tp->dma_rwctrl |= 0x00144000;
14818 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714) {
14819 /* 5714 always in PCIX mode */
14820 tp->dma_rwctrl |= 0x00148000;
14821 } else {
14822 tp->dma_rwctrl |= 0x001b000f;
14823 }
14824 }
14826 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
14827 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704)
14828 tp->dma_rwctrl &= 0xfffffff0;
14830 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
14831 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
14832 /* Remove this if it causes problems for some boards. */
14833 tp->dma_rwctrl |= DMA_RWCTRL_USE_MEM_READ_MULT;
14835 /* On 5700/5701 chips, we need to set this bit.
14836 * Otherwise the chip will issue cacheline transactions
14837 * to streamable DMA memory with not all the byte
14838 * enables turned on. This is an error on several
14839 * RISC PCI controllers, in particular sparc64.
14841 * On 5703/5704 chips, this bit has been reassigned
14842 * a different meaning. In particular, it is used
14843 * on those chips to enable a PCI-X workaround.
14844 */
14845 tp->dma_rwctrl |= DMA_RWCTRL_ASSERT_ALL_BE;
14846 }
14848 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
14850 #if 0
14851 /* Unneeded, already done by tg3_get_invariants. */
14852 tg3_switch_clocks(tp);
14853 #endif
14855 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
14856 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701)
14857 goto out;
14859 /* It is best to perform DMA test with maximum write burst size
14860 * to expose the 5700/5701 write DMA bug.
14861 */
14862 saved_dma_rwctrl = tp->dma_rwctrl;
14863 tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
14864 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
14866 while (1) {
14867 u32 *p = buf, i;
14869 for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++)
14870 p[i] = i;
14872 /* Send the buffer to the chip. */
14873 ret = tg3_do_test_dma(tp, buf, buf_dma, TEST_BUFFER_SIZE, 1);
14874 if (ret) {
14875 dev_err(&tp->pdev->dev,
14876 "%s: Buffer write failed. err = %d\n",
14877 __func__, ret);
14878 break;
14879 }
14881 #if 0
14882 /* validate data reached card RAM correctly. */
14883 for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++) {
14884 u32 val;
14885 tg3_read_mem(tp, 0x2100 + (i*4), &val);
14886 if (le32_to_cpu(val) != p[i]) {
14887 dev_err(&tp->pdev->dev,
14888 "%s: Buffer corrupted on device! "
14889 "(%d != %d)\n", __func__, val, i);
14890 /* ret = -ENODEV here? */
14891 }
14892 p[i] = 0;
14893 }
14894 #endif
14895 /* Now read it back. */
14896 ret = tg3_do_test_dma(tp, buf, buf_dma, TEST_BUFFER_SIZE, 0);
14897 if (ret) {
14898 dev_err(&tp->pdev->dev, "%s: Buffer read failed. "
14899 "err = %d\n", __func__, ret);
14900 break;
14901 }
14903 /* Verify it. */
14904 for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++) {
14905 if (p[i] == i)
14906 continue;
14908 if ((tp->dma_rwctrl & DMA_RWCTRL_WRITE_BNDRY_MASK) !=
14909 DMA_RWCTRL_WRITE_BNDRY_16) {
14910 tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
14911 tp->dma_rwctrl |= DMA_RWCTRL_WRITE_BNDRY_16;
14912 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
14913 break;
14914 } else {
14915 dev_err(&tp->pdev->dev,
14916 "%s: Buffer corrupted on read back! "
14917 "(%d != %d)\n", __func__, p[i], i);
14918 ret = -ENODEV;
14919 goto out;
14920 }
14921 }
14923 if (i == (TEST_BUFFER_SIZE / sizeof(u32))) {
14924 /* Success. */
14925 ret = 0;
14926 break;
14927 }
14928 }
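/* A verify miss above forces the DMA write boundary to 16 bytes and
 * reruns the whole test; a miss with the boundary already at 16
 * bytes is fatal.
 */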
14929 if ((tp->dma_rwctrl & DMA_RWCTRL_WRITE_BNDRY_MASK) !=
14930 DMA_RWCTRL_WRITE_BNDRY_16) {
14931 /* DMA test passed without adjusting DMA boundary,
14932 * now look for chipsets that are known to expose the
14933 * DMA bug without failing the test.
14934 */
14935 if (pci_dev_present(tg3_dma_wait_state_chipsets)) {
14936 tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
14937 tp->dma_rwctrl |= DMA_RWCTRL_WRITE_BNDRY_16;
14938 } else {
14939 /* Safe to use the calculated DMA boundary. */
14940 tp->dma_rwctrl = saved_dma_rwctrl;
14941 }
14943 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
14944 }
14946 out:
14947 dma_free_coherent(&tp->pdev->dev, TEST_BUFFER_SIZE, buf, buf_dma);
14948 out_nofree:
14949 return ret;
14950 }
14952 static void __devinit tg3_init_bufmgr_config(struct tg3 *tp)
14953 {
14954 if (tg3_flag(tp, 57765_PLUS)) {
14955 tp->bufmgr_config.mbuf_read_dma_low_water =
14956 DEFAULT_MB_RDMA_LOW_WATER_5705;
14957 tp->bufmgr_config.mbuf_mac_rx_low_water =
14958 DEFAULT_MB_MACRX_LOW_WATER_57765;
14959 tp->bufmgr_config.mbuf_high_water =
14960 DEFAULT_MB_HIGH_WATER_57765;
14962 tp->bufmgr_config.mbuf_read_dma_low_water_jumbo =
14963 DEFAULT_MB_RDMA_LOW_WATER_5705;
14964 tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo =
14965 DEFAULT_MB_MACRX_LOW_WATER_JUMBO_57765;
14966 tp->bufmgr_config.mbuf_high_water_jumbo =
14967 DEFAULT_MB_HIGH_WATER_JUMBO_57765;
14968 } else if (tg3_flag(tp, 5705_PLUS)) {
14969 tp->bufmgr_config.mbuf_read_dma_low_water =
14970 DEFAULT_MB_RDMA_LOW_WATER_5705;
14971 tp->bufmgr_config.mbuf_mac_rx_low_water =
14972 DEFAULT_MB_MACRX_LOW_WATER_5705;
14973 tp->bufmgr_config.mbuf_high_water =
14974 DEFAULT_MB_HIGH_WATER_5705;
14975 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
14976 tp->bufmgr_config.mbuf_mac_rx_low_water =
14977 DEFAULT_MB_MACRX_LOW_WATER_5906;
14978 tp->bufmgr_config.mbuf_high_water =
14979 DEFAULT_MB_HIGH_WATER_5906;
14980 }
14982 tp->bufmgr_config.mbuf_read_dma_low_water_jumbo =
14983 DEFAULT_MB_RDMA_LOW_WATER_JUMBO_5780;
14984 tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo =
14985 DEFAULT_MB_MACRX_LOW_WATER_JUMBO_5780;
14986 tp->bufmgr_config.mbuf_high_water_jumbo =
14987 DEFAULT_MB_HIGH_WATER_JUMBO_5780;
14988 } else {
14989 tp->bufmgr_config.mbuf_read_dma_low_water =
14990 DEFAULT_MB_RDMA_LOW_WATER;
14991 tp->bufmgr_config.mbuf_mac_rx_low_water =
14992 DEFAULT_MB_MACRX_LOW_WATER;
14993 tp->bufmgr_config.mbuf_high_water =
14994 DEFAULT_MB_HIGH_WATER;
14996 tp->bufmgr_config.mbuf_read_dma_low_water_jumbo =
14997 DEFAULT_MB_RDMA_LOW_WATER_JUMBO;
14998 tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo =
14999 DEFAULT_MB_MACRX_LOW_WATER_JUMBO;
15000 tp->bufmgr_config.mbuf_high_water_jumbo =
15001 DEFAULT_MB_HIGH_WATER_JUMBO;
15002 }
15004 tp->bufmgr_config.dma_low_water = DEFAULT_DMA_LOW_WATER;
15005 tp->bufmgr_config.dma_high_water = DEFAULT_DMA_HIGH_WATER;
15006 }
15008 static char * __devinit tg3_phy_string(struct tg3 *tp)
15009 {
15010 switch (tp->phy_id & TG3_PHY_ID_MASK) {
15011 case TG3_PHY_ID_BCM5400: return "5400";
15012 case TG3_PHY_ID_BCM5401: return "5401";
15013 case TG3_PHY_ID_BCM5411: return "5411";
15014 case TG3_PHY_ID_BCM5701: return "5701";
15015 case TG3_PHY_ID_BCM5703: return "5703";
15016 case TG3_PHY_ID_BCM5704: return "5704";
15017 case TG3_PHY_ID_BCM5705: return "5705";
15018 case TG3_PHY_ID_BCM5750: return "5750";
15019 case TG3_PHY_ID_BCM5752: return "5752";
15020 case TG3_PHY_ID_BCM5714: return "5714";
15021 case TG3_PHY_ID_BCM5780: return "5780";
15022 case TG3_PHY_ID_BCM5755: return "5755";
15023 case TG3_PHY_ID_BCM5787: return "5787";
15024 case TG3_PHY_ID_BCM5784: return "5784";
15025 case TG3_PHY_ID_BCM5756: return "5722/5756";
15026 case TG3_PHY_ID_BCM5906: return "5906";
15027 case TG3_PHY_ID_BCM5761: return "5761";
15028 case TG3_PHY_ID_BCM5718C: return "5718C";
15029 case TG3_PHY_ID_BCM5718S: return "5718S";
15030 case TG3_PHY_ID_BCM57765: return "57765";
15031 case TG3_PHY_ID_BCM5719C: return "5719C";
15032 case TG3_PHY_ID_BCM5720C: return "5720C";
15033 case TG3_PHY_ID_BCM8002: return "8002/serdes";
15034 case 0: return "serdes";
15035 default: return "unknown";
15036 }
15037 }
15039 static char * __devinit tg3_bus_string(struct tg3 *tp, char *str)
15040 {
15041 if (tg3_flag(tp, PCI_EXPRESS)) {
15042 strcpy(str, "PCI Express");
15043 return str;
15044 } else if (tg3_flag(tp, PCIX_MODE)) {
15045 u32 clock_ctrl = tr32(TG3PCI_CLOCK_CTRL) & 0x1f;
15047 strcpy(str, "PCIX:");
15049 if ((clock_ctrl == 7) ||
15050 ((tr32(GRC_MISC_CFG) & GRC_MISC_CFG_BOARD_ID_MASK) ==
15051 GRC_MISC_CFG_BOARD_ID_5704CIOBE))
15052 strcat(str, "133MHz");
15053 else if (clock_ctrl == 0)
15054 strcat(str, "33MHz");
15055 else if (clock_ctrl == 2)
15056 strcat(str, "50MHz");
15057 else if (clock_ctrl == 4)
15058 strcat(str, "66MHz");
15059 else if (clock_ctrl == 6)
15060 strcat(str, "100MHz");
15061 } else {
15062 strcpy(str, "PCI:");
15063 if (tg3_flag(tp, PCI_HIGH_SPEED))
15064 strcat(str, "66MHz");
15065 else
15066 strcat(str, "33MHz");
15067 }
15068 if (tg3_flag(tp, PCI_32BIT))
15069 strcat(str, ":32-bit");
15070 else
15071 strcat(str, ":64-bit");
15072 return str;
15073 }
15075 static struct pci_dev * __devinit tg3_find_peer(struct tg3 *tp)
15076 {
15077 struct pci_dev *peer;
15078 unsigned int func, devnr = tp->pdev->devfn & ~7;
15080 for (func = 0; func < 8; func++) {
15081 peer = pci_get_slot(tp->pdev->bus, devnr | func);
15082 if (peer && peer != tp->pdev)
15083 break;
15084 pci_dev_put(peer);
15085 }
15086 /* 5704 can be configured in single-port mode, set peer to
15087 * tp->pdev in that case.
15088 */
15089 if (!peer) {
15090 peer = tp->pdev;
15091 return peer;
15092 }
15094 /*
15095 * We don't need to keep the refcount elevated; there's no way
15096 * to remove one half of this device without removing the other.
15097 */
15098 pci_dev_put(peer);
15100 return peer;
15101 }
15103 static void __devinit tg3_init_coal(struct tg3 *tp)
15104 {
15105 struct ethtool_coalesce *ec = &tp->coal;
15107 memset(ec, 0, sizeof(*ec));
15108 ec->cmd = ETHTOOL_GCOALESCE;
15109 ec->rx_coalesce_usecs = LOW_RXCOL_TICKS;
15110 ec->tx_coalesce_usecs = LOW_TXCOL_TICKS;
15111 ec->rx_max_coalesced_frames = LOW_RXMAX_FRAMES;
15112 ec->tx_max_coalesced_frames = LOW_TXMAX_FRAMES;
15113 ec->rx_coalesce_usecs_irq = DEFAULT_RXCOAL_TICK_INT;
15114 ec->tx_coalesce_usecs_irq = DEFAULT_TXCOAL_TICK_INT;
15115 ec->rx_max_coalesced_frames_irq = DEFAULT_RXCOAL_MAXF_INT;
15116 ec->tx_max_coalesced_frames_irq = DEFAULT_TXCOAL_MAXF_INT;
15117 ec->stats_block_coalesce_usecs = DEFAULT_STAT_COAL_TICKS;
15119 if (tp->coalesce_mode & (HOSTCC_MODE_CLRTICK_RXBD |
15120 HOSTCC_MODE_CLRTICK_TXBD)) {
15121 ec->rx_coalesce_usecs = LOW_RXCOL_TICKS_CLRTCKS;
15122 ec->rx_coalesce_usecs_irq = DEFAULT_RXCOAL_TICK_INT_CLRTCKS;
15123 ec->tx_coalesce_usecs = LOW_TXCOL_TICKS_CLRTCKS;
15124 ec->tx_coalesce_usecs_irq = DEFAULT_TXCOAL_TICK_INT_CLRTCKS;
15125 }
15127 if (tg3_flag(tp, 5705_PLUS)) {
15128 ec->rx_coalesce_usecs_irq = 0;
15129 ec->tx_coalesce_usecs_irq = 0;
15130 ec->stats_block_coalesce_usecs = 0;
15131 }
15132 }
15134 static const struct net_device_ops tg3_netdev_ops = {
15135 .ndo_open = tg3_open,
15136 .ndo_stop = tg3_close,
15137 .ndo_start_xmit = tg3_start_xmit,
15138 .ndo_get_stats64 = tg3_get_stats64,
15139 .ndo_validate_addr = eth_validate_addr,
15140 .ndo_set_multicast_list = tg3_set_rx_mode,
15141 .ndo_set_mac_address = tg3_set_mac_addr,
15142 .ndo_do_ioctl = tg3_ioctl,
15143 .ndo_tx_timeout = tg3_tx_timeout,
15144 .ndo_change_mtu = tg3_change_mtu,
15145 .ndo_fix_features = tg3_fix_features,
15146 .ndo_set_features = tg3_set_features,
15147 #ifdef CONFIG_NET_POLL_CONTROLLER
15148 .ndo_poll_controller = tg3_poll_controller,
15149 #endif
15150 };
15152 static int __devinit tg3_init_one(struct pci_dev *pdev,
15153 const struct pci_device_id *ent)
15154 {
15155 struct net_device *dev;
15156 struct tg3 *tp;
15157 int i, err, pm_cap;
15158 u32 sndmbx, rcvmbx, intmbx;
15159 char str[40];
15160 u64 dma_mask, persist_dma_mask;
15161 u32 features = 0;
15163 printk_once(KERN_INFO "%s\n", version);
15165 err = pci_enable_device(pdev);
15166 if (err) {
15167 dev_err(&pdev->dev, "Cannot enable PCI device, aborting\n");
15168 return err;
15169 }
15171 err = pci_request_regions(pdev, DRV_MODULE_NAME);
15172 if (err) {
15173 dev_err(&pdev->dev, "Cannot obtain PCI resources, aborting\n");
15174 goto err_out_disable_pdev;
15175 }
15177 pci_set_master(pdev);
15179 /* Find power-management capability. */
15180 pm_cap = pci_find_capability(pdev, PCI_CAP_ID_PM);
15181 if (pm_cap == 0) {
15182 dev_err(&pdev->dev,
15183 "Cannot find Power Management capability, aborting\n");
15184 err = -EIO;
15185 goto err_out_free_res;
15186 }
15188 err = pci_set_power_state(pdev, PCI_D0);
15189 if (err) {
15190 dev_err(&pdev->dev, "Transition to D0 failed, aborting\n");
15191 goto err_out_free_res;
15192 }
15194 dev = alloc_etherdev_mq(sizeof(*tp), TG3_IRQ_MAX_VECS);
15195 if (!dev) {
15196 dev_err(&pdev->dev, "Etherdev alloc failed, aborting\n");
15197 err = -ENOMEM;
15198 goto err_out_power_down;
15199 }
15201 SET_NETDEV_DEV(dev, &pdev->dev);
15203 tp = netdev_priv(dev);
15204 tp->pdev = pdev;
15205 tp->dev = dev;
15206 tp->pm_cap = pm_cap;
15207 tp->rx_mode = TG3_DEF_RX_MODE;
15208 tp->tx_mode = TG3_DEF_TX_MODE;
15210 if (tg3_debug > 0)
15211 tp->msg_enable = tg3_debug;
15212 else
15213 tp->msg_enable = TG3_DEF_MSG_ENABLE;
15215 /* The word/byte swap controls here control register access byte
15216 * swapping. DMA data byte swapping is controlled in the GRC_MODE
15217 * setting below.
15218 */
15219 tp->misc_host_ctrl =
15220 MISC_HOST_CTRL_MASK_PCI_INT |
15221 MISC_HOST_CTRL_WORD_SWAP |
15222 MISC_HOST_CTRL_INDIR_ACCESS |
15223 MISC_HOST_CTRL_PCISTATE_RW;
15225 /* The NONFRM (non-frame) byte/word swap controls take effect
15226 * on descriptor entries, anything which isn't packet data.
15228 * The StrongARM chips on the board (one for tx, one for rx)
15229 * are running in big-endian mode.
15230 */
15231 tp->grc_mode = (GRC_MODE_WSWAP_DATA | GRC_MODE_BSWAP_DATA |
15232 GRC_MODE_WSWAP_NONFRM_DATA);
15233 #ifdef __BIG_ENDIAN
15234 tp->grc_mode |= GRC_MODE_BSWAP_NONFRM_DATA;
15235 #endif
15236 spin_lock_init(&tp->lock);
15237 spin_lock_init(&tp->indirect_lock);
15238 INIT_WORK(&tp->reset_task, tg3_reset_task);
15240 tp->regs = pci_ioremap_bar(pdev, BAR_0);
15241 if (!tp->regs) {
15242 dev_err(&pdev->dev, "Cannot map device registers, aborting\n");
15243 err = -ENOMEM;
15244 goto err_out_free_dev;
15245 }
15247 if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761 ||
15248 tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761E ||
15249 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761S ||
15250 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761SE ||
15251 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717 ||
15252 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5718 ||
15253 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5719 ||
15254 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5720) {
15255 tg3_flag_set(tp, ENABLE_APE);
15256 tp->aperegs = pci_ioremap_bar(pdev, BAR_2);
15257 if (!tp->aperegs) {
15258 dev_err(&pdev->dev,
15259 "Cannot map APE registers, aborting\n");
15260 err = -ENOMEM;
15261 goto err_out_iounmap;
15262 }
15263 }
15265 tp->rx_pending = TG3_DEF_RX_RING_PENDING;
15266 tp->rx_jumbo_pending = TG3_DEF_RX_JUMBO_RING_PENDING;
15268 dev->ethtool_ops = &tg3_ethtool_ops;
15269 dev->watchdog_timeo = TG3_TX_TIMEOUT;
15270 dev->netdev_ops = &tg3_netdev_ops;
15271 dev->irq = pdev->irq;
15273 err = tg3_get_invariants(tp);
15274 if (err) {
15275 dev_err(&pdev->dev,
15276 "Problem fetching invariants of chip, aborting\n");
15277 goto err_out_apeunmap;
15278 }
15280 /* The EPB bridge inside 5714, 5715, and 5780 and any
15281 * device behind the EPB cannot support DMA addresses > 40-bit.
15282 * On 64-bit systems with IOMMU, use 40-bit dma_mask.
15283 * On 64-bit systems without IOMMU, use 64-bit dma_mask and
15284 * do DMA address check in tg3_start_xmit().
15285 */
15286 if (tg3_flag(tp, IS_5788))
15287 persist_dma_mask = dma_mask = DMA_BIT_MASK(32);
15288 else if (tg3_flag(tp, 40BIT_DMA_BUG)) {
15289 persist_dma_mask = dma_mask = DMA_BIT_MASK(40);
15290 #ifdef CONFIG_HIGHMEM
15291 dma_mask = DMA_BIT_MASK(64);
15292 #endif
15293 } else
15294 persist_dma_mask = dma_mask = DMA_BIT_MASK(64);
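/* Note: with CONFIG_HIGHMEM the runtime mask is widened to 64 bits
 * even on 40-bit parts; buffers mapping above the 40-bit limit are
 * then caught by the tg3_start_xmit() check mentioned above.
 */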
15296 /* Configure DMA attributes. */
15297 if (dma_mask > DMA_BIT_MASK(32)) {
15298 err = pci_set_dma_mask(pdev, dma_mask);
15299 if (!err) {
15300 features |= NETIF_F_HIGHDMA;
15301 err = pci_set_consistent_dma_mask(pdev,
15302 persist_dma_mask);
15303 if (err < 0) {
15304 dev_err(&pdev->dev, "Unable to obtain 64 bit "
15305 "DMA for consistent allocations\n");
15306 goto err_out_apeunmap;
15307 }
15308 }
15309 }
15310 if (err || dma_mask == DMA_BIT_MASK(32)) {
15311 err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
15312 if (err) {
15313 dev_err(&pdev->dev,
15314 "No usable DMA configuration, aborting\n");
15315 goto err_out_apeunmap;
15316 }
15317 }
15319 tg3_init_bufmgr_config(tp);
15321 features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
15323 /* 5700 B0 chips do not support checksumming correctly due
15324 * to hardware bugs.
15325 */
15326 if (tp->pci_chip_rev_id != CHIPREV_ID_5700_B0) {
15327 features |= NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_RXCSUM;
15329 if (tg3_flag(tp, 5755_PLUS))
15330 features |= NETIF_F_IPV6_CSUM;
15331 }
15333 /* TSO is on by default on chips that support hardware TSO.
15334 * Firmware TSO on older chips gives lower performance, so it
15335 * is off by default, but can be enabled using ethtool.
15336 */
15337 if ((tg3_flag(tp, HW_TSO_1) ||
15338 tg3_flag(tp, HW_TSO_2) ||
15339 tg3_flag(tp, HW_TSO_3)) &&
15340 (features & NETIF_F_IP_CSUM))
15341 features |= NETIF_F_TSO;
15342 if (tg3_flag(tp, HW_TSO_2) || tg3_flag(tp, HW_TSO_3)) {
15343 if (features & NETIF_F_IPV6_CSUM)
15344 features |= NETIF_F_TSO6;
15345 if (tg3_flag(tp, HW_TSO_3) ||
15346 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
15347 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 &&
15348 GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5784_AX) ||
15349 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
15350 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780)
15351 features |= NETIF_F_TSO_ECN;
15352 }
15354 dev->features |= features;
15355 dev->vlan_features |= features;
15357 /*
15358 * Add loopback capability only for a subset of devices that support
15359 * MAC-LOOPBACK. Eventually this needs to be enhanced to allow INT-PHY
15360 * loopback for the remaining devices.
15361 */
15362 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5780 &&
15363 !tg3_flag(tp, CPMU_PRESENT))
15364 /* Add the loopback capability */
15365 features |= NETIF_F_LOOPBACK;
15367 dev->hw_features |= features;
15369 if (tp->pci_chip_rev_id == CHIPREV_ID_5705_A1 &&
15370 !tg3_flag(tp, TSO_CAPABLE) &&
15371 !(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH)) {
15372 tg3_flag_set(tp, MAX_RXPEND_64);
15373 tp->rx_pending = 63;
15374 }
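/* The MAX_RXPEND_64 flag name suggests these parts can only post 64
 * RX buffers at a time, hence the default ring depth of 63 here.
 */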
15376 err = tg3_get_device_address(tp);
15377 if (err) {
15378 dev_err(&pdev->dev,
15379 "Could not obtain valid ethernet address, aborting\n");
15380 goto err_out_apeunmap;
15381 }
15383 /*
15384 * Reset the chip in case the UNDI or EFI driver did not shut it
15385 * down cleanly; the DMA self test will enable the WDMAC and we'd
15386 * otherwise see (spurious) pending DMA on the PCI bus at that point.
15387 */
15388 if ((tr32(HOSTCC_MODE) & HOSTCC_MODE_ENABLE) ||
15389 (tr32(WDMAC_MODE) & WDMAC_MODE_ENABLE)) {
15390 tw32(MEMARB_MODE, MEMARB_MODE_ENABLE);
15391 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
15392 }
15394 err = tg3_test_dma(tp);
15395 if (err) {
15396 dev_err(&pdev->dev, "DMA engine test failed, aborting\n");
15397 goto err_out_apeunmap;
15398 }
15400 intmbx = MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW;
15401 rcvmbx = MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW;
15402 sndmbx = MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW;
15403 for (i = 0; i < tp->irq_max; i++) {
15404 struct tg3_napi *tnapi = &tp->napi[i];
15406 tnapi->tp = tp;
15407 tnapi->tx_pending = TG3_DEF_TX_RING_PENDING;
15409 tnapi->int_mbox = intmbx;
15410 if (i < 4)
15411 intmbx += 0x8;
15412 else
15413 intmbx += 0x4;
15415 tnapi->consmbox = rcvmbx;
15416 tnapi->prodmbox = sndmbx;
15418 if (i)
15419 tnapi->coal_now = HOSTCC_MODE_COAL_VEC1_NOW << (i - 1);
15420 else
15421 tnapi->coal_now = HOSTCC_MODE_NOW;
15423 if (!tg3_flag(tp, SUPPORT_MSIX))
15424 break;
15426 /*
15427 * If we support MSIX, we'll be using RSS. If we're using
15428 * RSS, the first vector only handles link interrupts and the
15429 * remaining vectors handle rx and tx interrupts. Reuse the
15430 * mailbox values for the next iteration. The values we set up
15431 * above are still useful for the single vectored mode.
15432 */
15433 if (!i)
15434 continue;
15436 rcvmbx += 0x8;
15438 if (sndmbx & 0x4)
15439 sndmbx -= 0x4;
15440 else
15441 sndmbx += 0xc;
15442 }
15444 tg3_init_coal(tp);
15446 pci_set_drvdata(pdev, dev);
15448 if (tg3_flag(tp, 5717_PLUS)) {
15449 /* Resume a low-power mode */
15450 tg3_frob_aux_power(tp, false);
15451 }
15453 err = register_netdev(dev);
15454 if (err) {
15455 dev_err(&pdev->dev, "Cannot register net device, aborting\n");
15456 goto err_out_apeunmap;
15457 }
15459 netdev_info(dev, "Tigon3 [partno(%s) rev %04x] (%s) MAC address %pM\n",
15460 tp->board_part_number,
15461 tp->pci_chip_rev_id,
15462 tg3_bus_string(tp, str),
15463 dev->dev_addr);
15465 if (tp->phy_flags & TG3_PHYFLG_IS_CONNECTED) {
15466 struct phy_device *phydev;
15467 phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
15468 netdev_info(dev,
15469 "attached PHY driver [%s] (mii_bus:phy_addr=%s)\n",
15470 phydev->drv->name, dev_name(&phydev->dev));
15471 } else {
15472 char *ethtype;
15474 if (tp->phy_flags & TG3_PHYFLG_10_100_ONLY)
15475 ethtype = "10/100Base-TX";
15476 else if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES)
15477 ethtype = "1000Base-SX";
15478 else
15479 ethtype = "10/100/1000Base-T";
15481 netdev_info(dev, "attached PHY is %s (%s Ethernet) "
15482 "(WireSpeed[%d], EEE[%d])\n",
15483 tg3_phy_string(tp), ethtype,
15484 (tp->phy_flags & TG3_PHYFLG_NO_ETH_WIRE_SPEED) == 0,
15485 (tp->phy_flags & TG3_PHYFLG_EEE_CAP) != 0);
15486 }
15488 netdev_info(dev, "RXcsums[%d] LinkChgREG[%d] MIirq[%d] ASF[%d] TSOcap[%d]\n",
15489 (dev->features & NETIF_F_RXCSUM) != 0,
15490 tg3_flag(tp, USE_LINKCHG_REG) != 0,
15491 (tp->phy_flags & TG3_PHYFLG_USE_MI_INTERRUPT) != 0,
15492 tg3_flag(tp, ENABLE_ASF) != 0,
15493 tg3_flag(tp, TSO_CAPABLE) != 0);
15494 netdev_info(dev, "dma_rwctrl[%08x] dma_mask[%d-bit]\n",
15495 tp->dma_rwctrl,
15496 pdev->dma_mask == DMA_BIT_MASK(32) ? 32 :
15497 ((u64)pdev->dma_mask) == DMA_BIT_MASK(40) ? 40 : 64);
15499 pci_save_state(pdev);
15501 return 0;
15503 err_out_apeunmap:
15504 if (tp->aperegs) {
15505 iounmap(tp->aperegs);
15506 tp->aperegs = NULL;
15507 }
15509 err_out_iounmap:
15510 if (tp->regs) {
15511 iounmap(tp->regs);
15512 tp->regs = NULL;
15513 }
15515 err_out_free_dev:
15516 free_netdev(dev);
15518 err_out_power_down:
15519 pci_set_power_state(pdev, PCI_D3hot);
15521 err_out_free_res:
15522 pci_release_regions(pdev);
15524 err_out_disable_pdev:
15525 pci_disable_device(pdev);
15526 pci_set_drvdata(pdev, NULL);
15527 return err;
15528 }
15530 static void __devexit tg3_remove_one(struct pci_dev *pdev)
15531 {
15532 struct net_device *dev = pci_get_drvdata(pdev);
15534 if (dev) {
15535 struct tg3 *tp = netdev_priv(dev);
15537 if (tp->fw)
15538 release_firmware(tp->fw);
15540 cancel_work_sync(&tp->reset_task);
15542 if (!tg3_flag(tp, USE_PHYLIB)) {
15543 tg3_phy_fini(tp);
15544 tg3_mdio_fini(tp);
15545 }
15547 unregister_netdev(dev);
15548 if (tp->aperegs) {
15549 iounmap(tp->aperegs);
15550 tp->aperegs = NULL;
15551 }
15552 if (tp->regs) {
15553 iounmap(tp->regs);
15554 tp->regs = NULL;
15555 }
15556 free_netdev(dev);
15557 pci_release_regions(pdev);
15558 pci_disable_device(pdev);
15559 pci_set_drvdata(pdev, NULL);
15560 }
15561 }
15563 #ifdef CONFIG_PM_SLEEP
15564 static int tg3_suspend(struct device *device)
15565 {
15566 struct pci_dev *pdev = to_pci_dev(device);
15567 struct net_device *dev = pci_get_drvdata(pdev);
15568 struct tg3 *tp = netdev_priv(dev);
15569 int err;
15571 if (!netif_running(dev))
15572 return 0;
15574 flush_work_sync(&tp->reset_task);
15575 tg3_phy_stop(tp);
15576 tg3_netif_stop(tp);
15578 del_timer_sync(&tp->timer);
15580 tg3_full_lock(tp, 1);
15581 tg3_disable_ints(tp);
15582 tg3_full_unlock(tp);
15584 netif_device_detach(dev);
15586 tg3_full_lock(tp, 0);
15587 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
15588 tg3_flag_clear(tp, INIT_COMPLETE);
15589 tg3_full_unlock(tp);
15591 err = tg3_power_down_prepare(tp);
15592 if (err) {
15593 int err2;
15595 tg3_full_lock(tp, 0);
15597 tg3_flag_set(tp, INIT_COMPLETE);
15598 err2 = tg3_restart_hw(tp, 1);
15599 if (err2)
15600 goto out;
15602 tp->timer.expires = jiffies + tp->timer_offset;
15603 add_timer(&tp->timer);
15605 netif_device_attach(dev);
15606 tg3_netif_start(tp);
15608 out:
15609 tg3_full_unlock(tp);
15611 if (!err2)
15612 tg3_phy_start(tp);
15613 }
15615 return err;
15616 }
15618 static int tg3_resume(struct device *device)
15619 {
15620 struct pci_dev *pdev = to_pci_dev(device);
15621 struct net_device *dev = pci_get_drvdata(pdev);
15622 struct tg3 *tp = netdev_priv(dev);
15623 int err;
15625 if (!netif_running(dev))
15626 return 0;
15628 netif_device_attach(dev);
15630 tg3_full_lock(tp, 0);
15632 tg3_flag_set(tp, INIT_COMPLETE);
15633 err = tg3_restart_hw(tp, 1);
15634 if (err)
15635 goto out;
15637 tp->timer.expires = jiffies + tp->timer_offset;
15638 add_timer(&tp->timer);
15640 tg3_netif_start(tp);
15642 out:
15643 tg3_full_unlock(tp);
15645 if (!err)
15646 tg3_phy_start(tp);
15648 return err;
15649 }
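/* SIMPLE_DEV_PM_OPS binds the same pair of callbacks to the
 * suspend/resume, freeze/thaw and poweroff/restore transitions.
 */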
15651 static SIMPLE_DEV_PM_OPS(tg3_pm_ops, tg3_suspend, tg3_resume);
15652 #define TG3_PM_OPS (&tg3_pm_ops)
15654 #else
15656 #define TG3_PM_OPS NULL
15658 #endif /* CONFIG_PM_SLEEP */
15660 /**
15661 * tg3_io_error_detected - called when PCI error is detected
15662 * @pdev: Pointer to PCI device
15663 * @state: The current pci connection state
15665 * This function is called after a PCI bus error affecting
15666 * this device has been detected.
15667 */
15668 static pci_ers_result_t tg3_io_error_detected(struct pci_dev *pdev,
15669 pci_channel_state_t state)
15670 {
15671 struct net_device *netdev = pci_get_drvdata(pdev);
15672 struct tg3 *tp = netdev_priv(netdev);
15673 pci_ers_result_t err = PCI_ERS_RESULT_NEED_RESET;
15675 netdev_info(netdev, "PCI I/O error detected\n");
15677 rtnl_lock();
15679 if (!netif_running(netdev))
15680 goto done;
15682 tg3_phy_stop(tp);
15684 tg3_netif_stop(tp);
15686 del_timer_sync(&tp->timer);
15687 tg3_flag_clear(tp, RESTART_TIMER);
15689 /* Want to make sure that the reset task doesn't run */
15690 cancel_work_sync(&tp->reset_task);
15691 tg3_flag_clear(tp, TX_RECOVERY_PENDING);
15692 tg3_flag_clear(tp, RESTART_TIMER);
15694 netif_device_detach(netdev);
15696 /* Clean up software state, even if MMIO is blocked */
15697 tg3_full_lock(tp, 0);
15698 tg3_halt(tp, RESET_KIND_SHUTDOWN, 0);
15699 tg3_full_unlock(tp);
15701 done:
15702 if (state == pci_channel_io_perm_failure)
15703 err = PCI_ERS_RESULT_DISCONNECT;
15704 else
15705 pci_disable_device(pdev);
15707 rtnl_unlock();
15709 return err;
15710 }
15712 /**
15713 * tg3_io_slot_reset - called after the pci bus has been reset.
15714 * @pdev: Pointer to PCI device
15716 * Restart the card from scratch, as if from a cold-boot.
15717 * At this point, the card has experienced a hard reset,
15718 * followed by fixups by BIOS, and has its config space
15719 * set up identically to what it was at cold boot.
15720 */
15721 static pci_ers_result_t tg3_io_slot_reset(struct pci_dev *pdev)
15722 {
15723 struct net_device *netdev = pci_get_drvdata(pdev);
15724 struct tg3 *tp = netdev_priv(netdev);
15725 pci_ers_result_t rc = PCI_ERS_RESULT_DISCONNECT;
15726 int err;
15728 rtnl_lock();
15730 if (pci_enable_device(pdev)) {
15731 netdev_err(netdev, "Cannot re-enable PCI device after reset.\n");
15732 goto done;
15733 }
15735 pci_set_master(pdev);
15736 pci_restore_state(pdev);
15737 pci_save_state(pdev);
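/* Config space was saved at probe time; restore it after the bus
 * reset and save it again so a subsequent reset starts from the
 * same state.
 */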
15739 if (!netif_running(netdev)) {
15740 rc = PCI_ERS_RESULT_RECOVERED;
15741 goto done;
15742 }
15744 err = tg3_power_up(tp);
15745 if (err)
15746 goto done;
15748 rc = PCI_ERS_RESULT_RECOVERED;
15750 done:
15751 rtnl_unlock();
15753 return rc;
15754 }
15756 /**
15757 * tg3_io_resume - called when traffic can start flowing again.
15758 * @pdev: Pointer to PCI device
15760 * This callback is called when the error recovery driver tells
15761 * us that it's OK to resume normal operation.
15762 */
15763 static void tg3_io_resume(struct pci_dev *pdev)
15764 {
15765 struct net_device *netdev = pci_get_drvdata(pdev);
15766 struct tg3 *tp = netdev_priv(netdev);
15767 int err;
15769 rtnl_lock();
15771 if (!netif_running(netdev))
15772 goto done;
15774 tg3_full_lock(tp, 0);
15775 tg3_flag_set(tp, INIT_COMPLETE);
15776 err = tg3_restart_hw(tp, 1);
15777 tg3_full_unlock(tp);
15778 if (err) {
15779 netdev_err(netdev, "Cannot restart hardware after reset.\n");
15780 goto done;
15781 }
15783 netif_device_attach(netdev);
15785 tp->timer.expires = jiffies + tp->timer_offset;
15786 add_timer(&tp->timer);
15788 tg3_netif_start(tp);
15790 tg3_phy_start(tp);
15792 done:
15793 rtnl_unlock();
15794 }
15796 static struct pci_error_handlers tg3_err_handler = {
15797 .error_detected = tg3_io_error_detected,
15798 .slot_reset = tg3_io_slot_reset,
15799 .resume = tg3_io_resume
15800 };
15802 static struct pci_driver tg3_driver = {
15803 .name = DRV_MODULE_NAME,
15804 .id_table = tg3_pci_tbl,
15805 .probe = tg3_init_one,
15806 .remove = __devexit_p(tg3_remove_one),
15807 .err_handler = &tg3_err_handler,
15808 .driver.pm = TG3_PM_OPS,
15809 };
15811 static int __init tg3_init(void)
15812 {
15813 return pci_register_driver(&tg3_driver);
15814 }
15816 static void __exit tg3_cleanup(void)
15817 {
15818 pci_unregister_driver(&tg3_driver);
15819 }
15821 module_init(tg3_init);
15822 module_exit(tg3_cleanup);