tg3: Move producer ring struct to tg3_napi
drivers/net/tg3.c
1 /*
2 * tg3.c: Broadcom Tigon3 ethernet driver.
4 * Copyright (C) 2001, 2002, 2003, 2004 David S. Miller (davem@redhat.com)
5 * Copyright (C) 2001, 2002, 2003 Jeff Garzik (jgarzik@pobox.com)
6 * Copyright (C) 2004 Sun Microsystems Inc.
7 * Copyright (C) 2005-2010 Broadcom Corporation.
9 * Firmware is:
10 * Derived from proprietary unpublished source code,
11 * Copyright (C) 2000-2003 Broadcom Corporation.
13 * Permission is hereby granted for the distribution of this firmware
14 * data in hexadecimal or equivalent format, provided this copyright
15 * notice is accompanying it.
16 */
19 #include <linux/module.h>
20 #include <linux/moduleparam.h>
21 #include <linux/stringify.h>
22 #include <linux/kernel.h>
23 #include <linux/types.h>
24 #include <linux/compiler.h>
25 #include <linux/slab.h>
26 #include <linux/delay.h>
27 #include <linux/in.h>
28 #include <linux/init.h>
29 #include <linux/ioport.h>
30 #include <linux/pci.h>
31 #include <linux/netdevice.h>
32 #include <linux/etherdevice.h>
33 #include <linux/skbuff.h>
34 #include <linux/ethtool.h>
35 #include <linux/mii.h>
36 #include <linux/phy.h>
37 #include <linux/brcmphy.h>
38 #include <linux/if_vlan.h>
39 #include <linux/ip.h>
40 #include <linux/tcp.h>
41 #include <linux/workqueue.h>
42 #include <linux/prefetch.h>
43 #include <linux/dma-mapping.h>
44 #include <linux/firmware.h>
46 #include <net/checksum.h>
47 #include <net/ip.h>
49 #include <asm/system.h>
50 #include <asm/io.h>
51 #include <asm/byteorder.h>
52 #include <asm/uaccess.h>
54 #ifdef CONFIG_SPARC
55 #include <asm/idprom.h>
56 #include <asm/prom.h>
57 #endif
59 #define BAR_0 0
60 #define BAR_2 2
62 #if defined(CONFIG_VLAN_8021Q) || defined(CONFIG_VLAN_8021Q_MODULE)
63 #define TG3_VLAN_TAG_USED 1
64 #else
65 #define TG3_VLAN_TAG_USED 0
66 #endif
68 #include "tg3.h"
70 #define DRV_MODULE_NAME "tg3"
71 #define TG3_MAJ_NUM 3
72 #define TG3_MIN_NUM 113
73 #define DRV_MODULE_VERSION \
74 __stringify(TG3_MAJ_NUM) "." __stringify(TG3_MIN_NUM)
75 #define DRV_MODULE_RELDATE "August 2, 2010"
77 #define TG3_DEF_MAC_MODE 0
78 #define TG3_DEF_RX_MODE 0
79 #define TG3_DEF_TX_MODE 0
80 #define TG3_DEF_MSG_ENABLE \
81 (NETIF_MSG_DRV | \
82 NETIF_MSG_PROBE | \
83 NETIF_MSG_LINK | \
84 NETIF_MSG_TIMER | \
85 NETIF_MSG_IFDOWN | \
86 NETIF_MSG_IFUP | \
87 NETIF_MSG_RX_ERR | \
88 NETIF_MSG_TX_ERR)
90 /* length of time before we decide the hardware is borked,
91 * and dev->tx_timeout() should be called to fix the problem
92 */
93 #define TG3_TX_TIMEOUT (5 * HZ)
95 /* hardware minimum and maximum for a single frame's data payload */
96 #define TG3_MIN_MTU 60
97 #define TG3_MAX_MTU(tp) \
98 ((tp->tg3_flags & TG3_FLAG_JUMBO_CAPABLE) ? 9000 : 1500)
100 /* These numbers seem to be hard coded in the NIC firmware somehow.
101 * You can't change the ring sizes, but you can change where you place
102 * them in the NIC onboard memory.
103 */
104 #define TG3_RX_RING_SIZE 512
105 #define TG3_DEF_RX_RING_PENDING 200
106 #define TG3_RX_JUMBO_RING_SIZE 256
107 #define TG3_DEF_RX_JUMBO_RING_PENDING 100
108 #define TG3_RSS_INDIR_TBL_SIZE 128
110 /* Do not place this n-ring entries value into the tp struct itself,
111 * we really want to expose these constants to GCC so that modulo et
112 * al. operations are done with shifts and masks instead of with
113 * hw multiply/modulo instructions. Another solution would be to
114 * replace things like '% foo' with '& (foo - 1)'.
115 */
116 #define TG3_RX_RCB_RING_SIZE(tp) \
117 (((tp->tg3_flags & TG3_FLAG_JUMBO_CAPABLE) && \
118 !(tp->tg3_flags2 & TG3_FLG2_5780_CLASS)) ? 1024 : 512)
120 #define TG3_TX_RING_SIZE 512
121 #define TG3_DEF_TX_RING_PENDING (TG3_TX_RING_SIZE - 1)
123 #define TG3_RX_RING_BYTES (sizeof(struct tg3_rx_buffer_desc) * \
124 TG3_RX_RING_SIZE)
125 #define TG3_RX_JUMBO_RING_BYTES (sizeof(struct tg3_ext_rx_buffer_desc) * \
126 TG3_RX_JUMBO_RING_SIZE)
127 #define TG3_RX_RCB_RING_BYTES(tp) (sizeof(struct tg3_rx_buffer_desc) * \
128 TG3_RX_RCB_RING_SIZE(tp))
129 #define TG3_TX_RING_BYTES (sizeof(struct tg3_tx_buffer_desc) * \
130 TG3_TX_RING_SIZE)
131 #define NEXT_TX(N) (((N) + 1) & (TG3_TX_RING_SIZE - 1))
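/* NEXT_TX is the "& (foo - 1)" form referred to in the comment above:
 * because TG3_TX_RING_SIZE is a power of two (512), (N + 1) % 512
 * reduces to (N + 1) & 511, so the ring index wraps without a divide
 * or modulo instruction.
 */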
133 #define TG3_RX_DMA_ALIGN 16
134 #define TG3_RX_HEADROOM ALIGN(VLAN_HLEN, TG3_RX_DMA_ALIGN)
136 #define TG3_DMA_BYTE_ENAB 64
138 #define TG3_RX_STD_DMA_SZ 1536
139 #define TG3_RX_JMB_DMA_SZ 9046
141 #define TG3_RX_DMA_TO_MAP_SZ(x) ((x) + TG3_DMA_BYTE_ENAB)
143 #define TG3_RX_STD_MAP_SZ TG3_RX_DMA_TO_MAP_SZ(TG3_RX_STD_DMA_SZ)
144 #define TG3_RX_JMB_MAP_SZ TG3_RX_DMA_TO_MAP_SZ(TG3_RX_JMB_DMA_SZ)
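/* The DMA mapping length is the receive buffer size plus the 64-byte
 * slack area (TG3_DMA_BYTE_ENAB), so the standard and jumbo map sizes
 * work out to 1600 and 9110 bytes respectively.
 */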
146 #define TG3_RX_STD_BUFF_RING_SIZE \
147 (sizeof(struct ring_info) * TG3_RX_RING_SIZE)
149 #define TG3_RX_JMB_BUFF_RING_SIZE \
150 (sizeof(struct ring_info) * TG3_RX_JUMBO_RING_SIZE)
152 /* Due to a hardware bug, the 5701 can only DMA to memory addresses
153 * that are at least dword aligned when used in PCIX mode. The driver
154 * works around this bug by double copying the packet. This workaround
155 * is built into the normal double copy length check for efficiency.
157 * However, the double copy is only necessary on those architectures
158 * where unaligned memory accesses are inefficient. For those architectures
159 * where unaligned memory accesses incur little penalty, we can reintegrate
160 * the 5701 in the normal rx path. Doing so saves a device structure
161 * dereference by hardcoding the double copy threshold in place.
162 */
163 #define TG3_RX_COPY_THRESHOLD 256
164 #if NET_IP_ALIGN == 0 || defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS)
165 #define TG3_RX_COPY_THRESH(tp) TG3_RX_COPY_THRESHOLD
166 #else
167 #define TG3_RX_COPY_THRESH(tp) ((tp)->rx_copy_thresh)
168 #endif
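/* A sketch of the intended use in the receive path (the actual
 * consumer sits further down in this file): frames shorter than
 * TG3_RX_COPY_THRESH(tp) are copied into a small freshly-allocated
 * skb so the original, larger rx buffer can be handed straight back
 * to the hardware.
 */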
170 /* minimum number of free TX descriptors required to wake up TX process */
171 #define TG3_TX_WAKEUP_THRESH(tnapi) ((tnapi)->tx_pending / 4)
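/* The tx queue is only woken once at least a quarter of the ring's
 * pending descriptors are free again, which keeps the queue from
 * bouncing between the stopped and running states on every completion.
 */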
173 #define TG3_RAW_IP_ALIGN 2
175 /* number of ETHTOOL_GSTATS u64's */
176 #define TG3_NUM_STATS (sizeof(struct tg3_ethtool_stats)/sizeof(u64))
178 #define TG3_NUM_TEST 6
180 #define TG3_FW_UPDATE_TIMEOUT_SEC 5
182 #define FIRMWARE_TG3 "tigon/tg3.bin"
183 #define FIRMWARE_TG3TSO "tigon/tg3_tso.bin"
184 #define FIRMWARE_TG3TSO5 "tigon/tg3_tso5.bin"
186 static char version[] __devinitdata =
187 DRV_MODULE_NAME ".c:v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")";
189 MODULE_AUTHOR("David S. Miller (davem@redhat.com) and Jeff Garzik (jgarzik@pobox.com)");
190 MODULE_DESCRIPTION("Broadcom Tigon3 ethernet driver");
191 MODULE_LICENSE("GPL");
192 MODULE_VERSION(DRV_MODULE_VERSION);
193 MODULE_FIRMWARE(FIRMWARE_TG3);
194 MODULE_FIRMWARE(FIRMWARE_TG3TSO);
195 MODULE_FIRMWARE(FIRMWARE_TG3TSO5);
197 static int tg3_debug = -1; /* -1 == use TG3_DEF_MSG_ENABLE as value */
198 module_param(tg3_debug, int, 0);
199 MODULE_PARM_DESC(tg3_debug, "Tigon3 bitmapped debugging message enable value");
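/* Example (hypothetical value): loading the module with
 *
 *	modprobe tg3 tg3_debug=0x7
 *
 * enables only the DRV, PROBE and LINK message classes; the default of
 * -1 falls back to TG3_DEF_MSG_ENABLE above.
 */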
201 static DEFINE_PCI_DEVICE_TABLE(tg3_pci_tbl) = {
202 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5700)},
203 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5701)},
204 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702)},
205 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703)},
206 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704)},
207 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702FE)},
208 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705)},
209 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705_2)},
210 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705M)},
211 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705M_2)},
212 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702X)},
213 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703X)},
214 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704S)},
215 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702A3)},
216 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703A3)},
217 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5782)},
218 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5788)},
219 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5789)},
220 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5901)},
221 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5901_2)},
222 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704S_2)},
223 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705F)},
224 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5721)},
225 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5722)},
226 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751)},
227 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751M)},
228 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751F)},
229 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5752)},
230 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5752M)},
231 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753)},
232 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753M)},
233 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753F)},
234 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5754)},
235 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5754M)},
236 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5755)},
237 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5755M)},
238 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5756)},
239 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5786)},
240 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787)},
241 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787M)},
242 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787F)},
243 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5714)},
244 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5714S)},
245 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5715)},
246 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5715S)},
247 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5780)},
248 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5780S)},
249 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5781)},
250 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5906)},
251 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5906M)},
252 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5784)},
253 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5764)},
254 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5723)},
255 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5761)},
256 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5761E)},
257 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5761S)},
258 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5761SE)},
259 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5785_G)},
260 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5785_F)},
261 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57780)},
262 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57760)},
263 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57790)},
264 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57788)},
265 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5717)},
266 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5718)},
267 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5724)},
268 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57781)},
269 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57785)},
270 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57761)},
271 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57765)},
272 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57791)},
273 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57795)},
274 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5719)},
275 {PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9DXX)},
276 {PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9MXX)},
277 {PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1000)},
278 {PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1001)},
279 {PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1003)},
280 {PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC9100)},
281 {PCI_DEVICE(PCI_VENDOR_ID_APPLE, PCI_DEVICE_ID_APPLE_TIGON3)},
282 {}
283 };
285 MODULE_DEVICE_TABLE(pci, tg3_pci_tbl);
287 static const struct {
288 const char string[ETH_GSTRING_LEN];
289 } ethtool_stats_keys[TG3_NUM_STATS] = {
290 { "rx_octets" },
291 { "rx_fragments" },
292 { "rx_ucast_packets" },
293 { "rx_mcast_packets" },
294 { "rx_bcast_packets" },
295 { "rx_fcs_errors" },
296 { "rx_align_errors" },
297 { "rx_xon_pause_rcvd" },
298 { "rx_xoff_pause_rcvd" },
299 { "rx_mac_ctrl_rcvd" },
300 { "rx_xoff_entered" },
301 { "rx_frame_too_long_errors" },
302 { "rx_jabbers" },
303 { "rx_undersize_packets" },
304 { "rx_in_length_errors" },
305 { "rx_out_length_errors" },
306 { "rx_64_or_less_octet_packets" },
307 { "rx_65_to_127_octet_packets" },
308 { "rx_128_to_255_octet_packets" },
309 { "rx_256_to_511_octet_packets" },
310 { "rx_512_to_1023_octet_packets" },
311 { "rx_1024_to_1522_octet_packets" },
312 { "rx_1523_to_2047_octet_packets" },
313 { "rx_2048_to_4095_octet_packets" },
314 { "rx_4096_to_8191_octet_packets" },
315 { "rx_8192_to_9022_octet_packets" },
317 { "tx_octets" },
318 { "tx_collisions" },
320 { "tx_xon_sent" },
321 { "tx_xoff_sent" },
322 { "tx_flow_control" },
323 { "tx_mac_errors" },
324 { "tx_single_collisions" },
325 { "tx_mult_collisions" },
326 { "tx_deferred" },
327 { "tx_excessive_collisions" },
328 { "tx_late_collisions" },
329 { "tx_collide_2times" },
330 { "tx_collide_3times" },
331 { "tx_collide_4times" },
332 { "tx_collide_5times" },
333 { "tx_collide_6times" },
334 { "tx_collide_7times" },
335 { "tx_collide_8times" },
336 { "tx_collide_9times" },
337 { "tx_collide_10times" },
338 { "tx_collide_11times" },
339 { "tx_collide_12times" },
340 { "tx_collide_13times" },
341 { "tx_collide_14times" },
342 { "tx_collide_15times" },
343 { "tx_ucast_packets" },
344 { "tx_mcast_packets" },
345 { "tx_bcast_packets" },
346 { "tx_carrier_sense_errors" },
347 { "tx_discards" },
348 { "tx_errors" },
350 { "dma_writeq_full" },
351 { "dma_write_prioq_full" },
352 { "rxbds_empty" },
353 { "rx_discards" },
354 { "rx_errors" },
355 { "rx_threshold_hit" },
357 { "dma_readq_full" },
358 { "dma_read_prioq_full" },
359 { "tx_comp_queue_full" },
361 { "ring_set_send_prod_index" },
362 { "ring_status_update" },
363 { "nic_irqs" },
364 { "nic_avoided_irqs" },
365 { "nic_tx_threshold_hit" }
368 static const struct {
369 const char string[ETH_GSTRING_LEN];
370 } ethtool_test_keys[TG3_NUM_TEST] = {
371 { "nvram test (online) " },
372 { "link test (online) " },
373 { "register test (offline)" },
374 { "memory test (offline)" },
375 { "loopback test (offline)" },
376 { "interrupt test (offline)" },
379 static void tg3_write32(struct tg3 *tp, u32 off, u32 val)
381 writel(val, tp->regs + off);
384 static u32 tg3_read32(struct tg3 *tp, u32 off)
386 return readl(tp->regs + off);
389 static void tg3_ape_write32(struct tg3 *tp, u32 off, u32 val)
391 writel(val, tp->aperegs + off);
394 static u32 tg3_ape_read32(struct tg3 *tp, u32 off)
396 return readl(tp->aperegs + off);
399 static void tg3_write_indirect_reg32(struct tg3 *tp, u32 off, u32 val)
401 unsigned long flags;
403 spin_lock_irqsave(&tp->indirect_lock, flags);
404 pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off);
405 pci_write_config_dword(tp->pdev, TG3PCI_REG_DATA, val);
406 spin_unlock_irqrestore(&tp->indirect_lock, flags);
409 static void tg3_write_flush_reg32(struct tg3 *tp, u32 off, u32 val)
411 writel(val, tp->regs + off);
412 readl(tp->regs + off);
415 static u32 tg3_read_indirect_reg32(struct tg3 *tp, u32 off)
417 unsigned long flags;
418 u32 val;
420 spin_lock_irqsave(&tp->indirect_lock, flags);
421 pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off);
422 pci_read_config_dword(tp->pdev, TG3PCI_REG_DATA, &val);
423 spin_unlock_irqrestore(&tp->indirect_lock, flags);
424 return val;
427 static void tg3_write_indirect_mbox(struct tg3 *tp, u32 off, u32 val)
429 unsigned long flags;
431 if (off == (MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW)) {
432 pci_write_config_dword(tp->pdev, TG3PCI_RCV_RET_RING_CON_IDX +
433 TG3_64BIT_REG_LOW, val);
434 return;
436 if (off == TG3_RX_STD_PROD_IDX_REG) {
437 pci_write_config_dword(tp->pdev, TG3PCI_STD_RING_PROD_IDX +
438 TG3_64BIT_REG_LOW, val);
439 return;
442 spin_lock_irqsave(&tp->indirect_lock, flags);
443 pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off + 0x5600);
444 pci_write_config_dword(tp->pdev, TG3PCI_REG_DATA, val);
445 spin_unlock_irqrestore(&tp->indirect_lock, flags);
447 /* In indirect mode when disabling interrupts, we also need
448 * to clear the interrupt bit in the GRC local ctrl register.
450 if ((off == (MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW)) &&
451 (val == 0x1)) {
452 pci_write_config_dword(tp->pdev, TG3PCI_MISC_LOCAL_CTRL,
453 tp->grc_local_ctrl|GRC_LCLCTRL_CLEARINT);
457 static u32 tg3_read_indirect_mbox(struct tg3 *tp, u32 off)
459 unsigned long flags;
460 u32 val;
462 spin_lock_irqsave(&tp->indirect_lock, flags);
463 pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off + 0x5600);
464 pci_read_config_dword(tp->pdev, TG3PCI_REG_DATA, &val);
465 spin_unlock_irqrestore(&tp->indirect_lock, flags);
466 return val;
469 /* usec_wait specifies the wait time in usec when writing to certain registers
470 * where it is unsafe to read back the register without some delay.
471 * GRC_LOCAL_CTRL is one example if the GPIOs are toggled to switch power.
472 * TG3PCI_CLOCK_CTRL is another example if the clock frequencies are changed.
473 */
474 static void _tw32_flush(struct tg3 *tp, u32 off, u32 val, u32 usec_wait)
476 if ((tp->tg3_flags & TG3_FLAG_PCIX_TARGET_HWBUG) ||
477 (tp->tg3_flags2 & TG3_FLG2_ICH_WORKAROUND))
478 /* Non-posted methods */
479 tp->write32(tp, off, val);
480 else {
481 /* Posted method */
482 tg3_write32(tp, off, val);
483 if (usec_wait)
484 udelay(usec_wait);
485 tp->read32(tp, off);
487 /* Wait again after the read for the posted method to guarantee that
488 * the wait time is met.
490 if (usec_wait)
491 udelay(usec_wait);
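/* Callers reach _tw32_flush() through the tw32_f() and tw32_wait_f()
 * macros below. The flag tests above select a non-posted write path
 * (tp->write32, typically the indirect config-space method) for chips
 * with the PCIX target or ICH workarounds, and a posted write followed
 * by a read-back plus the requested delay otherwise.
 */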
494 static inline void tw32_mailbox_flush(struct tg3 *tp, u32 off, u32 val)
496 tp->write32_mbox(tp, off, val);
497 if (!(tp->tg3_flags & TG3_FLAG_MBOX_WRITE_REORDER) &&
498 !(tp->tg3_flags2 & TG3_FLG2_ICH_WORKAROUND))
499 tp->read32_mbox(tp, off);
502 static void tg3_write32_tx_mbox(struct tg3 *tp, u32 off, u32 val)
504 void __iomem *mbox = tp->regs + off;
505 writel(val, mbox);
506 if (tp->tg3_flags & TG3_FLAG_TXD_MBOX_HWBUG)
507 writel(val, mbox);
508 if (tp->tg3_flags & TG3_FLAG_MBOX_WRITE_REORDER)
509 readl(mbox);
512 static u32 tg3_read32_mbox_5906(struct tg3 *tp, u32 off)
514 return readl(tp->regs + off + GRCMBOX_BASE);
517 static void tg3_write32_mbox_5906(struct tg3 *tp, u32 off, u32 val)
519 writel(val, tp->regs + off + GRCMBOX_BASE);
522 #define tw32_mailbox(reg, val) tp->write32_mbox(tp, reg, val)
523 #define tw32_mailbox_f(reg, val) tw32_mailbox_flush(tp, (reg), (val))
524 #define tw32_rx_mbox(reg, val) tp->write32_rx_mbox(tp, reg, val)
525 #define tw32_tx_mbox(reg, val) tp->write32_tx_mbox(tp, reg, val)
526 #define tr32_mailbox(reg) tp->read32_mbox(tp, reg)
528 #define tw32(reg, val) tp->write32(tp, reg, val)
529 #define tw32_f(reg, val) _tw32_flush(tp, (reg), (val), 0)
530 #define tw32_wait_f(reg, val, us) _tw32_flush(tp, (reg), (val), (us))
531 #define tr32(reg) tp->read32(tp, reg)
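/* All of the tw32/tr32-style accessors dispatch through function
 * pointers in struct tg3 (write32, read32, write32_mbox, ...), so the
 * indirect-register and mailbox workarounds above can be chosen once
 * when the device is set up instead of being tested on every access.
 */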
533 static void tg3_write_mem(struct tg3 *tp, u32 off, u32 val)
535 unsigned long flags;
537 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) &&
538 (off >= NIC_SRAM_STATS_BLK) && (off < NIC_SRAM_TX_BUFFER_DESC))
539 return;
541 spin_lock_irqsave(&tp->indirect_lock, flags);
542 if (tp->tg3_flags & TG3_FLAG_SRAM_USE_CONFIG) {
543 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, off);
544 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);
546 /* Always leave this as zero. */
547 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
548 } else {
549 tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, off);
550 tw32_f(TG3PCI_MEM_WIN_DATA, val);
552 /* Always leave this as zero. */
553 tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, 0);
555 spin_unlock_irqrestore(&tp->indirect_lock, flags);
558 static void tg3_read_mem(struct tg3 *tp, u32 off, u32 *val)
560 unsigned long flags;
562 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) &&
563 (off >= NIC_SRAM_STATS_BLK) && (off < NIC_SRAM_TX_BUFFER_DESC)) {
564 *val = 0;
565 return;
568 spin_lock_irqsave(&tp->indirect_lock, flags);
569 if (tp->tg3_flags & TG3_FLAG_SRAM_USE_CONFIG) {
570 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, off);
571 pci_read_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);
573 /* Always leave this as zero. */
574 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
575 } else {
576 tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, off);
577 *val = tr32(TG3PCI_MEM_WIN_DATA);
579 /* Always leave this as zero. */
580 tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, 0);
582 spin_unlock_irqrestore(&tp->indirect_lock, flags);
585 static void tg3_ape_lock_init(struct tg3 *tp)
587 int i;
588 u32 regbase;
590 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
591 regbase = TG3_APE_LOCK_GRANT;
592 else
593 regbase = TG3_APE_PER_LOCK_GRANT;
595 /* Make sure the driver doesn't hold any stale locks. */
596 for (i = 0; i < 8; i++)
597 tg3_ape_write32(tp, regbase + 4 * i, APE_LOCK_GRANT_DRIVER);
600 static int tg3_ape_lock(struct tg3 *tp, int locknum)
602 int i, off;
603 int ret = 0;
604 u32 status, req, gnt;
606 if (!(tp->tg3_flags3 & TG3_FLG3_ENABLE_APE))
607 return 0;
609 switch (locknum) {
610 case TG3_APE_LOCK_GRC:
611 case TG3_APE_LOCK_MEM:
612 break;
613 default:
614 return -EINVAL;
617 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761) {
618 req = TG3_APE_LOCK_REQ;
619 gnt = TG3_APE_LOCK_GRANT;
620 } else {
621 req = TG3_APE_PER_LOCK_REQ;
622 gnt = TG3_APE_PER_LOCK_GRANT;
625 off = 4 * locknum;
627 tg3_ape_write32(tp, req + off, APE_LOCK_REQ_DRIVER);
629 /* Wait for up to 1 millisecond to acquire lock. */
630 for (i = 0; i < 100; i++) {
631 status = tg3_ape_read32(tp, gnt + off);
632 if (status == APE_LOCK_GRANT_DRIVER)
633 break;
634 udelay(10);
637 if (status != APE_LOCK_GRANT_DRIVER) {
638 /* Revoke the lock request. */
639 tg3_ape_write32(tp, gnt + off,
640 APE_LOCK_GRANT_DRIVER);
642 ret = -EBUSY;
645 return ret;
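/* Typical usage elsewhere in the driver brackets shared-resource
 * accesses with tg3_ape_lock(tp, TG3_APE_LOCK_MEM) / tg3_ape_unlock().
 * The loop above polls the grant register every 10us for up to 100
 * iterations, giving the 1ms acquisition window mentioned in the
 * comment.
 */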
648 static void tg3_ape_unlock(struct tg3 *tp, int locknum)
650 u32 gnt;
652 if (!(tp->tg3_flags3 & TG3_FLG3_ENABLE_APE))
653 return;
655 switch (locknum) {
656 case TG3_APE_LOCK_GRC:
657 case TG3_APE_LOCK_MEM:
658 break;
659 default:
660 return;
663 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
664 gnt = TG3_APE_LOCK_GRANT;
665 else
666 gnt = TG3_APE_PER_LOCK_GRANT;
668 tg3_ape_write32(tp, gnt + 4 * locknum, APE_LOCK_GRANT_DRIVER);
671 static void tg3_disable_ints(struct tg3 *tp)
673 int i;
675 tw32(TG3PCI_MISC_HOST_CTRL,
676 (tp->misc_host_ctrl | MISC_HOST_CTRL_MASK_PCI_INT));
677 for (i = 0; i < tp->irq_max; i++)
678 tw32_mailbox_f(tp->napi[i].int_mbox, 0x00000001);
681 static void tg3_enable_ints(struct tg3 *tp)
683 int i;
685 tp->irq_sync = 0;
686 wmb();
688 tw32(TG3PCI_MISC_HOST_CTRL,
689 (tp->misc_host_ctrl & ~MISC_HOST_CTRL_MASK_PCI_INT));
691 tp->coal_now = tp->coalesce_mode | HOSTCC_MODE_ENABLE;
692 for (i = 0; i < tp->irq_cnt; i++) {
693 struct tg3_napi *tnapi = &tp->napi[i];
695 tw32_mailbox_f(tnapi->int_mbox, tnapi->last_tag << 24);
696 if (tp->tg3_flags2 & TG3_FLG2_1SHOT_MSI)
697 tw32_mailbox_f(tnapi->int_mbox, tnapi->last_tag << 24);
699 tp->coal_now |= tnapi->coal_now;
702 /* Force an initial interrupt */
703 if (!(tp->tg3_flags & TG3_FLAG_TAGGED_STATUS) &&
704 (tp->napi[0].hw_status->status & SD_STATUS_UPDATED))
705 tw32(GRC_LOCAL_CTRL, tp->grc_local_ctrl | GRC_LCLCTRL_SETINT);
706 else
707 tw32(HOSTCC_MODE, tp->coal_now);
709 tp->coal_now &= ~(tp->napi[0].coal_now | tp->napi[1].coal_now);
712 static inline unsigned int tg3_has_work(struct tg3_napi *tnapi)
714 struct tg3 *tp = tnapi->tp;
715 struct tg3_hw_status *sblk = tnapi->hw_status;
716 unsigned int work_exists = 0;
718 /* check for phy events */
719 if (!(tp->tg3_flags &
720 (TG3_FLAG_USE_LINKCHG_REG |
721 TG3_FLAG_POLL_SERDES))) {
722 if (sblk->status & SD_STATUS_LINK_CHG)
723 work_exists = 1;
725 /* check for RX/TX work to do */
726 if (sblk->idx[0].tx_consumer != tnapi->tx_cons ||
727 *(tnapi->rx_rcb_prod_idx) != tnapi->rx_rcb_ptr)
728 work_exists = 1;
730 return work_exists;
733 /* tg3_int_reenable
734 * similar to tg3_enable_ints, but it accurately determines whether there
735 * is new work pending and can return without flushing the PIO write
736 * which reenables interrupts
738 static void tg3_int_reenable(struct tg3_napi *tnapi)
740 struct tg3 *tp = tnapi->tp;
742 tw32_mailbox(tnapi->int_mbox, tnapi->last_tag << 24);
743 mmiowb();
745 /* When doing tagged status, this work check is unnecessary.
746 * The last_tag we write above tells the chip which piece of
747 * work we've completed.
749 if (!(tp->tg3_flags & TG3_FLAG_TAGGED_STATUS) &&
750 tg3_has_work(tnapi))
751 tw32(HOSTCC_MODE, tp->coalesce_mode |
752 HOSTCC_MODE_ENABLE | tnapi->coal_now);
755 static void tg3_napi_disable(struct tg3 *tp)
757 int i;
759 for (i = tp->irq_cnt - 1; i >= 0; i--)
760 napi_disable(&tp->napi[i].napi);
763 static void tg3_napi_enable(struct tg3 *tp)
765 int i;
767 for (i = 0; i < tp->irq_cnt; i++)
768 napi_enable(&tp->napi[i].napi);
771 static inline void tg3_netif_stop(struct tg3 *tp)
773 tp->dev->trans_start = jiffies; /* prevent tx timeout */
774 tg3_napi_disable(tp);
775 netif_tx_disable(tp->dev);
778 static inline void tg3_netif_start(struct tg3 *tp)
780 /* NOTE: unconditional netif_tx_wake_all_queues is only
781 * appropriate so long as all callers are assured to
782 * have free tx slots (such as after tg3_init_hw)
784 netif_tx_wake_all_queues(tp->dev);
786 tg3_napi_enable(tp);
787 tp->napi[0].hw_status->status |= SD_STATUS_UPDATED;
788 tg3_enable_ints(tp);
791 static void tg3_switch_clocks(struct tg3 *tp)
793 u32 clock_ctrl;
794 u32 orig_clock_ctrl;
796 if ((tp->tg3_flags & TG3_FLAG_CPMU_PRESENT) ||
797 (tp->tg3_flags2 & TG3_FLG2_5780_CLASS))
798 return;
800 clock_ctrl = tr32(TG3PCI_CLOCK_CTRL);
802 orig_clock_ctrl = clock_ctrl;
803 clock_ctrl &= (CLOCK_CTRL_FORCE_CLKRUN |
804 CLOCK_CTRL_CLKRUN_OENABLE |
805 0x1f);
806 tp->pci_clock_ctrl = clock_ctrl;
808 if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
809 if (orig_clock_ctrl & CLOCK_CTRL_625_CORE) {
810 tw32_wait_f(TG3PCI_CLOCK_CTRL,
811 clock_ctrl | CLOCK_CTRL_625_CORE, 40);
813 } else if ((orig_clock_ctrl & CLOCK_CTRL_44MHZ_CORE) != 0) {
814 tw32_wait_f(TG3PCI_CLOCK_CTRL,
815 clock_ctrl |
816 (CLOCK_CTRL_44MHZ_CORE | CLOCK_CTRL_ALTCLK),
817 40);
818 tw32_wait_f(TG3PCI_CLOCK_CTRL,
819 clock_ctrl | (CLOCK_CTRL_ALTCLK),
820 40);
822 tw32_wait_f(TG3PCI_CLOCK_CTRL, clock_ctrl, 40);
825 #define PHY_BUSY_LOOPS 5000
827 static int tg3_readphy(struct tg3 *tp, int reg, u32 *val)
829 u32 frame_val;
830 unsigned int loops;
831 int ret;
833 if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
834 tw32_f(MAC_MI_MODE,
835 (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
836 udelay(80);
839 *val = 0x0;
841 frame_val = ((tp->phy_addr << MI_COM_PHY_ADDR_SHIFT) &
842 MI_COM_PHY_ADDR_MASK);
843 frame_val |= ((reg << MI_COM_REG_ADDR_SHIFT) &
844 MI_COM_REG_ADDR_MASK);
845 frame_val |= (MI_COM_CMD_READ | MI_COM_START);
847 tw32_f(MAC_MI_COM, frame_val);
849 loops = PHY_BUSY_LOOPS;
850 while (loops != 0) {
851 udelay(10);
852 frame_val = tr32(MAC_MI_COM);
854 if ((frame_val & MI_COM_BUSY) == 0) {
855 udelay(5);
856 frame_val = tr32(MAC_MI_COM);
857 break;
859 loops -= 1;
862 ret = -EBUSY;
863 if (loops != 0) {
864 *val = frame_val & MI_COM_DATA_MASK;
865 ret = 0;
868 if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
869 tw32_f(MAC_MI_MODE, tp->mi_mode);
870 udelay(80);
873 return ret;
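/* A MII read is built as a frame in MAC_MI_COM (PHY address, register,
 * READ command, START), then the busy bit is polled every 10us for up
 * to PHY_BUSY_LOOPS (5000) iterations, i.e. roughly 50ms, before the
 * access is declared failed with -EBUSY.
 */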
876 static int tg3_writephy(struct tg3 *tp, int reg, u32 val)
878 u32 frame_val;
879 unsigned int loops;
880 int ret;
882 if ((tp->phy_flags & TG3_PHYFLG_IS_FET) &&
883 (reg == MII_TG3_CTRL || reg == MII_TG3_AUX_CTRL))
884 return 0;
886 if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
887 tw32_f(MAC_MI_MODE,
888 (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
889 udelay(80);
892 frame_val = ((tp->phy_addr << MI_COM_PHY_ADDR_SHIFT) &
893 MI_COM_PHY_ADDR_MASK);
894 frame_val |= ((reg << MI_COM_REG_ADDR_SHIFT) &
895 MI_COM_REG_ADDR_MASK);
896 frame_val |= (val & MI_COM_DATA_MASK);
897 frame_val |= (MI_COM_CMD_WRITE | MI_COM_START);
899 tw32_f(MAC_MI_COM, frame_val);
901 loops = PHY_BUSY_LOOPS;
902 while (loops != 0) {
903 udelay(10);
904 frame_val = tr32(MAC_MI_COM);
905 if ((frame_val & MI_COM_BUSY) == 0) {
906 udelay(5);
907 frame_val = tr32(MAC_MI_COM);
908 break;
910 loops -= 1;
913 ret = -EBUSY;
914 if (loops != 0)
915 ret = 0;
917 if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
918 tw32_f(MAC_MI_MODE, tp->mi_mode);
919 udelay(80);
922 return ret;
925 static int tg3_bmcr_reset(struct tg3 *tp)
927 u32 phy_control;
928 int limit, err;
930 /* OK, reset it, and poll the BMCR_RESET bit until it
931 * clears or we time out.
933 phy_control = BMCR_RESET;
934 err = tg3_writephy(tp, MII_BMCR, phy_control);
935 if (err != 0)
936 return -EBUSY;
938 limit = 5000;
939 while (limit--) {
940 err = tg3_readphy(tp, MII_BMCR, &phy_control);
941 if (err != 0)
942 return -EBUSY;
944 if ((phy_control & BMCR_RESET) == 0) {
945 udelay(40);
946 break;
948 udelay(10);
950 if (limit < 0)
951 return -EBUSY;
953 return 0;
956 static int tg3_mdio_read(struct mii_bus *bp, int mii_id, int reg)
958 struct tg3 *tp = bp->priv;
959 u32 val;
961 spin_lock_bh(&tp->lock);
963 if (tg3_readphy(tp, reg, &val))
964 val = -EIO;
966 spin_unlock_bh(&tp->lock);
968 return val;
971 static int tg3_mdio_write(struct mii_bus *bp, int mii_id, int reg, u16 val)
973 struct tg3 *tp = bp->priv;
974 u32 ret = 0;
976 spin_lock_bh(&tp->lock);
978 if (tg3_writephy(tp, reg, val))
979 ret = -EIO;
981 spin_unlock_bh(&tp->lock);
983 return ret;
986 static int tg3_mdio_reset(struct mii_bus *bp)
988 return 0;
991 static void tg3_mdio_config_5785(struct tg3 *tp)
993 u32 val;
994 struct phy_device *phydev;
996 phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
997 switch (phydev->drv->phy_id & phydev->drv->phy_id_mask) {
998 case PHY_ID_BCM50610:
999 case PHY_ID_BCM50610M:
1000 val = MAC_PHYCFG2_50610_LED_MODES;
1001 break;
1002 case PHY_ID_BCMAC131:
1003 val = MAC_PHYCFG2_AC131_LED_MODES;
1004 break;
1005 case PHY_ID_RTL8211C:
1006 val = MAC_PHYCFG2_RTL8211C_LED_MODES;
1007 break;
1008 case PHY_ID_RTL8201E:
1009 val = MAC_PHYCFG2_RTL8201E_LED_MODES;
1010 break;
1011 default:
1012 return;
1015 if (phydev->interface != PHY_INTERFACE_MODE_RGMII) {
1016 tw32(MAC_PHYCFG2, val);
1018 val = tr32(MAC_PHYCFG1);
1019 val &= ~(MAC_PHYCFG1_RGMII_INT |
1020 MAC_PHYCFG1_RXCLK_TO_MASK | MAC_PHYCFG1_TXCLK_TO_MASK);
1021 val |= MAC_PHYCFG1_RXCLK_TIMEOUT | MAC_PHYCFG1_TXCLK_TIMEOUT;
1022 tw32(MAC_PHYCFG1, val);
1024 return;
1027 if (!(tp->tg3_flags3 & TG3_FLG3_RGMII_INBAND_DISABLE))
1028 val |= MAC_PHYCFG2_EMODE_MASK_MASK |
1029 MAC_PHYCFG2_FMODE_MASK_MASK |
1030 MAC_PHYCFG2_GMODE_MASK_MASK |
1031 MAC_PHYCFG2_ACT_MASK_MASK |
1032 MAC_PHYCFG2_QUAL_MASK_MASK |
1033 MAC_PHYCFG2_INBAND_ENABLE;
1035 tw32(MAC_PHYCFG2, val);
1037 val = tr32(MAC_PHYCFG1);
1038 val &= ~(MAC_PHYCFG1_RXCLK_TO_MASK | MAC_PHYCFG1_TXCLK_TO_MASK |
1039 MAC_PHYCFG1_RGMII_EXT_RX_DEC | MAC_PHYCFG1_RGMII_SND_STAT_EN);
1040 if (!(tp->tg3_flags3 & TG3_FLG3_RGMII_INBAND_DISABLE)) {
1041 if (tp->tg3_flags3 & TG3_FLG3_RGMII_EXT_IBND_RX_EN)
1042 val |= MAC_PHYCFG1_RGMII_EXT_RX_DEC;
1043 if (tp->tg3_flags3 & TG3_FLG3_RGMII_EXT_IBND_TX_EN)
1044 val |= MAC_PHYCFG1_RGMII_SND_STAT_EN;
1046 val |= MAC_PHYCFG1_RXCLK_TIMEOUT | MAC_PHYCFG1_TXCLK_TIMEOUT |
1047 MAC_PHYCFG1_RGMII_INT | MAC_PHYCFG1_TXC_DRV;
1048 tw32(MAC_PHYCFG1, val);
1050 val = tr32(MAC_EXT_RGMII_MODE);
1051 val &= ~(MAC_RGMII_MODE_RX_INT_B |
1052 MAC_RGMII_MODE_RX_QUALITY |
1053 MAC_RGMII_MODE_RX_ACTIVITY |
1054 MAC_RGMII_MODE_RX_ENG_DET |
1055 MAC_RGMII_MODE_TX_ENABLE |
1056 MAC_RGMII_MODE_TX_LOWPWR |
1057 MAC_RGMII_MODE_TX_RESET);
1058 if (!(tp->tg3_flags3 & TG3_FLG3_RGMII_INBAND_DISABLE)) {
1059 if (tp->tg3_flags3 & TG3_FLG3_RGMII_EXT_IBND_RX_EN)
1060 val |= MAC_RGMII_MODE_RX_INT_B |
1061 MAC_RGMII_MODE_RX_QUALITY |
1062 MAC_RGMII_MODE_RX_ACTIVITY |
1063 MAC_RGMII_MODE_RX_ENG_DET;
1064 if (tp->tg3_flags3 & TG3_FLG3_RGMII_EXT_IBND_TX_EN)
1065 val |= MAC_RGMII_MODE_TX_ENABLE |
1066 MAC_RGMII_MODE_TX_LOWPWR |
1067 MAC_RGMII_MODE_TX_RESET;
1069 tw32(MAC_EXT_RGMII_MODE, val);
1072 static void tg3_mdio_start(struct tg3 *tp)
1074 tp->mi_mode &= ~MAC_MI_MODE_AUTO_POLL;
1075 tw32_f(MAC_MI_MODE, tp->mi_mode);
1076 udelay(80);
1078 if ((tp->tg3_flags3 & TG3_FLG3_MDIOBUS_INITED) &&
1079 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
1080 tg3_mdio_config_5785(tp);
1083 static int tg3_mdio_init(struct tg3 *tp)
1085 int i;
1086 u32 reg;
1087 struct phy_device *phydev;
1089 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
1090 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719) {
1091 u32 is_serdes;
1093 tp->phy_addr = PCI_FUNC(tp->pdev->devfn) + 1;
1095 if (tp->pci_chip_rev_id != CHIPREV_ID_5717_A0)
1096 is_serdes = tr32(SG_DIG_STATUS) & SG_DIG_IS_SERDES;
1097 else
1098 is_serdes = tr32(TG3_CPMU_PHY_STRAP) &
1099 TG3_CPMU_PHY_STRAP_IS_SERDES;
1100 if (is_serdes)
1101 tp->phy_addr += 7;
1102 } else
1103 tp->phy_addr = TG3_PHY_MII_ADDR;
1105 tg3_mdio_start(tp);
1107 if (!(tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB) ||
1108 (tp->tg3_flags3 & TG3_FLG3_MDIOBUS_INITED))
1109 return 0;
1111 tp->mdio_bus = mdiobus_alloc();
1112 if (tp->mdio_bus == NULL)
1113 return -ENOMEM;
1115 tp->mdio_bus->name = "tg3 mdio bus";
1116 snprintf(tp->mdio_bus->id, MII_BUS_ID_SIZE, "%x",
1117 (tp->pdev->bus->number << 8) | tp->pdev->devfn);
1118 tp->mdio_bus->priv = tp;
1119 tp->mdio_bus->parent = &tp->pdev->dev;
1120 tp->mdio_bus->read = &tg3_mdio_read;
1121 tp->mdio_bus->write = &tg3_mdio_write;
1122 tp->mdio_bus->reset = &tg3_mdio_reset;
1123 tp->mdio_bus->phy_mask = ~(1 << TG3_PHY_MII_ADDR);
1124 tp->mdio_bus->irq = &tp->mdio_irq[0];
1126 for (i = 0; i < PHY_MAX_ADDR; i++)
1127 tp->mdio_bus->irq[i] = PHY_POLL;
1129 /* The bus registration will look for all the PHYs on the mdio bus.
1130 * Unfortunately, it does not ensure the PHY is powered up before
1131 * accessing the PHY ID registers. A chip reset is the
1132 * quickest way to bring the device back to an operational state.
1133 */
1134 if (tg3_readphy(tp, MII_BMCR, &reg) || (reg & BMCR_PDOWN))
1135 tg3_bmcr_reset(tp);
1137 i = mdiobus_register(tp->mdio_bus);
1138 if (i) {
1139 dev_warn(&tp->pdev->dev, "mdiobus_reg failed (0x%x)\n", i);
1140 mdiobus_free(tp->mdio_bus);
1141 return i;
1144 phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
1146 if (!phydev || !phydev->drv) {
1147 dev_warn(&tp->pdev->dev, "No PHY devices\n");
1148 mdiobus_unregister(tp->mdio_bus);
1149 mdiobus_free(tp->mdio_bus);
1150 return -ENODEV;
1153 switch (phydev->drv->phy_id & phydev->drv->phy_id_mask) {
1154 case PHY_ID_BCM57780:
1155 phydev->interface = PHY_INTERFACE_MODE_GMII;
1156 phydev->dev_flags |= PHY_BRCM_AUTO_PWRDWN_ENABLE;
1157 break;
1158 case PHY_ID_BCM50610:
1159 case PHY_ID_BCM50610M:
1160 phydev->dev_flags |= PHY_BRCM_CLEAR_RGMII_MODE |
1161 PHY_BRCM_RX_REFCLK_UNUSED |
1162 PHY_BRCM_DIS_TXCRXC_NOENRGY |
1163 PHY_BRCM_AUTO_PWRDWN_ENABLE;
1164 if (tp->tg3_flags3 & TG3_FLG3_RGMII_INBAND_DISABLE)
1165 phydev->dev_flags |= PHY_BRCM_STD_IBND_DISABLE;
1166 if (tp->tg3_flags3 & TG3_FLG3_RGMII_EXT_IBND_RX_EN)
1167 phydev->dev_flags |= PHY_BRCM_EXT_IBND_RX_ENABLE;
1168 if (tp->tg3_flags3 & TG3_FLG3_RGMII_EXT_IBND_TX_EN)
1169 phydev->dev_flags |= PHY_BRCM_EXT_IBND_TX_ENABLE;
1170 /* fallthru */
1171 case PHY_ID_RTL8211C:
1172 phydev->interface = PHY_INTERFACE_MODE_RGMII;
1173 break;
1174 case PHY_ID_RTL8201E:
1175 case PHY_ID_BCMAC131:
1176 phydev->interface = PHY_INTERFACE_MODE_MII;
1177 phydev->dev_flags |= PHY_BRCM_AUTO_PWRDWN_ENABLE;
1178 tp->phy_flags |= TG3_PHYFLG_IS_FET;
1179 break;
1182 tp->tg3_flags3 |= TG3_FLG3_MDIOBUS_INITED;
1184 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
1185 tg3_mdio_config_5785(tp);
1187 return 0;
1190 static void tg3_mdio_fini(struct tg3 *tp)
1192 if (tp->tg3_flags3 & TG3_FLG3_MDIOBUS_INITED) {
1193 tp->tg3_flags3 &= ~TG3_FLG3_MDIOBUS_INITED;
1194 mdiobus_unregister(tp->mdio_bus);
1195 mdiobus_free(tp->mdio_bus);
1199 /* tp->lock is held. */
1200 static inline void tg3_generate_fw_event(struct tg3 *tp)
1202 u32 val;
1204 val = tr32(GRC_RX_CPU_EVENT);
1205 val |= GRC_RX_CPU_DRIVER_EVENT;
1206 tw32_f(GRC_RX_CPU_EVENT, val);
1208 tp->last_event_jiffies = jiffies;
1211 #define TG3_FW_EVENT_TIMEOUT_USEC 2500
1213 /* tp->lock is held. */
1214 static void tg3_wait_for_event_ack(struct tg3 *tp)
1216 int i;
1217 unsigned int delay_cnt;
1218 long time_remain;
1220 /* If enough time has passed, no wait is necessary. */
1221 time_remain = (long)(tp->last_event_jiffies + 1 +
1222 usecs_to_jiffies(TG3_FW_EVENT_TIMEOUT_USEC)) -
1223 (long)jiffies;
1224 if (time_remain < 0)
1225 return;
1227 /* Check if we can shorten the wait time. */
1228 delay_cnt = jiffies_to_usecs(time_remain);
1229 if (delay_cnt > TG3_FW_EVENT_TIMEOUT_USEC)
1230 delay_cnt = TG3_FW_EVENT_TIMEOUT_USEC;
1231 delay_cnt = (delay_cnt >> 3) + 1;
1233 for (i = 0; i < delay_cnt; i++) {
1234 if (!(tr32(GRC_RX_CPU_EVENT) & GRC_RX_CPU_DRIVER_EVENT))
1235 break;
1236 udelay(8);
1240 /* tp->lock is held. */
1241 static void tg3_ump_link_report(struct tg3 *tp)
1243 u32 reg;
1244 u32 val;
1246 if (!(tp->tg3_flags2 & TG3_FLG2_5780_CLASS) ||
1247 !(tp->tg3_flags & TG3_FLAG_ENABLE_ASF))
1248 return;
1250 tg3_wait_for_event_ack(tp);
1252 tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX, FWCMD_NICDRV_LINK_UPDATE);
1254 tg3_write_mem(tp, NIC_SRAM_FW_CMD_LEN_MBOX, 14);
1256 val = 0;
1257 if (!tg3_readphy(tp, MII_BMCR, &reg))
1258 val = reg << 16;
1259 if (!tg3_readphy(tp, MII_BMSR, &reg))
1260 val |= (reg & 0xffff);
1261 tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX, val);
1263 val = 0;
1264 if (!tg3_readphy(tp, MII_ADVERTISE, &reg))
1265 val = reg << 16;
1266 if (!tg3_readphy(tp, MII_LPA, &reg))
1267 val |= (reg & 0xffff);
1268 tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 4, val);
1270 val = 0;
1271 if (!(tp->phy_flags & TG3_PHYFLG_MII_SERDES)) {
1272 if (!tg3_readphy(tp, MII_CTRL1000, &reg))
1273 val = reg << 16;
1274 if (!tg3_readphy(tp, MII_STAT1000, &reg))
1275 val |= (reg & 0xffff);
1277 tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 8, val);
1279 if (!tg3_readphy(tp, MII_PHYADDR, &reg))
1280 val = reg << 16;
1281 else
1282 val = 0;
1283 tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 12, val);
1285 tg3_generate_fw_event(tp);
1288 static void tg3_link_report(struct tg3 *tp)
1290 if (!netif_carrier_ok(tp->dev)) {
1291 netif_info(tp, link, tp->dev, "Link is down\n");
1292 tg3_ump_link_report(tp);
1293 } else if (netif_msg_link(tp)) {
1294 netdev_info(tp->dev, "Link is up at %d Mbps, %s duplex\n",
1295 (tp->link_config.active_speed == SPEED_1000 ?
1296 1000 :
1297 (tp->link_config.active_speed == SPEED_100 ?
1298 100 : 10)),
1299 (tp->link_config.active_duplex == DUPLEX_FULL ?
1300 "full" : "half"));
1302 netdev_info(tp->dev, "Flow control is %s for TX and %s for RX\n",
1303 (tp->link_config.active_flowctrl & FLOW_CTRL_TX) ?
1304 "on" : "off",
1305 (tp->link_config.active_flowctrl & FLOW_CTRL_RX) ?
1306 "on" : "off");
1307 tg3_ump_link_report(tp);
1311 static u16 tg3_advert_flowctrl_1000T(u8 flow_ctrl)
1313 u16 miireg;
1315 if ((flow_ctrl & FLOW_CTRL_TX) && (flow_ctrl & FLOW_CTRL_RX))
1316 miireg = ADVERTISE_PAUSE_CAP;
1317 else if (flow_ctrl & FLOW_CTRL_TX)
1318 miireg = ADVERTISE_PAUSE_ASYM;
1319 else if (flow_ctrl & FLOW_CTRL_RX)
1320 miireg = ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM;
1321 else
1322 miireg = 0;
1324 return miireg;
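/* Mapping from the requested flow control to the copper autoneg
 * advertisement bits:
 *
 *	TX && RX  ->  ADVERTISE_PAUSE_CAP
 *	TX only   ->  ADVERTISE_PAUSE_ASYM
 *	RX only   ->  ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM
 *	none      ->  0
 */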
1327 static u16 tg3_advert_flowctrl_1000X(u8 flow_ctrl)
1329 u16 miireg;
1331 if ((flow_ctrl & FLOW_CTRL_TX) && (flow_ctrl & FLOW_CTRL_RX))
1332 miireg = ADVERTISE_1000XPAUSE;
1333 else if (flow_ctrl & FLOW_CTRL_TX)
1334 miireg = ADVERTISE_1000XPSE_ASYM;
1335 else if (flow_ctrl & FLOW_CTRL_RX)
1336 miireg = ADVERTISE_1000XPAUSE | ADVERTISE_1000XPSE_ASYM;
1337 else
1338 miireg = 0;
1340 return miireg;
1343 static u8 tg3_resolve_flowctrl_1000X(u16 lcladv, u16 rmtadv)
1345 u8 cap = 0;
1347 if (lcladv & ADVERTISE_1000XPAUSE) {
1348 if (lcladv & ADVERTISE_1000XPSE_ASYM) {
1349 if (rmtadv & LPA_1000XPAUSE)
1350 cap = FLOW_CTRL_TX | FLOW_CTRL_RX;
1351 else if (rmtadv & LPA_1000XPAUSE_ASYM)
1352 cap = FLOW_CTRL_RX;
1353 } else {
1354 if (rmtadv & LPA_1000XPAUSE)
1355 cap = FLOW_CTRL_TX | FLOW_CTRL_RX;
1357 } else if (lcladv & ADVERTISE_1000XPSE_ASYM) {
1358 if ((rmtadv & LPA_1000XPAUSE) && (rmtadv & LPA_1000XPAUSE_ASYM))
1359 cap = FLOW_CTRL_TX;
1362 return cap;
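/* Resolves 1000BASE-X pause autonegotiation: the local and remote
 * PAUSE/ASYM_PAUSE advertisement bits are combined into the
 * FLOW_CTRL_TX / FLOW_CTRL_RX capabilities that
 * tg3_setup_flow_control() below then programs into the MAC.
 */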
1365 static void tg3_setup_flow_control(struct tg3 *tp, u32 lcladv, u32 rmtadv)
1367 u8 autoneg;
1368 u8 flowctrl = 0;
1369 u32 old_rx_mode = tp->rx_mode;
1370 u32 old_tx_mode = tp->tx_mode;
1372 if (tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB)
1373 autoneg = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]->autoneg;
1374 else
1375 autoneg = tp->link_config.autoneg;
1377 if (autoneg == AUTONEG_ENABLE &&
1378 (tp->tg3_flags & TG3_FLAG_PAUSE_AUTONEG)) {
1379 if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES)
1380 flowctrl = tg3_resolve_flowctrl_1000X(lcladv, rmtadv);
1381 else
1382 flowctrl = mii_resolve_flowctrl_fdx(lcladv, rmtadv);
1383 } else
1384 flowctrl = tp->link_config.flowctrl;
1386 tp->link_config.active_flowctrl = flowctrl;
1388 if (flowctrl & FLOW_CTRL_RX)
1389 tp->rx_mode |= RX_MODE_FLOW_CTRL_ENABLE;
1390 else
1391 tp->rx_mode &= ~RX_MODE_FLOW_CTRL_ENABLE;
1393 if (old_rx_mode != tp->rx_mode)
1394 tw32_f(MAC_RX_MODE, tp->rx_mode);
1396 if (flowctrl & FLOW_CTRL_TX)
1397 tp->tx_mode |= TX_MODE_FLOW_CTRL_ENABLE;
1398 else
1399 tp->tx_mode &= ~TX_MODE_FLOW_CTRL_ENABLE;
1401 if (old_tx_mode != tp->tx_mode)
1402 tw32_f(MAC_TX_MODE, tp->tx_mode);
1405 static void tg3_adjust_link(struct net_device *dev)
1407 u8 oldflowctrl, linkmesg = 0;
1408 u32 mac_mode, lcl_adv, rmt_adv;
1409 struct tg3 *tp = netdev_priv(dev);
1410 struct phy_device *phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
1412 spin_lock_bh(&tp->lock);
1414 mac_mode = tp->mac_mode & ~(MAC_MODE_PORT_MODE_MASK |
1415 MAC_MODE_HALF_DUPLEX);
1417 oldflowctrl = tp->link_config.active_flowctrl;
1419 if (phydev->link) {
1420 lcl_adv = 0;
1421 rmt_adv = 0;
1423 if (phydev->speed == SPEED_100 || phydev->speed == SPEED_10)
1424 mac_mode |= MAC_MODE_PORT_MODE_MII;
1425 else if (phydev->speed == SPEED_1000 ||
1426 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5785)
1427 mac_mode |= MAC_MODE_PORT_MODE_GMII;
1428 else
1429 mac_mode |= MAC_MODE_PORT_MODE_MII;
1431 if (phydev->duplex == DUPLEX_HALF)
1432 mac_mode |= MAC_MODE_HALF_DUPLEX;
1433 else {
1434 lcl_adv = tg3_advert_flowctrl_1000T(
1435 tp->link_config.flowctrl);
1437 if (phydev->pause)
1438 rmt_adv = LPA_PAUSE_CAP;
1439 if (phydev->asym_pause)
1440 rmt_adv |= LPA_PAUSE_ASYM;
1443 tg3_setup_flow_control(tp, lcl_adv, rmt_adv);
1444 } else
1445 mac_mode |= MAC_MODE_PORT_MODE_GMII;
1447 if (mac_mode != tp->mac_mode) {
1448 tp->mac_mode = mac_mode;
1449 tw32_f(MAC_MODE, tp->mac_mode);
1450 udelay(40);
1453 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785) {
1454 if (phydev->speed == SPEED_10)
1455 tw32(MAC_MI_STAT,
1456 MAC_MI_STAT_10MBPS_MODE |
1457 MAC_MI_STAT_LNKSTAT_ATTN_ENAB);
1458 else
1459 tw32(MAC_MI_STAT, MAC_MI_STAT_LNKSTAT_ATTN_ENAB);
1462 if (phydev->speed == SPEED_1000 && phydev->duplex == DUPLEX_HALF)
1463 tw32(MAC_TX_LENGTHS,
1464 ((2 << TX_LENGTHS_IPG_CRS_SHIFT) |
1465 (6 << TX_LENGTHS_IPG_SHIFT) |
1466 (0xff << TX_LENGTHS_SLOT_TIME_SHIFT)));
1467 else
1468 tw32(MAC_TX_LENGTHS,
1469 ((2 << TX_LENGTHS_IPG_CRS_SHIFT) |
1470 (6 << TX_LENGTHS_IPG_SHIFT) |
1471 (32 << TX_LENGTHS_SLOT_TIME_SHIFT)));
1473 if ((phydev->link && tp->link_config.active_speed == SPEED_INVALID) ||
1474 (!phydev->link && tp->link_config.active_speed != SPEED_INVALID) ||
1475 phydev->speed != tp->link_config.active_speed ||
1476 phydev->duplex != tp->link_config.active_duplex ||
1477 oldflowctrl != tp->link_config.active_flowctrl)
1478 linkmesg = 1;
1480 tp->link_config.active_speed = phydev->speed;
1481 tp->link_config.active_duplex = phydev->duplex;
1483 spin_unlock_bh(&tp->lock);
1485 if (linkmesg)
1486 tg3_link_report(tp);
1489 static int tg3_phy_init(struct tg3 *tp)
1491 struct phy_device *phydev;
1493 if (tp->phy_flags & TG3_PHYFLG_IS_CONNECTED)
1494 return 0;
1496 /* Bring the PHY back to a known state. */
1497 tg3_bmcr_reset(tp);
1499 phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
1501 /* Attach the MAC to the PHY. */
1502 phydev = phy_connect(tp->dev, dev_name(&phydev->dev), tg3_adjust_link,
1503 phydev->dev_flags, phydev->interface);
1504 if (IS_ERR(phydev)) {
1505 dev_err(&tp->pdev->dev, "Could not attach to PHY\n");
1506 return PTR_ERR(phydev);
1509 /* Mask with MAC supported features. */
1510 switch (phydev->interface) {
1511 case PHY_INTERFACE_MODE_GMII:
1512 case PHY_INTERFACE_MODE_RGMII:
1513 if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
1514 phydev->supported &= (PHY_GBIT_FEATURES |
1515 SUPPORTED_Pause |
1516 SUPPORTED_Asym_Pause);
1517 break;
1519 /* fallthru */
1520 case PHY_INTERFACE_MODE_MII:
1521 phydev->supported &= (PHY_BASIC_FEATURES |
1522 SUPPORTED_Pause |
1523 SUPPORTED_Asym_Pause);
1524 break;
1525 default:
1526 phy_disconnect(tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]);
1527 return -EINVAL;
1530 tp->phy_flags |= TG3_PHYFLG_IS_CONNECTED;
1532 phydev->advertising = phydev->supported;
1534 return 0;
1537 static void tg3_phy_start(struct tg3 *tp)
1539 struct phy_device *phydev;
1541 if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
1542 return;
1544 phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
1546 if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) {
1547 tp->phy_flags &= ~TG3_PHYFLG_IS_LOW_POWER;
1548 phydev->speed = tp->link_config.orig_speed;
1549 phydev->duplex = tp->link_config.orig_duplex;
1550 phydev->autoneg = tp->link_config.orig_autoneg;
1551 phydev->advertising = tp->link_config.orig_advertising;
1554 phy_start(phydev);
1556 phy_start_aneg(phydev);
1559 static void tg3_phy_stop(struct tg3 *tp)
1561 if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
1562 return;
1564 phy_stop(tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]);
1567 static void tg3_phy_fini(struct tg3 *tp)
1569 if (tp->phy_flags & TG3_PHYFLG_IS_CONNECTED) {
1570 phy_disconnect(tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]);
1571 tp->phy_flags &= ~TG3_PHYFLG_IS_CONNECTED;
1575 static int tg3_phydsp_write(struct tg3 *tp, u32 reg, u32 val)
1577 int err;
1579 err = tg3_writephy(tp, MII_TG3_DSP_ADDRESS, reg);
1580 if (!err)
1581 err = tg3_writephy(tp, MII_TG3_DSP_RW_PORT, val);
1583 return err;
1586 static void tg3_phy_fet_toggle_apd(struct tg3 *tp, bool enable)
1588 u32 phytest;
1590 if (!tg3_readphy(tp, MII_TG3_FET_TEST, &phytest)) {
1591 u32 phy;
1593 tg3_writephy(tp, MII_TG3_FET_TEST,
1594 phytest | MII_TG3_FET_SHADOW_EN);
1595 if (!tg3_readphy(tp, MII_TG3_FET_SHDW_AUXSTAT2, &phy)) {
1596 if (enable)
1597 phy |= MII_TG3_FET_SHDW_AUXSTAT2_APD;
1598 else
1599 phy &= ~MII_TG3_FET_SHDW_AUXSTAT2_APD;
1600 tg3_writephy(tp, MII_TG3_FET_SHDW_AUXSTAT2, phy);
1602 tg3_writephy(tp, MII_TG3_FET_TEST, phytest);
1606 static void tg3_phy_toggle_apd(struct tg3 *tp, bool enable)
1608 u32 reg;
1610 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS) ||
1611 ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
1612 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719) &&
1613 (tp->phy_flags & TG3_PHYFLG_MII_SERDES)))
1614 return;
1616 if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
1617 tg3_phy_fet_toggle_apd(tp, enable);
1618 return;
1621 reg = MII_TG3_MISC_SHDW_WREN |
1622 MII_TG3_MISC_SHDW_SCR5_SEL |
1623 MII_TG3_MISC_SHDW_SCR5_LPED |
1624 MII_TG3_MISC_SHDW_SCR5_DLPTLM |
1625 MII_TG3_MISC_SHDW_SCR5_SDTL |
1626 MII_TG3_MISC_SHDW_SCR5_C125OE;
1627 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5784 || !enable)
1628 reg |= MII_TG3_MISC_SHDW_SCR5_DLLAPD;
1630 tg3_writephy(tp, MII_TG3_MISC_SHDW, reg);
1633 reg = MII_TG3_MISC_SHDW_WREN |
1634 MII_TG3_MISC_SHDW_APD_SEL |
1635 MII_TG3_MISC_SHDW_APD_WKTM_84MS;
1636 if (enable)
1637 reg |= MII_TG3_MISC_SHDW_APD_ENABLE;
1639 tg3_writephy(tp, MII_TG3_MISC_SHDW, reg);
1642 static void tg3_phy_toggle_automdix(struct tg3 *tp, int enable)
1644 u32 phy;
1646 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS) ||
1647 (tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
1648 return;
1650 if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
1651 u32 ephy;
1653 if (!tg3_readphy(tp, MII_TG3_FET_TEST, &ephy)) {
1654 u32 reg = MII_TG3_FET_SHDW_MISCCTRL;
1656 tg3_writephy(tp, MII_TG3_FET_TEST,
1657 ephy | MII_TG3_FET_SHADOW_EN);
1658 if (!tg3_readphy(tp, reg, &phy)) {
1659 if (enable)
1660 phy |= MII_TG3_FET_SHDW_MISCCTRL_MDIX;
1661 else
1662 phy &= ~MII_TG3_FET_SHDW_MISCCTRL_MDIX;
1663 tg3_writephy(tp, reg, phy);
1665 tg3_writephy(tp, MII_TG3_FET_TEST, ephy);
1667 } else {
1668 phy = MII_TG3_AUXCTL_MISC_RDSEL_MISC |
1669 MII_TG3_AUXCTL_SHDWSEL_MISC;
1670 if (!tg3_writephy(tp, MII_TG3_AUX_CTRL, phy) &&
1671 !tg3_readphy(tp, MII_TG3_AUX_CTRL, &phy)) {
1672 if (enable)
1673 phy |= MII_TG3_AUXCTL_MISC_FORCE_AMDIX;
1674 else
1675 phy &= ~MII_TG3_AUXCTL_MISC_FORCE_AMDIX;
1676 phy |= MII_TG3_AUXCTL_MISC_WREN;
1677 tg3_writephy(tp, MII_TG3_AUX_CTRL, phy);
1682 static void tg3_phy_set_wirespeed(struct tg3 *tp)
1684 u32 val;
1686 if (tp->phy_flags & TG3_PHYFLG_NO_ETH_WIRE_SPEED)
1687 return;
1689 if (!tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x7007) &&
1690 !tg3_readphy(tp, MII_TG3_AUX_CTRL, &val))
1691 tg3_writephy(tp, MII_TG3_AUX_CTRL,
1692 (val | (1 << 15) | (1 << 4)));
1695 static void tg3_phy_apply_otp(struct tg3 *tp)
1697 u32 otp, phy;
1699 if (!tp->phy_otp)
1700 return;
1702 otp = tp->phy_otp;
1704 /* Enable SM_DSP clock and tx 6dB coding. */
1705 phy = MII_TG3_AUXCTL_SHDWSEL_AUXCTL |
1706 MII_TG3_AUXCTL_ACTL_SMDSP_ENA |
1707 MII_TG3_AUXCTL_ACTL_TX_6DB;
1708 tg3_writephy(tp, MII_TG3_AUX_CTRL, phy);
1710 phy = ((otp & TG3_OTP_AGCTGT_MASK) >> TG3_OTP_AGCTGT_SHIFT);
1711 phy |= MII_TG3_DSP_TAP1_AGCTGT_DFLT;
1712 tg3_phydsp_write(tp, MII_TG3_DSP_TAP1, phy);
1714 phy = ((otp & TG3_OTP_HPFFLTR_MASK) >> TG3_OTP_HPFFLTR_SHIFT) |
1715 ((otp & TG3_OTP_HPFOVER_MASK) >> TG3_OTP_HPFOVER_SHIFT);
1716 tg3_phydsp_write(tp, MII_TG3_DSP_AADJ1CH0, phy);
1718 phy = ((otp & TG3_OTP_LPFDIS_MASK) >> TG3_OTP_LPFDIS_SHIFT);
1719 phy |= MII_TG3_DSP_AADJ1CH3_ADCCKADJ;
1720 tg3_phydsp_write(tp, MII_TG3_DSP_AADJ1CH3, phy);
1722 phy = ((otp & TG3_OTP_VDAC_MASK) >> TG3_OTP_VDAC_SHIFT);
1723 tg3_phydsp_write(tp, MII_TG3_DSP_EXP75, phy);
1725 phy = ((otp & TG3_OTP_10BTAMP_MASK) >> TG3_OTP_10BTAMP_SHIFT);
1726 tg3_phydsp_write(tp, MII_TG3_DSP_EXP96, phy);
1728 phy = ((otp & TG3_OTP_ROFF_MASK) >> TG3_OTP_ROFF_SHIFT) |
1729 ((otp & TG3_OTP_RCOFF_MASK) >> TG3_OTP_RCOFF_SHIFT);
1730 tg3_phydsp_write(tp, MII_TG3_DSP_EXP97, phy);
1732 /* Turn off SM_DSP clock. */
1733 phy = MII_TG3_AUXCTL_SHDWSEL_AUXCTL |
1734 MII_TG3_AUXCTL_ACTL_TX_6DB;
1735 tg3_writephy(tp, MII_TG3_AUX_CTRL, phy);
1738 static int tg3_wait_macro_done(struct tg3 *tp)
1740 int limit = 100;
1742 while (limit--) {
1743 u32 tmp32;
1745 if (!tg3_readphy(tp, MII_TG3_DSP_CONTROL, &tmp32)) {
1746 if ((tmp32 & 0x1000) == 0)
1747 break;
1750 if (limit < 0)
1751 return -EBUSY;
1753 return 0;
1756 static int tg3_phy_write_and_check_testpat(struct tg3 *tp, int *resetp)
1758 static const u32 test_pat[4][6] = {
1759 { 0x00005555, 0x00000005, 0x00002aaa, 0x0000000a, 0x00003456, 0x00000003 },
1760 { 0x00002aaa, 0x0000000a, 0x00003333, 0x00000003, 0x0000789a, 0x00000005 },
1761 { 0x00005a5a, 0x00000005, 0x00002a6a, 0x0000000a, 0x00001bcd, 0x00000003 },
1762 { 0x00002a5a, 0x0000000a, 0x000033c3, 0x00000003, 0x00002ef1, 0x00000005 }
1764 int chan;
1766 for (chan = 0; chan < 4; chan++) {
1767 int i;
1769 tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
1770 (chan * 0x2000) | 0x0200);
1771 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0002);
1773 for (i = 0; i < 6; i++)
1774 tg3_writephy(tp, MII_TG3_DSP_RW_PORT,
1775 test_pat[chan][i]);
1777 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0202);
1778 if (tg3_wait_macro_done(tp)) {
1779 *resetp = 1;
1780 return -EBUSY;
1783 tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
1784 (chan * 0x2000) | 0x0200);
1785 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0082);
1786 if (tg3_wait_macro_done(tp)) {
1787 *resetp = 1;
1788 return -EBUSY;
1791 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0802);
1792 if (tg3_wait_macro_done(tp)) {
1793 *resetp = 1;
1794 return -EBUSY;
1797 for (i = 0; i < 6; i += 2) {
1798 u32 low, high;
1800 if (tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &low) ||
1801 tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &high) ||
1802 tg3_wait_macro_done(tp)) {
1803 *resetp = 1;
1804 return -EBUSY;
1806 low &= 0x7fff;
1807 high &= 0x000f;
1808 if (low != test_pat[chan][i] ||
1809 high != test_pat[chan][i+1]) {
1810 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000b);
1811 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x4001);
1812 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x4005);
1814 return -EBUSY;
1819 return 0;
1822 static int tg3_phy_reset_chanpat(struct tg3 *tp)
1824 int chan;
1826 for (chan = 0; chan < 4; chan++) {
1827 int i;
1829 tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
1830 (chan * 0x2000) | 0x0200);
1831 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0002);
1832 for (i = 0; i < 6; i++)
1833 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x000);
1834 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0202);
1835 if (tg3_wait_macro_done(tp))
1836 return -EBUSY;
1839 return 0;
1842 static int tg3_phy_reset_5703_4_5(struct tg3 *tp)
1844 u32 reg32, phy9_orig;
1845 int retries, do_phy_reset, err;
1847 retries = 10;
1848 do_phy_reset = 1;
1849 do {
1850 if (do_phy_reset) {
1851 err = tg3_bmcr_reset(tp);
1852 if (err)
1853 return err;
1854 do_phy_reset = 0;
1857 /* Disable transmitter and interrupt. */
1858 if (tg3_readphy(tp, MII_TG3_EXT_CTRL, &reg32))
1859 continue;
1861 reg32 |= 0x3000;
1862 tg3_writephy(tp, MII_TG3_EXT_CTRL, reg32);
1864 /* Set full-duplex, 1000 mbps. */
1865 tg3_writephy(tp, MII_BMCR,
1866 BMCR_FULLDPLX | TG3_BMCR_SPEED1000);
1868 /* Set to master mode. */
1869 if (tg3_readphy(tp, MII_TG3_CTRL, &phy9_orig))
1870 continue;
1872 tg3_writephy(tp, MII_TG3_CTRL,
1873 (MII_TG3_CTRL_AS_MASTER |
1874 MII_TG3_CTRL_ENABLE_AS_MASTER));
1876 /* Enable SM_DSP_CLOCK and 6dB. */
1877 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0c00);
1879 /* Block the PHY control access. */
1880 tg3_phydsp_write(tp, 0x8005, 0x0800);
1882 err = tg3_phy_write_and_check_testpat(tp, &do_phy_reset);
1883 if (!err)
1884 break;
1885 } while (--retries);
1887 err = tg3_phy_reset_chanpat(tp);
1888 if (err)
1889 return err;
1891 tg3_phydsp_write(tp, 0x8005, 0x0000);
1893 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8200);
1894 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0000);
1896 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
1897 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
1898 /* Set Extended packet length bit for jumbo frames */
1899 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x4400);
1900 } else {
1901 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0400);
1904 tg3_writephy(tp, MII_TG3_CTRL, phy9_orig);
1906 if (!tg3_readphy(tp, MII_TG3_EXT_CTRL, &reg32)) {
1907 reg32 &= ~0x3000;
1908 tg3_writephy(tp, MII_TG3_EXT_CTRL, reg32);
1909 } else if (!err)
1910 err = -EBUSY;
1912 return err;
1915 /* This will reset the tigon3 PHY if there is no valid
1916 * link unless the FORCE argument is non-zero.
1918 static int tg3_phy_reset(struct tg3 *tp)
1920 u32 cpmuctrl;
1921 u32 phy_status;
1922 int err;
1924 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
1925 u32 val;
1927 val = tr32(GRC_MISC_CFG);
1928 tw32_f(GRC_MISC_CFG, val & ~GRC_MISC_CFG_EPHY_IDDQ);
1929 udelay(40);
1931 err = tg3_readphy(tp, MII_BMSR, &phy_status);
1932 err |= tg3_readphy(tp, MII_BMSR, &phy_status);
1933 if (err != 0)
1934 return -EBUSY;
1936 if (netif_running(tp->dev) && netif_carrier_ok(tp->dev)) {
1937 netif_carrier_off(tp->dev);
1938 tg3_link_report(tp);
1941 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
1942 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
1943 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
1944 err = tg3_phy_reset_5703_4_5(tp);
1945 if (err)
1946 return err;
1947 goto out;
1950 cpmuctrl = 0;
1951 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 &&
1952 GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5784_AX) {
1953 cpmuctrl = tr32(TG3_CPMU_CTRL);
1954 if (cpmuctrl & CPMU_CTRL_GPHY_10MB_RXONLY)
1955 tw32(TG3_CPMU_CTRL,
1956 cpmuctrl & ~CPMU_CTRL_GPHY_10MB_RXONLY);
1959 err = tg3_bmcr_reset(tp);
1960 if (err)
1961 return err;
1963 if (cpmuctrl & CPMU_CTRL_GPHY_10MB_RXONLY) {
1964 u32 phy;
1966 phy = MII_TG3_DSP_EXP8_AEDW | MII_TG3_DSP_EXP8_REJ2MHz;
1967 tg3_phydsp_write(tp, MII_TG3_DSP_EXP8, phy);
1969 tw32(TG3_CPMU_CTRL, cpmuctrl);
1972 if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5784_AX ||
1973 GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5761_AX) {
1974 u32 val;
1976 val = tr32(TG3_CPMU_LSPD_1000MB_CLK);
1977 if ((val & CPMU_LSPD_1000MB_MACCLK_MASK) ==
1978 CPMU_LSPD_1000MB_MACCLK_12_5) {
1979 val &= ~CPMU_LSPD_1000MB_MACCLK_MASK;
1980 udelay(40);
1981 tw32_f(TG3_CPMU_LSPD_1000MB_CLK, val);
1985 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
1986 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719) &&
1987 (tp->phy_flags & TG3_PHYFLG_MII_SERDES))
1988 return 0;
1990 tg3_phy_apply_otp(tp);
1992 if (tp->phy_flags & TG3_PHYFLG_ENABLE_APD)
1993 tg3_phy_toggle_apd(tp, true);
1994 else
1995 tg3_phy_toggle_apd(tp, false);
1997 out:
1998 if (tp->phy_flags & TG3_PHYFLG_ADC_BUG) {
1999 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0c00);
2000 tg3_phydsp_write(tp, 0x201f, 0x2aaa);
2001 tg3_phydsp_write(tp, 0x000a, 0x0323);
2002 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0400);
2004 if (tp->phy_flags & TG3_PHYFLG_5704_A0_BUG) {
2005 tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8d68);
2006 tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8d68);
2008 if (tp->phy_flags & TG3_PHYFLG_BER_BUG) {
2009 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0c00);
2010 tg3_phydsp_write(tp, 0x000a, 0x310b);
2011 tg3_phydsp_write(tp, 0x201f, 0x9506);
2012 tg3_phydsp_write(tp, 0x401f, 0x14e2);
2013 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0400);
2014 } else if (tp->phy_flags & TG3_PHYFLG_JITTER_BUG) {
2015 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0c00);
2016 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000a);
2017 if (tp->phy_flags & TG3_PHYFLG_ADJUST_TRIM) {
2018 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x110b);
2019 tg3_writephy(tp, MII_TG3_TEST1,
2020 MII_TG3_TEST1_TRIM_EN | 0x4);
2021 } else
2022 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x010b);
2023 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0400);
2025 /* Set Extended packet length bit (bit 14) on all chips that */
2026 /* support jumbo frames */
2027 if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5401) {
2028 /* Cannot do read-modify-write on 5401 */
2029 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x4c20);
2030 } else if (tp->tg3_flags & TG3_FLAG_JUMBO_CAPABLE) {
2031 u32 phy_reg;
2033 /* Set bit 14 with read-modify-write to preserve other bits */
2034 if (!tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0007) &&
2035 !tg3_readphy(tp, MII_TG3_AUX_CTRL, &phy_reg))
2036 tg3_writephy(tp, MII_TG3_AUX_CTRL, phy_reg | 0x4000);
2039 /* Set phy register 0x10 bit 0 to high fifo elasticity to support
2040 * jumbo frames transmission.
2041 */
2042 if (tp->tg3_flags & TG3_FLAG_JUMBO_CAPABLE) {
2043 u32 phy_reg;
2045 if (!tg3_readphy(tp, MII_TG3_EXT_CTRL, &phy_reg))
2046 tg3_writephy(tp, MII_TG3_EXT_CTRL,
2047 phy_reg | MII_TG3_EXT_CTRL_FIFO_ELASTIC);
2050 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
2051 /* adjust output voltage */
2052 tg3_writephy(tp, MII_TG3_FET_PTEST, 0x12);
2055 tg3_phy_toggle_automdix(tp, 1);
2056 tg3_phy_set_wirespeed(tp);
2057 return 0;
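/* Arbitrate the auxiliary (Vaux) power GPIOs between this device and
 * its on-board peer (dual-port NICs share the GPIOs).  If either port
 * still needs power for WoL or ASF, drive GRC_LOCAL_CTRL so Vaux stays
 * up; otherwise release the GPIOs so the board can power down.
 */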
2060 static void tg3_frob_aux_power(struct tg3 *tp)
2062 struct tg3 *tp_peer = tp;
2064 /* The GPIOs do something completely different on 57765. */
2065 if ((tp->tg3_flags2 & TG3_FLG2_IS_NIC) == 0 ||
2066 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
2067 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765)
2068 return;
2070 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
2071 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714 ||
2072 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717) {
2073 struct net_device *dev_peer;
2075 dev_peer = pci_get_drvdata(tp->pdev_peer);
2076 /* remove_one() may have been run on the peer. */
2077 if (!dev_peer)
2078 tp_peer = tp;
2079 else
2080 tp_peer = netdev_priv(dev_peer);
2083 if ((tp->tg3_flags & TG3_FLAG_WOL_ENABLE) != 0 ||
2084 (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) != 0 ||
2085 (tp_peer->tg3_flags & TG3_FLAG_WOL_ENABLE) != 0 ||
2086 (tp_peer->tg3_flags & TG3_FLAG_ENABLE_ASF) != 0) {
2087 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
2088 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
2089 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
2090 (GRC_LCLCTRL_GPIO_OE0 |
2091 GRC_LCLCTRL_GPIO_OE1 |
2092 GRC_LCLCTRL_GPIO_OE2 |
2093 GRC_LCLCTRL_GPIO_OUTPUT0 |
2094 GRC_LCLCTRL_GPIO_OUTPUT1),
2095 100);
2096 } else if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761 ||
2097 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761S) {
2098 /* The 5761 non-e device swaps GPIO 0 and GPIO 2. */
2099 u32 grc_local_ctrl = GRC_LCLCTRL_GPIO_OE0 |
2100 GRC_LCLCTRL_GPIO_OE1 |
2101 GRC_LCLCTRL_GPIO_OE2 |
2102 GRC_LCLCTRL_GPIO_OUTPUT0 |
2103 GRC_LCLCTRL_GPIO_OUTPUT1 |
2104 tp->grc_local_ctrl;
2105 tw32_wait_f(GRC_LOCAL_CTRL, grc_local_ctrl, 100);
2107 grc_local_ctrl |= GRC_LCLCTRL_GPIO_OUTPUT2;
2108 tw32_wait_f(GRC_LOCAL_CTRL, grc_local_ctrl, 100);
2110 grc_local_ctrl &= ~GRC_LCLCTRL_GPIO_OUTPUT0;
2111 tw32_wait_f(GRC_LOCAL_CTRL, grc_local_ctrl, 100);
2112 } else {
2113 u32 no_gpio2;
2114 u32 grc_local_ctrl = 0;
2116 if (tp_peer != tp &&
2117 (tp_peer->tg3_flags & TG3_FLAG_INIT_COMPLETE) != 0)
2118 return;
2120 /* Workaround to prevent overdrawing Amps. */
2121 if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
2122 ASIC_REV_5714) {
2123 grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE3;
2124 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
2125 grc_local_ctrl, 100);
2128 /* On 5753 and variants, GPIO2 cannot be used. */
2129 no_gpio2 = tp->nic_sram_data_cfg &
2130 NIC_SRAM_DATA_CFG_NO_GPIO2;
2132 grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE0 |
2133 GRC_LCLCTRL_GPIO_OE1 |
2134 GRC_LCLCTRL_GPIO_OE2 |
2135 GRC_LCLCTRL_GPIO_OUTPUT1 |
2136 GRC_LCLCTRL_GPIO_OUTPUT2;
2137 if (no_gpio2) {
2138 grc_local_ctrl &= ~(GRC_LCLCTRL_GPIO_OE2 |
2139 GRC_LCLCTRL_GPIO_OUTPUT2);
2141 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
2142 grc_local_ctrl, 100);
2144 grc_local_ctrl |= GRC_LCLCTRL_GPIO_OUTPUT0;
2146 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
2147 grc_local_ctrl, 100);
2149 if (!no_gpio2) {
2150 grc_local_ctrl &= ~GRC_LCLCTRL_GPIO_OUTPUT2;
2151 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
2152 grc_local_ctrl, 100);
2155 } else {
2156 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
2157 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701) {
2158 if (tp_peer != tp &&
2159 (tp_peer->tg3_flags & TG3_FLAG_INIT_COMPLETE) != 0)
2160 return;
2162 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
2163 (GRC_LCLCTRL_GPIO_OE1 |
2164 GRC_LCLCTRL_GPIO_OUTPUT1), 100);
2166 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
2167 GRC_LCLCTRL_GPIO_OE1, 100);
2169 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
2170 (GRC_LCLCTRL_GPIO_OE1 |
2171 GRC_LCLCTRL_GPIO_OUTPUT1), 100);
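/* Decide whether MAC_MODE_LINK_POLARITY should be set on a 5700 for the
 * given link speed; the required polarity depends on the LED mode and on
 * which external PHY (e.g. BCM5411) is fitted.
 */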
2176 static int tg3_5700_link_polarity(struct tg3 *tp, u32 speed)
2178 if (tp->led_ctrl == LED_CTRL_MODE_PHY_2)
2179 return 1;
2180 else if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5411) {
2181 if (speed != SPEED_10)
2182 return 1;
2183 } else if (speed == SPEED_10)
2184 return 1;
2186 return 0;
2189 static int tg3_setup_phy(struct tg3 *, int);
2191 #define RESET_KIND_SHUTDOWN 0
2192 #define RESET_KIND_INIT 1
2193 #define RESET_KIND_SUSPEND 2
2195 static void tg3_write_sig_post_reset(struct tg3 *, int);
2196 static int tg3_halt_cpu(struct tg3 *, u32);
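/* Put the PHY into its lowest usable power state before suspend.
 * SerDes ports, the 5906 (EPHY IDDQ) and FET-style PHYs each need their
 * own sequence; chips with known power-down bugs are left untouched, and
 * everything else ends with BMCR_PDOWN.
 */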
2198 static void tg3_power_down_phy(struct tg3 *tp, bool do_low_power)
2200 u32 val;
2202 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
2203 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
2204 u32 sg_dig_ctrl = tr32(SG_DIG_CTRL);
2205 u32 serdes_cfg = tr32(MAC_SERDES_CFG);
2207 sg_dig_ctrl |=
2208 SG_DIG_USING_HW_AUTONEG | SG_DIG_SOFT_RESET;
2209 tw32(SG_DIG_CTRL, sg_dig_ctrl);
2210 tw32(MAC_SERDES_CFG, serdes_cfg | (1 << 15));
2212 return;
2215 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
2216 tg3_bmcr_reset(tp);
2217 val = tr32(GRC_MISC_CFG);
2218 tw32_f(GRC_MISC_CFG, val | GRC_MISC_CFG_EPHY_IDDQ);
2219 udelay(40);
2220 return;
2221 } else if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
2222 u32 phytest;
2223 if (!tg3_readphy(tp, MII_TG3_FET_TEST, &phytest)) {
2224 u32 phy;
2226 tg3_writephy(tp, MII_ADVERTISE, 0);
2227 tg3_writephy(tp, MII_BMCR,
2228 BMCR_ANENABLE | BMCR_ANRESTART);
2230 tg3_writephy(tp, MII_TG3_FET_TEST,
2231 phytest | MII_TG3_FET_SHADOW_EN);
2232 if (!tg3_readphy(tp, MII_TG3_FET_SHDW_AUXMODE4, &phy)) {
2233 phy |= MII_TG3_FET_SHDW_AUXMODE4_SBPD;
2234 tg3_writephy(tp,
2235 MII_TG3_FET_SHDW_AUXMODE4,
2236 phy);
2238 tg3_writephy(tp, MII_TG3_FET_TEST, phytest);
2240 return;
2241 } else if (do_low_power) {
2242 tg3_writephy(tp, MII_TG3_EXT_CTRL,
2243 MII_TG3_EXT_CTRL_FORCE_LED_OFF);
2245 tg3_writephy(tp, MII_TG3_AUX_CTRL,
2246 MII_TG3_AUXCTL_SHDWSEL_PWRCTL |
2247 MII_TG3_AUXCTL_PCTL_100TX_LPWR |
2248 MII_TG3_AUXCTL_PCTL_SPR_ISOLATE |
2249 MII_TG3_AUXCTL_PCTL_VREG_11V);
2252 /* The PHY should not be powered down on some chips because
2253 * of bugs.
2254 */
2255 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
2256 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
2257 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780 &&
2258 (tp->phy_flags & TG3_PHYFLG_MII_SERDES)))
2259 return;
2261 if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5784_AX ||
2262 GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5761_AX) {
2263 val = tr32(TG3_CPMU_LSPD_1000MB_CLK);
2264 val &= ~CPMU_LSPD_1000MB_MACCLK_MASK;
2265 val |= CPMU_LSPD_1000MB_MACCLK_12_5;
2266 tw32_f(TG3_CPMU_LSPD_1000MB_CLK, val);
2269 tg3_writephy(tp, MII_BMCR, BMCR_PDOWN);
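/* NVRAM access is arbitrated with the on-chip firmware through the
 * NVRAM_SWARB register.  tg3_nvram_lock() requests the arbiter and polls
 * for the grant for up to ~160 ms; the lock nests via tp->nvram_lock_cnt
 * and is released again by tg3_nvram_unlock().
 */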
2272 /* tp->lock is held. */
2273 static int tg3_nvram_lock(struct tg3 *tp)
2275 if (tp->tg3_flags & TG3_FLAG_NVRAM) {
2276 int i;
2278 if (tp->nvram_lock_cnt == 0) {
2279 tw32(NVRAM_SWARB, SWARB_REQ_SET1);
2280 for (i = 0; i < 8000; i++) {
2281 if (tr32(NVRAM_SWARB) & SWARB_GNT1)
2282 break;
2283 udelay(20);
2285 if (i == 8000) {
2286 tw32(NVRAM_SWARB, SWARB_REQ_CLR1);
2287 return -ENODEV;
2290 tp->nvram_lock_cnt++;
2292 return 0;
2295 /* tp->lock is held. */
2296 static void tg3_nvram_unlock(struct tg3 *tp)
2298 if (tp->tg3_flags & TG3_FLAG_NVRAM) {
2299 if (tp->nvram_lock_cnt > 0)
2300 tp->nvram_lock_cnt--;
2301 if (tp->nvram_lock_cnt == 0)
2302 tw32_f(NVRAM_SWARB, SWARB_REQ_CLR1);
2306 /* tp->lock is held. */
2307 static void tg3_enable_nvram_access(struct tg3 *tp)
2309 if ((tp->tg3_flags2 & TG3_FLG2_5750_PLUS) &&
2310 !(tp->tg3_flags3 & TG3_FLG3_PROTECTED_NVRAM)) {
2311 u32 nvaccess = tr32(NVRAM_ACCESS);
2313 tw32(NVRAM_ACCESS, nvaccess | ACCESS_ENABLE);
2317 /* tp->lock is held. */
2318 static void tg3_disable_nvram_access(struct tg3 *tp)
2320 if ((tp->tg3_flags2 & TG3_FLG2_5750_PLUS) &&
2321 !(tp->tg3_flags3 & TG3_FLG3_PROTECTED_NVRAM)) {
2322 u32 nvaccess = tr32(NVRAM_ACCESS);
2324 tw32(NVRAM_ACCESS, nvaccess & ~ACCESS_ENABLE);
2328 static int tg3_nvram_read_using_eeprom(struct tg3 *tp,
2329 u32 offset, u32 *val)
2331 u32 tmp;
2332 int i;
2334 if (offset > EEPROM_ADDR_ADDR_MASK || (offset % 4) != 0)
2335 return -EINVAL;
2337 tmp = tr32(GRC_EEPROM_ADDR) & ~(EEPROM_ADDR_ADDR_MASK |
2338 EEPROM_ADDR_DEVID_MASK |
2339 EEPROM_ADDR_READ);
2340 tw32(GRC_EEPROM_ADDR,
2341 tmp |
2342 (0 << EEPROM_ADDR_DEVID_SHIFT) |
2343 ((offset << EEPROM_ADDR_ADDR_SHIFT) &
2344 EEPROM_ADDR_ADDR_MASK) |
2345 EEPROM_ADDR_READ | EEPROM_ADDR_START);
2347 for (i = 0; i < 1000; i++) {
2348 tmp = tr32(GRC_EEPROM_ADDR);
2350 if (tmp & EEPROM_ADDR_COMPLETE)
2351 break;
2352 msleep(1);
2354 if (!(tmp & EEPROM_ADDR_COMPLETE))
2355 return -EBUSY;
2357 tmp = tr32(GRC_EEPROM_DATA);
2359 /*
2360 * The data will always be opposite the native endian
2361 * format. Perform a blind byteswap to compensate.
2362 */
2363 *val = swab32(tmp);
2365 return 0;
2368 #define NVRAM_CMD_TIMEOUT 10000
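/* Kick off an NVRAM command and poll NVRAM_CMD for the DONE bit,
 * giving up after NVRAM_CMD_TIMEOUT polls of 10us each (~100 ms).
 */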
2370 static int tg3_nvram_exec_cmd(struct tg3 *tp, u32 nvram_cmd)
2372 int i;
2374 tw32(NVRAM_CMD, nvram_cmd);
2375 for (i = 0; i < NVRAM_CMD_TIMEOUT; i++) {
2376 udelay(10);
2377 if (tr32(NVRAM_CMD) & NVRAM_CMD_DONE) {
2378 udelay(10);
2379 break;
2383 if (i == NVRAM_CMD_TIMEOUT)
2384 return -EBUSY;
2386 return 0;
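/* Atmel AT45DB buffered flashes use pages that are not a power of two
 * (264 bytes on the AT45DB0x1B family), so a linear offset must be split
 * into a page index, shifted into the page-address field, plus the byte
 * offset within the page.  For example, with 264-byte pages and a 9-bit
 * in-page field, linear offset 1000 becomes page 3 (3 << 9 = 0x600) plus
 * byte 208, i.e. 0x6d0.  tg3_nvram_phys_addr() performs that translation;
 * tg3_nvram_logical_addr() below is its inverse.
 */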
2389 static u32 tg3_nvram_phys_addr(struct tg3 *tp, u32 addr)
2391 if ((tp->tg3_flags & TG3_FLAG_NVRAM) &&
2392 (tp->tg3_flags & TG3_FLAG_NVRAM_BUFFERED) &&
2393 (tp->tg3_flags2 & TG3_FLG2_FLASH) &&
2394 !(tp->tg3_flags3 & TG3_FLG3_NO_NVRAM_ADDR_TRANS) &&
2395 (tp->nvram_jedecnum == JEDEC_ATMEL))
2397 addr = ((addr / tp->nvram_pagesize) <<
2398 ATMEL_AT45DB0X1B_PAGE_POS) +
2399 (addr % tp->nvram_pagesize);
2401 return addr;
2404 static u32 tg3_nvram_logical_addr(struct tg3 *tp, u32 addr)
2406 if ((tp->tg3_flags & TG3_FLAG_NVRAM) &&
2407 (tp->tg3_flags & TG3_FLAG_NVRAM_BUFFERED) &&
2408 (tp->tg3_flags2 & TG3_FLG2_FLASH) &&
2409 !(tp->tg3_flags3 & TG3_FLG3_NO_NVRAM_ADDR_TRANS) &&
2410 (tp->nvram_jedecnum == JEDEC_ATMEL))
2412 addr = ((addr >> ATMEL_AT45DB0X1B_PAGE_POS) *
2413 tp->nvram_pagesize) +
2414 (addr & ((1 << ATMEL_AT45DB0X1B_PAGE_POS) - 1));
2416 return addr;
2419 /* NOTE: Data read in from NVRAM is byteswapped according to
2420 * the byteswapping settings for all other register accesses.
2421 * tg3 devices are BE devices, so on a BE machine, the data
2422 * returned will be exactly as it is seen in NVRAM. On a LE
2423 * machine, the 32-bit value will be byteswapped.
2424 */
2425 static int tg3_nvram_read(struct tg3 *tp, u32 offset, u32 *val)
2427 int ret;
2429 if (!(tp->tg3_flags & TG3_FLAG_NVRAM))
2430 return tg3_nvram_read_using_eeprom(tp, offset, val);
2432 offset = tg3_nvram_phys_addr(tp, offset);
2434 if (offset > NVRAM_ADDR_MSK)
2435 return -EINVAL;
2437 ret = tg3_nvram_lock(tp);
2438 if (ret)
2439 return ret;
2441 tg3_enable_nvram_access(tp);
2443 tw32(NVRAM_ADDR, offset);
2444 ret = tg3_nvram_exec_cmd(tp, NVRAM_CMD_RD | NVRAM_CMD_GO |
2445 NVRAM_CMD_FIRST | NVRAM_CMD_LAST | NVRAM_CMD_DONE);
2447 if (ret == 0)
2448 *val = tr32(NVRAM_RDDATA);
2450 tg3_disable_nvram_access(tp);
2452 tg3_nvram_unlock(tp);
2454 return ret;
2457 /* Ensures NVRAM data is in bytestream format. */
2458 static int tg3_nvram_read_be32(struct tg3 *tp, u32 offset, __be32 *val)
2460 u32 v;
2461 int res = tg3_nvram_read(tp, offset, &v);
2462 if (!res)
2463 *val = cpu_to_be32(v);
2464 return res;
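/* Program the station address into all four MAC address slots (and the
 * twelve extended slots on 5703/5704), then seed the transmit backoff
 * generator from the address bytes.  skip_mac_1 leaves slot 1 alone so a
 * firmware-managed address (ASF, for instance) is not overwritten.
 */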
2467 /* tp->lock is held. */
2468 static void __tg3_set_mac_addr(struct tg3 *tp, int skip_mac_1)
2470 u32 addr_high, addr_low;
2471 int i;
2473 addr_high = ((tp->dev->dev_addr[0] << 8) |
2474 tp->dev->dev_addr[1]);
2475 addr_low = ((tp->dev->dev_addr[2] << 24) |
2476 (tp->dev->dev_addr[3] << 16) |
2477 (tp->dev->dev_addr[4] << 8) |
2478 (tp->dev->dev_addr[5] << 0));
2479 for (i = 0; i < 4; i++) {
2480 if (i == 1 && skip_mac_1)
2481 continue;
2482 tw32(MAC_ADDR_0_HIGH + (i * 8), addr_high);
2483 tw32(MAC_ADDR_0_LOW + (i * 8), addr_low);
2486 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
2487 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
2488 for (i = 0; i < 12; i++) {
2489 tw32(MAC_EXTADDR_0_HIGH + (i * 8), addr_high);
2490 tw32(MAC_EXTADDR_0_LOW + (i * 8), addr_low);
2494 addr_high = (tp->dev->dev_addr[0] +
2495 tp->dev->dev_addr[1] +
2496 tp->dev->dev_addr[2] +
2497 tp->dev->dev_addr[3] +
2498 tp->dev->dev_addr[4] +
2499 tp->dev->dev_addr[5]) &
2500 TX_BACKOFF_SEED_MASK;
2501 tw32(MAC_TX_BACKOFF_SEED, addr_high);
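/* Transition the device to the requested PCI power state.  For D0 the
 * chip is simply woken and switched out of Vaux; for D1-D3hot the link
 * is reprogrammed for low-power/WoL operation, the MAC and clocks are
 * set up for wake-on-LAN, the PHY is powered down when nothing needs it,
 * and the aux power GPIOs are handed over via tg3_frob_aux_power().
 */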
2504 static int tg3_set_power_state(struct tg3 *tp, pci_power_t state)
2506 u32 misc_host_ctrl;
2507 bool device_should_wake, do_low_power;
2509 /* Make sure register accesses (indirect or otherwise)
2510 * will function correctly.
2511 */
2512 pci_write_config_dword(tp->pdev,
2513 TG3PCI_MISC_HOST_CTRL,
2514 tp->misc_host_ctrl);
2516 switch (state) {
2517 case PCI_D0:
2518 pci_enable_wake(tp->pdev, state, false);
2519 pci_set_power_state(tp->pdev, PCI_D0);
2521 /* Switch out of Vaux if it is a NIC */
2522 if (tp->tg3_flags2 & TG3_FLG2_IS_NIC)
2523 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl, 100);
2525 return 0;
2527 case PCI_D1:
2528 case PCI_D2:
2529 case PCI_D3hot:
2530 break;
2532 default:
2533 netdev_err(tp->dev, "Invalid power state (D%d) requested\n",
2534 state);
2535 return -EINVAL;
2538 /* Restore the CLKREQ setting. */
2539 if (tp->tg3_flags3 & TG3_FLG3_CLKREQ_BUG) {
2540 u16 lnkctl;
2542 pci_read_config_word(tp->pdev,
2543 tp->pcie_cap + PCI_EXP_LNKCTL,
2544 &lnkctl);
2545 lnkctl |= PCI_EXP_LNKCTL_CLKREQ_EN;
2546 pci_write_config_word(tp->pdev,
2547 tp->pcie_cap + PCI_EXP_LNKCTL,
2548 lnkctl);
2551 misc_host_ctrl = tr32(TG3PCI_MISC_HOST_CTRL);
2552 tw32(TG3PCI_MISC_HOST_CTRL,
2553 misc_host_ctrl | MISC_HOST_CTRL_MASK_PCI_INT);
2555 device_should_wake = pci_pme_capable(tp->pdev, state) &&
2556 device_may_wakeup(&tp->pdev->dev) &&
2557 (tp->tg3_flags & TG3_FLAG_WOL_ENABLE);
2559 if (tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB) {
2560 do_low_power = false;
2561 if ((tp->phy_flags & TG3_PHYFLG_IS_CONNECTED) &&
2562 !(tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)) {
2563 struct phy_device *phydev;
2564 u32 phyid, advertising;
2566 phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
2568 tp->phy_flags |= TG3_PHYFLG_IS_LOW_POWER;
2570 tp->link_config.orig_speed = phydev->speed;
2571 tp->link_config.orig_duplex = phydev->duplex;
2572 tp->link_config.orig_autoneg = phydev->autoneg;
2573 tp->link_config.orig_advertising = phydev->advertising;
2575 advertising = ADVERTISED_TP |
2576 ADVERTISED_Pause |
2577 ADVERTISED_Autoneg |
2578 ADVERTISED_10baseT_Half;
2580 if ((tp->tg3_flags & TG3_FLAG_ENABLE_ASF) ||
2581 device_should_wake) {
2582 if (tp->tg3_flags & TG3_FLAG_WOL_SPEED_100MB)
2583 advertising |=
2584 ADVERTISED_100baseT_Half |
2585 ADVERTISED_100baseT_Full |
2586 ADVERTISED_10baseT_Full;
2587 else
2588 advertising |= ADVERTISED_10baseT_Full;
2591 phydev->advertising = advertising;
2593 phy_start_aneg(phydev);
2595 phyid = phydev->drv->phy_id & phydev->drv->phy_id_mask;
2596 if (phyid != PHY_ID_BCMAC131) {
2597 phyid &= PHY_BCM_OUI_MASK;
2598 if (phyid == PHY_BCM_OUI_1 ||
2599 phyid == PHY_BCM_OUI_2 ||
2600 phyid == PHY_BCM_OUI_3)
2601 do_low_power = true;
2604 } else {
2605 do_low_power = true;
2607 if (!(tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)) {
2608 tp->phy_flags |= TG3_PHYFLG_IS_LOW_POWER;
2609 tp->link_config.orig_speed = tp->link_config.speed;
2610 tp->link_config.orig_duplex = tp->link_config.duplex;
2611 tp->link_config.orig_autoneg = tp->link_config.autoneg;
2614 if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES)) {
2615 tp->link_config.speed = SPEED_10;
2616 tp->link_config.duplex = DUPLEX_HALF;
2617 tp->link_config.autoneg = AUTONEG_ENABLE;
2618 tg3_setup_phy(tp, 0);
2622 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
2623 u32 val;
2625 val = tr32(GRC_VCPU_EXT_CTRL);
2626 tw32(GRC_VCPU_EXT_CTRL, val | GRC_VCPU_EXT_CTRL_DISABLE_WOL);
2627 } else if (!(tp->tg3_flags & TG3_FLAG_ENABLE_ASF)) {
2628 int i;
2629 u32 val;
2631 for (i = 0; i < 200; i++) {
2632 tg3_read_mem(tp, NIC_SRAM_FW_ASF_STATUS_MBOX, &val);
2633 if (val == ~NIC_SRAM_FIRMWARE_MBOX_MAGIC1)
2634 break;
2635 msleep(1);
2638 if (tp->tg3_flags & TG3_FLAG_WOL_CAP)
2639 tg3_write_mem(tp, NIC_SRAM_WOL_MBOX, WOL_SIGNATURE |
2640 WOL_DRV_STATE_SHUTDOWN |
2641 WOL_DRV_WOL |
2642 WOL_SET_MAGIC_PKT);
2644 if (device_should_wake) {
2645 u32 mac_mode;
2647 if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES)) {
2648 if (do_low_power) {
2649 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x5a);
2650 udelay(40);
2653 if (tp->phy_flags & TG3_PHYFLG_MII_SERDES)
2654 mac_mode = MAC_MODE_PORT_MODE_GMII;
2655 else
2656 mac_mode = MAC_MODE_PORT_MODE_MII;
2658 mac_mode |= tp->mac_mode & MAC_MODE_LINK_POLARITY;
2659 if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
2660 ASIC_REV_5700) {
2661 u32 speed = (tp->tg3_flags &
2662 TG3_FLAG_WOL_SPEED_100MB) ?
2663 SPEED_100 : SPEED_10;
2664 if (tg3_5700_link_polarity(tp, speed))
2665 mac_mode |= MAC_MODE_LINK_POLARITY;
2666 else
2667 mac_mode &= ~MAC_MODE_LINK_POLARITY;
2669 } else {
2670 mac_mode = MAC_MODE_PORT_MODE_TBI;
2673 if (!(tp->tg3_flags2 & TG3_FLG2_5750_PLUS))
2674 tw32(MAC_LED_CTRL, tp->led_ctrl);
2676 mac_mode |= MAC_MODE_MAGIC_PKT_ENABLE;
2677 if (((tp->tg3_flags2 & TG3_FLG2_5705_PLUS) &&
2678 !(tp->tg3_flags2 & TG3_FLG2_5780_CLASS)) &&
2679 ((tp->tg3_flags & TG3_FLAG_ENABLE_ASF) ||
2680 (tp->tg3_flags3 & TG3_FLG3_ENABLE_APE)))
2681 mac_mode |= MAC_MODE_KEEP_FRAME_IN_WOL;
2683 if (tp->tg3_flags3 & TG3_FLG3_ENABLE_APE) {
2684 mac_mode |= tp->mac_mode &
2685 (MAC_MODE_APE_TX_EN | MAC_MODE_APE_RX_EN);
2686 if (mac_mode & MAC_MODE_APE_TX_EN)
2687 mac_mode |= MAC_MODE_TDE_ENABLE;
2690 tw32_f(MAC_MODE, mac_mode);
2691 udelay(100);
2693 tw32_f(MAC_RX_MODE, RX_MODE_ENABLE);
2694 udelay(10);
2697 if (!(tp->tg3_flags & TG3_FLAG_WOL_SPEED_100MB) &&
2698 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
2699 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701)) {
2700 u32 base_val;
2702 base_val = tp->pci_clock_ctrl;
2703 base_val |= (CLOCK_CTRL_RXCLK_DISABLE |
2704 CLOCK_CTRL_TXCLK_DISABLE);
2706 tw32_wait_f(TG3PCI_CLOCK_CTRL, base_val | CLOCK_CTRL_ALTCLK |
2707 CLOCK_CTRL_PWRDOWN_PLL133, 40);
2708 } else if ((tp->tg3_flags2 & TG3_FLG2_5780_CLASS) ||
2709 (tp->tg3_flags & TG3_FLAG_CPMU_PRESENT) ||
2710 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)) {
2711 /* do nothing */
2712 } else if (!((tp->tg3_flags2 & TG3_FLG2_5750_PLUS) &&
2713 (tp->tg3_flags & TG3_FLAG_ENABLE_ASF))) {
2714 u32 newbits1, newbits2;
2716 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
2717 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
2718 newbits1 = (CLOCK_CTRL_RXCLK_DISABLE |
2719 CLOCK_CTRL_TXCLK_DISABLE |
2720 CLOCK_CTRL_ALTCLK);
2721 newbits2 = newbits1 | CLOCK_CTRL_44MHZ_CORE;
2722 } else if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
2723 newbits1 = CLOCK_CTRL_625_CORE;
2724 newbits2 = newbits1 | CLOCK_CTRL_ALTCLK;
2725 } else {
2726 newbits1 = CLOCK_CTRL_ALTCLK;
2727 newbits2 = newbits1 | CLOCK_CTRL_44MHZ_CORE;
2730 tw32_wait_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl | newbits1,
2731 40);
2733 tw32_wait_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl | newbits2,
2734 40);
2736 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
2737 u32 newbits3;
2739 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
2740 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
2741 newbits3 = (CLOCK_CTRL_RXCLK_DISABLE |
2742 CLOCK_CTRL_TXCLK_DISABLE |
2743 CLOCK_CTRL_44MHZ_CORE);
2744 } else {
2745 newbits3 = CLOCK_CTRL_44MHZ_CORE;
2748 tw32_wait_f(TG3PCI_CLOCK_CTRL,
2749 tp->pci_clock_ctrl | newbits3, 40);
2753 if (!(device_should_wake) &&
2754 !(tp->tg3_flags & TG3_FLAG_ENABLE_ASF))
2755 tg3_power_down_phy(tp, do_low_power);
2757 tg3_frob_aux_power(tp);
2759 /* Workaround for unstable PLL clock */
2760 if ((GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_AX) ||
2761 (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_BX)) {
2762 u32 val = tr32(0x7d00);
2764 val &= ~((1 << 16) | (1 << 4) | (1 << 2) | (1 << 1) | 1);
2765 tw32(0x7d00, val);
2766 if (!(tp->tg3_flags & TG3_FLAG_ENABLE_ASF)) {
2767 int err;
2769 err = tg3_nvram_lock(tp);
2770 tg3_halt_cpu(tp, RX_CPU_BASE);
2771 if (!err)
2772 tg3_nvram_unlock(tp);
2776 tg3_write_sig_post_reset(tp, RESET_KIND_SHUTDOWN);
2778 if (device_should_wake)
2779 pci_enable_wake(tp->pdev, state, true);
2781 /* Finally, set the new power state. */
2782 pci_set_power_state(tp->pdev, state);
2784 return 0;
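/* Decode the MII_TG3_AUX_STAT speed/duplex field into SPEED_* and
 * DUPLEX_* values; FET-style PHYs encode the result differently and are
 * handled in the default case.
 */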
2787 static void tg3_aux_stat_to_speed_duplex(struct tg3 *tp, u32 val, u16 *speed, u8 *duplex)
2789 switch (val & MII_TG3_AUX_STAT_SPDMASK) {
2790 case MII_TG3_AUX_STAT_10HALF:
2791 *speed = SPEED_10;
2792 *duplex = DUPLEX_HALF;
2793 break;
2795 case MII_TG3_AUX_STAT_10FULL:
2796 *speed = SPEED_10;
2797 *duplex = DUPLEX_FULL;
2798 break;
2800 case MII_TG3_AUX_STAT_100HALF:
2801 *speed = SPEED_100;
2802 *duplex = DUPLEX_HALF;
2803 break;
2805 case MII_TG3_AUX_STAT_100FULL:
2806 *speed = SPEED_100;
2807 *duplex = DUPLEX_FULL;
2808 break;
2810 case MII_TG3_AUX_STAT_1000HALF:
2811 *speed = SPEED_1000;
2812 *duplex = DUPLEX_HALF;
2813 break;
2815 case MII_TG3_AUX_STAT_1000FULL:
2816 *speed = SPEED_1000;
2817 *duplex = DUPLEX_FULL;
2818 break;
2820 default:
2821 if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
2822 *speed = (val & MII_TG3_AUX_STAT_100) ? SPEED_100 :
2823 SPEED_10;
2824 *duplex = (val & MII_TG3_AUX_STAT_FULL) ? DUPLEX_FULL :
2825 DUPLEX_HALF;
2826 break;
2828 *speed = SPEED_INVALID;
2829 *duplex = DUPLEX_INVALID;
2830 break;
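/* Program the copper PHY advertisement registers and (re)start
 * autonegotiation according to tp->link_config.  Low-power mode
 * advertises only 10Mb (plus 100Mb when 100Mb WoL is enabled); when
 * autonegotiation is disabled the requested speed/duplex is forced
 * through BMCR instead.
 */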
2834 static void tg3_phy_copper_begin(struct tg3 *tp)
2836 u32 new_adv;
2837 int i;
2839 if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) {
2840 /* Entering low power mode. Disable gigabit and
2841 * 100baseT advertisements.
2842 */
2843 tg3_writephy(tp, MII_TG3_CTRL, 0);
2845 new_adv = (ADVERTISE_10HALF | ADVERTISE_10FULL |
2846 ADVERTISE_CSMA | ADVERTISE_PAUSE_CAP);
2847 if (tp->tg3_flags & TG3_FLAG_WOL_SPEED_100MB)
2848 new_adv |= (ADVERTISE_100HALF | ADVERTISE_100FULL);
2850 tg3_writephy(tp, MII_ADVERTISE, new_adv);
2851 } else if (tp->link_config.speed == SPEED_INVALID) {
2852 if (tp->phy_flags & TG3_PHYFLG_10_100_ONLY)
2853 tp->link_config.advertising &=
2854 ~(ADVERTISED_1000baseT_Half |
2855 ADVERTISED_1000baseT_Full);
2857 new_adv = ADVERTISE_CSMA;
2858 if (tp->link_config.advertising & ADVERTISED_10baseT_Half)
2859 new_adv |= ADVERTISE_10HALF;
2860 if (tp->link_config.advertising & ADVERTISED_10baseT_Full)
2861 new_adv |= ADVERTISE_10FULL;
2862 if (tp->link_config.advertising & ADVERTISED_100baseT_Half)
2863 new_adv |= ADVERTISE_100HALF;
2864 if (tp->link_config.advertising & ADVERTISED_100baseT_Full)
2865 new_adv |= ADVERTISE_100FULL;
2867 new_adv |= tg3_advert_flowctrl_1000T(tp->link_config.flowctrl);
2869 tg3_writephy(tp, MII_ADVERTISE, new_adv);
2871 if (tp->link_config.advertising &
2872 (ADVERTISED_1000baseT_Half | ADVERTISED_1000baseT_Full)) {
2873 new_adv = 0;
2874 if (tp->link_config.advertising & ADVERTISED_1000baseT_Half)
2875 new_adv |= MII_TG3_CTRL_ADV_1000_HALF;
2876 if (tp->link_config.advertising & ADVERTISED_1000baseT_Full)
2877 new_adv |= MII_TG3_CTRL_ADV_1000_FULL;
2878 if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY) &&
2879 (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
2880 tp->pci_chip_rev_id == CHIPREV_ID_5701_B0))
2881 new_adv |= (MII_TG3_CTRL_AS_MASTER |
2882 MII_TG3_CTRL_ENABLE_AS_MASTER);
2883 tg3_writephy(tp, MII_TG3_CTRL, new_adv);
2884 } else {
2885 tg3_writephy(tp, MII_TG3_CTRL, 0);
2887 } else {
2888 new_adv = tg3_advert_flowctrl_1000T(tp->link_config.flowctrl);
2889 new_adv |= ADVERTISE_CSMA;
2891 /* Asking for a specific link mode. */
2892 if (tp->link_config.speed == SPEED_1000) {
2893 tg3_writephy(tp, MII_ADVERTISE, new_adv);
2895 if (tp->link_config.duplex == DUPLEX_FULL)
2896 new_adv = MII_TG3_CTRL_ADV_1000_FULL;
2897 else
2898 new_adv = MII_TG3_CTRL_ADV_1000_HALF;
2899 if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
2900 tp->pci_chip_rev_id == CHIPREV_ID_5701_B0)
2901 new_adv |= (MII_TG3_CTRL_AS_MASTER |
2902 MII_TG3_CTRL_ENABLE_AS_MASTER);
2903 } else {
2904 if (tp->link_config.speed == SPEED_100) {
2905 if (tp->link_config.duplex == DUPLEX_FULL)
2906 new_adv |= ADVERTISE_100FULL;
2907 else
2908 new_adv |= ADVERTISE_100HALF;
2909 } else {
2910 if (tp->link_config.duplex == DUPLEX_FULL)
2911 new_adv |= ADVERTISE_10FULL;
2912 else
2913 new_adv |= ADVERTISE_10HALF;
2915 tg3_writephy(tp, MII_ADVERTISE, new_adv);
2917 new_adv = 0;
2920 tg3_writephy(tp, MII_TG3_CTRL, new_adv);
2923 if (tp->link_config.autoneg == AUTONEG_DISABLE &&
2924 tp->link_config.speed != SPEED_INVALID) {
2925 u32 bmcr, orig_bmcr;
2927 tp->link_config.active_speed = tp->link_config.speed;
2928 tp->link_config.active_duplex = tp->link_config.duplex;
2930 bmcr = 0;
2931 switch (tp->link_config.speed) {
2932 default:
2933 case SPEED_10:
2934 break;
2936 case SPEED_100:
2937 bmcr |= BMCR_SPEED100;
2938 break;
2940 case SPEED_1000:
2941 bmcr |= TG3_BMCR_SPEED1000;
2942 break;
2945 if (tp->link_config.duplex == DUPLEX_FULL)
2946 bmcr |= BMCR_FULLDPLX;
2948 if (!tg3_readphy(tp, MII_BMCR, &orig_bmcr) &&
2949 (bmcr != orig_bmcr)) {
2950 tg3_writephy(tp, MII_BMCR, BMCR_LOOPBACK);
2951 for (i = 0; i < 1500; i++) {
2952 u32 tmp;
2954 udelay(10);
2955 if (tg3_readphy(tp, MII_BMSR, &tmp) ||
2956 tg3_readphy(tp, MII_BMSR, &tmp))
2957 continue;
2958 if (!(tmp & BMSR_LSTATUS)) {
2959 udelay(40);
2960 break;
2963 tg3_writephy(tp, MII_BMCR, bmcr);
2964 udelay(40);
2966 } else {
2967 tg3_writephy(tp, MII_BMCR,
2968 BMCR_ANENABLE | BMCR_ANRESTART);
2972 static int tg3_init_5401phy_dsp(struct tg3 *tp)
2974 int err;
2976 /* Turn off tap power management. */
2977 /* Set Extended packet length bit */
2978 err = tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x4c20);
2980 err |= tg3_phydsp_write(tp, 0x0012, 0x1804);
2981 err |= tg3_phydsp_write(tp, 0x0013, 0x1204);
2982 err |= tg3_phydsp_write(tp, 0x8006, 0x0132);
2983 err |= tg3_phydsp_write(tp, 0x8006, 0x0232);
2984 err |= tg3_phydsp_write(tp, 0x201f, 0x0a20);
2986 udelay(40);
2988 return err;
2991 static int tg3_copper_is_advertising_all(struct tg3 *tp, u32 mask)
2993 u32 adv_reg, all_mask = 0;
2995 if (mask & ADVERTISED_10baseT_Half)
2996 all_mask |= ADVERTISE_10HALF;
2997 if (mask & ADVERTISED_10baseT_Full)
2998 all_mask |= ADVERTISE_10FULL;
2999 if (mask & ADVERTISED_100baseT_Half)
3000 all_mask |= ADVERTISE_100HALF;
3001 if (mask & ADVERTISED_100baseT_Full)
3002 all_mask |= ADVERTISE_100FULL;
3004 if (tg3_readphy(tp, MII_ADVERTISE, &adv_reg))
3005 return 0;
3007 if ((adv_reg & all_mask) != all_mask)
3008 return 0;
3009 if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
3010 u32 tg3_ctrl;
3012 all_mask = 0;
3013 if (mask & ADVERTISED_1000baseT_Half)
3014 all_mask |= ADVERTISE_1000HALF;
3015 if (mask & ADVERTISED_1000baseT_Full)
3016 all_mask |= ADVERTISE_1000FULL;
3018 if (tg3_readphy(tp, MII_TG3_CTRL, &tg3_ctrl))
3019 return 0;
3021 if ((tg3_ctrl & all_mask) != all_mask)
3022 return 0;
3024 return 1;
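/* Check that the pause bits currently advertised in MII_ADVERTISE match
 * what tp->link_config.flowctrl asks for.  At full duplex a mismatch
 * returns 0 so the caller does not treat the link as up; at half duplex
 * the advertisement is rewritten so the next renegotiation gets it right.
 */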
3027 static int tg3_adv_1000T_flowctrl_ok(struct tg3 *tp, u32 *lcladv, u32 *rmtadv)
3029 u32 curadv, reqadv;
3031 if (tg3_readphy(tp, MII_ADVERTISE, lcladv))
3032 return 1;
3034 curadv = *lcladv & (ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM);
3035 reqadv = tg3_advert_flowctrl_1000T(tp->link_config.flowctrl);
3037 if (tp->link_config.active_duplex == DUPLEX_FULL) {
3038 if (curadv != reqadv)
3039 return 0;
3041 if (tp->tg3_flags & TG3_FLAG_PAUSE_AUTONEG)
3042 tg3_readphy(tp, MII_LPA, rmtadv);
3043 } else {
3044 /* Reprogram the advertisement register, even if it
3045 * does not affect the current link. If the link
3046 * gets renegotiated in the future, we can save an
3047 * additional renegotiation cycle by advertising
3048 * it correctly in the first place.
3049 */
3050 if (curadv != reqadv) {
3051 *lcladv &= ~(ADVERTISE_PAUSE_CAP |
3052 ADVERTISE_PAUSE_ASYM);
3053 tg3_writephy(tp, MII_ADVERTISE, *lcladv | reqadv);
3057 return 1;
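/* Bring up (or re-check) the link on a copper PHY: apply per-chip PHY
 * workarounds, poll BMSR/AUX_STAT for the negotiated speed and duplex,
 * verify the advertised flow control, and program MAC_MODE and the
 * carrier state to match.  Link changes are reported through
 * netif_carrier_*() and tg3_link_report().
 */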
3060 static int tg3_setup_copper_phy(struct tg3 *tp, int force_reset)
3062 int current_link_up;
3063 u32 bmsr, dummy;
3064 u32 lcl_adv, rmt_adv;
3065 u16 current_speed;
3066 u8 current_duplex;
3067 int i, err;
3069 tw32(MAC_EVENT, 0);
3071 tw32_f(MAC_STATUS,
3072 (MAC_STATUS_SYNC_CHANGED |
3073 MAC_STATUS_CFG_CHANGED |
3074 MAC_STATUS_MI_COMPLETION |
3075 MAC_STATUS_LNKSTATE_CHANGED));
3076 udelay(40);
3078 if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
3079 tw32_f(MAC_MI_MODE,
3080 (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
3081 udelay(80);
3084 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x02);
3086 /* Some third-party PHYs need to be reset on link going
3087 * down.
3088 */
3089 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
3090 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
3091 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) &&
3092 netif_carrier_ok(tp->dev)) {
3093 tg3_readphy(tp, MII_BMSR, &bmsr);
3094 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
3095 !(bmsr & BMSR_LSTATUS))
3096 force_reset = 1;
3098 if (force_reset)
3099 tg3_phy_reset(tp);
3101 if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5401) {
3102 tg3_readphy(tp, MII_BMSR, &bmsr);
3103 if (tg3_readphy(tp, MII_BMSR, &bmsr) ||
3104 !(tp->tg3_flags & TG3_FLAG_INIT_COMPLETE))
3105 bmsr = 0;
3107 if (!(bmsr & BMSR_LSTATUS)) {
3108 err = tg3_init_5401phy_dsp(tp);
3109 if (err)
3110 return err;
3112 tg3_readphy(tp, MII_BMSR, &bmsr);
3113 for (i = 0; i < 1000; i++) {
3114 udelay(10);
3115 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
3116 (bmsr & BMSR_LSTATUS)) {
3117 udelay(40);
3118 break;
3122 if ((tp->phy_id & TG3_PHY_ID_REV_MASK) ==
3123 TG3_PHY_REV_BCM5401_B0 &&
3124 !(bmsr & BMSR_LSTATUS) &&
3125 tp->link_config.active_speed == SPEED_1000) {
3126 err = tg3_phy_reset(tp);
3127 if (!err)
3128 err = tg3_init_5401phy_dsp(tp);
3129 if (err)
3130 return err;
3133 } else if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
3134 tp->pci_chip_rev_id == CHIPREV_ID_5701_B0) {
3135 /* 5701 {A0,B0} CRC bug workaround */
3136 tg3_writephy(tp, 0x15, 0x0a75);
3137 tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8c68);
3138 tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8d68);
3139 tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8c68);
3142 /* Clear pending interrupts... */
3143 tg3_readphy(tp, MII_TG3_ISTAT, &dummy);
3144 tg3_readphy(tp, MII_TG3_ISTAT, &dummy);
3146 if (tp->phy_flags & TG3_PHYFLG_USE_MI_INTERRUPT)
3147 tg3_writephy(tp, MII_TG3_IMASK, ~MII_TG3_INT_LINKCHG);
3148 else if (!(tp->phy_flags & TG3_PHYFLG_IS_FET))
3149 tg3_writephy(tp, MII_TG3_IMASK, ~0);
3151 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
3152 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
3153 if (tp->led_ctrl == LED_CTRL_MODE_PHY_1)
3154 tg3_writephy(tp, MII_TG3_EXT_CTRL,
3155 MII_TG3_EXT_CTRL_LNK3_LED_MODE);
3156 else
3157 tg3_writephy(tp, MII_TG3_EXT_CTRL, 0);
3160 current_link_up = 0;
3161 current_speed = SPEED_INVALID;
3162 current_duplex = DUPLEX_INVALID;
3164 if (tp->phy_flags & TG3_PHYFLG_CAPACITIVE_COUPLING) {
3165 u32 val;
3167 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x4007);
3168 tg3_readphy(tp, MII_TG3_AUX_CTRL, &val);
3169 if (!(val & (1 << 10))) {
3170 val |= (1 << 10);
3171 tg3_writephy(tp, MII_TG3_AUX_CTRL, val);
3172 goto relink;
3176 bmsr = 0;
3177 for (i = 0; i < 100; i++) {
3178 tg3_readphy(tp, MII_BMSR, &bmsr);
3179 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
3180 (bmsr & BMSR_LSTATUS))
3181 break;
3182 udelay(40);
3185 if (bmsr & BMSR_LSTATUS) {
3186 u32 aux_stat, bmcr;
3188 tg3_readphy(tp, MII_TG3_AUX_STAT, &aux_stat);
3189 for (i = 0; i < 2000; i++) {
3190 udelay(10);
3191 if (!tg3_readphy(tp, MII_TG3_AUX_STAT, &aux_stat) &&
3192 aux_stat)
3193 break;
3196 tg3_aux_stat_to_speed_duplex(tp, aux_stat,
3197 &current_speed,
3198 &current_duplex);
3200 bmcr = 0;
3201 for (i = 0; i < 200; i++) {
3202 tg3_readphy(tp, MII_BMCR, &bmcr);
3203 if (tg3_readphy(tp, MII_BMCR, &bmcr))
3204 continue;
3205 if (bmcr && bmcr != 0x7fff)
3206 break;
3207 udelay(10);
3210 lcl_adv = 0;
3211 rmt_adv = 0;
3213 tp->link_config.active_speed = current_speed;
3214 tp->link_config.active_duplex = current_duplex;
3216 if (tp->link_config.autoneg == AUTONEG_ENABLE) {
3217 if ((bmcr & BMCR_ANENABLE) &&
3218 tg3_copper_is_advertising_all(tp,
3219 tp->link_config.advertising)) {
3220 if (tg3_adv_1000T_flowctrl_ok(tp, &lcl_adv,
3221 &rmt_adv))
3222 current_link_up = 1;
3224 } else {
3225 if (!(bmcr & BMCR_ANENABLE) &&
3226 tp->link_config.speed == current_speed &&
3227 tp->link_config.duplex == current_duplex &&
3228 tp->link_config.flowctrl ==
3229 tp->link_config.active_flowctrl) {
3230 current_link_up = 1;
3234 if (current_link_up == 1 &&
3235 tp->link_config.active_duplex == DUPLEX_FULL)
3236 tg3_setup_flow_control(tp, lcl_adv, rmt_adv);
3239 relink:
3240 if (current_link_up == 0 || (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)) {
3241 u32 tmp;
3243 tg3_phy_copper_begin(tp);
3245 tg3_readphy(tp, MII_BMSR, &tmp);
3246 if (!tg3_readphy(tp, MII_BMSR, &tmp) &&
3247 (tmp & BMSR_LSTATUS))
3248 current_link_up = 1;
3251 tp->mac_mode &= ~MAC_MODE_PORT_MODE_MASK;
3252 if (current_link_up == 1) {
3253 if (tp->link_config.active_speed == SPEED_100 ||
3254 tp->link_config.active_speed == SPEED_10)
3255 tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
3256 else
3257 tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
3258 } else if (tp->phy_flags & TG3_PHYFLG_IS_FET)
3259 tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
3260 else
3261 tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
3263 tp->mac_mode &= ~MAC_MODE_HALF_DUPLEX;
3264 if (tp->link_config.active_duplex == DUPLEX_HALF)
3265 tp->mac_mode |= MAC_MODE_HALF_DUPLEX;
3267 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700) {
3268 if (current_link_up == 1 &&
3269 tg3_5700_link_polarity(tp, tp->link_config.active_speed))
3270 tp->mac_mode |= MAC_MODE_LINK_POLARITY;
3271 else
3272 tp->mac_mode &= ~MAC_MODE_LINK_POLARITY;
3275 /* ??? Without this setting Netgear GA302T PHY does not
3276 * ??? send/receive packets...
3277 */
3278 if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5411 &&
3279 tp->pci_chip_rev_id == CHIPREV_ID_5700_ALTIMA) {
3280 tp->mi_mode |= MAC_MI_MODE_AUTO_POLL;
3281 tw32_f(MAC_MI_MODE, tp->mi_mode);
3282 udelay(80);
3285 tw32_f(MAC_MODE, tp->mac_mode);
3286 udelay(40);
3288 if (tp->tg3_flags & TG3_FLAG_USE_LINKCHG_REG) {
3289 /* Polled via timer. */
3290 tw32_f(MAC_EVENT, 0);
3291 } else {
3292 tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
3294 udelay(40);
3296 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 &&
3297 current_link_up == 1 &&
3298 tp->link_config.active_speed == SPEED_1000 &&
3299 ((tp->tg3_flags & TG3_FLAG_PCIX_MODE) ||
3300 (tp->tg3_flags & TG3_FLAG_PCI_HIGH_SPEED))) {
3301 udelay(120);
3302 tw32_f(MAC_STATUS,
3303 (MAC_STATUS_SYNC_CHANGED |
3304 MAC_STATUS_CFG_CHANGED));
3305 udelay(40);
3306 tg3_write_mem(tp,
3307 NIC_SRAM_FIRMWARE_MBOX,
3308 NIC_SRAM_FIRMWARE_MBOX_MAGIC2);
3311 /* Prevent send BD corruption. */
3312 if (tp->tg3_flags3 & TG3_FLG3_CLKREQ_BUG) {
3313 u16 oldlnkctl, newlnkctl;
3315 pci_read_config_word(tp->pdev,
3316 tp->pcie_cap + PCI_EXP_LNKCTL,
3317 &oldlnkctl);
3318 if (tp->link_config.active_speed == SPEED_100 ||
3319 tp->link_config.active_speed == SPEED_10)
3320 newlnkctl = oldlnkctl & ~PCI_EXP_LNKCTL_CLKREQ_EN;
3321 else
3322 newlnkctl = oldlnkctl | PCI_EXP_LNKCTL_CLKREQ_EN;
3323 if (newlnkctl != oldlnkctl)
3324 pci_write_config_word(tp->pdev,
3325 tp->pcie_cap + PCI_EXP_LNKCTL,
3326 newlnkctl);
3329 if (current_link_up != netif_carrier_ok(tp->dev)) {
3330 if (current_link_up)
3331 netif_carrier_on(tp->dev);
3332 else
3333 netif_carrier_off(tp->dev);
3334 tg3_link_report(tp);
3337 return 0;
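/* Software 1000BASE-X autonegotiation state machine (IEEE 802.3 clause
 * 37) used when the SerDes hardware cannot autonegotiate by itself.  The
 * state values, the MR_* management flags and the tx/rx config words
 * below mirror the variables named in the clause 37 state diagram.
 */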
3340 struct tg3_fiber_aneginfo {
3341 int state;
3342 #define ANEG_STATE_UNKNOWN 0
3343 #define ANEG_STATE_AN_ENABLE 1
3344 #define ANEG_STATE_RESTART_INIT 2
3345 #define ANEG_STATE_RESTART 3
3346 #define ANEG_STATE_DISABLE_LINK_OK 4
3347 #define ANEG_STATE_ABILITY_DETECT_INIT 5
3348 #define ANEG_STATE_ABILITY_DETECT 6
3349 #define ANEG_STATE_ACK_DETECT_INIT 7
3350 #define ANEG_STATE_ACK_DETECT 8
3351 #define ANEG_STATE_COMPLETE_ACK_INIT 9
3352 #define ANEG_STATE_COMPLETE_ACK 10
3353 #define ANEG_STATE_IDLE_DETECT_INIT 11
3354 #define ANEG_STATE_IDLE_DETECT 12
3355 #define ANEG_STATE_LINK_OK 13
3356 #define ANEG_STATE_NEXT_PAGE_WAIT_INIT 14
3357 #define ANEG_STATE_NEXT_PAGE_WAIT 15
3359 u32 flags;
3360 #define MR_AN_ENABLE 0x00000001
3361 #define MR_RESTART_AN 0x00000002
3362 #define MR_AN_COMPLETE 0x00000004
3363 #define MR_PAGE_RX 0x00000008
3364 #define MR_NP_LOADED 0x00000010
3365 #define MR_TOGGLE_TX 0x00000020
3366 #define MR_LP_ADV_FULL_DUPLEX 0x00000040
3367 #define MR_LP_ADV_HALF_DUPLEX 0x00000080
3368 #define MR_LP_ADV_SYM_PAUSE 0x00000100
3369 #define MR_LP_ADV_ASYM_PAUSE 0x00000200
3370 #define MR_LP_ADV_REMOTE_FAULT1 0x00000400
3371 #define MR_LP_ADV_REMOTE_FAULT2 0x00000800
3372 #define MR_LP_ADV_NEXT_PAGE 0x00001000
3373 #define MR_TOGGLE_RX 0x00002000
3374 #define MR_NP_RX 0x00004000
3376 #define MR_LINK_OK 0x80000000
3378 unsigned long link_time, cur_time;
3380 u32 ability_match_cfg;
3381 int ability_match_count;
3383 char ability_match, idle_match, ack_match;
3385 u32 txconfig, rxconfig;
3386 #define ANEG_CFG_NP 0x00000080
3387 #define ANEG_CFG_ACK 0x00000040
3388 #define ANEG_CFG_RF2 0x00000020
3389 #define ANEG_CFG_RF1 0x00000010
3390 #define ANEG_CFG_PS2 0x00000001
3391 #define ANEG_CFG_PS1 0x00008000
3392 #define ANEG_CFG_HD 0x00004000
3393 #define ANEG_CFG_FD 0x00002000
3394 #define ANEG_CFG_INVAL 0x00001f06
3397 #define ANEG_OK 0
3398 #define ANEG_DONE 1
3399 #define ANEG_TIMER_ENAB 2
3400 #define ANEG_FAILED -1
3402 #define ANEG_STATE_SETTLE_TIME 10000
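/* Advance the clause 37 state machine by one step.  The caller runs it
 * in a tight polling loop (see fiber_autoneg()); the return value either
 * asks for another tick (ANEG_OK/ANEG_TIMER_ENAB) or reports completion
 * (ANEG_DONE) or failure (ANEG_FAILED).
 */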
3404 static int tg3_fiber_aneg_smachine(struct tg3 *tp,
3405 struct tg3_fiber_aneginfo *ap)
3407 u16 flowctrl;
3408 unsigned long delta;
3409 u32 rx_cfg_reg;
3410 int ret;
3412 if (ap->state == ANEG_STATE_UNKNOWN) {
3413 ap->rxconfig = 0;
3414 ap->link_time = 0;
3415 ap->cur_time = 0;
3416 ap->ability_match_cfg = 0;
3417 ap->ability_match_count = 0;
3418 ap->ability_match = 0;
3419 ap->idle_match = 0;
3420 ap->ack_match = 0;
3422 ap->cur_time++;
3424 if (tr32(MAC_STATUS) & MAC_STATUS_RCVD_CFG) {
3425 rx_cfg_reg = tr32(MAC_RX_AUTO_NEG);
3427 if (rx_cfg_reg != ap->ability_match_cfg) {
3428 ap->ability_match_cfg = rx_cfg_reg;
3429 ap->ability_match = 0;
3430 ap->ability_match_count = 0;
3431 } else {
3432 if (++ap->ability_match_count > 1) {
3433 ap->ability_match = 1;
3434 ap->ability_match_cfg = rx_cfg_reg;
3437 if (rx_cfg_reg & ANEG_CFG_ACK)
3438 ap->ack_match = 1;
3439 else
3440 ap->ack_match = 0;
3442 ap->idle_match = 0;
3443 } else {
3444 ap->idle_match = 1;
3445 ap->ability_match_cfg = 0;
3446 ap->ability_match_count = 0;
3447 ap->ability_match = 0;
3448 ap->ack_match = 0;
3450 rx_cfg_reg = 0;
3453 ap->rxconfig = rx_cfg_reg;
3454 ret = ANEG_OK;
3456 switch (ap->state) {
3457 case ANEG_STATE_UNKNOWN:
3458 if (ap->flags & (MR_AN_ENABLE | MR_RESTART_AN))
3459 ap->state = ANEG_STATE_AN_ENABLE;
3461 /* fallthru */
3462 case ANEG_STATE_AN_ENABLE:
3463 ap->flags &= ~(MR_AN_COMPLETE | MR_PAGE_RX);
3464 if (ap->flags & MR_AN_ENABLE) {
3465 ap->link_time = 0;
3466 ap->cur_time = 0;
3467 ap->ability_match_cfg = 0;
3468 ap->ability_match_count = 0;
3469 ap->ability_match = 0;
3470 ap->idle_match = 0;
3471 ap->ack_match = 0;
3473 ap->state = ANEG_STATE_RESTART_INIT;
3474 } else {
3475 ap->state = ANEG_STATE_DISABLE_LINK_OK;
3477 break;
3479 case ANEG_STATE_RESTART_INIT:
3480 ap->link_time = ap->cur_time;
3481 ap->flags &= ~(MR_NP_LOADED);
3482 ap->txconfig = 0;
3483 tw32(MAC_TX_AUTO_NEG, 0);
3484 tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
3485 tw32_f(MAC_MODE, tp->mac_mode);
3486 udelay(40);
3488 ret = ANEG_TIMER_ENAB;
3489 ap->state = ANEG_STATE_RESTART;
3491 /* fallthru */
3492 case ANEG_STATE_RESTART:
3493 delta = ap->cur_time - ap->link_time;
3494 if (delta > ANEG_STATE_SETTLE_TIME)
3495 ap->state = ANEG_STATE_ABILITY_DETECT_INIT;
3496 else
3497 ret = ANEG_TIMER_ENAB;
3498 break;
3500 case ANEG_STATE_DISABLE_LINK_OK:
3501 ret = ANEG_DONE;
3502 break;
3504 case ANEG_STATE_ABILITY_DETECT_INIT:
3505 ap->flags &= ~(MR_TOGGLE_TX);
3506 ap->txconfig = ANEG_CFG_FD;
3507 flowctrl = tg3_advert_flowctrl_1000X(tp->link_config.flowctrl);
3508 if (flowctrl & ADVERTISE_1000XPAUSE)
3509 ap->txconfig |= ANEG_CFG_PS1;
3510 if (flowctrl & ADVERTISE_1000XPSE_ASYM)
3511 ap->txconfig |= ANEG_CFG_PS2;
3512 tw32(MAC_TX_AUTO_NEG, ap->txconfig);
3513 tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
3514 tw32_f(MAC_MODE, tp->mac_mode);
3515 udelay(40);
3517 ap->state = ANEG_STATE_ABILITY_DETECT;
3518 break;
3520 case ANEG_STATE_ABILITY_DETECT:
3521 if (ap->ability_match != 0 && ap->rxconfig != 0)
3522 ap->state = ANEG_STATE_ACK_DETECT_INIT;
3523 break;
3525 case ANEG_STATE_ACK_DETECT_INIT:
3526 ap->txconfig |= ANEG_CFG_ACK;
3527 tw32(MAC_TX_AUTO_NEG, ap->txconfig);
3528 tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
3529 tw32_f(MAC_MODE, tp->mac_mode);
3530 udelay(40);
3532 ap->state = ANEG_STATE_ACK_DETECT;
3534 /* fallthru */
3535 case ANEG_STATE_ACK_DETECT:
3536 if (ap->ack_match != 0) {
3537 if ((ap->rxconfig & ~ANEG_CFG_ACK) ==
3538 (ap->ability_match_cfg & ~ANEG_CFG_ACK)) {
3539 ap->state = ANEG_STATE_COMPLETE_ACK_INIT;
3540 } else {
3541 ap->state = ANEG_STATE_AN_ENABLE;
3543 } else if (ap->ability_match != 0 &&
3544 ap->rxconfig == 0) {
3545 ap->state = ANEG_STATE_AN_ENABLE;
3547 break;
3549 case ANEG_STATE_COMPLETE_ACK_INIT:
3550 if (ap->rxconfig & ANEG_CFG_INVAL) {
3551 ret = ANEG_FAILED;
3552 break;
3554 ap->flags &= ~(MR_LP_ADV_FULL_DUPLEX |
3555 MR_LP_ADV_HALF_DUPLEX |
3556 MR_LP_ADV_SYM_PAUSE |
3557 MR_LP_ADV_ASYM_PAUSE |
3558 MR_LP_ADV_REMOTE_FAULT1 |
3559 MR_LP_ADV_REMOTE_FAULT2 |
3560 MR_LP_ADV_NEXT_PAGE |
3561 MR_TOGGLE_RX |
3562 MR_NP_RX);
3563 if (ap->rxconfig & ANEG_CFG_FD)
3564 ap->flags |= MR_LP_ADV_FULL_DUPLEX;
3565 if (ap->rxconfig & ANEG_CFG_HD)
3566 ap->flags |= MR_LP_ADV_HALF_DUPLEX;
3567 if (ap->rxconfig & ANEG_CFG_PS1)
3568 ap->flags |= MR_LP_ADV_SYM_PAUSE;
3569 if (ap->rxconfig & ANEG_CFG_PS2)
3570 ap->flags |= MR_LP_ADV_ASYM_PAUSE;
3571 if (ap->rxconfig & ANEG_CFG_RF1)
3572 ap->flags |= MR_LP_ADV_REMOTE_FAULT1;
3573 if (ap->rxconfig & ANEG_CFG_RF2)
3574 ap->flags |= MR_LP_ADV_REMOTE_FAULT2;
3575 if (ap->rxconfig & ANEG_CFG_NP)
3576 ap->flags |= MR_LP_ADV_NEXT_PAGE;
3578 ap->link_time = ap->cur_time;
3580 ap->flags ^= (MR_TOGGLE_TX);
3581 if (ap->rxconfig & 0x0008)
3582 ap->flags |= MR_TOGGLE_RX;
3583 if (ap->rxconfig & ANEG_CFG_NP)
3584 ap->flags |= MR_NP_RX;
3585 ap->flags |= MR_PAGE_RX;
3587 ap->state = ANEG_STATE_COMPLETE_ACK;
3588 ret = ANEG_TIMER_ENAB;
3589 break;
3591 case ANEG_STATE_COMPLETE_ACK:
3592 if (ap->ability_match != 0 &&
3593 ap->rxconfig == 0) {
3594 ap->state = ANEG_STATE_AN_ENABLE;
3595 break;
3597 delta = ap->cur_time - ap->link_time;
3598 if (delta > ANEG_STATE_SETTLE_TIME) {
3599 if (!(ap->flags & (MR_LP_ADV_NEXT_PAGE))) {
3600 ap->state = ANEG_STATE_IDLE_DETECT_INIT;
3601 } else {
3602 if ((ap->txconfig & ANEG_CFG_NP) == 0 &&
3603 !(ap->flags & MR_NP_RX)) {
3604 ap->state = ANEG_STATE_IDLE_DETECT_INIT;
3605 } else {
3606 ret = ANEG_FAILED;
3610 break;
3612 case ANEG_STATE_IDLE_DETECT_INIT:
3613 ap->link_time = ap->cur_time;
3614 tp->mac_mode &= ~MAC_MODE_SEND_CONFIGS;
3615 tw32_f(MAC_MODE, tp->mac_mode);
3616 udelay(40);
3618 ap->state = ANEG_STATE_IDLE_DETECT;
3619 ret = ANEG_TIMER_ENAB;
3620 break;
3622 case ANEG_STATE_IDLE_DETECT:
3623 if (ap->ability_match != 0 &&
3624 ap->rxconfig == 0) {
3625 ap->state = ANEG_STATE_AN_ENABLE;
3626 break;
3628 delta = ap->cur_time - ap->link_time;
3629 if (delta > ANEG_STATE_SETTLE_TIME) {
3630 /* XXX another gem from the Broadcom driver :( */
3631 ap->state = ANEG_STATE_LINK_OK;
3633 break;
3635 case ANEG_STATE_LINK_OK:
3636 ap->flags |= (MR_AN_COMPLETE | MR_LINK_OK);
3637 ret = ANEG_DONE;
3638 break;
3640 case ANEG_STATE_NEXT_PAGE_WAIT_INIT:
3641 /* ??? unimplemented */
3642 break;
3644 case ANEG_STATE_NEXT_PAGE_WAIT:
3645 /* ??? unimplemented */
3646 break;
3648 default:
3649 ret = ANEG_FAILED;
3650 break;
3653 return ret;
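/* Run the software autonegotiation state machine to completion, ticking
 * it roughly every microsecond for up to ~195 ms.  The transmitted config
 * word and the MR_* result flags are returned through *txflags and
 * *rxflags; the function returns 1 when a usable full-duplex link was
 * negotiated.
 */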
3656 static int fiber_autoneg(struct tg3 *tp, u32 *txflags, u32 *rxflags)
3658 int res = 0;
3659 struct tg3_fiber_aneginfo aninfo;
3660 int status = ANEG_FAILED;
3661 unsigned int tick;
3662 u32 tmp;
3664 tw32_f(MAC_TX_AUTO_NEG, 0);
3666 tmp = tp->mac_mode & ~MAC_MODE_PORT_MODE_MASK;
3667 tw32_f(MAC_MODE, tmp | MAC_MODE_PORT_MODE_GMII);
3668 udelay(40);
3670 tw32_f(MAC_MODE, tp->mac_mode | MAC_MODE_SEND_CONFIGS);
3671 udelay(40);
3673 memset(&aninfo, 0, sizeof(aninfo));
3674 aninfo.flags |= MR_AN_ENABLE;
3675 aninfo.state = ANEG_STATE_UNKNOWN;
3676 aninfo.cur_time = 0;
3677 tick = 0;
3678 while (++tick < 195000) {
3679 status = tg3_fiber_aneg_smachine(tp, &aninfo);
3680 if (status == ANEG_DONE || status == ANEG_FAILED)
3681 break;
3683 udelay(1);
3686 tp->mac_mode &= ~MAC_MODE_SEND_CONFIGS;
3687 tw32_f(MAC_MODE, tp->mac_mode);
3688 udelay(40);
3690 *txflags = aninfo.txconfig;
3691 *rxflags = aninfo.flags;
3693 if (status == ANEG_DONE &&
3694 (aninfo.flags & (MR_AN_COMPLETE | MR_LINK_OK |
3695 MR_LP_ADV_FULL_DUPLEX)))
3696 res = 1;
3698 return res;
3701 static void tg3_init_bcm8002(struct tg3 *tp)
3703 u32 mac_status = tr32(MAC_STATUS);
3704 int i;
3706 /* Reset when initting first time or we have a link. */
3707 if ((tp->tg3_flags & TG3_FLAG_INIT_COMPLETE) &&
3708 !(mac_status & MAC_STATUS_PCS_SYNCED))
3709 return;
3711 /* Set PLL lock range. */
3712 tg3_writephy(tp, 0x16, 0x8007);
3714 /* SW reset */
3715 tg3_writephy(tp, MII_BMCR, BMCR_RESET);
3717 /* Wait for reset to complete. */
3718 /* XXX schedule_timeout() ... */
3719 for (i = 0; i < 500; i++)
3720 udelay(10);
3722 /* Config mode; select PMA/Ch 1 regs. */
3723 tg3_writephy(tp, 0x10, 0x8411);
3725 /* Enable auto-lock and comdet, select txclk for tx. */
3726 tg3_writephy(tp, 0x11, 0x0a10);
3728 tg3_writephy(tp, 0x18, 0x00a0);
3729 tg3_writephy(tp, 0x16, 0x41ff);
3731 /* Assert and deassert POR. */
3732 tg3_writephy(tp, 0x13, 0x0400);
3733 udelay(40);
3734 tg3_writephy(tp, 0x13, 0x0000);
3736 tg3_writephy(tp, 0x11, 0x0a50);
3737 udelay(40);
3738 tg3_writephy(tp, 0x11, 0x0a10);
3740 /* Wait for signal to stabilize */
3741 /* XXX schedule_timeout() ... */
3742 for (i = 0; i < 15000; i++)
3743 udelay(10);
3745 /* Deselect the channel register so we can read the PHYID
3746 * later.
3747 */
3748 tg3_writephy(tp, 0x10, 0x8011);
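/* Fiber link setup using the SerDes hardware autonegotiation block
 * (SG_DIG_CTRL/SG_DIG_STATUS).  Programs MAC_SERDES_CFG as a
 * pre-emphasis/voltage workaround on all revisions except 5704 A0/A1,
 * falls back to parallel detection when the partner never sends config
 * words, and returns 1 when the link is usable.
 */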
3751 static int tg3_setup_fiber_hw_autoneg(struct tg3 *tp, u32 mac_status)
3753 u16 flowctrl;
3754 u32 sg_dig_ctrl, sg_dig_status;
3755 u32 serdes_cfg, expected_sg_dig_ctrl;
3756 int workaround, port_a;
3757 int current_link_up;
3759 serdes_cfg = 0;
3760 expected_sg_dig_ctrl = 0;
3761 workaround = 0;
3762 port_a = 1;
3763 current_link_up = 0;
3765 if (tp->pci_chip_rev_id != CHIPREV_ID_5704_A0 &&
3766 tp->pci_chip_rev_id != CHIPREV_ID_5704_A1) {
3767 workaround = 1;
3768 if (tr32(TG3PCI_DUAL_MAC_CTRL) & DUAL_MAC_CTRL_ID)
3769 port_a = 0;
3771 /* preserve bits 0-11,13,14 for signal pre-emphasis */
3772 /* preserve bits 20-23 for voltage regulator */
3773 serdes_cfg = tr32(MAC_SERDES_CFG) & 0x00f06fff;
3776 sg_dig_ctrl = tr32(SG_DIG_CTRL);
3778 if (tp->link_config.autoneg != AUTONEG_ENABLE) {
3779 if (sg_dig_ctrl & SG_DIG_USING_HW_AUTONEG) {
3780 if (workaround) {
3781 u32 val = serdes_cfg;
3783 if (port_a)
3784 val |= 0xc010000;
3785 else
3786 val |= 0x4010000;
3787 tw32_f(MAC_SERDES_CFG, val);
3790 tw32_f(SG_DIG_CTRL, SG_DIG_COMMON_SETUP);
3792 if (mac_status & MAC_STATUS_PCS_SYNCED) {
3793 tg3_setup_flow_control(tp, 0, 0);
3794 current_link_up = 1;
3796 goto out;
3799 /* Want auto-negotiation. */
3800 expected_sg_dig_ctrl = SG_DIG_USING_HW_AUTONEG | SG_DIG_COMMON_SETUP;
3802 flowctrl = tg3_advert_flowctrl_1000X(tp->link_config.flowctrl);
3803 if (flowctrl & ADVERTISE_1000XPAUSE)
3804 expected_sg_dig_ctrl |= SG_DIG_PAUSE_CAP;
3805 if (flowctrl & ADVERTISE_1000XPSE_ASYM)
3806 expected_sg_dig_ctrl |= SG_DIG_ASYM_PAUSE;
3808 if (sg_dig_ctrl != expected_sg_dig_ctrl) {
3809 if ((tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT) &&
3810 tp->serdes_counter &&
3811 ((mac_status & (MAC_STATUS_PCS_SYNCED |
3812 MAC_STATUS_RCVD_CFG)) ==
3813 MAC_STATUS_PCS_SYNCED)) {
3814 tp->serdes_counter--;
3815 current_link_up = 1;
3816 goto out;
3818 restart_autoneg:
3819 if (workaround)
3820 tw32_f(MAC_SERDES_CFG, serdes_cfg | 0xc011000);
3821 tw32_f(SG_DIG_CTRL, expected_sg_dig_ctrl | SG_DIG_SOFT_RESET);
3822 udelay(5);
3823 tw32_f(SG_DIG_CTRL, expected_sg_dig_ctrl);
3825 tp->serdes_counter = SERDES_AN_TIMEOUT_5704S;
3826 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
3827 } else if (mac_status & (MAC_STATUS_PCS_SYNCED |
3828 MAC_STATUS_SIGNAL_DET)) {
3829 sg_dig_status = tr32(SG_DIG_STATUS);
3830 mac_status = tr32(MAC_STATUS);
3832 if ((sg_dig_status & SG_DIG_AUTONEG_COMPLETE) &&
3833 (mac_status & MAC_STATUS_PCS_SYNCED)) {
3834 u32 local_adv = 0, remote_adv = 0;
3836 if (sg_dig_ctrl & SG_DIG_PAUSE_CAP)
3837 local_adv |= ADVERTISE_1000XPAUSE;
3838 if (sg_dig_ctrl & SG_DIG_ASYM_PAUSE)
3839 local_adv |= ADVERTISE_1000XPSE_ASYM;
3841 if (sg_dig_status & SG_DIG_PARTNER_PAUSE_CAPABLE)
3842 remote_adv |= LPA_1000XPAUSE;
3843 if (sg_dig_status & SG_DIG_PARTNER_ASYM_PAUSE)
3844 remote_adv |= LPA_1000XPAUSE_ASYM;
3846 tg3_setup_flow_control(tp, local_adv, remote_adv);
3847 current_link_up = 1;
3848 tp->serdes_counter = 0;
3849 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
3850 } else if (!(sg_dig_status & SG_DIG_AUTONEG_COMPLETE)) {
3851 if (tp->serdes_counter)
3852 tp->serdes_counter--;
3853 else {
3854 if (workaround) {
3855 u32 val = serdes_cfg;
3857 if (port_a)
3858 val |= 0xc010000;
3859 else
3860 val |= 0x4010000;
3862 tw32_f(MAC_SERDES_CFG, val);
3865 tw32_f(SG_DIG_CTRL, SG_DIG_COMMON_SETUP);
3866 udelay(40);
3868 /* Link parallel detection - link is up */
3869 /* only if we have PCS_SYNC and not */
3870 /* receiving config code words */
3871 mac_status = tr32(MAC_STATUS);
3872 if ((mac_status & MAC_STATUS_PCS_SYNCED) &&
3873 !(mac_status & MAC_STATUS_RCVD_CFG)) {
3874 tg3_setup_flow_control(tp, 0, 0);
3875 current_link_up = 1;
3876 tp->phy_flags |=
3877 TG3_PHYFLG_PARALLEL_DETECT;
3878 tp->serdes_counter =
3879 SERDES_PARALLEL_DET_TIMEOUT;
3880 } else
3881 goto restart_autoneg;
3884 } else {
3885 tp->serdes_counter = SERDES_AN_TIMEOUT_5704S;
3886 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
3889 out:
3890 return current_link_up;
3893 static int tg3_setup_fiber_by_hand(struct tg3 *tp, u32 mac_status)
3895 int current_link_up = 0;
3897 if (!(mac_status & MAC_STATUS_PCS_SYNCED))
3898 goto out;
3900 if (tp->link_config.autoneg == AUTONEG_ENABLE) {
3901 u32 txflags, rxflags;
3902 int i;
3904 if (fiber_autoneg(tp, &txflags, &rxflags)) {
3905 u32 local_adv = 0, remote_adv = 0;
3907 if (txflags & ANEG_CFG_PS1)
3908 local_adv |= ADVERTISE_1000XPAUSE;
3909 if (txflags & ANEG_CFG_PS2)
3910 local_adv |= ADVERTISE_1000XPSE_ASYM;
3912 if (rxflags & MR_LP_ADV_SYM_PAUSE)
3913 remote_adv |= LPA_1000XPAUSE;
3914 if (rxflags & MR_LP_ADV_ASYM_PAUSE)
3915 remote_adv |= LPA_1000XPAUSE_ASYM;
3917 tg3_setup_flow_control(tp, local_adv, remote_adv);
3919 current_link_up = 1;
3921 for (i = 0; i < 30; i++) {
3922 udelay(20);
3923 tw32_f(MAC_STATUS,
3924 (MAC_STATUS_SYNC_CHANGED |
3925 MAC_STATUS_CFG_CHANGED));
3926 udelay(40);
3927 if ((tr32(MAC_STATUS) &
3928 (MAC_STATUS_SYNC_CHANGED |
3929 MAC_STATUS_CFG_CHANGED)) == 0)
3930 break;
3933 mac_status = tr32(MAC_STATUS);
3934 if (current_link_up == 0 &&
3935 (mac_status & MAC_STATUS_PCS_SYNCED) &&
3936 !(mac_status & MAC_STATUS_RCVD_CFG))
3937 current_link_up = 1;
3938 } else {
3939 tg3_setup_flow_control(tp, 0, 0);
3941 /* Forcing 1000FD link up. */
3942 current_link_up = 1;
3944 tw32_f(MAC_MODE, (tp->mac_mode | MAC_MODE_SEND_CONFIGS));
3945 udelay(40);
3947 tw32_f(MAC_MODE, tp->mac_mode);
3948 udelay(40);
3951 out:
3952 return current_link_up;
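/* Top-level link setup for TBI/fiber ports.  Chooses between the
 * hardware SG-DIG autoneg path and the software state machine, then
 * updates MAC_MODE, the link LEDs and the carrier state, reporting any
 * change through tg3_link_report().
 */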
3955 static int tg3_setup_fiber_phy(struct tg3 *tp, int force_reset)
3957 u32 orig_pause_cfg;
3958 u16 orig_active_speed;
3959 u8 orig_active_duplex;
3960 u32 mac_status;
3961 int current_link_up;
3962 int i;
3964 orig_pause_cfg = tp->link_config.active_flowctrl;
3965 orig_active_speed = tp->link_config.active_speed;
3966 orig_active_duplex = tp->link_config.active_duplex;
3968 if (!(tp->tg3_flags2 & TG3_FLG2_HW_AUTONEG) &&
3969 netif_carrier_ok(tp->dev) &&
3970 (tp->tg3_flags & TG3_FLAG_INIT_COMPLETE)) {
3971 mac_status = tr32(MAC_STATUS);
3972 mac_status &= (MAC_STATUS_PCS_SYNCED |
3973 MAC_STATUS_SIGNAL_DET |
3974 MAC_STATUS_CFG_CHANGED |
3975 MAC_STATUS_RCVD_CFG);
3976 if (mac_status == (MAC_STATUS_PCS_SYNCED |
3977 MAC_STATUS_SIGNAL_DET)) {
3978 tw32_f(MAC_STATUS, (MAC_STATUS_SYNC_CHANGED |
3979 MAC_STATUS_CFG_CHANGED));
3980 return 0;
3984 tw32_f(MAC_TX_AUTO_NEG, 0);
3986 tp->mac_mode &= ~(MAC_MODE_PORT_MODE_MASK | MAC_MODE_HALF_DUPLEX);
3987 tp->mac_mode |= MAC_MODE_PORT_MODE_TBI;
3988 tw32_f(MAC_MODE, tp->mac_mode);
3989 udelay(40);
3991 if (tp->phy_id == TG3_PHY_ID_BCM8002)
3992 tg3_init_bcm8002(tp);
3994 /* Enable link change event even when serdes polling. */
3995 tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
3996 udelay(40);
3998 current_link_up = 0;
3999 mac_status = tr32(MAC_STATUS);
4001 if (tp->tg3_flags2 & TG3_FLG2_HW_AUTONEG)
4002 current_link_up = tg3_setup_fiber_hw_autoneg(tp, mac_status);
4003 else
4004 current_link_up = tg3_setup_fiber_by_hand(tp, mac_status);
4006 tp->napi[0].hw_status->status =
4007 (SD_STATUS_UPDATED |
4008 (tp->napi[0].hw_status->status & ~SD_STATUS_LINK_CHG));
4010 for (i = 0; i < 100; i++) {
4011 tw32_f(MAC_STATUS, (MAC_STATUS_SYNC_CHANGED |
4012 MAC_STATUS_CFG_CHANGED));
4013 udelay(5);
4014 if ((tr32(MAC_STATUS) & (MAC_STATUS_SYNC_CHANGED |
4015 MAC_STATUS_CFG_CHANGED |
4016 MAC_STATUS_LNKSTATE_CHANGED)) == 0)
4017 break;
4020 mac_status = tr32(MAC_STATUS);
4021 if ((mac_status & MAC_STATUS_PCS_SYNCED) == 0) {
4022 current_link_up = 0;
4023 if (tp->link_config.autoneg == AUTONEG_ENABLE &&
4024 tp->serdes_counter == 0) {
4025 tw32_f(MAC_MODE, (tp->mac_mode |
4026 MAC_MODE_SEND_CONFIGS));
4027 udelay(1);
4028 tw32_f(MAC_MODE, tp->mac_mode);
4032 if (current_link_up == 1) {
4033 tp->link_config.active_speed = SPEED_1000;
4034 tp->link_config.active_duplex = DUPLEX_FULL;
4035 tw32(MAC_LED_CTRL, (tp->led_ctrl |
4036 LED_CTRL_LNKLED_OVERRIDE |
4037 LED_CTRL_1000MBPS_ON));
4038 } else {
4039 tp->link_config.active_speed = SPEED_INVALID;
4040 tp->link_config.active_duplex = DUPLEX_INVALID;
4041 tw32(MAC_LED_CTRL, (tp->led_ctrl |
4042 LED_CTRL_LNKLED_OVERRIDE |
4043 LED_CTRL_TRAFFIC_OVERRIDE));
4046 if (current_link_up != netif_carrier_ok(tp->dev)) {
4047 if (current_link_up)
4048 netif_carrier_on(tp->dev);
4049 else
4050 netif_carrier_off(tp->dev);
4051 tg3_link_report(tp);
4052 } else {
4053 u32 now_pause_cfg = tp->link_config.active_flowctrl;
4054 if (orig_pause_cfg != now_pause_cfg ||
4055 orig_active_speed != tp->link_config.active_speed ||
4056 orig_active_duplex != tp->link_config.active_duplex)
4057 tg3_link_report(tp);
4060 return 0;
4063 static int tg3_setup_fiber_mii_phy(struct tg3 *tp, int force_reset)
4065 int current_link_up, err = 0;
4066 u32 bmsr, bmcr;
4067 u16 current_speed;
4068 u8 current_duplex;
4069 u32 local_adv, remote_adv;
4071 tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
4072 tw32_f(MAC_MODE, tp->mac_mode);
4073 udelay(40);
4075 tw32(MAC_EVENT, 0);
4077 tw32_f(MAC_STATUS,
4078 (MAC_STATUS_SYNC_CHANGED |
4079 MAC_STATUS_CFG_CHANGED |
4080 MAC_STATUS_MI_COMPLETION |
4081 MAC_STATUS_LNKSTATE_CHANGED));
4082 udelay(40);
4084 if (force_reset)
4085 tg3_phy_reset(tp);
4087 current_link_up = 0;
4088 current_speed = SPEED_INVALID;
4089 current_duplex = DUPLEX_INVALID;
4091 err |= tg3_readphy(tp, MII_BMSR, &bmsr);
4092 err |= tg3_readphy(tp, MII_BMSR, &bmsr);
4093 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714) {
4094 if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP)
4095 bmsr |= BMSR_LSTATUS;
4096 else
4097 bmsr &= ~BMSR_LSTATUS;
4100 err |= tg3_readphy(tp, MII_BMCR, &bmcr);
4102 if ((tp->link_config.autoneg == AUTONEG_ENABLE) && !force_reset &&
4103 (tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT)) {
4104 /* do nothing, just check for link up at the end */
4105 } else if (tp->link_config.autoneg == AUTONEG_ENABLE) {
4106 u32 adv, new_adv;
4108 err |= tg3_readphy(tp, MII_ADVERTISE, &adv);
4109 new_adv = adv & ~(ADVERTISE_1000XFULL | ADVERTISE_1000XHALF |
4110 ADVERTISE_1000XPAUSE |
4111 ADVERTISE_1000XPSE_ASYM |
4112 ADVERTISE_SLCT);
4114 new_adv |= tg3_advert_flowctrl_1000X(tp->link_config.flowctrl);
4116 if (tp->link_config.advertising & ADVERTISED_1000baseT_Half)
4117 new_adv |= ADVERTISE_1000XHALF;
4118 if (tp->link_config.advertising & ADVERTISED_1000baseT_Full)
4119 new_adv |= ADVERTISE_1000XFULL;
4121 if ((new_adv != adv) || !(bmcr & BMCR_ANENABLE)) {
4122 tg3_writephy(tp, MII_ADVERTISE, new_adv);
4123 bmcr |= BMCR_ANENABLE | BMCR_ANRESTART;
4124 tg3_writephy(tp, MII_BMCR, bmcr);
4126 tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
4127 tp->serdes_counter = SERDES_AN_TIMEOUT_5714S;
4128 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
4130 return err;
4132 } else {
4133 u32 new_bmcr;
4135 bmcr &= ~BMCR_SPEED1000;
4136 new_bmcr = bmcr & ~(BMCR_ANENABLE | BMCR_FULLDPLX);
4138 if (tp->link_config.duplex == DUPLEX_FULL)
4139 new_bmcr |= BMCR_FULLDPLX;
4141 if (new_bmcr != bmcr) {
4142 /* BMCR_SPEED1000 is a reserved bit that needs
4143 * to be set on write. */
4145 new_bmcr |= BMCR_SPEED1000;
4147 /* Force a linkdown */
4148 if (netif_carrier_ok(tp->dev)) {
4149 u32 adv;
4151 err |= tg3_readphy(tp, MII_ADVERTISE, &adv);
4152 adv &= ~(ADVERTISE_1000XFULL |
4153 ADVERTISE_1000XHALF |
4154 ADVERTISE_SLCT);
4155 tg3_writephy(tp, MII_ADVERTISE, adv);
4156 tg3_writephy(tp, MII_BMCR, bmcr |
4157 BMCR_ANRESTART |
4158 BMCR_ANENABLE);
4159 udelay(10);
4160 netif_carrier_off(tp->dev);
4162 tg3_writephy(tp, MII_BMCR, new_bmcr);
4163 bmcr = new_bmcr;
4164 err |= tg3_readphy(tp, MII_BMSR, &bmsr);
4165 err |= tg3_readphy(tp, MII_BMSR, &bmsr);
4166 if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
4167 ASIC_REV_5714) {
4168 if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP)
4169 bmsr |= BMSR_LSTATUS;
4170 else
4171 bmsr &= ~BMSR_LSTATUS;
4173 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
4177 if (bmsr & BMSR_LSTATUS) {
4178 current_speed = SPEED_1000;
4179 current_link_up = 1;
4180 if (bmcr & BMCR_FULLDPLX)
4181 current_duplex = DUPLEX_FULL;
4182 else
4183 current_duplex = DUPLEX_HALF;
4185 local_adv = 0;
4186 remote_adv = 0;
4188 if (bmcr & BMCR_ANENABLE) {
4189 u32 common;
4191 err |= tg3_readphy(tp, MII_ADVERTISE, &local_adv);
4192 err |= tg3_readphy(tp, MII_LPA, &remote_adv);
4193 common = local_adv & remote_adv;
4194 if (common & (ADVERTISE_1000XHALF |
4195 ADVERTISE_1000XFULL)) {
4196 if (common & ADVERTISE_1000XFULL)
4197 current_duplex = DUPLEX_FULL;
4198 else
4199 current_duplex = DUPLEX_HALF;
4200 } else if (!(tp->tg3_flags2 & TG3_FLG2_5780_CLASS)) {
4201 /* Link is up via parallel detect */
4202 } else {
4203 current_link_up = 0;
4208 if (current_link_up == 1 && current_duplex == DUPLEX_FULL)
4209 tg3_setup_flow_control(tp, local_adv, remote_adv);
4211 tp->mac_mode &= ~MAC_MODE_HALF_DUPLEX;
4212 if (tp->link_config.active_duplex == DUPLEX_HALF)
4213 tp->mac_mode |= MAC_MODE_HALF_DUPLEX;
4215 tw32_f(MAC_MODE, tp->mac_mode);
4216 udelay(40);
4218 tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
4220 tp->link_config.active_speed = current_speed;
4221 tp->link_config.active_duplex = current_duplex;
4223 if (current_link_up != netif_carrier_ok(tp->dev)) {
4224 if (current_link_up)
4225 netif_carrier_on(tp->dev);
4226 else {
4227 netif_carrier_off(tp->dev);
4228 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
4230 tg3_link_report(tp);
4232 return err;
4235 static void tg3_serdes_parallel_detect(struct tg3 *tp)
4237 if (tp->serdes_counter) {
4238 /* Give autoneg time to complete. */
4239 tp->serdes_counter--;
4240 return;
4243 if (!netif_carrier_ok(tp->dev) &&
4244 (tp->link_config.autoneg == AUTONEG_ENABLE)) {
4245 u32 bmcr;
4247 tg3_readphy(tp, MII_BMCR, &bmcr);
4248 if (bmcr & BMCR_ANENABLE) {
4249 u32 phy1, phy2;
4251 /* Select shadow register 0x1f */
4252 tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x7c00);
4253 tg3_readphy(tp, MII_TG3_MISC_SHDW, &phy1);
4255 /* Select expansion interrupt status register */
4256 tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
4257 MII_TG3_DSP_EXP1_INT_STAT);
4258 tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &phy2);
4259 tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &phy2);
4261 if ((phy1 & 0x10) && !(phy2 & 0x20)) {
4262 /* We have signal detect and not receiving
4263 * config code words, link is up by parallel
4264 * detection. */
4267 bmcr &= ~BMCR_ANENABLE;
4268 bmcr |= BMCR_SPEED1000 | BMCR_FULLDPLX;
4269 tg3_writephy(tp, MII_BMCR, bmcr);
4270 tp->phy_flags |= TG3_PHYFLG_PARALLEL_DETECT;
4273 } else if (netif_carrier_ok(tp->dev) &&
4274 (tp->link_config.autoneg == AUTONEG_ENABLE) &&
4275 (tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT)) {
4276 u32 phy2;
4278 /* Select expansion interrupt status register */
4279 tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
4280 MII_TG3_DSP_EXP1_INT_STAT);
4281 tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &phy2);
4282 if (phy2 & 0x20) {
4283 u32 bmcr;
4285 /* Config code words received, turn on autoneg. */
4286 tg3_readphy(tp, MII_BMCR, &bmcr);
4287 tg3_writephy(tp, MII_BMCR, bmcr | BMCR_ANENABLE);
4289 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
4295 static int tg3_setup_phy(struct tg3 *tp, int force_reset)
4297 int err;
4299 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
4300 err = tg3_setup_fiber_phy(tp, force_reset);
4301 else if (tp->phy_flags & TG3_PHYFLG_MII_SERDES)
4302 err = tg3_setup_fiber_mii_phy(tp, force_reset);
4303 else
4304 err = tg3_setup_copper_phy(tp, force_reset);
4306 if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5784_AX) {
4307 u32 val, scale;
4309 val = tr32(TG3_CPMU_CLCK_STAT) & CPMU_CLCK_STAT_MAC_CLCK_MASK;
4310 if (val == CPMU_CLCK_STAT_MAC_CLCK_62_5)
4311 scale = 65;
4312 else if (val == CPMU_CLCK_STAT_MAC_CLCK_6_25)
4313 scale = 6;
4314 else
4315 scale = 12;
4317 val = tr32(GRC_MISC_CFG) & ~GRC_MISC_CFG_PRESCALAR_MASK;
4318 val |= (scale << GRC_MISC_CFG_PRESCALAR_SHIFT);
4319 tw32(GRC_MISC_CFG, val);
4322 if (tp->link_config.active_speed == SPEED_1000 &&
4323 tp->link_config.active_duplex == DUPLEX_HALF)
4324 tw32(MAC_TX_LENGTHS,
4325 ((2 << TX_LENGTHS_IPG_CRS_SHIFT) |
4326 (6 << TX_LENGTHS_IPG_SHIFT) |
4327 (0xff << TX_LENGTHS_SLOT_TIME_SHIFT)));
4328 else
4329 tw32(MAC_TX_LENGTHS,
4330 ((2 << TX_LENGTHS_IPG_CRS_SHIFT) |
4331 (6 << TX_LENGTHS_IPG_SHIFT) |
4332 (32 << TX_LENGTHS_SLOT_TIME_SHIFT)));
4334 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
4335 if (netif_carrier_ok(tp->dev)) {
4336 tw32(HOSTCC_STAT_COAL_TICKS,
4337 tp->coal.stats_block_coalesce_usecs);
4338 } else {
4339 tw32(HOSTCC_STAT_COAL_TICKS, 0);
4343 if (tp->tg3_flags & TG3_FLAG_ASPM_WORKAROUND) {
4344 u32 val = tr32(PCIE_PWR_MGMT_THRESH);
4345 if (!netif_carrier_ok(tp->dev))
4346 val = (val & ~PCIE_PWR_MGMT_L1_THRESH_MSK) |
4347 tp->pwrmgmt_thresh;
4348 else
4349 val |= PCIE_PWR_MGMT_L1_THRESH_MSK;
4350 tw32(PCIE_PWR_MGMT_THRESH, val);
4353 return err;
4356 /* This is called whenever we suspect that the system chipset is re-
4357 * ordering the sequence of MMIO to the tx send mailbox. The symptom
4358 * is bogus tx completions. We try to recover by setting the
4359 * TG3_FLAG_MBOX_WRITE_REORDER flag and resetting the chip later
4360 * in the workqueue. */
4362 static void tg3_tx_recover(struct tg3 *tp)
4364 BUG_ON((tp->tg3_flags & TG3_FLAG_MBOX_WRITE_REORDER) ||
4365 tp->write32_tx_mbox == tg3_write_indirect_mbox);
4367 netdev_warn(tp->dev,
4368 "The system may be re-ordering memory-mapped I/O "
4369 "cycles to the network device, attempting to recover. "
4370 "Please report the problem to the driver maintainer "
4371 "and include system chipset information.\n");
4373 spin_lock(&tp->lock);
4374 tp->tg3_flags |= TG3_FLAG_TX_RECOVERY_PENDING;
4375 spin_unlock(&tp->lock);
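/* Note: the actual recovery happens in tg3_reset_task() further down.
 * When it sees TG3_FLAG_TX_RECOVERY_PENDING it switches the tx mailbox
 * accessor to tg3_write32_tx_mbox(), the rx mailbox accessor to
 * tg3_write_flush_reg32(), and sets TG3_FLAG_MBOX_WRITE_REORDER before
 * halting and re-initializing the hardware.
 */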
4378 static inline u32 tg3_tx_avail(struct tg3_napi *tnapi)
4380 /* Tell compiler to fetch tx indices from memory. */
4381 barrier();
4382 return tnapi->tx_pending -
4383 ((tnapi->tx_prod - tnapi->tx_cons) & (TG3_TX_RING_SIZE - 1));
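/* Worked example (illustrative values, assuming TG3_TX_RING_SIZE is a
 * power of two such as 512): with tx_prod = 5 and tx_cons = 510 the
 * subtraction wraps to (5 - 510) & 511 = 7 descriptors still in flight,
 * so a tx_pending of 511 would report 504 free slots. The mask only
 * behaves like a modulo because the ring size is a power of two.
 */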
4386 /* Tigon3 never reports partial packet sends. So we do not
4387 * need special logic to handle SKBs that have not had all
4388 * of their frags sent yet, like SunGEM does. */
4390 static void tg3_tx(struct tg3_napi *tnapi)
4392 struct tg3 *tp = tnapi->tp;
4393 u32 hw_idx = tnapi->hw_status->idx[0].tx_consumer;
4394 u32 sw_idx = tnapi->tx_cons;
4395 struct netdev_queue *txq;
4396 int index = tnapi - tp->napi;
4398 if (tp->tg3_flags3 & TG3_FLG3_ENABLE_TSS)
4399 index--;
4401 txq = netdev_get_tx_queue(tp->dev, index);
4403 while (sw_idx != hw_idx) {
4404 struct ring_info *ri = &tnapi->tx_buffers[sw_idx];
4405 struct sk_buff *skb = ri->skb;
4406 int i, tx_bug = 0;
4408 if (unlikely(skb == NULL)) {
4409 tg3_tx_recover(tp);
4410 return;
4413 pci_unmap_single(tp->pdev,
4414 dma_unmap_addr(ri, mapping),
4415 skb_headlen(skb),
4416 PCI_DMA_TODEVICE);
4418 ri->skb = NULL;
4420 sw_idx = NEXT_TX(sw_idx);
4422 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
4423 ri = &tnapi->tx_buffers[sw_idx];
4424 if (unlikely(ri->skb != NULL || sw_idx == hw_idx))
4425 tx_bug = 1;
4427 pci_unmap_page(tp->pdev,
4428 dma_unmap_addr(ri, mapping),
4429 skb_shinfo(skb)->frags[i].size,
4430 PCI_DMA_TODEVICE);
4431 sw_idx = NEXT_TX(sw_idx);
4434 dev_kfree_skb(skb);
4436 if (unlikely(tx_bug)) {
4437 tg3_tx_recover(tp);
4438 return;
4442 tnapi->tx_cons = sw_idx;
4444 /* Need to make the tx_cons update visible to tg3_start_xmit()
4445 * before checking for netif_queue_stopped(). Without the
4446 * memory barrier, there is a small possibility that tg3_start_xmit()
4447 * will miss it and cause the queue to be stopped forever. */
4449 smp_mb();
4451 if (unlikely(netif_tx_queue_stopped(txq) &&
4452 (tg3_tx_avail(tnapi) > TG3_TX_WAKEUP_THRESH(tnapi)))) {
4453 __netif_tx_lock(txq, smp_processor_id());
4454 if (netif_tx_queue_stopped(txq) &&
4455 (tg3_tx_avail(tnapi) > TG3_TX_WAKEUP_THRESH(tnapi)))
4456 netif_tx_wake_queue(txq);
4457 __netif_tx_unlock(txq);
4461 static void tg3_rx_skb_free(struct tg3 *tp, struct ring_info *ri, u32 map_sz)
4463 if (!ri->skb)
4464 return;
4466 pci_unmap_single(tp->pdev, dma_unmap_addr(ri, mapping),
4467 map_sz, PCI_DMA_FROMDEVICE);
4468 dev_kfree_skb_any(ri->skb);
4469 ri->skb = NULL;
4472 /* Returns size of skb allocated or < 0 on error.
4474 * We only need to fill in the address because the other members
4475 * of the RX descriptor are invariant, see tg3_init_rings.
4477 * Note the purposeful asymmetry of cpu vs. chip accesses. For
4478 * posting buffers we only dirty the first cache line of the RX
4479 * descriptor (containing the address). Whereas for the RX status
4480 * buffers the cpu only reads the last cacheline of the RX descriptor
4481 * (to fetch the error flags, vlan tag, checksum, and opaque cookie). */
4483 static int tg3_alloc_rx_skb(struct tg3 *tp, struct tg3_rx_prodring_set *tpr,
4484 u32 opaque_key, u32 dest_idx_unmasked)
4486 struct tg3_rx_buffer_desc *desc;
4487 struct ring_info *map, *src_map;
4488 struct sk_buff *skb;
4489 dma_addr_t mapping;
4490 int skb_size, dest_idx;
4492 src_map = NULL;
4493 switch (opaque_key) {
4494 case RXD_OPAQUE_RING_STD:
4495 dest_idx = dest_idx_unmasked % TG3_RX_RING_SIZE;
4496 desc = &tpr->rx_std[dest_idx];
4497 map = &tpr->rx_std_buffers[dest_idx];
4498 skb_size = tp->rx_pkt_map_sz;
4499 break;
4501 case RXD_OPAQUE_RING_JUMBO:
4502 dest_idx = dest_idx_unmasked % TG3_RX_JUMBO_RING_SIZE;
4503 desc = &tpr->rx_jmb[dest_idx].std;
4504 map = &tpr->rx_jmb_buffers[dest_idx];
4505 skb_size = TG3_RX_JMB_MAP_SZ;
4506 break;
4508 default:
4509 return -EINVAL;
4512 /* Do not overwrite any of the map or rp information
4513 * until we are sure we can commit to a new buffer.
4515 * Callers depend upon this behavior and assume that
4516 * we leave everything unchanged if we fail. */
4518 skb = netdev_alloc_skb(tp->dev, skb_size + tp->rx_offset);
4519 if (skb == NULL)
4520 return -ENOMEM;
4522 skb_reserve(skb, tp->rx_offset);
4524 mapping = pci_map_single(tp->pdev, skb->data, skb_size,
4525 PCI_DMA_FROMDEVICE);
4526 if (pci_dma_mapping_error(tp->pdev, mapping)) {
4527 dev_kfree_skb(skb);
4528 return -EIO;
4531 map->skb = skb;
4532 dma_unmap_addr_set(map, mapping, mapping);
4534 desc->addr_hi = ((u64)mapping >> 32);
4535 desc->addr_lo = ((u64)mapping & 0xffffffff);
4537 return skb_size;
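/* Example (illustrative): a 64-bit bus address such as 0x123456780 is
 * posted to the descriptor above as addr_hi = 0x00000001 and
 * addr_lo = 0x23456780, i.e. the mapping is simply split across the two
 * 32-bit halves of the BD.
 */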
4540 /* We only need to move the address over because the other
4541 * members of the RX descriptor are invariant. See notes above
4542 * tg3_alloc_rx_skb for full details. */
4544 static void tg3_recycle_rx(struct tg3_napi *tnapi,
4545 struct tg3_rx_prodring_set *dpr,
4546 u32 opaque_key, int src_idx,
4547 u32 dest_idx_unmasked)
4549 struct tg3 *tp = tnapi->tp;
4550 struct tg3_rx_buffer_desc *src_desc, *dest_desc;
4551 struct ring_info *src_map, *dest_map;
4552 struct tg3_rx_prodring_set *spr = &tp->napi[0].prodring;
4553 int dest_idx;
4555 switch (opaque_key) {
4556 case RXD_OPAQUE_RING_STD:
4557 dest_idx = dest_idx_unmasked % TG3_RX_RING_SIZE;
4558 dest_desc = &dpr->rx_std[dest_idx];
4559 dest_map = &dpr->rx_std_buffers[dest_idx];
4560 src_desc = &spr->rx_std[src_idx];
4561 src_map = &spr->rx_std_buffers[src_idx];
4562 break;
4564 case RXD_OPAQUE_RING_JUMBO:
4565 dest_idx = dest_idx_unmasked % TG3_RX_JUMBO_RING_SIZE;
4566 dest_desc = &dpr->rx_jmb[dest_idx].std;
4567 dest_map = &dpr->rx_jmb_buffers[dest_idx];
4568 src_desc = &spr->rx_jmb[src_idx].std;
4569 src_map = &spr->rx_jmb_buffers[src_idx];
4570 break;
4572 default:
4573 return;
4576 dest_map->skb = src_map->skb;
4577 dma_unmap_addr_set(dest_map, mapping,
4578 dma_unmap_addr(src_map, mapping));
4579 dest_desc->addr_hi = src_desc->addr_hi;
4580 dest_desc->addr_lo = src_desc->addr_lo;
4582 /* Ensure that the update to the skb happens after the physical
4583 * addresses have been transferred to the new BD location. */
4585 smp_wmb();
4587 src_map->skb = NULL;
4590 /* The RX ring scheme is composed of multiple rings which post fresh
4591 * buffers to the chip, and one special ring the chip uses to report
4592 * status back to the host.
4594 * The special ring reports the status of received packets to the
4595 * host. The chip does not write into the original descriptor the
4596 * RX buffer was obtained from. The chip simply takes the original
4597 * descriptor as provided by the host, updates the status and length
4598 * field, then writes this into the next status ring entry.
4600 * Each ring the host uses to post buffers to the chip is described
4601 * by a TG3_BDINFO entry in the chip's SRAM area. When a packet arrives,
4602 * it is first placed into the on-chip ram. When the packet's length
4603 * is known, it walks down the TG3_BDINFO entries to select the ring.
4604 * Each TG3_BDINFO specifies a MAXLEN field, and the first TG3_BDINFO
4605 * whose MAXLEN covers the new packet's length is chosen.
4607 * The "separate ring for rx status" scheme may sound queer, but it makes
4608 * sense from a cache coherency perspective. If only the host writes
4609 * to the buffer post rings, and only the chip writes to the rx status
4610 * rings, then cache lines never move beyond shared-modified state.
4611 * If both the host and chip were to write into the same ring, cache line
4612 * eviction could occur since both entities want it in an exclusive state. */
4614 static int tg3_rx(struct tg3_napi *tnapi, int budget)
4616 struct tg3 *tp = tnapi->tp;
4617 u32 work_mask, rx_std_posted = 0;
4618 u32 std_prod_idx, jmb_prod_idx;
4619 u32 sw_idx = tnapi->rx_rcb_ptr;
4620 u16 hw_idx;
4621 int received;
4622 struct tg3_rx_prodring_set *tpr = &tnapi->prodring;
4624 hw_idx = *(tnapi->rx_rcb_prod_idx);
4626 /* We need to order the read of hw_idx and the read of
4627 * the opaque cookie. */
4629 rmb();
4630 work_mask = 0;
4631 received = 0;
4632 std_prod_idx = tpr->rx_std_prod_idx;
4633 jmb_prod_idx = tpr->rx_jmb_prod_idx;
4634 while (sw_idx != hw_idx && budget > 0) {
4635 struct ring_info *ri;
4636 struct tg3_rx_buffer_desc *desc = &tnapi->rx_rcb[sw_idx];
4637 unsigned int len;
4638 struct sk_buff *skb;
4639 dma_addr_t dma_addr;
4640 u32 opaque_key, desc_idx, *post_ptr;
4641 bool hw_vlan __maybe_unused = false;
4642 u16 vtag __maybe_unused = 0;
4644 desc_idx = desc->opaque & RXD_OPAQUE_INDEX_MASK;
4645 opaque_key = desc->opaque & RXD_OPAQUE_RING_MASK;
4646 if (opaque_key == RXD_OPAQUE_RING_STD) {
4647 ri = &tp->napi[0].prodring.rx_std_buffers[desc_idx];
4648 dma_addr = dma_unmap_addr(ri, mapping);
4649 skb = ri->skb;
4650 post_ptr = &std_prod_idx;
4651 rx_std_posted++;
4652 } else if (opaque_key == RXD_OPAQUE_RING_JUMBO) {
4653 ri = &tp->napi[0].prodring.rx_jmb_buffers[desc_idx];
4654 dma_addr = dma_unmap_addr(ri, mapping);
4655 skb = ri->skb;
4656 post_ptr = &jmb_prod_idx;
4657 } else
4658 goto next_pkt_nopost;
4660 work_mask |= opaque_key;
4662 if ((desc->err_vlan & RXD_ERR_MASK) != 0 &&
4663 (desc->err_vlan != RXD_ERR_ODD_NIBBLE_RCVD_MII)) {
4664 drop_it:
4665 tg3_recycle_rx(tnapi, tpr, opaque_key,
4666 desc_idx, *post_ptr);
4667 drop_it_no_recycle:
4668 /* Other statistics kept track of by card. */
4669 tp->net_stats.rx_dropped++;
4670 goto next_pkt;
4673 len = ((desc->idx_len & RXD_LEN_MASK) >> RXD_LEN_SHIFT) -
4674 ETH_FCS_LEN;
4676 if (len > TG3_RX_COPY_THRESH(tp)) {
4677 int skb_size;
4679 skb_size = tg3_alloc_rx_skb(tp, tpr, opaque_key,
4680 *post_ptr);
4681 if (skb_size < 0)
4682 goto drop_it;
4684 pci_unmap_single(tp->pdev, dma_addr, skb_size,
4685 PCI_DMA_FROMDEVICE);
4687 /* Ensure that the update to the skb happens
4688 * after the usage of the old DMA mapping. */
4690 smp_wmb();
4692 ri->skb = NULL;
4694 skb_put(skb, len);
4695 } else {
4696 struct sk_buff *copy_skb;
4698 tg3_recycle_rx(tnapi, tpr, opaque_key,
4699 desc_idx, *post_ptr);
4701 copy_skb = netdev_alloc_skb(tp->dev, len + VLAN_HLEN +
4702 TG3_RAW_IP_ALIGN);
4703 if (copy_skb == NULL)
4704 goto drop_it_no_recycle;
4706 skb_reserve(copy_skb, TG3_RAW_IP_ALIGN + VLAN_HLEN);
4707 skb_put(copy_skb, len);
4708 pci_dma_sync_single_for_cpu(tp->pdev, dma_addr, len, PCI_DMA_FROMDEVICE);
4709 skb_copy_from_linear_data(skb, copy_skb->data, len);
4710 pci_dma_sync_single_for_device(tp->pdev, dma_addr, len, PCI_DMA_FROMDEVICE);
4712 /* We'll reuse the original ring buffer. */
4713 skb = copy_skb;
4716 if ((tp->tg3_flags & TG3_FLAG_RX_CHECKSUMS) &&
4717 (desc->type_flags & RXD_FLAG_TCPUDP_CSUM) &&
4718 (((desc->ip_tcp_csum & RXD_TCPCSUM_MASK)
4719 >> RXD_TCPCSUM_SHIFT) == 0xffff))
4720 skb->ip_summed = CHECKSUM_UNNECESSARY;
4721 else
4722 skb_checksum_none_assert(skb);
4724 skb->protocol = eth_type_trans(skb, tp->dev);
4726 if (len > (tp->dev->mtu + ETH_HLEN) &&
4727 skb->protocol != htons(ETH_P_8021Q)) {
4728 dev_kfree_skb(skb);
4729 goto next_pkt;
4732 if (desc->type_flags & RXD_FLAG_VLAN &&
4733 !(tp->rx_mode & RX_MODE_KEEP_VLAN_TAG)) {
4734 vtag = desc->err_vlan & RXD_VLAN_MASK;
4735 #if TG3_VLAN_TAG_USED
4736 if (tp->vlgrp)
4737 hw_vlan = true;
4738 else
4739 #endif
4741 struct vlan_ethhdr *ve = (struct vlan_ethhdr *)
4742 __skb_push(skb, VLAN_HLEN);
4744 memmove(ve, skb->data + VLAN_HLEN,
4745 ETH_ALEN * 2);
4746 ve->h_vlan_proto = htons(ETH_P_8021Q);
4747 ve->h_vlan_TCI = htons(vtag);
4751 #if TG3_VLAN_TAG_USED
4752 if (hw_vlan)
4753 vlan_gro_receive(&tnapi->napi, tp->vlgrp, vtag, skb);
4754 else
4755 #endif
4756 napi_gro_receive(&tnapi->napi, skb);
4758 received++;
4759 budget--;
4761 next_pkt:
4762 (*post_ptr)++;
4764 if (unlikely(rx_std_posted >= tp->rx_std_max_post)) {
4765 tpr->rx_std_prod_idx = std_prod_idx % TG3_RX_RING_SIZE;
4766 tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG,
4767 tpr->rx_std_prod_idx);
4768 work_mask &= ~RXD_OPAQUE_RING_STD;
4769 rx_std_posted = 0;
4771 next_pkt_nopost:
4772 sw_idx++;
4773 sw_idx &= (TG3_RX_RCB_RING_SIZE(tp) - 1);
4775 /* Refresh hw_idx to see if there is new work */
4776 if (sw_idx == hw_idx) {
4777 hw_idx = *(tnapi->rx_rcb_prod_idx);
4778 rmb();
4782 /* ACK the status ring. */
4783 tnapi->rx_rcb_ptr = sw_idx;
4784 tw32_rx_mbox(tnapi->consmbox, sw_idx);
4786 /* Refill RX ring(s). */
4787 if (!(tp->tg3_flags3 & TG3_FLG3_ENABLE_RSS)) {
4788 if (work_mask & RXD_OPAQUE_RING_STD) {
4789 tpr->rx_std_prod_idx = std_prod_idx % TG3_RX_RING_SIZE;
4790 tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG,
4791 tpr->rx_std_prod_idx);
4793 if (work_mask & RXD_OPAQUE_RING_JUMBO) {
4794 tpr->rx_jmb_prod_idx = jmb_prod_idx %
4795 TG3_RX_JUMBO_RING_SIZE;
4796 tw32_rx_mbox(TG3_RX_JMB_PROD_IDX_REG,
4797 tpr->rx_jmb_prod_idx);
4799 mmiowb();
4800 } else if (work_mask) {
4801 /* rx_std_buffers[] and rx_jmb_buffers[] entries must be
4802 * updated before the producer indices can be updated. */
4804 smp_wmb();
4806 tpr->rx_std_prod_idx = std_prod_idx % TG3_RX_RING_SIZE;
4807 tpr->rx_jmb_prod_idx = jmb_prod_idx % TG3_RX_JUMBO_RING_SIZE;
4809 if (tnapi != &tp->napi[1])
4810 napi_schedule(&tp->napi[1].napi);
4813 return received;
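/* Note: tg3_rx() only advances the producer mailboxes directly in the
 * non-RSS case. With RSS enabled, each vector's recycled buffers stay in
 * its own prodring and napi[1] is scheduled; the transfer back into
 * napi[0]'s producer ring is done later by tg3_rx_prodring_xfer() from
 * tg3_poll_work().
 */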
4816 static void tg3_poll_link(struct tg3 *tp)
4818 /* handle link change and other phy events */
4819 if (!(tp->tg3_flags &
4820 (TG3_FLAG_USE_LINKCHG_REG |
4821 TG3_FLAG_POLL_SERDES))) {
4822 struct tg3_hw_status *sblk = tp->napi[0].hw_status;
4824 if (sblk->status & SD_STATUS_LINK_CHG) {
4825 sblk->status = SD_STATUS_UPDATED |
4826 (sblk->status & ~SD_STATUS_LINK_CHG);
4827 spin_lock(&tp->lock);
4828 if (tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB) {
4829 tw32_f(MAC_STATUS,
4830 (MAC_STATUS_SYNC_CHANGED |
4831 MAC_STATUS_CFG_CHANGED |
4832 MAC_STATUS_MI_COMPLETION |
4833 MAC_STATUS_LNKSTATE_CHANGED));
4834 udelay(40);
4835 } else
4836 tg3_setup_phy(tp, 0);
4837 spin_unlock(&tp->lock);
4842 static int tg3_rx_prodring_xfer(struct tg3 *tp,
4843 struct tg3_rx_prodring_set *dpr,
4844 struct tg3_rx_prodring_set *spr)
4846 u32 si, di, cpycnt, src_prod_idx;
4847 int i, err = 0;
4849 while (1) {
4850 src_prod_idx = spr->rx_std_prod_idx;
4852 /* Make sure updates to the rx_std_buffers[] entries and the
4853 * standard producer index are seen in the correct order. */
4855 smp_rmb();
4857 if (spr->rx_std_cons_idx == src_prod_idx)
4858 break;
4860 if (spr->rx_std_cons_idx < src_prod_idx)
4861 cpycnt = src_prod_idx - spr->rx_std_cons_idx;
4862 else
4863 cpycnt = TG3_RX_RING_SIZE - spr->rx_std_cons_idx;
4865 cpycnt = min(cpycnt, TG3_RX_RING_SIZE - dpr->rx_std_prod_idx);
4867 si = spr->rx_std_cons_idx;
4868 di = dpr->rx_std_prod_idx;
4870 for (i = di; i < di + cpycnt; i++) {
4871 if (dpr->rx_std_buffers[i].skb) {
4872 cpycnt = i - di;
4873 err = -ENOSPC;
4874 break;
4878 if (!cpycnt)
4879 break;
4881 /* Ensure that updates to the rx_std_buffers ring and the
4882 * shadowed hardware producer ring from tg3_recycle_skb() are
4883 * ordered correctly WRT the skb check above. */
4885 smp_rmb();
4887 memcpy(&dpr->rx_std_buffers[di],
4888 &spr->rx_std_buffers[si],
4889 cpycnt * sizeof(struct ring_info));
4891 for (i = 0; i < cpycnt; i++, di++, si++) {
4892 struct tg3_rx_buffer_desc *sbd, *dbd;
4893 sbd = &spr->rx_std[si];
4894 dbd = &dpr->rx_std[di];
4895 dbd->addr_hi = sbd->addr_hi;
4896 dbd->addr_lo = sbd->addr_lo;
4899 spr->rx_std_cons_idx = (spr->rx_std_cons_idx + cpycnt) %
4900 TG3_RX_RING_SIZE;
4901 dpr->rx_std_prod_idx = (dpr->rx_std_prod_idx + cpycnt) %
4902 TG3_RX_RING_SIZE;
4905 while (1) {
4906 src_prod_idx = spr->rx_jmb_prod_idx;
4908 /* Make sure updates to the rx_jmb_buffers[] entries and
4909 * the jumbo producer index are seen in the correct order. */
4911 smp_rmb();
4913 if (spr->rx_jmb_cons_idx == src_prod_idx)
4914 break;
4916 if (spr->rx_jmb_cons_idx < src_prod_idx)
4917 cpycnt = src_prod_idx - spr->rx_jmb_cons_idx;
4918 else
4919 cpycnt = TG3_RX_JUMBO_RING_SIZE - spr->rx_jmb_cons_idx;
4921 cpycnt = min(cpycnt,
4922 TG3_RX_JUMBO_RING_SIZE - dpr->rx_jmb_prod_idx);
4924 si = spr->rx_jmb_cons_idx;
4925 di = dpr->rx_jmb_prod_idx;
4927 for (i = di; i < di + cpycnt; i++) {
4928 if (dpr->rx_jmb_buffers[i].skb) {
4929 cpycnt = i - di;
4930 err = -ENOSPC;
4931 break;
4935 if (!cpycnt)
4936 break;
4938 /* Ensure that updates to the rx_jmb_buffers ring and the
4939 * shadowed hardware producer ring from tg3_recycle_skb() are
4940 * ordered correctly WRT the skb check above. */
4942 smp_rmb();
4944 memcpy(&dpr->rx_jmb_buffers[di],
4945 &spr->rx_jmb_buffers[si],
4946 cpycnt * sizeof(struct ring_info));
4948 for (i = 0; i < cpycnt; i++, di++, si++) {
4949 struct tg3_rx_buffer_desc *sbd, *dbd;
4950 sbd = &spr->rx_jmb[si].std;
4951 dbd = &dpr->rx_jmb[di].std;
4952 dbd->addr_hi = sbd->addr_hi;
4953 dbd->addr_lo = sbd->addr_lo;
4956 spr->rx_jmb_cons_idx = (spr->rx_jmb_cons_idx + cpycnt) %
4957 TG3_RX_JUMBO_RING_SIZE;
4958 dpr->rx_jmb_prod_idx = (dpr->rx_jmb_prod_idx + cpycnt) %
4959 TG3_RX_JUMBO_RING_SIZE;
4962 return err;
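/* Note: -ENOSPC here only means the destination producer ring already
 * had an skb in the next slot, so the copy was cut short. The caller
 * (tg3_poll_work()) responds by forcing a coalescence event through
 * HOSTCC_MODE, apparently so the remaining buffers get another chance
 * on a subsequent poll.
 */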
4965 static int tg3_poll_work(struct tg3_napi *tnapi, int work_done, int budget)
4967 struct tg3 *tp = tnapi->tp;
4969 /* run TX completion thread */
4970 if (tnapi->hw_status->idx[0].tx_consumer != tnapi->tx_cons) {
4971 tg3_tx(tnapi);
4972 if (unlikely(tp->tg3_flags & TG3_FLAG_TX_RECOVERY_PENDING))
4973 return work_done;
4976 /* run RX thread, within the bounds set by NAPI.
4977 * All RX "locking" is done by ensuring outside
4978 * code synchronizes with tg3->napi.poll() */
4980 if (*(tnapi->rx_rcb_prod_idx) != tnapi->rx_rcb_ptr)
4981 work_done += tg3_rx(tnapi, budget - work_done);
4983 if ((tp->tg3_flags3 & TG3_FLG3_ENABLE_RSS) && tnapi == &tp->napi[1]) {
4984 struct tg3_rx_prodring_set *dpr = &tp->napi[0].prodring;
4985 int i, err = 0;
4986 u32 std_prod_idx = dpr->rx_std_prod_idx;
4987 u32 jmb_prod_idx = dpr->rx_jmb_prod_idx;
4989 for (i = 1; i < tp->irq_cnt; i++)
4990 err |= tg3_rx_prodring_xfer(tp, dpr,
4991 &tp->napi[i].prodring);
4993 wmb();
4995 if (std_prod_idx != dpr->rx_std_prod_idx)
4996 tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG,
4997 dpr->rx_std_prod_idx);
4999 if (jmb_prod_idx != dpr->rx_jmb_prod_idx)
5000 tw32_rx_mbox(TG3_RX_JMB_PROD_IDX_REG,
5001 dpr->rx_jmb_prod_idx);
5003 mmiowb();
5005 if (err)
5006 tw32_f(HOSTCC_MODE, tp->coal_now);
5009 return work_done;
5012 static int tg3_poll_msix(struct napi_struct *napi, int budget)
5014 struct tg3_napi *tnapi = container_of(napi, struct tg3_napi, napi);
5015 struct tg3 *tp = tnapi->tp;
5016 int work_done = 0;
5017 struct tg3_hw_status *sblk = tnapi->hw_status;
5019 while (1) {
5020 work_done = tg3_poll_work(tnapi, work_done, budget);
5022 if (unlikely(tp->tg3_flags & TG3_FLAG_TX_RECOVERY_PENDING))
5023 goto tx_recovery;
5025 if (unlikely(work_done >= budget))
5026 break;
5028 /* tp->last_tag is used in tg3_int_reenable() below
5029 * to tell the hw how much work has been processed,
5030 * so we must read it before checking for more work. */
5032 tnapi->last_tag = sblk->status_tag;
5033 tnapi->last_irq_tag = tnapi->last_tag;
5034 rmb();
5036 /* check for RX/TX work to do */
5037 if (likely(sblk->idx[0].tx_consumer == tnapi->tx_cons &&
5038 *(tnapi->rx_rcb_prod_idx) == tnapi->rx_rcb_ptr)) {
5039 napi_complete(napi);
5040 /* Reenable interrupts. */
5041 tw32_mailbox(tnapi->int_mbox, tnapi->last_tag << 24);
5042 mmiowb();
5043 break;
5047 return work_done;
5049 tx_recovery:
5050 /* work_done is guaranteed to be less than budget. */
5051 napi_complete(napi);
5052 schedule_work(&tp->reset_task);
5053 return work_done;
5056 static int tg3_poll(struct napi_struct *napi, int budget)
5058 struct tg3_napi *tnapi = container_of(napi, struct tg3_napi, napi);
5059 struct tg3 *tp = tnapi->tp;
5060 int work_done = 0;
5061 struct tg3_hw_status *sblk = tnapi->hw_status;
5063 while (1) {
5064 tg3_poll_link(tp);
5066 work_done = tg3_poll_work(tnapi, work_done, budget);
5068 if (unlikely(tp->tg3_flags & TG3_FLAG_TX_RECOVERY_PENDING))
5069 goto tx_recovery;
5071 if (unlikely(work_done >= budget))
5072 break;
5074 if (tp->tg3_flags & TG3_FLAG_TAGGED_STATUS) {
5075 /* tp->last_tag is used in tg3_int_reenable() below
5076 * to tell the hw how much work has been processed,
5077 * so we must read it before checking for more work. */
5079 tnapi->last_tag = sblk->status_tag;
5080 tnapi->last_irq_tag = tnapi->last_tag;
5081 rmb();
5082 } else
5083 sblk->status &= ~SD_STATUS_UPDATED;
5085 if (likely(!tg3_has_work(tnapi))) {
5086 napi_complete(napi);
5087 tg3_int_reenable(tnapi);
5088 break;
5092 return work_done;
5094 tx_recovery:
5095 /* work_done is guaranteed to be less than budget. */
5096 napi_complete(napi);
5097 schedule_work(&tp->reset_task);
5098 return work_done;
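/* Minimal sketch of the NAPI contract that both tg3_poll() and
 * tg3_poll_msix() follow (illustrative only - example_do_rx() and
 * example_enable_irqs() are hypothetical stand-ins): consume at most
 * `budget` packets per call, and only complete NAPI and re-enable
 * interrupts when less than the full budget was used:
 *
 *	static int example_poll(struct napi_struct *napi, int budget)
 *	{
 *		int work_done = example_do_rx(napi, budget);
 *
 *		if (work_done < budget) {
 *			napi_complete(napi);
 *			example_enable_irqs(napi);
 *		}
 *		return work_done;
 *	}
 */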
5101 static void tg3_irq_quiesce(struct tg3 *tp)
5103 int i;
5105 BUG_ON(tp->irq_sync);
5107 tp->irq_sync = 1;
5108 smp_mb();
5110 for (i = 0; i < tp->irq_cnt; i++)
5111 synchronize_irq(tp->napi[i].irq_vec);
5114 static inline int tg3_irq_sync(struct tg3 *tp)
5116 return tp->irq_sync;
5119 /* Fully shut down all tg3 driver activity elsewhere in the system.
5120 * If irq_sync is non-zero, then we must also synchronize with the
5121 * IRQ handler. Most of the time, this is not necessary except when
5122 * shutting down the device. */
5124 static inline void tg3_full_lock(struct tg3 *tp, int irq_sync)
5126 spin_lock_bh(&tp->lock);
5127 if (irq_sync)
5128 tg3_irq_quiesce(tp);
5131 static inline void tg3_full_unlock(struct tg3 *tp)
5133 spin_unlock_bh(&tp->lock);
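/* Typical usage sketch (see tg3_change_mtu() below for a real caller):
 *
 *	tg3_full_lock(tp, 1);          // quiesce IRQs before a reset
 *	tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
 *	...reconfigure...
 *	err = tg3_restart_hw(tp, 0);
 *	tg3_full_unlock(tp);
 */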
5136 /* One-shot MSI handler - Chip automatically disables interrupt
5137 * after sending MSI so driver doesn't have to do it. */
5139 static irqreturn_t tg3_msi_1shot(int irq, void *dev_id)
5141 struct tg3_napi *tnapi = dev_id;
5142 struct tg3 *tp = tnapi->tp;
5144 prefetch(tnapi->hw_status);
5145 if (tnapi->rx_rcb)
5146 prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);
5148 if (likely(!tg3_irq_sync(tp)))
5149 napi_schedule(&tnapi->napi);
5151 return IRQ_HANDLED;
5154 /* MSI ISR - No need to check for interrupt sharing and no need to
5155 * flush status block and interrupt mailbox. PCI ordering rules
5156 * guarantee that MSI will arrive after the status block. */
5158 static irqreturn_t tg3_msi(int irq, void *dev_id)
5160 struct tg3_napi *tnapi = dev_id;
5161 struct tg3 *tp = tnapi->tp;
5163 prefetch(tnapi->hw_status);
5164 if (tnapi->rx_rcb)
5165 prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);
5167 /* Writing any value to intr-mbox-0 clears PCI INTA# and
5168 * chip-internal interrupt pending events.
5169 * Writing non-zero to intr-mbox-0 additionally tells the
5170 * NIC to stop sending us irqs, engaging "in-intr-handler"
5171 * event coalescing. */
5173 tw32_mailbox(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);
5174 if (likely(!tg3_irq_sync(tp)))
5175 napi_schedule(&tnapi->napi);
5177 return IRQ_RETVAL(1);
5180 static irqreturn_t tg3_interrupt(int irq, void *dev_id)
5182 struct tg3_napi *tnapi = dev_id;
5183 struct tg3 *tp = tnapi->tp;
5184 struct tg3_hw_status *sblk = tnapi->hw_status;
5185 unsigned int handled = 1;
5187 /* In INTx mode, it is possible for the interrupt to arrive at
5188 * the CPU before the status block posted prior to the interrupt.
5189 * Reading the PCI State register will confirm whether the
5190 * interrupt is ours and will flush the status block. */
5192 if (unlikely(!(sblk->status & SD_STATUS_UPDATED))) {
5193 if ((tp->tg3_flags & TG3_FLAG_CHIP_RESETTING) ||
5194 (tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
5195 handled = 0;
5196 goto out;
5201 /* Writing any value to intr-mbox-0 clears PCI INTA# and
5202 * chip-internal interrupt pending events.
5203 * Writing non-zero to intr-mbox-0 additionally tells the
5204 * NIC to stop sending us irqs, engaging "in-intr-handler"
5205 * event coalescing.
5207 * Flush the mailbox to de-assert the IRQ immediately to prevent
5208 * spurious interrupts. The flush impacts performance but
5209 * excessive spurious interrupts can be worse in some cases. */
5211 tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);
5212 if (tg3_irq_sync(tp))
5213 goto out;
5214 sblk->status &= ~SD_STATUS_UPDATED;
5215 if (likely(tg3_has_work(tnapi))) {
5216 prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);
5217 napi_schedule(&tnapi->napi);
5218 } else {
5219 /* No work, shared interrupt perhaps? re-enable
5220 * interrupts, and flush that PCI write. */
5222 tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
5223 0x00000000);
5225 out:
5226 return IRQ_RETVAL(handled);
5229 static irqreturn_t tg3_interrupt_tagged(int irq, void *dev_id)
5231 struct tg3_napi *tnapi = dev_id;
5232 struct tg3 *tp = tnapi->tp;
5233 struct tg3_hw_status *sblk = tnapi->hw_status;
5234 unsigned int handled = 1;
5236 /* In INTx mode, it is possible for the interrupt to arrive at
5237 * the CPU before the status block posted prior to the interrupt.
5238 * Reading the PCI State register will confirm whether the
5239 * interrupt is ours and will flush the status block. */
5241 if (unlikely(sblk->status_tag == tnapi->last_irq_tag)) {
5242 if ((tp->tg3_flags & TG3_FLAG_CHIP_RESETTING) ||
5243 (tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
5244 handled = 0;
5245 goto out;
5250 /* Writing any value to intr-mbox-0 clears PCI INTA# and
5251 * chip-internal interrupt pending events.
5252 * Writing non-zero to intr-mbox-0 additionally tells the
5253 * NIC to stop sending us irqs, engaging "in-intr-handler"
5254 * event coalescing.
5256 * Flush the mailbox to de-assert the IRQ immediately to prevent
5257 * spurious interrupts. The flush impacts performance but
5258 * excessive spurious interrupts can be worse in some cases. */
5260 tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);
5263 /* In a shared interrupt configuration, sometimes other devices'
5264 * interrupts will scream. We record the current status tag here
5265 * so that the above check can report that the screaming interrupts
5266 * are unhandled. Eventually they will be silenced. */
5268 tnapi->last_irq_tag = sblk->status_tag;
5270 if (tg3_irq_sync(tp))
5271 goto out;
5273 prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);
5275 napi_schedule(&tnapi->napi);
5277 out:
5278 return IRQ_RETVAL(handled);
5281 /* ISR for interrupt test */
5282 static irqreturn_t tg3_test_isr(int irq, void *dev_id)
5284 struct tg3_napi *tnapi = dev_id;
5285 struct tg3 *tp = tnapi->tp;
5286 struct tg3_hw_status *sblk = tnapi->hw_status;
5288 if ((sblk->status & SD_STATUS_UPDATED) ||
5289 !(tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
5290 tg3_disable_ints(tp);
5291 return IRQ_RETVAL(1);
5293 return IRQ_RETVAL(0);
5296 static int tg3_init_hw(struct tg3 *, int);
5297 static int tg3_halt(struct tg3 *, int, int);
5299 /* Restart hardware after configuration changes, self-test, etc.
5300 * Invoked with tp->lock held. */
5302 static int tg3_restart_hw(struct tg3 *tp, int reset_phy)
5303 __releases(tp->lock)
5304 __acquires(tp->lock)
5306 int err;
5308 err = tg3_init_hw(tp, reset_phy);
5309 if (err) {
5310 netdev_err(tp->dev,
5311 "Failed to re-initialize device, aborting\n");
5312 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
5313 tg3_full_unlock(tp);
5314 del_timer_sync(&tp->timer);
5315 tp->irq_sync = 0;
5316 tg3_napi_enable(tp);
5317 dev_close(tp->dev);
5318 tg3_full_lock(tp, 0);
5320 return err;
5323 #ifdef CONFIG_NET_POLL_CONTROLLER
5324 static void tg3_poll_controller(struct net_device *dev)
5326 int i;
5327 struct tg3 *tp = netdev_priv(dev);
5329 for (i = 0; i < tp->irq_cnt; i++)
5330 tg3_interrupt(tp->napi[i].irq_vec, &tp->napi[i]);
5332 #endif
5334 static void tg3_reset_task(struct work_struct *work)
5336 struct tg3 *tp = container_of(work, struct tg3, reset_task);
5337 int err;
5338 unsigned int restart_timer;
5340 tg3_full_lock(tp, 0);
5342 if (!netif_running(tp->dev)) {
5343 tg3_full_unlock(tp);
5344 return;
5347 tg3_full_unlock(tp);
5349 tg3_phy_stop(tp);
5351 tg3_netif_stop(tp);
5353 tg3_full_lock(tp, 1);
5355 restart_timer = tp->tg3_flags2 & TG3_FLG2_RESTART_TIMER;
5356 tp->tg3_flags2 &= ~TG3_FLG2_RESTART_TIMER;
5358 if (tp->tg3_flags & TG3_FLAG_TX_RECOVERY_PENDING) {
5359 tp->write32_tx_mbox = tg3_write32_tx_mbox;
5360 tp->write32_rx_mbox = tg3_write_flush_reg32;
5361 tp->tg3_flags |= TG3_FLAG_MBOX_WRITE_REORDER;
5362 tp->tg3_flags &= ~TG3_FLAG_TX_RECOVERY_PENDING;
5365 tg3_halt(tp, RESET_KIND_SHUTDOWN, 0);
5366 err = tg3_init_hw(tp, 1);
5367 if (err)
5368 goto out;
5370 tg3_netif_start(tp);
5372 if (restart_timer)
5373 mod_timer(&tp->timer, jiffies + 1);
5375 out:
5376 tg3_full_unlock(tp);
5378 if (!err)
5379 tg3_phy_start(tp);
5382 static void tg3_dump_short_state(struct tg3 *tp)
5384 netdev_err(tp->dev, "DEBUG: MAC_TX_STATUS[%08x] MAC_RX_STATUS[%08x]\n",
5385 tr32(MAC_TX_STATUS), tr32(MAC_RX_STATUS));
5386 netdev_err(tp->dev, "DEBUG: RDMAC_STATUS[%08x] WDMAC_STATUS[%08x]\n",
5387 tr32(RDMAC_STATUS), tr32(WDMAC_STATUS));
5390 static void tg3_tx_timeout(struct net_device *dev)
5392 struct tg3 *tp = netdev_priv(dev);
5394 if (netif_msg_tx_err(tp)) {
5395 netdev_err(dev, "transmit timed out, resetting\n");
5396 tg3_dump_short_state(tp);
5399 schedule_work(&tp->reset_task);
5402 /* Test for DMA buffers crossing any 4GB boundaries: 4G, 8G, etc */
5403 static inline int tg3_4g_overflow_test(dma_addr_t mapping, int len)
5405 u32 base = (u32) mapping & 0xffffffff;
5407 return ((base > 0xffffdcc0) &&
5408 (base + len + 8 < base));
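/* Example (illustrative values): a buffer mapped at base = 0xffffff00
 * with len = 0x200 gives base + len + 8 = 0x100000108, which truncates
 * to 0x108 < base, so the buffer straddles a 4GB boundary and the
 * workaround path is taken. The base > 0xffffdcc0 pre-check is just a
 * cheap filter: only mappings within 0x2340 (9024) bytes of a 4GB
 * boundary can possibly wrap, which presumably covers the largest frame
 * plus the 8-byte slack tested above.
 */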
5411 /* Test for DMA addresses > 40-bit */
5412 static inline int tg3_40bit_overflow_test(struct tg3 *tp, dma_addr_t mapping,
5413 int len)
5415 #if defined(CONFIG_HIGHMEM) && (BITS_PER_LONG == 64)
5416 if (tp->tg3_flags & TG3_FLAG_40BIT_DMA_BUG)
5417 return (((u64) mapping + len) > DMA_BIT_MASK(40));
5418 return 0;
5419 #else
5420 return 0;
5421 #endif
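/* Example (illustrative): DMA_BIT_MASK(40) is 0xffffffffff, so a mapping
 * at 0xfffffff000 with len = 0x2000 ends at 0x10000001000, past the
 * 40-bit limit, and would trigger the workaround on chips with
 * TG3_FLAG_40BIT_DMA_BUG set.
 */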
5424 static void tg3_set_txd(struct tg3_napi *, int, dma_addr_t, int, u32, u32);
5426 /* Work around 4GB and 40-bit hardware DMA bugs. */
5427 static int tigon3_dma_hwbug_workaround(struct tg3_napi *tnapi,
5428 struct sk_buff *skb, u32 last_plus_one,
5429 u32 *start, u32 base_flags, u32 mss)
5431 struct tg3 *tp = tnapi->tp;
5432 struct sk_buff *new_skb;
5433 dma_addr_t new_addr = 0;
5434 u32 entry = *start;
5435 int i, ret = 0;
5437 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701)
5438 new_skb = skb_copy(skb, GFP_ATOMIC);
5439 else {
5440 int more_headroom = 4 - ((unsigned long)skb->data & 3);
5442 new_skb = skb_copy_expand(skb,
5443 skb_headroom(skb) + more_headroom,
5444 skb_tailroom(skb), GFP_ATOMIC);
5447 if (!new_skb) {
5448 ret = -1;
5449 } else {
5450 /* New SKB is guaranteed to be linear. */
5451 entry = *start;
5452 new_addr = pci_map_single(tp->pdev, new_skb->data, new_skb->len,
5453 PCI_DMA_TODEVICE);
5454 /* Make sure the mapping succeeded */
5455 if (pci_dma_mapping_error(tp->pdev, new_addr)) {
5456 ret = -1;
5457 dev_kfree_skb(new_skb);
5458 new_skb = NULL;
5460 /* Make sure new skb does not cross any 4G boundaries.
5461 * Drop the packet if it does. */
5463 } else if ((tp->tg3_flags3 & TG3_FLG3_4G_DMA_BNDRY_BUG) &&
5464 tg3_4g_overflow_test(new_addr, new_skb->len)) {
5465 pci_unmap_single(tp->pdev, new_addr, new_skb->len,
5466 PCI_DMA_TODEVICE);
5467 ret = -1;
5468 dev_kfree_skb(new_skb);
5469 new_skb = NULL;
5470 } else {
5471 tg3_set_txd(tnapi, entry, new_addr, new_skb->len,
5472 base_flags, 1 | (mss << 1));
5473 *start = NEXT_TX(entry);
5477 /* Now clean up the sw ring entries. */
5478 i = 0;
5479 while (entry != last_plus_one) {
5480 int len;
5482 if (i == 0)
5483 len = skb_headlen(skb);
5484 else
5485 len = skb_shinfo(skb)->frags[i-1].size;
5487 pci_unmap_single(tp->pdev,
5488 dma_unmap_addr(&tnapi->tx_buffers[entry],
5489 mapping),
5490 len, PCI_DMA_TODEVICE);
5491 if (i == 0) {
5492 tnapi->tx_buffers[entry].skb = new_skb;
5493 dma_unmap_addr_set(&tnapi->tx_buffers[entry], mapping,
5494 new_addr);
5495 } else {
5496 tnapi->tx_buffers[entry].skb = NULL;
5498 entry = NEXT_TX(entry);
5499 i++;
5502 dev_kfree_skb(skb);
5504 return ret;
5507 static void tg3_set_txd(struct tg3_napi *tnapi, int entry,
5508 dma_addr_t mapping, int len, u32 flags,
5509 u32 mss_and_is_end)
5511 struct tg3_tx_buffer_desc *txd = &tnapi->tx_ring[entry];
5512 int is_end = (mss_and_is_end & 0x1);
5513 u32 mss = (mss_and_is_end >> 1);
5514 u32 vlan_tag = 0;
5516 if (is_end)
5517 flags |= TXD_FLAG_END;
5518 if (flags & TXD_FLAG_VLAN) {
5519 vlan_tag = flags >> 16;
5520 flags &= 0xffff;
5522 vlan_tag |= (mss << TXD_MSS_SHIFT);
5524 txd->addr_hi = ((u64) mapping >> 32);
5525 txd->addr_lo = ((u64) mapping & 0xffffffff);
5526 txd->len_flags = (len << TXD_LEN_SHIFT) | flags;
5527 txd->vlan_tag = vlan_tag << TXD_VLAN_TAG_SHIFT;
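/* Note on the packing above: bit 0 of mss_and_is_end selects
 * TXD_FLAG_END and the remaining bits carry the MSS; a VLAN tag (when
 * TXD_FLAG_VLAN is set) arrives in the upper 16 bits of the flags
 * argument and is stored, together with the MSS shifted by
 * TXD_MSS_SHIFT, in the descriptor's vlan_tag word.
 */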
5530 /* hard_start_xmit for devices that don't have any bugs and
5531 * support TG3_FLG2_HW_TSO_2 and TG3_FLG2_HW_TSO_3 only. */
5533 static netdev_tx_t tg3_start_xmit(struct sk_buff *skb,
5534 struct net_device *dev)
5536 struct tg3 *tp = netdev_priv(dev);
5537 u32 len, entry, base_flags, mss;
5538 dma_addr_t mapping;
5539 struct tg3_napi *tnapi;
5540 struct netdev_queue *txq;
5541 unsigned int i, last;
5543 txq = netdev_get_tx_queue(dev, skb_get_queue_mapping(skb));
5544 tnapi = &tp->napi[skb_get_queue_mapping(skb)];
5545 if (tp->tg3_flags3 & TG3_FLG3_ENABLE_TSS)
5546 tnapi++;
5548 /* We are running in BH disabled context with netif_tx_lock
5549 * and TX reclaim runs via tp->napi.poll inside of a software
5550 * interrupt. Furthermore, IRQ processing runs lockless so we have
5551 * no IRQ context deadlocks to worry about either. Rejoice! */
5553 if (unlikely(tg3_tx_avail(tnapi) <= (skb_shinfo(skb)->nr_frags + 1))) {
5554 if (!netif_tx_queue_stopped(txq)) {
5555 netif_tx_stop_queue(txq);
5557 /* This is a hard error, log it. */
5558 netdev_err(dev,
5559 "BUG! Tx Ring full when queue awake!\n");
5561 return NETDEV_TX_BUSY;
5564 entry = tnapi->tx_prod;
5565 base_flags = 0;
5566 mss = skb_shinfo(skb)->gso_size;
5567 if (mss) {
5568 int tcp_opt_len, ip_tcp_len;
5569 u32 hdrlen;
5571 if (skb_header_cloned(skb) &&
5572 pskb_expand_head(skb, 0, 0, GFP_ATOMIC)) {
5573 dev_kfree_skb(skb);
5574 goto out_unlock;
5577 if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6)
5578 hdrlen = skb_headlen(skb) - ETH_HLEN;
5579 else {
5580 struct iphdr *iph = ip_hdr(skb);
5582 tcp_opt_len = tcp_optlen(skb);
5583 ip_tcp_len = ip_hdrlen(skb) + sizeof(struct tcphdr);
5585 iph->check = 0;
5586 iph->tot_len = htons(mss + ip_tcp_len + tcp_opt_len);
5587 hdrlen = ip_tcp_len + tcp_opt_len;
5590 if (tp->tg3_flags2 & TG3_FLG2_HW_TSO_3) {
5591 mss |= (hdrlen & 0xc) << 12;
5592 if (hdrlen & 0x10)
5593 base_flags |= 0x00000010;
5594 base_flags |= (hdrlen & 0x3e0) << 5;
5595 } else
5596 mss |= hdrlen << 9;
5598 base_flags |= (TXD_FLAG_CPU_PRE_DMA |
5599 TXD_FLAG_CPU_POST_DMA);
5601 tcp_hdr(skb)->check = 0;
5603 } else if (skb->ip_summed == CHECKSUM_PARTIAL) {
5604 base_flags |= TXD_FLAG_TCPUDP_CSUM;
5607 #if TG3_VLAN_TAG_USED
5608 if (tp->vlgrp != NULL && vlan_tx_tag_present(skb))
5609 base_flags |= (TXD_FLAG_VLAN |
5610 (vlan_tx_tag_get(skb) << 16));
5611 #endif
5613 len = skb_headlen(skb);
5615 /* Queue skb data, a.k.a. the main skb fragment. */
5616 mapping = pci_map_single(tp->pdev, skb->data, len, PCI_DMA_TODEVICE);
5617 if (pci_dma_mapping_error(tp->pdev, mapping)) {
5618 dev_kfree_skb(skb);
5619 goto out_unlock;
5622 tnapi->tx_buffers[entry].skb = skb;
5623 dma_unmap_addr_set(&tnapi->tx_buffers[entry], mapping, mapping);
5625 if ((tp->tg3_flags3 & TG3_FLG3_USE_JUMBO_BDFLAG) &&
5626 !mss && skb->len > ETH_DATA_LEN)
5627 base_flags |= TXD_FLAG_JMB_PKT;
5629 tg3_set_txd(tnapi, entry, mapping, len, base_flags,
5630 (skb_shinfo(skb)->nr_frags == 0) | (mss << 1));
5632 entry = NEXT_TX(entry);
5634 /* Now loop through additional data fragments, and queue them. */
5635 if (skb_shinfo(skb)->nr_frags > 0) {
5636 last = skb_shinfo(skb)->nr_frags - 1;
5637 for (i = 0; i <= last; i++) {
5638 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
5640 len = frag->size;
5641 mapping = pci_map_page(tp->pdev,
5642 frag->page,
5643 frag->page_offset,
5644 len, PCI_DMA_TODEVICE);
5645 if (pci_dma_mapping_error(tp->pdev, mapping))
5646 goto dma_error;
5648 tnapi->tx_buffers[entry].skb = NULL;
5649 dma_unmap_addr_set(&tnapi->tx_buffers[entry], mapping,
5650 mapping);
5652 tg3_set_txd(tnapi, entry, mapping, len,
5653 base_flags, (i == last) | (mss << 1));
5655 entry = NEXT_TX(entry);
5659 /* Packets are ready, update Tx producer idx local and on card. */
5660 tw32_tx_mbox(tnapi->prodmbox, entry);
5662 tnapi->tx_prod = entry;
5663 if (unlikely(tg3_tx_avail(tnapi) <= (MAX_SKB_FRAGS + 1))) {
5664 netif_tx_stop_queue(txq);
5666 /* netif_tx_stop_queue() must be done before checking
5667 * tx index in tg3_tx_avail() below, because in
5668 * tg3_tx(), we update tx index before checking for
5669 * netif_tx_queue_stopped(). */
5671 smp_mb();
5672 if (tg3_tx_avail(tnapi) > TG3_TX_WAKEUP_THRESH(tnapi))
5673 netif_tx_wake_queue(txq);
5676 out_unlock:
5677 mmiowb();
5679 return NETDEV_TX_OK;
5681 dma_error:
5682 last = i;
5683 entry = tnapi->tx_prod;
5684 tnapi->tx_buffers[entry].skb = NULL;
5685 pci_unmap_single(tp->pdev,
5686 dma_unmap_addr(&tnapi->tx_buffers[entry], mapping),
5687 skb_headlen(skb),
5688 PCI_DMA_TODEVICE);
5689 for (i = 0; i <= last; i++) {
5690 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
5691 entry = NEXT_TX(entry);
5693 pci_unmap_page(tp->pdev,
5694 dma_unmap_addr(&tnapi->tx_buffers[entry],
5695 mapping),
5696 frag->size, PCI_DMA_TODEVICE);
5699 dev_kfree_skb(skb);
5700 return NETDEV_TX_OK;
5703 static netdev_tx_t tg3_start_xmit_dma_bug(struct sk_buff *,
5704 struct net_device *);
5706 /* Use GSO to work around a rare TSO bug that may be triggered when the
5707 * TSO header is greater than 80 bytes. */
5709 static int tg3_tso_bug(struct tg3 *tp, struct sk_buff *skb)
5711 struct sk_buff *segs, *nskb;
5712 u32 frag_cnt_est = skb_shinfo(skb)->gso_segs * 3;
5714 /* Estimate the number of fragments in the worst case */
5715 if (unlikely(tg3_tx_avail(&tp->napi[0]) <= frag_cnt_est)) {
5716 netif_stop_queue(tp->dev);
5718 /* netif_tx_stop_queue() must be done before checking
5719 * tx index in tg3_tx_avail() below, because in
5720 * tg3_tx(), we update tx index before checking for
5721 * netif_tx_queue_stopped(). */
5723 smp_mb();
5724 if (tg3_tx_avail(&tp->napi[0]) <= frag_cnt_est)
5725 return NETDEV_TX_BUSY;
5727 netif_wake_queue(tp->dev);
5730 segs = skb_gso_segment(skb, tp->dev->features & ~NETIF_F_TSO);
5731 if (IS_ERR(segs))
5732 goto tg3_tso_bug_end;
5734 do {
5735 nskb = segs;
5736 segs = segs->next;
5737 nskb->next = NULL;
5738 tg3_start_xmit_dma_bug(nskb, tp->dev);
5739 } while (segs);
5741 tg3_tso_bug_end:
5742 dev_kfree_skb(skb);
5744 return NETDEV_TX_OK;
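/* Note: the workaround above lets the GSO layer split the over-long TSO
 * request into wire-sized segments (skb_gso_segment() with TSO masked
 * out of the feature flags) and feeds each segment back through
 * tg3_start_xmit_dma_bug(). The gso_segs * 3 estimate appears to budget
 * a few descriptors per resulting segment before deciding whether to
 * stop the queue.
 */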
5747 /* hard_start_xmit for devices that have the 4G bug and/or 40-bit bug and
5748 * support TG3_FLG2_HW_TSO_1 or firmware TSO only. */
5750 static netdev_tx_t tg3_start_xmit_dma_bug(struct sk_buff *skb,
5751 struct net_device *dev)
5753 struct tg3 *tp = netdev_priv(dev);
5754 u32 len, entry, base_flags, mss;
5755 int would_hit_hwbug;
5756 dma_addr_t mapping;
5757 struct tg3_napi *tnapi;
5758 struct netdev_queue *txq;
5759 unsigned int i, last;
5761 txq = netdev_get_tx_queue(dev, skb_get_queue_mapping(skb));
5762 tnapi = &tp->napi[skb_get_queue_mapping(skb)];
5763 if (tp->tg3_flags3 & TG3_FLG3_ENABLE_TSS)
5764 tnapi++;
5766 /* We are running in BH disabled context with netif_tx_lock
5767 * and TX reclaim runs via tp->napi.poll inside of a software
5768 * interrupt. Furthermore, IRQ processing runs lockless so we have
5769 * no IRQ context deadlocks to worry about either. Rejoice! */
5771 if (unlikely(tg3_tx_avail(tnapi) <= (skb_shinfo(skb)->nr_frags + 1))) {
5772 if (!netif_tx_queue_stopped(txq)) {
5773 netif_tx_stop_queue(txq);
5775 /* This is a hard error, log it. */
5776 netdev_err(dev,
5777 "BUG! Tx Ring full when queue awake!\n");
5779 return NETDEV_TX_BUSY;
5782 entry = tnapi->tx_prod;
5783 base_flags = 0;
5784 if (skb->ip_summed == CHECKSUM_PARTIAL)
5785 base_flags |= TXD_FLAG_TCPUDP_CSUM;
5787 mss = skb_shinfo(skb)->gso_size;
5788 if (mss) {
5789 struct iphdr *iph;
5790 u32 tcp_opt_len, hdr_len;
5792 if (skb_header_cloned(skb) &&
5793 pskb_expand_head(skb, 0, 0, GFP_ATOMIC)) {
5794 dev_kfree_skb(skb);
5795 goto out_unlock;
5798 iph = ip_hdr(skb);
5799 tcp_opt_len = tcp_optlen(skb);
5801 if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6) {
5802 hdr_len = skb_headlen(skb) - ETH_HLEN;
5803 } else {
5804 u32 ip_tcp_len;
5806 ip_tcp_len = ip_hdrlen(skb) + sizeof(struct tcphdr);
5807 hdr_len = ip_tcp_len + tcp_opt_len;
5809 iph->check = 0;
5810 iph->tot_len = htons(mss + hdr_len);
5813 if (unlikely((ETH_HLEN + hdr_len) > 80) &&
5814 (tp->tg3_flags2 & TG3_FLG2_TSO_BUG))
5815 return tg3_tso_bug(tp, skb);
5817 base_flags |= (TXD_FLAG_CPU_PRE_DMA |
5818 TXD_FLAG_CPU_POST_DMA);
5820 if (tp->tg3_flags2 & TG3_FLG2_HW_TSO) {
5821 tcp_hdr(skb)->check = 0;
5822 base_flags &= ~TXD_FLAG_TCPUDP_CSUM;
5823 } else
5824 tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr,
5825 iph->daddr, 0,
5826 IPPROTO_TCP,
5829 if (tp->tg3_flags2 & TG3_FLG2_HW_TSO_3) {
5830 mss |= (hdr_len & 0xc) << 12;
5831 if (hdr_len & 0x10)
5832 base_flags |= 0x00000010;
5833 base_flags |= (hdr_len & 0x3e0) << 5;
5834 } else if (tp->tg3_flags2 & TG3_FLG2_HW_TSO_2)
5835 mss |= hdr_len << 9;
5836 else if ((tp->tg3_flags2 & TG3_FLG2_HW_TSO_1) ||
5837 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
5838 if (tcp_opt_len || iph->ihl > 5) {
5839 int tsflags;
5841 tsflags = (iph->ihl - 5) + (tcp_opt_len >> 2);
5842 mss |= (tsflags << 11);
5844 } else {
5845 if (tcp_opt_len || iph->ihl > 5) {
5846 int tsflags;
5848 tsflags = (iph->ihl - 5) + (tcp_opt_len >> 2);
5849 base_flags |= tsflags << 12;
5853 #if TG3_VLAN_TAG_USED
5854 if (tp->vlgrp != NULL && vlan_tx_tag_present(skb))
5855 base_flags |= (TXD_FLAG_VLAN |
5856 (vlan_tx_tag_get(skb) << 16));
5857 #endif
5859 if ((tp->tg3_flags3 & TG3_FLG3_USE_JUMBO_BDFLAG) &&
5860 !mss && skb->len > ETH_DATA_LEN)
5861 base_flags |= TXD_FLAG_JMB_PKT;
5863 len = skb_headlen(skb);
5865 mapping = pci_map_single(tp->pdev, skb->data, len, PCI_DMA_TODEVICE);
5866 if (pci_dma_mapping_error(tp->pdev, mapping)) {
5867 dev_kfree_skb(skb);
5868 goto out_unlock;
5871 tnapi->tx_buffers[entry].skb = skb;
5872 dma_unmap_addr_set(&tnapi->tx_buffers[entry], mapping, mapping);
5874 would_hit_hwbug = 0;
5876 if ((tp->tg3_flags3 & TG3_FLG3_SHORT_DMA_BUG) && len <= 8)
5877 would_hit_hwbug = 1;
5879 if ((tp->tg3_flags3 & TG3_FLG3_4G_DMA_BNDRY_BUG) &&
5880 tg3_4g_overflow_test(mapping, len))
5881 would_hit_hwbug = 1;
5883 if ((tp->tg3_flags3 & TG3_FLG3_40BIT_DMA_LIMIT_BUG) &&
5884 tg3_40bit_overflow_test(tp, mapping, len))
5885 would_hit_hwbug = 1;
5887 if (tp->tg3_flags3 & TG3_FLG3_5701_DMA_BUG)
5888 would_hit_hwbug = 1;
5890 tg3_set_txd(tnapi, entry, mapping, len, base_flags,
5891 (skb_shinfo(skb)->nr_frags == 0) | (mss << 1));
5893 entry = NEXT_TX(entry);
5895 /* Now loop through additional data fragments, and queue them. */
5896 if (skb_shinfo(skb)->nr_frags > 0) {
5897 last = skb_shinfo(skb)->nr_frags - 1;
5898 for (i = 0; i <= last; i++) {
5899 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
5901 len = frag->size;
5902 mapping = pci_map_page(tp->pdev,
5903 frag->page,
5904 frag->page_offset,
5905 len, PCI_DMA_TODEVICE);
5907 tnapi->tx_buffers[entry].skb = NULL;
5908 dma_unmap_addr_set(&tnapi->tx_buffers[entry], mapping,
5909 mapping);
5910 if (pci_dma_mapping_error(tp->pdev, mapping))
5911 goto dma_error;
5913 if ((tp->tg3_flags3 & TG3_FLG3_SHORT_DMA_BUG) &&
5914 len <= 8)
5915 would_hit_hwbug = 1;
5917 if ((tp->tg3_flags3 & TG3_FLG3_4G_DMA_BNDRY_BUG) &&
5918 tg3_4g_overflow_test(mapping, len))
5919 would_hit_hwbug = 1;
5921 if ((tp->tg3_flags3 & TG3_FLG3_40BIT_DMA_LIMIT_BUG) &&
5922 tg3_40bit_overflow_test(tp, mapping, len))
5923 would_hit_hwbug = 1;
5925 if (tp->tg3_flags2 & TG3_FLG2_HW_TSO)
5926 tg3_set_txd(tnapi, entry, mapping, len,
5927 base_flags, (i == last)|(mss << 1));
5928 else
5929 tg3_set_txd(tnapi, entry, mapping, len,
5930 base_flags, (i == last));
5932 entry = NEXT_TX(entry);
5936 if (would_hit_hwbug) {
5937 u32 last_plus_one = entry;
5938 u32 start;
5940 start = entry - 1 - skb_shinfo(skb)->nr_frags;
5941 start &= (TG3_TX_RING_SIZE - 1);
5943 /* If the workaround fails due to memory/mapping
5944 * failure, silently drop this packet. */
5946 if (tigon3_dma_hwbug_workaround(tnapi, skb, last_plus_one,
5947 &start, base_flags, mss))
5948 goto out_unlock;
5950 entry = start;
5953 /* Packets are ready, update Tx producer idx local and on card. */
5954 tw32_tx_mbox(tnapi->prodmbox, entry);
5956 tnapi->tx_prod = entry;
5957 if (unlikely(tg3_tx_avail(tnapi) <= (MAX_SKB_FRAGS + 1))) {
5958 netif_tx_stop_queue(txq);
5960 /* netif_tx_stop_queue() must be done before checking
5961 * tx index in tg3_tx_avail() below, because in
5962 * tg3_tx(), we update tx index before checking for
5963 * netif_tx_queue_stopped(). */
5965 smp_mb();
5966 if (tg3_tx_avail(tnapi) > TG3_TX_WAKEUP_THRESH(tnapi))
5967 netif_tx_wake_queue(txq);
5970 out_unlock:
5971 mmiowb();
5973 return NETDEV_TX_OK;
5975 dma_error:
5976 last = i;
5977 entry = tnapi->tx_prod;
5978 tnapi->tx_buffers[entry].skb = NULL;
5979 pci_unmap_single(tp->pdev,
5980 dma_unmap_addr(&tnapi->tx_buffers[entry], mapping),
5981 skb_headlen(skb),
5982 PCI_DMA_TODEVICE);
5983 for (i = 0; i <= last; i++) {
5984 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
5985 entry = NEXT_TX(entry);
5987 pci_unmap_page(tp->pdev,
5988 dma_unmap_addr(&tnapi->tx_buffers[entry],
5989 mapping),
5990 frag->size, PCI_DMA_TODEVICE);
5993 dev_kfree_skb(skb);
5994 return NETDEV_TX_OK;
5997 static inline void tg3_set_mtu(struct net_device *dev, struct tg3 *tp,
5998 int new_mtu)
6000 dev->mtu = new_mtu;
6002 if (new_mtu > ETH_DATA_LEN) {
6003 if (tp->tg3_flags2 & TG3_FLG2_5780_CLASS) {
6004 tp->tg3_flags2 &= ~TG3_FLG2_TSO_CAPABLE;
6005 ethtool_op_set_tso(dev, 0);
6006 } else {
6007 tp->tg3_flags |= TG3_FLAG_JUMBO_RING_ENABLE;
6009 } else {
6010 if (tp->tg3_flags2 & TG3_FLG2_5780_CLASS)
6011 tp->tg3_flags2 |= TG3_FLG2_TSO_CAPABLE;
6012 tp->tg3_flags &= ~TG3_FLAG_JUMBO_RING_ENABLE;
6016 static int tg3_change_mtu(struct net_device *dev, int new_mtu)
6018 struct tg3 *tp = netdev_priv(dev);
6019 int err;
6021 if (new_mtu < TG3_MIN_MTU || new_mtu > TG3_MAX_MTU(tp))
6022 return -EINVAL;
6024 if (!netif_running(dev)) {
6025 /* We'll just catch it later when the
6026 * device is brought up. */
6028 tg3_set_mtu(dev, tp, new_mtu);
6029 return 0;
6032 tg3_phy_stop(tp);
6034 tg3_netif_stop(tp);
6036 tg3_full_lock(tp, 1);
6038 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
6040 tg3_set_mtu(dev, tp, new_mtu);
6042 err = tg3_restart_hw(tp, 0);
6044 if (!err)
6045 tg3_netif_start(tp);
6047 tg3_full_unlock(tp);
6049 if (!err)
6050 tg3_phy_start(tp);
6052 return err;
6055 static void tg3_rx_prodring_free(struct tg3 *tp,
6056 struct tg3_rx_prodring_set *tpr)
6058 int i;
6060 if (tpr != &tp->napi[0].prodring) {
6061 for (i = tpr->rx_std_cons_idx; i != tpr->rx_std_prod_idx;
6062 i = (i + 1) % TG3_RX_RING_SIZE)
6063 tg3_rx_skb_free(tp, &tpr->rx_std_buffers[i],
6064 tp->rx_pkt_map_sz);
6066 if (tp->tg3_flags & TG3_FLAG_JUMBO_CAPABLE) {
6067 for (i = tpr->rx_jmb_cons_idx;
6068 i != tpr->rx_jmb_prod_idx;
6069 i = (i + 1) % TG3_RX_JUMBO_RING_SIZE) {
6070 tg3_rx_skb_free(tp, &tpr->rx_jmb_buffers[i],
6071 TG3_RX_JMB_MAP_SZ);
6075 return;
6078 for (i = 0; i < TG3_RX_RING_SIZE; i++)
6079 tg3_rx_skb_free(tp, &tpr->rx_std_buffers[i],
6080 tp->rx_pkt_map_sz);
6082 if (tp->tg3_flags & TG3_FLAG_JUMBO_CAPABLE) {
6083 for (i = 0; i < TG3_RX_JUMBO_RING_SIZE; i++)
6084 tg3_rx_skb_free(tp, &tpr->rx_jmb_buffers[i],
6085 TG3_RX_JMB_MAP_SZ);
6089 /* Initialize rx rings for packet processing.
6091 * The chip has been shut down and the driver detached from
6092 * the networking stack, so no interrupts or new tx packets will
6093 * end up in the driver. tp->{tx,}lock are held and thus
6094 * we may not sleep. */
6096 static int tg3_rx_prodring_alloc(struct tg3 *tp,
6097 struct tg3_rx_prodring_set *tpr)
6099 u32 i, rx_pkt_dma_sz;
6101 tpr->rx_std_cons_idx = 0;
6102 tpr->rx_std_prod_idx = 0;
6103 tpr->rx_jmb_cons_idx = 0;
6104 tpr->rx_jmb_prod_idx = 0;
6106 if (tpr != &tp->napi[0].prodring) {
6107 memset(&tpr->rx_std_buffers[0], 0, TG3_RX_STD_BUFF_RING_SIZE);
6108 if (tp->tg3_flags & TG3_FLAG_JUMBO_CAPABLE)
6109 memset(&tpr->rx_jmb_buffers[0], 0,
6110 TG3_RX_JMB_BUFF_RING_SIZE);
6111 goto done;
6114 /* Zero out all descriptors. */
6115 memset(tpr->rx_std, 0, TG3_RX_RING_BYTES);
6117 rx_pkt_dma_sz = TG3_RX_STD_DMA_SZ;
6118 if ((tp->tg3_flags2 & TG3_FLG2_5780_CLASS) &&
6119 tp->dev->mtu > ETH_DATA_LEN)
6120 rx_pkt_dma_sz = TG3_RX_JMB_DMA_SZ;
6121 tp->rx_pkt_map_sz = TG3_RX_DMA_TO_MAP_SZ(rx_pkt_dma_sz);
6123 /* Initialize invariants of the rings, we only set this
6124 * stuff once. This works because the card does not
6125 * write into the rx buffer posting rings.
6127 for (i = 0; i < TG3_RX_RING_SIZE; i++) {
6128 struct tg3_rx_buffer_desc *rxd;
6130 rxd = &tpr->rx_std[i];
6131 rxd->idx_len = rx_pkt_dma_sz << RXD_LEN_SHIFT;
6132 rxd->type_flags = (RXD_FLAG_END << RXD_FLAGS_SHIFT);
6133 rxd->opaque = (RXD_OPAQUE_RING_STD |
6134 (i << RXD_OPAQUE_INDEX_SHIFT));
6137 /* Now allocate fresh SKBs for each rx ring. */
6138 for (i = 0; i < tp->rx_pending; i++) {
6139 if (tg3_alloc_rx_skb(tp, tpr, RXD_OPAQUE_RING_STD, i) < 0) {
6140 netdev_warn(tp->dev,
6141 "Using a smaller RX standard ring. Only "
6142 "%d out of %d buffers were allocated "
6143 "successfully\n", i, tp->rx_pending);
6144 if (i == 0)
6145 goto initfail;
6146 tp->rx_pending = i;
6147 break;
6151 if (!(tp->tg3_flags & TG3_FLAG_JUMBO_CAPABLE))
6152 goto done;
6154 memset(tpr->rx_jmb, 0, TG3_RX_JUMBO_RING_BYTES);
6156 if (!(tp->tg3_flags & TG3_FLAG_JUMBO_RING_ENABLE))
6157 goto done;
6159 for (i = 0; i < TG3_RX_JUMBO_RING_SIZE; i++) {
6160 struct tg3_rx_buffer_desc *rxd;
6162 rxd = &tpr->rx_jmb[i].std;
6163 rxd->idx_len = TG3_RX_JMB_DMA_SZ << RXD_LEN_SHIFT;
6164 rxd->type_flags = (RXD_FLAG_END << RXD_FLAGS_SHIFT) |
6165 RXD_FLAG_JUMBO;
6166 rxd->opaque = (RXD_OPAQUE_RING_JUMBO |
6167 (i << RXD_OPAQUE_INDEX_SHIFT));
6170 for (i = 0; i < tp->rx_jumbo_pending; i++) {
6171 if (tg3_alloc_rx_skb(tp, tpr, RXD_OPAQUE_RING_JUMBO, i) < 0) {
6172 netdev_warn(tp->dev,
6173 "Using a smaller RX jumbo ring. Only %d "
6174 "out of %d buffers were allocated "
6175 "successfully\n", i, tp->rx_jumbo_pending);
6176 if (i == 0)
6177 goto initfail;
6178 tp->rx_jumbo_pending = i;
6179 break;
6183 done:
6184 return 0;
6186 initfail:
6187 tg3_rx_prodring_free(tp, tpr);
6188 return -ENOMEM;
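/* tg3_rx_prodring_fini() releases both halves of a producer ring set: the
 * kzalloc'd buffer bookkeeping arrays and the coherent DMA descriptor
 * rings, leaving every pointer NULL.  Since kfree(NULL) and the NULL
 * checks tolerate a partially built set, tg3_rx_prodring_init() can call
 * it on any allocation failure.
 */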
6191 static void tg3_rx_prodring_fini(struct tg3 *tp,
6192 struct tg3_rx_prodring_set *tpr)
6194 kfree(tpr->rx_std_buffers);
6195 tpr->rx_std_buffers = NULL;
6196 kfree(tpr->rx_jmb_buffers);
6197 tpr->rx_jmb_buffers = NULL;
6198 if (tpr->rx_std) {
6199 pci_free_consistent(tp->pdev, TG3_RX_RING_BYTES,
6200 tpr->rx_std, tpr->rx_std_mapping);
6201 tpr->rx_std = NULL;
6203 if (tpr->rx_jmb) {
6204 pci_free_consistent(tp->pdev, TG3_RX_JUMBO_RING_BYTES,
6205 tpr->rx_jmb, tpr->rx_jmb_mapping);
6206 tpr->rx_jmb = NULL;
6210 static int tg3_rx_prodring_init(struct tg3 *tp,
6211 struct tg3_rx_prodring_set *tpr)
6213 tpr->rx_std_buffers = kzalloc(TG3_RX_STD_BUFF_RING_SIZE, GFP_KERNEL);
6214 if (!tpr->rx_std_buffers)
6215 return -ENOMEM;
6217 tpr->rx_std = pci_alloc_consistent(tp->pdev, TG3_RX_RING_BYTES,
6218 &tpr->rx_std_mapping);
6219 if (!tpr->rx_std)
6220 goto err_out;
6222 if (tp->tg3_flags & TG3_FLAG_JUMBO_CAPABLE) {
6223 tpr->rx_jmb_buffers = kzalloc(TG3_RX_JMB_BUFF_RING_SIZE,
6224 GFP_KERNEL);
6225 if (!tpr->rx_jmb_buffers)
6226 goto err_out;
6228 tpr->rx_jmb = pci_alloc_consistent(tp->pdev,
6229 TG3_RX_JUMBO_RING_BYTES,
6230 &tpr->rx_jmb_mapping);
6231 if (!tpr->rx_jmb)
6232 goto err_out;
6235 return 0;
6237 err_out:
6238 tg3_rx_prodring_fini(tp, tpr);
6239 return -ENOMEM;
6242 /* Free up pending packets in all rx/tx rings.
6244 * The chip has been shut down and the driver detached from
6245 * the network stack, so no interrupts or new tx packets will
6246 * end up in the driver. tp->{tx,}lock is not held and we are not
6247 * in an interrupt context and thus may sleep.
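/* Each queued skb occupies one tx_buffers slot for its linear head plus
 * one slot per page fragment, so the tx cleanup below advances the index
 * once for the head and once per fragment before freeing the skb.
 */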
6249 static void tg3_free_rings(struct tg3 *tp)
6251 int i, j;
6253 for (j = 0; j < tp->irq_cnt; j++) {
6254 struct tg3_napi *tnapi = &tp->napi[j];
6256 tg3_rx_prodring_free(tp, &tnapi->prodring);
6258 if (!tnapi->tx_buffers)
6259 continue;
6261 for (i = 0; i < TG3_TX_RING_SIZE; ) {
6262 struct ring_info *txp;
6263 struct sk_buff *skb;
6264 unsigned int k;
6266 txp = &tnapi->tx_buffers[i];
6267 skb = txp->skb;
6269 if (skb == NULL) {
6270 i++;
6271 continue;
6274 pci_unmap_single(tp->pdev,
6275 dma_unmap_addr(txp, mapping),
6276 skb_headlen(skb),
6277 PCI_DMA_TODEVICE);
6278 txp->skb = NULL;
6280 i++;
6282 for (k = 0; k < skb_shinfo(skb)->nr_frags; k++) {
6283 txp = &tnapi->tx_buffers[i & (TG3_TX_RING_SIZE - 1)];
6284 pci_unmap_page(tp->pdev,
6285 dma_unmap_addr(txp, mapping),
6286 skb_shinfo(skb)->frags[k].size,
6287 PCI_DMA_TODEVICE);
6288 i++;
6291 dev_kfree_skb_any(skb);
6296 /* Initialize tx/rx rings for packet processing.
6298 * The chip has been shut down and the driver detached from
6299 * the network stack, so no interrupts or new tx packets will
6300 * end up in the driver. tp->{tx,}lock are held and thus
6301 * we may not sleep.
6303 static int tg3_init_rings(struct tg3 *tp)
6305 int i;
6307 /* Free up all the SKBs. */
6308 tg3_free_rings(tp);
6310 for (i = 0; i < tp->irq_cnt; i++) {
6311 struct tg3_napi *tnapi = &tp->napi[i];
6313 tnapi->last_tag = 0;
6314 tnapi->last_irq_tag = 0;
6315 tnapi->hw_status->status = 0;
6316 tnapi->hw_status->status_tag = 0;
6317 memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
6319 tnapi->tx_prod = 0;
6320 tnapi->tx_cons = 0;
6321 if (tnapi->tx_ring)
6322 memset(tnapi->tx_ring, 0, TG3_TX_RING_BYTES);
6324 tnapi->rx_rcb_ptr = 0;
6325 if (tnapi->rx_rcb)
6326 memset(tnapi->rx_rcb, 0, TG3_RX_RCB_RING_BYTES(tp));
6328 if (tg3_rx_prodring_alloc(tp, &tnapi->prodring)) {
6329 tg3_free_rings(tp);
6330 return -ENOMEM;
6334 return 0;
6338 * Must not be invoked with interrupt sources disabled and
6339 * the hardware shut down.
6341 static void tg3_free_consistent(struct tg3 *tp)
6343 int i;
6345 for (i = 0; i < tp->irq_cnt; i++) {
6346 struct tg3_napi *tnapi = &tp->napi[i];
6348 if (tnapi->tx_ring) {
6349 pci_free_consistent(tp->pdev, TG3_TX_RING_BYTES,
6350 tnapi->tx_ring, tnapi->tx_desc_mapping);
6351 tnapi->tx_ring = NULL;
6354 kfree(tnapi->tx_buffers);
6355 tnapi->tx_buffers = NULL;
6357 if (tnapi->rx_rcb) {
6358 pci_free_consistent(tp->pdev, TG3_RX_RCB_RING_BYTES(tp),
6359 tnapi->rx_rcb,
6360 tnapi->rx_rcb_mapping);
6361 tnapi->rx_rcb = NULL;
6364 tg3_rx_prodring_fini(tp, &tnapi->prodring);
6366 if (tnapi->hw_status) {
6367 pci_free_consistent(tp->pdev, TG3_HW_STATUS_SIZE,
6368 tnapi->hw_status,
6369 tnapi->status_mapping);
6370 tnapi->hw_status = NULL;
6374 if (tp->hw_stats) {
6375 pci_free_consistent(tp->pdev, sizeof(struct tg3_hw_stats),
6376 tp->hw_stats, tp->stats_mapping);
6377 tp->hw_stats = NULL;
6382 * Must not be invoked with interrupt sources disabled and
6383 * the hardware shut down. Can sleep.
6385 static int tg3_alloc_consistent(struct tg3 *tp)
6387 int i;
6389 tp->hw_stats = pci_alloc_consistent(tp->pdev,
6390 sizeof(struct tg3_hw_stats),
6391 &tp->stats_mapping);
6392 if (!tp->hw_stats)
6393 goto err_out;
6395 memset(tp->hw_stats, 0, sizeof(struct tg3_hw_stats));
6397 for (i = 0; i < tp->irq_cnt; i++) {
6398 struct tg3_napi *tnapi = &tp->napi[i];
6399 struct tg3_hw_status *sblk;
6401 tnapi->hw_status = pci_alloc_consistent(tp->pdev,
6402 TG3_HW_STATUS_SIZE,
6403 &tnapi->status_mapping);
6404 if (!tnapi->hw_status)
6405 goto err_out;
6407 memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
6408 sblk = tnapi->hw_status;
6410 if (tg3_rx_prodring_init(tp, &tnapi->prodring))
6411 goto err_out;
6413 /* If multivector TSS is enabled, vector 0 does not handle
6414 * tx interrupts. Don't allocate any resources for it.
6416 if ((!i && !(tp->tg3_flags3 & TG3_FLG3_ENABLE_TSS)) ||
6417 (i && (tp->tg3_flags3 & TG3_FLG3_ENABLE_TSS))) {
6418 tnapi->tx_buffers = kzalloc(sizeof(struct ring_info) *
6419 TG3_TX_RING_SIZE,
6420 GFP_KERNEL);
6421 if (!tnapi->tx_buffers)
6422 goto err_out;
6424 tnapi->tx_ring = pci_alloc_consistent(tp->pdev,
6425 TG3_TX_RING_BYTES,
6426 &tnapi->tx_desc_mapping);
6427 if (!tnapi->tx_ring)
6428 goto err_out;
6432 * When RSS is enabled, the status block format changes
6433 * slightly. The "rx_jumbo_consumer", "reserved",
6434 * and "rx_mini_consumer" members get mapped to the
6435 * other three rx return ring producer indexes.
6437 switch (i) {
6438 default:
6439 tnapi->rx_rcb_prod_idx = &sblk->idx[0].rx_producer;
6440 break;
6441 case 2:
6442 tnapi->rx_rcb_prod_idx = &sblk->rx_jumbo_consumer;
6443 break;
6444 case 3:
6445 tnapi->rx_rcb_prod_idx = &sblk->reserved;
6446 break;
6447 case 4:
6448 tnapi->rx_rcb_prod_idx = &sblk->rx_mini_consumer;
6449 break;
6453 * If multivector RSS is enabled, vector 0 does not handle
6454 * rx or tx interrupts. Don't allocate any resources for it.
6456 if (!i && (tp->tg3_flags3 & TG3_FLG3_ENABLE_RSS))
6457 continue;
6459 tnapi->rx_rcb = pci_alloc_consistent(tp->pdev,
6460 TG3_RX_RCB_RING_BYTES(tp),
6461 &tnapi->rx_rcb_mapping);
6462 if (!tnapi->rx_rcb)
6463 goto err_out;
6465 memset(tnapi->rx_rcb, 0, TG3_RX_RCB_RING_BYTES(tp));
6468 return 0;
6470 err_out:
6471 tg3_free_consistent(tp);
6472 return -ENOMEM;
6475 #define MAX_WAIT_CNT 1000
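/* The stop/abort paths below poll every 100 usec, so MAX_WAIT_CNT bounds
 * each wait at roughly 100 ms; after that the block is treated as stuck
 * and a non-silent caller gets -ENODEV.
 */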
6477 /* To stop a block, clear the enable bit and poll till it
6478 * clears. tp->lock is held.
6480 static int tg3_stop_block(struct tg3 *tp, unsigned long ofs, u32 enable_bit, int silent)
6482 unsigned int i;
6483 u32 val;
6485 if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
6486 switch (ofs) {
6487 case RCVLSC_MODE:
6488 case DMAC_MODE:
6489 case MBFREE_MODE:
6490 case BUFMGR_MODE:
6491 case MEMARB_MODE:
6492 /* We can't enable/disable these bits of the
6493 * 5705/5750, just say success.
6495 return 0;
6497 default:
6498 break;
6502 val = tr32(ofs);
6503 val &= ~enable_bit;
6504 tw32_f(ofs, val);
6506 for (i = 0; i < MAX_WAIT_CNT; i++) {
6507 udelay(100);
6508 val = tr32(ofs);
6509 if ((val & enable_bit) == 0)
6510 break;
6513 if (i == MAX_WAIT_CNT && !silent) {
6514 dev_err(&tp->pdev->dev,
6515 "tg3_stop_block timed out, ofs=%lx enable_bit=%x\n",
6516 ofs, enable_bit);
6517 return -ENODEV;
6520 return 0;
6523 /* tp->lock is held. */
6524 static int tg3_abort_hw(struct tg3 *tp, int silent)
6526 int i, err;
6528 tg3_disable_ints(tp);
6530 tp->rx_mode &= ~RX_MODE_ENABLE;
6531 tw32_f(MAC_RX_MODE, tp->rx_mode);
6532 udelay(10);
6534 err = tg3_stop_block(tp, RCVBDI_MODE, RCVBDI_MODE_ENABLE, silent);
6535 err |= tg3_stop_block(tp, RCVLPC_MODE, RCVLPC_MODE_ENABLE, silent);
6536 err |= tg3_stop_block(tp, RCVLSC_MODE, RCVLSC_MODE_ENABLE, silent);
6537 err |= tg3_stop_block(tp, RCVDBDI_MODE, RCVDBDI_MODE_ENABLE, silent);
6538 err |= tg3_stop_block(tp, RCVDCC_MODE, RCVDCC_MODE_ENABLE, silent);
6539 err |= tg3_stop_block(tp, RCVCC_MODE, RCVCC_MODE_ENABLE, silent);
6541 err |= tg3_stop_block(tp, SNDBDS_MODE, SNDBDS_MODE_ENABLE, silent);
6542 err |= tg3_stop_block(tp, SNDBDI_MODE, SNDBDI_MODE_ENABLE, silent);
6543 err |= tg3_stop_block(tp, SNDDATAI_MODE, SNDDATAI_MODE_ENABLE, silent);
6544 err |= tg3_stop_block(tp, RDMAC_MODE, RDMAC_MODE_ENABLE, silent);
6545 err |= tg3_stop_block(tp, SNDDATAC_MODE, SNDDATAC_MODE_ENABLE, silent);
6546 err |= tg3_stop_block(tp, DMAC_MODE, DMAC_MODE_ENABLE, silent);
6547 err |= tg3_stop_block(tp, SNDBDC_MODE, SNDBDC_MODE_ENABLE, silent);
6549 tp->mac_mode &= ~MAC_MODE_TDE_ENABLE;
6550 tw32_f(MAC_MODE, tp->mac_mode);
6551 udelay(40);
6553 tp->tx_mode &= ~TX_MODE_ENABLE;
6554 tw32_f(MAC_TX_MODE, tp->tx_mode);
6556 for (i = 0; i < MAX_WAIT_CNT; i++) {
6557 udelay(100);
6558 if (!(tr32(MAC_TX_MODE) & TX_MODE_ENABLE))
6559 break;
6561 if (i >= MAX_WAIT_CNT) {
6562 dev_err(&tp->pdev->dev,
6563 "%s timed out, TX_MODE_ENABLE will not clear "
6564 "MAC_TX_MODE=%08x\n", __func__, tr32(MAC_TX_MODE));
6565 err |= -ENODEV;
6568 err |= tg3_stop_block(tp, HOSTCC_MODE, HOSTCC_MODE_ENABLE, silent);
6569 err |= tg3_stop_block(tp, WDMAC_MODE, WDMAC_MODE_ENABLE, silent);
6570 err |= tg3_stop_block(tp, MBFREE_MODE, MBFREE_MODE_ENABLE, silent);
6572 tw32(FTQ_RESET, 0xffffffff);
6573 tw32(FTQ_RESET, 0x00000000);
6575 err |= tg3_stop_block(tp, BUFMGR_MODE, BUFMGR_MODE_ENABLE, silent);
6576 err |= tg3_stop_block(tp, MEMARB_MODE, MEMARB_MODE_ENABLE, silent);
6578 for (i = 0; i < tp->irq_cnt; i++) {
6579 struct tg3_napi *tnapi = &tp->napi[i];
6580 if (tnapi->hw_status)
6581 memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
6583 if (tp->hw_stats)
6584 memset(tp->hw_stats, 0, sizeof(struct tg3_hw_stats));
6586 return err;
6589 static void tg3_ape_send_event(struct tg3 *tp, u32 event)
6591 int i;
6592 u32 apedata;
6594 /* NCSI does not support APE events */
6595 if (tp->tg3_flags3 & TG3_FLG3_APE_HAS_NCSI)
6596 return;
6598 apedata = tg3_ape_read32(tp, TG3_APE_SEG_SIG);
6599 if (apedata != APE_SEG_SIG_MAGIC)
6600 return;
6602 apedata = tg3_ape_read32(tp, TG3_APE_FW_STATUS);
6603 if (!(apedata & APE_FW_STATUS_READY))
6604 return;
6606 /* Wait for up to 1 millisecond for APE to service previous event. */
6607 for (i = 0; i < 10; i++) {
6608 if (tg3_ape_lock(tp, TG3_APE_LOCK_MEM))
6609 return;
6611 apedata = tg3_ape_read32(tp, TG3_APE_EVENT_STATUS);
6613 if (!(apedata & APE_EVENT_STATUS_EVENT_PENDING))
6614 tg3_ape_write32(tp, TG3_APE_EVENT_STATUS,
6615 event | APE_EVENT_STATUS_EVENT_PENDING);
6617 tg3_ape_unlock(tp, TG3_APE_LOCK_MEM);
6619 if (!(apedata & APE_EVENT_STATUS_EVENT_PENDING))
6620 break;
6622 udelay(100);
6625 if (!(apedata & APE_EVENT_STATUS_EVENT_PENDING))
6626 tg3_ape_write32(tp, TG3_APE_EVENT, APE_EVENT_1);
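/* tg3_ape_driver_state_change() maps RESET_KIND_* onto APE events:
 * RESET_KIND_INIT registers the driver with the APE (segment signature
 * and length, init count, driver ID and behavior flags) and reports
 * STATE_START; RESET_KIND_SHUTDOWN clears the host segment signature so
 * the APE assumes the OS is absent and records either the WOL or the
 * UNLOAD state; RESET_KIND_SUSPEND just reports STATE_SUSPEND.
 */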
6629 static void tg3_ape_driver_state_change(struct tg3 *tp, int kind)
6631 u32 event;
6632 u32 apedata;
6634 if (!(tp->tg3_flags3 & TG3_FLG3_ENABLE_APE))
6635 return;
6637 switch (kind) {
6638 case RESET_KIND_INIT:
6639 tg3_ape_write32(tp, TG3_APE_HOST_SEG_SIG,
6640 APE_HOST_SEG_SIG_MAGIC);
6641 tg3_ape_write32(tp, TG3_APE_HOST_SEG_LEN,
6642 APE_HOST_SEG_LEN_MAGIC);
6643 apedata = tg3_ape_read32(tp, TG3_APE_HOST_INIT_COUNT);
6644 tg3_ape_write32(tp, TG3_APE_HOST_INIT_COUNT, ++apedata);
6645 tg3_ape_write32(tp, TG3_APE_HOST_DRIVER_ID,
6646 APE_HOST_DRIVER_ID_MAGIC(TG3_MAJ_NUM, TG3_MIN_NUM));
6647 tg3_ape_write32(tp, TG3_APE_HOST_BEHAVIOR,
6648 APE_HOST_BEHAV_NO_PHYLOCK);
6649 tg3_ape_write32(tp, TG3_APE_HOST_DRVR_STATE,
6650 TG3_APE_HOST_DRVR_STATE_START);
6652 event = APE_EVENT_STATUS_STATE_START;
6653 break;
6654 case RESET_KIND_SHUTDOWN:
6655 /* With the interface we are currently using,
6656 * APE does not track driver state. Wiping
6657 * out the HOST SEGMENT SIGNATURE forces
6658 * the APE to assume OS absent status.
6660 tg3_ape_write32(tp, TG3_APE_HOST_SEG_SIG, 0x0);
6662 if (device_may_wakeup(&tp->pdev->dev) &&
6663 (tp->tg3_flags & TG3_FLAG_WOL_ENABLE)) {
6664 tg3_ape_write32(tp, TG3_APE_HOST_WOL_SPEED,
6665 TG3_APE_HOST_WOL_SPEED_AUTO);
6666 apedata = TG3_APE_HOST_DRVR_STATE_WOL;
6667 } else
6668 apedata = TG3_APE_HOST_DRVR_STATE_UNLOAD;
6670 tg3_ape_write32(tp, TG3_APE_HOST_DRVR_STATE, apedata);
6672 event = APE_EVENT_STATUS_STATE_UNLOAD;
6673 break;
6674 case RESET_KIND_SUSPEND:
6675 event = APE_EVENT_STATUS_STATE_SUSPEND;
6676 break;
6677 default:
6678 return;
6681 event |= APE_EVENT_STATUS_DRIVER_EVNT | APE_EVENT_STATUS_STATE_CHNGE;
6683 tg3_ape_send_event(tp, event);
6686 /* tp->lock is held. */
6687 static void tg3_write_sig_pre_reset(struct tg3 *tp, int kind)
6689 tg3_write_mem(tp, NIC_SRAM_FIRMWARE_MBOX,
6690 NIC_SRAM_FIRMWARE_MBOX_MAGIC1);
6692 if (tp->tg3_flags2 & TG3_FLG2_ASF_NEW_HANDSHAKE) {
6693 switch (kind) {
6694 case RESET_KIND_INIT:
6695 tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
6696 DRV_STATE_START);
6697 break;
6699 case RESET_KIND_SHUTDOWN:
6700 tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
6701 DRV_STATE_UNLOAD);
6702 break;
6704 case RESET_KIND_SUSPEND:
6705 tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
6706 DRV_STATE_SUSPEND);
6707 break;
6709 default:
6710 break;
6714 if (kind == RESET_KIND_INIT ||
6715 kind == RESET_KIND_SUSPEND)
6716 tg3_ape_driver_state_change(tp, kind);
6719 /* tp->lock is held. */
6720 static void tg3_write_sig_post_reset(struct tg3 *tp, int kind)
6722 if (tp->tg3_flags2 & TG3_FLG2_ASF_NEW_HANDSHAKE) {
6723 switch (kind) {
6724 case RESET_KIND_INIT:
6725 tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
6726 DRV_STATE_START_DONE);
6727 break;
6729 case RESET_KIND_SHUTDOWN:
6730 tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
6731 DRV_STATE_UNLOAD_DONE);
6732 break;
6734 default:
6735 break;
6739 if (kind == RESET_KIND_SHUTDOWN)
6740 tg3_ape_driver_state_change(tp, kind);
6743 /* tp->lock is held. */
6744 static void tg3_write_sig_legacy(struct tg3 *tp, int kind)
6746 if (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) {
6747 switch (kind) {
6748 case RESET_KIND_INIT:
6749 tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
6750 DRV_STATE_START);
6751 break;
6753 case RESET_KIND_SHUTDOWN:
6754 tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
6755 DRV_STATE_UNLOAD);
6756 break;
6758 case RESET_KIND_SUSPEND:
6759 tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
6760 DRV_STATE_SUSPEND);
6761 break;
6763 default:
6764 break;
6769 static int tg3_poll_fw(struct tg3 *tp)
6771 int i;
6772 u32 val;
6774 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
6775 /* Wait up to 20ms for init done. */
6776 for (i = 0; i < 200; i++) {
6777 if (tr32(VCPU_STATUS) & VCPU_STATUS_INIT_DONE)
6778 return 0;
6779 udelay(100);
6781 return -ENODEV;
6784 /* Wait for firmware initialization to complete. */
6785 for (i = 0; i < 100000; i++) {
6786 tg3_read_mem(tp, NIC_SRAM_FIRMWARE_MBOX, &val);
6787 if (val == ~NIC_SRAM_FIRMWARE_MBOX_MAGIC1)
6788 break;
6789 udelay(10);
6792 /* Chip might not be fitted with firmware. Some Sun onboard
6793 * parts are configured like that. So don't signal the timeout
6794 * of the above loop as an error, but do report the lack of
6795 * running firmware once.
6797 if (i >= 100000 &&
6798 !(tp->tg3_flags2 & TG3_FLG2_NO_FWARE_REPORTED)) {
6799 tp->tg3_flags2 |= TG3_FLG2_NO_FWARE_REPORTED;
6801 netdev_info(tp->dev, "No firmware running\n");
6804 if (tp->pci_chip_rev_id == CHIPREV_ID_57765_A0) {
6805 /* The 57765 A0 needs a little more
6806 * time to do some important work.
6808 mdelay(10);
6811 return 0;
6814 /* Save PCI command register before chip reset */
6815 static void tg3_save_pci_state(struct tg3 *tp)
6817 pci_read_config_word(tp->pdev, PCI_COMMAND, &tp->pci_cmd);
6820 /* Restore PCI state after chip reset */
6821 static void tg3_restore_pci_state(struct tg3 *tp)
6823 u32 val;
6825 /* Re-enable indirect register accesses. */
6826 pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
6827 tp->misc_host_ctrl);
6829 /* Set MAX PCI retry to zero. */
6830 val = (PCISTATE_ROM_ENABLE | PCISTATE_ROM_RETRY_ENABLE);
6831 if (tp->pci_chip_rev_id == CHIPREV_ID_5704_A0 &&
6832 (tp->tg3_flags & TG3_FLAG_PCIX_MODE))
6833 val |= PCISTATE_RETRY_SAME_DMA;
6834 /* Allow reads and writes to the APE register and memory space. */
6835 if (tp->tg3_flags3 & TG3_FLG3_ENABLE_APE)
6836 val |= PCISTATE_ALLOW_APE_CTLSPC_WR |
6837 PCISTATE_ALLOW_APE_SHMEM_WR |
6838 PCISTATE_ALLOW_APE_PSPACE_WR;
6839 pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE, val);
6841 pci_write_config_word(tp->pdev, PCI_COMMAND, tp->pci_cmd);
6843 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5785) {
6844 if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS)
6845 pcie_set_readrq(tp->pdev, 4096);
6846 else {
6847 pci_write_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE,
6848 tp->pci_cacheline_sz);
6849 pci_write_config_byte(tp->pdev, PCI_LATENCY_TIMER,
6850 tp->pci_lat_timer);
6854 /* Make sure PCI-X relaxed ordering bit is clear. */
6855 if (tp->tg3_flags & TG3_FLAG_PCIX_MODE) {
6856 u16 pcix_cmd;
6858 pci_read_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
6859 &pcix_cmd);
6860 pcix_cmd &= ~PCI_X_CMD_ERO;
6861 pci_write_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
6862 pcix_cmd);
6865 if (tp->tg3_flags2 & TG3_FLG2_5780_CLASS) {
6867 /* Chip reset on 5780 will reset MSI enable bit,
6868 * so need to restore it.
6870 if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) {
6871 u16 ctrl;
6873 pci_read_config_word(tp->pdev,
6874 tp->msi_cap + PCI_MSI_FLAGS,
6875 &ctrl);
6876 pci_write_config_word(tp->pdev,
6877 tp->msi_cap + PCI_MSI_FLAGS,
6878 ctrl | PCI_MSI_FLAGS_ENABLE);
6879 val = tr32(MSGINT_MODE);
6880 tw32(MSGINT_MODE, val | MSGINT_MODE_ENABLE);
6885 static void tg3_stop_fw(struct tg3 *);
6887 /* tp->lock is held. */
6888 static int tg3_chip_reset(struct tg3 *tp)
6890 u32 val;
6891 void (*write_op)(struct tg3 *, u32, u32);
6892 int i, err;
6894 tg3_nvram_lock(tp);
6896 tg3_ape_lock(tp, TG3_APE_LOCK_GRC);
6898 /* No matching tg3_nvram_unlock() after this because
6899 * chip reset below will undo the nvram lock.
6901 tp->nvram_lock_cnt = 0;
6903 /* GRC_MISC_CFG core clock reset will clear the memory
6904 * enable bit in PCI register 4 and the MSI enable bit
6905 * on some chips, so we save relevant registers here.
6907 tg3_save_pci_state(tp);
6909 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752 ||
6910 (tp->tg3_flags3 & TG3_FLG3_5755_PLUS))
6911 tw32(GRC_FASTBOOT_PC, 0);
6914 * We must avoid the readl() that normally takes place.
6915 * It locks machines, causes machine checks, and other
6916 * fun things. So, temporarily disable the 5701
6917 * hardware workaround, while we do the reset.
6919 write_op = tp->write32;
6920 if (write_op == tg3_write_flush_reg32)
6921 tp->write32 = tg3_write32;
6923 /* Prevent the irq handler from reading or writing PCI registers
6924 * during chip reset when the memory enable bit in the PCI command
6925 * register may be cleared. The chip does not generate interrupt
6926 * at this time, but the irq handler may still be called due to irq
6927 * sharing or irqpoll.
6929 tp->tg3_flags |= TG3_FLAG_CHIP_RESETTING;
6930 for (i = 0; i < tp->irq_cnt; i++) {
6931 struct tg3_napi *tnapi = &tp->napi[i];
6932 if (tnapi->hw_status) {
6933 tnapi->hw_status->status = 0;
6934 tnapi->hw_status->status_tag = 0;
6936 tnapi->last_tag = 0;
6937 tnapi->last_irq_tag = 0;
6939 smp_mb();
6941 for (i = 0; i < tp->irq_cnt; i++)
6942 synchronize_irq(tp->napi[i].irq_vec);
6944 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780) {
6945 val = tr32(TG3_PCIE_LNKCTL) & ~TG3_PCIE_LNKCTL_L1_PLL_PD_EN;
6946 tw32(TG3_PCIE_LNKCTL, val | TG3_PCIE_LNKCTL_L1_PLL_PD_DIS);
6949 /* do the reset */
6950 val = GRC_MISC_CFG_CORECLK_RESET;
6952 if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) {
6953 /* Force PCIe 1.0a mode */
6954 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5785 &&
6955 !(tp->tg3_flags3 & TG3_FLG3_5717_PLUS) &&
6956 tr32(TG3_PCIE_PHY_TSTCTL) ==
6957 (TG3_PCIE_PHY_TSTCTL_PCIE10 | TG3_PCIE_PHY_TSTCTL_PSCRAM))
6958 tw32(TG3_PCIE_PHY_TSTCTL, TG3_PCIE_PHY_TSTCTL_PSCRAM);
6960 if (tp->pci_chip_rev_id != CHIPREV_ID_5750_A0) {
6961 tw32(GRC_MISC_CFG, (1 << 29));
6962 val |= (1 << 29);
6966 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
6967 tw32(VCPU_STATUS, tr32(VCPU_STATUS) | VCPU_STATUS_DRV_RESET);
6968 tw32(GRC_VCPU_EXT_CTRL,
6969 tr32(GRC_VCPU_EXT_CTRL) & ~GRC_VCPU_EXT_CTRL_HALT_CPU);
6972 /* Manage gphy power for all CPMU absent PCIe devices. */
6973 if ((tp->tg3_flags2 & TG3_FLG2_5705_PLUS) &&
6974 !(tp->tg3_flags & TG3_FLAG_CPMU_PRESENT))
6975 val |= GRC_MISC_CFG_KEEP_GPHY_POWER;
6977 tw32(GRC_MISC_CFG, val);
6979 /* restore 5701 hardware bug workaround write method */
6980 tp->write32 = write_op;
6982 /* Unfortunately, we have to delay before the PCI read back.
6983 * Some 575X chips will not even respond to a PCI cfg access
6984 * when the reset command is given to the chip.
6986 * How do these hardware designers expect things to work
6987 * properly if the PCI write is posted for a long period
6988 * of time? It is always necessary to have some method by
6989 * which a register read back can occur to push out the
6990 * write that performs the reset.
6992 * For most tg3 variants the trick below was working.
6993 * Ho hum...
6995 udelay(120);
6997 /* Flush PCI posted writes. The normal MMIO registers
6998 * are inaccessible at this time so this is the only
6999 * way to do this reliably (actually, this is no longer
7000 * the case, see above). I tried to use indirect
7001 * register read/write but this upset some 5701 variants.
7003 pci_read_config_dword(tp->pdev, PCI_COMMAND, &val);
7005 udelay(120);
7007 if ((tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) && tp->pcie_cap) {
7008 u16 val16;
7010 if (tp->pci_chip_rev_id == CHIPREV_ID_5750_A0) {
7011 int i;
7012 u32 cfg_val;
7014 /* Wait for link training to complete. */
7015 for (i = 0; i < 5000; i++)
7016 udelay(100);
7018 pci_read_config_dword(tp->pdev, 0xc4, &cfg_val);
7019 pci_write_config_dword(tp->pdev, 0xc4,
7020 cfg_val | (1 << 15));
7023 /* Clear the "no snoop" and "relaxed ordering" bits. */
7024 pci_read_config_word(tp->pdev,
7025 tp->pcie_cap + PCI_EXP_DEVCTL,
7026 &val16);
7027 val16 &= ~(PCI_EXP_DEVCTL_RELAX_EN |
7028 PCI_EXP_DEVCTL_NOSNOOP_EN);
7030 * Older PCIe devices only support the 128 byte
7031 * MPS setting. Enforce the restriction.
7033 if (!(tp->tg3_flags & TG3_FLAG_CPMU_PRESENT))
7034 val16 &= ~PCI_EXP_DEVCTL_PAYLOAD;
7035 pci_write_config_word(tp->pdev,
7036 tp->pcie_cap + PCI_EXP_DEVCTL,
7037 val16);
7039 pcie_set_readrq(tp->pdev, 4096);
7041 /* Clear error status */
7042 pci_write_config_word(tp->pdev,
7043 tp->pcie_cap + PCI_EXP_DEVSTA,
7044 PCI_EXP_DEVSTA_CED |
7045 PCI_EXP_DEVSTA_NFED |
7046 PCI_EXP_DEVSTA_FED |
7047 PCI_EXP_DEVSTA_URD);
7050 tg3_restore_pci_state(tp);
7052 tp->tg3_flags &= ~TG3_FLAG_CHIP_RESETTING;
7054 val = 0;
7055 if (tp->tg3_flags2 & TG3_FLG2_5780_CLASS)
7056 val = tr32(MEMARB_MODE);
7057 tw32(MEMARB_MODE, val | MEMARB_MODE_ENABLE);
7059 if (tp->pci_chip_rev_id == CHIPREV_ID_5750_A3) {
7060 tg3_stop_fw(tp);
7061 tw32(0x5000, 0x400);
7064 tw32(GRC_MODE, tp->grc_mode);
7066 if (tp->pci_chip_rev_id == CHIPREV_ID_5705_A0) {
7067 val = tr32(0xc4);
7069 tw32(0xc4, val | (1 << 15));
7072 if ((tp->nic_sram_data_cfg & NIC_SRAM_DATA_CFG_MINI_PCI) != 0 &&
7073 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
7074 tp->pci_clock_ctrl |= CLOCK_CTRL_CLKRUN_OENABLE;
7075 if (tp->pci_chip_rev_id == CHIPREV_ID_5705_A0)
7076 tp->pci_clock_ctrl |= CLOCK_CTRL_FORCE_CLKRUN;
7077 tw32(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl);
7080 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
7081 tp->mac_mode = MAC_MODE_PORT_MODE_TBI;
7082 tw32_f(MAC_MODE, tp->mac_mode);
7083 } else if (tp->phy_flags & TG3_PHYFLG_MII_SERDES) {
7084 tp->mac_mode = MAC_MODE_PORT_MODE_GMII;
7085 tw32_f(MAC_MODE, tp->mac_mode);
7086 } else if (tp->tg3_flags3 & TG3_FLG3_ENABLE_APE) {
7087 tp->mac_mode &= (MAC_MODE_APE_TX_EN | MAC_MODE_APE_RX_EN);
7088 if (tp->mac_mode & MAC_MODE_APE_TX_EN)
7089 tp->mac_mode |= MAC_MODE_TDE_ENABLE;
7090 tw32_f(MAC_MODE, tp->mac_mode);
7091 } else
7092 tw32_f(MAC_MODE, 0);
7093 udelay(40);
7095 tg3_ape_unlock(tp, TG3_APE_LOCK_GRC);
7097 err = tg3_poll_fw(tp);
7098 if (err)
7099 return err;
7101 tg3_mdio_start(tp);
7103 if ((tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) &&
7104 tp->pci_chip_rev_id != CHIPREV_ID_5750_A0 &&
7105 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5785 &&
7106 !(tp->tg3_flags3 & TG3_FLG3_5717_PLUS)) {
7107 val = tr32(0x7c00);
7109 tw32(0x7c00, val | (1 << 25));
7112 /* Reprobe ASF enable state. */
7113 tp->tg3_flags &= ~TG3_FLAG_ENABLE_ASF;
7114 tp->tg3_flags2 &= ~TG3_FLG2_ASF_NEW_HANDSHAKE;
7115 tg3_read_mem(tp, NIC_SRAM_DATA_SIG, &val);
7116 if (val == NIC_SRAM_DATA_SIG_MAGIC) {
7117 u32 nic_cfg;
7119 tg3_read_mem(tp, NIC_SRAM_DATA_CFG, &nic_cfg);
7120 if (nic_cfg & NIC_SRAM_DATA_CFG_ASF_ENABLE) {
7121 tp->tg3_flags |= TG3_FLAG_ENABLE_ASF;
7122 tp->last_event_jiffies = jiffies;
7123 if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS)
7124 tp->tg3_flags2 |= TG3_FLG2_ASF_NEW_HANDSHAKE;
7128 return 0;
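/* tg3_stop_fw() pauses the ASF firmware only when ASF is enabled and the
 * APE is not: wait for the RX CPU to ack the previous event, post
 * FWCMD_NICDRV_PAUSE_FW in the firmware command mailbox, generate the
 * event, then wait for that ack as well.
 */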
7131 /* tp->lock is held. */
7132 static void tg3_stop_fw(struct tg3 *tp)
7134 if ((tp->tg3_flags & TG3_FLAG_ENABLE_ASF) &&
7135 !(tp->tg3_flags3 & TG3_FLG3_ENABLE_APE)) {
7136 /* Wait for RX cpu to ACK the previous event. */
7137 tg3_wait_for_event_ack(tp);
7139 tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX, FWCMD_NICDRV_PAUSE_FW);
7141 tg3_generate_fw_event(tp);
7143 /* Wait for RX cpu to ACK this event. */
7144 tg3_wait_for_event_ack(tp);
7148 /* tp->lock is held. */
7149 static int tg3_halt(struct tg3 *tp, int kind, int silent)
7151 int err;
7153 tg3_stop_fw(tp);
7155 tg3_write_sig_pre_reset(tp, kind);
7157 tg3_abort_hw(tp, silent);
7158 err = tg3_chip_reset(tp);
7160 __tg3_set_mac_addr(tp, 0);
7162 tg3_write_sig_legacy(tp, kind);
7163 tg3_write_sig_post_reset(tp, kind);
7165 if (err)
7166 return err;
7168 return 0;
7171 #define RX_CPU_SCRATCH_BASE 0x30000
7172 #define RX_CPU_SCRATCH_SIZE 0x04000
7173 #define TX_CPU_SCRATCH_BASE 0x34000
7174 #define TX_CPU_SCRATCH_SIZE 0x04000
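/* The RX and TX CPU scratch areas are adjacent 16 kB (0x4000 byte)
 * windows in NIC memory; tg3_load_firmware_cpu() zeroes the relevant
 * window before copying a firmware image into it.
 */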
7176 /* tp->lock is held. */
7177 static int tg3_halt_cpu(struct tg3 *tp, u32 offset)
7179 int i;
7181 BUG_ON(offset == TX_CPU_BASE &&
7182 (tp->tg3_flags2 & TG3_FLG2_5705_PLUS));
7184 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
7185 u32 val = tr32(GRC_VCPU_EXT_CTRL);
7187 tw32(GRC_VCPU_EXT_CTRL, val | GRC_VCPU_EXT_CTRL_HALT_CPU);
7188 return 0;
7190 if (offset == RX_CPU_BASE) {
7191 for (i = 0; i < 10000; i++) {
7192 tw32(offset + CPU_STATE, 0xffffffff);
7193 tw32(offset + CPU_MODE, CPU_MODE_HALT);
7194 if (tr32(offset + CPU_MODE) & CPU_MODE_HALT)
7195 break;
7198 tw32(offset + CPU_STATE, 0xffffffff);
7199 tw32_f(offset + CPU_MODE, CPU_MODE_HALT);
7200 udelay(10);
7201 } else {
7202 for (i = 0; i < 10000; i++) {
7203 tw32(offset + CPU_STATE, 0xffffffff);
7204 tw32(offset + CPU_MODE, CPU_MODE_HALT);
7205 if (tr32(offset + CPU_MODE) & CPU_MODE_HALT)
7206 break;
7210 if (i >= 10000) {
7211 netdev_err(tp->dev, "%s timed out, %s CPU\n",
7212 __func__, offset == RX_CPU_BASE ? "RX" : "TX");
7213 return -ENODEV;
7216 /* Clear firmware's nvram arbitration. */
7217 if (tp->tg3_flags & TG3_FLAG_NVRAM)
7218 tw32(NVRAM_SWARB, SWARB_REQ_CLR0);
7219 return 0;
7222 struct fw_info {
7223 unsigned int fw_base;
7224 unsigned int fw_len;
7225 const __be32 *fw_data;
7228 /* tp->lock is held. */
7229 static int tg3_load_firmware_cpu(struct tg3 *tp, u32 cpu_base, u32 cpu_scratch_base,
7230 int cpu_scratch_size, struct fw_info *info)
7232 int err, lock_err, i;
7233 void (*write_op)(struct tg3 *, u32, u32);
7235 if (cpu_base == TX_CPU_BASE &&
7236 (tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
7237 netdev_err(tp->dev,
7238 "%s: Trying to load TX cpu firmware which is 5705\n",
7239 __func__);
7240 return -EINVAL;
7243 if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS)
7244 write_op = tg3_write_mem;
7245 else
7246 write_op = tg3_write_indirect_reg32;
7248 /* It is possible that bootcode is still loading at this point.
7249 * Get the nvram lock first before halting the cpu.
7251 lock_err = tg3_nvram_lock(tp);
7252 err = tg3_halt_cpu(tp, cpu_base);
7253 if (!lock_err)
7254 tg3_nvram_unlock(tp);
7255 if (err)
7256 goto out;
7258 for (i = 0; i < cpu_scratch_size; i += sizeof(u32))
7259 write_op(tp, cpu_scratch_base + i, 0);
7260 tw32(cpu_base + CPU_STATE, 0xffffffff);
7261 tw32(cpu_base + CPU_MODE, tr32(cpu_base+CPU_MODE)|CPU_MODE_HALT);
7262 for (i = 0; i < (info->fw_len / sizeof(u32)); i++)
7263 write_op(tp, (cpu_scratch_base +
7264 (info->fw_base & 0xffff) +
7265 (i * sizeof(u32))),
7266 be32_to_cpu(info->fw_data[i]));
7268 err = 0;
7270 out:
7271 return err;
7274 /* tp->lock is held. */
7275 static int tg3_load_5701_a0_firmware_fix(struct tg3 *tp)
7277 struct fw_info info;
7278 const __be32 *fw_data;
7279 int err, i;
7281 fw_data = (void *)tp->fw->data;
7283 /* Firmware blob starts with version numbers, followed by
7284 * start address and length. We are setting the complete length:
7285 * length = end_address_of_bss - start_address_of_text.
7286 * The remainder is the blob to be loaded contiguously
7287 * from the start address. */
7289 info.fw_base = be32_to_cpu(fw_data[1]);
7290 info.fw_len = tp->fw->size - 12;
7291 info.fw_data = &fw_data[3];
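/* The 12 bytes skipped above are the three 32-bit header words
 * (version, start address, length), so fw_len covers only the payload
 * that is copied into the CPU scratch area.
 */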
7293 err = tg3_load_firmware_cpu(tp, RX_CPU_BASE,
7294 RX_CPU_SCRATCH_BASE, RX_CPU_SCRATCH_SIZE,
7295 &info);
7296 if (err)
7297 return err;
7299 err = tg3_load_firmware_cpu(tp, TX_CPU_BASE,
7300 TX_CPU_SCRATCH_BASE, TX_CPU_SCRATCH_SIZE,
7301 &info);
7302 if (err)
7303 return err;
7305 /* Now startup only the RX cpu. */
7306 tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff);
7307 tw32_f(RX_CPU_BASE + CPU_PC, info.fw_base);
7309 for (i = 0; i < 5; i++) {
7310 if (tr32(RX_CPU_BASE + CPU_PC) == info.fw_base)
7311 break;
7312 tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff);
7313 tw32(RX_CPU_BASE + CPU_MODE, CPU_MODE_HALT);
7314 tw32_f(RX_CPU_BASE + CPU_PC, info.fw_base);
7315 udelay(1000);
7317 if (i >= 5) {
7318 netdev_err(tp->dev, "%s fails to set RX CPU PC, is %08x "
7319 "should be %08x\n", __func__,
7320 tr32(RX_CPU_BASE + CPU_PC), info.fw_base);
7321 return -ENODEV;
7323 tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff);
7324 tw32_f(RX_CPU_BASE + CPU_MODE, 0x00000000);
7326 return 0;
7329 /* 5705 needs a special version of the TSO firmware. */
7331 /* tp->lock is held. */
7332 static int tg3_load_tso_firmware(struct tg3 *tp)
7334 struct fw_info info;
7335 const __be32 *fw_data;
7336 unsigned long cpu_base, cpu_scratch_base, cpu_scratch_size;
7337 int err, i;
7339 if (tp->tg3_flags2 & TG3_FLG2_HW_TSO)
7340 return 0;
7342 fw_data = (void *)tp->fw->data;
7344 /* Firmware blob starts with version numbers, followed by
7345 * start address and length. We are setting the complete length:
7346 * length = end_address_of_bss - start_address_of_text.
7347 * The remainder is the blob to be loaded contiguously
7348 * from the start address. */
7350 info.fw_base = be32_to_cpu(fw_data[1]);
7351 cpu_scratch_size = tp->fw_len;
7352 info.fw_len = tp->fw->size - 12;
7353 info.fw_data = &fw_data[3];
7355 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
7356 cpu_base = RX_CPU_BASE;
7357 cpu_scratch_base = NIC_SRAM_MBUF_POOL_BASE5705;
7358 } else {
7359 cpu_base = TX_CPU_BASE;
7360 cpu_scratch_base = TX_CPU_SCRATCH_BASE;
7361 cpu_scratch_size = TX_CPU_SCRATCH_SIZE;
7364 err = tg3_load_firmware_cpu(tp, cpu_base,
7365 cpu_scratch_base, cpu_scratch_size,
7366 &info);
7367 if (err)
7368 return err;
7370 /* Now startup the cpu. */
7371 tw32(cpu_base + CPU_STATE, 0xffffffff);
7372 tw32_f(cpu_base + CPU_PC, info.fw_base);
7374 for (i = 0; i < 5; i++) {
7375 if (tr32(cpu_base + CPU_PC) == info.fw_base)
7376 break;
7377 tw32(cpu_base + CPU_STATE, 0xffffffff);
7378 tw32(cpu_base + CPU_MODE, CPU_MODE_HALT);
7379 tw32_f(cpu_base + CPU_PC, info.fw_base);
7380 udelay(1000);
7382 if (i >= 5) {
7383 netdev_err(tp->dev,
7384 "%s fails to set CPU PC, is %08x should be %08x\n",
7385 __func__, tr32(cpu_base + CPU_PC), info.fw_base);
7386 return -ENODEV;
7388 tw32(cpu_base + CPU_STATE, 0xffffffff);
7389 tw32_f(cpu_base + CPU_MODE, 0x00000000);
7390 return 0;
7394 static int tg3_set_mac_addr(struct net_device *dev, void *p)
7396 struct tg3 *tp = netdev_priv(dev);
7397 struct sockaddr *addr = p;
7398 int err = 0, skip_mac_1 = 0;
7400 if (!is_valid_ether_addr(addr->sa_data))
7401 return -EINVAL;
7403 memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
7405 if (!netif_running(dev))
7406 return 0;
7408 if (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) {
7409 u32 addr0_high, addr0_low, addr1_high, addr1_low;
7411 addr0_high = tr32(MAC_ADDR_0_HIGH);
7412 addr0_low = tr32(MAC_ADDR_0_LOW);
7413 addr1_high = tr32(MAC_ADDR_1_HIGH);
7414 addr1_low = tr32(MAC_ADDR_1_LOW);
7416 /* Skip MAC addr 1 if ASF is using it. */
7417 if ((addr0_high != addr1_high || addr0_low != addr1_low) &&
7418 !(addr1_high == 0 && addr1_low == 0))
7419 skip_mac_1 = 1;
7421 spin_lock_bh(&tp->lock);
7422 __tg3_set_mac_addr(tp, skip_mac_1);
7423 spin_unlock_bh(&tp->lock);
7425 return err;
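/* tg3_set_bdinfo() fills in one TG3_BDINFO control block in NIC SRAM:
 * the 64-bit host DMA address of a ring split into high and low words,
 * the combined max-length/flags word and, on pre-5705 parts only, the
 * NIC-local address of the descriptors.
 */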
7428 /* tp->lock is held. */
7429 static void tg3_set_bdinfo(struct tg3 *tp, u32 bdinfo_addr,
7430 dma_addr_t mapping, u32 maxlen_flags,
7431 u32 nic_addr)
7433 tg3_write_mem(tp,
7434 (bdinfo_addr + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH),
7435 ((u64) mapping >> 32));
7436 tg3_write_mem(tp,
7437 (bdinfo_addr + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW),
7438 ((u64) mapping & 0xffffffff));
7439 tg3_write_mem(tp,
7440 (bdinfo_addr + TG3_BDINFO_MAXLEN_FLAGS),
7441 maxlen_flags);
7443 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS))
7444 tg3_write_mem(tp,
7445 (bdinfo_addr + TG3_BDINFO_NIC_ADDR),
7446 nic_addr);
7449 static void __tg3_set_rx_mode(struct net_device *);
7450 static void __tg3_set_coalesce(struct tg3 *tp, struct ethtool_coalesce *ec)
7452 int i;
7454 if (!(tp->tg3_flags3 & TG3_FLG3_ENABLE_TSS)) {
7455 tw32(HOSTCC_TXCOL_TICKS, ec->tx_coalesce_usecs);
7456 tw32(HOSTCC_TXMAX_FRAMES, ec->tx_max_coalesced_frames);
7457 tw32(HOSTCC_TXCOAL_MAXF_INT, ec->tx_max_coalesced_frames_irq);
7458 } else {
7459 tw32(HOSTCC_TXCOL_TICKS, 0);
7460 tw32(HOSTCC_TXMAX_FRAMES, 0);
7461 tw32(HOSTCC_TXCOAL_MAXF_INT, 0);
7464 if (!(tp->tg3_flags3 & TG3_FLG3_ENABLE_RSS)) {
7465 tw32(HOSTCC_RXCOL_TICKS, ec->rx_coalesce_usecs);
7466 tw32(HOSTCC_RXMAX_FRAMES, ec->rx_max_coalesced_frames);
7467 tw32(HOSTCC_RXCOAL_MAXF_INT, ec->rx_max_coalesced_frames_irq);
7468 } else {
7469 tw32(HOSTCC_RXCOL_TICKS, 0);
7470 tw32(HOSTCC_RXMAX_FRAMES, 0);
7471 tw32(HOSTCC_RXCOAL_MAXF_INT, 0);
7474 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
7475 u32 val = ec->stats_block_coalesce_usecs;
7477 tw32(HOSTCC_RXCOAL_TICK_INT, ec->rx_coalesce_usecs_irq);
7478 tw32(HOSTCC_TXCOAL_TICK_INT, ec->tx_coalesce_usecs_irq);
7480 if (!netif_carrier_ok(tp->dev))
7481 val = 0;
7483 tw32(HOSTCC_STAT_COAL_TICKS, val);
7486 for (i = 0; i < tp->irq_cnt - 1; i++) {
7487 u32 reg;
7489 reg = HOSTCC_RXCOL_TICKS_VEC1 + i * 0x18;
7490 tw32(reg, ec->rx_coalesce_usecs);
7491 reg = HOSTCC_RXMAX_FRAMES_VEC1 + i * 0x18;
7492 tw32(reg, ec->rx_max_coalesced_frames);
7493 reg = HOSTCC_RXCOAL_MAXF_INT_VEC1 + i * 0x18;
7494 tw32(reg, ec->rx_max_coalesced_frames_irq);
7496 if (tp->tg3_flags3 & TG3_FLG3_ENABLE_TSS) {
7497 reg = HOSTCC_TXCOL_TICKS_VEC1 + i * 0x18;
7498 tw32(reg, ec->tx_coalesce_usecs);
7499 reg = HOSTCC_TXMAX_FRAMES_VEC1 + i * 0x18;
7500 tw32(reg, ec->tx_max_coalesced_frames);
7501 reg = HOSTCC_TXCOAL_MAXF_INT_VEC1 + i * 0x18;
7502 tw32(reg, ec->tx_max_coalesced_frames_irq);
7506 for (; i < tp->irq_max - 1; i++) {
7507 tw32(HOSTCC_RXCOL_TICKS_VEC1 + i * 0x18, 0);
7508 tw32(HOSTCC_RXMAX_FRAMES_VEC1 + i * 0x18, 0);
7509 tw32(HOSTCC_RXCOAL_MAXF_INT_VEC1 + i * 0x18, 0);
7511 if (tp->tg3_flags3 & TG3_FLG3_ENABLE_TSS) {
7512 tw32(HOSTCC_TXCOL_TICKS_VEC1 + i * 0x18, 0);
7513 tw32(HOSTCC_TXMAX_FRAMES_VEC1 + i * 0x18, 0);
7514 tw32(HOSTCC_TXCOAL_MAXF_INT_VEC1 + i * 0x18, 0);
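/* Each additional interrupt vector has its own copy of the rx (and, with
 * TSS, tx) coalescing registers spaced 0x18 bytes apart starting at the
 * *_VEC1 addresses; the register sets for unused vector slots up to
 * tp->irq_max are simply zeroed above.
 */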
7519 /* tp->lock is held. */
7520 static void tg3_rings_reset(struct tg3 *tp)
7522 int i;
7523 u32 stblk, txrcb, rxrcb, limit;
7524 struct tg3_napi *tnapi = &tp->napi[0];
7526 /* Disable all transmit rings but the first. */
7527 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS))
7528 limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE * 16;
7529 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765)
7530 limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE * 2;
7531 else
7532 limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE;
7534 for (txrcb = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE;
7535 txrcb < limit; txrcb += TG3_BDINFO_SIZE)
7536 tg3_write_mem(tp, txrcb + TG3_BDINFO_MAXLEN_FLAGS,
7537 BDINFO_FLAGS_DISABLED);
7540 /* Disable all receive return rings but the first. */
7541 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
7542 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719)
7543 limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE * 17;
7544 else if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS))
7545 limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE * 16;
7546 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
7547 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765)
7548 limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE * 4;
7549 else
7550 limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE;
7552 for (rxrcb = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE;
7553 rxrcb < limit; rxrcb += TG3_BDINFO_SIZE)
7554 tg3_write_mem(tp, rxrcb + TG3_BDINFO_MAXLEN_FLAGS,
7555 BDINFO_FLAGS_DISABLED);
7557 /* Disable interrupts */
7558 tw32_mailbox_f(tp->napi[0].int_mbox, 1);
7560 /* Zero mailbox registers. */
7561 if (tp->tg3_flags & TG3_FLAG_SUPPORT_MSIX) {
7562 for (i = 1; i < tp->irq_max; i++) {
7563 tp->napi[i].tx_prod = 0;
7564 tp->napi[i].tx_cons = 0;
7565 if (tp->tg3_flags3 & TG3_FLG3_ENABLE_TSS)
7566 tw32_mailbox(tp->napi[i].prodmbox, 0);
7567 tw32_rx_mbox(tp->napi[i].consmbox, 0);
7568 tw32_mailbox_f(tp->napi[i].int_mbox, 1);
7570 if (!(tp->tg3_flags3 & TG3_FLG3_ENABLE_TSS))
7571 tw32_mailbox(tp->napi[0].prodmbox, 0);
7572 } else {
7573 tp->napi[0].tx_prod = 0;
7574 tp->napi[0].tx_cons = 0;
7575 tw32_mailbox(tp->napi[0].prodmbox, 0);
7576 tw32_rx_mbox(tp->napi[0].consmbox, 0);
7579 /* Make sure the NIC-based send BD rings are disabled. */
7580 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
7581 u32 mbox = MAILBOX_SNDNIC_PROD_IDX_0 + TG3_64BIT_REG_LOW;
7582 for (i = 0; i < 16; i++)
7583 tw32_tx_mbox(mbox + i * 8, 0);
7586 txrcb = NIC_SRAM_SEND_RCB;
7587 rxrcb = NIC_SRAM_RCV_RET_RCB;
7589 /* Clear status block in ram. */
7590 memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
7592 /* Set status block DMA address */
7593 tw32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH,
7594 ((u64) tnapi->status_mapping >> 32));
7595 tw32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW,
7596 ((u64) tnapi->status_mapping & 0xffffffff));
7598 if (tnapi->tx_ring) {
7599 tg3_set_bdinfo(tp, txrcb, tnapi->tx_desc_mapping,
7600 (TG3_TX_RING_SIZE <<
7601 BDINFO_FLAGS_MAXLEN_SHIFT),
7602 NIC_SRAM_TX_BUFFER_DESC);
7603 txrcb += TG3_BDINFO_SIZE;
7606 if (tnapi->rx_rcb) {
7607 tg3_set_bdinfo(tp, rxrcb, tnapi->rx_rcb_mapping,
7608 (TG3_RX_RCB_RING_SIZE(tp) <<
7609 BDINFO_FLAGS_MAXLEN_SHIFT), 0);
7610 rxrcb += TG3_BDINFO_SIZE;
7613 stblk = HOSTCC_STATBLCK_RING1;
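/* Vector 0's status block was programmed through the dedicated
 * HOSTCC_STATUS_BLK_HOST_ADDR registers above; the loop below points each
 * additional vector's status block through the HOSTCC_STATBLCK_RING1
 * window, 8 bytes per vector, while the per-vector send and receive
 * return BDINFO blocks advance through NIC SRAM in TG3_BDINFO_SIZE steps.
 */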
7615 for (i = 1, tnapi++; i < tp->irq_cnt; i++, tnapi++) {
7616 u64 mapping = (u64)tnapi->status_mapping;
7617 tw32(stblk + TG3_64BIT_REG_HIGH, mapping >> 32);
7618 tw32(stblk + TG3_64BIT_REG_LOW, mapping & 0xffffffff);
7620 /* Clear status block in ram. */
7621 memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
7623 if (tnapi->tx_ring) {
7624 tg3_set_bdinfo(tp, txrcb, tnapi->tx_desc_mapping,
7625 (TG3_TX_RING_SIZE <<
7626 BDINFO_FLAGS_MAXLEN_SHIFT),
7627 NIC_SRAM_TX_BUFFER_DESC);
7628 txrcb += TG3_BDINFO_SIZE;
7631 tg3_set_bdinfo(tp, rxrcb, tnapi->rx_rcb_mapping,
7632 (TG3_RX_RCB_RING_SIZE(tp) <<
7633 BDINFO_FLAGS_MAXLEN_SHIFT), 0);
7635 stblk += 8;
7636 rxrcb += TG3_BDINFO_SIZE;
7640 /* tp->lock is held. */
7641 static int tg3_reset_hw(struct tg3 *tp, int reset_phy)
7643 u32 val, rdmac_mode;
7644 int i, err, limit;
7645 struct tg3_rx_prodring_set *tpr = &tp->napi[0].prodring;
7647 tg3_disable_ints(tp);
7649 tg3_stop_fw(tp);
7651 tg3_write_sig_pre_reset(tp, RESET_KIND_INIT);
7653 if (tp->tg3_flags & TG3_FLAG_INIT_COMPLETE)
7654 tg3_abort_hw(tp, 1);
7656 if (reset_phy)
7657 tg3_phy_reset(tp);
7659 err = tg3_chip_reset(tp);
7660 if (err)
7661 return err;
7663 tg3_write_sig_legacy(tp, RESET_KIND_INIT);
7665 if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5784_AX) {
7666 val = tr32(TG3_CPMU_CTRL);
7667 val &= ~(CPMU_CTRL_LINK_AWARE_MODE | CPMU_CTRL_LINK_IDLE_MODE);
7668 tw32(TG3_CPMU_CTRL, val);
7670 val = tr32(TG3_CPMU_LSPD_10MB_CLK);
7671 val &= ~CPMU_LSPD_10MB_MACCLK_MASK;
7672 val |= CPMU_LSPD_10MB_MACCLK_6_25;
7673 tw32(TG3_CPMU_LSPD_10MB_CLK, val);
7675 val = tr32(TG3_CPMU_LNK_AWARE_PWRMD);
7676 val &= ~CPMU_LNK_AWARE_MACCLK_MASK;
7677 val |= CPMU_LNK_AWARE_MACCLK_6_25;
7678 tw32(TG3_CPMU_LNK_AWARE_PWRMD, val);
7680 val = tr32(TG3_CPMU_HST_ACC);
7681 val &= ~CPMU_HST_ACC_MACCLK_MASK;
7682 val |= CPMU_HST_ACC_MACCLK_6_25;
7683 tw32(TG3_CPMU_HST_ACC, val);
7686 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780) {
7687 val = tr32(PCIE_PWR_MGMT_THRESH) & ~PCIE_PWR_MGMT_L1_THRESH_MSK;
7688 val |= PCIE_PWR_MGMT_EXT_ASPM_TMR_EN |
7689 PCIE_PWR_MGMT_L1_THRESH_4MS;
7690 tw32(PCIE_PWR_MGMT_THRESH, val);
7692 val = tr32(TG3_PCIE_EIDLE_DELAY) & ~TG3_PCIE_EIDLE_DELAY_MASK;
7693 tw32(TG3_PCIE_EIDLE_DELAY, val | TG3_PCIE_EIDLE_DELAY_13_CLKS);
7695 tw32(TG3_CORR_ERR_STAT, TG3_CORR_ERR_STAT_CLEAR);
7697 val = tr32(TG3_PCIE_LNKCTL) & ~TG3_PCIE_LNKCTL_L1_PLL_PD_EN;
7698 tw32(TG3_PCIE_LNKCTL, val | TG3_PCIE_LNKCTL_L1_PLL_PD_DIS);
7701 if (tp->tg3_flags3 & TG3_FLG3_L1PLLPD_EN) {
7702 u32 grc_mode = tr32(GRC_MODE);
7704 /* Access the lower 1K of PL PCIE block registers. */
7705 val = grc_mode & ~GRC_MODE_PCIE_PORT_MASK;
7706 tw32(GRC_MODE, val | GRC_MODE_PCIE_PL_SEL);
7708 val = tr32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_PL_LO_PHYCTL1);
7709 tw32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_PL_LO_PHYCTL1,
7710 val | TG3_PCIE_PL_LO_PHYCTL1_L1PLLPD_EN);
7712 tw32(GRC_MODE, grc_mode);
7715 if (tp->pci_chip_rev_id == CHIPREV_ID_57765_A0) {
7716 u32 grc_mode = tr32(GRC_MODE);
7718 /* Access the lower 1K of PL PCIE block registers. */
7719 val = grc_mode & ~GRC_MODE_PCIE_PORT_MASK;
7720 tw32(GRC_MODE, val | GRC_MODE_PCIE_PL_SEL);
7722 val = tr32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_PL_LO_PHYCTL5);
7723 tw32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_PL_LO_PHYCTL5,
7724 val | TG3_PCIE_PL_LO_PHYCTL5_DIS_L2CLKREQ);
7726 tw32(GRC_MODE, grc_mode);
7728 val = tr32(TG3_CPMU_LSPD_10MB_CLK);
7729 val &= ~CPMU_LSPD_10MB_MACCLK_MASK;
7730 val |= CPMU_LSPD_10MB_MACCLK_6_25;
7731 tw32(TG3_CPMU_LSPD_10MB_CLK, val);
7734 /* This works around an issue with Athlon chipsets on
7735 * B3 tigon3 silicon. This bit has no effect on any
7736 * other revision. But do not set this on PCI Express
7737 * chips and don't even touch the clocks if the CPMU is present.
7739 if (!(tp->tg3_flags & TG3_FLAG_CPMU_PRESENT)) {
7740 if (!(tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS))
7741 tp->pci_clock_ctrl |= CLOCK_CTRL_DELAY_PCI_GRANT;
7742 tw32_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl);
7745 if (tp->pci_chip_rev_id == CHIPREV_ID_5704_A0 &&
7746 (tp->tg3_flags & TG3_FLAG_PCIX_MODE)) {
7747 val = tr32(TG3PCI_PCISTATE);
7748 val |= PCISTATE_RETRY_SAME_DMA;
7749 tw32(TG3PCI_PCISTATE, val);
7752 if (tp->tg3_flags3 & TG3_FLG3_ENABLE_APE) {
7753 /* Allow reads and writes to the
7754 * APE register and memory space.
7756 val = tr32(TG3PCI_PCISTATE);
7757 val |= PCISTATE_ALLOW_APE_CTLSPC_WR |
7758 PCISTATE_ALLOW_APE_SHMEM_WR |
7759 PCISTATE_ALLOW_APE_PSPACE_WR;
7760 tw32(TG3PCI_PCISTATE, val);
7763 if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5704_BX) {
7764 /* Enable some hw fixes. */
7765 val = tr32(TG3PCI_MSI_DATA);
7766 val |= (1 << 26) | (1 << 28) | (1 << 29);
7767 tw32(TG3PCI_MSI_DATA, val);
7770 /* Descriptor ring init may make accesses to the
7771 * NIC SRAM area to setup the TX descriptors, so we
7772 * can only do this after the hardware has been
7773 * successfully reset.
7775 err = tg3_init_rings(tp);
7776 if (err)
7777 return err;
7779 if (tp->tg3_flags3 & TG3_FLG3_5717_PLUS) {
7780 val = tr32(TG3PCI_DMA_RW_CTRL) &
7781 ~DMA_RWCTRL_DIS_CACHE_ALIGNMENT;
7782 if (tp->pci_chip_rev_id == CHIPREV_ID_57765_A0)
7783 val &= ~DMA_RWCTRL_CRDRDR_RDMA_MRRS_MSK;
7784 tw32(TG3PCI_DMA_RW_CTRL, val | tp->dma_rwctrl);
7785 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5784 &&
7786 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5761) {
7787 /* This value is determined during the probe time DMA
7788 * engine test, tg3_test_dma.
7790 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
7793 tp->grc_mode &= ~(GRC_MODE_HOST_SENDBDS |
7794 GRC_MODE_4X_NIC_SEND_RINGS |
7795 GRC_MODE_NO_TX_PHDR_CSUM |
7796 GRC_MODE_NO_RX_PHDR_CSUM);
7797 tp->grc_mode |= GRC_MODE_HOST_SENDBDS;
7799 /* Pseudo-header checksum is done by hardware logic and not
7800 * the offload processors, so make the chip do the pseudo-
7801 * header checksums on receive. For transmit it is more
7802 * convenient to do the pseudo-header checksum in software
7803 * as Linux does that on transmit for us in all cases.
7805 tp->grc_mode |= GRC_MODE_NO_TX_PHDR_CSUM;
7807 tw32(GRC_MODE,
7808 tp->grc_mode |
7809 (GRC_MODE_IRQ_ON_MAC_ATTN | GRC_MODE_HOST_STACKUP));
7811 /* Set up the timer prescaler register. The clock is always 66 MHz. */
7812 val = tr32(GRC_MISC_CFG);
7813 val &= ~0xff;
7814 val |= (65 << GRC_MISC_CFG_PRESCALAR_SHIFT);
7815 tw32(GRC_MISC_CFG, val);
7817 /* Initialize MBUF/DESC pool. */
7818 if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS) {
7819 /* Do nothing. */
7820 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5705) {
7821 tw32(BUFMGR_MB_POOL_ADDR, NIC_SRAM_MBUF_POOL_BASE);
7822 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704)
7823 tw32(BUFMGR_MB_POOL_SIZE, NIC_SRAM_MBUF_POOL_SIZE64);
7824 else
7825 tw32(BUFMGR_MB_POOL_SIZE, NIC_SRAM_MBUF_POOL_SIZE96);
7826 tw32(BUFMGR_DMA_DESC_POOL_ADDR, NIC_SRAM_DMA_DESC_POOL_BASE);
7827 tw32(BUFMGR_DMA_DESC_POOL_SIZE, NIC_SRAM_DMA_DESC_POOL_SIZE);
7828 } else if (tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE) {
7829 int fw_len;
7831 fw_len = tp->fw_len;
7832 fw_len = (fw_len + (0x80 - 1)) & ~(0x80 - 1);
7833 tw32(BUFMGR_MB_POOL_ADDR,
7834 NIC_SRAM_MBUF_POOL_BASE5705 + fw_len);
7835 tw32(BUFMGR_MB_POOL_SIZE,
7836 NIC_SRAM_MBUF_POOL_SIZE5705 - fw_len - 0xa00);
7839 if (tp->dev->mtu <= ETH_DATA_LEN) {
7840 tw32(BUFMGR_MB_RDMA_LOW_WATER,
7841 tp->bufmgr_config.mbuf_read_dma_low_water);
7842 tw32(BUFMGR_MB_MACRX_LOW_WATER,
7843 tp->bufmgr_config.mbuf_mac_rx_low_water);
7844 tw32(BUFMGR_MB_HIGH_WATER,
7845 tp->bufmgr_config.mbuf_high_water);
7846 } else {
7847 tw32(BUFMGR_MB_RDMA_LOW_WATER,
7848 tp->bufmgr_config.mbuf_read_dma_low_water_jumbo);
7849 tw32(BUFMGR_MB_MACRX_LOW_WATER,
7850 tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo);
7851 tw32(BUFMGR_MB_HIGH_WATER,
7852 tp->bufmgr_config.mbuf_high_water_jumbo);
7854 tw32(BUFMGR_DMA_LOW_WATER,
7855 tp->bufmgr_config.dma_low_water);
7856 tw32(BUFMGR_DMA_HIGH_WATER,
7857 tp->bufmgr_config.dma_high_water);
7859 tw32(BUFMGR_MODE, BUFMGR_MODE_ENABLE | BUFMGR_MODE_ATTN_ENABLE);
7860 for (i = 0; i < 2000; i++) {
7861 if (tr32(BUFMGR_MODE) & BUFMGR_MODE_ENABLE)
7862 break;
7863 udelay(10);
7865 if (i >= 2000) {
7866 netdev_err(tp->dev, "%s cannot enable BUFMGR\n", __func__);
7867 return -ENODEV;
7870 /* Setup replenish threshold. */
7871 val = tp->rx_pending / 8;
7872 if (val == 0)
7873 val = 1;
7874 else if (val > tp->rx_std_max_post)
7875 val = tp->rx_std_max_post;
7876 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
7877 if (tp->pci_chip_rev_id == CHIPREV_ID_5906_A1)
7878 tw32(ISO_PKT_TX, (tr32(ISO_PKT_TX) & ~0x3) | 0x2);
7880 if (val > (TG3_RX_INTERNAL_RING_SZ_5906 / 2))
7881 val = TG3_RX_INTERNAL_RING_SZ_5906 / 2;
7884 tw32(RCVBDI_STD_THRESH, val);
7886 /* Initialize TG3_BDINFO's at:
7887 * RCVDBDI_STD_BD: standard eth size rx ring
7888 * RCVDBDI_JUMBO_BD: jumbo frame rx ring
7889 * RCVDBDI_MINI_BD: small frame rx ring (??? does not work)
7891 * like so:
7892 * TG3_BDINFO_HOST_ADDR: high/low parts of DMA address of ring
7893 * TG3_BDINFO_MAXLEN_FLAGS: (rx max buffer size << 16) |
7894 * ring attribute flags
7895 * TG3_BDINFO_NIC_ADDR: location of descriptors in nic SRAM
7897 * Standard receive ring @ NIC_SRAM_RX_BUFFER_DESC, 512 entries.
7898 * Jumbo receive ring @ NIC_SRAM_RX_JUMBO_BUFFER_DESC, 256 entries.
7900 * The size of each ring is fixed in the firmware, but the location is
7901 * configurable.
7903 tw32(RCVDBDI_STD_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH,
7904 ((u64) tpr->rx_std_mapping >> 32));
7905 tw32(RCVDBDI_STD_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW,
7906 ((u64) tpr->rx_std_mapping & 0xffffffff));
7907 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5717 &&
7908 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5719)
7909 tw32(RCVDBDI_STD_BD + TG3_BDINFO_NIC_ADDR,
7910 NIC_SRAM_RX_BUFFER_DESC);
7912 /* Disable the mini ring */
7913 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS))
7914 tw32(RCVDBDI_MINI_BD + TG3_BDINFO_MAXLEN_FLAGS,
7915 BDINFO_FLAGS_DISABLED);
7917 /* Program the jumbo buffer descriptor ring control
7918 * blocks on those devices that have them.
7920 if ((tp->tg3_flags & TG3_FLAG_JUMBO_CAPABLE) &&
7921 !(tp->tg3_flags2 & TG3_FLG2_5780_CLASS)) {
7922 /* Setup replenish threshold. */
7923 tw32(RCVBDI_JUMBO_THRESH, tp->rx_jumbo_pending / 8);
7925 if (tp->tg3_flags & TG3_FLAG_JUMBO_RING_ENABLE) {
7926 tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH,
7927 ((u64) tpr->rx_jmb_mapping >> 32));
7928 tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW,
7929 ((u64) tpr->rx_jmb_mapping & 0xffffffff));
7930 tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_MAXLEN_FLAGS,
7931 (RX_JUMBO_MAX_SIZE << BDINFO_FLAGS_MAXLEN_SHIFT) |
7932 BDINFO_FLAGS_USE_EXT_RECV);
7933 if (!(tp->tg3_flags3 & TG3_FLG3_USE_JUMBO_BDFLAG) ||
7934 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765)
7935 tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_NIC_ADDR,
7936 NIC_SRAM_RX_JUMBO_BUFFER_DESC);
7937 } else {
7938 tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_MAXLEN_FLAGS,
7939 BDINFO_FLAGS_DISABLED);
7942 if (tp->tg3_flags3 & TG3_FLG3_5717_PLUS)
7943 val = (RX_STD_MAX_SIZE_5705 << BDINFO_FLAGS_MAXLEN_SHIFT) |
7944 (TG3_RX_STD_DMA_SZ << 2);
7945 else
7946 val = TG3_RX_STD_DMA_SZ << BDINFO_FLAGS_MAXLEN_SHIFT;
7947 } else
7948 val = RX_STD_MAX_SIZE_5705 << BDINFO_FLAGS_MAXLEN_SHIFT;
7950 tw32(RCVDBDI_STD_BD + TG3_BDINFO_MAXLEN_FLAGS, val);
7952 tpr->rx_std_prod_idx = tp->rx_pending;
7953 tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG, tpr->rx_std_prod_idx);
7955 tpr->rx_jmb_prod_idx = (tp->tg3_flags & TG3_FLAG_JUMBO_RING_ENABLE) ?
7956 tp->rx_jumbo_pending : 0;
7957 tw32_rx_mbox(TG3_RX_JMB_PROD_IDX_REG, tpr->rx_jmb_prod_idx);
7959 if (tp->tg3_flags3 & TG3_FLG3_5717_PLUS) {
7960 tw32(STD_REPLENISH_LWM, 32);
7961 tw32(JMB_REPLENISH_LWM, 16);
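/* The producer indices published above tell the chip how many standard
 * and jumbo buffers tg3_init_rings() pre-posted (tp->rx_pending and
 * tp->rx_jumbo_pending); on 5717-class parts the STD/JMB_REPLENISH_LWM
 * writes also set replenish low-water marks of 32 and 16 buffers.
 */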
7964 tg3_rings_reset(tp);
7966 /* Initialize MAC address and backoff seed. */
7967 __tg3_set_mac_addr(tp, 0);
7969 /* MTU + ethernet header + FCS + optional VLAN tag */
7970 tw32(MAC_RX_MTU_SIZE,
7971 tp->dev->mtu + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN);
7973 /* The slot time is changed by tg3_setup_phy if we
7974 * run at gigabit with half duplex.
7976 tw32(MAC_TX_LENGTHS,
7977 (2 << TX_LENGTHS_IPG_CRS_SHIFT) |
7978 (6 << TX_LENGTHS_IPG_SHIFT) |
7979 (32 << TX_LENGTHS_SLOT_TIME_SHIFT));
7981 /* Receive rules. */
7982 tw32(MAC_RCV_RULE_CFG, RCV_RULE_CFG_DEFAULT_CLASS);
7983 tw32(RCVLPC_CONFIG, 0x0181);
7985 /* Calculate RDMAC_MODE setting early, we need it to determine
7986 * the RCVLPC_STATE_ENABLE mask.
7988 rdmac_mode = (RDMAC_MODE_ENABLE | RDMAC_MODE_TGTABORT_ENAB |
7989 RDMAC_MODE_MSTABORT_ENAB | RDMAC_MODE_PARITYERR_ENAB |
7990 RDMAC_MODE_ADDROFLOW_ENAB | RDMAC_MODE_FIFOOFLOW_ENAB |
7991 RDMAC_MODE_FIFOURUN_ENAB | RDMAC_MODE_FIFOOREAD_ENAB |
7992 RDMAC_MODE_LNGREAD_ENAB);
7994 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
7995 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719)
7996 rdmac_mode |= RDMAC_MODE_MULT_DMA_RD_DIS;
7998 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
7999 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
8000 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780)
8001 rdmac_mode |= RDMAC_MODE_BD_SBD_CRPT_ENAB |
8002 RDMAC_MODE_MBUF_RBD_CRPT_ENAB |
8003 RDMAC_MODE_MBUF_SBD_CRPT_ENAB;
8005 /* If statement applies to 5705 and 5750 PCI devices only */
8006 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
8007 tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) ||
8008 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750)) {
8009 if (tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE &&
8010 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
8011 rdmac_mode |= RDMAC_MODE_FIFO_SIZE_128;
8012 } else if (!(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH) &&
8013 !(tp->tg3_flags2 & TG3_FLG2_IS_5788)) {
8014 rdmac_mode |= RDMAC_MODE_FIFO_LONG_BURST;
8018 if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS)
8019 rdmac_mode |= RDMAC_MODE_FIFO_LONG_BURST;
8021 if (tp->tg3_flags2 & TG3_FLG2_HW_TSO)
8022 rdmac_mode |= RDMAC_MODE_IPV4_LSO_EN;
8024 if ((tp->tg3_flags2 & TG3_FLG2_HW_TSO_3) ||
8025 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
8026 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780)
8027 rdmac_mode |= RDMAC_MODE_IPV6_LSO_EN;
8029 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
8030 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
8031 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
8032 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780 ||
8033 (tp->tg3_flags3 & TG3_FLG3_5717_PLUS)) {
8034 val = tr32(TG3_RDMA_RSRVCTRL_REG);
8035 tw32(TG3_RDMA_RSRVCTRL_REG,
8036 val | TG3_RDMA_RSRVCTRL_FIFO_OFLW_FIX);
8039 /* Receive/send statistics. */
8040 if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS) {
8041 val = tr32(RCVLPC_STATS_ENABLE);
8042 val &= ~RCVLPC_STATSENAB_DACK_FIX;
8043 tw32(RCVLPC_STATS_ENABLE, val);
8044 } else if ((rdmac_mode & RDMAC_MODE_FIFO_SIZE_128) &&
8045 (tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE)) {
8046 val = tr32(RCVLPC_STATS_ENABLE);
8047 val &= ~RCVLPC_STATSENAB_LNGBRST_RFIX;
8048 tw32(RCVLPC_STATS_ENABLE, val);
8049 } else {
8050 tw32(RCVLPC_STATS_ENABLE, 0xffffff);
8052 tw32(RCVLPC_STATSCTRL, RCVLPC_STATSCTRL_ENABLE);
8053 tw32(SNDDATAI_STATSENAB, 0xffffff);
8054 tw32(SNDDATAI_STATSCTRL,
8055 (SNDDATAI_SCTRL_ENABLE |
8056 SNDDATAI_SCTRL_FASTUPD));
8058 /* Setup host coalescing engine. */
8059 tw32(HOSTCC_MODE, 0);
8060 for (i = 0; i < 2000; i++) {
8061 if (!(tr32(HOSTCC_MODE) & HOSTCC_MODE_ENABLE))
8062 break;
8063 udelay(10);
8066 __tg3_set_coalesce(tp, &tp->coal);
8068 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
8069 /* Status/statistics block address. See tg3_timer,
8070 * the tg3_periodic_fetch_stats call there, and
8071 * tg3_get_stats to see how this works for 5705/5750 chips. */
8073 tw32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH,
8074 ((u64) tp->stats_mapping >> 32));
8075 tw32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW,
8076 ((u64) tp->stats_mapping & 0xffffffff));
8077 tw32(HOSTCC_STATS_BLK_NIC_ADDR, NIC_SRAM_STATS_BLK);
8079 tw32(HOSTCC_STATUS_BLK_NIC_ADDR, NIC_SRAM_STATUS_BLK);
8081 /* Clear statistics and status block memory areas */
8082 for (i = NIC_SRAM_STATS_BLK;
8083 i < NIC_SRAM_STATUS_BLK + TG3_HW_STATUS_SIZE;
8084 i += sizeof(u32)) {
8085 tg3_write_mem(tp, i, 0);
8086 udelay(40);
8090 tw32(HOSTCC_MODE, HOSTCC_MODE_ENABLE | tp->coalesce_mode);
8092 tw32(RCVCC_MODE, RCVCC_MODE_ENABLE | RCVCC_MODE_ATTN_ENABLE);
8093 tw32(RCVLPC_MODE, RCVLPC_MODE_ENABLE);
8094 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS))
8095 tw32(RCVLSC_MODE, RCVLSC_MODE_ENABLE | RCVLSC_MODE_ATTN_ENABLE);
8097 if (tp->phy_flags & TG3_PHYFLG_MII_SERDES) {
8098 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
8099 /* reset to prevent losing 1st rx packet intermittently */
8100 tw32_f(MAC_RX_MODE, RX_MODE_RESET);
8101 udelay(10);
8104 if (tp->tg3_flags3 & TG3_FLG3_ENABLE_APE)
8105 tp->mac_mode &= MAC_MODE_APE_TX_EN | MAC_MODE_APE_RX_EN;
8106 else
8107 tp->mac_mode = 0;
8108 tp->mac_mode |= MAC_MODE_TXSTAT_ENABLE | MAC_MODE_RXSTAT_ENABLE |
8109 MAC_MODE_TDE_ENABLE | MAC_MODE_RDE_ENABLE | MAC_MODE_FHDE_ENABLE;
8110 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS) &&
8111 !(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
8112 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700)
8113 tp->mac_mode |= MAC_MODE_LINK_POLARITY;
8114 tw32_f(MAC_MODE, tp->mac_mode | MAC_MODE_RXSTAT_CLEAR | MAC_MODE_TXSTAT_CLEAR);
8115 udelay(40);
8117 /* tp->grc_local_ctrl is partially set up during tg3_get_invariants().
8118 * If TG3_FLG2_IS_NIC is zero, we should read the
8119 * register to preserve the GPIO settings for LOMs. The GPIOs,
8120 * whether used as inputs or outputs, are set by boot code after
8121 * reset. */
8123 if (!(tp->tg3_flags2 & TG3_FLG2_IS_NIC)) {
8124 u32 gpio_mask;
8126 gpio_mask = GRC_LCLCTRL_GPIO_OE0 | GRC_LCLCTRL_GPIO_OE1 |
8127 GRC_LCLCTRL_GPIO_OE2 | GRC_LCLCTRL_GPIO_OUTPUT0 |
8128 GRC_LCLCTRL_GPIO_OUTPUT1 | GRC_LCLCTRL_GPIO_OUTPUT2;
8130 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752)
8131 gpio_mask |= GRC_LCLCTRL_GPIO_OE3 |
8132 GRC_LCLCTRL_GPIO_OUTPUT3;
8134 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755)
8135 gpio_mask |= GRC_LCLCTRL_GPIO_UART_SEL;
8137 tp->grc_local_ctrl &= ~gpio_mask;
8138 tp->grc_local_ctrl |= tr32(GRC_LOCAL_CTRL) & gpio_mask;
8140 /* GPIO1 must be driven high for eeprom write protect */
8141 if (tp->tg3_flags & TG3_FLAG_EEPROM_WRITE_PROT)
8142 tp->grc_local_ctrl |= (GRC_LCLCTRL_GPIO_OE1 |
8143 GRC_LCLCTRL_GPIO_OUTPUT1);
8145 tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
8146 udelay(100);
8148 if (tp->tg3_flags2 & TG3_FLG2_USING_MSIX) {
8149 val = tr32(MSGINT_MODE);
8150 val |= MSGINT_MODE_MULTIVEC_EN | MSGINT_MODE_ENABLE;
8151 tw32(MSGINT_MODE, val);
8154 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
8155 tw32_f(DMAC_MODE, DMAC_MODE_ENABLE);
8156 udelay(40);
8159 val = (WDMAC_MODE_ENABLE | WDMAC_MODE_TGTABORT_ENAB |
8160 WDMAC_MODE_MSTABORT_ENAB | WDMAC_MODE_PARITYERR_ENAB |
8161 WDMAC_MODE_ADDROFLOW_ENAB | WDMAC_MODE_FIFOOFLOW_ENAB |
8162 WDMAC_MODE_FIFOURUN_ENAB | WDMAC_MODE_FIFOOREAD_ENAB |
8163 WDMAC_MODE_LNGREAD_ENAB);
8165 /* If statement applies to 5705 and 5750 PCI devices only */
8166 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
8167 tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) ||
8168 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750) {
8169 if ((tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE) &&
8170 (tp->pci_chip_rev_id == CHIPREV_ID_5705_A1 ||
8171 tp->pci_chip_rev_id == CHIPREV_ID_5705_A2)) {
8172 /* nothing */
8173 } else if (!(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH) &&
8174 !(tp->tg3_flags2 & TG3_FLG2_IS_5788) &&
8175 !(tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS)) {
8176 val |= WDMAC_MODE_RX_ACCEL;
8180 /* Enable host coalescing bug fix */
8181 if (tp->tg3_flags3 & TG3_FLG3_5755_PLUS)
8182 val |= WDMAC_MODE_STATUS_TAG_FIX;
8184 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
8185 val |= WDMAC_MODE_BURST_ALL_DATA;
8187 tw32_f(WDMAC_MODE, val);
8188 udelay(40);
8190 if (tp->tg3_flags & TG3_FLAG_PCIX_MODE) {
8191 u16 pcix_cmd;
8193 pci_read_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
8194 &pcix_cmd);
8195 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703) {
8196 pcix_cmd &= ~PCI_X_CMD_MAX_READ;
8197 pcix_cmd |= PCI_X_CMD_READ_2K;
8198 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
8199 pcix_cmd &= ~(PCI_X_CMD_MAX_SPLIT | PCI_X_CMD_MAX_READ);
8200 pcix_cmd |= PCI_X_CMD_READ_2K;
8202 pci_write_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
8203 pcix_cmd);
8206 tw32_f(RDMAC_MODE, rdmac_mode);
8207 udelay(40);
8209 tw32(RCVDCC_MODE, RCVDCC_MODE_ENABLE | RCVDCC_MODE_ATTN_ENABLE);
8210 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS))
8211 tw32(MBFREE_MODE, MBFREE_MODE_ENABLE);
8213 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
8214 tw32(SNDDATAC_MODE,
8215 SNDDATAC_MODE_ENABLE | SNDDATAC_MODE_CDELAY);
8216 else
8217 tw32(SNDDATAC_MODE, SNDDATAC_MODE_ENABLE);
8219 tw32(SNDBDC_MODE, SNDBDC_MODE_ENABLE | SNDBDC_MODE_ATTN_ENABLE);
8220 tw32(RCVBDI_MODE, RCVBDI_MODE_ENABLE | RCVBDI_MODE_RCB_ATTN_ENAB);
8221 tw32(RCVDBDI_MODE, RCVDBDI_MODE_ENABLE | RCVDBDI_MODE_INV_RING_SZ);
8222 tw32(SNDDATAI_MODE, SNDDATAI_MODE_ENABLE);
8223 if (tp->tg3_flags2 & TG3_FLG2_HW_TSO)
8224 tw32(SNDDATAI_MODE, SNDDATAI_MODE_ENABLE | 0x8);
8225 val = SNDBDI_MODE_ENABLE | SNDBDI_MODE_ATTN_ENABLE;
8226 if (tp->tg3_flags3 & TG3_FLG3_ENABLE_TSS)
8227 val |= SNDBDI_MODE_MULTI_TXQ_EN;
8228 tw32(SNDBDI_MODE, val);
8229 tw32(SNDBDS_MODE, SNDBDS_MODE_ENABLE | SNDBDS_MODE_ATTN_ENABLE);
8231 if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0) {
8232 err = tg3_load_5701_a0_firmware_fix(tp);
8233 if (err)
8234 return err;
8237 if (tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE) {
8238 err = tg3_load_tso_firmware(tp);
8239 if (err)
8240 return err;
8243 tp->tx_mode = TX_MODE_ENABLE;
8244 if ((tp->tg3_flags3 & TG3_FLG3_5755_PLUS) ||
8245 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
8246 tp->tx_mode |= TX_MODE_MBUF_LOCKUP_FIX;
8247 tw32_f(MAC_TX_MODE, tp->tx_mode);
8248 udelay(100);
8250 if (tp->tg3_flags3 & TG3_FLG3_ENABLE_RSS) {
8251 u32 reg = MAC_RSS_INDIR_TBL_0;
8252 u8 *ent = (u8 *)&val;
8254 /* Setup the indirection table */
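/* Each one-byte entry selects one of the irq_cnt - 1 rx return rings
 * (the first MSI-X vector is reserved for link events), filled
 * round-robin; four entries are packed into each 32-bit register
 * before it is written.
 */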
8255 for (i = 0; i < TG3_RSS_INDIR_TBL_SIZE; i++) {
8256 int idx = i % sizeof(val);
8258 ent[idx] = i % (tp->irq_cnt - 1);
8259 if (idx == sizeof(val) - 1) {
8260 tw32(reg, val);
8261 reg += 4;
8265 /* Setup the "secret" hash key. */
8266 tw32(MAC_RSS_HASH_KEY_0, 0x5f865437);
8267 tw32(MAC_RSS_HASH_KEY_1, 0xe4ac62cc);
8268 tw32(MAC_RSS_HASH_KEY_2, 0x50103a45);
8269 tw32(MAC_RSS_HASH_KEY_3, 0x36621985);
8270 tw32(MAC_RSS_HASH_KEY_4, 0xbf14c0e8);
8271 tw32(MAC_RSS_HASH_KEY_5, 0x1bc27a1e);
8272 tw32(MAC_RSS_HASH_KEY_6, 0x84f4b556);
8273 tw32(MAC_RSS_HASH_KEY_7, 0x094ea6fe);
8274 tw32(MAC_RSS_HASH_KEY_8, 0x7dda01e7);
8275 tw32(MAC_RSS_HASH_KEY_9, 0xc04d7481);
8278 tp->rx_mode = RX_MODE_ENABLE;
8279 if (tp->tg3_flags3 & TG3_FLG3_5755_PLUS)
8280 tp->rx_mode |= RX_MODE_IPV6_CSUM_ENABLE;
8282 if (tp->tg3_flags3 & TG3_FLG3_ENABLE_RSS)
8283 tp->rx_mode |= RX_MODE_RSS_ENABLE |
8284 RX_MODE_RSS_ITBL_HASH_BITS_7 |
8285 RX_MODE_RSS_IPV6_HASH_EN |
8286 RX_MODE_RSS_TCP_IPV6_HASH_EN |
8287 RX_MODE_RSS_IPV4_HASH_EN |
8288 RX_MODE_RSS_TCP_IPV4_HASH_EN;
8290 tw32_f(MAC_RX_MODE, tp->rx_mode);
8291 udelay(10);
8293 tw32(MAC_LED_CTRL, tp->led_ctrl);
8295 tw32(MAC_MI_STAT, MAC_MI_STAT_LNKSTAT_ATTN_ENAB);
8296 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
8297 tw32_f(MAC_RX_MODE, RX_MODE_RESET);
8298 udelay(10);
8300 tw32_f(MAC_RX_MODE, tp->rx_mode);
8301 udelay(10);
8303 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
8304 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) &&
8305 !(tp->phy_flags & TG3_PHYFLG_SERDES_PREEMPHASIS)) {
8306 /* Set drive transmission level to 1.2V */
8307 /* only if the signal pre-emphasis bit is not set */
8308 val = tr32(MAC_SERDES_CFG);
8309 val &= 0xfffff000;
8310 val |= 0x880;
8311 tw32(MAC_SERDES_CFG, val);
8313 if (tp->pci_chip_rev_id == CHIPREV_ID_5703_A1)
8314 tw32(MAC_SERDES_CFG, 0x616000);
8317 /* Prevent chip from dropping frames when flow control
8318 * is enabled.
8320 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765)
8321 val = 1;
8322 else
8323 val = 2;
8324 tw32_f(MAC_LOW_WMARK_MAX_RX_FRAME, val);
8326 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 &&
8327 (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)) {
8328 /* Use hardware link auto-negotiation */
8329 tp->tg3_flags2 |= TG3_FLG2_HW_AUTONEG;
8332 if ((tp->phy_flags & TG3_PHYFLG_MII_SERDES) &&
8333 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714)) {
8334 u32 tmp;
8336 tmp = tr32(SERDES_RX_CTRL);
8337 tw32(SERDES_RX_CTRL, tmp | SERDES_RX_SIG_DETECT);
8338 tp->grc_local_ctrl &= ~GRC_LCLCTRL_USE_EXT_SIG_DETECT;
8339 tp->grc_local_ctrl |= GRC_LCLCTRL_USE_SIG_DETECT;
8340 tw32(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
8343 if (!(tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB)) {
8344 if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) {
8345 tp->phy_flags &= ~TG3_PHYFLG_IS_LOW_POWER;
8346 tp->link_config.speed = tp->link_config.orig_speed;
8347 tp->link_config.duplex = tp->link_config.orig_duplex;
8348 tp->link_config.autoneg = tp->link_config.orig_autoneg;
8351 err = tg3_setup_phy(tp, 0);
8352 if (err)
8353 return err;
8355 if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
8356 !(tp->phy_flags & TG3_PHYFLG_IS_FET)) {
8357 u32 tmp;
8359 /* Clear CRC stats. */
8360 if (!tg3_readphy(tp, MII_TG3_TEST1, &tmp)) {
8361 tg3_writephy(tp, MII_TG3_TEST1,
8362 tmp | MII_TG3_TEST1_CRC_EN);
8363 tg3_readphy(tp, MII_TG3_RXR_COUNTERS, &tmp);
8368 __tg3_set_rx_mode(tp->dev);
8370 /* Initialize receive rules. */
8371 tw32(MAC_RCV_RULE_0, 0xc2000000 & RCV_RULE_DISABLE_MASK);
8372 tw32(MAC_RCV_VALUE_0, 0xffffffff & RCV_RULE_DISABLE_MASK);
8373 tw32(MAC_RCV_RULE_1, 0x86000004 & RCV_RULE_DISABLE_MASK);
8374 tw32(MAC_RCV_VALUE_1, 0xffffffff & RCV_RULE_DISABLE_MASK);
8376 if ((tp->tg3_flags2 & TG3_FLG2_5705_PLUS) &&
8377 !(tp->tg3_flags2 & TG3_FLG2_5780_CLASS))
8378 limit = 8;
8379 else
8380 limit = 16;
8381 if (tp->tg3_flags & TG3_FLAG_ENABLE_ASF)
8382 limit -= 4;
8383 switch (limit) {
8384 case 16:
8385 tw32(MAC_RCV_RULE_15, 0); tw32(MAC_RCV_VALUE_15, 0);
8386 case 15:
8387 tw32(MAC_RCV_RULE_14, 0); tw32(MAC_RCV_VALUE_14, 0);
8388 case 14:
8389 tw32(MAC_RCV_RULE_13, 0); tw32(MAC_RCV_VALUE_13, 0);
8390 case 13:
8391 tw32(MAC_RCV_RULE_12, 0); tw32(MAC_RCV_VALUE_12, 0);
8392 case 12:
8393 tw32(MAC_RCV_RULE_11, 0); tw32(MAC_RCV_VALUE_11, 0);
8394 case 11:
8395 tw32(MAC_RCV_RULE_10, 0); tw32(MAC_RCV_VALUE_10, 0);
8396 case 10:
8397 tw32(MAC_RCV_RULE_9, 0); tw32(MAC_RCV_VALUE_9, 0);
8398 case 9:
8399 tw32(MAC_RCV_RULE_8, 0); tw32(MAC_RCV_VALUE_8, 0);
8400 case 8:
8401 tw32(MAC_RCV_RULE_7, 0); tw32(MAC_RCV_VALUE_7, 0);
8402 case 7:
8403 tw32(MAC_RCV_RULE_6, 0); tw32(MAC_RCV_VALUE_6, 0);
8404 case 6:
8405 tw32(MAC_RCV_RULE_5, 0); tw32(MAC_RCV_VALUE_5, 0);
8406 case 5:
8407 tw32(MAC_RCV_RULE_4, 0); tw32(MAC_RCV_VALUE_4, 0);
8408 case 4:
8409 /* tw32(MAC_RCV_RULE_3, 0); tw32(MAC_RCV_VALUE_3, 0); */
8410 case 3:
8411 /* tw32(MAC_RCV_RULE_2, 0); tw32(MAC_RCV_VALUE_2, 0); */
8412 case 2:
8413 case 1:
8415 default:
8416 break;
8419 if (tp->tg3_flags3 & TG3_FLG3_ENABLE_APE)
8420 /* Write our heartbeat update interval to APE. */
8421 tg3_ape_write32(tp, TG3_APE_HOST_HEARTBEAT_INT_MS,
8422 APE_HOST_HEARTBEAT_INT_DISABLE);
8424 tg3_write_sig_post_reset(tp, RESET_KIND_INIT);
8426 return 0;
8429 /* Called at device open time to get the chip ready for
8430 * packet processing. Invoked with tp->lock held.
8432 static int tg3_init_hw(struct tg3 *tp, int reset_phy)
8434 tg3_switch_clocks(tp);
8436 tw32(TG3PCI_MEM_WIN_BASE_ADDR, 0);
8438 return tg3_reset_hw(tp, reset_phy);
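/* Fold a 32-bit hardware counter register into a 64-bit software
 * total: add the register value to the low word and carry into the
 * high word when the addition wraps around.
 */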
8441 #define TG3_STAT_ADD32(PSTAT, REG) \
8442 do { u32 __val = tr32(REG); \
8443 (PSTAT)->low += __val; \
8444 if ((PSTAT)->low < __val) \
8445 (PSTAT)->high += 1; \
8446 } while (0)
8448 static void tg3_periodic_fetch_stats(struct tg3 *tp)
8450 struct tg3_hw_stats *sp = tp->hw_stats;
8452 if (!netif_carrier_ok(tp->dev))
8453 return;
8455 TG3_STAT_ADD32(&sp->tx_octets, MAC_TX_STATS_OCTETS);
8456 TG3_STAT_ADD32(&sp->tx_collisions, MAC_TX_STATS_COLLISIONS);
8457 TG3_STAT_ADD32(&sp->tx_xon_sent, MAC_TX_STATS_XON_SENT);
8458 TG3_STAT_ADD32(&sp->tx_xoff_sent, MAC_TX_STATS_XOFF_SENT);
8459 TG3_STAT_ADD32(&sp->tx_mac_errors, MAC_TX_STATS_MAC_ERRORS);
8460 TG3_STAT_ADD32(&sp->tx_single_collisions, MAC_TX_STATS_SINGLE_COLLISIONS);
8461 TG3_STAT_ADD32(&sp->tx_mult_collisions, MAC_TX_STATS_MULT_COLLISIONS);
8462 TG3_STAT_ADD32(&sp->tx_deferred, MAC_TX_STATS_DEFERRED);
8463 TG3_STAT_ADD32(&sp->tx_excessive_collisions, MAC_TX_STATS_EXCESSIVE_COL);
8464 TG3_STAT_ADD32(&sp->tx_late_collisions, MAC_TX_STATS_LATE_COL);
8465 TG3_STAT_ADD32(&sp->tx_ucast_packets, MAC_TX_STATS_UCAST);
8466 TG3_STAT_ADD32(&sp->tx_mcast_packets, MAC_TX_STATS_MCAST);
8467 TG3_STAT_ADD32(&sp->tx_bcast_packets, MAC_TX_STATS_BCAST);
8469 TG3_STAT_ADD32(&sp->rx_octets, MAC_RX_STATS_OCTETS);
8470 TG3_STAT_ADD32(&sp->rx_fragments, MAC_RX_STATS_FRAGMENTS);
8471 TG3_STAT_ADD32(&sp->rx_ucast_packets, MAC_RX_STATS_UCAST);
8472 TG3_STAT_ADD32(&sp->rx_mcast_packets, MAC_RX_STATS_MCAST);
8473 TG3_STAT_ADD32(&sp->rx_bcast_packets, MAC_RX_STATS_BCAST);
8474 TG3_STAT_ADD32(&sp->rx_fcs_errors, MAC_RX_STATS_FCS_ERRORS);
8475 TG3_STAT_ADD32(&sp->rx_align_errors, MAC_RX_STATS_ALIGN_ERRORS);
8476 TG3_STAT_ADD32(&sp->rx_xon_pause_rcvd, MAC_RX_STATS_XON_PAUSE_RECVD);
8477 TG3_STAT_ADD32(&sp->rx_xoff_pause_rcvd, MAC_RX_STATS_XOFF_PAUSE_RECVD);
8478 TG3_STAT_ADD32(&sp->rx_mac_ctrl_rcvd, MAC_RX_STATS_MAC_CTRL_RECVD);
8479 TG3_STAT_ADD32(&sp->rx_xoff_entered, MAC_RX_STATS_XOFF_ENTERED);
8480 TG3_STAT_ADD32(&sp->rx_frame_too_long_errors, MAC_RX_STATS_FRAME_TOO_LONG);
8481 TG3_STAT_ADD32(&sp->rx_jabbers, MAC_RX_STATS_JABBERS);
8482 TG3_STAT_ADD32(&sp->rx_undersize_packets, MAC_RX_STATS_UNDERSIZE);
8484 TG3_STAT_ADD32(&sp->rxbds_empty, RCVLPC_NO_RCV_BD_CNT);
8485 TG3_STAT_ADD32(&sp->rx_discards, RCVLPC_IN_DISCARDS_CNT);
8486 TG3_STAT_ADD32(&sp->rx_errors, RCVLPC_IN_ERRORS_CNT);
8489 static void tg3_timer(unsigned long __opaque)
8491 struct tg3 *tp = (struct tg3 *) __opaque;
8493 if (tp->irq_sync)
8494 goto restart_timer;
8496 spin_lock(&tp->lock);
8498 if (!(tp->tg3_flags & TG3_FLAG_TAGGED_STATUS)) {
8499 /* All of this garbage is because, when using non-tagged
8500 * IRQ status, the mailbox/status_block protocol the chip
8501 * uses with the cpu is race prone. */
8503 if (tp->napi[0].hw_status->status & SD_STATUS_UPDATED) {
8504 tw32(GRC_LOCAL_CTRL,
8505 tp->grc_local_ctrl | GRC_LCLCTRL_SETINT);
8506 } else {
8507 tw32(HOSTCC_MODE, tp->coalesce_mode |
8508 HOSTCC_MODE_ENABLE | HOSTCC_MODE_NOW);
8511 if (!(tr32(WDMAC_MODE) & WDMAC_MODE_ENABLE)) {
8512 tp->tg3_flags2 |= TG3_FLG2_RESTART_TIMER;
8513 spin_unlock(&tp->lock);
8514 schedule_work(&tp->reset_task);
8515 return;
8519 /* This part only runs once per second. */
8520 if (!--tp->timer_counter) {
8521 if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS)
8522 tg3_periodic_fetch_stats(tp);
8524 if (tp->tg3_flags & TG3_FLAG_USE_LINKCHG_REG) {
8525 u32 mac_stat;
8526 int phy_event;
8528 mac_stat = tr32(MAC_STATUS);
8530 phy_event = 0;
8531 if (tp->phy_flags & TG3_PHYFLG_USE_MI_INTERRUPT) {
8532 if (mac_stat & MAC_STATUS_MI_INTERRUPT)
8533 phy_event = 1;
8534 } else if (mac_stat & MAC_STATUS_LNKSTATE_CHANGED)
8535 phy_event = 1;
8537 if (phy_event)
8538 tg3_setup_phy(tp, 0);
8539 } else if (tp->tg3_flags & TG3_FLAG_POLL_SERDES) {
8540 u32 mac_stat = tr32(MAC_STATUS);
8541 int need_setup = 0;
8543 if (netif_carrier_ok(tp->dev) &&
8544 (mac_stat & MAC_STATUS_LNKSTATE_CHANGED)) {
8545 need_setup = 1;
8547 if (!netif_carrier_ok(tp->dev) &&
8548 (mac_stat & (MAC_STATUS_PCS_SYNCED |
8549 MAC_STATUS_SIGNAL_DET))) {
8550 need_setup = 1;
8552 if (need_setup) {
8553 if (!tp->serdes_counter) {
8554 tw32_f(MAC_MODE,
8555 (tp->mac_mode &
8556 ~MAC_MODE_PORT_MODE_MASK));
8557 udelay(40);
8558 tw32_f(MAC_MODE, tp->mac_mode);
8559 udelay(40);
8561 tg3_setup_phy(tp, 0);
8563 } else if ((tp->phy_flags & TG3_PHYFLG_MII_SERDES) &&
8564 (tp->tg3_flags2 & TG3_FLG2_5780_CLASS)) {
8565 tg3_serdes_parallel_detect(tp);
8568 tp->timer_counter = tp->timer_multiplier;
8571 /* Heartbeat is only sent once every 2 seconds.
8573 * The heartbeat is to tell the ASF firmware that the host
8574 * driver is still alive. In the event that the OS crashes,
8575 * ASF needs to reset the hardware to free up the FIFO space
8576 * that may be filled with rx packets destined for the host.
8577 * If the FIFO is full, ASF will no longer function properly.
8579 * Unintended resets have been reported on real time kernels
8580 * where the timer doesn't run on time. Netpoll will also have
8581 * the same problem.
8583 * The new FWCMD_NICDRV_ALIVE3 command tells the ASF firmware
8584 * to check the ring condition when the heartbeat is expiring
8585 * before doing the reset. This will prevent most unintended
8586 * resets. */
8588 if (!--tp->asf_counter) {
8589 if ((tp->tg3_flags & TG3_FLAG_ENABLE_ASF) &&
8590 !(tp->tg3_flags3 & TG3_FLG3_ENABLE_APE)) {
8591 tg3_wait_for_event_ack(tp);
8593 tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX,
8594 FWCMD_NICDRV_ALIVE3);
8595 tg3_write_mem(tp, NIC_SRAM_FW_CMD_LEN_MBOX, 4);
8596 tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX,
8597 TG3_FW_UPDATE_TIMEOUT_SEC);
8599 tg3_generate_fw_event(tp);
8601 tp->asf_counter = tp->asf_multiplier;
8604 spin_unlock(&tp->lock);
8606 restart_timer:
8607 tp->timer.expires = jiffies + tp->timer_offset;
8608 add_timer(&tp->timer);
8611 static int tg3_request_irq(struct tg3 *tp, int irq_num)
8613 irq_handler_t fn;
8614 unsigned long flags;
8615 char *name;
8616 struct tg3_napi *tnapi = &tp->napi[irq_num];
8618 if (tp->irq_cnt == 1)
8619 name = tp->dev->name;
8620 else {
8621 name = &tnapi->irq_lbl[0];
8622 snprintf(name, IFNAMSIZ, "%s-%d", tp->dev->name, irq_num);
8623 name[IFNAMSIZ-1] = 0;
8626 if (tp->tg3_flags2 & TG3_FLG2_USING_MSI_OR_MSIX) {
8627 fn = tg3_msi;
8628 if (tp->tg3_flags2 & TG3_FLG2_1SHOT_MSI)
8629 fn = tg3_msi_1shot;
8630 flags = IRQF_SAMPLE_RANDOM;
8631 } else {
8632 fn = tg3_interrupt;
8633 if (tp->tg3_flags & TG3_FLAG_TAGGED_STATUS)
8634 fn = tg3_interrupt_tagged;
8635 flags = IRQF_SHARED | IRQF_SAMPLE_RANDOM;
8638 return request_irq(tnapi->irq_vec, fn, flags, name, tnapi);
8641 static int tg3_test_interrupt(struct tg3 *tp)
8643 struct tg3_napi *tnapi = &tp->napi[0];
8644 struct net_device *dev = tp->dev;
8645 int err, i, intr_ok = 0;
8646 u32 val;
8648 if (!netif_running(dev))
8649 return -ENODEV;
8651 tg3_disable_ints(tp);
8653 free_irq(tnapi->irq_vec, tnapi);
8656 /* Turn off MSI one shot mode. Otherwise this test has no
8657 * observable way to know whether the interrupt was delivered. */
8659 if ((tp->tg3_flags3 & TG3_FLG3_5717_PLUS) &&
8660 (tp->tg3_flags2 & TG3_FLG2_USING_MSI)) {
8661 val = tr32(MSGINT_MODE) | MSGINT_MODE_ONE_SHOT_DISABLE;
8662 tw32(MSGINT_MODE, val);
8665 err = request_irq(tnapi->irq_vec, tg3_test_isr,
8666 IRQF_SHARED | IRQF_SAMPLE_RANDOM, dev->name, tnapi);
8667 if (err)
8668 return err;
8670 tnapi->hw_status->status &= ~SD_STATUS_UPDATED;
8671 tg3_enable_ints(tp);
8673 tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
8674 tnapi->coal_now);
8676 for (i = 0; i < 5; i++) {
8677 u32 int_mbox, misc_host_ctrl;
8679 int_mbox = tr32_mailbox(tnapi->int_mbox);
8680 misc_host_ctrl = tr32(TG3PCI_MISC_HOST_CTRL);
8682 if ((int_mbox != 0) ||
8683 (misc_host_ctrl & MISC_HOST_CTRL_MASK_PCI_INT)) {
8684 intr_ok = 1;
8685 break;
8688 msleep(10);
8691 tg3_disable_ints(tp);
8693 free_irq(tnapi->irq_vec, tnapi);
8695 err = tg3_request_irq(tp, 0);
8697 if (err)
8698 return err;
8700 if (intr_ok) {
8701 /* Reenable MSI one shot mode. */
8702 if ((tp->tg3_flags3 & TG3_FLG3_5717_PLUS) &&
8703 (tp->tg3_flags2 & TG3_FLG2_USING_MSI)) {
8704 val = tr32(MSGINT_MODE) & ~MSGINT_MODE_ONE_SHOT_DISABLE;
8705 tw32(MSGINT_MODE, val);
8707 return 0;
8710 return -EIO;
8713 /* Returns 0 if MSI test succeeds or MSI test fails and INTx mode is
8714 * successfully restored */
8716 static int tg3_test_msi(struct tg3 *tp)
8718 int err;
8719 u16 pci_cmd;
8721 if (!(tp->tg3_flags2 & TG3_FLG2_USING_MSI))
8722 return 0;
8724 /* Turn off SERR reporting in case MSI terminates with Master
8725 * Abort. */
8727 pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
8728 pci_write_config_word(tp->pdev, PCI_COMMAND,
8729 pci_cmd & ~PCI_COMMAND_SERR);
8731 err = tg3_test_interrupt(tp);
8733 pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
8735 if (!err)
8736 return 0;
8738 /* other failures */
8739 if (err != -EIO)
8740 return err;
8742 /* MSI test failed, go back to INTx mode */
8743 netdev_warn(tp->dev, "No interrupt was generated using MSI. Switching "
8744 "to INTx mode. Please report this failure to the PCI "
8745 "maintainer and include system chipset information\n");
8747 free_irq(tp->napi[0].irq_vec, &tp->napi[0]);
8749 pci_disable_msi(tp->pdev);
8751 tp->tg3_flags2 &= ~TG3_FLG2_USING_MSI;
8752 tp->napi[0].irq_vec = tp->pdev->irq;
8754 err = tg3_request_irq(tp, 0);
8755 if (err)
8756 return err;
8758 /* Need to reset the chip because the MSI cycle may have terminated
8759 * with Master Abort. */
8761 tg3_full_lock(tp, 1);
8763 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
8764 err = tg3_init_hw(tp, 1);
8766 tg3_full_unlock(tp);
8768 if (err)
8769 free_irq(tp->napi[0].irq_vec, &tp->napi[0]);
8771 return err;
8774 static int tg3_request_firmware(struct tg3 *tp)
8776 const __be32 *fw_data;
8778 if (request_firmware(&tp->fw, tp->fw_needed, &tp->pdev->dev)) {
8779 netdev_err(tp->dev, "Failed to load firmware \"%s\"\n",
8780 tp->fw_needed);
8781 return -ENOENT;
8784 fw_data = (void *)tp->fw->data;
8786 /* Firmware blob starts with version numbers, followed by
8787 * start address and _full_ length including BSS sections
8788 * (which must be longer than the actual data, of course). */
8791 tp->fw_len = be32_to_cpu(fw_data[2]); /* includes bss */
8792 if (tp->fw_len < (tp->fw->size - 12)) {
8793 netdev_err(tp->dev, "bogus length %d in \"%s\"\n",
8794 tp->fw_len, tp->fw_needed);
8795 release_firmware(tp->fw);
8796 tp->fw = NULL;
8797 return -EINVAL;
8800 /* We no longer need firmware; we have it. */
8801 tp->fw_needed = NULL;
8802 return 0;
8805 static bool tg3_enable_msix(struct tg3 *tp)
8807 int i, rc, cpus = num_online_cpus();
8808 struct msix_entry msix_ent[tp->irq_max];
8810 if (cpus == 1)
8811 /* Just fall back to the simpler MSI mode. */
8812 return false;
8815 /* We want as many rx rings enabled as there are cpus.
8816 * The first MSIX vector only deals with link interrupts, etc,
8817 * so we add one to the number of vectors we are requesting. */
8819 tp->irq_cnt = min_t(unsigned, cpus + 1, tp->irq_max);
8821 for (i = 0; i < tp->irq_max; i++) {
8822 msix_ent[i].entry = i;
8823 msix_ent[i].vector = 0;
8826 rc = pci_enable_msix(tp->pdev, msix_ent, tp->irq_cnt);
8827 if (rc < 0) {
8828 return false;
8829 } else if (rc != 0) {
8830 if (pci_enable_msix(tp->pdev, msix_ent, rc))
8831 return false;
8832 netdev_notice(tp->dev, "Requested %d MSI-X vectors, received %d\n",
8833 tp->irq_cnt, rc);
8834 tp->irq_cnt = rc;
8837 for (i = 0; i < tp->irq_max; i++)
8838 tp->napi[i].irq_vec = msix_ent[i].vector;
8840 tp->dev->real_num_tx_queues = 1;
8841 if (tp->irq_cnt > 1)
8842 tp->tg3_flags3 |= TG3_FLG3_ENABLE_RSS;
8844 return true;
8847 static void tg3_ints_init(struct tg3 *tp)
8849 if ((tp->tg3_flags & TG3_FLAG_SUPPORT_MSI_OR_MSIX) &&
8850 !(tp->tg3_flags & TG3_FLAG_TAGGED_STATUS)) {
8851 /* All MSI supporting chips should support tagged
8852 * status. Assert that this is the case. */
8854 netdev_warn(tp->dev,
8855 "MSI without TAGGED_STATUS? Not using MSI\n");
8856 goto defcfg;
8859 if ((tp->tg3_flags & TG3_FLAG_SUPPORT_MSIX) && tg3_enable_msix(tp))
8860 tp->tg3_flags2 |= TG3_FLG2_USING_MSIX;
8861 else if ((tp->tg3_flags & TG3_FLAG_SUPPORT_MSI) &&
8862 pci_enable_msi(tp->pdev) == 0)
8863 tp->tg3_flags2 |= TG3_FLG2_USING_MSI;
8865 if (tp->tg3_flags2 & TG3_FLG2_USING_MSI_OR_MSIX) {
8866 u32 msi_mode = tr32(MSGINT_MODE);
8867 if (tp->tg3_flags2 & TG3_FLG2_USING_MSIX)
8868 msi_mode |= MSGINT_MODE_MULTIVEC_EN;
8869 tw32(MSGINT_MODE, msi_mode | MSGINT_MODE_ENABLE);
8871 defcfg:
8872 if (!(tp->tg3_flags2 & TG3_FLG2_USING_MSIX)) {
8873 tp->irq_cnt = 1;
8874 tp->napi[0].irq_vec = tp->pdev->irq;
8875 tp->dev->real_num_tx_queues = 1;
8879 static void tg3_ints_fini(struct tg3 *tp)
8881 if (tp->tg3_flags2 & TG3_FLG2_USING_MSIX)
8882 pci_disable_msix(tp->pdev);
8883 else if (tp->tg3_flags2 & TG3_FLG2_USING_MSI)
8884 pci_disable_msi(tp->pdev);
8885 tp->tg3_flags2 &= ~TG3_FLG2_USING_MSI_OR_MSIX;
8886 tp->tg3_flags3 &= ~(TG3_FLG3_ENABLE_RSS | TG3_FLG3_ENABLE_TSS);
8889 static int tg3_open(struct net_device *dev)
8891 struct tg3 *tp = netdev_priv(dev);
8892 int i, err;
8894 if (tp->fw_needed) {
8895 err = tg3_request_firmware(tp);
8896 if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0) {
8897 if (err)
8898 return err;
8899 } else if (err) {
8900 netdev_warn(tp->dev, "TSO capability disabled\n");
8901 tp->tg3_flags2 &= ~TG3_FLG2_TSO_CAPABLE;
8902 } else if (!(tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE)) {
8903 netdev_notice(tp->dev, "TSO capability restored\n");
8904 tp->tg3_flags2 |= TG3_FLG2_TSO_CAPABLE;
8908 netif_carrier_off(tp->dev);
8910 err = tg3_set_power_state(tp, PCI_D0);
8911 if (err)
8912 return err;
8914 tg3_full_lock(tp, 0);
8916 tg3_disable_ints(tp);
8917 tp->tg3_flags &= ~TG3_FLAG_INIT_COMPLETE;
8919 tg3_full_unlock(tp);
8922 /* Setup interrupts first so we know how
8923 * many NAPI resources to allocate */
8925 tg3_ints_init(tp);
8927 /* The placement of this call is tied
8928 * to the setup and use of Host TX descriptors. */
8930 err = tg3_alloc_consistent(tp);
8931 if (err)
8932 goto err_out1;
8934 tg3_napi_enable(tp);
8936 for (i = 0; i < tp->irq_cnt; i++) {
8937 struct tg3_napi *tnapi = &tp->napi[i];
8938 err = tg3_request_irq(tp, i);
8939 if (err) {
8940 for (i--; i >= 0; i--)
8941 free_irq(tnapi->irq_vec, tnapi);
8942 break;
8946 if (err)
8947 goto err_out2;
8949 tg3_full_lock(tp, 0);
8951 err = tg3_init_hw(tp, 1);
8952 if (err) {
8953 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
8954 tg3_free_rings(tp);
8955 } else {
8956 if (tp->tg3_flags & TG3_FLAG_TAGGED_STATUS)
8957 tp->timer_offset = HZ;
8958 else
8959 tp->timer_offset = HZ / 10;
8961 BUG_ON(tp->timer_offset > HZ);
8962 tp->timer_counter = tp->timer_multiplier =
8963 (HZ / tp->timer_offset);
8964 tp->asf_counter = tp->asf_multiplier =
8965 ((HZ / tp->timer_offset) * 2);
8967 init_timer(&tp->timer);
8968 tp->timer.expires = jiffies + tp->timer_offset;
8969 tp->timer.data = (unsigned long) tp;
8970 tp->timer.function = tg3_timer;
8973 tg3_full_unlock(tp);
8975 if (err)
8976 goto err_out3;
8978 if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) {
8979 err = tg3_test_msi(tp);
8981 if (err) {
8982 tg3_full_lock(tp, 0);
8983 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
8984 tg3_free_rings(tp);
8985 tg3_full_unlock(tp);
8987 goto err_out2;
8990 if (!(tp->tg3_flags3 & TG3_FLG3_5717_PLUS) &&
8991 (tp->tg3_flags2 & TG3_FLG2_USING_MSI)) {
8992 u32 val = tr32(PCIE_TRANSACTION_CFG);
8994 tw32(PCIE_TRANSACTION_CFG,
8995 val | PCIE_TRANS_CFG_1SHOT_MSI);
8999 tg3_phy_start(tp);
9001 tg3_full_lock(tp, 0);
9003 add_timer(&tp->timer);
9004 tp->tg3_flags |= TG3_FLAG_INIT_COMPLETE;
9005 tg3_enable_ints(tp);
9007 tg3_full_unlock(tp);
9009 netif_tx_start_all_queues(dev);
9011 return 0;
9013 err_out3:
9014 for (i = tp->irq_cnt - 1; i >= 0; i--) {
9015 struct tg3_napi *tnapi = &tp->napi[i];
9016 free_irq(tnapi->irq_vec, tnapi);
9019 err_out2:
9020 tg3_napi_disable(tp);
9021 tg3_free_consistent(tp);
9023 err_out1:
9024 tg3_ints_fini(tp);
9025 return err;
9028 static struct rtnl_link_stats64 *tg3_get_stats64(struct net_device *,
9029 struct rtnl_link_stats64 *);
9030 static struct tg3_ethtool_stats *tg3_get_estats(struct tg3 *);
9032 static int tg3_close(struct net_device *dev)
9034 int i;
9035 struct tg3 *tp = netdev_priv(dev);
9037 tg3_napi_disable(tp);
9038 cancel_work_sync(&tp->reset_task);
9040 netif_tx_stop_all_queues(dev);
9042 del_timer_sync(&tp->timer);
9044 tg3_phy_stop(tp);
9046 tg3_full_lock(tp, 1);
9048 tg3_disable_ints(tp);
9050 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
9051 tg3_free_rings(tp);
9052 tp->tg3_flags &= ~TG3_FLAG_INIT_COMPLETE;
9054 tg3_full_unlock(tp);
9056 for (i = tp->irq_cnt - 1; i >= 0; i--) {
9057 struct tg3_napi *tnapi = &tp->napi[i];
9058 free_irq(tnapi->irq_vec, tnapi);
9061 tg3_ints_fini(tp);
9063 tg3_get_stats64(tp->dev, &tp->net_stats_prev);
9065 memcpy(&tp->estats_prev, tg3_get_estats(tp),
9066 sizeof(tp->estats_prev));
9068 tg3_free_consistent(tp);
9070 tg3_set_power_state(tp, PCI_D3hot);
9072 netif_carrier_off(tp->dev);
9074 return 0;
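/* Hardware statistics are kept as {high, low} 32-bit pairs; combine
 * them into a single 64-bit value for the stats paths below.
 */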
9077 static inline u64 get_stat64(tg3_stat64_t *val)
9079 return ((u64)val->high << 32) | ((u64)val->low);
9082 static u64 calc_crc_errors(struct tg3 *tp)
9084 struct tg3_hw_stats *hw_stats = tp->hw_stats;
9086 if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
9087 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
9088 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701)) {
9089 u32 val;
9091 spin_lock_bh(&tp->lock);
9092 if (!tg3_readphy(tp, MII_TG3_TEST1, &val)) {
9093 tg3_writephy(tp, MII_TG3_TEST1,
9094 val | MII_TG3_TEST1_CRC_EN);
9095 tg3_readphy(tp, MII_TG3_RXR_COUNTERS, &val);
9096 } else
9097 val = 0;
9098 spin_unlock_bh(&tp->lock);
9100 tp->phy_crc_errors += val;
9102 return tp->phy_crc_errors;
9105 return get_stat64(&hw_stats->rx_fcs_errors);
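/* ethtool statistics are reported as the totals saved at the last
 * close (estats_prev) plus the live hardware counters, so they keep
 * accumulating across down/up cycles.
 */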
9108 #define ESTAT_ADD(member) \
9109 estats->member = old_estats->member + \
9110 get_stat64(&hw_stats->member)
9112 static struct tg3_ethtool_stats *tg3_get_estats(struct tg3 *tp)
9114 struct tg3_ethtool_stats *estats = &tp->estats;
9115 struct tg3_ethtool_stats *old_estats = &tp->estats_prev;
9116 struct tg3_hw_stats *hw_stats = tp->hw_stats;
9118 if (!hw_stats)
9119 return old_estats;
9121 ESTAT_ADD(rx_octets);
9122 ESTAT_ADD(rx_fragments);
9123 ESTAT_ADD(rx_ucast_packets);
9124 ESTAT_ADD(rx_mcast_packets);
9125 ESTAT_ADD(rx_bcast_packets);
9126 ESTAT_ADD(rx_fcs_errors);
9127 ESTAT_ADD(rx_align_errors);
9128 ESTAT_ADD(rx_xon_pause_rcvd);
9129 ESTAT_ADD(rx_xoff_pause_rcvd);
9130 ESTAT_ADD(rx_mac_ctrl_rcvd);
9131 ESTAT_ADD(rx_xoff_entered);
9132 ESTAT_ADD(rx_frame_too_long_errors);
9133 ESTAT_ADD(rx_jabbers);
9134 ESTAT_ADD(rx_undersize_packets);
9135 ESTAT_ADD(rx_in_length_errors);
9136 ESTAT_ADD(rx_out_length_errors);
9137 ESTAT_ADD(rx_64_or_less_octet_packets);
9138 ESTAT_ADD(rx_65_to_127_octet_packets);
9139 ESTAT_ADD(rx_128_to_255_octet_packets);
9140 ESTAT_ADD(rx_256_to_511_octet_packets);
9141 ESTAT_ADD(rx_512_to_1023_octet_packets);
9142 ESTAT_ADD(rx_1024_to_1522_octet_packets);
9143 ESTAT_ADD(rx_1523_to_2047_octet_packets);
9144 ESTAT_ADD(rx_2048_to_4095_octet_packets);
9145 ESTAT_ADD(rx_4096_to_8191_octet_packets);
9146 ESTAT_ADD(rx_8192_to_9022_octet_packets);
9148 ESTAT_ADD(tx_octets);
9149 ESTAT_ADD(tx_collisions);
9150 ESTAT_ADD(tx_xon_sent);
9151 ESTAT_ADD(tx_xoff_sent);
9152 ESTAT_ADD(tx_flow_control);
9153 ESTAT_ADD(tx_mac_errors);
9154 ESTAT_ADD(tx_single_collisions);
9155 ESTAT_ADD(tx_mult_collisions);
9156 ESTAT_ADD(tx_deferred);
9157 ESTAT_ADD(tx_excessive_collisions);
9158 ESTAT_ADD(tx_late_collisions);
9159 ESTAT_ADD(tx_collide_2times);
9160 ESTAT_ADD(tx_collide_3times);
9161 ESTAT_ADD(tx_collide_4times);
9162 ESTAT_ADD(tx_collide_5times);
9163 ESTAT_ADD(tx_collide_6times);
9164 ESTAT_ADD(tx_collide_7times);
9165 ESTAT_ADD(tx_collide_8times);
9166 ESTAT_ADD(tx_collide_9times);
9167 ESTAT_ADD(tx_collide_10times);
9168 ESTAT_ADD(tx_collide_11times);
9169 ESTAT_ADD(tx_collide_12times);
9170 ESTAT_ADD(tx_collide_13times);
9171 ESTAT_ADD(tx_collide_14times);
9172 ESTAT_ADD(tx_collide_15times);
9173 ESTAT_ADD(tx_ucast_packets);
9174 ESTAT_ADD(tx_mcast_packets);
9175 ESTAT_ADD(tx_bcast_packets);
9176 ESTAT_ADD(tx_carrier_sense_errors);
9177 ESTAT_ADD(tx_discards);
9178 ESTAT_ADD(tx_errors);
9180 ESTAT_ADD(dma_writeq_full);
9181 ESTAT_ADD(dma_write_prioq_full);
9182 ESTAT_ADD(rxbds_empty);
9183 ESTAT_ADD(rx_discards);
9184 ESTAT_ADD(rx_errors);
9185 ESTAT_ADD(rx_threshold_hit);
9187 ESTAT_ADD(dma_readq_full);
9188 ESTAT_ADD(dma_read_prioq_full);
9189 ESTAT_ADD(tx_comp_queue_full);
9191 ESTAT_ADD(ring_set_send_prod_index);
9192 ESTAT_ADD(ring_status_update);
9193 ESTAT_ADD(nic_irqs);
9194 ESTAT_ADD(nic_avoided_irqs);
9195 ESTAT_ADD(nic_tx_threshold_hit);
9197 return estats;
9200 static struct rtnl_link_stats64 *tg3_get_stats64(struct net_device *dev,
9201 struct rtnl_link_stats64 *stats)
9203 struct tg3 *tp = netdev_priv(dev);
9204 struct rtnl_link_stats64 *old_stats = &tp->net_stats_prev;
9205 struct tg3_hw_stats *hw_stats = tp->hw_stats;
9207 if (!hw_stats)
9208 return old_stats;
9210 stats->rx_packets = old_stats->rx_packets +
9211 get_stat64(&hw_stats->rx_ucast_packets) +
9212 get_stat64(&hw_stats->rx_mcast_packets) +
9213 get_stat64(&hw_stats->rx_bcast_packets);
9215 stats->tx_packets = old_stats->tx_packets +
9216 get_stat64(&hw_stats->tx_ucast_packets) +
9217 get_stat64(&hw_stats->tx_mcast_packets) +
9218 get_stat64(&hw_stats->tx_bcast_packets);
9220 stats->rx_bytes = old_stats->rx_bytes +
9221 get_stat64(&hw_stats->rx_octets);
9222 stats->tx_bytes = old_stats->tx_bytes +
9223 get_stat64(&hw_stats->tx_octets);
9225 stats->rx_errors = old_stats->rx_errors +
9226 get_stat64(&hw_stats->rx_errors);
9227 stats->tx_errors = old_stats->tx_errors +
9228 get_stat64(&hw_stats->tx_errors) +
9229 get_stat64(&hw_stats->tx_mac_errors) +
9230 get_stat64(&hw_stats->tx_carrier_sense_errors) +
9231 get_stat64(&hw_stats->tx_discards);
9233 stats->multicast = old_stats->multicast +
9234 get_stat64(&hw_stats->rx_mcast_packets);
9235 stats->collisions = old_stats->collisions +
9236 get_stat64(&hw_stats->tx_collisions);
9238 stats->rx_length_errors = old_stats->rx_length_errors +
9239 get_stat64(&hw_stats->rx_frame_too_long_errors) +
9240 get_stat64(&hw_stats->rx_undersize_packets);
9242 stats->rx_over_errors = old_stats->rx_over_errors +
9243 get_stat64(&hw_stats->rxbds_empty);
9244 stats->rx_frame_errors = old_stats->rx_frame_errors +
9245 get_stat64(&hw_stats->rx_align_errors);
9246 stats->tx_aborted_errors = old_stats->tx_aborted_errors +
9247 get_stat64(&hw_stats->tx_discards);
9248 stats->tx_carrier_errors = old_stats->tx_carrier_errors +
9249 get_stat64(&hw_stats->tx_carrier_sense_errors);
9251 stats->rx_crc_errors = old_stats->rx_crc_errors +
9252 calc_crc_errors(tp);
9254 stats->rx_missed_errors = old_stats->rx_missed_errors +
9255 get_stat64(&hw_stats->rx_discards);
9257 return stats;
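/* Bit-serial CRC-32 over the buffer using the reflected Ethernet
 * polynomial (0xedb88320); the result is used below to hash multicast
 * addresses into the MAC_HASH_REG_* filter.
 */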
9260 static inline u32 calc_crc(unsigned char *buf, int len)
9262 u32 reg;
9263 u32 tmp;
9264 int j, k;
9266 reg = 0xffffffff;
9268 for (j = 0; j < len; j++) {
9269 reg ^= buf[j];
9271 for (k = 0; k < 8; k++) {
9272 tmp = reg & 0x01;
9274 reg >>= 1;
9276 if (tmp)
9277 reg ^= 0xedb88320;
9281 return ~reg;
9284 static void tg3_set_multi(struct tg3 *tp, unsigned int accept_all)
9286 /* accept or reject all multicast frames */
9287 tw32(MAC_HASH_REG_0, accept_all ? 0xffffffff : 0);
9288 tw32(MAC_HASH_REG_1, accept_all ? 0xffffffff : 0);
9289 tw32(MAC_HASH_REG_2, accept_all ? 0xffffffff : 0);
9290 tw32(MAC_HASH_REG_3, accept_all ? 0xffffffff : 0);
9293 static void __tg3_set_rx_mode(struct net_device *dev)
9295 struct tg3 *tp = netdev_priv(dev);
9296 u32 rx_mode;
9298 rx_mode = tp->rx_mode & ~(RX_MODE_PROMISC |
9299 RX_MODE_KEEP_VLAN_TAG);
9301 /* When ASF is in use, we always keep the RX_MODE_KEEP_VLAN_TAG
9302 * flag clear. */
9304 #if TG3_VLAN_TAG_USED
9305 if (!tp->vlgrp &&
9306 !(tp->tg3_flags & TG3_FLAG_ENABLE_ASF))
9307 rx_mode |= RX_MODE_KEEP_VLAN_TAG;
9308 #else
9309 /* By definition, VLAN is always disabled in this
9310 * case. */
9312 if (!(tp->tg3_flags & TG3_FLAG_ENABLE_ASF))
9313 rx_mode |= RX_MODE_KEEP_VLAN_TAG;
9314 #endif
9316 if (dev->flags & IFF_PROMISC) {
9317 /* Promiscuous mode. */
9318 rx_mode |= RX_MODE_PROMISC;
9319 } else if (dev->flags & IFF_ALLMULTI) {
9320 /* Accept all multicast. */
9321 tg3_set_multi(tp, 1);
9322 } else if (netdev_mc_empty(dev)) {
9323 /* Reject all multicast. */
9324 tg3_set_multi(tp, 0);
9325 } else {
9326 /* Accept one or more multicast(s). */
9327 struct netdev_hw_addr *ha;
9328 u32 mc_filter[4] = { 0, };
9329 u32 regidx;
9330 u32 bit;
9331 u32 crc;
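/* 128-bin hash filter: the low 7 bits of the inverted CRC pick a
 * filter bit; bits 6:5 select one of the four 32-bit hash registers
 * and bits 4:0 the position within it.
 */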
9333 netdev_for_each_mc_addr(ha, dev) {
9334 crc = calc_crc(ha->addr, ETH_ALEN);
9335 bit = ~crc & 0x7f;
9336 regidx = (bit & 0x60) >> 5;
9337 bit &= 0x1f;
9338 mc_filter[regidx] |= (1 << bit);
9341 tw32(MAC_HASH_REG_0, mc_filter[0]);
9342 tw32(MAC_HASH_REG_1, mc_filter[1]);
9343 tw32(MAC_HASH_REG_2, mc_filter[2]);
9344 tw32(MAC_HASH_REG_3, mc_filter[3]);
9347 if (rx_mode != tp->rx_mode) {
9348 tp->rx_mode = rx_mode;
9349 tw32_f(MAC_RX_MODE, rx_mode);
9350 udelay(10);
9354 static void tg3_set_rx_mode(struct net_device *dev)
9356 struct tg3 *tp = netdev_priv(dev);
9358 if (!netif_running(dev))
9359 return;
9361 tg3_full_lock(tp, 0);
9362 __tg3_set_rx_mode(dev);
9363 tg3_full_unlock(tp);
9366 #define TG3_REGDUMP_LEN (32 * 1024)
9368 static int tg3_get_regs_len(struct net_device *dev)
9370 return TG3_REGDUMP_LEN;
9373 static void tg3_get_regs(struct net_device *dev,
9374 struct ethtool_regs *regs, void *_p)
9376 u32 *p = _p;
9377 struct tg3 *tp = netdev_priv(dev);
9378 u8 *orig_p = _p;
9379 int i;
9381 regs->version = 0;
9383 memset(p, 0, TG3_REGDUMP_LEN);
9385 if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
9386 return;
9388 tg3_full_lock(tp, 0);
9390 #define __GET_REG32(reg) (*(p)++ = tr32(reg))
9391 #define GET_REG32_LOOP(base, len) \
9392 do { p = (u32 *)(orig_p + (base)); \
9393 for (i = 0; i < len; i += 4) \
9394 __GET_REG32((base) + i); \
9395 } while (0)
9396 #define GET_REG32_1(reg) \
9397 do { p = (u32 *)(orig_p + (reg)); \
9398 __GET_REG32((reg)); \
9399 } while (0)
9401 GET_REG32_LOOP(TG3PCI_VENDOR, 0xb0);
9402 GET_REG32_LOOP(MAILBOX_INTERRUPT_0, 0x200);
9403 GET_REG32_LOOP(MAC_MODE, 0x4f0);
9404 GET_REG32_LOOP(SNDDATAI_MODE, 0xe0);
9405 GET_REG32_1(SNDDATAC_MODE);
9406 GET_REG32_LOOP(SNDBDS_MODE, 0x80);
9407 GET_REG32_LOOP(SNDBDI_MODE, 0x48);
9408 GET_REG32_1(SNDBDC_MODE);
9409 GET_REG32_LOOP(RCVLPC_MODE, 0x20);
9410 GET_REG32_LOOP(RCVLPC_SELLST_BASE, 0x15c);
9411 GET_REG32_LOOP(RCVDBDI_MODE, 0x0c);
9412 GET_REG32_LOOP(RCVDBDI_JUMBO_BD, 0x3c);
9413 GET_REG32_LOOP(RCVDBDI_BD_PROD_IDX_0, 0x44);
9414 GET_REG32_1(RCVDCC_MODE);
9415 GET_REG32_LOOP(RCVBDI_MODE, 0x20);
9416 GET_REG32_LOOP(RCVCC_MODE, 0x14);
9417 GET_REG32_LOOP(RCVLSC_MODE, 0x08);
9418 GET_REG32_1(MBFREE_MODE);
9419 GET_REG32_LOOP(HOSTCC_MODE, 0x100);
9420 GET_REG32_LOOP(MEMARB_MODE, 0x10);
9421 GET_REG32_LOOP(BUFMGR_MODE, 0x58);
9422 GET_REG32_LOOP(RDMAC_MODE, 0x08);
9423 GET_REG32_LOOP(WDMAC_MODE, 0x08);
9424 GET_REG32_1(RX_CPU_MODE);
9425 GET_REG32_1(RX_CPU_STATE);
9426 GET_REG32_1(RX_CPU_PGMCTR);
9427 GET_REG32_1(RX_CPU_HWBKPT);
9428 GET_REG32_1(TX_CPU_MODE);
9429 GET_REG32_1(TX_CPU_STATE);
9430 GET_REG32_1(TX_CPU_PGMCTR);
9431 GET_REG32_LOOP(GRCMBOX_INTERRUPT_0, 0x110);
9432 GET_REG32_LOOP(FTQ_RESET, 0x120);
9433 GET_REG32_LOOP(MSGINT_MODE, 0x0c);
9434 GET_REG32_1(DMAC_MODE);
9435 GET_REG32_LOOP(GRC_MODE, 0x4c);
9436 if (tp->tg3_flags & TG3_FLAG_NVRAM)
9437 GET_REG32_LOOP(NVRAM_CMD, 0x24);
9439 #undef __GET_REG32
9440 #undef GET_REG32_LOOP
9441 #undef GET_REG32_1
9443 tg3_full_unlock(tp);
9446 static int tg3_get_eeprom_len(struct net_device *dev)
9448 struct tg3 *tp = netdev_priv(dev);
9450 return tp->nvram_size;
9453 static int tg3_get_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom, u8 *data)
9455 struct tg3 *tp = netdev_priv(dev);
9456 int ret;
9457 u8 *pd;
9458 u32 i, offset, len, b_offset, b_count;
9459 __be32 val;
9461 if (tp->tg3_flags3 & TG3_FLG3_NO_NVRAM)
9462 return -EINVAL;
9464 if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
9465 return -EAGAIN;
9467 offset = eeprom->offset;
9468 len = eeprom->len;
9469 eeprom->len = 0;
9471 eeprom->magic = TG3_EEPROM_MAGIC;
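/* NVRAM is read as big-endian 32-bit words, so an arbitrary
 * offset/length request is served in three steps: a leading partial
 * word, the aligned middle, and a trailing partial word.
 */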
9473 if (offset & 3) {
9474 /* adjustments to start on required 4 byte boundary */
9475 b_offset = offset & 3;
9476 b_count = 4 - b_offset;
9477 if (b_count > len) {
9478 /* i.e. offset=1 len=2 */
9479 b_count = len;
9481 ret = tg3_nvram_read_be32(tp, offset-b_offset, &val);
9482 if (ret)
9483 return ret;
9484 memcpy(data, ((char *)&val) + b_offset, b_count);
9485 len -= b_count;
9486 offset += b_count;
9487 eeprom->len += b_count;
9490 /* read bytes up to the last 4 byte boundary */
9491 pd = &data[eeprom->len];
9492 for (i = 0; i < (len - (len & 3)); i += 4) {
9493 ret = tg3_nvram_read_be32(tp, offset + i, &val);
9494 if (ret) {
9495 eeprom->len += i;
9496 return ret;
9498 memcpy(pd + i, &val, 4);
9500 eeprom->len += i;
9502 if (len & 3) {
9503 /* read last bytes not ending on 4 byte boundary */
9504 pd = &data[eeprom->len];
9505 b_count = len & 3;
9506 b_offset = offset + len - b_count;
9507 ret = tg3_nvram_read_be32(tp, b_offset, &val);
9508 if (ret)
9509 return ret;
9510 memcpy(pd, &val, b_count);
9511 eeprom->len += b_count;
9513 return 0;
9516 static int tg3_nvram_write_block(struct tg3 *tp, u32 offset, u32 len, u8 *buf);
9518 static int tg3_set_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom, u8 *data)
9520 struct tg3 *tp = netdev_priv(dev);
9521 int ret;
9522 u32 offset, len, b_offset, odd_len;
9523 u8 *buf;
9524 __be32 start, end;
9526 if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
9527 return -EAGAIN;
9529 if ((tp->tg3_flags3 & TG3_FLG3_NO_NVRAM) ||
9530 eeprom->magic != TG3_EEPROM_MAGIC)
9531 return -EINVAL;
9533 offset = eeprom->offset;
9534 len = eeprom->len;
9536 if ((b_offset = (offset & 3))) {
9537 /* adjustments to start on required 4 byte boundary */
9538 ret = tg3_nvram_read_be32(tp, offset-b_offset, &start);
9539 if (ret)
9540 return ret;
9541 len += b_offset;
9542 offset &= ~3;
9543 if (len < 4)
9544 len = 4;
9547 odd_len = 0;
9548 if (len & 3) {
9549 /* adjustments to end on required 4 byte boundary */
9550 odd_len = 1;
9551 len = (len + 3) & ~3;
9552 ret = tg3_nvram_read_be32(tp, offset+len-4, &end);
9553 if (ret)
9554 return ret;
9557 buf = data;
9558 if (b_offset || odd_len) {
9559 buf = kmalloc(len, GFP_KERNEL);
9560 if (!buf)
9561 return -ENOMEM;
9562 if (b_offset)
9563 memcpy(buf, &start, 4);
9564 if (odd_len)
9565 memcpy(buf+len-4, &end, 4);
9566 memcpy(buf + b_offset, data, eeprom->len);
9569 ret = tg3_nvram_write_block(tp, offset, len, buf);
9571 if (buf != data)
9572 kfree(buf);
9574 return ret;
9577 static int tg3_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
9579 struct tg3 *tp = netdev_priv(dev);
9581 if (tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB) {
9582 struct phy_device *phydev;
9583 if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
9584 return -EAGAIN;
9585 phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
9586 return phy_ethtool_gset(phydev, cmd);
9589 cmd->supported = (SUPPORTED_Autoneg);
9591 if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY))
9592 cmd->supported |= (SUPPORTED_1000baseT_Half |
9593 SUPPORTED_1000baseT_Full);
9595 if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES)) {
9596 cmd->supported |= (SUPPORTED_100baseT_Half |
9597 SUPPORTED_100baseT_Full |
9598 SUPPORTED_10baseT_Half |
9599 SUPPORTED_10baseT_Full |
9600 SUPPORTED_TP);
9601 cmd->port = PORT_TP;
9602 } else {
9603 cmd->supported |= SUPPORTED_FIBRE;
9604 cmd->port = PORT_FIBRE;
9607 cmd->advertising = tp->link_config.advertising;
9608 if (netif_running(dev)) {
9609 cmd->speed = tp->link_config.active_speed;
9610 cmd->duplex = tp->link_config.active_duplex;
9612 cmd->phy_address = tp->phy_addr;
9613 cmd->transceiver = XCVR_INTERNAL;
9614 cmd->autoneg = tp->link_config.autoneg;
9615 cmd->maxtxpkt = 0;
9616 cmd->maxrxpkt = 0;
9617 return 0;
9620 static int tg3_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
9622 struct tg3 *tp = netdev_priv(dev);
9624 if (tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB) {
9625 struct phy_device *phydev;
9626 if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
9627 return -EAGAIN;
9628 phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
9629 return phy_ethtool_sset(phydev, cmd);
9632 if (cmd->autoneg != AUTONEG_ENABLE &&
9633 cmd->autoneg != AUTONEG_DISABLE)
9634 return -EINVAL;
9636 if (cmd->autoneg == AUTONEG_DISABLE &&
9637 cmd->duplex != DUPLEX_FULL &&
9638 cmd->duplex != DUPLEX_HALF)
9639 return -EINVAL;
9641 if (cmd->autoneg == AUTONEG_ENABLE) {
9642 u32 mask = ADVERTISED_Autoneg |
9643 ADVERTISED_Pause |
9644 ADVERTISED_Asym_Pause;
9646 if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY))
9647 mask |= ADVERTISED_1000baseT_Half |
9648 ADVERTISED_1000baseT_Full;
9650 if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
9651 mask |= ADVERTISED_100baseT_Half |
9652 ADVERTISED_100baseT_Full |
9653 ADVERTISED_10baseT_Half |
9654 ADVERTISED_10baseT_Full |
9655 ADVERTISED_TP;
9656 else
9657 mask |= ADVERTISED_FIBRE;
9659 if (cmd->advertising & ~mask)
9660 return -EINVAL;
9662 mask &= (ADVERTISED_1000baseT_Half |
9663 ADVERTISED_1000baseT_Full |
9664 ADVERTISED_100baseT_Half |
9665 ADVERTISED_100baseT_Full |
9666 ADVERTISED_10baseT_Half |
9667 ADVERTISED_10baseT_Full);
9669 cmd->advertising &= mask;
9670 } else {
9671 if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES) {
9672 if (cmd->speed != SPEED_1000)
9673 return -EINVAL;
9675 if (cmd->duplex != DUPLEX_FULL)
9676 return -EINVAL;
9677 } else {
9678 if (cmd->speed != SPEED_100 &&
9679 cmd->speed != SPEED_10)
9680 return -EINVAL;
9684 tg3_full_lock(tp, 0);
9686 tp->link_config.autoneg = cmd->autoneg;
9687 if (cmd->autoneg == AUTONEG_ENABLE) {
9688 tp->link_config.advertising = (cmd->advertising |
9689 ADVERTISED_Autoneg);
9690 tp->link_config.speed = SPEED_INVALID;
9691 tp->link_config.duplex = DUPLEX_INVALID;
9692 } else {
9693 tp->link_config.advertising = 0;
9694 tp->link_config.speed = cmd->speed;
9695 tp->link_config.duplex = cmd->duplex;
9698 tp->link_config.orig_speed = tp->link_config.speed;
9699 tp->link_config.orig_duplex = tp->link_config.duplex;
9700 tp->link_config.orig_autoneg = tp->link_config.autoneg;
9702 if (netif_running(dev))
9703 tg3_setup_phy(tp, 1);
9705 tg3_full_unlock(tp);
9707 return 0;
9710 static void tg3_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
9712 struct tg3 *tp = netdev_priv(dev);
9714 strcpy(info->driver, DRV_MODULE_NAME);
9715 strcpy(info->version, DRV_MODULE_VERSION);
9716 strcpy(info->fw_version, tp->fw_ver);
9717 strcpy(info->bus_info, pci_name(tp->pdev));
9720 static void tg3_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
9722 struct tg3 *tp = netdev_priv(dev);
9724 if ((tp->tg3_flags & TG3_FLAG_WOL_CAP) &&
9725 device_can_wakeup(&tp->pdev->dev))
9726 wol->supported = WAKE_MAGIC;
9727 else
9728 wol->supported = 0;
9729 wol->wolopts = 0;
9730 if ((tp->tg3_flags & TG3_FLAG_WOL_ENABLE) &&
9731 device_can_wakeup(&tp->pdev->dev))
9732 wol->wolopts = WAKE_MAGIC;
9733 memset(&wol->sopass, 0, sizeof(wol->sopass));
9736 static int tg3_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
9738 struct tg3 *tp = netdev_priv(dev);
9739 struct device *dp = &tp->pdev->dev;
9741 if (wol->wolopts & ~WAKE_MAGIC)
9742 return -EINVAL;
9743 if ((wol->wolopts & WAKE_MAGIC) &&
9744 !((tp->tg3_flags & TG3_FLAG_WOL_CAP) && device_can_wakeup(dp)))
9745 return -EINVAL;
9747 spin_lock_bh(&tp->lock);
9748 if (wol->wolopts & WAKE_MAGIC) {
9749 tp->tg3_flags |= TG3_FLAG_WOL_ENABLE;
9750 device_set_wakeup_enable(dp, true);
9751 } else {
9752 tp->tg3_flags &= ~TG3_FLAG_WOL_ENABLE;
9753 device_set_wakeup_enable(dp, false);
9755 spin_unlock_bh(&tp->lock);
9757 return 0;
9760 static u32 tg3_get_msglevel(struct net_device *dev)
9762 struct tg3 *tp = netdev_priv(dev);
9763 return tp->msg_enable;
9766 static void tg3_set_msglevel(struct net_device *dev, u32 value)
9768 struct tg3 *tp = netdev_priv(dev);
9769 tp->msg_enable = value;
9772 static int tg3_set_tso(struct net_device *dev, u32 value)
9774 struct tg3 *tp = netdev_priv(dev);
9776 if (!(tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE)) {
9777 if (value)
9778 return -EINVAL;
9779 return 0;
9781 if ((dev->features & NETIF_F_IPV6_CSUM) &&
9782 ((tp->tg3_flags2 & TG3_FLG2_HW_TSO_2) ||
9783 (tp->tg3_flags2 & TG3_FLG2_HW_TSO_3))) {
9784 if (value) {
9785 dev->features |= NETIF_F_TSO6;
9786 if ((tp->tg3_flags2 & TG3_FLG2_HW_TSO_3) ||
9787 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
9788 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 &&
9789 GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5784_AX) ||
9790 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
9791 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780)
9792 dev->features |= NETIF_F_TSO_ECN;
9793 } else
9794 dev->features &= ~(NETIF_F_TSO6 | NETIF_F_TSO_ECN);
9796 return ethtool_op_set_tso(dev, value);
9799 static int tg3_nway_reset(struct net_device *dev)
9801 struct tg3 *tp = netdev_priv(dev);
9802 int r;
9804 if (!netif_running(dev))
9805 return -EAGAIN;
9807 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
9808 return -EINVAL;
9810 if (tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB) {
9811 if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
9812 return -EAGAIN;
9813 r = phy_start_aneg(tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]);
9814 } else {
9815 u32 bmcr;
9817 spin_lock_bh(&tp->lock);
9818 r = -EINVAL;
9819 tg3_readphy(tp, MII_BMCR, &bmcr);
9820 if (!tg3_readphy(tp, MII_BMCR, &bmcr) &&
9821 ((bmcr & BMCR_ANENABLE) ||
9822 (tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT))) {
9823 tg3_writephy(tp, MII_BMCR, bmcr | BMCR_ANRESTART |
9824 BMCR_ANENABLE);
9825 r = 0;
9827 spin_unlock_bh(&tp->lock);
9830 return r;
9833 static void tg3_get_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
9835 struct tg3 *tp = netdev_priv(dev);
9837 ering->rx_max_pending = TG3_RX_RING_SIZE - 1;
9838 ering->rx_mini_max_pending = 0;
9839 if (tp->tg3_flags & TG3_FLAG_JUMBO_RING_ENABLE)
9840 ering->rx_jumbo_max_pending = TG3_RX_JUMBO_RING_SIZE - 1;
9841 else
9842 ering->rx_jumbo_max_pending = 0;
9844 ering->tx_max_pending = TG3_TX_RING_SIZE - 1;
9846 ering->rx_pending = tp->rx_pending;
9847 ering->rx_mini_pending = 0;
9848 if (tp->tg3_flags & TG3_FLAG_JUMBO_RING_ENABLE)
9849 ering->rx_jumbo_pending = tp->rx_jumbo_pending;
9850 else
9851 ering->rx_jumbo_pending = 0;
9853 ering->tx_pending = tp->napi[0].tx_pending;
9856 static int tg3_set_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
9858 struct tg3 *tp = netdev_priv(dev);
9859 int i, irq_sync = 0, err = 0;
9861 if ((ering->rx_pending > TG3_RX_RING_SIZE - 1) ||
9862 (ering->rx_jumbo_pending > TG3_RX_JUMBO_RING_SIZE - 1) ||
9863 (ering->tx_pending > TG3_TX_RING_SIZE - 1) ||
9864 (ering->tx_pending <= MAX_SKB_FRAGS) ||
9865 ((tp->tg3_flags2 & TG3_FLG2_TSO_BUG) &&
9866 (ering->tx_pending <= (MAX_SKB_FRAGS * 3))))
9867 return -EINVAL;
9869 if (netif_running(dev)) {
9870 tg3_phy_stop(tp);
9871 tg3_netif_stop(tp);
9872 irq_sync = 1;
9875 tg3_full_lock(tp, irq_sync);
9877 tp->rx_pending = ering->rx_pending;
9879 if ((tp->tg3_flags2 & TG3_FLG2_MAX_RXPEND_64) &&
9880 tp->rx_pending > 63)
9881 tp->rx_pending = 63;
9882 tp->rx_jumbo_pending = ering->rx_jumbo_pending;
9884 for (i = 0; i < tp->irq_max; i++)
9885 tp->napi[i].tx_pending = ering->tx_pending;
9887 if (netif_running(dev)) {
9888 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
9889 err = tg3_restart_hw(tp, 1);
9890 if (!err)
9891 tg3_netif_start(tp);
9894 tg3_full_unlock(tp);
9896 if (irq_sync && !err)
9897 tg3_phy_start(tp);
9899 return err;
9902 static void tg3_get_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
9904 struct tg3 *tp = netdev_priv(dev);
9906 epause->autoneg = (tp->tg3_flags & TG3_FLAG_PAUSE_AUTONEG) != 0;
9908 if (tp->link_config.active_flowctrl & FLOW_CTRL_RX)
9909 epause->rx_pause = 1;
9910 else
9911 epause->rx_pause = 0;
9913 if (tp->link_config.active_flowctrl & FLOW_CTRL_TX)
9914 epause->tx_pause = 1;
9915 else
9916 epause->tx_pause = 0;
9919 static int tg3_set_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
9921 struct tg3 *tp = netdev_priv(dev);
9922 int err = 0;
9924 if (tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB) {
9925 u32 newadv;
9926 struct phy_device *phydev;
9928 phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
9930 if (!(phydev->supported & SUPPORTED_Pause) ||
9931 (!(phydev->supported & SUPPORTED_Asym_Pause) &&
9932 ((epause->rx_pause && !epause->tx_pause) ||
9933 (!epause->rx_pause && epause->tx_pause))))
9934 return -EINVAL;
9936 tp->link_config.flowctrl = 0;
9937 if (epause->rx_pause) {
9938 tp->link_config.flowctrl |= FLOW_CTRL_RX;
9940 if (epause->tx_pause) {
9941 tp->link_config.flowctrl |= FLOW_CTRL_TX;
9942 newadv = ADVERTISED_Pause;
9943 } else
9944 newadv = ADVERTISED_Pause |
9945 ADVERTISED_Asym_Pause;
9946 } else if (epause->tx_pause) {
9947 tp->link_config.flowctrl |= FLOW_CTRL_TX;
9948 newadv = ADVERTISED_Asym_Pause;
9949 } else
9950 newadv = 0;
9952 if (epause->autoneg)
9953 tp->tg3_flags |= TG3_FLAG_PAUSE_AUTONEG;
9954 else
9955 tp->tg3_flags &= ~TG3_FLAG_PAUSE_AUTONEG;
9957 if (tp->phy_flags & TG3_PHYFLG_IS_CONNECTED) {
9958 u32 oldadv = phydev->advertising &
9959 (ADVERTISED_Pause | ADVERTISED_Asym_Pause);
9960 if (oldadv != newadv) {
9961 phydev->advertising &=
9962 ~(ADVERTISED_Pause |
9963 ADVERTISED_Asym_Pause);
9964 phydev->advertising |= newadv;
9965 if (phydev->autoneg) {
9967 /* Always renegotiate the link to
9968 * inform our link partner of our
9969 * flow control settings, even if the
9970 * flow control is forced. Let
9971 * tg3_adjust_link() do the final
9972 * flow control setup. */
9974 return phy_start_aneg(phydev);
9978 if (!epause->autoneg)
9979 tg3_setup_flow_control(tp, 0, 0);
9980 } else {
9981 tp->link_config.orig_advertising &=
9982 ~(ADVERTISED_Pause |
9983 ADVERTISED_Asym_Pause);
9984 tp->link_config.orig_advertising |= newadv;
9986 } else {
9987 int irq_sync = 0;
9989 if (netif_running(dev)) {
9990 tg3_netif_stop(tp);
9991 irq_sync = 1;
9994 tg3_full_lock(tp, irq_sync);
9996 if (epause->autoneg)
9997 tp->tg3_flags |= TG3_FLAG_PAUSE_AUTONEG;
9998 else
9999 tp->tg3_flags &= ~TG3_FLAG_PAUSE_AUTONEG;
10000 if (epause->rx_pause)
10001 tp->link_config.flowctrl |= FLOW_CTRL_RX;
10002 else
10003 tp->link_config.flowctrl &= ~FLOW_CTRL_RX;
10004 if (epause->tx_pause)
10005 tp->link_config.flowctrl |= FLOW_CTRL_TX;
10006 else
10007 tp->link_config.flowctrl &= ~FLOW_CTRL_TX;
10009 if (netif_running(dev)) {
10010 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
10011 err = tg3_restart_hw(tp, 1);
10012 if (!err)
10013 tg3_netif_start(tp);
10016 tg3_full_unlock(tp);
10019 return err;
10022 static u32 tg3_get_rx_csum(struct net_device *dev)
10024 struct tg3 *tp = netdev_priv(dev);
10025 return (tp->tg3_flags & TG3_FLAG_RX_CHECKSUMS) != 0;
10028 static int tg3_set_rx_csum(struct net_device *dev, u32 data)
10030 struct tg3 *tp = netdev_priv(dev);
10032 if (tp->tg3_flags & TG3_FLAG_BROKEN_CHECKSUMS) {
10033 if (data != 0)
10034 return -EINVAL;
10035 return 0;
10038 spin_lock_bh(&tp->lock);
10039 if (data)
10040 tp->tg3_flags |= TG3_FLAG_RX_CHECKSUMS;
10041 else
10042 tp->tg3_flags &= ~TG3_FLAG_RX_CHECKSUMS;
10043 spin_unlock_bh(&tp->lock);
10045 return 0;
10048 static int tg3_set_tx_csum(struct net_device *dev, u32 data)
10050 struct tg3 *tp = netdev_priv(dev);
10052 if (tp->tg3_flags & TG3_FLAG_BROKEN_CHECKSUMS) {
10053 if (data != 0)
10054 return -EINVAL;
10055 return 0;
10058 if (tp->tg3_flags3 & TG3_FLG3_5755_PLUS)
10059 ethtool_op_set_tx_ipv6_csum(dev, data);
10060 else
10061 ethtool_op_set_tx_csum(dev, data);
10063 return 0;
10066 static int tg3_get_sset_count(struct net_device *dev, int sset)
10068 switch (sset) {
10069 case ETH_SS_TEST:
10070 return TG3_NUM_TEST;
10071 case ETH_SS_STATS:
10072 return TG3_NUM_STATS;
10073 default:
10074 return -EOPNOTSUPP;
10078 static void tg3_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
10080 switch (stringset) {
10081 case ETH_SS_STATS:
10082 memcpy(buf, &ethtool_stats_keys, sizeof(ethtool_stats_keys));
10083 break;
10084 case ETH_SS_TEST:
10085 memcpy(buf, &ethtool_test_keys, sizeof(ethtool_test_keys));
10086 break;
10087 default:
10088 WARN_ON(1); /* we need a WARN() */
10089 break;
10093 static int tg3_phys_id(struct net_device *dev, u32 data)
10095 struct tg3 *tp = netdev_priv(dev);
10096 int i;
10098 if (!netif_running(tp->dev))
10099 return -EAGAIN;
10101 if (data == 0)
10102 data = UINT_MAX / 2;
10104 for (i = 0; i < (data * 2); i++) {
10105 if ((i % 2) == 0)
10106 tw32(MAC_LED_CTRL, LED_CTRL_LNKLED_OVERRIDE |
10107 LED_CTRL_1000MBPS_ON |
10108 LED_CTRL_100MBPS_ON |
10109 LED_CTRL_10MBPS_ON |
10110 LED_CTRL_TRAFFIC_OVERRIDE |
10111 LED_CTRL_TRAFFIC_BLINK |
10112 LED_CTRL_TRAFFIC_LED);
10114 else
10115 tw32(MAC_LED_CTRL, LED_CTRL_LNKLED_OVERRIDE |
10116 LED_CTRL_TRAFFIC_OVERRIDE);
10118 if (msleep_interruptible(500))
10119 break;
10121 tw32(MAC_LED_CTRL, tp->led_ctrl);
10122 return 0;
10125 static void tg3_get_ethtool_stats(struct net_device *dev,
10126 struct ethtool_stats *estats, u64 *tmp_stats)
10128 struct tg3 *tp = netdev_priv(dev);
10129 memcpy(tmp_stats, tg3_get_estats(tp), sizeof(tp->estats));
10132 #define NVRAM_TEST_SIZE 0x100
10133 #define NVRAM_SELFBOOT_FORMAT1_0_SIZE 0x14
10134 #define NVRAM_SELFBOOT_FORMAT1_2_SIZE 0x18
10135 #define NVRAM_SELFBOOT_FORMAT1_3_SIZE 0x1c
10136 #define NVRAM_SELFBOOT_HW_SIZE 0x20
10137 #define NVRAM_SELFBOOT_DATA_SIZE 0x1c
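/* tg3_test_nvram() below validates three image layouts: legacy images are
 * checked against CRCs stored at offsets 0x10 and 0xfc, selfboot firmware
 * images against a simple byte sum that must come out to zero, and selfboot
 * hardware images against a per-byte parity check.
 */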
10139 static int tg3_test_nvram(struct tg3 *tp)
10141 u32 csum, magic;
10142 __be32 *buf;
10143 int i, j, k, err = 0, size;
10145 if (tp->tg3_flags3 & TG3_FLG3_NO_NVRAM)
10146 return 0;
10148 if (tg3_nvram_read(tp, 0, &magic) != 0)
10149 return -EIO;
10151 if (magic == TG3_EEPROM_MAGIC)
10152 size = NVRAM_TEST_SIZE;
10153 else if ((magic & TG3_EEPROM_MAGIC_FW_MSK) == TG3_EEPROM_MAGIC_FW) {
10154 if ((magic & TG3_EEPROM_SB_FORMAT_MASK) ==
10155 TG3_EEPROM_SB_FORMAT_1) {
10156 switch (magic & TG3_EEPROM_SB_REVISION_MASK) {
10157 case TG3_EEPROM_SB_REVISION_0:
10158 size = NVRAM_SELFBOOT_FORMAT1_0_SIZE;
10159 break;
10160 case TG3_EEPROM_SB_REVISION_2:
10161 size = NVRAM_SELFBOOT_FORMAT1_2_SIZE;
10162 break;
10163 case TG3_EEPROM_SB_REVISION_3:
10164 size = NVRAM_SELFBOOT_FORMAT1_3_SIZE;
10165 break;
10166 default:
10167 return 0;
10169 } else
10170 return 0;
10171 } else if ((magic & TG3_EEPROM_MAGIC_HW_MSK) == TG3_EEPROM_MAGIC_HW)
10172 size = NVRAM_SELFBOOT_HW_SIZE;
10173 else
10174 return -EIO;
10176 buf = kmalloc(size, GFP_KERNEL);
10177 if (buf == NULL)
10178 return -ENOMEM;
10180 err = -EIO;
10181 for (i = 0, j = 0; i < size; i += 4, j++) {
10182 err = tg3_nvram_read_be32(tp, i, &buf[j]);
10183 if (err)
10184 break;
10186 if (i < size)
10187 goto out;
10189 /* Selfboot format */
10190 magic = be32_to_cpu(buf[0]);
10191 if ((magic & TG3_EEPROM_MAGIC_FW_MSK) ==
10192 TG3_EEPROM_MAGIC_FW) {
10193 u8 *buf8 = (u8 *) buf, csum8 = 0;
10195 if ((magic & TG3_EEPROM_SB_REVISION_MASK) ==
10196 TG3_EEPROM_SB_REVISION_2) {
10197 /* For rev 2, the csum doesn't include the MBA. */
10198 for (i = 0; i < TG3_EEPROM_SB_F1R2_MBA_OFF; i++)
10199 csum8 += buf8[i];
10200 for (i = TG3_EEPROM_SB_F1R2_MBA_OFF + 4; i < size; i++)
10201 csum8 += buf8[i];
10202 } else {
10203 for (i = 0; i < size; i++)
10204 csum8 += buf8[i];
10207 if (csum8 == 0) {
10208 err = 0;
10209 goto out;
10212 err = -EIO;
10213 goto out;
10216 if ((magic & TG3_EEPROM_MAGIC_HW_MSK) ==
10217 TG3_EEPROM_MAGIC_HW) {
10218 u8 data[NVRAM_SELFBOOT_DATA_SIZE];
10219 u8 parity[NVRAM_SELFBOOT_DATA_SIZE];
10220 u8 *buf8 = (u8 *) buf;
10222 /* Separate the parity bits and the data bytes. */
10223 for (i = 0, j = 0, k = 0; i < NVRAM_SELFBOOT_HW_SIZE; i++) {
10224 if ((i == 0) || (i == 8)) {
10225 int l;
10226 u8 msk;
10228 for (l = 0, msk = 0x80; l < 7; l++, msk >>= 1)
10229 parity[k++] = buf8[i] & msk;
10230 i++;
10231 } else if (i == 16) {
10232 int l;
10233 u8 msk;
10235 for (l = 0, msk = 0x20; l < 6; l++, msk >>= 1)
10236 parity[k++] = buf8[i] & msk;
10237 i++;
10239 for (l = 0, msk = 0x80; l < 8; l++, msk >>= 1)
10240 parity[k++] = buf8[i] & msk;
10241 i++;
10243 data[j++] = buf8[i];
10246 err = -EIO;
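/* Verify odd parity: a data byte with an even number of set bits must
 * have its parity bit set, and one with an odd number must not.
 */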
10247 for (i = 0; i < NVRAM_SELFBOOT_DATA_SIZE; i++) {
10248 u8 hw8 = hweight8(data[i]);
10250 if ((hw8 & 0x1) && parity[i])
10251 goto out;
10252 else if (!(hw8 & 0x1) && !parity[i])
10253 goto out;
10255 err = 0;
10256 goto out;
10259 /* Bootstrap checksum at offset 0x10 */
10260 csum = calc_crc((unsigned char *) buf, 0x10);
10261 if (csum != be32_to_cpu(buf[0x10/4]))
10262 goto out;
10264 /* Manufacturing block starts at offset 0x74, checksum at 0xfc */
10265 csum = calc_crc((unsigned char *) &buf[0x74/4], 0x88);
10266 if (csum != be32_to_cpu(buf[0xfc/4]))
10267 goto out;
10269 err = 0;
10271 out:
10272 kfree(buf);
10273 return err;
10276 #define TG3_SERDES_TIMEOUT_SEC 2
10277 #define TG3_COPPER_TIMEOUT_SEC 6
10279 static int tg3_test_link(struct tg3 *tp)
10281 int i, max;
10283 if (!netif_running(tp->dev))
10284 return -ENODEV;
10286 if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES)
10287 max = TG3_SERDES_TIMEOUT_SEC;
10288 else
10289 max = TG3_COPPER_TIMEOUT_SEC;
10291 for (i = 0; i < max; i++) {
10292 if (netif_carrier_ok(tp->dev))
10293 return 0;
10295 if (msleep_interruptible(1000))
10296 break;
10299 return -EIO;
10302 /* Only test the commonly used registers */
10303 static int tg3_test_registers(struct tg3 *tp)
10305 int i, is_5705, is_5750;
10306 u32 offset, read_mask, write_mask, val, save_val, read_val;
10307 static struct {
10308 u16 offset;
10309 u16 flags;
10310 #define TG3_FL_5705 0x1
10311 #define TG3_FL_NOT_5705 0x2
10312 #define TG3_FL_NOT_5788 0x4
10313 #define TG3_FL_NOT_5750 0x8
10314 u32 read_mask;
10315 u32 write_mask;
10316 } reg_tbl[] = {
10317 /* MAC Control Registers */
10318 { MAC_MODE, TG3_FL_NOT_5705,
10319 0x00000000, 0x00ef6f8c },
10320 { MAC_MODE, TG3_FL_5705,
10321 0x00000000, 0x01ef6b8c },
10322 { MAC_STATUS, TG3_FL_NOT_5705,
10323 0x03800107, 0x00000000 },
10324 { MAC_STATUS, TG3_FL_5705,
10325 0x03800100, 0x00000000 },
10326 { MAC_ADDR_0_HIGH, 0x0000,
10327 0x00000000, 0x0000ffff },
10328 { MAC_ADDR_0_LOW, 0x0000,
10329 0x00000000, 0xffffffff },
10330 { MAC_RX_MTU_SIZE, 0x0000,
10331 0x00000000, 0x0000ffff },
10332 { MAC_TX_MODE, 0x0000,
10333 0x00000000, 0x00000070 },
10334 { MAC_TX_LENGTHS, 0x0000,
10335 0x00000000, 0x00003fff },
10336 { MAC_RX_MODE, TG3_FL_NOT_5705,
10337 0x00000000, 0x000007fc },
10338 { MAC_RX_MODE, TG3_FL_5705,
10339 0x00000000, 0x000007dc },
10340 { MAC_HASH_REG_0, 0x0000,
10341 0x00000000, 0xffffffff },
10342 { MAC_HASH_REG_1, 0x0000,
10343 0x00000000, 0xffffffff },
10344 { MAC_HASH_REG_2, 0x0000,
10345 0x00000000, 0xffffffff },
10346 { MAC_HASH_REG_3, 0x0000,
10347 0x00000000, 0xffffffff },
10349 /* Receive Data and Receive BD Initiator Control Registers. */
10350 { RCVDBDI_JUMBO_BD+0, TG3_FL_NOT_5705,
10351 0x00000000, 0xffffffff },
10352 { RCVDBDI_JUMBO_BD+4, TG3_FL_NOT_5705,
10353 0x00000000, 0xffffffff },
10354 { RCVDBDI_JUMBO_BD+8, TG3_FL_NOT_5705,
10355 0x00000000, 0x00000003 },
10356 { RCVDBDI_JUMBO_BD+0xc, TG3_FL_NOT_5705,
10357 0x00000000, 0xffffffff },
10358 { RCVDBDI_STD_BD+0, 0x0000,
10359 0x00000000, 0xffffffff },
10360 { RCVDBDI_STD_BD+4, 0x0000,
10361 0x00000000, 0xffffffff },
10362 { RCVDBDI_STD_BD+8, 0x0000,
10363 0x00000000, 0xffff0002 },
10364 { RCVDBDI_STD_BD+0xc, 0x0000,
10365 0x00000000, 0xffffffff },
10367 /* Receive BD Initiator Control Registers. */
10368 { RCVBDI_STD_THRESH, TG3_FL_NOT_5705,
10369 0x00000000, 0xffffffff },
10370 { RCVBDI_STD_THRESH, TG3_FL_5705,
10371 0x00000000, 0x000003ff },
10372 { RCVBDI_JUMBO_THRESH, TG3_FL_NOT_5705,
10373 0x00000000, 0xffffffff },
10375 /* Host Coalescing Control Registers. */
10376 { HOSTCC_MODE, TG3_FL_NOT_5705,
10377 0x00000000, 0x00000004 },
10378 { HOSTCC_MODE, TG3_FL_5705,
10379 0x00000000, 0x000000f6 },
10380 { HOSTCC_RXCOL_TICKS, TG3_FL_NOT_5705,
10381 0x00000000, 0xffffffff },
10382 { HOSTCC_RXCOL_TICKS, TG3_FL_5705,
10383 0x00000000, 0x000003ff },
10384 { HOSTCC_TXCOL_TICKS, TG3_FL_NOT_5705,
10385 0x00000000, 0xffffffff },
10386 { HOSTCC_TXCOL_TICKS, TG3_FL_5705,
10387 0x00000000, 0x000003ff },
10388 { HOSTCC_RXMAX_FRAMES, TG3_FL_NOT_5705,
10389 0x00000000, 0xffffffff },
10390 { HOSTCC_RXMAX_FRAMES, TG3_FL_5705 | TG3_FL_NOT_5788,
10391 0x00000000, 0x000000ff },
10392 { HOSTCC_TXMAX_FRAMES, TG3_FL_NOT_5705,
10393 0x00000000, 0xffffffff },
10394 { HOSTCC_TXMAX_FRAMES, TG3_FL_5705 | TG3_FL_NOT_5788,
10395 0x00000000, 0x000000ff },
10396 { HOSTCC_RXCOAL_TICK_INT, TG3_FL_NOT_5705,
10397 0x00000000, 0xffffffff },
10398 { HOSTCC_TXCOAL_TICK_INT, TG3_FL_NOT_5705,
10399 0x00000000, 0xffffffff },
10400 { HOSTCC_RXCOAL_MAXF_INT, TG3_FL_NOT_5705,
10401 0x00000000, 0xffffffff },
10402 { HOSTCC_RXCOAL_MAXF_INT, TG3_FL_5705 | TG3_FL_NOT_5788,
10403 0x00000000, 0x000000ff },
10404 { HOSTCC_TXCOAL_MAXF_INT, TG3_FL_NOT_5705,
10405 0x00000000, 0xffffffff },
10406 { HOSTCC_TXCOAL_MAXF_INT, TG3_FL_5705 | TG3_FL_NOT_5788,
10407 0x00000000, 0x000000ff },
10408 { HOSTCC_STAT_COAL_TICKS, TG3_FL_NOT_5705,
10409 0x00000000, 0xffffffff },
10410 { HOSTCC_STATS_BLK_HOST_ADDR, TG3_FL_NOT_5705,
10411 0x00000000, 0xffffffff },
10412 { HOSTCC_STATS_BLK_HOST_ADDR+4, TG3_FL_NOT_5705,
10413 0x00000000, 0xffffffff },
10414 { HOSTCC_STATUS_BLK_HOST_ADDR, 0x0000,
10415 0x00000000, 0xffffffff },
10416 { HOSTCC_STATUS_BLK_HOST_ADDR+4, 0x0000,
10417 0x00000000, 0xffffffff },
10418 { HOSTCC_STATS_BLK_NIC_ADDR, 0x0000,
10419 0xffffffff, 0x00000000 },
10420 { HOSTCC_STATUS_BLK_NIC_ADDR, 0x0000,
10421 0xffffffff, 0x00000000 },
10423 /* Buffer Manager Control Registers. */
10424 { BUFMGR_MB_POOL_ADDR, TG3_FL_NOT_5750,
10425 0x00000000, 0x007fff80 },
10426 { BUFMGR_MB_POOL_SIZE, TG3_FL_NOT_5750,
10427 0x00000000, 0x007fffff },
10428 { BUFMGR_MB_RDMA_LOW_WATER, 0x0000,
10429 0x00000000, 0x0000003f },
10430 { BUFMGR_MB_MACRX_LOW_WATER, 0x0000,
10431 0x00000000, 0x000001ff },
10432 { BUFMGR_MB_HIGH_WATER, 0x0000,
10433 0x00000000, 0x000001ff },
10434 { BUFMGR_DMA_DESC_POOL_ADDR, TG3_FL_NOT_5705,
10435 0xffffffff, 0x00000000 },
10436 { BUFMGR_DMA_DESC_POOL_SIZE, TG3_FL_NOT_5705,
10437 0xffffffff, 0x00000000 },
10439 /* Mailbox Registers */
10440 { GRCMBOX_RCVSTD_PROD_IDX+4, 0x0000,
10441 0x00000000, 0x000001ff },
10442 { GRCMBOX_RCVJUMBO_PROD_IDX+4, TG3_FL_NOT_5705,
10443 0x00000000, 0x000001ff },
10444 { GRCMBOX_RCVRET_CON_IDX_0+4, 0x0000,
10445 0x00000000, 0x000007ff },
10446 { GRCMBOX_SNDHOST_PROD_IDX_0+4, 0x0000,
10447 0x00000000, 0x000001ff },
10449 { 0xffff, 0x0000, 0x00000000, 0x00000000 },
10452 is_5705 = is_5750 = 0;
10453 if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
10454 is_5705 = 1;
10455 if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS)
10456 is_5750 = 1;
10459 for (i = 0; reg_tbl[i].offset != 0xffff; i++) {
10460 if (is_5705 && (reg_tbl[i].flags & TG3_FL_NOT_5705))
10461 continue;
10463 if (!is_5705 && (reg_tbl[i].flags & TG3_FL_5705))
10464 continue;
10466 if ((tp->tg3_flags2 & TG3_FLG2_IS_5788) &&
10467 (reg_tbl[i].flags & TG3_FL_NOT_5788))
10468 continue;
10470 if (is_5750 && (reg_tbl[i].flags & TG3_FL_NOT_5750))
10471 continue;
10473 offset = (u32) reg_tbl[i].offset;
10474 read_mask = reg_tbl[i].read_mask;
10475 write_mask = reg_tbl[i].write_mask;
10477 /* Save the original register content */
10478 save_val = tr32(offset);
10480 /* Determine the read-only value. */
10481 read_val = save_val & read_mask;
10483 /* Write zero to the register, then make sure the read-only bits
10484 * are not changed and the read/write bits are all zeros.
10485 */
10486 tw32(offset, 0);
10488 val = tr32(offset);
10490 /* Test the read-only and read/write bits. */
10491 if (((val & read_mask) != read_val) || (val & write_mask))
10492 goto out;
10494 /* Write ones to all the bits defined by RdMask and WrMask, then
10495 * make sure the read-only bits are not changed and the
10496 * read/write bits are all ones.
10497 */
10498 tw32(offset, read_mask | write_mask);
10500 val = tr32(offset);
10502 /* Test the read-only bits. */
10503 if ((val & read_mask) != read_val)
10504 goto out;
10506 /* Test the read/write bits. */
10507 if ((val & write_mask) != write_mask)
10508 goto out;
10510 tw32(offset, save_val);
10513 return 0;
10515 out:
10516 if (netif_msg_hw(tp))
10517 netdev_err(tp->dev,
10518 "Register test failed at offset %x\n", offset);
10519 tw32(offset, save_val);
10520 return -EIO;
10523 static int tg3_do_mem_test(struct tg3 *tp, u32 offset, u32 len)
10525 static const u32 test_pattern[] = { 0x00000000, 0xffffffff, 0xaa55a55a };
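/* Walk the region once per pattern (all zeros, all ones, and the mixed
 * 0xaa55a55a value), writing each dword and immediately reading it back.
 */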
10526 int i;
10527 u32 j;
10529 for (i = 0; i < ARRAY_SIZE(test_pattern); i++) {
10530 for (j = 0; j < len; j += 4) {
10531 u32 val;
10533 tg3_write_mem(tp, offset + j, test_pattern[i]);
10534 tg3_read_mem(tp, offset + j, &val);
10535 if (val != test_pattern[i])
10536 return -EIO;
10539 return 0;
10542 static int tg3_test_memory(struct tg3 *tp)
10544 static struct mem_entry {
10545 u32 offset;
10546 u32 len;
10547 } mem_tbl_570x[] = {
10548 { 0x00000000, 0x00b50},
10549 { 0x00002000, 0x1c000},
10550 { 0xffffffff, 0x00000}
10551 }, mem_tbl_5705[] = {
10552 { 0x00000100, 0x0000c},
10553 { 0x00000200, 0x00008},
10554 { 0x00004000, 0x00800},
10555 { 0x00006000, 0x01000},
10556 { 0x00008000, 0x02000},
10557 { 0x00010000, 0x0e000},
10558 { 0xffffffff, 0x00000}
10559 }, mem_tbl_5755[] = {
10560 { 0x00000200, 0x00008},
10561 { 0x00004000, 0x00800},
10562 { 0x00006000, 0x00800},
10563 { 0x00008000, 0x02000},
10564 { 0x00010000, 0x0c000},
10565 { 0xffffffff, 0x00000}
10566 }, mem_tbl_5906[] = {
10567 { 0x00000200, 0x00008},
10568 { 0x00004000, 0x00400},
10569 { 0x00006000, 0x00400},
10570 { 0x00008000, 0x01000},
10571 { 0x00010000, 0x01000},
10572 { 0xffffffff, 0x00000}
10573 }, mem_tbl_5717[] = {
10574 { 0x00000200, 0x00008},
10575 { 0x00010000, 0x0a000},
10576 { 0x00020000, 0x13c00},
10577 { 0xffffffff, 0x00000}
10578 }, mem_tbl_57765[] = {
10579 { 0x00000200, 0x00008},
10580 { 0x00004000, 0x00800},
10581 { 0x00006000, 0x09800},
10582 { 0x00010000, 0x0a000},
10583 { 0xffffffff, 0x00000}
10585 struct mem_entry *mem_tbl;
10586 int err = 0;
10587 int i;
10589 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
10590 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719)
10591 mem_tbl = mem_tbl_5717;
10592 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765)
10593 mem_tbl = mem_tbl_57765;
10594 else if (tp->tg3_flags3 & TG3_FLG3_5755_PLUS)
10595 mem_tbl = mem_tbl_5755;
10596 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
10597 mem_tbl = mem_tbl_5906;
10598 else if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS)
10599 mem_tbl = mem_tbl_5705;
10600 else
10601 mem_tbl = mem_tbl_570x;
10603 for (i = 0; mem_tbl[i].offset != 0xffffffff; i++) {
10604 err = tg3_do_mem_test(tp, mem_tbl[i].offset, mem_tbl[i].len);
10605 if (err)
10606 break;
10609 return err;
10612 #define TG3_MAC_LOOPBACK 0
10613 #define TG3_PHY_LOOPBACK 1
10615 static int tg3_run_loopback(struct tg3 *tp, int loopback_mode)
10617 u32 mac_mode, rx_start_idx, rx_idx, tx_idx, opaque_key;
10618 u32 desc_idx, coal_now;
10619 struct sk_buff *skb, *rx_skb;
10620 u8 *tx_data;
10621 dma_addr_t map;
10622 int num_pkts, tx_len, rx_len, i, err;
10623 struct tg3_rx_buffer_desc *desc;
10624 struct tg3_napi *tnapi, *rnapi;
10625 struct tg3_rx_prodring_set *tpr = &tp->napi[0].prodring;
10627 tnapi = &tp->napi[0];
10628 rnapi = &tp->napi[0];
10629 if (tp->irq_cnt > 1) {
10630 rnapi = &tp->napi[1];
10631 if (tp->tg3_flags3 & TG3_FLG3_ENABLE_TSS)
10632 tnapi = &tp->napi[1];
10634 coal_now = tnapi->coal_now | rnapi->coal_now;
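/* The loopback test below builds a 1514-byte frame addressed to our own
 * MAC, sends it through either internal MAC loopback or PHY loopback, and
 * then checks that the frame shows up on the standard rx ring with its
 * payload intact.
 */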
10636 if (loopback_mode == TG3_MAC_LOOPBACK) {
10637 /* HW errata - mac loopback fails in some cases on 5780.
10638 * Normal traffic and PHY loopback are not affected by
10639 * errata.
10640 */
10641 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780)
10642 return 0;
10644 mac_mode = (tp->mac_mode & ~MAC_MODE_PORT_MODE_MASK) |
10645 MAC_MODE_PORT_INT_LPBACK;
10646 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS))
10647 mac_mode |= MAC_MODE_LINK_POLARITY;
10648 if (tp->phy_flags & TG3_PHYFLG_10_100_ONLY)
10649 mac_mode |= MAC_MODE_PORT_MODE_MII;
10650 else
10651 mac_mode |= MAC_MODE_PORT_MODE_GMII;
10652 tw32(MAC_MODE, mac_mode);
10653 } else if (loopback_mode == TG3_PHY_LOOPBACK) {
10654 u32 val;
10656 if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
10657 tg3_phy_fet_toggle_apd(tp, false);
10658 val = BMCR_LOOPBACK | BMCR_FULLDPLX | BMCR_SPEED100;
10659 } else
10660 val = BMCR_LOOPBACK | BMCR_FULLDPLX | BMCR_SPEED1000;
10662 tg3_phy_toggle_automdix(tp, 0);
10664 tg3_writephy(tp, MII_BMCR, val);
10665 udelay(40);
10667 mac_mode = tp->mac_mode & ~MAC_MODE_PORT_MODE_MASK;
10668 if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
10669 tg3_writephy(tp, MII_TG3_FET_PTEST,
10670 MII_TG3_FET_PTEST_FRC_TX_LINK |
10671 MII_TG3_FET_PTEST_FRC_TX_LOCK);
10672 /* The write needs to be flushed for the AC131 */
10673 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
10674 tg3_readphy(tp, MII_TG3_FET_PTEST, &val);
10675 mac_mode |= MAC_MODE_PORT_MODE_MII;
10676 } else
10677 mac_mode |= MAC_MODE_PORT_MODE_GMII;
10679 /* reset to prevent losing 1st rx packet intermittently */
10680 if (tp->phy_flags & TG3_PHYFLG_MII_SERDES) {
10681 tw32_f(MAC_RX_MODE, RX_MODE_RESET);
10682 udelay(10);
10683 tw32_f(MAC_RX_MODE, tp->rx_mode);
10685 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700) {
10686 u32 masked_phy_id = tp->phy_id & TG3_PHY_ID_MASK;
10687 if (masked_phy_id == TG3_PHY_ID_BCM5401)
10688 mac_mode &= ~MAC_MODE_LINK_POLARITY;
10689 else if (masked_phy_id == TG3_PHY_ID_BCM5411)
10690 mac_mode |= MAC_MODE_LINK_POLARITY;
10691 tg3_writephy(tp, MII_TG3_EXT_CTRL,
10692 MII_TG3_EXT_CTRL_LNK3_LED_MODE);
10694 tw32(MAC_MODE, mac_mode);
10695 } else {
10696 return -EINVAL;
10699 err = -EIO;
10701 tx_len = 1514;
10702 skb = netdev_alloc_skb(tp->dev, tx_len);
10703 if (!skb)
10704 return -ENOMEM;
10706 tx_data = skb_put(skb, tx_len);
10707 memcpy(tx_data, tp->dev->dev_addr, 6);
10708 memset(tx_data + 6, 0x0, 8);
10710 tw32(MAC_RX_MTU_SIZE, tx_len + 4);
10712 for (i = 14; i < tx_len; i++)
10713 tx_data[i] = (u8) (i & 0xff);
10715 map = pci_map_single(tp->pdev, skb->data, tx_len, PCI_DMA_TODEVICE);
10716 if (pci_dma_mapping_error(tp->pdev, map)) {
10717 dev_kfree_skb(skb);
10718 return -EIO;
10721 tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
10722 rnapi->coal_now);
10724 udelay(10);
10726 rx_start_idx = rnapi->hw_status->idx[0].rx_producer;
10728 num_pkts = 0;
10730 tg3_set_txd(tnapi, tnapi->tx_prod, map, tx_len, 0, 1);
10732 tnapi->tx_prod++;
10733 num_pkts++;
10735 tw32_tx_mbox(tnapi->prodmbox, tnapi->tx_prod);
10736 tr32_mailbox(tnapi->prodmbox);
10738 udelay(10);
10740 /* 350 usec to allow enough time on some 10/100 Mbps devices. */
10741 for (i = 0; i < 35; i++) {
10742 tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
10743 coal_now);
10745 udelay(10);
10747 tx_idx = tnapi->hw_status->idx[0].tx_consumer;
10748 rx_idx = rnapi->hw_status->idx[0].rx_producer;
10749 if ((tx_idx == tnapi->tx_prod) &&
10750 (rx_idx == (rx_start_idx + num_pkts)))
10751 break;
10754 pci_unmap_single(tp->pdev, map, tx_len, PCI_DMA_TODEVICE);
10755 dev_kfree_skb(skb);
10757 if (tx_idx != tnapi->tx_prod)
10758 goto out;
10760 if (rx_idx != rx_start_idx + num_pkts)
10761 goto out;
10763 desc = &rnapi->rx_rcb[rx_start_idx];
10764 desc_idx = desc->opaque & RXD_OPAQUE_INDEX_MASK;
10765 opaque_key = desc->opaque & RXD_OPAQUE_RING_MASK;
10766 if (opaque_key != RXD_OPAQUE_RING_STD)
10767 goto out;
10769 if ((desc->err_vlan & RXD_ERR_MASK) != 0 &&
10770 (desc->err_vlan != RXD_ERR_ODD_NIBBLE_RCVD_MII))
10771 goto out;
10773 rx_len = ((desc->idx_len & RXD_LEN_MASK) >> RXD_LEN_SHIFT) - 4;
10774 if (rx_len != tx_len)
10775 goto out;
10777 rx_skb = tpr->rx_std_buffers[desc_idx].skb;
10779 map = dma_unmap_addr(&tpr->rx_std_buffers[desc_idx], mapping);
10780 pci_dma_sync_single_for_cpu(tp->pdev, map, rx_len, PCI_DMA_FROMDEVICE);
10782 for (i = 14; i < tx_len; i++) {
10783 if (*(rx_skb->data + i) != (u8) (i & 0xff))
10784 goto out;
10786 err = 0;
10788 /* tg3_free_rings will unmap and free the rx_skb */
10789 out:
10790 return err;
10793 #define TG3_MAC_LOOPBACK_FAILED 1
10794 #define TG3_PHY_LOOPBACK_FAILED 2
10795 #define TG3_LOOPBACK_FAILED (TG3_MAC_LOOPBACK_FAILED | \
10796 TG3_PHY_LOOPBACK_FAILED)
10798 static int tg3_test_loopback(struct tg3 *tp)
10800 int err = 0;
10801 u32 cpmuctrl = 0;
10803 if (!netif_running(tp->dev))
10804 return TG3_LOOPBACK_FAILED;
10806 err = tg3_reset_hw(tp, 1);
10807 if (err)
10808 return TG3_LOOPBACK_FAILED;
10810 /* Turn off gphy autopowerdown. */
10811 if (tp->phy_flags & TG3_PHYFLG_ENABLE_APD)
10812 tg3_phy_toggle_apd(tp, false);
10814 if (tp->tg3_flags & TG3_FLAG_CPMU_PRESENT) {
10815 int i;
10816 u32 status;
10818 tw32(TG3_CPMU_MUTEX_REQ, CPMU_MUTEX_REQ_DRIVER);
10820 /* Wait for up to 40 microseconds to acquire lock. */
10821 for (i = 0; i < 4; i++) {
10822 status = tr32(TG3_CPMU_MUTEX_GNT);
10823 if (status == CPMU_MUTEX_GNT_DRIVER)
10824 break;
10825 udelay(10);
10828 if (status != CPMU_MUTEX_GNT_DRIVER)
10829 return TG3_LOOPBACK_FAILED;
10831 /* Turn off link-based power management. */
10832 cpmuctrl = tr32(TG3_CPMU_CTRL);
10833 tw32(TG3_CPMU_CTRL,
10834 cpmuctrl & ~(CPMU_CTRL_LINK_SPEED_MODE |
10835 CPMU_CTRL_LINK_AWARE_MODE));
10838 if (tg3_run_loopback(tp, TG3_MAC_LOOPBACK))
10839 err |= TG3_MAC_LOOPBACK_FAILED;
10841 if (tp->tg3_flags & TG3_FLAG_CPMU_PRESENT) {
10842 tw32(TG3_CPMU_CTRL, cpmuctrl);
10844 /* Release the mutex */
10845 tw32(TG3_CPMU_MUTEX_GNT, CPMU_MUTEX_GNT_DRIVER);
10848 if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
10849 !(tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB)) {
10850 if (tg3_run_loopback(tp, TG3_PHY_LOOPBACK))
10851 err |= TG3_PHY_LOOPBACK_FAILED;
10854 /* Re-enable gphy autopowerdown. */
10855 if (tp->phy_flags & TG3_PHYFLG_ENABLE_APD)
10856 tg3_phy_toggle_apd(tp, true);
10858 return err;
10861 static void tg3_self_test(struct net_device *dev, struct ethtool_test *etest,
10862 u64 *data)
10864 struct tg3 *tp = netdev_priv(dev);
10866 if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
10867 tg3_set_power_state(tp, PCI_D0);
10869 memset(data, 0, sizeof(u64) * TG3_NUM_TEST);
10871 if (tg3_test_nvram(tp) != 0) {
10872 etest->flags |= ETH_TEST_FL_FAILED;
10873 data[0] = 1;
10875 if (tg3_test_link(tp) != 0) {
10876 etest->flags |= ETH_TEST_FL_FAILED;
10877 data[1] = 1;
10879 if (etest->flags & ETH_TEST_FL_OFFLINE) {
10880 int err, err2 = 0, irq_sync = 0;
10882 if (netif_running(dev)) {
10883 tg3_phy_stop(tp);
10884 tg3_netif_stop(tp);
10885 irq_sync = 1;
10888 tg3_full_lock(tp, irq_sync);
10890 tg3_halt(tp, RESET_KIND_SUSPEND, 1);
10891 err = tg3_nvram_lock(tp);
10892 tg3_halt_cpu(tp, RX_CPU_BASE);
10893 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS))
10894 tg3_halt_cpu(tp, TX_CPU_BASE);
10895 if (!err)
10896 tg3_nvram_unlock(tp);
10898 if (tp->phy_flags & TG3_PHYFLG_MII_SERDES)
10899 tg3_phy_reset(tp);
10901 if (tg3_test_registers(tp) != 0) {
10902 etest->flags |= ETH_TEST_FL_FAILED;
10903 data[2] = 1;
10905 if (tg3_test_memory(tp) != 0) {
10906 etest->flags |= ETH_TEST_FL_FAILED;
10907 data[3] = 1;
10909 if ((data[4] = tg3_test_loopback(tp)) != 0)
10910 etest->flags |= ETH_TEST_FL_FAILED;
10912 tg3_full_unlock(tp);
10914 if (tg3_test_interrupt(tp) != 0) {
10915 etest->flags |= ETH_TEST_FL_FAILED;
10916 data[5] = 1;
10919 tg3_full_lock(tp, 0);
10921 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
10922 if (netif_running(dev)) {
10923 tp->tg3_flags |= TG3_FLAG_INIT_COMPLETE;
10924 err2 = tg3_restart_hw(tp, 1);
10925 if (!err2)
10926 tg3_netif_start(tp);
10929 tg3_full_unlock(tp);
10931 if (irq_sync && !err2)
10932 tg3_phy_start(tp);
10934 if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
10935 tg3_set_power_state(tp, PCI_D3hot);
10939 static int tg3_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
10941 struct mii_ioctl_data *data = if_mii(ifr);
10942 struct tg3 *tp = netdev_priv(dev);
10943 int err;
10945 if (tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB) {
10946 struct phy_device *phydev;
10947 if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
10948 return -EAGAIN;
10949 phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
10950 return phy_mii_ioctl(phydev, ifr, cmd);
10953 switch (cmd) {
10954 case SIOCGMIIPHY:
10955 data->phy_id = tp->phy_addr;
10957 /* fallthru */
10958 case SIOCGMIIREG: {
10959 u32 mii_regval;
10961 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
10962 break; /* We have no PHY */
10964 if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
10965 return -EAGAIN;
10967 spin_lock_bh(&tp->lock);
10968 err = tg3_readphy(tp, data->reg_num & 0x1f, &mii_regval);
10969 spin_unlock_bh(&tp->lock);
10971 data->val_out = mii_regval;
10973 return err;
10976 case SIOCSMIIREG:
10977 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
10978 break; /* We have no PHY */
10980 if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
10981 return -EAGAIN;
10983 spin_lock_bh(&tp->lock);
10984 err = tg3_writephy(tp, data->reg_num & 0x1f, data->val_in);
10985 spin_unlock_bh(&tp->lock);
10987 return err;
10989 default:
10990 /* do nothing */
10991 break;
10993 return -EOPNOTSUPP;
10996 #if TG3_VLAN_TAG_USED
10997 static void tg3_vlan_rx_register(struct net_device *dev, struct vlan_group *grp)
10999 struct tg3 *tp = netdev_priv(dev);
11001 if (!netif_running(dev)) {
11002 tp->vlgrp = grp;
11003 return;
11006 tg3_netif_stop(tp);
11008 tg3_full_lock(tp, 0);
11010 tp->vlgrp = grp;
11012 /* Update RX_MODE_KEEP_VLAN_TAG bit in RX_MODE register. */
11013 __tg3_set_rx_mode(dev);
11015 tg3_netif_start(tp);
11017 tg3_full_unlock(tp);
11019 #endif
11021 static int tg3_get_coalesce(struct net_device *dev, struct ethtool_coalesce *ec)
11023 struct tg3 *tp = netdev_priv(dev);
11025 memcpy(ec, &tp->coal, sizeof(*ec));
11026 return 0;
11029 static int tg3_set_coalesce(struct net_device *dev, struct ethtool_coalesce *ec)
11031 struct tg3 *tp = netdev_priv(dev);
11032 u32 max_rxcoal_tick_int = 0, max_txcoal_tick_int = 0;
11033 u32 max_stat_coal_ticks = 0, min_stat_coal_ticks = 0;
11035 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
11036 max_rxcoal_tick_int = MAX_RXCOAL_TICK_INT;
11037 max_txcoal_tick_int = MAX_TXCOAL_TICK_INT;
11038 max_stat_coal_ticks = MAX_STAT_COAL_TICKS;
11039 min_stat_coal_ticks = MIN_STAT_COAL_TICKS;
11042 if ((ec->rx_coalesce_usecs > MAX_RXCOL_TICKS) ||
11043 (ec->tx_coalesce_usecs > MAX_TXCOL_TICKS) ||
11044 (ec->rx_max_coalesced_frames > MAX_RXMAX_FRAMES) ||
11045 (ec->tx_max_coalesced_frames > MAX_TXMAX_FRAMES) ||
11046 (ec->rx_coalesce_usecs_irq > max_rxcoal_tick_int) ||
11047 (ec->tx_coalesce_usecs_irq > max_txcoal_tick_int) ||
11048 (ec->rx_max_coalesced_frames_irq > MAX_RXCOAL_MAXF_INT) ||
11049 (ec->tx_max_coalesced_frames_irq > MAX_TXCOAL_MAXF_INT) ||
11050 (ec->stats_block_coalesce_usecs > max_stat_coal_ticks) ||
11051 (ec->stats_block_coalesce_usecs < min_stat_coal_ticks))
11052 return -EINVAL;
11054 /* No rx interrupts will be generated if both are zero */
11055 if ((ec->rx_coalesce_usecs == 0) &&
11056 (ec->rx_max_coalesced_frames == 0))
11057 return -EINVAL;
11059 /* No tx interrupts will be generated if both are zero */
11060 if ((ec->tx_coalesce_usecs == 0) &&
11061 (ec->tx_max_coalesced_frames == 0))
11062 return -EINVAL;
11064 /* Only copy relevant parameters, ignore all others. */
11065 tp->coal.rx_coalesce_usecs = ec->rx_coalesce_usecs;
11066 tp->coal.tx_coalesce_usecs = ec->tx_coalesce_usecs;
11067 tp->coal.rx_max_coalesced_frames = ec->rx_max_coalesced_frames;
11068 tp->coal.tx_max_coalesced_frames = ec->tx_max_coalesced_frames;
11069 tp->coal.rx_coalesce_usecs_irq = ec->rx_coalesce_usecs_irq;
11070 tp->coal.tx_coalesce_usecs_irq = ec->tx_coalesce_usecs_irq;
11071 tp->coal.rx_max_coalesced_frames_irq = ec->rx_max_coalesced_frames_irq;
11072 tp->coal.tx_max_coalesced_frames_irq = ec->tx_max_coalesced_frames_irq;
11073 tp->coal.stats_block_coalesce_usecs = ec->stats_block_coalesce_usecs;
11075 if (netif_running(dev)) {
11076 tg3_full_lock(tp, 0);
11077 __tg3_set_coalesce(tp, &tp->coal);
11078 tg3_full_unlock(tp);
11080 return 0;
11083 static const struct ethtool_ops tg3_ethtool_ops = {
11084 .get_settings = tg3_get_settings,
11085 .set_settings = tg3_set_settings,
11086 .get_drvinfo = tg3_get_drvinfo,
11087 .get_regs_len = tg3_get_regs_len,
11088 .get_regs = tg3_get_regs,
11089 .get_wol = tg3_get_wol,
11090 .set_wol = tg3_set_wol,
11091 .get_msglevel = tg3_get_msglevel,
11092 .set_msglevel = tg3_set_msglevel,
11093 .nway_reset = tg3_nway_reset,
11094 .get_link = ethtool_op_get_link,
11095 .get_eeprom_len = tg3_get_eeprom_len,
11096 .get_eeprom = tg3_get_eeprom,
11097 .set_eeprom = tg3_set_eeprom,
11098 .get_ringparam = tg3_get_ringparam,
11099 .set_ringparam = tg3_set_ringparam,
11100 .get_pauseparam = tg3_get_pauseparam,
11101 .set_pauseparam = tg3_set_pauseparam,
11102 .get_rx_csum = tg3_get_rx_csum,
11103 .set_rx_csum = tg3_set_rx_csum,
11104 .set_tx_csum = tg3_set_tx_csum,
11105 .set_sg = ethtool_op_set_sg,
11106 .set_tso = tg3_set_tso,
11107 .self_test = tg3_self_test,
11108 .get_strings = tg3_get_strings,
11109 .phys_id = tg3_phys_id,
11110 .get_ethtool_stats = tg3_get_ethtool_stats,
11111 .get_coalesce = tg3_get_coalesce,
11112 .set_coalesce = tg3_set_coalesce,
11113 .get_sset_count = tg3_get_sset_count,
11116 static void __devinit tg3_get_eeprom_size(struct tg3 *tp)
11118 u32 cursize, val, magic;
11120 tp->nvram_size = EEPROM_CHIP_SIZE;
11122 if (tg3_nvram_read(tp, 0, &magic) != 0)
11123 return;
11125 if ((magic != TG3_EEPROM_MAGIC) &&
11126 ((magic & TG3_EEPROM_MAGIC_FW_MSK) != TG3_EEPROM_MAGIC_FW) &&
11127 ((magic & TG3_EEPROM_MAGIC_HW_MSK) != TG3_EEPROM_MAGIC_HW))
11128 return;
11130 /*
11131 * Size the chip by reading offsets at increasing powers of two.
11132 * When we encounter our validation signature, we know the addressing
11133 * has wrapped around, and thus have our chip size.
11134 */
11135 cursize = 0x10;
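/* Example (assuming addresses wrap at the part's capacity): on a 128 KB
 * part the read at offset 0x20000 wraps back to offset 0 and returns the
 * magic value, so the loop exits with cursize = 0x20000.
 */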
11137 while (cursize < tp->nvram_size) {
11138 if (tg3_nvram_read(tp, cursize, &val) != 0)
11139 return;
11141 if (val == magic)
11142 break;
11144 cursize <<= 1;
11147 tp->nvram_size = cursize;
11150 static void __devinit tg3_get_nvram_size(struct tg3 *tp)
11152 u32 val;
11154 if ((tp->tg3_flags3 & TG3_FLG3_NO_NVRAM) ||
11155 tg3_nvram_read(tp, 0, &val) != 0)
11156 return;
11158 /* Selfboot format */
11159 if (val != TG3_EEPROM_MAGIC) {
11160 tg3_get_eeprom_size(tp);
11161 return;
11164 if (tg3_nvram_read(tp, 0xf0, &val) == 0) {
11165 if (val != 0) {
11166 /* This is confusing. We want to operate on the
11167 * 16-bit value at offset 0xf2. The tg3_nvram_read()
11168 * call will read from NVRAM and byteswap the data
11169 * according to the byteswapping settings for all
11170 * other register accesses. This ensures the data we
11171 * want will always reside in the lower 16-bits.
11172 * However, the data in NVRAM is in LE format, which
11173 * means the data from the NVRAM read will always be
11174 * opposite the endianness of the CPU. The 16-bit
11175 * byteswap then brings the data to CPU endianness.
11176 */
11177 tp->nvram_size = swab16((u16)(val & 0x0000ffff)) * 1024;
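/* The stored value is the size in kilobytes, hence the final * 1024;
 * e.g. a value that byteswaps to 512 yields a 512 KB nvram_size.
 */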
11178 return;
11181 tp->nvram_size = TG3_NVRAM_SIZE_512KB;
11184 static void __devinit tg3_get_nvram_info(struct tg3 *tp)
11186 u32 nvcfg1;
11188 nvcfg1 = tr32(NVRAM_CFG1);
11189 if (nvcfg1 & NVRAM_CFG1_FLASHIF_ENAB) {
11190 tp->tg3_flags2 |= TG3_FLG2_FLASH;
11191 } else {
11192 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
11193 tw32(NVRAM_CFG1, nvcfg1);
11196 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750) ||
11197 (tp->tg3_flags2 & TG3_FLG2_5780_CLASS)) {
11198 switch (nvcfg1 & NVRAM_CFG1_VENDOR_MASK) {
11199 case FLASH_VENDOR_ATMEL_FLASH_BUFFERED:
11200 tp->nvram_jedecnum = JEDEC_ATMEL;
11201 tp->nvram_pagesize = ATMEL_AT45DB0X1B_PAGE_SIZE;
11202 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
11203 break;
11204 case FLASH_VENDOR_ATMEL_FLASH_UNBUFFERED:
11205 tp->nvram_jedecnum = JEDEC_ATMEL;
11206 tp->nvram_pagesize = ATMEL_AT25F512_PAGE_SIZE;
11207 break;
11208 case FLASH_VENDOR_ATMEL_EEPROM:
11209 tp->nvram_jedecnum = JEDEC_ATMEL;
11210 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
11211 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
11212 break;
11213 case FLASH_VENDOR_ST:
11214 tp->nvram_jedecnum = JEDEC_ST;
11215 tp->nvram_pagesize = ST_M45PEX0_PAGE_SIZE;
11216 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
11217 break;
11218 case FLASH_VENDOR_SAIFUN:
11219 tp->nvram_jedecnum = JEDEC_SAIFUN;
11220 tp->nvram_pagesize = SAIFUN_SA25F0XX_PAGE_SIZE;
11221 break;
11222 case FLASH_VENDOR_SST_SMALL:
11223 case FLASH_VENDOR_SST_LARGE:
11224 tp->nvram_jedecnum = JEDEC_SST;
11225 tp->nvram_pagesize = SST_25VF0X0_PAGE_SIZE;
11226 break;
11228 } else {
11229 tp->nvram_jedecnum = JEDEC_ATMEL;
11230 tp->nvram_pagesize = ATMEL_AT45DB0X1B_PAGE_SIZE;
11231 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
11235 static void __devinit tg3_nvram_get_pagesize(struct tg3 *tp, u32 nvmcfg1)
11237 switch (nvmcfg1 & NVRAM_CFG1_5752PAGE_SIZE_MASK) {
11238 case FLASH_5752PAGE_SIZE_256:
11239 tp->nvram_pagesize = 256;
11240 break;
11241 case FLASH_5752PAGE_SIZE_512:
11242 tp->nvram_pagesize = 512;
11243 break;
11244 case FLASH_5752PAGE_SIZE_1K:
11245 tp->nvram_pagesize = 1024;
11246 break;
11247 case FLASH_5752PAGE_SIZE_2K:
11248 tp->nvram_pagesize = 2048;
11249 break;
11250 case FLASH_5752PAGE_SIZE_4K:
11251 tp->nvram_pagesize = 4096;
11252 break;
11253 case FLASH_5752PAGE_SIZE_264:
11254 tp->nvram_pagesize = 264;
11255 break;
11256 case FLASH_5752PAGE_SIZE_528:
11257 tp->nvram_pagesize = 528;
11258 break;
11262 static void __devinit tg3_get_5752_nvram_info(struct tg3 *tp)
11264 u32 nvcfg1;
11266 nvcfg1 = tr32(NVRAM_CFG1);
11268 /* NVRAM protection for TPM */
11269 if (nvcfg1 & (1 << 27))
11270 tp->tg3_flags3 |= TG3_FLG3_PROTECTED_NVRAM;
11272 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
11273 case FLASH_5752VENDOR_ATMEL_EEPROM_64KHZ:
11274 case FLASH_5752VENDOR_ATMEL_EEPROM_376KHZ:
11275 tp->nvram_jedecnum = JEDEC_ATMEL;
11276 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
11277 break;
11278 case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
11279 tp->nvram_jedecnum = JEDEC_ATMEL;
11280 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
11281 tp->tg3_flags2 |= TG3_FLG2_FLASH;
11282 break;
11283 case FLASH_5752VENDOR_ST_M45PE10:
11284 case FLASH_5752VENDOR_ST_M45PE20:
11285 case FLASH_5752VENDOR_ST_M45PE40:
11286 tp->nvram_jedecnum = JEDEC_ST;
11287 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
11288 tp->tg3_flags2 |= TG3_FLG2_FLASH;
11289 break;
11292 if (tp->tg3_flags2 & TG3_FLG2_FLASH) {
11293 tg3_nvram_get_pagesize(tp, nvcfg1);
11294 } else {
11295 /* For eeprom, set pagesize to maximum eeprom size */
11296 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
11298 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
11299 tw32(NVRAM_CFG1, nvcfg1);
11303 static void __devinit tg3_get_5755_nvram_info(struct tg3 *tp)
11305 u32 nvcfg1, protect = 0;
11307 nvcfg1 = tr32(NVRAM_CFG1);
11309 /* NVRAM protection for TPM */
11310 if (nvcfg1 & (1 << 27)) {
11311 tp->tg3_flags3 |= TG3_FLG3_PROTECTED_NVRAM;
11312 protect = 1;
11315 nvcfg1 &= NVRAM_CFG1_5752VENDOR_MASK;
11316 switch (nvcfg1) {
11317 case FLASH_5755VENDOR_ATMEL_FLASH_1:
11318 case FLASH_5755VENDOR_ATMEL_FLASH_2:
11319 case FLASH_5755VENDOR_ATMEL_FLASH_3:
11320 case FLASH_5755VENDOR_ATMEL_FLASH_5:
11321 tp->nvram_jedecnum = JEDEC_ATMEL;
11322 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
11323 tp->tg3_flags2 |= TG3_FLG2_FLASH;
11324 tp->nvram_pagesize = 264;
11325 if (nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_1 ||
11326 nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_5)
11327 tp->nvram_size = (protect ? 0x3e200 :
11328 TG3_NVRAM_SIZE_512KB);
11329 else if (nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_2)
11330 tp->nvram_size = (protect ? 0x1f200 :
11331 TG3_NVRAM_SIZE_256KB);
11332 else
11333 tp->nvram_size = (protect ? 0x1f200 :
11334 TG3_NVRAM_SIZE_128KB);
11335 break;
11336 case FLASH_5752VENDOR_ST_M45PE10:
11337 case FLASH_5752VENDOR_ST_M45PE20:
11338 case FLASH_5752VENDOR_ST_M45PE40:
11339 tp->nvram_jedecnum = JEDEC_ST;
11340 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
11341 tp->tg3_flags2 |= TG3_FLG2_FLASH;
11342 tp->nvram_pagesize = 256;
11343 if (nvcfg1 == FLASH_5752VENDOR_ST_M45PE10)
11344 tp->nvram_size = (protect ?
11345 TG3_NVRAM_SIZE_64KB :
11346 TG3_NVRAM_SIZE_128KB);
11347 else if (nvcfg1 == FLASH_5752VENDOR_ST_M45PE20)
11348 tp->nvram_size = (protect ?
11349 TG3_NVRAM_SIZE_64KB :
11350 TG3_NVRAM_SIZE_256KB);
11351 else
11352 tp->nvram_size = (protect ?
11353 TG3_NVRAM_SIZE_128KB :
11354 TG3_NVRAM_SIZE_512KB);
11355 break;
11359 static void __devinit tg3_get_5787_nvram_info(struct tg3 *tp)
11361 u32 nvcfg1;
11363 nvcfg1 = tr32(NVRAM_CFG1);
11365 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
11366 case FLASH_5787VENDOR_ATMEL_EEPROM_64KHZ:
11367 case FLASH_5787VENDOR_ATMEL_EEPROM_376KHZ:
11368 case FLASH_5787VENDOR_MICRO_EEPROM_64KHZ:
11369 case FLASH_5787VENDOR_MICRO_EEPROM_376KHZ:
11370 tp->nvram_jedecnum = JEDEC_ATMEL;
11371 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
11372 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
11374 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
11375 tw32(NVRAM_CFG1, nvcfg1);
11376 break;
11377 case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
11378 case FLASH_5755VENDOR_ATMEL_FLASH_1:
11379 case FLASH_5755VENDOR_ATMEL_FLASH_2:
11380 case FLASH_5755VENDOR_ATMEL_FLASH_3:
11381 tp->nvram_jedecnum = JEDEC_ATMEL;
11382 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
11383 tp->tg3_flags2 |= TG3_FLG2_FLASH;
11384 tp->nvram_pagesize = 264;
11385 break;
11386 case FLASH_5752VENDOR_ST_M45PE10:
11387 case FLASH_5752VENDOR_ST_M45PE20:
11388 case FLASH_5752VENDOR_ST_M45PE40:
11389 tp->nvram_jedecnum = JEDEC_ST;
11390 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
11391 tp->tg3_flags2 |= TG3_FLG2_FLASH;
11392 tp->nvram_pagesize = 256;
11393 break;
11397 static void __devinit tg3_get_5761_nvram_info(struct tg3 *tp)
11399 u32 nvcfg1, protect = 0;
11401 nvcfg1 = tr32(NVRAM_CFG1);
11403 /* NVRAM protection for TPM */
11404 if (nvcfg1 & (1 << 27)) {
11405 tp->tg3_flags3 |= TG3_FLG3_PROTECTED_NVRAM;
11406 protect = 1;
11409 nvcfg1 &= NVRAM_CFG1_5752VENDOR_MASK;
11410 switch (nvcfg1) {
11411 case FLASH_5761VENDOR_ATMEL_ADB021D:
11412 case FLASH_5761VENDOR_ATMEL_ADB041D:
11413 case FLASH_5761VENDOR_ATMEL_ADB081D:
11414 case FLASH_5761VENDOR_ATMEL_ADB161D:
11415 case FLASH_5761VENDOR_ATMEL_MDB021D:
11416 case FLASH_5761VENDOR_ATMEL_MDB041D:
11417 case FLASH_5761VENDOR_ATMEL_MDB081D:
11418 case FLASH_5761VENDOR_ATMEL_MDB161D:
11419 tp->nvram_jedecnum = JEDEC_ATMEL;
11420 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
11421 tp->tg3_flags2 |= TG3_FLG2_FLASH;
11422 tp->tg3_flags3 |= TG3_FLG3_NO_NVRAM_ADDR_TRANS;
11423 tp->nvram_pagesize = 256;
11424 break;
11425 case FLASH_5761VENDOR_ST_A_M45PE20:
11426 case FLASH_5761VENDOR_ST_A_M45PE40:
11427 case FLASH_5761VENDOR_ST_A_M45PE80:
11428 case FLASH_5761VENDOR_ST_A_M45PE16:
11429 case FLASH_5761VENDOR_ST_M_M45PE20:
11430 case FLASH_5761VENDOR_ST_M_M45PE40:
11431 case FLASH_5761VENDOR_ST_M_M45PE80:
11432 case FLASH_5761VENDOR_ST_M_M45PE16:
11433 tp->nvram_jedecnum = JEDEC_ST;
11434 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
11435 tp->tg3_flags2 |= TG3_FLG2_FLASH;
11436 tp->nvram_pagesize = 256;
11437 break;
11440 if (protect) {
11441 tp->nvram_size = tr32(NVRAM_ADDR_LOCKOUT);
11442 } else {
11443 switch (nvcfg1) {
11444 case FLASH_5761VENDOR_ATMEL_ADB161D:
11445 case FLASH_5761VENDOR_ATMEL_MDB161D:
11446 case FLASH_5761VENDOR_ST_A_M45PE16:
11447 case FLASH_5761VENDOR_ST_M_M45PE16:
11448 tp->nvram_size = TG3_NVRAM_SIZE_2MB;
11449 break;
11450 case FLASH_5761VENDOR_ATMEL_ADB081D:
11451 case FLASH_5761VENDOR_ATMEL_MDB081D:
11452 case FLASH_5761VENDOR_ST_A_M45PE80:
11453 case FLASH_5761VENDOR_ST_M_M45PE80:
11454 tp->nvram_size = TG3_NVRAM_SIZE_1MB;
11455 break;
11456 case FLASH_5761VENDOR_ATMEL_ADB041D:
11457 case FLASH_5761VENDOR_ATMEL_MDB041D:
11458 case FLASH_5761VENDOR_ST_A_M45PE40:
11459 case FLASH_5761VENDOR_ST_M_M45PE40:
11460 tp->nvram_size = TG3_NVRAM_SIZE_512KB;
11461 break;
11462 case FLASH_5761VENDOR_ATMEL_ADB021D:
11463 case FLASH_5761VENDOR_ATMEL_MDB021D:
11464 case FLASH_5761VENDOR_ST_A_M45PE20:
11465 case FLASH_5761VENDOR_ST_M_M45PE20:
11466 tp->nvram_size = TG3_NVRAM_SIZE_256KB;
11467 break;
11472 static void __devinit tg3_get_5906_nvram_info(struct tg3 *tp)
11474 tp->nvram_jedecnum = JEDEC_ATMEL;
11475 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
11476 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
11479 static void __devinit tg3_get_57780_nvram_info(struct tg3 *tp)
11481 u32 nvcfg1;
11483 nvcfg1 = tr32(NVRAM_CFG1);
11485 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
11486 case FLASH_5787VENDOR_ATMEL_EEPROM_376KHZ:
11487 case FLASH_5787VENDOR_MICRO_EEPROM_376KHZ:
11488 tp->nvram_jedecnum = JEDEC_ATMEL;
11489 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
11490 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
11492 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
11493 tw32(NVRAM_CFG1, nvcfg1);
11494 return;
11495 case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
11496 case FLASH_57780VENDOR_ATMEL_AT45DB011D:
11497 case FLASH_57780VENDOR_ATMEL_AT45DB011B:
11498 case FLASH_57780VENDOR_ATMEL_AT45DB021D:
11499 case FLASH_57780VENDOR_ATMEL_AT45DB021B:
11500 case FLASH_57780VENDOR_ATMEL_AT45DB041D:
11501 case FLASH_57780VENDOR_ATMEL_AT45DB041B:
11502 tp->nvram_jedecnum = JEDEC_ATMEL;
11503 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
11504 tp->tg3_flags2 |= TG3_FLG2_FLASH;
11506 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
11507 case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
11508 case FLASH_57780VENDOR_ATMEL_AT45DB011D:
11509 case FLASH_57780VENDOR_ATMEL_AT45DB011B:
11510 tp->nvram_size = TG3_NVRAM_SIZE_128KB;
11511 break;
11512 case FLASH_57780VENDOR_ATMEL_AT45DB021D:
11513 case FLASH_57780VENDOR_ATMEL_AT45DB021B:
11514 tp->nvram_size = TG3_NVRAM_SIZE_256KB;
11515 break;
11516 case FLASH_57780VENDOR_ATMEL_AT45DB041D:
11517 case FLASH_57780VENDOR_ATMEL_AT45DB041B:
11518 tp->nvram_size = TG3_NVRAM_SIZE_512KB;
11519 break;
11521 break;
11522 case FLASH_5752VENDOR_ST_M45PE10:
11523 case FLASH_5752VENDOR_ST_M45PE20:
11524 case FLASH_5752VENDOR_ST_M45PE40:
11525 tp->nvram_jedecnum = JEDEC_ST;
11526 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
11527 tp->tg3_flags2 |= TG3_FLG2_FLASH;
11529 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
11530 case FLASH_5752VENDOR_ST_M45PE10:
11531 tp->nvram_size = TG3_NVRAM_SIZE_128KB;
11532 break;
11533 case FLASH_5752VENDOR_ST_M45PE20:
11534 tp->nvram_size = TG3_NVRAM_SIZE_256KB;
11535 break;
11536 case FLASH_5752VENDOR_ST_M45PE40:
11537 tp->nvram_size = TG3_NVRAM_SIZE_512KB;
11538 break;
11540 break;
11541 default:
11542 tp->tg3_flags3 |= TG3_FLG3_NO_NVRAM;
11543 return;
11546 tg3_nvram_get_pagesize(tp, nvcfg1);
11547 if (tp->nvram_pagesize != 264 && tp->nvram_pagesize != 528)
11548 tp->tg3_flags3 |= TG3_FLG3_NO_NVRAM_ADDR_TRANS;
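/* 264- and 528-byte pages are the native Atmel DataFlash page sizes;
 * those parts appear to need the page-based address translation, so the
 * translation is only disabled for power-of-two page sizes.
 */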
11552 static void __devinit tg3_get_5717_nvram_info(struct tg3 *tp)
11554 u32 nvcfg1;
11556 nvcfg1 = tr32(NVRAM_CFG1);
11558 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
11559 case FLASH_5717VENDOR_ATMEL_EEPROM:
11560 case FLASH_5717VENDOR_MICRO_EEPROM:
11561 tp->nvram_jedecnum = JEDEC_ATMEL;
11562 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
11563 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
11565 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
11566 tw32(NVRAM_CFG1, nvcfg1);
11567 return;
11568 case FLASH_5717VENDOR_ATMEL_MDB011D:
11569 case FLASH_5717VENDOR_ATMEL_ADB011B:
11570 case FLASH_5717VENDOR_ATMEL_ADB011D:
11571 case FLASH_5717VENDOR_ATMEL_MDB021D:
11572 case FLASH_5717VENDOR_ATMEL_ADB021B:
11573 case FLASH_5717VENDOR_ATMEL_ADB021D:
11574 case FLASH_5717VENDOR_ATMEL_45USPT:
11575 tp->nvram_jedecnum = JEDEC_ATMEL;
11576 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
11577 tp->tg3_flags2 |= TG3_FLG2_FLASH;
11579 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
11580 case FLASH_5717VENDOR_ATMEL_MDB021D:
11581 case FLASH_5717VENDOR_ATMEL_ADB021B:
11582 case FLASH_5717VENDOR_ATMEL_ADB021D:
11583 tp->nvram_size = TG3_NVRAM_SIZE_256KB;
11584 break;
11585 default:
11586 tp->nvram_size = TG3_NVRAM_SIZE_128KB;
11587 break;
11589 break;
11590 case FLASH_5717VENDOR_ST_M_M25PE10:
11591 case FLASH_5717VENDOR_ST_A_M25PE10:
11592 case FLASH_5717VENDOR_ST_M_M45PE10:
11593 case FLASH_5717VENDOR_ST_A_M45PE10:
11594 case FLASH_5717VENDOR_ST_M_M25PE20:
11595 case FLASH_5717VENDOR_ST_A_M25PE20:
11596 case FLASH_5717VENDOR_ST_M_M45PE20:
11597 case FLASH_5717VENDOR_ST_A_M45PE20:
11598 case FLASH_5717VENDOR_ST_25USPT:
11599 case FLASH_5717VENDOR_ST_45USPT:
11600 tp->nvram_jedecnum = JEDEC_ST;
11601 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
11602 tp->tg3_flags2 |= TG3_FLG2_FLASH;
11604 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
11605 case FLASH_5717VENDOR_ST_M_M25PE20:
11606 case FLASH_5717VENDOR_ST_A_M25PE20:
11607 case FLASH_5717VENDOR_ST_M_M45PE20:
11608 case FLASH_5717VENDOR_ST_A_M45PE20:
11609 tp->nvram_size = TG3_NVRAM_SIZE_256KB;
11610 break;
11611 default:
11612 tp->nvram_size = TG3_NVRAM_SIZE_128KB;
11613 break;
11615 break;
11616 default:
11617 tp->tg3_flags3 |= TG3_FLG3_NO_NVRAM;
11618 return;
11621 tg3_nvram_get_pagesize(tp, nvcfg1);
11622 if (tp->nvram_pagesize != 264 && tp->nvram_pagesize != 528)
11623 tp->tg3_flags3 |= TG3_FLG3_NO_NVRAM_ADDR_TRANS;
11626 /* Chips other than 5700/5701 use the NVRAM for fetching info. */
11627 static void __devinit tg3_nvram_init(struct tg3 *tp)
11629 tw32_f(GRC_EEPROM_ADDR,
11630 (EEPROM_ADDR_FSM_RESET |
11631 (EEPROM_DEFAULT_CLOCK_PERIOD <<
11632 EEPROM_ADDR_CLKPERD_SHIFT)));
11634 msleep(1);
11636 /* Enable seeprom accesses. */
11637 tw32_f(GRC_LOCAL_CTRL,
11638 tr32(GRC_LOCAL_CTRL) | GRC_LCLCTRL_AUTO_SEEPROM);
11639 udelay(100);
11641 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
11642 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701) {
11643 tp->tg3_flags |= TG3_FLAG_NVRAM;
11645 if (tg3_nvram_lock(tp)) {
11646 netdev_warn(tp->dev,
11647 "Cannot get nvram lock, %s failed\n",
11648 __func__);
11649 return;
11651 tg3_enable_nvram_access(tp);
11653 tp->nvram_size = 0;
11655 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752)
11656 tg3_get_5752_nvram_info(tp);
11657 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755)
11658 tg3_get_5755_nvram_info(tp);
11659 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 ||
11660 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
11661 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
11662 tg3_get_5787_nvram_info(tp);
11663 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
11664 tg3_get_5761_nvram_info(tp);
11665 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
11666 tg3_get_5906_nvram_info(tp);
11667 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780 ||
11668 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765)
11669 tg3_get_57780_nvram_info(tp);
11670 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
11671 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719)
11672 tg3_get_5717_nvram_info(tp);
11673 else
11674 tg3_get_nvram_info(tp);
11676 if (tp->nvram_size == 0)
11677 tg3_get_nvram_size(tp);
11679 tg3_disable_nvram_access(tp);
11680 tg3_nvram_unlock(tp);
11682 } else {
11683 tp->tg3_flags &= ~(TG3_FLAG_NVRAM | TG3_FLAG_NVRAM_BUFFERED);
11685 tg3_get_eeprom_size(tp);
11689 static int tg3_nvram_write_block_using_eeprom(struct tg3 *tp,
11690 u32 offset, u32 len, u8 *buf)
11692 int i, j, rc = 0;
11693 u32 val;
11695 for (i = 0; i < len; i += 4) {
11696 u32 addr;
11697 __be32 data;
11699 addr = offset + i;
11701 memcpy(&data, buf + i, 4);
11703 /*
11704 * The SEEPROM interface expects the data to always be opposite
11705 * the native endian format. We accomplish this by reversing
11706 * all the operations that would have been performed on the
11707 * data from a call to tg3_nvram_read_be32().
11708 */
11709 tw32(GRC_EEPROM_DATA, swab32(be32_to_cpu(data)));
11711 val = tr32(GRC_EEPROM_ADDR);
11712 tw32(GRC_EEPROM_ADDR, val | EEPROM_ADDR_COMPLETE);
11714 val &= ~(EEPROM_ADDR_ADDR_MASK | EEPROM_ADDR_DEVID_MASK |
11715 EEPROM_ADDR_READ);
11716 tw32(GRC_EEPROM_ADDR, val |
11717 (0 << EEPROM_ADDR_DEVID_SHIFT) |
11718 (addr & EEPROM_ADDR_ADDR_MASK) |
11719 EEPROM_ADDR_START |
11720 EEPROM_ADDR_WRITE);
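/* Poll for EEPROM_ADDR_COMPLETE, giving the write up to roughly one
 * second (1000 iterations of msleep(1)) before reporting -EBUSY.
 */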
11722 for (j = 0; j < 1000; j++) {
11723 val = tr32(GRC_EEPROM_ADDR);
11725 if (val & EEPROM_ADDR_COMPLETE)
11726 break;
11727 msleep(1);
11729 if (!(val & EEPROM_ADDR_COMPLETE)) {
11730 rc = -EBUSY;
11731 break;
11735 return rc;
11738 /* offset and length are dword aligned */
11739 static int tg3_nvram_write_block_unbuffered(struct tg3 *tp, u32 offset, u32 len,
11740 u8 *buf)
11742 int ret = 0;
11743 u32 pagesize = tp->nvram_pagesize;
11744 u32 pagemask = pagesize - 1;
11745 u32 nvram_cmd;
11746 u8 *tmp;
11748 tmp = kmalloc(pagesize, GFP_KERNEL);
11749 if (tmp == NULL)
11750 return -ENOMEM;
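/* Unbuffered parts must be written a full flash page at a time: read the
 * page containing the target offset into "tmp", merge in the caller's
 * data, erase the page, and then rewrite it dword by dword.
 */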
11752 while (len) {
11753 int j;
11754 u32 phy_addr, page_off, size;
11756 phy_addr = offset & ~pagemask;
11758 for (j = 0; j < pagesize; j += 4) {
11759 ret = tg3_nvram_read_be32(tp, phy_addr + j,
11760 (__be32 *) (tmp + j));
11761 if (ret)
11762 break;
11764 if (ret)
11765 break;
11767 page_off = offset & pagemask;
11768 size = pagesize;
11769 if (len < size)
11770 size = len;
11772 len -= size;
11774 memcpy(tmp + page_off, buf, size);
11776 offset = offset + (pagesize - page_off);
11778 tg3_enable_nvram_access(tp);
11780 /*
11781 * Before we can erase the flash page, we need
11782 * to issue a special "write enable" command.
11783 */
11784 nvram_cmd = NVRAM_CMD_WREN | NVRAM_CMD_GO | NVRAM_CMD_DONE;
11786 if (tg3_nvram_exec_cmd(tp, nvram_cmd))
11787 break;
11789 /* Erase the target page */
11790 tw32(NVRAM_ADDR, phy_addr);
11792 nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE | NVRAM_CMD_WR |
11793 NVRAM_CMD_FIRST | NVRAM_CMD_LAST | NVRAM_CMD_ERASE;
11795 if (tg3_nvram_exec_cmd(tp, nvram_cmd))
11796 break;
11798 /* Issue another write enable to start the write. */
11799 nvram_cmd = NVRAM_CMD_WREN | NVRAM_CMD_GO | NVRAM_CMD_DONE;
11801 if (tg3_nvram_exec_cmd(tp, nvram_cmd))
11802 break;
11804 for (j = 0; j < pagesize; j += 4) {
11805 __be32 data;
11807 data = *((__be32 *) (tmp + j));
11809 tw32(NVRAM_WRDATA, be32_to_cpu(data));
11811 tw32(NVRAM_ADDR, phy_addr + j);
11813 nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE |
11814 NVRAM_CMD_WR;
11816 if (j == 0)
11817 nvram_cmd |= NVRAM_CMD_FIRST;
11818 else if (j == (pagesize - 4))
11819 nvram_cmd |= NVRAM_CMD_LAST;
11821 if ((ret = tg3_nvram_exec_cmd(tp, nvram_cmd)))
11822 break;
11824 if (ret)
11825 break;
11828 nvram_cmd = NVRAM_CMD_WRDI | NVRAM_CMD_GO | NVRAM_CMD_DONE;
11829 tg3_nvram_exec_cmd(tp, nvram_cmd);
11831 kfree(tmp);
11833 return ret;
11836 /* offset and length are dword aligned */
11837 static int tg3_nvram_write_block_buffered(struct tg3 *tp, u32 offset, u32 len,
11838 u8 *buf)
11840 int i, ret = 0;
11842 for (i = 0; i < len; i += 4, offset += 4) {
11843 u32 page_off, phy_addr, nvram_cmd;
11844 __be32 data;
11846 memcpy(&data, buf + i, 4);
11847 tw32(NVRAM_WRDATA, be32_to_cpu(data));
11849 page_off = offset % tp->nvram_pagesize;
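/* NVRAM_CMD_FIRST and NVRAM_CMD_LAST bracket each page below: FIRST is
 * issued at a page boundary (or on the first dword), LAST at the final
 * dword of a page or of the whole transfer.
 */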
11851 phy_addr = tg3_nvram_phys_addr(tp, offset);
11853 tw32(NVRAM_ADDR, phy_addr);
11855 nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE | NVRAM_CMD_WR;
11857 if (page_off == 0 || i == 0)
11858 nvram_cmd |= NVRAM_CMD_FIRST;
11859 if (page_off == (tp->nvram_pagesize - 4))
11860 nvram_cmd |= NVRAM_CMD_LAST;
11862 if (i == (len - 4))
11863 nvram_cmd |= NVRAM_CMD_LAST;
11865 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5752 &&
11866 !(tp->tg3_flags3 & TG3_FLG3_5755_PLUS) &&
11867 (tp->nvram_jedecnum == JEDEC_ST) &&
11868 (nvram_cmd & NVRAM_CMD_FIRST)) {
11870 if ((ret = tg3_nvram_exec_cmd(tp,
11871 NVRAM_CMD_WREN | NVRAM_CMD_GO |
11872 NVRAM_CMD_DONE)))
11874 break;
11876 if (!(tp->tg3_flags2 & TG3_FLG2_FLASH)) {
11877 /* We always do complete word writes to eeprom. */
11878 nvram_cmd |= (NVRAM_CMD_FIRST | NVRAM_CMD_LAST);
11881 if ((ret = tg3_nvram_exec_cmd(tp, nvram_cmd)))
11882 break;
11884 return ret;
11887 /* offset and length are dword aligned */
11888 static int tg3_nvram_write_block(struct tg3 *tp, u32 offset, u32 len, u8 *buf)
11890 int ret;
11892 if (tp->tg3_flags & TG3_FLAG_EEPROM_WRITE_PROT) {
11893 tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl &
11894 ~GRC_LCLCTRL_GPIO_OUTPUT1);
11895 udelay(40);
11898 if (!(tp->tg3_flags & TG3_FLAG_NVRAM)) {
11899 ret = tg3_nvram_write_block_using_eeprom(tp, offset, len, buf);
11900 } else {
11901 u32 grc_mode;
11903 ret = tg3_nvram_lock(tp);
11904 if (ret)
11905 return ret;
11907 tg3_enable_nvram_access(tp);
11908 if ((tp->tg3_flags2 & TG3_FLG2_5750_PLUS) &&
11909 !(tp->tg3_flags3 & TG3_FLG3_PROTECTED_NVRAM))
11910 tw32(NVRAM_WRITE1, 0x406);
11912 grc_mode = tr32(GRC_MODE);
11913 tw32(GRC_MODE, grc_mode | GRC_MODE_NVRAM_WR_ENABLE);
11915 if ((tp->tg3_flags & TG3_FLAG_NVRAM_BUFFERED) ||
11916 !(tp->tg3_flags2 & TG3_FLG2_FLASH)) {
11918 ret = tg3_nvram_write_block_buffered(tp, offset, len,
11919 buf);
11920 } else {
11921 ret = tg3_nvram_write_block_unbuffered(tp, offset, len,
11922 buf);
11925 grc_mode = tr32(GRC_MODE);
11926 tw32(GRC_MODE, grc_mode & ~GRC_MODE_NVRAM_WR_ENABLE);
11928 tg3_disable_nvram_access(tp);
11929 tg3_nvram_unlock(tp);
11932 if (tp->tg3_flags & TG3_FLAG_EEPROM_WRITE_PROT) {
11933 tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
11934 udelay(40);
11937 return ret;
11940 struct subsys_tbl_ent {
11941 u16 subsys_vendor, subsys_devid;
11942 u32 phy_id;
11945 static struct subsys_tbl_ent subsys_id_to_phy_id[] __devinitdata = {
11946 /* Broadcom boards. */
11947 { TG3PCI_SUBVENDOR_ID_BROADCOM,
11948 TG3PCI_SUBDEVICE_ID_BROADCOM_95700A6, TG3_PHY_ID_BCM5401 },
11949 { TG3PCI_SUBVENDOR_ID_BROADCOM,
11950 TG3PCI_SUBDEVICE_ID_BROADCOM_95701A5, TG3_PHY_ID_BCM5701 },
11951 { TG3PCI_SUBVENDOR_ID_BROADCOM,
11952 TG3PCI_SUBDEVICE_ID_BROADCOM_95700T6, TG3_PHY_ID_BCM8002 },
11953 { TG3PCI_SUBVENDOR_ID_BROADCOM,
11954 TG3PCI_SUBDEVICE_ID_BROADCOM_95700A9, 0 },
11955 { TG3PCI_SUBVENDOR_ID_BROADCOM,
11956 TG3PCI_SUBDEVICE_ID_BROADCOM_95701T1, TG3_PHY_ID_BCM5701 },
11957 { TG3PCI_SUBVENDOR_ID_BROADCOM,
11958 TG3PCI_SUBDEVICE_ID_BROADCOM_95701T8, TG3_PHY_ID_BCM5701 },
11959 { TG3PCI_SUBVENDOR_ID_BROADCOM,
11960 TG3PCI_SUBDEVICE_ID_BROADCOM_95701A7, 0 },
11961 { TG3PCI_SUBVENDOR_ID_BROADCOM,
11962 TG3PCI_SUBDEVICE_ID_BROADCOM_95701A10, TG3_PHY_ID_BCM5701 },
11963 { TG3PCI_SUBVENDOR_ID_BROADCOM,
11964 TG3PCI_SUBDEVICE_ID_BROADCOM_95701A12, TG3_PHY_ID_BCM5701 },
11965 { TG3PCI_SUBVENDOR_ID_BROADCOM,
11966 TG3PCI_SUBDEVICE_ID_BROADCOM_95703AX1, TG3_PHY_ID_BCM5703 },
11967 { TG3PCI_SUBVENDOR_ID_BROADCOM,
11968 TG3PCI_SUBDEVICE_ID_BROADCOM_95703AX2, TG3_PHY_ID_BCM5703 },
11970 /* 3com boards. */
11971 { TG3PCI_SUBVENDOR_ID_3COM,
11972 TG3PCI_SUBDEVICE_ID_3COM_3C996T, TG3_PHY_ID_BCM5401 },
11973 { TG3PCI_SUBVENDOR_ID_3COM,
11974 TG3PCI_SUBDEVICE_ID_3COM_3C996BT, TG3_PHY_ID_BCM5701 },
11975 { TG3PCI_SUBVENDOR_ID_3COM,
11976 TG3PCI_SUBDEVICE_ID_3COM_3C996SX, 0 },
11977 { TG3PCI_SUBVENDOR_ID_3COM,
11978 TG3PCI_SUBDEVICE_ID_3COM_3C1000T, TG3_PHY_ID_BCM5701 },
11979 { TG3PCI_SUBVENDOR_ID_3COM,
11980 TG3PCI_SUBDEVICE_ID_3COM_3C940BR01, TG3_PHY_ID_BCM5701 },
11982 /* DELL boards. */
11983 { TG3PCI_SUBVENDOR_ID_DELL,
11984 TG3PCI_SUBDEVICE_ID_DELL_VIPER, TG3_PHY_ID_BCM5401 },
11985 { TG3PCI_SUBVENDOR_ID_DELL,
11986 TG3PCI_SUBDEVICE_ID_DELL_JAGUAR, TG3_PHY_ID_BCM5401 },
11987 { TG3PCI_SUBVENDOR_ID_DELL,
11988 TG3PCI_SUBDEVICE_ID_DELL_MERLOT, TG3_PHY_ID_BCM5411 },
11989 { TG3PCI_SUBVENDOR_ID_DELL,
11990 TG3PCI_SUBDEVICE_ID_DELL_SLIM_MERLOT, TG3_PHY_ID_BCM5411 },
11992 /* Compaq boards. */
11993 { TG3PCI_SUBVENDOR_ID_COMPAQ,
11994 TG3PCI_SUBDEVICE_ID_COMPAQ_BANSHEE, TG3_PHY_ID_BCM5701 },
11995 { TG3PCI_SUBVENDOR_ID_COMPAQ,
11996 TG3PCI_SUBDEVICE_ID_COMPAQ_BANSHEE_2, TG3_PHY_ID_BCM5701 },
11997 { TG3PCI_SUBVENDOR_ID_COMPAQ,
11998 TG3PCI_SUBDEVICE_ID_COMPAQ_CHANGELING, 0 },
11999 { TG3PCI_SUBVENDOR_ID_COMPAQ,
12000 TG3PCI_SUBDEVICE_ID_COMPAQ_NC7780, TG3_PHY_ID_BCM5701 },
12001 { TG3PCI_SUBVENDOR_ID_COMPAQ,
12002 TG3PCI_SUBDEVICE_ID_COMPAQ_NC7780_2, TG3_PHY_ID_BCM5701 },
12004 /* IBM boards. */
12005 { TG3PCI_SUBVENDOR_ID_IBM,
12006 TG3PCI_SUBDEVICE_ID_IBM_5703SAX2, 0 }
12009 static struct subsys_tbl_ent * __devinit tg3_lookup_by_subsys(struct tg3 *tp)
12011 int i;
12013 for (i = 0; i < ARRAY_SIZE(subsys_id_to_phy_id); i++) {
12014 if ((subsys_id_to_phy_id[i].subsys_vendor ==
12015 tp->pdev->subsystem_vendor) &&
12016 (subsys_id_to_phy_id[i].subsys_devid ==
12017 tp->pdev->subsystem_device))
12018 return &subsys_id_to_phy_id[i];
12020 return NULL;
12023 static void __devinit tg3_get_eeprom_hw_cfg(struct tg3 *tp)
12025 u32 val;
12026 u16 pmcsr;
12028 /* On some early chips the SRAM cannot be accessed in D3hot state,
12029 * so we need to make sure we're in D0.
12030 */
12031 pci_read_config_word(tp->pdev, tp->pm_cap + PCI_PM_CTRL, &pmcsr);
12032 pmcsr &= ~PCI_PM_CTRL_STATE_MASK;
12033 pci_write_config_word(tp->pdev, tp->pm_cap + PCI_PM_CTRL, pmcsr);
12034 msleep(1);
12036 /* Make sure register accesses (indirect or otherwise)
12037 * will function correctly.
12038 */
12039 pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
12040 tp->misc_host_ctrl);
12042 /* The memory arbiter has to be enabled in order for SRAM accesses
12043 * to succeed. Normally on powerup the tg3 chip firmware will make
12044 * sure it is enabled, but other entities such as system netboot
12045 * code might disable it.
12046 */
12047 val = tr32(MEMARB_MODE);
12048 tw32(MEMARB_MODE, val | MEMARB_MODE_ENABLE);
12050 tp->phy_id = TG3_PHY_ID_INVALID;
12051 tp->led_ctrl = LED_CTRL_MODE_PHY_1;
12053 /* Assume an onboard device and WOL capable by default. */
12054 tp->tg3_flags |= TG3_FLAG_EEPROM_WRITE_PROT | TG3_FLAG_WOL_CAP;
12056 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
12057 if (!(tr32(PCIE_TRANSACTION_CFG) & PCIE_TRANS_CFG_LOM)) {
12058 tp->tg3_flags &= ~TG3_FLAG_EEPROM_WRITE_PROT;
12059 tp->tg3_flags2 |= TG3_FLG2_IS_NIC;
12061 val = tr32(VCPU_CFGSHDW);
12062 if (val & VCPU_CFGSHDW_ASPM_DBNC)
12063 tp->tg3_flags |= TG3_FLAG_ASPM_WORKAROUND;
12064 if ((val & VCPU_CFGSHDW_WOL_ENABLE) &&
12065 (val & VCPU_CFGSHDW_WOL_MAGPKT))
12066 tp->tg3_flags |= TG3_FLAG_WOL_ENABLE;
12067 goto done;
12070 tg3_read_mem(tp, NIC_SRAM_DATA_SIG, &val);
12071 if (val == NIC_SRAM_DATA_SIG_MAGIC) {
12072 u32 nic_cfg, led_cfg;
12073 u32 nic_phy_id, ver, cfg2 = 0, cfg4 = 0, eeprom_phy_id;
12074 int eeprom_phy_serdes = 0;
12076 tg3_read_mem(tp, NIC_SRAM_DATA_CFG, &nic_cfg);
12077 tp->nic_sram_data_cfg = nic_cfg;
12079 tg3_read_mem(tp, NIC_SRAM_DATA_VER, &ver);
12080 ver >>= NIC_SRAM_DATA_VER_SHIFT;
12081 if ((GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700) &&
12082 (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701) &&
12083 (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5703) &&
12084 (ver > 0) && (ver < 0x100))
12085 tg3_read_mem(tp, NIC_SRAM_DATA_CFG_2, &cfg2);
12087 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
12088 tg3_read_mem(tp, NIC_SRAM_DATA_CFG_4, &cfg4);
12090 if ((nic_cfg & NIC_SRAM_DATA_CFG_PHY_TYPE_MASK) ==
12091 NIC_SRAM_DATA_CFG_PHY_TYPE_FIBER)
12092 eeprom_phy_serdes = 1;
12094 tg3_read_mem(tp, NIC_SRAM_DATA_PHY_ID, &nic_phy_id);
12095 if (nic_phy_id != 0) {
12096 u32 id1 = nic_phy_id & NIC_SRAM_DATA_PHY_ID1_MASK;
12097 u32 id2 = nic_phy_id & NIC_SRAM_DATA_PHY_ID2_MASK;
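/* Reassemble the PHY ID into the same layout tg3_phy_probe() builds
 * from MII_PHYSID1/MII_PHYSID2 so it can be compared against the
 * TG3_PHY_ID_* constants.
 */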
12099 eeprom_phy_id = (id1 >> 16) << 10;
12100 eeprom_phy_id |= (id2 & 0xfc00) << 16;
12101 eeprom_phy_id |= (id2 & 0x03ff) << 0;
12102 } else
12103 eeprom_phy_id = 0;
12105 tp->phy_id = eeprom_phy_id;
12106 if (eeprom_phy_serdes) {
12107 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS))
12108 tp->phy_flags |= TG3_PHYFLG_PHY_SERDES;
12109 else
12110 tp->phy_flags |= TG3_PHYFLG_MII_SERDES;
12113 if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS)
12114 led_cfg = cfg2 & (NIC_SRAM_DATA_CFG_LED_MODE_MASK |
12115 SHASTA_EXT_LED_MODE_MASK);
12116 else
12117 led_cfg = nic_cfg & NIC_SRAM_DATA_CFG_LED_MODE_MASK;
12119 switch (led_cfg) {
12120 default:
12121 case NIC_SRAM_DATA_CFG_LED_MODE_PHY_1:
12122 tp->led_ctrl = LED_CTRL_MODE_PHY_1;
12123 break;
12125 case NIC_SRAM_DATA_CFG_LED_MODE_PHY_2:
12126 tp->led_ctrl = LED_CTRL_MODE_PHY_2;
12127 break;
12129 case NIC_SRAM_DATA_CFG_LED_MODE_MAC:
12130 tp->led_ctrl = LED_CTRL_MODE_MAC;
12132 /* Default to PHY_1_MODE if 0 (MAC_MODE) is
12133 * read, as happens with some older 5700/5701 bootcode.
12135 if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
12136 ASIC_REV_5700 ||
12137 GET_ASIC_REV(tp->pci_chip_rev_id) ==
12138 ASIC_REV_5701)
12139 tp->led_ctrl = LED_CTRL_MODE_PHY_1;
12141 break;
12143 case SHASTA_EXT_LED_SHARED:
12144 tp->led_ctrl = LED_CTRL_MODE_SHARED;
12145 if (tp->pci_chip_rev_id != CHIPREV_ID_5750_A0 &&
12146 tp->pci_chip_rev_id != CHIPREV_ID_5750_A1)
12147 tp->led_ctrl |= (LED_CTRL_MODE_PHY_1 |
12148 LED_CTRL_MODE_PHY_2);
12149 break;
12151 case SHASTA_EXT_LED_MAC:
12152 tp->led_ctrl = LED_CTRL_MODE_SHASTA_MAC;
12153 break;
12155 case SHASTA_EXT_LED_COMBO:
12156 tp->led_ctrl = LED_CTRL_MODE_COMBO;
12157 if (tp->pci_chip_rev_id != CHIPREV_ID_5750_A0)
12158 tp->led_ctrl |= (LED_CTRL_MODE_PHY_1 |
12159 LED_CTRL_MODE_PHY_2);
12160 break;
12164 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
12165 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) &&
12166 tp->pdev->subsystem_vendor == PCI_VENDOR_ID_DELL)
12167 tp->led_ctrl = LED_CTRL_MODE_PHY_2;
12169 if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5784_AX)
12170 tp->led_ctrl = LED_CTRL_MODE_PHY_1;
12172 if (nic_cfg & NIC_SRAM_DATA_CFG_EEPROM_WP) {
12173 tp->tg3_flags |= TG3_FLAG_EEPROM_WRITE_PROT;
12174 if ((tp->pdev->subsystem_vendor ==
12175 PCI_VENDOR_ID_ARIMA) &&
12176 (tp->pdev->subsystem_device == 0x205a ||
12177 tp->pdev->subsystem_device == 0x2063))
12178 tp->tg3_flags &= ~TG3_FLAG_EEPROM_WRITE_PROT;
12179 } else {
12180 tp->tg3_flags &= ~TG3_FLAG_EEPROM_WRITE_PROT;
12181 tp->tg3_flags2 |= TG3_FLG2_IS_NIC;
12184 if (nic_cfg & NIC_SRAM_DATA_CFG_ASF_ENABLE) {
12185 tp->tg3_flags |= TG3_FLAG_ENABLE_ASF;
12186 if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS)
12187 tp->tg3_flags2 |= TG3_FLG2_ASF_NEW_HANDSHAKE;
12190 if ((nic_cfg & NIC_SRAM_DATA_CFG_APE_ENABLE) &&
12191 (tp->tg3_flags2 & TG3_FLG2_5750_PLUS))
12192 tp->tg3_flags3 |= TG3_FLG3_ENABLE_APE;
12194 if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES &&
12195 !(nic_cfg & NIC_SRAM_DATA_CFG_FIBER_WOL))
12196 tp->tg3_flags &= ~TG3_FLAG_WOL_CAP;
12198 if ((tp->tg3_flags & TG3_FLAG_WOL_CAP) &&
12199 (nic_cfg & NIC_SRAM_DATA_CFG_WOL_ENABLE))
12200 tp->tg3_flags |= TG3_FLAG_WOL_ENABLE;
12202 if (cfg2 & (1 << 17))
12203 tp->phy_flags |= TG3_PHYFLG_CAPACITIVE_COUPLING;
12205 /* SerDes signal pre-emphasis in register 0x590 is set by
12206 * the bootcode if bit 18 is set. */
12207 if (cfg2 & (1 << 18))
12208 tp->phy_flags |= TG3_PHYFLG_SERDES_PREEMPHASIS;
12210 if (((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 &&
12211 GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5784_AX)) &&
12212 (cfg2 & NIC_SRAM_DATA_CFG_2_APD_EN))
12213 tp->phy_flags |= TG3_PHYFLG_ENABLE_APD;
12215 if ((tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) &&
12216 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5785 &&
12217 !(tp->tg3_flags3 & TG3_FLG3_5717_PLUS)) {
12218 u32 cfg3;
12220 tg3_read_mem(tp, NIC_SRAM_DATA_CFG_3, &cfg3);
12221 if (cfg3 & NIC_SRAM_ASPM_DEBOUNCE)
12222 tp->tg3_flags |= TG3_FLAG_ASPM_WORKAROUND;
12225 if (cfg4 & NIC_SRAM_RGMII_INBAND_DISABLE)
12226 tp->tg3_flags3 |= TG3_FLG3_RGMII_INBAND_DISABLE;
12227 if (cfg4 & NIC_SRAM_RGMII_EXT_IBND_RX_EN)
12228 tp->tg3_flags3 |= TG3_FLG3_RGMII_EXT_IBND_RX_EN;
12229 if (cfg4 & NIC_SRAM_RGMII_EXT_IBND_TX_EN)
12230 tp->tg3_flags3 |= TG3_FLG3_RGMII_EXT_IBND_TX_EN;
12232 done:
12233 device_init_wakeup(&tp->pdev->dev, tp->tg3_flags & TG3_FLAG_WOL_CAP);
12234 device_set_wakeup_enable(&tp->pdev->dev,
12235 tp->tg3_flags & TG3_FLAG_WOL_ENABLE);
12238 static int __devinit tg3_issue_otp_command(struct tg3 *tp, u32 cmd)
12240 int i;
12241 u32 val;
12243 tw32(OTP_CTRL, cmd | OTP_CTRL_OTP_CMD_START);
12244 tw32(OTP_CTRL, cmd);
12246 /* Wait for up to 1 ms for command to execute. */
12247 for (i = 0; i < 100; i++) {
12248 val = tr32(OTP_STATUS);
12249 if (val & OTP_STATUS_CMD_DONE)
12250 break;
12251 udelay(10);
12254 return (val & OTP_STATUS_CMD_DONE) ? 0 : -EBUSY;
12257 /* Read the gphy configuration from the OTP region of the chip. The gphy
12258 * configuration is a 32-bit value that straddles the alignment boundary.
12259 * We do two 32-bit reads and then shift and merge the results.
12261 static u32 __devinit tg3_read_otp_phycfg(struct tg3 *tp)
12263 u32 bhalf_otp, thalf_otp;
12265 tw32(OTP_MODE, OTP_MODE_OTP_THRU_GRC);
12267 if (tg3_issue_otp_command(tp, OTP_CTRL_OTP_CMD_INIT))
12268 return 0;
12270 tw32(OTP_ADDRESS, OTP_ADDRESS_MAGIC1);
12272 if (tg3_issue_otp_command(tp, OTP_CTRL_OTP_CMD_READ))
12273 return 0;
12275 thalf_otp = tr32(OTP_READ_DATA);
12277 tw32(OTP_ADDRESS, OTP_ADDRESS_MAGIC2);
12279 if (tg3_issue_otp_command(tp, OTP_CTRL_OTP_CMD_READ))
12280 return 0;
12282 bhalf_otp = tr32(OTP_READ_DATA);
12284 return ((thalf_otp & 0x0000ffff) << 16) | (bhalf_otp >> 16);
12287 static int __devinit tg3_phy_probe(struct tg3 *tp)
12289 u32 hw_phy_id_1, hw_phy_id_2;
12290 u32 hw_phy_id, hw_phy_id_masked;
12291 int err;
12293 if (tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB)
12294 return tg3_phy_init(tp);
12296 /* Reading the PHY ID register can conflict with ASF
12297 * firmware access to the PHY hardware.
12299 err = 0;
12300 if ((tp->tg3_flags & TG3_FLAG_ENABLE_ASF) ||
12301 (tp->tg3_flags3 & TG3_FLG3_ENABLE_APE)) {
12302 hw_phy_id = hw_phy_id_masked = TG3_PHY_ID_INVALID;
12303 } else {
12304 /* Now read the physical PHY_ID from the chip and verify
12305 * that it is sane. If it doesn't look good, we fall back
12306 * to the PHY_ID value found in the eeprom area and, failing
12307 * that, to the hard-coded subsystem ID table.
12309 err |= tg3_readphy(tp, MII_PHYSID1, &hw_phy_id_1);
12310 err |= tg3_readphy(tp, MII_PHYSID2, &hw_phy_id_2);
12312 hw_phy_id = (hw_phy_id_1 & 0xffff) << 10;
12313 hw_phy_id |= (hw_phy_id_2 & 0xfc00) << 16;
12314 hw_phy_id |= (hw_phy_id_2 & 0x03ff) << 0;
12316 hw_phy_id_masked = hw_phy_id & TG3_PHY_ID_MASK;
12319 if (!err && TG3_KNOWN_PHY_ID(hw_phy_id_masked)) {
12320 tp->phy_id = hw_phy_id;
12321 if (hw_phy_id_masked == TG3_PHY_ID_BCM8002)
12322 tp->phy_flags |= TG3_PHYFLG_PHY_SERDES;
12323 else
12324 tp->phy_flags &= ~TG3_PHYFLG_PHY_SERDES;
12325 } else {
12326 if (tp->phy_id != TG3_PHY_ID_INVALID) {
12327 /* Do nothing, phy ID already set up in
12328 * tg3_get_eeprom_hw_cfg().
12330 } else {
12331 struct subsys_tbl_ent *p;
12333 /* No eeprom signature? Try the hardcoded
12334 * subsys device table.
12336 p = tg3_lookup_by_subsys(tp);
12337 if (!p)
12338 return -ENODEV;
12340 tp->phy_id = p->phy_id;
12341 if (!tp->phy_id ||
12342 tp->phy_id == TG3_PHY_ID_BCM8002)
12343 tp->phy_flags |= TG3_PHYFLG_PHY_SERDES;
12347 if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES) &&
12348 !(tp->tg3_flags3 & TG3_FLG3_ENABLE_APE) &&
12349 !(tp->tg3_flags & TG3_FLAG_ENABLE_ASF)) {
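/* No SERDES and no firmware agent owns the PHY: if the link is down,
 * reset the PHY and make sure it advertises every mode this chip
 * supports before restarting autonegotiation.
 */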
12350 u32 bmsr, adv_reg, tg3_ctrl, mask;
12352 tg3_readphy(tp, MII_BMSR, &bmsr);
12353 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
12354 (bmsr & BMSR_LSTATUS))
12355 goto skip_phy_reset;
12357 err = tg3_phy_reset(tp);
12358 if (err)
12359 return err;
12361 adv_reg = (ADVERTISE_10HALF | ADVERTISE_10FULL |
12362 ADVERTISE_100HALF | ADVERTISE_100FULL |
12363 ADVERTISE_CSMA | ADVERTISE_PAUSE_CAP);
12364 tg3_ctrl = 0;
12365 if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
12366 tg3_ctrl = (MII_TG3_CTRL_ADV_1000_HALF |
12367 MII_TG3_CTRL_ADV_1000_FULL);
12368 if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
12369 tp->pci_chip_rev_id == CHIPREV_ID_5701_B0)
12370 tg3_ctrl |= (MII_TG3_CTRL_AS_MASTER |
12371 MII_TG3_CTRL_ENABLE_AS_MASTER);
12374 mask = (ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full |
12375 ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full |
12376 ADVERTISED_1000baseT_Half | ADVERTISED_1000baseT_Full);
12377 if (!tg3_copper_is_advertising_all(tp, mask)) {
12378 tg3_writephy(tp, MII_ADVERTISE, adv_reg);
12380 if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY))
12381 tg3_writephy(tp, MII_TG3_CTRL, tg3_ctrl);
12383 tg3_writephy(tp, MII_BMCR,
12384 BMCR_ANENABLE | BMCR_ANRESTART);
12386 tg3_phy_set_wirespeed(tp);
12388 tg3_writephy(tp, MII_ADVERTISE, adv_reg);
12389 if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY))
12390 tg3_writephy(tp, MII_TG3_CTRL, tg3_ctrl);
12393 skip_phy_reset:
12394 if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5401) {
12395 err = tg3_init_5401phy_dsp(tp);
12396 if (err)
12397 return err;
12399 err = tg3_init_5401phy_dsp(tp);
12402 if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES)
12403 tp->link_config.advertising =
12404 (ADVERTISED_1000baseT_Half |
12405 ADVERTISED_1000baseT_Full |
12406 ADVERTISED_Autoneg |
12407 ADVERTISED_FIBRE);
12408 if (tp->phy_flags & TG3_PHYFLG_10_100_ONLY)
12409 tp->link_config.advertising &=
12410 ~(ADVERTISED_1000baseT_Half |
12411 ADVERTISED_1000baseT_Full);
12413 return err;
12416 static void __devinit tg3_read_vpd(struct tg3 *tp)
12418 u8 vpd_data[TG3_NVM_VPD_LEN];
12419 unsigned int block_end, rosize, len;
12420 int j, i = 0;
12421 u32 magic;
12423 if ((tp->tg3_flags3 & TG3_FLG3_NO_NVRAM) ||
12424 tg3_nvram_read(tp, 0x0, &magic))
12425 goto out_not_found;
12427 if (magic == TG3_EEPROM_MAGIC) {
12428 for (i = 0; i < TG3_NVM_VPD_LEN; i += 4) {
12429 u32 tmp;
12431 /* The data is in little-endian format in NVRAM.
12432 * Use the big-endian read routines to preserve
12433 * the byte order as it exists in NVRAM.
12435 if (tg3_nvram_read_be32(tp, TG3_NVM_VPD_OFF + i, &tmp))
12436 goto out_not_found;
12438 memcpy(&vpd_data[i], &tmp, sizeof(tmp));
12440 } else {
12441 ssize_t cnt;
12442 unsigned int pos = 0;
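/* No recognizable EEPROM signature: fall back to reading the VPD
 * through the PCI VPD capability, in at most three attempts.
 */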
12444 for (; pos < TG3_NVM_VPD_LEN && i < 3; i++, pos += cnt) {
12445 cnt = pci_read_vpd(tp->pdev, pos,
12446 TG3_NVM_VPD_LEN - pos,
12447 &vpd_data[pos]);
12448 if (cnt == -ETIMEDOUT || cnt == -EINTR)
12449 cnt = 0;
12450 else if (cnt < 0)
12451 goto out_not_found;
12453 if (pos != TG3_NVM_VPD_LEN)
12454 goto out_not_found;
12457 i = pci_vpd_find_tag(vpd_data, 0, TG3_NVM_VPD_LEN,
12458 PCI_VPD_LRDT_RO_DATA);
12459 if (i < 0)
12460 goto out_not_found;
12462 rosize = pci_vpd_lrdt_size(&vpd_data[i]);
12463 block_end = i + PCI_VPD_LRDT_TAG_SIZE + rosize;
12464 i += PCI_VPD_LRDT_TAG_SIZE;
12466 if (block_end > TG3_NVM_VPD_LEN)
12467 goto out_not_found;
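/* If the read-only section names manufacturer "1028" (Dell), the
 * VENDOR0 keyword carries a firmware string to append to fw_ver.
 */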
12469 j = pci_vpd_find_info_keyword(vpd_data, i, rosize,
12470 PCI_VPD_RO_KEYWORD_MFR_ID);
12471 if (j > 0) {
12472 len = pci_vpd_info_field_size(&vpd_data[j]);
12474 j += PCI_VPD_INFO_FLD_HDR_SIZE;
12475 if (j + len > block_end || len != 4 ||
12476 memcmp(&vpd_data[j], "1028", 4))
12477 goto partno;
12479 j = pci_vpd_find_info_keyword(vpd_data, i, rosize,
12480 PCI_VPD_RO_KEYWORD_VENDOR0);
12481 if (j < 0)
12482 goto partno;
12484 len = pci_vpd_info_field_size(&vpd_data[j]);
12486 j += PCI_VPD_INFO_FLD_HDR_SIZE;
12487 if (j + len > block_end)
12488 goto partno;
12490 memcpy(tp->fw_ver, &vpd_data[j], len);
12491 strncat(tp->fw_ver, " bc ", TG3_NVM_VPD_LEN - len - 1);
12494 partno:
12495 i = pci_vpd_find_info_keyword(vpd_data, i, rosize,
12496 PCI_VPD_RO_KEYWORD_PARTNO);
12497 if (i < 0)
12498 goto out_not_found;
12500 len = pci_vpd_info_field_size(&vpd_data[i]);
12502 i += PCI_VPD_INFO_FLD_HDR_SIZE;
12503 if (len > TG3_BPN_SIZE ||
12504 (len + i) > TG3_NVM_VPD_LEN)
12505 goto out_not_found;
12507 memcpy(tp->board_part_number, &vpd_data[i], len);
12509 return;
12511 out_not_found:
12512 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
12513 strcpy(tp->board_part_number, "BCM95906");
12514 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780 &&
12515 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57780)
12516 strcpy(tp->board_part_number, "BCM57780");
12517 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780 &&
12518 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57760)
12519 strcpy(tp->board_part_number, "BCM57760");
12520 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780 &&
12521 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57790)
12522 strcpy(tp->board_part_number, "BCM57790");
12523 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780 &&
12524 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57788)
12525 strcpy(tp->board_part_number, "BCM57788");
12526 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765 &&
12527 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57761)
12528 strcpy(tp->board_part_number, "BCM57761");
12529 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765 &&
12530 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57765)
12531 strcpy(tp->board_part_number, "BCM57765");
12532 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765 &&
12533 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57781)
12534 strcpy(tp->board_part_number, "BCM57781");
12535 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765 &&
12536 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57785)
12537 strcpy(tp->board_part_number, "BCM57785");
12538 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765 &&
12539 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57791)
12540 strcpy(tp->board_part_number, "BCM57791");
12541 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765 &&
12542 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57795)
12543 strcpy(tp->board_part_number, "BCM57795");
12544 else
12545 strcpy(tp->board_part_number, "none");
12548 static int __devinit tg3_fw_img_is_valid(struct tg3 *tp, u32 offset)
12550 u32 val;
12552 if (tg3_nvram_read(tp, offset, &val) ||
12553 (val & 0xfc000000) != 0x0c000000 ||
12554 tg3_nvram_read(tp, offset + 4, &val) ||
12555 val != 0)
12556 return 0;
12558 return 1;
12561 static void __devinit tg3_read_bc_ver(struct tg3 *tp)
12563 u32 val, offset, start, ver_offset;
12564 int i, dst_off;
12565 bool newver = false;
12567 if (tg3_nvram_read(tp, 0xc, &offset) ||
12568 tg3_nvram_read(tp, 0x4, &start))
12569 return;
12571 offset = tg3_nvram_logical_addr(tp, offset);
12573 if (tg3_nvram_read(tp, offset, &val))
12574 return;
12576 if ((val & 0xfc000000) == 0x0c000000) {
12577 if (tg3_nvram_read(tp, offset + 4, &val))
12578 return;
12580 if (val == 0)
12581 newver = true;
12584 dst_off = strlen(tp->fw_ver);
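/* Newer bootcode images embed a 16-byte printable version string;
 * older images only expose packed major/minor fields, which are
 * formatted as "v<major>.<minor>" below.
 */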
12586 if (newver) {
12587 if (TG3_VER_SIZE - dst_off < 16 ||
12588 tg3_nvram_read(tp, offset + 8, &ver_offset))
12589 return;
12591 offset = offset + ver_offset - start;
12592 for (i = 0; i < 16; i += 4) {
12593 __be32 v;
12594 if (tg3_nvram_read_be32(tp, offset + i, &v))
12595 return;
12597 memcpy(tp->fw_ver + dst_off + i, &v, sizeof(v));
12599 } else {
12600 u32 major, minor;
12602 if (tg3_nvram_read(tp, TG3_NVM_PTREV_BCVER, &ver_offset))
12603 return;
12605 major = (ver_offset & TG3_NVM_BCVER_MAJMSK) >>
12606 TG3_NVM_BCVER_MAJSFT;
12607 minor = ver_offset & TG3_NVM_BCVER_MINMSK;
12608 snprintf(&tp->fw_ver[dst_off], TG3_VER_SIZE - dst_off,
12609 "v%d.%02d", major, minor);
12613 static void __devinit tg3_read_hwsb_ver(struct tg3 *tp)
12615 u32 val, major, minor;
12617 /* Use native endian representation */
12618 if (tg3_nvram_read(tp, TG3_NVM_HWSB_CFG1, &val))
12619 return;
12621 major = (val & TG3_NVM_HWSB_CFG1_MAJMSK) >>
12622 TG3_NVM_HWSB_CFG1_MAJSFT;
12623 minor = (val & TG3_NVM_HWSB_CFG1_MINMSK) >>
12624 TG3_NVM_HWSB_CFG1_MINSFT;
12626 snprintf(&tp->fw_ver[0], TG3_VER_SIZE, "sb v%d.%02d", major, minor);
12629 static void __devinit tg3_read_sb_ver(struct tg3 *tp, u32 val)
12631 u32 offset, major, minor, build;
12633 strncat(tp->fw_ver, "sb", TG3_VER_SIZE - strlen(tp->fw_ver) - 1);
12635 if ((val & TG3_EEPROM_SB_FORMAT_MASK) != TG3_EEPROM_SB_FORMAT_1)
12636 return;
12638 switch (val & TG3_EEPROM_SB_REVISION_MASK) {
12639 case TG3_EEPROM_SB_REVISION_0:
12640 offset = TG3_EEPROM_SB_F1R0_EDH_OFF;
12641 break;
12642 case TG3_EEPROM_SB_REVISION_2:
12643 offset = TG3_EEPROM_SB_F1R2_EDH_OFF;
12644 break;
12645 case TG3_EEPROM_SB_REVISION_3:
12646 offset = TG3_EEPROM_SB_F1R3_EDH_OFF;
12647 break;
12648 case TG3_EEPROM_SB_REVISION_4:
12649 offset = TG3_EEPROM_SB_F1R4_EDH_OFF;
12650 break;
12651 case TG3_EEPROM_SB_REVISION_5:
12652 offset = TG3_EEPROM_SB_F1R5_EDH_OFF;
12653 break;
12654 default:
12655 return;
12658 if (tg3_nvram_read(tp, offset, &val))
12659 return;
12661 build = (val & TG3_EEPROM_SB_EDH_BLD_MASK) >>
12662 TG3_EEPROM_SB_EDH_BLD_SHFT;
12663 major = (val & TG3_EEPROM_SB_EDH_MAJ_MASK) >>
12664 TG3_EEPROM_SB_EDH_MAJ_SHFT;
12665 minor = val & TG3_EEPROM_SB_EDH_MIN_MASK;
12667 if (minor > 99 || build > 26)
12668 return;
12670 offset = strlen(tp->fw_ver);
12671 snprintf(&tp->fw_ver[offset], TG3_VER_SIZE - offset,
12672 " v%d.%02d", major, minor);
12674 if (build > 0) {
12675 offset = strlen(tp->fw_ver);
12676 if (offset < TG3_VER_SIZE - 1)
12677 tp->fw_ver[offset] = 'a' + build - 1;
12681 static void __devinit tg3_read_mgmtfw_ver(struct tg3 *tp)
12683 u32 val, offset, start;
12684 int i, vlen;
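/* Scan the NVRAM directory for the ASF initialization entry; it
 * points at the management firmware image whose version string is
 * appended to fw_ver.
 */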
12686 for (offset = TG3_NVM_DIR_START;
12687 offset < TG3_NVM_DIR_END;
12688 offset += TG3_NVM_DIRENT_SIZE) {
12689 if (tg3_nvram_read(tp, offset, &val))
12690 return;
12692 if ((val >> TG3_NVM_DIRTYPE_SHIFT) == TG3_NVM_DIRTYPE_ASFINI)
12693 break;
12696 if (offset == TG3_NVM_DIR_END)
12697 return;
12699 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS))
12700 start = 0x08000000;
12701 else if (tg3_nvram_read(tp, offset - 4, &start))
12702 return;
12704 if (tg3_nvram_read(tp, offset + 4, &offset) ||
12705 !tg3_fw_img_is_valid(tp, offset) ||
12706 tg3_nvram_read(tp, offset + 8, &val))
12707 return;
12709 offset += val - start;
12711 vlen = strlen(tp->fw_ver);
12713 tp->fw_ver[vlen++] = ',';
12714 tp->fw_ver[vlen++] = ' ';
12716 for (i = 0; i < 4; i++) {
12717 __be32 v;
12718 if (tg3_nvram_read_be32(tp, offset, &v))
12719 return;
12721 offset += sizeof(v);
12723 if (vlen > TG3_VER_SIZE - sizeof(v)) {
12724 memcpy(&tp->fw_ver[vlen], &v, TG3_VER_SIZE - vlen);
12725 break;
12728 memcpy(&tp->fw_ver[vlen], &v, sizeof(v));
12729 vlen += sizeof(v);
12733 static void __devinit tg3_read_dash_ver(struct tg3 *tp)
12735 int vlen;
12736 u32 apedata;
12737 char *fwtype;
12739 if (!(tp->tg3_flags3 & TG3_FLG3_ENABLE_APE) ||
12740 !(tp->tg3_flags & TG3_FLAG_ENABLE_ASF))
12741 return;
12743 apedata = tg3_ape_read32(tp, TG3_APE_SEG_SIG);
12744 if (apedata != APE_SEG_SIG_MAGIC)
12745 return;
12747 apedata = tg3_ape_read32(tp, TG3_APE_FW_STATUS);
12748 if (!(apedata & APE_FW_STATUS_READY))
12749 return;
12751 apedata = tg3_ape_read32(tp, TG3_APE_FW_VERSION);
12753 if (tg3_ape_read32(tp, TG3_APE_FW_FEATURES) & TG3_APE_FW_FEATURE_NCSI) {
12754 tp->tg3_flags3 |= TG3_FLG3_APE_HAS_NCSI;
12755 fwtype = "NCSI";
12756 } else {
12757 fwtype = "DASH";
12760 vlen = strlen(tp->fw_ver);
12762 snprintf(&tp->fw_ver[vlen], TG3_VER_SIZE - vlen, " %s v%d.%d.%d.%d",
12763 fwtype,
12764 (apedata & APE_FW_VERSION_MAJMSK) >> APE_FW_VERSION_MAJSFT,
12765 (apedata & APE_FW_VERSION_MINMSK) >> APE_FW_VERSION_MINSFT,
12766 (apedata & APE_FW_VERSION_REVMSK) >> APE_FW_VERSION_REVSFT,
12767 (apedata & APE_FW_VERSION_BLDMSK));
12770 static void __devinit tg3_read_fw_ver(struct tg3 *tp)
12772 u32 val;
12773 bool vpd_vers = false;
12775 if (tp->fw_ver[0] != 0)
12776 vpd_vers = true;
12778 if (tp->tg3_flags3 & TG3_FLG3_NO_NVRAM) {
12779 strcat(tp->fw_ver, "sb");
12780 return;
12783 if (tg3_nvram_read(tp, 0, &val))
12784 return;
12786 if (val == TG3_EEPROM_MAGIC)
12787 tg3_read_bc_ver(tp);
12788 else if ((val & TG3_EEPROM_MAGIC_FW_MSK) == TG3_EEPROM_MAGIC_FW)
12789 tg3_read_sb_ver(tp, val);
12790 else if ((val & TG3_EEPROM_MAGIC_HW_MSK) == TG3_EEPROM_MAGIC_HW)
12791 tg3_read_hwsb_ver(tp);
12792 else
12793 return;
12795 if (!(tp->tg3_flags & TG3_FLAG_ENABLE_ASF) ||
12796 (tp->tg3_flags3 & TG3_FLG3_ENABLE_APE) || vpd_vers)
12797 goto done;
12799 tg3_read_mgmtfw_ver(tp);
12801 done:
12802 tp->fw_ver[TG3_VER_SIZE - 1] = 0;
12805 static struct pci_dev * __devinit tg3_find_peer(struct tg3 *);
12807 static inline void vlan_features_add(struct net_device *dev, unsigned long flags)
12809 #if TG3_VLAN_TAG_USED
12810 dev->vlan_features |= flags;
12811 #endif
12814 static int __devinit tg3_get_invariants(struct tg3 *tp)
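/* Host bridges known to reorder posted writes; matched below to
 * decide whether mailbox writes must be flushed with a read-back.
 */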
12816 static struct pci_device_id write_reorder_chipsets[] = {
12817 { PCI_DEVICE(PCI_VENDOR_ID_AMD,
12818 PCI_DEVICE_ID_AMD_FE_GATE_700C) },
12819 { PCI_DEVICE(PCI_VENDOR_ID_AMD,
12820 PCI_DEVICE_ID_AMD_8131_BRIDGE) },
12821 { PCI_DEVICE(PCI_VENDOR_ID_VIA,
12822 PCI_DEVICE_ID_VIA_8385_0) },
12823 { },
12825 u32 misc_ctrl_reg;
12826 u32 pci_state_reg, grc_misc_cfg;
12827 u32 val;
12828 u16 pci_cmd;
12829 int err;
12831 /* Force memory write invalidate off. If we leave it on,
12832 * then on 5700_BX chips we have to enable a workaround.
12833 * The workaround is to set the TG3PCI_DMA_RW_CTRL boundary
12834 * to match the cacheline size. The Broadcom driver has this
12835 * workaround but turns MWI off all the time, so it is never
12836 * used. This seems to suggest that the workaround is insufficient.
12838 pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
12839 pci_cmd &= ~PCI_COMMAND_INVALIDATE;
12840 pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
12842 /* It is absolutely critical that TG3PCI_MISC_HOST_CTRL
12843 * has the register indirect write enable bit set before
12844 * we try to access any of the MMIO registers. It is also
12845 * critical that the PCI-X hw workaround situation is decided
12846 * before that as well.
12848 pci_read_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
12849 &misc_ctrl_reg);
12851 tp->pci_chip_rev_id = (misc_ctrl_reg >>
12852 MISC_HOST_CTRL_CHIPREV_SHIFT);
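/* Newer devices report ASIC_REV_USE_PROD_ID_REG here; the real chip
 * revision then has to be read from a product ID register whose
 * location varies by device family.
 */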
12853 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_USE_PROD_ID_REG) {
12854 u32 prod_id_asic_rev;
12856 if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717 ||
12857 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5718 ||
12858 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5724 ||
12859 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5719)
12860 pci_read_config_dword(tp->pdev,
12861 TG3PCI_GEN2_PRODID_ASICREV,
12862 &prod_id_asic_rev);
12863 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57781 ||
12864 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57785 ||
12865 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57761 ||
12866 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57765 ||
12867 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57791 ||
12868 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57795)
12869 pci_read_config_dword(tp->pdev,
12870 TG3PCI_GEN15_PRODID_ASICREV,
12871 &prod_id_asic_rev);
12872 else
12873 pci_read_config_dword(tp->pdev, TG3PCI_PRODID_ASICREV,
12874 &prod_id_asic_rev);
12876 tp->pci_chip_rev_id = prod_id_asic_rev;
12879 /* Wrong chip ID in 5752 A0. This code can be removed later
12880 * as A0 is not in production.
12882 if (tp->pci_chip_rev_id == CHIPREV_ID_5752_A0_HW)
12883 tp->pci_chip_rev_id = CHIPREV_ID_5752_A0;
12885 /* If we have 5702/03 A1 or A2 on certain ICH chipsets,
12886 * we need to disable memory and use config. cycles
12887 * only to access all registers. The 5702/03 chips
12888 * can mistakenly decode the special cycles from the
12889 * ICH chipsets as memory write cycles, causing corruption
12890 * of register and memory space. Only certain ICH bridges
12891 * will drive special cycles with non-zero data during the
12892 * address phase which can fall within the 5703's address
12893 * range. This is not an ICH bug as the PCI spec allows
12894 * non-zero address during special cycles. However, only
12895 * these ICH bridges are known to drive non-zero addresses
12896 * during special cycles.
12898 * Since special cycles do not cross PCI bridges, we only
12899 * enable this workaround if the 5703 is on the secondary
12900 * bus of these ICH bridges.
12902 if ((tp->pci_chip_rev_id == CHIPREV_ID_5703_A1) ||
12903 (tp->pci_chip_rev_id == CHIPREV_ID_5703_A2)) {
12904 static struct tg3_dev_id {
12905 u32 vendor;
12906 u32 device;
12907 u32 rev;
12908 } ich_chipsets[] = {
12909 { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801AA_8,
12910 PCI_ANY_ID },
12911 { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801AB_8,
12912 PCI_ANY_ID },
12913 { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801BA_11,
12914 0xa },
12915 { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801BA_6,
12916 PCI_ANY_ID },
12917 { },
12919 struct tg3_dev_id *pci_id = &ich_chipsets[0];
12920 struct pci_dev *bridge = NULL;
12922 while (pci_id->vendor != 0) {
12923 bridge = pci_get_device(pci_id->vendor, pci_id->device,
12924 bridge);
12925 if (!bridge) {
12926 pci_id++;
12927 continue;
12929 if (pci_id->rev != PCI_ANY_ID) {
12930 if (bridge->revision > pci_id->rev)
12931 continue;
12933 if (bridge->subordinate &&
12934 (bridge->subordinate->number ==
12935 tp->pdev->bus->number)) {
12937 tp->tg3_flags2 |= TG3_FLG2_ICH_WORKAROUND;
12938 pci_dev_put(bridge);
12939 break;
12944 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701)) {
12945 static struct tg3_dev_id {
12946 u32 vendor;
12947 u32 device;
12948 } bridge_chipsets[] = {
12949 { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_PXH_0 },
12950 { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_PXH_1 },
12951 { },
12953 struct tg3_dev_id *pci_id = &bridge_chipsets[0];
12954 struct pci_dev *bridge = NULL;
12956 while (pci_id->vendor != 0) {
12957 bridge = pci_get_device(pci_id->vendor,
12958 pci_id->device,
12959 bridge);
12960 if (!bridge) {
12961 pci_id++;
12962 continue;
12964 if (bridge->subordinate &&
12965 (bridge->subordinate->number <=
12966 tp->pdev->bus->number) &&
12967 (bridge->subordinate->subordinate >=
12968 tp->pdev->bus->number)) {
12969 tp->tg3_flags3 |= TG3_FLG3_5701_DMA_BUG;
12970 pci_dev_put(bridge);
12971 break;
12976 /* The EPB bridge inside 5714, 5715, and 5780 cannot support
12977 * DMA addresses > 40-bit. This bridge may have additional
12978 * 57xx devices behind it in some 4-port NIC designs, for example.
12979 * Any tg3 device found behind the bridge will also need the 40-bit
12980 * DMA workaround.
12982 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780 ||
12983 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714) {
12984 tp->tg3_flags2 |= TG3_FLG2_5780_CLASS;
12985 tp->tg3_flags |= TG3_FLAG_40BIT_DMA_BUG;
12986 tp->msi_cap = pci_find_capability(tp->pdev, PCI_CAP_ID_MSI);
12987 } else {
12988 struct pci_dev *bridge = NULL;
12990 do {
12991 bridge = pci_get_device(PCI_VENDOR_ID_SERVERWORKS,
12992 PCI_DEVICE_ID_SERVERWORKS_EPB,
12993 bridge);
12994 if (bridge && bridge->subordinate &&
12995 (bridge->subordinate->number <=
12996 tp->pdev->bus->number) &&
12997 (bridge->subordinate->subordinate >=
12998 tp->pdev->bus->number)) {
12999 tp->tg3_flags |= TG3_FLAG_40BIT_DMA_BUG;
13000 pci_dev_put(bridge);
13001 break;
13003 } while (bridge);
13006 /* Initialize misc host control in PCI block. */
13007 tp->misc_host_ctrl |= (misc_ctrl_reg &
13008 MISC_HOST_CTRL_CHIPREV);
13009 pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
13010 tp->misc_host_ctrl);
13012 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
13013 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714 ||
13014 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717)
13015 tp->pdev_peer = tg3_find_peer(tp);
13017 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
13018 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
13019 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765)
13020 tp->tg3_flags3 |= TG3_FLG3_5717_PLUS;
13022 /* Intentionally exclude ASIC_REV_5906 */
13023 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
13024 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 ||
13025 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
13026 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
13027 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
13028 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780 ||
13029 (tp->tg3_flags3 & TG3_FLG3_5717_PLUS))
13030 tp->tg3_flags3 |= TG3_FLG3_5755_PLUS;
13032 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750 ||
13033 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752 ||
13034 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906 ||
13035 (tp->tg3_flags3 & TG3_FLG3_5755_PLUS) ||
13036 (tp->tg3_flags2 & TG3_FLG2_5780_CLASS))
13037 tp->tg3_flags2 |= TG3_FLG2_5750_PLUS;
13039 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) ||
13040 (tp->tg3_flags2 & TG3_FLG2_5750_PLUS))
13041 tp->tg3_flags2 |= TG3_FLG2_5705_PLUS;
13043 /* 5700 B0 chips do not support checksumming correctly due
13044 * to hardware bugs.
13046 if (tp->pci_chip_rev_id == CHIPREV_ID_5700_B0)
13047 tp->tg3_flags |= TG3_FLAG_BROKEN_CHECKSUMS;
13048 else {
13049 unsigned long features = NETIF_F_IP_CSUM | NETIF_F_SG | NETIF_F_GRO;
13051 tp->tg3_flags |= TG3_FLAG_RX_CHECKSUMS;
13052 if (tp->tg3_flags3 & TG3_FLG3_5755_PLUS)
13053 features |= NETIF_F_IPV6_CSUM;
13054 tp->dev->features |= features;
13055 vlan_features_add(tp->dev, features);
13058 /* Determine TSO capabilities */
13059 if (tp->tg3_flags3 & TG3_FLG3_5717_PLUS)
13060 tp->tg3_flags2 |= TG3_FLG2_HW_TSO_3;
13061 else if ((tp->tg3_flags3 & TG3_FLG3_5755_PLUS) ||
13062 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
13063 tp->tg3_flags2 |= TG3_FLG2_HW_TSO_2;
13064 else if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS) {
13065 tp->tg3_flags2 |= TG3_FLG2_HW_TSO_1 | TG3_FLG2_TSO_BUG;
13066 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750 &&
13067 tp->pci_chip_rev_id >= CHIPREV_ID_5750_C2)
13068 tp->tg3_flags2 &= ~TG3_FLG2_TSO_BUG;
13069 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
13070 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701 &&
13071 tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) {
13072 tp->tg3_flags2 |= TG3_FLG2_TSO_BUG;
13073 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705)
13074 tp->fw_needed = FIRMWARE_TG3TSO5;
13075 else
13076 tp->fw_needed = FIRMWARE_TG3TSO;
13079 tp->irq_max = 1;
13081 if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS) {
13082 tp->tg3_flags |= TG3_FLAG_SUPPORT_MSI;
13083 if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_AX ||
13084 GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_BX ||
13085 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714 &&
13086 tp->pci_chip_rev_id <= CHIPREV_ID_5714_A2 &&
13087 tp->pdev_peer == tp->pdev))
13088 tp->tg3_flags &= ~TG3_FLAG_SUPPORT_MSI;
13090 if ((tp->tg3_flags3 & TG3_FLG3_5755_PLUS) ||
13091 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
13092 tp->tg3_flags2 |= TG3_FLG2_1SHOT_MSI;
13095 if (tp->tg3_flags3 & TG3_FLG3_5717_PLUS) {
13096 tp->tg3_flags |= TG3_FLAG_SUPPORT_MSIX;
13097 tp->irq_max = TG3_IRQ_MAX_VECS;
13101 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
13102 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
13103 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
13104 tp->tg3_flags3 |= TG3_FLG3_SHORT_DMA_BUG;
13105 else if (!(tp->tg3_flags3 & TG3_FLG3_5755_PLUS)) {
13106 tp->tg3_flags3 |= TG3_FLG3_4G_DMA_BNDRY_BUG;
13107 tp->tg3_flags3 |= TG3_FLG3_40BIT_DMA_LIMIT_BUG;
13110 if (tp->tg3_flags3 & TG3_FLG3_5717_PLUS)
13111 tp->tg3_flags3 |= TG3_FLG3_USE_JUMBO_BDFLAG;
13113 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS) ||
13114 (tp->tg3_flags2 & TG3_FLG2_5780_CLASS) ||
13115 (tp->tg3_flags3 & TG3_FLG3_USE_JUMBO_BDFLAG))
13116 tp->tg3_flags |= TG3_FLAG_JUMBO_CAPABLE;
13118 pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE,
13119 &pci_state_reg);
13121 tp->pcie_cap = pci_find_capability(tp->pdev, PCI_CAP_ID_EXP);
13122 if (tp->pcie_cap != 0) {
13123 u16 lnkctl;
13125 tp->tg3_flags2 |= TG3_FLG2_PCI_EXPRESS;
13127 pcie_set_readrq(tp->pdev, 4096);
13129 pci_read_config_word(tp->pdev,
13130 tp->pcie_cap + PCI_EXP_LNKCTL,
13131 &lnkctl);
13132 if (lnkctl & PCI_EXP_LNKCTL_CLKREQ_EN) {
13133 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
13134 tp->tg3_flags2 &= ~TG3_FLG2_HW_TSO_2;
13135 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
13136 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
13137 tp->pci_chip_rev_id == CHIPREV_ID_57780_A0 ||
13138 tp->pci_chip_rev_id == CHIPREV_ID_57780_A1)
13139 tp->tg3_flags3 |= TG3_FLG3_CLKREQ_BUG;
13140 } else if (tp->pci_chip_rev_id == CHIPREV_ID_5717_A0) {
13141 tp->tg3_flags3 |= TG3_FLG3_L1PLLPD_EN;
13143 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785) {
13144 tp->tg3_flags2 |= TG3_FLG2_PCI_EXPRESS;
13145 } else if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS) ||
13146 (tp->tg3_flags2 & TG3_FLG2_5780_CLASS)) {
13147 tp->pcix_cap = pci_find_capability(tp->pdev, PCI_CAP_ID_PCIX);
13148 if (!tp->pcix_cap) {
13149 dev_err(&tp->pdev->dev,
13150 "Cannot find PCI-X capability, aborting\n");
13151 return -EIO;
13154 if (!(pci_state_reg & PCISTATE_CONV_PCI_MODE))
13155 tp->tg3_flags |= TG3_FLAG_PCIX_MODE;
13158 /* If we have an AMD 762 or VIA K8T800 chipset, write
13159 * reordering to the mailbox registers done by the host
13160 * controller can cause major troubles. We read back from
13161 * every mailbox register write to force the writes to be
13162 * posted to the chip in order.
13164 if (pci_dev_present(write_reorder_chipsets) &&
13165 !(tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS))
13166 tp->tg3_flags |= TG3_FLAG_MBOX_WRITE_REORDER;
13168 pci_read_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE,
13169 &tp->pci_cacheline_sz);
13170 pci_read_config_byte(tp->pdev, PCI_LATENCY_TIMER,
13171 &tp->pci_lat_timer);
13172 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 &&
13173 tp->pci_lat_timer < 64) {
13174 tp->pci_lat_timer = 64;
13175 pci_write_config_byte(tp->pdev, PCI_LATENCY_TIMER,
13176 tp->pci_lat_timer);
13179 if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5700_BX) {
13180 /* 5700 BX chips need to have their TX producer index
13181 * mailboxes written twice to work around a bug.
13183 tp->tg3_flags |= TG3_FLAG_TXD_MBOX_HWBUG;
13185 /* If we are in PCI-X mode, enable register write workaround.
13187 * The workaround is to use indirect register accesses
13188 * for all chip writes not to mailbox registers.
13190 if (tp->tg3_flags & TG3_FLAG_PCIX_MODE) {
13191 u32 pm_reg;
13193 tp->tg3_flags |= TG3_FLAG_PCIX_TARGET_HWBUG;
13195 /* The chip can have its power management PCI config
13196 * space registers clobbered due to this bug.
13197 * So explicitly force the chip into D0 here.
13199 pci_read_config_dword(tp->pdev,
13200 tp->pm_cap + PCI_PM_CTRL,
13201 &pm_reg);
13202 pm_reg &= ~PCI_PM_CTRL_STATE_MASK;
13203 pm_reg |= PCI_PM_CTRL_PME_ENABLE | 0 /* D0 */;
13204 pci_write_config_dword(tp->pdev,
13205 tp->pm_cap + PCI_PM_CTRL,
13206 pm_reg);
13208 /* Also, force SERR#/PERR# in PCI command. */
13209 pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
13210 pci_cmd |= PCI_COMMAND_PARITY | PCI_COMMAND_SERR;
13211 pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
13215 if ((pci_state_reg & PCISTATE_BUS_SPEED_HIGH) != 0)
13216 tp->tg3_flags |= TG3_FLAG_PCI_HIGH_SPEED;
13217 if ((pci_state_reg & PCISTATE_BUS_32BIT) != 0)
13218 tp->tg3_flags |= TG3_FLAG_PCI_32BIT;
13220 /* Chip-specific fixup from Broadcom driver */
13221 if ((tp->pci_chip_rev_id == CHIPREV_ID_5704_A0) &&
13222 (!(pci_state_reg & PCISTATE_RETRY_SAME_DMA))) {
13223 pci_state_reg |= PCISTATE_RETRY_SAME_DMA;
13224 pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE, pci_state_reg);
13227 /* Default fast path register access methods */
13228 tp->read32 = tg3_read32;
13229 tp->write32 = tg3_write32;
13230 tp->read32_mbox = tg3_read32;
13231 tp->write32_mbox = tg3_write32;
13232 tp->write32_tx_mbox = tg3_write32;
13233 tp->write32_rx_mbox = tg3_write32;
13235 /* Various workaround register access methods */
13236 if (tp->tg3_flags & TG3_FLAG_PCIX_TARGET_HWBUG)
13237 tp->write32 = tg3_write_indirect_reg32;
13238 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701 ||
13239 ((tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) &&
13240 tp->pci_chip_rev_id == CHIPREV_ID_5750_A0)) {
13242 * Back to back register writes can cause problems on these
13243 * chips; the workaround is to read back all reg writes
13244 * except those to mailbox regs.
13246 * See tg3_write_indirect_reg32().
13248 tp->write32 = tg3_write_flush_reg32;
13251 if ((tp->tg3_flags & TG3_FLAG_TXD_MBOX_HWBUG) ||
13252 (tp->tg3_flags & TG3_FLAG_MBOX_WRITE_REORDER)) {
13253 tp->write32_tx_mbox = tg3_write32_tx_mbox;
13254 if (tp->tg3_flags & TG3_FLAG_MBOX_WRITE_REORDER)
13255 tp->write32_rx_mbox = tg3_write_flush_reg32;
13258 if (tp->tg3_flags2 & TG3_FLG2_ICH_WORKAROUND) {
13259 tp->read32 = tg3_read_indirect_reg32;
13260 tp->write32 = tg3_write_indirect_reg32;
13261 tp->read32_mbox = tg3_read_indirect_mbox;
13262 tp->write32_mbox = tg3_write_indirect_mbox;
13263 tp->write32_tx_mbox = tg3_write_indirect_mbox;
13264 tp->write32_rx_mbox = tg3_write_indirect_mbox;
13266 iounmap(tp->regs);
13267 tp->regs = NULL;
13269 pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
13270 pci_cmd &= ~PCI_COMMAND_MEMORY;
13271 pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
13273 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
13274 tp->read32_mbox = tg3_read32_mbox_5906;
13275 tp->write32_mbox = tg3_write32_mbox_5906;
13276 tp->write32_tx_mbox = tg3_write32_mbox_5906;
13277 tp->write32_rx_mbox = tg3_write32_mbox_5906;
13280 if (tp->write32 == tg3_write_indirect_reg32 ||
13281 ((tp->tg3_flags & TG3_FLAG_PCIX_MODE) &&
13282 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
13283 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701)))
13284 tp->tg3_flags |= TG3_FLAG_SRAM_USE_CONFIG;
13286 /* Get eeprom hw config before calling tg3_set_power_state().
13287 * In particular, the TG3_FLG2_IS_NIC flag must be
13288 * determined before calling tg3_set_power_state() so that
13289 * we know whether or not to switch out of Vaux power.
13290 * When the flag is set, it means that GPIO1 is used for eeprom
13291 * write protect and also implies that it is a LOM where GPIOs
13292 * are not used to switch power.
13294 tg3_get_eeprom_hw_cfg(tp);
13296 if (tp->tg3_flags3 & TG3_FLG3_ENABLE_APE) {
13297 /* Allow reads and writes to the
13298 * APE register and memory space.
13300 pci_state_reg |= PCISTATE_ALLOW_APE_CTLSPC_WR |
13301 PCISTATE_ALLOW_APE_SHMEM_WR |
13302 PCISTATE_ALLOW_APE_PSPACE_WR;
13303 pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE,
13304 pci_state_reg);
13307 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
13308 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
13309 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
13310 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780 ||
13311 (tp->tg3_flags3 & TG3_FLG3_5717_PLUS))
13312 tp->tg3_flags |= TG3_FLAG_CPMU_PRESENT;
13314 /* Set up tp->grc_local_ctrl before calling tg3_set_power_state().
13315 * GPIO1 driven high will bring 5700's external PHY out of reset.
13316 * It is also used as eeprom write protect on LOMs.
13318 tp->grc_local_ctrl = GRC_LCLCTRL_INT_ON_ATTN | GRC_LCLCTRL_AUTO_SEEPROM;
13319 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700) ||
13320 (tp->tg3_flags & TG3_FLAG_EEPROM_WRITE_PROT))
13321 tp->grc_local_ctrl |= (GRC_LCLCTRL_GPIO_OE1 |
13322 GRC_LCLCTRL_GPIO_OUTPUT1);
13323 /* Unused GPIO3 must be driven as output on 5752 because there
13324 * are no pull-up resistors on unused GPIO pins.
13326 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752)
13327 tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE3;
13329 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
13330 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780 ||
13331 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765)
13332 tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_UART_SEL;
13334 if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761 ||
13335 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761S) {
13336 /* Turn off the debug UART. */
13337 tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_UART_SEL;
13338 if (tp->tg3_flags2 & TG3_FLG2_IS_NIC)
13339 /* Keep VMain power. */
13340 tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE0 |
13341 GRC_LCLCTRL_GPIO_OUTPUT0;
13344 /* Force the chip into D0. */
13345 err = tg3_set_power_state(tp, PCI_D0);
13346 if (err) {
13347 dev_err(&tp->pdev->dev, "Transition to D0 failed\n");
13348 return err;
13351 /* Derive initial jumbo mode from MTU assigned in
13352 * ether_setup() via the alloc_etherdev() call
13354 if (tp->dev->mtu > ETH_DATA_LEN &&
13355 !(tp->tg3_flags2 & TG3_FLG2_5780_CLASS))
13356 tp->tg3_flags |= TG3_FLAG_JUMBO_RING_ENABLE;
13358 /* Determine WakeOnLan speed to use. */
13359 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
13360 tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
13361 tp->pci_chip_rev_id == CHIPREV_ID_5701_B0 ||
13362 tp->pci_chip_rev_id == CHIPREV_ID_5701_B2) {
13363 tp->tg3_flags &= ~(TG3_FLAG_WOL_SPEED_100MB);
13364 } else {
13365 tp->tg3_flags |= TG3_FLAG_WOL_SPEED_100MB;
13368 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
13369 tp->phy_flags |= TG3_PHYFLG_IS_FET;
13371 /* A few boards don't want Ethernet@WireSpeed phy feature */
13372 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700) ||
13373 ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) &&
13374 (tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) &&
13375 (tp->pci_chip_rev_id != CHIPREV_ID_5705_A1)) ||
13376 (tp->phy_flags & TG3_PHYFLG_IS_FET) ||
13377 (tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
13378 tp->phy_flags |= TG3_PHYFLG_NO_ETH_WIRE_SPEED;
13380 if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5703_AX ||
13381 GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5704_AX)
13382 tp->phy_flags |= TG3_PHYFLG_ADC_BUG;
13383 if (tp->pci_chip_rev_id == CHIPREV_ID_5704_A0)
13384 tp->phy_flags |= TG3_PHYFLG_5704_A0_BUG;
13386 if ((tp->tg3_flags2 & TG3_FLG2_5705_PLUS) &&
13387 !(tp->phy_flags & TG3_PHYFLG_IS_FET) &&
13388 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5785 &&
13389 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_57780 &&
13390 !(tp->tg3_flags3 & TG3_FLG3_5717_PLUS)) {
13391 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
13392 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 ||
13393 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
13394 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761) {
13395 if (tp->pdev->device != PCI_DEVICE_ID_TIGON3_5756 &&
13396 tp->pdev->device != PCI_DEVICE_ID_TIGON3_5722)
13397 tp->phy_flags |= TG3_PHYFLG_JITTER_BUG;
13398 if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5755M)
13399 tp->phy_flags |= TG3_PHYFLG_ADJUST_TRIM;
13400 } else
13401 tp->phy_flags |= TG3_PHYFLG_BER_BUG;
13404 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 &&
13405 GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5784_AX) {
13406 tp->phy_otp = tg3_read_otp_phycfg(tp);
13407 if (tp->phy_otp == 0)
13408 tp->phy_otp = TG3_OTP_DEFAULT;
13411 if (tp->tg3_flags & TG3_FLAG_CPMU_PRESENT)
13412 tp->mi_mode = MAC_MI_MODE_500KHZ_CONST;
13413 else
13414 tp->mi_mode = MAC_MI_MODE_BASE;
13416 tp->coalesce_mode = 0;
13417 if (GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5700_AX &&
13418 GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5700_BX)
13419 tp->coalesce_mode |= HOSTCC_MODE_32BYTE;
13421 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
13422 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780)
13423 tp->tg3_flags3 |= TG3_FLG3_USE_PHYLIB;
13425 err = tg3_mdio_init(tp);
13426 if (err)
13427 return err;
13429 /* Initialize data/descriptor byte/word swapping. */
13430 val = tr32(GRC_MODE);
13431 val &= GRC_MODE_HOST_STACKUP;
13432 tw32(GRC_MODE, val | tp->grc_mode);
13434 tg3_switch_clocks(tp);
13436 /* Clear this out for sanity. */
13437 tw32(TG3PCI_MEM_WIN_BASE_ADDR, 0);
13439 pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE,
13440 &pci_state_reg);
13441 if ((pci_state_reg & PCISTATE_CONV_PCI_MODE) == 0 &&
13442 (tp->tg3_flags & TG3_FLAG_PCIX_TARGET_HWBUG) == 0) {
13443 u32 chiprevid = GET_CHIP_REV_ID(tp->misc_host_ctrl);
13445 if (chiprevid == CHIPREV_ID_5701_A0 ||
13446 chiprevid == CHIPREV_ID_5701_B0 ||
13447 chiprevid == CHIPREV_ID_5701_B2 ||
13448 chiprevid == CHIPREV_ID_5701_B5) {
13449 void __iomem *sram_base;
13451 /* Write some dummy words into the SRAM status block
13452 * area, see if it reads back correctly. If the return
13453 * value is bad, force enable the PCIX workaround.
13455 sram_base = tp->regs + NIC_SRAM_WIN_BASE + NIC_SRAM_STATS_BLK;
13457 writel(0x00000000, sram_base);
13458 writel(0x00000000, sram_base + 4);
13459 writel(0xffffffff, sram_base + 4);
13460 if (readl(sram_base) != 0x00000000)
13461 tp->tg3_flags |= TG3_FLAG_PCIX_TARGET_HWBUG;
13465 udelay(50);
13466 tg3_nvram_init(tp);
13468 grc_misc_cfg = tr32(GRC_MISC_CFG);
13469 grc_misc_cfg &= GRC_MISC_CFG_BOARD_ID_MASK;
13471 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
13472 (grc_misc_cfg == GRC_MISC_CFG_BOARD_ID_5788 ||
13473 grc_misc_cfg == GRC_MISC_CFG_BOARD_ID_5788M))
13474 tp->tg3_flags2 |= TG3_FLG2_IS_5788;
13476 if (!(tp->tg3_flags2 & TG3_FLG2_IS_5788) &&
13477 (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700))
13478 tp->tg3_flags |= TG3_FLAG_TAGGED_STATUS;
13479 if (tp->tg3_flags & TG3_FLAG_TAGGED_STATUS) {
13480 tp->coalesce_mode |= (HOSTCC_MODE_CLRTICK_RXBD |
13481 HOSTCC_MODE_CLRTICK_TXBD);
13483 tp->misc_host_ctrl |= MISC_HOST_CTRL_TAGGED_STATUS;
13484 pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
13485 tp->misc_host_ctrl);
13488 /* Preserve the APE MAC_MODE bits */
13489 if (tp->tg3_flags3 & TG3_FLG3_ENABLE_APE)
13490 tp->mac_mode = tr32(MAC_MODE) |
13491 MAC_MODE_APE_TX_EN | MAC_MODE_APE_RX_EN;
13492 else
13493 tp->mac_mode = TG3_DEF_MAC_MODE;
13495 /* these are limited to 10/100 only */
13496 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 &&
13497 (grc_misc_cfg == 0x8000 || grc_misc_cfg == 0x4000)) ||
13498 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
13499 tp->pdev->vendor == PCI_VENDOR_ID_BROADCOM &&
13500 (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5901 ||
13501 tp->pdev->device == PCI_DEVICE_ID_TIGON3_5901_2 ||
13502 tp->pdev->device == PCI_DEVICE_ID_TIGON3_5705F)) ||
13503 (tp->pdev->vendor == PCI_VENDOR_ID_BROADCOM &&
13504 (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5751F ||
13505 tp->pdev->device == PCI_DEVICE_ID_TIGON3_5753F ||
13506 tp->pdev->device == PCI_DEVICE_ID_TIGON3_5787F)) ||
13507 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57790 ||
13508 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57791 ||
13509 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57795 ||
13510 (tp->phy_flags & TG3_PHYFLG_IS_FET))
13511 tp->phy_flags |= TG3_PHYFLG_10_100_ONLY;
13513 err = tg3_phy_probe(tp);
13514 if (err) {
13515 dev_err(&tp->pdev->dev, "phy probe failed, err %d\n", err);
13516 /* ... but do not return immediately ... */
13517 tg3_mdio_fini(tp);
13520 tg3_read_vpd(tp);
13521 tg3_read_fw_ver(tp);
13523 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
13524 tp->phy_flags &= ~TG3_PHYFLG_USE_MI_INTERRUPT;
13525 } else {
13526 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700)
13527 tp->phy_flags |= TG3_PHYFLG_USE_MI_INTERRUPT;
13528 else
13529 tp->phy_flags &= ~TG3_PHYFLG_USE_MI_INTERRUPT;
13532 /* 5700 {AX,BX} chips have a broken status block link
13533 * change bit implementation, so we must use the
13534 * status register in those cases.
13536 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700)
13537 tp->tg3_flags |= TG3_FLAG_USE_LINKCHG_REG;
13538 else
13539 tp->tg3_flags &= ~TG3_FLAG_USE_LINKCHG_REG;
13541 /* The led_ctrl is set during tg3_phy_probe, here we might
13542 * have to force the link status polling mechanism based
13543 * upon subsystem IDs.
13545 if (tp->pdev->subsystem_vendor == PCI_VENDOR_ID_DELL &&
13546 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701 &&
13547 !(tp->phy_flags & TG3_PHYFLG_PHY_SERDES)) {
13548 tp->phy_flags |= TG3_PHYFLG_USE_MI_INTERRUPT;
13549 tp->tg3_flags |= TG3_FLAG_USE_LINKCHG_REG;
13552 /* For all SERDES we poll the MAC status register. */
13553 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
13554 tp->tg3_flags |= TG3_FLAG_POLL_SERDES;
13555 else
13556 tp->tg3_flags &= ~TG3_FLAG_POLL_SERDES;
13558 tp->rx_offset = NET_IP_ALIGN + TG3_RX_HEADROOM;
13559 tp->rx_copy_thresh = TG3_RX_COPY_THRESHOLD;
13560 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701 &&
13561 (tp->tg3_flags & TG3_FLAG_PCIX_MODE) != 0) {
13562 tp->rx_offset -= NET_IP_ALIGN;
13563 #ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
13564 tp->rx_copy_thresh = ~(u16)0;
13565 #endif
13568 tp->rx_std_max_post = TG3_RX_RING_SIZE;
13570 /* Increment the rx prod index on the rx std ring by at most
13571 * 8 for these chips to work around hw errata.
13573 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750 ||
13574 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752 ||
13575 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755)
13576 tp->rx_std_max_post = 8;
13578 if (tp->tg3_flags & TG3_FLAG_ASPM_WORKAROUND)
13579 tp->pwrmgmt_thresh = tr32(PCIE_PWR_MGMT_THRESH) &
13580 PCIE_PWR_MGMT_L1_THRESH_MSK;
13582 return err;
13585 #ifdef CONFIG_SPARC
13586 static int __devinit tg3_get_macaddr_sparc(struct tg3 *tp)
13588 struct net_device *dev = tp->dev;
13589 struct pci_dev *pdev = tp->pdev;
13590 struct device_node *dp = pci_device_to_OF_node(pdev);
13591 const unsigned char *addr;
13592 int len;
13594 addr = of_get_property(dp, "local-mac-address", &len);
13595 if (addr && len == 6) {
13596 memcpy(dev->dev_addr, addr, 6);
13597 memcpy(dev->perm_addr, dev->dev_addr, 6);
13598 return 0;
13600 return -ENODEV;
13603 static int __devinit tg3_get_default_macaddr_sparc(struct tg3 *tp)
13605 struct net_device *dev = tp->dev;
13607 memcpy(dev->dev_addr, idprom->id_ethaddr, 6);
13608 memcpy(dev->perm_addr, idprom->id_ethaddr, 6);
13609 return 0;
13611 #endif
13613 static int __devinit tg3_get_device_address(struct tg3 *tp)
13615 struct net_device *dev = tp->dev;
13616 u32 hi, lo, mac_offset;
13617 int addr_ok = 0;
13619 #ifdef CONFIG_SPARC
13620 if (!tg3_get_macaddr_sparc(tp))
13621 return 0;
13622 #endif
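/* Default NVRAM offset of the MAC address; dual-MAC 5704/5780-class
 * parts, the multi-function 5717/5719, and the 5906 store theirs at
 * different offsets.
 */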
13624 mac_offset = 0x7c;
13625 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) ||
13626 (tp->tg3_flags2 & TG3_FLG2_5780_CLASS)) {
13627 if (tr32(TG3PCI_DUAL_MAC_CTRL) & DUAL_MAC_CTRL_ID)
13628 mac_offset = 0xcc;
13629 if (tg3_nvram_lock(tp))
13630 tw32_f(NVRAM_CMD, NVRAM_CMD_RESET);
13631 else
13632 tg3_nvram_unlock(tp);
13633 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
13634 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719) {
13635 if (PCI_FUNC(tp->pdev->devfn) & 1)
13636 mac_offset = 0xcc;
13637 if (PCI_FUNC(tp->pdev->devfn) > 1)
13638 mac_offset += 0x18c;
13639 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
13640 mac_offset = 0x10;
13642 /* First try to get it from MAC address mailbox. */
13643 tg3_read_mem(tp, NIC_SRAM_MAC_ADDR_HIGH_MBOX, &hi);
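/* 0x484b (ASCII "HK") in the upper 16 bits marks a mailbox that the
 * bootcode has filled with a MAC address.
 */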
13644 if ((hi >> 16) == 0x484b) {
13645 dev->dev_addr[0] = (hi >> 8) & 0xff;
13646 dev->dev_addr[1] = (hi >> 0) & 0xff;
13648 tg3_read_mem(tp, NIC_SRAM_MAC_ADDR_LOW_MBOX, &lo);
13649 dev->dev_addr[2] = (lo >> 24) & 0xff;
13650 dev->dev_addr[3] = (lo >> 16) & 0xff;
13651 dev->dev_addr[4] = (lo >> 8) & 0xff;
13652 dev->dev_addr[5] = (lo >> 0) & 0xff;
13654 /* Some old bootcode may report a 0 MAC address in SRAM */
13655 addr_ok = is_valid_ether_addr(&dev->dev_addr[0]);
13657 if (!addr_ok) {
13658 /* Next, try NVRAM. */
13659 if (!(tp->tg3_flags3 & TG3_FLG3_NO_NVRAM) &&
13660 !tg3_nvram_read_be32(tp, mac_offset + 0, &hi) &&
13661 !tg3_nvram_read_be32(tp, mac_offset + 4, &lo)) {
13662 memcpy(&dev->dev_addr[0], ((char *)&hi) + 2, 2);
13663 memcpy(&dev->dev_addr[2], (char *)&lo, sizeof(lo));
13665 /* Finally just fetch it out of the MAC control regs. */
13666 else {
13667 hi = tr32(MAC_ADDR_0_HIGH);
13668 lo = tr32(MAC_ADDR_0_LOW);
13670 dev->dev_addr[5] = lo & 0xff;
13671 dev->dev_addr[4] = (lo >> 8) & 0xff;
13672 dev->dev_addr[3] = (lo >> 16) & 0xff;
13673 dev->dev_addr[2] = (lo >> 24) & 0xff;
13674 dev->dev_addr[1] = hi & 0xff;
13675 dev->dev_addr[0] = (hi >> 8) & 0xff;
13679 if (!is_valid_ether_addr(&dev->dev_addr[0])) {
13680 #ifdef CONFIG_SPARC
13681 if (!tg3_get_default_macaddr_sparc(tp))
13682 return 0;
13683 #endif
13684 return -EINVAL;
13686 memcpy(dev->perm_addr, dev->dev_addr, dev->addr_len);
13687 return 0;
13690 #define BOUNDARY_SINGLE_CACHELINE 1
13691 #define BOUNDARY_MULTI_CACHELINE 2
13693 static u32 __devinit tg3_calc_dma_bndry(struct tg3 *tp, u32 val)
13695 int cacheline_size;
13696 u8 byte;
13697 int goal;
13699 pci_read_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE, &byte);
13700 if (byte == 0)
13701 cacheline_size = 1024;
13702 else
13703 cacheline_size = (int) byte * 4;
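/* PCI_CACHE_LINE_SIZE is expressed in 32-bit words; a value of zero
 * falls back to 1024 bytes.
 */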
13705 /* On 5703 and later chips, the boundary bits have no
13706 * effect.
13708 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
13709 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701 &&
13710 !(tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS))
13711 goto out;
13713 #if defined(CONFIG_PPC64) || defined(CONFIG_IA64) || defined(CONFIG_PARISC)
13714 goal = BOUNDARY_MULTI_CACHELINE;
13715 #else
13716 #if defined(CONFIG_SPARC64) || defined(CONFIG_ALPHA)
13717 goal = BOUNDARY_SINGLE_CACHELINE;
13718 #else
13719 goal = 0;
13720 #endif
13721 #endif
13723 if (tp->tg3_flags3 & TG3_FLG3_5717_PLUS) {
13724 val = goal ? 0 : DMA_RWCTRL_DIS_CACHE_ALIGNMENT;
13725 goto out;
13728 if (!goal)
13729 goto out;
13731 /* PCI controllers on most RISC systems tend to disconnect
13732 * when a device tries to burst across a cache-line boundary.
13733 * Therefore, letting tg3 do so just wastes PCI bandwidth.
13735 * Unfortunately, for PCI-E there are only limited
13736 * write-side controls for this, and thus for reads
13737 * we will still get the disconnects. We'll also waste
13738 * these PCI cycles for both read and write for chips
13739 * other than 5700 and 5701 which do not implement the
13740 * boundary bits.
13742 if ((tp->tg3_flags & TG3_FLAG_PCIX_MODE) &&
13743 !(tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS)) {
13744 switch (cacheline_size) {
13745 case 16:
13746 case 32:
13747 case 64:
13748 case 128:
13749 if (goal == BOUNDARY_SINGLE_CACHELINE) {
13750 val |= (DMA_RWCTRL_READ_BNDRY_128_PCIX |
13751 DMA_RWCTRL_WRITE_BNDRY_128_PCIX);
13752 } else {
13753 val |= (DMA_RWCTRL_READ_BNDRY_384_PCIX |
13754 DMA_RWCTRL_WRITE_BNDRY_384_PCIX);
13756 break;
13758 case 256:
13759 val |= (DMA_RWCTRL_READ_BNDRY_256_PCIX |
13760 DMA_RWCTRL_WRITE_BNDRY_256_PCIX);
13761 break;
13763 default:
13764 val |= (DMA_RWCTRL_READ_BNDRY_384_PCIX |
13765 DMA_RWCTRL_WRITE_BNDRY_384_PCIX);
13766 break;
13768 } else if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) {
13769 switch (cacheline_size) {
13770 case 16:
13771 case 32:
13772 case 64:
13773 if (goal == BOUNDARY_SINGLE_CACHELINE) {
13774 val &= ~DMA_RWCTRL_WRITE_BNDRY_DISAB_PCIE;
13775 val |= DMA_RWCTRL_WRITE_BNDRY_64_PCIE;
13776 break;
13778 /* fallthrough */
13779 case 128:
13780 default:
13781 val &= ~DMA_RWCTRL_WRITE_BNDRY_DISAB_PCIE;
13782 val |= DMA_RWCTRL_WRITE_BNDRY_128_PCIE;
13783 break;
13785 } else {
13786 switch (cacheline_size) {
13787 case 16:
13788 if (goal == BOUNDARY_SINGLE_CACHELINE) {
13789 val |= (DMA_RWCTRL_READ_BNDRY_16 |
13790 DMA_RWCTRL_WRITE_BNDRY_16);
13791 break;
13793 /* fallthrough */
13794 case 32:
13795 if (goal == BOUNDARY_SINGLE_CACHELINE) {
13796 val |= (DMA_RWCTRL_READ_BNDRY_32 |
13797 DMA_RWCTRL_WRITE_BNDRY_32);
13798 break;
13800 /* fallthrough */
13801 case 64:
13802 if (goal == BOUNDARY_SINGLE_CACHELINE) {
13803 val |= (DMA_RWCTRL_READ_BNDRY_64 |
13804 DMA_RWCTRL_WRITE_BNDRY_64);
13805 break;
13806 }
13807 /* fallthrough */
13808 case 128:
13809 if (goal == BOUNDARY_SINGLE_CACHELINE) {
13810 val |= (DMA_RWCTRL_READ_BNDRY_128 |
13811 DMA_RWCTRL_WRITE_BNDRY_128);
13812 break;
13813 }
13814 /* fallthrough */
13815 case 256:
13816 val |= (DMA_RWCTRL_READ_BNDRY_256 |
13817 DMA_RWCTRL_WRITE_BNDRY_256);
13818 break;
13819 case 512:
13820 val |= (DMA_RWCTRL_READ_BNDRY_512 |
13821 DMA_RWCTRL_WRITE_BNDRY_512);
13822 break;
13823 case 1024:
13824 default:
13825 val |= (DMA_RWCTRL_READ_BNDRY_1024 |
13826 DMA_RWCTRL_WRITE_BNDRY_1024);
13827 break;
13828 }
13829 }
13831 out:
13832 return val;
13833 }
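/* tg3_do_test_dma() moves 'size' bytes between the host buffer at
 * 'buf_dma' and NIC on-chip SRAM (offset 0x2100) using one internal
 * buffer descriptor written through the PCI memory window, then polls
 * the matching completion FIFO for up to 40 * 100 usec.  'to_device'
 * selects a read DMA (host to NIC) instead of a write DMA (NIC to host).
 */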
13835 static int __devinit tg3_do_test_dma(struct tg3 *tp, u32 *buf, dma_addr_t buf_dma, int size, int to_device)
13836 {
13837 struct tg3_internal_buffer_desc test_desc;
13838 u32 sram_dma_descs;
13839 int i, ret;
13841 sram_dma_descs = NIC_SRAM_DMA_DESC_POOL_BASE;
13843 tw32(FTQ_RCVBD_COMP_FIFO_ENQDEQ, 0);
13844 tw32(FTQ_RCVDATA_COMP_FIFO_ENQDEQ, 0);
13845 tw32(RDMAC_STATUS, 0);
13846 tw32(WDMAC_STATUS, 0);
13848 tw32(BUFMGR_MODE, 0);
13849 tw32(FTQ_RESET, 0);
13851 test_desc.addr_hi = ((u64) buf_dma) >> 32;
13852 test_desc.addr_lo = buf_dma & 0xffffffff;
13853 test_desc.nic_mbuf = 0x00002100;
13854 test_desc.len = size;
13856 /*
13857 * HP ZX1 was seeing test failures for 5701 cards running at 33MHz
13858 * the *second* time the tg3 driver was getting loaded after an
13859 * initial scan.
13861 * Broadcom tells me:
13862 * ...the DMA engine is connected to the GRC block and a DMA
13863 * reset may affect the GRC block in some unpredictable way...
13864 * The behavior of resets to individual blocks has not been tested.
13866 * Broadcom noted the GRC reset will also reset all sub-components.
13867 */
13868 if (to_device) {
13869 test_desc.cqid_sqid = (13 << 8) | 2;
13871 tw32_f(RDMAC_MODE, RDMAC_MODE_ENABLE);
13872 udelay(40);
13873 } else {
13874 test_desc.cqid_sqid = (16 << 8) | 7;
13876 tw32_f(WDMAC_MODE, WDMAC_MODE_ENABLE);
13877 udelay(40);
13878 }
13879 test_desc.flags = 0x00000005;
13881 for (i = 0; i < (sizeof(test_desc) / sizeof(u32)); i++) {
13882 u32 val;
13884 val = *(((u32 *)&test_desc) + i);
13885 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR,
13886 sram_dma_descs + (i * sizeof(u32)));
13887 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);
13888 }
13889 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
13891 if (to_device)
13892 tw32(FTQ_DMA_HIGH_READ_FIFO_ENQDEQ, sram_dma_descs);
13893 else
13894 tw32(FTQ_DMA_HIGH_WRITE_FIFO_ENQDEQ, sram_dma_descs);
13896 ret = -ENODEV;
13897 for (i = 0; i < 40; i++) {
13898 u32 val;
13900 if (to_device)
13901 val = tr32(FTQ_RCVBD_COMP_FIFO_ENQDEQ);
13902 else
13903 val = tr32(FTQ_RCVDATA_COMP_FIFO_ENQDEQ);
13904 if ((val & 0xffff) == sram_dma_descs) {
13905 ret = 0;
13906 break;
13907 }
13909 udelay(100);
13910 }
13912 return ret;
13913 }
13915 #define TEST_BUFFER_SIZE 0x2000
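/* tg3_test_dma() computes the final dma_rwctrl value for the chip.  On
 * 5700/5701 it additionally runs a loopback test: a pattern buffer is
 * DMAed to NIC SRAM with the maximum write burst size and read back,
 * and if the data returns corrupted the write boundary is clamped to
 * 16 bytes to work around the 5700/5701 write DMA bug.
 */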
13917 static int __devinit tg3_test_dma(struct tg3 *tp)
13918 {
13919 dma_addr_t buf_dma;
13920 u32 *buf, saved_dma_rwctrl;
13921 int ret = 0;
13923 buf = pci_alloc_consistent(tp->pdev, TEST_BUFFER_SIZE, &buf_dma);
13924 if (!buf) {
13925 ret = -ENOMEM;
13926 goto out_nofree;
13927 }
13929 tp->dma_rwctrl = ((0x7 << DMA_RWCTRL_PCI_WRITE_CMD_SHIFT) |
13930 (0x6 << DMA_RWCTRL_PCI_READ_CMD_SHIFT));
13932 tp->dma_rwctrl = tg3_calc_dma_bndry(tp, tp->dma_rwctrl);
13934 if (tp->tg3_flags3 & TG3_FLG3_5717_PLUS)
13935 goto out;
13937 if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) {
13938 /* DMA read watermark not used on PCIE */
13939 tp->dma_rwctrl |= 0x00180000;
13940 } else if (!(tp->tg3_flags & TG3_FLAG_PCIX_MODE)) {
13941 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 ||
13942 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750)
13943 tp->dma_rwctrl |= 0x003f0000;
13944 else
13945 tp->dma_rwctrl |= 0x003f000f;
13946 } else {
13947 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
13948 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
13949 u32 ccval = (tr32(TG3PCI_CLOCK_CTRL) & 0x1f);
13950 u32 read_water = 0x7;
13952 /* If the 5704 is behind the EPB bridge, we can
13953 * do the less restrictive ONE_DMA workaround for
13954 * better performance.
13955 */
13956 if ((tp->tg3_flags & TG3_FLAG_40BIT_DMA_BUG) &&
13957 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704)
13958 tp->dma_rwctrl |= 0x8000;
13959 else if (ccval == 0x6 || ccval == 0x7)
13960 tp->dma_rwctrl |= DMA_RWCTRL_ONE_DMA;
13962 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703)
13963 read_water = 4;
13964 /* Set bit 23 to enable PCIX hw bug fix */
13965 tp->dma_rwctrl |=
13966 (read_water << DMA_RWCTRL_READ_WATER_SHIFT) |
13967 (0x3 << DMA_RWCTRL_WRITE_WATER_SHIFT) |
13968 (1 << 23);
13969 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780) {
13970 /* 5780 always in PCIX mode */
13971 tp->dma_rwctrl |= 0x00144000;
13972 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714) {
13973 /* 5714 always in PCIX mode */
13974 tp->dma_rwctrl |= 0x00148000;
13975 } else {
13976 tp->dma_rwctrl |= 0x001b000f;
13977 }
13978 }
13980 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
13981 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704)
13982 tp->dma_rwctrl &= 0xfffffff0;
13984 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
13985 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
13986 /* Remove this if it causes problems for some boards. */
13987 tp->dma_rwctrl |= DMA_RWCTRL_USE_MEM_READ_MULT;
13989 /* On 5700/5701 chips, we need to set this bit.
13990 * Otherwise the chip will issue cacheline transactions
13991 * to streamable DMA memory with not all the byte
13992 * enables turned on. This is an error on several
13993 * RISC PCI controllers, in particular sparc64.
13995 * On 5703/5704 chips, this bit has been reassigned
13996 * a different meaning. In particular, it is used
13997 * on those chips to enable a PCI-X workaround.
13998 */
13999 tp->dma_rwctrl |= DMA_RWCTRL_ASSERT_ALL_BE;
14000 }
14002 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
14004 #if 0
14005 /* Unneeded, already done by tg3_get_invariants. */
14006 tg3_switch_clocks(tp);
14007 #endif
14009 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
14010 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701)
14011 goto out;
14013 /* It is best to perform DMA test with maximum write burst size
14014 * to expose the 5700/5701 write DMA bug.
14015 */
14016 saved_dma_rwctrl = tp->dma_rwctrl;
14017 tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
14018 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
14020 while (1) {
14021 u32 *p = buf, i;
14023 for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++)
14024 p[i] = i;
14026 /* Send the buffer to the chip. */
14027 ret = tg3_do_test_dma(tp, buf, buf_dma, TEST_BUFFER_SIZE, 1);
14028 if (ret) {
14029 dev_err(&tp->pdev->dev,
14030 "%s: Buffer write failed. err = %d\n",
14031 __func__, ret);
14032 break;
14033 }
14035 #if 0
14036 /* validate data reached card RAM correctly. */
14037 for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++) {
14038 u32 val;
14039 tg3_read_mem(tp, 0x2100 + (i*4), &val);
14040 if (le32_to_cpu(val) != p[i]) {
14041 dev_err(&tp->pdev->dev,
14042 "%s: Buffer corrupted on device! "
14043 "(%d != %d)\n", __func__, val, i);
14044 /* ret = -ENODEV here? */
14045 }
14046 p[i] = 0;
14047 }
14048 #endif
14049 /* Now read it back. */
14050 ret = tg3_do_test_dma(tp, buf, buf_dma, TEST_BUFFER_SIZE, 0);
14051 if (ret) {
14052 dev_err(&tp->pdev->dev, "%s: Buffer read failed. "
14053 "err = %d\n", __func__, ret);
14054 break;
14055 }
14057 /* Verify it. */
14058 for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++) {
14059 if (p[i] == i)
14060 continue;
14062 if ((tp->dma_rwctrl & DMA_RWCTRL_WRITE_BNDRY_MASK) !=
14063 DMA_RWCTRL_WRITE_BNDRY_16) {
14064 tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
14065 tp->dma_rwctrl |= DMA_RWCTRL_WRITE_BNDRY_16;
14066 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
14067 break;
14068 } else {
14069 dev_err(&tp->pdev->dev,
14070 "%s: Buffer corrupted on read back! "
14071 "(%d != %d)\n", __func__, p[i], i);
14072 ret = -ENODEV;
14073 goto out;
14074 }
14075 }
14077 if (i == (TEST_BUFFER_SIZE / sizeof(u32))) {
14078 /* Success. */
14079 ret = 0;
14080 break;
14081 }
14082 }
14083 if ((tp->dma_rwctrl & DMA_RWCTRL_WRITE_BNDRY_MASK) !=
14084 DMA_RWCTRL_WRITE_BNDRY_16) {
14085 static struct pci_device_id dma_wait_state_chipsets[] = {
14086 { PCI_DEVICE(PCI_VENDOR_ID_APPLE,
14087 PCI_DEVICE_ID_APPLE_UNI_N_PCI15) },
14088 { },
14089 };
14091 /* DMA test passed without adjusting DMA boundary,
14092 * now look for chipsets that are known to expose the
14093 * DMA bug without failing the test.
14094 */
14095 if (pci_dev_present(dma_wait_state_chipsets)) {
14096 tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
14097 tp->dma_rwctrl |= DMA_RWCTRL_WRITE_BNDRY_16;
14098 } else {
14099 /* Safe to use the calculated DMA boundary. */
14100 tp->dma_rwctrl = saved_dma_rwctrl;
14101 }
14103 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
14104 }
14106 out:
14107 pci_free_consistent(tp->pdev, TEST_BUFFER_SIZE, buf, buf_dma);
14108 out_nofree:
14109 return ret;
14110 }
14112 static void __devinit tg3_init_link_config(struct tg3 *tp)
14113 {
14114 tp->link_config.advertising =
14115 (ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full |
14116 ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full |
14117 ADVERTISED_1000baseT_Half | ADVERTISED_1000baseT_Full |
14118 ADVERTISED_Autoneg | ADVERTISED_MII);
14119 tp->link_config.speed = SPEED_INVALID;
14120 tp->link_config.duplex = DUPLEX_INVALID;
14121 tp->link_config.autoneg = AUTONEG_ENABLE;
14122 tp->link_config.active_speed = SPEED_INVALID;
14123 tp->link_config.active_duplex = DUPLEX_INVALID;
14124 tp->link_config.orig_speed = SPEED_INVALID;
14125 tp->link_config.orig_duplex = DUPLEX_INVALID;
14126 tp->link_config.orig_autoneg = AUTONEG_INVALID;
14127 }
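/* tg3_init_bufmgr_config() selects MAC buffer-manager watermarks
 * (standard and jumbo) according to chip family: 5717-class, 5705-class
 * (with a 5906 exception) or the original 5700-class defaults.
 */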
14129 static void __devinit tg3_init_bufmgr_config(struct tg3 *tp)
14130 {
14131 if (tp->tg3_flags3 & TG3_FLG3_5717_PLUS) {
14132 tp->bufmgr_config.mbuf_read_dma_low_water =
14133 DEFAULT_MB_RDMA_LOW_WATER_5705;
14134 tp->bufmgr_config.mbuf_mac_rx_low_water =
14135 DEFAULT_MB_MACRX_LOW_WATER_57765;
14136 tp->bufmgr_config.mbuf_high_water =
14137 DEFAULT_MB_HIGH_WATER_57765;
14139 tp->bufmgr_config.mbuf_read_dma_low_water_jumbo =
14140 DEFAULT_MB_RDMA_LOW_WATER_5705;
14141 tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo =
14142 DEFAULT_MB_MACRX_LOW_WATER_JUMBO_57765;
14143 tp->bufmgr_config.mbuf_high_water_jumbo =
14144 DEFAULT_MB_HIGH_WATER_JUMBO_57765;
14145 } else if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
14146 tp->bufmgr_config.mbuf_read_dma_low_water =
14147 DEFAULT_MB_RDMA_LOW_WATER_5705;
14148 tp->bufmgr_config.mbuf_mac_rx_low_water =
14149 DEFAULT_MB_MACRX_LOW_WATER_5705;
14150 tp->bufmgr_config.mbuf_high_water =
14151 DEFAULT_MB_HIGH_WATER_5705;
14152 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
14153 tp->bufmgr_config.mbuf_mac_rx_low_water =
14154 DEFAULT_MB_MACRX_LOW_WATER_5906;
14155 tp->bufmgr_config.mbuf_high_water =
14156 DEFAULT_MB_HIGH_WATER_5906;
14157 }
14159 tp->bufmgr_config.mbuf_read_dma_low_water_jumbo =
14160 DEFAULT_MB_RDMA_LOW_WATER_JUMBO_5780;
14161 tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo =
14162 DEFAULT_MB_MACRX_LOW_WATER_JUMBO_5780;
14163 tp->bufmgr_config.mbuf_high_water_jumbo =
14164 DEFAULT_MB_HIGH_WATER_JUMBO_5780;
14165 } else {
14166 tp->bufmgr_config.mbuf_read_dma_low_water =
14167 DEFAULT_MB_RDMA_LOW_WATER;
14168 tp->bufmgr_config.mbuf_mac_rx_low_water =
14169 DEFAULT_MB_MACRX_LOW_WATER;
14170 tp->bufmgr_config.mbuf_high_water =
14171 DEFAULT_MB_HIGH_WATER;
14173 tp->bufmgr_config.mbuf_read_dma_low_water_jumbo =
14174 DEFAULT_MB_RDMA_LOW_WATER_JUMBO;
14175 tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo =
14176 DEFAULT_MB_MACRX_LOW_WATER_JUMBO;
14177 tp->bufmgr_config.mbuf_high_water_jumbo =
14178 DEFAULT_MB_HIGH_WATER_JUMBO;
14179 }
14181 tp->bufmgr_config.dma_low_water = DEFAULT_DMA_LOW_WATER;
14182 tp->bufmgr_config.dma_high_water = DEFAULT_DMA_HIGH_WATER;
14183 }
14185 static char * __devinit tg3_phy_string(struct tg3 *tp)
14186 {
14187 switch (tp->phy_id & TG3_PHY_ID_MASK) {
14188 case TG3_PHY_ID_BCM5400: return "5400";
14189 case TG3_PHY_ID_BCM5401: return "5401";
14190 case TG3_PHY_ID_BCM5411: return "5411";
14191 case TG3_PHY_ID_BCM5701: return "5701";
14192 case TG3_PHY_ID_BCM5703: return "5703";
14193 case TG3_PHY_ID_BCM5704: return "5704";
14194 case TG3_PHY_ID_BCM5705: return "5705";
14195 case TG3_PHY_ID_BCM5750: return "5750";
14196 case TG3_PHY_ID_BCM5752: return "5752";
14197 case TG3_PHY_ID_BCM5714: return "5714";
14198 case TG3_PHY_ID_BCM5780: return "5780";
14199 case TG3_PHY_ID_BCM5755: return "5755";
14200 case TG3_PHY_ID_BCM5787: return "5787";
14201 case TG3_PHY_ID_BCM5784: return "5784";
14202 case TG3_PHY_ID_BCM5756: return "5722/5756";
14203 case TG3_PHY_ID_BCM5906: return "5906";
14204 case TG3_PHY_ID_BCM5761: return "5761";
14205 case TG3_PHY_ID_BCM5718C: return "5718C";
14206 case TG3_PHY_ID_BCM5718S: return "5718S";
14207 case TG3_PHY_ID_BCM57765: return "57765";
14208 case TG3_PHY_ID_BCM5719C: return "5719C";
14209 case TG3_PHY_ID_BCM8002: return "8002/serdes";
14210 case 0: return "serdes";
14211 default: return "unknown";
14212 }
14213 }
14215 static char * __devinit tg3_bus_string(struct tg3 *tp, char *str)
14216 {
14217 if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) {
14218 strcpy(str, "PCI Express");
14219 return str;
14220 } else if (tp->tg3_flags & TG3_FLAG_PCIX_MODE) {
14221 u32 clock_ctrl = tr32(TG3PCI_CLOCK_CTRL) & 0x1f;
14223 strcpy(str, "PCIX:");
14225 if ((clock_ctrl == 7) ||
14226 ((tr32(GRC_MISC_CFG) & GRC_MISC_CFG_BOARD_ID_MASK) ==
14227 GRC_MISC_CFG_BOARD_ID_5704CIOBE))
14228 strcat(str, "133MHz");
14229 else if (clock_ctrl == 0)
14230 strcat(str, "33MHz");
14231 else if (clock_ctrl == 2)
14232 strcat(str, "50MHz");
14233 else if (clock_ctrl == 4)
14234 strcat(str, "66MHz");
14235 else if (clock_ctrl == 6)
14236 strcat(str, "100MHz");
14237 } else {
14238 strcpy(str, "PCI:");
14239 if (tp->tg3_flags & TG3_FLAG_PCI_HIGH_SPEED)
14240 strcat(str, "66MHz");
14241 else
14242 strcat(str, "33MHz");
14243 }
14244 if (tp->tg3_flags & TG3_FLAG_PCI_32BIT)
14245 strcat(str, ":32-bit");
14246 else
14247 strcat(str, ":64-bit");
14248 return str;
14249 }
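/* tg3_find_peer() scans the other PCI functions in the same slot for the
 * second port of a dual-port device such as the 5704.  If no peer is
 * found the device is running single-port and tp->pdev itself is
 * returned.
 */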
14251 static struct pci_dev * __devinit tg3_find_peer(struct tg3 *tp)
14252 {
14253 struct pci_dev *peer;
14254 unsigned int func, devnr = tp->pdev->devfn & ~7;
14256 for (func = 0; func < 8; func++) {
14257 peer = pci_get_slot(tp->pdev->bus, devnr | func);
14258 if (peer && peer != tp->pdev)
14259 break;
14260 pci_dev_put(peer);
14261 }
14262 /* 5704 can be configured in single-port mode, set peer to
14263 * tp->pdev in that case.
14264 */
14265 if (!peer) {
14266 peer = tp->pdev;
14267 return peer;
14268 }
14270 /*
14271 * We don't need to keep the refcount elevated; there's no way
14272 * to remove one half of this device without removing the other
14273 */
14274 pci_dev_put(peer);
14276 return peer;
14277 }
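/* tg3_init_coal() seeds tp->coal with the default interrupt coalescing
 * parameters later reported through ethtool.  5705 and newer chips lack
 * the per-interrupt and statistics-block coalescing knobs, so those
 * fields are cleared there.
 */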
14279 static void __devinit tg3_init_coal(struct tg3 *tp)
14280 {
14281 struct ethtool_coalesce *ec = &tp->coal;
14283 memset(ec, 0, sizeof(*ec));
14284 ec->cmd = ETHTOOL_GCOALESCE;
14285 ec->rx_coalesce_usecs = LOW_RXCOL_TICKS;
14286 ec->tx_coalesce_usecs = LOW_TXCOL_TICKS;
14287 ec->rx_max_coalesced_frames = LOW_RXMAX_FRAMES;
14288 ec->tx_max_coalesced_frames = LOW_TXMAX_FRAMES;
14289 ec->rx_coalesce_usecs_irq = DEFAULT_RXCOAL_TICK_INT;
14290 ec->tx_coalesce_usecs_irq = DEFAULT_TXCOAL_TICK_INT;
14291 ec->rx_max_coalesced_frames_irq = DEFAULT_RXCOAL_MAXF_INT;
14292 ec->tx_max_coalesced_frames_irq = DEFAULT_TXCOAL_MAXF_INT;
14293 ec->stats_block_coalesce_usecs = DEFAULT_STAT_COAL_TICKS;
14295 if (tp->coalesce_mode & (HOSTCC_MODE_CLRTICK_RXBD |
14296 HOSTCC_MODE_CLRTICK_TXBD)) {
14297 ec->rx_coalesce_usecs = LOW_RXCOL_TICKS_CLRTCKS;
14298 ec->rx_coalesce_usecs_irq = DEFAULT_RXCOAL_TICK_INT_CLRTCKS;
14299 ec->tx_coalesce_usecs = LOW_TXCOL_TICKS_CLRTCKS;
14300 ec->tx_coalesce_usecs_irq = DEFAULT_TXCOAL_TICK_INT_CLRTCKS;
14301 }
14303 if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
14304 ec->rx_coalesce_usecs_irq = 0;
14305 ec->tx_coalesce_usecs_irq = 0;
14306 ec->stats_block_coalesce_usecs = 0;
14307 }
14308 }
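/* Two net_device_ops tables differ only in the transmit handler: chips
 * routed through the TX DMA workaround path use tg3_start_xmit_dma_bug,
 * everything else uses the plain tg3_start_xmit.
 */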
14310 static const struct net_device_ops tg3_netdev_ops = {
14311 .ndo_open = tg3_open,
14312 .ndo_stop = tg3_close,
14313 .ndo_start_xmit = tg3_start_xmit,
14314 .ndo_get_stats64 = tg3_get_stats64,
14315 .ndo_validate_addr = eth_validate_addr,
14316 .ndo_set_multicast_list = tg3_set_rx_mode,
14317 .ndo_set_mac_address = tg3_set_mac_addr,
14318 .ndo_do_ioctl = tg3_ioctl,
14319 .ndo_tx_timeout = tg3_tx_timeout,
14320 .ndo_change_mtu = tg3_change_mtu,
14321 #if TG3_VLAN_TAG_USED
14322 .ndo_vlan_rx_register = tg3_vlan_rx_register,
14323 #endif
14324 #ifdef CONFIG_NET_POLL_CONTROLLER
14325 .ndo_poll_controller = tg3_poll_controller,
14326 #endif
14327 };
14329 static const struct net_device_ops tg3_netdev_ops_dma_bug = {
14330 .ndo_open = tg3_open,
14331 .ndo_stop = tg3_close,
14332 .ndo_start_xmit = tg3_start_xmit_dma_bug,
14333 .ndo_get_stats64 = tg3_get_stats64,
14334 .ndo_validate_addr = eth_validate_addr,
14335 .ndo_set_multicast_list = tg3_set_rx_mode,
14336 .ndo_set_mac_address = tg3_set_mac_addr,
14337 .ndo_do_ioctl = tg3_ioctl,
14338 .ndo_tx_timeout = tg3_tx_timeout,
14339 .ndo_change_mtu = tg3_change_mtu,
14340 #if TG3_VLAN_TAG_USED
14341 .ndo_vlan_rx_register = tg3_vlan_rx_register,
14342 #endif
14343 #ifdef CONFIG_NET_POLL_CONTROLLER
14344 .ndo_poll_controller = tg3_poll_controller,
14345 #endif
14346 };
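/* tg3_init_one() is the PCI probe routine: it enables and maps the
 * device, reads the chip invariants, picks DMA masks and netdev_ops,
 * fetches the MAC address, runs the DMA test and finally registers the
 * net_device.
 */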
14348 static int __devinit tg3_init_one(struct pci_dev *pdev,
14349 const struct pci_device_id *ent)
14350 {
14351 struct net_device *dev;
14352 struct tg3 *tp;
14353 int i, err, pm_cap;
14354 u32 sndmbx, rcvmbx, intmbx;
14355 char str[40];
14356 u64 dma_mask, persist_dma_mask;
14358 printk_once(KERN_INFO "%s\n", version);
14360 err = pci_enable_device(pdev);
14361 if (err) {
14362 dev_err(&pdev->dev, "Cannot enable PCI device, aborting\n");
14363 return err;
14364 }
14366 err = pci_request_regions(pdev, DRV_MODULE_NAME);
14367 if (err) {
14368 dev_err(&pdev->dev, "Cannot obtain PCI resources, aborting\n");
14369 goto err_out_disable_pdev;
14370 }
14372 pci_set_master(pdev);
14374 /* Find power-management capability. */
14375 pm_cap = pci_find_capability(pdev, PCI_CAP_ID_PM);
14376 if (pm_cap == 0) {
14377 dev_err(&pdev->dev,
14378 "Cannot find Power Management capability, aborting\n");
14379 err = -EIO;
14380 goto err_out_free_res;
14381 }
14383 dev = alloc_etherdev_mq(sizeof(*tp), TG3_IRQ_MAX_VECS);
14384 if (!dev) {
14385 dev_err(&pdev->dev, "Etherdev alloc failed, aborting\n");
14386 err = -ENOMEM;
14387 goto err_out_free_res;
14388 }
14390 SET_NETDEV_DEV(dev, &pdev->dev);
14392 #if TG3_VLAN_TAG_USED
14393 dev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
14394 #endif
14396 tp = netdev_priv(dev);
14397 tp->pdev = pdev;
14398 tp->dev = dev;
14399 tp->pm_cap = pm_cap;
14400 tp->rx_mode = TG3_DEF_RX_MODE;
14401 tp->tx_mode = TG3_DEF_TX_MODE;
14403 if (tg3_debug > 0)
14404 tp->msg_enable = tg3_debug;
14405 else
14406 tp->msg_enable = TG3_DEF_MSG_ENABLE;
14408 /* The word/byte swap controls here control register access byte
14409 * swapping. DMA data byte swapping is controlled in the GRC_MODE
14410 * setting below.
14411 */
14412 tp->misc_host_ctrl =
14413 MISC_HOST_CTRL_MASK_PCI_INT |
14414 MISC_HOST_CTRL_WORD_SWAP |
14415 MISC_HOST_CTRL_INDIR_ACCESS |
14416 MISC_HOST_CTRL_PCISTATE_RW;
14418 /* The NONFRM (non-frame) byte/word swap controls take effect
14419 * on descriptor entries, anything which isn't packet data.
14421 * The StrongARM chips on the board (one for tx, one for rx)
14422 * are running in big-endian mode.
14423 */
14424 tp->grc_mode = (GRC_MODE_WSWAP_DATA | GRC_MODE_BSWAP_DATA |
14425 GRC_MODE_WSWAP_NONFRM_DATA);
14426 #ifdef __BIG_ENDIAN
14427 tp->grc_mode |= GRC_MODE_BSWAP_NONFRM_DATA;
14428 #endif
14429 spin_lock_init(&tp->lock);
14430 spin_lock_init(&tp->indirect_lock);
14431 INIT_WORK(&tp->reset_task, tg3_reset_task);
14433 tp->regs = pci_ioremap_bar(pdev, BAR_0);
14434 if (!tp->regs) {
14435 dev_err(&pdev->dev, "Cannot map device registers, aborting\n");
14436 err = -ENOMEM;
14437 goto err_out_free_dev;
14438 }
14440 tg3_init_link_config(tp);
14442 tp->rx_pending = TG3_DEF_RX_RING_PENDING;
14443 tp->rx_jumbo_pending = TG3_DEF_RX_JUMBO_RING_PENDING;
14445 dev->ethtool_ops = &tg3_ethtool_ops;
14446 dev->watchdog_timeo = TG3_TX_TIMEOUT;
14447 dev->irq = pdev->irq;
14449 err = tg3_get_invariants(tp);
14450 if (err) {
14451 dev_err(&pdev->dev,
14452 "Problem fetching invariants of chip, aborting\n");
14453 goto err_out_iounmap;
14454 }
14456 if ((tp->tg3_flags3 & TG3_FLG3_5755_PLUS) &&
14457 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5717 &&
14458 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5719)
14459 dev->netdev_ops = &tg3_netdev_ops;
14460 else
14461 dev->netdev_ops = &tg3_netdev_ops_dma_bug;
14464 /* The EPB bridge inside 5714, 5715, and 5780 and any
14465 * device behind the EPB cannot support DMA addresses > 40-bit.
14466 * On 64-bit systems with IOMMU, use 40-bit dma_mask.
14467 * On 64-bit systems without IOMMU, use 64-bit dma_mask and
14468 * do DMA address check in tg3_start_xmit().
14469 */
14470 if (tp->tg3_flags2 & TG3_FLG2_IS_5788)
14471 persist_dma_mask = dma_mask = DMA_BIT_MASK(32);
14472 else if (tp->tg3_flags & TG3_FLAG_40BIT_DMA_BUG) {
14473 persist_dma_mask = dma_mask = DMA_BIT_MASK(40);
14474 #ifdef CONFIG_HIGHMEM
14475 dma_mask = DMA_BIT_MASK(64);
14476 #endif
14477 } else
14478 persist_dma_mask = dma_mask = DMA_BIT_MASK(64);
14480 /* Configure DMA attributes. */
14481 if (dma_mask > DMA_BIT_MASK(32)) {
14482 err = pci_set_dma_mask(pdev, dma_mask);
14483 if (!err) {
14484 dev->features |= NETIF_F_HIGHDMA;
14485 err = pci_set_consistent_dma_mask(pdev,
14486 persist_dma_mask);
14487 if (err < 0) {
14488 dev_err(&pdev->dev, "Unable to obtain 64 bit "
14489 "DMA for consistent allocations\n");
14490 goto err_out_iounmap;
14491 }
14492 }
14493 }
14494 if (err || dma_mask == DMA_BIT_MASK(32)) {
14495 err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
14496 if (err) {
14497 dev_err(&pdev->dev,
14498 "No usable DMA configuration, aborting\n");
14499 goto err_out_iounmap;
14500 }
14501 }
14503 tg3_init_bufmgr_config(tp);
14505 /* Selectively allow TSO based on operating conditions */
14506 if ((tp->tg3_flags2 & TG3_FLG2_HW_TSO) ||
14507 (tp->fw_needed && !(tp->tg3_flags & TG3_FLAG_ENABLE_ASF)))
14508 tp->tg3_flags2 |= TG3_FLG2_TSO_CAPABLE;
14509 else {
14510 tp->tg3_flags2 &= ~(TG3_FLG2_TSO_CAPABLE | TG3_FLG2_TSO_BUG);
14511 tp->fw_needed = NULL;
14512 }
14514 if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0)
14515 tp->fw_needed = FIRMWARE_TG3;
14517 /* TSO is on by default on chips that support hardware TSO.
14518 * Firmware TSO on older chips gives lower performance, so it
14519 * is off by default, but can be enabled using ethtool.
14520 */
14521 if ((tp->tg3_flags2 & TG3_FLG2_HW_TSO) &&
14522 (dev->features & NETIF_F_IP_CSUM)) {
14523 dev->features |= NETIF_F_TSO;
14524 vlan_features_add(dev, NETIF_F_TSO);
14525 }
14526 if ((tp->tg3_flags2 & TG3_FLG2_HW_TSO_2) ||
14527 (tp->tg3_flags2 & TG3_FLG2_HW_TSO_3)) {
14528 if (dev->features & NETIF_F_IPV6_CSUM) {
14529 dev->features |= NETIF_F_TSO6;
14530 vlan_features_add(dev, NETIF_F_TSO6);
14531 }
14532 if ((tp->tg3_flags2 & TG3_FLG2_HW_TSO_3) ||
14533 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
14534 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 &&
14535 GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5784_AX) ||
14536 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
14537 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780) {
14538 dev->features |= NETIF_F_TSO_ECN;
14539 vlan_features_add(dev, NETIF_F_TSO_ECN);
14540 }
14541 }
14543 if (tp->pci_chip_rev_id == CHIPREV_ID_5705_A1 &&
14544 !(tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE) &&
14545 !(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH)) {
14546 tp->tg3_flags2 |= TG3_FLG2_MAX_RXPEND_64;
14547 tp->rx_pending = 63;
14548 }
14550 err = tg3_get_device_address(tp);
14551 if (err) {
14552 dev_err(&pdev->dev,
14553 "Could not obtain valid ethernet address, aborting\n");
14554 goto err_out_iounmap;
14555 }
14557 if (tp->tg3_flags3 & TG3_FLG3_ENABLE_APE) {
14558 tp->aperegs = pci_ioremap_bar(pdev, BAR_2);
14559 if (!tp->aperegs) {
14560 dev_err(&pdev->dev,
14561 "Cannot map APE registers, aborting\n");
14562 err = -ENOMEM;
14563 goto err_out_iounmap;
14564 }
14566 tg3_ape_lock_init(tp);
14568 if (tp->tg3_flags & TG3_FLAG_ENABLE_ASF)
14569 tg3_read_dash_ver(tp);
14570 }
14572 /*
14573 * Reset chip in case the UNDI or EFI driver did not shut it down.
14574 * The DMA self test will enable WDMAC and we'll see (spurious)
14575 * pending DMA on the PCI bus at that point.
14576 */
14577 if ((tr32(HOSTCC_MODE) & HOSTCC_MODE_ENABLE) ||
14578 (tr32(WDMAC_MODE) & WDMAC_MODE_ENABLE)) {
14579 tw32(MEMARB_MODE, MEMARB_MODE_ENABLE);
14580 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
14581 }
14583 err = tg3_test_dma(tp);
14584 if (err) {
14585 dev_err(&pdev->dev, "DMA engine test failed, aborting\n");
14586 goto err_out_apeunmap;
14587 }
14589 /* flow control autonegotiation is default behavior */
14590 tp->tg3_flags |= TG3_FLAG_PAUSE_AUTONEG;
14591 tp->link_config.flowctrl = FLOW_CTRL_TX | FLOW_CTRL_RX;
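/* Assign an interrupt, receive-return and send-producer mailbox to each
 * NAPI context.  Interrupt mailboxes are 8 bytes apart for the first
 * four vectors and 4 bytes apart after that; the receive and send
 * mailboxes below are only re-striped when MSI-X/RSS is supported.
 */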
14593 intmbx = MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW;
14594 rcvmbx = MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW;
14595 sndmbx = MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW;
14596 for (i = 0; i < tp->irq_max; i++) {
14597 struct tg3_napi *tnapi = &tp->napi[i];
14599 tnapi->tp = tp;
14600 tnapi->tx_pending = TG3_DEF_TX_RING_PENDING;
14602 tnapi->int_mbox = intmbx;
14603 if (i < 4)
14604 intmbx += 0x8;
14605 else
14606 intmbx += 0x4;
14608 tnapi->consmbox = rcvmbx;
14609 tnapi->prodmbox = sndmbx;
14611 if (i) {
14612 tnapi->coal_now = HOSTCC_MODE_COAL_VEC1_NOW << (i - 1);
14613 netif_napi_add(dev, &tnapi->napi, tg3_poll_msix, 64);
14614 } else {
14615 tnapi->coal_now = HOSTCC_MODE_NOW;
14616 netif_napi_add(dev, &tnapi->napi, tg3_poll, 64);
14617 }
14619 if (!(tp->tg3_flags & TG3_FLAG_SUPPORT_MSIX))
14620 break;
14622 /*
14623 * If we support MSIX, we'll be using RSS. If we're using
14624 * RSS, the first vector only handles link interrupts and the
14625 * remaining vectors handle rx and tx interrupts. Reuse the
14626 * mailbox values for the next iteration. The values we setup
14627 * above are still useful for the single vectored mode.
14628 */
14629 if (!i)
14630 continue;
14632 rcvmbx += 0x8;
14634 if (sndmbx & 0x4)
14635 sndmbx -= 0x4;
14636 else
14637 sndmbx += 0xc;
14638 }
14640 tg3_init_coal(tp);
14642 pci_set_drvdata(pdev, dev);
14644 err = register_netdev(dev);
14645 if (err) {
14646 dev_err(&pdev->dev, "Cannot register net device, aborting\n");
14647 goto err_out_apeunmap;
14648 }
14650 netdev_info(dev, "Tigon3 [partno(%s) rev %04x] (%s) MAC address %pM\n",
14651 tp->board_part_number,
14652 tp->pci_chip_rev_id,
14653 tg3_bus_string(tp, str),
14654 dev->dev_addr);
14656 if (tp->phy_flags & TG3_PHYFLG_IS_CONNECTED) {
14657 struct phy_device *phydev;
14658 phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
14659 netdev_info(dev,
14660 "attached PHY driver [%s] (mii_bus:phy_addr=%s)\n",
14661 phydev->drv->name, dev_name(&phydev->dev));
14662 } else {
14663 char *ethtype;
14665 if (tp->phy_flags & TG3_PHYFLG_10_100_ONLY)
14666 ethtype = "10/100Base-TX";
14667 else if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES)
14668 ethtype = "1000Base-SX";
14669 else
14670 ethtype = "10/100/1000Base-T";
14672 netdev_info(dev, "attached PHY is %s (%s Ethernet) "
14673 "(WireSpeed[%d])\n", tg3_phy_string(tp), ethtype,
14674 (tp->phy_flags & TG3_PHYFLG_NO_ETH_WIRE_SPEED) == 0);
14675 }
14677 netdev_info(dev, "RXcsums[%d] LinkChgREG[%d] MIirq[%d] ASF[%d] TSOcap[%d]\n",
14678 (tp->tg3_flags & TG3_FLAG_RX_CHECKSUMS) != 0,
14679 (tp->tg3_flags & TG3_FLAG_USE_LINKCHG_REG) != 0,
14680 (tp->phy_flags & TG3_PHYFLG_USE_MI_INTERRUPT) != 0,
14681 (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) != 0,
14682 (tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE) != 0);
14683 netdev_info(dev, "dma_rwctrl[%08x] dma_mask[%d-bit]\n",
14684 tp->dma_rwctrl,
14685 pdev->dma_mask == DMA_BIT_MASK(32) ? 32 :
14686 ((u64)pdev->dma_mask) == DMA_BIT_MASK(40) ? 40 : 64);
14688 return 0;
14690 err_out_apeunmap:
14691 if (tp->aperegs) {
14692 iounmap(tp->aperegs);
14693 tp->aperegs = NULL;
14694 }
14696 err_out_iounmap:
14697 if (tp->regs) {
14698 iounmap(tp->regs);
14699 tp->regs = NULL;
14700 }
14702 err_out_free_dev:
14703 free_netdev(dev);
14705 err_out_free_res:
14706 pci_release_regions(pdev);
14708 err_out_disable_pdev:
14709 pci_disable_device(pdev);
14710 pci_set_drvdata(pdev, NULL);
14711 return err;
14712 }
14714 static void __devexit tg3_remove_one(struct pci_dev *pdev)
14715 {
14716 struct net_device *dev = pci_get_drvdata(pdev);
14718 if (dev) {
14719 struct tg3 *tp = netdev_priv(dev);
14721 if (tp->fw)
14722 release_firmware(tp->fw);
14724 flush_scheduled_work();
14726 if (tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB) {
14727 tg3_phy_fini(tp);
14728 tg3_mdio_fini(tp);
14729 }
14731 unregister_netdev(dev);
14732 if (tp->aperegs) {
14733 iounmap(tp->aperegs);
14734 tp->aperegs = NULL;
14735 }
14736 if (tp->regs) {
14737 iounmap(tp->regs);
14738 tp->regs = NULL;
14739 }
14740 free_netdev(dev);
14741 pci_release_regions(pdev);
14742 pci_disable_device(pdev);
14743 pci_set_drvdata(pdev, NULL);
14744 }
14745 }
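/* Legacy PCI power-management hooks: tg3_suspend() saves PCI state,
 * quiesces the interface and drops the chip to the target power state;
 * tg3_resume() restores PCI state and restarts the hardware if the
 * interface was running.
 */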
14747 static int tg3_suspend(struct pci_dev *pdev, pm_message_t state)
14748 {
14749 struct net_device *dev = pci_get_drvdata(pdev);
14750 struct tg3 *tp = netdev_priv(dev);
14751 pci_power_t target_state;
14752 int err;
14754 /* PCI register 4 needs to be saved whether netif_running() or not.
14755 * MSI address and data need to be saved if using MSI and
14756 * netif_running().
14757 */
14758 pci_save_state(pdev);
14760 if (!netif_running(dev))
14761 return 0;
14763 flush_scheduled_work();
14764 tg3_phy_stop(tp);
14765 tg3_netif_stop(tp);
14767 del_timer_sync(&tp->timer);
14769 tg3_full_lock(tp, 1);
14770 tg3_disable_ints(tp);
14771 tg3_full_unlock(tp);
14773 netif_device_detach(dev);
14775 tg3_full_lock(tp, 0);
14776 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
14777 tp->tg3_flags &= ~TG3_FLAG_INIT_COMPLETE;
14778 tg3_full_unlock(tp);
14780 target_state = pdev->pm_cap ? pci_target_state(pdev) : PCI_D3hot;
14782 err = tg3_set_power_state(tp, target_state);
14783 if (err) {
14784 int err2;
14786 tg3_full_lock(tp, 0);
14788 tp->tg3_flags |= TG3_FLAG_INIT_COMPLETE;
14789 err2 = tg3_restart_hw(tp, 1);
14790 if (err2)
14791 goto out;
14793 tp->timer.expires = jiffies + tp->timer_offset;
14794 add_timer(&tp->timer);
14796 netif_device_attach(dev);
14797 tg3_netif_start(tp);
14799 out:
14800 tg3_full_unlock(tp);
14802 if (!err2)
14803 tg3_phy_start(tp);
14804 }
14806 return err;
14807 }
14809 static int tg3_resume(struct pci_dev *pdev)
14810 {
14811 struct net_device *dev = pci_get_drvdata(pdev);
14812 struct tg3 *tp = netdev_priv(dev);
14813 int err;
14815 pci_restore_state(tp->pdev);
14817 if (!netif_running(dev))
14818 return 0;
14820 err = tg3_set_power_state(tp, PCI_D0);
14821 if (err)
14822 return err;
14824 netif_device_attach(dev);
14826 tg3_full_lock(tp, 0);
14828 tp->tg3_flags |= TG3_FLAG_INIT_COMPLETE;
14829 err = tg3_restart_hw(tp, 1);
14830 if (err)
14831 goto out;
14833 tp->timer.expires = jiffies + tp->timer_offset;
14834 add_timer(&tp->timer);
14836 tg3_netif_start(tp);
14838 out:
14839 tg3_full_unlock(tp);
14841 if (!err)
14842 tg3_phy_start(tp);
14844 return err;
14845 }
14847 static struct pci_driver tg3_driver = {
14848 .name = DRV_MODULE_NAME,
14849 .id_table = tg3_pci_tbl,
14850 .probe = tg3_init_one,
14851 .remove = __devexit_p(tg3_remove_one),
14852 .suspend = tg3_suspend,
14853 .resume = tg3_resume
14854 };
14856 static int __init tg3_init(void)
14857 {
14858 return pci_register_driver(&tg3_driver);
14859 }
14861 static void __exit tg3_cleanup(void)
14862 {
14863 pci_unregister_driver(&tg3_driver);
14864 }
14866 module_init(tg3_init);
14867 module_exit(tg3_cleanup);