tg3: Fix TSO loopback test

/*
 * tg3.c: Broadcom Tigon3 ethernet driver.
 *
 * Copyright (C) 2001, 2002, 2003, 2004 David S. Miller (davem@redhat.com)
 * Copyright (C) 2001, 2002, 2003 Jeff Garzik (jgarzik@pobox.com)
 * Copyright (C) 2004 Sun Microsystems Inc.
 * Copyright (C) 2005-2011 Broadcom Corporation.
 *
 * Firmware is:
 *	Derived from proprietary unpublished source code,
 *	Copyright (C) 2000-2003 Broadcom Corporation.
 *
 *	Permission is hereby granted for the distribution of this firmware
 *	data in hexadecimal or equivalent format, provided this copyright
 *	notice is accompanying it.
 */
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/stringify.h>
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/compiler.h>
#include <linux/slab.h>
#include <linux/delay.h>
#include <linux/in.h>
#include <linux/init.h>
#include <linux/ioport.h>
#include <linux/pci.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/ethtool.h>
#include <linux/mdio.h>
#include <linux/mii.h>
#include <linux/phy.h>
#include <linux/brcmphy.h>
#include <linux/if_vlan.h>
#include <linux/ip.h>
#include <linux/tcp.h>
#include <linux/workqueue.h>
#include <linux/prefetch.h>
#include <linux/dma-mapping.h>
#include <linux/firmware.h>

#include <net/checksum.h>
#include <net/ip.h>

#include <asm/system.h>
#include <linux/io.h>
#include <asm/byteorder.h>
#include <linux/uaccess.h>
#ifdef CONFIG_SPARC
#include <asm/idprom.h>
#include <asm/prom.h>
#endif

#define BAR_0	0
#define BAR_2	2

#include "tg3.h"

/* Functions & macros to verify TG3_FLAGS types */

static inline int _tg3_flag(enum TG3_FLAGS flag, unsigned long *bits)
{
	return test_bit(flag, bits);
}

static inline void _tg3_flag_set(enum TG3_FLAGS flag, unsigned long *bits)
{
	set_bit(flag, bits);
}

static inline void _tg3_flag_clear(enum TG3_FLAGS flag, unsigned long *bits)
{
	clear_bit(flag, bits);
}

#define tg3_flag(tp, flag)				\
	_tg3_flag(TG3_FLAG_##flag, (tp)->tg3_flags)
#define tg3_flag_set(tp, flag)				\
	_tg3_flag_set(TG3_FLAG_##flag, (tp)->tg3_flags)
#define tg3_flag_clear(tp, flag)			\
	_tg3_flag_clear(TG3_FLAG_##flag, (tp)->tg3_flags)
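
/*
 * Illustrative usage of the helpers above (a sketch, not driver code):
 * the macros paste the short flag name onto the TG3_FLAG_ prefix, so e.g.
 * tg3_flag(tp, JUMBO_CAPABLE) tests TG3_FLAG_JUMBO_CAPABLE and
 * tg3_flag_set(tp, TSO_CAPABLE) sets TG3_FLAG_TSO_CAPABLE in
 * tp->tg3_flags; the two flag names are only examples of the pattern.
 */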

#define DRV_MODULE_NAME		"tg3"
#define TG3_MAJ_NUM		3
#define TG3_MIN_NUM		118
#define DRV_MODULE_VERSION	\
	__stringify(TG3_MAJ_NUM) "." __stringify(TG3_MIN_NUM)
#define DRV_MODULE_RELDATE	"April 22, 2011"

#define TG3_DEF_MAC_MODE	0
#define TG3_DEF_RX_MODE		0
#define TG3_DEF_TX_MODE		0
#define TG3_DEF_MSG_ENABLE	  \
	(NETIF_MSG_DRV		| \
	 NETIF_MSG_PROBE	| \
	 NETIF_MSG_LINK		| \
	 NETIF_MSG_TIMER	| \
	 NETIF_MSG_IFDOWN	| \
	 NETIF_MSG_IFUP		| \
	 NETIF_MSG_RX_ERR	| \
	 NETIF_MSG_TX_ERR)

/* length of time before we decide the hardware is borked,
 * and dev->tx_timeout() should be called to fix the problem
 */

#define TG3_TX_TIMEOUT			(5 * HZ)

/* hardware minimum and maximum for a single frame's data payload */
#define TG3_MIN_MTU			60
#define TG3_MAX_MTU(tp)	\
	(tg3_flag(tp, JUMBO_CAPABLE) ? 9000 : 1500)

/* These numbers seem to be hard coded in the NIC firmware somehow.
 * You can't change the ring sizes, but you can change where you place
 * them in the NIC onboard memory.
 */
#define TG3_RX_STD_RING_SIZE(tp) \
	(tg3_flag(tp, LRG_PROD_RING_CAP) ? \
	 TG3_RX_STD_MAX_SIZE_5717 : TG3_RX_STD_MAX_SIZE_5700)
#define TG3_DEF_RX_RING_PENDING		200
#define TG3_RX_JMB_RING_SIZE(tp) \
	(tg3_flag(tp, LRG_PROD_RING_CAP) ? \
	 TG3_RX_JMB_MAX_SIZE_5717 : TG3_RX_JMB_MAX_SIZE_5700)
#define TG3_DEF_RX_JUMBO_RING_PENDING	100
#define TG3_RSS_INDIR_TBL_SIZE		128

/* Do not place this n-ring entries value into the tp struct itself,
 * we really want to expose these constants to GCC so that modulo et
 * al.  operations are done with shifts and masks instead of with
 * hw multiply/modulo instructions.  Another solution would be to
 * replace things like '% foo' with '& (foo - 1)'.
 */

#define TG3_TX_RING_SIZE		512
#define TG3_DEF_TX_RING_PENDING		(TG3_TX_RING_SIZE - 1)

#define TG3_RX_STD_RING_BYTES(tp) \
	(sizeof(struct tg3_rx_buffer_desc) * TG3_RX_STD_RING_SIZE(tp))
#define TG3_RX_JMB_RING_BYTES(tp) \
	(sizeof(struct tg3_ext_rx_buffer_desc) * TG3_RX_JMB_RING_SIZE(tp))
#define TG3_RX_RCB_RING_BYTES(tp) \
	(sizeof(struct tg3_rx_buffer_desc) * (tp->rx_ret_ring_mask + 1))
#define TG3_TX_RING_BYTES	(sizeof(struct tg3_tx_buffer_desc) * \
				 TG3_TX_RING_SIZE)
#define NEXT_TX(N)		(((N) + 1) & (TG3_TX_RING_SIZE - 1))
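
/*
 * Worked example of the mask trick referred to above: TG3_TX_RING_SIZE
 * is 512, a power of two, so NEXT_TX(N) computes (N + 1) & 511, which is
 * identical to (N + 1) % 512 (NEXT_TX(511) wraps to 0) but needs no
 * hardware divide.
 */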

#define TG3_DMA_BYTE_ENAB		64

#define TG3_RX_STD_DMA_SZ		1536
#define TG3_RX_JMB_DMA_SZ		9046

#define TG3_RX_DMA_TO_MAP_SZ(x)		((x) + TG3_DMA_BYTE_ENAB)

#define TG3_RX_STD_MAP_SZ		TG3_RX_DMA_TO_MAP_SZ(TG3_RX_STD_DMA_SZ)
#define TG3_RX_JMB_MAP_SZ		TG3_RX_DMA_TO_MAP_SZ(TG3_RX_JMB_DMA_SZ)

#define TG3_RX_STD_BUFF_RING_SIZE(tp) \
	(sizeof(struct ring_info) * TG3_RX_STD_RING_SIZE(tp))

#define TG3_RX_JMB_BUFF_RING_SIZE(tp) \
	(sizeof(struct ring_info) * TG3_RX_JMB_RING_SIZE(tp))

/* Due to a hardware bug, the 5701 can only DMA to memory addresses
 * that are at least dword aligned when used in PCIX mode.  The driver
 * works around this bug by double copying the packet.  This workaround
 * is built into the normal double copy length check for efficiency.
 *
 * However, the double copy is only necessary on those architectures
 * where unaligned memory accesses are inefficient.  For those architectures
 * where unaligned memory accesses incur little penalty, we can reintegrate
 * the 5701 in the normal rx path.  Doing so saves a device structure
 * dereference by hardcoding the double copy threshold in place.
 */
#define TG3_RX_COPY_THRESHOLD		256
#if NET_IP_ALIGN == 0 || defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS)
	#define TG3_RX_COPY_THRESH(tp)	TG3_RX_COPY_THRESHOLD
#else
	#define TG3_RX_COPY_THRESH(tp)	((tp)->rx_copy_thresh)
#endif

/* minimum number of free TX descriptors required to wake up TX process */
#define TG3_TX_WAKEUP_THRESH(tnapi)	((tnapi)->tx_pending / 4)

#define TG3_RAW_IP_ALIGN 2

#define TG3_FW_UPDATE_TIMEOUT_SEC	5

#define FIRMWARE_TG3		"tigon/tg3.bin"
#define FIRMWARE_TG3TSO		"tigon/tg3_tso.bin"
#define FIRMWARE_TG3TSO5	"tigon/tg3_tso5.bin"

static char version[] __devinitdata =
	DRV_MODULE_NAME ".c:v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")";

MODULE_AUTHOR("David S. Miller (davem@redhat.com) and Jeff Garzik (jgarzik@pobox.com)");
MODULE_DESCRIPTION("Broadcom Tigon3 ethernet driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_MODULE_VERSION);
MODULE_FIRMWARE(FIRMWARE_TG3);
MODULE_FIRMWARE(FIRMWARE_TG3TSO);
MODULE_FIRMWARE(FIRMWARE_TG3TSO5);

static int tg3_debug = -1;	/* -1 == use TG3_DEF_MSG_ENABLE as value */
module_param(tg3_debug, int, 0);
MODULE_PARM_DESC(tg3_debug, "Tigon3 bitmapped debugging message enable value");

static DEFINE_PCI_DEVICE_TABLE(tg3_pci_tbl) = {
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5700)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5701)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702FE)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705_2)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705M_2)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702X)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703X)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704S)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702A3)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703A3)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5782)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5788)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5789)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5901)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5901_2)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704S_2)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705F)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5721)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5722)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751F)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5752)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5752M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753F)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5754)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5754M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5755)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5755M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5756)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5786)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787F)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5714)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5714S)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5715)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5715S)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5780)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5780S)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5781)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5906)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5906M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5784)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5764)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5723)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5761)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5761E)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5761S)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5761SE)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5785_G)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5785_F)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57780)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57760)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57790)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57788)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5717)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5718)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57781)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57785)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57761)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57765)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57791)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57795)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5719)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5720)},
	{PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9DXX)},
	{PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9MXX)},
	{PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1000)},
	{PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1001)},
	{PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1003)},
	{PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC9100)},
	{PCI_DEVICE(PCI_VENDOR_ID_APPLE, PCI_DEVICE_ID_APPLE_TIGON3)},
	{}
};

MODULE_DEVICE_TABLE(pci, tg3_pci_tbl);

static const struct {
	const char string[ETH_GSTRING_LEN];
} ethtool_stats_keys[] = {
	{ "rx_octets" },
	{ "rx_fragments" },
	{ "rx_ucast_packets" },
	{ "rx_mcast_packets" },
	{ "rx_bcast_packets" },
	{ "rx_fcs_errors" },
	{ "rx_align_errors" },
	{ "rx_xon_pause_rcvd" },
	{ "rx_xoff_pause_rcvd" },
	{ "rx_mac_ctrl_rcvd" },
	{ "rx_xoff_entered" },
	{ "rx_frame_too_long_errors" },
	{ "rx_jabbers" },
	{ "rx_undersize_packets" },
	{ "rx_in_length_errors" },
	{ "rx_out_length_errors" },
	{ "rx_64_or_less_octet_packets" },
	{ "rx_65_to_127_octet_packets" },
	{ "rx_128_to_255_octet_packets" },
	{ "rx_256_to_511_octet_packets" },
	{ "rx_512_to_1023_octet_packets" },
	{ "rx_1024_to_1522_octet_packets" },
	{ "rx_1523_to_2047_octet_packets" },
	{ "rx_2048_to_4095_octet_packets" },
	{ "rx_4096_to_8191_octet_packets" },
	{ "rx_8192_to_9022_octet_packets" },

	{ "tx_octets" },
	{ "tx_collisions" },

	{ "tx_xon_sent" },
	{ "tx_xoff_sent" },
	{ "tx_flow_control" },
	{ "tx_mac_errors" },
	{ "tx_single_collisions" },
	{ "tx_mult_collisions" },
	{ "tx_deferred" },
	{ "tx_excessive_collisions" },
	{ "tx_late_collisions" },
	{ "tx_collide_2times" },
	{ "tx_collide_3times" },
	{ "tx_collide_4times" },
	{ "tx_collide_5times" },
	{ "tx_collide_6times" },
	{ "tx_collide_7times" },
	{ "tx_collide_8times" },
	{ "tx_collide_9times" },
	{ "tx_collide_10times" },
	{ "tx_collide_11times" },
	{ "tx_collide_12times" },
	{ "tx_collide_13times" },
	{ "tx_collide_14times" },
	{ "tx_collide_15times" },
	{ "tx_ucast_packets" },
	{ "tx_mcast_packets" },
	{ "tx_bcast_packets" },
	{ "tx_carrier_sense_errors" },
	{ "tx_discards" },
	{ "tx_errors" },

	{ "dma_writeq_full" },
	{ "dma_write_prioq_full" },
	{ "rxbds_empty" },
	{ "rx_discards" },
	{ "mbuf_lwm_thresh_hit" },
	{ "rx_errors" },
	{ "rx_threshold_hit" },

	{ "dma_readq_full" },
	{ "dma_read_prioq_full" },
	{ "tx_comp_queue_full" },

	{ "ring_set_send_prod_index" },
	{ "ring_status_update" },
	{ "nic_irqs" },
	{ "nic_avoided_irqs" },
	{ "nic_tx_threshold_hit" }
};

#define TG3_NUM_STATS	ARRAY_SIZE(ethtool_stats_keys)


static const struct {
	const char string[ETH_GSTRING_LEN];
} ethtool_test_keys[] = {
	{ "nvram test     (online) " },
	{ "link test      (online) " },
	{ "register test  (offline)" },
	{ "memory test    (offline)" },
	{ "loopback test  (offline)" },
	{ "interrupt test (offline)" },
};

#define TG3_NUM_TEST	ARRAY_SIZE(ethtool_test_keys)

static void tg3_write32(struct tg3 *tp, u32 off, u32 val)
{
	writel(val, tp->regs + off);
}

static u32 tg3_read32(struct tg3 *tp, u32 off)
{
	return readl(tp->regs + off);
}

static void tg3_ape_write32(struct tg3 *tp, u32 off, u32 val)
{
	writel(val, tp->aperegs + off);
}

static u32 tg3_ape_read32(struct tg3 *tp, u32 off)
{
	return readl(tp->aperegs + off);
}

static void tg3_write_indirect_reg32(struct tg3 *tp, u32 off, u32 val)
{
	unsigned long flags;

	spin_lock_irqsave(&tp->indirect_lock, flags);
	pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off);
	pci_write_config_dword(tp->pdev, TG3PCI_REG_DATA, val);
	spin_unlock_irqrestore(&tp->indirect_lock, flags);
}

static void tg3_write_flush_reg32(struct tg3 *tp, u32 off, u32 val)
{
	writel(val, tp->regs + off);
	readl(tp->regs + off);
}

static u32 tg3_read_indirect_reg32(struct tg3 *tp, u32 off)
{
	unsigned long flags;
	u32 val;

	spin_lock_irqsave(&tp->indirect_lock, flags);
	pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off);
	pci_read_config_dword(tp->pdev, TG3PCI_REG_DATA, &val);
	spin_unlock_irqrestore(&tp->indirect_lock, flags);
	return val;
}

static void tg3_write_indirect_mbox(struct tg3 *tp, u32 off, u32 val)
{
	unsigned long flags;

	if (off == (MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW)) {
		pci_write_config_dword(tp->pdev, TG3PCI_RCV_RET_RING_CON_IDX +
				       TG3_64BIT_REG_LOW, val);
		return;
	}
	if (off == TG3_RX_STD_PROD_IDX_REG) {
		pci_write_config_dword(tp->pdev, TG3PCI_STD_RING_PROD_IDX +
				       TG3_64BIT_REG_LOW, val);
		return;
	}

	spin_lock_irqsave(&tp->indirect_lock, flags);
	pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off + 0x5600);
	pci_write_config_dword(tp->pdev, TG3PCI_REG_DATA, val);
	spin_unlock_irqrestore(&tp->indirect_lock, flags);

	/* In indirect mode when disabling interrupts, we also need
	 * to clear the interrupt bit in the GRC local ctrl register.
	 */
	if ((off == (MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW)) &&
	    (val == 0x1)) {
		pci_write_config_dword(tp->pdev, TG3PCI_MISC_LOCAL_CTRL,
				       tp->grc_local_ctrl|GRC_LCLCTRL_CLEARINT);
	}
}

static u32 tg3_read_indirect_mbox(struct tg3 *tp, u32 off)
{
	unsigned long flags;
	u32 val;

	spin_lock_irqsave(&tp->indirect_lock, flags);
	pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off + 0x5600);
	pci_read_config_dword(tp->pdev, TG3PCI_REG_DATA, &val);
	spin_unlock_irqrestore(&tp->indirect_lock, flags);
	return val;
}

/* usec_wait specifies the wait time in usec when writing to certain registers
 * where it is unsafe to read back the register without some delay.
 * GRC_LOCAL_CTRL is one example if the GPIOs are toggled to switch power.
 * TG3PCI_CLOCK_CTRL is another example if the clock frequencies are changed.
 */
static void _tw32_flush(struct tg3 *tp, u32 off, u32 val, u32 usec_wait)
{
	if (tg3_flag(tp, PCIX_TARGET_HWBUG) || tg3_flag(tp, ICH_WORKAROUND))
		/* Non-posted methods */
		tp->write32(tp, off, val);
	else {
		/* Posted method */
		tg3_write32(tp, off, val);
		if (usec_wait)
			udelay(usec_wait);
		tp->read32(tp, off);
	}
	/* Wait again after the read for the posted method to guarantee that
	 * the wait time is met.
	 */
	if (usec_wait)
		udelay(usec_wait);
}

static inline void tw32_mailbox_flush(struct tg3 *tp, u32 off, u32 val)
{
	tp->write32_mbox(tp, off, val);
	if (!tg3_flag(tp, MBOX_WRITE_REORDER) && !tg3_flag(tp, ICH_WORKAROUND))
		tp->read32_mbox(tp, off);
}

static void tg3_write32_tx_mbox(struct tg3 *tp, u32 off, u32 val)
{
	void __iomem *mbox = tp->regs + off;
	writel(val, mbox);
	if (tg3_flag(tp, TXD_MBOX_HWBUG))
		writel(val, mbox);
	if (tg3_flag(tp, MBOX_WRITE_REORDER))
		readl(mbox);
}

static u32 tg3_read32_mbox_5906(struct tg3 *tp, u32 off)
{
	return readl(tp->regs + off + GRCMBOX_BASE);
}

static void tg3_write32_mbox_5906(struct tg3 *tp, u32 off, u32 val)
{
	writel(val, tp->regs + off + GRCMBOX_BASE);
}

#define tw32_mailbox(reg, val)		tp->write32_mbox(tp, reg, val)
#define tw32_mailbox_f(reg, val)	tw32_mailbox_flush(tp, (reg), (val))
#define tw32_rx_mbox(reg, val)		tp->write32_rx_mbox(tp, reg, val)
#define tw32_tx_mbox(reg, val)		tp->write32_tx_mbox(tp, reg, val)
#define tr32_mailbox(reg)		tp->read32_mbox(tp, reg)

#define tw32(reg, val)			tp->write32(tp, reg, val)
#define tw32_f(reg, val)		_tw32_flush(tp, (reg), (val), 0)
#define tw32_wait_f(reg, val, us)	_tw32_flush(tp, (reg), (val), (us))
#define tr32(reg)			tp->read32(tp, reg)
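
/*
 * Usage sketch for the accessors above (illustrative only): tw32() posts
 * a write through whichever method is hooked into tp->write32, tw32_f()
 * flushes the posted write by reading the register back, and
 * tw32_wait_f() additionally waits for registers such as GRC_LOCAL_CTRL
 * that are unsafe to read back immediately:
 *
 *	tw32(MAC_MODE, tp->mac_mode);		fire and forget
 *	tw32_f(MAC_MODE, tp->mac_mode);		write, then flush
 *	tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl, 100);
 */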

static void tg3_write_mem(struct tg3 *tp, u32 off, u32 val)
{
	unsigned long flags;

	if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) &&
	    (off >= NIC_SRAM_STATS_BLK) && (off < NIC_SRAM_TX_BUFFER_DESC))
		return;

	spin_lock_irqsave(&tp->indirect_lock, flags);
	if (tg3_flag(tp, SRAM_USE_CONFIG)) {
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, off);
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);

		/* Always leave this as zero. */
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
	} else {
		tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, off);
		tw32_f(TG3PCI_MEM_WIN_DATA, val);

		/* Always leave this as zero. */
		tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, 0);
	}
	spin_unlock_irqrestore(&tp->indirect_lock, flags);
}

static void tg3_read_mem(struct tg3 *tp, u32 off, u32 *val)
{
	unsigned long flags;

	if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) &&
	    (off >= NIC_SRAM_STATS_BLK) && (off < NIC_SRAM_TX_BUFFER_DESC)) {
		*val = 0;
		return;
	}

	spin_lock_irqsave(&tp->indirect_lock, flags);
	if (tg3_flag(tp, SRAM_USE_CONFIG)) {
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, off);
		pci_read_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);

		/* Always leave this as zero. */
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
	} else {
		tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, off);
		*val = tr32(TG3PCI_MEM_WIN_DATA);

		/* Always leave this as zero. */
		tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, 0);
	}
	spin_unlock_irqrestore(&tp->indirect_lock, flags);
}
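
/*
 * Usage sketch for the two helpers above, modeled on how this file drives
 * the firmware mailbox later on (illustrative sequence only):
 *
 *	tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX, FWCMD_NICDRV_LINK_UPDATE);
 *	tg3_read_mem(tp, NIC_SRAM_FW_CMD_MBOX, &val);
 *
 * Both take indirect_lock internally, so callers do not serialize the
 * memory window themselves.
 */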

static void tg3_ape_lock_init(struct tg3 *tp)
{
	int i;
	u32 regbase;

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
		regbase = TG3_APE_LOCK_GRANT;
	else
		regbase = TG3_APE_PER_LOCK_GRANT;

	/* Make sure the driver hasn't any stale locks. */
	for (i = 0; i < 8; i++)
		tg3_ape_write32(tp, regbase + 4 * i, APE_LOCK_GRANT_DRIVER);
}

static int tg3_ape_lock(struct tg3 *tp, int locknum)
{
	int i, off;
	int ret = 0;
	u32 status, req, gnt;

	if (!tg3_flag(tp, ENABLE_APE))
		return 0;

	switch (locknum) {
	case TG3_APE_LOCK_GRC:
	case TG3_APE_LOCK_MEM:
		break;
	default:
		return -EINVAL;
	}

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761) {
		req = TG3_APE_LOCK_REQ;
		gnt = TG3_APE_LOCK_GRANT;
	} else {
		req = TG3_APE_PER_LOCK_REQ;
		gnt = TG3_APE_PER_LOCK_GRANT;
	}

	off = 4 * locknum;

	tg3_ape_write32(tp, req + off, APE_LOCK_REQ_DRIVER);

	/* Wait for up to 1 millisecond to acquire lock. */
	for (i = 0; i < 100; i++) {
		status = tg3_ape_read32(tp, gnt + off);
		if (status == APE_LOCK_GRANT_DRIVER)
			break;
		udelay(10);
	}

	if (status != APE_LOCK_GRANT_DRIVER) {
		/* Revoke the lock request. */
		tg3_ape_write32(tp, gnt + off,
				APE_LOCK_GRANT_DRIVER);

		ret = -EBUSY;
	}

	return ret;
}

static void tg3_ape_unlock(struct tg3 *tp, int locknum)
{
	u32 gnt;

	if (!tg3_flag(tp, ENABLE_APE))
		return;

	switch (locknum) {
	case TG3_APE_LOCK_GRC:
	case TG3_APE_LOCK_MEM:
		break;
	default:
		return;
	}

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
		gnt = TG3_APE_LOCK_GRANT;
	else
		gnt = TG3_APE_PER_LOCK_GRANT;

	tg3_ape_write32(tp, gnt + 4 * locknum, APE_LOCK_GRANT_DRIVER);
}
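
/*
 * Illustrative pairing (a sketch, not an added code path): the APE
 * firmware shares these hardware semaphores with the host, so a
 * successful tg3_ape_lock() must always be balanced by tg3_ape_unlock()
 * on the same lock number:
 *
 *	if (tg3_ape_lock(tp, TG3_APE_LOCK_MEM))
 *		return;			back off on -EBUSY
 *	...touch state shared with the APE...
 *	tg3_ape_unlock(tp, TG3_APE_LOCK_MEM);
 */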

static void tg3_disable_ints(struct tg3 *tp)
{
	int i;

	tw32(TG3PCI_MISC_HOST_CTRL,
	     (tp->misc_host_ctrl | MISC_HOST_CTRL_MASK_PCI_INT));
	for (i = 0; i < tp->irq_max; i++)
		tw32_mailbox_f(tp->napi[i].int_mbox, 0x00000001);
}

static void tg3_enable_ints(struct tg3 *tp)
{
	int i;

	tp->irq_sync = 0;
	wmb();

	tw32(TG3PCI_MISC_HOST_CTRL,
	     (tp->misc_host_ctrl & ~MISC_HOST_CTRL_MASK_PCI_INT));

	tp->coal_now = tp->coalesce_mode | HOSTCC_MODE_ENABLE;
	for (i = 0; i < tp->irq_cnt; i++) {
		struct tg3_napi *tnapi = &tp->napi[i];

		tw32_mailbox_f(tnapi->int_mbox, tnapi->last_tag << 24);
		if (tg3_flag(tp, 1SHOT_MSI))
			tw32_mailbox_f(tnapi->int_mbox, tnapi->last_tag << 24);

		tp->coal_now |= tnapi->coal_now;
	}

	/* Force an initial interrupt */
	if (!tg3_flag(tp, TAGGED_STATUS) &&
	    (tp->napi[0].hw_status->status & SD_STATUS_UPDATED))
		tw32(GRC_LOCAL_CTRL, tp->grc_local_ctrl | GRC_LCLCTRL_SETINT);
	else
		tw32(HOSTCC_MODE, tp->coal_now);

	tp->coal_now &= ~(tp->napi[0].coal_now | tp->napi[1].coal_now);
}

static inline unsigned int tg3_has_work(struct tg3_napi *tnapi)
{
	struct tg3 *tp = tnapi->tp;
	struct tg3_hw_status *sblk = tnapi->hw_status;
	unsigned int work_exists = 0;

	/* check for phy events */
	if (!(tg3_flag(tp, USE_LINKCHG_REG) || tg3_flag(tp, POLL_SERDES))) {
		if (sblk->status & SD_STATUS_LINK_CHG)
			work_exists = 1;
	}
	/* check for RX/TX work to do */
	if (sblk->idx[0].tx_consumer != tnapi->tx_cons ||
	    *(tnapi->rx_rcb_prod_idx) != tnapi->rx_rcb_ptr)
		work_exists = 1;

	return work_exists;
}

/* tg3_int_reenable
 *  similar to tg3_enable_ints, but it accurately determines whether there
 *  is new work pending and can return without flushing the PIO write
 *  which reenables interrupts
 */
static void tg3_int_reenable(struct tg3_napi *tnapi)
{
	struct tg3 *tp = tnapi->tp;

	tw32_mailbox(tnapi->int_mbox, tnapi->last_tag << 24);
	mmiowb();

	/* When doing tagged status, this work check is unnecessary.
	 * The last_tag we write above tells the chip which piece of
	 * work we've completed.
	 */
	if (!tg3_flag(tp, TAGGED_STATUS) && tg3_has_work(tnapi))
		tw32(HOSTCC_MODE, tp->coalesce_mode |
		     HOSTCC_MODE_ENABLE | tnapi->coal_now);
}

static void tg3_switch_clocks(struct tg3 *tp)
{
	u32 clock_ctrl;
	u32 orig_clock_ctrl;

	if (tg3_flag(tp, CPMU_PRESENT) || tg3_flag(tp, 5780_CLASS))
		return;

	clock_ctrl = tr32(TG3PCI_CLOCK_CTRL);

	orig_clock_ctrl = clock_ctrl;
	clock_ctrl &= (CLOCK_CTRL_FORCE_CLKRUN |
		       CLOCK_CTRL_CLKRUN_OENABLE |
		       0x1f);
	tp->pci_clock_ctrl = clock_ctrl;

	if (tg3_flag(tp, 5705_PLUS)) {
		if (orig_clock_ctrl & CLOCK_CTRL_625_CORE) {
			tw32_wait_f(TG3PCI_CLOCK_CTRL,
				    clock_ctrl | CLOCK_CTRL_625_CORE, 40);
		}
	} else if ((orig_clock_ctrl & CLOCK_CTRL_44MHZ_CORE) != 0) {
		tw32_wait_f(TG3PCI_CLOCK_CTRL,
			    clock_ctrl |
			    (CLOCK_CTRL_44MHZ_CORE | CLOCK_CTRL_ALTCLK),
			    40);
		tw32_wait_f(TG3PCI_CLOCK_CTRL,
			    clock_ctrl | (CLOCK_CTRL_ALTCLK),
			    40);
	}
	tw32_wait_f(TG3PCI_CLOCK_CTRL, clock_ctrl, 40);
}

#define PHY_BUSY_LOOPS	5000
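
/*
 * Polling budget for the MDIO loops below: PHY_BUSY_LOOPS iterations of
 * udelay(10) bound a single PHY register access at roughly 5000 * 10 us,
 * i.e. about 50 ms, before the helpers give up and return -EBUSY.
 */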

static int tg3_readphy(struct tg3 *tp, int reg, u32 *val)
{
	u32 frame_val;
	unsigned int loops;
	int ret;

	if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
		tw32_f(MAC_MI_MODE,
		       (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
		udelay(80);
	}

	*val = 0x0;

	frame_val  = ((tp->phy_addr << MI_COM_PHY_ADDR_SHIFT) &
		      MI_COM_PHY_ADDR_MASK);
	frame_val |= ((reg << MI_COM_REG_ADDR_SHIFT) &
		      MI_COM_REG_ADDR_MASK);
	frame_val |= (MI_COM_CMD_READ | MI_COM_START);

	tw32_f(MAC_MI_COM, frame_val);

	loops = PHY_BUSY_LOOPS;
	while (loops != 0) {
		udelay(10);
		frame_val = tr32(MAC_MI_COM);

		if ((frame_val & MI_COM_BUSY) == 0) {
			udelay(5);
			frame_val = tr32(MAC_MI_COM);
			break;
		}
		loops -= 1;
	}

	ret = -EBUSY;
	if (loops != 0) {
		*val = frame_val & MI_COM_DATA_MASK;
		ret = 0;
	}

	if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
		tw32_f(MAC_MI_MODE, tp->mi_mode);
		udelay(80);
	}

	return ret;
}

static int tg3_writephy(struct tg3 *tp, int reg, u32 val)
{
	u32 frame_val;
	unsigned int loops;
	int ret;

	if ((tp->phy_flags & TG3_PHYFLG_IS_FET) &&
	    (reg == MII_TG3_CTRL || reg == MII_TG3_AUX_CTRL))
		return 0;

	if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
		tw32_f(MAC_MI_MODE,
		       (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
		udelay(80);
	}

	frame_val  = ((tp->phy_addr << MI_COM_PHY_ADDR_SHIFT) &
		      MI_COM_PHY_ADDR_MASK);
	frame_val |= ((reg << MI_COM_REG_ADDR_SHIFT) &
		      MI_COM_REG_ADDR_MASK);
	frame_val |= (val & MI_COM_DATA_MASK);
	frame_val |= (MI_COM_CMD_WRITE | MI_COM_START);

	tw32_f(MAC_MI_COM, frame_val);

	loops = PHY_BUSY_LOOPS;
	while (loops != 0) {
		udelay(10);
		frame_val = tr32(MAC_MI_COM);
		if ((frame_val & MI_COM_BUSY) == 0) {
			udelay(5);
			frame_val = tr32(MAC_MI_COM);
			break;
		}
		loops -= 1;
	}

	ret = -EBUSY;
	if (loops != 0)
		ret = 0;

	if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
		tw32_f(MAC_MI_MODE, tp->mi_mode);
		udelay(80);
	}

	return ret;
}
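
/*
 * Usage sketch (illustrative): the return value is the only success
 * indicator, and the out-parameter is valid only when 0 is returned:
 *
 *	u32 bmsr;
 *
 *	if (!tg3_readphy(tp, MII_BMSR, &bmsr) && (bmsr & BMSR_LSTATUS))
 *		...link is up...
 *	tg3_writephy(tp, MII_BMCR, BMCR_ANENABLE | BMCR_ANRESTART);
 */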

static int tg3_phy_cl45_write(struct tg3 *tp, u32 devad, u32 addr, u32 val)
{
	int err;

	err = tg3_writephy(tp, MII_TG3_MMD_CTRL, devad);
	if (err)
		goto done;

	err = tg3_writephy(tp, MII_TG3_MMD_ADDRESS, addr);
	if (err)
		goto done;

	err = tg3_writephy(tp, MII_TG3_MMD_CTRL,
			   MII_TG3_MMD_CTRL_DATA_NOINC | devad);
	if (err)
		goto done;

	err = tg3_writephy(tp, MII_TG3_MMD_ADDRESS, val);

done:
	return err;
}

static int tg3_phy_cl45_read(struct tg3 *tp, u32 devad, u32 addr, u32 *val)
{
	int err;

	err = tg3_writephy(tp, MII_TG3_MMD_CTRL, devad);
	if (err)
		goto done;

	err = tg3_writephy(tp, MII_TG3_MMD_ADDRESS, addr);
	if (err)
		goto done;

	err = tg3_writephy(tp, MII_TG3_MMD_CTRL,
			   MII_TG3_MMD_CTRL_DATA_NOINC | devad);
	if (err)
		goto done;

	err = tg3_readphy(tp, MII_TG3_MMD_ADDRESS, val);

done:
	return err;
}
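
/*
 * Note on the two helpers above: this is the standard "Clause 45 over
 * Clause 22" indirection (IEEE 802.3 Annex 22D).  The first MMD_CTRL
 * write selects the MMD device (e.g. MDIO_MMD_AN), MMD_ADDRESS latches
 * the register address, and the second MMD_CTRL write with DATA_NOINC
 * switches MMD_ADDRESS into data mode for the actual transfer.
 */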

static int tg3_phydsp_read(struct tg3 *tp, u32 reg, u32 *val)
{
	int err;

	err = tg3_writephy(tp, MII_TG3_DSP_ADDRESS, reg);
	if (!err)
		err = tg3_readphy(tp, MII_TG3_DSP_RW_PORT, val);

	return err;
}

static int tg3_phydsp_write(struct tg3 *tp, u32 reg, u32 val)
{
	int err;

	err = tg3_writephy(tp, MII_TG3_DSP_ADDRESS, reg);
	if (!err)
		err = tg3_writephy(tp, MII_TG3_DSP_RW_PORT, val);

	return err;
}

static int tg3_phy_auxctl_read(struct tg3 *tp, int reg, u32 *val)
{
	int err;

	err = tg3_writephy(tp, MII_TG3_AUX_CTRL,
			   (reg << MII_TG3_AUXCTL_MISC_RDSEL_SHIFT) |
			   MII_TG3_AUXCTL_SHDWSEL_MISC);
	if (!err)
		err = tg3_readphy(tp, MII_TG3_AUX_CTRL, val);

	return err;
}

static int tg3_phy_auxctl_write(struct tg3 *tp, int reg, u32 set)
{
	if (reg == MII_TG3_AUXCTL_SHDWSEL_MISC)
		set |= MII_TG3_AUXCTL_MISC_WREN;

	return tg3_writephy(tp, MII_TG3_AUX_CTRL, set | reg);
}

#define TG3_PHY_AUXCTL_SMDSP_ENABLE(tp) \
	tg3_phy_auxctl_write((tp), MII_TG3_AUXCTL_SHDWSEL_AUXCTL, \
			     MII_TG3_AUXCTL_ACTL_SMDSP_ENA | \
			     MII_TG3_AUXCTL_ACTL_TX_6DB)

#define TG3_PHY_AUXCTL_SMDSP_DISABLE(tp) \
	tg3_phy_auxctl_write((tp), MII_TG3_AUXCTL_SHDWSEL_AUXCTL, \
			     MII_TG3_AUXCTL_ACTL_TX_6DB);

static int tg3_bmcr_reset(struct tg3 *tp)
{
	u32 phy_control;
	int limit, err;

	/* OK, reset it, and poll the BMCR_RESET bit until it
	 * clears or we time out.
	 */
	phy_control = BMCR_RESET;
	err = tg3_writephy(tp, MII_BMCR, phy_control);
	if (err != 0)
		return -EBUSY;

	limit = 5000;
	while (limit--) {
		err = tg3_readphy(tp, MII_BMCR, &phy_control);
		if (err != 0)
			return -EBUSY;

		if ((phy_control & BMCR_RESET) == 0) {
			udelay(40);
			break;
		}
		udelay(10);
	}
	if (limit < 0)
		return -EBUSY;

	return 0;
}

static int tg3_mdio_read(struct mii_bus *bp, int mii_id, int reg)
{
	struct tg3 *tp = bp->priv;
	u32 val;

	spin_lock_bh(&tp->lock);

	if (tg3_readphy(tp, reg, &val))
		val = -EIO;

	spin_unlock_bh(&tp->lock);

	return val;
}

static int tg3_mdio_write(struct mii_bus *bp, int mii_id, int reg, u16 val)
{
	struct tg3 *tp = bp->priv;
	u32 ret = 0;

	spin_lock_bh(&tp->lock);

	if (tg3_writephy(tp, reg, val))
		ret = -EIO;

	spin_unlock_bh(&tp->lock);

	return ret;
}

static int tg3_mdio_reset(struct mii_bus *bp)
{
	return 0;
}

static void tg3_mdio_config_5785(struct tg3 *tp)
{
	u32 val;
	struct phy_device *phydev;

	phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
	switch (phydev->drv->phy_id & phydev->drv->phy_id_mask) {
	case PHY_ID_BCM50610:
	case PHY_ID_BCM50610M:
		val = MAC_PHYCFG2_50610_LED_MODES;
		break;
	case PHY_ID_BCMAC131:
		val = MAC_PHYCFG2_AC131_LED_MODES;
		break;
	case PHY_ID_RTL8211C:
		val = MAC_PHYCFG2_RTL8211C_LED_MODES;
		break;
	case PHY_ID_RTL8201E:
		val = MAC_PHYCFG2_RTL8201E_LED_MODES;
		break;
	default:
		return;
	}

	if (phydev->interface != PHY_INTERFACE_MODE_RGMII) {
		tw32(MAC_PHYCFG2, val);

		val = tr32(MAC_PHYCFG1);
		val &= ~(MAC_PHYCFG1_RGMII_INT |
			 MAC_PHYCFG1_RXCLK_TO_MASK | MAC_PHYCFG1_TXCLK_TO_MASK);
		val |= MAC_PHYCFG1_RXCLK_TIMEOUT | MAC_PHYCFG1_TXCLK_TIMEOUT;
		tw32(MAC_PHYCFG1, val);

		return;
	}

	if (!tg3_flag(tp, RGMII_INBAND_DISABLE))
		val |= MAC_PHYCFG2_EMODE_MASK_MASK |
		       MAC_PHYCFG2_FMODE_MASK_MASK |
		       MAC_PHYCFG2_GMODE_MASK_MASK |
		       MAC_PHYCFG2_ACT_MASK_MASK |
		       MAC_PHYCFG2_QUAL_MASK_MASK |
		       MAC_PHYCFG2_INBAND_ENABLE;

	tw32(MAC_PHYCFG2, val);

	val = tr32(MAC_PHYCFG1);
	val &= ~(MAC_PHYCFG1_RXCLK_TO_MASK | MAC_PHYCFG1_TXCLK_TO_MASK |
		 MAC_PHYCFG1_RGMII_EXT_RX_DEC | MAC_PHYCFG1_RGMII_SND_STAT_EN);
	if (!tg3_flag(tp, RGMII_INBAND_DISABLE)) {
		if (tg3_flag(tp, RGMII_EXT_IBND_RX_EN))
			val |= MAC_PHYCFG1_RGMII_EXT_RX_DEC;
		if (tg3_flag(tp, RGMII_EXT_IBND_TX_EN))
			val |= MAC_PHYCFG1_RGMII_SND_STAT_EN;
	}
	val |= MAC_PHYCFG1_RXCLK_TIMEOUT | MAC_PHYCFG1_TXCLK_TIMEOUT |
	       MAC_PHYCFG1_RGMII_INT | MAC_PHYCFG1_TXC_DRV;
	tw32(MAC_PHYCFG1, val);

	val = tr32(MAC_EXT_RGMII_MODE);
	val &= ~(MAC_RGMII_MODE_RX_INT_B |
		 MAC_RGMII_MODE_RX_QUALITY |
		 MAC_RGMII_MODE_RX_ACTIVITY |
		 MAC_RGMII_MODE_RX_ENG_DET |
		 MAC_RGMII_MODE_TX_ENABLE |
		 MAC_RGMII_MODE_TX_LOWPWR |
		 MAC_RGMII_MODE_TX_RESET);
	if (!tg3_flag(tp, RGMII_INBAND_DISABLE)) {
		if (tg3_flag(tp, RGMII_EXT_IBND_RX_EN))
			val |= MAC_RGMII_MODE_RX_INT_B |
			       MAC_RGMII_MODE_RX_QUALITY |
			       MAC_RGMII_MODE_RX_ACTIVITY |
			       MAC_RGMII_MODE_RX_ENG_DET;
		if (tg3_flag(tp, RGMII_EXT_IBND_TX_EN))
			val |= MAC_RGMII_MODE_TX_ENABLE |
			       MAC_RGMII_MODE_TX_LOWPWR |
			       MAC_RGMII_MODE_TX_RESET;
	}
	tw32(MAC_EXT_RGMII_MODE, val);
}

static void tg3_mdio_start(struct tg3 *tp)
{
	tp->mi_mode &= ~MAC_MI_MODE_AUTO_POLL;
	tw32_f(MAC_MI_MODE, tp->mi_mode);
	udelay(80);

	if (tg3_flag(tp, MDIOBUS_INITED) &&
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
		tg3_mdio_config_5785(tp);
}

static int tg3_mdio_init(struct tg3 *tp)
{
	int i;
	u32 reg;
	struct phy_device *phydev;

	if (tg3_flag(tp, 5717_PLUS)) {
		u32 is_serdes;

		tp->phy_addr = PCI_FUNC(tp->pdev->devfn) + 1;

		if (tp->pci_chip_rev_id != CHIPREV_ID_5717_A0)
			is_serdes = tr32(SG_DIG_STATUS) & SG_DIG_IS_SERDES;
		else
			is_serdes = tr32(TG3_CPMU_PHY_STRAP) &
				    TG3_CPMU_PHY_STRAP_IS_SERDES;
		if (is_serdes)
			tp->phy_addr += 7;
	} else
		tp->phy_addr = TG3_PHY_MII_ADDR;

	tg3_mdio_start(tp);

	if (!tg3_flag(tp, USE_PHYLIB) || tg3_flag(tp, MDIOBUS_INITED))
		return 0;

	tp->mdio_bus = mdiobus_alloc();
	if (tp->mdio_bus == NULL)
		return -ENOMEM;

	tp->mdio_bus->name     = "tg3 mdio bus";
	snprintf(tp->mdio_bus->id, MII_BUS_ID_SIZE, "%x",
		 (tp->pdev->bus->number << 8) | tp->pdev->devfn);
	tp->mdio_bus->priv     = tp;
	tp->mdio_bus->parent   = &tp->pdev->dev;
	tp->mdio_bus->read     = &tg3_mdio_read;
	tp->mdio_bus->write    = &tg3_mdio_write;
	tp->mdio_bus->reset    = &tg3_mdio_reset;
	tp->mdio_bus->phy_mask = ~(1 << TG3_PHY_MII_ADDR);
	tp->mdio_bus->irq      = &tp->mdio_irq[0];

	for (i = 0; i < PHY_MAX_ADDR; i++)
		tp->mdio_bus->irq[i] = PHY_POLL;

	/* The bus registration will look for all the PHYs on the mdio bus.
	 * Unfortunately, it does not ensure the PHY is powered up before
	 * accessing the PHY ID registers.  A chip reset is the
	 * quickest way to bring the device back to an operational state..
	 */
	if (tg3_readphy(tp, MII_BMCR, &reg) || (reg & BMCR_PDOWN))
		tg3_bmcr_reset(tp);

	i = mdiobus_register(tp->mdio_bus);
	if (i) {
		dev_warn(&tp->pdev->dev, "mdiobus_reg failed (0x%x)\n", i);
		mdiobus_free(tp->mdio_bus);
		return i;
	}

	phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];

	if (!phydev || !phydev->drv) {
		dev_warn(&tp->pdev->dev, "No PHY devices\n");
		mdiobus_unregister(tp->mdio_bus);
		mdiobus_free(tp->mdio_bus);
		return -ENODEV;
	}

	switch (phydev->drv->phy_id & phydev->drv->phy_id_mask) {
	case PHY_ID_BCM57780:
		phydev->interface = PHY_INTERFACE_MODE_GMII;
		phydev->dev_flags |= PHY_BRCM_AUTO_PWRDWN_ENABLE;
		break;
	case PHY_ID_BCM50610:
	case PHY_ID_BCM50610M:
		phydev->dev_flags |= PHY_BRCM_CLEAR_RGMII_MODE |
				     PHY_BRCM_RX_REFCLK_UNUSED |
				     PHY_BRCM_DIS_TXCRXC_NOENRGY |
				     PHY_BRCM_AUTO_PWRDWN_ENABLE;
		if (tg3_flag(tp, RGMII_INBAND_DISABLE))
			phydev->dev_flags |= PHY_BRCM_STD_IBND_DISABLE;
		if (tg3_flag(tp, RGMII_EXT_IBND_RX_EN))
			phydev->dev_flags |= PHY_BRCM_EXT_IBND_RX_ENABLE;
		if (tg3_flag(tp, RGMII_EXT_IBND_TX_EN))
			phydev->dev_flags |= PHY_BRCM_EXT_IBND_TX_ENABLE;
		/* fallthru */
	case PHY_ID_RTL8211C:
		phydev->interface = PHY_INTERFACE_MODE_RGMII;
		break;
	case PHY_ID_RTL8201E:
	case PHY_ID_BCMAC131:
		phydev->interface = PHY_INTERFACE_MODE_MII;
		phydev->dev_flags |= PHY_BRCM_AUTO_PWRDWN_ENABLE;
		tp->phy_flags |= TG3_PHYFLG_IS_FET;
		break;
	}

	tg3_flag_set(tp, MDIOBUS_INITED);

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
		tg3_mdio_config_5785(tp);

	return 0;
}

static void tg3_mdio_fini(struct tg3 *tp)
{
	if (tg3_flag(tp, MDIOBUS_INITED)) {
		tg3_flag_clear(tp, MDIOBUS_INITED);
		mdiobus_unregister(tp->mdio_bus);
		mdiobus_free(tp->mdio_bus);
	}
}

/* tp->lock is held. */
static inline void tg3_generate_fw_event(struct tg3 *tp)
{
	u32 val;

	val = tr32(GRC_RX_CPU_EVENT);
	val |= GRC_RX_CPU_DRIVER_EVENT;
	tw32_f(GRC_RX_CPU_EVENT, val);

	tp->last_event_jiffies = jiffies;
}

#define TG3_FW_EVENT_TIMEOUT_USEC 2500

/* tp->lock is held. */
static void tg3_wait_for_event_ack(struct tg3 *tp)
{
	int i;
	unsigned int delay_cnt;
	long time_remain;

	/* If enough time has passed, no wait is necessary. */
	time_remain = (long)(tp->last_event_jiffies + 1 +
		       usecs_to_jiffies(TG3_FW_EVENT_TIMEOUT_USEC)) -
		      (long)jiffies;
	if (time_remain < 0)
		return;

	/* Check if we can shorten the wait time. */
	delay_cnt = jiffies_to_usecs(time_remain);
	if (delay_cnt > TG3_FW_EVENT_TIMEOUT_USEC)
		delay_cnt = TG3_FW_EVENT_TIMEOUT_USEC;
	delay_cnt = (delay_cnt >> 3) + 1;

	for (i = 0; i < delay_cnt; i++) {
		if (!(tr32(GRC_RX_CPU_EVENT) & GRC_RX_CPU_DRIVER_EVENT))
			break;
		udelay(8);
	}
}
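
/*
 * Worked numbers for the loop above: with TG3_FW_EVENT_TIMEOUT_USEC = 2500,
 * the clamp gives delay_cnt = (2500 >> 3) + 1 = 313 polls of udelay(8),
 * so a full unacknowledged wait is again roughly 2.5 ms.
 */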

/* tp->lock is held. */
static void tg3_ump_link_report(struct tg3 *tp)
{
	u32 reg;
	u32 val;

	if (!tg3_flag(tp, 5780_CLASS) || !tg3_flag(tp, ENABLE_ASF))
		return;

	tg3_wait_for_event_ack(tp);

	tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX, FWCMD_NICDRV_LINK_UPDATE);

	tg3_write_mem(tp, NIC_SRAM_FW_CMD_LEN_MBOX, 14);

	val = 0;
	if (!tg3_readphy(tp, MII_BMCR, &reg))
		val = reg << 16;
	if (!tg3_readphy(tp, MII_BMSR, &reg))
		val |= (reg & 0xffff);
	tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX, val);

	val = 0;
	if (!tg3_readphy(tp, MII_ADVERTISE, &reg))
		val = reg << 16;
	if (!tg3_readphy(tp, MII_LPA, &reg))
		val |= (reg & 0xffff);
	tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 4, val);

	val = 0;
	if (!(tp->phy_flags & TG3_PHYFLG_MII_SERDES)) {
		if (!tg3_readphy(tp, MII_CTRL1000, &reg))
			val = reg << 16;
		if (!tg3_readphy(tp, MII_STAT1000, &reg))
			val |= (reg & 0xffff);
	}
	tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 8, val);

	if (!tg3_readphy(tp, MII_PHYADDR, &reg))
		val = reg << 16;
	else
		val = 0;
	tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 12, val);

	tg3_generate_fw_event(tp);
}

static void tg3_link_report(struct tg3 *tp)
{
	if (!netif_carrier_ok(tp->dev)) {
		netif_info(tp, link, tp->dev, "Link is down\n");
		tg3_ump_link_report(tp);
	} else if (netif_msg_link(tp)) {
		netdev_info(tp->dev, "Link is up at %d Mbps, %s duplex\n",
			    (tp->link_config.active_speed == SPEED_1000 ?
			     1000 :
			     (tp->link_config.active_speed == SPEED_100 ?
			      100 : 10)),
			    (tp->link_config.active_duplex == DUPLEX_FULL ?
			     "full" : "half"));

		netdev_info(tp->dev, "Flow control is %s for TX and %s for RX\n",
			    (tp->link_config.active_flowctrl & FLOW_CTRL_TX) ?
			    "on" : "off",
			    (tp->link_config.active_flowctrl & FLOW_CTRL_RX) ?
			    "on" : "off");

		if (tp->phy_flags & TG3_PHYFLG_EEE_CAP)
			netdev_info(tp->dev, "EEE is %s\n",
				    tp->setlpicnt ? "enabled" : "disabled");

		tg3_ump_link_report(tp);
	}
}

static u16 tg3_advert_flowctrl_1000T(u8 flow_ctrl)
{
	u16 miireg;

	if ((flow_ctrl & FLOW_CTRL_TX) && (flow_ctrl & FLOW_CTRL_RX))
		miireg = ADVERTISE_PAUSE_CAP;
	else if (flow_ctrl & FLOW_CTRL_TX)
		miireg = ADVERTISE_PAUSE_ASYM;
	else if (flow_ctrl & FLOW_CTRL_RX)
		miireg = ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM;
	else
		miireg = 0;

	return miireg;
}
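
/*
 * Quick reference for the mapping implemented above (the standard 802.3
 * pause advertisement encoding):
 *
 *	FLOW_CTRL_TX | FLOW_CTRL_RX  ->  ADVERTISE_PAUSE_CAP
 *	FLOW_CTRL_TX only            ->  ADVERTISE_PAUSE_ASYM
 *	FLOW_CTRL_RX only            ->  ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM
 *	neither                      ->  0
 */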

static u16 tg3_advert_flowctrl_1000X(u8 flow_ctrl)
{
	u16 miireg;

	if ((flow_ctrl & FLOW_CTRL_TX) && (flow_ctrl & FLOW_CTRL_RX))
		miireg = ADVERTISE_1000XPAUSE;
	else if (flow_ctrl & FLOW_CTRL_TX)
		miireg = ADVERTISE_1000XPSE_ASYM;
	else if (flow_ctrl & FLOW_CTRL_RX)
		miireg = ADVERTISE_1000XPAUSE | ADVERTISE_1000XPSE_ASYM;
	else
		miireg = 0;

	return miireg;
}

static u8 tg3_resolve_flowctrl_1000X(u16 lcladv, u16 rmtadv)
{
	u8 cap = 0;

	if (lcladv & ADVERTISE_1000XPAUSE) {
		if (lcladv & ADVERTISE_1000XPSE_ASYM) {
			if (rmtadv & LPA_1000XPAUSE)
				cap = FLOW_CTRL_TX | FLOW_CTRL_RX;
			else if (rmtadv & LPA_1000XPAUSE_ASYM)
				cap = FLOW_CTRL_RX;
		} else {
			if (rmtadv & LPA_1000XPAUSE)
				cap = FLOW_CTRL_TX | FLOW_CTRL_RX;
		}
	} else if (lcladv & ADVERTISE_1000XPSE_ASYM) {
		if ((rmtadv & LPA_1000XPAUSE) && (rmtadv & LPA_1000XPAUSE_ASYM))
			cap = FLOW_CTRL_TX;
	}

	return cap;
}
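
/*
 * Resolution table implemented above, pairing the local 1000BASE-X pause
 * advertisement against the link partner's:
 *
 *	local PAUSE+ASYM vs. remote PAUSE       ->  FLOW_CTRL_TX | FLOW_CTRL_RX
 *	local PAUSE+ASYM vs. remote ASYM only   ->  FLOW_CTRL_RX
 *	local PAUSE only vs. remote PAUSE       ->  FLOW_CTRL_TX | FLOW_CTRL_RX
 *	local ASYM only  vs. remote PAUSE+ASYM  ->  FLOW_CTRL_TX
 *	any other combination                   ->  0 (no pause)
 */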

static void tg3_setup_flow_control(struct tg3 *tp, u32 lcladv, u32 rmtadv)
{
	u8 autoneg;
	u8 flowctrl = 0;
	u32 old_rx_mode = tp->rx_mode;
	u32 old_tx_mode = tp->tx_mode;

	if (tg3_flag(tp, USE_PHYLIB))
		autoneg = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]->autoneg;
	else
		autoneg = tp->link_config.autoneg;

	if (autoneg == AUTONEG_ENABLE && tg3_flag(tp, PAUSE_AUTONEG)) {
		if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES)
			flowctrl = tg3_resolve_flowctrl_1000X(lcladv, rmtadv);
		else
			flowctrl = mii_resolve_flowctrl_fdx(lcladv, rmtadv);
	} else
		flowctrl = tp->link_config.flowctrl;

	tp->link_config.active_flowctrl = flowctrl;

	if (flowctrl & FLOW_CTRL_RX)
		tp->rx_mode |= RX_MODE_FLOW_CTRL_ENABLE;
	else
		tp->rx_mode &= ~RX_MODE_FLOW_CTRL_ENABLE;

	if (old_rx_mode != tp->rx_mode)
		tw32_f(MAC_RX_MODE, tp->rx_mode);

	if (flowctrl & FLOW_CTRL_TX)
		tp->tx_mode |= TX_MODE_FLOW_CTRL_ENABLE;
	else
		tp->tx_mode &= ~TX_MODE_FLOW_CTRL_ENABLE;

	if (old_tx_mode != tp->tx_mode)
		tw32_f(MAC_TX_MODE, tp->tx_mode);
}

static void tg3_adjust_link(struct net_device *dev)
{
	u8 oldflowctrl, linkmesg = 0;
	u32 mac_mode, lcl_adv, rmt_adv;
	struct tg3 *tp = netdev_priv(dev);
	struct phy_device *phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];

	spin_lock_bh(&tp->lock);

	mac_mode = tp->mac_mode & ~(MAC_MODE_PORT_MODE_MASK |
				    MAC_MODE_HALF_DUPLEX);

	oldflowctrl = tp->link_config.active_flowctrl;

	if (phydev->link) {
		lcl_adv = 0;
		rmt_adv = 0;

		if (phydev->speed == SPEED_100 || phydev->speed == SPEED_10)
			mac_mode |= MAC_MODE_PORT_MODE_MII;
		else if (phydev->speed == SPEED_1000 ||
			 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5785)
			mac_mode |= MAC_MODE_PORT_MODE_GMII;
		else
			mac_mode |= MAC_MODE_PORT_MODE_MII;

		if (phydev->duplex == DUPLEX_HALF)
			mac_mode |= MAC_MODE_HALF_DUPLEX;
		else {
			lcl_adv = tg3_advert_flowctrl_1000T(
				  tp->link_config.flowctrl);

			if (phydev->pause)
				rmt_adv = LPA_PAUSE_CAP;
			if (phydev->asym_pause)
				rmt_adv |= LPA_PAUSE_ASYM;
		}

		tg3_setup_flow_control(tp, lcl_adv, rmt_adv);
	} else
		mac_mode |= MAC_MODE_PORT_MODE_GMII;

	if (mac_mode != tp->mac_mode) {
		tp->mac_mode = mac_mode;
		tw32_f(MAC_MODE, tp->mac_mode);
		udelay(40);
	}

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785) {
		if (phydev->speed == SPEED_10)
			tw32(MAC_MI_STAT,
			     MAC_MI_STAT_10MBPS_MODE |
			     MAC_MI_STAT_LNKSTAT_ATTN_ENAB);
		else
			tw32(MAC_MI_STAT, MAC_MI_STAT_LNKSTAT_ATTN_ENAB);
	}

	if (phydev->speed == SPEED_1000 && phydev->duplex == DUPLEX_HALF)
		tw32(MAC_TX_LENGTHS,
		     ((2 << TX_LENGTHS_IPG_CRS_SHIFT) |
		      (6 << TX_LENGTHS_IPG_SHIFT) |
		      (0xff << TX_LENGTHS_SLOT_TIME_SHIFT)));
	else
		tw32(MAC_TX_LENGTHS,
		     ((2 << TX_LENGTHS_IPG_CRS_SHIFT) |
		      (6 << TX_LENGTHS_IPG_SHIFT) |
		      (32 << TX_LENGTHS_SLOT_TIME_SHIFT)));

	if ((phydev->link && tp->link_config.active_speed == SPEED_INVALID) ||
	    (!phydev->link && tp->link_config.active_speed != SPEED_INVALID) ||
	    phydev->speed != tp->link_config.active_speed ||
	    phydev->duplex != tp->link_config.active_duplex ||
	    oldflowctrl != tp->link_config.active_flowctrl)
		linkmesg = 1;

	tp->link_config.active_speed = phydev->speed;
	tp->link_config.active_duplex = phydev->duplex;

	spin_unlock_bh(&tp->lock);

	if (linkmesg)
		tg3_link_report(tp);
}

static int tg3_phy_init(struct tg3 *tp)
{
	struct phy_device *phydev;

	if (tp->phy_flags & TG3_PHYFLG_IS_CONNECTED)
		return 0;

	/* Bring the PHY back to a known state. */
	tg3_bmcr_reset(tp);

	phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];

	/* Attach the MAC to the PHY. */
	phydev = phy_connect(tp->dev, dev_name(&phydev->dev), tg3_adjust_link,
			     phydev->dev_flags, phydev->interface);
	if (IS_ERR(phydev)) {
		dev_err(&tp->pdev->dev, "Could not attach to PHY\n");
		return PTR_ERR(phydev);
	}

	/* Mask with MAC supported features. */
	switch (phydev->interface) {
	case PHY_INTERFACE_MODE_GMII:
	case PHY_INTERFACE_MODE_RGMII:
		if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
			phydev->supported &= (PHY_GBIT_FEATURES |
					      SUPPORTED_Pause |
					      SUPPORTED_Asym_Pause);
			break;
		}
		/* fallthru */
	case PHY_INTERFACE_MODE_MII:
		phydev->supported &= (PHY_BASIC_FEATURES |
				      SUPPORTED_Pause |
				      SUPPORTED_Asym_Pause);
		break;
	default:
		phy_disconnect(tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]);
		return -EINVAL;
	}

	tp->phy_flags |= TG3_PHYFLG_IS_CONNECTED;

	phydev->advertising = phydev->supported;

	return 0;
}

static void tg3_phy_start(struct tg3 *tp)
{
	struct phy_device *phydev;

	if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
		return;

	phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];

	if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) {
		tp->phy_flags &= ~TG3_PHYFLG_IS_LOW_POWER;
		phydev->speed = tp->link_config.orig_speed;
		phydev->duplex = tp->link_config.orig_duplex;
		phydev->autoneg = tp->link_config.orig_autoneg;
		phydev->advertising = tp->link_config.orig_advertising;
	}

	phy_start(phydev);

	phy_start_aneg(phydev);
}

static void tg3_phy_stop(struct tg3 *tp)
{
	if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
		return;

	phy_stop(tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]);
}

static void tg3_phy_fini(struct tg3 *tp)
{
	if (tp->phy_flags & TG3_PHYFLG_IS_CONNECTED) {
		phy_disconnect(tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]);
		tp->phy_flags &= ~TG3_PHYFLG_IS_CONNECTED;
	}
}

static void tg3_phy_fet_toggle_apd(struct tg3 *tp, bool enable)
{
	u32 phytest;

	if (!tg3_readphy(tp, MII_TG3_FET_TEST, &phytest)) {
		u32 phy;

		tg3_writephy(tp, MII_TG3_FET_TEST,
			     phytest | MII_TG3_FET_SHADOW_EN);
		if (!tg3_readphy(tp, MII_TG3_FET_SHDW_AUXSTAT2, &phy)) {
			if (enable)
				phy |= MII_TG3_FET_SHDW_AUXSTAT2_APD;
			else
				phy &= ~MII_TG3_FET_SHDW_AUXSTAT2_APD;
			tg3_writephy(tp, MII_TG3_FET_SHDW_AUXSTAT2, phy);
		}
		tg3_writephy(tp, MII_TG3_FET_TEST, phytest);
	}
}

static void tg3_phy_toggle_apd(struct tg3 *tp, bool enable)
{
	u32 reg;

	if (!tg3_flag(tp, 5705_PLUS) ||
	    (tg3_flag(tp, 5717_PLUS) &&
	     (tp->phy_flags & TG3_PHYFLG_MII_SERDES)))
		return;

	if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
		tg3_phy_fet_toggle_apd(tp, enable);
		return;
	}

	reg = MII_TG3_MISC_SHDW_WREN |
	      MII_TG3_MISC_SHDW_SCR5_SEL |
	      MII_TG3_MISC_SHDW_SCR5_LPED |
	      MII_TG3_MISC_SHDW_SCR5_DLPTLM |
	      MII_TG3_MISC_SHDW_SCR5_SDTL |
	      MII_TG3_MISC_SHDW_SCR5_C125OE;
	if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5784 || !enable)
		reg |= MII_TG3_MISC_SHDW_SCR5_DLLAPD;

	tg3_writephy(tp, MII_TG3_MISC_SHDW, reg);


	reg = MII_TG3_MISC_SHDW_WREN |
	      MII_TG3_MISC_SHDW_APD_SEL |
	      MII_TG3_MISC_SHDW_APD_WKTM_84MS;
	if (enable)
		reg |= MII_TG3_MISC_SHDW_APD_ENABLE;

	tg3_writephy(tp, MII_TG3_MISC_SHDW, reg);
}

static void tg3_phy_toggle_automdix(struct tg3 *tp, int enable)
{
	u32 phy;

	if (!tg3_flag(tp, 5705_PLUS) ||
	    (tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
		return;

	if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
		u32 ephy;

		if (!tg3_readphy(tp, MII_TG3_FET_TEST, &ephy)) {
			u32 reg = MII_TG3_FET_SHDW_MISCCTRL;

			tg3_writephy(tp, MII_TG3_FET_TEST,
				     ephy | MII_TG3_FET_SHADOW_EN);
			if (!tg3_readphy(tp, reg, &phy)) {
				if (enable)
					phy |= MII_TG3_FET_SHDW_MISCCTRL_MDIX;
				else
					phy &= ~MII_TG3_FET_SHDW_MISCCTRL_MDIX;
				tg3_writephy(tp, reg, phy);
			}
			tg3_writephy(tp, MII_TG3_FET_TEST, ephy);
		}
	} else {
		int ret;

		ret = tg3_phy_auxctl_read(tp,
					  MII_TG3_AUXCTL_SHDWSEL_MISC, &phy);
		if (!ret) {
			if (enable)
				phy |= MII_TG3_AUXCTL_MISC_FORCE_AMDIX;
			else
				phy &= ~MII_TG3_AUXCTL_MISC_FORCE_AMDIX;
			tg3_phy_auxctl_write(tp,
					     MII_TG3_AUXCTL_SHDWSEL_MISC, phy);
		}
	}
}

static void tg3_phy_set_wirespeed(struct tg3 *tp)
{
	int ret;
	u32 val;

	if (tp->phy_flags & TG3_PHYFLG_NO_ETH_WIRE_SPEED)
		return;

	ret = tg3_phy_auxctl_read(tp, MII_TG3_AUXCTL_SHDWSEL_MISC, &val);
	if (!ret)
		tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_MISC,
				     val | MII_TG3_AUXCTL_MISC_WIRESPD_EN);
}

static void tg3_phy_apply_otp(struct tg3 *tp)
{
	u32 otp, phy;

	if (!tp->phy_otp)
		return;

	otp = tp->phy_otp;

	if (TG3_PHY_AUXCTL_SMDSP_ENABLE(tp))
		return;

	phy = ((otp & TG3_OTP_AGCTGT_MASK) >> TG3_OTP_AGCTGT_SHIFT);
	phy |= MII_TG3_DSP_TAP1_AGCTGT_DFLT;
	tg3_phydsp_write(tp, MII_TG3_DSP_TAP1, phy);

	phy = ((otp & TG3_OTP_HPFFLTR_MASK) >> TG3_OTP_HPFFLTR_SHIFT) |
	      ((otp & TG3_OTP_HPFOVER_MASK) >> TG3_OTP_HPFOVER_SHIFT);
	tg3_phydsp_write(tp, MII_TG3_DSP_AADJ1CH0, phy);

	phy = ((otp & TG3_OTP_LPFDIS_MASK) >> TG3_OTP_LPFDIS_SHIFT);
	phy |= MII_TG3_DSP_AADJ1CH3_ADCCKADJ;
	tg3_phydsp_write(tp, MII_TG3_DSP_AADJ1CH3, phy);

	phy = ((otp & TG3_OTP_VDAC_MASK) >> TG3_OTP_VDAC_SHIFT);
	tg3_phydsp_write(tp, MII_TG3_DSP_EXP75, phy);

	phy = ((otp & TG3_OTP_10BTAMP_MASK) >> TG3_OTP_10BTAMP_SHIFT);
	tg3_phydsp_write(tp, MII_TG3_DSP_EXP96, phy);

	phy = ((otp & TG3_OTP_ROFF_MASK) >> TG3_OTP_ROFF_SHIFT) |
	      ((otp & TG3_OTP_RCOFF_MASK) >> TG3_OTP_RCOFF_SHIFT);
	tg3_phydsp_write(tp, MII_TG3_DSP_EXP97, phy);

	TG3_PHY_AUXCTL_SMDSP_DISABLE(tp);
}

static void tg3_phy_eee_adjust(struct tg3 *tp, u32 current_link_up)
{
	u32 val;

	if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP))
		return;

	tp->setlpicnt = 0;

	if (tp->link_config.autoneg == AUTONEG_ENABLE &&
	    current_link_up == 1 &&
	    tp->link_config.active_duplex == DUPLEX_FULL &&
	    (tp->link_config.active_speed == SPEED_100 ||
	     tp->link_config.active_speed == SPEED_1000)) {
		u32 eeectl;

		if (tp->link_config.active_speed == SPEED_1000)
			eeectl = TG3_CPMU_EEE_CTRL_EXIT_16_5_US;
		else
			eeectl = TG3_CPMU_EEE_CTRL_EXIT_36_US;

		tw32(TG3_CPMU_EEE_CTRL, eeectl);

		tg3_phy_cl45_read(tp, MDIO_MMD_AN,
				  TG3_CL45_D7_EEERES_STAT, &val);

		if (val == TG3_CL45_D7_EEERES_STAT_LP_1000T ||
		    val == TG3_CL45_D7_EEERES_STAT_LP_100TX)
			tp->setlpicnt = 2;
	}

	if (!tp->setlpicnt) {
		val = tr32(TG3_CPMU_EEE_MODE);
		tw32(TG3_CPMU_EEE_MODE, val & ~TG3_CPMU_EEEMD_LPI_ENABLE);
	}
}

static void tg3_phy_eee_enable(struct tg3 *tp)
{
	u32 val;

	if (tp->link_config.active_speed == SPEED_1000 &&
	    (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
	     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
	     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765) &&
	    !TG3_PHY_AUXCTL_SMDSP_ENABLE(tp)) {
		tg3_phydsp_write(tp, MII_TG3_DSP_TAP26, 0x0003);
		TG3_PHY_AUXCTL_SMDSP_DISABLE(tp);
	}

	val = tr32(TG3_CPMU_EEE_MODE);
	tw32(TG3_CPMU_EEE_MODE, val | TG3_CPMU_EEEMD_LPI_ENABLE);
}

static int tg3_wait_macro_done(struct tg3 *tp)
{
	int limit = 100;

	while (limit--) {
		u32 tmp32;

		if (!tg3_readphy(tp, MII_TG3_DSP_CONTROL, &tmp32)) {
			if ((tmp32 & 0x1000) == 0)
				break;
		}
	}
	if (limit < 0)
		return -EBUSY;

	return 0;
}

static int tg3_phy_write_and_check_testpat(struct tg3 *tp, int *resetp)
{
	static const u32 test_pat[4][6] = {
	{ 0x00005555, 0x00000005, 0x00002aaa, 0x0000000a, 0x00003456, 0x00000003 },
	{ 0x00002aaa, 0x0000000a, 0x00003333, 0x00000003, 0x0000789a, 0x00000005 },
	{ 0x00005a5a, 0x00000005, 0x00002a6a, 0x0000000a, 0x00001bcd, 0x00000003 },
	{ 0x00002a5a, 0x0000000a, 0x000033c3, 0x00000003, 0x00002ef1, 0x00000005 }
	};
	int chan;

	for (chan = 0; chan < 4; chan++) {
		int i;

		tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
			     (chan * 0x2000) | 0x0200);
		tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0002);

		for (i = 0; i < 6; i++)
			tg3_writephy(tp, MII_TG3_DSP_RW_PORT,
				     test_pat[chan][i]);

		tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0202);
		if (tg3_wait_macro_done(tp)) {
			*resetp = 1;
			return -EBUSY;
		}

		tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
			     (chan * 0x2000) | 0x0200);
		tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0082);
		if (tg3_wait_macro_done(tp)) {
			*resetp = 1;
			return -EBUSY;
		}

		tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0802);
		if (tg3_wait_macro_done(tp)) {
			*resetp = 1;
			return -EBUSY;
		}

		for (i = 0; i < 6; i += 2) {
			u32 low, high;

			if (tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &low) ||
			    tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &high) ||
			    tg3_wait_macro_done(tp)) {
				*resetp = 1;
				return -EBUSY;
			}
			low &= 0x7fff;
			high &= 0x000f;
			if (low != test_pat[chan][i] ||
			    high != test_pat[chan][i+1]) {
				tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000b);
				tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x4001);
				tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x4005);

				return -EBUSY;
			}
		}
	}

	return 0;
}

static int tg3_phy_reset_chanpat(struct tg3 *tp)
{
	int chan;

	for (chan = 0; chan < 4; chan++) {
		int i;

		tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
			     (chan * 0x2000) | 0x0200);
		tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0002);
		for (i = 0; i < 6; i++)
			tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x000);
		tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0202);
		if (tg3_wait_macro_done(tp))
			return -EBUSY;
	}

	return 0;
}

static int tg3_phy_reset_5703_4_5(struct tg3 *tp)
{
	u32 reg32, phy9_orig;
	int retries, do_phy_reset, err;

	retries = 10;
	do_phy_reset = 1;
	do {
		if (do_phy_reset) {
			err = tg3_bmcr_reset(tp);
			if (err)
				return err;
			do_phy_reset = 0;
		}

		/* Disable transmitter and interrupt.  */
		if (tg3_readphy(tp, MII_TG3_EXT_CTRL, &reg32))
			continue;

		reg32 |= 0x3000;
		tg3_writephy(tp, MII_TG3_EXT_CTRL, reg32);

		/* Set full-duplex, 1000 mbps.  */
		tg3_writephy(tp, MII_BMCR,
			     BMCR_FULLDPLX | TG3_BMCR_SPEED1000);

		/* Set to master mode.  */
		if (tg3_readphy(tp, MII_TG3_CTRL, &phy9_orig))
			continue;

		tg3_writephy(tp, MII_TG3_CTRL,
			     (MII_TG3_CTRL_AS_MASTER |
			      MII_TG3_CTRL_ENABLE_AS_MASTER));

		err = TG3_PHY_AUXCTL_SMDSP_ENABLE(tp);
		if (err)
			return err;

		/* Block the PHY control access.  */
		tg3_phydsp_write(tp, 0x8005, 0x0800);

		err = tg3_phy_write_and_check_testpat(tp, &do_phy_reset);
		if (!err)
			break;
	} while (--retries);

	err = tg3_phy_reset_chanpat(tp);
	if (err)
		return err;

	tg3_phydsp_write(tp, 0x8005, 0x0000);

	tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8200);
	tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0000);

	TG3_PHY_AUXCTL_SMDSP_DISABLE(tp);

	tg3_writephy(tp, MII_TG3_CTRL, phy9_orig);

	if (!tg3_readphy(tp, MII_TG3_EXT_CTRL, &reg32)) {
		reg32 &= ~0x3000;
		tg3_writephy(tp, MII_TG3_EXT_CTRL, reg32);
	} else if (!err)
		err = -EBUSY;

	return err;
}
2025 /* This will reset the tigon3 PHY unconditionally and reapply the
2026 * chip-specific workarounds that must follow a PHY reset.
2027 */
2028 static int tg3_phy_reset(struct tg3 *tp)
2029 {
2030 u32 val, cpmuctrl;
2031 int err;
2033 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
2034 val = tr32(GRC_MISC_CFG);
2035 tw32_f(GRC_MISC_CFG, val & ~GRC_MISC_CFG_EPHY_IDDQ);
2036 udelay(40);
2038 err = tg3_readphy(tp, MII_BMSR, &val);
2039 err |= tg3_readphy(tp, MII_BMSR, &val);
2040 if (err != 0)
2041 return -EBUSY;
2043 if (netif_running(tp->dev) && netif_carrier_ok(tp->dev)) {
2044 netif_carrier_off(tp->dev);
2045 tg3_link_report(tp);
2048 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
2049 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
2050 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
2051 err = tg3_phy_reset_5703_4_5(tp);
2052 if (err)
2053 return err;
2054 goto out;
2057 cpmuctrl = 0;
2058 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 &&
2059 GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5784_AX) {
2060 cpmuctrl = tr32(TG3_CPMU_CTRL);
2061 if (cpmuctrl & CPMU_CTRL_GPHY_10MB_RXONLY)
2062 tw32(TG3_CPMU_CTRL,
2063 cpmuctrl & ~CPMU_CTRL_GPHY_10MB_RXONLY);
2066 err = tg3_bmcr_reset(tp);
2067 if (err)
2068 return err;
2070 if (cpmuctrl & CPMU_CTRL_GPHY_10MB_RXONLY) {
2071 val = MII_TG3_DSP_EXP8_AEDW | MII_TG3_DSP_EXP8_REJ2MHz;
2072 tg3_phydsp_write(tp, MII_TG3_DSP_EXP8, val);
2074 tw32(TG3_CPMU_CTRL, cpmuctrl);
2077 if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5784_AX ||
2078 GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5761_AX) {
2079 val = tr32(TG3_CPMU_LSPD_1000MB_CLK);
2080 if ((val & CPMU_LSPD_1000MB_MACCLK_MASK) ==
2081 CPMU_LSPD_1000MB_MACCLK_12_5) {
2082 val &= ~CPMU_LSPD_1000MB_MACCLK_MASK;
2083 udelay(40);
2084 tw32_f(TG3_CPMU_LSPD_1000MB_CLK, val);
2088 if (tg3_flag(tp, 5717_PLUS) &&
2089 (tp->phy_flags & TG3_PHYFLG_MII_SERDES))
2090 return 0;
2092 tg3_phy_apply_otp(tp);
2094 if (tp->phy_flags & TG3_PHYFLG_ENABLE_APD)
2095 tg3_phy_toggle_apd(tp, true);
2096 else
2097 tg3_phy_toggle_apd(tp, false);
2099 out:
2100 if ((tp->phy_flags & TG3_PHYFLG_ADC_BUG) &&
2101 !TG3_PHY_AUXCTL_SMDSP_ENABLE(tp)) {
2102 tg3_phydsp_write(tp, 0x201f, 0x2aaa);
2103 tg3_phydsp_write(tp, 0x000a, 0x0323);
2104 TG3_PHY_AUXCTL_SMDSP_DISABLE(tp);
2107 if (tp->phy_flags & TG3_PHYFLG_5704_A0_BUG) {
2108 tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8d68);
2109 tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8d68);
2112 if (tp->phy_flags & TG3_PHYFLG_BER_BUG) {
2113 if (!TG3_PHY_AUXCTL_SMDSP_ENABLE(tp)) {
2114 tg3_phydsp_write(tp, 0x000a, 0x310b);
2115 tg3_phydsp_write(tp, 0x201f, 0x9506);
2116 tg3_phydsp_write(tp, 0x401f, 0x14e2);
2117 TG3_PHY_AUXCTL_SMDSP_DISABLE(tp);
2119 } else if (tp->phy_flags & TG3_PHYFLG_JITTER_BUG) {
2120 if (!TG3_PHY_AUXCTL_SMDSP_ENABLE(tp)) {
2121 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000a);
2122 if (tp->phy_flags & TG3_PHYFLG_ADJUST_TRIM) {
2123 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x110b);
2124 tg3_writephy(tp, MII_TG3_TEST1,
2125 MII_TG3_TEST1_TRIM_EN | 0x4);
2126 } else
2127 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x010b);
2129 TG3_PHY_AUXCTL_SMDSP_DISABLE(tp);
2133 /* Set Extended packet length bit (bit 14) on all chips
2134 * that support jumbo frames. */
2135 if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5401) {
2136 /* Cannot do read-modify-write on 5401 */
2137 tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_AUXCTL, 0x4c20);
2138 } else if (tg3_flag(tp, JUMBO_CAPABLE)) {
2139 /* Set bit 14 with read-modify-write to preserve other bits */
2140 err = tg3_phy_auxctl_read(tp,
2141 MII_TG3_AUXCTL_SHDWSEL_AUXCTL, &val);
2142 if (!err)
2143 tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_AUXCTL,
2144 val | MII_TG3_AUXCTL_ACTL_EXTPKTLEN);
2147 /* Set phy register 0x10 bit 0 to high fifo elasticity to support
2148 * jumbo frames transmission.
2149 */
2150 if (tg3_flag(tp, JUMBO_CAPABLE)) {
2151 if (!tg3_readphy(tp, MII_TG3_EXT_CTRL, &val))
2152 tg3_writephy(tp, MII_TG3_EXT_CTRL,
2153 val | MII_TG3_EXT_CTRL_FIFO_ELASTIC);
2156 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
2157 /* adjust output voltage */
2158 tg3_writephy(tp, MII_TG3_FET_PTEST, 0x12);
2161 tg3_phy_toggle_automdix(tp, 1);
2162 tg3_phy_set_wirespeed(tp);
2163 return 0;
2166 static void tg3_frob_aux_power(struct tg3 *tp)
2168 bool need_vaux = false;
2170 /* The GPIOs do something completely different on 57765. */
2171 if (!tg3_flag(tp, IS_NIC) ||
2172 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
2173 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765)
2174 return;
2176 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
2177 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714 ||
2178 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
2179 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720) &&
2180 tp->pdev_peer != tp->pdev) {
2181 struct net_device *dev_peer;
2183 dev_peer = pci_get_drvdata(tp->pdev_peer);
2185 /* remove_one() may have been run on the peer. */
2186 if (dev_peer) {
2187 struct tg3 *tp_peer = netdev_priv(dev_peer);
2189 if (tg3_flag(tp_peer, INIT_COMPLETE))
2190 return;
2192 if (tg3_flag(tp_peer, WOL_ENABLE) ||
2193 tg3_flag(tp_peer, ENABLE_ASF))
2194 need_vaux = true;
2198 if (tg3_flag(tp, WOL_ENABLE) || tg3_flag(tp, ENABLE_ASF))
2199 need_vaux = true;
2201 if (need_vaux) {
2202 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
2203 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
2204 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
2205 (GRC_LCLCTRL_GPIO_OE0 |
2206 GRC_LCLCTRL_GPIO_OE1 |
2207 GRC_LCLCTRL_GPIO_OE2 |
2208 GRC_LCLCTRL_GPIO_OUTPUT0 |
2209 GRC_LCLCTRL_GPIO_OUTPUT1),
2210 100);
2211 } else if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761 ||
2212 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761S) {
2213 /* The 5761 non-e device swaps GPIO 0 and GPIO 2. */
2214 u32 grc_local_ctrl = GRC_LCLCTRL_GPIO_OE0 |
2215 GRC_LCLCTRL_GPIO_OE1 |
2216 GRC_LCLCTRL_GPIO_OE2 |
2217 GRC_LCLCTRL_GPIO_OUTPUT0 |
2218 GRC_LCLCTRL_GPIO_OUTPUT1 |
2219 tp->grc_local_ctrl;
2220 tw32_wait_f(GRC_LOCAL_CTRL, grc_local_ctrl, 100);
2222 grc_local_ctrl |= GRC_LCLCTRL_GPIO_OUTPUT2;
2223 tw32_wait_f(GRC_LOCAL_CTRL, grc_local_ctrl, 100);
2225 grc_local_ctrl &= ~GRC_LCLCTRL_GPIO_OUTPUT0;
2226 tw32_wait_f(GRC_LOCAL_CTRL, grc_local_ctrl, 100);
2227 } else {
2228 u32 no_gpio2;
2229 u32 grc_local_ctrl = 0;
2231 /* Workaround to prevent overdrawing Amps. */
2232 if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
2233 ASIC_REV_5714) {
2234 grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE3;
2235 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
2236 grc_local_ctrl, 100);
2239 /* On 5753 and variants, GPIO2 cannot be used. */
2240 no_gpio2 = tp->nic_sram_data_cfg &
2241 NIC_SRAM_DATA_CFG_NO_GPIO2;
2243 grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE0 |
2244 GRC_LCLCTRL_GPIO_OE1 |
2245 GRC_LCLCTRL_GPIO_OE2 |
2246 GRC_LCLCTRL_GPIO_OUTPUT1 |
2247 GRC_LCLCTRL_GPIO_OUTPUT2;
2248 if (no_gpio2) {
2249 grc_local_ctrl &= ~(GRC_LCLCTRL_GPIO_OE2 |
2250 GRC_LCLCTRL_GPIO_OUTPUT2);
2252 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
2253 grc_local_ctrl, 100);
2255 grc_local_ctrl |= GRC_LCLCTRL_GPIO_OUTPUT0;
2257 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
2258 grc_local_ctrl, 100);
2260 if (!no_gpio2) {
2261 grc_local_ctrl &= ~GRC_LCLCTRL_GPIO_OUTPUT2;
2262 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
2263 grc_local_ctrl, 100);
2266 } else {
2267 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
2268 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701) {
2269 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
2270 (GRC_LCLCTRL_GPIO_OE1 |
2271 GRC_LCLCTRL_GPIO_OUTPUT1), 100);
2273 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
2274 GRC_LCLCTRL_GPIO_OE1, 100);
2276 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
2277 (GRC_LCLCTRL_GPIO_OE1 |
2278 GRC_LCLCTRL_GPIO_OUTPUT1), 100);
2279 }
2280 }
2281 }
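/* A sketch of the decision tg3_5700_link_polarity() below makes,
 * read straight from its body:
 *
 *   LED_CTRL_MODE_PHY_2  -> always invert link polarity
 *   BCM5411 PHY          -> invert at any speed except 10 Mbps
 *   any other PHY        -> invert at 10 Mbps only
 */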
2283 static int tg3_5700_link_polarity(struct tg3 *tp, u32 speed)
2284 {
2285 if (tp->led_ctrl == LED_CTRL_MODE_PHY_2)
2286 return 1;
2287 else if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5411) {
2288 if (speed != SPEED_10)
2289 return 1;
2290 } else if (speed == SPEED_10)
2291 return 1;
2293 return 0;
2294 }
2296 static int tg3_setup_phy(struct tg3 *, int);
2298 #define RESET_KIND_SHUTDOWN 0
2299 #define RESET_KIND_INIT 1
2300 #define RESET_KIND_SUSPEND 2
2302 static void tg3_write_sig_post_reset(struct tg3 *, int);
2303 static int tg3_halt_cpu(struct tg3 *, u32);
2305 static void tg3_power_down_phy(struct tg3 *tp, bool do_low_power)
2307 u32 val;
2309 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
2310 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
2311 u32 sg_dig_ctrl = tr32(SG_DIG_CTRL);
2312 u32 serdes_cfg = tr32(MAC_SERDES_CFG);
2314 sg_dig_ctrl |=
2315 SG_DIG_USING_HW_AUTONEG | SG_DIG_SOFT_RESET;
2316 tw32(SG_DIG_CTRL, sg_dig_ctrl);
2317 tw32(MAC_SERDES_CFG, serdes_cfg | (1 << 15));
2319 return;
2322 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
2323 tg3_bmcr_reset(tp);
2324 val = tr32(GRC_MISC_CFG);
2325 tw32_f(GRC_MISC_CFG, val | GRC_MISC_CFG_EPHY_IDDQ);
2326 udelay(40);
2327 return;
2328 } else if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
2329 u32 phytest;
2330 if (!tg3_readphy(tp, MII_TG3_FET_TEST, &phytest)) {
2331 u32 phy;
2333 tg3_writephy(tp, MII_ADVERTISE, 0);
2334 tg3_writephy(tp, MII_BMCR,
2335 BMCR_ANENABLE | BMCR_ANRESTART);
2337 tg3_writephy(tp, MII_TG3_FET_TEST,
2338 phytest | MII_TG3_FET_SHADOW_EN);
2339 if (!tg3_readphy(tp, MII_TG3_FET_SHDW_AUXMODE4, &phy)) {
2340 phy |= MII_TG3_FET_SHDW_AUXMODE4_SBPD;
2341 tg3_writephy(tp,
2342 MII_TG3_FET_SHDW_AUXMODE4,
2343 phy);
2345 tg3_writephy(tp, MII_TG3_FET_TEST, phytest);
2347 return;
2348 } else if (do_low_power) {
2349 tg3_writephy(tp, MII_TG3_EXT_CTRL,
2350 MII_TG3_EXT_CTRL_FORCE_LED_OFF);
2352 val = MII_TG3_AUXCTL_PCTL_100TX_LPWR |
2353 MII_TG3_AUXCTL_PCTL_SPR_ISOLATE |
2354 MII_TG3_AUXCTL_PCTL_VREG_11V;
2355 tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_PWRCTL, val);
2358 /* The PHY should not be powered down on some chips because
2359 * of bugs.
2360 */
2361 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
2362 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
2363 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780 &&
2364 (tp->phy_flags & TG3_PHYFLG_MII_SERDES)))
2365 return;
2367 if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5784_AX ||
2368 GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5761_AX) {
2369 val = tr32(TG3_CPMU_LSPD_1000MB_CLK);
2370 val &= ~CPMU_LSPD_1000MB_MACCLK_MASK;
2371 val |= CPMU_LSPD_1000MB_MACCLK_12_5;
2372 tw32_f(TG3_CPMU_LSPD_1000MB_CLK, val);
2375 tg3_writephy(tp, MII_BMCR, BMCR_PDOWN);
2378 /* tp->lock is held. */
2379 static int tg3_nvram_lock(struct tg3 *tp)
2380 {
2381 if (tg3_flag(tp, NVRAM)) {
2382 int i;
2384 if (tp->nvram_lock_cnt == 0) {
2385 tw32(NVRAM_SWARB, SWARB_REQ_SET1);
2386 for (i = 0; i < 8000; i++) {
2387 if (tr32(NVRAM_SWARB) & SWARB_GNT1)
2388 break;
2389 udelay(20);
2390 }
2391 if (i == 8000) {
2392 tw32(NVRAM_SWARB, SWARB_REQ_CLR1);
2393 return -ENODEV;
2394 }
2395 }
2396 tp->nvram_lock_cnt++;
2397 }
2398 return 0;
2399 }
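/* The NVRAM lock is reference counted: nested tg3_nvram_lock() calls
 * just bump nvram_lock_cnt, and the SWARB arbitration grant is only
 * released once the count drops back to zero in tg3_nvram_unlock().
 */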
2401 /* tp->lock is held. */
2402 static void tg3_nvram_unlock(struct tg3 *tp)
2403 {
2404 if (tg3_flag(tp, NVRAM)) {
2405 if (tp->nvram_lock_cnt > 0)
2406 tp->nvram_lock_cnt--;
2407 if (tp->nvram_lock_cnt == 0)
2408 tw32_f(NVRAM_SWARB, SWARB_REQ_CLR1);
2409 }
2410 }
2412 /* tp->lock is held. */
2413 static void tg3_enable_nvram_access(struct tg3 *tp)
2414 {
2415 if (tg3_flag(tp, 5750_PLUS) && !tg3_flag(tp, PROTECTED_NVRAM)) {
2416 u32 nvaccess = tr32(NVRAM_ACCESS);
2418 tw32(NVRAM_ACCESS, nvaccess | ACCESS_ENABLE);
2419 }
2420 }
2422 /* tp->lock is held. */
2423 static void tg3_disable_nvram_access(struct tg3 *tp)
2424 {
2425 if (tg3_flag(tp, 5750_PLUS) && !tg3_flag(tp, PROTECTED_NVRAM)) {
2426 u32 nvaccess = tr32(NVRAM_ACCESS);
2428 tw32(NVRAM_ACCESS, nvaccess & ~ACCESS_ENABLE);
2429 }
2430 }
2432 static int tg3_nvram_read_using_eeprom(struct tg3 *tp,
2433 u32 offset, u32 *val)
2434 {
2435 u32 tmp;
2436 int i;
2438 if (offset > EEPROM_ADDR_ADDR_MASK || (offset % 4) != 0)
2439 return -EINVAL;
2441 tmp = tr32(GRC_EEPROM_ADDR) & ~(EEPROM_ADDR_ADDR_MASK |
2442 EEPROM_ADDR_DEVID_MASK |
2443 EEPROM_ADDR_READ);
2444 tw32(GRC_EEPROM_ADDR,
2445 tmp |
2446 (0 << EEPROM_ADDR_DEVID_SHIFT) |
2447 ((offset << EEPROM_ADDR_ADDR_SHIFT) &
2448 EEPROM_ADDR_ADDR_MASK) |
2449 EEPROM_ADDR_READ | EEPROM_ADDR_START);
2451 for (i = 0; i < 1000; i++) {
2452 tmp = tr32(GRC_EEPROM_ADDR);
2454 if (tmp & EEPROM_ADDR_COMPLETE)
2455 break;
2456 msleep(1);
2457 }
2458 if (!(tmp & EEPROM_ADDR_COMPLETE))
2459 return -EBUSY;
2461 tmp = tr32(GRC_EEPROM_DATA);
2463 /*
2464 * The data will always be opposite the native endian
2465 * format. Perform a blind byteswap to compensate.
2466 */
2467 *val = swab32(tmp);
2469 return 0;
2470 }
2472 #define NVRAM_CMD_TIMEOUT 10000
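/* tg3_nvram_exec_cmd() below polls NVRAM_CMD every 10 us, so this
 * timeout bounds a single NVRAM command at roughly 100 ms before it
 * is abandoned with -EBUSY.
 */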
2474 static int tg3_nvram_exec_cmd(struct tg3 *tp, u32 nvram_cmd)
2475 {
2476 int i;
2478 tw32(NVRAM_CMD, nvram_cmd);
2479 for (i = 0; i < NVRAM_CMD_TIMEOUT; i++) {
2480 udelay(10);
2481 if (tr32(NVRAM_CMD) & NVRAM_CMD_DONE) {
2482 udelay(10);
2483 break;
2484 }
2485 }
2487 if (i == NVRAM_CMD_TIMEOUT)
2488 return -EBUSY;
2490 return 0;
2491 }
2493 static u32 tg3_nvram_phys_addr(struct tg3 *tp, u32 addr)
2494 {
2495 if (tg3_flag(tp, NVRAM) &&
2496 tg3_flag(tp, NVRAM_BUFFERED) &&
2497 tg3_flag(tp, FLASH) &&
2498 !tg3_flag(tp, NO_NVRAM_ADDR_TRANS) &&
2499 (tp->nvram_jedecnum == JEDEC_ATMEL))
2501 addr = ((addr / tp->nvram_pagesize) <<
2502 ATMEL_AT45DB0X1B_PAGE_POS) +
2503 (addr % tp->nvram_pagesize);
2505 return addr;
2506 }
2508 static u32 tg3_nvram_logical_addr(struct tg3 *tp, u32 addr)
2509 {
2510 if (tg3_flag(tp, NVRAM) &&
2511 tg3_flag(tp, NVRAM_BUFFERED) &&
2512 tg3_flag(tp, FLASH) &&
2513 !tg3_flag(tp, NO_NVRAM_ADDR_TRANS) &&
2514 (tp->nvram_jedecnum == JEDEC_ATMEL))
2516 addr = ((addr >> ATMEL_AT45DB0X1B_PAGE_POS) *
2517 tp->nvram_pagesize) +
2518 (addr & ((1 << ATMEL_AT45DB0X1B_PAGE_POS) - 1));
2520 return addr;
2521 }
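/* Worked example for the two translations above (illustrative only,
 * assuming the 264-byte pages and ATMEL_AT45DB0X1B_PAGE_POS == 9 used
 * with these Atmel parts): linear offset 8192 falls in page 31 at
 * byte 8, so tg3_nvram_phys_addr() returns (31 << 9) + 8 = 0x3e08,
 * and tg3_nvram_logical_addr() maps 0x3e08 back to 8192.
 */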
2523 /* NOTE: Data read in from NVRAM is byteswapped according to
2524 * the byteswapping settings for all other register accesses.
2525 * tg3 devices are BE devices, so on a BE machine, the data
2526 * returned will be exactly as it is seen in NVRAM. On a LE
2527 * machine, the 32-bit value will be byteswapped.
2528 */
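/* Illustration: for the NVRAM bytes aa bb cc dd, tg3_nvram_read()
 * returns the value 0xaabbccdd; only on a big-endian host does that
 * value's in-memory layout match the NVRAM byte order.
 * tg3_nvram_read_be32() further below restores the aa bb cc dd byte
 * order on either host.
 */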
2529 static int tg3_nvram_read(struct tg3 *tp, u32 offset, u32 *val)
2530 {
2531 int ret;
2533 if (!tg3_flag(tp, NVRAM))
2534 return tg3_nvram_read_using_eeprom(tp, offset, val);
2536 offset = tg3_nvram_phys_addr(tp, offset);
2538 if (offset > NVRAM_ADDR_MSK)
2539 return -EINVAL;
2541 ret = tg3_nvram_lock(tp);
2542 if (ret)
2543 return ret;
2545 tg3_enable_nvram_access(tp);
2547 tw32(NVRAM_ADDR, offset);
2548 ret = tg3_nvram_exec_cmd(tp, NVRAM_CMD_RD | NVRAM_CMD_GO |
2549 NVRAM_CMD_FIRST | NVRAM_CMD_LAST | NVRAM_CMD_DONE);
2551 if (ret == 0)
2552 *val = tr32(NVRAM_RDDATA);
2554 tg3_disable_nvram_access(tp);
2556 tg3_nvram_unlock(tp);
2558 return ret;
2559 }
2561 /* Ensures NVRAM data is in bytestream format. */
2562 static int tg3_nvram_read_be32(struct tg3 *tp, u32 offset, __be32 *val)
2563 {
2564 u32 v;
2565 int res = tg3_nvram_read(tp, offset, &v);
2566 if (!res)
2567 *val = cpu_to_be32(v);
2568 return res;
2569 }
2571 /* tp->lock is held. */
2572 static void __tg3_set_mac_addr(struct tg3 *tp, int skip_mac_1)
2574 u32 addr_high, addr_low;
2575 int i;
2577 addr_high = ((tp->dev->dev_addr[0] << 8) |
2578 tp->dev->dev_addr[1]);
2579 addr_low = ((tp->dev->dev_addr[2] << 24) |
2580 (tp->dev->dev_addr[3] << 16) |
2581 (tp->dev->dev_addr[4] << 8) |
2582 (tp->dev->dev_addr[5] << 0));
2583 for (i = 0; i < 4; i++) {
2584 if (i == 1 && skip_mac_1)
2585 continue;
2586 tw32(MAC_ADDR_0_HIGH + (i * 8), addr_high);
2587 tw32(MAC_ADDR_0_LOW + (i * 8), addr_low);
2590 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
2591 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
2592 for (i = 0; i < 12; i++) {
2593 tw32(MAC_EXTADDR_0_HIGH + (i * 8), addr_high);
2594 tw32(MAC_EXTADDR_0_LOW + (i * 8), addr_low);
2598 addr_high = (tp->dev->dev_addr[0] +
2599 tp->dev->dev_addr[1] +
2600 tp->dev->dev_addr[2] +
2601 tp->dev->dev_addr[3] +
2602 tp->dev->dev_addr[4] +
2603 tp->dev->dev_addr[5]) &
2604 TX_BACKOFF_SEED_MASK;
2605 tw32(MAC_TX_BACKOFF_SEED, addr_high);
2606 }
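/* Illustration: a MAC address of 00:10:18:a1:b2:c3 sums to 0x23e, so
 * that value (after masking with TX_BACKOFF_SEED_MASK) seeds the
 * transmit backoff generator; stations with different addresses thus
 * start from different backoff seeds.
 */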
2608 static void tg3_enable_register_access(struct tg3 *tp)
2609 {
2610 /*
2611 * Make sure register accesses (indirect or otherwise) will function
2612 * correctly.
2613 */
2614 pci_write_config_dword(tp->pdev,
2615 TG3PCI_MISC_HOST_CTRL, tp->misc_host_ctrl);
2616 }
2618 static int tg3_power_up(struct tg3 *tp)
2619 {
2620 tg3_enable_register_access(tp);
2622 pci_set_power_state(tp->pdev, PCI_D0);
2624 /* Switch out of Vaux if it is a NIC */
2625 if (tg3_flag(tp, IS_NIC))
2626 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl, 100);
2628 return 0;
2629 }
2631 static int tg3_power_down_prepare(struct tg3 *tp)
2633 u32 misc_host_ctrl;
2634 bool device_should_wake, do_low_power;
2636 tg3_enable_register_access(tp);
2638 /* Restore the CLKREQ setting. */
2639 if (tg3_flag(tp, CLKREQ_BUG)) {
2640 u16 lnkctl;
2642 pci_read_config_word(tp->pdev,
2643 tp->pcie_cap + PCI_EXP_LNKCTL,
2644 &lnkctl);
2645 lnkctl |= PCI_EXP_LNKCTL_CLKREQ_EN;
2646 pci_write_config_word(tp->pdev,
2647 tp->pcie_cap + PCI_EXP_LNKCTL,
2648 lnkctl);
2651 misc_host_ctrl = tr32(TG3PCI_MISC_HOST_CTRL);
2652 tw32(TG3PCI_MISC_HOST_CTRL,
2653 misc_host_ctrl | MISC_HOST_CTRL_MASK_PCI_INT);
2655 device_should_wake = device_may_wakeup(&tp->pdev->dev) &&
2656 tg3_flag(tp, WOL_ENABLE);
2658 if (tg3_flag(tp, USE_PHYLIB)) {
2659 do_low_power = false;
2660 if ((tp->phy_flags & TG3_PHYFLG_IS_CONNECTED) &&
2661 !(tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)) {
2662 struct phy_device *phydev;
2663 u32 phyid, advertising;
2665 phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
2667 tp->phy_flags |= TG3_PHYFLG_IS_LOW_POWER;
2669 tp->link_config.orig_speed = phydev->speed;
2670 tp->link_config.orig_duplex = phydev->duplex;
2671 tp->link_config.orig_autoneg = phydev->autoneg;
2672 tp->link_config.orig_advertising = phydev->advertising;
2674 advertising = ADVERTISED_TP |
2675 ADVERTISED_Pause |
2676 ADVERTISED_Autoneg |
2677 ADVERTISED_10baseT_Half;
2679 if (tg3_flag(tp, ENABLE_ASF) || device_should_wake) {
2680 if (tg3_flag(tp, WOL_SPEED_100MB))
2681 advertising |=
2682 ADVERTISED_100baseT_Half |
2683 ADVERTISED_100baseT_Full |
2684 ADVERTISED_10baseT_Full;
2685 else
2686 advertising |= ADVERTISED_10baseT_Full;
2689 phydev->advertising = advertising;
2691 phy_start_aneg(phydev);
2693 phyid = phydev->drv->phy_id & phydev->drv->phy_id_mask;
2694 if (phyid != PHY_ID_BCMAC131) {
2695 phyid &= PHY_BCM_OUI_MASK;
2696 if (phyid == PHY_BCM_OUI_1 ||
2697 phyid == PHY_BCM_OUI_2 ||
2698 phyid == PHY_BCM_OUI_3)
2699 do_low_power = true;
2702 } else {
2703 do_low_power = true;
2705 if (!(tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)) {
2706 tp->phy_flags |= TG3_PHYFLG_IS_LOW_POWER;
2707 tp->link_config.orig_speed = tp->link_config.speed;
2708 tp->link_config.orig_duplex = tp->link_config.duplex;
2709 tp->link_config.orig_autoneg = tp->link_config.autoneg;
2712 if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES)) {
2713 tp->link_config.speed = SPEED_10;
2714 tp->link_config.duplex = DUPLEX_HALF;
2715 tp->link_config.autoneg = AUTONEG_ENABLE;
2716 tg3_setup_phy(tp, 0);
2720 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
2721 u32 val;
2723 val = tr32(GRC_VCPU_EXT_CTRL);
2724 tw32(GRC_VCPU_EXT_CTRL, val | GRC_VCPU_EXT_CTRL_DISABLE_WOL);
2725 } else if (!tg3_flag(tp, ENABLE_ASF)) {
2726 int i;
2727 u32 val;
2729 for (i = 0; i < 200; i++) {
2730 tg3_read_mem(tp, NIC_SRAM_FW_ASF_STATUS_MBOX, &val);
2731 if (val == ~NIC_SRAM_FIRMWARE_MBOX_MAGIC1)
2732 break;
2733 msleep(1);
2736 if (tg3_flag(tp, WOL_CAP))
2737 tg3_write_mem(tp, NIC_SRAM_WOL_MBOX, WOL_SIGNATURE |
2738 WOL_DRV_STATE_SHUTDOWN |
2739 WOL_DRV_WOL |
2740 WOL_SET_MAGIC_PKT);
2742 if (device_should_wake) {
2743 u32 mac_mode;
2745 if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES)) {
2746 if (do_low_power &&
2747 !(tp->phy_flags & TG3_PHYFLG_IS_FET)) {
2748 tg3_phy_auxctl_write(tp,
2749 MII_TG3_AUXCTL_SHDWSEL_PWRCTL,
2750 MII_TG3_AUXCTL_PCTL_WOL_EN |
2751 MII_TG3_AUXCTL_PCTL_100TX_LPWR |
2752 MII_TG3_AUXCTL_PCTL_CL_AB_TXDAC);
2753 udelay(40);
2756 if (tp->phy_flags & TG3_PHYFLG_MII_SERDES)
2757 mac_mode = MAC_MODE_PORT_MODE_GMII;
2758 else
2759 mac_mode = MAC_MODE_PORT_MODE_MII;
2761 mac_mode |= tp->mac_mode & MAC_MODE_LINK_POLARITY;
2762 if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
2763 ASIC_REV_5700) {
2764 u32 speed = tg3_flag(tp, WOL_SPEED_100MB) ?
2765 SPEED_100 : SPEED_10;
2766 if (tg3_5700_link_polarity(tp, speed))
2767 mac_mode |= MAC_MODE_LINK_POLARITY;
2768 else
2769 mac_mode &= ~MAC_MODE_LINK_POLARITY;
2771 } else {
2772 mac_mode = MAC_MODE_PORT_MODE_TBI;
2775 if (!tg3_flag(tp, 5750_PLUS))
2776 tw32(MAC_LED_CTRL, tp->led_ctrl);
2778 mac_mode |= MAC_MODE_MAGIC_PKT_ENABLE;
2779 if ((tg3_flag(tp, 5705_PLUS) && !tg3_flag(tp, 5780_CLASS)) &&
2780 (tg3_flag(tp, ENABLE_ASF) || tg3_flag(tp, ENABLE_APE)))
2781 mac_mode |= MAC_MODE_KEEP_FRAME_IN_WOL;
2783 if (tg3_flag(tp, ENABLE_APE))
2784 mac_mode |= MAC_MODE_APE_TX_EN |
2785 MAC_MODE_APE_RX_EN |
2786 MAC_MODE_TDE_ENABLE;
2788 tw32_f(MAC_MODE, mac_mode);
2789 udelay(100);
2791 tw32_f(MAC_RX_MODE, RX_MODE_ENABLE);
2792 udelay(10);
2795 if (!tg3_flag(tp, WOL_SPEED_100MB) &&
2796 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
2797 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701)) {
2798 u32 base_val;
2800 base_val = tp->pci_clock_ctrl;
2801 base_val |= (CLOCK_CTRL_RXCLK_DISABLE |
2802 CLOCK_CTRL_TXCLK_DISABLE);
2804 tw32_wait_f(TG3PCI_CLOCK_CTRL, base_val | CLOCK_CTRL_ALTCLK |
2805 CLOCK_CTRL_PWRDOWN_PLL133, 40);
2806 } else if (tg3_flag(tp, 5780_CLASS) ||
2807 tg3_flag(tp, CPMU_PRESENT) ||
2808 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)) {
2809 /* do nothing */
2810 } else if (!(tg3_flag(tp, 5750_PLUS) && tg3_flag(tp, ENABLE_ASF))) {
2811 u32 newbits1, newbits2;
2813 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
2814 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
2815 newbits1 = (CLOCK_CTRL_RXCLK_DISABLE |
2816 CLOCK_CTRL_TXCLK_DISABLE |
2817 CLOCK_CTRL_ALTCLK);
2818 newbits2 = newbits1 | CLOCK_CTRL_44MHZ_CORE;
2819 } else if (tg3_flag(tp, 5705_PLUS)) {
2820 newbits1 = CLOCK_CTRL_625_CORE;
2821 newbits2 = newbits1 | CLOCK_CTRL_ALTCLK;
2822 } else {
2823 newbits1 = CLOCK_CTRL_ALTCLK;
2824 newbits2 = newbits1 | CLOCK_CTRL_44MHZ_CORE;
2827 tw32_wait_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl | newbits1,
2828 40);
2830 tw32_wait_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl | newbits2,
2831 40);
2833 if (!tg3_flag(tp, 5705_PLUS)) {
2834 u32 newbits3;
2836 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
2837 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
2838 newbits3 = (CLOCK_CTRL_RXCLK_DISABLE |
2839 CLOCK_CTRL_TXCLK_DISABLE |
2840 CLOCK_CTRL_44MHZ_CORE);
2841 } else {
2842 newbits3 = CLOCK_CTRL_44MHZ_CORE;
2845 tw32_wait_f(TG3PCI_CLOCK_CTRL,
2846 tp->pci_clock_ctrl | newbits3, 40);
2850 if (!(device_should_wake) && !tg3_flag(tp, ENABLE_ASF))
2851 tg3_power_down_phy(tp, do_low_power);
2853 tg3_frob_aux_power(tp);
2855 /* Workaround for unstable PLL clock */
2856 if ((GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_AX) ||
2857 (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_BX)) {
2858 u32 val = tr32(0x7d00);
2860 val &= ~((1 << 16) | (1 << 4) | (1 << 2) | (1 << 1) | 1);
2861 tw32(0x7d00, val);
2862 if (!tg3_flag(tp, ENABLE_ASF)) {
2863 int err;
2865 err = tg3_nvram_lock(tp);
2866 tg3_halt_cpu(tp, RX_CPU_BASE);
2867 if (!err)
2868 tg3_nvram_unlock(tp);
2872 tg3_write_sig_post_reset(tp, RESET_KIND_SHUTDOWN);
2874 return 0;
2877 static void tg3_power_down(struct tg3 *tp)
2878 {
2879 tg3_power_down_prepare(tp);
2881 pci_wake_from_d3(tp->pdev, tg3_flag(tp, WOL_ENABLE));
2882 pci_set_power_state(tp->pdev, PCI_D3hot);
2883 }
2885 static void tg3_aux_stat_to_speed_duplex(struct tg3 *tp, u32 val, u16 *speed, u8 *duplex)
2886 {
2887 switch (val & MII_TG3_AUX_STAT_SPDMASK) {
2888 case MII_TG3_AUX_STAT_10HALF:
2889 *speed = SPEED_10;
2890 *duplex = DUPLEX_HALF;
2891 break;
2893 case MII_TG3_AUX_STAT_10FULL:
2894 *speed = SPEED_10;
2895 *duplex = DUPLEX_FULL;
2896 break;
2898 case MII_TG3_AUX_STAT_100HALF:
2899 *speed = SPEED_100;
2900 *duplex = DUPLEX_HALF;
2901 break;
2903 case MII_TG3_AUX_STAT_100FULL:
2904 *speed = SPEED_100;
2905 *duplex = DUPLEX_FULL;
2906 break;
2908 case MII_TG3_AUX_STAT_1000HALF:
2909 *speed = SPEED_1000;
2910 *duplex = DUPLEX_HALF;
2911 break;
2913 case MII_TG3_AUX_STAT_1000FULL:
2914 *speed = SPEED_1000;
2915 *duplex = DUPLEX_FULL;
2916 break;
2918 default:
2919 if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
2920 *speed = (val & MII_TG3_AUX_STAT_100) ? SPEED_100 :
2921 SPEED_10;
2922 *duplex = (val & MII_TG3_AUX_STAT_FULL) ? DUPLEX_FULL :
2923 DUPLEX_HALF;
2924 break;
2925 }
2926 *speed = SPEED_INVALID;
2927 *duplex = DUPLEX_INVALID;
2928 break;
2929 }
2930 }
2932 static int tg3_phy_autoneg_cfg(struct tg3 *tp, u32 advertise, u32 flowctrl)
2934 int err = 0;
2935 u32 val, new_adv;
2937 new_adv = ADVERTISE_CSMA;
2938 if (advertise & ADVERTISED_10baseT_Half)
2939 new_adv |= ADVERTISE_10HALF;
2940 if (advertise & ADVERTISED_10baseT_Full)
2941 new_adv |= ADVERTISE_10FULL;
2942 if (advertise & ADVERTISED_100baseT_Half)
2943 new_adv |= ADVERTISE_100HALF;
2944 if (advertise & ADVERTISED_100baseT_Full)
2945 new_adv |= ADVERTISE_100FULL;
2947 new_adv |= tg3_advert_flowctrl_1000T(flowctrl);
2949 err = tg3_writephy(tp, MII_ADVERTISE, new_adv);
2950 if (err)
2951 goto done;
2953 if (tp->phy_flags & TG3_PHYFLG_10_100_ONLY)
2954 goto done;
2956 new_adv = 0;
2957 if (advertise & ADVERTISED_1000baseT_Half)
2958 new_adv |= MII_TG3_CTRL_ADV_1000_HALF;
2959 if (advertise & ADVERTISED_1000baseT_Full)
2960 new_adv |= MII_TG3_CTRL_ADV_1000_FULL;
2962 if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
2963 tp->pci_chip_rev_id == CHIPREV_ID_5701_B0)
2964 new_adv |= (MII_TG3_CTRL_AS_MASTER |
2965 MII_TG3_CTRL_ENABLE_AS_MASTER);
2967 err = tg3_writephy(tp, MII_TG3_CTRL, new_adv);
2968 if (err)
2969 goto done;
2971 if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP))
2972 goto done;
2974 tw32(TG3_CPMU_EEE_MODE,
2975 tr32(TG3_CPMU_EEE_MODE) & ~TG3_CPMU_EEEMD_LPI_ENABLE);
2977 err = TG3_PHY_AUXCTL_SMDSP_ENABLE(tp);
2978 if (!err) {
2979 u32 err2;
2981 switch (GET_ASIC_REV(tp->pci_chip_rev_id)) {
2982 case ASIC_REV_5717:
2983 case ASIC_REV_57765:
2984 if (!tg3_phydsp_read(tp, MII_TG3_DSP_CH34TP2, &val))
2985 tg3_phydsp_write(tp, MII_TG3_DSP_CH34TP2, val |
2986 MII_TG3_DSP_CH34TP2_HIBW01);
2987 /* Fall through */
2988 case ASIC_REV_5719:
2989 val = MII_TG3_DSP_TAP26_ALNOKO |
2990 MII_TG3_DSP_TAP26_RMRXSTO |
2991 MII_TG3_DSP_TAP26_OPCSINPT;
2992 tg3_phydsp_write(tp, MII_TG3_DSP_TAP26, val);
2995 val = 0;
2996 /* Advertise 100BASE-TX EEE ability */
2997 if (advertise & ADVERTISED_100baseT_Full)
2998 val |= MDIO_AN_EEE_ADV_100TX;
2999 /* Advertise 1000BASE-T EEE ability */
3000 if (advertise & ADVERTISED_1000baseT_Full)
3001 val |= MDIO_AN_EEE_ADV_1000T;
3002 err = tg3_phy_cl45_write(tp, MDIO_MMD_AN, MDIO_AN_EEE_ADV, val);
3004 err2 = TG3_PHY_AUXCTL_SMDSP_DISABLE(tp);
3005 if (!err)
3006 err = err2;
3009 done:
3010 return err;
3013 static void tg3_phy_copper_begin(struct tg3 *tp)
3015 u32 new_adv;
3016 int i;
3018 if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) {
3019 new_adv = ADVERTISED_10baseT_Half |
3020 ADVERTISED_10baseT_Full;
3021 if (tg3_flag(tp, WOL_SPEED_100MB))
3022 new_adv |= ADVERTISED_100baseT_Half |
3023 ADVERTISED_100baseT_Full;
3025 tg3_phy_autoneg_cfg(tp, new_adv,
3026 FLOW_CTRL_TX | FLOW_CTRL_RX);
3027 } else if (tp->link_config.speed == SPEED_INVALID) {
3028 if (tp->phy_flags & TG3_PHYFLG_10_100_ONLY)
3029 tp->link_config.advertising &=
3030 ~(ADVERTISED_1000baseT_Half |
3031 ADVERTISED_1000baseT_Full);
3033 tg3_phy_autoneg_cfg(tp, tp->link_config.advertising,
3034 tp->link_config.flowctrl);
3035 } else {
3036 /* Asking for a specific link mode. */
3037 if (tp->link_config.speed == SPEED_1000) {
3038 if (tp->link_config.duplex == DUPLEX_FULL)
3039 new_adv = ADVERTISED_1000baseT_Full;
3040 else
3041 new_adv = ADVERTISED_1000baseT_Half;
3042 } else if (tp->link_config.speed == SPEED_100) {
3043 if (tp->link_config.duplex == DUPLEX_FULL)
3044 new_adv = ADVERTISED_100baseT_Full;
3045 else
3046 new_adv = ADVERTISED_100baseT_Half;
3047 } else {
3048 if (tp->link_config.duplex == DUPLEX_FULL)
3049 new_adv = ADVERTISED_10baseT_Full;
3050 else
3051 new_adv = ADVERTISED_10baseT_Half;
3054 tg3_phy_autoneg_cfg(tp, new_adv,
3055 tp->link_config.flowctrl);
3058 if (tp->link_config.autoneg == AUTONEG_DISABLE &&
3059 tp->link_config.speed != SPEED_INVALID) {
3060 u32 bmcr, orig_bmcr;
3062 tp->link_config.active_speed = tp->link_config.speed;
3063 tp->link_config.active_duplex = tp->link_config.duplex;
3065 bmcr = 0;
3066 switch (tp->link_config.speed) {
3067 default:
3068 case SPEED_10:
3069 break;
3071 case SPEED_100:
3072 bmcr |= BMCR_SPEED100;
3073 break;
3075 case SPEED_1000:
3076 bmcr |= TG3_BMCR_SPEED1000;
3077 break;
3080 if (tp->link_config.duplex == DUPLEX_FULL)
3081 bmcr |= BMCR_FULLDPLX;
3083 if (!tg3_readphy(tp, MII_BMCR, &orig_bmcr) &&
3084 (bmcr != orig_bmcr)) {
3085 tg3_writephy(tp, MII_BMCR, BMCR_LOOPBACK);
3086 for (i = 0; i < 1500; i++) {
3087 u32 tmp;
3089 udelay(10);
3090 if (tg3_readphy(tp, MII_BMSR, &tmp) ||
3091 tg3_readphy(tp, MII_BMSR, &tmp))
3092 continue;
3093 if (!(tmp & BMSR_LSTATUS)) {
3094 udelay(40);
3095 break;
3098 tg3_writephy(tp, MII_BMCR, bmcr);
3099 udelay(40);
3101 } else {
3102 tg3_writephy(tp, MII_BMCR,
3103 BMCR_ANENABLE | BMCR_ANRESTART);
3107 static int tg3_init_5401phy_dsp(struct tg3 *tp)
3108 {
3109 int err;
3111 /* Turn off tap power management;
3112 * set Extended packet length bit. */
3113 err = tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_AUXCTL, 0x4c20);
3115 err |= tg3_phydsp_write(tp, 0x0012, 0x1804);
3116 err |= tg3_phydsp_write(tp, 0x0013, 0x1204);
3117 err |= tg3_phydsp_write(tp, 0x8006, 0x0132);
3118 err |= tg3_phydsp_write(tp, 0x8006, 0x0232);
3119 err |= tg3_phydsp_write(tp, 0x201f, 0x0a20);
3121 udelay(40);
3123 return err;
3124 }
3126 static int tg3_copper_is_advertising_all(struct tg3 *tp, u32 mask)
3127 {
3128 u32 adv_reg, all_mask = 0;
3130 if (mask & ADVERTISED_10baseT_Half)
3131 all_mask |= ADVERTISE_10HALF;
3132 if (mask & ADVERTISED_10baseT_Full)
3133 all_mask |= ADVERTISE_10FULL;
3134 if (mask & ADVERTISED_100baseT_Half)
3135 all_mask |= ADVERTISE_100HALF;
3136 if (mask & ADVERTISED_100baseT_Full)
3137 all_mask |= ADVERTISE_100FULL;
3139 if (tg3_readphy(tp, MII_ADVERTISE, &adv_reg))
3140 return 0;
3142 if ((adv_reg & all_mask) != all_mask)
3143 return 0;
3144 if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
3145 u32 tg3_ctrl;
3147 all_mask = 0;
3148 if (mask & ADVERTISED_1000baseT_Half)
3149 all_mask |= ADVERTISE_1000HALF;
3150 if (mask & ADVERTISED_1000baseT_Full)
3151 all_mask |= ADVERTISE_1000FULL;
3153 if (tg3_readphy(tp, MII_TG3_CTRL, &tg3_ctrl))
3154 return 0;
3156 if ((tg3_ctrl & all_mask) != all_mask)
3157 return 0;
3158 }
3159 return 1;
3160 }
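/* Example: with mask = ADVERTISED_100baseT_Full |
 * ADVERTISED_1000baseT_Full, the function above returns 1 only when
 * ADVERTISE_100FULL is set in MII_ADVERTISE and ADVERTISE_1000FULL
 * is set in MII_TG3_CTRL.
 */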
3162 static int tg3_adv_1000T_flowctrl_ok(struct tg3 *tp, u32 *lcladv, u32 *rmtadv)
3164 u32 curadv, reqadv;
3166 if (tg3_readphy(tp, MII_ADVERTISE, lcladv))
3167 return 1;
3169 curadv = *lcladv & (ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM);
3170 reqadv = tg3_advert_flowctrl_1000T(tp->link_config.flowctrl);
3172 if (tp->link_config.active_duplex == DUPLEX_FULL) {
3173 if (curadv != reqadv)
3174 return 0;
3176 if (tg3_flag(tp, PAUSE_AUTONEG))
3177 tg3_readphy(tp, MII_LPA, rmtadv);
3178 } else {
3179 /* Reprogram the advertisement register, even if it
3180 * does not affect the current link. If the link
3181 * gets renegotiated in the future, we can save an
3182 * additional renegotiation cycle by advertising
3183 * it correctly in the first place.
3185 if (curadv != reqadv) {
3186 *lcladv &= ~(ADVERTISE_PAUSE_CAP |
3187 ADVERTISE_PAUSE_ASYM);
3188 tg3_writephy(tp, MII_ADVERTISE, *lcladv | reqadv);
3192 return 1;
3195 static int tg3_setup_copper_phy(struct tg3 *tp, int force_reset)
3197 int current_link_up;
3198 u32 bmsr, val;
3199 u32 lcl_adv, rmt_adv;
3200 u16 current_speed;
3201 u8 current_duplex;
3202 int i, err;
3204 tw32(MAC_EVENT, 0);
3206 tw32_f(MAC_STATUS,
3207 (MAC_STATUS_SYNC_CHANGED |
3208 MAC_STATUS_CFG_CHANGED |
3209 MAC_STATUS_MI_COMPLETION |
3210 MAC_STATUS_LNKSTATE_CHANGED));
3211 udelay(40);
3213 if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
3214 tw32_f(MAC_MI_MODE,
3215 (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
3216 udelay(80);
3219 tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_PWRCTL, 0);
3221 /* Some third-party PHYs need to be reset on link going
3222 * down.
3224 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
3225 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
3226 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) &&
3227 netif_carrier_ok(tp->dev)) {
3228 tg3_readphy(tp, MII_BMSR, &bmsr);
3229 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
3230 !(bmsr & BMSR_LSTATUS))
3231 force_reset = 1;
3233 if (force_reset)
3234 tg3_phy_reset(tp);
3236 if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5401) {
3237 tg3_readphy(tp, MII_BMSR, &bmsr);
3238 if (tg3_readphy(tp, MII_BMSR, &bmsr) ||
3239 !tg3_flag(tp, INIT_COMPLETE))
3240 bmsr = 0;
3242 if (!(bmsr & BMSR_LSTATUS)) {
3243 err = tg3_init_5401phy_dsp(tp);
3244 if (err)
3245 return err;
3247 tg3_readphy(tp, MII_BMSR, &bmsr);
3248 for (i = 0; i < 1000; i++) {
3249 udelay(10);
3250 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
3251 (bmsr & BMSR_LSTATUS)) {
3252 udelay(40);
3253 break;
3257 if ((tp->phy_id & TG3_PHY_ID_REV_MASK) ==
3258 TG3_PHY_REV_BCM5401_B0 &&
3259 !(bmsr & BMSR_LSTATUS) &&
3260 tp->link_config.active_speed == SPEED_1000) {
3261 err = tg3_phy_reset(tp);
3262 if (!err)
3263 err = tg3_init_5401phy_dsp(tp);
3264 if (err)
3265 return err;
3268 } else if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
3269 tp->pci_chip_rev_id == CHIPREV_ID_5701_B0) {
3270 /* 5701 {A0,B0} CRC bug workaround */
3271 tg3_writephy(tp, 0x15, 0x0a75);
3272 tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8c68);
3273 tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8d68);
3274 tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8c68);
3277 /* Clear pending interrupts... */
3278 tg3_readphy(tp, MII_TG3_ISTAT, &val);
3279 tg3_readphy(tp, MII_TG3_ISTAT, &val);
3281 if (tp->phy_flags & TG3_PHYFLG_USE_MI_INTERRUPT)
3282 tg3_writephy(tp, MII_TG3_IMASK, ~MII_TG3_INT_LINKCHG);
3283 else if (!(tp->phy_flags & TG3_PHYFLG_IS_FET))
3284 tg3_writephy(tp, MII_TG3_IMASK, ~0);
3286 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
3287 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
3288 if (tp->led_ctrl == LED_CTRL_MODE_PHY_1)
3289 tg3_writephy(tp, MII_TG3_EXT_CTRL,
3290 MII_TG3_EXT_CTRL_LNK3_LED_MODE);
3291 else
3292 tg3_writephy(tp, MII_TG3_EXT_CTRL, 0);
3295 current_link_up = 0;
3296 current_speed = SPEED_INVALID;
3297 current_duplex = DUPLEX_INVALID;
3299 if (tp->phy_flags & TG3_PHYFLG_CAPACITIVE_COUPLING) {
3300 err = tg3_phy_auxctl_read(tp,
3301 MII_TG3_AUXCTL_SHDWSEL_MISCTEST,
3302 &val);
3303 if (!err && !(val & (1 << 10))) {
3304 tg3_phy_auxctl_write(tp,
3305 MII_TG3_AUXCTL_SHDWSEL_MISCTEST,
3306 val | (1 << 10));
3307 goto relink;
3311 bmsr = 0;
3312 for (i = 0; i < 100; i++) {
3313 tg3_readphy(tp, MII_BMSR, &bmsr);
3314 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
3315 (bmsr & BMSR_LSTATUS))
3316 break;
3317 udelay(40);
3320 if (bmsr & BMSR_LSTATUS) {
3321 u32 aux_stat, bmcr;
3323 tg3_readphy(tp, MII_TG3_AUX_STAT, &aux_stat);
3324 for (i = 0; i < 2000; i++) {
3325 udelay(10);
3326 if (!tg3_readphy(tp, MII_TG3_AUX_STAT, &aux_stat) &&
3327 aux_stat)
3328 break;
3331 tg3_aux_stat_to_speed_duplex(tp, aux_stat,
3332 &current_speed,
3333 &current_duplex);
3335 bmcr = 0;
3336 for (i = 0; i < 200; i++) {
3337 tg3_readphy(tp, MII_BMCR, &bmcr);
3338 if (tg3_readphy(tp, MII_BMCR, &bmcr))
3339 continue;
3340 if (bmcr && bmcr != 0x7fff)
3341 break;
3342 udelay(10);
3345 lcl_adv = 0;
3346 rmt_adv = 0;
3348 tp->link_config.active_speed = current_speed;
3349 tp->link_config.active_duplex = current_duplex;
3351 if (tp->link_config.autoneg == AUTONEG_ENABLE) {
3352 if ((bmcr & BMCR_ANENABLE) &&
3353 tg3_copper_is_advertising_all(tp,
3354 tp->link_config.advertising)) {
3355 if (tg3_adv_1000T_flowctrl_ok(tp, &lcl_adv,
3356 &rmt_adv))
3357 current_link_up = 1;
3359 } else {
3360 if (!(bmcr & BMCR_ANENABLE) &&
3361 tp->link_config.speed == current_speed &&
3362 tp->link_config.duplex == current_duplex &&
3363 tp->link_config.flowctrl ==
3364 tp->link_config.active_flowctrl) {
3365 current_link_up = 1;
3369 if (current_link_up == 1 &&
3370 tp->link_config.active_duplex == DUPLEX_FULL)
3371 tg3_setup_flow_control(tp, lcl_adv, rmt_adv);
3374 relink:
3375 if (current_link_up == 0 || (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)) {
3376 tg3_phy_copper_begin(tp);
3378 tg3_readphy(tp, MII_BMSR, &bmsr);
3379 if ((!tg3_readphy(tp, MII_BMSR, &bmsr) && (bmsr & BMSR_LSTATUS)) ||
3380 (tp->mac_mode & MAC_MODE_PORT_INT_LPBACK))
3381 current_link_up = 1;
3384 tp->mac_mode &= ~MAC_MODE_PORT_MODE_MASK;
3385 if (current_link_up == 1) {
3386 if (tp->link_config.active_speed == SPEED_100 ||
3387 tp->link_config.active_speed == SPEED_10)
3388 tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
3389 else
3390 tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
3391 } else if (tp->phy_flags & TG3_PHYFLG_IS_FET)
3392 tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
3393 else
3394 tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
3396 tp->mac_mode &= ~MAC_MODE_HALF_DUPLEX;
3397 if (tp->link_config.active_duplex == DUPLEX_HALF)
3398 tp->mac_mode |= MAC_MODE_HALF_DUPLEX;
3400 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700) {
3401 if (current_link_up == 1 &&
3402 tg3_5700_link_polarity(tp, tp->link_config.active_speed))
3403 tp->mac_mode |= MAC_MODE_LINK_POLARITY;
3404 else
3405 tp->mac_mode &= ~MAC_MODE_LINK_POLARITY;
3408 /* For reasons that are not understood, the Netgear GA302T PHY
3409 * does not send or receive packets without this setting.
3410 */
3411 if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5411 &&
3412 tp->pci_chip_rev_id == CHIPREV_ID_5700_ALTIMA) {
3413 tp->mi_mode |= MAC_MI_MODE_AUTO_POLL;
3414 tw32_f(MAC_MI_MODE, tp->mi_mode);
3415 udelay(80);
3418 tw32_f(MAC_MODE, tp->mac_mode);
3419 udelay(40);
3421 tg3_phy_eee_adjust(tp, current_link_up);
3423 if (tg3_flag(tp, USE_LINKCHG_REG)) {
3424 /* Polled via timer. */
3425 tw32_f(MAC_EVENT, 0);
3426 } else {
3427 tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
3429 udelay(40);
3431 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 &&
3432 current_link_up == 1 &&
3433 tp->link_config.active_speed == SPEED_1000 &&
3434 (tg3_flag(tp, PCIX_MODE) || tg3_flag(tp, PCI_HIGH_SPEED))) {
3435 udelay(120);
3436 tw32_f(MAC_STATUS,
3437 (MAC_STATUS_SYNC_CHANGED |
3438 MAC_STATUS_CFG_CHANGED));
3439 udelay(40);
3440 tg3_write_mem(tp,
3441 NIC_SRAM_FIRMWARE_MBOX,
3442 NIC_SRAM_FIRMWARE_MBOX_MAGIC2);
3445 /* Prevent send BD corruption. */
3446 if (tg3_flag(tp, CLKREQ_BUG)) {
3447 u16 oldlnkctl, newlnkctl;
3449 pci_read_config_word(tp->pdev,
3450 tp->pcie_cap + PCI_EXP_LNKCTL,
3451 &oldlnkctl);
3452 if (tp->link_config.active_speed == SPEED_100 ||
3453 tp->link_config.active_speed == SPEED_10)
3454 newlnkctl = oldlnkctl & ~PCI_EXP_LNKCTL_CLKREQ_EN;
3455 else
3456 newlnkctl = oldlnkctl | PCI_EXP_LNKCTL_CLKREQ_EN;
3457 if (newlnkctl != oldlnkctl)
3458 pci_write_config_word(tp->pdev,
3459 tp->pcie_cap + PCI_EXP_LNKCTL,
3460 newlnkctl);
3463 if (current_link_up != netif_carrier_ok(tp->dev)) {
3464 if (current_link_up)
3465 netif_carrier_on(tp->dev);
3466 else
3467 netif_carrier_off(tp->dev);
3468 tg3_link_report(tp);
3471 return 0;
3474 struct tg3_fiber_aneginfo {
3475 int state;
3476 #define ANEG_STATE_UNKNOWN 0
3477 #define ANEG_STATE_AN_ENABLE 1
3478 #define ANEG_STATE_RESTART_INIT 2
3479 #define ANEG_STATE_RESTART 3
3480 #define ANEG_STATE_DISABLE_LINK_OK 4
3481 #define ANEG_STATE_ABILITY_DETECT_INIT 5
3482 #define ANEG_STATE_ABILITY_DETECT 6
3483 #define ANEG_STATE_ACK_DETECT_INIT 7
3484 #define ANEG_STATE_ACK_DETECT 8
3485 #define ANEG_STATE_COMPLETE_ACK_INIT 9
3486 #define ANEG_STATE_COMPLETE_ACK 10
3487 #define ANEG_STATE_IDLE_DETECT_INIT 11
3488 #define ANEG_STATE_IDLE_DETECT 12
3489 #define ANEG_STATE_LINK_OK 13
3490 #define ANEG_STATE_NEXT_PAGE_WAIT_INIT 14
3491 #define ANEG_STATE_NEXT_PAGE_WAIT 15
3493 u32 flags;
3494 #define MR_AN_ENABLE 0x00000001
3495 #define MR_RESTART_AN 0x00000002
3496 #define MR_AN_COMPLETE 0x00000004
3497 #define MR_PAGE_RX 0x00000008
3498 #define MR_NP_LOADED 0x00000010
3499 #define MR_TOGGLE_TX 0x00000020
3500 #define MR_LP_ADV_FULL_DUPLEX 0x00000040
3501 #define MR_LP_ADV_HALF_DUPLEX 0x00000080
3502 #define MR_LP_ADV_SYM_PAUSE 0x00000100
3503 #define MR_LP_ADV_ASYM_PAUSE 0x00000200
3504 #define MR_LP_ADV_REMOTE_FAULT1 0x00000400
3505 #define MR_LP_ADV_REMOTE_FAULT2 0x00000800
3506 #define MR_LP_ADV_NEXT_PAGE 0x00001000
3507 #define MR_TOGGLE_RX 0x00002000
3508 #define MR_NP_RX 0x00004000
3510 #define MR_LINK_OK 0x80000000
3512 unsigned long link_time, cur_time;
3514 u32 ability_match_cfg;
3515 int ability_match_count;
3517 char ability_match, idle_match, ack_match;
3519 u32 txconfig, rxconfig;
3520 #define ANEG_CFG_NP 0x00000080
3521 #define ANEG_CFG_ACK 0x00000040
3522 #define ANEG_CFG_RF2 0x00000020
3523 #define ANEG_CFG_RF1 0x00000010
3524 #define ANEG_CFG_PS2 0x00000001
3525 #define ANEG_CFG_PS1 0x00008000
3526 #define ANEG_CFG_HD 0x00004000
3527 #define ANEG_CFG_FD 0x00002000
3528 #define ANEG_CFG_INVAL 0x00001f06
3531 #define ANEG_OK 0
3532 #define ANEG_DONE 1
3533 #define ANEG_TIMER_ENAB 2
3534 #define ANEG_FAILED -1
3536 #define ANEG_STATE_SETTLE_TIME 10000
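/* The settle time is measured in state-machine ticks; fiber_autoneg()
 * advances ap->cur_time roughly once per microsecond, so 10000 ticks
 * amount to about 10 ms between state transitions.
 */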
3538 static int tg3_fiber_aneg_smachine(struct tg3 *tp,
3539 struct tg3_fiber_aneginfo *ap)
3541 u16 flowctrl;
3542 unsigned long delta;
3543 u32 rx_cfg_reg;
3544 int ret;
3546 if (ap->state == ANEG_STATE_UNKNOWN) {
3547 ap->rxconfig = 0;
3548 ap->link_time = 0;
3549 ap->cur_time = 0;
3550 ap->ability_match_cfg = 0;
3551 ap->ability_match_count = 0;
3552 ap->ability_match = 0;
3553 ap->idle_match = 0;
3554 ap->ack_match = 0;
3556 ap->cur_time++;
3558 if (tr32(MAC_STATUS) & MAC_STATUS_RCVD_CFG) {
3559 rx_cfg_reg = tr32(MAC_RX_AUTO_NEG);
3561 if (rx_cfg_reg != ap->ability_match_cfg) {
3562 ap->ability_match_cfg = rx_cfg_reg;
3563 ap->ability_match = 0;
3564 ap->ability_match_count = 0;
3565 } else {
3566 if (++ap->ability_match_count > 1) {
3567 ap->ability_match = 1;
3568 ap->ability_match_cfg = rx_cfg_reg;
3571 if (rx_cfg_reg & ANEG_CFG_ACK)
3572 ap->ack_match = 1;
3573 else
3574 ap->ack_match = 0;
3576 ap->idle_match = 0;
3577 } else {
3578 ap->idle_match = 1;
3579 ap->ability_match_cfg = 0;
3580 ap->ability_match_count = 0;
3581 ap->ability_match = 0;
3582 ap->ack_match = 0;
3584 rx_cfg_reg = 0;
3587 ap->rxconfig = rx_cfg_reg;
3588 ret = ANEG_OK;
3590 switch (ap->state) {
3591 case ANEG_STATE_UNKNOWN:
3592 if (ap->flags & (MR_AN_ENABLE | MR_RESTART_AN))
3593 ap->state = ANEG_STATE_AN_ENABLE;
3595 /* fallthru */
3596 case ANEG_STATE_AN_ENABLE:
3597 ap->flags &= ~(MR_AN_COMPLETE | MR_PAGE_RX);
3598 if (ap->flags & MR_AN_ENABLE) {
3599 ap->link_time = 0;
3600 ap->cur_time = 0;
3601 ap->ability_match_cfg = 0;
3602 ap->ability_match_count = 0;
3603 ap->ability_match = 0;
3604 ap->idle_match = 0;
3605 ap->ack_match = 0;
3607 ap->state = ANEG_STATE_RESTART_INIT;
3608 } else {
3609 ap->state = ANEG_STATE_DISABLE_LINK_OK;
3611 break;
3613 case ANEG_STATE_RESTART_INIT:
3614 ap->link_time = ap->cur_time;
3615 ap->flags &= ~(MR_NP_LOADED);
3616 ap->txconfig = 0;
3617 tw32(MAC_TX_AUTO_NEG, 0);
3618 tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
3619 tw32_f(MAC_MODE, tp->mac_mode);
3620 udelay(40);
3622 ret = ANEG_TIMER_ENAB;
3623 ap->state = ANEG_STATE_RESTART;
3625 /* fallthru */
3626 case ANEG_STATE_RESTART:
3627 delta = ap->cur_time - ap->link_time;
3628 if (delta > ANEG_STATE_SETTLE_TIME)
3629 ap->state = ANEG_STATE_ABILITY_DETECT_INIT;
3630 else
3631 ret = ANEG_TIMER_ENAB;
3632 break;
3634 case ANEG_STATE_DISABLE_LINK_OK:
3635 ret = ANEG_DONE;
3636 break;
3638 case ANEG_STATE_ABILITY_DETECT_INIT:
3639 ap->flags &= ~(MR_TOGGLE_TX);
3640 ap->txconfig = ANEG_CFG_FD;
3641 flowctrl = tg3_advert_flowctrl_1000X(tp->link_config.flowctrl);
3642 if (flowctrl & ADVERTISE_1000XPAUSE)
3643 ap->txconfig |= ANEG_CFG_PS1;
3644 if (flowctrl & ADVERTISE_1000XPSE_ASYM)
3645 ap->txconfig |= ANEG_CFG_PS2;
3646 tw32(MAC_TX_AUTO_NEG, ap->txconfig);
3647 tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
3648 tw32_f(MAC_MODE, tp->mac_mode);
3649 udelay(40);
3651 ap->state = ANEG_STATE_ABILITY_DETECT;
3652 break;
3654 case ANEG_STATE_ABILITY_DETECT:
3655 if (ap->ability_match != 0 && ap->rxconfig != 0)
3656 ap->state = ANEG_STATE_ACK_DETECT_INIT;
3657 break;
3659 case ANEG_STATE_ACK_DETECT_INIT:
3660 ap->txconfig |= ANEG_CFG_ACK;
3661 tw32(MAC_TX_AUTO_NEG, ap->txconfig);
3662 tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
3663 tw32_f(MAC_MODE, tp->mac_mode);
3664 udelay(40);
3666 ap->state = ANEG_STATE_ACK_DETECT;
3668 /* fallthru */
3669 case ANEG_STATE_ACK_DETECT:
3670 if (ap->ack_match != 0) {
3671 if ((ap->rxconfig & ~ANEG_CFG_ACK) ==
3672 (ap->ability_match_cfg & ~ANEG_CFG_ACK)) {
3673 ap->state = ANEG_STATE_COMPLETE_ACK_INIT;
3674 } else {
3675 ap->state = ANEG_STATE_AN_ENABLE;
3677 } else if (ap->ability_match != 0 &&
3678 ap->rxconfig == 0) {
3679 ap->state = ANEG_STATE_AN_ENABLE;
3681 break;
3683 case ANEG_STATE_COMPLETE_ACK_INIT:
3684 if (ap->rxconfig & ANEG_CFG_INVAL) {
3685 ret = ANEG_FAILED;
3686 break;
3688 ap->flags &= ~(MR_LP_ADV_FULL_DUPLEX |
3689 MR_LP_ADV_HALF_DUPLEX |
3690 MR_LP_ADV_SYM_PAUSE |
3691 MR_LP_ADV_ASYM_PAUSE |
3692 MR_LP_ADV_REMOTE_FAULT1 |
3693 MR_LP_ADV_REMOTE_FAULT2 |
3694 MR_LP_ADV_NEXT_PAGE |
3695 MR_TOGGLE_RX |
3696 MR_NP_RX);
3697 if (ap->rxconfig & ANEG_CFG_FD)
3698 ap->flags |= MR_LP_ADV_FULL_DUPLEX;
3699 if (ap->rxconfig & ANEG_CFG_HD)
3700 ap->flags |= MR_LP_ADV_HALF_DUPLEX;
3701 if (ap->rxconfig & ANEG_CFG_PS1)
3702 ap->flags |= MR_LP_ADV_SYM_PAUSE;
3703 if (ap->rxconfig & ANEG_CFG_PS2)
3704 ap->flags |= MR_LP_ADV_ASYM_PAUSE;
3705 if (ap->rxconfig & ANEG_CFG_RF1)
3706 ap->flags |= MR_LP_ADV_REMOTE_FAULT1;
3707 if (ap->rxconfig & ANEG_CFG_RF2)
3708 ap->flags |= MR_LP_ADV_REMOTE_FAULT2;
3709 if (ap->rxconfig & ANEG_CFG_NP)
3710 ap->flags |= MR_LP_ADV_NEXT_PAGE;
3712 ap->link_time = ap->cur_time;
3714 ap->flags ^= (MR_TOGGLE_TX);
3715 if (ap->rxconfig & 0x0008)
3716 ap->flags |= MR_TOGGLE_RX;
3717 if (ap->rxconfig & ANEG_CFG_NP)
3718 ap->flags |= MR_NP_RX;
3719 ap->flags |= MR_PAGE_RX;
3721 ap->state = ANEG_STATE_COMPLETE_ACK;
3722 ret = ANEG_TIMER_ENAB;
3723 break;
3725 case ANEG_STATE_COMPLETE_ACK:
3726 if (ap->ability_match != 0 &&
3727 ap->rxconfig == 0) {
3728 ap->state = ANEG_STATE_AN_ENABLE;
3729 break;
3731 delta = ap->cur_time - ap->link_time;
3732 if (delta > ANEG_STATE_SETTLE_TIME) {
3733 if (!(ap->flags & (MR_LP_ADV_NEXT_PAGE))) {
3734 ap->state = ANEG_STATE_IDLE_DETECT_INIT;
3735 } else {
3736 if ((ap->txconfig & ANEG_CFG_NP) == 0 &&
3737 !(ap->flags & MR_NP_RX)) {
3738 ap->state = ANEG_STATE_IDLE_DETECT_INIT;
3739 } else {
3740 ret = ANEG_FAILED;
3744 break;
3746 case ANEG_STATE_IDLE_DETECT_INIT:
3747 ap->link_time = ap->cur_time;
3748 tp->mac_mode &= ~MAC_MODE_SEND_CONFIGS;
3749 tw32_f(MAC_MODE, tp->mac_mode);
3750 udelay(40);
3752 ap->state = ANEG_STATE_IDLE_DETECT;
3753 ret = ANEG_TIMER_ENAB;
3754 break;
3756 case ANEG_STATE_IDLE_DETECT:
3757 if (ap->ability_match != 0 &&
3758 ap->rxconfig == 0) {
3759 ap->state = ANEG_STATE_AN_ENABLE;
3760 break;
3762 delta = ap->cur_time - ap->link_time;
3763 if (delta > ANEG_STATE_SETTLE_TIME) {
3764 /* XXX another gem from the Broadcom driver :( */
3765 ap->state = ANEG_STATE_LINK_OK;
3767 break;
3769 case ANEG_STATE_LINK_OK:
3770 ap->flags |= (MR_AN_COMPLETE | MR_LINK_OK);
3771 ret = ANEG_DONE;
3772 break;
3774 case ANEG_STATE_NEXT_PAGE_WAIT_INIT:
3775 /* ??? unimplemented */
3776 break;
3778 case ANEG_STATE_NEXT_PAGE_WAIT:
3779 /* ??? unimplemented */
3780 break;
3782 default:
3783 ret = ANEG_FAILED;
3784 break;
3787 return ret;
3788 }
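/* fiber_autoneg() below clocks the state machine with ~1 us ticks and
 * gives up after 195000 of them, so a complete negotiation gets a
 * budget of roughly 195 ms.
 */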
3790 static int fiber_autoneg(struct tg3 *tp, u32 *txflags, u32 *rxflags)
3792 int res = 0;
3793 struct tg3_fiber_aneginfo aninfo;
3794 int status = ANEG_FAILED;
3795 unsigned int tick;
3796 u32 tmp;
3798 tw32_f(MAC_TX_AUTO_NEG, 0);
3800 tmp = tp->mac_mode & ~MAC_MODE_PORT_MODE_MASK;
3801 tw32_f(MAC_MODE, tmp | MAC_MODE_PORT_MODE_GMII);
3802 udelay(40);
3804 tw32_f(MAC_MODE, tp->mac_mode | MAC_MODE_SEND_CONFIGS);
3805 udelay(40);
3807 memset(&aninfo, 0, sizeof(aninfo));
3808 aninfo.flags |= MR_AN_ENABLE;
3809 aninfo.state = ANEG_STATE_UNKNOWN;
3810 aninfo.cur_time = 0;
3811 tick = 0;
3812 while (++tick < 195000) {
3813 status = tg3_fiber_aneg_smachine(tp, &aninfo);
3814 if (status == ANEG_DONE || status == ANEG_FAILED)
3815 break;
3817 udelay(1);
3820 tp->mac_mode &= ~MAC_MODE_SEND_CONFIGS;
3821 tw32_f(MAC_MODE, tp->mac_mode);
3822 udelay(40);
3824 *txflags = aninfo.txconfig;
3825 *rxflags = aninfo.flags;
3827 if (status == ANEG_DONE &&
3828 (aninfo.flags & (MR_AN_COMPLETE | MR_LINK_OK |
3829 MR_LP_ADV_FULL_DUPLEX)))
3830 res = 1;
3832 return res;
3835 static void tg3_init_bcm8002(struct tg3 *tp)
3837 u32 mac_status = tr32(MAC_STATUS);
3838 int i;
3840 /* Reset on first-time init, or whenever we already have a link. */
3841 if (tg3_flag(tp, INIT_COMPLETE) &&
3842 !(mac_status & MAC_STATUS_PCS_SYNCED))
3843 return;
3845 /* Set PLL lock range. */
3846 tg3_writephy(tp, 0x16, 0x8007);
3848 /* SW reset */
3849 tg3_writephy(tp, MII_BMCR, BMCR_RESET);
3851 /* Wait for reset to complete. */
3852 /* XXX schedule_timeout() ... */
3853 for (i = 0; i < 500; i++)
3854 udelay(10);
3856 /* Config mode; select PMA/Ch 1 regs. */
3857 tg3_writephy(tp, 0x10, 0x8411);
3859 /* Enable auto-lock and comdet, select txclk for tx. */
3860 tg3_writephy(tp, 0x11, 0x0a10);
3862 tg3_writephy(tp, 0x18, 0x00a0);
3863 tg3_writephy(tp, 0x16, 0x41ff);
3865 /* Assert and deassert POR. */
3866 tg3_writephy(tp, 0x13, 0x0400);
3867 udelay(40);
3868 tg3_writephy(tp, 0x13, 0x0000);
3870 tg3_writephy(tp, 0x11, 0x0a50);
3871 udelay(40);
3872 tg3_writephy(tp, 0x11, 0x0a10);
3874 /* Wait for signal to stabilize */
3875 /* XXX schedule_timeout() ... */
3876 for (i = 0; i < 15000; i++)
3877 udelay(10);
3879 /* Deselect the channel register so we can read the PHYID
3880 * later.
3881 */
3882 tg3_writephy(tp, 0x10, 0x8011);
3885 static int tg3_setup_fiber_hw_autoneg(struct tg3 *tp, u32 mac_status)
3887 u16 flowctrl;
3888 u32 sg_dig_ctrl, sg_dig_status;
3889 u32 serdes_cfg, expected_sg_dig_ctrl;
3890 int workaround, port_a;
3891 int current_link_up;
3893 serdes_cfg = 0;
3894 expected_sg_dig_ctrl = 0;
3895 workaround = 0;
3896 port_a = 1;
3897 current_link_up = 0;
3899 if (tp->pci_chip_rev_id != CHIPREV_ID_5704_A0 &&
3900 tp->pci_chip_rev_id != CHIPREV_ID_5704_A1) {
3901 workaround = 1;
3902 if (tr32(TG3PCI_DUAL_MAC_CTRL) & DUAL_MAC_CTRL_ID)
3903 port_a = 0;
3905 /* preserve bits 0-11,13,14 for signal pre-emphasis */
3906 /* preserve bits 20-23 for voltage regulator */
3907 serdes_cfg = tr32(MAC_SERDES_CFG) & 0x00f06fff;
3910 sg_dig_ctrl = tr32(SG_DIG_CTRL);
3912 if (tp->link_config.autoneg != AUTONEG_ENABLE) {
3913 if (sg_dig_ctrl & SG_DIG_USING_HW_AUTONEG) {
3914 if (workaround) {
3915 u32 val = serdes_cfg;
3917 if (port_a)
3918 val |= 0xc010000;
3919 else
3920 val |= 0x4010000;
3921 tw32_f(MAC_SERDES_CFG, val);
3924 tw32_f(SG_DIG_CTRL, SG_DIG_COMMON_SETUP);
3926 if (mac_status & MAC_STATUS_PCS_SYNCED) {
3927 tg3_setup_flow_control(tp, 0, 0);
3928 current_link_up = 1;
3930 goto out;
3933 /* Want auto-negotiation. */
3934 expected_sg_dig_ctrl = SG_DIG_USING_HW_AUTONEG | SG_DIG_COMMON_SETUP;
3936 flowctrl = tg3_advert_flowctrl_1000X(tp->link_config.flowctrl);
3937 if (flowctrl & ADVERTISE_1000XPAUSE)
3938 expected_sg_dig_ctrl |= SG_DIG_PAUSE_CAP;
3939 if (flowctrl & ADVERTISE_1000XPSE_ASYM)
3940 expected_sg_dig_ctrl |= SG_DIG_ASYM_PAUSE;
3942 if (sg_dig_ctrl != expected_sg_dig_ctrl) {
3943 if ((tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT) &&
3944 tp->serdes_counter &&
3945 ((mac_status & (MAC_STATUS_PCS_SYNCED |
3946 MAC_STATUS_RCVD_CFG)) ==
3947 MAC_STATUS_PCS_SYNCED)) {
3948 tp->serdes_counter--;
3949 current_link_up = 1;
3950 goto out;
3952 restart_autoneg:
3953 if (workaround)
3954 tw32_f(MAC_SERDES_CFG, serdes_cfg | 0xc011000);
3955 tw32_f(SG_DIG_CTRL, expected_sg_dig_ctrl | SG_DIG_SOFT_RESET);
3956 udelay(5);
3957 tw32_f(SG_DIG_CTRL, expected_sg_dig_ctrl);
3959 tp->serdes_counter = SERDES_AN_TIMEOUT_5704S;
3960 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
3961 } else if (mac_status & (MAC_STATUS_PCS_SYNCED |
3962 MAC_STATUS_SIGNAL_DET)) {
3963 sg_dig_status = tr32(SG_DIG_STATUS);
3964 mac_status = tr32(MAC_STATUS);
3966 if ((sg_dig_status & SG_DIG_AUTONEG_COMPLETE) &&
3967 (mac_status & MAC_STATUS_PCS_SYNCED)) {
3968 u32 local_adv = 0, remote_adv = 0;
3970 if (sg_dig_ctrl & SG_DIG_PAUSE_CAP)
3971 local_adv |= ADVERTISE_1000XPAUSE;
3972 if (sg_dig_ctrl & SG_DIG_ASYM_PAUSE)
3973 local_adv |= ADVERTISE_1000XPSE_ASYM;
3975 if (sg_dig_status & SG_DIG_PARTNER_PAUSE_CAPABLE)
3976 remote_adv |= LPA_1000XPAUSE;
3977 if (sg_dig_status & SG_DIG_PARTNER_ASYM_PAUSE)
3978 remote_adv |= LPA_1000XPAUSE_ASYM;
3980 tg3_setup_flow_control(tp, local_adv, remote_adv);
3981 current_link_up = 1;
3982 tp->serdes_counter = 0;
3983 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
3984 } else if (!(sg_dig_status & SG_DIG_AUTONEG_COMPLETE)) {
3985 if (tp->serdes_counter)
3986 tp->serdes_counter--;
3987 else {
3988 if (workaround) {
3989 u32 val = serdes_cfg;
3991 if (port_a)
3992 val |= 0xc010000;
3993 else
3994 val |= 0x4010000;
3996 tw32_f(MAC_SERDES_CFG, val);
3999 tw32_f(SG_DIG_CTRL, SG_DIG_COMMON_SETUP);
4000 udelay(40);
4002 /* Link parallel detection: the link is up
4003 * only if we have PCS_SYNC and are not
4004 * receiving config code words. */
4005 mac_status = tr32(MAC_STATUS);
4006 if ((mac_status & MAC_STATUS_PCS_SYNCED) &&
4007 !(mac_status & MAC_STATUS_RCVD_CFG)) {
4008 tg3_setup_flow_control(tp, 0, 0);
4009 current_link_up = 1;
4010 tp->phy_flags |=
4011 TG3_PHYFLG_PARALLEL_DETECT;
4012 tp->serdes_counter =
4013 SERDES_PARALLEL_DET_TIMEOUT;
4014 } else
4015 goto restart_autoneg;
4018 } else {
4019 tp->serdes_counter = SERDES_AN_TIMEOUT_5704S;
4020 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
4023 out:
4024 return current_link_up;
4027 static int tg3_setup_fiber_by_hand(struct tg3 *tp, u32 mac_status)
4029 int current_link_up = 0;
4031 if (!(mac_status & MAC_STATUS_PCS_SYNCED))
4032 goto out;
4034 if (tp->link_config.autoneg == AUTONEG_ENABLE) {
4035 u32 txflags, rxflags;
4036 int i;
4038 if (fiber_autoneg(tp, &txflags, &rxflags)) {
4039 u32 local_adv = 0, remote_adv = 0;
4041 if (txflags & ANEG_CFG_PS1)
4042 local_adv |= ADVERTISE_1000XPAUSE;
4043 if (txflags & ANEG_CFG_PS2)
4044 local_adv |= ADVERTISE_1000XPSE_ASYM;
4046 if (rxflags & MR_LP_ADV_SYM_PAUSE)
4047 remote_adv |= LPA_1000XPAUSE;
4048 if (rxflags & MR_LP_ADV_ASYM_PAUSE)
4049 remote_adv |= LPA_1000XPAUSE_ASYM;
4051 tg3_setup_flow_control(tp, local_adv, remote_adv);
4053 current_link_up = 1;
4055 for (i = 0; i < 30; i++) {
4056 udelay(20);
4057 tw32_f(MAC_STATUS,
4058 (MAC_STATUS_SYNC_CHANGED |
4059 MAC_STATUS_CFG_CHANGED));
4060 udelay(40);
4061 if ((tr32(MAC_STATUS) &
4062 (MAC_STATUS_SYNC_CHANGED |
4063 MAC_STATUS_CFG_CHANGED)) == 0)
4064 break;
4067 mac_status = tr32(MAC_STATUS);
4068 if (current_link_up == 0 &&
4069 (mac_status & MAC_STATUS_PCS_SYNCED) &&
4070 !(mac_status & MAC_STATUS_RCVD_CFG))
4071 current_link_up = 1;
4072 } else {
4073 tg3_setup_flow_control(tp, 0, 0);
4075 /* Forcing 1000FD link up. */
4076 current_link_up = 1;
4078 tw32_f(MAC_MODE, (tp->mac_mode | MAC_MODE_SEND_CONFIGS));
4079 udelay(40);
4081 tw32_f(MAC_MODE, tp->mac_mode);
4082 udelay(40);
4085 out:
4086 return current_link_up;
4089 static int tg3_setup_fiber_phy(struct tg3 *tp, int force_reset)
4091 u32 orig_pause_cfg;
4092 u16 orig_active_speed;
4093 u8 orig_active_duplex;
4094 u32 mac_status;
4095 int current_link_up;
4096 int i;
4098 orig_pause_cfg = tp->link_config.active_flowctrl;
4099 orig_active_speed = tp->link_config.active_speed;
4100 orig_active_duplex = tp->link_config.active_duplex;
4102 if (!tg3_flag(tp, HW_AUTONEG) &&
4103 netif_carrier_ok(tp->dev) &&
4104 tg3_flag(tp, INIT_COMPLETE)) {
4105 mac_status = tr32(MAC_STATUS);
4106 mac_status &= (MAC_STATUS_PCS_SYNCED |
4107 MAC_STATUS_SIGNAL_DET |
4108 MAC_STATUS_CFG_CHANGED |
4109 MAC_STATUS_RCVD_CFG);
4110 if (mac_status == (MAC_STATUS_PCS_SYNCED |
4111 MAC_STATUS_SIGNAL_DET)) {
4112 tw32_f(MAC_STATUS, (MAC_STATUS_SYNC_CHANGED |
4113 MAC_STATUS_CFG_CHANGED));
4114 return 0;
4118 tw32_f(MAC_TX_AUTO_NEG, 0);
4120 tp->mac_mode &= ~(MAC_MODE_PORT_MODE_MASK | MAC_MODE_HALF_DUPLEX);
4121 tp->mac_mode |= MAC_MODE_PORT_MODE_TBI;
4122 tw32_f(MAC_MODE, tp->mac_mode);
4123 udelay(40);
4125 if (tp->phy_id == TG3_PHY_ID_BCM8002)
4126 tg3_init_bcm8002(tp);
4128 /* Enable link change event even when serdes polling. */
4129 tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
4130 udelay(40);
4132 current_link_up = 0;
4133 mac_status = tr32(MAC_STATUS);
4135 if (tg3_flag(tp, HW_AUTONEG))
4136 current_link_up = tg3_setup_fiber_hw_autoneg(tp, mac_status);
4137 else
4138 current_link_up = tg3_setup_fiber_by_hand(tp, mac_status);
4140 tp->napi[0].hw_status->status =
4141 (SD_STATUS_UPDATED |
4142 (tp->napi[0].hw_status->status & ~SD_STATUS_LINK_CHG));
4144 for (i = 0; i < 100; i++) {
4145 tw32_f(MAC_STATUS, (MAC_STATUS_SYNC_CHANGED |
4146 MAC_STATUS_CFG_CHANGED));
4147 udelay(5);
4148 if ((tr32(MAC_STATUS) & (MAC_STATUS_SYNC_CHANGED |
4149 MAC_STATUS_CFG_CHANGED |
4150 MAC_STATUS_LNKSTATE_CHANGED)) == 0)
4151 break;
4154 mac_status = tr32(MAC_STATUS);
4155 if ((mac_status & MAC_STATUS_PCS_SYNCED) == 0) {
4156 current_link_up = 0;
4157 if (tp->link_config.autoneg == AUTONEG_ENABLE &&
4158 tp->serdes_counter == 0) {
4159 tw32_f(MAC_MODE, (tp->mac_mode |
4160 MAC_MODE_SEND_CONFIGS));
4161 udelay(1);
4162 tw32_f(MAC_MODE, tp->mac_mode);
4166 if (current_link_up == 1) {
4167 tp->link_config.active_speed = SPEED_1000;
4168 tp->link_config.active_duplex = DUPLEX_FULL;
4169 tw32(MAC_LED_CTRL, (tp->led_ctrl |
4170 LED_CTRL_LNKLED_OVERRIDE |
4171 LED_CTRL_1000MBPS_ON));
4172 } else {
4173 tp->link_config.active_speed = SPEED_INVALID;
4174 tp->link_config.active_duplex = DUPLEX_INVALID;
4175 tw32(MAC_LED_CTRL, (tp->led_ctrl |
4176 LED_CTRL_LNKLED_OVERRIDE |
4177 LED_CTRL_TRAFFIC_OVERRIDE));
4180 if (current_link_up != netif_carrier_ok(tp->dev)) {
4181 if (current_link_up)
4182 netif_carrier_on(tp->dev);
4183 else
4184 netif_carrier_off(tp->dev);
4185 tg3_link_report(tp);
4186 } else {
4187 u32 now_pause_cfg = tp->link_config.active_flowctrl;
4188 if (orig_pause_cfg != now_pause_cfg ||
4189 orig_active_speed != tp->link_config.active_speed ||
4190 orig_active_duplex != tp->link_config.active_duplex)
4191 tg3_link_report(tp);
4194 return 0;
4197 static int tg3_setup_fiber_mii_phy(struct tg3 *tp, int force_reset)
4199 int current_link_up, err = 0;
4200 u32 bmsr, bmcr;
4201 u16 current_speed;
4202 u8 current_duplex;
4203 u32 local_adv, remote_adv;
4205 tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
4206 tw32_f(MAC_MODE, tp->mac_mode);
4207 udelay(40);
4209 tw32(MAC_EVENT, 0);
4211 tw32_f(MAC_STATUS,
4212 (MAC_STATUS_SYNC_CHANGED |
4213 MAC_STATUS_CFG_CHANGED |
4214 MAC_STATUS_MI_COMPLETION |
4215 MAC_STATUS_LNKSTATE_CHANGED));
4216 udelay(40);
4218 if (force_reset)
4219 tg3_phy_reset(tp);
4221 current_link_up = 0;
4222 current_speed = SPEED_INVALID;
4223 current_duplex = DUPLEX_INVALID;
4225 err |= tg3_readphy(tp, MII_BMSR, &bmsr);
4226 err |= tg3_readphy(tp, MII_BMSR, &bmsr);
4227 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714) {
4228 if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP)
4229 bmsr |= BMSR_LSTATUS;
4230 else
4231 bmsr &= ~BMSR_LSTATUS;
4234 err |= tg3_readphy(tp, MII_BMCR, &bmcr);
4236 if ((tp->link_config.autoneg == AUTONEG_ENABLE) && !force_reset &&
4237 (tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT)) {
4238 /* do nothing, just check for link up at the end */
4239 } else if (tp->link_config.autoneg == AUTONEG_ENABLE) {
4240 u32 adv, new_adv;
4242 err |= tg3_readphy(tp, MII_ADVERTISE, &adv);
4243 new_adv = adv & ~(ADVERTISE_1000XFULL | ADVERTISE_1000XHALF |
4244 ADVERTISE_1000XPAUSE |
4245 ADVERTISE_1000XPSE_ASYM |
4246 ADVERTISE_SLCT);
4248 new_adv |= tg3_advert_flowctrl_1000X(tp->link_config.flowctrl);
4250 if (tp->link_config.advertising & ADVERTISED_1000baseT_Half)
4251 new_adv |= ADVERTISE_1000XHALF;
4252 if (tp->link_config.advertising & ADVERTISED_1000baseT_Full)
4253 new_adv |= ADVERTISE_1000XFULL;
4255 if ((new_adv != adv) || !(bmcr & BMCR_ANENABLE)) {
4256 tg3_writephy(tp, MII_ADVERTISE, new_adv);
4257 bmcr |= BMCR_ANENABLE | BMCR_ANRESTART;
4258 tg3_writephy(tp, MII_BMCR, bmcr);
4260 tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
4261 tp->serdes_counter = SERDES_AN_TIMEOUT_5714S;
4262 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
4264 return err;
4266 } else {
4267 u32 new_bmcr;
4269 bmcr &= ~BMCR_SPEED1000;
4270 new_bmcr = bmcr & ~(BMCR_ANENABLE | BMCR_FULLDPLX);
4272 if (tp->link_config.duplex == DUPLEX_FULL)
4273 new_bmcr |= BMCR_FULLDPLX;
4275 if (new_bmcr != bmcr) {
4276 /* BMCR_SPEED1000 is a reserved bit that needs
4277 * to be set on write.
4279 new_bmcr |= BMCR_SPEED1000;
4281 /* Force a linkdown */
4282 if (netif_carrier_ok(tp->dev)) {
4283 u32 adv;
4285 err |= tg3_readphy(tp, MII_ADVERTISE, &adv);
4286 adv &= ~(ADVERTISE_1000XFULL |
4287 ADVERTISE_1000XHALF |
4288 ADVERTISE_SLCT);
4289 tg3_writephy(tp, MII_ADVERTISE, adv);
4290 tg3_writephy(tp, MII_BMCR, bmcr |
4291 BMCR_ANRESTART |
4292 BMCR_ANENABLE);
4293 udelay(10);
4294 netif_carrier_off(tp->dev);
4296 tg3_writephy(tp, MII_BMCR, new_bmcr);
4297 bmcr = new_bmcr;
4298 err |= tg3_readphy(tp, MII_BMSR, &bmsr);
4299 err |= tg3_readphy(tp, MII_BMSR, &bmsr);
4300 if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
4301 ASIC_REV_5714) {
4302 if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP)
4303 bmsr |= BMSR_LSTATUS;
4304 else
4305 bmsr &= ~BMSR_LSTATUS;
4307 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
4311 if (bmsr & BMSR_LSTATUS) {
4312 current_speed = SPEED_1000;
4313 current_link_up = 1;
4314 if (bmcr & BMCR_FULLDPLX)
4315 current_duplex = DUPLEX_FULL;
4316 else
4317 current_duplex = DUPLEX_HALF;
4319 local_adv = 0;
4320 remote_adv = 0;
4322 if (bmcr & BMCR_ANENABLE) {
4323 u32 common;
4325 err |= tg3_readphy(tp, MII_ADVERTISE, &local_adv);
4326 err |= tg3_readphy(tp, MII_LPA, &remote_adv);
4327 common = local_adv & remote_adv;
4328 if (common & (ADVERTISE_1000XHALF |
4329 ADVERTISE_1000XFULL)) {
4330 if (common & ADVERTISE_1000XFULL)
4331 current_duplex = DUPLEX_FULL;
4332 else
4333 current_duplex = DUPLEX_HALF;
4334 } else if (!tg3_flag(tp, 5780_CLASS)) {
4335 /* Link is up via parallel detect */
4336 } else {
4337 current_link_up = 0;
4342 if (current_link_up == 1 && current_duplex == DUPLEX_FULL)
4343 tg3_setup_flow_control(tp, local_adv, remote_adv);
4345 tp->mac_mode &= ~MAC_MODE_HALF_DUPLEX;
4346 if (tp->link_config.active_duplex == DUPLEX_HALF)
4347 tp->mac_mode |= MAC_MODE_HALF_DUPLEX;
4349 tw32_f(MAC_MODE, tp->mac_mode);
4350 udelay(40);
4352 tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
4354 tp->link_config.active_speed = current_speed;
4355 tp->link_config.active_duplex = current_duplex;
4357 if (current_link_up != netif_carrier_ok(tp->dev)) {
4358 if (current_link_up)
4359 netif_carrier_on(tp->dev);
4360 else {
4361 netif_carrier_off(tp->dev);
4362 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
4364 tg3_link_report(tp);
4366 return err;
4369 static void tg3_serdes_parallel_detect(struct tg3 *tp)
4371 if (tp->serdes_counter) {
4372 /* Give autoneg time to complete. */
4373 tp->serdes_counter--;
4374 return;
4377 if (!netif_carrier_ok(tp->dev) &&
4378 (tp->link_config.autoneg == AUTONEG_ENABLE)) {
4379 u32 bmcr;
4381 tg3_readphy(tp, MII_BMCR, &bmcr);
4382 if (bmcr & BMCR_ANENABLE) {
4383 u32 phy1, phy2;
4385 /* Select shadow register 0x1f */
4386 tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x7c00);
4387 tg3_readphy(tp, MII_TG3_MISC_SHDW, &phy1);
4389 /* Select expansion interrupt status register */
4390 tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
4391 MII_TG3_DSP_EXP1_INT_STAT);
4392 tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &phy2);
4393 tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &phy2);
4395 if ((phy1 & 0x10) && !(phy2 & 0x20)) {
4396 /* We have signal detect and are not receiving
4397 * config code words, so the link is up via
4398 * parallel detection.
4401 bmcr &= ~BMCR_ANENABLE;
4402 bmcr |= BMCR_SPEED1000 | BMCR_FULLDPLX;
4403 tg3_writephy(tp, MII_BMCR, bmcr);
4404 tp->phy_flags |= TG3_PHYFLG_PARALLEL_DETECT;
4407 } else if (netif_carrier_ok(tp->dev) &&
4408 (tp->link_config.autoneg == AUTONEG_ENABLE) &&
4409 (tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT)) {
4410 u32 phy2;
4412 /* Select expansion interrupt status register */
4413 tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
4414 MII_TG3_DSP_EXP1_INT_STAT);
4415 tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &phy2);
4416 if (phy2 & 0x20) {
4417 u32 bmcr;
4419 /* Config code words received, turn on autoneg. */
4420 tg3_readphy(tp, MII_BMCR, &bmcr);
4421 tg3_writephy(tp, MII_BMCR, bmcr | BMCR_ANENABLE);
4423 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
4429 static int tg3_setup_phy(struct tg3 *tp, int force_reset)
4431 u32 val;
4432 int err;
4434 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
4435 err = tg3_setup_fiber_phy(tp, force_reset);
4436 else if (tp->phy_flags & TG3_PHYFLG_MII_SERDES)
4437 err = tg3_setup_fiber_mii_phy(tp, force_reset);
4438 else
4439 err = tg3_setup_copper_phy(tp, force_reset);
4441 if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5784_AX) {
4442 u32 scale;
4444 val = tr32(TG3_CPMU_CLCK_STAT) & CPMU_CLCK_STAT_MAC_CLCK_MASK;
4445 if (val == CPMU_CLCK_STAT_MAC_CLCK_62_5)
4446 scale = 65;
4447 else if (val == CPMU_CLCK_STAT_MAC_CLCK_6_25)
4448 scale = 6;
4449 else
4450 scale = 12;
4452 val = tr32(GRC_MISC_CFG) & ~GRC_MISC_CFG_PRESCALAR_MASK;
4453 val |= (scale << GRC_MISC_CFG_PRESCALAR_SHIFT);
4454 tw32(GRC_MISC_CFG, val);
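/* The TX lengths value programmed below sets the MAC inter-packet
 * gap; 1000 Mbps half duplex gets the larger 0xff slot time
 * (presumably for half-duplex gigabit timing), while all other
 * link modes use the standard value of 32.
 */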
4457 val = (2 << TX_LENGTHS_IPG_CRS_SHIFT) |
4458 (6 << TX_LENGTHS_IPG_SHIFT);
4459 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720)
4460 val |= tr32(MAC_TX_LENGTHS) &
4461 (TX_LENGTHS_JMB_FRM_LEN_MSK |
4462 TX_LENGTHS_CNT_DWN_VAL_MSK);
4464 if (tp->link_config.active_speed == SPEED_1000 &&
4465 tp->link_config.active_duplex == DUPLEX_HALF)
4466 tw32(MAC_TX_LENGTHS, val |
4467 (0xff << TX_LENGTHS_SLOT_TIME_SHIFT));
4468 else
4469 tw32(MAC_TX_LENGTHS, val |
4470 (32 << TX_LENGTHS_SLOT_TIME_SHIFT));
4472 if (!tg3_flag(tp, 5705_PLUS)) {
4473 if (netif_carrier_ok(tp->dev)) {
4474 tw32(HOSTCC_STAT_COAL_TICKS,
4475 tp->coal.stats_block_coalesce_usecs);
4476 } else {
4477 tw32(HOSTCC_STAT_COAL_TICKS, 0);
4481 if (tg3_flag(tp, ASPM_WORKAROUND)) {
4482 val = tr32(PCIE_PWR_MGMT_THRESH);
4483 if (!netif_carrier_ok(tp->dev))
4484 val = (val & ~PCIE_PWR_MGMT_L1_THRESH_MSK) |
4485 tp->pwrmgmt_thresh;
4486 else
4487 val |= PCIE_PWR_MGMT_L1_THRESH_MSK;
4488 tw32(PCIE_PWR_MGMT_THRESH, val);
4491 return err;
4494 static inline int tg3_irq_sync(struct tg3 *tp)
4496 return tp->irq_sync;
4499 static inline void tg3_rd32_loop(struct tg3 *tp, u32 *dst, u32 off, u32 len)
4501 int i;
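/* Offset the destination pointer by 'off' so that each register
 * value lands at the same offset within the dump buffer that the
 * register occupies in the device's register space.
 */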
4503 dst = (u32 *)((u8 *)dst + off);
4504 for (i = 0; i < len; i += sizeof(u32))
4505 *dst++ = tr32(off + i);
4508 static void tg3_dump_legacy_regs(struct tg3 *tp, u32 *regs)
4510 tg3_rd32_loop(tp, regs, TG3PCI_VENDOR, 0xb0);
4511 tg3_rd32_loop(tp, regs, MAILBOX_INTERRUPT_0, 0x200);
4512 tg3_rd32_loop(tp, regs, MAC_MODE, 0x4f0);
4513 tg3_rd32_loop(tp, regs, SNDDATAI_MODE, 0xe0);
4514 tg3_rd32_loop(tp, regs, SNDDATAC_MODE, 0x04);
4515 tg3_rd32_loop(tp, regs, SNDBDS_MODE, 0x80);
4516 tg3_rd32_loop(tp, regs, SNDBDI_MODE, 0x48);
4517 tg3_rd32_loop(tp, regs, SNDBDC_MODE, 0x04);
4518 tg3_rd32_loop(tp, regs, RCVLPC_MODE, 0x20);
4519 tg3_rd32_loop(tp, regs, RCVLPC_SELLST_BASE, 0x15c);
4520 tg3_rd32_loop(tp, regs, RCVDBDI_MODE, 0x0c);
4521 tg3_rd32_loop(tp, regs, RCVDBDI_JUMBO_BD, 0x3c);
4522 tg3_rd32_loop(tp, regs, RCVDBDI_BD_PROD_IDX_0, 0x44);
4523 tg3_rd32_loop(tp, regs, RCVDCC_MODE, 0x04);
4524 tg3_rd32_loop(tp, regs, RCVBDI_MODE, 0x20);
4525 tg3_rd32_loop(tp, regs, RCVCC_MODE, 0x14);
4526 tg3_rd32_loop(tp, regs, RCVLSC_MODE, 0x08);
4527 tg3_rd32_loop(tp, regs, MBFREE_MODE, 0x08);
4528 tg3_rd32_loop(tp, regs, HOSTCC_MODE, 0x100);
4530 if (tg3_flag(tp, SUPPORT_MSIX))
4531 tg3_rd32_loop(tp, regs, HOSTCC_RXCOL_TICKS_VEC1, 0x180);
4533 tg3_rd32_loop(tp, regs, MEMARB_MODE, 0x10);
4534 tg3_rd32_loop(tp, regs, BUFMGR_MODE, 0x58);
4535 tg3_rd32_loop(tp, regs, RDMAC_MODE, 0x08);
4536 tg3_rd32_loop(tp, regs, WDMAC_MODE, 0x08);
4537 tg3_rd32_loop(tp, regs, RX_CPU_MODE, 0x04);
4538 tg3_rd32_loop(tp, regs, RX_CPU_STATE, 0x04);
4539 tg3_rd32_loop(tp, regs, RX_CPU_PGMCTR, 0x04);
4540 tg3_rd32_loop(tp, regs, RX_CPU_HWBKPT, 0x04);
4542 if (!tg3_flag(tp, 5705_PLUS)) {
4543 tg3_rd32_loop(tp, regs, TX_CPU_MODE, 0x04);
4544 tg3_rd32_loop(tp, regs, TX_CPU_STATE, 0x04);
4545 tg3_rd32_loop(tp, regs, TX_CPU_PGMCTR, 0x04);
4548 tg3_rd32_loop(tp, regs, GRCMBOX_INTERRUPT_0, 0x110);
4549 tg3_rd32_loop(tp, regs, FTQ_RESET, 0x120);
4550 tg3_rd32_loop(tp, regs, MSGINT_MODE, 0x0c);
4551 tg3_rd32_loop(tp, regs, DMAC_MODE, 0x04);
4552 tg3_rd32_loop(tp, regs, GRC_MODE, 0x4c);
4554 if (tg3_flag(tp, NVRAM))
4555 tg3_rd32_loop(tp, regs, NVRAM_CMD, 0x24);
4558 static void tg3_dump_state(struct tg3 *tp)
4560 int i;
4561 u32 *regs;
4563 regs = kzalloc(TG3_REG_BLK_SIZE, GFP_ATOMIC);
4564 if (!regs) {
4565 netdev_err(tp->dev, "Failed allocating register dump buffer\n");
4566 return;
4569 if (tg3_flag(tp, PCI_EXPRESS)) {
4570 /* Read up to but not including private PCI registers */
4571 for (i = 0; i < TG3_PCIE_TLDLPL_PORT; i += sizeof(u32))
4572 regs[i / sizeof(u32)] = tr32(i);
4573 } else
4574 tg3_dump_legacy_regs(tp, regs);
4576 for (i = 0; i < TG3_REG_BLK_SIZE / sizeof(u32); i += 4) {
4577 if (!regs[i + 0] && !regs[i + 1] &&
4578 !regs[i + 2] && !regs[i + 3])
4579 continue;
4581 netdev_err(tp->dev, "0x%08x: 0x%08x, 0x%08x, 0x%08x, 0x%08x\n",
4582 i * 4,
4583 regs[i + 0], regs[i + 1], regs[i + 2], regs[i + 3]);
4586 kfree(regs);
4588 for (i = 0; i < tp->irq_cnt; i++) {
4589 struct tg3_napi *tnapi = &tp->napi[i];
4591 /* SW status block */
4592 netdev_err(tp->dev,
4593 "%d: Host status block [%08x:%08x:(%04x:%04x:%04x):(%04x:%04x)]\n",
4595 tnapi->hw_status->status,
4596 tnapi->hw_status->status_tag,
4597 tnapi->hw_status->rx_jumbo_consumer,
4598 tnapi->hw_status->rx_consumer,
4599 tnapi->hw_status->rx_mini_consumer,
4600 tnapi->hw_status->idx[0].rx_producer,
4601 tnapi->hw_status->idx[0].tx_consumer);
4603 netdev_err(tp->dev,
4604 "%d: NAPI info [%08x:%08x:(%04x:%04x:%04x):%04x:(%04x:%04x:%04x:%04x)]\n",
4606 tnapi->last_tag, tnapi->last_irq_tag,
4607 tnapi->tx_prod, tnapi->tx_cons, tnapi->tx_pending,
4608 tnapi->rx_rcb_ptr,
4609 tnapi->prodring.rx_std_prod_idx,
4610 tnapi->prodring.rx_std_cons_idx,
4611 tnapi->prodring.rx_jmb_prod_idx,
4612 tnapi->prodring.rx_jmb_cons_idx);
4616 /* This is called whenever we suspect that the system chipset is re-
4617 * ordering the sequence of MMIO to the tx send mailbox. The symptom
4618 * is bogus tx completions. We try to recover by setting the
4619 * TG3_FLAG_MBOX_WRITE_REORDER flag and resetting the chip later
4620 * in the workqueue.
4622 static void tg3_tx_recover(struct tg3 *tp)
4624 BUG_ON(tg3_flag(tp, MBOX_WRITE_REORDER) ||
4625 tp->write32_tx_mbox == tg3_write_indirect_mbox);
4627 netdev_warn(tp->dev,
4628 "The system may be re-ordering memory-mapped I/O "
4629 "cycles to the network device, attempting to recover. "
4630 "Please report the problem to the driver maintainer "
4631 "and include system chipset information.\n");
4633 spin_lock(&tp->lock);
4634 tg3_flag_set(tp, TX_RECOVERY_PENDING);
4635 spin_unlock(&tp->lock);
4638 static inline u32 tg3_tx_avail(struct tg3_napi *tnapi)
4640 /* Tell compiler to fetch tx indices from memory. */
4641 barrier();
4642 return tnapi->tx_pending -
4643 ((tnapi->tx_prod - tnapi->tx_cons) & (TG3_TX_RING_SIZE - 1));
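/* Worked example (illustrative): assuming the usual 512-entry TX
 * ring with tx_pending of 511, tx_prod = 10 and tx_cons = 500 give
 * (10 - 500) & 511 = 22 descriptors in flight, so 511 - 22 = 489
 * slots are still available.
 */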
4646 /* Tigon3 never reports partial packet sends. So we do not
4647 * need special logic to handle SKBs that have not had all
4648 * of their frags sent yet, like SunGEM does.
4650 static void tg3_tx(struct tg3_napi *tnapi)
4652 struct tg3 *tp = tnapi->tp;
4653 u32 hw_idx = tnapi->hw_status->idx[0].tx_consumer;
4654 u32 sw_idx = tnapi->tx_cons;
4655 struct netdev_queue *txq;
4656 int index = tnapi - tp->napi;
4658 if (tg3_flag(tp, ENABLE_TSS))
4659 index--;
4661 txq = netdev_get_tx_queue(tp->dev, index);
4663 while (sw_idx != hw_idx) {
4664 struct ring_info *ri = &tnapi->tx_buffers[sw_idx];
4665 struct sk_buff *skb = ri->skb;
4666 int i, tx_bug = 0;
4668 if (unlikely(skb == NULL)) {
4669 tg3_tx_recover(tp);
4670 return;
4673 pci_unmap_single(tp->pdev,
4674 dma_unmap_addr(ri, mapping),
4675 skb_headlen(skb),
4676 PCI_DMA_TODEVICE);
4678 ri->skb = NULL;
4680 sw_idx = NEXT_TX(sw_idx);
4682 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
4683 ri = &tnapi->tx_buffers[sw_idx];
4684 if (unlikely(ri->skb != NULL || sw_idx == hw_idx))
4685 tx_bug = 1;
4687 pci_unmap_page(tp->pdev,
4688 dma_unmap_addr(ri, mapping),
4689 skb_shinfo(skb)->frags[i].size,
4690 PCI_DMA_TODEVICE);
4691 sw_idx = NEXT_TX(sw_idx);
4694 dev_kfree_skb(skb);
4696 if (unlikely(tx_bug)) {
4697 tg3_tx_recover(tp);
4698 return;
4702 tnapi->tx_cons = sw_idx;
4704 /* Need to make the tx_cons update visible to tg3_start_xmit()
4705 * before checking for netif_queue_stopped(). Without the
4706 * memory barrier, there is a small possibility that tg3_start_xmit()
4707 * will miss it and cause the queue to be stopped forever.
4709 smp_mb();
4711 if (unlikely(netif_tx_queue_stopped(txq) &&
4712 (tg3_tx_avail(tnapi) > TG3_TX_WAKEUP_THRESH(tnapi)))) {
4713 __netif_tx_lock(txq, smp_processor_id());
4714 if (netif_tx_queue_stopped(txq) &&
4715 (tg3_tx_avail(tnapi) > TG3_TX_WAKEUP_THRESH(tnapi)))
4716 netif_tx_wake_queue(txq);
4717 __netif_tx_unlock(txq);
4721 static void tg3_rx_skb_free(struct tg3 *tp, struct ring_info *ri, u32 map_sz)
4723 if (!ri->skb)
4724 return;
4726 pci_unmap_single(tp->pdev, dma_unmap_addr(ri, mapping),
4727 map_sz, PCI_DMA_FROMDEVICE);
4728 dev_kfree_skb_any(ri->skb);
4729 ri->skb = NULL;
4732 /* Returns size of skb allocated or < 0 on error.
4734 * We only need to fill in the address because the other members
4735 * of the RX descriptor are invariant, see tg3_init_rings.
4737 * Note the purposeful asymmetry of cpu vs. chip accesses. For
4738 * posting buffers we only dirty the first cache line of the RX
4739 * descriptor (containing the address). Whereas for the RX status
4740 * buffers the cpu only reads the last cacheline of the RX descriptor
4741 * (to fetch the error flags, vlan tag, checksum, and opaque cookie).
4743 static int tg3_alloc_rx_skb(struct tg3 *tp, struct tg3_rx_prodring_set *tpr,
4744 u32 opaque_key, u32 dest_idx_unmasked)
4746 struct tg3_rx_buffer_desc *desc;
4747 struct ring_info *map;
4748 struct sk_buff *skb;
4749 dma_addr_t mapping;
4750 int skb_size, dest_idx;
4752 switch (opaque_key) {
4753 case RXD_OPAQUE_RING_STD:
4754 dest_idx = dest_idx_unmasked & tp->rx_std_ring_mask;
4755 desc = &tpr->rx_std[dest_idx];
4756 map = &tpr->rx_std_buffers[dest_idx];
4757 skb_size = tp->rx_pkt_map_sz;
4758 break;
4760 case RXD_OPAQUE_RING_JUMBO:
4761 dest_idx = dest_idx_unmasked & tp->rx_jmb_ring_mask;
4762 desc = &tpr->rx_jmb[dest_idx].std;
4763 map = &tpr->rx_jmb_buffers[dest_idx];
4764 skb_size = TG3_RX_JMB_MAP_SZ;
4765 break;
4767 default:
4768 return -EINVAL;
4771 /* Do not overwrite any of the map or rp information
4772 * until we are sure we can commit to a new buffer.
4774 * Callers depend upon this behavior and assume that
4775 * we leave everything unchanged if we fail.
4777 skb = netdev_alloc_skb(tp->dev, skb_size + tp->rx_offset);
4778 if (skb == NULL)
4779 return -ENOMEM;
4781 skb_reserve(skb, tp->rx_offset);
4783 mapping = pci_map_single(tp->pdev, skb->data, skb_size,
4784 PCI_DMA_FROMDEVICE);
4785 if (pci_dma_mapping_error(tp->pdev, mapping)) {
4786 dev_kfree_skb(skb);
4787 return -EIO;
4790 map->skb = skb;
4791 dma_unmap_addr_set(map, mapping, mapping);
4793 desc->addr_hi = ((u64)mapping >> 32);
4794 desc->addr_lo = ((u64)mapping & 0xffffffff);
4796 return skb_size;
4799 /* We only need to copy over the address because the other
4800 * members of the RX descriptor are invariant. See notes above
4801 * tg3_alloc_rx_skb for full details.
4803 static void tg3_recycle_rx(struct tg3_napi *tnapi,
4804 struct tg3_rx_prodring_set *dpr,
4805 u32 opaque_key, int src_idx,
4806 u32 dest_idx_unmasked)
4808 struct tg3 *tp = tnapi->tp;
4809 struct tg3_rx_buffer_desc *src_desc, *dest_desc;
4810 struct ring_info *src_map, *dest_map;
4811 struct tg3_rx_prodring_set *spr = &tp->napi[0].prodring;
4812 int dest_idx;
4814 switch (opaque_key) {
4815 case RXD_OPAQUE_RING_STD:
4816 dest_idx = dest_idx_unmasked & tp->rx_std_ring_mask;
4817 dest_desc = &dpr->rx_std[dest_idx];
4818 dest_map = &dpr->rx_std_buffers[dest_idx];
4819 src_desc = &spr->rx_std[src_idx];
4820 src_map = &spr->rx_std_buffers[src_idx];
4821 break;
4823 case RXD_OPAQUE_RING_JUMBO:
4824 dest_idx = dest_idx_unmasked & tp->rx_jmb_ring_mask;
4825 dest_desc = &dpr->rx_jmb[dest_idx].std;
4826 dest_map = &dpr->rx_jmb_buffers[dest_idx];
4827 src_desc = &spr->rx_jmb[src_idx].std;
4828 src_map = &spr->rx_jmb_buffers[src_idx];
4829 break;
4831 default:
4832 return;
4835 dest_map->skb = src_map->skb;
4836 dma_unmap_addr_set(dest_map, mapping,
4837 dma_unmap_addr(src_map, mapping));
4838 dest_desc->addr_hi = src_desc->addr_hi;
4839 dest_desc->addr_lo = src_desc->addr_lo;
4841 /* Ensure that the update to the skb happens after the physical
4842 * addresses have been transferred to the new BD location.
4844 smp_wmb();
4846 src_map->skb = NULL;
4849 /* The RX ring scheme is composed of multiple rings which post fresh
4850 * buffers to the chip, and one special ring the chip uses to report
4851 * status back to the host.
4853 * The special ring reports the status of received packets to the
4854 * host. The chip does not write into the original descriptor the
4855 * RX buffer was obtained from. The chip simply takes the original
4856 * descriptor as provided by the host, updates the status and length
4857 * field, then writes this into the next status ring entry.
4859 * Each ring the host uses to post buffers to the chip is described
4860 * by a TG3_BDINFO entry in the chip's SRAM area. When a packet arrives,
4861 * it is first placed into the on-chip RAM. When the packet's length
4862 * is known, the chip walks down the TG3_BDINFO entries to select the ring.
4863 * Each TG3_BDINFO specifies a MAXLEN field, and the first TG3_BDINFO
4864 * whose MAXLEN covers the new packet's length is chosen.
4866 * The "separate ring for rx status" scheme may sound queer, but it makes
4867 * sense from a cache coherency perspective. If only the host writes
4868 * to the buffer post rings, and only the chip writes to the rx status
4869 * rings, then cache lines never move beyond shared-modified state.
4870 * If both the host and chip were to write into the same ring, cache line
4871 * eviction could occur since both entities want it in an exclusive state.
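/* Sketch of the selection rule described above (illustrative
 * pseudocode, not the chip's actual firmware):
 *
 *	for each TG3_BDINFO ring (standard, then jumbo):
 *		if (pkt_len <= ring->maxlen)
 *			take a buffer from this ring and report it
 *			in the next rx status ring entry;
 */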
4873 static int tg3_rx(struct tg3_napi *tnapi, int budget)
4875 struct tg3 *tp = tnapi->tp;
4876 u32 work_mask, rx_std_posted = 0;
4877 u32 std_prod_idx, jmb_prod_idx;
4878 u32 sw_idx = tnapi->rx_rcb_ptr;
4879 u16 hw_idx;
4880 int received;
4881 struct tg3_rx_prodring_set *tpr = &tnapi->prodring;
4883 hw_idx = *(tnapi->rx_rcb_prod_idx);
4885 * We need to order the read of hw_idx and the read of
4886 * the opaque cookie.
4888 rmb();
4889 work_mask = 0;
4890 received = 0;
4891 std_prod_idx = tpr->rx_std_prod_idx;
4892 jmb_prod_idx = tpr->rx_jmb_prod_idx;
4893 while (sw_idx != hw_idx && budget > 0) {
4894 struct ring_info *ri;
4895 struct tg3_rx_buffer_desc *desc = &tnapi->rx_rcb[sw_idx];
4896 unsigned int len;
4897 struct sk_buff *skb;
4898 dma_addr_t dma_addr;
4899 u32 opaque_key, desc_idx, *post_ptr;
4901 desc_idx = desc->opaque & RXD_OPAQUE_INDEX_MASK;
4902 opaque_key = desc->opaque & RXD_OPAQUE_RING_MASK;
4903 if (opaque_key == RXD_OPAQUE_RING_STD) {
4904 ri = &tp->napi[0].prodring.rx_std_buffers[desc_idx];
4905 dma_addr = dma_unmap_addr(ri, mapping);
4906 skb = ri->skb;
4907 post_ptr = &std_prod_idx;
4908 rx_std_posted++;
4909 } else if (opaque_key == RXD_OPAQUE_RING_JUMBO) {
4910 ri = &tp->napi[0].prodring.rx_jmb_buffers[desc_idx];
4911 dma_addr = dma_unmap_addr(ri, mapping);
4912 skb = ri->skb;
4913 post_ptr = &jmb_prod_idx;
4914 } else
4915 goto next_pkt_nopost;
4917 work_mask |= opaque_key;
4919 if ((desc->err_vlan & RXD_ERR_MASK) != 0 &&
4920 (desc->err_vlan != RXD_ERR_ODD_NIBBLE_RCVD_MII)) {
4921 drop_it:
4922 tg3_recycle_rx(tnapi, tpr, opaque_key,
4923 desc_idx, *post_ptr);
4924 drop_it_no_recycle:
4925 /* Other statistics are kept track of by the card. */
4926 tp->rx_dropped++;
4927 goto next_pkt;
4930 len = ((desc->idx_len & RXD_LEN_MASK) >> RXD_LEN_SHIFT) -
4931 ETH_FCS_LEN;
4933 if (len > TG3_RX_COPY_THRESH(tp)) {
4934 int skb_size;
4936 skb_size = tg3_alloc_rx_skb(tp, tpr, opaque_key,
4937 *post_ptr);
4938 if (skb_size < 0)
4939 goto drop_it;
4941 pci_unmap_single(tp->pdev, dma_addr, skb_size,
4942 PCI_DMA_FROMDEVICE);
4944 /* Ensure that the update to the skb happens
4945 * after the usage of the old DMA mapping.
4947 smp_wmb();
4949 ri->skb = NULL;
4951 skb_put(skb, len);
4952 } else {
4953 struct sk_buff *copy_skb;
4955 tg3_recycle_rx(tnapi, tpr, opaque_key,
4956 desc_idx, *post_ptr);
4958 copy_skb = netdev_alloc_skb(tp->dev, len +
4959 TG3_RAW_IP_ALIGN);
4960 if (copy_skb == NULL)
4961 goto drop_it_no_recycle;
4963 skb_reserve(copy_skb, TG3_RAW_IP_ALIGN);
4964 skb_put(copy_skb, len);
4965 pci_dma_sync_single_for_cpu(tp->pdev, dma_addr, len, PCI_DMA_FROMDEVICE);
4966 skb_copy_from_linear_data(skb, copy_skb->data, len);
4967 pci_dma_sync_single_for_device(tp->pdev, dma_addr, len, PCI_DMA_FROMDEVICE);
4969 /* We'll reuse the original ring buffer. */
4970 skb = copy_skb;
4973 if ((tp->dev->features & NETIF_F_RXCSUM) &&
4974 (desc->type_flags & RXD_FLAG_TCPUDP_CSUM) &&
4975 (((desc->ip_tcp_csum & RXD_TCPCSUM_MASK)
4976 >> RXD_TCPCSUM_SHIFT) == 0xffff))
4977 skb->ip_summed = CHECKSUM_UNNECESSARY;
4978 else
4979 skb_checksum_none_assert(skb);
4981 skb->protocol = eth_type_trans(skb, tp->dev);
4983 if (len > (tp->dev->mtu + ETH_HLEN) &&
4984 skb->protocol != htons(ETH_P_8021Q)) {
4985 dev_kfree_skb(skb);
4986 goto drop_it_no_recycle;
4989 if (desc->type_flags & RXD_FLAG_VLAN &&
4990 !(tp->rx_mode & RX_MODE_KEEP_VLAN_TAG))
4991 __vlan_hwaccel_put_tag(skb,
4992 desc->err_vlan & RXD_VLAN_MASK);
4994 napi_gro_receive(&tnapi->napi, skb);
4996 received++;
4997 budget--;
4999 next_pkt:
5000 (*post_ptr)++;
5002 if (unlikely(rx_std_posted >= tp->rx_std_max_post)) {
5003 tpr->rx_std_prod_idx = std_prod_idx &
5004 tp->rx_std_ring_mask;
5005 tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG,
5006 tpr->rx_std_prod_idx);
5007 work_mask &= ~RXD_OPAQUE_RING_STD;
5008 rx_std_posted = 0;
5010 next_pkt_nopost:
5011 sw_idx++;
5012 sw_idx &= tp->rx_ret_ring_mask;
5014 /* Refresh hw_idx to see if there is new work */
5015 if (sw_idx == hw_idx) {
5016 hw_idx = *(tnapi->rx_rcb_prod_idx);
5017 rmb();
5021 /* ACK the status ring. */
5022 tnapi->rx_rcb_ptr = sw_idx;
5023 tw32_rx_mbox(tnapi->consmbox, sw_idx);
5025 /* Refill RX ring(s). */
5026 if (!tg3_flag(tp, ENABLE_RSS)) {
5027 if (work_mask & RXD_OPAQUE_RING_STD) {
5028 tpr->rx_std_prod_idx = std_prod_idx &
5029 tp->rx_std_ring_mask;
5030 tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG,
5031 tpr->rx_std_prod_idx);
5033 if (work_mask & RXD_OPAQUE_RING_JUMBO) {
5034 tpr->rx_jmb_prod_idx = jmb_prod_idx &
5035 tp->rx_jmb_ring_mask;
5036 tw32_rx_mbox(TG3_RX_JMB_PROD_IDX_REG,
5037 tpr->rx_jmb_prod_idx);
5039 mmiowb();
5040 } else if (work_mask) {
5041 /* rx_std_buffers[] and rx_jmb_buffers[] entries must be
5042 * updated before the producer indices can be updated.
5044 smp_wmb();
5046 tpr->rx_std_prod_idx = std_prod_idx & tp->rx_std_ring_mask;
5047 tpr->rx_jmb_prod_idx = jmb_prod_idx & tp->rx_jmb_ring_mask;
5049 if (tnapi != &tp->napi[1])
5050 napi_schedule(&tp->napi[1].napi);
5053 return received;
5056 static void tg3_poll_link(struct tg3 *tp)
5058 /* handle link change and other phy events */
5059 if (!(tg3_flag(tp, USE_LINKCHG_REG) || tg3_flag(tp, POLL_SERDES))) {
5060 struct tg3_hw_status *sblk = tp->napi[0].hw_status;
5062 if (sblk->status & SD_STATUS_LINK_CHG) {
5063 sblk->status = SD_STATUS_UPDATED |
5064 (sblk->status & ~SD_STATUS_LINK_CHG);
5065 spin_lock(&tp->lock);
5066 if (tg3_flag(tp, USE_PHYLIB)) {
5067 tw32_f(MAC_STATUS,
5068 (MAC_STATUS_SYNC_CHANGED |
5069 MAC_STATUS_CFG_CHANGED |
5070 MAC_STATUS_MI_COMPLETION |
5071 MAC_STATUS_LNKSTATE_CHANGED));
5072 udelay(40);
5073 } else
5074 tg3_setup_phy(tp, 0);
5075 spin_unlock(&tp->lock);
5080 static int tg3_rx_prodring_xfer(struct tg3 *tp,
5081 struct tg3_rx_prodring_set *dpr,
5082 struct tg3_rx_prodring_set *spr)
5084 u32 si, di, cpycnt, src_prod_idx;
5085 int i, err = 0;
5087 while (1) {
5088 src_prod_idx = spr->rx_std_prod_idx;
5090 /* Make sure updates to the rx_std_buffers[] entries and the
5091 * standard producer index are seen in the correct order.
5093 smp_rmb();
5095 if (spr->rx_std_cons_idx == src_prod_idx)
5096 break;
5098 if (spr->rx_std_cons_idx < src_prod_idx)
5099 cpycnt = src_prod_idx - spr->rx_std_cons_idx;
5100 else
5101 cpycnt = tp->rx_std_ring_mask + 1 -
5102 spr->rx_std_cons_idx;
5104 cpycnt = min(cpycnt,
5105 tp->rx_std_ring_mask + 1 - dpr->rx_std_prod_idx);
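/* At this point cpycnt is the longest contiguous run that can be
 * copied without wrapping either the source consumer index or the
 * destination producer index past the end of the ring.
 */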
5107 si = spr->rx_std_cons_idx;
5108 di = dpr->rx_std_prod_idx;
5110 for (i = di; i < di + cpycnt; i++) {
5111 if (dpr->rx_std_buffers[i].skb) {
5112 cpycnt = i - di;
5113 err = -ENOSPC;
5114 break;
5118 if (!cpycnt)
5119 break;
5121 /* Ensure that updates to the rx_std_buffers ring and the
5122 * shadowed hardware producer ring from tg3_recycle_skb() are
5123 * ordered correctly WRT the skb check above.
5125 smp_rmb();
5127 memcpy(&dpr->rx_std_buffers[di],
5128 &spr->rx_std_buffers[si],
5129 cpycnt * sizeof(struct ring_info));
5131 for (i = 0; i < cpycnt; i++, di++, si++) {
5132 struct tg3_rx_buffer_desc *sbd, *dbd;
5133 sbd = &spr->rx_std[si];
5134 dbd = &dpr->rx_std[di];
5135 dbd->addr_hi = sbd->addr_hi;
5136 dbd->addr_lo = sbd->addr_lo;
5139 spr->rx_std_cons_idx = (spr->rx_std_cons_idx + cpycnt) &
5140 tp->rx_std_ring_mask;
5141 dpr->rx_std_prod_idx = (dpr->rx_std_prod_idx + cpycnt) &
5142 tp->rx_std_ring_mask;
5145 while (1) {
5146 src_prod_idx = spr->rx_jmb_prod_idx;
5148 /* Make sure updates to the rx_jmb_buffers[] entries and
5149 * the jumbo producer index are seen in the correct order.
5151 smp_rmb();
5153 if (spr->rx_jmb_cons_idx == src_prod_idx)
5154 break;
5156 if (spr->rx_jmb_cons_idx < src_prod_idx)
5157 cpycnt = src_prod_idx - spr->rx_jmb_cons_idx;
5158 else
5159 cpycnt = tp->rx_jmb_ring_mask + 1 -
5160 spr->rx_jmb_cons_idx;
5162 cpycnt = min(cpycnt,
5163 tp->rx_jmb_ring_mask + 1 - dpr->rx_jmb_prod_idx);
5165 si = spr->rx_jmb_cons_idx;
5166 di = dpr->rx_jmb_prod_idx;
5168 for (i = di; i < di + cpycnt; i++) {
5169 if (dpr->rx_jmb_buffers[i].skb) {
5170 cpycnt = i - di;
5171 err = -ENOSPC;
5172 break;
5176 if (!cpycnt)
5177 break;
5179 /* Ensure that updates to the rx_jmb_buffers ring and the
5180 * shadowed hardware producer ring from tg3_recycle_skb() are
5181 * ordered correctly WRT the skb check above.
5183 smp_rmb();
5185 memcpy(&dpr->rx_jmb_buffers[di],
5186 &spr->rx_jmb_buffers[si],
5187 cpycnt * sizeof(struct ring_info));
5189 for (i = 0; i < cpycnt; i++, di++, si++) {
5190 struct tg3_rx_buffer_desc *sbd, *dbd;
5191 sbd = &spr->rx_jmb[si].std;
5192 dbd = &dpr->rx_jmb[di].std;
5193 dbd->addr_hi = sbd->addr_hi;
5194 dbd->addr_lo = sbd->addr_lo;
5197 spr->rx_jmb_cons_idx = (spr->rx_jmb_cons_idx + cpycnt) &
5198 tp->rx_jmb_ring_mask;
5199 dpr->rx_jmb_prod_idx = (dpr->rx_jmb_prod_idx + cpycnt) &
5200 tp->rx_jmb_ring_mask;
5203 return err;
5206 static int tg3_poll_work(struct tg3_napi *tnapi, int work_done, int budget)
5208 struct tg3 *tp = tnapi->tp;
5210 /* run TX completion thread */
5211 if (tnapi->hw_status->idx[0].tx_consumer != tnapi->tx_cons) {
5212 tg3_tx(tnapi);
5213 if (unlikely(tg3_flag(tp, TX_RECOVERY_PENDING)))
5214 return work_done;
5217 /* run RX thread, within the bounds set by NAPI.
5218 * All RX "locking" is done by ensuring outside
5219 * code synchronizes with tg3->napi.poll()
5221 if (*(tnapi->rx_rcb_prod_idx) != tnapi->rx_rcb_ptr)
5222 work_done += tg3_rx(tnapi, budget - work_done);
5224 if (tg3_flag(tp, ENABLE_RSS) && tnapi == &tp->napi[1]) {
5225 struct tg3_rx_prodring_set *dpr = &tp->napi[0].prodring;
5226 int i, err = 0;
5227 u32 std_prod_idx = dpr->rx_std_prod_idx;
5228 u32 jmb_prod_idx = dpr->rx_jmb_prod_idx;
5230 for (i = 1; i < tp->irq_cnt; i++)
5231 err |= tg3_rx_prodring_xfer(tp, dpr,
5232 &tp->napi[i].prodring);
5234 wmb();
5236 if (std_prod_idx != dpr->rx_std_prod_idx)
5237 tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG,
5238 dpr->rx_std_prod_idx);
5240 if (jmb_prod_idx != dpr->rx_jmb_prod_idx)
5241 tw32_rx_mbox(TG3_RX_JMB_PROD_IDX_REG,
5242 dpr->rx_jmb_prod_idx);
5244 mmiowb();
5246 if (err)
5247 tw32_f(HOSTCC_MODE, tp->coal_now);
5250 return work_done;
5253 static int tg3_poll_msix(struct napi_struct *napi, int budget)
5255 struct tg3_napi *tnapi = container_of(napi, struct tg3_napi, napi);
5256 struct tg3 *tp = tnapi->tp;
5257 int work_done = 0;
5258 struct tg3_hw_status *sblk = tnapi->hw_status;
5260 while (1) {
5261 work_done = tg3_poll_work(tnapi, work_done, budget);
5263 if (unlikely(tg3_flag(tp, TX_RECOVERY_PENDING)))
5264 goto tx_recovery;
5266 if (unlikely(work_done >= budget))
5267 break;
5269 /* tnapi->last_tag is used in the interrupt mailbox write
5270 * below to tell the hw how much work has been processed,
5271 * so we must read it before checking for more work.
5273 tnapi->last_tag = sblk->status_tag;
5274 tnapi->last_irq_tag = tnapi->last_tag;
5275 rmb();
5277 /* check for RX/TX work to do */
5278 if (likely(sblk->idx[0].tx_consumer == tnapi->tx_cons &&
5279 *(tnapi->rx_rcb_prod_idx) == tnapi->rx_rcb_ptr)) {
5280 napi_complete(napi);
5281 /* Reenable interrupts. */
5282 tw32_mailbox(tnapi->int_mbox, tnapi->last_tag << 24);
5283 mmiowb();
5284 break;
5288 return work_done;
5290 tx_recovery:
5291 /* work_done is guaranteed to be less than budget. */
5292 napi_complete(napi);
5293 schedule_work(&tp->reset_task);
5294 return work_done;
5297 static void tg3_process_error(struct tg3 *tp)
5299 u32 val;
5300 bool real_error = false;
5302 if (tg3_flag(tp, ERROR_PROCESSED))
5303 return;
5305 /* Check Flow Attention register */
5306 val = tr32(HOSTCC_FLOW_ATTN);
5307 if (val & ~HOSTCC_FLOW_ATTN_MBUF_LWM) {
5308 netdev_err(tp->dev, "FLOW Attention error. Resetting chip.\n");
5309 real_error = true;
5312 if (tr32(MSGINT_STATUS) & ~MSGINT_STATUS_MSI_REQ) {
5313 netdev_err(tp->dev, "MSI Status error. Resetting chip.\n");
5314 real_error = true;
5317 if (tr32(RDMAC_STATUS) || tr32(WDMAC_STATUS)) {
5318 netdev_err(tp->dev, "DMA Status error. Resetting chip.\n");
5319 real_error = true;
5322 if (!real_error)
5323 return;
5325 tg3_dump_state(tp);
5327 tg3_flag_set(tp, ERROR_PROCESSED);
5328 schedule_work(&tp->reset_task);
5331 static int tg3_poll(struct napi_struct *napi, int budget)
5333 struct tg3_napi *tnapi = container_of(napi, struct tg3_napi, napi);
5334 struct tg3 *tp = tnapi->tp;
5335 int work_done = 0;
5336 struct tg3_hw_status *sblk = tnapi->hw_status;
5338 while (1) {
5339 if (sblk->status & SD_STATUS_ERROR)
5340 tg3_process_error(tp);
5342 tg3_poll_link(tp);
5344 work_done = tg3_poll_work(tnapi, work_done, budget);
5346 if (unlikely(tg3_flag(tp, TX_RECOVERY_PENDING)))
5347 goto tx_recovery;
5349 if (unlikely(work_done >= budget))
5350 break;
5352 if (tg3_flag(tp, TAGGED_STATUS)) {
5353 /* tnapi->last_tag is used in tg3_int_reenable() below
5354 * to tell the hw how much work has been processed,
5355 * so we must read it before checking for more work.
5357 tnapi->last_tag = sblk->status_tag;
5358 tnapi->last_irq_tag = tnapi->last_tag;
5359 rmb();
5360 } else
5361 sblk->status &= ~SD_STATUS_UPDATED;
5363 if (likely(!tg3_has_work(tnapi))) {
5364 napi_complete(napi);
5365 tg3_int_reenable(tnapi);
5366 break;
5370 return work_done;
5372 tx_recovery:
5373 /* work_done is guaranteed to be less than budget. */
5374 napi_complete(napi);
5375 schedule_work(&tp->reset_task);
5376 return work_done;
5379 static void tg3_napi_disable(struct tg3 *tp)
5381 int i;
5383 for (i = tp->irq_cnt - 1; i >= 0; i--)
5384 napi_disable(&tp->napi[i].napi);
5387 static void tg3_napi_enable(struct tg3 *tp)
5389 int i;
5391 for (i = 0; i < tp->irq_cnt; i++)
5392 napi_enable(&tp->napi[i].napi);
5395 static void tg3_napi_init(struct tg3 *tp)
5397 int i;
5399 netif_napi_add(tp->dev, &tp->napi[0].napi, tg3_poll, 64);
5400 for (i = 1; i < tp->irq_cnt; i++)
5401 netif_napi_add(tp->dev, &tp->napi[i].napi, tg3_poll_msix, 64);
5404 static void tg3_napi_fini(struct tg3 *tp)
5406 int i;
5408 for (i = 0; i < tp->irq_cnt; i++)
5409 netif_napi_del(&tp->napi[i].napi);
5412 static inline void tg3_netif_stop(struct tg3 *tp)
5414 tp->dev->trans_start = jiffies; /* prevent tx timeout */
5415 tg3_napi_disable(tp);
5416 netif_tx_disable(tp->dev);
5419 static inline void tg3_netif_start(struct tg3 *tp)
5421 /* NOTE: unconditional netif_tx_wake_all_queues is only
5422 * appropriate so long as all callers are assured to
5423 * have free tx slots (such as after tg3_init_hw)
5425 netif_tx_wake_all_queues(tp->dev);
5427 tg3_napi_enable(tp);
5428 tp->napi[0].hw_status->status |= SD_STATUS_UPDATED;
5429 tg3_enable_ints(tp);
5432 static void tg3_irq_quiesce(struct tg3 *tp)
5434 int i;
5436 BUG_ON(tp->irq_sync);
5438 tp->irq_sync = 1;
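/* Make the irq_sync store visible before waiting, so that any
 * handler which runs after synchronize_irq() returns will see it
 * and bail out early via tg3_irq_sync().
 */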
5439 smp_mb();
5441 for (i = 0; i < tp->irq_cnt; i++)
5442 synchronize_irq(tp->napi[i].irq_vec);
5445 /* Fully shut down all tg3 driver activity elsewhere in the system.
5446 * If irq_sync is non-zero, the IRQ handlers must be synchronized
5447 * with as well. Most of the time this is only necessary when
5448 * shutting down the device.
5450 static inline void tg3_full_lock(struct tg3 *tp, int irq_sync)
5452 spin_lock_bh(&tp->lock);
5453 if (irq_sync)
5454 tg3_irq_quiesce(tp);
5457 static inline void tg3_full_unlock(struct tg3 *tp)
5459 spin_unlock_bh(&tp->lock);
5462 /* One-shot MSI handler - Chip automatically disables interrupt
5463 * after sending MSI so driver doesn't have to do it.
5465 static irqreturn_t tg3_msi_1shot(int irq, void *dev_id)
5467 struct tg3_napi *tnapi = dev_id;
5468 struct tg3 *tp = tnapi->tp;
5470 prefetch(tnapi->hw_status);
5471 if (tnapi->rx_rcb)
5472 prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);
5474 if (likely(!tg3_irq_sync(tp)))
5475 napi_schedule(&tnapi->napi);
5477 return IRQ_HANDLED;
5480 /* MSI ISR - No need to check for interrupt sharing and no need to
5481 * flush status block and interrupt mailbox. PCI ordering rules
5482 * guarantee that MSI will arrive after the status block.
5484 static irqreturn_t tg3_msi(int irq, void *dev_id)
5486 struct tg3_napi *tnapi = dev_id;
5487 struct tg3 *tp = tnapi->tp;
5489 prefetch(tnapi->hw_status);
5490 if (tnapi->rx_rcb)
5491 prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);
5493 * Writing any value to intr-mbox-0 clears PCI INTA# and
5494 * chip-internal interrupt pending events.
5495 * Writing non-zero to intr-mbox-0 additionally tells the
5496 * NIC to stop sending us irqs, engaging "in-intr-handler"
5497 * event coalescing.
5499 tw32_mailbox(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);
5500 if (likely(!tg3_irq_sync(tp)))
5501 napi_schedule(&tnapi->napi);
5503 return IRQ_RETVAL(1);
5506 static irqreturn_t tg3_interrupt(int irq, void *dev_id)
5508 struct tg3_napi *tnapi = dev_id;
5509 struct tg3 *tp = tnapi->tp;
5510 struct tg3_hw_status *sblk = tnapi->hw_status;
5511 unsigned int handled = 1;
5513 /* In INTx mode, it is possible for the interrupt to arrive at
5514 * the CPU before the status block that was posted prior to the interrupt.
5515 * Reading the PCI State register will confirm whether the
5516 * interrupt is ours and will flush the status block.
5518 if (unlikely(!(sblk->status & SD_STATUS_UPDATED))) {
5519 if (tg3_flag(tp, CHIP_RESETTING) ||
5520 (tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
5521 handled = 0;
5522 goto out;
5527 * Writing any value to intr-mbox-0 clears PCI INTA# and
5528 * chip-internal interrupt pending events.
5529 * Writing non-zero to intr-mbox-0 additionally tells the
5530 * NIC to stop sending us irqs, engaging "in-intr-handler"
5531 * event coalescing.
5533 * Flush the mailbox to de-assert the IRQ immediately to prevent
5534 * spurious interrupts. The flush impacts performance but
5535 * excessive spurious interrupts can be worse in some cases.
5537 tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);
5538 if (tg3_irq_sync(tp))
5539 goto out;
5540 sblk->status &= ~SD_STATUS_UPDATED;
5541 if (likely(tg3_has_work(tnapi))) {
5542 prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);
5543 napi_schedule(&tnapi->napi);
5544 } else {
5545 /* No work, perhaps a shared interrupt? Re-enable
5546 * interrupts and flush that PCI write.
5548 tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
5549 0x00000000);
5551 out:
5552 return IRQ_RETVAL(handled);
5555 static irqreturn_t tg3_interrupt_tagged(int irq, void *dev_id)
5557 struct tg3_napi *tnapi = dev_id;
5558 struct tg3 *tp = tnapi->tp;
5559 struct tg3_hw_status *sblk = tnapi->hw_status;
5560 unsigned int handled = 1;
5562 /* In INTx mode, it is possible for the interrupt to arrive at
5563 * the CPU before the status block that was posted prior to the interrupt.
5564 * Reading the PCI State register will confirm whether the
5565 * interrupt is ours and will flush the status block.
5567 if (unlikely(sblk->status_tag == tnapi->last_irq_tag)) {
5568 if (tg3_flag(tp, CHIP_RESETTING) ||
5569 (tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
5570 handled = 0;
5571 goto out;
5576 * Writing any value to intr-mbox-0 clears PCI INTA# and
5577 * chip-internal interrupt pending events.
5578 * Writing non-zero to intr-mbox-0 additionally tells the
5579 * NIC to stop sending us irqs, engaging "in-intr-handler"
5580 * event coalescing.
5582 * Flush the mailbox to de-assert the IRQ immediately to prevent
5583 * spurious interrupts. The flush impacts performance but
5584 * excessive spurious interrupts can be worse in some cases.
5586 tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);
5589 * In a shared interrupt configuration, sometimes other devices'
5590 * interrupts will scream. We record the current status tag here
5591 * so that the above check can report that the screaming interrupts
5592 * are unhandled. Eventually they will be silenced.
5594 tnapi->last_irq_tag = sblk->status_tag;
5596 if (tg3_irq_sync(tp))
5597 goto out;
5599 prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);
5601 napi_schedule(&tnapi->napi);
5603 out:
5604 return IRQ_RETVAL(handled);
5607 /* ISR for interrupt test */
5608 static irqreturn_t tg3_test_isr(int irq, void *dev_id)
5610 struct tg3_napi *tnapi = dev_id;
5611 struct tg3 *tp = tnapi->tp;
5612 struct tg3_hw_status *sblk = tnapi->hw_status;
5614 if ((sblk->status & SD_STATUS_UPDATED) ||
5615 !(tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
5616 tg3_disable_ints(tp);
5617 return IRQ_RETVAL(1);
5619 return IRQ_RETVAL(0);
5622 static int tg3_init_hw(struct tg3 *, int);
5623 static int tg3_halt(struct tg3 *, int, int);
5625 /* Restart hardware after configuration changes, self-test, etc.
5626 * Invoked with tp->lock held.
5628 static int tg3_restart_hw(struct tg3 *tp, int reset_phy)
5629 __releases(tp->lock)
5630 __acquires(tp->lock)
5632 int err;
5634 err = tg3_init_hw(tp, reset_phy);
5635 if (err) {
5636 netdev_err(tp->dev,
5637 "Failed to re-initialize device, aborting\n");
5638 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
5639 tg3_full_unlock(tp);
5640 del_timer_sync(&tp->timer);
5641 tp->irq_sync = 0;
5642 tg3_napi_enable(tp);
5643 dev_close(tp->dev);
5644 tg3_full_lock(tp, 0);
5646 return err;
5649 #ifdef CONFIG_NET_POLL_CONTROLLER
5650 static void tg3_poll_controller(struct net_device *dev)
5652 int i;
5653 struct tg3 *tp = netdev_priv(dev);
5655 for (i = 0; i < tp->irq_cnt; i++)
5656 tg3_interrupt(tp->napi[i].irq_vec, &tp->napi[i]);
5658 #endif
5660 static void tg3_reset_task(struct work_struct *work)
5662 struct tg3 *tp = container_of(work, struct tg3, reset_task);
5663 int err;
5664 unsigned int restart_timer;
5666 tg3_full_lock(tp, 0);
5668 if (!netif_running(tp->dev)) {
5669 tg3_full_unlock(tp);
5670 return;
5673 tg3_full_unlock(tp);
5675 tg3_phy_stop(tp);
5677 tg3_netif_stop(tp);
5679 tg3_full_lock(tp, 1);
5681 restart_timer = tg3_flag(tp, RESTART_TIMER);
5682 tg3_flag_clear(tp, RESTART_TIMER);
5684 if (tg3_flag(tp, TX_RECOVERY_PENDING)) {
5685 tp->write32_tx_mbox = tg3_write32_tx_mbox;
5686 tp->write32_rx_mbox = tg3_write_flush_reg32;
5687 tg3_flag_set(tp, MBOX_WRITE_REORDER);
5688 tg3_flag_clear(tp, TX_RECOVERY_PENDING);
5691 tg3_halt(tp, RESET_KIND_SHUTDOWN, 0);
5692 err = tg3_init_hw(tp, 1);
5693 if (err)
5694 goto out;
5696 tg3_netif_start(tp);
5698 if (restart_timer)
5699 mod_timer(&tp->timer, jiffies + 1);
5701 out:
5702 tg3_full_unlock(tp);
5704 if (!err)
5705 tg3_phy_start(tp);
5708 static void tg3_tx_timeout(struct net_device *dev)
5710 struct tg3 *tp = netdev_priv(dev);
5712 if (netif_msg_tx_err(tp)) {
5713 netdev_err(dev, "transmit timed out, resetting\n");
5714 tg3_dump_state(tp);
5717 schedule_work(&tp->reset_task);
5720 /* Test for DMA buffers crossing any 4GB boundaries: 4G, 8G, etc */
5721 static inline int tg3_4g_overflow_test(dma_addr_t mapping, int len)
5723 u32 base = (u32) mapping & 0xffffffff;
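/* The (base + len + 8 < base) check below tests for 32-bit
 * wraparound, i.e. the buffer plus 8 bytes of slack crossing a 4GB
 * boundary; base > 0xffffdcc0 is a cheap pre-filter, since only a
 * mapping in the last ~9KB below a boundary can possibly wrap.
 */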
5725 return (base > 0xffffdcc0) && (base + len + 8 < base);
5728 /* Test for DMA addresses > 40-bit */
5729 static inline int tg3_40bit_overflow_test(struct tg3 *tp, dma_addr_t mapping,
5730 int len)
5732 #if defined(CONFIG_HIGHMEM) && (BITS_PER_LONG == 64)
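/* Only configurations that can hand out DMA addresses above the
 * 40-bit limit need the runtime test; everywhere else the #else
 * branch compiles it away to 0.
 */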
5733 if (tg3_flag(tp, 40BIT_DMA_BUG))
5734 return ((u64) mapping + len) > DMA_BIT_MASK(40);
5735 return 0;
5736 #else
5737 return 0;
5738 #endif
5741 static void tg3_set_txd(struct tg3_napi *tnapi, int entry,
5742 dma_addr_t mapping, int len, u32 flags,
5743 u32 mss_and_is_end)
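/* mss_and_is_end packs the segment MSS into bits 31:1 and an
 * "is last descriptor" flag into bit 0; callers pass e.g.
 * (i == last) | (mss << 1), which is unpacked below.
 */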
5745 struct tg3_tx_buffer_desc *txd = &tnapi->tx_ring[entry];
5746 int is_end = (mss_and_is_end & 0x1);
5747 u32 mss = (mss_and_is_end >> 1);
5748 u32 vlan_tag = 0;
5750 if (is_end)
5751 flags |= TXD_FLAG_END;
5752 if (flags & TXD_FLAG_VLAN) {
5753 vlan_tag = flags >> 16;
5754 flags &= 0xffff;
5756 vlan_tag |= (mss << TXD_MSS_SHIFT);
5758 txd->addr_hi = ((u64) mapping >> 32);
5759 txd->addr_lo = ((u64) mapping & 0xffffffff);
5760 txd->len_flags = (len << TXD_LEN_SHIFT) | flags;
5761 txd->vlan_tag = vlan_tag << TXD_VLAN_TAG_SHIFT;
5764 static void tg3_skb_error_unmap(struct tg3_napi *tnapi,
5765 struct sk_buff *skb, int last)
5767 int i;
5768 u32 entry = tnapi->tx_prod;
5769 struct ring_info *txb = &tnapi->tx_buffers[entry];
5771 pci_unmap_single(tnapi->tp->pdev,
5772 dma_unmap_addr(txb, mapping),
5773 skb_headlen(skb),
5774 PCI_DMA_TODEVICE);
5775 for (i = 0; i <= last; i++) {
5776 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
5778 entry = NEXT_TX(entry);
5779 txb = &tnapi->tx_buffers[entry];
5781 pci_unmap_page(tnapi->tp->pdev,
5782 dma_unmap_addr(txb, mapping),
5783 frag->size, PCI_DMA_TODEVICE);
5787 /* Workaround 4GB and 40-bit hardware DMA bugs. */
5788 static int tigon3_dma_hwbug_workaround(struct tg3_napi *tnapi,
5789 struct sk_buff *skb,
5790 u32 base_flags, u32 mss)
5792 struct tg3 *tp = tnapi->tp;
5793 struct sk_buff *new_skb;
5794 dma_addr_t new_addr = 0;
5795 u32 entry = tnapi->tx_prod;
5796 int ret = 0;
5798 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701)
5799 new_skb = skb_copy(skb, GFP_ATOMIC);
5800 else {
5801 int more_headroom = 4 - ((unsigned long)skb->data & 3);
5803 new_skb = skb_copy_expand(skb,
5804 skb_headroom(skb) + more_headroom,
5805 skb_tailroom(skb), GFP_ATOMIC);
5808 if (!new_skb) {
5809 ret = -1;
5810 } else {
5811 /* New SKB is guaranteed to be linear. */
5812 new_addr = pci_map_single(tp->pdev, new_skb->data, new_skb->len,
5813 PCI_DMA_TODEVICE);
5814 /* Make sure the mapping succeeded */
5815 if (pci_dma_mapping_error(tp->pdev, new_addr)) {
5816 ret = -1;
5817 dev_kfree_skb(new_skb);
5819 /* Make sure new skb does not cross any 4G boundaries.
5820 * Drop the packet if it does.
5822 } else if (tg3_flag(tp, 4G_DMA_BNDRY_BUG) &&
5823 tg3_4g_overflow_test(new_addr, new_skb->len)) {
5824 pci_unmap_single(tp->pdev, new_addr, new_skb->len,
5825 PCI_DMA_TODEVICE);
5826 ret = -1;
5827 dev_kfree_skb(new_skb);
5828 } else {
5829 tnapi->tx_buffers[entry].skb = new_skb;
5830 dma_unmap_addr_set(&tnapi->tx_buffers[entry],
5831 mapping, new_addr);
5833 tg3_set_txd(tnapi, entry, new_addr, new_skb->len,
5834 base_flags, 1 | (mss << 1));
5838 dev_kfree_skb(skb);
5840 return ret;
5843 static netdev_tx_t tg3_start_xmit(struct sk_buff *, struct net_device *);
5845 /* Use GSO to work around a rare TSO bug that may be triggered when the
5846 * TSO header is greater than 80 bytes.
5848 static int tg3_tso_bug(struct tg3 *tp, struct sk_buff *skb)
5850 struct sk_buff *segs, *nskb;
5851 u32 frag_cnt_est = skb_shinfo(skb)->gso_segs * 3;
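/* The factor of 3 above is a conservative per-segment descriptor
 * estimate (roughly: one for the linear header plus a couple for
 * paged data), erring high so the queue is stopped early enough.
 */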
5853 /* Estimate the number of fragments in the worst case */
5854 if (unlikely(tg3_tx_avail(&tp->napi[0]) <= frag_cnt_est)) {
5855 netif_stop_queue(tp->dev);
5857 /* netif_tx_stop_queue() must be done before checking
5858 * the tx index in tg3_tx_avail() below, because in
5859 * tg3_tx(), we update tx index before checking for
5860 * netif_tx_queue_stopped().
5862 smp_mb();
5863 if (tg3_tx_avail(&tp->napi[0]) <= frag_cnt_est)
5864 return NETDEV_TX_BUSY;
5866 netif_wake_queue(tp->dev);
5869 segs = skb_gso_segment(skb, tp->dev->features & ~NETIF_F_TSO);
5870 if (IS_ERR(segs))
5871 goto tg3_tso_bug_end;
5873 do {
5874 nskb = segs;
5875 segs = segs->next;
5876 nskb->next = NULL;
5877 tg3_start_xmit(nskb, tp->dev);
5878 } while (segs);
5880 tg3_tso_bug_end:
5881 dev_kfree_skb(skb);
5883 return NETDEV_TX_OK;
5886 /* hard_start_xmit for devices that have the 4G bug and/or 40-bit bug and
5887 * support TG3_FLAG_HW_TSO_1 or firmware TSO only.
5889 static netdev_tx_t tg3_start_xmit(struct sk_buff *skb, struct net_device *dev)
5891 struct tg3 *tp = netdev_priv(dev);
5892 u32 len, entry, base_flags, mss;
5893 int i = -1, would_hit_hwbug;
5894 dma_addr_t mapping;
5895 struct tg3_napi *tnapi;
5896 struct netdev_queue *txq;
5897 unsigned int last;
5899 txq = netdev_get_tx_queue(dev, skb_get_queue_mapping(skb));
5900 tnapi = &tp->napi[skb_get_queue_mapping(skb)];
5901 if (tg3_flag(tp, ENABLE_TSS))
5902 tnapi++;
5904 /* We are running in BH disabled context with netif_tx_lock
5905 * and TX reclaim runs via tp->napi.poll inside of a software
5906 * interrupt. Furthermore, IRQ processing runs lockless so we have
5907 * no IRQ context deadlocks to worry about either. Rejoice!
5909 if (unlikely(tg3_tx_avail(tnapi) <= (skb_shinfo(skb)->nr_frags + 1))) {
5910 if (!netif_tx_queue_stopped(txq)) {
5911 netif_tx_stop_queue(txq);
5913 /* This is a hard error, log it. */
5914 netdev_err(dev,
5915 "BUG! Tx Ring full when queue awake!\n");
5917 return NETDEV_TX_BUSY;
5920 entry = tnapi->tx_prod;
5921 base_flags = 0;
5922 if (skb->ip_summed == CHECKSUM_PARTIAL)
5923 base_flags |= TXD_FLAG_TCPUDP_CSUM;
5925 mss = skb_shinfo(skb)->gso_size;
5926 if (mss) {
5927 struct iphdr *iph;
5928 u32 tcp_opt_len, hdr_len;
5930 if (skb_header_cloned(skb) &&
5931 pskb_expand_head(skb, 0, 0, GFP_ATOMIC)) {
5932 dev_kfree_skb(skb);
5933 goto out_unlock;
5936 iph = ip_hdr(skb);
5937 tcp_opt_len = tcp_optlen(skb);
5939 if (skb_is_gso_v6(skb)) {
5940 hdr_len = skb_headlen(skb) - ETH_HLEN;
5941 } else {
5942 u32 ip_tcp_len;
5944 ip_tcp_len = ip_hdrlen(skb) + sizeof(struct tcphdr);
5945 hdr_len = ip_tcp_len + tcp_opt_len;
5947 iph->check = 0;
5948 iph->tot_len = htons(mss + hdr_len);
5951 if (unlikely((ETH_HLEN + hdr_len) > 80) &&
5952 tg3_flag(tp, TSO_BUG))
5953 return tg3_tso_bug(tp, skb);
5955 base_flags |= (TXD_FLAG_CPU_PRE_DMA |
5956 TXD_FLAG_CPU_POST_DMA);
5958 if (tg3_flag(tp, HW_TSO_1) ||
5959 tg3_flag(tp, HW_TSO_2) ||
5960 tg3_flag(tp, HW_TSO_3)) {
5961 tcp_hdr(skb)->check = 0;
5962 base_flags &= ~TXD_FLAG_TCPUDP_CSUM;
5963 } else
5964 tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr,
5965 iph->daddr, 0,
5966 IPPROTO_TCP,
5969 if (tg3_flag(tp, HW_TSO_3)) {
5970 mss |= (hdr_len & 0xc) << 12;
5971 if (hdr_len & 0x10)
5972 base_flags |= 0x00000010;
5973 base_flags |= (hdr_len & 0x3e0) << 5;
5974 } else if (tg3_flag(tp, HW_TSO_2))
5975 mss |= hdr_len << 9;
5976 else if (tg3_flag(tp, HW_TSO_1) ||
5977 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
5978 if (tcp_opt_len || iph->ihl > 5) {
5979 int tsflags;
5981 tsflags = (iph->ihl - 5) + (tcp_opt_len >> 2);
5982 mss |= (tsflags << 11);
5984 } else {
5985 if (tcp_opt_len || iph->ihl > 5) {
5986 int tsflags;
5988 tsflags = (iph->ihl - 5) + (tcp_opt_len >> 2);
5989 base_flags |= tsflags << 12;
5994 if (vlan_tx_tag_present(skb))
5995 base_flags |= (TXD_FLAG_VLAN |
5996 (vlan_tx_tag_get(skb) << 16));
5998 if (tg3_flag(tp, USE_JUMBO_BDFLAG) &&
5999 !mss && skb->len > VLAN_ETH_FRAME_LEN)
6000 base_flags |= TXD_FLAG_JMB_PKT;
6002 len = skb_headlen(skb);
6004 mapping = pci_map_single(tp->pdev, skb->data, len, PCI_DMA_TODEVICE);
6005 if (pci_dma_mapping_error(tp->pdev, mapping)) {
6006 dev_kfree_skb(skb);
6007 goto out_unlock;
6010 tnapi->tx_buffers[entry].skb = skb;
6011 dma_unmap_addr_set(&tnapi->tx_buffers[entry], mapping, mapping);
6013 would_hit_hwbug = 0;
6015 if (tg3_flag(tp, SHORT_DMA_BUG) && len <= 8)
6016 would_hit_hwbug = 1;
6018 if (tg3_flag(tp, 4G_DMA_BNDRY_BUG) &&
6019 tg3_4g_overflow_test(mapping, len))
6020 would_hit_hwbug = 1;
6022 if (tg3_flag(tp, 40BIT_DMA_LIMIT_BUG) &&
6023 tg3_40bit_overflow_test(tp, mapping, len))
6024 would_hit_hwbug = 1;
6026 if (tg3_flag(tp, 5701_DMA_BUG))
6027 would_hit_hwbug = 1;
6029 tg3_set_txd(tnapi, entry, mapping, len, base_flags,
6030 (skb_shinfo(skb)->nr_frags == 0) | (mss << 1));
6032 entry = NEXT_TX(entry);
6034 /* Now loop through additional data fragments, and queue them. */
6035 if (skb_shinfo(skb)->nr_frags > 0) {
6036 last = skb_shinfo(skb)->nr_frags - 1;
6037 for (i = 0; i <= last; i++) {
6038 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
6040 len = frag->size;
6041 mapping = pci_map_page(tp->pdev,
6042 frag->page,
6043 frag->page_offset,
6044 len, PCI_DMA_TODEVICE);
6046 tnapi->tx_buffers[entry].skb = NULL;
6047 dma_unmap_addr_set(&tnapi->tx_buffers[entry], mapping,
6048 mapping);
6049 if (pci_dma_mapping_error(tp->pdev, mapping))
6050 goto dma_error;
6052 if (tg3_flag(tp, SHORT_DMA_BUG) &&
6053 len <= 8)
6054 would_hit_hwbug = 1;
6056 if (tg3_flag(tp, 4G_DMA_BNDRY_BUG) &&
6057 tg3_4g_overflow_test(mapping, len))
6058 would_hit_hwbug = 1;
6060 if (tg3_flag(tp, 40BIT_DMA_LIMIT_BUG) &&
6061 tg3_40bit_overflow_test(tp, mapping, len))
6062 would_hit_hwbug = 1;
6064 if (tg3_flag(tp, HW_TSO_1) ||
6065 tg3_flag(tp, HW_TSO_2) ||
6066 tg3_flag(tp, HW_TSO_3))
6067 tg3_set_txd(tnapi, entry, mapping, len,
6068 base_flags, (i == last)|(mss << 1));
6069 else
6070 tg3_set_txd(tnapi, entry, mapping, len,
6071 base_flags, (i == last));
6073 entry = NEXT_TX(entry);
6077 if (would_hit_hwbug) {
6078 tg3_skb_error_unmap(tnapi, skb, i);
6080 /* If the workaround fails due to memory/mapping
6081 * failure, silently drop this packet.
6083 if (tigon3_dma_hwbug_workaround(tnapi, skb, base_flags, mss))
6084 goto out_unlock;
6086 entry = NEXT_TX(tnapi->tx_prod);
6089 /* Packets are ready, update the Tx producer index locally and on the card. */
6090 tw32_tx_mbox(tnapi->prodmbox, entry);
6092 tnapi->tx_prod = entry;
6093 if (unlikely(tg3_tx_avail(tnapi) <= (MAX_SKB_FRAGS + 1))) {
6094 netif_tx_stop_queue(txq);
6096 /* netif_tx_stop_queue() must be done before checking
6097 * the tx index in tg3_tx_avail() below, because in
6098 * tg3_tx(), we update tx index before checking for
6099 * netif_tx_queue_stopped().
6101 smp_mb();
6102 if (tg3_tx_avail(tnapi) > TG3_TX_WAKEUP_THRESH(tnapi))
6103 netif_tx_wake_queue(txq);
6106 out_unlock:
6107 mmiowb();
6109 return NETDEV_TX_OK;
6111 dma_error:
6112 tg3_skb_error_unmap(tnapi, skb, i);
6113 dev_kfree_skb(skb);
6114 tnapi->tx_buffers[tnapi->tx_prod].skb = NULL;
6115 return NETDEV_TX_OK;
6118 static void tg3_set_loopback(struct net_device *dev, u32 features)
6120 struct tg3 *tp = netdev_priv(dev);
6122 if (features & NETIF_F_LOOPBACK) {
6123 if (tp->mac_mode & MAC_MODE_PORT_INT_LPBACK)
6124 return;
6127 * Clear MAC_MODE_HALF_DUPLEX or you won't get packets back in
6128 * loopback mode if Half-Duplex mode was negotiated earlier.
6130 tp->mac_mode &= ~MAC_MODE_HALF_DUPLEX;
6132 /* Enable internal MAC loopback mode */
6133 tp->mac_mode |= MAC_MODE_PORT_INT_LPBACK;
6134 spin_lock_bh(&tp->lock);
6135 tw32(MAC_MODE, tp->mac_mode);
6136 netif_carrier_on(tp->dev);
6137 spin_unlock_bh(&tp->lock);
6138 netdev_info(dev, "Internal MAC loopback mode enabled.\n");
6139 } else {
6140 if (!(tp->mac_mode & MAC_MODE_PORT_INT_LPBACK))
6141 return;
6143 /* Disable internal MAC loopback mode */
6144 tp->mac_mode &= ~MAC_MODE_PORT_INT_LPBACK;
6145 spin_lock_bh(&tp->lock);
6146 tw32(MAC_MODE, tp->mac_mode);
6147 /* Force link status check */
6148 tg3_setup_phy(tp, 1);
6149 spin_unlock_bh(&tp->lock);
6150 netdev_info(dev, "Internal MAC loopback mode disabled.\n");
6154 static u32 tg3_fix_features(struct net_device *dev, u32 features)
6156 struct tg3 *tp = netdev_priv(dev);
6158 if (dev->mtu > ETH_DATA_LEN && tg3_flag(tp, 5780_CLASS))
6159 features &= ~NETIF_F_ALL_TSO;
6161 return features;
6164 static int tg3_set_features(struct net_device *dev, u32 features)
6166 u32 changed = dev->features ^ features;
6168 if ((changed & NETIF_F_LOOPBACK) && netif_running(dev))
6169 tg3_set_loopback(dev, features);
6171 return 0;
6174 static inline void tg3_set_mtu(struct net_device *dev, struct tg3 *tp,
6175 int new_mtu)
6177 dev->mtu = new_mtu;
6179 if (new_mtu > ETH_DATA_LEN) {
6180 if (tg3_flag(tp, 5780_CLASS)) {
6181 netdev_update_features(dev);
6182 tg3_flag_clear(tp, TSO_CAPABLE);
6183 } else {
6184 tg3_flag_set(tp, JUMBO_RING_ENABLE);
6186 } else {
6187 if (tg3_flag(tp, 5780_CLASS)) {
6188 tg3_flag_set(tp, TSO_CAPABLE);
6189 netdev_update_features(dev);
6191 tg3_flag_clear(tp, JUMBO_RING_ENABLE);
6195 static int tg3_change_mtu(struct net_device *dev, int new_mtu)
6197 struct tg3 *tp = netdev_priv(dev);
6198 int err;
6200 if (new_mtu < TG3_MIN_MTU || new_mtu > TG3_MAX_MTU(tp))
6201 return -EINVAL;
6203 if (!netif_running(dev)) {
6204 /* We'll just catch it later when the
6205 * device is up'd.
6206 */
6207 tg3_set_mtu(dev, tp, new_mtu);
6208 return 0;
6211 tg3_phy_stop(tp);
6213 tg3_netif_stop(tp);
6215 tg3_full_lock(tp, 1);
6217 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
6219 tg3_set_mtu(dev, tp, new_mtu);
6221 err = tg3_restart_hw(tp, 0);
6223 if (!err)
6224 tg3_netif_start(tp);
6226 tg3_full_unlock(tp);
6228 if (!err)
6229 tg3_phy_start(tp);
6231 return err;
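/* Illustrative sketch (hypothetical helper, not part of the driver):
 * tg3_change_mtu() above is one instance of the driver's standard
 * "quiesce, reconfigure under the full lock, restart" bracket. The
 * shape of that bracket, factored out:
 */
static int example_reconfig(struct tg3 *tp, void (*apply)(struct tg3 *tp))
{
	int err;

	tg3_phy_stop(tp);
	tg3_netif_stop(tp);

	tg3_full_lock(tp, 1);
	tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
	apply(tp);			/* change state while the chip is quiet */
	err = tg3_restart_hw(tp, 0);
	if (!err)
		tg3_netif_start(tp);
	tg3_full_unlock(tp);

	if (!err)
		tg3_phy_start(tp);

	return err;
}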
6234 static void tg3_rx_prodring_free(struct tg3 *tp,
6235 struct tg3_rx_prodring_set *tpr)
6237 int i;
6239 if (tpr != &tp->napi[0].prodring) {
6240 for (i = tpr->rx_std_cons_idx; i != tpr->rx_std_prod_idx;
6241 i = (i + 1) & tp->rx_std_ring_mask)
6242 tg3_rx_skb_free(tp, &tpr->rx_std_buffers[i],
6243 tp->rx_pkt_map_sz);
6245 if (tg3_flag(tp, JUMBO_CAPABLE)) {
6246 for (i = tpr->rx_jmb_cons_idx;
6247 i != tpr->rx_jmb_prod_idx;
6248 i = (i + 1) & tp->rx_jmb_ring_mask) {
6249 tg3_rx_skb_free(tp, &tpr->rx_jmb_buffers[i],
6250 TG3_RX_JMB_MAP_SZ);
6254 return;
6257 for (i = 0; i <= tp->rx_std_ring_mask; i++)
6258 tg3_rx_skb_free(tp, &tpr->rx_std_buffers[i],
6259 tp->rx_pkt_map_sz);
6261 if (tg3_flag(tp, JUMBO_CAPABLE) && !tg3_flag(tp, 5780_CLASS)) {
6262 for (i = 0; i <= tp->rx_jmb_ring_mask; i++)
6263 tg3_rx_skb_free(tp, &tpr->rx_jmb_buffers[i],
6264 TG3_RX_JMB_MAP_SZ);
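/* For illustration: the rings are power-of-two sized, so the
 * "(i + 1) & ring_mask" stepping above is a branch-free modular
 * increment. The same arithmetic gives a ring's fill level
 * (hypothetical helper, not used by the driver):
 */
static inline u32 example_ring_fill_level(u32 prod, u32 cons, u32 mask)
{
	return (prod - cons) & mask;	/* correct across index wrap */
}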
6268 /* Initialize rx rings for packet processing.
6270 * The chip has been shut down and the driver detached from
6271 * the networking, so no interrupts or new tx packets will
6272 * end up in the driver. tp->{tx,}lock are held and thus
6273 * we may not sleep.
6274 */
6275 static int tg3_rx_prodring_alloc(struct tg3 *tp,
6276 struct tg3_rx_prodring_set *tpr)
6278 u32 i, rx_pkt_dma_sz;
6280 tpr->rx_std_cons_idx = 0;
6281 tpr->rx_std_prod_idx = 0;
6282 tpr->rx_jmb_cons_idx = 0;
6283 tpr->rx_jmb_prod_idx = 0;
6285 if (tpr != &tp->napi[0].prodring) {
6286 memset(&tpr->rx_std_buffers[0], 0,
6287 TG3_RX_STD_BUFF_RING_SIZE(tp));
6288 if (tpr->rx_jmb_buffers)
6289 memset(&tpr->rx_jmb_buffers[0], 0,
6290 TG3_RX_JMB_BUFF_RING_SIZE(tp));
6291 goto done;
6294 /* Zero out all descriptors. */
6295 memset(tpr->rx_std, 0, TG3_RX_STD_RING_BYTES(tp));
6297 rx_pkt_dma_sz = TG3_RX_STD_DMA_SZ;
6298 if (tg3_flag(tp, 5780_CLASS) &&
6299 tp->dev->mtu > ETH_DATA_LEN)
6300 rx_pkt_dma_sz = TG3_RX_JMB_DMA_SZ;
6301 tp->rx_pkt_map_sz = TG3_RX_DMA_TO_MAP_SZ(rx_pkt_dma_sz);
6303 /* Initialize invariants of the rings, we only set this
6304 * stuff once. This works because the card does not
6305 * write into the rx buffer posting rings.
6306 */
6307 for (i = 0; i <= tp->rx_std_ring_mask; i++) {
6308 struct tg3_rx_buffer_desc *rxd;
6310 rxd = &tpr->rx_std[i];
6311 rxd->idx_len = rx_pkt_dma_sz << RXD_LEN_SHIFT;
6312 rxd->type_flags = (RXD_FLAG_END << RXD_FLAGS_SHIFT);
6313 rxd->opaque = (RXD_OPAQUE_RING_STD |
6314 (i << RXD_OPAQUE_INDEX_SHIFT));
6317 /* Now allocate fresh SKBs for each rx ring. */
6318 for (i = 0; i < tp->rx_pending; i++) {
6319 if (tg3_alloc_rx_skb(tp, tpr, RXD_OPAQUE_RING_STD, i) < 0) {
6320 netdev_warn(tp->dev,
6321 "Using a smaller RX standard ring. Only "
6322 "%d out of %d buffers were allocated "
6323 "successfully\n", i, tp->rx_pending);
6324 if (i == 0)
6325 goto initfail;
6326 tp->rx_pending = i;
6327 break;
6331 if (!tg3_flag(tp, JUMBO_CAPABLE) || tg3_flag(tp, 5780_CLASS))
6332 goto done;
6334 memset(tpr->rx_jmb, 0, TG3_RX_JMB_RING_BYTES(tp));
6336 if (!tg3_flag(tp, JUMBO_RING_ENABLE))
6337 goto done;
6339 for (i = 0; i <= tp->rx_jmb_ring_mask; i++) {
6340 struct tg3_rx_buffer_desc *rxd;
6342 rxd = &tpr->rx_jmb[i].std;
6343 rxd->idx_len = TG3_RX_JMB_DMA_SZ << RXD_LEN_SHIFT;
6344 rxd->type_flags = (RXD_FLAG_END << RXD_FLAGS_SHIFT) |
6345 RXD_FLAG_JUMBO;
6346 rxd->opaque = (RXD_OPAQUE_RING_JUMBO |
6347 (i << RXD_OPAQUE_INDEX_SHIFT));
6350 for (i = 0; i < tp->rx_jumbo_pending; i++) {
6351 if (tg3_alloc_rx_skb(tp, tpr, RXD_OPAQUE_RING_JUMBO, i) < 0) {
6352 netdev_warn(tp->dev,
6353 "Using a smaller RX jumbo ring. Only %d "
6354 "out of %d buffers were allocated "
6355 "successfully\n", i, tp->rx_jumbo_pending);
6356 if (i == 0)
6357 goto initfail;
6358 tp->rx_jumbo_pending = i;
6359 break;
6363 done:
6364 return 0;
6366 initfail:
6367 tg3_rx_prodring_free(tp, tpr);
6368 return -ENOMEM;
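/* For illustration (hypothetical helper, assuming the RXD_OPAQUE_*
 * definitions in tg3.h): the opaque cookie written above passes
 * through the chip unmodified and comes back in the rx return ring,
 * where the buffer index is recovered like so:
 */
static inline u32 example_rxd_opaque_to_index(u32 opaque)
{
	return opaque & RXD_OPAQUE_INDEX_MASK;
}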
6371 static void tg3_rx_prodring_fini(struct tg3 *tp,
6372 struct tg3_rx_prodring_set *tpr)
6374 kfree(tpr->rx_std_buffers);
6375 tpr->rx_std_buffers = NULL;
6376 kfree(tpr->rx_jmb_buffers);
6377 tpr->rx_jmb_buffers = NULL;
6378 if (tpr->rx_std) {
6379 dma_free_coherent(&tp->pdev->dev, TG3_RX_STD_RING_BYTES(tp),
6380 tpr->rx_std, tpr->rx_std_mapping);
6381 tpr->rx_std = NULL;
6383 if (tpr->rx_jmb) {
6384 dma_free_coherent(&tp->pdev->dev, TG3_RX_JMB_RING_BYTES(tp),
6385 tpr->rx_jmb, tpr->rx_jmb_mapping);
6386 tpr->rx_jmb = NULL;
6390 static int tg3_rx_prodring_init(struct tg3 *tp,
6391 struct tg3_rx_prodring_set *tpr)
6393 tpr->rx_std_buffers = kzalloc(TG3_RX_STD_BUFF_RING_SIZE(tp),
6394 GFP_KERNEL);
6395 if (!tpr->rx_std_buffers)
6396 return -ENOMEM;
6398 tpr->rx_std = dma_alloc_coherent(&tp->pdev->dev,
6399 TG3_RX_STD_RING_BYTES(tp),
6400 &tpr->rx_std_mapping,
6401 GFP_KERNEL);
6402 if (!tpr->rx_std)
6403 goto err_out;
6405 if (tg3_flag(tp, JUMBO_CAPABLE) && !tg3_flag(tp, 5780_CLASS)) {
6406 tpr->rx_jmb_buffers = kzalloc(TG3_RX_JMB_BUFF_RING_SIZE(tp),
6407 GFP_KERNEL);
6408 if (!tpr->rx_jmb_buffers)
6409 goto err_out;
6411 tpr->rx_jmb = dma_alloc_coherent(&tp->pdev->dev,
6412 TG3_RX_JMB_RING_BYTES(tp),
6413 &tpr->rx_jmb_mapping,
6414 GFP_KERNEL);
6415 if (!tpr->rx_jmb)
6416 goto err_out;
6419 return 0;
6421 err_out:
6422 tg3_rx_prodring_fini(tp, tpr);
6423 return -ENOMEM;
6426 /* Free up pending packets in all rx/tx rings.
6428 * The chip has been shut down and the driver detached from
6429 * the networking, so no interrupts or new tx packets will
6430 * end up in the driver. tp->{tx,}lock is not held and we are not
6431 * in an interrupt context and thus may sleep.
6432 */
6433 static void tg3_free_rings(struct tg3 *tp)
6435 int i, j;
6437 for (j = 0; j < tp->irq_cnt; j++) {
6438 struct tg3_napi *tnapi = &tp->napi[j];
6440 tg3_rx_prodring_free(tp, &tnapi->prodring);
6442 if (!tnapi->tx_buffers)
6443 continue;
6445 for (i = 0; i < TG3_TX_RING_SIZE; ) {
6446 struct ring_info *txp;
6447 struct sk_buff *skb;
6448 unsigned int k;
6450 txp = &tnapi->tx_buffers[i];
6451 skb = txp->skb;
6453 if (skb == NULL) {
6454 i++;
6455 continue;
6458 pci_unmap_single(tp->pdev,
6459 dma_unmap_addr(txp, mapping),
6460 skb_headlen(skb),
6461 PCI_DMA_TODEVICE);
6462 txp->skb = NULL;
6464 i++;
6466 for (k = 0; k < skb_shinfo(skb)->nr_frags; k++) {
6467 txp = &tnapi->tx_buffers[i & (TG3_TX_RING_SIZE - 1)];
6468 pci_unmap_page(tp->pdev,
6469 dma_unmap_addr(txp, mapping),
6470 skb_shinfo(skb)->frags[k].size,
6471 PCI_DMA_TODEVICE);
6472 i++;
6475 dev_kfree_skb_any(skb);
6480 /* Initialize tx/rx rings for packet processing.
6482 * The chip has been shut down and the driver detached from
6483 * the networking, so no interrupts or new tx packets will
6484 * end up in the driver. tp->{tx,}lock are held and thus
6485 * we may not sleep.
6486 */
6487 static int tg3_init_rings(struct tg3 *tp)
6489 int i;
6491 /* Free up all the SKBs. */
6492 tg3_free_rings(tp);
6494 for (i = 0; i < tp->irq_cnt; i++) {
6495 struct tg3_napi *tnapi = &tp->napi[i];
6497 tnapi->last_tag = 0;
6498 tnapi->last_irq_tag = 0;
6499 tnapi->hw_status->status = 0;
6500 tnapi->hw_status->status_tag = 0;
6501 memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
6503 tnapi->tx_prod = 0;
6504 tnapi->tx_cons = 0;
6505 if (tnapi->tx_ring)
6506 memset(tnapi->tx_ring, 0, TG3_TX_RING_BYTES);
6508 tnapi->rx_rcb_ptr = 0;
6509 if (tnapi->rx_rcb)
6510 memset(tnapi->rx_rcb, 0, TG3_RX_RCB_RING_BYTES(tp));
6512 if (tg3_rx_prodring_alloc(tp, &tnapi->prodring)) {
6513 tg3_free_rings(tp);
6514 return -ENOMEM;
6518 return 0;
6521 /*
6522 * Must not be invoked with interrupt sources disabled and
6523 * the hardware shut down.
6524 */
6525 static void tg3_free_consistent(struct tg3 *tp)
6527 int i;
6529 for (i = 0; i < tp->irq_cnt; i++) {
6530 struct tg3_napi *tnapi = &tp->napi[i];
6532 if (tnapi->tx_ring) {
6533 dma_free_coherent(&tp->pdev->dev, TG3_TX_RING_BYTES,
6534 tnapi->tx_ring, tnapi->tx_desc_mapping);
6535 tnapi->tx_ring = NULL;
6538 kfree(tnapi->tx_buffers);
6539 tnapi->tx_buffers = NULL;
6541 if (tnapi->rx_rcb) {
6542 dma_free_coherent(&tp->pdev->dev,
6543 TG3_RX_RCB_RING_BYTES(tp),
6544 tnapi->rx_rcb,
6545 tnapi->rx_rcb_mapping);
6546 tnapi->rx_rcb = NULL;
6549 tg3_rx_prodring_fini(tp, &tnapi->prodring);
6551 if (tnapi->hw_status) {
6552 dma_free_coherent(&tp->pdev->dev, TG3_HW_STATUS_SIZE,
6553 tnapi->hw_status,
6554 tnapi->status_mapping);
6555 tnapi->hw_status = NULL;
6559 if (tp->hw_stats) {
6560 dma_free_coherent(&tp->pdev->dev, sizeof(struct tg3_hw_stats),
6561 tp->hw_stats, tp->stats_mapping);
6562 tp->hw_stats = NULL;
6566 /*
6567 * Must not be invoked with interrupt sources disabled and
6568 * the hardware shut down. Can sleep.
6569 */
6570 static int tg3_alloc_consistent(struct tg3 *tp)
6572 int i;
6574 tp->hw_stats = dma_alloc_coherent(&tp->pdev->dev,
6575 sizeof(struct tg3_hw_stats),
6576 &tp->stats_mapping,
6577 GFP_KERNEL);
6578 if (!tp->hw_stats)
6579 goto err_out;
6581 memset(tp->hw_stats, 0, sizeof(struct tg3_hw_stats));
6583 for (i = 0; i < tp->irq_cnt; i++) {
6584 struct tg3_napi *tnapi = &tp->napi[i];
6585 struct tg3_hw_status *sblk;
6587 tnapi->hw_status = dma_alloc_coherent(&tp->pdev->dev,
6588 TG3_HW_STATUS_SIZE,
6589 &tnapi->status_mapping,
6590 GFP_KERNEL);
6591 if (!tnapi->hw_status)
6592 goto err_out;
6594 memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
6595 sblk = tnapi->hw_status;
6597 if (tg3_rx_prodring_init(tp, &tnapi->prodring))
6598 goto err_out;
6600 /* If multivector TSS is enabled, vector 0 does not handle
6601 * tx interrupts. Don't allocate any resources for it.
6602 */
6603 if ((!i && !tg3_flag(tp, ENABLE_TSS)) ||
6604 (i && tg3_flag(tp, ENABLE_TSS))) {
6605 tnapi->tx_buffers = kzalloc(sizeof(struct ring_info) *
6606 TG3_TX_RING_SIZE,
6607 GFP_KERNEL);
6608 if (!tnapi->tx_buffers)
6609 goto err_out;
6611 tnapi->tx_ring = dma_alloc_coherent(&tp->pdev->dev,
6612 TG3_TX_RING_BYTES,
6613 &tnapi->tx_desc_mapping,
6614 GFP_KERNEL);
6615 if (!tnapi->tx_ring)
6616 goto err_out;
6619 /*
6620 * When RSS is enabled, the status block format changes
6621 * slightly. The "rx_jumbo_consumer", "reserved",
6622 * and "rx_mini_consumer" members get mapped to the
6623 * other three rx return ring producer indexes.
6624 */
6625 switch (i) {
6626 default:
6627 tnapi->rx_rcb_prod_idx = &sblk->idx[0].rx_producer;
6628 break;
6629 case 2:
6630 tnapi->rx_rcb_prod_idx = &sblk->rx_jumbo_consumer;
6631 break;
6632 case 3:
6633 tnapi->rx_rcb_prod_idx = &sblk->reserved;
6634 break;
6635 case 4:
6636 tnapi->rx_rcb_prod_idx = &sblk->rx_mini_consumer;
6637 break;
6640 /*
6641 * If multivector RSS is enabled, vector 0 does not handle
6642 * rx or tx interrupts. Don't allocate any resources for it.
6643 */
6644 if (!i && tg3_flag(tp, ENABLE_RSS))
6645 continue;
6647 tnapi->rx_rcb = dma_alloc_coherent(&tp->pdev->dev,
6648 TG3_RX_RCB_RING_BYTES(tp),
6649 &tnapi->rx_rcb_mapping,
6650 GFP_KERNEL);
6651 if (!tnapi->rx_rcb)
6652 goto err_out;
6654 memset(tnapi->rx_rcb, 0, TG3_RX_RCB_RING_BYTES(tp));
6657 return 0;
6659 err_out:
6660 tg3_free_consistent(tp);
6661 return -ENOMEM;
6664 #define MAX_WAIT_CNT 1000
6666 /* To stop a block, clear the enable bit and poll till it
6667 * clears. tp->lock is held.
6669 static int tg3_stop_block(struct tg3 *tp, unsigned long ofs, u32 enable_bit, int silent)
6671 unsigned int i;
6672 u32 val;
6674 if (tg3_flag(tp, 5705_PLUS)) {
6675 switch (ofs) {
6676 case RCVLSC_MODE:
6677 case DMAC_MODE:
6678 case MBFREE_MODE:
6679 case BUFMGR_MODE:
6680 case MEMARB_MODE:
6681 /* We can't enable/disable these bits of the
6682 * 5705/5750, just say success.
6683 */
6684 return 0;
6686 default:
6687 break;
6691 val = tr32(ofs);
6692 val &= ~enable_bit;
6693 tw32_f(ofs, val);
6695 for (i = 0; i < MAX_WAIT_CNT; i++) {
6696 udelay(100);
6697 val = tr32(ofs);
6698 if ((val & enable_bit) == 0)
6699 break;
6702 if (i == MAX_WAIT_CNT && !silent) {
6703 dev_err(&tp->pdev->dev,
6704 "tg3_stop_block timed out, ofs=%lx enable_bit=%x\n",
6705 ofs, enable_bit);
6706 return -ENODEV;
6709 return 0;
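/* Illustrative sketch (hypothetical helper, not part of the driver):
 * tg3_stop_block() above is an instance of the generic "clear the
 * enable bit, then poll until the hardware acknowledges" pattern:
 */
static int example_poll_bit_clear(struct tg3 *tp, unsigned long reg,
				  u32 bit, int max_tries)
{
	int i;

	tw32_f(reg, tr32(reg) & ~bit);	/* request stop, flush the write */

	for (i = 0; i < max_tries; i++) {
		udelay(100);
		if (!(tr32(reg) & bit))
			return 0;	/* block acknowledged the stop */
	}
	return -ENODEV;			/* hardware did not respond */
}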
6712 /* tp->lock is held. */
6713 static int tg3_abort_hw(struct tg3 *tp, int silent)
6715 int i, err;
6717 tg3_disable_ints(tp);
6719 tp->rx_mode &= ~RX_MODE_ENABLE;
6720 tw32_f(MAC_RX_MODE, tp->rx_mode);
6721 udelay(10);
6723 err = tg3_stop_block(tp, RCVBDI_MODE, RCVBDI_MODE_ENABLE, silent);
6724 err |= tg3_stop_block(tp, RCVLPC_MODE, RCVLPC_MODE_ENABLE, silent);
6725 err |= tg3_stop_block(tp, RCVLSC_MODE, RCVLSC_MODE_ENABLE, silent);
6726 err |= tg3_stop_block(tp, RCVDBDI_MODE, RCVDBDI_MODE_ENABLE, silent);
6727 err |= tg3_stop_block(tp, RCVDCC_MODE, RCVDCC_MODE_ENABLE, silent);
6728 err |= tg3_stop_block(tp, RCVCC_MODE, RCVCC_MODE_ENABLE, silent);
6730 err |= tg3_stop_block(tp, SNDBDS_MODE, SNDBDS_MODE_ENABLE, silent);
6731 err |= tg3_stop_block(tp, SNDBDI_MODE, SNDBDI_MODE_ENABLE, silent);
6732 err |= tg3_stop_block(tp, SNDDATAI_MODE, SNDDATAI_MODE_ENABLE, silent);
6733 err |= tg3_stop_block(tp, RDMAC_MODE, RDMAC_MODE_ENABLE, silent);
6734 err |= tg3_stop_block(tp, SNDDATAC_MODE, SNDDATAC_MODE_ENABLE, silent);
6735 err |= tg3_stop_block(tp, DMAC_MODE, DMAC_MODE_ENABLE, silent);
6736 err |= tg3_stop_block(tp, SNDBDC_MODE, SNDBDC_MODE_ENABLE, silent);
6738 tp->mac_mode &= ~MAC_MODE_TDE_ENABLE;
6739 tw32_f(MAC_MODE, tp->mac_mode);
6740 udelay(40);
6742 tp->tx_mode &= ~TX_MODE_ENABLE;
6743 tw32_f(MAC_TX_MODE, tp->tx_mode);
6745 for (i = 0; i < MAX_WAIT_CNT; i++) {
6746 udelay(100);
6747 if (!(tr32(MAC_TX_MODE) & TX_MODE_ENABLE))
6748 break;
6750 if (i >= MAX_WAIT_CNT) {
6751 dev_err(&tp->pdev->dev,
6752 "%s timed out, TX_MODE_ENABLE will not clear "
6753 "MAC_TX_MODE=%08x\n", __func__, tr32(MAC_TX_MODE));
6754 err |= -ENODEV;
6757 err |= tg3_stop_block(tp, HOSTCC_MODE, HOSTCC_MODE_ENABLE, silent);
6758 err |= tg3_stop_block(tp, WDMAC_MODE, WDMAC_MODE_ENABLE, silent);
6759 err |= tg3_stop_block(tp, MBFREE_MODE, MBFREE_MODE_ENABLE, silent);
6761 tw32(FTQ_RESET, 0xffffffff);
6762 tw32(FTQ_RESET, 0x00000000);
6764 err |= tg3_stop_block(tp, BUFMGR_MODE, BUFMGR_MODE_ENABLE, silent);
6765 err |= tg3_stop_block(tp, MEMARB_MODE, MEMARB_MODE_ENABLE, silent);
6767 for (i = 0; i < tp->irq_cnt; i++) {
6768 struct tg3_napi *tnapi = &tp->napi[i];
6769 if (tnapi->hw_status)
6770 memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
6772 if (tp->hw_stats)
6773 memset(tp->hw_stats, 0, sizeof(struct tg3_hw_stats));
6775 return err;
6778 static void tg3_ape_send_event(struct tg3 *tp, u32 event)
6780 int i;
6781 u32 apedata;
6783 /* NCSI does not support APE events */
6784 if (tg3_flag(tp, APE_HAS_NCSI))
6785 return;
6787 apedata = tg3_ape_read32(tp, TG3_APE_SEG_SIG);
6788 if (apedata != APE_SEG_SIG_MAGIC)
6789 return;
6791 apedata = tg3_ape_read32(tp, TG3_APE_FW_STATUS);
6792 if (!(apedata & APE_FW_STATUS_READY))
6793 return;
6795 /* Wait for up to 1 millisecond for APE to service previous event. */
6796 for (i = 0; i < 10; i++) {
6797 if (tg3_ape_lock(tp, TG3_APE_LOCK_MEM))
6798 return;
6800 apedata = tg3_ape_read32(tp, TG3_APE_EVENT_STATUS);
6802 if (!(apedata & APE_EVENT_STATUS_EVENT_PENDING))
6803 tg3_ape_write32(tp, TG3_APE_EVENT_STATUS,
6804 event | APE_EVENT_STATUS_EVENT_PENDING);
6806 tg3_ape_unlock(tp, TG3_APE_LOCK_MEM);
6808 if (!(apedata & APE_EVENT_STATUS_EVENT_PENDING))
6809 break;
6811 udelay(100);
6814 if (!(apedata & APE_EVENT_STATUS_EVENT_PENDING))
6815 tg3_ape_write32(tp, TG3_APE_EVENT, APE_EVENT_1);
6818 static void tg3_ape_driver_state_change(struct tg3 *tp, int kind)
6820 u32 event;
6821 u32 apedata;
6823 if (!tg3_flag(tp, ENABLE_APE))
6824 return;
6826 switch (kind) {
6827 case RESET_KIND_INIT:
6828 tg3_ape_write32(tp, TG3_APE_HOST_SEG_SIG,
6829 APE_HOST_SEG_SIG_MAGIC);
6830 tg3_ape_write32(tp, TG3_APE_HOST_SEG_LEN,
6831 APE_HOST_SEG_LEN_MAGIC);
6832 apedata = tg3_ape_read32(tp, TG3_APE_HOST_INIT_COUNT);
6833 tg3_ape_write32(tp, TG3_APE_HOST_INIT_COUNT, ++apedata);
6834 tg3_ape_write32(tp, TG3_APE_HOST_DRIVER_ID,
6835 APE_HOST_DRIVER_ID_MAGIC(TG3_MAJ_NUM, TG3_MIN_NUM));
6836 tg3_ape_write32(tp, TG3_APE_HOST_BEHAVIOR,
6837 APE_HOST_BEHAV_NO_PHYLOCK);
6838 tg3_ape_write32(tp, TG3_APE_HOST_DRVR_STATE,
6839 TG3_APE_HOST_DRVR_STATE_START);
6841 event = APE_EVENT_STATUS_STATE_START;
6842 break;
6843 case RESET_KIND_SHUTDOWN:
6844 /* With the interface we are currently using,
6845 * APE does not track driver state. Wiping
6846 * out the HOST SEGMENT SIGNATURE forces
6847 * the APE to assume OS absent status.
6848 */
6849 tg3_ape_write32(tp, TG3_APE_HOST_SEG_SIG, 0x0);
6851 if (device_may_wakeup(&tp->pdev->dev) &&
6852 tg3_flag(tp, WOL_ENABLE)) {
6853 tg3_ape_write32(tp, TG3_APE_HOST_WOL_SPEED,
6854 TG3_APE_HOST_WOL_SPEED_AUTO);
6855 apedata = TG3_APE_HOST_DRVR_STATE_WOL;
6856 } else
6857 apedata = TG3_APE_HOST_DRVR_STATE_UNLOAD;
6859 tg3_ape_write32(tp, TG3_APE_HOST_DRVR_STATE, apedata);
6861 event = APE_EVENT_STATUS_STATE_UNLOAD;
6862 break;
6863 case RESET_KIND_SUSPEND:
6864 event = APE_EVENT_STATUS_STATE_SUSPEND;
6865 break;
6866 default:
6867 return;
6870 event |= APE_EVENT_STATUS_DRIVER_EVNT | APE_EVENT_STATUS_STATE_CHNGE;
6872 tg3_ape_send_event(tp, event);
6875 /* tp->lock is held. */
6876 static void tg3_write_sig_pre_reset(struct tg3 *tp, int kind)
6878 tg3_write_mem(tp, NIC_SRAM_FIRMWARE_MBOX,
6879 NIC_SRAM_FIRMWARE_MBOX_MAGIC1);
6881 if (tg3_flag(tp, ASF_NEW_HANDSHAKE)) {
6882 switch (kind) {
6883 case RESET_KIND_INIT:
6884 tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
6885 DRV_STATE_START);
6886 break;
6888 case RESET_KIND_SHUTDOWN:
6889 tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
6890 DRV_STATE_UNLOAD);
6891 break;
6893 case RESET_KIND_SUSPEND:
6894 tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
6895 DRV_STATE_SUSPEND);
6896 break;
6898 default:
6899 break;
6903 if (kind == RESET_KIND_INIT ||
6904 kind == RESET_KIND_SUSPEND)
6905 tg3_ape_driver_state_change(tp, kind);
6908 /* tp->lock is held. */
6909 static void tg3_write_sig_post_reset(struct tg3 *tp, int kind)
6911 if (tg3_flag(tp, ASF_NEW_HANDSHAKE)) {
6912 switch (kind) {
6913 case RESET_KIND_INIT:
6914 tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
6915 DRV_STATE_START_DONE);
6916 break;
6918 case RESET_KIND_SHUTDOWN:
6919 tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
6920 DRV_STATE_UNLOAD_DONE);
6921 break;
6923 default:
6924 break;
6928 if (kind == RESET_KIND_SHUTDOWN)
6929 tg3_ape_driver_state_change(tp, kind);
6932 /* tp->lock is held. */
6933 static void tg3_write_sig_legacy(struct tg3 *tp, int kind)
6935 if (tg3_flag(tp, ENABLE_ASF)) {
6936 switch (kind) {
6937 case RESET_KIND_INIT:
6938 tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
6939 DRV_STATE_START);
6940 break;
6942 case RESET_KIND_SHUTDOWN:
6943 tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
6944 DRV_STATE_UNLOAD);
6945 break;
6947 case RESET_KIND_SUSPEND:
6948 tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
6949 DRV_STATE_SUSPEND);
6950 break;
6952 default:
6953 break;
6958 static int tg3_poll_fw(struct tg3 *tp)
6960 int i;
6961 u32 val;
6963 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
6964 /* Wait up to 20ms for init done. */
6965 for (i = 0; i < 200; i++) {
6966 if (tr32(VCPU_STATUS) & VCPU_STATUS_INIT_DONE)
6967 return 0;
6968 udelay(100);
6970 return -ENODEV;
6973 /* Wait for firmware initialization to complete. */
6974 for (i = 0; i < 100000; i++) {
6975 tg3_read_mem(tp, NIC_SRAM_FIRMWARE_MBOX, &val);
6976 if (val == ~NIC_SRAM_FIRMWARE_MBOX_MAGIC1)
6977 break;
6978 udelay(10);
6981 /* Chip might not be fitted with firmware. Some Sun onboard
6982 * parts are configured like that. So don't signal the timeout
6983 * of the above loop as an error, but do report the lack of
6984 * running firmware once.
6985 */
6986 if (i >= 100000 && !tg3_flag(tp, NO_FWARE_REPORTED)) {
6987 tg3_flag_set(tp, NO_FWARE_REPORTED);
6989 netdev_info(tp->dev, "No firmware running\n");
6992 if (tp->pci_chip_rev_id == CHIPREV_ID_57765_A0) {
6993 /* The 57765 A0 needs a little more
6994 * time to do some important work.
6995 */
6996 mdelay(10);
6999 return 0;
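/* For illustration (hypothetical helper): the poll loop above waits
 * for bootcode to write the one's complement of the magic value back
 * into the mailbox, i.e. firmware init is complete once this returns
 * true:
 */
static bool example_fw_init_done(struct tg3 *tp)
{
	u32 val;

	tg3_read_mem(tp, NIC_SRAM_FIRMWARE_MBOX, &val);
	return val == ~NIC_SRAM_FIRMWARE_MBOX_MAGIC1;
}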
7002 /* Save PCI command register before chip reset */
7003 static void tg3_save_pci_state(struct tg3 *tp)
7005 pci_read_config_word(tp->pdev, PCI_COMMAND, &tp->pci_cmd);
7008 /* Restore PCI state after chip reset */
7009 static void tg3_restore_pci_state(struct tg3 *tp)
7011 u32 val;
7013 /* Re-enable indirect register accesses. */
7014 pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
7015 tp->misc_host_ctrl);
7017 /* Set MAX PCI retry to zero. */
7018 val = (PCISTATE_ROM_ENABLE | PCISTATE_ROM_RETRY_ENABLE);
7019 if (tp->pci_chip_rev_id == CHIPREV_ID_5704_A0 &&
7020 tg3_flag(tp, PCIX_MODE))
7021 val |= PCISTATE_RETRY_SAME_DMA;
7022 /* Allow reads and writes to the APE register and memory space. */
7023 if (tg3_flag(tp, ENABLE_APE))
7024 val |= PCISTATE_ALLOW_APE_CTLSPC_WR |
7025 PCISTATE_ALLOW_APE_SHMEM_WR |
7026 PCISTATE_ALLOW_APE_PSPACE_WR;
7027 pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE, val);
7029 pci_write_config_word(tp->pdev, PCI_COMMAND, tp->pci_cmd);
7031 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5785) {
7032 if (tg3_flag(tp, PCI_EXPRESS))
7033 pcie_set_readrq(tp->pdev, tp->pcie_readrq);
7034 else {
7035 pci_write_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE,
7036 tp->pci_cacheline_sz);
7037 pci_write_config_byte(tp->pdev, PCI_LATENCY_TIMER,
7038 tp->pci_lat_timer);
7042 /* Make sure PCI-X relaxed ordering bit is clear. */
7043 if (tg3_flag(tp, PCIX_MODE)) {
7044 u16 pcix_cmd;
7046 pci_read_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
7047 &pcix_cmd);
7048 pcix_cmd &= ~PCI_X_CMD_ERO;
7049 pci_write_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
7050 pcix_cmd);
7053 if (tg3_flag(tp, 5780_CLASS)) {
7055 /* Chip reset on 5780 will reset MSI enable bit,
7056 * so we need to restore it.
7057 */
7058 if (tg3_flag(tp, USING_MSI)) {
7059 u16 ctrl;
7061 pci_read_config_word(tp->pdev,
7062 tp->msi_cap + PCI_MSI_FLAGS,
7063 &ctrl);
7064 pci_write_config_word(tp->pdev,
7065 tp->msi_cap + PCI_MSI_FLAGS,
7066 ctrl | PCI_MSI_FLAGS_ENABLE);
7067 val = tr32(MSGINT_MODE);
7068 tw32(MSGINT_MODE, val | MSGINT_MODE_ENABLE);
7073 static void tg3_stop_fw(struct tg3 *);
7075 /* tp->lock is held. */
7076 static int tg3_chip_reset(struct tg3 *tp)
7078 u32 val;
7079 void (*write_op)(struct tg3 *, u32, u32);
7080 int i, err;
7082 tg3_nvram_lock(tp);
7084 tg3_ape_lock(tp, TG3_APE_LOCK_GRC);
7086 /* No matching tg3_nvram_unlock() after this because
7087 * chip reset below will undo the nvram lock.
7088 */
7089 tp->nvram_lock_cnt = 0;
7091 /* GRC_MISC_CFG core clock reset will clear the memory
7092 * enable bit in PCI register 4 and the MSI enable bit
7093 * on some chips, so we save relevant registers here.
7094 */
7095 tg3_save_pci_state(tp);
7097 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752 ||
7098 tg3_flag(tp, 5755_PLUS))
7099 tw32(GRC_FASTBOOT_PC, 0);
7101 /*
7102 * We must avoid the readl() that normally takes place.
7103 * It locks machines, causes machine checks, and other
7104 * fun things. So, temporarily disable the 5701
7105 * hardware workaround, while we do the reset.
7106 */
7107 write_op = tp->write32;
7108 if (write_op == tg3_write_flush_reg32)
7109 tp->write32 = tg3_write32;
7111 /* Prevent the irq handler from reading or writing PCI registers
7112 * during chip reset when the memory enable bit in the PCI command
7113 * register may be cleared. The chip does not generate interrupt
7114 * at this time, but the irq handler may still be called due to irq
7115 * sharing or irqpoll.
7116 */
7117 tg3_flag_set(tp, CHIP_RESETTING);
7118 for (i = 0; i < tp->irq_cnt; i++) {
7119 struct tg3_napi *tnapi = &tp->napi[i];
7120 if (tnapi->hw_status) {
7121 tnapi->hw_status->status = 0;
7122 tnapi->hw_status->status_tag = 0;
7124 tnapi->last_tag = 0;
7125 tnapi->last_irq_tag = 0;
7127 smp_mb();
7129 for (i = 0; i < tp->irq_cnt; i++)
7130 synchronize_irq(tp->napi[i].irq_vec);
7132 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780) {
7133 val = tr32(TG3_PCIE_LNKCTL) & ~TG3_PCIE_LNKCTL_L1_PLL_PD_EN;
7134 tw32(TG3_PCIE_LNKCTL, val | TG3_PCIE_LNKCTL_L1_PLL_PD_DIS);
7137 /* do the reset */
7138 val = GRC_MISC_CFG_CORECLK_RESET;
7140 if (tg3_flag(tp, PCI_EXPRESS)) {
7141 /* Force PCIe 1.0a mode */
7142 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5785 &&
7143 !tg3_flag(tp, 57765_PLUS) &&
7144 tr32(TG3_PCIE_PHY_TSTCTL) ==
7145 (TG3_PCIE_PHY_TSTCTL_PCIE10 | TG3_PCIE_PHY_TSTCTL_PSCRAM))
7146 tw32(TG3_PCIE_PHY_TSTCTL, TG3_PCIE_PHY_TSTCTL_PSCRAM);
7148 if (tp->pci_chip_rev_id != CHIPREV_ID_5750_A0) {
7149 tw32(GRC_MISC_CFG, (1 << 29));
7150 val |= (1 << 29);
7154 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
7155 tw32(VCPU_STATUS, tr32(VCPU_STATUS) | VCPU_STATUS_DRV_RESET);
7156 tw32(GRC_VCPU_EXT_CTRL,
7157 tr32(GRC_VCPU_EXT_CTRL) & ~GRC_VCPU_EXT_CTRL_HALT_CPU);
7160 /* Manage gphy power for all CPMU absent PCIe devices. */
7161 if (tg3_flag(tp, 5705_PLUS) && !tg3_flag(tp, CPMU_PRESENT))
7162 val |= GRC_MISC_CFG_KEEP_GPHY_POWER;
7164 tw32(GRC_MISC_CFG, val);
7166 /* restore 5701 hardware bug workaround write method */
7167 tp->write32 = write_op;
7169 /* Unfortunately, we have to delay before the PCI read back.
7170 * Some 575X chips even will not respond to a PCI cfg access
7171 * when the reset command is given to the chip.
7173 * How do these hardware designers expect things to work
7174 * properly if the PCI write is posted for a long period
7175 * of time? It is always necessary to have some method by
7176 * which a register read back can occur to push the write
7177 * out which does the reset.
7179 * For most tg3 variants the trick below was working.
7180 * Ho hum...
7181 */
7182 udelay(120);
7184 /* Flush PCI posted writes. The normal MMIO registers
7185 * are inaccessible at this time so this is the only
7186 * way to do this reliably (actually, this is no longer
7187 * the case, see above). I tried to use indirect
7188 * register read/write but this upset some 5701 variants.
7189 */
7190 pci_read_config_dword(tp->pdev, PCI_COMMAND, &val);
7192 udelay(120);
7194 if (tg3_flag(tp, PCI_EXPRESS) && tp->pcie_cap) {
7195 u16 val16;
7197 if (tp->pci_chip_rev_id == CHIPREV_ID_5750_A0) {
7198 int i;
7199 u32 cfg_val;
7201 /* Wait for link training to complete. */
7202 for (i = 0; i < 5000; i++)
7203 udelay(100);
7205 pci_read_config_dword(tp->pdev, 0xc4, &cfg_val);
7206 pci_write_config_dword(tp->pdev, 0xc4,
7207 cfg_val | (1 << 15));
7210 /* Clear the "no snoop" and "relaxed ordering" bits. */
7211 pci_read_config_word(tp->pdev,
7212 tp->pcie_cap + PCI_EXP_DEVCTL,
7213 &val16);
7214 val16 &= ~(PCI_EXP_DEVCTL_RELAX_EN |
7215 PCI_EXP_DEVCTL_NOSNOOP_EN);
7216 /*
7217 * Older PCIe devices only support the 128 byte
7218 * MPS setting. Enforce the restriction.
7219 */
7220 if (!tg3_flag(tp, CPMU_PRESENT))
7221 val16 &= ~PCI_EXP_DEVCTL_PAYLOAD;
7222 pci_write_config_word(tp->pdev,
7223 tp->pcie_cap + PCI_EXP_DEVCTL,
7224 val16);
7226 pcie_set_readrq(tp->pdev, tp->pcie_readrq);
7228 /* Clear error status */
7229 pci_write_config_word(tp->pdev,
7230 tp->pcie_cap + PCI_EXP_DEVSTA,
7231 PCI_EXP_DEVSTA_CED |
7232 PCI_EXP_DEVSTA_NFED |
7233 PCI_EXP_DEVSTA_FED |
7234 PCI_EXP_DEVSTA_URD);
7237 tg3_restore_pci_state(tp);
7239 tg3_flag_clear(tp, CHIP_RESETTING);
7240 tg3_flag_clear(tp, ERROR_PROCESSED);
7242 val = 0;
7243 if (tg3_flag(tp, 5780_CLASS))
7244 val = tr32(MEMARB_MODE);
7245 tw32(MEMARB_MODE, val | MEMARB_MODE_ENABLE);
7247 if (tp->pci_chip_rev_id == CHIPREV_ID_5750_A3) {
7248 tg3_stop_fw(tp);
7249 tw32(0x5000, 0x400);
7252 tw32(GRC_MODE, tp->grc_mode);
7254 if (tp->pci_chip_rev_id == CHIPREV_ID_5705_A0) {
7255 val = tr32(0xc4);
7257 tw32(0xc4, val | (1 << 15));
7260 if ((tp->nic_sram_data_cfg & NIC_SRAM_DATA_CFG_MINI_PCI) != 0 &&
7261 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
7262 tp->pci_clock_ctrl |= CLOCK_CTRL_CLKRUN_OENABLE;
7263 if (tp->pci_chip_rev_id == CHIPREV_ID_5705_A0)
7264 tp->pci_clock_ctrl |= CLOCK_CTRL_FORCE_CLKRUN;
7265 tw32(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl);
7268 if (tg3_flag(tp, ENABLE_APE))
7269 tp->mac_mode = MAC_MODE_APE_TX_EN |
7270 MAC_MODE_APE_RX_EN |
7271 MAC_MODE_TDE_ENABLE;
7273 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
7274 tp->mac_mode |= MAC_MODE_PORT_MODE_TBI;
7275 val = tp->mac_mode;
7276 } else if (tp->phy_flags & TG3_PHYFLG_MII_SERDES) {
7277 tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
7278 val = tp->mac_mode;
7279 } else
7280 val = 0;
7282 tw32_f(MAC_MODE, val);
7283 udelay(40);
7285 tg3_ape_unlock(tp, TG3_APE_LOCK_GRC);
7287 err = tg3_poll_fw(tp);
7288 if (err)
7289 return err;
7291 tg3_mdio_start(tp);
7293 if (tg3_flag(tp, PCI_EXPRESS) &&
7294 tp->pci_chip_rev_id != CHIPREV_ID_5750_A0 &&
7295 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5785 &&
7296 !tg3_flag(tp, 57765_PLUS)) {
7297 val = tr32(0x7c00);
7299 tw32(0x7c00, val | (1 << 25));
7302 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720) {
7303 val = tr32(TG3_CPMU_CLCK_ORIDE);
7304 tw32(TG3_CPMU_CLCK_ORIDE, val & ~CPMU_CLCK_ORIDE_MAC_ORIDE_EN);
7307 /* Reprobe ASF enable state. */
7308 tg3_flag_clear(tp, ENABLE_ASF);
7309 tg3_flag_clear(tp, ASF_NEW_HANDSHAKE);
7310 tg3_read_mem(tp, NIC_SRAM_DATA_SIG, &val);
7311 if (val == NIC_SRAM_DATA_SIG_MAGIC) {
7312 u32 nic_cfg;
7314 tg3_read_mem(tp, NIC_SRAM_DATA_CFG, &nic_cfg);
7315 if (nic_cfg & NIC_SRAM_DATA_CFG_ASF_ENABLE) {
7316 tg3_flag_set(tp, ENABLE_ASF);
7317 tp->last_event_jiffies = jiffies;
7318 if (tg3_flag(tp, 5750_PLUS))
7319 tg3_flag_set(tp, ASF_NEW_HANDSHAKE);
7323 return 0;
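/* For illustration (hypothetical helper): a read on the same bus path
 * cannot complete before earlier posted writes do, so a config space
 * read is the flush of last resort used by tg3_chip_reset() above
 * while the MMIO window is unusable:
 */
static void example_flush_posted_pci_writes(struct pci_dev *pdev)
{
	u32 dummy;

	/* Any config register works; PCI_COMMAND is always present. */
	pci_read_config_dword(pdev, PCI_COMMAND, &dummy);
}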
7326 /* tp->lock is held. */
7327 static void tg3_stop_fw(struct tg3 *tp)
7329 if (tg3_flag(tp, ENABLE_ASF) && !tg3_flag(tp, ENABLE_APE)) {
7330 /* Wait for RX cpu to ACK the previous event. */
7331 tg3_wait_for_event_ack(tp);
7333 tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX, FWCMD_NICDRV_PAUSE_FW);
7335 tg3_generate_fw_event(tp);
7337 /* Wait for RX cpu to ACK this event. */
7338 tg3_wait_for_event_ack(tp);
7342 /* tp->lock is held. */
7343 static int tg3_halt(struct tg3 *tp, int kind, int silent)
7345 int err;
7347 tg3_stop_fw(tp);
7349 tg3_write_sig_pre_reset(tp, kind);
7351 tg3_abort_hw(tp, silent);
7352 err = tg3_chip_reset(tp);
7354 __tg3_set_mac_addr(tp, 0);
7356 tg3_write_sig_legacy(tp, kind);
7357 tg3_write_sig_post_reset(tp, kind);
7359 if (err)
7360 return err;
7362 return 0;
7365 #define RX_CPU_SCRATCH_BASE 0x30000
7366 #define RX_CPU_SCRATCH_SIZE 0x04000
7367 #define TX_CPU_SCRATCH_BASE 0x34000
7368 #define TX_CPU_SCRATCH_SIZE 0x04000
7370 /* tp->lock is held. */
7371 static int tg3_halt_cpu(struct tg3 *tp, u32 offset)
7373 int i;
7375 BUG_ON(offset == TX_CPU_BASE && tg3_flag(tp, 5705_PLUS));
7377 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
7378 u32 val = tr32(GRC_VCPU_EXT_CTRL);
7380 tw32(GRC_VCPU_EXT_CTRL, val | GRC_VCPU_EXT_CTRL_HALT_CPU);
7381 return 0;
7383 if (offset == RX_CPU_BASE) {
7384 for (i = 0; i < 10000; i++) {
7385 tw32(offset + CPU_STATE, 0xffffffff);
7386 tw32(offset + CPU_MODE, CPU_MODE_HALT);
7387 if (tr32(offset + CPU_MODE) & CPU_MODE_HALT)
7388 break;
7391 tw32(offset + CPU_STATE, 0xffffffff);
7392 tw32_f(offset + CPU_MODE, CPU_MODE_HALT);
7393 udelay(10);
7394 } else {
7395 for (i = 0; i < 10000; i++) {
7396 tw32(offset + CPU_STATE, 0xffffffff);
7397 tw32(offset + CPU_MODE, CPU_MODE_HALT);
7398 if (tr32(offset + CPU_MODE) & CPU_MODE_HALT)
7399 break;
7403 if (i >= 10000) {
7404 netdev_err(tp->dev, "%s timed out, %s CPU\n",
7405 __func__, offset == RX_CPU_BASE ? "RX" : "TX");
7406 return -ENODEV;
7409 /* Clear firmware's nvram arbitration. */
7410 if (tg3_flag(tp, NVRAM))
7411 tw32(NVRAM_SWARB, SWARB_REQ_CLR0);
7412 return 0;
7415 struct fw_info {
7416 unsigned int fw_base;
7417 unsigned int fw_len;
7418 const __be32 *fw_data;
7421 /* tp->lock is held. */
7422 static int tg3_load_firmware_cpu(struct tg3 *tp, u32 cpu_base, u32 cpu_scratch_base,
7423 int cpu_scratch_size, struct fw_info *info)
7425 int err, lock_err, i;
7426 void (*write_op)(struct tg3 *, u32, u32);
7428 if (cpu_base == TX_CPU_BASE && tg3_flag(tp, 5705_PLUS)) {
7429 netdev_err(tp->dev,
7430 "%s: Trying to load TX cpu firmware which is 5705\n",
7431 __func__);
7432 return -EINVAL;
7435 if (tg3_flag(tp, 5705_PLUS))
7436 write_op = tg3_write_mem;
7437 else
7438 write_op = tg3_write_indirect_reg32;
7440 /* It is possible that bootcode is still loading at this point.
7441 * Get the nvram lock first before halting the cpu.
7442 */
7443 lock_err = tg3_nvram_lock(tp);
7444 err = tg3_halt_cpu(tp, cpu_base);
7445 if (!lock_err)
7446 tg3_nvram_unlock(tp);
7447 if (err)
7448 goto out;
7450 for (i = 0; i < cpu_scratch_size; i += sizeof(u32))
7451 write_op(tp, cpu_scratch_base + i, 0);
7452 tw32(cpu_base + CPU_STATE, 0xffffffff);
7453 tw32(cpu_base + CPU_MODE, tr32(cpu_base+CPU_MODE)|CPU_MODE_HALT);
7454 for (i = 0; i < (info->fw_len / sizeof(u32)); i++)
7455 write_op(tp, (cpu_scratch_base +
7456 (info->fw_base & 0xffff) +
7457 (i * sizeof(u32))),
7458 be32_to_cpu(info->fw_data[i]));
7460 err = 0;
7462 out:
7463 return err;
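/* For illustration: the firmware image is stored as big-endian words
 * (__be32), so the be32_to_cpu() in the copy loop above makes the load
 * endian-safe. On a little-endian host the file bytes
 * 0x12 0x34 0x56 0x78 are handed to write_op() as the value
 * 0x12345678, the same value a big-endian host would produce.
 */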
7466 /* tp->lock is held. */
7467 static int tg3_load_5701_a0_firmware_fix(struct tg3 *tp)
7469 struct fw_info info;
7470 const __be32 *fw_data;
7471 int err, i;
7473 fw_data = (void *)tp->fw->data;
7475 /* Firmware blob starts with version numbers, followed by
7476 start address and length. We are setting complete length.
7477 length = end_address_of_bss - start_address_of_text.
7478 Remainder is the blob to be loaded contiguously
7479 from start address. */
7481 info.fw_base = be32_to_cpu(fw_data[1]);
7482 info.fw_len = tp->fw->size - 12;
7483 info.fw_data = &fw_data[3];
7485 err = tg3_load_firmware_cpu(tp, RX_CPU_BASE,
7486 RX_CPU_SCRATCH_BASE, RX_CPU_SCRATCH_SIZE,
7487 &info);
7488 if (err)
7489 return err;
7491 err = tg3_load_firmware_cpu(tp, TX_CPU_BASE,
7492 TX_CPU_SCRATCH_BASE, TX_CPU_SCRATCH_SIZE,
7493 &info);
7494 if (err)
7495 return err;
7497 /* Now start up only the RX cpu. */
7498 tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff);
7499 tw32_f(RX_CPU_BASE + CPU_PC, info.fw_base);
7501 for (i = 0; i < 5; i++) {
7502 if (tr32(RX_CPU_BASE + CPU_PC) == info.fw_base)
7503 break;
7504 tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff);
7505 tw32(RX_CPU_BASE + CPU_MODE, CPU_MODE_HALT);
7506 tw32_f(RX_CPU_BASE + CPU_PC, info.fw_base);
7507 udelay(1000);
7509 if (i >= 5) {
7510 netdev_err(tp->dev, "%s fails to set RX CPU PC, is %08x "
7511 "should be %08x\n", __func__,
7512 tr32(RX_CPU_BASE + CPU_PC), info.fw_base);
7513 return -ENODEV;
7515 tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff);
7516 tw32_f(RX_CPU_BASE + CPU_MODE, 0x00000000);
7518 return 0;
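/* For illustration: the blob layout described in the comment above,
 * viewed as a struct. Note the driver deliberately uses
 * tp->fw->size - 12 for the length rather than trusting the header's
 * own length word.
 */
struct example_tg3_fw_hdr {
	__be32 version;		/* fw_data[0] */
	__be32 base;		/* fw_data[1]: load/start address */
	__be32 len;		/* fw_data[2]: end_of_bss - start_of_text */
	__be32 data[];		/* loaded contiguously at 'base' */
};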
7521 /* tp->lock is held. */
7522 static int tg3_load_tso_firmware(struct tg3 *tp)
7524 struct fw_info info;
7525 const __be32 *fw_data;
7526 unsigned long cpu_base, cpu_scratch_base, cpu_scratch_size;
7527 int err, i;
7529 if (tg3_flag(tp, HW_TSO_1) ||
7530 tg3_flag(tp, HW_TSO_2) ||
7531 tg3_flag(tp, HW_TSO_3))
7532 return 0;
7534 fw_data = (void *)tp->fw->data;
7536 /* Firmware blob starts with version numbers, followed by
7537 start address and length. We are setting complete length.
7538 length = end_address_of_bss - start_address_of_text.
7539 Remainder is the blob to be loaded contiguously
7540 from start address. */
7542 info.fw_base = be32_to_cpu(fw_data[1]);
7543 cpu_scratch_size = tp->fw_len;
7544 info.fw_len = tp->fw->size - 12;
7545 info.fw_data = &fw_data[3];
7547 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
7548 cpu_base = RX_CPU_BASE;
7549 cpu_scratch_base = NIC_SRAM_MBUF_POOL_BASE5705;
7550 } else {
7551 cpu_base = TX_CPU_BASE;
7552 cpu_scratch_base = TX_CPU_SCRATCH_BASE;
7553 cpu_scratch_size = TX_CPU_SCRATCH_SIZE;
7556 err = tg3_load_firmware_cpu(tp, cpu_base,
7557 cpu_scratch_base, cpu_scratch_size,
7558 &info);
7559 if (err)
7560 return err;
7562 /* Now start up the cpu. */
7563 tw32(cpu_base + CPU_STATE, 0xffffffff);
7564 tw32_f(cpu_base + CPU_PC, info.fw_base);
7566 for (i = 0; i < 5; i++) {
7567 if (tr32(cpu_base + CPU_PC) == info.fw_base)
7568 break;
7569 tw32(cpu_base + CPU_STATE, 0xffffffff);
7570 tw32(cpu_base + CPU_MODE, CPU_MODE_HALT);
7571 tw32_f(cpu_base + CPU_PC, info.fw_base);
7572 udelay(1000);
7574 if (i >= 5) {
7575 netdev_err(tp->dev,
7576 "%s fails to set CPU PC, is %08x should be %08x\n",
7577 __func__, tr32(cpu_base + CPU_PC), info.fw_base);
7578 return -ENODEV;
7580 tw32(cpu_base + CPU_STATE, 0xffffffff);
7581 tw32_f(cpu_base + CPU_MODE, 0x00000000);
7582 return 0;
7586 static int tg3_set_mac_addr(struct net_device *dev, void *p)
7588 struct tg3 *tp = netdev_priv(dev);
7589 struct sockaddr *addr = p;
7590 int err = 0, skip_mac_1 = 0;
7592 if (!is_valid_ether_addr(addr->sa_data))
7593 return -EINVAL;
7595 memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
7597 if (!netif_running(dev))
7598 return 0;
7600 if (tg3_flag(tp, ENABLE_ASF)) {
7601 u32 addr0_high, addr0_low, addr1_high, addr1_low;
7603 addr0_high = tr32(MAC_ADDR_0_HIGH);
7604 addr0_low = tr32(MAC_ADDR_0_LOW);
7605 addr1_high = tr32(MAC_ADDR_1_HIGH);
7606 addr1_low = tr32(MAC_ADDR_1_LOW);
7608 /* Skip MAC addr 1 if ASF is using it. */
7609 if ((addr0_high != addr1_high || addr0_low != addr1_low) &&
7610 !(addr1_high == 0 && addr1_low == 0))
7611 skip_mac_1 = 1;
7613 spin_lock_bh(&tp->lock);
7614 __tg3_set_mac_addr(tp, skip_mac_1);
7615 spin_unlock_bh(&tp->lock);
7617 return err;
7620 /* tp->lock is held. */
7621 static void tg3_set_bdinfo(struct tg3 *tp, u32 bdinfo_addr,
7622 dma_addr_t mapping, u32 maxlen_flags,
7623 u32 nic_addr)
7625 tg3_write_mem(tp,
7626 (bdinfo_addr + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH),
7627 ((u64) mapping >> 32));
7628 tg3_write_mem(tp,
7629 (bdinfo_addr + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW),
7630 ((u64) mapping & 0xffffffff));
7631 tg3_write_mem(tp,
7632 (bdinfo_addr + TG3_BDINFO_MAXLEN_FLAGS),
7633 maxlen_flags);
7635 if (!tg3_flag(tp, 5705_PLUS))
7636 tg3_write_mem(tp,
7637 (bdinfo_addr + TG3_BDINFO_NIC_ADDR),
7638 nic_addr);
7641 static void __tg3_set_rx_mode(struct net_device *);
7642 static void __tg3_set_coalesce(struct tg3 *tp, struct ethtool_coalesce *ec)
7644 int i;
7646 if (!tg3_flag(tp, ENABLE_TSS)) {
7647 tw32(HOSTCC_TXCOL_TICKS, ec->tx_coalesce_usecs);
7648 tw32(HOSTCC_TXMAX_FRAMES, ec->tx_max_coalesced_frames);
7649 tw32(HOSTCC_TXCOAL_MAXF_INT, ec->tx_max_coalesced_frames_irq);
7650 } else {
7651 tw32(HOSTCC_TXCOL_TICKS, 0);
7652 tw32(HOSTCC_TXMAX_FRAMES, 0);
7653 tw32(HOSTCC_TXCOAL_MAXF_INT, 0);
7656 if (!tg3_flag(tp, ENABLE_RSS)) {
7657 tw32(HOSTCC_RXCOL_TICKS, ec->rx_coalesce_usecs);
7658 tw32(HOSTCC_RXMAX_FRAMES, ec->rx_max_coalesced_frames);
7659 tw32(HOSTCC_RXCOAL_MAXF_INT, ec->rx_max_coalesced_frames_irq);
7660 } else {
7661 tw32(HOSTCC_RXCOL_TICKS, 0);
7662 tw32(HOSTCC_RXMAX_FRAMES, 0);
7663 tw32(HOSTCC_RXCOAL_MAXF_INT, 0);
7666 if (!tg3_flag(tp, 5705_PLUS)) {
7667 u32 val = ec->stats_block_coalesce_usecs;
7669 tw32(HOSTCC_RXCOAL_TICK_INT, ec->rx_coalesce_usecs_irq);
7670 tw32(HOSTCC_TXCOAL_TICK_INT, ec->tx_coalesce_usecs_irq);
7672 if (!netif_carrier_ok(tp->dev))
7673 val = 0;
7675 tw32(HOSTCC_STAT_COAL_TICKS, val);
7678 for (i = 0; i < tp->irq_cnt - 1; i++) {
7679 u32 reg;
7681 reg = HOSTCC_RXCOL_TICKS_VEC1 + i * 0x18;
7682 tw32(reg, ec->rx_coalesce_usecs);
7683 reg = HOSTCC_RXMAX_FRAMES_VEC1 + i * 0x18;
7684 tw32(reg, ec->rx_max_coalesced_frames);
7685 reg = HOSTCC_RXCOAL_MAXF_INT_VEC1 + i * 0x18;
7686 tw32(reg, ec->rx_max_coalesced_frames_irq);
7688 if (tg3_flag(tp, ENABLE_TSS)) {
7689 reg = HOSTCC_TXCOL_TICKS_VEC1 + i * 0x18;
7690 tw32(reg, ec->tx_coalesce_usecs);
7691 reg = HOSTCC_TXMAX_FRAMES_VEC1 + i * 0x18;
7692 tw32(reg, ec->tx_max_coalesced_frames);
7693 reg = HOSTCC_TXCOAL_MAXF_INT_VEC1 + i * 0x18;
7694 tw32(reg, ec->tx_max_coalesced_frames_irq);
7698 for (; i < tp->irq_max - 1; i++) {
7699 tw32(HOSTCC_RXCOL_TICKS_VEC1 + i * 0x18, 0);
7700 tw32(HOSTCC_RXMAX_FRAMES_VEC1 + i * 0x18, 0);
7701 tw32(HOSTCC_RXCOAL_MAXF_INT_VEC1 + i * 0x18, 0);
7703 if (tg3_flag(tp, ENABLE_TSS)) {
7704 tw32(HOSTCC_TXCOL_TICKS_VEC1 + i * 0x18, 0);
7705 tw32(HOSTCC_TXMAX_FRAMES_VEC1 + i * 0x18, 0);
7706 tw32(HOSTCC_TXCOAL_MAXF_INT_VEC1 + i * 0x18, 0);
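/* For illustration (hypothetical helper): each additional MSI-X vector
 * owns its own copy of the coalescing registers at a fixed 0x18-byte
 * stride, which is all the "+ i * 0x18" arithmetic above computes:
 */
static inline u32 example_coal_reg_for_vec(u32 vec1_reg, int i)
{
	return vec1_reg + i * 0x18;	/* register copy for vector i + 1 */
}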
7711 /* tp->lock is held. */
7712 static void tg3_rings_reset(struct tg3 *tp)
7714 int i;
7715 u32 stblk, txrcb, rxrcb, limit;
7716 struct tg3_napi *tnapi = &tp->napi[0];
7718 /* Disable all transmit rings but the first. */
7719 if (!tg3_flag(tp, 5705_PLUS))
7720 limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE * 16;
7721 else if (tg3_flag(tp, 5717_PLUS))
7722 limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE * 4;
7723 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765)
7724 limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE * 2;
7725 else
7726 limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE;
7728 for (txrcb = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE;
7729 txrcb < limit; txrcb += TG3_BDINFO_SIZE)
7730 tg3_write_mem(tp, txrcb + TG3_BDINFO_MAXLEN_FLAGS,
7731 BDINFO_FLAGS_DISABLED);
7734 /* Disable all receive return rings but the first. */
7735 if (tg3_flag(tp, 5717_PLUS))
7736 limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE * 17;
7737 else if (!tg3_flag(tp, 5705_PLUS))
7738 limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE * 16;
7739 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
7740 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765)
7741 limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE * 4;
7742 else
7743 limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE;
7745 for (rxrcb = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE;
7746 rxrcb < limit; rxrcb += TG3_BDINFO_SIZE)
7747 tg3_write_mem(tp, rxrcb + TG3_BDINFO_MAXLEN_FLAGS,
7748 BDINFO_FLAGS_DISABLED);
7750 /* Disable interrupts */
7751 tw32_mailbox_f(tp->napi[0].int_mbox, 1);
7753 /* Zero mailbox registers. */
7754 if (tg3_flag(tp, SUPPORT_MSIX)) {
7755 for (i = 1; i < tp->irq_max; i++) {
7756 tp->napi[i].tx_prod = 0;
7757 tp->napi[i].tx_cons = 0;
7758 if (tg3_flag(tp, ENABLE_TSS))
7759 tw32_mailbox(tp->napi[i].prodmbox, 0);
7760 tw32_rx_mbox(tp->napi[i].consmbox, 0);
7761 tw32_mailbox_f(tp->napi[i].int_mbox, 1);
7763 if (!tg3_flag(tp, ENABLE_TSS))
7764 tw32_mailbox(tp->napi[0].prodmbox, 0);
7765 } else {
7766 tp->napi[0].tx_prod = 0;
7767 tp->napi[0].tx_cons = 0;
7768 tw32_mailbox(tp->napi[0].prodmbox, 0);
7769 tw32_rx_mbox(tp->napi[0].consmbox, 0);
7772 /* Make sure the NIC-based send BD rings are disabled. */
7773 if (!tg3_flag(tp, 5705_PLUS)) {
7774 u32 mbox = MAILBOX_SNDNIC_PROD_IDX_0 + TG3_64BIT_REG_LOW;
7775 for (i = 0; i < 16; i++)
7776 tw32_tx_mbox(mbox + i * 8, 0);
7779 txrcb = NIC_SRAM_SEND_RCB;
7780 rxrcb = NIC_SRAM_RCV_RET_RCB;
7782 /* Clear status block in ram. */
7783 memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
7785 /* Set status block DMA address */
7786 tw32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH,
7787 ((u64) tnapi->status_mapping >> 32));
7788 tw32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW,
7789 ((u64) tnapi->status_mapping & 0xffffffff));
7791 if (tnapi->tx_ring) {
7792 tg3_set_bdinfo(tp, txrcb, tnapi->tx_desc_mapping,
7793 (TG3_TX_RING_SIZE <<
7794 BDINFO_FLAGS_MAXLEN_SHIFT),
7795 NIC_SRAM_TX_BUFFER_DESC);
7796 txrcb += TG3_BDINFO_SIZE;
7799 if (tnapi->rx_rcb) {
7800 tg3_set_bdinfo(tp, rxrcb, tnapi->rx_rcb_mapping,
7801 (tp->rx_ret_ring_mask + 1) <<
7802 BDINFO_FLAGS_MAXLEN_SHIFT, 0);
7803 rxrcb += TG3_BDINFO_SIZE;
7806 stblk = HOSTCC_STATBLCK_RING1;
7808 for (i = 1, tnapi++; i < tp->irq_cnt; i++, tnapi++) {
7809 u64 mapping = (u64)tnapi->status_mapping;
7810 tw32(stblk + TG3_64BIT_REG_HIGH, mapping >> 32);
7811 tw32(stblk + TG3_64BIT_REG_LOW, mapping & 0xffffffff);
7813 /* Clear status block in ram. */
7814 memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
7816 if (tnapi->tx_ring) {
7817 tg3_set_bdinfo(tp, txrcb, tnapi->tx_desc_mapping,
7818 (TG3_TX_RING_SIZE <<
7819 BDINFO_FLAGS_MAXLEN_SHIFT),
7820 NIC_SRAM_TX_BUFFER_DESC);
7821 txrcb += TG3_BDINFO_SIZE;
7824 tg3_set_bdinfo(tp, rxrcb, tnapi->rx_rcb_mapping,
7825 ((tp->rx_ret_ring_mask + 1) <<
7826 BDINFO_FLAGS_MAXLEN_SHIFT), 0);
7828 stblk += 8;
7829 rxrcb += TG3_BDINFO_SIZE;
7833 static void tg3_setup_rxbd_thresholds(struct tg3 *tp)
7835 u32 val, bdcache_maxcnt, host_rep_thresh, nic_rep_thresh;
7837 if (!tg3_flag(tp, 5750_PLUS) ||
7838 tg3_flag(tp, 5780_CLASS) ||
7839 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750 ||
7840 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752)
7841 bdcache_maxcnt = TG3_SRAM_RX_STD_BDCACHE_SIZE_5700;
7842 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
7843 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787)
7844 bdcache_maxcnt = TG3_SRAM_RX_STD_BDCACHE_SIZE_5755;
7845 else
7846 bdcache_maxcnt = TG3_SRAM_RX_STD_BDCACHE_SIZE_5906;
7848 nic_rep_thresh = min(bdcache_maxcnt / 2, tp->rx_std_max_post);
7849 host_rep_thresh = max_t(u32, tp->rx_pending / 8, 1);
7851 val = min(nic_rep_thresh, host_rep_thresh);
7852 tw32(RCVBDI_STD_THRESH, val);
7854 if (tg3_flag(tp, 57765_PLUS))
7855 tw32(STD_REPLENISH_LWM, bdcache_maxcnt);
7857 if (!tg3_flag(tp, JUMBO_CAPABLE) || tg3_flag(tp, 5780_CLASS))
7858 return;
7860 if (!tg3_flag(tp, 5705_PLUS))
7861 bdcache_maxcnt = TG3_SRAM_RX_JMB_BDCACHE_SIZE_5700;
7862 else
7863 bdcache_maxcnt = TG3_SRAM_RX_JMB_BDCACHE_SIZE_5717;
7865 host_rep_thresh = max_t(u32, tp->rx_jumbo_pending / 8, 1);
7867 val = min(bdcache_maxcnt / 2, host_rep_thresh);
7868 tw32(RCVBDI_JUMBO_THRESH, val);
7870 if (tg3_flag(tp, 57765_PLUS))
7871 tw32(JMB_REPLENISH_LWM, bdcache_maxcnt);
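/* Worked example of the threshold arithmetic above, assuming the
 * default rx_pending of 200: host_rep_thresh = max(200 / 8, 1) = 25,
 * so RCVBDI_STD_THRESH ends up as min(bdcache_maxcnt / 2,
 * rx_std_max_post, 25); roughly, the chip asks for replenishment by
 * the time 25 standard buffers have been consumed.
 */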
7874 /* tp->lock is held. */
7875 static int tg3_reset_hw(struct tg3 *tp, int reset_phy)
7877 u32 val, rdmac_mode;
7878 int i, err, limit;
7879 struct tg3_rx_prodring_set *tpr = &tp->napi[0].prodring;
7881 tg3_disable_ints(tp);
7883 tg3_stop_fw(tp);
7885 tg3_write_sig_pre_reset(tp, RESET_KIND_INIT);
7887 if (tg3_flag(tp, INIT_COMPLETE))
7888 tg3_abort_hw(tp, 1);
7890 /* Enable MAC control of LPI */
7891 if (tp->phy_flags & TG3_PHYFLG_EEE_CAP) {
7892 tw32_f(TG3_CPMU_EEE_LNKIDL_CTRL,
7893 TG3_CPMU_EEE_LNKIDL_PCIE_NL0 |
7894 TG3_CPMU_EEE_LNKIDL_UART_IDL);
7896 tw32_f(TG3_CPMU_EEE_CTRL,
7897 TG3_CPMU_EEE_CTRL_EXIT_20_1_US);
7899 val = TG3_CPMU_EEEMD_ERLY_L1_XIT_DET |
7900 TG3_CPMU_EEEMD_LPI_IN_TX |
7901 TG3_CPMU_EEEMD_LPI_IN_RX |
7902 TG3_CPMU_EEEMD_EEE_ENABLE;
7904 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5717)
7905 val |= TG3_CPMU_EEEMD_SND_IDX_DET_EN;
7907 if (tg3_flag(tp, ENABLE_APE))
7908 val |= TG3_CPMU_EEEMD_APE_TX_DET_EN;
7910 tw32_f(TG3_CPMU_EEE_MODE, val);
7912 tw32_f(TG3_CPMU_EEE_DBTMR1,
7913 TG3_CPMU_DBTMR1_PCIEXIT_2047US |
7914 TG3_CPMU_DBTMR1_LNKIDLE_2047US);
7916 tw32_f(TG3_CPMU_EEE_DBTMR2,
7917 TG3_CPMU_DBTMR2_APE_TX_2047US |
7918 TG3_CPMU_DBTMR2_TXIDXEQ_2047US);
7921 if (reset_phy)
7922 tg3_phy_reset(tp);
7924 err = tg3_chip_reset(tp);
7925 if (err)
7926 return err;
7928 tg3_write_sig_legacy(tp, RESET_KIND_INIT);
7930 if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5784_AX) {
7931 val = tr32(TG3_CPMU_CTRL);
7932 val &= ~(CPMU_CTRL_LINK_AWARE_MODE | CPMU_CTRL_LINK_IDLE_MODE);
7933 tw32(TG3_CPMU_CTRL, val);
7935 val = tr32(TG3_CPMU_LSPD_10MB_CLK);
7936 val &= ~CPMU_LSPD_10MB_MACCLK_MASK;
7937 val |= CPMU_LSPD_10MB_MACCLK_6_25;
7938 tw32(TG3_CPMU_LSPD_10MB_CLK, val);
7940 val = tr32(TG3_CPMU_LNK_AWARE_PWRMD);
7941 val &= ~CPMU_LNK_AWARE_MACCLK_MASK;
7942 val |= CPMU_LNK_AWARE_MACCLK_6_25;
7943 tw32(TG3_CPMU_LNK_AWARE_PWRMD, val);
7945 val = tr32(TG3_CPMU_HST_ACC);
7946 val &= ~CPMU_HST_ACC_MACCLK_MASK;
7947 val |= CPMU_HST_ACC_MACCLK_6_25;
7948 tw32(TG3_CPMU_HST_ACC, val);
7951 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780) {
7952 val = tr32(PCIE_PWR_MGMT_THRESH) & ~PCIE_PWR_MGMT_L1_THRESH_MSK;
7953 val |= PCIE_PWR_MGMT_EXT_ASPM_TMR_EN |
7954 PCIE_PWR_MGMT_L1_THRESH_4MS;
7955 tw32(PCIE_PWR_MGMT_THRESH, val);
7957 val = tr32(TG3_PCIE_EIDLE_DELAY) & ~TG3_PCIE_EIDLE_DELAY_MASK;
7958 tw32(TG3_PCIE_EIDLE_DELAY, val | TG3_PCIE_EIDLE_DELAY_13_CLKS);
7960 tw32(TG3_CORR_ERR_STAT, TG3_CORR_ERR_STAT_CLEAR);
7962 val = tr32(TG3_PCIE_LNKCTL) & ~TG3_PCIE_LNKCTL_L1_PLL_PD_EN;
7963 tw32(TG3_PCIE_LNKCTL, val | TG3_PCIE_LNKCTL_L1_PLL_PD_DIS);
7966 if (tg3_flag(tp, L1PLLPD_EN)) {
7967 u32 grc_mode = tr32(GRC_MODE);
7969 /* Access the lower 1K of PL PCIE block registers. */
7970 val = grc_mode & ~GRC_MODE_PCIE_PORT_MASK;
7971 tw32(GRC_MODE, val | GRC_MODE_PCIE_PL_SEL);
7973 val = tr32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_PL_LO_PHYCTL1);
7974 tw32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_PL_LO_PHYCTL1,
7975 val | TG3_PCIE_PL_LO_PHYCTL1_L1PLLPD_EN);
7977 tw32(GRC_MODE, grc_mode);
7980 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765) {
7981 if (tp->pci_chip_rev_id == CHIPREV_ID_57765_A0) {
7982 u32 grc_mode = tr32(GRC_MODE);
7984 /* Access the lower 1K of PL PCIE block registers. */
7985 val = grc_mode & ~GRC_MODE_PCIE_PORT_MASK;
7986 tw32(GRC_MODE, val | GRC_MODE_PCIE_PL_SEL);
7988 val = tr32(TG3_PCIE_TLDLPL_PORT +
7989 TG3_PCIE_PL_LO_PHYCTL5);
7990 tw32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_PL_LO_PHYCTL5,
7991 val | TG3_PCIE_PL_LO_PHYCTL5_DIS_L2CLKREQ);
7993 tw32(GRC_MODE, grc_mode);
7996 if (GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_57765_AX) {
7997 u32 grc_mode = tr32(GRC_MODE);
7999 /* Access the lower 1K of DL PCIE block registers. */
8000 val = grc_mode & ~GRC_MODE_PCIE_PORT_MASK;
8001 tw32(GRC_MODE, val | GRC_MODE_PCIE_DL_SEL);
8003 val = tr32(TG3_PCIE_TLDLPL_PORT +
8004 TG3_PCIE_DL_LO_FTSMAX);
8005 val &= ~TG3_PCIE_DL_LO_FTSMAX_MSK;
8006 tw32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_DL_LO_FTSMAX,
8007 val | TG3_PCIE_DL_LO_FTSMAX_VAL);
8009 tw32(GRC_MODE, grc_mode);
8012 val = tr32(TG3_CPMU_LSPD_10MB_CLK);
8013 val &= ~CPMU_LSPD_10MB_MACCLK_MASK;
8014 val |= CPMU_LSPD_10MB_MACCLK_6_25;
8015 tw32(TG3_CPMU_LSPD_10MB_CLK, val);
8018 /* This works around an issue with Athlon chipsets on
8019 * B3 tigon3 silicon. This bit has no effect on any
8020 * other revision. But do not set this on PCI Express
8021 * chips and don't even touch the clocks if the CPMU is present.
8022 */
8023 if (!tg3_flag(tp, CPMU_PRESENT)) {
8024 if (!tg3_flag(tp, PCI_EXPRESS))
8025 tp->pci_clock_ctrl |= CLOCK_CTRL_DELAY_PCI_GRANT;
8026 tw32_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl);
8029 if (tp->pci_chip_rev_id == CHIPREV_ID_5704_A0 &&
8030 tg3_flag(tp, PCIX_MODE)) {
8031 val = tr32(TG3PCI_PCISTATE);
8032 val |= PCISTATE_RETRY_SAME_DMA;
8033 tw32(TG3PCI_PCISTATE, val);
8036 if (tg3_flag(tp, ENABLE_APE)) {
8037 /* Allow reads and writes to the
8038 * APE register and memory space.
8039 */
8040 val = tr32(TG3PCI_PCISTATE);
8041 val |= PCISTATE_ALLOW_APE_CTLSPC_WR |
8042 PCISTATE_ALLOW_APE_SHMEM_WR |
8043 PCISTATE_ALLOW_APE_PSPACE_WR;
8044 tw32(TG3PCI_PCISTATE, val);
8047 if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5704_BX) {
8048 /* Enable some hw fixes. */
8049 val = tr32(TG3PCI_MSI_DATA);
8050 val |= (1 << 26) | (1 << 28) | (1 << 29);
8051 tw32(TG3PCI_MSI_DATA, val);
8054 /* Descriptor ring init may make accesses to the
8055 * NIC SRAM area to set up the TX descriptors, so we
8056 * can only do this after the hardware has been
8057 * successfully reset.
8058 */
8059 err = tg3_init_rings(tp);
8060 if (err)
8061 return err;
8063 if (tg3_flag(tp, 57765_PLUS)) {
8064 val = tr32(TG3PCI_DMA_RW_CTRL) &
8065 ~DMA_RWCTRL_DIS_CACHE_ALIGNMENT;
8066 if (tp->pci_chip_rev_id == CHIPREV_ID_57765_A0)
8067 val &= ~DMA_RWCTRL_CRDRDR_RDMA_MRRS_MSK;
8068 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_57765 &&
8069 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5717)
8070 val |= DMA_RWCTRL_TAGGED_STAT_WA;
8071 tw32(TG3PCI_DMA_RW_CTRL, val | tp->dma_rwctrl);
8072 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5784 &&
8073 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5761) {
8074 /* This value is determined during the probe time DMA
8075 * engine test, tg3_test_dma.
8076 */
8077 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
8080 tp->grc_mode &= ~(GRC_MODE_HOST_SENDBDS |
8081 GRC_MODE_4X_NIC_SEND_RINGS |
8082 GRC_MODE_NO_TX_PHDR_CSUM |
8083 GRC_MODE_NO_RX_PHDR_CSUM);
8084 tp->grc_mode |= GRC_MODE_HOST_SENDBDS;
8086 /* Pseudo-header checksum is done by hardware logic and not
8087 * the offload processors, so make the chip do the pseudo-
8088 * header checksums on receive. For transmit it is more
8089 * convenient to do the pseudo-header checksum in software
8090 * as Linux does that on transmit for us in all cases.
8091 */
8092 tp->grc_mode |= GRC_MODE_NO_TX_PHDR_CSUM;
8094 tw32(GRC_MODE,
8095 tp->grc_mode |
8096 (GRC_MODE_IRQ_ON_MAC_ATTN | GRC_MODE_HOST_STACKUP));
8098 /* Set up the timer prescaler register. The clock is always 66 MHz. */
8099 val = tr32(GRC_MISC_CFG);
8100 val &= ~0xff;
8101 val |= (65 << GRC_MISC_CFG_PRESCALAR_SHIFT);
8102 tw32(GRC_MISC_CFG, val);
8104 /* Initialize MBUF/DESC pool. */
8105 if (tg3_flag(tp, 5750_PLUS)) {
8106 /* Do nothing. */
8107 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5705) {
8108 tw32(BUFMGR_MB_POOL_ADDR, NIC_SRAM_MBUF_POOL_BASE);
8109 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704)
8110 tw32(BUFMGR_MB_POOL_SIZE, NIC_SRAM_MBUF_POOL_SIZE64);
8111 else
8112 tw32(BUFMGR_MB_POOL_SIZE, NIC_SRAM_MBUF_POOL_SIZE96);
8113 tw32(BUFMGR_DMA_DESC_POOL_ADDR, NIC_SRAM_DMA_DESC_POOL_BASE);
8114 tw32(BUFMGR_DMA_DESC_POOL_SIZE, NIC_SRAM_DMA_DESC_POOL_SIZE);
8115 } else if (tg3_flag(tp, TSO_CAPABLE)) {
8116 int fw_len;
8118 fw_len = tp->fw_len;
8119 fw_len = (fw_len + (0x80 - 1)) & ~(0x80 - 1);
8120 tw32(BUFMGR_MB_POOL_ADDR,
8121 NIC_SRAM_MBUF_POOL_BASE5705 + fw_len);
8122 tw32(BUFMGR_MB_POOL_SIZE,
8123 NIC_SRAM_MBUF_POOL_SIZE5705 - fw_len - 0xa00);
8126 if (tp->dev->mtu <= ETH_DATA_LEN) {
8127 tw32(BUFMGR_MB_RDMA_LOW_WATER,
8128 tp->bufmgr_config.mbuf_read_dma_low_water);
8129 tw32(BUFMGR_MB_MACRX_LOW_WATER,
8130 tp->bufmgr_config.mbuf_mac_rx_low_water);
8131 tw32(BUFMGR_MB_HIGH_WATER,
8132 tp->bufmgr_config.mbuf_high_water);
8133 } else {
8134 tw32(BUFMGR_MB_RDMA_LOW_WATER,
8135 tp->bufmgr_config.mbuf_read_dma_low_water_jumbo);
8136 tw32(BUFMGR_MB_MACRX_LOW_WATER,
8137 tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo);
8138 tw32(BUFMGR_MB_HIGH_WATER,
8139 tp->bufmgr_config.mbuf_high_water_jumbo);
8141 tw32(BUFMGR_DMA_LOW_WATER,
8142 tp->bufmgr_config.dma_low_water);
8143 tw32(BUFMGR_DMA_HIGH_WATER,
8144 tp->bufmgr_config.dma_high_water);
8146 val = BUFMGR_MODE_ENABLE | BUFMGR_MODE_ATTN_ENABLE;
8147 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719)
8148 val |= BUFMGR_MODE_NO_TX_UNDERRUN;
8149 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
8150 tp->pci_chip_rev_id == CHIPREV_ID_5719_A0 ||
8151 tp->pci_chip_rev_id == CHIPREV_ID_5720_A0)
8152 val |= BUFMGR_MODE_MBLOW_ATTN_ENAB;
8153 tw32(BUFMGR_MODE, val);
8154 for (i = 0; i < 2000; i++) {
8155 if (tr32(BUFMGR_MODE) & BUFMGR_MODE_ENABLE)
8156 break;
8157 udelay(10);
8159 if (i >= 2000) {
8160 netdev_err(tp->dev, "%s cannot enable BUFMGR\n", __func__);
8161 return -ENODEV;
8164 if (tp->pci_chip_rev_id == CHIPREV_ID_5906_A1)
8165 tw32(ISO_PKT_TX, (tr32(ISO_PKT_TX) & ~0x3) | 0x2);
8167 tg3_setup_rxbd_thresholds(tp);
8169 /* Initialize TG3_BDINFO's at:
8170 * RCVDBDI_STD_BD: standard eth size rx ring
8171 * RCVDBDI_JUMBO_BD: jumbo frame rx ring
8172 * RCVDBDI_MINI_BD: small frame rx ring (??? does not work)
8174 * like so:
8175 * TG3_BDINFO_HOST_ADDR: high/low parts of DMA address of ring
8176 * TG3_BDINFO_MAXLEN_FLAGS: (rx max buffer size << 16) |
8177 * ring attribute flags
8178 * TG3_BDINFO_NIC_ADDR: location of descriptors in nic SRAM
8180 * Standard receive ring @ NIC_SRAM_RX_BUFFER_DESC, 512 entries.
8181 * Jumbo receive ring @ NIC_SRAM_RX_JUMBO_BUFFER_DESC, 256 entries.
8183 * The size of each ring is fixed in the firmware, but the location is
8184 * configurable.
8185 */
8186 tw32(RCVDBDI_STD_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH,
8187 ((u64) tpr->rx_std_mapping >> 32));
8188 tw32(RCVDBDI_STD_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW,
8189 ((u64) tpr->rx_std_mapping & 0xffffffff));
8190 if (!tg3_flag(tp, 5717_PLUS))
8191 tw32(RCVDBDI_STD_BD + TG3_BDINFO_NIC_ADDR,
8192 NIC_SRAM_RX_BUFFER_DESC);
8194 /* Disable the mini ring */
8195 if (!tg3_flag(tp, 5705_PLUS))
8196 tw32(RCVDBDI_MINI_BD + TG3_BDINFO_MAXLEN_FLAGS,
8197 BDINFO_FLAGS_DISABLED);
8199 /* Program the jumbo buffer descriptor ring control
8200 * blocks on those devices that have them.
8201 */
8202 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
8203 (tg3_flag(tp, JUMBO_CAPABLE) && !tg3_flag(tp, 5780_CLASS))) {
8205 if (tg3_flag(tp, JUMBO_RING_ENABLE)) {
8206 tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH,
8207 ((u64) tpr->rx_jmb_mapping >> 32));
8208 tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW,
8209 ((u64) tpr->rx_jmb_mapping & 0xffffffff));
8210 val = TG3_RX_JMB_RING_SIZE(tp) <<
8211 BDINFO_FLAGS_MAXLEN_SHIFT;
8212 tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_MAXLEN_FLAGS,
8213 val | BDINFO_FLAGS_USE_EXT_RECV);
8214 if (!tg3_flag(tp, USE_JUMBO_BDFLAG) ||
8215 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765)
8216 tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_NIC_ADDR,
8217 NIC_SRAM_RX_JUMBO_BUFFER_DESC);
8218 } else {
8219 tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_MAXLEN_FLAGS,
8220 BDINFO_FLAGS_DISABLED);
8223 if (tg3_flag(tp, 57765_PLUS)) {
8224 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765)
8225 val = TG3_RX_STD_MAX_SIZE_5700;
8226 else
8227 val = TG3_RX_STD_MAX_SIZE_5717;
8228 val <<= BDINFO_FLAGS_MAXLEN_SHIFT;
8229 val |= (TG3_RX_STD_DMA_SZ << 2);
8230 } else
8231 val = TG3_RX_STD_DMA_SZ << BDINFO_FLAGS_MAXLEN_SHIFT;
8232 } else
8233 val = TG3_RX_STD_MAX_SIZE_5700 << BDINFO_FLAGS_MAXLEN_SHIFT;
8235 tw32(RCVDBDI_STD_BD + TG3_BDINFO_MAXLEN_FLAGS, val);
8237 tpr->rx_std_prod_idx = tp->rx_pending;
8238 tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG, tpr->rx_std_prod_idx);
8240 tpr->rx_jmb_prod_idx =
8241 tg3_flag(tp, JUMBO_RING_ENABLE) ? tp->rx_jumbo_pending : 0;
8242 tw32_rx_mbox(TG3_RX_JMB_PROD_IDX_REG, tpr->rx_jmb_prod_idx);
8244 tg3_rings_reset(tp);
8246 /* Initialize MAC address and backoff seed. */
8247 __tg3_set_mac_addr(tp, 0);
8249 /* MTU + ethernet header + FCS + optional VLAN tag */
8250 tw32(MAC_RX_MTU_SIZE,
8251 tp->dev->mtu + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN);
8253 /* The slot time is changed by tg3_setup_phy if we
8254 * run at gigabit with half duplex.
8255 */
8256 val = (2 << TX_LENGTHS_IPG_CRS_SHIFT) |
8257 (6 << TX_LENGTHS_IPG_SHIFT) |
8258 (32 << TX_LENGTHS_SLOT_TIME_SHIFT);
8260 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720)
8261 val |= tr32(MAC_TX_LENGTHS) &
8262 (TX_LENGTHS_JMB_FRM_LEN_MSK |
8263 TX_LENGTHS_CNT_DWN_VAL_MSK);
8265 tw32(MAC_TX_LENGTHS, val);
8267 /* Receive rules. */
8268 tw32(MAC_RCV_RULE_CFG, RCV_RULE_CFG_DEFAULT_CLASS);
8269 tw32(RCVLPC_CONFIG, 0x0181);
8271 /* Calculate RDMAC_MODE setting early; we need it to determine
8272 * the RCVLPC_STATE_ENABLE mask.
8273 */
8274 rdmac_mode = (RDMAC_MODE_ENABLE | RDMAC_MODE_TGTABORT_ENAB |
8275 RDMAC_MODE_MSTABORT_ENAB | RDMAC_MODE_PARITYERR_ENAB |
8276 RDMAC_MODE_ADDROFLOW_ENAB | RDMAC_MODE_FIFOOFLOW_ENAB |
8277 RDMAC_MODE_FIFOURUN_ENAB | RDMAC_MODE_FIFOOREAD_ENAB |
8278 RDMAC_MODE_LNGREAD_ENAB);
8280 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717)
8281 rdmac_mode |= RDMAC_MODE_MULT_DMA_RD_DIS;
8283 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
8284 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
8285 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780)
8286 rdmac_mode |= RDMAC_MODE_BD_SBD_CRPT_ENAB |
8287 RDMAC_MODE_MBUF_RBD_CRPT_ENAB |
8288 RDMAC_MODE_MBUF_SBD_CRPT_ENAB;
8290 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
8291 tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) {
8292 if (tg3_flag(tp, TSO_CAPABLE) &&
8293 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
8294 rdmac_mode |= RDMAC_MODE_FIFO_SIZE_128;
8295 } else if (!(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH) &&
8296 !tg3_flag(tp, IS_5788)) {
8297 rdmac_mode |= RDMAC_MODE_FIFO_LONG_BURST;
8298 }
8299 }
8301 if (tg3_flag(tp, PCI_EXPRESS))
8302 rdmac_mode |= RDMAC_MODE_FIFO_LONG_BURST;
8304 if (tg3_flag(tp, HW_TSO_1) ||
8305 tg3_flag(tp, HW_TSO_2) ||
8306 tg3_flag(tp, HW_TSO_3))
8307 rdmac_mode |= RDMAC_MODE_IPV4_LSO_EN;
8309 if (tg3_flag(tp, 57765_PLUS) ||
8310 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
8311 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780)
8312 rdmac_mode |= RDMAC_MODE_IPV6_LSO_EN;
8314 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720)
8315 rdmac_mode |= tr32(RDMAC_MODE) & RDMAC_MODE_H2BNC_VLAN_DET;
8317 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
8318 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
8319 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
8320 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780 ||
8321 tg3_flag(tp, 57765_PLUS)) {
8322 val = tr32(TG3_RDMA_RSRVCTRL_REG);
8323 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
8324 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720) {
8325 val &= ~(TG3_RDMA_RSRVCTRL_TXMRGN_MASK |
8326 TG3_RDMA_RSRVCTRL_FIFO_LWM_MASK |
8327 TG3_RDMA_RSRVCTRL_FIFO_HWM_MASK);
8328 val |= TG3_RDMA_RSRVCTRL_TXMRGN_320B |
8329 TG3_RDMA_RSRVCTRL_FIFO_LWM_1_5K |
8330 TG3_RDMA_RSRVCTRL_FIFO_HWM_1_5K;
8332 tw32(TG3_RDMA_RSRVCTRL_REG,
8333 val | TG3_RDMA_RSRVCTRL_FIFO_OFLW_FIX);
8336 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
8337 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720) {
8338 val = tr32(TG3_LSO_RD_DMA_CRPTEN_CTRL);
8339 tw32(TG3_LSO_RD_DMA_CRPTEN_CTRL, val |
8340 TG3_LSO_RD_DMA_CRPTEN_CTRL_BLEN_BD_4K |
8341 TG3_LSO_RD_DMA_CRPTEN_CTRL_BLEN_LSO_4K);
8344 /* Receive/send statistics. */
8345 if (tg3_flag(tp, 5750_PLUS)) {
8346 val = tr32(RCVLPC_STATS_ENABLE);
8347 val &= ~RCVLPC_STATSENAB_DACK_FIX;
8348 tw32(RCVLPC_STATS_ENABLE, val);
8349 } else if ((rdmac_mode & RDMAC_MODE_FIFO_SIZE_128) &&
8350 tg3_flag(tp, TSO_CAPABLE)) {
8351 val = tr32(RCVLPC_STATS_ENABLE);
8352 val &= ~RCVLPC_STATSENAB_LNGBRST_RFIX;
8353 tw32(RCVLPC_STATS_ENABLE, val);
8354 } else {
8355 tw32(RCVLPC_STATS_ENABLE, 0xffffff);
8357 tw32(RCVLPC_STATSCTRL, RCVLPC_STATSCTRL_ENABLE);
8358 tw32(SNDDATAI_STATSENAB, 0xffffff);
8359 tw32(SNDDATAI_STATSCTRL,
8360 (SNDDATAI_SCTRL_ENABLE |
8361 SNDDATAI_SCTRL_FASTUPD));
8363 /* Setup host coalescing engine. */
8364 tw32(HOSTCC_MODE, 0);
8365 for (i = 0; i < 2000; i++) {
8366 if (!(tr32(HOSTCC_MODE) & HOSTCC_MODE_ENABLE))
8367 break;
8368 udelay(10);
8371 __tg3_set_coalesce(tp, &tp->coal);
8373 if (!tg3_flag(tp, 5705_PLUS)) {
8374 /* Status/statistics block address. See tg3_timer,
8375 * the tg3_periodic_fetch_stats call there, and
8376 * tg3_get_stats to see how this works for 5705/5750 chips.
8377 */
8378 tw32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH,
8379 ((u64) tp->stats_mapping >> 32));
8380 tw32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW,
8381 ((u64) tp->stats_mapping & 0xffffffff));
8382 tw32(HOSTCC_STATS_BLK_NIC_ADDR, NIC_SRAM_STATS_BLK);
8384 tw32(HOSTCC_STATUS_BLK_NIC_ADDR, NIC_SRAM_STATUS_BLK);
8386 /* Clear statistics and status block memory areas */
8387 for (i = NIC_SRAM_STATS_BLK;
8388 i < NIC_SRAM_STATUS_BLK + TG3_HW_STATUS_SIZE;
8389 i += sizeof(u32)) {
8390 tg3_write_mem(tp, i, 0);
8391 udelay(40);
8392 }
8393 }
8395 tw32(HOSTCC_MODE, HOSTCC_MODE_ENABLE | tp->coalesce_mode);
8397 tw32(RCVCC_MODE, RCVCC_MODE_ENABLE | RCVCC_MODE_ATTN_ENABLE);
8398 tw32(RCVLPC_MODE, RCVLPC_MODE_ENABLE);
8399 if (!tg3_flag(tp, 5705_PLUS))
8400 tw32(RCVLSC_MODE, RCVLSC_MODE_ENABLE | RCVLSC_MODE_ATTN_ENABLE);
8402 if (tp->phy_flags & TG3_PHYFLG_MII_SERDES) {
8403 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
8404 /* reset to prevent losing 1st rx packet intermittently */
8405 tw32_f(MAC_RX_MODE, RX_MODE_RESET);
8406 udelay(10);
8409 if (tg3_flag(tp, ENABLE_APE))
8410 tp->mac_mode = MAC_MODE_APE_TX_EN | MAC_MODE_APE_RX_EN;
8411 else
8412 tp->mac_mode = 0;
8413 tp->mac_mode |= MAC_MODE_TXSTAT_ENABLE | MAC_MODE_RXSTAT_ENABLE |
8414 MAC_MODE_TDE_ENABLE | MAC_MODE_RDE_ENABLE | MAC_MODE_FHDE_ENABLE;
8415 if (!tg3_flag(tp, 5705_PLUS) &&
8416 !(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
8417 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700)
8418 tp->mac_mode |= MAC_MODE_LINK_POLARITY;
8419 tw32_f(MAC_MODE, tp->mac_mode | MAC_MODE_RXSTAT_CLEAR | MAC_MODE_TXSTAT_CLEAR);
8420 udelay(40);
8422 /* tp->grc_local_ctrl is partially set up during tg3_get_invariants().
8423 * If TG3_FLAG_IS_NIC is zero, we should read the
8424 * register to preserve the GPIO settings for LOMs. The GPIOs,
8425 * whether used as inputs or outputs, are set by boot code after
8426 * reset.
8427 */
8428 if (!tg3_flag(tp, IS_NIC)) {
8429 u32 gpio_mask;
8431 gpio_mask = GRC_LCLCTRL_GPIO_OE0 | GRC_LCLCTRL_GPIO_OE1 |
8432 GRC_LCLCTRL_GPIO_OE2 | GRC_LCLCTRL_GPIO_OUTPUT0 |
8433 GRC_LCLCTRL_GPIO_OUTPUT1 | GRC_LCLCTRL_GPIO_OUTPUT2;
8435 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752)
8436 gpio_mask |= GRC_LCLCTRL_GPIO_OE3 |
8437 GRC_LCLCTRL_GPIO_OUTPUT3;
8439 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755)
8440 gpio_mask |= GRC_LCLCTRL_GPIO_UART_SEL;
8442 tp->grc_local_ctrl &= ~gpio_mask;
8443 tp->grc_local_ctrl |= tr32(GRC_LOCAL_CTRL) & gpio_mask;
8445 /* GPIO1 must be driven high for eeprom write protect */
8446 if (tg3_flag(tp, EEPROM_WRITE_PROT))
8447 tp->grc_local_ctrl |= (GRC_LCLCTRL_GPIO_OE1 |
8448 GRC_LCLCTRL_GPIO_OUTPUT1);
8450 tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
8451 udelay(100);
8453 if (tg3_flag(tp, USING_MSIX) && tp->irq_cnt > 1) {
8454 val = tr32(MSGINT_MODE);
8455 val |= MSGINT_MODE_MULTIVEC_EN | MSGINT_MODE_ENABLE;
8456 tw32(MSGINT_MODE, val);
8459 if (!tg3_flag(tp, 5705_PLUS)) {
8460 tw32_f(DMAC_MODE, DMAC_MODE_ENABLE);
8461 udelay(40);
8464 val = (WDMAC_MODE_ENABLE | WDMAC_MODE_TGTABORT_ENAB |
8465 WDMAC_MODE_MSTABORT_ENAB | WDMAC_MODE_PARITYERR_ENAB |
8466 WDMAC_MODE_ADDROFLOW_ENAB | WDMAC_MODE_FIFOOFLOW_ENAB |
8467 WDMAC_MODE_FIFOURUN_ENAB | WDMAC_MODE_FIFOOREAD_ENAB |
8468 WDMAC_MODE_LNGREAD_ENAB);
8470 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
8471 tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) {
8472 if (tg3_flag(tp, TSO_CAPABLE) &&
8473 (tp->pci_chip_rev_id == CHIPREV_ID_5705_A1 ||
8474 tp->pci_chip_rev_id == CHIPREV_ID_5705_A2)) {
8475 /* nothing */
8476 } else if (!(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH) &&
8477 !tg3_flag(tp, IS_5788)) {
8478 val |= WDMAC_MODE_RX_ACCEL;
8479 }
8480 }
8482 /* Enable host coalescing bug fix */
8483 if (tg3_flag(tp, 5755_PLUS))
8484 val |= WDMAC_MODE_STATUS_TAG_FIX;
8486 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
8487 val |= WDMAC_MODE_BURST_ALL_DATA;
8489 tw32_f(WDMAC_MODE, val);
8490 udelay(40);
8492 if (tg3_flag(tp, PCIX_MODE)) {
8493 u16 pcix_cmd;
8495 pci_read_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
8496 &pcix_cmd);
8497 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703) {
8498 pcix_cmd &= ~PCI_X_CMD_MAX_READ;
8499 pcix_cmd |= PCI_X_CMD_READ_2K;
8500 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
8501 pcix_cmd &= ~(PCI_X_CMD_MAX_SPLIT | PCI_X_CMD_MAX_READ);
8502 pcix_cmd |= PCI_X_CMD_READ_2K;
8504 pci_write_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
8505 pcix_cmd);
8508 tw32_f(RDMAC_MODE, rdmac_mode);
8509 udelay(40);
8511 tw32(RCVDCC_MODE, RCVDCC_MODE_ENABLE | RCVDCC_MODE_ATTN_ENABLE);
8512 if (!tg3_flag(tp, 5705_PLUS))
8513 tw32(MBFREE_MODE, MBFREE_MODE_ENABLE);
8515 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
8516 tw32(SNDDATAC_MODE,
8517 SNDDATAC_MODE_ENABLE | SNDDATAC_MODE_CDELAY);
8518 else
8519 tw32(SNDDATAC_MODE, SNDDATAC_MODE_ENABLE);
8521 tw32(SNDBDC_MODE, SNDBDC_MODE_ENABLE | SNDBDC_MODE_ATTN_ENABLE);
8522 tw32(RCVBDI_MODE, RCVBDI_MODE_ENABLE | RCVBDI_MODE_RCB_ATTN_ENAB);
8523 val = RCVDBDI_MODE_ENABLE | RCVDBDI_MODE_INV_RING_SZ;
8524 if (tg3_flag(tp, LRG_PROD_RING_CAP))
8525 val |= RCVDBDI_MODE_LRG_RING_SZ;
8526 tw32(RCVDBDI_MODE, val);
8527 tw32(SNDDATAI_MODE, SNDDATAI_MODE_ENABLE);
8528 if (tg3_flag(tp, HW_TSO_1) ||
8529 tg3_flag(tp, HW_TSO_2) ||
8530 tg3_flag(tp, HW_TSO_3))
8531 tw32(SNDDATAI_MODE, SNDDATAI_MODE_ENABLE | 0x8);
8532 val = SNDBDI_MODE_ENABLE | SNDBDI_MODE_ATTN_ENABLE;
8533 if (tg3_flag(tp, ENABLE_TSS))
8534 val |= SNDBDI_MODE_MULTI_TXQ_EN;
8535 tw32(SNDBDI_MODE, val);
8536 tw32(SNDBDS_MODE, SNDBDS_MODE_ENABLE | SNDBDS_MODE_ATTN_ENABLE);
8538 if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0) {
8539 err = tg3_load_5701_a0_firmware_fix(tp);
8540 if (err)
8541 return err;
8544 if (tg3_flag(tp, TSO_CAPABLE)) {
8545 err = tg3_load_tso_firmware(tp);
8546 if (err)
8547 return err;
8550 tp->tx_mode = TX_MODE_ENABLE;
8552 if (tg3_flag(tp, 5755_PLUS) ||
8553 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
8554 tp->tx_mode |= TX_MODE_MBUF_LOCKUP_FIX;
8556 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720) {
8557 val = TX_MODE_JMB_FRM_LEN | TX_MODE_CNT_DN_MODE;
8558 tp->tx_mode &= ~val;
8559 tp->tx_mode |= tr32(MAC_TX_MODE) & val;
8562 tw32_f(MAC_TX_MODE, tp->tx_mode);
8563 udelay(100);
8565 if (tg3_flag(tp, ENABLE_RSS)) {
8566 u32 reg = MAC_RSS_INDIR_TBL_0;
8567 u8 *ent = (u8 *)&val;
8569 /* Setup the indirection table */
8570 for (i = 0; i < TG3_RSS_INDIR_TBL_SIZE; i++) {
8571 int idx = i % sizeof(val);
8573 ent[idx] = i % (tp->irq_cnt - 1);
8574 if (idx == sizeof(val) - 1) {
8575 tw32(reg, val);
8576 reg += 4;
8577 }
8578 }
8580 /* Setup the "secret" hash key. */
8581 tw32(MAC_RSS_HASH_KEY_0, 0x5f865437);
8582 tw32(MAC_RSS_HASH_KEY_1, 0xe4ac62cc);
8583 tw32(MAC_RSS_HASH_KEY_2, 0x50103a45);
8584 tw32(MAC_RSS_HASH_KEY_3, 0x36621985);
8585 tw32(MAC_RSS_HASH_KEY_4, 0xbf14c0e8);
8586 tw32(MAC_RSS_HASH_KEY_5, 0x1bc27a1e);
8587 tw32(MAC_RSS_HASH_KEY_6, 0x84f4b556);
8588 tw32(MAC_RSS_HASH_KEY_7, 0x094ea6fe);
8589 tw32(MAC_RSS_HASH_KEY_8, 0x7dda01e7);
8590 tw32(MAC_RSS_HASH_KEY_9, 0xc04d7481);
8591 }
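/* Worked example: with five vectors (irq_cnt == 5, i.e. one vector
 * for link events plus four rx rings), the bytes written above cycle
 * 0,1,2,3,0,1,... across all TG3_RSS_INDIR_TBL_SIZE entries, so
 * hashed flows spread evenly over the four rx rings.
 */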
8593 tp->rx_mode = RX_MODE_ENABLE;
8594 if (tg3_flag(tp, 5755_PLUS))
8595 tp->rx_mode |= RX_MODE_IPV6_CSUM_ENABLE;
8597 if (tg3_flag(tp, ENABLE_RSS))
8598 tp->rx_mode |= RX_MODE_RSS_ENABLE |
8599 RX_MODE_RSS_ITBL_HASH_BITS_7 |
8600 RX_MODE_RSS_IPV6_HASH_EN |
8601 RX_MODE_RSS_TCP_IPV6_HASH_EN |
8602 RX_MODE_RSS_IPV4_HASH_EN |
8603 RX_MODE_RSS_TCP_IPV4_HASH_EN;
8605 tw32_f(MAC_RX_MODE, tp->rx_mode);
8606 udelay(10);
8608 tw32(MAC_LED_CTRL, tp->led_ctrl);
8610 tw32(MAC_MI_STAT, MAC_MI_STAT_LNKSTAT_ATTN_ENAB);
8611 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
8612 tw32_f(MAC_RX_MODE, RX_MODE_RESET);
8613 udelay(10);
8615 tw32_f(MAC_RX_MODE, tp->rx_mode);
8616 udelay(10);
8618 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
8619 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) &&
8620 !(tp->phy_flags & TG3_PHYFLG_SERDES_PREEMPHASIS)) {
8621 /* Set drive transmission level to 1.2V */
8622 /* only if the signal pre-emphasis bit is not set */
8623 val = tr32(MAC_SERDES_CFG);
8624 val &= 0xfffff000;
8625 val |= 0x880;
8626 tw32(MAC_SERDES_CFG, val);
8628 if (tp->pci_chip_rev_id == CHIPREV_ID_5703_A1)
8629 tw32(MAC_SERDES_CFG, 0x616000);
8632 /* Prevent chip from dropping frames when flow control
8633 * is enabled.
8634 */
8635 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765)
8636 val = 1;
8637 else
8638 val = 2;
8639 tw32_f(MAC_LOW_WMARK_MAX_RX_FRAME, val);
8641 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 &&
8642 (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)) {
8643 /* Use hardware link auto-negotiation */
8644 tg3_flag_set(tp, HW_AUTONEG);
8647 if ((tp->phy_flags & TG3_PHYFLG_MII_SERDES) &&
8648 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714)) {
8649 u32 tmp;
8651 tmp = tr32(SERDES_RX_CTRL);
8652 tw32(SERDES_RX_CTRL, tmp | SERDES_RX_SIG_DETECT);
8653 tp->grc_local_ctrl &= ~GRC_LCLCTRL_USE_EXT_SIG_DETECT;
8654 tp->grc_local_ctrl |= GRC_LCLCTRL_USE_SIG_DETECT;
8655 tw32(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
8658 if (!tg3_flag(tp, USE_PHYLIB)) {
8659 if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) {
8660 tp->phy_flags &= ~TG3_PHYFLG_IS_LOW_POWER;
8661 tp->link_config.speed = tp->link_config.orig_speed;
8662 tp->link_config.duplex = tp->link_config.orig_duplex;
8663 tp->link_config.autoneg = tp->link_config.orig_autoneg;
8666 err = tg3_setup_phy(tp, 0);
8667 if (err)
8668 return err;
8670 if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
8671 !(tp->phy_flags & TG3_PHYFLG_IS_FET)) {
8672 u32 tmp;
8674 /* Clear CRC stats. */
8675 if (!tg3_readphy(tp, MII_TG3_TEST1, &tmp)) {
8676 tg3_writephy(tp, MII_TG3_TEST1,
8677 tmp | MII_TG3_TEST1_CRC_EN);
8678 tg3_readphy(tp, MII_TG3_RXR_COUNTERS, &tmp);
8679 }
8680 }
8681 }
8683 __tg3_set_rx_mode(tp->dev);
8685 /* Initialize receive rules. */
8686 tw32(MAC_RCV_RULE_0, 0xc2000000 & RCV_RULE_DISABLE_MASK);
8687 tw32(MAC_RCV_VALUE_0, 0xffffffff & RCV_RULE_DISABLE_MASK);
8688 tw32(MAC_RCV_RULE_1, 0x86000004 & RCV_RULE_DISABLE_MASK);
8689 tw32(MAC_RCV_VALUE_1, 0xffffffff & RCV_RULE_DISABLE_MASK);
8691 if (tg3_flag(tp, 5705_PLUS) && !tg3_flag(tp, 5780_CLASS))
8692 limit = 8;
8693 else
8694 limit = 16;
8695 if (tg3_flag(tp, ENABLE_ASF))
8696 limit -= 4;
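/* The switch below falls through deliberately: starting at 'limit'
 * it zeroes every unused rule/value pair from the top down. Slots 0
 * and 1 hold the rules programmed above; slots 2 and 3 are left
 * untouched (their writes are commented out).
 */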
8697 switch (limit) {
8698 case 16:
8699 tw32(MAC_RCV_RULE_15, 0); tw32(MAC_RCV_VALUE_15, 0);
8700 case 15:
8701 tw32(MAC_RCV_RULE_14, 0); tw32(MAC_RCV_VALUE_14, 0);
8702 case 14:
8703 tw32(MAC_RCV_RULE_13, 0); tw32(MAC_RCV_VALUE_13, 0);
8704 case 13:
8705 tw32(MAC_RCV_RULE_12, 0); tw32(MAC_RCV_VALUE_12, 0);
8706 case 12:
8707 tw32(MAC_RCV_RULE_11, 0); tw32(MAC_RCV_VALUE_11, 0);
8708 case 11:
8709 tw32(MAC_RCV_RULE_10, 0); tw32(MAC_RCV_VALUE_10, 0);
8710 case 10:
8711 tw32(MAC_RCV_RULE_9, 0); tw32(MAC_RCV_VALUE_9, 0);
8712 case 9:
8713 tw32(MAC_RCV_RULE_8, 0); tw32(MAC_RCV_VALUE_8, 0);
8714 case 8:
8715 tw32(MAC_RCV_RULE_7, 0); tw32(MAC_RCV_VALUE_7, 0);
8716 case 7:
8717 tw32(MAC_RCV_RULE_6, 0); tw32(MAC_RCV_VALUE_6, 0);
8718 case 6:
8719 tw32(MAC_RCV_RULE_5, 0); tw32(MAC_RCV_VALUE_5, 0);
8720 case 5:
8721 tw32(MAC_RCV_RULE_4, 0); tw32(MAC_RCV_VALUE_4, 0);
8722 case 4:
8723 /* tw32(MAC_RCV_RULE_3, 0); tw32(MAC_RCV_VALUE_3, 0); */
8724 case 3:
8725 /* tw32(MAC_RCV_RULE_2, 0); tw32(MAC_RCV_VALUE_2, 0); */
8726 case 2:
8727 case 1:
8729 default:
8730 break;
8731 }
8733 if (tg3_flag(tp, ENABLE_APE))
8734 /* Write our heartbeat update interval to APE. */
8735 tg3_ape_write32(tp, TG3_APE_HOST_HEARTBEAT_INT_MS,
8736 APE_HOST_HEARTBEAT_INT_DISABLE);
8738 tg3_write_sig_post_reset(tp, RESET_KIND_INIT);
8740 return 0;
8741 }
8743 /* Called at device open time to get the chip ready for
8744 * packet processing. Invoked with tp->lock held.
8745 */
8746 static int tg3_init_hw(struct tg3 *tp, int reset_phy)
8747 {
8748 tg3_switch_clocks(tp);
8750 tw32(TG3PCI_MEM_WIN_BASE_ADDR, 0);
8752 return tg3_reset_hw(tp, reset_phy);
8753 }
8755 #define TG3_STAT_ADD32(PSTAT, REG) \
8756 do { u32 __val = tr32(REG); \
8757 (PSTAT)->low += __val; \
8758 if ((PSTAT)->low < __val) \
8759 (PSTAT)->high += 1; \
8760 } while (0)
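/* TG3_STAT_ADD32 accumulates a 32-bit hardware counter into a 64-bit
 * tg3_stat64_t: after the addition, (PSTAT)->low < __val can only be
 * true if the 32-bit sum wrapped, so the carry into the high word is
 * propagated by hand.
 */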
8762 static void tg3_periodic_fetch_stats(struct tg3 *tp)
8764 struct tg3_hw_stats *sp = tp->hw_stats;
8766 if (!netif_carrier_ok(tp->dev))
8767 return;
8769 TG3_STAT_ADD32(&sp->tx_octets, MAC_TX_STATS_OCTETS);
8770 TG3_STAT_ADD32(&sp->tx_collisions, MAC_TX_STATS_COLLISIONS);
8771 TG3_STAT_ADD32(&sp->tx_xon_sent, MAC_TX_STATS_XON_SENT);
8772 TG3_STAT_ADD32(&sp->tx_xoff_sent, MAC_TX_STATS_XOFF_SENT);
8773 TG3_STAT_ADD32(&sp->tx_mac_errors, MAC_TX_STATS_MAC_ERRORS);
8774 TG3_STAT_ADD32(&sp->tx_single_collisions, MAC_TX_STATS_SINGLE_COLLISIONS);
8775 TG3_STAT_ADD32(&sp->tx_mult_collisions, MAC_TX_STATS_MULT_COLLISIONS);
8776 TG3_STAT_ADD32(&sp->tx_deferred, MAC_TX_STATS_DEFERRED);
8777 TG3_STAT_ADD32(&sp->tx_excessive_collisions, MAC_TX_STATS_EXCESSIVE_COL);
8778 TG3_STAT_ADD32(&sp->tx_late_collisions, MAC_TX_STATS_LATE_COL);
8779 TG3_STAT_ADD32(&sp->tx_ucast_packets, MAC_TX_STATS_UCAST);
8780 TG3_STAT_ADD32(&sp->tx_mcast_packets, MAC_TX_STATS_MCAST);
8781 TG3_STAT_ADD32(&sp->tx_bcast_packets, MAC_TX_STATS_BCAST);
8783 TG3_STAT_ADD32(&sp->rx_octets, MAC_RX_STATS_OCTETS);
8784 TG3_STAT_ADD32(&sp->rx_fragments, MAC_RX_STATS_FRAGMENTS);
8785 TG3_STAT_ADD32(&sp->rx_ucast_packets, MAC_RX_STATS_UCAST);
8786 TG3_STAT_ADD32(&sp->rx_mcast_packets, MAC_RX_STATS_MCAST);
8787 TG3_STAT_ADD32(&sp->rx_bcast_packets, MAC_RX_STATS_BCAST);
8788 TG3_STAT_ADD32(&sp->rx_fcs_errors, MAC_RX_STATS_FCS_ERRORS);
8789 TG3_STAT_ADD32(&sp->rx_align_errors, MAC_RX_STATS_ALIGN_ERRORS);
8790 TG3_STAT_ADD32(&sp->rx_xon_pause_rcvd, MAC_RX_STATS_XON_PAUSE_RECVD);
8791 TG3_STAT_ADD32(&sp->rx_xoff_pause_rcvd, MAC_RX_STATS_XOFF_PAUSE_RECVD);
8792 TG3_STAT_ADD32(&sp->rx_mac_ctrl_rcvd, MAC_RX_STATS_MAC_CTRL_RECVD);
8793 TG3_STAT_ADD32(&sp->rx_xoff_entered, MAC_RX_STATS_XOFF_ENTERED);
8794 TG3_STAT_ADD32(&sp->rx_frame_too_long_errors, MAC_RX_STATS_FRAME_TOO_LONG);
8795 TG3_STAT_ADD32(&sp->rx_jabbers, MAC_RX_STATS_JABBERS);
8796 TG3_STAT_ADD32(&sp->rx_undersize_packets, MAC_RX_STATS_UNDERSIZE);
8798 TG3_STAT_ADD32(&sp->rxbds_empty, RCVLPC_NO_RCV_BD_CNT);
8799 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5717) {
8800 TG3_STAT_ADD32(&sp->rx_discards, RCVLPC_IN_DISCARDS_CNT);
8801 } else {
8802 u32 val = tr32(HOSTCC_FLOW_ATTN);
8803 val = (val & HOSTCC_FLOW_ATTN_MBUF_LWM) ? 1 : 0;
8804 if (val) {
8805 tw32(HOSTCC_FLOW_ATTN, HOSTCC_FLOW_ATTN_MBUF_LWM);
8806 sp->rx_discards.low += val;
8807 if (sp->rx_discards.low < val)
8808 sp->rx_discards.high += 1;
8809 }
8810 sp->mbuf_lwm_thresh_hit = sp->rx_discards;
8811 }
8812 TG3_STAT_ADD32(&sp->rx_errors, RCVLPC_IN_ERRORS_CNT);
8813 }
8815 static void tg3_timer(unsigned long __opaque)
8817 struct tg3 *tp = (struct tg3 *) __opaque;
8819 if (tp->irq_sync)
8820 goto restart_timer;
8822 spin_lock(&tp->lock);
8824 if (!tg3_flag(tp, TAGGED_STATUS)) {
8825 /* All of this garbage is necessary because, when using
8826 * non-tagged IRQ status, the mailbox/status_block protocol
8827 * the chip uses to communicate with the cpu is race prone.
8828 */
8829 if (tp->napi[0].hw_status->status & SD_STATUS_UPDATED) {
8830 tw32(GRC_LOCAL_CTRL,
8831 tp->grc_local_ctrl | GRC_LCLCTRL_SETINT);
8832 } else {
8833 tw32(HOSTCC_MODE, tp->coalesce_mode |
8834 HOSTCC_MODE_ENABLE | HOSTCC_MODE_NOW);
8837 if (!(tr32(WDMAC_MODE) & WDMAC_MODE_ENABLE)) {
8838 tg3_flag_set(tp, RESTART_TIMER);
8839 spin_unlock(&tp->lock);
8840 schedule_work(&tp->reset_task);
8841 return;
8842 }
8843 }
8845 /* This part only runs once per second. */
8846 if (!--tp->timer_counter) {
8847 if (tg3_flag(tp, 5705_PLUS))
8848 tg3_periodic_fetch_stats(tp);
8850 if (tp->setlpicnt && !--tp->setlpicnt)
8851 tg3_phy_eee_enable(tp);
8853 if (tg3_flag(tp, USE_LINKCHG_REG)) {
8854 u32 mac_stat;
8855 int phy_event;
8857 mac_stat = tr32(MAC_STATUS);
8859 phy_event = 0;
8860 if (tp->phy_flags & TG3_PHYFLG_USE_MI_INTERRUPT) {
8861 if (mac_stat & MAC_STATUS_MI_INTERRUPT)
8862 phy_event = 1;
8863 } else if (mac_stat & MAC_STATUS_LNKSTATE_CHANGED)
8864 phy_event = 1;
8866 if (phy_event)
8867 tg3_setup_phy(tp, 0);
8868 } else if (tg3_flag(tp, POLL_SERDES)) {
8869 u32 mac_stat = tr32(MAC_STATUS);
8870 int need_setup = 0;
8872 if (netif_carrier_ok(tp->dev) &&
8873 (mac_stat & MAC_STATUS_LNKSTATE_CHANGED)) {
8874 need_setup = 1;
8876 if (!netif_carrier_ok(tp->dev) &&
8877 (mac_stat & (MAC_STATUS_PCS_SYNCED |
8878 MAC_STATUS_SIGNAL_DET))) {
8879 need_setup = 1;
8881 if (need_setup) {
8882 if (!tp->serdes_counter) {
8883 tw32_f(MAC_MODE,
8884 (tp->mac_mode &
8885 ~MAC_MODE_PORT_MODE_MASK));
8886 udelay(40);
8887 tw32_f(MAC_MODE, tp->mac_mode);
8888 udelay(40);
8890 tg3_setup_phy(tp, 0);
8892 } else if ((tp->phy_flags & TG3_PHYFLG_MII_SERDES) &&
8893 tg3_flag(tp, 5780_CLASS)) {
8894 tg3_serdes_parallel_detect(tp);
8895 }
8897 tp->timer_counter = tp->timer_multiplier;
8898 }
8900 /* Heartbeat is only sent once every 2 seconds.
8902 * The heartbeat is to tell the ASF firmware that the host
8903 * driver is still alive. In the event that the OS crashes,
8904 * ASF needs to reset the hardware to free up the FIFO space
8905 * that may be filled with rx packets destined for the host.
8906 * If the FIFO is full, ASF will no longer function properly.
8908 * Unintended resets have been reported on real time kernels
8909 * where the timer doesn't run on time. Netpoll will also have
8910 * the same problem.
8912 * The new FWCMD_NICDRV_ALIVE3 command tells the ASF firmware
8913 * to check the ring condition when the heartbeat is expiring
8914 * before doing the reset. This will prevent most unintended
8915 * resets.
8916 */
8917 if (!--tp->asf_counter) {
8918 if (tg3_flag(tp, ENABLE_ASF) && !tg3_flag(tp, ENABLE_APE)) {
8919 tg3_wait_for_event_ack(tp);
8921 tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX,
8922 FWCMD_NICDRV_ALIVE3);
8923 tg3_write_mem(tp, NIC_SRAM_FW_CMD_LEN_MBOX, 4);
8924 tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX,
8925 TG3_FW_UPDATE_TIMEOUT_SEC);
8927 tg3_generate_fw_event(tp);
8929 tp->asf_counter = tp->asf_multiplier;
8932 spin_unlock(&tp->lock);
8934 restart_timer:
8935 tp->timer.expires = jiffies + tp->timer_offset;
8936 add_timer(&tp->timer);
8939 static int tg3_request_irq(struct tg3 *tp, int irq_num)
8941 irq_handler_t fn;
8942 unsigned long flags;
8943 char *name;
8944 struct tg3_napi *tnapi = &tp->napi[irq_num];
8946 if (tp->irq_cnt == 1)
8947 name = tp->dev->name;
8948 else {
8949 name = &tnapi->irq_lbl[0];
8950 snprintf(name, IFNAMSIZ, "%s-%d", tp->dev->name, irq_num);
8951 name[IFNAMSIZ-1] = 0;
8952 }
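/* With multiple vectors the IRQs then show up per-vector in
 * /proc/interrupts, e.g. "eth0-0" for the link vector and "eth0-1",
 * "eth0-2", ... for the rx rings (assuming the device is named eth0).
 */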
8954 if (tg3_flag(tp, USING_MSI) || tg3_flag(tp, USING_MSIX)) {
8955 fn = tg3_msi;
8956 if (tg3_flag(tp, 1SHOT_MSI))
8957 fn = tg3_msi_1shot;
8958 flags = 0;
8959 } else {
8960 fn = tg3_interrupt;
8961 if (tg3_flag(tp, TAGGED_STATUS))
8962 fn = tg3_interrupt_tagged;
8963 flags = IRQF_SHARED;
8966 return request_irq(tnapi->irq_vec, fn, flags, name, tnapi);
8969 static int tg3_test_interrupt(struct tg3 *tp)
8971 struct tg3_napi *tnapi = &tp->napi[0];
8972 struct net_device *dev = tp->dev;
8973 int err, i, intr_ok = 0;
8974 u32 val;
8976 if (!netif_running(dev))
8977 return -ENODEV;
8979 tg3_disable_ints(tp);
8981 free_irq(tnapi->irq_vec, tnapi);
8983 /*
8984 * Turn off MSI one shot mode. Otherwise this test has no
8985 * observable way to know whether the interrupt was delivered.
8986 */
8987 if (tg3_flag(tp, 57765_PLUS) && tg3_flag(tp, USING_MSI)) {
8988 val = tr32(MSGINT_MODE) | MSGINT_MODE_ONE_SHOT_DISABLE;
8989 tw32(MSGINT_MODE, val);
8992 err = request_irq(tnapi->irq_vec, tg3_test_isr,
8993 IRQF_SHARED | IRQF_SAMPLE_RANDOM, dev->name, tnapi);
8994 if (err)
8995 return err;
8997 tnapi->hw_status->status &= ~SD_STATUS_UPDATED;
8998 tg3_enable_ints(tp);
9000 tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
9001 tnapi->coal_now);
9003 for (i = 0; i < 5; i++) {
9004 u32 int_mbox, misc_host_ctrl;
9006 int_mbox = tr32_mailbox(tnapi->int_mbox);
9007 misc_host_ctrl = tr32(TG3PCI_MISC_HOST_CTRL);
9009 if ((int_mbox != 0) ||
9010 (misc_host_ctrl & MISC_HOST_CTRL_MASK_PCI_INT)) {
9011 intr_ok = 1;
9012 break;
9015 msleep(10);
9018 tg3_disable_ints(tp);
9020 free_irq(tnapi->irq_vec, tnapi);
9022 err = tg3_request_irq(tp, 0);
9024 if (err)
9025 return err;
9027 if (intr_ok) {
9028 /* Reenable MSI one shot mode. */
9029 if (tg3_flag(tp, 57765_PLUS) && tg3_flag(tp, USING_MSI)) {
9030 val = tr32(MSGINT_MODE) & ~MSGINT_MODE_ONE_SHOT_DISABLE;
9031 tw32(MSGINT_MODE, val);
9033 return 0;
9036 return -EIO;
9039 /* Returns 0 if the MSI test succeeds, or if the test fails but
9040 * INTx mode is successfully restored.
9041 */
9042 static int tg3_test_msi(struct tg3 *tp)
9044 int err;
9045 u16 pci_cmd;
9047 if (!tg3_flag(tp, USING_MSI))
9048 return 0;
9050 /* Turn off SERR reporting in case MSI terminates with Master
9051 * Abort.
9052 */
9053 pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
9054 pci_write_config_word(tp->pdev, PCI_COMMAND,
9055 pci_cmd & ~PCI_COMMAND_SERR);
9057 err = tg3_test_interrupt(tp);
9059 pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
9061 if (!err)
9062 return 0;
9064 /* other failures */
9065 if (err != -EIO)
9066 return err;
9068 /* MSI test failed, go back to INTx mode */
9069 netdev_warn(tp->dev, "No interrupt was generated using MSI. Switching "
9070 "to INTx mode. Please report this failure to the PCI "
9071 "maintainer and include system chipset information\n");
9073 free_irq(tp->napi[0].irq_vec, &tp->napi[0]);
9075 pci_disable_msi(tp->pdev);
9077 tg3_flag_clear(tp, USING_MSI);
9078 tp->napi[0].irq_vec = tp->pdev->irq;
9080 err = tg3_request_irq(tp, 0);
9081 if (err)
9082 return err;
9084 /* Need to reset the chip because the MSI cycle may have terminated
9085 * with Master Abort.
9086 */
9087 tg3_full_lock(tp, 1);
9089 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
9090 err = tg3_init_hw(tp, 1);
9092 tg3_full_unlock(tp);
9094 if (err)
9095 free_irq(tp->napi[0].irq_vec, &tp->napi[0]);
9097 return err;
9100 static int tg3_request_firmware(struct tg3 *tp)
9102 const __be32 *fw_data;
9104 if (request_firmware(&tp->fw, tp->fw_needed, &tp->pdev->dev)) {
9105 netdev_err(tp->dev, "Failed to load firmware \"%s\"\n",
9106 tp->fw_needed);
9107 return -ENOENT;
9110 fw_data = (void *)tp->fw->data;
9112 /* Firmware blob starts with version numbers, followed by
9113 * start address and _full_ length including BSS sections
9114 * (which must be longer than the actual data, of course).
9115 */
9117 tp->fw_len = be32_to_cpu(fw_data[2]); /* includes bss */
9118 if (tp->fw_len < (tp->fw->size - 12)) {
9119 netdev_err(tp->dev, "bogus length %d in \"%s\"\n",
9120 tp->fw_len, tp->fw_needed);
9121 release_firmware(tp->fw);
9122 tp->fw = NULL;
9123 return -EINVAL;
9126 /* We no longer need firmware; we have it. */
9127 tp->fw_needed = NULL;
9128 return 0;
9131 static bool tg3_enable_msix(struct tg3 *tp)
9133 int i, rc, cpus = num_online_cpus();
9134 struct msix_entry msix_ent[tp->irq_max];
9136 if (cpus == 1)
9137 /* Just fall back to the simpler MSI mode. */
9138 return false;
9140 /*
9141 * We want as many rx rings enabled as there are cpus.
9142 * The first MSIX vector only deals with link interrupts, etc,
9143 * so we add one to the number of vectors we are requesting.
9144 */
9145 tp->irq_cnt = min_t(unsigned, cpus + 1, tp->irq_max);
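/* e.g. a 4-cpu system with irq_max >= 5 requests five vectors:
 * vector 0 for link/error events and vectors 1-4 for rx rings.
 */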
9147 for (i = 0; i < tp->irq_max; i++) {
9148 msix_ent[i].entry = i;
9149 msix_ent[i].vector = 0;
9152 rc = pci_enable_msix(tp->pdev, msix_ent, tp->irq_cnt);
9153 if (rc < 0) {
9154 return false;
9155 } else if (rc != 0) {
9156 if (pci_enable_msix(tp->pdev, msix_ent, rc))
9157 return false;
9158 netdev_notice(tp->dev, "Requested %d MSI-X vectors, received %d\n",
9159 tp->irq_cnt, rc);
9160 tp->irq_cnt = rc;
9163 for (i = 0; i < tp->irq_max; i++)
9164 tp->napi[i].irq_vec = msix_ent[i].vector;
9166 netif_set_real_num_tx_queues(tp->dev, 1);
9167 rc = tp->irq_cnt > 1 ? tp->irq_cnt - 1 : 1;
9168 if (netif_set_real_num_rx_queues(tp->dev, rc)) {
9169 pci_disable_msix(tp->pdev);
9170 return false;
9173 if (tp->irq_cnt > 1) {
9174 tg3_flag_set(tp, ENABLE_RSS);
9176 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
9177 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720) {
9178 tg3_flag_set(tp, ENABLE_TSS);
9179 netif_set_real_num_tx_queues(tp->dev, tp->irq_cnt - 1);
9180 }
9181 }
9183 return true;
9186 static void tg3_ints_init(struct tg3 *tp)
9188 if ((tg3_flag(tp, SUPPORT_MSI) || tg3_flag(tp, SUPPORT_MSIX)) &&
9189 !tg3_flag(tp, TAGGED_STATUS)) {
9190 /* All MSI supporting chips should support tagged
9191 * status. Assert that this is the case.
9192 */
9193 netdev_warn(tp->dev,
9194 "MSI without TAGGED_STATUS? Not using MSI\n");
9195 goto defcfg;
9198 if (tg3_flag(tp, SUPPORT_MSIX) && tg3_enable_msix(tp))
9199 tg3_flag_set(tp, USING_MSIX);
9200 else if (tg3_flag(tp, SUPPORT_MSI) && pci_enable_msi(tp->pdev) == 0)
9201 tg3_flag_set(tp, USING_MSI);
9203 if (tg3_flag(tp, USING_MSI) || tg3_flag(tp, USING_MSIX)) {
9204 u32 msi_mode = tr32(MSGINT_MODE);
9205 if (tg3_flag(tp, USING_MSIX) && tp->irq_cnt > 1)
9206 msi_mode |= MSGINT_MODE_MULTIVEC_EN;
9207 tw32(MSGINT_MODE, msi_mode | MSGINT_MODE_ENABLE);
9209 defcfg:
9210 if (!tg3_flag(tp, USING_MSIX)) {
9211 tp->irq_cnt = 1;
9212 tp->napi[0].irq_vec = tp->pdev->irq;
9213 netif_set_real_num_tx_queues(tp->dev, 1);
9214 netif_set_real_num_rx_queues(tp->dev, 1);
9215 }
9216 }
9218 static void tg3_ints_fini(struct tg3 *tp)
9220 if (tg3_flag(tp, USING_MSIX))
9221 pci_disable_msix(tp->pdev);
9222 else if (tg3_flag(tp, USING_MSI))
9223 pci_disable_msi(tp->pdev);
9224 tg3_flag_clear(tp, USING_MSI);
9225 tg3_flag_clear(tp, USING_MSIX);
9226 tg3_flag_clear(tp, ENABLE_RSS);
9227 tg3_flag_clear(tp, ENABLE_TSS);
9230 static int tg3_open(struct net_device *dev)
9232 struct tg3 *tp = netdev_priv(dev);
9233 int i, err;
9235 if (tp->fw_needed) {
9236 err = tg3_request_firmware(tp);
9237 if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0) {
9238 if (err)
9239 return err;
9240 } else if (err) {
9241 netdev_warn(tp->dev, "TSO capability disabled\n");
9242 tg3_flag_clear(tp, TSO_CAPABLE);
9243 } else if (!tg3_flag(tp, TSO_CAPABLE)) {
9244 netdev_notice(tp->dev, "TSO capability restored\n");
9245 tg3_flag_set(tp, TSO_CAPABLE);
9246 }
9247 }
9249 netif_carrier_off(tp->dev);
9251 err = tg3_power_up(tp);
9252 if (err)
9253 return err;
9255 tg3_full_lock(tp, 0);
9257 tg3_disable_ints(tp);
9258 tg3_flag_clear(tp, INIT_COMPLETE);
9260 tg3_full_unlock(tp);
9262 /*
9263 * Setup interrupts first so we know how
9264 * many NAPI resources to allocate
9265 */
9266 tg3_ints_init(tp);
9268 /* The placement of this call is tied
9269 * to the setup and use of Host TX descriptors.
9270 */
9271 err = tg3_alloc_consistent(tp);
9272 if (err)
9273 goto err_out1;
9275 tg3_napi_init(tp);
9277 tg3_napi_enable(tp);
9279 for (i = 0; i < tp->irq_cnt; i++) {
9280 struct tg3_napi *tnapi = &tp->napi[i];
9281 err = tg3_request_irq(tp, i);
9282 if (err) {
9283 for (i--; i >= 0; i--)
9284 free_irq(tnapi->irq_vec, tnapi);
9285 break;
9286 }
9287 }
9289 if (err)
9290 goto err_out2;
9292 tg3_full_lock(tp, 0);
9294 err = tg3_init_hw(tp, 1);
9295 if (err) {
9296 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
9297 tg3_free_rings(tp);
9298 } else {
9299 if (tg3_flag(tp, TAGGED_STATUS))
9300 tp->timer_offset = HZ;
9301 else
9302 tp->timer_offset = HZ / 10;
9304 BUG_ON(tp->timer_offset > HZ);
9305 tp->timer_counter = tp->timer_multiplier =
9306 (HZ / tp->timer_offset);
9307 tp->asf_counter = tp->asf_multiplier =
9308 ((HZ / tp->timer_offset) * 2);
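/* i.e. the timer fires every timer_offset jiffies; timer_counter
 * paces the once-per-second work in tg3_timer() and asf_counter the
 * ASF heartbeat at once every two seconds, whichever tick rate is
 * in use.
 */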
9310 init_timer(&tp->timer);
9311 tp->timer.expires = jiffies + tp->timer_offset;
9312 tp->timer.data = (unsigned long) tp;
9313 tp->timer.function = tg3_timer;
9316 tg3_full_unlock(tp);
9318 if (err)
9319 goto err_out3;
9321 if (tg3_flag(tp, USING_MSI)) {
9322 err = tg3_test_msi(tp);
9324 if (err) {
9325 tg3_full_lock(tp, 0);
9326 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
9327 tg3_free_rings(tp);
9328 tg3_full_unlock(tp);
9330 goto err_out2;
9333 if (!tg3_flag(tp, 57765_PLUS) && tg3_flag(tp, USING_MSI)) {
9334 u32 val = tr32(PCIE_TRANSACTION_CFG);
9336 tw32(PCIE_TRANSACTION_CFG,
9337 val | PCIE_TRANS_CFG_1SHOT_MSI);
9338 }
9339 }
9341 tg3_phy_start(tp);
9343 tg3_full_lock(tp, 0);
9345 add_timer(&tp->timer);
9346 tg3_flag_set(tp, INIT_COMPLETE);
9347 tg3_enable_ints(tp);
9349 tg3_full_unlock(tp);
9351 netif_tx_start_all_queues(dev);
9353 /*
9354 * Reset loopback feature if it was turned on while the device was
9355 * down and make sure that it's installed properly now.
9356 */
9357 if (dev->features & NETIF_F_LOOPBACK)
9358 tg3_set_loopback(dev, dev->features);
9360 return 0;
9362 err_out3:
9363 for (i = tp->irq_cnt - 1; i >= 0; i--) {
9364 struct tg3_napi *tnapi = &tp->napi[i];
9365 free_irq(tnapi->irq_vec, tnapi);
9368 err_out2:
9369 tg3_napi_disable(tp);
9370 tg3_napi_fini(tp);
9371 tg3_free_consistent(tp);
9373 err_out1:
9374 tg3_ints_fini(tp);
9375 return err;
9378 static struct rtnl_link_stats64 *tg3_get_stats64(struct net_device *,
9379 struct rtnl_link_stats64 *);
9380 static struct tg3_ethtool_stats *tg3_get_estats(struct tg3 *);
9382 static int tg3_close(struct net_device *dev)
9384 int i;
9385 struct tg3 *tp = netdev_priv(dev);
9387 tg3_napi_disable(tp);
9388 cancel_work_sync(&tp->reset_task);
9390 netif_tx_stop_all_queues(dev);
9392 del_timer_sync(&tp->timer);
9394 tg3_phy_stop(tp);
9396 tg3_full_lock(tp, 1);
9398 tg3_disable_ints(tp);
9400 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
9401 tg3_free_rings(tp);
9402 tg3_flag_clear(tp, INIT_COMPLETE);
9404 tg3_full_unlock(tp);
9406 for (i = tp->irq_cnt - 1; i >= 0; i--) {
9407 struct tg3_napi *tnapi = &tp->napi[i];
9408 free_irq(tnapi->irq_vec, tnapi);
9411 tg3_ints_fini(tp);
9413 tg3_get_stats64(tp->dev, &tp->net_stats_prev);
9415 memcpy(&tp->estats_prev, tg3_get_estats(tp),
9416 sizeof(tp->estats_prev));
9418 tg3_napi_fini(tp);
9420 tg3_free_consistent(tp);
9422 tg3_power_down(tp);
9424 netif_carrier_off(tp->dev);
9426 return 0;
9429 static inline u64 get_stat64(tg3_stat64_t *val)
9431 return ((u64)val->high << 32) | ((u64)val->low);
9434 static u64 calc_crc_errors(struct tg3 *tp)
9436 struct tg3_hw_stats *hw_stats = tp->hw_stats;
9438 if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
9439 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
9440 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701)) {
9441 u32 val;
9443 spin_lock_bh(&tp->lock);
9444 if (!tg3_readphy(tp, MII_TG3_TEST1, &val)) {
9445 tg3_writephy(tp, MII_TG3_TEST1,
9446 val | MII_TG3_TEST1_CRC_EN);
9447 tg3_readphy(tp, MII_TG3_RXR_COUNTERS, &val);
9448 } else
9449 val = 0;
9450 spin_unlock_bh(&tp->lock);
9452 tp->phy_crc_errors += val;
9454 return tp->phy_crc_errors;
9457 return get_stat64(&hw_stats->rx_fcs_errors);
9460 #define ESTAT_ADD(member) \
9461 estats->member = old_estats->member + \
9462 get_stat64(&hw_stats->member)
9464 static struct tg3_ethtool_stats *tg3_get_estats(struct tg3 *tp)
9466 struct tg3_ethtool_stats *estats = &tp->estats;
9467 struct tg3_ethtool_stats *old_estats = &tp->estats_prev;
9468 struct tg3_hw_stats *hw_stats = tp->hw_stats;
9470 if (!hw_stats)
9471 return old_estats;
9473 ESTAT_ADD(rx_octets);
9474 ESTAT_ADD(rx_fragments);
9475 ESTAT_ADD(rx_ucast_packets);
9476 ESTAT_ADD(rx_mcast_packets);
9477 ESTAT_ADD(rx_bcast_packets);
9478 ESTAT_ADD(rx_fcs_errors);
9479 ESTAT_ADD(rx_align_errors);
9480 ESTAT_ADD(rx_xon_pause_rcvd);
9481 ESTAT_ADD(rx_xoff_pause_rcvd);
9482 ESTAT_ADD(rx_mac_ctrl_rcvd);
9483 ESTAT_ADD(rx_xoff_entered);
9484 ESTAT_ADD(rx_frame_too_long_errors);
9485 ESTAT_ADD(rx_jabbers);
9486 ESTAT_ADD(rx_undersize_packets);
9487 ESTAT_ADD(rx_in_length_errors);
9488 ESTAT_ADD(rx_out_length_errors);
9489 ESTAT_ADD(rx_64_or_less_octet_packets);
9490 ESTAT_ADD(rx_65_to_127_octet_packets);
9491 ESTAT_ADD(rx_128_to_255_octet_packets);
9492 ESTAT_ADD(rx_256_to_511_octet_packets);
9493 ESTAT_ADD(rx_512_to_1023_octet_packets);
9494 ESTAT_ADD(rx_1024_to_1522_octet_packets);
9495 ESTAT_ADD(rx_1523_to_2047_octet_packets);
9496 ESTAT_ADD(rx_2048_to_4095_octet_packets);
9497 ESTAT_ADD(rx_4096_to_8191_octet_packets);
9498 ESTAT_ADD(rx_8192_to_9022_octet_packets);
9500 ESTAT_ADD(tx_octets);
9501 ESTAT_ADD(tx_collisions);
9502 ESTAT_ADD(tx_xon_sent);
9503 ESTAT_ADD(tx_xoff_sent);
9504 ESTAT_ADD(tx_flow_control);
9505 ESTAT_ADD(tx_mac_errors);
9506 ESTAT_ADD(tx_single_collisions);
9507 ESTAT_ADD(tx_mult_collisions);
9508 ESTAT_ADD(tx_deferred);
9509 ESTAT_ADD(tx_excessive_collisions);
9510 ESTAT_ADD(tx_late_collisions);
9511 ESTAT_ADD(tx_collide_2times);
9512 ESTAT_ADD(tx_collide_3times);
9513 ESTAT_ADD(tx_collide_4times);
9514 ESTAT_ADD(tx_collide_5times);
9515 ESTAT_ADD(tx_collide_6times);
9516 ESTAT_ADD(tx_collide_7times);
9517 ESTAT_ADD(tx_collide_8times);
9518 ESTAT_ADD(tx_collide_9times);
9519 ESTAT_ADD(tx_collide_10times);
9520 ESTAT_ADD(tx_collide_11times);
9521 ESTAT_ADD(tx_collide_12times);
9522 ESTAT_ADD(tx_collide_13times);
9523 ESTAT_ADD(tx_collide_14times);
9524 ESTAT_ADD(tx_collide_15times);
9525 ESTAT_ADD(tx_ucast_packets);
9526 ESTAT_ADD(tx_mcast_packets);
9527 ESTAT_ADD(tx_bcast_packets);
9528 ESTAT_ADD(tx_carrier_sense_errors);
9529 ESTAT_ADD(tx_discards);
9530 ESTAT_ADD(tx_errors);
9532 ESTAT_ADD(dma_writeq_full);
9533 ESTAT_ADD(dma_write_prioq_full);
9534 ESTAT_ADD(rxbds_empty);
9535 ESTAT_ADD(rx_discards);
9536 ESTAT_ADD(rx_errors);
9537 ESTAT_ADD(rx_threshold_hit);
9539 ESTAT_ADD(dma_readq_full);
9540 ESTAT_ADD(dma_read_prioq_full);
9541 ESTAT_ADD(tx_comp_queue_full);
9543 ESTAT_ADD(ring_set_send_prod_index);
9544 ESTAT_ADD(ring_status_update);
9545 ESTAT_ADD(nic_irqs);
9546 ESTAT_ADD(nic_avoided_irqs);
9547 ESTAT_ADD(nic_tx_threshold_hit);
9549 return estats;
9552 static struct rtnl_link_stats64 *tg3_get_stats64(struct net_device *dev,
9553 struct rtnl_link_stats64 *stats)
9555 struct tg3 *tp = netdev_priv(dev);
9556 struct rtnl_link_stats64 *old_stats = &tp->net_stats_prev;
9557 struct tg3_hw_stats *hw_stats = tp->hw_stats;
9559 if (!hw_stats)
9560 return old_stats;
9562 stats->rx_packets = old_stats->rx_packets +
9563 get_stat64(&hw_stats->rx_ucast_packets) +
9564 get_stat64(&hw_stats->rx_mcast_packets) +
9565 get_stat64(&hw_stats->rx_bcast_packets);
9567 stats->tx_packets = old_stats->tx_packets +
9568 get_stat64(&hw_stats->tx_ucast_packets) +
9569 get_stat64(&hw_stats->tx_mcast_packets) +
9570 get_stat64(&hw_stats->tx_bcast_packets);
9572 stats->rx_bytes = old_stats->rx_bytes +
9573 get_stat64(&hw_stats->rx_octets);
9574 stats->tx_bytes = old_stats->tx_bytes +
9575 get_stat64(&hw_stats->tx_octets);
9577 stats->rx_errors = old_stats->rx_errors +
9578 get_stat64(&hw_stats->rx_errors);
9579 stats->tx_errors = old_stats->tx_errors +
9580 get_stat64(&hw_stats->tx_errors) +
9581 get_stat64(&hw_stats->tx_mac_errors) +
9582 get_stat64(&hw_stats->tx_carrier_sense_errors) +
9583 get_stat64(&hw_stats->tx_discards);
9585 stats->multicast = old_stats->multicast +
9586 get_stat64(&hw_stats->rx_mcast_packets);
9587 stats->collisions = old_stats->collisions +
9588 get_stat64(&hw_stats->tx_collisions);
9590 stats->rx_length_errors = old_stats->rx_length_errors +
9591 get_stat64(&hw_stats->rx_frame_too_long_errors) +
9592 get_stat64(&hw_stats->rx_undersize_packets);
9594 stats->rx_over_errors = old_stats->rx_over_errors +
9595 get_stat64(&hw_stats->rxbds_empty);
9596 stats->rx_frame_errors = old_stats->rx_frame_errors +
9597 get_stat64(&hw_stats->rx_align_errors);
9598 stats->tx_aborted_errors = old_stats->tx_aborted_errors +
9599 get_stat64(&hw_stats->tx_discards);
9600 stats->tx_carrier_errors = old_stats->tx_carrier_errors +
9601 get_stat64(&hw_stats->tx_carrier_sense_errors);
9603 stats->rx_crc_errors = old_stats->rx_crc_errors +
9604 calc_crc_errors(tp);
9606 stats->rx_missed_errors = old_stats->rx_missed_errors +
9607 get_stat64(&hw_stats->rx_discards);
9609 stats->rx_dropped = tp->rx_dropped;
9611 return stats;
9614 static inline u32 calc_crc(unsigned char *buf, int len)
9616 u32 reg;
9617 u32 tmp;
9618 int j, k;
9620 reg = 0xffffffff;
9622 for (j = 0; j < len; j++) {
9623 reg ^= buf[j];
9625 for (k = 0; k < 8; k++) {
9626 tmp = reg & 0x01;
9628 reg >>= 1;
9630 if (tmp)
9631 reg ^= 0xedb88320;
9632 }
9633 }
9635 return ~reg;
9636 }
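/* calc_crc() is the standard bit-reflected CRC-32 (polynomial
 * 0xedb88320) used by Ethernet; __tg3_set_rx_mode() below feeds it a
 * multicast address and uses seven bits of the result to select one
 * of the 128 hash-filter bits spread across MAC_HASH_REG_0..3.
 */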
9638 static void tg3_set_multi(struct tg3 *tp, unsigned int accept_all)
9640 /* accept or reject all multicast frames */
9641 tw32(MAC_HASH_REG_0, accept_all ? 0xffffffff : 0);
9642 tw32(MAC_HASH_REG_1, accept_all ? 0xffffffff : 0);
9643 tw32(MAC_HASH_REG_2, accept_all ? 0xffffffff : 0);
9644 tw32(MAC_HASH_REG_3, accept_all ? 0xffffffff : 0);
9647 static void __tg3_set_rx_mode(struct net_device *dev)
9649 struct tg3 *tp = netdev_priv(dev);
9650 u32 rx_mode;
9652 rx_mode = tp->rx_mode & ~(RX_MODE_PROMISC |
9653 RX_MODE_KEEP_VLAN_TAG);
9655 #if !defined(CONFIG_VLAN_8021Q) && !defined(CONFIG_VLAN_8021Q_MODULE)
9656 /* When ASF is in use, we always keep the RX_MODE_KEEP_VLAN_TAG
9657 * flag clear.
9658 */
9659 if (!tg3_flag(tp, ENABLE_ASF))
9660 rx_mode |= RX_MODE_KEEP_VLAN_TAG;
9661 #endif
9663 if (dev->flags & IFF_PROMISC) {
9664 /* Promiscuous mode. */
9665 rx_mode |= RX_MODE_PROMISC;
9666 } else if (dev->flags & IFF_ALLMULTI) {
9667 /* Accept all multicast. */
9668 tg3_set_multi(tp, 1);
9669 } else if (netdev_mc_empty(dev)) {
9670 /* Reject all multicast. */
9671 tg3_set_multi(tp, 0);
9672 } else {
9673 /* Accept one or more multicast(s). */
9674 struct netdev_hw_addr *ha;
9675 u32 mc_filter[4] = { 0, };
9676 u32 regidx;
9677 u32 bit;
9678 u32 crc;
9680 netdev_for_each_mc_addr(ha, dev) {
9681 crc = calc_crc(ha->addr, ETH_ALEN);
9682 bit = ~crc & 0x7f;
9683 regidx = (bit & 0x60) >> 5;
9684 bit &= 0x1f;
9685 mc_filter[regidx] |= (1 << bit);
9688 tw32(MAC_HASH_REG_0, mc_filter[0]);
9689 tw32(MAC_HASH_REG_1, mc_filter[1]);
9690 tw32(MAC_HASH_REG_2, mc_filter[2]);
9691 tw32(MAC_HASH_REG_3, mc_filter[3]);
9694 if (rx_mode != tp->rx_mode) {
9695 tp->rx_mode = rx_mode;
9696 tw32_f(MAC_RX_MODE, rx_mode);
9697 udelay(10);
9698 }
9699 }
9701 static void tg3_set_rx_mode(struct net_device *dev)
9703 struct tg3 *tp = netdev_priv(dev);
9705 if (!netif_running(dev))
9706 return;
9708 tg3_full_lock(tp, 0);
9709 __tg3_set_rx_mode(dev);
9710 tg3_full_unlock(tp);
9713 static int tg3_get_regs_len(struct net_device *dev)
9715 return TG3_REG_BLK_SIZE;
9718 static void tg3_get_regs(struct net_device *dev,
9719 struct ethtool_regs *regs, void *_p)
9721 struct tg3 *tp = netdev_priv(dev);
9723 regs->version = 0;
9725 memset(_p, 0, TG3_REG_BLK_SIZE);
9727 if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
9728 return;
9730 tg3_full_lock(tp, 0);
9732 tg3_dump_legacy_regs(tp, (u32 *)_p);
9734 tg3_full_unlock(tp);
9737 static int tg3_get_eeprom_len(struct net_device *dev)
9739 struct tg3 *tp = netdev_priv(dev);
9741 return tp->nvram_size;
9744 static int tg3_get_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom, u8 *data)
9746 struct tg3 *tp = netdev_priv(dev);
9747 int ret;
9748 u8 *pd;
9749 u32 i, offset, len, b_offset, b_count;
9750 __be32 val;
9752 if (tg3_flag(tp, NO_NVRAM))
9753 return -EINVAL;
9755 if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
9756 return -EAGAIN;
9758 offset = eeprom->offset;
9759 len = eeprom->len;
9760 eeprom->len = 0;
9762 eeprom->magic = TG3_EEPROM_MAGIC;
9764 if (offset & 3) {
9765 /* adjustments to start on required 4 byte boundary */
9766 b_offset = offset & 3;
9767 b_count = 4 - b_offset;
9768 if (b_count > len) {
9769 /* i.e. offset=1 len=2 */
9770 b_count = len;
9772 ret = tg3_nvram_read_be32(tp, offset-b_offset, &val);
9773 if (ret)
9774 return ret;
9775 memcpy(data, ((char *)&val) + b_offset, b_count);
9776 len -= b_count;
9777 offset += b_count;
9778 eeprom->len += b_count;
9779 }
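/* Worked example: offset=1 len=6 copies bytes 1-3 of the first word
 * here (b_offset=1, b_count=3); the aligned loop below then has no
 * whole word left to read, and the final three bytes come from the
 * trailing-fragment path at the bottom.
 */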
9781 /* read bytes up to the last 4 byte boundary */
9782 pd = &data[eeprom->len];
9783 for (i = 0; i < (len - (len & 3)); i += 4) {
9784 ret = tg3_nvram_read_be32(tp, offset + i, &val);
9785 if (ret) {
9786 eeprom->len += i;
9787 return ret;
9789 memcpy(pd + i, &val, 4);
9791 eeprom->len += i;
9793 if (len & 3) {
9794 /* read last bytes not ending on 4 byte boundary */
9795 pd = &data[eeprom->len];
9796 b_count = len & 3;
9797 b_offset = offset + len - b_count;
9798 ret = tg3_nvram_read_be32(tp, b_offset, &val);
9799 if (ret)
9800 return ret;
9801 memcpy(pd, &val, b_count);
9802 eeprom->len += b_count;
9804 return 0;
9807 static int tg3_nvram_write_block(struct tg3 *tp, u32 offset, u32 len, u8 *buf);
9809 static int tg3_set_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom, u8 *data)
9811 struct tg3 *tp = netdev_priv(dev);
9812 int ret;
9813 u32 offset, len, b_offset, odd_len;
9814 u8 *buf;
9815 __be32 start, end;
9817 if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
9818 return -EAGAIN;
9820 if (tg3_flag(tp, NO_NVRAM) ||
9821 eeprom->magic != TG3_EEPROM_MAGIC)
9822 return -EINVAL;
9824 offset = eeprom->offset;
9825 len = eeprom->len;
9827 if ((b_offset = (offset & 3))) {
9828 /* adjustments to start on required 4 byte boundary */
9829 ret = tg3_nvram_read_be32(tp, offset-b_offset, &start);
9830 if (ret)
9831 return ret;
9832 len += b_offset;
9833 offset &= ~3;
9834 if (len < 4)
9835 len = 4;
9838 odd_len = 0;
9839 if (len & 3) {
9840 /* adjustments to end on required 4 byte boundary */
9841 odd_len = 1;
9842 len = (len + 3) & ~3;
9843 ret = tg3_nvram_read_be32(tp, offset+len-4, &end);
9844 if (ret)
9845 return ret;
9846 }
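/* NVRAM writes must cover whole, aligned words, so an unaligned
 * request becomes a read-modify-write: 'start' preserves the bytes
 * ahead of an unaligned offset and 'end' the bytes past an unaligned
 * length, merged around the caller's data in the bounce buffer below.
 */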
9848 buf = data;
9849 if (b_offset || odd_len) {
9850 buf = kmalloc(len, GFP_KERNEL);
9851 if (!buf)
9852 return -ENOMEM;
9853 if (b_offset)
9854 memcpy(buf, &start, 4);
9855 if (odd_len)
9856 memcpy(buf+len-4, &end, 4);
9857 memcpy(buf + b_offset, data, eeprom->len);
9860 ret = tg3_nvram_write_block(tp, offset, len, buf);
9862 if (buf != data)
9863 kfree(buf);
9865 return ret;
9868 static int tg3_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
9870 struct tg3 *tp = netdev_priv(dev);
9872 if (tg3_flag(tp, USE_PHYLIB)) {
9873 struct phy_device *phydev;
9874 if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
9875 return -EAGAIN;
9876 phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
9877 return phy_ethtool_gset(phydev, cmd);
9880 cmd->supported = (SUPPORTED_Autoneg);
9882 if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY))
9883 cmd->supported |= (SUPPORTED_1000baseT_Half |
9884 SUPPORTED_1000baseT_Full);
9886 if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES)) {
9887 cmd->supported |= (SUPPORTED_100baseT_Half |
9888 SUPPORTED_100baseT_Full |
9889 SUPPORTED_10baseT_Half |
9890 SUPPORTED_10baseT_Full |
9891 SUPPORTED_TP);
9892 cmd->port = PORT_TP;
9893 } else {
9894 cmd->supported |= SUPPORTED_FIBRE;
9895 cmd->port = PORT_FIBRE;
9898 cmd->advertising = tp->link_config.advertising;
9899 if (netif_running(dev)) {
9900 ethtool_cmd_speed_set(cmd, tp->link_config.active_speed);
9901 cmd->duplex = tp->link_config.active_duplex;
9902 } else {
9903 ethtool_cmd_speed_set(cmd, SPEED_INVALID);
9904 cmd->duplex = DUPLEX_INVALID;
9906 cmd->phy_address = tp->phy_addr;
9907 cmd->transceiver = XCVR_INTERNAL;
9908 cmd->autoneg = tp->link_config.autoneg;
9909 cmd->maxtxpkt = 0;
9910 cmd->maxrxpkt = 0;
9911 return 0;
9914 static int tg3_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
9916 struct tg3 *tp = netdev_priv(dev);
9917 u32 speed = ethtool_cmd_speed(cmd);
9919 if (tg3_flag(tp, USE_PHYLIB)) {
9920 struct phy_device *phydev;
9921 if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
9922 return -EAGAIN;
9923 phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
9924 return phy_ethtool_sset(phydev, cmd);
9927 if (cmd->autoneg != AUTONEG_ENABLE &&
9928 cmd->autoneg != AUTONEG_DISABLE)
9929 return -EINVAL;
9931 if (cmd->autoneg == AUTONEG_DISABLE &&
9932 cmd->duplex != DUPLEX_FULL &&
9933 cmd->duplex != DUPLEX_HALF)
9934 return -EINVAL;
9936 if (cmd->autoneg == AUTONEG_ENABLE) {
9937 u32 mask = ADVERTISED_Autoneg |
9938 ADVERTISED_Pause |
9939 ADVERTISED_Asym_Pause;
9941 if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY))
9942 mask |= ADVERTISED_1000baseT_Half |
9943 ADVERTISED_1000baseT_Full;
9945 if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
9946 mask |= ADVERTISED_100baseT_Half |
9947 ADVERTISED_100baseT_Full |
9948 ADVERTISED_10baseT_Half |
9949 ADVERTISED_10baseT_Full |
9950 ADVERTISED_TP;
9951 else
9952 mask |= ADVERTISED_FIBRE;
9954 if (cmd->advertising & ~mask)
9955 return -EINVAL;
9957 mask &= (ADVERTISED_1000baseT_Half |
9958 ADVERTISED_1000baseT_Full |
9959 ADVERTISED_100baseT_Half |
9960 ADVERTISED_100baseT_Full |
9961 ADVERTISED_10baseT_Half |
9962 ADVERTISED_10baseT_Full);
9964 cmd->advertising &= mask;
9965 } else {
9966 if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES) {
9967 if (speed != SPEED_1000)
9968 return -EINVAL;
9970 if (cmd->duplex != DUPLEX_FULL)
9971 return -EINVAL;
9972 } else {
9973 if (speed != SPEED_100 &&
9974 speed != SPEED_10)
9975 return -EINVAL;
9976 }
9977 }
9979 tg3_full_lock(tp, 0);
9981 tp->link_config.autoneg = cmd->autoneg;
9982 if (cmd->autoneg == AUTONEG_ENABLE) {
9983 tp->link_config.advertising = (cmd->advertising |
9984 ADVERTISED_Autoneg);
9985 tp->link_config.speed = SPEED_INVALID;
9986 tp->link_config.duplex = DUPLEX_INVALID;
9987 } else {
9988 tp->link_config.advertising = 0;
9989 tp->link_config.speed = speed;
9990 tp->link_config.duplex = cmd->duplex;
9993 tp->link_config.orig_speed = tp->link_config.speed;
9994 tp->link_config.orig_duplex = tp->link_config.duplex;
9995 tp->link_config.orig_autoneg = tp->link_config.autoneg;
9997 if (netif_running(dev))
9998 tg3_setup_phy(tp, 1);
10000 tg3_full_unlock(tp);
10002 return 0;
10005 static void tg3_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
10007 struct tg3 *tp = netdev_priv(dev);
10009 strcpy(info->driver, DRV_MODULE_NAME);
10010 strcpy(info->version, DRV_MODULE_VERSION);
10011 strcpy(info->fw_version, tp->fw_ver);
10012 strcpy(info->bus_info, pci_name(tp->pdev));
10015 static void tg3_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
10017 struct tg3 *tp = netdev_priv(dev);
10019 if (tg3_flag(tp, WOL_CAP) && device_can_wakeup(&tp->pdev->dev))
10020 wol->supported = WAKE_MAGIC;
10021 else
10022 wol->supported = 0;
10023 wol->wolopts = 0;
10024 if (tg3_flag(tp, WOL_ENABLE) && device_can_wakeup(&tp->pdev->dev))
10025 wol->wolopts = WAKE_MAGIC;
10026 memset(&wol->sopass, 0, sizeof(wol->sopass));
10029 static int tg3_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
10031 struct tg3 *tp = netdev_priv(dev);
10032 struct device *dp = &tp->pdev->dev;
10034 if (wol->wolopts & ~WAKE_MAGIC)
10035 return -EINVAL;
10036 if ((wol->wolopts & WAKE_MAGIC) &&
10037 !(tg3_flag(tp, WOL_CAP) && device_can_wakeup(dp)))
10038 return -EINVAL;
10040 device_set_wakeup_enable(dp, wol->wolopts & WAKE_MAGIC);
10042 spin_lock_bh(&tp->lock);
10043 if (device_may_wakeup(dp))
10044 tg3_flag_set(tp, WOL_ENABLE);
10045 else
10046 tg3_flag_clear(tp, WOL_ENABLE);
10047 spin_unlock_bh(&tp->lock);
10049 return 0;
10052 static u32 tg3_get_msglevel(struct net_device *dev)
10054 struct tg3 *tp = netdev_priv(dev);
10055 return tp->msg_enable;
10058 static void tg3_set_msglevel(struct net_device *dev, u32 value)
10060 struct tg3 *tp = netdev_priv(dev);
10061 tp->msg_enable = value;
10064 static int tg3_nway_reset(struct net_device *dev)
10066 struct tg3 *tp = netdev_priv(dev);
10067 int r;
10069 if (!netif_running(dev))
10070 return -EAGAIN;
10072 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
10073 return -EINVAL;
10075 if (tg3_flag(tp, USE_PHYLIB)) {
10076 if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
10077 return -EAGAIN;
10078 r = phy_start_aneg(tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]);
10079 } else {
10080 u32 bmcr;
10082 spin_lock_bh(&tp->lock);
10083 r = -EINVAL;
10084 tg3_readphy(tp, MII_BMCR, &bmcr);
10085 if (!tg3_readphy(tp, MII_BMCR, &bmcr) &&
10086 ((bmcr & BMCR_ANENABLE) ||
10087 (tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT))) {
10088 tg3_writephy(tp, MII_BMCR, bmcr | BMCR_ANRESTART |
10089 BMCR_ANENABLE);
10090 r = 0;
10092 spin_unlock_bh(&tp->lock);
10095 return r;
10098 static void tg3_get_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
10100 struct tg3 *tp = netdev_priv(dev);
10102 ering->rx_max_pending = tp->rx_std_ring_mask;
10103 ering->rx_mini_max_pending = 0;
10104 if (tg3_flag(tp, JUMBO_RING_ENABLE))
10105 ering->rx_jumbo_max_pending = tp->rx_jmb_ring_mask;
10106 else
10107 ering->rx_jumbo_max_pending = 0;
10109 ering->tx_max_pending = TG3_TX_RING_SIZE - 1;
10111 ering->rx_pending = tp->rx_pending;
10112 ering->rx_mini_pending = 0;
10113 if (tg3_flag(tp, JUMBO_RING_ENABLE))
10114 ering->rx_jumbo_pending = tp->rx_jumbo_pending;
10115 else
10116 ering->rx_jumbo_pending = 0;
10118 ering->tx_pending = tp->napi[0].tx_pending;
10121 static int tg3_set_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
10123 struct tg3 *tp = netdev_priv(dev);
10124 int i, irq_sync = 0, err = 0;
10126 if ((ering->rx_pending > tp->rx_std_ring_mask) ||
10127 (ering->rx_jumbo_pending > tp->rx_jmb_ring_mask) ||
10128 (ering->tx_pending > TG3_TX_RING_SIZE - 1) ||
10129 (ering->tx_pending <= MAX_SKB_FRAGS) ||
10130 (tg3_flag(tp, TSO_BUG) &&
10131 (ering->tx_pending <= (MAX_SKB_FRAGS * 3))))
10132 return -EINVAL;
10134 if (netif_running(dev)) {
10135 tg3_phy_stop(tp);
10136 tg3_netif_stop(tp);
10137 irq_sync = 1;
10140 tg3_full_lock(tp, irq_sync);
10142 tp->rx_pending = ering->rx_pending;
10144 if (tg3_flag(tp, MAX_RXPEND_64) &&
10145 tp->rx_pending > 63)
10146 tp->rx_pending = 63;
10147 tp->rx_jumbo_pending = ering->rx_jumbo_pending;
10149 for (i = 0; i < tp->irq_max; i++)
10150 tp->napi[i].tx_pending = ering->tx_pending;
10152 if (netif_running(dev)) {
10153 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
10154 err = tg3_restart_hw(tp, 1);
10155 if (!err)
10156 tg3_netif_start(tp);
10159 tg3_full_unlock(tp);
10161 if (irq_sync && !err)
10162 tg3_phy_start(tp);
10164 return err;
10167 static void tg3_get_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
10169 struct tg3 *tp = netdev_priv(dev);
10171 epause->autoneg = !!tg3_flag(tp, PAUSE_AUTONEG);
10173 if (tp->link_config.active_flowctrl & FLOW_CTRL_RX)
10174 epause->rx_pause = 1;
10175 else
10176 epause->rx_pause = 0;
10178 if (tp->link_config.active_flowctrl & FLOW_CTRL_TX)
10179 epause->tx_pause = 1;
10180 else
10181 epause->tx_pause = 0;
10184 static int tg3_set_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
10186 struct tg3 *tp = netdev_priv(dev);
10187 int err = 0;
10189 if (tg3_flag(tp, USE_PHYLIB)) {
10190 u32 newadv;
10191 struct phy_device *phydev;
10193 phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
10195 if (!(phydev->supported & SUPPORTED_Pause) ||
10196 (!(phydev->supported & SUPPORTED_Asym_Pause) &&
10197 (epause->rx_pause != epause->tx_pause)))
10198 return -EINVAL;
10200 tp->link_config.flowctrl = 0;
10201 if (epause->rx_pause) {
10202 tp->link_config.flowctrl |= FLOW_CTRL_RX;
10204 if (epause->tx_pause) {
10205 tp->link_config.flowctrl |= FLOW_CTRL_TX;
10206 newadv = ADVERTISED_Pause;
10207 } else
10208 newadv = ADVERTISED_Pause |
10209 ADVERTISED_Asym_Pause;
10210 } else if (epause->tx_pause) {
10211 tp->link_config.flowctrl |= FLOW_CTRL_TX;
10212 newadv = ADVERTISED_Asym_Pause;
10213 } else
10214 newadv = 0;
10216 if (epause->autoneg)
10217 tg3_flag_set(tp, PAUSE_AUTONEG);
10218 else
10219 tg3_flag_clear(tp, PAUSE_AUTONEG);
10221 if (tp->phy_flags & TG3_PHYFLG_IS_CONNECTED) {
10222 u32 oldadv = phydev->advertising &
10223 (ADVERTISED_Pause | ADVERTISED_Asym_Pause);
10224 if (oldadv != newadv) {
10225 phydev->advertising &=
10226 ~(ADVERTISED_Pause |
10227 ADVERTISED_Asym_Pause);
10228 phydev->advertising |= newadv;
10229 if (phydev->autoneg) {
10230 /*
10231 * Always renegotiate the link to
10232 * inform our link partner of our
10233 * flow control settings, even if the
10234 * flow control is forced. Let
10235 * tg3_adjust_link() do the final
10236 * flow control setup.
10237 */
10238 return phy_start_aneg(phydev);
10242 if (!epause->autoneg)
10243 tg3_setup_flow_control(tp, 0, 0);
10244 } else {
10245 tp->link_config.orig_advertising &=
10246 ~(ADVERTISED_Pause |
10247 ADVERTISED_Asym_Pause);
10248 tp->link_config.orig_advertising |= newadv;
10250 } else {
10251 int irq_sync = 0;
10253 if (netif_running(dev)) {
10254 tg3_netif_stop(tp);
10255 irq_sync = 1;
10258 tg3_full_lock(tp, irq_sync);
10260 if (epause->autoneg)
10261 tg3_flag_set(tp, PAUSE_AUTONEG);
10262 else
10263 tg3_flag_clear(tp, PAUSE_AUTONEG);
10264 if (epause->rx_pause)
10265 tp->link_config.flowctrl |= FLOW_CTRL_RX;
10266 else
10267 tp->link_config.flowctrl &= ~FLOW_CTRL_RX;
10268 if (epause->tx_pause)
10269 tp->link_config.flowctrl |= FLOW_CTRL_TX;
10270 else
10271 tp->link_config.flowctrl &= ~FLOW_CTRL_TX;
10273 if (netif_running(dev)) {
10274 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
10275 err = tg3_restart_hw(tp, 1);
10276 if (!err)
10277 tg3_netif_start(tp);
10280 tg3_full_unlock(tp);
10283 return err;
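
/* The rx/tx pause requests handled above map onto the two 802.3
 * autonegotiation advertisement bits like so (the table the code
 * implements):
 *
 *   rx_pause  tx_pause    advertised
 *      1         1        Pause
 *      1         0        Pause | Asym_Pause
 *      0         1        Asym_Pause
 *      0         0        (none)
 *
 * A guarded-out helper expressing the same mapping:
 */
#if 0
static u32 pause_to_adv(int rx_pause, int tx_pause)
{
	if (rx_pause)
		return tx_pause ? ADVERTISED_Pause :
				  ADVERTISED_Pause | ADVERTISED_Asym_Pause;

	return tx_pause ? ADVERTISED_Asym_Pause : 0;
}
#endif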
10286 static int tg3_get_sset_count(struct net_device *dev, int sset)
10288 switch (sset) {
10289 case ETH_SS_TEST:
10290 return TG3_NUM_TEST;
10291 case ETH_SS_STATS:
10292 return TG3_NUM_STATS;
10293 default:
10294 return -EOPNOTSUPP;
10298 static void tg3_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
10300 switch (stringset) {
10301 case ETH_SS_STATS:
10302 memcpy(buf, &ethtool_stats_keys, sizeof(ethtool_stats_keys));
10303 break;
10304 case ETH_SS_TEST:
10305 memcpy(buf, &ethtool_test_keys, sizeof(ethtool_test_keys));
10306 break;
10307 default:
10308 WARN_ON(1); /* we need a WARN() */
10309 break;
10313 static int tg3_set_phys_id(struct net_device *dev,
10314 enum ethtool_phys_id_state state)
10316 struct tg3 *tp = netdev_priv(dev);
10318 if (!netif_running(tp->dev))
10319 return -EAGAIN;
10321 switch (state) {
10322 case ETHTOOL_ID_ACTIVE:
10323 return 1; /* cycle on/off once per second */
10325 case ETHTOOL_ID_ON:
10326 tw32(MAC_LED_CTRL, LED_CTRL_LNKLED_OVERRIDE |
10327 LED_CTRL_1000MBPS_ON |
10328 LED_CTRL_100MBPS_ON |
10329 LED_CTRL_10MBPS_ON |
10330 LED_CTRL_TRAFFIC_OVERRIDE |
10331 LED_CTRL_TRAFFIC_BLINK |
10332 LED_CTRL_TRAFFIC_LED);
10333 break;
10335 case ETHTOOL_ID_OFF:
10336 tw32(MAC_LED_CTRL, LED_CTRL_LNKLED_OVERRIDE |
10337 LED_CTRL_TRAFFIC_OVERRIDE);
10338 break;
10340 case ETHTOOL_ID_INACTIVE:
10341 tw32(MAC_LED_CTRL, tp->led_ctrl);
10342 break;
10345 return 0;
10348 static void tg3_get_ethtool_stats(struct net_device *dev,
10349 struct ethtool_stats *estats, u64 *tmp_stats)
10351 struct tg3 *tp = netdev_priv(dev);
10352 memcpy(tmp_stats, tg3_get_estats(tp), sizeof(tp->estats));
10355 static __be32 * tg3_vpd_readblock(struct tg3 *tp)
10357 int i;
10358 __be32 *buf;
10359 u32 offset = 0, len = 0;
10360 u32 magic, val;
10362 if (tg3_flag(tp, NO_NVRAM) || tg3_nvram_read(tp, 0, &magic))
10363 return NULL;
10365 if (magic == TG3_EEPROM_MAGIC) {
10366 for (offset = TG3_NVM_DIR_START;
10367 offset < TG3_NVM_DIR_END;
10368 offset += TG3_NVM_DIRENT_SIZE) {
10369 if (tg3_nvram_read(tp, offset, &val))
10370 return NULL;
10372 if ((val >> TG3_NVM_DIRTYPE_SHIFT) ==
10373 TG3_NVM_DIRTYPE_EXTVPD)
10374 break;
10377 if (offset != TG3_NVM_DIR_END) {
10378 len = (val & TG3_NVM_DIRTYPE_LENMSK) * 4;
10379 if (tg3_nvram_read(tp, offset + 4, &offset))
10380 return NULL;
10382 offset = tg3_nvram_logical_addr(tp, offset);
10386 if (!offset || !len) {
10387 offset = TG3_NVM_VPD_OFF;
10388 len = TG3_NVM_VPD_LEN;
10391 buf = kmalloc(len, GFP_KERNEL);
10392 if (buf == NULL)
10393 return NULL;
10395 if (magic == TG3_EEPROM_MAGIC) {
10396 for (i = 0; i < len; i += 4) {
10397 /* The data is in little-endian format in NVRAM.
10398 * Use the big-endian read routines to preserve
10399 * the byte order as it exists in NVRAM.
10400 */
10401 if (tg3_nvram_read_be32(tp, offset + i, &buf[i/4]))
10402 goto error;
10404 } else {
10405 u8 *ptr;
10406 ssize_t cnt;
10407 unsigned int pos = 0;
10409 ptr = (u8 *)&buf[0];
10410 for (i = 0; pos < len && i < 3; i++, pos += cnt, ptr += cnt) {
10411 cnt = pci_read_vpd(tp->pdev, pos,
10412 len - pos, ptr);
10413 if (cnt == -ETIMEDOUT || cnt == -EINTR)
10414 cnt = 0;
10415 else if (cnt < 0)
10416 goto error;
10418 if (pos != len)
10419 goto error;
10422 return buf;
10424 error:
10425 kfree(buf);
10426 return NULL;
10429 #define NVRAM_TEST_SIZE 0x100
10430 #define NVRAM_SELFBOOT_FORMAT1_0_SIZE 0x14
10431 #define NVRAM_SELFBOOT_FORMAT1_2_SIZE 0x18
10432 #define NVRAM_SELFBOOT_FORMAT1_3_SIZE 0x1c
10433 #define NVRAM_SELFBOOT_HW_SIZE 0x20
10434 #define NVRAM_SELFBOOT_DATA_SIZE 0x1c
10436 static int tg3_test_nvram(struct tg3 *tp)
10438 u32 csum, magic;
10439 __be32 *buf;
10440 int i, j, k, err = 0, size;
10442 if (tg3_flag(tp, NO_NVRAM))
10443 return 0;
10445 if (tg3_nvram_read(tp, 0, &magic) != 0)
10446 return -EIO;
10448 if (magic == TG3_EEPROM_MAGIC)
10449 size = NVRAM_TEST_SIZE;
10450 else if ((magic & TG3_EEPROM_MAGIC_FW_MSK) == TG3_EEPROM_MAGIC_FW) {
10451 if ((magic & TG3_EEPROM_SB_FORMAT_MASK) ==
10452 TG3_EEPROM_SB_FORMAT_1) {
10453 switch (magic & TG3_EEPROM_SB_REVISION_MASK) {
10454 case TG3_EEPROM_SB_REVISION_0:
10455 size = NVRAM_SELFBOOT_FORMAT1_0_SIZE;
10456 break;
10457 case TG3_EEPROM_SB_REVISION_2:
10458 size = NVRAM_SELFBOOT_FORMAT1_2_SIZE;
10459 break;
10460 case TG3_EEPROM_SB_REVISION_3:
10461 size = NVRAM_SELFBOOT_FORMAT1_3_SIZE;
10462 break;
10463 default:
10464 return 0;
10466 } else
10467 return 0;
10468 } else if ((magic & TG3_EEPROM_MAGIC_HW_MSK) == TG3_EEPROM_MAGIC_HW)
10469 size = NVRAM_SELFBOOT_HW_SIZE;
10470 else
10471 return -EIO;
10473 buf = kmalloc(size, GFP_KERNEL);
10474 if (buf == NULL)
10475 return -ENOMEM;
10477 err = -EIO;
10478 for (i = 0, j = 0; i < size; i += 4, j++) {
10479 err = tg3_nvram_read_be32(tp, i, &buf[j]);
10480 if (err)
10481 break;
10483 if (i < size)
10484 goto out;
10486 /* Selfboot format */
10487 magic = be32_to_cpu(buf[0]);
10488 if ((magic & TG3_EEPROM_MAGIC_FW_MSK) ==
10489 TG3_EEPROM_MAGIC_FW) {
10490 u8 *buf8 = (u8 *) buf, csum8 = 0;
10492 if ((magic & TG3_EEPROM_SB_REVISION_MASK) ==
10493 TG3_EEPROM_SB_REVISION_2) {
10494 /* For rev 2, the csum doesn't include the MBA. */
10495 for (i = 0; i < TG3_EEPROM_SB_F1R2_MBA_OFF; i++)
10496 csum8 += buf8[i];
10497 for (i = TG3_EEPROM_SB_F1R2_MBA_OFF + 4; i < size; i++)
10498 csum8 += buf8[i];
10499 } else {
10500 for (i = 0; i < size; i++)
10501 csum8 += buf8[i];
10504 if (csum8 == 0) {
10505 err = 0;
10506 goto out;
10509 err = -EIO;
10510 goto out;
10513 if ((magic & TG3_EEPROM_MAGIC_HW_MSK) ==
10514 TG3_EEPROM_MAGIC_HW) {
10515 u8 data[NVRAM_SELFBOOT_DATA_SIZE];
10516 u8 parity[NVRAM_SELFBOOT_DATA_SIZE];
10517 u8 *buf8 = (u8 *) buf;
10519 /* Separate the parity bits and the data bytes. */
10520 for (i = 0, j = 0, k = 0; i < NVRAM_SELFBOOT_HW_SIZE; i++) {
10521 if ((i == 0) || (i == 8)) {
10522 int l;
10523 u8 msk;
10525 for (l = 0, msk = 0x80; l < 7; l++, msk >>= 1)
10526 parity[k++] = buf8[i] & msk;
10527 i++;
10528 } else if (i == 16) {
10529 int l;
10530 u8 msk;
10532 for (l = 0, msk = 0x20; l < 6; l++, msk >>= 1)
10533 parity[k++] = buf8[i] & msk;
10534 i++;
10536 for (l = 0, msk = 0x80; l < 8; l++, msk >>= 1)
10537 parity[k++] = buf8[i] & msk;
10538 i++;
10540 data[j++] = buf8[i];
10543 err = -EIO;
10544 for (i = 0; i < NVRAM_SELFBOOT_DATA_SIZE; i++) {
10545 u8 hw8 = hweight8(data[i]);
10547 if ((hw8 & 0x1) && parity[i])
10548 goto out;
10549 else if (!(hw8 & 0x1) && !parity[i])
10550 goto out;
10552 err = 0;
10553 goto out;
10556 err = -EIO;
10558 /* Bootstrap checksum at offset 0x10 */
10559 csum = calc_crc((unsigned char *) buf, 0x10);
10560 if (csum != le32_to_cpu(buf[0x10/4]))
10561 goto out;
10563 /* Manufacturing block starts at offset 0x74, checksum at 0xfc */
10564 csum = calc_crc((unsigned char *) &buf[0x74/4], 0x88);
10565 if (csum != le32_to_cpu(buf[0xfc/4]))
10566 goto out;
10568 kfree(buf);
10570 buf = tg3_vpd_readblock(tp);
10571 if (!buf)
10572 return -ENOMEM;
10574 i = pci_vpd_find_tag((u8 *)buf, 0, TG3_NVM_VPD_LEN,
10575 PCI_VPD_LRDT_RO_DATA);
10576 if (i > 0) {
10577 j = pci_vpd_lrdt_size(&((u8 *)buf)[i]);
10578 if (j < 0)
10579 goto out;
10581 if (i + PCI_VPD_LRDT_TAG_SIZE + j > TG3_NVM_VPD_LEN)
10582 goto out;
10584 i += PCI_VPD_LRDT_TAG_SIZE;
10585 j = pci_vpd_find_info_keyword((u8 *)buf, i, j,
10586 PCI_VPD_RO_KEYWORD_CHKSUM);
10587 if (j > 0) {
10588 u8 csum8 = 0;
10590 j += PCI_VPD_INFO_FLD_HDR_SIZE;
10592 for (i = 0; i <= j; i++)
10593 csum8 += ((u8 *)buf)[i];
10595 if (csum8)
10596 goto out;
10600 err = 0;
10602 out:
10603 kfree(buf);
10604 return err;
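
/* The parity walk above enforces odd parity: each data byte plus its
 * stored parity bit must contain an odd number of 1 bits.  A
 * guarded-out standalone equivalent (using a compiler builtin in place
 * of the kernel's hweight8()):
 */
#if 0
#include <stdbool.h>

static bool selfboot_parity_ok(unsigned char data, bool parity_bit)
{
	return (__builtin_popcount(data) + (parity_bit ? 1 : 0)) % 2 == 1;
}
#endif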
10607 #define TG3_SERDES_TIMEOUT_SEC 2
10608 #define TG3_COPPER_TIMEOUT_SEC 6
10610 static int tg3_test_link(struct tg3 *tp)
10612 int i, max;
10614 if (!netif_running(tp->dev))
10615 return -ENODEV;
10617 if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES)
10618 max = TG3_SERDES_TIMEOUT_SEC;
10619 else
10620 max = TG3_COPPER_TIMEOUT_SEC;
10622 for (i = 0; i < max; i++) {
10623 if (netif_carrier_ok(tp->dev))
10624 return 0;
10626 if (msleep_interruptible(1000))
10627 break;
10630 return -EIO;
10633 /* Only test the commonly used registers */
10634 static int tg3_test_registers(struct tg3 *tp)
10636 int i, is_5705, is_5750;
10637 u32 offset, read_mask, write_mask, val, save_val, read_val;
10638 static struct {
10639 u16 offset;
10640 u16 flags;
10641 #define TG3_FL_5705 0x1
10642 #define TG3_FL_NOT_5705 0x2
10643 #define TG3_FL_NOT_5788 0x4
10644 #define TG3_FL_NOT_5750 0x8
10645 u32 read_mask;
10646 u32 write_mask;
10647 } reg_tbl[] = {
10648 /* MAC Control Registers */
10649 { MAC_MODE, TG3_FL_NOT_5705,
10650 0x00000000, 0x00ef6f8c },
10651 { MAC_MODE, TG3_FL_5705,
10652 0x00000000, 0x01ef6b8c },
10653 { MAC_STATUS, TG3_FL_NOT_5705,
10654 0x03800107, 0x00000000 },
10655 { MAC_STATUS, TG3_FL_5705,
10656 0x03800100, 0x00000000 },
10657 { MAC_ADDR_0_HIGH, 0x0000,
10658 0x00000000, 0x0000ffff },
10659 { MAC_ADDR_0_LOW, 0x0000,
10660 0x00000000, 0xffffffff },
10661 { MAC_RX_MTU_SIZE, 0x0000,
10662 0x00000000, 0x0000ffff },
10663 { MAC_TX_MODE, 0x0000,
10664 0x00000000, 0x00000070 },
10665 { MAC_TX_LENGTHS, 0x0000,
10666 0x00000000, 0x00003fff },
10667 { MAC_RX_MODE, TG3_FL_NOT_5705,
10668 0x00000000, 0x000007fc },
10669 { MAC_RX_MODE, TG3_FL_5705,
10670 0x00000000, 0x000007dc },
10671 { MAC_HASH_REG_0, 0x0000,
10672 0x00000000, 0xffffffff },
10673 { MAC_HASH_REG_1, 0x0000,
10674 0x00000000, 0xffffffff },
10675 { MAC_HASH_REG_2, 0x0000,
10676 0x00000000, 0xffffffff },
10677 { MAC_HASH_REG_3, 0x0000,
10678 0x00000000, 0xffffffff },
10680 /* Receive Data and Receive BD Initiator Control Registers. */
10681 { RCVDBDI_JUMBO_BD+0, TG3_FL_NOT_5705,
10682 0x00000000, 0xffffffff },
10683 { RCVDBDI_JUMBO_BD+4, TG3_FL_NOT_5705,
10684 0x00000000, 0xffffffff },
10685 { RCVDBDI_JUMBO_BD+8, TG3_FL_NOT_5705,
10686 0x00000000, 0x00000003 },
10687 { RCVDBDI_JUMBO_BD+0xc, TG3_FL_NOT_5705,
10688 0x00000000, 0xffffffff },
10689 { RCVDBDI_STD_BD+0, 0x0000,
10690 0x00000000, 0xffffffff },
10691 { RCVDBDI_STD_BD+4, 0x0000,
10692 0x00000000, 0xffffffff },
10693 { RCVDBDI_STD_BD+8, 0x0000,
10694 0x00000000, 0xffff0002 },
10695 { RCVDBDI_STD_BD+0xc, 0x0000,
10696 0x00000000, 0xffffffff },
10698 /* Receive BD Initiator Control Registers. */
10699 { RCVBDI_STD_THRESH, TG3_FL_NOT_5705,
10700 0x00000000, 0xffffffff },
10701 { RCVBDI_STD_THRESH, TG3_FL_5705,
10702 0x00000000, 0x000003ff },
10703 { RCVBDI_JUMBO_THRESH, TG3_FL_NOT_5705,
10704 0x00000000, 0xffffffff },
10706 /* Host Coalescing Control Registers. */
10707 { HOSTCC_MODE, TG3_FL_NOT_5705,
10708 0x00000000, 0x00000004 },
10709 { HOSTCC_MODE, TG3_FL_5705,
10710 0x00000000, 0x000000f6 },
10711 { HOSTCC_RXCOL_TICKS, TG3_FL_NOT_5705,
10712 0x00000000, 0xffffffff },
10713 { HOSTCC_RXCOL_TICKS, TG3_FL_5705,
10714 0x00000000, 0x000003ff },
10715 { HOSTCC_TXCOL_TICKS, TG3_FL_NOT_5705,
10716 0x00000000, 0xffffffff },
10717 { HOSTCC_TXCOL_TICKS, TG3_FL_5705,
10718 0x00000000, 0x000003ff },
10719 { HOSTCC_RXMAX_FRAMES, TG3_FL_NOT_5705,
10720 0x00000000, 0xffffffff },
10721 { HOSTCC_RXMAX_FRAMES, TG3_FL_5705 | TG3_FL_NOT_5788,
10722 0x00000000, 0x000000ff },
10723 { HOSTCC_TXMAX_FRAMES, TG3_FL_NOT_5705,
10724 0x00000000, 0xffffffff },
10725 { HOSTCC_TXMAX_FRAMES, TG3_FL_5705 | TG3_FL_NOT_5788,
10726 0x00000000, 0x000000ff },
10727 { HOSTCC_RXCOAL_TICK_INT, TG3_FL_NOT_5705,
10728 0x00000000, 0xffffffff },
10729 { HOSTCC_TXCOAL_TICK_INT, TG3_FL_NOT_5705,
10730 0x00000000, 0xffffffff },
10731 { HOSTCC_RXCOAL_MAXF_INT, TG3_FL_NOT_5705,
10732 0x00000000, 0xffffffff },
10733 { HOSTCC_RXCOAL_MAXF_INT, TG3_FL_5705 | TG3_FL_NOT_5788,
10734 0x00000000, 0x000000ff },
10735 { HOSTCC_TXCOAL_MAXF_INT, TG3_FL_NOT_5705,
10736 0x00000000, 0xffffffff },
10737 { HOSTCC_TXCOAL_MAXF_INT, TG3_FL_5705 | TG3_FL_NOT_5788,
10738 0x00000000, 0x000000ff },
10739 { HOSTCC_STAT_COAL_TICKS, TG3_FL_NOT_5705,
10740 0x00000000, 0xffffffff },
10741 { HOSTCC_STATS_BLK_HOST_ADDR, TG3_FL_NOT_5705,
10742 0x00000000, 0xffffffff },
10743 { HOSTCC_STATS_BLK_HOST_ADDR+4, TG3_FL_NOT_5705,
10744 0x00000000, 0xffffffff },
10745 { HOSTCC_STATUS_BLK_HOST_ADDR, 0x0000,
10746 0x00000000, 0xffffffff },
10747 { HOSTCC_STATUS_BLK_HOST_ADDR+4, 0x0000,
10748 0x00000000, 0xffffffff },
10749 { HOSTCC_STATS_BLK_NIC_ADDR, 0x0000,
10750 0xffffffff, 0x00000000 },
10751 { HOSTCC_STATUS_BLK_NIC_ADDR, 0x0000,
10752 0xffffffff, 0x00000000 },
10754 /* Buffer Manager Control Registers. */
10755 { BUFMGR_MB_POOL_ADDR, TG3_FL_NOT_5750,
10756 0x00000000, 0x007fff80 },
10757 { BUFMGR_MB_POOL_SIZE, TG3_FL_NOT_5750,
10758 0x00000000, 0x007fffff },
10759 { BUFMGR_MB_RDMA_LOW_WATER, 0x0000,
10760 0x00000000, 0x0000003f },
10761 { BUFMGR_MB_MACRX_LOW_WATER, 0x0000,
10762 0x00000000, 0x000001ff },
10763 { BUFMGR_MB_HIGH_WATER, 0x0000,
10764 0x00000000, 0x000001ff },
10765 { BUFMGR_DMA_DESC_POOL_ADDR, TG3_FL_NOT_5705,
10766 0xffffffff, 0x00000000 },
10767 { BUFMGR_DMA_DESC_POOL_SIZE, TG3_FL_NOT_5705,
10768 0xffffffff, 0x00000000 },
10770 /* Mailbox Registers */
10771 { GRCMBOX_RCVSTD_PROD_IDX+4, 0x0000,
10772 0x00000000, 0x000001ff },
10773 { GRCMBOX_RCVJUMBO_PROD_IDX+4, TG3_FL_NOT_5705,
10774 0x00000000, 0x000001ff },
10775 { GRCMBOX_RCVRET_CON_IDX_0+4, 0x0000,
10776 0x00000000, 0x000007ff },
10777 { GRCMBOX_SNDHOST_PROD_IDX_0+4, 0x0000,
10778 0x00000000, 0x000001ff },
10780 { 0xffff, 0x0000, 0x00000000, 0x00000000 },
10781 };
10783 is_5705 = is_5750 = 0;
10784 if (tg3_flag(tp, 5705_PLUS)) {
10785 is_5705 = 1;
10786 if (tg3_flag(tp, 5750_PLUS))
10787 is_5750 = 1;
10790 for (i = 0; reg_tbl[i].offset != 0xffff; i++) {
10791 if (is_5705 && (reg_tbl[i].flags & TG3_FL_NOT_5705))
10792 continue;
10794 if (!is_5705 && (reg_tbl[i].flags & TG3_FL_5705))
10795 continue;
10797 if (tg3_flag(tp, IS_5788) &&
10798 (reg_tbl[i].flags & TG3_FL_NOT_5788))
10799 continue;
10801 if (is_5750 && (reg_tbl[i].flags & TG3_FL_NOT_5750))
10802 continue;
10804 offset = (u32) reg_tbl[i].offset;
10805 read_mask = reg_tbl[i].read_mask;
10806 write_mask = reg_tbl[i].write_mask;
10808 /* Save the original register content */
10809 save_val = tr32(offset);
10811 /* Determine the read-only value. */
10812 read_val = save_val & read_mask;
10814 /* Write zero to the register, then make sure the read-only bits
10815 * are not changed and the read/write bits are all zeros.
10816 */
10817 tw32(offset, 0);
10819 val = tr32(offset);
10821 /* Test the read-only and read/write bits. */
10822 if (((val & read_mask) != read_val) || (val & write_mask))
10823 goto out;
10825 /* Write ones to all the bits defined by RdMask and WrMask, then
10826 * make sure the read-only bits are not changed and the
10827 * read/write bits are all ones.
10828 */
10829 tw32(offset, read_mask | write_mask);
10831 val = tr32(offset);
10833 /* Test the read-only bits. */
10834 if ((val & read_mask) != read_val)
10835 goto out;
10837 /* Test the read/write bits. */
10838 if ((val & write_mask) != write_mask)
10839 goto out;
10841 tw32(offset, save_val);
10844 return 0;
10846 out:
10847 if (netif_msg_hw(tp))
10848 netdev_err(tp->dev,
10849 "Register test failed at offset %x\n", offset);
10850 tw32(offset, save_val);
10851 return -EIO;
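
/* Condensed, guarded-out restatement of the per-register check above;
 * the parameter names are illustrative.  read_mask selects the
 * read-only bits (they must never change) and write_mask the
 * read/write bits (they must follow whatever was written).
 */
#if 0
static int check_reg(u32 val_after_w0, u32 val_after_w1,
		     u32 read_val, u32 read_mask, u32 write_mask)
{
	/* After writing 0: RO bits intact, RW bits all clear. */
	if ((val_after_w0 & read_mask) != read_val ||
	    (val_after_w0 & write_mask))
		return -1;

	/* After writing read_mask | write_mask: RO intact, RW all set. */
	if ((val_after_w1 & read_mask) != read_val ||
	    (val_after_w1 & write_mask) != write_mask)
		return -1;

	return 0;
}
#endif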
10854 static int tg3_do_mem_test(struct tg3 *tp, u32 offset, u32 len)
10856 static const u32 test_pattern[] = { 0x00000000, 0xffffffff, 0xaa55a55a };
10857 int i;
10858 u32 j;
10860 for (i = 0; i < ARRAY_SIZE(test_pattern); i++) {
10861 for (j = 0; j < len; j += 4) {
10862 u32 val;
10864 tg3_write_mem(tp, offset + j, test_pattern[i]);
10865 tg3_read_mem(tp, offset + j, &val);
10866 if (val != test_pattern[i])
10867 return -EIO;
10870 return 0;
10873 static int tg3_test_memory(struct tg3 *tp)
10875 static struct mem_entry {
10876 u32 offset;
10877 u32 len;
10878 } mem_tbl_570x[] = {
10879 { 0x00000000, 0x00b50},
10880 { 0x00002000, 0x1c000},
10881 { 0xffffffff, 0x00000}
10882 }, mem_tbl_5705[] = {
10883 { 0x00000100, 0x0000c},
10884 { 0x00000200, 0x00008},
10885 { 0x00004000, 0x00800},
10886 { 0x00006000, 0x01000},
10887 { 0x00008000, 0x02000},
10888 { 0x00010000, 0x0e000},
10889 { 0xffffffff, 0x00000}
10890 }, mem_tbl_5755[] = {
10891 { 0x00000200, 0x00008},
10892 { 0x00004000, 0x00800},
10893 { 0x00006000, 0x00800},
10894 { 0x00008000, 0x02000},
10895 { 0x00010000, 0x0c000},
10896 { 0xffffffff, 0x00000}
10897 }, mem_tbl_5906[] = {
10898 { 0x00000200, 0x00008},
10899 { 0x00004000, 0x00400},
10900 { 0x00006000, 0x00400},
10901 { 0x00008000, 0x01000},
10902 { 0x00010000, 0x01000},
10903 { 0xffffffff, 0x00000}
10904 }, mem_tbl_5717[] = {
10905 { 0x00000200, 0x00008},
10906 { 0x00010000, 0x0a000},
10907 { 0x00020000, 0x13c00},
10908 { 0xffffffff, 0x00000}
10909 }, mem_tbl_57765[] = {
10910 { 0x00000200, 0x00008},
10911 { 0x00004000, 0x00800},
10912 { 0x00006000, 0x09800},
10913 { 0x00010000, 0x0a000},
10914 { 0xffffffff, 0x00000}
10915 };
10916 struct mem_entry *mem_tbl;
10917 int err = 0;
10918 int i;
10920 if (tg3_flag(tp, 5717_PLUS))
10921 mem_tbl = mem_tbl_5717;
10922 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765)
10923 mem_tbl = mem_tbl_57765;
10924 else if (tg3_flag(tp, 5755_PLUS))
10925 mem_tbl = mem_tbl_5755;
10926 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
10927 mem_tbl = mem_tbl_5906;
10928 else if (tg3_flag(tp, 5705_PLUS))
10929 mem_tbl = mem_tbl_5705;
10930 else
10931 mem_tbl = mem_tbl_570x;
10933 for (i = 0; mem_tbl[i].offset != 0xffffffff; i++) {
10934 err = tg3_do_mem_test(tp, mem_tbl[i].offset, mem_tbl[i].len);
10935 if (err)
10936 break;
10939 return err;
10942 #define TG3_MAC_LOOPBACK 0
10943 #define TG3_PHY_LOOPBACK 1
10944 #define TG3_TSO_LOOPBACK 2
10946 #define TG3_TSO_MSS 500
10948 #define TG3_TSO_IP_HDR_LEN 20
10949 #define TG3_TSO_TCP_HDR_LEN 20
10950 #define TG3_TSO_TCP_OPT_LEN 12
10952 static const u8 tg3_tso_header[] = {
10953 0x08, 0x00,
10954 0x45, 0x00, 0x00, 0x00,
10955 0x00, 0x00, 0x40, 0x00,
10956 0x40, 0x06, 0x00, 0x00,
10957 0x0a, 0x00, 0x00, 0x01,
10958 0x0a, 0x00, 0x00, 0x02,
10959 0x0d, 0x00, 0xe0, 0x00,
10960 0x00, 0x00, 0x01, 0x00,
10961 0x00, 0x00, 0x02, 0x00,
10962 0x80, 0x10, 0x10, 0x00,
10963 0x14, 0x09, 0x00, 0x00,
10964 0x01, 0x01, 0x08, 0x0a,
10965 0x11, 0x11, 0x11, 0x11,
10966 0x11, 0x11, 0x11, 0x11,
10967 };
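
/* Layout of tg3_tso_header, for reference (the 12 MAC address bytes are
 * filled in separately by tg3_run_loopback()):
 *   0x08 0x00                 EtherType 0x0800 (IPv4)
 *   0x45 ... 0x0a 00 00 02    IPv4: ihl 5 (20 bytes), DF set, TTL 64,
 *                             protocol 6 (TCP), 10.0.0.1 -> 10.0.0.2;
 *                             tot_len is left 0 and patched at runtime
 *   0x0d 0x00 0xe0 0x00 ...   TCP: sport 0x0d00, dport 0xe000, seq
 *                             0x100, ack 0x200, data offset 8 words
 *                             (20 bytes + 12 bytes of options), flags
 *                             ACK, window 0x1000
 *   0x01 0x01 0x08 0x0a ...   options: NOP, NOP, timestamp (kind 8,
 *                             len 10) with placeholder 0x11 bytes
 * The 12 option bytes are what TG3_TSO_TCP_OPT_LEN accounts for.
 */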
10969 static int tg3_run_loopback(struct tg3 *tp, u32 pktsz, int loopback_mode)
10971 u32 mac_mode, rx_start_idx, rx_idx, tx_idx, opaque_key;
10972 u32 base_flags = 0, mss = 0, desc_idx, coal_now, data_off, val;
10973 struct sk_buff *skb, *rx_skb;
10974 u8 *tx_data;
10975 dma_addr_t map;
10976 int num_pkts, tx_len, rx_len, i, err;
10977 struct tg3_rx_buffer_desc *desc;
10978 struct tg3_napi *tnapi, *rnapi;
10979 struct tg3_rx_prodring_set *tpr = &tp->napi[0].prodring;
10981 tnapi = &tp->napi[0];
10982 rnapi = &tp->napi[0];
10983 if (tp->irq_cnt > 1) {
10984 if (tg3_flag(tp, ENABLE_RSS))
10985 rnapi = &tp->napi[1];
10986 if (tg3_flag(tp, ENABLE_TSS))
10987 tnapi = &tp->napi[1];
10989 coal_now = tnapi->coal_now | rnapi->coal_now;
10991 if (loopback_mode == TG3_MAC_LOOPBACK) {
10992 /* HW errata - mac loopback fails in some cases on 5780.
10993 * Normal traffic and PHY loopback are not affected by
10994 * errata. Also, the MAC loopback test is deprecated for
10995 * all newer ASIC revisions.
10996 */
10997 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780 ||
10998 tg3_flag(tp, CPMU_PRESENT))
10999 return 0;
11001 mac_mode = tp->mac_mode &
11002 ~(MAC_MODE_PORT_MODE_MASK | MAC_MODE_HALF_DUPLEX);
11003 mac_mode |= MAC_MODE_PORT_INT_LPBACK;
11004 if (!tg3_flag(tp, 5705_PLUS))
11005 mac_mode |= MAC_MODE_LINK_POLARITY;
11006 if (tp->phy_flags & TG3_PHYFLG_10_100_ONLY)
11007 mac_mode |= MAC_MODE_PORT_MODE_MII;
11008 else
11009 mac_mode |= MAC_MODE_PORT_MODE_GMII;
11010 tw32(MAC_MODE, mac_mode);
11011 } else {
11012 if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
11013 tg3_phy_fet_toggle_apd(tp, false);
11014 val = BMCR_LOOPBACK | BMCR_FULLDPLX | BMCR_SPEED100;
11015 } else
11016 val = BMCR_LOOPBACK | BMCR_FULLDPLX | BMCR_SPEED1000;
11018 tg3_phy_toggle_automdix(tp, 0);
11020 tg3_writephy(tp, MII_BMCR, val);
11021 udelay(40);
11023 mac_mode = tp->mac_mode &
11024 ~(MAC_MODE_PORT_MODE_MASK | MAC_MODE_HALF_DUPLEX);
11025 if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
11026 tg3_writephy(tp, MII_TG3_FET_PTEST,
11027 MII_TG3_FET_PTEST_FRC_TX_LINK |
11028 MII_TG3_FET_PTEST_FRC_TX_LOCK);
11029 /* The write needs to be flushed for the AC131 */
11030 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
11031 tg3_readphy(tp, MII_TG3_FET_PTEST, &val);
11032 mac_mode |= MAC_MODE_PORT_MODE_MII;
11033 } else
11034 mac_mode |= MAC_MODE_PORT_MODE_GMII;
11036 /* reset to prevent losing 1st rx packet intermittently */
11037 if (tp->phy_flags & TG3_PHYFLG_MII_SERDES) {
11038 tw32_f(MAC_RX_MODE, RX_MODE_RESET);
11039 udelay(10);
11040 tw32_f(MAC_RX_MODE, tp->rx_mode);
11042 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700) {
11043 u32 masked_phy_id = tp->phy_id & TG3_PHY_ID_MASK;
11044 if (masked_phy_id == TG3_PHY_ID_BCM5401)
11045 mac_mode &= ~MAC_MODE_LINK_POLARITY;
11046 else if (masked_phy_id == TG3_PHY_ID_BCM5411)
11047 mac_mode |= MAC_MODE_LINK_POLARITY;
11048 tg3_writephy(tp, MII_TG3_EXT_CTRL,
11049 MII_TG3_EXT_CTRL_LNK3_LED_MODE);
11051 tw32(MAC_MODE, mac_mode);
11053 /* Wait for link */
11054 for (i = 0; i < 100; i++) {
11055 if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP)
11056 break;
11057 mdelay(1);
11061 err = -EIO;
11063 tx_len = pktsz;
11064 skb = netdev_alloc_skb(tp->dev, tx_len);
11065 if (!skb)
11066 return -ENOMEM;
11068 tx_data = skb_put(skb, tx_len);
11069 memcpy(tx_data, tp->dev->dev_addr, 6);
11070 memset(tx_data + 6, 0x0, 8);
11072 tw32(MAC_RX_MTU_SIZE, tx_len + ETH_FCS_LEN);
11074 if (loopback_mode == TG3_TSO_LOOPBACK) {
11075 struct iphdr *iph = (struct iphdr *)&tx_data[ETH_HLEN];
11077 u32 hdr_len = TG3_TSO_IP_HDR_LEN + TG3_TSO_TCP_HDR_LEN +
11078 TG3_TSO_TCP_OPT_LEN;
11080 memcpy(tx_data + ETH_ALEN * 2, tg3_tso_header,
11081 sizeof(tg3_tso_header));
11082 mss = TG3_TSO_MSS;
11084 val = tx_len - ETH_ALEN * 2 - sizeof(tg3_tso_header);
11085 num_pkts = DIV_ROUND_UP(val, TG3_TSO_MSS);
11087 /* Set the total length field in the IP header */
11088 iph->tot_len = htons((u16)(mss + hdr_len));
11090 base_flags = (TXD_FLAG_CPU_PRE_DMA |
11091 TXD_FLAG_CPU_POST_DMA);
11093 if (tg3_flag(tp, HW_TSO_1) ||
11094 tg3_flag(tp, HW_TSO_2) ||
11095 tg3_flag(tp, HW_TSO_3)) {
11096 struct tcphdr *th;
11097 val = ETH_HLEN + TG3_TSO_IP_HDR_LEN;
11098 th = (struct tcphdr *)&tx_data[val];
11099 th->check = 0;
11100 } else
11101 base_flags |= TXD_FLAG_TCPUDP_CSUM;
11103 if (tg3_flag(tp, HW_TSO_3)) {
11104 mss |= (hdr_len & 0xc) << 12;
11105 if (hdr_len & 0x10)
11106 base_flags |= 0x00000010;
11107 base_flags |= (hdr_len & 0x3e0) << 5;
11108 } else if (tg3_flag(tp, HW_TSO_2))
11109 mss |= hdr_len << 9;
11110 else if (tg3_flag(tp, HW_TSO_1) ||
11111 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
11112 mss |= (TG3_TSO_TCP_OPT_LEN << 9);
11113 } else {
11114 base_flags |= (TG3_TSO_TCP_OPT_LEN << 10);
11117 data_off = ETH_ALEN * 2 + sizeof(tg3_tso_header);
11118 } else {
11119 num_pkts = 1;
11120 data_off = ETH_HLEN;
11123 for (i = data_off; i < tx_len; i++)
11124 tx_data[i] = (u8) (i & 0xff);
11126 map = pci_map_single(tp->pdev, skb->data, tx_len, PCI_DMA_TODEVICE);
11127 if (pci_dma_mapping_error(tp->pdev, map)) {
11128 dev_kfree_skb(skb);
11129 return -EIO;
11132 tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
11133 rnapi->coal_now);
11135 udelay(10);
11137 rx_start_idx = rnapi->hw_status->idx[0].rx_producer;
11139 tg3_set_txd(tnapi, tnapi->tx_prod, map, tx_len,
11140 base_flags, (mss << 1) | 1);
11142 tnapi->tx_prod++;
11144 tw32_tx_mbox(tnapi->prodmbox, tnapi->tx_prod);
11145 tr32_mailbox(tnapi->prodmbox);
11147 udelay(10);
11149 /* 350 usec to allow enough time on some 10/100 Mbps devices. */
11150 for (i = 0; i < 35; i++) {
11151 tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
11152 coal_now);
11154 udelay(10);
11156 tx_idx = tnapi->hw_status->idx[0].tx_consumer;
11157 rx_idx = rnapi->hw_status->idx[0].rx_producer;
11158 if ((tx_idx == tnapi->tx_prod) &&
11159 (rx_idx == (rx_start_idx + num_pkts)))
11160 break;
11163 pci_unmap_single(tp->pdev, map, tx_len, PCI_DMA_TODEVICE);
11164 dev_kfree_skb(skb);
11166 if (tx_idx != tnapi->tx_prod)
11167 goto out;
11169 if (rx_idx != rx_start_idx + num_pkts)
11170 goto out;
11172 val = data_off;
11173 while (rx_idx != rx_start_idx) {
11174 desc = &rnapi->rx_rcb[rx_start_idx++];
11175 desc_idx = desc->opaque & RXD_OPAQUE_INDEX_MASK;
11176 opaque_key = desc->opaque & RXD_OPAQUE_RING_MASK;
11178 if ((desc->err_vlan & RXD_ERR_MASK) != 0 &&
11179 (desc->err_vlan != RXD_ERR_ODD_NIBBLE_RCVD_MII))
11180 goto out;
11182 rx_len = ((desc->idx_len & RXD_LEN_MASK) >> RXD_LEN_SHIFT)
11183 - ETH_FCS_LEN;
11185 if (loopback_mode != TG3_TSO_LOOPBACK) {
11186 if (rx_len != tx_len)
11187 goto out;
11189 if (pktsz <= TG3_RX_STD_DMA_SZ - ETH_FCS_LEN) {
11190 if (opaque_key != RXD_OPAQUE_RING_STD)
11191 goto out;
11192 } else {
11193 if (opaque_key != RXD_OPAQUE_RING_JUMBO)
11194 goto out;
11196 } else if ((desc->type_flags & RXD_FLAG_TCPUDP_CSUM) &&
11197 (desc->ip_tcp_csum & RXD_TCPCSUM_MASK)
11198 >> RXD_TCPCSUM_SHIFT != 0xffff) {
11199 goto out;
11202 if (opaque_key == RXD_OPAQUE_RING_STD) {
11203 rx_skb = tpr->rx_std_buffers[desc_idx].skb;
11204 map = dma_unmap_addr(&tpr->rx_std_buffers[desc_idx],
11205 mapping);
11206 } else if (opaque_key == RXD_OPAQUE_RING_JUMBO) {
11207 rx_skb = tpr->rx_jmb_buffers[desc_idx].skb;
11208 map = dma_unmap_addr(&tpr->rx_jmb_buffers[desc_idx],
11209 mapping);
11210 } else
11211 goto out;
11213 pci_dma_sync_single_for_cpu(tp->pdev, map, rx_len,
11214 PCI_DMA_FROMDEVICE);
11216 for (i = data_off; i < rx_len; i++, val++) {
11217 if (*(rx_skb->data + i) != (u8) (val & 0xff))
11218 goto out;
11222 err = 0;
11224 /* tg3_free_rings will unmap and free the rx_skb */
11225 out:
11226 return err;
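
/* In the tg3_set_txd() call above, the final argument packs two fields:
 * bit 0 marks the last descriptor of the frame and the remaining bits
 * carry the MSS, hence (mss << 1) | 1 for this single-descriptor test
 * packet.  In the non-TSO modes mss is 0, so the word is simply 1.
 */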
11229 #define TG3_STD_LOOPBACK_FAILED 1
11230 #define TG3_JMB_LOOPBACK_FAILED 2
11231 #define TG3_TSO_LOOPBACK_FAILED 4
11233 #define TG3_MAC_LOOPBACK_SHIFT 0
11234 #define TG3_PHY_LOOPBACK_SHIFT 4
11235 #define TG3_LOOPBACK_FAILED 0x00000077
11237 static int tg3_test_loopback(struct tg3 *tp)
11239 int err = 0;
11240 u32 eee_cap, cpmuctrl = 0;
11242 if (!netif_running(tp->dev))
11243 return TG3_LOOPBACK_FAILED;
11245 eee_cap = tp->phy_flags & TG3_PHYFLG_EEE_CAP;
11246 tp->phy_flags &= ~TG3_PHYFLG_EEE_CAP;
11248 err = tg3_reset_hw(tp, 1);
11249 if (err) {
11250 err = TG3_LOOPBACK_FAILED;
11251 goto done;
11254 if (tg3_flag(tp, ENABLE_RSS)) {
11255 int i;
11257 /* Reroute all rx packets to the 1st queue */
11258 for (i = MAC_RSS_INDIR_TBL_0;
11259 i < MAC_RSS_INDIR_TBL_0 + TG3_RSS_INDIR_TBL_SIZE; i += 4)
11260 tw32(i, 0x0);
11263 /* Turn off gphy autopowerdown. */
11264 if (tp->phy_flags & TG3_PHYFLG_ENABLE_APD)
11265 tg3_phy_toggle_apd(tp, false);
11267 if (tg3_flag(tp, CPMU_PRESENT)) {
11268 int i;
11269 u32 status;
11271 tw32(TG3_CPMU_MUTEX_REQ, CPMU_MUTEX_REQ_DRIVER);
11273 /* Wait for up to 40 microseconds to acquire lock. */
11274 for (i = 0; i < 4; i++) {
11275 status = tr32(TG3_CPMU_MUTEX_GNT);
11276 if (status == CPMU_MUTEX_GNT_DRIVER)
11277 break;
11278 udelay(10);
11281 if (status != CPMU_MUTEX_GNT_DRIVER) {
11282 err = TG3_LOOPBACK_FAILED;
11283 goto done;
11286 /* Turn off link-based power management. */
11287 cpmuctrl = tr32(TG3_CPMU_CTRL);
11288 tw32(TG3_CPMU_CTRL,
11289 cpmuctrl & ~(CPMU_CTRL_LINK_SPEED_MODE |
11290 CPMU_CTRL_LINK_AWARE_MODE));
11293 if (tg3_run_loopback(tp, ETH_FRAME_LEN, TG3_MAC_LOOPBACK))
11294 err |= TG3_STD_LOOPBACK_FAILED << TG3_MAC_LOOPBACK_SHIFT;
11296 if (tg3_flag(tp, JUMBO_RING_ENABLE) &&
11297 tg3_run_loopback(tp, 9000 + ETH_HLEN, TG3_MAC_LOOPBACK))
11298 err |= TG3_JMB_LOOPBACK_FAILED << TG3_MAC_LOOPBACK_SHIFT;
11300 if (tg3_flag(tp, CPMU_PRESENT)) {
11301 tw32(TG3_CPMU_CTRL, cpmuctrl);
11303 /* Release the mutex */
11304 tw32(TG3_CPMU_MUTEX_GNT, CPMU_MUTEX_GNT_DRIVER);
11307 if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
11308 !tg3_flag(tp, USE_PHYLIB)) {
11309 if (tg3_run_loopback(tp, ETH_FRAME_LEN, TG3_PHY_LOOPBACK))
11310 err |= TG3_STD_LOOPBACK_FAILED <<
11311 TG3_PHY_LOOPBACK_SHIFT;
11312 if (tg3_flag(tp, TSO_CAPABLE) &&
11313 tg3_run_loopback(tp, ETH_FRAME_LEN, TG3_TSO_LOOPBACK))
11314 err |= TG3_TSO_LOOPBACK_FAILED <<
11315 TG3_PHY_LOOPBACK_SHIFT;
11316 if (tg3_flag(tp, JUMBO_RING_ENABLE) &&
11317 tg3_run_loopback(tp, 9000 + ETH_HLEN, TG3_PHY_LOOPBACK))
11318 err |= TG3_JMB_LOOPBACK_FAILED <<
11319 TG3_PHY_LOOPBACK_SHIFT;
11322 /* Re-enable gphy autopowerdown. */
11323 if (tp->phy_flags & TG3_PHYFLG_ENABLE_APD)
11324 tg3_phy_toggle_apd(tp, true);
11326 done:
11327 tp->phy_flags |= eee_cap;
11329 return err;
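
/* The value returned above is a bitmap: bits 0-2 hold the MAC loopback
 * results (TG3_MAC_LOOPBACK_SHIFT == 0) and bits 4-6 the PHY loopback
 * results (TG3_PHY_LOOPBACK_SHIFT == 4).  Within each nibble, 1 means
 * the standard frame failed, 2 the jumbo frame, 4 the TSO frame, which
 * is why TG3_LOOPBACK_FAILED is 0x77 ("everything failed").  A
 * guarded-out decoder:
 */
#if 0
static void tg3_decode_loopback_err(u32 err)
{
	static const char * const kind[] = { "std", "jumbo", "tso" };
	int i;

	for (i = 0; i < 3; i++) {
		if (err & (1 << (i + TG3_MAC_LOOPBACK_SHIFT)))
			pr_info("mac loopback (%s) failed\n", kind[i]);
		if (err & (1 << (i + TG3_PHY_LOOPBACK_SHIFT)))
			pr_info("phy loopback (%s) failed\n", kind[i]);
	}
}
#endif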
11332 static void tg3_self_test(struct net_device *dev, struct ethtool_test *etest,
11333 u64 *data)
11335 struct tg3 *tp = netdev_priv(dev);
11337 if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
11338 tg3_power_up(tp);
11340 memset(data, 0, sizeof(u64) * TG3_NUM_TEST);
11342 if (tg3_test_nvram(tp) != 0) {
11343 etest->flags |= ETH_TEST_FL_FAILED;
11344 data[0] = 1;
11346 if (tg3_test_link(tp) != 0) {
11347 etest->flags |= ETH_TEST_FL_FAILED;
11348 data[1] = 1;
11350 if (etest->flags & ETH_TEST_FL_OFFLINE) {
11351 int err, err2 = 0, irq_sync = 0;
11353 if (netif_running(dev)) {
11354 tg3_phy_stop(tp);
11355 tg3_netif_stop(tp);
11356 irq_sync = 1;
11359 tg3_full_lock(tp, irq_sync);
11361 tg3_halt(tp, RESET_KIND_SUSPEND, 1);
11362 err = tg3_nvram_lock(tp);
11363 tg3_halt_cpu(tp, RX_CPU_BASE);
11364 if (!tg3_flag(tp, 5705_PLUS))
11365 tg3_halt_cpu(tp, TX_CPU_BASE);
11366 if (!err)
11367 tg3_nvram_unlock(tp);
11369 if (tp->phy_flags & TG3_PHYFLG_MII_SERDES)
11370 tg3_phy_reset(tp);
11372 if (tg3_test_registers(tp) != 0) {
11373 etest->flags |= ETH_TEST_FL_FAILED;
11374 data[2] = 1;
11376 if (tg3_test_memory(tp) != 0) {
11377 etest->flags |= ETH_TEST_FL_FAILED;
11378 data[3] = 1;
11380 if ((data[4] = tg3_test_loopback(tp)) != 0)
11381 etest->flags |= ETH_TEST_FL_FAILED;
11383 tg3_full_unlock(tp);
11385 if (tg3_test_interrupt(tp) != 0) {
11386 etest->flags |= ETH_TEST_FL_FAILED;
11387 data[5] = 1;
11390 tg3_full_lock(tp, 0);
11392 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
11393 if (netif_running(dev)) {
11394 tg3_flag_set(tp, INIT_COMPLETE);
11395 err2 = tg3_restart_hw(tp, 1);
11396 if (!err2)
11397 tg3_netif_start(tp);
11400 tg3_full_unlock(tp);
11402 if (irq_sync && !err2)
11403 tg3_phy_start(tp);
11405 if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
11406 tg3_power_down(tp);
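
/* tg3_self_test() is reached from userspace via the ETHTOOL_TEST ioctl
 * (what "ethtool -t" issues).  A minimal guarded-out sketch; the six
 * result slots mirror data[0]..data[5] above, though a real caller
 * would query the count through the string-set interface instead of
 * hard-coding it.
 */
#if 0
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <net/if.h>
#include <linux/ethtool.h>
#include <linux/sockios.h>

static int run_selftest(const char *ifname)
{
	struct ethtool_test *t;
	struct ifreq ifr;
	int fd, i, failed;

	t = calloc(1, sizeof(*t) + 6 * sizeof(__u64));
	fd = socket(AF_INET, SOCK_DGRAM, 0);
	if (!t || fd < 0)
		return -1;

	t->cmd = ETHTOOL_TEST;
	t->flags = ETH_TEST_FL_OFFLINE;

	memset(&ifr, 0, sizeof(ifr));
	strncpy(ifr.ifr_name, ifname, IFNAMSIZ - 1);
	ifr.ifr_data = (void *)t;

	if (ioctl(fd, SIOCETHTOOL, &ifr) < 0)
		return -1;

	for (i = 0; i < 6; i++)
		printf("test %d: %s\n", i, t->data[i] ? "FAIL" : "pass");
	failed = !!(t->flags & ETH_TEST_FL_FAILED);
	free(t);
	close(fd);
	return failed;
}
#endif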
11410 static int tg3_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
11412 struct mii_ioctl_data *data = if_mii(ifr);
11413 struct tg3 *tp = netdev_priv(dev);
11414 int err;
11416 if (tg3_flag(tp, USE_PHYLIB)) {
11417 struct phy_device *phydev;
11418 if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
11419 return -EAGAIN;
11420 phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
11421 return phy_mii_ioctl(phydev, ifr, cmd);
11424 switch (cmd) {
11425 case SIOCGMIIPHY:
11426 data->phy_id = tp->phy_addr;
11428 /* fallthru */
11429 case SIOCGMIIREG: {
11430 u32 mii_regval;
11432 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
11433 break; /* We have no PHY */
11435 if (!netif_running(dev))
11436 return -EAGAIN;
11438 spin_lock_bh(&tp->lock);
11439 err = tg3_readphy(tp, data->reg_num & 0x1f, &mii_regval);
11440 spin_unlock_bh(&tp->lock);
11442 data->val_out = mii_regval;
11444 return err;
11447 case SIOCSMIIREG:
11448 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
11449 break; /* We have no PHY */
11451 if (!netif_running(dev))
11452 return -EAGAIN;
11454 spin_lock_bh(&tp->lock);
11455 err = tg3_writephy(tp, data->reg_num & 0x1f, data->val_in);
11456 spin_unlock_bh(&tp->lock);
11458 return err;
11460 default:
11461 /* do nothing */
11462 break;
11464 return -EOPNOTSUPP;
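
/* The SIOCGMIIPHY/SIOCGMIIREG path above is reachable from userspace
 * with a plain ifreq (if_mii() simply aliases ifr_ifru).  A guarded-out
 * sketch reading the PHY's BMSR; the interface name is illustrative.
 */
#if 0
#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <net/if.h>
#include <linux/mii.h>
#include <linux/sockios.h>

static int read_bmsr(const char *ifname)
{
	struct ifreq ifr;
	struct mii_ioctl_data *mii = (struct mii_ioctl_data *)&ifr.ifr_ifru;
	int fd = socket(AF_INET, SOCK_DGRAM, 0);

	if (fd < 0)
		return -1;

	memset(&ifr, 0, sizeof(ifr));
	strncpy(ifr.ifr_name, ifname, IFNAMSIZ - 1);

	if (ioctl(fd, SIOCGMIIPHY, &ifr) < 0)	/* fills mii->phy_id */
		return -1;

	mii->reg_num = MII_BMSR;
	if (ioctl(fd, SIOCGMIIREG, &ifr) < 0)
		return -1;

	printf("BMSR = 0x%04x\n", mii->val_out);
	close(fd);
	return 0;
}
#endif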
11467 static int tg3_get_coalesce(struct net_device *dev, struct ethtool_coalesce *ec)
11469 struct tg3 *tp = netdev_priv(dev);
11471 memcpy(ec, &tp->coal, sizeof(*ec));
11472 return 0;
11475 static int tg3_set_coalesce(struct net_device *dev, struct ethtool_coalesce *ec)
11477 struct tg3 *tp = netdev_priv(dev);
11478 u32 max_rxcoal_tick_int = 0, max_txcoal_tick_int = 0;
11479 u32 max_stat_coal_ticks = 0, min_stat_coal_ticks = 0;
11481 if (!tg3_flag(tp, 5705_PLUS)) {
11482 max_rxcoal_tick_int = MAX_RXCOAL_TICK_INT;
11483 max_txcoal_tick_int = MAX_TXCOAL_TICK_INT;
11484 max_stat_coal_ticks = MAX_STAT_COAL_TICKS;
11485 min_stat_coal_ticks = MIN_STAT_COAL_TICKS;
11488 if ((ec->rx_coalesce_usecs > MAX_RXCOL_TICKS) ||
11489 (ec->tx_coalesce_usecs > MAX_TXCOL_TICKS) ||
11490 (ec->rx_max_coalesced_frames > MAX_RXMAX_FRAMES) ||
11491 (ec->tx_max_coalesced_frames > MAX_TXMAX_FRAMES) ||
11492 (ec->rx_coalesce_usecs_irq > max_rxcoal_tick_int) ||
11493 (ec->tx_coalesce_usecs_irq > max_txcoal_tick_int) ||
11494 (ec->rx_max_coalesced_frames_irq > MAX_RXCOAL_MAXF_INT) ||
11495 (ec->tx_max_coalesced_frames_irq > MAX_TXCOAL_MAXF_INT) ||
11496 (ec->stats_block_coalesce_usecs > max_stat_coal_ticks) ||
11497 (ec->stats_block_coalesce_usecs < min_stat_coal_ticks))
11498 return -EINVAL;
11500 /* No rx interrupts will be generated if both are zero */
11501 if ((ec->rx_coalesce_usecs == 0) &&
11502 (ec->rx_max_coalesced_frames == 0))
11503 return -EINVAL;
11505 /* No tx interrupts will be generated if both are zero */
11506 if ((ec->tx_coalesce_usecs == 0) &&
11507 (ec->tx_max_coalesced_frames == 0))
11508 return -EINVAL;
11510 /* Only copy relevant parameters, ignore all others. */
11511 tp->coal.rx_coalesce_usecs = ec->rx_coalesce_usecs;
11512 tp->coal.tx_coalesce_usecs = ec->tx_coalesce_usecs;
11513 tp->coal.rx_max_coalesced_frames = ec->rx_max_coalesced_frames;
11514 tp->coal.tx_max_coalesced_frames = ec->tx_max_coalesced_frames;
11515 tp->coal.rx_coalesce_usecs_irq = ec->rx_coalesce_usecs_irq;
11516 tp->coal.tx_coalesce_usecs_irq = ec->tx_coalesce_usecs_irq;
11517 tp->coal.rx_max_coalesced_frames_irq = ec->rx_max_coalesced_frames_irq;
11518 tp->coal.tx_max_coalesced_frames_irq = ec->tx_max_coalesced_frames_irq;
11519 tp->coal.stats_block_coalesce_usecs = ec->stats_block_coalesce_usecs;
11521 if (netif_running(dev)) {
11522 tg3_full_lock(tp, 0);
11523 __tg3_set_coalesce(tp, &tp->coal);
11524 tg3_full_unlock(tp);
11526 return 0;
11529 static const struct ethtool_ops tg3_ethtool_ops = {
11530 .get_settings = tg3_get_settings,
11531 .set_settings = tg3_set_settings,
11532 .get_drvinfo = tg3_get_drvinfo,
11533 .get_regs_len = tg3_get_regs_len,
11534 .get_regs = tg3_get_regs,
11535 .get_wol = tg3_get_wol,
11536 .set_wol = tg3_set_wol,
11537 .get_msglevel = tg3_get_msglevel,
11538 .set_msglevel = tg3_set_msglevel,
11539 .nway_reset = tg3_nway_reset,
11540 .get_link = ethtool_op_get_link,
11541 .get_eeprom_len = tg3_get_eeprom_len,
11542 .get_eeprom = tg3_get_eeprom,
11543 .set_eeprom = tg3_set_eeprom,
11544 .get_ringparam = tg3_get_ringparam,
11545 .set_ringparam = tg3_set_ringparam,
11546 .get_pauseparam = tg3_get_pauseparam,
11547 .set_pauseparam = tg3_set_pauseparam,
11548 .self_test = tg3_self_test,
11549 .get_strings = tg3_get_strings,
11550 .set_phys_id = tg3_set_phys_id,
11551 .get_ethtool_stats = tg3_get_ethtool_stats,
11552 .get_coalesce = tg3_get_coalesce,
11553 .set_coalesce = tg3_set_coalesce,
11554 .get_sset_count = tg3_get_sset_count,
11555 };
11557 static void __devinit tg3_get_eeprom_size(struct tg3 *tp)
11559 u32 cursize, val, magic;
11561 tp->nvram_size = EEPROM_CHIP_SIZE;
11563 if (tg3_nvram_read(tp, 0, &magic) != 0)
11564 return;
11566 if ((magic != TG3_EEPROM_MAGIC) &&
11567 ((magic & TG3_EEPROM_MAGIC_FW_MSK) != TG3_EEPROM_MAGIC_FW) &&
11568 ((magic & TG3_EEPROM_MAGIC_HW_MSK) != TG3_EEPROM_MAGIC_HW))
11569 return;
11571 /*
11572 * Size the chip by reading offsets at increasing powers of two.
11573 * When we encounter our validation signature, we know the addressing
11574 * has wrapped around, and thus have our chip size.
11575 */
11576 cursize = 0x10;
11578 while (cursize < tp->nvram_size) {
11579 if (tg3_nvram_read(tp, cursize, &val) != 0)
11580 return;
11582 if (val == magic)
11583 break;
11585 cursize <<= 1;
11588 tp->nvram_size = cursize;
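
/* The loop above exploits address wrap-around: a part smaller than the
 * probe offset mirrors its contents, so the first power-of-two offset
 * that reads back the magic signature again is the device size.  A
 * guarded-out restatement with the NVRAM accessor abstracted away:
 */
#if 0
static u32 probe_nvram_size(u32 magic, u32 limit,
			    int (*read_fn)(u32 offset, u32 *val))
{
	u32 cursize = 0x10, val;

	while (cursize < limit) {
		if (read_fn(cursize, &val))
			return 0;	/* read error: give up */
		if (val == magic)
			break;		/* wrapped around to offset 0 */
		cursize <<= 1;
	}

	return cursize;
}
#endif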
11591 static void __devinit tg3_get_nvram_size(struct tg3 *tp)
11593 u32 val;
11595 if (tg3_flag(tp, NO_NVRAM) || tg3_nvram_read(tp, 0, &val) != 0)
11596 return;
11598 /* Selfboot format */
11599 if (val != TG3_EEPROM_MAGIC) {
11600 tg3_get_eeprom_size(tp);
11601 return;
11604 if (tg3_nvram_read(tp, 0xf0, &val) == 0) {
11605 if (val != 0) {
11606 /* This is confusing. We want to operate on the
11607 * 16-bit value at offset 0xf2. The tg3_nvram_read()
11608 * call will read from NVRAM and byteswap the data
11609 * according to the byteswapping settings for all
11610 * other register accesses. This ensures the data we
11611 * want will always reside in the lower 16-bits.
11612 * However, the data in NVRAM is in LE format, which
11613 * means the data from the NVRAM read will always be
11614 * opposite the endianness of the CPU. The 16-bit
11615 * byteswap then brings the data to CPU endianness.
11616 */
11617 tp->nvram_size = swab16((u16)(val & 0x0000ffff)) * 1024;
11618 return;
11621 tp->nvram_size = TG3_NVRAM_SIZE_512KB;
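
/* Worked example of the swab16() above: if the 16-bit size field is
 * 512 (0x0200), the byteswapped NVRAM read leaves 0x0002 in the low
 * half of val, swab16(0x0002) == 0x0200 == 512, and 512 * 1024 yields
 * a 512 KB part.
 */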
11624 static void __devinit tg3_get_nvram_info(struct tg3 *tp)
11626 u32 nvcfg1;
11628 nvcfg1 = tr32(NVRAM_CFG1);
11629 if (nvcfg1 & NVRAM_CFG1_FLASHIF_ENAB) {
11630 tg3_flag_set(tp, FLASH);
11631 } else {
11632 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
11633 tw32(NVRAM_CFG1, nvcfg1);
11636 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750) ||
11637 tg3_flag(tp, 5780_CLASS)) {
11638 switch (nvcfg1 & NVRAM_CFG1_VENDOR_MASK) {
11639 case FLASH_VENDOR_ATMEL_FLASH_BUFFERED:
11640 tp->nvram_jedecnum = JEDEC_ATMEL;
11641 tp->nvram_pagesize = ATMEL_AT45DB0X1B_PAGE_SIZE;
11642 tg3_flag_set(tp, NVRAM_BUFFERED);
11643 break;
11644 case FLASH_VENDOR_ATMEL_FLASH_UNBUFFERED:
11645 tp->nvram_jedecnum = JEDEC_ATMEL;
11646 tp->nvram_pagesize = ATMEL_AT25F512_PAGE_SIZE;
11647 break;
11648 case FLASH_VENDOR_ATMEL_EEPROM:
11649 tp->nvram_jedecnum = JEDEC_ATMEL;
11650 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
11651 tg3_flag_set(tp, NVRAM_BUFFERED);
11652 break;
11653 case FLASH_VENDOR_ST:
11654 tp->nvram_jedecnum = JEDEC_ST;
11655 tp->nvram_pagesize = ST_M45PEX0_PAGE_SIZE;
11656 tg3_flag_set(tp, NVRAM_BUFFERED);
11657 break;
11658 case FLASH_VENDOR_SAIFUN:
11659 tp->nvram_jedecnum = JEDEC_SAIFUN;
11660 tp->nvram_pagesize = SAIFUN_SA25F0XX_PAGE_SIZE;
11661 break;
11662 case FLASH_VENDOR_SST_SMALL:
11663 case FLASH_VENDOR_SST_LARGE:
11664 tp->nvram_jedecnum = JEDEC_SST;
11665 tp->nvram_pagesize = SST_25VF0X0_PAGE_SIZE;
11666 break;
11668 } else {
11669 tp->nvram_jedecnum = JEDEC_ATMEL;
11670 tp->nvram_pagesize = ATMEL_AT45DB0X1B_PAGE_SIZE;
11671 tg3_flag_set(tp, NVRAM_BUFFERED);
11675 static void __devinit tg3_nvram_get_pagesize(struct tg3 *tp, u32 nvmcfg1)
11677 switch (nvmcfg1 & NVRAM_CFG1_5752PAGE_SIZE_MASK) {
11678 case FLASH_5752PAGE_SIZE_256:
11679 tp->nvram_pagesize = 256;
11680 break;
11681 case FLASH_5752PAGE_SIZE_512:
11682 tp->nvram_pagesize = 512;
11683 break;
11684 case FLASH_5752PAGE_SIZE_1K:
11685 tp->nvram_pagesize = 1024;
11686 break;
11687 case FLASH_5752PAGE_SIZE_2K:
11688 tp->nvram_pagesize = 2048;
11689 break;
11690 case FLASH_5752PAGE_SIZE_4K:
11691 tp->nvram_pagesize = 4096;
11692 break;
11693 case FLASH_5752PAGE_SIZE_264:
11694 tp->nvram_pagesize = 264;
11695 break;
11696 case FLASH_5752PAGE_SIZE_528:
11697 tp->nvram_pagesize = 528;
11698 break;
11702 static void __devinit tg3_get_5752_nvram_info(struct tg3 *tp)
11704 u32 nvcfg1;
11706 nvcfg1 = tr32(NVRAM_CFG1);
11708 /* NVRAM protection for TPM */
11709 if (nvcfg1 & (1 << 27))
11710 tg3_flag_set(tp, PROTECTED_NVRAM);
11712 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
11713 case FLASH_5752VENDOR_ATMEL_EEPROM_64KHZ:
11714 case FLASH_5752VENDOR_ATMEL_EEPROM_376KHZ:
11715 tp->nvram_jedecnum = JEDEC_ATMEL;
11716 tg3_flag_set(tp, NVRAM_BUFFERED);
11717 break;
11718 case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
11719 tp->nvram_jedecnum = JEDEC_ATMEL;
11720 tg3_flag_set(tp, NVRAM_BUFFERED);
11721 tg3_flag_set(tp, FLASH);
11722 break;
11723 case FLASH_5752VENDOR_ST_M45PE10:
11724 case FLASH_5752VENDOR_ST_M45PE20:
11725 case FLASH_5752VENDOR_ST_M45PE40:
11726 tp->nvram_jedecnum = JEDEC_ST;
11727 tg3_flag_set(tp, NVRAM_BUFFERED);
11728 tg3_flag_set(tp, FLASH);
11729 break;
11732 if (tg3_flag(tp, FLASH)) {
11733 tg3_nvram_get_pagesize(tp, nvcfg1);
11734 } else {
11735 /* For eeprom, set pagesize to maximum eeprom size */
11736 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
11738 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
11739 tw32(NVRAM_CFG1, nvcfg1);
11743 static void __devinit tg3_get_5755_nvram_info(struct tg3 *tp)
11745 u32 nvcfg1, protect = 0;
11747 nvcfg1 = tr32(NVRAM_CFG1);
11749 /* NVRAM protection for TPM */
11750 if (nvcfg1 & (1 << 27)) {
11751 tg3_flag_set(tp, PROTECTED_NVRAM);
11752 protect = 1;
11755 nvcfg1 &= NVRAM_CFG1_5752VENDOR_MASK;
11756 switch (nvcfg1) {
11757 case FLASH_5755VENDOR_ATMEL_FLASH_1:
11758 case FLASH_5755VENDOR_ATMEL_FLASH_2:
11759 case FLASH_5755VENDOR_ATMEL_FLASH_3:
11760 case FLASH_5755VENDOR_ATMEL_FLASH_5:
11761 tp->nvram_jedecnum = JEDEC_ATMEL;
11762 tg3_flag_set(tp, NVRAM_BUFFERED);
11763 tg3_flag_set(tp, FLASH);
11764 tp->nvram_pagesize = 264;
11765 if (nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_1 ||
11766 nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_5)
11767 tp->nvram_size = (protect ? 0x3e200 :
11768 TG3_NVRAM_SIZE_512KB);
11769 else if (nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_2)
11770 tp->nvram_size = (protect ? 0x1f200 :
11771 TG3_NVRAM_SIZE_256KB);
11772 else
11773 tp->nvram_size = (protect ? 0x1f200 :
11774 TG3_NVRAM_SIZE_128KB);
11775 break;
11776 case FLASH_5752VENDOR_ST_M45PE10:
11777 case FLASH_5752VENDOR_ST_M45PE20:
11778 case FLASH_5752VENDOR_ST_M45PE40:
11779 tp->nvram_jedecnum = JEDEC_ST;
11780 tg3_flag_set(tp, NVRAM_BUFFERED);
11781 tg3_flag_set(tp, FLASH);
11782 tp->nvram_pagesize = 256;
11783 if (nvcfg1 == FLASH_5752VENDOR_ST_M45PE10)
11784 tp->nvram_size = (protect ?
11785 TG3_NVRAM_SIZE_64KB :
11786 TG3_NVRAM_SIZE_128KB);
11787 else if (nvcfg1 == FLASH_5752VENDOR_ST_M45PE20)
11788 tp->nvram_size = (protect ?
11789 TG3_NVRAM_SIZE_64KB :
11790 TG3_NVRAM_SIZE_256KB);
11791 else
11792 tp->nvram_size = (protect ?
11793 TG3_NVRAM_SIZE_128KB :
11794 TG3_NVRAM_SIZE_512KB);
11795 break;
11799 static void __devinit tg3_get_5787_nvram_info(struct tg3 *tp)
11801 u32 nvcfg1;
11803 nvcfg1 = tr32(NVRAM_CFG1);
11805 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
11806 case FLASH_5787VENDOR_ATMEL_EEPROM_64KHZ:
11807 case FLASH_5787VENDOR_ATMEL_EEPROM_376KHZ:
11808 case FLASH_5787VENDOR_MICRO_EEPROM_64KHZ:
11809 case FLASH_5787VENDOR_MICRO_EEPROM_376KHZ:
11810 tp->nvram_jedecnum = JEDEC_ATMEL;
11811 tg3_flag_set(tp, NVRAM_BUFFERED);
11812 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
11814 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
11815 tw32(NVRAM_CFG1, nvcfg1);
11816 break;
11817 case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
11818 case FLASH_5755VENDOR_ATMEL_FLASH_1:
11819 case FLASH_5755VENDOR_ATMEL_FLASH_2:
11820 case FLASH_5755VENDOR_ATMEL_FLASH_3:
11821 tp->nvram_jedecnum = JEDEC_ATMEL;
11822 tg3_flag_set(tp, NVRAM_BUFFERED);
11823 tg3_flag_set(tp, FLASH);
11824 tp->nvram_pagesize = 264;
11825 break;
11826 case FLASH_5752VENDOR_ST_M45PE10:
11827 case FLASH_5752VENDOR_ST_M45PE20:
11828 case FLASH_5752VENDOR_ST_M45PE40:
11829 tp->nvram_jedecnum = JEDEC_ST;
11830 tg3_flag_set(tp, NVRAM_BUFFERED);
11831 tg3_flag_set(tp, FLASH);
11832 tp->nvram_pagesize = 256;
11833 break;
11837 static void __devinit tg3_get_5761_nvram_info(struct tg3 *tp)
11839 u32 nvcfg1, protect = 0;
11841 nvcfg1 = tr32(NVRAM_CFG1);
11843 /* NVRAM protection for TPM */
11844 if (nvcfg1 & (1 << 27)) {
11845 tg3_flag_set(tp, PROTECTED_NVRAM);
11846 protect = 1;
11849 nvcfg1 &= NVRAM_CFG1_5752VENDOR_MASK;
11850 switch (nvcfg1) {
11851 case FLASH_5761VENDOR_ATMEL_ADB021D:
11852 case FLASH_5761VENDOR_ATMEL_ADB041D:
11853 case FLASH_5761VENDOR_ATMEL_ADB081D:
11854 case FLASH_5761VENDOR_ATMEL_ADB161D:
11855 case FLASH_5761VENDOR_ATMEL_MDB021D:
11856 case FLASH_5761VENDOR_ATMEL_MDB041D:
11857 case FLASH_5761VENDOR_ATMEL_MDB081D:
11858 case FLASH_5761VENDOR_ATMEL_MDB161D:
11859 tp->nvram_jedecnum = JEDEC_ATMEL;
11860 tg3_flag_set(tp, NVRAM_BUFFERED);
11861 tg3_flag_set(tp, FLASH);
11862 tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS);
11863 tp->nvram_pagesize = 256;
11864 break;
11865 case FLASH_5761VENDOR_ST_A_M45PE20:
11866 case FLASH_5761VENDOR_ST_A_M45PE40:
11867 case FLASH_5761VENDOR_ST_A_M45PE80:
11868 case FLASH_5761VENDOR_ST_A_M45PE16:
11869 case FLASH_5761VENDOR_ST_M_M45PE20:
11870 case FLASH_5761VENDOR_ST_M_M45PE40:
11871 case FLASH_5761VENDOR_ST_M_M45PE80:
11872 case FLASH_5761VENDOR_ST_M_M45PE16:
11873 tp->nvram_jedecnum = JEDEC_ST;
11874 tg3_flag_set(tp, NVRAM_BUFFERED);
11875 tg3_flag_set(tp, FLASH);
11876 tp->nvram_pagesize = 256;
11877 break;
11880 if (protect) {
11881 tp->nvram_size = tr32(NVRAM_ADDR_LOCKOUT);
11882 } else {
11883 switch (nvcfg1) {
11884 case FLASH_5761VENDOR_ATMEL_ADB161D:
11885 case FLASH_5761VENDOR_ATMEL_MDB161D:
11886 case FLASH_5761VENDOR_ST_A_M45PE16:
11887 case FLASH_5761VENDOR_ST_M_M45PE16:
11888 tp->nvram_size = TG3_NVRAM_SIZE_2MB;
11889 break;
11890 case FLASH_5761VENDOR_ATMEL_ADB081D:
11891 case FLASH_5761VENDOR_ATMEL_MDB081D:
11892 case FLASH_5761VENDOR_ST_A_M45PE80:
11893 case FLASH_5761VENDOR_ST_M_M45PE80:
11894 tp->nvram_size = TG3_NVRAM_SIZE_1MB;
11895 break;
11896 case FLASH_5761VENDOR_ATMEL_ADB041D:
11897 case FLASH_5761VENDOR_ATMEL_MDB041D:
11898 case FLASH_5761VENDOR_ST_A_M45PE40:
11899 case FLASH_5761VENDOR_ST_M_M45PE40:
11900 tp->nvram_size = TG3_NVRAM_SIZE_512KB;
11901 break;
11902 case FLASH_5761VENDOR_ATMEL_ADB021D:
11903 case FLASH_5761VENDOR_ATMEL_MDB021D:
11904 case FLASH_5761VENDOR_ST_A_M45PE20:
11905 case FLASH_5761VENDOR_ST_M_M45PE20:
11906 tp->nvram_size = TG3_NVRAM_SIZE_256KB;
11907 break;
11912 static void __devinit tg3_get_5906_nvram_info(struct tg3 *tp)
11914 tp->nvram_jedecnum = JEDEC_ATMEL;
11915 tg3_flag_set(tp, NVRAM_BUFFERED);
11916 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
11919 static void __devinit tg3_get_57780_nvram_info(struct tg3 *tp)
11921 u32 nvcfg1;
11923 nvcfg1 = tr32(NVRAM_CFG1);
11925 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
11926 case FLASH_5787VENDOR_ATMEL_EEPROM_376KHZ:
11927 case FLASH_5787VENDOR_MICRO_EEPROM_376KHZ:
11928 tp->nvram_jedecnum = JEDEC_ATMEL;
11929 tg3_flag_set(tp, NVRAM_BUFFERED);
11930 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
11932 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
11933 tw32(NVRAM_CFG1, nvcfg1);
11934 return;
11935 case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
11936 case FLASH_57780VENDOR_ATMEL_AT45DB011D:
11937 case FLASH_57780VENDOR_ATMEL_AT45DB011B:
11938 case FLASH_57780VENDOR_ATMEL_AT45DB021D:
11939 case FLASH_57780VENDOR_ATMEL_AT45DB021B:
11940 case FLASH_57780VENDOR_ATMEL_AT45DB041D:
11941 case FLASH_57780VENDOR_ATMEL_AT45DB041B:
11942 tp->nvram_jedecnum = JEDEC_ATMEL;
11943 tg3_flag_set(tp, NVRAM_BUFFERED);
11944 tg3_flag_set(tp, FLASH);
11946 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
11947 case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
11948 case FLASH_57780VENDOR_ATMEL_AT45DB011D:
11949 case FLASH_57780VENDOR_ATMEL_AT45DB011B:
11950 tp->nvram_size = TG3_NVRAM_SIZE_128KB;
11951 break;
11952 case FLASH_57780VENDOR_ATMEL_AT45DB021D:
11953 case FLASH_57780VENDOR_ATMEL_AT45DB021B:
11954 tp->nvram_size = TG3_NVRAM_SIZE_256KB;
11955 break;
11956 case FLASH_57780VENDOR_ATMEL_AT45DB041D:
11957 case FLASH_57780VENDOR_ATMEL_AT45DB041B:
11958 tp->nvram_size = TG3_NVRAM_SIZE_512KB;
11959 break;
11961 break;
11962 case FLASH_5752VENDOR_ST_M45PE10:
11963 case FLASH_5752VENDOR_ST_M45PE20:
11964 case FLASH_5752VENDOR_ST_M45PE40:
11965 tp->nvram_jedecnum = JEDEC_ST;
11966 tg3_flag_set(tp, NVRAM_BUFFERED);
11967 tg3_flag_set(tp, FLASH);
11969 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
11970 case FLASH_5752VENDOR_ST_M45PE10:
11971 tp->nvram_size = TG3_NVRAM_SIZE_128KB;
11972 break;
11973 case FLASH_5752VENDOR_ST_M45PE20:
11974 tp->nvram_size = TG3_NVRAM_SIZE_256KB;
11975 break;
11976 case FLASH_5752VENDOR_ST_M45PE40:
11977 tp->nvram_size = TG3_NVRAM_SIZE_512KB;
11978 break;
11980 break;
11981 default:
11982 tg3_flag_set(tp, NO_NVRAM);
11983 return;
11986 tg3_nvram_get_pagesize(tp, nvcfg1);
11987 if (tp->nvram_pagesize != 264 && tp->nvram_pagesize != 528)
11988 tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS);
11992 static void __devinit tg3_get_5717_nvram_info(struct tg3 *tp)
11994 u32 nvcfg1;
11996 nvcfg1 = tr32(NVRAM_CFG1);
11998 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
11999 case FLASH_5717VENDOR_ATMEL_EEPROM:
12000 case FLASH_5717VENDOR_MICRO_EEPROM:
12001 tp->nvram_jedecnum = JEDEC_ATMEL;
12002 tg3_flag_set(tp, NVRAM_BUFFERED);
12003 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
12005 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
12006 tw32(NVRAM_CFG1, nvcfg1);
12007 return;
12008 case FLASH_5717VENDOR_ATMEL_MDB011D:
12009 case FLASH_5717VENDOR_ATMEL_ADB011B:
12010 case FLASH_5717VENDOR_ATMEL_ADB011D:
12011 case FLASH_5717VENDOR_ATMEL_MDB021D:
12012 case FLASH_5717VENDOR_ATMEL_ADB021B:
12013 case FLASH_5717VENDOR_ATMEL_ADB021D:
12014 case FLASH_5717VENDOR_ATMEL_45USPT:
12015 tp->nvram_jedecnum = JEDEC_ATMEL;
12016 tg3_flag_set(tp, NVRAM_BUFFERED);
12017 tg3_flag_set(tp, FLASH);
12019 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
12020 case FLASH_5717VENDOR_ATMEL_MDB021D:
12021 /* Detect size with tg3_nvram_get_size() */
12022 break;
12023 case FLASH_5717VENDOR_ATMEL_ADB021B:
12024 case FLASH_5717VENDOR_ATMEL_ADB021D:
12025 tp->nvram_size = TG3_NVRAM_SIZE_256KB;
12026 break;
12027 default:
12028 tp->nvram_size = TG3_NVRAM_SIZE_128KB;
12029 break;
12031 break;
12032 case FLASH_5717VENDOR_ST_M_M25PE10:
12033 case FLASH_5717VENDOR_ST_A_M25PE10:
12034 case FLASH_5717VENDOR_ST_M_M45PE10:
12035 case FLASH_5717VENDOR_ST_A_M45PE10:
12036 case FLASH_5717VENDOR_ST_M_M25PE20:
12037 case FLASH_5717VENDOR_ST_A_M25PE20:
12038 case FLASH_5717VENDOR_ST_M_M45PE20:
12039 case FLASH_5717VENDOR_ST_A_M45PE20:
12040 case FLASH_5717VENDOR_ST_25USPT:
12041 case FLASH_5717VENDOR_ST_45USPT:
12042 tp->nvram_jedecnum = JEDEC_ST;
12043 tg3_flag_set(tp, NVRAM_BUFFERED);
12044 tg3_flag_set(tp, FLASH);
12046 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
12047 case FLASH_5717VENDOR_ST_M_M25PE20:
12048 case FLASH_5717VENDOR_ST_M_M45PE20:
12049 /* Detect size with tg3_nvram_get_size() */
12050 break;
12051 case FLASH_5717VENDOR_ST_A_M25PE20:
12052 case FLASH_5717VENDOR_ST_A_M45PE20:
12053 tp->nvram_size = TG3_NVRAM_SIZE_256KB;
12054 break;
12055 default:
12056 tp->nvram_size = TG3_NVRAM_SIZE_128KB;
12057 break;
12059 break;
12060 default:
12061 tg3_flag_set(tp, NO_NVRAM);
12062 return;
12065 tg3_nvram_get_pagesize(tp, nvcfg1);
12066 if (tp->nvram_pagesize != 264 && tp->nvram_pagesize != 528)
12067 tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS);
12070 static void __devinit tg3_get_5720_nvram_info(struct tg3 *tp)
12072 u32 nvcfg1, nvmpinstrp;
12074 nvcfg1 = tr32(NVRAM_CFG1);
12075 nvmpinstrp = nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK;
12077 switch (nvmpinstrp) {
12078 case FLASH_5720_EEPROM_HD:
12079 case FLASH_5720_EEPROM_LD:
12080 tp->nvram_jedecnum = JEDEC_ATMEL;
12081 tg3_flag_set(tp, NVRAM_BUFFERED);
12083 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
12084 tw32(NVRAM_CFG1, nvcfg1);
12085 if (nvmpinstrp == FLASH_5720_EEPROM_HD)
12086 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
12087 else
12088 tp->nvram_pagesize = ATMEL_AT24C02_CHIP_SIZE;
12089 return;
12090 case FLASH_5720VENDOR_M_ATMEL_DB011D:
12091 case FLASH_5720VENDOR_A_ATMEL_DB011B:
12092 case FLASH_5720VENDOR_A_ATMEL_DB011D:
12093 case FLASH_5720VENDOR_M_ATMEL_DB021D:
12094 case FLASH_5720VENDOR_A_ATMEL_DB021B:
12095 case FLASH_5720VENDOR_A_ATMEL_DB021D:
12096 case FLASH_5720VENDOR_M_ATMEL_DB041D:
12097 case FLASH_5720VENDOR_A_ATMEL_DB041B:
12098 case FLASH_5720VENDOR_A_ATMEL_DB041D:
12099 case FLASH_5720VENDOR_M_ATMEL_DB081D:
12100 case FLASH_5720VENDOR_A_ATMEL_DB081D:
12101 case FLASH_5720VENDOR_ATMEL_45USPT:
12102 tp->nvram_jedecnum = JEDEC_ATMEL;
12103 tg3_flag_set(tp, NVRAM_BUFFERED);
12104 tg3_flag_set(tp, FLASH);
12106 switch (nvmpinstrp) {
12107 case FLASH_5720VENDOR_M_ATMEL_DB021D:
12108 case FLASH_5720VENDOR_A_ATMEL_DB021B:
12109 case FLASH_5720VENDOR_A_ATMEL_DB021D:
12110 tp->nvram_size = TG3_NVRAM_SIZE_256KB;
12111 break;
12112 case FLASH_5720VENDOR_M_ATMEL_DB041D:
12113 case FLASH_5720VENDOR_A_ATMEL_DB041B:
12114 case FLASH_5720VENDOR_A_ATMEL_DB041D:
12115 tp->nvram_size = TG3_NVRAM_SIZE_512KB;
12116 break;
12117 case FLASH_5720VENDOR_M_ATMEL_DB081D:
12118 case FLASH_5720VENDOR_A_ATMEL_DB081D:
12119 tp->nvram_size = TG3_NVRAM_SIZE_1MB;
12120 break;
12121 default:
12122 tp->nvram_size = TG3_NVRAM_SIZE_128KB;
12123 break;
12125 break;
12126 case FLASH_5720VENDOR_M_ST_M25PE10:
12127 case FLASH_5720VENDOR_M_ST_M45PE10:
12128 case FLASH_5720VENDOR_A_ST_M25PE10:
12129 case FLASH_5720VENDOR_A_ST_M45PE10:
12130 case FLASH_5720VENDOR_M_ST_M25PE20:
12131 case FLASH_5720VENDOR_M_ST_M45PE20:
12132 case FLASH_5720VENDOR_A_ST_M25PE20:
12133 case FLASH_5720VENDOR_A_ST_M45PE20:
12134 case FLASH_5720VENDOR_M_ST_M25PE40:
12135 case FLASH_5720VENDOR_M_ST_M45PE40:
12136 case FLASH_5720VENDOR_A_ST_M25PE40:
12137 case FLASH_5720VENDOR_A_ST_M45PE40:
12138 case FLASH_5720VENDOR_M_ST_M25PE80:
12139 case FLASH_5720VENDOR_M_ST_M45PE80:
12140 case FLASH_5720VENDOR_A_ST_M25PE80:
12141 case FLASH_5720VENDOR_A_ST_M45PE80:
12142 case FLASH_5720VENDOR_ST_25USPT:
12143 case FLASH_5720VENDOR_ST_45USPT:
12144 tp->nvram_jedecnum = JEDEC_ST;
12145 tg3_flag_set(tp, NVRAM_BUFFERED);
12146 tg3_flag_set(tp, FLASH);
12148 switch (nvmpinstrp) {
12149 case FLASH_5720VENDOR_M_ST_M25PE20:
12150 case FLASH_5720VENDOR_M_ST_M45PE20:
12151 case FLASH_5720VENDOR_A_ST_M25PE20:
12152 case FLASH_5720VENDOR_A_ST_M45PE20:
12153 tp->nvram_size = TG3_NVRAM_SIZE_256KB;
12154 break;
12155 case FLASH_5720VENDOR_M_ST_M25PE40:
12156 case FLASH_5720VENDOR_M_ST_M45PE40:
12157 case FLASH_5720VENDOR_A_ST_M25PE40:
12158 case FLASH_5720VENDOR_A_ST_M45PE40:
12159 tp->nvram_size = TG3_NVRAM_SIZE_512KB;
12160 break;
12161 case FLASH_5720VENDOR_M_ST_M25PE80:
12162 case FLASH_5720VENDOR_M_ST_M45PE80:
12163 case FLASH_5720VENDOR_A_ST_M25PE80:
12164 case FLASH_5720VENDOR_A_ST_M45PE80:
12165 tp->nvram_size = TG3_NVRAM_SIZE_1MB;
12166 break;
12167 default:
12168 tp->nvram_size = TG3_NVRAM_SIZE_128KB;
12169 break;
12171 break;
12172 default:
12173 tg3_flag_set(tp, NO_NVRAM);
12174 return;
12177 tg3_nvram_get_pagesize(tp, nvcfg1);
12178 if (tp->nvram_pagesize != 264 && tp->nvram_pagesize != 528)
12179 tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS);
12182 /* Chips other than 5700/5701 use the NVRAM for fetching info. */
12183 static void __devinit tg3_nvram_init(struct tg3 *tp)
12185 tw32_f(GRC_EEPROM_ADDR,
12186 (EEPROM_ADDR_FSM_RESET |
12187 (EEPROM_DEFAULT_CLOCK_PERIOD <<
12188 EEPROM_ADDR_CLKPERD_SHIFT)));
12190 msleep(1);
12192 /* Enable seeprom accesses. */
12193 tw32_f(GRC_LOCAL_CTRL,
12194 tr32(GRC_LOCAL_CTRL) | GRC_LCLCTRL_AUTO_SEEPROM);
12195 udelay(100);
12197 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
12198 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701) {
12199 tg3_flag_set(tp, NVRAM);
12201 if (tg3_nvram_lock(tp)) {
12202 netdev_warn(tp->dev,
12203 "Cannot get nvram lock, %s failed\n",
12204 __func__);
12205 return;
12207 tg3_enable_nvram_access(tp);
12209 tp->nvram_size = 0;
12211 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752)
12212 tg3_get_5752_nvram_info(tp);
12213 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755)
12214 tg3_get_5755_nvram_info(tp);
12215 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 ||
12216 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
12217 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
12218 tg3_get_5787_nvram_info(tp);
12219 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
12220 tg3_get_5761_nvram_info(tp);
12221 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
12222 tg3_get_5906_nvram_info(tp);
12223 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780 ||
12224 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765)
12225 tg3_get_57780_nvram_info(tp);
12226 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
12227 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719)
12228 tg3_get_5717_nvram_info(tp);
12229 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720)
12230 tg3_get_5720_nvram_info(tp);
12231 else
12232 tg3_get_nvram_info(tp);
12234 if (tp->nvram_size == 0)
12235 tg3_get_nvram_size(tp);
12237 tg3_disable_nvram_access(tp);
12238 tg3_nvram_unlock(tp);
12240 } else {
12241 tg3_flag_clear(tp, NVRAM);
12242 tg3_flag_clear(tp, NVRAM_BUFFERED);
12244 tg3_get_eeprom_size(tp);
12248 static int tg3_nvram_write_block_using_eeprom(struct tg3 *tp,
12249 u32 offset, u32 len, u8 *buf)
12251 int i, j, rc = 0;
12252 u32 val;
12254 for (i = 0; i < len; i += 4) {
12255 u32 addr;
12256 __be32 data;
12258 addr = offset + i;
12260 memcpy(&data, buf + i, 4);
12262 /*
12263 * The SEEPROM interface expects the data to always be opposite
12264 * the native endian format. We accomplish this by reversing
12265 * all the operations that would have been performed on the
12266 * data from a call to tg3_nvram_read_be32().
12268 tw32(GRC_EEPROM_DATA, swab32(be32_to_cpu(data)));
12270 val = tr32(GRC_EEPROM_ADDR);
12271 tw32(GRC_EEPROM_ADDR, val | EEPROM_ADDR_COMPLETE);
12273 val &= ~(EEPROM_ADDR_ADDR_MASK | EEPROM_ADDR_DEVID_MASK |
12274 EEPROM_ADDR_READ);
12275 tw32(GRC_EEPROM_ADDR, val |
12276 (0 << EEPROM_ADDR_DEVID_SHIFT) |
12277 (addr & EEPROM_ADDR_ADDR_MASK) |
12278 EEPROM_ADDR_START |
12279 EEPROM_ADDR_WRITE);
12281 for (j = 0; j < 1000; j++) {
12282 val = tr32(GRC_EEPROM_ADDR);
12284 if (val & EEPROM_ADDR_COMPLETE)
12285 break;
12286 msleep(1);
12288 if (!(val & EEPROM_ADDR_COMPLETE)) {
12289 rc = -EBUSY;
12290 break;
12294 return rc;
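/* Worked example of the swab32(be32_to_cpu()) dance above: for buf bytes
 * 11 22 33 44, be32_to_cpu() yields the value 0x11223344 on any host and
 * swab32() stores 0x44332211 in GRC_EEPROM_DATA -- the exact inverse of the
 * transform tg3_nvram_read_be32() applies on the read side.
 */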
12297 /* offset and length are dword aligned */
12298 static int tg3_nvram_write_block_unbuffered(struct tg3 *tp, u32 offset, u32 len,
12299 u8 *buf)
12301 int ret = 0;
12302 u32 pagesize = tp->nvram_pagesize;
12303 u32 pagemask = pagesize - 1;
12304 u32 nvram_cmd;
12305 u8 *tmp;
12307 tmp = kmalloc(pagesize, GFP_KERNEL);
12308 if (tmp == NULL)
12309 return -ENOMEM;
12311 while (len) {
12312 int j;
12313 u32 phy_addr, page_off, size;
12315 phy_addr = offset & ~pagemask;
12317 for (j = 0; j < pagesize; j += 4) {
12318 ret = tg3_nvram_read_be32(tp, phy_addr + j,
12319 (__be32 *) (tmp + j));
12320 if (ret)
12321 break;
12323 if (ret)
12324 break;
12326 page_off = offset & pagemask;
12327 size = pagesize;
12328 if (len < size)
12329 size = len;
12331 len -= size;
12333 memcpy(tmp + page_off, buf, size);
12335 offset = offset + (pagesize - page_off);
12337 tg3_enable_nvram_access(tp);
12340 * Before we can erase the flash page, we need
12341 * to issue a special "write enable" command.
12343 nvram_cmd = NVRAM_CMD_WREN | NVRAM_CMD_GO | NVRAM_CMD_DONE;
12345 if (tg3_nvram_exec_cmd(tp, nvram_cmd))
12346 break;
12348 /* Erase the target page */
12349 tw32(NVRAM_ADDR, phy_addr);
12351 nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE | NVRAM_CMD_WR |
12352 NVRAM_CMD_FIRST | NVRAM_CMD_LAST | NVRAM_CMD_ERASE;
12354 if (tg3_nvram_exec_cmd(tp, nvram_cmd))
12355 break;
12357 /* Issue another write enable to start the write. */
12358 nvram_cmd = NVRAM_CMD_WREN | NVRAM_CMD_GO | NVRAM_CMD_DONE;
12360 if (tg3_nvram_exec_cmd(tp, nvram_cmd))
12361 break;
12363 for (j = 0; j < pagesize; j += 4) {
12364 __be32 data;
12366 data = *((__be32 *) (tmp + j));
12368 tw32(NVRAM_WRDATA, be32_to_cpu(data));
12370 tw32(NVRAM_ADDR, phy_addr + j);
12372 nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE |
12373 NVRAM_CMD_WR;
12375 if (j == 0)
12376 nvram_cmd |= NVRAM_CMD_FIRST;
12377 else if (j == (pagesize - 4))
12378 nvram_cmd |= NVRAM_CMD_LAST;
12380 if ((ret = tg3_nvram_exec_cmd(tp, nvram_cmd)))
12381 break;
12383 if (ret)
12384 break;
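/* Done (or bailed out): WRDI drops the flash write-enable latch again. */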
12387 nvram_cmd = NVRAM_CMD_WRDI | NVRAM_CMD_GO | NVRAM_CMD_DONE;
12388 tg3_nvram_exec_cmd(tp, nvram_cmd);
12390 kfree(tmp);
12392 return ret;
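/* The unbuffered path above is a classic flash read-modify-write cycle:
 * read the whole page into a bounce buffer, merge in the caller's bytes,
 * issue WREN plus a page ERASE, then WREN again and program the page back
 * one word at a time with FIRST/LAST framing the burst.
 */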
12395 /* offset and length are dword aligned */
12396 static int tg3_nvram_write_block_buffered(struct tg3 *tp, u32 offset, u32 len,
12397 u8 *buf)
12399 int i, ret = 0;
12401 for (i = 0; i < len; i += 4, offset += 4) {
12402 u32 page_off, phy_addr, nvram_cmd;
12403 __be32 data;
12405 memcpy(&data, buf + i, 4);
12406 tw32(NVRAM_WRDATA, be32_to_cpu(data));
12408 page_off = offset % tp->nvram_pagesize;
12410 phy_addr = tg3_nvram_phys_addr(tp, offset);
12412 tw32(NVRAM_ADDR, phy_addr);
12414 nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE | NVRAM_CMD_WR;
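/* FIRST tags the first word of each flash page (or of the whole transfer),
 * LAST the final word of a page or of the transfer -- presumably so the
 * controller knows when to open and commit its page buffer.
 */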
12416 if (page_off == 0 || i == 0)
12417 nvram_cmd |= NVRAM_CMD_FIRST;
12418 if (page_off == (tp->nvram_pagesize - 4))
12419 nvram_cmd |= NVRAM_CMD_LAST;
12421 if (i == (len - 4))
12422 nvram_cmd |= NVRAM_CMD_LAST;
12424 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5752 &&
12425 !tg3_flag(tp, 5755_PLUS) &&
12426 (tp->nvram_jedecnum == JEDEC_ST) &&
12427 (nvram_cmd & NVRAM_CMD_FIRST)) {
12429 if ((ret = tg3_nvram_exec_cmd(tp,
12430 NVRAM_CMD_WREN | NVRAM_CMD_GO |
12431 NVRAM_CMD_DONE)))
12433 break;
12435 if (!tg3_flag(tp, FLASH)) {
12436 /* We always do complete word writes to eeprom. */
12437 nvram_cmd |= (NVRAM_CMD_FIRST | NVRAM_CMD_LAST);
12440 if ((ret = tg3_nvram_exec_cmd(tp, nvram_cmd)))
12441 break;
12443 return ret;
12446 /* offset and length are dword aligned */
12447 static int tg3_nvram_write_block(struct tg3 *tp, u32 offset, u32 len, u8 *buf)
12449 int ret;
12451 if (tg3_flag(tp, EEPROM_WRITE_PROT)) {
12452 tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl &
12453 ~GRC_LCLCTRL_GPIO_OUTPUT1);
12454 udelay(40);
12457 if (!tg3_flag(tp, NVRAM)) {
12458 ret = tg3_nvram_write_block_using_eeprom(tp, offset, len, buf);
12459 } else {
12460 u32 grc_mode;
12462 ret = tg3_nvram_lock(tp);
12463 if (ret)
12464 return ret;
12466 tg3_enable_nvram_access(tp);
12467 if (tg3_flag(tp, 5750_PLUS) && !tg3_flag(tp, PROTECTED_NVRAM))
12468 tw32(NVRAM_WRITE1, 0x406);
12470 grc_mode = tr32(GRC_MODE);
12471 tw32(GRC_MODE, grc_mode | GRC_MODE_NVRAM_WR_ENABLE);
12473 if (tg3_flag(tp, NVRAM_BUFFERED) || !tg3_flag(tp, FLASH)) {
12474 ret = tg3_nvram_write_block_buffered(tp, offset, len,
12475 buf);
12476 } else {
12477 ret = tg3_nvram_write_block_unbuffered(tp, offset, len,
12478 buf);
12481 grc_mode = tr32(GRC_MODE);
12482 tw32(GRC_MODE, grc_mode & ~GRC_MODE_NVRAM_WR_ENABLE);
12484 tg3_disable_nvram_access(tp);
12485 tg3_nvram_unlock(tp);
12488 if (tg3_flag(tp, EEPROM_WRITE_PROT)) {
12489 tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
12490 udelay(40);
12493 return ret;
12496 struct subsys_tbl_ent {
12497 u16 subsys_vendor, subsys_devid;
12498 u32 phy_id;
12501 static struct subsys_tbl_ent subsys_id_to_phy_id[] __devinitdata = {
12502 /* Broadcom boards. */
12503 { TG3PCI_SUBVENDOR_ID_BROADCOM,
12504 TG3PCI_SUBDEVICE_ID_BROADCOM_95700A6, TG3_PHY_ID_BCM5401 },
12505 { TG3PCI_SUBVENDOR_ID_BROADCOM,
12506 TG3PCI_SUBDEVICE_ID_BROADCOM_95701A5, TG3_PHY_ID_BCM5701 },
12507 { TG3PCI_SUBVENDOR_ID_BROADCOM,
12508 TG3PCI_SUBDEVICE_ID_BROADCOM_95700T6, TG3_PHY_ID_BCM8002 },
12509 { TG3PCI_SUBVENDOR_ID_BROADCOM,
12510 TG3PCI_SUBDEVICE_ID_BROADCOM_95700A9, 0 },
12511 { TG3PCI_SUBVENDOR_ID_BROADCOM,
12512 TG3PCI_SUBDEVICE_ID_BROADCOM_95701T1, TG3_PHY_ID_BCM5701 },
12513 { TG3PCI_SUBVENDOR_ID_BROADCOM,
12514 TG3PCI_SUBDEVICE_ID_BROADCOM_95701T8, TG3_PHY_ID_BCM5701 },
12515 { TG3PCI_SUBVENDOR_ID_BROADCOM,
12516 TG3PCI_SUBDEVICE_ID_BROADCOM_95701A7, 0 },
12517 { TG3PCI_SUBVENDOR_ID_BROADCOM,
12518 TG3PCI_SUBDEVICE_ID_BROADCOM_95701A10, TG3_PHY_ID_BCM5701 },
12519 { TG3PCI_SUBVENDOR_ID_BROADCOM,
12520 TG3PCI_SUBDEVICE_ID_BROADCOM_95701A12, TG3_PHY_ID_BCM5701 },
12521 { TG3PCI_SUBVENDOR_ID_BROADCOM,
12522 TG3PCI_SUBDEVICE_ID_BROADCOM_95703AX1, TG3_PHY_ID_BCM5703 },
12523 { TG3PCI_SUBVENDOR_ID_BROADCOM,
12524 TG3PCI_SUBDEVICE_ID_BROADCOM_95703AX2, TG3_PHY_ID_BCM5703 },
12526 /* 3com boards. */
12527 { TG3PCI_SUBVENDOR_ID_3COM,
12528 TG3PCI_SUBDEVICE_ID_3COM_3C996T, TG3_PHY_ID_BCM5401 },
12529 { TG3PCI_SUBVENDOR_ID_3COM,
12530 TG3PCI_SUBDEVICE_ID_3COM_3C996BT, TG3_PHY_ID_BCM5701 },
12531 { TG3PCI_SUBVENDOR_ID_3COM,
12532 TG3PCI_SUBDEVICE_ID_3COM_3C996SX, 0 },
12533 { TG3PCI_SUBVENDOR_ID_3COM,
12534 TG3PCI_SUBDEVICE_ID_3COM_3C1000T, TG3_PHY_ID_BCM5701 },
12535 { TG3PCI_SUBVENDOR_ID_3COM,
12536 TG3PCI_SUBDEVICE_ID_3COM_3C940BR01, TG3_PHY_ID_BCM5701 },
12538 /* DELL boards. */
12539 { TG3PCI_SUBVENDOR_ID_DELL,
12540 TG3PCI_SUBDEVICE_ID_DELL_VIPER, TG3_PHY_ID_BCM5401 },
12541 { TG3PCI_SUBVENDOR_ID_DELL,
12542 TG3PCI_SUBDEVICE_ID_DELL_JAGUAR, TG3_PHY_ID_BCM5401 },
12543 { TG3PCI_SUBVENDOR_ID_DELL,
12544 TG3PCI_SUBDEVICE_ID_DELL_MERLOT, TG3_PHY_ID_BCM5411 },
12545 { TG3PCI_SUBVENDOR_ID_DELL,
12546 TG3PCI_SUBDEVICE_ID_DELL_SLIM_MERLOT, TG3_PHY_ID_BCM5411 },
12548 /* Compaq boards. */
12549 { TG3PCI_SUBVENDOR_ID_COMPAQ,
12550 TG3PCI_SUBDEVICE_ID_COMPAQ_BANSHEE, TG3_PHY_ID_BCM5701 },
12551 { TG3PCI_SUBVENDOR_ID_COMPAQ,
12552 TG3PCI_SUBDEVICE_ID_COMPAQ_BANSHEE_2, TG3_PHY_ID_BCM5701 },
12553 { TG3PCI_SUBVENDOR_ID_COMPAQ,
12554 TG3PCI_SUBDEVICE_ID_COMPAQ_CHANGELING, 0 },
12555 { TG3PCI_SUBVENDOR_ID_COMPAQ,
12556 TG3PCI_SUBDEVICE_ID_COMPAQ_NC7780, TG3_PHY_ID_BCM5701 },
12557 { TG3PCI_SUBVENDOR_ID_COMPAQ,
12558 TG3PCI_SUBDEVICE_ID_COMPAQ_NC7780_2, TG3_PHY_ID_BCM5701 },
12560 /* IBM boards. */
12561 { TG3PCI_SUBVENDOR_ID_IBM,
12562 TG3PCI_SUBDEVICE_ID_IBM_5703SAX2, 0 }
12565 static struct subsys_tbl_ent * __devinit tg3_lookup_by_subsys(struct tg3 *tp)
12567 int i;
12569 for (i = 0; i < ARRAY_SIZE(subsys_id_to_phy_id); i++) {
12570 if ((subsys_id_to_phy_id[i].subsys_vendor ==
12571 tp->pdev->subsystem_vendor) &&
12572 (subsys_id_to_phy_id[i].subsys_devid ==
12573 tp->pdev->subsystem_device))
12574 return &subsys_id_to_phy_id[i];
12576 return NULL;
12579 static void __devinit tg3_get_eeprom_hw_cfg(struct tg3 *tp)
12581 u32 val;
12582 u16 pmcsr;
12584 /* On some early chips the SRAM cannot be accessed in D3hot state,
12585 * so we need to make sure we're in D0.
12587 pci_read_config_word(tp->pdev, tp->pm_cap + PCI_PM_CTRL, &pmcsr);
12588 pmcsr &= ~PCI_PM_CTRL_STATE_MASK;
12589 pci_write_config_word(tp->pdev, tp->pm_cap + PCI_PM_CTRL, pmcsr);
12590 msleep(1);
12592 /* Make sure register accesses (indirect or otherwise)
12593 * will function correctly.
12595 pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
12596 tp->misc_host_ctrl);
12598 /* The memory arbiter has to be enabled in order for SRAM accesses
12599 * to succeed. Normally on powerup the tg3 chip firmware will make
12600 * sure it is enabled, but other entities such as system netboot
12601 * code might disable it.
12603 val = tr32(MEMARB_MODE);
12604 tw32(MEMARB_MODE, val | MEMARB_MODE_ENABLE);
12606 tp->phy_id = TG3_PHY_ID_INVALID;
12607 tp->led_ctrl = LED_CTRL_MODE_PHY_1;
12609 /* Assume an onboard, WOL-capable device by default. */
12610 tg3_flag_set(tp, EEPROM_WRITE_PROT);
12611 tg3_flag_set(tp, WOL_CAP);
12613 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
12614 if (!(tr32(PCIE_TRANSACTION_CFG) & PCIE_TRANS_CFG_LOM)) {
12615 tg3_flag_clear(tp, EEPROM_WRITE_PROT);
12616 tg3_flag_set(tp, IS_NIC);
12618 val = tr32(VCPU_CFGSHDW);
12619 if (val & VCPU_CFGSHDW_ASPM_DBNC)
12620 tg3_flag_set(tp, ASPM_WORKAROUND);
12621 if ((val & VCPU_CFGSHDW_WOL_ENABLE) &&
12622 (val & VCPU_CFGSHDW_WOL_MAGPKT)) {
12623 tg3_flag_set(tp, WOL_ENABLE);
12624 device_set_wakeup_enable(&tp->pdev->dev, true);
12626 goto done;
12629 tg3_read_mem(tp, NIC_SRAM_DATA_SIG, &val);
12630 if (val == NIC_SRAM_DATA_SIG_MAGIC) {
12631 u32 nic_cfg, led_cfg;
12632 u32 nic_phy_id, ver, cfg2 = 0, cfg4 = 0, eeprom_phy_id;
12633 int eeprom_phy_serdes = 0;
12635 tg3_read_mem(tp, NIC_SRAM_DATA_CFG, &nic_cfg);
12636 tp->nic_sram_data_cfg = nic_cfg;
12638 tg3_read_mem(tp, NIC_SRAM_DATA_VER, &ver);
12639 ver >>= NIC_SRAM_DATA_VER_SHIFT;
12640 if ((GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700) &&
12641 (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701) &&
12642 (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5703) &&
12643 (ver > 0) && (ver < 0x100))
12644 tg3_read_mem(tp, NIC_SRAM_DATA_CFG_2, &cfg2);
12646 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
12647 tg3_read_mem(tp, NIC_SRAM_DATA_CFG_4, &cfg4);
12649 if ((nic_cfg & NIC_SRAM_DATA_CFG_PHY_TYPE_MASK) ==
12650 NIC_SRAM_DATA_CFG_PHY_TYPE_FIBER)
12651 eeprom_phy_serdes = 1;
12653 tg3_read_mem(tp, NIC_SRAM_DATA_PHY_ID, &nic_phy_id);
12654 if (nic_phy_id != 0) {
12655 u32 id1 = nic_phy_id & NIC_SRAM_DATA_PHY_ID1_MASK;
12656 u32 id2 = nic_phy_id & NIC_SRAM_DATA_PHY_ID2_MASK;
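/* Repack the two NVRAM halves into the driver's PHY ID layout -- the same
 * packing tg3_phy_probe() builds from MII_PHYSID1/MII_PHYSID2: OUI bits in
 * the upper part of the word, model and revision in the low ten bits.
 */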
12658 eeprom_phy_id = (id1 >> 16) << 10;
12659 eeprom_phy_id |= (id2 & 0xfc00) << 16;
12660 eeprom_phy_id |= (id2 & 0x03ff) << 0;
12661 } else
12662 eeprom_phy_id = 0;
12664 tp->phy_id = eeprom_phy_id;
12665 if (eeprom_phy_serdes) {
12666 if (!tg3_flag(tp, 5705_PLUS))
12667 tp->phy_flags |= TG3_PHYFLG_PHY_SERDES;
12668 else
12669 tp->phy_flags |= TG3_PHYFLG_MII_SERDES;
12672 if (tg3_flag(tp, 5750_PLUS))
12673 led_cfg = cfg2 & (NIC_SRAM_DATA_CFG_LED_MODE_MASK |
12674 SHASTA_EXT_LED_MODE_MASK);
12675 else
12676 led_cfg = nic_cfg & NIC_SRAM_DATA_CFG_LED_MODE_MASK;
12678 switch (led_cfg) {
12679 default:
12680 case NIC_SRAM_DATA_CFG_LED_MODE_PHY_1:
12681 tp->led_ctrl = LED_CTRL_MODE_PHY_1;
12682 break;
12684 case NIC_SRAM_DATA_CFG_LED_MODE_PHY_2:
12685 tp->led_ctrl = LED_CTRL_MODE_PHY_2;
12686 break;
12688 case NIC_SRAM_DATA_CFG_LED_MODE_MAC:
12689 tp->led_ctrl = LED_CTRL_MODE_MAC;
12691 /* Default to PHY_1_MODE if 0 (MAC_MODE) is read,
12692 * as happens with some older 5700/5701 bootcode.
12694 if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
12695 ASIC_REV_5700 ||
12696 GET_ASIC_REV(tp->pci_chip_rev_id) ==
12697 ASIC_REV_5701)
12698 tp->led_ctrl = LED_CTRL_MODE_PHY_1;
12700 break;
12702 case SHASTA_EXT_LED_SHARED:
12703 tp->led_ctrl = LED_CTRL_MODE_SHARED;
12704 if (tp->pci_chip_rev_id != CHIPREV_ID_5750_A0 &&
12705 tp->pci_chip_rev_id != CHIPREV_ID_5750_A1)
12706 tp->led_ctrl |= (LED_CTRL_MODE_PHY_1 |
12707 LED_CTRL_MODE_PHY_2);
12708 break;
12710 case SHASTA_EXT_LED_MAC:
12711 tp->led_ctrl = LED_CTRL_MODE_SHASTA_MAC;
12712 break;
12714 case SHASTA_EXT_LED_COMBO:
12715 tp->led_ctrl = LED_CTRL_MODE_COMBO;
12716 if (tp->pci_chip_rev_id != CHIPREV_ID_5750_A0)
12717 tp->led_ctrl |= (LED_CTRL_MODE_PHY_1 |
12718 LED_CTRL_MODE_PHY_2);
12719 break;
12723 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
12724 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) &&
12725 tp->pdev->subsystem_vendor == PCI_VENDOR_ID_DELL)
12726 tp->led_ctrl = LED_CTRL_MODE_PHY_2;
12728 if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5784_AX)
12729 tp->led_ctrl = LED_CTRL_MODE_PHY_1;
12731 if (nic_cfg & NIC_SRAM_DATA_CFG_EEPROM_WP) {
12732 tg3_flag_set(tp, EEPROM_WRITE_PROT);
12733 if ((tp->pdev->subsystem_vendor ==
12734 PCI_VENDOR_ID_ARIMA) &&
12735 (tp->pdev->subsystem_device == 0x205a ||
12736 tp->pdev->subsystem_device == 0x2063))
12737 tg3_flag_clear(tp, EEPROM_WRITE_PROT);
12738 } else {
12739 tg3_flag_clear(tp, EEPROM_WRITE_PROT);
12740 tg3_flag_set(tp, IS_NIC);
12743 if (nic_cfg & NIC_SRAM_DATA_CFG_ASF_ENABLE) {
12744 tg3_flag_set(tp, ENABLE_ASF);
12745 if (tg3_flag(tp, 5750_PLUS))
12746 tg3_flag_set(tp, ASF_NEW_HANDSHAKE);
12749 if ((nic_cfg & NIC_SRAM_DATA_CFG_APE_ENABLE) &&
12750 tg3_flag(tp, 5750_PLUS))
12751 tg3_flag_set(tp, ENABLE_APE);
12753 if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES &&
12754 !(nic_cfg & NIC_SRAM_DATA_CFG_FIBER_WOL))
12755 tg3_flag_clear(tp, WOL_CAP);
12757 if (tg3_flag(tp, WOL_CAP) &&
12758 (nic_cfg & NIC_SRAM_DATA_CFG_WOL_ENABLE)) {
12759 tg3_flag_set(tp, WOL_ENABLE);
12760 device_set_wakeup_enable(&tp->pdev->dev, true);
12763 if (cfg2 & (1 << 17))
12764 tp->phy_flags |= TG3_PHYFLG_CAPACITIVE_COUPLING;
12766 /* Serdes signal pre-emphasis in register 0x590 is set by the
12767 * bootcode if bit 18 is set. */
12768 if (cfg2 & (1 << 18))
12769 tp->phy_flags |= TG3_PHYFLG_SERDES_PREEMPHASIS;
12771 if ((tg3_flag(tp, 57765_PLUS) ||
12772 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 &&
12773 GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5784_AX)) &&
12774 (cfg2 & NIC_SRAM_DATA_CFG_2_APD_EN))
12775 tp->phy_flags |= TG3_PHYFLG_ENABLE_APD;
12777 if (tg3_flag(tp, PCI_EXPRESS) &&
12778 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5785 &&
12779 !tg3_flag(tp, 57765_PLUS)) {
12780 u32 cfg3;
12782 tg3_read_mem(tp, NIC_SRAM_DATA_CFG_3, &cfg3);
12783 if (cfg3 & NIC_SRAM_ASPM_DEBOUNCE)
12784 tg3_flag_set(tp, ASPM_WORKAROUND);
12787 if (cfg4 & NIC_SRAM_RGMII_INBAND_DISABLE)
12788 tg3_flag_set(tp, RGMII_INBAND_DISABLE);
12789 if (cfg4 & NIC_SRAM_RGMII_EXT_IBND_RX_EN)
12790 tg3_flag_set(tp, RGMII_EXT_IBND_RX_EN);
12791 if (cfg4 & NIC_SRAM_RGMII_EXT_IBND_TX_EN)
12792 tg3_flag_set(tp, RGMII_EXT_IBND_TX_EN);
12794 done:
12795 if (tg3_flag(tp, WOL_CAP))
12796 device_set_wakeup_enable(&tp->pdev->dev,
12797 tg3_flag(tp, WOL_ENABLE));
12798 else
12799 device_set_wakeup_capable(&tp->pdev->dev, false);
12802 static int __devinit tg3_issue_otp_command(struct tg3 *tp, u32 cmd)
12804 int i;
12805 u32 val;
12807 tw32(OTP_CTRL, cmd | OTP_CTRL_OTP_CMD_START);
12808 tw32(OTP_CTRL, cmd);
12810 /* Wait for up to 1 ms for command to execute. */
12811 for (i = 0; i < 100; i++) {
12812 val = tr32(OTP_STATUS);
12813 if (val & OTP_STATUS_CMD_DONE)
12814 break;
12815 udelay(10);
12818 return (val & OTP_STATUS_CMD_DONE) ? 0 : -EBUSY;
12821 /* Read the gphy configuration from the OTP region of the chip. The gphy
12822 * configuration is a 32-bit value that straddles the alignment boundary.
12823 * We do two 32-bit reads and then shift and merge the results.
12825 static u32 __devinit tg3_read_otp_phycfg(struct tg3 *tp)
12827 u32 bhalf_otp, thalf_otp;
12829 tw32(OTP_MODE, OTP_MODE_OTP_THRU_GRC);
12831 if (tg3_issue_otp_command(tp, OTP_CTRL_OTP_CMD_INIT))
12832 return 0;
12834 tw32(OTP_ADDRESS, OTP_ADDRESS_MAGIC1);
12836 if (tg3_issue_otp_command(tp, OTP_CTRL_OTP_CMD_READ))
12837 return 0;
12839 thalf_otp = tr32(OTP_READ_DATA);
12841 tw32(OTP_ADDRESS, OTP_ADDRESS_MAGIC2);
12843 if (tg3_issue_otp_command(tp, OTP_CTRL_OTP_CMD_READ))
12844 return 0;
12846 bhalf_otp = tr32(OTP_READ_DATA);
12848 return ((thalf_otp & 0x0000ffff) << 16) | (bhalf_otp >> 16);
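/* Merge sketch: with thalf_otp = 0xAAAABBBB and bhalf_otp = 0xCCCCDDDD,
 * the gphy config value straddling the two words comes out as
 * (0xBBBB << 16) | 0xCCCC = 0xBBBBCCCC.
 */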
12851 static void __devinit tg3_phy_init_link_config(struct tg3 *tp)
12853 u32 adv = ADVERTISED_Autoneg |
12854 ADVERTISED_Pause;
12856 if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY))
12857 adv |= ADVERTISED_1000baseT_Half |
12858 ADVERTISED_1000baseT_Full;
12860 if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
12861 adv |= ADVERTISED_100baseT_Half |
12862 ADVERTISED_100baseT_Full |
12863 ADVERTISED_10baseT_Half |
12864 ADVERTISED_10baseT_Full |
12865 ADVERTISED_TP;
12866 else
12867 adv |= ADVERTISED_FIBRE;
12869 tp->link_config.advertising = adv;
12870 tp->link_config.speed = SPEED_INVALID;
12871 tp->link_config.duplex = DUPLEX_INVALID;
12872 tp->link_config.autoneg = AUTONEG_ENABLE;
12873 tp->link_config.active_speed = SPEED_INVALID;
12874 tp->link_config.active_duplex = DUPLEX_INVALID;
12875 tp->link_config.orig_speed = SPEED_INVALID;
12876 tp->link_config.orig_duplex = DUPLEX_INVALID;
12877 tp->link_config.orig_autoneg = AUTONEG_INVALID;
12880 static int __devinit tg3_phy_probe(struct tg3 *tp)
12882 u32 hw_phy_id_1, hw_phy_id_2;
12883 u32 hw_phy_id, hw_phy_id_masked;
12884 int err;
12886 /* flow control autonegotiation is default behavior */
12887 tg3_flag_set(tp, PAUSE_AUTONEG);
12888 tp->link_config.flowctrl = FLOW_CTRL_TX | FLOW_CTRL_RX;
12890 if (tg3_flag(tp, USE_PHYLIB))
12891 return tg3_phy_init(tp);
12893 /* Reading the PHY ID register can conflict with ASF
12894 * firmware access to the PHY hardware.
12896 err = 0;
12897 if (tg3_flag(tp, ENABLE_ASF) || tg3_flag(tp, ENABLE_APE)) {
12898 hw_phy_id = hw_phy_id_masked = TG3_PHY_ID_INVALID;
12899 } else {
12900 /* Now read the physical PHY_ID from the chip and verify
12901 * that it is sane. If it doesn't look good, we fall back
12902 * to the PHY_ID found in the eeprom area or, failing
12903 * that, the hard-coded subsystem-ID table.
12905 err |= tg3_readphy(tp, MII_PHYSID1, &hw_phy_id_1);
12906 err |= tg3_readphy(tp, MII_PHYSID2, &hw_phy_id_2);
12908 hw_phy_id = (hw_phy_id_1 & 0xffff) << 10;
12909 hw_phy_id |= (hw_phy_id_2 & 0xfc00) << 16;
12910 hw_phy_id |= (hw_phy_id_2 & 0x03ff) << 0;
12912 hw_phy_id_masked = hw_phy_id & TG3_PHY_ID_MASK;
12915 if (!err && TG3_KNOWN_PHY_ID(hw_phy_id_masked)) {
12916 tp->phy_id = hw_phy_id;
12917 if (hw_phy_id_masked == TG3_PHY_ID_BCM8002)
12918 tp->phy_flags |= TG3_PHYFLG_PHY_SERDES;
12919 else
12920 tp->phy_flags &= ~TG3_PHYFLG_PHY_SERDES;
12921 } else {
12922 if (tp->phy_id != TG3_PHY_ID_INVALID) {
12923 /* Do nothing, phy ID already set up in
12924 * tg3_get_eeprom_hw_cfg().
12926 } else {
12927 struct subsys_tbl_ent *p;
12929 /* No eeprom signature? Try the hardcoded
12930 * subsys device table.
12932 p = tg3_lookup_by_subsys(tp);
12933 if (!p)
12934 return -ENODEV;
12936 tp->phy_id = p->phy_id;
12937 if (!tp->phy_id ||
12938 tp->phy_id == TG3_PHY_ID_BCM8002)
12939 tp->phy_flags |= TG3_PHYFLG_PHY_SERDES;
12943 if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES) &&
12944 ((tp->pdev->device == TG3PCI_DEVICE_TIGON3_5718 &&
12945 tp->pci_chip_rev_id != CHIPREV_ID_5717_A0) ||
12946 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765 &&
12947 tp->pci_chip_rev_id != CHIPREV_ID_57765_A0)))
12948 tp->phy_flags |= TG3_PHYFLG_EEE_CAP;
12950 tg3_phy_init_link_config(tp);
12952 if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES) &&
12953 !tg3_flag(tp, ENABLE_APE) &&
12954 !tg3_flag(tp, ENABLE_ASF)) {
12955 u32 bmsr, mask;
12957 tg3_readphy(tp, MII_BMSR, &bmsr);
12958 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
12959 (bmsr & BMSR_LSTATUS))
12960 goto skip_phy_reset;
12962 err = tg3_phy_reset(tp);
12963 if (err)
12964 return err;
12966 tg3_phy_set_wirespeed(tp);
12968 mask = (ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full |
12969 ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full |
12970 ADVERTISED_1000baseT_Half | ADVERTISED_1000baseT_Full);
12971 if (!tg3_copper_is_advertising_all(tp, mask)) {
12972 tg3_phy_autoneg_cfg(tp, tp->link_config.advertising,
12973 tp->link_config.flowctrl);
12975 tg3_writephy(tp, MII_BMCR,
12976 BMCR_ANENABLE | BMCR_ANRESTART);
12980 skip_phy_reset:
12981 if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5401) {
12982 err = tg3_init_5401phy_dsp(tp);
12983 if (err)
12984 return err;
12986 err = tg3_init_5401phy_dsp(tp);
12989 return err;
12992 static void __devinit tg3_read_vpd(struct tg3 *tp)
12994 u8 *vpd_data;
12995 unsigned int block_end, rosize, len;
12996 int j, i = 0;
12998 vpd_data = (u8 *)tg3_vpd_readblock(tp);
12999 if (!vpd_data)
13000 goto out_no_vpd;
13002 i = pci_vpd_find_tag(vpd_data, 0, TG3_NVM_VPD_LEN,
13003 PCI_VPD_LRDT_RO_DATA);
13004 if (i < 0)
13005 goto out_not_found;
13007 rosize = pci_vpd_lrdt_size(&vpd_data[i]);
13008 block_end = i + PCI_VPD_LRDT_TAG_SIZE + rosize;
13009 i += PCI_VPD_LRDT_TAG_SIZE;
13011 if (block_end > TG3_NVM_VPD_LEN)
13012 goto out_not_found;
13014 j = pci_vpd_find_info_keyword(vpd_data, i, rosize,
13015 PCI_VPD_RO_KEYWORD_MFR_ID);
13016 if (j > 0) {
13017 len = pci_vpd_info_field_size(&vpd_data[j]);
13019 j += PCI_VPD_INFO_FLD_HDR_SIZE;
13020 if (j + len > block_end || len != 4 ||
13021 memcmp(&vpd_data[j], "1028", 4))
13022 goto partno;
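/* "1028" is Dell's PCI vendor ID spelled out in ASCII; presumably only
 * those boards stash a bootcode version string in the VENDOR0 keyword
 * read next.
 */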
13024 j = pci_vpd_find_info_keyword(vpd_data, i, rosize,
13025 PCI_VPD_RO_KEYWORD_VENDOR0);
13026 if (j < 0)
13027 goto partno;
13029 len = pci_vpd_info_field_size(&vpd_data[j]);
13031 j += PCI_VPD_INFO_FLD_HDR_SIZE;
13032 if (j + len > block_end)
13033 goto partno;
13035 memcpy(tp->fw_ver, &vpd_data[j], len);
13036 strncat(tp->fw_ver, " bc ", TG3_NVM_VPD_LEN - len - 1);
13039 partno:
13040 i = pci_vpd_find_info_keyword(vpd_data, i, rosize,
13041 PCI_VPD_RO_KEYWORD_PARTNO);
13042 if (i < 0)
13043 goto out_not_found;
13045 len = pci_vpd_info_field_size(&vpd_data[i]);
13047 i += PCI_VPD_INFO_FLD_HDR_SIZE;
13048 if (len > TG3_BPN_SIZE ||
13049 (len + i) > TG3_NVM_VPD_LEN)
13050 goto out_not_found;
13052 memcpy(tp->board_part_number, &vpd_data[i], len);
13054 out_not_found:
13055 kfree(vpd_data);
13056 if (tp->board_part_number[0])
13057 return;
13059 out_no_vpd:
13060 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717) {
13061 if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717)
13062 strcpy(tp->board_part_number, "BCM5717");
13063 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5718)
13064 strcpy(tp->board_part_number, "BCM5718");
13065 else
13066 goto nomatch;
13067 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780) {
13068 if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57780)
13069 strcpy(tp->board_part_number, "BCM57780");
13070 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57760)
13071 strcpy(tp->board_part_number, "BCM57760");
13072 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57790)
13073 strcpy(tp->board_part_number, "BCM57790");
13074 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57788)
13075 strcpy(tp->board_part_number, "BCM57788");
13076 else
13077 goto nomatch;
13078 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765) {
13079 if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57761)
13080 strcpy(tp->board_part_number, "BCM57761");
13081 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57765)
13082 strcpy(tp->board_part_number, "BCM57765");
13083 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57781)
13084 strcpy(tp->board_part_number, "BCM57781");
13085 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57785)
13086 strcpy(tp->board_part_number, "BCM57785");
13087 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57791)
13088 strcpy(tp->board_part_number, "BCM57791");
13089 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57795)
13090 strcpy(tp->board_part_number, "BCM57795");
13091 else
13092 goto nomatch;
13093 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
13094 strcpy(tp->board_part_number, "BCM95906");
13095 } else {
13096 nomatch:
13097 strcpy(tp->board_part_number, "none");
13101 static int __devinit tg3_fw_img_is_valid(struct tg3 *tp, u32 offset)
13103 u32 val;
13105 if (tg3_nvram_read(tp, offset, &val) ||
13106 (val & 0xfc000000) != 0x0c000000 ||
13107 tg3_nvram_read(tp, offset + 4, &val) ||
13108 val != 0)
13109 return 0;
13111 return 1;
13114 static void __devinit tg3_read_bc_ver(struct tg3 *tp)
13116 u32 val, offset, start, ver_offset;
13117 int i, dst_off;
13118 bool newver = false;
13120 if (tg3_nvram_read(tp, 0xc, &offset) ||
13121 tg3_nvram_read(tp, 0x4, &start))
13122 return;
13124 offset = tg3_nvram_logical_addr(tp, offset);
13126 if (tg3_nvram_read(tp, offset, &val))
13127 return;
13129 if ((val & 0xfc000000) == 0x0c000000) {
13130 if (tg3_nvram_read(tp, offset + 4, &val))
13131 return;
13133 if (val == 0)
13134 newver = true;
13137 dst_off = strlen(tp->fw_ver);
13139 if (newver) {
13140 if (TG3_VER_SIZE - dst_off < 16 ||
13141 tg3_nvram_read(tp, offset + 8, &ver_offset))
13142 return;
13144 offset = offset + ver_offset - start;
13145 for (i = 0; i < 16; i += 4) {
13146 __be32 v;
13147 if (tg3_nvram_read_be32(tp, offset + i, &v))
13148 return;
13150 memcpy(tp->fw_ver + dst_off + i, &v, sizeof(v));
13152 } else {
13153 u32 major, minor;
13155 if (tg3_nvram_read(tp, TG3_NVM_PTREV_BCVER, &ver_offset))
13156 return;
13158 major = (ver_offset & TG3_NVM_BCVER_MAJMSK) >>
13159 TG3_NVM_BCVER_MAJSFT;
13160 minor = ver_offset & TG3_NVM_BCVER_MINMSK;
13161 snprintf(&tp->fw_ver[dst_off], TG3_VER_SIZE - dst_off,
13162 "v%d.%02d", major, minor);
13166 static void __devinit tg3_read_hwsb_ver(struct tg3 *tp)
13168 u32 val, major, minor;
13170 /* Use native endian representation */
13171 if (tg3_nvram_read(tp, TG3_NVM_HWSB_CFG1, &val))
13172 return;
13174 major = (val & TG3_NVM_HWSB_CFG1_MAJMSK) >>
13175 TG3_NVM_HWSB_CFG1_MAJSFT;
13176 minor = (val & TG3_NVM_HWSB_CFG1_MINMSK) >>
13177 TG3_NVM_HWSB_CFG1_MINSFT;
13179 snprintf(&tp->fw_ver[0], 32, "sb v%d.%02d", major, minor);
13182 static void __devinit tg3_read_sb_ver(struct tg3 *tp, u32 val)
13184 u32 offset, major, minor, build;
13186 strncat(tp->fw_ver, "sb", TG3_VER_SIZE - strlen(tp->fw_ver) - 1);
13188 if ((val & TG3_EEPROM_SB_FORMAT_MASK) != TG3_EEPROM_SB_FORMAT_1)
13189 return;
13191 switch (val & TG3_EEPROM_SB_REVISION_MASK) {
13192 case TG3_EEPROM_SB_REVISION_0:
13193 offset = TG3_EEPROM_SB_F1R0_EDH_OFF;
13194 break;
13195 case TG3_EEPROM_SB_REVISION_2:
13196 offset = TG3_EEPROM_SB_F1R2_EDH_OFF;
13197 break;
13198 case TG3_EEPROM_SB_REVISION_3:
13199 offset = TG3_EEPROM_SB_F1R3_EDH_OFF;
13200 break;
13201 case TG3_EEPROM_SB_REVISION_4:
13202 offset = TG3_EEPROM_SB_F1R4_EDH_OFF;
13203 break;
13204 case TG3_EEPROM_SB_REVISION_5:
13205 offset = TG3_EEPROM_SB_F1R5_EDH_OFF;
13206 break;
13207 case TG3_EEPROM_SB_REVISION_6:
13208 offset = TG3_EEPROM_SB_F1R6_EDH_OFF;
13209 break;
13210 default:
13211 return;
13214 if (tg3_nvram_read(tp, offset, &val))
13215 return;
13217 build = (val & TG3_EEPROM_SB_EDH_BLD_MASK) >>
13218 TG3_EEPROM_SB_EDH_BLD_SHFT;
13219 major = (val & TG3_EEPROM_SB_EDH_MAJ_MASK) >>
13220 TG3_EEPROM_SB_EDH_MAJ_SHFT;
13221 minor = val & TG3_EEPROM_SB_EDH_MIN_MASK;
13223 if (minor > 99 || build > 26)
13224 return;
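/* minor is rendered "%02d" (two digits) and build becomes a trailing
 * 'a'..'z' letter below, hence the 99 and 26 caps.
 */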
13226 offset = strlen(tp->fw_ver);
13227 snprintf(&tp->fw_ver[offset], TG3_VER_SIZE - offset,
13228 " v%d.%02d", major, minor);
13230 if (build > 0) {
13231 offset = strlen(tp->fw_ver);
13232 if (offset < TG3_VER_SIZE - 1)
13233 tp->fw_ver[offset] = 'a' + build - 1;
13237 static void __devinit tg3_read_mgmtfw_ver(struct tg3 *tp)
13239 u32 val, offset, start;
13240 int i, vlen;
13242 for (offset = TG3_NVM_DIR_START;
13243 offset < TG3_NVM_DIR_END;
13244 offset += TG3_NVM_DIRENT_SIZE) {
13245 if (tg3_nvram_read(tp, offset, &val))
13246 return;
13248 if ((val >> TG3_NVM_DIRTYPE_SHIFT) == TG3_NVM_DIRTYPE_ASFINI)
13249 break;
13252 if (offset == TG3_NVM_DIR_END)
13253 return;
13255 if (!tg3_flag(tp, 5705_PLUS))
13256 start = 0x08000000;
13257 else if (tg3_nvram_read(tp, offset - 4, &start))
13258 return;
13260 if (tg3_nvram_read(tp, offset + 4, &offset) ||
13261 !tg3_fw_img_is_valid(tp, offset) ||
13262 tg3_nvram_read(tp, offset + 8, &val))
13263 return;
13265 offset += val - start;
13267 vlen = strlen(tp->fw_ver);
13269 tp->fw_ver[vlen++] = ',';
13270 tp->fw_ver[vlen++] = ' ';
13272 for (i = 0; i < 4; i++) {
13273 __be32 v;
13274 if (tg3_nvram_read_be32(tp, offset, &v))
13275 return;
13277 offset += sizeof(v);
13279 if (vlen > TG3_VER_SIZE - sizeof(v)) {
13280 memcpy(&tp->fw_ver[vlen], &v, TG3_VER_SIZE - vlen);
13281 break;
13284 memcpy(&tp->fw_ver[vlen], &v, sizeof(v));
13285 vlen += sizeof(v);
13289 static void __devinit tg3_read_dash_ver(struct tg3 *tp)
13291 int vlen;
13292 u32 apedata;
13293 char *fwtype;
13295 if (!tg3_flag(tp, ENABLE_APE) || !tg3_flag(tp, ENABLE_ASF))
13296 return;
13298 apedata = tg3_ape_read32(tp, TG3_APE_SEG_SIG);
13299 if (apedata != APE_SEG_SIG_MAGIC)
13300 return;
13302 apedata = tg3_ape_read32(tp, TG3_APE_FW_STATUS);
13303 if (!(apedata & APE_FW_STATUS_READY))
13304 return;
13306 apedata = tg3_ape_read32(tp, TG3_APE_FW_VERSION);
13308 if (tg3_ape_read32(tp, TG3_APE_FW_FEATURES) & TG3_APE_FW_FEATURE_NCSI) {
13309 tg3_flag_set(tp, APE_HAS_NCSI);
13310 fwtype = "NCSI";
13311 } else {
13312 fwtype = "DASH";
13315 vlen = strlen(tp->fw_ver);
13317 snprintf(&tp->fw_ver[vlen], TG3_VER_SIZE - vlen, " %s v%d.%d.%d.%d",
13318 fwtype,
13319 (apedata & APE_FW_VERSION_MAJMSK) >> APE_FW_VERSION_MAJSFT,
13320 (apedata & APE_FW_VERSION_MINMSK) >> APE_FW_VERSION_MINSFT,
13321 (apedata & APE_FW_VERSION_REVMSK) >> APE_FW_VERSION_REVSFT,
13322 (apedata & APE_FW_VERSION_BLDMSK));
13325 static void __devinit tg3_read_fw_ver(struct tg3 *tp)
13327 u32 val;
13328 bool vpd_vers = false;
13330 if (tp->fw_ver[0] != 0)
13331 vpd_vers = true;
13333 if (tg3_flag(tp, NO_NVRAM)) {
13334 strcat(tp->fw_ver, "sb");
13335 return;
13338 if (tg3_nvram_read(tp, 0, &val))
13339 return;
13341 if (val == TG3_EEPROM_MAGIC)
13342 tg3_read_bc_ver(tp);
13343 else if ((val & TG3_EEPROM_MAGIC_FW_MSK) == TG3_EEPROM_MAGIC_FW)
13344 tg3_read_sb_ver(tp, val);
13345 else if ((val & TG3_EEPROM_MAGIC_HW_MSK) == TG3_EEPROM_MAGIC_HW)
13346 tg3_read_hwsb_ver(tp);
13347 else
13348 return;
13350 if (!tg3_flag(tp, ENABLE_ASF) || tg3_flag(tp, ENABLE_APE) || vpd_vers)
13351 goto done;
13353 tg3_read_mgmtfw_ver(tp);
13355 done:
13356 tp->fw_ver[TG3_VER_SIZE - 1] = 0;
13359 static struct pci_dev * __devinit tg3_find_peer(struct tg3 *);
13361 static inline u32 tg3_rx_ret_ring_size(struct tg3 *tp)
13363 if (tg3_flag(tp, LRG_PROD_RING_CAP))
13364 return TG3_RX_RET_MAX_SIZE_5717;
13365 else if (tg3_flag(tp, JUMBO_CAPABLE) && !tg3_flag(tp, 5780_CLASS))
13366 return TG3_RX_RET_MAX_SIZE_5700;
13367 else
13368 return TG3_RX_RET_MAX_SIZE_5705;
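/* Host bridges known to reorder posted writes to the mailbox registers;
 * when one is present on a non-PCI-Express board, MBOX_WRITE_REORDER is
 * set below and every mailbox write is flushed with a read-back.
 */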
13371 static DEFINE_PCI_DEVICE_TABLE(tg3_write_reorder_chipsets) = {
13372 { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_FE_GATE_700C) },
13373 { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_8131_BRIDGE) },
13374 { PCI_DEVICE(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_8385_0) },
13375 { },
13378 static int __devinit tg3_get_invariants(struct tg3 *tp)
13380 u32 misc_ctrl_reg;
13381 u32 pci_state_reg, grc_misc_cfg;
13382 u32 val;
13383 u16 pci_cmd;
13384 int err;
13386 /* Force memory write invalidate off. If we leave it on,
13387 * then on 5700_BX chips we have to enable a workaround.
13388 * The workaround is to set the TG3PCI_DMA_RW_CTRL boundary
13389 * to match the cacheline size. The Broadcom driver has this
13390 * workaround but turns MWI off all the time and so never
13391 * uses it. This seems to suggest that the workaround is insufficient.
13393 pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
13394 pci_cmd &= ~PCI_COMMAND_INVALIDATE;
13395 pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
13397 /* It is absolutely critical that TG3PCI_MISC_HOST_CTRL
13398 * has the register indirect write enable bit set before
13399 * we try to access any of the MMIO registers. It is also
13400 * critical that the PCI-X hw workaround situation is decided
13401 * before that point.
13403 pci_read_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
13404 &misc_ctrl_reg);
13406 tp->pci_chip_rev_id = (misc_ctrl_reg >>
13407 MISC_HOST_CTRL_CHIPREV_SHIFT);
13408 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_USE_PROD_ID_REG) {
13409 u32 prod_id_asic_rev;
13411 if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717 ||
13412 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5718 ||
13413 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5719 ||
13414 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5720)
13415 pci_read_config_dword(tp->pdev,
13416 TG3PCI_GEN2_PRODID_ASICREV,
13417 &prod_id_asic_rev);
13418 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57781 ||
13419 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57785 ||
13420 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57761 ||
13421 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57765 ||
13422 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57791 ||
13423 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57795)
13424 pci_read_config_dword(tp->pdev,
13425 TG3PCI_GEN15_PRODID_ASICREV,
13426 &prod_id_asic_rev);
13427 else
13428 pci_read_config_dword(tp->pdev, TG3PCI_PRODID_ASICREV,
13429 &prod_id_asic_rev);
13431 tp->pci_chip_rev_id = prod_id_asic_rev;
13434 /* Wrong chip ID in 5752 A0. This code can be removed later
13435 * as A0 is not in production.
13437 if (tp->pci_chip_rev_id == CHIPREV_ID_5752_A0_HW)
13438 tp->pci_chip_rev_id = CHIPREV_ID_5752_A0;
13440 /* If we have 5702/03 A1 or A2 on certain ICH chipsets,
13441 * we need to disable memory and use config. cycles
13442 * only to access all registers. The 5702/03 chips
13443 * can mistakenly decode the special cycles from the
13444 * ICH chipsets as memory write cycles, causing corruption
13445 * of register and memory space. Only certain ICH bridges
13446 * will drive special cycles with non-zero data during the
13447 * address phase which can fall within the 5703's address
13448 * range. This is not an ICH bug as the PCI spec allows
13449 * non-zero address during special cycles. However, only
13450 * these ICH bridges are known to drive non-zero addresses
13451 * during special cycles.
13453 * Since special cycles do not cross PCI bridges, we only
13454 * enable this workaround if the 5703 is on the secondary
13455 * bus of these ICH bridges.
13457 if ((tp->pci_chip_rev_id == CHIPREV_ID_5703_A1) ||
13458 (tp->pci_chip_rev_id == CHIPREV_ID_5703_A2)) {
13459 static struct tg3_dev_id {
13460 u32 vendor;
13461 u32 device;
13462 u32 rev;
13463 } ich_chipsets[] = {
13464 { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801AA_8,
13465 PCI_ANY_ID },
13466 { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801AB_8,
13467 PCI_ANY_ID },
13468 { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801BA_11,
13469 0xa },
13470 { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801BA_6,
13471 PCI_ANY_ID },
13472 { },
13474 struct tg3_dev_id *pci_id = &ich_chipsets[0];
13475 struct pci_dev *bridge = NULL;
13477 while (pci_id->vendor != 0) {
13478 bridge = pci_get_device(pci_id->vendor, pci_id->device,
13479 bridge);
13480 if (!bridge) {
13481 pci_id++;
13482 continue;
13484 if (pci_id->rev != PCI_ANY_ID) {
13485 if (bridge->revision > pci_id->rev)
13486 continue;
13488 if (bridge->subordinate &&
13489 (bridge->subordinate->number ==
13490 tp->pdev->bus->number)) {
13491 tg3_flag_set(tp, ICH_WORKAROUND);
13492 pci_dev_put(bridge);
13493 break;
13498 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701)) {
13499 static struct tg3_dev_id {
13500 u32 vendor;
13501 u32 device;
13502 } bridge_chipsets[] = {
13503 { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_PXH_0 },
13504 { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_PXH_1 },
13505 { },
13507 struct tg3_dev_id *pci_id = &bridge_chipsets[0];
13508 struct pci_dev *bridge = NULL;
13510 while (pci_id->vendor != 0) {
13511 bridge = pci_get_device(pci_id->vendor,
13512 pci_id->device,
13513 bridge);
13514 if (!bridge) {
13515 pci_id++;
13516 continue;
13518 if (bridge->subordinate &&
13519 (bridge->subordinate->number <=
13520 tp->pdev->bus->number) &&
13521 (bridge->subordinate->subordinate >=
13522 tp->pdev->bus->number)) {
13523 tg3_flag_set(tp, 5701_DMA_BUG);
13524 pci_dev_put(bridge);
13525 break;
13530 /* The EPB bridge inside 5714, 5715, and 5780 cannot support
13531 * DMA addresses > 40-bit. This bridge may have additional
13532 * 57xx devices behind it in some 4-port NIC designs, for example.
13533 * Any tg3 device found behind the bridge will also need the 40-bit
13534 * DMA workaround.
13536 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780 ||
13537 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714) {
13538 tg3_flag_set(tp, 5780_CLASS);
13539 tg3_flag_set(tp, 40BIT_DMA_BUG);
13540 tp->msi_cap = pci_find_capability(tp->pdev, PCI_CAP_ID_MSI);
13541 } else {
13542 struct pci_dev *bridge = NULL;
13544 do {
13545 bridge = pci_get_device(PCI_VENDOR_ID_SERVERWORKS,
13546 PCI_DEVICE_ID_SERVERWORKS_EPB,
13547 bridge);
13548 if (bridge && bridge->subordinate &&
13549 (bridge->subordinate->number <=
13550 tp->pdev->bus->number) &&
13551 (bridge->subordinate->subordinate >=
13552 tp->pdev->bus->number)) {
13553 tg3_flag_set(tp, 40BIT_DMA_BUG);
13554 pci_dev_put(bridge);
13555 break;
13557 } while (bridge);
13560 /* Initialize misc host control in PCI block. */
13561 tp->misc_host_ctrl |= (misc_ctrl_reg &
13562 MISC_HOST_CTRL_CHIPREV);
13563 pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
13564 tp->misc_host_ctrl);
13566 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
13567 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714 ||
13568 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
13569 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720)
13570 tp->pdev_peer = tg3_find_peer(tp);
13572 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
13573 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
13574 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720)
13575 tg3_flag_set(tp, 5717_PLUS);
13577 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765 ||
13578 tg3_flag(tp, 5717_PLUS))
13579 tg3_flag_set(tp, 57765_PLUS);
13581 /* Intentionally exclude ASIC_REV_5906 */
13582 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
13583 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 ||
13584 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
13585 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
13586 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
13587 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780 ||
13588 tg3_flag(tp, 57765_PLUS))
13589 tg3_flag_set(tp, 5755_PLUS);
13591 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750 ||
13592 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752 ||
13593 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906 ||
13594 tg3_flag(tp, 5755_PLUS) ||
13595 tg3_flag(tp, 5780_CLASS))
13596 tg3_flag_set(tp, 5750_PLUS);
13598 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) ||
13599 tg3_flag(tp, 5750_PLUS))
13600 tg3_flag_set(tp, 5705_PLUS);
13602 /* 5700 B0 chips do not support checksumming correctly due
13603 * to hardware bugs.
13605 if (tp->pci_chip_rev_id != CHIPREV_ID_5700_B0) {
13606 u32 features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_RXCSUM;
13608 if (tg3_flag(tp, 5755_PLUS))
13609 features |= NETIF_F_IPV6_CSUM;
13610 tp->dev->features |= features;
13611 tp->dev->hw_features |= features;
13612 tp->dev->vlan_features |= features;
13615 /* Determine TSO capabilities */
13616 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719)
13617 ; /* Do nothing. HW bug. */
13618 else if (tg3_flag(tp, 57765_PLUS))
13619 tg3_flag_set(tp, HW_TSO_3);
13620 else if (tg3_flag(tp, 5755_PLUS) ||
13621 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
13622 tg3_flag_set(tp, HW_TSO_2);
13623 else if (tg3_flag(tp, 5750_PLUS)) {
13624 tg3_flag_set(tp, HW_TSO_1);
13625 tg3_flag_set(tp, TSO_BUG);
13626 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750 &&
13627 tp->pci_chip_rev_id >= CHIPREV_ID_5750_C2)
13628 tg3_flag_clear(tp, TSO_BUG);
13629 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
13630 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701 &&
13631 tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) {
13632 tg3_flag_set(tp, TSO_BUG);
13633 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705)
13634 tp->fw_needed = FIRMWARE_TG3TSO5;
13635 else
13636 tp->fw_needed = FIRMWARE_TG3TSO;
13639 tp->irq_max = 1;
13641 if (tg3_flag(tp, 5750_PLUS)) {
13642 tg3_flag_set(tp, SUPPORT_MSI);
13643 if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_AX ||
13644 GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_BX ||
13645 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714 &&
13646 tp->pci_chip_rev_id <= CHIPREV_ID_5714_A2 &&
13647 tp->pdev_peer == tp->pdev))
13648 tg3_flag_clear(tp, SUPPORT_MSI);
13650 if (tg3_flag(tp, 5755_PLUS) ||
13651 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
13652 tg3_flag_set(tp, 1SHOT_MSI);
13655 if (tg3_flag(tp, 57765_PLUS)) {
13656 tg3_flag_set(tp, SUPPORT_MSIX);
13657 tp->irq_max = TG3_IRQ_MAX_VECS;
13661 /* All chips can get confused if TX buffers
13662 * straddle the 4GB address boundary.
13664 tg3_flag_set(tp, 4G_DMA_BNDRY_BUG);
13666 if (tg3_flag(tp, 5755_PLUS))
13667 tg3_flag_set(tp, SHORT_DMA_BUG);
13668 else
13669 tg3_flag_set(tp, 40BIT_DMA_LIMIT_BUG);
13671 if (tg3_flag(tp, 5717_PLUS))
13672 tg3_flag_set(tp, LRG_PROD_RING_CAP);
13674 if (tg3_flag(tp, 57765_PLUS) &&
13675 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5719)
13676 tg3_flag_set(tp, USE_JUMBO_BDFLAG);
13678 if (!tg3_flag(tp, 5705_PLUS) ||
13679 tg3_flag(tp, 5780_CLASS) ||
13680 tg3_flag(tp, USE_JUMBO_BDFLAG))
13681 tg3_flag_set(tp, JUMBO_CAPABLE);
13683 pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE,
13684 &pci_state_reg);
13686 tp->pcie_cap = pci_find_capability(tp->pdev, PCI_CAP_ID_EXP);
13687 if (tp->pcie_cap != 0) {
13688 u16 lnkctl;
13690 tg3_flag_set(tp, PCI_EXPRESS);
13692 tp->pcie_readrq = 4096;
13693 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
13694 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720)
13695 tp->pcie_readrq = 2048;
13697 pcie_set_readrq(tp->pdev, tp->pcie_readrq);
13699 pci_read_config_word(tp->pdev,
13700 tp->pcie_cap + PCI_EXP_LNKCTL,
13701 &lnkctl);
13702 if (lnkctl & PCI_EXP_LNKCTL_CLKREQ_EN) {
13703 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
13704 tg3_flag_clear(tp, HW_TSO_2);
13705 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
13706 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
13707 tp->pci_chip_rev_id == CHIPREV_ID_57780_A0 ||
13708 tp->pci_chip_rev_id == CHIPREV_ID_57780_A1)
13709 tg3_flag_set(tp, CLKREQ_BUG);
13710 } else if (tp->pci_chip_rev_id == CHIPREV_ID_5717_A0) {
13711 tg3_flag_set(tp, L1PLLPD_EN);
13713 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785) {
13714 tg3_flag_set(tp, PCI_EXPRESS);
13715 } else if (!tg3_flag(tp, 5705_PLUS) ||
13716 tg3_flag(tp, 5780_CLASS)) {
13717 tp->pcix_cap = pci_find_capability(tp->pdev, PCI_CAP_ID_PCIX);
13718 if (!tp->pcix_cap) {
13719 dev_err(&tp->pdev->dev,
13720 "Cannot find PCI-X capability, aborting\n");
13721 return -EIO;
13724 if (!(pci_state_reg & PCISTATE_CONV_PCI_MODE))
13725 tg3_flag_set(tp, PCIX_MODE);
13728 /* If we have an AMD 762 or VIA K8T800 chipset, write
13729 * reordering to the mailbox registers done by the host
13730 * controller can cause major troubles. We read back from
13731 * every mailbox register write to force the writes to be
13732 * posted to the chip in order.
13734 if (pci_dev_present(tg3_write_reorder_chipsets) &&
13735 !tg3_flag(tp, PCI_EXPRESS))
13736 tg3_flag_set(tp, MBOX_WRITE_REORDER);
13738 pci_read_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE,
13739 &tp->pci_cacheline_sz);
13740 pci_read_config_byte(tp->pdev, PCI_LATENCY_TIMER,
13741 &tp->pci_lat_timer);
13742 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 &&
13743 tp->pci_lat_timer < 64) {
13744 tp->pci_lat_timer = 64;
13745 pci_write_config_byte(tp->pdev, PCI_LATENCY_TIMER,
13746 tp->pci_lat_timer);
13749 if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5700_BX) {
13750 /* 5700 BX chips need to have their TX producer index
13751 * mailboxes written twice to work around a bug.
13753 tg3_flag_set(tp, TXD_MBOX_HWBUG);
13755 /* If we are in PCI-X mode, enable register write workaround.
13757 * The workaround is to use indirect register accesses
13758 * for all chip writes not to mailbox registers.
13760 if (tg3_flag(tp, PCIX_MODE)) {
13761 u32 pm_reg;
13763 tg3_flag_set(tp, PCIX_TARGET_HWBUG);
13765 /* The chip can have its power management PCI config
13766 * space registers clobbered due to this bug.
13767 * So explicitly force the chip into D0 here.
13769 pci_read_config_dword(tp->pdev,
13770 tp->pm_cap + PCI_PM_CTRL,
13771 &pm_reg);
13772 pm_reg &= ~PCI_PM_CTRL_STATE_MASK;
13773 pm_reg |= PCI_PM_CTRL_PME_ENABLE | 0 /* D0 */;
13774 pci_write_config_dword(tp->pdev,
13775 tp->pm_cap + PCI_PM_CTRL,
13776 pm_reg);
13778 /* Also, force SERR#/PERR# in PCI command. */
13779 pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
13780 pci_cmd |= PCI_COMMAND_PARITY | PCI_COMMAND_SERR;
13781 pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
13785 if ((pci_state_reg & PCISTATE_BUS_SPEED_HIGH) != 0)
13786 tg3_flag_set(tp, PCI_HIGH_SPEED);
13787 if ((pci_state_reg & PCISTATE_BUS_32BIT) != 0)
13788 tg3_flag_set(tp, PCI_32BIT);
13790 /* Chip-specific fixup from Broadcom driver */
13791 if ((tp->pci_chip_rev_id == CHIPREV_ID_5704_A0) &&
13792 (!(pci_state_reg & PCISTATE_RETRY_SAME_DMA))) {
13793 pci_state_reg |= PCISTATE_RETRY_SAME_DMA;
13794 pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE, pci_state_reg);
13797 /* Default fast path register access methods */
13798 tp->read32 = tg3_read32;
13799 tp->write32 = tg3_write32;
13800 tp->read32_mbox = tg3_read32;
13801 tp->write32_mbox = tg3_write32;
13802 tp->write32_tx_mbox = tg3_write32;
13803 tp->write32_rx_mbox = tg3_write32;
13805 /* Various workaround register access methods */
13806 if (tg3_flag(tp, PCIX_TARGET_HWBUG))
13807 tp->write32 = tg3_write_indirect_reg32;
13808 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701 ||
13809 (tg3_flag(tp, PCI_EXPRESS) &&
13810 tp->pci_chip_rev_id == CHIPREV_ID_5750_A0)) {
13811 /*
13812 * Back to back register writes can cause problems on these
13813 * chips, the workaround is to read back all reg writes
13814 * except those to mailbox regs.
13816 * See tg3_write_indirect_reg32().
13818 tp->write32 = tg3_write_flush_reg32;
13821 if (tg3_flag(tp, TXD_MBOX_HWBUG) || tg3_flag(tp, MBOX_WRITE_REORDER)) {
13822 tp->write32_tx_mbox = tg3_write32_tx_mbox;
13823 if (tg3_flag(tp, MBOX_WRITE_REORDER))
13824 tp->write32_rx_mbox = tg3_write_flush_reg32;
13827 if (tg3_flag(tp, ICH_WORKAROUND)) {
13828 tp->read32 = tg3_read_indirect_reg32;
13829 tp->write32 = tg3_write_indirect_reg32;
13830 tp->read32_mbox = tg3_read_indirect_mbox;
13831 tp->write32_mbox = tg3_write_indirect_mbox;
13832 tp->write32_tx_mbox = tg3_write_indirect_mbox;
13833 tp->write32_rx_mbox = tg3_write_indirect_mbox;
13835 iounmap(tp->regs);
13836 tp->regs = NULL;
13838 pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
13839 pci_cmd &= ~PCI_COMMAND_MEMORY;
13840 pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
13842 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
13843 tp->read32_mbox = tg3_read32_mbox_5906;
13844 tp->write32_mbox = tg3_write32_mbox_5906;
13845 tp->write32_tx_mbox = tg3_write32_mbox_5906;
13846 tp->write32_rx_mbox = tg3_write32_mbox_5906;
13849 if (tp->write32 == tg3_write_indirect_reg32 ||
13850 (tg3_flag(tp, PCIX_MODE) &&
13851 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
13852 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701)))
13853 tg3_flag_set(tp, SRAM_USE_CONFIG);
13855 /* Get eeprom hw config before calling tg3_set_power_state().
13856 * In particular, the TG3_FLAG_IS_NIC flag must be
13857 * determined before calling tg3_set_power_state() so that
13858 * we know whether or not to switch out of Vaux power.
13859 * When the flag is set, it means that GPIO1 is used for eeprom
13860 * write protect and also implies that it is a LOM where GPIOs
13861 * are not used to switch power.
13863 tg3_get_eeprom_hw_cfg(tp);
13865 if (tg3_flag(tp, ENABLE_APE)) {
13866 /* Allow reads and writes to the
13867 * APE register and memory space.
13869 pci_state_reg |= PCISTATE_ALLOW_APE_CTLSPC_WR |
13870 PCISTATE_ALLOW_APE_SHMEM_WR |
13871 PCISTATE_ALLOW_APE_PSPACE_WR;
13872 pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE,
13873 pci_state_reg);
13876 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
13877 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
13878 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
13879 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780 ||
13880 tg3_flag(tp, 57765_PLUS))
13881 tg3_flag_set(tp, CPMU_PRESENT);
13883 /* Set up tp->grc_local_ctrl before calling tg3_power_up().
13884 * GPIO1 driven high will bring 5700's external PHY out of reset.
13885 * It is also used as eeprom write protect on LOMs.
13887 tp->grc_local_ctrl = GRC_LCLCTRL_INT_ON_ATTN | GRC_LCLCTRL_AUTO_SEEPROM;
13888 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700) ||
13889 tg3_flag(tp, EEPROM_WRITE_PROT))
13890 tp->grc_local_ctrl |= (GRC_LCLCTRL_GPIO_OE1 |
13891 GRC_LCLCTRL_GPIO_OUTPUT1);
13892 /* Unused GPIO3 must be driven as output on 5752 because there
13893 * are no pull-up resistors on unused GPIO pins.
13895 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752)
13896 tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE3;
13898 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
13899 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780 ||
13900 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765)
13901 tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_UART_SEL;
13903 if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761 ||
13904 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761S) {
13905 /* Turn off the debug UART. */
13906 tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_UART_SEL;
13907 if (tg3_flag(tp, IS_NIC))
13908 /* Keep VMain power. */
13909 tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE0 |
13910 GRC_LCLCTRL_GPIO_OUTPUT0;
13913 /* Force the chip into D0. */
13914 err = tg3_power_up(tp);
13915 if (err) {
13916 dev_err(&tp->pdev->dev, "Transition to D0 failed\n");
13917 return err;
13920 /* Derive initial jumbo mode from MTU assigned in
13921 * ether_setup() via the alloc_etherdev() call
13923 if (tp->dev->mtu > ETH_DATA_LEN && !tg3_flag(tp, 5780_CLASS))
13924 tg3_flag_set(tp, JUMBO_RING_ENABLE);
13926 /* Determine WakeOnLan speed to use. */
13927 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
13928 tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
13929 tp->pci_chip_rev_id == CHIPREV_ID_5701_B0 ||
13930 tp->pci_chip_rev_id == CHIPREV_ID_5701_B2) {
13931 tg3_flag_clear(tp, WOL_SPEED_100MB);
13932 } else {
13933 tg3_flag_set(tp, WOL_SPEED_100MB);
13936 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
13937 tp->phy_flags |= TG3_PHYFLG_IS_FET;
13939 /* A few boards don't want the Ethernet@WireSpeed phy feature */
13940 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700) ||
13941 ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) &&
13942 (tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) &&
13943 (tp->pci_chip_rev_id != CHIPREV_ID_5705_A1)) ||
13944 (tp->phy_flags & TG3_PHYFLG_IS_FET) ||
13945 (tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
13946 tp->phy_flags |= TG3_PHYFLG_NO_ETH_WIRE_SPEED;
13948 if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5703_AX ||
13949 GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5704_AX)
13950 tp->phy_flags |= TG3_PHYFLG_ADC_BUG;
13951 if (tp->pci_chip_rev_id == CHIPREV_ID_5704_A0)
13952 tp->phy_flags |= TG3_PHYFLG_5704_A0_BUG;
13954 if (tg3_flag(tp, 5705_PLUS) &&
13955 !(tp->phy_flags & TG3_PHYFLG_IS_FET) &&
13956 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5785 &&
13957 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_57780 &&
13958 !tg3_flag(tp, 57765_PLUS)) {
13959 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
13960 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 ||
13961 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
13962 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761) {
13963 if (tp->pdev->device != PCI_DEVICE_ID_TIGON3_5756 &&
13964 tp->pdev->device != PCI_DEVICE_ID_TIGON3_5722)
13965 tp->phy_flags |= TG3_PHYFLG_JITTER_BUG;
13966 if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5755M)
13967 tp->phy_flags |= TG3_PHYFLG_ADJUST_TRIM;
13968 } else
13969 tp->phy_flags |= TG3_PHYFLG_BER_BUG;
13972 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 &&
13973 GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5784_AX) {
13974 tp->phy_otp = tg3_read_otp_phycfg(tp);
13975 if (tp->phy_otp == 0)
13976 tp->phy_otp = TG3_OTP_DEFAULT;
13979 if (tg3_flag(tp, CPMU_PRESENT))
13980 tp->mi_mode = MAC_MI_MODE_500KHZ_CONST;
13981 else
13982 tp->mi_mode = MAC_MI_MODE_BASE;
13984 tp->coalesce_mode = 0;
13985 if (GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5700_AX &&
13986 GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5700_BX)
13987 tp->coalesce_mode |= HOSTCC_MODE_32BYTE;
13989 /* Set these bits to enable statistics workaround. */
13990 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
13991 tp->pci_chip_rev_id == CHIPREV_ID_5719_A0 ||
13992 tp->pci_chip_rev_id == CHIPREV_ID_5720_A0) {
13993 tp->coalesce_mode |= HOSTCC_MODE_ATTN;
13994 tp->grc_mode |= GRC_MODE_IRQ_ON_FLOW_ATTN;
13997 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
13998 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780)
13999 tg3_flag_set(tp, USE_PHYLIB);
14001 err = tg3_mdio_init(tp);
14002 if (err)
14003 return err;
14005 /* Initialize data/descriptor byte/word swapping. */
14006 val = tr32(GRC_MODE);
14007 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720)
14008 val &= (GRC_MODE_BYTE_SWAP_B2HRX_DATA |
14009 GRC_MODE_WORD_SWAP_B2HRX_DATA |
14010 GRC_MODE_B2HRX_ENABLE |
14011 GRC_MODE_HTX2B_ENABLE |
14012 GRC_MODE_HOST_STACKUP);
14013 else
14014 val &= GRC_MODE_HOST_STACKUP;
14016 tw32(GRC_MODE, val | tp->grc_mode);
14018 tg3_switch_clocks(tp);
14020 /* Clear this out for sanity. */
14021 tw32(TG3PCI_MEM_WIN_BASE_ADDR, 0);
14023 pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE,
14024 &pci_state_reg);
14025 if ((pci_state_reg & PCISTATE_CONV_PCI_MODE) == 0 &&
14026 !tg3_flag(tp, PCIX_TARGET_HWBUG)) {
14027 u32 chiprevid = GET_CHIP_REV_ID(tp->misc_host_ctrl);
14029 if (chiprevid == CHIPREV_ID_5701_A0 ||
14030 chiprevid == CHIPREV_ID_5701_B0 ||
14031 chiprevid == CHIPREV_ID_5701_B2 ||
14032 chiprevid == CHIPREV_ID_5701_B5) {
14033 void __iomem *sram_base;
14035 /* Write some dummy words into the SRAM status block
14036 * area and see if they read back correctly. If the
14037 * readback is bad, force-enable the PCIX workaround.
14039 sram_base = tp->regs + NIC_SRAM_WIN_BASE + NIC_SRAM_STATS_BLK;
14041 writel(0x00000000, sram_base);
14042 writel(0x00000000, sram_base + 4);
14043 writel(0xffffffff, sram_base + 4);
14044 if (readl(sram_base) != 0x00000000)
14045 tg3_flag_set(tp, PCIX_TARGET_HWBUG);
14046 }
14047 }
14049 udelay(50);
14050 tg3_nvram_init(tp);
14052 grc_misc_cfg = tr32(GRC_MISC_CFG);
14053 grc_misc_cfg &= GRC_MISC_CFG_BOARD_ID_MASK;
14055 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
14056 (grc_misc_cfg == GRC_MISC_CFG_BOARD_ID_5788 ||
14057 grc_misc_cfg == GRC_MISC_CFG_BOARD_ID_5788M))
14058 tg3_flag_set(tp, IS_5788);
14060 if (!tg3_flag(tp, IS_5788) &&
14061 (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700))
14062 tg3_flag_set(tp, TAGGED_STATUS);
14063 if (tg3_flag(tp, TAGGED_STATUS)) {
14064 tp->coalesce_mode |= (HOSTCC_MODE_CLRTICK_RXBD |
14065 HOSTCC_MODE_CLRTICK_TXBD);
14067 tp->misc_host_ctrl |= MISC_HOST_CTRL_TAGGED_STATUS;
14068 pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
14069 tp->misc_host_ctrl);
14070 }
14072 /* Preserve the APE MAC_MODE bits */
14073 if (tg3_flag(tp, ENABLE_APE))
14074 tp->mac_mode = MAC_MODE_APE_TX_EN | MAC_MODE_APE_RX_EN;
14075 else
14076 tp->mac_mode = TG3_DEF_MAC_MODE;
14078 /* these are limited to 10/100 only */
14079 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 &&
14080 (grc_misc_cfg == 0x8000 || grc_misc_cfg == 0x4000)) ||
14081 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
14082 tp->pdev->vendor == PCI_VENDOR_ID_BROADCOM &&
14083 (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5901 ||
14084 tp->pdev->device == PCI_DEVICE_ID_TIGON3_5901_2 ||
14085 tp->pdev->device == PCI_DEVICE_ID_TIGON3_5705F)) ||
14086 (tp->pdev->vendor == PCI_VENDOR_ID_BROADCOM &&
14087 (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5751F ||
14088 tp->pdev->device == PCI_DEVICE_ID_TIGON3_5753F ||
14089 tp->pdev->device == PCI_DEVICE_ID_TIGON3_5787F)) ||
14090 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57790 ||
14091 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57791 ||
14092 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57795 ||
14093 (tp->phy_flags & TG3_PHYFLG_IS_FET))
14094 tp->phy_flags |= TG3_PHYFLG_10_100_ONLY;
14096 err = tg3_phy_probe(tp);
14097 if (err) {
14098 dev_err(&tp->pdev->dev, "phy probe failed, err %d\n", err);
14099 /* ... but do not return immediately ... */
14100 tg3_mdio_fini(tp);
14101 }
14103 tg3_read_vpd(tp);
14104 tg3_read_fw_ver(tp);
14106 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
14107 tp->phy_flags &= ~TG3_PHYFLG_USE_MI_INTERRUPT;
14108 } else {
14109 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700)
14110 tp->phy_flags |= TG3_PHYFLG_USE_MI_INTERRUPT;
14111 else
14112 tp->phy_flags &= ~TG3_PHYFLG_USE_MI_INTERRUPT;
14113 }
14115 /* 5700 {AX,BX} chips have a broken status block link
14116 * change bit implementation, so we must use the
14117 * status register in those cases.
14118 */
14119 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700)
14120 tg3_flag_set(tp, USE_LINKCHG_REG);
14121 else
14122 tg3_flag_clear(tp, USE_LINKCHG_REG);
14124 /* The led_ctrl is set during tg3_phy_probe; here we might
14125 * have to force the link status polling mechanism based
14126 * upon subsystem IDs.
14127 */
14128 if (tp->pdev->subsystem_vendor == PCI_VENDOR_ID_DELL &&
14129 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701 &&
14130 !(tp->phy_flags & TG3_PHYFLG_PHY_SERDES)) {
14131 tp->phy_flags |= TG3_PHYFLG_USE_MI_INTERRUPT;
14132 tg3_flag_set(tp, USE_LINKCHG_REG);
14133 }
14135 /* For all SERDES we poll the MAC status register. */
14136 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
14137 tg3_flag_set(tp, POLL_SERDES);
14138 else
14139 tg3_flag_clear(tp, POLL_SERDES);
14141 tp->rx_offset = NET_IP_ALIGN;
14142 tp->rx_copy_thresh = TG3_RX_COPY_THRESHOLD;
14143 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701 &&
14144 tg3_flag(tp, PCIX_MODE)) {
14145 tp->rx_offset = 0;
14146 #ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
14147 tp->rx_copy_thresh = ~(u16)0;
14148 #endif
14149 }
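/* For illustration: with rx_offset forced to 0, received IP headers are
 * no longer NET_IP_ALIGN-aligned, so on architectures without efficient
 * unaligned access the copy threshold is raised to its maximum, i.e.
 * every frame is copied into a freshly allocated, aligned skb.
 */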
14151 tp->rx_std_ring_mask = TG3_RX_STD_RING_SIZE(tp) - 1;
14152 tp->rx_jmb_ring_mask = TG3_RX_JMB_RING_SIZE(tp) - 1;
14153 tp->rx_ret_ring_mask = tg3_rx_ret_ring_size(tp) - 1;
14155 tp->rx_std_max_post = tp->rx_std_ring_mask + 1;
14157 /* Increment the rx prod index on the rx std ring by at most
14158 * 8 for these chips to work around hw errata.
14159 */
14160 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750 ||
14161 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752 ||
14162 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755)
14163 tp->rx_std_max_post = 8;
14165 if (tg3_flag(tp, ASPM_WORKAROUND))
14166 tp->pwrmgmt_thresh = tr32(PCIE_PWR_MGMT_THRESH) &
14167 PCIE_PWR_MGMT_L1_THRESH_MSK;
14169 return err;
14170 }
14172 #ifdef CONFIG_SPARC
14173 static int __devinit tg3_get_macaddr_sparc(struct tg3 *tp)
14174 {
14175 struct net_device *dev = tp->dev;
14176 struct pci_dev *pdev = tp->pdev;
14177 struct device_node *dp = pci_device_to_OF_node(pdev);
14178 const unsigned char *addr;
14179 int len;
14181 addr = of_get_property(dp, "local-mac-address", &len);
14182 if (addr && len == 6) {
14183 memcpy(dev->dev_addr, addr, 6);
14184 memcpy(dev->perm_addr, dev->dev_addr, 6);
14185 return 0;
14186 }
14187 return -ENODEV;
14188 }
14190 static int __devinit tg3_get_default_macaddr_sparc(struct tg3 *tp)
14191 {
14192 struct net_device *dev = tp->dev;
14194 memcpy(dev->dev_addr, idprom->id_ethaddr, 6);
14195 memcpy(dev->perm_addr, idprom->id_ethaddr, 6);
14196 return 0;
14197 }
14198 #endif
14200 static int __devinit tg3_get_device_address(struct tg3 *tp)
14201 {
14202 struct net_device *dev = tp->dev;
14203 u32 hi, lo, mac_offset;
14204 int addr_ok = 0;
14206 #ifdef CONFIG_SPARC
14207 if (!tg3_get_macaddr_sparc(tp))
14208 return 0;
14209 #endif
14211 mac_offset = 0x7c;
14212 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) ||
14213 tg3_flag(tp, 5780_CLASS)) {
14214 if (tr32(TG3PCI_DUAL_MAC_CTRL) & DUAL_MAC_CTRL_ID)
14215 mac_offset = 0xcc;
14216 if (tg3_nvram_lock(tp))
14217 tw32_f(NVRAM_CMD, NVRAM_CMD_RESET);
14218 else
14219 tg3_nvram_unlock(tp);
14220 } else if (tg3_flag(tp, 5717_PLUS)) {
14221 if (PCI_FUNC(tp->pdev->devfn) & 1)
14222 mac_offset = 0xcc;
14223 if (PCI_FUNC(tp->pdev->devfn) > 1)
14224 mac_offset += 0x18c;
14225 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
14226 mac_offset = 0x10;
14228 /* First try to get it from MAC address mailbox. */
14229 tg3_read_mem(tp, NIC_SRAM_MAC_ADDR_HIGH_MBOX, &hi);
14230 if ((hi >> 16) == 0x484b) {
14231 dev->dev_addr[0] = (hi >> 8) & 0xff;
14232 dev->dev_addr[1] = (hi >> 0) & 0xff;
14234 tg3_read_mem(tp, NIC_SRAM_MAC_ADDR_LOW_MBOX, &lo);
14235 dev->dev_addr[2] = (lo >> 24) & 0xff;
14236 dev->dev_addr[3] = (lo >> 16) & 0xff;
14237 dev->dev_addr[4] = (lo >> 8) & 0xff;
14238 dev->dev_addr[5] = (lo >> 0) & 0xff;
14240 /* Some old bootcode may report a 0 MAC address in SRAM */
14241 addr_ok = is_valid_ether_addr(&dev->dev_addr[0]);
14242 }
14243 if (!addr_ok) {
14244 /* Next, try NVRAM. */
14245 if (!tg3_flag(tp, NO_NVRAM) &&
14246 !tg3_nvram_read_be32(tp, mac_offset + 0, &hi) &&
14247 !tg3_nvram_read_be32(tp, mac_offset + 4, &lo)) {
14248 memcpy(&dev->dev_addr[0], ((char *)&hi) + 2, 2);
14249 memcpy(&dev->dev_addr[2], (char *)&lo, sizeof(lo));
14250 }
14251 /* Finally just fetch it out of the MAC control regs. */
14252 else {
14253 hi = tr32(MAC_ADDR_0_HIGH);
14254 lo = tr32(MAC_ADDR_0_LOW);
14256 dev->dev_addr[5] = lo & 0xff;
14257 dev->dev_addr[4] = (lo >> 8) & 0xff;
14258 dev->dev_addr[3] = (lo >> 16) & 0xff;
14259 dev->dev_addr[2] = (lo >> 24) & 0xff;
14260 dev->dev_addr[1] = hi & 0xff;
14261 dev->dev_addr[0] = (hi >> 8) & 0xff;
14262 }
14263 }
14265 if (!is_valid_ether_addr(&dev->dev_addr[0])) {
14266 #ifdef CONFIG_SPARC
14267 if (!tg3_get_default_macaddr_sparc(tp))
14268 return 0;
14269 #endif
14270 return -EINVAL;
14271 }
14272 memcpy(dev->perm_addr, dev->dev_addr, dev->addr_len);
14273 return 0;
14274 }
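/* Worked example (hypothetical values): if the mailbox holds
 * hi = 0x484b0a0b and lo = 0x0c0d0e0f, the 0x484b signature in the
 * upper 16 bits of 'hi' marks the entry as valid and the address
 * assembles to 0a:0b:0c:0d:0e:0f.
 */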
14276 #define BOUNDARY_SINGLE_CACHELINE 1
14277 #define BOUNDARY_MULTI_CACHELINE 2
14279 static u32 __devinit tg3_calc_dma_bndry(struct tg3 *tp, u32 val)
14280 {
14281 int cacheline_size;
14282 u8 byte;
14283 int goal;
14285 pci_read_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE, &byte);
14286 if (byte == 0)
14287 cacheline_size = 1024;
14288 else
14289 cacheline_size = (int) byte * 4;
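/* Note: PCI_CACHE_LINE_SIZE is specified in 32-bit words, hence the
 * multiply by 4; e.g. a register value of 0x10 means a 64-byte cache
 * line, while 0 (firmware never set it) is treated as 1024 bytes.
 */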
14291 /* On 5703 and later chips, the boundary bits have no
14292 * effect.
14293 */
14294 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
14295 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701 &&
14296 !tg3_flag(tp, PCI_EXPRESS))
14297 goto out;
14299 #if defined(CONFIG_PPC64) || defined(CONFIG_IA64) || defined(CONFIG_PARISC)
14300 goal = BOUNDARY_MULTI_CACHELINE;
14301 #else
14302 #if defined(CONFIG_SPARC64) || defined(CONFIG_ALPHA)
14303 goal = BOUNDARY_SINGLE_CACHELINE;
14304 #else
14305 goal = 0;
14306 #endif
14307 #endif
14309 if (tg3_flag(tp, 57765_PLUS)) {
14310 val = goal ? 0 : DMA_RWCTRL_DIS_CACHE_ALIGNMENT;
14311 goto out;
14312 }
14314 if (!goal)
14315 goto out;
14317 /* PCI controllers on most RISC systems tend to disconnect
14318 * when a device tries to burst across a cache-line boundary.
14319 * Therefore, letting tg3 do so just wastes PCI bandwidth.
14321 * Unfortunately, for PCI-E there are only limited
14322 * write-side controls for this, and thus for reads
14323 * we will still get the disconnects. We'll also waste
14324 * these PCI cycles for both read and write for chips
14325 * other than 5700 and 5701 which do not implement the
14326 * boundary bits.
14327 */
14328 if (tg3_flag(tp, PCIX_MODE) && !tg3_flag(tp, PCI_EXPRESS)) {
14329 switch (cacheline_size) {
14330 case 16:
14331 case 32:
14332 case 64:
14333 case 128:
14334 if (goal == BOUNDARY_SINGLE_CACHELINE) {
14335 val |= (DMA_RWCTRL_READ_BNDRY_128_PCIX |
14336 DMA_RWCTRL_WRITE_BNDRY_128_PCIX);
14337 } else {
14338 val |= (DMA_RWCTRL_READ_BNDRY_384_PCIX |
14339 DMA_RWCTRL_WRITE_BNDRY_384_PCIX);
14340 }
14341 break;
14343 case 256:
14344 val |= (DMA_RWCTRL_READ_BNDRY_256_PCIX |
14345 DMA_RWCTRL_WRITE_BNDRY_256_PCIX);
14346 break;
14348 default:
14349 val |= (DMA_RWCTRL_READ_BNDRY_384_PCIX |
14350 DMA_RWCTRL_WRITE_BNDRY_384_PCIX);
14351 break;
14352 }
14353 } else if (tg3_flag(tp, PCI_EXPRESS)) {
14354 switch (cacheline_size) {
14355 case 16:
14356 case 32:
14357 case 64:
14358 if (goal == BOUNDARY_SINGLE_CACHELINE) {
14359 val &= ~DMA_RWCTRL_WRITE_BNDRY_DISAB_PCIE;
14360 val |= DMA_RWCTRL_WRITE_BNDRY_64_PCIE;
14361 break;
14362 }
14363 /* fallthrough */
14364 case 128:
14365 default:
14366 val &= ~DMA_RWCTRL_WRITE_BNDRY_DISAB_PCIE;
14367 val |= DMA_RWCTRL_WRITE_BNDRY_128_PCIE;
14368 break;
14369 }
14370 } else {
14371 switch (cacheline_size) {
14372 case 16:
14373 if (goal == BOUNDARY_SINGLE_CACHELINE) {
14374 val |= (DMA_RWCTRL_READ_BNDRY_16 |
14375 DMA_RWCTRL_WRITE_BNDRY_16);
14376 break;
14377 }
14378 /* fallthrough */
14379 case 32:
14380 if (goal == BOUNDARY_SINGLE_CACHELINE) {
14381 val |= (DMA_RWCTRL_READ_BNDRY_32 |
14382 DMA_RWCTRL_WRITE_BNDRY_32);
14383 break;
14384 }
14385 /* fallthrough */
14386 case 64:
14387 if (goal == BOUNDARY_SINGLE_CACHELINE) {
14388 val |= (DMA_RWCTRL_READ_BNDRY_64 |
14389 DMA_RWCTRL_WRITE_BNDRY_64);
14390 break;
14391 }
14392 /* fallthrough */
14393 case 128:
14394 if (goal == BOUNDARY_SINGLE_CACHELINE) {
14395 val |= (DMA_RWCTRL_READ_BNDRY_128 |
14396 DMA_RWCTRL_WRITE_BNDRY_128);
14397 break;
14398 }
14399 /* fallthrough */
14400 case 256:
14401 val |= (DMA_RWCTRL_READ_BNDRY_256 |
14402 DMA_RWCTRL_WRITE_BNDRY_256);
14403 break;
14404 case 512:
14405 val |= (DMA_RWCTRL_READ_BNDRY_512 |
14406 DMA_RWCTRL_WRITE_BNDRY_512);
14407 break;
14408 case 1024:
14409 default:
14410 val |= (DMA_RWCTRL_READ_BNDRY_1024 |
14411 DMA_RWCTRL_WRITE_BNDRY_1024);
14412 break;
14413 }
14414 }
14416 out:
14417 return val;
14418 }
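/* Usage sketch: the caller merges the boundary bits into its DMA
 * control word, e.g.
 *
 *	tp->dma_rwctrl = tg3_calc_dma_bndry(tp, tp->dma_rwctrl);
 *
 * as tg3_test_dma() does below.
 */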
14420 static int __devinit tg3_do_test_dma(struct tg3 *tp, u32 *buf, dma_addr_t buf_dma, int size, int to_device)
14421 {
14422 struct tg3_internal_buffer_desc test_desc;
14423 u32 sram_dma_descs;
14424 int i, ret;
14426 sram_dma_descs = NIC_SRAM_DMA_DESC_POOL_BASE;
14428 tw32(FTQ_RCVBD_COMP_FIFO_ENQDEQ, 0);
14429 tw32(FTQ_RCVDATA_COMP_FIFO_ENQDEQ, 0);
14430 tw32(RDMAC_STATUS, 0);
14431 tw32(WDMAC_STATUS, 0);
14433 tw32(BUFMGR_MODE, 0);
14434 tw32(FTQ_RESET, 0);
14436 test_desc.addr_hi = ((u64) buf_dma) >> 32;
14437 test_desc.addr_lo = buf_dma & 0xffffffff;
14438 test_desc.nic_mbuf = 0x00002100;
14439 test_desc.len = size;
14441 /*
14442 * HP ZX1 was seeing test failures for 5701 cards running at 33MHz
14443 * the *second* time the tg3 driver was getting loaded after an
14444 * initial scan.
14446 * Broadcom tells me:
14447 * ...the DMA engine is connected to the GRC block and a DMA
14448 * reset may affect the GRC block in some unpredictable way...
14449 * The behavior of resets to individual blocks has not been tested.
14451 * Broadcom noted the GRC reset will also reset all sub-components.
14452 */
14453 if (to_device) {
14454 test_desc.cqid_sqid = (13 << 8) | 2;
14456 tw32_f(RDMAC_MODE, RDMAC_MODE_ENABLE);
14457 udelay(40);
14458 } else {
14459 test_desc.cqid_sqid = (16 << 8) | 7;
14461 tw32_f(WDMAC_MODE, WDMAC_MODE_ENABLE);
14462 udelay(40);
14463 }
14464 test_desc.flags = 0x00000005;
14466 for (i = 0; i < (sizeof(test_desc) / sizeof(u32)); i++) {
14467 u32 val;
14469 val = *(((u32 *)&test_desc) + i);
14470 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR,
14471 sram_dma_descs + (i * sizeof(u32)));
14472 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);
14473 }
14474 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
14476 if (to_device)
14477 tw32(FTQ_DMA_HIGH_READ_FIFO_ENQDEQ, sram_dma_descs);
14478 else
14479 tw32(FTQ_DMA_HIGH_WRITE_FIFO_ENQDEQ, sram_dma_descs);
14481 ret = -ENODEV;
14482 for (i = 0; i < 40; i++) {
14483 u32 val;
14485 if (to_device)
14486 val = tr32(FTQ_RCVBD_COMP_FIFO_ENQDEQ);
14487 else
14488 val = tr32(FTQ_RCVDATA_COMP_FIFO_ENQDEQ);
14489 if ((val & 0xffff) == sram_dma_descs) {
14490 ret = 0;
14491 break;
14492 }
14494 udelay(100);
14495 }
14497 return ret;
14498 }
14500 #define TEST_BUFFER_SIZE 0x2000
14502 static DEFINE_PCI_DEVICE_TABLE(tg3_dma_wait_state_chipsets) = {
14503 { PCI_DEVICE(PCI_VENDOR_ID_APPLE, PCI_DEVICE_ID_APPLE_UNI_N_PCI15) },
14504 { },
14505 };
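/* pci_dev_present() reports whether any device matching the table
 * above is present in the system; tg3_test_dma() below uses it to
 * force the 16-byte write boundary on Apple UniNorth-class bridges
 * even when the DMA test itself passes.
 */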
14507 static int __devinit tg3_test_dma(struct tg3 *tp)
14508 {
14509 dma_addr_t buf_dma;
14510 u32 *buf, saved_dma_rwctrl;
14511 int ret = 0;
14513 buf = dma_alloc_coherent(&tp->pdev->dev, TEST_BUFFER_SIZE,
14514 &buf_dma, GFP_KERNEL);
14515 if (!buf) {
14516 ret = -ENOMEM;
14517 goto out_nofree;
14518 }
14520 tp->dma_rwctrl = ((0x7 << DMA_RWCTRL_PCI_WRITE_CMD_SHIFT) |
14521 (0x6 << DMA_RWCTRL_PCI_READ_CMD_SHIFT));
14523 tp->dma_rwctrl = tg3_calc_dma_bndry(tp, tp->dma_rwctrl);
14525 if (tg3_flag(tp, 57765_PLUS))
14526 goto out;
14528 if (tg3_flag(tp, PCI_EXPRESS)) {
14529 /* DMA read watermark not used on PCIE */
14530 tp->dma_rwctrl |= 0x00180000;
14531 } else if (!tg3_flag(tp, PCIX_MODE)) {
14532 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 ||
14533 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750)
14534 tp->dma_rwctrl |= 0x003f0000;
14535 else
14536 tp->dma_rwctrl |= 0x003f000f;
14537 } else {
14538 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
14539 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
14540 u32 ccval = (tr32(TG3PCI_CLOCK_CTRL) & 0x1f);
14541 u32 read_water = 0x7;
14543 /* If the 5704 is behind the EPB bridge, we can
14544 * do the less restrictive ONE_DMA workaround for
14545 * better performance.
14546 */
14547 if (tg3_flag(tp, 40BIT_DMA_BUG) &&
14548 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704)
14549 tp->dma_rwctrl |= 0x8000;
14550 else if (ccval == 0x6 || ccval == 0x7)
14551 tp->dma_rwctrl |= DMA_RWCTRL_ONE_DMA;
14553 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703)
14554 read_water = 4;
14555 /* Set bit 23 to enable PCIX hw bug fix */
14556 tp->dma_rwctrl |=
14557 (read_water << DMA_RWCTRL_READ_WATER_SHIFT) |
14558 (0x3 << DMA_RWCTRL_WRITE_WATER_SHIFT) |
14559 (1 << 23);
14560 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780) {
14561 /* 5780 always in PCIX mode */
14562 tp->dma_rwctrl |= 0x00144000;
14563 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714) {
14564 /* 5714 always in PCIX mode */
14565 tp->dma_rwctrl |= 0x00148000;
14566 } else {
14567 tp->dma_rwctrl |= 0x001b000f;
14568 }
14569 }
14571 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
14572 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704)
14573 tp->dma_rwctrl &= 0xfffffff0;
14575 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
14576 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
14577 /* Remove this if it causes problems for some boards. */
14578 tp->dma_rwctrl |= DMA_RWCTRL_USE_MEM_READ_MULT;
14580 /* On 5700/5701 chips, we need to set this bit.
14581 * Otherwise the chip will issue cacheline transactions
14582 * to streamable DMA memory with not all the byte
14583 * enables turned on. This is an error on several
14584 * RISC PCI controllers, in particular sparc64.
14586 * On 5703/5704 chips, this bit has been reassigned
14587 * a different meaning. In particular, it is used
14588 * on those chips to enable a PCI-X workaround.
14589 */
14590 tp->dma_rwctrl |= DMA_RWCTRL_ASSERT_ALL_BE;
14591 }
14593 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
14595 #if 0
14596 /* Unneeded, already done by tg3_get_invariants. */
14597 tg3_switch_clocks(tp);
14598 #endif
14600 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
14601 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701)
14602 goto out;
14604 /* It is best to perform DMA test with maximum write burst size
14605 * to expose the 5700/5701 write DMA bug.
14606 */
14607 saved_dma_rwctrl = tp->dma_rwctrl;
14608 tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
14609 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
14611 while (1) {
14612 u32 *p = buf, i;
14614 for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++)
14615 p[i] = i;
14617 /* Send the buffer to the chip. */
14618 ret = tg3_do_test_dma(tp, buf, buf_dma, TEST_BUFFER_SIZE, 1);
14619 if (ret) {
14620 dev_err(&tp->pdev->dev,
14621 "%s: Buffer write failed. err = %d\n",
14622 __func__, ret);
14623 break;
14624 }
14626 #if 0
14627 /* validate data reached card RAM correctly. */
14628 for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++) {
14629 u32 val;
14630 tg3_read_mem(tp, 0x2100 + (i*4), &val);
14631 if (le32_to_cpu(val) != p[i]) {
14632 dev_err(&tp->pdev->dev,
14633 "%s: Buffer corrupted on device! "
14634 "(%d != %d)\n", __func__, val, i);
14635 /* ret = -ENODEV here? */
14636 }
14637 p[i] = 0;
14638 }
14639 #endif
14640 /* Now read it back. */
14641 ret = tg3_do_test_dma(tp, buf, buf_dma, TEST_BUFFER_SIZE, 0);
14642 if (ret) {
14643 dev_err(&tp->pdev->dev, "%s: Buffer read failed. "
14644 "err = %d\n", __func__, ret);
14645 break;
14646 }
14648 /* Verify it. */
14649 for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++) {
14650 if (p[i] == i)
14651 continue;
14653 if ((tp->dma_rwctrl & DMA_RWCTRL_WRITE_BNDRY_MASK) !=
14654 DMA_RWCTRL_WRITE_BNDRY_16) {
14655 tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
14656 tp->dma_rwctrl |= DMA_RWCTRL_WRITE_BNDRY_16;
14657 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
14658 break;
14659 } else {
14660 dev_err(&tp->pdev->dev,
14661 "%s: Buffer corrupted on read back! "
14662 "(%d != %d)\n", __func__, p[i], i);
14663 ret = -ENODEV;
14664 goto out;
14665 }
14666 }
14668 if (i == (TEST_BUFFER_SIZE / sizeof(u32))) {
14669 /* Success. */
14670 ret = 0;
14671 break;
14672 }
14673 }
14674 if ((tp->dma_rwctrl & DMA_RWCTRL_WRITE_BNDRY_MASK) !=
14675 DMA_RWCTRL_WRITE_BNDRY_16) {
14676 /* DMA test passed without adjusting DMA boundary,
14677 * now look for chipsets that are known to expose the
14678 * DMA bug without failing the test.
14679 */
14680 if (pci_dev_present(tg3_dma_wait_state_chipsets)) {
14681 tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
14682 tp->dma_rwctrl |= DMA_RWCTRL_WRITE_BNDRY_16;
14683 } else {
14684 /* Safe to use the calculated DMA boundary. */
14685 tp->dma_rwctrl = saved_dma_rwctrl;
14686 }
14688 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
14689 }
14691 out:
14692 dma_free_coherent(&tp->pdev->dev, TEST_BUFFER_SIZE, buf, buf_dma);
14693 out_nofree:
14694 return ret;
14695 }
14697 static void __devinit tg3_init_bufmgr_config(struct tg3 *tp)
14698 {
14699 if (tg3_flag(tp, 57765_PLUS)) {
14700 tp->bufmgr_config.mbuf_read_dma_low_water =
14701 DEFAULT_MB_RDMA_LOW_WATER_5705;
14702 tp->bufmgr_config.mbuf_mac_rx_low_water =
14703 DEFAULT_MB_MACRX_LOW_WATER_57765;
14704 tp->bufmgr_config.mbuf_high_water =
14705 DEFAULT_MB_HIGH_WATER_57765;
14707 tp->bufmgr_config.mbuf_read_dma_low_water_jumbo =
14708 DEFAULT_MB_RDMA_LOW_WATER_5705;
14709 tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo =
14710 DEFAULT_MB_MACRX_LOW_WATER_JUMBO_57765;
14711 tp->bufmgr_config.mbuf_high_water_jumbo =
14712 DEFAULT_MB_HIGH_WATER_JUMBO_57765;
14713 } else if (tg3_flag(tp, 5705_PLUS)) {
14714 tp->bufmgr_config.mbuf_read_dma_low_water =
14715 DEFAULT_MB_RDMA_LOW_WATER_5705;
14716 tp->bufmgr_config.mbuf_mac_rx_low_water =
14717 DEFAULT_MB_MACRX_LOW_WATER_5705;
14718 tp->bufmgr_config.mbuf_high_water =
14719 DEFAULT_MB_HIGH_WATER_5705;
14720 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
14721 tp->bufmgr_config.mbuf_mac_rx_low_water =
14722 DEFAULT_MB_MACRX_LOW_WATER_5906;
14723 tp->bufmgr_config.mbuf_high_water =
14724 DEFAULT_MB_HIGH_WATER_5906;
14725 }
14727 tp->bufmgr_config.mbuf_read_dma_low_water_jumbo =
14728 DEFAULT_MB_RDMA_LOW_WATER_JUMBO_5780;
14729 tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo =
14730 DEFAULT_MB_MACRX_LOW_WATER_JUMBO_5780;
14731 tp->bufmgr_config.mbuf_high_water_jumbo =
14732 DEFAULT_MB_HIGH_WATER_JUMBO_5780;
14733 } else {
14734 tp->bufmgr_config.mbuf_read_dma_low_water =
14735 DEFAULT_MB_RDMA_LOW_WATER;
14736 tp->bufmgr_config.mbuf_mac_rx_low_water =
14737 DEFAULT_MB_MACRX_LOW_WATER;
14738 tp->bufmgr_config.mbuf_high_water =
14739 DEFAULT_MB_HIGH_WATER;
14741 tp->bufmgr_config.mbuf_read_dma_low_water_jumbo =
14742 DEFAULT_MB_RDMA_LOW_WATER_JUMBO;
14743 tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo =
14744 DEFAULT_MB_MACRX_LOW_WATER_JUMBO;
14745 tp->bufmgr_config.mbuf_high_water_jumbo =
14746 DEFAULT_MB_HIGH_WATER_JUMBO;
14747 }
14749 tp->bufmgr_config.dma_low_water = DEFAULT_DMA_LOW_WATER;
14750 tp->bufmgr_config.dma_high_water = DEFAULT_DMA_HIGH_WATER;
14751 }
14753 static char * __devinit tg3_phy_string(struct tg3 *tp)
14754 {
14755 switch (tp->phy_id & TG3_PHY_ID_MASK) {
14756 case TG3_PHY_ID_BCM5400: return "5400";
14757 case TG3_PHY_ID_BCM5401: return "5401";
14758 case TG3_PHY_ID_BCM5411: return "5411";
14759 case TG3_PHY_ID_BCM5701: return "5701";
14760 case TG3_PHY_ID_BCM5703: return "5703";
14761 case TG3_PHY_ID_BCM5704: return "5704";
14762 case TG3_PHY_ID_BCM5705: return "5705";
14763 case TG3_PHY_ID_BCM5750: return "5750";
14764 case TG3_PHY_ID_BCM5752: return "5752";
14765 case TG3_PHY_ID_BCM5714: return "5714";
14766 case TG3_PHY_ID_BCM5780: return "5780";
14767 case TG3_PHY_ID_BCM5755: return "5755";
14768 case TG3_PHY_ID_BCM5787: return "5787";
14769 case TG3_PHY_ID_BCM5784: return "5784";
14770 case TG3_PHY_ID_BCM5756: return "5722/5756";
14771 case TG3_PHY_ID_BCM5906: return "5906";
14772 case TG3_PHY_ID_BCM5761: return "5761";
14773 case TG3_PHY_ID_BCM5718C: return "5718C";
14774 case TG3_PHY_ID_BCM5718S: return "5718S";
14775 case TG3_PHY_ID_BCM57765: return "57765";
14776 case TG3_PHY_ID_BCM5719C: return "5719C";
14777 case TG3_PHY_ID_BCM5720C: return "5720C";
14778 case TG3_PHY_ID_BCM8002: return "8002/serdes";
14779 case 0: return "serdes";
14780 default: return "unknown";
14781 }
14782 }
14784 static char * __devinit tg3_bus_string(struct tg3 *tp, char *str)
14785 {
14786 if (tg3_flag(tp, PCI_EXPRESS)) {
14787 strcpy(str, "PCI Express");
14788 return str;
14789 } else if (tg3_flag(tp, PCIX_MODE)) {
14790 u32 clock_ctrl = tr32(TG3PCI_CLOCK_CTRL) & 0x1f;
14792 strcpy(str, "PCIX:");
14794 if ((clock_ctrl == 7) ||
14795 ((tr32(GRC_MISC_CFG) & GRC_MISC_CFG_BOARD_ID_MASK) ==
14796 GRC_MISC_CFG_BOARD_ID_5704CIOBE))
14797 strcat(str, "133MHz");
14798 else if (clock_ctrl == 0)
14799 strcat(str, "33MHz");
14800 else if (clock_ctrl == 2)
14801 strcat(str, "50MHz");
14802 else if (clock_ctrl == 4)
14803 strcat(str, "66MHz");
14804 else if (clock_ctrl == 6)
14805 strcat(str, "100MHz");
14806 } else {
14807 strcpy(str, "PCI:");
14808 if (tg3_flag(tp, PCI_HIGH_SPEED))
14809 strcat(str, "66MHz");
14810 else
14811 strcat(str, "33MHz");
14812 }
14813 if (tg3_flag(tp, PCI_32BIT))
14814 strcat(str, ":32-bit");
14815 else
14816 strcat(str, ":64-bit");
14817 return str;
14818 }
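/* Example outputs (sketch): "PCI Express", "PCIX:133MHz:64-bit",
 * "PCI:66MHz:32-bit".  Note that the PCI Express branch returns
 * early, so no bus-width suffix is appended in that case.
 */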
14820 static struct pci_dev * __devinit tg3_find_peer(struct tg3 *tp)
14821 {
14822 struct pci_dev *peer;
14823 unsigned int func, devnr = tp->pdev->devfn & ~7;
14825 for (func = 0; func < 8; func++) {
14826 peer = pci_get_slot(tp->pdev->bus, devnr | func);
14827 if (peer && peer != tp->pdev)
14828 break;
14829 pci_dev_put(peer);
14830 }
14831 /* 5704 can be configured in single-port mode, set peer to
14832 * tp->pdev in that case.
14833 */
14834 if (!peer) {
14835 peer = tp->pdev;
14836 return peer;
14837 }
14839 /*
14840 * We don't need to keep the refcount elevated; there's no way
14841 * to remove one half of this device without removing the other
14842 */
14843 pci_dev_put(peer);
14845 return peer;
14846 }
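/* Example: devfn & ~7 clears the three PCI function bits, so for
 * devfn 0x41 (slot 8, function 1) the loop probes functions
 * 0x40-0x47 looking for the other port of a dual-port device.
 */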
14848 static void __devinit tg3_init_coal(struct tg3 *tp)
14849 {
14850 struct ethtool_coalesce *ec = &tp->coal;
14852 memset(ec, 0, sizeof(*ec));
14853 ec->cmd = ETHTOOL_GCOALESCE;
14854 ec->rx_coalesce_usecs = LOW_RXCOL_TICKS;
14855 ec->tx_coalesce_usecs = LOW_TXCOL_TICKS;
14856 ec->rx_max_coalesced_frames = LOW_RXMAX_FRAMES;
14857 ec->tx_max_coalesced_frames = LOW_TXMAX_FRAMES;
14858 ec->rx_coalesce_usecs_irq = DEFAULT_RXCOAL_TICK_INT;
14859 ec->tx_coalesce_usecs_irq = DEFAULT_TXCOAL_TICK_INT;
14860 ec->rx_max_coalesced_frames_irq = DEFAULT_RXCOAL_MAXF_INT;
14861 ec->tx_max_coalesced_frames_irq = DEFAULT_TXCOAL_MAXF_INT;
14862 ec->stats_block_coalesce_usecs = DEFAULT_STAT_COAL_TICKS;
14864 if (tp->coalesce_mode & (HOSTCC_MODE_CLRTICK_RXBD |
14865 HOSTCC_MODE_CLRTICK_TXBD)) {
14866 ec->rx_coalesce_usecs = LOW_RXCOL_TICKS_CLRTCKS;
14867 ec->rx_coalesce_usecs_irq = DEFAULT_RXCOAL_TICK_INT_CLRTCKS;
14868 ec->tx_coalesce_usecs = LOW_TXCOL_TICKS_CLRTCKS;
14869 ec->tx_coalesce_usecs_irq = DEFAULT_TXCOAL_TICK_INT_CLRTCKS;
14870 }
14872 if (tg3_flag(tp, 5705_PLUS)) {
14873 ec->rx_coalesce_usecs_irq = 0;
14874 ec->tx_coalesce_usecs_irq = 0;
14875 ec->stats_block_coalesce_usecs = 0;
14876 }
14877 }
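/* Rough expectation (depends on the chip flags checked above): these
 * defaults are what `ethtool -c ethX` should report before any user
 * tuning.
 */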
14879 static const struct net_device_ops tg3_netdev_ops = {
14880 .ndo_open = tg3_open,
14881 .ndo_stop = tg3_close,
14882 .ndo_start_xmit = tg3_start_xmit,
14883 .ndo_get_stats64 = tg3_get_stats64,
14884 .ndo_validate_addr = eth_validate_addr,
14885 .ndo_set_multicast_list = tg3_set_rx_mode,
14886 .ndo_set_mac_address = tg3_set_mac_addr,
14887 .ndo_do_ioctl = tg3_ioctl,
14888 .ndo_tx_timeout = tg3_tx_timeout,
14889 .ndo_change_mtu = tg3_change_mtu,
14890 .ndo_fix_features = tg3_fix_features,
14891 .ndo_set_features = tg3_set_features,
14892 #ifdef CONFIG_NET_POLL_CONTROLLER
14893 .ndo_poll_controller = tg3_poll_controller,
14894 #endif
14895 };
14897 static int __devinit tg3_init_one(struct pci_dev *pdev,
14898 const struct pci_device_id *ent)
14899 {
14900 struct net_device *dev;
14901 struct tg3 *tp;
14902 int i, err, pm_cap;
14903 u32 sndmbx, rcvmbx, intmbx;
14904 char str[40];
14905 u64 dma_mask, persist_dma_mask;
14906 u32 hw_features = 0;
14908 printk_once(KERN_INFO "%s\n", version);
14910 err = pci_enable_device(pdev);
14911 if (err) {
14912 dev_err(&pdev->dev, "Cannot enable PCI device, aborting\n");
14913 return err;
14914 }
14916 err = pci_request_regions(pdev, DRV_MODULE_NAME);
14917 if (err) {
14918 dev_err(&pdev->dev, "Cannot obtain PCI resources, aborting\n");
14919 goto err_out_disable_pdev;
14920 }
14922 pci_set_master(pdev);
14924 /* Find power-management capability. */
14925 pm_cap = pci_find_capability(pdev, PCI_CAP_ID_PM);
14926 if (pm_cap == 0) {
14927 dev_err(&pdev->dev,
14928 "Cannot find Power Management capability, aborting\n");
14929 err = -EIO;
14930 goto err_out_free_res;
14931 }
14933 dev = alloc_etherdev_mq(sizeof(*tp), TG3_IRQ_MAX_VECS);
14934 if (!dev) {
14935 dev_err(&pdev->dev, "Etherdev alloc failed, aborting\n");
14936 err = -ENOMEM;
14937 goto err_out_free_res;
14938 }
14940 SET_NETDEV_DEV(dev, &pdev->dev);
14942 dev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
14944 tp = netdev_priv(dev);
14945 tp->pdev = pdev;
14946 tp->dev = dev;
14947 tp->pm_cap = pm_cap;
14948 tp->rx_mode = TG3_DEF_RX_MODE;
14949 tp->tx_mode = TG3_DEF_TX_MODE;
14951 if (tg3_debug > 0)
14952 tp->msg_enable = tg3_debug;
14953 else
14954 tp->msg_enable = TG3_DEF_MSG_ENABLE;
14956 /* The word/byte swap controls here control register access byte
14957 * swapping. DMA data byte swapping is controlled in the GRC_MODE
14958 * setting below.
14959 */
14960 tp->misc_host_ctrl =
14961 MISC_HOST_CTRL_MASK_PCI_INT |
14962 MISC_HOST_CTRL_WORD_SWAP |
14963 MISC_HOST_CTRL_INDIR_ACCESS |
14964 MISC_HOST_CTRL_PCISTATE_RW;
14966 /* The NONFRM (non-frame) byte/word swap controls take effect
14967 * on descriptor entries, anything which isn't packet data.
14969 * The StrongARM chips on the board (one for tx, one for rx)
14970 * are running in big-endian mode.
14971 */
14972 tp->grc_mode = (GRC_MODE_WSWAP_DATA | GRC_MODE_BSWAP_DATA |
14973 GRC_MODE_WSWAP_NONFRM_DATA);
14974 #ifdef __BIG_ENDIAN
14975 tp->grc_mode |= GRC_MODE_BSWAP_NONFRM_DATA;
14976 #endif
14977 spin_lock_init(&tp->lock);
14978 spin_lock_init(&tp->indirect_lock);
14979 INIT_WORK(&tp->reset_task, tg3_reset_task);
14981 tp->regs = pci_ioremap_bar(pdev, BAR_0);
14982 if (!tp->regs) {
14983 dev_err(&pdev->dev, "Cannot map device registers, aborting\n");
14984 err = -ENOMEM;
14985 goto err_out_free_dev;
14986 }
14988 tp->rx_pending = TG3_DEF_RX_RING_PENDING;
14989 tp->rx_jumbo_pending = TG3_DEF_RX_JUMBO_RING_PENDING;
14991 dev->ethtool_ops = &tg3_ethtool_ops;
14992 dev->watchdog_timeo = TG3_TX_TIMEOUT;
14993 dev->netdev_ops = &tg3_netdev_ops;
14994 dev->irq = pdev->irq;
14996 err = tg3_get_invariants(tp);
14997 if (err) {
14998 dev_err(&pdev->dev,
14999 "Problem fetching invariants of chip, aborting\n");
15000 goto err_out_iounmap;
15001 }
15003 /* The EPB bridge inside 5714, 5715, and 5780 and any
15004 * device behind the EPB cannot support DMA addresses > 40-bit.
15005 * On 64-bit systems with IOMMU, use 40-bit dma_mask.
15006 * On 64-bit systems without IOMMU, use 64-bit dma_mask and
15007 * do DMA address check in tg3_start_xmit().
15008 */
15009 if (tg3_flag(tp, IS_5788))
15010 persist_dma_mask = dma_mask = DMA_BIT_MASK(32);
15011 else if (tg3_flag(tp, 40BIT_DMA_BUG)) {
15012 persist_dma_mask = dma_mask = DMA_BIT_MASK(40);
15013 #ifdef CONFIG_HIGHMEM
15014 dma_mask = DMA_BIT_MASK(64);
15015 #endif
15016 } else
15017 persist_dma_mask = dma_mask = DMA_BIT_MASK(64);
15019 /* Configure DMA attributes. */
15020 if (dma_mask > DMA_BIT_MASK(32)) {
15021 err = pci_set_dma_mask(pdev, dma_mask);
15022 if (!err) {
15023 dev->features |= NETIF_F_HIGHDMA;
15024 err = pci_set_consistent_dma_mask(pdev,
15025 persist_dma_mask);
15026 if (err < 0) {
15027 dev_err(&pdev->dev, "Unable to obtain 64 bit "
15028 "DMA for consistent allocations\n");
15029 goto err_out_iounmap;
15030 }
15031 }
15032 }
15033 if (err || dma_mask == DMA_BIT_MASK(32)) {
15034 err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
15035 if (err) {
15036 dev_err(&pdev->dev,
15037 "No usable DMA configuration, aborting\n");
15038 goto err_out_iounmap;
15039 }
15040 }
15042 tg3_init_bufmgr_config(tp);
15044 /* Selectively allow TSO based on operating conditions */
15045 if ((tg3_flag(tp, HW_TSO_1) ||
15046 tg3_flag(tp, HW_TSO_2) ||
15047 tg3_flag(tp, HW_TSO_3)) ||
15048 (tp->fw_needed && !tg3_flag(tp, ENABLE_ASF)))
15049 tg3_flag_set(tp, TSO_CAPABLE);
15050 else {
15051 tg3_flag_clear(tp, TSO_CAPABLE);
15052 tg3_flag_clear(tp, TSO_BUG);
15053 tp->fw_needed = NULL;
15054 }
15056 if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0)
15057 tp->fw_needed = FIRMWARE_TG3;
15059 /* TSO is on by default on chips that support hardware TSO.
15060 * Firmware TSO on older chips gives lower performance, so it
15061 * is off by default, but can be enabled using ethtool.
15062 */
15063 if ((tg3_flag(tp, HW_TSO_1) ||
15064 tg3_flag(tp, HW_TSO_2) ||
15065 tg3_flag(tp, HW_TSO_3)) &&
15066 (dev->features & NETIF_F_IP_CSUM))
15067 hw_features |= NETIF_F_TSO;
15068 if (tg3_flag(tp, HW_TSO_2) || tg3_flag(tp, HW_TSO_3)) {
15069 if (dev->features & NETIF_F_IPV6_CSUM)
15070 hw_features |= NETIF_F_TSO6;
15071 if (tg3_flag(tp, HW_TSO_3) ||
15072 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
15073 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 &&
15074 GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5784_AX) ||
15075 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
15076 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780)
15077 hw_features |= NETIF_F_TSO_ECN;
15078 }
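/* Example (assuming a chip that relies on firmware TSO, such as the
 * 5701 A0 handled above): the feature can be toggled at run time with
 *
 *	ethtool -K ethX tso on
 */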
15080 dev->hw_features |= hw_features;
15081 dev->features |= hw_features;
15082 dev->vlan_features |= hw_features;
15084 /*
15085 * Add loopback capability only for a subset of devices that support
15086 * MAC-LOOPBACK. Eventually this needs to be enhanced to allow INT-PHY
15087 * loopback for the remaining devices.
15088 */
15089 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5780 &&
15090 !tg3_flag(tp, CPMU_PRESENT))
15091 /* Add the loopback capability */
15092 dev->hw_features |= NETIF_F_LOOPBACK;
15094 if (tp->pci_chip_rev_id == CHIPREV_ID_5705_A1 &&
15095 !tg3_flag(tp, TSO_CAPABLE) &&
15096 !(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH)) {
15097 tg3_flag_set(tp, MAX_RXPEND_64);
15098 tp->rx_pending = 63;
15099 }
15101 err = tg3_get_device_address(tp);
15102 if (err) {
15103 dev_err(&pdev->dev,
15104 "Could not obtain valid ethernet address, aborting\n");
15105 goto err_out_iounmap;
15106 }
15108 if (tg3_flag(tp, ENABLE_APE)) {
15109 tp->aperegs = pci_ioremap_bar(pdev, BAR_2);
15110 if (!tp->aperegs) {
15111 dev_err(&pdev->dev,
15112 "Cannot map APE registers, aborting\n");
15113 err = -ENOMEM;
15114 goto err_out_iounmap;
15115 }
15117 tg3_ape_lock_init(tp);
15119 if (tg3_flag(tp, ENABLE_ASF))
15120 tg3_read_dash_ver(tp);
15121 }
15123 /*
15124 * Reset the chip in case the UNDI or EFI driver did not shut it
15125 * down: the DMA self test below will enable the WDMAC and we would
15126 * then see (spurious) pending DMA on the PCI bus at that point.
15127 */
15128 if ((tr32(HOSTCC_MODE) & HOSTCC_MODE_ENABLE) ||
15129 (tr32(WDMAC_MODE) & WDMAC_MODE_ENABLE)) {
15130 tw32(MEMARB_MODE, MEMARB_MODE_ENABLE);
15131 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
15132 }
15134 err = tg3_test_dma(tp);
15135 if (err) {
15136 dev_err(&pdev->dev, "DMA engine test failed, aborting\n");
15137 goto err_out_apeunmap;
15138 }
15140 intmbx = MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW;
15141 rcvmbx = MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW;
15142 sndmbx = MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW;
15143 for (i = 0; i < tp->irq_max; i++) {
15144 struct tg3_napi *tnapi = &tp->napi[i];
15146 tnapi->tp = tp;
15147 tnapi->tx_pending = TG3_DEF_TX_RING_PENDING;
15149 tnapi->int_mbox = intmbx;
15150 if (i < 4)
15151 intmbx += 0x8;
15152 else
15153 intmbx += 0x4;
15155 tnapi->consmbox = rcvmbx;
15156 tnapi->prodmbox = sndmbx;
15158 if (i)
15159 tnapi->coal_now = HOSTCC_MODE_COAL_VEC1_NOW << (i - 1);
15160 else
15161 tnapi->coal_now = HOSTCC_MODE_NOW;
15163 if (!tg3_flag(tp, SUPPORT_MSIX))
15164 break;
15166 /*
15167 * If we support MSIX, we'll be using RSS. If we're using
15168 * RSS, the first vector only handles link interrupts and the
15169 * remaining vectors handle rx and tx interrupts. Reuse the
15170 * mailbox values for the next iteration. The values we setup
15171 * above are still useful for the single vectored mode.
15172 */
15173 if (!i)
15174 continue;
15176 rcvmbx += 0x8;
15178 if (sndmbx & 0x4)
15179 sndmbx -= 0x4;
15180 else
15181 sndmbx += 0xc;
15182 }
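/* Illustration of the mailbox walk (assuming consecutive 64-bit
 * mailbox registers spaced 8 bytes apart, as the intmbx stride above
 * suggests): vectors 0 and 1 share the initial rcvmbx/sndmbx, rcvmbx
 * then advances by 0x8 per vector, and the alternating -0x4/+0xc
 * steps advance sndmbx by a net 0x8 for every two vectors while
 * toggling between the two 32-bit halves of successive mailboxes.
 */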
15184 tg3_init_coal(tp);
15186 pci_set_drvdata(pdev, dev);
15188 err = register_netdev(dev);
15189 if (err) {
15190 dev_err(&pdev->dev, "Cannot register net device, aborting\n");
15191 goto err_out_apeunmap;
15192 }
15194 netdev_info(dev, "Tigon3 [partno(%s) rev %04x] (%s) MAC address %pM\n",
15195 tp->board_part_number,
15196 tp->pci_chip_rev_id,
15197 tg3_bus_string(tp, str),
15198 dev->dev_addr);
15200 if (tp->phy_flags & TG3_PHYFLG_IS_CONNECTED) {
15201 struct phy_device *phydev;
15202 phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
15203 netdev_info(dev,
15204 "attached PHY driver [%s] (mii_bus:phy_addr=%s)\n",
15205 phydev->drv->name, dev_name(&phydev->dev));
15206 } else {
15207 char *ethtype;
15209 if (tp->phy_flags & TG3_PHYFLG_10_100_ONLY)
15210 ethtype = "10/100Base-TX";
15211 else if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES)
15212 ethtype = "1000Base-SX";
15213 else
15214 ethtype = "10/100/1000Base-T";
15216 netdev_info(dev, "attached PHY is %s (%s Ethernet) "
15217 "(WireSpeed[%d], EEE[%d])\n",
15218 tg3_phy_string(tp), ethtype,
15219 (tp->phy_flags & TG3_PHYFLG_NO_ETH_WIRE_SPEED) == 0,
15220 (tp->phy_flags & TG3_PHYFLG_EEE_CAP) != 0);
15221 }
15223 netdev_info(dev, "RXcsums[%d] LinkChgREG[%d] MIirq[%d] ASF[%d] TSOcap[%d]\n",
15224 (dev->features & NETIF_F_RXCSUM) != 0,
15225 tg3_flag(tp, USE_LINKCHG_REG) != 0,
15226 (tp->phy_flags & TG3_PHYFLG_USE_MI_INTERRUPT) != 0,
15227 tg3_flag(tp, ENABLE_ASF) != 0,
15228 tg3_flag(tp, TSO_CAPABLE) != 0);
15229 netdev_info(dev, "dma_rwctrl[%08x] dma_mask[%d-bit]\n",
15230 tp->dma_rwctrl,
15231 pdev->dma_mask == DMA_BIT_MASK(32) ? 32 :
15232 ((u64)pdev->dma_mask) == DMA_BIT_MASK(40) ? 40 : 64);
15234 pci_save_state(pdev);
15236 return 0;
15238 err_out_apeunmap:
15239 if (tp->aperegs) {
15240 iounmap(tp->aperegs);
15241 tp->aperegs = NULL;
15242 }
15244 err_out_iounmap:
15245 if (tp->regs) {
15246 iounmap(tp->regs);
15247 tp->regs = NULL;
15248 }
15250 err_out_free_dev:
15251 free_netdev(dev);
15253 err_out_free_res:
15254 pci_release_regions(pdev);
15256 err_out_disable_pdev:
15257 pci_disable_device(pdev);
15258 pci_set_drvdata(pdev, NULL);
15259 return err;
15260 }
15262 static void __devexit tg3_remove_one(struct pci_dev *pdev)
15263 {
15264 struct net_device *dev = pci_get_drvdata(pdev);
15266 if (dev) {
15267 struct tg3 *tp = netdev_priv(dev);
15269 if (tp->fw)
15270 release_firmware(tp->fw);
15272 cancel_work_sync(&tp->reset_task);
15274 if (!tg3_flag(tp, USE_PHYLIB)) {
15275 tg3_phy_fini(tp);
15276 tg3_mdio_fini(tp);
15277 }
15279 unregister_netdev(dev);
15280 if (tp->aperegs) {
15281 iounmap(tp->aperegs);
15282 tp->aperegs = NULL;
15283 }
15284 if (tp->regs) {
15285 iounmap(tp->regs);
15286 tp->regs = NULL;
15287 }
15288 free_netdev(dev);
15289 pci_release_regions(pdev);
15290 pci_disable_device(pdev);
15291 pci_set_drvdata(pdev, NULL);
15292 }
15293 }
15295 #ifdef CONFIG_PM_SLEEP
15296 static int tg3_suspend(struct device *device)
15297 {
15298 struct pci_dev *pdev = to_pci_dev(device);
15299 struct net_device *dev = pci_get_drvdata(pdev);
15300 struct tg3 *tp = netdev_priv(dev);
15301 int err;
15303 if (!netif_running(dev))
15304 return 0;
15306 flush_work_sync(&tp->reset_task);
15307 tg3_phy_stop(tp);
15308 tg3_netif_stop(tp);
15310 del_timer_sync(&tp->timer);
15312 tg3_full_lock(tp, 1);
15313 tg3_disable_ints(tp);
15314 tg3_full_unlock(tp);
15316 netif_device_detach(dev);
15318 tg3_full_lock(tp, 0);
15319 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
15320 tg3_flag_clear(tp, INIT_COMPLETE);
15321 tg3_full_unlock(tp);
15323 err = tg3_power_down_prepare(tp);
15324 if (err) {
15325 int err2;
15327 tg3_full_lock(tp, 0);
15329 tg3_flag_set(tp, INIT_COMPLETE);
15330 err2 = tg3_restart_hw(tp, 1);
15331 if (err2)
15332 goto out;
15334 tp->timer.expires = jiffies + tp->timer_offset;
15335 add_timer(&tp->timer);
15337 netif_device_attach(dev);
15338 tg3_netif_start(tp);
15340 out:
15341 tg3_full_unlock(tp);
15343 if (!err2)
15344 tg3_phy_start(tp);
15345 }
15347 return err;
15348 }
15350 static int tg3_resume(struct device *device)
15351 {
15352 struct pci_dev *pdev = to_pci_dev(device);
15353 struct net_device *dev = pci_get_drvdata(pdev);
15354 struct tg3 *tp = netdev_priv(dev);
15355 int err;
15357 if (!netif_running(dev))
15358 return 0;
15360 netif_device_attach(dev);
15362 tg3_full_lock(tp, 0);
15364 tg3_flag_set(tp, INIT_COMPLETE);
15365 err = tg3_restart_hw(tp, 1);
15366 if (err)
15367 goto out;
15369 tp->timer.expires = jiffies + tp->timer_offset;
15370 add_timer(&tp->timer);
15372 tg3_netif_start(tp);
15374 out:
15375 tg3_full_unlock(tp);
15377 if (!err)
15378 tg3_phy_start(tp);
15380 return err;
15381 }
15383 static SIMPLE_DEV_PM_OPS(tg3_pm_ops, tg3_suspend, tg3_resume);
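/* SIMPLE_DEV_PM_OPS expands to a struct dev_pm_ops that routes all of
 * the system sleep transitions (suspend/resume, freeze/thaw,
 * poweroff/restore) to the two callbacks named here.
 */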
15384 #define TG3_PM_OPS (&tg3_pm_ops)
15386 #else
15388 #define TG3_PM_OPS NULL
15390 #endif /* CONFIG_PM_SLEEP */
15392 /**
15393 * tg3_io_error_detected - called when PCI error is detected
15394 * @pdev: Pointer to PCI device
15395 * @state: The current pci connection state
15397 * This function is called after a PCI bus error affecting
15398 * this device has been detected.
15399 */
15400 static pci_ers_result_t tg3_io_error_detected(struct pci_dev *pdev,
15401 pci_channel_state_t state)
15402 {
15403 struct net_device *netdev = pci_get_drvdata(pdev);
15404 struct tg3 *tp = netdev_priv(netdev);
15405 pci_ers_result_t err = PCI_ERS_RESULT_NEED_RESET;
15407 netdev_info(netdev, "PCI I/O error detected\n");
15409 rtnl_lock();
15411 if (!netif_running(netdev))
15412 goto done;
15414 tg3_phy_stop(tp);
15416 tg3_netif_stop(tp);
15418 del_timer_sync(&tp->timer);
15419 tg3_flag_clear(tp, RESTART_TIMER);
15421 /* Want to make sure that the reset task doesn't run */
15422 cancel_work_sync(&tp->reset_task);
15423 tg3_flag_clear(tp, TX_RECOVERY_PENDING);
15424 tg3_flag_clear(tp, RESTART_TIMER);
15426 netif_device_detach(netdev);
15428 /* Clean up software state, even if MMIO is blocked */
15429 tg3_full_lock(tp, 0);
15430 tg3_halt(tp, RESET_KIND_SHUTDOWN, 0);
15431 tg3_full_unlock(tp);
15433 done:
15434 if (state == pci_channel_io_perm_failure)
15435 err = PCI_ERS_RESULT_DISCONNECT;
15436 else
15437 pci_disable_device(pdev);
15439 rtnl_unlock();
15441 return err;
15442 }
15444 /**
15445 * tg3_io_slot_reset - called after the pci bus has been reset.
15446 * @pdev: Pointer to PCI device
15448 * Restart the card from scratch, as if from a cold-boot.
15449 * At this point, the card has experienced a hard reset,
15450 * followed by fixups by BIOS, and has its config space
15451 * set up identically to what it was at cold boot.
15452 */
15453 static pci_ers_result_t tg3_io_slot_reset(struct pci_dev *pdev)
15454 {
15455 struct net_device *netdev = pci_get_drvdata(pdev);
15456 struct tg3 *tp = netdev_priv(netdev);
15457 pci_ers_result_t rc = PCI_ERS_RESULT_DISCONNECT;
15458 int err;
15460 rtnl_lock();
15462 if (pci_enable_device(pdev)) {
15463 netdev_err(netdev, "Cannot re-enable PCI device after reset.\n");
15464 goto done;
15465 }
15467 pci_set_master(pdev);
15468 pci_restore_state(pdev);
15469 pci_save_state(pdev);
15471 if (!netif_running(netdev)) {
15472 rc = PCI_ERS_RESULT_RECOVERED;
15473 goto done;
15474 }
15476 err = tg3_power_up(tp);
15477 if (err) {
15478 netdev_err(netdev, "Failed to restore register access.\n");
15479 goto done;
15480 }
15482 rc = PCI_ERS_RESULT_RECOVERED;
15484 done:
15485 rtnl_unlock();
15487 return rc;
15488 }
15490 /**
15491 * tg3_io_resume - called when traffic can start flowing again.
15492 * @pdev: Pointer to PCI device
15494 * This callback is called when the error recovery driver tells
15495 * us that it's OK to resume normal operation.
15496 */
15497 static void tg3_io_resume(struct pci_dev *pdev)
15498 {
15499 struct net_device *netdev = pci_get_drvdata(pdev);
15500 struct tg3 *tp = netdev_priv(netdev);
15501 int err;
15503 rtnl_lock();
15505 if (!netif_running(netdev))
15506 goto done;
15508 tg3_full_lock(tp, 0);
15509 tg3_flag_set(tp, INIT_COMPLETE);
15510 err = tg3_restart_hw(tp, 1);
15511 tg3_full_unlock(tp);
15512 if (err) {
15513 netdev_err(netdev, "Cannot restart hardware after reset.\n");
15514 goto done;
15515 }
15517 netif_device_attach(netdev);
15519 tp->timer.expires = jiffies + tp->timer_offset;
15520 add_timer(&tp->timer);
15522 tg3_netif_start(tp);
15524 tg3_phy_start(tp);
15526 done:
15527 rtnl_unlock();
15528 }
15530 static struct pci_error_handlers tg3_err_handler = {
15531 .error_detected = tg3_io_error_detected,
15532 .slot_reset = tg3_io_slot_reset,
15533 .resume = tg3_io_resume
15534 };
15536 static struct pci_driver tg3_driver = {
15537 .name = DRV_MODULE_NAME,
15538 .id_table = tg3_pci_tbl,
15539 .probe = tg3_init_one,
15540 .remove = __devexit_p(tg3_remove_one),
15541 .err_handler = &tg3_err_handler,
15542 .driver.pm = TG3_PM_OPS,
15543 };
15545 static int __init tg3_init(void)
15546 {
15547 return pci_register_driver(&tg3_driver);
15548 }
15550 static void __exit tg3_cleanup(void)
15551 {
15552 pci_unregister_driver(&tg3_driver);
15553 }
15555 module_init(tg3_init);
15556 module_exit(tg3_cleanup);