/*
 * tg3.c: Broadcom Tigon3 ethernet driver.
 *
 * Copyright (C) 2001, 2002 David S. Miller (davem@redhat.com)
 * Copyright (C) 2001, 2002 Jeff Garzik (jgarzik@pobox.com)
 */

#include <linux/config.h>

#include <linux/module.h>

#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/compiler.h>
#include <linux/slab.h>
#include <linux/delay.h>
#include <linux/init.h>
#include <linux/ioport.h>
#include <linux/pci.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/ethtool.h>
#include <linux/mii.h>
#include <linux/if_vlan.h>
#include <linux/ip.h>
#include <linux/tcp.h>
#include <linux/workqueue.h>

#include <asm/system.h>
#include <asm/io.h>
#include <asm/byteorder.h>
#include <asm/uaccess.h>

#if defined(CONFIG_VLAN_8021Q) || defined(CONFIG_VLAN_8021Q_MODULE)
#define TG3_VLAN_TAG_USED 1
#else
#define TG3_VLAN_TAG_USED 0
#endif

#ifdef NETIF_F_TSO
/* XXX Works but still disabled, decreases TCP performance to 7MB/sec even
 * XXX over gigabit.
 */
#define TG3_DO_TSO	0
#else
#define TG3_DO_TSO	0
#endif

#include "tg3.h"

#define DRV_MODULE_NAME		"tg3"
#define PFX DRV_MODULE_NAME	": "
#define DRV_MODULE_VERSION	"1.6"
#define DRV_MODULE_RELDATE	"June 11, 2003"

#define TG3_DEF_MAC_MODE	0
#define TG3_DEF_RX_MODE		0
#define TG3_DEF_TX_MODE		0
#define TG3_DEF_MSG_ENABLE	  \
	(NETIF_MSG_DRV		| \
	 NETIF_MSG_PROBE	| \
	 NETIF_MSG_LINK		| \
	 NETIF_MSG_TIMER	| \
	 NETIF_MSG_IFDOWN	| \
	 NETIF_MSG_IFUP		| \
	 NETIF_MSG_RX_ERR	| \
	 NETIF_MSG_TX_ERR)

/* length of time before we decide the hardware is borked,
 * and dev->tx_timeout() should be called to fix the problem
 */
#define TG3_TX_TIMEOUT			(5 * HZ)

/* hardware minimum and maximum for a single frame's data payload */
#define TG3_MIN_MTU			60
#define TG3_MAX_MTU			9000

/* These numbers seem to be hard coded in the NIC firmware somehow.
 * You can't change the ring sizes, but you can change where you place
 * them in the NIC onboard memory.
 */
#define TG3_RX_RING_SIZE		512
#define TG3_DEF_RX_RING_PENDING		200
#define TG3_RX_JUMBO_RING_SIZE		256
#define TG3_DEF_RX_JUMBO_RING_PENDING	100
#define TG3_RX_RCB_RING_SIZE		1024
#define TG3_TX_RING_SIZE		512
#define TG3_DEF_TX_RING_PENDING		(TG3_TX_RING_SIZE - 1)

#define TG3_RX_RING_BYTES	(sizeof(struct tg3_rx_buffer_desc) * \
				 TG3_RX_RING_SIZE)
#define TG3_RX_JUMBO_RING_BYTES	(sizeof(struct tg3_rx_buffer_desc) * \
				 TG3_RX_JUMBO_RING_SIZE)
#define TG3_RX_RCB_RING_BYTES	(sizeof(struct tg3_rx_buffer_desc) * \
				 TG3_RX_RCB_RING_SIZE)
#define TG3_TX_RING_BYTES	(sizeof(struct tg3_tx_buffer_desc) * \
				 TG3_TX_RING_SIZE)
#define TX_RING_GAP(TP)	\
	(TG3_TX_RING_SIZE - (TP)->tx_pending)
#define TX_BUFFS_AVAIL(TP)						\
	(((TP)->tx_cons <= (TP)->tx_prod) ?				\
	  (TP)->tx_cons + (TP)->tx_pending - (TP)->tx_prod :		\
	  (TP)->tx_cons - (TP)->tx_prod - TX_RING_GAP(TP))
#define NEXT_TX(N)		(((N) + 1) & (TG3_TX_RING_SIZE - 1))
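
/* Illustrative note (editorial, not from the original source):
 * TX_BUFFS_AVAIL() computes the free descriptors in the 512-entry TX
 * ring while honoring tx_pending.  With the default tx_pending of 511,
 * one slot is always kept unused so that tx_prod == tx_cons
 * unambiguously means "empty".  For example, with tx_cons = 10 and
 * tx_prod = 500:
 *
 *	avail = 10 + 511 - 500 = 21 descriptors
 *
 * and after tx_prod wraps around to 5 (tx_cons still 10):
 *
 *	avail = 10 - 5 - (512 - 511) = 4 descriptors
 */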
107 #define RX_PKT_BUF_SZ (1536 + tp->rx_offset + 64)
108 #define RX_JUMBO_PKT_BUF_SZ (9046 + tp->rx_offset + 64)
110 /* minimum number of free TX descriptors required to wake up TX process */
111 #define TG3_TX_WAKEUP_THRESH (TG3_TX_RING_SIZE / 4)
113 static char version[] __devinitdata =
114 DRV_MODULE_NAME ".c:v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";
116 MODULE_AUTHOR("David S. Miller (davem@redhat.com) and Jeff Garzik (jgarzik@pobox.com)");
117 MODULE_DESCRIPTION("Broadcom Tigon3 ethernet driver");
118 MODULE_LICENSE("GPL");
119 MODULE_PARM(tg3_debug, "i");
120 MODULE_PARM_DESC(tg3_debug, "Tigon3 bitmapped debugging message enable value");
122 static int tg3_debug = -1; /* -1 == use TG3_DEF_MSG_ENABLE as value */
124 static struct pci_device_id tg3_pci_tbl[] __devinitdata = {
125 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5700,
126 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
127 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5701,
128 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
129 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702,
130 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
131 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703,
132 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
133 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704,
134 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
135 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702FE,
136 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
137 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702X,
138 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
139 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703X,
140 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
141 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704S,
142 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
143 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702A3,
144 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
145 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703A3,
146 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
147 { PCI_VENDOR_ID_SYSKONNECT, 0x4400,
148 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
149 { PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1000,
150 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
151 { PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC9100,
152 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
153 { 0, }
156 MODULE_DEVICE_TABLE(pci, tg3_pci_tbl);
158 static void tg3_write_indirect_reg32(struct tg3 *tp, u32 off, u32 val)
160 if ((tp->tg3_flags & TG3_FLAG_PCIX_TARGET_HWBUG) != 0) {
161 unsigned long flags;
163 spin_lock_irqsave(&tp->indirect_lock, flags);
164 pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off);
165 pci_write_config_dword(tp->pdev, TG3PCI_REG_DATA, val);
166 spin_unlock_irqrestore(&tp->indirect_lock, flags);
167 } else {
168 writel(val, tp->regs + off);
169 if ((tp->tg3_flags & TG3_FLAG_5701_REG_WRITE_BUG) != 0)
170 readl(tp->regs + off);
174 #define tw32(reg,val) tg3_write_indirect_reg32(tp,(reg),(val))
175 #define tw32_mailbox(reg, val) writel(((val) & 0xffffffff), tp->regs + (reg))
176 #define tw16(reg,val) writew(((val) & 0xffff), tp->regs + (reg))
177 #define tw8(reg,val) writeb(((val) & 0xff), tp->regs + (reg))
178 #define tr32(reg) readl(tp->regs + (reg))
179 #define tr16(reg) readw(tp->regs + (reg))
180 #define tr8(reg) readb(tp->regs + (reg))
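
/* Editorial note: throughout this file, wherever ordering matters a
 * tw32() is immediately followed by a tr32() of the same (or a nearby)
 * register.  MMIO writes over PCI are posted, so the read-back forces
 * the write to actually reach the chip before the subsequent udelay()
 * starts counting.  The tw32_mailbox() variant writes directly,
 * presumably because the mailbox registers tolerate direct access even
 * on chips that need the PCI-X target-hwbug indirection above.
 */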

static void tg3_write_mem(struct tg3 *tp, u32 off, u32 val)
{
	unsigned long flags;

	spin_lock_irqsave(&tp->indirect_lock, flags);
	pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, off);
	pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);

	/* Always leave this as zero. */
	pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
	spin_unlock_irqrestore(&tp->indirect_lock, flags);
}

static void tg3_read_mem(struct tg3 *tp, u32 off, u32 *val)
{
	unsigned long flags;

	spin_lock_irqsave(&tp->indirect_lock, flags);
	pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, off);
	pci_read_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);

	/* Always leave this as zero. */
	pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
	spin_unlock_irqrestore(&tp->indirect_lock, flags);
}
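
/* Illustrative sketch (editorial assumption, not part of the original
 * file): a caller reading a word of NIC SRAM through the memory window
 * would do something like
 *
 *	u32 magic;
 *	tg3_read_mem(tp, NIC_SRAM_FIRMWARE_MBOX, &magic);
 *
 * Both helpers hold indirect_lock because programming the window base
 * and touching the data register is a two-step sequence that must not
 * be interleaved with another CPU's window access.
 */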

static void tg3_disable_ints(struct tg3 *tp)
{
	tw32(TG3PCI_MISC_HOST_CTRL,
	     (tp->misc_host_ctrl | MISC_HOST_CTRL_MASK_PCI_INT));
	tw32_mailbox(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);
	tr32(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW);
}

static inline void tg3_cond_int(struct tg3 *tp)
{
	if (tp->hw_status->status & SD_STATUS_UPDATED)
		tw32(GRC_LOCAL_CTRL, tp->grc_local_ctrl | GRC_LCLCTRL_SETINT);
}

static void tg3_enable_ints(struct tg3 *tp)
{
	tw32(TG3PCI_MISC_HOST_CTRL,
	     (tp->misc_host_ctrl & ~MISC_HOST_CTRL_MASK_PCI_INT));
	tw32_mailbox(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000000);
	tr32(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW);

	tg3_cond_int(tp);
}

/* these netif_xxx funcs should be moved into generic net layer */
static void netif_poll_disable(struct net_device *dev)
{
	while (test_and_set_bit(__LINK_STATE_RX_SCHED, &dev->state)) {
		current->state = TASK_INTERRUPTIBLE;
		schedule_timeout(1);
	}
}

static inline void netif_poll_enable(struct net_device *dev)
{
	clear_bit(__LINK_STATE_RX_SCHED, &dev->state);
}

/* same as netif_rx_complete, except that local_irq_save(flags)
 * has already been issued
 */
static inline void __netif_rx_complete(struct net_device *dev)
{
	if (!test_bit(__LINK_STATE_RX_SCHED, &dev->state)) BUG();
	list_del(&dev->poll_list);
	clear_bit(__LINK_STATE_RX_SCHED, &dev->state);
}

static inline void netif_tx_disable(struct net_device *dev)
{
	spin_lock_bh(&dev->xmit_lock);
	netif_stop_queue(dev);
	spin_unlock_bh(&dev->xmit_lock);
}

static inline void tg3_netif_stop(struct tg3 *tp)
{
	netif_poll_disable(tp->dev);
	netif_tx_disable(tp->dev);
}

static inline void tg3_netif_start(struct tg3 *tp)
{
	netif_wake_queue(tp->dev);
	/* NOTE: unconditional netif_wake_queue is only appropriate
	 * so long as all callers are assured to have free tx slots
	 * (such as after tg3_init_hw)
	 */
	netif_poll_enable(tp->dev);
	tg3_cond_int(tp);
}

static void tg3_switch_clocks(struct tg3 *tp)
{
	if (tr32(TG3PCI_CLOCK_CTRL) & CLOCK_CTRL_44MHZ_CORE) {
		tw32(TG3PCI_CLOCK_CTRL,
		     (CLOCK_CTRL_44MHZ_CORE | CLOCK_CTRL_ALTCLK));
		tr32(TG3PCI_CLOCK_CTRL);
		udelay(40);
		tw32(TG3PCI_CLOCK_CTRL,
		     (CLOCK_CTRL_ALTCLK));
		tr32(TG3PCI_CLOCK_CTRL);
		udelay(40);
	}
	tw32(TG3PCI_CLOCK_CTRL, 0);
	tr32(TG3PCI_CLOCK_CTRL);
	udelay(40);
}

#define PHY_BUSY_LOOPS	5000

static int tg3_readphy(struct tg3 *tp, int reg, u32 *val)
{
	u32 frame_val;
	int loops, ret;

	if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
		tw32(MAC_MI_MODE,
		     (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
		tr32(MAC_MI_MODE);
		udelay(40);
	}

	*val = 0xffffffff;

	frame_val  = ((PHY_ADDR << MI_COM_PHY_ADDR_SHIFT) &
		      MI_COM_PHY_ADDR_MASK);
	frame_val |= ((reg << MI_COM_REG_ADDR_SHIFT) &
		      MI_COM_REG_ADDR_MASK);
	frame_val |= (MI_COM_CMD_READ | MI_COM_START);

	tw32(MAC_MI_COM, frame_val);
	tr32(MAC_MI_COM);

	loops = PHY_BUSY_LOOPS;
	while (loops-- > 0) {
		udelay(10);
		frame_val = tr32(MAC_MI_COM);

		if ((frame_val & MI_COM_BUSY) == 0) {
			udelay(5);
			frame_val = tr32(MAC_MI_COM);
			break;
		}
	}

	ret = -EBUSY;
	if (loops > 0) {
		*val = frame_val & MI_COM_DATA_MASK;
		ret = 0;
	}

	if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
		tw32(MAC_MI_MODE, tp->mi_mode);
		tr32(MAC_MI_MODE);
		udelay(40);
	}

	return ret;
}

static int tg3_writephy(struct tg3 *tp, int reg, u32 val)
{
	u32 frame_val;
	int loops, ret;

	if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
		tw32(MAC_MI_MODE,
		     (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
		tr32(MAC_MI_MODE);
		udelay(40);
	}

	frame_val  = ((PHY_ADDR << MI_COM_PHY_ADDR_SHIFT) &
		      MI_COM_PHY_ADDR_MASK);
	frame_val |= ((reg << MI_COM_REG_ADDR_SHIFT) &
		      MI_COM_REG_ADDR_MASK);
	frame_val |= (val & MI_COM_DATA_MASK);
	frame_val |= (MI_COM_CMD_WRITE | MI_COM_START);

	tw32(MAC_MI_COM, frame_val);
	tr32(MAC_MI_COM);

	loops = PHY_BUSY_LOOPS;
	while (loops-- > 0) {
		udelay(10);
		frame_val = tr32(MAC_MI_COM);
		if ((frame_val & MI_COM_BUSY) == 0) {
			udelay(5);
			frame_val = tr32(MAC_MI_COM);
			break;
		}
	}

	ret = -EBUSY;
	if (loops > 0)
		ret = 0;

	if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
		tw32(MAC_MI_MODE, tp->mi_mode);
		tr32(MAC_MI_MODE);
		udelay(40);
	}

	return ret;
}
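
/* Illustrative note (editorial, not from the original source): an
 * MI_COM read frame for the BMCR register is assembled as
 *
 *	(PHY_ADDR << MI_COM_PHY_ADDR_SHIFT) |
 *	(MII_BMCR << MI_COM_REG_ADDR_SHIFT) |
 *	MI_COM_CMD_READ | MI_COM_START
 *
 * after which both helpers above poll MI_COM_BUSY for up to
 * PHY_BUSY_LOOPS iterations of 10us each (roughly 50ms) before giving
 * up with -EBUSY.
 */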

/* This will reset the tigon3 PHY if there is no valid
 * link unless the FORCE argument is non-zero.
 */
static int tg3_phy_reset(struct tg3 *tp, int force)
{
	u32 phy_status, phy_control;
	int err, limit;

	err  = tg3_readphy(tp, MII_BMSR, &phy_status);
	err |= tg3_readphy(tp, MII_BMSR, &phy_status);
	if (err != 0)
		return -EBUSY;

	/* If we have link, and not forcing a reset, then nothing
	 * to do.
	 */
	if ((phy_status & BMSR_LSTATUS) != 0 && (force == 0))
		return 0;

	/* OK, reset it, and poll the BMCR_RESET bit until it
	 * clears or we time out.
	 */
	phy_control = BMCR_RESET;
	err = tg3_writephy(tp, MII_BMCR, phy_control);
	if (err != 0)
		return -EBUSY;

	limit = 5000;
	while (limit--) {
		err = tg3_readphy(tp, MII_BMCR, &phy_control);
		if (err != 0)
			return -EBUSY;

		if ((phy_control & BMCR_RESET) == 0) {
			udelay(40);
			return 0;
		}
		udelay(10);
	}

	return -EBUSY;
}

static int tg3_setup_phy(struct tg3 *);

static int tg3_set_power_state(struct tg3 *tp, int state)
{
	u32 misc_host_ctrl;
	u16 power_control, power_caps;
	int pm = tp->pm_cap;

	/* Make sure register accesses (indirect or otherwise)
	 * will function correctly.
	 */
	pci_write_config_dword(tp->pdev,
			       TG3PCI_MISC_HOST_CTRL,
			       tp->misc_host_ctrl);

	pci_read_config_word(tp->pdev,
			     pm + PCI_PM_CTRL,
			     &power_control);
	power_control |= PCI_PM_CTRL_PME_STATUS;
	power_control &= ~(PCI_PM_CTRL_STATE_MASK);
	switch (state) {
	case 0:
		power_control |= 0;
		pci_write_config_word(tp->pdev,
				      pm + PCI_PM_CTRL,
				      power_control);
		tw32(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
		tr32(GRC_LOCAL_CTRL);
		udelay(100);

		return 0;

	case 1:
		power_control |= 1;
		break;

	case 2:
		power_control |= 2;
		break;

	case 3:
		power_control |= 3;
		break;

	default:
		printk(KERN_WARNING PFX "%s: Invalid power state (%d) "
		       "requested.\n",
		       tp->dev->name, state);
		return -EINVAL;
	}

	power_control |= PCI_PM_CTRL_PME_ENABLE;

	misc_host_ctrl = tr32(TG3PCI_MISC_HOST_CTRL);
	tw32(TG3PCI_MISC_HOST_CTRL,
	     misc_host_ctrl | MISC_HOST_CTRL_MASK_PCI_INT);

	if (tp->link_config.phy_is_low_power == 0) {
		tp->link_config.phy_is_low_power = 1;
		tp->link_config.orig_speed = tp->link_config.speed;
		tp->link_config.orig_duplex = tp->link_config.duplex;
		tp->link_config.orig_autoneg = tp->link_config.autoneg;
	}

	if (tp->phy_id != PHY_ID_SERDES) {
		tp->link_config.speed = SPEED_10;
		tp->link_config.duplex = DUPLEX_HALF;
		tp->link_config.autoneg = AUTONEG_ENABLE;
		tg3_setup_phy(tp);
	}

	pci_read_config_word(tp->pdev, pm + PCI_PM_PMC, &power_caps);

	if (tp->tg3_flags & TG3_FLAG_WOL_ENABLE) {
		u32 mac_mode;

		if (tp->phy_id != PHY_ID_SERDES) {
			tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x5a);
			udelay(40);

			mac_mode = MAC_MODE_PORT_MODE_MII;

			if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 ||
			    !(tp->tg3_flags & TG3_FLAG_WOL_SPEED_100MB))
				mac_mode |= MAC_MODE_LINK_POLARITY;
		} else {
			mac_mode = MAC_MODE_PORT_MODE_TBI;
		}

		if (((power_caps & PCI_PM_CAP_PME_D3cold) &&
		     (tp->tg3_flags & TG3_FLAG_WOL_ENABLE)))
			mac_mode |= MAC_MODE_MAGIC_PKT_ENABLE;

		tw32(MAC_MODE, mac_mode);
		tr32(MAC_MODE);
		udelay(100);

		tw32(MAC_RX_MODE, RX_MODE_ENABLE);
		tr32(MAC_RX_MODE);
		udelay(10);
	}

	if (tp->tg3_flags & TG3_FLAG_WOL_SPEED_100MB) {
		u32 base_val;

		base_val = 0;
		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
		    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701)
			base_val |= (CLOCK_CTRL_RXCLK_DISABLE |
				     CLOCK_CTRL_TXCLK_DISABLE);

		tw32(TG3PCI_CLOCK_CTRL, base_val |
		     CLOCK_CTRL_ALTCLK);
		tr32(TG3PCI_CLOCK_CTRL);
		udelay(40);

		tw32(TG3PCI_CLOCK_CTRL, base_val |
		     CLOCK_CTRL_ALTCLK |
		     CLOCK_CTRL_44MHZ_CORE);
		tr32(TG3PCI_CLOCK_CTRL);
		udelay(40);

		tw32(TG3PCI_CLOCK_CTRL, base_val |
		     CLOCK_CTRL_44MHZ_CORE);
		tr32(TG3PCI_CLOCK_CTRL);
		udelay(40);
	} else {
		u32 base_val;

		base_val = 0;
		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
		    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701)
			base_val |= (CLOCK_CTRL_RXCLK_DISABLE |
				     CLOCK_CTRL_TXCLK_DISABLE);

		tw32(TG3PCI_CLOCK_CTRL, base_val |
		     CLOCK_CTRL_ALTCLK |
		     CLOCK_CTRL_PWRDOWN_PLL133);
		tr32(TG3PCI_CLOCK_CTRL);
		udelay(40);
	}

	if (!(tp->tg3_flags & TG3_FLAG_EEPROM_WRITE_PROT) &&
	    (tp->tg3_flags & TG3_FLAG_WOL_ENABLE)) {
		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
		    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
			tw32(GRC_LOCAL_CTRL,
			     (GRC_LCLCTRL_GPIO_OE0 |
			      GRC_LCLCTRL_GPIO_OE1 |
			      GRC_LCLCTRL_GPIO_OE2 |
			      GRC_LCLCTRL_GPIO_OUTPUT0 |
			      GRC_LCLCTRL_GPIO_OUTPUT1));
			tr32(GRC_LOCAL_CTRL);
			udelay(100);
		} else {
			tw32(GRC_LOCAL_CTRL,
			     (GRC_LCLCTRL_GPIO_OE0 |
			      GRC_LCLCTRL_GPIO_OE1 |
			      GRC_LCLCTRL_GPIO_OE2 |
			      GRC_LCLCTRL_GPIO_OUTPUT1 |
			      GRC_LCLCTRL_GPIO_OUTPUT2));
			tr32(GRC_LOCAL_CTRL);
			udelay(100);

			tw32(GRC_LOCAL_CTRL,
			     (GRC_LCLCTRL_GPIO_OE0 |
			      GRC_LCLCTRL_GPIO_OE1 |
			      GRC_LCLCTRL_GPIO_OE2 |
			      GRC_LCLCTRL_GPIO_OUTPUT0 |
			      GRC_LCLCTRL_GPIO_OUTPUT1 |
			      GRC_LCLCTRL_GPIO_OUTPUT2));
			tr32(GRC_LOCAL_CTRL);
			udelay(100);

			tw32(GRC_LOCAL_CTRL,
			     (GRC_LCLCTRL_GPIO_OE0 |
			      GRC_LCLCTRL_GPIO_OE1 |
			      GRC_LCLCTRL_GPIO_OE2 |
			      GRC_LCLCTRL_GPIO_OUTPUT0 |
			      GRC_LCLCTRL_GPIO_OUTPUT1));
			tr32(GRC_LOCAL_CTRL);
			udelay(100);
		}
	}

	/* Finally, set the new power state. */
	pci_write_config_word(tp->pdev, pm + PCI_PM_CTRL, power_control);

	return 0;
}

static void tg3_link_report(struct tg3 *tp)
{
	if (!netif_carrier_ok(tp->dev)) {
		printk(KERN_INFO PFX "%s: Link is down.\n", tp->dev->name);
	} else {
		printk(KERN_INFO PFX "%s: Link is up at %d Mbps, %s duplex.\n",
		       tp->dev->name,
		       (tp->link_config.active_speed == SPEED_1000 ?
			1000 :
			(tp->link_config.active_speed == SPEED_100 ?
			 100 : 10)),
		       (tp->link_config.active_duplex == DUPLEX_FULL ?
			"full" : "half"));

		printk(KERN_INFO PFX "%s: Flow control is %s for TX and "
		       "%s for RX.\n",
		       tp->dev->name,
		       (tp->tg3_flags & TG3_FLAG_TX_PAUSE) ? "on" : "off",
		       (tp->tg3_flags & TG3_FLAG_RX_PAUSE) ? "on" : "off");
	}
}

static void tg3_setup_flow_control(struct tg3 *tp, u32 local_adv, u32 remote_adv)
{
	u32 new_tg3_flags = 0;

	if (local_adv & ADVERTISE_PAUSE_CAP) {
		if (local_adv & ADVERTISE_PAUSE_ASYM) {
			if (remote_adv & LPA_PAUSE_CAP)
				new_tg3_flags |=
					(TG3_FLAG_RX_PAUSE |
					 TG3_FLAG_TX_PAUSE);
			else if (remote_adv & LPA_PAUSE_ASYM)
				new_tg3_flags |=
					(TG3_FLAG_RX_PAUSE);
		} else {
			if (remote_adv & LPA_PAUSE_CAP)
				new_tg3_flags |=
					(TG3_FLAG_RX_PAUSE |
					 TG3_FLAG_TX_PAUSE);
		}
	} else if (local_adv & ADVERTISE_PAUSE_ASYM) {
		if ((remote_adv & LPA_PAUSE_CAP) &&
		    (remote_adv & LPA_PAUSE_ASYM))
			new_tg3_flags |= TG3_FLAG_TX_PAUSE;
	}

	tp->tg3_flags &= ~(TG3_FLAG_RX_PAUSE | TG3_FLAG_TX_PAUSE);
	tp->tg3_flags |= new_tg3_flags;

	if (new_tg3_flags & TG3_FLAG_RX_PAUSE)
		tp->rx_mode |= RX_MODE_FLOW_CTRL_ENABLE;
	else
		tp->rx_mode &= ~RX_MODE_FLOW_CTRL_ENABLE;

	if (new_tg3_flags & TG3_FLAG_TX_PAUSE)
		tp->tx_mode |= TX_MODE_FLOW_CTRL_ENABLE;
	else
		tp->tx_mode &= ~TX_MODE_FLOW_CTRL_ENABLE;
}
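
/* Editorial summary of the resolution implemented above (it follows
 * the usual IEEE 802.3 pause-negotiation rules): given the PAUSE and
 * ASYM_PAUSE bits from both link partners,
 *
 *	local PAUSE      + remote PAUSE       -> RX and TX pause
 *	local PAUSE+ASYM + remote ASYM only   -> RX pause only
 *	local ASYM only  + remote PAUSE+ASYM  -> TX pause only
 *	anything else                         -> no pause frames
 */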

static void tg3_aux_stat_to_speed_duplex(struct tg3 *tp, u32 val, u16 *speed, u8 *duplex)
{
	switch (val & MII_TG3_AUX_STAT_SPDMASK) {
	case MII_TG3_AUX_STAT_10HALF:
		*speed = SPEED_10;
		*duplex = DUPLEX_HALF;
		break;

	case MII_TG3_AUX_STAT_10FULL:
		*speed = SPEED_10;
		*duplex = DUPLEX_FULL;
		break;

	case MII_TG3_AUX_STAT_100HALF:
		*speed = SPEED_100;
		*duplex = DUPLEX_HALF;
		break;

	case MII_TG3_AUX_STAT_100FULL:
		*speed = SPEED_100;
		*duplex = DUPLEX_FULL;
		break;

	case MII_TG3_AUX_STAT_1000HALF:
		*speed = SPEED_1000;
		*duplex = DUPLEX_HALF;
		break;

	case MII_TG3_AUX_STAT_1000FULL:
		*speed = SPEED_1000;
		*duplex = DUPLEX_FULL;
		break;

	default:
		*speed = SPEED_INVALID;
		*duplex = DUPLEX_INVALID;
		break;
	}
}

static int tg3_phy_copper_begin(struct tg3 *tp, int wait_for_link)
{
	u32 new_adv;
	int i;

	if (tp->link_config.phy_is_low_power) {
		/* Entering low power mode.  Disable gigabit and
		 * 100baseT advertisements.
		 */
		tg3_writephy(tp, MII_TG3_CTRL, 0);

		new_adv = (ADVERTISE_10HALF | ADVERTISE_10FULL |
			   ADVERTISE_CSMA | ADVERTISE_PAUSE_CAP);
		if (tp->tg3_flags & TG3_FLAG_WOL_SPEED_100MB)
			new_adv |= (ADVERTISE_100HALF | ADVERTISE_100FULL);

		tg3_writephy(tp, MII_ADVERTISE, new_adv);
	} else if (tp->link_config.speed == SPEED_INVALID) {
		tp->link_config.advertising =
			(ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full |
			 ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full |
			 ADVERTISED_1000baseT_Half | ADVERTISED_1000baseT_Full |
			 ADVERTISED_Autoneg | ADVERTISED_MII);

		if (tp->tg3_flags & TG3_FLAG_10_100_ONLY)
			tp->link_config.advertising &=
				~(ADVERTISED_1000baseT_Half |
				  ADVERTISED_1000baseT_Full);

		new_adv = (ADVERTISE_CSMA | ADVERTISE_PAUSE_CAP);
		if (tp->link_config.advertising & ADVERTISED_10baseT_Half)
			new_adv |= ADVERTISE_10HALF;
		if (tp->link_config.advertising & ADVERTISED_10baseT_Full)
			new_adv |= ADVERTISE_10FULL;
		if (tp->link_config.advertising & ADVERTISED_100baseT_Half)
			new_adv |= ADVERTISE_100HALF;
		if (tp->link_config.advertising & ADVERTISED_100baseT_Full)
			new_adv |= ADVERTISE_100FULL;
		tg3_writephy(tp, MII_ADVERTISE, new_adv);

		if (tp->link_config.advertising &
		    (ADVERTISED_1000baseT_Half | ADVERTISED_1000baseT_Full)) {
			new_adv = 0;
			if (tp->link_config.advertising & ADVERTISED_1000baseT_Half)
				new_adv |= MII_TG3_CTRL_ADV_1000_HALF;
			if (tp->link_config.advertising & ADVERTISED_1000baseT_Full)
				new_adv |= MII_TG3_CTRL_ADV_1000_FULL;
			if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY) &&
			    (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
			     tp->pci_chip_rev_id == CHIPREV_ID_5701_B0))
				new_adv |= (MII_TG3_CTRL_AS_MASTER |
					    MII_TG3_CTRL_ENABLE_AS_MASTER);
			tg3_writephy(tp, MII_TG3_CTRL, new_adv);
		} else {
			tg3_writephy(tp, MII_TG3_CTRL, 0);
		}
	} else {
		/* Asking for a specific link mode. */
		if (tp->link_config.speed == SPEED_1000) {
			new_adv = ADVERTISE_CSMA | ADVERTISE_PAUSE_CAP;
			tg3_writephy(tp, MII_ADVERTISE, new_adv);

			if (tp->link_config.duplex == DUPLEX_FULL)
				new_adv = MII_TG3_CTRL_ADV_1000_FULL;
			else
				new_adv = MII_TG3_CTRL_ADV_1000_HALF;
			if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
			    tp->pci_chip_rev_id == CHIPREV_ID_5701_B0)
				new_adv |= (MII_TG3_CTRL_AS_MASTER |
					    MII_TG3_CTRL_ENABLE_AS_MASTER);
			tg3_writephy(tp, MII_TG3_CTRL, new_adv);
		} else {
			tg3_writephy(tp, MII_TG3_CTRL, 0);

			new_adv = ADVERTISE_CSMA | ADVERTISE_PAUSE_CAP;
			if (tp->link_config.speed == SPEED_100) {
				if (tp->link_config.duplex == DUPLEX_FULL)
					new_adv |= ADVERTISE_100FULL;
				else
					new_adv |= ADVERTISE_100HALF;
			} else {
				if (tp->link_config.duplex == DUPLEX_FULL)
					new_adv |= ADVERTISE_10FULL;
				else
					new_adv |= ADVERTISE_10HALF;
			}
			tg3_writephy(tp, MII_ADVERTISE, new_adv);
		}
	}

	if (tp->link_config.autoneg == AUTONEG_DISABLE &&
	    tp->link_config.speed != SPEED_INVALID) {
		u32 bmcr, orig_bmcr;

		tp->link_config.active_speed = tp->link_config.speed;
		tp->link_config.active_duplex = tp->link_config.duplex;

		bmcr = 0;
		switch (tp->link_config.speed) {
		default:
		case SPEED_10:
			break;

		case SPEED_100:
			bmcr |= BMCR_SPEED100;
			break;

		case SPEED_1000:
			bmcr |= TG3_BMCR_SPEED1000;
			break;
		}

		if (tp->link_config.duplex == DUPLEX_FULL)
			bmcr |= BMCR_FULLDPLX;

		tg3_readphy(tp, MII_BMCR, &orig_bmcr);
		if (bmcr != orig_bmcr) {
			tg3_writephy(tp, MII_BMCR, BMCR_LOOPBACK);
			for (i = 0; i < 15000; i++) {
				u32 tmp;

				udelay(10);
				tg3_readphy(tp, MII_BMSR, &tmp);
				tg3_readphy(tp, MII_BMSR, &tmp);
				if (!(tmp & BMSR_LSTATUS)) {
					udelay(40);
					break;
				}
			}
			tg3_writephy(tp, MII_BMCR, bmcr);
			udelay(40);
		}
	} else {
		tg3_writephy(tp, MII_BMCR,
			     BMCR_ANENABLE | BMCR_ANRESTART);
	}

	if (wait_for_link) {
		tp->link_config.active_speed = SPEED_INVALID;
		tp->link_config.active_duplex = DUPLEX_INVALID;
		for (i = 0; i < 300000; i++) {
			u32 tmp;

			udelay(10);
			tg3_readphy(tp, MII_BMSR, &tmp);
			tg3_readphy(tp, MII_BMSR, &tmp);
			if (!(tmp & BMSR_LSTATUS))
				continue;

			tg3_readphy(tp, MII_TG3_AUX_STAT, &tmp);
			tg3_aux_stat_to_speed_duplex(tp, tmp,
						     &tp->link_config.active_speed,
						     &tp->link_config.active_duplex);
		}
		if (tp->link_config.active_speed == SPEED_INVALID)
			return -EINVAL;
	}

	return 0;
}

static int tg3_init_5401phy_dsp(struct tg3 *tp)
{
	int err;

	/* Turn off tap power management. */
	err  = tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0c20);

	err |= tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x0012);
	err |= tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x1804);

	err |= tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x0013);
	err |= tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x1204);

	err |= tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8006);
	err |= tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0132);

	err |= tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8006);
	err |= tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0232);

	err |= tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x201f);
	err |= tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0a20);

	udelay(40);

	return err;
}

static int tg3_setup_copper_phy(struct tg3 *tp)
{
	int current_link_up;
	u32 bmsr, dummy;
	u16 current_speed;
	u8 current_duplex;
	int i, err;

	tw32(MAC_STATUS,
	     (MAC_STATUS_SYNC_CHANGED |
	      MAC_STATUS_CFG_CHANGED));
	tr32(MAC_STATUS);
	udelay(40);

	tp->mi_mode = MAC_MI_MODE_BASE;
	tw32(MAC_MI_MODE, tp->mi_mode);
	tr32(MAC_MI_MODE);
	udelay(40);

	tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x02);

	/* Some third-party PHYs need to be reset on link going
	 * down.
	 *
	 * XXX 5705 note: This workaround also applies to 5705_a0
	 */
	if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
	     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) &&
	    netif_carrier_ok(tp->dev)) {
		tg3_readphy(tp, MII_BMSR, &bmsr);
		tg3_readphy(tp, MII_BMSR, &bmsr);
		if (!(bmsr & BMSR_LSTATUS))
			tg3_phy_reset(tp, 1);
	}

	if ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5401) {
		tg3_readphy(tp, MII_BMSR, &bmsr);
		tg3_readphy(tp, MII_BMSR, &bmsr);

		if (!(tp->tg3_flags & TG3_FLAG_INIT_COMPLETE))
			bmsr = 0;

		if (!(bmsr & BMSR_LSTATUS)) {
			err = tg3_init_5401phy_dsp(tp);
			if (err)
				return err;

			tg3_readphy(tp, MII_BMSR, &bmsr);
			for (i = 0; i < 1000; i++) {
				udelay(10);
				tg3_readphy(tp, MII_BMSR, &bmsr);
				if (bmsr & BMSR_LSTATUS) {
					udelay(40);
					break;
				}
			}

			if ((tp->phy_id & PHY_ID_REV_MASK) == PHY_REV_BCM5401_B0 &&
			    !(bmsr & BMSR_LSTATUS) &&
			    tp->link_config.active_speed == SPEED_1000) {
				err = tg3_phy_reset(tp, 1);
				if (!err)
					err = tg3_init_5401phy_dsp(tp);
				if (err)
					return err;
			}
		}
	} else if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
		   tp->pci_chip_rev_id == CHIPREV_ID_5701_B0) {
		/* 5701 {A0,B0} CRC bug workaround */
		tg3_writephy(tp, 0x15, 0x0a75);
		tg3_writephy(tp, 0x1c, 0x8c68);
		tg3_writephy(tp, 0x1c, 0x8d68);
		tg3_writephy(tp, 0x1c, 0x8c68);
	}

	/* Clear pending interrupts... */
	tg3_readphy(tp, MII_TG3_ISTAT, &dummy);
	tg3_readphy(tp, MII_TG3_ISTAT, &dummy);

	if (tp->tg3_flags & TG3_FLAG_USE_MI_INTERRUPT)
		tg3_writephy(tp, MII_TG3_IMASK, ~MII_TG3_INT_LINKCHG);
	else
		tg3_writephy(tp, MII_TG3_IMASK, ~0);

	if (tp->led_mode == led_mode_three_link)
		tg3_writephy(tp, MII_TG3_EXT_CTRL,
			     MII_TG3_EXT_CTRL_LNK3_LED_MODE);
	else
		tg3_writephy(tp, MII_TG3_EXT_CTRL, 0);

	current_link_up = 0;
	current_speed = SPEED_INVALID;
	current_duplex = DUPLEX_INVALID;

	tg3_readphy(tp, MII_BMSR, &bmsr);
	tg3_readphy(tp, MII_BMSR, &bmsr);

	if (bmsr & BMSR_LSTATUS) {
		u32 aux_stat, bmcr;

		tg3_readphy(tp, MII_TG3_AUX_STAT, &aux_stat);
		for (i = 0; i < 2000; i++) {
			udelay(10);
			tg3_readphy(tp, MII_TG3_AUX_STAT, &aux_stat);
			if (aux_stat)
				break;
		}

		tg3_aux_stat_to_speed_duplex(tp, aux_stat,
					     &current_speed,
					     &current_duplex);
		tg3_readphy(tp, MII_BMCR, &bmcr);
		tg3_readphy(tp, MII_BMCR, &bmcr);
		if (tp->link_config.autoneg == AUTONEG_ENABLE) {
			if (bmcr & BMCR_ANENABLE) {
				u32 gig_ctrl;

				current_link_up = 1;

				/* Force autoneg restart if we are exiting
				 * low power mode.
				 */
				tg3_readphy(tp, MII_TG3_CTRL, &gig_ctrl);
				if (!(gig_ctrl & (MII_TG3_CTRL_ADV_1000_HALF |
						  MII_TG3_CTRL_ADV_1000_FULL))) {
					current_link_up = 0;
				}
			} else {
				current_link_up = 0;
			}
		} else {
			if (!(bmcr & BMCR_ANENABLE) &&
			    tp->link_config.speed == current_speed &&
			    tp->link_config.duplex == current_duplex) {
				current_link_up = 1;
			} else {
				current_link_up = 0;
			}
		}

		tp->link_config.active_speed = current_speed;
		tp->link_config.active_duplex = current_duplex;
	}

	if (current_link_up == 1 &&
	    (tp->link_config.active_duplex == DUPLEX_FULL) &&
	    (tp->link_config.autoneg == AUTONEG_ENABLE)) {
		u32 local_adv, remote_adv;

		tg3_readphy(tp, MII_ADVERTISE, &local_adv);
		local_adv &= (ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM);

		tg3_readphy(tp, MII_LPA, &remote_adv);
		remote_adv &= (LPA_PAUSE_CAP | LPA_PAUSE_ASYM);

		/* If we are not advertising full pause capability,
		 * something is wrong.  Bring the link down and reconfigure.
		 */
		if (local_adv != ADVERTISE_PAUSE_CAP) {
			current_link_up = 0;
		} else {
			tg3_setup_flow_control(tp, local_adv, remote_adv);
		}
	}

	if (current_link_up == 0) {
		u32 tmp;

		tg3_phy_copper_begin(tp, 0);

		tg3_readphy(tp, MII_BMSR, &tmp);
		tg3_readphy(tp, MII_BMSR, &tmp);
		if (tmp & BMSR_LSTATUS)
			current_link_up = 1;
	}

	tp->mac_mode &= ~MAC_MODE_PORT_MODE_MASK;
	if (current_link_up == 1) {
		if (tp->link_config.active_speed == SPEED_100 ||
		    tp->link_config.active_speed == SPEED_10)
			tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
		else
			tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
	} else
		tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;

	tp->mac_mode &= ~MAC_MODE_HALF_DUPLEX;
	if (tp->link_config.active_duplex == DUPLEX_HALF)
		tp->mac_mode |= MAC_MODE_HALF_DUPLEX;

	tp->mac_mode &= ~MAC_MODE_LINK_POLARITY;
	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700) {
		if ((tp->led_mode == led_mode_link10) ||
		    (current_link_up == 1 &&
		     tp->link_config.active_speed == SPEED_10))
			tp->mac_mode |= MAC_MODE_LINK_POLARITY;
	} else {
		if (current_link_up == 1)
			tp->mac_mode |= MAC_MODE_LINK_POLARITY;
		tw32(MAC_LED_CTRL, LED_CTRL_PHY_MODE_1);
	}

	/* ??? Without this setting Netgear GA302T PHY does not
	 * ??? send/receive packets...
	 */
	if ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5411 &&
	    tp->pci_chip_rev_id == CHIPREV_ID_5700_ALTIMA) {
		tp->mi_mode |= MAC_MI_MODE_AUTO_POLL;
		tw32(MAC_MI_MODE, tp->mi_mode);
		tr32(MAC_MI_MODE);
		udelay(40);
	}

	tw32(MAC_MODE, tp->mac_mode);
	tr32(MAC_MODE);
	udelay(40);

	if (tp->tg3_flags &
	    (TG3_FLAG_USE_LINKCHG_REG |
	     TG3_FLAG_POLL_SERDES)) {
		/* Polled via timer. */
		tw32(MAC_EVENT, 0);
	} else {
		tw32(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
	}
	tr32(MAC_EVENT);
	udelay(40);

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 &&
	    current_link_up == 1 &&
	    tp->link_config.active_speed == SPEED_1000 &&
	    ((tp->tg3_flags & TG3_FLAG_PCIX_MODE) ||
	     (tp->tg3_flags & TG3_FLAG_PCI_HIGH_SPEED))) {
		udelay(120);
		tw32(MAC_STATUS,
		     (MAC_STATUS_SYNC_CHANGED |
		      MAC_STATUS_CFG_CHANGED));
		tr32(MAC_STATUS);
		udelay(40);
		tg3_write_mem(tp,
			      NIC_SRAM_FIRMWARE_MBOX,
			      NIC_SRAM_FIRMWARE_MBOX_MAGIC2);
	}

	if (current_link_up != netif_carrier_ok(tp->dev)) {
		if (current_link_up)
			netif_carrier_on(tp->dev);
		else
			netif_carrier_off(tp->dev);
		tg3_link_report(tp);
	}

	return 0;
}

struct tg3_fiber_aneginfo {
	int state;
#define ANEG_STATE_UNKNOWN		0
#define ANEG_STATE_AN_ENABLE		1
#define ANEG_STATE_RESTART_INIT		2
#define ANEG_STATE_RESTART		3
#define ANEG_STATE_DISABLE_LINK_OK	4
#define ANEG_STATE_ABILITY_DETECT_INIT	5
#define ANEG_STATE_ABILITY_DETECT	6
#define ANEG_STATE_ACK_DETECT_INIT	7
#define ANEG_STATE_ACK_DETECT		8
#define ANEG_STATE_COMPLETE_ACK_INIT	9
#define ANEG_STATE_COMPLETE_ACK		10
#define ANEG_STATE_IDLE_DETECT_INIT	11
#define ANEG_STATE_IDLE_DETECT		12
#define ANEG_STATE_LINK_OK		13
#define ANEG_STATE_NEXT_PAGE_WAIT_INIT	14
#define ANEG_STATE_NEXT_PAGE_WAIT	15

	u32 flags;
#define MR_AN_ENABLE		0x00000001
#define MR_RESTART_AN		0x00000002
#define MR_AN_COMPLETE		0x00000004
#define MR_PAGE_RX		0x00000008
#define MR_NP_LOADED		0x00000010
#define MR_TOGGLE_TX		0x00000020
#define MR_LP_ADV_FULL_DUPLEX	0x00000040
#define MR_LP_ADV_HALF_DUPLEX	0x00000080
#define MR_LP_ADV_SYM_PAUSE	0x00000100
#define MR_LP_ADV_ASYM_PAUSE	0x00000200
#define MR_LP_ADV_REMOTE_FAULT1	0x00000400
#define MR_LP_ADV_REMOTE_FAULT2	0x00000800
#define MR_LP_ADV_NEXT_PAGE	0x00001000
#define MR_TOGGLE_RX		0x00002000
#define MR_NP_RX		0x00004000

#define MR_LINK_OK		0x80000000

	unsigned long link_time, cur_time;

	u32 ability_match_cfg;
	int ability_match_count;

	char ability_match, idle_match, ack_match;

	u32 txconfig, rxconfig;
#define ANEG_CFG_NP		0x00000080
#define ANEG_CFG_ACK		0x00000040
#define ANEG_CFG_RF2		0x00000020
#define ANEG_CFG_RF1		0x00000010
#define ANEG_CFG_PS2		0x00000001
#define ANEG_CFG_PS1		0x00008000
#define ANEG_CFG_HD		0x00004000
#define ANEG_CFG_FD		0x00002000
#define ANEG_CFG_INVAL		0x00001f06
};

#define ANEG_OK		0
#define ANEG_DONE	1
#define ANEG_TIMER_ENAB	2
#define ANEG_FAILED	-1

#define ANEG_STATE_SETTLE_TIME	10000
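
/* Editorial note: this mirrors the IEEE 802.3z clause 37
 * auto-negotiation arbitration state machine.  The normal path through
 * the states below is
 *
 *	AN_ENABLE -> RESTART_INIT -> RESTART ->
 *	ABILITY_DETECT_INIT -> ABILITY_DETECT ->
 *	ACK_DETECT_INIT -> ACK_DETECT ->
 *	COMPLETE_ACK_INIT -> COMPLETE_ACK ->
 *	IDLE_DETECT_INIT -> IDLE_DETECT -> LINK_OK
 *
 * with ANEG_STATE_SETTLE_TIME ticks (driven by the caller's udelay
 * loop) spent in each of the timed states.
 */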

static int tg3_fiber_aneg_smachine(struct tg3 *tp,
				   struct tg3_fiber_aneginfo *ap)
{
	unsigned long delta;
	u32 rx_cfg_reg;
	int ret;

	if (ap->state == ANEG_STATE_UNKNOWN) {
		ap->rxconfig = 0;
		ap->link_time = 0;
		ap->cur_time = 0;
		ap->ability_match_cfg = 0;
		ap->ability_match_count = 0;
		ap->ability_match = 0;
		ap->idle_match = 0;
		ap->ack_match = 0;
	}
	ap->cur_time++;

	if (tr32(MAC_STATUS) & MAC_STATUS_RCVD_CFG) {
		rx_cfg_reg = tr32(MAC_RX_AUTO_NEG);

		if (rx_cfg_reg != ap->ability_match_cfg) {
			ap->ability_match_cfg = rx_cfg_reg;
			ap->ability_match = 0;
			ap->ability_match_count = 0;
		} else {
			if (++ap->ability_match_count > 1) {
				ap->ability_match = 1;
				ap->ability_match_cfg = rx_cfg_reg;
			}
		}
		if (rx_cfg_reg & ANEG_CFG_ACK)
			ap->ack_match = 1;
		else
			ap->ack_match = 0;

		ap->idle_match = 0;
	} else {
		ap->idle_match = 1;
		ap->ability_match_cfg = 0;
		ap->ability_match_count = 0;
		ap->ability_match = 0;
		ap->ack_match = 0;

		rx_cfg_reg = 0;
	}

	ap->rxconfig = rx_cfg_reg;
	ret = ANEG_OK;

	switch(ap->state) {
	case ANEG_STATE_UNKNOWN:
		if (ap->flags & (MR_AN_ENABLE | MR_RESTART_AN))
			ap->state = ANEG_STATE_AN_ENABLE;

		/* fallthru */
	case ANEG_STATE_AN_ENABLE:
		ap->flags &= ~(MR_AN_COMPLETE | MR_PAGE_RX);
		if (ap->flags & MR_AN_ENABLE) {
			ap->link_time = 0;
			ap->cur_time = 0;
			ap->ability_match_cfg = 0;
			ap->ability_match_count = 0;
			ap->ability_match = 0;
			ap->idle_match = 0;
			ap->ack_match = 0;

			ap->state = ANEG_STATE_RESTART_INIT;
		} else {
			ap->state = ANEG_STATE_DISABLE_LINK_OK;
		}
		break;

	case ANEG_STATE_RESTART_INIT:
		ap->link_time = ap->cur_time;
		ap->flags &= ~(MR_NP_LOADED);
		ap->txconfig = 0;
		tw32(MAC_TX_AUTO_NEG, 0);
		tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
		tw32(MAC_MODE, tp->mac_mode);
		tr32(MAC_MODE);
		udelay(40);

		ret = ANEG_TIMER_ENAB;
		ap->state = ANEG_STATE_RESTART;

		/* fallthru */
	case ANEG_STATE_RESTART:
		delta = ap->cur_time - ap->link_time;
		if (delta > ANEG_STATE_SETTLE_TIME) {
			ap->state = ANEG_STATE_ABILITY_DETECT_INIT;
		} else {
			ret = ANEG_TIMER_ENAB;
		}
		break;

	case ANEG_STATE_DISABLE_LINK_OK:
		ret = ANEG_DONE;
		break;

	case ANEG_STATE_ABILITY_DETECT_INIT:
		ap->flags &= ~(MR_TOGGLE_TX);
		ap->txconfig = (ANEG_CFG_FD | ANEG_CFG_PS1);
		tw32(MAC_TX_AUTO_NEG, ap->txconfig);
		tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
		tw32(MAC_MODE, tp->mac_mode);
		tr32(MAC_MODE);
		udelay(40);

		ap->state = ANEG_STATE_ABILITY_DETECT;
		break;

	case ANEG_STATE_ABILITY_DETECT:
		if (ap->ability_match != 0 && ap->rxconfig != 0) {
			ap->state = ANEG_STATE_ACK_DETECT_INIT;
		}
		break;

	case ANEG_STATE_ACK_DETECT_INIT:
		ap->txconfig |= ANEG_CFG_ACK;
		tw32(MAC_TX_AUTO_NEG, ap->txconfig);
		tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
		tw32(MAC_MODE, tp->mac_mode);
		tr32(MAC_MODE);
		udelay(40);

		ap->state = ANEG_STATE_ACK_DETECT;

		/* fallthru */
	case ANEG_STATE_ACK_DETECT:
		if (ap->ack_match != 0) {
			if ((ap->rxconfig & ~ANEG_CFG_ACK) ==
			    (ap->ability_match_cfg & ~ANEG_CFG_ACK)) {
				ap->state = ANEG_STATE_COMPLETE_ACK_INIT;
			} else {
				ap->state = ANEG_STATE_AN_ENABLE;
			}
		} else if (ap->ability_match != 0 &&
			   ap->rxconfig == 0) {
			ap->state = ANEG_STATE_AN_ENABLE;
		}
		break;

	case ANEG_STATE_COMPLETE_ACK_INIT:
		if (ap->rxconfig & ANEG_CFG_INVAL) {
			ret = ANEG_FAILED;
			break;
		}
		ap->flags &= ~(MR_LP_ADV_FULL_DUPLEX |
			       MR_LP_ADV_HALF_DUPLEX |
			       MR_LP_ADV_SYM_PAUSE |
			       MR_LP_ADV_ASYM_PAUSE |
			       MR_LP_ADV_REMOTE_FAULT1 |
			       MR_LP_ADV_REMOTE_FAULT2 |
			       MR_LP_ADV_NEXT_PAGE |
			       MR_TOGGLE_RX |
			       MR_NP_RX);
		if (ap->rxconfig & ANEG_CFG_FD)
			ap->flags |= MR_LP_ADV_FULL_DUPLEX;
		if (ap->rxconfig & ANEG_CFG_HD)
			ap->flags |= MR_LP_ADV_HALF_DUPLEX;
		if (ap->rxconfig & ANEG_CFG_PS1)
			ap->flags |= MR_LP_ADV_SYM_PAUSE;
		if (ap->rxconfig & ANEG_CFG_PS2)
			ap->flags |= MR_LP_ADV_ASYM_PAUSE;
		if (ap->rxconfig & ANEG_CFG_RF1)
			ap->flags |= MR_LP_ADV_REMOTE_FAULT1;
		if (ap->rxconfig & ANEG_CFG_RF2)
			ap->flags |= MR_LP_ADV_REMOTE_FAULT2;
		if (ap->rxconfig & ANEG_CFG_NP)
			ap->flags |= MR_LP_ADV_NEXT_PAGE;

		ap->link_time = ap->cur_time;

		ap->flags ^= (MR_TOGGLE_TX);
		if (ap->rxconfig & 0x0008)
			ap->flags |= MR_TOGGLE_RX;
		if (ap->rxconfig & ANEG_CFG_NP)
			ap->flags |= MR_NP_RX;
		ap->flags |= MR_PAGE_RX;

		ap->state = ANEG_STATE_COMPLETE_ACK;
		ret = ANEG_TIMER_ENAB;
		break;

	case ANEG_STATE_COMPLETE_ACK:
		if (ap->ability_match != 0 &&
		    ap->rxconfig == 0) {
			ap->state = ANEG_STATE_AN_ENABLE;
			break;
		}
		delta = ap->cur_time - ap->link_time;
		if (delta > ANEG_STATE_SETTLE_TIME) {
			if (!(ap->flags & (MR_LP_ADV_NEXT_PAGE))) {
				ap->state = ANEG_STATE_IDLE_DETECT_INIT;
			} else {
				if ((ap->txconfig & ANEG_CFG_NP) == 0 &&
				    !(ap->flags & MR_NP_RX)) {
					ap->state = ANEG_STATE_IDLE_DETECT_INIT;
				} else {
					ret = ANEG_FAILED;
				}
			}
		}
		break;

	case ANEG_STATE_IDLE_DETECT_INIT:
		ap->link_time = ap->cur_time;
		tp->mac_mode &= ~MAC_MODE_SEND_CONFIGS;
		tw32(MAC_MODE, tp->mac_mode);
		tr32(MAC_MODE);
		udelay(40);

		ap->state = ANEG_STATE_IDLE_DETECT;
		ret = ANEG_TIMER_ENAB;
		break;

	case ANEG_STATE_IDLE_DETECT:
		if (ap->ability_match != 0 &&
		    ap->rxconfig == 0) {
			ap->state = ANEG_STATE_AN_ENABLE;
			break;
		}
		delta = ap->cur_time - ap->link_time;
		if (delta > ANEG_STATE_SETTLE_TIME) {
			/* XXX another gem from the Broadcom driver :( */
			ap->state = ANEG_STATE_LINK_OK;
		}
		break;

	case ANEG_STATE_LINK_OK:
		ap->flags |= (MR_AN_COMPLETE | MR_LINK_OK);
		ret = ANEG_DONE;
		break;

	case ANEG_STATE_NEXT_PAGE_WAIT_INIT:
		/* ??? unimplemented */
		break;

	case ANEG_STATE_NEXT_PAGE_WAIT:
		/* ??? unimplemented */
		break;

	default:
		ret = ANEG_FAILED;
		break;
	}

	return ret;
}

static int tg3_setup_fiber_phy(struct tg3 *tp)
{
	u32 orig_pause_cfg;
	u16 orig_active_speed;
	u8 orig_active_duplex;
	int current_link_up;
	int i;

	orig_pause_cfg =
		(tp->tg3_flags & (TG3_FLAG_RX_PAUSE |
				  TG3_FLAG_TX_PAUSE));
	orig_active_speed = tp->link_config.active_speed;
	orig_active_duplex = tp->link_config.active_duplex;

	tp->mac_mode &= ~(MAC_MODE_PORT_MODE_MASK | MAC_MODE_HALF_DUPLEX);
	tp->mac_mode |= MAC_MODE_PORT_MODE_TBI;
	tw32(MAC_MODE, tp->mac_mode);
	tr32(MAC_MODE);
	udelay(40);

	/* Reset when initting first time or we have a link. */
	if (!(tp->tg3_flags & TG3_FLAG_INIT_COMPLETE) ||
	    (tr32(MAC_STATUS) & MAC_STATUS_PCS_SYNCED)) {
		/* Set PLL lock range. */
		tg3_writephy(tp, 0x16, 0x8007);

		/* SW reset */
		tg3_writephy(tp, MII_BMCR, BMCR_RESET);

		/* Wait for reset to complete. */
		/* XXX schedule_timeout() ... */
		for (i = 0; i < 500; i++)
			udelay(10);

		/* Config mode; select PMA/Ch 1 regs. */
		tg3_writephy(tp, 0x10, 0x8411);

		/* Enable auto-lock and comdet, select txclk for tx. */
		tg3_writephy(tp, 0x11, 0x0a10);

		tg3_writephy(tp, 0x18, 0x00a0);
		tg3_writephy(tp, 0x16, 0x41ff);

		/* Assert and deassert POR. */
		tg3_writephy(tp, 0x13, 0x0400);
		udelay(40);
		tg3_writephy(tp, 0x13, 0x0000);

		tg3_writephy(tp, 0x11, 0x0a50);
		udelay(40);
		tg3_writephy(tp, 0x11, 0x0a10);

		/* Wait for signal to stabilize */
		/* XXX schedule_timeout() ... */
		for (i = 0; i < 15000; i++)
			udelay(10);

		/* Deselect the channel register so we can read the PHYID
		 * later.
		 */
		tg3_writephy(tp, 0x10, 0x8011);
	}

	/* Enable link change interrupt unless serdes polling. */
	if (!(tp->tg3_flags & TG3_FLAG_POLL_SERDES))
		tw32(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
	else
		tw32(MAC_EVENT, 0);
	tr32(MAC_EVENT);
	udelay(40);

	current_link_up = 0;
	if (tr32(MAC_STATUS) & MAC_STATUS_PCS_SYNCED) {
		if (tp->link_config.autoneg == AUTONEG_ENABLE &&
		    !(tp->tg3_flags & TG3_FLAG_GOT_SERDES_FLOWCTL)) {
			struct tg3_fiber_aneginfo aninfo;
			int status = ANEG_FAILED;
			unsigned int tick;
			u32 tmp;

			memset(&aninfo, 0, sizeof(aninfo));
			aninfo.flags |= (MR_AN_ENABLE);

			tw32(MAC_TX_AUTO_NEG, 0);

			tmp = tp->mac_mode & ~MAC_MODE_PORT_MODE_MASK;
			tw32(MAC_MODE, tmp | MAC_MODE_PORT_MODE_GMII);
			tr32(MAC_MODE);
			udelay(40);

			tw32(MAC_MODE, tp->mac_mode | MAC_MODE_SEND_CONFIGS);
			tr32(MAC_MODE);
			udelay(40);

			aninfo.state = ANEG_STATE_UNKNOWN;
			aninfo.cur_time = 0;
			tick = 0;
			while (++tick < 195000) {
				status = tg3_fiber_aneg_smachine(tp, &aninfo);
				if (status == ANEG_DONE ||
				    status == ANEG_FAILED)
					break;

				udelay(1);
			}

			tp->mac_mode &= ~MAC_MODE_SEND_CONFIGS;
			tw32(MAC_MODE, tp->mac_mode);
			tr32(MAC_MODE);
			udelay(40);

			if (status == ANEG_DONE &&
			    (aninfo.flags &
			     (MR_AN_COMPLETE | MR_LINK_OK |
			      MR_LP_ADV_FULL_DUPLEX))) {
				u32 local_adv, remote_adv;

				local_adv = ADVERTISE_PAUSE_CAP;
				remote_adv = 0;
				if (aninfo.flags & MR_LP_ADV_SYM_PAUSE)
					remote_adv |= LPA_PAUSE_CAP;
				if (aninfo.flags & MR_LP_ADV_ASYM_PAUSE)
					remote_adv |= LPA_PAUSE_ASYM;

				tg3_setup_flow_control(tp, local_adv, remote_adv);

				tp->tg3_flags |=
					TG3_FLAG_GOT_SERDES_FLOWCTL;
				current_link_up = 1;
			}
			for (i = 0; i < 60; i++) {
				udelay(20);
				tw32(MAC_STATUS,
				     (MAC_STATUS_SYNC_CHANGED |
				      MAC_STATUS_CFG_CHANGED));
				tr32(MAC_STATUS);
				udelay(40);
				if ((tr32(MAC_STATUS) &
				     (MAC_STATUS_SYNC_CHANGED |
				      MAC_STATUS_CFG_CHANGED)) == 0)
					break;
			}
			if (current_link_up == 0 &&
			    (tr32(MAC_STATUS) & MAC_STATUS_PCS_SYNCED)) {
				current_link_up = 1;
			}
		} else {
			/* Forcing 1000FD link up. */
			current_link_up = 1;
		}
	}

	tp->mac_mode &= ~MAC_MODE_LINK_POLARITY;
	tw32(MAC_MODE, tp->mac_mode);
	tr32(MAC_MODE);
	udelay(40);

	tp->hw_status->status =
		(SD_STATUS_UPDATED |
		 (tp->hw_status->status & ~SD_STATUS_LINK_CHG));

	for (i = 0; i < 100; i++) {
		udelay(20);
		tw32(MAC_STATUS,
		     (MAC_STATUS_SYNC_CHANGED |
		      MAC_STATUS_CFG_CHANGED));
		tr32(MAC_STATUS);
		udelay(40);
		if ((tr32(MAC_STATUS) &
		     (MAC_STATUS_SYNC_CHANGED |
		      MAC_STATUS_CFG_CHANGED)) == 0)
			break;
	}

	if ((tr32(MAC_STATUS) & MAC_STATUS_PCS_SYNCED) == 0)
		current_link_up = 0;

	if (current_link_up == 1) {
		tp->link_config.active_speed = SPEED_1000;
		tp->link_config.active_duplex = DUPLEX_FULL;
	} else {
		tp->link_config.active_speed = SPEED_INVALID;
		tp->link_config.active_duplex = DUPLEX_INVALID;
	}

	if (current_link_up != netif_carrier_ok(tp->dev)) {
		if (current_link_up)
			netif_carrier_on(tp->dev);
		else
			netif_carrier_off(tp->dev);
		tg3_link_report(tp);
	} else {
		u32 now_pause_cfg =
			tp->tg3_flags & (TG3_FLAG_RX_PAUSE |
					 TG3_FLAG_TX_PAUSE);
		if (orig_pause_cfg != now_pause_cfg ||
		    orig_active_speed != tp->link_config.active_speed ||
		    orig_active_duplex != tp->link_config.active_duplex)
			tg3_link_report(tp);
	}

	if ((tr32(MAC_STATUS) & MAC_STATUS_PCS_SYNCED) == 0) {
		tw32(MAC_MODE, tp->mac_mode | MAC_MODE_LINK_POLARITY);
		tr32(MAC_MODE);
		udelay(40);
		if (tp->tg3_flags & TG3_FLAG_INIT_COMPLETE) {
			tw32(MAC_MODE, tp->mac_mode);
			tr32(MAC_MODE);
			udelay(40);
		}
	}

	return 0;
}

static int tg3_setup_phy(struct tg3 *tp)
{
	int err;

	if (tp->phy_id == PHY_ID_SERDES) {
		err = tg3_setup_fiber_phy(tp);
	} else {
		err = tg3_setup_copper_phy(tp);
	}

	if (tp->link_config.active_speed == SPEED_1000 &&
	    tp->link_config.active_duplex == DUPLEX_HALF)
		tw32(MAC_TX_LENGTHS,
		     ((2 << TX_LENGTHS_IPG_CRS_SHIFT) |
		      (6 << TX_LENGTHS_IPG_SHIFT) |
		      (0xff << TX_LENGTHS_SLOT_TIME_SHIFT)));
	else
		tw32(MAC_TX_LENGTHS,
		     ((2 << TX_LENGTHS_IPG_CRS_SHIFT) |
		      (6 << TX_LENGTHS_IPG_SHIFT) |
		      (32 << TX_LENGTHS_SLOT_TIME_SHIFT)));

	return err;
}

/* Tigon3 never reports partial packet sends.  So we do not
 * need special logic to handle SKBs that have not had all
 * of their frags sent yet, like SunGEM does.
 */
static void tg3_tx(struct tg3 *tp)
{
	u32 hw_idx = tp->hw_status->idx[0].tx_consumer;
	u32 sw_idx = tp->tx_cons;

	while (sw_idx != hw_idx) {
		struct tx_ring_info *ri = &tp->tx_buffers[sw_idx];
		struct sk_buff *skb = ri->skb;
		int i;

		if (unlikely(skb == NULL))
			BUG();

		pci_unmap_single(tp->pdev,
				 pci_unmap_addr(ri, mapping),
				 skb_headlen(skb),
				 PCI_DMA_TODEVICE);

		ri->skb = NULL;

		sw_idx = NEXT_TX(sw_idx);

		for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
			if (unlikely(sw_idx == hw_idx))
				BUG();

			ri = &tp->tx_buffers[sw_idx];
			if (unlikely(ri->skb != NULL))
				BUG();

			pci_unmap_page(tp->pdev,
				       pci_unmap_addr(ri, mapping),
				       skb_shinfo(skb)->frags[i].size,
				       PCI_DMA_TODEVICE);

			sw_idx = NEXT_TX(sw_idx);
		}

		dev_kfree_skb_irq(skb);
	}

	tp->tx_cons = sw_idx;

	if (netif_queue_stopped(tp->dev) &&
	    (TX_BUFFS_AVAIL(tp) > TG3_TX_WAKEUP_THRESH))
		netif_wake_queue(tp->dev);
}
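
/* Editorial note: the queue is only woken once at least
 * TG3_TX_WAKEUP_THRESH (one quarter of the ring) descriptors are free,
 * rather than as soon as a single slot opens up.  The hysteresis keeps
 * the driver from bouncing between netif_stop_queue() and
 * netif_wake_queue() on every completed packet under load.
 */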

/* Returns size of skb allocated or < 0 on error.
 *
 * We only need to fill in the address because the other members
 * of the RX descriptor are invariant, see tg3_init_rings.
 *
 * Note the purposeful asymmetry of cpu vs. chip accesses.  For
 * posting buffers we only dirty the first cache line of the RX
 * descriptor (containing the address).  Whereas for the RX status
 * buffers the cpu only reads the last cacheline of the RX descriptor
 * (to fetch the error flags, vlan tag, checksum, and opaque cookie).
 */
static int tg3_alloc_rx_skb(struct tg3 *tp, u32 opaque_key,
			    int src_idx, u32 dest_idx_unmasked)
{
	struct tg3_rx_buffer_desc *desc;
	struct ring_info *map, *src_map;
	struct sk_buff *skb;
	dma_addr_t mapping;
	int skb_size, dest_idx;

	src_map = NULL;
	switch (opaque_key) {
	case RXD_OPAQUE_RING_STD:
		dest_idx = dest_idx_unmasked % TG3_RX_RING_SIZE;
		desc = &tp->rx_std[dest_idx];
		map = &tp->rx_std_buffers[dest_idx];
		if (src_idx >= 0)
			src_map = &tp->rx_std_buffers[src_idx];
		skb_size = RX_PKT_BUF_SZ;
		break;

	case RXD_OPAQUE_RING_JUMBO:
		dest_idx = dest_idx_unmasked % TG3_RX_JUMBO_RING_SIZE;
		desc = &tp->rx_jumbo[dest_idx];
		map = &tp->rx_jumbo_buffers[dest_idx];
		if (src_idx >= 0)
			src_map = &tp->rx_jumbo_buffers[src_idx];
		skb_size = RX_JUMBO_PKT_BUF_SZ;
		break;

	default:
		return -EINVAL;
	}

	/* Do not overwrite any of the map or rp information
	 * until we are sure we can commit to a new buffer.
	 *
	 * Callers depend upon this behavior and assume that
	 * we leave everything unchanged if we fail.
	 */
	skb = dev_alloc_skb(skb_size);
	if (skb == NULL)
		return -ENOMEM;

	skb->dev = tp->dev;
	skb_reserve(skb, tp->rx_offset);

	mapping = pci_map_single(tp->pdev, skb->data,
				 skb_size - tp->rx_offset,
				 PCI_DMA_FROMDEVICE);

	map->skb = skb;
	pci_unmap_addr_set(map, mapping, mapping);

	if (src_map != NULL)
		src_map->skb = NULL;

	desc->addr_hi = ((u64)mapping >> 32);
	desc->addr_lo = ((u64)mapping & 0xffffffff);

	return skb_size;
}

/* We only need to move over in the address because the other
 * members of the RX descriptor are invariant.  See notes above
 * tg3_alloc_rx_skb for full details.
 */
static void tg3_recycle_rx(struct tg3 *tp, u32 opaque_key,
			   int src_idx, u32 dest_idx_unmasked)
{
	struct tg3_rx_buffer_desc *src_desc, *dest_desc;
	struct ring_info *src_map, *dest_map;
	int dest_idx;

	switch (opaque_key) {
	case RXD_OPAQUE_RING_STD:
		dest_idx = dest_idx_unmasked % TG3_RX_RING_SIZE;
		dest_desc = &tp->rx_std[dest_idx];
		dest_map = &tp->rx_std_buffers[dest_idx];
		src_desc = &tp->rx_std[src_idx];
		src_map = &tp->rx_std_buffers[src_idx];
		break;

	case RXD_OPAQUE_RING_JUMBO:
		dest_idx = dest_idx_unmasked % TG3_RX_JUMBO_RING_SIZE;
		dest_desc = &tp->rx_jumbo[dest_idx];
		dest_map = &tp->rx_jumbo_buffers[dest_idx];
		src_desc = &tp->rx_jumbo[src_idx];
		src_map = &tp->rx_jumbo_buffers[src_idx];
		break;

	default:
		return;
	}

	dest_map->skb = src_map->skb;
	pci_unmap_addr_set(dest_map, mapping,
			   pci_unmap_addr(src_map, mapping));
	dest_desc->addr_hi = src_desc->addr_hi;
	dest_desc->addr_lo = src_desc->addr_lo;

	src_map->skb = NULL;
}

#if TG3_VLAN_TAG_USED
static int tg3_vlan_rx(struct tg3 *tp, struct sk_buff *skb, u16 vlan_tag)
{
	return vlan_hwaccel_receive_skb(skb, tp->vlgrp, vlan_tag);
}
#endif

/* The RX ring scheme is composed of multiple rings which post fresh
 * buffers to the chip, and one special ring the chip uses to report
 * status back to the host.
 *
 * The special ring reports the status of received packets to the
 * host.  The chip does not write into the original descriptor the
 * RX buffer was obtained from.  The chip simply takes the original
 * descriptor as provided by the host, updates the status and length
 * field, then writes this into the next status ring entry.
 *
 * Each ring the host uses to post buffers to the chip is described
 * by a TG3_BDINFO entry in the chip's SRAM area.  When a packet arrives,
 * it is first placed into the on-chip ram.  When the packet's length
 * is known, it walks down the TG3_BDINFO entries to select the ring.
 * Each TG3_BDINFO specifies a MAXLEN field and the first TG3_BDINFO
 * which is within the range of the new packet's length is chosen.
 *
 * The "separate ring for rx status" scheme may sound queer, but it makes
 * sense from a cache coherency perspective.  If only the host writes
 * to the buffer post rings, and only the chip writes to the rx status
 * rings, then cache lines never move beyond shared-modified state.
 * If both the host and chip were to write into the same ring, cache line
 * eviction could occur since both entities want it in an exclusive state.
 */
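
/* Worked example (editorial, with sizes taken from the defines above):
 * suppose both the standard and jumbo producer rings are posted.  A
 * 1400-byte frame fits the standard ring's MAXLEN, so the chip consumes
 * a standard-ring buffer and writes a completion -- carrying that
 * buffer's opaque cookie (ring + index), length, checksum, and
 * error/VLAN bits -- into the 1024-entry status (RCB) ring.  A
 * 9000-byte frame instead selects the jumbo ring.  tg3_rx() below only
 * ever reads the status ring and re-posts buffers to the producer
 * rings, preserving the one-writer-per-ring property described above.
 */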

static int tg3_rx(struct tg3 *tp, int budget)
{
	u32 work_mask;
	u32 rx_rcb_ptr = tp->rx_rcb_ptr;
	u16 hw_idx, sw_idx;
	int received;

	hw_idx = tp->hw_status->idx[0].rx_producer;
	sw_idx = rx_rcb_ptr % TG3_RX_RCB_RING_SIZE;
	work_mask = 0;
	received = 0;
	while (sw_idx != hw_idx && budget > 0) {
		struct tg3_rx_buffer_desc *desc = &tp->rx_rcb[sw_idx];
		unsigned int len;
		struct sk_buff *skb;
		dma_addr_t dma_addr;
		u32 opaque_key, desc_idx, *post_ptr;

		desc_idx = desc->opaque & RXD_OPAQUE_INDEX_MASK;
		opaque_key = desc->opaque & RXD_OPAQUE_RING_MASK;
		if (opaque_key == RXD_OPAQUE_RING_STD) {
			dma_addr = pci_unmap_addr(&tp->rx_std_buffers[desc_idx],
						  mapping);
			skb = tp->rx_std_buffers[desc_idx].skb;
			post_ptr = &tp->rx_std_ptr;
		} else if (opaque_key == RXD_OPAQUE_RING_JUMBO) {
			dma_addr = pci_unmap_addr(&tp->rx_jumbo_buffers[desc_idx],
						  mapping);
			skb = tp->rx_jumbo_buffers[desc_idx].skb;
			post_ptr = &tp->rx_jumbo_ptr;
		} else {
			goto next_pkt_nopost;
		}

		work_mask |= opaque_key;

		if ((desc->err_vlan & RXD_ERR_MASK) != 0 &&
		    (desc->err_vlan != RXD_ERR_ODD_NIBBLE_RCVD_MII)) {
		drop_it:
			tg3_recycle_rx(tp, opaque_key,
				       desc_idx, *post_ptr);
		drop_it_no_recycle:
			/* Other statistics kept track of by card. */
			tp->net_stats.rx_dropped++;
			goto next_pkt;
		}

		len = ((desc->idx_len & RXD_LEN_MASK) >> RXD_LEN_SHIFT) - 4; /* omit crc */

		if (len > RX_COPY_THRESHOLD) {
			int skb_size;

			skb_size = tg3_alloc_rx_skb(tp, opaque_key,
						    desc_idx, *post_ptr);
			if (skb_size < 0)
				goto drop_it;

			pci_unmap_single(tp->pdev, dma_addr,
					 skb_size - tp->rx_offset,
					 PCI_DMA_FROMDEVICE);

			skb_put(skb, len);
		} else {
			struct sk_buff *copy_skb;

			tg3_recycle_rx(tp, opaque_key,
				       desc_idx, *post_ptr);

			copy_skb = dev_alloc_skb(len + 2);
			if (copy_skb == NULL)
				goto drop_it_no_recycle;

			copy_skb->dev = tp->dev;
			skb_reserve(copy_skb, 2);
			skb_put(copy_skb, len);
			pci_dma_sync_single(tp->pdev, dma_addr, len, PCI_DMA_FROMDEVICE);
			memcpy(copy_skb->data, skb->data, len);

			/* We'll reuse the original ring buffer. */
			skb = copy_skb;
		}

		if ((tp->tg3_flags & TG3_FLAG_RX_CHECKSUMS) &&
		    (desc->type_flags & RXD_FLAG_TCPUDP_CSUM) &&
		    (((desc->ip_tcp_csum & RXD_TCPCSUM_MASK)
		      >> RXD_TCPCSUM_SHIFT) == 0xffff))
			skb->ip_summed = CHECKSUM_UNNECESSARY;
		else
			skb->ip_summed = CHECKSUM_NONE;

		skb->protocol = eth_type_trans(skb, tp->dev);
#if TG3_VLAN_TAG_USED
		if (tp->vlgrp != NULL &&
		    desc->type_flags & RXD_FLAG_VLAN) {
			tg3_vlan_rx(tp, skb,
				    desc->err_vlan & RXD_VLAN_MASK);
		} else
#endif
			netif_receive_skb(skb);

		tp->dev->last_rx = jiffies;
		received++;
		budget--;

next_pkt:
		(*post_ptr)++;
next_pkt_nopost:
		rx_rcb_ptr++;
		sw_idx = rx_rcb_ptr % TG3_RX_RCB_RING_SIZE;
	}

	/* ACK the status ring. */
	tp->rx_rcb_ptr = rx_rcb_ptr;
	tw32_mailbox(MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW,
		     (rx_rcb_ptr % TG3_RX_RCB_RING_SIZE));
	if (tp->tg3_flags & TG3_FLAG_MBOX_WRITE_REORDER)
		tr32(MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW);

	/* Refill RX ring(s). */
	if (work_mask & RXD_OPAQUE_RING_STD) {
		sw_idx = tp->rx_std_ptr % TG3_RX_RING_SIZE;
		tw32_mailbox(MAILBOX_RCV_STD_PROD_IDX + TG3_64BIT_REG_LOW,
			     sw_idx);
		if (tp->tg3_flags & TG3_FLAG_MBOX_WRITE_REORDER)
			tr32(MAILBOX_RCV_STD_PROD_IDX + TG3_64BIT_REG_LOW);
	}
	if (work_mask & RXD_OPAQUE_RING_JUMBO) {
		sw_idx = tp->rx_jumbo_ptr % TG3_RX_JUMBO_RING_SIZE;
		tw32_mailbox(MAILBOX_RCV_JUMBO_PROD_IDX + TG3_64BIT_REG_LOW,
			     sw_idx);
		if (tp->tg3_flags & TG3_FLAG_MBOX_WRITE_REORDER)
			tr32(MAILBOX_RCV_JUMBO_PROD_IDX + TG3_64BIT_REG_LOW);
	}

	return received;
}
2066 static int tg3_poll(struct net_device *netdev, int *budget)
2068 struct tg3 *tp = netdev->priv;
2069 struct tg3_hw_status *sblk = tp->hw_status;
2070 unsigned long flags;
2071 int done;
2073 spin_lock_irqsave(&tp->lock, flags);
2075 /* handle link change and other phy events */
2076 if (!(tp->tg3_flags &
2077 (TG3_FLAG_USE_LINKCHG_REG |
2078 TG3_FLAG_POLL_SERDES))) {
2079 if (sblk->status & SD_STATUS_LINK_CHG) {
2080 sblk->status = SD_STATUS_UPDATED |
2081 (sblk->status & ~SD_STATUS_LINK_CHG);
2082 tg3_setup_phy(tp);
2083 }
2084 }
2086 /* run TX completion thread */
2087 if (sblk->idx[0].tx_consumer != tp->tx_cons) {
2088 spin_lock(&tp->tx_lock);
2089 tg3_tx(tp);
2090 spin_unlock(&tp->tx_lock);
2091 }
2093 spin_unlock_irqrestore(&tp->lock, flags);
2095 /* run RX thread, within the bounds set by NAPI.
2096 * All RX "locking" is done by ensuring outside
2097 * code synchronizes with dev->poll()
2098 */
2099 done = 1;
2100 if (sblk->idx[0].rx_producer != tp->rx_rcb_ptr) {
2101 int orig_budget = *budget;
2102 int work_done;
2104 if (orig_budget > netdev->quota)
2105 orig_budget = netdev->quota;
2107 work_done = tg3_rx(tp, orig_budget);
2109 *budget -= work_done;
2110 netdev->quota -= work_done;
2112 if (work_done >= orig_budget)
2113 done = 0;
2114 }
2116 /* if no more work, tell net stack and NIC we're done */
2117 if (done) {
2118 spin_lock_irqsave(&tp->lock, flags);
2119 __netif_rx_complete(netdev);
2120 tg3_enable_ints(tp);
2121 spin_unlock_irqrestore(&tp->lock, flags);
2122 }
2124 return (done ? 0 : 1);
2125 }
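/* Editor's note: a minimal illustrative sketch (not driver code) of the
 * NAPI accounting tg3_poll() performs above.  A poll call may consume at
 * most min(*budget, dev->quota) packets; if it uses its whole allowance
 * it must return non-zero so it stays on the poll list.
 */
#if 0
static int demo_napi_accounting(int *budget, int *quota, int backlog)
{
	int allowance = (*budget < *quota) ? *budget : *quota;
	int work_done = (backlog < allowance) ? backlog : allowance;

	*budget -= work_done;		/* softirq-wide budget */
	*quota -= work_done;		/* per-device quota */

	return (work_done >= allowance);	/* non-zero: keep polling */
}
#endif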
2127 static inline unsigned int tg3_has_work(struct net_device *dev, struct tg3 *tp)
2128 {
2129 struct tg3_hw_status *sblk = tp->hw_status;
2130 unsigned int work_exists = 0;
2132 /* check for phy events */
2133 if (!(tp->tg3_flags &
2134 (TG3_FLAG_USE_LINKCHG_REG |
2135 TG3_FLAG_POLL_SERDES))) {
2136 if (sblk->status & SD_STATUS_LINK_CHG)
2137 work_exists = 1;
2138 }
2139 /* check for RX/TX work to do */
2140 if (sblk->idx[0].tx_consumer != tp->tx_cons ||
2141 sblk->idx[0].rx_producer != tp->rx_rcb_ptr)
2142 work_exists = 1;
2144 return work_exists;
2145 }
2147 static irqreturn_t tg3_interrupt(int irq, void *dev_id, struct pt_regs *regs)
2148 {
2149 struct net_device *dev = dev_id;
2150 struct tg3 *tp = dev->priv;
2151 struct tg3_hw_status *sblk = tp->hw_status;
2152 unsigned long flags;
2153 unsigned int handled = 1;
2155 spin_lock_irqsave(&tp->lock, flags);
2157 if (sblk->status & SD_STATUS_UPDATED) {
2158 /*
2159 * writing any value to intr-mbox-0 clears PCI INTA# and
2160 * chip-internal interrupt pending events.
2161 * writing non-zero to intr-mbox-0 additionally tells the
2162 * NIC to stop sending us irqs, engaging "in-intr-handler"
2163 * event coalescing.
2164 */
2165 tw32_mailbox(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
2166 0x00000001);
2167 /*
2168 * Flush PCI write. This also guarantees that our
2169 * status block has been flushed to host memory.
2170 */
2171 tr32(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW);
2172 sblk->status &= ~SD_STATUS_UPDATED;
2174 if (likely(tg3_has_work(dev, tp)))
2175 netif_rx_schedule(dev); /* schedule NAPI poll */
2176 else {
2177 /* no work, shared interrupt perhaps? re-enable
2178 * interrupts, and flush that PCI write
2179 */
2180 tw32_mailbox(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
2181 0x00000000);
2182 tr32(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW);
2183 }
2184 } else { /* shared interrupt */
2185 handled = 0;
2186 }
2188 spin_unlock_irqrestore(&tp->lock, flags);
2190 return IRQ_RETVAL(handled);
2191 }
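/* Editor's note: a minimal stand-in sketch (not driver code) of the
 * intr-mbox-0 protocol used by tg3_interrupt() above.  A non-zero write
 * ACKs the interrupt and masks further ones while the handler runs; a
 * zero write unmasks.  'demo_mbox' stands in for the MMIO mailbox and
 * each read-back models flushing the posted PCI write.
 */
#if 0
static volatile unsigned int demo_mbox;

static void demo_ack_and_mask(void)
{
	demo_mbox = 0x00000001;	/* clear INTA#, engage in-handler coalescing */
	(void) demo_mbox;	/* read back: force the posted write out */
}

static void demo_unmask(void)
{
	demo_mbox = 0x00000000;	/* let the NIC raise interrupts again */
	(void) demo_mbox;
}
#endif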
2193 static void tg3_init_rings(struct tg3 *);
2194 static int tg3_init_hw(struct tg3 *);
2195 static int tg3_halt(struct tg3 *);
2197 static void tg3_reset_task(void *_data)
2198 {
2199 struct tg3 *tp = _data;
2200 unsigned int restart_timer;
2202 tg3_netif_stop(tp);
2204 spin_lock_irq(&tp->lock);
2205 spin_lock(&tp->tx_lock);
2207 restart_timer = tp->tg3_flags2 & TG3_FLG2_RESTART_TIMER;
2208 tp->tg3_flags2 &= ~TG3_FLG2_RESTART_TIMER;
2210 tg3_halt(tp);
2211 tg3_init_rings(tp);
2212 tg3_init_hw(tp);
2214 spin_unlock(&tp->tx_lock);
2215 spin_unlock_irq(&tp->lock);
2217 tg3_netif_start(tp);
2219 if (restart_timer)
2220 mod_timer(&tp->timer, jiffies + 1);
2221 }
2223 static void tg3_tx_timeout(struct net_device *dev)
2224 {
2225 struct tg3 *tp = dev->priv;
2227 printk(KERN_ERR PFX "%s: transmit timed out, resetting\n",
2228 dev->name);
2230 schedule_work(&tp->reset_task);
2231 }
2233 static void tg3_set_txd(struct tg3 *, int, dma_addr_t, int, u32, u32);
2235 static int tigon3_4gb_hwbug_workaround(struct tg3 *tp, struct sk_buff *skb,
2236 u32 guilty_entry, int guilty_len,
2237 u32 last_plus_one, u32 *start, u32 mss)
2238 {
2239 struct sk_buff *new_skb = skb_copy(skb, GFP_ATOMIC);
2240 dma_addr_t new_addr;
2241 u32 entry = *start;
2242 int i;
2244 if (!new_skb) {
2245 dev_kfree_skb(skb);
2246 return -1;
2247 }
2249 /* New SKB is guaranteed to be linear. */
2250 entry = *start;
2251 new_addr = pci_map_single(tp->pdev, new_skb->data, new_skb->len,
2252 PCI_DMA_TODEVICE);
2253 tg3_set_txd(tp, entry, new_addr, new_skb->len,
2254 (skb->ip_summed == CHECKSUM_HW) ?
2255 TXD_FLAG_TCPUDP_CSUM : 0, 1 | (mss << 1));
2256 *start = NEXT_TX(entry);
2258 /* Now clean up the sw ring entries. */
2259 i = 0;
2260 while (entry != last_plus_one) {
2261 int len;
2263 if (i == 0)
2264 len = skb_headlen(skb);
2265 else
2266 len = skb_shinfo(skb)->frags[i-1].size;
2267 pci_unmap_single(tp->pdev,
2268 pci_unmap_addr(&tp->tx_buffers[entry], mapping),
2269 len, PCI_DMA_TODEVICE);
2270 if (i == 0) {
2271 tp->tx_buffers[entry].skb = new_skb;
2272 pci_unmap_addr_set(&tp->tx_buffers[entry], mapping, new_addr);
2273 } else {
2274 tp->tx_buffers[entry].skb = NULL;
2275 }
2276 entry = NEXT_TX(entry);
2277 }
2279 dev_kfree_skb(skb);
2281 return 0;
2282 }
2284 static void tg3_set_txd(struct tg3 *tp, int entry,
2285 dma_addr_t mapping, int len, u32 flags,
2286 u32 mss_and_is_end)
2287 {
2288 int is_end = (mss_and_is_end & 0x1);
2289 u32 mss = (mss_and_is_end >> 1);
2290 u32 vlan_tag = 0;
2292 if (is_end)
2293 flags |= TXD_FLAG_END;
2294 if (flags & TXD_FLAG_VLAN) {
2295 vlan_tag = flags >> 16;
2296 flags &= 0xffff;
2297 }
2298 vlan_tag |= (mss << TXD_MSS_SHIFT);
2299 if (tp->tg3_flags & TG3_FLAG_HOST_TXDS) {
2300 struct tg3_tx_buffer_desc *txd = &tp->tx_ring[entry];
2302 txd->addr_hi = ((u64) mapping >> 32);
2303 txd->addr_lo = ((u64) mapping & 0xffffffff);
2304 txd->len_flags = (len << TXD_LEN_SHIFT) | flags;
2305 txd->vlan_tag = vlan_tag << TXD_VLAN_TAG_SHIFT;
2306 } else {
2307 struct tx_ring_info *txr = &tp->tx_buffers[entry];
2308 unsigned long txd;
2310 txd = (tp->regs +
2311 NIC_SRAM_WIN_BASE +
2312 NIC_SRAM_TX_BUFFER_DESC);
2313 txd += (entry * TXD_SIZE);
2315 /* Save some PIOs */
2316 if (sizeof(dma_addr_t) != sizeof(u32))
2317 writel(((u64) mapping >> 32),
2318 txd + TXD_ADDR + TG3_64BIT_REG_HIGH);
2320 writel(((u64) mapping & 0xffffffff),
2321 txd + TXD_ADDR + TG3_64BIT_REG_LOW);
2322 writel(len << TXD_LEN_SHIFT | flags, txd + TXD_LEN_FLAGS);
2323 if (txr->prev_vlan_tag != vlan_tag) {
2324 writel(vlan_tag << TXD_VLAN_TAG_SHIFT, txd + TXD_VLAN_TAG);
2325 txr->prev_vlan_tag = vlan_tag;
2326 }
2327 }
2328 }
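/* Editor's note: an illustrative sketch (not driver code) of the
 * "save some PIOs" caching in tg3_set_txd() above.  Descriptor writes
 * into NIC SRAM cross the bus, so the last vlan_tag written is
 * remembered and the write is skipped when the value has not changed.
 * 'demo_reg' is a stand-in for the SRAM descriptor word.
 */
#if 0
static volatile unsigned int demo_reg;
static unsigned int demo_prev_tag = ~0U;

static void demo_write_vlan_tag(unsigned int tag)
{
	if (demo_prev_tag != tag) {	/* only touch hardware on change */
		demo_reg = tag;
		demo_prev_tag = tag;
	}
}
#endif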
2330 static inline int tg3_4g_overflow_test(dma_addr_t mapping, int len)
2331 {
2332 u32 base = (u32) mapping & 0xffffffff;
2334 return ((base > 0xffffdcc0) &&
2335 ((u64) mapping >> 32) == 0 &&
2336 (base + len + 8 < base));
2337 }
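/* Editor's note: a worked example (not driver code) of the predicate
 * above.  The affected DMA engine mishandles a buffer that sits below
 * 4GB but whose low 32 address bits would wrap past 0xffffffff inside
 * the transfer, so the test flags exactly that case (with an 8-byte
 * fudge beyond the buffer length).
 */
#if 0
#include <stdint.h>

static int demo_crosses_4g(uint64_t mapping, int len)
{
	uint32_t base = (uint32_t) mapping;

	return (base > 0xffffdcc0 &&
		(mapping >> 32) == 0 &&
		(uint32_t) (base + len + 8) < base);
}

/* demo_crosses_4g(0xffffff00ULL, 700) -> 1: 0xffffff00 + 708 wraps
 * demo_crosses_4g(0x00001000ULL, 700) -> 0: nowhere near the boundary
 */
#endif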
2339 static int tg3_start_xmit_4gbug(struct sk_buff *skb, struct net_device *dev)
2340 {
2341 struct tg3 *tp = dev->priv;
2342 dma_addr_t mapping;
2343 unsigned int i;
2344 u32 len, entry, base_flags, mss;
2345 int would_hit_hwbug;
2346 unsigned long flags;
2348 len = skb_headlen(skb);
2350 /* No BH disabling for tx_lock here. We are running in BH disabled
2351 * context and TX reclaim runs via tp->poll inside of a software
2352 * interrupt. Rejoice!
2354 * Actually, things are not so simple. If we are to take a hw
2355 * IRQ here, we can deadlock, consider:
2357 * CPU1 CPU2
2358 * tg3_start_xmit
2359 * take tp->tx_lock
2360 * tg3_timer
2361 * take tp->lock
2362 * tg3_interrupt
2363 * spin on tp->lock
2364 * spin on tp->tx_lock
2366 * So we really do need to disable interrupts when taking
2367 * tx_lock here.
2368 */
2369 spin_lock_irqsave(&tp->tx_lock, flags);
2371 /* This is a hard error, log it. */
2372 if (unlikely(TX_BUFFS_AVAIL(tp) <= (skb_shinfo(skb)->nr_frags + 1))) {
2373 netif_stop_queue(dev);
2374 spin_unlock_irqrestore(&tp->tx_lock, flags);
2375 printk(KERN_ERR PFX "%s: BUG! Tx Ring full when queue awake!\n",
2376 dev->name);
2377 return 1;
2378 }
2380 entry = tp->tx_prod;
2381 base_flags = 0;
2382 if (skb->ip_summed == CHECKSUM_HW)
2383 base_flags |= TXD_FLAG_TCPUDP_CSUM;
2384 #if TG3_DO_TSO != 0
2385 if ((mss = skb_shinfo(skb)->tso_size) != 0) {
2386 static int times = 0;
2388 mss += ((skb->h.th->doff * 4) - 20);
2389 base_flags |= (TXD_FLAG_CPU_PRE_DMA |
2390 TXD_FLAG_CPU_POST_DMA);
2392 if (times++ < 5) {
2393 printk("tg3_xmit: tso_size[%u] tso_segs[%u] len[%u]\n",
2394 (unsigned int) skb_shinfo(skb)->tso_size,
2395 (unsigned int) skb_shinfo(skb)->tso_segs,
2396 skb->len);
2397 }
2398 }
2399 #else
2400 mss = 0;
2401 #endif
2402 #if TG3_VLAN_TAG_USED
2403 if (tp->vlgrp != NULL && vlan_tx_tag_present(skb))
2404 base_flags |= (TXD_FLAG_VLAN |
2405 (vlan_tx_tag_get(skb) << 16));
2406 #endif
2408 /* Queue skb data, a.k.a. the main skb fragment. */
2409 mapping = pci_map_single(tp->pdev, skb->data, len, PCI_DMA_TODEVICE);
2411 tp->tx_buffers[entry].skb = skb;
2412 pci_unmap_addr_set(&tp->tx_buffers[entry], mapping, mapping);
2414 would_hit_hwbug = 0;
2416 if (tg3_4g_overflow_test(mapping, len))
2417 would_hit_hwbug = entry + 1;
2419 tg3_set_txd(tp, entry, mapping, len, base_flags,
2420 (skb_shinfo(skb)->nr_frags == 0) | (mss << 1));
2422 entry = NEXT_TX(entry);
2424 /* Now loop through additional data fragments, and queue them. */
2425 if (skb_shinfo(skb)->nr_frags > 0) {
2426 unsigned int i, last;
2428 last = skb_shinfo(skb)->nr_frags - 1;
2429 for (i = 0; i <= last; i++) {
2430 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
2432 len = frag->size;
2433 mapping = pci_map_page(tp->pdev,
2434 frag->page,
2435 frag->page_offset,
2436 len, PCI_DMA_TODEVICE);
2438 tp->tx_buffers[entry].skb = NULL;
2439 pci_unmap_addr_set(&tp->tx_buffers[entry], mapping, mapping);
2441 if (tg3_4g_overflow_test(mapping, len)) {
2442 /* Only one should match. */
2443 if (would_hit_hwbug)
2444 BUG();
2445 would_hit_hwbug = entry + 1;
2446 }
2448 tg3_set_txd(tp, entry, mapping, len,
2449 base_flags, (i == last));
2451 entry = NEXT_TX(entry);
2452 }
2453 }
2455 if (would_hit_hwbug) {
2456 u32 last_plus_one = entry;
2457 u32 start;
2458 unsigned int len = 0;
2460 would_hit_hwbug -= 1;
2461 entry = entry - 1 - skb_shinfo(skb)->nr_frags;
2462 entry &= (TG3_TX_RING_SIZE - 1);
2463 start = entry;
2464 i = 0;
2465 while (entry != last_plus_one) {
2466 if (i == 0)
2467 len = skb_headlen(skb);
2468 else
2469 len = skb_shinfo(skb)->frags[i-1].size;
2471 if (entry == would_hit_hwbug)
2472 break;
2474 i++;
2475 entry = NEXT_TX(entry);
2476 }
2479 /* If the workaround fails due to memory/mapping
2480 * failure, silently drop this packet.
2481 */
2482 if (tigon3_4gb_hwbug_workaround(tp, skb,
2483 entry, len,
2484 last_plus_one,
2485 &start, mss))
2486 goto out_unlock;
2488 entry = start;
2489 }
2491 /* Packets are ready, update Tx producer idx local and on card. */
2492 if (tp->tg3_flags & TG3_FLAG_HOST_TXDS) {
2493 tw32_mailbox((MAILBOX_SNDHOST_PROD_IDX_0 +
2494 TG3_64BIT_REG_LOW), entry);
2495 if (tp->tg3_flags & TG3_FLAG_TXD_MBOX_HWBUG)
2496 tw32_mailbox((MAILBOX_SNDHOST_PROD_IDX_0 +
2497 TG3_64BIT_REG_LOW), entry);
2498 if (tp->tg3_flags & TG3_FLAG_MBOX_WRITE_REORDER)
2499 tr32(MAILBOX_SNDHOST_PROD_IDX_0 +
2500 TG3_64BIT_REG_LOW);
2501 } else {
2502 /* First, make sure tg3 sees last descriptor fully
2503 * in SRAM.
2504 */
2505 if (tp->tg3_flags & TG3_FLAG_MBOX_WRITE_REORDER)
2506 tr32(MAILBOX_SNDNIC_PROD_IDX_0 +
2507 TG3_64BIT_REG_LOW);
2509 tw32_mailbox((MAILBOX_SNDNIC_PROD_IDX_0 +
2510 TG3_64BIT_REG_LOW), entry);
2511 if (tp->tg3_flags & TG3_FLAG_TXD_MBOX_HWBUG)
2512 tw32_mailbox((MAILBOX_SNDNIC_PROD_IDX_0 +
2513 TG3_64BIT_REG_LOW), entry);
2515 /* Now post the mailbox write itself. */
2516 if (tp->tg3_flags & TG3_FLAG_MBOX_WRITE_REORDER)
2517 tr32(MAILBOX_SNDNIC_PROD_IDX_0 +
2518 TG3_64BIT_REG_LOW);
2519 }
2521 tp->tx_prod = entry;
2522 if (TX_BUFFS_AVAIL(tp) <= (MAX_SKB_FRAGS + 1))
2523 netif_stop_queue(dev);
2525 out_unlock:
2526 spin_unlock_irqrestore(&tp->tx_lock, flags);
2528 dev->trans_start = jiffies;
2530 return 0;
2531 }
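/* Editor's note: an illustrative sketch (not driver code) of the
 * free-descriptor arithmetic behind the TX_BUFFS_AVAIL() checks above.
 * Only tx_pending of the 512 ring entries are usable; the remainder is
 * a deliberate gap so the producer can never catch the consumer.
 */
#if 0
#define DEMO_RING_SIZE 512

static int demo_tx_buffs_avail(unsigned int cons, unsigned int prod,
			       unsigned int pending)
{
	if (cons <= prod)
		return cons + pending - prod;
	return cons - prod - (DEMO_RING_SIZE - pending);
}

/* demo_tx_buffs_avail(0, 0, 511)    -> 511 (empty ring)
 * demo_tx_buffs_avail(500, 10, 511) -> 489 (producer wrapped past end)
 */
#endif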
2533 static int tg3_start_xmit(struct sk_buff *skb, struct net_device *dev)
2534 {
2535 struct tg3 *tp = dev->priv;
2536 dma_addr_t mapping;
2537 u32 len, entry, base_flags, mss;
2538 unsigned long flags;
2540 len = skb_headlen(skb);
2542 /* No BH disabling for tx_lock here. We are running in BH disabled
2543 * context and TX reclaim runs via tp->poll inside of a software
2544 * interrupt. Rejoice!
2546 * Actually, things are not so simple. If we are to take a hw
2547 * IRQ here, we can deadlock, consider:
2549 * CPU1 CPU2
2550 * tg3_start_xmit
2551 * take tp->tx_lock
2552 * tg3_timer
2553 * take tp->lock
2554 * tg3_interrupt
2555 * spin on tp->lock
2556 * spin on tp->tx_lock
2558 * So we really do need to disable interrupts when taking
2559 * tx_lock here.
2560 */
2561 spin_lock_irqsave(&tp->tx_lock, flags);
2563 /* This is a hard error, log it. */
2564 if (unlikely(TX_BUFFS_AVAIL(tp) <= (skb_shinfo(skb)->nr_frags + 1))) {
2565 netif_stop_queue(dev);
2566 spin_unlock_irqrestore(&tp->tx_lock, flags);
2567 printk(KERN_ERR PFX "%s: BUG! Tx Ring full when queue awake!\n",
2568 dev->name);
2569 return 1;
2570 }
2572 entry = tp->tx_prod;
2573 base_flags = 0;
2574 if (skb->ip_summed == CHECKSUM_HW)
2575 base_flags |= TXD_FLAG_TCPUDP_CSUM;
2576 #if TG3_DO_TSO != 0
2577 if ((mss = skb_shinfo(skb)->tso_size) != 0) {
2578 static int times = 0;
2580 /* TSO firmware wants TCP options included in
2581 * tx descriptor MSS value.
2582 */
2583 mss += ((skb->h.th->doff * 4) - 20);
2585 base_flags |= (TXD_FLAG_CPU_PRE_DMA |
2586 TXD_FLAG_CPU_POST_DMA);
2588 if (times++ < 5) {
2589 printk("tg3_xmit: tso_size[%u] tso_segs[%u] len[%u]\n",
2590 (unsigned int) skb_shinfo(skb)->tso_size,
2591 (unsigned int) skb_shinfo(skb)->tso_segs,
2592 skb->len);
2593 }
2594 }
2595 #else
2596 mss = 0;
2597 #endif
2598 #if TG3_VLAN_TAG_USED
2599 if (tp->vlgrp != NULL && vlan_tx_tag_present(skb))
2600 base_flags |= (TXD_FLAG_VLAN |
2601 (vlan_tx_tag_get(skb) << 16));
2602 #endif
2604 /* Queue skb data, a.k.a. the main skb fragment. */
2605 mapping = pci_map_single(tp->pdev, skb->data, len, PCI_DMA_TODEVICE);
2607 tp->tx_buffers[entry].skb = skb;
2608 pci_unmap_addr_set(&tp->tx_buffers[entry], mapping, mapping);
2610 tg3_set_txd(tp, entry, mapping, len, base_flags,
2611 (skb_shinfo(skb)->nr_frags == 0) | (mss << 1));
2613 entry = NEXT_TX(entry);
2615 /* Now loop through additional data fragments, and queue them. */
2616 if (skb_shinfo(skb)->nr_frags > 0) {
2617 unsigned int i, last;
2619 last = skb_shinfo(skb)->nr_frags - 1;
2620 for (i = 0; i <= last; i++) {
2621 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
2624 len = frag->size;
2625 mapping = pci_map_page(tp->pdev,
2626 frag->page,
2627 frag->page_offset,
2628 len, PCI_DMA_TODEVICE);
2630 tp->tx_buffers[entry].skb = NULL;
2631 pci_unmap_addr_set(&tp->tx_buffers[entry], mapping, mapping);
2633 tg3_set_txd(tp, entry, mapping, len,
2634 base_flags, (i == last));
2636 entry = NEXT_TX(entry);
2637 }
2638 }
2640 /* Packets are ready, update Tx producer idx local and on card.
2641 * We know this is not a 5700 (by virtue of not being a chip
2642 * requiring the 4GB overflow workaround) so we can safely omit
2643 * the double-write bug tests.
2644 */
2645 if (tp->tg3_flags & TG3_FLAG_HOST_TXDS) {
2646 tw32_mailbox((MAILBOX_SNDHOST_PROD_IDX_0 +
2647 TG3_64BIT_REG_LOW), entry);
2648 if (tp->tg3_flags & TG3_FLAG_MBOX_WRITE_REORDER)
2649 tr32(MAILBOX_SNDHOST_PROD_IDX_0 +
2650 TG3_64BIT_REG_LOW);
2651 } else {
2652 /* First, make sure tg3 sees last descriptor fully
2653 * in SRAM.
2654 */
2655 if (tp->tg3_flags & TG3_FLAG_MBOX_WRITE_REORDER)
2656 tr32(MAILBOX_SNDNIC_PROD_IDX_0 +
2657 TG3_64BIT_REG_LOW);
2659 tw32_mailbox((MAILBOX_SNDNIC_PROD_IDX_0 +
2660 TG3_64BIT_REG_LOW), entry);
2662 /* Now post the mailbox write itself. */
2663 if (tp->tg3_flags & TG3_FLAG_MBOX_WRITE_REORDER)
2664 tr32(MAILBOX_SNDNIC_PROD_IDX_0 +
2665 TG3_64BIT_REG_LOW);
2666 }
2668 tp->tx_prod = entry;
2669 if (TX_BUFFS_AVAIL(tp) <= (MAX_SKB_FRAGS + 1))
2670 netif_stop_queue(dev);
2672 spin_unlock_irqrestore(&tp->tx_lock, flags);
2674 dev->trans_start = jiffies;
2676 return 0;
2677 }
2679 static inline void tg3_set_mtu(struct net_device *dev, struct tg3 *tp,
2680 int new_mtu)
2681 {
2682 dev->mtu = new_mtu;
2684 if (new_mtu > ETH_DATA_LEN)
2685 tp->tg3_flags |= TG3_FLAG_JUMBO_ENABLE;
2686 else
2687 tp->tg3_flags &= ~TG3_FLAG_JUMBO_ENABLE;
2688 }
2690 static int tg3_change_mtu(struct net_device *dev, int new_mtu)
2691 {
2692 struct tg3 *tp = dev->priv;
2694 if (new_mtu < TG3_MIN_MTU || new_mtu > TG3_MAX_MTU)
2695 return -EINVAL;
2697 if (!netif_running(dev)) {
2698 /* We'll just catch it later when the
2699 * device is brought up.
2700 */
2701 tg3_set_mtu(dev, tp, new_mtu);
2702 return 0;
2703 }
2705 tg3_netif_stop(tp);
2706 spin_lock_irq(&tp->lock);
2707 spin_lock(&tp->tx_lock);
2709 tg3_halt(tp);
2711 tg3_set_mtu(dev, tp, new_mtu);
2713 tg3_init_rings(tp);
2714 tg3_init_hw(tp);
2716 spin_unlock(&tp->tx_lock);
2717 spin_unlock_irq(&tp->lock);
2718 tg3_netif_start(tp);
2720 return 0;
2721 }
2723 /* Free up pending packets in all rx/tx rings.
2725 * The chip has been shut down and the driver detached from
2726 * the network stack, so no interrupts or new tx packets will
2727 * end up in the driver. tp->{tx,}lock is not held and we are not
2728 * in an interrupt context and thus may sleep.
2729 */
2730 static void tg3_free_rings(struct tg3 *tp)
2731 {
2732 struct ring_info *rxp;
2733 int i;
2735 for (i = 0; i < TG3_RX_RING_SIZE; i++) {
2736 rxp = &tp->rx_std_buffers[i];
2738 if (rxp->skb == NULL)
2739 continue;
2740 pci_unmap_single(tp->pdev,
2741 pci_unmap_addr(rxp, mapping),
2742 RX_PKT_BUF_SZ - tp->rx_offset,
2743 PCI_DMA_FROMDEVICE);
2744 dev_kfree_skb_any(rxp->skb);
2745 rxp->skb = NULL;
2746 }
2748 for (i = 0; i < TG3_RX_JUMBO_RING_SIZE; i++) {
2749 rxp = &tp->rx_jumbo_buffers[i];
2751 if (rxp->skb == NULL)
2752 continue;
2753 pci_unmap_single(tp->pdev,
2754 pci_unmap_addr(rxp, mapping),
2755 RX_JUMBO_PKT_BUF_SZ - tp->rx_offset,
2756 PCI_DMA_FROMDEVICE);
2757 dev_kfree_skb_any(rxp->skb);
2758 rxp->skb = NULL;
2759 }
2761 for (i = 0; i < TG3_TX_RING_SIZE; ) {
2762 struct tx_ring_info *txp;
2763 struct sk_buff *skb;
2764 int j;
2766 txp = &tp->tx_buffers[i];
2767 skb = txp->skb;
2769 if (skb == NULL) {
2770 i++;
2771 continue;
2772 }
2774 pci_unmap_single(tp->pdev,
2775 pci_unmap_addr(txp, mapping),
2776 skb_headlen(skb),
2777 PCI_DMA_TODEVICE);
2778 txp->skb = NULL;
2780 i++;
2782 for (j = 0; j < skb_shinfo(skb)->nr_frags; j++) {
2783 txp = &tp->tx_buffers[i & (TG3_TX_RING_SIZE - 1)];
2784 pci_unmap_page(tp->pdev,
2785 pci_unmap_addr(txp, mapping),
2786 skb_shinfo(skb)->frags[j].size,
2787 PCI_DMA_TODEVICE);
2788 i++;
2789 }
2791 dev_kfree_skb_any(skb);
2792 }
2793 }
2795 /* Initialize tx/rx rings for packet processing.
2797 * The chip has been shut down and the driver detached from
2798 * the network stack, so no interrupts or new tx packets will
2799 * end up in the driver. tp->{tx,}lock is not held and we are not
2800 * in an interrupt context and thus may sleep.
2801 */
2802 static void tg3_init_rings(struct tg3 *tp)
2803 {
2804 unsigned long start, end;
2805 u32 i;
2807 /* Free up all the SKBs. */
2808 tg3_free_rings(tp);
2810 /* Zero out all descriptors. */
2811 memset(tp->rx_std, 0, TG3_RX_RING_BYTES);
2812 memset(tp->rx_jumbo, 0, TG3_RX_JUMBO_RING_BYTES);
2813 memset(tp->rx_rcb, 0, TG3_RX_RCB_RING_BYTES);
2815 if (tp->tg3_flags & TG3_FLAG_HOST_TXDS) {
2816 memset(tp->tx_ring, 0, TG3_TX_RING_BYTES);
2817 } else {
2818 start = (tp->regs +
2819 NIC_SRAM_WIN_BASE +
2820 NIC_SRAM_TX_BUFFER_DESC);
2821 end = start + TG3_TX_RING_BYTES;
2822 while (start < end) {
2823 writel(0, start);
2824 start += 4;
2825 }
2826 for (i = 0; i < TG3_TX_RING_SIZE; i++)
2827 tp->tx_buffers[i].prev_vlan_tag = 0;
2828 }
2830 /* Initialize invariants of the rings, we only set this
2831 * stuff once. This works because the card does not
2832 * write into the rx buffer posting rings.
2833 */
2834 for (i = 0; i < TG3_RX_RING_SIZE; i++) {
2835 struct tg3_rx_buffer_desc *rxd;
2837 rxd = &tp->rx_std[i];
2838 rxd->idx_len = (RX_PKT_BUF_SZ - tp->rx_offset - 64)
2839 << RXD_LEN_SHIFT;
2840 rxd->type_flags = (RXD_FLAG_END << RXD_FLAGS_SHIFT);
2841 rxd->opaque = (RXD_OPAQUE_RING_STD |
2842 (i << RXD_OPAQUE_INDEX_SHIFT));
2843 }
2845 if (tp->tg3_flags & TG3_FLAG_JUMBO_ENABLE) {
2846 for (i = 0; i < TG3_RX_JUMBO_RING_SIZE; i++) {
2847 struct tg3_rx_buffer_desc *rxd;
2849 rxd = &tp->rx_jumbo[i];
2850 rxd->idx_len = (RX_JUMBO_PKT_BUF_SZ - tp->rx_offset - 64)
2851 << RXD_LEN_SHIFT;
2852 rxd->type_flags = (RXD_FLAG_END << RXD_FLAGS_SHIFT) |
2853 RXD_FLAG_JUMBO;
2854 rxd->opaque = (RXD_OPAQUE_RING_JUMBO |
2855 (i << RXD_OPAQUE_INDEX_SHIFT));
2856 }
2857 }
2859 /* Now allocate fresh SKBs for each rx ring. */
2860 for (i = 0; i < tp->rx_pending; i++) {
2861 if (tg3_alloc_rx_skb(tp, RXD_OPAQUE_RING_STD,
2862 -1, i) < 0)
2863 break;
2864 }
2866 if (tp->tg3_flags & TG3_FLAG_JUMBO_ENABLE) {
2867 for (i = 0; i < tp->rx_jumbo_pending; i++) {
2868 if (tg3_alloc_rx_skb(tp, RXD_OPAQUE_RING_JUMBO,
2869 -1, i) < 0)
2870 break;
2871 }
2872 }
2873 }
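/* Editor's note: an illustrative sketch (not driver code) of the
 * 'opaque' cookie initialized once above.  Ring identity and buffer
 * index are packed into one descriptor field at init time, and tg3_rx()
 * recovers both from a completed descriptor.  The mask and ring bit
 * here are stand-ins, not the real tg3.h constants.
 */
#if 0
#define DEMO_OPAQUE_RING_STD	0x01000000U
#define DEMO_OPAQUE_INDEX_MASK	0x0000ffffU

static unsigned int demo_pack_opaque(unsigned int ring_bit, unsigned int idx)
{
	return ring_bit | (idx & DEMO_OPAQUE_INDEX_MASK);
}

static unsigned int demo_opaque_to_index(unsigned int opaque)
{
	return opaque & DEMO_OPAQUE_INDEX_MASK;
}
#endif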
2875 /*
2876 * Must not be invoked with interrupt sources disabled and
2877 * the hardware shut down.
2878 */
2879 static void tg3_free_consistent(struct tg3 *tp)
2880 {
2881 if (tp->rx_std_buffers) {
2882 kfree(tp->rx_std_buffers);
2883 tp->rx_std_buffers = NULL;
2884 }
2885 if (tp->rx_std) {
2886 pci_free_consistent(tp->pdev, TG3_RX_RING_BYTES,
2887 tp->rx_std, tp->rx_std_mapping);
2888 tp->rx_std = NULL;
2889 }
2890 if (tp->rx_jumbo) {
2891 pci_free_consistent(tp->pdev, TG3_RX_JUMBO_RING_BYTES,
2892 tp->rx_jumbo, tp->rx_jumbo_mapping);
2893 tp->rx_jumbo = NULL;
2894 }
2895 if (tp->rx_rcb) {
2896 pci_free_consistent(tp->pdev, TG3_RX_RCB_RING_BYTES,
2897 tp->rx_rcb, tp->rx_rcb_mapping);
2898 tp->rx_rcb = NULL;
2899 }
2900 if (tp->tx_ring) {
2901 pci_free_consistent(tp->pdev, TG3_TX_RING_BYTES,
2902 tp->tx_ring, tp->tx_desc_mapping);
2903 tp->tx_ring = NULL;
2904 }
2905 if (tp->hw_status) {
2906 pci_free_consistent(tp->pdev, TG3_HW_STATUS_SIZE,
2907 tp->hw_status, tp->status_mapping);
2908 tp->hw_status = NULL;
2909 }
2910 if (tp->hw_stats) {
2911 pci_free_consistent(tp->pdev, sizeof(struct tg3_hw_stats),
2912 tp->hw_stats, tp->stats_mapping);
2913 tp->hw_stats = NULL;
2914 }
2915 }
2917 /*
2918 * Must not be invoked with interrupt sources disabled and
2919 * the hardware shut down. Can sleep.
2920 */
2921 static int tg3_alloc_consistent(struct tg3 *tp)
2922 {
2923 tp->rx_std_buffers = kmalloc((sizeof(struct ring_info) *
2924 (TG3_RX_RING_SIZE +
2925 TG3_RX_JUMBO_RING_SIZE)) +
2926 (sizeof(struct tx_ring_info) *
2927 TG3_TX_RING_SIZE),
2928 GFP_KERNEL);
2929 if (!tp->rx_std_buffers)
2930 return -ENOMEM;
2932 memset(tp->rx_std_buffers, 0,
2933 (sizeof(struct ring_info) *
2934 (TG3_RX_RING_SIZE +
2935 TG3_RX_JUMBO_RING_SIZE)) +
2936 (sizeof(struct tx_ring_info) *
2937 TG3_TX_RING_SIZE));
2939 tp->rx_jumbo_buffers = &tp->rx_std_buffers[TG3_RX_RING_SIZE];
2940 tp->tx_buffers = (struct tx_ring_info *)
2941 &tp->rx_jumbo_buffers[TG3_RX_JUMBO_RING_SIZE];
2943 tp->rx_std = pci_alloc_consistent(tp->pdev, TG3_RX_RING_BYTES,
2944 &tp->rx_std_mapping);
2945 if (!tp->rx_std)
2946 goto err_out;
2948 tp->rx_jumbo = pci_alloc_consistent(tp->pdev, TG3_RX_JUMBO_RING_BYTES,
2949 &tp->rx_jumbo_mapping);
2951 if (!tp->rx_jumbo)
2952 goto err_out;
2954 tp->rx_rcb = pci_alloc_consistent(tp->pdev, TG3_RX_RCB_RING_BYTES,
2955 &tp->rx_rcb_mapping);
2956 if (!tp->rx_rcb)
2957 goto err_out;
2959 if (tp->tg3_flags & TG3_FLAG_HOST_TXDS) {
2960 tp->tx_ring = pci_alloc_consistent(tp->pdev, TG3_TX_RING_BYTES,
2961 &tp->tx_desc_mapping);
2962 if (!tp->tx_ring)
2963 goto err_out;
2964 } else {
2965 tp->tx_ring = NULL;
2966 tp->tx_desc_mapping = 0;
2967 }
2969 tp->hw_status = pci_alloc_consistent(tp->pdev,
2970 TG3_HW_STATUS_SIZE,
2971 &tp->status_mapping);
2972 if (!tp->hw_status)
2973 goto err_out;
2975 tp->hw_stats = pci_alloc_consistent(tp->pdev,
2976 sizeof(struct tg3_hw_stats),
2977 &tp->stats_mapping);
2978 if (!tp->hw_stats)
2979 goto err_out;
2981 memset(tp->hw_status, 0, TG3_HW_STATUS_SIZE);
2982 memset(tp->hw_stats, 0, sizeof(struct tg3_hw_stats));
2984 return 0;
2986 err_out:
2987 tg3_free_consistent(tp);
2988 return -ENOMEM;
2989 }
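/* Editor's note: an illustrative sketch (not driver code) of the
 * carve-up in tg3_alloc_consistent() above: one kmalloc() backs all
 * three bookkeeping arrays, so the single kfree() in
 * tg3_free_consistent() releases them all.  Standalone model with
 * malloc() and made-up element types:
 */
#if 0
#include <stdlib.h>

struct demo_rx_info { void *skb; };
struct demo_tx_info { void *skb; unsigned int prev_vlan_tag; };

static int demo_carve(void)
{
	struct demo_rx_info *std, *jumbo;
	struct demo_tx_info *tx;

	/* 512 std + 256 jumbo RX entries, then 512 TX entries. */
	std = calloc(1, sizeof(*std) * (512 + 256) + sizeof(*tx) * 512);
	if (!std)
		return -1;
	jumbo = &std[512];
	tx = (struct demo_tx_info *) &jumbo[256];
	(void) tx;
	free(std);	/* one free covers all three arrays */
	return 0;
}
#endif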
2991 #define MAX_WAIT_CNT 1000
2993 /* To stop a block, clear the enable bit and poll till it
2994 * clears. tp->lock is held.
2995 */
2996 static int tg3_stop_block(struct tg3 *tp, unsigned long ofs, u32 enable_bit)
2997 {
2998 unsigned int i;
2999 u32 val;
3001 val = tr32(ofs);
3002 val &= ~enable_bit;
3003 tw32(ofs, val);
3004 tr32(ofs);
3006 for (i = 0; i < MAX_WAIT_CNT; i++) {
3007 udelay(100);
3008 val = tr32(ofs);
3009 if ((val & enable_bit) == 0)
3010 break;
3011 }
3013 if (i == MAX_WAIT_CNT) {
3014 printk(KERN_ERR PFX "tg3_stop_block timed out, "
3015 "ofs=%lx enable_bit=%x\n",
3016 ofs, enable_bit);
3017 return -ENODEV;
3018 }
3020 return 0;
3021 }
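/* Editor's note: an illustrative sketch (not driver code) of the
 * clear-and-poll idiom in tg3_stop_block() above: clear the enable bit,
 * then poll, bounded by MAX_WAIT_CNT polls of 100us each, until the
 * hardware drops the bit.  The function pointers stand in for tr32/tw32.
 */
#if 0
static int demo_stop_block(unsigned int (*rd)(void),
			   void (*wr)(unsigned int),
			   unsigned int enable_bit, int max_polls)
{
	int i;

	wr(rd() & ~enable_bit);		/* request the block to stop */
	for (i = 0; i < max_polls; i++) {
		if (!(rd() & enable_bit))
			return 0;	/* hardware acknowledged */
		/* the driver udelay(100)s between polls */
	}
	return -1;			/* timed out, block still running */
}
#endif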
3023 /* tp->lock is held. */
3024 static int tg3_abort_hw(struct tg3 *tp)
3025 {
3026 int i, err;
3028 tg3_disable_ints(tp);
3030 tp->rx_mode &= ~RX_MODE_ENABLE;
3031 tw32(MAC_RX_MODE, tp->rx_mode);
3032 tr32(MAC_RX_MODE);
3033 udelay(10);
3035 err = tg3_stop_block(tp, RCVBDI_MODE, RCVBDI_MODE_ENABLE);
3036 err |= tg3_stop_block(tp, RCVLPC_MODE, RCVLPC_MODE_ENABLE);
3037 err |= tg3_stop_block(tp, RCVLSC_MODE, RCVLSC_MODE_ENABLE);
3038 err |= tg3_stop_block(tp, RCVDBDI_MODE, RCVDBDI_MODE_ENABLE);
3039 err |= tg3_stop_block(tp, RCVDCC_MODE, RCVDCC_MODE_ENABLE);
3040 err |= tg3_stop_block(tp, RCVCC_MODE, RCVCC_MODE_ENABLE);
3042 err |= tg3_stop_block(tp, SNDBDS_MODE, SNDBDS_MODE_ENABLE);
3043 err |= tg3_stop_block(tp, SNDBDI_MODE, SNDBDI_MODE_ENABLE);
3044 err |= tg3_stop_block(tp, SNDDATAI_MODE, SNDDATAI_MODE_ENABLE);
3045 err |= tg3_stop_block(tp, RDMAC_MODE, RDMAC_MODE_ENABLE);
3046 err |= tg3_stop_block(tp, SNDDATAC_MODE, SNDDATAC_MODE_ENABLE);
3047 err |= tg3_stop_block(tp, DMAC_MODE, DMAC_MODE_ENABLE);
3048 err |= tg3_stop_block(tp, SNDBDC_MODE, SNDBDC_MODE_ENABLE);
3049 if (err)
3050 goto out;
3052 tp->mac_mode &= ~MAC_MODE_TDE_ENABLE;
3053 tw32(MAC_MODE, tp->mac_mode);
3054 tr32(MAC_MODE);
3055 udelay(40);
3057 tp->tx_mode &= ~TX_MODE_ENABLE;
3058 tw32(MAC_TX_MODE, tp->tx_mode);
3059 tr32(MAC_TX_MODE);
3061 for (i = 0; i < MAX_WAIT_CNT; i++) {
3062 udelay(100);
3063 if (!(tr32(MAC_TX_MODE) & TX_MODE_ENABLE))
3064 break;
3065 }
3066 if (i >= MAX_WAIT_CNT) {
3067 printk(KERN_ERR PFX "tg3_abort_hw timed out for %s, "
3068 "TX_MODE_ENABLE will not clear MAC_TX_MODE=%08x\n",
3069 tp->dev->name, tr32(MAC_TX_MODE));
3070 return -ENODEV;
3071 }
3073 err = tg3_stop_block(tp, HOSTCC_MODE, HOSTCC_MODE_ENABLE);
3074 err |= tg3_stop_block(tp, WDMAC_MODE, WDMAC_MODE_ENABLE);
3075 err |= tg3_stop_block(tp, MBFREE_MODE, MBFREE_MODE_ENABLE);
3077 tw32(FTQ_RESET, 0xffffffff);
3078 tw32(FTQ_RESET, 0x00000000);
3080 err |= tg3_stop_block(tp, BUFMGR_MODE, BUFMGR_MODE_ENABLE);
3081 err |= tg3_stop_block(tp, MEMARB_MODE, MEMARB_MODE_ENABLE);
3082 if (err)
3083 goto out;
3085 memset(tp->hw_status, 0, TG3_HW_STATUS_SIZE);
3087 out:
3088 return err;
3089 }
3091 /* tp->lock is held. */
3092 static void tg3_chip_reset(struct tg3 *tp)
3093 {
3094 u32 val;
3095 u32 flags_save;
3097 /* Force NVRAM to settle.
3098 * This deals with a chip bug which can result in EEPROM
3099 * corruption.
3100 */
3101 if (tp->tg3_flags & TG3_FLAG_NVRAM) {
3102 int i;
3104 tw32(NVRAM_SWARB, SWARB_REQ_SET1);
3105 for (i = 0; i < 100000; i++) {
3106 if (tr32(NVRAM_SWARB) & SWARB_GNT1)
3107 break;
3108 udelay(10);
3109 }
3110 }
3112 /*
3113 * We must avoid the readl() that normally takes place.
3114 * It locks machines, causes machine checks, and other
3115 * fun things. So, temporarily disable the 5701
3116 * hardware workaround, while we do the reset.
3117 */
3118 flags_save = tp->tg3_flags;
3119 tp->tg3_flags &= ~TG3_FLAG_5701_REG_WRITE_BUG;
3121 /* do the reset */
3122 tw32(GRC_MISC_CFG, GRC_MISC_CFG_CORECLK_RESET);
3124 /* restore 5701 hardware bug workaround flag */
3125 tp->tg3_flags = flags_save;
3127 /* Flush PCI posted writes. The normal MMIO registers
3128 * are inaccessible at this time so this is the only
3129 * way to do this reliably. I tried to use indirect
3130 * register read/write but this upset some 5701 variants.
3131 */
3132 pci_read_config_dword(tp->pdev, PCI_COMMAND, &val);
3134 udelay(40);
3135 udelay(40);
3136 udelay(40);
3138 /* Re-enable indirect register accesses. */
3139 pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
3140 tp->misc_host_ctrl);
3142 /* Set MAX PCI retry to zero. */
3143 val = (PCISTATE_ROM_ENABLE | PCISTATE_ROM_RETRY_ENABLE);
3144 if (tp->pci_chip_rev_id == CHIPREV_ID_5704_A0 &&
3145 (tp->tg3_flags & TG3_FLAG_PCIX_MODE))
3146 val |= PCISTATE_RETRY_SAME_DMA;
3147 pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE, val);
3149 pci_restore_state(tp->pdev, tp->pci_cfg_state);
3151 /* Make sure PCI-X relaxed ordering bit is clear. */
3152 pci_read_config_dword(tp->pdev, TG3PCI_X_CAPS, &val);
3153 val &= ~PCIX_CAPS_RELAXED_ORDERING;
3154 pci_write_config_dword(tp->pdev, TG3PCI_X_CAPS, val);
3156 tw32(MEMARB_MODE, MEMARB_MODE_ENABLE);
3158 tw32(TG3PCI_MISC_HOST_CTRL, tp->misc_host_ctrl);
3159 }
3161 /* tp->lock is held. */
3162 static void tg3_stop_fw(struct tg3 *tp)
3163 {
3164 if (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) {
3165 u32 val;
3166 int i;
3168 tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX, FWCMD_NICDRV_PAUSE_FW);
3169 val = tr32(GRC_RX_CPU_EVENT);
3170 val |= (1 << 14);
3171 tw32(GRC_RX_CPU_EVENT, val);
3173 /* Wait for RX cpu to ACK the event. */
3174 for (i = 0; i < 100; i++) {
3175 if (!(tr32(GRC_RX_CPU_EVENT) & (1 << 14)))
3176 break;
3177 udelay(1);
3178 }
3179 }
3180 }
3182 /* tp->lock is held. */
3183 static int tg3_halt(struct tg3 *tp)
3184 {
3185 u32 val;
3186 int i;
3188 tg3_stop_fw(tp);
3189 tg3_abort_hw(tp);
3190 tg3_chip_reset(tp);
3191 tg3_write_mem(tp,
3192 NIC_SRAM_FIRMWARE_MBOX,
3193 NIC_SRAM_FIRMWARE_MBOX_MAGIC1);
3194 for (i = 0; i < 100000; i++) {
3195 tg3_read_mem(tp, NIC_SRAM_FIRMWARE_MBOX, &val);
3196 if (val == ~NIC_SRAM_FIRMWARE_MBOX_MAGIC1)
3197 break;
3198 udelay(10);
3199 }
3201 if (i >= 100000) {
3202 printk(KERN_ERR PFX "tg3_halt timed out for %s, "
3203 "firmware will not restart magic=%08x\n",
3204 tp->dev->name, val);
3205 return -ENODEV;
3206 }
3208 if (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) {
3209 if (tp->tg3_flags & TG3_FLAG_WOL_ENABLE)
3210 tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
3211 DRV_STATE_WOL);
3212 else
3213 tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
3214 DRV_STATE_UNLOAD);
3215 } else
3216 tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
3217 DRV_STATE_SUSPEND);
3219 return 0;
3220 }
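/* Editor's note: an illustrative sketch (not driver code) of the reboot
 * handshake polled in tg3_halt() above.  The driver stores MAGIC1 in
 * the firmware mailbox around the reset; restarted bootcode signals
 * completion by writing back the one's complement of that magic.
 */
#if 0
static int demo_fw_rebooted(unsigned int mbox_val, unsigned int magic)
{
	return mbox_val == ~magic;	/* ~MAGIC1 means bootcode is back up */
}
#endif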
3222 #define TG3_FW_RELEASE_MAJOR 0x0
3223 #define TG3_FW_RELASE_MINOR 0x0
3224 #define TG3_FW_RELEASE_FIX 0x0
3225 #define TG3_FW_START_ADDR 0x08000000
3226 #define TG3_FW_TEXT_ADDR 0x08000000
3227 #define TG3_FW_TEXT_LEN 0x9c0
3228 #define TG3_FW_RODATA_ADDR 0x080009c0
3229 #define TG3_FW_RODATA_LEN 0x60
3230 #define TG3_FW_DATA_ADDR 0x08000a40
3231 #define TG3_FW_DATA_LEN 0x20
3232 #define TG3_FW_SBSS_ADDR 0x08000a60
3233 #define TG3_FW_SBSS_LEN 0xc
3234 #define TG3_FW_BSS_ADDR 0x08000a70
3235 #define TG3_FW_BSS_LEN 0x10
3237 static u32 tg3FwText[(TG3_FW_TEXT_LEN / sizeof(u32)) + 1] = {
3238 0x00000000, 0x10000003, 0x00000000, 0x0000000d, 0x0000000d, 0x3c1d0800,
3239 0x37bd3ffc, 0x03a0f021, 0x3c100800, 0x26100000, 0x0e000018, 0x00000000,
3240 0x0000000d, 0x3c1d0800, 0x37bd3ffc, 0x03a0f021, 0x3c100800, 0x26100034,
3241 0x0e00021c, 0x00000000, 0x0000000d, 0x00000000, 0x00000000, 0x00000000,
3242 0x27bdffe0, 0x3c1cc000, 0xafbf0018, 0xaf80680c, 0x0e00004c, 0x241b2105,
3243 0x97850000, 0x97870002, 0x9782002c, 0x9783002e, 0x3c040800, 0x248409c0,
3244 0xafa00014, 0x00021400, 0x00621825, 0x00052c00, 0xafa30010, 0x8f860010,
3245 0x00e52825, 0x0e000060, 0x24070102, 0x3c02ac00, 0x34420100, 0x3c03ac01,
3246 0x34630100, 0xaf820490, 0x3c02ffff, 0xaf820494, 0xaf830498, 0xaf82049c,
3247 0x24020001, 0xaf825ce0, 0x0e00003f, 0xaf825d00, 0x0e000140, 0x00000000,
3248 0x8fbf0018, 0x03e00008, 0x27bd0020, 0x2402ffff, 0xaf825404, 0x8f835400,
3249 0x34630400, 0xaf835400, 0xaf825404, 0x3c020800, 0x24420034, 0xaf82541c,
3250 0x03e00008, 0xaf805400, 0x00000000, 0x00000000, 0x3c020800, 0x34423000,
3251 0x3c030800, 0x34633000, 0x3c040800, 0x348437ff, 0x3c010800, 0xac220a64,
3252 0x24020040, 0x3c010800, 0xac220a68, 0x3c010800, 0xac200a60, 0xac600000,
3253 0x24630004, 0x0083102b, 0x5040fffd, 0xac600000, 0x03e00008, 0x00000000,
3254 0x00804821, 0x8faa0010, 0x3c020800, 0x8c420a60, 0x3c040800, 0x8c840a68,
3255 0x8fab0014, 0x24430001, 0x0044102b, 0x3c010800, 0xac230a60, 0x14400003,
3256 0x00004021, 0x3c010800, 0xac200a60, 0x3c020800, 0x8c420a60, 0x3c030800,
3257 0x8c630a64, 0x91240000, 0x00021140, 0x00431021, 0x00481021, 0x25080001,
3258 0xa0440000, 0x29020008, 0x1440fff4, 0x25290001, 0x3c020800, 0x8c420a60,
3259 0x3c030800, 0x8c630a64, 0x8f84680c, 0x00021140, 0x00431021, 0xac440008,
3260 0xac45000c, 0xac460010, 0xac470014, 0xac4a0018, 0x03e00008, 0xac4b001c,
3261 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
3262 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
3263 0, 0, 0, 0, 0, 0,
3264 0x02000008, 0x00000000, 0x0a0001e3, 0x3c0a0001, 0x0a0001e3, 0x3c0a0002,
3265 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000,
3266 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000,
3267 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000,
3268 0x0a0001e3, 0x3c0a0007, 0x0a0001e3, 0x3c0a0008, 0x0a0001e3, 0x3c0a0009,
3269 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x3c0a000b,
3270 0x0a0001e3, 0x3c0a000c, 0x0a0001e3, 0x3c0a000d, 0x0a0001e3, 0x00000000,
3271 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x3c0a000e, 0x0a0001e3, 0x00000000,
3272 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000,
3273 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000,
3274 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x3c0a0013, 0x0a0001e3, 0x3c0a0014,
3275 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
3276 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
3277 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
3278 0x27bdffe0, 0x00001821, 0x00001021, 0xafbf0018, 0xafb10014, 0xafb00010,
3279 0x3c010800, 0x00220821, 0xac200a70, 0x3c010800, 0x00220821, 0xac200a74,
3280 0x3c010800, 0x00220821, 0xac200a78, 0x24630001, 0x1860fff5, 0x2442000c,
3281 0x24110001, 0x8f906810, 0x32020004, 0x14400005, 0x24040001, 0x3c020800,
3282 0x8c420a78, 0x18400003, 0x00002021, 0x0e000182, 0x00000000, 0x32020001,
3283 0x10400003, 0x00000000, 0x0e000169, 0x00000000, 0x0a000153, 0xaf915028,
3284 0x8fbf0018, 0x8fb10014, 0x8fb00010, 0x03e00008, 0x27bd0020, 0x3c050800,
3285 0x8ca50a70, 0x3c060800, 0x8cc60a80, 0x3c070800, 0x8ce70a78, 0x27bdffe0,
3286 0x3c040800, 0x248409d0, 0xafbf0018, 0xafa00010, 0x0e000060, 0xafa00014,
3287 0x0e00017b, 0x00002021, 0x8fbf0018, 0x03e00008, 0x27bd0020, 0x24020001,
3288 0x8f836810, 0x00821004, 0x00021027, 0x00621824, 0x03e00008, 0xaf836810,
3289 0x27bdffd8, 0xafbf0024, 0x1080002e, 0xafb00020, 0x8f825cec, 0xafa20018,
3290 0x8f825cec, 0x3c100800, 0x26100a78, 0xafa2001c, 0x34028000, 0xaf825cec,
3291 0x8e020000, 0x18400016, 0x00000000, 0x3c020800, 0x94420a74, 0x8fa3001c,
3292 0x000221c0, 0xac830004, 0x8fa2001c, 0x3c010800, 0x0e000201, 0xac220a74,
3293 0x10400005, 0x00000000, 0x8e020000, 0x24420001, 0x0a0001df, 0xae020000,
3294 0x3c020800, 0x8c420a70, 0x00021c02, 0x000321c0, 0x0a0001c5, 0xafa2001c,
3295 0x0e000201, 0x00000000, 0x1040001f, 0x00000000, 0x8e020000, 0x8fa3001c,
3296 0x24420001, 0x3c010800, 0xac230a70, 0x3c010800, 0xac230a74, 0x0a0001df,
3297 0xae020000, 0x3c100800, 0x26100a78, 0x8e020000, 0x18400028, 0x00000000,
3298 0x0e000201, 0x00000000, 0x14400024, 0x00000000, 0x8e020000, 0x3c030800,
3299 0x8c630a70, 0x2442ffff, 0xafa3001c, 0x18400006, 0xae020000, 0x00031402,
3300 0x000221c0, 0x8c820004, 0x3c010800, 0xac220a70, 0x97a2001e, 0x2442ff00,
3301 0x2c420300, 0x1440000b, 0x24024000, 0x3c040800, 0x248409dc, 0xafa00010,
3302 0xafa00014, 0x8fa6001c, 0x24050008, 0x0e000060, 0x00003821, 0x0a0001df,
3303 0x00000000, 0xaf825cf8, 0x3c020800, 0x8c420a40, 0x8fa3001c, 0x24420001,
3304 0xaf835cf8, 0x3c010800, 0xac220a40, 0x8fbf0024, 0x8fb00020, 0x03e00008,
3305 0x27bd0028, 0x27bdffe0, 0x3c040800, 0x248409e8, 0x00002821, 0x00003021,
3306 0x00003821, 0xafbf0018, 0xafa00010, 0x0e000060, 0xafa00014, 0x8fbf0018,
3307 0x03e00008, 0x27bd0020, 0x8f82680c, 0x8f85680c, 0x00021827, 0x0003182b,
3308 0x00031823, 0x00431024, 0x00441021, 0x00a2282b, 0x10a00006, 0x00000000,
3309 0x00401821, 0x8f82680c, 0x0043102b, 0x1440fffd, 0x00000000, 0x03e00008,
3310 0x00000000, 0x3c040800, 0x8c840000, 0x3c030800, 0x8c630a40, 0x0064102b,
3311 0x54400002, 0x00831023, 0x00641023, 0x2c420008, 0x03e00008, 0x38420001,
3312 0x27bdffe0, 0x00802821, 0x3c040800, 0x24840a00, 0x00003021, 0x00003821,
3313 0xafbf0018, 0xafa00010, 0x0e000060, 0xafa00014, 0x0a000216, 0x00000000,
3314 0x8fbf0018, 0x03e00008, 0x27bd0020, 0x00000000, 0x27bdffe0, 0x3c1cc000,
3315 0xafbf0018, 0x0e00004c, 0xaf80680c, 0x3c040800, 0x24840a10, 0x03802821,
3316 0x00003021, 0x00003821, 0xafa00010, 0x0e000060, 0xafa00014, 0x2402ffff,
3317 0xaf825404, 0x3c0200aa, 0x0e000234, 0xaf825434, 0x8fbf0018, 0x03e00008,
3318 0x27bd0020, 0x00000000, 0x00000000, 0x00000000, 0x27bdffe8, 0xafb00010,
3319 0x24100001, 0xafbf0014, 0x3c01c003, 0xac200000, 0x8f826810, 0x30422000,
3320 0x10400003, 0x00000000, 0x0e000246, 0x00000000, 0x0a00023a, 0xaf905428,
3321 0x8fbf0014, 0x8fb00010, 0x03e00008, 0x27bd0018, 0x27bdfff8, 0x8f845d0c,
3322 0x3c0200ff, 0x3c030800, 0x8c630a50, 0x3442fff8, 0x00821024, 0x1043001e,
3323 0x3c0500ff, 0x34a5fff8, 0x3c06c003, 0x3c074000, 0x00851824, 0x8c620010,
3324 0x3c010800, 0xac230a50, 0x30420008, 0x10400005, 0x00871025, 0x8cc20000,
3325 0x24420001, 0xacc20000, 0x00871025, 0xaf825d0c, 0x8fa20000, 0x24420001,
3326 0xafa20000, 0x8fa20000, 0x8fa20000, 0x24420001, 0xafa20000, 0x8fa20000,
3327 0x8f845d0c, 0x3c030800, 0x8c630a50, 0x00851024, 0x1443ffe8, 0x00851824,
3328 0x27bd0008, 0x03e00008, 0x00000000, 0x00000000, 0x00000000
3329 };
3331 static u32 tg3FwRodata[(TG3_FW_RODATA_LEN / sizeof(u32)) + 1] = {
3332 0x35373031, 0x726c7341, 0x00000000, 0x00000000, 0x53774576, 0x656e7430,
3333 0x00000000, 0x726c7045, 0x76656e74, 0x31000000, 0x556e6b6e, 0x45766e74,
3334 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x66617461, 0x6c457272,
3335 0x00000000, 0x00000000, 0x4d61696e, 0x43707542, 0x00000000, 0x00000000,
3336 0x00000000
3337 };
3339 #if 0 /* All zeros, don't eat up space with it. */
3340 u32 tg3FwData[(TG3_FW_DATA_LEN / sizeof(u32)) + 1] = {
3341 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
3342 0x00000000, 0x00000000, 0x00000000, 0x00000000
3343 };
3344 #endif
3346 #define RX_CPU_SCRATCH_BASE 0x30000
3347 #define RX_CPU_SCRATCH_SIZE 0x04000
3348 #define TX_CPU_SCRATCH_BASE 0x34000
3349 #define TX_CPU_SCRATCH_SIZE 0x04000
3351 /* tp->lock is held. */
3352 static int tg3_reset_cpu(struct tg3 *tp, u32 offset)
3353 {
3354 int i;
3356 tw32(offset + CPU_STATE, 0xffffffff);
3357 tw32(offset + CPU_MODE, CPU_MODE_RESET);
3358 if (offset == RX_CPU_BASE) {
3359 for (i = 0; i < 10000; i++)
3360 if (!(tr32(offset + CPU_MODE) & CPU_MODE_RESET))
3361 break;
3362 tw32(offset + CPU_STATE, 0xffffffff);
3363 tw32(offset + CPU_MODE, CPU_MODE_RESET);
3364 tr32(offset + CPU_MODE);
3365 udelay(10);
3366 } else {
3367 for (i = 0; i < 10000; i++) {
3368 if (!(tr32(offset + CPU_MODE) & CPU_MODE_RESET))
3369 break;
3370 tw32(offset + CPU_STATE, 0xffffffff);
3371 tw32(offset + CPU_MODE, CPU_MODE_RESET);
3372 tr32(offset + CPU_MODE);
3373 udelay(10);
3374 }
3375 }
3377 if (i >= 10000) {
3378 printk(KERN_ERR PFX "tg3_reset_cpu timed out for %s, "
3379 "and %s CPU\n",
3380 tp->dev->name,
3381 (offset == RX_CPU_BASE ? "RX" : "TX"));
3382 return -ENODEV;
3383 }
3384 return 0;
3385 }
3387 struct fw_info {
3388 unsigned int text_base;
3389 unsigned int text_len;
3390 u32 *text_data;
3391 unsigned int rodata_base;
3392 unsigned int rodata_len;
3393 u32 *rodata_data;
3394 unsigned int data_base;
3395 unsigned int data_len;
3396 u32 *data_data;
3397 };
3399 /* tp->lock is held. */
3400 static int tg3_load_firmware_cpu(struct tg3 *tp, u32 cpu_base, u32 cpu_scratch_base,
3401 int cpu_scratch_size, struct fw_info *info)
3402 {
3403 int err, i;
3404 u32 orig_tg3_flags = tp->tg3_flags;
3406 /* Force use of PCI config space for indirect register
3407 * write calls.
3409 tp->tg3_flags |= TG3_FLAG_PCIX_TARGET_HWBUG;
3411 err = tg3_reset_cpu(tp, cpu_base);
3412 if (err)
3413 goto out;
3415 for (i = 0; i < cpu_scratch_size; i += sizeof(u32))
3416 tg3_write_indirect_reg32(tp, cpu_scratch_base + i, 0);
3417 tw32(cpu_base + CPU_STATE, 0xffffffff);
3418 tw32(cpu_base + CPU_MODE, tr32(cpu_base+CPU_MODE)|CPU_MODE_HALT);
3419 for (i = 0; i < (info->text_len / sizeof(u32)); i++)
3420 tg3_write_indirect_reg32(tp, (cpu_scratch_base +
3421 (info->text_base & 0xffff) +
3422 (i * sizeof(u32))),
3423 (info->text_data ?
3424 info->text_data[i] : 0));
3425 for (i = 0; i < (info->rodata_len / sizeof(u32)); i++)
3426 tg3_write_indirect_reg32(tp, (cpu_scratch_base +
3427 (info->rodata_base & 0xffff) +
3428 (i * sizeof(u32))),
3429 (info->rodata_data ?
3430 info->rodata_data[i] : 0));
3431 for (i = 0; i < (info->data_len / sizeof(u32)); i++)
3432 tg3_write_indirect_reg32(tp, (cpu_scratch_base +
3433 (info->data_base & 0xffff) +
3434 (i * sizeof(u32))),
3435 (info->data_data ?
3436 info->data_data[i] : 0));
3438 err = 0;
3440 out:
3441 tp->tg3_flags = orig_tg3_flags;
3442 return err;
3443 }
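/* Editor's note: a worked example (not driver code) of the address math
 * in tg3_load_firmware_cpu() above.  A section's link-time address
 * (e.g. TG3_FW_RODATA_ADDR = 0x080009c0) becomes a scratch-RAM offset
 * by keeping only its low 16 bits; each u32 is then written at a
 * 4-byte stride.
 */
#if 0
#include <stdint.h>

static uint32_t demo_scratch_dest(uint32_t scratch_base,
				  uint32_t section_addr, uint32_t word_idx)
{
	return scratch_base + (section_addr & 0xffff) + word_idx * 4;
}

/* demo_scratch_dest(0x30000, 0x080009c0, 3) -> 0x309cc:
 * word 3 of .rodata lands at RX scratch base + 0x9c0 + 12.
 */
#endif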
3445 /* tp->lock is held. */
3446 static int tg3_load_5701_a0_firmware_fix(struct tg3 *tp)
3447 {
3448 struct fw_info info;
3449 int err, i;
3451 info.text_base = TG3_FW_TEXT_ADDR;
3452 info.text_len = TG3_FW_TEXT_LEN;
3453 info.text_data = &tg3FwText[0];
3454 info.rodata_base = TG3_FW_RODATA_ADDR;
3455 info.rodata_len = TG3_FW_RODATA_LEN;
3456 info.rodata_data = &tg3FwRodata[0];
3457 info.data_base = TG3_FW_DATA_ADDR;
3458 info.data_len = TG3_FW_DATA_LEN;
3459 info.data_data = NULL;
3461 err = tg3_load_firmware_cpu(tp, RX_CPU_BASE,
3462 RX_CPU_SCRATCH_BASE, RX_CPU_SCRATCH_SIZE,
3463 &info);
3464 if (err)
3465 return err;
3467 err = tg3_load_firmware_cpu(tp, TX_CPU_BASE,
3468 TX_CPU_SCRATCH_BASE, TX_CPU_SCRATCH_SIZE,
3469 &info);
3470 if (err)
3471 return err;
3473 /* Now startup only the RX cpu. */
3474 tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff);
3475 tw32(RX_CPU_BASE + CPU_PC, TG3_FW_TEXT_ADDR);
3477 /* Flush posted writes. */
3478 tr32(RX_CPU_BASE + CPU_PC);
3479 for (i = 0; i < 5; i++) {
3480 if (tr32(RX_CPU_BASE + CPU_PC) == TG3_FW_TEXT_ADDR)
3481 break;
3482 tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff);
3483 tw32(RX_CPU_BASE + CPU_MODE, CPU_MODE_HALT);
3484 tw32(RX_CPU_BASE + CPU_PC, TG3_FW_TEXT_ADDR);
3486 /* Flush posted writes. */
3487 tr32(RX_CPU_BASE + CPU_PC);
3489 udelay(1000);
3490 }
3491 if (i >= 5) {
3492 printk(KERN_ERR PFX "tg3_load_firmware fails for %s "
3493 "to set RX CPU PC, is %08x should be %08x\n",
3494 tp->dev->name, tr32(RX_CPU_BASE + CPU_PC),
3495 TG3_FW_TEXT_ADDR);
3496 return -ENODEV;
3497 }
3498 tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff);
3499 tw32(RX_CPU_BASE + CPU_MODE, 0x00000000);
3501 /* Flush posted writes. */
3502 tr32(RX_CPU_BASE + CPU_MODE);
3504 return 0;
3505 }
3507 #if TG3_DO_TSO != 0
3509 #define TG3_TSO_FW_RELEASE_MAJOR 0x1
3510 #define TG3_TSO_FW_RELASE_MINOR 0x8
3511 #define TG3_TSO_FW_RELEASE_FIX 0x0
3512 #define TG3_TSO_FW_START_ADDR 0x08000000
3513 #define TG3_TSO_FW_TEXT_ADDR 0x08000000
3514 #define TG3_TSO_FW_TEXT_LEN 0x1650
3515 #define TG3_TSO_FW_RODATA_ADDR 0x08001650
3516 #define TG3_TSO_FW_RODATA_LEN 0x30
3517 #define TG3_TSO_FW_DATA_ADDR 0x080016a0
3518 #define TG3_TSO_FW_DATA_LEN 0x20
3519 #define TG3_TSO_FW_SBSS_ADDR 0x080016c0
3520 #define TG3_TSO_FW_SBSS_LEN 0x14
3521 #define TG3_TSO_FW_BSS_ADDR 0x080016e0
3522 #define TG3_TSO_FW_BSS_LEN 0x8fc
3524 static u32 tg3TsoFwText[] = {
3525 0x00000000, 0x10000003, 0x00000000, 0x0000000d, 0x0000000d, 0x3c1d0800,
3526 0x37bd4000, 0x03a0f021, 0x3c100800, 0x26100000, 0x0e000010, 0x00000000,
3527 0x0000000d, 0x00000000, 0x00000000, 0x00000000, 0x27bdffe0, 0x3c1bc000,
3528 0xafbf0018, 0x0e000058, 0xaf60680c, 0x3c040800, 0x24841650, 0x03602821,
3529 0x24060001, 0x24070004, 0xafa00010, 0x0e00006c, 0xafa00014, 0x8f625c50,
3530 0x34420001, 0xaf625c50, 0x8f625c90, 0x34420001, 0xaf625c90, 0x2402ffff,
3531 0x0e000098, 0xaf625404, 0x8fbf0018, 0x03e00008, 0x27bd0020, 0x00000000,
3532 0x00000000, 0x00000000, 0x24030b60, 0x24050fff, 0xac000b50, 0x00002021,
3533 0xac640000, 0x24630004, 0x0065102b, 0x1440fffc, 0x24840001, 0x24030b60,
3534 0x0065102b, 0x10400011, 0x00002021, 0x24090b54, 0x3c06dead, 0x34c6beef,
3535 0x24080b58, 0x24070b5c, 0x8c620000, 0x50440006, 0x24630004, 0xad260000,
3536 0x8c620000, 0xace40000, 0xad020000, 0x24630004, 0x0065102b, 0x1440fff6,
3537 0x24840001, 0x03e00008, 0x00000000, 0x27bdfff8, 0x18800009, 0x00002821,
3538 0x8f63680c, 0x8f62680c, 0x1043fffe, 0x00000000, 0x24a50001, 0x00a4102a,
3539 0x1440fff9, 0x00000000, 0x03e00008, 0x27bd0008, 0x3c020800, 0x34423000,
3540 0x3c030800, 0x34633000, 0x3c040800, 0x348437ff, 0x3c010800, 0xac2216c4,
3541 0x24020040, 0x3c010800, 0xac2216c8, 0x3c010800, 0xac2016c0, 0xac600000,
3542 0x24630004, 0x0083102b, 0x5040fffd, 0xac600000, 0x03e00008, 0x00000000,
3543 0x00804821, 0x8faa0010, 0x3c020800, 0x8c4216c0, 0x3c040800, 0x8c8416c8,
3544 0x8fab0014, 0x24430001, 0x0044102b, 0x3c010800, 0xac2316c0, 0x14400003,
3545 0x00004021, 0x3c010800, 0xac2016c0, 0x3c020800, 0x8c4216c0, 0x3c030800,
3546 0x8c6316c4, 0x91240000, 0x00021140, 0x00431021, 0x00481021, 0x25080001,
3547 0xa0440000, 0x29020008, 0x1440fff4, 0x25290001, 0x3c020800, 0x8c4216c0,
3548 0x3c030800, 0x8c6316c4, 0x8f64680c, 0x00021140, 0x00431021, 0xac440008,
3549 0xac45000c, 0xac460010, 0xac470014, 0xac4a0018, 0x03e00008, 0xac4b001c,
3550 0x00000000, 0x00000000, 0x27bdffe0, 0xafbf0018, 0xafb10014, 0x0e0000b6,
3551 0xafb00010, 0x24110001, 0x8f706820, 0x32020100, 0x10400003, 0x00000000,
3552 0x0e000127, 0x00000000, 0x8f706820, 0x32022000, 0x10400004, 0x32020001,
3553 0x0e00025a, 0x24040001, 0x32020001, 0x10400003, 0x00000000, 0x0e0000e6,
3554 0x00000000, 0x0a00009e, 0xaf715028, 0x8fbf0018, 0x8fb10014, 0x8fb00010,
3555 0x03e00008, 0x27bd0020, 0x27bdffe0, 0x3c040800, 0x24841660, 0x00002821,
3556 0x00003021, 0x00003821, 0xafbf0018, 0xafa00010, 0x0e00006c, 0xafa00014,
3557 0x3c010800, 0xa4201fb8, 0x3c010800, 0xa02016f8, 0x3c010800, 0xac2016fc,
3558 0x3c010800, 0xac201700, 0x3c010800, 0xac201704, 0x3c010800, 0xac20170c,
3559 0x3c010800, 0xac201718, 0x3c010800, 0xac20171c, 0x8f624434, 0x3c010800,
3560 0xac2216e8, 0x8f624438, 0x3c010800, 0xac2216ec, 0x8f624410, 0x3c010800,
3561 0xac2016e0, 0x3c010800, 0xac2016e4, 0x3c010800, 0xac201fc0, 0x3c010800,
3562 0xac201f68, 0x3c010800, 0xac201f6c, 0x3c010800, 0xac2216f0, 0x8fbf0018,
3563 0x03e00008, 0x27bd0020, 0x27bdffe0, 0x3c040800, 0x2484166c, 0x00002821,
3564 0x00003021, 0x00003821, 0xafbf0018, 0xafa00010, 0x0e00006c, 0xafa00014,
3565 0x3c040800, 0x24841660, 0x00002821, 0x00003021, 0x00003821, 0xafa00010,
3566 0x0e00006c, 0xafa00014, 0x3c010800, 0xa4201fb8, 0x3c010800, 0xa02016f8,
3567 0x3c010800, 0xac2016fc, 0x3c010800, 0xac201700, 0x3c010800, 0xac201704,
3568 0x3c010800, 0xac20170c, 0x3c010800, 0xac201718, 0x3c010800, 0xac20171c,
3569 0x8f624434, 0x3c010800, 0xac2216e8, 0x8f624438, 0x3c010800, 0xac2216ec,
3570 0x8f624410, 0x3c010800, 0xac2016e0, 0x3c010800, 0xac2016e4, 0x3c010800,
3571 0xac201fc0, 0x3c010800, 0xac201f68, 0x3c010800, 0xac201f6c, 0x3c010800,
3572 0xac2216f0, 0x0e000120, 0x00002021, 0x8fbf0018, 0x03e00008, 0x27bd0020,
3573 0x24020001, 0x8f636820, 0x00821004, 0x00021027, 0x00621824, 0x03e00008,
3574 0xaf636820, 0x27bdffd0, 0x3c0300ff, 0xafbf002c, 0xafb60028, 0xafb50024,
3575 0xafb40020, 0xafb3001c, 0xafb20018, 0xafb10014, 0xafb00010, 0x8f665c5c,
3576 0x3c040800, 0x2484171c, 0x8c820000, 0x3463fff8, 0x14460005, 0x00c38824,
3577 0x3c020800, 0x904216f8, 0x14400115, 0x00000000, 0x00111902, 0x306300ff,
3578 0x30c20003, 0x000211c0, 0x00623825, 0x00e02821, 0x00061602, 0xac860000,
3579 0x3c030800, 0x906316f8, 0x3044000f, 0x1460002b, 0x00804021, 0x24020001,
3580 0x3c010800, 0xa02216f8, 0x00071100, 0x00821025, 0x3c010800, 0xac2016fc,
3581 0x3c010800, 0xac201700, 0x3c010800, 0xac201704, 0x3c010800, 0xac20170c,
3582 0x3c010800, 0xac201718, 0x3c010800, 0xac201710, 0x3c010800, 0xac201714,
3583 0x3c010800, 0xa4221fb8, 0x9623000c, 0x30628000, 0x10400008, 0x30627fff,
3584 0x2442003e, 0x3c010800, 0xa42216f6, 0x24020001, 0x3c010800, 0x0a00016e,
3585 0xac221fd4, 0x24620036, 0x3c010800, 0xa42216f6, 0x3c010800, 0xac201fd4,
3586 0x3c010800, 0xac201fd0, 0x3c010800, 0x0a000176, 0xac201fd8, 0x9622000c,
3587 0x3c010800, 0xa4221fcc, 0x3c040800, 0x248416fc, 0x8c820000, 0x00021100,
3588 0x3c010800, 0x00220821, 0xac311728, 0x8c820000, 0x00021100, 0x3c010800,
3589 0x00220821, 0xac26172c, 0x8c820000, 0x24a30001, 0x306701ff, 0x00021100,
3590 0x3c010800, 0x00220821, 0xac271730, 0x8c820000, 0x00021100, 0x3c010800,
3591 0x00220821, 0xac281734, 0x96230008, 0x3c020800, 0x8c42170c, 0x00432821,
3592 0x3c010800, 0xac25170c, 0x9622000a, 0x30420004, 0x14400019, 0x00071100,
3593 0x3c02c000, 0x00c21825, 0xaf635c5c, 0x8f625c50, 0x30420002, 0x1440fffc,
3594 0x00000000, 0x8f630c14, 0x3063000f, 0x2c620002, 0x1440001e, 0x00000000,
3595 0x8f630c14, 0x3c020800, 0x8c4216b4, 0x3063000f, 0x24420001, 0x3c010800,
3596 0xac2216b4, 0x2c620002, 0x1040fff7, 0x00000000, 0x0a0001c1, 0x00000000,
3597 0x3c030800, 0x8c6316e0, 0x3c040800, 0x948416f4, 0x01021025, 0x3c010800,
3598 0xa4221fba, 0x24020001, 0x3c010800, 0xac221718, 0x24630001, 0x0085202a,
3599 0x3c010800, 0x10800003, 0xac2316e0, 0x3c010800, 0xa42516f4, 0x3c030800,
3600 0x246316fc, 0x8c620000, 0x24420001, 0xac620000, 0x28420080, 0x14400005,
3601 0x24020001, 0x0e0002df, 0x24040002, 0x0a000250, 0x00000000, 0x3c030800,
3602 0x906316f8, 0x1462007c, 0x24020003, 0x3c160800, 0x96d616f6, 0x3c050800,
3603 0x8ca5170c, 0x32c4ffff, 0x00a4102a, 0x14400078, 0x00000000, 0x3c020800,
3604 0x8c421718, 0x10400005, 0x32c2ffff, 0x14a40003, 0x00000000, 0x3c010800,
3605 0xac231fd0, 0x10400062, 0x00009021, 0x0040a021, 0x3c150800, 0x26b51700,
3606 0x26b30010, 0x8ea20000, 0x00028100, 0x3c110800, 0x02308821, 0x0e0002e1,
3607 0x8e311728, 0x00403021, 0x10c00059, 0x00000000, 0x9628000a, 0x31020040,
3608 0x10400004, 0x2407180c, 0x8e22000c, 0x2407188c, 0xacc20018, 0x31021000,
3609 0x10400004, 0x34e32000, 0x00081040, 0x3042c000, 0x00623825, 0x3c030800,
3610 0x00701821, 0x8c631730, 0x3c020800, 0x00501021, 0x8c421734, 0x00031d00,
3611 0x00021400, 0x00621825, 0xacc30014, 0x8ea30004, 0x96220008, 0x00432023,
3612 0x3242ffff, 0x3083ffff, 0x00431021, 0x0282102a, 0x14400002, 0x02d22823,
3613 0x00802821, 0x8e620000, 0x30a4ffff, 0x00441021, 0xae620000, 0x8e220000,
3614 0xacc20000, 0x8e220004, 0x8e63fff4, 0x00431021, 0xacc20004, 0xa4c5000e,
3615 0x8e62fff4, 0x00441021, 0xae62fff4, 0x96230008, 0x0043102a, 0x14400005,
3616 0x02459021, 0x8e62fff0, 0xae60fff4, 0x24420001, 0xae62fff0, 0xacc00008,
3617 0x3242ffff, 0x14540008, 0x24020305, 0x31020080, 0x54400001, 0x34e70010,
3618 0x24020905, 0xa4c2000c, 0x0a000233, 0x34e70020, 0xa4c2000c, 0x30e2ffff,
3619 0xacc20010, 0x3c020800, 0x8c421fd0, 0x10400003, 0x3c024b65, 0x0a00023d,
3620 0x34427654, 0x3c02b49a, 0x344289ab, 0xacc2001c, 0x0e000560, 0x00c02021,
3621 0x3242ffff, 0x0054102b, 0x1440ffa4, 0x00000000, 0x24020002, 0x3c010800,
3622 0x0a000250, 0xa02216f8, 0x8ea208bc, 0x24420001, 0x0a000250, 0xaea208bc,
3623 0x14620003, 0x00000000, 0x0e000450, 0x00000000, 0x8fbf002c, 0x8fb60028,
3624 0x8fb50024, 0x8fb40020, 0x8fb3001c, 0x8fb20018, 0x8fb10014, 0x8fb00010,
3625 0x03e00008, 0x27bd0030, 0x27bdffd8, 0xafb3001c, 0x00809821, 0xafbf0020,
3626 0xafb20018, 0xafb10014, 0xafb00010, 0x8f725c9c, 0x3c0200ff, 0x3442fff8,
3627 0x3c040800, 0x24841714, 0x02428824, 0x9623000e, 0x8c820000, 0x00431021,
3628 0xac820000, 0x8e220010, 0x30420020, 0x14400011, 0x00000000, 0x0e0002f7,
3629 0x02202021, 0x3c02c000, 0x02421825, 0xaf635c9c, 0x8f625c90, 0x30420002,
3630 0x10400061, 0x00000000, 0xaf635c9c, 0x8f625c90, 0x30420002, 0x1040005c,
3631 0x00000000, 0x0a000278, 0x00000000, 0x8e220008, 0x00021c02, 0x000321c0,
3632 0x3042ffff, 0x3c030800, 0x906316f8, 0x000229c0, 0x24020002, 0x14620003,
3633 0x3c034b65, 0x0a000290, 0x00008021, 0x8e22001c, 0x34637654, 0x10430002,
3634 0x24100002, 0x24100001, 0x0e000300, 0x02003021, 0x24020003, 0x3c010800,
3635 0xa02216f8, 0x24020002, 0x1202000a, 0x24020001, 0x3c030800, 0x8c631fd0,
3636 0x10620006, 0x00000000, 0x3c020800, 0x94421fb8, 0x00021400, 0x0a0002cd,
3637 0xae220014, 0x3c040800, 0x24841fba, 0x94820000, 0x00021400, 0xae220014,
3638 0x3c020800, 0x8c42171c, 0x3c03c000, 0x3c010800, 0xa02016f8, 0x00431025,
3639 0xaf625c5c, 0x8f625c50, 0x30420002, 0x10400009, 0x00000000, 0x2484f762,
3640 0x8c820000, 0x00431025, 0xaf625c5c, 0x8f625c50, 0x30420002, 0x1440fffa,
3641 0x00000000, 0x3c020800, 0x244216e4, 0x8c430000, 0x24630001, 0xac430000,
3642 0x8f630c14, 0x3063000f, 0x2c620002, 0x1440000b, 0x00009821, 0x8f630c14,
3643 0x3c020800, 0x8c4216b4, 0x3063000f, 0x24420001, 0x3c010800, 0xac2216b4,
3644 0x2c620002, 0x1040fff7, 0x00009821, 0x3c024000, 0x02421825, 0xaf635c9c,
3645 0x8f625c90, 0x30420002, 0x1440fffc, 0x00000000, 0x12600003, 0x00000000,
3646 0x0e000450, 0x00000000, 0x8fbf0020, 0x8fb3001c, 0x8fb20018, 0x8fb10014,
3647 0x8fb00010, 0x03e00008, 0x27bd0028, 0x0a0002df, 0x00000000, 0x8f634450,
3648 0x3c040800, 0x248416e8, 0x8c820000, 0x00031c02, 0x0043102b, 0x14400007,
3649 0x3c038000, 0x8c840004, 0x8f624450, 0x00021c02, 0x0083102b, 0x1040fffc,
3650 0x3c038000, 0xaf634444, 0x8f624444, 0x00431024, 0x1440fffd, 0x00000000,
3651 0x8f624448, 0x03e00008, 0x3042ffff, 0x3c024000, 0x00822025, 0xaf645c38,
3652 0x8f625c30, 0x30420002, 0x1440fffc, 0x00000000, 0x03e00008, 0x00000000,
3653 0x27bdffe0, 0x00805021, 0x14c00017, 0x254c0008, 0x3c020800, 0x8c421fd4,
3654 0x1040000a, 0x2402003e, 0x3c010800, 0xa4221fb0, 0x24020016, 0x3c010800,
3655 0xa4221fb2, 0x2402002a, 0x3c010800, 0x0a00031a, 0xa4221fb4, 0x95420014,
3656 0x3c010800, 0xa4221fb0, 0x8d430010, 0x00031402, 0x3c010800, 0xa4221fb2,
3657 0x3c010800, 0xa4231fb4, 0x3c040800, 0x94841fb4, 0x3c030800, 0x94631fb2,
3658 0x958d0006, 0x3c020800, 0x94421fb0, 0x00832023, 0x01a27023, 0x3065ffff,
3659 0x24a20028, 0x01824021, 0x3082ffff, 0x14c0001a, 0x01025821, 0x9562000c,
3660 0x3042003f, 0x3c010800, 0xa4221fb6, 0x95620004, 0x95630006, 0x3c010800,
3661 0xac201fc4, 0x3c010800, 0xac201fc8, 0x00021400, 0x00431025, 0x3c010800,
3662 0xac221720, 0x95020004, 0x3c010800, 0xa4221724, 0x95030002, 0x01a51023,
3663 0x0043102a, 0x10400010, 0x24020001, 0x3c010800, 0x0a00034e, 0xac221fd8,
3664 0x3c030800, 0x8c631fc8, 0x3c020800, 0x94421724, 0x00431021, 0xa5020004,
3665 0x3c020800, 0x94421720, 0xa5620004, 0x3c020800, 0x8c421720, 0xa5620006,
3666 0x3c020800, 0x8c421fd0, 0x3c070800, 0x8ce71fc4, 0x3c050800, 0x144000c7,
3667 0x8ca51fc8, 0x3c020800, 0x94421724, 0x00451821, 0x3063ffff, 0x0062182b,
3668 0x24020002, 0x10c2000d, 0x00a32823, 0x3c020800, 0x94421fb6, 0x30420009,
3669 0x10400008, 0x00000000, 0x9562000c, 0x3042fff6, 0xa562000c, 0x3c020800,
3670 0x94421fb6, 0x30420009, 0x00e23823, 0x3c020800, 0x8c421fd8, 0x1040004b,
3671 0x24020002, 0x01003021, 0x3c020800, 0x94421fb2, 0x00003821, 0xa500000a,
3672 0x01a21023, 0xa5020002, 0x3082ffff, 0x00021042, 0x18400008, 0x00002821,
3673 0x00401821, 0x94c20000, 0x24e70001, 0x00a22821, 0x00e3102a, 0x1440fffb,
3674 0x24c60002, 0x00051c02, 0x30a2ffff, 0x00622821, 0x00051402, 0x00a22821,
3675 0x00a04821, 0x00051027, 0xa502000a, 0x00002821, 0x2506000c, 0x00003821,
3676 0x94c20000, 0x24e70001, 0x00a22821, 0x2ce20004, 0x1440fffb, 0x24c60002,
3677 0x95020002, 0x00003821, 0x91030009, 0x00442023, 0x01603021, 0x3082ffff,
3678 0xa4c00010, 0x00621821, 0x00021042, 0x18400010, 0x00a32821, 0x00404021,
3679 0x94c20000, 0x24c60002, 0x00a22821, 0x30c2007f, 0x14400006, 0x24e70001,
3680 0x8d430000, 0x3c02007f, 0x3442ff80, 0x00625024, 0x25460008, 0x00e8102a,
3681 0x1440fff3, 0x00000000, 0x30820001, 0x10400005, 0x00051c02, 0xa0c00001,
3682 0x94c20000, 0x00a22821, 0x00051c02, 0x30a2ffff, 0x00622821, 0x00051402,
3683 0x00a22821, 0x0a000415, 0x30a5ffff, 0x14c20063, 0x00000000, 0x3c090800,
3684 0x95291fb2, 0x95030002, 0x01a91023, 0x1062005d, 0x01003021, 0x00003821,
3685 0x00002821, 0x01a91023, 0xa5020002, 0x3082ffff, 0x00021042, 0x18400008,
3686 0xa500000a, 0x00401821, 0x94c20000, 0x24e70001, 0x00a22821, 0x00e3102a,
3687 0x1440fffb, 0x24c60002, 0x00051c02, 0x30a2ffff, 0x00622821, 0x00051402,
3688 0x00a22821, 0x00a04821, 0x00051027, 0xa502000a, 0x00002821, 0x2506000c,
3689 0x00003821, 0x94c20000, 0x24e70001, 0x00a22821, 0x2ce20004, 0x1440fffb,
3690 0x24c60002, 0x95020002, 0x00003821, 0x91030009, 0x00442023, 0x01603021,
3691 0x3082ffff, 0xa4c00010, 0x3c040800, 0x94841fb4, 0x00621821, 0x00a32821,
3692 0x00051c02, 0x30a2ffff, 0x00622821, 0x00051c02, 0x3c020800, 0x94421fb0,
3693 0x00a34021, 0x00441023, 0x00021fc2, 0x00431021, 0x00021043, 0x18400010,
3694 0x00002821, 0x00402021, 0x94c20000, 0x24c60002, 0x00a22821, 0x30c2007f,
3695 0x14400006, 0x24e70001, 0x8d430000, 0x3c02007f, 0x3442ff80, 0x00625024,
3696 0x25460008, 0x00e4102a, 0x1440fff3, 0x00000000, 0x3c020800, 0x94421fcc,
3697 0x00a22821, 0x00051c02, 0x30a2ffff, 0x00622821, 0x00051402, 0x00a22821,
3698 0x3102ffff, 0x00a22821, 0x00051c02, 0x30a2ffff, 0x00622821, 0x00051402,
3699 0x00a22821, 0x00a02021, 0x00051027, 0xa5620010, 0xad800014, 0x0a000435,
3700 0xad800000, 0x8d830010, 0x00602021, 0x10a00007, 0x00034c02, 0x01252821,
3701 0x00051402, 0x30a3ffff, 0x00432821, 0x00051402, 0x00a24821, 0x00091027,
3702 0xa502000a, 0x3c030800, 0x94631fb4, 0x3082ffff, 0x01a21021, 0x00432823,
3703 0x00a72821, 0x00051c02, 0x30a2ffff, 0x00622821, 0x00051402, 0x00a22821,
3704 0x00a02021, 0x00051027, 0xa5620010, 0x3082ffff, 0x00091c00, 0x00431025,
3705 0xad820010, 0x3c020800, 0x8c421fd4, 0x10400002, 0x25a2fff2, 0xa5820034,
3706 0x3c020800, 0x8c421fc8, 0x3c030800, 0x8c631720, 0x24420001, 0x3c010800,
3707 0xac221fc8, 0x3c020800, 0x8c421fc4, 0x31c4ffff, 0x00641821, 0x3c010800,
3708 0xac231720, 0x00441021, 0x3c010800, 0xac221fc4, 0x03e00008, 0x27bd0020,
3709 0x27bdffc8, 0x3c040800, 0x248416f8, 0xafbf0034, 0xafbe0030, 0xafb7002c,
3710 0xafb60028, 0xafb50024, 0xafb40020, 0xafb3001c, 0xafb20018, 0xafb10014,
3711 0xafb00010, 0x90830000, 0x24020003, 0x146200f4, 0x00000000, 0x3c020800,
3712 0x8c421710, 0x3c030800, 0x8c63170c, 0x3c1e0800, 0x97de16f6, 0x0043102a,
3713 0x104000eb, 0x3c168000, 0x249708c4, 0x33d5ffff, 0x24920018, 0x3c020800,
3714 0x8c421718, 0x104000e4, 0x00000000, 0x3c140800, 0x96941fb0, 0x3282ffff,
3715 0x104000d6, 0x00008021, 0x00409821, 0x00008821, 0x8f634450, 0x3c020800,
3716 0x8c4216e8, 0x00031c02, 0x0043102b, 0x14400008, 0x00000000, 0x3c040800,
3717 0x8c8416ec, 0x8f624450, 0x00021c02, 0x0083102b, 0x1040fffc, 0x00000000,
3718 0xaf764444, 0x8f624444, 0x00561024, 0x10400006, 0x00000000, 0x3c038000,
3719 0x8f624444, 0x00431024, 0x1440fffd, 0x00000000, 0x8f624448, 0x3046ffff,
3720 0x10c0005f, 0x00000000, 0x3c090800, 0x01314821, 0x8d291728, 0x9528000a,
3721 0x31020040, 0x10400004, 0x2407180c, 0x8d22000c, 0x2407188c, 0xacc20018,
3722 0x31021000, 0x10400004, 0x34e32000, 0x00081040, 0x3042c000, 0x00623825,
3723 0x31020080, 0x54400001, 0x34e70010, 0x3c020800, 0x00511021, 0x8c421730,
3724 0x3c030800, 0x00711821, 0x8c631734, 0x00021500, 0x00031c00, 0x00431025,
3725 0xacc20014, 0x95240008, 0x3202ffff, 0x00821021, 0x0262102a, 0x14400002,
3726 0x02902823, 0x00802821, 0x8d220000, 0x02058021, 0xacc20000, 0x8d220004,
3727 0x00c02021, 0x26310010, 0xac820004, 0x30e2ffff, 0xac800008, 0xa485000e,
3728 0xac820010, 0x24020305, 0x0e000560, 0xa482000c, 0x3202ffff, 0x0053102b,
3729 0x1440ffaf, 0x3202ffff, 0x0a00054c, 0x00000000, 0x8e420000, 0x8e43fffc,
3730 0x0043102a, 0x10400084, 0x00000000, 0x8e45fff0, 0x8f644450, 0x3c030800,
3731 0x8c6316e8, 0x00051100, 0x3c090800, 0x01224821, 0x8d291728, 0x00041402,
3732 0x0062182b, 0x14600008, 0x00000000, 0x3c030800, 0x8c6316ec, 0x8f624450,
3733 0x00021402, 0x0062102b, 0x1040fffc, 0x00000000, 0xaf764444, 0x8f624444,
3734 0x00561024, 0x10400006, 0x00000000, 0x3c038000, 0x8f624444, 0x00431024,
3735 0x1440fffd, 0x00000000, 0x8f624448, 0x3046ffff, 0x14c00005, 0x00000000,
3736 0x8ee20000, 0x24420001, 0x0a000554, 0xaee20000, 0x9528000a, 0x31020040,
3737 0x10400004, 0x2407180c, 0x8d22000c, 0x2407188c, 0xacc20018, 0x31021000,
3738 0x10400004, 0x34e32000, 0x00081040, 0x3042c000, 0x00623825, 0x00051900,
3739 0x3c020800, 0x00431021, 0x8c421730, 0x3c010800, 0x00230821, 0x8c231734,
3740 0x00021500, 0x00031c00, 0x00431025, 0xacc20014, 0x3c030800, 0x8c631704,
3741 0x95220008, 0x00432023, 0x3202ffff, 0x3083ffff, 0x00431021, 0x02a2102a,
3742 0x14400002, 0x03d02823, 0x00802821, 0x8e420000, 0x30a4ffff, 0x00441021,
3743 0xae420000, 0xa4c5000e, 0x8d220000, 0xacc20000, 0x8d220004, 0x8e43fff4,
3744 0x00431021, 0xacc20004, 0x8e43fff4, 0x95220008, 0x00641821, 0x0062102a,
3745 0x14400006, 0x02058021, 0x8e42fff0, 0xae40fff4, 0x24420001, 0x0a000530,
3746 0xae42fff0, 0xae43fff4, 0xacc00008, 0x3202ffff, 0x10550003, 0x31020004,
3747 0x10400006, 0x24020305, 0x31020080, 0x54400001, 0x34e70010, 0x34e70020,
3748 0x24020905, 0xa4c2000c, 0x30e2ffff, 0xacc20010, 0x3c030800, 0x8c63170c,
3749 0x3c020800, 0x8c421710, 0x54620004, 0x3c02b49a, 0x3c024b65, 0x0a000548,
3750 0x34427654, 0x344289ab, 0xacc2001c, 0x0e000560, 0x00c02021, 0x3202ffff,
3751 0x0055102b, 0x1440ff7e, 0x00000000, 0x8e420000, 0x8e43fffc, 0x0043102a,
3752 0x1440ff1a, 0x00000000, 0x8fbf0034, 0x8fbe0030, 0x8fb7002c, 0x8fb60028,
3753 0x8fb50024, 0x8fb40020, 0x8fb3001c, 0x8fb20018, 0x8fb10014, 0x8fb00010,
3754 0x03e00008, 0x27bd0038, 0x27bdffe8, 0xafbf0014, 0xafb00010, 0x8f624450,
3755 0x8f634410, 0x0a00056f, 0x00808021, 0x8f626820, 0x30422000, 0x10400003,
3756 0x00000000, 0x0e00025a, 0x00002021, 0x8f624450, 0x8f634410, 0x3042ffff,
3757 0x0043102b, 0x1440fff5, 0x00000000, 0x8f630c14, 0x3063000f, 0x2c620002,
3758 0x1440000b, 0x00000000, 0x8f630c14, 0x3c020800, 0x8c4216b4, 0x3063000f,
3759 0x24420001, 0x3c010800, 0xac2216b4, 0x2c620002, 0x1040fff7, 0x00000000,
3760 0xaf705c18, 0x8f625c10, 0x30420002, 0x10400009, 0x00000000, 0x8f626820,
3761 0x30422000, 0x1040fff8, 0x00000000, 0x0e00025a, 0x00002021, 0x0a000582,
3762 0x00000000, 0x8fbf0014, 0x8fb00010, 0x03e00008, 0x27bd0018, 0x00000000,
3763 0x00000000
3764 };
3766 u32 tg3TsoFwRodata[] = {
3767 0x4d61696e, 0x43707542, 0x00000000, 0x00000000, 0x74637073, 0x6567496e,
3768 0x00000000, 0x53774576, 0x656e7430, 0x00000000, 0x00000000, 0x00000000,
3769 0x00000000
3770 };
3772 #if 0 /* All zeros, don't eat up space with it. */
3773 u32 tg3TsoFwData[] = {
3774 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
3775 0x00000000, 0x00000000, 0x00000000
3776 };
3777 #endif
3779 /* tp->lock is held. */
3780 static int tg3_load_tso_firmware(struct tg3 *tp)
3781 {
3782 struct fw_info info;
3783 int err, i;
3785 info.text_base = TG3_TSO_FW_TEXT_ADDR;
3786 info.text_len = TG3_TSO_FW_TEXT_LEN;
3787 info.text_data = &tg3TsoFwText[0];
3788 info.rodata_base = TG3_TSO_FW_RODATA_ADDR;
3789 info.rodata_len = TG3_TSO_FW_RODATA_LEN;
3790 info.rodata_data = &tg3TsoFwRodata[0];
3791 info.data_base = TG3_TSO_FW_DATA_ADDR;
3792 info.data_len = TG3_TSO_FW_DATA_LEN;
3793 info.data_data = NULL;
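3794 /* The TSO image's data segment is all zeros (see the #if 0 block above), so no source array is passed; the loader can simply zero-fill those data_len bytes. */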
3795 err = tg3_load_firmware_cpu(tp, TX_CPU_BASE,
3796 TX_CPU_SCRATCH_BASE, TX_CPU_SCRATCH_SIZE,
3797 &info);
3798 if (err)
3799 return err;
3801 /* Now start up only the TX cpu. */
3802 tw32(TX_CPU_BASE + CPU_STATE, 0xffffffff);
3803 tw32(TX_CPU_BASE + CPU_PC, TG3_TSO_FW_TEXT_ADDR);
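3804 /* The PC write does not always stick on the first try; the loop below re-halts the CPU, rewrites the PC, and retries. */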
3805 /* Flush posted writes. */
3806 tr32(TX_CPU_BASE + CPU_PC);
3807 for (i = 0; i < 5; i++) {
3808 if (tr32(TX_CPU_BASE + CPU_PC) == TG3_TSO_FW_TEXT_ADDR)
3809 break;
3810 tw32(TX_CPU_BASE + CPU_STATE, 0xffffffff);
3811 tw32(TX_CPU_BASE + CPU_MODE, CPU_MODE_HALT);
3812 tw32(TX_CPU_BASE + CPU_PC, TG3_TSO_FW_TEXT_ADDR);
3814 /* Flush posted writes. */
3815 tr32(TX_CPU_BASE + CPU_PC);
3817 udelay(1000);
3818 }
3819 if (i >= 5) {
3820 printk(KERN_ERR PFX "tg3_load_tso_firmware failed to set TX CPU PC "
3821 "for %s: is %08x, should be %08x\n",
3822 tp->dev->name, tr32(TX_CPU_BASE + CPU_PC),
3823 TG3_TSO_FW_TEXT_ADDR);
3824 return -ENODEV;
3825 }
3826 tw32(TX_CPU_BASE + CPU_STATE, 0xffffffff);
3827 tw32(TX_CPU_BASE + CPU_MODE, 0x00000000);
3829 /* Flush posted writes. */
3830 tr32(TX_CPU_BASE + CPU_MODE);
3832 return 0;
3833 }
3835 #endif /* TG3_DO_TSO != 0 */
3837 /* tp->lock is held. */
3838 static void __tg3_set_mac_addr(struct tg3 *tp)
3839 {
3840 u32 addr_high, addr_low;
3841 int i;
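3842 /* The loop below fills all four of the MAC's exact-match address slots with the same station address. */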
3843 addr_high = ((tp->dev->dev_addr[0] << 8) |
3844 tp->dev->dev_addr[1]);
3845 addr_low = ((tp->dev->dev_addr[2] << 24) |
3846 (tp->dev->dev_addr[3] << 16) |
3847 (tp->dev->dev_addr[4] << 8) |
3848 (tp->dev->dev_addr[5] << 0));
3849 for (i = 0; i < 4; i++) {
3850 tw32(MAC_ADDR_0_HIGH + (i * 8), addr_high);
3851 tw32(MAC_ADDR_0_LOW + (i * 8), addr_low);
3852 }
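3853 /* Seed the transmit backoff generator from the station address so that nodes sharing a link tend to choose different backoff slots. */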
3854 addr_high = (tp->dev->dev_addr[0] +
3855 tp->dev->dev_addr[1] +
3856 tp->dev->dev_addr[2] +
3857 tp->dev->dev_addr[3] +
3858 tp->dev->dev_addr[4] +
3859 tp->dev->dev_addr[5]) &
3860 TX_BACKOFF_SEED_MASK;
3861 tw32(MAC_TX_BACKOFF_SEED, addr_high);
3862 }
3864 static int tg3_set_mac_addr(struct net_device *dev, void *p)
3865 {
3866 struct tg3 *tp = dev->priv;
3867 struct sockaddr *addr = p;
3869 if (netif_running(dev))
3870 return -EBUSY;
3872 memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
3874 spin_lock_irq(&tp->lock);
3875 __tg3_set_mac_addr(tp);
3876 spin_unlock_irq(&tp->lock);
3878 return 0;
3879 }
3881 /* tp->lock is held. */
3882 static void tg3_set_bdinfo(struct tg3 *tp, u32 bdinfo_addr,
3883 dma_addr_t mapping, u32 maxlen_flags,
3884 u32 nic_addr)
3885 {
3886 tg3_write_mem(tp,
3887 (bdinfo_addr +
3888 TG3_BDINFO_HOST_ADDR +
3889 TG3_64BIT_REG_HIGH),
3890 ((u64) mapping >> 32));
3891 tg3_write_mem(tp,
3892 (bdinfo_addr +
3893 TG3_BDINFO_HOST_ADDR +
3894 TG3_64BIT_REG_LOW),
3895 ((u64) mapping & 0xffffffff));
3896 tg3_write_mem(tp,
3897 (bdinfo_addr +
3898 TG3_BDINFO_MAXLEN_FLAGS),
3899 maxlen_flags);
3900 tg3_write_mem(tp,
3901 (bdinfo_addr +
3902 TG3_BDINFO_NIC_ADDR),
3903 nic_addr);
3904 }
3906 static void __tg3_set_rx_mode(struct net_device *);
3908 /* tp->lock is held. */
3909 static int tg3_reset_hw(struct tg3 *tp)
3910 {
3911 u32 val;
3912 int i, err;
3914 tg3_disable_ints(tp);
3916 tg3_stop_fw(tp);
3918 if (tp->tg3_flags & TG3_FLAG_INIT_COMPLETE) {
3919 err = tg3_abort_hw(tp);
3920 if (err)
3921 return err;
3922 }
3924 tg3_chip_reset(tp);
3926 tw32(GRC_MODE, tp->grc_mode);
3927 tg3_write_mem(tp,
3928 NIC_SRAM_FIRMWARE_MBOX,
3929 NIC_SRAM_FIRMWARE_MBOX_MAGIC1);
3930 if (tp->phy_id == PHY_ID_SERDES) {
3931 tp->mac_mode = MAC_MODE_PORT_MODE_TBI;
3932 tw32(MAC_MODE, tp->mac_mode);
3933 } else
3934 tw32(MAC_MODE, 0);
3935 tr32(MAC_MODE);
3936 udelay(40);
3938 /* Wait for firmware initialization to complete. */
3939 for (i = 0; i < 100000; i++) {
3940 tg3_read_mem(tp, NIC_SRAM_FIRMWARE_MBOX, &val);
3941 if (val == ~NIC_SRAM_FIRMWARE_MBOX_MAGIC1)
3942 break;
3943 udelay(10);
3944 }
3945 if (i >= 100000) {
3946 printk(KERN_ERR PFX "tg3_reset_hw timed out for %s, "
3947 "firmware will not restart, magic=%08x\n",
3948 tp->dev->name, val);
3949 return -ENODEV;
3950 }
3952 if (tp->tg3_flags & TG3_FLAG_ENABLE_ASF)
3953 tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
3954 DRV_STATE_START);
3955 else
3956 tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
3957 DRV_STATE_SUSPEND);
3959 /* This works around an issue with Athlon chipsets on
3960 * B3 tigon3 silicon. This bit has no effect on any
3961 * other revision.
3962 */
3963 val = tr32(TG3PCI_CLOCK_CTRL);
3964 val |= CLOCK_CTRL_DELAY_PCI_GRANT;
3965 tw32(TG3PCI_CLOCK_CTRL, val);
3966 tr32(TG3PCI_CLOCK_CTRL);
3968 if (tp->pci_chip_rev_id == CHIPREV_ID_5704_A0 &&
3969 (tp->tg3_flags & TG3_FLAG_PCIX_MODE)) {
3970 val = tr32(TG3PCI_PCISTATE);
3971 val |= PCISTATE_RETRY_SAME_DMA;
3972 tw32(TG3PCI_PCISTATE, val);
3973 }
3975 /* Clear statistics/status block in chip, and status block in ram. */
3976 for (i = NIC_SRAM_STATS_BLK;
3977 i < NIC_SRAM_STATUS_BLK + TG3_HW_STATUS_SIZE;
3978 i += sizeof(u32)) {
3979 tg3_write_mem(tp, i, 0);
3980 udelay(40);
3981 }
3982 memset(tp->hw_status, 0, TG3_HW_STATUS_SIZE);
3984 /* This value is determined during the probe time DMA
3985 * engine test, tg3_test_dma.
3986 */
3987 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
3989 tp->grc_mode &= ~(GRC_MODE_HOST_SENDBDS |
3990 GRC_MODE_4X_NIC_SEND_RINGS |
3991 GRC_MODE_NO_TX_PHDR_CSUM |
3992 GRC_MODE_NO_RX_PHDR_CSUM);
3993 if (tp->tg3_flags & TG3_FLAG_HOST_TXDS)
3994 tp->grc_mode |= GRC_MODE_HOST_SENDBDS;
3995 else
3996 tp->grc_mode |= GRC_MODE_4X_NIC_SEND_RINGS;
3997 if (tp->tg3_flags & TG3_FLAG_NO_TX_PSEUDO_CSUM)
3998 tp->grc_mode |= GRC_MODE_NO_TX_PHDR_CSUM;
3999 if (tp->tg3_flags & TG3_FLAG_NO_RX_PSEUDO_CSUM)
4000 tp->grc_mode |= GRC_MODE_NO_RX_PHDR_CSUM;
4002 tw32(GRC_MODE,
4003 tp->grc_mode |
4004 (GRC_MODE_IRQ_ON_MAC_ATTN | GRC_MODE_HOST_STACKUP));
4006 /* Set up the timer prescaler register. The core clock is always 66MHz. */
4007 tw32(GRC_MISC_CFG,
4008 (65 << GRC_MISC_CFG_PRESCALAR_SHIFT));
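4009 /* A divisor of 65 + 1 turns the fixed 66MHz clock into a 1MHz (1 usec) timer tick. */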
4010 /* Initialize MBUF/DESC pool. */
4011 tw32(BUFMGR_MB_POOL_ADDR, NIC_SRAM_MBUF_POOL_BASE);
4012 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704)
4013 tw32(BUFMGR_MB_POOL_SIZE, NIC_SRAM_MBUF_POOL_SIZE64);
4014 else
4015 tw32(BUFMGR_MB_POOL_SIZE, NIC_SRAM_MBUF_POOL_SIZE96);
4016 tw32(BUFMGR_DMA_DESC_POOL_ADDR, NIC_SRAM_DMA_DESC_POOL_BASE);
4017 tw32(BUFMGR_DMA_DESC_POOL_SIZE, NIC_SRAM_DMA_DESC_POOL_SIZE);
4019 if (!(tp->tg3_flags & TG3_FLAG_JUMBO_ENABLE)) {
4020 tw32(BUFMGR_MB_RDMA_LOW_WATER,
4021 tp->bufmgr_config.mbuf_read_dma_low_water);
4022 tw32(BUFMGR_MB_MACRX_LOW_WATER,
4023 tp->bufmgr_config.mbuf_mac_rx_low_water);
4024 tw32(BUFMGR_MB_HIGH_WATER,
4025 tp->bufmgr_config.mbuf_high_water);
4026 } else {
4027 tw32(BUFMGR_MB_RDMA_LOW_WATER,
4028 tp->bufmgr_config.mbuf_read_dma_low_water_jumbo);
4029 tw32(BUFMGR_MB_MACRX_LOW_WATER,
4030 tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo);
4031 tw32(BUFMGR_MB_HIGH_WATER,
4032 tp->bufmgr_config.mbuf_high_water_jumbo);
4033 }
4034 tw32(BUFMGR_DMA_LOW_WATER,
4035 tp->bufmgr_config.dma_low_water);
4036 tw32(BUFMGR_DMA_HIGH_WATER,
4037 tp->bufmgr_config.dma_high_water);
4039 tw32(BUFMGR_MODE, BUFMGR_MODE_ENABLE | BUFMGR_MODE_ATTN_ENABLE);
4040 for (i = 0; i < 2000; i++) {
4041 if (tr32(BUFMGR_MODE) & BUFMGR_MODE_ENABLE)
4042 break;
4043 udelay(10);
4044 }
4045 if (i >= 2000) {
4046 printk(KERN_ERR PFX "tg3_reset_hw cannot enable BUFMGR for %s.\n",
4047 tp->dev->name);
4048 return -ENODEV;
4049 }
4051 tw32(FTQ_RESET, 0xffffffff);
4052 tw32(FTQ_RESET, 0x00000000);
4053 for (i = 0; i < 2000; i++) {
4054 if (tr32(FTQ_RESET) == 0x00000000)
4055 break;
4056 udelay(10);
4057 }
4058 if (i >= 2000) {
4059 printk(KERN_ERR PFX "tg3_reset_hw cannot reset FTQ for %s.\n",
4060 tp->dev->name);
4061 return -ENODEV;
4062 }
4064 /* Initialize TG3_BDINFO's at:
4065 * RCVDBDI_STD_BD: standard eth size rx ring
4066 * RCVDBDI_JUMBO_BD: jumbo frame rx ring
4067 * RCVDBDI_MINI_BD: small frame rx ring (??? does not work)
4069 * like so:
4070 * TG3_BDINFO_HOST_ADDR: high/low parts of DMA address of ring
4071 * TG3_BDINFO_MAXLEN_FLAGS: (rx max buffer size << 16) |
4072 * ring attribute flags
4073 * TG3_BDINFO_NIC_ADDR: location of descriptors in nic SRAM
4075 * Standard receive ring @ NIC_SRAM_RX_BUFFER_DESC, 512 entries.
4076 * Jumbo receive ring @ NIC_SRAM_RX_JUMBO_BUFFER_DESC, 256 entries.
4078 * The size of each ring is fixed in the firmware, but the location is
4079 * configurable.
4080 */
4081 tw32(RCVDBDI_STD_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH,
4082 ((u64) tp->rx_std_mapping >> 32));
4083 tw32(RCVDBDI_STD_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW,
4084 ((u64) tp->rx_std_mapping & 0xffffffff));
4085 tw32(RCVDBDI_STD_BD + TG3_BDINFO_MAXLEN_FLAGS,
4086 RX_STD_MAX_SIZE << BDINFO_FLAGS_MAXLEN_SHIFT);
4087 tw32(RCVDBDI_STD_BD + TG3_BDINFO_NIC_ADDR,
4088 NIC_SRAM_RX_BUFFER_DESC);
4090 tw32(RCVDBDI_MINI_BD + TG3_BDINFO_MAXLEN_FLAGS,
4091 BDINFO_FLAGS_DISABLED);
4093 if (tp->tg3_flags & TG3_FLAG_JUMBO_ENABLE) {
4094 tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH,
4095 ((u64) tp->rx_jumbo_mapping >> 32));
4096 tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW,
4097 ((u64) tp->rx_jumbo_mapping & 0xffffffff));
4098 tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_MAXLEN_FLAGS,
4099 RX_JUMBO_MAX_SIZE << BDINFO_FLAGS_MAXLEN_SHIFT);
4100 tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_NIC_ADDR,
4101 NIC_SRAM_RX_JUMBO_BUFFER_DESC);
4102 } else {
4103 tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_MAXLEN_FLAGS,
4104 BDINFO_FLAGS_DISABLED);
4105 }
4107 /* Setup replenish thresholds. */
4108 tw32(RCVBDI_STD_THRESH, tp->rx_pending / 8);
4109 tw32(RCVBDI_JUMBO_THRESH, tp->rx_jumbo_pending / 8);
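4110 /* One-eighth of each ring: when the chip's on-hand supply of RX buffer descriptors drops this low, it DMAs a fresh batch from the host ring. */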
4111 /* Clear out send RCB ring in SRAM. */
4112 for (i = NIC_SRAM_SEND_RCB; i < NIC_SRAM_RCV_RET_RCB; i += TG3_BDINFO_SIZE)
4113 tg3_write_mem(tp, i + TG3_BDINFO_MAXLEN_FLAGS, BDINFO_FLAGS_DISABLED);
4115 tp->tx_prod = 0;
4116 tp->tx_cons = 0;
4117 tw32_mailbox(MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW, 0);
4118 tw32_mailbox(MAILBOX_SNDNIC_PROD_IDX_0 + TG3_64BIT_REG_LOW, 0);
4119 if (tp->tg3_flags & TG3_FLAG_MBOX_WRITE_REORDER)
4120 tr32(MAILBOX_SNDNIC_PROD_IDX_0 + TG3_64BIT_REG_LOW);
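4121 /* Reading a mailbox back forces the posted write to complete on chipsets that can reorder MMIO writes. */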
4122 if (tp->tg3_flags & TG3_FLAG_HOST_TXDS) {
4123 tg3_set_bdinfo(tp, NIC_SRAM_SEND_RCB,
4124 tp->tx_desc_mapping,
4125 (TG3_TX_RING_SIZE <<
4126 BDINFO_FLAGS_MAXLEN_SHIFT),
4127 NIC_SRAM_TX_BUFFER_DESC);
4128 } else {
4129 tg3_set_bdinfo(tp, NIC_SRAM_SEND_RCB,
4130 0,
4131 BDINFO_FLAGS_DISABLED,
4132 NIC_SRAM_TX_BUFFER_DESC);
4133 }
4135 for (i = NIC_SRAM_RCV_RET_RCB; i < NIC_SRAM_STATS_BLK; i += TG3_BDINFO_SIZE) {
4136 tg3_write_mem(tp, i + TG3_BDINFO_MAXLEN_FLAGS,
4137 BDINFO_FLAGS_DISABLED);
4138 }
4140 tp->rx_rcb_ptr = 0;
4141 tw32_mailbox(MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW, 0);
4142 if (tp->tg3_flags & TG3_FLAG_MBOX_WRITE_REORDER)
4143 tr32(MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW);
4145 tg3_set_bdinfo(tp, NIC_SRAM_RCV_RET_RCB,
4146 tp->rx_rcb_mapping,
4147 (TG3_RX_RCB_RING_SIZE <<
4148 BDINFO_FLAGS_MAXLEN_SHIFT),
4149 0);
4151 tp->rx_std_ptr = tp->rx_pending;
4152 tw32_mailbox(MAILBOX_RCV_STD_PROD_IDX + TG3_64BIT_REG_LOW,
4153 tp->rx_std_ptr);
4154 if (tp->tg3_flags & TG3_FLAG_MBOX_WRITE_REORDER)
4155 tr32(MAILBOX_RCV_STD_PROD_IDX + TG3_64BIT_REG_LOW);
4157 if (tp->tg3_flags & TG3_FLAG_JUMBO_ENABLE)
4158 tp->rx_jumbo_ptr = tp->rx_jumbo_pending;
4159 else
4160 tp->rx_jumbo_ptr = 0;
4161 tw32_mailbox(MAILBOX_RCV_JUMBO_PROD_IDX + TG3_64BIT_REG_LOW,
4162 tp->rx_jumbo_ptr);
4163 if (tp->tg3_flags & TG3_FLAG_MBOX_WRITE_REORDER)
4164 tr32(MAILBOX_RCV_JUMBO_PROD_IDX + TG3_64BIT_REG_LOW);
4166 /* Initialize MAC address and backoff seed. */
4167 __tg3_set_mac_addr(tp);
4169 /* MTU + ethernet header + FCS + optional VLAN tag */
4170 tw32(MAC_RX_MTU_SIZE, tp->dev->mtu + ETH_HLEN + 8);
4172 /* The slot time is changed by tg3_setup_phy if we
4173 * run at gigabit with half duplex.
4174 */
4175 tw32(MAC_TX_LENGTHS,
4176 (2 << TX_LENGTHS_IPG_CRS_SHIFT) |
4177 (6 << TX_LENGTHS_IPG_SHIFT) |
4178 (32 << TX_LENGTHS_SLOT_TIME_SHIFT));
4180 /* Receive rules. */
4181 tw32(MAC_RCV_RULE_CFG, RCV_RULE_CFG_DEFAULT_CLASS);
4182 tw32(RCVLPC_CONFIG, 0x0181);
4184 /* Receive/send statistics. */
4185 tw32(RCVLPC_STATS_ENABLE, 0xffffff);
4186 tw32(RCVLPC_STATSCTRL, RCVLPC_STATSCTRL_ENABLE);
4187 tw32(SNDDATAI_STATSENAB, 0xffffff);
4188 tw32(SNDDATAI_STATSCTRL,
4189 (SNDDATAI_SCTRL_ENABLE |
4190 SNDDATAI_SCTRL_FASTUPD));
4192 /* Setup host coalescing engine. */
4193 tw32(HOSTCC_MODE, 0);
4194 for (i = 0; i < 2000; i++) {
4195 if (!(tr32(HOSTCC_MODE) & HOSTCC_MODE_ENABLE))
4196 break;
4197 udelay(10);
4198 }
4200 tw32(HOSTCC_RXCOL_TICKS, 0);
4201 tw32(HOSTCC_RXMAX_FRAMES, 1);
4202 tw32(HOSTCC_RXCOAL_TICK_INT, 0);
4203 tw32(HOSTCC_RXCOAL_MAXF_INT, 1);
4204 tw32(HOSTCC_TXCOL_TICKS, LOW_TXCOL_TICKS);
4205 tw32(HOSTCC_TXMAX_FRAMES, LOW_RXMAX_FRAMES);
4206 tw32(HOSTCC_TXCOAL_TICK_INT, 0);
4207 tw32(HOSTCC_TXCOAL_MAXF_INT, 0);
4208 tw32(HOSTCC_STAT_COAL_TICKS,
4209 DEFAULT_STAT_COAL_TICKS);
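4210 /* With rx ticks 0 and rx max frames 1, every received frame raises an interrupt immediately; only TX completions and statistics are actually coalesced here. */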
4211 /* Status/statistics block address. */
4212 tw32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH,
4213 ((u64) tp->stats_mapping >> 32));
4214 tw32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW,
4215 ((u64) tp->stats_mapping & 0xffffffff));
4216 tw32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH,
4217 ((u64) tp->status_mapping >> 32));
4218 tw32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW,
4219 ((u64) tp->status_mapping & 0xffffffff));
4220 tw32(HOSTCC_STATS_BLK_NIC_ADDR, NIC_SRAM_STATS_BLK);
4221 tw32(HOSTCC_STATUS_BLK_NIC_ADDR, NIC_SRAM_STATUS_BLK);
4223 tw32(HOSTCC_MODE, HOSTCC_MODE_ENABLE | tp->coalesce_mode);
4225 tw32(RCVCC_MODE, RCVCC_MODE_ENABLE | RCVCC_MODE_ATTN_ENABLE);
4226 tw32(RCVLPC_MODE, RCVLPC_MODE_ENABLE);
4227 tw32(RCVLSC_MODE, RCVLSC_MODE_ENABLE | RCVLSC_MODE_ATTN_ENABLE);
4229 tp->mac_mode = MAC_MODE_TXSTAT_ENABLE | MAC_MODE_RXSTAT_ENABLE |
4230 MAC_MODE_TDE_ENABLE | MAC_MODE_RDE_ENABLE | MAC_MODE_FHDE_ENABLE;
4231 tw32(MAC_MODE, tp->mac_mode | MAC_MODE_RXSTAT_CLEAR | MAC_MODE_TXSTAT_CLEAR);
4232 tr32(MAC_MODE);
4233 udelay(40);
4235 tp->grc_local_ctrl = GRC_LCLCTRL_INT_ON_ATTN | GRC_LCLCTRL_AUTO_SEEPROM;
4236 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700)
4237 tp->grc_local_ctrl |= (GRC_LCLCTRL_GPIO_OE1 |
4238 GRC_LCLCTRL_GPIO_OUTPUT1);
4239 tw32(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
4240 tr32(GRC_LOCAL_CTRL);
4241 udelay(100);
4243 tw32_mailbox(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0);
4244 tr32(MAILBOX_INTERRUPT_0);
4246 tw32(DMAC_MODE, DMAC_MODE_ENABLE);
4247 tr32(DMAC_MODE);
4248 udelay(40);
4250 tw32(WDMAC_MODE, (WDMAC_MODE_ENABLE | WDMAC_MODE_TGTABORT_ENAB |
4251 WDMAC_MODE_MSTABORT_ENAB | WDMAC_MODE_PARITYERR_ENAB |
4252 WDMAC_MODE_ADDROFLOW_ENAB | WDMAC_MODE_FIFOOFLOW_ENAB |
4253 WDMAC_MODE_FIFOURUN_ENAB | WDMAC_MODE_FIFOOREAD_ENAB |
4254 WDMAC_MODE_LNGREAD_ENAB));
4255 tr32(WDMAC_MODE);
4256 udelay(40);
4258 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 &&
4259 (tp->tg3_flags & TG3_FLAG_PCIX_MODE)) {
4260 val = tr32(TG3PCI_X_CAPS);
4261 val &= ~(PCIX_CAPS_SPLIT_MASK | PCIX_CAPS_BURST_MASK);
4262 val |= (PCIX_CAPS_MAX_BURST_5704 << PCIX_CAPS_BURST_SHIFT);
4263 if (tp->tg3_flags & TG3_FLAG_SPLIT_MODE)
4264 val |= (tp->split_mode_max_reqs <<
4265 PCIX_CAPS_SPLIT_SHIFT);
4266 tw32(TG3PCI_X_CAPS, val);
4267 }
4269 val = (RDMAC_MODE_ENABLE | RDMAC_MODE_TGTABORT_ENAB |
4270 RDMAC_MODE_MSTABORT_ENAB | RDMAC_MODE_PARITYERR_ENAB |
4271 RDMAC_MODE_ADDROFLOW_ENAB | RDMAC_MODE_FIFOOFLOW_ENAB |
4272 RDMAC_MODE_FIFOURUN_ENAB | RDMAC_MODE_FIFOOREAD_ENAB |
4273 RDMAC_MODE_LNGREAD_ENAB);
4274 if (tp->tg3_flags & TG3_FLAG_SPLIT_MODE)
4275 val |= RDMAC_MODE_SPLIT_ENABLE;
4276 tw32(RDMAC_MODE, val);
4277 tr32(RDMAC_MODE);
4278 udelay(40);
4280 tw32(RCVDCC_MODE, RCVDCC_MODE_ENABLE | RCVDCC_MODE_ATTN_ENABLE);
4281 tw32(MBFREE_MODE, MBFREE_MODE_ENABLE);
4282 tw32(SNDDATAC_MODE, SNDDATAC_MODE_ENABLE);
4283 tw32(SNDBDC_MODE, SNDBDC_MODE_ENABLE | SNDBDC_MODE_ATTN_ENABLE);
4284 tw32(RCVBDI_MODE, RCVBDI_MODE_ENABLE | RCVBDI_MODE_RCB_ATTN_ENAB);
4285 tw32(RCVDBDI_MODE, RCVDBDI_MODE_ENABLE | RCVDBDI_MODE_INV_RING_SZ);
4286 tw32(SNDDATAI_MODE, SNDDATAI_MODE_ENABLE);
4287 tw32(SNDBDI_MODE, SNDBDI_MODE_ENABLE | SNDBDI_MODE_ATTN_ENABLE);
4288 tw32(SNDBDS_MODE, SNDBDS_MODE_ENABLE | SNDBDS_MODE_ATTN_ENABLE);
4290 if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0) {
4291 err = tg3_load_5701_a0_firmware_fix(tp);
4292 if (err)
4293 return err;
4294 }
4296 #if TG3_DO_TSO != 0
4297 if (tp->dev->features & NETIF_F_TSO) {
4298 err = tg3_load_tso_firmware(tp);
4299 if (err)
4300 return err;
4301 }
4302 #endif
4304 tp->tx_mode = TX_MODE_ENABLE;
4305 tw32(MAC_TX_MODE, tp->tx_mode);
4306 tr32(MAC_TX_MODE);
4307 udelay(100);
4309 tp->rx_mode = RX_MODE_ENABLE;
4310 tw32(MAC_RX_MODE, tp->rx_mode);
4311 tr32(MAC_RX_MODE);
4312 udelay(10);
4314 if (tp->link_config.phy_is_low_power) {
4315 tp->link_config.phy_is_low_power = 0;
4316 tp->link_config.speed = tp->link_config.orig_speed;
4317 tp->link_config.duplex = tp->link_config.orig_duplex;
4318 tp->link_config.autoneg = tp->link_config.orig_autoneg;
4319 }
4321 tp->mi_mode = MAC_MI_MODE_BASE;
4322 tw32(MAC_MI_MODE, tp->mi_mode);
4323 tr32(MAC_MI_MODE);
4324 udelay(40);
4326 tw32(MAC_LED_CTRL, 0);
4327 tw32(MAC_MI_STAT, MAC_MI_STAT_LNKSTAT_ATTN_ENAB);
4328 tw32(MAC_RX_MODE, RX_MODE_RESET);
4329 tr32(MAC_RX_MODE);
4330 udelay(10);
4331 tw32(MAC_RX_MODE, tp->rx_mode);
4332 tr32(MAC_RX_MODE);
4333 udelay(10);
4335 if (tp->pci_chip_rev_id == CHIPREV_ID_5703_A1)
4336 tw32(MAC_SERDES_CFG, 0x616000);
4338 /* Prevent chip from dropping frames when flow control
4339 * is enabled.
4340 */
4341 tw32(MAC_LOW_WMARK_MAX_RX_FRAME, 2);
4342 tr32(MAC_LOW_WMARK_MAX_RX_FRAME);
4344 err = tg3_setup_phy(tp);
4345 if (err)
4346 return err;
4348 if (tp->phy_id != PHY_ID_SERDES) {
4349 u32 tmp;
4351 /* Clear CRC stats. */
4352 tg3_readphy(tp, 0x1e, &tmp);
4353 tg3_writephy(tp, 0x1e, tmp | 0x8000);
4354 tg3_readphy(tp, 0x14, &tmp);
4355 }
4357 __tg3_set_rx_mode(tp->dev);
4359 /* Initialize receive rules. */
4360 tw32(MAC_RCV_RULE_0, 0xc2000000 & RCV_RULE_DISABLE_MASK);
4361 tw32(MAC_RCV_VALUE_0, 0xffffffff & RCV_RULE_DISABLE_MASK);
4362 tw32(MAC_RCV_RULE_1, 0x86000004 & RCV_RULE_DISABLE_MASK);
4363 tw32(MAC_RCV_VALUE_1, 0xffffffff & RCV_RULE_DISABLE_MASK);
4364 #if 0
4365 tw32(MAC_RCV_RULE_2, 0); tw32(MAC_RCV_VALUE_2, 0);
4366 tw32(MAC_RCV_RULE_3, 0); tw32(MAC_RCV_VALUE_3, 0);
4367 #endif
4368 tw32(MAC_RCV_RULE_4, 0); tw32(MAC_RCV_VALUE_4, 0);
4369 tw32(MAC_RCV_RULE_5, 0); tw32(MAC_RCV_VALUE_5, 0);
4370 tw32(MAC_RCV_RULE_6, 0); tw32(MAC_RCV_VALUE_6, 0);
4371 tw32(MAC_RCV_RULE_7, 0); tw32(MAC_RCV_VALUE_7, 0);
4372 tw32(MAC_RCV_RULE_8, 0); tw32(MAC_RCV_VALUE_8, 0);
4373 tw32(MAC_RCV_RULE_9, 0); tw32(MAC_RCV_VALUE_9, 0);
4374 tw32(MAC_RCV_RULE_10, 0); tw32(MAC_RCV_VALUE_10, 0);
4375 tw32(MAC_RCV_RULE_11, 0); tw32(MAC_RCV_VALUE_11, 0);
4376 tw32(MAC_RCV_RULE_12, 0); tw32(MAC_RCV_VALUE_12, 0);
4377 tw32(MAC_RCV_RULE_13, 0); tw32(MAC_RCV_VALUE_13, 0);
4378 tw32(MAC_RCV_RULE_14, 0); tw32(MAC_RCV_VALUE_14, 0);
4379 tw32(MAC_RCV_RULE_15, 0); tw32(MAC_RCV_VALUE_15, 0);
4381 if (tp->tg3_flags & TG3_FLAG_INIT_COMPLETE)
4382 tg3_enable_ints(tp);
4384 return 0;
4385 }
4387 /* Called at device open time to get the chip ready for
4388 * packet processing. Invoked with tp->lock held.
4389 */
4390 static int tg3_init_hw(struct tg3 *tp)
4391 {
4392 int err;
4394 /* Force the chip into D0. */
4395 err = tg3_set_power_state(tp, 0);
4396 if (err)
4397 goto out;
4399 tg3_switch_clocks(tp);
4401 tw32(TG3PCI_MEM_WIN_BASE_ADDR, 0);
4403 err = tg3_reset_hw(tp);
4405 out:
4406 return err;
4407 }
4409 static void tg3_timer(unsigned long __opaque)
4410 {
4411 struct tg3 *tp = (struct tg3 *) __opaque;
4412 unsigned long flags;
4414 spin_lock_irqsave(&tp->lock, flags);
4415 spin_lock(&tp->tx_lock);
4417 /* All of this garbage is because, when using non-tagged
4418 * IRQ status, the mailbox/status_block protocol the chip
4419 * uses with the cpu is race prone.
4420 */
4421 if (tp->hw_status->status & SD_STATUS_UPDATED) {
4422 tw32(GRC_LOCAL_CTRL,
4423 tp->grc_local_ctrl | GRC_LCLCTRL_SETINT);
4424 } else {
4425 tw32(HOSTCC_MODE, tp->coalesce_mode |
4426 (HOSTCC_MODE_ENABLE | HOSTCC_MODE_NOW));
4427 }
4429 if (!(tr32(WDMAC_MODE) & WDMAC_MODE_ENABLE)) {
4430 tp->tg3_flags2 |= TG3_FLG2_RESTART_TIMER;
4431 spin_unlock(&tp->tx_lock);
4432 spin_unlock_irqrestore(&tp->lock, flags);
4433 schedule_work(&tp->reset_task);
4434 return;
4435 }
4437 /* This part only runs once per second. */
4438 if (!--tp->timer_counter) {
4439 if (tp->tg3_flags & TG3_FLAG_USE_LINKCHG_REG) {
4440 u32 mac_stat;
4441 int phy_event;
4443 mac_stat = tr32(MAC_STATUS);
4445 phy_event = 0;
4446 if (tp->tg3_flags & TG3_FLAG_USE_MI_INTERRUPT) {
4447 if (mac_stat & MAC_STATUS_MI_INTERRUPT)
4448 phy_event = 1;
4449 } else if (mac_stat & MAC_STATUS_LNKSTATE_CHANGED)
4450 phy_event = 1;
4452 if (phy_event)
4453 tg3_setup_phy(tp);
4454 } else if (tp->tg3_flags & TG3_FLAG_POLL_SERDES) {
4455 u32 mac_stat = tr32(MAC_STATUS);
4456 int need_setup = 0;
4458 if (netif_carrier_ok(tp->dev) &&
4459 (mac_stat & MAC_STATUS_LNKSTATE_CHANGED)) {
4460 need_setup = 1;
4461 }
4462 if (! netif_carrier_ok(tp->dev) &&
4463 (mac_stat & MAC_STATUS_PCS_SYNCED)) {
4464 need_setup = 1;
4465 }
4466 if (need_setup) {
4467 tw32(MAC_MODE,
4468 (tp->mac_mode &
4469 ~MAC_MODE_PORT_MODE_MASK));
4470 tr32(MAC_MODE);
4471 udelay(40);
4472 tw32(MAC_MODE, tp->mac_mode);
4473 tr32(MAC_MODE);
4474 udelay(40);
4475 tg3_setup_phy(tp);
4476 }
4477 }
4479 tp->timer_counter = tp->timer_multiplier;
4480 }
4482 /* Heartbeat is only sent once every 120 seconds. */
4483 if (!--tp->asf_counter) {
4484 if (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) {
4485 u32 val;
4487 tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX, FWCMD_NICDRV_ALIVE);
4488 tg3_write_mem(tp, NIC_SRAM_FW_CMD_LEN_MBOX, 4);
4489 tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX, 3);
4490 val = tr32(GRC_RX_CPU_EVENT);
4491 val |= (1 << 14);
4492 tw32(GRC_RX_CPU_EVENT, val);
4493 }
4494 tp->asf_counter = tp->asf_multiplier;
4495 }
4497 spin_unlock(&tp->tx_lock);
4498 spin_unlock_irqrestore(&tp->lock, flags);
4500 tp->timer.expires = jiffies + tp->timer_offset;
4501 add_timer(&tp->timer);
4502 }
4504 static int tg3_open(struct net_device *dev)
4505 {
4506 struct tg3 *tp = dev->priv;
4507 int err;
4509 spin_lock_irq(&tp->lock);
4510 spin_lock(&tp->tx_lock);
4512 tg3_disable_ints(tp);
4513 tp->tg3_flags &= ~TG3_FLAG_INIT_COMPLETE;
4515 spin_unlock(&tp->tx_lock);
4516 spin_unlock_irq(&tp->lock);
4518 /* If you move this call, make sure TG3_FLAG_HOST_TXDS in
4519 * tp->tg3_flags is accurate at that new place.
4520 */
4521 err = tg3_alloc_consistent(tp);
4522 if (err)
4523 return err;
4525 err = request_irq(dev->irq, tg3_interrupt,
4526 SA_SHIRQ, dev->name, dev);
4528 if (err) {
4529 tg3_free_consistent(tp);
4530 return err;
4531 }
4533 spin_lock_irq(&tp->lock);
4534 spin_lock(&tp->tx_lock);
4536 tg3_init_rings(tp);
4538 err = tg3_init_hw(tp);
4539 if (err) {
4540 tg3_halt(tp);
4541 tg3_free_rings(tp);
4542 } else {
4543 tp->timer_offset = HZ / 10;
4544 tp->timer_counter = tp->timer_multiplier = 10;
4545 tp->asf_counter = tp->asf_multiplier = (10 * 120);
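4546 /* The timer fires 10 times a second; timer_counter divides that down to the once-per-second link check, asf_counter to the 120-second ASF heartbeat. */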
4547 init_timer(&tp->timer);
4548 tp->timer.expires = jiffies + tp->timer_offset;
4549 tp->timer.data = (unsigned long) tp;
4550 tp->timer.function = tg3_timer;
4551 add_timer(&tp->timer);
4553 tp->tg3_flags |= TG3_FLAG_INIT_COMPLETE;
4554 }
4556 spin_unlock(&tp->tx_lock);
4557 spin_unlock_irq(&tp->lock);
4559 if (err) {
4560 free_irq(dev->irq, dev);
4561 tg3_free_consistent(tp);
4562 return err;
4563 }
4565 spin_lock_irq(&tp->lock);
4566 spin_lock(&tp->tx_lock);
4568 tg3_enable_ints(tp);
4570 spin_unlock(&tp->tx_lock);
4571 spin_unlock_irq(&tp->lock);
4573 netif_start_queue(dev);
4575 return 0;
4576 }
4578 #if 0
4579 /*static*/ void tg3_dump_state(struct tg3 *tp)
4580 {
4581 u32 val32, val32_2, val32_3, val32_4, val32_5;
4582 u16 val16;
4583 int i;
4585 pci_read_config_word(tp->pdev, PCI_STATUS, &val16);
4586 pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE, &val32);
4587 printk("DEBUG: PCI status [%04x] TG3PCI state[%08x]\n",
4588 val16, val32);
4590 /* MAC block */
4591 printk("DEBUG: MAC_MODE[%08x] MAC_STATUS[%08x]\n",
4592 tr32(MAC_MODE), tr32(MAC_STATUS));
4593 printk(" MAC_EVENT[%08x] MAC_LED_CTRL[%08x]\n",
4594 tr32(MAC_EVENT), tr32(MAC_LED_CTRL));
4595 printk("DEBUG: MAC_TX_MODE[%08x] MAC_TX_STATUS[%08x]\n",
4596 tr32(MAC_TX_MODE), tr32(MAC_TX_STATUS));
4597 printk(" MAC_RX_MODE[%08x] MAC_RX_STATUS[%08x]\n",
4598 tr32(MAC_RX_MODE), tr32(MAC_RX_STATUS));
4600 /* Send data initiator control block */
4601 printk("DEBUG: SNDDATAI_MODE[%08x] SNDDATAI_STATUS[%08x]\n",
4602 tr32(SNDDATAI_MODE), tr32(SNDDATAI_STATUS));
4603 printk(" SNDDATAI_STATSCTRL[%08x]\n",
4604 tr32(SNDDATAI_STATSCTRL));
4606 /* Send data completion control block */
4607 printk("DEBUG: SNDDATAC_MODE[%08x]\n", tr32(SNDDATAC_MODE));
4609 /* Send BD ring selector block */
4610 printk("DEBUG: SNDBDS_MODE[%08x] SNDBDS_STATUS[%08x]\n",
4611 tr32(SNDBDS_MODE), tr32(SNDBDS_STATUS));
4613 /* Send BD initiator control block */
4614 printk("DEBUG: SNDBDI_MODE[%08x] SNDBDI_STATUS[%08x]\n",
4615 tr32(SNDBDI_MODE), tr32(SNDBDI_STATUS));
4617 /* Send BD completion control block */
4618 printk("DEBUG: SNDBDC_MODE[%08x]\n", tr32(SNDBDC_MODE));
4620 /* Receive list placement control block */
4621 printk("DEBUG: RCVLPC_MODE[%08x] RCVLPC_STATUS[%08x]\n",
4622 tr32(RCVLPC_MODE), tr32(RCVLPC_STATUS));
4623 printk(" RCVLPC_STATSCTRL[%08x]\n",
4624 tr32(RCVLPC_STATSCTRL));
4626 /* Receive data and receive BD initiator control block */
4627 printk("DEBUG: RCVDBDI_MODE[%08x] RCVDBDI_STATUS[%08x]\n",
4628 tr32(RCVDBDI_MODE), tr32(RCVDBDI_STATUS));
4630 /* Receive data completion control block */
4631 printk("DEBUG: RCVDCC_MODE[%08x]\n",
4632 tr32(RCVDCC_MODE));
4634 /* Receive BD initiator control block */
4635 printk("DEBUG: RCVBDI_MODE[%08x] RCVBDI_STATUS[%08x]\n",
4636 tr32(RCVBDI_MODE), tr32(RCVBDI_STATUS));
4638 /* Receive BD completion control block */
4639 printk("DEBUG: RCVCC_MODE[%08x] RCVCC_STATUS[%08x]\n",
4640 tr32(RCVCC_MODE), tr32(RCVCC_STATUS));
4642 /* Receive list selector control block */
4643 printk("DEBUG: RCVLSC_MODE[%08x] RCVLSC_STATUS[%08x]\n",
4644 tr32(RCVLSC_MODE), tr32(RCVLSC_STATUS));
4646 /* Mbuf cluster free block */
4647 printk("DEBUG: MBFREE_MODE[%08x] MBFREE_STATUS[%08x]\n",
4648 tr32(MBFREE_MODE), tr32(MBFREE_STATUS));
4650 /* Host coalescing control block */
4651 printk("DEBUG: HOSTCC_MODE[%08x] HOSTCC_STATUS[%08x]\n",
4652 tr32(HOSTCC_MODE), tr32(HOSTCC_STATUS));
4653 printk("DEBUG: HOSTCC_STATS_BLK_HOST_ADDR[%08x%08x]\n",
4654 tr32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH),
4655 tr32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW));
4656 printk("DEBUG: HOSTCC_STATUS_BLK_HOST_ADDR[%08x%08x]\n",
4657 tr32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH),
4658 tr32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW));
4659 printk("DEBUG: HOSTCC_STATS_BLK_NIC_ADDR[%08x]\n",
4660 tr32(HOSTCC_STATS_BLK_NIC_ADDR));
4661 printk("DEBUG: HOSTCC_STATUS_BLK_NIC_ADDR[%08x]\n",
4662 tr32(HOSTCC_STATUS_BLK_NIC_ADDR));
4664 /* Memory arbiter control block */
4665 printk("DEBUG: MEMARB_MODE[%08x] MEMARB_STATUS[%08x]\n",
4666 tr32(MEMARB_MODE), tr32(MEMARB_STATUS));
4668 /* Buffer manager control block */
4669 printk("DEBUG: BUFMGR_MODE[%08x] BUFMGR_STATUS[%08x]\n",
4670 tr32(BUFMGR_MODE), tr32(BUFMGR_STATUS));
4671 printk("DEBUG: BUFMGR_MB_POOL_ADDR[%08x] BUFMGR_MB_POOL_SIZE[%08x]\n",
4672 tr32(BUFMGR_MB_POOL_ADDR), tr32(BUFMGR_MB_POOL_SIZE));
4673 printk("DEBUG: BUFMGR_DMA_DESC_POOL_ADDR[%08x] "
4674 "BUFMGR_DMA_DESC_POOL_SIZE[%08x]\n",
4675 tr32(BUFMGR_DMA_DESC_POOL_ADDR),
4676 tr32(BUFMGR_DMA_DESC_POOL_SIZE));
4678 /* Read DMA control block */
4679 printk("DEBUG: RDMAC_MODE[%08x] RDMAC_STATUS[%08x]\n",
4680 tr32(RDMAC_MODE), tr32(RDMAC_STATUS));
4682 /* Write DMA control block */
4683 printk("DEBUG: WDMAC_MODE[%08x] WDMAC_STATUS[%08x]\n",
4684 tr32(WDMAC_MODE), tr32(WDMAC_STATUS));
4686 /* DMA completion block */
4687 printk("DEBUG: DMAC_MODE[%08x]\n",
4688 tr32(DMAC_MODE));
4690 /* GRC block */
4691 printk("DEBUG: GRC_MODE[%08x] GRC_MISC_CFG[%08x]\n",
4692 tr32(GRC_MODE), tr32(GRC_MISC_CFG));
4693 printk("DEBUG: GRC_LOCAL_CTRL[%08x]\n",
4694 tr32(GRC_LOCAL_CTRL));
4696 /* TG3_BDINFOs */
4697 printk("DEBUG: RCVDBDI_JUMBO_BD[%08x%08x:%08x:%08x]\n",
4698 tr32(RCVDBDI_JUMBO_BD + 0x0),
4699 tr32(RCVDBDI_JUMBO_BD + 0x4),
4700 tr32(RCVDBDI_JUMBO_BD + 0x8),
4701 tr32(RCVDBDI_JUMBO_BD + 0xc));
4702 printk("DEBUG: RCVDBDI_STD_BD[%08x%08x:%08x:%08x]\n",
4703 tr32(RCVDBDI_STD_BD + 0x0),
4704 tr32(RCVDBDI_STD_BD + 0x4),
4705 tr32(RCVDBDI_STD_BD + 0x8),
4706 tr32(RCVDBDI_STD_BD + 0xc));
4707 printk("DEBUG: RCVDBDI_MINI_BD[%08x%08x:%08x:%08x]\n",
4708 tr32(RCVDBDI_MINI_BD + 0x0),
4709 tr32(RCVDBDI_MINI_BD + 0x4),
4710 tr32(RCVDBDI_MINI_BD + 0x8),
4711 tr32(RCVDBDI_MINI_BD + 0xc));
4713 tg3_read_mem(tp, NIC_SRAM_SEND_RCB + 0x0, &val32);
4714 tg3_read_mem(tp, NIC_SRAM_SEND_RCB + 0x4, &val32_2);
4715 tg3_read_mem(tp, NIC_SRAM_SEND_RCB + 0x8, &val32_3);
4716 tg3_read_mem(tp, NIC_SRAM_SEND_RCB + 0xc, &val32_4);
4717 printk("DEBUG: SRAM_SEND_RCB_0[%08x%08x:%08x:%08x]\n",
4718 val32, val32_2, val32_3, val32_4);
4720 tg3_read_mem(tp, NIC_SRAM_RCV_RET_RCB + 0x0, &val32);
4721 tg3_read_mem(tp, NIC_SRAM_RCV_RET_RCB + 0x4, &val32_2);
4722 tg3_read_mem(tp, NIC_SRAM_RCV_RET_RCB + 0x8, &val32_3);
4723 tg3_read_mem(tp, NIC_SRAM_RCV_RET_RCB + 0xc, &val32_4);
4724 printk("DEBUG: SRAM_RCV_RET_RCB_0[%08x%08x:%08x:%08x]\n",
4725 val32, val32_2, val32_3, val32_4);
4727 tg3_read_mem(tp, NIC_SRAM_STATUS_BLK + 0x0, &val32);
4728 tg3_read_mem(tp, NIC_SRAM_STATUS_BLK + 0x4, &val32_2);
4729 tg3_read_mem(tp, NIC_SRAM_STATUS_BLK + 0x8, &val32_3);
4730 tg3_read_mem(tp, NIC_SRAM_STATUS_BLK + 0xc, &val32_4);
4731 tg3_read_mem(tp, NIC_SRAM_STATUS_BLK + 0x10, &val32_5);
4732 printk("DEBUG: SRAM_STATUS_BLK[%08x:%08x:%08x:%08x:%08x]\n",
4733 val32, val32_2, val32_3, val32_4, val32_5);
4735 /* SW status block */
4736 printk("DEBUG: Host status block [%08x:%08x:(%04x:%04x:%04x):(%04x:%04x)]\n",
4737 tp->hw_status->status,
4738 tp->hw_status->status_tag,
4739 tp->hw_status->rx_jumbo_consumer,
4740 tp->hw_status->rx_consumer,
4741 tp->hw_status->rx_mini_consumer,
4742 tp->hw_status->idx[0].rx_producer,
4743 tp->hw_status->idx[0].tx_consumer);
4745 /* SW statistics block */
4746 printk("DEBUG: Host statistics block [%08x:%08x:%08x:%08x]\n",
4747 ((u32 *)tp->hw_stats)[0],
4748 ((u32 *)tp->hw_stats)[1],
4749 ((u32 *)tp->hw_stats)[2],
4750 ((u32 *)tp->hw_stats)[3]);
4752 /* Mailboxes */
4753 printk("DEBUG: SNDHOST_PROD[%08x%08x] SNDNIC_PROD[%08x%08x]\n",
4754 tr32(MAILBOX_SNDHOST_PROD_IDX_0 + 0x0),
4755 tr32(MAILBOX_SNDHOST_PROD_IDX_0 + 0x4),
4756 tr32(MAILBOX_SNDNIC_PROD_IDX_0 + 0x0),
4757 tr32(MAILBOX_SNDNIC_PROD_IDX_0 + 0x4));
4759 /* NIC side send descriptors. */
4760 for (i = 0; i < 6; i++) {
4761 unsigned long txd;
4763 txd = tp->regs + NIC_SRAM_WIN_BASE + NIC_SRAM_TX_BUFFER_DESC
4764 + (i * sizeof(struct tg3_tx_buffer_desc));
4765 printk("DEBUG: NIC TXD(%d)[%08x:%08x:%08x:%08x]\n",
4766 i,
4767 readl(txd + 0x0), readl(txd + 0x4),
4768 readl(txd + 0x8), readl(txd + 0xc));
4769 }
4771 /* NIC side RX descriptors. */
4772 for (i = 0; i < 6; i++) {
4773 unsigned long rxd;
4775 rxd = tp->regs + NIC_SRAM_WIN_BASE + NIC_SRAM_RX_BUFFER_DESC
4776 + (i * sizeof(struct tg3_rx_buffer_desc));
4777 printk("DEBUG: NIC RXD_STD(%d)[0][%08x:%08x:%08x:%08x]\n",
4778 i,
4779 readl(rxd + 0x0), readl(rxd + 0x4),
4780 readl(rxd + 0x8), readl(rxd + 0xc));
4781 rxd += (4 * sizeof(u32));
4782 printk("DEBUG: NIC RXD_STD(%d)[1][%08x:%08x:%08x:%08x]\n",
4783 i,
4784 readl(rxd + 0x0), readl(rxd + 0x4),
4785 readl(rxd + 0x8), readl(rxd + 0xc));
4786 }
4788 for (i = 0; i < 6; i++) {
4789 unsigned long rxd;
4791 rxd = tp->regs + NIC_SRAM_WIN_BASE + NIC_SRAM_RX_JUMBO_BUFFER_DESC
4792 + (i * sizeof(struct tg3_rx_buffer_desc));
4793 printk("DEBUG: NIC RXD_JUMBO(%d)[0][%08x:%08x:%08x:%08x]\n",
4794 i,
4795 readl(rxd + 0x0), readl(rxd + 0x4),
4796 readl(rxd + 0x8), readl(rxd + 0xc));
4797 rxd += (4 * sizeof(u32));
4798 printk("DEBUG: NIC RXD_JUMBO(%d)[1][%08x:%08x:%08x:%08x]\n",
4799 i,
4800 readl(rxd + 0x0), readl(rxd + 0x4),
4801 readl(rxd + 0x8), readl(rxd + 0xc));
4802 }
4803 }
4804 #endif
4806 static struct net_device_stats *tg3_get_stats(struct net_device *);
4808 static int tg3_close(struct net_device *dev)
4809 {
4810 struct tg3 *tp = dev->priv;
4812 netif_stop_queue(dev);
4814 del_timer_sync(&tp->timer);
4816 spin_lock_irq(&tp->lock);
4817 spin_lock(&tp->tx_lock);
4818 #if 0
4819 tg3_dump_state(tp);
4820 #endif
4822 tg3_disable_ints(tp);
4824 tg3_halt(tp);
4825 tg3_free_rings(tp);
4826 tp->tg3_flags &=
4827 ~(TG3_FLAG_INIT_COMPLETE |
4828 TG3_FLAG_GOT_SERDES_FLOWCTL);
4829 netif_carrier_off(tp->dev);
4831 spin_unlock(&tp->tx_lock);
4832 spin_unlock_irq(&tp->lock);
4834 free_irq(dev->irq, dev);
4836 memcpy(&tp->net_stats_prev, tg3_get_stats(tp->dev),
4837 sizeof(tp->net_stats_prev));
4839 tg3_free_consistent(tp);
4841 return 0;
4842 }
4844 static inline unsigned long get_stat64(tg3_stat64_t *val)
4845 {
4846 unsigned long ret;
4848 #if (BITS_PER_LONG == 32)
4849 ret = val->low;
4850 #else
4851 ret = ((u64)val->high << 32) | ((u64)val->low);
4852 #endif
4853 return ret;
4854 }
4856 static unsigned long calc_crc_errors(struct tg3 *tp)
4857 {
4858 struct tg3_hw_stats *hw_stats = tp->hw_stats;
4860 if (tp->phy_id != PHY_ID_SERDES &&
4861 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
4862 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701)) {
4863 unsigned long flags;
4864 u32 val;
4866 spin_lock_irqsave(&tp->lock, flags);
4867 tg3_readphy(tp, 0x1e, &val);
4868 tg3_writephy(tp, 0x1e, val | 0x8000);
4869 tg3_readphy(tp, 0x14, &val);
4870 spin_unlock_irqrestore(&tp->lock, flags);
4872 tp->phy_crc_errors += val;
4874 return tp->phy_crc_errors;
4875 }
4877 return get_stat64(&hw_stats->rx_fcs_errors);
4878 }
4880 static struct net_device_stats *tg3_get_stats(struct net_device *dev)
4881 {
4882 struct tg3 *tp = dev->priv;
4883 struct net_device_stats *stats = &tp->net_stats;
4884 struct net_device_stats *old_stats = &tp->net_stats_prev;
4885 struct tg3_hw_stats *hw_stats = tp->hw_stats;
4887 if (!hw_stats)
4888 return old_stats;
4890 stats->rx_packets = old_stats->rx_packets +
4891 get_stat64(&hw_stats->rx_ucast_packets) +
4892 get_stat64(&hw_stats->rx_mcast_packets) +
4893 get_stat64(&hw_stats->rx_bcast_packets);
4895 stats->tx_packets = old_stats->tx_packets +
4896 get_stat64(&hw_stats->COS_out_packets[0]);
4898 stats->rx_bytes = old_stats->rx_bytes +
4899 get_stat64(&hw_stats->rx_octets);
4900 stats->tx_bytes = old_stats->tx_bytes +
4901 get_stat64(&hw_stats->tx_octets);
4903 stats->rx_errors = old_stats->rx_errors +
4904 get_stat64(&hw_stats->rx_errors);
4905 stats->tx_errors = old_stats->tx_errors +
4906 get_stat64(&hw_stats->tx_errors) +
4907 get_stat64(&hw_stats->tx_mac_errors) +
4908 get_stat64(&hw_stats->tx_carrier_sense_errors) +
4909 get_stat64(&hw_stats->tx_discards);
4911 stats->multicast = old_stats->multicast +
4912 get_stat64(&hw_stats->rx_mcast_packets);
4913 stats->collisions = old_stats->collisions +
4914 get_stat64(&hw_stats->tx_collisions);
4916 stats->rx_length_errors = old_stats->rx_length_errors +
4917 get_stat64(&hw_stats->rx_frame_too_long_errors) +
4918 get_stat64(&hw_stats->rx_undersize_packets);
4920 stats->rx_over_errors = old_stats->rx_over_errors +
4921 get_stat64(&hw_stats->rxbds_empty);
4922 stats->rx_frame_errors = old_stats->rx_frame_errors +
4923 get_stat64(&hw_stats->rx_align_errors);
4924 stats->tx_aborted_errors = old_stats->tx_aborted_errors +
4925 get_stat64(&hw_stats->tx_discards);
4926 stats->tx_carrier_errors = old_stats->tx_carrier_errors +
4927 get_stat64(&hw_stats->tx_carrier_sense_errors);
4929 stats->rx_crc_errors = old_stats->rx_crc_errors +
4930 calc_crc_errors(tp);
4932 return stats;
4933 }
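4934 /* calc_crc() below is the standard little-endian CRC32 (polynomial 0xedb88320) used for the Ethernet FCS; the multicast hash filter is derived from it. */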
4935 static inline u32 calc_crc(unsigned char *buf, int len)
4936 {
4937 u32 reg;
4938 u32 tmp;
4939 int j, k;
4941 reg = 0xffffffff;
4943 for (j = 0; j < len; j++) {
4944 reg ^= buf[j];
4946 for (k = 0; k < 8; k++) {
4947 tmp = reg & 0x01;
4949 reg >>= 1;
4951 if (tmp) {
4952 reg ^= 0xedb88320;
4953 }
4954 }
4955 }
4957 return ~reg;
4958 }
4960 static void tg3_set_multi(struct tg3 *tp, unsigned int accept_all)
4961 {
4962 /* accept or reject all multicast frames */
4963 tw32(MAC_HASH_REG_0, accept_all ? 0xffffffff : 0);
4964 tw32(MAC_HASH_REG_1, accept_all ? 0xffffffff : 0);
4965 tw32(MAC_HASH_REG_2, accept_all ? 0xffffffff : 0);
4966 tw32(MAC_HASH_REG_3, accept_all ? 0xffffffff : 0);
4967 }
4969 static void __tg3_set_rx_mode(struct net_device *dev)
4970 {
4971 struct tg3 *tp = dev->priv;
4972 u32 rx_mode;
4974 rx_mode = tp->rx_mode & ~(RX_MODE_PROMISC |
4975 RX_MODE_KEEP_VLAN_TAG);
4976 #if TG3_VLAN_TAG_USED
4977 if (!tp->vlgrp)
4978 rx_mode |= RX_MODE_KEEP_VLAN_TAG;
4979 #else
4980 /* By definition, VLAN is always disabled in this
4981 * case.
4982 */
4983 rx_mode |= RX_MODE_KEEP_VLAN_TAG;
4984 #endif
4986 if (dev->flags & IFF_PROMISC) {
4987 /* Promiscuous mode. */
4988 rx_mode |= RX_MODE_PROMISC;
4989 } else if (dev->flags & IFF_ALLMULTI) {
4990 /* Accept all multicast. */
4991 tg3_set_multi (tp, 1);
4992 } else if (dev->mc_count < 1) {
4993 /* Reject all multicast. */
4994 tg3_set_multi (tp, 0);
4995 } else {
4996 /* Accept one or more multicast(s). */
4997 struct dev_mc_list *mclist;
4998 unsigned int i;
4999 u32 mc_filter[4] = { 0, };
5000 u32 regidx;
5001 u32 bit;
5002 u32 crc;
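5003 /* The low 7 bits of each address's inverted CRC select one of 128 hash-filter bits, spread across the four 32-bit hash registers. */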
5004 for (i = 0, mclist = dev->mc_list; mclist && i < dev->mc_count;
5005 i++, mclist = mclist->next) {
5007 crc = calc_crc (mclist->dmi_addr, ETH_ALEN);
5008 bit = ~crc & 0x7f;
5009 regidx = (bit & 0x60) >> 5;
5010 bit &= 0x1f;
5011 mc_filter[regidx] |= (1 << bit);
5012 }
5014 tw32(MAC_HASH_REG_0, mc_filter[0]);
5015 tw32(MAC_HASH_REG_1, mc_filter[1]);
5016 tw32(MAC_HASH_REG_2, mc_filter[2]);
5017 tw32(MAC_HASH_REG_3, mc_filter[3]);
5018 }
5020 if (rx_mode != tp->rx_mode) {
5021 tp->rx_mode = rx_mode;
5022 tw32(MAC_RX_MODE, rx_mode);
5023 tr32(MAC_RX_MODE);
5024 udelay(10);
5025 }
5026 }
5028 static void tg3_set_rx_mode(struct net_device *dev)
5029 {
5030 struct tg3 *tp = dev->priv;
5032 spin_lock_irq(&tp->lock);
5033 __tg3_set_rx_mode(dev);
5034 spin_unlock_irq(&tp->lock);
5035 }
5037 #define TG3_REGDUMP_LEN (32 * 1024)
5039 static u8 *tg3_get_regs(struct tg3 *tp)
5040 {
5041 u8 *orig_p = kmalloc(TG3_REGDUMP_LEN, GFP_KERNEL);
5042 u8 *p;
5043 int i;
5045 if (orig_p == NULL)
5046 return NULL;
5048 memset(orig_p, 0, TG3_REGDUMP_LEN);
5050 spin_lock_irq(&tp->lock);
5051 spin_lock(&tp->tx_lock);
5053 #define __GET_REG32(reg) (*((u32 *)(p))++ = tr32(reg))
5054 #define GET_REG32_LOOP(base,len) \
5055 do { p = orig_p + (base); \
5056 for (i = 0; i < len; i += 4) \
5057 __GET_REG32((base) + i); \
5058 } while (0)
5059 #define GET_REG32_1(reg) \
5060 do { p = orig_p + (reg); \
5061 __GET_REG32((reg)); \
5062 } while (0)
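5063 /* Each helper stores registers into the dump buffer at the same offset they occupy in register space, so unread gaps stay zero-filled. */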
5064 GET_REG32_LOOP(TG3PCI_VENDOR, 0xb0);
5065 GET_REG32_LOOP(MAILBOX_INTERRUPT_0, 0x200);
5066 GET_REG32_LOOP(MAC_MODE, 0x4f0);
5067 GET_REG32_LOOP(SNDDATAI_MODE, 0xe0);
5068 GET_REG32_1(SNDDATAC_MODE);
5069 GET_REG32_LOOP(SNDBDS_MODE, 0x80);
5070 GET_REG32_LOOP(SNDBDI_MODE, 0x48);
5071 GET_REG32_1(SNDBDC_MODE);
5072 GET_REG32_LOOP(RCVLPC_MODE, 0x20);
5073 GET_REG32_LOOP(RCVLPC_SELLST_BASE, 0x15c);
5074 GET_REG32_LOOP(RCVDBDI_MODE, 0x0c);
5075 GET_REG32_LOOP(RCVDBDI_JUMBO_BD, 0x3c);
5076 GET_REG32_LOOP(RCVDBDI_BD_PROD_IDX_0, 0x44);
5077 GET_REG32_1(RCVDCC_MODE);
5078 GET_REG32_LOOP(RCVBDI_MODE, 0x20);
5079 GET_REG32_LOOP(RCVCC_MODE, 0x14);
5080 GET_REG32_LOOP(RCVLSC_MODE, 0x08);
5081 GET_REG32_1(MBFREE_MODE);
5082 GET_REG32_LOOP(HOSTCC_MODE, 0x100);
5083 GET_REG32_LOOP(MEMARB_MODE, 0x10);
5084 GET_REG32_LOOP(BUFMGR_MODE, 0x58);
5085 GET_REG32_LOOP(RDMAC_MODE, 0x08);
5086 GET_REG32_LOOP(WDMAC_MODE, 0x08);
5087 GET_REG32_LOOP(RX_CPU_BASE, 0x280);
5088 GET_REG32_LOOP(TX_CPU_BASE, 0x280);
5089 GET_REG32_LOOP(GRCMBOX_INTERRUPT_0, 0x110);
5090 GET_REG32_LOOP(FTQ_RESET, 0x120);
5091 GET_REG32_LOOP(MSGINT_MODE, 0x0c);
5092 GET_REG32_1(DMAC_MODE);
5093 GET_REG32_LOOP(GRC_MODE, 0x4c);
5094 GET_REG32_LOOP(NVRAM_CMD, 0x24);
5096 #undef __GET_REG32
5097 #undef GET_REG32_LOOP
5098 #undef GET_REG32_1
5100 spin_unlock(&tp->tx_lock);
5101 spin_unlock_irq(&tp->lock);
5103 return orig_p;
5104 }
5106 static int tg3_ethtool_ioctl (struct net_device *dev, void *useraddr)
5107 {
5108 struct tg3 *tp = dev->priv;
5109 struct pci_dev *pci_dev = tp->pdev;
5110 u32 ethcmd;
5112 if (copy_from_user (&ethcmd, useraddr, sizeof (ethcmd)))
5113 return -EFAULT;
5115 switch (ethcmd) {
5116 case ETHTOOL_GDRVINFO:{
5117 struct ethtool_drvinfo info = { ETHTOOL_GDRVINFO };
5118 strcpy (info.driver, DRV_MODULE_NAME);
5119 strcpy (info.version, DRV_MODULE_VERSION);
5120 memset(&info.fw_version, 0, sizeof(info.fw_version));
5121 strcpy (info.bus_info, pci_dev->slot_name);
5122 info.eedump_len = 0;
5123 info.regdump_len = TG3_REGDUMP_LEN;
5124 if (copy_to_user (useraddr, &info, sizeof (info)))
5125 return -EFAULT;
5126 return 0;
5127 }
5129 case ETHTOOL_GSET: {
5130 struct ethtool_cmd cmd = { ETHTOOL_GSET };
5132 if (!(tp->tg3_flags & TG3_FLAG_INIT_COMPLETE) ||
5133 tp->link_config.phy_is_low_power)
5134 return -EAGAIN;
5135 cmd.supported = (SUPPORTED_Autoneg);
5137 if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY))
5138 cmd.supported |= (SUPPORTED_1000baseT_Half |
5139 SUPPORTED_1000baseT_Full);
5141 if (tp->phy_id != PHY_ID_SERDES)
5142 cmd.supported |= (SUPPORTED_100baseT_Half |
5143 SUPPORTED_100baseT_Full |
5144 SUPPORTED_10baseT_Half |
5145 SUPPORTED_10baseT_Full |
5146 SUPPORTED_MII);
5147 else
5148 cmd.supported |= SUPPORTED_FIBRE;
5150 cmd.advertising = tp->link_config.advertising;
5151 cmd.speed = tp->link_config.active_speed;
5152 cmd.duplex = tp->link_config.active_duplex;
5153 cmd.port = 0;
5154 cmd.phy_address = PHY_ADDR;
5155 cmd.transceiver = 0;
5156 cmd.autoneg = tp->link_config.autoneg;
5157 cmd.maxtxpkt = 0;
5158 cmd.maxrxpkt = 0;
5159 if (copy_to_user(useraddr, &cmd, sizeof(cmd)))
5160 return -EFAULT;
5161 return 0;
5162 }
5163 case ETHTOOL_SSET: {
5164 struct ethtool_cmd cmd;
5166 if (!(tp->tg3_flags & TG3_FLAG_INIT_COMPLETE) ||
5167 tp->link_config.phy_is_low_power)
5168 return -EAGAIN;
5170 if (copy_from_user(&cmd, useraddr, sizeof(cmd)))
5171 return -EFAULT;
5173 /* Fiber PHY only supports 1000 full/half */
5174 if (cmd.autoneg == AUTONEG_ENABLE) {
5175 if (tp->phy_id == PHY_ID_SERDES &&
5176 (cmd.advertising &
5177 (ADVERTISED_10baseT_Half |
5178 ADVERTISED_10baseT_Full |
5179 ADVERTISED_100baseT_Half |
5180 ADVERTISED_100baseT_Full)))
5181 return -EINVAL;
5182 if ((tp->tg3_flags & TG3_FLAG_10_100_ONLY) &&
5183 (cmd.advertising &
5184 (ADVERTISED_1000baseT_Half |
5185 ADVERTISED_1000baseT_Full)))
5186 return -EINVAL;
5187 } else {
5188 if (tp->phy_id == PHY_ID_SERDES &&
5189 (cmd.speed == SPEED_10 ||
5190 cmd.speed == SPEED_100))
5191 return -EINVAL;
5192 if ((tp->tg3_flags & TG3_FLAG_10_100_ONLY) &&
5193 (cmd.speed == SPEED_10 ||
5194 cmd.speed == SPEED_100))
5195 return -EINVAL;
5198 spin_lock_irq(&tp->lock);
5199 spin_lock(&tp->tx_lock);
5201 tp->link_config.autoneg = cmd.autoneg;
5202 if (cmd.autoneg == AUTONEG_ENABLE) {
5203 tp->link_config.advertising = cmd.advertising;
5204 tp->link_config.speed = SPEED_INVALID;
5205 tp->link_config.duplex = DUPLEX_INVALID;
5206 } else {
5207 tp->link_config.speed = cmd.speed;
5208 tp->link_config.duplex = cmd.duplex;
5211 tg3_setup_phy(tp);
5212 spin_unlock(&tp->tx_lock);
5213 spin_unlock_irq(&tp->lock);
5215 return 0;
5216 }
5218 case ETHTOOL_GREGS: {
5219 struct ethtool_regs regs;
5220 u8 *regbuf;
5221 int ret;
5223 if (copy_from_user(&regs, useraddr, sizeof(regs)))
5224 return -EFAULT;
5225 if (regs.len > TG3_REGDUMP_LEN)
5226 regs.len = TG3_REGDUMP_LEN;
5227 regs.version = 0;
5228 if (copy_to_user(useraddr, &regs, sizeof(regs)))
5229 return -EFAULT;
5231 regbuf = tg3_get_regs(tp);
5232 if (!regbuf)
5233 return -ENOMEM;
5235 useraddr += offsetof(struct ethtool_regs, data);
5236 ret = 0;
5237 if (copy_to_user(useraddr, regbuf, regs.len))
5238 ret = -EFAULT;
5239 kfree(regbuf);
5240 return ret;
5241 }
5242 case ETHTOOL_GWOL: {
5243 struct ethtool_wolinfo wol = { ETHTOOL_GWOL };
5245 wol.supported = WAKE_MAGIC;
5246 wol.wolopts = 0;
5247 if (tp->tg3_flags & TG3_FLAG_WOL_ENABLE)
5248 wol.wolopts = WAKE_MAGIC;
5249 memset(&wol.sopass, 0, sizeof(wol.sopass));
5250 if (copy_to_user(useraddr, &wol, sizeof(wol)))
5251 return -EFAULT;
5252 return 0;
5253 }
5254 case ETHTOOL_SWOL: {
5255 struct ethtool_wolinfo wol;
5257 if (copy_from_user(&wol, useraddr, sizeof(wol)))
5258 return -EFAULT;
5259 if (wol.wolopts & ~WAKE_MAGIC)
5260 return -EINVAL;
5261 if ((wol.wolopts & WAKE_MAGIC) &&
5262 tp->phy_id == PHY_ID_SERDES &&
5263 !(tp->tg3_flags & TG3_FLAG_SERDES_WOL_CAP))
5264 return -EINVAL;
5266 spin_lock_irq(&tp->lock);
5267 if (wol.wolopts & WAKE_MAGIC)
5268 tp->tg3_flags |= TG3_FLAG_WOL_ENABLE;
5269 else
5270 tp->tg3_flags &= ~TG3_FLAG_WOL_ENABLE;
5271 spin_unlock_irq(&tp->lock);
5273 return 0;
5274 }
5275 case ETHTOOL_GMSGLVL: {
5276 struct ethtool_value edata = { ETHTOOL_GMSGLVL };
5277 edata.data = tp->msg_enable;
5278 if (copy_to_user(useraddr, &edata, sizeof(edata)))
5279 return -EFAULT;
5280 return 0;
5281 }
5282 case ETHTOOL_SMSGLVL: {
5283 struct ethtool_value edata;
5284 if (copy_from_user(&edata, useraddr, sizeof(edata)))
5285 return -EFAULT;
5286 tp->msg_enable = edata.data;
5287 return 0;
5288 }
5289 case ETHTOOL_NWAY_RST: {
5290 u32 bmcr;
5291 int r;
5293 spin_lock_irq(&tp->lock);
5294 tg3_readphy(tp, MII_BMCR, &bmcr);
5295 tg3_readphy(tp, MII_BMCR, &bmcr);
5296 r = -EINVAL;
5297 if (bmcr & BMCR_ANENABLE) {
5298 tg3_writephy(tp, MII_BMCR,
5299 bmcr | BMCR_ANRESTART);
5300 r = 0;
5301 }
5302 spin_unlock_irq(&tp->lock);
5304 return r;
5305 }
5306 case ETHTOOL_GLINK: {
5307 struct ethtool_value edata = { ETHTOOL_GLINK };
5308 edata.data = netif_carrier_ok(tp->dev) ? 1 : 0;
5309 if (copy_to_user(useraddr, &edata, sizeof(edata)))
5310 return -EFAULT;
5311 return 0;
5312 }
5313 case ETHTOOL_GRINGPARAM: {
5314 struct ethtool_ringparam ering = { ETHTOOL_GRINGPARAM };
5316 ering.rx_max_pending = TG3_RX_RING_SIZE - 1;
5317 ering.rx_mini_max_pending = 0;
5318 ering.rx_jumbo_max_pending = TG3_RX_JUMBO_RING_SIZE - 1;
5320 ering.rx_pending = tp->rx_pending;
5321 ering.rx_mini_pending = 0;
5322 ering.rx_jumbo_pending = tp->rx_jumbo_pending;
5323 ering.tx_pending = tp->tx_pending;
5325 if (copy_to_user(useraddr, &ering, sizeof(ering)))
5326 return -EFAULT;
5327 return 0;
5328 }
5329 case ETHTOOL_SRINGPARAM: {
5330 struct ethtool_ringparam ering;
5332 if (copy_from_user(&ering, useraddr, sizeof(ering)))
5333 return -EFAULT;
5335 if ((ering.rx_pending > TG3_RX_RING_SIZE - 1) ||
5336 (ering.rx_jumbo_pending > TG3_RX_JUMBO_RING_SIZE - 1) ||
5337 (ering.tx_pending > TG3_TX_RING_SIZE - 1))
5338 return -EINVAL;
5340 tg3_netif_stop(tp);
5341 spin_lock_irq(&tp->lock);
5342 spin_lock(&tp->tx_lock);
5344 tp->rx_pending = ering.rx_pending;
5345 tp->rx_jumbo_pending = ering.rx_jumbo_pending;
5346 tp->tx_pending = ering.tx_pending;
5348 tg3_halt(tp);
5349 tg3_init_rings(tp);
5350 tg3_init_hw(tp);
5351 netif_wake_queue(tp->dev);
5352 spin_unlock(&tp->tx_lock);
5353 spin_unlock_irq(&tp->lock);
5354 tg3_netif_start(tp);
5356 return 0;
5357 }
5358 case ETHTOOL_GPAUSEPARAM: {
5359 struct ethtool_pauseparam epause = { ETHTOOL_GPAUSEPARAM };
5361 epause.autoneg =
5362 (tp->tg3_flags & TG3_FLAG_PAUSE_AUTONEG) != 0;
5363 epause.rx_pause =
5364 (tp->tg3_flags & TG3_FLAG_PAUSE_RX) != 0;
5365 epause.tx_pause =
5366 (tp->tg3_flags & TG3_FLAG_PAUSE_TX) != 0;
5367 if (copy_to_user(useraddr, &epause, sizeof(epause)))
5368 return -EFAULT;
5369 return 0;
5370 }
5371 case ETHTOOL_SPAUSEPARAM: {
5372 struct ethtool_pauseparam epause;
5374 if (copy_from_user(&epause, useraddr, sizeof(epause)))
5375 return -EFAULT;
5377 tg3_netif_stop(tp);
5378 spin_lock_irq(&tp->lock);
5379 spin_lock(&tp->tx_lock);
5380 if (epause.autoneg)
5381 tp->tg3_flags |= TG3_FLAG_PAUSE_AUTONEG;
5382 else
5383 tp->tg3_flags &= ~TG3_FLAG_PAUSE_AUTONEG;
5384 if (epause.rx_pause)
5385 tp->tg3_flags |= TG3_FLAG_PAUSE_RX;
5386 else
5387 tp->tg3_flags &= ~TG3_FLAG_PAUSE_RX;
5388 if (epause.tx_pause)
5389 tp->tg3_flags |= TG3_FLAG_PAUSE_TX;
5390 else
5391 tp->tg3_flags &= ~TG3_FLAG_PAUSE_TX;
5392 tg3_halt(tp);
5393 tg3_init_rings(tp);
5394 tg3_init_hw(tp);
5395 spin_unlock(&tp->tx_lock);
5396 spin_unlock_irq(&tp->lock);
5397 tg3_netif_start(tp);
5399 return 0;
5400 }
5401 case ETHTOOL_GRXCSUM: {
5402 struct ethtool_value edata = { ETHTOOL_GRXCSUM };
5404 edata.data =
5405 (tp->tg3_flags & TG3_FLAG_RX_CHECKSUMS) != 0;
5406 if (copy_to_user(useraddr, &edata, sizeof(edata)))
5407 return -EFAULT;
5408 return 0;
5409 }
5410 case ETHTOOL_SRXCSUM: {
5411 struct ethtool_value edata;
5413 if (copy_from_user(&edata, useraddr, sizeof(edata)))
5414 return -EFAULT;
5416 if (tp->tg3_flags & TG3_FLAG_BROKEN_CHECKSUMS) {
5417 if (edata.data != 0)
5418 return -EINVAL;
5419 return 0;
5420 }
5422 spin_lock_irq(&tp->lock);
5423 if (edata.data)
5424 tp->tg3_flags |= TG3_FLAG_RX_CHECKSUMS;
5425 else
5426 tp->tg3_flags &= ~TG3_FLAG_RX_CHECKSUMS;
5427 spin_unlock_irq(&tp->lock);
5429 return 0;
5430 }
5431 case ETHTOOL_GTXCSUM: {
5432 struct ethtool_value edata = { ETHTOOL_GTXCSUM };
5434 edata.data =
5435 (tp->dev->features & NETIF_F_IP_CSUM) != 0;
5436 if (copy_to_user(useraddr, &edata, sizeof(edata)))
5437 return -EFAULT;
5438 return 0;
5439 }
5440 case ETHTOOL_STXCSUM: {
5441 struct ethtool_value edata;
5443 if (copy_from_user(&edata, useraddr, sizeof(edata)))
5444 return -EFAULT;
5446 if (tp->tg3_flags & TG3_FLAG_BROKEN_CHECKSUMS) {
5447 if (edata.data != 0)
5448 return -EINVAL;
5449 return 0;
5450 }
5452 if (edata.data)
5453 tp->dev->features |= NETIF_F_IP_CSUM;
5454 else
5455 tp->dev->features &= ~NETIF_F_IP_CSUM;
5457 return 0;
5458 }
5459 case ETHTOOL_GSG: {
5460 struct ethtool_value edata = { ETHTOOL_GSG };
5462 edata.data =
5463 (tp->dev->features & NETIF_F_SG) != 0;
5464 if (copy_to_user(useraddr, &edata, sizeof(edata)))
5465 return -EFAULT;
5466 return 0;
5467 }
5468 case ETHTOOL_SSG: {
5469 struct ethtool_value edata;
5471 if (copy_from_user(&edata, useraddr, sizeof(edata)))
5472 return -EFAULT;
5474 if (edata.data)
5475 tp->dev->features |= NETIF_F_SG;
5476 else
5477 tp->dev->features &= ~NETIF_F_SG;
5479 return 0;
5480 }
5481 }
5483 return -EOPNOTSUPP;
5484 }
5486 static int tg3_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
5487 {
5488 struct mii_ioctl_data *data = (struct mii_ioctl_data *)&ifr->ifr_data;
5489 struct tg3 *tp = dev->priv;
5490 int err;
5492 switch(cmd) {
5493 case SIOCETHTOOL:
5494 return tg3_ethtool_ioctl(dev, (void *) ifr->ifr_data);
5495 case SIOCGMIIPHY:
5496 data->phy_id = PHY_ADDR;
5498 /* fallthru */
5499 case SIOCGMIIREG: {
5500 u32 mii_regval;
5502 spin_lock_irq(&tp->lock);
5503 err = tg3_readphy(tp, data->reg_num & 0x1f, &mii_regval);
5504 spin_unlock_irq(&tp->lock);
5506 data->val_out = mii_regval;
5508 return err;
5509 }
5511 case SIOCSMIIREG:
5512 if (!capable(CAP_NET_ADMIN))
5513 return -EPERM;
5515 spin_lock_irq(&tp->lock);
5516 err = tg3_writephy(tp, data->reg_num & 0x1f, data->val_in);
5517 spin_unlock_irq(&tp->lock);
5519 return err;
5521 default:
5522 /* do nothing */
5523 break;
5524 }
5525 return -EOPNOTSUPP;
5526 }
5528 #if TG3_VLAN_TAG_USED
5529 static void tg3_vlan_rx_register(struct net_device *dev, struct vlan_group *grp)
5530 {
5531 struct tg3 *tp = dev->priv;
5533 spin_lock_irq(&tp->lock);
5534 spin_lock(&tp->tx_lock);
5536 tp->vlgrp = grp;
5538 /* Update RX_MODE_KEEP_VLAN_TAG bit in RX_MODE register. */
5539 __tg3_set_rx_mode(dev);
5541 spin_unlock(&tp->tx_lock);
5542 spin_unlock_irq(&tp->lock);
5543 }
5545 static void tg3_vlan_rx_kill_vid(struct net_device *dev, unsigned short vid)
5546 {
5547 struct tg3 *tp = dev->priv;
5549 spin_lock_irq(&tp->lock);
5550 spin_lock(&tp->tx_lock);
5551 if (tp->vlgrp)
5552 tp->vlgrp->vlan_devices[vid] = NULL;
5553 spin_unlock(&tp->tx_lock);
5554 spin_unlock_irq(&tp->lock);
5555 }
5556 #endif
5558 /* Chips other than 5700/5701 use the NVRAM for fetching info. */
5559 static void __devinit tg3_nvram_init(struct tg3 *tp)
5560 {
5561 int j;
5563 tw32(GRC_EEPROM_ADDR,
5564 (EEPROM_ADDR_FSM_RESET |
5565 (EEPROM_DEFAULT_CLOCK_PERIOD <<
5566 EEPROM_ADDR_CLKPERD_SHIFT)));
5568 /* XXX schedule_timeout() ... */
5569 for (j = 0; j < 100; j++)
5570 udelay(10);
5572 /* Enable seeprom accesses. */
5573 tw32(GRC_LOCAL_CTRL,
5574 tr32(GRC_LOCAL_CTRL) | GRC_LCLCTRL_AUTO_SEEPROM);
5575 tr32(GRC_LOCAL_CTRL);
5576 udelay(100);
5578 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
5579 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701) {
5580 u32 nvcfg1 = tr32(NVRAM_CFG1);
5582 tp->tg3_flags |= TG3_FLAG_NVRAM;
5583 if (nvcfg1 & NVRAM_CFG1_FLASHIF_ENAB) {
5584 if (nvcfg1 & NVRAM_CFG1_BUFFERED_MODE)
5585 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
5586 } else {
5587 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
5588 tw32(NVRAM_CFG1, nvcfg1);
5591 } else {
5592 tp->tg3_flags &= ~(TG3_FLAG_NVRAM | TG3_FLAG_NVRAM_BUFFERED);
5596 static int __devinit tg3_nvram_read_using_eeprom(struct tg3 *tp,
5597 u32 offset, u32 *val)
5599 u32 tmp;
5600 int i;
5602 if (offset > EEPROM_ADDR_ADDR_MASK ||
5603 (offset % 4) != 0)
5604 return -EINVAL;
5606 tmp = tr32(GRC_EEPROM_ADDR) & ~(EEPROM_ADDR_ADDR_MASK |
5607 EEPROM_ADDR_DEVID_MASK |
5608 EEPROM_ADDR_READ);
5609 tw32(GRC_EEPROM_ADDR,
5610 tmp |
5611 (0 << EEPROM_ADDR_DEVID_SHIFT) |
5612 ((offset << EEPROM_ADDR_ADDR_SHIFT) &
5613 EEPROM_ADDR_ADDR_MASK) |
5614 EEPROM_ADDR_READ | EEPROM_ADDR_START);
5616 for (i = 0; i < 10000; i++) {
5617 tmp = tr32(GRC_EEPROM_ADDR);
5619 if (tmp & EEPROM_ADDR_COMPLETE)
5620 break;
5621 udelay(100);
5623 if (!(tmp & EEPROM_ADDR_COMPLETE))
5624 return -EBUSY;
5626 *val = tr32(GRC_EEPROM_DATA);
5627 return 0;
5630 static int __devinit tg3_nvram_read(struct tg3 *tp,
5631 u32 offset, u32 *val)
5633 int i, saw_done_clear;
5635 if (!(tp->tg3_flags & TG3_FLAG_NVRAM))
5636 return tg3_nvram_read_using_eeprom(tp, offset, val);
5638 if (tp->tg3_flags & TG3_FLAG_NVRAM_BUFFERED)
5639 offset = ((offset / NVRAM_BUFFERED_PAGE_SIZE) <<
5640 NVRAM_BUFFERED_PAGE_POS) +
5641 (offset % NVRAM_BUFFERED_PAGE_SIZE);
5643 if (offset > NVRAM_ADDR_MSK)
5644 return -EINVAL;
5646 tw32(NVRAM_SWARB, SWARB_REQ_SET1);
5647 for (i = 0; i < 1000; i++) {
5648 if (tr32(NVRAM_SWARB) & SWARB_GNT1)
5649 break;
5650 udelay(20);
5653 tw32(NVRAM_ADDR, offset);
5654 tw32(NVRAM_CMD,
5655 NVRAM_CMD_RD | NVRAM_CMD_GO |
5656 NVRAM_CMD_FIRST | NVRAM_CMD_LAST | NVRAM_CMD_DONE);
5658 /* Wait for the done bit to clear and then set again. */
5659 saw_done_clear = 0;
5660 for (i = 0; i < 1000; i++) {
5661 udelay(10);
5662 if (!saw_done_clear &&
5663 !(tr32(NVRAM_CMD) & NVRAM_CMD_DONE))
5664 saw_done_clear = 1;
5665 else if (saw_done_clear &&
5666 (tr32(NVRAM_CMD) & NVRAM_CMD_DONE))
5667 break;
5669 if (i >= 1000) {
5670 tw32(NVRAM_SWARB, SWARB_REQ_CLR1);
5671 return -EBUSY;
5674 *val = swab32(tr32(NVRAM_RDDATA));
5675 tw32(NVRAM_SWARB, 0x20);
5677 return 0;
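/*
 * Sketch of the buffered-flash address translation done in
 * tg3_nvram_read() above: the linear byte offset is split into a
 * page number and an offset within the page.  The 264-byte page and
 * the bit-9 page position mirror what NVRAM_BUFFERED_PAGE_SIZE and
 * NVRAM_BUFFERED_PAGE_POS are believed to expand to; they are
 * restated as literals here only for illustration (#if 0).
 */
#if 0
static u32 nvram_buffered_addr(u32 offset)
{
	/* e.g. offset 300 -> page 1, byte 36 -> (1 << 9) + 36 = 548 */
	return ((offset / 264) << 9) + (offset % 264);
}
#endif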
5680 struct subsys_tbl_ent {
5681 u16 subsys_vendor, subsys_devid;
5682 u32 phy_id;
5685 static struct subsys_tbl_ent subsys_id_to_phy_id[] = {
5686 /* Broadcom boards. */
5687 { 0x14e4, 0x1644, PHY_ID_BCM5401 }, /* BCM95700A6 */
5688 { 0x14e4, 0x0001, PHY_ID_BCM5701 }, /* BCM95701A5 */
5689 { 0x14e4, 0x0002, PHY_ID_BCM8002 }, /* BCM95700T6 */
5690 { 0x14e4, 0x0003, PHY_ID_SERDES }, /* BCM95700A9 */
5691 { 0x14e4, 0x0005, PHY_ID_BCM5701 }, /* BCM95701T1 */
5692 { 0x14e4, 0x0006, PHY_ID_BCM5701 }, /* BCM95701T8 */
5693 { 0x14e4, 0x0007, PHY_ID_SERDES }, /* BCM95701A7 */
5694 { 0x14e4, 0x0008, PHY_ID_BCM5701 }, /* BCM95701A10 */
5695 { 0x14e4, 0x8008, PHY_ID_BCM5701 }, /* BCM95701A12 */
5696 { 0x14e4, 0x0009, PHY_ID_BCM5701 }, /* BCM95703Ax1 */
5697 { 0x14e4, 0x8009, PHY_ID_BCM5701 }, /* BCM95703Ax2 */
5699 /* 3com boards. */
5700 { PCI_VENDOR_ID_3COM, 0x1000, PHY_ID_BCM5401 }, /* 3C996T */
5701 { PCI_VENDOR_ID_3COM, 0x1006, PHY_ID_BCM5701 }, /* 3C996BT */
5702 /* { PCI_VENDOR_ID_3COM, 0x1002, PHY_ID_XXX }, 3C996CT */
5703 /* { PCI_VENDOR_ID_3COM, 0x1003, PHY_ID_XXX }, 3C997T */
5704 { PCI_VENDOR_ID_3COM, 0x1004, PHY_ID_SERDES }, /* 3C996SX */
5705 /* { PCI_VENDOR_ID_3COM, 0x1005, PHY_ID_XXX }, 3C997SZ */
5706 { PCI_VENDOR_ID_3COM, 0x1007, PHY_ID_BCM5701 }, /* 3C1000T */
5707 { PCI_VENDOR_ID_3COM, 0x1008, PHY_ID_BCM5701 }, /* 3C940BR01 */
5709 /* DELL boards. */
5710 { PCI_VENDOR_ID_DELL, 0x00d1, PHY_ID_BCM5401 }, /* VIPER */
5711 { PCI_VENDOR_ID_DELL, 0x0106, PHY_ID_BCM5401 }, /* JAGUAR */
5712 { PCI_VENDOR_ID_DELL, 0x0109, PHY_ID_BCM5411 }, /* MERLOT */
5713 { PCI_VENDOR_ID_DELL, 0x010a, PHY_ID_BCM5411 }, /* SLIM_MERLOT */
5715 /* Compaq boards. */
5716 { PCI_VENDOR_ID_COMPAQ, 0x007c, PHY_ID_BCM5701 }, /* BANSHEE */
5717 { PCI_VENDOR_ID_COMPAQ, 0x009a, PHY_ID_BCM5701 }, /* BANSHEE_2 */
5718 { PCI_VENDOR_ID_COMPAQ, 0x007d, PHY_ID_SERDES }, /* CHANGELING */
5719 { PCI_VENDOR_ID_COMPAQ, 0x0085, PHY_ID_BCM5701 }, /* NC7780 */
5720 { PCI_VENDOR_ID_COMPAQ, 0x0099, PHY_ID_BCM5701 } /* NC7780_2 */
5723 static int __devinit tg3_phy_probe(struct tg3 *tp)
5725 u32 eeprom_phy_id, hw_phy_id_1, hw_phy_id_2;
5726 u32 hw_phy_id, hw_phy_id_masked;
5727 enum phy_led_mode eeprom_led_mode;
5728 u32 val;
5729 int i, eeprom_signature_found, err;
5731 tp->phy_id = PHY_ID_INVALID;
5732 for (i = 0; i < ARRAY_SIZE(subsys_id_to_phy_id); i++) {
5733 if ((subsys_id_to_phy_id[i].subsys_vendor ==
5734 tp->pdev->subsystem_vendor) &&
5735 (subsys_id_to_phy_id[i].subsys_devid ==
5736 tp->pdev->subsystem_device)) {
5737 tp->phy_id = subsys_id_to_phy_id[i].phy_id;
5738 break;
5742 eeprom_phy_id = PHY_ID_INVALID;
5743 eeprom_led_mode = led_mode_auto;
5744 eeprom_signature_found = 0;
5745 tg3_read_mem(tp, NIC_SRAM_DATA_SIG, &val);
5746 if (val == NIC_SRAM_DATA_SIG_MAGIC) {
5747 u32 nic_cfg;
5749 tg3_read_mem(tp, NIC_SRAM_DATA_CFG, &nic_cfg);
5751 eeprom_signature_found = 1;
5753 if ((nic_cfg & NIC_SRAM_DATA_CFG_PHY_TYPE_MASK) ==
5754 NIC_SRAM_DATA_CFG_PHY_TYPE_FIBER) {
5755 eeprom_phy_id = PHY_ID_SERDES;
5756 } else {
5757 u32 nic_phy_id;
5759 tg3_read_mem(tp, NIC_SRAM_DATA_PHY_ID, &nic_phy_id);
5760 if (nic_phy_id != 0) {
5761 u32 id1 = nic_phy_id & NIC_SRAM_DATA_PHY_ID1_MASK;
5762 u32 id2 = nic_phy_id & NIC_SRAM_DATA_PHY_ID2_MASK;
5764 eeprom_phy_id = (id1 >> 16) << 10;
5765 eeprom_phy_id |= (id2 & 0xfc00) << 16;
5766 eeprom_phy_id |= (id2 & 0x03ff) << 0;
5770 switch (nic_cfg & NIC_SRAM_DATA_CFG_LED_MODE_MASK) {
5771 case NIC_SRAM_DATA_CFG_LED_TRIPLE_SPD:
5772 eeprom_led_mode = led_mode_three_link;
5773 break;
5775 case NIC_SRAM_DATA_CFG_LED_LINK_SPD:
5776 eeprom_led_mode = led_mode_link10;
5777 break;
5779 default:
5780 eeprom_led_mode = led_mode_auto;
5781 break;
5783 if ((tp->pci_chip_rev_id == CHIPREV_ID_5703_A1 ||
5784 tp->pci_chip_rev_id == CHIPREV_ID_5703_A2) &&
5785 (nic_cfg & NIC_SRAM_DATA_CFG_EEPROM_WP))
5786 tp->tg3_flags |= TG3_FLAG_EEPROM_WRITE_PROT;
5788 if (nic_cfg & NIC_SRAM_DATA_CFG_ASF_ENABLE)
5789 tp->tg3_flags |= TG3_FLAG_ENABLE_ASF;
5790 if (nic_cfg & NIC_SRAM_DATA_CFG_FIBER_WOL)
5791 tp->tg3_flags |= TG3_FLAG_SERDES_WOL_CAP;
5794 /* Now read the physical PHY_ID from the chip and verify
5795 * that it is sane. If it doesn't look good, we fall back
5796 * first to the hard-coded table based PHY_ID and, failing
5797 * that, to the value found in the eeprom area.
5799 err = tg3_readphy(tp, MII_PHYSID1, &hw_phy_id_1);
5800 err |= tg3_readphy(tp, MII_PHYSID2, &hw_phy_id_2);
5802 hw_phy_id = (hw_phy_id_1 & 0xffff) << 10;
5803 hw_phy_id |= (hw_phy_id_2 & 0xfc00) << 16;
5804 hw_phy_id |= (hw_phy_id_2 & 0x03ff) << 0;
5806 hw_phy_id_masked = hw_phy_id & PHY_ID_MASK;
5808 if (!err && KNOWN_PHY_ID(hw_phy_id_masked)) {
5809 tp->phy_id = hw_phy_id;
5810 } else {
5811 /* phy_id currently holds the value found in the
5812 * subsys_id_to_phy_id[] table or PHY_ID_INVALID
5813 * if a match was not found there.
5815 if (tp->phy_id == PHY_ID_INVALID) {
5816 if (!eeprom_signature_found ||
5817 !KNOWN_PHY_ID(eeprom_phy_id & PHY_ID_MASK))
5818 return -ENODEV;
5819 tp->phy_id = eeprom_phy_id;
5823 err = tg3_phy_reset(tp, 1);
5824 if (err)
5825 return err;
5827 if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
5828 tp->pci_chip_rev_id == CHIPREV_ID_5701_B0) {
5829 u32 mii_tg3_ctrl;
5831 /* These chips, when reset, only advertise 10Mb
5832 * capabilities. Fix that.
5834 err = tg3_writephy(tp, MII_ADVERTISE,
5835 (ADVERTISE_CSMA |
5836 ADVERTISE_PAUSE_CAP |
5837 ADVERTISE_10HALF |
5838 ADVERTISE_10FULL |
5839 ADVERTISE_100HALF |
5840 ADVERTISE_100FULL));
5841 mii_tg3_ctrl = (MII_TG3_CTRL_ADV_1000_HALF |
5842 MII_TG3_CTRL_ADV_1000_FULL |
5843 MII_TG3_CTRL_AS_MASTER |
5844 MII_TG3_CTRL_ENABLE_AS_MASTER);
5845 if (tp->tg3_flags & TG3_FLAG_10_100_ONLY)
5846 mii_tg3_ctrl = 0;
5848 err |= tg3_writephy(tp, MII_TG3_CTRL, mii_tg3_ctrl);
5849 err |= tg3_writephy(tp, MII_BMCR,
5850 (BMCR_ANRESTART | BMCR_ANENABLE));
5853 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703) {
5854 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0c00);
5855 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x201f);
5856 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x2aaa);
5859 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) &&
5860 (tp->pci_chip_rev_id == CHIPREV_ID_5704_A0)) {
5861 tg3_writephy(tp, 0x1c, 0x8d68);
5862 tg3_writephy(tp, 0x1c, 0x8d68);
5865 /* Enable Ethernet@WireSpeed */
5866 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x7007);
5867 tg3_readphy(tp, MII_TG3_AUX_CTRL, &val);
5868 tg3_writephy(tp, MII_TG3_AUX_CTRL, (val | (1 << 15) | (1 << 4)));
5870 if (!err && ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5401)) {
5871 err = tg3_init_5401phy_dsp(tp);
5874 /* Determine the PHY led mode. */
5875 if (tp->pdev->subsystem_vendor == PCI_VENDOR_ID_DELL) {
5876 tp->led_mode = led_mode_link10;
5877 } else {
5878 tp->led_mode = led_mode_three_link;
5879 if (eeprom_signature_found &&
5880 eeprom_led_mode != led_mode_auto)
5881 tp->led_mode = eeprom_led_mode;
5884 if (tp->phy_id == PHY_ID_SERDES)
5885 tp->link_config.advertising =
5886 (ADVERTISED_1000baseT_Half |
5887 ADVERTISED_1000baseT_Full |
5888 ADVERTISED_Autoneg |
5889 ADVERTISED_FIBRE);
5890 if (tp->tg3_flags & TG3_FLAG_10_100_ONLY)
5891 tp->link_config.advertising &=
5892 ~(ADVERTISED_1000baseT_Half |
5893 ADVERTISED_1000baseT_Full);
5895 return err;
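/*
 * Sketch of the PHY ID packing used twice in tg3_phy_probe() above,
 * once for the ID words read from NIC SRAM and once for the
 * MII_PHYSID1/MII_PHYSID2 registers.  Which ID bits land where
 * follows this driver's own convention; illustration only (#if 0).
 */
#if 0
static u32 tg3_pack_phy_id(u32 id1, u32 id2)
{
	u32 phy_id;

	phy_id  = (id1 & 0xffff) << 10;	/* upper ID bits */
	phy_id |= (id2 & 0xfc00) << 16;	/* top of the lower ID word */
	phy_id |= (id2 & 0x03ff) << 0;	/* model/revision bits */

	return phy_id;	/* callers mask with PHY_ID_MASK to compare */
}
#endif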
5898 static void __devinit tg3_read_partno(struct tg3 *tp)
5900 unsigned char vpd_data[256];
5901 int i;
5903 for (i = 0; i < 256; i += 4) {
5904 u32 tmp;
5906 if (tg3_nvram_read(tp, 0x100 + i, &tmp))
5907 goto out_not_found;
5909 vpd_data[i + 0] = ((tmp >> 0) & 0xff);
5910 vpd_data[i + 1] = ((tmp >> 8) & 0xff);
5911 vpd_data[i + 2] = ((tmp >> 16) & 0xff);
5912 vpd_data[i + 3] = ((tmp >> 24) & 0xff);
5915 /* Now parse and find the part number. */
5916 for (i = 0; i < 256; ) {
5917 unsigned char val = vpd_data[i];
5918 int block_end;
5920 if (val == 0x82 || val == 0x91) {
5921 i = (i + 3 +
5922 (vpd_data[i + 1] +
5923 (vpd_data[i + 2] << 8)));
5924 continue;
5927 if (val != 0x90)
5928 goto out_not_found;
5930 block_end = (i + 3 +
5931 (vpd_data[i + 1] +
5932 (vpd_data[i + 2] << 8)));
5933 i += 3;
5934 while (i < block_end) {
5935 if (vpd_data[i + 0] == 'P' &&
5936 vpd_data[i + 1] == 'N') {
5937 int partno_len = vpd_data[i + 2];
5939 if (partno_len > 24)
5940 goto out_not_found;
5942 memcpy(tp->board_part_number,
5943 &vpd_data[i + 3],
5944 partno_len);
5946 /* Success. */
5947 return;
5951 /* Part number not found. */
5952 goto out_not_found;
5955 out_not_found:
5956 strcpy(tp->board_part_number, "none");
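/*
 * For reference, the PCI VPD stream walked by tg3_read_partno()
 * above is in the large-resource format (lengths are little-endian):
 *
 *   0x82 len_lo len_hi <identifier string>   - skipped over
 *   0x90 len_lo len_hi                       - read-only (VPD-R) block
 *        'P' 'N' len <part number bytes>     - copied if len <= 24
 *        ... further keyword triples ...
 *
 * Tag 0x91 (the read-write block) is skipped the same way as 0x82,
 * and any other leading tag aborts the parse.
 */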
5959 static int __devinit tg3_get_invariants(struct tg3 *tp)
5961 u32 misc_ctrl_reg;
5962 u32 cacheline_sz_reg;
5963 u32 pci_state_reg, grc_misc_cfg;
5964 u16 pci_cmd;
5965 int err;
5967 /* If we have an AMD 762 or Intel ICH/ICH0 chipset, write
5968 * reordering to the mailbox registers done by the host
5969 * controller can cause major troubles. We read back from
5970 * every mailbox register write to force the writes to be
5971 * posted to the chip in order.
5973 if (pci_find_device(PCI_VENDOR_ID_INTEL,
5974 PCI_DEVICE_ID_INTEL_82801AA_8, NULL) ||
5975 pci_find_device(PCI_VENDOR_ID_INTEL,
5976 PCI_DEVICE_ID_INTEL_82801AB_8, NULL) ||
5977 pci_find_device(PCI_VENDOR_ID_AMD,
5978 PCI_DEVICE_ID_AMD_FE_GATE_700C, NULL))
5979 tp->tg3_flags |= TG3_FLAG_MBOX_WRITE_REORDER;
5981 /* Force memory write invalidate off. If we leave it on,
5982 * then on 5700_BX chips we have to enable a workaround.
5983 * The workaround is to set the TG3PCI_DMA_RW_CTRL boundary
5984 * to match the cacheline size. The Broadcom driver has this
5985 * workaround but turns MWI off all the time, so it is never
5986 * used. This seems to suggest that the workaround is insufficient.
5988 pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
5989 pci_cmd &= ~PCI_COMMAND_INVALIDATE;
5990 pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
5992 /* It is absolutely critical that TG3PCI_MISC_HOST_CTRL
5993 * has the register indirect write enable bit set before
5994 * we try to access any of the MMIO registers. It is also
5995 * critical that the PCI-X hw workaround situation is decided
5996 * before that as well.
5998 pci_read_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
5999 &misc_ctrl_reg);
6001 tp->pci_chip_rev_id = (misc_ctrl_reg >>
6002 MISC_HOST_CTRL_CHIPREV_SHIFT);
6004 /* Initialize misc host control in PCI block. */
6005 tp->misc_host_ctrl |= (misc_ctrl_reg &
6006 MISC_HOST_CTRL_CHIPREV);
6007 pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
6008 tp->misc_host_ctrl);
6010 pci_read_config_dword(tp->pdev, TG3PCI_CACHELINESZ,
6011 &cacheline_sz_reg);
6013 tp->pci_cacheline_sz = (cacheline_sz_reg >> 0) & 0xff;
6014 tp->pci_lat_timer = (cacheline_sz_reg >> 8) & 0xff;
6015 tp->pci_hdr_type = (cacheline_sz_reg >> 16) & 0xff;
6016 tp->pci_bist = (cacheline_sz_reg >> 24) & 0xff;
6018 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 &&
6019 tp->pci_lat_timer < 64) {
6020 tp->pci_lat_timer = 64;
6022 cacheline_sz_reg = ((tp->pci_cacheline_sz & 0xff) << 0);
6023 cacheline_sz_reg |= ((tp->pci_lat_timer & 0xff) << 8);
6024 cacheline_sz_reg |= ((tp->pci_hdr_type & 0xff) << 16);
6025 cacheline_sz_reg |= ((tp->pci_bist & 0xff) << 24);
6027 pci_write_config_dword(tp->pdev, TG3PCI_CACHELINESZ,
6028 cacheline_sz_reg);
6031 pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE,
6032 &pci_state_reg);
6034 if ((pci_state_reg & PCISTATE_CONV_PCI_MODE) == 0) {
6035 tp->tg3_flags |= TG3_FLAG_PCIX_MODE;
6037 /* If this is a 5700 BX chipset, and we are in PCI-X
6038 * mode, enable register write workaround.
6040 * The workaround is to use indirect register accesses
6041 * for all chip writes not to mailbox registers.
6043 if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5700_BX) {
6044 u32 pm_reg;
6045 u16 pci_cmd;
6047 tp->tg3_flags |= TG3_FLAG_PCIX_TARGET_HWBUG;
6049 /* The chip can have its power management PCI config
6050 * space registers clobbered due to this bug.
6051 * So explicitly force the chip into D0 here.
6053 pci_read_config_dword(tp->pdev, TG3PCI_PM_CTRL_STAT,
6054 &pm_reg);
6055 pm_reg &= ~PCI_PM_CTRL_STATE_MASK;
6056 pm_reg |= PCI_PM_CTRL_PME_ENABLE | 0 /* D0 */;
6057 pci_write_config_dword(tp->pdev, TG3PCI_PM_CTRL_STAT,
6058 pm_reg);
6060 /* Also, force SERR#/PERR# in PCI command. */
6061 pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
6062 pci_cmd |= PCI_COMMAND_PARITY | PCI_COMMAND_SERR;
6063 pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
6067 /* Back to back register writes can cause problems on this chip;
6068 * the workaround is to read back all reg writes except those to
6069 * mailbox regs. See tg3_write_indirect_reg32().
6071 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701)
6072 tp->tg3_flags |= TG3_FLAG_5701_REG_WRITE_BUG;
6074 if ((pci_state_reg & PCISTATE_BUS_SPEED_HIGH) != 0)
6075 tp->tg3_flags |= TG3_FLAG_PCI_HIGH_SPEED;
6076 if ((pci_state_reg & PCISTATE_BUS_32BIT) != 0)
6077 tp->tg3_flags |= TG3_FLAG_PCI_32BIT;
6079 /* Chip-specific fixup from Broadcom driver */
6080 if ((tp->pci_chip_rev_id == CHIPREV_ID_5704_A0) &&
6081 (!(pci_state_reg & PCISTATE_RETRY_SAME_DMA))) {
6082 pci_state_reg |= PCISTATE_RETRY_SAME_DMA;
6083 pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE, pci_state_reg);
6086 /* Force the chip into D0. */
6087 err = tg3_set_power_state(tp, 0);
6088 if (err) {
6089 printk(KERN_ERR PFX "(%s) transition to D0 failed\n",
6090 tp->pdev->slot_name);
6091 return err;
6094 /* 5700 B0 chips do not support checksumming correctly due
6095 * to hardware bugs.
6097 if (tp->pci_chip_rev_id == CHIPREV_ID_5700_B0)
6098 tp->tg3_flags |= TG3_FLAG_BROKEN_CHECKSUMS;
6100 /* Pseudo-header checksum is done by hardware logic and not
6101 * the offload processors, so make the chip do the pseudo-
6102 * header checksums on receive. For transmit it is more
6103 * convenient to do the pseudo-header checksum in software
6104 * as Linux does that on transmit for us in all cases.
6106 tp->tg3_flags |= TG3_FLAG_NO_TX_PSEUDO_CSUM;
6107 tp->tg3_flags &= ~TG3_FLAG_NO_RX_PSEUDO_CSUM;
6109 /* Derive initial jumbo mode from MTU assigned in
6110 * ether_setup() via the alloc_etherdev() call
6112 if (tp->dev->mtu > ETH_DATA_LEN)
6113 tp->tg3_flags |= TG3_FLAG_JUMBO_ENABLE;
6115 /* Determine WakeOnLan speed to use. */
6116 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
6117 tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
6118 tp->pci_chip_rev_id == CHIPREV_ID_5701_B0 ||
6119 tp->pci_chip_rev_id == CHIPREV_ID_5701_B2) {
6120 tp->tg3_flags &= ~(TG3_FLAG_WOL_SPEED_100MB);
6121 } else {
6122 tp->tg3_flags |= TG3_FLAG_WOL_SPEED_100MB;
6125 /* Only 5701 and later support tagged irq status mode.
6127 * However, since we are using NAPI we avoid tagged irq status
6128 * because the interrupt condition is more difficult to
6129 * fully clear in that mode.
6131 tp->coalesce_mode = 0;
6133 if (GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5700_AX &&
6134 GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5700_BX)
6135 tp->coalesce_mode |= HOSTCC_MODE_32BYTE;
6137 /* Initialize MAC MI mode, polling disabled. */
6138 tw32(MAC_MI_MODE, tp->mi_mode);
6139 tr32(MAC_MI_MODE);
6140 udelay(40);
6142 /* Initialize data/descriptor byte/word swapping. */
6143 tw32(GRC_MODE, tp->grc_mode);
6145 tg3_switch_clocks(tp);
6147 /* Clear this out for sanity. */
6148 tw32(TG3PCI_MEM_WIN_BASE_ADDR, 0);
6150 pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE,
6151 &pci_state_reg);
6152 if ((pci_state_reg & PCISTATE_CONV_PCI_MODE) == 0 &&
6153 (tp->tg3_flags & TG3_FLAG_PCIX_TARGET_HWBUG) == 0) {
6154 u32 chiprevid = GET_CHIP_REV_ID(tp->misc_host_ctrl);
6156 if (chiprevid == CHIPREV_ID_5701_A0 ||
6157 chiprevid == CHIPREV_ID_5701_B0 ||
6158 chiprevid == CHIPREV_ID_5701_B2 ||
6159 chiprevid == CHIPREV_ID_5701_B5) {
6160 unsigned long sram_base;
6162 /* Write some dummy words into the SRAM status block
6163 * area and see if they read back correctly. If the
6164 * readback is bad, force enable the PCIX workaround.
6166 sram_base = tp->regs + NIC_SRAM_WIN_BASE + NIC_SRAM_STATS_BLK;
6168 writel(0x00000000, sram_base);
6169 writel(0x00000000, sram_base + 4);
6170 writel(0xffffffff, sram_base + 4);
6171 if (readl(sram_base) != 0x00000000)
6172 tp->tg3_flags |= TG3_FLAG_PCIX_TARGET_HWBUG;
6176 udelay(50);
6177 tg3_nvram_init(tp);
6179 /* Determine if TX descriptors will reside in
6180 * main memory or in the chip SRAM.
6182 if (tp->tg3_flags & TG3_FLAG_PCIX_TARGET_HWBUG)
6183 tp->tg3_flags |= TG3_FLAG_HOST_TXDS;
6185 grc_misc_cfg = tr32(GRC_MISC_CFG);
6186 grc_misc_cfg &= GRC_MISC_CFG_BOARD_ID_MASK;
6188 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 &&
6189 grc_misc_cfg == GRC_MISC_CFG_BOARD_ID_5704CIOBE) {
6190 tp->tg3_flags |= TG3_FLAG_SPLIT_MODE;
6191 tp->split_mode_max_reqs = SPLIT_MODE_5704_MAX_REQ;
6194 /* this one is limited to 10/100 only */
6195 if (grc_misc_cfg == GRC_MISC_CFG_BOARD_ID_5702FE)
6196 tp->tg3_flags |= TG3_FLAG_10_100_ONLY;
6198 err = tg3_phy_probe(tp);
6199 if (err) {
6200 printk(KERN_ERR PFX "(%s) phy probe failed, err %d\n",
6201 tp->pdev->slot_name, err);
6202 /* ... but do not return immediately ... */
6205 tg3_read_partno(tp);
6207 if (tp->phy_id == PHY_ID_SERDES) {
6208 tp->tg3_flags &= ~TG3_FLAG_USE_MI_INTERRUPT;
6210 /* And override led_mode in case Dell ever makes
6211 * a fibre board.
6213 tp->led_mode = led_mode_three_link;
6214 } else {
6215 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700)
6216 tp->tg3_flags |= TG3_FLAG_USE_MI_INTERRUPT;
6217 else
6218 tp->tg3_flags &= ~TG3_FLAG_USE_MI_INTERRUPT;
6221 /* 5700 {AX,BX} chips have a broken status block link
6222 * change bit implementation, so we must use the
6223 * status register in those cases.
6225 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700)
6226 tp->tg3_flags |= TG3_FLAG_USE_LINKCHG_REG;
6227 else
6228 tp->tg3_flags &= ~TG3_FLAG_USE_LINKCHG_REG;
6230 /* The led_mode is set during tg3_phy_probe; here we might
6231 * have to force the link status polling mechanism based
6232 * upon subsystem IDs.
6234 if (tp->pdev->subsystem_vendor == PCI_VENDOR_ID_DELL &&
6235 tp->phy_id != PHY_ID_SERDES) {
6236 tp->tg3_flags |= (TG3_FLAG_USE_MI_INTERRUPT |
6237 TG3_FLAG_USE_LINKCHG_REG);
6240 /* For all SERDES we poll the MAC status register. */
6241 if (tp->phy_id == PHY_ID_SERDES)
6242 tp->tg3_flags |= TG3_FLAG_POLL_SERDES;
6243 else
6244 tp->tg3_flags &= ~TG3_FLAG_POLL_SERDES;
6246 /* 5700 BX chips need to have their TX producer index mailboxes
6247 * written twice to work around a bug.
6249 if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5700_BX)
6250 tp->tg3_flags |= TG3_FLAG_TXD_MBOX_HWBUG;
6251 else
6252 tp->tg3_flags &= ~TG3_FLAG_TXD_MBOX_HWBUG;
6254 /* 5700 chips can get confused if TX buffers straddle the
6255 * 4GB address boundary in some cases.
6257 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700)
6258 tp->dev->hard_start_xmit = tg3_start_xmit_4gbug;
6259 else
6260 tp->dev->hard_start_xmit = tg3_start_xmit;
6262 tp->rx_offset = 2;
6263 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701 &&
6264 (tp->tg3_flags & TG3_FLAG_PCIX_MODE) != 0)
6265 tp->rx_offset = 0;
6267 /* By default, disable wake-on-lan. User can change this
6268 * using ETHTOOL_SWOL.
6270 tp->tg3_flags &= ~TG3_FLAG_WOL_ENABLE;
6272 return err;
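/*
 * Revision decoding used throughout tg3_get_invariants() above:
 * pci_chip_rev_id is taken from the top bits of TG3PCI_MISC_HOST_CTRL
 * and reduced in two steps by the tg3.h helpers.  Assuming the header
 * values of this era, CHIPREV_ID_5700_B0 == 0x7100 yields
 * GET_CHIP_REV() == 0x71 (CHIPREV_5700_BX) and
 * GET_ASIC_REV() == 0x7 (ASIC_REV_5700).
 */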
6275 static int __devinit tg3_get_device_address(struct tg3 *tp)
6277 struct net_device *dev = tp->dev;
6278 u32 hi, lo, mac_offset;
6280 if (PCI_FUNC(tp->pdev->devfn) == 0)
6281 mac_offset = 0x7c;
6282 else
6283 mac_offset = 0xcc;
6285 /* First try to get it from MAC address mailbox. */
6286 tg3_read_mem(tp, NIC_SRAM_MAC_ADDR_HIGH_MBOX, &hi);
6287 if ((hi >> 16) == 0x484b) {
6288 dev->dev_addr[0] = (hi >> 8) & 0xff;
6289 dev->dev_addr[1] = (hi >> 0) & 0xff;
6291 tg3_read_mem(tp, NIC_SRAM_MAC_ADDR_LOW_MBOX, &lo);
6292 dev->dev_addr[2] = (lo >> 24) & 0xff;
6293 dev->dev_addr[3] = (lo >> 16) & 0xff;
6294 dev->dev_addr[4] = (lo >> 8) & 0xff;
6295 dev->dev_addr[5] = (lo >> 0) & 0xff;
6297 /* Next, try NVRAM. */
6298 else if (!tg3_nvram_read(tp, mac_offset + 0, &hi) &&
6299 !tg3_nvram_read(tp, mac_offset + 4, &lo)) {
6300 dev->dev_addr[0] = ((hi >> 16) & 0xff);
6301 dev->dev_addr[1] = ((hi >> 24) & 0xff);
6302 dev->dev_addr[2] = ((lo >> 0) & 0xff);
6303 dev->dev_addr[3] = ((lo >> 8) & 0xff);
6304 dev->dev_addr[4] = ((lo >> 16) & 0xff);
6305 dev->dev_addr[5] = ((lo >> 24) & 0xff);
6307 /* Finally just fetch it out of the MAC control regs. */
6308 else {
6309 hi = tr32(MAC_ADDR_0_HIGH);
6310 lo = tr32(MAC_ADDR_0_LOW);
6312 dev->dev_addr[5] = lo & 0xff;
6313 dev->dev_addr[4] = (lo >> 8) & 0xff;
6314 dev->dev_addr[3] = (lo >> 16) & 0xff;
6315 dev->dev_addr[2] = (lo >> 24) & 0xff;
6316 dev->dev_addr[1] = hi & 0xff;
6317 dev->dev_addr[0] = (hi >> 8) & 0xff;
6320 if (!is_valid_ether_addr(&dev->dev_addr[0]))
6321 return -EINVAL;
6323 return 0;
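/*
 * Worked example for the mailbox path in tg3_get_device_address()
 * above, with values assumed purely for illustration: hi == 0x484b0a0b
 * passes the (hi >> 16) == 0x484b signature check, and together with
 * lo == 0x0c0d0e0f yields the station address 0a:0b:0c:0d:0e:0f.
 * Note that the NVRAM fallback unpacks hi/lo in a different byte
 * order than the mailbox path.
 */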
6326 static int __devinit tg3_do_test_dma(struct tg3 *tp, u32 *buf, dma_addr_t buf_dma, int size, int to_device)
6328 struct tg3_internal_buffer_desc test_desc;
6329 u32 sram_dma_descs;
6330 int i, ret;
6332 sram_dma_descs = NIC_SRAM_DMA_DESC_POOL_BASE;
6334 tw32(FTQ_RCVBD_COMP_FIFO_ENQDEQ, 0);
6335 tw32(FTQ_RCVDATA_COMP_FIFO_ENQDEQ, 0);
6336 tw32(RDMAC_STATUS, 0);
6337 tw32(WDMAC_STATUS, 0);
6339 tw32(BUFMGR_MODE, 0);
6340 tw32(FTQ_RESET, 0);
6342 test_desc.addr_hi = ((u64) buf_dma) >> 32;
6343 test_desc.addr_lo = buf_dma & 0xffffffff;
6344 test_desc.nic_mbuf = 0x00002100;
6345 test_desc.len = size;
6346 if (to_device) {
6347 test_desc.cqid_sqid = (13 << 8) | 2;
6348 tw32(RDMAC_MODE, RDMAC_MODE_RESET);
6349 tr32(RDMAC_MODE);
6350 udelay(40);
6352 tw32(RDMAC_MODE, RDMAC_MODE_ENABLE);
6353 tr32(RDMAC_MODE);
6354 udelay(40);
6355 } else {
6356 test_desc.cqid_sqid = (16 << 8) | 7;
6357 tw32(WDMAC_MODE, WDMAC_MODE_RESET);
6358 tr32(WDMAC_MODE);
6359 udelay(40);
6361 tw32(WDMAC_MODE, WDMAC_MODE_ENABLE);
6362 tr32(WDMAC_MODE);
6363 udelay(40);
6365 test_desc.flags = 0x00000004;
6367 for (i = 0; i < (sizeof(test_desc) / sizeof(u32)); i++) {
6368 u32 val;
6370 val = *(((u32 *)&test_desc) + i);
6371 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR,
6372 sram_dma_descs + (i * sizeof(u32)));
6373 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);
6375 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
6377 if (to_device) {
6378 tw32(FTQ_DMA_HIGH_READ_FIFO_ENQDEQ, sram_dma_descs);
6379 } else {
6380 tw32(FTQ_DMA_HIGH_WRITE_FIFO_ENQDEQ, sram_dma_descs);
6383 ret = -ENODEV;
6384 for (i = 0; i < 40; i++) {
6385 u32 val;
6387 if (to_device)
6388 val = tr32(FTQ_RCVBD_COMP_FIFO_ENQDEQ);
6389 else
6390 val = tr32(FTQ_RCVDATA_COMP_FIFO_ENQDEQ);
6391 if ((val & 0xffff) == sram_dma_descs) {
6392 ret = 0;
6393 break;
6396 udelay(100);
6399 return ret;
6402 #define TEST_BUFFER_SIZE 0x400
6404 static int __devinit tg3_test_dma(struct tg3 *tp)
6406 dma_addr_t buf_dma;
6407 u32 *buf;
6408 int ret;
6410 buf = pci_alloc_consistent(tp->pdev, TEST_BUFFER_SIZE, &buf_dma);
6411 if (!buf) {
6412 ret = -ENOMEM;
6413 goto out_nofree;
6416 tw32(TG3PCI_CLOCK_CTRL, 0);
6418 if ((tp->tg3_flags & TG3_FLAG_PCIX_MODE) == 0) {
6419 tp->dma_rwctrl =
6420 (0x7 << DMA_RWCTRL_PCI_WRITE_CMD_SHIFT) |
6421 (0x6 << DMA_RWCTRL_PCI_READ_CMD_SHIFT) |
6422 (0x7 << DMA_RWCTRL_WRITE_WATER_SHIFT) |
6423 (0x7 << DMA_RWCTRL_READ_WATER_SHIFT) |
6424 (0x0f << DMA_RWCTRL_MIN_DMA_SHIFT);
6425 /* XXX 5705 note: set MIN_DMA to zero here */
6426 } else {
6427 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704)
6428 tp->dma_rwctrl =
6429 (0x7 << DMA_RWCTRL_PCI_WRITE_CMD_SHIFT) |
6430 (0x6 << DMA_RWCTRL_PCI_READ_CMD_SHIFT) |
6431 (0x3 << DMA_RWCTRL_WRITE_WATER_SHIFT) |
6432 (0x7 << DMA_RWCTRL_READ_WATER_SHIFT) |
6433 (0x00 << DMA_RWCTRL_MIN_DMA_SHIFT);
6434 else
6435 tp->dma_rwctrl =
6436 (0x7 << DMA_RWCTRL_PCI_WRITE_CMD_SHIFT) |
6437 (0x6 << DMA_RWCTRL_PCI_READ_CMD_SHIFT) |
6438 (0x3 << DMA_RWCTRL_WRITE_WATER_SHIFT) |
6439 (0x3 << DMA_RWCTRL_READ_WATER_SHIFT) |
6440 (0x0f << DMA_RWCTRL_MIN_DMA_SHIFT);
6442 /* Wheee, some more chip bugs... */
6443 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
6444 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
6445 u32 ccval = (tr32(TG3PCI_CLOCK_CTRL) & 0x1f);
6447 if (ccval == 0x6 || ccval == 0x7)
6448 tp->dma_rwctrl |= DMA_RWCTRL_ONE_DMA;
6452 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
6453 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704)
6454 tp->dma_rwctrl &= ~(DMA_RWCTRL_MIN_DMA
6455 << DMA_RWCTRL_MIN_DMA_SHIFT);
6457 /* We don't do this on x86 because it seems to hurt performance.
6458 * It does help things on other platforms though.
6460 #ifndef CONFIG_X86
6462 u8 byte;
6463 int cacheline_size;
6464 pci_read_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE, &byte);
6466 if (byte == 0)
6467 cacheline_size = 1024;
6468 else
6469 cacheline_size = (int) byte * 4;
6471 tp->dma_rwctrl &= ~(DMA_RWCTRL_READ_BNDRY_MASK |
6472 DMA_RWCTRL_WRITE_BNDRY_MASK);
6474 switch (cacheline_size) {
6475 case 16:
6476 tp->dma_rwctrl |=
6477 (DMA_RWCTRL_READ_BNDRY_16 |
6478 DMA_RWCTRL_WRITE_BNDRY_16);
6479 break;
6481 case 32:
6482 tp->dma_rwctrl |=
6483 (DMA_RWCTRL_READ_BNDRY_32 |
6484 DMA_RWCTRL_WRITE_BNDRY_32);
6485 break;
6487 case 64:
6488 tp->dma_rwctrl |=
6489 (DMA_RWCTRL_READ_BNDRY_64 |
6490 DMA_RWCTRL_WRITE_BNDRY_64);
6491 break;
6493 case 128:
6494 tp->dma_rwctrl |=
6495 (DMA_RWCTRL_READ_BNDRY_128 |
6496 DMA_RWCTRL_WRITE_BNDRY_128);
6497 break;
6499 case 256:
6500 tp->dma_rwctrl |=
6501 (DMA_RWCTRL_READ_BNDRY_256 |
6502 DMA_RWCTRL_WRITE_BNDRY_256);
6503 break;
6505 case 512:
6506 tp->dma_rwctrl |=
6507 (DMA_RWCTRL_READ_BNDRY_512 |
6508 DMA_RWCTRL_WRITE_BNDRY_512);
6509 break;
6511 case 1024:
6512 tp->dma_rwctrl |=
6513 (DMA_RWCTRL_READ_BNDRY_1024 |
6514 DMA_RWCTRL_WRITE_BNDRY_1024);
6515 break;
6518 #endif
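/*
 * Example of the mapping just built: PCI_CACHE_LINE_SIZE is measured
 * in 32-bit words, so a config value of 0x10 means 16 * 4 = 64 bytes
 * and selects DMA_RWCTRL_READ_BNDRY_64 | DMA_RWCTRL_WRITE_BNDRY_64,
 * while a value of 0 is treated as a 1024-byte boundary.
 */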
6520 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
6521 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
6522 /* Remove this if it causes problems for some boards. */
6523 tp->dma_rwctrl |= DMA_RWCTRL_USE_MEM_READ_MULT;
6526 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
6528 ret = 0;
6529 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
6530 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701)
6531 goto out;
6533 while (1) {
6534 u32 *p, i;
6536 p = buf;
6537 for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++)
6538 p[i] = i;
6540 /* Send the buffer to the chip. */
6541 ret = tg3_do_test_dma(tp, buf, buf_dma, TEST_BUFFER_SIZE, 1);
6542 if (ret)
6543 break;
6545 p = buf;
6546 for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++)
6547 p[i] = 0;
6549 /* Now read it back. */
6550 ret = tg3_do_test_dma(tp, buf, buf_dma, TEST_BUFFER_SIZE, 0);
6551 if (ret)
6552 break;
6554 /* Verify it. */
6555 p = buf;
6556 for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++) {
6557 if (p[i] == i)
6558 continue;
6560 if ((tp->dma_rwctrl & DMA_RWCTRL_WRITE_BNDRY_MASK) ==
6561 DMA_RWCTRL_WRITE_BNDRY_DISAB) {
6562 tp->dma_rwctrl |= DMA_RWCTRL_WRITE_BNDRY_16;
6563 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
6564 break;
6565 } else {
6566 ret = -ENODEV;
6567 goto out;
6571 if (i == (TEST_BUFFER_SIZE / sizeof(u32))) {
6572 /* Success. */
6573 ret = 0;
6574 break;
6578 out:
6579 pci_free_consistent(tp->pdev, TEST_BUFFER_SIZE, buf, buf_dma);
6580 out_nofree:
6581 return ret;
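/*
 * Shape of the 5700/5701 verify loop above: fill the buffer with a
 * counting pattern, DMA it to NIC SRAM, zero the buffer, DMA it back,
 * and compare.  On the first mismatch while write boundaries are
 * still disabled, a 16-byte write boundary is forced and the whole
 * test is retried; a mismatch after that fails the probe with -ENODEV.
 */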
6584 static void __devinit tg3_init_link_config(struct tg3 *tp)
6586 tp->link_config.advertising =
6587 (ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full |
6588 ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full |
6589 ADVERTISED_1000baseT_Half | ADVERTISED_1000baseT_Full |
6590 ADVERTISED_Autoneg | ADVERTISED_MII);
6591 tp->link_config.speed = SPEED_INVALID;
6592 tp->link_config.duplex = DUPLEX_INVALID;
6593 tp->link_config.autoneg = AUTONEG_ENABLE;
6594 netif_carrier_off(tp->dev);
6595 tp->link_config.active_speed = SPEED_INVALID;
6596 tp->link_config.active_duplex = DUPLEX_INVALID;
6597 tp->link_config.phy_is_low_power = 0;
6598 tp->link_config.orig_speed = SPEED_INVALID;
6599 tp->link_config.orig_duplex = DUPLEX_INVALID;
6600 tp->link_config.orig_autoneg = AUTONEG_INVALID;
6603 static void __devinit tg3_init_bufmgr_config(struct tg3 *tp)
6605 tp->bufmgr_config.mbuf_read_dma_low_water =
6606 DEFAULT_MB_RDMA_LOW_WATER;
6607 tp->bufmgr_config.mbuf_mac_rx_low_water =
6608 DEFAULT_MB_MACRX_LOW_WATER;
6609 tp->bufmgr_config.mbuf_high_water =
6610 DEFAULT_MB_HIGH_WATER;
6612 tp->bufmgr_config.mbuf_read_dma_low_water_jumbo =
6613 DEFAULT_MB_RDMA_LOW_WATER_JUMBO;
6614 tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo =
6615 DEFAULT_MB_MACRX_LOW_WATER_JUMBO;
6616 tp->bufmgr_config.mbuf_high_water_jumbo =
6617 DEFAULT_MB_HIGH_WATER_JUMBO;
6619 tp->bufmgr_config.dma_low_water = DEFAULT_DMA_LOW_WATER;
6620 tp->bufmgr_config.dma_high_water = DEFAULT_DMA_HIGH_WATER;
6623 static char * __devinit tg3_phy_string(struct tg3 *tp)
6625 switch (tp->phy_id & PHY_ID_MASK) {
6626 case PHY_ID_BCM5400: return "5400";
6627 case PHY_ID_BCM5401: return "5401";
6628 case PHY_ID_BCM5411: return "5411";
6629 case PHY_ID_BCM5701: return "5701";
6630 case PHY_ID_BCM5703: return "5703";
6631 case PHY_ID_BCM5704: return "5704";
6632 case PHY_ID_BCM8002: return "8002";
6633 case PHY_ID_SERDES: return "serdes";
6634 default: return "unknown";
6638 static int __devinit tg3_init_one(struct pci_dev *pdev,
6639 const struct pci_device_id *ent)
6641 static int tg3_version_printed = 0;
6642 unsigned long tg3reg_base, tg3reg_len;
6643 struct net_device *dev;
6644 struct tg3 *tp;
6645 int i, err, pci_using_dac, pm_cap;
6647 if (tg3_version_printed++ == 0)
6648 printk(KERN_INFO "%s", version);
6650 err = pci_enable_device(pdev);
6651 if (err) {
6652 printk(KERN_ERR PFX "Cannot enable PCI device, "
6653 "aborting.\n");
6654 return err;
6657 if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
6658 printk(KERN_ERR PFX "Cannot find proper PCI device "
6659 "base address, aborting.\n");
6660 err = -ENODEV;
6661 goto err_out_disable_pdev;
6664 err = pci_request_regions(pdev, DRV_MODULE_NAME);
6665 if (err) {
6666 printk(KERN_ERR PFX "Cannot obtain PCI resources, "
6667 "aborting.\n");
6668 goto err_out_disable_pdev;
6671 pci_set_master(pdev);
6673 /* Find power-management capability. */
6674 pm_cap = pci_find_capability(pdev, PCI_CAP_ID_PM);
6675 if (pm_cap == 0) {
6676 printk(KERN_ERR PFX "Cannot find PowerManagement capability, "
6677 "aborting.\n");
6678 err = -EIO;
6679 goto err_out_free_res;
6681 /* Configure DMA attributes. */
6682 if (!pci_set_dma_mask(pdev, (u64) 0xffffffffffffffffULL)) {
6683 pci_using_dac = 1;
6684 if (pci_set_consistent_dma_mask(pdev,
6685 (u64) 0xffffffffffffffffULL)) {
6686 printk(KERN_ERR PFX "Unable to obtain 64 bit DMA "
6687 "for consistent allocations\n");
6688 err = -EIO;
6689 goto err_out_free_res;
6690 } else {
6691 err = pci_set_dma_mask(pdev, (u64) 0xffffffff);
6692 if (err) {
6693 printk(KERN_ERR PFX "No usable DMA configuration, "
6694 "aborting.\n");
6695 goto err_out_free_res;
6697 pci_using_dac = 0;
6700 tg3reg_base = pci_resource_start(pdev, 0);
6701 tg3reg_len = pci_resource_len(pdev, 0);
6703 dev = alloc_etherdev(sizeof(*tp));
6704 if (!dev) {
6705 printk(KERN_ERR PFX "Etherdev alloc failed, aborting.\n");
6706 err = -ENOMEM;
6707 goto err_out_free_res;
6710 SET_MODULE_OWNER(dev);
6711 SET_NETDEV_DEV(dev, &pdev->dev);
6713 if (pci_using_dac)
6714 dev->features |= NETIF_F_HIGHDMA;
6715 #if TG3_VLAN_TAG_USED
6716 dev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
6717 dev->vlan_rx_register = tg3_vlan_rx_register;
6718 dev->vlan_rx_kill_vid = tg3_vlan_rx_kill_vid;
6719 #endif
6721 tp = dev->priv;
6722 tp->pdev = pdev;
6723 tp->dev = dev;
6724 tp->pm_cap = pm_cap;
6725 tp->mac_mode = TG3_DEF_MAC_MODE;
6726 tp->rx_mode = TG3_DEF_RX_MODE;
6727 tp->tx_mode = TG3_DEF_TX_MODE;
6728 tp->mi_mode = MAC_MI_MODE_BASE;
6729 if (tg3_debug > 0)
6730 tp->msg_enable = tg3_debug;
6731 else
6732 tp->msg_enable = TG3_DEF_MSG_ENABLE;
6734 /* The word/byte swap controls here control register access byte
6735 * swapping. DMA data byte swapping is controlled in the GRC_MODE
6736 * setting below.
6738 tp->misc_host_ctrl =
6739 MISC_HOST_CTRL_MASK_PCI_INT |
6740 MISC_HOST_CTRL_WORD_SWAP |
6741 MISC_HOST_CTRL_INDIR_ACCESS |
6742 MISC_HOST_CTRL_PCISTATE_RW;
6744 /* The NONFRM (non-frame) byte/word swap controls take effect
6745 * on descriptor entries, anything which isn't packet data.
6747 * The StrongARM chips on the board (one for tx, one for rx)
6748 * are running in big-endian mode.
6750 tp->grc_mode = (GRC_MODE_WSWAP_DATA | GRC_MODE_BSWAP_DATA |
6751 GRC_MODE_WSWAP_NONFRM_DATA);
6752 #ifdef __BIG_ENDIAN
6753 tp->grc_mode |= GRC_MODE_BSWAP_NONFRM_DATA;
6754 #endif
6755 spin_lock_init(&tp->lock);
6756 spin_lock_init(&tp->tx_lock);
6757 spin_lock_init(&tp->indirect_lock);
6758 PREPARE_WORK(&tp->reset_task, tg3_reset_task, tp);
6760 tp->regs = (unsigned long) ioremap(tg3reg_base, tg3reg_len);
6761 if (tp->regs == 0UL) {
6762 printk(KERN_ERR PFX "Cannot map device registers, "
6763 "aborting.\n");
6764 err = -ENOMEM;
6765 goto err_out_free_dev;
6768 tg3_init_link_config(tp);
6770 tg3_init_bufmgr_config(tp);
6772 tp->rx_pending = TG3_DEF_RX_RING_PENDING;
6773 tp->rx_jumbo_pending = TG3_DEF_RX_JUMBO_RING_PENDING;
6774 tp->tx_pending = TG3_DEF_TX_RING_PENDING;
6776 dev->open = tg3_open;
6777 dev->stop = tg3_close;
6778 dev->get_stats = tg3_get_stats;
6779 dev->set_multicast_list = tg3_set_rx_mode;
6780 dev->set_mac_address = tg3_set_mac_addr;
6781 dev->do_ioctl = tg3_ioctl;
6782 dev->tx_timeout = tg3_tx_timeout;
6783 dev->poll = tg3_poll;
6784 dev->weight = 64;
6785 dev->watchdog_timeo = TG3_TX_TIMEOUT;
6786 dev->change_mtu = tg3_change_mtu;
6787 dev->irq = pdev->irq;
6789 err = tg3_get_invariants(tp);
6790 if (err) {
6791 printk(KERN_ERR PFX "Problem fetching invariants of chip, "
6792 "aborting.\n");
6793 goto err_out_iounmap;
6796 err = tg3_get_device_address(tp);
6797 if (err) {
6798 printk(KERN_ERR PFX "Could not obtain valid ethernet address, "
6799 "aborting.\n");
6800 goto err_out_iounmap;
6803 err = tg3_test_dma(tp);
6804 if (err) {
6805 printk(KERN_ERR PFX "DMA engine test failed, aborting.\n");
6806 goto err_out_iounmap;
6809 /* Tigon3 can do ipv4 only... and some chips have buggy
6810 * checksumming.
6812 if ((tp->tg3_flags & TG3_FLAG_BROKEN_CHECKSUMS) == 0) {
6813 dev->features |= NETIF_F_SG | NETIF_F_IP_CSUM;
6814 tp->tg3_flags |= TG3_FLAG_RX_CHECKSUMS;
6815 } else
6816 tp->tg3_flags &= ~TG3_FLAG_RX_CHECKSUMS;
6818 #if TG3_DO_TSO != 0
6819 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
6820 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701 &&
6821 tp->pci_chip_rev_id <= CHIPREV_ID_5701_B2)) {
6822 /* Not TSO capable. */
6823 dev->features &= ~NETIF_F_TSO;
6824 } else {
6825 dev->features |= NETIF_F_TSO;
6827 #endif
6829 err = register_netdev(dev);
6830 if (err) {
6831 printk(KERN_ERR PFX "Cannot register net device, "
6832 "aborting.\n");
6833 goto err_out_iounmap;
6836 pci_set_drvdata(pdev, dev);
6838 /* Now that we have fully set up the chip, save away a snapshot
6839 * of the PCI config space. We need to restore this after
6840 * GRC_MISC_CFG core clock resets and some resume events.
6842 pci_save_state(tp->pdev, tp->pci_cfg_state);
6844 printk(KERN_INFO "%s: Tigon3 [partno(%s) rev %04x PHY(%s)] (PCI%s:%s:%s) %sBaseT Ethernet ",
6845 dev->name,
6846 tp->board_part_number,
6847 tp->pci_chip_rev_id,
6848 tg3_phy_string(tp),
6849 ((tp->tg3_flags & TG3_FLAG_PCIX_MODE) ? "X" : ""),
6850 ((tp->tg3_flags & TG3_FLAG_PCI_HIGH_SPEED) ?
6851 ((tp->tg3_flags & TG3_FLAG_PCIX_MODE) ? "133MHz" : "66MHz") :
6852 ((tp->tg3_flags & TG3_FLAG_PCIX_MODE) ? "100MHz" : "33MHz")),
6853 ((tp->tg3_flags & TG3_FLAG_PCI_32BIT) ? "32-bit" : "64-bit"),
6854 (tp->tg3_flags & TG3_FLAG_10_100_ONLY) ? "10/100" : "10/100/1000");
6856 for (i = 0; i < 6; i++)
6857 printk("%2.2x%c", dev->dev_addr[i],
6858 i == 5 ? '\n' : ':');
6860 return 0;
6862 err_out_iounmap:
6863 iounmap((void *) tp->regs);
6865 err_out_free_dev:
6866 kfree(dev);
6868 err_out_free_res:
6869 pci_release_regions(pdev);
6871 err_out_disable_pdev:
6872 pci_disable_device(pdev);
6873 pci_set_drvdata(pdev, NULL);
6874 return err;
6877 static void __devexit tg3_remove_one(struct pci_dev *pdev)
6879 struct net_device *dev = pci_get_drvdata(pdev);
6881 if (dev) {
6882 unregister_netdev(dev);
6883 iounmap((void *) ((struct tg3 *)(dev->priv))->regs);
6884 kfree(dev);
6885 pci_release_regions(pdev);
6886 pci_disable_device(pdev);
6887 pci_set_drvdata(pdev, NULL);
6891 static int tg3_suspend(struct pci_dev *pdev, u32 state)
6893 struct net_device *dev = pci_get_drvdata(pdev);
6894 struct tg3 *tp = dev->priv;
6895 int err;
6897 if (!netif_running(dev))
6898 return 0;
6900 tg3_netif_stop(tp);
6902 spin_lock_irq(&tp->lock);
6903 spin_lock(&tp->tx_lock);
6904 tg3_disable_ints(tp);
6905 spin_unlock(&tp->tx_lock);
6906 spin_unlock_irq(&tp->lock);
6908 netif_device_detach(dev);
6910 spin_lock_irq(&tp->lock);
6911 spin_lock(&tp->tx_lock);
6912 tg3_halt(tp);
6913 spin_unlock(&tp->tx_lock);
6914 spin_unlock_irq(&tp->lock);
6916 err = tg3_set_power_state(tp, state);
6917 if (err) {
6918 spin_lock_irq(&tp->lock);
6919 spin_lock(&tp->tx_lock);
6921 tg3_init_rings(tp);
6922 tg3_init_hw(tp);
6924 spin_unlock(&tp->tx_lock);
6925 spin_unlock_irq(&tp->lock);
6927 netif_device_attach(dev);
6928 tg3_netif_start(tp);
6931 return err;
6934 static int tg3_resume(struct pci_dev *pdev)
6936 struct net_device *dev = pci_get_drvdata(pdev);
6937 struct tg3 *tp = dev->priv;
6938 int err;
6940 if (!netif_running(dev))
6941 return 0;
6943 err = tg3_set_power_state(tp, 0);
6944 if (err)
6945 return err;
6947 netif_device_attach(dev);
6949 spin_lock_irq(&tp->lock);
6950 spin_lock(&tp->tx_lock);
6952 tg3_init_rings(tp);
6953 tg3_init_hw(tp);
6954 tg3_enable_ints(tp);
6956 spin_unlock(&tp->tx_lock);
6957 spin_unlock_irq(&tp->lock);
6959 tg3_netif_start(tp);
6961 return 0;
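/*
 * Note the lock ordering used consistently in the suspend/resume
 * paths above (and in the VLAN hooks earlier): tp->lock is taken
 * first with IRQs disabled, and tp->tx_lock nests inside it.
 */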
6964 static struct pci_driver tg3_driver = {
6965 .name = DRV_MODULE_NAME,
6966 .id_table = tg3_pci_tbl,
6967 .probe = tg3_init_one,
6968 .remove = __devexit_p(tg3_remove_one),
6969 .suspend = tg3_suspend,
6970 .resume = tg3_resume
6973 static int __init tg3_init(void)
6975 return pci_module_init(&tg3_driver);
6978 static void __exit tg3_cleanup(void)
6980 pci_unregister_driver(&tg3_driver);
6983 module_init(tg3_init);
6984 module_exit(tg3_cleanup);