/* $Id: sungem.c,v 1.44.2.22 2002/03/13 01:18:12 davem Exp $
 * sungem.c: Sun GEM ethernet driver.
 *
 * Copyright (C) 2000, 2001, 2002, 2003 David S. Miller (davem@redhat.com)
 *
 * Support for Apple GMAC and assorted PHYs, WOL, Power Management
 * (C) 2001,2002,2003 Benjamin Herrenschmidt (benh@kernel.crashing.org)
 * (C) 2004,2005 Benjamin Herrenschmidt, IBM Corp.
 *
 * NAPI and NETPOLL support
 * (C) 2004 by Eric Lemoine (eric.lemoine@gmail.com)
 *
 * TODO:
 *  - Now that the driver was significantly simplified, I need to rework
 *    the locking. I'm sure we don't need _2_ spinlocks, and we probably
 *    can avoid taking most of them for such long periods of time (and schedule
 *    instead). The main issues at this point are caused by the netdev layer
 *    though:
 *
 *    gem_change_mtu() and gem_set_multicast() are called with a read_lock()
 *    held by net/core/dev.c, thus they can't schedule. That means they can't
 *    call napi_disable() either, thus forcing gem_poll() to keep a spinlock
 *    where it could have been dropped. change_mtu especially would love also to
 *    be able to msleep instead of horrid locked delays when resetting the HW,
 *    but that read_lock() makes it impossible, unless I defer its action to
 *    the reset task, which means it'll be asynchronous (won't take effect until
 *    the system schedules a bit).
 *
 *    Also, it would probably be possible to also remove most of the long-life
 *    locking in open/resume code path (gem_reinit_chip) by being more careful
 *    about when we can start taking interrupts or get xmit() called...
 */
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/fcntl.h>
#include <linux/interrupt.h>
#include <linux/ioport.h>
#include <linux/in.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/delay.h>
#include <linux/init.h>
#include <linux/errno.h>
#include <linux/pci.h>
#include <linux/dma-mapping.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/mii.h>
#include <linux/ethtool.h>
#include <linux/crc32.h>
#include <linux/random.h>
#include <linux/workqueue.h>
#include <linux/if_vlan.h>
#include <linux/bitops.h>
#include <linux/mutex.h>
#include <linux/mm.h>

#include <asm/system.h>
#include <asm/io.h>
#include <asm/byteorder.h>
#include <asm/uaccess.h>
#include <asm/irq.h>

#ifdef CONFIG_SPARC
#include <asm/idprom.h>
#include <asm/prom.h>
#endif

#ifdef CONFIG_PPC_PMAC
#include <asm/pci-bridge.h>
#include <asm/prom.h>
#include <asm/machdep.h>
#include <asm/pmac_feature.h>
#endif

#include "sungem_phy.h"
#include "sungem.h"
/* Stripping FCS is causing problems, disabled for now */
#undef STRIP_FCS

#define DEFAULT_MSG	(NETIF_MSG_DRV		| \
			 NETIF_MSG_PROBE	| \
			 NETIF_MSG_LINK)

#define ADVERTISE_MASK	(SUPPORTED_10baseT_Half | SUPPORTED_10baseT_Full | \
			 SUPPORTED_100baseT_Half | SUPPORTED_100baseT_Full | \
			 SUPPORTED_1000baseT_Half | SUPPORTED_1000baseT_Full | \
			 SUPPORTED_Pause | SUPPORTED_Autoneg)

#define DRV_NAME	"sungem"
#define DRV_VERSION	"0.98"
#define DRV_RELDATE	"8/24/03"
#define DRV_AUTHOR	"David S. Miller (davem@redhat.com)"

static char version[] __devinitdata =
        DRV_NAME ".c:v" DRV_VERSION " " DRV_RELDATE " " DRV_AUTHOR "\n";

MODULE_AUTHOR(DRV_AUTHOR);
MODULE_DESCRIPTION("Sun GEM Gbit ethernet driver");
MODULE_LICENSE("GPL");

#define GEM_MODULE_NAME	"gem"
#define PFX GEM_MODULE_NAME ": "
static struct pci_device_id gem_pci_tbl[] = {
	{ PCI_VENDOR_ID_SUN, PCI_DEVICE_ID_SUN_GEM,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },

	/* These models only differ from the original GEM in
	 * that their tx/rx fifos are of a different size and
	 * they only support 10/100 speeds. -DaveM
	 *
	 * Apple's GMAC does support gigabit on machines with
	 * the BCM54xx PHYs. -BenH
	 */
	{ PCI_VENDOR_ID_SUN, PCI_DEVICE_ID_SUN_RIO_GEM,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
	{ PCI_VENDOR_ID_APPLE, PCI_DEVICE_ID_APPLE_UNI_N_GMAC,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
	{ PCI_VENDOR_ID_APPLE, PCI_DEVICE_ID_APPLE_UNI_N_GMACP,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
	{ PCI_VENDOR_ID_APPLE, PCI_DEVICE_ID_APPLE_UNI_N_GMAC2,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
	{ PCI_VENDOR_ID_APPLE, PCI_DEVICE_ID_APPLE_K2_GMAC,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
	{ PCI_VENDOR_ID_APPLE, PCI_DEVICE_ID_APPLE_SH_SUNGEM,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
	{ PCI_VENDOR_ID_APPLE, PCI_DEVICE_ID_APPLE_IPID2_GMAC,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
	{0, }
};

MODULE_DEVICE_TABLE(pci, gem_pci_tbl);
static u16 __phy_read(struct gem *gp, int phy_addr, int reg)
{
	u32 cmd;
	int limit = 10000;

	cmd  = (1 << 30);
	cmd |= (2 << 28);
	cmd |= (phy_addr << 23) & MIF_FRAME_PHYAD;
	cmd |= (reg << 18) & MIF_FRAME_REGAD;
	cmd |= (MIF_FRAME_TAMSB);
	writel(cmd, gp->regs + MIF_FRAME);

	while (--limit) {
		cmd = readl(gp->regs + MIF_FRAME);
		if (cmd & MIF_FRAME_TALSB)
			break;

		udelay(10);
	}

	if (!limit)
		cmd = 0xffff;

	return cmd & MIF_FRAME_DATA;
}
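
/* Illustrative note (not part of the driver logic): the MIF frame built above
 * follows the IEEE 802.3 clause-22 management frame layout. Bit 30 is the
 * start bit, bits 29:28 the opcode (2 = read, 1 = write), bits 27:23 the PHY
 * address, bits 22:18 the register address, bits 17:16 the turnaround, and
 * the low 16 bits the data. For example, a read of MII_BMSR (register 1) on
 * PHY 0 would be built as:
 *
 *	cmd = (1 << 30) | (2 << 28) | (0 << 23) | (1 << 18) | MIF_FRAME_TAMSB;
 *
 * The chip flips MIF_FRAME_TALSB once the shifted-in data is valid, which is
 * what the polling loop above waits for.
 */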
static inline int _phy_read(struct net_device *dev, int mii_id, int reg)
{
	struct gem *gp = netdev_priv(dev);
	return __phy_read(gp, mii_id, reg);
}

static inline u16 phy_read(struct gem *gp, int reg)
{
	return __phy_read(gp, gp->mii_phy_addr, reg);
}

static void __phy_write(struct gem *gp, int phy_addr, int reg, u16 val)
{
	u32 cmd;
	int limit = 10000;

	cmd  = (1 << 30);
	cmd |= (1 << 28);
	cmd |= (phy_addr << 23) & MIF_FRAME_PHYAD;
	cmd |= (reg << 18) & MIF_FRAME_REGAD;
	cmd |= (MIF_FRAME_TAMSB);
	cmd |= (val & MIF_FRAME_DATA);
	writel(cmd, gp->regs + MIF_FRAME);

	while (limit--) {
		cmd = readl(gp->regs + MIF_FRAME);
		if (cmd & MIF_FRAME_TALSB)
			break;

		udelay(10);
	}
}

static inline void _phy_write(struct net_device *dev, int mii_id, int reg, int val)
{
	struct gem *gp = netdev_priv(dev);
	__phy_write(gp, mii_id, reg, val & 0xffff);
}

static inline void phy_write(struct gem *gp, int reg, u16 val)
{
	__phy_write(gp, gp->mii_phy_addr, reg, val);
}

static inline void gem_enable_ints(struct gem *gp)
{
	/* Enable all interrupts but TXDONE */
	writel(GREG_STAT_TXDONE, gp->regs + GREG_IMASK);
}

static inline void gem_disable_ints(struct gem *gp)
{
	/* Disable all interrupts, including TXDONE */
	writel(GREG_STAT_NAPI | GREG_STAT_TXDONE, gp->regs + GREG_IMASK);
}
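
/* Note (illustrative): GREG_IMASK appears to act as a mask register, i.e. a
 * bit written as 1 *disables* that interrupt source. That is why "enable all
 * but TXDONE" writes only GREG_STAT_TXDONE, while "disable" additionally sets
 * the GREG_STAT_NAPI bits that gem_poll() handles.
 */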
/* Turn on the chip's clock */
static void gem_get_cell(struct gem *gp)
{
	BUG_ON(gp->cell_enabled < 0);
	gp->cell_enabled++;
#ifdef CONFIG_PPC_PMAC
	if (gp->cell_enabled == 1) {
		mb();
		pmac_call_feature(PMAC_FTR_GMAC_ENABLE, gp->of_node, 0, 1);
		udelay(10);
	}
#endif /* CONFIG_PPC_PMAC */
}

/* Turn off the chip's clock */
static void gem_put_cell(struct gem *gp)
{
	BUG_ON(gp->cell_enabled <= 0);
	gp->cell_enabled--;
#ifdef CONFIG_PPC_PMAC
	if (gp->cell_enabled == 0) {
		mb();
		pmac_call_feature(PMAC_FTR_GMAC_ENABLE, gp->of_node, 0, 0);
		udelay(10);
	}
#endif /* CONFIG_PPC_PMAC */
}
static void gem_handle_mif_event(struct gem *gp, u32 reg_val, u32 changed_bits)
{
	if (netif_msg_intr(gp))
		printk(KERN_DEBUG "%s: mif interrupt\n", gp->dev->name);
}
static int gem_pcs_interrupt(struct net_device *dev, struct gem *gp, u32 gem_status)
{
	u32 pcs_istat = readl(gp->regs + PCS_ISTAT);
	u32 pcs_miistat;

	if (netif_msg_intr(gp))
		printk(KERN_DEBUG "%s: pcs interrupt, pcs_istat: 0x%x\n",
			gp->dev->name, pcs_istat);

	if (!(pcs_istat & PCS_ISTAT_LSC)) {
		printk(KERN_ERR "%s: PCS irq but no link status change???\n",
		       dev->name);
		return 0;
	}

	/* The link status bit latches on zero, so you must
	 * read it twice in such a case to see a transition
	 * to the link being up.
	 */
	pcs_miistat = readl(gp->regs + PCS_MIISTAT);
	if (!(pcs_miistat & PCS_MIISTAT_LS))
		pcs_miistat |=
			(readl(gp->regs + PCS_MIISTAT) &
			 PCS_MIISTAT_LS);

	if (pcs_miistat & PCS_MIISTAT_ANC) {
		/* The remote-fault indication is only valid
		 * when autoneg has completed.
		 */
		if (pcs_miistat & PCS_MIISTAT_RF)
			printk(KERN_INFO "%s: PCS AutoNEG complete, "
			       "RemoteFault\n", dev->name);
		else
			printk(KERN_INFO "%s: PCS AutoNEG complete.\n",
			       dev->name);
	}

	if (pcs_miistat & PCS_MIISTAT_LS) {
		printk(KERN_INFO "%s: PCS link is now up.\n",
		       dev->name);
		netif_carrier_on(gp->dev);
	} else {
		printk(KERN_INFO "%s: PCS link is now down.\n",
		       dev->name);
		netif_carrier_off(gp->dev);
		/* If this happens and the link timer is not running,
		 * reset so we re-negotiate.
		 */
		if (!timer_pending(&gp->link_timer))
			return 1;
	}

	return 0;
}
static int gem_txmac_interrupt(struct net_device *dev, struct gem *gp, u32 gem_status)
{
	u32 txmac_stat = readl(gp->regs + MAC_TXSTAT);

	if (netif_msg_intr(gp))
		printk(KERN_DEBUG "%s: txmac interrupt, txmac_stat: 0x%x\n",
			gp->dev->name, txmac_stat);

	/* Defer timer expiration is quite normal,
	 * don't even log the event.
	 */
	if ((txmac_stat & MAC_TXSTAT_DTE) &&
	    !(txmac_stat & ~MAC_TXSTAT_DTE))
		return 0;

	if (txmac_stat & MAC_TXSTAT_URUN) {
		printk(KERN_ERR "%s: TX MAC xmit underrun.\n",
		       dev->name);
		gp->net_stats.tx_fifo_errors++;
	}

	if (txmac_stat & MAC_TXSTAT_MPE) {
		printk(KERN_ERR "%s: TX MAC max packet size error.\n",
		       dev->name);
		gp->net_stats.tx_errors++;
	}

	/* The rest are all cases of one of the 16-bit TX
	 * counters expiring.
	 */
	if (txmac_stat & MAC_TXSTAT_NCE)
		gp->net_stats.collisions += 0x10000;

	if (txmac_stat & MAC_TXSTAT_ECE) {
		gp->net_stats.tx_aborted_errors += 0x10000;
		gp->net_stats.collisions += 0x10000;
	}

	if (txmac_stat & MAC_TXSTAT_LCE) {
		gp->net_stats.tx_aborted_errors += 0x10000;
		gp->net_stats.collisions += 0x10000;
	}

	/* We do not keep track of MAC_TXSTAT_FCE and
	 * MAC_TXSTAT_PCE events.
	 */
	return 0;
}
/* When we get a RX fifo overflow, the RX unit in GEM is probably hung
 * so we do the following.
 *
 * If any part of the reset goes wrong, we return 1 and that causes the
 * whole chip to be reset.
 */
static int gem_rxmac_reset(struct gem *gp)
{
	struct net_device *dev = gp->dev;
	int limit, i;
	u64 desc_dma;
	u32 val;

	/* First, reset & disable MAC RX. */
	writel(MAC_RXRST_CMD, gp->regs + MAC_RXRST);
	for (limit = 0; limit < 5000; limit++) {
		if (!(readl(gp->regs + MAC_RXRST) & MAC_RXRST_CMD))
			break;
		udelay(10);
	}
	if (limit == 5000) {
		printk(KERN_ERR "%s: RX MAC will not reset, resetting whole "
		       "chip.\n", dev->name);
		return 1;
	}

	writel(gp->mac_rx_cfg & ~MAC_RXCFG_ENAB,
	       gp->regs + MAC_RXCFG);
	for (limit = 0; limit < 5000; limit++) {
		if (!(readl(gp->regs + MAC_RXCFG) & MAC_RXCFG_ENAB))
			break;
		udelay(10);
	}
	if (limit == 5000) {
		printk(KERN_ERR "%s: RX MAC will not disable, resetting whole "
		       "chip.\n", dev->name);
		return 1;
	}

	/* Second, disable RX DMA. */
	writel(0, gp->regs + RXDMA_CFG);
	for (limit = 0; limit < 5000; limit++) {
		if (!(readl(gp->regs + RXDMA_CFG) & RXDMA_CFG_ENABLE))
			break;
		udelay(10);
	}
	if (limit == 5000) {
		printk(KERN_ERR "%s: RX DMA will not disable, resetting whole "
		       "chip.\n", dev->name);
		return 1;
	}

	udelay(5000);

	/* Execute RX reset command. */
	writel(gp->swrst_base | GREG_SWRST_RXRST,
	       gp->regs + GREG_SWRST);
	for (limit = 0; limit < 5000; limit++) {
		if (!(readl(gp->regs + GREG_SWRST) & GREG_SWRST_RXRST))
			break;
		udelay(10);
	}
	if (limit == 5000) {
		printk(KERN_ERR "%s: RX reset command will not execute, resetting "
		       "whole chip.\n", dev->name);
		return 1;
	}

	/* Refresh the RX ring. */
	for (i = 0; i < RX_RING_SIZE; i++) {
		struct gem_rxd *rxd = &gp->init_block->rxd[i];

		if (gp->rx_skbs[i] == NULL) {
			printk(KERN_ERR "%s: Parts of RX ring empty, resetting "
			       "whole chip.\n", dev->name);
			return 1;
		}

		rxd->status_word = cpu_to_le64(RXDCTRL_FRESH(gp));
	}
	gp->rx_new = gp->rx_old = 0;

	/* Now we must reprogram the rest of RX unit. */
	desc_dma = (u64) gp->gblock_dvma;
	desc_dma += (INIT_BLOCK_TX_RING_SIZE * sizeof(struct gem_txd));
	writel(desc_dma >> 32, gp->regs + RXDMA_DBHI);
	writel(desc_dma & 0xffffffff, gp->regs + RXDMA_DBLOW);
	writel(RX_RING_SIZE - 4, gp->regs + RXDMA_KICK);
	val = (RXDMA_CFG_BASE | (RX_OFFSET << 10) |
	       ((14 / 2) << 13) | RXDMA_CFG_FTHRESH_128);
	writel(val, gp->regs + RXDMA_CFG);
	if (readl(gp->regs + GREG_BIFCFG) & GREG_BIFCFG_M66EN)
		writel(((5 & RXDMA_BLANK_IPKTS) |
			((8 << 12) & RXDMA_BLANK_ITIME)),
		       gp->regs + RXDMA_BLANK);
	else
		writel(((5 & RXDMA_BLANK_IPKTS) |
			((4 << 12) & RXDMA_BLANK_ITIME)),
		       gp->regs + RXDMA_BLANK);
	val  = (((gp->rx_pause_off / 64) << 0) & RXDMA_PTHRESH_OFF);
	val |= (((gp->rx_pause_on / 64) << 12) & RXDMA_PTHRESH_ON);
	writel(val, gp->regs + RXDMA_PTHRESH);
	val = readl(gp->regs + RXDMA_CFG);
	writel(val | RXDMA_CFG_ENABLE, gp->regs + RXDMA_CFG);
	writel(MAC_RXSTAT_RCV, gp->regs + MAC_RXMASK);
	val = readl(gp->regs + MAC_RXCFG);
	writel(val | MAC_RXCFG_ENAB, gp->regs + MAC_RXCFG);

	return 0;
}
static int gem_rxmac_interrupt(struct net_device *dev, struct gem *gp, u32 gem_status)
{
	u32 rxmac_stat = readl(gp->regs + MAC_RXSTAT);
	int ret = 0;

	if (netif_msg_intr(gp))
		printk(KERN_DEBUG "%s: rxmac interrupt, rxmac_stat: 0x%x\n",
			gp->dev->name, rxmac_stat);

	if (rxmac_stat & MAC_RXSTAT_OFLW) {
		u32 smac = readl(gp->regs + MAC_SMACHINE);

		printk(KERN_ERR "%s: RX MAC fifo overflow smac[%08x].\n",
		       dev->name, smac);
		gp->net_stats.rx_over_errors++;
		gp->net_stats.rx_fifo_errors++;

		ret = gem_rxmac_reset(gp);
	}

	if (rxmac_stat & MAC_RXSTAT_ACE)
		gp->net_stats.rx_frame_errors += 0x10000;

	if (rxmac_stat & MAC_RXSTAT_CCE)
		gp->net_stats.rx_crc_errors += 0x10000;

	if (rxmac_stat & MAC_RXSTAT_LCE)
		gp->net_stats.rx_length_errors += 0x10000;

	/* We do not track MAC_RXSTAT_FCE and MAC_RXSTAT_VCE
	 * events.
	 */
	return ret;
}
static int gem_mac_interrupt(struct net_device *dev, struct gem *gp, u32 gem_status)
{
	u32 mac_cstat = readl(gp->regs + MAC_CSTAT);

	if (netif_msg_intr(gp))
		printk(KERN_DEBUG "%s: mac interrupt, mac_cstat: 0x%x\n",
			gp->dev->name, mac_cstat);

	/* This interrupt is just for pause frame and pause
	 * tracking. It is useful for diagnostics and debug
	 * but probably by default we will mask these events.
	 */
	if (mac_cstat & MAC_CSTAT_PS)
		gp->pause_entered++;

	if (mac_cstat & MAC_CSTAT_PRCV)
		gp->pause_last_time_recvd = (mac_cstat >> 16);

	return 0;
}
static int gem_mif_interrupt(struct net_device *dev, struct gem *gp, u32 gem_status)
{
	u32 mif_status = readl(gp->regs + MIF_STATUS);
	u32 reg_val, changed_bits;

	reg_val = (mif_status & MIF_STATUS_DATA) >> 16;
	changed_bits = (mif_status & MIF_STATUS_STAT);

	gem_handle_mif_event(gp, reg_val, changed_bits);

	return 0;
}
static int gem_pci_interrupt(struct net_device *dev, struct gem *gp, u32 gem_status)
{
	u32 pci_estat = readl(gp->regs + GREG_PCIESTAT);

	if (gp->pdev->vendor == PCI_VENDOR_ID_SUN &&
	    gp->pdev->device == PCI_DEVICE_ID_SUN_GEM) {
		printk(KERN_ERR "%s: PCI error [%04x] ",
		       dev->name, pci_estat);

		if (pci_estat & GREG_PCIESTAT_BADACK)
			printk("<No ACK64# during ABS64 cycle> ");
		if (pci_estat & GREG_PCIESTAT_DTRTO)
			printk("<Delayed transaction timeout> ");
		if (pci_estat & GREG_PCIESTAT_OTHER)
			printk("<other>");
		printk("\n");
	} else {
		pci_estat |= GREG_PCIESTAT_OTHER;
		printk(KERN_ERR "%s: PCI error\n", dev->name);
	}

	if (pci_estat & GREG_PCIESTAT_OTHER) {
		u16 pci_cfg_stat;

		/* Interrogate PCI config space for the
		 * true cause.
		 */
		pci_read_config_word(gp->pdev, PCI_STATUS,
				     &pci_cfg_stat);
		printk(KERN_ERR "%s: Read PCI cfg space status [%04x]\n",
		       dev->name, pci_cfg_stat);
		if (pci_cfg_stat & PCI_STATUS_PARITY)
			printk(KERN_ERR "%s: PCI parity error detected.\n",
			       dev->name);
		if (pci_cfg_stat & PCI_STATUS_SIG_TARGET_ABORT)
			printk(KERN_ERR "%s: PCI target abort.\n",
			       dev->name);
		if (pci_cfg_stat & PCI_STATUS_REC_TARGET_ABORT)
			printk(KERN_ERR "%s: PCI master acks target abort.\n",
			       dev->name);
		if (pci_cfg_stat & PCI_STATUS_REC_MASTER_ABORT)
			printk(KERN_ERR "%s: PCI master abort.\n",
			       dev->name);
		if (pci_cfg_stat & PCI_STATUS_SIG_SYSTEM_ERROR)
			printk(KERN_ERR "%s: PCI system error SERR#.\n",
			       dev->name);
		if (pci_cfg_stat & PCI_STATUS_DETECTED_PARITY)
			printk(KERN_ERR "%s: PCI parity error.\n",
			       dev->name);

		/* Write the error bits back to clear them. */
		pci_cfg_stat &= (PCI_STATUS_PARITY |
				 PCI_STATUS_SIG_TARGET_ABORT |
				 PCI_STATUS_REC_TARGET_ABORT |
				 PCI_STATUS_REC_MASTER_ABORT |
				 PCI_STATUS_SIG_SYSTEM_ERROR |
				 PCI_STATUS_DETECTED_PARITY);
		pci_write_config_word(gp->pdev,
				      PCI_STATUS, pci_cfg_stat);
	}

	/* For all PCI errors, we should reset the chip. */
	return 1;
}
/* All non-normal interrupt conditions get serviced here.
 * Returns non-zero if we should just exit the interrupt
 * handler right now (ie. if we reset the card which invalidates
 * all of the other original irq status bits).
 */
static int gem_abnormal_irq(struct net_device *dev, struct gem *gp, u32 gem_status)
{
	if (gem_status & GREG_STAT_RXNOBUF) {
		/* Frame arrived, no free RX buffers available. */
		if (netif_msg_rx_err(gp))
			printk(KERN_DEBUG "%s: no buffer for rx frame\n",
				gp->dev->name);
		gp->net_stats.rx_dropped++;
	}

	if (gem_status & GREG_STAT_RXTAGERR) {
		/* corrupt RX tag framing */
		if (netif_msg_rx_err(gp))
			printk(KERN_DEBUG "%s: corrupt rx tag framing\n",
				gp->dev->name);
		gp->net_stats.rx_errors++;

		goto do_reset;
	}

	if (gem_status & GREG_STAT_PCS) {
		if (gem_pcs_interrupt(dev, gp, gem_status))
			goto do_reset;
	}

	if (gem_status & GREG_STAT_TXMAC) {
		if (gem_txmac_interrupt(dev, gp, gem_status))
			goto do_reset;
	}

	if (gem_status & GREG_STAT_RXMAC) {
		if (gem_rxmac_interrupt(dev, gp, gem_status))
			goto do_reset;
	}

	if (gem_status & GREG_STAT_MAC) {
		if (gem_mac_interrupt(dev, gp, gem_status))
			goto do_reset;
	}

	if (gem_status & GREG_STAT_MIF) {
		if (gem_mif_interrupt(dev, gp, gem_status))
			goto do_reset;
	}

	if (gem_status & GREG_STAT_PCIERR) {
		if (gem_pci_interrupt(dev, gp, gem_status))
			goto do_reset;
	}

	return 0;

do_reset:
	gp->reset_task_pending = 1;
	schedule_work(&gp->reset_task);

	return 1;
}
static __inline__ void gem_tx(struct net_device *dev, struct gem *gp, u32 gem_status)
{
	int entry, limit;

	if (netif_msg_intr(gp))
		printk(KERN_DEBUG "%s: tx interrupt, gem_status: 0x%x\n",
			gp->dev->name, gem_status);

	entry = gp->tx_old;
	limit = ((gem_status & GREG_STAT_TXNR) >> GREG_STAT_TXNR_SHIFT);
	while (entry != limit) {
		struct sk_buff *skb;
		struct gem_txd *txd;
		dma_addr_t dma_addr;
		u32 dma_len;
		int frag;

		if (netif_msg_tx_done(gp))
			printk(KERN_DEBUG "%s: tx done, slot %d\n",
				gp->dev->name, entry);
		skb = gp->tx_skbs[entry];
		if (skb_shinfo(skb)->nr_frags) {
			int last = entry + skb_shinfo(skb)->nr_frags;
			int walk = entry;
			int incomplete = 0;

			last &= (TX_RING_SIZE - 1);
			for (;;) {
				walk = NEXT_TX(walk);
				if (walk == limit)
					incomplete = 1;
				if (walk == last)
					break;
			}
			if (incomplete)
				break;
		}
		gp->tx_skbs[entry] = NULL;
		gp->net_stats.tx_bytes += skb->len;

		for (frag = 0; frag <= skb_shinfo(skb)->nr_frags; frag++) {
			txd = &gp->init_block->txd[entry];

			dma_addr = le64_to_cpu(txd->buffer);
			dma_len = le64_to_cpu(txd->control_word) & TXDCTRL_BUFSZ;

			pci_unmap_page(gp->pdev, dma_addr, dma_len, PCI_DMA_TODEVICE);
			entry = NEXT_TX(entry);
		}

		gp->net_stats.tx_packets++;
		dev_kfree_skb_irq(skb);
	}
	gp->tx_old = entry;

	if (netif_queue_stopped(dev) &&
	    TX_BUFFS_AVAIL(gp) > (MAX_SKB_FRAGS + 1))
		netif_wake_queue(dev);
}
static __inline__ void gem_post_rxds(struct gem *gp, int limit)
{
	int cluster_start, curr, count, kick;

	cluster_start = curr = (gp->rx_new & ~(4 - 1));
	count = 0;
	kick = -1;
	wmb();
	while (curr != limit) {
		curr = NEXT_RX(curr);
		if (++count == 4) {
			struct gem_rxd *rxd =
				&gp->init_block->rxd[cluster_start];
			for (;;) {
				rxd->status_word = cpu_to_le64(RXDCTRL_FRESH(gp));
				rxd++;
				cluster_start = NEXT_RX(cluster_start);
				if (cluster_start == curr)
					break;
			}
			kick = curr;
			count = 0;
		}
	}
	if (kick >= 0) {
		mb();
		writel(kick, gp->regs + RXDMA_KICK);
	}
}
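
/* Note (illustrative): descriptors are handed back to the chip in aligned
 * clusters of four. Each gem_rxd is two u64s (16 bytes), so a cluster spans
 * one 64-byte line, and the RXDMA_KICK doorbell is written at most once per
 * cluster rather than once per descriptor. This presumably also matches how
 * the chip prefetches descriptors from the ring.
 */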
static int gem_rx(struct gem *gp, int work_to_do)
{
	int entry, drops, work_done = 0;
	u32 done;
	__sum16 csum;

	if (netif_msg_rx_status(gp))
		printk(KERN_DEBUG "%s: rx interrupt, done: %d, rx_new: %d\n",
			gp->dev->name, readl(gp->regs + RXDMA_DONE), gp->rx_new);

	entry = gp->rx_new;
	drops = 0;
	done = readl(gp->regs + RXDMA_DONE);
	for (;;) {
		struct gem_rxd *rxd = &gp->init_block->rxd[entry];
		struct sk_buff *skb;
		u64 status = le64_to_cpu(rxd->status_word);
		dma_addr_t dma_addr;
		int len;

		if ((status & RXDCTRL_OWN) != 0)
			break;

		if (work_done >= RX_RING_SIZE || work_done >= work_to_do)
			break;

		/* When writing back RX descriptor, GEM writes status
		 * then buffer address, possibly in separate transactions.
		 * If we don't wait for the chip to write both, we could
		 * post a new buffer to this descriptor then have GEM spam
		 * on the buffer address. We sync on the RX completion
		 * register to prevent this from happening.
		 */
		if (entry == done) {
			done = readl(gp->regs + RXDMA_DONE);
			if (entry == done)
				break;
		}

		/* We can now account for the work we're about to do */
		work_done++;

		skb = gp->rx_skbs[entry];

		len = (status & RXDCTRL_BUFSZ) >> 16;
		if ((len < ETH_ZLEN) || (status & RXDCTRL_BAD)) {
			gp->net_stats.rx_errors++;
			if (len < ETH_ZLEN)
				gp->net_stats.rx_length_errors++;
			if (len & RXDCTRL_BAD)
				gp->net_stats.rx_crc_errors++;

			/* We'll just return it to GEM. */
		drop_it:
			gp->net_stats.rx_dropped++;
			goto next;
		}

		dma_addr = le64_to_cpu(rxd->buffer);
		if (len > RX_COPY_THRESHOLD) {
			struct sk_buff *new_skb;

			new_skb = gem_alloc_skb(RX_BUF_ALLOC_SIZE(gp), GFP_ATOMIC);
			if (new_skb == NULL) {
				drops++;
				goto drop_it;
			}
			pci_unmap_page(gp->pdev, dma_addr,
				       RX_BUF_ALLOC_SIZE(gp),
				       PCI_DMA_FROMDEVICE);
			gp->rx_skbs[entry] = new_skb;
			new_skb->dev = gp->dev;
			skb_put(new_skb, (gp->rx_buf_sz + RX_OFFSET));
			rxd->buffer = cpu_to_le64(pci_map_page(gp->pdev,
							       virt_to_page(new_skb->data),
							       offset_in_page(new_skb->data),
							       RX_BUF_ALLOC_SIZE(gp),
							       PCI_DMA_FROMDEVICE));
			skb_reserve(new_skb, RX_OFFSET);

			/* Trim the original skb for the netif. */
			skb_trim(skb, len);
		} else {
			struct sk_buff *copy_skb = dev_alloc_skb(len + 2);

			if (copy_skb == NULL) {
				drops++;
				goto drop_it;
			}

			skb_reserve(copy_skb, 2);
			skb_put(copy_skb, len);
			pci_dma_sync_single_for_cpu(gp->pdev, dma_addr, len, PCI_DMA_FROMDEVICE);
			skb_copy_from_linear_data(skb, copy_skb->data, len);
			pci_dma_sync_single_for_device(gp->pdev, dma_addr, len, PCI_DMA_FROMDEVICE);

			/* We'll reuse the original ring buffer. */
			skb = copy_skb;
		}

		csum = (__force __sum16)htons((status & RXDCTRL_TCPCSUM) ^ 0xffff);
		skb->csum = csum_unfold(csum);
		skb->ip_summed = CHECKSUM_COMPLETE;
		skb->protocol = eth_type_trans(skb, gp->dev);

		netif_receive_skb(skb);

		gp->net_stats.rx_packets++;
		gp->net_stats.rx_bytes += len;

	next:
		entry = NEXT_RX(entry);
	}

	gem_post_rxds(gp, entry);

	gp->rx_new = entry;

	if (drops)
		printk(KERN_INFO "%s: Memory squeeze, deferring packet.\n",
		       gp->dev->name);

	return work_done;
}
static int gem_poll(struct napi_struct *napi, int budget)
{
	struct gem *gp = container_of(napi, struct gem, napi);
	struct net_device *dev = gp->dev;
	unsigned long flags;
	int work_done;

	/*
	 * NAPI locking nightmare: See comment at head of driver
	 */
	spin_lock_irqsave(&gp->lock, flags);

	work_done = 0;
	do {
		/* Handle anomalies */
		if (gp->status & GREG_STAT_ABNORMAL) {
			if (gem_abnormal_irq(dev, gp, gp->status))
				break;
		}

		/* Run TX completion thread */
		spin_lock(&gp->tx_lock);
		gem_tx(dev, gp, gp->status);
		spin_unlock(&gp->tx_lock);

		spin_unlock_irqrestore(&gp->lock, flags);

		/* Run RX thread. We don't use any locking here,
		 * code willing to do bad things - like cleaning the
		 * rx ring - must call napi_disable(), which
		 * schedule_timeout()'s if polling is already disabled.
		 */
		work_done += gem_rx(gp, budget - work_done);

		if (work_done >= budget)
			return work_done;

		spin_lock_irqsave(&gp->lock, flags);

		gp->status = readl(gp->regs + GREG_STAT);
	} while (gp->status & GREG_STAT_NAPI);

	__napi_complete(napi);
	gem_enable_ints(gp);

	spin_unlock_irqrestore(&gp->lock, flags);

	return work_done;
}
static irqreturn_t gem_interrupt(int irq, void *dev_id)
{
	struct net_device *dev = dev_id;
	struct gem *gp = netdev_priv(dev);
	unsigned long flags;

	/* Swallow interrupts when shutting the chip down, though
	 * that shouldn't happen, we should have done free_irq() at
	 * this point...
	 */
	if (!gp->running)
		return IRQ_HANDLED;

	spin_lock_irqsave(&gp->lock, flags);

	if (napi_schedule_prep(&gp->napi)) {
		u32 gem_status = readl(gp->regs + GREG_STAT);

		if (gem_status == 0) {
			napi_enable(&gp->napi);
			spin_unlock_irqrestore(&gp->lock, flags);
			return IRQ_NONE;
		}
		gp->status = gem_status;
		gem_disable_ints(gp);
		__napi_schedule(&gp->napi);
	}

	spin_unlock_irqrestore(&gp->lock, flags);

	/* If polling was disabled at the time we received that
	 * interrupt, we may return IRQ_HANDLED here while we
	 * should return IRQ_NONE. No big deal...
	 */
	return IRQ_HANDLED;
}

#ifdef CONFIG_NET_POLL_CONTROLLER
static void gem_poll_controller(struct net_device *dev)
{
	/* gem_interrupt is safe against reentrance so no need
	 * to disable_irq here.
	 */
	gem_interrupt(dev->irq, dev);
}
#endif
static void gem_tx_timeout(struct net_device *dev)
{
	struct gem *gp = netdev_priv(dev);

	printk(KERN_ERR "%s: transmit timed out, resetting\n", dev->name);
	if (!gp->running) {
		printk("%s: hrm.. hw not running !\n", dev->name);
		return;
	}
	printk(KERN_ERR "%s: TX_STATE[%08x:%08x:%08x]\n",
	       dev->name,
	       readl(gp->regs + TXDMA_CFG),
	       readl(gp->regs + MAC_TXSTAT),
	       readl(gp->regs + MAC_TXCFG));
	printk(KERN_ERR "%s: RX_STATE[%08x:%08x:%08x]\n",
	       dev->name,
	       readl(gp->regs + RXDMA_CFG),
	       readl(gp->regs + MAC_RXSTAT),
	       readl(gp->regs + MAC_RXCFG));

	spin_lock_irq(&gp->lock);
	spin_lock(&gp->tx_lock);

	gp->reset_task_pending = 1;
	schedule_work(&gp->reset_task);

	spin_unlock(&gp->tx_lock);
	spin_unlock_irq(&gp->lock);
}

static __inline__ int gem_intme(int entry)
{
	/* Algorithm: IRQ every 1/2 of descriptors. */
	if (!(entry & ((TX_RING_SIZE>>1)-1)))
		return 1;

	return 0;
}
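
/* Worked example (illustrative): if TX_RING_SIZE is 128, the mask above is
 * 63, so gem_intme() returns true only for entries 0 and 64 - roughly two
 * TX-completion interrupts are requested per trip around the ring, with the
 * remaining completions reaped opportunistically from gem_poll().
 */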
static netdev_tx_t gem_start_xmit(struct sk_buff *skb,
				  struct net_device *dev)
{
	struct gem *gp = netdev_priv(dev);
	int entry;
	u64 ctrl;
	unsigned long flags;

	ctrl = 0;
	if (skb->ip_summed == CHECKSUM_PARTIAL) {
		const u64 csum_start_off = skb_transport_offset(skb);
		const u64 csum_stuff_off = csum_start_off + skb->csum_offset;

		ctrl = (TXDCTRL_CENAB |
			(csum_start_off << 15) |
			(csum_stuff_off << 21));
	}
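
	/* Worked example (illustrative): for a TCP/IPv4 frame with a 14-byte
	 * Ethernet header and a 20-byte IP header, skb_transport_offset() is
	 * 34, and skb->csum_offset is offsetof(struct tcphdr, check) == 16,
	 * so the descriptor tells the chip to start checksumming at byte 34
	 * of the frame and to stuff the result at byte 50.
	 */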
	if (!spin_trylock_irqsave(&gp->tx_lock, flags)) {
		/* Tell upper layer to requeue */
		return NETDEV_TX_LOCKED;
	}
	/* We raced with gem_do_stop() */
	if (!gp->running) {
		spin_unlock_irqrestore(&gp->tx_lock, flags);
		return NETDEV_TX_BUSY;
	}

	/* This is a hard error, log it. */
	if (TX_BUFFS_AVAIL(gp) <= (skb_shinfo(skb)->nr_frags + 1)) {
		netif_stop_queue(dev);
		spin_unlock_irqrestore(&gp->tx_lock, flags);
		printk(KERN_ERR PFX "%s: BUG! Tx Ring full when queue awake!\n",
		       dev->name);
		return NETDEV_TX_BUSY;
	}

	entry = gp->tx_new;
	gp->tx_skbs[entry] = skb;

	if (skb_shinfo(skb)->nr_frags == 0) {
		struct gem_txd *txd = &gp->init_block->txd[entry];
		dma_addr_t mapping;
		u32 len;

		len = skb->len;
		mapping = pci_map_page(gp->pdev,
				       virt_to_page(skb->data),
				       offset_in_page(skb->data),
				       len, PCI_DMA_TODEVICE);
		ctrl |= TXDCTRL_SOF | TXDCTRL_EOF | len;
		if (gem_intme(entry))
			ctrl |= TXDCTRL_INTME;
		txd->buffer = cpu_to_le64(mapping);
		wmb();
		txd->control_word = cpu_to_le64(ctrl);
		entry = NEXT_TX(entry);
	} else {
		struct gem_txd *txd;
		u32 first_len;
		u64 intme;
		dma_addr_t first_mapping;
		int frag, first_entry = entry;

		intme = 0;
		if (gem_intme(entry))
			intme |= TXDCTRL_INTME;

		/* We must give this initial chunk to the device last.
		 * Otherwise we could race with the device.
		 */
		first_len = skb_headlen(skb);
		first_mapping = pci_map_page(gp->pdev, virt_to_page(skb->data),
					     offset_in_page(skb->data),
					     first_len, PCI_DMA_TODEVICE);
		entry = NEXT_TX(entry);

		for (frag = 0; frag < skb_shinfo(skb)->nr_frags; frag++) {
			skb_frag_t *this_frag = &skb_shinfo(skb)->frags[frag];
			u32 len;
			dma_addr_t mapping;
			u64 this_ctrl;

			len = this_frag->size;
			mapping = pci_map_page(gp->pdev,
					       this_frag->page,
					       this_frag->page_offset,
					       len, PCI_DMA_TODEVICE);
			this_ctrl = ctrl;
			if (frag == skb_shinfo(skb)->nr_frags - 1)
				this_ctrl |= TXDCTRL_EOF;

			txd = &gp->init_block->txd[entry];
			txd->buffer = cpu_to_le64(mapping);
			wmb();
			txd->control_word = cpu_to_le64(this_ctrl | len);

			if (gem_intme(entry))
				intme |= TXDCTRL_INTME;

			entry = NEXT_TX(entry);
		}
		txd = &gp->init_block->txd[first_entry];
		txd->buffer = cpu_to_le64(first_mapping);
		wmb();
		txd->control_word =
			cpu_to_le64(ctrl | TXDCTRL_SOF | intme | first_len);
	}

	gp->tx_new = entry;
	if (TX_BUFFS_AVAIL(gp) <= (MAX_SKB_FRAGS + 1))
		netif_stop_queue(dev);

	if (netif_msg_tx_queued(gp))
		printk(KERN_DEBUG "%s: tx queued, slot %d, skblen %d\n",
		       dev->name, entry, skb->len);
	mb();
	writel(gp->tx_new, gp->regs + TXDMA_KICK);
	spin_unlock_irqrestore(&gp->tx_lock, flags);

	dev->trans_start = jiffies;

	return NETDEV_TX_OK;
}
static void gem_pcs_reset(struct gem *gp)
{
	int limit;
	u32 val;

	/* Reset PCS unit. */
	val = readl(gp->regs + PCS_MIICTRL);
	val |= PCS_MIICTRL_RST;
	writel(val, gp->regs + PCS_MIICTRL);

	limit = 32;
	while (readl(gp->regs + PCS_MIICTRL) & PCS_MIICTRL_RST) {
		udelay(100);
		if (limit-- <= 0)
			break;
	}
	if (limit < 0)
		printk(KERN_WARNING "%s: PCS reset bit would not clear.\n",
		       gp->dev->name);
}
static void gem_pcs_reinit_adv(struct gem *gp)
{
	u32 val;

	/* Make sure PCS is disabled while changing advertisement
	 * configuration.
	 */
	val = readl(gp->regs + PCS_CFG);
	val &= ~(PCS_CFG_ENABLE | PCS_CFG_TO);
	writel(val, gp->regs + PCS_CFG);

	/* Advertise all capabilities except asymmetric
	 * pause.
	 */
	val = readl(gp->regs + PCS_MIIADV);
	val |= (PCS_MIIADV_FD | PCS_MIIADV_HD |
		PCS_MIIADV_SP | PCS_MIIADV_AP);
	writel(val, gp->regs + PCS_MIIADV);

	/* Enable and restart auto-negotiation, disable wrapback/loopback,
	 * and re-enable PCS.
	 */
	val = readl(gp->regs + PCS_MIICTRL);
	val |= (PCS_MIICTRL_RAN | PCS_MIICTRL_ANE);
	val &= ~PCS_MIICTRL_WB;
	writel(val, gp->regs + PCS_MIICTRL);

	val = readl(gp->regs + PCS_CFG);
	val |= PCS_CFG_ENABLE;
	writel(val, gp->regs + PCS_CFG);

	/* Make sure serialink loopback is off. The meaning
	 * of this bit is logically inverted based upon whether
	 * you are in Serialink or SERDES mode.
	 */
	val = readl(gp->regs + PCS_SCTRL);
	if (gp->phy_type == phy_serialink)
		val &= ~PCS_SCTRL_LOOP;
	else
		val |= PCS_SCTRL_LOOP;
	writel(val, gp->regs + PCS_SCTRL);
}
#define STOP_TRIES 32

/* Must be invoked under gp->lock and gp->tx_lock. */
static void gem_reset(struct gem *gp)
{
	int limit;
	u32 val;

	/* Make sure we won't get any more interrupts */
	writel(0xffffffff, gp->regs + GREG_IMASK);

	/* Reset the chip */
	writel(gp->swrst_base | GREG_SWRST_TXRST | GREG_SWRST_RXRST,
	       gp->regs + GREG_SWRST);

	limit = STOP_TRIES;

	do {
		udelay(20);
		val = readl(gp->regs + GREG_SWRST);
		if (limit-- <= 0)
			break;
	} while (val & (GREG_SWRST_TXRST | GREG_SWRST_RXRST));

	if (limit < 0)
		printk(KERN_ERR "%s: SW reset is ghetto.\n", gp->dev->name);

	if (gp->phy_type == phy_serialink || gp->phy_type == phy_serdes)
		gem_pcs_reinit_adv(gp);
}
/* Must be invoked under gp->lock and gp->tx_lock. */
static void gem_start_dma(struct gem *gp)
{
	u32 val;

	/* We are ready to rock, turn everything on. */
	val = readl(gp->regs + TXDMA_CFG);
	writel(val | TXDMA_CFG_ENABLE, gp->regs + TXDMA_CFG);
	val = readl(gp->regs + RXDMA_CFG);
	writel(val | RXDMA_CFG_ENABLE, gp->regs + RXDMA_CFG);
	val = readl(gp->regs + MAC_TXCFG);
	writel(val | MAC_TXCFG_ENAB, gp->regs + MAC_TXCFG);
	val = readl(gp->regs + MAC_RXCFG);
	writel(val | MAC_RXCFG_ENAB, gp->regs + MAC_RXCFG);

	(void) readl(gp->regs + MAC_RXCFG);
	udelay(100);

	gem_enable_ints(gp);

	writel(RX_RING_SIZE - 4, gp->regs + RXDMA_KICK);
}
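
/* Note (illustrative): the kick value RX_RING_SIZE - 4 apparently leaves the
 * final cluster of four descriptors unposted, matching gem_post_rxds(),
 * which only ever returns descriptors to the chip in aligned groups of
 * four; the driver thus always keeps one cluster back from the hardware.
 */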
/* Must be invoked under gp->lock and gp->tx_lock. DMA won't be
 * actually stopped before about 4ms tho ...
 */
static void gem_stop_dma(struct gem *gp)
{
	u32 val;

	/* We are done rocking, turn everything off. */
	val = readl(gp->regs + TXDMA_CFG);
	writel(val & ~TXDMA_CFG_ENABLE, gp->regs + TXDMA_CFG);
	val = readl(gp->regs + RXDMA_CFG);
	writel(val & ~RXDMA_CFG_ENABLE, gp->regs + RXDMA_CFG);
	val = readl(gp->regs + MAC_TXCFG);
	writel(val & ~MAC_TXCFG_ENAB, gp->regs + MAC_TXCFG);
	val = readl(gp->regs + MAC_RXCFG);
	writel(val & ~MAC_RXCFG_ENAB, gp->regs + MAC_RXCFG);

	(void) readl(gp->regs + MAC_RXCFG);

	/* Need to wait a bit ... done by the caller */
}
/* Must be invoked under gp->lock and gp->tx_lock. */
// XXX dbl check what that function should do when called on PCS PHY
static void gem_begin_auto_negotiation(struct gem *gp, struct ethtool_cmd *ep)
{
	u32 advertise, features;
	int autoneg;
	int speed;
	int duplex;

	if (gp->phy_type != phy_mii_mdio0 &&
	    gp->phy_type != phy_mii_mdio1)
		goto non_mii;

	/* Setup advertise */
	if (found_mii_phy(gp))
		features = gp->phy_mii.def->features;
	else
		features = 0;

	advertise = features & ADVERTISE_MASK;
	if (gp->phy_mii.advertising != 0)
		advertise &= gp->phy_mii.advertising;

	autoneg = gp->want_autoneg;
	speed = gp->phy_mii.speed;
	duplex = gp->phy_mii.duplex;

	/* Setup link parameters */
	if (!ep)
		goto start_aneg;
	if (ep->autoneg == AUTONEG_ENABLE) {
		advertise = ep->advertising;
		autoneg = 1;
	} else {
		autoneg = 0;
		speed = ep->speed;
		duplex = ep->duplex;
	}

start_aneg:
	/* Sanitize settings based on PHY capabilities */
	if ((features & SUPPORTED_Autoneg) == 0)
		autoneg = 0;
	if (speed == SPEED_1000 &&
	    !(features & (SUPPORTED_1000baseT_Half | SUPPORTED_1000baseT_Full)))
		speed = SPEED_100;
	if (speed == SPEED_100 &&
	    !(features & (SUPPORTED_100baseT_Half | SUPPORTED_100baseT_Full)))
		speed = SPEED_10;
	if (duplex == DUPLEX_FULL &&
	    !(features & (SUPPORTED_1000baseT_Full |
			  SUPPORTED_100baseT_Full |
			  SUPPORTED_10baseT_Full)))
		duplex = DUPLEX_HALF;
	if (speed == 0)
		speed = SPEED_10;

	/* If we are asleep, we don't try to actually setup the PHY, we
	 * just store the settings
	 */
	if (gp->asleep) {
		gp->phy_mii.autoneg = gp->want_autoneg = autoneg;
		gp->phy_mii.speed = speed;
		gp->phy_mii.duplex = duplex;
		return;
	}

	/* Configure PHY & start aneg */
	gp->want_autoneg = autoneg;
	if (autoneg) {
		if (found_mii_phy(gp))
			gp->phy_mii.def->ops->setup_aneg(&gp->phy_mii, advertise);
		gp->lstate = link_aneg;
	} else {
		if (found_mii_phy(gp))
			gp->phy_mii.def->ops->setup_forced(&gp->phy_mii, speed, duplex);
		gp->lstate = link_force_ok;
	}

non_mii:
	gp->timer_ticks = 0;
	mod_timer(&gp->link_timer, jiffies + ((12 * HZ) / 10));
}
/* A link-up condition has occurred, initialize and enable the
 * rest of the chip.
 *
 * Must be invoked under gp->lock and gp->tx_lock.
 */
static int gem_set_link_modes(struct gem *gp)
{
	u32 val;
	int full_duplex, speed, pause;

	full_duplex = 0;
	speed = SPEED_10;
	pause = 0;

	if (found_mii_phy(gp)) {
		if (gp->phy_mii.def->ops->read_link(&gp->phy_mii))
			return 1;
		full_duplex = (gp->phy_mii.duplex == DUPLEX_FULL);
		speed = gp->phy_mii.speed;
		pause = gp->phy_mii.pause;
	} else if (gp->phy_type == phy_serialink ||
		   gp->phy_type == phy_serdes) {
		u32 pcs_lpa = readl(gp->regs + PCS_MIILP);

		if ((pcs_lpa & PCS_MIIADV_FD) || gp->phy_type == phy_serdes)
			full_duplex = 1;
		speed = SPEED_1000;
	}

	if (netif_msg_link(gp))
		printk(KERN_INFO "%s: Link is up at %d Mbps, %s-duplex.\n",
			gp->dev->name, speed, (full_duplex ? "full" : "half"));

	if (!gp->running)
		return 0;

	val = (MAC_TXCFG_EIPG0 | MAC_TXCFG_NGU);
	if (full_duplex) {
		val |= (MAC_TXCFG_ICS | MAC_TXCFG_ICOLL);
	} else {
		/* MAC_TXCFG_NBO must be zero. */
	}
	writel(val, gp->regs + MAC_TXCFG);

	val = (MAC_XIFCFG_OE | MAC_XIFCFG_LLED);
	if (!full_duplex &&
	    (gp->phy_type == phy_mii_mdio0 ||
	     gp->phy_type == phy_mii_mdio1)) {
		val |= MAC_XIFCFG_DISE;
	} else if (full_duplex) {
		val |= MAC_XIFCFG_FLED;
	}

	if (speed == SPEED_1000)
		val |= (MAC_XIFCFG_GMII);

	writel(val, gp->regs + MAC_XIFCFG);

	/* If gigabit and half-duplex, enable carrier extension
	 * mode. Else, disable it.
	 */
	if (speed == SPEED_1000 && !full_duplex) {
		val = readl(gp->regs + MAC_TXCFG);
		writel(val | MAC_TXCFG_TCE, gp->regs + MAC_TXCFG);

		val = readl(gp->regs + MAC_RXCFG);
		writel(val | MAC_RXCFG_RCE, gp->regs + MAC_RXCFG);
	} else {
		val = readl(gp->regs + MAC_TXCFG);
		writel(val & ~MAC_TXCFG_TCE, gp->regs + MAC_TXCFG);

		val = readl(gp->regs + MAC_RXCFG);
		writel(val & ~MAC_RXCFG_RCE, gp->regs + MAC_RXCFG);
	}

	if (gp->phy_type == phy_serialink ||
	    gp->phy_type == phy_serdes) {
		u32 pcs_lpa = readl(gp->regs + PCS_MIILP);

		if (pcs_lpa & (PCS_MIIADV_SP | PCS_MIIADV_AP))
			pause = 1;
	}

	if (netif_msg_link(gp)) {
		if (pause) {
			printk(KERN_INFO "%s: Pause is enabled "
			       "(rxfifo: %d off: %d on: %d)\n",
			       gp->dev->name,
			       gp->rx_fifo_sz,
			       gp->rx_pause_off,
			       gp->rx_pause_on);
		} else {
			printk(KERN_INFO "%s: Pause is disabled\n",
			       gp->dev->name);
		}
	}

	if (!full_duplex)
		writel(512, gp->regs + MAC_STIME);
	else
		writel(64, gp->regs + MAC_STIME);
	val = readl(gp->regs + MAC_MCCFG);
	if (pause)
		val |= (MAC_MCCFG_SPE | MAC_MCCFG_RPE);
	else
		val &= ~(MAC_MCCFG_SPE | MAC_MCCFG_RPE);
	writel(val, gp->regs + MAC_MCCFG);

	gem_start_dma(gp);

	return 0;
}
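
/* Note (illustrative): the MAC_STIME values above are the slot time. The 64
 * written for full duplex is the classic 64-byte slot, while the 512 used at
 * half duplex presumably accounts for gigabit half duplex, where carrier
 * extension stretches the slot to 512 bytes so collision detection still
 * works at the higher rate.
 */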
/* Must be invoked under gp->lock and gp->tx_lock. */
static int gem_mdio_link_not_up(struct gem *gp)
{
	switch (gp->lstate) {
	case link_force_ret:
		if (netif_msg_link(gp))
			printk(KERN_INFO "%s: Autoneg failed again, keeping"
				" forced mode\n", gp->dev->name);
		gp->phy_mii.def->ops->setup_forced(&gp->phy_mii,
			gp->last_forced_speed, DUPLEX_HALF);
		gp->timer_ticks = 5;
		gp->lstate = link_force_ok;
		return 0;
	case link_aneg:
		/* We try forced modes after a failed aneg only on PHYs that don't
		 * have the "magic_aneg" bit set, which means they internally do the
		 * whole forced-mode thingy. On these, we just restart aneg
		 */
		if (gp->phy_mii.def->magic_aneg)
			return 1;
		if (netif_msg_link(gp))
			printk(KERN_INFO "%s: switching to forced 100bt\n",
				gp->dev->name);
		/* Try forced modes. */
		gp->phy_mii.def->ops->setup_forced(&gp->phy_mii, SPEED_100,
			DUPLEX_HALF);
		gp->timer_ticks = 5;
		gp->lstate = link_force_try;
		return 0;
	case link_force_try:
		/* Downgrade from 100 to 10 Mbps if necessary.
		 * If already at 10Mbps, warn user about the
		 * situation every 10 ticks.
		 */
		if (gp->phy_mii.speed == SPEED_100) {
			gp->phy_mii.def->ops->setup_forced(&gp->phy_mii, SPEED_10,
				DUPLEX_HALF);
			gp->timer_ticks = 5;
			if (netif_msg_link(gp))
				printk(KERN_INFO "%s: switching to forced 10bt\n",
					gp->dev->name);
			return 0;
		} else
			return 1;
	default:
		return 0;
	}
}
static void gem_link_timer(unsigned long data)
{
	struct gem *gp = (struct gem *) data;
	int restart_aneg = 0;

	if (gp->asleep)
		return;

	spin_lock_irq(&gp->lock);
	spin_lock(&gp->tx_lock);
	gem_get_cell(gp);

	/* If the reset task is still pending, we just
	 * reschedule the link timer
	 */
	if (gp->reset_task_pending)
		goto restart;

	if (gp->phy_type == phy_serialink ||
	    gp->phy_type == phy_serdes) {
		u32 val = readl(gp->regs + PCS_MIISTAT);

		if (!(val & PCS_MIISTAT_LS))
			val = readl(gp->regs + PCS_MIISTAT);

		if ((val & PCS_MIISTAT_LS) != 0) {
			if (gp->lstate == link_up)
				goto restart;

			gp->lstate = link_up;
			netif_carrier_on(gp->dev);
			(void)gem_set_link_modes(gp);
		}
		goto restart;
	}
	if (found_mii_phy(gp) && gp->phy_mii.def->ops->poll_link(&gp->phy_mii)) {
		/* Ok, here we got a link. If we had it due to a forced
		 * fallback, and we were configured for autoneg, we do
		 * retry a short autoneg pass. If you know your hub is
		 * broken, use ethtool ;)
		 */
		if (gp->lstate == link_force_try && gp->want_autoneg) {
			gp->lstate = link_force_ret;
			gp->last_forced_speed = gp->phy_mii.speed;
			gp->timer_ticks = 5;
			if (netif_msg_link(gp))
				printk(KERN_INFO "%s: Got link after fallback, retrying"
					" autoneg once...\n", gp->dev->name);
			gp->phy_mii.def->ops->setup_aneg(&gp->phy_mii, gp->phy_mii.advertising);
		} else if (gp->lstate != link_up) {
			gp->lstate = link_up;
			netif_carrier_on(gp->dev);
			if (gem_set_link_modes(gp))
				restart_aneg = 1;
		}
	} else {
		/* If the link was previously up, we restart the
		 * whole process
		 */
		if (gp->lstate == link_up) {
			gp->lstate = link_down;
			if (netif_msg_link(gp))
				printk(KERN_INFO "%s: Link down\n",
					gp->dev->name);
			netif_carrier_off(gp->dev);
			gp->reset_task_pending = 1;
			schedule_work(&gp->reset_task);
			restart_aneg = 1;
		} else if (++gp->timer_ticks > 10) {
			if (found_mii_phy(gp))
				restart_aneg = gem_mdio_link_not_up(gp);
			else
				restart_aneg = 1;
		}
	}
	if (restart_aneg) {
		gem_begin_auto_negotiation(gp, NULL);
		goto out_unlock;
	}
restart:
	mod_timer(&gp->link_timer, jiffies + ((12 * HZ) / 10));
out_unlock:
	gem_put_cell(gp);
	spin_unlock(&gp->tx_lock);
	spin_unlock_irq(&gp->lock);
}
/* Must be invoked under gp->lock and gp->tx_lock. */
static void gem_clean_rings(struct gem *gp)
{
	struct gem_init_block *gb = gp->init_block;
	struct sk_buff *skb;
	int i;
	dma_addr_t dma_addr;

	for (i = 0; i < RX_RING_SIZE; i++) {
		struct gem_rxd *rxd;

		rxd = &gb->rxd[i];
		if (gp->rx_skbs[i] != NULL) {
			skb = gp->rx_skbs[i];
			dma_addr = le64_to_cpu(rxd->buffer);
			pci_unmap_page(gp->pdev, dma_addr,
				       RX_BUF_ALLOC_SIZE(gp),
				       PCI_DMA_FROMDEVICE);
			dev_kfree_skb_any(skb);
			gp->rx_skbs[i] = NULL;
		}
		rxd->status_word = 0;
		wmb();
		rxd->buffer = 0;
	}

	for (i = 0; i < TX_RING_SIZE; i++) {
		if (gp->tx_skbs[i] != NULL) {
			struct gem_txd *txd;
			int frag;

			skb = gp->tx_skbs[i];
			gp->tx_skbs[i] = NULL;

			for (frag = 0; frag <= skb_shinfo(skb)->nr_frags; frag++) {
				int ent = i & (TX_RING_SIZE - 1);

				txd = &gb->txd[ent];
				dma_addr = le64_to_cpu(txd->buffer);
				pci_unmap_page(gp->pdev, dma_addr,
					       le64_to_cpu(txd->control_word) &
					       TXDCTRL_BUFSZ, PCI_DMA_TODEVICE);

				if (frag != skb_shinfo(skb)->nr_frags)
					i++;
			}
			dev_kfree_skb_any(skb);
		}
	}
}
/* Must be invoked under gp->lock and gp->tx_lock. */
static void gem_init_rings(struct gem *gp)
{
	struct gem_init_block *gb = gp->init_block;
	struct net_device *dev = gp->dev;
	int i;
	dma_addr_t dma_addr;

	gp->rx_new = gp->rx_old = gp->tx_new = gp->tx_old = 0;

	gem_clean_rings(gp);

	gp->rx_buf_sz = max(dev->mtu + ETH_HLEN + VLAN_HLEN,
			    (unsigned)VLAN_ETH_FRAME_LEN);

	for (i = 0; i < RX_RING_SIZE; i++) {
		struct sk_buff *skb;
		struct gem_rxd *rxd = &gb->rxd[i];

		skb = gem_alloc_skb(RX_BUF_ALLOC_SIZE(gp), GFP_ATOMIC);
		if (!skb) {
			rxd->buffer = 0;
			rxd->status_word = 0;
			continue;
		}

		gp->rx_skbs[i] = skb;
		skb->dev = dev;
		skb_put(skb, (gp->rx_buf_sz + RX_OFFSET));
		dma_addr = pci_map_page(gp->pdev,
					virt_to_page(skb->data),
					offset_in_page(skb->data),
					RX_BUF_ALLOC_SIZE(gp),
					PCI_DMA_FROMDEVICE);
		rxd->buffer = cpu_to_le64(dma_addr);
		wmb();
		rxd->status_word = cpu_to_le64(RXDCTRL_FRESH(gp));
		skb_reserve(skb, RX_OFFSET);
	}

	for (i = 0; i < TX_RING_SIZE; i++) {
		struct gem_txd *txd = &gb->txd[i];

		txd->control_word = 0;
		wmb();
		txd->buffer = 0;
	}
	wmb();
}
/* Init PHY interface and start link poll state machine */
static void gem_init_phy(struct gem *gp)
{
	u32 mifcfg;

	/* Revert MIF CFG setting done on stop_phy */
	mifcfg = readl(gp->regs + MIF_CFG);
	mifcfg &= ~MIF_CFG_BBMODE;
	writel(mifcfg, gp->regs + MIF_CFG);

	if (gp->pdev->vendor == PCI_VENDOR_ID_APPLE) {
		int i;

		/* Those delays suck, but the HW seems to love them, so I'll
		 * seriously consider breaking some locks here to be able
		 * to schedule instead
		 */
		for (i = 0; i < 3; i++) {
#ifdef CONFIG_PPC_PMAC
			pmac_call_feature(PMAC_FTR_GMAC_PHY_RESET, gp->of_node, 0, 0);
			msleep(20);
#endif
			/* Some PHYs used by apple have problem getting back to us,
			 * we do an additional reset here
			 */
			phy_write(gp, MII_BMCR, BMCR_RESET);
			msleep(20);
			if (phy_read(gp, MII_BMCR) != 0xffff)
				break;
			if (i == 2)
				printk(KERN_WARNING "%s: GMAC PHY not responding !\n",
				       gp->dev->name);
		}
	}

	if (gp->pdev->vendor == PCI_VENDOR_ID_SUN &&
	    gp->pdev->device == PCI_DEVICE_ID_SUN_GEM) {
		u32 val;

		/* Init datapath mode register. */
		if (gp->phy_type == phy_mii_mdio0 ||
		    gp->phy_type == phy_mii_mdio1) {
			val = PCS_DMODE_MGM;
		} else if (gp->phy_type == phy_serialink) {
			val = PCS_DMODE_SM | PCS_DMODE_GMOE;
		} else {
			val = PCS_DMODE_ESM;
		}

		writel(val, gp->regs + PCS_DMODE);
	}

	if (gp->phy_type == phy_mii_mdio0 ||
	    gp->phy_type == phy_mii_mdio1) {
		// XXX check for errors
		mii_phy_probe(&gp->phy_mii, gp->mii_phy_addr);

		/* Init PHY */
		if (gp->phy_mii.def && gp->phy_mii.def->ops->init)
			gp->phy_mii.def->ops->init(&gp->phy_mii);
	} else {
		gem_pcs_reset(gp);
		gem_pcs_reinit_adv(gp);
	}

	/* Default aneg parameters */
	gp->timer_ticks = 0;
	gp->lstate = link_down;
	netif_carrier_off(gp->dev);

	/* Can I advertise gigabit here ? I'd need BCM PHY docs... */
	spin_lock_irq(&gp->lock);
	gem_begin_auto_negotiation(gp, NULL);
	spin_unlock_irq(&gp->lock);
}
/* Must be invoked under gp->lock and gp->tx_lock. */
static void gem_init_dma(struct gem *gp)
{
	u64 desc_dma = (u64) gp->gblock_dvma;
	u32 val;

	val = (TXDMA_CFG_BASE | (0x7ff << 10) | TXDMA_CFG_PMODE);
	writel(val, gp->regs + TXDMA_CFG);

	writel(desc_dma >> 32, gp->regs + TXDMA_DBHI);
	writel(desc_dma & 0xffffffff, gp->regs + TXDMA_DBLOW);
	desc_dma += (INIT_BLOCK_TX_RING_SIZE * sizeof(struct gem_txd));

	writel(0, gp->regs + TXDMA_KICK);

	val = (RXDMA_CFG_BASE | (RX_OFFSET << 10) |
	       ((14 / 2) << 13) | RXDMA_CFG_FTHRESH_128);
	writel(val, gp->regs + RXDMA_CFG);

	writel(desc_dma >> 32, gp->regs + RXDMA_DBHI);
	writel(desc_dma & 0xffffffff, gp->regs + RXDMA_DBLOW);

	writel(RX_RING_SIZE - 4, gp->regs + RXDMA_KICK);

	val  = (((gp->rx_pause_off / 64) << 0) & RXDMA_PTHRESH_OFF);
	val |= (((gp->rx_pause_on / 64) << 12) & RXDMA_PTHRESH_ON);
	writel(val, gp->regs + RXDMA_PTHRESH);

	if (readl(gp->regs + GREG_BIFCFG) & GREG_BIFCFG_M66EN)
		writel(((5 & RXDMA_BLANK_IPKTS) |
			((8 << 12) & RXDMA_BLANK_ITIME)),
		       gp->regs + RXDMA_BLANK);
	else
		writel(((5 & RXDMA_BLANK_IPKTS) |
			((4 << 12) & RXDMA_BLANK_ITIME)),
		       gp->regs + RXDMA_BLANK);
}
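
/* Note (illustrative): the RXDMA_BLANK setting above coalesces RX interrupts:
 * the chip holds off the interrupt until 5 packets have arrived or the
 * blanking timer expires. The timer field (8 vs 4) is scaled by whether the
 * PCI bus runs at 66 MHz (M66EN), presumably so the wall-clock blanking
 * interval stays roughly the same at both bus speeds.
 */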
/* Must be invoked under gp->lock and gp->tx_lock. */
static u32 gem_setup_multicast(struct gem *gp)
{
	u32 rxcfg = 0;
	int i;

	if ((gp->dev->flags & IFF_ALLMULTI) ||
	    (gp->dev->mc_count > 256)) {
		for (i=0; i<16; i++)
			writel(0xffff, gp->regs + MAC_HASH0 + (i << 2));
		rxcfg |= MAC_RXCFG_HFE;
	} else if (gp->dev->flags & IFF_PROMISC) {
		rxcfg |= MAC_RXCFG_PROM;
	} else {
		u16 hash_table[16];
		u32 crc;
		struct dev_mc_list *dmi = gp->dev->mc_list;
		int i;

		for (i = 0; i < 16; i++)
			hash_table[i] = 0;

		for (i = 0; i < gp->dev->mc_count; i++) {
			char *addrs = dmi->dmi_addr;

			dmi = dmi->next;

			if (!(*addrs & 1))
				continue;

			crc = ether_crc_le(6, addrs);
			crc >>= 24;
			hash_table[crc >> 4] |= 1 << (15 - (crc & 0xf));
		}
		for (i=0; i<16; i++)
			writel(hash_table[i], gp->regs + MAC_HASH0 + (i << 2));
		rxcfg |= MAC_RXCFG_HFE;
	}

	return rxcfg;
}
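
/* Worked example (illustrative): the hash filter has 256 buckets spread over
 * sixteen 16-bit registers. ether_crc_le() yields a 32-bit CRC of the
 * address; the top byte (crc >> 24) selects the bucket, with its high nibble
 * picking the register and its low nibble the bit, counted from the top
 * (15 - (crc & 0xf)). A CRC of 0xab000000 would thus set bit 4 of hash
 * register 10. The filter is inexact, so false positives are filtered
 * further up the stack.
 */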
/* Must be invoked under gp->lock and gp->tx_lock. */
static void gem_init_mac(struct gem *gp)
{
	unsigned char *e = &gp->dev->dev_addr[0];

	writel(0x1bf0, gp->regs + MAC_SNDPAUSE);

	writel(0x00, gp->regs + MAC_IPG0);
	writel(0x08, gp->regs + MAC_IPG1);
	writel(0x04, gp->regs + MAC_IPG2);
	writel(0x40, gp->regs + MAC_STIME);
	writel(0x40, gp->regs + MAC_MINFSZ);

	/* Ethernet payload + header + FCS + optional VLAN tag. */
	writel(0x20000000 | (gp->rx_buf_sz + 4), gp->regs + MAC_MAXFSZ);

	writel(0x07, gp->regs + MAC_PASIZE);
	writel(0x04, gp->regs + MAC_JAMSIZE);
	writel(0x10, gp->regs + MAC_ATTLIM);
	writel(0x8808, gp->regs + MAC_MCTYPE);

	writel((e[5] | (e[4] << 8)) & 0x3ff, gp->regs + MAC_RANDSEED);

	writel((e[4] << 8) | e[5], gp->regs + MAC_ADDR0);
	writel((e[2] << 8) | e[3], gp->regs + MAC_ADDR1);
	writel((e[0] << 8) | e[1], gp->regs + MAC_ADDR2);

	writel(0, gp->regs + MAC_ADDR3);
	writel(0, gp->regs + MAC_ADDR4);
	writel(0, gp->regs + MAC_ADDR5);

	writel(0x0001, gp->regs + MAC_ADDR6);
	writel(0xc200, gp->regs + MAC_ADDR7);
	writel(0x0180, gp->regs + MAC_ADDR8);

	writel(0, gp->regs + MAC_AFILT0);
	writel(0, gp->regs + MAC_AFILT1);
	writel(0, gp->regs + MAC_AFILT2);
	writel(0, gp->regs + MAC_AF21MSK);
	writel(0, gp->regs + MAC_AF0MSK);

	gp->mac_rx_cfg = gem_setup_multicast(gp);
#ifdef STRIP_FCS
	gp->mac_rx_cfg |= MAC_RXCFG_SFCS;
#endif

	writel(0, gp->regs + MAC_NCOLL);
	writel(0, gp->regs + MAC_FASUCC);
	writel(0, gp->regs + MAC_ECOLL);
	writel(0, gp->regs + MAC_LCOLL);
	writel(0, gp->regs + MAC_DTIMER);
	writel(0, gp->regs + MAC_PATMPS);
	writel(0, gp->regs + MAC_RFCTR);
	writel(0, gp->regs + MAC_LERR);
	writel(0, gp->regs + MAC_AERR);
	writel(0, gp->regs + MAC_FCSERR);
	writel(0, gp->regs + MAC_RXCVERR);

	/* Clear RX/TX/MAC/XIF config, we will set these up and enable
	 * them once a link is established.
	 */
	writel(0, gp->regs + MAC_TXCFG);
	writel(gp->mac_rx_cfg, gp->regs + MAC_RXCFG);
	writel(0, gp->regs + MAC_MCCFG);
	writel(0, gp->regs + MAC_XIFCFG);

	/* Setup MAC interrupts. We want to get all of the interesting
	 * counter expiration events, but we do not want to hear about
	 * normal rx/tx as the DMA engine tells us that.
	 */
	writel(MAC_TXSTAT_XMIT, gp->regs + MAC_TXMASK);
	writel(MAC_RXSTAT_RCV, gp->regs + MAC_RXMASK);

	/* Don't enable even the PAUSE interrupts for now, we
	 * make no use of those events other than to record them.
	 */
	writel(0xffffffff, gp->regs + MAC_MCMASK);

	/* Don't enable GEM's WOL in normal operations
	 */
	if (gp->has_wol)
		writel(0, gp->regs + WOL_WAKECSR);
}
/* Must be invoked under gp->lock and gp->tx_lock. */
static void gem_init_pause_thresholds(struct gem *gp)
{
	u32 cfg;

	/* Calculate pause thresholds. Setting the OFF threshold to the
	 * full RX fifo size effectively disables PAUSE generation which
	 * is what we do for 10/100 only GEMs which have FIFOs too small
	 * to make real gains from PAUSE.
	 */
	if (gp->rx_fifo_sz <= (2 * 1024)) {
		gp->rx_pause_off = gp->rx_pause_on = gp->rx_fifo_sz;
	} else {
		int max_frame = (gp->rx_buf_sz + 4 + 64) & ~63;
		int off = (gp->rx_fifo_sz - (max_frame * 2));
		int on = off - max_frame;

		gp->rx_pause_off = off;
		gp->rx_pause_on = on;
	}
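
	/* Worked example (illustrative): with the default 1500-byte MTU,
	 * rx_buf_sz is 1518, so max_frame = (1518 + 4 + 64) & ~63 = 1536.
	 * On a 20 KB fifo GEM that gives off = 20480 - 2 * 1536 = 17408
	 * and on = 17408 - 1536 = 15872 bytes, i.e. the thresholds sit two
	 * and three max-size frames below the top of the fifo. They are
	 * programmed into RXDMA_PTHRESH in units of 64 bytes by gem_init_dma().
	 */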
	/* Configure the chip "burst" DMA mode & enable some
	 * HW bug fixes on Apple version
	 */
	cfg = 0;
	if (gp->pdev->vendor == PCI_VENDOR_ID_APPLE)
		cfg |= GREG_CFG_RONPAULBIT | GREG_CFG_ENBUG2FIX;
#if !defined(CONFIG_SPARC64) && !defined(CONFIG_ALPHA)
	cfg |= GREG_CFG_IBURST;
#endif
	cfg |= ((31 << 1) & GREG_CFG_TXDMALIM);
	cfg |= ((31 << 6) & GREG_CFG_RXDMALIM);
	writel(cfg, gp->regs + GREG_CFG);

	/* If Infinite Burst didn't stick, then use different
	 * thresholds (and Apple bug fixes don't exist)
	 */
	if (!(readl(gp->regs + GREG_CFG) & GREG_CFG_IBURST)) {
		cfg = ((2 << 1) & GREG_CFG_TXDMALIM);
		cfg |= ((8 << 6) & GREG_CFG_RXDMALIM);
		writel(cfg, gp->regs + GREG_CFG);
	}
}
static int gem_check_invariants(struct gem *gp)
{
	struct pci_dev *pdev = gp->pdev;
	u32 mif_cfg;

	/* On Apple's sungem, we can't rely on registers as the chip
	 * has been powered down by the firmware. The PHY is looked
	 * up later on.
	 */
	if (pdev->vendor == PCI_VENDOR_ID_APPLE) {
		gp->phy_type = phy_mii_mdio0;
		gp->tx_fifo_sz = readl(gp->regs + TXDMA_FSZ) * 64;
		gp->rx_fifo_sz = readl(gp->regs + RXDMA_FSZ) * 64;
		gp->swrst_base = 0;

		mif_cfg = readl(gp->regs + MIF_CFG);
		mif_cfg &= ~(MIF_CFG_PSELECT|MIF_CFG_POLL|MIF_CFG_BBMODE|MIF_CFG_MDI1);
		mif_cfg |= MIF_CFG_MDI0;
		writel(mif_cfg, gp->regs + MIF_CFG);
		writel(PCS_DMODE_MGM, gp->regs + PCS_DMODE);
		writel(MAC_XIFCFG_OE, gp->regs + MAC_XIFCFG);

		/* We hard-code the PHY address so we can properly bring it out of
		 * reset later on, we can't really probe it at this point, though
		 * that isn't an issue.
		 */
		if (gp->pdev->device == PCI_DEVICE_ID_APPLE_K2_GMAC)
			gp->mii_phy_addr = 1;
		else
			gp->mii_phy_addr = 0;

		return 0;
	}

	mif_cfg = readl(gp->regs + MIF_CFG);

	if (pdev->vendor == PCI_VENDOR_ID_SUN &&
	    pdev->device == PCI_DEVICE_ID_SUN_RIO_GEM) {
		/* One of the MII PHYs _must_ be present
		 * as this chip has no gigabit PHY.
		 */
		if ((mif_cfg & (MIF_CFG_MDI0 | MIF_CFG_MDI1)) == 0) {
			printk(KERN_ERR PFX "RIO GEM lacks MII phy, mif_cfg[%08x]\n",
			       mif_cfg);
			return -1;
		}
	}

	/* Determine initial PHY interface type guess.  MDIO1 is the
	 * external PHY and thus takes precedence over MDIO0.
	 */
	if (mif_cfg & MIF_CFG_MDI1) {
		gp->phy_type = phy_mii_mdio1;
		mif_cfg |= MIF_CFG_PSELECT;
		writel(mif_cfg, gp->regs + MIF_CFG);
	} else if (mif_cfg & MIF_CFG_MDI0) {
		gp->phy_type = phy_mii_mdio0;
		mif_cfg &= ~MIF_CFG_PSELECT;
		writel(mif_cfg, gp->regs + MIF_CFG);
	} else {
#ifdef CONFIG_SPARC
		const char *p;

		p = of_get_property(gp->of_node, "shared-pins", NULL);
		if (p && !strcmp(p, "serdes"))
			gp->phy_type = phy_serdes;
		else
#endif
			gp->phy_type = phy_serialink;
	}
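	/* Probe the MDIO bus for the PHY address: a read of MII_BMCR
	 * returns all ones (0xffff) at addresses where no PHY responds.
	 */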
	if (gp->phy_type == phy_mii_mdio1 ||
	    gp->phy_type == phy_mii_mdio0) {
		int i;

		for (i = 0; i < 32; i++) {
			gp->mii_phy_addr = i;
			if (phy_read(gp, MII_BMCR) != 0xffff)
				break;
		}
		if (i == 32) {
			if (pdev->device != PCI_DEVICE_ID_SUN_GEM) {
				printk(KERN_ERR PFX "RIO MII phy will not respond.\n");
				return -1;
			}
			gp->phy_type = phy_serdes;
		}
	}

	/* Fetch the FIFO configurations now too. */
	gp->tx_fifo_sz = readl(gp->regs + TXDMA_FSZ) * 64;
	gp->rx_fifo_sz = readl(gp->regs + RXDMA_FSZ) * 64;

	if (pdev->vendor == PCI_VENDOR_ID_SUN) {
		if (pdev->device == PCI_DEVICE_ID_SUN_GEM) {
			if (gp->tx_fifo_sz != (9 * 1024) ||
			    gp->rx_fifo_sz != (20 * 1024)) {
				printk(KERN_ERR PFX "GEM has bogus fifo sizes tx(%d) rx(%d)\n",
				       gp->tx_fifo_sz, gp->rx_fifo_sz);
				return -1;
			}
			gp->swrst_base = 0;
		} else {
			if (gp->tx_fifo_sz != (2 * 1024) ||
			    gp->rx_fifo_sz != (2 * 1024)) {
				printk(KERN_ERR PFX "RIO GEM has bogus fifo sizes tx(%d) rx(%d)\n",
				       gp->tx_fifo_sz, gp->rx_fifo_sz);
				return -1;
			}
			gp->swrst_base = (64 / 4) << GREG_SWRST_CACHE_SHIFT;
		}
	}

	return 0;
}

/* Must be invoked under gp->lock and gp->tx_lock. */
static void gem_reinit_chip(struct gem *gp)
{
	/* Reset the chip */
	gem_reset(gp);

	/* Make sure ints are disabled */
	gem_disable_ints(gp);

	/* Allocate & setup ring buffers */
	gem_init_rings(gp);

	/* Configure pause thresholds */
	gem_init_pause_thresholds(gp);

	/* Init DMA & MAC engines */
	gem_init_dma(gp);
	gem_init_mac(gp);
}

/* Must be invoked with no lock held. */
static void gem_stop_phy(struct gem *gp, int wol)
{
	u32 mifcfg;
	unsigned long flags;

	/* Let the chip settle down a bit, it seems to help
	 * for sleep mode on some models
	 */
	msleep(10);

	/* Make sure we aren't polling PHY status change. We
	 * don't currently use that feature though
	 */
	mifcfg = readl(gp->regs + MIF_CFG);
	mifcfg &= ~MIF_CFG_POLL;
	writel(mifcfg, gp->regs + MIF_CFG);

	if (wol && gp->has_wol) {
		unsigned char *e = &gp->dev->dev_addr[0];
		u32 csr;

		/* Setup wake-on-lan for MAGIC packet */
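		/* The station address is loaded into the match registers
		 * 16 bits at a time: the last two octets go in WOL_MATCH0,
		 * the first two in WOL_MATCH2.
		 */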
		writel(MAC_RXCFG_HFE | MAC_RXCFG_SFCS | MAC_RXCFG_ENAB,
		       gp->regs + MAC_RXCFG);
		writel((e[4] << 8) | e[5], gp->regs + WOL_MATCH0);
		writel((e[2] << 8) | e[3], gp->regs + WOL_MATCH1);
		writel((e[0] << 8) | e[1], gp->regs + WOL_MATCH2);

		writel(WOL_MCOUNT_N | WOL_MCOUNT_M, gp->regs + WOL_MCOUNT);
		csr = WOL_WAKECSR_ENABLE;
		if ((readl(gp->regs + MAC_XIFCFG) & MAC_XIFCFG_GMII) == 0)
			csr |= WOL_WAKECSR_MII;
		writel(csr, gp->regs + WOL_WAKECSR);
	} else {
		writel(0, gp->regs + MAC_RXCFG);
		(void)readl(gp->regs + MAC_RXCFG);
		/* Machine sleep will die in strange ways if we
		 * don't wait a bit here, looks like the chip takes
		 * some time to really shut down
		 */
		msleep(10);
	}

	writel(0, gp->regs + MAC_TXCFG);
	writel(0, gp->regs + MAC_XIFCFG);
	writel(0, gp->regs + TXDMA_CFG);
	writel(0, gp->regs + RXDMA_CFG);

	if (!wol) {
		spin_lock_irqsave(&gp->lock, flags);
		spin_lock(&gp->tx_lock);
		gem_reset(gp);
		writel(MAC_TXRST_CMD, gp->regs + MAC_TXRST);
		writel(MAC_RXRST_CMD, gp->regs + MAC_RXRST);
		spin_unlock(&gp->tx_lock);
		spin_unlock_irqrestore(&gp->lock, flags);

		/* No need to take the lock here */

		if (found_mii_phy(gp) && gp->phy_mii.def->ops->suspend)
			gp->phy_mii.def->ops->suspend(&gp->phy_mii);

		/* According to Apple, we must set the MDIO pins to this benign
		 * state or we may 1) eat more current, 2) damage some PHYs
		 */
		writel(mifcfg | MIF_CFG_BBMODE, gp->regs + MIF_CFG);
		writel(0, gp->regs + MIF_BBCLK);
		writel(0, gp->regs + MIF_BBDATA);
		writel(0, gp->regs + MIF_BBOENAB);
		writel(MAC_XIFCFG_GMII | MAC_XIFCFG_LBCK, gp->regs + MAC_XIFCFG);
		(void) readl(gp->regs + MAC_XIFCFG);
	}
}

static int gem_do_start(struct net_device *dev)
{
	struct gem *gp = netdev_priv(dev);
	unsigned long flags;

	spin_lock_irqsave(&gp->lock, flags);
	spin_lock(&gp->tx_lock);

	/* Enable the cell */
	gem_get_cell(gp);

	/* Init & setup chip hardware */
	gem_reinit_chip(gp);

	gp->running = 1;

	napi_enable(&gp->napi);

	if (gp->lstate == link_up) {
		netif_carrier_on(gp->dev);
		gem_set_link_modes(gp);
	}

	netif_wake_queue(gp->dev);

	spin_unlock(&gp->tx_lock);
	spin_unlock_irqrestore(&gp->lock, flags);
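	/* The IRQ is requested only once the chip is fully set up and the
	 * locks have been dropped; on failure everything done above is
	 * unwound under the locks again.
	 */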
	if (request_irq(gp->pdev->irq, gem_interrupt,
			IRQF_SHARED, dev->name, (void *)dev)) {
		printk(KERN_ERR "%s: failed to request irq !\n", gp->dev->name);

		spin_lock_irqsave(&gp->lock, flags);
		spin_lock(&gp->tx_lock);

		napi_disable(&gp->napi);

		gp->running = 0;
		gem_reset(gp);
		gem_clean_rings(gp);
		gem_put_cell(gp);

		spin_unlock(&gp->tx_lock);
		spin_unlock_irqrestore(&gp->lock, flags);

		return -EAGAIN;
	}

	return 0;
}

static void gem_do_stop(struct net_device *dev, int wol)
{
	struct gem *gp = netdev_priv(dev);
	unsigned long flags;

	spin_lock_irqsave(&gp->lock, flags);
	spin_lock(&gp->tx_lock);

	gp->running = 0;

	/* Stop netif queue */
	netif_stop_queue(dev);

	/* Make sure ints are disabled */
	gem_disable_ints(gp);

	/* We can drop the lock now */
	spin_unlock(&gp->tx_lock);
	spin_unlock_irqrestore(&gp->lock, flags);

	/* If we are going to sleep with WOL, don't reset the chip */
	gem_stop_dma(gp);
	msleep(10);
	if (!wol)
		gem_reset(gp);
	msleep(10);

	/* Get rid of rings */
	gem_clean_rings(gp);

	/* No irq needed anymore */
	free_irq(gp->pdev->irq, (void *) dev);

	/* The cell is not needed either if there is no WOL */
	if (!wol) {
		spin_lock_irqsave(&gp->lock, flags);
		gem_put_cell(gp);
		spin_unlock_irqrestore(&gp->lock, flags);
	}
}

static void gem_reset_task(struct work_struct *work)
{
	struct gem *gp = container_of(work, struct gem, reset_task);

	mutex_lock(&gp->pm_mutex);
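	/* napi_disable() can sleep, so it must be done before the
	 * spinlocks below are taken.
	 */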
	if (gp->opened)
		napi_disable(&gp->napi);

	spin_lock_irq(&gp->lock);
	spin_lock(&gp->tx_lock);

	if (gp->running) {
		netif_stop_queue(gp->dev);

		/* Reset the chip & rings */
		gem_reinit_chip(gp);
		if (gp->lstate == link_up)
			gem_set_link_modes(gp);
		netif_wake_queue(gp->dev);
	}

	gp->reset_task_pending = 0;

	spin_unlock(&gp->tx_lock);
	spin_unlock_irq(&gp->lock);

	if (gp->opened)
		napi_enable(&gp->napi);

	mutex_unlock(&gp->pm_mutex);
}

static int gem_open(struct net_device *dev)
{
	struct gem *gp = netdev_priv(dev);
	int rc = 0;

	mutex_lock(&gp->pm_mutex);

	/* We need the cell enabled */
	if (!gp->asleep)
		rc = gem_do_start(dev);
	gp->opened = (rc == 0);

	mutex_unlock(&gp->pm_mutex);

	return rc;
}

static int gem_close(struct net_device *dev)
{
	struct gem *gp = netdev_priv(dev);

	mutex_lock(&gp->pm_mutex);

	napi_disable(&gp->napi);

	gp->opened = 0;
	if (!gp->asleep)
		gem_do_stop(dev, 0);

	mutex_unlock(&gp->pm_mutex);

	return 0;
}

#ifdef CONFIG_PM
static int gem_suspend(struct pci_dev *pdev, pm_message_t state)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct gem *gp = netdev_priv(dev);
	unsigned long flags;

	mutex_lock(&gp->pm_mutex);

	printk(KERN_INFO "%s: suspending, WakeOnLan %s\n",
	       dev->name,
	       (gp->wake_on_lan && gp->opened) ? "enabled" : "disabled");

	/* Keep the cell enabled during the entire operation */
	spin_lock_irqsave(&gp->lock, flags);
	spin_lock(&gp->tx_lock);
	gem_get_cell(gp);
	spin_unlock(&gp->tx_lock);
	spin_unlock_irqrestore(&gp->lock, flags);

	/* If the driver is opened, we stop the MAC */
	if (gp->opened) {
		napi_disable(&gp->napi);

		/* Stop traffic, mark us closed */
		netif_device_detach(dev);

		/* Switch off MAC, remember WOL setting */
		gp->asleep_wol = gp->wake_on_lan;
		gem_do_stop(dev, gp->asleep_wol);
	} else
		gp->asleep_wol = 0;

	/* Mark us asleep */
	gp->asleep = 1;
	wmb();

	/* Stop the link timer */
	del_timer_sync(&gp->link_timer);

	/* Now we release the mutex so as not to block the reset task, which
	 * can take it too. We are marked asleep, so there will be no
	 * conflict here
	 */
	mutex_unlock(&gp->pm_mutex);

	/* Wait for a pending reset task to complete */
	while (gp->reset_task_pending)
		yield();
	flush_scheduled_work();

	/* Shut the PHY down eventually and setup WOL */
	gem_stop_phy(gp, gp->asleep_wol);

	/* Make sure bus master is disabled */
	pci_disable_device(gp->pdev);

	/* Release the cell, no need to take a lock at this point since
	 * nothing else can happen now
	 */
	gem_put_cell(gp);

	return 0;
}

static int gem_resume(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct gem *gp = netdev_priv(dev);
	unsigned long flags;

	printk(KERN_INFO "%s: resuming\n", dev->name);

	mutex_lock(&gp->pm_mutex);

	/* Keep the cell enabled during the entire operation, no need to
	 * take a lock here though since nothing else can happen while we
	 * are marked asleep
	 */
	gem_get_cell(gp);

	/* Make sure PCI access and bus master are enabled */
	if (pci_enable_device(gp->pdev)) {
		printk(KERN_ERR "%s: Can't re-enable chip !\n",
		       dev->name);
		/* Put cell and forget it for now, it will be considered as
		 * still asleep, a new sleep cycle may bring it back
		 */
		gem_put_cell(gp);
		mutex_unlock(&gp->pm_mutex);
		return 0;
	}
	pci_set_master(gp->pdev);

	/* Reset everything */
	gem_reset(gp);

	/* Mark us woken up */
	gp->asleep = 0;
	wmb();

	/* Bring the PHY back. Again, lock is useless at this point as
	 * nothing can be happening until we restart the whole thing
	 */
	gem_init_phy(gp);

	/* If we were opened, bring everything back */
	if (gp->opened) {
		/* Restart MAC */
		gem_do_start(dev);

		/* Re-attach net device */
		netif_device_attach(dev);
	}

	spin_lock_irqsave(&gp->lock, flags);
	spin_lock(&gp->tx_lock);

	/* If we had WOL enabled, the cell clock was never turned off during
	 * sleep, so we end up being unbalanced. Fix that here
	 */
	if (gp->asleep_wol)
		gem_put_cell(gp);

	/* This function doesn't need to hold the cell, it will be held if the
	 * driver is open by gem_do_start().
	 */
	gem_put_cell(gp);

	spin_unlock(&gp->tx_lock);
	spin_unlock_irqrestore(&gp->lock, flags);

	mutex_unlock(&gp->pm_mutex);

	return 0;
}
#endif /* CONFIG_PM */

static struct net_device_stats *gem_get_stats(struct net_device *dev)
{
	struct gem *gp = netdev_priv(dev);
	struct net_device_stats *stats = &gp->net_stats;

	spin_lock_irq(&gp->lock);
	spin_lock(&gp->tx_lock);

	/* I have seen this being called while the PM was in progress,
	 * so we shield against this
	 */
	if (gp->running) {
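		/* Fold the hardware error counters into the software
		 * stats, then reset each counter by writing zero back.
		 */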
		stats->rx_crc_errors += readl(gp->regs + MAC_FCSERR);
		writel(0, gp->regs + MAC_FCSERR);

		stats->rx_frame_errors += readl(gp->regs + MAC_AERR);
		writel(0, gp->regs + MAC_AERR);

		stats->rx_length_errors += readl(gp->regs + MAC_LERR);
		writel(0, gp->regs + MAC_LERR);

		stats->tx_aborted_errors += readl(gp->regs + MAC_ECOLL);
		stats->collisions +=
			(readl(gp->regs + MAC_ECOLL) +
			 readl(gp->regs + MAC_LCOLL));
		writel(0, gp->regs + MAC_ECOLL);
		writel(0, gp->regs + MAC_LCOLL);
	}

	spin_unlock(&gp->tx_lock);
	spin_unlock_irq(&gp->lock);

	return &gp->net_stats;
}

static int gem_set_mac_address(struct net_device *dev, void *addr)
{
	struct sockaddr *macaddr = (struct sockaddr *) addr;
	struct gem *gp = netdev_priv(dev);
	unsigned char *e = &dev->dev_addr[0];

	if (!is_valid_ether_addr(macaddr->sa_data))
		return -EADDRNOTAVAIL;

	if (!netif_running(dev) || !netif_device_present(dev)) {
		/* We'll just catch it later when the device
		 * is brought up or resumed.
		 */
		memcpy(dev->dev_addr, macaddr->sa_data, dev->addr_len);
		return 0;
	}

	mutex_lock(&gp->pm_mutex);
	memcpy(dev->dev_addr, macaddr->sa_data, dev->addr_len);
	if (gp->running) {
		writel((e[4] << 8) | e[5], gp->regs + MAC_ADDR0);
		writel((e[2] << 8) | e[3], gp->regs + MAC_ADDR1);
		writel((e[0] << 8) | e[1], gp->regs + MAC_ADDR2);
	}
	mutex_unlock(&gp->pm_mutex);

	return 0;
}

static void gem_set_multicast(struct net_device *dev)
{
	struct gem *gp = netdev_priv(dev);
	u32 rxcfg, rxcfg_new;
	int limit = 10000;

	spin_lock_irq(&gp->lock);
	spin_lock(&gp->tx_lock);

	if (!gp->running)
		goto bail;

	netif_stop_queue(dev);

	rxcfg = readl(gp->regs + MAC_RXCFG);
	rxcfg_new = gem_setup_multicast(gp);
#ifdef STRIP_FCS
	rxcfg_new |= MAC_RXCFG_SFCS;
#endif
	gp->mac_rx_cfg = rxcfg_new;
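	/* The RX MAC must be quiesced before the filter is rewritten:
	 * clear the enable bit and spin until the chip acknowledges.
	 */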
	writel(rxcfg & ~MAC_RXCFG_ENAB, gp->regs + MAC_RXCFG);
	while (readl(gp->regs + MAC_RXCFG) & MAC_RXCFG_ENAB) {
		if (!limit--)
			break;
		udelay(10);
	}

	rxcfg &= ~(MAC_RXCFG_PROM | MAC_RXCFG_HFE);
	rxcfg |= rxcfg_new;

	writel(rxcfg, gp->regs + MAC_RXCFG);

	netif_wake_queue(dev);

bail:
	spin_unlock(&gp->tx_lock);
	spin_unlock_irq(&gp->lock);
}

/* Jumbo-grams don't seem to work :-( */
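/* 68 is the minimum MTU an IPv4 host must support (RFC 791) */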
#define GEM_MIN_MTU	68
#if 1
#define GEM_MAX_MTU	1500
#else
#define GEM_MAX_MTU	9000
#endif

static int gem_change_mtu(struct net_device *dev, int new_mtu)
{
	struct gem *gp = netdev_priv(dev);

	if (new_mtu < GEM_MIN_MTU || new_mtu > GEM_MAX_MTU)
		return -EINVAL;

	if (!netif_running(dev) || !netif_device_present(dev)) {
		/* We'll just catch it later when the device
		 * is brought up or resumed.
		 */
		dev->mtu = new_mtu;
		return 0;
	}

	mutex_lock(&gp->pm_mutex);
	spin_lock_irq(&gp->lock);
	spin_lock(&gp->tx_lock);
	dev->mtu = new_mtu;
	if (gp->running) {
		gem_reinit_chip(gp);
		if (gp->lstate == link_up)
			gem_set_link_modes(gp);
	}
	spin_unlock(&gp->tx_lock);
	spin_unlock_irq(&gp->lock);
	mutex_unlock(&gp->pm_mutex);

	return 0;
}

static void gem_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
{
	struct gem *gp = netdev_priv(dev);

	strcpy(info->driver, DRV_NAME);
	strcpy(info->version, DRV_VERSION);
	strcpy(info->bus_info, pci_name(gp->pdev));
}

static int gem_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
	struct gem *gp = netdev_priv(dev);

	if (gp->phy_type == phy_mii_mdio0 ||
	    gp->phy_type == phy_mii_mdio1) {
		if (gp->phy_mii.def)
			cmd->supported = gp->phy_mii.def->features;
		else
			cmd->supported = (SUPPORTED_10baseT_Half |
					  SUPPORTED_10baseT_Full);

		/* XXX hardcoded stuff for now */
		cmd->port = PORT_MII;
		cmd->transceiver = XCVR_EXTERNAL;
		cmd->phy_address = 0; /* XXX fixed PHYAD */

		/* Return current PHY settings */
		spin_lock_irq(&gp->lock);
		cmd->autoneg = gp->want_autoneg;
		cmd->speed = gp->phy_mii.speed;
		cmd->duplex = gp->phy_mii.duplex;
		cmd->advertising = gp->phy_mii.advertising;

		/* If we started with a forced mode, we don't have a default
		 * advertise set, we need to return something sensible so
		 * userland can re-enable autoneg properly.
		 */
		if (cmd->advertising == 0)
			cmd->advertising = cmd->supported;
		spin_unlock_irq(&gp->lock);
	} else { // XXX PCS ?
		cmd->supported =
			(SUPPORTED_10baseT_Half | SUPPORTED_10baseT_Full |
			 SUPPORTED_100baseT_Half | SUPPORTED_100baseT_Full |
			 SUPPORTED_Autoneg);
		cmd->advertising = cmd->supported;
		cmd->speed = 0;
		cmd->duplex = cmd->port = cmd->phy_address =
			cmd->transceiver = cmd->autoneg = 0;

		/* serdes usually means a Fibre connector, with most settings fixed */
		if (gp->phy_type == phy_serdes) {
			cmd->port = PORT_FIBRE;
			cmd->supported = (SUPPORTED_1000baseT_Half |
					  SUPPORTED_1000baseT_Full |
					  SUPPORTED_FIBRE | SUPPORTED_Autoneg |
					  SUPPORTED_Pause | SUPPORTED_Asym_Pause);
			cmd->advertising = cmd->supported;
			cmd->transceiver = XCVR_INTERNAL;
			if (gp->lstate == link_up)
				cmd->speed = SPEED_1000;
			cmd->duplex = DUPLEX_FULL;
			cmd->autoneg = 1;
		}
	}
	cmd->maxtxpkt = cmd->maxrxpkt = 0;

	return 0;
}

static int gem_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
	struct gem *gp = netdev_priv(dev);

	/* Verify the settings we care about. */
	if (cmd->autoneg != AUTONEG_ENABLE &&
	    cmd->autoneg != AUTONEG_DISABLE)
		return -EINVAL;

	if (cmd->autoneg == AUTONEG_ENABLE &&
	    cmd->advertising == 0)
		return -EINVAL;

	if (cmd->autoneg == AUTONEG_DISABLE &&
	    ((cmd->speed != SPEED_1000 &&
	      cmd->speed != SPEED_100 &&
	      cmd->speed != SPEED_10) ||
	     (cmd->duplex != DUPLEX_HALF &&
	      cmd->duplex != DUPLEX_FULL)))
		return -EINVAL;

	/* Apply settings and restart link process. */
	spin_lock_irq(&gp->lock);
	gem_get_cell(gp);
	gem_begin_auto_negotiation(gp, cmd);
	gem_put_cell(gp);
	spin_unlock_irq(&gp->lock);

	return 0;
}

static int gem_nway_reset(struct net_device *dev)
{
	struct gem *gp = netdev_priv(dev);

	if (!gp->want_autoneg)
		return -EINVAL;

	/* Restart link process. */
	spin_lock_irq(&gp->lock);
	gem_get_cell(gp);
	gem_begin_auto_negotiation(gp, NULL);
	gem_put_cell(gp);
	spin_unlock_irq(&gp->lock);

	return 0;
}

static u32 gem_get_msglevel(struct net_device *dev)
{
	struct gem *gp = netdev_priv(dev);
	return gp->msg_enable;
}

static void gem_set_msglevel(struct net_device *dev, u32 value)
{
	struct gem *gp = netdev_priv(dev);
	gp->msg_enable = value;
}

/* Add more when I understand how to program the chip */
/* like WAKE_UCAST | WAKE_MCAST | WAKE_BCAST */

#define WOL_SUPPORTED_MASK	(WAKE_MAGIC)

static void gem_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
{
	struct gem *gp = netdev_priv(dev);

	/* Add more when I understand how to program the chip */
	if (gp->has_wol) {
		wol->supported = WOL_SUPPORTED_MASK;
		wol->wolopts = gp->wake_on_lan;
	} else {
		wol->supported = 0;
		wol->wolopts = 0;
	}
}

static int gem_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
{
	struct gem *gp = netdev_priv(dev);

	if (!gp->has_wol)
		return -EOPNOTSUPP;
	gp->wake_on_lan = wol->wolopts & WOL_SUPPORTED_MASK;
	return 0;
}

static const struct ethtool_ops gem_ethtool_ops = {
	.get_drvinfo		= gem_get_drvinfo,
	.get_link		= ethtool_op_get_link,
	.get_settings		= gem_get_settings,
	.set_settings		= gem_set_settings,
	.nway_reset		= gem_nway_reset,
	.get_msglevel		= gem_get_msglevel,
	.set_msglevel		= gem_set_msglevel,
	.get_wol		= gem_get_wol,
	.set_wol		= gem_set_wol,
};

static int gem_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
{
	struct gem *gp = netdev_priv(dev);
	struct mii_ioctl_data *data = if_mii(ifr);
	int rc = -EOPNOTSUPP;
	unsigned long flags;

	/* Hold the PM mutex while doing ioctl's or we may collide
	 * with power management.
	 */
	mutex_lock(&gp->pm_mutex);
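	/* Grab a cell reference so the chip stays clocked while we
	 * touch the PHY registers below.
	 */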
	spin_lock_irqsave(&gp->lock, flags);
	gem_get_cell(gp);
	spin_unlock_irqrestore(&gp->lock, flags);

	switch (cmd) {
	case SIOCGMIIPHY:		/* Get address of MII PHY in use. */
		data->phy_id = gp->mii_phy_addr;
		/* Fallthrough... */

	case SIOCGMIIREG:		/* Read MII PHY register. */
		if (!gp->running)
			rc = -EAGAIN;
		else {
			data->val_out = __phy_read(gp, data->phy_id & 0x1f,
						   data->reg_num & 0x1f);
			rc = 0;
		}
		break;

	case SIOCSMIIREG:		/* Write MII PHY register. */
		if (!gp->running)
			rc = -EAGAIN;
		else {
			__phy_write(gp, data->phy_id & 0x1f, data->reg_num & 0x1f,
				    data->val_in);
			rc = 0;
		}
		break;
	}

	spin_lock_irqsave(&gp->lock, flags);
	gem_put_cell(gp);
	spin_unlock_irqrestore(&gp->lock, flags);

	mutex_unlock(&gp->pm_mutex);

	return rc;
}

#if (!defined(CONFIG_SPARC) && !defined(CONFIG_PPC_PMAC))
/* Fetch MAC address from vital product data of PCI ROM. */
static int find_eth_addr_in_vpd(void __iomem *rom_base, int len, unsigned char *dev_addr)
{
	int this_offset;

	for (this_offset = 0x20; this_offset < len; this_offset++) {
		void __iomem *p = rom_base + this_offset;
		int i;
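		/* This appears to match a VPD read-only tag (0x90) holding
		 * an "NA" (network address) keyword (0x4e 0x41 is ASCII
		 * "NA") with a six-byte payload.
		 */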
		if (readb(p + 0) != 0x90 ||
		    readb(p + 1) != 0x00 ||
		    readb(p + 2) != 0x09 ||
		    readb(p + 3) != 0x4e ||
		    readb(p + 4) != 0x41 ||
		    readb(p + 5) != 0x06)
			continue;

		this_offset += 6;
		p += 6;

		for (i = 0; i < 6; i++)
			dev_addr[i] = readb(p + i);
		return 1;
	}
	return 0;
}

static void get_gem_mac_nonobp(struct pci_dev *pdev, unsigned char *dev_addr)
{
	size_t size;
	void __iomem *p = pci_map_rom(pdev, &size);

	if (p) {
		int found;
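		/* 0x55, 0xaa is the standard PCI expansion ROM signature */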
		found = readb(p) == 0x55 &&
			readb(p + 1) == 0xaa &&
			find_eth_addr_in_vpd(p, (64 * 1024), dev_addr);
		pci_unmap_rom(pdev, p);
		if (found)
			return;
	}

	/* Sun MAC prefix then 3 random bytes. */
	dev_addr[0] = 0x08;
	dev_addr[1] = 0x00;
	dev_addr[2] = 0x20;
	get_random_bytes(dev_addr + 3, 3);
	return;
}
#endif /* not Sparc and not PPC */

static int __devinit gem_get_device_address(struct gem *gp)
{
#if defined(CONFIG_SPARC) || defined(CONFIG_PPC_PMAC)
	struct net_device *dev = gp->dev;
	const unsigned char *addr;

	addr = of_get_property(gp->of_node, "local-mac-address", NULL);
	if (addr == NULL) {
#ifdef CONFIG_SPARC
		addr = idprom->id_ethaddr;
#else
		printk("\n");
		printk(KERN_ERR "%s: can't get mac-address\n", dev->name);
		return -1;
#endif
	}
	memcpy(dev->dev_addr, addr, 6);
#else
	get_gem_mac_nonobp(gp->pdev, gp->dev->dev_addr);
#endif
	return 0;
}

static void gem_remove_one(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);

	if (dev) {
		struct gem *gp = netdev_priv(dev);

		unregister_netdev(dev);

		/* Stop the link timer */
		del_timer_sync(&gp->link_timer);

		/* We shouldn't need any locking here */
		gem_get_cell(gp);

		/* Wait for a pending reset task to complete */
		while (gp->reset_task_pending)
			yield();
		flush_scheduled_work();

		/* Shut the PHY down */
		gem_stop_phy(gp, 0);

		gem_put_cell(gp);

		/* Make sure bus master is disabled */
		pci_disable_device(gp->pdev);

		/* Free resources */
		pci_free_consistent(pdev,
				    sizeof(struct gem_init_block),
				    gp->init_block,
				    gp->gblock_dvma);
		iounmap(gp->regs);
		pci_release_regions(pdev);
		free_netdev(dev);

		pci_set_drvdata(pdev, NULL);
	}
}

static const struct net_device_ops gem_netdev_ops = {
	.ndo_open		= gem_open,
	.ndo_stop		= gem_close,
	.ndo_start_xmit		= gem_start_xmit,
	.ndo_get_stats		= gem_get_stats,
	.ndo_set_multicast_list	= gem_set_multicast,
	.ndo_do_ioctl		= gem_ioctl,
	.ndo_tx_timeout		= gem_tx_timeout,
	.ndo_change_mtu		= gem_change_mtu,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_set_mac_address	= gem_set_mac_address,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller	= gem_poll_controller,
#endif
};

static int __devinit gem_init_one(struct pci_dev *pdev,
				  const struct pci_device_id *ent)
{
	static int gem_version_printed = 0;
	unsigned long gemreg_base, gemreg_len;
	struct net_device *dev;
	struct gem *gp;
	int err, pci_using_dac;

	if (gem_version_printed++ == 0)
		printk(KERN_INFO "%s", version);

	/* Apple gmac note: during probe, the chip is powered up by
	 * the arch code to allow the code below to work (and to let
	 * the chip be probed on the config space). It won't stay powered
	 * up until the interface is brought up however, so we can't rely
	 * on register configuration done at this point.
	 */
	err = pci_enable_device(pdev);
	if (err) {
		printk(KERN_ERR PFX "Cannot enable MMIO operation, "
		       "aborting.\n");
		return err;
	}
	pci_set_master(pdev);

	/* Configure DMA attributes. */

	/* All of the GEM documentation states that 64-bit DMA addressing
	 * is fully supported and should work just fine.  However the
	 * front end for RIO based GEMs is different and only supports
	 * 32-bit addressing.
	 *
	 * For now we assume the various PPC GEMs are 32-bit only as well.
	 */
	if (pdev->vendor == PCI_VENDOR_ID_SUN &&
	    pdev->device == PCI_DEVICE_ID_SUN_GEM &&
	    !pci_set_dma_mask(pdev, DMA_BIT_MASK(64))) {
		pci_using_dac = 1;
	} else {
		err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
		if (err) {
			printk(KERN_ERR PFX "No usable DMA configuration, "
			       "aborting.\n");
			goto err_disable_device;
		}
		pci_using_dac = 0;
	}

	gemreg_base = pci_resource_start(pdev, 0);
	gemreg_len = pci_resource_len(pdev, 0);

	if ((pci_resource_flags(pdev, 0) & IORESOURCE_IO) != 0) {
		printk(KERN_ERR PFX "Cannot find proper PCI device "
		       "base address, aborting.\n");
		err = -ENODEV;
		goto err_disable_device;
	}

	dev = alloc_etherdev(sizeof(*gp));
	if (!dev) {
		printk(KERN_ERR PFX "Etherdev alloc failed, aborting.\n");
		err = -ENOMEM;
		goto err_disable_device;
	}
	SET_NETDEV_DEV(dev, &pdev->dev);

	gp = netdev_priv(dev);

	err = pci_request_regions(pdev, DRV_NAME);
	if (err) {
		printk(KERN_ERR PFX "Cannot obtain PCI resources, "
		       "aborting.\n");
		goto err_out_free_netdev;
	}

	gp->pdev = pdev;
	dev->base_addr = (long) pdev;
	gp->dev = dev;

	gp->msg_enable = DEFAULT_MSG;

	spin_lock_init(&gp->lock);
	spin_lock_init(&gp->tx_lock);
	mutex_init(&gp->pm_mutex);

	init_timer(&gp->link_timer);
	gp->link_timer.function = gem_link_timer;
	gp->link_timer.data = (unsigned long) gp;

	INIT_WORK(&gp->reset_task, gem_reset_task);

	gp->lstate = link_down;
	gp->timer_ticks = 0;
	netif_carrier_off(dev);

	gp->regs = ioremap(gemreg_base, gemreg_len);
	if (!gp->regs) {
		printk(KERN_ERR PFX "Cannot map device registers, "
		       "aborting.\n");
		err = -EIO;
		goto err_out_free_res;
	}

	/* On Apple, we want a reference to the Open Firmware device-tree
	 * node. We use it for clock control.
	 */
#if defined(CONFIG_PPC_PMAC) || defined(CONFIG_SPARC)
	gp->of_node = pci_device_to_OF_node(pdev);
#endif

	/* Only Apple version supports WOL afaik */
	if (pdev->vendor == PCI_VENDOR_ID_APPLE)
		gp->has_wol = 1;

	/* Make sure cell is enabled */
	gem_get_cell(gp);

	/* Make sure everything is stopped and in init state */
	gem_reset(gp);

	/* Fill up the mii_phy structure (even if we won't use it) */
	gp->phy_mii.dev = dev;
	gp->phy_mii.mdio_read = _phy_read;
	gp->phy_mii.mdio_write = _phy_write;
#ifdef CONFIG_PPC_PMAC
	gp->phy_mii.platform_data = gp->of_node;
#endif
	/* By default, we start with autoneg */
	gp->want_autoneg = 1;

	/* Check fifo sizes, PHY type, etc... */
	if (gem_check_invariants(gp)) {
		err = -ENODEV;
		goto err_out_iounmap;
	}

	/* It is guaranteed that the returned buffer will be at least
	 * PAGE_SIZE aligned.
	 */
	gp->init_block = (struct gem_init_block *)
		pci_alloc_consistent(pdev, sizeof(struct gem_init_block),
				     &gp->gblock_dvma);
	if (!gp->init_block) {
		printk(KERN_ERR PFX "Cannot allocate init block, "
		       "aborting.\n");
		err = -ENOMEM;
		goto err_out_iounmap;
	}

	if (gem_get_device_address(gp))
		goto err_out_free_consistent;

	dev->netdev_ops = &gem_netdev_ops;
	netif_napi_add(dev, &gp->napi, gem_poll, 64);
	dev->ethtool_ops = &gem_ethtool_ops;
	dev->watchdog_timeo = 5 * HZ;
	dev->irq = pdev->irq;
	dev->dma = 0;

	/* Set that now, in case PM kicks in now */
	pci_set_drvdata(pdev, dev);

	/* Detect & init PHY, start autoneg, we release the cell now
	 * too, it will be managed by whoever needs it
	 */
	gem_init_phy(gp);

	spin_lock_irq(&gp->lock);
	gem_put_cell(gp);
	spin_unlock_irq(&gp->lock);

	/* Register with kernel */
	if (register_netdev(dev)) {
		printk(KERN_ERR PFX "Cannot register net device, "
		       "aborting.\n");
		err = -ENOMEM;
		goto err_out_free_consistent;
	}

	printk(KERN_INFO "%s: Sun GEM (PCI) 10/100/1000BaseT Ethernet %pM\n",
	       dev->name, dev->dev_addr);

	if (gp->phy_type == phy_mii_mdio0 ||
	    gp->phy_type == phy_mii_mdio1)
		printk(KERN_INFO "%s: Found %s PHY\n", dev->name,
		       gp->phy_mii.def ? gp->phy_mii.def->name : "no");

	/* GEM can do it all... */
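	/* NETIF_F_LLTX: the driver serializes transmits with its own
	 * tx_lock rather than relying on the core netdev xmit lock.
	 */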
	dev->features |= NETIF_F_SG | NETIF_F_HW_CSUM | NETIF_F_LLTX;
	if (pci_using_dac)
		dev->features |= NETIF_F_HIGHDMA;

	return 0;

err_out_free_consistent:
	gem_remove_one(pdev);
err_out_iounmap:
	gem_put_cell(gp);
	iounmap(gp->regs);

err_out_free_res:
	pci_release_regions(pdev);

err_out_free_netdev:
	free_netdev(dev);
err_disable_device:
	pci_disable_device(pdev);
	return err;
}

static struct pci_driver gem_driver = {
	.name		= GEM_MODULE_NAME,
	.id_table	= gem_pci_tbl,
	.probe		= gem_init_one,
	.remove		= gem_remove_one,
#ifdef CONFIG_PM
	.suspend	= gem_suspend,
	.resume		= gem_resume,
#endif /* CONFIG_PM */
};

static int __init gem_init(void)
{
	return pci_register_driver(&gem_driver);
}

static void __exit gem_cleanup(void)
{
	pci_unregister_driver(&gem_driver);
}

module_init(gem_init);
module_exit(gem_cleanup);