/*
 * Alchemy Au1x00 ethernet driver
 *
 * Copyright 2001-2003, 2006 MontaVista Software Inc.
 * Copyright 2002 TimeSys Corp.
 * Added ethtool/mii-tool support,
 * Copyright 2004 Matt Porter <mporter@kernel.crashing.org>
 * Update: 2004 Bjoern Riemer, riemer@fokus.fraunhofer.de
 * or riemer@riemer-nt.de: fixed the link beat detection with
 * ioctls (SIOCGMIIPHY)
 * Copyright 2006 Herbert Valerio Riedel <hvr@gnu.org>
 * converted to use linux-2.6.x's PHY framework
 *
 * Author: MontaVista Software, Inc.
 * ppopov@mvista.com or source@mvista.com
 *
 * ########################################################################
 *
 * This program is free software; you can distribute it and/or modify it
 * under the terms of the GNU General Public License (Version 2) as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * for more details.
 *
 * You should have received a copy of the GNU General Public License along
 * with this program; if not, write to the Free Software Foundation, Inc.,
 * 59 Temple Place - Suite 330, Boston MA 02111-1307, USA.
 *
 * ########################################################################
 */
#include <linux/dma-mapping.h>
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/timer.h>
#include <linux/errno.h>
#include <linux/ioport.h>
#include <linux/bitops.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/init.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/ethtool.h>
#include <linux/mii.h>
#include <linux/skbuff.h>
#include <linux/delay.h>
#include <linux/crc32.h>
#include <linux/phy.h>

#include <asm/mipsregs.h>
#include <asm/processor.h>

#include "au1000_eth.h"
#ifdef AU1000_ETH_DEBUG
static int au1000_debug = 5;
#else
static int au1000_debug = 3;
#endif
#define DRV_NAME	"au1000_eth"
#define DRV_VERSION	"1.6"
#define DRV_AUTHOR	"Pete Popov <ppopov@embeddedalley.com>"
#define DRV_DESC	"Au1xxx on-chip Ethernet driver"

MODULE_AUTHOR(DRV_AUTHOR);
MODULE_DESCRIPTION(DRV_DESC);
MODULE_LICENSE("GPL");
static void hard_stop(struct net_device *);
static void enable_rx_tx(struct net_device *dev);
static struct net_device * au1000_probe(int port_num);
static int au1000_init(struct net_device *);
static int au1000_open(struct net_device *);
static int au1000_close(struct net_device *);
static int au1000_tx(struct sk_buff *, struct net_device *);
static int au1000_rx(struct net_device *);
static irqreturn_t au1000_interrupt(int, void *);
static void au1000_tx_timeout(struct net_device *);
static void set_rx_mode(struct net_device *);
static int au1000_ioctl(struct net_device *, struct ifreq *, int);
static int mdio_read(struct net_device *, int, int);
static void mdio_write(struct net_device *, int, int, u16);
static void au1000_adjust_link(struct net_device *);
static void enable_mac(struct net_device *, int);
/*
 * Theory of operation
 *
 * The Au1000 MACs use a simple rx and tx descriptor ring scheme.
 * There are four receive and four transmit descriptors.  These
 * descriptors are not in memory; rather, they are just a set of
 * hardware registers.
 *
 * Since the Au1000 has a coherent data cache, the receive and
 * transmit buffers are allocated from the KSEG0 segment.  The
 * hardware registers, however, are still mapped at KSEG1 to
 * make sure there are no out-of-order writes, and that all writes
 * complete immediately.
 */
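/*
 * Illustrative note (added for clarity, not from the original sources):
 * because both rings have four entries, NUM_RX_DMA and NUM_TX_DMA are
 * powers of two and the ring indices used throughout this file wrap with
 * a mask instead of a modulo, e.g.
 *
 *	aup->tx_head = (aup->tx_head + 1) & (NUM_TX_DMA - 1);	 i.e. 0,1,2,3,0,...
 *
 * as done in au1000_tx(), au1000_tx_ack() and au1000_rx().
 */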
/* These addresses are only used if yamon doesn't tell us what
 * the mac address is, and the mac address is not passed on the
 * command line.
 */
static unsigned char au1000_mac_addr[6] __devinitdata = {
	0x00, 0x50, 0xc2, 0x0c, 0x30, 0x00
};

struct au1000_private *au_macs[NUM_ETH_INTERFACES];
/*
 * board-specific configurations
 *
 * PHY detection algorithm
 *
 * If AU1XXX_PHY_STATIC_CONFIG is undefined, the PHY setup is
 * autodetected:
 *
 * mii_probe() first searches the current MAC's MII bus for a PHY,
 * selecting the first (or last, if AU1XXX_PHY_SEARCH_HIGHEST_ADDR is
 * defined) PHY address not already claimed by another netdev.
 *
 * If nothing was found that way when searching for the 2nd ethernet
 * controller's PHY and AU1XXX_PHY1_SEARCH_ON_MAC0 is defined, then
 * the first MII bus is searched as well for an unclaimed PHY; this is
 * needed in case of a dual-PHY accessible only through the MAC0's MII
 * bus.
 *
 * Finally, if no PHY is found, then the corresponding ethernet
 * controller is not registered to the network subsystem.
 */

/* autodetection defaults */
#undef  AU1XXX_PHY_SEARCH_HIGHEST_ADDR
#define AU1XXX_PHY1_SEARCH_ON_MAC0
/* static PHY setup
 *
 * Most boards' PHY setup should be detectable properly with the
 * autodetection algorithm in mii_probe(), but in some cases (e.g. if
 * you have a switch attached, or want to use the PHY's interrupt
 * notification capabilities) you can provide a static PHY
 * configuration here.
 *
 * IRQs may only be set if a PHY address was configured.
 * If a PHY address is given, a bus id is also required to be set.
 *
 * ps: make sure the used irqs are configured properly in the board
 * specific irq-map
 */
#if defined(CONFIG_MIPS_BOSPORUS)
/*
 * Micrel/Kendin 5 port switch attached to MAC0,
 * MAC0 is associated with PHY address 5 (== WAN port)
 * MAC1 is not associated with any PHY, since it's connected directly
 * to the switch.
 * no interrupts are used
 */
# define AU1XXX_PHY_STATIC_CONFIG

# define AU1XXX_PHY0_ADDR  5
# define AU1XXX_PHY0_BUSID 0
#  undef AU1XXX_PHY0_IRQ

#  undef AU1XXX_PHY1_ADDR
#  undef AU1XXX_PHY1_BUSID
#  undef AU1XXX_PHY1_IRQ
#endif

#if defined(AU1XXX_PHY0_BUSID) && (AU1XXX_PHY0_BUSID > 0)
# error MAC0-associated PHY attached 2nd MACs MII bus not supported yet
#endif
static int mdio_read(struct net_device *dev, int phy_addr, int reg)
{
	struct au1000_private *aup = (struct au1000_private *) dev->priv;
	volatile u32 *const mii_control_reg = &aup->mac->mii_control;
	volatile u32 *const mii_data_reg = &aup->mac->mii_data;
	u32 timedout = 20;
	u32 mii_control;

	while (*mii_control_reg & MAC_MII_BUSY) {
		if (--timedout == 0) {
			printk(KERN_ERR "%s: read_MII busy timeout!!\n",
					dev->name);
			return -1;
		}
	}

	mii_control = MAC_SET_MII_SELECT_REG(reg) |
		MAC_SET_MII_SELECT_PHY(phy_addr) | MAC_MII_READ;

	*mii_control_reg = mii_control;

	timedout = 20;
	while (*mii_control_reg & MAC_MII_BUSY) {
		if (--timedout == 0) {
			printk(KERN_ERR "%s: mdio_read busy timeout!!\n",
					dev->name);
			return -1;
		}
	}

	return (int)*mii_data_reg;
}
static void mdio_write(struct net_device *dev, int phy_addr, int reg, u16 value)
{
	struct au1000_private *aup = (struct au1000_private *) dev->priv;
	volatile u32 *const mii_control_reg = &aup->mac->mii_control;
	volatile u32 *const mii_data_reg = &aup->mac->mii_data;
	u32 timedout = 20;
	u32 mii_control;

	while (*mii_control_reg & MAC_MII_BUSY) {
		if (--timedout == 0) {
			printk(KERN_ERR "%s: mdio_write busy timeout!!\n",
					dev->name);
			return;
		}
	}

	mii_control = MAC_SET_MII_SELECT_REG(reg) |
		MAC_SET_MII_SELECT_PHY(phy_addr) | MAC_MII_WRITE;

	*mii_data_reg = value;
	*mii_control_reg = mii_control;
}
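/*
 * Illustrative usage sketch (added for clarity, not part of the original
 * driver): with the raw accessors above, reading a PHY's basic mode status
 * register and restarting autonegotiation would look like
 *
 *	int bmsr = mdio_read(dev, phy_addr, MII_BMSR);
 *	mdio_write(dev, phy_addr, MII_BMCR, BMCR_ANENABLE | BMCR_ANRESTART);
 *
 * where phy_addr is a hypothetical PHY address and MII_BMSR/MII_BMCR and
 * the BMCR_* bits come from <linux/mii.h> (already included above).  In
 * practice the PHY framework does this through the mdiobus_* wrappers below.
 */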
static int mdiobus_read(struct mii_bus *bus, int phy_addr, int regnum)
{
	/* WARNING: bus->phy_map[phy_addr].attached_dev == dev does
	 * _NOT_ hold (e.g. when PHY is accessed through other MAC's MII bus) */
	struct net_device *const dev = bus->priv;

	enable_mac(dev, 0); /* make sure the MAC associated with this
			     * mii_bus is enabled */
	return mdio_read(dev, phy_addr, regnum);
}
static int mdiobus_write(struct mii_bus *bus, int phy_addr, int regnum,
			 u16 value)
{
	struct net_device *const dev = bus->priv;

	enable_mac(dev, 0); /* make sure the MAC associated with this
			     * mii_bus is enabled */
	mdio_write(dev, phy_addr, regnum, value);
	return 0;
}
static int mdiobus_reset(struct mii_bus *bus)
{
	struct net_device *const dev = bus->priv;

	enable_mac(dev, 0); /* make sure the MAC associated with this
			     * mii_bus is enabled */
	return 0;
}
static int mii_probe (struct net_device *dev)
{
	struct au1000_private *const aup = (struct au1000_private *) dev->priv;
	struct phy_device *phydev = NULL;

#if defined(AU1XXX_PHY_STATIC_CONFIG)
	BUG_ON(aup->mac_id < 0 || aup->mac_id > 1);

	if(aup->mac_id == 0) { /* get PHY0 */
# if defined(AU1XXX_PHY0_ADDR)
		phydev = au_macs[AU1XXX_PHY0_BUSID]->mii_bus.phy_map[AU1XXX_PHY0_ADDR];
# else
		printk (KERN_INFO DRV_NAME ":%s: using PHY-less setup\n",
			dev->name);
		return 0;
# endif /* defined(AU1XXX_PHY0_ADDR) */
	} else if (aup->mac_id == 1) { /* get PHY1 */
# if defined(AU1XXX_PHY1_ADDR)
		phydev = au_macs[AU1XXX_PHY1_BUSID]->mii_bus.phy_map[AU1XXX_PHY1_ADDR];
# else
		printk (KERN_INFO DRV_NAME ":%s: using PHY-less setup\n",
			dev->name);
		return 0;
# endif /* defined(AU1XXX_PHY1_ADDR) */
	}

#else /* defined(AU1XXX_PHY_STATIC_CONFIG) */
	int phy_addr;

	/* find the first (lowest address) PHY on the current MAC's MII bus */
	for (phy_addr = 0; phy_addr < PHY_MAX_ADDR; phy_addr++)
		if (aup->mii_bus.phy_map[phy_addr]) {
			phydev = aup->mii_bus.phy_map[phy_addr];
# if !defined(AU1XXX_PHY_SEARCH_HIGHEST_ADDR)
			break; /* break out with first one found */
# endif
		}

# if defined(AU1XXX_PHY1_SEARCH_ON_MAC0)
	/* try harder to find a PHY */
	if (!phydev && (aup->mac_id == 1)) {
		/* no PHY found, maybe we have a dual PHY? */
		printk (KERN_INFO DRV_NAME ": no PHY found on MAC1, "
			"let's see if it's attached to MAC0...\n");

		/* find the first (lowest address) non-attached PHY on
		 * the MAC0 MII bus */
		for (phy_addr = 0; phy_addr < PHY_MAX_ADDR; phy_addr++) {
			struct phy_device *const tmp_phydev =
				au_macs[0]->mii_bus.phy_map[phy_addr];

			if (!tmp_phydev)
				continue; /* no PHY here... */

			if (tmp_phydev->attached_dev)
				continue; /* already claimed by MAC0 */

			phydev = tmp_phydev;
			break; /* found it */
		}
	}
# endif /* defined(AU1XXX_PHY1_SEARCH_ON_MAC0) */

#endif /* defined(AU1XXX_PHY_STATIC_CONFIG) */
	if (!phydev) {
		printk (KERN_ERR DRV_NAME ":%s: no PHY found\n", dev->name);
		return -1;
	}

	/* now we are supposed to have a proper phydev, to attach to... */
	BUG_ON(phydev->attached_dev);

	phydev = phy_connect(dev, phydev->dev.bus_id, &au1000_adjust_link, 0,
			PHY_INTERFACE_MODE_MII);

	if (IS_ERR(phydev)) {
		printk(KERN_ERR "%s: Could not attach to PHY\n", dev->name);
		return PTR_ERR(phydev);
	}

	/* mask with MAC supported features */
	phydev->supported &= (SUPPORTED_10baseT_Half
			      | SUPPORTED_10baseT_Full
			      | SUPPORTED_100baseT_Half
			      | SUPPORTED_100baseT_Full
			      | SUPPORTED_Autoneg
			      /* | SUPPORTED_Pause | SUPPORTED_Asym_Pause */
			      | SUPPORTED_MII
			      | SUPPORTED_TP);

	phydev->advertising = phydev->supported;

	aup->old_link = 0;
	aup->old_speed = 0;
	aup->old_duplex = -1;
	aup->phy_dev = phydev;

	printk(KERN_INFO "%s: attached PHY driver [%s] "
	       "(mii_bus:phy_addr=%s, irq=%d)\n",
	       dev->name, phydev->drv->name, phydev->dev.bus_id, phydev->irq);

	return 0;
}
/*
 * Buffer allocation/deallocation routines. The buffer descriptor returned
 * has the virtual and dma address of a buffer suitable for
 * both receive and transmit operations.
 */
static db_dest_t *GetFreeDB(struct au1000_private *aup)
{
	db_dest_t *pDB;

	pDB = aup->pDBfree;
	if (pDB) {
		aup->pDBfree = pDB->pnext;
	}
	return pDB;
}

void ReleaseDB(struct au1000_private *aup, db_dest_t *pDB)
{
	db_dest_t *pDBfree = aup->pDBfree;

	if (pDBfree)
		pDBfree->pnext = pDB;
	aup->pDBfree = pDB;
}
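/*
 * Illustrative note (added for clarity, not from the original sources):
 * the two helpers above manage a simple free list threaded through
 * db_dest_t.pnext, so a typical borrow/return cycle looks like
 *
 *	db_dest_t *pDB = GetFreeDB(aup);
 *	if (pDB) {
 *		... use pDB->vaddr / pDB->dma_addr ...
 *		ReleaseDB(aup, pDB);
 *	}
 *
 * with aup->pDBfree always pointing at the most recently released entry.
 */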
static void enable_rx_tx(struct net_device *dev)
{
	struct au1000_private *aup = (struct au1000_private *) dev->priv;

	if (au1000_debug > 4)
		printk(KERN_INFO "%s: enable_rx_tx\n", dev->name);

	aup->mac->control |= (MAC_RX_ENABLE | MAC_TX_ENABLE);
}
static void hard_stop(struct net_device *dev)
{
	struct au1000_private *aup = (struct au1000_private *) dev->priv;

	if (au1000_debug > 4)
		printk(KERN_INFO "%s: hard stop\n", dev->name);

	aup->mac->control &= ~(MAC_RX_ENABLE | MAC_TX_ENABLE);
}
static void enable_mac(struct net_device *dev, int force_reset)
{
	unsigned long flags;
	struct au1000_private *aup = (struct au1000_private *) dev->priv;

	spin_lock_irqsave(&aup->lock, flags);

	if(force_reset || (!aup->mac_enabled)) {
		*aup->enable = MAC_EN_CLOCK_ENABLE;
		*aup->enable = (MAC_EN_RESET0 | MAC_EN_RESET1 | MAC_EN_RESET2
				| MAC_EN_CLOCK_ENABLE);

		aup->mac_enabled = 1;
	}

	spin_unlock_irqrestore(&aup->lock, flags);
}
static void reset_mac_unlocked(struct net_device *dev)
{
	struct au1000_private *const aup = (struct au1000_private *) dev->priv;
	int i;

	*aup->enable = MAC_EN_CLOCK_ENABLE;

	for (i = 0; i < NUM_RX_DMA; i++) {
		/* reset control bits */
		aup->rx_dma_ring[i]->buff_stat &= ~0xf;
	}
	for (i = 0; i < NUM_TX_DMA; i++) {
		/* reset control bits */
		aup->tx_dma_ring[i]->buff_stat &= ~0xf;
	}

	aup->mac_enabled = 0;
}
static void reset_mac(struct net_device *dev)
{
	struct au1000_private *const aup = (struct au1000_private *) dev->priv;
	unsigned long flags;

	if (au1000_debug > 4)
		printk(KERN_INFO "%s: reset mac, aup %x\n",
				dev->name, (unsigned)aup);

	spin_lock_irqsave(&aup->lock, flags);

	reset_mac_unlocked (dev);

	spin_unlock_irqrestore(&aup->lock, flags);
}
/*
 * Setup the receive and transmit "rings".  These pointers are the addresses
 * of the rx and tx MAC DMA registers so they are fixed by the hardware --
 * these are not descriptors sitting in memory.
 */
static void
setup_hw_rings(struct au1000_private *aup, u32 rx_base, u32 tx_base)
{
	int i;

	for (i = 0; i < NUM_RX_DMA; i++) {
		aup->rx_dma_ring[i] =
			(volatile rx_dma_t *) (rx_base + sizeof(rx_dma_t)*i);
	}
	for (i = 0; i < NUM_TX_DMA; i++) {
		aup->tx_dma_ring[i] =
			(volatile tx_dma_t *) (tx_base + sizeof(tx_dma_t)*i);
	}
}
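/*
 * Illustrative sketch (added for clarity, not from the original sources):
 * setup_hw_rings() is pure pointer arithmetic over a fixed register block,
 * i.e. entry i of a ring simply ends up as
 *
 *	aup->rx_dma_ring[i] == (volatile rx_dma_t *)(rx_base + i * sizeof(rx_dma_t));
 *
 * so no descriptor memory is ever allocated here.
 */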
static struct {
	u32 base_addr;
	u32 macen_addr;
	int irq;
	struct net_device *dev;
} iflist[2] = {
#ifdef CONFIG_SOC_AU1000
	{AU1000_ETH0_BASE, AU1000_MAC0_ENABLE, AU1000_MAC0_DMA_INT},
	{AU1000_ETH1_BASE, AU1000_MAC1_ENABLE, AU1000_MAC1_DMA_INT}
#endif
#ifdef CONFIG_SOC_AU1100
	{AU1100_ETH0_BASE, AU1100_MAC0_ENABLE, AU1100_MAC0_DMA_INT}
#endif
#ifdef CONFIG_SOC_AU1500
	{AU1500_ETH0_BASE, AU1500_MAC0_ENABLE, AU1500_MAC0_DMA_INT},
	{AU1500_ETH1_BASE, AU1500_MAC1_ENABLE, AU1500_MAC1_DMA_INT}
#endif
#ifdef CONFIG_SOC_AU1550
	{AU1550_ETH0_BASE, AU1550_MAC0_ENABLE, AU1550_MAC0_DMA_INT},
	{AU1550_ETH1_BASE, AU1550_MAC1_ENABLE, AU1550_MAC1_DMA_INT}
#endif
};

static int num_ifs;
/*
 * Setup the base address and interrupt of the Au1xxx ethernet macs
 * based on cpu type and whether the interface is enabled in sys_pinfunc
 * register. The last interface is enabled if SYS_PF_NI2 (bit 4) is 0.
 */
static int __init au1000_init_module(void)
{
	int ni = (int)((au_readl(SYS_PINFUNC) & (u32)(SYS_PF_NI2)) >> 4);
	struct net_device *dev;
	int i, found_one = 0;

	num_ifs = NUM_ETH_INTERFACES - ni;

	for(i = 0; i < num_ifs; i++) {
		dev = au1000_probe(i);
		iflist[i].dev = dev;
		if (dev)
			found_one++;
	}
	if (!found_one)
		return -ENODEV;
	return 0;
}
static int au1000_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
	struct au1000_private *aup = (struct au1000_private *)dev->priv;

	if (aup->phy_dev)
		return phy_ethtool_gset(aup->phy_dev, cmd);

	return -EINVAL;
}
static int au1000_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
	struct au1000_private *aup = (struct au1000_private *)dev->priv;

	if (!capable(CAP_NET_ADMIN))
		return -EPERM;

	if (aup->phy_dev)
		return phy_ethtool_sset(aup->phy_dev, cmd);

	return -EINVAL;
}
static void
au1000_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
{
	struct au1000_private *aup = (struct au1000_private *)dev->priv;

	strcpy(info->driver, DRV_NAME);
	strcpy(info->version, DRV_VERSION);
	info->fw_version[0] = '\0';
	sprintf(info->bus_info, "%s %d", DRV_NAME, aup->mac_id);
	info->regdump_len = 0;
}
static const struct ethtool_ops au1000_ethtool_ops = {
	.get_settings = au1000_get_settings,
	.set_settings = au1000_set_settings,
	.get_drvinfo = au1000_get_drvinfo,
	.get_link = ethtool_op_get_link,
};
static struct net_device * au1000_probe(int port_num)
{
	static unsigned version_printed = 0;
	struct au1000_private *aup = NULL;
	struct net_device *dev = NULL;
	db_dest_t *pDB, *pDBfree;
	char ethaddr[6];
	int irq, i, err;
	u32 base, macen;

	if (port_num >= NUM_ETH_INTERFACES)
		return NULL;

	base  = CPHYSADDR(iflist[port_num].base_addr);
	macen = CPHYSADDR(iflist[port_num].macen_addr);
	irq = iflist[port_num].irq;

	if (!request_mem_region( base, MAC_IOSIZE, "Au1x00 ENET") ||
	    !request_mem_region(macen, 4, "Au1x00 ENET"))
		return NULL;

	if (version_printed++ == 0)
		printk("%s version %s %s\n", DRV_NAME, DRV_VERSION, DRV_AUTHOR);
	dev = alloc_etherdev(sizeof(struct au1000_private));
	if (!dev) {
		printk(KERN_ERR "%s: alloc_etherdev failed\n", DRV_NAME);
		return NULL;
	}

	if ((err = register_netdev(dev)) != 0) {
		printk(KERN_ERR "%s: Cannot register net device, error %d\n",
				DRV_NAME, err);
		free_netdev(dev);
		return NULL;
	}

	printk("%s: Au1xx0 Ethernet found at 0x%x, irq %d\n",
			dev->name, base, irq);

	aup = dev->priv;

	/* Allocate the data buffers */
	/* Snooping works fine with eth on all au1xxx */
	aup->vaddr = (u32)dma_alloc_noncoherent(NULL, MAX_BUF_SIZE *
						(NUM_TX_BUFFS + NUM_RX_BUFFS),
						&aup->dma_addr, 0);
	if (!aup->vaddr) {
		free_netdev(dev);
		release_mem_region( base, MAC_IOSIZE);
		release_mem_region(macen, 4);
		return NULL;
	}
	/* aup->mac is the base address of the MAC's registers */
	aup->mac = (volatile mac_reg_t *)iflist[port_num].base_addr;

	/* Setup some variables for quick register address access */
	aup->enable = (volatile u32 *)iflist[port_num].macen_addr;
	aup->mac_id = port_num;
	au_macs[port_num] = aup;

	if (port_num == 0) {
		if (prom_get_ethernet_addr(ethaddr) == 0)
			memcpy(au1000_mac_addr, ethaddr, sizeof(au1000_mac_addr));
		else {
			printk(KERN_INFO "%s: No MAC address found\n",
					 dev->name);
			/* Use the hard coded MAC addresses */
		}

		setup_hw_rings(aup, MAC0_RX_DMA_ADDR, MAC0_TX_DMA_ADDR);
	} else if (port_num == 1)
		setup_hw_rings(aup, MAC1_RX_DMA_ADDR, MAC1_TX_DMA_ADDR);

	/*
	 * Assign to the Ethernet ports two consecutive MAC addresses
	 * to match those that are printed on their stickers
	 */
	memcpy(dev->dev_addr, au1000_mac_addr, sizeof(au1000_mac_addr));
	dev->dev_addr[5] += port_num;
	aup->mac_enabled = 0;

	aup->mii_bus.priv = dev;
	aup->mii_bus.read = mdiobus_read;
	aup->mii_bus.write = mdiobus_write;
	aup->mii_bus.reset = mdiobus_reset;
	aup->mii_bus.name = "au1000_eth_mii";
	aup->mii_bus.id = aup->mac_id;
	aup->mii_bus.irq = kmalloc(sizeof(int)*PHY_MAX_ADDR, GFP_KERNEL);
	for(i = 0; i < PHY_MAX_ADDR; ++i)
		aup->mii_bus.irq[i] = PHY_POLL;

	/* if known, set corresponding PHY IRQs */
#if defined(AU1XXX_PHY_STATIC_CONFIG)
# if defined(AU1XXX_PHY0_IRQ)
	if (AU1XXX_PHY0_BUSID == aup->mii_bus.id)
		aup->mii_bus.irq[AU1XXX_PHY0_ADDR] = AU1XXX_PHY0_IRQ;
# endif
# if defined(AU1XXX_PHY1_IRQ)
	if (AU1XXX_PHY1_BUSID == aup->mii_bus.id)
		aup->mii_bus.irq[AU1XXX_PHY1_ADDR] = AU1XXX_PHY1_IRQ;
# endif
#endif
	mdiobus_register(&aup->mii_bus);

	if (mii_probe(dev) != 0) {
		goto err_out;
	}
	pDBfree = NULL;
	/* setup the data buffer descriptors and attach a buffer to each one */
	pDB = aup->db;
	for (i = 0; i < (NUM_TX_BUFFS+NUM_RX_BUFFS); i++) {
		pDB->pnext = pDBfree;
		pDBfree = pDB;
		pDB->vaddr = (u32 *)((unsigned)aup->vaddr + MAX_BUF_SIZE*i);
		pDB->dma_addr = (dma_addr_t)virt_to_bus(pDB->vaddr);
		pDB++;
	}
	aup->pDBfree = pDBfree;

	for (i = 0; i < NUM_RX_DMA; i++) {
		pDB = GetFreeDB(aup);
		if (!pDB)
			goto err_out;
		aup->rx_dma_ring[i]->buff_stat = (unsigned)pDB->dma_addr;
		aup->rx_db_inuse[i] = pDB;
	}
	for (i = 0; i < NUM_TX_DMA; i++) {
		pDB = GetFreeDB(aup);
		if (!pDB)
			goto err_out;
		aup->tx_dma_ring[i]->buff_stat = (unsigned)pDB->dma_addr;
		aup->tx_dma_ring[i]->len = 0;
		aup->tx_db_inuse[i] = pDB;
	}
	spin_lock_init(&aup->lock);
	dev->base_addr = base;
	dev->irq = irq;
	dev->open = au1000_open;
	dev->hard_start_xmit = au1000_tx;
	dev->stop = au1000_close;
	dev->set_multicast_list = &set_rx_mode;
	dev->do_ioctl = &au1000_ioctl;
	SET_ETHTOOL_OPS(dev, &au1000_ethtool_ops);
	dev->tx_timeout = au1000_tx_timeout;
	dev->watchdog_timeo = ETH_TX_TIMEOUT;

	/*
	 * The boot code uses the ethernet controller, so reset it to start
	 * fresh.  au1000_init() expects that the device is in reset state.
	 */
	reset_mac(dev);

	return dev;

err_out:
	/* here we should have a valid dev plus aup-> register addresses
	 * so we can reset the mac properly. */
	reset_mac(dev);
	for (i = 0; i < NUM_RX_DMA; i++) {
		if (aup->rx_db_inuse[i])
			ReleaseDB(aup, aup->rx_db_inuse[i]);
	}
	for (i = 0; i < NUM_TX_DMA; i++) {
		if (aup->tx_db_inuse[i])
			ReleaseDB(aup, aup->tx_db_inuse[i]);
	}
	dma_free_noncoherent(NULL, MAX_BUF_SIZE * (NUM_TX_BUFFS + NUM_RX_BUFFS),
			     (void *)aup->vaddr, aup->dma_addr);
	unregister_netdev(dev);
	free_netdev(dev);
	release_mem_region( base, MAC_IOSIZE);
	release_mem_region(macen, 4);
	return NULL;
}
/*
 * Initialize the interface.
 *
 * When the device powers up, the clocks are disabled and the
 * mac is in reset state.  When the interface is closed, we
 * do the same -- reset the device and disable the clocks to
 * conserve power. Thus, whenever au1000_init() is called,
 * the device should already be in reset state.
 */
static int au1000_init(struct net_device *dev)
{
	struct au1000_private *aup = (struct au1000_private *) dev->priv;
	unsigned long flags;
	int i;
	u32 control;

	if (au1000_debug > 4)
		printk("%s: au1000_init\n", dev->name);

	/* bring the device out of reset */
	enable_mac(dev, 1);

	spin_lock_irqsave(&aup->lock, flags);

	aup->mac->control = 0;
	aup->tx_head = (aup->tx_dma_ring[0]->buff_stat & 0xC) >> 2;
	aup->tx_tail = aup->tx_head;
	aup->rx_head = (aup->rx_dma_ring[0]->buff_stat & 0xC) >> 2;

	aup->mac->mac_addr_high = dev->dev_addr[5]<<8 | dev->dev_addr[4];
	aup->mac->mac_addr_low = dev->dev_addr[3]<<24 | dev->dev_addr[2]<<16 |
		dev->dev_addr[1]<<8 | dev->dev_addr[0];

	for (i = 0; i < NUM_RX_DMA; i++) {
		aup->rx_dma_ring[i]->buff_stat |= RX_DMA_ENABLE;
	}

	control = MAC_RX_ENABLE | MAC_TX_ENABLE;
#ifndef CONFIG_CPU_LITTLE_ENDIAN
	control |= MAC_BIG_ENDIAN;
#endif
	if (aup->phy_dev) {
		if (aup->phy_dev->link && (DUPLEX_FULL == aup->phy_dev->duplex))
			control |= MAC_FULL_DUPLEX;
		else
			control |= MAC_DISABLE_RX_OWN;
	} else { /* PHY-less op, assume full-duplex */
		control |= MAC_FULL_DUPLEX;
	}

	aup->mac->control = control;
	aup->mac->vlan1_tag = 0x8100; /* activate vlan support */

	spin_unlock_irqrestore(&aup->lock, flags);
	return 0;
}
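/*
 * Worked example (added for clarity, not from the original sources): with
 * the default hard-coded address 00:50:c2:0c:30:00 on MAC0, the register
 * packing done in au1000_init() above yields
 *
 *	mac_addr_high = 0x00<<8  | 0x30                        = 0x00000030
 *	mac_addr_low  = 0x0c<<24 | 0xc2<<16 | 0x50<<8 | 0x00   = 0x0cc25000
 *
 * i.e. dev_addr[0] ends up in the least significant byte of mac_addr_low.
 */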
static void
au1000_adjust_link(struct net_device *dev)
{
	struct au1000_private *aup = (struct au1000_private *) dev->priv;
	struct phy_device *phydev = aup->phy_dev;
	unsigned long flags;

	int status_change = 0;

	BUG_ON(!aup->phy_dev);

	spin_lock_irqsave(&aup->lock, flags);

	if (phydev->link && (aup->old_speed != phydev->speed)) {
		// speed changed

		switch(phydev->speed) {
		case SPEED_10:
		case SPEED_100:
			break;
		default:
			printk(KERN_WARNING
			       "%s: Speed (%d) is not 10/100 ???\n",
			       dev->name, phydev->speed);
			break;
		}

		aup->old_speed = phydev->speed;

		status_change = 1;
	}

	if (phydev->link && (aup->old_duplex != phydev->duplex)) {
		// duplex mode changed

		/* switching duplex mode requires disabling rx and tx! */
		hard_stop(dev);

		if (DUPLEX_FULL == phydev->duplex)
			aup->mac->control = ((aup->mac->control
					      | MAC_FULL_DUPLEX)
					     & ~MAC_DISABLE_RX_OWN);
		else
			aup->mac->control = ((aup->mac->control
					      & ~MAC_FULL_DUPLEX)
					     | MAC_DISABLE_RX_OWN);

		enable_rx_tx(dev);
		aup->old_duplex = phydev->duplex;

		status_change = 1;
	}

	if(phydev->link != aup->old_link) {
		// link state changed

		if (phydev->link) // link went up
			netif_schedule(dev);
		else { // link went down
			aup->old_speed = 0;
			aup->old_duplex = -1;
		}

		aup->old_link = phydev->link;
		status_change = 1;
	}

	spin_unlock_irqrestore(&aup->lock, flags);

	if (status_change) {
		if (phydev->link)
			printk(KERN_INFO "%s: link up (%d/%s)\n",
			       dev->name, phydev->speed,
			       DUPLEX_FULL == phydev->duplex ? "Full" : "Half");
		else
			printk(KERN_INFO "%s: link down\n", dev->name);
	}
}
static int au1000_open(struct net_device *dev)
{
	int retval;
	struct au1000_private *aup = (struct au1000_private *) dev->priv;

	if (au1000_debug > 4)
		printk("%s: open: dev=%p\n", dev->name, dev);

	if ((retval = request_irq(dev->irq, &au1000_interrupt, 0,
				  dev->name, dev))) {
		printk(KERN_ERR "%s: unable to get IRQ %d\n",
				dev->name, dev->irq);
		return retval;
	}

	if ((retval = au1000_init(dev))) {
		printk(KERN_ERR "%s: error in au1000_init\n", dev->name);
		free_irq(dev->irq, dev);
		return retval;
	}

	if (aup->phy_dev) {
		/* cause the PHY state machine to schedule a link state check */
		aup->phy_dev->state = PHY_CHANGELINK;
		phy_start(aup->phy_dev);
	}

	netif_start_queue(dev);

	if (au1000_debug > 4)
		printk("%s: open: Initialization done.\n", dev->name);

	return 0;
}
static int au1000_close(struct net_device *dev)
{
	unsigned long flags;
	struct au1000_private *const aup = (struct au1000_private *) dev->priv;

	if (au1000_debug > 4)
		printk("%s: close: dev=%p\n", dev->name, dev);

	if (aup->phy_dev)
		phy_stop(aup->phy_dev);

	spin_lock_irqsave(&aup->lock, flags);

	reset_mac_unlocked (dev);

	/* stop the device */
	netif_stop_queue(dev);

	/* disable the interrupt */
	free_irq(dev->irq, dev);
	spin_unlock_irqrestore(&aup->lock, flags);

	return 0;
}
static void __exit au1000_cleanup_module(void)
{
	int i, j;
	struct net_device *dev;
	struct au1000_private *aup;

	for (i = 0; i < num_ifs; i++) {
		dev = iflist[i].dev;
		if (dev) {
			aup = (struct au1000_private *) dev->priv;
			unregister_netdev(dev);
			for (j = 0; j < NUM_RX_DMA; j++)
				if (aup->rx_db_inuse[j])
					ReleaseDB(aup, aup->rx_db_inuse[j]);
			for (j = 0; j < NUM_TX_DMA; j++)
				if (aup->tx_db_inuse[j])
					ReleaseDB(aup, aup->tx_db_inuse[j]);
			dma_free_noncoherent(NULL, MAX_BUF_SIZE *
					     (NUM_TX_BUFFS + NUM_RX_BUFFS),
					     (void *)aup->vaddr, aup->dma_addr);
			release_mem_region(dev->base_addr, MAC_IOSIZE);
			release_mem_region(CPHYSADDR(iflist[i].macen_addr), 4);
			free_netdev(dev);
		}
	}
}
static void update_tx_stats(struct net_device *dev, u32 status)
{
	struct au1000_private *aup = (struct au1000_private *) dev->priv;
	struct net_device_stats *ps = &dev->stats;

	if (status & TX_FRAME_ABORTED) {
		if (!aup->phy_dev || (DUPLEX_FULL == aup->phy_dev->duplex)) {
			if (status & (TX_JAB_TIMEOUT | TX_UNDERRUN)) {
				/* any other tx errors are only valid
				 * in half duplex mode */
				ps->tx_errors++;
				ps->tx_aborted_errors++;
			}
		} else {
			ps->tx_errors++;
			ps->tx_aborted_errors++;
			if (status & (TX_NO_CARRIER | TX_LOSS_CARRIER))
				ps->tx_carrier_errors++;
		}
	}
}
/*
 * Called from the interrupt service routine to acknowledge
 * the TX DONE bits.  This is a must if the irq is setup as
 * edge triggered.
 */
static void au1000_tx_ack(struct net_device *dev)
{
	struct au1000_private *aup = (struct au1000_private *) dev->priv;
	volatile tx_dma_t *ptxd;

	ptxd = aup->tx_dma_ring[aup->tx_tail];

	while (ptxd->buff_stat & TX_T_DONE) {
		update_tx_stats(dev, ptxd->status);
		ptxd->buff_stat &= ~TX_T_DONE;

		aup->tx_tail = (aup->tx_tail + 1) & (NUM_TX_DMA - 1);
		ptxd = aup->tx_dma_ring[aup->tx_tail];

		if (aup->tx_full) {
			aup->tx_full = 0;
			netif_wake_queue(dev);
		}
	}
}
/*
 * Au1000 transmit routine.
 */
static int au1000_tx(struct sk_buff *skb, struct net_device *dev)
{
	struct au1000_private *aup = (struct au1000_private *) dev->priv;
	struct net_device_stats *ps = &dev->stats;
	volatile tx_dma_t *ptxd;
	u32 buff_stat;
	db_dest_t *pDB;
	int i;

	if (au1000_debug > 5)
		printk("%s: tx: aup %x len=%d, data=%p, head %d\n",
				dev->name, (unsigned)aup, skb->len,
				skb->data, aup->tx_head);

	ptxd = aup->tx_dma_ring[aup->tx_head];
	buff_stat = ptxd->buff_stat;
	if (buff_stat & TX_DMA_ENABLE) {
		/* We've wrapped around and the transmitter is still busy */
		netif_stop_queue(dev);
		aup->tx_full = 1;
		return 1;
	}
	else if (buff_stat & TX_T_DONE) {
		update_tx_stats(dev, ptxd->status);
		ptxd->len = 0;
	}

	if (aup->tx_full) {
		aup->tx_full = 0;
		netif_wake_queue(dev);
	}

	pDB = aup->tx_db_inuse[aup->tx_head];
	skb_copy_from_linear_data(skb, pDB->vaddr, skb->len);
	if (skb->len < ETH_ZLEN) {
		for (i = skb->len; i < ETH_ZLEN; i++) {
			((char *)pDB->vaddr)[i] = 0;
		}
		ptxd->len = ETH_ZLEN;
	}
	else
		ptxd->len = skb->len;

	ps->tx_packets++;
	ps->tx_bytes += ptxd->len;

	ptxd->buff_stat = pDB->dma_addr | TX_DMA_ENABLE;
	dev_kfree_skb(skb);
	aup->tx_head = (aup->tx_head + 1) & (NUM_TX_DMA - 1);
	dev->trans_start = jiffies;
	return 0;
}
static inline void update_rx_stats(struct net_device *dev, u32 status)
{
	struct au1000_private *aup = (struct au1000_private *) dev->priv;
	struct net_device_stats *ps = &dev->stats;

	ps->rx_packets++;
	if (status & RX_MCAST_FRAME)
		ps->multicast++;

	if (status & RX_ERROR) {
		ps->rx_errors++;
		if (status & RX_MISSED_FRAME)
			ps->rx_missed_errors++;
		if (status & (RX_OVERLEN | RX_LEN_ERROR))
			ps->rx_length_errors++;
		if (status & RX_CRC_ERROR)
			ps->rx_crc_errors++;
		if (status & RX_COLL)
			ps->collisions++;
	} else
		ps->rx_bytes += status & RX_FRAME_LEN_MASK;
}
/*
 * Au1000 receive routine.
 */
static int au1000_rx(struct net_device *dev)
{
	struct au1000_private *aup = (struct au1000_private *) dev->priv;
	struct sk_buff *skb;
	volatile rx_dma_t *prxd;
	u32 buff_stat, status;
	db_dest_t *pDB;
	u32 frmlen;

	if (au1000_debug > 5)
		printk("%s: au1000_rx head %d\n", dev->name, aup->rx_head);

	prxd = aup->rx_dma_ring[aup->rx_head];
	buff_stat = prxd->buff_stat;
	while (buff_stat & RX_T_DONE) {
		status = prxd->status;
		pDB = aup->rx_db_inuse[aup->rx_head];
		update_rx_stats(dev, status);
		if (!(status & RX_ERROR)) {

			/* good frame */
			frmlen = (status & RX_FRAME_LEN_MASK);
			frmlen -= 4; /* Remove FCS */
			skb = dev_alloc_skb(frmlen + 2);
			if (skb == NULL) {
				printk(KERN_ERR
				       "%s: Memory squeeze, dropping packet.\n",
				       dev->name);
				dev->stats.rx_dropped++;
				continue;
			}
			skb_reserve(skb, 2);	/* 16 byte IP header align */
			skb_copy_to_linear_data(skb,
				(unsigned char *)pDB->vaddr, frmlen);
			skb_put(skb, frmlen);
			skb->protocol = eth_type_trans(skb, dev);
			netif_rx(skb);	/* pass the packet to upper layers */
		} else {
			if (au1000_debug > 4) {
				if (status & RX_MISSED_FRAME)
					printk("rx miss\n");
				if (status & RX_WDOG_TIMER)
					printk("rx wdog\n");
				if (status & RX_RUNT)
					printk("rx runt\n");
				if (status & RX_OVERLEN)
					printk("rx overlen\n");
				if (status & RX_COLL)
					printk("rx coll\n");
				if (status & RX_MII_ERROR)
					printk("rx mii error\n");
				if (status & RX_CRC_ERROR)
					printk("rx crc error\n");
				if (status & RX_LEN_ERROR)
					printk("rx len error\n");
				if (status & RX_U_CNTRL_FRAME)
					printk("rx u control frame\n");
			}
		}
		prxd->buff_stat = (u32)(pDB->dma_addr | RX_DMA_ENABLE);
		aup->rx_head = (aup->rx_head + 1) & (NUM_RX_DMA - 1);

		/* next descriptor */
		prxd = aup->rx_dma_ring[aup->rx_head];
		buff_stat = prxd->buff_stat;
		dev->last_rx = jiffies;
	}
	return 0;
}
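/*
 * Illustrative note (added for clarity, not from the original sources):
 * the dev_alloc_skb(frmlen + 2)/skb_reserve(skb, 2) pair in au1000_rx()
 * above is the usual IP-header alignment trick: the 2 reserved bytes plus
 * the 14-byte Ethernet header make the IP header start on a 16-byte
 * boundary before the frame is copied out of the DMA buffer.
 */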
/*
 * Au1000 interrupt service routine.
 */
static irqreturn_t au1000_interrupt(int irq, void *dev_id)
{
	struct net_device *dev = (struct net_device *) dev_id;

	if (dev == NULL) {
		printk(KERN_ERR "%s: isr: null dev ptr\n", DRV_NAME);
		return IRQ_RETVAL(1);
	}

	/* Handle RX interrupts first to minimize chance of overrun */

	au1000_rx(dev);
	au1000_tx_ack(dev);
	return IRQ_RETVAL(1);
}
/*
 * The Tx ring has been full longer than the watchdog timeout
 * value. The transmitter must be hung?
 */
static void au1000_tx_timeout(struct net_device *dev)
{
	printk(KERN_ERR "%s: au1000_tx_timeout: dev=%p\n", dev->name, dev);
	reset_mac(dev);
	au1000_init(dev);
	dev->trans_start = jiffies;
	netif_wake_queue(dev);
}
static void set_rx_mode(struct net_device *dev)
{
	struct au1000_private *aup = (struct au1000_private *) dev->priv;

	if (au1000_debug > 4)
		printk("%s: set_rx_mode: flags=%x\n", dev->name, dev->flags);

	if (dev->flags & IFF_PROMISC) {			/* Set promiscuous. */
		aup->mac->control |= MAC_PROMISCUOUS;
	} else if ((dev->flags & IFF_ALLMULTI) ||
			   dev->mc_count > MULTICAST_FILTER_LIMIT) {
		aup->mac->control |= MAC_PASS_ALL_MULTI;
		aup->mac->control &= ~MAC_PROMISCUOUS;
		printk(KERN_INFO "%s: Pass all multicast\n", dev->name);
	} else {
		int i;
		struct dev_mc_list *mclist;
		u32 mc_filter[2];	/* Multicast hash filter */

		mc_filter[1] = mc_filter[0] = 0;
		for (i = 0, mclist = dev->mc_list; mclist && i < dev->mc_count;
			 i++, mclist = mclist->next) {
			set_bit(ether_crc(ETH_ALEN, mclist->dmi_addr)>>26,
					(long *)mc_filter);
		}
		aup->mac->multi_hash_high = mc_filter[1];
		aup->mac->multi_hash_low = mc_filter[0];
		aup->mac->control &= ~MAC_PROMISCUOUS;
		aup->mac->control |= MAC_HASH_MODE;
	}
}
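/*
 * Illustrative note (added for clarity, not from the original sources):
 * the hash mode configured above uses the top 6 bits of the Ethernet CRC
 * as the filter bin, i.e.
 *
 *	bin = ether_crc(ETH_ALEN, addr) >> 26;	 // 0..63
 *
 * and with 32-bit longs on this platform, bins 0-31 land in mc_filter[0]
 * (multi_hash_low) while bins 32-63 land in mc_filter[1] (multi_hash_high),
 * matching the two 32-bit hash registers of the MAC.
 */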
static int au1000_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
{
	struct au1000_private *aup = (struct au1000_private *)dev->priv;

	if (!netif_running(dev)) return -EINVAL;

	if (!aup->phy_dev) return -EINVAL; // PHY not controllable

	return phy_mii_ioctl(aup->phy_dev, if_mii(rq), cmd);
}
module_init(au1000_init_module);
module_exit(au1000_cleanup_module);