/*
 * Alchemy Semi Au1000 ethernet driver
 *
 * Copyright 2001 MontaVista Software Inc.
 * Author: MontaVista Software, Inc.
 *         ppopov@mvista.com or source@mvista.com
 *
 * This program is free software; you can distribute it and/or modify it
 * under the terms of the GNU General Public License (Version 2) as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * for more details.
 *
 * You should have received a copy of the GNU General Public License along
 * with this program; if not, write to the Free Software Foundation, Inc.,
 * 59 Temple Place - Suite 330, Boston MA 02111-1307, USA.
 */

#include <linux/config.h>
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/timer.h>
#include <linux/errno.h>
#include <linux/ioport.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/pci.h>
#include <linux/init.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/delay.h>
#include <linux/crc32.h>

#include <asm/mipsregs.h>
#include <asm/bitops.h>
#include <asm/au1000.h>

#include "au1000_eth.h"

#ifdef AU1000_ETH_DEBUG
static int au1000_debug = 10;
#else
static int au1000_debug = 3;
#endif

static void *dma_alloc(size_t, dma_addr_t *);
static void dma_free(void *, size_t);
static void hard_stop(struct net_device *);
static void enable_rx_tx(struct net_device *dev);
static int __init au1000_probe1(struct net_device *, long, int, int);
static int au1000_init(struct net_device *);
static int au1000_open(struct net_device *);
static int au1000_close(struct net_device *);
static int au1000_tx(struct sk_buff *, struct net_device *);
static int au1000_rx(struct net_device *);
static irqreturn_t au1000_interrupt(int, void *, struct pt_regs *);
static void au1000_tx_timeout(struct net_device *);
static int au1000_set_config(struct net_device *dev, struct ifmap *map);
static void set_rx_mode(struct net_device *);
static struct net_device_stats *au1000_get_stats(struct net_device *);
static inline void update_tx_stats(struct net_device *, u32, u32);
static inline void update_rx_stats(struct net_device *, u32);
static void au1000_timer(unsigned long);
static int au1000_ioctl(struct net_device *, struct ifreq *, int);
static int mdio_read(struct net_device *, int, int);
static void mdio_write(struct net_device *, int, int, u16);
static void dump_mii(struct net_device *dev, int phy_id);

extern void ack_rise_edge_irq(unsigned int);
extern int get_ethernet_addr(char *ethernet_addr);
extern inline void str2eaddr(unsigned char *ea, unsigned char *str);
extern inline unsigned char str2hexnum(unsigned char c);
extern char * __init prom_getcmdline(void);

/*
 * The Au1000 MACs use a simple rx and tx descriptor ring scheme.
 * There are four receive and four transmit descriptors.  These
 * descriptors are not in memory; rather, they are just a set of
 * hardware registers.
 *
 * Since the Au1000 has a coherent data cache, the receive and
 * transmit buffers are allocated from the KSEG0 segment. The
 * hardware registers, however, are still mapped at KSEG1 to
 * make sure there's no out-of-order writes, and that all writes
 * complete immediately.
 */
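
/*
 * Note: the rx_head/tx_head/tx_tail indices used below are advanced with
 * "(index + 1) & (NUM_*_DMA - 1)", so NUM_RX_DMA and NUM_TX_DMA are
 * assumed to be powers of two (four descriptors each, per the comment
 * above).
 */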

/*
 * Base address and interrupt of the Au1xxx ethernet macs
 */
static struct {
	unsigned int port;
	int irq;
} au1000_iflist[NUM_INTERFACES] = {
	{AU1000_ETH0_BASE, AU1000_ETH0_IRQ},
	{AU1000_ETH1_BASE, AU1000_ETH1_IRQ}
},
au1500_iflist[NUM_INTERFACES] = {
	{AU1500_ETH0_BASE, AU1000_ETH0_IRQ},
	{AU1500_ETH1_BASE, AU1000_ETH1_IRQ}
},
au1100_iflist[NUM_INTERFACES] = {
	{AU1000_ETH0_BASE, AU1000_ETH0_IRQ},
	{0, 0}
};

static char version[] __devinitdata =
	"au1000eth.c:1.0 ppopov@mvista.com\n";

/* These addresses are only used if yamon doesn't tell us what
 * the mac address is, and the mac address is not passed on the
 * command line.
 */
static unsigned char au1000_mac_addr[6] __devinitdata = {
	0x00, 0x50, 0xc2, 0x0c, 0x30, 0x00
};

#define nibswap(x) ((((x) >> 4) & 0x0f) | (((x) << 4) & 0xf0))
#define RUN_AT(x) (jiffies + (x))

// For reading/writing 32-bit words from/to DMA memory
#define cpu_to_dma32 cpu_to_be32
#define dma32_to_cpu be32_to_cpu
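
/*
 * cpu_to_dma32/dma32_to_cpu simply alias the big-endian conversion
 * helpers: 32-bit words exchanged with the MAC's DMA engine are treated
 * as big-endian regardless of CPU endianness (see also the
 * MAC_BIG_ENDIAN handling in au1000_init() below).
 */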

/*
 * All of the PHY code really should be detached from the MAC
 * code.
 */

int bcm_5201_init(struct net_device *dev, int phy_addr)
{
	s16 data;

	/* Stop auto-negotiation */
	//printk("bcm_5201_init\n");
	data = mdio_read(dev, phy_addr, MII_CONTROL);
	mdio_write(dev, phy_addr, MII_CONTROL, data & ~MII_CNTL_AUTO);

	/* Set advertisement to 10/100 and Half/Full duplex
	 * (full capabilities) */
	data = mdio_read(dev, phy_addr, MII_ANADV);
	data |= MII_NWAY_TX | MII_NWAY_TX_FDX | MII_NWAY_T_FDX | MII_NWAY_T;
	mdio_write(dev, phy_addr, MII_ANADV, data);

	/* Restart auto-negotiation */
	data = mdio_read(dev, phy_addr, MII_CONTROL);
	data |= MII_CNTL_RST_AUTO | MII_CNTL_AUTO;
	mdio_write(dev, phy_addr, MII_CONTROL, data);

	/* Enable TX LED instead of FDX */
	data = mdio_read(dev, phy_addr, MII_INT);
	data &= ~MII_FDX_LED;
	mdio_write(dev, phy_addr, MII_INT, data);

	if (au1000_debug > 4) dump_mii(dev, phy_addr);
	return 0;
}

int bcm_5201_reset(struct net_device *dev, int phy_addr)
{
	s16 mii_control, timeout;

	//printk("bcm_5201_reset\n");
	mii_control = mdio_read(dev, phy_addr, MII_CONTROL);
	mdio_write(dev, phy_addr, MII_CONTROL, mii_control | MII_CNTL_RESET);
	for (timeout = 100; timeout > 0; --timeout) {
		mii_control = mdio_read(dev, phy_addr, MII_CONTROL);
		if ((mii_control & MII_CNTL_RESET) == 0)
			break;
	}
	if (mii_control & MII_CNTL_RESET) {
		printk(KERN_ERR "%s PHY reset timeout !\n", dev->name);
		return -1;
	}
	return 0;
}

int
bcm_5201_status(struct net_device *dev, int phy_addr, u16 *link, u16 *speed)
{
	u16 mii_data;
	struct au1000_private *aup;

	if (!dev) {
		printk(KERN_ERR "bcm_5201_status error: NULL dev\n");
		return -1;
	}
	aup = (struct au1000_private *) dev->priv;

	mii_data = mdio_read(dev, aup->phy_addr, MII_STATUS);
	if (mii_data & MII_STAT_LINK) {
		*link = 1;
		mii_data = mdio_read(dev, aup->phy_addr, MII_AUX_CNTRL);
		if (mii_data & MII_AUX_100) {
			if (mii_data & MII_AUX_FDX) {
				*speed = IF_PORT_100BASEFX;
				dev->if_port = IF_PORT_100BASEFX;
			}
			else {
				*speed = IF_PORT_100BASETX;
				dev->if_port = IF_PORT_100BASETX;
			}
		}
		else {
			*speed = IF_PORT_10BASET;
			dev->if_port = IF_PORT_10BASET;
		}
	}
	else {
		*link = 0;
		*speed = 0;
		dev->if_port = IF_PORT_UNKNOWN;
	}
	return 0;
}

int lsi_80227_init(struct net_device *dev, int phy_addr)
{
	if (au1000_debug > 4)
		printk("lsi_80227_init\n");

	/* restart auto-negotiation */
	mdio_write(dev, phy_addr, 0, 0x3200);

	/* set up LEDs to correct display */
	mdio_write(dev, phy_addr, 17, 0xffc0);

	if (au1000_debug > 4)
		dump_mii(dev, phy_addr);
	return 0;
}

int lsi_80227_reset(struct net_device *dev, int phy_addr)
{
	s16 mii_control, timeout;

	if (au1000_debug > 4) {
		printk("lsi_80227_reset\n");
		dump_mii(dev, phy_addr);
	}

	mii_control = mdio_read(dev, phy_addr, MII_CONTROL);
	mdio_write(dev, phy_addr, MII_CONTROL, mii_control | MII_CNTL_RESET);
	for (timeout = 100; timeout > 0; --timeout) {
		mii_control = mdio_read(dev, phy_addr, MII_CONTROL);
		if ((mii_control & MII_CNTL_RESET) == 0)
			break;
	}
	if (mii_control & MII_CNTL_RESET) {
		printk(KERN_ERR "%s PHY reset timeout !\n", dev->name);
		return -1;
	}
	return 0;
}

int
lsi_80227_status(struct net_device *dev, int phy_addr, u16 *link, u16 *speed)
{
	u16 mii_data;
	struct au1000_private *aup;

	if (!dev) {
		printk(KERN_ERR "lsi_80227_status error: NULL dev\n");
		return -1;
	}
	aup = (struct au1000_private *) dev->priv;

	mii_data = mdio_read(dev, aup->phy_addr, MII_STATUS);
	if (mii_data & MII_STAT_LINK) {
		*link = 1;
		mii_data = mdio_read(dev, aup->phy_addr, MII_LSI_STAT);
		if (mii_data & MII_LSI_STAT_SPD) {
			if (mii_data & MII_LSI_STAT_FDX) {
				*speed = IF_PORT_100BASEFX;
				dev->if_port = IF_PORT_100BASEFX;
			}
			else {
				*speed = IF_PORT_100BASETX;
				dev->if_port = IF_PORT_100BASETX;
			}
		}
		else {
			*speed = IF_PORT_10BASET;
			dev->if_port = IF_PORT_10BASET;
		}
	}
	else {
		*link = 0;
		*speed = 0;
		dev->if_port = IF_PORT_UNKNOWN;
	}
	return 0;
}

int am79c901_init(struct net_device *dev, int phy_addr)
{
	printk("am79c901_init\n");
	return 0;
}

int am79c901_reset(struct net_device *dev, int phy_addr)
{
	printk("am79c901_reset\n");
	return 0;
}

int
am79c901_status(struct net_device *dev, int phy_addr, u16 *link, u16 *speed)
{
	return 0;
}

struct phy_ops bcm_5201_ops = {
	bcm_5201_init,
	bcm_5201_reset,
	bcm_5201_status,
};

struct phy_ops am79c901_ops = {
	am79c901_init,
	am79c901_reset,
	am79c901_status,
};

struct phy_ops lsi_80227_ops = {
	lsi_80227_init,
	lsi_80227_reset,
	lsi_80227_status,
};

static struct mii_chip_info {
	const char *name;
	u16 phy_id0;
	u16 phy_id1;
	struct phy_ops *phy_ops;
} mii_chip_table[] = {
	{"Broadcom BCM5201 10/100 BaseT PHY", 0x0040, 0x6212, &bcm_5201_ops},
	{"AMD 79C901 HomePNA PHY", 0x0000, 0x35c8, &am79c901_ops},
	{"LSI 80227 10/100 BaseT PHY", 0x0016, 0xf840, &lsi_80227_ops},
	{"Broadcom BCM5221 10/100 BaseT PHY", 0x0040, 0x61e4, &bcm_5201_ops},
	{0,},
};
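
/*
 * mii_probe() below matches a discovered PHY against this table by
 * comparing the values read from the MII ID registers (MII_PHY_ID0 and
 * MII_PHY_ID1) with the phy_id0/phy_id1 fields; the zero terminator
 * ends the search loop.
 */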

static int mdio_read(struct net_device *dev, int phy_id, int reg)
{
	struct au1000_private *aup = (struct au1000_private *) dev->priv;
	u32 timedout = 20;
	u32 mii_control;

	while (aup->mac->mii_control & MAC_MII_BUSY) {
		mdelay(1);
		if (--timedout == 0) {
			printk(KERN_ERR "%s: read_MII busy timeout!!\n",
					dev->name);
			return -1;
		}
	}

	mii_control = MAC_SET_MII_SELECT_REG(reg) |
		MAC_SET_MII_SELECT_PHY(phy_id) | MAC_MII_READ;

	aup->mac->mii_control = mii_control;

	timedout = 20;
	while (aup->mac->mii_control & MAC_MII_BUSY) {
		mdelay(1);
		if (--timedout == 0) {
			printk(KERN_ERR "%s: mdio_read busy timeout!!\n",
					dev->name);
			return -1;
		}
	}
	return (int)aup->mac->mii_data;
}
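
/*
 * A single MII management transaction: wait until the controller is not
 * busy, program the register/PHY select bits together with the read or
 * write command, wait for completion, and then read mii_data (for a
 * read) or latch mii_data beforehand (for a write, below).
 */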

static void mdio_write(struct net_device *dev, int phy_id, int reg, u16 value)
{
	struct au1000_private *aup = (struct au1000_private *) dev->priv;
	u32 timedout = 20;
	u32 mii_control;

	while (aup->mac->mii_control & MAC_MII_BUSY) {
		mdelay(1);
		if (--timedout == 0) {
			printk(KERN_ERR "%s: mdio_write busy timeout!!\n",
					dev->name);
			return;
		}
	}

	mii_control = MAC_SET_MII_SELECT_REG(reg) |
		MAC_SET_MII_SELECT_PHY(phy_id) | MAC_MII_WRITE;

	aup->mac->mii_data = value;
	aup->mac->mii_control = mii_control;
}

static void dump_mii(struct net_device *dev, int phy_id)
{
	int i, val;

	for (i = 0; i < 7; i++) {
		if ((val = mdio_read(dev, phy_id, i)) >= 0)
			printk("%s: MII Reg %d=%x\n", dev->name, i, val);
	}
	for (i = 16; i < 25; i++) {
		if ((val = mdio_read(dev, phy_id, i)) >= 0)
			printk("%s: MII Reg %d=%x\n", dev->name, i, val);
	}
}

static int __init mii_probe (struct net_device * dev)
{
	struct au1000_private *aup = (struct au1000_private *) dev->priv;
	int phy_addr;

	/* search for total of 32 possible mii phy addresses */
	for (phy_addr = 0; phy_addr < 32; phy_addr++) {
		u16 mii_status;
		u16 phy_id0, phy_id1;
		int i;

		mii_status = mdio_read(dev, phy_addr, MII_STATUS);
		if (mii_status == 0xffff || mii_status == 0x0000)
			/* the mii is not accessible, try next one */
			continue;

		phy_id0 = mdio_read(dev, phy_addr, MII_PHY_ID0);
		phy_id1 = mdio_read(dev, phy_addr, MII_PHY_ID1);

		/* search our mii table for the current mii */
		for (i = 0; mii_chip_table[i].phy_id1; i++) {
			if (phy_id0 == mii_chip_table[i].phy_id0 &&
			    phy_id1 == mii_chip_table[i].phy_id1) {
				struct mii_phy * mii_phy;

				printk(KERN_INFO "%s: %s at phy address %d\n",
						dev->name, mii_chip_table[i].name,
						phy_addr);
				mii_phy = kmalloc(sizeof(struct mii_phy),
						GFP_KERNEL);
				if (mii_phy) {
					mii_phy->chip_info = mii_chip_table+i;
					mii_phy->phy_addr = phy_addr;
					mii_phy->next = aup->mii;
					aup->mii = mii_phy;
					aup->phy_ops =
						mii_chip_table[i].phy_ops;
					aup->phy_ops->phy_init(dev,phy_addr);
				} else {
					printk(KERN_ERR "%s: out of memory\n",
							dev->name);
					return -1;
				}
				/* the current mii is on our mii_info_table,
				   try next address */
				break;
			}
		}
	}

	if (aup->mii == NULL) {
		printk(KERN_ERR "%s: No MII transceivers found!\n", dev->name);
		return -1;
	}

	aup->phy_addr = aup->mii->phy_addr;
	printk(KERN_INFO "%s: Using %s as default\n",
			dev->name, aup->mii->chip_info->name);

	return 0;
}

/*
 * Buffer allocation/deallocation routines. The buffer descriptor returned
 * has the virtual and dma address of a buffer suitable for
 * both, receive and transmit operations.
 */
static db_dest_t *GetFreeDB(struct au1000_private *aup)
{
	db_dest_t *pDB;

	pDB = aup->pDBfree;
	if (pDB)
		aup->pDBfree = pDB->pnext;
	//printk("GetFreeDB: %x\n", pDB);
	return pDB;
}

void ReleaseDB(struct au1000_private *aup, db_dest_t *pDB)
{
	db_dest_t *pDBfree = aup->pDBfree;

	if (pDBfree)
		pDBfree->pnext = pDB;
	aup->pDBfree = pDB;
}
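
/*
 * The buffer descriptors form a simple singly-linked free list threaded
 * through pnext and headed by aup->pDBfree: GetFreeDB() pops a
 * descriptor off the head and ReleaseDB() puts one back.
 */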

/*
  DMA memory allocation, derived from pci_alloc_consistent.
  However, the Au1000 data cache is coherent (when programmed
  so), therefore we return KSEG0 address, not KSEG1.
*/
static void *dma_alloc(size_t size, dma_addr_t * dma_handle)
{
	void *ret;
	int gfp = GFP_ATOMIC | GFP_DMA;

	ret = (void *) __get_free_pages(gfp, get_order(size));

	if (ret != NULL) {
		memset(ret, 0, size);
		*dma_handle = virt_to_bus(ret);
		ret = (void *)KSEG0ADDR(ret);
	}
	return ret;
}

static void dma_free(void *vaddr, size_t size)
{
	vaddr = (void *)KSEG0ADDR(vaddr);
	free_pages((unsigned long) vaddr, get_order(size));
}
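
/*
 * Typical use (see au1000_probe1): one contiguous allocation backs all
 * rx and tx buffers, e.g.
 *
 *	aup->vaddr = (u32)dma_alloc(MAX_BUF_SIZE *
 *			(NUM_TX_BUFFS+NUM_RX_BUFFS), &aup->dma_addr);
 *
 * The returned pointer is a cached KSEG0 address, while *dma_handle
 * receives the corresponding bus (physical) address for the DMA engine.
 */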

static void enable_rx_tx(struct net_device *dev)
{
	struct au1000_private *aup = (struct au1000_private *) dev->priv;

	if (au1000_debug > 4)
		printk(KERN_INFO "%s: enable_rx_tx\n", dev->name);

	aup->mac->control |= (MAC_RX_ENABLE | MAC_TX_ENABLE);
}

static void hard_stop(struct net_device *dev)
{
	struct au1000_private *aup = (struct au1000_private *) dev->priv;

	if (au1000_debug > 4)
		printk(KERN_INFO "%s: hard stop\n", dev->name);

	aup->mac->control &= ~(MAC_RX_ENABLE | MAC_TX_ENABLE);
}

static void reset_mac(struct net_device *dev)
{
	unsigned long flags;
	struct au1000_private *aup = (struct au1000_private *) dev->priv;

	if (au1000_debug > 4)
		printk(KERN_INFO "%s: reset mac, aup %x\n",
				dev->name, (unsigned)aup);

	spin_lock_irqsave(&aup->lock, flags);
	del_timer(&aup->timer);
	hard_stop(dev);
	*aup->enable = MAC_EN_CLOCK_ENABLE;
	*aup->enable = 0;
	spin_unlock_irqrestore(&aup->lock, flags);
}

/*
 * Setup the receive and transmit "rings".  These pointers are the addresses
 * of the rx and tx MAC DMA registers so they are fixed by the hardware --
 * these are not descriptors sitting in memory.
 */
static void
setup_hw_rings(struct au1000_private *aup, u32 rx_base, u32 tx_base)
{
	int i;

	for (i = 0; i < NUM_RX_DMA; i++) {
		aup->rx_dma_ring[i] =
			(volatile rx_dma_t *) (rx_base + sizeof(rx_dma_t)*i);
	}
	for (i = 0; i < NUM_TX_DMA; i++) {
		aup->tx_dma_ring[i] =
			(volatile tx_dma_t *) (tx_base + sizeof(tx_dma_t)*i);
	}
}
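
/*
 * Called from au1000_probe1() with the MAC-specific DMA register bases,
 * e.g. setup_hw_rings(aup, MAC0_RX_DMA_ADDR, MAC0_TX_DMA_ADDR) for the
 * first MAC, so rx_dma_ring[i]/tx_dma_ring[i] end up pointing at
 * hardware registers rather than at memory-resident descriptors.
 */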

static int __init au1000_init_module(void)
{
	int i;
	int prid;
	long base_addr;
	int irq;

	prid = read_c0_prid();
	for (i = 0; i < NUM_INTERFACES; i++) {
		if ( (prid & 0xffff0000) == 0x00030000 ) {
			base_addr = au1000_iflist[i].port;
			irq = au1000_iflist[i].irq;
		} else if ( (prid & 0xffff0000) == 0x01030000 ) {
			base_addr = au1500_iflist[i].port;
			irq = au1500_iflist[i].irq;
		} else if ( (prid & 0xffff0000) == 0x02030000 ) {
			base_addr = au1100_iflist[i].port;
			irq = au1100_iflist[i].irq;
		} else {
			printk(KERN_ERR "au1000 eth: unknown Processor ID\n");
			return -ENODEV;
		}
		// check for valid entries, au1100 only has one entry
		if (base_addr && irq) {
			if (au1000_probe1(NULL, base_addr, irq, i) != 0) {
				return -ENODEV;
			}
		}
	}
	return 0;
}

static int __init
au1000_probe1(struct net_device *dev, long ioaddr, int irq, int port_num)
{
	static unsigned version_printed = 0;
	struct au1000_private *aup = NULL;
	int i, retval = 0;
	db_dest_t *pDB, *pDBfree;
	char *pmac, *argptr;
	char ethaddr[6];

	if (!request_region(PHYSADDR(ioaddr), MAC_IOSIZE, "Au1000 ENET"))
		return -ENODEV;

	if (version_printed++ == 0)
		printk(version);

	if (!dev)
		dev = init_etherdev(NULL, sizeof(struct au1000_private));
	if (!dev) {
		printk (KERN_ERR "au1000 eth: init_etherdev failed\n");
		release_region(ioaddr, MAC_IOSIZE);
		return -ENODEV;
	}

	printk("%s: Au1xxx ethernet found at 0x%lx, irq %d\n",
			dev->name, ioaddr, irq);

	aup = dev->priv;

	/* Allocate the data buffers */
	aup->vaddr = (u32)dma_alloc(MAX_BUF_SIZE *
			(NUM_TX_BUFFS+NUM_RX_BUFFS), &aup->dma_addr);
	if (!aup->vaddr) {
		retval = -ENOMEM;
		goto free_region;
	}

	/* aup->mac is the base address of the MAC's registers */
	aup->mac = (volatile mac_reg_t *)((unsigned long)ioaddr);
	/* Setup some variables for quick register address access */
	switch (ioaddr) {
	case AU1000_ETH0_BASE:
	case AU1500_ETH0_BASE:
		/* check env variables first */
		if (!get_ethernet_addr(ethaddr)) {
			memcpy(au1000_mac_addr, ethaddr, sizeof(dev->dev_addr));
		} else {
			/* Check command line */
			argptr = prom_getcmdline();
			if ((pmac = strstr(argptr, "ethaddr=")) == NULL) {
				printk(KERN_INFO "%s: No mac address found\n",
						dev->name);
				/* use the hard coded mac addresses */
			} else {
				str2eaddr(ethaddr, pmac + strlen("ethaddr="));
				memcpy(au1000_mac_addr, ethaddr,
						sizeof(dev->dev_addr));
			}
		}
		if (ioaddr == AU1000_ETH0_BASE)
			aup->enable = (volatile u32 *)
				((unsigned long)AU1000_MAC0_ENABLE);
		else
			aup->enable = (volatile u32 *)
				((unsigned long)AU1500_MAC0_ENABLE);
		memcpy(dev->dev_addr, au1000_mac_addr, sizeof(dev->dev_addr));
		setup_hw_rings(aup, MAC0_RX_DMA_ADDR, MAC0_TX_DMA_ADDR);
		break;
	case AU1000_ETH1_BASE:
	case AU1500_ETH1_BASE:
		if (ioaddr == AU1000_ETH1_BASE)
			aup->enable = (volatile u32 *)
				((unsigned long)AU1000_MAC1_ENABLE);
		else
			aup->enable = (volatile u32 *)
				((unsigned long)AU1500_MAC1_ENABLE);
		memcpy(dev->dev_addr, au1000_mac_addr, sizeof(dev->dev_addr));
		dev->dev_addr[4] += 0x10;
		setup_hw_rings(aup, MAC1_RX_DMA_ADDR, MAC1_TX_DMA_ADDR);
		break;
	default:
		printk(KERN_ERR "%s: bad ioaddr\n", dev->name);
		retval = -ENODEV;
		goto free_region;
	}

	aup->phy_addr = PHY_ADDRESS;

	/* bring the device out of reset, otherwise probing the mii
	 * will hang */
	*aup->enable = MAC_EN_CLOCK_ENABLE;
	*aup->enable = MAC_EN_RESET0 | MAC_EN_RESET1 |
		MAC_EN_RESET2 | MAC_EN_CLOCK_ENABLE;

	if (mii_probe(dev) != 0) {
		retval = -ENODEV;
		goto free_region;
	}

	pDBfree = NULL;
	/* setup the data buffer descriptors and attach a buffer to each one */
	pDB = aup->db;
	for (i = 0; i < (NUM_TX_BUFFS+NUM_RX_BUFFS); i++) {
		pDB->pnext = pDBfree;
		pDBfree = pDB;
		pDB->vaddr = (u32 *)((unsigned)aup->vaddr + MAX_BUF_SIZE*i);
		pDB->dma_addr = (dma_addr_t)virt_to_bus(pDB->vaddr);
		pDB++;
	}
	aup->pDBfree = pDBfree;

	for (i = 0; i < NUM_RX_DMA; i++) {
		pDB = GetFreeDB(aup);
		if (!pDB) goto free_region;
		aup->rx_dma_ring[i]->buff_stat = (unsigned)pDB->dma_addr;
		aup->rx_db_inuse[i] = pDB;
	}
	for (i = 0; i < NUM_TX_DMA; i++) {
		pDB = GetFreeDB(aup);
		if (!pDB) goto free_region;
		aup->tx_dma_ring[i]->buff_stat = (unsigned)pDB->dma_addr;
		aup->tx_dma_ring[i]->len = 0;
		aup->tx_db_inuse[i] = pDB;
	}

	spin_lock_init(&aup->lock);
	dev->base_addr = ioaddr;
	dev->irq = irq;
	dev->open = au1000_open;
	dev->hard_start_xmit = au1000_tx;
	dev->stop = au1000_close;
	dev->get_stats = au1000_get_stats;
	dev->set_multicast_list = &set_rx_mode;
	dev->do_ioctl = &au1000_ioctl;
	dev->set_config = &au1000_set_config;
	dev->tx_timeout = au1000_tx_timeout;
	dev->watchdog_timeo = ETH_TX_TIMEOUT;

	/* Fill in the fields of the device structure with ethernet values. */
	ether_setup(dev);

	/*
	 * The boot code uses the ethernet controller, so reset it to start
	 * fresh.  au1000_init() expects that the device is in reset state.
	 */
	reset_mac(dev);

	return 0;

free_region:
	release_region(PHYSADDR(ioaddr), MAC_IOSIZE);
	unregister_netdev(dev);
	if (aup->vaddr)
		dma_free((void *)aup->vaddr,
				MAX_BUF_SIZE * (NUM_TX_BUFFS+NUM_RX_BUFFS));
	printk(KERN_ERR "%s: au1000_probe1 failed.  Returns %d\n",
			dev->name, retval);
	return retval;
}

/*
 * Initialize the interface.
 *
 * When the device powers up, the clocks are disabled and the
 * mac is in reset state.  When the interface is closed, we
 * do the same -- reset the device and disable the clocks to
 * conserve power. Thus, whenever au1000_init() is called,
 * the device should already be in reset state.
 */
static int au1000_init(struct net_device *dev)
{
	struct au1000_private *aup = (struct au1000_private *) dev->priv;
	unsigned long flags;
	int i;
	u32 control;
	u16 link, speed;

	if (au1000_debug > 4) printk("%s: au1000_init\n", dev->name);

	spin_lock_irqsave(&aup->lock, flags);

	/* bring the device out of reset */
	*aup->enable = MAC_EN_CLOCK_ENABLE;
	*aup->enable = MAC_EN_RESET0 | MAC_EN_RESET1 |
		MAC_EN_RESET2 | MAC_EN_CLOCK_ENABLE;

	aup->mac->control = 0;
	aup->tx_head = (aup->tx_dma_ring[0]->buff_stat & 0xC) >> 2;
	aup->tx_tail = aup->tx_head;
	aup->rx_head = (aup->rx_dma_ring[0]->buff_stat & 0xC) >> 2;

	aup->mac->mac_addr_high = dev->dev_addr[5]<<8 | dev->dev_addr[4];
	aup->mac->mac_addr_low = dev->dev_addr[3]<<24 | dev->dev_addr[2]<<16 |
		dev->dev_addr[1]<<8 | dev->dev_addr[0];

	for (i = 0; i < NUM_RX_DMA; i++) {
		aup->rx_dma_ring[i]->buff_stat |= RX_DMA_ENABLE;
	}

	aup->phy_ops->phy_status(dev, aup->phy_addr, &link, &speed);
	control = MAC_DISABLE_RX_OWN | MAC_RX_ENABLE | MAC_TX_ENABLE;
#ifndef CONFIG_CPU_LITTLE_ENDIAN
	control |= MAC_BIG_ENDIAN;
#endif
	if (link && (dev->if_port == IF_PORT_100BASEFX)) {
		control |= MAC_FULL_DUPLEX;
	}
	aup->mac->control = control;

	spin_unlock_irqrestore(&aup->lock, flags);
	return 0;
}
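
/*
 * Note on the MAC address registers programmed above: mac_addr_high
 * holds bytes 5..4 of dev_addr and mac_addr_low holds bytes 3..0,
 * i.e. the station address is loaded least-significant byte first.
 */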

static void au1000_timer(unsigned long data)
{
	struct net_device *dev = (struct net_device *)data;
	struct au1000_private *aup = (struct au1000_private *) dev->priv;
	unsigned char if_port;
	u16 link, speed;

	if (!dev) {
		/* fatal error, don't restart the timer */
		printk(KERN_ERR "au1000_timer error: NULL dev\n");
		return;
	}

	if_port = dev->if_port;
	if (aup->phy_ops->phy_status(dev, aup->phy_addr, &link, &speed) == 0) {
		if (link) {
			if (!(dev->flags & IFF_RUNNING)) {
				netif_carrier_on(dev);
				dev->flags |= IFF_RUNNING;
				printk(KERN_INFO "%s: link up\n", dev->name);
			}
		}
		else {
			if (dev->flags & IFF_RUNNING) {
				netif_carrier_off(dev);
				dev->flags &= ~IFF_RUNNING;
				dev->if_port = IF_PORT_UNKNOWN;
				printk(KERN_INFO "%s: link down\n", dev->name);
			}
		}
	}

	if (link && (dev->if_port != if_port) &&
			(dev->if_port != IF_PORT_UNKNOWN)) {
		hard_stop(dev);
		if (dev->if_port == IF_PORT_100BASEFX) {
			printk(KERN_INFO "%s: going to full duplex\n",
					dev->name);
			aup->mac->control |= MAC_FULL_DUPLEX;
		}
		else {
			aup->mac->control &= ~MAC_FULL_DUPLEX;
		}
		enable_rx_tx(dev);
	}

	aup->timer.expires = RUN_AT((1*HZ));
	aup->timer.data = (unsigned long)dev;
	aup->timer.function = &au1000_timer; /* timer handler */
	add_timer(&aup->timer);
}

static int au1000_open(struct net_device *dev)
{
	int retval;
	struct au1000_private *aup = (struct au1000_private *) dev->priv;

	if (au1000_debug > 4)
		printk("%s: open: dev=%p\n", dev->name, dev);

	if ((retval = au1000_init(dev))) {
		printk(KERN_ERR "%s: error in au1000_init\n", dev->name);
		free_irq(dev->irq, dev);
		return retval;
	}
	netif_start_queue(dev);

	if ((retval = request_irq(dev->irq, &au1000_interrupt, 0,
					dev->name, dev))) {
		printk(KERN_ERR "%s: unable to get IRQ %d\n",
				dev->name, dev->irq);
		return retval;
	}

	init_timer(&aup->timer);
	aup->timer.expires = RUN_AT((3*HZ));
	aup->timer.data = (unsigned long)dev;
	aup->timer.function = &au1000_timer; /* timer handler */
	add_timer(&aup->timer);

	if (au1000_debug > 4)
		printk("%s: open: Initialization done.\n", dev->name);

	return 0;
}

static int au1000_close(struct net_device *dev)
{
	unsigned long flags;
	struct au1000_private *aup = (struct au1000_private *) dev->priv;

	if (au1000_debug > 4)
		printk("%s: close: dev=%p\n", dev->name, dev);

	spin_lock_irqsave(&aup->lock, flags);

	/* stop the device */
	if (netif_device_present(dev))
		netif_stop_queue(dev);

	/* disable the interrupt */
	free_irq(dev->irq, dev);
	spin_unlock_irqrestore(&aup->lock, flags);

	return 0;
}

static void __exit au1000_cleanup_module(void)
{
}

static inline void
update_tx_stats(struct net_device *dev, u32 status, u32 pkt_len)
{
	struct au1000_private *aup = (struct au1000_private *) dev->priv;
	struct net_device_stats *ps = &aup->stats;

	ps->tx_packets++;
	ps->tx_bytes += pkt_len;

	if (status & TX_FRAME_ABORTED) {
		if (dev->if_port == IF_PORT_100BASEFX) {
			if (status & (TX_JAB_TIMEOUT | TX_UNDERRUN)) {
				/* any other tx errors are only valid
				 * in half duplex mode */
				ps->tx_errors++;
				ps->tx_aborted_errors++;
			}
		}
		else {
			ps->tx_errors++;
			ps->tx_aborted_errors++;
			if (status & (TX_NO_CARRIER | TX_LOSS_CARRIER))
				ps->tx_carrier_errors++;
		}
	}
}

/*
 * Called from the interrupt service routine to acknowledge
 * the TX DONE bits.  This is a must if the irq is setup as
 * edge triggered.
 */
static void au1000_tx_ack(struct net_device *dev)
{
	struct au1000_private *aup = (struct au1000_private *) dev->priv;
	volatile tx_dma_t *ptxd;

	ptxd = aup->tx_dma_ring[aup->tx_tail];

	while (ptxd->buff_stat & TX_T_DONE) {
		update_tx_stats(dev, ptxd->status, aup->tx_len[aup->tx_tail] & 0x3ff);
		ptxd->buff_stat &= ~TX_T_DONE;
		aup->tx_len[aup->tx_tail] = 0;
		ptxd->len = 0;

		aup->tx_tail = (aup->tx_tail + 1) & (NUM_TX_DMA - 1);
		ptxd = aup->tx_dma_ring[aup->tx_tail];

		if (aup->tx_full) {
			aup->tx_full = 0;
			netif_wake_queue(dev);
		}
	}
}

/*
 * Au1000 transmit routine.
 */
static int au1000_tx(struct sk_buff *skb, struct net_device *dev)
{
	struct au1000_private *aup = (struct au1000_private *) dev->priv;
	volatile tx_dma_t *ptxd;
	u32 buff_stat;
	db_dest_t *pDB;
	int i;

	if (au1000_debug > 4)
		printk("%s: tx: aup %x len=%d, data=%p, head %d\n",
				dev->name, (unsigned)aup, skb->len,
				skb->data, aup->tx_head);

	ptxd = aup->tx_dma_ring[aup->tx_head];
	buff_stat = ptxd->buff_stat;
	if (buff_stat & TX_DMA_ENABLE) {
		/* We've wrapped around and the transmitter is still busy */
		netif_stop_queue(dev);
		aup->tx_full = 1;
		return 1;
	}
	else if (buff_stat & TX_T_DONE) {
		update_tx_stats(dev, ptxd->status, aup->tx_len[aup->tx_head] & 0x3ff);
		aup->tx_len[aup->tx_head] = 0;
		ptxd->len = 0;
	}

	if (aup->tx_full) {
		aup->tx_full = 0;
		netif_wake_queue(dev);
	}

	pDB = aup->tx_db_inuse[aup->tx_head];
	memcpy((void *)pDB->vaddr, skb->data, skb->len);
	if (skb->len < MAC_MIN_PKT_SIZE) {
		for (i = skb->len; i < MAC_MIN_PKT_SIZE; i++) {
			((char *)pDB->vaddr)[i] = 0;
		}
		aup->tx_len[aup->tx_head] = MAC_MIN_PKT_SIZE;
		ptxd->len = MAC_MIN_PKT_SIZE;
	}
	else {
		aup->tx_len[aup->tx_head] = skb->len;
		ptxd->len = skb->len;
	}
	ptxd->buff_stat = pDB->dma_addr | TX_DMA_ENABLE;
	dev_kfree_skb(skb);
	aup->tx_head = (aup->tx_head + 1) & (NUM_TX_DMA - 1);
	dev->trans_start = jiffies;
	return 0;
}
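
/*
 * Frames shorter than MAC_MIN_PKT_SIZE are zero-padded above before the
 * descriptor is handed to the DMA engine, so the hardware always sends
 * at least a minimum-sized frame; tx_len[] remembers the padded length
 * for the statistics update in au1000_tx_ack().
 */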

static inline void update_rx_stats(struct net_device *dev, u32 status)
{
	struct au1000_private *aup = (struct au1000_private *) dev->priv;
	struct net_device_stats *ps = &aup->stats;

	ps->rx_packets++;
	if (status & RX_MCAST_FRAME)
		ps->multicast++;

	if (status & RX_ERROR) {
		ps->rx_errors++;
		if (status & RX_MISSED_FRAME)
			ps->rx_missed_errors++;
		if (status & (RX_OVERLEN | RX_LEN_ERROR))
			ps->rx_length_errors++;
		if (status & RX_CRC_ERROR)
			ps->rx_crc_errors++;
		if (status & RX_COLL)
			ps->collisions++;
	}
	else
		ps->rx_bytes += status & RX_FRAME_LEN_MASK;
}

/*
 * Au1000 receive routine.
 */
static int au1000_rx(struct net_device *dev)
{
	struct au1000_private *aup = (struct au1000_private *) dev->priv;
	struct sk_buff *skb;
	volatile rx_dma_t *prxd;
	u32 buff_stat, status;
	db_dest_t *pDB;

	if (au1000_debug > 4)
		printk("%s: au1000_rx head %d\n", dev->name, aup->rx_head);

	prxd = aup->rx_dma_ring[aup->rx_head];
	buff_stat = prxd->buff_stat;
	while (buff_stat & RX_T_DONE) {
		status = prxd->status;
		pDB = aup->rx_db_inuse[aup->rx_head];
		update_rx_stats(dev, status);
		if (!(status & RX_ERROR)) {
			/* good frame */
			skb = dev_alloc_skb((status & RX_FRAME_LEN_MASK) + 2);
			if (skb == NULL) {
				printk(KERN_ERR
					"%s: Memory squeeze, dropping packet.\n",
					dev->name);
				aup->stats.rx_dropped++;
				continue;
			}
			skb_reserve(skb, 2);	/* 16 byte IP header align */
			eth_copy_and_sum(skb, (unsigned char *)pDB->vaddr,
					status & RX_FRAME_LEN_MASK, 0);
			skb_put(skb, status & RX_FRAME_LEN_MASK);
			skb->protocol = eth_type_trans(skb, dev);
			netif_rx(skb);	/* pass the packet to upper layers */
		}
		else {
			if (au1000_debug > 4) {
				if (status & RX_MISSED_FRAME)
					printk("rx miss\n");
				if (status & RX_WDOG_TIMER)
					printk("rx wdog\n");
				if (status & RX_RUNT)
					printk("rx runt\n");
				if (status & RX_OVERLEN)
					printk("rx overlen\n");
				if (status & RX_COLL)
					printk("rx coll\n");
				if (status & RX_MII_ERROR)
					printk("rx mii error\n");
				if (status & RX_CRC_ERROR)
					printk("rx crc error\n");
				if (status & RX_LEN_ERROR)
					printk("rx len error\n");
				if (status & RX_U_CNTRL_FRAME)
					printk("rx u control frame\n");
				if (status & RX_MISSED_FRAME)
					printk("rx miss\n");
			}
		}
		prxd->buff_stat = (u32)(pDB->dma_addr | RX_DMA_ENABLE);
		aup->rx_head = (aup->rx_head + 1) & (NUM_RX_DMA - 1);

		/* next descriptor */
		prxd = aup->rx_dma_ring[aup->rx_head];
		buff_stat = prxd->buff_stat;
		dev->last_rx = jiffies;
	}
	return 0;
}

/*
 * Au1000 interrupt service routine.
 */
irqreturn_t au1000_interrupt(int irq, void *dev_id, struct pt_regs *regs)
{
	struct net_device *dev = (struct net_device *) dev_id;

	if (dev == NULL) {
		printk(KERN_ERR "au1000 eth: isr: null dev ptr\n");
		return IRQ_NONE;
	}
	au1000_tx_ack(dev);
	au1000_rx(dev);
	return IRQ_HANDLED;
}

/*
 * The Tx ring has been full longer than the watchdog timeout
 * value. The transmitter must be hung?
 */
static void au1000_tx_timeout(struct net_device *dev)
{
	printk(KERN_ERR "%s: au1000_tx_timeout: dev=%p\n", dev->name, dev);
	reset_mac(dev);
	au1000_init(dev);
	dev->trans_start = jiffies;
	netif_wake_queue(dev);
}

static void set_rx_mode(struct net_device *dev)
{
	struct au1000_private *aup = (struct au1000_private *) dev->priv;

	if (au1000_debug > 4)
		printk("%s: set_rx_mode: flags=%x\n", dev->name, dev->flags);

	if (dev->flags & IFF_PROMISC) {			/* Set promiscuous. */
		aup->mac->control |= MAC_PROMISCUOUS;
		printk(KERN_INFO "%s: Promiscuous mode enabled.\n", dev->name);
	} else if ((dev->flags & IFF_ALLMULTI) ||
			dev->mc_count > MULTICAST_FILTER_LIMIT) {
		aup->mac->control |= MAC_PASS_ALL_MULTI;
		aup->mac->control &= ~MAC_PROMISCUOUS;
		printk(KERN_INFO "%s: Pass all multicast\n", dev->name);
	} else {
		int i;
		struct dev_mc_list *mclist;
		u32 mc_filter[2];	/* Multicast hash filter */

		mc_filter[1] = mc_filter[0] = 0;
		for (i = 0, mclist = dev->mc_list; mclist && i < dev->mc_count;
				i++, mclist = mclist->next) {
			set_bit(ether_crc_le(ETH_ALEN, mclist->dmi_addr)>>26,
					(long *)mc_filter);
		}
		aup->mac->multi_hash_high = mc_filter[1];
		aup->mac->multi_hash_low = mc_filter[0];
		aup->mac->control &= ~MAC_PROMISCUOUS;
		aup->mac->control |= MAC_HASH_MODE;
	}
}
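
/*
 * The multicast filter above is a 64-bit hash: bit
 * (ether_crc_le(addr) >> 26) is set in mc_filter[], and the two 32-bit
 * halves are loaded into multi_hash_high/multi_hash_low with
 * MAC_HASH_MODE enabled.
 */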

static int au1000_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
{
	u16 *data = (u16 *)&rq->ifr_data;

	switch(cmd) {
	case SIOCGMIIPHY:	/* Get the address of the PHY in use. */
		data[0] = PHY_ADDRESS;
		return 0;

	case SIOCGMIIREG:	/* Read the specified MII register. */
		//data[3] = mdio_read(ioaddr, data[0], data[1]);
		return 0;

	case SIOCSMIIREG:	/* Write the specified MII register */
		if (!capable(CAP_NET_ADMIN))
			return -EPERM;
		//mdio_write(ioaddr, data[0], data[1], data[2]);
		return 0;

	default:
		return -EOPNOTSUPP;
	}
}

static int au1000_set_config(struct net_device *dev, struct ifmap *map)
{
	struct au1000_private *aup = (struct au1000_private *) dev->priv;
	u16 control;

	if (au1000_debug > 4) {
		printk("%s: set_config called: dev->if_port %d map->port %x\n",
				dev->name, dev->if_port, map->port);
	}

	switch(map->port) {
	case IF_PORT_UNKNOWN: /* use auto here */
		printk(KERN_INFO "%s: config phy for aneg\n",
				dev->name);
		dev->if_port = map->port;
		/* Link Down: the timer will bring it up */
		netif_carrier_off(dev);

		/* read current control */
		control = mdio_read(dev, aup->phy_addr, MII_CONTROL);
		control &= ~(MII_CNTL_FDX | MII_CNTL_F100);

		/* enable auto negotiation and reset the negotiation */
		mdio_write(dev, aup->phy_addr, MII_CONTROL,
				control | MII_CNTL_AUTO |
				MII_CNTL_RST_AUTO);
		break;

	case IF_PORT_10BASET: /* 10BaseT */
		printk(KERN_INFO "%s: config phy for 10BaseT\n",
				dev->name);
		dev->if_port = map->port;

		/* Link Down: the timer will bring it up */
		netif_carrier_off(dev);

		/* set Speed to 10Mbps, Half Duplex */
		control = mdio_read(dev, aup->phy_addr, MII_CONTROL);
		control &= ~(MII_CNTL_F100 | MII_CNTL_AUTO |
				MII_CNTL_FDX);

		/* disable auto negotiation and force 10M/HD mode*/
		mdio_write(dev, aup->phy_addr, MII_CONTROL, control);
		break;

	case IF_PORT_100BASET: /* 100BaseT */
	case IF_PORT_100BASETX: /* 100BaseTx */
		printk(KERN_INFO "%s: config phy for 100BaseTX\n",
				dev->name);
		dev->if_port = map->port;

		/* Link Down: the timer will bring it up */
		netif_carrier_off(dev);

		/* set Speed to 100Mbps, Half Duplex */
		/* disable auto negotiation and enable 100MBit Mode */
		control = mdio_read(dev, aup->phy_addr, MII_CONTROL);
		printk("read control %x\n", control);
		control &= ~(MII_CNTL_AUTO | MII_CNTL_FDX);
		control |= MII_CNTL_F100;
		mdio_write(dev, aup->phy_addr, MII_CONTROL, control);
		break;

	case IF_PORT_100BASEFX: /* 100BaseFx */
		printk(KERN_INFO "%s: config phy for 100BaseFX\n",
				dev->name);
		dev->if_port = map->port;

		/* Link Down: the timer will bring it up */
		netif_carrier_off(dev);

		/* set Speed to 100Mbps, Full Duplex */
		/* disable auto negotiation and enable 100MBit Mode */
		control = mdio_read(dev, aup->phy_addr, MII_CONTROL);
		control &= ~MII_CNTL_AUTO;
		control |= MII_CNTL_F100 | MII_CNTL_FDX;
		mdio_write(dev, aup->phy_addr, MII_CONTROL, control);
		break;

	case IF_PORT_10BASE2: /* 10Base2 */
	case IF_PORT_AUI: /* AUI */
		/* These Modes are not supported (are they?)*/
		printk(KERN_ERR "%s: 10Base2/AUI not supported",
				dev->name);
		return -EOPNOTSUPP;

	default:
		printk(KERN_ERR "%s: Invalid media selected",
				dev->name);
		return -EINVAL;
	}
	return 0;
}

static struct net_device_stats *au1000_get_stats(struct net_device *dev)
{
	struct au1000_private *aup = (struct au1000_private *) dev->priv;

	if (au1000_debug > 4)
		printk("%s: au1000_get_stats: dev=%p\n", dev->name, dev);

	if (netif_device_present(dev)) {
		return &aup->stats;
	}
	return 0;
}

module_init(au1000_init_module);
module_exit(au1000_cleanup_module);