/*
 * Atmel MACB Ethernet Controller driver
 *
 * Copyright (C) 2004-2006 Atmel Corporation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/clk.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/mii.h>
#include <linux/mutex.h>
#include <linux/dma-mapping.h>
#include <linux/ethtool.h>
#include <linux/platform_device.h>

#include <asm/arch/board.h>

#include "macb.h"

#define to_net_dev(class) container_of(class, struct net_device, class_dev)

#define RX_BUFFER_SIZE		128
#define RX_RING_SIZE		512
#define RX_RING_BYTES		(sizeof(struct dma_desc) * RX_RING_SIZE)

/* Make the IP header word-aligned (the ethernet header is 14 bytes) */
#define RX_OFFSET		2

#define TX_RING_SIZE		128
#define DEF_TX_RING_PENDING	(TX_RING_SIZE - 1)
#define TX_RING_BYTES		(sizeof(struct dma_desc) * TX_RING_SIZE)

#define TX_RING_GAP(bp)						\
	(TX_RING_SIZE - (bp)->tx_pending)
#define TX_BUFFS_AVAIL(bp)					\
	(((bp)->tx_tail <= (bp)->tx_head) ?			\
	 (bp)->tx_tail + (bp)->tx_pending - (bp)->tx_head :	\
	 (bp)->tx_tail - (bp)->tx_head - TX_RING_GAP(bp))
#define NEXT_TX(n)		(((n) + 1) & (TX_RING_SIZE - 1))

#define NEXT_RX(n)		(((n) + 1) & (RX_RING_SIZE - 1))

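/*
 * Worked example (added for clarity, not part of the original driver
 * comments): with TX_RING_SIZE = 128 and tx_pending at its default of
 * 127, TX_RING_GAP() is 1, so one descriptor is always left unused to
 * distinguish a full ring from an empty one.  With tx_head = 10 and
 * tx_tail = 5 there are 5 buffers in flight and TX_BUFFS_AVAIL() is
 * 5 + 127 - 10 = 122.  NEXT_TX()/NEXT_RX() rely on the ring sizes being
 * powers of two, so the increment wraps with a mask instead of a modulo.
 */
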
/* minimum number of free TX descriptors before waking up TX process */
#define MACB_TX_WAKEUP_THRESH	(TX_RING_SIZE / 4)

#define MACB_RX_INT_FLAGS	(MACB_BIT(RCOMP) | MACB_BIT(RXUBR)	\
				 | MACB_BIT(ISR_ROVR))

static void __macb_set_hwaddr(struct macb *bp)
{
	u32 bottom;
	u16 top;

	bottom = cpu_to_le32(*((u32 *)bp->dev->dev_addr));
	macb_writel(bp, SA1B, bottom);
	top = cpu_to_le16(*((u16 *)(bp->dev->dev_addr + 4)));
	macb_writel(bp, SA1T, top);
}

static void __init macb_get_hwaddr(struct macb *bp)
{
	u32 bottom;
	u16 top;
	u8 addr[6];

	bottom = macb_readl(bp, SA1B);
	top = macb_readl(bp, SA1T);

	addr[0] = bottom & 0xff;
	addr[1] = (bottom >> 8) & 0xff;
	addr[2] = (bottom >> 16) & 0xff;
	addr[3] = (bottom >> 24) & 0xff;
	addr[4] = top & 0xff;
	addr[5] = (top >> 8) & 0xff;

	if (is_valid_ether_addr(addr))
		memcpy(bp->dev->dev_addr, addr, sizeof(addr));
}

static void macb_enable_mdio(struct macb *bp)
{
	unsigned long flags;
	u32 reg;

	spin_lock_irqsave(&bp->lock, flags);
	reg = macb_readl(bp, NCR);
	reg |= MACB_BIT(MPE);
	macb_writel(bp, NCR, reg);
	macb_writel(bp, IER, MACB_BIT(MFD));
	spin_unlock_irqrestore(&bp->lock, flags);
}

static void macb_disable_mdio(struct macb *bp)
{
	unsigned long flags;
	u32 reg;

	spin_lock_irqsave(&bp->lock, flags);
	reg = macb_readl(bp, NCR);
	reg &= ~MACB_BIT(MPE);
	macb_writel(bp, NCR, reg);
	macb_writel(bp, IDR, MACB_BIT(MFD));
	spin_unlock_irqrestore(&bp->lock, flags);
}

static int macb_mdio_read(struct net_device *dev, int phy_id, int location)
{
	struct macb *bp = netdev_priv(dev);
	int value;

	mutex_lock(&bp->mdio_mutex);

	macb_enable_mdio(bp);
	macb_writel(bp, MAN, (MACB_BF(SOF, MACB_MAN_SOF)
			      | MACB_BF(RW, MACB_MAN_READ)
			      | MACB_BF(PHYA, phy_id)
			      | MACB_BF(REGA, location)
			      | MACB_BF(CODE, MACB_MAN_CODE)));

	wait_for_completion(&bp->mdio_complete);

	value = MACB_BFEXT(DATA, macb_readl(bp, MAN));
	macb_disable_mdio(bp);
	mutex_unlock(&bp->mdio_mutex);

	return value;
}

static void macb_mdio_write(struct net_device *dev, int phy_id,
			    int location, int val)
{
	struct macb *bp = netdev_priv(dev);

	dev_dbg(&bp->pdev->dev, "mdio_write %02x:%02x <- %04x\n",
		phy_id, location, val);

	mutex_lock(&bp->mdio_mutex);
	macb_enable_mdio(bp);

	macb_writel(bp, MAN, (MACB_BF(SOF, MACB_MAN_SOF)
			      | MACB_BF(RW, MACB_MAN_WRITE)
			      | MACB_BF(PHYA, phy_id)
			      | MACB_BF(REGA, location)
			      | MACB_BF(CODE, MACB_MAN_CODE)
			      | MACB_BF(DATA, val)));

	wait_for_completion(&bp->mdio_complete);

	macb_disable_mdio(bp);
	mutex_unlock(&bp->mdio_mutex);
}

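/*
 * Illustration only (not part of the original driver): how the MDIO
 * accessors above are typically used once bp->mii.phy_id has been filled
 * in by macb_phy_probe() below.  The helper name is made up for the
 * example and the block is kept under #if 0 so it is never compiled.
 */
#if 0
static int macb_example_link_up(struct net_device *dev)
{
	struct macb *bp = netdev_priv(dev);
	int bmsr;

	/* BMSR (PHY register 1) carries the link status bit */
	bmsr = macb_mdio_read(dev, bp->mii.phy_id, MII_BMSR);

	return (bmsr & BMSR_LSTATUS) ? 1 : 0;
}
#endif
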
static int macb_phy_probe(struct macb *bp)
{
	int phy_address;
	u16 phyid1, phyid2;

	for (phy_address = 0; phy_address < 32; phy_address++) {
		phyid1 = macb_mdio_read(bp->dev, phy_address, MII_PHYSID1);
		phyid2 = macb_mdio_read(bp->dev, phy_address, MII_PHYSID2);

		if (phyid1 != 0xffff && phyid1 != 0x0000
		    && phyid2 != 0xffff && phyid2 != 0x0000)
			break;
	}

	if (phy_address == 32)
		return -ENODEV;

	dev_info(&bp->pdev->dev,
		 "detected PHY at address %d (ID %04x:%04x)\n",
		 phy_address, phyid1, phyid2);

	bp->mii.phy_id = phy_address;
	return 0;
}

static void macb_set_media(struct macb *bp, int media)
{
	u32 reg;

	spin_lock_irq(&bp->lock);
	reg = macb_readl(bp, NCFGR);
	reg &= ~(MACB_BIT(SPD) | MACB_BIT(FD));
	if (media & (ADVERTISE_100HALF | ADVERTISE_100FULL))
		reg |= MACB_BIT(SPD);
	if (media & ADVERTISE_FULL)
		reg |= MACB_BIT(FD);
	macb_writel(bp, NCFGR, reg);
	spin_unlock_irq(&bp->lock);
}

static void macb_check_media(struct macb *bp, int ok_to_print, int init_media)
{
	struct mii_if_info *mii = &bp->mii;
	unsigned int old_carrier, new_carrier;
	int advertise, lpa, media, duplex;

	/* if forced media, go no further */
	if (mii->force_media)
		return;

	/* check current and old link status */
	old_carrier = netif_carrier_ok(mii->dev) ? 1 : 0;
	new_carrier = (unsigned int) mii_link_ok(mii);

	/* if carrier state did not change, assume nothing else did */
	if (!init_media && old_carrier == new_carrier)
		return;

	/* no carrier, nothing much to do */
	if (!new_carrier) {
		netif_carrier_off(mii->dev);
		printk(KERN_INFO "%s: link down\n", mii->dev->name);
		return;
	}

	/*
	 * we have carrier, see who's on the other end
	 */
	netif_carrier_on(mii->dev);

	/* get MII advertise and LPA values */
	if (!init_media && mii->advertising) {
		advertise = mii->advertising;
	} else {
		advertise = mii->mdio_read(mii->dev, mii->phy_id, MII_ADVERTISE);
		mii->advertising = advertise;
	}

	lpa = mii->mdio_read(mii->dev, mii->phy_id, MII_LPA);

	/* figure out media and duplex from advertise and LPA values */
	media = mii_nway_result(lpa & advertise);
	duplex = (media & ADVERTISE_FULL) ? 1 : 0;

	if (ok_to_print)
		printk(KERN_INFO "%s: link up, %sMbps, %s-duplex, lpa 0x%04X\n",
		       mii->dev->name,
		       media & (ADVERTISE_100FULL | ADVERTISE_100HALF) ? "100" : "10",
		       duplex ? "full" : "half", lpa);

	mii->full_duplex = duplex;

	/* Let the MAC know about the new link state */
	macb_set_media(bp, media);
}

static void macb_update_stats(struct macb *bp)
{
	u32 __iomem *reg = bp->regs + MACB_PFR;
	u32 *p = &bp->hw_stats.rx_pause_frames;
	u32 *end = &bp->hw_stats.tx_pause_frames + 1;

	WARN_ON((unsigned long)(end - p - 1) != (MACB_TPF - MACB_PFR) / 4);

	for (; p < end; p++, reg++)
		*p += __raw_readl(reg);
}

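/*
 * Note added for clarity: macb_update_stats() assumes that the u32
 * counters in struct macb_stats are laid out in exactly the same order
 * as the hardware statistics registers from MACB_PFR (rx_pause_frames)
 * through MACB_TPF (tx_pause_frames), so a single pointer walk can
 * accumulate every counter; the WARN_ON above checks that assumption.
 */
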
static void macb_periodic_task(struct work_struct *work)
{
	struct macb *bp = container_of(work, struct macb, periodic_task.work);

	macb_update_stats(bp);
	macb_check_media(bp, 1, 0);

	schedule_delayed_work(&bp->periodic_task, HZ);
}

static void macb_tx(struct macb *bp)
{
	unsigned int tail;
	unsigned int head;
	u32 status, bufstat;

	status = macb_readl(bp, TSR);
	macb_writel(bp, TSR, status);

	dev_dbg(&bp->pdev->dev, "macb_tx status = %02lx\n",
		(unsigned long)status);

	if (status & MACB_BIT(UND)) {
		printk(KERN_ERR "%s: TX underrun, resetting buffers\n",
		       bp->dev->name);
		bp->tx_head = bp->tx_tail = 0;
	}

	if (!(status & MACB_BIT(COMP)))
		/*
		 * This may happen when a buffer becomes complete
		 * between reading the ISR and scanning the
		 * descriptors. Nothing to worry about.
		 */
		return;

	head = bp->tx_head;
	for (tail = bp->tx_tail; tail != head; tail = NEXT_TX(tail)) {
		struct ring_info *rp = &bp->tx_skb[tail];
		struct sk_buff *skb = rp->skb;

		bufstat = bp->tx_ring[tail].ctrl;

		if (!(bufstat & MACB_BIT(TX_USED)))
			break;

		dev_dbg(&bp->pdev->dev, "skb %u (data %p) TX complete\n",
			tail, skb->data);
		dma_unmap_single(&bp->pdev->dev, rp->mapping, skb->len,
				 DMA_TO_DEVICE);
		bp->stats.tx_packets++;
		bp->stats.tx_bytes += skb->len;
		dev_kfree_skb_irq(skb);
	}

	bp->tx_tail = tail;
	if (netif_queue_stopped(bp->dev) &&
	    TX_BUFFS_AVAIL(bp) > MACB_TX_WAKEUP_THRESH)
		netif_wake_queue(bp->dev);
}

static int macb_rx_frame(struct macb *bp, unsigned int first_frag,
			 unsigned int last_frag)
{
	unsigned int len;
	unsigned int frag;
	unsigned int offset = 0;
	struct sk_buff *skb;

	len = MACB_BFEXT(RX_FRMLEN, bp->rx_ring[last_frag].ctrl);

	dev_dbg(&bp->pdev->dev, "macb_rx_frame frags %u - %u (len %u)\n",
		first_frag, last_frag, len);

	skb = dev_alloc_skb(len + RX_OFFSET);
	if (!skb) {
		bp->stats.rx_dropped++;
		for (frag = first_frag; ; frag = NEXT_RX(frag)) {
			bp->rx_ring[frag].addr &= ~MACB_BIT(RX_USED);
			if (frag == last_frag)
				break;
		}
		return 1;
	}

	skb_reserve(skb, RX_OFFSET);
	skb_put(skb, len);
	skb->ip_summed = CHECKSUM_NONE;

	for (frag = first_frag; ; frag = NEXT_RX(frag)) {
		unsigned int frag_len = RX_BUFFER_SIZE;

		if (offset + frag_len > len) {
			BUG_ON(frag != last_frag);
			frag_len = len - offset;
		}
		memcpy(skb->data + offset,
		       bp->rx_buffers + (RX_BUFFER_SIZE * frag),
		       frag_len);
		offset += RX_BUFFER_SIZE;
		bp->rx_ring[frag].addr &= ~MACB_BIT(RX_USED);

		if (frag == last_frag)
			break;
	}

	skb->protocol = eth_type_trans(skb, bp->dev);

	bp->stats.rx_packets++;
	bp->stats.rx_bytes += len;
	bp->dev->last_rx = jiffies;
	dev_dbg(&bp->pdev->dev, "received skb of length %u, csum: %08x\n",
		skb->len, skb->csum);
	netif_receive_skb(skb);

	return 0;
}

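/*
 * Worked example (added for clarity, not in the original driver): the
 * controller spreads one received frame across consecutive 128-byte RX
 * buffers.  A 300-byte frame therefore occupies three descriptors: the
 * copy loop in macb_rx_frame() takes 128 + 128 bytes from the first two
 * fragments, and on the last fragment offset (256) + 128 would exceed
 * len (300), so frag_len is clamped to 300 - 256 = 44 bytes before the
 * RX_USED bits are handed back to the hardware.
 */
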
/* Mark DMA descriptors from begin up to and not including end as unused */
static void discard_partial_frame(struct macb *bp, unsigned int begin,
				  unsigned int end)
{
	unsigned int frag;

	for (frag = begin; frag != end; frag = NEXT_RX(frag))
		bp->rx_ring[frag].addr &= ~MACB_BIT(RX_USED);
}

/*
 * When this happens, the hardware stats registers for whatever caused
 * it are updated, so we don't have to record anything.
 */
static int macb_rx(struct macb *bp, int budget)
{
	int received = 0;
	unsigned int tail = bp->rx_tail;
	int first_frag = -1;

	for (; budget > 0; tail = NEXT_RX(tail)) {
		u32 addr, ctrl;

		addr = bp->rx_ring[tail].addr;
		ctrl = bp->rx_ring[tail].ctrl;

		if (!(addr & MACB_BIT(RX_USED)))
			break;

		if (ctrl & MACB_BIT(RX_SOF)) {
			if (first_frag != -1)
				discard_partial_frame(bp, first_frag, tail);
			first_frag = tail;
		}

		if (ctrl & MACB_BIT(RX_EOF)) {
			int dropped;

			BUG_ON(first_frag == -1);

			dropped = macb_rx_frame(bp, first_frag, tail);
			first_frag = -1;
			if (!dropped) {
				received++;
				budget--;
			}
		}
	}

	if (first_frag != -1)
		bp->rx_tail = first_frag;
	else
		bp->rx_tail = tail;

	return received;
}

static int macb_poll(struct net_device *dev, int *budget)
{
	struct macb *bp = netdev_priv(dev);
	int orig_budget, work_done, retval = 0;
	u32 status;

	status = macb_readl(bp, RSR);
	macb_writel(bp, RSR, status);

	if (!status) {
		/*
		 * This may happen if an interrupt was pending before
		 * this function was called last time, and no packets
		 * have been received since.
		 */
		netif_rx_complete(dev);
		goto out;
	}

	dev_dbg(&bp->pdev->dev, "poll: status = %08lx, budget = %d\n",
		(unsigned long)status, *budget);

	if (!(status & MACB_BIT(REC))) {
		dev_warn(&bp->pdev->dev,
			 "No RX buffers complete, status = %02lx\n",
			 (unsigned long)status);
		netif_rx_complete(dev);
		goto out;
	}

	orig_budget = *budget;
	if (orig_budget > dev->quota)
		orig_budget = dev->quota;

	work_done = macb_rx(bp, orig_budget);
	if (work_done < orig_budget)
		netif_rx_complete(dev);
	else
		retval = 1;

	/*
	 * We've done what we can to clean the buffers. Make sure we
	 * get notified when new packets arrive.
	 */
out:
	macb_writel(bp, IER, MACB_RX_INT_FLAGS);

	/* TODO: Handle errors */

	return retval;
}

static irqreturn_t macb_interrupt(int irq, void *dev_id)
{
	struct net_device *dev = dev_id;
	struct macb *bp = netdev_priv(dev);
	u32 status;

	status = macb_readl(bp, ISR);

	if (unlikely(!status))
		return IRQ_NONE;

	spin_lock(&bp->lock);

	while (status) {
		if (status & MACB_BIT(MFD))
			complete(&bp->mdio_complete);

		/* close possible race with dev_close */
		if (unlikely(!netif_running(dev))) {
			macb_writel(bp, IDR, ~0UL);
			break;
		}

		if (status & MACB_RX_INT_FLAGS) {
			if (netif_rx_schedule_prep(dev)) {
				/*
				 * There's no point taking any more interrupts
				 * until we have processed the buffers
				 */
				macb_writel(bp, IDR, MACB_RX_INT_FLAGS);
				dev_dbg(&bp->pdev->dev, "scheduling RX softirq\n");
				__netif_rx_schedule(dev);
			}
		}

		if (status & (MACB_BIT(TCOMP) | MACB_BIT(ISR_TUND)))
			macb_tx(bp);

		/*
		 * Link change detection isn't possible with RMII, so we'll
		 * add that if/when we get our hands on a full-blown MII PHY.
		 */

		if (status & MACB_BIT(HRESP)) {
			/*
			 * TODO: Reset the hardware, and maybe move the printk
			 * to a lower-priority context as well (work queue?)
			 */
			printk(KERN_ERR "%s: DMA bus error: HRESP not OK\n",
			       dev->name);
		}

		status = macb_readl(bp, ISR);
	}

	spin_unlock(&bp->lock);

	return IRQ_HANDLED;
}

static int macb_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct macb *bp = netdev_priv(dev);
	dma_addr_t mapping;
	unsigned int len, entry;
	u32 ctrl;

#ifdef DEBUG
	int i;

	dev_dbg(&bp->pdev->dev,
		"start_xmit: len %u head %p data %p tail %p end %p\n",
		skb->len, skb->head, skb->data, skb->tail, skb->end);
	dev_dbg(&bp->pdev->dev, "data:");
	for (i = 0; i < 16; i++)
		printk(" %02x", (unsigned int)skb->data[i]);
	printk("\n");
#endif

	len = skb->len;
	spin_lock_irq(&bp->lock);

	/* This is a hard error, log it. */
	if (TX_BUFFS_AVAIL(bp) < 1) {
		netif_stop_queue(dev);
		spin_unlock_irq(&bp->lock);
		dev_err(&bp->pdev->dev,
			"BUG! Tx Ring full when queue awake!\n");
		dev_dbg(&bp->pdev->dev, "tx_head = %u, tx_tail = %u\n",
			bp->tx_head, bp->tx_tail);
		return NETDEV_TX_BUSY;
	}

	entry = bp->tx_head;
	dev_dbg(&bp->pdev->dev, "Allocated ring entry %u\n", entry);
	mapping = dma_map_single(&bp->pdev->dev, skb->data,
				 len, DMA_TO_DEVICE);
	bp->tx_skb[entry].skb = skb;
	bp->tx_skb[entry].mapping = mapping;
	dev_dbg(&bp->pdev->dev, "Mapped skb data %p to DMA addr %08lx\n",
		skb->data, (unsigned long)mapping);

	ctrl = MACB_BF(TX_FRMLEN, len);
	ctrl |= MACB_BIT(TX_LAST);
	if (entry == (TX_RING_SIZE - 1))
		ctrl |= MACB_BIT(TX_WRAP);

	bp->tx_ring[entry].addr = mapping;
	bp->tx_ring[entry].ctrl = ctrl;

	entry = NEXT_TX(entry);
	bp->tx_head = entry;

	macb_writel(bp, NCR, macb_readl(bp, NCR) | MACB_BIT(TSTART));

	if (TX_BUFFS_AVAIL(bp) < 1)
		netif_stop_queue(dev);

	spin_unlock_irq(&bp->lock);

	dev->trans_start = jiffies;

	return NETDEV_TX_OK;
}

static void macb_free_consistent(struct macb *bp)
{
	if (bp->tx_skb) {
		kfree(bp->tx_skb);
		bp->tx_skb = NULL;
	}
	if (bp->rx_ring) {
		dma_free_coherent(&bp->pdev->dev, RX_RING_BYTES,
				  bp->rx_ring, bp->rx_ring_dma);
		bp->rx_ring = NULL;
	}
	if (bp->tx_ring) {
		dma_free_coherent(&bp->pdev->dev, TX_RING_BYTES,
				  bp->tx_ring, bp->tx_ring_dma);
		bp->tx_ring = NULL;
	}
	if (bp->rx_buffers) {
		dma_free_coherent(&bp->pdev->dev,
				  RX_RING_SIZE * RX_BUFFER_SIZE,
				  bp->rx_buffers, bp->rx_buffers_dma);
		bp->rx_buffers = NULL;
	}
}

static int macb_alloc_consistent(struct macb *bp)
{
	int size;

	size = TX_RING_SIZE * sizeof(struct ring_info);
	bp->tx_skb = kmalloc(size, GFP_KERNEL);
	if (!bp->tx_skb)
		goto out_err;

	size = RX_RING_BYTES;
	bp->rx_ring = dma_alloc_coherent(&bp->pdev->dev, size,
					 &bp->rx_ring_dma, GFP_KERNEL);
	if (!bp->rx_ring)
		goto out_err;
	dev_dbg(&bp->pdev->dev,
		"Allocated RX ring of %d bytes at %08lx (mapped %p)\n",
		size, (unsigned long)bp->rx_ring_dma, bp->rx_ring);

	size = TX_RING_BYTES;
	bp->tx_ring = dma_alloc_coherent(&bp->pdev->dev, size,
					 &bp->tx_ring_dma, GFP_KERNEL);
	if (!bp->tx_ring)
		goto out_err;
	dev_dbg(&bp->pdev->dev,
		"Allocated TX ring of %d bytes at %08lx (mapped %p)\n",
		size, (unsigned long)bp->tx_ring_dma, bp->tx_ring);

	size = RX_RING_SIZE * RX_BUFFER_SIZE;
	bp->rx_buffers = dma_alloc_coherent(&bp->pdev->dev, size,
					    &bp->rx_buffers_dma, GFP_KERNEL);
	if (!bp->rx_buffers)
		goto out_err;
	dev_dbg(&bp->pdev->dev,
		"Allocated RX buffers of %d bytes at %08lx (mapped %p)\n",
		size, (unsigned long)bp->rx_buffers_dma, bp->rx_buffers);

	return 0;

out_err:
	macb_free_consistent(bp);
	return -ENOMEM;
}

static void macb_init_rings(struct macb *bp)
{
	int i;
	dma_addr_t addr;

	addr = bp->rx_buffers_dma;
	for (i = 0; i < RX_RING_SIZE; i++) {
		bp->rx_ring[i].addr = addr;
		bp->rx_ring[i].ctrl = 0;
		addr += RX_BUFFER_SIZE;
	}
	bp->rx_ring[RX_RING_SIZE - 1].addr |= MACB_BIT(RX_WRAP);

	for (i = 0; i < TX_RING_SIZE; i++) {
		bp->tx_ring[i].addr = 0;
		bp->tx_ring[i].ctrl = MACB_BIT(TX_USED);
	}
	bp->tx_ring[TX_RING_SIZE - 1].ctrl |= MACB_BIT(TX_WRAP);

	bp->rx_tail = bp->tx_head = bp->tx_tail = 0;
}

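/*
 * Example of the resulting layout (added for clarity, not in the
 * original driver): with RX_RING_SIZE = 512 and RX_BUFFER_SIZE = 128,
 * macb_init_rings() points RX descriptor i at rx_buffers_dma + i * 128,
 * so a single contiguous 64 KiB DMA area backs the whole RX ring.  The
 * last descriptor of each ring carries the WRAP bit so the controller
 * loops back to descriptor 0, and every TX descriptor starts out with
 * TX_USED set, i.e. owned by software.
 */
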
static void macb_reset_hw(struct macb *bp)
{
	/* Make sure we have the write buffer for ourselves */

	/*
	 * Disable RX and TX (XXX: Should we halt the transmission
	 * more gracefully?)
	 */
	macb_writel(bp, NCR, 0);

	/* Clear the stats registers (XXX: Update stats first?) */
	macb_writel(bp, NCR, MACB_BIT(CLRSTAT));

	/* Clear all status flags */
	macb_writel(bp, TSR, ~0UL);
	macb_writel(bp, RSR, ~0UL);

	/* Disable all interrupts */
	macb_writel(bp, IDR, ~0UL);
}

static void macb_init_hw(struct macb *bp)
{
	u32 config;

	macb_reset_hw(bp);
	__macb_set_hwaddr(bp);

	config = macb_readl(bp, NCFGR) & MACB_BF(CLK, -1L);
	config |= MACB_BIT(PAE);		/* PAuse Enable */
	config |= MACB_BIT(DRFCS);		/* Discard Rx FCS */
	if (bp->dev->flags & IFF_PROMISC)
		config |= MACB_BIT(CAF);	/* Copy All Frames */
	if (!(bp->dev->flags & IFF_BROADCAST))
		config |= MACB_BIT(NBC);	/* No BroadCast */
	macb_writel(bp, NCFGR, config);

	/* Initialize TX and RX buffers */
	macb_writel(bp, RBQP, bp->rx_ring_dma);
	macb_writel(bp, TBQP, bp->tx_ring_dma);

	/* Enable TX and RX */
	macb_writel(bp, NCR, MACB_BIT(RE) | MACB_BIT(TE));

	/* Enable interrupts */
	macb_writel(bp, IER, (MACB_BIT(RCOMP)
			      | MACB_BIT(RXUBR)
			      | MACB_BIT(ISR_ROVR)
			      | MACB_BIT(ISR_TUND)
			      | MACB_BIT(TCOMP)
			      | MACB_BIT(HRESP)));
}

static void macb_init_phy(struct net_device *dev)
{
	struct macb *bp = netdev_priv(dev);

	/* Set some reasonable default settings */
	macb_mdio_write(dev, bp->mii.phy_id, MII_ADVERTISE,
			ADVERTISE_CSMA | ADVERTISE_ALL);
	macb_mdio_write(dev, bp->mii.phy_id, MII_BMCR,
			(BMCR_SPEED100 | BMCR_ANENABLE
			 | BMCR_ANRESTART | BMCR_FULLDPLX));
}

static int macb_open(struct net_device *dev)
{
	struct macb *bp = netdev_priv(dev);
	int err;

	dev_dbg(&bp->pdev->dev, "open\n");

	if (!is_valid_ether_addr(dev->dev_addr))
		return -EADDRNOTAVAIL;

	err = macb_alloc_consistent(bp);
	if (err) {
		printk(KERN_ERR
		       "%s: Unable to allocate DMA memory (error %d)\n",
		       dev->name, err);
		return err;
	}

	macb_init_rings(bp);
	macb_init_hw(bp);
	macb_init_phy(dev);

	macb_check_media(bp, 1, 1);
	netif_start_queue(dev);

	schedule_delayed_work(&bp->periodic_task, HZ);

	return 0;
}

static int macb_close(struct net_device *dev)
{
	struct macb *bp = netdev_priv(dev);
	unsigned long flags;

	cancel_rearming_delayed_work(&bp->periodic_task);

	netif_stop_queue(dev);

	spin_lock_irqsave(&bp->lock, flags);
	macb_reset_hw(bp);
	netif_carrier_off(dev);
	spin_unlock_irqrestore(&bp->lock, flags);

	macb_free_consistent(bp);

	return 0;
}

static struct net_device_stats *macb_get_stats(struct net_device *dev)
{
	struct macb *bp = netdev_priv(dev);
	struct net_device_stats *nstat = &bp->stats;
	struct macb_stats *hwstat = &bp->hw_stats;

	/* Convert HW stats into netdevice stats */
	nstat->rx_errors = (hwstat->rx_fcs_errors +
			    hwstat->rx_align_errors +
			    hwstat->rx_resource_errors +
			    hwstat->rx_overruns +
			    hwstat->rx_oversize_pkts +
			    hwstat->rx_undersize_pkts +
			    hwstat->sqe_test_errors +
			    hwstat->rx_length_mismatch);
	nstat->tx_errors = (hwstat->tx_late_cols +
			    hwstat->tx_excessive_cols +
			    hwstat->tx_underruns +
			    hwstat->tx_carrier_errors);
	nstat->collisions = (hwstat->tx_single_cols +
			     hwstat->tx_multiple_cols +
			     hwstat->tx_excessive_cols);
	nstat->rx_length_errors = (hwstat->rx_oversize_pkts +
				   hwstat->rx_undersize_pkts +
				   hwstat->rx_length_mismatch);
	nstat->rx_over_errors = hwstat->rx_resource_errors;
	nstat->rx_crc_errors = hwstat->rx_fcs_errors;
	nstat->rx_frame_errors = hwstat->rx_align_errors;
	nstat->rx_fifo_errors = hwstat->rx_overruns;
	/* XXX: What does "missed" mean? */
	nstat->tx_aborted_errors = hwstat->tx_excessive_cols;
	nstat->tx_carrier_errors = hwstat->tx_carrier_errors;
	nstat->tx_fifo_errors = hwstat->tx_underruns;
	/* Don't know about heartbeat or window errors... */

	return nstat;
}

static int macb_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
	struct macb *bp = netdev_priv(dev);
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&bp->lock, flags);
	ret = mii_ethtool_gset(&bp->mii, cmd);
	spin_unlock_irqrestore(&bp->lock, flags);

	return ret;
}

static int macb_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
	struct macb *bp = netdev_priv(dev);
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&bp->lock, flags);
	ret = mii_ethtool_sset(&bp->mii, cmd);
	spin_unlock_irqrestore(&bp->lock, flags);

	return ret;
}

static void macb_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
{
	struct macb *bp = netdev_priv(dev);

	strcpy(info->driver, bp->pdev->dev.driver->name);
	strcpy(info->version, "$Revision: 1.14 $");
	strcpy(info->bus_info, bp->pdev->dev.bus_id);
}

static int macb_nway_reset(struct net_device *dev)
{
	struct macb *bp = netdev_priv(dev);
	return mii_nway_restart(&bp->mii);
}

static struct ethtool_ops macb_ethtool_ops = {
	.get_settings		= macb_get_settings,
	.set_settings		= macb_set_settings,
	.get_drvinfo		= macb_get_drvinfo,
	.nway_reset		= macb_nway_reset,
	.get_link		= ethtool_op_get_link,
};

static int macb_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
{
	struct macb *bp = netdev_priv(dev);
	unsigned long flags;
	int ret;

	if (!netif_running(dev))
		return -EINVAL;

	spin_lock_irqsave(&bp->lock, flags);
	ret = generic_mii_ioctl(&bp->mii, if_mii(rq), cmd, NULL);
	spin_unlock_irqrestore(&bp->lock, flags);

	return ret;
}

static ssize_t macb_mii_show(const struct class_device *cd, char *buf,
			     unsigned long addr)
{
	struct net_device *dev = to_net_dev(cd);
	struct macb *bp = netdev_priv(dev);
	ssize_t ret = -EINVAL;

	if (netif_running(dev)) {
		int value;

		value = macb_mdio_read(dev, bp->mii.phy_id, addr);
		ret = sprintf(buf, "0x%04x\n", (uint16_t)value);
	}

	return ret;
}

#define MII_ENTRY(name, addr)						\
static ssize_t show_##name(struct class_device *cd, char *buf)		\
{									\
	return macb_mii_show(cd, buf, addr);				\
}									\
static CLASS_DEVICE_ATTR(name, S_IRUGO, show_##name, NULL)

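/*
 * For reference (added for clarity): MII_ENTRY(bmcr, MII_BMCR) below
 * expands to roughly
 *
 *	static ssize_t show_bmcr(struct class_device *cd, char *buf)
 *	{
 *		return macb_mii_show(cd, buf, MII_BMCR);
 *	}
 *	static CLASS_DEVICE_ATTR(bmcr, S_IRUGO, show_bmcr, NULL);
 *
 * i.e. one read-only sysfs attribute per PHY register, all of them
 * backed by macb_mii_show().
 */
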
MII_ENTRY(bmcr, MII_BMCR);
MII_ENTRY(bmsr, MII_BMSR);
MII_ENTRY(physid1, MII_PHYSID1);
MII_ENTRY(physid2, MII_PHYSID2);
MII_ENTRY(advertise, MII_ADVERTISE);
MII_ENTRY(lpa, MII_LPA);
MII_ENTRY(expansion, MII_EXPANSION);

static struct attribute *macb_mii_attrs[] = {
	&class_device_attr_bmcr.attr,
	&class_device_attr_bmsr.attr,
	&class_device_attr_physid1.attr,
	&class_device_attr_physid2.attr,
	&class_device_attr_advertise.attr,
	&class_device_attr_lpa.attr,
	&class_device_attr_expansion.attr,
	NULL,
};

static struct attribute_group macb_mii_group = {
	.attrs	= macb_mii_attrs,
};

static void macb_unregister_sysfs(struct net_device *net)
{
	struct class_device *class_dev = &net->class_dev;

	sysfs_remove_group(&class_dev->kobj, &macb_mii_group);
}

static int macb_register_sysfs(struct net_device *net)
{
	struct class_device *class_dev = &net->class_dev;
	int ret;

	ret = sysfs_create_group(&class_dev->kobj, &macb_mii_group);
	if (ret)
		printk(KERN_WARNING
		       "%s: sysfs mii attribute registration failed: %d\n",
		       net->name, ret);
	return ret;
}

static int __devinit macb_probe(struct platform_device *pdev)
{
	struct eth_platform_data *pdata;
	struct resource *regs;
	struct net_device *dev;
	struct macb *bp;
	unsigned long pclk_hz;
	u32 config;
	int err = -ENXIO;

	regs = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (!regs) {
		dev_err(&pdev->dev, "no mmio resource defined\n");
		goto err_out;
	}

	err = -ENOMEM;
	dev = alloc_etherdev(sizeof(*bp));
	if (!dev) {
		dev_err(&pdev->dev, "etherdev alloc failed, aborting.\n");
		goto err_out;
	}

	SET_MODULE_OWNER(dev);
	SET_NETDEV_DEV(dev, &pdev->dev);

	/* TODO: Actually, we have some interesting features... */

	bp = netdev_priv(dev);
	bp->pdev = pdev;
	bp->dev = dev;

	spin_lock_init(&bp->lock);

	bp->pclk = clk_get(&pdev->dev, "pclk");
	if (IS_ERR(bp->pclk)) {
		dev_err(&pdev->dev, "failed to get pclk\n");
		goto err_out_free_dev;
	}
	bp->hclk = clk_get(&pdev->dev, "hclk");
	if (IS_ERR(bp->hclk)) {
		dev_err(&pdev->dev, "failed to get hclk\n");
		goto err_out_put_pclk;
	}

	clk_enable(bp->pclk);
	clk_enable(bp->hclk);

	bp->regs = ioremap(regs->start, regs->end - regs->start + 1);
	if (!bp->regs) {
		dev_err(&pdev->dev, "failed to map registers, aborting.\n");
		err = -ENOMEM;
		goto err_out_disable_clocks;
	}

	dev->irq = platform_get_irq(pdev, 0);
	err = request_irq(dev->irq, macb_interrupt, SA_SAMPLE_RANDOM,
			  dev->name, dev);
	if (err) {
		printk(KERN_ERR
		       "%s: Unable to request IRQ %d (error %d)\n",
		       dev->name, dev->irq, err);
		goto err_out_iounmap;
	}

	dev->open = macb_open;
	dev->stop = macb_close;
	dev->hard_start_xmit = macb_start_xmit;
	dev->get_stats = macb_get_stats;
	dev->do_ioctl = macb_ioctl;
	dev->poll = macb_poll;
	dev->ethtool_ops = &macb_ethtool_ops;

	dev->base_addr = regs->start;

	INIT_DELAYED_WORK(&bp->periodic_task, macb_periodic_task);
	mutex_init(&bp->mdio_mutex);
	init_completion(&bp->mdio_complete);

	/* Set MII management clock divider */
	pclk_hz = clk_get_rate(bp->pclk);
	if (pclk_hz <= 20000000)
		config = MACB_BF(CLK, MACB_CLK_DIV8);
	else if (pclk_hz <= 40000000)
		config = MACB_BF(CLK, MACB_CLK_DIV16);
	else if (pclk_hz <= 80000000)
		config = MACB_BF(CLK, MACB_CLK_DIV32);
	else
		config = MACB_BF(CLK, MACB_CLK_DIV64);
	macb_writel(bp, NCFGR, config);

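	/*
	 * Worked example (added for clarity): IEEE 802.3 caps the MDC
	 * clock at 2.5 MHz, which is why the divider is derived from the
	 * peripheral clock above.  With pclk at 60 MHz the 40-80 MHz
	 * branch selects MACB_CLK_DIV32, giving roughly 60 MHz / 32 =
	 * 1.9 MHz on the MDIO bus.
	 */
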
	bp->mii.dev = dev;
	bp->mii.mdio_read = macb_mdio_read;
	bp->mii.mdio_write = macb_mdio_write;
	bp->mii.phy_id_mask = 0x1f;
	bp->mii.reg_num_mask = 0x1f;

	macb_get_hwaddr(bp);
	err = macb_phy_probe(bp);
	if (err) {
		dev_err(&pdev->dev, "Failed to detect PHY, aborting.\n");
		goto err_out_free_irq;
	}

	pdata = pdev->dev.platform_data;
	if (pdata && pdata->is_rmii)
		macb_writel(bp, USRIO, 0);
	else
		macb_writel(bp, USRIO, MACB_BIT(MII));

	bp->tx_pending = DEF_TX_RING_PENDING;

	err = register_netdev(dev);
	if (err) {
		dev_err(&pdev->dev, "Cannot register net device, aborting.\n");
		goto err_out_free_irq;
	}

	platform_set_drvdata(pdev, dev);

	macb_register_sysfs(dev);

	printk(KERN_INFO "%s: Atmel MACB at 0x%08lx irq %d "
	       "(%02x:%02x:%02x:%02x:%02x:%02x)\n",
	       dev->name, dev->base_addr, dev->irq,
	       dev->dev_addr[0], dev->dev_addr[1], dev->dev_addr[2],
	       dev->dev_addr[3], dev->dev_addr[4], dev->dev_addr[5]);

	return 0;

err_out_free_irq:
	free_irq(dev->irq, dev);
err_out_iounmap:
	iounmap(bp->regs);
err_out_disable_clocks:
	clk_disable(bp->hclk);
	clk_disable(bp->pclk);
	clk_put(bp->hclk);
err_out_put_pclk:
	clk_put(bp->pclk);
err_out_free_dev:
	free_netdev(dev);
err_out:
	platform_set_drvdata(pdev, NULL);
	return err;
}

static int __devexit macb_remove(struct platform_device *pdev)
{
	struct net_device *dev;
	struct macb *bp;

	dev = platform_get_drvdata(pdev);

	if (dev) {
		bp = netdev_priv(dev);
		macb_unregister_sysfs(dev);
		unregister_netdev(dev);
		free_irq(dev->irq, dev);
		iounmap(bp->regs);
		clk_disable(bp->hclk);
		clk_disable(bp->pclk);
		clk_put(bp->hclk);
		clk_put(bp->pclk);
		free_netdev(dev);
		platform_set_drvdata(pdev, NULL);
	}

	return 0;
}

static struct platform_driver macb_driver = {
	.probe		= macb_probe,
	.remove		= __devexit_p(macb_remove),
	.driver		= {
		.name		= "macb",
	},
};

static int __init macb_init(void)
{
	return platform_driver_register(&macb_driver);
}

static void __exit macb_exit(void)
{
	platform_driver_unregister(&macb_driver);
}

module_init(macb_init);
module_exit(macb_exit);

MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Atmel MACB Ethernet driver");
MODULE_AUTHOR("Haavard Skinnemoen <hskinnemoen@atmel.com>");