/*
 *  Driver for the IDT RC32434 (Korina) on-chip ethernet controller.
 *
 *  Copyright 2004 IDT Inc. (rischelp@idt.com)
 *  Copyright 2006 Felix Fietkau <nbd@openwrt.org>
 *  Copyright 2008 Florian Fainelli <florian@openwrt.org>
 *
 *  This program is free software; you can redistribute it and/or modify it
 *  under the terms of the GNU General Public License as published by the
 *  Free Software Foundation; either version 2 of the License, or (at your
 *  option) any later version.
 *
 *  THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED
 *  WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
 *  MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN
 *  NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 *  INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 *  NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF
 *  USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
 *  ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *  (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 *  THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 *  You should have received a copy of the GNU General Public License along
 *  with this program; if not, write to the Free Software Foundation, Inc.,
 *  675 Mass Ave, Cambridge, MA 02139, USA.
 *
 *  Writing to a DMA status register:
 *
 *  When writing to the status register, you should mask the bit you have
 *  been testing the status register with. Both Tx and Rx DMA registers
 *  should stick to this procedure.
 */
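/*
 * A minimal sketch of that rule (assuming the dmas register and the
 * DMA_STAT_DONE bit used throughout this file): acknowledge only the bit
 * that was tested by writing its complement back, e.g.
 *
 *	dmas = readl(&ch->dmas);
 *	if (dmas & DMA_STAT_DONE)
 *		writel(~DMA_STAT_DONE, &ch->dmas);
 */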
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/moduleparam.h>
#include <linux/sched.h>
#include <linux/ctype.h>
#include <linux/types.h>
#include <linux/interrupt.h>
#include <linux/ioport.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/delay.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/errno.h>
#include <linux/platform_device.h>
#include <linux/mii.h>
#include <linux/ethtool.h>
#include <linux/crc32.h>

#include <asm/bootinfo.h>
#include <asm/bitops.h>
#include <asm/pgtable.h>

#include <asm/mach-rc32434/rb.h>
#include <asm/mach-rc32434/rc32434.h>
#include <asm/mach-rc32434/eth.h>
#include <asm/mach-rc32434/dma_v.h>
#define DRV_NAME	"korina"
#define DRV_VERSION	"0.10"
#define DRV_RELDATE	"04Mar2008"

#define STATION_ADDRESS_HIGH(dev) (((dev)->dev_addr[0] << 8) | \
				   ((dev)->dev_addr[1]))
#define STATION_ADDRESS_LOW(dev)  (((dev)->dev_addr[2] << 24) | \
				   ((dev)->dev_addr[3] << 16) | \
				   ((dev)->dev_addr[4] << 8)  | \
				   ((dev)->dev_addr[5]))

#define MII_CLOCK	1250000		/* no more than 2.5MHz */

/* the following must be powers of two */
#define KORINA_NUM_RDS	64		/* number of receive descriptors */
#define KORINA_NUM_TDS	64		/* number of transmit descriptors */

/* KORINA_RBSIZE is the hardware's default maximum receive
 * frame size in bytes. Having this hardcoded means that there
 * is no support for MTU sizes greater than 1500. */
#define KORINA_RBSIZE	1536		/* size of one resource buffer = Ether MTU */
#define KORINA_RDS_MASK	(KORINA_NUM_RDS - 1)
#define KORINA_TDS_MASK	(KORINA_NUM_TDS - 1)
#define RD_RING_SIZE	(KORINA_NUM_RDS * sizeof(struct dma_desc))
#define TD_RING_SIZE	(KORINA_NUM_TDS * sizeof(struct dma_desc))

#define TX_TIMEOUT	(6000 * HZ / 1000)
enum chain_status {
	desc_filled,
	desc_empty
};

#define IS_DMA_FINISHED(X)	(((X) & (DMA_DESC_FINI)) != 0)
#define IS_DMA_DONE(X)		(((X) & (DMA_DESC_DONE)) != 0)
#define RCVPKT_LENGTH(X)	(((X) & ETH_RX_LEN) >> ETH_RX_LEN_BIT)
/* Information that needs to be kept for each board. */
struct korina_private {
	struct eth_regs *eth_regs;
	struct dma_reg *rx_dma_regs;
	struct dma_reg *tx_dma_regs;
	struct dma_desc *td_ring;	/* transmit descriptor ring */
	struct dma_desc *rd_ring;	/* receive descriptor ring */

	struct sk_buff *tx_skb[KORINA_NUM_TDS];
	struct sk_buff *rx_skb[KORINA_NUM_RDS];

	int rx_next_done;
	int rx_chain_head;
	int rx_chain_tail;
	enum chain_status rx_chain_status;

	int tx_next_done;
	int tx_chain_head;
	int tx_chain_tail;
	enum chain_status tx_chain_status;
	int tx_count;
	int tx_full;

	int rx_irq;
	int tx_irq;
	int ovr_irq;
	int und_irq;

	spinlock_t lock;	/* NIC xmit lock */

	struct napi_struct napi;
	struct timer_list media_check_timer;
	struct mii_if_info mii_if;
	struct work_struct restart_task;
	struct net_device *dev;
	int phy_addr;
};

extern unsigned int idt_cpu_freq;
static inline void korina_start_dma(struct dma_reg *ch, u32 dma_addr)
{
	writel(0, &ch->dmandptr);
	writel(dma_addr, &ch->dmadptr);
}
static inline void korina_abort_dma(struct net_device *dev,
					struct dma_reg *ch)
{
	if (readl(&ch->dmac) & DMA_CHAN_RUN_BIT) {
		writel(0x10, &ch->dmac);

		while (!(readl(&ch->dmas) & DMA_STAT_HALT))
			netif_trans_update(dev);

		writel(0, &ch->dmas);
	}

	writel(0, &ch->dmadptr);
	writel(0, &ch->dmandptr);
}
static inline void korina_chain_dma(struct dma_reg *ch, u32 dma_addr)
{
	writel(dma_addr, &ch->dmandptr);
}
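/*
 * In short: korina_start_dma() kicks an idle channel by clearing the
 * next-descriptor pointer and writing the descriptor address to dmadptr,
 * korina_chain_dma() appends work to a running channel through dmandptr,
 * and korina_abort_dma() halts the channel and clears both pointers.
 */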
static void korina_abort_tx(struct net_device *dev)
{
	struct korina_private *lp = netdev_priv(dev);

	korina_abort_dma(dev, lp->tx_dma_regs);
}
static void korina_abort_rx(struct net_device *dev)
{
	struct korina_private *lp = netdev_priv(dev);

	korina_abort_dma(dev, lp->rx_dma_regs);
}
static void korina_start_rx(struct korina_private *lp,
					struct dma_desc *rd)
{
	korina_start_dma(lp->rx_dma_regs, CPHYSADDR(rd));
}
static void korina_chain_rx(struct korina_private *lp,
					struct dma_desc *rd)
{
	korina_chain_dma(lp->rx_dma_regs, CPHYSADDR(rd));
}
/* transmit packet */
static int korina_send_packet(struct sk_buff *skb, struct net_device *dev)
{
	struct korina_private *lp = netdev_priv(dev);
	unsigned long flags;
	u32 length;
	u32 chain_prev, chain_next;
	struct dma_desc *td;

	spin_lock_irqsave(&lp->lock, flags);

	td = &lp->td_ring[lp->tx_chain_tail];

	/* stop queue when full, drop pkts if queue already full */
	if (lp->tx_count >= (KORINA_NUM_TDS - 2)) {
		lp->tx_full = 1;

		if (lp->tx_count == (KORINA_NUM_TDS - 2))
			netif_stop_queue(dev);
		else {
			dev->stats.tx_dropped++;
			dev_kfree_skb_any(skb);
			spin_unlock_irqrestore(&lp->lock, flags);

			return NETDEV_TX_BUSY;
		}
	}

	lp->tx_count++;

	lp->tx_skb[lp->tx_chain_tail] = skb;

	length = skb->len;
	dma_cache_wback((u32)skb->data, skb->len);

	/* Setup the transmit descriptor. */
	dma_cache_inv((u32) td, sizeof(*td));
	td->ca = CPHYSADDR(skb->data);
	chain_prev = (lp->tx_chain_tail - 1) & KORINA_TDS_MASK;
	chain_next = (lp->tx_chain_tail + 1) & KORINA_TDS_MASK;

	if (readl(&(lp->tx_dma_regs->dmandptr)) == 0) {
		if (lp->tx_chain_status == desc_empty) {
			/* Update tail */
			td->control = DMA_COUNT(length) |
					DMA_DESC_COF | DMA_DESC_IOF;
			/* Move tail */
			lp->tx_chain_tail = chain_next;
			/* Write to NDPTR */
			writel(CPHYSADDR(&lp->td_ring[lp->tx_chain_head]),
					&lp->tx_dma_regs->dmandptr);
			/* Move head to tail */
			lp->tx_chain_head = lp->tx_chain_tail;
		} else {
			/* Update tail */
			td->control = DMA_COUNT(length) |
					DMA_DESC_COF | DMA_DESC_IOF;
			/* Link to prev */
			lp->td_ring[chain_prev].control &=
					~DMA_DESC_COF;
			lp->td_ring[chain_prev].link = CPHYSADDR(td);
			/* Move tail */
			lp->tx_chain_tail = chain_next;
			/* Write to NDPTR */
			writel(CPHYSADDR(&lp->td_ring[lp->tx_chain_head]),
					&(lp->tx_dma_regs->dmandptr));
			/* Move head to tail */
			lp->tx_chain_head = lp->tx_chain_tail;
			lp->tx_chain_status = desc_empty;
		}
	} else {
		if (lp->tx_chain_status == desc_empty) {
			/* Update tail */
			td->control = DMA_COUNT(length) |
					DMA_DESC_COF | DMA_DESC_IOF;
			/* Move tail */
			lp->tx_chain_tail = chain_next;
			lp->tx_chain_status = desc_filled;
		} else {
			/* Update tail */
			td->control = DMA_COUNT(length) |
					DMA_DESC_COF | DMA_DESC_IOF;
			lp->td_ring[chain_prev].control &=
					~DMA_DESC_COF;
			lp->td_ring[chain_prev].link = CPHYSADDR(td);
			lp->tx_chain_tail = chain_next;
		}
	}
	dma_cache_wback((u32) td, sizeof(*td));

	netif_trans_update(dev);
	spin_unlock_irqrestore(&lp->lock, flags);

	return NETDEV_TX_OK;
}
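/*
 * The transmit path above distinguishes four cases: DMA engine idle or
 * running (dmandptr == 0 or not), crossed with the software chain being
 * empty or already filled. When the engine is idle the new descriptor (or
 * the whole pending chain) is handed to dmandptr immediately; otherwise it
 * is only linked behind the previous descriptor and handed over later from
 * the Tx DMA interrupt once the engine stops.
 */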
static int mdio_read(struct net_device *dev, int mii_id, int reg)
{
	struct korina_private *lp = netdev_priv(dev);
	int ret;

	mii_id = ((lp->rx_irq == 0x2c ? 1 : 0) << 8);

	writel(0, &lp->eth_regs->miimcfg);
	writel(0, &lp->eth_regs->miimcmd);
	writel(mii_id | reg, &lp->eth_regs->miimaddr);
	writel(ETH_MII_CMD_SCN, &lp->eth_regs->miimcmd);

	ret = (int)(readl(&lp->eth_regs->miimrdd));
	return ret;
}
static void mdio_write(struct net_device *dev, int mii_id, int reg, int val)
{
	struct korina_private *lp = netdev_priv(dev);

	mii_id = ((lp->rx_irq == 0x2c ? 1 : 0) << 8);

	writel(0, &lp->eth_regs->miimcfg);
	writel(1, &lp->eth_regs->miimcmd);
	writel(mii_id | reg, &lp->eth_regs->miimaddr);
	writel(ETH_MII_CMD_SCN, &lp->eth_regs->miimcmd);
	writel(val, &lp->eth_regs->miimwtd);
}
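/*
 * Note on the two MDIO helpers above: the PHY address is not taken from
 * the mii_id argument but derived from the Rx DMA IRQ number, so the
 * instance using IRQ 0x2c talks to PHY 1 and any other instance to PHY 0.
 * The same encoding is used for lp->phy_addr in korina_probe().
 */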
/* Ethernet Rx DMA interrupt */
static irqreturn_t korina_rx_dma_interrupt(int irq, void *dev_id)
{
	struct net_device *dev = dev_id;
	struct korina_private *lp = netdev_priv(dev);
	u32 dmas, dmasm;
	irqreturn_t retval;

	dmas = readl(&lp->rx_dma_regs->dmas);
	if (dmas & (DMA_STAT_DONE | DMA_STAT_HALT | DMA_STAT_ERR)) {
		dmasm = readl(&lp->rx_dma_regs->dmasm);
		writel(dmasm | (DMA_STAT_DONE |
				DMA_STAT_HALT | DMA_STAT_ERR),
				&lp->rx_dma_regs->dmasm);

		napi_schedule(&lp->napi);

		if (dmas & DMA_STAT_ERR)
			printk(KERN_ERR "%s: DMA error\n", dev->name);

		retval = IRQ_HANDLED;
	} else
		retval = IRQ_NONE;

	return retval;
}
static int korina_rx(struct net_device *dev, int limit)
{
	struct korina_private *lp = netdev_priv(dev);
	struct dma_desc *rd = &lp->rd_ring[lp->rx_next_done];
	struct sk_buff *skb, *skb_new;
	u8 *pkt_buf;
	u32 devcs, pkt_len, dmas;
	int count;

	dma_cache_inv((u32)rd, sizeof(*rd));

	for (count = 0; count < limit; count++) {
		skb = lp->rx_skb[lp->rx_next_done];
		skb_new = NULL;

		devcs = rd->devcs;

		if ((KORINA_RBSIZE - (u32)DMA_COUNT(rd->control)) == 0)
			break;

		/* Update statistics counters */
		if (devcs & ETH_RX_CRC)
			dev->stats.rx_crc_errors++;
		if (devcs & ETH_RX_LOR)
			dev->stats.rx_length_errors++;
		if (devcs & ETH_RX_LE)
			dev->stats.rx_length_errors++;
		if (devcs & ETH_RX_OVR)
			dev->stats.rx_fifo_errors++;
		if (devcs & ETH_RX_CV)
			dev->stats.rx_frame_errors++;
		if (devcs & ETH_RX_CES)
			dev->stats.rx_length_errors++;
		if (devcs & ETH_RX_MP)
			dev->stats.multicast++;

		if ((devcs & ETH_RX_LD) != ETH_RX_LD) {
			/* check that this is a whole packet
			 * WARNING: DMA_FD bit incorrectly set
			 * in Rc32434 (errata ref #077) */
			dev->stats.rx_errors++;
			dev->stats.rx_dropped++;
		} else if ((devcs & ETH_RX_ROK)) {
			pkt_len = RCVPKT_LENGTH(devcs);

			/* must be the (first and) last
			 * descriptor then */
			pkt_buf = (u8 *)lp->rx_skb[lp->rx_next_done]->data;

			/* invalidate the cache */
			dma_cache_inv((unsigned long)pkt_buf, pkt_len - 4);

			/* Malloc up new buffer. */
			skb_new = netdev_alloc_skb_ip_align(dev, KORINA_RBSIZE);
			if (!skb_new)
				break;

			/* Do not count the CRC */
			skb_put(skb, pkt_len - 4);
			skb->protocol = eth_type_trans(skb, dev);

			/* Pass the packet to upper layers */
			netif_receive_skb(skb);
			dev->stats.rx_packets++;
			dev->stats.rx_bytes += pkt_len;

			/* Update the mcast stats */
			if (devcs & ETH_RX_MP)
				dev->stats.multicast++;

			lp->rx_skb[lp->rx_next_done] = skb_new;
		}

		/* Restore descriptor's curr_addr */
		if (skb_new)
			rd->ca = CPHYSADDR(skb_new->data);
		else
			rd->ca = CPHYSADDR(skb->data);

		rd->control = DMA_COUNT(KORINA_RBSIZE) |
			DMA_DESC_COD | DMA_DESC_IOD;
		lp->rd_ring[(lp->rx_next_done - 1) &
			KORINA_RDS_MASK].control &=
			~DMA_DESC_COD;

		lp->rx_next_done = (lp->rx_next_done + 1) & KORINA_RDS_MASK;
		dma_cache_wback((u32)rd, sizeof(*rd));
		rd = &lp->rd_ring[lp->rx_next_done];
		writel(~DMA_STAT_DONE, &lp->rx_dma_regs->dmas);
	}

	dmas = readl(&lp->rx_dma_regs->dmas);

	if (dmas & DMA_STAT_HALT) {
		writel(~(DMA_STAT_HALT | DMA_STAT_ERR),
				&lp->rx_dma_regs->dmas);

		skb = lp->rx_skb[lp->rx_next_done];
		rd->ca = CPHYSADDR(skb->data);
		dma_cache_wback((u32)rd, sizeof(*rd));
		korina_chain_rx(lp, rd);
	}

	return count;
}
static int korina_poll(struct napi_struct *napi, int budget)
{
	struct korina_private *lp =
		container_of(napi, struct korina_private, napi);
	struct net_device *dev = lp->dev;
	int work_done;

	work_done = korina_rx(dev, budget);
	if (work_done < budget) {
		napi_complete_done(napi, work_done);

		writel(readl(&lp->rx_dma_regs->dmasm) &
			~(DMA_STAT_DONE | DMA_STAT_HALT | DMA_STAT_ERR),
			&lp->rx_dma_regs->dmasm);
	}
	return work_done;
}
/*
 * Set or clear the multicast filter for this adaptor.
 */
static void korina_multicast_list(struct net_device *dev)
{
	struct korina_private *lp = netdev_priv(dev);
	unsigned long flags;
	struct netdev_hw_addr *ha;
	u32 recognise = ETH_ARC_AB;	/* always accept broadcasts */

	/* Set promiscuous mode */
	if (dev->flags & IFF_PROMISC)
		recognise |= ETH_ARC_PRO;

	else if ((dev->flags & IFF_ALLMULTI) || (netdev_mc_count(dev) > 4))
		/* All multicast and broadcast */
		recognise |= ETH_ARC_AM;

	/* Build the hash table */
	if (netdev_mc_count(dev) > 4) {
		u16 hash_table[4] = { 0 };
		u32 crc;

		netdev_for_each_mc_addr(ha, dev) {
			crc = ether_crc_le(6, ha->addr);
			crc >>= 26;
			hash_table[crc >> 4] |= 1 << (15 - (crc & 0xf));
		}
		/* Accept filtered multicast */
		recognise |= ETH_ARC_AFM;

		/* Fill the MAC hash tables with their values */
		writel((u32)(hash_table[1] << 16 | hash_table[0]),
					&lp->eth_regs->ethhash0);
		writel((u32)(hash_table[3] << 16 | hash_table[2]),
					&lp->eth_regs->ethhash1);
	}

	spin_lock_irqsave(&lp->lock, flags);
	writel(recognise, &lp->eth_regs->etharc);
	spin_unlock_irqrestore(&lp->lock, flags);
}
static void korina_tx(struct net_device *dev)
{
	struct korina_private *lp = netdev_priv(dev);
	struct dma_desc *td = &lp->td_ring[lp->tx_next_done];
	u32 devcs;
	u32 dmas;

	spin_lock(&lp->lock);

	/* Process all desc that are done */
	while (IS_DMA_FINISHED(td->control)) {
		if (lp->tx_full == 1) {
			netif_wake_queue(dev);
			lp->tx_full = 0;
		}

		devcs = lp->td_ring[lp->tx_next_done].devcs;
		if ((devcs & (ETH_TX_FD | ETH_TX_LD)) !=
				(ETH_TX_FD | ETH_TX_LD)) {
			dev->stats.tx_errors++;
			dev->stats.tx_dropped++;

			/* Should never happen */
			printk(KERN_ERR "%s: split tx ignored\n",
							dev->name);
		} else if (devcs & ETH_TX_TOK) {
			dev->stats.tx_packets++;
			dev->stats.tx_bytes +=
				lp->tx_skb[lp->tx_next_done]->len;
		} else {
			dev->stats.tx_errors++;
			dev->stats.tx_dropped++;

			/* Underflow */
			if (devcs & ETH_TX_UND)
				dev->stats.tx_fifo_errors++;

			/* Oversized frame */
			if (devcs & ETH_TX_OF)
				dev->stats.tx_aborted_errors++;

			/* Excessive deferrals */
			if (devcs & ETH_TX_ED)
				dev->stats.tx_carrier_errors++;

			/* Collisions: medium busy */
			if (devcs & ETH_TX_EC)
				dev->stats.collisions++;

			/* Late collision */
			if (devcs & ETH_TX_LC)
				dev->stats.tx_window_errors++;
		}

		/* We must always free the original skb */
		if (lp->tx_skb[lp->tx_next_done]) {
			dev_kfree_skb_any(lp->tx_skb[lp->tx_next_done]);
			lp->tx_skb[lp->tx_next_done] = NULL;
		}

		lp->td_ring[lp->tx_next_done].control = DMA_DESC_IOF;
		lp->td_ring[lp->tx_next_done].devcs = ETH_TX_FD | ETH_TX_LD;
		lp->td_ring[lp->tx_next_done].link = 0;
		lp->td_ring[lp->tx_next_done].ca = 0;
		lp->tx_count--;

		/* Go on to next transmission */
		lp->tx_next_done = (lp->tx_next_done + 1) & KORINA_TDS_MASK;
		td = &lp->td_ring[lp->tx_next_done];
	}

	/* Clear the DMA status register */
	dmas = readl(&lp->tx_dma_regs->dmas);
	writel(~dmas, &lp->tx_dma_regs->dmas);

	writel(readl(&lp->tx_dma_regs->dmasm) &
			~(DMA_STAT_FINI | DMA_STAT_ERR),
			&lp->tx_dma_regs->dmasm);

	spin_unlock(&lp->lock);
}
static irqreturn_t
korina_tx_dma_interrupt(int irq, void *dev_id)
{
	struct net_device *dev = dev_id;
	struct korina_private *lp = netdev_priv(dev);
	u32 dmas, dmasm;
	irqreturn_t retval;

	dmas = readl(&lp->tx_dma_regs->dmas);

	if (dmas & (DMA_STAT_FINI | DMA_STAT_ERR)) {
		dmasm = readl(&lp->tx_dma_regs->dmasm);
		writel(dmasm | (DMA_STAT_FINI | DMA_STAT_ERR),
				&lp->tx_dma_regs->dmasm);

		korina_tx(dev);

		if (lp->tx_chain_status == desc_filled &&
			(readl(&(lp->tx_dma_regs->dmandptr)) == 0)) {
			writel(CPHYSADDR(&lp->td_ring[lp->tx_chain_head]),
				&(lp->tx_dma_regs->dmandptr));
			lp->tx_chain_status = desc_empty;
			lp->tx_chain_head = lp->tx_chain_tail;
			netif_trans_update(dev);
		}
		if (dmas & DMA_STAT_ERR)
			printk(KERN_ERR "%s: DMA error\n", dev->name);

		retval = IRQ_HANDLED;
	} else
		retval = IRQ_NONE;

	return retval;
}
static void korina_check_media(struct net_device *dev, unsigned int init_media)
{
	struct korina_private *lp = netdev_priv(dev);

	mii_check_media(&lp->mii_if, 0, init_media);

	if (lp->mii_if.full_duplex)
		writel(readl(&lp->eth_regs->ethmac2) | ETH_MAC2_FD,
						&lp->eth_regs->ethmac2);
	else
		writel(readl(&lp->eth_regs->ethmac2) & ~ETH_MAC2_FD,
						&lp->eth_regs->ethmac2);
}
static void korina_poll_media(unsigned long data)
{
	struct net_device *dev = (struct net_device *) data;
	struct korina_private *lp = netdev_priv(dev);

	korina_check_media(dev, 0);
	mod_timer(&lp->media_check_timer, jiffies + HZ);
}
static void korina_set_carrier(struct mii_if_info *mii)
{
	if (mii->force_media) {
		/* autoneg is off: Link is always assumed to be up */
		if (!netif_carrier_ok(mii->dev))
			netif_carrier_on(mii->dev);
	} else	/* Let MII library update carrier status */
		korina_check_media(mii->dev, 0);
}
static int korina_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
{
	struct korina_private *lp = netdev_priv(dev);
	struct mii_ioctl_data *data = if_mii(rq);
	int rc;

	if (!netif_running(dev))
		return -EINVAL;
	spin_lock_irq(&lp->lock);
	rc = generic_mii_ioctl(&lp->mii_if, data, cmd, NULL);
	spin_unlock_irq(&lp->lock);
	korina_set_carrier(&lp->mii_if);

	return rc;
}
/* ethtool helpers */
static void netdev_get_drvinfo(struct net_device *dev,
				struct ethtool_drvinfo *info)
{
	struct korina_private *lp = netdev_priv(dev);

	strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
	strlcpy(info->version, DRV_VERSION, sizeof(info->version));
	strlcpy(info->bus_info, lp->dev->name, sizeof(info->bus_info));
}
static int netdev_get_link_ksettings(struct net_device *dev,
				     struct ethtool_link_ksettings *cmd)
{
	struct korina_private *lp = netdev_priv(dev);

	spin_lock_irq(&lp->lock);
	mii_ethtool_get_link_ksettings(&lp->mii_if, cmd);
	spin_unlock_irq(&lp->lock);

	return 0;
}
static int netdev_set_link_ksettings(struct net_device *dev,
				     const struct ethtool_link_ksettings *cmd)
{
	struct korina_private *lp = netdev_priv(dev);
	int rc;

	spin_lock_irq(&lp->lock);
	rc = mii_ethtool_set_link_ksettings(&lp->mii_if, cmd);
	spin_unlock_irq(&lp->lock);
	korina_set_carrier(&lp->mii_if);

	return rc;
}
static u32 netdev_get_link(struct net_device *dev)
{
	struct korina_private *lp = netdev_priv(dev);

	return mii_link_ok(&lp->mii_if);
}
static const struct ethtool_ops netdev_ethtool_ops = {
	.get_drvinfo		= netdev_get_drvinfo,
	.get_link		= netdev_get_link,
	.get_link_ksettings	= netdev_get_link_ksettings,
	.set_link_ksettings	= netdev_set_link_ksettings,
};
static int korina_alloc_ring(struct net_device *dev)
{
	struct korina_private *lp = netdev_priv(dev);
	struct sk_buff *skb;
	int i;

	/* Initialize the transmit descriptors */
	for (i = 0; i < KORINA_NUM_TDS; i++) {
		lp->td_ring[i].control = DMA_DESC_IOF;
		lp->td_ring[i].devcs = ETH_TX_FD | ETH_TX_LD;
		lp->td_ring[i].ca = 0;
		lp->td_ring[i].link = 0;
	}
	lp->tx_next_done = lp->tx_chain_head = lp->tx_chain_tail =
			lp->tx_full = lp->tx_count = 0;
	lp->tx_chain_status = desc_empty;

	/* Initialize the receive descriptors */
	for (i = 0; i < KORINA_NUM_RDS; i++) {
		skb = netdev_alloc_skb_ip_align(dev, KORINA_RBSIZE);
		if (!skb)
			return -ENOMEM;
		lp->rx_skb[i] = skb;
		lp->rd_ring[i].control = DMA_DESC_IOD |
				DMA_COUNT(KORINA_RBSIZE);
		lp->rd_ring[i].devcs = 0;
		lp->rd_ring[i].ca = CPHYSADDR(skb->data);
		lp->rd_ring[i].link = CPHYSADDR(&lp->rd_ring[i+1]);
	}

	/* loop back receive descriptors, so the last
	 * descriptor points to the first one */
	lp->rd_ring[i - 1].link = CPHYSADDR(&lp->rd_ring[0]);
	lp->rd_ring[i - 1].control |= DMA_DESC_COD;

	lp->rx_next_done = 0;
	lp->rx_chain_head = 0;
	lp->rx_chain_tail = 0;
	lp->rx_chain_status = desc_empty;

	return 0;
}
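/*
 * The receive descriptors built above form a circular list: each entry
 * links to the next and the last one is pointed back at entry 0 and
 * marked with DMA_DESC_COD, which the Rx path clears again as descriptors
 * are refilled (see korina_rx()).
 */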
static void korina_free_ring(struct net_device *dev)
{
	struct korina_private *lp = netdev_priv(dev);
	int i;

	for (i = 0; i < KORINA_NUM_RDS; i++) {
		lp->rd_ring[i].control = 0;
		if (lp->rx_skb[i])
			dev_kfree_skb_any(lp->rx_skb[i]);
		lp->rx_skb[i] = NULL;
	}

	for (i = 0; i < KORINA_NUM_TDS; i++) {
		lp->td_ring[i].control = 0;
		if (lp->tx_skb[i])
			dev_kfree_skb_any(lp->tx_skb[i]);
		lp->tx_skb[i] = NULL;
	}
}
/*
 * Initialize the RC32434 ethernet controller.
 */
static int korina_init(struct net_device *dev)
{
	struct korina_private *lp = netdev_priv(dev);

	/* Disable DMA */
	korina_abort_tx(dev);
	korina_abort_rx(dev);

	/* reset ethernet logic */
	writel(0, &lp->eth_regs->ethintfc);
	while ((readl(&lp->eth_regs->ethintfc) & ETH_INT_FC_RIP))
		netif_trans_update(dev);

	/* Enable Ethernet Interface */
	writel(ETH_INT_FC_EN, &lp->eth_regs->ethintfc);

	/* Allocate rings */
	if (korina_alloc_ring(dev)) {
		printk(KERN_ERR "%s: descriptor allocation failed\n", dev->name);
		korina_free_ring(dev);
		return -ENOMEM;
	}

	writel(0, &lp->rx_dma_regs->dmas);
	/* Start Rx DMA */
	korina_start_rx(lp, &lp->rd_ring[0]);

	writel(readl(&lp->tx_dma_regs->dmasm) &
			~(DMA_STAT_FINI | DMA_STAT_ERR),
			&lp->tx_dma_regs->dmasm);
	writel(readl(&lp->rx_dma_regs->dmasm) &
			~(DMA_STAT_DONE | DMA_STAT_HALT | DMA_STAT_ERR),
			&lp->rx_dma_regs->dmasm);

	/* Accept only packets destined for this Ethernet device address */
	writel(ETH_ARC_AB, &lp->eth_regs->etharc);

	/* Set all Ether station address registers to their initial values */
	writel(STATION_ADDRESS_LOW(dev), &lp->eth_regs->ethsal0);
	writel(STATION_ADDRESS_HIGH(dev), &lp->eth_regs->ethsah0);

	writel(STATION_ADDRESS_LOW(dev), &lp->eth_regs->ethsal1);
	writel(STATION_ADDRESS_HIGH(dev), &lp->eth_regs->ethsah1);

	writel(STATION_ADDRESS_LOW(dev), &lp->eth_regs->ethsal2);
	writel(STATION_ADDRESS_HIGH(dev), &lp->eth_regs->ethsah2);

	writel(STATION_ADDRESS_LOW(dev), &lp->eth_regs->ethsal3);
	writel(STATION_ADDRESS_HIGH(dev), &lp->eth_regs->ethsah3);

	/* Frame Length Checking, Pad Enable, CRC Enable, Full Duplex set */
	writel(ETH_MAC2_PE | ETH_MAC2_CEN | ETH_MAC2_FD,
			&lp->eth_regs->ethmac2);

	/* Back to back inter-packet-gap */
	writel(0x15, &lp->eth_regs->ethipgt);
	/* Non - Back to back inter-packet-gap */
	writel(0x12, &lp->eth_regs->ethipgr);

	/* Management Clock Prescaler Divisor
	 * Clock independent setting */
	writel(((idt_cpu_freq) / MII_CLOCK + 1) & ~1,
			&lp->eth_regs->ethmcp);

	/* don't transmit until fifo contains 48b */
	writel(48, &lp->eth_regs->ethfifott);

	writel(ETH_MAC1_RE, &lp->eth_regs->ethmac1);

	napi_enable(&lp->napi);
	netif_start_queue(dev);

	return 0;
}
/*
 * Restart the RC32434 ethernet controller.
 */
static void korina_restart_task(struct work_struct *work)
{
	struct korina_private *lp = container_of(work,
			struct korina_private, restart_task);
	struct net_device *dev = lp->dev;

	/* Disable interrupts */
	disable_irq(lp->rx_irq);
	disable_irq(lp->tx_irq);
	disable_irq(lp->ovr_irq);
	disable_irq(lp->und_irq);

	writel(readl(&lp->tx_dma_regs->dmasm) |
				DMA_STAT_FINI | DMA_STAT_ERR,
				&lp->tx_dma_regs->dmasm);
	writel(readl(&lp->rx_dma_regs->dmasm) |
				DMA_STAT_DONE | DMA_STAT_HALT | DMA_STAT_ERR,
				&lp->rx_dma_regs->dmasm);

	napi_disable(&lp->napi);

	korina_free_ring(dev);

	if (korina_init(dev) < 0) {
		printk(KERN_ERR "%s: cannot restart device\n", dev->name);
		return;
	}
	korina_multicast_list(dev);

	enable_irq(lp->und_irq);
	enable_irq(lp->ovr_irq);
	enable_irq(lp->tx_irq);
	enable_irq(lp->rx_irq);
}
static void korina_clear_and_restart(struct net_device *dev, u32 value)
{
	struct korina_private *lp = netdev_priv(dev);

	netif_stop_queue(dev);
	writel(value, &lp->eth_regs->ethintfc);
	schedule_work(&lp->restart_task);
}
/* Ethernet Tx Underflow interrupt */
static irqreturn_t korina_und_interrupt(int irq, void *dev_id)
{
	struct net_device *dev = dev_id;
	struct korina_private *lp = netdev_priv(dev);
	unsigned int und;

	spin_lock(&lp->lock);

	und = readl(&lp->eth_regs->ethintfc);

	if (und & ETH_INT_FC_UND)
		korina_clear_and_restart(dev, und & ~ETH_INT_FC_UND);

	spin_unlock(&lp->lock);

	return IRQ_HANDLED;
}
static void korina_tx_timeout(struct net_device *dev)
{
	struct korina_private *lp = netdev_priv(dev);

	schedule_work(&lp->restart_task);
}
/* Ethernet Rx Overflow interrupt */
static irqreturn_t
korina_ovr_interrupt(int irq, void *dev_id)
{
	struct net_device *dev = dev_id;
	struct korina_private *lp = netdev_priv(dev);
	unsigned int ovr;

	spin_lock(&lp->lock);
	ovr = readl(&lp->eth_regs->ethintfc);

	if (ovr & ETH_INT_FC_OVR)
		korina_clear_and_restart(dev, ovr & ~ETH_INT_FC_OVR);

	spin_unlock(&lp->lock);

	return IRQ_HANDLED;
}
#ifdef CONFIG_NET_POLL_CONTROLLER
static void korina_poll_controller(struct net_device *dev)
{
	disable_irq(dev->irq);
	korina_tx_dma_interrupt(dev->irq, dev);
	enable_irq(dev->irq);
}
#endif
static int korina_open(struct net_device *dev)
{
	struct korina_private *lp = netdev_priv(dev);
	int ret;

	/* Initialize */
	ret = korina_init(dev);
	if (ret < 0) {
		printk(KERN_ERR "%s: cannot open device\n", dev->name);
		goto out;
	}

	/* Install the interrupt handler
	 * that handles the Done Finished
	 * Ovr and Und Events */
	ret = request_irq(lp->rx_irq, korina_rx_dma_interrupt,
			0, "Korina ethernet Rx", dev);
	if (ret < 0) {
		printk(KERN_ERR "%s: unable to get Rx DMA IRQ %d\n",
			dev->name, lp->rx_irq);
		goto err_release;
	}
	ret = request_irq(lp->tx_irq, korina_tx_dma_interrupt,
			0, "Korina ethernet Tx", dev);
	if (ret < 0) {
		printk(KERN_ERR "%s: unable to get Tx DMA IRQ %d\n",
			dev->name, lp->tx_irq);
		goto err_free_rx_irq;
	}

	/* Install handler for overrun error. */
	ret = request_irq(lp->ovr_irq, korina_ovr_interrupt,
			0, "Ethernet Overflow", dev);
	if (ret < 0) {
		printk(KERN_ERR "%s: unable to get OVR IRQ %d\n",
			dev->name, lp->ovr_irq);
		goto err_free_tx_irq;
	}

	/* Install handler for underflow error. */
	ret = request_irq(lp->und_irq, korina_und_interrupt,
			0, "Ethernet Underflow", dev);
	if (ret < 0) {
		printk(KERN_ERR "%s: unable to get UND IRQ %d\n",
			dev->name, lp->und_irq);
		goto err_free_ovr_irq;
	}
	mod_timer(&lp->media_check_timer, jiffies + 1);
out:
	return ret;

err_free_ovr_irq:
	free_irq(lp->ovr_irq, dev);
err_free_tx_irq:
	free_irq(lp->tx_irq, dev);
err_free_rx_irq:
	free_irq(lp->rx_irq, dev);
err_release:
	korina_free_ring(dev);
	goto out;
}
static int korina_close(struct net_device *dev)
{
	struct korina_private *lp = netdev_priv(dev);
	u32 tmp;

	del_timer(&lp->media_check_timer);

	/* Disable interrupts */
	disable_irq(lp->rx_irq);
	disable_irq(lp->tx_irq);
	disable_irq(lp->ovr_irq);
	disable_irq(lp->und_irq);

	korina_abort_tx(dev);
	tmp = readl(&lp->tx_dma_regs->dmasm);
	tmp = tmp | DMA_STAT_FINI | DMA_STAT_ERR;
	writel(tmp, &lp->tx_dma_regs->dmasm);

	korina_abort_rx(dev);
	tmp = readl(&lp->rx_dma_regs->dmasm);
	tmp = tmp | DMA_STAT_DONE | DMA_STAT_HALT | DMA_STAT_ERR;
	writel(tmp, &lp->rx_dma_regs->dmasm);

	napi_disable(&lp->napi);

	cancel_work_sync(&lp->restart_task);

	korina_free_ring(dev);

	free_irq(lp->rx_irq, dev);
	free_irq(lp->tx_irq, dev);
	free_irq(lp->ovr_irq, dev);
	free_irq(lp->und_irq, dev);

	return 0;
}
static const struct net_device_ops korina_netdev_ops = {
	.ndo_open		= korina_open,
	.ndo_stop		= korina_close,
	.ndo_start_xmit		= korina_send_packet,
	.ndo_set_rx_mode	= korina_multicast_list,
	.ndo_tx_timeout		= korina_tx_timeout,
	.ndo_do_ioctl		= korina_ioctl,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_set_mac_address	= eth_mac_addr,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller	= korina_poll_controller,
#endif
};
static int korina_probe(struct platform_device *pdev)
{
	struct korina_device *bif = platform_get_drvdata(pdev);
	struct korina_private *lp;
	struct net_device *dev;
	struct resource *r;
	int rc;

	dev = alloc_etherdev(sizeof(struct korina_private));
	if (!dev)
		return -ENOMEM;

	SET_NETDEV_DEV(dev, &pdev->dev);
	lp = netdev_priv(dev);

	bif->dev = dev;
	memcpy(dev->dev_addr, bif->mac, ETH_ALEN);

	lp->rx_irq = platform_get_irq_byname(pdev, "korina_rx");
	lp->tx_irq = platform_get_irq_byname(pdev, "korina_tx");
	lp->ovr_irq = platform_get_irq_byname(pdev, "korina_ovr");
	lp->und_irq = platform_get_irq_byname(pdev, "korina_und");

	r = platform_get_resource_byname(pdev, IORESOURCE_MEM, "korina_regs");
	dev->base_addr = r->start;
	lp->eth_regs = ioremap_nocache(r->start, resource_size(r));
	if (!lp->eth_regs) {
		printk(KERN_ERR DRV_NAME ": cannot remap registers\n");
		rc = -ENXIO;
		goto probe_err_out;
	}

	r = platform_get_resource_byname(pdev, IORESOURCE_MEM, "korina_dma_rx");
	lp->rx_dma_regs = ioremap_nocache(r->start, resource_size(r));
	if (!lp->rx_dma_regs) {
		printk(KERN_ERR DRV_NAME ": cannot remap Rx DMA registers\n");
		rc = -ENXIO;
		goto probe_err_dma_rx;
	}

	r = platform_get_resource_byname(pdev, IORESOURCE_MEM, "korina_dma_tx");
	lp->tx_dma_regs = ioremap_nocache(r->start, resource_size(r));
	if (!lp->tx_dma_regs) {
		printk(KERN_ERR DRV_NAME ": cannot remap Tx DMA registers\n");
		rc = -ENXIO;
		goto probe_err_dma_tx;
	}

	lp->td_ring = kmalloc(TD_RING_SIZE + RD_RING_SIZE, GFP_KERNEL);
	if (!lp->td_ring) {
		rc = -ENOMEM;
		goto probe_err_td_ring;
	}

	dma_cache_inv((unsigned long)(lp->td_ring),
			TD_RING_SIZE + RD_RING_SIZE);

	/* now convert TD_RING pointer to KSEG1 */
	lp->td_ring = (struct dma_desc *)KSEG1ADDR(lp->td_ring);
	lp->rd_ring = &lp->td_ring[KORINA_NUM_TDS];

	spin_lock_init(&lp->lock);
	/* just use the rx dma irq */
	dev->irq = lp->rx_irq;
	lp->dev = dev;

	dev->netdev_ops = &korina_netdev_ops;
	dev->ethtool_ops = &netdev_ethtool_ops;
	dev->watchdog_timeo = TX_TIMEOUT;
	netif_napi_add(dev, &lp->napi, korina_poll, 64);

	lp->phy_addr = (((lp->rx_irq == 0x2c ? 1 : 0) << 8) | 0x05);
	lp->mii_if.dev = dev;
	lp->mii_if.mdio_read = mdio_read;
	lp->mii_if.mdio_write = mdio_write;
	lp->mii_if.phy_id = lp->phy_addr;
	lp->mii_if.phy_id_mask = 0x1f;
	lp->mii_if.reg_num_mask = 0x1f;

	rc = register_netdev(dev);
	if (rc < 0) {
		printk(KERN_ERR DRV_NAME
			": cannot register net device: %d\n", rc);
		goto probe_err_register;
	}
	setup_timer(&lp->media_check_timer, korina_poll_media, (unsigned long) dev);

	INIT_WORK(&lp->restart_task, korina_restart_task);

	printk(KERN_INFO "%s: " DRV_NAME "-" DRV_VERSION " " DRV_RELDATE "\n",
			dev->name);
out:
	return rc;

probe_err_register:
	kfree(lp->td_ring);
probe_err_td_ring:
	iounmap(lp->tx_dma_regs);
probe_err_dma_tx:
	iounmap(lp->rx_dma_regs);
probe_err_dma_rx:
	iounmap(lp->eth_regs);
probe_err_out:
	free_netdev(dev);
	goto out;
}
static int korina_remove(struct platform_device *pdev)
{
	struct korina_device *bif = platform_get_drvdata(pdev);
	struct korina_private *lp = netdev_priv(bif->dev);

	iounmap(lp->eth_regs);
	iounmap(lp->rx_dma_regs);
	iounmap(lp->tx_dma_regs);

	unregister_netdev(bif->dev);
	free_netdev(bif->dev);

	return 0;
}
static struct platform_driver korina_driver = {
	.driver.name = "korina",
	.probe = korina_probe,
	.remove = korina_remove,
};

module_platform_driver(korina_driver);

MODULE_AUTHOR("Philip Rischel <rischelp@idt.com>");
MODULE_AUTHOR("Felix Fietkau <nbd@openwrt.org>");
MODULE_AUTHOR("Florian Fainelli <florian@openwrt.org>");
MODULE_DESCRIPTION("IDT RC32434 (Korina) Ethernet driver");
MODULE_LICENSE("GPL");