net: remove redundant code
/*
 * Aeroflex Gaisler GRETH 10/100/1G Ethernet MAC.
 *
 * 2005-2009 (c) Aeroflex Gaisler AB
 *
 * This driver supports GRETH 10/100 and GRETH 10/100/1G Ethernet MACs
 * available in the GRLIB VHDL IP core library.
 *
 * Full documentation of both cores can be found here:
 * http://www.gaisler.com/products/grlib/grip.pdf
 *
 * The Gigabit version supports scatter/gather DMA, any alignment of
 * buffers and checksum offloading.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the
 * Free Software Foundation; either version 2 of the License, or (at your
 * option) any later version.
 *
 * Contributors: Kristoffer Glembo
 *               Daniel Hellstrom
 *               Marko Isomaki
 */
#include <linux/module.h>
#include <linux/uaccess.h>
#include <linux/init.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/ethtool.h>
#include <linux/skbuff.h>
#include <linux/io.h>
#include <linux/crc32.h>
#include <linux/mii.h>
#include <linux/of_device.h>
#include <linux/of_platform.h>
#include <asm/cacheflush.h>
#include <asm/byteorder.h>

#ifdef CONFIG_SPARC
#include <asm/idprom.h>
#endif

#include "greth.h"
#define GRETH_DEF_MSG_ENABLE	  \
	(NETIF_MSG_DRV		| \
	 NETIF_MSG_PROBE	| \
	 NETIF_MSG_LINK		| \
	 NETIF_MSG_IFDOWN	| \
	 NETIF_MSG_IFUP		| \
	 NETIF_MSG_RX_ERR	| \
	 NETIF_MSG_TX_ERR)

static int greth_debug = -1;	/* -1 == use GRETH_DEF_MSG_ENABLE as value */
module_param(greth_debug, int, 0);
MODULE_PARM_DESC(greth_debug, "GRETH bitmapped debugging message enable value");

/* Accept MAC address of the form macaddr=0x08,0x00,0x20,0x30,0x40,0x50 */
static int macaddr[6];
module_param_array(macaddr, int, NULL, 0);
MODULE_PARM_DESC(macaddr, "GRETH Ethernet MAC address");

static int greth_edcl = 1;
module_param(greth_edcl, int, 0);
MODULE_PARM_DESC(greth_edcl, "GRETH EDCL usage indicator. Set to 1 if EDCL is used.");
static int greth_open(struct net_device *dev);
static netdev_tx_t greth_start_xmit(struct sk_buff *skb,
				    struct net_device *dev);
static netdev_tx_t greth_start_xmit_gbit(struct sk_buff *skb,
					 struct net_device *dev);
static int greth_rx(struct net_device *dev, int limit);
static int greth_rx_gbit(struct net_device *dev, int limit);
static void greth_clean_tx(struct net_device *dev);
static void greth_clean_tx_gbit(struct net_device *dev);
static irqreturn_t greth_interrupt(int irq, void *dev_id);
static int greth_close(struct net_device *dev);
static int greth_set_mac_add(struct net_device *dev, void *p);
static void greth_set_multicast_list(struct net_device *dev);
#define GRETH_REGLOAD(a)	(be32_to_cpu(__raw_readl(&(a))))
#define GRETH_REGSAVE(a, v)	(__raw_writel(cpu_to_be32(v), &(a)))
#define GRETH_REGORIN(a, v)	(GRETH_REGSAVE(a, (GRETH_REGLOAD(a) | (v))))
#define GRETH_REGANDIN(a, v)	(GRETH_REGSAVE(a, (GRETH_REGLOAD(a) & (v))))

#define NEXT_TX(N)	(((N) + 1) & GRETH_TXBD_NUM_MASK)
#define SKIP_TX(N, C)	(((N) + C) & GRETH_TXBD_NUM_MASK)
#define NEXT_RX(N)	(((N) + 1) & GRETH_RXBD_NUM_MASK)
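
/*
 * Note: the NEXT_TX/SKIP_TX/NEXT_RX index arithmetic relies on the ring
 * sizes (GRETH_TXBD_NUM/GRETH_RXBD_NUM) being powers of two, so the
 * corresponding _MASK values wrap an index with a single AND.
 */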
static void greth_print_rx_packet(void *addr, int len)
{
	print_hex_dump(KERN_DEBUG, "RX: ", DUMP_PREFIX_OFFSET, 16, 1,
			addr, len, true);
}

static void greth_print_tx_packet(struct sk_buff *skb)
{
	int i;
	int length;

	if (skb_shinfo(skb)->nr_frags == 0)
		length = skb->len;
	else
		length = skb_headlen(skb);

	print_hex_dump(KERN_DEBUG, "TX: ", DUMP_PREFIX_OFFSET, 16, 1,
			skb->data, length, true);

	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {

		print_hex_dump(KERN_DEBUG, "TX: ", DUMP_PREFIX_OFFSET, 16, 1,
			       phys_to_virt(page_to_phys(skb_shinfo(skb)->frags[i].page)) +
			       skb_shinfo(skb)->frags[i].page_offset,
			       length, true);
	}
}

static inline void greth_enable_tx(struct greth_private *greth)
{
	wmb();
	GRETH_REGORIN(greth->regs->control, GRETH_TXEN);
}

static inline void greth_disable_tx(struct greth_private *greth)
{
	GRETH_REGANDIN(greth->regs->control, ~GRETH_TXEN);
}

static inline void greth_enable_rx(struct greth_private *greth)
{
	wmb();
	GRETH_REGORIN(greth->regs->control, GRETH_RXEN);
}

static inline void greth_disable_rx(struct greth_private *greth)
{
	GRETH_REGANDIN(greth->regs->control, ~GRETH_RXEN);
}

static inline void greth_enable_irqs(struct greth_private *greth)
{
	GRETH_REGORIN(greth->regs->control, GRETH_RXI | GRETH_TXI);
}

static inline void greth_disable_irqs(struct greth_private *greth)
{
	GRETH_REGANDIN(greth->regs->control, ~(GRETH_RXI | GRETH_TXI));
}

static inline void greth_write_bd(u32 *bd, u32 val)
{
	__raw_writel(cpu_to_be32(val), bd);
}

static inline u32 greth_read_bd(u32 *bd)
{
	return be32_to_cpu(__raw_readl(bd));
}
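
/*
 * GRETH registers and buffer descriptors are big-endian, so both the
 * GRETH_REG* macros above and the greth_read_bd()/greth_write_bd()
 * helpers pair __raw_readl()/__raw_writel() with an explicit byte swap
 * instead of relying on readl()/writel()'s implicit little-endian
 * conversion; this way the accesses come out right on either CPU
 * endianness.
 */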
static void greth_clean_rings(struct greth_private *greth)
{
	int i;
	struct greth_bd *rx_bdp = greth->rx_bd_base;
	struct greth_bd *tx_bdp = greth->tx_bd_base;

	if (greth->gbit_mac) {

		/* Free and unmap RX buffers */
		for (i = 0; i < GRETH_RXBD_NUM; i++, rx_bdp++) {
			if (greth->rx_skbuff[i] != NULL) {
				dev_kfree_skb(greth->rx_skbuff[i]);
				dma_unmap_single(greth->dev,
						 greth_read_bd(&rx_bdp->addr),
						 MAX_FRAME_SIZE+NET_IP_ALIGN,
						 DMA_FROM_DEVICE);
			}
		}

		/* TX buffers */
		while (greth->tx_free < GRETH_TXBD_NUM) {

			struct sk_buff *skb = greth->tx_skbuff[greth->tx_last];
			int nr_frags = skb_shinfo(skb)->nr_frags;
			tx_bdp = greth->tx_bd_base + greth->tx_last;
			greth->tx_last = NEXT_TX(greth->tx_last);

			dma_unmap_single(greth->dev,
					 greth_read_bd(&tx_bdp->addr),
					 skb_headlen(skb),
					 DMA_TO_DEVICE);

			for (i = 0; i < nr_frags; i++) {
				skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
				tx_bdp = greth->tx_bd_base + greth->tx_last;

				dma_unmap_page(greth->dev,
					       greth_read_bd(&tx_bdp->addr),
					       frag->size,
					       DMA_TO_DEVICE);

				greth->tx_last = NEXT_TX(greth->tx_last);
			}
			greth->tx_free += nr_frags+1;
			dev_kfree_skb(skb);
		}

	} else { /* 10/100 Mbps MAC */

		for (i = 0; i < GRETH_RXBD_NUM; i++, rx_bdp++) {
			kfree(greth->rx_bufs[i]);
			dma_unmap_single(greth->dev,
					 greth_read_bd(&rx_bdp->addr),
					 MAX_FRAME_SIZE,
					 DMA_FROM_DEVICE);
		}
		for (i = 0; i < GRETH_TXBD_NUM; i++, tx_bdp++) {
			kfree(greth->tx_bufs[i]);
			dma_unmap_single(greth->dev,
					 greth_read_bd(&tx_bdp->addr),
					 MAX_FRAME_SIZE,
					 DMA_TO_DEVICE);
		}
	}
}
static int greth_init_rings(struct greth_private *greth)
{
	struct sk_buff *skb;
	struct greth_bd *rx_bd, *tx_bd;
	u32 dma_addr;
	int i;

	rx_bd = greth->rx_bd_base;
	tx_bd = greth->tx_bd_base;

	/* Initialize descriptor rings and buffers */
	if (greth->gbit_mac) {

		for (i = 0; i < GRETH_RXBD_NUM; i++) {
			skb = netdev_alloc_skb(greth->netdev, MAX_FRAME_SIZE+NET_IP_ALIGN);
			if (skb == NULL) {
				if (netif_msg_ifup(greth))
					dev_err(greth->dev, "Error allocating DMA ring.\n");
				goto cleanup;
			}
			skb_reserve(skb, NET_IP_ALIGN);
			dma_addr = dma_map_single(greth->dev,
						  skb->data,
						  MAX_FRAME_SIZE+NET_IP_ALIGN,
						  DMA_FROM_DEVICE);

			if (dma_mapping_error(greth->dev, dma_addr)) {
				if (netif_msg_ifup(greth))
					dev_err(greth->dev, "Could not create initial DMA mapping\n");
				goto cleanup;
			}
			greth->rx_skbuff[i] = skb;
			greth_write_bd(&rx_bd[i].addr, dma_addr);
			greth_write_bd(&rx_bd[i].stat, GRETH_BD_EN | GRETH_BD_IE);
		}

	} else {

		/* 10/100 MAC uses a fixed set of buffers and copy to/from SKBs */
		for (i = 0; i < GRETH_RXBD_NUM; i++) {

			greth->rx_bufs[i] = kmalloc(MAX_FRAME_SIZE, GFP_KERNEL);

			if (greth->rx_bufs[i] == NULL) {
				if (netif_msg_ifup(greth))
					dev_err(greth->dev, "Error allocating DMA ring.\n");
				goto cleanup;
			}

			dma_addr = dma_map_single(greth->dev,
						  greth->rx_bufs[i],
						  MAX_FRAME_SIZE,
						  DMA_FROM_DEVICE);

			if (dma_mapping_error(greth->dev, dma_addr)) {
				if (netif_msg_ifup(greth))
					dev_err(greth->dev, "Could not create initial DMA mapping\n");
				goto cleanup;
			}
			greth_write_bd(&rx_bd[i].addr, dma_addr);
			greth_write_bd(&rx_bd[i].stat, GRETH_BD_EN | GRETH_BD_IE);
		}
		for (i = 0; i < GRETH_TXBD_NUM; i++) {

			greth->tx_bufs[i] = kmalloc(MAX_FRAME_SIZE, GFP_KERNEL);

			if (greth->tx_bufs[i] == NULL) {
				if (netif_msg_ifup(greth))
					dev_err(greth->dev, "Error allocating DMA ring.\n");
				goto cleanup;
			}

			dma_addr = dma_map_single(greth->dev,
						  greth->tx_bufs[i],
						  MAX_FRAME_SIZE,
						  DMA_TO_DEVICE);

			if (dma_mapping_error(greth->dev, dma_addr)) {
				if (netif_msg_ifup(greth))
					dev_err(greth->dev, "Could not create initial DMA mapping\n");
				goto cleanup;
			}
			greth_write_bd(&tx_bd[i].addr, dma_addr);
			greth_write_bd(&tx_bd[i].stat, 0);
		}
	}
	greth_write_bd(&rx_bd[GRETH_RXBD_NUM - 1].stat,
		       greth_read_bd(&rx_bd[GRETH_RXBD_NUM - 1].stat) | GRETH_BD_WR);

	/* Initialize pointers. */
	greth->rx_cur = 0;
	greth->tx_next = 0;
	greth->tx_last = 0;
	greth->tx_free = GRETH_TXBD_NUM;

	/* Initialize descriptor base address */
	GRETH_REGSAVE(greth->regs->tx_desc_p, greth->tx_bd_base_phys);
	GRETH_REGSAVE(greth->regs->rx_desc_p, greth->rx_bd_base_phys);

	return 0;

cleanup:
	greth_clean_rings(greth);
	return -ENOMEM;
}
static int greth_open(struct net_device *dev)
{
	struct greth_private *greth = netdev_priv(dev);
	int err;

	err = greth_init_rings(greth);
	if (err) {
		if (netif_msg_ifup(greth))
			dev_err(&dev->dev, "Could not allocate memory for DMA rings\n");
		return err;
	}

	err = request_irq(greth->irq, greth_interrupt, 0, "eth", (void *) dev);
	if (err) {
		if (netif_msg_ifup(greth))
			dev_err(&dev->dev, "Could not allocate interrupt %d\n", dev->irq);
		greth_clean_rings(greth);
		return err;
	}

	if (netif_msg_ifup(greth))
		dev_dbg(&dev->dev, " starting queue\n");
	netif_start_queue(dev);

	napi_enable(&greth->napi);

	greth_enable_irqs(greth);
	greth_enable_tx(greth);
	greth_enable_rx(greth);
	return 0;
}

static int greth_close(struct net_device *dev)
{
	struct greth_private *greth = netdev_priv(dev);

	napi_disable(&greth->napi);

	greth_disable_tx(greth);

	netif_stop_queue(dev);

	free_irq(greth->irq, (void *) dev);

	greth_clean_rings(greth);

	return 0;
}
static netdev_tx_t
greth_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct greth_private *greth = netdev_priv(dev);
	struct greth_bd *bdp;
	int err = NETDEV_TX_OK;
	u32 status, dma_addr;

	bdp = greth->tx_bd_base + greth->tx_next;

	if (unlikely(greth->tx_free <= 0)) {
		netif_stop_queue(dev);
		return NETDEV_TX_BUSY;
	}

	if (netif_msg_pktdata(greth))
		greth_print_tx_packet(skb);

	if (unlikely(skb->len > MAX_FRAME_SIZE)) {
		dev->stats.tx_errors++;
		goto out;
	}

	dma_addr = greth_read_bd(&bdp->addr);

	memcpy((unsigned char *) phys_to_virt(dma_addr), skb->data, skb->len);

	dma_sync_single_for_device(greth->dev, dma_addr, skb->len, DMA_TO_DEVICE);

	status = GRETH_BD_EN | (skb->len & GRETH_BD_LEN);

	/* Wrap around descriptor ring */
	if (greth->tx_next == GRETH_TXBD_NUM_MASK) {
		status |= GRETH_BD_WR;
	}

	greth->tx_next = NEXT_TX(greth->tx_next);
	greth->tx_free--;

	/* No more descriptors */
	if (unlikely(greth->tx_free == 0)) {

		/* Free transmitted descriptors */
		greth_clean_tx(dev);

		/* If nothing was cleaned, stop queue & wait for irq */
		if (unlikely(greth->tx_free == 0)) {
			status |= GRETH_BD_IE;
			netif_stop_queue(dev);
		}
	}

	/* Write descriptor control word and enable transmission */
	greth_write_bd(&bdp->stat, status);
	greth_enable_tx(greth);

out:
	dev_kfree_skb(skb);
	return err;
}
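
/*
 * The gigabit transmit path below differs from the copy-based 10/100 path
 * above: instead of copying into a preallocated bounce buffer it maps the
 * linear part and each page fragment of the skb separately, chains the
 * descriptors with GRETH_TXBD_MORE, and requests full checksum offload
 * with GRETH_TXBD_CSALL.
 */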
static netdev_tx_t
greth_start_xmit_gbit(struct sk_buff *skb, struct net_device *dev)
{
	struct greth_private *greth = netdev_priv(dev);
	struct greth_bd *bdp;
	u32 status = 0, dma_addr;
	int curr_tx, nr_frags, i, err = NETDEV_TX_OK;

	nr_frags = skb_shinfo(skb)->nr_frags;

	if (greth->tx_free < nr_frags + 1) {
		netif_stop_queue(dev);
		err = NETDEV_TX_BUSY;
		goto out;
	}

	if (netif_msg_pktdata(greth))
		greth_print_tx_packet(skb);

	if (unlikely(skb->len > MAX_FRAME_SIZE)) {
		dev->stats.tx_errors++;
		goto out;
	}

	/* Save skb pointer. */
	greth->tx_skbuff[greth->tx_next] = skb;

	/* Linear buf */
	if (nr_frags != 0)
		status = GRETH_TXBD_MORE;

	status |= GRETH_TXBD_CSALL;
	status |= skb_headlen(skb) & GRETH_BD_LEN;
	if (greth->tx_next == GRETH_TXBD_NUM_MASK)
		status |= GRETH_BD_WR;

	bdp = greth->tx_bd_base + greth->tx_next;
	greth_write_bd(&bdp->stat, status);
	dma_addr = dma_map_single(greth->dev, skb->data, skb_headlen(skb), DMA_TO_DEVICE);

	if (unlikely(dma_mapping_error(greth->dev, dma_addr)))
		goto map_error;

	greth_write_bd(&bdp->addr, dma_addr);

	curr_tx = NEXT_TX(greth->tx_next);

	/* Frags */
	for (i = 0; i < nr_frags; i++) {
		skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
		greth->tx_skbuff[curr_tx] = NULL;
		bdp = greth->tx_bd_base + curr_tx;

		status = GRETH_TXBD_CSALL;
		status |= frag->size & GRETH_BD_LEN;

		/* Wrap around descriptor ring */
		if (curr_tx == GRETH_TXBD_NUM_MASK)
			status |= GRETH_BD_WR;

		/* More fragments left */
		if (i < nr_frags - 1)
			status |= GRETH_TXBD_MORE;

		/* ... last fragment, check if out of descriptors */
		else if (greth->tx_free - nr_frags - 1 < (MAX_SKB_FRAGS + 1)) {

			/* Enable interrupts and stop queue */
			status |= GRETH_BD_IE;
			netif_stop_queue(dev);
		}

		greth_write_bd(&bdp->stat, status);

		dma_addr = dma_map_page(greth->dev,
					frag->page,
					frag->page_offset,
					frag->size,
					DMA_TO_DEVICE);

		if (unlikely(dma_mapping_error(greth->dev, dma_addr)))
			goto frag_map_error;

		greth_write_bd(&bdp->addr, dma_addr);

		curr_tx = NEXT_TX(curr_tx);
	}

	wmb();

	/* Enable the descriptors that we configured ... */
	for (i = 0; i < nr_frags + 1; i++) {
		bdp = greth->tx_bd_base + greth->tx_next;
		greth_write_bd(&bdp->stat, greth_read_bd(&bdp->stat) | GRETH_BD_EN);
		greth->tx_next = NEXT_TX(greth->tx_next);
		greth->tx_free--;
	}

	greth_enable_tx(greth);

	return NETDEV_TX_OK;

frag_map_error:
	/* Unmap SKB mappings that succeeded */
	for (i = 0; greth->tx_next + i != curr_tx; i++) {
		bdp = greth->tx_bd_base + greth->tx_next + i;
		dma_unmap_single(greth->dev,
				 greth_read_bd(&bdp->addr),
				 greth_read_bd(&bdp->stat) & GRETH_BD_LEN,
				 DMA_TO_DEVICE);
	}
map_error:
	if (net_ratelimit())
		dev_warn(greth->dev, "Could not create TX DMA mapping\n");
	dev_kfree_skb(skb);
out:
	return err;
}
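
/*
 * The interrupt handler only acknowledges the RX/TX events and hands the
 * actual work to NAPI: interrupts are masked with greth_disable_irqs()
 * here and re-enabled from greth_poll() when the poll loop finishes.
 */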
static irqreturn_t greth_interrupt(int irq, void *dev_id)
{
	struct net_device *dev = dev_id;
	struct greth_private *greth;
	u32 status;
	irqreturn_t retval = IRQ_NONE;

	greth = netdev_priv(dev);

	spin_lock(&greth->devlock);

	/* Get the interrupt events that caused us to be here. */
	status = GRETH_REGLOAD(greth->regs->status);

	/* Handle rx and tx interrupts through poll */
	if (status & (GRETH_INT_RX | GRETH_INT_TX)) {

		/* Clear interrupt status */
		GRETH_REGORIN(greth->regs->status,
			      status & (GRETH_INT_RX | GRETH_INT_TX));

		retval = IRQ_HANDLED;

		/* Disable interrupts and schedule poll() */
		greth_disable_irqs(greth);
		napi_schedule(&greth->napi);
	}

	mmiowb();
	spin_unlock(&greth->devlock);

	return retval;
}
static void greth_clean_tx(struct net_device *dev)
{
	struct greth_private *greth;
	struct greth_bd *bdp;
	u32 stat;

	greth = netdev_priv(dev);

	while (1) {
		bdp = greth->tx_bd_base + greth->tx_last;
		stat = greth_read_bd(&bdp->stat);

		if (unlikely(stat & GRETH_BD_EN))
			break;

		if (greth->tx_free == GRETH_TXBD_NUM)
			break;

		/* Check status for errors */
		if (unlikely(stat & GRETH_TXBD_STATUS)) {
			dev->stats.tx_errors++;
			if (stat & GRETH_TXBD_ERR_AL)
				dev->stats.tx_aborted_errors++;
			if (stat & GRETH_TXBD_ERR_UE)
				dev->stats.tx_fifo_errors++;
		}
		dev->stats.tx_packets++;
		greth->tx_last = NEXT_TX(greth->tx_last);
		greth->tx_free++;
	}

	if (greth->tx_free > 0) {
		netif_wake_queue(dev);
	}
}
static inline void greth_update_tx_stats(struct net_device *dev, u32 stat)
{
	/* Check status for errors */
	if (unlikely(stat & GRETH_TXBD_STATUS)) {
		dev->stats.tx_errors++;
		if (stat & GRETH_TXBD_ERR_AL)
			dev->stats.tx_aborted_errors++;
		if (stat & GRETH_TXBD_ERR_UE)
			dev->stats.tx_fifo_errors++;
		if (stat & GRETH_TXBD_ERR_LC)
			dev->stats.tx_aborted_errors++;
	}
	dev->stats.tx_packets++;
}
static void greth_clean_tx_gbit(struct net_device *dev)
{
	struct greth_private *greth;
	struct greth_bd *bdp, *bdp_last_frag;
	struct sk_buff *skb;
	u32 stat;
	int nr_frags, i;

	greth = netdev_priv(dev);

	while (greth->tx_free < GRETH_TXBD_NUM) {

		skb = greth->tx_skbuff[greth->tx_last];

		nr_frags = skb_shinfo(skb)->nr_frags;

		/* We only clean fully completed SKBs */
		bdp_last_frag = greth->tx_bd_base + SKIP_TX(greth->tx_last, nr_frags);
		stat = bdp_last_frag->stat;

		if (stat & GRETH_BD_EN)
			break;

		greth->tx_skbuff[greth->tx_last] = NULL;

		greth_update_tx_stats(dev, stat);

		bdp = greth->tx_bd_base + greth->tx_last;

		greth->tx_last = NEXT_TX(greth->tx_last);

		dma_unmap_single(greth->dev,
				 greth_read_bd(&bdp->addr),
				 skb_headlen(skb),
				 DMA_TO_DEVICE);

		for (i = 0; i < nr_frags; i++) {
			skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
			bdp = greth->tx_bd_base + greth->tx_last;

			dma_unmap_page(greth->dev,
				       greth_read_bd(&bdp->addr),
				       frag->size,
				       DMA_TO_DEVICE);

			greth->tx_last = NEXT_TX(greth->tx_last);
		}
		greth->tx_free += nr_frags+1;
		dev_kfree_skb(skb);
	}
	if (greth->tx_free > (MAX_SKB_FRAGS + 1)) {
		netif_wake_queue(dev);
	}
}
static int greth_pending_packets(struct greth_private *greth)
{
	struct greth_bd *bdp;
	u32 status;
	bdp = greth->rx_bd_base + greth->rx_cur;
	status = greth_read_bd(&bdp->stat);
	if (status & GRETH_BD_EN)
		return 0;
	else
		return 1;
}
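
/*
 * 10/100 receive path: frames are copied out of the fixed DMA buffers
 * into freshly allocated skbs, so each descriptor can be handed straight
 * back to the hardware after the copy.
 */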
static int greth_rx(struct net_device *dev, int limit)
{
	struct greth_private *greth;
	struct greth_bd *bdp;
	struct sk_buff *skb;
	int pkt_len;
	int bad, count;
	u32 status, dma_addr;

	greth = netdev_priv(dev);

	for (count = 0; count < limit; ++count) {

		bdp = greth->rx_bd_base + greth->rx_cur;
		status = greth_read_bd(&bdp->stat);
		dma_addr = greth_read_bd(&bdp->addr);
		bad = 0;

		if (unlikely(status & GRETH_BD_EN)) {
			break;
		}

		/* Check status for errors. */
		if (unlikely(status & GRETH_RXBD_STATUS)) {
			if (status & GRETH_RXBD_ERR_FT) {
				dev->stats.rx_length_errors++;
				bad = 1;
			}
			if (status & (GRETH_RXBD_ERR_AE | GRETH_RXBD_ERR_OE)) {
				dev->stats.rx_frame_errors++;
				bad = 1;
			}
			if (status & GRETH_RXBD_ERR_CRC) {
				dev->stats.rx_crc_errors++;
				bad = 1;
			}
		}
		if (unlikely(bad)) {
			dev->stats.rx_errors++;

		} else {

			pkt_len = status & GRETH_BD_LEN;

			skb = netdev_alloc_skb(dev, pkt_len + NET_IP_ALIGN);

			if (unlikely(skb == NULL)) {

				if (net_ratelimit())
					dev_warn(&dev->dev, "low on memory - packet dropped\n");

				dev->stats.rx_dropped++;

			} else {
				skb_reserve(skb, NET_IP_ALIGN);
				skb->dev = dev;

				dma_sync_single_for_cpu(greth->dev,
							dma_addr,
							pkt_len,
							DMA_FROM_DEVICE);

				if (netif_msg_pktdata(greth))
					greth_print_rx_packet(phys_to_virt(dma_addr), pkt_len);

				memcpy(skb_put(skb, pkt_len), phys_to_virt(dma_addr), pkt_len);

				skb->protocol = eth_type_trans(skb, dev);
				dev->stats.rx_packets++;
				netif_receive_skb(skb);
			}
		}

		status = GRETH_BD_EN | GRETH_BD_IE;
		if (greth->rx_cur == GRETH_RXBD_NUM_MASK) {
			status |= GRETH_BD_WR;
		}

		wmb();
		greth_write_bd(&bdp->stat, status);

		dma_sync_single_for_device(greth->dev, dma_addr, MAX_FRAME_SIZE, DMA_FROM_DEVICE);

		greth_enable_rx(greth);

		greth->rx_cur = NEXT_RX(greth->rx_cur);
	}

	return count;
}
static inline int hw_checksummed(u32 status)
{

	if (status & GRETH_RXBD_IP_FRAG)
		return 0;

	if (status & GRETH_RXBD_IP && status & GRETH_RXBD_IP_CSERR)
		return 0;

	if (status & GRETH_RXBD_UDP && status & GRETH_RXBD_UDP_CSERR)
		return 0;

	if (status & GRETH_RXBD_TCP && status & GRETH_RXBD_TCP_CSERR)
		return 0;

	return 1;
}
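
/*
 * Gigabit receive path: replace-on-receive. A new skb is allocated and
 * mapped for the descriptor before the filled one is handed up the stack;
 * if allocation or mapping fails the frame is dropped and the old buffer
 * is recycled, so the ring never loses a descriptor.
 */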
static int greth_rx_gbit(struct net_device *dev, int limit)
{
	struct greth_private *greth;
	struct greth_bd *bdp;
	struct sk_buff *skb, *newskb;
	int pkt_len;
	int bad, count = 0;
	u32 status, dma_addr;

	greth = netdev_priv(dev);

	for (count = 0; count < limit; ++count) {

		bdp = greth->rx_bd_base + greth->rx_cur;
		skb = greth->rx_skbuff[greth->rx_cur];
		status = greth_read_bd(&bdp->stat);
		bad = 0;

		if (status & GRETH_BD_EN)
			break;

		/* Check status for errors. */
		if (unlikely(status & GRETH_RXBD_STATUS)) {

			if (status & GRETH_RXBD_ERR_FT) {
				dev->stats.rx_length_errors++;
				bad = 1;
			} else if (status &
				   (GRETH_RXBD_ERR_AE | GRETH_RXBD_ERR_OE | GRETH_RXBD_ERR_LE)) {
				dev->stats.rx_frame_errors++;
				bad = 1;
			} else if (status & GRETH_RXBD_ERR_CRC) {
				dev->stats.rx_crc_errors++;
				bad = 1;
			}
		}

		/* Allocate new skb to replace current */
		newskb = netdev_alloc_skb(dev, MAX_FRAME_SIZE + NET_IP_ALIGN);

		if (!bad && newskb) {
			skb_reserve(newskb, NET_IP_ALIGN);

			dma_addr = dma_map_single(greth->dev,
						  newskb->data,
						  MAX_FRAME_SIZE + NET_IP_ALIGN,
						  DMA_FROM_DEVICE);

			if (!dma_mapping_error(greth->dev, dma_addr)) {
				/* Process the incoming frame. */
				pkt_len = status & GRETH_BD_LEN;

				dma_unmap_single(greth->dev,
						 greth_read_bd(&bdp->addr),
						 MAX_FRAME_SIZE + NET_IP_ALIGN,
						 DMA_FROM_DEVICE);

				if (netif_msg_pktdata(greth))
					greth_print_rx_packet(phys_to_virt(greth_read_bd(&bdp->addr)), pkt_len);

				skb_put(skb, pkt_len);

				if (greth->flags & GRETH_FLAG_RX_CSUM && hw_checksummed(status))
					skb->ip_summed = CHECKSUM_UNNECESSARY;
				else
					skb->ip_summed = CHECKSUM_NONE;

				skb->protocol = eth_type_trans(skb, dev);
				dev->stats.rx_packets++;
				netif_receive_skb(skb);

				greth->rx_skbuff[greth->rx_cur] = newskb;
				greth_write_bd(&bdp->addr, dma_addr);
			} else {
				if (net_ratelimit())
					dev_warn(greth->dev, "Could not create DMA mapping, dropping packet\n");
				dev_kfree_skb(newskb);
				dev->stats.rx_dropped++;
			}
		} else {
			if (net_ratelimit())
				dev_warn(greth->dev, "Could not allocate SKB, dropping packet\n");
			dev->stats.rx_dropped++;
		}

		status = GRETH_BD_EN | GRETH_BD_IE;
		if (greth->rx_cur == GRETH_RXBD_NUM_MASK) {
			status |= GRETH_BD_WR;
		}

		wmb();
		greth_write_bd(&bdp->stat, status);
		greth_enable_rx(greth);
		greth->rx_cur = NEXT_RX(greth->rx_cur);
	}

	return count;
}
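
/*
 * NAPI poll: reap TX completions first, then receive up to the budget.
 * After napi_complete() the RX ring is checked once more and polling is
 * restarted if a frame arrived in the meantime, so no packet is stranded
 * between completion and interrupt re-enable.
 */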
static int greth_poll(struct napi_struct *napi, int budget)
{
	struct greth_private *greth;
	int work_done = 0;
	greth = container_of(napi, struct greth_private, napi);

	if (greth->gbit_mac) {
		greth_clean_tx_gbit(greth->netdev);
	} else {
		greth_clean_tx(greth->netdev);
	}

restart_poll:
	if (greth->gbit_mac) {
		work_done += greth_rx_gbit(greth->netdev, budget - work_done);
	} else {
		work_done += greth_rx(greth->netdev, budget - work_done);
	}

	if (work_done < budget) {

		napi_complete(napi);

		if (greth_pending_packets(greth)) {
			napi_reschedule(napi);
			goto restart_poll;
		}
	}

	greth_enable_irqs(greth);
	return work_done;
}
static int greth_set_mac_add(struct net_device *dev, void *p)
{
	struct sockaddr *addr = p;
	struct greth_private *greth;
	struct greth_regs *regs;

	greth = netdev_priv(dev);
	regs = (struct greth_regs *) greth->regs;

	if (!is_valid_ether_addr(addr->sa_data))
		return -EINVAL;

	memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);

	GRETH_REGSAVE(regs->esa_msb, addr->sa_data[0] << 8 | addr->sa_data[1]);
	GRETH_REGSAVE(regs->esa_lsb,
		      addr->sa_data[2] << 24 | addr->sa_data[3] << 16 |
		      addr->sa_data[4] << 8 | addr->sa_data[5]);
	return 0;
}

static u32 greth_hash_get_index(__u8 *addr)
{
	return (ether_crc(6, addr)) & 0x3F;
}
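
/*
 * The multicast filter hashes each address to one of 64 bins (the low six
 * bits of the Ethernet CRC), spread over the two 32-bit hash registers
 * hash_msb/hash_lsb.
 */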
static void greth_set_hash_filter(struct net_device *dev)
{
	struct dev_mc_list *curr;
	struct greth_private *greth = netdev_priv(dev);
	struct greth_regs *regs = (struct greth_regs *) greth->regs;
	u32 mc_filter[2];
	unsigned int bitnr;

	mc_filter[0] = mc_filter[1] = 0;

	netdev_for_each_mc_addr(curr, dev) {
		bitnr = greth_hash_get_index(curr->dmi_addr);
		mc_filter[bitnr >> 5] |= 1 << (bitnr & 31);
	}

	GRETH_REGSAVE(regs->hash_msb, mc_filter[1]);
	GRETH_REGSAVE(regs->hash_lsb, mc_filter[0]);
}

static void greth_set_multicast_list(struct net_device *dev)
{
	int cfg;
	struct greth_private *greth = netdev_priv(dev);
	struct greth_regs *regs = (struct greth_regs *) greth->regs;

	cfg = GRETH_REGLOAD(regs->control);
	if (dev->flags & IFF_PROMISC)
		cfg |= GRETH_CTRL_PR;
	else
		cfg &= ~GRETH_CTRL_PR;

	if (greth->multicast) {
		if (dev->flags & IFF_ALLMULTI) {
			GRETH_REGSAVE(regs->hash_msb, -1);
			GRETH_REGSAVE(regs->hash_lsb, -1);
			cfg |= GRETH_CTRL_MCEN;
			GRETH_REGSAVE(regs->control, cfg);
			return;
		}

		if (netdev_mc_empty(dev)) {
			cfg &= ~GRETH_CTRL_MCEN;
			GRETH_REGSAVE(regs->control, cfg);
			return;
		}

		/* Setup multicast filter */
		greth_set_hash_filter(dev);
		cfg |= GRETH_CTRL_MCEN;
	}
	GRETH_REGSAVE(regs->control, cfg);
}
static u32 greth_get_msglevel(struct net_device *dev)
{
	struct greth_private *greth = netdev_priv(dev);
	return greth->msg_enable;
}

static void greth_set_msglevel(struct net_device *dev, u32 value)
{
	struct greth_private *greth = netdev_priv(dev);
	greth->msg_enable = value;
}

static int greth_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
	struct greth_private *greth = netdev_priv(dev);
	struct phy_device *phy = greth->phy;

	if (!phy)
		return -ENODEV;

	return phy_ethtool_gset(phy, cmd);
}

static int greth_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
	struct greth_private *greth = netdev_priv(dev);
	struct phy_device *phy = greth->phy;

	if (!phy)
		return -ENODEV;

	return phy_ethtool_sset(phy, cmd);
}

static int greth_get_regs_len(struct net_device *dev)
{
	return sizeof(struct greth_regs);
}

static void greth_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
{
	struct greth_private *greth = netdev_priv(dev);

	strncpy(info->driver, dev_driver_string(greth->dev), 32);
	strncpy(info->version, "revision: 1.0", 32);
	strncpy(info->bus_info, greth->dev->bus->name, 32);
	strncpy(info->fw_version, "N/A", 32);
	info->eedump_len = 0;
	info->regdump_len = sizeof(struct greth_regs);
}

static void greth_get_regs(struct net_device *dev, struct ethtool_regs *regs, void *p)
{
	int i;
	struct greth_private *greth = netdev_priv(dev);
	u32 __iomem *greth_regs = (u32 __iomem *) greth->regs;
	u32 *buff = p;

	for (i = 0; i < sizeof(struct greth_regs) / sizeof(u32); i++)
		buff[i] = greth_read_bd(&greth_regs[i]);
}

static u32 greth_get_rx_csum(struct net_device *dev)
{
	struct greth_private *greth = netdev_priv(dev);
	return (greth->flags & GRETH_FLAG_RX_CSUM) != 0;
}

static int greth_set_rx_csum(struct net_device *dev, u32 data)
{
	struct greth_private *greth = netdev_priv(dev);

	spin_lock_bh(&greth->devlock);

	if (data)
		greth->flags |= GRETH_FLAG_RX_CSUM;
	else
		greth->flags &= ~GRETH_FLAG_RX_CSUM;

	spin_unlock_bh(&greth->devlock);

	return 0;
}

static u32 greth_get_tx_csum(struct net_device *dev)
{
	return (dev->features & NETIF_F_IP_CSUM) != 0;
}

static int greth_set_tx_csum(struct net_device *dev, u32 data)
{
	netif_tx_lock_bh(dev);
	ethtool_op_set_tx_csum(dev, data);
	netif_tx_unlock_bh(dev);
	return 0;
}

static const struct ethtool_ops greth_ethtool_ops = {
	.get_msglevel		= greth_get_msglevel,
	.set_msglevel		= greth_set_msglevel,
	.get_settings		= greth_get_settings,
	.set_settings		= greth_set_settings,
	.get_drvinfo		= greth_get_drvinfo,
	.get_regs_len		= greth_get_regs_len,
	.get_regs		= greth_get_regs,
	.get_rx_csum		= greth_get_rx_csum,
	.set_rx_csum		= greth_set_rx_csum,
	.get_tx_csum		= greth_get_tx_csum,
	.set_tx_csum		= greth_set_tx_csum,
	.get_link		= ethtool_op_get_link,
};
static struct net_device_ops greth_netdev_ops = {
	.ndo_open		= greth_open,
	.ndo_stop		= greth_close,
	.ndo_start_xmit		= greth_start_xmit,
	.ndo_set_mac_address	= greth_set_mac_add,
	.ndo_validate_addr	= eth_validate_addr,
};

static inline int wait_for_mdio(struct greth_private *greth)
{
	unsigned long timeout = jiffies + 4*HZ/100;
	while (GRETH_REGLOAD(greth->regs->mdio) & GRETH_MII_BUSY) {
		if (time_after(jiffies, timeout))
			return 0;
	}
	return 1;
}
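
/*
 * wait_for_mdio() busy-waits on the MDIO BUSY flag for at most 4*HZ/100
 * jiffies (roughly 40 ms) and reports success or timeout to the
 * mdio_read/mdio_write callbacks below.
 */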
static int greth_mdio_read(struct mii_bus *bus, int phy, int reg)
{
	struct greth_private *greth = bus->priv;
	int data;

	if (!wait_for_mdio(greth))
		return -EBUSY;

	GRETH_REGSAVE(greth->regs->mdio, ((phy & 0x1F) << 11) | ((reg & 0x1F) << 6) | 2);

	if (!wait_for_mdio(greth))
		return -EBUSY;

	if (!(GRETH_REGLOAD(greth->regs->mdio) & GRETH_MII_NVALID)) {
		data = (GRETH_REGLOAD(greth->regs->mdio) >> 16) & 0xFFFF;
		return data;

	} else {
		return -1;
	}
}

static int greth_mdio_write(struct mii_bus *bus, int phy, int reg, u16 val)
{
	struct greth_private *greth = bus->priv;

	if (!wait_for_mdio(greth))
		return -EBUSY;

	GRETH_REGSAVE(greth->regs->mdio,
		      ((val & 0xFFFF) << 16) | ((phy & 0x1F) << 11) | ((reg & 0x1F) << 6) | 1);

	if (!wait_for_mdio(greth))
		return -EBUSY;

	return 0;
}

static int greth_mdio_reset(struct mii_bus *bus)
{
	return 0;
}
static void greth_link_change(struct net_device *dev)
{
	struct greth_private *greth = netdev_priv(dev);
	struct phy_device *phydev = greth->phy;
	unsigned long flags;

	int status_change = 0;

	spin_lock_irqsave(&greth->devlock, flags);

	if (phydev->link) {

		if ((greth->speed != phydev->speed) || (greth->duplex != phydev->duplex)) {

			GRETH_REGANDIN(greth->regs->control,
				       ~(GRETH_CTRL_FD | GRETH_CTRL_SP | GRETH_CTRL_GB));

			if (phydev->duplex)
				GRETH_REGORIN(greth->regs->control, GRETH_CTRL_FD);

			if (phydev->speed == SPEED_100)
				GRETH_REGORIN(greth->regs->control, GRETH_CTRL_SP);
			else if (phydev->speed == SPEED_1000)
				GRETH_REGORIN(greth->regs->control, GRETH_CTRL_GB);

			greth->speed = phydev->speed;
			greth->duplex = phydev->duplex;
			status_change = 1;
		}
	}

	if (phydev->link != greth->link) {
		if (!phydev->link) {
			greth->speed = 0;
			greth->duplex = -1;
		}
		greth->link = phydev->link;

		status_change = 1;
	}

	spin_unlock_irqrestore(&greth->devlock, flags);

	if (status_change) {
		if (phydev->link)
			pr_debug("%s: link up (%d/%s)\n",
				 dev->name, phydev->speed,
				 DUPLEX_FULL == phydev->duplex ? "Full" : "Half");
		else
			pr_debug("%s: link down\n", dev->name);
	}
}
static int greth_mdio_probe(struct net_device *dev)
{
	struct greth_private *greth = netdev_priv(dev);
	struct phy_device *phy = NULL;
	int ret;

	/* Find the first PHY */
	phy = phy_find_first(greth->mdio);

	if (!phy) {
		if (netif_msg_probe(greth))
			dev_err(&dev->dev, "no PHY found\n");
		return -ENXIO;
	}

	ret = phy_connect_direct(dev, phy, &greth_link_change,
				 0, greth->gbit_mac ?
				 PHY_INTERFACE_MODE_GMII :
				 PHY_INTERFACE_MODE_MII);
	if (ret) {
		if (netif_msg_ifup(greth))
			dev_err(&dev->dev, "could not attach to PHY\n");
		return ret;
	}

	if (greth->gbit_mac)
		phy->supported &= PHY_GBIT_FEATURES;
	else
		phy->supported &= PHY_BASIC_FEATURES;

	phy->advertising = phy->supported;

	greth->link = 0;
	greth->speed = 0;
	greth->duplex = -1;
	greth->phy = phy;

	return 0;
}

static inline int phy_aneg_done(struct phy_device *phydev)
{
	int retval;

	retval = phy_read(phydev, MII_BMSR);

	return (retval < 0) ? retval : (retval & BMSR_ANEGCOMPLETE);
}
static int greth_mdio_init(struct greth_private *greth)
{
	int ret, phy;
	unsigned long timeout;

	greth->mdio = mdiobus_alloc();
	if (!greth->mdio) {
		return -ENOMEM;
	}

	greth->mdio->name = "greth-mdio";
	snprintf(greth->mdio->id, MII_BUS_ID_SIZE, "%s-%d", greth->mdio->name, greth->irq);
	greth->mdio->read = greth_mdio_read;
	greth->mdio->write = greth_mdio_write;
	greth->mdio->reset = greth_mdio_reset;
	greth->mdio->priv = greth;

	greth->mdio->irq = greth->mdio_irqs;

	for (phy = 0; phy < PHY_MAX_ADDR; phy++)
		greth->mdio->irq[phy] = PHY_POLL;

	ret = mdiobus_register(greth->mdio);
	if (ret) {
		goto error;
	}

	ret = greth_mdio_probe(greth->netdev);
	if (ret) {
		if (netif_msg_probe(greth))
			dev_err(&greth->netdev->dev, "failed to probe MDIO bus\n");
		goto unreg_mdio;
	}

	phy_start(greth->phy);

	/* If Ethernet debug link is used make autoneg happen right away */
	if (greth->edcl && greth_edcl == 1) {
		phy_start_aneg(greth->phy);
		timeout = jiffies + 6*HZ;
		while (!phy_aneg_done(greth->phy) && time_before(jiffies, timeout)) {
		}
		genphy_read_status(greth->phy);
		greth_link_change(greth->netdev);
	}

	return 0;

unreg_mdio:
	mdiobus_unregister(greth->mdio);
error:
	mdiobus_free(greth->mdio);
	return ret;
}
/* Initialize the GRETH MAC */
static int __devinit greth_of_probe(struct of_device *ofdev, const struct of_device_id *match)
{
	struct net_device *dev;
	struct greth_private *greth;
	struct greth_regs *regs;

	int i;
	int err;
	int tmp;
	unsigned long timeout;

	dev = alloc_etherdev(sizeof(struct greth_private));

	if (dev == NULL)
		return -ENOMEM;

	greth = netdev_priv(dev);
	greth->netdev = dev;
	greth->dev = &ofdev->dev;

	if (greth_debug > 0)
		greth->msg_enable = greth_debug;
	else
		greth->msg_enable = GRETH_DEF_MSG_ENABLE;

	spin_lock_init(&greth->devlock);

	greth->regs = of_ioremap(&ofdev->resource[0], 0,
				 resource_size(&ofdev->resource[0]),
				 "grlib-greth regs");

	if (greth->regs == NULL) {
		if (netif_msg_probe(greth))
			dev_err(greth->dev, "ioremap failure.\n");
		err = -EIO;
		goto error1;
	}

	regs = (struct greth_regs *) greth->regs;
	greth->irq = ofdev->irqs[0];

	dev_set_drvdata(greth->dev, dev);
	SET_NETDEV_DEV(dev, greth->dev);

	if (netif_msg_probe(greth))
		dev_dbg(greth->dev, "resetting controller.\n");

	/* Reset the controller. */
	GRETH_REGSAVE(regs->control, GRETH_RESET);

	/* Wait for MAC to reset itself */
	timeout = jiffies + HZ/100;
	while (GRETH_REGLOAD(regs->control) & GRETH_RESET) {
		if (time_after(jiffies, timeout)) {
			err = -EIO;
			if (netif_msg_probe(greth))
				dev_err(greth->dev, "timeout when waiting for reset.\n");
			goto error2;
		}
	}

	/* Get default PHY address */
	greth->phyaddr = (GRETH_REGLOAD(regs->mdio) >> 11) & 0x1F;

	/* Check if we have GBIT capable MAC */
	tmp = GRETH_REGLOAD(regs->control);
	greth->gbit_mac = (tmp >> 27) & 1;

	/* Check for multicast capability */
	greth->multicast = (tmp >> 25) & 1;

	greth->edcl = (tmp >> 31) & 1;

	/* If we have EDCL we disable the EDCL speed-duplex FSM so
	 * it doesn't interfere with the software */
	if (greth->edcl != 0)
		GRETH_REGORIN(regs->control, GRETH_CTRL_DISDUPLEX);

	/* Check if MAC can handle MDIO interrupts */
	greth->mdio_int_en = (tmp >> 26) & 1;

	err = greth_mdio_init(greth);
	if (err) {
		if (netif_msg_probe(greth))
			dev_err(greth->dev, "failed to register MDIO bus\n");
		goto error2;
	}

	/* Allocate TX descriptor ring in coherent memory */
	greth->tx_bd_base = (struct greth_bd *) dma_alloc_coherent(greth->dev,
								   1024,
								   &greth->tx_bd_base_phys,
								   GFP_KERNEL);

	if (!greth->tx_bd_base) {
		if (netif_msg_probe(greth))
			dev_err(&dev->dev, "could not allocate descriptor memory.\n");
		err = -ENOMEM;
		goto error3;
	}

	memset(greth->tx_bd_base, 0, 1024);

	/* Allocate RX descriptor ring in coherent memory */
	greth->rx_bd_base = (struct greth_bd *) dma_alloc_coherent(greth->dev,
								   1024,
								   &greth->rx_bd_base_phys,
								   GFP_KERNEL);

	if (!greth->rx_bd_base) {
		if (netif_msg_probe(greth))
			dev_err(greth->dev, "could not allocate descriptor memory.\n");
		err = -ENOMEM;
		goto error4;
	}

	memset(greth->rx_bd_base, 0, 1024);

	/* Get MAC address from: module param, OF property or ID prom */
	for (i = 0; i < 6; i++) {
		if (macaddr[i] != 0)
			break;
	}
	if (i == 6) {
		const unsigned char *addr;
		int len;
		addr = of_get_property(ofdev->node, "local-mac-address", &len);
		if (addr != NULL && len == 6) {
			for (i = 0; i < 6; i++)
				macaddr[i] = (unsigned int) addr[i];
		} else {
#ifdef CONFIG_SPARC
			for (i = 0; i < 6; i++)
				macaddr[i] = (unsigned int) idprom->id_ethaddr[i];
#endif
		}
	}

	for (i = 0; i < 6; i++)
		dev->dev_addr[i] = macaddr[i];

	macaddr[5]++;
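	/*
	 * macaddr[] is module-global state: bumping the last octet here
	 * gives each additionally probed GRETH instance its own address
	 * when no per-device address was supplied.
	 */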
	if (!is_valid_ether_addr(&dev->dev_addr[0])) {
		if (netif_msg_probe(greth))
			dev_err(greth->dev, "no valid ethernet address, aborting.\n");
		err = -EINVAL;
		goto error5;
	}

	GRETH_REGSAVE(regs->esa_msb, dev->dev_addr[0] << 8 | dev->dev_addr[1]);
	GRETH_REGSAVE(regs->esa_lsb, dev->dev_addr[2] << 24 | dev->dev_addr[3] << 16 |
		      dev->dev_addr[4] << 8 | dev->dev_addr[5]);

	/* Clear all pending interrupts except PHY irq */
	GRETH_REGSAVE(regs->status, 0xFF);

	if (greth->gbit_mac) {
		dev->features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_HIGHDMA;
		greth_netdev_ops.ndo_start_xmit = greth_start_xmit_gbit;
		greth->flags = GRETH_FLAG_RX_CSUM;
	}

	if (greth->multicast) {
		greth_netdev_ops.ndo_set_multicast_list = greth_set_multicast_list;
		dev->flags |= IFF_MULTICAST;
	} else {
		dev->flags &= ~IFF_MULTICAST;
	}

	dev->netdev_ops = &greth_netdev_ops;
	dev->ethtool_ops = &greth_ethtool_ops;

	if (register_netdev(dev)) {
		if (netif_msg_probe(greth))
			dev_err(greth->dev, "netdevice registration failed.\n");
		err = -ENOMEM;
		goto error5;
	}

	/* setup NAPI */
	memset(&greth->napi, 0, sizeof(greth->napi));
	netif_napi_add(dev, &greth->napi, greth_poll, 64);

	return 0;

error5:
	dma_free_coherent(greth->dev, 1024, greth->rx_bd_base, greth->rx_bd_base_phys);
error4:
	dma_free_coherent(greth->dev, 1024, greth->tx_bd_base, greth->tx_bd_base_phys);
error3:
	mdiobus_unregister(greth->mdio);
error2:
	of_iounmap(&ofdev->resource[0], greth->regs, resource_size(&ofdev->resource[0]));
error1:
	free_netdev(dev);
	return err;
}
static int __devexit greth_of_remove(struct of_device *of_dev)
{
	struct net_device *ndev = dev_get_drvdata(&of_dev->dev);
	struct greth_private *greth = netdev_priv(ndev);

	/* Free descriptor areas */
	dma_free_coherent(&of_dev->dev, 1024, greth->rx_bd_base, greth->rx_bd_base_phys);

	dma_free_coherent(&of_dev->dev, 1024, greth->tx_bd_base, greth->tx_bd_base_phys);

	dev_set_drvdata(&of_dev->dev, NULL);

	if (greth->phy)
		phy_stop(greth->phy);
	mdiobus_unregister(greth->mdio);

	unregister_netdev(ndev);
	free_netdev(ndev);

	of_iounmap(&of_dev->resource[0], greth->regs, resource_size(&of_dev->resource[0]));

	return 0;
}
static struct of_device_id greth_of_match[] = {
	{
	 .name = "GAISLER_ETHMAC",
	 },
	{},
};

MODULE_DEVICE_TABLE(of, greth_of_match);

static struct of_platform_driver greth_of_driver = {
	.name = "grlib-greth",
	.match_table = greth_of_match,
	.probe = greth_of_probe,
	.remove = __devexit_p(greth_of_remove),
	.driver = {
		   .owner = THIS_MODULE,
		   .name = "grlib-greth",
		   },
};

static int __init greth_init(void)
{
	return of_register_platform_driver(&greth_of_driver);
}

static void __exit greth_cleanup(void)
{
	of_unregister_platform_driver(&greth_of_driver);
}

module_init(greth_init);
module_exit(greth_cleanup);

MODULE_AUTHOR("Aeroflex Gaisler AB.");
MODULE_DESCRIPTION("Aeroflex Gaisler Ethernet MAC driver");
MODULE_LICENSE("GPL");