/*
 * Dave DNET Ethernet Controller driver
 *
 * Copyright (C) 2008 Dave S.r.l. <www.dave.eu>
 * Copyright (C) 2009 Ilya Yanok, Emcraft Systems Ltd, <yanok@emcraft.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/io.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/slab.h>
#include <linux/delay.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/dma-mapping.h>
#include <linux/platform_device.h>
#include <linux/phy.h>

#include "dnet.h"

#undef DEBUG
/* function for reading internal MAC register */
static u16 dnet_readw_mac(struct dnet *bp, u16 reg)
{
	u16 data_read;

	/* issue a read */
	dnet_writel(bp, reg, MACREG_ADDR);

	/* since a read/write op to the MAC is very slow,
	 * we must wait before reading the data */
	ndelay(500);

	/* read data read from the MAC register */
	data_read = dnet_readl(bp, MACREG_DATA);

	/* all done */
	return data_read;
}
/* function for writing internal MAC register */
static void dnet_writew_mac(struct dnet *bp, u16 reg, u16 val)
{
	/* load data to write */
	dnet_writel(bp, val, MACREG_DATA);

	/* issue a write */
	dnet_writel(bp, reg | DNET_INTERNAL_WRITE, MACREG_ADDR);

	/* since a read/write op to the MAC is very slow,
	 * we must wait before exiting */
	ndelay(500);
}
static void __dnet_set_hwaddr(struct dnet *bp)
{
	u16 tmp;

	tmp = be16_to_cpup((__be16 *)bp->dev->dev_addr);
	dnet_writew_mac(bp, DNET_INTERNAL_MAC_ADDR_0_REG, tmp);
	tmp = be16_to_cpup((__be16 *)(bp->dev->dev_addr + 2));
	dnet_writew_mac(bp, DNET_INTERNAL_MAC_ADDR_1_REG, tmp);
	tmp = be16_to_cpup((__be16 *)(bp->dev->dev_addr + 4));
	dnet_writew_mac(bp, DNET_INTERNAL_MAC_ADDR_2_REG, tmp);
}
static void __devinit dnet_get_hwaddr(struct dnet *bp)
{
	u16 tmp;
	u8 addr[6];

	/*
	 * from MAC docs:
	 * "Note that the MAC address is stored in the registers in Hexadecimal
	 * form. For example, to set the MAC Address to: AC-DE-48-00-00-80
	 * would require writing 0xAC (octet 0) to address 0x0B (high byte of
	 * Mac_addr[15:0]), 0xDE (octet 1) to address 0x0A (Low byte of
	 * Mac_addr[15:0]), 0x48 (octet 2) to address 0x0D (high byte of
	 * Mac_addr[15:0]), 0x00 (octet 3) to address 0x0C (Low byte of
	 * Mac_addr[15:0]), 0x00 (octet 4) to address 0x0F (high byte of
	 * Mac_addr[15:0]), and 0x80 (octet 5) to address 0x0E (Low byte of
	 * Mac_addr[15:0])."
	 */
	tmp = dnet_readw_mac(bp, DNET_INTERNAL_MAC_ADDR_0_REG);
	*((__be16 *)addr) = cpu_to_be16(tmp);
	tmp = dnet_readw_mac(bp, DNET_INTERNAL_MAC_ADDR_1_REG);
	*((__be16 *)(addr + 2)) = cpu_to_be16(tmp);
	tmp = dnet_readw_mac(bp, DNET_INTERNAL_MAC_ADDR_2_REG);
	*((__be16 *)(addr + 4)) = cpu_to_be16(tmp);

	if (is_valid_ether_addr(addr))
		memcpy(bp->dev->dev_addr, addr, sizeof(addr));
}
static int dnet_mdio_read(struct mii_bus *bus, int mii_id, int regnum)
{
	struct dnet *bp = bus->priv;
	u16 value;

	while (!(dnet_readw_mac(bp, DNET_INTERNAL_GMII_MNG_CTL_REG)
				& DNET_INTERNAL_GMII_MNG_CMD_FIN))
		cpu_relax();

	/* only 5 bits allowed for phy-addr and reg_offset */
	mii_id &= 0x1f;
	regnum &= 0x1f;

	/* prepare reg_value for a read */
	value = (mii_id << 8);
	value |= regnum;

	/* write control word */
	dnet_writew_mac(bp, DNET_INTERNAL_GMII_MNG_CTL_REG, value);

	/* wait for end of transfer */
	while (!(dnet_readw_mac(bp, DNET_INTERNAL_GMII_MNG_CTL_REG)
				& DNET_INTERNAL_GMII_MNG_CMD_FIN))
		cpu_relax();

	value = dnet_readw_mac(bp, DNET_INTERNAL_GMII_MNG_DAT_REG);

	pr_debug("mdio_read %02x:%02x <- %04x\n", mii_id, regnum, value);

	return value;
}
static int dnet_mdio_write(struct mii_bus *bus, int mii_id, int regnum,
			   u16 value)
{
	struct dnet *bp = bus->priv;
	u16 tmp;

	pr_debug("mdio_write %02x:%02x <- %04x\n", mii_id, regnum, value);

	while (!(dnet_readw_mac(bp, DNET_INTERNAL_GMII_MNG_CTL_REG)
				& DNET_INTERNAL_GMII_MNG_CMD_FIN))
		cpu_relax();

	/* prepare for a write operation */
	tmp = (1 << 13);

	/* only 5 bits allowed for phy-addr and reg_offset */
	mii_id &= 0x1f;
	regnum &= 0x1f;

	/* only 16 bits on data */
	value &= 0xffff;

	/* prepare reg_value for a write */
	tmp |= (mii_id << 8);
	tmp |= regnum;

	/* write data to write first */
	dnet_writew_mac(bp, DNET_INTERNAL_GMII_MNG_DAT_REG, value);

	/* write control word */
	dnet_writew_mac(bp, DNET_INTERNAL_GMII_MNG_CTL_REG, tmp);

	while (!(dnet_readw_mac(bp, DNET_INTERNAL_GMII_MNG_CTL_REG)
				& DNET_INTERNAL_GMII_MNG_CMD_FIN))
		cpu_relax();

	return 0;
}
static int dnet_mdio_reset(struct mii_bus *bus)
{
	return 0;
}
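/*
 * Called by phylib when the PHY link state changes: mirror the new
 * speed/duplex/link settings into the MAC mode and rx/tx control registers.
 */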
static void dnet_handle_link_change(struct net_device *dev)
{
	struct dnet *bp = netdev_priv(dev);
	struct phy_device *phydev = bp->phy_dev;
	unsigned long flags;
	u32 mode_reg, ctl_reg;

	int status_change = 0;

	spin_lock_irqsave(&bp->lock, flags);

	mode_reg = dnet_readw_mac(bp, DNET_INTERNAL_MODE_REG);
	ctl_reg = dnet_readw_mac(bp, DNET_INTERNAL_RXTX_CONTROL_REG);

	if (phydev->link) {
		if (bp->duplex != phydev->duplex) {
			if (phydev->duplex)
				ctl_reg &=
				    ~(DNET_INTERNAL_RXTX_CONTROL_ENABLEHALFDUP);
			else
				ctl_reg |=
				    DNET_INTERNAL_RXTX_CONTROL_ENABLEHALFDUP;

			bp->duplex = phydev->duplex;
			status_change = 1;
		}

		if (bp->speed != phydev->speed) {
			status_change = 1;
			switch (phydev->speed) {
			case 1000:
				mode_reg |= DNET_INTERNAL_MODE_GBITEN;
				break;
			case 100:
			case 10:
				mode_reg &= ~DNET_INTERNAL_MODE_GBITEN;
				break;
			default:
				printk(KERN_WARNING
				       "%s: Ack! Speed (%d) is not "
				       "10/100/1000!\n", dev->name,
				       phydev->speed);
				break;
			}
			bp->speed = phydev->speed;
		}
	}

	if (phydev->link != bp->link) {
		if (phydev->link) {
			mode_reg |=
			    (DNET_INTERNAL_MODE_RXEN | DNET_INTERNAL_MODE_TXEN);
		} else {
			mode_reg &=
			    ~(DNET_INTERNAL_MODE_RXEN |
			      DNET_INTERNAL_MODE_TXEN);
			bp->speed = 0;
			bp->duplex = -1;
		}
		bp->link = phydev->link;

		status_change = 1;
	}

	if (status_change) {
		dnet_writew_mac(bp, DNET_INTERNAL_RXTX_CONTROL_REG, ctl_reg);
		dnet_writew_mac(bp, DNET_INTERNAL_MODE_REG, mode_reg);
	}

	spin_unlock_irqrestore(&bp->lock, flags);

	if (status_change) {
		if (phydev->link)
			printk(KERN_INFO "%s: link up (%d/%s)\n",
			       dev->name, phydev->speed,
			       DUPLEX_FULL == phydev->duplex ? "Full" : "Half");
		else
			printk(KERN_INFO "%s: link down\n", dev->name);
	}
}
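/* locate the PHY on the MDIO bus and connect it to the MAC with the proper
 * interface mode (RMII or MII) */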
static int dnet_mii_probe(struct net_device *dev)
{
	struct dnet *bp = netdev_priv(dev);
	struct phy_device *phydev = NULL;
	int phy_addr;

	/* find the first phy */
	for (phy_addr = 0; phy_addr < PHY_MAX_ADDR; phy_addr++) {
		if (bp->mii_bus->phy_map[phy_addr]) {
			phydev = bp->mii_bus->phy_map[phy_addr];
			break;
		}
	}

	if (!phydev) {
		printk(KERN_ERR "%s: no PHY found\n", dev->name);
		return -ENODEV;
	}

	/* TODO : add pin_irq */

	/* attach the mac to the phy */
	if (bp->capabilities & DNET_HAS_RMII) {
		phydev = phy_connect(dev, dev_name(&phydev->dev),
				     &dnet_handle_link_change, 0,
				     PHY_INTERFACE_MODE_RMII);
	} else {
		phydev = phy_connect(dev, dev_name(&phydev->dev),
				     &dnet_handle_link_change, 0,
				     PHY_INTERFACE_MODE_MII);
	}

	if (IS_ERR(phydev)) {
		printk(KERN_ERR "%s: Could not attach to PHY\n", dev->name);
		return PTR_ERR(phydev);
	}

	/* mask with MAC supported features */
	if (bp->capabilities & DNET_HAS_GIGABIT)
		phydev->supported &= PHY_GBIT_FEATURES;
	else
		phydev->supported &= PHY_BASIC_FEATURES;

	phydev->supported |= SUPPORTED_Asym_Pause | SUPPORTED_Pause;

	phydev->advertising = phydev->supported;

	bp->link = 0;
	bp->speed = 0;
	bp->duplex = -1;
	bp->phy_dev = phydev;

	return 0;
}
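/* allocate and register the MDIO bus, then probe for the attached PHY */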
static int dnet_mii_init(struct dnet *bp)
{
	int err, i;

	bp->mii_bus = mdiobus_alloc();
	if (bp->mii_bus == NULL)
		return -ENOMEM;

	bp->mii_bus->name = "dnet_mii_bus";
	bp->mii_bus->read = &dnet_mdio_read;
	bp->mii_bus->write = &dnet_mdio_write;
	bp->mii_bus->reset = &dnet_mdio_reset;

	snprintf(bp->mii_bus->id, MII_BUS_ID_SIZE, "%x", 0);

	bp->mii_bus->priv = bp;

	bp->mii_bus->irq = kmalloc(sizeof(int) * PHY_MAX_ADDR, GFP_KERNEL);
	if (!bp->mii_bus->irq) {
		err = -ENOMEM;
		goto err_out;
	}

	for (i = 0; i < PHY_MAX_ADDR; i++)
		bp->mii_bus->irq[i] = PHY_POLL;

	if (mdiobus_register(bp->mii_bus)) {
		err = -ENXIO;
		goto err_out_free_mdio_irq;
	}

	if (dnet_mii_probe(bp->dev) != 0) {
		err = -ENXIO;
		goto err_out_unregister_bus;
	}

	return 0;

err_out_unregister_bus:
	mdiobus_unregister(bp->mii_bus);
err_out_free_mdio_irq:
	kfree(bp->mii_bus->irq);
err_out:
	mdiobus_free(bp->mii_bus);
	return err;
}
/* For Neptune board: LINK1000 as Link LED and TX as activity LED */
static int dnet_phy_marvell_fixup(struct phy_device *phydev)
{
	return phy_write(phydev, 0x18, 0x4148);
}
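/* accumulate the hardware RX/TX counters into the software statistics */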
static void dnet_update_stats(struct dnet *bp)
{
	u32 __iomem *reg = bp->regs + DNET_RX_PKT_IGNR_CNT;
	u32 *p = &bp->hw_stats.rx_pkt_ignr;
	u32 *end = &bp->hw_stats.rx_byte + 1;

	WARN_ON((unsigned long)(end - p - 1) !=
		(DNET_RX_BYTE_CNT - DNET_RX_PKT_IGNR_CNT) / 4);

	for (; p < end; p++, reg++)
		*p += readl(reg);

	reg = bp->regs + DNET_TX_UNICAST_CNT;
	p = &bp->hw_stats.tx_unicast;
	end = &bp->hw_stats.tx_byte + 1;

	WARN_ON((unsigned long)(end - p - 1) !=
		(DNET_TX_BYTE_CNT - DNET_TX_UNICAST_CNT) / 4);

	for (; p < end; p++, reg++)
		*p += readl(reg);
}
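/*
 * NAPI poll handler: copy received frames out of the RX FIFO until the
 * budget is exhausted or the FIFO is empty, then re-enable RX interrupts.
 */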
static int dnet_poll(struct napi_struct *napi, int budget)
{
	struct dnet *bp = container_of(napi, struct dnet, napi);
	struct net_device *dev = bp->dev;
	int npackets = 0;
	unsigned int pkt_len;
	struct sk_buff *skb;
	unsigned int *data_ptr;
	u32 int_enable;
	u32 cmd_word;
	int i;

	while (npackets < budget) {
		/*
		 * break out of while loop if there are no more
		 * packets waiting
		 */
		if (!(dnet_readl(bp, RX_FIFO_WCNT) >> 16)) {
			napi_complete(napi);
			int_enable = dnet_readl(bp, INTR_ENB);
			int_enable |= DNET_INTR_SRC_RX_CMDFIFOAF;
			dnet_writel(bp, int_enable, INTR_ENB);
			return 0;
		}

		cmd_word = dnet_readl(bp, RX_LEN_FIFO);
		pkt_len = cmd_word & 0xFFFF;

		if (cmd_word & 0xDF180000)
			printk(KERN_ERR "%s packet receive error %x\n",
			       __func__, cmd_word);

		skb = dev_alloc_skb(pkt_len + 5);
		if (skb != NULL) {
			/* Align IP on 16 byte boundaries */
			skb_reserve(skb, 2);
			/*
			 * 'skb_put()' points to the start of sk_buff
			 * data area.
			 */
			data_ptr = (unsigned int *)skb_put(skb, pkt_len);
			for (i = 0; i < (pkt_len + 3) >> 2; i++)
				*data_ptr++ = dnet_readl(bp, RX_DATA_FIFO);
			skb->protocol = eth_type_trans(skb, dev);
			netif_receive_skb(skb);
			npackets++;
		} else
			printk(KERN_NOTICE
			       "%s: No memory to allocate a sk_buff of "
			       "size %u.\n", dev->name, pkt_len);
	}

	budget -= npackets;

	if (npackets < budget) {
		/* We processed all packets available. Tell NAPI it can
		 * stop polling then re-enable rx interrupts */
		napi_complete(napi);
		int_enable = dnet_readl(bp, INTR_ENB);
		int_enable |= DNET_INTR_SRC_RX_CMDFIFOAF;
		dnet_writel(bp, int_enable, INTR_ENB);
		return 0;
	}

	/* There are still packets waiting */
	return 1;
}
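/*
 * Interrupt handler: wake the TX queue when the TX FIFO drains, flush the
 * FIFOs on RX/TX errors, and hand RX work over to NAPI.
 */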
static irqreturn_t dnet_interrupt(int irq, void *dev_id)
{
	struct net_device *dev = dev_id;
	struct dnet *bp = netdev_priv(dev);
	u32 int_src, int_enable, int_current;
	unsigned long flags;
	unsigned int handled = 0;

	spin_lock_irqsave(&bp->lock, flags);

	/* read and clear the DNET irq (clear on read) */
	int_src = dnet_readl(bp, INTR_SRC);
	int_enable = dnet_readl(bp, INTR_ENB);
	int_current = int_src & int_enable;

	/* restart the queue if we had stopped it for TX fifo almost full */
	if (int_current & DNET_INTR_SRC_TX_FIFOAE) {
		int_enable = dnet_readl(bp, INTR_ENB);
		int_enable &= ~DNET_INTR_ENB_TX_FIFOAE;
		dnet_writel(bp, int_enable, INTR_ENB);
		netif_wake_queue(dev);
		handled = 1;
	}

	/* RX FIFO error checking */
	if (int_current &
	    (DNET_INTR_SRC_RX_CMDFIFOFF | DNET_INTR_SRC_RX_DATAFIFOFF)) {
		printk(KERN_ERR "%s: RX fifo error %x, irq %x\n", __func__,
		       dnet_readl(bp, RX_STATUS), int_current);
		/* we can only flush the RX FIFOs */
		dnet_writel(bp, DNET_SYS_CTL_RXFIFOFLUSH, SYS_CTL);
		ndelay(500);
		dnet_writel(bp, 0, SYS_CTL);
		handled = 1;
	}

	/* TX FIFO error checking */
	if (int_current &
	    (DNET_INTR_SRC_TX_FIFOFULL | DNET_INTR_SRC_TX_DISCFRM)) {
		printk(KERN_ERR "%s: TX fifo error %x, irq %x\n", __func__,
		       dnet_readl(bp, TX_STATUS), int_current);
		/* we can only flush the TX FIFOs */
		dnet_writel(bp, DNET_SYS_CTL_TXFIFOFLUSH, SYS_CTL);
		ndelay(500);
		dnet_writel(bp, 0, SYS_CTL);
		handled = 1;
	}

	if (int_current & DNET_INTR_SRC_RX_CMDFIFOAF) {
		if (napi_schedule_prep(&bp->napi)) {
			/*
			 * There's no point taking any more interrupts
			 * until we have processed the buffers
			 */
			/* Disable Rx interrupts and schedule NAPI poll */
			int_enable = dnet_readl(bp, INTR_ENB);
			int_enable &= ~DNET_INTR_SRC_RX_CMDFIFOAF;
			dnet_writel(bp, int_enable, INTR_ENB);
			__napi_schedule(&bp->napi);
		}
		handled = 1;
	}

	if (!handled)
		pr_debug("%s: irq %x remains\n", __func__, int_current);

	spin_unlock_irqrestore(&bp->lock, flags);

	return IRQ_RETVAL(handled);
}
#ifdef DEBUG
static inline void dnet_print_skb(struct sk_buff *skb)
{
	int k;
	printk(KERN_DEBUG PFX "data:");
	for (k = 0; k < skb->len; k++)
		printk(" %02x", (unsigned int)skb->data[k]);
	printk("\n");
}
#else
#define dnet_print_skb(skb)	do {} while (0)
#endif
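/*
 * Transmit path: the frame is copied word by word into the TX data FIFO
 * (no DMA), then the length/command word is pushed to start transmission.
 */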
static netdev_tx_t dnet_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct dnet *bp = netdev_priv(dev);
	u32 tx_status, irq_enable;
	unsigned int len, i, tx_cmd, wrsz;
	unsigned long flags;
	unsigned int *bufp;

	tx_status = dnet_readl(bp, TX_STATUS);

	pr_debug("start_xmit: len %u head %p data %p\n",
		 skb->len, skb->head, skb->data);
	dnet_print_skb(skb);

	/* frame size (words) */
	len = (skb->len + 3) >> 2;

	spin_lock_irqsave(&bp->lock, flags);

	tx_status = dnet_readl(bp, TX_STATUS);

	bufp = (unsigned int *)(((unsigned long) skb->data) & ~0x3UL);
	wrsz = (u32) skb->len + 3;
	wrsz += ((unsigned long) skb->data) & 0x3;
	wrsz >>= 2;
	tx_cmd = ((((unsigned long)(skb->data)) & 0x03) << 16) | (u32) skb->len;

	/* check if there is enough room for the current frame */
	if (wrsz < (DNET_FIFO_SIZE - dnet_readl(bp, TX_FIFO_WCNT))) {
		for (i = 0; i < wrsz; i++)
			dnet_writel(bp, *bufp++, TX_DATA_FIFO);

		/*
		 * inform MAC that a packet's written and ready to be
		 * shipped out
		 */
		dnet_writel(bp, tx_cmd, TX_LEN_FIFO);
	}

	if (dnet_readl(bp, TX_FIFO_WCNT) > DNET_FIFO_TX_DATA_AF_TH) {
		netif_stop_queue(dev);
		tx_status = dnet_readl(bp, INTR_SRC);
		irq_enable = dnet_readl(bp, INTR_ENB);
		irq_enable |= DNET_INTR_ENB_TX_FIFOAE;
		dnet_writel(bp, irq_enable, INTR_ENB);
	}

	skb_tx_timestamp(skb);

	/* free the buffer */
	dev_kfree_skb(skb);

	spin_unlock_irqrestore(&bp->lock, flags);

	return NETDEV_TX_OK;
}
static void dnet_reset_hw(struct dnet *bp)
{
	/* put ts_mac in IDLE state i.e. disable rx/tx */
	dnet_writew_mac(bp, DNET_INTERNAL_MODE_REG, DNET_INTERNAL_MODE_FCEN);

	/*
	 * RX FIFO almost full threshold: only cmd FIFO almost full is
	 * implemented for RX side
	 */
	dnet_writel(bp, DNET_FIFO_RX_CMD_AF_TH, RX_FIFO_TH);
	/*
	 * TX FIFO almost empty threshold: only data FIFO almost empty
	 * is implemented for TX side
	 */
	dnet_writel(bp, DNET_FIFO_TX_DATA_AE_TH, TX_FIFO_TH);

	/* flush rx/tx fifos */
	dnet_writel(bp, DNET_SYS_CTL_RXFIFOFLUSH | DNET_SYS_CTL_TXFIFOFLUSH,
		    SYS_CTL);
	msleep(1);
	dnet_writel(bp, 0, SYS_CTL);
}
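/* reset the MAC, program the MAC address and enable the RX/TX interrupts */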
static void dnet_init_hw(struct dnet *bp)
{
	u32 config;

	dnet_reset_hw(bp);
	__dnet_set_hwaddr(bp);

	config = dnet_readw_mac(bp, DNET_INTERNAL_RXTX_CONTROL_REG);

	if (bp->dev->flags & IFF_PROMISC)
		/* Copy All Frames */
		config |= DNET_INTERNAL_RXTX_CONTROL_ENPROMISC;
	if (!(bp->dev->flags & IFF_BROADCAST))
		/* No BroadCast */
		config |= DNET_INTERNAL_RXTX_CONTROL_RXMULTICAST;

	config |= DNET_INTERNAL_RXTX_CONTROL_RXPAUSE |
	    DNET_INTERNAL_RXTX_CONTROL_RXBROADCAST |
	    DNET_INTERNAL_RXTX_CONTROL_DROPCONTROL |
	    DNET_INTERNAL_RXTX_CONTROL_DISCFXFCS;

	dnet_writew_mac(bp, DNET_INTERNAL_RXTX_CONTROL_REG, config);

	/* clear irq before enabling them */
	config = dnet_readl(bp, INTR_SRC);

	/* enable RX/TX interrupt, recv packet ready interrupt */
	dnet_writel(bp, DNET_INTR_ENB_GLOBAL_ENABLE | DNET_INTR_ENB_RX_SUMMARY |
			DNET_INTR_ENB_TX_SUMMARY | DNET_INTR_ENB_RX_FIFOERR |
			DNET_INTR_ENB_RX_ERROR | DNET_INTR_ENB_RX_FIFOFULL |
			DNET_INTR_ENB_TX_FIFOFULL | DNET_INTR_ENB_TX_DISCFRM |
			DNET_INTR_ENB_RX_PKTRDY, INTR_ENB);
}
static int dnet_open(struct net_device *dev)
{
	struct dnet *bp = netdev_priv(dev);

	/* if the phy is not yet registered, retry later */
	if (!bp->phy_dev)
		return -EAGAIN;

	if (!is_valid_ether_addr(dev->dev_addr))
		return -EADDRNOTAVAIL;

	napi_enable(&bp->napi);
	dnet_init_hw(bp);

	phy_start_aneg(bp->phy_dev);

	/* schedule a link state check */
	phy_start(bp->phy_dev);

	netif_start_queue(dev);

	return 0;
}
static int dnet_close(struct net_device *dev)
{
	struct dnet *bp = netdev_priv(dev);

	netif_stop_queue(dev);
	napi_disable(&bp->napi);

	if (bp->phy_dev)
		phy_stop(bp->phy_dev);

	dnet_reset_hw(bp);
	netif_carrier_off(dev);

	return 0;
}
static inline void dnet_print_pretty_hwstats(struct dnet_stats *hwstat)
{
	pr_debug("%s\n", __func__);
	pr_debug("----------------------------- RX statistics "
		 "-------------------------------\n");
	pr_debug("RX_PKT_IGNR_CNT %-8x\n", hwstat->rx_pkt_ignr);
	pr_debug("RX_LEN_CHK_ERR_CNT %-8x\n", hwstat->rx_len_chk_err);
	pr_debug("RX_LNG_FRM_CNT %-8x\n", hwstat->rx_lng_frm);
	pr_debug("RX_SHRT_FRM_CNT %-8x\n", hwstat->rx_shrt_frm);
	pr_debug("RX_IPG_VIOL_CNT %-8x\n", hwstat->rx_ipg_viol);
	pr_debug("RX_CRC_ERR_CNT %-8x\n", hwstat->rx_crc_err);
	pr_debug("RX_OK_PKT_CNT %-8x\n", hwstat->rx_ok_pkt);
	pr_debug("RX_CTL_FRM_CNT %-8x\n", hwstat->rx_ctl_frm);
	pr_debug("RX_PAUSE_FRM_CNT %-8x\n", hwstat->rx_pause_frm);
	pr_debug("RX_MULTICAST_CNT %-8x\n", hwstat->rx_multicast);
	pr_debug("RX_BROADCAST_CNT %-8x\n", hwstat->rx_broadcast);
	pr_debug("RX_VLAN_TAG_CNT %-8x\n", hwstat->rx_vlan_tag);
	pr_debug("RX_PRE_SHRINK_CNT %-8x\n", hwstat->rx_pre_shrink);
	pr_debug("RX_DRIB_NIB_CNT %-8x\n", hwstat->rx_drib_nib);
	pr_debug("RX_UNSUP_OPCD_CNT %-8x\n", hwstat->rx_unsup_opcd);
	pr_debug("RX_BYTE_CNT %-8x\n", hwstat->rx_byte);
	pr_debug("----------------------------- TX statistics "
		 "-------------------------------\n");
	pr_debug("TX_UNICAST_CNT %-8x\n", hwstat->tx_unicast);
	pr_debug("TX_PAUSE_FRM_CNT %-8x\n", hwstat->tx_pause_frm);
	pr_debug("TX_MULTICAST_CNT %-8x\n", hwstat->tx_multicast);
	pr_debug("TX_BRDCAST_CNT %-8x\n", hwstat->tx_brdcast);
	pr_debug("TX_VLAN_TAG_CNT %-8x\n", hwstat->tx_vlan_tag);
	pr_debug("TX_BAD_FCS_CNT %-8x\n", hwstat->tx_bad_fcs);
	pr_debug("TX_JUMBO_CNT %-8x\n", hwstat->tx_jumbo);
	pr_debug("TX_BYTE_CNT %-8x\n", hwstat->tx_byte);
}
static struct net_device_stats *dnet_get_stats(struct net_device *dev)
{
	struct dnet *bp = netdev_priv(dev);
	struct net_device_stats *nstat = &dev->stats;
	struct dnet_stats *hwstat = &bp->hw_stats;

	/* read stats from hardware */
	dnet_update_stats(bp);

	/* Convert HW stats into netdevice stats */
	nstat->rx_errors = (hwstat->rx_len_chk_err +
			    hwstat->rx_lng_frm + hwstat->rx_shrt_frm +
			    /* ignore IGP violation error
			    hwstat->rx_ipg_viol + */
			    hwstat->rx_crc_err +
			    hwstat->rx_pre_shrink +
			    hwstat->rx_drib_nib + hwstat->rx_unsup_opcd);
	nstat->tx_errors = hwstat->tx_bad_fcs;
	nstat->rx_length_errors = (hwstat->rx_len_chk_err +
				   hwstat->rx_lng_frm +
				   hwstat->rx_shrt_frm + hwstat->rx_pre_shrink);
	nstat->rx_crc_errors = hwstat->rx_crc_err;
	nstat->rx_frame_errors = hwstat->rx_pre_shrink + hwstat->rx_drib_nib;
	nstat->rx_packets = hwstat->rx_ok_pkt;
	nstat->tx_packets = (hwstat->tx_unicast +
			     hwstat->tx_multicast + hwstat->tx_brdcast);
	nstat->rx_bytes = hwstat->rx_byte;
	nstat->tx_bytes = hwstat->tx_byte;
	nstat->multicast = hwstat->rx_multicast;
	nstat->rx_missed_errors = hwstat->rx_pkt_ignr;

	dnet_print_pretty_hwstats(hwstat);

	return nstat;
}
static int dnet_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
	struct dnet *bp = netdev_priv(dev);
	struct phy_device *phydev = bp->phy_dev;

	if (!phydev)
		return -ENODEV;

	return phy_ethtool_gset(phydev, cmd);
}
static int dnet_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
	struct dnet *bp = netdev_priv(dev);
	struct phy_device *phydev = bp->phy_dev;

	if (!phydev)
		return -ENODEV;

	return phy_ethtool_sset(phydev, cmd);
}
static int dnet_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
{
	struct dnet *bp = netdev_priv(dev);
	struct phy_device *phydev = bp->phy_dev;

	if (!netif_running(dev))
		return -EINVAL;

	if (!phydev)
		return -ENODEV;

	return phy_mii_ioctl(phydev, rq, cmd);
}
static void dnet_get_drvinfo(struct net_device *dev,
			     struct ethtool_drvinfo *info)
{
	strcpy(info->driver, DRV_NAME);
	strcpy(info->version, DRV_VERSION);
	strcpy(info->bus_info, "0");
}
static const struct ethtool_ops dnet_ethtool_ops = {
	.get_settings		= dnet_get_settings,
	.set_settings		= dnet_set_settings,
	.get_drvinfo		= dnet_get_drvinfo,
	.get_link		= ethtool_op_get_link,
};
static const struct net_device_ops dnet_netdev_ops = {
	.ndo_open		= dnet_open,
	.ndo_stop		= dnet_close,
	.ndo_get_stats		= dnet_get_stats,
	.ndo_start_xmit		= dnet_start_xmit,
	.ndo_do_ioctl		= dnet_ioctl,
	.ndo_set_mac_address	= eth_mac_addr,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_change_mtu		= eth_change_mtu,
};
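/*
 * Platform probe: map the device registers, request the interrupt,
 * register the net_device and bring up the MDIO bus and PHY.
 */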
static int __devinit dnet_probe(struct platform_device *pdev)
{
	struct resource *res;
	struct net_device *dev;
	struct dnet *bp;
	struct phy_device *phydev;
	int err = -ENXIO;
	unsigned int mem_base, mem_size, irq;

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (!res) {
		dev_err(&pdev->dev, "no mmio resource defined\n");
		goto err_out;
	}
	mem_base = res->start;
	mem_size = resource_size(res);
	irq = platform_get_irq(pdev, 0);

	if (!request_mem_region(mem_base, mem_size, DRV_NAME)) {
		dev_err(&pdev->dev, "no memory region available\n");
		err = -EBUSY;
		goto err_out;
	}

	err = -ENOMEM;
	dev = alloc_etherdev(sizeof(*bp));
	if (!dev) {
		dev_err(&pdev->dev, "etherdev alloc failed, aborting.\n");
		goto err_out_release_mem;
	}

	/* TODO: Actually, we have some interesting features... */
	dev->features |= 0;

	bp = netdev_priv(dev);
	bp->dev = dev;

	platform_set_drvdata(pdev, dev);
	SET_NETDEV_DEV(dev, &pdev->dev);

	spin_lock_init(&bp->lock);

	bp->regs = ioremap(mem_base, mem_size);
	if (!bp->regs) {
		dev_err(&pdev->dev, "failed to map registers, aborting.\n");
		err = -ENOMEM;
		goto err_out_free_dev;
	}

	dev->irq = irq;
	err = request_irq(dev->irq, dnet_interrupt, 0, DRV_NAME, dev);
	if (err) {
		dev_err(&pdev->dev, "Unable to request IRQ %d (error %d)\n",
			irq, err);
		goto err_out_iounmap;
	}

	dev->netdev_ops = &dnet_netdev_ops;
	netif_napi_add(dev, &bp->napi, dnet_poll, 64);
	dev->ethtool_ops = &dnet_ethtool_ops;

	dev->base_addr = (unsigned long)bp->regs;

	bp->capabilities = dnet_readl(bp, VERCAPS) & DNET_CAPS_MASK;

	dnet_get_hwaddr(bp);

	if (!is_valid_ether_addr(dev->dev_addr)) {
		/* choose a random ethernet address */
		random_ether_addr(dev->dev_addr);
		__dnet_set_hwaddr(bp);
	}

	err = register_netdev(dev);
	if (err) {
		dev_err(&pdev->dev, "Cannot register net device, aborting.\n");
		goto err_out_free_irq;
	}

	/* register the PHY board fixup (for Marvell 88E1111) */
	err = phy_register_fixup_for_uid(0x01410cc0, 0xfffffff0,
					 dnet_phy_marvell_fixup);
	/* we can live without it, so just issue a warning */
	if (err)
		dev_warn(&pdev->dev, "Cannot register PHY board fixup.\n");

	err = dnet_mii_init(bp);
	if (err)
		goto err_out_unregister_netdev;

	dev_info(&pdev->dev, "Dave DNET at 0x%p (0x%08x) irq %d %pM\n",
		 bp->regs, mem_base, dev->irq, dev->dev_addr);
	dev_info(&pdev->dev, "has %smdio, %sirq, %sgigabit, %sdma\n",
		 (bp->capabilities & DNET_HAS_MDIO) ? "" : "no ",
		 (bp->capabilities & DNET_HAS_IRQ) ? "" : "no ",
		 (bp->capabilities & DNET_HAS_GIGABIT) ? "" : "no ",
		 (bp->capabilities & DNET_HAS_DMA) ? "" : "no ");
	phydev = bp->phy_dev;
	dev_info(&pdev->dev, "attached PHY driver [%s] "
		 "(mii_bus:phy_addr=%s, irq=%d)\n",
		 phydev->drv->name, dev_name(&phydev->dev), phydev->irq);

	return 0;

err_out_unregister_netdev:
	unregister_netdev(dev);
err_out_free_irq:
	free_irq(dev->irq, dev);
err_out_iounmap:
	iounmap(bp->regs);
err_out_free_dev:
	free_netdev(dev);
err_out_release_mem:
	release_mem_region(mem_base, mem_size);
err_out:
	return err;
}
static int __devexit dnet_remove(struct platform_device *pdev)
{
	struct net_device *dev;
	struct dnet *bp;

	dev = platform_get_drvdata(pdev);

	if (dev) {
		bp = netdev_priv(dev);
		if (bp->phy_dev)
			phy_disconnect(bp->phy_dev);
		mdiobus_unregister(bp->mii_bus);
		kfree(bp->mii_bus->irq);
		mdiobus_free(bp->mii_bus);
		unregister_netdev(dev);
		free_irq(dev->irq, dev);
		iounmap(bp->regs);
		free_netdev(dev);
	}

	return 0;
}
static struct platform_driver dnet_driver = {
	.probe		= dnet_probe,
	.remove		= __devexit_p(dnet_remove),
	.driver		= {
		.name		= "dnet",
	},
};
static int __init dnet_init(void)
{
	return platform_driver_register(&dnet_driver);
}

static void __exit dnet_exit(void)
{
	platform_driver_unregister(&dnet_driver);
}

module_init(dnet_init);
module_exit(dnet_exit);

MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Dave DNET Ethernet driver");
MODULE_AUTHOR("Ilya Yanok <yanok@emcraft.com>, "
	      "Matteo Vit <matteo.vit@dave.eu>");