/* drivers/net/dm9000.c */

/*
 * Davicom DM9000 Fast Ethernet driver for Linux.
 * Copyright (C) 1997  Sten Wang
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version 2
 * of the License, or (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * (C) Copyright 1997-1998 DAVICOM Semiconductor, Inc. All Rights Reserved.
 *
 * Additional updates, Copyright:
 *	Ben Dooks <ben@simtec.co.uk>
 *	Sascha Hauer <s.hauer@pengutronix.de>
 */
#include <linux/module.h>
#include <linux/ioport.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/init.h>
#include <linux/skbuff.h>
#include <linux/spinlock.h>
#include <linux/crc32.h>
#include <linux/mii.h>
#include <linux/ethtool.h>
#include <linux/dm9000.h>
#include <linux/delay.h>
#include <linux/platform_device.h>
#include <linux/irq.h>

#include <asm/delay.h>
#include <asm/irq.h>
#include <asm/io.h>

#include "dm9000.h"
/* Board/System/Debug information/definition ---------------- */

#define DM9000_PHY		0x40	/* PHY address 0x01 */

#define CARDNAME	"dm9000"
#define DRV_VERSION	"1.31"
/*
 * Transmit timeout, default 5 seconds.
 */
static int watchdog = 5000;
module_param(watchdog, int, 0400);
MODULE_PARM_DESC(watchdog, "transmit timeout in milliseconds");
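/* A sketch of how this parameter is typically used (assuming only standard
 * module_param() behaviour, nothing DM9000 specific): it can be overridden
 * at load time, e.g.
 *	modprobe dm9000 watchdog=10000
 * and, with the 0400 permissions above, read back by root from
 *	/sys/module/dm9000/parameters/watchdog
 * The value is converted with msecs_to_jiffies() in dm9000_probe() and stored
 * in ndev->watchdog_timeo, so it is the delay before dm9000_timeout() runs.
 */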
/* DM9000 register address locking.
 *
 * The DM9000 uses an address register to control where data written
 * to the data register goes. This means that the address register
 * must be preserved over interrupts or similar calls.
 *
 * During interrupt and other critical calls, a spinlock is used to
 * protect the system, but the calls themselves save the address
 * in the address register in case they are interrupting another
 * access to the device.
 *
 * For general accesses a lock is provided so that calls which are
 * allowed to sleep are serialised so that the address register does
 * not need to be saved. This lock also serves to serialise access
 * to the EEPROM and PHY access registers which are shared between
 * these two devices.
 */
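/* In concrete terms, a minimal sketch of the pattern used by
 * dm9000_interrupt(), dm9000_timeout() and the PHY helpers below
 * (variable names here are purely illustrative):
 *
 *	unsigned long flags;
 *	u8 reg_save;
 *
 *	spin_lock_irqsave(&db->lock, flags);
 *	reg_save = readb(db->io_addr);	// remember the selected register
 *	... ior()/iow() accesses ...
 *	writeb(reg_save, db->io_addr);	// reselect it for the interrupted path
 *	spin_unlock_irqrestore(&db->lock, flags);
 *
 * Sleeping paths (EEPROM and PHY access) additionally take db->addr_lock
 * around the whole operation, as described above.
 */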
/* The driver supports the original DM9000E, and now the two newer
 * devices, DM9000A and DM9000B.
 */

enum dm9000_type {
	TYPE_DM9000E,	/* original DM9000 */
	TYPE_DM9000A,
	TYPE_DM9000B
};
/* Structure/enum declaration ------------------------------- */
typedef struct board_info {

	void __iomem	*io_addr;	/* Register I/O base address */
	void __iomem	*io_data;	/* Data I/O address */
	u16		 irq;		/* IRQ */

	u16		tx_pkt_cnt;
	u16		queue_pkt_len;
	u16		queue_start_addr;
	u16		queue_ip_summed;
	u16		dbug_cnt;
	u8		io_mode;	/* 0:word, 2:byte */
	u8		phy_addr;
	u8		imr_all;

	unsigned int	flags;
	unsigned int	in_suspend :1;
	int		debug_level;

	enum dm9000_type type;

	void (*inblk)(void __iomem *port, void *data, int length);
	void (*outblk)(void __iomem *port, void *data, int length);
	void (*dumpblk)(void __iomem *port, int length);

	struct device	*dev;		/* parent device */

	struct resource	*addr_res;	/* resources found */
	struct resource	*data_res;
	struct resource	*addr_req;	/* resources requested */
	struct resource	*data_req;
	struct resource	*irq_res;

	struct mutex	addr_lock;	/* phy and eeprom access lock */

	struct delayed_work phy_poll;
	struct net_device  *ndev;

	spinlock_t	lock;

	struct mii_if_info mii;
	u32		msg_enable;

	int		rx_csum;
	int		can_csum;
	int		ip_summed;
} board_info_t;
/* debug code */

#define dm9000_dbg(db, lev, msg...) do {		\
	if ((lev) < CONFIG_DM9000_DEBUGLEVEL &&		\
	    (lev) < db->debug_level) {			\
		dev_dbg(db->dev, msg);			\
	}						\
} while (0)
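/* Typical usage (taken from the functions below): a message is emitted only
 * when both the compile-time CONFIG_DM9000_DEBUGLEVEL and the per-device
 * db->debug_level are greater than the level argument, e.g.
 *	dm9000_dbg(db, 3, "entering %s\n", __func__);
 */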
static inline board_info_t *to_dm9000_board(struct net_device *dev)
{
	return netdev_priv(dev);
}

/* DM9000 network board routine ---------------------------- */
static void
dm9000_reset(board_info_t *db)
{
	dev_dbg(db->dev, "resetting device\n");

	/* RESET device */
	writeb(DM9000_NCR, db->io_addr);
	udelay(200);
	writeb(NCR_RST, db->io_data);
	udelay(200);
}

/*
 *   Read a byte from I/O port
 */
static u8
ior(board_info_t *db, int reg)
{
	writeb(reg, db->io_addr);
	return readb(db->io_data);
}

/*
 *   Write a byte to I/O port
 */
static void
iow(board_info_t *db, int reg, int value)
{
	writeb(reg, db->io_addr);
	writeb(value, db->io_data);
}
/* routines for sending block to chip */

static void dm9000_outblk_8bit(void __iomem *reg, void *data, int count)
{
	writesb(reg, data, count);
}

static void dm9000_outblk_16bit(void __iomem *reg, void *data, int count)
{
	writesw(reg, data, (count+1) >> 1);
}

static void dm9000_outblk_32bit(void __iomem *reg, void *data, int count)
{
	writesl(reg, data, (count+3) >> 2);
}

/* input block from chip to memory */

static void dm9000_inblk_8bit(void __iomem *reg, void *data, int count)
{
	readsb(reg, data, count);
}

static void dm9000_inblk_16bit(void __iomem *reg, void *data, int count)
{
	readsw(reg, data, (count+1) >> 1);
}

static void dm9000_inblk_32bit(void __iomem *reg, void *data, int count)
{
	readsl(reg, data, (count+3) >> 2);
}
/* dump block from chip to null */

static void dm9000_dumpblk_8bit(void __iomem *reg, int count)
{
	int i;
	int tmp;

	for (i = 0; i < count; i++)
		tmp = readb(reg);
}

static void dm9000_dumpblk_16bit(void __iomem *reg, int count)
{
	int i;
	int tmp;

	count = (count + 1) >> 1;

	for (i = 0; i < count; i++)
		tmp = readw(reg);
}

static void dm9000_dumpblk_32bit(void __iomem *reg, int count)
{
	int i;
	int tmp;

	count = (count + 3) >> 2;

	for (i = 0; i < count; i++)
		tmp = readl(reg);
}
/* dm9000_set_io
 *
 * select the specified set of io routines to use with the
 * device
 */
static void dm9000_set_io(struct board_info *db, int byte_width)
{
	/* use the size of the data resource to work out what IO
	 * routines we want to use
	 */

	switch (byte_width) {
	case 1:
		db->dumpblk = dm9000_dumpblk_8bit;
		db->outblk  = dm9000_outblk_8bit;
		db->inblk   = dm9000_inblk_8bit;
		break;

	case 3:
		dev_dbg(db->dev, ": 3 byte IO, falling back to 16bit\n");
		/* fall through */
	case 2:
		db->dumpblk = dm9000_dumpblk_16bit;
		db->outblk  = dm9000_outblk_16bit;
		db->inblk   = dm9000_inblk_16bit;
		break;

	case 4:
	default:
		db->dumpblk = dm9000_dumpblk_32bit;
		db->outblk  = dm9000_outblk_32bit;
		db->inblk   = dm9000_inblk_32bit;
		break;
	}
}
static void dm9000_schedule_poll(board_info_t *db)
{
	if (db->type == TYPE_DM9000E)
		schedule_delayed_work(&db->phy_poll, HZ * 2);
}

static int dm9000_ioctl(struct net_device *dev, struct ifreq *req, int cmd)
{
	board_info_t *dm = to_dm9000_board(dev);

	if (!netif_running(dev))
		return -EINVAL;

	return generic_mii_ioctl(&dm->mii, if_mii(req), cmd, NULL);
}

static unsigned int
dm9000_read_locked(board_info_t *db, int reg)
{
	unsigned long flags;
	unsigned int ret;

	spin_lock_irqsave(&db->lock, flags);
	ret = ior(db, reg);
	spin_unlock_irqrestore(&db->lock, flags);

	return ret;
}
static int dm9000_wait_eeprom(board_info_t *db)
{
	unsigned int status;
	int timeout = 8;	/* wait max 8msec */

	/* The DM9000 data sheets say we should be able to
	 * poll the ERRE bit in EPCR to wait for the EEPROM
	 * operation. From testing several chips, this bit
	 * does not seem to work.
	 *
	 * We attempt to use the bit, but fall back to the
	 * timeout (which is why we do not return an error
	 * on expiry) to say that the EEPROM operation has
	 * completed.
	 */

	while (1) {
		status = dm9000_read_locked(db, DM9000_EPCR);

		if ((status & EPCR_ERRE) == 0)
			break;

		msleep(1);

		if (timeout-- < 0) {
			dev_dbg(db->dev, "timeout waiting EEPROM\n");
			break;
		}
	}

	return 0;
}
/*
 *  Read a word of data from the EEPROM.
 */
static void
dm9000_read_eeprom(board_info_t *db, int offset, u8 *to)
{
	unsigned long flags;

	if (db->flags & DM9000_PLATF_NO_EEPROM) {
		to[0] = 0xff;
		to[1] = 0xff;
		return;
	}

	mutex_lock(&db->addr_lock);

	spin_lock_irqsave(&db->lock, flags);

	iow(db, DM9000_EPAR, offset);
	iow(db, DM9000_EPCR, EPCR_ERPRR);

	spin_unlock_irqrestore(&db->lock, flags);

	dm9000_wait_eeprom(db);

	/* delay for at least 150 uS */
	msleep(1);

	spin_lock_irqsave(&db->lock, flags);

	iow(db, DM9000_EPCR, 0x0);

	to[0] = ior(db, DM9000_EPDRL);
	to[1] = ior(db, DM9000_EPDRH);

	spin_unlock_irqrestore(&db->lock, flags);

	mutex_unlock(&db->addr_lock);
}
/*
 * Write a word of data to the SROM (EEPROM).
 */
static void
dm9000_write_eeprom(board_info_t *db, int offset, u8 *data)
{
	unsigned long flags;

	if (db->flags & DM9000_PLATF_NO_EEPROM)
		return;

	mutex_lock(&db->addr_lock);

	spin_lock_irqsave(&db->lock, flags);
	iow(db, DM9000_EPAR, offset);
	iow(db, DM9000_EPDRH, data[1]);
	iow(db, DM9000_EPDRL, data[0]);
	iow(db, DM9000_EPCR, EPCR_WEP | EPCR_ERPRW);
	spin_unlock_irqrestore(&db->lock, flags);

	dm9000_wait_eeprom(db);

	mdelay(1);	/* wait at least 150 uS to clear */

	spin_lock_irqsave(&db->lock, flags);
	iow(db, DM9000_EPCR, 0);
	spin_unlock_irqrestore(&db->lock, flags);

	mutex_unlock(&db->addr_lock);
}
/* ethtool ops */

static void dm9000_get_drvinfo(struct net_device *dev,
			       struct ethtool_drvinfo *info)
{
	board_info_t *dm = to_dm9000_board(dev);

	strcpy(info->driver, CARDNAME);
	strcpy(info->version, DRV_VERSION);
	strcpy(info->bus_info, to_platform_device(dm->dev)->name);
}

static u32 dm9000_get_msglevel(struct net_device *dev)
{
	board_info_t *dm = to_dm9000_board(dev);

	return dm->msg_enable;
}

static void dm9000_set_msglevel(struct net_device *dev, u32 value)
{
	board_info_t *dm = to_dm9000_board(dev);

	dm->msg_enable = value;
}

static int dm9000_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
	board_info_t *dm = to_dm9000_board(dev);

	mii_ethtool_gset(&dm->mii, cmd);
	return 0;
}

static int dm9000_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
	board_info_t *dm = to_dm9000_board(dev);

	return mii_ethtool_sset(&dm->mii, cmd);
}

static int dm9000_nway_reset(struct net_device *dev)
{
	board_info_t *dm = to_dm9000_board(dev);
	return mii_nway_restart(&dm->mii);
}

static uint32_t dm9000_get_rx_csum(struct net_device *dev)
{
	board_info_t *dm = to_dm9000_board(dev);
	return dm->rx_csum;
}

static int dm9000_set_rx_csum(struct net_device *dev, uint32_t data)
{
	board_info_t *dm = to_dm9000_board(dev);
	unsigned long flags;

	if (dm->can_csum) {
		dm->rx_csum = data;

		spin_lock_irqsave(&dm->lock, flags);
		iow(dm, DM9000_RCSR, dm->rx_csum ? RCSR_CSUM : 0);
		spin_unlock_irqrestore(&dm->lock, flags);

		return 0;
	}

	return -EOPNOTSUPP;
}

static int dm9000_set_tx_csum(struct net_device *dev, uint32_t data)
{
	board_info_t *dm = to_dm9000_board(dev);
	int ret = -EOPNOTSUPP;

	if (dm->can_csum)
		ret = ethtool_op_set_tx_csum(dev, data);
	return ret;
}

static u32 dm9000_get_link(struct net_device *dev)
{
	board_info_t *dm = to_dm9000_board(dev);
	u32 ret;

	if (dm->flags & DM9000_PLATF_EXT_PHY)
		ret = mii_link_ok(&dm->mii);
	else
		ret = dm9000_read_locked(dm, DM9000_NSR) & NSR_LINKST ? 1 : 0;

	return ret;
}
#define DM_EEPROM_MAGIC		(0x444D394B)
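/* Userspace must supply this magic before dm9000_set_eeprom() below will
 * accept a write; for example, something along the lines of
 *	ethtool -E eth0 magic 0x444D394B offset 0 value 0x00
 * would be expected to work (the exact command is an illustration), whereas
 * a write request without the magic is rejected with -EINVAL.
 */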
static int dm9000_get_eeprom_len(struct net_device *dev)
{
	return 128;
}

static int dm9000_get_eeprom(struct net_device *dev,
			     struct ethtool_eeprom *ee, u8 *data)
{
	board_info_t *dm = to_dm9000_board(dev);
	int offset = ee->offset;
	int len = ee->len;
	int i;

	/* EEPROM access is aligned to two bytes */

	if ((len & 1) != 0 || (offset & 1) != 0)
		return -EINVAL;

	if (dm->flags & DM9000_PLATF_NO_EEPROM)
		return -ENOENT;

	ee->magic = DM_EEPROM_MAGIC;

	for (i = 0; i < len; i += 2)
		dm9000_read_eeprom(dm, (offset + i) / 2, data + i);

	return 0;
}

static int dm9000_set_eeprom(struct net_device *dev,
			     struct ethtool_eeprom *ee, u8 *data)
{
	board_info_t *dm = to_dm9000_board(dev);
	int offset = ee->offset;
	int len = ee->len;
	int i;

	/* EEPROM access is aligned to two bytes */

	if ((len & 1) != 0 || (offset & 1) != 0)
		return -EINVAL;

	if (dm->flags & DM9000_PLATF_NO_EEPROM)
		return -ENOENT;

	if (ee->magic != DM_EEPROM_MAGIC)
		return -EINVAL;

	for (i = 0; i < len; i += 2)
		dm9000_write_eeprom(dm, (offset + i) / 2, data + i);

	return 0;
}
static const struct ethtool_ops dm9000_ethtool_ops = {
	.get_drvinfo		= dm9000_get_drvinfo,
	.get_settings		= dm9000_get_settings,
	.set_settings		= dm9000_set_settings,
	.get_msglevel		= dm9000_get_msglevel,
	.set_msglevel		= dm9000_set_msglevel,
	.nway_reset		= dm9000_nway_reset,
	.get_link		= dm9000_get_link,
	.get_eeprom_len		= dm9000_get_eeprom_len,
	.get_eeprom		= dm9000_get_eeprom,
	.set_eeprom		= dm9000_set_eeprom,
	.get_rx_csum		= dm9000_get_rx_csum,
	.set_rx_csum		= dm9000_set_rx_csum,
	.get_tx_csum		= ethtool_op_get_tx_csum,
	.set_tx_csum		= dm9000_set_tx_csum,
};
static void dm9000_show_carrier(board_info_t *db,
				unsigned carrier, unsigned nsr)
{
	struct net_device *ndev = db->ndev;
	unsigned ncr = dm9000_read_locked(db, DM9000_NCR);

	if (carrier)
		dev_info(db->dev, "%s: link up, %dMbps, %s-duplex, no LPA\n",
			 ndev->name, (nsr & NSR_SPEED) ? 10 : 100,
			 (ncr & NCR_FDX) ? "full" : "half");
	else
		dev_info(db->dev, "%s: link down\n", ndev->name);
}

static void
dm9000_poll_work(struct work_struct *w)
{
	struct delayed_work *dw = to_delayed_work(w);
	board_info_t *db = container_of(dw, board_info_t, phy_poll);
	struct net_device *ndev = db->ndev;

	if (db->flags & DM9000_PLATF_SIMPLE_PHY &&
	    !(db->flags & DM9000_PLATF_EXT_PHY)) {
		unsigned nsr = dm9000_read_locked(db, DM9000_NSR);
		unsigned old_carrier = netif_carrier_ok(ndev) ? 1 : 0;
		unsigned new_carrier;

		new_carrier = (nsr & NSR_LINKST) ? 1 : 0;

		if (old_carrier != new_carrier) {
			if (netif_msg_link(db))
				dm9000_show_carrier(db, new_carrier, nsr);

			if (!new_carrier)
				netif_carrier_off(ndev);
			else
				netif_carrier_on(ndev);
		}
	} else
		mii_check_media(&db->mii, netif_msg_link(db), 0);

	if (netif_running(ndev))
		dm9000_schedule_poll(db);
}
/* dm9000_release_board
 *
 * release a board, and any mapped resources
 */
static void
dm9000_release_board(struct platform_device *pdev, struct board_info *db)
{
	/* unmap our resources */

	iounmap(db->io_addr);
	iounmap(db->io_data);

	/* release the resources */

	release_resource(db->data_req);
	kfree(db->data_req);

	release_resource(db->addr_req);
	kfree(db->addr_req);
}

static unsigned char dm9000_type_to_char(enum dm9000_type type)
{
	switch (type) {
	case TYPE_DM9000E: return 'e';
	case TYPE_DM9000A: return 'a';
	case TYPE_DM9000B: return 'b';
	}

	return '?';
}
/*
 *  Set DM9000 multicast address
 */
static void
dm9000_hash_table(struct net_device *dev)
{
	board_info_t *db = netdev_priv(dev);
	struct dev_mc_list *mcptr = dev->mc_list;
	int mc_cnt = dev->mc_count;
	int i, oft;
	u32 hash_val;
	u16 hash_table[4];
	u8 rcr = RCR_DIS_LONG | RCR_DIS_CRC | RCR_RXEN;
	unsigned long flags;

	dm9000_dbg(db, 1, "entering %s\n", __func__);

	spin_lock_irqsave(&db->lock, flags);

	for (i = 0, oft = DM9000_PAR; i < 6; i++, oft++)
		iow(db, oft, dev->dev_addr[i]);

	/* Clear Hash Table */
	for (i = 0; i < 4; i++)
		hash_table[i] = 0x0;

	/* broadcast address */
	hash_table[3] = 0x8000;

	if (dev->flags & IFF_PROMISC)
		rcr |= RCR_PRMSC;

	if (dev->flags & IFF_ALLMULTI)
		rcr |= RCR_ALL;

	/* the multicast addresses in the Hash Table: 64 bits */
	for (i = 0; i < mc_cnt; i++, mcptr = mcptr->next) {
		hash_val = ether_crc_le(6, mcptr->dmi_addr) & 0x3f;
		hash_table[hash_val / 16] |= (u16) 1 << (hash_val % 16);
	}

	/* Write the hash table to MAC MD table */
	for (i = 0, oft = DM9000_MAR; i < 4; i++) {
		iow(db, oft++, hash_table[i]);
		iow(db, oft++, hash_table[i] >> 8);
	}

	iow(db, DM9000_RCR, rcr);
	spin_unlock_irqrestore(&db->lock, flags);
}
/*
 * Initialize dm9000 board
 */
static void
dm9000_init_dm9000(struct net_device *dev)
{
	board_info_t *db = netdev_priv(dev);
	unsigned int imr;

	dm9000_dbg(db, 1, "entering %s\n", __func__);

	/* I/O mode */
	db->io_mode = ior(db, DM9000_ISR) >> 6;	/* ISR bit7:6 keeps I/O mode */

	/* Checksum mode */
	dm9000_set_rx_csum(dev, db->rx_csum);

	/* GPIO0 on pre-activate PHY */
	iow(db, DM9000_GPR, 0);			/* REG_1F bit0 activate phyxcer */
	iow(db, DM9000_GPCR, GPCR_GEP_CNTL);	/* Let GPIO0 output */
	iow(db, DM9000_GPR, 0);			/* Enable PHY */

	if (db->flags & DM9000_PLATF_EXT_PHY)
		iow(db, DM9000_NCR, NCR_EXT_PHY);

	/* Program operating register */
	iow(db, DM9000_TCR, 0);			/* TX Polling clear */
	iow(db, DM9000_BPTR, 0x3f);		/* Less 3Kb, 200us */
	iow(db, DM9000_FCR, 0xff);		/* Flow Control */
	iow(db, DM9000_SMCR, 0);		/* Special Mode */
	/* clear TX status */
	iow(db, DM9000_NSR, NSR_WAKEST | NSR_TX2END | NSR_TX1END);
	iow(db, DM9000_ISR, ISR_CLR_STATUS);	/* Clear interrupt status */

	/* Set address filter table */
	dm9000_hash_table(dev);

	imr = IMR_PAR | IMR_PTM | IMR_PRM;
	if (db->type != TYPE_DM9000E)
		imr |= IMR_LNKCHNG;

	db->imr_all = imr;

	/* Enable TX/RX interrupt mask */
	iow(db, DM9000_IMR, imr);

	/* Init driver variables */
	db->tx_pkt_cnt = 0;
	db->queue_pkt_len = 0;
	dev->trans_start = 0;
}
/* Our watchdog timed out. Called by the networking layer */
static void dm9000_timeout(struct net_device *dev)
{
	board_info_t *db = netdev_priv(dev);
	u8 reg_save;
	unsigned long flags;

	/* Save previous register address */
	reg_save = readb(db->io_addr);
	spin_lock_irqsave(&db->lock, flags);

	netif_stop_queue(dev);
	dm9000_reset(db);
	dm9000_init_dm9000(dev);
	/* We can accept TX packets again */
	dev->trans_start = jiffies;
	netif_wake_queue(dev);

	/* Restore previous register address */
	writeb(reg_save, db->io_addr);
	spin_unlock_irqrestore(&db->lock, flags);
}
static void dm9000_send_packet(struct net_device *dev,
			       int ip_summed,
			       u16 pkt_len)
{
	board_info_t *dm = to_dm9000_board(dev);

	/* The DM9000 is not smart enough to leave fragmented packets alone. */
	if (dm->ip_summed != ip_summed) {
		if (ip_summed == CHECKSUM_NONE)
			iow(dm, DM9000_TCCR, 0);
		else
			iow(dm, DM9000_TCCR, TCCR_IP | TCCR_UDP | TCCR_TCP);
		dm->ip_summed = ip_summed;
	}

	/* Set TX length to DM9000 */
	iow(dm, DM9000_TXPLL, pkt_len);
	iow(dm, DM9000_TXPLH, pkt_len >> 8);

	/* Issue TX polling command */
	iow(dm, DM9000_TCR, TCR_TXREQ);	/* Cleared after TX complete */
}
/*
 *  Hardware start transmission.
 *  Send a packet to media from the upper layer.
 */
static int
dm9000_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	unsigned long flags;
	board_info_t *db = netdev_priv(dev);

	dm9000_dbg(db, 3, "%s:\n", __func__);

	if (db->tx_pkt_cnt > 1)
		return NETDEV_TX_BUSY;

	spin_lock_irqsave(&db->lock, flags);

	/* Move data to DM9000 TX RAM */
	writeb(DM9000_MWCMD, db->io_addr);

	(db->outblk)(db->io_data, skb->data, skb->len);
	dev->stats.tx_bytes += skb->len;

	db->tx_pkt_cnt++;
	/* TX control: First packet immediately send, second packet queue */
	if (db->tx_pkt_cnt == 1) {
		dm9000_send_packet(dev, skb->ip_summed, skb->len);
	} else {
		/* Second packet */
		db->queue_pkt_len = skb->len;
		db->queue_ip_summed = skb->ip_summed;
		netif_stop_queue(dev);
	}

	spin_unlock_irqrestore(&db->lock, flags);

	/* free this SKB */
	dev_kfree_skb(skb);

	return NETDEV_TX_OK;
}
/*
 * DM9000 interrupt handler
 * receive the packet to upper layer, free the transmitted packet
 */
static void dm9000_tx_done(struct net_device *dev, board_info_t *db)
{
	int tx_status = ior(db, DM9000_NSR);	/* Got TX status */

	if (tx_status & (NSR_TX2END | NSR_TX1END)) {
		/* One packet sent complete */
		db->tx_pkt_cnt--;
		dev->stats.tx_packets++;

		if (netif_msg_tx_done(db))
			dev_dbg(db->dev, "tx done, NSR %02x\n", tx_status);

		/* Queue packet check & send */
		if (db->tx_pkt_cnt > 0)
			dm9000_send_packet(dev, db->queue_ip_summed,
					   db->queue_pkt_len);
		netif_wake_queue(dev);
	}
}
struct dm9000_rxhdr {
	u8	RxPktReady;
	u8	RxStatus;
	__le16	RxLen;
} __attribute__((__packed__));
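/* Each frame in the DM9000's RX SRAM is preceded by one of these 4-byte
 * headers: a "packet ready" marker (0x00 or 0x01, checked against
 * DM9000_PKT_RDY/DM9000_PKT_ERR in dm9000_rx() below), a status byte that
 * mirrors the RSR register (hence the RSR_* checks), and the little-endian
 * frame length, which appears to include the 4-byte CRC (hence the
 * "RxLen - 4" when the data is copied into the skb).
 */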
/*
 *  Receive a packet and pass it to the upper layer.
 */
static void
dm9000_rx(struct net_device *dev)
{
	board_info_t *db = netdev_priv(dev);
	struct dm9000_rxhdr rxhdr;
	struct sk_buff *skb;
	u8 rxbyte, *rdptr;
	bool GoodPacket;
	int RxLen;

	/* Check packet ready or not */
	do {
		ior(db, DM9000_MRCMDX);	/* Dummy read */

		/* Get most updated data */
		rxbyte = readb(db->io_data);

		/* Status check: this byte must be 0 or 1 */
		if (rxbyte & DM9000_PKT_ERR) {
			dev_warn(db->dev, "status check fail: %d\n", rxbyte);
			iow(db, DM9000_RCR, 0x00);	/* Stop Device */
			iow(db, DM9000_ISR, IMR_PAR);	/* Stop INT request */
			return;
		}

		if (!(rxbyte & DM9000_PKT_RDY))
			return;

		/* A packet ready now, get status/length */
		GoodPacket = true;
		writeb(DM9000_MRCMD, db->io_addr);

		(db->inblk)(db->io_data, &rxhdr, sizeof(rxhdr));

		RxLen = le16_to_cpu(rxhdr.RxLen);

		if (netif_msg_rx_status(db))
			dev_dbg(db->dev, "RX: status %02x, length %04x\n",
				rxhdr.RxStatus, RxLen);

		/* Packet Status check */
		if (RxLen < 0x40) {
			GoodPacket = false;
			if (netif_msg_rx_err(db))
				dev_dbg(db->dev, "RX: Bad Packet (runt)\n");
		}

		if (RxLen > DM9000_PKT_MAX)
			dev_dbg(db->dev, "RST: RX Len:%x\n", RxLen);

		/* rxhdr.RxStatus is identical to RSR register. */
		if (rxhdr.RxStatus & (RSR_FOE | RSR_CE | RSR_AE |
				      RSR_PLE | RSR_RWTO |
				      RSR_LCS | RSR_RF)) {
			GoodPacket = false;
			if (rxhdr.RxStatus & RSR_FOE) {
				if (netif_msg_rx_err(db))
					dev_dbg(db->dev, "fifo error\n");
				dev->stats.rx_fifo_errors++;
			}
			if (rxhdr.RxStatus & RSR_CE) {
				if (netif_msg_rx_err(db))
					dev_dbg(db->dev, "crc error\n");
				dev->stats.rx_crc_errors++;
			}
			if (rxhdr.RxStatus & RSR_RF) {
				if (netif_msg_rx_err(db))
					dev_dbg(db->dev, "length error\n");
				dev->stats.rx_length_errors++;
			}
		}

		/* Move data from DM9000 */
		if (GoodPacket
		    && ((skb = dev_alloc_skb(RxLen + 4)) != NULL)) {
			skb_reserve(skb, 2);
			rdptr = (u8 *) skb_put(skb, RxLen - 4);

			/* Read received packet from RX SRAM */

			(db->inblk)(db->io_data, rdptr, RxLen);
			dev->stats.rx_bytes += RxLen;

			/* Pass to upper layer */
			skb->protocol = eth_type_trans(skb, dev);
			if (db->rx_csum) {
				if ((((rxbyte & 0x1c) << 3) & rxbyte) == 0)
					skb->ip_summed = CHECKSUM_UNNECESSARY;
				else
					skb->ip_summed = CHECKSUM_NONE;
			}
			netif_rx(skb);
			dev->stats.rx_packets++;

		} else {
			/* need to dump the packet's data */

			(db->dumpblk)(db->io_data, RxLen);
		}
	} while (rxbyte & DM9000_PKT_RDY);
}
static irqreturn_t dm9000_interrupt(int irq, void *dev_id)
{
	struct net_device *dev = dev_id;
	board_info_t *db = netdev_priv(dev);
	int int_status;
	unsigned long flags;
	u8 reg_save;

	dm9000_dbg(db, 3, "entering %s\n", __func__);

	/* A real interrupt coming */

	/* holders of db->lock must always block IRQs */
	spin_lock_irqsave(&db->lock, flags);

	/* Save previous register address */
	reg_save = readb(db->io_addr);

	/* Disable all interrupts */
	iow(db, DM9000_IMR, IMR_PAR);

	/* Got DM9000 interrupt status */
	int_status = ior(db, DM9000_ISR);	/* Got ISR */
	iow(db, DM9000_ISR, int_status);	/* Clear ISR status */

	if (netif_msg_intr(db))
		dev_dbg(db->dev, "interrupt status %02x\n", int_status);

	/* Received an incoming packet */
	if (int_status & ISR_PRS)
		dm9000_rx(dev);

	/* Transmit interrupt check */
	if (int_status & ISR_PTS)
		dm9000_tx_done(dev, db);

	if (db->type != TYPE_DM9000E) {
		if (int_status & ISR_LNKCHNG) {
			/* fire a link-change request */
			schedule_delayed_work(&db->phy_poll, 1);
		}
	}

	/* Re-enable interrupt mask */
	iow(db, DM9000_IMR, db->imr_all);

	/* Restore previous register address */
	writeb(reg_save, db->io_addr);

	spin_unlock_irqrestore(&db->lock, flags);

	return IRQ_HANDLED;
}
#ifdef CONFIG_NET_POLL_CONTROLLER
/*
 * Used by netconsole
 */
static void dm9000_poll_controller(struct net_device *dev)
{
	disable_irq(dev->irq);
	dm9000_interrupt(dev->irq, dev);
	enable_irq(dev->irq);
}
#endif
/*
 *  Open the interface.
 *  The interface is opened whenever "ifconfig" activates it.
 */
static int
dm9000_open(struct net_device *dev)
{
	board_info_t *db = netdev_priv(dev);
	unsigned long irqflags = db->irq_res->flags & IRQF_TRIGGER_MASK;

	if (netif_msg_ifup(db))
		dev_dbg(db->dev, "enabling %s\n", dev->name);

	/* If there is no IRQ type specified, default to something that
	 * may work, and tell the user that this is a problem */

	if (irqflags == IRQF_TRIGGER_NONE)
		dev_warn(db->dev, "WARNING: no IRQ resource flags set.\n");

	irqflags |= IRQF_SHARED;

	if (request_irq(dev->irq, &dm9000_interrupt, irqflags, dev->name, dev))
		return -EAGAIN;

	/* Initialize DM9000 board */
	dm9000_reset(db);
	dm9000_init_dm9000(dev);

	/* Init driver variable */
	db->dbug_cnt = 0;

	mii_check_media(&db->mii, netif_msg_link(db), 1);
	netif_start_queue(dev);

	dm9000_schedule_poll(db);

	return 0;
}
/*
 * Sleep, either by using msleep() or, if we are suspending, by
 * busy-waiting with mdelay().
 */
static void dm9000_msleep(board_info_t *db, unsigned int ms)
{
	if (db->in_suspend)
		mdelay(ms);
	else
		msleep(ms);
}
/*
 *   Read a word from the phyxcer
 */
static int
dm9000_phy_read(struct net_device *dev, int phy_reg_unused, int reg)
{
	board_info_t *db = netdev_priv(dev);
	unsigned long flags;
	unsigned int reg_save;
	int ret;

	mutex_lock(&db->addr_lock);

	spin_lock_irqsave(&db->lock, flags);

	/* Save previous register address */
	reg_save = readb(db->io_addr);

	/* Fill the phyxcer register into REG_0C */
	iow(db, DM9000_EPAR, DM9000_PHY | reg);

	iow(db, DM9000_EPCR, EPCR_ERPRR | EPCR_EPOS);	/* Issue phyxcer read command */

	writeb(reg_save, db->io_addr);
	spin_unlock_irqrestore(&db->lock, flags);

	dm9000_msleep(db, 1);		/* Wait read complete */

	spin_lock_irqsave(&db->lock, flags);
	reg_save = readb(db->io_addr);

	iow(db, DM9000_EPCR, 0x0);	/* Clear phyxcer read command */

	/* The read data keeps on REG_0D & REG_0E */
	ret = (ior(db, DM9000_EPDRH) << 8) | ior(db, DM9000_EPDRL);

	/* restore the previous address */
	writeb(reg_save, db->io_addr);
	spin_unlock_irqrestore(&db->lock, flags);

	mutex_unlock(&db->addr_lock);

	dm9000_dbg(db, 5, "phy_read[%02x] -> %04x\n", reg, ret);
	return ret;
}
/*
 *   Write a word to the phyxcer
 */
static void
dm9000_phy_write(struct net_device *dev,
		 int phyaddr_unused, int reg, int value)
{
	board_info_t *db = netdev_priv(dev);
	unsigned long flags;
	unsigned long reg_save;

	dm9000_dbg(db, 5, "phy_write[%02x] = %04x\n", reg, value);
	mutex_lock(&db->addr_lock);

	spin_lock_irqsave(&db->lock, flags);

	/* Save previous register address */
	reg_save = readb(db->io_addr);

	/* Fill the phyxcer register into REG_0C */
	iow(db, DM9000_EPAR, DM9000_PHY | reg);

	/* Fill the written data into REG_0D & REG_0E */
	iow(db, DM9000_EPDRL, value);
	iow(db, DM9000_EPDRH, value >> 8);

	iow(db, DM9000_EPCR, EPCR_EPOS | EPCR_ERPRW);	/* Issue phyxcer write command */

	writeb(reg_save, db->io_addr);
	spin_unlock_irqrestore(&db->lock, flags);

	dm9000_msleep(db, 1);		/* Wait write complete */

	spin_lock_irqsave(&db->lock, flags);
	reg_save = readb(db->io_addr);

	iow(db, DM9000_EPCR, 0x0);	/* Clear phyxcer write command */

	/* restore the previous address */
	writeb(reg_save, db->io_addr);

	spin_unlock_irqrestore(&db->lock, flags);
	mutex_unlock(&db->addr_lock);
}
static void
dm9000_shutdown(struct net_device *dev)
{
	board_info_t *db = netdev_priv(dev);

	/* RESET device */
	dm9000_phy_write(dev, 0, MII_BMCR, BMCR_RESET);	/* PHY RESET */
	iow(db, DM9000_GPR, 0x01);	/* Power-Down PHY */
	iow(db, DM9000_IMR, IMR_PAR);	/* Disable all interrupts */
	iow(db, DM9000_RCR, 0x00);	/* Disable RX */
}
/*
 *  Stop the interface.
 *  The interface is stopped when it is brought down.
 */
static int
dm9000_stop(struct net_device *ndev)
{
	board_info_t *db = netdev_priv(ndev);

	if (netif_msg_ifdown(db))
		dev_dbg(db->dev, "shutting down %s\n", ndev->name);

	cancel_delayed_work_sync(&db->phy_poll);

	netif_stop_queue(ndev);
	netif_carrier_off(ndev);

	/* free interrupt */
	free_irq(ndev->irq, ndev);

	dm9000_shutdown(ndev);

	return 0;
}
static const struct net_device_ops dm9000_netdev_ops = {
	.ndo_open		= dm9000_open,
	.ndo_stop		= dm9000_stop,
	.ndo_start_xmit		= dm9000_start_xmit,
	.ndo_tx_timeout		= dm9000_timeout,
	.ndo_set_multicast_list	= dm9000_hash_table,
	.ndo_do_ioctl		= dm9000_ioctl,
	.ndo_change_mtu		= eth_change_mtu,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_set_mac_address	= eth_mac_addr,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller	= dm9000_poll_controller,
#endif
};
/*
 * Search DM9000 board, allocate space and register it
 */
static int __devinit
dm9000_probe(struct platform_device *pdev)
{
	struct dm9000_plat_data *pdata = pdev->dev.platform_data;
	struct board_info *db;	/* pointer to board information structure */
	struct net_device *ndev;
	const unsigned char *mac_src;
	int ret = 0;
	int iosize;
	int i;
	u32 id_val;

	/* Init network device */
	ndev = alloc_etherdev(sizeof(struct board_info));
	if (!ndev) {
		dev_err(&pdev->dev, "could not allocate device.\n");
		return -ENOMEM;
	}

	SET_NETDEV_DEV(ndev, &pdev->dev);

	dev_dbg(&pdev->dev, "dm9000_probe()\n");

	/* setup board info structure */
	db = netdev_priv(ndev);

	db->dev = &pdev->dev;
	db->ndev = ndev;

	spin_lock_init(&db->lock);
	mutex_init(&db->addr_lock);

	INIT_DELAYED_WORK(&db->phy_poll, dm9000_poll_work);

	db->addr_res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	db->data_res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
	db->irq_res  = platform_get_resource(pdev, IORESOURCE_IRQ, 0);

	if (db->addr_res == NULL || db->data_res == NULL ||
	    db->irq_res == NULL) {
		dev_err(db->dev, "insufficient resources\n");
		ret = -ENOENT;
		goto out;
	}

	iosize = resource_size(db->addr_res);
	db->addr_req = request_mem_region(db->addr_res->start, iosize,
					  pdev->name);

	if (db->addr_req == NULL) {
		dev_err(db->dev, "cannot claim address reg area\n");
		ret = -EIO;
		goto out;
	}

	db->io_addr = ioremap(db->addr_res->start, iosize);

	if (db->io_addr == NULL) {
		dev_err(db->dev, "failed to ioremap address reg\n");
		ret = -EINVAL;
		goto out;
	}

	iosize = resource_size(db->data_res);
	db->data_req = request_mem_region(db->data_res->start, iosize,
					  pdev->name);

	if (db->data_req == NULL) {
		dev_err(db->dev, "cannot claim data reg area\n");
		ret = -EIO;
		goto out;
	}

	db->io_data = ioremap(db->data_res->start, iosize);

	if (db->io_data == NULL) {
		dev_err(db->dev, "failed to ioremap data reg\n");
		ret = -EINVAL;
		goto out;
	}

	/* fill in parameters for net-dev structure */
	ndev->base_addr = (unsigned long)db->io_addr;
	ndev->irq	= db->irq_res->start;

	/* ensure at least we have a default set of IO routines */
	dm9000_set_io(db, iosize);

	/* check to see if anything is being over-ridden */
	if (pdata != NULL) {
		/* check to see if the driver wants to over-ride the
		 * default IO width */

		if (pdata->flags & DM9000_PLATF_8BITONLY)
			dm9000_set_io(db, 1);

		if (pdata->flags & DM9000_PLATF_16BITONLY)
			dm9000_set_io(db, 2);

		if (pdata->flags & DM9000_PLATF_32BITONLY)
			dm9000_set_io(db, 4);

		/* check to see if there are any IO routine
		 * over-rides */

		if (pdata->inblk != NULL)
			db->inblk = pdata->inblk;

		if (pdata->outblk != NULL)
			db->outblk = pdata->outblk;

		if (pdata->dumpblk != NULL)
			db->dumpblk = pdata->dumpblk;

		db->flags = pdata->flags;
	}

#ifdef CONFIG_DM9000_FORCE_SIMPLE_PHY_POLL
	db->flags |= DM9000_PLATF_SIMPLE_PHY;
#endif

	dm9000_reset(db);

	/* try multiple times, DM9000 sometimes gets the read wrong */
	for (i = 0; i < 8; i++) {
		id_val  = ior(db, DM9000_VIDL);
		id_val |= (u32)ior(db, DM9000_VIDH) << 8;
		id_val |= (u32)ior(db, DM9000_PIDL) << 16;
		id_val |= (u32)ior(db, DM9000_PIDH) << 24;

		if (id_val == DM9000_ID)
			break;
		dev_err(db->dev, "read wrong id 0x%08x\n", id_val);
	}

	if (id_val != DM9000_ID) {
		dev_err(db->dev, "wrong id: 0x%08x\n", id_val);
		ret = -ENODEV;
		goto out;
	}

	/* Identify what type of DM9000 we are working on */

	id_val = ior(db, DM9000_CHIPR);
	dev_dbg(db->dev, "dm9000 revision 0x%02x\n", id_val);

	switch (id_val) {
	case CHIPR_DM9000A:
		db->type = TYPE_DM9000A;
		break;
	case CHIPR_DM9000B:
		db->type = TYPE_DM9000B;
		break;
	default:
		dev_dbg(db->dev, "ID %02x => defaulting to DM9000E\n", id_val);
		db->type = TYPE_DM9000E;
	}

	/* dm9000a/b are capable of hardware checksum offload */
	if (db->type == TYPE_DM9000A || db->type == TYPE_DM9000B) {
		db->can_csum = 1;
		db->rx_csum = 1;
		ndev->features |= NETIF_F_IP_CSUM;
	}

	/* from this point we assume that we have found a DM9000 */

	/* driver system function */
	ether_setup(ndev);

	ndev->netdev_ops	= &dm9000_netdev_ops;
	ndev->watchdog_timeo	= msecs_to_jiffies(watchdog);
	ndev->ethtool_ops	= &dm9000_ethtool_ops;

	db->msg_enable       = NETIF_MSG_LINK;
	db->mii.phy_id_mask  = 0x1f;
	db->mii.reg_num_mask = 0x1f;
	db->mii.force_media  = 0;
	db->mii.full_duplex  = 0;
	db->mii.dev	     = ndev;
	db->mii.mdio_read    = dm9000_phy_read;
	db->mii.mdio_write   = dm9000_phy_write;

	mac_src = "eeprom";

	/* try reading the node address from the attached EEPROM */
	for (i = 0; i < 6; i += 2)
		dm9000_read_eeprom(db, i / 2, ndev->dev_addr+i);

	if (!is_valid_ether_addr(ndev->dev_addr) && pdata != NULL) {
		mac_src = "platform data";
		memcpy(ndev->dev_addr, pdata->dev_addr, 6);
	}

	if (!is_valid_ether_addr(ndev->dev_addr)) {
		/* try reading from mac */

		mac_src = "chip";
		for (i = 0; i < 6; i++)
			ndev->dev_addr[i] = ior(db, i+DM9000_PAR);
	}

	if (!is_valid_ether_addr(ndev->dev_addr))
		dev_warn(db->dev, "%s: Invalid ethernet MAC address. Please "
			 "set using ifconfig\n", ndev->name);

	platform_set_drvdata(pdev, ndev);
	ret = register_netdev(ndev);

	if (ret == 0)
		printk(KERN_INFO "%s: dm9000%c at %p,%p IRQ %d MAC: %pM (%s)\n",
		       ndev->name, dm9000_type_to_char(db->type),
		       db->io_addr, db->io_data, ndev->irq,
		       ndev->dev_addr, mac_src);
	return 0;

out:
	dev_err(db->dev, "not found (%d).\n", ret);

	dm9000_release_board(pdev, db);
	free_netdev(ndev);

	return ret;
}
static int
dm9000_drv_suspend(struct device *dev)
{
	struct platform_device *pdev = to_platform_device(dev);
	struct net_device *ndev = platform_get_drvdata(pdev);
	board_info_t *db;

	if (ndev) {
		db = netdev_priv(ndev);
		db->in_suspend = 1;

		if (netif_running(ndev)) {
			netif_device_detach(ndev);
			dm9000_shutdown(ndev);
		}
	}
	return 0;
}

static int
dm9000_drv_resume(struct device *dev)
{
	struct platform_device *pdev = to_platform_device(dev);
	struct net_device *ndev = platform_get_drvdata(pdev);
	board_info_t *db = netdev_priv(ndev);

	if (ndev) {

		if (netif_running(ndev)) {
			dm9000_reset(db);
			dm9000_init_dm9000(ndev);

			netif_device_attach(ndev);
		}

		db->in_suspend = 0;
	}
	return 0;
}
static struct dev_pm_ops dm9000_drv_pm_ops = {
	.suspend	= dm9000_drv_suspend,
	.resume		= dm9000_drv_resume,
};

static int __devexit
dm9000_drv_remove(struct platform_device *pdev)
{
	struct net_device *ndev = platform_get_drvdata(pdev);

	platform_set_drvdata(pdev, NULL);

	unregister_netdev(ndev);
	dm9000_release_board(pdev, (board_info_t *) netdev_priv(ndev));
	free_netdev(ndev);		/* free device structure */

	dev_dbg(&pdev->dev, "released and freed device\n");
	return 0;
}

static struct platform_driver dm9000_driver = {
	.driver	= {
		.name    = "dm9000",
		.owner	 = THIS_MODULE,
		.pm	 = &dm9000_drv_pm_ops,
	},
	.probe   = dm9000_probe,
	.remove  = __devexit_p(dm9000_drv_remove),
};
static int __init
dm9000_init(void)
{
	printk(KERN_INFO "%s Ethernet Driver, V%s\n", CARDNAME, DRV_VERSION);

	return platform_driver_register(&dm9000_driver);
}

static void __exit
dm9000_cleanup(void)
{
	platform_driver_unregister(&dm9000_driver);
}

module_init(dm9000_init);
module_exit(dm9000_cleanup);

MODULE_AUTHOR("Sascha Hauer, Ben Dooks");
MODULE_DESCRIPTION("Davicom DM9000 network driver");
MODULE_LICENSE("GPL");
MODULE_ALIAS("platform:dm9000");