/*
 * Fast Ethernet Controller (FEC) driver for Motorola MPC8xx.
 * Copyright (c) 1997 Dan Malek (dmalek@jlc.net)
 *
 * Right now, I am very wasteful with the buffers.  I allocate memory
 * pages and then divide them into 2K frame buffers.  This way I know I
 * have buffers large enough to hold one frame within one buffer descriptor.
 * Once I get this working, I will use 64 or 128 byte CPM buffers, which
 * will be much more memory efficient and will easily handle lots of
 * small packets.
 *
 * Much better multiple PHY support by Magnus Damm.
 * Copyright (c) 2000 Ericsson Radio Systems AB.
 *
 * Support for FEC controller of ColdFire processors.
 * Copyright (c) 2001-2005 Greg Ungerer (gerg@snapgear.com)
 *
 * Bug fixes and cleanup by Philippe De Muyter (phdm@macqel.be)
 * Copyright (c) 2004-2006 Macq Electronique SA.
 */

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/ptrace.h>
#include <linux/errno.h>
#include <linux/ioport.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/pci.h>
#include <linux/init.h>
#include <linux/delay.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/spinlock.h>
#include <linux/workqueue.h>
#include <linux/bitops.h>
#include <linux/io.h>
#include <linux/irq.h>
#include <linux/clk.h>
#include <linux/platform_device.h>

#include <asm/cacheflush.h>

#ifndef CONFIG_ARCH_MXC
#include <asm/coldfire.h>
#include <asm/mcfsim.h>
#endif

#include "fec.h"

#ifdef CONFIG_ARCH_MXC
#include <mach/hardware.h>
#define FEC_ALIGNMENT   0xf
#else
#define FEC_ALIGNMENT   0x3
#endif
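
/*
 * Note: FEC_ALIGNMENT is used below as a mask (addr & FEC_ALIGNMENT), so
 * 0xf enforces 16-byte buffer alignment on ARCH_MXC parts and 0x3 enforces
 * 4-byte alignment elsewhere; transmit buffers that fail this test are
 * copied through tx_bounce[] in fec_enet_start_xmit().
 */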

/*
 * Define the fixed address of the FEC hardware.
 */
#if defined(CONFIG_M5272)
#define HAVE_mii_link_interrupt

static unsigned char fec_mac_default[] = {
    0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
};

/*
 * Some hardware gets its MAC address out of local flash memory.
 * If this is non-zero then assume it is the address to get the MAC from.
 */
#if defined(CONFIG_NETtel)
#define FEC_FLASHMAC    0xf0006006
#elif defined(CONFIG_GILBARCONAP) || defined(CONFIG_SCALES)
#define FEC_FLASHMAC    0xf0006000
#elif defined(CONFIG_CANCam)
#define FEC_FLASHMAC    0xf0020000
#elif defined(CONFIG_M5272C3)
#define FEC_FLASHMAC    (0xffe04000 + 4)
#elif defined(CONFIG_MOD5272)
#define FEC_FLASHMAC    0xffc0406b
#else
#define FEC_FLASHMAC    0
#endif
#endif /* CONFIG_M5272 */

/* Forward declarations of some structures to support different PHYs */

typedef struct {
    uint mii_data;
    void (*funct)(uint mii_reg, struct net_device *dev);
} phy_cmd_t;

typedef struct {
    uint id;
    char *name;

    const phy_cmd_t *config;
    const phy_cmd_t *startup;
    const phy_cmd_t *ack_int;
    const phy_cmd_t *shutdown;
} phy_info_t;

/* The number of Tx and Rx buffers.  These are allocated from the page
 * pool.  The code may assume these are power of two, so it is best
 * to keep them that size.
 * We don't need to allocate pages for the transmitter.  We just use
 * the skbuffer directly.
 */
#define FEC_ENET_RX_PAGES   8
#define FEC_ENET_RX_FRSIZE  2048
#define FEC_ENET_RX_FRPPG   (PAGE_SIZE / FEC_ENET_RX_FRSIZE)
#define RX_RING_SIZE        (FEC_ENET_RX_FRPPG * FEC_ENET_RX_PAGES)
#define FEC_ENET_TX_FRSIZE  2048
#define FEC_ENET_TX_FRPPG   (PAGE_SIZE / FEC_ENET_TX_FRSIZE)
#define TX_RING_SIZE        16  /* Must be power of two */
#define TX_RING_MOD_MASK    15  /*   for this to work */

#if (((RX_RING_SIZE + TX_RING_SIZE) * 8) > PAGE_SIZE)
#error "FEC: descriptor ring size constants too large"
#endif
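
/*
 * Because TX_RING_SIZE is a power of two, ring indices can wrap with a
 * cheap mask instead of a modulo operation:
 *
 *	next = (cur + 1) & TX_RING_MOD_MASK;	15 wraps back to 0
 *
 * The skb_cur/skb_dirty bookkeeping below relies on this.
 */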

/* Interrupt events/masks. */
#define FEC_ENET_HBERR  ((uint)0x80000000)  /* Heartbeat error */
#define FEC_ENET_BABR   ((uint)0x40000000)  /* Babbling receiver */
#define FEC_ENET_BABT   ((uint)0x20000000)  /* Babbling transmitter */
#define FEC_ENET_GRA    ((uint)0x10000000)  /* Graceful stop complete */
#define FEC_ENET_TXF    ((uint)0x08000000)  /* Full frame transmitted */
#define FEC_ENET_TXB    ((uint)0x04000000)  /* A buffer was transmitted */
#define FEC_ENET_RXF    ((uint)0x02000000)  /* Full frame received */
#define FEC_ENET_RXB    ((uint)0x01000000)  /* A buffer was received */
#define FEC_ENET_MII    ((uint)0x00800000)  /* MII interrupt */
#define FEC_ENET_EBERR  ((uint)0x00400000)  /* SDMA bus error */

/* The FEC stores dest/src/type, data, and checksum for receive packets. */
#define PKT_MAXBUF_SIZE     1518
#define PKT_MINBUF_SIZE     64
#define PKT_MAXBLR_SIZE     1520

/*
 * The 5270/5271/5280/5282/532x RX control register also contains maximum frame
 * size bits. Other FEC hardware does not, so we need to take that into
 * account when setting it.
 */
#if defined(CONFIG_M523x) || defined(CONFIG_M527x) || defined(CONFIG_M528x) || \
    defined(CONFIG_M520x) || defined(CONFIG_M532x) || defined(CONFIG_ARCH_MXC)
#define OPT_FRAME_SIZE  (PKT_MAXBUF_SIZE << 16)
#else
#define OPT_FRAME_SIZE  0
#endif

/* The FEC buffer descriptors track the ring buffers.  The rx_bd_base and
 * tx_bd_base always point to the base of the buffer descriptors.  The
 * cur_rx and cur_tx point to the currently available buffer.
 * The dirty_tx tracks the current buffer that is being sent by the
 * controller.  The cur_tx and dirty_tx are equal under both completely
 * empty and completely full conditions.  The empty/ready indicator in
 * the buffer descriptor determines the actual condition.
 */
struct fec_enet_private {
    /* Hardware registers of the FEC device */
    void __iomem *hwp;

    struct net_device *netdev;

    struct clk *clk;

    /* The saved address of a sent-in-place packet/buffer, for dev_kfree_skb_any(). */
    unsigned char *tx_bounce[TX_RING_SIZE];
    struct sk_buff *tx_skbuff[TX_RING_SIZE];
    struct sk_buff *rx_skbuff[RX_RING_SIZE];
    ushort skb_cur;
    ushort skb_dirty;

    /* CPM dual port RAM relative addresses */
    dma_addr_t bd_dma;
    /* Address of Rx and Tx buffers */
    struct bufdesc *rx_bd_base;
    struct bufdesc *tx_bd_base;
    /* The next free ring entry */
    struct bufdesc *cur_rx, *cur_tx;
    /* The ring entries to be free()ed */
    struct bufdesc *dirty_tx;

    uint tx_full;
    /* hold while accessing the HW like ringbuffer for tx/rx but not MAC */
    spinlock_t hw_lock;
    /* hold while accessing the mii_list_t() elements */
    spinlock_t mii_lock;

    uint phy_id;
    uint phy_id_done;
    uint phy_status;
    uint phy_speed;
    phy_info_t const *phy;
    struct work_struct phy_task;

    uint sequence_done;
    uint mii_phy_task_queued;

    uint phy_addr;

    int index;
    int opened;
    int link;
    int old_link;
    int full_duplex;
};

static void fec_enet_mii(struct net_device *dev);
static irqreturn_t fec_enet_interrupt(int irq, void *dev_id);
static void fec_enet_tx(struct net_device *dev);
static void fec_enet_rx(struct net_device *dev);
static int fec_enet_close(struct net_device *dev);
static void fec_restart(struct net_device *dev, int duplex);
static void fec_stop(struct net_device *dev);

/* MII processing.  We keep this as simple as possible.  Requests are
 * placed on the list (if there is room).  When the request is finished
 * by the MII, an optional function may be called.
 */
typedef struct mii_list {
    uint mii_regval;
    void (*mii_func)(uint val, struct net_device *dev);
    struct mii_list *mii_next;
} mii_list_t;

#define NMII    20
static mii_list_t mii_cmds[NMII];
static mii_list_t *mii_free;
static mii_list_t *mii_head;
static mii_list_t *mii_tail;

static int mii_queue(struct net_device *dev, int request,
        void (*func)(uint, struct net_device *));

/* Make MII read/write commands for the FEC */
#define mk_mii_read(REG)    (0x60020000 | ((REG & 0x1f) << 18))
#define mk_mii_write(REG, VAL)  (0x50020000 | ((REG & 0x1f) << 18) | \
                        (VAL & 0xffff))
#define mk_mii_end  0
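
/*
 * These constants encode an IEEE 802.3 clause 22 MDIO frame:
 * bits 31:30 start (01), bits 29:28 opcode (10 = read, 01 = write),
 * bits 27:23 PHY address (OR'ed in later by mii_queue()), bits 22:18
 * register address, bits 17:16 turnaround (10), bits 15:0 data.
 * Hence 0x60020000 is "start + read + TA" and 0x50020000 is
 * "start + write + TA".
 */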

/* Transmitter timeout */
#define TX_TIMEOUT (2 * HZ)

/* Register definitions for the PHY */

#define MII_REG_CR      0   /* Control Register */
#define MII_REG_SR      1   /* Status Register */
#define MII_REG_PHYIR1  2   /* PHY Identification Register 1 */
#define MII_REG_PHYIR2  3   /* PHY Identification Register 2 */
#define MII_REG_ANAR    4   /* A-N Advertisement Register */
#define MII_REG_ANLPAR  5   /* A-N Link Partner Ability Register */
#define MII_REG_ANER    6   /* A-N Expansion Register */
#define MII_REG_ANNPTR  7   /* A-N Next Page Transmit Register */
#define MII_REG_ANLPRNPR 8  /* A-N Link Partner Received Next Page Reg. */

/* values for phy_status */

#define PHY_CONF_ANE    0x0001  /* 1 auto-negotiation enabled */
#define PHY_CONF_LOOP   0x0002  /* 1 loopback mode enabled */
#define PHY_CONF_SPMASK 0x00f0  /* mask for speed */
#define PHY_CONF_10HDX  0x0010  /* 10 Mbit half duplex supported */
#define PHY_CONF_10FDX  0x0020  /* 10 Mbit full duplex supported */
#define PHY_CONF_100HDX 0x0040  /* 100 Mbit half duplex supported */
#define PHY_CONF_100FDX 0x0080  /* 100 Mbit full duplex supported */

#define PHY_STAT_LINK   0x0100  /* 1 up - 0 down */
#define PHY_STAT_FAULT  0x0200  /* 1 remote fault */
#define PHY_STAT_ANC    0x0400  /* 1 auto-negotiation complete */
#define PHY_STAT_SPMASK 0xf000  /* mask for speed */
#define PHY_STAT_10HDX  0x1000  /* 10 Mbit half duplex selected */
#define PHY_STAT_10FDX  0x2000  /* 10 Mbit full duplex selected */
#define PHY_STAT_100HDX 0x4000  /* 100 Mbit half duplex selected */
#define PHY_STAT_100FDX 0x8000  /* 100 Mbit full duplex selected */

static int
fec_enet_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
    struct fec_enet_private *fep = netdev_priv(dev);
    struct bufdesc *bdp;
    void *bufaddr;
    unsigned short status;
    unsigned long flags;

    if (!fep->link) {
        /* Link is down or autonegotiation is in progress. */
        return NETDEV_TX_BUSY;
    }

    spin_lock_irqsave(&fep->hw_lock, flags);
    /* Fill in a Tx ring entry */
    bdp = fep->cur_tx;

    status = bdp->cbd_sc;

    if (status & BD_ENET_TX_READY) {
        /* Ooops.  All transmit buffers are full.  Bail out.
         * This should not happen, since dev->tbusy should be set.
         */
        printk("%s: tx queue full!\n", dev->name);
        spin_unlock_irqrestore(&fep->hw_lock, flags);
        return NETDEV_TX_BUSY;
    }

    /* Clear all of the status flags */
    status &= ~BD_ENET_TX_STATS;

    /* Set buffer length and buffer pointer */
    bufaddr = skb->data;
    bdp->cbd_datlen = skb->len;

    /*
     * On some FEC implementations data must be aligned on
     * 4-byte boundaries. Use bounce buffers to copy data
     * and get it aligned. Ugh.
     */
    if (((unsigned long) bufaddr) & FEC_ALIGNMENT) {
        unsigned int index;
        index = bdp - fep->tx_bd_base;
        memcpy(fep->tx_bounce[index], (void *)skb->data, skb->len);
        bufaddr = fep->tx_bounce[index];
    }

    /* Save skb pointer */
    fep->tx_skbuff[fep->skb_cur] = skb;

    dev->stats.tx_bytes += skb->len;
    fep->skb_cur = (fep->skb_cur + 1) & TX_RING_MOD_MASK;

    /* Push the data cache so the CPM does not get stale memory
     * data.
     */
    bdp->cbd_bufaddr = dma_map_single(&dev->dev, bufaddr,
            FEC_ENET_TX_FRSIZE, DMA_TO_DEVICE);

    /* Send it on its way.  Tell FEC it's ready, interrupt when done,
     * it's the last BD of the frame, and to put the CRC on the end.
     */
    status |= (BD_ENET_TX_READY | BD_ENET_TX_INTR
            | BD_ENET_TX_LAST | BD_ENET_TX_TC);
    bdp->cbd_sc = status;

    dev->trans_start = jiffies;

    /* Trigger transmission start */
    writel(0, fep->hwp + FEC_X_DES_ACTIVE);

    /* If this was the last BD in the ring, start at the beginning again. */
    if (status & BD_ENET_TX_WRAP)
        bdp = fep->tx_bd_base;
    else
        bdp++;

    if (bdp == fep->dirty_tx) {
        fep->tx_full = 1;
        netif_stop_queue(dev);
    }

    fep->cur_tx = bdp;

    spin_unlock_irqrestore(&fep->hw_lock, flags);

    return NETDEV_TX_OK;
}

static void
fec_timeout(struct net_device *dev)
{
    struct fec_enet_private *fep = netdev_priv(dev);

    dev->stats.tx_errors++;

    fec_restart(dev, fep->full_duplex);
    netif_wake_queue(dev);
}

static irqreturn_t
fec_enet_interrupt(int irq, void *dev_id)
{
    struct net_device *dev = dev_id;
    struct fec_enet_private *fep = netdev_priv(dev);
    uint int_events;
    irqreturn_t ret = IRQ_NONE;

    do {
        int_events = readl(fep->hwp + FEC_IEVENT);
        writel(int_events, fep->hwp + FEC_IEVENT);

        if (int_events & FEC_ENET_RXF) {
            ret = IRQ_HANDLED;
            fec_enet_rx(dev);
        }

        /* Transmit OK, or non-fatal error. Update the buffer
         * descriptors. FEC handles all errors, we just discover
         * them as part of the transmit process.
         */
        if (int_events & FEC_ENET_TXF) {
            ret = IRQ_HANDLED;
            fec_enet_tx(dev);
        }

        if (int_events & FEC_ENET_MII) {
            ret = IRQ_HANDLED;
            fec_enet_mii(dev);
        }

    } while (int_events);

    return ret;
}

static void
fec_enet_tx(struct net_device *dev)
{
    struct fec_enet_private *fep;
    struct bufdesc *bdp;
    unsigned short status;
    struct sk_buff *skb;

    fep = netdev_priv(dev);
    spin_lock_irq(&fep->hw_lock);
    bdp = fep->dirty_tx;

    while (((status = bdp->cbd_sc) & BD_ENET_TX_READY) == 0) {
        if (bdp == fep->cur_tx && fep->tx_full == 0)
            break;

        dma_unmap_single(&dev->dev, bdp->cbd_bufaddr, FEC_ENET_TX_FRSIZE, DMA_TO_DEVICE);
        bdp->cbd_bufaddr = 0;

        skb = fep->tx_skbuff[fep->skb_dirty];
        /* Check for errors. */
        if (status & (BD_ENET_TX_HB | BD_ENET_TX_LC |
                BD_ENET_TX_RL | BD_ENET_TX_UN |
                BD_ENET_TX_CSL)) {
            dev->stats.tx_errors++;
            if (status & BD_ENET_TX_HB)  /* No heartbeat */
                dev->stats.tx_heartbeat_errors++;
            if (status & BD_ENET_TX_LC)  /* Late collision */
                dev->stats.tx_window_errors++;
            if (status & BD_ENET_TX_RL)  /* Retrans limit */
                dev->stats.tx_aborted_errors++;
            if (status & BD_ENET_TX_UN)  /* Underrun */
                dev->stats.tx_fifo_errors++;
            if (status & BD_ENET_TX_CSL) /* Carrier lost */
                dev->stats.tx_carrier_errors++;
        } else {
            dev->stats.tx_packets++;
        }

        if (status & BD_ENET_TX_READY)
            printk("HEY! Enet xmit interrupt and TX_READY.\n");

        /* Deferred means some collisions occurred during transmit,
         * but we eventually sent the packet OK.
         */
        if (status & BD_ENET_TX_DEF)
            dev->stats.collisions++;

        /* Free the sk buffer associated with this last transmit */
        dev_kfree_skb_any(skb);
        fep->tx_skbuff[fep->skb_dirty] = NULL;
        fep->skb_dirty = (fep->skb_dirty + 1) & TX_RING_MOD_MASK;

        /* Update pointer to next buffer descriptor to be transmitted */
        if (status & BD_ENET_TX_WRAP)
            bdp = fep->tx_bd_base;
        else
            bdp++;

        /* Since we have freed up a buffer, the ring is no longer full
         */
        if (fep->tx_full) {
            fep->tx_full = 0;
            if (netif_queue_stopped(dev))
                netif_wake_queue(dev);
        }
    }
    fep->dirty_tx = bdp;
    spin_unlock_irq(&fep->hw_lock);
}

/* During a receive, the cur_rx points to the current incoming buffer.
 * When we update through the ring, if the next incoming buffer has
 * not been given to the system, we just set the empty indicator,
 * effectively tossing the packet.
 */
static void
fec_enet_rx(struct net_device *dev)
{
    struct fec_enet_private *fep = netdev_priv(dev);
    struct bufdesc *bdp;
    unsigned short status;
    struct sk_buff *skb;
    ushort pkt_len;
    __u8 *data;

#ifdef CONFIG_M532x
    flush_cache_all();
#endif

    spin_lock_irq(&fep->hw_lock);

    /* First, grab all of the stats for the incoming packet.
     * These get messed up if we get called due to a busy condition.
     */
    bdp = fep->cur_rx;

    while (!((status = bdp->cbd_sc) & BD_ENET_RX_EMPTY)) {

        /* Since we have allocated space to hold a complete frame,
         * the last indicator should be set.
         */
        if ((status & BD_ENET_RX_LAST) == 0)
            printk("FEC ENET: rcv is not +last\n");

        if (!fep->opened)
            goto rx_processing_done;

        /* Check for errors. */
        if (status & (BD_ENET_RX_LG | BD_ENET_RX_SH | BD_ENET_RX_NO |
                BD_ENET_RX_CR | BD_ENET_RX_OV)) {
            dev->stats.rx_errors++;
            if (status & (BD_ENET_RX_LG | BD_ENET_RX_SH)) {
                /* Frame too long or too short. */
                dev->stats.rx_length_errors++;
            }
            if (status & BD_ENET_RX_NO)  /* Frame alignment */
                dev->stats.rx_frame_errors++;
            if (status & BD_ENET_RX_CR)  /* CRC Error */
                dev->stats.rx_crc_errors++;
            if (status & BD_ENET_RX_OV)  /* FIFO overrun */
                dev->stats.rx_fifo_errors++;
        }

        /* Report late collisions as a frame error.
         * On this error, the BD is closed, but we don't know what we
         * have in the buffer.  So, just drop this frame on the floor.
         */
        if (status & BD_ENET_RX_CL) {
            dev->stats.rx_errors++;
            dev->stats.rx_frame_errors++;
            goto rx_processing_done;
        }

        /* Process the incoming frame. */
        dev->stats.rx_packets++;
        pkt_len = bdp->cbd_datlen;
        dev->stats.rx_bytes += pkt_len;
        data = (__u8 *)__va(bdp->cbd_bufaddr);

        dma_unmap_single(&dev->dev, bdp->cbd_bufaddr, bdp->cbd_datlen,
                DMA_FROM_DEVICE);

        /* This does 16 byte alignment, exactly what we need.
         * The packet length includes FCS, but we don't want to
         * include that when passing upstream as it messes up
         * bridging applications.
         */
        skb = dev_alloc_skb(pkt_len - 4 + NET_IP_ALIGN);

        if (unlikely(!skb)) {
            printk("%s: Memory squeeze, dropping packet.\n",
                    dev->name);
            dev->stats.rx_dropped++;
        } else {
            skb_reserve(skb, NET_IP_ALIGN);
            skb_put(skb, pkt_len - 4);  /* Make room */
            skb_copy_to_linear_data(skb, data, pkt_len - 4);
            skb->protocol = eth_type_trans(skb, dev);
            netif_rx(skb);
        }

        bdp->cbd_bufaddr = dma_map_single(&dev->dev, data, bdp->cbd_datlen,
                DMA_FROM_DEVICE);
rx_processing_done:
        /* Clear the status flags for this buffer */
        status &= ~BD_ENET_RX_STATS;

        /* Mark the buffer empty */
        status |= BD_ENET_RX_EMPTY;
        bdp->cbd_sc = status;

        /* Update BD pointer to next entry */
        if (status & BD_ENET_RX_WRAP)
            bdp = fep->rx_bd_base;
        else
            bdp++;

        /* Doing this here will keep the FEC running while we process
         * incoming frames.  On a heavily loaded network, we should be
         * able to keep up at the expense of system resources.
         */
        writel(0, fep->hwp + FEC_R_DES_ACTIVE);
    }
    fep->cur_rx = bdp;

    spin_unlock_irq(&fep->hw_lock);
}

/* called from interrupt context */
static void
fec_enet_mii(struct net_device *dev)
{
    struct fec_enet_private *fep;
    mii_list_t *mip;

    fep = netdev_priv(dev);
    spin_lock_irq(&fep->mii_lock);

    if ((mip = mii_head) == NULL) {
        printk("MII and no head!\n");
        goto unlock;
    }

    if (mip->mii_func != NULL)
        (*(mip->mii_func))(readl(fep->hwp + FEC_MII_DATA), dev);

    mii_head = mip->mii_next;
    mip->mii_next = mii_free;
    mii_free = mip;

    if ((mip = mii_head) != NULL)
        writel(mip->mii_regval, fep->hwp + FEC_MII_DATA);

unlock:
    spin_unlock_irq(&fep->mii_lock);
}

static int
mii_queue(struct net_device *dev, int regval, void (*func)(uint, struct net_device *))
{
    struct fec_enet_private *fep;
    unsigned long flags;
    mii_list_t *mip;
    int retval;

    /* Add PHY address to register command */
    fep = netdev_priv(dev);
    spin_lock_irqsave(&fep->mii_lock, flags);

    regval |= fep->phy_addr << 23;
    retval = 0;

    if ((mip = mii_free) != NULL) {
        mii_free = mip->mii_next;
        mip->mii_regval = regval;
        mip->mii_func = func;
        mip->mii_next = NULL;
        if (mii_head) {
            mii_tail->mii_next = mip;
            mii_tail = mip;
        } else {
            mii_head = mii_tail = mip;
            writel(regval, fep->hwp + FEC_MII_DATA);
        }
    } else {
        retval = 1;
    }

    spin_unlock_irqrestore(&fep->mii_lock, flags);
    return retval;
}

static void mii_do_cmd(struct net_device *dev, const phy_cmd_t *c)
{
    if (!c)
        return;

    for (; c->mii_data != mk_mii_end; c++)
        mii_queue(dev, c->mii_data, c->funct);
}

static void mii_parse_sr(uint mii_reg, struct net_device *dev)
{
    struct fec_enet_private *fep = netdev_priv(dev);
    volatile uint *s = &(fep->phy_status);
    uint status;

    status = *s & ~(PHY_STAT_LINK | PHY_STAT_FAULT | PHY_STAT_ANC);

    if (mii_reg & 0x0004)
        status |= PHY_STAT_LINK;
    if (mii_reg & 0x0010)
        status |= PHY_STAT_FAULT;
    if (mii_reg & 0x0020)
        status |= PHY_STAT_ANC;
    *s = status;
}

static void mii_parse_cr(uint mii_reg, struct net_device *dev)
{
    struct fec_enet_private *fep = netdev_priv(dev);
    volatile uint *s = &(fep->phy_status);
    uint status;

    status = *s & ~(PHY_CONF_ANE | PHY_CONF_LOOP);

    if (mii_reg & 0x1000)
        status |= PHY_CONF_ANE;
    if (mii_reg & 0x4000)
        status |= PHY_CONF_LOOP;
    *s = status;
}

static void mii_parse_anar(uint mii_reg, struct net_device *dev)
{
    struct fec_enet_private *fep = netdev_priv(dev);
    volatile uint *s = &(fep->phy_status);
    uint status;

    status = *s & ~(PHY_CONF_SPMASK);

    if (mii_reg & 0x0020)
        status |= PHY_CONF_10HDX;
    if (mii_reg & 0x0040)
        status |= PHY_CONF_10FDX;
    if (mii_reg & 0x0080)
        status |= PHY_CONF_100HDX;
    if (mii_reg & 0x0100)
        status |= PHY_CONF_100FDX;
    *s = status;
}

/* ------------------------------------------------------------------------- */
/* The Level One LXT970 is used by many boards				     */

#define MII_LXT970_MIRROR   16  /* Mirror register */
#define MII_LXT970_IER      17  /* Interrupt Enable Register */
#define MII_LXT970_ISR      18  /* Interrupt Status Register */
#define MII_LXT970_CONFIG   19  /* Configuration Register */
#define MII_LXT970_CSR      20  /* Chip Status Register */

static void mii_parse_lxt970_csr(uint mii_reg, struct net_device *dev)
{
    struct fec_enet_private *fep = netdev_priv(dev);
    volatile uint *s = &(fep->phy_status);
    uint status;

    status = *s & ~(PHY_STAT_SPMASK);
    if (mii_reg & 0x0800) {
        if (mii_reg & 0x1000)
            status |= PHY_STAT_100FDX;
        else
            status |= PHY_STAT_100HDX;
    } else {
        if (mii_reg & 0x1000)
            status |= PHY_STAT_10FDX;
        else
            status |= PHY_STAT_10HDX;
    }
    *s = status;
}

static phy_cmd_t const phy_cmd_lxt970_config[] = {
        { mk_mii_read(MII_REG_CR), mii_parse_cr },
        { mk_mii_read(MII_REG_ANAR), mii_parse_anar },
        { mk_mii_end, }
    };
static phy_cmd_t const phy_cmd_lxt970_startup[] = { /* enable interrupts */
        { mk_mii_write(MII_LXT970_IER, 0x0002), NULL },
        { mk_mii_write(MII_REG_CR, 0x1200), NULL }, /* autonegotiate */
        { mk_mii_end, }
    };
static phy_cmd_t const phy_cmd_lxt970_ack_int[] = {
        /* read SR and ISR to acknowledge */
        { mk_mii_read(MII_REG_SR), mii_parse_sr },
        { mk_mii_read(MII_LXT970_ISR), NULL },

        /* find out the current status */
        { mk_mii_read(MII_LXT970_CSR), mii_parse_lxt970_csr },
        { mk_mii_end, }
    };
static phy_cmd_t const phy_cmd_lxt970_shutdown[] = { /* disable interrupts */
        { mk_mii_write(MII_LXT970_IER, 0x0000), NULL },
        { mk_mii_end, }
    };
static phy_info_t const phy_info_lxt970 = {
    .id = 0x07810000,
    .name = "LXT970",
    .config = phy_cmd_lxt970_config,
    .startup = phy_cmd_lxt970_startup,
    .ack_int = phy_cmd_lxt970_ack_int,
    .shutdown = phy_cmd_lxt970_shutdown
};

/* ------------------------------------------------------------------------- */
/* The Level One LXT971 is used on some of my custom boards		     */

/* register definitions for the 971 */

#define MII_LXT971_PCR  16  /* Port Control Register */
#define MII_LXT971_SR2  17  /* Status Register 2 */
#define MII_LXT971_IER  18  /* Interrupt Enable Register */
#define MII_LXT971_ISR  19  /* Interrupt Status Register */
#define MII_LXT971_LCR  20  /* LED Control Register */
#define MII_LXT971_TCR  30  /* Transmit Control Register */

/*
 * I had some nice ideas of running the MDIO faster...
 * The 971 should support 8MHz and I tried it, but things acted really
 * weird, so 2.5 MHz ought to be enough for anyone...
 */

static void mii_parse_lxt971_sr2(uint mii_reg, struct net_device *dev)
{
    struct fec_enet_private *fep = netdev_priv(dev);
    volatile uint *s = &(fep->phy_status);
    uint status;

    status = *s & ~(PHY_STAT_SPMASK | PHY_STAT_LINK | PHY_STAT_ANC);

    if (mii_reg & 0x0400) {
        fep->link = 1;
        status |= PHY_STAT_LINK;
    } else {
        fep->link = 0;
    }
    if (mii_reg & 0x0080)
        status |= PHY_STAT_ANC;
    if (mii_reg & 0x4000) {
        if (mii_reg & 0x0200)
            status |= PHY_STAT_100FDX;
        else
            status |= PHY_STAT_100HDX;
    } else {
        if (mii_reg & 0x0200)
            status |= PHY_STAT_10FDX;
        else
            status |= PHY_STAT_10HDX;
    }
    if (mii_reg & 0x0008)
        status |= PHY_STAT_FAULT;

    *s = status;
}

static phy_cmd_t const phy_cmd_lxt971_config[] = {
        /* limit to 10MBit because my prototype board
         * doesn't work with 100. */
        { mk_mii_read(MII_REG_CR), mii_parse_cr },
        { mk_mii_read(MII_REG_ANAR), mii_parse_anar },
        { mk_mii_read(MII_LXT971_SR2), mii_parse_lxt971_sr2 },
        { mk_mii_end, }
    };
static phy_cmd_t const phy_cmd_lxt971_startup[] = { /* enable interrupts */
        { mk_mii_write(MII_LXT971_IER, 0x00f2), NULL },
        { mk_mii_write(MII_REG_CR, 0x1200), NULL }, /* autonegotiate */
        { mk_mii_write(MII_LXT971_LCR, 0xd422), NULL }, /* LED config */
        /* The 971 reports link down on the first read after power-up;
         * read SR here so ack_int sees a valid value. */
        { mk_mii_read(MII_REG_SR), mii_parse_sr },
        { mk_mii_end, }
    };
static phy_cmd_t const phy_cmd_lxt971_ack_int[] = {
        /* acknowledge the int before reading status ! */
        { mk_mii_read(MII_LXT971_ISR), NULL },
        /* find out the current status */
        { mk_mii_read(MII_REG_SR), mii_parse_sr },
        { mk_mii_read(MII_LXT971_SR2), mii_parse_lxt971_sr2 },
        { mk_mii_end, }
    };
static phy_cmd_t const phy_cmd_lxt971_shutdown[] = { /* disable interrupts */
        { mk_mii_write(MII_LXT971_IER, 0x0000), NULL },
        { mk_mii_end, }
    };
static phy_info_t const phy_info_lxt971 = {
    .id = 0x0001378e,
    .name = "LXT971",
    .config = phy_cmd_lxt971_config,
    .startup = phy_cmd_lxt971_startup,
    .ack_int = phy_cmd_lxt971_ack_int,
    .shutdown = phy_cmd_lxt971_shutdown
};

/* ------------------------------------------------------------------------- */
/* The Quality Semiconductor QS6612 is used on the RPX CLLF		     */

/* register definitions */

#define MII_QS6612_MCR  17  /* Mode Control Register */
#define MII_QS6612_FTR  27  /* Factory Test Register */
#define MII_QS6612_MCO  28  /* Misc. Control Register */
#define MII_QS6612_ISR  29  /* Interrupt Source Register */
#define MII_QS6612_IMR  30  /* Interrupt Mask Register */
#define MII_QS6612_PCR  31  /* 100BaseTx PHY Control Reg. */

static void mii_parse_qs6612_pcr(uint mii_reg, struct net_device *dev)
{
    struct fec_enet_private *fep = netdev_priv(dev);
    volatile uint *s = &(fep->phy_status);
    uint status;

    status = *s & ~(PHY_STAT_SPMASK);

    switch ((mii_reg >> 2) & 7) {
    case 1: status |= PHY_STAT_10HDX; break;
    case 2: status |= PHY_STAT_100HDX; break;
    case 5: status |= PHY_STAT_10FDX; break;
    case 6: status |= PHY_STAT_100FDX; break;
    }

    *s = status;
}

static phy_cmd_t const phy_cmd_qs6612_config[] = {
        /* The PHY powers up isolated on the RPX,
         * so send a command to allow operation.
         */
        { mk_mii_write(MII_QS6612_PCR, 0x0dc0), NULL },

        /* parse cr and anar to get some info */
        { mk_mii_read(MII_REG_CR), mii_parse_cr },
        { mk_mii_read(MII_REG_ANAR), mii_parse_anar },
        { mk_mii_end, }
    };
static phy_cmd_t const phy_cmd_qs6612_startup[] = { /* enable interrupts */
        { mk_mii_write(MII_QS6612_IMR, 0x003a), NULL },
        { mk_mii_write(MII_REG_CR, 0x1200), NULL }, /* autonegotiate */
        { mk_mii_end, }
    };
static phy_cmd_t const phy_cmd_qs6612_ack_int[] = {
        /* we need to read ISR, SR and ANER to acknowledge */
        { mk_mii_read(MII_QS6612_ISR), NULL },
        { mk_mii_read(MII_REG_SR), mii_parse_sr },
        { mk_mii_read(MII_REG_ANER), NULL },

        /* read pcr to get info */
        { mk_mii_read(MII_QS6612_PCR), mii_parse_qs6612_pcr },
        { mk_mii_end, }
    };
static phy_cmd_t const phy_cmd_qs6612_shutdown[] = { /* disable interrupts */
        { mk_mii_write(MII_QS6612_IMR, 0x0000), NULL },
        { mk_mii_end, }
    };
static phy_info_t const phy_info_qs6612 = {
    .id = 0x00181440,
    .name = "QS6612",
    .config = phy_cmd_qs6612_config,
    .startup = phy_cmd_qs6612_startup,
    .ack_int = phy_cmd_qs6612_ack_int,
    .shutdown = phy_cmd_qs6612_shutdown
};

/* ------------------------------------------------------------------------- */
/* AMD AM79C874 phy							     */

/* register definitions for the 874 */

#define MII_AM79C874_MFR    16  /* Miscellaneous Feature Register */
#define MII_AM79C874_ICSR   17  /* Interrupt/Status Register */
#define MII_AM79C874_DR     18  /* Diagnostic Register */
#define MII_AM79C874_PMLR   19  /* Power and Loopback Register */
#define MII_AM79C874_MCR    21  /* Mode Control Register */
#define MII_AM79C874_DC     23  /* Disconnect Counter */
#define MII_AM79C874_REC    24  /* Receive Error Counter */

static void mii_parse_am79c874_dr(uint mii_reg, struct net_device *dev)
{
    struct fec_enet_private *fep = netdev_priv(dev);
    volatile uint *s = &(fep->phy_status);
    uint status;

    status = *s & ~(PHY_STAT_SPMASK | PHY_STAT_ANC);

    if (mii_reg & 0x0080)
        status |= PHY_STAT_ANC;
    if (mii_reg & 0x0400)
        status |= ((mii_reg & 0x0800) ? PHY_STAT_100FDX : PHY_STAT_100HDX);
    else
        status |= ((mii_reg & 0x0800) ? PHY_STAT_10FDX : PHY_STAT_10HDX);

    *s = status;
}

static phy_cmd_t const phy_cmd_am79c874_config[] = {
        { mk_mii_read(MII_REG_CR), mii_parse_cr },
        { mk_mii_read(MII_REG_ANAR), mii_parse_anar },
        { mk_mii_read(MII_AM79C874_DR), mii_parse_am79c874_dr },
        { mk_mii_end, }
    };
static phy_cmd_t const phy_cmd_am79c874_startup[] = { /* enable interrupts */
        { mk_mii_write(MII_AM79C874_ICSR, 0xff00), NULL },
        { mk_mii_write(MII_REG_CR, 0x1200), NULL }, /* autonegotiate */
        { mk_mii_read(MII_REG_SR), mii_parse_sr },
        { mk_mii_end, }
    };
static phy_cmd_t const phy_cmd_am79c874_ack_int[] = {
        /* find out the current status */
        { mk_mii_read(MII_REG_SR), mii_parse_sr },
        { mk_mii_read(MII_AM79C874_DR), mii_parse_am79c874_dr },
        /* we only need to read ISR to acknowledge */
        { mk_mii_read(MII_AM79C874_ICSR), NULL },
        { mk_mii_end, }
    };
static phy_cmd_t const phy_cmd_am79c874_shutdown[] = { /* disable interrupts */
        { mk_mii_write(MII_AM79C874_ICSR, 0x0000), NULL },
        { mk_mii_end, }
    };
static phy_info_t const phy_info_am79c874 = {
    .id = 0x00022561,
    .name = "AM79C874",
    .config = phy_cmd_am79c874_config,
    .startup = phy_cmd_am79c874_startup,
    .ack_int = phy_cmd_am79c874_ack_int,
    .shutdown = phy_cmd_am79c874_shutdown
};

/* ------------------------------------------------------------------------- */
/* Kendin KS8721BL phy							     */

/* register definitions for the 8721 */

#define MII_KS8721BL_RXERCR 21
#define MII_KS8721BL_ICSR   27
#define MII_KS8721BL_PHYCR  31

static phy_cmd_t const phy_cmd_ks8721bl_config[] = {
        { mk_mii_read(MII_REG_CR), mii_parse_cr },
        { mk_mii_read(MII_REG_ANAR), mii_parse_anar },
        { mk_mii_end, }
    };
static phy_cmd_t const phy_cmd_ks8721bl_startup[] = { /* enable interrupts */
        { mk_mii_write(MII_KS8721BL_ICSR, 0xff00), NULL },
        { mk_mii_write(MII_REG_CR, 0x1200), NULL }, /* autonegotiate */
        { mk_mii_read(MII_REG_SR), mii_parse_sr },
        { mk_mii_end, }
    };
static phy_cmd_t const phy_cmd_ks8721bl_ack_int[] = {
        /* find out the current status */
        { mk_mii_read(MII_REG_SR), mii_parse_sr },
        /* we only need to read ISR to acknowledge */
        { mk_mii_read(MII_KS8721BL_ICSR), NULL },
        { mk_mii_end, }
    };
static phy_cmd_t const phy_cmd_ks8721bl_shutdown[] = { /* disable interrupts */
        { mk_mii_write(MII_KS8721BL_ICSR, 0x0000), NULL },
        { mk_mii_end, }
    };
static phy_info_t const phy_info_ks8721bl = {
    .id = 0x00022161,
    .name = "KS8721BL",
    .config = phy_cmd_ks8721bl_config,
    .startup = phy_cmd_ks8721bl_startup,
    .ack_int = phy_cmd_ks8721bl_ack_int,
    .shutdown = phy_cmd_ks8721bl_shutdown
};

/* ------------------------------------------------------------------------- */
/* register definitions for the DP83848 */

#define MII_DP8384X_PHYSTST 16  /* PHY Status Register */

static void mii_parse_dp8384x_sr2(uint mii_reg, struct net_device *dev)
{
    struct fec_enet_private *fep = netdev_priv(dev);
    volatile uint *s = &(fep->phy_status);

    *s &= ~(PHY_STAT_SPMASK | PHY_STAT_LINK | PHY_STAT_ANC);

    /* Link up */
    if (mii_reg & 0x0001) {
        fep->link = 1;
        *s |= PHY_STAT_LINK;
    } else
        fep->link = 0;
    /* Status of link */
    if (mii_reg & 0x0010)       /* Auto-negotiation complete */
        *s |= PHY_STAT_ANC;
    if (mii_reg & 0x0002) {     /* 10 Mbps? */
        if (mii_reg & 0x0004)   /* Full Duplex? */
            *s |= PHY_STAT_10FDX;
        else
            *s |= PHY_STAT_10HDX;
    } else {                    /* 100 Mbps? */
        if (mii_reg & 0x0004)   /* Full Duplex? */
            *s |= PHY_STAT_100FDX;
        else
            *s |= PHY_STAT_100HDX;
    }
    if (mii_reg & 0x0008)
        *s |= PHY_STAT_FAULT;
}

static phy_info_t phy_info_dp83848 = {
    0x020005c9,
    "DP83848",

    (const phy_cmd_t []) {  /* config */
        { mk_mii_read(MII_REG_CR), mii_parse_cr },
        { mk_mii_read(MII_REG_ANAR), mii_parse_anar },
        { mk_mii_read(MII_DP8384X_PHYSTST), mii_parse_dp8384x_sr2 },
        { mk_mii_end, }
    },
    (const phy_cmd_t []) {  /* startup - enable interrupts */
        { mk_mii_write(MII_REG_CR, 0x1200), NULL }, /* autonegotiate */
        { mk_mii_read(MII_REG_SR), mii_parse_sr },
        { mk_mii_end, }
    },
    (const phy_cmd_t []) {  /* ack_int - never happens, no interrupt */
        { mk_mii_end, }
    },
    (const phy_cmd_t []) {  /* shutdown */
        { mk_mii_end, }
    },
};

/* ------------------------------------------------------------------------- */

static phy_info_t const * const phy_info[] = {
    &phy_info_lxt970,
    &phy_info_lxt971,
    &phy_info_qs6612,
    &phy_info_am79c874,
    &phy_info_ks8721bl,
    &phy_info_dp83848,
    NULL
};

/* ------------------------------------------------------------------------- */
#ifdef HAVE_mii_link_interrupt
static irqreturn_t
mii_link_interrupt(int irq, void *dev_id);

/*
 * This is specific to the MII interrupt setup of the M5272EVB.
 */
static void __inline__ fec_request_mii_intr(struct net_device *dev)
{
    if (request_irq(66, mii_link_interrupt, IRQF_DISABLED, "fec(MII)", dev) != 0)
        printk("FEC: Could not allocate fec(MII) IRQ(66)!\n");
}

static void __inline__ fec_disable_phy_intr(void)
{
    volatile unsigned long *icrp;
    icrp = (volatile unsigned long *) (MCF_MBAR + MCFSIM_ICR1);
    *icrp = 0x08000000;
}

static void __inline__ fec_phy_ack_intr(void)
{
    volatile unsigned long *icrp;
    /* Acknowledge the interrupt */
    icrp = (volatile unsigned long *) (MCF_MBAR + MCFSIM_ICR1);
    *icrp = 0x0d000000;
}
#endif

#ifdef CONFIG_M5272
static void __inline__ fec_get_mac(struct net_device *dev)
{
    struct fec_enet_private *fep = netdev_priv(dev);
    unsigned char *iap, tmpaddr[ETH_ALEN];

    if (FEC_FLASHMAC) {
        /*
         * Get MAC address from FLASH.
         * If it is all 1's or 0's, use the default.
         */
        iap = (unsigned char *)FEC_FLASHMAC;
        if ((iap[0] == 0) && (iap[1] == 0) && (iap[2] == 0) &&
            (iap[3] == 0) && (iap[4] == 0) && (iap[5] == 0))
            iap = fec_mac_default;
        if ((iap[0] == 0xff) && (iap[1] == 0xff) && (iap[2] == 0xff) &&
            (iap[3] == 0xff) && (iap[4] == 0xff) && (iap[5] == 0xff))
            iap = fec_mac_default;
    } else {
        *((unsigned long *) &tmpaddr[0]) = readl(fep->hwp + FEC_ADDR_LOW);
        *((unsigned short *) &tmpaddr[4]) = (readl(fep->hwp + FEC_ADDR_HIGH) >> 16);
        iap = &tmpaddr[0];
    }

    memcpy(dev->dev_addr, iap, ETH_ALEN);

    /* Adjust MAC if using default MAC address */
    if (iap == fec_mac_default)
        dev->dev_addr[ETH_ALEN-1] = fec_mac_default[ETH_ALEN-1] + fep->index;
}
#endif

/* ------------------------------------------------------------------------- */

static void mii_display_status(struct net_device *dev)
{
    struct fec_enet_private *fep = netdev_priv(dev);
    volatile uint *s = &(fep->phy_status);

    if (!fep->link && !fep->old_link) {
        /* Link is still down - don't print anything */
        return;
    }

    printk("%s: status: ", dev->name);

    if (!fep->link) {
        printk("link down");
    } else {
        printk("link up");

        switch (*s & PHY_STAT_SPMASK) {
        case PHY_STAT_100FDX: printk(", 100MBit Full Duplex"); break;
        case PHY_STAT_100HDX: printk(", 100MBit Half Duplex"); break;
        case PHY_STAT_10FDX: printk(", 10MBit Full Duplex"); break;
        case PHY_STAT_10HDX: printk(", 10MBit Half Duplex"); break;
        default:
            printk(", Unknown speed/duplex");
        }

        if (*s & PHY_STAT_ANC)
            printk(", auto-negotiation complete");
    }

    if (*s & PHY_STAT_FAULT)
        printk(", remote fault");

    printk(".\n");
}

static void mii_display_config(struct work_struct *work)
{
    struct fec_enet_private *fep = container_of(work, struct fec_enet_private, phy_task);
    struct net_device *dev = fep->netdev;
    uint status = fep->phy_status;

    /*
    ** When we get here, phy_task is already removed from
    ** the workqueue.  It is thus safe to reuse it.
    */
    fep->mii_phy_task_queued = 0;
    printk("%s: config: auto-negotiation ", dev->name);

    if (status & PHY_CONF_ANE)
        printk("on");
    else
        printk("off");

    if (status & PHY_CONF_100FDX)
        printk(", 100FDX");
    if (status & PHY_CONF_100HDX)
        printk(", 100HDX");
    if (status & PHY_CONF_10FDX)
        printk(", 10FDX");
    if (status & PHY_CONF_10HDX)
        printk(", 10HDX");
    if (!(status & PHY_CONF_SPMASK))
        printk(", No speed/duplex selected?");

    if (status & PHY_CONF_LOOP)
        printk(", loopback enabled");

    printk(".\n");

    fep->sequence_done = 1;
}

static void mii_relink(struct work_struct *work)
{
    struct fec_enet_private *fep = container_of(work, struct fec_enet_private, phy_task);
    struct net_device *dev = fep->netdev;
    int duplex;

    /*
    ** When we get here, phy_task is already removed from
    ** the workqueue.  It is thus safe to reuse it.
    */
    fep->mii_phy_task_queued = 0;
    fep->link = (fep->phy_status & PHY_STAT_LINK) ? 1 : 0;
    mii_display_status(dev);
    fep->old_link = fep->link;

    if (fep->link) {
        duplex = 0;
        if (fep->phy_status
            & (PHY_STAT_100FDX | PHY_STAT_10FDX))
            duplex = 1;
        fec_restart(dev, duplex);
    } else
        fec_stop(dev);
}

/* mii_queue_relink is called in interrupt context from mii_link_interrupt */
static void mii_queue_relink(uint mii_reg, struct net_device *dev)
{
    struct fec_enet_private *fep = netdev_priv(dev);

    /*
     * We cannot queue phy_task twice in the workqueue.  It
     * would cause an endless loop in the workqueue.
     * Fortunately, if the last mii_relink entry has not yet been
     * executed now, it will do the job for the current interrupt,
     * which is just what we want.
     */
    if (fep->mii_phy_task_queued)
        return;

    fep->mii_phy_task_queued = 1;
    INIT_WORK(&fep->phy_task, mii_relink);
    schedule_work(&fep->phy_task);
}

/* mii_queue_config is called in interrupt context from fec_enet_mii */
static void mii_queue_config(uint mii_reg, struct net_device *dev)
{
    struct fec_enet_private *fep = netdev_priv(dev);

    if (fep->mii_phy_task_queued)
        return;

    fep->mii_phy_task_queued = 1;
    INIT_WORK(&fep->phy_task, mii_display_config);
    schedule_work(&fep->phy_task);
}

phy_cmd_t const phy_cmd_relink[] = {
    { mk_mii_read(MII_REG_CR), mii_queue_relink },
    { mk_mii_end, }
    };
phy_cmd_t const phy_cmd_config[] = {
    { mk_mii_read(MII_REG_CR), mii_queue_config },
    { mk_mii_end, }
    };

/* Read remainder of PHY ID. */
static void
mii_discover_phy3(uint mii_reg, struct net_device *dev)
{
    struct fec_enet_private *fep;
    int i;

    fep = netdev_priv(dev);
    fep->phy_id |= (mii_reg & 0xffff);
    printk("fec: PHY @ 0x%x, ID 0x%08x", fep->phy_addr, fep->phy_id);

    for (i = 0; phy_info[i]; i++) {
        if (phy_info[i]->id == (fep->phy_id >> 4))
            break;
    }

    if (phy_info[i])
        printk(" -- %s\n", phy_info[i]->name);
    else
        printk(" -- unknown PHY!\n");

    fep->phy = phy_info[i];
    fep->phy_id_done = 1;
}

/* Scan all of the MII PHY addresses looking for someone to respond
 * with a valid ID.  This usually happens quickly.
 */
static void
mii_discover_phy(uint mii_reg, struct net_device *dev)
{
    struct fec_enet_private *fep;
    uint phytype;

    fep = netdev_priv(dev);

    if (fep->phy_addr < 32) {
        if ((phytype = (mii_reg & 0xffff)) != 0xffff && phytype != 0) {

            /* Got first part of ID, now get remainder */
            fep->phy_id = phytype << 16;
            mii_queue(dev, mk_mii_read(MII_REG_PHYIR2),
                            mii_discover_phy3);
        } else {
            fep->phy_addr++;
            mii_queue(dev, mk_mii_read(MII_REG_PHYIR1),
                            mii_discover_phy);
        }
    } else {
        printk("FEC: No PHY device found.\n");
        /* Disable external MII interface */
        writel(0, fep->hwp + FEC_MII_SPEED);
        fep->phy_speed = 0;
#ifdef HAVE_mii_link_interrupt
        fec_disable_phy_intr();
#endif
    }
}

/* This interrupt occurs when the PHY detects a link change */
#ifdef HAVE_mii_link_interrupt
static irqreturn_t
mii_link_interrupt(int irq, void *dev_id)
{
    struct net_device *dev = dev_id;
    struct fec_enet_private *fep = netdev_priv(dev);

    fec_phy_ack_intr();

    mii_do_cmd(dev, fep->phy->ack_int);
    mii_do_cmd(dev, phy_cmd_relink);  /* restart and display status */

    return IRQ_HANDLED;
}
#endif

static void fec_enet_free_buffers(struct net_device *dev)
{
    struct fec_enet_private *fep = netdev_priv(dev);
    int i;
    struct sk_buff *skb;
    struct bufdesc *bdp;

    bdp = fep->rx_bd_base;
    for (i = 0; i < RX_RING_SIZE; i++) {
        skb = fep->rx_skbuff[i];

        if (bdp->cbd_bufaddr)
            dma_unmap_single(&dev->dev, bdp->cbd_bufaddr,
                    FEC_ENET_RX_FRSIZE, DMA_FROM_DEVICE);
        if (skb)
            dev_kfree_skb(skb);
        bdp++;
    }

    bdp = fep->tx_bd_base;
    for (i = 0; i < TX_RING_SIZE; i++)
        kfree(fep->tx_bounce[i]);
}

static int fec_enet_alloc_buffers(struct net_device *dev)
{
    struct fec_enet_private *fep = netdev_priv(dev);
    int i;
    struct sk_buff *skb;
    struct bufdesc *bdp;

    bdp = fep->rx_bd_base;
    for (i = 0; i < RX_RING_SIZE; i++) {
        skb = dev_alloc_skb(FEC_ENET_RX_FRSIZE);
        if (!skb) {
            fec_enet_free_buffers(dev);
            return -ENOMEM;
        }
        fep->rx_skbuff[i] = skb;

        bdp->cbd_bufaddr = dma_map_single(&dev->dev, skb->data,
                FEC_ENET_RX_FRSIZE, DMA_FROM_DEVICE);
        bdp->cbd_sc = BD_ENET_RX_EMPTY;
        bdp++;
    }

    /* Set the last buffer to wrap. */
    bdp--;
    bdp->cbd_sc |= BD_SC_WRAP;

    bdp = fep->tx_bd_base;
    for (i = 0; i < TX_RING_SIZE; i++) {
        fep->tx_bounce[i] = kmalloc(FEC_ENET_TX_FRSIZE, GFP_KERNEL);

        bdp->cbd_sc = 0;
        bdp->cbd_bufaddr = 0;
        bdp++;
    }

    /* Set the last buffer to wrap. */
    bdp--;
    bdp->cbd_sc |= BD_SC_WRAP;

    return 0;
}

static int
fec_enet_open(struct net_device *dev)
{
    struct fec_enet_private *fep = netdev_priv(dev);
    int ret;

    /* I should reset the ring buffers here, but I don't yet know
     * a simple way to do that.
     */

    ret = fec_enet_alloc_buffers(dev);
    if (ret)
        return ret;

    fep->sequence_done = 0;
    fep->link = 0;

    fec_restart(dev, 1);

    if (fep->phy) {
        mii_do_cmd(dev, fep->phy->ack_int);
        mii_do_cmd(dev, fep->phy->config);
        mii_do_cmd(dev, phy_cmd_config);  /* display configuration */

        /* Poll until the PHY tells us its configuration
         * (not link state).
         * Request is initiated by mii_do_cmd above, but answer
         * comes by interrupt.
         * This should take about 25 usec per register at 2.5 MHz,
         * and we read approximately 5 registers.
         */
        while (!fep->sequence_done)
            schedule();

        mii_do_cmd(dev, fep->phy->startup);
    }

    /* Set the initial link state to true. A lot of hardware
     * based on this device does not implement a PHY interrupt,
     * so we are never notified of link change.
     */
    fep->link = 1;

    netif_start_queue(dev);
    fep->opened = 1;
    return 0;
}

static int
fec_enet_close(struct net_device *dev)
{
    struct fec_enet_private *fep = netdev_priv(dev);

    /* Don't know what to do yet. */
    fep->opened = 0;
    netif_stop_queue(dev);
    fec_stop(dev);

    fec_enet_free_buffers(dev);

    return 0;
}

/* Set or clear the multicast filter for this adaptor.
 * Skeleton taken from sunlance driver.
 * The CPM Ethernet implementation allows Multicast as well as individual
 * MAC address filtering.  Some of the drivers check to make sure it is
 * a group multicast address, and discard those that are not.  I guess I
 * will do the same for now, but just remove the test if you want
 * individual filtering as well (do the upper net layers want or support
 * this kind of feature?).
 */

#define HASH_BITS   6       /* #bits in hash */
#define CRC32_POLY  0xEDB88320

static void set_multicast_list(struct net_device *dev)
{
    struct fec_enet_private *fep = netdev_priv(dev);
    struct dev_mc_list *dmi;
    unsigned int i, j, bit, data, crc, tmp;
    unsigned char hash;

    if (dev->flags & IFF_PROMISC) {
        tmp = readl(fep->hwp + FEC_R_CNTRL);
        tmp |= 0x8;
        writel(tmp, fep->hwp + FEC_R_CNTRL);
        return;
    }

    tmp = readl(fep->hwp + FEC_R_CNTRL);
    tmp &= ~0x8;
    writel(tmp, fep->hwp + FEC_R_CNTRL);

    if (dev->flags & IFF_ALLMULTI) {
        /* Catch all multicast addresses, so set the
         * filter to all 1's
         */
        writel(0xffffffff, fep->hwp + FEC_GRP_HASH_TABLE_HIGH);
        writel(0xffffffff, fep->hwp + FEC_GRP_HASH_TABLE_LOW);

        return;
    }

    /* Clear filter and add the addresses in hash register
     */
    writel(0, fep->hwp + FEC_GRP_HASH_TABLE_HIGH);
    writel(0, fep->hwp + FEC_GRP_HASH_TABLE_LOW);

    dmi = dev->mc_list;

    for (j = 0; j < dev->mc_count; j++, dmi = dmi->next) {
        /* Only support group multicast for now */
        if (!(dmi->dmi_addr[0] & 1))
            continue;

        /* calculate crc32 value of mac address */
        crc = 0xffffffff;

        for (i = 0; i < dmi->dmi_addrlen; i++) {
            data = dmi->dmi_addr[i];
            for (bit = 0; bit < 8; bit++, data >>= 1) {
                crc = (crc >> 1) ^
                (((crc ^ data) & 1) ? CRC32_POLY : 0);
            }
        }

        /* only upper 6 bits (HASH_BITS) are used
         * which point to a specific bit in the hash registers
         */
        hash = (crc >> (32 - HASH_BITS)) & 0x3f;
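
        /* Bit 5 of the 6-bit hash selects the HIGH/LOW register and
         * the low five bits select the bit within it; e.g. hash 0x25
         * (37) sets bit 37 - 32 = 5 of GRP_HASH_TABLE_HIGH, while
         * hash 0x0a (10) sets bit 10 of GRP_HASH_TABLE_LOW.
         */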
        if (hash > 31) {
            tmp = readl(fep->hwp + FEC_GRP_HASH_TABLE_HIGH);
            tmp |= 1 << (hash - 32);
            writel(tmp, fep->hwp + FEC_GRP_HASH_TABLE_HIGH);
        } else {
            tmp = readl(fep->hwp + FEC_GRP_HASH_TABLE_LOW);
            tmp |= 1 << hash;
            writel(tmp, fep->hwp + FEC_GRP_HASH_TABLE_LOW);
        }
    }
}

/* Set a MAC change in hardware. */
static int
fec_set_mac_address(struct net_device *dev, void *p)
{
    struct fec_enet_private *fep = netdev_priv(dev);
    struct sockaddr *addr = p;

    if (!is_valid_ether_addr(addr->sa_data))
        return -EADDRNOTAVAIL;

    memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);

    writel(dev->dev_addr[3] | (dev->dev_addr[2] << 8) |
        (dev->dev_addr[1] << 16) | (dev->dev_addr[0] << 24),
        fep->hwp + FEC_ADDR_LOW);
    writel((dev->dev_addr[5] << 16) | (dev->dev_addr[4] << 24),
        fep->hwp + FEC_ADDR_HIGH);
    return 0;
}

static const struct net_device_ops fec_netdev_ops = {
    .ndo_open               = fec_enet_open,
    .ndo_stop               = fec_enet_close,
    .ndo_start_xmit         = fec_enet_start_xmit,
    .ndo_set_multicast_list = set_multicast_list,
    .ndo_change_mtu         = eth_change_mtu,
    .ndo_validate_addr      = eth_validate_addr,
    .ndo_tx_timeout         = fec_timeout,
    .ndo_set_mac_address    = fec_set_mac_address,
};

 /*
  * XXX:  We need to clean up on failure exits here.
  *
  * index is only used in legacy code
  */
int __init fec_enet_init(struct net_device *dev, int index)
{
    struct fec_enet_private *fep = netdev_priv(dev);
    struct bufdesc *cbd_base;
    int i;

    /* Allocate memory for buffer descriptors. */
    cbd_base = dma_alloc_coherent(NULL, PAGE_SIZE, &fep->bd_dma,
            GFP_KERNEL);
    if (!cbd_base) {
        printk("FEC: allocate descriptor memory failed?\n");
        return -ENOMEM;
    }

    spin_lock_init(&fep->hw_lock);
    spin_lock_init(&fep->mii_lock);

    fep->index = index;
    fep->hwp = (void __iomem *)dev->base_addr;
    fep->netdev = dev;

    /* Set the Ethernet address */
#ifdef CONFIG_M5272
    fec_get_mac(dev);
#else
    {
        unsigned long l;
        l = readl(fep->hwp + FEC_ADDR_LOW);
        dev->dev_addr[0] = (unsigned char)((l & 0xFF000000) >> 24);
        dev->dev_addr[1] = (unsigned char)((l & 0x00FF0000) >> 16);
        dev->dev_addr[2] = (unsigned char)((l & 0x0000FF00) >> 8);
        dev->dev_addr[3] = (unsigned char)((l & 0x000000FF) >> 0);
        l = readl(fep->hwp + FEC_ADDR_HIGH);
        dev->dev_addr[4] = (unsigned char)((l & 0xFF000000) >> 24);
        dev->dev_addr[5] = (unsigned char)((l & 0x00FF0000) >> 16);
    }
#endif

    /* Set receive and transmit descriptor base. */
    fep->rx_bd_base = cbd_base;
    fep->tx_bd_base = cbd_base + RX_RING_SIZE;

#ifdef HAVE_mii_link_interrupt
    fec_request_mii_intr(dev);
#endif
    /* The FEC Ethernet specific entries in the device structure */
    dev->watchdog_timeo = TX_TIMEOUT;
    dev->netdev_ops = &fec_netdev_ops;

    for (i = 0; i < NMII - 1; i++)
        mii_cmds[i].mii_next = &mii_cmds[i+1];
    mii_free = mii_cmds;

    /* Set MII speed to 2.5 MHz */
    fep->phy_speed = ((((clk_get_rate(fep->clk) / 2 + 4999999)
                    / 2500000) / 2) & 0x3F) << 1;
    fec_restart(dev, 0);

    /* Queue up command to detect the PHY and initialize the
     * remainder of the interface.
     */
    fep->phy_id_done = 0;
    fep->phy_addr = 0;
    mii_queue(dev, mk_mii_read(MII_REG_PHYIR1), mii_discover_phy);

    return 0;
}

/* This function is called to start or restart the FEC during a link
 * change.  This only happens when switching between half and full
 * duplex.
 */
static void
fec_restart(struct net_device *dev, int duplex)
{
    struct fec_enet_private *fep = netdev_priv(dev);
    struct bufdesc *bdp;
    int i;

    /* Whack a reset.  We should wait for this. */
    writel(1, fep->hwp + FEC_ECNTRL);
    udelay(10);

    /* Clear any outstanding interrupt. */
    writel(0xffc00000, fep->hwp + FEC_IEVENT);

    /* Reset all multicast. */
    writel(0, fep->hwp + FEC_GRP_HASH_TABLE_HIGH);
    writel(0, fep->hwp + FEC_GRP_HASH_TABLE_LOW);
#ifndef CONFIG_M5272
    writel(0, fep->hwp + FEC_HASH_TABLE_HIGH);
    writel(0, fep->hwp + FEC_HASH_TABLE_LOW);
#endif

    /* Set maximum receive buffer size. */
    writel(PKT_MAXBLR_SIZE, fep->hwp + FEC_R_BUFF_SIZE);

    /* Set receive and transmit descriptor base. */
    writel(fep->bd_dma, fep->hwp + FEC_R_DES_START);
    writel((unsigned long)fep->bd_dma + sizeof(struct bufdesc) * RX_RING_SIZE,
            fep->hwp + FEC_X_DES_START);

    fep->dirty_tx = fep->cur_tx = fep->tx_bd_base;
    fep->cur_rx = fep->rx_bd_base;

    /* Reset SKB transmit buffers. */
    fep->skb_cur = fep->skb_dirty = 0;
    for (i = 0; i <= TX_RING_MOD_MASK; i++) {
        if (fep->tx_skbuff[i]) {
            dev_kfree_skb_any(fep->tx_skbuff[i]);
            fep->tx_skbuff[i] = NULL;
        }
    }

    /* Initialize the receive buffer descriptors. */
    bdp = fep->rx_bd_base;
    for (i = 0; i < RX_RING_SIZE; i++) {

        /* Initialize the BD for every fragment in the page. */
        bdp->cbd_sc = BD_ENET_RX_EMPTY;
        bdp++;
    }

    /* Set the last buffer to wrap */
    bdp--;
    bdp->cbd_sc |= BD_SC_WRAP;

    /* ...and the same for transmit */
    bdp = fep->tx_bd_base;
    for (i = 0; i < TX_RING_SIZE; i++) {

        /* Initialize the BD for every fragment in the page. */
        bdp->cbd_sc = 0;
        bdp->cbd_bufaddr = 0;
        bdp++;
    }

    /* Set the last buffer to wrap */
    bdp--;
    bdp->cbd_sc |= BD_SC_WRAP;

    /* Enable MII mode */
    if (duplex) {
        /* MII enable / FD enable */
        writel(OPT_FRAME_SIZE | 0x04, fep->hwp + FEC_R_CNTRL);
        writel(0x04, fep->hwp + FEC_X_CNTRL);
    } else {
        /* MII enable / No Rcv on Xmit */
        writel(OPT_FRAME_SIZE | 0x06, fep->hwp + FEC_R_CNTRL);
        writel(0x0, fep->hwp + FEC_X_CNTRL);
    }
    fep->full_duplex = duplex;

    /* Set MII speed */
    writel(fep->phy_speed, fep->hwp + FEC_MII_SPEED);

    /* And last, enable the transmit and receive processing */
    writel(2, fep->hwp + FEC_ECNTRL);
    writel(0, fep->hwp + FEC_R_DES_ACTIVE);

    /* Enable interrupts we wish to service */
    writel(FEC_ENET_TXF | FEC_ENET_RXF | FEC_ENET_MII,
            fep->hwp + FEC_IMASK);
}

static void
fec_stop(struct net_device *dev)
{
    struct fec_enet_private *fep = netdev_priv(dev);

    /* We cannot expect a graceful transmit stop without link !!! */
    if (fep->link) {
        writel(1, fep->hwp + FEC_X_CNTRL); /* Graceful transmit stop */
        udelay(10);
        if (!(readl(fep->hwp + FEC_IEVENT) & FEC_ENET_GRA))
            printk("fec_stop: Graceful transmit stop did not complete!\n");
    }

    /* Whack a reset.  We should wait for this. */
    writel(1, fep->hwp + FEC_ECNTRL);
    udelay(10);

    /* Clear outstanding MII command interrupts. */
    writel(FEC_ENET_MII, fep->hwp + FEC_IEVENT);

    /* The reset above also cleared MII_SPEED, so restore it to keep
     * the MII state machine usable while the controller is stopped.
     */
    writel(FEC_ENET_MII, fep->hwp + FEC_IMASK);
    writel(fep->phy_speed, fep->hwp + FEC_MII_SPEED);
}

static int __devinit
fec_probe(struct platform_device *pdev)
{
    struct fec_enet_private *fep;
    struct net_device *ndev;
    int i, irq, ret = 0;
    struct resource *r;

    r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
    if (!r)
        return -ENXIO;

    r = request_mem_region(r->start, resource_size(r), pdev->name);
    if (!r)
        return -EBUSY;

    /* Init network device */
    ndev = alloc_etherdev(sizeof(struct fec_enet_private));
    if (!ndev)
        return -ENOMEM;

    SET_NETDEV_DEV(ndev, &pdev->dev);

    /* setup board info structure */
    fep = netdev_priv(ndev);
    memset(fep, 0, sizeof(*fep));

    ndev->base_addr = (unsigned long)ioremap(r->start, resource_size(r));

    if (!ndev->base_addr) {
        ret = -ENOMEM;
        goto failed_ioremap;
    }

    platform_set_drvdata(pdev, ndev);

    /* This device has up to three irqs on some platforms */
    for (i = 0; i < 3; i++) {
        irq = platform_get_irq(pdev, i);
        if (i && irq < 0)
            break;
        ret = request_irq(irq, fec_enet_interrupt, IRQF_DISABLED, pdev->name, ndev);
        if (ret) {
            /* Only free the irqs that were actually requested */
            while (--i >= 0) {
                irq = platform_get_irq(pdev, i);
                free_irq(irq, ndev);
            }
            goto failed_irq;
        }
    }

    fep->clk = clk_get(&pdev->dev, "fec_clk");
    if (IS_ERR(fep->clk)) {
        ret = PTR_ERR(fep->clk);
        goto failed_clk;
    }
    clk_enable(fep->clk);

    ret = fec_enet_init(ndev, 0);
    if (ret)
        goto failed_init;

    ret = register_netdev(ndev);
    if (ret)
        goto failed_register;

    return 0;

failed_register:
failed_init:
    clk_disable(fep->clk);
    clk_put(fep->clk);
failed_clk:
    for (i = 0; i < 3; i++) {
        irq = platform_get_irq(pdev, i);
        if (irq > 0)
            free_irq(irq, ndev);
    }
failed_irq:
    iounmap((void __iomem *)ndev->base_addr);
failed_ioremap:
    free_netdev(ndev);

    return ret;
}

static int __devexit
fec_drv_remove(struct platform_device *pdev)
{
    struct net_device *ndev = platform_get_drvdata(pdev);
    struct fec_enet_private *fep = netdev_priv(ndev);

    platform_set_drvdata(pdev, NULL);

    fec_stop(ndev);
    clk_disable(fep->clk);
    clk_put(fep->clk);
    iounmap((void __iomem *)ndev->base_addr);
    unregister_netdev(ndev);
    free_netdev(ndev);
    return 0;
}

static int
fec_suspend(struct platform_device *dev, pm_message_t state)
{
    struct net_device *ndev = platform_get_drvdata(dev);
    struct fec_enet_private *fep;

    if (ndev) {
        fep = netdev_priv(ndev);
        if (netif_running(ndev)) {
            netif_device_detach(ndev);
            fec_stop(ndev);
        }
    }
    return 0;
}

static int
fec_resume(struct platform_device *dev)
{
    struct net_device *ndev = platform_get_drvdata(dev);

    if (ndev) {
        if (netif_running(ndev)) {
            fec_enet_init(ndev, 0);
            netif_device_attach(ndev);
        }
    }
    return 0;
}

static struct platform_driver fec_driver = {
    .driver = {
        .name   = "fec",
        .owner  = THIS_MODULE,
    },
    .probe   = fec_probe,
    .remove  = __devexit_p(fec_drv_remove),
    .suspend = fec_suspend,
    .resume  = fec_resume,
};

static int __init
fec_enet_module_init(void)
{
    printk(KERN_INFO "FEC Ethernet Driver\n");

    return platform_driver_register(&fec_driver);
}

static void __exit
fec_enet_cleanup(void)
{
    platform_driver_unregister(&fec_driver);
}

module_exit(fec_enet_cleanup);
module_init(fec_enet_module_init);

MODULE_LICENSE("GPL");