/*
 * drivers/net/gianfar.c
 *
 * Gianfar Ethernet Driver
 * This driver is designed for the non-CPM ethernet controllers
 * on the 85xx and 83xx family of integrated processors
 * Based on 8260_io/fcc_enet.c
 *
 * Author: Andy Fleming
 * Maintainer: Kumar Gala
 *
 * Copyright (c) 2002-2004 Freescale Semiconductor, Inc.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the
 * Free Software Foundation; either version 2 of the License, or (at your
 * option) any later version.
 *
 * Gianfar: AKA Lambda Draconis, "Dragon"
 * RA 11 31 24.2
 * Dec +69 19 52
 * V 3.84
 * B-V +1.62
 *
 * Theory of operation
 *
 * The driver is initialized through platform_device.  Structures which
 * define the configuration needed by the board are defined in a
 * board structure in arch/ppc/platforms (though I do not
 * discount the possibility that other architectures could one
 * day be supported.)
 *
 * The Gianfar Ethernet Controller uses a ring of buffer
 * descriptors.  The beginning is indicated by a register
 * pointing to the physical address of the start of the ring.
 * The end is determined by a "wrap" bit being set in the
 * last descriptor of the ring.
 *
 * When a packet is received, the RXF bit in the
 * IEVENT register is set, triggering an interrupt when the
 * corresponding bit in the IMASK register is also set (if
 * interrupt coalescing is active, then the interrupt may not
 * happen immediately, but will wait until either a set number
 * of frames or amount of time have passed).  In NAPI, the
 * interrupt handler will signal there is work to be done, and
 * exit.  Without NAPI, the packet(s) will be handled
 * immediately.  Both methods will start at the last known empty
 * descriptor, and process every subsequent descriptor until there
 * are none left with data (NAPI will stop after a set number of
 * packets to give time to other tasks, but will eventually
 * process all the packets).  The data arrives inside a
 * pre-allocated skb, and so after the skb is passed up to the
 * stack, a new skb must be allocated, and the address field in
 * the buffer descriptor must be updated to indicate this new
 * skb.
 *
 * When the kernel requests that a packet be transmitted, the
 * driver starts where it left off last time, and points the
 * descriptor at the buffer which was passed in.  The driver
 * then informs the DMA engine that there are packets ready to
 * be transmitted.  Once the controller is finished transmitting
 * the packet, an interrupt may be triggered (under the same
 * conditions as for reception, but depending on the TXF bit).
 * The driver then cleans up the buffer.
 */
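/*
 * A sketch of the descriptor layout as this file uses it (the real
 * definitions of struct txbd8 and struct rxbd8 live in gianfar.h):
 * each 8-byte descriptor carries a 16-bit status word (ready/empty,
 * wrap, and error flags), a 16-bit length, and the 32-bit DMA address
 * of its data buffer (bufPtr).  Driver and controller hand buffers
 * back and forth by flipping TXBD_READY/RXBD_EMPTY in the status word.
 */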
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/unistd.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/init.h>
#include <linux/delay.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/if_vlan.h>
#include <linux/spinlock.h>
#include <linux/mm.h>
#include <linux/platform_device.h>
#include <linux/ip.h>
#include <linux/tcp.h>
#include <linux/udp.h>
#include <linux/in.h>

#include <asm/io.h>
#include <asm/irq.h>
#include <asm/uaccess.h>
#include <linux/module.h>
#include <linux/dma-mapping.h>
#include <linux/crc32.h>
#include <linux/mii.h>
#include <linux/phy.h>

#include "gianfar.h"
#include "gianfar_mii.h"

#define TX_TIMEOUT      (1*HZ)
#define SKB_ALLOC_TIMEOUT 1000000
#undef BRIEF_GFAR_ERRORS
#undef VERBOSE_GFAR_ERRORS

#ifdef CONFIG_GFAR_NAPI
#define RECEIVE(x) netif_receive_skb(x)
#else
#define RECEIVE(x) netif_rx(x)
#endif
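/*
 * (With NAPI the frame is handed up from the poll routine in softirq
 * context, so netif_receive_skb() is called directly; without NAPI the
 * frame arrives in hard interrupt context and netif_rx() queues it for
 * the backlog instead.)
 */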
const char gfar_driver_name[] = "Gianfar Ethernet";
const char gfar_driver_version[] = "1.3";

static int gfar_enet_open(struct net_device *dev);
static int gfar_start_xmit(struct sk_buff *skb, struct net_device *dev);
static void gfar_timeout(struct net_device *dev);
static int gfar_close(struct net_device *dev);
struct sk_buff *gfar_new_skb(struct net_device *dev, struct rxbd8 *bdp);
static struct net_device_stats *gfar_get_stats(struct net_device *dev);
static int gfar_set_mac_address(struct net_device *dev);
static int gfar_change_mtu(struct net_device *dev, int new_mtu);
static irqreturn_t gfar_error(int irq, void *dev_id, struct pt_regs *regs);
static irqreturn_t gfar_transmit(int irq, void *dev_id, struct pt_regs *regs);
static irqreturn_t gfar_interrupt(int irq, void *dev_id, struct pt_regs *regs);
static void adjust_link(struct net_device *dev);
static void init_registers(struct net_device *dev);
static int init_phy(struct net_device *dev);
static int gfar_probe(struct platform_device *pdev);
static int gfar_remove(struct platform_device *pdev);
static void free_skb_resources(struct gfar_private *priv);
static void gfar_set_multi(struct net_device *dev);
static void gfar_set_hash_for_addr(struct net_device *dev, u8 *addr);
#ifdef CONFIG_GFAR_NAPI
static int gfar_poll(struct net_device *dev, int *budget);
#endif
int gfar_clean_rx_ring(struct net_device *dev, int rx_work_limit);
static int gfar_process_frame(struct net_device *dev, struct sk_buff *skb, int length);
static void gfar_vlan_rx_register(struct net_device *netdev,
				struct vlan_group *grp);
static void gfar_vlan_rx_kill_vid(struct net_device *netdev, uint16_t vid);
void gfar_halt(struct net_device *dev);
void gfar_start(struct net_device *dev);
static void gfar_clear_exact_match(struct net_device *dev);
static void gfar_set_mac_for_addr(struct net_device *dev, int num, u8 *addr);

extern struct ethtool_ops gfar_ethtool_ops;

MODULE_AUTHOR("Freescale Semiconductor, Inc");
MODULE_DESCRIPTION("Gianfar Ethernet Driver");
MODULE_LICENSE("GPL");
/* Returns 1 if incoming frames use an FCB */
static inline int gfar_uses_fcb(struct gfar_private *priv)
{
	return (priv->vlan_enable || priv->rx_csum_enable);
}
/* Set up the ethernet device structure, private data,
 * and anything else we need before we start */
static int gfar_probe(struct platform_device *pdev)
{
	u32 tempval;
	struct net_device *dev = NULL;
	struct gfar_private *priv = NULL;
	struct gianfar_platform_data *einfo;
	struct resource *r;
	int idx;
	int err = 0;

	einfo = (struct gianfar_platform_data *) pdev->dev.platform_data;

	if (NULL == einfo) {
		printk(KERN_ERR "gfar %d: Missing additional data!\n",
		       pdev->id);

		return -ENODEV;
	}

	/* Create an ethernet device instance */
	dev = alloc_etherdev(sizeof (*priv));

	if (NULL == dev)
		return -ENOMEM;

	priv = netdev_priv(dev);

	/* Set the info in the priv to the current info */
	priv->einfo = einfo;

	/* fill out IRQ fields */
	if (einfo->device_flags & FSL_GIANFAR_DEV_HAS_MULTI_INTR) {
		priv->interruptTransmit = platform_get_irq_byname(pdev, "tx");
		priv->interruptReceive = platform_get_irq_byname(pdev, "rx");
		priv->interruptError = platform_get_irq_byname(pdev, "error");

		if (priv->interruptTransmit < 0 || priv->interruptReceive < 0 || priv->interruptError < 0)
			goto regs_fail;
	} else {
		priv->interruptTransmit = platform_get_irq(pdev, 0);

		if (priv->interruptTransmit < 0)
			goto regs_fail;
	}

	/* get a pointer to the register memory */
	r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	priv->regs = ioremap(r->start, sizeof (struct gfar));

	if (NULL == priv->regs) {
		err = -ENOMEM;
		goto regs_fail;
	}

	spin_lock_init(&priv->txlock);
	spin_lock_init(&priv->rxlock);

	platform_set_drvdata(pdev, dev);

	/* Stop the DMA engine now, in case it was running before */
	/* (The firmware could have used it, and left it running). */
	/* To do this, we write Graceful Receive Stop and Graceful */
	/* Transmit Stop, and then wait until the corresponding bits */
	/* in IEVENT indicate the stops have completed. */
	tempval = gfar_read(&priv->regs->dmactrl);
	tempval &= ~(DMACTRL_GRS | DMACTRL_GTS);
	gfar_write(&priv->regs->dmactrl, tempval);

	tempval = gfar_read(&priv->regs->dmactrl);
	tempval |= (DMACTRL_GRS | DMACTRL_GTS);
	gfar_write(&priv->regs->dmactrl, tempval);

	while (!(gfar_read(&priv->regs->ievent) & (IEVENT_GRSC | IEVENT_GTSC)))
		cpu_relax();

	/* Reset MAC layer */
	gfar_write(&priv->regs->maccfg1, MACCFG1_SOFT_RESET);

	tempval = (MACCFG1_TX_FLOW | MACCFG1_RX_FLOW);
	gfar_write(&priv->regs->maccfg1, tempval);

	/* Initialize MACCFG2. */
	gfar_write(&priv->regs->maccfg2, MACCFG2_INIT_SETTINGS);

	/* Initialize ECNTRL */
	gfar_write(&priv->regs->ecntrl, ECNTRL_INIT_SETTINGS);

	/* Copy the station address into the dev structure, */
	memcpy(dev->dev_addr, einfo->mac_addr, MAC_ADDR_LEN);

	/* Set the dev->base_addr to the gfar reg region */
	dev->base_addr = (unsigned long) (priv->regs);

	SET_MODULE_OWNER(dev);
	SET_NETDEV_DEV(dev, &pdev->dev);

	/* Fill in the dev structure */
	dev->open = gfar_enet_open;
	dev->hard_start_xmit = gfar_start_xmit;
	dev->tx_timeout = gfar_timeout;
	dev->watchdog_timeo = TX_TIMEOUT;
#ifdef CONFIG_GFAR_NAPI
	dev->poll = gfar_poll;
	dev->weight = GFAR_DEV_WEIGHT;
#endif
	dev->stop = gfar_close;
	dev->get_stats = gfar_get_stats;
	dev->change_mtu = gfar_change_mtu;
	dev->mtu = 1500;
	dev->set_multicast_list = gfar_set_multi;

	dev->ethtool_ops = &gfar_ethtool_ops;

	if (priv->einfo->device_flags & FSL_GIANFAR_DEV_HAS_CSUM) {
		priv->rx_csum_enable = 1;
		dev->features |= NETIF_F_IP_CSUM;
	} else
		priv->rx_csum_enable = 0;

	priv->vlgrp = NULL;

	if (priv->einfo->device_flags & FSL_GIANFAR_DEV_HAS_VLAN) {
		dev->vlan_rx_register = gfar_vlan_rx_register;
		dev->vlan_rx_kill_vid = gfar_vlan_rx_kill_vid;

		dev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;

		priv->vlan_enable = 1;
	}
	if (priv->einfo->device_flags & FSL_GIANFAR_DEV_HAS_EXTENDED_HASH) {
		priv->extended_hash = 1;
		priv->hash_width = 9;

		priv->hash_regs[0] = &priv->regs->igaddr0;
		priv->hash_regs[1] = &priv->regs->igaddr1;
		priv->hash_regs[2] = &priv->regs->igaddr2;
		priv->hash_regs[3] = &priv->regs->igaddr3;
		priv->hash_regs[4] = &priv->regs->igaddr4;
		priv->hash_regs[5] = &priv->regs->igaddr5;
		priv->hash_regs[6] = &priv->regs->igaddr6;
		priv->hash_regs[7] = &priv->regs->igaddr7;
		priv->hash_regs[8] = &priv->regs->gaddr0;
		priv->hash_regs[9] = &priv->regs->gaddr1;
		priv->hash_regs[10] = &priv->regs->gaddr2;
		priv->hash_regs[11] = &priv->regs->gaddr3;
		priv->hash_regs[12] = &priv->regs->gaddr4;
		priv->hash_regs[13] = &priv->regs->gaddr5;
		priv->hash_regs[14] = &priv->regs->gaddr6;
		priv->hash_regs[15] = &priv->regs->gaddr7;

	} else {
		priv->extended_hash = 0;
		priv->hash_width = 8;

		priv->hash_regs[0] = &priv->regs->gaddr0;
		priv->hash_regs[1] = &priv->regs->gaddr1;
		priv->hash_regs[2] = &priv->regs->gaddr2;
		priv->hash_regs[3] = &priv->regs->gaddr3;
		priv->hash_regs[4] = &priv->regs->gaddr4;
		priv->hash_regs[5] = &priv->regs->gaddr5;
		priv->hash_regs[6] = &priv->regs->gaddr6;
		priv->hash_regs[7] = &priv->regs->gaddr7;
	}
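	/*
	 * (Illustrative note: the extended table is 16 32-bit registers,
	 * i.e. 512 hash bins, hence the 9-bit hash_width; the standard
	 * table is 8 registers = 256 bins with an 8-bit width.  See
	 * gfar_set_hash_for_addr() below for how the width is used.)
	 */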
	if (priv->einfo->device_flags & FSL_GIANFAR_DEV_HAS_PADDING)
		priv->padding = DEFAULT_PADDING;
	else
		priv->padding = 0;

	if (dev->features & NETIF_F_IP_CSUM)
		dev->hard_header_len += GMAC_FCB_LEN;

	priv->rx_buffer_size = DEFAULT_RX_BUFFER_SIZE;
	priv->tx_ring_size = DEFAULT_TX_RING_SIZE;
	priv->rx_ring_size = DEFAULT_RX_RING_SIZE;

	priv->txcoalescing = DEFAULT_TX_COALESCE;
	priv->txcount = DEFAULT_TXCOUNT;
	priv->txtime = DEFAULT_TXTIME;
	priv->rxcoalescing = DEFAULT_RX_COALESCE;
	priv->rxcount = DEFAULT_RXCOUNT;
	priv->rxtime = DEFAULT_RXTIME;

	/* Enable most messages by default */
	priv->msg_enable = (NETIF_MSG_IFUP << 1) - 1;

	err = register_netdev(dev);

	if (err) {
		printk(KERN_ERR "%s: Cannot register net device, aborting.\n",
		       dev->name);
		goto register_fail;
	}

	/* Create all the sysfs files */
	gfar_init_sysfs(dev);

	/* Print out the device info */
	printk(KERN_INFO DEVICE_NAME, dev->name);
	for (idx = 0; idx < 6; idx++)
		printk("%2.2x%c", dev->dev_addr[idx], idx == 5 ? ' ' : ':');
	printk("\n");

	/* Even more device info helps when determining which kernel */
	/* provided which set of benchmarks. */
#ifdef CONFIG_GFAR_NAPI
	printk(KERN_INFO "%s: Running with NAPI enabled\n", dev->name);
#else
	printk(KERN_INFO "%s: Running with NAPI disabled\n", dev->name);
#endif
	printk(KERN_INFO "%s: %d/%d RX/TX BD ring size\n",
	       dev->name, priv->rx_ring_size, priv->tx_ring_size);

	return 0;

register_fail:
	iounmap(priv->regs);
regs_fail:
	free_netdev(dev);
	return err;
}
static int gfar_remove(struct platform_device *pdev)
{
	struct net_device *dev = platform_get_drvdata(pdev);
	struct gfar_private *priv = netdev_priv(dev);

	platform_set_drvdata(pdev, NULL);

	iounmap(priv->regs);
	free_netdev(dev);

	return 0;
}
/* Initializes driver's PHY state, and attaches to the PHY.
 * Returns 0 on success.
 */
static int init_phy(struct net_device *dev)
{
	struct gfar_private *priv = netdev_priv(dev);
	uint gigabit_support =
		priv->einfo->device_flags & FSL_GIANFAR_DEV_HAS_GIGABIT ?
		SUPPORTED_1000baseT_Full : 0;
	struct phy_device *phydev;
	char phy_id[BUS_ID_SIZE];

	priv->oldlink = 0;
	priv->oldspeed = 0;
	priv->oldduplex = -1;

	snprintf(phy_id, BUS_ID_SIZE, PHY_ID_FMT, priv->einfo->bus_id, priv->einfo->phy_id);

	phydev = phy_connect(dev, phy_id, &adjust_link, 0);

	if (IS_ERR(phydev)) {
		printk(KERN_ERR "%s: Could not attach to PHY\n", dev->name);
		return PTR_ERR(phydev);
	}

	/* Remove any features not supported by the controller */
	phydev->supported &= (GFAR_SUPPORTED | gigabit_support);
	phydev->advertising = phydev->supported;

	priv->phydev = phydev;

	return 0;
}
static void init_registers(struct net_device *dev)
{
	struct gfar_private *priv = netdev_priv(dev);

	/* Clear IEVENT */
	gfar_write(&priv->regs->ievent, IEVENT_INIT_CLEAR);

	/* Initialize IMASK */
	gfar_write(&priv->regs->imask, IMASK_INIT_CLEAR);

	/* Init hash registers to zero */
	gfar_write(&priv->regs->igaddr0, 0);
	gfar_write(&priv->regs->igaddr1, 0);
	gfar_write(&priv->regs->igaddr2, 0);
	gfar_write(&priv->regs->igaddr3, 0);
	gfar_write(&priv->regs->igaddr4, 0);
	gfar_write(&priv->regs->igaddr5, 0);
	gfar_write(&priv->regs->igaddr6, 0);
	gfar_write(&priv->regs->igaddr7, 0);

	gfar_write(&priv->regs->gaddr0, 0);
	gfar_write(&priv->regs->gaddr1, 0);
	gfar_write(&priv->regs->gaddr2, 0);
	gfar_write(&priv->regs->gaddr3, 0);
	gfar_write(&priv->regs->gaddr4, 0);
	gfar_write(&priv->regs->gaddr5, 0);
	gfar_write(&priv->regs->gaddr6, 0);
	gfar_write(&priv->regs->gaddr7, 0);

	/* Zero out the rmon mib registers if it has them */
	if (priv->einfo->device_flags & FSL_GIANFAR_DEV_HAS_RMON) {
		memset_io(&(priv->regs->rmon), 0, sizeof (struct rmon_mib));

		/* Mask off the CAM interrupts */
		gfar_write(&priv->regs->rmon.cam1, 0xffffffff);
		gfar_write(&priv->regs->rmon.cam2, 0xffffffff);
	}

	/* Initialize the max receive buffer length */
	gfar_write(&priv->regs->mrblr, priv->rx_buffer_size);

	/* Initialize the Minimum Frame Length Register */
	gfar_write(&priv->regs->minflr, MINFLR_INIT_SETTINGS);

	/* Assign the TBI an address which won't conflict with the PHYs */
	gfar_write(&priv->regs->tbipa, TBIPA_VALUE);
}
/* Halt the receive and transmit queues */
void gfar_halt(struct net_device *dev)
{
	struct gfar_private *priv = netdev_priv(dev);
	struct gfar __iomem *regs = priv->regs;
	u32 tempval;

	/* Mask all interrupts */
	gfar_write(&regs->imask, IMASK_INIT_CLEAR);

	/* Clear all interrupts */
	gfar_write(&regs->ievent, IEVENT_INIT_CLEAR);

	/* Stop the DMA, and wait for it to stop */
	tempval = gfar_read(&priv->regs->dmactrl);
	if ((tempval & (DMACTRL_GRS | DMACTRL_GTS))
	    != (DMACTRL_GRS | DMACTRL_GTS)) {
		tempval |= (DMACTRL_GRS | DMACTRL_GTS);
		gfar_write(&priv->regs->dmactrl, tempval);

		while (!(gfar_read(&priv->regs->ievent) &
			 (IEVENT_GRSC | IEVENT_GTSC)))
			cpu_relax();
	}

	/* Disable Rx and Tx */
	tempval = gfar_read(&regs->maccfg1);
	tempval &= ~(MACCFG1_RX_EN | MACCFG1_TX_EN);
	gfar_write(&regs->maccfg1, tempval);
}
void stop_gfar(struct net_device *dev)
{
	struct gfar_private *priv = netdev_priv(dev);
	struct gfar __iomem *regs = priv->regs;
	unsigned long flags;

	phy_stop(priv->phydev);

	/* Lock it down */
	spin_lock_irqsave(&priv->txlock, flags);
	spin_lock(&priv->rxlock);

	gfar_halt(dev);

	spin_unlock(&priv->rxlock);
	spin_unlock_irqrestore(&priv->txlock, flags);

	/* Free the IRQs */
	if (priv->einfo->device_flags & FSL_GIANFAR_DEV_HAS_MULTI_INTR) {
		free_irq(priv->interruptError, dev);
		free_irq(priv->interruptTransmit, dev);
		free_irq(priv->interruptReceive, dev);
	} else {
		free_irq(priv->interruptTransmit, dev);
	}

	free_skb_resources(priv);

	dma_free_coherent(NULL,
			sizeof(struct txbd8)*priv->tx_ring_size
			+ sizeof(struct rxbd8)*priv->rx_ring_size,
			priv->tx_bd_base,
			gfar_read(&regs->tbase0));
}
/* If there are any tx skbs or rx skbs still around, free them.
 * Then free tx_skbuff and rx_skbuff */
static void free_skb_resources(struct gfar_private *priv)
{
	struct rxbd8 *rxbdp;
	struct txbd8 *txbdp;
	int i;

	/* Go through all the buffer descriptors and free their data buffers */
	txbdp = priv->tx_bd_base;

	for (i = 0; i < priv->tx_ring_size; i++) {

		if (priv->tx_skbuff[i]) {
			dma_unmap_single(NULL, txbdp->bufPtr,
					txbdp->length,
					DMA_TO_DEVICE);
			dev_kfree_skb_any(priv->tx_skbuff[i]);
			priv->tx_skbuff[i] = NULL;
		}
	}

	kfree(priv->tx_skbuff);

	rxbdp = priv->rx_bd_base;

	/* rx_skbuff is not guaranteed to be allocated, so only
	 * free it and its contents if it is allocated */
	if(priv->rx_skbuff != NULL) {
		for (i = 0; i < priv->rx_ring_size; i++) {
			if (priv->rx_skbuff[i]) {
				dma_unmap_single(NULL, rxbdp->bufPtr,
						priv->rx_buffer_size,
						DMA_FROM_DEVICE);

				dev_kfree_skb_any(priv->rx_skbuff[i]);
				priv->rx_skbuff[i] = NULL;
			}

			rxbdp->status = 0;
			rxbdp->length = 0;
			rxbdp->bufPtr = 0;

			rxbdp++;
		}

		kfree(priv->rx_skbuff);
	}
}
void gfar_start(struct net_device *dev)
{
	struct gfar_private *priv = netdev_priv(dev);
	struct gfar __iomem *regs = priv->regs;
	u32 tempval;

	/* Enable Rx and Tx in MACCFG1 */
	tempval = gfar_read(&regs->maccfg1);
	tempval |= (MACCFG1_RX_EN | MACCFG1_TX_EN);
	gfar_write(&regs->maccfg1, tempval);

	/* Initialize DMACTRL to have WWR and WOP */
	tempval = gfar_read(&priv->regs->dmactrl);
	tempval |= DMACTRL_INIT_SETTINGS;
	gfar_write(&priv->regs->dmactrl, tempval);

	/* Make sure we aren't stopped */
	tempval = gfar_read(&priv->regs->dmactrl);
	tempval &= ~(DMACTRL_GRS | DMACTRL_GTS);
	gfar_write(&priv->regs->dmactrl, tempval);

	/* Clear THLT/RHLT, so that the DMA starts polling now */
	gfar_write(&regs->tstat, TSTAT_CLEAR_THALT);
	gfar_write(&regs->rstat, RSTAT_CLEAR_RHALT);

	/* Unmask the interrupts we look for */
	gfar_write(&regs->imask, IMASK_DEFAULT);
}
/* Bring the controller up and running */
int startup_gfar(struct net_device *dev)
{
	struct txbd8 *txbdp;
	struct rxbd8 *rxbdp;
	dma_addr_t addr;
	unsigned long vaddr;
	int i;
	struct gfar_private *priv = netdev_priv(dev);
	struct gfar __iomem *regs = priv->regs;
	int err = 0;
	u32 rctrl = 0;
	u32 attrs = 0;

	gfar_write(&regs->imask, IMASK_INIT_CLEAR);

	/* Allocate memory for the buffer descriptors */
	vaddr = (unsigned long) dma_alloc_coherent(NULL,
			sizeof (struct txbd8) * priv->tx_ring_size +
			sizeof (struct rxbd8) * priv->rx_ring_size,
			&addr, GFP_KERNEL);

	if (vaddr == 0) {
		if (netif_msg_ifup(priv))
			printk(KERN_ERR "%s: Could not allocate buffer descriptors!\n",
			       dev->name);
		return -ENOMEM;
	}

	priv->tx_bd_base = (struct txbd8 *) vaddr;

	/* enet DMA only understands physical addresses */
	gfar_write(&regs->tbase0, addr);

	/* Start the rx descriptor ring where the tx ring leaves off */
	addr = addr + sizeof (struct txbd8) * priv->tx_ring_size;
	vaddr = vaddr + sizeof (struct txbd8) * priv->tx_ring_size;
	priv->rx_bd_base = (struct rxbd8 *) vaddr;
	gfar_write(&regs->rbase0, addr);
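	/*
	 * (Layout of the single coherent block allocated above, for
	 * illustration:
	 *
	 *   tbase0 -> tx_ring_size * sizeof(struct txbd8)   tx descriptors
	 *   rbase0 -> rx_ring_size * sizeof(struct rxbd8)   rx descriptors
	 *
	 * which is why stop_gfar() frees both rings with a single
	 * dma_free_coherent() call on tx_bd_base.)
	 */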
	/* Setup the skbuff rings */
	priv->tx_skbuff =
	    (struct sk_buff **) kmalloc(sizeof (struct sk_buff *) *
					priv->tx_ring_size, GFP_KERNEL);

	if (NULL == priv->tx_skbuff) {
		if (netif_msg_ifup(priv))
			printk(KERN_ERR "%s: Could not allocate tx_skbuff\n",
			       dev->name);
		err = -ENOMEM;
		goto tx_skb_fail;
	}

	for (i = 0; i < priv->tx_ring_size; i++)
		priv->tx_skbuff[i] = NULL;

	priv->rx_skbuff =
	    (struct sk_buff **) kmalloc(sizeof (struct sk_buff *) *
					priv->rx_ring_size, GFP_KERNEL);

	if (NULL == priv->rx_skbuff) {
		if (netif_msg_ifup(priv))
			printk(KERN_ERR "%s: Could not allocate rx_skbuff\n",
			       dev->name);
		err = -ENOMEM;
		goto rx_skb_fail;
	}

	for (i = 0; i < priv->rx_ring_size; i++)
		priv->rx_skbuff[i] = NULL;

	/* Initialize some variables in our dev structure */
	priv->dirty_tx = priv->cur_tx = priv->tx_bd_base;
	priv->cur_rx = priv->rx_bd_base;
	priv->skb_curtx = priv->skb_dirtytx = 0;
	priv->skb_currx = 0;

	/* Initialize Transmit Descriptor Ring */
	txbdp = priv->tx_bd_base;
	for (i = 0; i < priv->tx_ring_size; i++) {
		txbdp->status = 0;
		txbdp->length = 0;
		txbdp->bufPtr = 0;
		txbdp++;
	}

	/* Set the last descriptor in the ring to indicate wrap */
	txbdp--;
	txbdp->status |= TXBD_WRAP;

	rxbdp = priv->rx_bd_base;
	for (i = 0; i < priv->rx_ring_size; i++) {
		struct sk_buff *skb = NULL;

		rxbdp->status = 0;

		skb = gfar_new_skb(dev, rxbdp);

		priv->rx_skbuff[i] = skb;

		rxbdp++;
	}

	/* Set the last descriptor in the ring to wrap */
	rxbdp--;
	rxbdp->status |= RXBD_WRAP;
	/* If the device has multiple interrupts, register for
	 * them.  Otherwise, only register for the one */
	if (priv->einfo->device_flags & FSL_GIANFAR_DEV_HAS_MULTI_INTR) {
		/* Install our interrupt handlers for Error,
		 * Transmit, and Receive */
		if (request_irq(priv->interruptError, gfar_error,
				0, "enet_error", dev) < 0) {
			if (netif_msg_intr(priv))
				printk(KERN_ERR "%s: Can't get IRQ %d\n",
				       dev->name, priv->interruptError);

			err = -1;
			goto err_irq_fail;
		}

		if (request_irq(priv->interruptTransmit, gfar_transmit,
				0, "enet_tx", dev) < 0) {
			if (netif_msg_intr(priv))
				printk(KERN_ERR "%s: Can't get IRQ %d\n",
				       dev->name, priv->interruptTransmit);

			err = -1;

			goto tx_irq_fail;
		}

		if (request_irq(priv->interruptReceive, gfar_receive,
				0, "enet_rx", dev) < 0) {
			if (netif_msg_intr(priv))
				printk(KERN_ERR "%s: Can't get IRQ %d (receive0)\n",
				       dev->name, priv->interruptReceive);

			err = -1;
			goto rx_irq_fail;
		}
	} else {
		if (request_irq(priv->interruptTransmit, gfar_interrupt,
				0, "gfar_interrupt", dev) < 0) {
			if (netif_msg_intr(priv))
				printk(KERN_ERR "%s: Can't get IRQ %d\n",
				       dev->name, priv->interruptError);

			err = -1;
			goto err_irq_fail;
		}
	}

	phy_start(priv->phydev);
	/* Configure the coalescing support */
	if (priv->txcoalescing)
		gfar_write(&regs->txic,
			   mk_ic_value(priv->txcount, priv->txtime));
	else
		gfar_write(&regs->txic, 0);

	if (priv->rxcoalescing)
		gfar_write(&regs->rxic,
			   mk_ic_value(priv->rxcount, priv->rxtime));
	else
		gfar_write(&regs->rxic, 0);
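	/*
	 * (mk_ic_value() packs the frame-count and timer thresholds into
	 * one coalescing register value; as the comment at the top of
	 * the file describes, the interrupt is then held back until one
	 * of the thresholds is reached.  Writing 0 disables coalescing,
	 * so every frame interrupts.)
	 */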
	if (priv->rx_csum_enable)
		rctrl |= RCTRL_CHECKSUMMING;

	if (priv->extended_hash) {
		rctrl |= RCTRL_EXTHASH;

		gfar_clear_exact_match(dev);
		rctrl |= RCTRL_EMEN;
	}

	if (priv->vlan_enable)
		rctrl |= RCTRL_VLAN;

	if (priv->padding) {
		rctrl &= ~RCTRL_PAL_MASK;
		rctrl |= RCTRL_PADDING(priv->padding);
	}

	/* Init rctrl based on our settings */
	gfar_write(&priv->regs->rctrl, rctrl);

	if (dev->features & NETIF_F_IP_CSUM)
		gfar_write(&priv->regs->tctrl, TCTRL_INIT_CSUM);

	/* Set the extraction length and index */
	attrs = ATTRELI_EL(priv->rx_stash_size) |
	    ATTRELI_EI(priv->rx_stash_index);

	gfar_write(&priv->regs->attreli, attrs);

	/* Start with defaults, and add stashing or locking
	 * depending on the appropriate variables */
	attrs = ATTR_INIT_SETTINGS;

	if (priv->bd_stash_en)
		attrs |= ATTR_BDSTASH;

	if (priv->rx_stash_size != 0)
		attrs |= ATTR_BUFSTASH;

	gfar_write(&priv->regs->attr, attrs);

	gfar_write(&priv->regs->fifo_tx_thr, priv->fifo_threshold);
	gfar_write(&priv->regs->fifo_tx_starve, priv->fifo_starve);
	gfar_write(&priv->regs->fifo_tx_starve_shutoff, priv->fifo_starve_off);

	/* Start the controller */
	gfar_start(dev);

	return 0;

rx_irq_fail:
	free_irq(priv->interruptTransmit, dev);
tx_irq_fail:
	free_irq(priv->interruptError, dev);
err_irq_fail:
rx_skb_fail:
	free_skb_resources(priv);
tx_skb_fail:
	dma_free_coherent(NULL,
			sizeof(struct txbd8)*priv->tx_ring_size
			+ sizeof(struct rxbd8)*priv->rx_ring_size,
			priv->tx_bd_base,
			gfar_read(&regs->tbase0));

	return err;
}
/* Called when something needs to use the ethernet device */
/* Returns 0 for success. */
static int gfar_enet_open(struct net_device *dev)
{
	int err;

	/* Initialize a bunch of registers */
	init_registers(dev);

	gfar_set_mac_address(dev);

	err = init_phy(dev);

	if(err)
		return err;

	err = startup_gfar(dev);

	netif_start_queue(dev);

	return err;
}
static inline struct txfcb *gfar_add_fcb(struct sk_buff *skb, struct txbd8 *bdp)
{
	struct txfcb *fcb = (struct txfcb *)skb_push (skb, GMAC_FCB_LEN);

	memset(fcb, 0, GMAC_FCB_LEN);

	return fcb;
}
static inline void gfar_tx_checksum(struct sk_buff *skb, struct txfcb *fcb)
{
	u8 flags = 0;

	/* If we're here, it's an IP packet with a TCP or UDP
	 * payload.  We set it to checksum, using a pseudo-header
	 * we provide
	 */
	flags = TXFCB_DEFAULT;

	/* Tell the controller what the protocol is */
	/* And provide the already calculated phcs */
	if (skb->nh.iph->protocol == IPPROTO_UDP) {
		flags |= TXFCB_UDP;
		fcb->phcs = skb->h.uh->check;
	} else
		fcb->phcs = skb->h.th->check;

	/* l3os is the distance between the start of the
	 * frame (skb->data) and the start of the IP hdr.
	 * l4os is the distance between the start of the
	 * l3 hdr and the l4 hdr */
	fcb->l3os = (u16)(skb->nh.raw - skb->data - GMAC_FCB_LEN);
	fcb->l4os = (u16)(skb->h.raw - skb->nh.raw);

	fcb->flags = flags;
}
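/*
 * (Worked example, for illustration only: on an untagged Ethernet/IPv4
 * frame, skb->data points at the FCB after gfar_add_fcb(), so
 * skb->nh.raw - skb->data equals GMAC_FCB_LEN plus the 14-byte Ethernet
 * header, and l3os comes out to 14; l4os is just the IP header length,
 * 20 when no options are present.)
 */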
void inline gfar_tx_vlan(struct sk_buff *skb, struct txfcb *fcb)
{
	fcb->flags |= TXFCB_VLN;
	fcb->vlctl = vlan_tx_tag_get(skb);
}
/* This is called by the kernel when a frame is ready for transmission. */
/* It is pointed to by the dev->hard_start_xmit function pointer */
static int gfar_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct gfar_private *priv = netdev_priv(dev);
	struct txfcb *fcb = NULL;
	struct txbd8 *txbdp;
	u16 status;
	unsigned long flags;

	/* Update transmit stats */
	priv->stats.tx_bytes += skb->len;

	/* Lock priv now */
	spin_lock_irqsave(&priv->txlock, flags);

	/* Point at the first free tx descriptor */
	txbdp = priv->cur_tx;

	/* Clear all but the WRAP status flags */
	status = txbdp->status & TXBD_WRAP;

	/* Set up checksumming */
	if (likely((dev->features & NETIF_F_IP_CSUM)
			&& (CHECKSUM_HW == skb->ip_summed))) {
		fcb = gfar_add_fcb(skb, txbdp);
		status |= TXBD_TOE;
		gfar_tx_checksum(skb, fcb);
	}

	if (priv->vlan_enable &&
	    unlikely(priv->vlgrp && vlan_tx_tag_present(skb))) {
		if (unlikely(NULL == fcb)) {
			fcb = gfar_add_fcb(skb, txbdp);
			status |= TXBD_TOE;
		}

		gfar_tx_vlan(skb, fcb);
	}

	/* Set buffer length and pointer */
	txbdp->length = skb->len;
	txbdp->bufPtr = dma_map_single(NULL, skb->data,
			skb->len, DMA_TO_DEVICE);

	/* Save the skb pointer so we can free it later */
	priv->tx_skbuff[priv->skb_curtx] = skb;

	/* Update the current skb pointer (wrapping if this was the last) */
	priv->skb_curtx =
	    (priv->skb_curtx + 1) & TX_RING_MOD_MASK(priv->tx_ring_size);

	/* Flag the BD as interrupt-causing */
	status |= TXBD_INTERRUPT;

	/* Flag the BD as ready to go, last in frame, and */
	/* in need of CRC */
	status |= (TXBD_READY | TXBD_LAST | TXBD_CRC);

	dev->trans_start = jiffies;

	txbdp->status = status;

	/* If this was the last BD in the ring, the next one */
	/* is at the beginning of the ring */
	if (txbdp->status & TXBD_WRAP)
		txbdp = priv->tx_bd_base;
	else
		txbdp++;

	/* If the next BD still needs to be cleaned up, then the bds
	   are full.  We need to tell the kernel to stop sending us stuff. */
	if (txbdp == priv->dirty_tx) {
		netif_stop_queue(dev);

		priv->stats.tx_fifo_errors++;
	}

	/* Update the current txbd to the next one */
	priv->cur_tx = txbdp;

	/* Tell the DMA to go go go */
	gfar_write(&priv->regs->tstat, TSTAT_CLEAR_THALT);

	/* Unlock priv */
	spin_unlock_irqrestore(&priv->txlock, flags);

	return 0;
}
/* Stops the kernel queue, and halts the controller */
static int gfar_close(struct net_device *dev)
{
	struct gfar_private *priv = netdev_priv(dev);

	stop_gfar(dev);

	/* Disconnect from the PHY */
	phy_disconnect(priv->phydev);
	priv->phydev = NULL;

	netif_stop_queue(dev);

	return 0;
}
/* returns a net_device_stats structure pointer */
static struct net_device_stats * gfar_get_stats(struct net_device *dev)
{
	struct gfar_private *priv = netdev_priv(dev);

	return &(priv->stats);
}
/* Changes the mac address if the controller is not running. */
int gfar_set_mac_address(struct net_device *dev)
{
	gfar_set_mac_for_addr(dev, 0, dev->dev_addr);

	return 0;
}
/* Enables and disables VLAN insertion/extraction */
static void gfar_vlan_rx_register(struct net_device *dev,
		struct vlan_group *grp)
{
	struct gfar_private *priv = netdev_priv(dev);
	unsigned long flags;
	u32 tempval;

	spin_lock_irqsave(&priv->rxlock, flags);

	priv->vlgrp = grp;

	if (grp) {
		/* Enable VLAN tag insertion */
		tempval = gfar_read(&priv->regs->tctrl);
		tempval |= TCTRL_VLINS;

		gfar_write(&priv->regs->tctrl, tempval);

		/* Enable VLAN tag extraction */
		tempval = gfar_read(&priv->regs->rctrl);
		tempval |= RCTRL_VLEX;
		gfar_write(&priv->regs->rctrl, tempval);
	} else {
		/* Disable VLAN tag insertion */
		tempval = gfar_read(&priv->regs->tctrl);
		tempval &= ~TCTRL_VLINS;
		gfar_write(&priv->regs->tctrl, tempval);

		/* Disable VLAN tag extraction */
		tempval = gfar_read(&priv->regs->rctrl);
		tempval &= ~RCTRL_VLEX;
		gfar_write(&priv->regs->rctrl, tempval);
	}

	spin_unlock_irqrestore(&priv->rxlock, flags);
}
static void gfar_vlan_rx_kill_vid(struct net_device *dev, uint16_t vid)
{
	struct gfar_private *priv = netdev_priv(dev);
	unsigned long flags;

	spin_lock_irqsave(&priv->rxlock, flags);

	if (priv->vlgrp)
		priv->vlgrp->vlan_devices[vid] = NULL;

	spin_unlock_irqrestore(&priv->rxlock, flags);
}
static int gfar_change_mtu(struct net_device *dev, int new_mtu)
{
	int tempsize, tempval;
	struct gfar_private *priv = netdev_priv(dev);
	int oldsize = priv->rx_buffer_size;
	int frame_size = new_mtu + ETH_HLEN;

	if (priv->vlan_enable)
		frame_size += VLAN_ETH_HLEN;

	if (gfar_uses_fcb(priv))
		frame_size += GMAC_FCB_LEN;

	frame_size += priv->padding;

	if ((frame_size < 64) || (frame_size > JUMBO_FRAME_SIZE)) {
		if (netif_msg_drv(priv))
			printk(KERN_ERR "%s: Invalid MTU setting\n",
			       dev->name);
		return -EINVAL;
	}

	tempsize =
	    (frame_size & ~(INCREMENTAL_BUFFER_SIZE - 1)) +
	    INCREMENTAL_BUFFER_SIZE;
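	/*
	 * (Illustrative arithmetic: the mask assumes
	 * INCREMENTAL_BUFFER_SIZE is a power of two.  With an increment
	 * of 512, for example, a frame_size of 1518 yields a tempsize of
	 * 1536; a frame_size already on a 512-byte boundary still gets a
	 * full extra increment added.)
	 */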
	/* Only stop and start the controller if it isn't already
	 * stopped, and we changed something */
	if ((oldsize != tempsize) && (dev->flags & IFF_UP))
		stop_gfar(dev);

	priv->rx_buffer_size = tempsize;

	dev->mtu = new_mtu;

	gfar_write(&priv->regs->mrblr, priv->rx_buffer_size);
	gfar_write(&priv->regs->maxfrm, priv->rx_buffer_size);

	/* If the mtu is larger than the max size for standard
	 * ethernet frames (ie, a jumbo frame), then set maccfg2
	 * to allow huge frames, and to check the length */
	tempval = gfar_read(&priv->regs->maccfg2);

	if (priv->rx_buffer_size > DEFAULT_RX_BUFFER_SIZE)
		tempval |= (MACCFG2_HUGEFRAME | MACCFG2_LENGTHCHECK);
	else
		tempval &= ~(MACCFG2_HUGEFRAME | MACCFG2_LENGTHCHECK);

	gfar_write(&priv->regs->maccfg2, tempval);

	if ((oldsize != tempsize) && (dev->flags & IFF_UP))
		startup_gfar(dev);

	return 0;
}
/* gfar_timeout gets called when a packet has not been
 * transmitted after a set amount of time.
 * For now, assume that clearing out all the structures, and
 * starting over will fix the problem. */
static void gfar_timeout(struct net_device *dev)
{
	struct gfar_private *priv = netdev_priv(dev);

	priv->stats.tx_errors++;

	if (dev->flags & IFF_UP) {
		stop_gfar(dev);
		startup_gfar(dev);
	}

	netif_schedule(dev);
}
/* Interrupt Handler for Transmit complete */
static irqreturn_t gfar_transmit(int irq, void *dev_id, struct pt_regs *regs)
{
	struct net_device *dev = (struct net_device *) dev_id;
	struct gfar_private *priv = netdev_priv(dev);
	struct txbd8 *bdp;

	/* Clear IEVENT */
	gfar_write(&priv->regs->ievent, IEVENT_TX_MASK);

	/* Lock priv */
	spin_lock(&priv->txlock);

	bdp = priv->dirty_tx;
	while ((bdp->status & TXBD_READY) == 0) {
		/* If dirty_tx and cur_tx are the same, then either the */
		/* ring is empty or full now (it could only be full in the beginning, */
		/* obviously).  If it is empty, we are done. */
		if ((bdp == priv->cur_tx) && (netif_queue_stopped(dev) == 0))
			break;

		priv->stats.tx_packets++;

		/* Deferred means some collisions occurred during transmit, */
		/* but we eventually sent the packet. */
		if (bdp->status & TXBD_DEF)
			priv->stats.collisions++;

		/* Free the sk buffer associated with this TxBD */
		dev_kfree_skb_irq(priv->tx_skbuff[priv->skb_dirtytx]);

		priv->tx_skbuff[priv->skb_dirtytx] = NULL;
		priv->skb_dirtytx =
		    (priv->skb_dirtytx +
		     1) & TX_RING_MOD_MASK(priv->tx_ring_size);

		/* update bdp to point at next bd in the ring (wrapping if necessary) */
		if (bdp->status & TXBD_WRAP)
			bdp = priv->tx_bd_base;
		else
			bdp++;

		/* Move dirty_tx to be the next bd */
		priv->dirty_tx = bdp;

		/* We freed a buffer, so now we can restart transmission */
		if (netif_queue_stopped(dev))
			netif_wake_queue(dev);
	} /* while ((bdp->status & TXBD_READY) == 0) */

	/* If we are coalescing the interrupts, reset the timer */
	/* Otherwise, clear it */
	if (priv->txcoalescing)
		gfar_write(&priv->regs->txic,
			   mk_ic_value(priv->txcount, priv->txtime));
	else
		gfar_write(&priv->regs->txic, 0);

	spin_unlock(&priv->txlock);

	return IRQ_HANDLED;
}
struct sk_buff * gfar_new_skb(struct net_device *dev, struct rxbd8 *bdp)
{
	unsigned int alignamount;
	struct gfar_private *priv = netdev_priv(dev);
	struct sk_buff *skb = NULL;
	unsigned int timeout = SKB_ALLOC_TIMEOUT;

	/* We have to allocate the skb, so keep trying till we succeed */
	while ((!skb) && timeout--)
		skb = dev_alloc_skb(priv->rx_buffer_size + RXBUF_ALIGNMENT);

	if (NULL == skb)
		return NULL;

	alignamount = RXBUF_ALIGNMENT -
		(((unsigned) skb->data) & (RXBUF_ALIGNMENT - 1));

	/* We need the data buffer to be aligned properly.  We will reserve
	 * as many bytes as needed to align the data properly
	 */
	skb_reserve(skb, alignamount);
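	/*
	 * (For illustration: if RXBUF_ALIGNMENT were 64 and skb->data
	 * ended in 0x10, alignamount would be 0x30.  A buffer that is
	 * already aligned gets a full RXBUF_ALIGNMENT reserved rather
	 * than 0, which the over-allocation above accounts for.)
	 */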
	skb->dev = dev;

	bdp->bufPtr = dma_map_single(NULL, skb->data,
			priv->rx_buffer_size, DMA_FROM_DEVICE);

	bdp->length = 0;

	/* Mark the buffer empty */
	bdp->status |= (RXBD_EMPTY | RXBD_INTERRUPT);

	return skb;
}
static inline void count_errors(unsigned short status, struct gfar_private *priv)
{
	struct net_device_stats *stats = &priv->stats;
	struct gfar_extra_stats *estats = &priv->extra_stats;

	/* If the packet was truncated, none of the other errors
	 * matter */
	if (status & RXBD_TRUNCATED) {
		stats->rx_length_errors++;

		estats->rx_trunc++;

		return;
	}
	/* Count the errors, if there were any */
	if (status & (RXBD_LARGE | RXBD_SHORT)) {
		stats->rx_length_errors++;

		if (status & RXBD_LARGE)
			estats->rx_large++;
		else
			estats->rx_short++;
	}
	if (status & RXBD_NONOCTET) {
		stats->rx_frame_errors++;
		estats->rx_nonoctet++;
	}
	if (status & RXBD_CRCERR) {
		estats->rx_crcerr++;
		stats->rx_crc_errors++;
	}
	if (status & RXBD_OVERRUN) {
		estats->rx_overrun++;
		stats->rx_crc_errors++;
	}
}
irqreturn_t gfar_receive(int irq, void *dev_id, struct pt_regs *regs)
{
	struct net_device *dev = (struct net_device *) dev_id;
	struct gfar_private *priv = netdev_priv(dev);
#ifdef CONFIG_GFAR_NAPI
	u32 tempval;
#else
	unsigned long flags;
#endif

	/* Clear IEVENT, so rx interrupt isn't called again
	 * because of this interrupt */
	gfar_write(&priv->regs->ievent, IEVENT_RX_MASK);

	/* support NAPI */
#ifdef CONFIG_GFAR_NAPI
	if (netif_rx_schedule_prep(dev)) {
		tempval = gfar_read(&priv->regs->imask);
		tempval &= IMASK_RX_DISABLED;
		gfar_write(&priv->regs->imask, tempval);

		__netif_rx_schedule(dev);
	} else {
		if (netif_msg_rx_err(priv))
			printk(KERN_DEBUG "%s: receive called twice (%x)[%x]\n",
			       dev->name, gfar_read(&priv->regs->ievent),
			       gfar_read(&priv->regs->imask));
	}
#else

	spin_lock_irqsave(&priv->rxlock, flags);
	gfar_clean_rx_ring(dev, priv->rx_ring_size);

	/* If we are coalescing interrupts, update the timer */
	/* Otherwise, clear it */
	if (priv->rxcoalescing)
		gfar_write(&priv->regs->rxic,
			   mk_ic_value(priv->rxcount, priv->rxtime));
	else
		gfar_write(&priv->regs->rxic, 0);

	spin_unlock_irqrestore(&priv->rxlock, flags);
#endif

	return IRQ_HANDLED;
}
static inline int gfar_rx_vlan(struct sk_buff *skb,
		struct vlan_group *vlgrp, unsigned short vlctl)
{
#ifdef CONFIG_GFAR_NAPI
	return vlan_hwaccel_receive_skb(skb, vlgrp, vlctl);
#else
	return vlan_hwaccel_rx(skb, vlgrp, vlctl);
#endif
}
static inline void gfar_rx_checksum(struct sk_buff *skb, struct rxfcb *fcb)
{
	/* If valid headers were found, and valid sums
	 * were verified, then we tell the kernel that no
	 * checksumming is necessary.  Otherwise, it is */
	if ((fcb->flags & RXFCB_CSUM_MASK) == (RXFCB_CIP | RXFCB_CTU))
		skb->ip_summed = CHECKSUM_UNNECESSARY;
	else
		skb->ip_summed = CHECKSUM_NONE;
}
static inline struct rxfcb *gfar_get_fcb(struct sk_buff *skb)
{
	struct rxfcb *fcb = (struct rxfcb *)skb->data;

	/* Remove the FCB from the skb */
	skb_pull(skb, GMAC_FCB_LEN);

	return fcb;
}
/* gfar_process_frame() -- handle one incoming packet if skb
 * isn't NULL. */
static int gfar_process_frame(struct net_device *dev, struct sk_buff *skb,
		int length)
{
	struct gfar_private *priv = netdev_priv(dev);
	struct rxfcb *fcb = NULL;

	if (NULL == skb) {
		if (netif_msg_rx_err(priv))
			printk(KERN_WARNING "%s: Missing skb!\n", dev->name);
		priv->stats.rx_dropped++;
		priv->extra_stats.rx_skbmissing++;
	} else {
		int ret;

		/* Prep the skb for the packet */
		skb_put(skb, length);

		/* Grab the FCB if there is one */
		if (gfar_uses_fcb(priv))
			fcb = gfar_get_fcb(skb);

		/* Remove the padded bytes, if there are any */
		if (priv->padding)
			skb_pull(skb, priv->padding);

		if (priv->rx_csum_enable)
			gfar_rx_checksum(skb, fcb);

		/* Tell the skb what kind of packet this is */
		skb->protocol = eth_type_trans(skb, dev);

		/* Send the packet up the stack */
		if (unlikely(priv->vlgrp && (fcb->flags & RXFCB_VLN)))
			ret = gfar_rx_vlan(skb, priv->vlgrp, fcb->vlctl);
		else
			ret = RECEIVE(skb);

		if (NET_RX_DROP == ret)
			priv->extra_stats.kernel_dropped++;
	}

	return 0;
}
/* gfar_clean_rx_ring() -- Processes each frame in the rx ring
 * until the budget/quota has been reached.  Returns the number
 * of frames handled
 */
int gfar_clean_rx_ring(struct net_device *dev, int rx_work_limit)
{
	struct rxbd8 *bdp;
	struct sk_buff *skb;
	u16 pkt_len;
	int howmany = 0;
	struct gfar_private *priv = netdev_priv(dev);

	/* Get the first full descriptor */
	bdp = priv->cur_rx;

	while (!((bdp->status & RXBD_EMPTY) || (--rx_work_limit < 0))) {
		skb = priv->rx_skbuff[priv->skb_currx];

		if (!(bdp->status &
		      (RXBD_LARGE | RXBD_SHORT | RXBD_NONOCTET
		       | RXBD_CRCERR | RXBD_OVERRUN | RXBD_TRUNCATED))) {
			/* Increment the number of packets */
			priv->stats.rx_packets++;
			howmany++;

			/* Remove the FCS from the packet length */
			pkt_len = bdp->length - 4;

			gfar_process_frame(dev, skb, pkt_len);

			priv->stats.rx_bytes += pkt_len;
		} else {
			count_errors(bdp->status, priv);

			if (skb)
				dev_kfree_skb_any(skb);

			priv->rx_skbuff[priv->skb_currx] = NULL;
		}

		dev->last_rx = jiffies;

		/* Clear the status flags for this buffer */
		bdp->status &= ~RXBD_STATS;

		/* Add another skb for the future */
		skb = gfar_new_skb(dev, bdp);
		priv->rx_skbuff[priv->skb_currx] = skb;

		/* Update to the next pointer */
		if (bdp->status & RXBD_WRAP)
			bdp = priv->rx_bd_base;
		else
			bdp++;

		/* update to point at the next skb */
		priv->skb_currx =
		    (priv->skb_currx +
		     1) & RX_RING_MOD_MASK(priv->rx_ring_size);
	}

	/* Update the current rxbd pointer to be the next one */
	priv->cur_rx = bdp;

	return howmany;
}
#ifdef CONFIG_GFAR_NAPI
static int gfar_poll(struct net_device *dev, int *budget)
{
	int howmany;
	struct gfar_private *priv = netdev_priv(dev);
	int rx_work_limit = *budget;

	if (rx_work_limit > dev->quota)
		rx_work_limit = dev->quota;

	howmany = gfar_clean_rx_ring(dev, rx_work_limit);

	dev->quota -= howmany;
	rx_work_limit -= howmany;
	*budget -= howmany;

	if (rx_work_limit > 0) {
		netif_rx_complete(dev);

		/* Clear the halt bit in RSTAT */
		gfar_write(&priv->regs->rstat, RSTAT_CLEAR_RHALT);

		gfar_write(&priv->regs->imask, IMASK_DEFAULT);

		/* If we are coalescing interrupts, update the timer */
		/* Otherwise, clear it */
		if (priv->rxcoalescing)
			gfar_write(&priv->regs->rxic,
				   mk_ic_value(priv->rxcount, priv->rxtime));
		else
			gfar_write(&priv->regs->rxic, 0);
	}

	/* Return 1 if there's more work to do */
	return (rx_work_limit > 0) ? 0 : 1;
}
#endif
/* The interrupt handler for devices with one interrupt */
static irqreturn_t gfar_interrupt(int irq, void *dev_id, struct pt_regs *regs)
{
	struct net_device *dev = dev_id;
	struct gfar_private *priv = netdev_priv(dev);

	/* Save ievent for future reference */
	u32 events = gfar_read(&priv->regs->ievent);

	/* Clear IEVENT */
	gfar_write(&priv->regs->ievent, events);

	/* Check for reception */
	if ((events & IEVENT_RXF0) || (events & IEVENT_RXB0))
		gfar_receive(irq, dev_id, regs);

	/* Check for transmit completion */
	if ((events & IEVENT_TXF) || (events & IEVENT_TXB))
		gfar_transmit(irq, dev_id, regs);

	/* Update error statistics */
	if (events & IEVENT_TXE) {
		priv->stats.tx_errors++;

		if (events & IEVENT_LC)
			priv->stats.tx_window_errors++;
		if (events & IEVENT_CRL)
			priv->stats.tx_aborted_errors++;
		if (events & IEVENT_XFUN) {
			if (netif_msg_tx_err(priv))
				printk(KERN_WARNING "%s: tx underrun. dropped packet\n", dev->name);
			priv->stats.tx_dropped++;
			priv->extra_stats.tx_underrun++;

			/* Reactivate the Tx Queues */
			gfar_write(&priv->regs->tstat, TSTAT_CLEAR_THALT);
		}
	}
	if (events & IEVENT_BSY) {
		priv->stats.rx_errors++;
		priv->extra_stats.rx_bsy++;

		gfar_receive(irq, dev_id, regs);

#ifndef CONFIG_GFAR_NAPI
		/* Clear the halt bit in RSTAT */
		gfar_write(&priv->regs->rstat, RSTAT_CLEAR_RHALT);
#endif

		if (netif_msg_rx_err(priv))
			printk(KERN_DEBUG "%s: busy error (rhalt: %x)\n",
			       dev->name,
			       gfar_read(&priv->regs->rstat));
	}
	if (events & IEVENT_BABR) {
		priv->stats.rx_errors++;
		priv->extra_stats.rx_babr++;

		if (netif_msg_rx_err(priv))
			printk(KERN_DEBUG "%s: babbling error\n", dev->name);
	}
	if (events & IEVENT_EBERR) {
		priv->extra_stats.eberr++;
		if (netif_msg_rx_err(priv))
			printk(KERN_DEBUG "%s: EBERR\n", dev->name);
	}
	if ((events & IEVENT_RXC) && (netif_msg_rx_err(priv)))
		printk(KERN_DEBUG "%s: control frame\n", dev->name);

	if (events & IEVENT_BABT) {
		priv->extra_stats.tx_babt++;
		if (netif_msg_rx_err(priv))
			printk(KERN_DEBUG "%s: babt error\n", dev->name);
	}

	return IRQ_HANDLED;
}
/* Called every time the controller might need to be made
 * aware of new link state.  The PHY code conveys this
 * information through variables in the phydev structure, and this
 * function converts those variables into the appropriate
 * register values, and can bring down the device if needed.
 */
static void adjust_link(struct net_device *dev)
{
	struct gfar_private *priv = netdev_priv(dev);
	struct gfar __iomem *regs = priv->regs;
	unsigned long flags;
	struct phy_device *phydev = priv->phydev;
	int new_state = 0;

	spin_lock_irqsave(&priv->txlock, flags);

	if (phydev->link) {
		u32 tempval = gfar_read(&regs->maccfg2);
		u32 ecntrl = gfar_read(&regs->ecntrl);

		/* Now we make sure that we can be in full duplex mode.
		 * If not, we operate in half-duplex mode. */
		if (phydev->duplex != priv->oldduplex) {
			new_state = 1;
			if (!(phydev->duplex))
				tempval &= ~(MACCFG2_FULL_DUPLEX);
			else
				tempval |= MACCFG2_FULL_DUPLEX;

			priv->oldduplex = phydev->duplex;
		}

		if (phydev->speed != priv->oldspeed) {
			new_state = 1;
			switch (phydev->speed) {
			case 1000:
				tempval =
				    ((tempval & ~(MACCFG2_IF)) | MACCFG2_GMII);
				break;
			case 100:
			case 10:
				tempval =
				    ((tempval & ~(MACCFG2_IF)) | MACCFG2_MII);

				/* Reduced mode distinguishes
				 * between 10 and 100 */
				if (phydev->speed == SPEED_100)
					ecntrl |= ECNTRL_R100;
				else
					ecntrl &= ~(ECNTRL_R100);
				break;
			default:
				if (netif_msg_link(priv))
					printk(KERN_WARNING
					       "%s: Ack!  Speed (%d) is not 10/100/1000!\n",
					       dev->name, phydev->speed);
				break;
			}

			priv->oldspeed = phydev->speed;
		}

		gfar_write(&regs->maccfg2, tempval);
		gfar_write(&regs->ecntrl, ecntrl);

		if (!priv->oldlink) {
			new_state = 1;
			priv->oldlink = 1;
			netif_schedule(dev);
		}
	} else if (priv->oldlink) {
		new_state = 1;
		priv->oldlink = 0;
		priv->oldspeed = 0;
		priv->oldduplex = -1;
	}

	if (new_state && netif_msg_link(priv))
		phy_print_status(phydev);

	spin_unlock_irqrestore(&priv->txlock, flags);
}
/* Update the hash table based on the current list of multicast
 * addresses we subscribe to.  Also, change the promiscuity of
 * the device based on the flags (this function is called
 * whenever dev->flags is changed)
 */
static void gfar_set_multi(struct net_device *dev)
{
	struct dev_mc_list *mc_ptr;
	struct gfar_private *priv = netdev_priv(dev);
	struct gfar __iomem *regs = priv->regs;
	u32 tempval;

	if(dev->flags & IFF_PROMISC) {
		if (netif_msg_drv(priv))
			printk(KERN_INFO "%s: Entering promiscuous mode.\n",
			       dev->name);
		/* Set RCTRL to PROM */
		tempval = gfar_read(&regs->rctrl);
		tempval |= RCTRL_PROM;
		gfar_write(&regs->rctrl, tempval);
	} else {
		/* Set RCTRL to not PROM */
		tempval = gfar_read(&regs->rctrl);
		tempval &= ~(RCTRL_PROM);
		gfar_write(&regs->rctrl, tempval);
	}

	if(dev->flags & IFF_ALLMULTI) {
		/* Set the hash to rx all multicast frames */
		gfar_write(&regs->igaddr0, 0xffffffff);
		gfar_write(&regs->igaddr1, 0xffffffff);
		gfar_write(&regs->igaddr2, 0xffffffff);
		gfar_write(&regs->igaddr3, 0xffffffff);
		gfar_write(&regs->igaddr4, 0xffffffff);
		gfar_write(&regs->igaddr5, 0xffffffff);
		gfar_write(&regs->igaddr6, 0xffffffff);
		gfar_write(&regs->igaddr7, 0xffffffff);
		gfar_write(&regs->gaddr0, 0xffffffff);
		gfar_write(&regs->gaddr1, 0xffffffff);
		gfar_write(&regs->gaddr2, 0xffffffff);
		gfar_write(&regs->gaddr3, 0xffffffff);
		gfar_write(&regs->gaddr4, 0xffffffff);
		gfar_write(&regs->gaddr5, 0xffffffff);
		gfar_write(&regs->gaddr6, 0xffffffff);
		gfar_write(&regs->gaddr7, 0xffffffff);
	} else {
		int em_num;
		int idx;

		/* zero out the hash */
		gfar_write(&regs->igaddr0, 0x0);
		gfar_write(&regs->igaddr1, 0x0);
		gfar_write(&regs->igaddr2, 0x0);
		gfar_write(&regs->igaddr3, 0x0);
		gfar_write(&regs->igaddr4, 0x0);
		gfar_write(&regs->igaddr5, 0x0);
		gfar_write(&regs->igaddr6, 0x0);
		gfar_write(&regs->igaddr7, 0x0);
		gfar_write(&regs->gaddr0, 0x0);
		gfar_write(&regs->gaddr1, 0x0);
		gfar_write(&regs->gaddr2, 0x0);
		gfar_write(&regs->gaddr3, 0x0);
		gfar_write(&regs->gaddr4, 0x0);
		gfar_write(&regs->gaddr5, 0x0);
		gfar_write(&regs->gaddr6, 0x0);
		gfar_write(&regs->gaddr7, 0x0);

		/* If we have extended hash tables, we need to
		 * clear the exact match registers to prepare for
		 * setting them */
		if (priv->extended_hash) {
			em_num = GFAR_EM_NUM + 1;
			gfar_clear_exact_match(dev);
			idx = 1;
		} else {
			idx = 0;
			em_num = 0;
		}

		if(dev->mc_count == 0)
			return;

		/* Parse the list, and set the appropriate bits */
		for(mc_ptr = dev->mc_list; mc_ptr; mc_ptr = mc_ptr->next) {
			if (idx < em_num) {
				gfar_set_mac_for_addr(dev, idx,
						mc_ptr->dmi_addr);
				idx++;
			} else
				gfar_set_hash_for_addr(dev, mc_ptr->dmi_addr);
		}
	}

	return;
}
/* Clears each of the exact match registers to zero, so they
 * don't interfere with normal reception */
static void gfar_clear_exact_match(struct net_device *dev)
{
	int idx;
	u8 zero_arr[MAC_ADDR_LEN] = {0,0,0,0,0,0};

	for(idx = 1;idx < GFAR_EM_NUM + 1;idx++)
		gfar_set_mac_for_addr(dev, idx, (u8 *)zero_arr);
}
/* Set the appropriate hash bit for the given addr */
/* The algorithm works like so:
 * 1) Take the Destination Address (ie the multicast address), and
 * do a CRC on it (little endian), and reverse the bits of the
 * result.
 * 2) Use the 8 most significant bits as a hash into a 256-entry
 * table.  The table is controlled through 8 32-bit registers:
 * gaddr0-7.  gaddr0's MSB is entry 0, and gaddr7's LSB is
 * entry 255.  This means that the 3 most significant bits in the
 * hash index which gaddr register to use, and the 5 other bits
 * indicate which bit (assuming an IBM numbering scheme, which
 * for PowerPC (tm) is usually the case) in the register holds
 * the entry. */
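/* Worked example (illustrative): if the CRC result has 0xE4 in its top
 * byte and hash_width is 8, the top three bits (0b111) select
 * hash_regs[7] and the next five bits (0b00100) select bit 4, so the
 * routine below ORs in 1 << (31 - 4).  With the 9-bit extended hash,
 * the top four bits pick one of the 16 registers instead. */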
static void gfar_set_hash_for_addr(struct net_device *dev, u8 *addr)
{
	u32 tempval;
	struct gfar_private *priv = netdev_priv(dev);
	u32 result = ether_crc(MAC_ADDR_LEN, addr);
	int width = priv->hash_width;
	u8 whichbit = (result >> (32 - width)) & 0x1f;
	u8 whichreg = result >> (32 - width + 5);
	u32 value = (1 << (31-whichbit));

	tempval = gfar_read(priv->hash_regs[whichreg]);
	tempval |= value;
	gfar_write(priv->hash_regs[whichreg], tempval);

	return;
}
/* There are multiple MAC Address register pairs on some controllers
 * This function sets the numth pair to a given address
 */
static void gfar_set_mac_for_addr(struct net_device *dev, int num, u8 *addr)
{
	struct gfar_private *priv = netdev_priv(dev);
	int idx;
	char tmpbuf[MAC_ADDR_LEN];
	u32 tempval;
	u32 __iomem *macptr = &priv->regs->macstnaddr1;

	macptr += num*2;

	/* Now copy it into the mac registers backwards, cuz */
	/* little endian is silly */
	for (idx = 0; idx < MAC_ADDR_LEN; idx++)
		tmpbuf[MAC_ADDR_LEN - 1 - idx] = addr[idx];

	gfar_write(macptr, *((u32 *) (tmpbuf)));

	tempval = *((u32 *) (tmpbuf + 4));

	gfar_write(macptr+1, tempval);
}
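/* (For illustration: on the big-endian 85xx/83xx parts this driver
 * targets, an address aa:bb:cc:dd:ee:ff lands in tmpbuf as
 * ff ee dd cc bb aa, so the first register is written with ff:ee:dd:cc
 * and the second carries bb:aa in its upper half.) */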
/* GFAR error interrupt handler */
static irqreturn_t gfar_error(int irq, void *dev_id, struct pt_regs *regs)
{
	struct net_device *dev = dev_id;
	struct gfar_private *priv = netdev_priv(dev);

	/* Save ievent for future reference */
	u32 events = gfar_read(&priv->regs->ievent);

	/* Clear IEVENT */
	gfar_write(&priv->regs->ievent, IEVENT_ERR_MASK);

	/* Hmm... */
	if (netif_msg_rx_err(priv) || netif_msg_tx_err(priv))
		printk(KERN_DEBUG "%s: error interrupt (ievent=0x%08x imask=0x%08x)\n",
		       dev->name, events, gfar_read(&priv->regs->imask));

	/* Update the error counters */
	if (events & IEVENT_TXE) {
		priv->stats.tx_errors++;

		if (events & IEVENT_LC)
			priv->stats.tx_window_errors++;
		if (events & IEVENT_CRL)
			priv->stats.tx_aborted_errors++;
		if (events & IEVENT_XFUN) {
			if (netif_msg_tx_err(priv))
				printk(KERN_DEBUG "%s: underrun. packet dropped.\n",
				       dev->name);
			priv->stats.tx_dropped++;
			priv->extra_stats.tx_underrun++;

			/* Reactivate the Tx Queues */
			gfar_write(&priv->regs->tstat, TSTAT_CLEAR_THALT);
		}
		if (netif_msg_tx_err(priv))
			printk(KERN_DEBUG "%s: Transmit Error\n", dev->name);
	}
	if (events & IEVENT_BSY) {
		priv->stats.rx_errors++;
		priv->extra_stats.rx_bsy++;

		gfar_receive(irq, dev_id, regs);

#ifndef CONFIG_GFAR_NAPI
		/* Clear the halt bit in RSTAT */
		gfar_write(&priv->regs->rstat, RSTAT_CLEAR_RHALT);
#endif

		if (netif_msg_rx_err(priv))
			printk(KERN_DEBUG "%s: busy error (rhalt: %x)\n",
			       dev->name,
			       gfar_read(&priv->regs->rstat));
	}
	if (events & IEVENT_BABR) {
		priv->stats.rx_errors++;
		priv->extra_stats.rx_babr++;

		if (netif_msg_rx_err(priv))
			printk(KERN_DEBUG "%s: babbling error\n", dev->name);
	}
	if (events & IEVENT_EBERR) {
		priv->extra_stats.eberr++;
		if (netif_msg_rx_err(priv))
			printk(KERN_DEBUG "%s: EBERR\n", dev->name);
	}
	if ((events & IEVENT_RXC) && netif_msg_rx_status(priv))
		printk(KERN_DEBUG "%s: control frame\n", dev->name);

	if (events & IEVENT_BABT) {
		priv->extra_stats.tx_babt++;
		if (netif_msg_tx_err(priv))
			printk(KERN_DEBUG "%s: babt error\n", dev->name);
	}

	return IRQ_HANDLED;
}
/* Structure for a device driver */
static struct platform_driver gfar_driver = {
	.probe = gfar_probe,
	.remove = gfar_remove,
	.driver	= {
		.name = "fsl-gianfar",
	},
};
static int __init gfar_init(void)
{
	int err = gfar_mdio_init();

	if (err)
		return err;

	err = platform_driver_register(&gfar_driver);

	if (err)
		gfar_mdio_exit();

	return err;
}

static void __exit gfar_exit(void)
{
	platform_driver_unregister(&gfar_driver);
	gfar_mdio_exit();
}

module_init(gfar_init);
module_exit(gfar_exit);