/*
 * drivers/net/gianfar.c
 *
 * Gianfar Ethernet Driver
 * This driver is designed for the non-CPM ethernet controllers
 * on the 85xx and 83xx family of integrated processors
 * Based on 8260_io/fcc_enet.c
 *
 * Author: Andy Fleming
 * Maintainer: Kumar Gala
 * Modifier: Sandeep Gopalpet <sandeep.kumar@freescale.com>
 *
 * Copyright 2002-2009, 2011 Freescale Semiconductor, Inc.
 * Copyright 2007 MontaVista Software, Inc.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the
 * Free Software Foundation; either version 2 of the License, or (at your
 * option) any later version.
 *
 * Gianfar: AKA Lambda Draconis, "Dragon"
 * RA 11 31 24.2
 * Dec +69 19 52
 * V 3.84
 * B-V +1.62
 *
 * Theory of operation
 *
 * The driver is initialized through of_device. Configuration information
 * is therefore conveyed through an OF-style device tree.
 *
 * The Gianfar Ethernet Controller uses a ring of buffer
 * descriptors. The beginning is indicated by a register
 * pointing to the physical address of the start of the ring.
 * The end is determined by a "wrap" bit being set in the
 * last descriptor of the ring.
 *
 * When a packet is received, the RXF bit in the
 * IEVENT register is set, triggering an interrupt when the
 * corresponding bit in the IMASK register is also set (if
 * interrupt coalescing is active, then the interrupt may not
 * happen immediately, but will wait until either a set number
 * of frames or amount of time have passed). In NAPI, the
 * interrupt handler will signal there is work to be done, and
 * exit. This method will start at the last known empty
 * descriptor, and process every subsequent descriptor until there
 * are none left with data (NAPI will stop after a set number of
 * packets to give time to other tasks, but will eventually
 * process all the packets). The data arrives inside a
 * pre-allocated skb, and so after the skb is passed up to the
 * stack, a new skb must be allocated, and the address field in
 * the buffer descriptor must be updated to indicate this new
 * skb.
 *
 * When the kernel requests that a packet be transmitted, the
 * driver starts where it left off last time, and points the
 * descriptor at the buffer which was passed in. The driver
 * then informs the DMA engine that there are packets ready to
 * be transmitted. Once the controller is finished transmitting
 * the packet, an interrupt may be triggered (under the same
 * conditions as for reception, but depending on the TXF bit).
 * The driver then cleans up the buffer.
 */
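
/*
 * Illustrative sketch only (not driver code): the descriptor-ring walk
 * described above, using this driver's rxbd8 layout from gianfar.h.
 * process_rx_bd() is a hypothetical stand-in for the real per-descriptor
 * work (passing the skb up to the stack and re-arming the BD).
 *
 *	struct rxbd8 *bdp = rx_queue->cur_rx;
 *
 *	while (!(bdp->status & RXBD_EMPTY)) {
 *		process_rx_bd(rx_queue, bdp);
 *		if (bdp->status & RXBD_WRAP)
 *			bdp = rx_queue->rx_bd_base;	// wrap bit: restart ring
 *		else
 *			bdp++;				// next descriptor
 *	}
 *	rx_queue->cur_rx = bdp;
 */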
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/unistd.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/init.h>
#include <linux/delay.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/if_vlan.h>
#include <linux/spinlock.h>
#include <linux/mm.h>
#include <linux/of_mdio.h>
#include <linux/of_platform.h>
#include <linux/ip.h>
#include <linux/tcp.h>
#include <linux/udp.h>
#include <linux/in.h>
#include <linux/net_tstamp.h>

#include <asm/io.h>
#include <asm/reg.h>
#include <asm/irq.h>
#include <asm/uaccess.h>
#include <linux/module.h>
#include <linux/dma-mapping.h>
#include <linux/crc32.h>
#include <linux/mii.h>
#include <linux/phy.h>
#include <linux/phy_fixed.h>
#include <linux/of.h>
#include <linux/of_net.h>

#include "gianfar.h"
#include "fsl_pq_mdio.h"

#define TX_TIMEOUT      (1*HZ)
#undef BRIEF_GFAR_ERRORS
#undef VERBOSE_GFAR_ERRORS

const char gfar_driver_name[] = "Gianfar Ethernet";
const char gfar_driver_version[] = "1.3";

static int gfar_enet_open(struct net_device *dev);
static int gfar_start_xmit(struct sk_buff *skb, struct net_device *dev);
static void gfar_reset_task(struct work_struct *work);
static void gfar_timeout(struct net_device *dev);
static int gfar_close(struct net_device *dev);
struct sk_buff *gfar_new_skb(struct net_device *dev);
static void gfar_new_rxbdp(struct gfar_priv_rx_q *rx_queue, struct rxbd8 *bdp,
		struct sk_buff *skb);
static int gfar_set_mac_address(struct net_device *dev);
static int gfar_change_mtu(struct net_device *dev, int new_mtu);
static irqreturn_t gfar_error(int irq, void *dev_id);
static irqreturn_t gfar_transmit(int irq, void *dev_id);
static irqreturn_t gfar_interrupt(int irq, void *dev_id);
static void adjust_link(struct net_device *dev);
static void init_registers(struct net_device *dev);
static int init_phy(struct net_device *dev);
static int gfar_probe(struct platform_device *ofdev);
static int gfar_remove(struct platform_device *ofdev);
static void free_skb_resources(struct gfar_private *priv);
static void gfar_set_multi(struct net_device *dev);
static void gfar_set_hash_for_addr(struct net_device *dev, u8 *addr);
static void gfar_configure_serdes(struct net_device *dev);
static int gfar_poll(struct napi_struct *napi, int budget);
#ifdef CONFIG_NET_POLL_CONTROLLER
static void gfar_netpoll(struct net_device *dev);
#endif
int gfar_clean_rx_ring(struct gfar_priv_rx_q *rx_queue, int rx_work_limit);
static int gfar_clean_tx_ring(struct gfar_priv_tx_q *tx_queue);
static int gfar_process_frame(struct net_device *dev, struct sk_buff *skb,
		int amount_pull);
static void gfar_vlan_rx_register(struct net_device *netdev,
		struct vlan_group *grp);
void gfar_halt(struct net_device *dev);
static void gfar_halt_nodisable(struct net_device *dev);
void gfar_start(struct net_device *dev);
static void gfar_clear_exact_match(struct net_device *dev);
static void gfar_set_mac_for_addr(struct net_device *dev, int num,
		const u8 *addr);
static int gfar_ioctl(struct net_device *dev, struct ifreq *rq, int cmd);

MODULE_AUTHOR("Freescale Semiconductor, Inc");
MODULE_DESCRIPTION("Gianfar Ethernet Driver");
MODULE_LICENSE("GPL");
static void gfar_init_rxbdp(struct gfar_priv_rx_q *rx_queue, struct rxbd8 *bdp,
			    dma_addr_t buf)
{
	u32 lstatus;

	bdp->bufPtr = buf;

	lstatus = BD_LFLAG(RXBD_EMPTY | RXBD_INTERRUPT);
	if (bdp == rx_queue->rx_bd_base + rx_queue->rx_ring_size - 1)
		lstatus |= BD_LFLAG(RXBD_WRAP);

	eieio();

	bdp->lstatus = lstatus;
}

static int gfar_init_bds(struct net_device *ndev)
{
	struct gfar_private *priv = netdev_priv(ndev);
	struct gfar_priv_tx_q *tx_queue = NULL;
	struct gfar_priv_rx_q *rx_queue = NULL;
	struct txbd8 *txbdp;
	struct rxbd8 *rxbdp;
	int i, j;

	for (i = 0; i < priv->num_tx_queues; i++) {
		tx_queue = priv->tx_queue[i];
		/* Initialize some variables in our dev structure */
		tx_queue->num_txbdfree = tx_queue->tx_ring_size;
		tx_queue->dirty_tx = tx_queue->tx_bd_base;
		tx_queue->cur_tx = tx_queue->tx_bd_base;
		tx_queue->skb_curtx = 0;
		tx_queue->skb_dirtytx = 0;

		/* Initialize Transmit Descriptor Ring */
		txbdp = tx_queue->tx_bd_base;
		for (j = 0; j < tx_queue->tx_ring_size; j++) {
			txbdp->lstatus = 0;
			txbdp->bufPtr = 0;
			txbdp++;
		}

		/* Set the last descriptor in the ring to indicate wrap */
		txbdp--;
		txbdp->status |= TXBD_WRAP;
	}

	for (i = 0; i < priv->num_rx_queues; i++) {
		rx_queue = priv->rx_queue[i];
		rx_queue->cur_rx = rx_queue->rx_bd_base;
		rx_queue->skb_currx = 0;
		rxbdp = rx_queue->rx_bd_base;

		for (j = 0; j < rx_queue->rx_ring_size; j++) {
			struct sk_buff *skb = rx_queue->rx_skbuff[j];

			if (skb) {
				gfar_init_rxbdp(rx_queue, rxbdp,
						rxbdp->bufPtr);
			} else {
				skb = gfar_new_skb(ndev);
				if (!skb) {
					pr_err("%s: Can't allocate RX buffers\n",
							ndev->name);
					goto err_rxalloc_fail;
				}
				rx_queue->rx_skbuff[j] = skb;

				gfar_new_rxbdp(rx_queue, rxbdp, skb);
			}

			rxbdp++;
		}
	}

	return 0;

err_rxalloc_fail:
	free_skb_resources(priv);
	return -ENOMEM;
}

static int gfar_alloc_skb_resources(struct net_device *ndev)
{
	void *vaddr;
	dma_addr_t addr;
	int i, j, k;
	struct gfar_private *priv = netdev_priv(ndev);
	struct device *dev = &priv->ofdev->dev;
	struct gfar_priv_tx_q *tx_queue = NULL;
	struct gfar_priv_rx_q *rx_queue = NULL;

	priv->total_tx_ring_size = 0;
	for (i = 0; i < priv->num_tx_queues; i++)
		priv->total_tx_ring_size += priv->tx_queue[i]->tx_ring_size;

	priv->total_rx_ring_size = 0;
	for (i = 0; i < priv->num_rx_queues; i++)
		priv->total_rx_ring_size += priv->rx_queue[i]->rx_ring_size;

	/* Allocate memory for the buffer descriptors */
	vaddr = dma_alloc_coherent(dev,
			sizeof(struct txbd8) * priv->total_tx_ring_size +
			sizeof(struct rxbd8) * priv->total_rx_ring_size,
			&addr, GFP_KERNEL);
	if (!vaddr) {
		if (netif_msg_ifup(priv))
			pr_err("%s: Could not allocate buffer descriptors!\n",
			       ndev->name);
		return -ENOMEM;
	}

	for (i = 0; i < priv->num_tx_queues; i++) {
		tx_queue = priv->tx_queue[i];
		tx_queue->tx_bd_base = (struct txbd8 *) vaddr;
		tx_queue->tx_bd_dma_base = addr;
		tx_queue->dev = ndev;
		/* enet DMA only understands physical addresses */
		addr += sizeof(struct txbd8) * tx_queue->tx_ring_size;
		vaddr += sizeof(struct txbd8) * tx_queue->tx_ring_size;
	}

	/* Start the rx descriptor ring where the tx ring leaves off */
	for (i = 0; i < priv->num_rx_queues; i++) {
		rx_queue = priv->rx_queue[i];
		rx_queue->rx_bd_base = (struct rxbd8 *) vaddr;
		rx_queue->rx_bd_dma_base = addr;
		rx_queue->dev = ndev;
		addr += sizeof(struct rxbd8) * rx_queue->rx_ring_size;
		vaddr += sizeof(struct rxbd8) * rx_queue->rx_ring_size;
	}

	/* Setup the skbuff rings */
	for (i = 0; i < priv->num_tx_queues; i++) {
		tx_queue = priv->tx_queue[i];
		tx_queue->tx_skbuff = kmalloc(sizeof(*tx_queue->tx_skbuff) *
				tx_queue->tx_ring_size, GFP_KERNEL);
		if (!tx_queue->tx_skbuff) {
			if (netif_msg_ifup(priv))
				pr_err("%s: Could not allocate tx_skbuff\n",
						ndev->name);
			goto cleanup;
		}

		for (k = 0; k < tx_queue->tx_ring_size; k++)
			tx_queue->tx_skbuff[k] = NULL;
	}

	for (i = 0; i < priv->num_rx_queues; i++) {
		rx_queue = priv->rx_queue[i];
		rx_queue->rx_skbuff = kmalloc(sizeof(*rx_queue->rx_skbuff) *
				rx_queue->rx_ring_size, GFP_KERNEL);

		if (!rx_queue->rx_skbuff) {
			if (netif_msg_ifup(priv))
				pr_err("%s: Could not allocate rx_skbuff\n",
				       ndev->name);
			goto cleanup;
		}

		for (j = 0; j < rx_queue->rx_ring_size; j++)
			rx_queue->rx_skbuff[j] = NULL;
	}

	if (gfar_init_bds(ndev))
		goto cleanup;

	return 0;

cleanup:
	free_skb_resources(priv);
	return -ENOMEM;
}
static void gfar_init_tx_rx_base(struct gfar_private *priv)
{
	struct gfar __iomem *regs = priv->gfargrp[0].regs;
	u32 __iomem *baddr;
	int i;

	baddr = &regs->tbase0;
	for (i = 0; i < priv->num_tx_queues; i++) {
		gfar_write(baddr, priv->tx_queue[i]->tx_bd_dma_base);
		baddr += 2;
	}

	baddr = &regs->rbase0;
	for (i = 0; i < priv->num_rx_queues; i++) {
		gfar_write(baddr, priv->rx_queue[i]->rx_bd_dma_base);
		baddr += 2;
	}
}

static void gfar_init_mac(struct net_device *ndev)
{
	struct gfar_private *priv = netdev_priv(ndev);
	struct gfar __iomem *regs = priv->gfargrp[0].regs;
	u32 rctrl = 0;
	u32 tctrl = 0;
	u32 attrs = 0;

	/* write the tx/rx base registers */
	gfar_init_tx_rx_base(priv);

	/* Configure the coalescing support */
	gfar_configure_coalescing(priv, 0xFF, 0xFF);

	if (priv->rx_filer_enable) {
		rctrl |= RCTRL_FILREN;
		/* Program the RIR0 reg with the required distribution */
		gfar_write(&regs->rir0, DEFAULT_RIR0);
	}

	if (ndev->features & NETIF_F_RXCSUM)
		rctrl |= RCTRL_CHECKSUMMING;

	if (priv->extended_hash) {
		rctrl |= RCTRL_EXTHASH;

		gfar_clear_exact_match(ndev);
		rctrl |= RCTRL_EMEN;
	}

	if (priv->padding) {
		rctrl &= ~RCTRL_PAL_MASK;
		rctrl |= RCTRL_PADDING(priv->padding);
	}

	/* Insert receive time stamps into padding alignment bytes */
	if (priv->device_flags & FSL_GIANFAR_DEV_HAS_TIMER) {
		rctrl &= ~RCTRL_PAL_MASK;
		rctrl |= RCTRL_PADDING(8);
		priv->padding = 8;
	}

	/* Enable HW time stamping if requested from user space */
	if (priv->hwts_rx_en)
		rctrl |= RCTRL_PRSDEP_INIT | RCTRL_TS_ENABLE;

	/* keep vlan related bits if it's enabled */
	if (priv->vlgrp) {
		rctrl |= RCTRL_VLEX | RCTRL_PRSDEP_INIT;
		tctrl |= TCTRL_VLINS;
	}

	/* Init rctrl based on our settings */
	gfar_write(&regs->rctrl, rctrl);

	if (ndev->features & NETIF_F_IP_CSUM)
		tctrl |= TCTRL_INIT_CSUM;

	tctrl |= TCTRL_TXSCHED_PRIO;

	gfar_write(&regs->tctrl, tctrl);

	/* Set the extraction length and index */
	attrs = ATTRELI_EL(priv->rx_stash_size) |
		ATTRELI_EI(priv->rx_stash_index);

	gfar_write(&regs->attreli, attrs);

	/* Start with defaults, and add stashing or locking
	 * depending on the appropriate variables */
	attrs = ATTR_INIT_SETTINGS;

	if (priv->bd_stash_en)
		attrs |= ATTR_BDSTASH;

	if (priv->rx_stash_size != 0)
		attrs |= ATTR_BUFSTASH;

	gfar_write(&regs->attr, attrs);

	gfar_write(&regs->fifo_tx_thr, priv->fifo_threshold);
	gfar_write(&regs->fifo_tx_starve, priv->fifo_starve);
	gfar_write(&regs->fifo_tx_starve_shutoff, priv->fifo_starve_off);
}
static struct net_device_stats *gfar_get_stats(struct net_device *dev)
{
	struct gfar_private *priv = netdev_priv(dev);
	unsigned long rx_packets = 0, rx_bytes = 0, rx_dropped = 0;
	unsigned long tx_packets = 0, tx_bytes = 0;
	int i = 0;

	for (i = 0; i < priv->num_rx_queues; i++) {
		rx_packets += priv->rx_queue[i]->stats.rx_packets;
		rx_bytes += priv->rx_queue[i]->stats.rx_bytes;
		rx_dropped += priv->rx_queue[i]->stats.rx_dropped;
	}

	dev->stats.rx_packets = rx_packets;
	dev->stats.rx_bytes = rx_bytes;
	dev->stats.rx_dropped = rx_dropped;

	for (i = 0; i < priv->num_tx_queues; i++) {
		tx_bytes += priv->tx_queue[i]->stats.tx_bytes;
		tx_packets += priv->tx_queue[i]->stats.tx_packets;
	}

	dev->stats.tx_bytes = tx_bytes;
	dev->stats.tx_packets = tx_packets;

	return &dev->stats;
}

static const struct net_device_ops gfar_netdev_ops = {
	.ndo_open = gfar_enet_open,
	.ndo_start_xmit = gfar_start_xmit,
	.ndo_stop = gfar_close,
	.ndo_change_mtu = gfar_change_mtu,
	.ndo_set_features = gfar_set_features,
	.ndo_set_multicast_list = gfar_set_multi,
	.ndo_tx_timeout = gfar_timeout,
	.ndo_do_ioctl = gfar_ioctl,
	.ndo_get_stats = gfar_get_stats,
	.ndo_vlan_rx_register = gfar_vlan_rx_register,
	.ndo_set_mac_address = eth_mac_addr,
	.ndo_validate_addr = eth_validate_addr,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller = gfar_netpoll,
#endif
};
void lock_rx_qs(struct gfar_private *priv)
{
	int i = 0x0;

	for (i = 0; i < priv->num_rx_queues; i++)
		spin_lock(&priv->rx_queue[i]->rxlock);
}

void lock_tx_qs(struct gfar_private *priv)
{
	int i = 0x0;

	for (i = 0; i < priv->num_tx_queues; i++)
		spin_lock(&priv->tx_queue[i]->txlock);
}

void unlock_rx_qs(struct gfar_private *priv)
{
	int i = 0x0;

	for (i = 0; i < priv->num_rx_queues; i++)
		spin_unlock(&priv->rx_queue[i]->rxlock);
}

void unlock_tx_qs(struct gfar_private *priv)
{
	int i = 0x0;

	for (i = 0; i < priv->num_tx_queues; i++)
		spin_unlock(&priv->tx_queue[i]->txlock);
}

/* Returns 1 if incoming frames use an FCB */
static inline int gfar_uses_fcb(struct gfar_private *priv)
{
	return priv->vlgrp || (priv->ndev->features & NETIF_F_RXCSUM) ||
		(priv->device_flags & FSL_GIANFAR_DEV_HAS_TIMER);
}

static void free_tx_pointers(struct gfar_private *priv)
{
	int i = 0;

	for (i = 0; i < priv->num_tx_queues; i++)
		kfree(priv->tx_queue[i]);
}

static void free_rx_pointers(struct gfar_private *priv)
{
	int i = 0;

	for (i = 0; i < priv->num_rx_queues; i++)
		kfree(priv->rx_queue[i]);
}

static void unmap_group_regs(struct gfar_private *priv)
{
	int i = 0;

	for (i = 0; i < MAXGROUPS; i++)
		if (priv->gfargrp[i].regs)
			iounmap(priv->gfargrp[i].regs);
}

static void disable_napi(struct gfar_private *priv)
{
	int i = 0;

	for (i = 0; i < priv->num_grps; i++)
		napi_disable(&priv->gfargrp[i].napi);
}

static void enable_napi(struct gfar_private *priv)
{
	int i = 0;

	for (i = 0; i < priv->num_grps; i++)
		napi_enable(&priv->gfargrp[i].napi);
}
static int gfar_parse_group(struct device_node *np,
		struct gfar_private *priv, const char *model)
{
	u32 *queue_mask;

	priv->gfargrp[priv->num_grps].regs = of_iomap(np, 0);
	if (!priv->gfargrp[priv->num_grps].regs)
		return -ENOMEM;

	priv->gfargrp[priv->num_grps].interruptTransmit =
		irq_of_parse_and_map(np, 0);

	/* If we aren't the FEC we have multiple interrupts */
	if (model && strcasecmp(model, "FEC")) {
		priv->gfargrp[priv->num_grps].interruptReceive =
			irq_of_parse_and_map(np, 1);
		priv->gfargrp[priv->num_grps].interruptError =
			irq_of_parse_and_map(np, 2);
		if (priv->gfargrp[priv->num_grps].interruptTransmit == NO_IRQ ||
		    priv->gfargrp[priv->num_grps].interruptReceive == NO_IRQ ||
		    priv->gfargrp[priv->num_grps].interruptError == NO_IRQ)
			return -EINVAL;
	}

	priv->gfargrp[priv->num_grps].grp_id = priv->num_grps;
	priv->gfargrp[priv->num_grps].priv = priv;
	spin_lock_init(&priv->gfargrp[priv->num_grps].grplock);
	if (priv->mode == MQ_MG_MODE) {
		queue_mask = (u32 *)of_get_property(np,
					"fsl,rx-bit-map", NULL);
		priv->gfargrp[priv->num_grps].rx_bit_map =
			queue_mask ? *queue_mask : (DEFAULT_MAPPING >> priv->num_grps);
		queue_mask = (u32 *)of_get_property(np,
					"fsl,tx-bit-map", NULL);
		priv->gfargrp[priv->num_grps].tx_bit_map =
			queue_mask ? *queue_mask : (DEFAULT_MAPPING >> priv->num_grps);
	} else {
		priv->gfargrp[priv->num_grps].rx_bit_map = 0xFF;
		priv->gfargrp[priv->num_grps].tx_bit_map = 0xFF;
	}
	priv->num_grps++;

	return 0;
}
static int gfar_of_init(struct platform_device *ofdev, struct net_device **pdev)
{
	const char *model;
	const char *ctype;
	const void *mac_addr;
	int err = 0, i;
	struct net_device *dev = NULL;
	struct gfar_private *priv = NULL;
	struct device_node *np = ofdev->dev.of_node;
	struct device_node *child = NULL;
	const u32 *stash;
	const u32 *stash_len;
	const u32 *stash_idx;
	unsigned int num_tx_qs, num_rx_qs;
	u32 *tx_queues, *rx_queues;

	if (!np || !of_device_is_available(np))
		return -ENODEV;

	/* parse the num of tx and rx queues */
	tx_queues = (u32 *)of_get_property(np, "fsl,num_tx_queues", NULL);
	num_tx_qs = tx_queues ? *tx_queues : 1;

	if (num_tx_qs > MAX_TX_QS) {
		printk(KERN_ERR "num_tx_qs(=%d) greater than MAX_TX_QS(=%d)\n",
				num_tx_qs, MAX_TX_QS);
		printk(KERN_ERR "Cannot do alloc_etherdev, aborting\n");
		return -EINVAL;
	}

	rx_queues = (u32 *)of_get_property(np, "fsl,num_rx_queues", NULL);
	num_rx_qs = rx_queues ? *rx_queues : 1;

	if (num_rx_qs > MAX_RX_QS) {
		printk(KERN_ERR "num_rx_qs(=%d) greater than MAX_RX_QS(=%d)\n",
				num_rx_qs, MAX_RX_QS);
		printk(KERN_ERR "Cannot do alloc_etherdev, aborting\n");
		return -EINVAL;
	}

	*pdev = alloc_etherdev_mq(sizeof(*priv), num_tx_qs);
	dev = *pdev;
	if (NULL == dev)
		return -ENOMEM;

	priv = netdev_priv(dev);
	priv->node = ofdev->dev.of_node;
	priv->ndev = dev;

	priv->num_tx_queues = num_tx_qs;
	netif_set_real_num_rx_queues(dev, num_rx_qs);
	priv->num_rx_queues = num_rx_qs;
	priv->num_grps = 0x0;

	model = of_get_property(np, "model", NULL);

	for (i = 0; i < MAXGROUPS; i++)
		priv->gfargrp[i].regs = NULL;

	/* Parse and initialize group specific information */
	if (of_device_is_compatible(np, "fsl,etsec2")) {
		priv->mode = MQ_MG_MODE;
		for_each_child_of_node(np, child) {
			err = gfar_parse_group(child, priv, model);
			if (err)
				goto err_grp_init;
		}
	} else {
		priv->mode = SQ_SG_MODE;
		err = gfar_parse_group(np, priv, model);
		if (err)
			goto err_grp_init;
	}

	for (i = 0; i < priv->num_tx_queues; i++)
		priv->tx_queue[i] = NULL;
	for (i = 0; i < priv->num_rx_queues; i++)
		priv->rx_queue[i] = NULL;

	for (i = 0; i < priv->num_tx_queues; i++) {
		priv->tx_queue[i] = kzalloc(sizeof(struct gfar_priv_tx_q),
					    GFP_KERNEL);
		if (!priv->tx_queue[i]) {
			err = -ENOMEM;
			goto tx_alloc_failed;
		}
		priv->tx_queue[i]->tx_skbuff = NULL;
		priv->tx_queue[i]->qindex = i;
		priv->tx_queue[i]->dev = dev;
		spin_lock_init(&(priv->tx_queue[i]->txlock));
	}

	for (i = 0; i < priv->num_rx_queues; i++) {
		priv->rx_queue[i] = kzalloc(sizeof(struct gfar_priv_rx_q),
					    GFP_KERNEL);
		if (!priv->rx_queue[i]) {
			err = -ENOMEM;
			goto rx_alloc_failed;
		}
		priv->rx_queue[i]->rx_skbuff = NULL;
		priv->rx_queue[i]->qindex = i;
		priv->rx_queue[i]->dev = dev;
		spin_lock_init(&(priv->rx_queue[i]->rxlock));
	}

	stash = of_get_property(np, "bd-stash", NULL);

	if (stash) {
		priv->device_flags |= FSL_GIANFAR_DEV_HAS_BD_STASHING;
		priv->bd_stash_en = 1;
	}

	stash_len = of_get_property(np, "rx-stash-len", NULL);

	if (stash_len)
		priv->rx_stash_size = *stash_len;

	stash_idx = of_get_property(np, "rx-stash-idx", NULL);

	if (stash_idx)
		priv->rx_stash_index = *stash_idx;

	if (stash_len || stash_idx)
		priv->device_flags |= FSL_GIANFAR_DEV_HAS_BUF_STASHING;

	mac_addr = of_get_mac_address(np);
	if (mac_addr)
		memcpy(dev->dev_addr, mac_addr, MAC_ADDR_LEN);

	if (model && !strcasecmp(model, "TSEC"))
		priv->device_flags =
			FSL_GIANFAR_DEV_HAS_GIGABIT |
			FSL_GIANFAR_DEV_HAS_COALESCE |
			FSL_GIANFAR_DEV_HAS_RMON |
			FSL_GIANFAR_DEV_HAS_MULTI_INTR;
	if (model && !strcasecmp(model, "eTSEC"))
		priv->device_flags =
			FSL_GIANFAR_DEV_HAS_GIGABIT |
			FSL_GIANFAR_DEV_HAS_COALESCE |
			FSL_GIANFAR_DEV_HAS_RMON |
			FSL_GIANFAR_DEV_HAS_MULTI_INTR |
			FSL_GIANFAR_DEV_HAS_PADDING |
			FSL_GIANFAR_DEV_HAS_CSUM |
			FSL_GIANFAR_DEV_HAS_VLAN |
			FSL_GIANFAR_DEV_HAS_MAGIC_PACKET |
			FSL_GIANFAR_DEV_HAS_EXTENDED_HASH |
			FSL_GIANFAR_DEV_HAS_TIMER;

	ctype = of_get_property(np, "phy-connection-type", NULL);

	/* We only care about rgmii-id. The rest are autodetected */
	if (ctype && !strcmp(ctype, "rgmii-id"))
		priv->interface = PHY_INTERFACE_MODE_RGMII_ID;
	else
		priv->interface = PHY_INTERFACE_MODE_MII;

	if (of_get_property(np, "fsl,magic-packet", NULL))
		priv->device_flags |= FSL_GIANFAR_DEV_HAS_MAGIC_PACKET;

	priv->phy_node = of_parse_phandle(np, "phy-handle", 0);

	/* Find the TBI PHY.  If it's not there, we don't support SGMII */
	priv->tbi_node = of_parse_phandle(np, "tbi-handle", 0);

	return 0;

rx_alloc_failed:
	free_rx_pointers(priv);
tx_alloc_failed:
	free_tx_pointers(priv);
err_grp_init:
	unmap_group_regs(priv);
	free_netdev(dev);
	return err;
}
static int gfar_hwtstamp_ioctl(struct net_device *netdev,
			struct ifreq *ifr, int cmd)
{
	struct hwtstamp_config config;
	struct gfar_private *priv = netdev_priv(netdev);

	if (copy_from_user(&config, ifr->ifr_data, sizeof(config)))
		return -EFAULT;

	/* reserved for future extensions */
	if (config.flags)
		return -EINVAL;

	switch (config.tx_type) {
	case HWTSTAMP_TX_OFF:
		priv->hwts_tx_en = 0;
		break;
	case HWTSTAMP_TX_ON:
		if (!(priv->device_flags & FSL_GIANFAR_DEV_HAS_TIMER))
			return -ERANGE;
		priv->hwts_tx_en = 1;
		break;
	default:
		return -ERANGE;
	}

	switch (config.rx_filter) {
	case HWTSTAMP_FILTER_NONE:
		if (priv->hwts_rx_en) {
			stop_gfar(netdev);
			priv->hwts_rx_en = 0;
			startup_gfar(netdev);
		}
		break;
	default:
		if (!(priv->device_flags & FSL_GIANFAR_DEV_HAS_TIMER))
			return -ERANGE;
		if (!priv->hwts_rx_en) {
			stop_gfar(netdev);
			priv->hwts_rx_en = 1;
			startup_gfar(netdev);
		}
		config.rx_filter = HWTSTAMP_FILTER_ALL;
		break;
	}

	return copy_to_user(ifr->ifr_data, &config, sizeof(config)) ?
		-EFAULT : 0;
}
/* Ioctl MII Interface */
static int gfar_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
{
	struct gfar_private *priv = netdev_priv(dev);

	if (!netif_running(dev))
		return -EINVAL;

	if (cmd == SIOCSHWTSTAMP)
		return gfar_hwtstamp_ioctl(dev, rq, cmd);

	if (!priv->phydev)
		return -ENODEV;

	return phy_mii_ioctl(priv->phydev, rq, cmd);
}
static unsigned int reverse_bitmap(unsigned int bit_map, unsigned int max_qs)
{
	unsigned int new_bit_map = 0x0;
	int mask = 0x1 << (max_qs - 1), i;

	for (i = 0; i < max_qs; i++) {
		if (bit_map & mask)
			new_bit_map = new_bit_map + (1 << i);
		mask = mask >> 0x1;
	}
	return new_bit_map;
}
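
/*
 * Worked example (illustrative): with max_qs = 8, an MSB-first mask of
 * 0x88 (1000 1000) is mirrored to 0x11 (0001 0001). The device-tree bit
 * map, whose most significant bit names queue 0, thus becomes an
 * LSB-first mask that for_each_set_bit() can walk in queue order.
 */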
static u32 cluster_entry_per_class(struct gfar_private *priv, u32 rqfar,
		u32 class)
{
	u32 rqfpr = FPR_FILER_MASK;
	u32 rqfcr = 0x0;

	rqfar--;
	rqfcr = RQFCR_CLE | RQFCR_PID_MASK | RQFCR_CMP_EXACT;
	priv->ftp_rqfpr[rqfar] = rqfpr;
	priv->ftp_rqfcr[rqfar] = rqfcr;
	gfar_write_filer(priv, rqfar, rqfcr, rqfpr);

	rqfar--;
	rqfcr = RQFCR_CMP_NOMATCH;
	priv->ftp_rqfpr[rqfar] = rqfpr;
	priv->ftp_rqfcr[rqfar] = rqfcr;
	gfar_write_filer(priv, rqfar, rqfcr, rqfpr);

	rqfar--;
	rqfcr = RQFCR_CMP_EXACT | RQFCR_PID_PARSE | RQFCR_CLE | RQFCR_AND;
	rqfpr = class;
	priv->ftp_rqfcr[rqfar] = rqfcr;
	priv->ftp_rqfpr[rqfar] = rqfpr;
	gfar_write_filer(priv, rqfar, rqfcr, rqfpr);

	rqfar--;
	rqfcr = RQFCR_CMP_EXACT | RQFCR_PID_MASK | RQFCR_AND;
	rqfpr = class;
	priv->ftp_rqfcr[rqfar] = rqfcr;
	priv->ftp_rqfpr[rqfar] = rqfpr;
	gfar_write_filer(priv, rqfar, rqfcr, rqfpr);

	return rqfar;
}

static void gfar_init_filer_table(struct gfar_private *priv)
{
	int i = 0x0;
	u32 rqfar = MAX_FILER_IDX;
	u32 rqfcr = 0x0;
	u32 rqfpr = FPR_FILER_MASK;

	/* Default rule */
	rqfcr = RQFCR_CMP_MATCH;
	priv->ftp_rqfcr[rqfar] = rqfcr;
	priv->ftp_rqfpr[rqfar] = rqfpr;
	gfar_write_filer(priv, rqfar, rqfcr, rqfpr);

	rqfar = cluster_entry_per_class(priv, rqfar, RQFPR_IPV6);
	rqfar = cluster_entry_per_class(priv, rqfar, RQFPR_IPV6 | RQFPR_UDP);
	rqfar = cluster_entry_per_class(priv, rqfar, RQFPR_IPV6 | RQFPR_TCP);
	rqfar = cluster_entry_per_class(priv, rqfar, RQFPR_IPV4);
	rqfar = cluster_entry_per_class(priv, rqfar, RQFPR_IPV4 | RQFPR_UDP);
	rqfar = cluster_entry_per_class(priv, rqfar, RQFPR_IPV4 | RQFPR_TCP);

	/* cur_filer_idx indicates the first non-masked rule */
	priv->cur_filer_idx = rqfar;

	/* Rest are masked rules */
	rqfcr = RQFCR_CMP_NOMATCH;
	for (i = 0; i < rqfar; i++) {
		priv->ftp_rqfcr[i] = rqfcr;
		priv->ftp_rqfpr[i] = rqfpr;
		gfar_write_filer(priv, i, rqfcr, rqfpr);
	}
}
static void gfar_detect_errata(struct gfar_private *priv)
{
	struct device *dev = &priv->ofdev->dev;
	unsigned int pvr = mfspr(SPRN_PVR);
	unsigned int svr = mfspr(SPRN_SVR);
	unsigned int mod = (svr >> 16) & 0xfff6; /* w/o E suffix */
	unsigned int rev = svr & 0xffff;

	/* MPC8313 Rev 2.0 and higher; All MPC837x */
	if ((pvr == 0x80850010 && mod == 0x80b0 && rev >= 0x0020) ||
			(pvr == 0x80861010 && (mod & 0xfff9) == 0x80c0))
		priv->errata |= GFAR_ERRATA_74;

	/* MPC8313 and MPC837x all rev */
	if ((pvr == 0x80850010 && mod == 0x80b0) ||
			(pvr == 0x80861010 && (mod & 0xfff9) == 0x80c0))
		priv->errata |= GFAR_ERRATA_76;

	/* MPC8313 and MPC837x all rev */
	if ((pvr == 0x80850010 && mod == 0x80b0) ||
			(pvr == 0x80861010 && (mod & 0xfff9) == 0x80c0))
		priv->errata |= GFAR_ERRATA_A002;

	/* MPC8313 Rev < 2.0, MPC8548 rev 2.0 */
	if ((pvr == 0x80850010 && mod == 0x80b0 && rev < 0x0020) ||
			(pvr == 0x80210020 && mod == 0x8030 && rev == 0x0020))
		priv->errata |= GFAR_ERRATA_12;

	if (priv->errata)
		dev_info(dev, "enabled errata workarounds, flags: 0x%x\n",
			 priv->errata);
}
/* Set up the ethernet device structure, private data,
 * and anything else we need before we start */
static int gfar_probe(struct platform_device *ofdev)
{
	u32 tempval;
	struct net_device *dev = NULL;
	struct gfar_private *priv = NULL;
	struct gfar __iomem *regs = NULL;
	int err = 0, i, grp_idx = 0;
	int len_devname;
	u32 rstat = 0, tstat = 0, rqueue = 0, tqueue = 0;
	u32 isrg = 0;
	u32 __iomem *baddr;

	err = gfar_of_init(ofdev, &dev);

	if (err)
		return err;

	priv = netdev_priv(dev);
	priv->ndev = dev;
	priv->ofdev = ofdev;
	priv->node = ofdev->dev.of_node;
	SET_NETDEV_DEV(dev, &ofdev->dev);

	spin_lock_init(&priv->bflock);
	INIT_WORK(&priv->reset_task, gfar_reset_task);

	dev_set_drvdata(&ofdev->dev, priv);
	regs = priv->gfargrp[0].regs;

	gfar_detect_errata(priv);

	/* Stop the DMA engine now, in case it was running before */
	/* (The firmware could have used it, and left it running). */
	gfar_halt(dev);

	/* Reset MAC layer */
	gfar_write(&regs->maccfg1, MACCFG1_SOFT_RESET);

	/* We need to delay at least 3 TX clocks */
	udelay(2);

	tempval = (MACCFG1_TX_FLOW | MACCFG1_RX_FLOW);
	gfar_write(&regs->maccfg1, tempval);

	/* Initialize MACCFG2. */
	tempval = MACCFG2_INIT_SETTINGS;
	if (gfar_has_errata(priv, GFAR_ERRATA_74))
		tempval |= MACCFG2_HUGEFRAME | MACCFG2_LENGTHCHECK;
	gfar_write(&regs->maccfg2, tempval);

	/* Initialize ECNTRL */
	gfar_write(&regs->ecntrl, ECNTRL_INIT_SETTINGS);

	/* Set the dev->base_addr to the gfar reg region */
	dev->base_addr = (unsigned long) regs;

	SET_NETDEV_DEV(dev, &ofdev->dev);

	/* Fill in the dev structure */
	dev->watchdog_timeo = TX_TIMEOUT;
	dev->mtu = 1500;
	dev->netdev_ops = &gfar_netdev_ops;
	dev->ethtool_ops = &gfar_ethtool_ops;

	/* Register for napi; NAPI is registered for each group */
	for (i = 0; i < priv->num_grps; i++)
		netif_napi_add(dev, &priv->gfargrp[i].napi, gfar_poll, GFAR_DEV_WEIGHT);

	if (priv->device_flags & FSL_GIANFAR_DEV_HAS_CSUM) {
		dev->hw_features = NETIF_F_IP_CSUM | NETIF_F_SG |
			NETIF_F_RXCSUM;
		dev->features |= NETIF_F_IP_CSUM | NETIF_F_SG |
			NETIF_F_RXCSUM | NETIF_F_HIGHDMA;
	}

	priv->vlgrp = NULL;

	if (priv->device_flags & FSL_GIANFAR_DEV_HAS_VLAN)
		dev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;

	if (priv->device_flags & FSL_GIANFAR_DEV_HAS_EXTENDED_HASH) {
		priv->extended_hash = 1;
		priv->hash_width = 9;

		priv->hash_regs[0] = &regs->igaddr0;
		priv->hash_regs[1] = &regs->igaddr1;
		priv->hash_regs[2] = &regs->igaddr2;
		priv->hash_regs[3] = &regs->igaddr3;
		priv->hash_regs[4] = &regs->igaddr4;
		priv->hash_regs[5] = &regs->igaddr5;
		priv->hash_regs[6] = &regs->igaddr6;
		priv->hash_regs[7] = &regs->igaddr7;
		priv->hash_regs[8] = &regs->gaddr0;
		priv->hash_regs[9] = &regs->gaddr1;
		priv->hash_regs[10] = &regs->gaddr2;
		priv->hash_regs[11] = &regs->gaddr3;
		priv->hash_regs[12] = &regs->gaddr4;
		priv->hash_regs[13] = &regs->gaddr5;
		priv->hash_regs[14] = &regs->gaddr6;
		priv->hash_regs[15] = &regs->gaddr7;

	} else {
		priv->extended_hash = 0;
		priv->hash_width = 8;

		priv->hash_regs[0] = &regs->gaddr0;
		priv->hash_regs[1] = &regs->gaddr1;
		priv->hash_regs[2] = &regs->gaddr2;
		priv->hash_regs[3] = &regs->gaddr3;
		priv->hash_regs[4] = &regs->gaddr4;
		priv->hash_regs[5] = &regs->gaddr5;
		priv->hash_regs[6] = &regs->gaddr6;
		priv->hash_regs[7] = &regs->gaddr7;
	}

	if (priv->device_flags & FSL_GIANFAR_DEV_HAS_PADDING)
		priv->padding = DEFAULT_PADDING;
	else
		priv->padding = 0;

	if (dev->features & NETIF_F_IP_CSUM ||
			priv->device_flags & FSL_GIANFAR_DEV_HAS_TIMER)
		dev->hard_header_len += GMAC_FCB_LEN;

	/* Program the isrg regs only if number of grps > 1 */
	if (priv->num_grps > 1) {
		baddr = &regs->isrg0;
		for (i = 0; i < priv->num_grps; i++) {
			isrg |= (priv->gfargrp[i].rx_bit_map << ISRG_SHIFT_RX);
			isrg |= (priv->gfargrp[i].tx_bit_map << ISRG_SHIFT_TX);
			gfar_write(baddr, isrg);
			baddr++;
			isrg = 0x0;
		}
	}

	/* Need to reverse the bit maps as bit_map's MSB is q0
	 * but, for_each_set_bit parses from right to left, which
	 * basically reverses the queue numbers */
	for (i = 0; i < priv->num_grps; i++) {
		priv->gfargrp[i].tx_bit_map = reverse_bitmap(
				priv->gfargrp[i].tx_bit_map, MAX_TX_QS);
		priv->gfargrp[i].rx_bit_map = reverse_bitmap(
				priv->gfargrp[i].rx_bit_map, MAX_RX_QS);
	}

	/* Calculate RSTAT, TSTAT, RQUEUE and TQUEUE values,
	 * also assign queues to groups */
	for (grp_idx = 0; grp_idx < priv->num_grps; grp_idx++) {
		priv->gfargrp[grp_idx].num_rx_queues = 0x0;
		for_each_set_bit(i, &priv->gfargrp[grp_idx].rx_bit_map,
				priv->num_rx_queues) {
			priv->gfargrp[grp_idx].num_rx_queues++;
			priv->rx_queue[i]->grp = &priv->gfargrp[grp_idx];
			rstat = rstat | (RSTAT_CLEAR_RHALT >> i);
			rqueue = rqueue | ((RQUEUE_EN0 | RQUEUE_EX0) >> i);
		}
		priv->gfargrp[grp_idx].num_tx_queues = 0x0;
		for_each_set_bit(i, &priv->gfargrp[grp_idx].tx_bit_map,
				priv->num_tx_queues) {
			priv->gfargrp[grp_idx].num_tx_queues++;
			priv->tx_queue[i]->grp = &priv->gfargrp[grp_idx];
			tstat = tstat | (TSTAT_CLEAR_THALT >> i);
			tqueue = tqueue | (TQUEUE_EN0 >> i);
		}
		priv->gfargrp[grp_idx].rstat = rstat;
		priv->gfargrp[grp_idx].tstat = tstat;
		rstat = tstat = 0;
	}

	gfar_write(&regs->rqueue, rqueue);
	gfar_write(&regs->tqueue, tqueue);

	priv->rx_buffer_size = DEFAULT_RX_BUFFER_SIZE;

	/* Initializing some of the rx/tx queue level parameters */
	for (i = 0; i < priv->num_tx_queues; i++) {
		priv->tx_queue[i]->tx_ring_size = DEFAULT_TX_RING_SIZE;
		priv->tx_queue[i]->num_txbdfree = DEFAULT_TX_RING_SIZE;
		priv->tx_queue[i]->txcoalescing = DEFAULT_TX_COALESCE;
		priv->tx_queue[i]->txic = DEFAULT_TXIC;
	}

	for (i = 0; i < priv->num_rx_queues; i++) {
		priv->rx_queue[i]->rx_ring_size = DEFAULT_RX_RING_SIZE;
		priv->rx_queue[i]->rxcoalescing = DEFAULT_RX_COALESCE;
		priv->rx_queue[i]->rxic = DEFAULT_RXIC;
	}

	/* enable filer if using multiple RX queues */
	if (priv->num_rx_queues > 1)
		priv->rx_filer_enable = 1;
	/* Enable most messages by default */
	priv->msg_enable = (NETIF_MSG_IFUP << 1) - 1;

	/* Carrier starts down, phylib will bring it up */
	netif_carrier_off(dev);

	err = register_netdev(dev);

	if (err) {
		printk(KERN_ERR "%s: Cannot register net device, aborting.\n",
				dev->name);
		goto register_fail;
	}

	device_init_wakeup(&dev->dev,
		priv->device_flags & FSL_GIANFAR_DEV_HAS_MAGIC_PACKET);

	/* fill out IRQ number and name fields */
	len_devname = strlen(dev->name);
	for (i = 0; i < priv->num_grps; i++) {
		strncpy(&priv->gfargrp[i].int_name_tx[0], dev->name,
				len_devname);
		if (priv->device_flags & FSL_GIANFAR_DEV_HAS_MULTI_INTR) {
			strncpy(&priv->gfargrp[i].int_name_tx[len_devname],
				"_g", sizeof("_g"));
			priv->gfargrp[i].int_name_tx[
				strlen(priv->gfargrp[i].int_name_tx)] = i+48;
			strncpy(&priv->gfargrp[i].int_name_tx[strlen(
				priv->gfargrp[i].int_name_tx)],
				"_tx", sizeof("_tx") + 1);

			strncpy(&priv->gfargrp[i].int_name_rx[0], dev->name,
					len_devname);
			strncpy(&priv->gfargrp[i].int_name_rx[len_devname],
					"_g", sizeof("_g"));
			priv->gfargrp[i].int_name_rx[
				strlen(priv->gfargrp[i].int_name_rx)] = i+48;
			strncpy(&priv->gfargrp[i].int_name_rx[strlen(
				priv->gfargrp[i].int_name_rx)],
				"_rx", sizeof("_rx") + 1);

			strncpy(&priv->gfargrp[i].int_name_er[0], dev->name,
					len_devname);
			strncpy(&priv->gfargrp[i].int_name_er[len_devname],
					"_g", sizeof("_g"));
			priv->gfargrp[i].int_name_er[strlen(
					priv->gfargrp[i].int_name_er)] = i+48;
			strncpy(&priv->gfargrp[i].int_name_er[strlen(
					priv->gfargrp[i].int_name_er)],
					"_er", sizeof("_er") + 1);
		} else
			priv->gfargrp[i].int_name_tx[len_devname] = '\0';
	}

	/* Initialize the filer table */
	gfar_init_filer_table(priv);

	/* Create all the sysfs files */
	gfar_init_sysfs(dev);

	/* Print out the device info */
	printk(KERN_INFO DEVICE_NAME "%pM\n", dev->name, dev->dev_addr);

	/* Even more device info helps when determining which kernel */
	/* provided which set of benchmarks. */
	printk(KERN_INFO "%s: Running with NAPI enabled\n", dev->name);
	for (i = 0; i < priv->num_rx_queues; i++)
		printk(KERN_INFO "%s: RX BD ring size for Q[%d]: %d\n",
			dev->name, i, priv->rx_queue[i]->rx_ring_size);
	for (i = 0; i < priv->num_tx_queues; i++)
		printk(KERN_INFO "%s: TX BD ring size for Q[%d]: %d\n",
			dev->name, i, priv->tx_queue[i]->tx_ring_size);

	return 0;

register_fail:
	unmap_group_regs(priv);
	free_tx_pointers(priv);
	free_rx_pointers(priv);
	if (priv->phy_node)
		of_node_put(priv->phy_node);
	if (priv->tbi_node)
		of_node_put(priv->tbi_node);
	free_netdev(dev);
	return err;
}
static int gfar_remove(struct platform_device *ofdev)
{
	struct gfar_private *priv = dev_get_drvdata(&ofdev->dev);

	if (priv->phy_node)
		of_node_put(priv->phy_node);
	if (priv->tbi_node)
		of_node_put(priv->tbi_node);

	dev_set_drvdata(&ofdev->dev, NULL);

	unregister_netdev(priv->ndev);
	unmap_group_regs(priv);
	free_netdev(priv->ndev);

	return 0;
}
#ifdef CONFIG_PM

static int gfar_suspend(struct device *dev)
{
	struct gfar_private *priv = dev_get_drvdata(dev);
	struct net_device *ndev = priv->ndev;
	struct gfar __iomem *regs = priv->gfargrp[0].regs;
	unsigned long flags;
	u32 tempval;

	int magic_packet = priv->wol_en &&
		(priv->device_flags & FSL_GIANFAR_DEV_HAS_MAGIC_PACKET);

	netif_device_detach(ndev);

	if (netif_running(ndev)) {

		local_irq_save(flags);
		lock_tx_qs(priv);
		lock_rx_qs(priv);

		gfar_halt_nodisable(ndev);

		/* Disable Tx, and Rx if wake-on-LAN is disabled. */
		tempval = gfar_read(&regs->maccfg1);

		tempval &= ~MACCFG1_TX_EN;

		if (!magic_packet)
			tempval &= ~MACCFG1_RX_EN;

		gfar_write(&regs->maccfg1, tempval);

		unlock_rx_qs(priv);
		unlock_tx_qs(priv);
		local_irq_restore(flags);

		disable_napi(priv);

		if (magic_packet) {
			/* Enable interrupt on Magic Packet */
			gfar_write(&regs->imask, IMASK_MAG);

			/* Enable Magic Packet mode */
			tempval = gfar_read(&regs->maccfg2);
			tempval |= MACCFG2_MPEN;
			gfar_write(&regs->maccfg2, tempval);
		} else {
			phy_stop(priv->phydev);
		}
	}

	return 0;
}

static int gfar_resume(struct device *dev)
{
	struct gfar_private *priv = dev_get_drvdata(dev);
	struct net_device *ndev = priv->ndev;
	struct gfar __iomem *regs = priv->gfargrp[0].regs;
	unsigned long flags;
	u32 tempval;
	int magic_packet = priv->wol_en &&
		(priv->device_flags & FSL_GIANFAR_DEV_HAS_MAGIC_PACKET);

	if (!netif_running(ndev)) {
		netif_device_attach(ndev);
		return 0;
	}

	if (!magic_packet && priv->phydev)
		phy_start(priv->phydev);

	/* Disable Magic Packet mode, in case something
	 * else woke us up.
	 */
	local_irq_save(flags);
	lock_tx_qs(priv);
	lock_rx_qs(priv);

	tempval = gfar_read(&regs->maccfg2);
	tempval &= ~MACCFG2_MPEN;
	gfar_write(&regs->maccfg2, tempval);

	gfar_start(ndev);

	unlock_rx_qs(priv);
	unlock_tx_qs(priv);
	local_irq_restore(flags);

	netif_device_attach(ndev);

	enable_napi(priv);

	return 0;
}

static int gfar_restore(struct device *dev)
{
	struct gfar_private *priv = dev_get_drvdata(dev);
	struct net_device *ndev = priv->ndev;

	if (!netif_running(ndev))
		return 0;

	gfar_init_bds(ndev);
	init_registers(ndev);
	gfar_set_mac_address(ndev);
	gfar_init_mac(ndev);
	gfar_start(ndev);

	priv->oldlink = 0;
	priv->oldspeed = 0;
	priv->oldduplex = -1;

	if (priv->phydev)
		phy_start(priv->phydev);

	netif_device_attach(ndev);
	enable_napi(priv);

	return 0;
}

static struct dev_pm_ops gfar_pm_ops = {
	.suspend = gfar_suspend,
	.resume = gfar_resume,
	.freeze = gfar_suspend,
	.thaw = gfar_resume,
	.restore = gfar_restore,
};

#define GFAR_PM_OPS (&gfar_pm_ops)

#else

#define GFAR_PM_OPS NULL

#endif
/* Reads the controller's registers to determine what interface
 * connects it to the PHY.
 */
static phy_interface_t gfar_get_interface(struct net_device *dev)
{
	struct gfar_private *priv = netdev_priv(dev);
	struct gfar __iomem *regs = priv->gfargrp[0].regs;
	u32 ecntrl;

	ecntrl = gfar_read(&regs->ecntrl);

	if (ecntrl & ECNTRL_SGMII_MODE)
		return PHY_INTERFACE_MODE_SGMII;

	if (ecntrl & ECNTRL_TBI_MODE) {
		if (ecntrl & ECNTRL_REDUCED_MODE)
			return PHY_INTERFACE_MODE_RTBI;
		else
			return PHY_INTERFACE_MODE_TBI;
	}

	if (ecntrl & ECNTRL_REDUCED_MODE) {
		if (ecntrl & ECNTRL_REDUCED_MII_MODE)
			return PHY_INTERFACE_MODE_RMII;
		else {
			phy_interface_t interface = priv->interface;

			/*
			 * This isn't autodetected right now, so it must
			 * be set by the device tree or platform code.
			 */
			if (interface == PHY_INTERFACE_MODE_RGMII_ID)
				return PHY_INTERFACE_MODE_RGMII_ID;

			return PHY_INTERFACE_MODE_RGMII;
		}
	}

	if (priv->device_flags & FSL_GIANFAR_DEV_HAS_GIGABIT)
		return PHY_INTERFACE_MODE_GMII;

	return PHY_INTERFACE_MODE_MII;
}
/* Initializes driver's PHY state, and attaches to the PHY.
 * Returns 0 on success.
 */
static int init_phy(struct net_device *dev)
{
	struct gfar_private *priv = netdev_priv(dev);
	uint gigabit_support =
		priv->device_flags & FSL_GIANFAR_DEV_HAS_GIGABIT ?
		SUPPORTED_1000baseT_Full : 0;
	phy_interface_t interface;

	priv->oldlink = 0;
	priv->oldspeed = 0;
	priv->oldduplex = -1;

	interface = gfar_get_interface(dev);

	priv->phydev = of_phy_connect(dev, priv->phy_node, &adjust_link, 0,
				      interface);
	if (!priv->phydev)
		priv->phydev = of_phy_connect_fixed_link(dev, &adjust_link,
							 interface);
	if (!priv->phydev) {
		dev_err(&dev->dev, "could not attach to PHY\n");
		return -ENODEV;
	}

	if (interface == PHY_INTERFACE_MODE_SGMII)
		gfar_configure_serdes(dev);

	/* Remove any features not supported by the controller */
	priv->phydev->supported &= (GFAR_SUPPORTED | gigabit_support);
	priv->phydev->advertising = priv->phydev->supported;

	return 0;
}
/*
 * Initialize TBI PHY interface for communicating with the
 * SERDES lynx PHY on the chip.  We communicate with this PHY
 * through the MDIO bus on each controller, treating it as a
 * "normal" PHY at the address found in the TBIPA register.  We assume
 * that the TBIPA register is valid.  Either the MDIO bus code will set
 * it to a value that doesn't conflict with other PHYs on the bus, or the
 * value doesn't matter, as there are no other PHYs on the bus.
 */
static void gfar_configure_serdes(struct net_device *dev)
{
	struct gfar_private *priv = netdev_priv(dev);
	struct phy_device *tbiphy;

	if (!priv->tbi_node) {
		dev_warn(&dev->dev, "error: SGMII mode requires that the "
				"device tree specify a tbi-handle\n");
		return;
	}

	tbiphy = of_phy_find_device(priv->tbi_node);
	if (!tbiphy) {
		dev_err(&dev->dev, "error: Could not get TBI device\n");
		return;
	}

	/*
	 * If the link is already up, we must already be ok, and don't need to
	 * configure and reset the TBI<->SerDes link.  Maybe U-Boot configured
	 * everything for us?  Resetting it takes the link down and requires
	 * several seconds for it to come back.
	 */
	if (phy_read(tbiphy, MII_BMSR) & BMSR_LSTATUS)
		return;

	/* Single clk mode, mii mode off(for serdes communication) */
	phy_write(tbiphy, MII_TBICON, TBICON_CLK_SELECT);

	phy_write(tbiphy, MII_ADVERTISE,
			ADVERTISE_1000XFULL | ADVERTISE_1000XPAUSE |
			ADVERTISE_1000XPSE_ASYM);

	phy_write(tbiphy, MII_BMCR, BMCR_ANENABLE |
			BMCR_ANRESTART | BMCR_FULLDPLX | BMCR_SPEED1000);
}
static void init_registers(struct net_device *dev)
{
	struct gfar_private *priv = netdev_priv(dev);
	struct gfar __iomem *regs = NULL;
	int i = 0;

	for (i = 0; i < priv->num_grps; i++) {
		regs = priv->gfargrp[i].regs;
		/* Clear IEVENT */
		gfar_write(&regs->ievent, IEVENT_INIT_CLEAR);

		/* Initialize IMASK */
		gfar_write(&regs->imask, IMASK_INIT_CLEAR);
	}

	regs = priv->gfargrp[0].regs;
	/* Init hash registers to zero */
	gfar_write(&regs->igaddr0, 0);
	gfar_write(&regs->igaddr1, 0);
	gfar_write(&regs->igaddr2, 0);
	gfar_write(&regs->igaddr3, 0);
	gfar_write(&regs->igaddr4, 0);
	gfar_write(&regs->igaddr5, 0);
	gfar_write(&regs->igaddr6, 0);
	gfar_write(&regs->igaddr7, 0);

	gfar_write(&regs->gaddr0, 0);
	gfar_write(&regs->gaddr1, 0);
	gfar_write(&regs->gaddr2, 0);
	gfar_write(&regs->gaddr3, 0);
	gfar_write(&regs->gaddr4, 0);
	gfar_write(&regs->gaddr5, 0);
	gfar_write(&regs->gaddr6, 0);
	gfar_write(&regs->gaddr7, 0);

	/* Zero out the rmon mib registers if it has them */
	if (priv->device_flags & FSL_GIANFAR_DEV_HAS_RMON) {
		memset_io(&(regs->rmon), 0, sizeof (struct rmon_mib));

		/* Mask off the CAM interrupts */
		gfar_write(&regs->rmon.cam1, 0xffffffff);
		gfar_write(&regs->rmon.cam2, 0xffffffff);
	}

	/* Initialize the max receive buffer length */
	gfar_write(&regs->mrblr, priv->rx_buffer_size);

	/* Initialize the Minimum Frame Length Register */
	gfar_write(&regs->minflr, MINFLR_INIT_SETTINGS);
}
static int __gfar_is_rx_idle(struct gfar_private *priv)
{
	u32 res;

	/*
	 * Normally TSEC should not hang on GRS commands, so we should
	 * actually wait for IEVENT_GRSC flag.
	 */
	if (likely(!gfar_has_errata(priv, GFAR_ERRATA_A002)))
		return 0;

	/*
	 * Read the eTSEC register at offset 0xD1C. If bits 7-14 are
	 * the same as bits 23-30, the eTSEC Rx is assumed to be idle
	 * and the Rx can be safely reset.
	 */
	res = gfar_read((void __iomem *)priv->gfargrp[0].regs + 0xd1c);
	res &= 0x7f807f80;
	if ((res & 0xffff) == (res >> 16))
		return 1;

	return 0;
}
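
/*
 * Illustrative reading of the check above: the mask 0x7f807f80 keeps
 * bits 23-30 in the upper half of res and bits 7-14 in the lower half.
 * If, say, res = 0x12801280 after masking, then (res & 0xffff) and
 * (res >> 16) are both 0x1280, the two bit fields agree, and the Rx
 * path is reported idle.
 */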
/* Halt the receive and transmit queues */
static void gfar_halt_nodisable(struct net_device *dev)
{
	struct gfar_private *priv = netdev_priv(dev);
	struct gfar __iomem *regs = NULL;
	u32 tempval;
	int i = 0;

	for (i = 0; i < priv->num_grps; i++) {
		regs = priv->gfargrp[i].regs;
		/* Mask all interrupts */
		gfar_write(&regs->imask, IMASK_INIT_CLEAR);

		/* Clear all interrupts */
		gfar_write(&regs->ievent, IEVENT_INIT_CLEAR);
	}

	regs = priv->gfargrp[0].regs;
	/* Stop the DMA, and wait for it to stop */
	tempval = gfar_read(&regs->dmactrl);
	if ((tempval & (DMACTRL_GRS | DMACTRL_GTS))
	    != (DMACTRL_GRS | DMACTRL_GTS)) {
		int ret;

		tempval |= (DMACTRL_GRS | DMACTRL_GTS);
		gfar_write(&regs->dmactrl, tempval);

		do {
			ret = spin_event_timeout(((gfar_read(&regs->ievent) &
				 (IEVENT_GRSC | IEVENT_GTSC)) ==
				 (IEVENT_GRSC | IEVENT_GTSC)), 1000000, 0);
			if (!ret && !(gfar_read(&regs->ievent) & IEVENT_GRSC))
				ret = __gfar_is_rx_idle(priv);
		} while (!ret);
	}
}

/* Halt the receive and transmit queues */
void gfar_halt(struct net_device *dev)
{
	struct gfar_private *priv = netdev_priv(dev);
	struct gfar __iomem *regs = priv->gfargrp[0].regs;
	u32 tempval;

	gfar_halt_nodisable(dev);

	/* Disable Rx and Tx */
	tempval = gfar_read(&regs->maccfg1);
	tempval &= ~(MACCFG1_RX_EN | MACCFG1_TX_EN);
	gfar_write(&regs->maccfg1, tempval);
}
static void free_grp_irqs(struct gfar_priv_grp *grp)
{
	free_irq(grp->interruptError, grp);
	free_irq(grp->interruptTransmit, grp);
	free_irq(grp->interruptReceive, grp);
}

void stop_gfar(struct net_device *dev)
{
	struct gfar_private *priv = netdev_priv(dev);
	unsigned long flags;
	int i;

	phy_stop(priv->phydev);

	/* Lock it down */
	local_irq_save(flags);
	lock_tx_qs(priv);
	lock_rx_qs(priv);

	gfar_halt(dev);

	unlock_rx_qs(priv);
	unlock_tx_qs(priv);
	local_irq_restore(flags);

	/* Free the IRQs */
	if (priv->device_flags & FSL_GIANFAR_DEV_HAS_MULTI_INTR) {
		for (i = 0; i < priv->num_grps; i++)
			free_grp_irqs(&priv->gfargrp[i]);
	} else {
		for (i = 0; i < priv->num_grps; i++)
			free_irq(priv->gfargrp[i].interruptTransmit,
					&priv->gfargrp[i]);
	}

	free_skb_resources(priv);
}
static void free_skb_tx_queue(struct gfar_priv_tx_q *tx_queue)
{
	struct txbd8 *txbdp;
	struct gfar_private *priv = netdev_priv(tx_queue->dev);
	int i, j;

	txbdp = tx_queue->tx_bd_base;

	for (i = 0; i < tx_queue->tx_ring_size; i++) {
		if (!tx_queue->tx_skbuff[i])
			continue;

		dma_unmap_single(&priv->ofdev->dev, txbdp->bufPtr,
				txbdp->length, DMA_TO_DEVICE);
		txbdp->lstatus = 0;
		for (j = 0; j < skb_shinfo(tx_queue->tx_skbuff[i])->nr_frags;
				j++) {
			txbdp++;
			dma_unmap_page(&priv->ofdev->dev, txbdp->bufPtr,
					txbdp->length, DMA_TO_DEVICE);
		}
		txbdp++;
		dev_kfree_skb_any(tx_queue->tx_skbuff[i]);
		tx_queue->tx_skbuff[i] = NULL;
	}
	kfree(tx_queue->tx_skbuff);
}

static void free_skb_rx_queue(struct gfar_priv_rx_q *rx_queue)
{
	struct rxbd8 *rxbdp;
	struct gfar_private *priv = netdev_priv(rx_queue->dev);
	int i;

	rxbdp = rx_queue->rx_bd_base;

	for (i = 0; i < rx_queue->rx_ring_size; i++) {
		if (rx_queue->rx_skbuff[i]) {
			dma_unmap_single(&priv->ofdev->dev,
					rxbdp->bufPtr, priv->rx_buffer_size,
					DMA_FROM_DEVICE);
			dev_kfree_skb_any(rx_queue->rx_skbuff[i]);
			rx_queue->rx_skbuff[i] = NULL;
		}
		rxbdp->lstatus = 0;
		rxbdp->bufPtr = 0;
		rxbdp++;
	}
	kfree(rx_queue->rx_skbuff);
}
/* If there are any tx skbs or rx skbs still around, free them.
 * Then free tx_skbuff and rx_skbuff */
static void free_skb_resources(struct gfar_private *priv)
{
	struct gfar_priv_tx_q *tx_queue = NULL;
	struct gfar_priv_rx_q *rx_queue = NULL;
	int i;

	/* Go through all the buffer descriptors and free their data buffers */
	for (i = 0; i < priv->num_tx_queues; i++) {
		tx_queue = priv->tx_queue[i];
		if (tx_queue->tx_skbuff)
			free_skb_tx_queue(tx_queue);
	}

	for (i = 0; i < priv->num_rx_queues; i++) {
		rx_queue = priv->rx_queue[i];
		if (rx_queue->rx_skbuff)
			free_skb_rx_queue(rx_queue);
	}

	dma_free_coherent(&priv->ofdev->dev,
			sizeof(struct txbd8) * priv->total_tx_ring_size +
			sizeof(struct rxbd8) * priv->total_rx_ring_size,
			priv->tx_queue[0]->tx_bd_base,
			priv->tx_queue[0]->tx_bd_dma_base);
	skb_queue_purge(&priv->rx_recycle);
}
void gfar_start(struct net_device *dev)
{
	struct gfar_private *priv = netdev_priv(dev);
	struct gfar __iomem *regs = priv->gfargrp[0].regs;
	u32 tempval;
	int i = 0;

	/* Enable Rx and Tx in MACCFG1 */
	tempval = gfar_read(&regs->maccfg1);
	tempval |= (MACCFG1_RX_EN | MACCFG1_TX_EN);
	gfar_write(&regs->maccfg1, tempval);

	/* Initialize DMACTRL to have WWR and WOP */
	tempval = gfar_read(&regs->dmactrl);
	tempval |= DMACTRL_INIT_SETTINGS;
	gfar_write(&regs->dmactrl, tempval);

	/* Make sure we aren't stopped */
	tempval = gfar_read(&regs->dmactrl);
	tempval &= ~(DMACTRL_GRS | DMACTRL_GTS);
	gfar_write(&regs->dmactrl, tempval);

	for (i = 0; i < priv->num_grps; i++) {
		regs = priv->gfargrp[i].regs;
		/* Clear THLT/RHLT, so that the DMA starts polling now */
		gfar_write(&regs->tstat, priv->gfargrp[i].tstat);
		gfar_write(&regs->rstat, priv->gfargrp[i].rstat);
		/* Unmask the interrupts we look for */
		gfar_write(&regs->imask, IMASK_DEFAULT);
	}

	dev->trans_start = jiffies; /* prevent tx timeout */
}
void gfar_configure_coalescing(struct gfar_private *priv,
	unsigned long tx_mask, unsigned long rx_mask)
{
	struct gfar __iomem *regs = priv->gfargrp[0].regs;
	u32 __iomem *baddr;
	int i = 0;

	/* Backward compatible case ---- even if we enable
	 * multiple queues, there's only single reg to program
	 */
	gfar_write(&regs->txic, 0);
	if (likely(priv->tx_queue[0]->txcoalescing))
		gfar_write(&regs->txic, priv->tx_queue[0]->txic);

	gfar_write(&regs->rxic, 0);
	if (unlikely(priv->rx_queue[0]->rxcoalescing))
		gfar_write(&regs->rxic, priv->rx_queue[0]->rxic);

	if (priv->mode == MQ_MG_MODE) {
		baddr = &regs->txic0;
		for_each_set_bit(i, &tx_mask, priv->num_tx_queues) {
			if (likely(priv->tx_queue[i]->txcoalescing)) {
				gfar_write(baddr + i, 0);
				gfar_write(baddr + i, priv->tx_queue[i]->txic);
			}
		}

		baddr = &regs->rxic0;
		for_each_set_bit(i, &rx_mask, priv->num_rx_queues) {
			if (likely(priv->rx_queue[i]->rxcoalescing)) {
				gfar_write(baddr + i, 0);
				gfar_write(baddr + i, priv->rx_queue[i]->rxic);
			}
		}
	}
}
static int register_grp_irqs(struct gfar_priv_grp *grp)
{
	struct gfar_private *priv = grp->priv;
	struct net_device *dev = priv->ndev;
	int err;

	/* If the device has multiple interrupts, register for
	 * them.  Otherwise, only register for the one */
	if (priv->device_flags & FSL_GIANFAR_DEV_HAS_MULTI_INTR) {
		/* Install our interrupt handlers for Error,
		 * Transmit, and Receive */
		if ((err = request_irq(grp->interruptError, gfar_error, 0,
				grp->int_name_er, grp)) < 0) {
			if (netif_msg_intr(priv))
				printk(KERN_ERR "%s: Can't get IRQ %d\n",
					dev->name, grp->interruptError);

			goto err_irq_fail;
		}

		if ((err = request_irq(grp->interruptTransmit, gfar_transmit,
				0, grp->int_name_tx, grp)) < 0) {
			if (netif_msg_intr(priv))
				printk(KERN_ERR "%s: Can't get IRQ %d\n",
					dev->name, grp->interruptTransmit);
			goto tx_irq_fail;
		}

		if ((err = request_irq(grp->interruptReceive, gfar_receive, 0,
				grp->int_name_rx, grp)) < 0) {
			if (netif_msg_intr(priv))
				printk(KERN_ERR "%s: Can't get IRQ %d\n",
					dev->name, grp->interruptReceive);
			goto rx_irq_fail;
		}
	} else {
		if ((err = request_irq(grp->interruptTransmit, gfar_interrupt, 0,
				grp->int_name_tx, grp)) < 0) {
			if (netif_msg_intr(priv))
				printk(KERN_ERR "%s: Can't get IRQ %d\n",
					dev->name, grp->interruptTransmit);
			goto err_irq_fail;
		}
	}

	return 0;

rx_irq_fail:
	free_irq(grp->interruptTransmit, grp);
tx_irq_fail:
	free_irq(grp->interruptError, grp);
err_irq_fail:
	return err;
}
/* Bring the controller up and running */
int startup_gfar(struct net_device *ndev)
{
	struct gfar_private *priv = netdev_priv(ndev);
	struct gfar __iomem *regs = NULL;
	int err, i, j;

	for (i = 0; i < priv->num_grps; i++) {
		regs = priv->gfargrp[i].regs;
		gfar_write(&regs->imask, IMASK_INIT_CLEAR);
	}

	regs = priv->gfargrp[0].regs;
	err = gfar_alloc_skb_resources(ndev);
	if (err)
		return err;

	gfar_init_mac(ndev);

	for (i = 0; i < priv->num_grps; i++) {
		err = register_grp_irqs(&priv->gfargrp[i]);
		if (err) {
			for (j = 0; j < i; j++)
				free_grp_irqs(&priv->gfargrp[j]);
			goto irq_fail;
		}
	}

	/* Start the controller */
	gfar_start(ndev);

	phy_start(priv->phydev);

	gfar_configure_coalescing(priv, 0xFF, 0xFF);

	return 0;

irq_fail:
	free_skb_resources(priv);
	return err;
}
/* Called when something needs to use the ethernet device */
/* Returns 0 for success. */
static int gfar_enet_open(struct net_device *dev)
{
	struct gfar_private *priv = netdev_priv(dev);
	int err;

	enable_napi(priv);

	skb_queue_head_init(&priv->rx_recycle);

	/* Initialize a bunch of registers */
	init_registers(dev);

	gfar_set_mac_address(dev);

	err = init_phy(dev);

	if (err) {
		disable_napi(priv);
		return err;
	}

	err = startup_gfar(dev);
	if (err) {
		disable_napi(priv);
		return err;
	}

	netif_tx_start_all_queues(dev);

	device_set_wakeup_enable(&dev->dev, priv->wol_en);

	return err;
}
static inline struct txfcb *gfar_add_fcb(struct sk_buff *skb)
{
	struct txfcb *fcb = (struct txfcb *)skb_push(skb, GMAC_FCB_LEN);

	memset(fcb, 0, GMAC_FCB_LEN);

	return fcb;
}

static inline void gfar_tx_checksum(struct sk_buff *skb, struct txfcb *fcb)
{
	u8 flags = 0;

	/* If we're here, it's an IP packet with a TCP or UDP
	 * payload.  We set it to checksum, using a pseudo-header
	 * we provide
	 */
	flags = TXFCB_DEFAULT;

	/* Tell the controller what the protocol is */
	/* And provide the already calculated phcs */
	if (ip_hdr(skb)->protocol == IPPROTO_UDP) {
		flags |= TXFCB_UDP;
		fcb->phcs = udp_hdr(skb)->check;
	} else
		fcb->phcs = tcp_hdr(skb)->check;

	/* l3os is the distance between the start of the
	 * frame (skb->data) and the start of the IP hdr.
	 * l4os is the distance between the start of the
	 * l3 hdr and the l4 hdr */
	fcb->l3os = (u16)(skb_network_offset(skb) - GMAC_FCB_LEN);
	fcb->l4os = skb_network_header_len(skb);

	fcb->flags = flags;
}
2016 static inline void gfar_tx_vlan(struct sk_buff *skb, struct txfcb *fcb)
2018 fcb->flags |= TXFCB_VLN;
2019 fcb->vlctl = vlan_tx_tag_get(skb);
2022 static inline struct txbd8 *skip_txbd(struct txbd8 *bdp, int stride,
2023 struct txbd8 *base, int ring_size)
2025 struct txbd8 *new_bd = bdp + stride;
2027 return (new_bd >= (base + ring_size)) ? (new_bd - ring_size) : new_bd;
2030 static inline struct txbd8 *next_txbd(struct txbd8 *bdp, struct txbd8 *base,
2031 int ring_size)
2033 return skip_txbd(bdp, 1, base, ring_size);
2036 /* This is called by the kernel when a frame is ready for transmission. */
2037 /* It is hooked up as the ndo_start_xmit entry in the netdev_ops */
2038 static int gfar_start_xmit(struct sk_buff *skb, struct net_device *dev)
2040 struct gfar_private *priv = netdev_priv(dev);
2041 struct gfar_priv_tx_q *tx_queue = NULL;
2042 struct netdev_queue *txq;
2043 struct gfar __iomem *regs = NULL;
2044 struct txfcb *fcb = NULL;
2045 struct txbd8 *txbdp, *txbdp_start, *base, *txbdp_tstamp = NULL;
2046 u32 lstatus;
2047 int i, rq = 0, do_tstamp = 0;
2048 u32 bufaddr;
2049 unsigned long flags;
2050 unsigned int nr_frags, nr_txbds, length;
2053 * TOE=1 frames larger than 2500 bytes may see excess delays
2054 * before start of transmission.
2056 if (unlikely(gfar_has_errata(priv, GFAR_ERRATA_76) &&
2057 skb->ip_summed == CHECKSUM_PARTIAL &&
2058 skb->len > 2500)) {
2059 int ret;
2061 ret = skb_checksum_help(skb);
2062 if (ret)
2063 return ret;
2066 rq = skb->queue_mapping;
2067 tx_queue = priv->tx_queue[rq];
2068 txq = netdev_get_tx_queue(dev, rq);
2069 base = tx_queue->tx_bd_base;
2070 regs = tx_queue->grp->regs;
2072 /* check if time stamp should be generated */
2073 if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP &&
2074 priv->hwts_tx_en))
2075 do_tstamp = 1;
2077 /* make space for additional header when fcb is needed */
2078 if (((skb->ip_summed == CHECKSUM_PARTIAL) ||
2079 vlan_tx_tag_present(skb) ||
2080 unlikely(do_tstamp)) &&
2081 (skb_headroom(skb) < GMAC_FCB_LEN)) {
2082 struct sk_buff *skb_new;
2084 skb_new = skb_realloc_headroom(skb, GMAC_FCB_LEN);
2085 if (!skb_new) {
2086 dev->stats.tx_errors++;
2087 kfree_skb(skb);
2088 return NETDEV_TX_OK;
2090 kfree_skb(skb);
2091 skb = skb_new;
2094 /* total number of fragments in the SKB */
2095 nr_frags = skb_shinfo(skb)->nr_frags;
2097 /* calculate the required number of TxBDs for this skb */
2098 if (unlikely(do_tstamp))
2099 nr_txbds = nr_frags + 2;
2100 else
2101 nr_txbds = nr_frags + 1;
2103 /* check if there is space to queue this packet */
2104 if (nr_txbds > tx_queue->num_txbdfree) {
2105 /* no space, stop the queue */
2106 netif_tx_stop_queue(txq);
2107 dev->stats.tx_fifo_errors++;
2108 return NETDEV_TX_BUSY;
2111 /* Update transmit stats */
2112 tx_queue->stats.tx_bytes += skb->len;
2113 tx_queue->stats.tx_packets++;
2115 txbdp = txbdp_start = tx_queue->cur_tx;
2116 lstatus = txbdp->lstatus;
2118 /* Time stamp insertion requires one additional TxBD */
2119 if (unlikely(do_tstamp))
2120 txbdp_tstamp = txbdp = next_txbd(txbdp, base,
2121 tx_queue->tx_ring_size);
2123 if (nr_frags == 0) {
2124 if (unlikely(do_tstamp))
2125 txbdp_tstamp->lstatus |= BD_LFLAG(TXBD_LAST |
2126 TXBD_INTERRUPT);
2127 else
2128 lstatus |= BD_LFLAG(TXBD_LAST | TXBD_INTERRUPT);
2129 } else {
2130 /* Place the fragment addresses and lengths into the TxBDs */
2131 for (i = 0; i < nr_frags; i++) {
2132 /* Point at the next BD, wrapping as needed */
2133 txbdp = next_txbd(txbdp, base, tx_queue->tx_ring_size);
2135 length = skb_shinfo(skb)->frags[i].size;
2137 lstatus = txbdp->lstatus | length |
2138 BD_LFLAG(TXBD_READY);
2140 /* Handle the last BD specially */
2141 if (i == nr_frags - 1)
2142 lstatus |= BD_LFLAG(TXBD_LAST | TXBD_INTERRUPT);
2144 bufaddr = dma_map_page(&priv->ofdev->dev,
2145 skb_shinfo(skb)->frags[i].page,
2146 skb_shinfo(skb)->frags[i].page_offset,
2147 length,
2148 DMA_TO_DEVICE);
2150 /* set the TxBD length and buffer pointer */
2151 txbdp->bufPtr = bufaddr;
2152 txbdp->lstatus = lstatus;
2155 lstatus = txbdp_start->lstatus;
2158 /* Set up checksumming */
2159 if (CHECKSUM_PARTIAL == skb->ip_summed) {
2160 fcb = gfar_add_fcb(skb);
2161 /* as specified by errata */
2162 if (unlikely(gfar_has_errata(priv, GFAR_ERRATA_12)
2163 && ((unsigned long)fcb % 0x20) > 0x18)) {
2164 __skb_pull(skb, GMAC_FCB_LEN);
2165 skb_checksum_help(skb);
2166 } else {
2167 lstatus |= BD_LFLAG(TXBD_TOE);
2168 gfar_tx_checksum(skb, fcb);
2172 if (vlan_tx_tag_present(skb)) {
2173 if (unlikely(NULL == fcb)) {
2174 fcb = gfar_add_fcb(skb);
2175 lstatus |= BD_LFLAG(TXBD_TOE);
2178 gfar_tx_vlan(skb, fcb);
2181 /* Setup tx hardware time stamping if requested */
2182 if (unlikely(do_tstamp)) {
2183 skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
2184 if (fcb == NULL)
2185 fcb = gfar_add_fcb(skb);
2186 fcb->ptp = 1;
2187 lstatus |= BD_LFLAG(TXBD_TOE);
2190 txbdp_start->bufPtr = dma_map_single(&priv->ofdev->dev, skb->data,
2191 skb_headlen(skb), DMA_TO_DEVICE);
2194 * If time stamping is requested one additional TxBD must be set up. The
2195 * first TxBD points to the FCB and must have a data length of
2196 * GMAC_FCB_LEN. The second TxBD points to the actual frame data with
2197 * the full frame length.
2199 if (unlikely(do_tstamp)) {
2200 txbdp_tstamp->bufPtr = txbdp_start->bufPtr + GMAC_FCB_LEN;
2201 txbdp_tstamp->lstatus |= BD_LFLAG(TXBD_READY) |
2202 (skb_headlen(skb) - GMAC_FCB_LEN);
2203 lstatus |= BD_LFLAG(TXBD_CRC | TXBD_READY) | GMAC_FCB_LEN;
2204 } else {
2205 lstatus |= BD_LFLAG(TXBD_CRC | TXBD_READY) | skb_headlen(skb);
2209 * We can work in parallel with gfar_clean_tx_ring(), except
2210 * when modifying num_txbdfree. Note that we didn't grab the lock
2211 * when we were reading the num_txbdfree and checking for available
2212 * space, that's because outside of this function it can only grow,
2213 * and once we've got needed space, it cannot suddenly disappear.
2215 * The lock also protects us from gfar_error(), which can modify
2216 * regs->tstat and thus retrigger the transfers, which is why we
2217 * also must grab the lock before setting ready bit for the first
2218 * to be transmitted BD.
2220 spin_lock_irqsave(&tx_queue->txlock, flags);
2223 * The powerpc-specific eieio() is used, as wmb() has too strong
2224 * semantics (it requires synchronization between cacheable and
2225 * uncacheable mappings, which eieio doesn't provide and which we
2226 * don't need), thus requiring a more expensive sync instruction. At
2227 * some point, the set of architecture-independent barrier functions
2228 * should be expanded to include weaker barriers.
2230 eieio();
2232 txbdp_start->lstatus = lstatus;
2234 eieio(); /* force lstatus write before tx_skbuff */
2236 tx_queue->tx_skbuff[tx_queue->skb_curtx] = skb;
2238 /* Update the current skb pointer to the next entry we will use
2239 * (wrapping if necessary) */
2240 tx_queue->skb_curtx = (tx_queue->skb_curtx + 1) &
2241 TX_RING_MOD_MASK(tx_queue->tx_ring_size);
2243 tx_queue->cur_tx = next_txbd(txbdp, base, tx_queue->tx_ring_size);
2245 /* reduce TxBD free count */
2246 tx_queue->num_txbdfree -= (nr_txbds);
2248 /* If the next BD still needs to be cleaned up, then the bds
2249 are full. We need to tell the kernel to stop sending us stuff. */
2250 if (!tx_queue->num_txbdfree) {
2251 netif_tx_stop_queue(txq);
2253 dev->stats.tx_fifo_errors++;
2256 /* Tell the DMA to go go go */
2257 gfar_write(&regs->tstat, TSTAT_CLEAR_THALT >> tx_queue->qindex);
2259 /* Unlock priv */
2260 spin_unlock_irqrestore(&tx_queue->txlock, flags);
2262 return NETDEV_TX_OK;
2265 /* Stops the kernel queue, and halts the controller */
2266 static int gfar_close(struct net_device *dev)
2268 struct gfar_private *priv = netdev_priv(dev);
2270 disable_napi(priv);
2272 cancel_work_sync(&priv->reset_task);
2273 stop_gfar(dev);
2275 /* Disconnect from the PHY */
2276 phy_disconnect(priv->phydev);
2277 priv->phydev = NULL;
2279 netif_tx_stop_all_queues(dev);
2281 return 0;
2284 /* Changes the mac address if the controller is not running. */
2285 static int gfar_set_mac_address(struct net_device *dev)
2287 gfar_set_mac_for_addr(dev, 0, dev->dev_addr);
2289 return 0;
2293 /* Enables and disables VLAN insertion/extraction */
2294 static void gfar_vlan_rx_register(struct net_device *dev,
2295 struct vlan_group *grp)
2297 struct gfar_private *priv = netdev_priv(dev);
2298 struct gfar __iomem *regs = NULL;
2299 unsigned long flags;
2300 u32 tempval;
2302 regs = priv->gfargrp[0].regs;
2303 local_irq_save(flags);
2304 lock_rx_qs(priv);
2306 priv->vlgrp = grp;
2308 if (grp) {
2309 /* Enable VLAN tag insertion */
2310 tempval = gfar_read(&regs->tctrl);
2311 tempval |= TCTRL_VLINS;
2313 gfar_write(&regs->tctrl, tempval);
2315 /* Enable VLAN tag extraction */
2316 tempval = gfar_read(&regs->rctrl);
2317 tempval |= (RCTRL_VLEX | RCTRL_PRSDEP_INIT);
2318 gfar_write(&regs->rctrl, tempval);
2319 } else {
2320 /* Disable VLAN tag insertion */
2321 tempval = gfar_read(&regs->tctrl);
2322 tempval &= ~TCTRL_VLINS;
2323 gfar_write(&regs->tctrl, tempval);
2325 /* Disable VLAN tag extraction */
2326 tempval = gfar_read(&regs->rctrl);
2327 tempval &= ~RCTRL_VLEX;
2328 /* If parse is no longer required, then disable parser */
2329 if (tempval & RCTRL_REQ_PARSER)
2330 tempval |= RCTRL_PRSDEP_INIT;
2331 else
2332 tempval &= ~RCTRL_PRSDEP_INIT;
2333 gfar_write(&regs->rctrl, tempval);
2336 gfar_change_mtu(dev, dev->mtu);
2338 unlock_rx_qs(priv);
2339 local_irq_restore(flags);
2342 static int gfar_change_mtu(struct net_device *dev, int new_mtu)
2344 int tempsize, tempval;
2345 struct gfar_private *priv = netdev_priv(dev);
2346 struct gfar __iomem *regs = priv->gfargrp[0].regs;
2347 int oldsize = priv->rx_buffer_size;
2348 int frame_size = new_mtu + ETH_HLEN;
2350 if (priv->vlgrp)
2351 frame_size += VLAN_HLEN;
2353 if ((frame_size < 64) || (frame_size > JUMBO_FRAME_SIZE)) {
2354 if (netif_msg_drv(priv))
2355 printk(KERN_ERR "%s: Invalid MTU setting\n",
2356 dev->name);
2357 return -EINVAL;
2360 if (gfar_uses_fcb(priv))
2361 frame_size += GMAC_FCB_LEN;
2363 frame_size += priv->padding;
2365 tempsize =
2366 (frame_size & ~(INCREMENTAL_BUFFER_SIZE - 1)) +
2367 INCREMENTAL_BUFFER_SIZE;
2369 /* Only stop and start the controller if it isn't already
2370 * stopped, and we changed something */
2371 if ((oldsize != tempsize) && (dev->flags & IFF_UP))
2372 stop_gfar(dev);
2374 priv->rx_buffer_size = tempsize;
2376 dev->mtu = new_mtu;
2378 gfar_write(&regs->mrblr, priv->rx_buffer_size);
2379 gfar_write(&regs->maxfrm, priv->rx_buffer_size);
2381 /* If the mtu is larger than the max size for standard
2382 * ethernet frames (ie, a jumbo frame), then set maccfg2
2383 * to allow huge frames, and to check the length */
2384 tempval = gfar_read(&regs->maccfg2);
2386 if (priv->rx_buffer_size > DEFAULT_RX_BUFFER_SIZE ||
2387 gfar_has_errata(priv, GFAR_ERRATA_74))
2388 tempval |= (MACCFG2_HUGEFRAME | MACCFG2_LENGTHCHECK);
2389 else
2390 tempval &= ~(MACCFG2_HUGEFRAME | MACCFG2_LENGTHCHECK);
2392 gfar_write(&regs->maccfg2, tempval);
2394 if ((oldsize != tempsize) && (dev->flags & IFF_UP))
2395 startup_gfar(dev);
2397 return 0;
2400 /* gfar_reset_task gets scheduled when a packet has not been
2401 * transmitted after a set amount of time.
2402 * For now, assume that clearing out all the structures, and
2403 * starting over will fix the problem.
2405 static void gfar_reset_task(struct work_struct *work)
2407 struct gfar_private *priv = container_of(work, struct gfar_private,
2408 reset_task);
2409 struct net_device *dev = priv->ndev;
2411 if (dev->flags & IFF_UP) {
2412 netif_tx_stop_all_queues(dev);
2413 stop_gfar(dev);
2414 startup_gfar(dev);
2415 netif_tx_start_all_queues(dev);
2418 netif_tx_schedule_all(dev);
2421 static void gfar_timeout(struct net_device *dev)
2423 struct gfar_private *priv = netdev_priv(dev);
2425 dev->stats.tx_errors++;
2426 schedule_work(&priv->reset_task);
2429 static void gfar_align_skb(struct sk_buff *skb)
2431 /* We need the data buffer to be aligned properly. We will reserve
2432 * as many bytes as needed to achieve that alignment */
2434 skb_reserve(skb, RXBUF_ALIGNMENT -
2435 (((unsigned long) skb->data) & (RXBUF_ALIGNMENT - 1)));
2438 /* Interrupt Handler for Transmit complete */
2439 static int gfar_clean_tx_ring(struct gfar_priv_tx_q *tx_queue)
2441 struct net_device *dev = tx_queue->dev;
2442 struct gfar_private *priv = netdev_priv(dev);
2443 struct gfar_priv_rx_q *rx_queue = NULL;
2444 struct txbd8 *bdp, *next = NULL;
2445 struct txbd8 *lbdp = NULL;
2446 struct txbd8 *base = tx_queue->tx_bd_base;
2447 struct sk_buff *skb;
2448 int skb_dirtytx;
2449 int tx_ring_size = tx_queue->tx_ring_size;
2450 int frags = 0, nr_txbds = 0;
2451 int i;
2452 int howmany = 0;
2453 u32 lstatus;
2454 size_t buflen;
2456 rx_queue = priv->rx_queue[tx_queue->qindex];
2457 bdp = tx_queue->dirty_tx;
2458 skb_dirtytx = tx_queue->skb_dirtytx;
2460 while ((skb = tx_queue->tx_skbuff[skb_dirtytx])) {
2461 unsigned long flags;
2463 frags = skb_shinfo(skb)->nr_frags;
2466 * When time stamping, one additional TxBD must be freed.
2467 * Also, we need to dma_unmap_single() the TxPAL.
2469 if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_IN_PROGRESS))
2470 nr_txbds = frags + 2;
2471 else
2472 nr_txbds = frags + 1;
2474 lbdp = skip_txbd(bdp, nr_txbds - 1, base, tx_ring_size);
2476 lstatus = lbdp->lstatus;
2478 /* Only clean completed frames */
2479 if ((lstatus & BD_LFLAG(TXBD_READY)) &&
2480 (lstatus & BD_LENGTH_MASK))
2481 break;
2483 if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_IN_PROGRESS)) {
2484 next = next_txbd(bdp, base, tx_ring_size);
2485 buflen = next->length + GMAC_FCB_LEN;
2486 } else
2487 buflen = bdp->length;
2489 dma_unmap_single(&priv->ofdev->dev, bdp->bufPtr,
2490 buflen, DMA_TO_DEVICE);
2492 if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_IN_PROGRESS)) {
2493 struct skb_shared_hwtstamps shhwtstamps;
2494 u64 *ns = (u64 *)(((unsigned long)skb->data + 0x10) & ~0x7UL);
2495 memset(&shhwtstamps, 0, sizeof(shhwtstamps));
2496 shhwtstamps.hwtstamp = ns_to_ktime(*ns);
2497 skb_tstamp_tx(skb, &shhwtstamps);
2498 bdp->lstatus &= BD_LFLAG(TXBD_WRAP);
2499 bdp = next;
2502 bdp->lstatus &= BD_LFLAG(TXBD_WRAP);
2503 bdp = next_txbd(bdp, base, tx_ring_size);
2505 for (i = 0; i < frags; i++) {
2506 dma_unmap_page(&priv->ofdev->dev,
2507 bdp->bufPtr,
2508 bdp->length,
2509 DMA_TO_DEVICE);
2510 bdp->lstatus &= BD_LFLAG(TXBD_WRAP);
2511 bdp = next_txbd(bdp, base, tx_ring_size);
2515 * If there's room in the queue (limit it to rx_buffer_size)
2516 * we add this skb back into the pool, if it's the right size
2518 if (skb_queue_len(&priv->rx_recycle) < rx_queue->rx_ring_size &&
2519 skb_recycle_check(skb, priv->rx_buffer_size +
2520 RXBUF_ALIGNMENT)) {
2521 gfar_align_skb(skb);
2522 skb_queue_head(&priv->rx_recycle, skb);
2523 } else
2524 dev_kfree_skb_any(skb);
2526 tx_queue->tx_skbuff[skb_dirtytx] = NULL;
2528 skb_dirtytx = (skb_dirtytx + 1) &
2529 TX_RING_MOD_MASK(tx_ring_size);
2531 howmany++;
2532 spin_lock_irqsave(&tx_queue->txlock, flags);
2533 tx_queue->num_txbdfree += nr_txbds;
2534 spin_unlock_irqrestore(&tx_queue->txlock, flags);
2537 /* If we freed a buffer, we can restart transmission, if necessary */
2538 if (__netif_subqueue_stopped(dev, tx_queue->qindex) && tx_queue->num_txbdfree)
2539 netif_wake_subqueue(dev, tx_queue->qindex);
2541 /* Update dirty indicators */
2542 tx_queue->skb_dirtytx = skb_dirtytx;
2543 tx_queue->dirty_tx = bdp;
2545 return howmany;
2548 static void gfar_schedule_cleanup(struct gfar_priv_grp *gfargrp)
2550 unsigned long flags;
2552 spin_lock_irqsave(&gfargrp->grplock, flags);
2553 if (napi_schedule_prep(&gfargrp->napi)) {
2554 gfar_write(&gfargrp->regs->imask, IMASK_RTX_DISABLED);
2555 __napi_schedule(&gfargrp->napi);
2556 } else {
2558 * Clear IEVENT, so interrupts aren't called again
2559 * because of the packets that have already arrived.
2561 gfar_write(&gfargrp->regs->ievent, IEVENT_RTX_MASK);
2563 spin_unlock_irqrestore(&gfargrp->grplock, flags);
2567 /* Interrupt Handler for Transmit complete */
2568 static irqreturn_t gfar_transmit(int irq, void *grp_id)
2570 gfar_schedule_cleanup((struct gfar_priv_grp *)grp_id);
2571 return IRQ_HANDLED;
2574 static void gfar_new_rxbdp(struct gfar_priv_rx_q *rx_queue, struct rxbd8 *bdp,
2575 struct sk_buff *skb)
2577 struct net_device *dev = rx_queue->dev;
2578 struct gfar_private *priv = netdev_priv(dev);
2579 dma_addr_t buf;
2581 buf = dma_map_single(&priv->ofdev->dev, skb->data,
2582 priv->rx_buffer_size, DMA_FROM_DEVICE);
2583 gfar_init_rxbdp(rx_queue, bdp, buf);
2586 static struct sk_buff *gfar_alloc_skb(struct net_device *dev)
2588 struct gfar_private *priv = netdev_priv(dev);
2589 struct sk_buff *skb = NULL;
2591 skb = netdev_alloc_skb(dev, priv->rx_buffer_size + RXBUF_ALIGNMENT);
2592 if (!skb)
2593 return NULL;
2595 gfar_align_skb(skb);
2597 return skb;
2600 struct sk_buff *gfar_new_skb(struct net_device *dev)
2602 struct gfar_private *priv = netdev_priv(dev);
2603 struct sk_buff *skb = NULL;
2605 skb = skb_dequeue(&priv->rx_recycle);
2606 if (!skb)
2607 skb = gfar_alloc_skb(dev);
2609 return skb;
2612 static inline void count_errors(unsigned short status, struct net_device *dev)
2614 struct gfar_private *priv = netdev_priv(dev);
2615 struct net_device_stats *stats = &dev->stats;
2616 struct gfar_extra_stats *estats = &priv->extra_stats;
2618 /* If the packet was truncated, none of the other errors
2619 * matter */
2620 if (status & RXBD_TRUNCATED) {
2621 stats->rx_length_errors++;
2623 estats->rx_trunc++;
2625 return;
2627 /* Count the errors, if there were any */
2628 if (status & (RXBD_LARGE | RXBD_SHORT)) {
2629 stats->rx_length_errors++;
2631 if (status & RXBD_LARGE)
2632 estats->rx_large++;
2633 else
2634 estats->rx_short++;
2636 if (status & RXBD_NONOCTET) {
2637 stats->rx_frame_errors++;
2638 estats->rx_nonoctet++;
2640 if (status & RXBD_CRCERR) {
2641 estats->rx_crcerr++;
2642 stats->rx_crc_errors++;
2644 if (status & RXBD_OVERRUN) {
2645 estats->rx_overrun++;
2646 stats->rx_over_errors++;
2650 irqreturn_t gfar_receive(int irq, void *grp_id)
2652 gfar_schedule_cleanup((struct gfar_priv_grp *)grp_id);
2653 return IRQ_HANDLED;
2656 static inline void gfar_rx_checksum(struct sk_buff *skb, struct rxfcb *fcb)
2658 /* If valid headers were found, and valid sums
2659 * were verified, then we tell the kernel that no
2660 * checksumming is necessary; otherwise the stack must verify it */
2661 if ((fcb->flags & RXFCB_CSUM_MASK) == (RXFCB_CIP | RXFCB_CTU))
2662 skb->ip_summed = CHECKSUM_UNNECESSARY;
2663 else
2664 skb_checksum_none_assert(skb);
2668 /* gfar_process_frame() -- handle one incoming packet if skb
2669 * isn't NULL. */
2670 static int gfar_process_frame(struct net_device *dev, struct sk_buff *skb,
2671 int amount_pull)
2673 struct gfar_private *priv = netdev_priv(dev);
2674 struct rxfcb *fcb = NULL;
2676 int ret;
2678 /* The FCB, if present, sits at the beginning of the buffer */
2679 fcb = (struct rxfcb *)skb->data;
2681 /* Remove the FCB from the skb */
2682 /* Remove the padded bytes, if there are any */
2683 if (amount_pull) {
2684 skb_record_rx_queue(skb, fcb->rq);
2685 skb_pull(skb, amount_pull);
2688 /* Get receive timestamp from the skb */
2689 if (priv->hwts_rx_en) {
2690 struct skb_shared_hwtstamps *shhwtstamps = skb_hwtstamps(skb);
2691 u64 *ns = (u64 *) skb->data;
2692 memset(shhwtstamps, 0, sizeof(*shhwtstamps));
2693 shhwtstamps->hwtstamp = ns_to_ktime(*ns);
2696 if (priv->padding)
2697 skb_pull(skb, priv->padding);
2699 if (dev->features & NETIF_F_RXCSUM)
2700 gfar_rx_checksum(skb, fcb);
2702 /* Tell the skb what kind of packet this is */
2703 skb->protocol = eth_type_trans(skb, dev);
2705 /* Send the packet up the stack */
2706 if (unlikely(priv->vlgrp && (fcb->flags & RXFCB_VLN)))
2707 ret = vlan_hwaccel_receive_skb(skb, priv->vlgrp, fcb->vlctl);
2708 else
2709 ret = netif_receive_skb(skb);
2711 if (NET_RX_DROP == ret)
2712 priv->extra_stats.kernel_dropped++;
2714 return 0;
2717 /* gfar_clean_rx_ring() -- Processes each frame in the rx ring
2718 * until the budget/quota has been reached. Returns the number
2719 * of frames handled
2721 int gfar_clean_rx_ring(struct gfar_priv_rx_q *rx_queue, int rx_work_limit)
2723 struct net_device *dev = rx_queue->dev;
2724 struct rxbd8 *bdp, *base;
2725 struct sk_buff *skb;
2726 int pkt_len;
2727 int amount_pull;
2728 int howmany = 0;
2729 struct gfar_private *priv = netdev_priv(dev);
2731 /* Get the first full descriptor */
2732 bdp = rx_queue->cur_rx;
2733 base = rx_queue->rx_bd_base;
2735 amount_pull = (gfar_uses_fcb(priv) ? GMAC_FCB_LEN : 0);
2737 while (!((bdp->status & RXBD_EMPTY) || (--rx_work_limit < 0))) {
2738 struct sk_buff *newskb;
2739 rmb();
2741 /* Add another skb for the future */
2742 newskb = gfar_new_skb(dev);
2744 skb = rx_queue->rx_skbuff[rx_queue->skb_currx];
2746 dma_unmap_single(&priv->ofdev->dev, bdp->bufPtr,
2747 priv->rx_buffer_size, DMA_FROM_DEVICE);
2749 if (unlikely(!(bdp->status & RXBD_ERR) &&
2750 bdp->length > priv->rx_buffer_size))
2751 bdp->status = RXBD_LARGE;
2753 /* We drop the frame if we failed to allocate a new buffer */
2754 if (unlikely(!newskb || !(bdp->status & RXBD_LAST) ||
2755 bdp->status & RXBD_ERR)) {
2756 count_errors(bdp->status, dev);
2758 if (unlikely(!newskb))
2759 newskb = skb;
2760 else if (skb)
2761 skb_queue_head(&priv->rx_recycle, skb);
2762 } else {
2763 /* Increment the number of packets */
2764 rx_queue->stats.rx_packets++;
2765 howmany++;
2767 if (likely(skb)) {
2768 pkt_len = bdp->length - ETH_FCS_LEN;
2769 /* Remove the FCS from the packet length */
2770 skb_put(skb, pkt_len);
2771 rx_queue->stats.rx_bytes += pkt_len;
2772 skb_record_rx_queue(skb, rx_queue->qindex);
2773 gfar_process_frame(dev, skb, amount_pull);
2775 } else {
2776 if (netif_msg_rx_err(priv))
2777 printk(KERN_WARNING
2778 "%s: Missing skb!\n", dev->name);
2779 rx_queue->stats.rx_dropped++;
2780 priv->extra_stats.rx_skbmissing++;
2785 rx_queue->rx_skbuff[rx_queue->skb_currx] = newskb;
2787 /* Setup the new bdp */
2788 gfar_new_rxbdp(rx_queue, bdp, newskb);
2790 /* Update to the next pointer */
2791 bdp = next_bd(bdp, base, rx_queue->rx_ring_size);
2793 /* update to point at the next skb */
2794 rx_queue->skb_currx =
2795 (rx_queue->skb_currx + 1) &
2796 RX_RING_MOD_MASK(rx_queue->rx_ring_size);
2799 /* Update the current rxbd pointer to be the next one */
2800 rx_queue->cur_rx = bdp;
2802 return howmany;
2805 static int gfar_poll(struct napi_struct *napi, int budget)
2807 struct gfar_priv_grp *gfargrp = container_of(napi,
2808 struct gfar_priv_grp, napi);
2809 struct gfar_private *priv = gfargrp->priv;
2810 struct gfar __iomem *regs = gfargrp->regs;
2811 struct gfar_priv_tx_q *tx_queue = NULL;
2812 struct gfar_priv_rx_q *rx_queue = NULL;
2813 int rx_cleaned = 0, budget_per_queue = 0, rx_cleaned_per_queue = 0;
2814 int tx_cleaned = 0, i, left_over_budget = budget;
2815 unsigned long serviced_queues = 0;
2816 int num_queues = 0;
2818 num_queues = gfargrp->num_rx_queues;
2819 budget_per_queue = budget / num_queues;
2821 /* Clear IEVENT, so interrupts aren't called again
2822 * because of the packets that have already arrived */
2823 gfar_write(&regs->ievent, IEVENT_RTX_MASK);
2825 while (num_queues && left_over_budget) {
2827 budget_per_queue = left_over_budget / num_queues;
2828 left_over_budget = 0;
2830 for_each_set_bit(i, &gfargrp->rx_bit_map, priv->num_rx_queues) {
2831 if (test_bit(i, &serviced_queues))
2832 continue;
2833 rx_queue = priv->rx_queue[i];
2834 tx_queue = priv->tx_queue[rx_queue->qindex];
2836 tx_cleaned += gfar_clean_tx_ring(tx_queue);
2837 rx_cleaned_per_queue = gfar_clean_rx_ring(rx_queue,
2838 budget_per_queue);
2839 rx_cleaned += rx_cleaned_per_queue;
2840 if (rx_cleaned_per_queue < budget_per_queue) {
2841 left_over_budget = left_over_budget +
2842 (budget_per_queue - rx_cleaned_per_queue);
2843 set_bit(i, &serviced_queues);
2844 num_queues--;
2849 if (tx_cleaned)
2850 return budget;
2852 if (rx_cleaned < budget) {
2853 napi_complete(napi);
2855 /* Clear the halt bit in RSTAT */
2856 gfar_write(&regs->rstat, gfargrp->rstat);
2858 gfar_write(&regs->imask, IMASK_DEFAULT);
2860 /* If we are coalescing interrupts, update the timer */
2861 /* Otherwise, clear it */
2862 gfar_configure_coalescing(priv,
2863 gfargrp->rx_bit_map, gfargrp->tx_bit_map);
2866 return rx_cleaned;
2869 #ifdef CONFIG_NET_POLL_CONTROLLER
2871 * Polling 'interrupt' - used by things like netconsole to send skbs
2872 * without having to re-enable interrupts. It's not called while
2873 * the interrupt routine is executing.
2875 static void gfar_netpoll(struct net_device *dev)
2877 struct gfar_private *priv = netdev_priv(dev);
2878 int i = 0;
2880 /* If the device has multiple interrupts, run tx/rx */
2881 if (priv->device_flags & FSL_GIANFAR_DEV_HAS_MULTI_INTR) {
2882 for (i = 0; i < priv->num_grps; i++) {
2883 disable_irq(priv->gfargrp[i].interruptTransmit);
2884 disable_irq(priv->gfargrp[i].interruptReceive);
2885 disable_irq(priv->gfargrp[i].interruptError);
2886 gfar_interrupt(priv->gfargrp[i].interruptTransmit,
2887 &priv->gfargrp[i]);
2888 enable_irq(priv->gfargrp[i].interruptError);
2889 enable_irq(priv->gfargrp[i].interruptReceive);
2890 enable_irq(priv->gfargrp[i].interruptTransmit);
2892 } else {
2893 for (i = 0; i < priv->num_grps; i++) {
2894 disable_irq(priv->gfargrp[i].interruptTransmit);
2895 gfar_interrupt(priv->gfargrp[i].interruptTransmit,
2896 &priv->gfargrp[i]);
2897 enable_irq(priv->gfargrp[i].interruptTransmit);
2901 #endif
2903 /* The interrupt handler for devices with one interrupt */
2904 static irqreturn_t gfar_interrupt(int irq, void *grp_id)
2906 struct gfar_priv_grp *gfargrp = grp_id;
2908 /* Save ievent for future reference */
2909 u32 events = gfar_read(&gfargrp->regs->ievent);
2911 /* Check for reception */
2912 if (events & IEVENT_RX_MASK)
2913 gfar_receive(irq, grp_id);
2915 /* Check for transmit completion */
2916 if (events & IEVENT_TX_MASK)
2917 gfar_transmit(irq, grp_id);
2919 /* Check for errors */
2920 if (events & IEVENT_ERR_MASK)
2921 gfar_error(irq, grp_id);
2923 return IRQ_HANDLED;
2926 /* Called every time the controller might need to be made
2927 * aware of new link state. The PHY code conveys this
2928 * information through variables in the phydev structure, and this
2929 * function converts those variables into the appropriate
2930 * register values, and can bring down the device if needed.
2932 static void adjust_link(struct net_device *dev)
2934 struct gfar_private *priv = netdev_priv(dev);
2935 struct gfar __iomem *regs = priv->gfargrp[0].regs;
2936 unsigned long flags;
2937 struct phy_device *phydev = priv->phydev;
2938 int new_state = 0;
2940 local_irq_save(flags);
2941 lock_tx_qs(priv);
2943 if (phydev->link) {
2944 u32 tempval = gfar_read(&regs->maccfg2);
2945 u32 ecntrl = gfar_read(&regs->ecntrl);
2947 /* Now we make sure that we can be in full duplex mode.
2948 * If not, we operate in half-duplex mode. */
2949 if (phydev->duplex != priv->oldduplex) {
2950 new_state = 1;
2951 if (!(phydev->duplex))
2952 tempval &= ~(MACCFG2_FULL_DUPLEX);
2953 else
2954 tempval |= MACCFG2_FULL_DUPLEX;
2956 priv->oldduplex = phydev->duplex;
2959 if (phydev->speed != priv->oldspeed) {
2960 new_state = 1;
2961 switch (phydev->speed) {
2962 case 1000:
2963 tempval =
2964 ((tempval & ~(MACCFG2_IF)) | MACCFG2_GMII);
2966 ecntrl &= ~(ECNTRL_R100);
2967 break;
2968 case 100:
2969 case 10:
2970 tempval =
2971 ((tempval & ~(MACCFG2_IF)) | MACCFG2_MII);
2973 /* Reduced mode distinguishes
2974 * between 10 and 100 */
2975 if (phydev->speed == SPEED_100)
2976 ecntrl |= ECNTRL_R100;
2977 else
2978 ecntrl &= ~(ECNTRL_R100);
2979 break;
2980 default:
2981 if (netif_msg_link(priv))
2982 printk(KERN_WARNING
2983 "%s: Ack! Speed (%d) is not 10/100/1000!\n",
2984 dev->name, phydev->speed);
2985 break;
2988 priv->oldspeed = phydev->speed;
2991 gfar_write(&regs->maccfg2, tempval);
2992 gfar_write(&regs->ecntrl, ecntrl);
2994 if (!priv->oldlink) {
2995 new_state = 1;
2996 priv->oldlink = 1;
2998 } else if (priv->oldlink) {
2999 new_state = 1;
3000 priv->oldlink = 0;
3001 priv->oldspeed = 0;
3002 priv->oldduplex = -1;
3005 if (new_state && netif_msg_link(priv))
3006 phy_print_status(phydev);
3007 unlock_tx_qs(priv);
3008 local_irq_restore(flags);
3011 /* Update the hash table based on the current list of multicast
3012 * addresses we subscribe to. Also, change the promiscuity of
3013 * the device based on the flags (this function is called
3014 * whenever dev->flags is changed */
3015 static void gfar_set_multi(struct net_device *dev)
3017 struct netdev_hw_addr *ha;
3018 struct gfar_private *priv = netdev_priv(dev);
3019 struct gfar __iomem *regs = priv->gfargrp[0].regs;
3020 u32 tempval;
3022 if (dev->flags & IFF_PROMISC) {
3023 /* Set RCTRL to PROM */
3024 tempval = gfar_read(&regs->rctrl);
3025 tempval |= RCTRL_PROM;
3026 gfar_write(&regs->rctrl, tempval);
3027 } else {
3028 /* Set RCTRL to not PROM */
3029 tempval = gfar_read(&regs->rctrl);
3030 tempval &= ~(RCTRL_PROM);
3031 gfar_write(&regs->rctrl, tempval);
3034 if (dev->flags & IFF_ALLMULTI) {
3035 /* Set the hash to rx all multicast frames */
3036 gfar_write(&regs->igaddr0, 0xffffffff);
3037 gfar_write(&regs->igaddr1, 0xffffffff);
3038 gfar_write(&regs->igaddr2, 0xffffffff);
3039 gfar_write(&regs->igaddr3, 0xffffffff);
3040 gfar_write(&regs->igaddr4, 0xffffffff);
3041 gfar_write(&regs->igaddr5, 0xffffffff);
3042 gfar_write(&regs->igaddr6, 0xffffffff);
3043 gfar_write(&regs->igaddr7, 0xffffffff);
3044 gfar_write(&regs->gaddr0, 0xffffffff);
3045 gfar_write(&regs->gaddr1, 0xffffffff);
3046 gfar_write(&regs->gaddr2, 0xffffffff);
3047 gfar_write(&regs->gaddr3, 0xffffffff);
3048 gfar_write(&regs->gaddr4, 0xffffffff);
3049 gfar_write(&regs->gaddr5, 0xffffffff);
3050 gfar_write(&regs->gaddr6, 0xffffffff);
3051 gfar_write(&regs->gaddr7, 0xffffffff);
3052 } else {
3053 int em_num;
3054 int idx;
3056 /* zero out the hash */
3057 gfar_write(&regs->igaddr0, 0x0);
3058 gfar_write(&regs->igaddr1, 0x0);
3059 gfar_write(&regs->igaddr2, 0x0);
3060 gfar_write(&regs->igaddr3, 0x0);
3061 gfar_write(&regs->igaddr4, 0x0);
3062 gfar_write(&regs->igaddr5, 0x0);
3063 gfar_write(&regs->igaddr6, 0x0);
3064 gfar_write(&regs->igaddr7, 0x0);
3065 gfar_write(&regs->gaddr0, 0x0);
3066 gfar_write(&regs->gaddr1, 0x0);
3067 gfar_write(&regs->gaddr2, 0x0);
3068 gfar_write(&regs->gaddr3, 0x0);
3069 gfar_write(&regs->gaddr4, 0x0);
3070 gfar_write(&regs->gaddr5, 0x0);
3071 gfar_write(&regs->gaddr6, 0x0);
3072 gfar_write(&regs->gaddr7, 0x0);
3074 /* If we have extended hash tables, we need to
3075 * clear the exact match registers to prepare for
3076 * setting them */
3077 if (priv->extended_hash) {
3078 em_num = GFAR_EM_NUM + 1;
3079 gfar_clear_exact_match(dev);
3080 idx = 1;
3081 } else {
3082 idx = 0;
3083 em_num = 0;
3086 if (netdev_mc_empty(dev))
3087 return;
3089 /* Parse the list, and set the appropriate bits */
3090 netdev_for_each_mc_addr(ha, dev) {
3091 if (idx < em_num) {
3092 gfar_set_mac_for_addr(dev, idx, ha->addr);
3093 idx++;
3094 } else
3095 gfar_set_hash_for_addr(dev, ha->addr);
3101 /* Clears each of the exact match registers to zero, so they
3102 * don't interfere with normal reception */
3103 static void gfar_clear_exact_match(struct net_device *dev)
3105 int idx;
3106 static const u8 zero_arr[MAC_ADDR_LEN] = {0, 0, 0, 0, 0, 0};
3108 for (idx = 1; idx < GFAR_EM_NUM + 1; idx++)
3109 gfar_set_mac_for_addr(dev, idx, zero_arr);
3112 /* Set the appropriate hash bit for the given addr */
3113 /* The algorithm works like so:
3114 * 1) Take the Destination Address (ie the multicast address), and
3115 * do a CRC on it (little endian), and reverse the bits of the
3116 * result.
3117 * 2) Use the 8 most significant bits as a hash into a 256-entry
3118 * table. The table is controlled through 8 32-bit registers:
3119 * gaddr0-7. gaddr0's MSB is entry 0, and gaddr7's LSB is
3120 * entry 255. This means that the 3 most significant bits of the
3121 * hash index select which gaddr register to use, and the other 5 bits
3122 * indicate which bit (assuming an IBM numbering scheme, which
3123 * for PowerPC (tm) is usually the case) in the register holds
3124 * the entry. */
3125 static void gfar_set_hash_for_addr(struct net_device *dev, u8 *addr)
3127 u32 tempval;
3128 struct gfar_private *priv = netdev_priv(dev);
3129 u32 result = ether_crc(MAC_ADDR_LEN, addr);
3130 int width = priv->hash_width;
3131 u8 whichbit = (result >> (32 - width)) & 0x1f;
3132 u8 whichreg = result >> (32 - width + 5);
3133 u32 value = (1 << (31-whichbit));
3135 tempval = gfar_read(priv->hash_regs[whichreg]);
3136 tempval |= value;
3137 gfar_write(priv->hash_regs[whichreg], tempval);
3141 /* There are multiple MAC Address register pairs on some controllers
3142 * This function sets the numth pair to a given address
3144 static void gfar_set_mac_for_addr(struct net_device *dev, int num,
3145 const u8 *addr)
3147 struct gfar_private *priv = netdev_priv(dev);
3148 struct gfar __iomem *regs = priv->gfargrp[0].regs;
3149 int idx;
3150 char tmpbuf[MAC_ADDR_LEN + 2] = { 0 }; /* padded so the second 32-bit read below stays in bounds */
3151 u32 tempval;
3152 u32 __iomem *macptr = &regs->macstnaddr1;
3154 macptr += num*2;
3156 /* Now copy it into the mac registers backwards, since the */
3157 /* hardware expects the address bytes in reverse order */
3158 for (idx = 0; idx < MAC_ADDR_LEN; idx++)
3159 tmpbuf[MAC_ADDR_LEN - 1 - idx] = addr[idx];
3161 gfar_write(macptr, *((u32 *) (tmpbuf)));
3163 tempval = *((u32 *) (tmpbuf + 4));
3165 gfar_write(macptr+1, tempval);
3168 /* GFAR error interrupt handler */
3169 static irqreturn_t gfar_error(int irq, void *grp_id)
3171 struct gfar_priv_grp *gfargrp = grp_id;
3172 struct gfar __iomem *regs = gfargrp->regs;
3173 struct gfar_private *priv = gfargrp->priv;
3174 struct net_device *dev = priv->ndev;
3176 /* Save ievent for future reference */
3177 u32 events = gfar_read(&regs->ievent);
3179 /* Clear IEVENT */
3180 gfar_write(&regs->ievent, events & IEVENT_ERR_MASK);
3182 /* Magic Packet is not an error. */
3183 if ((priv->device_flags & FSL_GIANFAR_DEV_HAS_MAGIC_PACKET) &&
3184 (events & IEVENT_MAG))
3185 events &= ~IEVENT_MAG;
3187 /* Hmm... */
3188 if (netif_msg_rx_err(priv) || netif_msg_tx_err(priv))
3189 printk(KERN_DEBUG "%s: error interrupt (ievent=0x%08x imask=0x%08x)\n",
3190 dev->name, events, gfar_read(&regs->imask));
3192 /* Update the error counters */
3193 if (events & IEVENT_TXE) {
3194 dev->stats.tx_errors++;
3196 if (events & IEVENT_LC)
3197 dev->stats.tx_window_errors++;
3198 if (events & IEVENT_CRL)
3199 dev->stats.tx_aborted_errors++;
3200 if (events & IEVENT_XFUN) {
3201 unsigned long flags;
3203 if (netif_msg_tx_err(priv))
3204 printk(KERN_DEBUG "%s: TX FIFO underrun, "
3205 "packet dropped.\n", dev->name);
3206 dev->stats.tx_dropped++;
3207 priv->extra_stats.tx_underrun++;
3209 local_irq_save(flags);
3210 lock_tx_qs(priv);
3212 /* Reactivate the Tx Queues */
3213 gfar_write(&regs->tstat, gfargrp->tstat);
3215 unlock_tx_qs(priv);
3216 local_irq_restore(flags);
3218 if (netif_msg_tx_err(priv))
3219 printk(KERN_DEBUG "%s: Transmit Error\n", dev->name);
3221 if (events & IEVENT_BSY) {
3222 dev->stats.rx_errors++;
3223 priv->extra_stats.rx_bsy++;
3225 gfar_receive(irq, grp_id);
3227 if (netif_msg_rx_err(priv))
3228 printk(KERN_DEBUG "%s: busy error (rstat: %x)\n",
3229 dev->name, gfar_read(&regs->rstat));
3231 if (events & IEVENT_BABR) {
3232 dev->stats.rx_errors++;
3233 priv->extra_stats.rx_babr++;
3235 if (netif_msg_rx_err(priv))
3236 printk(KERN_DEBUG "%s: babbling RX error\n", dev->name);
3238 if (events & IEVENT_EBERR) {
3239 priv->extra_stats.eberr++;
3240 if (netif_msg_rx_err(priv))
3241 printk(KERN_DEBUG "%s: bus error\n", dev->name);
3243 if ((events & IEVENT_RXC) && netif_msg_rx_status(priv))
3244 printk(KERN_DEBUG "%s: control frame\n", dev->name);
3246 if (events & IEVENT_BABT) {
3247 priv->extra_stats.tx_babt++;
3248 if (netif_msg_tx_err(priv))
3249 printk(KERN_DEBUG "%s: babbling TX error\n", dev->name);
3251 return IRQ_HANDLED;
3254 static struct of_device_id gfar_match[] =
3255 {
3256 {
3257 .type = "network",
3258 .compatible = "gianfar",
3259 },
3260 {
3261 .compatible = "fsl,etsec2",
3262 },
3263 {},
3264 };
3265 MODULE_DEVICE_TABLE(of, gfar_match);
3267 /* Structure for a device driver */
3268 static struct platform_driver gfar_driver = {
3269 .driver = {
3270 .name = "fsl-gianfar",
3271 .owner = THIS_MODULE,
3272 .pm = GFAR_PM_OPS,
3273 .of_match_table = gfar_match,
3274 },
3275 .probe = gfar_probe,
3276 .remove = gfar_remove,
3277 };
3279 static int __init gfar_init(void)
3281 return platform_driver_register(&gfar_driver);
3284 static void __exit gfar_exit(void)
3286 platform_driver_unregister(&gfar_driver);
3289 module_init(gfar_init);
3290 module_exit(gfar_exit);