/*
 * drivers/net/gianfar.c
 *
 * Gianfar Ethernet Driver
 * This driver is designed for the non-CPM ethernet controllers
 * on the 85xx and 83xx family of integrated processors
 * Based on 8260_io/fcc_enet.c
 *
 * Maintainer: Kumar Gala
 *
 * Copyright (c) 2002-2006 Freescale Semiconductor, Inc.
 * Copyright (c) 2007 MontaVista Software, Inc.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the
 * Free Software Foundation; either version 2 of the License, or (at your
 * option) any later version.
 *
 *  Gianfar:  AKA Lambda Draconis, "Dragon"
 *
 *  The driver is initialized through of_device.  Configuration information
 *  is therefore conveyed through an OF-style device tree.
 *
 *  The Gianfar Ethernet Controller uses a ring of buffer
 *  descriptors.  The beginning is indicated by a register
 *  pointing to the physical address of the start of the ring.
 *  The end is determined by a "wrap" bit being set in the
 *  last descriptor of the ring.
 *
 *  When a packet is received, the RXF bit in the
 *  IEVENT register is set, triggering an interrupt when the
 *  corresponding bit in the IMASK register is also set (if
 *  interrupt coalescing is active, then the interrupt may not
 *  happen immediately, but will wait until either a set number
 *  of frames or amount of time have passed).  In NAPI, the
 *  interrupt handler will signal there is work to be done, and
 *  exit.  This method will start at the last known empty
 *  descriptor, and process every subsequent descriptor until there
 *  are none left with data (NAPI will stop after a set number of
 *  packets to give time to other tasks, but will eventually
 *  process all the packets).  The data arrives inside a
 *  pre-allocated skb, and so after the skb is passed up to the
 *  stack, a new skb must be allocated, and the address field in
 *  the buffer descriptor must be updated to indicate this new
 *  skb.
 *
 *  When the kernel requests that a packet be transmitted, the
 *  driver starts where it left off last time, and points the
 *  descriptor at the buffer which was passed in.  The driver
 *  then informs the DMA engine that there are packets ready to
 *  be transmitted.  Once the controller is finished transmitting
 *  the packet, an interrupt may be triggered (under the same
 *  conditions as for reception, but depending on the TXF bit).
 *  The driver then cleans up the buffer.
 */
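/*
 * Illustrative sketch only, not part of the driver: one way a ring of
 * buffer descriptors terminated by a "wrap" flag can be walked.  The
 * struct layout, macro and function names below are hypothetical and
 * simplified; the real descriptors are struct txbd8 / struct rxbd8.
 */
struct example_bd {
	unsigned short status;	/* flag bits, e.g. EXAMPLE_BD_WRAP */
	unsigned short length;	/* length of the attached buffer */
	unsigned int bufptr;	/* physical address of the data buffer */
};

#define EXAMPLE_BD_WRAP 0x2000

/* Advance to the next descriptor, returning to the base of the ring
 * when the current descriptor carries the wrap flag. */
static inline struct example_bd *example_next_bd(struct example_bd *bdp,
						 struct example_bd *base)
{
	return (bdp->status & EXAMPLE_BD_WRAP) ? base : bdp + 1;
}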
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/unistd.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/init.h>
#include <linux/delay.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/if_vlan.h>
#include <linux/spinlock.h>
#include <linux/mm.h>
#include <linux/of_platform.h>
#include <linux/ip.h>
#include <linux/tcp.h>
#include <linux/udp.h>
#include <linux/in.h>

#include <asm/io.h>
#include <asm/irq.h>
#include <asm/uaccess.h>
#include <linux/module.h>
#include <linux/dma-mapping.h>
#include <linux/crc32.h>
#include <linux/mii.h>
#include <linux/phy.h>
#include <linux/phy_fixed.h>

#include "gianfar.h"
#include "fsl_pq_mdio.h"
#define TX_TIMEOUT      (1*HZ)
#undef BRIEF_GFAR_ERRORS
#undef VERBOSE_GFAR_ERRORS

const char gfar_driver_name[] = "Gianfar Ethernet";
const char gfar_driver_version[] = "1.3";
static int gfar_enet_open(struct net_device *dev);
static int gfar_start_xmit(struct sk_buff *skb, struct net_device *dev);
static void gfar_reset_task(struct work_struct *work);
static void gfar_timeout(struct net_device *dev);
static int gfar_close(struct net_device *dev);
struct sk_buff *gfar_new_skb(struct net_device *dev);
static void gfar_new_rxbdp(struct net_device *dev, struct rxbd8 *bdp,
		struct sk_buff *skb);
static int gfar_set_mac_address(struct net_device *dev);
static int gfar_change_mtu(struct net_device *dev, int new_mtu);
static irqreturn_t gfar_error(int irq, void *dev_id);
static irqreturn_t gfar_transmit(int irq, void *dev_id);
static irqreturn_t gfar_interrupt(int irq, void *dev_id);
static void adjust_link(struct net_device *dev);
static void init_registers(struct net_device *dev);
static int init_phy(struct net_device *dev);
static int gfar_probe(struct of_device *ofdev,
		const struct of_device_id *match);
static int gfar_remove(struct of_device *ofdev);
static void free_skb_resources(struct gfar_private *priv);
static void gfar_set_multi(struct net_device *dev);
static void gfar_set_hash_for_addr(struct net_device *dev, u8 *addr);
static void gfar_configure_serdes(struct net_device *dev);
static int gfar_poll(struct napi_struct *napi, int budget);
#ifdef CONFIG_NET_POLL_CONTROLLER
static void gfar_netpoll(struct net_device *dev);
#endif
int gfar_clean_rx_ring(struct net_device *dev, int rx_work_limit);
static int gfar_clean_tx_ring(struct net_device *dev);
static int gfar_process_frame(struct net_device *dev, struct sk_buff *skb,
		int amount_pull);
static void gfar_vlan_rx_register(struct net_device *netdev,
		struct vlan_group *grp);
void gfar_halt(struct net_device *dev);
static void gfar_halt_nodisable(struct net_device *dev);
void gfar_start(struct net_device *dev);
static void gfar_clear_exact_match(struct net_device *dev);
static void gfar_set_mac_for_addr(struct net_device *dev, int num, u8 *addr);
static int gfar_ioctl(struct net_device *dev, struct ifreq *rq, int cmd);
MODULE_AUTHOR("Freescale Semiconductor, Inc");
MODULE_DESCRIPTION("Gianfar Ethernet Driver");
MODULE_LICENSE("GPL");

static const struct net_device_ops gfar_netdev_ops = {
	.ndo_open = gfar_enet_open,
	.ndo_start_xmit = gfar_start_xmit,
	.ndo_stop = gfar_close,
	.ndo_change_mtu = gfar_change_mtu,
	.ndo_set_multicast_list = gfar_set_multi,
	.ndo_tx_timeout = gfar_timeout,
	.ndo_do_ioctl = gfar_ioctl,
	.ndo_vlan_rx_register = gfar_vlan_rx_register,
	.ndo_set_mac_address = eth_mac_addr,
	.ndo_validate_addr = eth_validate_addr,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller = gfar_netpoll,
#endif
};
/* Returns 1 if incoming frames use an FCB */
static inline int gfar_uses_fcb(struct gfar_private *priv)
{
	return priv->vlgrp || priv->rx_csum_enable;
}
171 static int gfar_of_init(struct net_device
*dev
)
173 struct device_node
*phy
, *mdio
;
174 const unsigned int *id
;
177 const void *mac_addr
;
181 struct gfar_private
*priv
= netdev_priv(dev
);
182 struct device_node
*np
= priv
->node
;
183 char bus_name
[MII_BUS_ID_SIZE
];
185 const u32
*stash_len
;
186 const u32
*stash_idx
;
188 if (!np
|| !of_device_is_available(np
))
191 /* get a pointer to the register memory */
192 addr
= of_translate_address(np
, of_get_address(np
, 0, &size
, NULL
));
193 priv
->regs
= ioremap(addr
, size
);
195 if (priv
->regs
== NULL
)
198 priv
->interruptTransmit
= irq_of_parse_and_map(np
, 0);
200 model
= of_get_property(np
, "model", NULL
);
202 /* If we aren't the FEC we have multiple interrupts */
203 if (model
&& strcasecmp(model
, "FEC")) {
204 priv
->interruptReceive
= irq_of_parse_and_map(np
, 1);
206 priv
->interruptError
= irq_of_parse_and_map(np
, 2);
208 if (priv
->interruptTransmit
< 0 ||
209 priv
->interruptReceive
< 0 ||
210 priv
->interruptError
< 0) {
216 stash
= of_get_property(np
, "bd-stash", NULL
);
219 priv
->device_flags
|= FSL_GIANFAR_DEV_HAS_BD_STASHING
;
220 priv
->bd_stash_en
= 1;
223 stash_len
= of_get_property(np
, "rx-stash-len", NULL
);
226 priv
->rx_stash_size
= *stash_len
;
228 stash_idx
= of_get_property(np
, "rx-stash-idx", NULL
);
231 priv
->rx_stash_index
= *stash_idx
;
233 if (stash_len
|| stash_idx
)
234 priv
->device_flags
|= FSL_GIANFAR_DEV_HAS_BUF_STASHING
;
236 mac_addr
= of_get_mac_address(np
);
238 memcpy(dev
->dev_addr
, mac_addr
, MAC_ADDR_LEN
);
240 if (model
&& !strcasecmp(model
, "TSEC"))
242 FSL_GIANFAR_DEV_HAS_GIGABIT
|
243 FSL_GIANFAR_DEV_HAS_COALESCE
|
244 FSL_GIANFAR_DEV_HAS_RMON
|
245 FSL_GIANFAR_DEV_HAS_MULTI_INTR
;
246 if (model
&& !strcasecmp(model
, "eTSEC"))
248 FSL_GIANFAR_DEV_HAS_GIGABIT
|
249 FSL_GIANFAR_DEV_HAS_COALESCE
|
250 FSL_GIANFAR_DEV_HAS_RMON
|
251 FSL_GIANFAR_DEV_HAS_MULTI_INTR
|
252 FSL_GIANFAR_DEV_HAS_PADDING
|
253 FSL_GIANFAR_DEV_HAS_CSUM
|
254 FSL_GIANFAR_DEV_HAS_VLAN
|
255 FSL_GIANFAR_DEV_HAS_MAGIC_PACKET
|
256 FSL_GIANFAR_DEV_HAS_EXTENDED_HASH
;
258 ctype
= of_get_property(np
, "phy-connection-type", NULL
);
260 /* We only care about rgmii-id. The rest are autodetected */
261 if (ctype
&& !strcmp(ctype
, "rgmii-id"))
262 priv
->interface
= PHY_INTERFACE_MODE_RGMII_ID
;
264 priv
->interface
= PHY_INTERFACE_MODE_MII
;
266 if (of_get_property(np
, "fsl,magic-packet", NULL
))
267 priv
->device_flags
|= FSL_GIANFAR_DEV_HAS_MAGIC_PACKET
;
269 ph
= of_get_property(np
, "phy-handle", NULL
);
273 fixed_link
= (u32
*)of_get_property(np
, "fixed-link", NULL
);
279 snprintf(priv
->phy_bus_id
, sizeof(priv
->phy_bus_id
),
280 PHY_ID_FMT
, "0", fixed_link
[0]);
282 phy
= of_find_node_by_phandle(*ph
);
289 mdio
= of_get_parent(phy
);
291 id
= of_get_property(phy
, "reg", NULL
);
295 fsl_pq_mdio_bus_name(bus_name
, mdio
);
297 snprintf(priv
->phy_bus_id
, sizeof(priv
->phy_bus_id
), "%s:%02x",
301 /* Find the TBI PHY. If it's not there, we don't support SGMII */
302 ph
= of_get_property(np
, "tbi-handle", NULL
);
304 struct device_node
*tbi
= of_find_node_by_phandle(*ph
);
305 struct of_device
*ofdev
;
311 mdio
= of_get_parent(tbi
);
315 ofdev
= of_find_device_by_node(mdio
);
319 id
= of_get_property(tbi
, "reg", NULL
);
325 bus
= dev_get_drvdata(&ofdev
->dev
);
327 priv
->tbiphy
= bus
->phy_map
[*id
];
/* Ioctl MII Interface */
static int gfar_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
{
	struct gfar_private *priv = netdev_priv(dev);

	if (!netif_running(dev))
		return -EINVAL;

	return phy_mii_ioctl(priv->phydev, if_mii(rq), cmd);
}
351 /* Set up the ethernet device structure, private data,
352 * and anything else we need before we start */
353 static int gfar_probe(struct of_device
*ofdev
,
354 const struct of_device_id
*match
)
357 struct net_device
*dev
= NULL
;
358 struct gfar_private
*priv
= NULL
;
359 DECLARE_MAC_BUF(mac
);
363 /* Create an ethernet device instance */
364 dev
= alloc_etherdev(sizeof (*priv
));
369 priv
= netdev_priv(dev
);
372 priv
->node
= ofdev
->node
;
373 SET_NETDEV_DEV(dev
, &ofdev
->dev
);
375 err
= gfar_of_init(dev
);
380 spin_lock_init(&priv
->txlock
);
381 spin_lock_init(&priv
->rxlock
);
382 spin_lock_init(&priv
->bflock
);
383 INIT_WORK(&priv
->reset_task
, gfar_reset_task
);
385 dev_set_drvdata(&ofdev
->dev
, priv
);
387 /* Stop the DMA engine now, in case it was running before */
388 /* (The firmware could have used it, and left it running). */
391 /* Reset MAC layer */
392 gfar_write(&priv
->regs
->maccfg1
, MACCFG1_SOFT_RESET
);
394 /* We need to delay at least 3 TX clocks */
397 tempval
= (MACCFG1_TX_FLOW
| MACCFG1_RX_FLOW
);
398 gfar_write(&priv
->regs
->maccfg1
, tempval
);
400 /* Initialize MACCFG2. */
401 gfar_write(&priv
->regs
->maccfg2
, MACCFG2_INIT_SETTINGS
);
403 /* Initialize ECNTRL */
404 gfar_write(&priv
->regs
->ecntrl
, ECNTRL_INIT_SETTINGS
);
406 /* Set the dev->base_addr to the gfar reg region */
407 dev
->base_addr
= (unsigned long) (priv
->regs
);
409 SET_NETDEV_DEV(dev
, &ofdev
->dev
);
411 /* Fill in the dev structure */
412 dev
->watchdog_timeo
= TX_TIMEOUT
;
413 netif_napi_add(dev
, &priv
->napi
, gfar_poll
, GFAR_DEV_WEIGHT
);
416 dev
->netdev_ops
= &gfar_netdev_ops
;
417 dev
->ethtool_ops
= &gfar_ethtool_ops
;
419 if (priv
->device_flags
& FSL_GIANFAR_DEV_HAS_CSUM
) {
420 priv
->rx_csum_enable
= 1;
421 dev
->features
|= NETIF_F_IP_CSUM
| NETIF_F_SG
| NETIF_F_HIGHDMA
;
423 priv
->rx_csum_enable
= 0;
427 if (priv
->device_flags
& FSL_GIANFAR_DEV_HAS_VLAN
)
428 dev
->features
|= NETIF_F_HW_VLAN_TX
| NETIF_F_HW_VLAN_RX
;
430 if (priv
->device_flags
& FSL_GIANFAR_DEV_HAS_EXTENDED_HASH
) {
431 priv
->extended_hash
= 1;
432 priv
->hash_width
= 9;
434 priv
->hash_regs
[0] = &priv
->regs
->igaddr0
;
435 priv
->hash_regs
[1] = &priv
->regs
->igaddr1
;
436 priv
->hash_regs
[2] = &priv
->regs
->igaddr2
;
437 priv
->hash_regs
[3] = &priv
->regs
->igaddr3
;
438 priv
->hash_regs
[4] = &priv
->regs
->igaddr4
;
439 priv
->hash_regs
[5] = &priv
->regs
->igaddr5
;
440 priv
->hash_regs
[6] = &priv
->regs
->igaddr6
;
441 priv
->hash_regs
[7] = &priv
->regs
->igaddr7
;
442 priv
->hash_regs
[8] = &priv
->regs
->gaddr0
;
443 priv
->hash_regs
[9] = &priv
->regs
->gaddr1
;
444 priv
->hash_regs
[10] = &priv
->regs
->gaddr2
;
445 priv
->hash_regs
[11] = &priv
->regs
->gaddr3
;
446 priv
->hash_regs
[12] = &priv
->regs
->gaddr4
;
447 priv
->hash_regs
[13] = &priv
->regs
->gaddr5
;
448 priv
->hash_regs
[14] = &priv
->regs
->gaddr6
;
449 priv
->hash_regs
[15] = &priv
->regs
->gaddr7
;
452 priv
->extended_hash
= 0;
453 priv
->hash_width
= 8;
455 priv
->hash_regs
[0] = &priv
->regs
->gaddr0
;
456 priv
->hash_regs
[1] = &priv
->regs
->gaddr1
;
457 priv
->hash_regs
[2] = &priv
->regs
->gaddr2
;
458 priv
->hash_regs
[3] = &priv
->regs
->gaddr3
;
459 priv
->hash_regs
[4] = &priv
->regs
->gaddr4
;
460 priv
->hash_regs
[5] = &priv
->regs
->gaddr5
;
461 priv
->hash_regs
[6] = &priv
->regs
->gaddr6
;
462 priv
->hash_regs
[7] = &priv
->regs
->gaddr7
;
465 if (priv
->device_flags
& FSL_GIANFAR_DEV_HAS_PADDING
)
466 priv
->padding
= DEFAULT_PADDING
;
470 if (dev
->features
& NETIF_F_IP_CSUM
)
471 dev
->hard_header_len
+= GMAC_FCB_LEN
;
473 priv
->rx_buffer_size
= DEFAULT_RX_BUFFER_SIZE
;
474 priv
->tx_ring_size
= DEFAULT_TX_RING_SIZE
;
475 priv
->rx_ring_size
= DEFAULT_RX_RING_SIZE
;
476 priv
->num_txbdfree
= DEFAULT_TX_RING_SIZE
;
478 priv
->txcoalescing
= DEFAULT_TX_COALESCE
;
479 priv
->txic
= DEFAULT_TXIC
;
480 priv
->rxcoalescing
= DEFAULT_RX_COALESCE
;
481 priv
->rxic
= DEFAULT_RXIC
;
483 /* Enable most messages by default */
484 priv
->msg_enable
= (NETIF_MSG_IFUP
<< 1 ) - 1;
486 /* Carrier starts down, phylib will bring it up */
487 netif_carrier_off(dev
);
489 err
= register_netdev(dev
);
492 printk(KERN_ERR
"%s: Cannot register net device, aborting.\n",
497 device_init_wakeup(&dev
->dev
,
498 priv
->device_flags
& FSL_GIANFAR_DEV_HAS_MAGIC_PACKET
);
500 /* fill out IRQ number and name fields */
501 len_devname
= strlen(dev
->name
);
502 strncpy(&priv
->int_name_tx
[0], dev
->name
, len_devname
);
503 if (priv
->device_flags
& FSL_GIANFAR_DEV_HAS_MULTI_INTR
) {
504 strncpy(&priv
->int_name_tx
[len_devname
],
505 "_tx", sizeof("_tx") + 1);
507 strncpy(&priv
->int_name_rx
[0], dev
->name
, len_devname
);
508 strncpy(&priv
->int_name_rx
[len_devname
],
509 "_rx", sizeof("_rx") + 1);
511 strncpy(&priv
->int_name_er
[0], dev
->name
, len_devname
);
512 strncpy(&priv
->int_name_er
[len_devname
],
513 "_er", sizeof("_er") + 1);
515 priv
->int_name_tx
[len_devname
] = '\0';
517 /* Create all the sysfs files */
518 gfar_init_sysfs(dev
);
520 /* Print out the device info */
521 printk(KERN_INFO DEVICE_NAME
"%pM\n", dev
->name
, dev
->dev_addr
);
523 /* Even more device info helps when determining which kernel */
524 /* provided which set of benchmarks. */
525 printk(KERN_INFO
"%s: Running with NAPI enabled\n", dev
->name
);
526 printk(KERN_INFO
"%s: %d/%d RX/TX BD ring size\n",
527 dev
->name
, priv
->rx_ring_size
, priv
->tx_ring_size
);
static int gfar_remove(struct of_device *ofdev)
{
	struct gfar_private *priv = dev_get_drvdata(&ofdev->dev);

	dev_set_drvdata(&ofdev->dev, NULL);

	free_netdev(priv->ndev);

	return 0;
}
551 static int gfar_suspend(struct of_device
*ofdev
, pm_message_t state
)
553 struct gfar_private
*priv
= dev_get_drvdata(&ofdev
->dev
);
554 struct net_device
*dev
= priv
->ndev
;
558 int magic_packet
= priv
->wol_en
&&
559 (priv
->device_flags
& FSL_GIANFAR_DEV_HAS_MAGIC_PACKET
);
561 netif_device_detach(dev
);
563 if (netif_running(dev
)) {
564 spin_lock_irqsave(&priv
->txlock
, flags
);
565 spin_lock(&priv
->rxlock
);
567 gfar_halt_nodisable(dev
);
569 /* Disable Tx, and Rx if wake-on-LAN is disabled. */
570 tempval
= gfar_read(&priv
->regs
->maccfg1
);
572 tempval
&= ~MACCFG1_TX_EN
;
575 tempval
&= ~MACCFG1_RX_EN
;
577 gfar_write(&priv
->regs
->maccfg1
, tempval
);
579 spin_unlock(&priv
->rxlock
);
580 spin_unlock_irqrestore(&priv
->txlock
, flags
);
582 napi_disable(&priv
->napi
);
585 /* Enable interrupt on Magic Packet */
586 gfar_write(&priv
->regs
->imask
, IMASK_MAG
);
588 /* Enable Magic Packet mode */
589 tempval
= gfar_read(&priv
->regs
->maccfg2
);
590 tempval
|= MACCFG2_MPEN
;
591 gfar_write(&priv
->regs
->maccfg2
, tempval
);
593 phy_stop(priv
->phydev
);
600 static int gfar_resume(struct of_device
*ofdev
)
602 struct gfar_private
*priv
= dev_get_drvdata(&ofdev
->dev
);
603 struct net_device
*dev
= priv
->ndev
;
606 int magic_packet
= priv
->wol_en
&&
607 (priv
->device_flags
& FSL_GIANFAR_DEV_HAS_MAGIC_PACKET
);
609 if (!netif_running(dev
)) {
610 netif_device_attach(dev
);
614 if (!magic_packet
&& priv
->phydev
)
615 phy_start(priv
->phydev
);
617 /* Disable Magic Packet mode, in case something
621 spin_lock_irqsave(&priv
->txlock
, flags
);
622 spin_lock(&priv
->rxlock
);
624 tempval
= gfar_read(&priv
->regs
->maccfg2
);
625 tempval
&= ~MACCFG2_MPEN
;
626 gfar_write(&priv
->regs
->maccfg2
, tempval
);
630 spin_unlock(&priv
->rxlock
);
631 spin_unlock_irqrestore(&priv
->txlock
, flags
);
633 netif_device_attach(dev
);
635 napi_enable(&priv
->napi
);
640 #define gfar_suspend NULL
641 #define gfar_resume NULL
/* Reads the controller's registers to determine what interface
 * connects it to the PHY.
 */
static phy_interface_t gfar_get_interface(struct net_device *dev)
{
	struct gfar_private *priv = netdev_priv(dev);
	u32 ecntrl = gfar_read(&priv->regs->ecntrl);

	if (ecntrl & ECNTRL_SGMII_MODE)
		return PHY_INTERFACE_MODE_SGMII;

	if (ecntrl & ECNTRL_TBI_MODE) {
		if (ecntrl & ECNTRL_REDUCED_MODE)
			return PHY_INTERFACE_MODE_RTBI;
		else
			return PHY_INTERFACE_MODE_TBI;
	}

	if (ecntrl & ECNTRL_REDUCED_MODE) {
		if (ecntrl & ECNTRL_REDUCED_MII_MODE)
			return PHY_INTERFACE_MODE_RMII;
		else {
			phy_interface_t interface = priv->interface;

			/*
			 * This isn't autodetected right now, so it must
			 * be set by the device tree or platform code.
			 */
			if (interface == PHY_INTERFACE_MODE_RGMII_ID)
				return PHY_INTERFACE_MODE_RGMII_ID;

			return PHY_INTERFACE_MODE_RGMII;
		}
	}

	if (priv->device_flags & FSL_GIANFAR_DEV_HAS_GIGABIT)
		return PHY_INTERFACE_MODE_GMII;

	return PHY_INTERFACE_MODE_MII;
}
/* Initializes driver's PHY state, and attaches to the PHY.
 * Returns 0 on success.
 */
static int init_phy(struct net_device *dev)
{
	struct gfar_private *priv = netdev_priv(dev);
	uint gigabit_support =
		priv->device_flags & FSL_GIANFAR_DEV_HAS_GIGABIT ?
		SUPPORTED_1000baseT_Full : 0;
	struct phy_device *phydev;
	phy_interface_t interface;

	priv->oldlink = 0;
	priv->oldspeed = 0;
	priv->oldduplex = -1;

	interface = gfar_get_interface(dev);

	phydev = phy_connect(dev, priv->phy_bus_id, &adjust_link, 0, interface);

	if (interface == PHY_INTERFACE_MODE_SGMII)
		gfar_configure_serdes(dev);

	if (IS_ERR(phydev)) {
		printk(KERN_ERR "%s: Could not attach to PHY\n", dev->name);
		return PTR_ERR(phydev);
	}

	/* Remove any features not supported by the controller */
	phydev->supported &= (GFAR_SUPPORTED | gigabit_support);
	phydev->advertising = phydev->supported;

	priv->phydev = phydev;

	return 0;
}
/*
 * Initialize TBI PHY interface for communicating with the
 * SERDES lynx PHY on the chip.  We communicate with this PHY
 * through the MDIO bus on each controller, treating it as a
 * "normal" PHY at the address found in the TBIPA register.  We assume
 * that the TBIPA register is valid.  Either the MDIO bus code will set
 * it to a value that doesn't conflict with other PHYs on the bus, or the
 * value doesn't matter, as there are no other PHYs on the bus.
 */
static void gfar_configure_serdes(struct net_device *dev)
{
	struct gfar_private *priv = netdev_priv(dev);

	if (!priv->tbiphy) {
		printk(KERN_WARNING "SGMII mode requires that the device "
				"tree specify a tbi-handle\n");
		return;
	}

	/*
	 * If the link is already up, we must already be ok, and don't need to
	 * configure and reset the TBI<->SerDes link.  Maybe U-Boot configured
	 * everything for us?  Resetting it takes the link down and requires
	 * several seconds for it to come back.
	 */
	if (phy_read(priv->tbiphy, MII_BMSR) & BMSR_LSTATUS)
		return;

	/* Single clk mode, mii mode off(for serdes communication) */
	phy_write(priv->tbiphy, MII_TBICON, TBICON_CLK_SELECT);

	phy_write(priv->tbiphy, MII_ADVERTISE,
			ADVERTISE_1000XFULL | ADVERTISE_1000XPAUSE |
			ADVERTISE_1000XPSE_ASYM);

	phy_write(priv->tbiphy, MII_BMCR, BMCR_ANENABLE |
			BMCR_ANRESTART | BMCR_FULLDPLX | BMCR_SPEED1000);
}
static void init_registers(struct net_device *dev)
{
	struct gfar_private *priv = netdev_priv(dev);

	gfar_write(&priv->regs->ievent, IEVENT_INIT_CLEAR);

	/* Initialize IMASK */
	gfar_write(&priv->regs->imask, IMASK_INIT_CLEAR);

	/* Init hash registers to zero */
	gfar_write(&priv->regs->igaddr0, 0);
	gfar_write(&priv->regs->igaddr1, 0);
	gfar_write(&priv->regs->igaddr2, 0);
	gfar_write(&priv->regs->igaddr3, 0);
	gfar_write(&priv->regs->igaddr4, 0);
	gfar_write(&priv->regs->igaddr5, 0);
	gfar_write(&priv->regs->igaddr6, 0);
	gfar_write(&priv->regs->igaddr7, 0);

	gfar_write(&priv->regs->gaddr0, 0);
	gfar_write(&priv->regs->gaddr1, 0);
	gfar_write(&priv->regs->gaddr2, 0);
	gfar_write(&priv->regs->gaddr3, 0);
	gfar_write(&priv->regs->gaddr4, 0);
	gfar_write(&priv->regs->gaddr5, 0);
	gfar_write(&priv->regs->gaddr6, 0);
	gfar_write(&priv->regs->gaddr7, 0);

	/* Zero out the rmon mib registers if it has them */
	if (priv->device_flags & FSL_GIANFAR_DEV_HAS_RMON) {
		memset_io(&(priv->regs->rmon), 0, sizeof (struct rmon_mib));

		/* Mask off the CAM interrupts */
		gfar_write(&priv->regs->rmon.cam1, 0xffffffff);
		gfar_write(&priv->regs->rmon.cam2, 0xffffffff);
	}

	/* Initialize the max receive buffer length */
	gfar_write(&priv->regs->mrblr, priv->rx_buffer_size);

	/* Initialize the Minimum Frame Length Register */
	gfar_write(&priv->regs->minflr, MINFLR_INIT_SETTINGS);
}
/* Halt the receive and transmit queues */
static void gfar_halt_nodisable(struct net_device *dev)
{
	struct gfar_private *priv = netdev_priv(dev);
	struct gfar __iomem *regs = priv->regs;
	u32 tempval;

	/* Mask all interrupts */
	gfar_write(&regs->imask, IMASK_INIT_CLEAR);

	/* Clear all interrupts */
	gfar_write(&regs->ievent, IEVENT_INIT_CLEAR);

	/* Stop the DMA, and wait for it to stop */
	tempval = gfar_read(&priv->regs->dmactrl);
	if ((tempval & (DMACTRL_GRS | DMACTRL_GTS))
	    != (DMACTRL_GRS | DMACTRL_GTS)) {
		tempval |= (DMACTRL_GRS | DMACTRL_GTS);
		gfar_write(&priv->regs->dmactrl, tempval);

		while (!(gfar_read(&priv->regs->ievent) &
			 (IEVENT_GRSC | IEVENT_GTSC)))
			cpu_relax();
	}
}
/* Halt the receive and transmit queues */
void gfar_halt(struct net_device *dev)
{
	struct gfar_private *priv = netdev_priv(dev);
	struct gfar __iomem *regs = priv->regs;
	u32 tempval;

	gfar_halt_nodisable(dev);

	/* Disable Rx and Tx */
	tempval = gfar_read(&regs->maccfg1);
	tempval &= ~(MACCFG1_RX_EN | MACCFG1_TX_EN);
	gfar_write(&regs->maccfg1, tempval);
}
849 void stop_gfar(struct net_device
*dev
)
851 struct gfar_private
*priv
= netdev_priv(dev
);
852 struct gfar __iomem
*regs
= priv
->regs
;
855 phy_stop(priv
->phydev
);
858 spin_lock_irqsave(&priv
->txlock
, flags
);
859 spin_lock(&priv
->rxlock
);
863 spin_unlock(&priv
->rxlock
);
864 spin_unlock_irqrestore(&priv
->txlock
, flags
);
867 if (priv
->device_flags
& FSL_GIANFAR_DEV_HAS_MULTI_INTR
) {
868 free_irq(priv
->interruptError
, dev
);
869 free_irq(priv
->interruptTransmit
, dev
);
870 free_irq(priv
->interruptReceive
, dev
);
872 free_irq(priv
->interruptTransmit
, dev
);
875 free_skb_resources(priv
);
877 dma_free_coherent(&priv
->ofdev
->dev
,
878 sizeof(struct txbd8
)*priv
->tx_ring_size
879 + sizeof(struct rxbd8
)*priv
->rx_ring_size
,
881 gfar_read(®s
->tbase0
));
884 /* If there are any tx skbs or rx skbs still around, free them.
885 * Then free tx_skbuff and rx_skbuff */
886 static void free_skb_resources(struct gfar_private
*priv
)
892 /* Go through all the buffer descriptors and free their data buffers */
893 txbdp
= priv
->tx_bd_base
;
895 for (i
= 0; i
< priv
->tx_ring_size
; i
++) {
896 if (!priv
->tx_skbuff
[i
])
899 dma_unmap_single(&priv
->ofdev
->dev
, txbdp
->bufPtr
,
900 txbdp
->length
, DMA_TO_DEVICE
);
902 for (j
= 0; j
< skb_shinfo(priv
->tx_skbuff
[i
])->nr_frags
; j
++) {
904 dma_unmap_page(&priv
->ofdev
->dev
, txbdp
->bufPtr
,
905 txbdp
->length
, DMA_TO_DEVICE
);
908 dev_kfree_skb_any(priv
->tx_skbuff
[i
]);
909 priv
->tx_skbuff
[i
] = NULL
;
912 kfree(priv
->tx_skbuff
);
914 rxbdp
= priv
->rx_bd_base
;
916 /* rx_skbuff is not guaranteed to be allocated, so only
917 * free it and its contents if it is allocated */
918 if(priv
->rx_skbuff
!= NULL
) {
919 for (i
= 0; i
< priv
->rx_ring_size
; i
++) {
920 if (priv
->rx_skbuff
[i
]) {
921 dma_unmap_single(&priv
->ofdev
->dev
, rxbdp
->bufPtr
,
922 priv
->rx_buffer_size
,
925 dev_kfree_skb_any(priv
->rx_skbuff
[i
]);
926 priv
->rx_skbuff
[i
] = NULL
;
935 kfree(priv
->rx_skbuff
);
void gfar_start(struct net_device *dev)
{
	struct gfar_private *priv = netdev_priv(dev);
	struct gfar __iomem *regs = priv->regs;
	u32 tempval;

	/* Enable Rx and Tx in MACCFG1 */
	tempval = gfar_read(&regs->maccfg1);
	tempval |= (MACCFG1_RX_EN | MACCFG1_TX_EN);
	gfar_write(&regs->maccfg1, tempval);

	/* Initialize DMACTRL to have WWR and WOP */
	tempval = gfar_read(&priv->regs->dmactrl);
	tempval |= DMACTRL_INIT_SETTINGS;
	gfar_write(&priv->regs->dmactrl, tempval);

	/* Make sure we aren't stopped */
	tempval = gfar_read(&priv->regs->dmactrl);
	tempval &= ~(DMACTRL_GRS | DMACTRL_GTS);
	gfar_write(&priv->regs->dmactrl, tempval);

	/* Clear THLT/RHLT, so that the DMA starts polling now */
	gfar_write(&regs->tstat, TSTAT_CLEAR_THALT);
	gfar_write(&regs->rstat, RSTAT_CLEAR_RHALT);

	/* Unmask the interrupts we look for */
	gfar_write(&regs->imask, IMASK_DEFAULT);

	dev->trans_start = jiffies;
}
970 /* Bring the controller up and running */
971 int startup_gfar(struct net_device
*dev
)
978 struct gfar_private
*priv
= netdev_priv(dev
);
979 struct gfar __iomem
*regs
= priv
->regs
;
984 gfar_write(®s
->imask
, IMASK_INIT_CLEAR
);
986 /* Allocate memory for the buffer descriptors */
987 vaddr
= (unsigned long) dma_alloc_coherent(&priv
->ofdev
->dev
,
988 sizeof (struct txbd8
) * priv
->tx_ring_size
+
989 sizeof (struct rxbd8
) * priv
->rx_ring_size
,
993 if (netif_msg_ifup(priv
))
994 printk(KERN_ERR
"%s: Could not allocate buffer descriptors!\n",
999 priv
->tx_bd_base
= (struct txbd8
*) vaddr
;
1001 /* enet DMA only understands physical addresses */
1002 gfar_write(®s
->tbase0
, addr
);
1004 /* Start the rx descriptor ring where the tx ring leaves off */
1005 addr
= addr
+ sizeof (struct txbd8
) * priv
->tx_ring_size
;
1006 vaddr
= vaddr
+ sizeof (struct txbd8
) * priv
->tx_ring_size
;
1007 priv
->rx_bd_base
= (struct rxbd8
*) vaddr
;
1008 gfar_write(®s
->rbase0
, addr
);
1010 /* Setup the skbuff rings */
1012 (struct sk_buff
**) kmalloc(sizeof (struct sk_buff
*) *
1013 priv
->tx_ring_size
, GFP_KERNEL
);
1015 if (NULL
== priv
->tx_skbuff
) {
1016 if (netif_msg_ifup(priv
))
1017 printk(KERN_ERR
"%s: Could not allocate tx_skbuff\n",
1023 for (i
= 0; i
< priv
->tx_ring_size
; i
++)
1024 priv
->tx_skbuff
[i
] = NULL
;
1027 (struct sk_buff
**) kmalloc(sizeof (struct sk_buff
*) *
1028 priv
->rx_ring_size
, GFP_KERNEL
);
1030 if (NULL
== priv
->rx_skbuff
) {
1031 if (netif_msg_ifup(priv
))
1032 printk(KERN_ERR
"%s: Could not allocate rx_skbuff\n",
1038 for (i
= 0; i
< priv
->rx_ring_size
; i
++)
1039 priv
->rx_skbuff
[i
] = NULL
;
1041 /* Initialize some variables in our dev structure */
1042 priv
->num_txbdfree
= priv
->tx_ring_size
;
1043 priv
->dirty_tx
= priv
->cur_tx
= priv
->tx_bd_base
;
1044 priv
->cur_rx
= priv
->rx_bd_base
;
1045 priv
->skb_curtx
= priv
->skb_dirtytx
= 0;
1046 priv
->skb_currx
= 0;
1048 /* Initialize Transmit Descriptor Ring */
1049 txbdp
= priv
->tx_bd_base
;
1050 for (i
= 0; i
< priv
->tx_ring_size
; i
++) {
1056 /* Set the last descriptor in the ring to indicate wrap */
1058 txbdp
->status
|= TXBD_WRAP
;
1060 rxbdp
= priv
->rx_bd_base
;
1061 for (i
= 0; i
< priv
->rx_ring_size
; i
++) {
1062 struct sk_buff
*skb
;
1064 skb
= gfar_new_skb(dev
);
1067 printk(KERN_ERR
"%s: Can't allocate RX buffers\n",
1070 goto err_rxalloc_fail
;
1073 priv
->rx_skbuff
[i
] = skb
;
1075 gfar_new_rxbdp(dev
, rxbdp
, skb
);
1080 /* Set the last descriptor in the ring to wrap */
1082 rxbdp
->status
|= RXBD_WRAP
;
1084 /* If the device has multiple interrupts, register for
1085 * them. Otherwise, only register for the one */
1086 if (priv
->device_flags
& FSL_GIANFAR_DEV_HAS_MULTI_INTR
) {
1087 /* Install our interrupt handlers for Error,
1088 * Transmit, and Receive */
1089 if (request_irq(priv
->interruptError
, gfar_error
,
1090 0, priv
->int_name_er
, dev
) < 0) {
1091 if (netif_msg_intr(priv
))
1092 printk(KERN_ERR
"%s: Can't get IRQ %d\n",
1093 dev
->name
, priv
->interruptError
);
1099 if (request_irq(priv
->interruptTransmit
, gfar_transmit
,
1100 0, priv
->int_name_tx
, dev
) < 0) {
1101 if (netif_msg_intr(priv
))
1102 printk(KERN_ERR
"%s: Can't get IRQ %d\n",
1103 dev
->name
, priv
->interruptTransmit
);
1110 if (request_irq(priv
->interruptReceive
, gfar_receive
,
1111 0, priv
->int_name_rx
, dev
) < 0) {
1112 if (netif_msg_intr(priv
))
1113 printk(KERN_ERR
"%s: Can't get IRQ %d (receive0)\n",
1114 dev
->name
, priv
->interruptReceive
);
1120 if (request_irq(priv
->interruptTransmit
, gfar_interrupt
,
1121 0, priv
->int_name_tx
, dev
) < 0) {
1122 if (netif_msg_intr(priv
))
1123 printk(KERN_ERR
"%s: Can't get IRQ %d\n",
1124 dev
->name
, priv
->interruptTransmit
);
1131 phy_start(priv
->phydev
);
1133 /* Configure the coalescing support */
1134 gfar_write(®s
->txic
, 0);
1135 if (priv
->txcoalescing
)
1136 gfar_write(®s
->txic
, priv
->txic
);
1138 gfar_write(®s
->rxic
, 0);
1139 if (priv
->rxcoalescing
)
1140 gfar_write(®s
->rxic
, priv
->rxic
);
1142 if (priv
->rx_csum_enable
)
1143 rctrl
|= RCTRL_CHECKSUMMING
;
1145 if (priv
->extended_hash
) {
1146 rctrl
|= RCTRL_EXTHASH
;
1148 gfar_clear_exact_match(dev
);
1149 rctrl
|= RCTRL_EMEN
;
1152 if (priv
->padding
) {
1153 rctrl
&= ~RCTRL_PAL_MASK
;
1154 rctrl
|= RCTRL_PADDING(priv
->padding
);
1157 /* Init rctrl based on our settings */
1158 gfar_write(&priv
->regs
->rctrl
, rctrl
);
1160 if (dev
->features
& NETIF_F_IP_CSUM
)
1161 gfar_write(&priv
->regs
->tctrl
, TCTRL_INIT_CSUM
);
1163 /* Set the extraction length and index */
1164 attrs
= ATTRELI_EL(priv
->rx_stash_size
) |
1165 ATTRELI_EI(priv
->rx_stash_index
);
1167 gfar_write(&priv
->regs
->attreli
, attrs
);
1169 /* Start with defaults, and add stashing or locking
1170 * depending on the approprate variables */
1171 attrs
= ATTR_INIT_SETTINGS
;
1173 if (priv
->bd_stash_en
)
1174 attrs
|= ATTR_BDSTASH
;
1176 if (priv
->rx_stash_size
!= 0)
1177 attrs
|= ATTR_BUFSTASH
;
1179 gfar_write(&priv
->regs
->attr
, attrs
);
1181 gfar_write(&priv
->regs
->fifo_tx_thr
, priv
->fifo_threshold
);
1182 gfar_write(&priv
->regs
->fifo_tx_starve
, priv
->fifo_starve
);
1183 gfar_write(&priv
->regs
->fifo_tx_starve_shutoff
, priv
->fifo_starve_off
);
1185 /* Start the controller */
1191 free_irq(priv
->interruptTransmit
, dev
);
1193 free_irq(priv
->interruptError
, dev
);
1197 free_skb_resources(priv
);
1199 dma_free_coherent(&priv
->ofdev
->dev
,
1200 sizeof(struct txbd8
)*priv
->tx_ring_size
1201 + sizeof(struct rxbd8
)*priv
->rx_ring_size
,
1203 gfar_read(®s
->tbase0
));
/* Called when something needs to use the ethernet device */
/* Returns 0 for success. */
static int gfar_enet_open(struct net_device *dev)
{
	struct gfar_private *priv = netdev_priv(dev);
	int err;

	napi_enable(&priv->napi);

	skb_queue_head_init(&priv->rx_recycle);

	/* Initialize a bunch of registers */
	init_registers(dev);

	gfar_set_mac_address(dev);

	err = init_phy(dev);
	if (err) {
		napi_disable(&priv->napi);
		return err;
	}

	err = startup_gfar(dev);
	if (err) {
		napi_disable(&priv->napi);
		return err;
	}

	netif_start_queue(dev);

	device_set_wakeup_enable(&dev->dev, priv->wol_en);

	return err;
}
static inline struct txfcb *gfar_add_fcb(struct sk_buff *skb)
{
	struct txfcb *fcb = (struct txfcb *)skb_push(skb, GMAC_FCB_LEN);

	cacheable_memzero(fcb, GMAC_FCB_LEN);

	return fcb;
}
static inline void gfar_tx_checksum(struct sk_buff *skb, struct txfcb *fcb)
{
	u8 flags = 0;

	/* If we're here, it's a IP packet with a TCP or UDP
	 * payload.  We set it to checksum, using a pseudo-header
	 * we provide
	 */
	flags = TXFCB_DEFAULT;

	/* Tell the controller what the protocol is */
	/* And provide the already calculated phcs */
	if (ip_hdr(skb)->protocol == IPPROTO_UDP) {
		flags |= TXFCB_UDP;
		fcb->phcs = udp_hdr(skb)->check;
	} else
		fcb->phcs = tcp_hdr(skb)->check;

	/* l3os is the distance between the start of the
	 * frame (skb->data) and the start of the IP hdr.
	 * l4os is the distance between the start of the
	 * l3 hdr and the l4 hdr */
	fcb->l3os = (u16)(skb_network_offset(skb) - GMAC_FCB_LEN);
	fcb->l4os = skb_network_header_len(skb);

	fcb->flags = flags;
}
void inline gfar_tx_vlan(struct sk_buff *skb, struct txfcb *fcb)
{
	fcb->flags |= TXFCB_VLN;
	fcb->vlctl = vlan_tx_tag_get(skb);
}
static inline struct txbd8 *skip_txbd(struct txbd8 *bdp, int stride,
		struct txbd8 *base, int ring_size)
{
	struct txbd8 *new_bd = bdp + stride;

	return (new_bd >= (base + ring_size)) ? (new_bd - ring_size) : new_bd;
}

static inline struct txbd8 *next_txbd(struct txbd8 *bdp, struct txbd8 *base,
		int ring_size)
{
	return skip_txbd(bdp, 1, base, ring_size);
}
1300 /* This is called by the kernel when a frame is ready for transmission. */
1301 /* It is pointed to by the dev->hard_start_xmit function pointer */
1302 static int gfar_start_xmit(struct sk_buff
*skb
, struct net_device
*dev
)
1304 struct gfar_private
*priv
= netdev_priv(dev
);
1305 struct txfcb
*fcb
= NULL
;
1306 struct txbd8
*txbdp
, *txbdp_start
, *base
;
1310 unsigned long flags
;
1311 unsigned int nr_frags
, length
;
1313 base
= priv
->tx_bd_base
;
1315 /* make space for additional header when fcb is needed */
1316 if (((skb
->ip_summed
== CHECKSUM_PARTIAL
) ||
1317 (priv
->vlgrp
&& vlan_tx_tag_present(skb
))) &&
1318 (skb_headroom(skb
) < GMAC_FCB_LEN
)) {
1319 struct sk_buff
*skb_new
;
1321 skb_new
= skb_realloc_headroom(skb
, GMAC_FCB_LEN
);
1323 dev
->stats
.tx_errors
++;
1325 return NETDEV_TX_OK
;
1331 /* total number of fragments in the SKB */
1332 nr_frags
= skb_shinfo(skb
)->nr_frags
;
1334 spin_lock_irqsave(&priv
->txlock
, flags
);
1336 /* check if there is space to queue this packet */
1337 if ((nr_frags
+1) > priv
->num_txbdfree
) {
1338 /* no space, stop the queue */
1339 netif_stop_queue(dev
);
1340 dev
->stats
.tx_fifo_errors
++;
1341 spin_unlock_irqrestore(&priv
->txlock
, flags
);
1342 return NETDEV_TX_BUSY
;
1345 /* Update transmit stats */
1346 dev
->stats
.tx_bytes
+= skb
->len
;
1348 txbdp
= txbdp_start
= priv
->cur_tx
;
1350 if (nr_frags
== 0) {
1351 lstatus
= txbdp
->lstatus
| BD_LFLAG(TXBD_LAST
| TXBD_INTERRUPT
);
1353 /* Place the fragment addresses and lengths into the TxBDs */
1354 for (i
= 0; i
< nr_frags
; i
++) {
1355 /* Point at the next BD, wrapping as needed */
1356 txbdp
= next_txbd(txbdp
, base
, priv
->tx_ring_size
);
1358 length
= skb_shinfo(skb
)->frags
[i
].size
;
1360 lstatus
= txbdp
->lstatus
| length
|
1361 BD_LFLAG(TXBD_READY
);
1363 /* Handle the last BD specially */
1364 if (i
== nr_frags
- 1)
1365 lstatus
|= BD_LFLAG(TXBD_LAST
| TXBD_INTERRUPT
);
1367 bufaddr
= dma_map_page(&priv
->ofdev
->dev
,
1368 skb_shinfo(skb
)->frags
[i
].page
,
1369 skb_shinfo(skb
)->frags
[i
].page_offset
,
1373 /* set the TxBD length and buffer pointer */
1374 txbdp
->bufPtr
= bufaddr
;
1375 txbdp
->lstatus
= lstatus
;
1378 lstatus
= txbdp_start
->lstatus
;
1381 /* Set up checksumming */
1382 if (CHECKSUM_PARTIAL
== skb
->ip_summed
) {
1383 fcb
= gfar_add_fcb(skb
);
1384 lstatus
|= BD_LFLAG(TXBD_TOE
);
1385 gfar_tx_checksum(skb
, fcb
);
1388 if (priv
->vlgrp
&& vlan_tx_tag_present(skb
)) {
1389 if (unlikely(NULL
== fcb
)) {
1390 fcb
= gfar_add_fcb(skb
);
1391 lstatus
|= BD_LFLAG(TXBD_TOE
);
1394 gfar_tx_vlan(skb
, fcb
);
1397 /* setup the TxBD length and buffer pointer for the first BD */
1398 priv
->tx_skbuff
[priv
->skb_curtx
] = skb
;
1399 txbdp_start
->bufPtr
= dma_map_single(&priv
->ofdev
->dev
, skb
->data
,
1400 skb_headlen(skb
), DMA_TO_DEVICE
);
1402 lstatus
|= BD_LFLAG(TXBD_CRC
| TXBD_READY
) | skb_headlen(skb
);
1405 * The powerpc-specific eieio() is used, as wmb() has too strong
1406 * semantics (it requires synchronization between cacheable and
1407 * uncacheable mappings, which eieio doesn't provide and which we
1408 * don't need), thus requiring a more expensive sync instruction. At
1409 * some point, the set of architecture-independent barrier functions
1410 * should be expanded to include weaker barriers.
1414 txbdp_start
->lstatus
= lstatus
;
1416 /* Update the current skb pointer to the next entry we will use
1417 * (wrapping if necessary) */
1418 priv
->skb_curtx
= (priv
->skb_curtx
+ 1) &
1419 TX_RING_MOD_MASK(priv
->tx_ring_size
);
1421 priv
->cur_tx
= next_txbd(txbdp
, base
, priv
->tx_ring_size
);
1423 /* reduce TxBD free count */
1424 priv
->num_txbdfree
-= (nr_frags
+ 1);
1426 dev
->trans_start
= jiffies
;
1428 /* If the next BD still needs to be cleaned up, then the bds
1429 are full. We need to tell the kernel to stop sending us stuff. */
1430 if (!priv
->num_txbdfree
) {
1431 netif_stop_queue(dev
);
1433 dev
->stats
.tx_fifo_errors
++;
1436 /* Tell the DMA to go go go */
1437 gfar_write(&priv
->regs
->tstat
, TSTAT_CLEAR_THALT
);
1440 spin_unlock_irqrestore(&priv
->txlock
, flags
);
1442 return NETDEV_TX_OK
;
/* Stops the kernel queue, and halts the controller */
static int gfar_close(struct net_device *dev)
{
	struct gfar_private *priv = netdev_priv(dev);

	napi_disable(&priv->napi);

	skb_queue_purge(&priv->rx_recycle);
	cancel_work_sync(&priv->reset_task);
	stop_gfar(dev);

	/* Disconnect from the PHY */
	phy_disconnect(priv->phydev);
	priv->phydev = NULL;

	netif_stop_queue(dev);

	return 0;
}
/* Changes the mac address if the controller is not running. */
static int gfar_set_mac_address(struct net_device *dev)
{
	gfar_set_mac_for_addr(dev, 0, dev->dev_addr);

	return 0;
}
1474 /* Enables and disables VLAN insertion/extraction */
1475 static void gfar_vlan_rx_register(struct net_device
*dev
,
1476 struct vlan_group
*grp
)
1478 struct gfar_private
*priv
= netdev_priv(dev
);
1479 unsigned long flags
;
1482 spin_lock_irqsave(&priv
->rxlock
, flags
);
1487 /* Enable VLAN tag insertion */
1488 tempval
= gfar_read(&priv
->regs
->tctrl
);
1489 tempval
|= TCTRL_VLINS
;
1491 gfar_write(&priv
->regs
->tctrl
, tempval
);
1493 /* Enable VLAN tag extraction */
1494 tempval
= gfar_read(&priv
->regs
->rctrl
);
1495 tempval
|= RCTRL_VLEX
;
1496 tempval
|= (RCTRL_VLEX
| RCTRL_PRSDEP_INIT
);
1497 gfar_write(&priv
->regs
->rctrl
, tempval
);
1499 /* Disable VLAN tag insertion */
1500 tempval
= gfar_read(&priv
->regs
->tctrl
);
1501 tempval
&= ~TCTRL_VLINS
;
1502 gfar_write(&priv
->regs
->tctrl
, tempval
);
1504 /* Disable VLAN tag extraction */
1505 tempval
= gfar_read(&priv
->regs
->rctrl
);
1506 tempval
&= ~RCTRL_VLEX
;
1507 /* If parse is no longer required, then disable parser */
1508 if (tempval
& RCTRL_REQ_PARSER
)
1509 tempval
|= RCTRL_PRSDEP_INIT
;
1511 tempval
&= ~RCTRL_PRSDEP_INIT
;
1512 gfar_write(&priv
->regs
->rctrl
, tempval
);
1515 gfar_change_mtu(dev
, dev
->mtu
);
1517 spin_unlock_irqrestore(&priv
->rxlock
, flags
);
1520 static int gfar_change_mtu(struct net_device
*dev
, int new_mtu
)
1522 int tempsize
, tempval
;
1523 struct gfar_private
*priv
= netdev_priv(dev
);
1524 int oldsize
= priv
->rx_buffer_size
;
1525 int frame_size
= new_mtu
+ ETH_HLEN
;
1528 frame_size
+= VLAN_HLEN
;
1530 if ((frame_size
< 64) || (frame_size
> JUMBO_FRAME_SIZE
)) {
1531 if (netif_msg_drv(priv
))
1532 printk(KERN_ERR
"%s: Invalid MTU setting\n",
1537 if (gfar_uses_fcb(priv
))
1538 frame_size
+= GMAC_FCB_LEN
;
1540 frame_size
+= priv
->padding
;
1543 (frame_size
& ~(INCREMENTAL_BUFFER_SIZE
- 1)) +
1544 INCREMENTAL_BUFFER_SIZE
;
1546 /* Only stop and start the controller if it isn't already
1547 * stopped, and we changed something */
1548 if ((oldsize
!= tempsize
) && (dev
->flags
& IFF_UP
))
1551 priv
->rx_buffer_size
= tempsize
;
1555 gfar_write(&priv
->regs
->mrblr
, priv
->rx_buffer_size
);
1556 gfar_write(&priv
->regs
->maxfrm
, priv
->rx_buffer_size
);
1558 /* If the mtu is larger than the max size for standard
1559 * ethernet frames (ie, a jumbo frame), then set maccfg2
1560 * to allow huge frames, and to check the length */
1561 tempval
= gfar_read(&priv
->regs
->maccfg2
);
1563 if (priv
->rx_buffer_size
> DEFAULT_RX_BUFFER_SIZE
)
1564 tempval
|= (MACCFG2_HUGEFRAME
| MACCFG2_LENGTHCHECK
);
1566 tempval
&= ~(MACCFG2_HUGEFRAME
| MACCFG2_LENGTHCHECK
);
1568 gfar_write(&priv
->regs
->maccfg2
, tempval
);
1570 if ((oldsize
!= tempsize
) && (dev
->flags
& IFF_UP
))
/* gfar_reset_task gets scheduled when a packet has not been
 * transmitted after a set amount of time.
 * For now, assume that clearing out all the structures, and
 * starting over will fix the problem.
 */
static void gfar_reset_task(struct work_struct *work)
{
	struct gfar_private *priv = container_of(work, struct gfar_private,
			reset_task);
	struct net_device *dev = priv->ndev;

	if (dev->flags & IFF_UP) {
		netif_stop_queue(dev);
		stop_gfar(dev);
		startup_gfar(dev);
		netif_start_queue(dev);
	}

	netif_tx_schedule_all(dev);
}
static void gfar_timeout(struct net_device *dev)
{
	struct gfar_private *priv = netdev_priv(dev);

	dev->stats.tx_errors++;
	schedule_work(&priv->reset_task);
}
1605 /* Interrupt Handler for Transmit complete */
1606 static int gfar_clean_tx_ring(struct net_device
*dev
)
1608 struct gfar_private
*priv
= netdev_priv(dev
);
1610 struct txbd8
*lbdp
= NULL
;
1611 struct txbd8
*base
= priv
->tx_bd_base
;
1612 struct sk_buff
*skb
;
1614 int tx_ring_size
= priv
->tx_ring_size
;
1620 bdp
= priv
->dirty_tx
;
1621 skb_dirtytx
= priv
->skb_dirtytx
;
1623 while ((skb
= priv
->tx_skbuff
[skb_dirtytx
])) {
1624 frags
= skb_shinfo(skb
)->nr_frags
;
1625 lbdp
= skip_txbd(bdp
, frags
, base
, tx_ring_size
);
1627 lstatus
= lbdp
->lstatus
;
1629 /* Only clean completed frames */
1630 if ((lstatus
& BD_LFLAG(TXBD_READY
)) &&
1631 (lstatus
& BD_LENGTH_MASK
))
1634 dma_unmap_single(&priv
->ofdev
->dev
,
1639 bdp
->lstatus
&= BD_LFLAG(TXBD_WRAP
);
1640 bdp
= next_txbd(bdp
, base
, tx_ring_size
);
1642 for (i
= 0; i
< frags
; i
++) {
1643 dma_unmap_page(&priv
->ofdev
->dev
,
1647 bdp
->lstatus
&= BD_LFLAG(TXBD_WRAP
);
1648 bdp
= next_txbd(bdp
, base
, tx_ring_size
);
1652 * If there's room in the queue (limit it to rx_buffer_size)
1653 * we add this skb back into the pool, if it's the right size
1655 if (skb_queue_len(&priv
->rx_recycle
) < priv
->rx_ring_size
&&
1656 skb_recycle_check(skb
, priv
->rx_buffer_size
+
1658 __skb_queue_head(&priv
->rx_recycle
, skb
);
1660 dev_kfree_skb_any(skb
);
1662 priv
->tx_skbuff
[skb_dirtytx
] = NULL
;
1664 skb_dirtytx
= (skb_dirtytx
+ 1) &
1665 TX_RING_MOD_MASK(tx_ring_size
);
1668 priv
->num_txbdfree
+= frags
+ 1;
1671 /* If we freed a buffer, we can restart transmission, if necessary */
1672 if (netif_queue_stopped(dev
) && priv
->num_txbdfree
)
1673 netif_wake_queue(dev
);
1675 /* Update dirty indicators */
1676 priv
->skb_dirtytx
= skb_dirtytx
;
1677 priv
->dirty_tx
= bdp
;
1679 dev
->stats
.tx_packets
+= howmany
;
static void gfar_schedule_cleanup(struct net_device *dev)
{
	struct gfar_private *priv = netdev_priv(dev);
	unsigned long flags;

	spin_lock_irqsave(&priv->txlock, flags);
	spin_lock(&priv->rxlock);

	if (napi_schedule_prep(&priv->napi)) {
		gfar_write(&priv->regs->imask, IMASK_RTX_DISABLED);
		__napi_schedule(&priv->napi);
	} else {
		/*
		 * Clear IEVENT, so interrupts aren't called again
		 * because of the packets that have already arrived.
		 */
		gfar_write(&priv->regs->ievent, IEVENT_RTX_MASK);
	}

	spin_unlock(&priv->rxlock);
	spin_unlock_irqrestore(&priv->txlock, flags);
}
/* Interrupt Handler for Transmit complete */
static irqreturn_t gfar_transmit(int irq, void *dev_id)
{
	gfar_schedule_cleanup((struct net_device *)dev_id);

	return IRQ_HANDLED;
}
static void gfar_new_rxbdp(struct net_device *dev, struct rxbd8 *bdp,
		struct sk_buff *skb)
{
	struct gfar_private *priv = netdev_priv(dev);
	u32 lstatus;

	bdp->bufPtr = dma_map_single(&priv->ofdev->dev, skb->data,
			priv->rx_buffer_size, DMA_FROM_DEVICE);

	lstatus = BD_LFLAG(RXBD_EMPTY | RXBD_INTERRUPT);

	if (bdp == priv->rx_bd_base + priv->rx_ring_size - 1)
		lstatus |= BD_LFLAG(RXBD_WRAP);

	eieio();

	bdp->lstatus = lstatus;
}
struct sk_buff * gfar_new_skb(struct net_device *dev)
{
	unsigned int alignamount;
	struct gfar_private *priv = netdev_priv(dev);
	struct sk_buff *skb = NULL;

	skb = __skb_dequeue(&priv->rx_recycle);
	if (!skb)
		skb = netdev_alloc_skb(dev,
				priv->rx_buffer_size + RXBUF_ALIGNMENT);

	if (!skb)
		return NULL;

	alignamount = RXBUF_ALIGNMENT -
		(((unsigned long) skb->data) & (RXBUF_ALIGNMENT - 1));

	/* We need the data buffer to be aligned properly.  We will reserve
	 * as many bytes as needed to align the data properly
	 */
	skb_reserve(skb, alignamount);

	return skb;
}
static inline void count_errors(unsigned short status, struct net_device *dev)
{
	struct gfar_private *priv = netdev_priv(dev);
	struct net_device_stats *stats = &dev->stats;
	struct gfar_extra_stats *estats = &priv->extra_stats;

	/* If the packet was truncated, none of the other errors
	 * matter */
	if (status & RXBD_TRUNCATED) {
		stats->rx_length_errors++;

		estats->rx_trunc++;

		return;
	}
	/* Count the errors, if there were any */
	if (status & (RXBD_LARGE | RXBD_SHORT)) {
		stats->rx_length_errors++;

		if (status & RXBD_LARGE)
			estats->rx_large++;
		else
			estats->rx_short++;
	}
	if (status & RXBD_NONOCTET) {
		stats->rx_frame_errors++;
		estats->rx_nonoctet++;
	}
	if (status & RXBD_CRCERR) {
		estats->rx_crcerr++;
		stats->rx_crc_errors++;
	}
	if (status & RXBD_OVERRUN) {
		estats->rx_overrun++;
		stats->rx_crc_errors++;
	}
}
irqreturn_t gfar_receive(int irq, void *dev_id)
{
	gfar_schedule_cleanup((struct net_device *)dev_id);

	return IRQ_HANDLED;
}
static inline void gfar_rx_checksum(struct sk_buff *skb, struct rxfcb *fcb)
{
	/* If valid headers were found, and valid sums
	 * were verified, then we tell the kernel that no
	 * checksumming is necessary.  Otherwise, it is */
	if ((fcb->flags & RXFCB_CSUM_MASK) == (RXFCB_CIP | RXFCB_CTU))
		skb->ip_summed = CHECKSUM_UNNECESSARY;
	else
		skb->ip_summed = CHECKSUM_NONE;
}
/* gfar_process_frame() -- handle one incoming packet if skb
 * isn't NULL.  */
static int gfar_process_frame(struct net_device *dev, struct sk_buff *skb,
			      int amount_pull)
{
	struct gfar_private *priv = netdev_priv(dev);
	struct rxfcb *fcb = NULL;

	int ret;

	/* fcb is at the beginning if exists */
	fcb = (struct rxfcb *)skb->data;

	/* Remove the FCB from the skb */
	/* Remove the padded bytes, if there are any */
	if (amount_pull)
		skb_pull(skb, amount_pull);

	if (priv->rx_csum_enable)
		gfar_rx_checksum(skb, fcb);

	/* Tell the skb what kind of packet this is */
	skb->protocol = eth_type_trans(skb, dev);

	/* Send the packet up the stack */
	if (unlikely(priv->vlgrp && (fcb->flags & RXFCB_VLN)))
		ret = vlan_hwaccel_receive_skb(skb, priv->vlgrp, fcb->vlctl);
	else
		ret = netif_receive_skb(skb);

	if (NET_RX_DROP == ret)
		priv->extra_stats.kernel_dropped++;

	return ret;
}
1851 /* gfar_clean_rx_ring() -- Processes each frame in the rx ring
1852 * until the budget/quota has been reached. Returns the number
1855 int gfar_clean_rx_ring(struct net_device
*dev
, int rx_work_limit
)
1857 struct rxbd8
*bdp
, *base
;
1858 struct sk_buff
*skb
;
1862 struct gfar_private
*priv
= netdev_priv(dev
);
1864 /* Get the first full descriptor */
1866 base
= priv
->rx_bd_base
;
1868 amount_pull
= (gfar_uses_fcb(priv
) ? GMAC_FCB_LEN
: 0) +
1871 while (!((bdp
->status
& RXBD_EMPTY
) || (--rx_work_limit
< 0))) {
1872 struct sk_buff
*newskb
;
1875 /* Add another skb for the future */
1876 newskb
= gfar_new_skb(dev
);
1878 skb
= priv
->rx_skbuff
[priv
->skb_currx
];
1880 dma_unmap_single(&priv
->ofdev
->dev
, bdp
->bufPtr
,
1881 priv
->rx_buffer_size
, DMA_FROM_DEVICE
);
1883 /* We drop the frame if we failed to allocate a new buffer */
1884 if (unlikely(!newskb
|| !(bdp
->status
& RXBD_LAST
) ||
1885 bdp
->status
& RXBD_ERR
)) {
1886 count_errors(bdp
->status
, dev
);
1888 if (unlikely(!newskb
))
1892 * We need to reset ->data to what it
1893 * was before gfar_new_skb() re-aligned
1894 * it to an RXBUF_ALIGNMENT boundary
1895 * before we put the skb back on the
1898 skb
->data
= skb
->head
+ NET_SKB_PAD
;
1899 __skb_queue_head(&priv
->rx_recycle
, skb
);
1902 /* Increment the number of packets */
1903 dev
->stats
.rx_packets
++;
1907 pkt_len
= bdp
->length
- ETH_FCS_LEN
;
1908 /* Remove the FCS from the packet length */
1909 skb_put(skb
, pkt_len
);
1910 dev
->stats
.rx_bytes
+= pkt_len
;
1912 if (in_irq() || irqs_disabled())
1913 printk("Interrupt problem!\n");
1914 gfar_process_frame(dev
, skb
, amount_pull
);
1917 if (netif_msg_rx_err(priv
))
1919 "%s: Missing skb!\n", dev
->name
);
1920 dev
->stats
.rx_dropped
++;
1921 priv
->extra_stats
.rx_skbmissing
++;
1926 priv
->rx_skbuff
[priv
->skb_currx
] = newskb
;
1928 /* Setup the new bdp */
1929 gfar_new_rxbdp(dev
, bdp
, newskb
);
1931 /* Update to the next pointer */
1932 bdp
= next_bd(bdp
, base
, priv
->rx_ring_size
);
1934 /* update to point at the next skb */
1936 (priv
->skb_currx
+ 1) &
1937 RX_RING_MOD_MASK(priv
->rx_ring_size
);
1940 /* Update the current rxbd pointer to be the next one */
1946 static int gfar_poll(struct napi_struct
*napi
, int budget
)
1948 struct gfar_private
*priv
= container_of(napi
, struct gfar_private
, napi
);
1949 struct net_device
*dev
= priv
->ndev
;
1952 unsigned long flags
;
1954 /* Clear IEVENT, so interrupts aren't called again
1955 * because of the packets that have already arrived */
1956 gfar_write(&priv
->regs
->ievent
, IEVENT_RTX_MASK
);
1958 /* If we fail to get the lock, don't bother with the TX BDs */
1959 if (spin_trylock_irqsave(&priv
->txlock
, flags
)) {
1960 tx_cleaned
= gfar_clean_tx_ring(dev
);
1961 spin_unlock_irqrestore(&priv
->txlock
, flags
);
1964 rx_cleaned
= gfar_clean_rx_ring(dev
, budget
);
1969 if (rx_cleaned
< budget
) {
1970 napi_complete(napi
);
1972 /* Clear the halt bit in RSTAT */
1973 gfar_write(&priv
->regs
->rstat
, RSTAT_CLEAR_RHALT
);
1975 gfar_write(&priv
->regs
->imask
, IMASK_DEFAULT
);
1977 /* If we are coalescing interrupts, update the timer */
1978 /* Otherwise, clear it */
1979 if (likely(priv
->rxcoalescing
)) {
1980 gfar_write(&priv
->regs
->rxic
, 0);
1981 gfar_write(&priv
->regs
->rxic
, priv
->rxic
);
1983 if (likely(priv
->txcoalescing
)) {
1984 gfar_write(&priv
->regs
->txic
, 0);
1985 gfar_write(&priv
->regs
->txic
, priv
->txic
);
#ifdef CONFIG_NET_POLL_CONTROLLER
/*
 * Polling 'interrupt' - used by things like netconsole to send skbs
 * without having to re-enable interrupts. It's not called while
 * the interrupt routine is executing.
 */
static void gfar_netpoll(struct net_device *dev)
{
	struct gfar_private *priv = netdev_priv(dev);

	/* If the device has multiple interrupts, run tx/rx */
	if (priv->device_flags & FSL_GIANFAR_DEV_HAS_MULTI_INTR) {
		disable_irq(priv->interruptTransmit);
		disable_irq(priv->interruptReceive);
		disable_irq(priv->interruptError);
		gfar_interrupt(priv->interruptTransmit, dev);
		enable_irq(priv->interruptError);
		enable_irq(priv->interruptReceive);
		enable_irq(priv->interruptTransmit);
	} else {
		disable_irq(priv->interruptTransmit);
		gfar_interrupt(priv->interruptTransmit, dev);
		enable_irq(priv->interruptTransmit);
	}
}
#endif
/* The interrupt handler for devices with one interrupt */
static irqreturn_t gfar_interrupt(int irq, void *dev_id)
{
	struct net_device *dev = dev_id;
	struct gfar_private *priv = netdev_priv(dev);

	/* Save ievent for future reference */
	u32 events = gfar_read(&priv->regs->ievent);

	/* Check for reception */
	if (events & IEVENT_RX_MASK)
		gfar_receive(irq, dev_id);

	/* Check for transmit completion */
	if (events & IEVENT_TX_MASK)
		gfar_transmit(irq, dev_id);

	/* Check for errors */
	if (events & IEVENT_ERR_MASK)
		gfar_error(irq, dev_id);

	return IRQ_HANDLED;
}
2043 /* Called every time the controller might need to be made
2044 * aware of new link state. The PHY code conveys this
2045 * information through variables in the phydev structure, and this
2046 * function converts those variables into the appropriate
2047 * register values, and can bring down the device if needed.
2049 static void adjust_link(struct net_device
*dev
)
2051 struct gfar_private
*priv
= netdev_priv(dev
);
2052 struct gfar __iomem
*regs
= priv
->regs
;
2053 unsigned long flags
;
2054 struct phy_device
*phydev
= priv
->phydev
;
2057 spin_lock_irqsave(&priv
->txlock
, flags
);
2059 u32 tempval
= gfar_read(®s
->maccfg2
);
2060 u32 ecntrl
= gfar_read(®s
->ecntrl
);
2062 /* Now we make sure that we can be in full duplex mode.
2063 * If not, we operate in half-duplex mode. */
2064 if (phydev
->duplex
!= priv
->oldduplex
) {
2066 if (!(phydev
->duplex
))
2067 tempval
&= ~(MACCFG2_FULL_DUPLEX
);
2069 tempval
|= MACCFG2_FULL_DUPLEX
;
2071 priv
->oldduplex
= phydev
->duplex
;
2074 if (phydev
->speed
!= priv
->oldspeed
) {
2076 switch (phydev
->speed
) {
2079 ((tempval
& ~(MACCFG2_IF
)) | MACCFG2_GMII
);
2081 ecntrl
&= ~(ECNTRL_R100
);
2086 ((tempval
& ~(MACCFG2_IF
)) | MACCFG2_MII
);
2088 /* Reduced mode distinguishes
2089 * between 10 and 100 */
2090 if (phydev
->speed
== SPEED_100
)
2091 ecntrl
|= ECNTRL_R100
;
2093 ecntrl
&= ~(ECNTRL_R100
);
2096 if (netif_msg_link(priv
))
2098 "%s: Ack! Speed (%d) is not 10/100/1000!\n",
2099 dev
->name
, phydev
->speed
);
2103 priv
->oldspeed
= phydev
->speed
;
2106 gfar_write(®s
->maccfg2
, tempval
);
2107 gfar_write(®s
->ecntrl
, ecntrl
);
2109 if (!priv
->oldlink
) {
2113 } else if (priv
->oldlink
) {
2117 priv
->oldduplex
= -1;
2120 if (new_state
&& netif_msg_link(priv
))
2121 phy_print_status(phydev
);
2123 spin_unlock_irqrestore(&priv
->txlock
, flags
);
2126 /* Update the hash table based on the current list of multicast
2127 * addresses we subscribe to. Also, change the promiscuity of
2128 * the device based on the flags (this function is called
2129 * whenever dev->flags is changed */
2130 static void gfar_set_multi(struct net_device
*dev
)
2132 struct dev_mc_list
*mc_ptr
;
2133 struct gfar_private
*priv
= netdev_priv(dev
);
2134 struct gfar __iomem
*regs
= priv
->regs
;
2137 if(dev
->flags
& IFF_PROMISC
) {
2138 /* Set RCTRL to PROM */
2139 tempval
= gfar_read(®s
->rctrl
);
2140 tempval
|= RCTRL_PROM
;
2141 gfar_write(®s
->rctrl
, tempval
);
2143 /* Set RCTRL to not PROM */
2144 tempval
= gfar_read(®s
->rctrl
);
2145 tempval
&= ~(RCTRL_PROM
);
2146 gfar_write(®s
->rctrl
, tempval
);
2149 if(dev
->flags
& IFF_ALLMULTI
) {
2150 /* Set the hash to rx all multicast frames */
2151 gfar_write(®s
->igaddr0
, 0xffffffff);
2152 gfar_write(®s
->igaddr1
, 0xffffffff);
2153 gfar_write(®s
->igaddr2
, 0xffffffff);
2154 gfar_write(®s
->igaddr3
, 0xffffffff);
2155 gfar_write(®s
->igaddr4
, 0xffffffff);
2156 gfar_write(®s
->igaddr5
, 0xffffffff);
2157 gfar_write(®s
->igaddr6
, 0xffffffff);
2158 gfar_write(®s
->igaddr7
, 0xffffffff);
2159 gfar_write(®s
->gaddr0
, 0xffffffff);
2160 gfar_write(®s
->gaddr1
, 0xffffffff);
2161 gfar_write(®s
->gaddr2
, 0xffffffff);
2162 gfar_write(®s
->gaddr3
, 0xffffffff);
2163 gfar_write(®s
->gaddr4
, 0xffffffff);
2164 gfar_write(®s
->gaddr5
, 0xffffffff);
2165 gfar_write(®s
->gaddr6
, 0xffffffff);
2166 gfar_write(®s
->gaddr7
, 0xffffffff);
2171 /* zero out the hash */
2172 gfar_write(®s
->igaddr0
, 0x0);
2173 gfar_write(®s
->igaddr1
, 0x0);
2174 gfar_write(®s
->igaddr2
, 0x0);
2175 gfar_write(®s
->igaddr3
, 0x0);
2176 gfar_write(®s
->igaddr4
, 0x0);
2177 gfar_write(®s
->igaddr5
, 0x0);
2178 gfar_write(®s
->igaddr6
, 0x0);
2179 gfar_write(®s
->igaddr7
, 0x0);
2180 gfar_write(®s
->gaddr0
, 0x0);
2181 gfar_write(®s
->gaddr1
, 0x0);
2182 gfar_write(®s
->gaddr2
, 0x0);
2183 gfar_write(®s
->gaddr3
, 0x0);
2184 gfar_write(®s
->gaddr4
, 0x0);
2185 gfar_write(®s
->gaddr5
, 0x0);
2186 gfar_write(®s
->gaddr6
, 0x0);
2187 gfar_write(®s
->gaddr7
, 0x0);
2189 /* If we have extended hash tables, we need to
2190 * clear the exact match registers to prepare for
2192 if (priv
->extended_hash
) {
2193 em_num
= GFAR_EM_NUM
+ 1;
2194 gfar_clear_exact_match(dev
);
2201 if(dev
->mc_count
== 0)
2204 /* Parse the list, and set the appropriate bits */
2205 for(mc_ptr
= dev
->mc_list
; mc_ptr
; mc_ptr
= mc_ptr
->next
) {
2207 gfar_set_mac_for_addr(dev
, idx
,
2211 gfar_set_hash_for_addr(dev
, mc_ptr
->dmi_addr
);
/* Clears each of the exact match registers to zero, so they
 * don't interfere with normal reception */
static void gfar_clear_exact_match(struct net_device *dev)
{
	int idx;
	u8 zero_arr[MAC_ADDR_LEN] = {0,0,0,0,0,0};

	for(idx = 1;idx < GFAR_EM_NUM + 1;idx++)
		gfar_set_mac_for_addr(dev, idx, (u8 *)zero_arr);
}
/* Set the appropriate hash bit for the given addr */
/* The algorithm works like so:
 * 1) Take the Destination Address (ie the multicast address), and
 * do a CRC on it (little endian), and reverse the bits of the
 * result.
 * 2) Use the 8 most significant bits as a hash into a 256-entry
 * table.  The table is controlled through 8 32-bit registers:
 * gaddr0-7.  gaddr0's MSB is entry 0, and gaddr7's LSB is
 * entry 255.  This means that the 3 most significant bits in the
 * hash index which gaddr register to use, and the 5 other bits
 * indicate which bit (assuming an IBM numbering scheme, which
 * for PowerPC (tm) is usually the case) in the register holds
 * the entry. */
static void gfar_set_hash_for_addr(struct net_device *dev, u8 *addr)
{
	u32 tempval;
	struct gfar_private *priv = netdev_priv(dev);
	u32 result = ether_crc(MAC_ADDR_LEN, addr);
	int width = priv->hash_width;
	u8 whichbit = (result >> (32 - width)) & 0x1f;
	u8 whichreg = result >> (32 - width + 5);
	u32 value = (1 << (31-whichbit));

	tempval = gfar_read(priv->hash_regs[whichreg]);
	tempval |= value;
	gfar_write(priv->hash_regs[whichreg], tempval);
}
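/*
 * Illustrative sketch only, not used by the driver: how the CRC-based
 * hash above maps to a (register, bit) pair.  The function name below is
 * hypothetical; the real driver uses priv->hash_width (8 or 9) and
 * priv->hash_regs[] directly.
 */
static inline void example_hash_index(u32 crc_result, int width,
				      u8 *reg_idx, u8 *bit_idx)
{
	/* The top 'width' bits of the CRC form the hash; of those, the low
	 * five bits select a bit within a 32-bit register, and the remaining
	 * high bits select which register. */
	*bit_idx = (crc_result >> (32 - width)) & 0x1f;
	*reg_idx = crc_result >> (32 - width + 5);
}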
/* There are multiple MAC Address register pairs on some controllers
 * This function sets the numth pair to a given address
 */
static void gfar_set_mac_for_addr(struct net_device *dev, int num, u8 *addr)
{
	struct gfar_private *priv = netdev_priv(dev);
	int idx;
	char tmpbuf[MAC_ADDR_LEN];
	u32 tempval;
	u32 __iomem *macptr = &priv->regs->macstnaddr1;

	macptr += num*2;

	/* Now copy it into the mac registers backwards, cuz */
	/* little endian is silly */
	for (idx = 0; idx < MAC_ADDR_LEN; idx++)
		tmpbuf[MAC_ADDR_LEN - 1 - idx] = addr[idx];

	gfar_write(macptr, *((u32 *) (tmpbuf)));

	tempval = *((u32 *) (tmpbuf + 4));

	gfar_write(macptr+1, tempval);
}
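/*
 * Illustrative sketch only, not used by the driver: the byte reversal
 * above turns a MAC address such as 00:04:9f:01:02:03 into the buffer
 * {03,02,01,9f,04,00} before it is written to the station-address
 * registers.  The helper below is hypothetical and just demonstrates
 * that reversal step.
 */
static inline void example_reverse_mac(const u8 *addr, u8 *out, int len)
{
	int i;

	for (i = 0; i < len; i++)
		out[len - 1 - i] = addr[i];
}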
2286 /* GFAR error interrupt handler */
2287 static irqreturn_t
gfar_error(int irq
, void *dev_id
)
2289 struct net_device
*dev
= dev_id
;
2290 struct gfar_private
*priv
= netdev_priv(dev
);
2292 /* Save ievent for future reference */
2293 u32 events
= gfar_read(&priv
->regs
->ievent
);
2296 gfar_write(&priv
->regs
->ievent
, events
& IEVENT_ERR_MASK
);
2298 /* Magic Packet is not an error. */
2299 if ((priv
->device_flags
& FSL_GIANFAR_DEV_HAS_MAGIC_PACKET
) &&
2300 (events
& IEVENT_MAG
))
2301 events
&= ~IEVENT_MAG
;
2304 if (netif_msg_rx_err(priv
) || netif_msg_tx_err(priv
))
2305 printk(KERN_DEBUG
"%s: error interrupt (ievent=0x%08x imask=0x%08x)\n",
2306 dev
->name
, events
, gfar_read(&priv
->regs
->imask
));
2308 /* Update the error counters */
2309 if (events
& IEVENT_TXE
) {
2310 dev
->stats
.tx_errors
++;
2312 if (events
& IEVENT_LC
)
2313 dev
->stats
.tx_window_errors
++;
2314 if (events
& IEVENT_CRL
)
2315 dev
->stats
.tx_aborted_errors
++;
2316 if (events
& IEVENT_XFUN
) {
2317 if (netif_msg_tx_err(priv
))
2318 printk(KERN_DEBUG
"%s: TX FIFO underrun, "
2319 "packet dropped.\n", dev
->name
);
2320 dev
->stats
.tx_dropped
++;
2321 priv
->extra_stats
.tx_underrun
++;
2323 /* Reactivate the Tx Queues */
2324 gfar_write(&priv
->regs
->tstat
, TSTAT_CLEAR_THALT
);
2326 if (netif_msg_tx_err(priv
))
2327 printk(KERN_DEBUG
"%s: Transmit Error\n", dev
->name
);
2329 if (events
& IEVENT_BSY
) {
2330 dev
->stats
.rx_errors
++;
2331 priv
->extra_stats
.rx_bsy
++;
2333 gfar_receive(irq
, dev_id
);
2335 if (netif_msg_rx_err(priv
))
2336 printk(KERN_DEBUG
"%s: busy error (rstat: %x)\n",
2337 dev
->name
, gfar_read(&priv
->regs
->rstat
));
2339 if (events
& IEVENT_BABR
) {
2340 dev
->stats
.rx_errors
++;
2341 priv
->extra_stats
.rx_babr
++;
2343 if (netif_msg_rx_err(priv
))
2344 printk(KERN_DEBUG
"%s: babbling RX error\n", dev
->name
);
2346 if (events
& IEVENT_EBERR
) {
2347 priv
->extra_stats
.eberr
++;
2348 if (netif_msg_rx_err(priv
))
2349 printk(KERN_DEBUG
"%s: bus error\n", dev
->name
);
2351 if ((events
& IEVENT_RXC
) && netif_msg_rx_status(priv
))
2352 printk(KERN_DEBUG
"%s: control frame\n", dev
->name
);
2354 if (events
& IEVENT_BABT
) {
2355 priv
->extra_stats
.tx_babt
++;
2356 if (netif_msg_tx_err(priv
))
2357 printk(KERN_DEBUG
"%s: babbling TX error\n", dev
->name
);
/* work with hotplug and coldplug */
MODULE_ALIAS("platform:fsl-gianfar");

static struct of_device_id gfar_match[] =
{
	{
		.type = "network",
		.compatible = "gianfar",
	},
	{},
};
MODULE_DEVICE_TABLE(of, gfar_match);

/* Structure for a device driver */
static struct of_platform_driver gfar_driver = {
	.name = "fsl-gianfar",
	.match_table = gfar_match,

	.probe = gfar_probe,
	.remove = gfar_remove,
	.suspend = gfar_suspend,
	.resume = gfar_resume,
};

static int __init gfar_init(void)
{
	return of_register_platform_driver(&gfar_driver);
}

static void __exit gfar_exit(void)
{
	of_unregister_platform_driver(&gfar_driver);
}

module_init(gfar_init);
module_exit(gfar_exit);