/*
 * drivers/net/gianfar.c
 *
 * Gianfar Ethernet Driver
 * This driver is designed for the non-CPM ethernet controllers
 * on the 85xx and 83xx family of integrated processors
 * Based on 8260_io/fcc_enet.c
 *
 * Maintainer: Kumar Gala
 * Modifier: Sandeep Gopalpet <sandeep.kumar@freescale.com>
 *
 * Copyright 2002-2009 Freescale Semiconductor, Inc.
 * Copyright 2007 MontaVista Software, Inc.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the
 * Free Software Foundation; either version 2 of the License, or (at your
 * option) any later version.
 *
 * Gianfar: AKA Lambda Draconis, "Dragon"
 *
 * The driver is initialized through of_device. Configuration information
 * is therefore conveyed through an OF-style device tree.
 *
 * The Gianfar Ethernet Controller uses a ring of buffer
 * descriptors. The beginning is indicated by a register
 * pointing to the physical address of the start of the ring.
 * The end is determined by a "wrap" bit being set in the
 * last descriptor of the ring.
 *
 * When a packet is received, the RXF bit in the
 * IEVENT register is set, triggering an interrupt when the
 * corresponding bit in the IMASK register is also set (if
 * interrupt coalescing is active, then the interrupt may not
 * happen immediately, but will wait until either a set number
 * of frames or amount of time have passed). In NAPI, the
 * interrupt handler will signal there is work to be done, and
 * exit. This method will start at the last known empty
 * descriptor, and process every subsequent descriptor until there
 * are none left with data (NAPI will stop after a set number of
 * packets to give time to other tasks, but will eventually
 * process all the packets). The data arrives inside a
 * pre-allocated skb, and so after the skb is passed up to the
 * stack, a new skb must be allocated, and the address field in
 * the buffer descriptor must be updated to indicate this new
 * skb.
 *
 * When the kernel requests that a packet be transmitted, the
 * driver starts where it left off last time, and points the
 * descriptor at the buffer which was passed in. The driver
 * then informs the DMA engine that there are packets ready to
 * be transmitted. Once the controller is finished transmitting
 * the packet, an interrupt may be triggered (under the same
 * conditions as for reception, but depending on the TXF bit).
 * The driver then cleans up the buffer.
 */
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/unistd.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/init.h>
#include <linux/delay.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/if_vlan.h>
#include <linux/spinlock.h>
#include <linux/of_mdio.h>
#include <linux/of_platform.h>
#include <linux/tcp.h>
#include <linux/udp.h>
#include <linux/net_tstamp.h>

#include <asm/uaccess.h>
#include <linux/module.h>
#include <linux/dma-mapping.h>
#include <linux/crc32.h>
#include <linux/mii.h>
#include <linux/phy.h>
#include <linux/phy_fixed.h>

#include "gianfar.h"
#include "fsl_pq_mdio.h"
#define TX_TIMEOUT      (1*HZ)
#undef BRIEF_GFAR_ERRORS
#undef VERBOSE_GFAR_ERRORS

const char gfar_driver_name[] = "Gianfar Ethernet";
const char gfar_driver_version[] = "1.3";
static int gfar_enet_open(struct net_device *dev);
static int gfar_start_xmit(struct sk_buff *skb, struct net_device *dev);
static void gfar_reset_task(struct work_struct *work);
static void gfar_timeout(struct net_device *dev);
static int gfar_close(struct net_device *dev);
struct sk_buff *gfar_new_skb(struct net_device *dev);
static void gfar_new_rxbdp(struct gfar_priv_rx_q *rx_queue, struct rxbd8 *bdp,
		struct sk_buff *skb);
static int gfar_set_mac_address(struct net_device *dev);
static int gfar_change_mtu(struct net_device *dev, int new_mtu);
static irqreturn_t gfar_error(int irq, void *dev_id);
static irqreturn_t gfar_transmit(int irq, void *dev_id);
static irqreturn_t gfar_interrupt(int irq, void *dev_id);
static void adjust_link(struct net_device *dev);
static void init_registers(struct net_device *dev);
static int init_phy(struct net_device *dev);
static int gfar_probe(struct of_device *ofdev,
		const struct of_device_id *match);
static int gfar_remove(struct of_device *ofdev);
static void free_skb_resources(struct gfar_private *priv);
static void gfar_set_multi(struct net_device *dev);
static void gfar_set_hash_for_addr(struct net_device *dev, u8 *addr);
static void gfar_configure_serdes(struct net_device *dev);
static int gfar_poll(struct napi_struct *napi, int budget);
#ifdef CONFIG_NET_POLL_CONTROLLER
static void gfar_netpoll(struct net_device *dev);
#endif
int gfar_clean_rx_ring(struct gfar_priv_rx_q *rx_queue, int rx_work_limit);
static int gfar_clean_tx_ring(struct gfar_priv_tx_q *tx_queue);
static int gfar_process_frame(struct net_device *dev, struct sk_buff *skb,
		int amount_pull);
static void gfar_vlan_rx_register(struct net_device *netdev,
		struct vlan_group *grp);
void gfar_halt(struct net_device *dev);
static void gfar_halt_nodisable(struct net_device *dev);
void gfar_start(struct net_device *dev);
static void gfar_clear_exact_match(struct net_device *dev);
static void gfar_set_mac_for_addr(struct net_device *dev, int num, u8 *addr);
static int gfar_ioctl(struct net_device *dev, struct ifreq *rq, int cmd);
MODULE_AUTHOR("Freescale Semiconductor, Inc");
MODULE_DESCRIPTION("Gianfar Ethernet Driver");
MODULE_LICENSE("GPL");
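
/* Write one rx buffer descriptor: point it at its data buffer, mark it
 * empty so the controller may fill it, and set the WRAP flag on the last
 * descriptor so the hardware loops back to the start of the ring. */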
static void gfar_init_rxbdp(struct gfar_priv_rx_q *rx_queue, struct rxbd8 *bdp,
		dma_addr_t buf)
{
	u32 lstatus;

	bdp->bufPtr = buf;

	lstatus = BD_LFLAG(RXBD_EMPTY | RXBD_INTERRUPT);
	if (bdp == rx_queue->rx_bd_base + rx_queue->rx_ring_size - 1)
		lstatus |= BD_LFLAG(RXBD_WRAP);

	eieio();

	bdp->lstatus = lstatus;
}
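
/* Reset every tx and rx descriptor ring to its initial state.  Tx rings
 * start out entirely free; rx rings are (re)armed with receive buffers,
 * reusing any skbs already attached to a slot and allocating new ones
 * where a slot is empty. */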
static int gfar_init_bds(struct net_device *ndev)
{
	struct gfar_private *priv = netdev_priv(ndev);
	struct gfar_priv_tx_q *tx_queue = NULL;
	struct gfar_priv_rx_q *rx_queue = NULL;
	struct txbd8 *txbdp;
	struct rxbd8 *rxbdp;
	int i, j;

	for (i = 0; i < priv->num_tx_queues; i++) {
		tx_queue = priv->tx_queue[i];
		/* Initialize some variables in our dev structure */
		tx_queue->num_txbdfree = tx_queue->tx_ring_size;
		tx_queue->dirty_tx = tx_queue->tx_bd_base;
		tx_queue->cur_tx = tx_queue->tx_bd_base;
		tx_queue->skb_curtx = 0;
		tx_queue->skb_dirtytx = 0;

		/* Initialize Transmit Descriptor Ring */
		txbdp = tx_queue->tx_bd_base;
		for (j = 0; j < tx_queue->tx_ring_size; j++) {
			txbdp->lstatus = 0;
			txbdp->bufPtr = 0;
			txbdp++;
		}

		/* Set the last descriptor in the ring to indicate wrap */
		txbdp--;
		txbdp->status |= TXBD_WRAP;
	}

	for (i = 0; i < priv->num_rx_queues; i++) {
		rx_queue = priv->rx_queue[i];
		rx_queue->cur_rx = rx_queue->rx_bd_base;
		rx_queue->skb_currx = 0;
		rxbdp = rx_queue->rx_bd_base;

		for (j = 0; j < rx_queue->rx_ring_size; j++) {
			struct sk_buff *skb = rx_queue->rx_skbuff[j];

			if (skb) {
				gfar_init_rxbdp(rx_queue, rxbdp,
						rxbdp->bufPtr);
			} else {
				skb = gfar_new_skb(ndev);
				if (!skb) {
					pr_err("%s: Can't allocate RX buffers\n",
							ndev->name);
					goto err_rxalloc_fail;
				}
				rx_queue->rx_skbuff[j] = skb;

				gfar_new_rxbdp(rx_queue, rxbdp, skb);
			}

			rxbdp++;
		}
	}

	return 0;

err_rxalloc_fail:
	free_skb_resources(priv);
	return -ENOMEM;
}
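
/* Allocate one DMA-coherent region holding the tx and rx descriptor rings
 * of all queues back to back, plus the per-ring skb pointer arrays, then
 * hand everything to gfar_init_bds() for initialization. */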
static int gfar_alloc_skb_resources(struct net_device *ndev)
{
	void *vaddr;
	dma_addr_t addr;
	int i, j, k;
	struct gfar_private *priv = netdev_priv(ndev);
	struct device *dev = &priv->ofdev->dev;
	struct gfar_priv_tx_q *tx_queue = NULL;
	struct gfar_priv_rx_q *rx_queue = NULL;

	priv->total_tx_ring_size = 0;
	for (i = 0; i < priv->num_tx_queues; i++)
		priv->total_tx_ring_size += priv->tx_queue[i]->tx_ring_size;

	priv->total_rx_ring_size = 0;
	for (i = 0; i < priv->num_rx_queues; i++)
		priv->total_rx_ring_size += priv->rx_queue[i]->rx_ring_size;

	/* Allocate memory for the buffer descriptors */
	vaddr = dma_alloc_coherent(dev,
			sizeof(struct txbd8) * priv->total_tx_ring_size +
			sizeof(struct rxbd8) * priv->total_rx_ring_size,
			&addr, GFP_KERNEL);
	if (!vaddr) {
		if (netif_msg_ifup(priv))
			pr_err("%s: Could not allocate buffer descriptors!\n",
					ndev->name);
		return -ENOMEM;
	}

	for (i = 0; i < priv->num_tx_queues; i++) {
		tx_queue = priv->tx_queue[i];
		tx_queue->tx_bd_base = (struct txbd8 *) vaddr;
		tx_queue->tx_bd_dma_base = addr;
		tx_queue->dev = ndev;
		/* enet DMA only understands physical addresses */
		addr += sizeof(struct txbd8) * tx_queue->tx_ring_size;
		vaddr += sizeof(struct txbd8) * tx_queue->tx_ring_size;
	}

	/* Start the rx descriptor ring where the tx ring leaves off */
	for (i = 0; i < priv->num_rx_queues; i++) {
		rx_queue = priv->rx_queue[i];
		rx_queue->rx_bd_base = (struct rxbd8 *) vaddr;
		rx_queue->rx_bd_dma_base = addr;
		rx_queue->dev = ndev;
		addr += sizeof(struct rxbd8) * rx_queue->rx_ring_size;
		vaddr += sizeof(struct rxbd8) * rx_queue->rx_ring_size;
	}

	/* Setup the skbuff rings */
	for (i = 0; i < priv->num_tx_queues; i++) {
		tx_queue = priv->tx_queue[i];
		tx_queue->tx_skbuff = kmalloc(sizeof(*tx_queue->tx_skbuff) *
				tx_queue->tx_ring_size, GFP_KERNEL);
		if (!tx_queue->tx_skbuff) {
			if (netif_msg_ifup(priv))
				pr_err("%s: Could not allocate tx_skbuff\n",
						ndev->name);
			goto cleanup;
		}

		for (k = 0; k < tx_queue->tx_ring_size; k++)
			tx_queue->tx_skbuff[k] = NULL;
	}

	for (i = 0; i < priv->num_rx_queues; i++) {
		rx_queue = priv->rx_queue[i];
		rx_queue->rx_skbuff = kmalloc(sizeof(*rx_queue->rx_skbuff) *
				rx_queue->rx_ring_size, GFP_KERNEL);

		if (!rx_queue->rx_skbuff) {
			if (netif_msg_ifup(priv))
				pr_err("%s: Could not allocate rx_skbuff\n",
						ndev->name);
			goto cleanup;
		}

		for (j = 0; j < rx_queue->rx_ring_size; j++)
			rx_queue->rx_skbuff[j] = NULL;
	}

	if (gfar_init_bds(ndev))
		goto cleanup;

	return 0;

cleanup:
	free_skb_resources(priv);
	return -ENOMEM;
}
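
/* Program the per-queue TBASEn/RBASEn registers with the bus addresses of
 * the descriptor rings set up in gfar_alloc_skb_resources(). */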
static void gfar_init_tx_rx_base(struct gfar_private *priv)
{
	struct gfar __iomem *regs = priv->gfargrp[0].regs;
	u32 __iomem *baddr;
	int i;

	baddr = &regs->tbase0;
	for (i = 0; i < priv->num_tx_queues; i++) {
		gfar_write(baddr, priv->tx_queue[i]->tx_bd_dma_base);
		baddr += 2;
	}

	baddr = &regs->rbase0;
	for (i = 0; i < priv->num_rx_queues; i++) {
		gfar_write(baddr, priv->rx_queue[i]->rx_bd_dma_base);
		baddr += 2;
	}
}
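
/* Program RCTRL/TCTRL and the attribute registers from the current
 * software settings: filer, checksum offload, extended hash, padding,
 * timestamping, VLAN, stashing and the FIFO thresholds. */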
static void gfar_init_mac(struct net_device *ndev)
{
	struct gfar_private *priv = netdev_priv(ndev);
	struct gfar __iomem *regs = priv->gfargrp[0].regs;
	u32 rctrl = 0;
	u32 tctrl = 0;
	u32 attrs = 0;

	/* write the tx/rx base registers */
	gfar_init_tx_rx_base(priv);

	/* Configure the coalescing support */
	gfar_configure_coalescing(priv, 0xFF, 0xFF);

	if (priv->rx_filer_enable) {
		rctrl |= RCTRL_FILREN;
		/* Program the RIR0 reg with the required distribution */
		gfar_write(&regs->rir0, DEFAULT_RIR0);
	}

	if (priv->rx_csum_enable)
		rctrl |= RCTRL_CHECKSUMMING;

	if (priv->extended_hash) {
		rctrl |= RCTRL_EXTHASH;

		gfar_clear_exact_match(ndev);
		rctrl |= RCTRL_EMEN;
	}

	if (priv->padding) {
		rctrl &= ~RCTRL_PAL_MASK;
		rctrl |= RCTRL_PADDING(priv->padding);
	}

	/* Insert receive time stamps into padding alignment bytes */
	if (priv->device_flags & FSL_GIANFAR_DEV_HAS_TIMER) {
		rctrl &= ~RCTRL_PAL_MASK;
		rctrl |= RCTRL_PADDING(8);
		priv->padding = 8;
	}

	/* Enable HW time stamping if requested from user space */
	if (priv->hwts_rx_en)
		rctrl |= RCTRL_PRSDEP_INIT | RCTRL_TS_ENABLE;

	/* keep vlan related bits if it's enabled */
	if (priv->vlgrp) {
		rctrl |= RCTRL_VLEX | RCTRL_PRSDEP_INIT;
		tctrl |= TCTRL_VLINS;
	}

	/* Init rctrl based on our settings */
	gfar_write(&regs->rctrl, rctrl);

	if (ndev->features & NETIF_F_IP_CSUM)
		tctrl |= TCTRL_INIT_CSUM;

	tctrl |= TCTRL_TXSCHED_PRIO;

	gfar_write(&regs->tctrl, tctrl);

	/* Set the extraction length and index */
	attrs = ATTRELI_EL(priv->rx_stash_size) |
		ATTRELI_EI(priv->rx_stash_index);

	gfar_write(&regs->attreli, attrs);

	/* Start with defaults, and add stashing or locking
	 * depending on the appropriate variables */
	attrs = ATTR_INIT_SETTINGS;

	if (priv->bd_stash_en)
		attrs |= ATTR_BDSTASH;

	if (priv->rx_stash_size != 0)
		attrs |= ATTR_BUFSTASH;

	gfar_write(&regs->attr, attrs);

	gfar_write(&regs->fifo_tx_thr, priv->fifo_threshold);
	gfar_write(&regs->fifo_tx_starve, priv->fifo_starve);
	gfar_write(&regs->fifo_tx_starve_shutoff, priv->fifo_starve_off);
}
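
/* Sum the per-queue counters into the single netdev stats structure that
 * the stack expects. */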
static struct net_device_stats *gfar_get_stats(struct net_device *dev)
{
	struct gfar_private *priv = netdev_priv(dev);
	struct netdev_queue *txq;
	unsigned long rx_packets = 0, rx_bytes = 0, rx_dropped = 0;
	unsigned long tx_packets = 0, tx_bytes = 0;
	int i = 0;

	for (i = 0; i < priv->num_rx_queues; i++) {
		rx_packets += priv->rx_queue[i]->stats.rx_packets;
		rx_bytes += priv->rx_queue[i]->stats.rx_bytes;
		rx_dropped += priv->rx_queue[i]->stats.rx_dropped;
	}

	dev->stats.rx_packets = rx_packets;
	dev->stats.rx_bytes = rx_bytes;
	dev->stats.rx_dropped = rx_dropped;

	for (i = 0; i < priv->num_tx_queues; i++) {
		txq = netdev_get_tx_queue(dev, i);
		tx_bytes += txq->tx_bytes;
		tx_packets += txq->tx_packets;
	}

	dev->stats.tx_bytes = tx_bytes;
	dev->stats.tx_packets = tx_packets;

	return &dev->stats;
}
static const struct net_device_ops gfar_netdev_ops = {
	.ndo_open = gfar_enet_open,
	.ndo_start_xmit = gfar_start_xmit,
	.ndo_stop = gfar_close,
	.ndo_change_mtu = gfar_change_mtu,
	.ndo_set_multicast_list = gfar_set_multi,
	.ndo_tx_timeout = gfar_timeout,
	.ndo_do_ioctl = gfar_ioctl,
	.ndo_get_stats = gfar_get_stats,
	.ndo_vlan_rx_register = gfar_vlan_rx_register,
	.ndo_set_mac_address = eth_mac_addr,
	.ndo_validate_addr = eth_validate_addr,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller = gfar_netpoll,
#endif
};
unsigned int ftp_rqfpr[MAX_FILER_IDX + 1];
unsigned int ftp_rqfcr[MAX_FILER_IDX + 1];
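
/* Helpers that take or release the locks of all rx or tx queues at once;
 * used where the whole set of rings must be quiesced together. */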
void lock_rx_qs(struct gfar_private *priv)
{
	int i = 0x0;

	for (i = 0; i < priv->num_rx_queues; i++)
		spin_lock(&priv->rx_queue[i]->rxlock);
}

void lock_tx_qs(struct gfar_private *priv)
{
	int i = 0x0;

	for (i = 0; i < priv->num_tx_queues; i++)
		spin_lock(&priv->tx_queue[i]->txlock);
}

void unlock_rx_qs(struct gfar_private *priv)
{
	int i = 0x0;

	for (i = 0; i < priv->num_rx_queues; i++)
		spin_unlock(&priv->rx_queue[i]->rxlock);
}

void unlock_tx_qs(struct gfar_private *priv)
{
	int i = 0x0;

	for (i = 0; i < priv->num_tx_queues; i++)
		spin_unlock(&priv->tx_queue[i]->txlock);
}
/* Returns 1 if incoming frames use an FCB */
static inline int gfar_uses_fcb(struct gfar_private *priv)
{
	return priv->vlgrp || priv->rx_csum_enable ||
		(priv->device_flags & FSL_GIANFAR_DEV_HAS_TIMER);
}
static void free_tx_pointers(struct gfar_private *priv)
{
	int i = 0;

	for (i = 0; i < priv->num_tx_queues; i++)
		kfree(priv->tx_queue[i]);
}

static void free_rx_pointers(struct gfar_private *priv)
{
	int i = 0;

	for (i = 0; i < priv->num_rx_queues; i++)
		kfree(priv->rx_queue[i]);
}

static void unmap_group_regs(struct gfar_private *priv)
{
	int i = 0;

	for (i = 0; i < MAXGROUPS; i++)
		if (priv->gfargrp[i].regs)
			iounmap(priv->gfargrp[i].regs);
}

static void disable_napi(struct gfar_private *priv)
{
	int i = 0;

	for (i = 0; i < priv->num_grps; i++)
		napi_disable(&priv->gfargrp[i].napi);
}

static void enable_napi(struct gfar_private *priv)
{
	int i = 0;

	for (i = 0; i < priv->num_grps; i++)
		napi_enable(&priv->gfargrp[i].napi);
}
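
/* Map the register block and interrupt lines of one interrupt group from
 * the device tree, and record which rx/tx queues the group owns. */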
static int gfar_parse_group(struct device_node *np,
		struct gfar_private *priv, const char *model)
{
	u32 *queue_mask;

	priv->gfargrp[priv->num_grps].regs = of_iomap(np, 0);
	if (!priv->gfargrp[priv->num_grps].regs)
		return -ENOMEM;

	priv->gfargrp[priv->num_grps].interruptTransmit =
		irq_of_parse_and_map(np, 0);

	/* If we aren't the FEC we have multiple interrupts */
	if (model && strcasecmp(model, "FEC")) {
		priv->gfargrp[priv->num_grps].interruptReceive =
			irq_of_parse_and_map(np, 1);
		priv->gfargrp[priv->num_grps].interruptError =
			irq_of_parse_and_map(np, 2);
		if (priv->gfargrp[priv->num_grps].interruptTransmit < 0 ||
			priv->gfargrp[priv->num_grps].interruptReceive < 0 ||
			priv->gfargrp[priv->num_grps].interruptError < 0)
			return -EINVAL;
	}

	priv->gfargrp[priv->num_grps].grp_id = priv->num_grps;
	priv->gfargrp[priv->num_grps].priv = priv;
	spin_lock_init(&priv->gfargrp[priv->num_grps].grplock);
	if (priv->mode == MQ_MG_MODE) {
		queue_mask = (u32 *)of_get_property(np,
				"fsl,rx-bit-map", NULL);
		priv->gfargrp[priv->num_grps].rx_bit_map =
			queue_mask ? *queue_mask : (DEFAULT_MAPPING >> priv->num_grps);
		queue_mask = (u32 *)of_get_property(np,
				"fsl,tx-bit-map", NULL);
		priv->gfargrp[priv->num_grps].tx_bit_map =
			queue_mask ? *queue_mask : (DEFAULT_MAPPING >> priv->num_grps);
	} else {
		priv->gfargrp[priv->num_grps].rx_bit_map = 0xFF;
		priv->gfargrp[priv->num_grps].tx_bit_map = 0xFF;
	}
	priv->num_grps++;

	return 0;
}
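
/* Walk the device-tree node: allocate the net_device, create the
 * per-queue structures, and translate OF properties (queue counts,
 * stashing, MAC address, model, PHY handles) into device flags. */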
static int gfar_of_init(struct of_device *ofdev, struct net_device **pdev)
{
	const char *model;
	const char *ctype;
	const void *mac_addr;
	int err = 0, i;
	struct net_device *dev = NULL;
	struct gfar_private *priv = NULL;
	struct device_node *np = ofdev->dev.of_node;
	struct device_node *child = NULL;
	const u32 *stash;
	const u32 *stash_len;
	const u32 *stash_idx;
	unsigned int num_tx_qs, num_rx_qs;
	u32 *tx_queues, *rx_queues;

	if (!np || !of_device_is_available(np))
		return -ENODEV;

	/* parse the num of tx and rx queues */
	tx_queues = (u32 *)of_get_property(np, "fsl,num_tx_queues", NULL);
	num_tx_qs = tx_queues ? *tx_queues : 1;

	if (num_tx_qs > MAX_TX_QS) {
		printk(KERN_ERR "num_tx_qs(=%d) greater than MAX_TX_QS(=%d)\n",
				num_tx_qs, MAX_TX_QS);
		printk(KERN_ERR "Cannot do alloc_etherdev, aborting\n");
		return -EINVAL;
	}

	rx_queues = (u32 *)of_get_property(np, "fsl,num_rx_queues", NULL);
	num_rx_qs = rx_queues ? *rx_queues : 1;

	if (num_rx_qs > MAX_RX_QS) {
		printk(KERN_ERR "num_rx_qs(=%d) greater than MAX_RX_QS(=%d)\n",
				num_rx_qs, MAX_RX_QS);
		printk(KERN_ERR "Cannot do alloc_etherdev, aborting\n");
		return -EINVAL;
	}

	*pdev = alloc_etherdev_mq(sizeof(*priv), num_tx_qs);
	dev = *pdev;
	if (NULL == dev)
		return -ENOMEM;

	priv = netdev_priv(dev);
	priv->node = ofdev->dev.of_node;
	priv->ndev = dev;

	dev->num_tx_queues = num_tx_qs;
	dev->real_num_tx_queues = num_tx_qs;
	priv->num_tx_queues = num_tx_qs;
	priv->num_rx_queues = num_rx_qs;
	priv->num_grps = 0x0;

	model = of_get_property(np, "model", NULL);

	for (i = 0; i < MAXGROUPS; i++)
		priv->gfargrp[i].regs = NULL;

	/* Parse and initialize group specific information */
	if (of_device_is_compatible(np, "fsl,etsec2")) {
		priv->mode = MQ_MG_MODE;
		for_each_child_of_node(np, child) {
			err = gfar_parse_group(child, priv, model);
			if (err)
				goto err_grp_init;
		}
	} else {
		priv->mode = SQ_SG_MODE;
		err = gfar_parse_group(np, priv, model);
		if (err)
			goto err_grp_init;
	}

	for (i = 0; i < priv->num_tx_queues; i++)
		priv->tx_queue[i] = NULL;
	for (i = 0; i < priv->num_rx_queues; i++)
		priv->rx_queue[i] = NULL;

	for (i = 0; i < priv->num_tx_queues; i++) {
		priv->tx_queue[i] = (struct gfar_priv_tx_q *)kzalloc(
				sizeof(struct gfar_priv_tx_q), GFP_KERNEL);
		if (!priv->tx_queue[i]) {
			err = -ENOMEM;
			goto tx_alloc_failed;
		}
		priv->tx_queue[i]->tx_skbuff = NULL;
		priv->tx_queue[i]->qindex = i;
		priv->tx_queue[i]->dev = dev;
		spin_lock_init(&(priv->tx_queue[i]->txlock));
	}

	for (i = 0; i < priv->num_rx_queues; i++) {
		priv->rx_queue[i] = (struct gfar_priv_rx_q *)kzalloc(
				sizeof(struct gfar_priv_rx_q), GFP_KERNEL);
		if (!priv->rx_queue[i]) {
			err = -ENOMEM;
			goto rx_alloc_failed;
		}
		priv->rx_queue[i]->rx_skbuff = NULL;
		priv->rx_queue[i]->qindex = i;
		priv->rx_queue[i]->dev = dev;
		spin_lock_init(&(priv->rx_queue[i]->rxlock));
	}

	stash = of_get_property(np, "bd-stash", NULL);

	if (stash) {
		priv->device_flags |= FSL_GIANFAR_DEV_HAS_BD_STASHING;
		priv->bd_stash_en = 1;
	}

	stash_len = of_get_property(np, "rx-stash-len", NULL);

	if (stash_len)
		priv->rx_stash_size = *stash_len;

	stash_idx = of_get_property(np, "rx-stash-idx", NULL);

	if (stash_idx)
		priv->rx_stash_index = *stash_idx;

	if (stash_len || stash_idx)
		priv->device_flags |= FSL_GIANFAR_DEV_HAS_BUF_STASHING;

	mac_addr = of_get_mac_address(np);
	if (mac_addr)
		memcpy(dev->dev_addr, mac_addr, MAC_ADDR_LEN);

	if (model && !strcasecmp(model, "TSEC"))
		priv->device_flags =
			FSL_GIANFAR_DEV_HAS_GIGABIT |
			FSL_GIANFAR_DEV_HAS_COALESCE |
			FSL_GIANFAR_DEV_HAS_RMON |
			FSL_GIANFAR_DEV_HAS_MULTI_INTR;
	if (model && !strcasecmp(model, "eTSEC"))
		priv->device_flags =
			FSL_GIANFAR_DEV_HAS_GIGABIT |
			FSL_GIANFAR_DEV_HAS_COALESCE |
			FSL_GIANFAR_DEV_HAS_RMON |
			FSL_GIANFAR_DEV_HAS_MULTI_INTR |
			FSL_GIANFAR_DEV_HAS_PADDING |
			FSL_GIANFAR_DEV_HAS_CSUM |
			FSL_GIANFAR_DEV_HAS_VLAN |
			FSL_GIANFAR_DEV_HAS_MAGIC_PACKET |
			FSL_GIANFAR_DEV_HAS_EXTENDED_HASH |
			FSL_GIANFAR_DEV_HAS_TIMER;

	ctype = of_get_property(np, "phy-connection-type", NULL);

	/* We only care about rgmii-id. The rest are autodetected */
	if (ctype && !strcmp(ctype, "rgmii-id"))
		priv->interface = PHY_INTERFACE_MODE_RGMII_ID;
	else
		priv->interface = PHY_INTERFACE_MODE_MII;

	if (of_get_property(np, "fsl,magic-packet", NULL))
		priv->device_flags |= FSL_GIANFAR_DEV_HAS_MAGIC_PACKET;

	priv->phy_node = of_parse_phandle(np, "phy-handle", 0);

	/* Find the TBI PHY. If it's not there, we don't support SGMII */
	priv->tbi_node = of_parse_phandle(np, "tbi-handle", 0);

	return 0;

rx_alloc_failed:
	free_rx_pointers(priv);
tx_alloc_failed:
	free_tx_pointers(priv);
err_grp_init:
	unmap_group_regs(priv);
	return err;
}
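
/* Handle SIOCSHWTSTAMP: enable or disable hardware tx/rx time stamping
 * according to the user's hwtstamp_config, restarting the controller when
 * the rx setting changes. */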
static int gfar_hwtstamp_ioctl(struct net_device *netdev,
		struct ifreq *ifr, int cmd)
{
	struct hwtstamp_config config;
	struct gfar_private *priv = netdev_priv(netdev);

	if (copy_from_user(&config, ifr->ifr_data, sizeof(config)))
		return -EFAULT;

	/* reserved for future extensions */
	if (config.flags)
		return -EINVAL;

	switch (config.tx_type) {
	case HWTSTAMP_TX_OFF:
		priv->hwts_tx_en = 0;
		break;
	case HWTSTAMP_TX_ON:
		if (!(priv->device_flags & FSL_GIANFAR_DEV_HAS_TIMER))
			return -ERANGE;
		priv->hwts_tx_en = 1;
		break;
	default:
		return -ERANGE;
	}

	switch (config.rx_filter) {
	case HWTSTAMP_FILTER_NONE:
		if (priv->hwts_rx_en) {
			stop_gfar(netdev);
			priv->hwts_rx_en = 0;
			startup_gfar(netdev);
		}
		break;
	default:
		if (!(priv->device_flags & FSL_GIANFAR_DEV_HAS_TIMER))
			return -ERANGE;
		if (!priv->hwts_rx_en) {
			stop_gfar(netdev);
			priv->hwts_rx_en = 1;
			startup_gfar(netdev);
		}
		config.rx_filter = HWTSTAMP_FILTER_ALL;
		break;
	}

	return copy_to_user(ifr->ifr_data, &config, sizeof(config)) ?
		-EFAULT : 0;
}
/* Ioctl MII Interface */
static int gfar_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
{
	struct gfar_private *priv = netdev_priv(dev);

	if (!netif_running(dev))
		return -EINVAL;

	if (cmd == SIOCSHWTSTAMP)
		return gfar_hwtstamp_ioctl(dev, rq, cmd);

	if (!priv->phydev)
		return -ENODEV;

	return phy_mii_ioctl(priv->phydev, if_mii(rq), cmd);
}
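
/* Mirror a queue bit map (MSB-first, q0 in the MSB) into the LSB-first
 * form that for_each_set_bit() expects. */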
static unsigned int reverse_bitmap(unsigned int bit_map, unsigned int max_qs)
{
	unsigned int new_bit_map = 0x0;
	int mask = 0x1 << (max_qs - 1), i;

	for (i = 0; i < max_qs; i++) {
		if (bit_map & mask)
			new_bit_map = new_bit_map + (1 << i);
		mask = mask >> 0x1;
	}
	return new_bit_map;
}
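
/* Write the four filer rules forming one cluster for a traffic class
 * (e.g. IPv4/TCP), working downward from rqfar; returns the index of the
 * last rule written. */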
static u32 cluster_entry_per_class(struct gfar_private *priv, u32 rqfar,
		u32 class)
{
	u32 rqfpr = FPR_FILER_MASK;
	u32 rqfcr = 0x0;

	rqfar--;
	rqfcr = RQFCR_CLE | RQFCR_PID_MASK | RQFCR_CMP_EXACT;
	ftp_rqfpr[rqfar] = rqfpr;
	ftp_rqfcr[rqfar] = rqfcr;
	gfar_write_filer(priv, rqfar, rqfcr, rqfpr);

	rqfar--;
	rqfcr = RQFCR_CMP_NOMATCH;
	ftp_rqfpr[rqfar] = rqfpr;
	ftp_rqfcr[rqfar] = rqfcr;
	gfar_write_filer(priv, rqfar, rqfcr, rqfpr);

	rqfar--;
	rqfcr = RQFCR_CMP_EXACT | RQFCR_PID_PARSE | RQFCR_CLE | RQFCR_AND;
	rqfpr = class;
	ftp_rqfcr[rqfar] = rqfcr;
	ftp_rqfpr[rqfar] = rqfpr;
	gfar_write_filer(priv, rqfar, rqfcr, rqfpr);

	rqfar--;
	rqfcr = RQFCR_CMP_EXACT | RQFCR_PID_MASK | RQFCR_AND;
	rqfpr = class;
	ftp_rqfcr[rqfar] = rqfcr;
	ftp_rqfpr[rqfar] = rqfpr;
	gfar_write_filer(priv, rqfar, rqfcr, rqfpr);

	return rqfar;
}
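
/* Populate the rx filer with a default match-all rule plus one cluster
 * per IPv4/IPv6 x TCP/UDP class, and mask out all remaining entries. */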
static void gfar_init_filer_table(struct gfar_private *priv)
{
	int i = 0x0;
	u32 rqfar = MAX_FILER_IDX;
	u32 rqfcr = 0x0;
	u32 rqfpr = FPR_FILER_MASK;

	/* Default rule */
	rqfcr = RQFCR_CMP_MATCH;
	ftp_rqfcr[rqfar] = rqfcr;
	ftp_rqfpr[rqfar] = rqfpr;
	gfar_write_filer(priv, rqfar, rqfcr, rqfpr);

	rqfar = cluster_entry_per_class(priv, rqfar, RQFPR_IPV6);
	rqfar = cluster_entry_per_class(priv, rqfar, RQFPR_IPV6 | RQFPR_UDP);
	rqfar = cluster_entry_per_class(priv, rqfar, RQFPR_IPV6 | RQFPR_TCP);
	rqfar = cluster_entry_per_class(priv, rqfar, RQFPR_IPV4);
	rqfar = cluster_entry_per_class(priv, rqfar, RQFPR_IPV4 | RQFPR_UDP);
	rqfar = cluster_entry_per_class(priv, rqfar, RQFPR_IPV4 | RQFPR_TCP);

	/* cur_filer_idx indicates the first non-masked rule */
	priv->cur_filer_idx = rqfar;

	/* Rest are masked rules */
	rqfcr = RQFCR_CMP_NOMATCH;
	for (i = 0; i < rqfar; i++) {
		ftp_rqfcr[i] = rqfcr;
		ftp_rqfpr[i] = rqfpr;
		gfar_write_filer(priv, i, rqfcr, rqfpr);
	}
}
/* Set up the ethernet device structure, private data,
 * and anything else we need before we start */
static int gfar_probe(struct of_device *ofdev,
		const struct of_device_id *match)
{
	u32 tempval;
	struct net_device *dev = NULL;
	struct gfar_private *priv = NULL;
	struct gfar __iomem *regs = NULL;
	int err = 0, i, grp_idx = 0;
	int len_devname;
	u32 rstat = 0, tstat = 0, rqueue = 0, tqueue = 0;
	u32 isrg = 0;
	u32 __iomem *baddr;

	err = gfar_of_init(ofdev, &dev);

	if (err)
		return err;

	priv = netdev_priv(dev);
	priv->ndev = dev;
	priv->ofdev = ofdev;
	priv->node = ofdev->dev.of_node;
	SET_NETDEV_DEV(dev, &ofdev->dev);

	spin_lock_init(&priv->bflock);
	INIT_WORK(&priv->reset_task, gfar_reset_task);

	dev_set_drvdata(&ofdev->dev, priv);
	regs = priv->gfargrp[0].regs;

	/* Stop the DMA engine now, in case it was running before */
	/* (The firmware could have used it, and left it running). */
	gfar_halt(dev);

	/* Reset MAC layer */
	gfar_write(&regs->maccfg1, MACCFG1_SOFT_RESET);

	/* We need to delay at least 3 TX clocks */
	udelay(2);

	tempval = (MACCFG1_TX_FLOW | MACCFG1_RX_FLOW);
	gfar_write(&regs->maccfg1, tempval);

	/* Initialize MACCFG2. */
	gfar_write(&regs->maccfg2, MACCFG2_INIT_SETTINGS);

	/* Initialize ECNTRL */
	gfar_write(&regs->ecntrl, ECNTRL_INIT_SETTINGS);

	/* Set the dev->base_addr to the gfar reg region */
	dev->base_addr = (unsigned long) regs;

	SET_NETDEV_DEV(dev, &ofdev->dev);

	/* Fill in the dev structure */
	dev->watchdog_timeo = TX_TIMEOUT;
	dev->mtu = 1500;
	dev->netdev_ops = &gfar_netdev_ops;
	dev->ethtool_ops = &gfar_ethtool_ops;

	/* Register for napi ...We are registering NAPI for each grp */
	for (i = 0; i < priv->num_grps; i++)
		netif_napi_add(dev, &priv->gfargrp[i].napi, gfar_poll, GFAR_DEV_WEIGHT);

	if (priv->device_flags & FSL_GIANFAR_DEV_HAS_CSUM) {
		priv->rx_csum_enable = 1;
		dev->features |= NETIF_F_IP_CSUM | NETIF_F_SG | NETIF_F_HIGHDMA;
	} else
		priv->rx_csum_enable = 0;

	priv->vlgrp = NULL;

	if (priv->device_flags & FSL_GIANFAR_DEV_HAS_VLAN)
		dev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;

	if (priv->device_flags & FSL_GIANFAR_DEV_HAS_EXTENDED_HASH) {
		priv->extended_hash = 1;
		priv->hash_width = 9;

		priv->hash_regs[0] = &regs->igaddr0;
		priv->hash_regs[1] = &regs->igaddr1;
		priv->hash_regs[2] = &regs->igaddr2;
		priv->hash_regs[3] = &regs->igaddr3;
		priv->hash_regs[4] = &regs->igaddr4;
		priv->hash_regs[5] = &regs->igaddr5;
		priv->hash_regs[6] = &regs->igaddr6;
		priv->hash_regs[7] = &regs->igaddr7;
		priv->hash_regs[8] = &regs->gaddr0;
		priv->hash_regs[9] = &regs->gaddr1;
		priv->hash_regs[10] = &regs->gaddr2;
		priv->hash_regs[11] = &regs->gaddr3;
		priv->hash_regs[12] = &regs->gaddr4;
		priv->hash_regs[13] = &regs->gaddr5;
		priv->hash_regs[14] = &regs->gaddr6;
		priv->hash_regs[15] = &regs->gaddr7;
	} else {
		priv->extended_hash = 0;
		priv->hash_width = 8;

		priv->hash_regs[0] = &regs->gaddr0;
		priv->hash_regs[1] = &regs->gaddr1;
		priv->hash_regs[2] = &regs->gaddr2;
		priv->hash_regs[3] = &regs->gaddr3;
		priv->hash_regs[4] = &regs->gaddr4;
		priv->hash_regs[5] = &regs->gaddr5;
		priv->hash_regs[6] = &regs->gaddr6;
		priv->hash_regs[7] = &regs->gaddr7;
	}

	if (priv->device_flags & FSL_GIANFAR_DEV_HAS_PADDING)
		priv->padding = DEFAULT_PADDING;
	else
		priv->padding = 0;

	if (dev->features & NETIF_F_IP_CSUM ||
			priv->device_flags & FSL_GIANFAR_DEV_HAS_TIMER)
		dev->hard_header_len += GMAC_FCB_LEN;

	/* Program the isrg regs only if number of grps > 1 */
	if (priv->num_grps > 1) {
		baddr = &regs->isrg0;
		for (i = 0; i < priv->num_grps; i++) {
			isrg |= (priv->gfargrp[i].rx_bit_map << ISRG_SHIFT_RX);
			isrg |= (priv->gfargrp[i].tx_bit_map << ISRG_SHIFT_TX);
			gfar_write(baddr, isrg);
			baddr++;
			isrg = 0x0;
		}
	}

	/* Need to reverse the bit maps as bit_map's MSB is q0
	 * but, for_each_set_bit parses from right to left, which
	 * basically reverses the queue numbers */
	for (i = 0; i < priv->num_grps; i++) {
		priv->gfargrp[i].tx_bit_map = reverse_bitmap(
				priv->gfargrp[i].tx_bit_map, MAX_TX_QS);
		priv->gfargrp[i].rx_bit_map = reverse_bitmap(
				priv->gfargrp[i].rx_bit_map, MAX_RX_QS);
	}

	/* Calculate RSTAT, TSTAT, RQUEUE and TQUEUE values,
	 * also assign queues to groups */
	for (grp_idx = 0; grp_idx < priv->num_grps; grp_idx++) {
		priv->gfargrp[grp_idx].num_rx_queues = 0x0;
		for_each_set_bit(i, &priv->gfargrp[grp_idx].rx_bit_map,
				priv->num_rx_queues) {
			priv->gfargrp[grp_idx].num_rx_queues++;
			priv->rx_queue[i]->grp = &priv->gfargrp[grp_idx];
			rstat = rstat | (RSTAT_CLEAR_RHALT >> i);
			rqueue = rqueue | ((RQUEUE_EN0 | RQUEUE_EX0) >> i);
		}
		priv->gfargrp[grp_idx].num_tx_queues = 0x0;
		for_each_set_bit(i, &priv->gfargrp[grp_idx].tx_bit_map,
				priv->num_tx_queues) {
			priv->gfargrp[grp_idx].num_tx_queues++;
			priv->tx_queue[i]->grp = &priv->gfargrp[grp_idx];
			tstat = tstat | (TSTAT_CLEAR_THALT >> i);
			tqueue = tqueue | (TQUEUE_EN0 >> i);
		}
		priv->gfargrp[grp_idx].rstat = rstat;
		priv->gfargrp[grp_idx].tstat = tstat;
		rstat = tstat = 0;
	}

	gfar_write(&regs->rqueue, rqueue);
	gfar_write(&regs->tqueue, tqueue);

	priv->rx_buffer_size = DEFAULT_RX_BUFFER_SIZE;

	/* Initializing some of the rx/tx queue level parameters */
	for (i = 0; i < priv->num_tx_queues; i++) {
		priv->tx_queue[i]->tx_ring_size = DEFAULT_TX_RING_SIZE;
		priv->tx_queue[i]->num_txbdfree = DEFAULT_TX_RING_SIZE;
		priv->tx_queue[i]->txcoalescing = DEFAULT_TX_COALESCE;
		priv->tx_queue[i]->txic = DEFAULT_TXIC;
	}

	for (i = 0; i < priv->num_rx_queues; i++) {
		priv->rx_queue[i]->rx_ring_size = DEFAULT_RX_RING_SIZE;
		priv->rx_queue[i]->rxcoalescing = DEFAULT_RX_COALESCE;
		priv->rx_queue[i]->rxic = DEFAULT_RXIC;
	}

	/* enable filer if using multiple RX queues */
	if (priv->num_rx_queues > 1)
		priv->rx_filer_enable = 1;
	/* Enable most messages by default */
	priv->msg_enable = (NETIF_MSG_IFUP << 1) - 1;

	/* Carrier starts down, phylib will bring it up */
	netif_carrier_off(dev);

	err = register_netdev(dev);

	if (err) {
		printk(KERN_ERR "%s: Cannot register net device, aborting.\n",
				dev->name);
		goto register_fail;
	}

	device_init_wakeup(&dev->dev,
		priv->device_flags & FSL_GIANFAR_DEV_HAS_MAGIC_PACKET);

	/* fill out IRQ number and name fields */
	len_devname = strlen(dev->name);
	for (i = 0; i < priv->num_grps; i++) {
		strncpy(&priv->gfargrp[i].int_name_tx[0], dev->name,
				len_devname);
		if (priv->device_flags & FSL_GIANFAR_DEV_HAS_MULTI_INTR) {
			strncpy(&priv->gfargrp[i].int_name_tx[len_devname],
				"_g", sizeof("_g"));
			priv->gfargrp[i].int_name_tx[
				strlen(priv->gfargrp[i].int_name_tx)] = i+48;
			strncpy(&priv->gfargrp[i].int_name_tx[strlen(
				priv->gfargrp[i].int_name_tx)],
				"_tx", sizeof("_tx") + 1);

			strncpy(&priv->gfargrp[i].int_name_rx[0], dev->name,
					len_devname);
			strncpy(&priv->gfargrp[i].int_name_rx[len_devname],
					"_g", sizeof("_g"));
			priv->gfargrp[i].int_name_rx[
				strlen(priv->gfargrp[i].int_name_rx)] = i+48;
			strncpy(&priv->gfargrp[i].int_name_rx[strlen(
				priv->gfargrp[i].int_name_rx)],
				"_rx", sizeof("_rx") + 1);

			strncpy(&priv->gfargrp[i].int_name_er[0], dev->name,
					len_devname);
			strncpy(&priv->gfargrp[i].int_name_er[len_devname],
				"_g", sizeof("_g"));
			priv->gfargrp[i].int_name_er[strlen(
					priv->gfargrp[i].int_name_er)] = i+48;
			strncpy(&priv->gfargrp[i].int_name_er[strlen(
					priv->gfargrp[i].int_name_er)],
					"_er", sizeof("_er") + 1);
		} else
			priv->gfargrp[i].int_name_tx[len_devname] = '\0';
	}

	/* Initialize the filer table */
	gfar_init_filer_table(priv);

	/* Create all the sysfs files */
	gfar_init_sysfs(dev);

	/* Print out the device info */
	printk(KERN_INFO DEVICE_NAME "%pM\n", dev->name, dev->dev_addr);

	/* Even more device info helps when determining which kernel */
	/* provided which set of benchmarks. */
	printk(KERN_INFO "%s: Running with NAPI enabled\n", dev->name);
	for (i = 0; i < priv->num_rx_queues; i++)
		printk(KERN_INFO "%s: RX BD ring size for Q[%d]: %d\n",
			dev->name, i, priv->rx_queue[i]->rx_ring_size);
	for (i = 0; i < priv->num_tx_queues; i++)
		printk(KERN_INFO "%s: TX BD ring size for Q[%d]: %d\n",
			dev->name, i, priv->tx_queue[i]->tx_ring_size);

	return 0;

register_fail:
	unmap_group_regs(priv);
	free_tx_pointers(priv);
	free_rx_pointers(priv);
	if (priv->phy_node)
		of_node_put(priv->phy_node);
	if (priv->tbi_node)
		of_node_put(priv->tbi_node);
	free_netdev(dev);

	return err;
}
static int gfar_remove(struct of_device *ofdev)
{
	struct gfar_private *priv = dev_get_drvdata(&ofdev->dev);

	if (priv->phy_node)
		of_node_put(priv->phy_node);
	if (priv->tbi_node)
		of_node_put(priv->tbi_node);

	dev_set_drvdata(&ofdev->dev, NULL);

	unregister_netdev(priv->ndev);
	unmap_group_regs(priv);
	free_netdev(priv->ndev);

	return 0;
}
#ifdef CONFIG_PM

static int gfar_suspend(struct device *dev)
{
	struct gfar_private *priv = dev_get_drvdata(dev);
	struct net_device *ndev = priv->ndev;
	struct gfar __iomem *regs = priv->gfargrp[0].regs;
	unsigned long flags;
	u32 tempval;

	int magic_packet = priv->wol_en &&
		(priv->device_flags & FSL_GIANFAR_DEV_HAS_MAGIC_PACKET);

	netif_device_detach(ndev);

	if (netif_running(ndev)) {

		local_irq_save(flags);
		lock_tx_qs(priv);
		lock_rx_qs(priv);

		gfar_halt_nodisable(ndev);

		/* Disable Tx, and Rx if wake-on-LAN is disabled. */
		tempval = gfar_read(&regs->maccfg1);

		tempval &= ~MACCFG1_TX_EN;

		if (!magic_packet)
			tempval &= ~MACCFG1_RX_EN;

		gfar_write(&regs->maccfg1, tempval);

		unlock_rx_qs(priv);
		unlock_tx_qs(priv);
		local_irq_restore(flags);

		disable_napi(priv);

		if (magic_packet) {
			/* Enable interrupt on Magic Packet */
			gfar_write(&regs->imask, IMASK_MAG);

			/* Enable Magic Packet mode */
			tempval = gfar_read(&regs->maccfg2);
			tempval |= MACCFG2_MPEN;
			gfar_write(&regs->maccfg2, tempval);
		} else {
			phy_stop(priv->phydev);
		}
	}

	return 0;
}
static int gfar_resume(struct device *dev)
{
	struct gfar_private *priv = dev_get_drvdata(dev);
	struct net_device *ndev = priv->ndev;
	struct gfar __iomem *regs = priv->gfargrp[0].regs;
	unsigned long flags;
	u32 tempval;
	int magic_packet = priv->wol_en &&
		(priv->device_flags & FSL_GIANFAR_DEV_HAS_MAGIC_PACKET);

	if (!netif_running(ndev)) {
		netif_device_attach(ndev);
		return 0;
	}

	if (!magic_packet && priv->phydev)
		phy_start(priv->phydev);

	/* Disable Magic Packet mode, in case something
	 * else woke us up.
	 */
	local_irq_save(flags);
	lock_tx_qs(priv);
	lock_rx_qs(priv);

	tempval = gfar_read(&regs->maccfg2);
	tempval &= ~MACCFG2_MPEN;
	gfar_write(&regs->maccfg2, tempval);

	gfar_start(ndev);

	unlock_rx_qs(priv);
	unlock_tx_qs(priv);
	local_irq_restore(flags);

	netif_device_attach(ndev);

	enable_napi(priv);

	return 0;
}
static int gfar_restore(struct device *dev)
{
	struct gfar_private *priv = dev_get_drvdata(dev);
	struct net_device *ndev = priv->ndev;

	if (!netif_running(ndev))
		return 0;

	gfar_init_bds(ndev);
	init_registers(ndev);
	gfar_set_mac_address(ndev);
	gfar_init_mac(ndev);
	gfar_start(ndev);

	priv->oldlink = 0;
	priv->oldspeed = 0;
	priv->oldduplex = -1;

	if (priv->phydev)
		phy_start(priv->phydev);

	netif_device_attach(ndev);
	enable_napi(priv);

	return 0;
}
static struct dev_pm_ops gfar_pm_ops = {
	.suspend = gfar_suspend,
	.resume = gfar_resume,
	.freeze = gfar_suspend,
	.thaw = gfar_resume,
	.restore = gfar_restore,
};

#define GFAR_PM_OPS (&gfar_pm_ops)

#else

#define GFAR_PM_OPS NULL

#endif
/* Reads the controller's registers to determine what interface
 * connects it to the PHY.
 */
static phy_interface_t gfar_get_interface(struct net_device *dev)
{
	struct gfar_private *priv = netdev_priv(dev);
	struct gfar __iomem *regs = priv->gfargrp[0].regs;
	u32 ecntrl;

	ecntrl = gfar_read(&regs->ecntrl);

	if (ecntrl & ECNTRL_SGMII_MODE)
		return PHY_INTERFACE_MODE_SGMII;

	if (ecntrl & ECNTRL_TBI_MODE) {
		if (ecntrl & ECNTRL_REDUCED_MODE)
			return PHY_INTERFACE_MODE_RTBI;
		else
			return PHY_INTERFACE_MODE_TBI;
	}

	if (ecntrl & ECNTRL_REDUCED_MODE) {
		if (ecntrl & ECNTRL_REDUCED_MII_MODE)
			return PHY_INTERFACE_MODE_RMII;
		else {
			phy_interface_t interface = priv->interface;

			/*
			 * This isn't autodetected right now, so it must
			 * be set by the device tree or platform code.
			 */
			if (interface == PHY_INTERFACE_MODE_RGMII_ID)
				return PHY_INTERFACE_MODE_RGMII_ID;

			return PHY_INTERFACE_MODE_RGMII;
		}
	}

	if (priv->device_flags & FSL_GIANFAR_DEV_HAS_GIGABIT)
		return PHY_INTERFACE_MODE_GMII;

	return PHY_INTERFACE_MODE_MII;
}
/* Initializes driver's PHY state, and attaches to the PHY.
 * Returns 0 on success.
 */
static int init_phy(struct net_device *dev)
{
	struct gfar_private *priv = netdev_priv(dev);
	uint gigabit_support =
		priv->device_flags & FSL_GIANFAR_DEV_HAS_GIGABIT ?
		SUPPORTED_1000baseT_Full : 0;
	phy_interface_t interface;

	priv->oldlink = 0;
	priv->oldspeed = 0;
	priv->oldduplex = -1;

	interface = gfar_get_interface(dev);

	priv->phydev = of_phy_connect(dev, priv->phy_node, &adjust_link, 0,
				      interface);
	if (!priv->phydev)
		priv->phydev = of_phy_connect_fixed_link(dev, &adjust_link,
							 interface);
	if (!priv->phydev) {
		dev_err(&dev->dev, "could not attach to PHY\n");
		return -ENODEV;
	}

	if (interface == PHY_INTERFACE_MODE_SGMII)
		gfar_configure_serdes(dev);

	/* Remove any features not supported by the controller */
	priv->phydev->supported &= (GFAR_SUPPORTED | gigabit_support);
	priv->phydev->advertising = priv->phydev->supported;

	return 0;
}
/*
 * Initialize TBI PHY interface for communicating with the
 * SERDES lynx PHY on the chip. We communicate with this PHY
 * through the MDIO bus on each controller, treating it as a
 * "normal" PHY at the address found in the TBIPA register. We assume
 * that the TBIPA register is valid. Either the MDIO bus code will set
 * it to a value that doesn't conflict with other PHYs on the bus, or the
 * value doesn't matter, as there are no other PHYs on the bus.
 */
static void gfar_configure_serdes(struct net_device *dev)
{
	struct gfar_private *priv = netdev_priv(dev);
	struct phy_device *tbiphy;

	if (!priv->tbi_node) {
		dev_warn(&dev->dev, "error: SGMII mode requires that the "
				"device tree specify a tbi-handle\n");
		return;
	}

	tbiphy = of_phy_find_device(priv->tbi_node);
	if (!tbiphy) {
		dev_err(&dev->dev, "error: Could not get TBI device\n");
		return;
	}

	/*
	 * If the link is already up, we must already be ok, and don't need to
	 * configure and reset the TBI<->SerDes link.  Maybe U-Boot configured
	 * everything for us?  Resetting it takes the link down and requires
	 * several seconds for it to come back.
	 */
	if (phy_read(tbiphy, MII_BMSR) & BMSR_LSTATUS)
		return;

	/* Single clk mode, mii mode off(for serdes communication) */
	phy_write(tbiphy, MII_TBICON, TBICON_CLK_SELECT);

	phy_write(tbiphy, MII_ADVERTISE,
			ADVERTISE_1000XFULL | ADVERTISE_1000XPAUSE |
			ADVERTISE_1000XPSE_ASYM);

	phy_write(tbiphy, MII_BMCR, BMCR_ANENABLE |
			BMCR_ANRESTART | BMCR_FULLDPLX | BMCR_SPEED1000);
}
static void init_registers(struct net_device *dev)
{
	struct gfar_private *priv = netdev_priv(dev);
	struct gfar __iomem *regs = NULL;
	int i = 0;

	for (i = 0; i < priv->num_grps; i++) {
		regs = priv->gfargrp[i].regs;
		/* Clear IEVENT */
		gfar_write(&regs->ievent, IEVENT_INIT_CLEAR);

		/* Initialize IMASK */
		gfar_write(&regs->imask, IMASK_INIT_CLEAR);
	}

	regs = priv->gfargrp[0].regs;
	/* Init hash registers to zero */
	gfar_write(&regs->igaddr0, 0);
	gfar_write(&regs->igaddr1, 0);
	gfar_write(&regs->igaddr2, 0);
	gfar_write(&regs->igaddr3, 0);
	gfar_write(&regs->igaddr4, 0);
	gfar_write(&regs->igaddr5, 0);
	gfar_write(&regs->igaddr6, 0);
	gfar_write(&regs->igaddr7, 0);

	gfar_write(&regs->gaddr0, 0);
	gfar_write(&regs->gaddr1, 0);
	gfar_write(&regs->gaddr2, 0);
	gfar_write(&regs->gaddr3, 0);
	gfar_write(&regs->gaddr4, 0);
	gfar_write(&regs->gaddr5, 0);
	gfar_write(&regs->gaddr6, 0);
	gfar_write(&regs->gaddr7, 0);

	/* Zero out the rmon mib registers if it has them */
	if (priv->device_flags & FSL_GIANFAR_DEV_HAS_RMON) {
		memset_io(&(regs->rmon), 0, sizeof(struct rmon_mib));

		/* Mask off the CAM interrupts */
		gfar_write(&regs->rmon.cam1, 0xffffffff);
		gfar_write(&regs->rmon.cam2, 0xffffffff);
	}

	/* Initialize the max receive buffer length */
	gfar_write(&regs->mrblr, priv->rx_buffer_size);

	/* Initialize the Minimum Frame Length Register */
	gfar_write(&regs->minflr, MINFLR_INIT_SETTINGS);
}
/* Halt the receive and transmit queues */
static void gfar_halt_nodisable(struct net_device *dev)
{
	struct gfar_private *priv = netdev_priv(dev);
	struct gfar __iomem *regs = NULL;
	u32 tempval;
	int i = 0;

	for (i = 0; i < priv->num_grps; i++) {
		regs = priv->gfargrp[i].regs;
		/* Mask all interrupts */
		gfar_write(&regs->imask, IMASK_INIT_CLEAR);

		/* Clear all interrupts */
		gfar_write(&regs->ievent, IEVENT_INIT_CLEAR);
	}

	regs = priv->gfargrp[0].regs;
	/* Stop the DMA, and wait for it to stop */
	tempval = gfar_read(&regs->dmactrl);
	if ((tempval & (DMACTRL_GRS | DMACTRL_GTS))
	    != (DMACTRL_GRS | DMACTRL_GTS)) {
		tempval |= (DMACTRL_GRS | DMACTRL_GTS);
		gfar_write(&regs->dmactrl, tempval);

		spin_event_timeout(((gfar_read(&regs->ievent) &
			 (IEVENT_GRSC | IEVENT_GTSC)) ==
			 (IEVENT_GRSC | IEVENT_GTSC)), -1, 0);
	}
}
/* Halt the receive and transmit queues */
void gfar_halt(struct net_device *dev)
{
	struct gfar_private *priv = netdev_priv(dev);
	struct gfar __iomem *regs = priv->gfargrp[0].regs;
	u32 tempval;

	gfar_halt_nodisable(dev);

	/* Disable Rx and Tx */
	tempval = gfar_read(&regs->maccfg1);
	tempval &= ~(MACCFG1_RX_EN | MACCFG1_TX_EN);
	gfar_write(&regs->maccfg1, tempval);
}
static void free_grp_irqs(struct gfar_priv_grp *grp)
{
	free_irq(grp->interruptError, grp);
	free_irq(grp->interruptTransmit, grp);
	free_irq(grp->interruptReceive, grp);
}
void stop_gfar(struct net_device *dev)
{
	struct gfar_private *priv = netdev_priv(dev);
	unsigned long flags;
	int i;

	phy_stop(priv->phydev);

	/* Lock it down */
	local_irq_save(flags);
	lock_tx_qs(priv);
	lock_rx_qs(priv);

	gfar_halt(dev);

	unlock_rx_qs(priv);
	unlock_tx_qs(priv);
	local_irq_restore(flags);

	/* Free the IRQs */
	if (priv->device_flags & FSL_GIANFAR_DEV_HAS_MULTI_INTR) {
		for (i = 0; i < priv->num_grps; i++)
			free_grp_irqs(&priv->gfargrp[i]);
	} else {
		for (i = 0; i < priv->num_grps; i++)
			free_irq(priv->gfargrp[i].interruptTransmit,
					&priv->gfargrp[i]);
	}

	free_skb_resources(priv);
}
static void free_skb_tx_queue(struct gfar_priv_tx_q *tx_queue)
{
	struct txbd8 *txbdp;
	struct gfar_private *priv = netdev_priv(tx_queue->dev);
	int i, j;

	txbdp = tx_queue->tx_bd_base;

	for (i = 0; i < tx_queue->tx_ring_size; i++) {
		if (!tx_queue->tx_skbuff[i])
			continue;

		dma_unmap_single(&priv->ofdev->dev, txbdp->bufPtr,
				txbdp->length, DMA_TO_DEVICE);
		txbdp->lstatus = 0;
		for (j = 0; j < skb_shinfo(tx_queue->tx_skbuff[i])->nr_frags;
				j++) {
			txbdp++;
			dma_unmap_page(&priv->ofdev->dev, txbdp->bufPtr,
					txbdp->length, DMA_TO_DEVICE);
		}
		txbdp++;
		dev_kfree_skb_any(tx_queue->tx_skbuff[i]);
		tx_queue->tx_skbuff[i] = NULL;
	}
	kfree(tx_queue->tx_skbuff);
}
static void free_skb_rx_queue(struct gfar_priv_rx_q *rx_queue)
{
	struct rxbd8 *rxbdp;
	struct gfar_private *priv = netdev_priv(rx_queue->dev);
	int i;

	rxbdp = rx_queue->rx_bd_base;

	for (i = 0; i < rx_queue->rx_ring_size; i++) {
		if (rx_queue->rx_skbuff[i]) {
			dma_unmap_single(&priv->ofdev->dev,
					rxbdp->bufPtr, priv->rx_buffer_size,
					DMA_FROM_DEVICE);
			dev_kfree_skb_any(rx_queue->rx_skbuff[i]);
			rx_queue->rx_skbuff[i] = NULL;
		}
		rxbdp->lstatus = 0;
		rxbdp->bufPtr = 0;
		rxbdp++;
	}
	kfree(rx_queue->rx_skbuff);
}
/* If there are any tx skbs or rx skbs still around, free them.
 * Then free tx_skbuff and rx_skbuff */
static void free_skb_resources(struct gfar_private *priv)
{
	struct gfar_priv_tx_q *tx_queue = NULL;
	struct gfar_priv_rx_q *rx_queue = NULL;
	int i;

	/* Go through all the buffer descriptors and free their data buffers */
	for (i = 0; i < priv->num_tx_queues; i++) {
		tx_queue = priv->tx_queue[i];
		if (tx_queue->tx_skbuff)
			free_skb_tx_queue(tx_queue);
	}

	for (i = 0; i < priv->num_rx_queues; i++) {
		rx_queue = priv->rx_queue[i];
		if (rx_queue->rx_skbuff)
			free_skb_rx_queue(rx_queue);
	}

	dma_free_coherent(&priv->ofdev->dev,
			sizeof(struct txbd8) * priv->total_tx_ring_size +
			sizeof(struct rxbd8) * priv->total_rx_ring_size,
			priv->tx_queue[0]->tx_bd_base,
			priv->tx_queue[0]->tx_bd_dma_base);
	skb_queue_purge(&priv->rx_recycle);
}
void gfar_start(struct net_device *dev)
{
	struct gfar_private *priv = netdev_priv(dev);
	struct gfar __iomem *regs = priv->gfargrp[0].regs;
	u32 tempval;
	int i = 0;

	/* Enable Rx and Tx in MACCFG1 */
	tempval = gfar_read(&regs->maccfg1);
	tempval |= (MACCFG1_RX_EN | MACCFG1_TX_EN);
	gfar_write(&regs->maccfg1, tempval);

	/* Initialize DMACTRL to have WWR and WOP */
	tempval = gfar_read(&regs->dmactrl);
	tempval |= DMACTRL_INIT_SETTINGS;
	gfar_write(&regs->dmactrl, tempval);

	/* Make sure we aren't stopped */
	tempval = gfar_read(&regs->dmactrl);
	tempval &= ~(DMACTRL_GRS | DMACTRL_GTS);
	gfar_write(&regs->dmactrl, tempval);

	for (i = 0; i < priv->num_grps; i++) {
		regs = priv->gfargrp[i].regs;
		/* Clear THLT/RHLT, so that the DMA starts polling now */
		gfar_write(&regs->tstat, priv->gfargrp[i].tstat);
		gfar_write(&regs->rstat, priv->gfargrp[i].rstat);
		/* Unmask the interrupts we look for */
		gfar_write(&regs->imask, IMASK_DEFAULT);
	}

	dev->trans_start = jiffies; /* prevent tx timeout */
}
void gfar_configure_coalescing(struct gfar_private *priv,
	unsigned long tx_mask, unsigned long rx_mask)
{
	struct gfar __iomem *regs = priv->gfargrp[0].regs;
	u32 __iomem *baddr;
	int i = 0;

	/* Backward compatible case ---- even if we enable
	 * multiple queues, there's only single reg to program
	 */
	gfar_write(&regs->txic, 0);
	if (likely(priv->tx_queue[0]->txcoalescing))
		gfar_write(&regs->txic, priv->tx_queue[0]->txic);

	gfar_write(&regs->rxic, 0);
	if (unlikely(priv->rx_queue[0]->rxcoalescing))
		gfar_write(&regs->rxic, priv->rx_queue[0]->rxic);

	if (priv->mode == MQ_MG_MODE) {
		baddr = &regs->txic0;
		for_each_set_bit(i, &tx_mask, priv->num_tx_queues) {
			if (likely(priv->tx_queue[i]->txcoalescing)) {
				gfar_write(baddr + i, 0);
				gfar_write(baddr + i, priv->tx_queue[i]->txic);
			}
		}

		baddr = &regs->rxic0;
		for_each_set_bit(i, &rx_mask, priv->num_rx_queues) {
			if (likely(priv->rx_queue[i]->rxcoalescing)) {
				gfar_write(baddr + i, 0);
				gfar_write(baddr + i, priv->rx_queue[i]->rxic);
			}
		}
	}
}
static int register_grp_irqs(struct gfar_priv_grp *grp)
{
	struct gfar_private *priv = grp->priv;
	struct net_device *dev = priv->ndev;
	int err;

	/* If the device has multiple interrupts, register for
	 * them.  Otherwise, only register for the one */
	if (priv->device_flags & FSL_GIANFAR_DEV_HAS_MULTI_INTR) {
		/* Install our interrupt handlers for Error,
		 * Transmit, and Receive */
		if ((err = request_irq(grp->interruptError, gfar_error, 0,
				grp->int_name_er, grp)) < 0) {
			if (netif_msg_intr(priv))
				printk(KERN_ERR "%s: Can't get IRQ %d\n",
					dev->name, grp->interruptError);

			goto err_irq_fail;
		}

		if ((err = request_irq(grp->interruptTransmit, gfar_transmit,
				0, grp->int_name_tx, grp)) < 0) {
			if (netif_msg_intr(priv))
				printk(KERN_ERR "%s: Can't get IRQ %d\n",
					dev->name, grp->interruptTransmit);
			goto tx_irq_fail;
		}

		if ((err = request_irq(grp->interruptReceive, gfar_receive, 0,
				grp->int_name_rx, grp)) < 0) {
			if (netif_msg_intr(priv))
				printk(KERN_ERR "%s: Can't get IRQ %d\n",
					dev->name, grp->interruptReceive);
			goto rx_irq_fail;
		}
	} else {
		if ((err = request_irq(grp->interruptTransmit, gfar_interrupt, 0,
				grp->int_name_tx, grp)) < 0) {
			if (netif_msg_intr(priv))
				printk(KERN_ERR "%s: Can't get IRQ %d\n",
					dev->name, grp->interruptTransmit);
			goto err_irq_fail;
		}
	}

	return 0;

rx_irq_fail:
	free_irq(grp->interruptTransmit, grp);
tx_irq_fail:
	free_irq(grp->interruptError, grp);
err_irq_fail:
	return err;
}
/* Bring the controller up and running */
int startup_gfar(struct net_device *ndev)
{
	struct gfar_private *priv = netdev_priv(ndev);
	struct gfar __iomem *regs = NULL;
	int err, i, j;

	for (i = 0; i < priv->num_grps; i++) {
		regs = priv->gfargrp[i].regs;
		gfar_write(&regs->imask, IMASK_INIT_CLEAR);
	}

	regs = priv->gfargrp[0].regs;
	err = gfar_alloc_skb_resources(ndev);
	if (err)
		return err;

	gfar_init_mac(ndev);

	for (i = 0; i < priv->num_grps; i++) {
		err = register_grp_irqs(&priv->gfargrp[i]);
		if (err) {
			for (j = 0; j < i; j++)
				free_grp_irqs(&priv->gfargrp[j]);
			goto irq_fail;
		}
	}

	/* Start the controller */
	gfar_start(ndev);

	phy_start(priv->phydev);

	gfar_configure_coalescing(priv, 0xFF, 0xFF);

	return 0;

irq_fail:
	free_skb_resources(priv);
	return err;
}
/* Called when something needs to use the ethernet device */
/* Returns 0 for success. */
static int gfar_enet_open(struct net_device *dev)
{
	struct gfar_private *priv = netdev_priv(dev);
	int err;

	enable_napi(priv);

	skb_queue_head_init(&priv->rx_recycle);

	/* Initialize a bunch of registers */
	init_registers(dev);

	gfar_set_mac_address(dev);

	err = init_phy(dev);

	if (err) {
		disable_napi(priv);
		return err;
	}

	err = startup_gfar(dev);
	if (err) {
		disable_napi(priv);
		return err;
	}

	netif_tx_start_all_queues(dev);

	device_set_wakeup_enable(&dev->dev, priv->wol_en);

	return err;
}
static inline struct txfcb *gfar_add_fcb(struct sk_buff *skb)
{
	struct txfcb *fcb = (struct txfcb *)skb_push(skb, GMAC_FCB_LEN);

	memset(fcb, 0, GMAC_FCB_LEN);

	return fcb;
}
static inline void gfar_tx_checksum(struct sk_buff *skb, struct txfcb *fcb)
{
	u8 flags = 0;

	/* If we're here, it's a IP packet with a TCP or UDP
	 * payload.  We set it to checksum, using a pseudo-header
	 * we provide
	 */
	flags = TXFCB_DEFAULT;

	/* Tell the controller what the protocol is */
	/* And provide the already calculated phcs */
	if (ip_hdr(skb)->protocol == IPPROTO_UDP) {
		flags |= TXFCB_UDP;
		fcb->phcs = udp_hdr(skb)->check;
	} else
		fcb->phcs = tcp_hdr(skb)->check;

	/* l3os is the distance between the start of the
	 * frame (skb->data) and the start of the IP hdr.
	 * l4os is the distance between the start of the
	 * l3 hdr and the l4 hdr */
	fcb->l3os = (u16)(skb_network_offset(skb) - GMAC_FCB_LEN);
	fcb->l4os = skb_network_header_len(skb);

	fcb->flags = flags;
}
void inline gfar_tx_vlan(struct sk_buff *skb, struct txfcb *fcb)
{
	fcb->flags |= TXFCB_VLN;
	fcb->vlctl = vlan_tx_tag_get(skb);
}
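
/* Ring-walk helpers: advance a tx descriptor pointer by 'stride' entries
 * (or by one entry for next_txbd), wrapping at the end of the ring. */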
static inline struct txbd8 *skip_txbd(struct txbd8 *bdp, int stride,
		struct txbd8 *base, int ring_size)
{
	struct txbd8 *new_bd = bdp + stride;

	return (new_bd >= (base + ring_size)) ? (new_bd - ring_size) : new_bd;
}

static inline struct txbd8 *next_txbd(struct txbd8 *bdp, struct txbd8 *base,
		int ring_size)
{
	return skip_txbd(bdp, 1, base, ring_size);
}
1973 /* This is called by the kernel when a frame is ready for transmission. */
1974 /* It is pointed to by the dev->hard_start_xmit function pointer */
1975 static int gfar_start_xmit(struct sk_buff
*skb
, struct net_device
*dev
)
1977 struct gfar_private
*priv
= netdev_priv(dev
);
1978 struct gfar_priv_tx_q
*tx_queue
= NULL
;
1979 struct netdev_queue
*txq
;
1980 struct gfar __iomem
*regs
= NULL
;
1981 struct txfcb
*fcb
= NULL
;
1982 struct txbd8
*txbdp
, *txbdp_start
, *base
, *txbdp_tstamp
= NULL
;
1984 int i
, rq
= 0, do_tstamp
= 0;
1986 unsigned long flags
;
1987 unsigned int nr_frags
, nr_txbds
, length
;
1988 union skb_shared_tx
*shtx
;
1990 rq
= skb
->queue_mapping
;
1991 tx_queue
= priv
->tx_queue
[rq
];
1992 txq
= netdev_get_tx_queue(dev
, rq
);
1993 base
= tx_queue
->tx_bd_base
;
1994 regs
= tx_queue
->grp
->regs
;
1997 /* check if time stamp should be generated */
1998 if (unlikely(shtx
->hardware
&& priv
->hwts_tx_en
))
2001 /* make space for additional header when fcb is needed */
2002 if (((skb
->ip_summed
== CHECKSUM_PARTIAL
) ||
2003 (priv
->vlgrp
&& vlan_tx_tag_present(skb
)) ||
2004 unlikely(do_tstamp
)) &&
2005 (skb_headroom(skb
) < GMAC_FCB_LEN
)) {
2006 struct sk_buff
*skb_new
;
2008 skb_new
= skb_realloc_headroom(skb
					    GMAC_FCB_LEN);
		if (!skb_new) {
			dev->stats.tx_errors++;
			kfree_skb(skb);
			return NETDEV_TX_OK;
		}
		kfree_skb(skb);
		skb = skb_new;
	}

	/* total number of fragments in the SKB */
	nr_frags = skb_shinfo(skb)->nr_frags;

	/* calculate the required number of TxBDs for this skb */
	if (unlikely(do_tstamp))
		nr_txbds = nr_frags + 2;
	else
		nr_txbds = nr_frags + 1;

	/* check if there is space to queue this packet */
	if (nr_txbds > tx_queue->num_txbdfree) {
		/* no space, stop the queue */
		netif_tx_stop_queue(txq);
		dev->stats.tx_fifo_errors++;
		return NETDEV_TX_BUSY;
	}

	/* Update transmit stats */
	txq->tx_bytes += skb->len;
	txq->tx_packets++;

	txbdp = txbdp_start = tx_queue->cur_tx;
	lstatus = txbdp->lstatus;

	/* Time stamp insertion requires one additional TxBD */
	if (unlikely(do_tstamp))
		txbdp_tstamp = txbdp = next_txbd(txbdp, base,
				tx_queue->tx_ring_size);

	if (nr_frags == 0) {
		if (unlikely(do_tstamp))
			txbdp_tstamp->lstatus |= BD_LFLAG(TXBD_LAST |
					TXBD_INTERRUPT);
		else
			lstatus |= BD_LFLAG(TXBD_LAST | TXBD_INTERRUPT);
	} else {
		/* Place the fragment addresses and lengths into the TxBDs */
		for (i = 0; i < nr_frags; i++) {
			/* Point at the next BD, wrapping as needed */
			txbdp = next_txbd(txbdp, base, tx_queue->tx_ring_size);

			length = skb_shinfo(skb)->frags[i].size;

			lstatus = txbdp->lstatus | length |
				BD_LFLAG(TXBD_READY);

			/* Handle the last BD specially */
			if (i == nr_frags - 1)
				lstatus |= BD_LFLAG(TXBD_LAST | TXBD_INTERRUPT);

			bufaddr = dma_map_page(&priv->ofdev->dev,
					skb_shinfo(skb)->frags[i].page,
					skb_shinfo(skb)->frags[i].page_offset,
					length,
					DMA_TO_DEVICE);

			/* set the TxBD length and buffer pointer */
			txbdp->bufPtr = bufaddr;
			txbdp->lstatus = lstatus;
		}

		lstatus = txbdp_start->lstatus;
	}

	/* Set up checksumming */
	if (CHECKSUM_PARTIAL == skb->ip_summed) {
		fcb = gfar_add_fcb(skb);
		lstatus |= BD_LFLAG(TXBD_TOE);
		gfar_tx_checksum(skb, fcb);
	}

	if (priv->vlgrp && vlan_tx_tag_present(skb)) {
		if (unlikely(NULL == fcb)) {
			fcb = gfar_add_fcb(skb);
			lstatus |= BD_LFLAG(TXBD_TOE);
		}

		gfar_tx_vlan(skb, fcb);
	}

	/* Setup tx hardware time stamping if requested */
	if (unlikely(do_tstamp)) {
		shtx->in_progress = 1;
		if (fcb == NULL)
			fcb = gfar_add_fcb(skb);
		fcb->ptp = 1;
		lstatus |= BD_LFLAG(TXBD_TOE);
	}

	txbdp_start->bufPtr = dma_map_single(&priv->ofdev->dev, skb->data,
			skb_headlen(skb), DMA_TO_DEVICE);

	/*
	 * If time stamping is requested one additional TxBD must be set up. The
	 * first TxBD points to the FCB and must have a data length of
	 * GMAC_FCB_LEN. The second TxBD points to the actual frame data with
	 * the full frame length.
	 */
	if (unlikely(do_tstamp)) {
		txbdp_tstamp->bufPtr = txbdp_start->bufPtr + GMAC_FCB_LEN;
		txbdp_tstamp->lstatus |= BD_LFLAG(TXBD_READY) |
				(skb_headlen(skb) - GMAC_FCB_LEN);
		lstatus |= BD_LFLAG(TXBD_CRC | TXBD_READY) | GMAC_FCB_LEN;
	} else {
		lstatus |= BD_LFLAG(TXBD_CRC | TXBD_READY) | skb_headlen(skb);
	}

	/*
	 * We can work in parallel with gfar_clean_tx_ring(), except
	 * when modifying num_txbdfree. Note that we didn't grab the lock
	 * when we were reading the num_txbdfree and checking for available
	 * space, that's because outside of this function it can only grow,
	 * and once we've got needed space, it cannot suddenly disappear.
	 *
	 * The lock also protects us from gfar_error(), which can modify
	 * regs->tstat and thus retrigger the transfers, which is why we
	 * also must grab the lock before setting ready bit for the first
	 * to be transmitted BD.
	 */
	spin_lock_irqsave(&tx_queue->txlock, flags);

	/*
	 * The powerpc-specific eieio() is used, as wmb() has too strong
	 * semantics (it requires synchronization between cacheable and
	 * uncacheable mappings, which eieio doesn't provide and which we
	 * don't need), thus requiring a more expensive sync instruction. At
	 * some point, the set of architecture-independent barrier functions
	 * should be expanded to include weaker barriers.
	 */
	eieio();

	txbdp_start->lstatus = lstatus;

	eieio(); /* force lstatus write before tx_skbuff */

	tx_queue->tx_skbuff[tx_queue->skb_curtx] = skb;

	/* Update the current skb pointer to the next entry we will use
	 * (wrapping if necessary) */
	tx_queue->skb_curtx = (tx_queue->skb_curtx + 1) &
		TX_RING_MOD_MASK(tx_queue->tx_ring_size);

	tx_queue->cur_tx = next_txbd(txbdp, base, tx_queue->tx_ring_size);

	/* reduce TxBD free count */
	tx_queue->num_txbdfree -= (nr_txbds);

	/* If the next BD still needs to be cleaned up, then the bds
	   are full.  We need to tell the kernel to stop sending us stuff. */
	if (!tx_queue->num_txbdfree) {
		netif_tx_stop_queue(txq);

		dev->stats.tx_fifo_errors++;
	}

	/* Tell the DMA to go go go */
	gfar_write(&regs->tstat, TSTAT_CLEAR_THALT >> tx_queue->qindex);

	spin_unlock_irqrestore(&tx_queue->txlock, flags);

	return NETDEV_TX_OK;
}
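
/*
 * Worked example of the descriptor accounting above (illustrative only,
 * not part of the original source). A 3-fragment skb needs
 * nr_txbds = nr_frags + 1 = 4 BDs (one for the linear part, three for
 * the fragments), or 5 when a timestamp BD is inserted. Ring indices
 * wrap with a mask rather than a modulo; assuming TX_RING_MOD_MASK(size)
 * expands to (size - 1), as in gianfar.h:
 *
 *	TX_RING_MOD_MASK(256) == 0xff, so (255 + 1) & 0xff == 0
 *
 * which is only correct because the ring size is a power of two.
 */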
/* Stops the kernel queue, and halts the controller */
static int gfar_close(struct net_device *dev)
{
	struct gfar_private *priv = netdev_priv(dev);

	disable_napi(priv);

	cancel_work_sync(&priv->reset_task);
	stop_gfar(dev);

	/* Disconnect from the PHY */
	phy_disconnect(priv->phydev);
	priv->phydev = NULL;

	netif_tx_stop_all_queues(dev);

	return 0;
}
/* Changes the mac address if the controller is not running. */
static int gfar_set_mac_address(struct net_device *dev)
{
	gfar_set_mac_for_addr(dev, 0, dev->dev_addr);

	return 0;
}
/* Enables and disables VLAN insertion/extraction */
static void gfar_vlan_rx_register(struct net_device *dev,
		struct vlan_group *grp)
{
	struct gfar_private *priv = netdev_priv(dev);
	struct gfar __iomem *regs = NULL;
	unsigned long flags;
	u32 tempval;

	regs = priv->gfargrp[0].regs;
	local_irq_save(flags);
	lock_rx_qs(priv);

	priv->vlgrp = grp;

	if (grp) {
		/* Enable VLAN tag insertion */
		tempval = gfar_read(&regs->tctrl);
		tempval |= TCTRL_VLINS;

		gfar_write(&regs->tctrl, tempval);

		/* Enable VLAN tag extraction */
		tempval = gfar_read(&regs->rctrl);
		tempval |= (RCTRL_VLEX | RCTRL_PRSDEP_INIT);
		gfar_write(&regs->rctrl, tempval);
	} else {
		/* Disable VLAN tag insertion */
		tempval = gfar_read(&regs->tctrl);
		tempval &= ~TCTRL_VLINS;
		gfar_write(&regs->tctrl, tempval);

		/* Disable VLAN tag extraction */
		tempval = gfar_read(&regs->rctrl);
		tempval &= ~RCTRL_VLEX;
		/* If parse is no longer required, then disable parser */
		if (tempval & RCTRL_REQ_PARSER)
			tempval |= RCTRL_PRSDEP_INIT;
		else
			tempval &= ~RCTRL_PRSDEP_INIT;
		gfar_write(&regs->rctrl, tempval);
	}

	gfar_change_mtu(dev, dev->mtu);

	unlock_rx_qs(priv);
	local_irq_restore(flags);
}
static int gfar_change_mtu(struct net_device *dev, int new_mtu)
{
	int tempsize, tempval;
	struct gfar_private *priv = netdev_priv(dev);
	struct gfar __iomem *regs = priv->gfargrp[0].regs;
	int oldsize = priv->rx_buffer_size;
	int frame_size = new_mtu + ETH_HLEN;

	if (priv->vlgrp)
		frame_size += VLAN_HLEN;

	if ((frame_size < 64) || (frame_size > JUMBO_FRAME_SIZE)) {
		if (netif_msg_drv(priv))
			printk(KERN_ERR "%s: Invalid MTU setting\n",
					dev->name);
		return -EINVAL;
	}

	if (gfar_uses_fcb(priv))
		frame_size += GMAC_FCB_LEN;

	frame_size += priv->padding;

	tempsize =
	    (frame_size & ~(INCREMENTAL_BUFFER_SIZE - 1)) +
	    INCREMENTAL_BUFFER_SIZE;

	/* Only stop and start the controller if it isn't already
	 * stopped, and we changed something */
	if ((oldsize != tempsize) && (dev->flags & IFF_UP))
		stop_gfar(dev);

	priv->rx_buffer_size = tempsize;

	dev->mtu = new_mtu;

	gfar_write(&regs->mrblr, priv->rx_buffer_size);
	gfar_write(&regs->maxfrm, priv->rx_buffer_size);

	/* If the mtu is larger than the max size for standard
	 * ethernet frames (ie, a jumbo frame), then set maccfg2
	 * to allow huge frames, and to check the length */
	tempval = gfar_read(&regs->maccfg2);

	if (priv->rx_buffer_size > DEFAULT_RX_BUFFER_SIZE)
		tempval |= (MACCFG2_HUGEFRAME | MACCFG2_LENGTHCHECK);
	else
		tempval &= ~(MACCFG2_HUGEFRAME | MACCFG2_LENGTHCHECK);

	gfar_write(&regs->maccfg2, tempval);

	if ((oldsize != tempsize) && (dev->flags & IFF_UP))
		startup_gfar(dev);

	return 0;
}
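
/*
 * Worked example of the buffer-size rounding above (illustrative only;
 * assumes INCREMENTAL_BUFFER_SIZE is 512 and GMAC_FCB_LEN is 8, as
 * defined in gianfar.h):
 *
 *	new_mtu = 1500  ->  frame_size = 1500 + ETH_HLEN(14) = 1514
 *	with an FCB:        frame_size = 1522
 *	tempsize = (1522 & ~511) + 512 = 1024 + 512 = 1536
 *
 * i.e. the rx buffer is frame_size rounded up to the next multiple of
 * INCREMENTAL_BUFFER_SIZE. Note a frame_size that is already a multiple
 * still gains one full increment (1536 -> 2048).
 */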
/* gfar_reset_task gets scheduled when a packet has not been
 * transmitted after a set amount of time.
 * For now, assume that clearing out all the structures, and
 * starting over will fix the problem.
 */
static void gfar_reset_task(struct work_struct *work)
{
	struct gfar_private *priv = container_of(work, struct gfar_private,
			reset_task);
	struct net_device *dev = priv->ndev;

	if (dev->flags & IFF_UP) {
		netif_tx_stop_all_queues(dev);
		stop_gfar(dev);
		startup_gfar(dev);
		netif_tx_start_all_queues(dev);
	}

	netif_tx_schedule_all(dev);
}
static void gfar_timeout(struct net_device *dev)
{
	struct gfar_private *priv = netdev_priv(dev);

	dev->stats.tx_errors++;
	schedule_work(&priv->reset_task);
}
/* Interrupt Handler for Transmit complete */
static int gfar_clean_tx_ring(struct gfar_priv_tx_q *tx_queue)
{
	struct net_device *dev = tx_queue->dev;
	struct gfar_private *priv = netdev_priv(dev);
	struct gfar_priv_rx_q *rx_queue = NULL;
	struct txbd8 *bdp, *next = NULL;
	struct txbd8 *lbdp = NULL;
	struct txbd8 *base = tx_queue->tx_bd_base;
	struct sk_buff *skb;
	int skb_dirtytx;
	int tx_ring_size = tx_queue->tx_ring_size;
	int frags = 0, nr_txbds = 0;
	int i;
	int howmany = 0;
	u32 lstatus;
	size_t buflen;
	union skb_shared_tx *shtx;

	rx_queue = priv->rx_queue[tx_queue->qindex];
	bdp = tx_queue->dirty_tx;
	skb_dirtytx = tx_queue->skb_dirtytx;

	while ((skb = tx_queue->tx_skbuff[skb_dirtytx])) {
		unsigned long flags;

		frags = skb_shinfo(skb)->nr_frags;

		/*
		 * When time stamping, one additional TxBD must be freed.
		 * Also, we need to dma_unmap_single() the TxPAL.
		 */
		shtx = skb_tx(skb);
		if (unlikely(shtx->in_progress))
			nr_txbds = frags + 2;
		else
			nr_txbds = frags + 1;

		lbdp = skip_txbd(bdp, nr_txbds - 1, base, tx_ring_size);

		lstatus = lbdp->lstatus;

		/* Only clean completed frames */
		if ((lstatus & BD_LFLAG(TXBD_READY)) &&
				(lstatus & BD_LENGTH_MASK))
			break;

		if (unlikely(shtx->in_progress)) {
			next = next_txbd(bdp, base, tx_ring_size);
			buflen = next->length + GMAC_FCB_LEN;
		} else
			buflen = bdp->length;

		dma_unmap_single(&priv->ofdev->dev, bdp->bufPtr,
				buflen, DMA_TO_DEVICE);

		if (unlikely(shtx->in_progress)) {
			struct skb_shared_hwtstamps shhwtstamps;
			u64 *ns = (u64 *) (((u32)skb->data + 0x10) & ~0x7);
			memset(&shhwtstamps, 0, sizeof(shhwtstamps));
			shhwtstamps.hwtstamp = ns_to_ktime(*ns);
			skb_tstamp_tx(skb, &shhwtstamps);
			bdp->lstatus &= BD_LFLAG(TXBD_WRAP);
			bdp = next;
		}

		bdp->lstatus &= BD_LFLAG(TXBD_WRAP);
		bdp = next_txbd(bdp, base, tx_ring_size);

		for (i = 0; i < frags; i++) {
			dma_unmap_page(&priv->ofdev->dev,
					bdp->bufPtr,
					bdp->length,
					DMA_TO_DEVICE);
			bdp->lstatus &= BD_LFLAG(TXBD_WRAP);
			bdp = next_txbd(bdp, base, tx_ring_size);
		}

		/*
		 * If there's room in the queue (limit it to rx_buffer_size)
		 * we add this skb back into the pool, if it's the right size
		 */
		if (skb_queue_len(&priv->rx_recycle) < rx_queue->rx_ring_size &&
				skb_recycle_check(skb, priv->rx_buffer_size +
					RXBUF_ALIGNMENT))
			__skb_queue_head(&priv->rx_recycle, skb);
		else
			dev_kfree_skb_any(skb);

		tx_queue->tx_skbuff[skb_dirtytx] = NULL;

		skb_dirtytx = (skb_dirtytx + 1) &
			TX_RING_MOD_MASK(tx_ring_size);

		howmany++;
		spin_lock_irqsave(&tx_queue->txlock, flags);
		tx_queue->num_txbdfree += nr_txbds;
		spin_unlock_irqrestore(&tx_queue->txlock, flags);
	}

	/* If we freed a buffer, we can restart transmission, if necessary */
	if (__netif_subqueue_stopped(dev, tx_queue->qindex) && tx_queue->num_txbdfree)
		netif_wake_subqueue(dev, tx_queue->qindex);

	/* Update dirty indicators */
	tx_queue->skb_dirtytx = skb_dirtytx;
	tx_queue->dirty_tx = bdp;

	return howmany;
}
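
/*
 * Illustrative sketch of the num_txbdfree protocol (not from the original
 * source): on a 64-entry ring, num_txbdfree starts at 64. gfar_start_xmit()
 * of a 3-fragment skb subtracts 4 under txlock (64 -> 60); once the
 * controller has sent the frame, gfar_clean_tx_ring() adds the same 4 back
 * under txlock (60 -> 64). A reader outside the lock may see a stale,
 * smaller value, which is safe: the count can only grow behind its back,
 * so a "there is room" decision never becomes wrong.
 */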
static void gfar_schedule_cleanup(struct gfar_priv_grp *gfargrp)
{
	unsigned long flags;

	spin_lock_irqsave(&gfargrp->grplock, flags);
	if (napi_schedule_prep(&gfargrp->napi)) {
		gfar_write(&gfargrp->regs->imask, IMASK_RTX_DISABLED);
		__napi_schedule(&gfargrp->napi);
	} else {
		/*
		 * Clear IEVENT, so interrupts aren't called again
		 * because of the packets that have already arrived.
		 */
		gfar_write(&gfargrp->regs->ievent, IEVENT_RTX_MASK);
	}
	spin_unlock_irqrestore(&gfargrp->grplock, flags);
}
/* Interrupt Handler for Transmit complete */
static irqreturn_t gfar_transmit(int irq, void *grp_id)
{
	gfar_schedule_cleanup((struct gfar_priv_grp *)grp_id);
	return IRQ_HANDLED;
}
static void gfar_new_rxbdp(struct gfar_priv_rx_q *rx_queue, struct rxbd8 *bdp,
		struct sk_buff *skb)
{
	struct net_device *dev = rx_queue->dev;
	struct gfar_private *priv = netdev_priv(dev);
	dma_addr_t buf;

	buf = dma_map_single(&priv->ofdev->dev, skb->data,
			priv->rx_buffer_size, DMA_FROM_DEVICE);
	gfar_init_rxbdp(rx_queue, bdp, buf);
}
struct sk_buff * gfar_new_skb(struct net_device *dev)
{
	unsigned int alignamount;
	struct gfar_private *priv = netdev_priv(dev);
	struct sk_buff *skb = NULL;

	skb = __skb_dequeue(&priv->rx_recycle);
	if (!skb)
		skb = netdev_alloc_skb(dev,
				priv->rx_buffer_size + RXBUF_ALIGNMENT);

	if (!skb)
		return NULL;

	alignamount = RXBUF_ALIGNMENT -
		(((unsigned long) skb->data) & (RXBUF_ALIGNMENT - 1));

	/* We need the data buffer to be aligned properly.  We will reserve
	 * as many bytes as needed to align the data properly
	 */
	skb_reserve(skb, alignamount);
	GFAR_CB(skb)->alignamount = alignamount;

	return skb;
}
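
/*
 * Worked example of the alignment math above (illustrative only; assumes
 * RXBUF_ALIGNMENT is 64, as defined in gianfar.h):
 *
 *	skb->data == 0x...1004:  alignamount = 64 - (0x1004 & 63) = 60
 *	skb_reserve(skb, 60) moves data to 0x...1040, a 64-byte boundary.
 *
 * An already-aligned buffer (0x...1000) yields alignamount == 64, so a
 * full RXBUF_ALIGNMENT of headroom is consumed in that case; the saved
 * alignamount lets the cleanup path un-reserve() exactly what was taken.
 */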
static inline void count_errors(unsigned short status, struct net_device *dev)
{
	struct gfar_private *priv = netdev_priv(dev);
	struct net_device_stats *stats = &dev->stats;
	struct gfar_extra_stats *estats = &priv->extra_stats;

	/* If the packet was truncated, none of the other errors
	 * matter */
	if (status & RXBD_TRUNCATED) {
		stats->rx_length_errors++;

		estats->rx_trunc++;

		return;
	}
	/* Count the errors, if there were any */
	if (status & (RXBD_LARGE | RXBD_SHORT)) {
		stats->rx_length_errors++;

		if (status & RXBD_LARGE)
			estats->rx_large++;
		else
			estats->rx_short++;
	}
	if (status & RXBD_NONOCTET) {
		stats->rx_frame_errors++;
		estats->rx_nonoctet++;
	}
	if (status & RXBD_CRCERR) {
		estats->rx_crcerr++;
		stats->rx_crc_errors++;
	}
	if (status & RXBD_OVERRUN) {
		estats->rx_overrun++;
		stats->rx_crc_errors++;
	}
}
irqreturn_t gfar_receive(int irq, void *grp_id)
{
	gfar_schedule_cleanup((struct gfar_priv_grp *)grp_id);
	return IRQ_HANDLED;
}
static inline void gfar_rx_checksum(struct sk_buff *skb, struct rxfcb *fcb)
{
	/* If valid headers were found, and valid sums
	 * were verified, then we tell the kernel that no
	 * checksumming is necessary.  Otherwise, it is */
	if ((fcb->flags & RXFCB_CSUM_MASK) == (RXFCB_CIP | RXFCB_CTU))
		skb->ip_summed = CHECKSUM_UNNECESSARY;
	else
		skb->ip_summed = CHECKSUM_NONE;
}
/* gfar_process_frame() -- handle one incoming packet if skb
 * isn't NULL.  */
static int gfar_process_frame(struct net_device *dev, struct sk_buff *skb,
			      int amount_pull)
{
	struct gfar_private *priv = netdev_priv(dev);
	struct rxfcb *fcb = NULL;

	int ret;

	/* fcb is at the beginning if exists */
	fcb = (struct rxfcb *)skb->data;

	/* Remove the FCB from the skb */
	/* Remove the padded bytes, if there are any */
	if (amount_pull) {
		skb_record_rx_queue(skb, fcb->rq);
		skb_pull(skb, amount_pull);
	}

	/* Get receive timestamp from the skb */
	if (priv->hwts_rx_en) {
		struct skb_shared_hwtstamps *shhwtstamps = skb_hwtstamps(skb);
		u64 *ns = (u64 *) skb->data;
		memset(shhwtstamps, 0, sizeof(*shhwtstamps));
		shhwtstamps->hwtstamp = ns_to_ktime(*ns);
	}

	if (priv->padding)
		skb_pull(skb, priv->padding);

	if (priv->rx_csum_enable)
		gfar_rx_checksum(skb, fcb);

	/* Tell the skb what kind of packet this is */
	skb->protocol = eth_type_trans(skb, dev);

	/* Send the packet up the stack */
	if (unlikely(priv->vlgrp && (fcb->flags & RXFCB_VLN)))
		ret = vlan_hwaccel_receive_skb(skb, priv->vlgrp, fcb->vlctl);
	else
		ret = netif_receive_skb(skb);

	if (NET_RX_DROP == ret)
		priv->extra_stats.kernel_dropped++;

	return 0;
}
/* gfar_clean_rx_ring() -- Processes each frame in the rx ring
 * until the budget/quota has been reached. Returns the number
 * of frames handled
 */
int gfar_clean_rx_ring(struct gfar_priv_rx_q *rx_queue, int rx_work_limit)
{
	struct net_device *dev = rx_queue->dev;
	struct rxbd8 *bdp, *base;
	struct sk_buff *skb;
	int pkt_len;
	int amount_pull;
	int howmany = 0;
	struct gfar_private *priv = netdev_priv(dev);

	/* Get the first full descriptor */
	bdp = rx_queue->cur_rx;
	base = rx_queue->rx_bd_base;

	amount_pull = (gfar_uses_fcb(priv) ? GMAC_FCB_LEN : 0);

	while (!((bdp->status & RXBD_EMPTY) || (--rx_work_limit < 0))) {
		struct sk_buff *newskb;
		rmb();

		/* Add another skb for the future */
		newskb = gfar_new_skb(dev);

		skb = rx_queue->rx_skbuff[rx_queue->skb_currx];

		dma_unmap_single(&priv->ofdev->dev, bdp->bufPtr,
				priv->rx_buffer_size, DMA_FROM_DEVICE);

		if (unlikely(!(bdp->status & RXBD_ERR) &&
				bdp->length > priv->rx_buffer_size))
			bdp->status = RXBD_LARGE;

		/* We drop the frame if we failed to allocate a new buffer */
		if (unlikely(!newskb || !(bdp->status & RXBD_LAST) ||
				bdp->status & RXBD_ERR)) {
			count_errors(bdp->status, dev);

			if (unlikely(!newskb))
				newskb = skb;
			else if (skb) {
				/*
				 * We need to un-reserve() the skb to what it
				 * was before gfar_new_skb() re-aligned
				 * it to an RXBUF_ALIGNMENT boundary
				 * before we put the skb back on the
				 * recycle list.
				 */
				skb_reserve(skb, -GFAR_CB(skb)->alignamount);
				__skb_queue_head(&priv->rx_recycle, skb);
			}
		} else {
			/* Increment the number of packets */
			rx_queue->stats.rx_packets++;
			howmany++;

			if (likely(skb)) {
				pkt_len = bdp->length - ETH_FCS_LEN;
				/* Remove the FCS from the packet length */
				skb_put(skb, pkt_len);
				rx_queue->stats.rx_bytes += pkt_len;
				skb_record_rx_queue(skb, rx_queue->qindex);
				gfar_process_frame(dev, skb, amount_pull);
			} else {
				if (netif_msg_rx_err(priv))
					printk(KERN_WARNING
					       "%s: Missing skb!\n", dev->name);
				rx_queue->stats.rx_dropped++;
				priv->extra_stats.rx_skbmissing++;
			}
		}

		rx_queue->rx_skbuff[rx_queue->skb_currx] = newskb;

		/* Setup the new bdp */
		gfar_new_rxbdp(rx_queue, bdp, newskb);

		/* Update to the next pointer */
		bdp = next_bd(bdp, base, rx_queue->rx_ring_size);

		/* update to point at the next skb */
		rx_queue->skb_currx =
			(rx_queue->skb_currx + 1) &
			RX_RING_MOD_MASK(rx_queue->rx_ring_size);
	}

	/* Update the current rxbd pointer to be the next one */
	rx_queue->cur_rx = bdp;

	return howmany;
}
static int gfar_poll(struct napi_struct *napi, int budget)
{
	struct gfar_priv_grp *gfargrp = container_of(napi,
			struct gfar_priv_grp, napi);
	struct gfar_private *priv = gfargrp->priv;
	struct gfar __iomem *regs = gfargrp->regs;
	struct gfar_priv_tx_q *tx_queue = NULL;
	struct gfar_priv_rx_q *rx_queue = NULL;
	int rx_cleaned = 0, budget_per_queue = 0, rx_cleaned_per_queue = 0;
	int tx_cleaned = 0, i, left_over_budget = budget;
	unsigned long serviced_queues = 0;
	int num_queues = 0;

	num_queues = gfargrp->num_rx_queues;
	budget_per_queue = budget/num_queues;

	/* Clear IEVENT, so interrupts aren't called again
	 * because of the packets that have already arrived */
	gfar_write(&regs->ievent, IEVENT_RTX_MASK);

	while (num_queues && left_over_budget) {

		budget_per_queue = left_over_budget/num_queues;
		left_over_budget = 0;

		for_each_set_bit(i, &gfargrp->rx_bit_map, priv->num_rx_queues) {
			if (test_bit(i, &serviced_queues))
				continue;
			rx_queue = priv->rx_queue[i];
			tx_queue = priv->tx_queue[rx_queue->qindex];

			tx_cleaned += gfar_clean_tx_ring(tx_queue);
			rx_cleaned_per_queue = gfar_clean_rx_ring(rx_queue,
							budget_per_queue);
			rx_cleaned += rx_cleaned_per_queue;
			if (rx_cleaned_per_queue < budget_per_queue) {
				left_over_budget = left_over_budget +
					(budget_per_queue - rx_cleaned_per_queue);
				set_bit(i, &serviced_queues);
				num_queues--;
			}
		}
	}

	if (tx_cleaned)
		return budget;

	if (rx_cleaned < budget) {
		napi_complete(napi);

		/* Clear the halt bit in RSTAT */
		gfar_write(&regs->rstat, gfargrp->rstat);

		gfar_write(&regs->imask, IMASK_DEFAULT);

		/* If we are coalescing interrupts, update the timer */
		/* Otherwise, clear it */
		gfar_configure_coalescing(priv,
				gfargrp->rx_bit_map, gfargrp->tx_bit_map);
	}

	return rx_cleaned;
}
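
/*
 * Worked example of the budget redistribution above (illustrative only):
 * with budget = 64 and two rx queues in the group, the first pass gives
 * each queue 32. If queue 0 only finds 10 frames, its unused 22 slots go
 * into left_over_budget, queue 0 is marked in serviced_queues, and
 * num_queues drops to 1; the second pass then offers queue 1 the full
 * remaining 22. The loop ends once every queue is serviced or the budget
 * is exhausted.
 */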
#ifdef CONFIG_NET_POLL_CONTROLLER
/*
 * Polling 'interrupt' - used by things like netconsole to send skbs
 * without having to re-enable interrupts. It's not called while
 * the interrupt routine is executing.
 */
static void gfar_netpoll(struct net_device *dev)
{
	struct gfar_private *priv = netdev_priv(dev);
	int i = 0;

	/* If the device has multiple interrupts, run tx/rx */
	if (priv->device_flags & FSL_GIANFAR_DEV_HAS_MULTI_INTR) {
		for (i = 0; i < priv->num_grps; i++) {
			disable_irq(priv->gfargrp[i].interruptTransmit);
			disable_irq(priv->gfargrp[i].interruptReceive);
			disable_irq(priv->gfargrp[i].interruptError);
			gfar_interrupt(priv->gfargrp[i].interruptTransmit,
						&priv->gfargrp[i]);
			enable_irq(priv->gfargrp[i].interruptError);
			enable_irq(priv->gfargrp[i].interruptReceive);
			enable_irq(priv->gfargrp[i].interruptTransmit);
		}
	} else {
		for (i = 0; i < priv->num_grps; i++) {
			disable_irq(priv->gfargrp[i].interruptTransmit);
			gfar_interrupt(priv->gfargrp[i].interruptTransmit,
						&priv->gfargrp[i]);
			enable_irq(priv->gfargrp[i].interruptTransmit);
		}
	}
}
#endif
/* The interrupt handler for devices with one interrupt */
static irqreturn_t gfar_interrupt(int irq, void *grp_id)
{
	struct gfar_priv_grp *gfargrp = grp_id;

	/* Save ievent for future reference */
	u32 events = gfar_read(&gfargrp->regs->ievent);

	/* Check for reception */
	if (events & IEVENT_RX_MASK)
		gfar_receive(irq, grp_id);

	/* Check for transmit completion */
	if (events & IEVENT_TX_MASK)
		gfar_transmit(irq, grp_id);

	/* Check for errors */
	if (events & IEVENT_ERR_MASK)
		gfar_error(irq, grp_id);

	return IRQ_HANDLED;
}
/* Called every time the controller might need to be made
 * aware of new link state.  The PHY code conveys this
 * information through variables in the phydev structure, and this
 * function converts those variables into the appropriate
 * register values, and can bring down the device if needed.
 */
static void adjust_link(struct net_device *dev)
{
	struct gfar_private *priv = netdev_priv(dev);
	struct gfar __iomem *regs = priv->gfargrp[0].regs;
	unsigned long flags;
	struct phy_device *phydev = priv->phydev;
	int new_state = 0;

	local_irq_save(flags);
	lock_tx_qs(priv);

	if (phydev->link) {
		u32 tempval = gfar_read(&regs->maccfg2);
		u32 ecntrl = gfar_read(&regs->ecntrl);

		/* Now we make sure that we can be in full duplex mode.
		 * If not, we operate in half-duplex mode. */
		if (phydev->duplex != priv->oldduplex) {
			new_state = 1;
			if (!(phydev->duplex))
				tempval &= ~(MACCFG2_FULL_DUPLEX);
			else
				tempval |= MACCFG2_FULL_DUPLEX;

			priv->oldduplex = phydev->duplex;
		}

		if (phydev->speed != priv->oldspeed) {
			new_state = 1;
			switch (phydev->speed) {
			case 1000:
				tempval =
				    ((tempval & ~(MACCFG2_IF)) | MACCFG2_GMII);

				ecntrl &= ~(ECNTRL_R100);
				break;
			case 100:
			case 10:
				tempval =
				    ((tempval & ~(MACCFG2_IF)) | MACCFG2_MII);

				/* Reduced mode distinguishes
				 * between 10 and 100 */
				if (phydev->speed == SPEED_100)
					ecntrl |= ECNTRL_R100;
				else
					ecntrl &= ~(ECNTRL_R100);
				break;
			default:
				if (netif_msg_link(priv))
					printk(KERN_WARNING
						"%s: Ack!  Speed (%d) is not 10/100/1000!\n",
						dev->name, phydev->speed);
				break;
			}

			priv->oldspeed = phydev->speed;
		}

		gfar_write(&regs->maccfg2, tempval);
		gfar_write(&regs->ecntrl, ecntrl);

		if (!priv->oldlink) {
			new_state = 1;
			priv->oldlink = 1;
		}
	} else if (priv->oldlink) {
		new_state = 1;
		priv->oldlink = 0;
		priv->oldspeed = 0;
		priv->oldduplex = -1;
	}

	if (new_state && netif_msg_link(priv))
		phy_print_status(phydev);
	unlock_tx_qs(priv);
	local_irq_restore(flags);
}
/* Update the hash table based on the current list of multicast
 * addresses we subscribe to.  Also, change the promiscuity of
 * the device based on the flags (this function is called
 * whenever dev->flags is changed */
static void gfar_set_multi(struct net_device *dev)
{
	struct netdev_hw_addr *ha;
	struct gfar_private *priv = netdev_priv(dev);
	struct gfar __iomem *regs = priv->gfargrp[0].regs;
	u32 tempval;

	if (dev->flags & IFF_PROMISC) {
		/* Set RCTRL to PROM */
		tempval = gfar_read(&regs->rctrl);
		tempval |= RCTRL_PROM;
		gfar_write(&regs->rctrl, tempval);
	} else {
		/* Set RCTRL to not PROM */
		tempval = gfar_read(&regs->rctrl);
		tempval &= ~(RCTRL_PROM);
		gfar_write(&regs->rctrl, tempval);
	}

	if (dev->flags & IFF_ALLMULTI) {
		/* Set the hash to rx all multicast frames */
		gfar_write(&regs->igaddr0, 0xffffffff);
		gfar_write(&regs->igaddr1, 0xffffffff);
		gfar_write(&regs->igaddr2, 0xffffffff);
		gfar_write(&regs->igaddr3, 0xffffffff);
		gfar_write(&regs->igaddr4, 0xffffffff);
		gfar_write(&regs->igaddr5, 0xffffffff);
		gfar_write(&regs->igaddr6, 0xffffffff);
		gfar_write(&regs->igaddr7, 0xffffffff);
		gfar_write(&regs->gaddr0, 0xffffffff);
		gfar_write(&regs->gaddr1, 0xffffffff);
		gfar_write(&regs->gaddr2, 0xffffffff);
		gfar_write(&regs->gaddr3, 0xffffffff);
		gfar_write(&regs->gaddr4, 0xffffffff);
		gfar_write(&regs->gaddr5, 0xffffffff);
		gfar_write(&regs->gaddr6, 0xffffffff);
		gfar_write(&regs->gaddr7, 0xffffffff);
	} else {
		int em_num;
		int idx;

		/* zero out the hash */
		gfar_write(&regs->igaddr0, 0x0);
		gfar_write(&regs->igaddr1, 0x0);
		gfar_write(&regs->igaddr2, 0x0);
		gfar_write(&regs->igaddr3, 0x0);
		gfar_write(&regs->igaddr4, 0x0);
		gfar_write(&regs->igaddr5, 0x0);
		gfar_write(&regs->igaddr6, 0x0);
		gfar_write(&regs->igaddr7, 0x0);
		gfar_write(&regs->gaddr0, 0x0);
		gfar_write(&regs->gaddr1, 0x0);
		gfar_write(&regs->gaddr2, 0x0);
		gfar_write(&regs->gaddr3, 0x0);
		gfar_write(&regs->gaddr4, 0x0);
		gfar_write(&regs->gaddr5, 0x0);
		gfar_write(&regs->gaddr6, 0x0);
		gfar_write(&regs->gaddr7, 0x0);

		/* If we have extended hash tables, we need to
		 * clear the exact match registers to prepare for
		 * setting addresses */
		if (priv->extended_hash) {
			em_num = GFAR_EM_NUM + 1;
			gfar_clear_exact_match(dev);
			idx = 1;
		} else {
			idx = 0;
			em_num = 0;
		}

		if (netdev_mc_empty(dev))
			return;

		/* Parse the list, and set the appropriate bits */
		netdev_for_each_mc_addr(ha, dev) {
			if (idx < em_num) {
				gfar_set_mac_for_addr(dev, idx, ha->addr);
				idx++;
			} else
				gfar_set_hash_for_addr(dev, ha->addr);
		}
	}
}
/* Clears each of the exact match registers to zero, so they
 * don't interfere with normal reception */
static void gfar_clear_exact_match(struct net_device *dev)
{
	int idx;
	u8 zero_arr[MAC_ADDR_LEN] = {0, 0, 0, 0, 0, 0};

	for (idx = 1; idx < GFAR_EM_NUM + 1; idx++)
		gfar_set_mac_for_addr(dev, idx, (u8 *)zero_arr);
}
/* Set the appropriate hash bit for the given addr */
/* The algorithm works like so:
 * 1) Take the Destination Address (ie the multicast address), and
 * do a CRC on it (little endian), and reverse the bits of the
 * result.
 * 2) Use the 8 most significant bits as a hash into a 256-entry
 * table.  The table is controlled through 8 32-bit registers:
 * gaddr0-7.  gaddr0's MSB is entry 0, and gaddr7's LSB is
 * entry 255.  This means that the 3 most significant bits of the
 * hash index select which gaddr register to use, and the 5 other bits
 * indicate which bit (assuming an IBM numbering scheme, which
 * for PowerPC (tm) is usually the case) in the register holds
 * the entry. */
static void gfar_set_hash_for_addr(struct net_device *dev, u8 *addr)
{
	u32 tempval;
	struct gfar_private *priv = netdev_priv(dev);
	u32 result = ether_crc(MAC_ADDR_LEN, addr);
	int width = priv->hash_width;
	u8 whichbit = (result >> (32 - width)) & 0x1f;
	u8 whichreg = result >> (32 - width + 5);
	u32 value = (1 << (31 - whichbit));

	tempval = gfar_read(priv->hash_regs[whichreg]);
	tempval |= value;
	gfar_write(priv->hash_regs[whichreg], tempval);
}
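
/*
 * Worked example of the hash above (illustrative only; assumes an 8-bit
 * hash_width): for a multicast address whose ether_crc() result has 0xa3
 * (0b10100011) in its top byte:
 *
 *	whichreg = result >> 29         = 0b101   = 5	-> gaddr5
 *	whichbit = (result >> 24) & 0x1f = 0b00011 = 3
 *	value    = 1 << (31 - 3)	-> bit 3, IBM/MSB-first numbering
 *
 * i.e. hash entry 163 (5 * 32 + 3) lands in gaddr5, bit 3 counted from
 * the register's most significant bit.
 */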
/* There are multiple MAC Address register pairs on some controllers
 * This function sets the numth pair to a given address
 */
static void gfar_set_mac_for_addr(struct net_device *dev, int num, u8 *addr)
{
	struct gfar_private *priv = netdev_priv(dev);
	struct gfar __iomem *regs = priv->gfargrp[0].regs;
	int idx;
	char tmpbuf[MAC_ADDR_LEN];
	u32 tempval;
	u32 __iomem *macptr = &regs->macstnaddr1;

	macptr += num*2;

	/* Now copy it into the mac registers backwards, cuz */
	/* little endian is silly */
	for (idx = 0; idx < MAC_ADDR_LEN; idx++)
		tmpbuf[MAC_ADDR_LEN - 1 - idx] = addr[idx];

	gfar_write(macptr, *((u32 *) (tmpbuf)));

	tempval = *((u32 *) (tmpbuf + 4));

	gfar_write(macptr+1, tempval);
}
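
/*
 * Worked example of the byte swap above (illustrative only): for
 * addr = 00:04:9f:01:02:03, tmpbuf becomes 03 02 01 9f 04 00, so the
 * first register is written with the word formed from 03 02 01 9f and
 * the second register's upper bytes carry the remaining 04 00, matching
 * the controller's reversed station-address layout.
 */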
/* GFAR error interrupt handler */
static irqreturn_t gfar_error(int irq, void *grp_id)
{
	struct gfar_priv_grp *gfargrp = grp_id;
	struct gfar __iomem *regs = gfargrp->regs;
	struct gfar_private *priv = gfargrp->priv;
	struct net_device *dev = priv->ndev;

	/* Save ievent for future reference */
	u32 events = gfar_read(&regs->ievent);

	/* Clear IEVENT */
	gfar_write(&regs->ievent, events & IEVENT_ERR_MASK);

	/* Magic Packet is not an error. */
	if ((priv->device_flags & FSL_GIANFAR_DEV_HAS_MAGIC_PACKET) &&
	    (events & IEVENT_MAG))
		events &= ~IEVENT_MAG;

	if (netif_msg_rx_err(priv) || netif_msg_tx_err(priv))
		printk(KERN_DEBUG "%s: error interrupt (ievent=0x%08x imask=0x%08x)\n",
		       dev->name, events, gfar_read(&regs->imask));

	/* Update the error counters */
	if (events & IEVENT_TXE) {
		dev->stats.tx_errors++;

		if (events & IEVENT_LC)
			dev->stats.tx_window_errors++;
		if (events & IEVENT_CRL)
			dev->stats.tx_aborted_errors++;
		if (events & IEVENT_XFUN) {
			unsigned long flags;

			if (netif_msg_tx_err(priv))
				printk(KERN_DEBUG "%s: TX FIFO underrun, "
				       "packet dropped.\n", dev->name);
			dev->stats.tx_dropped++;
			priv->extra_stats.tx_underrun++;

			local_irq_save(flags);
			lock_tx_qs(priv);

			/* Reactivate the Tx Queues */
			gfar_write(&regs->tstat, gfargrp->tstat);

			unlock_tx_qs(priv);
			local_irq_restore(flags);
		}
		if (netif_msg_tx_err(priv))
			printk(KERN_DEBUG "%s: Transmit Error\n", dev->name);
	}
	if (events & IEVENT_BSY) {
		dev->stats.rx_errors++;
		priv->extra_stats.rx_bsy++;

		gfar_receive(irq, grp_id);

		if (netif_msg_rx_err(priv))
			printk(KERN_DEBUG "%s: busy error (rstat: %x)\n",
			       dev->name, gfar_read(&regs->rstat));
	}
	if (events & IEVENT_BABR) {
		dev->stats.rx_errors++;
		priv->extra_stats.rx_babr++;

		if (netif_msg_rx_err(priv))
			printk(KERN_DEBUG "%s: babbling RX error\n", dev->name);
	}
	if (events & IEVENT_EBERR) {
		priv->extra_stats.eberr++;
		if (netif_msg_rx_err(priv))
			printk(KERN_DEBUG "%s: bus error\n", dev->name);
	}
	if ((events & IEVENT_RXC) && netif_msg_rx_status(priv))
		printk(KERN_DEBUG "%s: control frame\n", dev->name);

	if (events & IEVENT_BABT) {
		priv->extra_stats.tx_babt++;
		if (netif_msg_tx_err(priv))
			printk(KERN_DEBUG "%s: babbling TX error\n", dev->name);
	}
	return IRQ_HANDLED;
}
static struct of_device_id gfar_match[] =
{
	{
		.type = "network",
		.compatible = "gianfar",
	},
	{
		.compatible = "fsl,etsec2",
	},
	{},
};
MODULE_DEVICE_TABLE(of, gfar_match);

/* Structure for a device driver */
static struct of_platform_driver gfar_driver = {
	.driver = {
		.name = "fsl-gianfar",
		.owner = THIS_MODULE,
		.of_match_table = gfar_match,
	},
	.probe = gfar_probe,
	.remove = gfar_remove,
};

static int __init gfar_init(void)
{
	return of_register_platform_driver(&gfar_driver);
}

static void __exit gfar_exit(void)
{
	of_unregister_platform_driver(&gfar_driver);
}

module_init(gfar_init);
module_exit(gfar_exit);