NEWEMAC: fix support for pause packets
drivers/net/ibm_newemac/core.c (linux-2.6/mini2440.git)
/*
 * drivers/net/ibm_newemac/core.c
 *
 * Driver for PowerPC 4xx on-chip ethernet controller.
 *
 * Copyright 2007 Benjamin Herrenschmidt, IBM Corp.
 *                <benh@kernel.crashing.org>
 *
 * Based on the arch/ppc version of the driver:
 *
 * Copyright (c) 2004, 2005 Zultys Technologies.
 * Eugene Surovegin <eugene.surovegin@zultys.com> or <ebs@ebshome.net>
 *
 * Based on original work by
 *	Matt Porter <mporter@kernel.crashing.org>
 *	(c) 2003 Benjamin Herrenschmidt <benh@kernel.crashing.org>
 *	Armin Kuster <akuster@mvista.com>
 *	Johnnie Peters <jpeters@mvista.com>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the
 * Free Software Foundation; either version 2 of the License, or (at your
 * option) any later version.
 *
 */

#include <linux/sched.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/delay.h>
#include <linux/types.h>
#include <linux/pci.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/crc32.h>
#include <linux/ethtool.h>
#include <linux/mii.h>
#include <linux/bitops.h>
#include <linux/workqueue.h>
#include <linux/of.h>

#include <asm/processor.h>
#include <asm/io.h>
#include <asm/dma.h>
#include <asm/uaccess.h>

#include "core.h"
/*
 * Lack of dma_unmap_???? calls is intentional.
 *
 * API-correct usage requires additional support state information to be
 * maintained for every RX and TX buffer descriptor (BD). Unfortunately, due to
 * the EMAC design (e.g. a TX buffer passed from the network stack can be split
 * into several BDs, dma_map_single/dma_map_page can be used to map a
 * particular BD), maintaining such information will add additional overhead.
 * The current DMA API implementation for 4xx processors only ensures cache
 * coherency, and the dma_unmap_???? routines are empty and are likely to stay
 * this way. I decided to omit dma_unmap_??? calls because I don't want to add
 * additional complexity just for the sake of following some abstract API,
 * when it doesn't add any real benefit to the driver. I understand that this
 * decision may be controversial, but I really tried to make the code
 * API-correct and efficient at the same time and didn't come up with code
 * I liked :(. --ebs
 */
#define DRV_NAME	"emac"
#define DRV_VERSION	"3.54"
#define DRV_DESC	"PPC 4xx OCP EMAC driver"

MODULE_DESCRIPTION(DRV_DESC);
MODULE_AUTHOR
    ("Eugene Surovegin <eugene.surovegin@zultys.com> or <ebs@ebshome.net>");
MODULE_LICENSE("GPL");

/*
 * PPC64 doesn't (yet) have a cacheable_memcpy
 */
#ifdef CONFIG_PPC64
#define cacheable_memcpy(d,s,n) memcpy((d),(s),(n))
#endif
/* minimum number of free TX descriptors required to wake up TX process */
#define EMAC_TX_WAKEUP_THRESH		(NUM_TX_BUFF / 4)
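/* Illustration (assumed values, for orientation only): NUM_TX_BUFF comes
 * from board configuration (CONFIG_IBM_NEW_EMAC_TXB); assuming the common
 * value of 64, a stopped TX queue is woken once fewer than 64 / 4 = 16
 * descriptors remain in use (see emac_poll_tx() below).
 */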
/* If packet size is less than this number, we allocate small skb and copy
 * packet contents into it instead of just sending original big skb up
 */
#define EMAC_RX_COPY_THRESH		CONFIG_IBM_NEW_EMAC_RX_COPY_THRESHOLD

/* Since multiple EMACs share MDIO lines in various ways, we need
 * to avoid re-using the same PHY ID in cases where the arch didn't
 * setup precise phy_map entries
 *
 * XXX This is something that needs to be reworked as we can have multiple
 * EMAC "sets" (multiple ASICs containing several EMACs) though we can
 * probably require in that case to have explicit PHY IDs in the device-tree
 */
static u32 busy_phy_map;
static DEFINE_MUTEX(emac_phy_map_lock);

/* This is the wait queue used to wait on any event related to probe, that
 * is discovery of MALs, other EMACs, ZMII/RGMIIs, etc...
 */
static DECLARE_WAIT_QUEUE_HEAD(emac_probe_wait);

/* Having stable interface names is a doomed idea. However, it would be nice
 * if we didn't have completely random interface names at boot too :-) It's
 * just a matter of making everybody's life easier. Since we are doing
 * threaded probing, it's a bit harder though. The base idea here is that
 * we make up a list of all emacs in the device-tree before we register the
 * driver. Every emac will then wait for the previous one in the list to
 * initialize before itself. We should also keep that list ordered by
 * cell_index.
 * That list is only 4 entries long, meaning that additional EMACs don't
 * get ordering guarantees unless EMAC_BOOT_LIST_SIZE is increased.
 */

#define EMAC_BOOT_LIST_SIZE	4
static struct device_node *emac_boot_list[EMAC_BOOT_LIST_SIZE];

/* How long should I wait for dependent devices ? */
#define EMAC_PROBE_DEP_TIMEOUT	(HZ * 5)

/* I don't want to litter system log with timeout errors
 * when we have a brain-damaged PHY.
 */
static inline void emac_report_timeout_error(struct emac_instance *dev,
					     const char *error)
{
	if (net_ratelimit())
		printk(KERN_ERR "%s: %s\n", dev->ndev->name, error);
}

/* PHY polling intervals */
#define PHY_POLL_LINK_ON	HZ
#define PHY_POLL_LINK_OFF	(HZ / 5)

/* Graceful stop timeouts in us.
 * We should allow up to 1 frame time (full-duplex, ignoring collisions)
 */
#define STOP_TIMEOUT_10		1230
#define STOP_TIMEOUT_100	124
#define STOP_TIMEOUT_1000	13
#define STOP_TIMEOUT_1000_JUMBO	73
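/* These values are roughly one maximum-length frame time at each speed,
 * taking 1518 bytes of frame plus 8 bytes of preamble and a 12-byte
 * inter-frame gap (1538 bytes on the wire):
 *   10 Mbps:  1538 * 8 / 10   ~ 1230 us
 *   100 Mbps: 1538 * 8 / 100  ~  124 us
 *   1 Gbps:   1538 * 8 / 1000 ~   13 us
 *   1 Gbps jumbo (9000-byte payload assumed): ~9038 * 8 / 1000 ~ 73 us
 */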
static unsigned char default_mcast_addr[] = {
	0x01, 0x80, 0xC2, 0x00, 0x00, 0x01
};
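/* 01:80:C2:00:00:01 is the IEEE 802.3x MAC Control (PAUSE) destination
 * address; emac_configure() adds it to the multicast filter so that
 * incoming pause frames pass the EMAC's RX address filter.
 */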
/* Please, keep in sync with struct ibm_emac_stats/ibm_emac_error_stats */
static const char emac_stats_keys[EMAC_ETHTOOL_STATS_COUNT][ETH_GSTRING_LEN] = {
	"rx_packets", "rx_bytes", "tx_packets", "tx_bytes", "rx_packets_csum",
	"tx_packets_csum", "tx_undo", "rx_dropped_stack", "rx_dropped_oom",
	"rx_dropped_error", "rx_dropped_resize", "rx_dropped_mtu",
	"rx_stopped", "rx_bd_errors", "rx_bd_overrun", "rx_bd_bad_packet",
	"rx_bd_runt_packet", "rx_bd_short_event", "rx_bd_alignment_error",
	"rx_bd_bad_fcs", "rx_bd_packet_too_long", "rx_bd_out_of_range",
	"rx_bd_in_range", "rx_parity", "rx_fifo_overrun", "rx_overrun",
	"rx_bad_packet", "rx_runt_packet", "rx_short_event",
	"rx_alignment_error", "rx_bad_fcs", "rx_packet_too_long",
	"rx_out_of_range", "rx_in_range", "tx_dropped", "tx_bd_errors",
	"tx_bd_bad_fcs", "tx_bd_carrier_loss", "tx_bd_excessive_deferral",
	"tx_bd_excessive_collisions", "tx_bd_late_collision",
	"tx_bd_multple_collisions", "tx_bd_single_collision",
	"tx_bd_underrun", "tx_bd_sqe", "tx_parity", "tx_underrun", "tx_sqe",
	"tx_errors"
};

static irqreturn_t emac_irq(int irq, void *dev_instance);
static void emac_clean_tx_ring(struct emac_instance *dev);
static void __emac_set_multicast_list(struct emac_instance *dev);

static inline int emac_phy_supports_gige(int phy_mode)
{
	return phy_mode == PHY_MODE_GMII ||
		phy_mode == PHY_MODE_RGMII ||
		phy_mode == PHY_MODE_TBI ||
		phy_mode == PHY_MODE_RTBI;
}

static inline int emac_phy_gpcs(int phy_mode)
{
	return phy_mode == PHY_MODE_TBI ||
		phy_mode == PHY_MODE_RTBI;
}

static inline void emac_tx_enable(struct emac_instance *dev)
{
	struct emac_regs __iomem *p = dev->emacp;
	u32 r;

	DBG(dev, "tx_enable" NL);

	r = in_be32(&p->mr0);
	if (!(r & EMAC_MR0_TXE))
		out_be32(&p->mr0, r | EMAC_MR0_TXE);
}

static void emac_tx_disable(struct emac_instance *dev)
{
	struct emac_regs __iomem *p = dev->emacp;
	u32 r;

	DBG(dev, "tx_disable" NL);

	r = in_be32(&p->mr0);
	if (r & EMAC_MR0_TXE) {
		int n = dev->stop_timeout;
		out_be32(&p->mr0, r & ~EMAC_MR0_TXE);
		while (!(in_be32(&p->mr0) & EMAC_MR0_TXI) && n) {
			udelay(1);
			--n;
		}
		if (unlikely(!n))
			emac_report_timeout_error(dev, "TX disable timeout");
	}
}

static void emac_rx_enable(struct emac_instance *dev)
{
	struct emac_regs __iomem *p = dev->emacp;
	u32 r;

	if (unlikely(test_bit(MAL_COMMAC_RX_STOPPED, &dev->commac.flags)))
		goto out;

	DBG(dev, "rx_enable" NL);

	r = in_be32(&p->mr0);
	if (!(r & EMAC_MR0_RXE)) {
		if (unlikely(!(r & EMAC_MR0_RXI))) {
			/* Wait if previous async disable is still in progress */
			int n = dev->stop_timeout;
			while (!(r = in_be32(&p->mr0) & EMAC_MR0_RXI) && n) {
				udelay(1);
				--n;
			}
			if (unlikely(!n))
				emac_report_timeout_error(dev,
							  "RX disable timeout");
		}
		out_be32(&p->mr0, r | EMAC_MR0_RXE);
	}
 out:
	;
}

static void emac_rx_disable(struct emac_instance *dev)
{
	struct emac_regs __iomem *p = dev->emacp;
	u32 r;

	DBG(dev, "rx_disable" NL);

	r = in_be32(&p->mr0);
	if (r & EMAC_MR0_RXE) {
		int n = dev->stop_timeout;
		out_be32(&p->mr0, r & ~EMAC_MR0_RXE);
		while (!(in_be32(&p->mr0) & EMAC_MR0_RXI) && n) {
			udelay(1);
			--n;
		}
		if (unlikely(!n))
			emac_report_timeout_error(dev, "RX disable timeout");
	}
}

static inline void emac_netif_stop(struct emac_instance *dev)
{
	netif_tx_lock_bh(dev->ndev);
	dev->no_mcast = 1;
	netif_tx_unlock_bh(dev->ndev);
	dev->ndev->trans_start = jiffies;	/* prevent tx timeout */
	mal_poll_disable(dev->mal, &dev->commac);
	netif_tx_disable(dev->ndev);
}

static inline void emac_netif_start(struct emac_instance *dev)
{
	netif_tx_lock_bh(dev->ndev);
	dev->no_mcast = 0;
	if (dev->mcast_pending && netif_running(dev->ndev))
		__emac_set_multicast_list(dev);
	netif_tx_unlock_bh(dev->ndev);

	netif_wake_queue(dev->ndev);

	/* NOTE: unconditional netif_wake_queue is only appropriate
	 * so long as all callers are assured to have free tx slots
	 * (taken from tg3... though the case where that is wrong is
	 *  not terribly harmful)
	 */
	mal_poll_enable(dev->mal, &dev->commac);
}

static inline void emac_rx_disable_async(struct emac_instance *dev)
{
	struct emac_regs __iomem *p = dev->emacp;
	u32 r;

	DBG(dev, "rx_disable_async" NL);

	r = in_be32(&p->mr0);
	if (r & EMAC_MR0_RXE)
		out_be32(&p->mr0, r & ~EMAC_MR0_RXE);
}

static int emac_reset(struct emac_instance *dev)
{
	struct emac_regs __iomem *p = dev->emacp;
	int n = 20;

	DBG(dev, "reset" NL);

	if (!dev->reset_failed) {
		/* 40x erratum suggests stopping RX channel before reset,
		 * we stop TX as well
		 */
		emac_rx_disable(dev);
		emac_tx_disable(dev);
	}

	out_be32(&p->mr0, EMAC_MR0_SRST);
	while ((in_be32(&p->mr0) & EMAC_MR0_SRST) && n)
		--n;

	if (n) {
		dev->reset_failed = 0;
		return 0;
	} else {
		emac_report_timeout_error(dev, "reset timeout");
		dev->reset_failed = 1;
		return -ETIMEDOUT;
	}
}
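/* Group-address hashing, as implemented below: the top 6 bits of the
 * Ethernet CRC of each multicast address select one of 64 hash bits,
 * spread across the four 16-bit GAHT1..GAHT4 registers. For example, a
 * CRC whose top 6 bits are all ones gives bit = 63 - 63 = 0, which sets
 * the most significant bit (0x8000) of GAHT1.
 */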
static void emac_hash_mc(struct emac_instance *dev)
{
	struct emac_regs __iomem *p = dev->emacp;
	u16 gaht[4] = { 0 };
	struct dev_mc_list *dmi;

	DBG(dev, "hash_mc %d" NL, dev->ndev->mc_count);

	for (dmi = dev->ndev->mc_list; dmi; dmi = dmi->next) {
		int bit;
		DBG2(dev, "mc %02x:%02x:%02x:%02x:%02x:%02x" NL,
		     dmi->dmi_addr[0], dmi->dmi_addr[1], dmi->dmi_addr[2],
		     dmi->dmi_addr[3], dmi->dmi_addr[4], dmi->dmi_addr[5]);

		bit = 63 - (ether_crc(ETH_ALEN, dmi->dmi_addr) >> 26);
		gaht[bit >> 4] |= 0x8000 >> (bit & 0x0f);
	}
	out_be32(&p->gaht1, gaht[0]);
	out_be32(&p->gaht2, gaht[1]);
	out_be32(&p->gaht3, gaht[2]);
	out_be32(&p->gaht4, gaht[3]);
}

static inline u32 emac_iff2rmr(struct net_device *ndev)
{
	struct emac_instance *dev = netdev_priv(ndev);
	u32 r;

	r = EMAC_RMR_SP | EMAC_RMR_SFCS | EMAC_RMR_IAE | EMAC_RMR_BAE;

	if (emac_has_feature(dev, EMAC_FTR_EMAC4))
		r |= EMAC4_RMR_BASE;
	else
		r |= EMAC_RMR_BASE;

	if (ndev->flags & IFF_PROMISC)
		r |= EMAC_RMR_PME;
	else if (ndev->flags & IFF_ALLMULTI || ndev->mc_count > 32)
		r |= EMAC_RMR_PMME;
	else if (ndev->mc_count > 0)
		r |= EMAC_RMR_MAE;

	return r;
}
static u32 __emac_calc_base_mr1(struct emac_instance *dev, int tx_size, int rx_size)
{
	u32 ret = EMAC_MR1_VLE | EMAC_MR1_IST | EMAC_MR1_TR0_MULT;

	DBG2(dev, "__emac_calc_base_mr1" NL);

	switch(tx_size) {
	case 2048:
		ret |= EMAC_MR1_TFS_2K;
		break;
	default:
		printk(KERN_WARNING "%s: Unknown Tx FIFO size %d\n",
		       dev->ndev->name, tx_size);
	}

	switch(rx_size) {
	case 16384:
		ret |= EMAC_MR1_RFS_16K;
		break;
	case 4096:
		ret |= EMAC_MR1_RFS_4K;
		break;
	default:
		printk(KERN_WARNING "%s: Unknown Rx FIFO size %d\n",
		       dev->ndev->name, rx_size);
	}

	return ret;
}

static u32 __emac4_calc_base_mr1(struct emac_instance *dev, int tx_size, int rx_size)
{
	u32 ret = EMAC_MR1_VLE | EMAC_MR1_IST | EMAC4_MR1_TR |
		EMAC4_MR1_OBCI(dev->opb_bus_freq / 1000000);

	DBG2(dev, "__emac4_calc_base_mr1" NL);

	switch(tx_size) {
	case 4096:
		ret |= EMAC4_MR1_TFS_4K;
		break;
	case 2048:
		ret |= EMAC4_MR1_TFS_2K;
		break;
	default:
		printk(KERN_WARNING "%s: Unknown Tx FIFO size %d\n",
		       dev->ndev->name, tx_size);
	}

	switch(rx_size) {
	case 16384:
		ret |= EMAC4_MR1_RFS_16K;
		break;
	case 4096:
		ret |= EMAC4_MR1_RFS_4K;
		break;
	case 2048:
		ret |= EMAC4_MR1_RFS_2K;
		break;
	default:
		printk(KERN_WARNING "%s: Unknown Rx FIFO size %d\n",
		       dev->ndev->name, rx_size);
	}

	return ret;
}
static u32 emac_calc_base_mr1(struct emac_instance *dev, int tx_size, int rx_size)
{
	return emac_has_feature(dev, EMAC_FTR_EMAC4) ?
		__emac4_calc_base_mr1(dev, tx_size, rx_size) :
		__emac_calc_base_mr1(dev, tx_size, rx_size);
}

static inline u32 emac_calc_trtr(struct emac_instance *dev, unsigned int size)
{
	if (emac_has_feature(dev, EMAC_FTR_EMAC4))
		return ((size >> 6) - 1) << EMAC_TRTR_SHIFT_EMAC4;
	else
		return ((size >> 6) - 1) << EMAC_TRTR_SHIFT;
}
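/* Worked example: the TX request threshold is encoded in 64-byte units,
 * minus one. For a 1024-byte threshold, (1024 >> 6) - 1 = 15 is shifted
 * into the EMAC4 or pre-EMAC4 position.
 */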
static inline u32 emac_calc_rwmr(struct emac_instance *dev,
				 unsigned int low, unsigned int high)
{
	if (emac_has_feature(dev, EMAC_FTR_EMAC4))
		return (low << 22) | ((high & 0x3ff) << 6);
	else
		return (low << 23) | ((high & 0x1ff) << 7);
}
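/* Worked example (fifo_entry_size is device-dependent; 16 bytes is
 * assumed here): for the 4 KiB RX FIFO marks chosen in emac_configure()
 * below, low = 512 / 16 = 32 entries and high = 1024 / 16 = 64 entries
 * are packed into RWMR at the bit positions above.
 */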
static int emac_configure(struct emac_instance *dev)
{
	struct emac_regs __iomem *p = dev->emacp;
	struct net_device *ndev = dev->ndev;
	int tx_size, rx_size, link = netif_carrier_ok(dev->ndev);
	u32 r, mr1 = 0;

	DBG(dev, "configure" NL);

	if (!link) {
		out_be32(&p->mr1, in_be32(&p->mr1)
			 | EMAC_MR1_FDE | EMAC_MR1_ILE);
		udelay(100);
	} else if (emac_reset(dev) < 0)
		return -ETIMEDOUT;

	if (emac_has_feature(dev, EMAC_FTR_HAS_TAH))
		tah_reset(dev->tah_dev);

	DBG(dev, " link = %d duplex = %d, pause = %d, asym_pause = %d\n",
	    link, dev->phy.duplex, dev->phy.pause, dev->phy.asym_pause);

	/* Default fifo sizes */
	tx_size = dev->tx_fifo_size;
	rx_size = dev->rx_fifo_size;

	/* No link, force loopback */
	if (!link)
		mr1 = EMAC_MR1_FDE | EMAC_MR1_ILE;

	/* Check for full duplex */
	else if (dev->phy.duplex == DUPLEX_FULL)
		mr1 |= EMAC_MR1_FDE | EMAC_MR1_MWSW_001;

	/* Adjust fifo sizes, mr1 and timeouts based on link speed */
	dev->stop_timeout = STOP_TIMEOUT_10;
	switch (dev->phy.speed) {
	case SPEED_1000:
		if (emac_phy_gpcs(dev->phy.mode)) {
			mr1 |= EMAC_MR1_MF_1000GPCS |
				EMAC_MR1_MF_IPPA(dev->phy.address);

			/* Put some arbitrary OUI, Manuf & Rev IDs so we can
			 * identify this GPCS PHY later.
			 */
			out_be32(&p->ipcr, 0xdeadbeef);
		} else
			mr1 |= EMAC_MR1_MF_1000;

		/* Extended fifo sizes */
		tx_size = dev->tx_fifo_size_gige;
		rx_size = dev->rx_fifo_size_gige;

		if (dev->ndev->mtu > ETH_DATA_LEN) {
			mr1 |= EMAC_MR1_JPSM;
			dev->stop_timeout = STOP_TIMEOUT_1000_JUMBO;
		} else
			dev->stop_timeout = STOP_TIMEOUT_1000;
		break;
	case SPEED_100:
		mr1 |= EMAC_MR1_MF_100;
		dev->stop_timeout = STOP_TIMEOUT_100;
		break;
	default: /* make gcc happy */
		break;
	}

	if (emac_has_feature(dev, EMAC_FTR_HAS_RGMII))
		rgmii_set_speed(dev->rgmii_dev, dev->rgmii_port,
				dev->phy.speed);
	if (emac_has_feature(dev, EMAC_FTR_HAS_ZMII))
		zmii_set_speed(dev->zmii_dev, dev->zmii_port, dev->phy.speed);
	/* An erratum on 40x forces us to NOT use integrated flow control;
	 * let's hope it works on 44x ;)
	 */
	if (!emac_has_feature(dev, EMAC_FTR_NO_FLOW_CONTROL_40x) &&
	    dev->phy.duplex == DUPLEX_FULL) {
		if (dev->phy.pause)
			mr1 |= EMAC_MR1_EIFC | EMAC_MR1_APP;
		else if (dev->phy.asym_pause)
			mr1 |= EMAC_MR1_APP;
	}
	/* Add base settings & fifo sizes & program MR1 */
	mr1 |= emac_calc_base_mr1(dev, tx_size, rx_size);
	out_be32(&p->mr1, mr1);

	/* Set individual MAC address */
	out_be32(&p->iahr, (ndev->dev_addr[0] << 8) | ndev->dev_addr[1]);
	out_be32(&p->ialr, (ndev->dev_addr[2] << 24) |
		 (ndev->dev_addr[3] << 16) | (ndev->dev_addr[4] << 8) |
		 ndev->dev_addr[5]);

	/* VLAN Tag Protocol ID */
	out_be32(&p->vtpid, 0x8100);

	/* Receive mode register */
	r = emac_iff2rmr(ndev);
	if (r & EMAC_RMR_MAE)
		emac_hash_mc(dev);
	out_be32(&p->rmr, r);

	/* FIFOs thresholds */
	if (emac_has_feature(dev, EMAC_FTR_EMAC4))
		r = EMAC4_TMR1((dev->mal_burst_size / dev->fifo_entry_size) + 1,
			       tx_size / 2 / dev->fifo_entry_size);
	else
		r = EMAC_TMR1((dev->mal_burst_size / dev->fifo_entry_size) + 1,
			      tx_size / 2 / dev->fifo_entry_size);
	out_be32(&p->tmr1, r);
	out_be32(&p->trtr, emac_calc_trtr(dev, tx_size / 2));
	/* A PAUSE frame is sent when the RX FIFO reaches its high-water mark,
	   so there should still be enough space in the FIFO to allow our link
	   partner time to process this frame and also time to send a PAUSE
	   frame itself.

	   Here is the worst case scenario for the RX FIFO "headroom"
	   (from "The Switch Book") (100Mbps, without preamble, inter-frame gap):

	   1) One maximum-length frame on TX                    1522 bytes
	   2) One PAUSE frame time                                64 bytes
	   3) PAUSE frame decode time allowance                   64 bytes
	   4) One maximum-length frame on RX                    1522 bytes
	   5) Round-trip propagation delay of the link (100Mb)    15 bytes
	   ----------
	   3187 bytes

	   I chose to set the high-water mark to RX_FIFO_SIZE / 4 (1024 bytes)
	   and the low-water mark to RX_FIFO_SIZE / 8 (512 bytes).
	 */
	r = emac_calc_rwmr(dev, rx_size / 8 / dev->fifo_entry_size,
			   rx_size / 4 / dev->fifo_entry_size);
	out_be32(&p->rwmr, r);

	/* Set PAUSE timer to the maximum */
	out_be32(&p->ptr, 0xffff);
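	/* For scale (802.3x background, not from the original source):
	 * pause time is measured in 512-bit quanta, so 0xffff asks the
	 * partner to pause for 65535 * 512 bits -- about 335 ms at
	 * 100 Mbps or 33.5 ms at 1 Gbps.
	 */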
	/* IRQ sources */
	r = EMAC_ISR_OVR | EMAC_ISR_BP | EMAC_ISR_SE |
		EMAC_ISR_ALE | EMAC_ISR_BFCS | EMAC_ISR_PTLE | EMAC_ISR_ORE |
		EMAC_ISR_IRE | EMAC_ISR_TE;
	if (emac_has_feature(dev, EMAC_FTR_EMAC4))
		r |= EMAC4_ISR_TXPE | EMAC4_ISR_RXPE /* | EMAC4_ISR_TXUE |
						EMAC4_ISR_RXOE | */;
	out_be32(&p->iser, r);

	/* We need to take GPCS PHY out of isolate mode after EMAC reset */
	if (emac_phy_gpcs(dev->phy.mode))
		emac_mii_reset_phy(&dev->phy);

	/* Required for Pause packet support in EMAC */
	dev_mc_add(ndev, default_mcast_addr, sizeof(default_mcast_addr), 1);

	return 0;
}
static void emac_reinitialize(struct emac_instance *dev)
{
	DBG(dev, "reinitialize" NL);

	emac_netif_stop(dev);
	if (!emac_configure(dev)) {
		emac_tx_enable(dev);
		emac_rx_enable(dev);
	}
	emac_netif_start(dev);
}

static void emac_full_tx_reset(struct emac_instance *dev)
{
	DBG(dev, "full_tx_reset" NL);

	emac_tx_disable(dev);
	mal_disable_tx_channel(dev->mal, dev->mal_tx_chan);
	emac_clean_tx_ring(dev);
	dev->tx_cnt = dev->tx_slot = dev->ack_slot = 0;

	emac_configure(dev);

	mal_enable_tx_channel(dev->mal, dev->mal_tx_chan);
	emac_tx_enable(dev);
	emac_rx_enable(dev);
}

static void emac_reset_work(struct work_struct *work)
{
	struct emac_instance *dev = container_of(work, struct emac_instance, reset_work);

	DBG(dev, "reset_work" NL);

	mutex_lock(&dev->link_lock);
	if (dev->opened) {
		emac_netif_stop(dev);
		emac_full_tx_reset(dev);
		emac_netif_start(dev);
	}
	mutex_unlock(&dev->link_lock);
}

static void emac_tx_timeout(struct net_device *ndev)
{
	struct emac_instance *dev = netdev_priv(ndev);

	DBG(dev, "tx_timeout" NL);

	schedule_work(&dev->reset_work);
}

static inline int emac_phy_done(struct emac_instance *dev, u32 stacr)
{
	int done = !!(stacr & EMAC_STACR_OC);

	if (emac_has_feature(dev, EMAC_FTR_STACR_OC_INVERT))
		done = !done;

	return done;
}

static int __emac_mdio_read(struct emac_instance *dev, u8 id, u8 reg)
{
	struct emac_regs __iomem *p = dev->emacp;
	u32 r = 0;
	int n, err = -ETIMEDOUT;

	mutex_lock(&dev->mdio_lock);

	DBG2(dev, "mdio_read(%02x,%02x)" NL, id, reg);

	/* Enable proper MDIO port */
	if (emac_has_feature(dev, EMAC_FTR_HAS_ZMII))
		zmii_get_mdio(dev->zmii_dev, dev->zmii_port);
	if (emac_has_feature(dev, EMAC_FTR_HAS_RGMII))
		rgmii_get_mdio(dev->rgmii_dev, dev->rgmii_port);

	/* Wait for management interface to become idle */
	n = 10;
	while (!emac_phy_done(dev, in_be32(&p->stacr))) {
		udelay(1);
		if (!--n) {
			DBG2(dev, " -> timeout wait idle\n");
			goto bail;
		}
	}

	/* Issue read command */
	if (emac_has_feature(dev, EMAC_FTR_EMAC4))
		r = EMAC4_STACR_BASE(dev->opb_bus_freq);
	else
		r = EMAC_STACR_BASE(dev->opb_bus_freq);
	if (emac_has_feature(dev, EMAC_FTR_STACR_OC_INVERT))
		r |= EMAC_STACR_OC;
	if (emac_has_feature(dev, EMAC_FTR_HAS_NEW_STACR))
		r |= EMACX_STACR_STAC_READ;
	else
		r |= EMAC_STACR_STAC_READ;
	r |= (reg & EMAC_STACR_PRA_MASK)
		| ((id & EMAC_STACR_PCDA_MASK) << EMAC_STACR_PCDA_SHIFT);
	out_be32(&p->stacr, r);

	/* Wait for read to complete */
	n = 100;
	while (!emac_phy_done(dev, (r = in_be32(&p->stacr)))) {
		udelay(1);
		if (!--n) {
			DBG2(dev, " -> timeout wait complete\n");
			goto bail;
		}
	}

	if (unlikely(r & EMAC_STACR_PHYE)) {
		DBG(dev, "mdio_read(%02x, %02x) failed" NL, id, reg);
		err = -EREMOTEIO;
		goto bail;
	}

	r = ((r >> EMAC_STACR_PHYD_SHIFT) & EMAC_STACR_PHYD_MASK);

	DBG2(dev, "mdio_read -> %04x" NL, r);
	err = 0;
 bail:
	if (emac_has_feature(dev, EMAC_FTR_HAS_RGMII))
		rgmii_put_mdio(dev->rgmii_dev, dev->rgmii_port);
	if (emac_has_feature(dev, EMAC_FTR_HAS_ZMII))
		zmii_put_mdio(dev->zmii_dev, dev->zmii_port);
	mutex_unlock(&dev->mdio_lock);

	return err == 0 ? r : err;
}

static void __emac_mdio_write(struct emac_instance *dev, u8 id, u8 reg,
			      u16 val)
{
	struct emac_regs __iomem *p = dev->emacp;
	u32 r = 0;
	int n, err = -ETIMEDOUT;

	mutex_lock(&dev->mdio_lock);

	DBG2(dev, "mdio_write(%02x,%02x,%04x)" NL, id, reg, val);

	/* Enable proper MDIO port */
	if (emac_has_feature(dev, EMAC_FTR_HAS_ZMII))
		zmii_get_mdio(dev->zmii_dev, dev->zmii_port);
	if (emac_has_feature(dev, EMAC_FTR_HAS_RGMII))
		rgmii_get_mdio(dev->rgmii_dev, dev->rgmii_port);

	/* Wait for management interface to be idle */
	n = 10;
	while (!emac_phy_done(dev, in_be32(&p->stacr))) {
		udelay(1);
		if (!--n) {
			DBG2(dev, " -> timeout wait idle\n");
			goto bail;
		}
	}

	/* Issue write command */
	if (emac_has_feature(dev, EMAC_FTR_EMAC4))
		r = EMAC4_STACR_BASE(dev->opb_bus_freq);
	else
		r = EMAC_STACR_BASE(dev->opb_bus_freq);
	if (emac_has_feature(dev, EMAC_FTR_STACR_OC_INVERT))
		r |= EMAC_STACR_OC;
	if (emac_has_feature(dev, EMAC_FTR_HAS_NEW_STACR))
		r |= EMACX_STACR_STAC_WRITE;
	else
		r |= EMAC_STACR_STAC_WRITE;
	r |= (reg & EMAC_STACR_PRA_MASK) |
		((id & EMAC_STACR_PCDA_MASK) << EMAC_STACR_PCDA_SHIFT) |
		(val << EMAC_STACR_PHYD_SHIFT);
	out_be32(&p->stacr, r);

	/* Wait for write to complete */
	n = 100;
	while (!emac_phy_done(dev, in_be32(&p->stacr))) {
		udelay(1);
		if (!--n) {
			DBG2(dev, " -> timeout wait complete\n");
			goto bail;
		}
	}
	err = 0;
 bail:
	if (emac_has_feature(dev, EMAC_FTR_HAS_RGMII))
		rgmii_put_mdio(dev->rgmii_dev, dev->rgmii_port);
	if (emac_has_feature(dev, EMAC_FTR_HAS_ZMII))
		zmii_put_mdio(dev->zmii_dev, dev->zmii_port);
	mutex_unlock(&dev->mdio_lock);
}

static int emac_mdio_read(struct net_device *ndev, int id, int reg)
{
	struct emac_instance *dev = netdev_priv(ndev);
	int res;

	res = __emac_mdio_read(dev->mdio_instance ? dev->mdio_instance : dev,
			       (u8) id, (u8) reg);
	return res;
}

static void emac_mdio_write(struct net_device *ndev, int id, int reg, int val)
{
	struct emac_instance *dev = netdev_priv(ndev);

	__emac_mdio_write(dev->mdio_instance ? dev->mdio_instance : dev,
			  (u8) id, (u8) reg, (u16) val);
}
/* Tx lock BH */
static void __emac_set_multicast_list(struct emac_instance *dev)
{
	struct emac_regs __iomem *p = dev->emacp;
	u32 rmr = emac_iff2rmr(dev->ndev);

	DBG(dev, "__multicast %08x" NL, rmr);

	/* I decided to relax register access rules here to avoid
	 * full EMAC reset.
	 *
	 * There is a real problem with the EMAC4 core if we use the MWSW_001
	 * bit in the MR1 register and do a full EMAC reset.
	 * One TX BD status update is delayed and, after EMAC reset, it
	 * never happens, resulting in a hung TX (it'll be recovered by the
	 * TX timeout handler eventually, but this is just gross).
	 * So we either have to do a full TX reset or try to cheat here :)
	 *
	 * The only required change is to the RX mode register, so I *think*
	 * all we need is just to stop the RX channel. This seems to work on
	 * all tested SoCs. --ebs
	 *
	 * If we need the full reset, we might just trigger the workqueue
	 * and do it async... a bit nasty but should work --BenH
	 */
	dev->mcast_pending = 0;
	emac_rx_disable(dev);
	if (rmr & EMAC_RMR_MAE)
		emac_hash_mc(dev);
	out_be32(&p->rmr, rmr);
	emac_rx_enable(dev);
}
/* Tx lock BH */
static void emac_set_multicast_list(struct net_device *ndev)
{
	struct emac_instance *dev = netdev_priv(ndev);

	DBG(dev, "multicast" NL);

	BUG_ON(!netif_running(dev->ndev));

	if (dev->no_mcast) {
		dev->mcast_pending = 1;
		return;
	}
	__emac_set_multicast_list(dev);
}

static int emac_resize_rx_ring(struct emac_instance *dev, int new_mtu)
{
	int rx_sync_size = emac_rx_sync_size(new_mtu);
	int rx_skb_size = emac_rx_skb_size(new_mtu);
	int i, ret = 0;

	mutex_lock(&dev->link_lock);
	emac_netif_stop(dev);
	emac_rx_disable(dev);
	mal_disable_rx_channel(dev->mal, dev->mal_rx_chan);

	if (dev->rx_sg_skb) {
		++dev->estats.rx_dropped_resize;
		dev_kfree_skb(dev->rx_sg_skb);
		dev->rx_sg_skb = NULL;
	}

	/* Make a first pass over RX ring and mark BDs ready, dropping
	 * non-processed packets on the way. We need this as a separate pass
	 * to simplify error recovery in the case of allocation failure later.
	 */
	for (i = 0; i < NUM_RX_BUFF; ++i) {
		if (dev->rx_desc[i].ctrl & MAL_RX_CTRL_FIRST)
			++dev->estats.rx_dropped_resize;

		dev->rx_desc[i].data_len = 0;
		dev->rx_desc[i].ctrl = MAL_RX_CTRL_EMPTY |
		    (i == (NUM_RX_BUFF - 1) ? MAL_RX_CTRL_WRAP : 0);
	}

	/* Reallocate RX ring only if bigger skb buffers are required */
	if (rx_skb_size <= dev->rx_skb_size)
		goto skip;

	/* Second pass, allocate new skbs */
	for (i = 0; i < NUM_RX_BUFF; ++i) {
		struct sk_buff *skb = alloc_skb(rx_skb_size, GFP_ATOMIC);
		if (!skb) {
			ret = -ENOMEM;
			goto oom;
		}

		BUG_ON(!dev->rx_skb[i]);
		dev_kfree_skb(dev->rx_skb[i]);

		skb_reserve(skb, EMAC_RX_SKB_HEADROOM + 2);
		dev->rx_desc[i].data_ptr =
		    dma_map_single(&dev->ofdev->dev, skb->data - 2, rx_sync_size,
				   DMA_FROM_DEVICE) + 2;
		dev->rx_skb[i] = skb;
	}
 skip:
	/* Check if we need to change "Jumbo" bit in MR1 */
	if ((new_mtu > ETH_DATA_LEN) ^ (dev->ndev->mtu > ETH_DATA_LEN)) {
		/* This is to prevent starting RX channel in emac_rx_enable() */
		set_bit(MAL_COMMAC_RX_STOPPED, &dev->commac.flags);

		dev->ndev->mtu = new_mtu;
		emac_full_tx_reset(dev);
	}

	mal_set_rcbs(dev->mal, dev->mal_rx_chan, emac_rx_size(new_mtu));
 oom:
	/* Restart RX */
	clear_bit(MAL_COMMAC_RX_STOPPED, &dev->commac.flags);
	dev->rx_slot = 0;
	mal_enable_rx_channel(dev->mal, dev->mal_rx_chan);
	emac_rx_enable(dev);
	emac_netif_start(dev);
	mutex_unlock(&dev->link_lock);

	return ret;
}
/* Process ctx, rtnl_lock semaphore */
static int emac_change_mtu(struct net_device *ndev, int new_mtu)
{
	struct emac_instance *dev = netdev_priv(ndev);
	int ret = 0;

	if (new_mtu < EMAC_MIN_MTU || new_mtu > dev->max_mtu)
		return -EINVAL;

	DBG(dev, "change_mtu(%d)" NL, new_mtu);

	if (netif_running(ndev)) {
		/* Check if we really need to reinitialize RX ring */
		if (emac_rx_skb_size(ndev->mtu) != emac_rx_skb_size(new_mtu))
			ret = emac_resize_rx_ring(dev, new_mtu);
	}

	if (!ret) {
		ndev->mtu = new_mtu;
		dev->rx_skb_size = emac_rx_skb_size(new_mtu);
		dev->rx_sync_size = emac_rx_sync_size(new_mtu);
	}

	return ret;
}
static void emac_clean_tx_ring(struct emac_instance *dev)
{
	int i;

	for (i = 0; i < NUM_TX_BUFF; ++i) {
		if (dev->tx_skb[i]) {
			dev_kfree_skb(dev->tx_skb[i]);
			dev->tx_skb[i] = NULL;
			if (dev->tx_desc[i].ctrl & MAL_TX_CTRL_READY)
				++dev->estats.tx_dropped;
		}
		dev->tx_desc[i].ctrl = 0;
		dev->tx_desc[i].data_ptr = 0;
	}
}

static void emac_clean_rx_ring(struct emac_instance *dev)
{
	int i;

	for (i = 0; i < NUM_RX_BUFF; ++i)
		if (dev->rx_skb[i]) {
			dev->rx_desc[i].ctrl = 0;
			dev_kfree_skb(dev->rx_skb[i]);
			dev->rx_skb[i] = NULL;
			dev->rx_desc[i].data_ptr = 0;
		}

	if (dev->rx_sg_skb) {
		dev_kfree_skb(dev->rx_sg_skb);
		dev->rx_sg_skb = NULL;
	}
}

static inline int emac_alloc_rx_skb(struct emac_instance *dev, int slot,
				    gfp_t flags)
{
	struct sk_buff *skb = alloc_skb(dev->rx_skb_size, flags);
	if (unlikely(!skb))
		return -ENOMEM;

	dev->rx_skb[slot] = skb;
	dev->rx_desc[slot].data_len = 0;
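	/* The 2-byte shuffle below is the usual IP-alignment trick: the
	 * buffer is mapped from skb->data - 2 (a DMA-aligned address) and
	 * the descriptor pointer biased by +2, so the 14-byte Ethernet
	 * header lands at an offset that leaves the IP header 4-byte
	 * aligned.
	 */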
	skb_reserve(skb, EMAC_RX_SKB_HEADROOM + 2);
	dev->rx_desc[slot].data_ptr =
	    dma_map_single(&dev->ofdev->dev, skb->data - 2, dev->rx_sync_size,
			   DMA_FROM_DEVICE) + 2;
	wmb();
	dev->rx_desc[slot].ctrl = MAL_RX_CTRL_EMPTY |
	    (slot == (NUM_RX_BUFF - 1) ? MAL_RX_CTRL_WRAP : 0);

	return 0;
}

static void emac_print_link_status(struct emac_instance *dev)
{
	if (netif_carrier_ok(dev->ndev))
		printk(KERN_INFO "%s: link is up, %d %s%s\n",
		       dev->ndev->name, dev->phy.speed,
		       dev->phy.duplex == DUPLEX_FULL ? "FDX" : "HDX",
		       dev->phy.pause ? ", pause enabled" :
		       dev->phy.asym_pause ? ", asymmetric pause enabled" : "");
	else
		printk(KERN_INFO "%s: link is down\n", dev->ndev->name);
}

/* Process ctx, rtnl_lock semaphore */
static int emac_open(struct net_device *ndev)
{
	struct emac_instance *dev = netdev_priv(ndev);
	int err, i;

	DBG(dev, "open" NL);

	/* Setup error IRQ handler */
	err = request_irq(dev->emac_irq, emac_irq, 0, "EMAC", dev);
	if (err) {
		printk(KERN_ERR "%s: failed to request IRQ %d\n",
		       ndev->name, dev->emac_irq);
		return err;
	}

	/* Allocate RX ring */
	for (i = 0; i < NUM_RX_BUFF; ++i)
		if (emac_alloc_rx_skb(dev, i, GFP_KERNEL)) {
			printk(KERN_ERR "%s: failed to allocate RX ring\n",
			       ndev->name);
			goto oom;
		}

	dev->tx_cnt = dev->tx_slot = dev->ack_slot = dev->rx_slot = 0;
	clear_bit(MAL_COMMAC_RX_STOPPED, &dev->commac.flags);
	dev->rx_sg_skb = NULL;

	mutex_lock(&dev->link_lock);
	dev->opened = 1;

	/* Start PHY polling now.
	 */
	if (dev->phy.address >= 0) {
		int link_poll_interval;
		if (dev->phy.def->ops->poll_link(&dev->phy)) {
			dev->phy.def->ops->read_link(&dev->phy);
			netif_carrier_on(dev->ndev);
			link_poll_interval = PHY_POLL_LINK_ON;
		} else {
			netif_carrier_off(dev->ndev);
			link_poll_interval = PHY_POLL_LINK_OFF;
		}
		dev->link_polling = 1;
		wmb();
		schedule_delayed_work(&dev->link_work, link_poll_interval);
		emac_print_link_status(dev);
	} else
		netif_carrier_on(dev->ndev);

	emac_configure(dev);
	mal_poll_add(dev->mal, &dev->commac);
	mal_enable_tx_channel(dev->mal, dev->mal_tx_chan);
	mal_set_rcbs(dev->mal, dev->mal_rx_chan, emac_rx_size(ndev->mtu));
	mal_enable_rx_channel(dev->mal, dev->mal_rx_chan);
	emac_tx_enable(dev);
	emac_rx_enable(dev);
	emac_netif_start(dev);

	mutex_unlock(&dev->link_lock);

	return 0;
 oom:
	emac_clean_rx_ring(dev);
	free_irq(dev->emac_irq, dev);

	return -ENOMEM;
}

/* BHs disabled */
#if 0
static int emac_link_differs(struct emac_instance *dev)
{
	u32 r = in_be32(&dev->emacp->mr1);

	int duplex = r & EMAC_MR1_FDE ? DUPLEX_FULL : DUPLEX_HALF;
	int speed, pause, asym_pause;

	if (r & EMAC_MR1_MF_1000)
		speed = SPEED_1000;
	else if (r & EMAC_MR1_MF_100)
		speed = SPEED_100;
	else
		speed = SPEED_10;

	switch (r & (EMAC_MR1_EIFC | EMAC_MR1_APP)) {
	case (EMAC_MR1_EIFC | EMAC_MR1_APP):
		pause = 1;
		asym_pause = 0;
		break;
	case EMAC_MR1_APP:
		pause = 0;
		asym_pause = 1;
		break;
	default:
		pause = asym_pause = 0;
	}
	return speed != dev->phy.speed || duplex != dev->phy.duplex ||
	    pause != dev->phy.pause || asym_pause != dev->phy.asym_pause;
}
#endif

static void emac_link_timer(struct work_struct *work)
{
	struct emac_instance *dev =
		container_of((struct delayed_work *)work,
			     struct emac_instance, link_work);
	int link_poll_interval;

	mutex_lock(&dev->link_lock);
	DBG2(dev, "link timer" NL);

	if (!dev->opened)
		goto bail;

	if (dev->phy.def->ops->poll_link(&dev->phy)) {
		if (!netif_carrier_ok(dev->ndev)) {
			/* Get new link parameters */
			dev->phy.def->ops->read_link(&dev->phy);

			netif_carrier_on(dev->ndev);
			emac_netif_stop(dev);
			emac_full_tx_reset(dev);
			emac_netif_start(dev);
			emac_print_link_status(dev);
		}
		link_poll_interval = PHY_POLL_LINK_ON;
	} else {
		if (netif_carrier_ok(dev->ndev)) {
			netif_carrier_off(dev->ndev);
			netif_tx_disable(dev->ndev);
			emac_reinitialize(dev);
			emac_print_link_status(dev);
		}
		link_poll_interval = PHY_POLL_LINK_OFF;
	}
	schedule_delayed_work(&dev->link_work, link_poll_interval);
 bail:
	mutex_unlock(&dev->link_lock);
}

static void emac_force_link_update(struct emac_instance *dev)
{
	netif_carrier_off(dev->ndev);
	smp_rmb();
	if (dev->link_polling) {
		cancel_rearming_delayed_work(&dev->link_work);
		if (dev->link_polling)
			schedule_delayed_work(&dev->link_work, PHY_POLL_LINK_OFF);
	}
}

/* Process ctx, rtnl_lock semaphore */
static int emac_close(struct net_device *ndev)
{
	struct emac_instance *dev = netdev_priv(ndev);

	DBG(dev, "close" NL);

	if (dev->phy.address >= 0) {
		dev->link_polling = 0;
		cancel_rearming_delayed_work(&dev->link_work);
	}
	mutex_lock(&dev->link_lock);
	emac_netif_stop(dev);
	dev->opened = 0;
	mutex_unlock(&dev->link_lock);

	emac_rx_disable(dev);
	emac_tx_disable(dev);
	mal_disable_rx_channel(dev->mal, dev->mal_rx_chan);
	mal_disable_tx_channel(dev->mal, dev->mal_tx_chan);
	mal_poll_del(dev->mal, &dev->commac);

	emac_clean_tx_ring(dev);
	emac_clean_rx_ring(dev);

	free_irq(dev->emac_irq, dev);

	return 0;
}
static inline u16 emac_tx_csum(struct emac_instance *dev,
			       struct sk_buff *skb)
{
	if (emac_has_feature(dev, EMAC_FTR_HAS_TAH) &&
	    skb->ip_summed == CHECKSUM_PARTIAL) {
		++dev->stats.tx_packets_csum;
		return EMAC_TX_CTRL_TAH_CSUM;
	}
	return 0;
}
static inline int emac_xmit_finish(struct emac_instance *dev, int len)
{
	struct emac_regs __iomem *p = dev->emacp;
	struct net_device *ndev = dev->ndev;

	/* Send the packet out. If the if makes a significant perf
	 * difference, then we can store the TMR0 value in "dev"
	 * instead
	 */
	if (emac_has_feature(dev, EMAC_FTR_EMAC4))
		out_be32(&p->tmr0, EMAC4_TMR0_XMIT);
	else
		out_be32(&p->tmr0, EMAC_TMR0_XMIT);

	if (unlikely(++dev->tx_cnt == NUM_TX_BUFF)) {
		netif_stop_queue(ndev);
		DBG2(dev, "stopped TX queue" NL);
	}

	ndev->trans_start = jiffies;
	++dev->stats.tx_packets;
	dev->stats.tx_bytes += len;

	return 0;
}

/* Tx lock BH */
static int emac_start_xmit(struct sk_buff *skb, struct net_device *ndev)
{
	struct emac_instance *dev = netdev_priv(ndev);
	unsigned int len = skb->len;
	int slot;

	u16 ctrl = EMAC_TX_CTRL_GFCS | EMAC_TX_CTRL_GP | MAL_TX_CTRL_READY |
	    MAL_TX_CTRL_LAST | emac_tx_csum(dev, skb);

	slot = dev->tx_slot++;
	if (dev->tx_slot == NUM_TX_BUFF) {
		dev->tx_slot = 0;
		ctrl |= MAL_TX_CTRL_WRAP;
	}

	DBG2(dev, "xmit(%u) %d" NL, len, slot);

	dev->tx_skb[slot] = skb;
	dev->tx_desc[slot].data_ptr = dma_map_single(&dev->ofdev->dev,
						     skb->data, len,
						     DMA_TO_DEVICE);
	dev->tx_desc[slot].data_len = (u16) len;
	wmb();
	dev->tx_desc[slot].ctrl = ctrl;

	return emac_xmit_finish(dev, len);
}

static inline int emac_xmit_split(struct emac_instance *dev, int slot,
				  u32 pd, int len, int last, u16 base_ctrl)
{
	while (1) {
		u16 ctrl = base_ctrl;
		int chunk = min(len, MAL_MAX_TX_SIZE);
		len -= chunk;

		slot = (slot + 1) % NUM_TX_BUFF;

		if (last && !len)
			ctrl |= MAL_TX_CTRL_LAST;
		if (slot == NUM_TX_BUFF - 1)
			ctrl |= MAL_TX_CTRL_WRAP;

		dev->tx_skb[slot] = NULL;
		dev->tx_desc[slot].data_ptr = pd;
		dev->tx_desc[slot].data_len = (u16) chunk;
		dev->tx_desc[slot].ctrl = ctrl;
		++dev->tx_cnt;

		if (!len)
			break;

		pd += chunk;
	}
	return slot;
}
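/* Chunking example (MAL_MAX_TX_SIZE is MAL-dependent; 4096 is assumed
 * here): a 5000-byte contiguous region is emitted as a 4096-byte BD
 * followed by a 904-byte BD, with MAL_TX_CTRL_LAST set only on the
 * final chunk when "last" is true.
 */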
/* Tx lock BH disabled (SG version for TAH equipped EMACs) */
static int emac_start_xmit_sg(struct sk_buff *skb, struct net_device *ndev)
{
	struct emac_instance *dev = netdev_priv(ndev);
	int nr_frags = skb_shinfo(skb)->nr_frags;
	int len = skb->len, chunk;
	int slot, i;
	u16 ctrl;
	u32 pd;

	/* This is common "fast" path */
	if (likely(!nr_frags && len <= MAL_MAX_TX_SIZE))
		return emac_start_xmit(skb, ndev);

	len -= skb->data_len;

	/* Note, this is only an *estimation*, we can still run out of empty
	 * slots because of the additional fragmentation into
	 * MAL_MAX_TX_SIZE-sized chunks
	 */
	if (unlikely(dev->tx_cnt + nr_frags + mal_tx_chunks(len) > NUM_TX_BUFF))
		goto stop_queue;

	ctrl = EMAC_TX_CTRL_GFCS | EMAC_TX_CTRL_GP | MAL_TX_CTRL_READY |
	    emac_tx_csum(dev, skb);
	slot = dev->tx_slot;

	/* skb data */
	dev->tx_skb[slot] = NULL;
	chunk = min(len, MAL_MAX_TX_SIZE);
	dev->tx_desc[slot].data_ptr = pd =
	    dma_map_single(&dev->ofdev->dev, skb->data, len, DMA_TO_DEVICE);
	dev->tx_desc[slot].data_len = (u16) chunk;
	len -= chunk;
	if (unlikely(len))
		slot = emac_xmit_split(dev, slot, pd + chunk, len, !nr_frags,
				       ctrl);
	/* skb fragments */
	for (i = 0; i < nr_frags; ++i) {
		struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[i];
		len = frag->size;

		if (unlikely(dev->tx_cnt + mal_tx_chunks(len) >= NUM_TX_BUFF))
			goto undo_frame;

		pd = dma_map_page(&dev->ofdev->dev, frag->page, frag->page_offset, len,
				  DMA_TO_DEVICE);

		slot = emac_xmit_split(dev, slot, pd, len, i == nr_frags - 1,
				       ctrl);
	}

	DBG2(dev, "xmit_sg(%u) %d - %d" NL, skb->len, dev->tx_slot, slot);

	/* Attach skb to the last slot so we don't release it too early */
	dev->tx_skb[slot] = skb;

	/* Send the packet out */
	if (dev->tx_slot == NUM_TX_BUFF - 1)
		ctrl |= MAL_TX_CTRL_WRAP;
	wmb();
	dev->tx_desc[dev->tx_slot].ctrl = ctrl;
	dev->tx_slot = (slot + 1) % NUM_TX_BUFF;

	return emac_xmit_finish(dev, skb->len);

 undo_frame:
	/* Well, too bad. Our previous estimation was overly optimistic.
	 * Undo everything.
	 */
	while (slot != dev->tx_slot) {
		dev->tx_desc[slot].ctrl = 0;
		--dev->tx_cnt;
		if (--slot < 0)
			slot = NUM_TX_BUFF - 1;
	}
	++dev->estats.tx_undo;

 stop_queue:
	netif_stop_queue(ndev);
	DBG2(dev, "stopped TX queue" NL);
	return 1;
}

/* Tx lock BHs */
static void emac_parse_tx_error(struct emac_instance *dev, u16 ctrl)
{
	struct emac_error_stats *st = &dev->estats;

	DBG(dev, "BD TX error %04x" NL, ctrl);

	++st->tx_bd_errors;
	if (ctrl & EMAC_TX_ST_BFCS)
		++st->tx_bd_bad_fcs;
	if (ctrl & EMAC_TX_ST_LCS)
		++st->tx_bd_carrier_loss;
	if (ctrl & EMAC_TX_ST_ED)
		++st->tx_bd_excessive_deferral;
	if (ctrl & EMAC_TX_ST_EC)
		++st->tx_bd_excessive_collisions;
	if (ctrl & EMAC_TX_ST_LC)
		++st->tx_bd_late_collision;
	if (ctrl & EMAC_TX_ST_MC)
		++st->tx_bd_multple_collisions;
	if (ctrl & EMAC_TX_ST_SC)
		++st->tx_bd_single_collision;
	if (ctrl & EMAC_TX_ST_UR)
		++st->tx_bd_underrun;
	if (ctrl & EMAC_TX_ST_SQE)
		++st->tx_bd_sqe;
}

static void emac_poll_tx(void *param)
{
	struct emac_instance *dev = param;
	u32 bad_mask;

	DBG2(dev, "poll_tx, %d %d" NL, dev->tx_cnt, dev->ack_slot);

	if (emac_has_feature(dev, EMAC_FTR_HAS_TAH))
		bad_mask = EMAC_IS_BAD_TX_TAH;
	else
		bad_mask = EMAC_IS_BAD_TX;

	netif_tx_lock_bh(dev->ndev);
	if (dev->tx_cnt) {
		u16 ctrl;
		int slot = dev->ack_slot, n = 0;
	again:
		ctrl = dev->tx_desc[slot].ctrl;
		if (!(ctrl & MAL_TX_CTRL_READY)) {
			struct sk_buff *skb = dev->tx_skb[slot];
			++n;

			if (skb) {
				dev_kfree_skb(skb);
				dev->tx_skb[slot] = NULL;
			}
			slot = (slot + 1) % NUM_TX_BUFF;

			if (unlikely(ctrl & bad_mask))
				emac_parse_tx_error(dev, ctrl);

			if (--dev->tx_cnt)
				goto again;
		}
		if (n) {
			dev->ack_slot = slot;
			if (netif_queue_stopped(dev->ndev) &&
			    dev->tx_cnt < EMAC_TX_WAKEUP_THRESH)
				netif_wake_queue(dev->ndev);

			DBG2(dev, "tx %d pkts" NL, n);
		}
	}
	netif_tx_unlock_bh(dev->ndev);
}
static inline void emac_recycle_rx_skb(struct emac_instance *dev, int slot,
				       int len)
{
	struct sk_buff *skb = dev->rx_skb[slot];

	DBG2(dev, "recycle %d %d" NL, slot, len);

	if (len)
		dma_map_single(&dev->ofdev->dev, skb->data - 2,
			       EMAC_DMA_ALIGN(len + 2), DMA_FROM_DEVICE);

	dev->rx_desc[slot].data_len = 0;
	wmb();
	dev->rx_desc[slot].ctrl = MAL_RX_CTRL_EMPTY |
	    (slot == (NUM_RX_BUFF - 1) ? MAL_RX_CTRL_WRAP : 0);
}

static void emac_parse_rx_error(struct emac_instance *dev, u16 ctrl)
{
	struct emac_error_stats *st = &dev->estats;

	DBG(dev, "BD RX error %04x" NL, ctrl);

	++st->rx_bd_errors;
	if (ctrl & EMAC_RX_ST_OE)
		++st->rx_bd_overrun;
	if (ctrl & EMAC_RX_ST_BP)
		++st->rx_bd_bad_packet;
	if (ctrl & EMAC_RX_ST_RP)
		++st->rx_bd_runt_packet;
	if (ctrl & EMAC_RX_ST_SE)
		++st->rx_bd_short_event;
	if (ctrl & EMAC_RX_ST_AE)
		++st->rx_bd_alignment_error;
	if (ctrl & EMAC_RX_ST_BFCS)
		++st->rx_bd_bad_fcs;
	if (ctrl & EMAC_RX_ST_PTL)
		++st->rx_bd_packet_too_long;
	if (ctrl & EMAC_RX_ST_ORE)
		++st->rx_bd_out_of_range;
	if (ctrl & EMAC_RX_ST_IRE)
		++st->rx_bd_in_range;
}

static inline void emac_rx_csum(struct emac_instance *dev,
				struct sk_buff *skb, u16 ctrl)
{
#ifdef CONFIG_IBM_NEW_EMAC_TAH
	if (!ctrl && dev->tah_dev) {
		skb->ip_summed = CHECKSUM_UNNECESSARY;
		++dev->stats.rx_packets_csum;
	}
#endif
}

static inline int emac_rx_sg_append(struct emac_instance *dev, int slot)
{
	if (likely(dev->rx_sg_skb != NULL)) {
		int len = dev->rx_desc[slot].data_len;
		int tot_len = dev->rx_sg_skb->len + len;

		if (unlikely(tot_len + 2 > dev->rx_skb_size)) {
			++dev->estats.rx_dropped_mtu;
			dev_kfree_skb(dev->rx_sg_skb);
			dev->rx_sg_skb = NULL;
		} else {
			cacheable_memcpy(skb_tail_pointer(dev->rx_sg_skb),
					 dev->rx_skb[slot]->data, len);
			skb_put(dev->rx_sg_skb, len);
			emac_recycle_rx_skb(dev, slot, len);
			return 0;
		}
	}
	emac_recycle_rx_skb(dev, slot, 0);
	return -1;
}

/* NAPI poll context */
static int emac_poll_rx(void *param, int budget)
{
	struct emac_instance *dev = param;
	int slot = dev->rx_slot, received = 0;

	DBG2(dev, "poll_rx(%d)" NL, budget);

 again:
	while (budget > 0) {
		int len;
		struct sk_buff *skb;
		u16 ctrl = dev->rx_desc[slot].ctrl;

		if (ctrl & MAL_RX_CTRL_EMPTY)
			break;

		skb = dev->rx_skb[slot];
		mb();
		len = dev->rx_desc[slot].data_len;

		if (unlikely(!MAL_IS_SINGLE_RX(ctrl)))
			goto sg;

		ctrl &= EMAC_BAD_RX_MASK;
		if (unlikely(ctrl && ctrl != EMAC_RX_TAH_BAD_CSUM)) {
			emac_parse_rx_error(dev, ctrl);
			++dev->estats.rx_dropped_error;
			emac_recycle_rx_skb(dev, slot, 0);
			len = 0;
			goto next;
		}

		if (len && len < EMAC_RX_COPY_THRESH) {
			struct sk_buff *copy_skb =
			    alloc_skb(len + EMAC_RX_SKB_HEADROOM + 2, GFP_ATOMIC);
			if (unlikely(!copy_skb))
				goto oom;

			skb_reserve(copy_skb, EMAC_RX_SKB_HEADROOM + 2);
			cacheable_memcpy(copy_skb->data - 2, skb->data - 2,
					 len + 2);
			emac_recycle_rx_skb(dev, slot, len);
			skb = copy_skb;
		} else if (unlikely(emac_alloc_rx_skb(dev, slot, GFP_ATOMIC)))
			goto oom;

		skb_put(skb, len);
	push_packet:
		skb->dev = dev->ndev;
		skb->protocol = eth_type_trans(skb, dev->ndev);
		emac_rx_csum(dev, skb, ctrl);

		if (unlikely(netif_receive_skb(skb) == NET_RX_DROP))
			++dev->estats.rx_dropped_stack;
	next:
		++dev->stats.rx_packets;
	skip:
		dev->stats.rx_bytes += len;
		slot = (slot + 1) % NUM_RX_BUFF;
		--budget;
		++received;
		continue;
	sg:
		if (ctrl & MAL_RX_CTRL_FIRST) {
			BUG_ON(dev->rx_sg_skb);
			if (unlikely(emac_alloc_rx_skb(dev, slot, GFP_ATOMIC))) {
				DBG(dev, "rx OOM %d" NL, slot);
				++dev->estats.rx_dropped_oom;
				emac_recycle_rx_skb(dev, slot, 0);
			} else {
				dev->rx_sg_skb = skb;
				skb_put(skb, len);
			}
		} else if (!emac_rx_sg_append(dev, slot) &&
			   (ctrl & MAL_RX_CTRL_LAST)) {

			skb = dev->rx_sg_skb;
			dev->rx_sg_skb = NULL;

			ctrl &= EMAC_BAD_RX_MASK;
			if (unlikely(ctrl && ctrl != EMAC_RX_TAH_BAD_CSUM)) {
				emac_parse_rx_error(dev, ctrl);
				++dev->estats.rx_dropped_error;
				dev_kfree_skb(skb);
				len = 0;
			} else
				goto push_packet;
		}
		goto skip;
	oom:
		DBG(dev, "rx OOM %d" NL, slot);
		/* Drop the packet and recycle skb */
		++dev->estats.rx_dropped_oom;
		emac_recycle_rx_skb(dev, slot, 0);
		goto next;
	}

	if (received) {
		DBG2(dev, "rx %d BDs" NL, received);
		dev->rx_slot = slot;
	}

	if (unlikely(budget && test_bit(MAL_COMMAC_RX_STOPPED, &dev->commac.flags))) {
		mb();
		if (!(dev->rx_desc[slot].ctrl & MAL_RX_CTRL_EMPTY)) {
			DBG2(dev, "rx restart" NL);
			received = 0;
			goto again;
		}

		if (dev->rx_sg_skb) {
			DBG2(dev, "dropping partial rx packet" NL);
			++dev->estats.rx_dropped_error;
			dev_kfree_skb(dev->rx_sg_skb);
			dev->rx_sg_skb = NULL;
		}

		clear_bit(MAL_COMMAC_RX_STOPPED, &dev->commac.flags);
		mal_enable_rx_channel(dev->mal, dev->mal_rx_chan);
		emac_rx_enable(dev);
		dev->rx_slot = 0;
	}
	return received;
}
/* NAPI poll context */
static int emac_peek_rx(void *param)
{
	struct emac_instance *dev = param;

	return !(dev->rx_desc[dev->rx_slot].ctrl & MAL_RX_CTRL_EMPTY);
}

/* NAPI poll context */
static int emac_peek_rx_sg(void *param)
{
	struct emac_instance *dev = param;

	int slot = dev->rx_slot;
	while (1) {
		u16 ctrl = dev->rx_desc[slot].ctrl;
		if (ctrl & MAL_RX_CTRL_EMPTY)
			return 0;
		else if (ctrl & MAL_RX_CTRL_LAST)
			return 1;

		slot = (slot + 1) % NUM_RX_BUFF;

		/* I'm just being paranoid here :) */
		if (unlikely(slot == dev->rx_slot))
			return 0;
	}
}

/* Hard IRQ */
static void emac_rxde(void *param)
{
	struct emac_instance *dev = param;

	++dev->estats.rx_stopped;
	emac_rx_disable_async(dev);
}

/* Hard IRQ */
static irqreturn_t emac_irq(int irq, void *dev_instance)
{
	struct emac_instance *dev = dev_instance;
	struct emac_regs __iomem *p = dev->emacp;
	struct emac_error_stats *st = &dev->estats;
	u32 isr;

	spin_lock(&dev->lock);

	isr = in_be32(&p->isr);
	out_be32(&p->isr, isr);

	DBG(dev, "isr = %08x" NL, isr);

	if (isr & EMAC4_ISR_TXPE)
		++st->tx_parity;
	if (isr & EMAC4_ISR_RXPE)
		++st->rx_parity;
	if (isr & EMAC4_ISR_TXUE)
		++st->tx_underrun;
	if (isr & EMAC4_ISR_RXOE)
		++st->rx_fifo_overrun;
	if (isr & EMAC_ISR_OVR)
		++st->rx_overrun;
	if (isr & EMAC_ISR_BP)
		++st->rx_bad_packet;
	if (isr & EMAC_ISR_RP)
		++st->rx_runt_packet;
	if (isr & EMAC_ISR_SE)
		++st->rx_short_event;
	if (isr & EMAC_ISR_ALE)
		++st->rx_alignment_error;
	if (isr & EMAC_ISR_BFCS)
		++st->rx_bad_fcs;
	if (isr & EMAC_ISR_PTLE)
		++st->rx_packet_too_long;
	if (isr & EMAC_ISR_ORE)
		++st->rx_out_of_range;
	if (isr & EMAC_ISR_IRE)
		++st->rx_in_range;
	if (isr & EMAC_ISR_SQE)
		++st->tx_sqe;
	if (isr & EMAC_ISR_TE)
		++st->tx_errors;

	spin_unlock(&dev->lock);

	return IRQ_HANDLED;
}
static struct net_device_stats *emac_stats(struct net_device *ndev)
{
	struct emac_instance *dev = netdev_priv(ndev);
	struct emac_stats *st = &dev->stats;
	struct emac_error_stats *est = &dev->estats;
	struct net_device_stats *nst = &dev->nstats;
	unsigned long flags;

	DBG2(dev, "stats" NL);

	/* Compute "legacy" statistics */
	spin_lock_irqsave(&dev->lock, flags);
	nst->rx_packets = (unsigned long)st->rx_packets;
	nst->rx_bytes = (unsigned long)st->rx_bytes;
	nst->tx_packets = (unsigned long)st->tx_packets;
	nst->tx_bytes = (unsigned long)st->tx_bytes;
	nst->rx_dropped = (unsigned long)(est->rx_dropped_oom +
					  est->rx_dropped_error +
					  est->rx_dropped_resize +
					  est->rx_dropped_mtu);
	nst->tx_dropped = (unsigned long)est->tx_dropped;

	nst->rx_errors = (unsigned long)est->rx_bd_errors;
	nst->rx_fifo_errors = (unsigned long)(est->rx_bd_overrun +
					      est->rx_fifo_overrun +
					      est->rx_overrun);
	nst->rx_frame_errors = (unsigned long)(est->rx_bd_alignment_error +
					       est->rx_alignment_error);
	nst->rx_crc_errors = (unsigned long)(est->rx_bd_bad_fcs +
					     est->rx_bad_fcs);
	nst->rx_length_errors = (unsigned long)(est->rx_bd_runt_packet +
						est->rx_bd_short_event +
						est->rx_bd_packet_too_long +
						est->rx_bd_out_of_range +
						est->rx_bd_in_range +
						est->rx_runt_packet +
						est->rx_short_event +
						est->rx_packet_too_long +
						est->rx_out_of_range +
						est->rx_in_range);

	nst->tx_errors = (unsigned long)(est->tx_bd_errors + est->tx_errors);
	nst->tx_fifo_errors = (unsigned long)(est->tx_bd_underrun +
					      est->tx_underrun);
	nst->tx_carrier_errors = (unsigned long)est->tx_bd_carrier_loss;
	nst->collisions = (unsigned long)(est->tx_bd_excessive_deferral +
					  est->tx_bd_excessive_collisions +
					  est->tx_bd_late_collision +
					  est->tx_bd_multple_collisions);
	spin_unlock_irqrestore(&dev->lock, flags);
	return nst;
}

static struct mal_commac_ops emac_commac_ops = {
	.poll_tx = &emac_poll_tx,
	.poll_rx = &emac_poll_rx,
	.peek_rx = &emac_peek_rx,
	.rxde = &emac_rxde,
};

static struct mal_commac_ops emac_commac_sg_ops = {
	.poll_tx = &emac_poll_tx,
	.poll_rx = &emac_poll_rx,
	.peek_rx = &emac_peek_rx_sg,
	.rxde = &emac_rxde,
};
/* Ethtool support */
static int emac_ethtool_get_settings(struct net_device *ndev,
				     struct ethtool_cmd *cmd)
{
	struct emac_instance *dev = netdev_priv(ndev);

	cmd->supported = dev->phy.features;
	cmd->port = PORT_MII;
	cmd->phy_address = dev->phy.address;
	cmd->transceiver =
	    dev->phy.address >= 0 ? XCVR_EXTERNAL : XCVR_INTERNAL;

	mutex_lock(&dev->link_lock);
	cmd->advertising = dev->phy.advertising;
	cmd->autoneg = dev->phy.autoneg;
	cmd->speed = dev->phy.speed;
	cmd->duplex = dev->phy.duplex;
	mutex_unlock(&dev->link_lock);

	return 0;
}

static int emac_ethtool_set_settings(struct net_device *ndev,
				     struct ethtool_cmd *cmd)
{
	struct emac_instance *dev = netdev_priv(ndev);
	u32 f = dev->phy.features;

	DBG(dev, "set_settings(%d, %d, %d, 0x%08x)" NL,
	    cmd->autoneg, cmd->speed, cmd->duplex, cmd->advertising);

	/* Basic sanity checks */
	if (dev->phy.address < 0)
		return -EOPNOTSUPP;
	if (cmd->autoneg != AUTONEG_ENABLE && cmd->autoneg != AUTONEG_DISABLE)
		return -EINVAL;
	if (cmd->autoneg == AUTONEG_ENABLE && cmd->advertising == 0)
		return -EINVAL;
	if (cmd->duplex != DUPLEX_HALF && cmd->duplex != DUPLEX_FULL)
		return -EINVAL;

	if (cmd->autoneg == AUTONEG_DISABLE) {
		switch (cmd->speed) {
		case SPEED_10:
			if (cmd->duplex == DUPLEX_HALF
			    && !(f & SUPPORTED_10baseT_Half))
				return -EINVAL;
			if (cmd->duplex == DUPLEX_FULL
			    && !(f & SUPPORTED_10baseT_Full))
				return -EINVAL;
			break;
		case SPEED_100:
			if (cmd->duplex == DUPLEX_HALF
			    && !(f & SUPPORTED_100baseT_Half))
				return -EINVAL;
			if (cmd->duplex == DUPLEX_FULL
			    && !(f & SUPPORTED_100baseT_Full))
				return -EINVAL;
			break;
		case SPEED_1000:
			if (cmd->duplex == DUPLEX_HALF
			    && !(f & SUPPORTED_1000baseT_Half))
				return -EINVAL;
			if (cmd->duplex == DUPLEX_FULL
			    && !(f & SUPPORTED_1000baseT_Full))
				return -EINVAL;
			break;
		default:
			return -EINVAL;
		}

		mutex_lock(&dev->link_lock);
		dev->phy.def->ops->setup_forced(&dev->phy, cmd->speed,
						cmd->duplex);
		mutex_unlock(&dev->link_lock);

	} else {
		if (!(f & SUPPORTED_Autoneg))
			return -EINVAL;

		mutex_lock(&dev->link_lock);
		dev->phy.def->ops->setup_aneg(&dev->phy,
					      (cmd->advertising & f) |
					      (dev->phy.advertising &
					       (ADVERTISED_Pause |
						ADVERTISED_Asym_Pause)));
		mutex_unlock(&dev->link_lock);
	}
	emac_force_link_update(dev);

	return 0;
}
1947 static void emac_ethtool_get_ringparam(struct net_device *ndev,
1948 struct ethtool_ringparam *rp)
1950 rp->rx_max_pending = rp->rx_pending = NUM_RX_BUFF;
1951 rp->tx_max_pending = rp->tx_pending = NUM_TX_BUFF;
1954 static void emac_ethtool_get_pauseparam(struct net_device *ndev,
1955 struct ethtool_pauseparam *pp)
1957 struct emac_instance *dev = netdev_priv(ndev);
1959 mutex_lock(&dev->link_lock);
1960 if ((dev->phy.features & SUPPORTED_Autoneg) &&
1961 (dev->phy.advertising & (ADVERTISED_Pause | ADVERTISED_Asym_Pause)))
1962 pp->autoneg = 1;
1964 if (dev->phy.duplex == DUPLEX_FULL) {
1965 if (dev->phy.pause)
1966 pp->rx_pause = pp->tx_pause = 1;
1967 else if (dev->phy.asym_pause)
1968 pp->tx_pause = 1;
1970 mutex_unlock(&dev->link_lock);
1973 static u32 emac_ethtool_get_rx_csum(struct net_device *ndev)
1975 struct emac_instance *dev = netdev_priv(ndev);
1977 return dev->tah_dev != NULL;
1980 static int emac_get_regs_len(struct emac_instance *dev)
1982 if (emac_has_feature(dev, EMAC_FTR_EMAC4))
1983 return sizeof(struct emac_ethtool_regs_subhdr) +
1984 EMAC4_ETHTOOL_REGS_SIZE;
1985 else
1986 return sizeof(struct emac_ethtool_regs_subhdr) +
1987 EMAC_ETHTOOL_REGS_SIZE;
1990 static int emac_ethtool_get_regs_len(struct net_device *ndev)
1992 struct emac_instance *dev = netdev_priv(ndev);
1993 int size;
1995 size = sizeof(struct emac_ethtool_regs_hdr) +
1996 emac_get_regs_len(dev) + mal_get_regs_len(dev->mal);
1997 if (emac_has_feature(dev, EMAC_FTR_HAS_ZMII))
1998 size += zmii_get_regs_len(dev->zmii_dev);
1999 if (emac_has_feature(dev, EMAC_FTR_HAS_RGMII))
2000 size += rgmii_get_regs_len(dev->rgmii_dev);
2001 if (emac_has_feature(dev, EMAC_FTR_HAS_TAH))
2002 size += tah_get_regs_len(dev->tah_dev);
2004 return size;
2007 static void *emac_dump_regs(struct emac_instance *dev, void *buf)
2009 struct emac_ethtool_regs_subhdr *hdr = buf;
2011 hdr->index = dev->cell_index;
2012 if (emac_has_feature(dev, EMAC_FTR_EMAC4)) {
2013 hdr->version = EMAC4_ETHTOOL_REGS_VER;
2014 memcpy_fromio(hdr + 1, dev->emacp, EMAC4_ETHTOOL_REGS_SIZE);
2015 return ((void *)(hdr + 1) + EMAC4_ETHTOOL_REGS_SIZE);
2016 } else {
2017 hdr->version = EMAC_ETHTOOL_REGS_VER;
2018 memcpy_fromio(hdr + 1, dev->emacp, EMAC_ETHTOOL_REGS_SIZE);
2019 return ((void *)(hdr + 1) + EMAC_ETHTOOL_REGS_SIZE);
2023 static void emac_ethtool_get_regs(struct net_device *ndev,
2024 struct ethtool_regs *regs, void *buf)
2026 struct emac_instance *dev = netdev_priv(ndev);
2027 struct emac_ethtool_regs_hdr *hdr = buf;
2029 hdr->components = 0;
2030 buf = hdr + 1;
2032 buf = mal_dump_regs(dev->mal, buf);
2033 buf = emac_dump_regs(dev, buf);
2034 if (emac_has_feature(dev, EMAC_FTR_HAS_ZMII)) {
2035 hdr->components |= EMAC_ETHTOOL_REGS_ZMII;
2036 buf = zmii_dump_regs(dev->zmii_dev, buf);
2038 if (emac_has_feature(dev, EMAC_FTR_HAS_RGMII)) {
2039 hdr->components |= EMAC_ETHTOOL_REGS_RGMII;
2040 buf = rgmii_dump_regs(dev->rgmii_dev, buf);
2042 if (emac_has_feature(dev, EMAC_FTR_HAS_TAH)) {
2043 hdr->components |= EMAC_ETHTOOL_REGS_TAH;
2044 buf = tah_dump_regs(dev->tah_dev, buf);
2048 static int emac_ethtool_nway_reset(struct net_device *ndev)
2050 struct emac_instance *dev = netdev_priv(ndev);
2051 int res = 0;
2053 DBG(dev, "nway_reset" NL);
2055 if (dev->phy.address < 0)
2056 return -EOPNOTSUPP;
2058 mutex_lock(&dev->link_lock);
2059 if (!dev->phy.autoneg) {
2060 res = -EINVAL;
2061 goto out;
2064 dev->phy.def->ops->setup_aneg(&dev->phy, dev->phy.advertising);
2065 out:
2066 mutex_unlock(&dev->link_lock);
2067 emac_force_link_update(dev);
2068 return res;
2071 static int emac_ethtool_get_stats_count(struct net_device *ndev)
2073 return EMAC_ETHTOOL_STATS_COUNT;
2076 static void emac_ethtool_get_strings(struct net_device *ndev, u32 stringset,
2077 u8 *buf)
2079 if (stringset == ETH_SS_STATS)
2080 memcpy(buf, &emac_stats_keys, sizeof(emac_stats_keys));
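/* The stats blob is simply dev->stats followed by dev->estats, in the
 * same order as the names in emac_stats_keys.
 */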
2083 static void emac_ethtool_get_ethtool_stats(struct net_device *ndev,
2084 struct ethtool_stats *estats,
2085 u64 *tmp_stats)
2087 struct emac_instance *dev = netdev_priv(ndev);
2089 memcpy(tmp_stats, &dev->stats, sizeof(dev->stats));
2090 tmp_stats += sizeof(dev->stats) / sizeof(u64);
2091 memcpy(tmp_stats, &dev->estats, sizeof(dev->estats));
2094 static void emac_ethtool_get_drvinfo(struct net_device *ndev,
2095 struct ethtool_drvinfo *info)
2097 struct emac_instance *dev = netdev_priv(ndev);
2099 strcpy(info->driver, "ibm_emac");
2100 strcpy(info->version, DRV_VERSION);
2101 info->fw_version[0] = '\0';
2102 snprintf(info->bus_info, sizeof(info->bus_info), "PPC 4xx EMAC-%d %s",
2103 dev->cell_index, dev->ofdev->node->full_name);
2104 info->n_stats = emac_ethtool_get_stats_count(ndev);
2105 info->regdump_len = emac_ethtool_get_regs_len(ndev);
2108 static const struct ethtool_ops emac_ethtool_ops = {
2109 .get_settings = emac_ethtool_get_settings,
2110 .set_settings = emac_ethtool_set_settings,
2111 .get_drvinfo = emac_ethtool_get_drvinfo,
2113 .get_regs_len = emac_ethtool_get_regs_len,
2114 .get_regs = emac_ethtool_get_regs,
2116 .nway_reset = emac_ethtool_nway_reset,
2118 .get_ringparam = emac_ethtool_get_ringparam,
2119 .get_pauseparam = emac_ethtool_get_pauseparam,
2121 .get_rx_csum = emac_ethtool_get_rx_csum,
2123 .get_strings = emac_ethtool_get_strings,
2124 .get_stats_count = emac_ethtool_get_stats_count,
2125 .get_ethtool_stats = emac_ethtool_get_ethtool_stats,
2127 .get_link = ethtool_op_get_link,
2128 .get_tx_csum = ethtool_op_get_tx_csum,
2129 .get_sg = ethtool_op_get_sg,
2132 static int emac_ioctl(struct net_device *ndev, struct ifreq *rq, int cmd)
2134 struct emac_instance *dev = netdev_priv(ndev);
2135 uint16_t *data = (uint16_t *)&rq->ifr_ifru;
2137 DBG(dev, "ioctl %08x" NL, cmd);
2139 if (dev->phy.address < 0)
2140 return -EOPNOTSUPP;
2142 switch (cmd) {
2143 case SIOCGMIIPHY:
2144 case SIOCDEVPRIVATE:
2145 data[0] = dev->phy.address;
2146 /* Fall through */
2147 case SIOCGMIIREG:
2148 case SIOCDEVPRIVATE + 1:
2149 data[3] = emac_mdio_read(ndev, dev->phy.address, data[1]);
2150 return 0;
2152 case SIOCSMIIREG:
2153 case SIOCDEVPRIVATE + 2:
2154 if (!capable(CAP_NET_ADMIN))
2155 return -EPERM;
2156 emac_mdio_write(ndev, dev->phy.address, data[1], data[2]);
2157 return 0;
2158 default:
2159 return -EOPNOTSUPP;
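/* A minimal user-space sketch of driving the MII ioctls handled above
 * (the interface name "eth0" is only an example; the u16 layout matches
 * struct mii_ioctl_data, i.e. data[0] = phy_id, data[1] = reg_num,
 * data[3] = val_out):
 *
 *	int fd = socket(AF_INET, SOCK_DGRAM, 0);
 *	struct ifreq ifr;
 *	struct mii_ioctl_data *mii =
 *		(struct mii_ioctl_data *)&ifr.ifr_data;
 *
 *	strncpy(ifr.ifr_name, "eth0", IFNAMSIZ);
 *	ioctl(fd, SIOCGMIIPHY, &ifr);	(fills mii->phy_id)
 *	mii->reg_num = MII_BMSR;
 *	ioctl(fd, SIOCGMIIREG, &ifr);	(link status in mii->val_out)
 */
/* Dependency bookkeeping for probe ordering: one entry per device this
 * EMAC needs (MAL, the optional ZMII/RGMII/TAH/MDIO devices and the
 * previous EMAC in the boot list), resolved in stages from a phandle to
 * a device node, to an of_device, and finally to the owning driver's
 * drvdata.
 */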
2163 struct emac_depentry {
2164 u32 phandle;
2165 struct device_node *node;
2166 struct of_device *ofdev;
2167 void *drvdata;
2170 #define EMAC_DEP_MAL_IDX 0
2171 #define EMAC_DEP_ZMII_IDX 1
2172 #define EMAC_DEP_RGMII_IDX 2
2173 #define EMAC_DEP_TAH_IDX 3
2174 #define EMAC_DEP_MDIO_IDX 4
2175 #define EMAC_DEP_PREV_IDX 5
2176 #define EMAC_DEP_COUNT 6
2178 static int __devinit emac_check_deps(struct emac_instance *dev,
2179 struct emac_depentry *deps)
2181 int i, there = 0;
2182 struct device_node *np;
2184 for (i = 0; i < EMAC_DEP_COUNT; i++) {
2185 /* no dependency on that item, all right */
2186 if (deps[i].phandle == 0) {
2187 there++;
2188 continue;
2190 /* special case for blist as the dependency might go away */
2191 if (i == EMAC_DEP_PREV_IDX) {
2192 np = *(dev->blist - 1);
2193 if (np == NULL) {
2194 deps[i].phandle = 0;
2195 there++;
2196 continue;
2198 if (deps[i].node == NULL)
2199 deps[i].node = of_node_get(np);
2201 if (deps[i].node == NULL)
2202 deps[i].node = of_find_node_by_phandle(deps[i].phandle);
2203 if (deps[i].node == NULL)
2204 continue;
2205 if (deps[i].ofdev == NULL)
2206 deps[i].ofdev = of_find_device_by_node(deps[i].node);
2207 if (deps[i].ofdev == NULL)
2208 continue;
2209 if (deps[i].drvdata == NULL)
2210 deps[i].drvdata = dev_get_drvdata(&deps[i].ofdev->dev);
2211 if (deps[i].drvdata != NULL)
2212 there++;
2214 return (there == EMAC_DEP_COUNT);
2217 static void emac_put_deps(struct emac_instance *dev)
2219 if (dev->mal_dev)
2220 of_dev_put(dev->mal_dev);
2221 if (dev->zmii_dev)
2222 of_dev_put(dev->zmii_dev);
2223 if (dev->rgmii_dev)
2224 of_dev_put(dev->rgmii_dev);
2225 if (dev->mdio_dev)
2226 of_dev_put(dev->mdio_dev);
2227 if (dev->tah_dev)
2228 of_dev_put(dev->tah_dev);
2231 static int __devinit emac_of_bus_notify(struct notifier_block *nb,
2232 unsigned long action, void *data)
2234 /* We are only interested in devices being bound to a driver */
2235 if (action == BUS_NOTIFY_BOUND_DRIVER)
2236 wake_up_all(&emac_probe_wait);
2237 return 0;
2240 static struct notifier_block emac_of_bus_notifier = {
2241 .notifier_call = emac_of_bus_notify
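/* Wait until all of our dependencies have been probed: register a bus
 * notifier so that any driver binding on the of_platform bus wakes
 * emac_probe_wait, then keep re-checking the dependency list until it is
 * complete or EMAC_PROBE_DEP_TIMEOUT expires.
 */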
2244 static int __devinit emac_wait_deps(struct emac_instance *dev)
2246 struct emac_depentry deps[EMAC_DEP_COUNT];
2247 int i, err;
2249 memset(&deps, 0, sizeof(deps));
2251 deps[EMAC_DEP_MAL_IDX].phandle = dev->mal_ph;
2252 deps[EMAC_DEP_ZMII_IDX].phandle = dev->zmii_ph;
2253 deps[EMAC_DEP_RGMII_IDX].phandle = dev->rgmii_ph;
2254 if (dev->tah_ph)
2255 deps[EMAC_DEP_TAH_IDX].phandle = dev->tah_ph;
2256 if (dev->mdio_ph)
2257 deps[EMAC_DEP_MDIO_IDX].phandle = dev->mdio_ph;
2258 if (dev->blist && dev->blist > emac_boot_list)
2259 deps[EMAC_DEP_PREV_IDX].phandle = 0xffffffffu;
2260 bus_register_notifier(&of_platform_bus_type, &emac_of_bus_notifier);
2261 wait_event_timeout(emac_probe_wait,
2262 emac_check_deps(dev, deps),
2263 EMAC_PROBE_DEP_TIMEOUT);
2264 bus_unregister_notifier(&of_platform_bus_type, &emac_of_bus_notifier);
2265 err = emac_check_deps(dev, deps) ? 0 : -ENODEV;
2266 for (i = 0; i < EMAC_DEP_COUNT; i++) {
2267 if (deps[i].node)
2268 of_node_put(deps[i].node);
2269 if (err && deps[i].ofdev)
2270 of_dev_put(deps[i].ofdev);
2272 if (err == 0) {
2273 dev->mal_dev = deps[EMAC_DEP_MAL_IDX].ofdev;
2274 dev->zmii_dev = deps[EMAC_DEP_ZMII_IDX].ofdev;
2275 dev->rgmii_dev = deps[EMAC_DEP_RGMII_IDX].ofdev;
2276 dev->tah_dev = deps[EMAC_DEP_TAH_IDX].ofdev;
2277 dev->mdio_dev = deps[EMAC_DEP_MDIO_IDX].ofdev;
2279 if (deps[EMAC_DEP_PREV_IDX].ofdev)
2280 of_dev_put(deps[EMAC_DEP_PREV_IDX].ofdev);
2281 return err;
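/* Read a u32 property from a device-tree node into *val. Returns -ENODEV
 * (and logs an error when "fatal" marks the property as mandatory) if the
 * property is missing or too short.
 */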
2284 static int __devinit emac_read_uint_prop(struct device_node *np, const char *name,
2285 u32 *val, int fatal)
2287 int len;
2288 const u32 *prop = of_get_property(np, name, &len);
2289 if (prop == NULL || len < sizeof(u32)) {
2290 if (fatal)
2291 printk(KERN_ERR "%s: missing %s property\n",
2292 np->full_name, name);
2293 return -ENODEV;
2295 *val = *prop;
2296 return 0;
2299 static int __devinit emac_init_phy(struct emac_instance *dev)
2301 struct device_node *np = dev->ofdev->node;
2302 struct net_device *ndev = dev->ndev;
2303 u32 phy_map, adv;
2304 int i;
2306 dev->phy.dev = ndev;
2307 dev->phy.mode = dev->phy_mode;
2309 /* PHY-less configuration.
2310 * XXX I probably should move these settings to the dev tree
2312 if (dev->phy_address == 0xffffffff && dev->phy_map == 0xffffffff) {
2313 emac_reset(dev);
2318 dev->phy.address = -1;
2319 dev->phy.features = SUPPORTED_100baseT_Full | SUPPORTED_MII;
2320 dev->phy.pause = 1;
2322 return 0;
2325 mutex_lock(&emac_phy_map_lock);
2326 phy_map = dev->phy_map | busy_phy_map;
2328 DBG(dev, "PHY maps %08x %08x" NL, dev->phy_map, busy_phy_map);
2330 dev->phy.mdio_read = emac_mdio_read;
2331 dev->phy.mdio_write = emac_mdio_write;
2333 /* Configure EMAC with defaults so we can at least use MDIO
2334 * This is needed mostly for 440GX
2336 if (emac_phy_gpcs(dev->phy.mode)) {
2337 /* XXX
2338 * Make GPCS PHY address equal to EMAC index.
2339 * We probably should take into account busy_phy_map
2340 * and/or phy_map here.
2342 * Note that the busy_phy_map is currently global
2343 * while it should probably be per-ASIC...
2345 dev->phy.address = dev->cell_index;
2348 emac_configure(dev);
2350 if (dev->phy_address != 0xffffffff)
2351 phy_map = ~(1 << dev->phy_address);
2353 for (i = 0; i < 0x20; phy_map >>= 1, ++i)
2354 if (!(phy_map & 1)) {
2355 int r;
2356 busy_phy_map |= 1 << i;
2358 /* Quick check if there is a PHY at the address */
2359 r = emac_mdio_read(dev->ndev, i, MII_BMCR);
2360 if (r == 0xffff || r < 0)
2361 continue;
2362 if (!emac_mii_phy_probe(&dev->phy, i))
2363 break;
2365 mutex_unlock(&emac_phy_map_lock);
2366 if (i == 0x20) {
2367 printk(KERN_WARNING "%s: can't find PHY!\n", np->full_name);
2368 return -ENXIO;
2371 /* Init PHY */
2372 if (dev->phy.def->ops->init)
2373 dev->phy.def->ops->init(&dev->phy);
2375 /* Disable any PHY features not supported by the platform */
2376 dev->phy.def->features &= ~dev->phy_feat_exc;
2378 /* Setup initial link parameters */
2379 if (dev->phy.features & SUPPORTED_Autoneg) {
2380 adv = dev->phy.features;
2381 if (!emac_has_feature(dev, EMAC_FTR_NO_FLOW_CONTROL_40x))
2382 adv |= ADVERTISED_Pause | ADVERTISED_Asym_Pause;
2383 /* Restart autonegotiation */
2384 dev->phy.def->ops->setup_aneg(&dev->phy, adv);
2385 } else {
2386 u32 f = dev->phy.def->features;
2387 int speed = SPEED_10, fd = DUPLEX_HALF;
2389 /* Select highest supported speed/duplex */
2390 if (f & SUPPORTED_1000baseT_Full) {
2391 speed = SPEED_1000;
2392 fd = DUPLEX_FULL;
2393 } else if (f & SUPPORTED_1000baseT_Half)
2394 speed = SPEED_1000;
2395 else if (f & SUPPORTED_100baseT_Full) {
2396 speed = SPEED_100;
2397 fd = DUPLEX_FULL;
2398 } else if (f & SUPPORTED_100baseT_Half)
2399 speed = SPEED_100;
2400 else if (f & SUPPORTED_10baseT_Full)
2401 fd = DUPLEX_FULL;
2403 /* Force link parameters */
2404 dev->phy.def->ops->setup_forced(&dev->phy, speed, fd);
2406 return 0;
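/* emac_init_config() below pulls its configuration out of the device
 * tree. A hypothetical node showing the properties it consumes (the
 * values are illustrative only, not taken from any real board):
 *
 *	EMAC0: ethernet@40000800 {
 *		device_type = "network";
 *		compatible = "ibm,emac4";
 *		cell-index = <0>;
 *		mal-device = <&MAL0>;
 *		mal-tx-channel = <0>;
 *		mal-rx-channel = <0>;
 *		max-frame-size = <1500>;
 *		rx-fifo-size = <4096>;
 *		tx-fifo-size = <2048>;
 *		phy-mode = "rgmii";
 *		rgmii-device = <&RGMII0>;
 *		rgmii-channel = <0>;
 *		local-mac-address = [000000000000];
 *	};
 */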
2409 static int __devinit emac_init_config(struct emac_instance *dev)
2411 struct device_node *np = dev->ofdev->node;
2412 const void *p;
2413 unsigned int plen;
2414 const char *pm, *phy_modes[] = {
2415 [PHY_MODE_NA] = "",
2416 [PHY_MODE_MII] = "mii",
2417 [PHY_MODE_RMII] = "rmii",
2418 [PHY_MODE_SMII] = "smii",
2419 [PHY_MODE_RGMII] = "rgmii",
2420 [PHY_MODE_TBI] = "tbi",
2421 [PHY_MODE_GMII] = "gmii",
2422 [PHY_MODE_RTBI] = "rtbi",
2423 [PHY_MODE_SGMII] = "sgmii",
2426 /* Read config from device-tree */
2427 if (emac_read_uint_prop(np, "mal-device", &dev->mal_ph, 1))
2428 return -ENXIO;
2429 if (emac_read_uint_prop(np, "mal-tx-channel", &dev->mal_tx_chan, 1))
2430 return -ENXIO;
2431 if (emac_read_uint_prop(np, "mal-rx-channel", &dev->mal_rx_chan, 1))
2432 return -ENXIO;
2433 if (emac_read_uint_prop(np, "cell-index", &dev->cell_index, 1))
2434 return -ENXIO;
2435 if (emac_read_uint_prop(np, "max-frame-size", &dev->max_mtu, 0))
2436 dev->max_mtu = 1500;
2437 if (emac_read_uint_prop(np, "rx-fifo-size", &dev->rx_fifo_size, 0))
2438 dev->rx_fifo_size = 2048;
2439 if (emac_read_uint_prop(np, "tx-fifo-size", &dev->tx_fifo_size, 0))
2440 dev->tx_fifo_size = 2048;
2441 if (emac_read_uint_prop(np, "rx-fifo-size-gige", &dev->rx_fifo_size_gige, 0))
2442 dev->rx_fifo_size_gige = dev->rx_fifo_size;
2443 if (emac_read_uint_prop(np, "tx-fifo-size-gige", &dev->tx_fifo_size_gige, 0))
2444 dev->tx_fifo_size_gige = dev->tx_fifo_size;
2445 if (emac_read_uint_prop(np, "phy-address", &dev->phy_address, 0))
2446 dev->phy_address = 0xffffffff;
2447 if (emac_read_uint_prop(np, "phy-map", &dev->phy_map, 0))
2448 dev->phy_map = 0xffffffff;
2449 if (emac_read_uint_prop(np->parent, "clock-frequency", &dev->opb_bus_freq, 1))
2450 return -ENXIO;
2451 if (emac_read_uint_prop(np, "tah-device", &dev->tah_ph, 0))
2452 dev->tah_ph = 0;
2453 if (emac_read_uint_prop(np, "tah-channel", &dev->tah_port, 0))
2454 dev->tah_port = 0;
2455 if (emac_read_uint_prop(np, "mdio-device", &dev->mdio_ph, 0))
2456 dev->mdio_ph = 0;
2457 if (emac_read_uint_prop(np, "zmii-device", &dev->zmii_ph, 0))
2458 dev->zmii_ph = 0;
2459 if (emac_read_uint_prop(np, "zmii-channel", &dev->zmii_port, 0))
2460 dev->zmii_port = 0xffffffff;
2461 if (emac_read_uint_prop(np, "rgmii-device", &dev->rgmii_ph, 0))
2462 dev->rgmii_ph = 0;
2463 if (emac_read_uint_prop(np, "rgmii-channel", &dev->rgmii_port, 0))
2464 dev->rgmii_port = 0xffffffff;
2465 if (emac_read_uint_prop(np, "fifo-entry-size", &dev->fifo_entry_size, 0))
2466 dev->fifo_entry_size = 16;
2467 if (emac_read_uint_prop(np, "mal-burst-size", &dev->mal_burst_size, 0))
2468 dev->mal_burst_size = 256;
2470 /* PHY mode needs some decoding */
2471 dev->phy_mode = PHY_MODE_NA;
2472 pm = of_get_property(np, "phy-mode", &plen);
2473 if (pm != NULL) {
2474 int i;
2475 for (i = 0; i < ARRAY_SIZE(phy_modes); i++)
2476 if (!strcasecmp(pm, phy_modes[i])) {
2477 dev->phy_mode = i;
2478 break;
2482 /* Backward compat with non-final DT */
2483 if (dev->phy_mode == PHY_MODE_NA && pm != NULL && plen == 4) {
2484 u32 nmode = *(const u32 *)pm;
2485 if (nmode > PHY_MODE_NA && nmode <= PHY_MODE_SGMII)
2486 dev->phy_mode = nmode;
2489 /* Check EMAC version */
2490 if (of_device_is_compatible(np, "ibm,emac4"))
2491 dev->features |= EMAC_FTR_EMAC4;
2493 /* Fixup some feature bits based on the device tree */
2494 if (of_get_property(np, "has-inverted-stacr-oc", NULL))
2495 dev->features |= EMAC_FTR_STACR_OC_INVERT;
2496 if (of_get_property(np, "has-new-stacr-staopc", NULL))
2497 dev->features |= EMAC_FTR_HAS_NEW_STACR;
2499 /* CAB lacks the appropriate properties */
2500 if (of_device_is_compatible(np, "ibm,emac-axon"))
2501 dev->features |= EMAC_FTR_HAS_NEW_STACR |
2502 EMAC_FTR_STACR_OC_INVERT;
2504 /* Enable TAH/ZMII/RGMII features as found */
2505 if (dev->tah_ph != 0) {
2506 #ifdef CONFIG_IBM_NEW_EMAC_TAH
2507 dev->features |= EMAC_FTR_HAS_TAH;
2508 #else
2509 printk(KERN_ERR "%s: TAH support not enabled !\n",
2510 np->full_name);
2511 return -ENXIO;
2512 #endif
2515 if (dev->zmii_ph != 0) {
2516 #ifdef CONFIG_IBM_NEW_EMAC_ZMII
2517 dev->features |= EMAC_FTR_HAS_ZMII;
2518 #else
2519 printk(KERN_ERR "%s: ZMII support not enabled !\n",
2520 np->full_name);
2521 return -ENXIO;
2522 #endif
2525 if (dev->rgmii_ph != 0) {
2526 #ifdef CONFIG_IBM_NEW_EMAC_RGMII
2527 dev->features |= EMAC_FTR_HAS_RGMII;
2528 #else
2529 printk(KERN_ERR "%s: RGMII support not enabled !\n",
2530 np->full_name);
2531 return -ENXIO;
2532 #endif
2535 /* Read MAC-address */
2536 p = of_get_property(np, "local-mac-address", NULL);
2537 if (p == NULL) {
2538 printk(KERN_ERR "%s: Can't find local-mac-address property\n",
2539 np->full_name);
2540 return -ENXIO;
2542 memcpy(dev->ndev->dev_addr, p, ETH_ALEN);
2544 DBG(dev, "features : 0x%08x / 0x%08x\n", dev->features, EMAC_FTRS_POSSIBLE);
2545 DBG(dev, "tx_fifo_size : %d (%d gige)\n", dev->tx_fifo_size, dev->tx_fifo_size_gige);
2546 DBG(dev, "rx_fifo_size : %d (%d gige)\n", dev->rx_fifo_size, dev->rx_fifo_size_gige);
2547 DBG(dev, "max_mtu : %d\n", dev->max_mtu);
2548 DBG(dev, "OPB freq : %d\n", dev->opb_bus_freq);
2550 return 0;
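/* Probe sequence: parse the device-tree configuration, map interrupts
 * and registers, wait for the MAL/ZMII/RGMII/TAH/MDIO dependencies,
 * register with the MAL, attach the bridge devices, initialize the PHY
 * and finally register the net_device. The error paths below unwind
 * these steps in strict reverse order.
 */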
2553 static int __devinit emac_probe(struct of_device *ofdev,
2554 const struct of_device_id *match)
2556 struct net_device *ndev;
2557 struct emac_instance *dev;
2558 struct device_node *np = ofdev->node;
2559 struct device_node **blist = NULL;
2560 int err, i;
2562 /* Skip unused/unwired EMACs */
2563 if (of_get_property(np, "unused", NULL))
2564 return -ENODEV;
2566 /* Find ourselves in the bootlist if we are there */
2567 for (i = 0; i < EMAC_BOOT_LIST_SIZE; i++)
2568 if (emac_boot_list[i] == np)
2569 blist = &emac_boot_list[i];
2571 /* Allocate our net_device structure */
2572 err = -ENOMEM;
2573 ndev = alloc_etherdev(sizeof(struct emac_instance));
2574 if (!ndev) {
2575 printk(KERN_ERR "%s: could not allocate ethernet device!\n",
2576 np->full_name);
2577 goto err_gone;
2579 dev = netdev_priv(ndev);
2580 dev->ndev = ndev;
2581 dev->ofdev = ofdev;
2582 dev->blist = blist;
2583 SET_NETDEV_DEV(ndev, &ofdev->dev);
2585 /* Initialize some embedded data structures */
2586 mutex_init(&dev->mdio_lock);
2587 mutex_init(&dev->link_lock);
2588 spin_lock_init(&dev->lock);
2589 INIT_WORK(&dev->reset_work, emac_reset_work);
2591 /* Init various config data based on device-tree */
2592 err = emac_init_config(dev);
2593 if (err != 0)
2594 goto err_free;
2596 /* Get interrupts. EMAC irq is mandatory, WOL irq is optional */
2597 dev->emac_irq = irq_of_parse_and_map(np, 0);
2598 dev->wol_irq = irq_of_parse_and_map(np, 1);
2599 if (dev->emac_irq == NO_IRQ) {
2600 printk(KERN_ERR "%s: Can't map main interrupt\n", np->full_name);
err = -ENODEV;
2601 goto err_free;
2603 ndev->irq = dev->emac_irq;
2605 /* Map EMAC regs */
2606 if (of_address_to_resource(np, 0, &dev->rsrc_regs)) {
2607 printk(KERN_ERR "%s: Can't get registers address\n",
2608 np->full_name);
err = -ENXIO;
2609 goto err_irq_unmap;
2611 /* TODO: request_mem_region */
2612 dev->emacp = ioremap(dev->rsrc_regs.start, sizeof(struct emac_regs));
2613 if (dev->emacp == NULL) {
2614 printk(KERN_ERR "%s: Can't map device registers!\n",
2615 np->full_name);
2616 err = -ENOMEM;
2617 goto err_irq_unmap;
2620 /* Wait for dependent devices */
2621 err = emac_wait_deps(dev);
2622 if (err) {
2623 printk(KERN_ERR
2624 "%s: Timeout waiting for dependent devices\n",
2625 np->full_name);
2626 /* display more info about what's missing? */
2627 goto err_reg_unmap;
2629 dev->mal = dev_get_drvdata(&dev->mal_dev->dev);
2630 if (dev->mdio_dev != NULL)
2631 dev->mdio_instance = dev_get_drvdata(&dev->mdio_dev->dev);
2633 /* Register with MAL */
2634 dev->commac.ops = &emac_commac_ops;
2635 dev->commac.dev = dev;
2636 dev->commac.tx_chan_mask = MAL_CHAN_MASK(dev->mal_tx_chan);
2637 dev->commac.rx_chan_mask = MAL_CHAN_MASK(dev->mal_rx_chan);
2638 err = mal_register_commac(dev->mal, &dev->commac);
2639 if (err) {
2640 printk(KERN_ERR "%s: failed to register with mal %s!\n",
2641 np->full_name, dev->mal_dev->node->full_name);
2642 goto err_rel_deps;
2644 dev->rx_skb_size = emac_rx_skb_size(ndev->mtu);
2645 dev->rx_sync_size = emac_rx_sync_size(ndev->mtu);
2647 /* Get pointers to BD rings */
2648 dev->tx_desc =
2649 dev->mal->bd_virt + mal_tx_bd_offset(dev->mal, dev->mal_tx_chan);
2650 dev->rx_desc =
2651 dev->mal->bd_virt + mal_rx_bd_offset(dev->mal, dev->mal_rx_chan);
2653 DBG(dev, "tx_desc %p" NL, dev->tx_desc);
2654 DBG(dev, "rx_desc %p" NL, dev->rx_desc);
2656 /* Clean rings */
2657 memset(dev->tx_desc, 0, NUM_TX_BUFF * sizeof(struct mal_descriptor));
2658 memset(dev->rx_desc, 0, NUM_RX_BUFF * sizeof(struct mal_descriptor));
2660 /* Attach to ZMII, if needed */
2661 if (emac_has_feature(dev, EMAC_FTR_HAS_ZMII) &&
2662 (err = zmii_attach(dev->zmii_dev, dev->zmii_port, &dev->phy_mode)) != 0)
2663 goto err_unreg_commac;
2665 /* Attach to RGMII, if needed */
2666 if (emac_has_feature(dev, EMAC_FTR_HAS_RGMII) &&
2667 (err = rgmii_attach(dev->rgmii_dev, dev->rgmii_port, dev->phy_mode)) != 0)
2668 goto err_detach_zmii;
2670 /* Attach to TAH, if needed */
2671 if (emac_has_feature(dev, EMAC_FTR_HAS_TAH) &&
2672 (err = tah_attach(dev->tah_dev, dev->tah_port)) != 0)
2673 goto err_detach_rgmii;
2675 /* Set some link defaults before we can find out real parameters */
2676 dev->phy.speed = SPEED_100;
2677 dev->phy.duplex = DUPLEX_FULL;
2678 dev->phy.autoneg = AUTONEG_DISABLE;
2679 dev->phy.pause = dev->phy.asym_pause = 0;
2680 dev->stop_timeout = STOP_TIMEOUT_100;
2681 INIT_DELAYED_WORK(&dev->link_work, emac_link_timer);
2683 /* Find PHY if any */
2684 err = emac_init_phy(dev);
2685 if (err != 0)
2686 goto err_detach_tah;
2688 /* Fill in the driver function table */
2689 ndev->open = &emac_open;
2690 if (dev->tah_dev)
2691 ndev->features |= NETIF_F_IP_CSUM | NETIF_F_SG;
2692 ndev->tx_timeout = &emac_tx_timeout;
2693 ndev->watchdog_timeo = 5 * HZ;
2694 ndev->stop = &emac_close;
2695 ndev->get_stats = &emac_stats;
2696 ndev->set_multicast_list = &emac_set_multicast_list;
2697 ndev->do_ioctl = &emac_ioctl;
2698 if (emac_phy_supports_gige(dev->phy_mode)) {
2699 ndev->hard_start_xmit = &emac_start_xmit_sg;
2700 ndev->change_mtu = &emac_change_mtu;
2701 dev->commac.ops = &emac_commac_sg_ops;
2702 } else {
2703 ndev->hard_start_xmit = &emac_start_xmit;
2705 SET_ETHTOOL_OPS(ndev, &emac_ethtool_ops);
2707 netif_carrier_off(ndev);
2708 netif_stop_queue(ndev);
2710 err = register_netdev(ndev);
2711 if (err) {
2712 printk(KERN_ERR "%s: failed to register net device (%d)!\n",
2713 np->full_name, err);
2714 goto err_detach_tah;
2717 /* Set our drvdata last as we don't want it visible until we are
2718 * fully initialized
2720 wmb();
2721 dev_set_drvdata(&ofdev->dev, dev);
2723 /* There's a new kid in town! Let's tell everybody */
2724 wake_up_all(&emac_probe_wait);
2727 printk(KERN_INFO
2728 "%s: EMAC-%d %s, MAC %02x:%02x:%02x:%02x:%02x:%02x\n",
2729 ndev->name, dev->cell_index, np->full_name,
2730 ndev->dev_addr[0], ndev->dev_addr[1], ndev->dev_addr[2],
2731 ndev->dev_addr[3], ndev->dev_addr[4], ndev->dev_addr[5]);
2733 if (dev->phy.address >= 0)
2734 printk("%s: found %s PHY (0x%02x)\n", ndev->name,
2735 dev->phy.def->name, dev->phy.address);
2737 emac_dbg_register(dev);
2739 /* Life is good */
2740 return 0;
2742 /* I have a bad feeling about this ... */
2744 err_detach_tah:
2745 if (emac_has_feature(dev, EMAC_FTR_HAS_TAH))
2746 tah_detach(dev->tah_dev, dev->tah_port);
2747 err_detach_rgmii:
2748 if (emac_has_feature(dev, EMAC_FTR_HAS_RGMII))
2749 rgmii_detach(dev->rgmii_dev, dev->rgmii_port);
2750 err_detach_zmii:
2751 if (emac_has_feature(dev, EMAC_FTR_HAS_ZMII))
2752 zmii_detach(dev->zmii_dev, dev->zmii_port);
2753 err_unreg_commac:
2754 mal_unregister_commac(dev->mal, &dev->commac);
2755 err_rel_deps:
2756 emac_put_deps(dev);
2757 err_reg_unmap:
2758 iounmap(dev->emacp);
2759 err_irq_unmap:
2760 if (dev->wol_irq != NO_IRQ)
2761 irq_dispose_mapping(dev->wol_irq);
2762 if (dev->emac_irq != NO_IRQ)
2763 irq_dispose_mapping(dev->emac_irq);
2764 err_free:
2765 free_netdev(ndev);
2766 err_gone:
2767 /* if we were on the bootlist, remove us as we won't show up and
2768 * wake up all waiters to notify them in case they were waiting
2769 * on us
2771 if (blist) {
2772 *blist = NULL;
2773 wake_up_all(&emac_probe_wait);
2775 return err;
2778 static int __devexit emac_remove(struct of_device *ofdev)
2780 struct emac_instance *dev = dev_get_drvdata(&ofdev->dev);
2782 DBG(dev, "remove" NL);
2784 dev_set_drvdata(&ofdev->dev, NULL);
2786 unregister_netdev(dev->ndev);
2788 flush_scheduled_work();
2790 if (emac_has_feature(dev, EMAC_FTR_HAS_TAH))
2791 tah_detach(dev->tah_dev, dev->tah_port);
2792 if (emac_has_feature(dev, EMAC_FTR_HAS_RGMII))
2793 rgmii_detach(dev->rgmii_dev, dev->rgmii_port);
2794 if (emac_has_feature(dev, EMAC_FTR_HAS_ZMII))
2795 zmii_detach(dev->zmii_dev, dev->zmii_port);
2797 mal_unregister_commac(dev->mal, &dev->commac);
2798 emac_put_deps(dev);
2800 emac_dbg_unregister(dev);
2801 iounmap(dev->emacp);
2803 if (dev->wol_irq != NO_IRQ)
2804 irq_dispose_mapping(dev->wol_irq);
2805 if (dev->emac_irq != NO_IRQ)
2806 irq_dispose_mapping(dev->emac_irq);
2808 free_netdev(dev->ndev);
2810 return 0;
2813 /* XXX Features in here should be replaced by properties... */
2814 static struct of_device_id emac_match[] =
2817 .type = "network",
2818 .compatible = "ibm,emac",
2821 .type = "network",
2822 .compatible = "ibm,emac4",
2827 static struct of_platform_driver emac_driver = {
2828 .name = "emac",
2829 .match_table = emac_match,
2831 .probe = emac_probe,
2832 .remove = emac_remove,
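/* Collect every wired EMAC node into emac_boot_list and sort it by
 * cell-index, so that inter-EMAC dependencies (shared MDIO, the
 * "previous EMAC" ordering used by emac_wait_deps) resolve
 * deterministically at probe time.
 */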
2835 static void __init emac_make_bootlist(void)
2837 struct device_node *np = NULL;
2838 int j, max, i = 0, k;
2839 int cell_indices[EMAC_BOOT_LIST_SIZE];
2841 /* Collect EMACs */
2842 while((np = of_find_all_nodes(np)) != NULL) {
2843 const u32 *idx;
2845 if (of_match_node(emac_match, np) == NULL)
2846 continue;
2847 if (of_get_property(np, "unused", NULL))
2848 continue;
2849 idx = of_get_property(np, "cell-index", NULL);
2850 if (idx == NULL)
2851 continue;
2852 cell_indices[i] = *idx;
2853 emac_boot_list[i++] = of_node_get(np);
2854 if (i >= EMAC_BOOT_LIST_SIZE) {
2855 of_node_put(np);
2856 break;
2859 max = i;
2861 /* Bubble sort them (doh, what a creative algorithm :-) */
2862 for (i = 0; max > 1 && (i < (max - 1)); i++)
2863 for (j = i; j < max; j++) {
2864 if (cell_indices[i] > cell_indices[j]) {
2865 np = emac_boot_list[i];
2866 emac_boot_list[i] = emac_boot_list[j];
2867 emac_boot_list[j] = np;
2868 k = cell_indices[i];
2869 cell_indices[i] = cell_indices[j];
2870 cell_indices[j] = k;
2875 static int __init emac_init(void)
2877 int rc;
2879 printk(KERN_INFO DRV_DESC ", version " DRV_VERSION "\n");
2881 /* Init debug stuff */
2882 emac_init_debug();
2884 /* Build EMAC boot list */
2885 emac_make_bootlist();
2887 /* Init submodules */
2888 rc = mal_init();
2889 if (rc)
2890 goto err;
2891 rc = zmii_init();
2892 if (rc)
2893 goto err_mal;
2894 rc = rgmii_init();
2895 if (rc)
2896 goto err_zmii;
2897 rc = tah_init();
2898 if (rc)
2899 goto err_rgmii;
2900 rc = of_register_platform_driver(&emac_driver);
2901 if (rc)
2902 goto err_tah;
2904 return 0;
2906 err_tah:
2907 tah_exit();
2908 err_rgmii:
2909 rgmii_exit();
2910 err_zmii:
2911 zmii_exit();
2912 err_mal:
2913 mal_exit();
2914 err:
2915 return rc;
2918 static void __exit emac_exit(void)
2920 int i;
2922 of_unregister_platform_driver(&emac_driver);
2924 tah_exit();
2925 rgmii_exit();
2926 zmii_exit();
2927 mal_exit();
2928 emac_fini_debug();
2930 /* Destroy EMAC boot list */
2931 for (i = 0; i < EMAC_BOOT_LIST_SIZE; i++)
2932 if (emac_boot_list[i])
2933 of_node_put(emac_boot_list[i]);
2936 module_init(emac_init);
2937 module_exit(emac_exit);