/*
 * drivers/net/ibm_newemac/core.c
 *
 * Driver for PowerPC 4xx on-chip ethernet controller.
 *
 * Copyright 2007 Benjamin Herrenschmidt, IBM Corp.
 *                <benh@kernel.crashing.org>
 *
 * Based on the arch/ppc version of the driver:
 *
 * Copyright (c) 2004, 2005 Zultys Technologies.
 * Eugene Surovegin <eugene.surovegin@zultys.com> or <ebs@ebshome.net>
 *
 * Based on original work by
 * 	Matt Porter <mporter@kernel.crashing.org>
 *	(c) 2003 Benjamin Herrenschmidt <benh@kernel.crashing.org>
 *      Armin Kuster <akuster@mvista.com>
 * 	Johnnie Peters <jpeters@mvista.com>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the
 * Free Software Foundation; either version 2 of the License, or (at your
 * option) any later version.
 *
 */

#include <linux/sched.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/delay.h>
#include <linux/types.h>
#include <linux/pci.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/crc32.h>
#include <linux/ethtool.h>
#include <linux/mii.h>
#include <linux/bitops.h>
#include <linux/workqueue.h>
#include <linux/of.h>

#include <asm/processor.h>
#include <asm/io.h>
#include <asm/dma.h>
#include <asm/uaccess.h>
#include <asm/dcr.h>
#include <asm/dcr-regs.h>

#include "core.h"

/*
 * Lack of dma_unmap_???? calls is intentional.
 *
 * API-correct usage requires additional support state information to be
 * maintained for every RX and TX buffer descriptor (BD). Unfortunately, due to
 * EMAC design (e.g. TX buffer passed from network stack can be split into
 * several BDs, dma_map_single/dma_map_page can be used to map particular BD),
 * maintaining such information will add additional overhead.
 * Current DMA API implementation for 4xx processors only ensures cache coherency
 * and dma_unmap_???? routines are empty and are likely to stay this way.
 * I decided to omit dma_unmap_??? calls because I don't want to add additional
 * complexity just for the sake of following some abstract API, when it doesn't
 * add any real benefit to the driver. I understand that this decision may be
 * controversial, but I really tried to make code API-correct and efficient
 * at the same time and didn't come up with code I liked :(. --ebs
 */

#define DRV_NAME        "emac"
#define DRV_VERSION     "3.54"
#define DRV_DESC        "PPC 4xx OCP EMAC driver"

MODULE_DESCRIPTION(DRV_DESC);
MODULE_AUTHOR
    ("Eugene Surovegin <eugene.surovegin@zultys.com> or <ebs@ebshome.net>");
MODULE_LICENSE("GPL");

/*
 * PPC64 doesn't (yet) have a cacheable_memcpy
 */
#ifdef CONFIG_PPC64
#define cacheable_memcpy(d,s,n) memcpy((d),(s),(n))
#endif

/* minimum number of free TX descriptors required to wake up TX process */
#define EMAC_TX_WAKEUP_THRESH		(NUM_TX_BUFF / 4)

/* If packet size is less than this number, we allocate small skb and copy packet
 * contents into it instead of just sending original big skb up
 */
#define EMAC_RX_COPY_THRESH		CONFIG_IBM_NEW_EMAC_RX_COPY_THRESHOLD

/* Since multiple EMACs share MDIO lines in various ways, we need
 * to avoid re-using the same PHY ID in cases where the arch didn't
 * setup precise phy_map entries
 *
 * XXX This is something that needs to be reworked as we can have multiple
 * EMAC "sets" (multiple ASICs containing several EMACs) though we can
 * probably require in that case to have explicit PHY IDs in the device-tree
 */
static u32 busy_phy_map;
static DEFINE_MUTEX(emac_phy_map_lock);

/* This is the wait queue used to wait on any event related to probe, that
 * is discovery of MALs, other EMACs, ZMII/RGMIIs, etc...
 */
static DECLARE_WAIT_QUEUE_HEAD(emac_probe_wait);

/* Having stable interface names is a doomed idea. However, it would be nice
 * if we didn't have completely random interface names at boot too :-) It's
 * just a matter of making everybody's life easier. Since we are doing
 * threaded probing, it's a bit harder though. The base idea here is that
 * we make up a list of all emacs in the device-tree before we register the
 * driver. Every emac will then wait for the previous one in the list to
 * initialize before itself. We should also keep that list ordered by
 * cell_index.
 * That list is only 4 entries long, meaning that additional EMACs don't
 * get ordering guarantees unless EMAC_BOOT_LIST_SIZE is increased.
 */
#define EMAC_BOOT_LIST_SIZE	4
static struct device_node *emac_boot_list[EMAC_BOOT_LIST_SIZE];

/* How long should I wait for dependent devices ? */
#define EMAC_PROBE_DEP_TIMEOUT	(HZ * 5)

/* I don't want to litter system log with timeout errors
 * when we have brain-damaged PHY.
 */
static inline void emac_report_timeout_error(struct emac_instance *dev,
					     const char *error)
{
	if (net_ratelimit())
		printk(KERN_ERR "%s: %s\n", dev->ndev->name, error);
}

/* PHY polling intervals */
#define PHY_POLL_LINK_ON	HZ
#define PHY_POLL_LINK_OFF	(HZ / 5)

/* Graceful stop timeouts in us.
 * We should allow up to 1 frame time (full-duplex, ignoring collisions)
 */
#define STOP_TIMEOUT_10		1230
#define STOP_TIMEOUT_100	124
#define STOP_TIMEOUT_1000	13
#define STOP_TIMEOUT_1000_JUMBO	73
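/* Editor's note (worked derivation, assuming standard Ethernet overheads):
 * one maximum-length frame on the wire is 1538 bytes (1500 payload + 18
 * header/FCS + 8 preamble + 12 inter-frame gap) = 12304 bit times, i.e.
 * ~1230 us at 10 Mbps, ~123 us at 100 Mbps and ~12.3 us at 1000 Mbps, which
 * lines up with the (rounded) values above. The jumbo figure matches a
 * ~9000-byte payload: 9038 * 8 / 1000 Mbps ~= 72.3 us -> 73.
 */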
static unsigned char default_mcast_addr[] = {
	0x01, 0x80, 0xC2, 0x00, 0x00, 0x01
};
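/* 01:80:C2:00:00:01 is the reserved IEEE 802.3x flow-control (PAUSE)
 * multicast address; emac_configure() adds it via dev_mc_add() so the RX
 * filter accepts incoming PAUSE frames.
 */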
/* Please, keep in sync with struct ibm_emac_stats/ibm_emac_error_stats */
static const char emac_stats_keys[EMAC_ETHTOOL_STATS_COUNT][ETH_GSTRING_LEN] = {
	"rx_packets", "rx_bytes", "tx_packets", "tx_bytes", "rx_packets_csum",
	"tx_packets_csum", "tx_undo", "rx_dropped_stack", "rx_dropped_oom",
	"rx_dropped_error", "rx_dropped_resize", "rx_dropped_mtu",
	"rx_stopped", "rx_bd_errors", "rx_bd_overrun", "rx_bd_bad_packet",
	"rx_bd_runt_packet", "rx_bd_short_event", "rx_bd_alignment_error",
	"rx_bd_bad_fcs", "rx_bd_packet_too_long", "rx_bd_out_of_range",
	"rx_bd_in_range", "rx_parity", "rx_fifo_overrun", "rx_overrun",
	"rx_bad_packet", "rx_runt_packet", "rx_short_event",
	"rx_alignment_error", "rx_bad_fcs", "rx_packet_too_long",
	"rx_out_of_range", "rx_in_range", "tx_dropped", "tx_bd_errors",
	"tx_bd_bad_fcs", "tx_bd_carrier_loss", "tx_bd_excessive_deferral",
	"tx_bd_excessive_collisions", "tx_bd_late_collision",
	"tx_bd_multple_collisions", "tx_bd_single_collision",
	"tx_bd_underrun", "tx_bd_sqe", "tx_parity", "tx_underrun", "tx_sqe",
	"tx_errors"
};

static irqreturn_t emac_irq(int irq, void *dev_instance);
static void emac_clean_tx_ring(struct emac_instance *dev);
static void __emac_set_multicast_list(struct emac_instance *dev);

static inline int emac_phy_supports_gige(int phy_mode)
{
	return  phy_mode == PHY_MODE_GMII ||
		phy_mode == PHY_MODE_RGMII ||
		phy_mode == PHY_MODE_TBI ||
		phy_mode == PHY_MODE_RTBI;
}

static inline int emac_phy_gpcs(int phy_mode)
{
	return  phy_mode == PHY_MODE_TBI ||
		phy_mode == PHY_MODE_RTBI;
}
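/* Editor's note: in TBI/RTBI modes the EMAC drives a SERDES through its
 * internal gigabit PCS ("GPCS") rather than an external PHY; that PCS is
 * itself programmed over the MII management interface, which is why
 * emac_phy_gpcs() is special-cased in emac_configure() below.
 */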
static inline void emac_tx_enable(struct emac_instance *dev)
{
	struct emac_regs __iomem *p = dev->emacp;
	u32 r;

	DBG(dev, "tx_enable" NL);

	r = in_be32(&p->mr0);
	if (!(r & EMAC_MR0_TXE))
		out_be32(&p->mr0, r | EMAC_MR0_TXE);
}

static void emac_tx_disable(struct emac_instance *dev)
{
	struct emac_regs __iomem *p = dev->emacp;
	u32 r;

	DBG(dev, "tx_disable" NL);

	r = in_be32(&p->mr0);
	if (r & EMAC_MR0_TXE) {
		int n = dev->stop_timeout;
		out_be32(&p->mr0, r & ~EMAC_MR0_TXE);
		while (!(in_be32(&p->mr0) & EMAC_MR0_TXI) && n) {
			udelay(1);
			--n;
		}
		if (unlikely(!n))
			emac_report_timeout_error(dev, "TX disable timeout");
	}
}
static void emac_rx_enable(struct emac_instance *dev)
{
	struct emac_regs __iomem *p = dev->emacp;
	u32 r;

	if (unlikely(test_bit(MAL_COMMAC_RX_STOPPED, &dev->commac.flags)))
		goto out;

	DBG(dev, "rx_enable" NL);

	r = in_be32(&p->mr0);
	if (!(r & EMAC_MR0_RXE)) {
		if (unlikely(!(r & EMAC_MR0_RXI))) {
			/* Wait if previous async disable is still in progress */
			int n = dev->stop_timeout;
			while (!(r = in_be32(&p->mr0) & EMAC_MR0_RXI) && n) {
				udelay(1);
				--n;
			}
			if (unlikely(!n))
				emac_report_timeout_error(dev,
							  "RX disable timeout");
		}
		out_be32(&p->mr0, r | EMAC_MR0_RXE);
	}
 out:
	;
}

static void emac_rx_disable(struct emac_instance *dev)
{
	struct emac_regs __iomem *p = dev->emacp;
	u32 r;

	DBG(dev, "rx_disable" NL);

	r = in_be32(&p->mr0);
	if (r & EMAC_MR0_RXE) {
		int n = dev->stop_timeout;
		out_be32(&p->mr0, r & ~EMAC_MR0_RXE);
		while (!(in_be32(&p->mr0) & EMAC_MR0_RXI) && n) {
			udelay(1);
			--n;
		}
		if (unlikely(!n))
			emac_report_timeout_error(dev, "RX disable timeout");
	}
}
static inline void emac_netif_stop(struct emac_instance *dev)
{
	netif_tx_lock_bh(dev->ndev);
	dev->no_mcast = 1;
	netif_tx_unlock_bh(dev->ndev);
	dev->ndev->trans_start = jiffies;	/* prevent tx timeout */
	mal_poll_disable(dev->mal, &dev->commac);
	netif_tx_disable(dev->ndev);
}

static inline void emac_netif_start(struct emac_instance *dev)
{
	netif_tx_lock_bh(dev->ndev);
	dev->no_mcast = 0;
	if (dev->mcast_pending && netif_running(dev->ndev))
		__emac_set_multicast_list(dev);
	netif_tx_unlock_bh(dev->ndev);

	netif_wake_queue(dev->ndev);

	/* NOTE: unconditional netif_wake_queue is only appropriate
	 * so long as all callers are assured to have free tx slots
	 * (taken from tg3... though the case where that is wrong is
	 *  not terribly harmful)
	 */
	mal_poll_enable(dev->mal, &dev->commac);
}

static inline void emac_rx_disable_async(struct emac_instance *dev)
{
	struct emac_regs __iomem *p = dev->emacp;
	u32 r;

	DBG(dev, "rx_disable_async" NL);

	r = in_be32(&p->mr0);
	if (r & EMAC_MR0_RXE)
		out_be32(&p->mr0, r & ~EMAC_MR0_RXE);
}
static int emac_reset(struct emac_instance *dev)
{
	struct emac_regs __iomem *p = dev->emacp;
	int n = 20;

	DBG(dev, "reset" NL);

	if (!dev->reset_failed) {
		/* 40x erratum suggests stopping RX channel before reset,
		 * we stop TX as well
		 */
		emac_rx_disable(dev);
		emac_tx_disable(dev);
	}

	out_be32(&p->mr0, EMAC_MR0_SRST);
	while ((in_be32(&p->mr0) & EMAC_MR0_SRST) && n)
		--n;

	if (n) {
		dev->reset_failed = 0;
		return 0;
	} else {
		emac_report_timeout_error(dev, "reset timeout");
		dev->reset_failed = 1;
		return -ETIMEDOUT;
	}
}
static void emac_hash_mc(struct emac_instance *dev)
{
	struct emac_regs __iomem *p = dev->emacp;
	u16 gaht[4] = { 0 };
	struct dev_mc_list *dmi;

	DBG(dev, "hash_mc %d" NL, dev->ndev->mc_count);

	for (dmi = dev->ndev->mc_list; dmi; dmi = dmi->next) {
		int bit;
		DBG2(dev, "mc %02x:%02x:%02x:%02x:%02x:%02x" NL,
		     dmi->dmi_addr[0], dmi->dmi_addr[1], dmi->dmi_addr[2],
		     dmi->dmi_addr[3], dmi->dmi_addr[4], dmi->dmi_addr[5]);

		bit = 63 - (ether_crc(ETH_ALEN, dmi->dmi_addr) >> 26);
		gaht[bit >> 4] |= 0x8000 >> (bit & 0x0f);
	}
	out_be32(&p->gaht1, gaht[0]);
	out_be32(&p->gaht2, gaht[1]);
	out_be32(&p->gaht3, gaht[2]);
	out_be32(&p->gaht4, gaht[3]);
}
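/* Worked example (editor's sketch): the hash uses the top 6 bits of the
 * Ethernet CRC-32 of the address (ether_crc() >> 26), inverted by "63 -" so
 * that a top-6-bits value of 0x3f maps to bit 0. bit >> 4 then selects one
 * of the four 16-bit GAHT registers and 0x8000 >> (bit & 0x0f) the bit
 * within it, so bit 0 sets the MSB of gaht1 and bit 63 the LSB of gaht4.
 */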
static inline u32 emac_iff2rmr(struct net_device *ndev)
{
	struct emac_instance *dev = netdev_priv(ndev);
	u32 r;

	r = EMAC_RMR_SP | EMAC_RMR_SFCS | EMAC_RMR_IAE | EMAC_RMR_BAE;

	if (emac_has_feature(dev, EMAC_FTR_EMAC4))
		r |= EMAC4_RMR_BASE;
	else
		r |= EMAC_RMR_BASE;

	if (ndev->flags & IFF_PROMISC)
		r |= EMAC_RMR_PME;
	else if (ndev->flags & IFF_ALLMULTI || ndev->mc_count > 32)
		r |= EMAC_RMR_PMME;
	else if (ndev->mc_count > 0)
		r |= EMAC_RMR_MAE;

	return r;
}

static u32 __emac_calc_base_mr1(struct emac_instance *dev, int tx_size, int rx_size)
{
	u32 ret = EMAC_MR1_VLE | EMAC_MR1_IST | EMAC_MR1_TR0_MULT;

	DBG2(dev, "__emac_calc_base_mr1" NL);

	switch(tx_size) {
	case 2048:
		ret |= EMAC_MR1_TFS_2K;
		break;
	default:
		printk(KERN_WARNING "%s: Unknown Tx FIFO size %d\n",
		       dev->ndev->name, tx_size);
	}

	switch(rx_size) {
	case 16384:
		ret |= EMAC_MR1_RFS_16K;
		break;
	case 4096:
		ret |= EMAC_MR1_RFS_4K;
		break;
	default:
		printk(KERN_WARNING "%s: Unknown Rx FIFO size %d\n",
		       dev->ndev->name, rx_size);
	}

	return ret;
}
static u32 __emac4_calc_base_mr1(struct emac_instance *dev, int tx_size, int rx_size)
{
	u32 ret = EMAC_MR1_VLE | EMAC_MR1_IST | EMAC4_MR1_TR |
		EMAC4_MR1_OBCI(dev->opb_bus_freq / 1000000);

	DBG2(dev, "__emac4_calc_base_mr1" NL);

	switch(tx_size) {
	case 4096:
		ret |= EMAC4_MR1_TFS_4K;
		break;
	case 2048:
		ret |= EMAC4_MR1_TFS_2K;
		break;
	default:
		printk(KERN_WARNING "%s: Unknown Tx FIFO size %d\n",
		       dev->ndev->name, tx_size);
	}

	switch(rx_size) {
	case 16384:
		ret |= EMAC4_MR1_RFS_16K;
		break;
	case 4096:
		ret |= EMAC4_MR1_RFS_4K;
		break;
	case 2048:
		ret |= EMAC4_MR1_RFS_2K;
		break;
	default:
		printk(KERN_WARNING "%s: Unknown Rx FIFO size %d\n",
		       dev->ndev->name, rx_size);
	}

	return ret;
}

static u32 emac_calc_base_mr1(struct emac_instance *dev, int tx_size, int rx_size)
{
	return emac_has_feature(dev, EMAC_FTR_EMAC4) ?
		__emac4_calc_base_mr1(dev, tx_size, rx_size) :
		__emac_calc_base_mr1(dev, tx_size, rx_size);
}

static inline u32 emac_calc_trtr(struct emac_instance *dev, unsigned int size)
{
	if (emac_has_feature(dev, EMAC_FTR_EMAC4))
		return ((size >> 6) - 1) << EMAC_TRTR_SHIFT_EMAC4;
	else
		return ((size >> 6) - 1) << EMAC_TRTR_SHIFT;
}
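/* Editor's note: the TRTR threshold field apparently counts 64-byte blocks
 * minus one. For the call emac_calc_trtr(dev, tx_size / 2) with a 2048-byte
 * TX FIFO this gives (1024 >> 6) - 1 = 15, i.e. transmission is requested
 * once 16 * 64 = 1024 bytes have accumulated in the FIFO.
 */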
static inline u32 emac_calc_rwmr(struct emac_instance *dev,
				 unsigned int low, unsigned int high)
{
	if (emac_has_feature(dev, EMAC_FTR_EMAC4))
		return (low << 22) | ((high & 0x3ff) << 6);
	else
		return (low << 23) | ((high & 0x1ff) << 7);
}

static int emac_configure(struct emac_instance *dev)
{
	struct emac_regs __iomem *p = dev->emacp;
	struct net_device *ndev = dev->ndev;
	int tx_size, rx_size, link = netif_carrier_ok(dev->ndev);
	u32 r, mr1 = 0;

	DBG(dev, "configure" NL);

	if (!link) {
		out_be32(&p->mr1, in_be32(&p->mr1)
			 | EMAC_MR1_FDE | EMAC_MR1_ILE);
		udelay(100);
	} else if (emac_reset(dev) < 0)
		return -ETIMEDOUT;

	if (emac_has_feature(dev, EMAC_FTR_HAS_TAH))
		tah_reset(dev->tah_dev);

	DBG(dev, " link = %d duplex = %d, pause = %d, asym_pause = %d\n",
	    link, dev->phy.duplex, dev->phy.pause, dev->phy.asym_pause);

	/* Default fifo sizes */
	tx_size = dev->tx_fifo_size;
	rx_size = dev->rx_fifo_size;

	/* No link, force loopback */
	if (!link)
		mr1 = EMAC_MR1_FDE | EMAC_MR1_ILE;

	/* Check for full duplex */
	else if (dev->phy.duplex == DUPLEX_FULL)
		mr1 |= EMAC_MR1_FDE | EMAC_MR1_MWSW_001;

	/* Adjust fifo sizes, mr1 and timeouts based on link speed */
	dev->stop_timeout = STOP_TIMEOUT_10;
	switch (dev->phy.speed) {
	case SPEED_1000:
		if (emac_phy_gpcs(dev->phy.mode)) {
			mr1 |= EMAC_MR1_MF_1000GPCS |
				EMAC_MR1_MF_IPPA(dev->phy.address);

			/* Put some arbitrary OUI, Manuf & Rev IDs so we can
			 * identify this GPCS PHY later.
			 */
			out_be32(&p->ipcr, 0xdeadbeef);
		} else
			mr1 |= EMAC_MR1_MF_1000;

		/* Extended fifo sizes */
		tx_size = dev->tx_fifo_size_gige;
		rx_size = dev->rx_fifo_size_gige;

		if (dev->ndev->mtu > ETH_DATA_LEN) {
			if (emac_has_feature(dev, EMAC_FTR_EMAC4))
				mr1 |= EMAC4_MR1_JPSM;
			else
				mr1 |= EMAC_MR1_JPSM;
			dev->stop_timeout = STOP_TIMEOUT_1000_JUMBO;
		} else
			dev->stop_timeout = STOP_TIMEOUT_1000;
		break;
	case SPEED_100:
		mr1 |= EMAC_MR1_MF_100;
		dev->stop_timeout = STOP_TIMEOUT_100;
		break;
	default: /* make gcc happy */
		break;
	}

	if (emac_has_feature(dev, EMAC_FTR_HAS_RGMII))
		rgmii_set_speed(dev->rgmii_dev, dev->rgmii_port,
				dev->phy.speed);
	if (emac_has_feature(dev, EMAC_FTR_HAS_ZMII))
		zmii_set_speed(dev->zmii_dev, dev->zmii_port, dev->phy.speed);
	/* An erratum on 40x forces us to NOT use integrated flow control,
	 * let's hope it works on 44x ;)
	 */
	if (!emac_has_feature(dev, EMAC_FTR_NO_FLOW_CONTROL_40x) &&
	    dev->phy.duplex == DUPLEX_FULL) {
		if (dev->phy.pause)
			mr1 |= EMAC_MR1_EIFC | EMAC_MR1_APP;
		else if (dev->phy.asym_pause)
			mr1 |= EMAC_MR1_APP;
	}

	/* Add base settings & fifo sizes & program MR1 */
	mr1 |= emac_calc_base_mr1(dev, tx_size, rx_size);
	out_be32(&p->mr1, mr1);

	/* Set individual MAC address */
	out_be32(&p->iahr, (ndev->dev_addr[0] << 8) | ndev->dev_addr[1]);
	out_be32(&p->ialr, (ndev->dev_addr[2] << 24) |
		 (ndev->dev_addr[3] << 16) | (ndev->dev_addr[4] << 8) |
		 ndev->dev_addr[5]);

	/* VLAN Tag Protocol ID */
	out_be32(&p->vtpid, 0x8100);

	/* Receive mode register */
	r = emac_iff2rmr(ndev);
	if (r & EMAC_RMR_MAE)
		emac_hash_mc(dev);
	out_be32(&p->rmr, r);

	/* FIFOs thresholds */
	if (emac_has_feature(dev, EMAC_FTR_EMAC4))
		r = EMAC4_TMR1((dev->mal_burst_size / dev->fifo_entry_size) + 1,
			       tx_size / 2 / dev->fifo_entry_size);
	else
		r = EMAC_TMR1((dev->mal_burst_size / dev->fifo_entry_size) + 1,
			      tx_size / 2 / dev->fifo_entry_size);
	out_be32(&p->tmr1, r);
	out_be32(&p->trtr, emac_calc_trtr(dev, tx_size / 2));

	/* PAUSE frame is sent when RX FIFO reaches its high-water mark,
	   there should still be enough space in the FIFO to allow our link
	   partner time to process this frame and also time to send PAUSE
	   frame itself.

	   Here is the worst case scenario for the RX FIFO "headroom"
	   (from "The Switch Book") (100Mbps, without preamble, inter-frame gap):

	   1) One maximum-length frame on TX                    1522 bytes
	   2) One PAUSE frame time                                64 bytes
	   3) PAUSE frame decode time allowance                   64 bytes
	   4) One maximum-length frame on RX                    1522 bytes
	   5) Round-trip propagation delay of the link (100Mb)    15 bytes
	   ----------
	   3187 bytes

	   I chose to set high-water mark to RX_FIFO_SIZE / 4 (1024 bytes)
	   low-water mark  to RX_FIFO_SIZE / 8 (512 bytes)
	 */
	r = emac_calc_rwmr(dev, rx_size / 8 / dev->fifo_entry_size,
			   rx_size / 4 / dev->fifo_entry_size);
	out_be32(&p->rwmr, r);
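	/* Editor's note: the watermarks passed to emac_calc_rwmr() are in
	 * units of fifo_entry_size. Taking, say, rx_size = 4096 and
	 * fifo_entry_size = 16 (typical device-tree values) gives
	 * low = 4096/8/16 = 32 entries and high = 4096/4/16 = 64 entries,
	 * i.e. the 512- and 1024-byte marks the comment above describes.
	 */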

	/* Set PAUSE timer to the maximum */
	out_be32(&p->ptr, 0xffff);

	/* IRQ sources */
	r = EMAC_ISR_OVR | EMAC_ISR_BP | EMAC_ISR_SE |
		EMAC_ISR_ALE | EMAC_ISR_BFCS | EMAC_ISR_PTLE | EMAC_ISR_ORE |
		EMAC_ISR_IRE | EMAC_ISR_TE;
	if (emac_has_feature(dev, EMAC_FTR_EMAC4))
		r |= EMAC4_ISR_TXPE | EMAC4_ISR_RXPE /* | EMAC4_ISR_TXUE |
							EMAC4_ISR_RXOE | */;
	out_be32(&p->iser, r);

	/* We need to take GPCS PHY out of isolate mode after EMAC reset */
	if (emac_phy_gpcs(dev->phy.mode))
		emac_mii_reset_phy(&dev->phy);

	/* Required for Pause packet support in EMAC */
	dev_mc_add(ndev, default_mcast_addr, sizeof(default_mcast_addr), 1);

	return 0;
}
static void emac_reinitialize(struct emac_instance *dev)
{
	DBG(dev, "reinitialize" NL);

	emac_netif_stop(dev);
	if (!emac_configure(dev)) {
		emac_tx_enable(dev);
		emac_rx_enable(dev);
	}
	emac_netif_start(dev);
}

static void emac_full_tx_reset(struct emac_instance *dev)
{
	DBG(dev, "full_tx_reset" NL);

	emac_tx_disable(dev);
	mal_disable_tx_channel(dev->mal, dev->mal_tx_chan);
	emac_clean_tx_ring(dev);
	dev->tx_cnt = dev->tx_slot = dev->ack_slot = 0;

	emac_configure(dev);

	mal_enable_tx_channel(dev->mal, dev->mal_tx_chan);
	emac_tx_enable(dev);
	emac_rx_enable(dev);
}

static void emac_reset_work(struct work_struct *work)
{
	struct emac_instance *dev = container_of(work, struct emac_instance, reset_work);

	DBG(dev, "reset_work" NL);

	mutex_lock(&dev->link_lock);
	if (dev->opened) {
		emac_netif_stop(dev);
		emac_full_tx_reset(dev);
		emac_netif_start(dev);
	}
	mutex_unlock(&dev->link_lock);
}

static void emac_tx_timeout(struct net_device *ndev)
{
	struct emac_instance *dev = netdev_priv(ndev);

	DBG(dev, "tx_timeout" NL);

	schedule_work(&dev->reset_work);
}
static inline int emac_phy_done(struct emac_instance *dev, u32 stacr)
{
	int done = !!(stacr & EMAC_STACR_OC);

	if (emac_has_feature(dev, EMAC_FTR_STACR_OC_INVERT))
		done = !done;

	return done;
}
static int __emac_mdio_read(struct emac_instance *dev, u8 id, u8 reg)
{
	struct emac_regs __iomem *p = dev->emacp;
	u32 r = 0;
	int n, err = -ETIMEDOUT;

	mutex_lock(&dev->mdio_lock);

	DBG2(dev, "mdio_read(%02x,%02x)" NL, id, reg);

	/* Enable proper MDIO port */
	if (emac_has_feature(dev, EMAC_FTR_HAS_ZMII))
		zmii_get_mdio(dev->zmii_dev, dev->zmii_port);
	if (emac_has_feature(dev, EMAC_FTR_HAS_RGMII))
		rgmii_get_mdio(dev->rgmii_dev, dev->rgmii_port);

	/* Wait for management interface to become idle */
	n = 10;
	while (!emac_phy_done(dev, in_be32(&p->stacr))) {
		udelay(1);
		if (!--n) {
			DBG2(dev, " -> timeout wait idle\n");
			goto bail;
		}
	}

	/* Issue read command */
	if (emac_has_feature(dev, EMAC_FTR_EMAC4))
		r = EMAC4_STACR_BASE(dev->opb_bus_freq);
	else
		r = EMAC_STACR_BASE(dev->opb_bus_freq);
	if (emac_has_feature(dev, EMAC_FTR_STACR_OC_INVERT))
		r |= EMAC_STACR_OC;
	if (emac_has_feature(dev, EMAC_FTR_HAS_NEW_STACR))
		r |= EMACX_STACR_STAC_READ;
	else
		r |= EMAC_STACR_STAC_READ;
	r |= (reg & EMAC_STACR_PRA_MASK)
		| ((id & EMAC_STACR_PCDA_MASK) << EMAC_STACR_PCDA_SHIFT);
	out_be32(&p->stacr, r);

	/* Wait for read to complete */
	n = 100;
	while (!emac_phy_done(dev, (r = in_be32(&p->stacr)))) {
		udelay(1);
		if (!--n) {
			DBG2(dev, " -> timeout wait complete\n");
			goto bail;
		}
	}

	if (unlikely(r & EMAC_STACR_PHYE)) {
		DBG(dev, "mdio_read(%02x, %02x) failed" NL, id, reg);
		err = -EREMOTEIO;
		goto bail;
	}

	r = ((r >> EMAC_STACR_PHYD_SHIFT) & EMAC_STACR_PHYD_MASK);

	DBG2(dev, "mdio_read -> %04x" NL, r);
	err = 0;
 bail:
	if (emac_has_feature(dev, EMAC_FTR_HAS_RGMII))
		rgmii_put_mdio(dev->rgmii_dev, dev->rgmii_port);
	if (emac_has_feature(dev, EMAC_FTR_HAS_ZMII))
		zmii_put_mdio(dev->zmii_dev, dev->zmii_port);
	mutex_unlock(&dev->mdio_lock);

	return err == 0 ? r : err;
}
static void __emac_mdio_write(struct emac_instance *dev, u8 id, u8 reg,
			      u16 val)
{
	struct emac_regs __iomem *p = dev->emacp;
	u32 r = 0;
	int n, err = -ETIMEDOUT;

	mutex_lock(&dev->mdio_lock);

	DBG2(dev, "mdio_write(%02x,%02x,%04x)" NL, id, reg, val);

	/* Enable proper MDIO port */
	if (emac_has_feature(dev, EMAC_FTR_HAS_ZMII))
		zmii_get_mdio(dev->zmii_dev, dev->zmii_port);
	if (emac_has_feature(dev, EMAC_FTR_HAS_RGMII))
		rgmii_get_mdio(dev->rgmii_dev, dev->rgmii_port);

	/* Wait for management interface to be idle */
	n = 10;
	while (!emac_phy_done(dev, in_be32(&p->stacr))) {
		udelay(1);
		if (!--n) {
			DBG2(dev, " -> timeout wait idle\n");
			goto bail;
		}
	}

	/* Issue write command */
	if (emac_has_feature(dev, EMAC_FTR_EMAC4))
		r = EMAC4_STACR_BASE(dev->opb_bus_freq);
	else
		r = EMAC_STACR_BASE(dev->opb_bus_freq);
	if (emac_has_feature(dev, EMAC_FTR_STACR_OC_INVERT))
		r |= EMAC_STACR_OC;
	if (emac_has_feature(dev, EMAC_FTR_HAS_NEW_STACR))
		r |= EMACX_STACR_STAC_WRITE;
	else
		r |= EMAC_STACR_STAC_WRITE;
	r |= (reg & EMAC_STACR_PRA_MASK) |
		((id & EMAC_STACR_PCDA_MASK) << EMAC_STACR_PCDA_SHIFT) |
		(val << EMAC_STACR_PHYD_SHIFT);
	out_be32(&p->stacr, r);

	/* Wait for write to complete */
	n = 100;
	while (!emac_phy_done(dev, in_be32(&p->stacr))) {
		udelay(1);
		if (!--n) {
			DBG2(dev, " -> timeout wait complete\n");
			goto bail;
		}
	}
	err = 0;
 bail:
	if (emac_has_feature(dev, EMAC_FTR_HAS_RGMII))
		rgmii_put_mdio(dev->rgmii_dev, dev->rgmii_port);
	if (emac_has_feature(dev, EMAC_FTR_HAS_ZMII))
		zmii_put_mdio(dev->zmii_dev, dev->zmii_port);
	mutex_unlock(&dev->mdio_lock);
}
static int emac_mdio_read(struct net_device *ndev, int id, int reg)
{
	struct emac_instance *dev = netdev_priv(ndev);
	int res;

	res = __emac_mdio_read(dev->mdio_instance ? dev->mdio_instance : dev,
			       (u8) id, (u8) reg);
	return res;
}

static void emac_mdio_write(struct net_device *ndev, int id, int reg, int val)
{
	struct emac_instance *dev = netdev_priv(ndev);

	__emac_mdio_write(dev->mdio_instance ? dev->mdio_instance : dev,
			  (u8) id, (u8) reg, (u16) val);
}
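/* Usage sketch (editor's illustration, not part of the driver): reading a
 * PHY's ID registers through the wrapper above looks like
 *
 *	int id1 = emac_mdio_read(ndev, dev->phy.address, MII_PHYSID1);
 *	int id2 = emac_mdio_read(ndev, dev->phy.address, MII_PHYSID2);
 *
 * A negative result (-ETIMEDOUT or -EREMOTEIO) indicates failure; anything
 * else is the 16-bit register value.
 */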
/* Tx lock BH */
static void __emac_set_multicast_list(struct emac_instance *dev)
{
	struct emac_regs __iomem *p = dev->emacp;
	u32 rmr = emac_iff2rmr(dev->ndev);

	DBG(dev, "__multicast %08x" NL, rmr);

	/* I decided to relax register access rules here to avoid
	 * full EMAC reset.
	 *
	 * There is a real problem with EMAC4 core if we use MWSW_001 bit
	 * in MR1 register and do a full EMAC reset.
	 * One TX BD status update is delayed and, after EMAC reset, it
	 * never happens, resulting in TX hung (it'll be recovered by TX
	 * timeout handler eventually, but this is just gross).
	 * So we either have to do full TX reset or try to cheat here :)
	 *
	 * The only required change is to RX mode register, so I *think* all
	 * we need is just to stop RX channel. This seems to work on all
	 * tested SoCs. --ebs
	 *
	 * If we need the full reset, we might just trigger the workqueue
	 * and do it async... a bit nasty but should work --BenH
	 */
	dev->mcast_pending = 0;
	emac_rx_disable(dev);
	if (rmr & EMAC_RMR_MAE)
		emac_hash_mc(dev);
	out_be32(&p->rmr, rmr);
	emac_rx_enable(dev);
}

/* Tx lock BH */
static void emac_set_multicast_list(struct net_device *ndev)
{
	struct emac_instance *dev = netdev_priv(ndev);

	DBG(dev, "multicast" NL);

	BUG_ON(!netif_running(dev->ndev));

	if (dev->no_mcast) {
		dev->mcast_pending = 1;
		return;
	}
	__emac_set_multicast_list(dev);
}
static int emac_resize_rx_ring(struct emac_instance *dev, int new_mtu)
{
	int rx_sync_size = emac_rx_sync_size(new_mtu);
	int rx_skb_size = emac_rx_skb_size(new_mtu);
	int i, ret = 0;

	mutex_lock(&dev->link_lock);
	emac_netif_stop(dev);
	emac_rx_disable(dev);
	mal_disable_rx_channel(dev->mal, dev->mal_rx_chan);

	if (dev->rx_sg_skb) {
		++dev->estats.rx_dropped_resize;
		dev_kfree_skb(dev->rx_sg_skb);
		dev->rx_sg_skb = NULL;
	}

	/* Make a first pass over RX ring and mark BDs ready, dropping
	 * non-processed packets on the way. We need this as a separate pass
	 * to simplify error recovery in the case of allocation failure later.
	 */
	for (i = 0; i < NUM_RX_BUFF; ++i) {
		if (dev->rx_desc[i].ctrl & MAL_RX_CTRL_FIRST)
			++dev->estats.rx_dropped_resize;

		dev->rx_desc[i].data_len = 0;
		dev->rx_desc[i].ctrl = MAL_RX_CTRL_EMPTY |
		    (i == (NUM_RX_BUFF - 1) ? MAL_RX_CTRL_WRAP : 0);
	}

	/* Reallocate RX ring only if bigger skb buffers are required */
	if (rx_skb_size <= dev->rx_skb_size)
		goto skip;

	/* Second pass, allocate new skbs */
	for (i = 0; i < NUM_RX_BUFF; ++i) {
		struct sk_buff *skb = alloc_skb(rx_skb_size, GFP_ATOMIC);
		if (!skb) {
			ret = -ENOMEM;
			goto oom;
		}

		BUG_ON(!dev->rx_skb[i]);
		dev_kfree_skb(dev->rx_skb[i]);

		skb_reserve(skb, EMAC_RX_SKB_HEADROOM + 2);
		dev->rx_desc[i].data_ptr =
		    dma_map_single(&dev->ofdev->dev, skb->data - 2, rx_sync_size,
				   DMA_FROM_DEVICE) + 2;
		dev->rx_skb[i] = skb;
	}
 skip:
	/* Check if we need to change "Jumbo" bit in MR1 */
	if ((new_mtu > ETH_DATA_LEN) ^ (dev->ndev->mtu > ETH_DATA_LEN)) {
		/* This is to prevent starting RX channel in emac_rx_enable() */
		set_bit(MAL_COMMAC_RX_STOPPED, &dev->commac.flags);

		dev->ndev->mtu = new_mtu;
		emac_full_tx_reset(dev);
	}

	mal_set_rcbs(dev->mal, dev->mal_rx_chan, emac_rx_size(new_mtu));
 oom:
	/* Restart RX */
	clear_bit(MAL_COMMAC_RX_STOPPED, &dev->commac.flags);
	dev->rx_slot = 0;
	mal_enable_rx_channel(dev->mal, dev->mal_rx_chan);
	emac_rx_enable(dev);
	emac_netif_start(dev);
	mutex_unlock(&dev->link_lock);

	return ret;
}
/* Process ctx, rtnl_lock semaphore */
static int emac_change_mtu(struct net_device *ndev, int new_mtu)
{
	struct emac_instance *dev = netdev_priv(ndev);
	int ret = 0;

	if (new_mtu < EMAC_MIN_MTU || new_mtu > dev->max_mtu)
		return -EINVAL;

	DBG(dev, "change_mtu(%d)" NL, new_mtu);

	if (netif_running(ndev)) {
		/* Check if we really need to reinitialize RX ring */
		if (emac_rx_skb_size(ndev->mtu) != emac_rx_skb_size(new_mtu))
			ret = emac_resize_rx_ring(dev, new_mtu);
	}

	if (!ret) {
		ndev->mtu = new_mtu;
		dev->rx_skb_size = emac_rx_skb_size(new_mtu);
		dev->rx_sync_size = emac_rx_sync_size(new_mtu);
	}

	return ret;
}

static void emac_clean_tx_ring(struct emac_instance *dev)
{
	int i;

	for (i = 0; i < NUM_TX_BUFF; ++i) {
		if (dev->tx_skb[i]) {
			dev_kfree_skb(dev->tx_skb[i]);
			dev->tx_skb[i] = NULL;
			if (dev->tx_desc[i].ctrl & MAL_TX_CTRL_READY)
				++dev->estats.tx_dropped;
		}
		dev->tx_desc[i].ctrl = 0;
		dev->tx_desc[i].data_ptr = 0;
	}
}

static void emac_clean_rx_ring(struct emac_instance *dev)
{
	int i;

	for (i = 0; i < NUM_RX_BUFF; ++i)
		if (dev->rx_skb[i]) {
			dev->rx_desc[i].ctrl = 0;
			dev_kfree_skb(dev->rx_skb[i]);
			dev->rx_skb[i] = NULL;
			dev->rx_desc[i].data_ptr = 0;
		}

	if (dev->rx_sg_skb) {
		dev_kfree_skb(dev->rx_sg_skb);
		dev->rx_sg_skb = NULL;
	}
}

static inline int emac_alloc_rx_skb(struct emac_instance *dev, int slot,
				    gfp_t flags)
{
	struct sk_buff *skb = alloc_skb(dev->rx_skb_size, flags);
	if (unlikely(!skb))
		return -ENOMEM;

	dev->rx_skb[slot] = skb;
	dev->rx_desc[slot].data_len = 0;

	skb_reserve(skb, EMAC_RX_SKB_HEADROOM + 2);
	dev->rx_desc[slot].data_ptr =
	    dma_map_single(&dev->ofdev->dev, skb->data - 2, dev->rx_sync_size,
			   DMA_FROM_DEVICE) + 2;
	wmb();
	dev->rx_desc[slot].ctrl = MAL_RX_CTRL_EMPTY |
	    (slot == (NUM_RX_BUFF - 1) ? MAL_RX_CTRL_WRAP : 0);

	return 0;
}
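/* Editor's note on the +/- 2 offsets above: this is the usual IP-header
 * alignment trick. The descriptor points at skb->data, which sits 2 bytes
 * past a 4-byte boundary, so after the 14-byte Ethernet header the IP
 * header lands 4-byte aligned for the stack; the DMA mapping starts at
 * skb->data - 2 so the synced region itself begins on the aligned boundary
 * (matching the "len + 2" copies in emac_poll_rx()).
 */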
static void emac_print_link_status(struct emac_instance *dev)
{
	if (netif_carrier_ok(dev->ndev))
		printk(KERN_INFO "%s: link is up, %d %s%s\n",
		       dev->ndev->name, dev->phy.speed,
		       dev->phy.duplex == DUPLEX_FULL ? "FDX" : "HDX",
		       dev->phy.pause ? ", pause enabled" :
		       dev->phy.asym_pause ? ", asymmetric pause enabled" : "");
	else
		printk(KERN_INFO "%s: link is down\n", dev->ndev->name);
}
/* Process ctx, rtnl_lock semaphore */
static int emac_open(struct net_device *ndev)
{
	struct emac_instance *dev = netdev_priv(ndev);
	int err, i;

	DBG(dev, "open" NL);

	/* Setup error IRQ handler */
	err = request_irq(dev->emac_irq, emac_irq, 0, "EMAC", dev);
	if (err) {
		printk(KERN_ERR "%s: failed to request IRQ %d\n",
		       ndev->name, dev->emac_irq);
		return err;
	}

	/* Allocate RX ring */
	for (i = 0; i < NUM_RX_BUFF; ++i)
		if (emac_alloc_rx_skb(dev, i, GFP_KERNEL)) {
			printk(KERN_ERR "%s: failed to allocate RX ring\n",
			       ndev->name);
			goto oom;
		}

	dev->tx_cnt = dev->tx_slot = dev->ack_slot = dev->rx_slot = 0;
	clear_bit(MAL_COMMAC_RX_STOPPED, &dev->commac.flags);
	dev->rx_sg_skb = NULL;

	mutex_lock(&dev->link_lock);
	dev->opened = 1;

	/* Start PHY polling now.
	 */
	if (dev->phy.address >= 0) {
		int link_poll_interval;
		if (dev->phy.def->ops->poll_link(&dev->phy)) {
			dev->phy.def->ops->read_link(&dev->phy);
			netif_carrier_on(dev->ndev);
			link_poll_interval = PHY_POLL_LINK_ON;
		} else {
			netif_carrier_off(dev->ndev);
			link_poll_interval = PHY_POLL_LINK_OFF;
		}
		dev->link_polling = 1;
		wmb();
		schedule_delayed_work(&dev->link_work, link_poll_interval);
		emac_print_link_status(dev);
	} else
		netif_carrier_on(dev->ndev);

	emac_configure(dev);
	mal_poll_add(dev->mal, &dev->commac);
	mal_enable_tx_channel(dev->mal, dev->mal_tx_chan);
	mal_set_rcbs(dev->mal, dev->mal_rx_chan, emac_rx_size(ndev->mtu));
	mal_enable_rx_channel(dev->mal, dev->mal_rx_chan);
	emac_tx_enable(dev);
	emac_rx_enable(dev);
	emac_netif_start(dev);

	mutex_unlock(&dev->link_lock);

	return 0;
 oom:
	emac_clean_rx_ring(dev);
	free_irq(dev->emac_irq, dev);

	return -ENOMEM;
}
/* BHs disabled */
#if 0
static int emac_link_differs(struct emac_instance *dev)
{
	u32 r = in_be32(&dev->emacp->mr1);

	int duplex = r & EMAC_MR1_FDE ? DUPLEX_FULL : DUPLEX_HALF;
	int speed, pause, asym_pause;

	if (r & EMAC_MR1_MF_1000)
		speed = SPEED_1000;
	else if (r & EMAC_MR1_MF_100)
		speed = SPEED_100;
	else
		speed = SPEED_10;

	switch (r & (EMAC_MR1_EIFC | EMAC_MR1_APP)) {
	case (EMAC_MR1_EIFC | EMAC_MR1_APP):
		pause = 1;
		asym_pause = 0;
		break;
	case EMAC_MR1_APP:
		pause = 0;
		asym_pause = 1;
		break;
	default:
		pause = asym_pause = 0;
	}
	return speed != dev->phy.speed || duplex != dev->phy.duplex ||
	    pause != dev->phy.pause || asym_pause != dev->phy.asym_pause;
}
#endif
static void emac_link_timer(struct work_struct *work)
{
	struct emac_instance *dev =
		container_of((struct delayed_work *)work,
			     struct emac_instance, link_work);
	int link_poll_interval;

	mutex_lock(&dev->link_lock);
	DBG2(dev, "link timer" NL);

	if (!dev->opened)
		goto bail;

	if (dev->phy.def->ops->poll_link(&dev->phy)) {
		if (!netif_carrier_ok(dev->ndev)) {
			/* Get new link parameters */
			dev->phy.def->ops->read_link(&dev->phy);

			netif_carrier_on(dev->ndev);
			emac_netif_stop(dev);
			emac_full_tx_reset(dev);
			emac_netif_start(dev);
			emac_print_link_status(dev);
		}
		link_poll_interval = PHY_POLL_LINK_ON;
	} else {
		if (netif_carrier_ok(dev->ndev)) {
			netif_carrier_off(dev->ndev);
			netif_tx_disable(dev->ndev);
			emac_reinitialize(dev);
			emac_print_link_status(dev);
		}
		link_poll_interval = PHY_POLL_LINK_OFF;
	}
	schedule_delayed_work(&dev->link_work, link_poll_interval);
 bail:
	mutex_unlock(&dev->link_lock);
}

static void emac_force_link_update(struct emac_instance *dev)
{
	netif_carrier_off(dev->ndev);
	smp_rmb();
	if (dev->link_polling) {
		cancel_rearming_delayed_work(&dev->link_work);
		if (dev->link_polling)
			schedule_delayed_work(&dev->link_work, PHY_POLL_LINK_OFF);
	}
}
/* Process ctx, rtnl_lock semaphore */
static int emac_close(struct net_device *ndev)
{
	struct emac_instance *dev = netdev_priv(ndev);

	DBG(dev, "close" NL);

	if (dev->phy.address >= 0) {
		dev->link_polling = 0;
		cancel_rearming_delayed_work(&dev->link_work);
	}
	mutex_lock(&dev->link_lock);
	emac_netif_stop(dev);
	dev->opened = 0;
	mutex_unlock(&dev->link_lock);

	emac_rx_disable(dev);
	emac_tx_disable(dev);
	mal_disable_rx_channel(dev->mal, dev->mal_rx_chan);
	mal_disable_tx_channel(dev->mal, dev->mal_tx_chan);
	mal_poll_del(dev->mal, &dev->commac);

	emac_clean_tx_ring(dev);
	emac_clean_rx_ring(dev);

	free_irq(dev->emac_irq, dev);

	return 0;
}
static inline u16 emac_tx_csum(struct emac_instance *dev,
			       struct sk_buff *skb)
{
	if (emac_has_feature(dev, EMAC_FTR_HAS_TAH) &&
	    (skb->ip_summed == CHECKSUM_PARTIAL)) {
		++dev->stats.tx_packets_csum;
		return EMAC_TX_CTRL_TAH_CSUM;
	}
	return 0;
}

static inline int emac_xmit_finish(struct emac_instance *dev, int len)
{
	struct emac_regs __iomem *p = dev->emacp;
	struct net_device *ndev = dev->ndev;

	/* Send the packet out. If the if makes a significant perf
	 * difference, then we can store the TMR0 value in "dev"
	 * instead
	 */
	if (emac_has_feature(dev, EMAC_FTR_EMAC4))
		out_be32(&p->tmr0, EMAC4_TMR0_XMIT);
	else
		out_be32(&p->tmr0, EMAC_TMR0_XMIT);

	if (unlikely(++dev->tx_cnt == NUM_TX_BUFF)) {
		netif_stop_queue(ndev);
		DBG2(dev, "stopped TX queue" NL);
	}

	ndev->trans_start = jiffies;
	++dev->stats.tx_packets;
	dev->stats.tx_bytes += len;

	return 0;
}

/* Tx lock BH */
static int emac_start_xmit(struct sk_buff *skb, struct net_device *ndev)
{
	struct emac_instance *dev = netdev_priv(ndev);
	unsigned int len = skb->len;
	int slot;

	u16 ctrl = EMAC_TX_CTRL_GFCS | EMAC_TX_CTRL_GP | MAL_TX_CTRL_READY |
	    MAL_TX_CTRL_LAST | emac_tx_csum(dev, skb);

	slot = dev->tx_slot++;
	if (dev->tx_slot == NUM_TX_BUFF) {
		dev->tx_slot = 0;
		ctrl |= MAL_TX_CTRL_WRAP;
	}

	DBG2(dev, "xmit(%u) %d" NL, len, slot);

	dev->tx_skb[slot] = skb;
	dev->tx_desc[slot].data_ptr = dma_map_single(&dev->ofdev->dev,
						     skb->data, len,
						     DMA_TO_DEVICE);
	dev->tx_desc[slot].data_len = (u16) len;
	wmb();
	dev->tx_desc[slot].ctrl = ctrl;

	return emac_xmit_finish(dev, len);
}

static inline int emac_xmit_split(struct emac_instance *dev, int slot,
				  u32 pd, int len, int last, u16 base_ctrl)
{
	while (1) {
		u16 ctrl = base_ctrl;
		int chunk = min(len, MAL_MAX_TX_SIZE);
		len -= chunk;

		slot = (slot + 1) % NUM_TX_BUFF;

		if (last && !len)
			ctrl |= MAL_TX_CTRL_LAST;
		if (slot == NUM_TX_BUFF - 1)
			ctrl |= MAL_TX_CTRL_WRAP;

		dev->tx_skb[slot] = NULL;
		dev->tx_desc[slot].data_ptr = pd;
		dev->tx_desc[slot].data_len = (u16) chunk;
		dev->tx_desc[slot].ctrl = ctrl;
		++dev->tx_cnt;

		if (!len)
			break;

		pd += chunk;
	}
	return slot;
}
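/* Worked example (editor's sketch, assuming MAL_MAX_TX_SIZE = 4080 as
 * defined for the 4xx MAL): a 9000-byte linear chunk mapped with a single
 * dma_map_single() call is described by BDs of 4080 + 4080 + 840 bytes;
 * only the final BD of a frame carries MAL_TX_CTRL_LAST, and whichever BD
 * occupies the last ring slot carries MAL_TX_CTRL_WRAP.
 */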
/* Tx lock BH disabled (SG version for TAH equipped EMACs) */
static int emac_start_xmit_sg(struct sk_buff *skb, struct net_device *ndev)
{
	struct emac_instance *dev = netdev_priv(ndev);
	int nr_frags = skb_shinfo(skb)->nr_frags;
	int len = skb->len, chunk;
	int slot, i;
	u16 ctrl;
	u32 pd;

	/* This is common "fast" path */
	if (likely(!nr_frags && len <= MAL_MAX_TX_SIZE))
		return emac_start_xmit(skb, ndev);

	len -= skb->data_len;

	/* Note, this is only an *estimation*, we can still run out of empty
	 * slots because of the additional fragmentation into
	 * MAL_MAX_TX_SIZE-sized chunks
	 */
	if (unlikely(dev->tx_cnt + nr_frags + mal_tx_chunks(len) > NUM_TX_BUFF))
		goto stop_queue;

	ctrl = EMAC_TX_CTRL_GFCS | EMAC_TX_CTRL_GP | MAL_TX_CTRL_READY |
	    emac_tx_csum(dev, skb);
	slot = dev->tx_slot;

	/* skb data */
	dev->tx_skb[slot] = NULL;
	chunk = min(len, MAL_MAX_TX_SIZE);
	dev->tx_desc[slot].data_ptr = pd =
	    dma_map_single(&dev->ofdev->dev, skb->data, len, DMA_TO_DEVICE);
	dev->tx_desc[slot].data_len = (u16) chunk;
	len -= chunk;
	if (unlikely(len))
		slot = emac_xmit_split(dev, slot, pd + chunk, len, !nr_frags,
				       ctrl);
	/* skb fragments */
	for (i = 0; i < nr_frags; ++i) {
		struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[i];
		len = frag->size;

		if (unlikely(dev->tx_cnt + mal_tx_chunks(len) >= NUM_TX_BUFF))
			goto undo_frame;

		pd = dma_map_page(&dev->ofdev->dev, frag->page, frag->page_offset, len,
				  DMA_TO_DEVICE);

		slot = emac_xmit_split(dev, slot, pd, len, i == nr_frags - 1,
				       ctrl);
	}

	DBG2(dev, "xmit_sg(%u) %d - %d" NL, skb->len, dev->tx_slot, slot);

	/* Attach skb to the last slot so we don't release it too early */
	dev->tx_skb[slot] = skb;

	/* Send the packet out */
	if (dev->tx_slot == NUM_TX_BUFF - 1)
		ctrl |= MAL_TX_CTRL_WRAP;
	wmb();
	dev->tx_desc[dev->tx_slot].ctrl = ctrl;
	dev->tx_slot = (slot + 1) % NUM_TX_BUFF;

	return emac_xmit_finish(dev, skb->len);

 undo_frame:
	/* Well, too bad. Our previous estimation was overly optimistic.
	 * Undo everything.
	 */
	while (slot != dev->tx_slot) {
		dev->tx_desc[slot].ctrl = 0;
		--dev->tx_cnt;
		if (--slot < 0)
			slot = NUM_TX_BUFF - 1;
	}
	++dev->estats.tx_undo;

 stop_queue:
	netif_stop_queue(ndev);
	DBG2(dev, "stopped TX queue" NL);
	return 1;
}
/* Tx lock BHs */
static void emac_parse_tx_error(struct emac_instance *dev, u16 ctrl)
{
	struct emac_error_stats *st = &dev->estats;

	DBG(dev, "BD TX error %04x" NL, ctrl);

	++st->tx_bd_errors;
	if (ctrl & EMAC_TX_ST_BFCS)
		++st->tx_bd_bad_fcs;
	if (ctrl & EMAC_TX_ST_LCS)
		++st->tx_bd_carrier_loss;
	if (ctrl & EMAC_TX_ST_ED)
		++st->tx_bd_excessive_deferral;
	if (ctrl & EMAC_TX_ST_EC)
		++st->tx_bd_excessive_collisions;
	if (ctrl & EMAC_TX_ST_LC)
		++st->tx_bd_late_collision;
	if (ctrl & EMAC_TX_ST_MC)
		++st->tx_bd_multple_collisions;
	if (ctrl & EMAC_TX_ST_SC)
		++st->tx_bd_single_collision;
	if (ctrl & EMAC_TX_ST_UR)
		++st->tx_bd_underrun;
	if (ctrl & EMAC_TX_ST_SQE)
		++st->tx_bd_sqe;
}

static void emac_poll_tx(void *param)
{
	struct emac_instance *dev = param;
	u32 bad_mask;

	DBG2(dev, "poll_tx, %d %d" NL, dev->tx_cnt, dev->ack_slot);

	if (emac_has_feature(dev, EMAC_FTR_HAS_TAH))
		bad_mask = EMAC_IS_BAD_TX_TAH;
	else
		bad_mask = EMAC_IS_BAD_TX;

	netif_tx_lock_bh(dev->ndev);
	if (dev->tx_cnt) {
		u16 ctrl;
		int slot = dev->ack_slot, n = 0;
	again:
		ctrl = dev->tx_desc[slot].ctrl;
		if (!(ctrl & MAL_TX_CTRL_READY)) {
			struct sk_buff *skb = dev->tx_skb[slot];
			++n;

			if (skb) {
				dev_kfree_skb(skb);
				dev->tx_skb[slot] = NULL;
			}
			slot = (slot + 1) % NUM_TX_BUFF;

			if (unlikely(ctrl & bad_mask))
				emac_parse_tx_error(dev, ctrl);

			if (--dev->tx_cnt)
				goto again;
		}
		if (n) {
			dev->ack_slot = slot;
			if (netif_queue_stopped(dev->ndev) &&
			    dev->tx_cnt < EMAC_TX_WAKEUP_THRESH)
				netif_wake_queue(dev->ndev);

			DBG2(dev, "tx %d pkts" NL, n);
		}
	}
	netif_tx_unlock_bh(dev->ndev);
}
static inline void emac_recycle_rx_skb(struct emac_instance *dev, int slot,
				       int len)
{
	struct sk_buff *skb = dev->rx_skb[slot];

	DBG2(dev, "recycle %d %d" NL, slot, len);

	if (len)
		dma_map_single(&dev->ofdev->dev, skb->data - 2,
			       EMAC_DMA_ALIGN(len + 2), DMA_FROM_DEVICE);

	dev->rx_desc[slot].data_len = 0;
	wmb();
	dev->rx_desc[slot].ctrl = MAL_RX_CTRL_EMPTY |
	    (slot == (NUM_RX_BUFF - 1) ? MAL_RX_CTRL_WRAP : 0);
}

static void emac_parse_rx_error(struct emac_instance *dev, u16 ctrl)
{
	struct emac_error_stats *st = &dev->estats;

	DBG(dev, "BD RX error %04x" NL, ctrl);

	++st->rx_bd_errors;
	if (ctrl & EMAC_RX_ST_OE)
		++st->rx_bd_overrun;
	if (ctrl & EMAC_RX_ST_BP)
		++st->rx_bd_bad_packet;
	if (ctrl & EMAC_RX_ST_RP)
		++st->rx_bd_runt_packet;
	if (ctrl & EMAC_RX_ST_SE)
		++st->rx_bd_short_event;
	if (ctrl & EMAC_RX_ST_AE)
		++st->rx_bd_alignment_error;
	if (ctrl & EMAC_RX_ST_BFCS)
		++st->rx_bd_bad_fcs;
	if (ctrl & EMAC_RX_ST_PTL)
		++st->rx_bd_packet_too_long;
	if (ctrl & EMAC_RX_ST_ORE)
		++st->rx_bd_out_of_range;
	if (ctrl & EMAC_RX_ST_IRE)
		++st->rx_bd_in_range;
}

static inline void emac_rx_csum(struct emac_instance *dev,
				struct sk_buff *skb, u16 ctrl)
{
#ifdef CONFIG_IBM_NEW_EMAC_TAH
	if (!ctrl && dev->tah_dev) {
		skb->ip_summed = CHECKSUM_UNNECESSARY;
		++dev->stats.rx_packets_csum;
	}
#endif
}

static inline int emac_rx_sg_append(struct emac_instance *dev, int slot)
{
	if (likely(dev->rx_sg_skb != NULL)) {
		int len = dev->rx_desc[slot].data_len;
		int tot_len = dev->rx_sg_skb->len + len;

		if (unlikely(tot_len + 2 > dev->rx_skb_size)) {
			++dev->estats.rx_dropped_mtu;
			dev_kfree_skb(dev->rx_sg_skb);
			dev->rx_sg_skb = NULL;
		} else {
			cacheable_memcpy(skb_tail_pointer(dev->rx_sg_skb),
					 dev->rx_skb[slot]->data, len);
			skb_put(dev->rx_sg_skb, len);
			emac_recycle_rx_skb(dev, slot, len);
			return 0;
		}
	}
	emac_recycle_rx_skb(dev, slot, 0);
	return -1;
}
/* NAPI poll context */
static int emac_poll_rx(void *param, int budget)
{
	struct emac_instance *dev = param;
	int slot = dev->rx_slot, received = 0;

	DBG2(dev, "poll_rx(%d)" NL, budget);

 again:
	while (budget > 0) {
		int len;
		struct sk_buff *skb;
		u16 ctrl = dev->rx_desc[slot].ctrl;

		if (ctrl & MAL_RX_CTRL_EMPTY)
			break;

		skb = dev->rx_skb[slot];
		mb();
		len = dev->rx_desc[slot].data_len;

		if (unlikely(!MAL_IS_SINGLE_RX(ctrl)))
			goto sg;

		ctrl &= EMAC_BAD_RX_MASK;
		if (unlikely(ctrl && ctrl != EMAC_RX_TAH_BAD_CSUM)) {
			emac_parse_rx_error(dev, ctrl);
			++dev->estats.rx_dropped_error;
			emac_recycle_rx_skb(dev, slot, 0);
			len = 0;
			goto next;
		}

		if (len && len < EMAC_RX_COPY_THRESH) {
			struct sk_buff *copy_skb =
			    alloc_skb(len + EMAC_RX_SKB_HEADROOM + 2, GFP_ATOMIC);
			if (unlikely(!copy_skb))
				goto oom;

			skb_reserve(copy_skb, EMAC_RX_SKB_HEADROOM + 2);
			cacheable_memcpy(copy_skb->data - 2, skb->data - 2,
					 len + 2);
			emac_recycle_rx_skb(dev, slot, len);
			skb = copy_skb;
		} else if (unlikely(emac_alloc_rx_skb(dev, slot, GFP_ATOMIC)))
			goto oom;

		skb_put(skb, len);
	push_packet:
		skb->dev = dev->ndev;
		skb->protocol = eth_type_trans(skb, dev->ndev);
		emac_rx_csum(dev, skb, ctrl);

		if (unlikely(netif_receive_skb(skb) == NET_RX_DROP))
			++dev->estats.rx_dropped_stack;
	next:
		++dev->stats.rx_packets;
	skip:
		dev->stats.rx_bytes += len;
		slot = (slot + 1) % NUM_RX_BUFF;
		--budget;
		++received;
		continue;
	sg:
		if (ctrl & MAL_RX_CTRL_FIRST) {
			BUG_ON(dev->rx_sg_skb);
			if (unlikely(emac_alloc_rx_skb(dev, slot, GFP_ATOMIC))) {
				DBG(dev, "rx OOM %d" NL, slot);
				++dev->estats.rx_dropped_oom;
				emac_recycle_rx_skb(dev, slot, 0);
			} else {
				dev->rx_sg_skb = skb;
				skb_put(skb, len);
			}
		} else if (!emac_rx_sg_append(dev, slot) &&
			   (ctrl & MAL_RX_CTRL_LAST)) {

			skb = dev->rx_sg_skb;
			dev->rx_sg_skb = NULL;

			ctrl &= EMAC_BAD_RX_MASK;
			if (unlikely(ctrl && ctrl != EMAC_RX_TAH_BAD_CSUM)) {
				emac_parse_rx_error(dev, ctrl);
				++dev->estats.rx_dropped_error;
				dev_kfree_skb(skb);
				len = 0;
			} else
				goto push_packet;
		}
		goto skip;
	oom:
		DBG(dev, "rx OOM %d" NL, slot);
		/* Drop the packet and recycle skb */
		++dev->estats.rx_dropped_oom;
		emac_recycle_rx_skb(dev, slot, 0);
		goto next;
	}

	if (received) {
		DBG2(dev, "rx %d BDs" NL, received);
		dev->rx_slot = slot;
	}

	if (unlikely(budget && test_bit(MAL_COMMAC_RX_STOPPED, &dev->commac.flags))) {
		mb();
		if (!(dev->rx_desc[slot].ctrl & MAL_RX_CTRL_EMPTY)) {
			DBG2(dev, "rx restart" NL);
			received = 0;
			goto again;
		}

		if (dev->rx_sg_skb) {
			DBG2(dev, "dropping partial rx packet" NL);
			++dev->estats.rx_dropped_error;
			dev_kfree_skb(dev->rx_sg_skb);
			dev->rx_sg_skb = NULL;
		}

		clear_bit(MAL_COMMAC_RX_STOPPED, &dev->commac.flags);
		mal_enable_rx_channel(dev->mal, dev->mal_rx_chan);
		emac_rx_enable(dev);
		dev->rx_slot = 0;
	}
	return received;
}
/* NAPI poll context */
static int emac_peek_rx(void *param)
{
	struct emac_instance *dev = param;

	return !(dev->rx_desc[dev->rx_slot].ctrl & MAL_RX_CTRL_EMPTY);
}

/* NAPI poll context */
static int emac_peek_rx_sg(void *param)
{
	struct emac_instance *dev = param;

	int slot = dev->rx_slot;
	while (1) {
		u16 ctrl = dev->rx_desc[slot].ctrl;
		if (ctrl & MAL_RX_CTRL_EMPTY)
			return 0;
		else if (ctrl & MAL_RX_CTRL_LAST)
			return 1;

		slot = (slot + 1) % NUM_RX_BUFF;

		/* I'm just being paranoid here :) */
		if (unlikely(slot == dev->rx_slot))
			return 0;
	}
}

/* Hard IRQ */
static void emac_rxde(void *param)
{
	struct emac_instance *dev = param;

	++dev->estats.rx_stopped;
	emac_rx_disable_async(dev);
}
/* Hard IRQ */
static irqreturn_t emac_irq(int irq, void *dev_instance)
{
	struct emac_instance *dev = dev_instance;
	struct emac_regs __iomem *p = dev->emacp;
	struct emac_error_stats *st = &dev->estats;
	u32 isr;

	spin_lock(&dev->lock);

	isr = in_be32(&p->isr);
	out_be32(&p->isr, isr);

	DBG(dev, "isr = %08x" NL, isr);

	if (isr & EMAC4_ISR_TXPE)
		++st->tx_parity;
	if (isr & EMAC4_ISR_RXPE)
		++st->rx_parity;
	if (isr & EMAC4_ISR_TXUE)
		++st->tx_underrun;
	if (isr & EMAC4_ISR_RXOE)
		++st->rx_fifo_overrun;
	if (isr & EMAC_ISR_OVR)
		++st->rx_overrun;
	if (isr & EMAC_ISR_BP)
		++st->rx_bad_packet;
	if (isr & EMAC_ISR_RP)
		++st->rx_runt_packet;
	if (isr & EMAC_ISR_SE)
		++st->rx_short_event;
	if (isr & EMAC_ISR_ALE)
		++st->rx_alignment_error;
	if (isr & EMAC_ISR_BFCS)
		++st->rx_bad_fcs;
	if (isr & EMAC_ISR_PTLE)
		++st->rx_packet_too_long;
	if (isr & EMAC_ISR_ORE)
		++st->rx_out_of_range;
	if (isr & EMAC_ISR_IRE)
		++st->rx_in_range;
	if (isr & EMAC_ISR_SQE)
		++st->tx_sqe;
	if (isr & EMAC_ISR_TE)
		++st->tx_errors;

	spin_unlock(&dev->lock);

	return IRQ_HANDLED;
}
static struct net_device_stats *emac_stats(struct net_device *ndev)
{
	struct emac_instance *dev = netdev_priv(ndev);
	struct emac_stats *st = &dev->stats;
	struct emac_error_stats *est = &dev->estats;
	struct net_device_stats *nst = &dev->nstats;
	unsigned long flags;

	DBG2(dev, "stats" NL);

	/* Compute "legacy" statistics */
	spin_lock_irqsave(&dev->lock, flags);
	nst->rx_packets = (unsigned long)st->rx_packets;
	nst->rx_bytes = (unsigned long)st->rx_bytes;
	nst->tx_packets = (unsigned long)st->tx_packets;
	nst->tx_bytes = (unsigned long)st->tx_bytes;
	nst->rx_dropped = (unsigned long)(est->rx_dropped_oom +
					  est->rx_dropped_error +
					  est->rx_dropped_resize +
					  est->rx_dropped_mtu);
	nst->tx_dropped = (unsigned long)est->tx_dropped;

	nst->rx_errors = (unsigned long)est->rx_bd_errors;
	nst->rx_fifo_errors = (unsigned long)(est->rx_bd_overrun +
					      est->rx_fifo_overrun +
					      est->rx_overrun);
	nst->rx_frame_errors = (unsigned long)(est->rx_bd_alignment_error +
					       est->rx_alignment_error);
	nst->rx_crc_errors = (unsigned long)(est->rx_bd_bad_fcs +
					     est->rx_bad_fcs);
	nst->rx_length_errors = (unsigned long)(est->rx_bd_runt_packet +
						est->rx_bd_short_event +
						est->rx_bd_packet_too_long +
						est->rx_bd_out_of_range +
						est->rx_bd_in_range +
						est->rx_runt_packet +
						est->rx_short_event +
						est->rx_packet_too_long +
						est->rx_out_of_range +
						est->rx_in_range);

	nst->tx_errors = (unsigned long)(est->tx_bd_errors + est->tx_errors);
	nst->tx_fifo_errors = (unsigned long)(est->tx_bd_underrun +
					      est->tx_underrun);
	nst->tx_carrier_errors = (unsigned long)est->tx_bd_carrier_loss;
	nst->collisions = (unsigned long)(est->tx_bd_excessive_deferral +
					  est->tx_bd_excessive_collisions +
					  est->tx_bd_late_collision +
					  est->tx_bd_multple_collisions);
	spin_unlock_irqrestore(&dev->lock, flags);
	return nst;
}

static struct mal_commac_ops emac_commac_ops = {
	.poll_tx = &emac_poll_tx,
	.poll_rx = &emac_poll_rx,
	.peek_rx = &emac_peek_rx,
	.rxde = &emac_rxde,
};

static struct mal_commac_ops emac_commac_sg_ops = {
	.poll_tx = &emac_poll_tx,
	.poll_rx = &emac_poll_rx,
	.peek_rx = &emac_peek_rx_sg,
	.rxde = &emac_rxde,
};
/* Ethtool support */
static int emac_ethtool_get_settings(struct net_device *ndev,
				     struct ethtool_cmd *cmd)
{
	struct emac_instance *dev = netdev_priv(ndev);

	cmd->supported = dev->phy.features;
	cmd->port = PORT_MII;
	cmd->phy_address = dev->phy.address;
	cmd->transceiver =
	    dev->phy.address >= 0 ? XCVR_EXTERNAL : XCVR_INTERNAL;

	mutex_lock(&dev->link_lock);
	cmd->advertising = dev->phy.advertising;
	cmd->autoneg = dev->phy.autoneg;
	cmd->speed = dev->phy.speed;
	cmd->duplex = dev->phy.duplex;
	mutex_unlock(&dev->link_lock);

	return 0;
}

static int emac_ethtool_set_settings(struct net_device *ndev,
				     struct ethtool_cmd *cmd)
{
	struct emac_instance *dev = netdev_priv(ndev);
	u32 f = dev->phy.features;

	DBG(dev, "set_settings(%d, %d, %d, 0x%08x)" NL,
	    cmd->autoneg, cmd->speed, cmd->duplex, cmd->advertising);

	/* Basic sanity checks */
	if (dev->phy.address < 0)
		return -EOPNOTSUPP;
	if (cmd->autoneg != AUTONEG_ENABLE && cmd->autoneg != AUTONEG_DISABLE)
		return -EINVAL;
	if (cmd->autoneg == AUTONEG_ENABLE && cmd->advertising == 0)
		return -EINVAL;
	if (cmd->duplex != DUPLEX_HALF && cmd->duplex != DUPLEX_FULL)
		return -EINVAL;

	if (cmd->autoneg == AUTONEG_DISABLE) {
		switch (cmd->speed) {
		case SPEED_10:
			if (cmd->duplex == DUPLEX_HALF
			    && !(f & SUPPORTED_10baseT_Half))
				return -EINVAL;
			if (cmd->duplex == DUPLEX_FULL
			    && !(f & SUPPORTED_10baseT_Full))
				return -EINVAL;
			break;
		case SPEED_100:
			if (cmd->duplex == DUPLEX_HALF
			    && !(f & SUPPORTED_100baseT_Half))
				return -EINVAL;
			if (cmd->duplex == DUPLEX_FULL
			    && !(f & SUPPORTED_100baseT_Full))
				return -EINVAL;
			break;
		case SPEED_1000:
			if (cmd->duplex == DUPLEX_HALF
			    && !(f & SUPPORTED_1000baseT_Half))
				return -EINVAL;
			if (cmd->duplex == DUPLEX_FULL
			    && !(f & SUPPORTED_1000baseT_Full))
				return -EINVAL;
			break;
		default:
			return -EINVAL;
		}

		mutex_lock(&dev->link_lock);
		dev->phy.def->ops->setup_forced(&dev->phy, cmd->speed,
						cmd->duplex);
		mutex_unlock(&dev->link_lock);

	} else {
		if (!(f & SUPPORTED_Autoneg))
			return -EINVAL;

		mutex_lock(&dev->link_lock);
		dev->phy.def->ops->setup_aneg(&dev->phy,
					      (cmd->advertising & f) |
					      (dev->phy.advertising &
					       (ADVERTISED_Pause |
						ADVERTISED_Asym_Pause)));
		mutex_unlock(&dev->link_lock);
	}
	emac_force_link_update(dev);

	return 0;
}
static void emac_ethtool_get_ringparam(struct net_device *ndev,
				       struct ethtool_ringparam *rp)
{
	rp->rx_max_pending = rp->rx_pending = NUM_RX_BUFF;
	rp->tx_max_pending = rp->tx_pending = NUM_TX_BUFF;
}

static void emac_ethtool_get_pauseparam(struct net_device *ndev,
					struct ethtool_pauseparam *pp)
{
	struct emac_instance *dev = netdev_priv(ndev);

	mutex_lock(&dev->link_lock);
	if ((dev->phy.features & SUPPORTED_Autoneg) &&
	    (dev->phy.advertising & (ADVERTISED_Pause | ADVERTISED_Asym_Pause)))
		pp->autoneg = 1;

	if (dev->phy.duplex == DUPLEX_FULL) {
		if (dev->phy.pause)
			pp->rx_pause = pp->tx_pause = 1;
		else if (dev->phy.asym_pause)
			pp->tx_pause = 1;
	}
	mutex_unlock(&dev->link_lock);
}

static u32 emac_ethtool_get_rx_csum(struct net_device *ndev)
{
	struct emac_instance *dev = netdev_priv(ndev);

	return dev->tah_dev != NULL;
}

static int emac_get_regs_len(struct emac_instance *dev)
{
	if (emac_has_feature(dev, EMAC_FTR_EMAC4))
		return sizeof(struct emac_ethtool_regs_subhdr) +
			EMAC4_ETHTOOL_REGS_SIZE;
	else
		return sizeof(struct emac_ethtool_regs_subhdr) +
			EMAC_ETHTOOL_REGS_SIZE;
}

static int emac_ethtool_get_regs_len(struct net_device *ndev)
{
	struct emac_instance *dev = netdev_priv(ndev);
	int size;

	size = sizeof(struct emac_ethtool_regs_hdr) +
		emac_get_regs_len(dev) + mal_get_regs_len(dev->mal);
	if (emac_has_feature(dev, EMAC_FTR_HAS_ZMII))
		size += zmii_get_regs_len(dev->zmii_dev);
	if (emac_has_feature(dev, EMAC_FTR_HAS_RGMII))
		size += rgmii_get_regs_len(dev->rgmii_dev);
	if (emac_has_feature(dev, EMAC_FTR_HAS_TAH))
		size += tah_get_regs_len(dev->tah_dev);

	return size;
}
static void *emac_dump_regs(struct emac_instance *dev, void *buf)
{
	struct emac_ethtool_regs_subhdr *hdr = buf;

	hdr->index = dev->cell_index;
	if (emac_has_feature(dev, EMAC_FTR_EMAC4)) {
		hdr->version = EMAC4_ETHTOOL_REGS_VER;
		memcpy_fromio(hdr + 1, dev->emacp, EMAC4_ETHTOOL_REGS_SIZE);
		return ((void *)(hdr + 1) + EMAC4_ETHTOOL_REGS_SIZE);
	} else {
		hdr->version = EMAC_ETHTOOL_REGS_VER;
		memcpy_fromio(hdr + 1, dev->emacp, EMAC_ETHTOOL_REGS_SIZE);
		return ((void *)(hdr + 1) + EMAC_ETHTOOL_REGS_SIZE);
	}
}

static void emac_ethtool_get_regs(struct net_device *ndev,
				  struct ethtool_regs *regs, void *buf)
{
	struct emac_instance *dev = netdev_priv(ndev);
	struct emac_ethtool_regs_hdr *hdr = buf;

	hdr->components = 0;
	buf = hdr + 1;

	buf = mal_dump_regs(dev->mal, buf);
	buf = emac_dump_regs(dev, buf);
	if (emac_has_feature(dev, EMAC_FTR_HAS_ZMII)) {
		hdr->components |= EMAC_ETHTOOL_REGS_ZMII;
		buf = zmii_dump_regs(dev->zmii_dev, buf);
	}
	if (emac_has_feature(dev, EMAC_FTR_HAS_RGMII)) {
		hdr->components |= EMAC_ETHTOOL_REGS_RGMII;
		buf = rgmii_dump_regs(dev->rgmii_dev, buf);
	}
	if (emac_has_feature(dev, EMAC_FTR_HAS_TAH)) {
		hdr->components |= EMAC_ETHTOOL_REGS_TAH;
		buf = tah_dump_regs(dev->tah_dev, buf);
	}
}

static int emac_ethtool_nway_reset(struct net_device *ndev)
{
	struct emac_instance *dev = netdev_priv(ndev);
	int res = 0;

	DBG(dev, "nway_reset" NL);

	if (dev->phy.address < 0)
		return -EOPNOTSUPP;

	mutex_lock(&dev->link_lock);
	if (!dev->phy.autoneg) {
		res = -EINVAL;
		goto out;
	}

	dev->phy.def->ops->setup_aneg(&dev->phy, dev->phy.advertising);
 out:
	mutex_unlock(&dev->link_lock);
	emac_force_link_update(dev);
	return res;
}
2076 static int emac_ethtool_get_stats_count(struct net_device *ndev)
2078 return EMAC_ETHTOOL_STATS_COUNT;
2081 static void emac_ethtool_get_strings(struct net_device *ndev, u32 stringset,
2082 u8 *buf)
2084 if (stringset == ETH_SS_STATS)
2085 memcpy(buf, &emac_stats_keys, sizeof(emac_stats_keys));
2088 static void emac_ethtool_get_ethtool_stats(struct net_device *ndev,
2089 struct ethtool_stats *estats,
2090 u64 *tmp_stats)
2092 struct emac_instance *dev = netdev_priv(ndev);
2094 memcpy(tmp_stats, &dev->stats, sizeof(dev->stats));
2095 tmp_stats += sizeof(dev->stats) / sizeof(u64);
2096 memcpy(tmp_stats, &dev->estats, sizeof(dev->estats));
2099 static void emac_ethtool_get_drvinfo(struct net_device *ndev,
2100 struct ethtool_drvinfo *info)
2102 struct emac_instance *dev = netdev_priv(ndev);
2104 strcpy(info->driver, "ibm_emac");
2105 strcpy(info->version, DRV_VERSION);
2106 info->fw_version[0] = '\0';
2107 snprintf(info->bus_info, sizeof(info->bus_info), "PPC 4xx EMAC-%d %s",
2108 dev->cell_index, dev->ofdev->node->full_name);
2109 info->n_stats = emac_ethtool_get_stats_count(ndev);
2110 info->regdump_len = emac_ethtool_get_regs_len(ndev);
2113 static const struct ethtool_ops emac_ethtool_ops = {
2114 .get_settings = emac_ethtool_get_settings,
2115 .set_settings = emac_ethtool_set_settings,
2116 .get_drvinfo = emac_ethtool_get_drvinfo,
2118 .get_regs_len = emac_ethtool_get_regs_len,
2119 .get_regs = emac_ethtool_get_regs,
2121 .nway_reset = emac_ethtool_nway_reset,
2123 .get_ringparam = emac_ethtool_get_ringparam,
2124 .get_pauseparam = emac_ethtool_get_pauseparam,
2126 .get_rx_csum = emac_ethtool_get_rx_csum,
2128 .get_strings = emac_ethtool_get_strings,
2129 .get_stats_count = emac_ethtool_get_stats_count,
2130 .get_ethtool_stats = emac_ethtool_get_ethtool_stats,
2132 .get_link = ethtool_op_get_link,
2133 .get_tx_csum = ethtool_op_get_tx_csum,
2134 .get_sg = ethtool_op_get_sg,
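/* Besides the standard MII ioctls, the SIOCDEVPRIVATE..+2 cases below
 * are kept as aliases, presumably for old userspace (e.g. early
 * mii-tool binaries) that predates SIOCGMIIPHY/SIOCGMIIREG/SIOCSMIIREG.
 * Both flavours are handled identically.
 */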
2137 static int emac_ioctl(struct net_device *ndev, struct ifreq *rq, int cmd)
2139 struct emac_instance *dev = netdev_priv(ndev);
2140 uint16_t *data = (uint16_t *)&rq->ifr_ifru;
2142 DBG(dev, "ioctl %08x" NL, cmd);
2144 if (dev->phy.address < 0)
2145 return -EOPNOTSUPP;
2147 switch (cmd) {
2148 case SIOCGMIIPHY:
2149 case SIOCDEVPRIVATE:
2150 data[0] = dev->phy.address;
2151 /* Fall through */
2152 case SIOCGMIIREG:
2153 case SIOCDEVPRIVATE + 1:
2154 data[3] = emac_mdio_read(ndev, dev->phy.address, data[1]);
2155 return 0;
2157 case SIOCSMIIREG:
2158 case SIOCDEVPRIVATE + 2:
2159 if (!capable(CAP_NET_ADMIN))
2160 return -EPERM;
2161 emac_mdio_write(ndev, dev->phy.address, data[1], data[2]);
2162 return 0;
2163 default:
2164 return -EOPNOTSUPP;
2168 struct emac_depentry {
2169 u32 phandle;
2170 struct device_node *node;
2171 struct of_device *ofdev;
2172 void *drvdata;
2175 #define EMAC_DEP_MAL_IDX 0
2176 #define EMAC_DEP_ZMII_IDX 1
2177 #define EMAC_DEP_RGMII_IDX 2
2178 #define EMAC_DEP_TAH_IDX 3
2179 #define EMAC_DEP_MDIO_IDX 4
2180 #define EMAC_DEP_PREV_IDX 5
2181 #define EMAC_DEP_COUNT 6
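/* Deferred-probe scheme: an EMAC can only come up once the MAL,
 * ZMII/RGMII/TAH and MDIO devices its device-tree node points at
 * (and any earlier EMAC in the boot list) have been probed. The
 * table below is filled with their phandles; emac_wait_deps() then
 * sleeps on emac_probe_wait, woken by a bus notifier each time a
 * driver binds, and re-runs emac_check_deps() until everything is
 * present or the timeout expires.
 */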
2183 static int __devinit emac_check_deps(struct emac_instance *dev,
2184 struct emac_depentry *deps)
2186 int i, there = 0;
2187 struct device_node *np;
2189 for (i = 0; i < EMAC_DEP_COUNT; i++) {
2190 /* no dependency on that item, all right */
2191 if (deps[i].phandle == 0) {
2192 there++;
2193 continue;
2195 /* special case for blist as the dependency might go away */
2196 if (i == EMAC_DEP_PREV_IDX) {
2197 np = *(dev->blist - 1);
2198 if (np == NULL) {
2199 deps[i].phandle = 0;
2200 there++;
2201 continue;
2203 if (deps[i].node == NULL)
2204 deps[i].node = of_node_get(np);
2206 if (deps[i].node == NULL)
2207 deps[i].node = of_find_node_by_phandle(deps[i].phandle);
2208 if (deps[i].node == NULL)
2209 continue;
2210 if (deps[i].ofdev == NULL)
2211 deps[i].ofdev = of_find_device_by_node(deps[i].node);
2212 if (deps[i].ofdev == NULL)
2213 continue;
2214 if (deps[i].drvdata == NULL)
2215 deps[i].drvdata = dev_get_drvdata(&deps[i].ofdev->dev);
2216 if (deps[i].drvdata != NULL)
2217 there++;
2219 return (there == EMAC_DEP_COUNT);
2222 static void emac_put_deps(struct emac_instance *dev)
2224 if (dev->mal_dev)
2225 of_dev_put(dev->mal_dev);
2226 if (dev->zmii_dev)
2227 of_dev_put(dev->zmii_dev);
2228 if (dev->rgmii_dev)
2229 of_dev_put(dev->rgmii_dev);
2230 if (dev->mdio_dev)
2231 of_dev_put(dev->mdio_dev);
2232 if (dev->tah_dev)
2233 of_dev_put(dev->tah_dev);
2236 static int __devinit emac_of_bus_notify(struct notifier_block *nb,
2237 unsigned long action, void *data)
2239 /* We are only interested in devices being bound to a driver */
2240 if (action == BUS_NOTIFY_BOUND_DRIVER)
2241 wake_up_all(&emac_probe_wait);
2242 return 0;
2245 static struct notifier_block emac_of_bus_notifier __devinitdata = {
2246 .notifier_call = emac_of_bus_notify
2249 static int __devinit emac_wait_deps(struct emac_instance *dev)
2251 struct emac_depentry deps[EMAC_DEP_COUNT];
2252 int i, err;
2254 memset(&deps, 0, sizeof(deps));
2256 deps[EMAC_DEP_MAL_IDX].phandle = dev->mal_ph;
2257 deps[EMAC_DEP_ZMII_IDX].phandle = dev->zmii_ph;
2258 deps[EMAC_DEP_RGMII_IDX].phandle = dev->rgmii_ph;
2259 if (dev->tah_ph)
2260 deps[EMAC_DEP_TAH_IDX].phandle = dev->tah_ph;
2261 if (dev->mdio_ph)
2262 deps[EMAC_DEP_MDIO_IDX].phandle = dev->mdio_ph;
2263 if (dev->blist && dev->blist > emac_boot_list)
2264 deps[EMAC_DEP_PREV_IDX].phandle = 0xffffffffu;
2265 bus_register_notifier(&of_platform_bus_type, &emac_of_bus_notifier);
2266 wait_event_timeout(emac_probe_wait,
2267 emac_check_deps(dev, deps),
2268 EMAC_PROBE_DEP_TIMEOUT);
2269 bus_unregister_notifier(&of_platform_bus_type, &emac_of_bus_notifier);
2270 err = emac_check_deps(dev, deps) ? 0 : -ENODEV;
2271 for (i = 0; i < EMAC_DEP_COUNT; i++) {
2272 if (deps[i].node)
2273 of_node_put(deps[i].node);
2274 if (err && deps[i].ofdev)
2275 of_dev_put(deps[i].ofdev);
2277 if (err == 0) {
2278 dev->mal_dev = deps[EMAC_DEP_MAL_IDX].ofdev;
2279 dev->zmii_dev = deps[EMAC_DEP_ZMII_IDX].ofdev;
2280 dev->rgmii_dev = deps[EMAC_DEP_RGMII_IDX].ofdev;
2281 dev->tah_dev = deps[EMAC_DEP_TAH_IDX].ofdev;
2282 dev->mdio_dev = deps[EMAC_DEP_MDIO_IDX].ofdev;
2284 if (deps[EMAC_DEP_PREV_IDX].ofdev)
2285 of_dev_put(deps[EMAC_DEP_PREV_IDX].ofdev);
2286 return err;
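/* Small device-tree accessor: fetch a u32 property, complaining only
 * when @fatal is set. Non-fatal callers fall back to a driver default
 * (see emac_init_config() below).
 */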
2289 static int __devinit emac_read_uint_prop(struct device_node *np, const char *name,
2290 u32 *val, int fatal)
2292 int len;
2293 const u32 *prop = of_get_property(np, name, &len);
2294 if (prop == NULL || len < sizeof(u32)) {
2295 if (fatal)
2296 printk(KERN_ERR "%s: missing %s property\n",
2297 np->full_name, name);
2298 return -ENODEV;
2300 *val = *prop;
2301 return 0;
2304 static int __devinit emac_init_phy(struct emac_instance *dev)
2306 struct device_node *np = dev->ofdev->node;
2307 struct net_device *ndev = dev->ndev;
2308 u32 phy_map, adv;
2309 int i;
2311 dev->phy.dev = ndev;
2312 dev->phy.mode = dev->phy_mode;
2314 /* PHY-less configuration.
2315 * XXX I probably should move these settings to the dev tree
2317 if (dev->phy_address == 0xffffffff && dev->phy_map == 0xffffffff) {
2318 emac_reset(dev);
2323 dev->phy.address = -1;
2324 dev->phy.features = SUPPORTED_100baseT_Full | SUPPORTED_MII;
2325 dev->phy.pause = 1;
2327 return 0;
2330 mutex_lock(&emac_phy_map_lock);
2331 phy_map = dev->phy_map | busy_phy_map;
2333 DBG(dev, "PHY maps %08x %08x" NL, dev->phy_map, busy_phy_map);
2335 dev->phy.mdio_read = emac_mdio_read;
2336 dev->phy.mdio_write = emac_mdio_write;
2338 /* Enable internal clock source */
2339 #ifdef CONFIG_PPC_DCR_NATIVE
2340 if (emac_has_feature(dev, EMAC_FTR_440GX_PHY_CLK_FIX))
2341 dcri_clrset(SDR0, SDR0_MFR, 0, SDR0_MFR_ECS);
2342 #endif
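/* 440GX PHY clock workaround: setting SDR0_MFR[ECS] gives the EMAC an
 * internal clock source while we scan the MDIO bus, which apparently
 * keeps MDIO from wedging on ports whose external PHY clock isn't
 * wired up; the external clock source is restored once probing is
 * done (see the matching dcri_clrset() below).
 */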
2343 /* Configure EMAC with defaults so we can at least use MDIO
2344 * This is needed mostly for 440GX
2346 if (emac_phy_gpcs(dev->phy.mode)) {
2347 /* XXX
2348 * Make GPCS PHY address equal to EMAC index.
2349 * We probably should take into account busy_phy_map
2350 * and/or phy_map here.
2352 * Note that the busy_phy_map is currently global
2353 * while it should probably be per-ASIC...
2355 dev->phy.address = dev->cell_index;
2358 emac_configure(dev);
2360 if (dev->phy_address != 0xffffffff)
2361 phy_map = ~(1 << dev->phy_address);
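/* phy_map is an exclusion mask: bit i set means "do not probe MDIO
 * address i". When an explicit phy-address is given, only that bit
 * stays clear, e.g. phy-address = <1> gives ~(1 << 1) = 0xfffffffd,
 * so the loop below touches address 1 only.
 */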
2363 for (i = 0; i < 0x20; phy_map >>= 1, ++i)
2364 if (!(phy_map & 1)) {
2365 int r;
2366 busy_phy_map |= 1 << i;
2368 /* Quick check if there is a PHY at the address */
2369 r = emac_mdio_read(dev->ndev, i, MII_BMCR);
2370 if (r == 0xffff || r < 0)
2371 continue;
2372 if (!emac_mii_phy_probe(&dev->phy, i))
2373 break;
2376 /* Enable external clock source */
2377 #ifdef CONFIG_PPC_DCR_NATIVE
2378 if (emac_has_feature(dev, EMAC_FTR_440GX_PHY_CLK_FIX))
2379 dcri_clrset(SDR0, SDR0_MFR, SDR0_MFR_ECS, 0);
2380 #endif
2381 mutex_unlock(&emac_phy_map_lock);
2382 if (i == 0x20) {
2383 printk(KERN_WARNING "%s: can't find PHY!\n", np->full_name);
2384 return -ENXIO;
2387 /* Init PHY */
2388 if (dev->phy.def->ops->init)
2389 dev->phy.def->ops->init(&dev->phy);
2391 /* Disable any PHY features not supported by the platform */
2392 dev->phy.def->features &= ~dev->phy_feat_exc;
2394 /* Setup initial link parameters */
2395 if (dev->phy.features & SUPPORTED_Autoneg) {
2396 adv = dev->phy.features;
2397 if (!emac_has_feature(dev, EMAC_FTR_NO_FLOW_CONTROL_40x))
2398 adv |= ADVERTISED_Pause | ADVERTISED_Asym_Pause;
2399 /* Restart autonegotiation */
2400 dev->phy.def->ops->setup_aneg(&dev->phy, adv);
2401 } else {
2402 u32 f = dev->phy.def->features;
2403 int speed = SPEED_10, fd = DUPLEX_HALF;
2405 /* Select highest supported speed/duplex */
2406 if (f & SUPPORTED_1000baseT_Full) {
2407 speed = SPEED_1000;
2408 fd = DUPLEX_FULL;
2409 } else if (f & SUPPORTED_1000baseT_Half)
2410 speed = SPEED_1000;
2411 else if (f & SUPPORTED_100baseT_Full) {
2412 speed = SPEED_100;
2413 fd = DUPLEX_FULL;
2414 } else if (f & SUPPORTED_100baseT_Half)
2415 speed = SPEED_100;
2416 else if (f & SUPPORTED_10baseT_Full)
2417 fd = DUPLEX_FULL;
2419 /* Force link parameters */
2420 dev->phy.def->ops->setup_forced(&dev->phy, speed, fd);
2422 return 0;
2425 static int __devinit emac_init_config(struct emac_instance *dev)
2427 struct device_node *np = dev->ofdev->node;
2428 const void *p;
2429 unsigned int plen;
2430 const char *pm, *phy_modes[] = {
2431 [PHY_MODE_NA] = "",
2432 [PHY_MODE_MII] = "mii",
2433 [PHY_MODE_RMII] = "rmii",
2434 [PHY_MODE_SMII] = "smii",
2435 [PHY_MODE_RGMII] = "rgmii",
2436 [PHY_MODE_TBI] = "tbi",
2437 [PHY_MODE_GMII] = "gmii",
2438 [PHY_MODE_RTBI] = "rtbi",
2439 [PHY_MODE_SGMII] = "sgmii",
2442 /* Read config from device-tree */
2443 if (emac_read_uint_prop(np, "mal-device", &dev->mal_ph, 1))
2444 return -ENXIO;
2445 if (emac_read_uint_prop(np, "mal-tx-channel", &dev->mal_tx_chan, 1))
2446 return -ENXIO;
2447 if (emac_read_uint_prop(np, "mal-rx-channel", &dev->mal_rx_chan, 1))
2448 return -ENXIO;
2449 if (emac_read_uint_prop(np, "cell-index", &dev->cell_index, 1))
2450 return -ENXIO;
2451 if (emac_read_uint_prop(np, "max-frame-size", &dev->max_mtu, 0))
2452 dev->max_mtu = 1500;
2453 if (emac_read_uint_prop(np, "rx-fifo-size", &dev->rx_fifo_size, 0))
2454 dev->rx_fifo_size = 2048;
2455 if (emac_read_uint_prop(np, "tx-fifo-size", &dev->tx_fifo_size, 0))
2456 dev->tx_fifo_size = 2048;
2457 if (emac_read_uint_prop(np, "rx-fifo-size-gige", &dev->rx_fifo_size_gige, 0))
2458 dev->rx_fifo_size_gige = dev->rx_fifo_size;
2459 if (emac_read_uint_prop(np, "tx-fifo-size-gige", &dev->tx_fifo_size_gige, 0))
2460 dev->tx_fifo_size_gige = dev->tx_fifo_size;
2461 if (emac_read_uint_prop(np, "phy-address", &dev->phy_address, 0))
2462 dev->phy_address = 0xffffffff;
2463 if (emac_read_uint_prop(np, "phy-map", &dev->phy_map, 0))
2464 dev->phy_map = 0xffffffff;
2465 if (emac_read_uint_prop(np->parent, "clock-frequency", &dev->opb_bus_freq, 1))
2466 return -ENXIO;
2467 if (emac_read_uint_prop(np, "tah-device", &dev->tah_ph, 0))
2468 dev->tah_ph = 0;
2469 if (emac_read_uint_prop(np, "tah-channel", &dev->tah_port, 0))
2470 dev->tah_port = 0;
2471 if (emac_read_uint_prop(np, "mdio-device", &dev->mdio_ph, 0))
2472 dev->mdio_ph = 0;
2473 if (emac_read_uint_prop(np, "zmii-device", &dev->zmii_ph, 0))
2474 dev->zmii_ph = 0;;
2475 if (emac_read_uint_prop(np, "zmii-channel", &dev->zmii_port, 0))
2476 dev->zmii_port = 0xffffffff;;
2477 if (emac_read_uint_prop(np, "rgmii-device", &dev->rgmii_ph, 0))
2478 dev->rgmii_ph = 0;;
2479 if (emac_read_uint_prop(np, "rgmii-channel", &dev->rgmii_port, 0))
2480 dev->rgmii_port = 0xffffffff;;
2481 if (emac_read_uint_prop(np, "fifo-entry-size", &dev->fifo_entry_size, 0))
2482 dev->fifo_entry_size = 16;
2483 if (emac_read_uint_prop(np, "mal-burst-size", &dev->mal_burst_size, 0))
2484 dev->mal_burst_size = 256;
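/* For illustration only -- a minimal, made-up device-tree node
 * carrying the mandatory properties read above (real boards add the
 * optional FIFO/PHY/TAH/ZMII/RGMII properties as needed; the parent
 * bus node must also carry a clock-frequency property):
 *
 *	EMAC0: ethernet@40000800 {
 *		compatible = "ibm,emac4", "ibm,emac";
 *		reg = <0x40000800 0x70>;
 *		interrupts = <...>;
 *		cell-index = <0>;
 *		mal-device = <&MAL0>;
 *		mal-tx-channel = <0>;
 *		mal-rx-channel = <0>;
 *		local-mac-address = [000000000000];
 *		phy-mode = "rgmii";
 *	};
 */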
2486 /* PHY mode needs some decoding */
2487 dev->phy_mode = PHY_MODE_NA;
2488 pm = of_get_property(np, "phy-mode", &plen);
2489 if (pm != NULL) {
2490 int i;
2491 for (i = 0; i < ARRAY_SIZE(phy_modes); i++)
2492 if (!strcasecmp(pm, phy_modes[i])) {
2493 dev->phy_mode = i;
2494 break;
2498 /* Backward compat with non-final DT */
2499 if (dev->phy_mode == PHY_MODE_NA && pm != NULL && plen == 4) {
2500 u32 nmode = *(const u32 *)pm;
2501 if (nmode > PHY_MODE_NA && nmode <= PHY_MODE_SGMII)
2502 dev->phy_mode = nmode;
2505 /* Check EMAC version */
2506 if (of_device_is_compatible(np, "ibm,emac4")) {
2507 dev->features |= EMAC_FTR_EMAC4;
2508 if (of_device_is_compatible(np, "ibm,emac-440gx"))
2509 dev->features |= EMAC_FTR_440GX_PHY_CLK_FIX;
2512 /* Fixup some feature bits based on the device tree */
2513 if (of_get_property(np, "has-inverted-stacr-oc", NULL))
2514 dev->features |= EMAC_FTR_STACR_OC_INVERT;
2515 if (of_get_property(np, "has-new-stacr-staopc", NULL))
2516 dev->features |= EMAC_FTR_HAS_NEW_STACR;
2518 /* CAB lacks the appropriate properties */
2519 if (of_device_is_compatible(np, "ibm,emac-axon"))
2520 dev->features |= EMAC_FTR_HAS_NEW_STACR |
2521 EMAC_FTR_STACR_OC_INVERT;
2523 /* Enable TAH/ZMII/RGMII features as found */
2524 if (dev->tah_ph != 0) {
2525 #ifdef CONFIG_IBM_NEW_EMAC_TAH
2526 dev->features |= EMAC_FTR_HAS_TAH;
2527 #else
2528 printk(KERN_ERR "%s: TAH support not enabled !\n",
2529 np->full_name);
2530 return -ENXIO;
2531 #endif
2534 if (dev->zmii_ph != 0) {
2535 #ifdef CONFIG_IBM_NEW_EMAC_ZMII
2536 dev->features |= EMAC_FTR_HAS_ZMII;
2537 #else
2538 printk(KERN_ERR "%s: ZMII support not enabled !\n",
2539 np->full_name);
2540 return -ENXIO;
2541 #endif
2544 if (dev->rgmii_ph != 0) {
2545 #ifdef CONFIG_IBM_NEW_EMAC_RGMII
2546 dev->features |= EMAC_FTR_HAS_RGMII;
2547 #else
2548 printk(KERN_ERR "%s: RGMII support not enabled !\n",
2549 np->full_name);
2550 return -ENXIO;
2551 #endif
2554 /* Read MAC-address */
2555 p = of_get_property(np, "local-mac-address", NULL);
2556 if (p == NULL) {
2557 printk(KERN_ERR "%s: Can't find local-mac-address property\n",
2558 np->full_name);
2559 return -ENXIO;
2561 memcpy(dev->ndev->dev_addr, p, 6);
2563 DBG(dev, "features : 0x%08x / 0x%08x\n", dev->features, EMAC_FTRS_POSSIBLE);
2564 DBG(dev, "tx_fifo_size : %d (%d gige)\n", dev->tx_fifo_size, dev->tx_fifo_size_gige);
2565 DBG(dev, "rx_fifo_size : %d (%d gige)\n", dev->rx_fifo_size, dev->rx_fifo_size_gige);
2566 DBG(dev, "max_mtu : %d\n", dev->max_mtu);
2567 DBG(dev, "OPB freq : %d\n", dev->opb_bus_freq);
2569 return 0;
2572 static int __devinit emac_probe(struct of_device *ofdev,
2573 const struct of_device_id *match)
2575 struct net_device *ndev;
2576 struct emac_instance *dev;
2577 struct device_node *np = ofdev->node;
2578 struct device_node **blist = NULL;
2579 int err, i;
2581 /* Skip unused/unwired EMACs. We leave the check for an unused
2582 * property here for now, but new flat device trees should set a
2583 * status property to "disabled" instead.
2585 if (of_get_property(np, "unused", NULL) || !of_device_is_available(np))
2586 return -ENODEV;
2588 /* Find ourselves in the bootlist if we are there */
2589 for (i = 0; i < EMAC_BOOT_LIST_SIZE; i++)
2590 if (emac_boot_list[i] == np)
2591 blist = &emac_boot_list[i];
2593 /* Allocate our net_device structure */
2594 err = -ENOMEM;
2595 ndev = alloc_etherdev(sizeof(struct emac_instance));
2596 if (!ndev) {
2597 printk(KERN_ERR "%s: could not allocate ethernet device!\n",
2598 np->full_name);
2599 goto err_gone;
2601 dev = netdev_priv(ndev);
2602 dev->ndev = ndev;
2603 dev->ofdev = ofdev;
2604 dev->blist = blist;
2605 SET_NETDEV_DEV(ndev, &ofdev->dev);
2607 /* Initialize some embedded data structures */
2608 mutex_init(&dev->mdio_lock);
2609 mutex_init(&dev->link_lock);
2610 spin_lock_init(&dev->lock);
2611 INIT_WORK(&dev->reset_work, emac_reset_work);
2613 /* Init various config data based on device-tree */
2614 err = emac_init_config(dev);
2615 if (err != 0)
2616 goto err_free;
2618 /* Get interrupts. EMAC irq is mandatory, WOL irq is optional */
2619 dev->emac_irq = irq_of_parse_and_map(np, 0);
2620 dev->wol_irq = irq_of_parse_and_map(np, 1);
2621 if (dev->emac_irq == NO_IRQ) {
2622 printk(KERN_ERR "%s: Can't map main interrupt\n", np->full_name);
2623 goto err_free;
2625 ndev->irq = dev->emac_irq;
2627 /* Map EMAC regs */
2628 if (of_address_to_resource(np, 0, &dev->rsrc_regs)) {
2629 printk(KERN_ERR "%s: Can't get registers address\n",
2630 np->full_name);
2631 goto err_irq_unmap;
2633 /* TODO: request_mem_region */
2634 dev->emacp = ioremap(dev->rsrc_regs.start, sizeof(struct emac_regs));
2635 if (dev->emacp == NULL) {
2636 printk(KERN_ERR "%s: Can't map device registers!\n",
2637 np->full_name);
2638 err = -ENOMEM;
2639 goto err_irq_unmap;
2642 /* Wait for dependent devices */
2643 err = emac_wait_deps(dev);
2644 if (err) {
2645 printk(KERN_ERR
2646 "%s: Timeout waiting for dependent devices\n",
2647 np->full_name);
2648 /* display more info about what's missing? */
2649 goto err_reg_unmap;
2651 dev->mal = dev_get_drvdata(&dev->mal_dev->dev);
2652 if (dev->mdio_dev != NULL)
2653 dev->mdio_instance = dev_get_drvdata(&dev->mdio_dev->dev);
2655 /* Register with MAL */
2656 dev->commac.ops = &emac_commac_ops;
2657 dev->commac.dev = dev;
2658 dev->commac.tx_chan_mask = MAL_CHAN_MASK(dev->mal_tx_chan);
2659 dev->commac.rx_chan_mask = MAL_CHAN_MASK(dev->mal_rx_chan);
2660 err = mal_register_commac(dev->mal, &dev->commac);
2661 if (err) {
2662 printk(KERN_ERR "%s: failed to register with mal %s!\n",
2663 np->full_name, dev->mal_dev->node->full_name);
2664 goto err_rel_deps;
2666 dev->rx_skb_size = emac_rx_skb_size(ndev->mtu);
2667 dev->rx_sync_size = emac_rx_sync_size(ndev->mtu);
2669 /* Get pointers to BD rings */
2670 dev->tx_desc =
2671 dev->mal->bd_virt + mal_tx_bd_offset(dev->mal, dev->mal_tx_chan);
2672 dev->rx_desc =
2673 dev->mal->bd_virt + mal_rx_bd_offset(dev->mal, dev->mal_rx_chan);
2675 DBG(dev, "tx_desc %p" NL, dev->tx_desc);
2676 DBG(dev, "rx_desc %p" NL, dev->rx_desc);
2678 /* Clean rings */
2679 memset(dev->tx_desc, 0, NUM_TX_BUFF * sizeof(struct mal_descriptor));
2680 memset(dev->rx_desc, 0, NUM_RX_BUFF * sizeof(struct mal_descriptor));
2682 /* Attach to ZMII, if needed */
2683 if (emac_has_feature(dev, EMAC_FTR_HAS_ZMII) &&
2684 (err = zmii_attach(dev->zmii_dev, dev->zmii_port, &dev->phy_mode)) != 0)
2685 goto err_unreg_commac;
2687 /* Attach to RGMII, if needed */
2688 if (emac_has_feature(dev, EMAC_FTR_HAS_RGMII) &&
2689 (err = rgmii_attach(dev->rgmii_dev, dev->rgmii_port, dev->phy_mode)) != 0)
2690 goto err_detach_zmii;
2692 /* Attach to TAH, if needed */
2693 if (emac_has_feature(dev, EMAC_FTR_HAS_TAH) &&
2694 (err = tah_attach(dev->tah_dev, dev->tah_port)) != 0)
2695 goto err_detach_rgmii;
2697 /* Set some link defaults before we can find out real parameters */
2698 dev->phy.speed = SPEED_100;
2699 dev->phy.duplex = DUPLEX_FULL;
2700 dev->phy.autoneg = AUTONEG_DISABLE;
2701 dev->phy.pause = dev->phy.asym_pause = 0;
2702 dev->stop_timeout = STOP_TIMEOUT_100;
2703 INIT_DELAYED_WORK(&dev->link_work, emac_link_timer);
2705 /* Find PHY if any */
2706 err = emac_init_phy(dev);
2707 if (err != 0)
2708 goto err_detach_tah;
2710 /* Fill in the driver function table */
2711 ndev->open = &emac_open;
2712 if (dev->tah_dev)
2713 ndev->features |= NETIF_F_IP_CSUM | NETIF_F_SG;
2714 ndev->tx_timeout = &emac_tx_timeout;
2715 ndev->watchdog_timeo = 5 * HZ;
2716 ndev->stop = &emac_close;
2717 ndev->get_stats = &emac_stats;
2718 ndev->set_multicast_list = &emac_set_multicast_list;
2719 ndev->do_ioctl = &emac_ioctl;
2720 if (emac_phy_supports_gige(dev->phy_mode)) {
2721 ndev->hard_start_xmit = &emac_start_xmit_sg;
2722 ndev->change_mtu = &emac_change_mtu;
2723 dev->commac.ops = &emac_commac_sg_ops;
2724 } else {
2725 ndev->hard_start_xmit = &emac_start_xmit;
2727 SET_ETHTOOL_OPS(ndev, &emac_ethtool_ops);
2729 netif_carrier_off(ndev);
2730 netif_stop_queue(ndev);
2732 err = register_netdev(ndev);
2733 if (err) {
2734 printk(KERN_ERR "%s: failed to register net device (%d)!\n",
2735 np->full_name, err);
2736 goto err_detach_tah;
2739 /* Set our drvdata last as we don't want them visible until we are
2740 * fully initialized
2742 wmb();
2743 dev_set_drvdata(&ofdev->dev, dev);
2745 /* There's a new kid in town! Let's tell everybody */
2746 wake_up_all(&emac_probe_wait);
2749 printk(KERN_INFO
2750 "%s: EMAC-%d %s, MAC %02x:%02x:%02x:%02x:%02x:%02x\n",
2751 ndev->name, dev->cell_index, np->full_name,
2752 ndev->dev_addr[0], ndev->dev_addr[1], ndev->dev_addr[2],
2753 ndev->dev_addr[3], ndev->dev_addr[4], ndev->dev_addr[5]);
2755 if (dev->phy.address >= 0)
2756 printk("%s: found %s PHY (0x%02x)\n", ndev->name,
2757 dev->phy.def->name, dev->phy.address);
2759 emac_dbg_register(dev);
2761 /* Life is good */
2762 return 0;
2764 /* I have a bad feeling about this ... */
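/* Error unwinding: the labels below run in reverse order of
 * acquisition, so each failure point only undoes the steps that had
 * already succeeded before the jump.
 */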
2766 err_detach_tah:
2767 if (emac_has_feature(dev, EMAC_FTR_HAS_TAH))
2768 tah_detach(dev->tah_dev, dev->tah_port);
2769 err_detach_rgmii:
2770 if (emac_has_feature(dev, EMAC_FTR_HAS_RGMII))
2771 rgmii_detach(dev->rgmii_dev, dev->rgmii_port);
2772 err_detach_zmii:
2773 if (emac_has_feature(dev, EMAC_FTR_HAS_ZMII))
2774 zmii_detach(dev->zmii_dev, dev->zmii_port);
2775 err_unreg_commac:
2776 mal_unregister_commac(dev->mal, &dev->commac);
2777 err_rel_deps:
2778 emac_put_deps(dev);
2779 err_reg_unmap:
2780 iounmap(dev->emacp);
2781 err_irq_unmap:
2782 if (dev->wol_irq != NO_IRQ)
2783 irq_dispose_mapping(dev->wol_irq);
2784 if (dev->emac_irq != NO_IRQ)
2785 irq_dispose_mapping(dev->emac_irq);
2786 err_free:
2787 free_netdev(ndev);
2788 err_gone:
2789 /* if we were on the bootlist, remove us as we won't show up and
2790 * wake up all waiters to notify them in case they were waiting
2791 * on us
2793 if (blist) {
2794 *blist = NULL;
2795 wake_up_all(&emac_probe_wait);
2797 return err;
2800 static int __devexit emac_remove(struct of_device *ofdev)
2802 struct emac_instance *dev = dev_get_drvdata(&ofdev->dev);
2804 DBG(dev, "remove" NL);
2806 dev_set_drvdata(&ofdev->dev, NULL);
2808 unregister_netdev(dev->ndev);
2810 flush_scheduled_work();
2812 if (emac_has_feature(dev, EMAC_FTR_HAS_TAH))
2813 tah_detach(dev->tah_dev, dev->tah_port);
2814 if (emac_has_feature(dev, EMAC_FTR_HAS_RGMII))
2815 rgmii_detach(dev->rgmii_dev, dev->rgmii_port);
2816 if (emac_has_feature(dev, EMAC_FTR_HAS_ZMII))
2817 zmii_detach(dev->zmii_dev, dev->zmii_port);
2819 mal_unregister_commac(dev->mal, &dev->commac);
2820 emac_put_deps(dev);
2822 emac_dbg_unregister(dev);
2823 iounmap(dev->emacp);
2825 if (dev->wol_irq != NO_IRQ)
2826 irq_dispose_mapping(dev->wol_irq);
2827 if (dev->emac_irq != NO_IRQ)
2828 irq_dispose_mapping(dev->emac_irq);
2830 free_netdev(dev->ndev);
2832 return 0;
2835 /* XXX Features in here should be replaced by properties... */
2836 static struct of_device_id emac_match[] =
2839 .type = "network",
2840 .compatible = "ibm,emac",
2843 .type = "network",
2844 .compatible = "ibm,emac4",
2849 static struct of_platform_driver emac_driver = {
2850 .name = "emac",
2851 .match_table = emac_match,
2853 .probe = emac_probe,
2854 .remove = emac_remove,
2857 static void __init emac_make_bootlist(void)
2859 struct device_node *np = NULL;
2860 int j, max, i = 0, k;
2861 int cell_indices[EMAC_BOOT_LIST_SIZE];
2863 /* Collect EMACs */
2864 while((np = of_find_all_nodes(np)) != NULL) {
2865 const u32 *idx;
2867 if (of_match_node(emac_match, np) == NULL)
2868 continue;
2869 if (of_get_property(np, "unused", NULL))
2870 continue;
2871 idx = of_get_property(np, "cell-index", NULL);
2872 if (idx == NULL)
2873 continue;
2874 cell_indices[i] = *idx;
2875 emac_boot_list[i++] = of_node_get(np);
2876 if (i >= EMAC_BOOT_LIST_SIZE) {
2877 of_node_put(np);
2878 break;
2881 max = i;
2883 /* Bubble sort them (doh, what a creative algorithm :-) */
2884 for (i = 0; max > 1 && (i < (max - 1)); i++)
2885 for (j = i; j < max; j++) {
2886 if (cell_indices[i] > cell_indices[j]) {
2887 np = emac_boot_list[i];
2888 emac_boot_list[i] = emac_boot_list[j];
2889 emac_boot_list[j] = np;
2890 k = cell_indices[i];
2891 cell_indices[i] = cell_indices[j];
2892 cell_indices[j] = k;
2897 static int __init emac_init(void)
2899 int rc;
2901 printk(KERN_INFO DRV_DESC ", version " DRV_VERSION "\n");
2903 /* Init debug stuff */
2904 emac_init_debug();
2906 /* Build EMAC boot list */
2907 emac_make_bootlist();
2909 /* Init submodules */
2910 rc = mal_init();
2911 if (rc)
2912 goto err;
2913 rc = zmii_init();
2914 if (rc)
2915 goto err_mal;
2916 rc = rgmii_init();
2917 if (rc)
2918 goto err_zmii;
2919 rc = tah_init();
2920 if (rc)
2921 goto err_rgmii;
2922 rc = of_register_platform_driver(&emac_driver);
2923 if (rc)
2924 goto err_tah;
2926 return 0;
2928 err_tah:
2929 tah_exit();
2930 err_rgmii:
2931 rgmii_exit();
2932 err_zmii:
2933 zmii_exit();
2934 err_mal:
2935 mal_exit();
2936 err:
2937 return rc;
2940 static void __exit emac_exit(void)
2942 int i;
2944 of_unregister_platform_driver(&emac_driver);
2946 tah_exit();
2947 rgmii_exit();
2948 zmii_exit();
2949 mal_exit();
2950 emac_fini_debug();
2952 /* Destroy EMAC boot list */
2953 for (i = 0; i < EMAC_BOOT_LIST_SIZE; i++)
2954 if (emac_boot_list[i])
2955 of_node_put(emac_boot_list[i]);
2958 module_init(emac_init);
2959 module_exit(emac_exit);