/*
 * drivers/net/ibm_emac/ibm_emac_core.c
 *
 * Driver for PowerPC 4xx on-chip ethernet controller.
 *
 * Copyright (c) 2004, 2005 Zultys Technologies.
 * Eugene Surovegin <eugene.surovegin@zultys.com> or <ebs@ebshome.net>
 *
 * Based on original work by
 * 	Matt Porter <mporter@kernel.crashing.org>
 * 	(c) 2003 Benjamin Herrenschmidt <benh@kernel.crashing.org>
 *      Armin Kuster <akuster@mvista.com>
 * 	Johnnie Peters <jpeters@mvista.com>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the
 * Free Software Foundation; either version 2 of the License, or (at your
 * option) any later version.
 *
 */
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/interrupt.h>
#include <linux/delay.h>
#include <linux/init.h>
#include <linux/types.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/crc32.h>
#include <linux/ethtool.h>
#include <linux/mii.h>
#include <linux/bitops.h>

#include <asm/processor.h>
#include <asm/io.h>
#include <asm/dma.h>
#include <asm/uaccess.h>
#include <asm/ocp.h>

#include "ibm_emac_core.h"
#include "ibm_emac_debug.h"
/*
 * Lack of dma_unmap_???? calls is intentional.
 *
 * API-correct usage requires additional support state information to be
 * maintained for every RX and TX buffer descriptor (BD). Unfortunately, due to
 * EMAC design (e.g. TX buffer passed from network stack can be split into
 * several BDs, dma_map_single/dma_map_page can be used to map particular BD),
 * maintaining such information will add additional overhead.
 * Current DMA API implementation for 4xx processors only ensures cache coherency
 * and dma_unmap_???? routines are empty and are likely to stay this way.
 * I decided to omit dma_unmap_??? calls because I don't want to add additional
 * complexity just for the sake of following some abstract API, when it doesn't
 * add any real benefit to the driver. I understand that this decision may be
 * controversial, but I really tried to make the code API-correct and efficient
 * at the same time and didn't come up with code I liked :(. --ebs
 */
#define DRV_NAME        "emac"
#define DRV_VERSION     "3.54"
#define DRV_DESC        "PPC 4xx OCP EMAC driver"

MODULE_DESCRIPTION(DRV_DESC);
MODULE_AUTHOR
    ("Eugene Surovegin <eugene.surovegin@zultys.com> or <ebs@ebshome.net>");
MODULE_LICENSE("GPL");
/* minimum number of free TX descriptors required to wake up TX process */
#define EMAC_TX_WAKEUP_THRESH		(NUM_TX_BUFF / 4)
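/* For example (illustrative numbers, not from this file): with a ring of
 * NUM_TX_BUFF == 64 descriptors this threshold is 16, i.e. a stopped queue
 * is woken once at least 16 TX slots have been reclaimed by emac_poll_tx().
 */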
/* If packet size is less than this number, we allocate a small skb and copy
 * the packet contents into it instead of sending the original big skb up
 */
#define EMAC_RX_COPY_THRESH		CONFIG_IBM_EMAC_RX_COPY_THRESHOLD

/* Since multiple EMACs share MDIO lines in various ways, we need
 * to avoid re-using the same PHY ID in cases where the arch didn't
 * setup precise phy_map entries
 */
static u32 busy_phy_map;
#if defined(CONFIG_IBM_EMAC_PHY_RX_CLK_FIX) && \
    (defined(CONFIG_405EP) || defined(CONFIG_440EP) || defined(CONFIG_440GR))
/* 405EP has "EMAC to PHY Control Register" (CPC0_EPCTL) which can help us
 * with the PHY RX clock problem.
 * 440EP/440GR have a saner SDR0_MFR register implementation than 440GX, which
 * also allows controlling each EMAC clock
 */
static inline void EMAC_RX_CLK_TX(int idx)
{
	unsigned long flags;
	local_irq_save(flags);

#if defined(CONFIG_405EP)
	mtdcr(0xf3, mfdcr(0xf3) | (1 << idx));
#else /* CONFIG_440EP || CONFIG_440GR */
	SDR_WRITE(DCRN_SDR_MFR, SDR_READ(DCRN_SDR_MFR) | (0x08000000 >> idx));
#endif

	local_irq_restore(flags);
}

static inline void EMAC_RX_CLK_DEFAULT(int idx)
{
	unsigned long flags;
	local_irq_save(flags);

#if defined(CONFIG_405EP)
	mtdcr(0xf3, mfdcr(0xf3) & ~(1 << idx));
#else /* CONFIG_440EP */
	SDR_WRITE(DCRN_SDR_MFR, SDR_READ(DCRN_SDR_MFR) & ~(0x08000000 >> idx));
#endif

	local_irq_restore(flags);
}
#else
#define EMAC_RX_CLK_TX(idx)		((void)0)
#define EMAC_RX_CLK_DEFAULT(idx)	((void)0)
#endif
#if defined(CONFIG_IBM_EMAC_PHY_RX_CLK_FIX) && defined(CONFIG_440GX)
/* We can switch Ethernet clock to the internal source through SDR0_MFR[ECS],
 * unfortunately this is less flexible than the 440EP case, because it's a
 * global setting for all EMACs, therefore we do this clock trick only during
 * probe.
 */
#define EMAC_CLK_INTERNAL		SDR_WRITE(DCRN_SDR_MFR, \
					    SDR_READ(DCRN_SDR_MFR) | 0x08000000)
#define EMAC_CLK_EXTERNAL		SDR_WRITE(DCRN_SDR_MFR, \
					    SDR_READ(DCRN_SDR_MFR) & ~0x08000000)
#else
#define EMAC_CLK_INTERNAL		((void)0)
#define EMAC_CLK_EXTERNAL		((void)0)
#endif
/* I don't want to litter the system log with timeout errors
 * when we have a brain-damaged PHY.
 */
static inline void emac_report_timeout_error(struct ocp_enet_private *dev,
					     const char *error)
{
#if defined(CONFIG_IBM_EMAC_PHY_RX_CLK_FIX)
	DBG("%d: %s" NL, dev->def->index, error);
#else
	if (net_ratelimit())
		printk(KERN_ERR "emac%d: %s\n", dev->def->index, error);
#endif
}
/* PHY polling intervals */
#define PHY_POLL_LINK_ON	HZ
#define PHY_POLL_LINK_OFF	(HZ / 5)

/* Graceful stop timeouts in us.
 * We should allow up to 1 frame time (full-duplex, ignoring collisions)
 */
#define STOP_TIMEOUT_10		1230
#define STOP_TIMEOUT_100	124
#define STOP_TIMEOUT_1000	13
#define STOP_TIMEOUT_1000_JUMBO	73
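/* Illustrative derivation (a back-of-the-envelope check, matching the values
 * above): a maximum-length standard frame occupies 1538 bytes on the wire
 * (1518 frame + 8 preamble/SFD + 12 IFG) = 12304 bit times, which is ~1230us
 * at 10Mbps, ~123us at 100Mbps and ~12.3us at 1Gbps; a 9018-byte jumbo frame
 * at 1Gbps takes ~72.3us. Hence the rounded-up constants.
 */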
/* Please, keep in sync with struct ibm_emac_stats/ibm_emac_error_stats */
static const char emac_stats_keys[EMAC_ETHTOOL_STATS_COUNT][ETH_GSTRING_LEN] = {
	"rx_packets", "rx_bytes", "tx_packets", "tx_bytes", "rx_packets_csum",
	"tx_packets_csum", "tx_undo", "rx_dropped_stack", "rx_dropped_oom",
	"rx_dropped_error", "rx_dropped_resize", "rx_dropped_mtu",
	"rx_stopped", "rx_bd_errors", "rx_bd_overrun", "rx_bd_bad_packet",
	"rx_bd_runt_packet", "rx_bd_short_event", "rx_bd_alignment_error",
	"rx_bd_bad_fcs", "rx_bd_packet_too_long", "rx_bd_out_of_range",
	"rx_bd_in_range", "rx_parity", "rx_fifo_overrun", "rx_overrun",
	"rx_bad_packet", "rx_runt_packet", "rx_short_event",
	"rx_alignment_error", "rx_bad_fcs", "rx_packet_too_long",
	"rx_out_of_range", "rx_in_range", "tx_dropped", "tx_bd_errors",
	"tx_bd_bad_fcs", "tx_bd_carrier_loss", "tx_bd_excessive_deferral",
	"tx_bd_excessive_collisions", "tx_bd_late_collision",
	"tx_bd_multple_collisions", "tx_bd_single_collision",
	"tx_bd_underrun", "tx_bd_sqe", "tx_parity", "tx_underrun", "tx_sqe",
	"tx_errors"
};
static irqreturn_t emac_irq(int irq, void *dev_instance);
static void emac_clean_tx_ring(struct ocp_enet_private *dev);

static inline int emac_phy_supports_gige(int phy_mode)
{
	return phy_mode == PHY_MODE_GMII ||
	    phy_mode == PHY_MODE_RGMII ||
	    phy_mode == PHY_MODE_TBI ||
	    phy_mode == PHY_MODE_RTBI;
}

static inline int emac_phy_gpcs(int phy_mode)
{
	return phy_mode == PHY_MODE_TBI ||
	    phy_mode == PHY_MODE_RTBI;
}
static inline void emac_tx_enable(struct ocp_enet_private *dev)
{
	struct emac_regs __iomem *p = dev->emacp;
	unsigned long flags;
	u32 r;

	local_irq_save(flags);

	DBG("%d: tx_enable" NL, dev->def->index);

	r = in_be32(&p->mr0);
	if (!(r & EMAC_MR0_TXE))
		out_be32(&p->mr0, r | EMAC_MR0_TXE);
	local_irq_restore(flags);
}
static void emac_tx_disable(struct ocp_enet_private *dev)
{
	struct emac_regs __iomem *p = dev->emacp;
	unsigned long flags;
	u32 r;

	local_irq_save(flags);

	DBG("%d: tx_disable" NL, dev->def->index);

	r = in_be32(&p->mr0);
	if (r & EMAC_MR0_TXE) {
		int n = dev->stop_timeout;
		out_be32(&p->mr0, r & ~EMAC_MR0_TXE);
		while (!(in_be32(&p->mr0) & EMAC_MR0_TXI) && n) {
			udelay(1);
			--n;
		}
		if (unlikely(!n))
			emac_report_timeout_error(dev, "TX disable timeout");
	}
	local_irq_restore(flags);
}
static void emac_rx_enable(struct ocp_enet_private *dev)
{
	struct emac_regs __iomem *p = dev->emacp;
	unsigned long flags;
	u32 r;

	local_irq_save(flags);
	if (unlikely(dev->commac.rx_stopped))
		goto out;

	DBG("%d: rx_enable" NL, dev->def->index);

	r = in_be32(&p->mr0);
	if (!(r & EMAC_MR0_RXE)) {
		if (unlikely(!(r & EMAC_MR0_RXI))) {
			/* Wait if previous async disable is still in progress */
			int n = dev->stop_timeout;
			while (!(r = in_be32(&p->mr0) & EMAC_MR0_RXI) && n) {
				udelay(1);
				--n;
			}
			if (unlikely(!n))
				emac_report_timeout_error(dev,
							  "RX disable timeout");
		}
		out_be32(&p->mr0, r | EMAC_MR0_RXE);
	}
      out:
	local_irq_restore(flags);
}
static void emac_rx_disable(struct ocp_enet_private *dev)
{
	struct emac_regs __iomem *p = dev->emacp;
	unsigned long flags;
	u32 r;

	local_irq_save(flags);

	DBG("%d: rx_disable" NL, dev->def->index);

	r = in_be32(&p->mr0);
	if (r & EMAC_MR0_RXE) {
		int n = dev->stop_timeout;
		out_be32(&p->mr0, r & ~EMAC_MR0_RXE);
		while (!(in_be32(&p->mr0) & EMAC_MR0_RXI) && n) {
			udelay(1);
			--n;
		}
		if (unlikely(!n))
			emac_report_timeout_error(dev, "RX disable timeout");
	}
	local_irq_restore(flags);
}
static inline void emac_rx_disable_async(struct ocp_enet_private *dev)
{
	struct emac_regs __iomem *p = dev->emacp;
	unsigned long flags;
	u32 r;

	local_irq_save(flags);

	DBG("%d: rx_disable_async" NL, dev->def->index);

	r = in_be32(&p->mr0);
	if (r & EMAC_MR0_RXE)
		out_be32(&p->mr0, r & ~EMAC_MR0_RXE);
	local_irq_restore(flags);
}
static int emac_reset(struct ocp_enet_private *dev)
{
	struct emac_regs __iomem *p = dev->emacp;
	unsigned long flags;
	int n = 20;

	DBG("%d: reset" NL, dev->def->index);

	local_irq_save(flags);

	if (!dev->reset_failed) {
		/* 40x erratum suggests stopping RX channel before reset,
		 * we stop TX as well
		 */
		emac_rx_disable(dev);
		emac_tx_disable(dev);
	}

	out_be32(&p->mr0, EMAC_MR0_SRST);
	while ((in_be32(&p->mr0) & EMAC_MR0_SRST) && n)
		--n;
	local_irq_restore(flags);

	if (n) {
		dev->reset_failed = 0;
		return 0;
	} else {
		emac_report_timeout_error(dev, "reset timeout");
		dev->reset_failed = 1;
		return -ETIMEDOUT;
	}
}
static void emac_hash_mc(struct ocp_enet_private *dev)
{
	struct emac_regs __iomem *p = dev->emacp;
	u16 gaht[4] = { 0 };
	struct dev_mc_list *dmi;

	DBG("%d: hash_mc %d" NL, dev->def->index, dev->ndev->mc_count);

	for (dmi = dev->ndev->mc_list; dmi; dmi = dmi->next) {
		int bit;
		DBG2("%d: mc %02x:%02x:%02x:%02x:%02x:%02x" NL,
		     dev->def->index,
		     dmi->dmi_addr[0], dmi->dmi_addr[1], dmi->dmi_addr[2],
		     dmi->dmi_addr[3], dmi->dmi_addr[4], dmi->dmi_addr[5]);

		bit = 63 - (ether_crc(ETH_ALEN, dmi->dmi_addr) >> 26);
		gaht[bit >> 4] |= 0x8000 >> (bit & 0x0f);
	}
	out_be32(&p->gaht1, gaht[0]);
	out_be32(&p->gaht2, gaht[1]);
	out_be32(&p->gaht3, gaht[2]);
	out_be32(&p->gaht4, gaht[3]);
}
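/* Worked example of the GAHT mapping above (illustrative, made-up CRC):
 * for a multicast address whose ether_crc() comes out as 0xb6c37e4f,
 * crc >> 26 = 45, so bit = 63 - 45 = 18, which lands in gaht[18 >> 4] =
 * gaht[1] with mask 0x8000 >> (18 & 0xf) = 0x2000.
 */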
static inline u32 emac_iff2rmr(struct net_device *ndev)
{
	u32 r = EMAC_RMR_SP | EMAC_RMR_SFCS | EMAC_RMR_IAE | EMAC_RMR_BAE |
	    EMAC_RMR_BASE;

	if (ndev->flags & IFF_PROMISC)
		r |= EMAC_RMR_PME;
	else if (ndev->flags & IFF_ALLMULTI || ndev->mc_count > 32)
		r |= EMAC_RMR_PMME;
	else if (ndev->mc_count > 0)
		r |= EMAC_RMR_MAE;

	return r;
}
static inline int emac_opb_mhz(void)
{
	return (ocp_sys_info.opb_bus_freq + 500000) / 1000000;
}
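/* E.g. (illustrative): an OPB clocked at 66666666 Hz rounds to
 * (66666666 + 500000) / 1000000 = 67 MHz, the whole-MHz value that the
 * EMAC_MR1_BASE()/EMAC_STACR_BASE() calls below consume.
 */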
/* BHs disabled */
static int emac_configure(struct ocp_enet_private *dev)
{
	struct emac_regs __iomem *p = dev->emacp;
	struct net_device *ndev = dev->ndev;
	int gige;
	u32 r;

	DBG("%d: configure" NL, dev->def->index);

	if (emac_reset(dev) < 0)
		return -ETIMEDOUT;

	tah_reset(dev->tah_dev);

	/* Mode register */
	r = EMAC_MR1_BASE(emac_opb_mhz()) | EMAC_MR1_VLE | EMAC_MR1_IST;
	if (dev->phy.duplex == DUPLEX_FULL)
		r |= EMAC_MR1_FDE | EMAC_MR1_MWSW_001;
	dev->stop_timeout = STOP_TIMEOUT_10;
	switch (dev->phy.speed) {
	case SPEED_1000:
		if (emac_phy_gpcs(dev->phy.mode)) {
			r |= EMAC_MR1_MF_1000GPCS |
			    EMAC_MR1_MF_IPPA(dev->phy.address);

			/* Put some arbitrary OUI, Manuf & Rev IDs so we can
			 * identify this GPCS PHY later.
			 */
			out_be32(&p->ipcr, 0xdeadbeef);
		} else
			r |= EMAC_MR1_MF_1000;
		r |= EMAC_MR1_RFS_16K;
		gige = 1;

		if (dev->ndev->mtu > ETH_DATA_LEN) {
			r |= EMAC_MR1_JPSM;
			dev->stop_timeout = STOP_TIMEOUT_1000_JUMBO;
		} else
			dev->stop_timeout = STOP_TIMEOUT_1000;
		break;
	case SPEED_100:
		r |= EMAC_MR1_MF_100;
		dev->stop_timeout = STOP_TIMEOUT_100;
		/* Fall through */
	default:
		r |= EMAC_MR1_RFS_4K;
		gige = 0;
		break;
	}
	if (dev->rgmii_dev)
		rgmii_set_speed(dev->rgmii_dev, dev->rgmii_input,
				dev->phy.speed);
	else
		zmii_set_speed(dev->zmii_dev, dev->zmii_input, dev->phy.speed);

#if !defined(CONFIG_40x)
	/* A 40x erratum forces us to NOT use integrated flow control,
	 * let's hope it works on 44x ;)
	 */
	if (dev->phy.duplex == DUPLEX_FULL) {
		if (dev->phy.pause)
			r |= EMAC_MR1_EIFC | EMAC_MR1_APP;
		else if (dev->phy.asym_pause)
			r |= EMAC_MR1_APP;
	}
#endif
	out_be32(&p->mr1, r);
	/* Set individual MAC address */
	out_be32(&p->iahr, (ndev->dev_addr[0] << 8) | ndev->dev_addr[1]);
	out_be32(&p->ialr, (ndev->dev_addr[2] << 24) |
		 (ndev->dev_addr[3] << 16) | (ndev->dev_addr[4] << 8) |
		 ndev->dev_addr[5]);

	/* VLAN Tag Protocol ID */
	out_be32(&p->vtpid, 0x8100);

	/* Receive mode register */
	r = emac_iff2rmr(ndev);
	if (r & EMAC_RMR_MAE)
		emac_hash_mc(dev);
	out_be32(&p->rmr, r);
	/* FIFOs thresholds */
	r = EMAC_TMR1((EMAC_MAL_BURST_SIZE / EMAC_FIFO_ENTRY_SIZE) + 1,
		      EMAC_TX_FIFO_SIZE / 2 / EMAC_FIFO_ENTRY_SIZE);
	out_be32(&p->tmr1, r);
	out_be32(&p->trtr, EMAC_TRTR(EMAC_TX_FIFO_SIZE / 2));
	/* PAUSE frame is sent when RX FIFO reaches its high-water mark,
	   there should still be enough space in the FIFO to allow our link
	   partner time to process this frame and also time to send a PAUSE
	   frame itself.

	   Here is the worst case scenario for the RX FIFO "headroom"
	   (from "The Switch Book") (100Mbps, without preamble, inter-frame gap):

	   1) One maximum-length frame on TX                    1522 bytes
	   2) One PAUSE frame time                                64 bytes
	   3) PAUSE frame decode time allowance                   64 bytes
	   4) One maximum-length frame on RX                    1522 bytes
	   5) Round-trip propagation delay of the link (100Mb)    15 bytes
	   ----------
	   3187 bytes

	   I chose to set high-water mark to RX_FIFO_SIZE / 4 (1024 bytes)
	   low-water mark  to RX_FIFO_SIZE / 8 (512 bytes)
	 */
	r = EMAC_RWMR(EMAC_RX_FIFO_SIZE(gige) / 8 / EMAC_FIFO_ENTRY_SIZE,
		      EMAC_RX_FIFO_SIZE(gige) / 4 / EMAC_FIFO_ENTRY_SIZE);
	out_be32(&p->rwmr, r);

	/* Set PAUSE timer to the maximum */
	out_be32(&p->ptr, 0xffff);
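	/* Plugging in the numbers (illustrative, assuming EMAC_FIFO_ENTRY_SIZE
	 * is 16 bytes): with a 4k RX FIFO this programs a low-water mark of
	 * 4096/8/16 = 32 entries (512 bytes) and a high-water mark of
	 * 4096/4/16 = 64 entries (1024 bytes), matching the comment above;
	 * the 16k gige FIFO scales both by 4.
	 */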
	/* IRQ sources */
	out_be32(&p->iser, EMAC_ISR_TXPE | EMAC_ISR_RXPE | /* EMAC_ISR_TXUE |
		 EMAC_ISR_RXOE | */ EMAC_ISR_OVR | EMAC_ISR_BP | EMAC_ISR_SE |
		 EMAC_ISR_ALE | EMAC_ISR_BFCS | EMAC_ISR_PTLE | EMAC_ISR_ORE |
		 EMAC_ISR_IRE | EMAC_ISR_TE);

	/* We need to take GPCS PHY out of isolate mode after EMAC reset */
	if (emac_phy_gpcs(dev->phy.mode))
		mii_reset_phy(&dev->phy);

	return 0;
}
/* BHs disabled */
static void emac_reinitialize(struct ocp_enet_private *dev)
{
	DBG("%d: reinitialize" NL, dev->def->index);

	if (!emac_configure(dev)) {
		emac_tx_enable(dev);
		emac_rx_enable(dev);
	}
}

/* BHs disabled */
static void emac_full_tx_reset(struct net_device *ndev)
{
	struct ocp_enet_private *dev = ndev->priv;
	struct ocp_func_emac_data *emacdata = dev->def->additions;

	DBG("%d: full_tx_reset" NL, dev->def->index);

	emac_tx_disable(dev);
	mal_disable_tx_channel(dev->mal, emacdata->mal_tx_chan);
	emac_clean_tx_ring(dev);
	dev->tx_cnt = dev->tx_slot = dev->ack_slot = 0;

	emac_configure(dev);

	mal_enable_tx_channel(dev->mal, emacdata->mal_tx_chan);
	emac_tx_enable(dev);
	emac_rx_enable(dev);

	netif_wake_queue(ndev);
}
static int __emac_mdio_read(struct ocp_enet_private *dev, u8 id, u8 reg)
{
	struct emac_regs __iomem *p = dev->emacp;
	u32 r;
	int n;

	DBG2("%d: mdio_read(%02x,%02x)" NL, dev->def->index, id, reg);

	/* Enable proper MDIO port */
	zmii_enable_mdio(dev->zmii_dev, dev->zmii_input);

	/* Wait for management interface to become idle */
	n = 10;
	while (!emac_phy_done(in_be32(&p->stacr))) {
		udelay(1);
		if (!--n)
			goto to;
	}

	/* Issue read command */
	out_be32(&p->stacr,
		 EMAC_STACR_BASE(emac_opb_mhz()) | EMAC_STACR_STAC_READ |
		 (reg & EMAC_STACR_PRA_MASK)
		 | ((id & EMAC_STACR_PCDA_MASK) << EMAC_STACR_PCDA_SHIFT)
		 | EMAC_STACR_START);

	/* Wait for read to complete */
	n = 100;
	while (!emac_phy_done(r = in_be32(&p->stacr))) {
		udelay(1);
		if (!--n)
			goto to;
	}

	if (unlikely(r & EMAC_STACR_PHYE)) {
		DBG("%d: mdio_read(%02x, %02x) failed" NL, dev->def->index,
		    id, reg);
		return -EREMOTEIO;
	}

	r = ((r >> EMAC_STACR_PHYD_SHIFT) & EMAC_STACR_PHYD_MASK);
	DBG2("%d: mdio_read -> %04x" NL, dev->def->index, r);
	return r;
      to:
	DBG("%d: MII management interface timeout (read)" NL, dev->def->index);
	return -ETIMEDOUT;
}
static void __emac_mdio_write(struct ocp_enet_private *dev, u8 id, u8 reg,
			      u16 val)
{
	struct emac_regs __iomem *p = dev->emacp;
	int n;

	DBG2("%d: mdio_write(%02x,%02x,%04x)" NL, dev->def->index, id, reg,
	     val);

	/* Enable proper MDIO port */
	zmii_enable_mdio(dev->zmii_dev, dev->zmii_input);

	/* Wait for management interface to be idle */
	n = 10;
	while (!emac_phy_done(in_be32(&p->stacr))) {
		udelay(1);
		if (!--n)
			goto to;
	}

	/* Issue write command */
	out_be32(&p->stacr,
		 EMAC_STACR_BASE(emac_opb_mhz()) | EMAC_STACR_STAC_WRITE |
		 (reg & EMAC_STACR_PRA_MASK) |
		 ((id & EMAC_STACR_PCDA_MASK) << EMAC_STACR_PCDA_SHIFT) |
		 (val << EMAC_STACR_PHYD_SHIFT) | EMAC_STACR_START);

	/* Wait for write to complete */
	n = 100;
	while (!emac_phy_done(in_be32(&p->stacr))) {
		udelay(1);
		if (!--n)
			goto to;
	}
	return;
      to:
	DBG("%d: MII management interface timeout (write)" NL, dev->def->index);
}
static int emac_mdio_read(struct net_device *ndev, int id, int reg)
{
	struct ocp_enet_private *dev = ndev->priv;
	int res;

	local_bh_disable();
	res = __emac_mdio_read(dev->mdio_dev ? dev->mdio_dev : dev, (u8) id,
			       (u8) reg);
	local_bh_enable();
	return res;
}

static void emac_mdio_write(struct net_device *ndev, int id, int reg, int val)
{
	struct ocp_enet_private *dev = ndev->priv;

	local_bh_disable();
	__emac_mdio_write(dev->mdio_dev ? dev->mdio_dev : dev, (u8) id,
			  (u8) reg, (u16) val);
	local_bh_enable();
}
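/* Usage sketch (illustrative, not driver code): the PHY layer reads link
 * status through these hooks roughly like
 *
 *	int bmsr = emac_mdio_read(ndev, dev->phy.address, MII_BMSR);
 *	if (bmsr >= 0 && (bmsr & BMSR_LSTATUS))
 *		... link is up ...
 *
 * with negative return values signalling a PHY error (-EREMOTEIO) or a
 * management-bus timeout (-ETIMEDOUT).
 */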
/* BHs disabled */
static void emac_set_multicast_list(struct net_device *ndev)
{
	struct ocp_enet_private *dev = ndev->priv;
	struct emac_regs __iomem *p = dev->emacp;
	u32 rmr = emac_iff2rmr(ndev);

	DBG("%d: multicast %08x" NL, dev->def->index, rmr);
	BUG_ON(!netif_running(dev->ndev));

	/* I decided to relax register access rules here to avoid
	 * full EMAC reset.
	 *
	 * There is a real problem with the EMAC4 core if we use MWSW_001 bit
	 * in MR1 register and do a full EMAC reset.
	 * One TX BD status update is delayed and, after EMAC reset, it
	 * never happens, resulting in a TX hang (it'll be recovered by TX
	 * timeout handler eventually, but this is just gross).
	 * So we either have to do full TX reset or try to cheat here :)
	 *
	 * The only required change is to RX mode register, so I *think* all
	 * we need is just to stop RX channel. This seems to work on all
	 * tested SoCs. --ebs
	 */
	emac_rx_disable(dev);
	if (rmr & EMAC_RMR_MAE)
		emac_hash_mc(dev);
	out_be32(&p->rmr, rmr);
	emac_rx_enable(dev);
}
/* BHs disabled */
static int emac_resize_rx_ring(struct ocp_enet_private *dev, int new_mtu)
{
	struct ocp_func_emac_data *emacdata = dev->def->additions;
	int rx_sync_size = emac_rx_sync_size(new_mtu);
	int rx_skb_size = emac_rx_skb_size(new_mtu);
	int i, ret = 0;

	emac_rx_disable(dev);
	mal_disable_rx_channel(dev->mal, emacdata->mal_rx_chan);

	if (dev->rx_sg_skb) {
		++dev->estats.rx_dropped_resize;
		dev_kfree_skb(dev->rx_sg_skb);
		dev->rx_sg_skb = NULL;
	}

	/* Make a first pass over RX ring and mark BDs ready, dropping
	 * non-processed packets on the way. We need this as a separate pass
	 * to simplify error recovery in the case of allocation failure later.
	 */
	for (i = 0; i < NUM_RX_BUFF; ++i) {
		if (dev->rx_desc[i].ctrl & MAL_RX_CTRL_FIRST)
			++dev->estats.rx_dropped_resize;

		dev->rx_desc[i].data_len = 0;
		dev->rx_desc[i].ctrl = MAL_RX_CTRL_EMPTY |
		    (i == (NUM_RX_BUFF - 1) ? MAL_RX_CTRL_WRAP : 0);
	}

	/* Reallocate RX ring only if bigger skb buffers are required */
	if (rx_skb_size <= dev->rx_skb_size)
		goto skip;

	/* Second pass, allocate new skbs */
	for (i = 0; i < NUM_RX_BUFF; ++i) {
		struct sk_buff *skb = alloc_skb(rx_skb_size, GFP_ATOMIC);
		if (!skb) {
			ret = -ENOMEM;
			goto oom;
		}

		BUG_ON(!dev->rx_skb[i]);
		dev_kfree_skb(dev->rx_skb[i]);

		skb_reserve(skb, EMAC_RX_SKB_HEADROOM + 2);
		dev->rx_desc[i].data_ptr =
		    dma_map_single(dev->ldev, skb->data - 2, rx_sync_size,
				   DMA_FROM_DEVICE) + 2;
		dev->rx_skb[i] = skb;
	}
      skip:
	/* Check if we need to change "Jumbo" bit in MR1 */
	if ((new_mtu > ETH_DATA_LEN) ^ (dev->ndev->mtu > ETH_DATA_LEN)) {
		/* This is to prevent starting RX channel in emac_rx_enable() */
		dev->commac.rx_stopped = 1;

		dev->ndev->mtu = new_mtu;
		emac_full_tx_reset(dev->ndev);
	}

	mal_set_rcbs(dev->mal, emacdata->mal_rx_chan, emac_rx_size(new_mtu));
      oom:
	/* Restart RX */
	dev->commac.rx_stopped = dev->rx_slot = 0;
	mal_enable_rx_channel(dev->mal, emacdata->mal_rx_chan);
	emac_rx_enable(dev);

	return ret;
}
/* Process ctx, rtnl_lock semaphore */
static int emac_change_mtu(struct net_device *ndev, int new_mtu)
{
	struct ocp_enet_private *dev = ndev->priv;
	int ret = 0;

	if (new_mtu < EMAC_MIN_MTU || new_mtu > EMAC_MAX_MTU)
		return -EINVAL;

	DBG("%d: change_mtu(%d)" NL, dev->def->index, new_mtu);

	local_bh_disable();
	if (netif_running(ndev)) {
		/* Check if we really need to reinitialize RX ring */
		if (emac_rx_skb_size(ndev->mtu) != emac_rx_skb_size(new_mtu))
			ret = emac_resize_rx_ring(dev, new_mtu);
	}

	if (!ret) {
		ndev->mtu = new_mtu;
		dev->rx_skb_size = emac_rx_skb_size(new_mtu);
		dev->rx_sync_size = emac_rx_sync_size(new_mtu);
	}
	local_bh_enable();

	return ret;
}
static void emac_clean_tx_ring(struct ocp_enet_private *dev)
{
	int i;
	for (i = 0; i < NUM_TX_BUFF; ++i) {
		if (dev->tx_skb[i]) {
			dev_kfree_skb(dev->tx_skb[i]);
			dev->tx_skb[i] = NULL;
			if (dev->tx_desc[i].ctrl & MAL_TX_CTRL_READY)
				++dev->estats.tx_dropped;
		}
		dev->tx_desc[i].ctrl = 0;
		dev->tx_desc[i].data_ptr = 0;
	}
}

static void emac_clean_rx_ring(struct ocp_enet_private *dev)
{
	int i;
	for (i = 0; i < NUM_RX_BUFF; ++i)
		if (dev->rx_skb[i]) {
			dev->rx_desc[i].ctrl = 0;
			dev_kfree_skb(dev->rx_skb[i]);
			dev->rx_skb[i] = NULL;
			dev->rx_desc[i].data_ptr = 0;
		}

	if (dev->rx_sg_skb) {
		dev_kfree_skb(dev->rx_sg_skb);
		dev->rx_sg_skb = NULL;
	}
}
static inline int emac_alloc_rx_skb(struct ocp_enet_private *dev, int slot,
				    gfp_t flags)
{
	struct sk_buff *skb = alloc_skb(dev->rx_skb_size, flags);
	if (unlikely(!skb))
		return -ENOMEM;

	dev->rx_skb[slot] = skb;
	dev->rx_desc[slot].data_len = 0;

	skb_reserve(skb, EMAC_RX_SKB_HEADROOM + 2);
	dev->rx_desc[slot].data_ptr =
	    dma_map_single(dev->ldev, skb->data - 2, dev->rx_sync_size,
			   DMA_FROM_DEVICE) + 2;
	barrier();
	dev->rx_desc[slot].ctrl = MAL_RX_CTRL_EMPTY |
	    (slot == (NUM_RX_BUFF - 1) ? MAL_RX_CTRL_WRAP : 0);

	return 0;
}
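/* Note on the +2/-2 games above (one reading of the code, not gospel):
 * skb_reserve() leaves 2 bytes of slack so that the 14-byte Ethernet header
 * pushes the IP header onto a word-aligned boundary, while the DMA mapping
 * starts at skb->data - 2 to keep the mapped region itself aligned; the
 * descriptor's data_ptr then points 2 bytes into that mapping, i.e. exactly
 * at skb->data where the hardware deposits the frame.
 */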
static void emac_print_link_status(struct ocp_enet_private *dev)
{
	if (netif_carrier_ok(dev->ndev))
		printk(KERN_INFO "%s: link is up, %d %s%s\n",
		       dev->ndev->name, dev->phy.speed,
		       dev->phy.duplex == DUPLEX_FULL ? "FDX" : "HDX",
		       dev->phy.pause ? ", pause enabled" :
		       dev->phy.asym_pause ? ", asymmetric pause enabled" : "");
	else
		printk(KERN_INFO "%s: link is down\n", dev->ndev->name);
}
/* Process ctx, rtnl_lock semaphore */
static int emac_open(struct net_device *ndev)
{
	struct ocp_enet_private *dev = ndev->priv;
	struct ocp_func_emac_data *emacdata = dev->def->additions;
	int err, i;

	DBG("%d: open" NL, dev->def->index);

	/* Setup error IRQ handler */
	err = request_irq(dev->def->irq, emac_irq, 0, "EMAC", dev);
	if (err) {
		printk(KERN_ERR "%s: failed to request IRQ %d\n",
		       ndev->name, dev->def->irq);
		return err;
	}

	/* Allocate RX ring */
	for (i = 0; i < NUM_RX_BUFF; ++i)
		if (emac_alloc_rx_skb(dev, i, GFP_KERNEL)) {
			printk(KERN_ERR "%s: failed to allocate RX ring\n",
			       ndev->name);
			goto oom;
		}

	local_bh_disable();
	dev->tx_cnt = dev->tx_slot = dev->ack_slot = dev->rx_slot =
	    dev->commac.rx_stopped = 0;
	dev->rx_sg_skb = NULL;

	if (dev->phy.address >= 0) {
		int link_poll_interval;
		if (dev->phy.def->ops->poll_link(&dev->phy)) {
			dev->phy.def->ops->read_link(&dev->phy);
			EMAC_RX_CLK_DEFAULT(dev->def->index);
			netif_carrier_on(dev->ndev);
			link_poll_interval = PHY_POLL_LINK_ON;
		} else {
			EMAC_RX_CLK_TX(dev->def->index);
			netif_carrier_off(dev->ndev);
			link_poll_interval = PHY_POLL_LINK_OFF;
		}
		mod_timer(&dev->link_timer, jiffies + link_poll_interval);
		emac_print_link_status(dev);
	} else
		netif_carrier_on(dev->ndev);

	emac_configure(dev);
	mal_poll_add(dev->mal, &dev->commac);
	mal_enable_tx_channel(dev->mal, emacdata->mal_tx_chan);
	mal_set_rcbs(dev->mal, emacdata->mal_rx_chan, emac_rx_size(ndev->mtu));
	mal_enable_rx_channel(dev->mal, emacdata->mal_rx_chan);
	emac_tx_enable(dev);
	emac_rx_enable(dev);
	netif_start_queue(ndev);
	local_bh_enable();

	return 0;
      oom:
	emac_clean_rx_ring(dev);
	free_irq(dev->def->irq, dev);
	return -ENOMEM;
}
/* BHs disabled */
static int emac_link_differs(struct ocp_enet_private *dev)
{
	u32 r = in_be32(&dev->emacp->mr1);

	int duplex = r & EMAC_MR1_FDE ? DUPLEX_FULL : DUPLEX_HALF;
	int speed, pause, asym_pause;

	if (r & EMAC_MR1_MF_1000)
		speed = SPEED_1000;
	else if (r & EMAC_MR1_MF_100)
		speed = SPEED_100;
	else
		speed = SPEED_10;

	switch (r & (EMAC_MR1_EIFC | EMAC_MR1_APP)) {
	case (EMAC_MR1_EIFC | EMAC_MR1_APP):
		pause = 1;
		asym_pause = 0;
		break;
	case EMAC_MR1_APP:
		pause = 0;
		asym_pause = 1;
		break;
	default:
		pause = asym_pause = 0;
	}
	return speed != dev->phy.speed || duplex != dev->phy.duplex ||
	    pause != dev->phy.pause || asym_pause != dev->phy.asym_pause;
}
/* BHs disabled */
static void emac_link_timer(unsigned long data)
{
	struct ocp_enet_private *dev = (struct ocp_enet_private *)data;
	int link_poll_interval;

	DBG2("%d: link timer" NL, dev->def->index);

	if (dev->phy.def->ops->poll_link(&dev->phy)) {
		if (!netif_carrier_ok(dev->ndev)) {
			EMAC_RX_CLK_DEFAULT(dev->def->index);

			/* Get new link parameters */
			dev->phy.def->ops->read_link(&dev->phy);

			if (dev->tah_dev || emac_link_differs(dev))
				emac_full_tx_reset(dev->ndev);

			netif_carrier_on(dev->ndev);
			emac_print_link_status(dev);
		}
		link_poll_interval = PHY_POLL_LINK_ON;
	} else {
		if (netif_carrier_ok(dev->ndev)) {
			EMAC_RX_CLK_TX(dev->def->index);
#if defined(CONFIG_IBM_EMAC_PHY_RX_CLK_FIX)
			emac_reinitialize(dev);
#endif
			netif_carrier_off(dev->ndev);
			emac_print_link_status(dev);
		}

		/* Retry reset if the previous attempt failed.
		 * This is needed mostly for CONFIG_IBM_EMAC_PHY_RX_CLK_FIX
		 * case, but I left it here because it shouldn't trigger for
		 * sane PHYs anyway.
		 */
		if (unlikely(dev->reset_failed))
			emac_reinitialize(dev);

		link_poll_interval = PHY_POLL_LINK_OFF;
	}
	mod_timer(&dev->link_timer, jiffies + link_poll_interval);
}
/* BHs disabled */
static void emac_force_link_update(struct ocp_enet_private *dev)
{
	netif_carrier_off(dev->ndev);
	if (timer_pending(&dev->link_timer))
		mod_timer(&dev->link_timer, jiffies + PHY_POLL_LINK_OFF);
}
/* Process ctx, rtnl_lock semaphore */
static int emac_close(struct net_device *ndev)
{
	struct ocp_enet_private *dev = ndev->priv;
	struct ocp_func_emac_data *emacdata = dev->def->additions;

	DBG("%d: close" NL, dev->def->index);

	local_bh_disable();

	if (dev->phy.address >= 0)
		del_timer_sync(&dev->link_timer);

	netif_stop_queue(ndev);
	emac_rx_disable(dev);
	emac_tx_disable(dev);
	mal_disable_rx_channel(dev->mal, emacdata->mal_rx_chan);
	mal_disable_tx_channel(dev->mal, emacdata->mal_tx_chan);
	mal_poll_del(dev->mal, &dev->commac);
	local_bh_enable();

	emac_clean_tx_ring(dev);
	emac_clean_rx_ring(dev);
	free_irq(dev->def->irq, dev);

	return 0;
}
static inline u16 emac_tx_csum(struct ocp_enet_private *dev,
			       struct sk_buff *skb)
{
#if defined(CONFIG_IBM_EMAC_TAH)
	if (skb->ip_summed == CHECKSUM_PARTIAL) {
		++dev->stats.tx_packets_csum;
		return EMAC_TX_CTRL_TAH_CSUM;
	}
#endif
	return 0;
}

static inline int emac_xmit_finish(struct ocp_enet_private *dev, int len)
{
	struct emac_regs __iomem *p = dev->emacp;
	struct net_device *ndev = dev->ndev;

	/* Send the packet out */
	out_be32(&p->tmr0, EMAC_TMR0_XMIT);

	if (unlikely(++dev->tx_cnt == NUM_TX_BUFF)) {
		netif_stop_queue(ndev);
		DBG2("%d: stopped TX queue" NL, dev->def->index);
	}

	ndev->trans_start = jiffies;
	++dev->stats.tx_packets;
	dev->stats.tx_bytes += len;

	return 0;
}
/* BHs disabled */
static int emac_start_xmit(struct sk_buff *skb, struct net_device *ndev)
{
	struct ocp_enet_private *dev = ndev->priv;
	unsigned int len = skb->len;
	int slot;

	u16 ctrl = EMAC_TX_CTRL_GFCS | EMAC_TX_CTRL_GP | MAL_TX_CTRL_READY |
	    MAL_TX_CTRL_LAST | emac_tx_csum(dev, skb);

	slot = dev->tx_slot++;
	if (dev->tx_slot == NUM_TX_BUFF) {
		dev->tx_slot = 0;
		ctrl |= MAL_TX_CTRL_WRAP;
	}

	DBG2("%d: xmit(%u) %d" NL, dev->def->index, len, slot);

	dev->tx_skb[slot] = skb;
	dev->tx_desc[slot].data_ptr = dma_map_single(dev->ldev, skb->data, len,
						     DMA_TO_DEVICE);
	dev->tx_desc[slot].data_len = (u16) len;
	barrier();
	dev->tx_desc[slot].ctrl = ctrl;

	return emac_xmit_finish(dev, len);
}
#if defined(CONFIG_IBM_EMAC_TAH)
static inline int emac_xmit_split(struct ocp_enet_private *dev, int slot,
				  u32 pd, int len, int last, u16 base_ctrl)
{
	while (1) {
		u16 ctrl = base_ctrl;
		int chunk = min(len, MAL_MAX_TX_SIZE);
		len -= chunk;

		slot = (slot + 1) % NUM_TX_BUFF;

		if (last && !len)
			ctrl |= MAL_TX_CTRL_LAST;
		if (slot == NUM_TX_BUFF - 1)
			ctrl |= MAL_TX_CTRL_WRAP;

		dev->tx_skb[slot] = NULL;
		dev->tx_desc[slot].data_ptr = pd;
		dev->tx_desc[slot].data_len = (u16) chunk;
		dev->tx_desc[slot].ctrl = ctrl;
		++dev->tx_cnt;

		if (!len)
			break;

		pd += chunk;
	}
	return slot;
}
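/* Worked example (illustrative; assuming MAL_MAX_TX_SIZE of 4096 for round
 * numbers — the real constant lives in ibm_emac_mal.h): a 10000-byte
 * contiguous region is emitted as three BDs of 4096, 4096 and 1808 bytes,
 * and only the final BD of the final region of the frame carries
 * MAL_TX_CTRL_LAST.
 */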
/* BHs disabled (SG version for TAH equipped EMACs) */
static int emac_start_xmit_sg(struct sk_buff *skb, struct net_device *ndev)
{
	struct ocp_enet_private *dev = ndev->priv;
	int nr_frags = skb_shinfo(skb)->nr_frags;
	int len = skb->len, chunk;
	int slot, i;
	u16 ctrl;
	u32 pd;

	/* This is common "fast" path */
	if (likely(!nr_frags && len <= MAL_MAX_TX_SIZE))
		return emac_start_xmit(skb, ndev);

	len -= skb->data_len;

	/* Note, this is only an *estimation*, we can still run out of empty
	 * slots because of the additional fragmentation into
	 * MAL_MAX_TX_SIZE-sized chunks
	 */
	if (unlikely(dev->tx_cnt + nr_frags + mal_tx_chunks(len) > NUM_TX_BUFF))
		goto stop_queue;

	ctrl = EMAC_TX_CTRL_GFCS | EMAC_TX_CTRL_GP | MAL_TX_CTRL_READY |
	    emac_tx_csum(dev, skb);
	slot = dev->tx_slot;

	/* skb data */
	dev->tx_skb[slot] = NULL;
	chunk = min(len, MAL_MAX_TX_SIZE);
	dev->tx_desc[slot].data_ptr = pd =
	    dma_map_single(dev->ldev, skb->data, len, DMA_TO_DEVICE);
	dev->tx_desc[slot].data_len = (u16) chunk;
	len -= chunk;
	if (unlikely(len))
		slot = emac_xmit_split(dev, slot, pd + chunk, len, !nr_frags,
				       ctrl);
	/* skb fragments */
	for (i = 0; i < nr_frags; ++i) {
		struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[i];
		len = frag->size;

		if (unlikely(dev->tx_cnt + mal_tx_chunks(len) >= NUM_TX_BUFF))
			goto undo_frame;

		pd = dma_map_page(dev->ldev, frag->page, frag->page_offset, len,
				  DMA_TO_DEVICE);

		slot = emac_xmit_split(dev, slot, pd, len, i == nr_frags - 1,
				       ctrl);
	}

	DBG2("%d: xmit_sg(%u) %d - %d" NL, dev->def->index, skb->len,
	     dev->tx_slot, slot);

	/* Attach skb to the last slot so we don't release it too early */
	dev->tx_skb[slot] = skb;

	/* Send the packet out */
	if (dev->tx_slot == NUM_TX_BUFF - 1)
		ctrl |= MAL_TX_CTRL_WRAP;
	barrier();
	dev->tx_desc[dev->tx_slot].ctrl = ctrl;
	dev->tx_slot = (slot + 1) % NUM_TX_BUFF;

	return emac_xmit_finish(dev, skb->len);

      undo_frame:
	/* Well, too bad. Our previous estimation was overly optimistic.
	 * Undo everything.
	 */
	while (slot != dev->tx_slot) {
		dev->tx_desc[slot].ctrl = 0;
		--dev->tx_cnt;
		if (--slot < 0)
			slot = NUM_TX_BUFF - 1;
	}
	++dev->estats.tx_undo;

      stop_queue:
	netif_stop_queue(ndev);
	DBG2("%d: stopped TX queue" NL, dev->def->index);
	return 1;
}
#else
# define emac_start_xmit_sg	emac_start_xmit
#endif	/* !defined(CONFIG_IBM_EMAC_TAH) */
/* BHs disabled */
static void emac_parse_tx_error(struct ocp_enet_private *dev, u16 ctrl)
{
	struct ibm_emac_error_stats *st = &dev->estats;
	DBG("%d: BD TX error %04x" NL, dev->def->index, ctrl);

	++st->tx_bd_errors;
	if (ctrl & EMAC_TX_ST_BFCS)
		++st->tx_bd_bad_fcs;
	if (ctrl & EMAC_TX_ST_LCS)
		++st->tx_bd_carrier_loss;
	if (ctrl & EMAC_TX_ST_ED)
		++st->tx_bd_excessive_deferral;
	if (ctrl & EMAC_TX_ST_EC)
		++st->tx_bd_excessive_collisions;
	if (ctrl & EMAC_TX_ST_LC)
		++st->tx_bd_late_collision;
	if (ctrl & EMAC_TX_ST_MC)
		++st->tx_bd_multple_collisions;
	if (ctrl & EMAC_TX_ST_SC)
		++st->tx_bd_single_collision;
	if (ctrl & EMAC_TX_ST_UR)
		++st->tx_bd_underrun;
	if (ctrl & EMAC_TX_ST_SQE)
		++st->tx_bd_sqe;
}
static void emac_poll_tx(void *param)
{
	struct ocp_enet_private *dev = param;
	DBG2("%d: poll_tx, %d %d" NL, dev->def->index, dev->tx_cnt,
	     dev->ack_slot);

	if (dev->tx_cnt) {
		u16 ctrl;
		int slot = dev->ack_slot, n = 0;
	      again:
		ctrl = dev->tx_desc[slot].ctrl;
		if (!(ctrl & MAL_TX_CTRL_READY)) {
			struct sk_buff *skb = dev->tx_skb[slot];
			++n;

			if (skb) {
				dev_kfree_skb(skb);
				dev->tx_skb[slot] = NULL;
			}
			slot = (slot + 1) % NUM_TX_BUFF;

			if (unlikely(EMAC_IS_BAD_TX(ctrl)))
				emac_parse_tx_error(dev, ctrl);

			if (--dev->tx_cnt)
				goto again;
		}
		if (n) {
			dev->ack_slot = slot;
			if (netif_queue_stopped(dev->ndev) &&
			    dev->tx_cnt < EMAC_TX_WAKEUP_THRESH)
				netif_wake_queue(dev->ndev);

			DBG2("%d: tx %d pkts" NL, dev->def->index, n);
		}
	}
}
static inline void emac_recycle_rx_skb(struct ocp_enet_private *dev, int slot,
					int len)
{
	struct sk_buff *skb = dev->rx_skb[slot];
	DBG2("%d: recycle %d %d" NL, dev->def->index, slot, len);

	if (len)
		dma_map_single(dev->ldev, skb->data - 2,
			       EMAC_DMA_ALIGN(len + 2), DMA_FROM_DEVICE);

	dev->rx_desc[slot].data_len = 0;
	barrier();
	dev->rx_desc[slot].ctrl = MAL_RX_CTRL_EMPTY |
	    (slot == (NUM_RX_BUFF - 1) ? MAL_RX_CTRL_WRAP : 0);
}
static void emac_parse_rx_error(struct ocp_enet_private *dev, u16 ctrl)
{
	struct ibm_emac_error_stats *st = &dev->estats;
	DBG("%d: BD RX error %04x" NL, dev->def->index, ctrl);

	++st->rx_bd_errors;
	if (ctrl & EMAC_RX_ST_OE)
		++st->rx_bd_overrun;
	if (ctrl & EMAC_RX_ST_BP)
		++st->rx_bd_bad_packet;
	if (ctrl & EMAC_RX_ST_RP)
		++st->rx_bd_runt_packet;
	if (ctrl & EMAC_RX_ST_SE)
		++st->rx_bd_short_event;
	if (ctrl & EMAC_RX_ST_AE)
		++st->rx_bd_alignment_error;
	if (ctrl & EMAC_RX_ST_BFCS)
		++st->rx_bd_bad_fcs;
	if (ctrl & EMAC_RX_ST_PTL)
		++st->rx_bd_packet_too_long;
	if (ctrl & EMAC_RX_ST_ORE)
		++st->rx_bd_out_of_range;
	if (ctrl & EMAC_RX_ST_IRE)
		++st->rx_bd_in_range;
}
static inline void emac_rx_csum(struct ocp_enet_private *dev,
				struct sk_buff *skb, u16 ctrl)
{
#if defined(CONFIG_IBM_EMAC_TAH)
	if (!ctrl && dev->tah_dev) {
		skb->ip_summed = CHECKSUM_UNNECESSARY;
		++dev->stats.rx_packets_csum;
	}
#endif
}

static inline int emac_rx_sg_append(struct ocp_enet_private *dev, int slot)
{
	if (likely(dev->rx_sg_skb != NULL)) {
		int len = dev->rx_desc[slot].data_len;
		int tot_len = dev->rx_sg_skb->len + len;

		if (unlikely(tot_len + 2 > dev->rx_skb_size)) {
			++dev->estats.rx_dropped_mtu;
			dev_kfree_skb(dev->rx_sg_skb);
			dev->rx_sg_skb = NULL;
		} else {
			cacheable_memcpy(skb_tail_pointer(dev->rx_sg_skb),
					 dev->rx_skb[slot]->data, len);
			skb_put(dev->rx_sg_skb, len);
			emac_recycle_rx_skb(dev, slot, len);
			return 0;
		}
	}
	emac_recycle_rx_skb(dev, slot, 0);
	return -1;
}
/* BHs disabled */
static int emac_poll_rx(void *param, int budget)
{
	struct ocp_enet_private *dev = param;
	int slot = dev->rx_slot, received = 0;

	DBG2("%d: poll_rx(%d)" NL, dev->def->index, budget);

      again:
	while (budget > 0) {
		int len;
		struct sk_buff *skb;
		u16 ctrl = dev->rx_desc[slot].ctrl;

		if (ctrl & MAL_RX_CTRL_EMPTY)
			break;

		skb = dev->rx_skb[slot];
		barrier();
		len = dev->rx_desc[slot].data_len;

		if (unlikely(!MAL_IS_SINGLE_RX(ctrl)))
			goto sg;

		ctrl &= EMAC_BAD_RX_MASK;
		if (unlikely(ctrl && ctrl != EMAC_RX_TAH_BAD_CSUM)) {
			emac_parse_rx_error(dev, ctrl);
			++dev->estats.rx_dropped_error;
			emac_recycle_rx_skb(dev, slot, 0);
			len = 0;
			goto next;
		}

		if (len && len < EMAC_RX_COPY_THRESH) {
			struct sk_buff *copy_skb =
			    alloc_skb(len + EMAC_RX_SKB_HEADROOM + 2, GFP_ATOMIC);
			if (unlikely(!copy_skb))
				goto oom;

			skb_reserve(copy_skb, EMAC_RX_SKB_HEADROOM + 2);
			cacheable_memcpy(copy_skb->data - 2, skb->data - 2,
					 len + 2);
			emac_recycle_rx_skb(dev, slot, len);
			skb = copy_skb;
		} else if (unlikely(emac_alloc_rx_skb(dev, slot, GFP_ATOMIC)))
			goto oom;

		skb_put(skb, len);
	      push_packet:
		skb->protocol = eth_type_trans(skb, dev->ndev);
		emac_rx_csum(dev, skb, ctrl);

		if (unlikely(netif_receive_skb(skb) == NET_RX_DROP))
			++dev->estats.rx_dropped_stack;
	      next:
		++dev->stats.rx_packets;
	      skip:
		dev->stats.rx_bytes += len;
		slot = (slot + 1) % NUM_RX_BUFF;
		--budget;
		++received;
		continue;
	      sg:
		if (ctrl & MAL_RX_CTRL_FIRST) {
			BUG_ON(dev->rx_sg_skb);
			if (unlikely(emac_alloc_rx_skb(dev, slot, GFP_ATOMIC))) {
				DBG("%d: rx OOM %d" NL, dev->def->index, slot);
				++dev->estats.rx_dropped_oom;
				emac_recycle_rx_skb(dev, slot, 0);
			} else {
				dev->rx_sg_skb = skb;
				skb_put(skb, len);
			}
		} else if (!emac_rx_sg_append(dev, slot) &&
			   (ctrl & MAL_RX_CTRL_LAST)) {

			skb = dev->rx_sg_skb;
			dev->rx_sg_skb = NULL;

			ctrl &= EMAC_BAD_RX_MASK;
			if (unlikely(ctrl && ctrl != EMAC_RX_TAH_BAD_CSUM)) {
				emac_parse_rx_error(dev, ctrl);
				++dev->estats.rx_dropped_error;
				dev_kfree_skb(skb);
				len = 0;
			} else
				goto push_packet;
		}
		goto skip;
	      oom:
		DBG("%d: rx OOM %d" NL, dev->def->index, slot);
		/* Drop the packet and recycle skb */
		++dev->estats.rx_dropped_oom;
		emac_recycle_rx_skb(dev, slot, 0);
		goto next;
	}

	if (received) {
		DBG2("%d: rx %d BDs" NL, dev->def->index, received);
		dev->rx_slot = slot;
	}

	if (unlikely(budget && dev->commac.rx_stopped)) {
		struct ocp_func_emac_data *emacdata = dev->def->additions;

		barrier();
		if (!(dev->rx_desc[slot].ctrl & MAL_RX_CTRL_EMPTY)) {
			DBG2("%d: rx restart" NL, dev->def->index);
			received = 0;
			goto again;
		}

		if (dev->rx_sg_skb) {
			DBG2("%d: dropping partial rx packet" NL,
			     dev->def->index);
			++dev->estats.rx_dropped_error;
			dev_kfree_skb(dev->rx_sg_skb);
			dev->rx_sg_skb = NULL;
		}

		dev->commac.rx_stopped = 0;
		mal_enable_rx_channel(dev->mal, emacdata->mal_rx_chan);
		emac_rx_enable(dev);
		dev->rx_slot = 0;
	}
	return received;
}
/* BHs disabled */
static int emac_peek_rx(void *param)
{
	struct ocp_enet_private *dev = param;
	return !(dev->rx_desc[dev->rx_slot].ctrl & MAL_RX_CTRL_EMPTY);
}

/* BHs disabled */
static int emac_peek_rx_sg(void *param)
{
	struct ocp_enet_private *dev = param;
	int slot = dev->rx_slot;
	while (1) {
		u16 ctrl = dev->rx_desc[slot].ctrl;
		if (ctrl & MAL_RX_CTRL_EMPTY)
			return 0;
		else if (ctrl & MAL_RX_CTRL_LAST)
			return 1;

		slot = (slot + 1) % NUM_RX_BUFF;

		/* I'm just being paranoid here :) */
		if (unlikely(slot == dev->rx_slot))
			return 0;
	}
}
/* Hard IRQ */
static void emac_rxde(void *param)
{
	struct ocp_enet_private *dev = param;
	++dev->estats.rx_stopped;
	emac_rx_disable_async(dev);
}
/* Hard IRQ */
static irqreturn_t emac_irq(int irq, void *dev_instance)
{
	struct ocp_enet_private *dev = dev_instance;
	struct emac_regs __iomem *p = dev->emacp;
	struct ibm_emac_error_stats *st = &dev->estats;

	u32 isr = in_be32(&p->isr);
	out_be32(&p->isr, isr);

	DBG("%d: isr = %08x" NL, dev->def->index, isr);

	if (isr & EMAC_ISR_TXPE)
		++st->tx_parity;
	if (isr & EMAC_ISR_RXPE)
		++st->rx_parity;
	if (isr & EMAC_ISR_TXUE)
		++st->tx_underrun;
	if (isr & EMAC_ISR_RXOE)
		++st->rx_fifo_overrun;
	if (isr & EMAC_ISR_OVR)
		++st->rx_overrun;
	if (isr & EMAC_ISR_BP)
		++st->rx_bad_packet;
	if (isr & EMAC_ISR_RP)
		++st->rx_runt_packet;
	if (isr & EMAC_ISR_SE)
		++st->rx_short_event;
	if (isr & EMAC_ISR_ALE)
		++st->rx_alignment_error;
	if (isr & EMAC_ISR_BFCS)
		++st->rx_bad_fcs;
	if (isr & EMAC_ISR_PTLE)
		++st->rx_packet_too_long;
	if (isr & EMAC_ISR_ORE)
		++st->rx_out_of_range;
	if (isr & EMAC_ISR_IRE)
		++st->rx_in_range;
	if (isr & EMAC_ISR_SQE)
		++st->tx_sqe;
	if (isr & EMAC_ISR_TE)
		++st->tx_errors;

	return IRQ_HANDLED;
}
static struct net_device_stats *emac_stats(struct net_device *ndev)
{
	struct ocp_enet_private *dev = ndev->priv;
	struct ibm_emac_stats *st = &dev->stats;
	struct ibm_emac_error_stats *est = &dev->estats;
	struct net_device_stats *nst = &dev->nstats;

	DBG2("%d: stats" NL, dev->def->index);

	/* Compute "legacy" statistics */
	local_irq_disable();
	nst->rx_packets = (unsigned long)st->rx_packets;
	nst->rx_bytes = (unsigned long)st->rx_bytes;
	nst->tx_packets = (unsigned long)st->tx_packets;
	nst->tx_bytes = (unsigned long)st->tx_bytes;
	nst->rx_dropped = (unsigned long)(est->rx_dropped_oom +
					  est->rx_dropped_error +
					  est->rx_dropped_resize +
					  est->rx_dropped_mtu);
	nst->tx_dropped = (unsigned long)est->tx_dropped;

	nst->rx_errors = (unsigned long)est->rx_bd_errors;
	nst->rx_fifo_errors = (unsigned long)(est->rx_bd_overrun +
					      est->rx_fifo_overrun +
					      est->rx_overrun);
	nst->rx_frame_errors = (unsigned long)(est->rx_bd_alignment_error +
					       est->rx_alignment_error);
	nst->rx_crc_errors = (unsigned long)(est->rx_bd_bad_fcs +
					     est->rx_bad_fcs);
	nst->rx_length_errors = (unsigned long)(est->rx_bd_runt_packet +
						est->rx_bd_short_event +
						est->rx_bd_packet_too_long +
						est->rx_bd_out_of_range +
						est->rx_bd_in_range +
						est->rx_runt_packet +
						est->rx_short_event +
						est->rx_packet_too_long +
						est->rx_out_of_range +
						est->rx_in_range);

	nst->tx_errors = (unsigned long)(est->tx_bd_errors + est->tx_errors);
	nst->tx_fifo_errors = (unsigned long)(est->tx_bd_underrun +
					      est->tx_underrun);
	nst->tx_carrier_errors = (unsigned long)est->tx_bd_carrier_loss;
	nst->collisions = (unsigned long)(est->tx_bd_excessive_deferral +
					  est->tx_bd_excessive_collisions +
					  est->tx_bd_late_collision +
					  est->tx_bd_multple_collisions);
	local_irq_enable();
	return nst;
}
static void emac_remove(struct ocp_device *ocpdev)
{
	struct ocp_enet_private *dev = ocp_get_drvdata(ocpdev);

	DBG("%d: remove" NL, dev->def->index);

	ocp_set_drvdata(ocpdev, NULL);
	unregister_netdev(dev->ndev);

	tah_fini(dev->tah_dev);
	rgmii_fini(dev->rgmii_dev, dev->rgmii_input);
	zmii_fini(dev->zmii_dev, dev->zmii_input);

	emac_dbg_register(dev->def->index, NULL);

	mal_unregister_commac(dev->mal, &dev->commac);
	iounmap(dev->emacp);
	kfree(dev->ndev);
}
static struct mal_commac_ops emac_commac_ops = {
	.poll_tx = &emac_poll_tx,
	.poll_rx = &emac_poll_rx,
	.peek_rx = &emac_peek_rx,
	.rxde = &emac_rxde,
};

static struct mal_commac_ops emac_commac_sg_ops = {
	.poll_tx = &emac_poll_tx,
	.poll_rx = &emac_poll_rx,
	.peek_rx = &emac_peek_rx_sg,
	.rxde = &emac_rxde,
};
/* Ethtool support */
static int emac_ethtool_get_settings(struct net_device *ndev,
				     struct ethtool_cmd *cmd)
{
	struct ocp_enet_private *dev = ndev->priv;

	cmd->supported = dev->phy.features;
	cmd->port = PORT_MII;
	cmd->phy_address = dev->phy.address;
	cmd->transceiver =
	    dev->phy.address >= 0 ? XCVR_EXTERNAL : XCVR_INTERNAL;

	local_bh_disable();
	cmd->advertising = dev->phy.advertising;
	cmd->autoneg = dev->phy.autoneg;
	cmd->speed = dev->phy.speed;
	cmd->duplex = dev->phy.duplex;
	local_bh_enable();

	return 0;
}
static int emac_ethtool_set_settings(struct net_device *ndev,
				     struct ethtool_cmd *cmd)
{
	struct ocp_enet_private *dev = ndev->priv;
	u32 f = dev->phy.features;

	DBG("%d: set_settings(%d, %d, %d, 0x%08x)" NL, dev->def->index,
	    cmd->autoneg, cmd->speed, cmd->duplex, cmd->advertising);

	/* Basic sanity checks */
	if (dev->phy.address < 0)
		return -EOPNOTSUPP;
	if (cmd->autoneg != AUTONEG_ENABLE && cmd->autoneg != AUTONEG_DISABLE)
		return -EINVAL;
	if (cmd->autoneg == AUTONEG_ENABLE && cmd->advertising == 0)
		return -EINVAL;
	if (cmd->duplex != DUPLEX_HALF && cmd->duplex != DUPLEX_FULL)
		return -EINVAL;

	if (cmd->autoneg == AUTONEG_DISABLE) {
		switch (cmd->speed) {
		case SPEED_10:
			if (cmd->duplex == DUPLEX_HALF
			    && !(f & SUPPORTED_10baseT_Half))
				return -EINVAL;
			if (cmd->duplex == DUPLEX_FULL
			    && !(f & SUPPORTED_10baseT_Full))
				return -EINVAL;
			break;
		case SPEED_100:
			if (cmd->duplex == DUPLEX_HALF
			    && !(f & SUPPORTED_100baseT_Half))
				return -EINVAL;
			if (cmd->duplex == DUPLEX_FULL
			    && !(f & SUPPORTED_100baseT_Full))
				return -EINVAL;
			break;
		case SPEED_1000:
			if (cmd->duplex == DUPLEX_HALF
			    && !(f & SUPPORTED_1000baseT_Half))
				return -EINVAL;
			if (cmd->duplex == DUPLEX_FULL
			    && !(f & SUPPORTED_1000baseT_Full))
				return -EINVAL;
			break;
		default:
			return -EINVAL;
		}

		local_bh_disable();
		dev->phy.def->ops->setup_forced(&dev->phy, cmd->speed,
						cmd->duplex);

	} else {
		if (!(f & SUPPORTED_Autoneg))
			return -EINVAL;

		local_bh_disable();
		dev->phy.def->ops->setup_aneg(&dev->phy,
					      (cmd->advertising & f) |
					      (dev->phy.advertising &
					       (ADVERTISED_Pause |
						ADVERTISED_Asym_Pause)));
	}
	emac_force_link_update(dev);
	local_bh_enable();

	return 0;
}
static void emac_ethtool_get_ringparam(struct net_device *ndev,
				       struct ethtool_ringparam *rp)
{
	rp->rx_max_pending = rp->rx_pending = NUM_RX_BUFF;
	rp->tx_max_pending = rp->tx_pending = NUM_TX_BUFF;
}

static void emac_ethtool_get_pauseparam(struct net_device *ndev,
					struct ethtool_pauseparam *pp)
{
	struct ocp_enet_private *dev = ndev->priv;

	local_bh_disable();
	if ((dev->phy.features & SUPPORTED_Autoneg) &&
	    (dev->phy.advertising & (ADVERTISED_Pause | ADVERTISED_Asym_Pause)))
		pp->autoneg = 1;

	if (dev->phy.duplex == DUPLEX_FULL) {
		if (dev->phy.pause)
			pp->rx_pause = pp->tx_pause = 1;
		else if (dev->phy.asym_pause)
			pp->tx_pause = 1;
	}
	local_bh_enable();
}
static u32 emac_ethtool_get_rx_csum(struct net_device *ndev)
{
	struct ocp_enet_private *dev = ndev->priv;
	return dev->tah_dev != 0;
}

static int emac_get_regs_len(struct ocp_enet_private *dev)
{
	return sizeof(struct emac_ethtool_regs_subhdr) + EMAC_ETHTOOL_REGS_SIZE;
}

static int emac_ethtool_get_regs_len(struct net_device *ndev)
{
	struct ocp_enet_private *dev = ndev->priv;
	return sizeof(struct emac_ethtool_regs_hdr) +
	    emac_get_regs_len(dev) + mal_get_regs_len(dev->mal) +
	    zmii_get_regs_len(dev->zmii_dev) +
	    rgmii_get_regs_len(dev->rgmii_dev) +
	    tah_get_regs_len(dev->tah_dev);
}
static void *emac_dump_regs(struct ocp_enet_private *dev, void *buf)
{
	struct emac_ethtool_regs_subhdr *hdr = buf;

	hdr->version = EMAC_ETHTOOL_REGS_VER;
	hdr->index = dev->def->index;
	memcpy_fromio(hdr + 1, dev->emacp, EMAC_ETHTOOL_REGS_SIZE);
	return ((void *)(hdr + 1) + EMAC_ETHTOOL_REGS_SIZE);
}

static void emac_ethtool_get_regs(struct net_device *ndev,
				  struct ethtool_regs *regs, void *buf)
{
	struct ocp_enet_private *dev = ndev->priv;
	struct emac_ethtool_regs_hdr *hdr = buf;

	hdr->components = 0;
	buf = hdr + 1;

	local_irq_disable();
	buf = mal_dump_regs(dev->mal, buf);
	buf = emac_dump_regs(dev, buf);
	if (dev->zmii_dev) {
		hdr->components |= EMAC_ETHTOOL_REGS_ZMII;
		buf = zmii_dump_regs(dev->zmii_dev, buf);
	}
	if (dev->rgmii_dev) {
		hdr->components |= EMAC_ETHTOOL_REGS_RGMII;
		buf = rgmii_dump_regs(dev->rgmii_dev, buf);
	}
	if (dev->tah_dev) {
		hdr->components |= EMAC_ETHTOOL_REGS_TAH;
		buf = tah_dump_regs(dev->tah_dev, buf);
	}
	local_irq_enable();
}
static int emac_ethtool_nway_reset(struct net_device *ndev)
{
	struct ocp_enet_private *dev = ndev->priv;
	int res = 0;

	DBG("%d: nway_reset" NL, dev->def->index);

	if (dev->phy.address < 0)
		return -EOPNOTSUPP;

	local_bh_disable();
	if (!dev->phy.autoneg) {
		res = -EINVAL;
		goto out;
	}

	dev->phy.def->ops->setup_aneg(&dev->phy, dev->phy.advertising);
	emac_force_link_update(dev);

      out:
	local_bh_enable();
	return res;
}
static int emac_ethtool_get_stats_count(struct net_device *ndev)
{
	return EMAC_ETHTOOL_STATS_COUNT;
}

static void emac_ethtool_get_strings(struct net_device *ndev, u32 stringset,
				     u8 * buf)
{
	if (stringset == ETH_SS_STATS)
		memcpy(buf, &emac_stats_keys, sizeof(emac_stats_keys));
}

static void emac_ethtool_get_ethtool_stats(struct net_device *ndev,
					   struct ethtool_stats *estats,
					   u64 * tmp_stats)
{
	struct ocp_enet_private *dev = ndev->priv;
	local_irq_disable();
	memcpy(tmp_stats, &dev->stats, sizeof(dev->stats));
	tmp_stats += sizeof(dev->stats) / sizeof(u64);
	memcpy(tmp_stats, &dev->estats, sizeof(dev->estats));
	local_irq_enable();
}
static void emac_ethtool_get_drvinfo(struct net_device *ndev,
				     struct ethtool_drvinfo *info)
{
	struct ocp_enet_private *dev = ndev->priv;

	strcpy(info->driver, "ibm_emac");
	strcpy(info->version, DRV_VERSION);
	info->fw_version[0] = '\0';
	sprintf(info->bus_info, "PPC 4xx EMAC %d", dev->def->index);
	info->n_stats = emac_ethtool_get_stats_count(ndev);
	info->regdump_len = emac_ethtool_get_regs_len(ndev);
}
static const struct ethtool_ops emac_ethtool_ops = {
	.get_settings = emac_ethtool_get_settings,
	.set_settings = emac_ethtool_set_settings,
	.get_drvinfo = emac_ethtool_get_drvinfo,

	.get_regs_len = emac_ethtool_get_regs_len,
	.get_regs = emac_ethtool_get_regs,

	.nway_reset = emac_ethtool_nway_reset,

	.get_ringparam = emac_ethtool_get_ringparam,
	.get_pauseparam = emac_ethtool_get_pauseparam,

	.get_rx_csum = emac_ethtool_get_rx_csum,

	.get_strings = emac_ethtool_get_strings,
	.get_stats_count = emac_ethtool_get_stats_count,
	.get_ethtool_stats = emac_ethtool_get_ethtool_stats,

	.get_link = ethtool_op_get_link,
	.get_tx_csum = ethtool_op_get_tx_csum,
	.get_sg = ethtool_op_get_sg,
};
static int emac_ioctl(struct net_device *ndev, struct ifreq *rq, int cmd)
{
	struct ocp_enet_private *dev = ndev->priv;
	uint16_t *data = (uint16_t *) & rq->ifr_ifru;

	DBG("%d: ioctl %08x" NL, dev->def->index, cmd);

	if (dev->phy.address < 0)
		return -EOPNOTSUPP;

	switch (cmd) {
	case SIOCGMIIPHY:
	case SIOCDEVPRIVATE:
		data[0] = dev->phy.address;
		/* Fall through */
	case SIOCGMIIREG:
	case SIOCDEVPRIVATE + 1:
		data[3] = emac_mdio_read(ndev, dev->phy.address, data[1]);
		return 0;

	case SIOCSMIIREG:
	case SIOCDEVPRIVATE + 2:
		if (!capable(CAP_NET_ADMIN))
			return -EPERM;
		emac_mdio_write(ndev, dev->phy.address, data[1], data[2]);
		return 0;
	default:
		return -EOPNOTSUPP;
	}
}
static int __init emac_probe(struct ocp_device *ocpdev)
{
	struct ocp_func_emac_data *emacdata = ocpdev->def->additions;
	struct net_device *ndev;
	struct ocp_device *maldev;
	struct ocp_enet_private *dev;
	int err, i;

	DBG("%d: probe" NL, ocpdev->def->index);

	if (!emacdata) {
		printk(KERN_ERR "emac%d: Missing additional data!\n",
		       ocpdev->def->index);
		return -ENODEV;
	}

	/* Allocate our net_device structure */
	ndev = alloc_etherdev(sizeof(struct ocp_enet_private));
	if (!ndev) {
		printk(KERN_ERR "emac%d: could not allocate ethernet device!\n",
		       ocpdev->def->index);
		return -ENOMEM;
	}
	dev = ndev->priv;
	dev->ndev = ndev;
	dev->ldev = &ocpdev->dev;
	dev->def = ocpdev->def;
	SET_MODULE_OWNER(ndev);
	/* Find MAL device we are connected to */
	maldev =
	    ocp_find_device(OCP_VENDOR_IBM, OCP_FUNC_MAL, emacdata->mal_idx);
	if (!maldev) {
		printk(KERN_ERR "emac%d: unknown mal%d device!\n",
		       dev->def->index, emacdata->mal_idx);
		err = -ENODEV;
		goto out;
	}
	dev->mal = ocp_get_drvdata(maldev);
	if (!dev->mal) {
		printk(KERN_ERR "emac%d: mal%d hasn't been initialized yet!\n",
		       dev->def->index, emacdata->mal_idx);
		err = -ENODEV;
		goto out;
	}

	/* Register with MAL */
	dev->commac.ops = &emac_commac_ops;
	dev->commac.dev = dev;
	dev->commac.tx_chan_mask = MAL_CHAN_MASK(emacdata->mal_tx_chan);
	dev->commac.rx_chan_mask = MAL_CHAN_MASK(emacdata->mal_rx_chan);
	err = mal_register_commac(dev->mal, &dev->commac);
	if (err) {
		printk(KERN_ERR "emac%d: failed to register with mal%d!\n",
		       dev->def->index, emacdata->mal_idx);
		goto out;
	}
	dev->rx_skb_size = emac_rx_skb_size(ndev->mtu);
	dev->rx_sync_size = emac_rx_sync_size(ndev->mtu);

	/* Get pointers to BD rings */
	dev->tx_desc =
	    dev->mal->bd_virt + mal_tx_bd_offset(dev->mal,
						 emacdata->mal_tx_chan);
	dev->rx_desc =
	    dev->mal->bd_virt + mal_rx_bd_offset(dev->mal,
						 emacdata->mal_rx_chan);

	DBG("%d: tx_desc %p" NL, ocpdev->def->index, dev->tx_desc);
	DBG("%d: rx_desc %p" NL, ocpdev->def->index, dev->rx_desc);

	/* Clean rings */
	memset(dev->tx_desc, 0, NUM_TX_BUFF * sizeof(struct mal_descriptor));
	memset(dev->rx_desc, 0, NUM_RX_BUFF * sizeof(struct mal_descriptor));
	/* If we depend on another EMAC for MDIO, check whether it was probed already */
	if (emacdata->mdio_idx >= 0 && emacdata->mdio_idx != ocpdev->def->index) {
		struct ocp_device *mdiodev =
		    ocp_find_device(OCP_VENDOR_IBM, OCP_FUNC_EMAC,
				    emacdata->mdio_idx);
		if (!mdiodev) {
			printk(KERN_ERR "emac%d: unknown emac%d device!\n",
			       dev->def->index, emacdata->mdio_idx);
			err = -ENODEV;
			goto out2;
		}
		dev->mdio_dev = ocp_get_drvdata(mdiodev);
		if (!dev->mdio_dev) {
			printk(KERN_ERR
			       "emac%d: emac%d hasn't been initialized yet!\n",
			       dev->def->index, emacdata->mdio_idx);
			err = -ENODEV;
			goto out2;
		}
	}
	/* Attach to ZMII, if needed */
	if ((err = zmii_attach(dev)) != 0)
		goto out2;

	/* Attach to RGMII, if needed */
	if ((err = rgmii_attach(dev)) != 0)
		goto out3;

	/* Attach to TAH, if needed */
	if ((err = tah_attach(dev)) != 0)
		goto out4;

	/* Map EMAC regs */
	dev->emacp = ioremap(dev->def->paddr, sizeof(struct emac_regs));
	if (!dev->emacp) {
		printk(KERN_ERR "emac%d: could not ioremap device registers!\n",
		       dev->def->index);
		err = -ENOMEM;
		goto out5;
	}
	/* Fill in MAC address */
	for (i = 0; i < 6; ++i)
		ndev->dev_addr[i] = emacdata->mac_addr[i];

	/* Set some link defaults before we can find out real parameters */
	dev->phy.speed = SPEED_100;
	dev->phy.duplex = DUPLEX_FULL;
	dev->phy.autoneg = AUTONEG_DISABLE;
	dev->phy.pause = dev->phy.asym_pause = 0;
	dev->stop_timeout = STOP_TIMEOUT_100;
	init_timer(&dev->link_timer);
	dev->link_timer.function = emac_link_timer;
	dev->link_timer.data = (unsigned long)dev;
	/* Find PHY if any */
	dev->phy.dev = ndev;
	dev->phy.mode = emacdata->phy_mode;
	if (emacdata->phy_map != 0xffffffff) {
		u32 phy_map = emacdata->phy_map | busy_phy_map;
		u32 adv;

		DBG("%d: PHY maps %08x %08x" NL, dev->def->index,
		    emacdata->phy_map, busy_phy_map);

		EMAC_RX_CLK_TX(dev->def->index);

		dev->phy.mdio_read = emac_mdio_read;
		dev->phy.mdio_write = emac_mdio_write;

		/* Configure EMAC with defaults so we can at least use MDIO
		 * This is needed mostly for 440GX
		 */
		if (emac_phy_gpcs(dev->phy.mode)) {
			/* XXX
			 * Make GPCS PHY address equal to EMAC index.
			 * We probably should take into account busy_phy_map
			 * and/or phy_map here.
			 */
			dev->phy.address = dev->def->index;
		}

		emac_configure(dev);

		for (i = 0; i < 0x20; phy_map >>= 1, ++i)
			if (!(phy_map & 1)) {
				int r;
				busy_phy_map |= 1 << i;

				/* Quick check if there is a PHY at the address */
				r = emac_mdio_read(dev->ndev, i, MII_BMCR);
				if (r == 0xffff || r < 0)
					continue;
				if (!mii_phy_probe(&dev->phy, i))
					break;
			}
		if (i == 0x20) {
			printk(KERN_WARNING "emac%d: can't find PHY!\n",
			       dev->def->index);
			goto out6;
		}

		/* Init PHY */
		if (dev->phy.def->ops->init)
			dev->phy.def->ops->init(&dev->phy);

		/* Disable any PHY features not supported by the platform */
		dev->phy.def->features &= ~emacdata->phy_feat_exc;

		/* Setup initial link parameters */
		if (dev->phy.features & SUPPORTED_Autoneg) {
			adv = dev->phy.features;
#if !defined(CONFIG_40x)
			adv |= ADVERTISED_Pause | ADVERTISED_Asym_Pause;
#endif
			/* Restart autonegotiation */
			dev->phy.def->ops->setup_aneg(&dev->phy, adv);
		} else {
			u32 f = dev->phy.def->features;
			int speed = SPEED_10, fd = DUPLEX_HALF;

			/* Select highest supported speed/duplex */
			if (f & SUPPORTED_1000baseT_Full) {
				speed = SPEED_1000;
				fd = DUPLEX_FULL;
			} else if (f & SUPPORTED_1000baseT_Half)
				speed = SPEED_1000;
			else if (f & SUPPORTED_100baseT_Full) {
				speed = SPEED_100;
				fd = DUPLEX_FULL;
			} else if (f & SUPPORTED_100baseT_Half)
				speed = SPEED_100;
			else if (f & SUPPORTED_10baseT_Full)
				fd = DUPLEX_FULL;

			/* Force link parameters */
			dev->phy.def->ops->setup_forced(&dev->phy, speed, fd);
		}
	} else {
		emac_reset(dev);

		/* PHY-less configuration.
		 * XXX I probably should move these settings to emacdata
		 */
		dev->phy.address = -1;
		dev->phy.features = SUPPORTED_100baseT_Full | SUPPORTED_MII;
		dev->phy.pause = 1;
	}
	/* Fill in the driver function table */
	ndev->open = &emac_open;
	if (dev->tah_dev) {
		ndev->hard_start_xmit = &emac_start_xmit_sg;
		ndev->features |= NETIF_F_IP_CSUM | NETIF_F_SG;
	} else
		ndev->hard_start_xmit = &emac_start_xmit;
	ndev->tx_timeout = &emac_full_tx_reset;
	ndev->watchdog_timeo = 5 * HZ;
	ndev->stop = &emac_close;
	ndev->get_stats = &emac_stats;
	ndev->set_multicast_list = &emac_set_multicast_list;
	ndev->do_ioctl = &emac_ioctl;
	if (emac_phy_supports_gige(emacdata->phy_mode)) {
		ndev->change_mtu = &emac_change_mtu;
		dev->commac.ops = &emac_commac_sg_ops;
	}
	SET_ETHTOOL_OPS(ndev, &emac_ethtool_ops);

	netif_carrier_off(ndev);
	netif_stop_queue(ndev);

	err = register_netdev(ndev);
	if (err) {
		printk(KERN_ERR "emac%d: failed to register net device (%d)!\n",
		       dev->def->index, err);
		goto out6;
	}

	ocp_set_drvdata(ocpdev, dev);

	printk("%s: emac%d, MAC %02x:%02x:%02x:%02x:%02x:%02x\n",
	       ndev->name, dev->def->index,
	       ndev->dev_addr[0], ndev->dev_addr[1], ndev->dev_addr[2],
	       ndev->dev_addr[3], ndev->dev_addr[4], ndev->dev_addr[5]);

	if (dev->phy.address >= 0)
		printk("%s: found %s PHY (0x%02x)\n", ndev->name,
		       dev->phy.def->name, dev->phy.address);

	emac_dbg_register(dev->def->index, dev);

	return 0;
      out6:
	iounmap(dev->emacp);
      out5:
	tah_fini(dev->tah_dev);
      out4:
	rgmii_fini(dev->rgmii_dev, dev->rgmii_input);
      out3:
	zmii_fini(dev->zmii_dev, dev->zmii_input);
      out2:
	mal_unregister_commac(dev->mal, &dev->commac);
      out:
	kfree(ndev);
	return err;
}
static struct ocp_device_id emac_ids[] = {
	{ .vendor = OCP_VENDOR_IBM, .function = OCP_FUNC_EMAC },
	{ .vendor = OCP_VENDOR_INVALID}
};

static struct ocp_driver emac_driver = {
	.name = "emac",
	.id_table = emac_ids,

	.probe = emac_probe,
	.remove = emac_remove,
};
static int __init emac_init(void)
{
	printk(KERN_INFO DRV_DESC ", version " DRV_VERSION "\n");

	DBG(": init" NL);

	if (mal_init())
		return -ENODEV;

	EMAC_CLK_INTERNAL;
	if (ocp_register_driver(&emac_driver)) {
		EMAC_CLK_EXTERNAL;
		ocp_unregister_driver(&emac_driver);
		mal_exit();
		return -ENODEV;
	}
	EMAC_CLK_EXTERNAL;

	emac_init_debug();
	return 0;
}

static void __exit emac_exit(void)
{
	DBG(": exit" NL);
	ocp_unregister_driver(&emac_driver);
	mal_exit();
	emac_fini_debug();
}

module_init(emac_init);
module_exit(emac_exit);