/*
 * drivers/net/ibm_newemac/core.c
 *
 * Driver for PowerPC 4xx on-chip ethernet controller.
 *
 * Copyright 2007 Benjamin Herrenschmidt, IBM Corp.
 *                <benh@kernel.crashing.org>
 *
 * Based on the arch/ppc version of the driver:
 *
 * Copyright (c) 2004, 2005 Zultys Technologies.
 * Eugene Surovegin <eugene.surovegin@zultys.com> or <ebs@ebshome.net>
 *
 * Based on original work by
 * 	Matt Porter <mporter@kernel.crashing.org>
 *	(c) 2003 Benjamin Herrenschmidt <benh@kernel.crashing.org>
 *      Armin Kuster <akuster@mvista.com>
 * 	Johnnie Peters <jpeters@mvista.com>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the
 * Free Software Foundation; either version 2 of the License, or (at your
 * option) any later version.
 *
 */
27 #include <linux/sched.h>
28 #include <linux/string.h>
29 #include <linux/errno.h>
30 #include <linux/delay.h>
31 #include <linux/types.h>
32 #include <linux/pci.h>
33 #include <linux/etherdevice.h>
34 #include <linux/skbuff.h>
35 #include <linux/crc32.h>
36 #include <linux/ethtool.h>
37 #include <linux/mii.h>
38 #include <linux/bitops.h>
39 #include <linux/workqueue.h>
42 #include <asm/processor.h>
45 #include <asm/uaccess.h>
47 #include <asm/dcr-regs.h>
/*
 * Lack of dma_unmap_???? calls is intentional.
 *
 * API-correct usage requires additional support state information to be
 * maintained for every RX and TX buffer descriptor (BD). Unfortunately, due to
 * EMAC design (e.g. TX buffer passed from network stack can be split into
 * several BDs, dma_map_single/dma_map_page can be used to map particular BD),
 * maintaining such information will add additional overhead.
 * Current DMA API implementation for 4xx processors only ensures cache coherency
 * and dma_unmap_???? routines are empty and are likely to stay this way.
 * I decided to omit dma_unmap_??? calls because I don't want to add additional
 * complexity just for the sake of following some abstract API, when it doesn't
 * add any real benefit to the driver. I understand that this decision maybe
 * controversial, but I really tried to make code API-correct and efficient
 * at the same time and didn't come up with code I liked :(.                --ebs
 */
68 #define DRV_NAME "emac"
69 #define DRV_VERSION "3.54"
70 #define DRV_DESC "PPC 4xx OCP EMAC driver"
72 MODULE_DESCRIPTION(DRV_DESC
);
74 ("Eugene Surovegin <eugene.surovegin@zultys.com> or <ebs@ebshome.net>");
75 MODULE_LICENSE("GPL");
/*
 * PPC64 doesn't (yet) have a cacheable_memcpy
 * NOTE(review): upstream wraps this fallback in #ifdef CONFIG_PPC64 --
 * the guard was lost in extraction; confirm against the original source.
 */
#ifdef CONFIG_PPC64
#define cacheable_memcpy(d,s,n) memcpy((d),(s),(n))
#endif
/* minimum number of free TX descriptors required to wake up TX process */
#define EMAC_TX_WAKEUP_THRESH		(NUM_TX_BUFF / 4)

/* If packet size is less than this number, we allocate small skb and copy packet
 * contents into it instead of just sending original big skb up
 */
#define EMAC_RX_COPY_THRESH		CONFIG_IBM_NEW_EMAC_RX_COPY_THRESHOLD
92 /* Since multiple EMACs share MDIO lines in various ways, we need
93 * to avoid re-using the same PHY ID in cases where the arch didn't
94 * setup precise phy_map entries
96 * XXX This is something that needs to be reworked as we can have multiple
97 * EMAC "sets" (multiple ASICs containing several EMACs) though we can
98 * probably require in that case to have explicit PHY IDs in the device-tree
100 static u32 busy_phy_map
;
101 static DEFINE_MUTEX(emac_phy_map_lock
);
103 /* This is the wait queue used to wait on any event related to probe, that
104 * is discovery of MALs, other EMACs, ZMII/RGMIIs, etc...
106 static DECLARE_WAIT_QUEUE_HEAD(emac_probe_wait
);
108 /* Having stable interface names is a doomed idea. However, it would be nice
109 * if we didn't have completely random interface names at boot too :-) It's
110 * just a matter of making everybody's life easier. Since we are doing
111 * threaded probing, it's a bit harder though. The base idea here is that
112 * we make up a list of all emacs in the device-tree before we register the
113 * driver. Every emac will then wait for the previous one in the list to
114 * initialize before itself. We should also keep that list ordered by
116 * That list is only 4 entries long, meaning that additional EMACs don't
117 * get ordering guarantees unless EMAC_BOOT_LIST_SIZE is increased.
120 #define EMAC_BOOT_LIST_SIZE 4
121 static struct device_node
*emac_boot_list
[EMAC_BOOT_LIST_SIZE
];
123 /* How long should I wait for dependent devices ? */
124 #define EMAC_PROBE_DEP_TIMEOUT (HZ * 5)
126 /* I don't want to litter system log with timeout errors
127 * when we have brain-damaged PHY.
129 static inline void emac_report_timeout_error(struct emac_instance
*dev
,
133 printk(KERN_ERR
"%s: %s\n", dev
->ndev
->name
, error
);
/* PHY polling intervals */
#define PHY_POLL_LINK_ON	HZ
#define PHY_POLL_LINK_OFF	(HZ / 5)

/* Graceful stop timeouts in us.
 * We should allow up to 1 frame time (full-duplex, ignoring collisions)
 */
#define STOP_TIMEOUT_10		1230
#define STOP_TIMEOUT_100	124
#define STOP_TIMEOUT_1000	13
#define STOP_TIMEOUT_1000_JUMBO	73
/* 802.3x PAUSE-frame multicast address (01:80:C2:00:00:01); the EMAC must
 * always listen to it for flow control to work (see emac_configure()).
 */
static unsigned char default_mcast_addr[] = {
	0x01, 0x80, 0xC2, 0x00, 0x00, 0x01
};
152 /* Please, keep in sync with struct ibm_emac_stats/ibm_emac_error_stats */
153 static const char emac_stats_keys
[EMAC_ETHTOOL_STATS_COUNT
][ETH_GSTRING_LEN
] = {
154 "rx_packets", "rx_bytes", "tx_packets", "tx_bytes", "rx_packets_csum",
155 "tx_packets_csum", "tx_undo", "rx_dropped_stack", "rx_dropped_oom",
156 "rx_dropped_error", "rx_dropped_resize", "rx_dropped_mtu",
157 "rx_stopped", "rx_bd_errors", "rx_bd_overrun", "rx_bd_bad_packet",
158 "rx_bd_runt_packet", "rx_bd_short_event", "rx_bd_alignment_error",
159 "rx_bd_bad_fcs", "rx_bd_packet_too_long", "rx_bd_out_of_range",
160 "rx_bd_in_range", "rx_parity", "rx_fifo_overrun", "rx_overrun",
161 "rx_bad_packet", "rx_runt_packet", "rx_short_event",
162 "rx_alignment_error", "rx_bad_fcs", "rx_packet_too_long",
163 "rx_out_of_range", "rx_in_range", "tx_dropped", "tx_bd_errors",
164 "tx_bd_bad_fcs", "tx_bd_carrier_loss", "tx_bd_excessive_deferral",
165 "tx_bd_excessive_collisions", "tx_bd_late_collision",
166 "tx_bd_multple_collisions", "tx_bd_single_collision",
167 "tx_bd_underrun", "tx_bd_sqe", "tx_parity", "tx_underrun", "tx_sqe",
171 static irqreturn_t
emac_irq(int irq
, void *dev_instance
);
172 static void emac_clean_tx_ring(struct emac_instance
*dev
);
173 static void __emac_set_multicast_list(struct emac_instance
*dev
);
175 static inline int emac_phy_supports_gige(int phy_mode
)
177 return phy_mode
== PHY_MODE_GMII
||
178 phy_mode
== PHY_MODE_RGMII
||
179 phy_mode
== PHY_MODE_TBI
||
180 phy_mode
== PHY_MODE_RTBI
;
183 static inline int emac_phy_gpcs(int phy_mode
)
185 return phy_mode
== PHY_MODE_TBI
||
186 phy_mode
== PHY_MODE_RTBI
;
189 static inline void emac_tx_enable(struct emac_instance
*dev
)
191 struct emac_regs __iomem
*p
= dev
->emacp
;
194 DBG(dev
, "tx_enable" NL
);
196 r
= in_be32(&p
->mr0
);
197 if (!(r
& EMAC_MR0_TXE
))
198 out_be32(&p
->mr0
, r
| EMAC_MR0_TXE
);
201 static void emac_tx_disable(struct emac_instance
*dev
)
203 struct emac_regs __iomem
*p
= dev
->emacp
;
206 DBG(dev
, "tx_disable" NL
);
208 r
= in_be32(&p
->mr0
);
209 if (r
& EMAC_MR0_TXE
) {
210 int n
= dev
->stop_timeout
;
211 out_be32(&p
->mr0
, r
& ~EMAC_MR0_TXE
);
212 while (!(in_be32(&p
->mr0
) & EMAC_MR0_TXI
) && n
) {
217 emac_report_timeout_error(dev
, "TX disable timeout");
221 static void emac_rx_enable(struct emac_instance
*dev
)
223 struct emac_regs __iomem
*p
= dev
->emacp
;
226 if (unlikely(test_bit(MAL_COMMAC_RX_STOPPED
, &dev
->commac
.flags
)))
229 DBG(dev
, "rx_enable" NL
);
231 r
= in_be32(&p
->mr0
);
232 if (!(r
& EMAC_MR0_RXE
)) {
233 if (unlikely(!(r
& EMAC_MR0_RXI
))) {
234 /* Wait if previous async disable is still in progress */
235 int n
= dev
->stop_timeout
;
236 while (!(r
= in_be32(&p
->mr0
) & EMAC_MR0_RXI
) && n
) {
241 emac_report_timeout_error(dev
,
242 "RX disable timeout");
244 out_be32(&p
->mr0
, r
| EMAC_MR0_RXE
);
250 static void emac_rx_disable(struct emac_instance
*dev
)
252 struct emac_regs __iomem
*p
= dev
->emacp
;
255 DBG(dev
, "rx_disable" NL
);
257 r
= in_be32(&p
->mr0
);
258 if (r
& EMAC_MR0_RXE
) {
259 int n
= dev
->stop_timeout
;
260 out_be32(&p
->mr0
, r
& ~EMAC_MR0_RXE
);
261 while (!(in_be32(&p
->mr0
) & EMAC_MR0_RXI
) && n
) {
266 emac_report_timeout_error(dev
, "RX disable timeout");
270 static inline void emac_netif_stop(struct emac_instance
*dev
)
272 netif_tx_lock_bh(dev
->ndev
);
274 netif_tx_unlock_bh(dev
->ndev
);
275 dev
->ndev
->trans_start
= jiffies
; /* prevent tx timeout */
276 mal_poll_disable(dev
->mal
, &dev
->commac
);
277 netif_tx_disable(dev
->ndev
);
280 static inline void emac_netif_start(struct emac_instance
*dev
)
282 netif_tx_lock_bh(dev
->ndev
);
284 if (dev
->mcast_pending
&& netif_running(dev
->ndev
))
285 __emac_set_multicast_list(dev
);
286 netif_tx_unlock_bh(dev
->ndev
);
288 netif_wake_queue(dev
->ndev
);
290 /* NOTE: unconditional netif_wake_queue is only appropriate
291 * so long as all callers are assured to have free tx slots
292 * (taken from tg3... though the case where that is wrong is
293 * not terribly harmful)
295 mal_poll_enable(dev
->mal
, &dev
->commac
);
298 static inline void emac_rx_disable_async(struct emac_instance
*dev
)
300 struct emac_regs __iomem
*p
= dev
->emacp
;
303 DBG(dev
, "rx_disable_async" NL
);
305 r
= in_be32(&p
->mr0
);
306 if (r
& EMAC_MR0_RXE
)
307 out_be32(&p
->mr0
, r
& ~EMAC_MR0_RXE
);
310 static int emac_reset(struct emac_instance
*dev
)
312 struct emac_regs __iomem
*p
= dev
->emacp
;
315 DBG(dev
, "reset" NL
);
317 if (!dev
->reset_failed
) {
318 /* 40x erratum suggests stopping RX channel before reset,
321 emac_rx_disable(dev
);
322 emac_tx_disable(dev
);
325 out_be32(&p
->mr0
, EMAC_MR0_SRST
);
326 while ((in_be32(&p
->mr0
) & EMAC_MR0_SRST
) && n
)
330 dev
->reset_failed
= 0;
333 emac_report_timeout_error(dev
, "reset timeout");
334 dev
->reset_failed
= 1;
339 static void emac_hash_mc(struct emac_instance
*dev
)
341 struct emac_regs __iomem
*p
= dev
->emacp
;
343 struct dev_mc_list
*dmi
;
345 DBG(dev
, "hash_mc %d" NL
, dev
->ndev
->mc_count
);
347 for (dmi
= dev
->ndev
->mc_list
; dmi
; dmi
= dmi
->next
) {
349 DBG2(dev
, "mc %02x:%02x:%02x:%02x:%02x:%02x" NL
,
350 dmi
->dmi_addr
[0], dmi
->dmi_addr
[1], dmi
->dmi_addr
[2],
351 dmi
->dmi_addr
[3], dmi
->dmi_addr
[4], dmi
->dmi_addr
[5]);
353 bit
= 63 - (ether_crc(ETH_ALEN
, dmi
->dmi_addr
) >> 26);
354 gaht
[bit
>> 4] |= 0x8000 >> (bit
& 0x0f);
356 out_be32(&p
->gaht1
, gaht
[0]);
357 out_be32(&p
->gaht2
, gaht
[1]);
358 out_be32(&p
->gaht3
, gaht
[2]);
359 out_be32(&p
->gaht4
, gaht
[3]);
362 static inline u32
emac_iff2rmr(struct net_device
*ndev
)
364 struct emac_instance
*dev
= netdev_priv(ndev
);
367 r
= EMAC_RMR_SP
| EMAC_RMR_SFCS
| EMAC_RMR_IAE
| EMAC_RMR_BAE
;
369 if (emac_has_feature(dev
, EMAC_FTR_EMAC4
))
374 if (ndev
->flags
& IFF_PROMISC
)
376 else if (ndev
->flags
& IFF_ALLMULTI
|| ndev
->mc_count
> 32)
378 else if (ndev
->mc_count
> 0)
384 static u32
__emac_calc_base_mr1(struct emac_instance
*dev
, int tx_size
, int rx_size
)
386 u32 ret
= EMAC_MR1_VLE
| EMAC_MR1_IST
| EMAC_MR1_TR0_MULT
;
388 DBG2(dev
, "__emac_calc_base_mr1" NL
);
392 ret
|= EMAC_MR1_TFS_2K
;
395 printk(KERN_WARNING
"%s: Unknown Rx FIFO size %d\n",
396 dev
->ndev
->name
, tx_size
);
401 ret
|= EMAC_MR1_RFS_16K
;
404 ret
|= EMAC_MR1_RFS_4K
;
407 printk(KERN_WARNING
"%s: Unknown Rx FIFO size %d\n",
408 dev
->ndev
->name
, rx_size
);
414 static u32
__emac4_calc_base_mr1(struct emac_instance
*dev
, int tx_size
, int rx_size
)
416 u32 ret
= EMAC_MR1_VLE
| EMAC_MR1_IST
| EMAC4_MR1_TR
|
417 EMAC4_MR1_OBCI(dev
->opb_bus_freq
/ 1000000);
419 DBG2(dev
, "__emac4_calc_base_mr1" NL
);
423 ret
|= EMAC4_MR1_TFS_4K
;
426 ret
|= EMAC4_MR1_TFS_2K
;
429 printk(KERN_WARNING
"%s: Unknown Rx FIFO size %d\n",
430 dev
->ndev
->name
, tx_size
);
435 ret
|= EMAC4_MR1_RFS_16K
;
438 ret
|= EMAC4_MR1_RFS_4K
;
441 ret
|= EMAC4_MR1_RFS_2K
;
444 printk(KERN_WARNING
"%s: Unknown Rx FIFO size %d\n",
445 dev
->ndev
->name
, rx_size
);
451 static u32
emac_calc_base_mr1(struct emac_instance
*dev
, int tx_size
, int rx_size
)
453 return emac_has_feature(dev
, EMAC_FTR_EMAC4
) ?
454 __emac4_calc_base_mr1(dev
, tx_size
, rx_size
) :
455 __emac_calc_base_mr1(dev
, tx_size
, rx_size
);
458 static inline u32
emac_calc_trtr(struct emac_instance
*dev
, unsigned int size
)
460 if (emac_has_feature(dev
, EMAC_FTR_EMAC4
))
461 return ((size
>> 6) - 1) << EMAC_TRTR_SHIFT_EMAC4
;
463 return ((size
>> 6) - 1) << EMAC_TRTR_SHIFT
;
466 static inline u32
emac_calc_rwmr(struct emac_instance
*dev
,
467 unsigned int low
, unsigned int high
)
469 if (emac_has_feature(dev
, EMAC_FTR_EMAC4
))
470 return (low
<< 22) | ( (high
& 0x3ff) << 6);
472 return (low
<< 23) | ( (high
& 0x1ff) << 7);
475 static int emac_configure(struct emac_instance
*dev
)
477 struct emac_regs __iomem
*p
= dev
->emacp
;
478 struct net_device
*ndev
= dev
->ndev
;
479 int tx_size
, rx_size
, link
= netif_carrier_ok(dev
->ndev
);
482 DBG(dev
, "configure" NL
);
485 out_be32(&p
->mr1
, in_be32(&p
->mr1
)
486 | EMAC_MR1_FDE
| EMAC_MR1_ILE
);
488 } else if (emac_reset(dev
) < 0)
491 if (emac_has_feature(dev
, EMAC_FTR_HAS_TAH
))
492 tah_reset(dev
->tah_dev
);
494 DBG(dev
, " link = %d duplex = %d, pause = %d, asym_pause = %d\n",
495 link
, dev
->phy
.duplex
, dev
->phy
.pause
, dev
->phy
.asym_pause
);
497 /* Default fifo sizes */
498 tx_size
= dev
->tx_fifo_size
;
499 rx_size
= dev
->rx_fifo_size
;
501 /* No link, force loopback */
503 mr1
= EMAC_MR1_FDE
| EMAC_MR1_ILE
;
505 /* Check for full duplex */
506 else if (dev
->phy
.duplex
== DUPLEX_FULL
)
507 mr1
|= EMAC_MR1_FDE
| EMAC_MR1_MWSW_001
;
509 /* Adjust fifo sizes, mr1 and timeouts based on link speed */
510 dev
->stop_timeout
= STOP_TIMEOUT_10
;
511 switch (dev
->phy
.speed
) {
513 if (emac_phy_gpcs(dev
->phy
.mode
)) {
514 mr1
|= EMAC_MR1_MF_1000GPCS
|
515 EMAC_MR1_MF_IPPA(dev
->phy
.address
);
517 /* Put some arbitrary OUI, Manuf & Rev IDs so we can
518 * identify this GPCS PHY later.
520 out_be32(&p
->ipcr
, 0xdeadbeef);
522 mr1
|= EMAC_MR1_MF_1000
;
524 /* Extended fifo sizes */
525 tx_size
= dev
->tx_fifo_size_gige
;
526 rx_size
= dev
->rx_fifo_size_gige
;
528 if (dev
->ndev
->mtu
> ETH_DATA_LEN
) {
529 if (emac_has_feature(dev
, EMAC_FTR_EMAC4
))
530 mr1
|= EMAC4_MR1_JPSM
;
532 mr1
|= EMAC_MR1_JPSM
;
533 dev
->stop_timeout
= STOP_TIMEOUT_1000_JUMBO
;
535 dev
->stop_timeout
= STOP_TIMEOUT_1000
;
538 mr1
|= EMAC_MR1_MF_100
;
539 dev
->stop_timeout
= STOP_TIMEOUT_100
;
541 default: /* make gcc happy */
545 if (emac_has_feature(dev
, EMAC_FTR_HAS_RGMII
))
546 rgmii_set_speed(dev
->rgmii_dev
, dev
->rgmii_port
,
548 if (emac_has_feature(dev
, EMAC_FTR_HAS_ZMII
))
549 zmii_set_speed(dev
->zmii_dev
, dev
->zmii_port
, dev
->phy
.speed
);
551 /* on 40x erratum forces us to NOT use integrated flow control,
552 * let's hope it works on 44x ;)
554 if (!emac_has_feature(dev
, EMAC_FTR_NO_FLOW_CONTROL_40x
) &&
555 dev
->phy
.duplex
== DUPLEX_FULL
) {
557 mr1
|= EMAC_MR1_EIFC
| EMAC_MR1_APP
;
558 else if (dev
->phy
.asym_pause
)
562 /* Add base settings & fifo sizes & program MR1 */
563 mr1
|= emac_calc_base_mr1(dev
, tx_size
, rx_size
);
564 out_be32(&p
->mr1
, mr1
);
566 /* Set individual MAC address */
567 out_be32(&p
->iahr
, (ndev
->dev_addr
[0] << 8) | ndev
->dev_addr
[1]);
568 out_be32(&p
->ialr
, (ndev
->dev_addr
[2] << 24) |
569 (ndev
->dev_addr
[3] << 16) | (ndev
->dev_addr
[4] << 8) |
572 /* VLAN Tag Protocol ID */
573 out_be32(&p
->vtpid
, 0x8100);
575 /* Receive mode register */
576 r
= emac_iff2rmr(ndev
);
577 if (r
& EMAC_RMR_MAE
)
579 out_be32(&p
->rmr
, r
);
581 /* FIFOs thresholds */
582 if (emac_has_feature(dev
, EMAC_FTR_EMAC4
))
583 r
= EMAC4_TMR1((dev
->mal_burst_size
/ dev
->fifo_entry_size
) + 1,
584 tx_size
/ 2 / dev
->fifo_entry_size
);
586 r
= EMAC_TMR1((dev
->mal_burst_size
/ dev
->fifo_entry_size
) + 1,
587 tx_size
/ 2 / dev
->fifo_entry_size
);
588 out_be32(&p
->tmr1
, r
);
589 out_be32(&p
->trtr
, emac_calc_trtr(dev
, tx_size
/ 2));
591 /* PAUSE frame is sent when RX FIFO reaches its high-water mark,
592 there should be still enough space in FIFO to allow the our link
593 partner time to process this frame and also time to send PAUSE
596 Here is the worst case scenario for the RX FIFO "headroom"
597 (from "The Switch Book") (100Mbps, without preamble, inter-frame gap):
599 1) One maximum-length frame on TX 1522 bytes
600 2) One PAUSE frame time 64 bytes
601 3) PAUSE frame decode time allowance 64 bytes
602 4) One maximum-length frame on RX 1522 bytes
603 5) Round-trip propagation delay of the link (100Mb) 15 bytes
607 I chose to set high-water mark to RX_FIFO_SIZE / 4 (1024 bytes)
608 low-water mark to RX_FIFO_SIZE / 8 (512 bytes)
610 r
= emac_calc_rwmr(dev
, rx_size
/ 8 / dev
->fifo_entry_size
,
611 rx_size
/ 4 / dev
->fifo_entry_size
);
612 out_be32(&p
->rwmr
, r
);
614 /* Set PAUSE timer to the maximum */
615 out_be32(&p
->ptr
, 0xffff);
618 r
= EMAC_ISR_OVR
| EMAC_ISR_BP
| EMAC_ISR_SE
|
619 EMAC_ISR_ALE
| EMAC_ISR_BFCS
| EMAC_ISR_PTLE
| EMAC_ISR_ORE
|
620 EMAC_ISR_IRE
| EMAC_ISR_TE
;
621 if (emac_has_feature(dev
, EMAC_FTR_EMAC4
))
622 r
|= EMAC4_ISR_TXPE
| EMAC4_ISR_RXPE
/* | EMAC4_ISR_TXUE |
624 out_be32(&p
->iser
, r
);
626 /* We need to take GPCS PHY out of isolate mode after EMAC reset */
627 if (emac_phy_gpcs(dev
->phy
.mode
))
628 emac_mii_reset_phy(&dev
->phy
);
630 /* Required for Pause packet support in EMAC */
631 dev_mc_add(ndev
, default_mcast_addr
, sizeof(default_mcast_addr
), 1);
636 static void emac_reinitialize(struct emac_instance
*dev
)
638 DBG(dev
, "reinitialize" NL
);
640 emac_netif_stop(dev
);
641 if (!emac_configure(dev
)) {
645 emac_netif_start(dev
);
648 static void emac_full_tx_reset(struct emac_instance
*dev
)
650 DBG(dev
, "full_tx_reset" NL
);
652 emac_tx_disable(dev
);
653 mal_disable_tx_channel(dev
->mal
, dev
->mal_tx_chan
);
654 emac_clean_tx_ring(dev
);
655 dev
->tx_cnt
= dev
->tx_slot
= dev
->ack_slot
= 0;
659 mal_enable_tx_channel(dev
->mal
, dev
->mal_tx_chan
);
664 static void emac_reset_work(struct work_struct
*work
)
666 struct emac_instance
*dev
= container_of(work
, struct emac_instance
, reset_work
);
668 DBG(dev
, "reset_work" NL
);
670 mutex_lock(&dev
->link_lock
);
672 emac_netif_stop(dev
);
673 emac_full_tx_reset(dev
);
674 emac_netif_start(dev
);
676 mutex_unlock(&dev
->link_lock
);
679 static void emac_tx_timeout(struct net_device
*ndev
)
681 struct emac_instance
*dev
= netdev_priv(ndev
);
683 DBG(dev
, "tx_timeout" NL
);
685 schedule_work(&dev
->reset_work
);
689 static inline int emac_phy_done(struct emac_instance
*dev
, u32 stacr
)
691 int done
= !!(stacr
& EMAC_STACR_OC
);
693 if (emac_has_feature(dev
, EMAC_FTR_STACR_OC_INVERT
))
699 static int __emac_mdio_read(struct emac_instance
*dev
, u8 id
, u8 reg
)
701 struct emac_regs __iomem
*p
= dev
->emacp
;
703 int n
, err
= -ETIMEDOUT
;
705 mutex_lock(&dev
->mdio_lock
);
707 DBG2(dev
, "mdio_read(%02x,%02x)" NL
, id
, reg
);
709 /* Enable proper MDIO port */
710 if (emac_has_feature(dev
, EMAC_FTR_HAS_ZMII
))
711 zmii_get_mdio(dev
->zmii_dev
, dev
->zmii_port
);
712 if (emac_has_feature(dev
, EMAC_FTR_HAS_RGMII
))
713 rgmii_get_mdio(dev
->rgmii_dev
, dev
->rgmii_port
);
715 /* Wait for management interface to become idle */
717 while (!emac_phy_done(dev
, in_be32(&p
->stacr
))) {
720 DBG2(dev
, " -> timeout wait idle\n");
725 /* Issue read command */
726 if (emac_has_feature(dev
, EMAC_FTR_EMAC4
))
727 r
= EMAC4_STACR_BASE(dev
->opb_bus_freq
);
729 r
= EMAC_STACR_BASE(dev
->opb_bus_freq
);
730 if (emac_has_feature(dev
, EMAC_FTR_STACR_OC_INVERT
))
732 if (emac_has_feature(dev
, EMAC_FTR_HAS_NEW_STACR
))
733 r
|= EMACX_STACR_STAC_READ
;
735 r
|= EMAC_STACR_STAC_READ
;
736 r
|= (reg
& EMAC_STACR_PRA_MASK
)
737 | ((id
& EMAC_STACR_PCDA_MASK
) << EMAC_STACR_PCDA_SHIFT
);
738 out_be32(&p
->stacr
, r
);
740 /* Wait for read to complete */
742 while (!emac_phy_done(dev
, (r
= in_be32(&p
->stacr
)))) {
745 DBG2(dev
, " -> timeout wait complete\n");
750 if (unlikely(r
& EMAC_STACR_PHYE
)) {
751 DBG(dev
, "mdio_read(%02x, %02x) failed" NL
, id
, reg
);
756 r
= ((r
>> EMAC_STACR_PHYD_SHIFT
) & EMAC_STACR_PHYD_MASK
);
758 DBG2(dev
, "mdio_read -> %04x" NL
, r
);
761 if (emac_has_feature(dev
, EMAC_FTR_HAS_RGMII
))
762 rgmii_put_mdio(dev
->rgmii_dev
, dev
->rgmii_port
);
763 if (emac_has_feature(dev
, EMAC_FTR_HAS_ZMII
))
764 zmii_put_mdio(dev
->zmii_dev
, dev
->zmii_port
);
765 mutex_unlock(&dev
->mdio_lock
);
767 return err
== 0 ? r
: err
;
770 static void __emac_mdio_write(struct emac_instance
*dev
, u8 id
, u8 reg
,
773 struct emac_regs __iomem
*p
= dev
->emacp
;
775 int n
, err
= -ETIMEDOUT
;
777 mutex_lock(&dev
->mdio_lock
);
779 DBG2(dev
, "mdio_write(%02x,%02x,%04x)" NL
, id
, reg
, val
);
781 /* Enable proper MDIO port */
782 if (emac_has_feature(dev
, EMAC_FTR_HAS_ZMII
))
783 zmii_get_mdio(dev
->zmii_dev
, dev
->zmii_port
);
784 if (emac_has_feature(dev
, EMAC_FTR_HAS_RGMII
))
785 rgmii_get_mdio(dev
->rgmii_dev
, dev
->rgmii_port
);
787 /* Wait for management interface to be idle */
789 while (!emac_phy_done(dev
, in_be32(&p
->stacr
))) {
792 DBG2(dev
, " -> timeout wait idle\n");
797 /* Issue write command */
798 if (emac_has_feature(dev
, EMAC_FTR_EMAC4
))
799 r
= EMAC4_STACR_BASE(dev
->opb_bus_freq
);
801 r
= EMAC_STACR_BASE(dev
->opb_bus_freq
);
802 if (emac_has_feature(dev
, EMAC_FTR_STACR_OC_INVERT
))
804 if (emac_has_feature(dev
, EMAC_FTR_HAS_NEW_STACR
))
805 r
|= EMACX_STACR_STAC_WRITE
;
807 r
|= EMAC_STACR_STAC_WRITE
;
808 r
|= (reg
& EMAC_STACR_PRA_MASK
) |
809 ((id
& EMAC_STACR_PCDA_MASK
) << EMAC_STACR_PCDA_SHIFT
) |
810 (val
<< EMAC_STACR_PHYD_SHIFT
);
811 out_be32(&p
->stacr
, r
);
813 /* Wait for write to complete */
815 while (!emac_phy_done(dev
, in_be32(&p
->stacr
))) {
818 DBG2(dev
, " -> timeout wait complete\n");
824 if (emac_has_feature(dev
, EMAC_FTR_HAS_RGMII
))
825 rgmii_put_mdio(dev
->rgmii_dev
, dev
->rgmii_port
);
826 if (emac_has_feature(dev
, EMAC_FTR_HAS_ZMII
))
827 zmii_put_mdio(dev
->zmii_dev
, dev
->zmii_port
);
828 mutex_unlock(&dev
->mdio_lock
);
831 static int emac_mdio_read(struct net_device
*ndev
, int id
, int reg
)
833 struct emac_instance
*dev
= netdev_priv(ndev
);
836 res
= __emac_mdio_read(dev
->mdio_instance
? dev
->mdio_instance
: dev
,
841 static void emac_mdio_write(struct net_device
*ndev
, int id
, int reg
, int val
)
843 struct emac_instance
*dev
= netdev_priv(ndev
);
845 __emac_mdio_write(dev
->mdio_instance
? dev
->mdio_instance
: dev
,
846 (u8
) id
, (u8
) reg
, (u16
) val
);
850 static void __emac_set_multicast_list(struct emac_instance
*dev
)
852 struct emac_regs __iomem
*p
= dev
->emacp
;
853 u32 rmr
= emac_iff2rmr(dev
->ndev
);
855 DBG(dev
, "__multicast %08x" NL
, rmr
);
857 /* I decided to relax register access rules here to avoid
860 * There is a real problem with EMAC4 core if we use MWSW_001 bit
861 * in MR1 register and do a full EMAC reset.
862 * One TX BD status update is delayed and, after EMAC reset, it
863 * never happens, resulting in TX hung (it'll be recovered by TX
864 * timeout handler eventually, but this is just gross).
865 * So we either have to do full TX reset or try to cheat here :)
867 * The only required change is to RX mode register, so I *think* all
868 * we need is just to stop RX channel. This seems to work on all
871 * If we need the full reset, we might just trigger the workqueue
872 * and do it async... a bit nasty but should work --BenH
874 dev
->mcast_pending
= 0;
875 emac_rx_disable(dev
);
876 if (rmr
& EMAC_RMR_MAE
)
878 out_be32(&p
->rmr
, rmr
);
883 static void emac_set_multicast_list(struct net_device
*ndev
)
885 struct emac_instance
*dev
= netdev_priv(ndev
);
887 DBG(dev
, "multicast" NL
);
889 BUG_ON(!netif_running(dev
->ndev
));
892 dev
->mcast_pending
= 1;
895 __emac_set_multicast_list(dev
);
898 static int emac_resize_rx_ring(struct emac_instance
*dev
, int new_mtu
)
900 int rx_sync_size
= emac_rx_sync_size(new_mtu
);
901 int rx_skb_size
= emac_rx_skb_size(new_mtu
);
904 mutex_lock(&dev
->link_lock
);
905 emac_netif_stop(dev
);
906 emac_rx_disable(dev
);
907 mal_disable_rx_channel(dev
->mal
, dev
->mal_rx_chan
);
909 if (dev
->rx_sg_skb
) {
910 ++dev
->estats
.rx_dropped_resize
;
911 dev_kfree_skb(dev
->rx_sg_skb
);
912 dev
->rx_sg_skb
= NULL
;
915 /* Make a first pass over RX ring and mark BDs ready, dropping
916 * non-processed packets on the way. We need this as a separate pass
917 * to simplify error recovery in the case of allocation failure later.
919 for (i
= 0; i
< NUM_RX_BUFF
; ++i
) {
920 if (dev
->rx_desc
[i
].ctrl
& MAL_RX_CTRL_FIRST
)
921 ++dev
->estats
.rx_dropped_resize
;
923 dev
->rx_desc
[i
].data_len
= 0;
924 dev
->rx_desc
[i
].ctrl
= MAL_RX_CTRL_EMPTY
|
925 (i
== (NUM_RX_BUFF
- 1) ? MAL_RX_CTRL_WRAP
: 0);
928 /* Reallocate RX ring only if bigger skb buffers are required */
929 if (rx_skb_size
<= dev
->rx_skb_size
)
932 /* Second pass, allocate new skbs */
933 for (i
= 0; i
< NUM_RX_BUFF
; ++i
) {
934 struct sk_buff
*skb
= alloc_skb(rx_skb_size
, GFP_ATOMIC
);
940 BUG_ON(!dev
->rx_skb
[i
]);
941 dev_kfree_skb(dev
->rx_skb
[i
]);
943 skb_reserve(skb
, EMAC_RX_SKB_HEADROOM
+ 2);
944 dev
->rx_desc
[i
].data_ptr
=
945 dma_map_single(&dev
->ofdev
->dev
, skb
->data
- 2, rx_sync_size
,
946 DMA_FROM_DEVICE
) + 2;
947 dev
->rx_skb
[i
] = skb
;
950 /* Check if we need to change "Jumbo" bit in MR1 */
951 if ((new_mtu
> ETH_DATA_LEN
) ^ (dev
->ndev
->mtu
> ETH_DATA_LEN
)) {
952 /* This is to prevent starting RX channel in emac_rx_enable() */
953 set_bit(MAL_COMMAC_RX_STOPPED
, &dev
->commac
.flags
);
955 dev
->ndev
->mtu
= new_mtu
;
956 emac_full_tx_reset(dev
);
959 mal_set_rcbs(dev
->mal
, dev
->mal_rx_chan
, emac_rx_size(new_mtu
));
962 clear_bit(MAL_COMMAC_RX_STOPPED
, &dev
->commac
.flags
);
964 mal_enable_rx_channel(dev
->mal
, dev
->mal_rx_chan
);
966 emac_netif_start(dev
);
967 mutex_unlock(&dev
->link_lock
);
972 /* Process ctx, rtnl_lock semaphore */
973 static int emac_change_mtu(struct net_device
*ndev
, int new_mtu
)
975 struct emac_instance
*dev
= netdev_priv(ndev
);
978 if (new_mtu
< EMAC_MIN_MTU
|| new_mtu
> dev
->max_mtu
)
981 DBG(dev
, "change_mtu(%d)" NL
, new_mtu
);
983 if (netif_running(ndev
)) {
984 /* Check if we really need to reinitalize RX ring */
985 if (emac_rx_skb_size(ndev
->mtu
) != emac_rx_skb_size(new_mtu
))
986 ret
= emac_resize_rx_ring(dev
, new_mtu
);
991 dev
->rx_skb_size
= emac_rx_skb_size(new_mtu
);
992 dev
->rx_sync_size
= emac_rx_sync_size(new_mtu
);
998 static void emac_clean_tx_ring(struct emac_instance
*dev
)
1002 for (i
= 0; i
< NUM_TX_BUFF
; ++i
) {
1003 if (dev
->tx_skb
[i
]) {
1004 dev_kfree_skb(dev
->tx_skb
[i
]);
1005 dev
->tx_skb
[i
] = NULL
;
1006 if (dev
->tx_desc
[i
].ctrl
& MAL_TX_CTRL_READY
)
1007 ++dev
->estats
.tx_dropped
;
1009 dev
->tx_desc
[i
].ctrl
= 0;
1010 dev
->tx_desc
[i
].data_ptr
= 0;
1014 static void emac_clean_rx_ring(struct emac_instance
*dev
)
1018 for (i
= 0; i
< NUM_RX_BUFF
; ++i
)
1019 if (dev
->rx_skb
[i
]) {
1020 dev
->rx_desc
[i
].ctrl
= 0;
1021 dev_kfree_skb(dev
->rx_skb
[i
]);
1022 dev
->rx_skb
[i
] = NULL
;
1023 dev
->rx_desc
[i
].data_ptr
= 0;
1026 if (dev
->rx_sg_skb
) {
1027 dev_kfree_skb(dev
->rx_sg_skb
);
1028 dev
->rx_sg_skb
= NULL
;
1032 static inline int emac_alloc_rx_skb(struct emac_instance
*dev
, int slot
,
1035 struct sk_buff
*skb
= alloc_skb(dev
->rx_skb_size
, flags
);
1039 dev
->rx_skb
[slot
] = skb
;
1040 dev
->rx_desc
[slot
].data_len
= 0;
1042 skb_reserve(skb
, EMAC_RX_SKB_HEADROOM
+ 2);
1043 dev
->rx_desc
[slot
].data_ptr
=
1044 dma_map_single(&dev
->ofdev
->dev
, skb
->data
- 2, dev
->rx_sync_size
,
1045 DMA_FROM_DEVICE
) + 2;
1047 dev
->rx_desc
[slot
].ctrl
= MAL_RX_CTRL_EMPTY
|
1048 (slot
== (NUM_RX_BUFF
- 1) ? MAL_RX_CTRL_WRAP
: 0);
1053 static void emac_print_link_status(struct emac_instance
*dev
)
1055 if (netif_carrier_ok(dev
->ndev
))
1056 printk(KERN_INFO
"%s: link is up, %d %s%s\n",
1057 dev
->ndev
->name
, dev
->phy
.speed
,
1058 dev
->phy
.duplex
== DUPLEX_FULL
? "FDX" : "HDX",
1059 dev
->phy
.pause
? ", pause enabled" :
1060 dev
->phy
.asym_pause
? ", asymmetric pause enabled" : "");
1062 printk(KERN_INFO
"%s: link is down\n", dev
->ndev
->name
);
1065 /* Process ctx, rtnl_lock semaphore */
1066 static int emac_open(struct net_device
*ndev
)
1068 struct emac_instance
*dev
= netdev_priv(ndev
);
1071 DBG(dev
, "open" NL
);
1073 /* Setup error IRQ handler */
1074 err
= request_irq(dev
->emac_irq
, emac_irq
, 0, "EMAC", dev
);
1076 printk(KERN_ERR
"%s: failed to request IRQ %d\n",
1077 ndev
->name
, dev
->emac_irq
);
1081 /* Allocate RX ring */
1082 for (i
= 0; i
< NUM_RX_BUFF
; ++i
)
1083 if (emac_alloc_rx_skb(dev
, i
, GFP_KERNEL
)) {
1084 printk(KERN_ERR
"%s: failed to allocate RX ring\n",
1089 dev
->tx_cnt
= dev
->tx_slot
= dev
->ack_slot
= dev
->rx_slot
= 0;
1090 clear_bit(MAL_COMMAC_RX_STOPPED
, &dev
->commac
.flags
);
1091 dev
->rx_sg_skb
= NULL
;
1093 mutex_lock(&dev
->link_lock
);
1096 /* Start PHY polling now.
1098 if (dev
->phy
.address
>= 0) {
1099 int link_poll_interval
;
1100 if (dev
->phy
.def
->ops
->poll_link(&dev
->phy
)) {
1101 dev
->phy
.def
->ops
->read_link(&dev
->phy
);
1102 netif_carrier_on(dev
->ndev
);
1103 link_poll_interval
= PHY_POLL_LINK_ON
;
1105 netif_carrier_off(dev
->ndev
);
1106 link_poll_interval
= PHY_POLL_LINK_OFF
;
1108 dev
->link_polling
= 1;
1110 schedule_delayed_work(&dev
->link_work
, link_poll_interval
);
1111 emac_print_link_status(dev
);
1113 netif_carrier_on(dev
->ndev
);
1115 emac_configure(dev
);
1116 mal_poll_add(dev
->mal
, &dev
->commac
);
1117 mal_enable_tx_channel(dev
->mal
, dev
->mal_tx_chan
);
1118 mal_set_rcbs(dev
->mal
, dev
->mal_rx_chan
, emac_rx_size(ndev
->mtu
));
1119 mal_enable_rx_channel(dev
->mal
, dev
->mal_rx_chan
);
1120 emac_tx_enable(dev
);
1121 emac_rx_enable(dev
);
1122 emac_netif_start(dev
);
1124 mutex_unlock(&dev
->link_lock
);
1128 emac_clean_rx_ring(dev
);
1129 free_irq(dev
->emac_irq
, dev
);
1136 static int emac_link_differs(struct emac_instance
*dev
)
1138 u32 r
= in_be32(&dev
->emacp
->mr1
);
1140 int duplex
= r
& EMAC_MR1_FDE
? DUPLEX_FULL
: DUPLEX_HALF
;
1141 int speed
, pause
, asym_pause
;
1143 if (r
& EMAC_MR1_MF_1000
)
1145 else if (r
& EMAC_MR1_MF_100
)
1150 switch (r
& (EMAC_MR1_EIFC
| EMAC_MR1_APP
)) {
1151 case (EMAC_MR1_EIFC
| EMAC_MR1_APP
):
1160 pause
= asym_pause
= 0;
1162 return speed
!= dev
->phy
.speed
|| duplex
!= dev
->phy
.duplex
||
1163 pause
!= dev
->phy
.pause
|| asym_pause
!= dev
->phy
.asym_pause
;
1167 static void emac_link_timer(struct work_struct
*work
)
1169 struct emac_instance
*dev
=
1170 container_of((struct delayed_work
*)work
,
1171 struct emac_instance
, link_work
);
1172 int link_poll_interval
;
1174 mutex_lock(&dev
->link_lock
);
1175 DBG2(dev
, "link timer" NL
);
1180 if (dev
->phy
.def
->ops
->poll_link(&dev
->phy
)) {
1181 if (!netif_carrier_ok(dev
->ndev
)) {
1182 /* Get new link parameters */
1183 dev
->phy
.def
->ops
->read_link(&dev
->phy
);
1185 netif_carrier_on(dev
->ndev
);
1186 emac_netif_stop(dev
);
1187 emac_full_tx_reset(dev
);
1188 emac_netif_start(dev
);
1189 emac_print_link_status(dev
);
1191 link_poll_interval
= PHY_POLL_LINK_ON
;
1193 if (netif_carrier_ok(dev
->ndev
)) {
1194 netif_carrier_off(dev
->ndev
);
1195 netif_tx_disable(dev
->ndev
);
1196 emac_reinitialize(dev
);
1197 emac_print_link_status(dev
);
1199 link_poll_interval
= PHY_POLL_LINK_OFF
;
1201 schedule_delayed_work(&dev
->link_work
, link_poll_interval
);
1203 mutex_unlock(&dev
->link_lock
);
1206 static void emac_force_link_update(struct emac_instance
*dev
)
1208 netif_carrier_off(dev
->ndev
);
1210 if (dev
->link_polling
) {
1211 cancel_rearming_delayed_work(&dev
->link_work
);
1212 if (dev
->link_polling
)
1213 schedule_delayed_work(&dev
->link_work
, PHY_POLL_LINK_OFF
);
1217 /* Process ctx, rtnl_lock semaphore */
1218 static int emac_close(struct net_device
*ndev
)
1220 struct emac_instance
*dev
= netdev_priv(ndev
);
1222 DBG(dev
, "close" NL
);
1224 if (dev
->phy
.address
>= 0) {
1225 dev
->link_polling
= 0;
1226 cancel_rearming_delayed_work(&dev
->link_work
);
1228 mutex_lock(&dev
->link_lock
);
1229 emac_netif_stop(dev
);
1231 mutex_unlock(&dev
->link_lock
);
1233 emac_rx_disable(dev
);
1234 emac_tx_disable(dev
);
1235 mal_disable_rx_channel(dev
->mal
, dev
->mal_rx_chan
);
1236 mal_disable_tx_channel(dev
->mal
, dev
->mal_tx_chan
);
1237 mal_poll_del(dev
->mal
, &dev
->commac
);
1239 emac_clean_tx_ring(dev
);
1240 emac_clean_rx_ring(dev
);
1242 free_irq(dev
->emac_irq
, dev
);
1247 static inline u16
emac_tx_csum(struct emac_instance
*dev
,
1248 struct sk_buff
*skb
)
1250 if (emac_has_feature(dev
, EMAC_FTR_HAS_TAH
) &&
1251 (skb
->ip_summed
== CHECKSUM_PARTIAL
)) {
1252 ++dev
->stats
.tx_packets_csum
;
1253 return EMAC_TX_CTRL_TAH_CSUM
;
1258 static inline int emac_xmit_finish(struct emac_instance
*dev
, int len
)
1260 struct emac_regs __iomem
*p
= dev
->emacp
;
1261 struct net_device
*ndev
= dev
->ndev
;
1263 /* Send the packet out. If the if makes a significant perf
1264 * difference, then we can store the TMR0 value in "dev"
1267 if (emac_has_feature(dev
, EMAC_FTR_EMAC4
))
1268 out_be32(&p
->tmr0
, EMAC4_TMR0_XMIT
);
1270 out_be32(&p
->tmr0
, EMAC_TMR0_XMIT
);
1272 if (unlikely(++dev
->tx_cnt
== NUM_TX_BUFF
)) {
1273 netif_stop_queue(ndev
);
1274 DBG2(dev
, "stopped TX queue" NL
);
1277 ndev
->trans_start
= jiffies
;
1278 ++dev
->stats
.tx_packets
;
1279 dev
->stats
.tx_bytes
+= len
;
1285 static int emac_start_xmit(struct sk_buff
*skb
, struct net_device
*ndev
)
1287 struct emac_instance
*dev
= netdev_priv(ndev
);
1288 unsigned int len
= skb
->len
;
1291 u16 ctrl
= EMAC_TX_CTRL_GFCS
| EMAC_TX_CTRL_GP
| MAL_TX_CTRL_READY
|
1292 MAL_TX_CTRL_LAST
| emac_tx_csum(dev
, skb
);
1294 slot
= dev
->tx_slot
++;
1295 if (dev
->tx_slot
== NUM_TX_BUFF
) {
1297 ctrl
|= MAL_TX_CTRL_WRAP
;
1300 DBG2(dev
, "xmit(%u) %d" NL
, len
, slot
);
1302 dev
->tx_skb
[slot
] = skb
;
1303 dev
->tx_desc
[slot
].data_ptr
= dma_map_single(&dev
->ofdev
->dev
,
1306 dev
->tx_desc
[slot
].data_len
= (u16
) len
;
1308 dev
->tx_desc
[slot
].ctrl
= ctrl
;
1310 return emac_xmit_finish(dev
, len
);
1313 static inline int emac_xmit_split(struct emac_instance
*dev
, int slot
,
1314 u32 pd
, int len
, int last
, u16 base_ctrl
)
1317 u16 ctrl
= base_ctrl
;
1318 int chunk
= min(len
, MAL_MAX_TX_SIZE
);
1321 slot
= (slot
+ 1) % NUM_TX_BUFF
;
1324 ctrl
|= MAL_TX_CTRL_LAST
;
1325 if (slot
== NUM_TX_BUFF
- 1)
1326 ctrl
|= MAL_TX_CTRL_WRAP
;
1328 dev
->tx_skb
[slot
] = NULL
;
1329 dev
->tx_desc
[slot
].data_ptr
= pd
;
1330 dev
->tx_desc
[slot
].data_len
= (u16
) chunk
;
1331 dev
->tx_desc
[slot
].ctrl
= ctrl
;
1342 /* Tx lock BH disabled (SG version for TAH equipped EMACs) */
1343 static int emac_start_xmit_sg(struct sk_buff
*skb
, struct net_device
*ndev
)
1345 struct emac_instance
*dev
= netdev_priv(ndev
);
1346 int nr_frags
= skb_shinfo(skb
)->nr_frags
;
1347 int len
= skb
->len
, chunk
;
1352 /* This is common "fast" path */
1353 if (likely(!nr_frags
&& len
<= MAL_MAX_TX_SIZE
))
1354 return emac_start_xmit(skb
, ndev
);
1356 len
-= skb
->data_len
;
1358 /* Note, this is only an *estimation*, we can still run out of empty
1359 * slots because of the additional fragmentation into
1360 * MAL_MAX_TX_SIZE-sized chunks
1362 if (unlikely(dev
->tx_cnt
+ nr_frags
+ mal_tx_chunks(len
) > NUM_TX_BUFF
))
1365 ctrl
= EMAC_TX_CTRL_GFCS
| EMAC_TX_CTRL_GP
| MAL_TX_CTRL_READY
|
1366 emac_tx_csum(dev
, skb
);
1367 slot
= dev
->tx_slot
;
1370 dev
->tx_skb
[slot
] = NULL
;
1371 chunk
= min(len
, MAL_MAX_TX_SIZE
);
1372 dev
->tx_desc
[slot
].data_ptr
= pd
=
1373 dma_map_single(&dev
->ofdev
->dev
, skb
->data
, len
, DMA_TO_DEVICE
);
1374 dev
->tx_desc
[slot
].data_len
= (u16
) chunk
;
1377 slot
= emac_xmit_split(dev
, slot
, pd
+ chunk
, len
, !nr_frags
,
1380 for (i
= 0; i
< nr_frags
; ++i
) {
1381 struct skb_frag_struct
*frag
= &skb_shinfo(skb
)->frags
[i
];
1384 if (unlikely(dev
->tx_cnt
+ mal_tx_chunks(len
) >= NUM_TX_BUFF
))
1387 pd
= dma_map_page(&dev
->ofdev
->dev
, frag
->page
, frag
->page_offset
, len
,
1390 slot
= emac_xmit_split(dev
, slot
, pd
, len
, i
== nr_frags
- 1,
1394 DBG2(dev
, "xmit_sg(%u) %d - %d" NL
, skb
->len
, dev
->tx_slot
, slot
);
1396 /* Attach skb to the last slot so we don't release it too early */
1397 dev
->tx_skb
[slot
] = skb
;
1399 /* Send the packet out */
1400 if (dev
->tx_slot
== NUM_TX_BUFF
- 1)
1401 ctrl
|= MAL_TX_CTRL_WRAP
;
1403 dev
->tx_desc
[dev
->tx_slot
].ctrl
= ctrl
;
1404 dev
->tx_slot
= (slot
+ 1) % NUM_TX_BUFF
;
1406 return emac_xmit_finish(dev
, skb
->len
);
1409 /* Well, too bad. Our previous estimation was overly optimistic.
1412 while (slot
!= dev
->tx_slot
) {
1413 dev
->tx_desc
[slot
].ctrl
= 0;
1416 slot
= NUM_TX_BUFF
- 1;
1418 ++dev
->estats
.tx_undo
;
1421 netif_stop_queue(ndev
);
1422 DBG2(dev
, "stopped TX queue" NL
);
1427 static void emac_parse_tx_error(struct emac_instance
*dev
, u16 ctrl
)
1429 struct emac_error_stats
*st
= &dev
->estats
;
1431 DBG(dev
, "BD TX error %04x" NL
, ctrl
);
1434 if (ctrl
& EMAC_TX_ST_BFCS
)
1435 ++st
->tx_bd_bad_fcs
;
1436 if (ctrl
& EMAC_TX_ST_LCS
)
1437 ++st
->tx_bd_carrier_loss
;
1438 if (ctrl
& EMAC_TX_ST_ED
)
1439 ++st
->tx_bd_excessive_deferral
;
1440 if (ctrl
& EMAC_TX_ST_EC
)
1441 ++st
->tx_bd_excessive_collisions
;
1442 if (ctrl
& EMAC_TX_ST_LC
)
1443 ++st
->tx_bd_late_collision
;
1444 if (ctrl
& EMAC_TX_ST_MC
)
1445 ++st
->tx_bd_multple_collisions
;
1446 if (ctrl
& EMAC_TX_ST_SC
)
1447 ++st
->tx_bd_single_collision
;
1448 if (ctrl
& EMAC_TX_ST_UR
)
1449 ++st
->tx_bd_underrun
;
1450 if (ctrl
& EMAC_TX_ST_SQE
)
1454 static void emac_poll_tx(void *param
)
1456 struct emac_instance
*dev
= param
;
1459 DBG2(dev
, "poll_tx, %d %d" NL
, dev
->tx_cnt
, dev
->ack_slot
);
1461 if (emac_has_feature(dev
, EMAC_FTR_HAS_TAH
))
1462 bad_mask
= EMAC_IS_BAD_TX_TAH
;
1464 bad_mask
= EMAC_IS_BAD_TX
;
1466 netif_tx_lock_bh(dev
->ndev
);
1469 int slot
= dev
->ack_slot
, n
= 0;
1471 ctrl
= dev
->tx_desc
[slot
].ctrl
;
1472 if (!(ctrl
& MAL_TX_CTRL_READY
)) {
1473 struct sk_buff
*skb
= dev
->tx_skb
[slot
];
1478 dev
->tx_skb
[slot
] = NULL
;
1480 slot
= (slot
+ 1) % NUM_TX_BUFF
;
1482 if (unlikely(ctrl
& bad_mask
))
1483 emac_parse_tx_error(dev
, ctrl
);
1489 dev
->ack_slot
= slot
;
1490 if (netif_queue_stopped(dev
->ndev
) &&
1491 dev
->tx_cnt
< EMAC_TX_WAKEUP_THRESH
)
1492 netif_wake_queue(dev
->ndev
);
1494 DBG2(dev
, "tx %d pkts" NL
, n
);
1497 netif_tx_unlock_bh(dev
->ndev
);
1500 static inline void emac_recycle_rx_skb(struct emac_instance
*dev
, int slot
,
1503 struct sk_buff
*skb
= dev
->rx_skb
[slot
];
1505 DBG2(dev
, "recycle %d %d" NL
, slot
, len
);
1508 dma_map_single(&dev
->ofdev
->dev
, skb
->data
- 2,
1509 EMAC_DMA_ALIGN(len
+ 2), DMA_FROM_DEVICE
);
1511 dev
->rx_desc
[slot
].data_len
= 0;
1513 dev
->rx_desc
[slot
].ctrl
= MAL_RX_CTRL_EMPTY
|
1514 (slot
== (NUM_RX_BUFF
- 1) ? MAL_RX_CTRL_WRAP
: 0);
1517 static void emac_parse_rx_error(struct emac_instance
*dev
, u16 ctrl
)
1519 struct emac_error_stats
*st
= &dev
->estats
;
1521 DBG(dev
, "BD RX error %04x" NL
, ctrl
);
1524 if (ctrl
& EMAC_RX_ST_OE
)
1525 ++st
->rx_bd_overrun
;
1526 if (ctrl
& EMAC_RX_ST_BP
)
1527 ++st
->rx_bd_bad_packet
;
1528 if (ctrl
& EMAC_RX_ST_RP
)
1529 ++st
->rx_bd_runt_packet
;
1530 if (ctrl
& EMAC_RX_ST_SE
)
1531 ++st
->rx_bd_short_event
;
1532 if (ctrl
& EMAC_RX_ST_AE
)
1533 ++st
->rx_bd_alignment_error
;
1534 if (ctrl
& EMAC_RX_ST_BFCS
)
1535 ++st
->rx_bd_bad_fcs
;
1536 if (ctrl
& EMAC_RX_ST_PTL
)
1537 ++st
->rx_bd_packet_too_long
;
1538 if (ctrl
& EMAC_RX_ST_ORE
)
1539 ++st
->rx_bd_out_of_range
;
1540 if (ctrl
& EMAC_RX_ST_IRE
)
1541 ++st
->rx_bd_in_range
;
1544 static inline void emac_rx_csum(struct emac_instance
*dev
,
1545 struct sk_buff
*skb
, u16 ctrl
)
1547 #ifdef CONFIG_IBM_NEW_EMAC_TAH
1548 if (!ctrl
&& dev
->tah_dev
) {
1549 skb
->ip_summed
= CHECKSUM_UNNECESSARY
;
1550 ++dev
->stats
.rx_packets_csum
;
1555 static inline int emac_rx_sg_append(struct emac_instance
*dev
, int slot
)
1557 if (likely(dev
->rx_sg_skb
!= NULL
)) {
1558 int len
= dev
->rx_desc
[slot
].data_len
;
1559 int tot_len
= dev
->rx_sg_skb
->len
+ len
;
1561 if (unlikely(tot_len
+ 2 > dev
->rx_skb_size
)) {
1562 ++dev
->estats
.rx_dropped_mtu
;
1563 dev_kfree_skb(dev
->rx_sg_skb
);
1564 dev
->rx_sg_skb
= NULL
;
1566 cacheable_memcpy(skb_tail_pointer(dev
->rx_sg_skb
),
1567 dev
->rx_skb
[slot
]->data
, len
);
1568 skb_put(dev
->rx_sg_skb
, len
);
1569 emac_recycle_rx_skb(dev
, slot
, len
);
1573 emac_recycle_rx_skb(dev
, slot
, 0);
1577 /* NAPI poll context */
1578 static int emac_poll_rx(void *param
, int budget
)
1580 struct emac_instance
*dev
= param
;
1581 int slot
= dev
->rx_slot
, received
= 0;
1583 DBG2(dev
, "poll_rx(%d)" NL
, budget
);
1586 while (budget
> 0) {
1588 struct sk_buff
*skb
;
1589 u16 ctrl
= dev
->rx_desc
[slot
].ctrl
;
1591 if (ctrl
& MAL_RX_CTRL_EMPTY
)
1594 skb
= dev
->rx_skb
[slot
];
1596 len
= dev
->rx_desc
[slot
].data_len
;
1598 if (unlikely(!MAL_IS_SINGLE_RX(ctrl
)))
1601 ctrl
&= EMAC_BAD_RX_MASK
;
1602 if (unlikely(ctrl
&& ctrl
!= EMAC_RX_TAH_BAD_CSUM
)) {
1603 emac_parse_rx_error(dev
, ctrl
);
1604 ++dev
->estats
.rx_dropped_error
;
1605 emac_recycle_rx_skb(dev
, slot
, 0);
1610 if (len
&& len
< EMAC_RX_COPY_THRESH
) {
1611 struct sk_buff
*copy_skb
=
1612 alloc_skb(len
+ EMAC_RX_SKB_HEADROOM
+ 2, GFP_ATOMIC
);
1613 if (unlikely(!copy_skb
))
1616 skb_reserve(copy_skb
, EMAC_RX_SKB_HEADROOM
+ 2);
1617 cacheable_memcpy(copy_skb
->data
- 2, skb
->data
- 2,
1619 emac_recycle_rx_skb(dev
, slot
, len
);
1621 } else if (unlikely(emac_alloc_rx_skb(dev
, slot
, GFP_ATOMIC
)))
1626 skb
->dev
= dev
->ndev
;
1627 skb
->protocol
= eth_type_trans(skb
, dev
->ndev
);
1628 emac_rx_csum(dev
, skb
, ctrl
);
1630 if (unlikely(netif_receive_skb(skb
) == NET_RX_DROP
))
1631 ++dev
->estats
.rx_dropped_stack
;
1633 ++dev
->stats
.rx_packets
;
1635 dev
->stats
.rx_bytes
+= len
;
1636 slot
= (slot
+ 1) % NUM_RX_BUFF
;
1641 if (ctrl
& MAL_RX_CTRL_FIRST
) {
1642 BUG_ON(dev
->rx_sg_skb
);
1643 if (unlikely(emac_alloc_rx_skb(dev
, slot
, GFP_ATOMIC
))) {
1644 DBG(dev
, "rx OOM %d" NL
, slot
);
1645 ++dev
->estats
.rx_dropped_oom
;
1646 emac_recycle_rx_skb(dev
, slot
, 0);
1648 dev
->rx_sg_skb
= skb
;
1651 } else if (!emac_rx_sg_append(dev
, slot
) &&
1652 (ctrl
& MAL_RX_CTRL_LAST
)) {
1654 skb
= dev
->rx_sg_skb
;
1655 dev
->rx_sg_skb
= NULL
;
1657 ctrl
&= EMAC_BAD_RX_MASK
;
1658 if (unlikely(ctrl
&& ctrl
!= EMAC_RX_TAH_BAD_CSUM
)) {
1659 emac_parse_rx_error(dev
, ctrl
);
1660 ++dev
->estats
.rx_dropped_error
;
1668 DBG(dev
, "rx OOM %d" NL
, slot
);
1669 /* Drop the packet and recycle skb */
1670 ++dev
->estats
.rx_dropped_oom
;
1671 emac_recycle_rx_skb(dev
, slot
, 0);
1676 DBG2(dev
, "rx %d BDs" NL
, received
);
1677 dev
->rx_slot
= slot
;
1680 if (unlikely(budget
&& test_bit(MAL_COMMAC_RX_STOPPED
, &dev
->commac
.flags
))) {
1682 if (!(dev
->rx_desc
[slot
].ctrl
& MAL_RX_CTRL_EMPTY
)) {
1683 DBG2(dev
, "rx restart" NL
);
1688 if (dev
->rx_sg_skb
) {
1689 DBG2(dev
, "dropping partial rx packet" NL
);
1690 ++dev
->estats
.rx_dropped_error
;
1691 dev_kfree_skb(dev
->rx_sg_skb
);
1692 dev
->rx_sg_skb
= NULL
;
1695 clear_bit(MAL_COMMAC_RX_STOPPED
, &dev
->commac
.flags
);
1696 mal_enable_rx_channel(dev
->mal
, dev
->mal_rx_chan
);
1697 emac_rx_enable(dev
);
1703 /* NAPI poll context */
1704 static int emac_peek_rx(void *param
)
1706 struct emac_instance
*dev
= param
;
1708 return !(dev
->rx_desc
[dev
->rx_slot
].ctrl
& MAL_RX_CTRL_EMPTY
);
1711 /* NAPI poll context */
1712 static int emac_peek_rx_sg(void *param
)
1714 struct emac_instance
*dev
= param
;
1716 int slot
= dev
->rx_slot
;
1718 u16 ctrl
= dev
->rx_desc
[slot
].ctrl
;
1719 if (ctrl
& MAL_RX_CTRL_EMPTY
)
1721 else if (ctrl
& MAL_RX_CTRL_LAST
)
1724 slot
= (slot
+ 1) % NUM_RX_BUFF
;
1726 /* I'm just being paranoid here :) */
1727 if (unlikely(slot
== dev
->rx_slot
))
1733 static void emac_rxde(void *param
)
1735 struct emac_instance
*dev
= param
;
1737 ++dev
->estats
.rx_stopped
;
1738 emac_rx_disable_async(dev
);
1742 static irqreturn_t
emac_irq(int irq
, void *dev_instance
)
1744 struct emac_instance
*dev
= dev_instance
;
1745 struct emac_regs __iomem
*p
= dev
->emacp
;
1746 struct emac_error_stats
*st
= &dev
->estats
;
1749 spin_lock(&dev
->lock
);
1751 isr
= in_be32(&p
->isr
);
1752 out_be32(&p
->isr
, isr
);
1754 DBG(dev
, "isr = %08x" NL
, isr
);
1756 if (isr
& EMAC4_ISR_TXPE
)
1758 if (isr
& EMAC4_ISR_RXPE
)
1760 if (isr
& EMAC4_ISR_TXUE
)
1762 if (isr
& EMAC4_ISR_RXOE
)
1763 ++st
->rx_fifo_overrun
;
1764 if (isr
& EMAC_ISR_OVR
)
1766 if (isr
& EMAC_ISR_BP
)
1767 ++st
->rx_bad_packet
;
1768 if (isr
& EMAC_ISR_RP
)
1769 ++st
->rx_runt_packet
;
1770 if (isr
& EMAC_ISR_SE
)
1771 ++st
->rx_short_event
;
1772 if (isr
& EMAC_ISR_ALE
)
1773 ++st
->rx_alignment_error
;
1774 if (isr
& EMAC_ISR_BFCS
)
1776 if (isr
& EMAC_ISR_PTLE
)
1777 ++st
->rx_packet_too_long
;
1778 if (isr
& EMAC_ISR_ORE
)
1779 ++st
->rx_out_of_range
;
1780 if (isr
& EMAC_ISR_IRE
)
1782 if (isr
& EMAC_ISR_SQE
)
1784 if (isr
& EMAC_ISR_TE
)
1787 spin_unlock(&dev
->lock
);
1792 static struct net_device_stats
*emac_stats(struct net_device
*ndev
)
1794 struct emac_instance
*dev
= netdev_priv(ndev
);
1795 struct emac_stats
*st
= &dev
->stats
;
1796 struct emac_error_stats
*est
= &dev
->estats
;
1797 struct net_device_stats
*nst
= &dev
->nstats
;
1798 unsigned long flags
;
1800 DBG2(dev
, "stats" NL
);
1802 /* Compute "legacy" statistics */
1803 spin_lock_irqsave(&dev
->lock
, flags
);
1804 nst
->rx_packets
= (unsigned long)st
->rx_packets
;
1805 nst
->rx_bytes
= (unsigned long)st
->rx_bytes
;
1806 nst
->tx_packets
= (unsigned long)st
->tx_packets
;
1807 nst
->tx_bytes
= (unsigned long)st
->tx_bytes
;
1808 nst
->rx_dropped
= (unsigned long)(est
->rx_dropped_oom
+
1809 est
->rx_dropped_error
+
1810 est
->rx_dropped_resize
+
1811 est
->rx_dropped_mtu
);
1812 nst
->tx_dropped
= (unsigned long)est
->tx_dropped
;
1814 nst
->rx_errors
= (unsigned long)est
->rx_bd_errors
;
1815 nst
->rx_fifo_errors
= (unsigned long)(est
->rx_bd_overrun
+
1816 est
->rx_fifo_overrun
+
1818 nst
->rx_frame_errors
= (unsigned long)(est
->rx_bd_alignment_error
+
1819 est
->rx_alignment_error
);
1820 nst
->rx_crc_errors
= (unsigned long)(est
->rx_bd_bad_fcs
+
1822 nst
->rx_length_errors
= (unsigned long)(est
->rx_bd_runt_packet
+
1823 est
->rx_bd_short_event
+
1824 est
->rx_bd_packet_too_long
+
1825 est
->rx_bd_out_of_range
+
1826 est
->rx_bd_in_range
+
1827 est
->rx_runt_packet
+
1828 est
->rx_short_event
+
1829 est
->rx_packet_too_long
+
1830 est
->rx_out_of_range
+
1833 nst
->tx_errors
= (unsigned long)(est
->tx_bd_errors
+ est
->tx_errors
);
1834 nst
->tx_fifo_errors
= (unsigned long)(est
->tx_bd_underrun
+
1836 nst
->tx_carrier_errors
= (unsigned long)est
->tx_bd_carrier_loss
;
1837 nst
->collisions
= (unsigned long)(est
->tx_bd_excessive_deferral
+
1838 est
->tx_bd_excessive_collisions
+
1839 est
->tx_bd_late_collision
+
1840 est
->tx_bd_multple_collisions
);
1841 spin_unlock_irqrestore(&dev
->lock
, flags
);
1845 static struct mal_commac_ops emac_commac_ops
= {
1846 .poll_tx
= &emac_poll_tx
,
1847 .poll_rx
= &emac_poll_rx
,
1848 .peek_rx
= &emac_peek_rx
,
1852 static struct mal_commac_ops emac_commac_sg_ops
= {
1853 .poll_tx
= &emac_poll_tx
,
1854 .poll_rx
= &emac_poll_rx
,
1855 .peek_rx
= &emac_peek_rx_sg
,
1859 /* Ethtool support */
1860 static int emac_ethtool_get_settings(struct net_device
*ndev
,
1861 struct ethtool_cmd
*cmd
)
1863 struct emac_instance
*dev
= netdev_priv(ndev
);
1865 cmd
->supported
= dev
->phy
.features
;
1866 cmd
->port
= PORT_MII
;
1867 cmd
->phy_address
= dev
->phy
.address
;
1869 dev
->phy
.address
>= 0 ? XCVR_EXTERNAL
: XCVR_INTERNAL
;
1871 mutex_lock(&dev
->link_lock
);
1872 cmd
->advertising
= dev
->phy
.advertising
;
1873 cmd
->autoneg
= dev
->phy
.autoneg
;
1874 cmd
->speed
= dev
->phy
.speed
;
1875 cmd
->duplex
= dev
->phy
.duplex
;
1876 mutex_unlock(&dev
->link_lock
);
1881 static int emac_ethtool_set_settings(struct net_device
*ndev
,
1882 struct ethtool_cmd
*cmd
)
1884 struct emac_instance
*dev
= netdev_priv(ndev
);
1885 u32 f
= dev
->phy
.features
;
1887 DBG(dev
, "set_settings(%d, %d, %d, 0x%08x)" NL
,
1888 cmd
->autoneg
, cmd
->speed
, cmd
->duplex
, cmd
->advertising
);
1890 /* Basic sanity checks */
1891 if (dev
->phy
.address
< 0)
1893 if (cmd
->autoneg
!= AUTONEG_ENABLE
&& cmd
->autoneg
!= AUTONEG_DISABLE
)
1895 if (cmd
->autoneg
== AUTONEG_ENABLE
&& cmd
->advertising
== 0)
1897 if (cmd
->duplex
!= DUPLEX_HALF
&& cmd
->duplex
!= DUPLEX_FULL
)
1900 if (cmd
->autoneg
== AUTONEG_DISABLE
) {
1901 switch (cmd
->speed
) {
1903 if (cmd
->duplex
== DUPLEX_HALF
1904 && !(f
& SUPPORTED_10baseT_Half
))
1906 if (cmd
->duplex
== DUPLEX_FULL
1907 && !(f
& SUPPORTED_10baseT_Full
))
1911 if (cmd
->duplex
== DUPLEX_HALF
1912 && !(f
& SUPPORTED_100baseT_Half
))
1914 if (cmd
->duplex
== DUPLEX_FULL
1915 && !(f
& SUPPORTED_100baseT_Full
))
1919 if (cmd
->duplex
== DUPLEX_HALF
1920 && !(f
& SUPPORTED_1000baseT_Half
))
1922 if (cmd
->duplex
== DUPLEX_FULL
1923 && !(f
& SUPPORTED_1000baseT_Full
))
1930 mutex_lock(&dev
->link_lock
);
1931 dev
->phy
.def
->ops
->setup_forced(&dev
->phy
, cmd
->speed
,
1933 mutex_unlock(&dev
->link_lock
);
1936 if (!(f
& SUPPORTED_Autoneg
))
1939 mutex_lock(&dev
->link_lock
);
1940 dev
->phy
.def
->ops
->setup_aneg(&dev
->phy
,
1941 (cmd
->advertising
& f
) |
1942 (dev
->phy
.advertising
&
1944 ADVERTISED_Asym_Pause
)));
1945 mutex_unlock(&dev
->link_lock
);
1947 emac_force_link_update(dev
);
1952 static void emac_ethtool_get_ringparam(struct net_device
*ndev
,
1953 struct ethtool_ringparam
*rp
)
1955 rp
->rx_max_pending
= rp
->rx_pending
= NUM_RX_BUFF
;
1956 rp
->tx_max_pending
= rp
->tx_pending
= NUM_TX_BUFF
;
1959 static void emac_ethtool_get_pauseparam(struct net_device
*ndev
,
1960 struct ethtool_pauseparam
*pp
)
1962 struct emac_instance
*dev
= netdev_priv(ndev
);
1964 mutex_lock(&dev
->link_lock
);
1965 if ((dev
->phy
.features
& SUPPORTED_Autoneg
) &&
1966 (dev
->phy
.advertising
& (ADVERTISED_Pause
| ADVERTISED_Asym_Pause
)))
1969 if (dev
->phy
.duplex
== DUPLEX_FULL
) {
1971 pp
->rx_pause
= pp
->tx_pause
= 1;
1972 else if (dev
->phy
.asym_pause
)
1975 mutex_unlock(&dev
->link_lock
);
1978 static u32
emac_ethtool_get_rx_csum(struct net_device
*ndev
)
1980 struct emac_instance
*dev
= netdev_priv(ndev
);
1982 return dev
->tah_dev
!= NULL
;
1985 static int emac_get_regs_len(struct emac_instance
*dev
)
1987 if (emac_has_feature(dev
, EMAC_FTR_EMAC4
))
1988 return sizeof(struct emac_ethtool_regs_subhdr
) +
1989 EMAC4_ETHTOOL_REGS_SIZE
;
1991 return sizeof(struct emac_ethtool_regs_subhdr
) +
1992 EMAC_ETHTOOL_REGS_SIZE
;
1995 static int emac_ethtool_get_regs_len(struct net_device
*ndev
)
1997 struct emac_instance
*dev
= netdev_priv(ndev
);
2000 size
= sizeof(struct emac_ethtool_regs_hdr
) +
2001 emac_get_regs_len(dev
) + mal_get_regs_len(dev
->mal
);
2002 if (emac_has_feature(dev
, EMAC_FTR_HAS_ZMII
))
2003 size
+= zmii_get_regs_len(dev
->zmii_dev
);
2004 if (emac_has_feature(dev
, EMAC_FTR_HAS_RGMII
))
2005 size
+= rgmii_get_regs_len(dev
->rgmii_dev
);
2006 if (emac_has_feature(dev
, EMAC_FTR_HAS_TAH
))
2007 size
+= tah_get_regs_len(dev
->tah_dev
);
2012 static void *emac_dump_regs(struct emac_instance
*dev
, void *buf
)
2014 struct emac_ethtool_regs_subhdr
*hdr
= buf
;
2016 hdr
->index
= dev
->cell_index
;
2017 if (emac_has_feature(dev
, EMAC_FTR_EMAC4
)) {
2018 hdr
->version
= EMAC4_ETHTOOL_REGS_VER
;
2019 memcpy_fromio(hdr
+ 1, dev
->emacp
, EMAC4_ETHTOOL_REGS_SIZE
);
2020 return ((void *)(hdr
+ 1) + EMAC4_ETHTOOL_REGS_SIZE
);
2022 hdr
->version
= EMAC_ETHTOOL_REGS_VER
;
2023 memcpy_fromio(hdr
+ 1, dev
->emacp
, EMAC_ETHTOOL_REGS_SIZE
);
2024 return ((void *)(hdr
+ 1) + EMAC_ETHTOOL_REGS_SIZE
);
2028 static void emac_ethtool_get_regs(struct net_device
*ndev
,
2029 struct ethtool_regs
*regs
, void *buf
)
2031 struct emac_instance
*dev
= netdev_priv(ndev
);
2032 struct emac_ethtool_regs_hdr
*hdr
= buf
;
2034 hdr
->components
= 0;
2037 buf
= mal_dump_regs(dev
->mal
, buf
);
2038 buf
= emac_dump_regs(dev
, buf
);
2039 if (emac_has_feature(dev
, EMAC_FTR_HAS_ZMII
)) {
2040 hdr
->components
|= EMAC_ETHTOOL_REGS_ZMII
;
2041 buf
= zmii_dump_regs(dev
->zmii_dev
, buf
);
2043 if (emac_has_feature(dev
, EMAC_FTR_HAS_RGMII
)) {
2044 hdr
->components
|= EMAC_ETHTOOL_REGS_RGMII
;
2045 buf
= rgmii_dump_regs(dev
->rgmii_dev
, buf
);
2047 if (emac_has_feature(dev
, EMAC_FTR_HAS_TAH
)) {
2048 hdr
->components
|= EMAC_ETHTOOL_REGS_TAH
;
2049 buf
= tah_dump_regs(dev
->tah_dev
, buf
);
2053 static int emac_ethtool_nway_reset(struct net_device
*ndev
)
2055 struct emac_instance
*dev
= netdev_priv(ndev
);
2058 DBG(dev
, "nway_reset" NL
);
2060 if (dev
->phy
.address
< 0)
2063 mutex_lock(&dev
->link_lock
);
2064 if (!dev
->phy
.autoneg
) {
2069 dev
->phy
.def
->ops
->setup_aneg(&dev
->phy
, dev
->phy
.advertising
);
2071 mutex_unlock(&dev
->link_lock
);
2072 emac_force_link_update(dev
);
2076 static int emac_ethtool_get_stats_count(struct net_device
*ndev
)
2078 return EMAC_ETHTOOL_STATS_COUNT
;
2081 static void emac_ethtool_get_strings(struct net_device
*ndev
, u32 stringset
,
2084 if (stringset
== ETH_SS_STATS
)
2085 memcpy(buf
, &emac_stats_keys
, sizeof(emac_stats_keys
));
2088 static void emac_ethtool_get_ethtool_stats(struct net_device
*ndev
,
2089 struct ethtool_stats
*estats
,
2092 struct emac_instance
*dev
= netdev_priv(ndev
);
2094 memcpy(tmp_stats
, &dev
->stats
, sizeof(dev
->stats
));
2095 tmp_stats
+= sizeof(dev
->stats
) / sizeof(u64
);
2096 memcpy(tmp_stats
, &dev
->estats
, sizeof(dev
->estats
));
2099 static void emac_ethtool_get_drvinfo(struct net_device
*ndev
,
2100 struct ethtool_drvinfo
*info
)
2102 struct emac_instance
*dev
= netdev_priv(ndev
);
2104 strcpy(info
->driver
, "ibm_emac");
2105 strcpy(info
->version
, DRV_VERSION
);
2106 info
->fw_version
[0] = '\0';
2107 sprintf(info
->bus_info
, "PPC 4xx EMAC-%d %s",
2108 dev
->cell_index
, dev
->ofdev
->node
->full_name
);
2109 info
->n_stats
= emac_ethtool_get_stats_count(ndev
);
2110 info
->regdump_len
= emac_ethtool_get_regs_len(ndev
);
2113 static const struct ethtool_ops emac_ethtool_ops
= {
2114 .get_settings
= emac_ethtool_get_settings
,
2115 .set_settings
= emac_ethtool_set_settings
,
2116 .get_drvinfo
= emac_ethtool_get_drvinfo
,
2118 .get_regs_len
= emac_ethtool_get_regs_len
,
2119 .get_regs
= emac_ethtool_get_regs
,
2121 .nway_reset
= emac_ethtool_nway_reset
,
2123 .get_ringparam
= emac_ethtool_get_ringparam
,
2124 .get_pauseparam
= emac_ethtool_get_pauseparam
,
2126 .get_rx_csum
= emac_ethtool_get_rx_csum
,
2128 .get_strings
= emac_ethtool_get_strings
,
2129 .get_stats_count
= emac_ethtool_get_stats_count
,
2130 .get_ethtool_stats
= emac_ethtool_get_ethtool_stats
,
2132 .get_link
= ethtool_op_get_link
,
2133 .get_tx_csum
= ethtool_op_get_tx_csum
,
2134 .get_sg
= ethtool_op_get_sg
,
2137 static int emac_ioctl(struct net_device
*ndev
, struct ifreq
*rq
, int cmd
)
2139 struct emac_instance
*dev
= netdev_priv(ndev
);
2140 uint16_t *data
= (uint16_t *) & rq
->ifr_ifru
;
2142 DBG(dev
, "ioctl %08x" NL
, cmd
);
2144 if (dev
->phy
.address
< 0)
2149 case SIOCDEVPRIVATE
:
2150 data
[0] = dev
->phy
.address
;
2153 case SIOCDEVPRIVATE
+ 1:
2154 data
[3] = emac_mdio_read(ndev
, dev
->phy
.address
, data
[1]);
2158 case SIOCDEVPRIVATE
+ 2:
2159 if (!capable(CAP_NET_ADMIN
))
2161 emac_mdio_write(ndev
, dev
->phy
.address
, data
[1], data
[2]);
2168 struct emac_depentry
{
2170 struct device_node
*node
;
2171 struct of_device
*ofdev
;
2175 #define EMAC_DEP_MAL_IDX 0
2176 #define EMAC_DEP_ZMII_IDX 1
2177 #define EMAC_DEP_RGMII_IDX 2
2178 #define EMAC_DEP_TAH_IDX 3
2179 #define EMAC_DEP_MDIO_IDX 4
2180 #define EMAC_DEP_PREV_IDX 5
2181 #define EMAC_DEP_COUNT 6
2183 static int __devinit
emac_check_deps(struct emac_instance
*dev
,
2184 struct emac_depentry
*deps
)
2187 struct device_node
*np
;
2189 for (i
= 0; i
< EMAC_DEP_COUNT
; i
++) {
2190 /* no dependency on that item, allright */
2191 if (deps
[i
].phandle
== 0) {
2195 /* special case for blist as the dependency might go away */
2196 if (i
== EMAC_DEP_PREV_IDX
) {
2197 np
= *(dev
->blist
- 1);
2199 deps
[i
].phandle
= 0;
2203 if (deps
[i
].node
== NULL
)
2204 deps
[i
].node
= of_node_get(np
);
2206 if (deps
[i
].node
== NULL
)
2207 deps
[i
].node
= of_find_node_by_phandle(deps
[i
].phandle
);
2208 if (deps
[i
].node
== NULL
)
2210 if (deps
[i
].ofdev
== NULL
)
2211 deps
[i
].ofdev
= of_find_device_by_node(deps
[i
].node
);
2212 if (deps
[i
].ofdev
== NULL
)
2214 if (deps
[i
].drvdata
== NULL
)
2215 deps
[i
].drvdata
= dev_get_drvdata(&deps
[i
].ofdev
->dev
);
2216 if (deps
[i
].drvdata
!= NULL
)
2219 return (there
== EMAC_DEP_COUNT
);
2222 static void emac_put_deps(struct emac_instance
*dev
)
2225 of_dev_put(dev
->mal_dev
);
2227 of_dev_put(dev
->zmii_dev
);
2229 of_dev_put(dev
->rgmii_dev
);
2231 of_dev_put(dev
->mdio_dev
);
2233 of_dev_put(dev
->tah_dev
);
2236 static int __devinit
emac_of_bus_notify(struct notifier_block
*nb
,
2237 unsigned long action
, void *data
)
2239 /* We are only intereted in device addition */
2240 if (action
== BUS_NOTIFY_BOUND_DRIVER
)
2241 wake_up_all(&emac_probe_wait
);
2245 static struct notifier_block emac_of_bus_notifier __devinitdata
= {
2246 .notifier_call
= emac_of_bus_notify
2249 static int __devinit
emac_wait_deps(struct emac_instance
*dev
)
2251 struct emac_depentry deps
[EMAC_DEP_COUNT
];
2254 memset(&deps
, 0, sizeof(deps
));
2256 deps
[EMAC_DEP_MAL_IDX
].phandle
= dev
->mal_ph
;
2257 deps
[EMAC_DEP_ZMII_IDX
].phandle
= dev
->zmii_ph
;
2258 deps
[EMAC_DEP_RGMII_IDX
].phandle
= dev
->rgmii_ph
;
2260 deps
[EMAC_DEP_TAH_IDX
].phandle
= dev
->tah_ph
;
2262 deps
[EMAC_DEP_MDIO_IDX
].phandle
= dev
->mdio_ph
;
2263 if (dev
->blist
&& dev
->blist
> emac_boot_list
)
2264 deps
[EMAC_DEP_PREV_IDX
].phandle
= 0xffffffffu
;
2265 bus_register_notifier(&of_platform_bus_type
, &emac_of_bus_notifier
);
2266 wait_event_timeout(emac_probe_wait
,
2267 emac_check_deps(dev
, deps
),
2268 EMAC_PROBE_DEP_TIMEOUT
);
2269 bus_unregister_notifier(&of_platform_bus_type
, &emac_of_bus_notifier
);
2270 err
= emac_check_deps(dev
, deps
) ? 0 : -ENODEV
;
2271 for (i
= 0; i
< EMAC_DEP_COUNT
; i
++) {
2273 of_node_put(deps
[i
].node
);
2274 if (err
&& deps
[i
].ofdev
)
2275 of_dev_put(deps
[i
].ofdev
);
2278 dev
->mal_dev
= deps
[EMAC_DEP_MAL_IDX
].ofdev
;
2279 dev
->zmii_dev
= deps
[EMAC_DEP_ZMII_IDX
].ofdev
;
2280 dev
->rgmii_dev
= deps
[EMAC_DEP_RGMII_IDX
].ofdev
;
2281 dev
->tah_dev
= deps
[EMAC_DEP_TAH_IDX
].ofdev
;
2282 dev
->mdio_dev
= deps
[EMAC_DEP_MDIO_IDX
].ofdev
;
2284 if (deps
[EMAC_DEP_PREV_IDX
].ofdev
)
2285 of_dev_put(deps
[EMAC_DEP_PREV_IDX
].ofdev
);
2289 static int __devinit
emac_read_uint_prop(struct device_node
*np
, const char *name
,
2290 u32
*val
, int fatal
)
2293 const u32
*prop
= of_get_property(np
, name
, &len
);
2294 if (prop
== NULL
|| len
< sizeof(u32
)) {
2296 printk(KERN_ERR
"%s: missing %s property\n",
2297 np
->full_name
, name
);
2304 static int __devinit
emac_init_phy(struct emac_instance
*dev
)
2306 struct device_node
*np
= dev
->ofdev
->node
;
2307 struct net_device
*ndev
= dev
->ndev
;
2311 dev
->phy
.dev
= ndev
;
2312 dev
->phy
.mode
= dev
->phy_mode
;
2314 /* PHY-less configuration.
2315 * XXX I probably should move these settings to the dev tree
2317 if (dev
->phy_address
== 0xffffffff && dev
->phy_map
== 0xffffffff) {
2320 /* PHY-less configuration.
2321 * XXX I probably should move these settings to the dev tree
2323 dev
->phy
.address
= -1;
2324 dev
->phy
.features
= SUPPORTED_100baseT_Full
| SUPPORTED_MII
;
2330 mutex_lock(&emac_phy_map_lock
);
2331 phy_map
= dev
->phy_map
| busy_phy_map
;
2333 DBG(dev
, "PHY maps %08x %08x" NL
, dev
->phy_map
, busy_phy_map
);
2335 dev
->phy
.mdio_read
= emac_mdio_read
;
2336 dev
->phy
.mdio_write
= emac_mdio_write
;
2338 /* Enable internal clock source */
2339 #ifdef CONFIG_PPC_DCR_NATIVE
2340 if (emac_has_feature(dev
, EMAC_FTR_440GX_PHY_CLK_FIX
))
2341 dcri_clrset(SDR0
, SDR0_MFR
, 0, SDR0_MFR_ECS
);
2343 /* Configure EMAC with defaults so we can at least use MDIO
2344 * This is needed mostly for 440GX
2346 if (emac_phy_gpcs(dev
->phy
.mode
)) {
2348 * Make GPCS PHY address equal to EMAC index.
2349 * We probably should take into account busy_phy_map
2350 * and/or phy_map here.
2352 * Note that the busy_phy_map is currently global
2353 * while it should probably be per-ASIC...
2355 dev
->phy
.address
= dev
->cell_index
;
2358 emac_configure(dev
);
2360 if (dev
->phy_address
!= 0xffffffff)
2361 phy_map
= ~(1 << dev
->phy_address
);
2363 for (i
= 0; i
< 0x20; phy_map
>>= 1, ++i
)
2364 if (!(phy_map
& 1)) {
2366 busy_phy_map
|= 1 << i
;
2368 /* Quick check if there is a PHY at the address */
2369 r
= emac_mdio_read(dev
->ndev
, i
, MII_BMCR
);
2370 if (r
== 0xffff || r
< 0)
2372 if (!emac_mii_phy_probe(&dev
->phy
, i
))
2376 /* Enable external clock source */
2377 #ifdef CONFIG_PPC_DCR_NATIVE
2378 if (emac_has_feature(dev
, EMAC_FTR_440GX_PHY_CLK_FIX
))
2379 dcri_clrset(SDR0
, SDR0_MFR
, SDR0_MFR_ECS
, 0);
2381 mutex_unlock(&emac_phy_map_lock
);
2383 printk(KERN_WARNING
"%s: can't find PHY!\n", np
->full_name
);
2388 if (dev
->phy
.def
->ops
->init
)
2389 dev
->phy
.def
->ops
->init(&dev
->phy
);
2391 /* Disable any PHY features not supported by the platform */
2392 dev
->phy
.def
->features
&= ~dev
->phy_feat_exc
;
2394 /* Setup initial link parameters */
2395 if (dev
->phy
.features
& SUPPORTED_Autoneg
) {
2396 adv
= dev
->phy
.features
;
2397 if (!emac_has_feature(dev
, EMAC_FTR_NO_FLOW_CONTROL_40x
))
2398 adv
|= ADVERTISED_Pause
| ADVERTISED_Asym_Pause
;
2399 /* Restart autonegotiation */
2400 dev
->phy
.def
->ops
->setup_aneg(&dev
->phy
, adv
);
2402 u32 f
= dev
->phy
.def
->features
;
2403 int speed
= SPEED_10
, fd
= DUPLEX_HALF
;
2405 /* Select highest supported speed/duplex */
2406 if (f
& SUPPORTED_1000baseT_Full
) {
2409 } else if (f
& SUPPORTED_1000baseT_Half
)
2411 else if (f
& SUPPORTED_100baseT_Full
) {
2414 } else if (f
& SUPPORTED_100baseT_Half
)
2416 else if (f
& SUPPORTED_10baseT_Full
)
2419 /* Force link parameters */
2420 dev
->phy
.def
->ops
->setup_forced(&dev
->phy
, speed
, fd
);
2425 static int __devinit
emac_init_config(struct emac_instance
*dev
)
2427 struct device_node
*np
= dev
->ofdev
->node
;
2430 const char *pm
, *phy_modes
[] = {
2432 [PHY_MODE_MII
] = "mii",
2433 [PHY_MODE_RMII
] = "rmii",
2434 [PHY_MODE_SMII
] = "smii",
2435 [PHY_MODE_RGMII
] = "rgmii",
2436 [PHY_MODE_TBI
] = "tbi",
2437 [PHY_MODE_GMII
] = "gmii",
2438 [PHY_MODE_RTBI
] = "rtbi",
2439 [PHY_MODE_SGMII
] = "sgmii",
2442 /* Read config from device-tree */
2443 if (emac_read_uint_prop(np
, "mal-device", &dev
->mal_ph
, 1))
2445 if (emac_read_uint_prop(np
, "mal-tx-channel", &dev
->mal_tx_chan
, 1))
2447 if (emac_read_uint_prop(np
, "mal-rx-channel", &dev
->mal_rx_chan
, 1))
2449 if (emac_read_uint_prop(np
, "cell-index", &dev
->cell_index
, 1))
2451 if (emac_read_uint_prop(np
, "max-frame-size", &dev
->max_mtu
, 0))
2452 dev
->max_mtu
= 1500;
2453 if (emac_read_uint_prop(np
, "rx-fifo-size", &dev
->rx_fifo_size
, 0))
2454 dev
->rx_fifo_size
= 2048;
2455 if (emac_read_uint_prop(np
, "tx-fifo-size", &dev
->tx_fifo_size
, 0))
2456 dev
->tx_fifo_size
= 2048;
2457 if (emac_read_uint_prop(np
, "rx-fifo-size-gige", &dev
->rx_fifo_size_gige
, 0))
2458 dev
->rx_fifo_size_gige
= dev
->rx_fifo_size
;
2459 if (emac_read_uint_prop(np
, "tx-fifo-size-gige", &dev
->tx_fifo_size_gige
, 0))
2460 dev
->tx_fifo_size_gige
= dev
->tx_fifo_size
;
2461 if (emac_read_uint_prop(np
, "phy-address", &dev
->phy_address
, 0))
2462 dev
->phy_address
= 0xffffffff;
2463 if (emac_read_uint_prop(np
, "phy-map", &dev
->phy_map
, 0))
2464 dev
->phy_map
= 0xffffffff;
2465 if (emac_read_uint_prop(np
->parent
, "clock-frequency", &dev
->opb_bus_freq
, 1))
2467 if (emac_read_uint_prop(np
, "tah-device", &dev
->tah_ph
, 0))
2469 if (emac_read_uint_prop(np
, "tah-channel", &dev
->tah_port
, 0))
2471 if (emac_read_uint_prop(np
, "mdio-device", &dev
->mdio_ph
, 0))
2473 if (emac_read_uint_prop(np
, "zmii-device", &dev
->zmii_ph
, 0))
2475 if (emac_read_uint_prop(np
, "zmii-channel", &dev
->zmii_port
, 0))
2476 dev
->zmii_port
= 0xffffffff;;
2477 if (emac_read_uint_prop(np
, "rgmii-device", &dev
->rgmii_ph
, 0))
2479 if (emac_read_uint_prop(np
, "rgmii-channel", &dev
->rgmii_port
, 0))
2480 dev
->rgmii_port
= 0xffffffff;;
2481 if (emac_read_uint_prop(np
, "fifo-entry-size", &dev
->fifo_entry_size
, 0))
2482 dev
->fifo_entry_size
= 16;
2483 if (emac_read_uint_prop(np
, "mal-burst-size", &dev
->mal_burst_size
, 0))
2484 dev
->mal_burst_size
= 256;
2486 /* PHY mode needs some decoding */
2487 dev
->phy_mode
= PHY_MODE_NA
;
2488 pm
= of_get_property(np
, "phy-mode", &plen
);
2491 for (i
= 0; i
< ARRAY_SIZE(phy_modes
); i
++)
2492 if (!strcasecmp(pm
, phy_modes
[i
])) {
2498 /* Backward compat with non-final DT */
2499 if (dev
->phy_mode
== PHY_MODE_NA
&& pm
!= NULL
&& plen
== 4) {
2500 u32 nmode
= *(const u32
*)pm
;
2501 if (nmode
> PHY_MODE_NA
&& nmode
<= PHY_MODE_SGMII
)
2502 dev
->phy_mode
= nmode
;
2505 /* Check EMAC version */
2506 if (of_device_is_compatible(np
, "ibm,emac4")) {
2507 dev
->features
|= EMAC_FTR_EMAC4
;
2508 if (of_device_is_compatible(np
, "ibm,emac-440gx"))
2509 dev
->features
|= EMAC_FTR_440GX_PHY_CLK_FIX
;
2512 /* Fixup some feature bits based on the device tree */
2513 if (of_get_property(np
, "has-inverted-stacr-oc", NULL
))
2514 dev
->features
|= EMAC_FTR_STACR_OC_INVERT
;
2515 if (of_get_property(np
, "has-new-stacr-staopc", NULL
))
2516 dev
->features
|= EMAC_FTR_HAS_NEW_STACR
;
2518 /* CAB lacks the appropriate properties */
2519 if (of_device_is_compatible(np
, "ibm,emac-axon"))
2520 dev
->features
|= EMAC_FTR_HAS_NEW_STACR
|
2521 EMAC_FTR_STACR_OC_INVERT
;
2523 /* Enable TAH/ZMII/RGMII features as found */
2524 if (dev
->tah_ph
!= 0) {
2525 #ifdef CONFIG_IBM_NEW_EMAC_TAH
2526 dev
->features
|= EMAC_FTR_HAS_TAH
;
2528 printk(KERN_ERR
"%s: TAH support not enabled !\n",
2534 if (dev
->zmii_ph
!= 0) {
2535 #ifdef CONFIG_IBM_NEW_EMAC_ZMII
2536 dev
->features
|= EMAC_FTR_HAS_ZMII
;
2538 printk(KERN_ERR
"%s: ZMII support not enabled !\n",
2544 if (dev
->rgmii_ph
!= 0) {
2545 #ifdef CONFIG_IBM_NEW_EMAC_RGMII
2546 dev
->features
|= EMAC_FTR_HAS_RGMII
;
2548 printk(KERN_ERR
"%s: RGMII support not enabled !\n",
2554 /* Read MAC-address */
2555 p
= of_get_property(np
, "local-mac-address", NULL
);
2557 printk(KERN_ERR
"%s: Can't find local-mac-address property\n",
2561 memcpy(dev
->ndev
->dev_addr
, p
, 6);
2563 DBG(dev
, "features : 0x%08x / 0x%08x\n", dev
->features
, EMAC_FTRS_POSSIBLE
);
2564 DBG(dev
, "tx_fifo_size : %d (%d gige)\n", dev
->tx_fifo_size
, dev
->tx_fifo_size_gige
);
2565 DBG(dev
, "rx_fifo_size : %d (%d gige)\n", dev
->rx_fifo_size
, dev
->rx_fifo_size_gige
);
2566 DBG(dev
, "max_mtu : %d\n", dev
->max_mtu
);
2567 DBG(dev
, "OPB freq : %d\n", dev
->opb_bus_freq
);
2572 static int __devinit
emac_probe(struct of_device
*ofdev
,
2573 const struct of_device_id
*match
)
2575 struct net_device
*ndev
;
2576 struct emac_instance
*dev
;
2577 struct device_node
*np
= ofdev
->node
;
2578 struct device_node
**blist
= NULL
;
2581 /* Skip unused/unwired EMACS. We leave the check for an unused
2582 * property here for now, but new flat device trees should set a
2583 * status property to "disabled" instead.
2585 if (of_get_property(np
, "unused", NULL
) || !of_device_is_available(np
))
2588 /* Find ourselves in the bootlist if we are there */
2589 for (i
= 0; i
< EMAC_BOOT_LIST_SIZE
; i
++)
2590 if (emac_boot_list
[i
] == np
)
2591 blist
= &emac_boot_list
[i
];
2593 /* Allocate our net_device structure */
2595 ndev
= alloc_etherdev(sizeof(struct emac_instance
));
2597 printk(KERN_ERR
"%s: could not allocate ethernet device!\n",
2601 dev
= netdev_priv(ndev
);
2605 SET_NETDEV_DEV(ndev
, &ofdev
->dev
);
2607 /* Initialize some embedded data structures */
2608 mutex_init(&dev
->mdio_lock
);
2609 mutex_init(&dev
->link_lock
);
2610 spin_lock_init(&dev
->lock
);
2611 INIT_WORK(&dev
->reset_work
, emac_reset_work
);
2613 /* Init various config data based on device-tree */
2614 err
= emac_init_config(dev
);
2618 /* Get interrupts. EMAC irq is mandatory, WOL irq is optional */
2619 dev
->emac_irq
= irq_of_parse_and_map(np
, 0);
2620 dev
->wol_irq
= irq_of_parse_and_map(np
, 1);
2621 if (dev
->emac_irq
== NO_IRQ
) {
2622 printk(KERN_ERR
"%s: Can't map main interrupt\n", np
->full_name
);
2625 ndev
->irq
= dev
->emac_irq
;
2628 if (of_address_to_resource(np
, 0, &dev
->rsrc_regs
)) {
2629 printk(KERN_ERR
"%s: Can't get registers address\n",
2633 // TODO : request_mem_region
2634 dev
->emacp
= ioremap(dev
->rsrc_regs
.start
, sizeof(struct emac_regs
));
2635 if (dev
->emacp
== NULL
) {
2636 printk(KERN_ERR
"%s: Can't map device registers!\n",
2642 /* Wait for dependent devices */
2643 err
= emac_wait_deps(dev
);
2646 "%s: Timeout waiting for dependent devices\n",
2648 /* display more info about what's missing ? */
2651 dev
->mal
= dev_get_drvdata(&dev
->mal_dev
->dev
);
2652 if (dev
->mdio_dev
!= NULL
)
2653 dev
->mdio_instance
= dev_get_drvdata(&dev
->mdio_dev
->dev
);
2655 /* Register with MAL */
2656 dev
->commac
.ops
= &emac_commac_ops
;
2657 dev
->commac
.dev
= dev
;
2658 dev
->commac
.tx_chan_mask
= MAL_CHAN_MASK(dev
->mal_tx_chan
);
2659 dev
->commac
.rx_chan_mask
= MAL_CHAN_MASK(dev
->mal_rx_chan
);
2660 err
= mal_register_commac(dev
->mal
, &dev
->commac
);
2662 printk(KERN_ERR
"%s: failed to register with mal %s!\n",
2663 np
->full_name
, dev
->mal_dev
->node
->full_name
);
2666 dev
->rx_skb_size
= emac_rx_skb_size(ndev
->mtu
);
2667 dev
->rx_sync_size
= emac_rx_sync_size(ndev
->mtu
);
2669 /* Get pointers to BD rings */
2671 dev
->mal
->bd_virt
+ mal_tx_bd_offset(dev
->mal
, dev
->mal_tx_chan
);
2673 dev
->mal
->bd_virt
+ mal_rx_bd_offset(dev
->mal
, dev
->mal_rx_chan
);
2675 DBG(dev
, "tx_desc %p" NL
, dev
->tx_desc
);
2676 DBG(dev
, "rx_desc %p" NL
, dev
->rx_desc
);
2679 memset(dev
->tx_desc
, 0, NUM_TX_BUFF
* sizeof(struct mal_descriptor
));
2680 memset(dev
->rx_desc
, 0, NUM_RX_BUFF
* sizeof(struct mal_descriptor
));
2682 /* Attach to ZMII, if needed */
2683 if (emac_has_feature(dev
, EMAC_FTR_HAS_ZMII
) &&
2684 (err
= zmii_attach(dev
->zmii_dev
, dev
->zmii_port
, &dev
->phy_mode
)) != 0)
2685 goto err_unreg_commac
;
2687 /* Attach to RGMII, if needed */
2688 if (emac_has_feature(dev
, EMAC_FTR_HAS_RGMII
) &&
2689 (err
= rgmii_attach(dev
->rgmii_dev
, dev
->rgmii_port
, dev
->phy_mode
)) != 0)
2690 goto err_detach_zmii
;
2692 /* Attach to TAH, if needed */
2693 if (emac_has_feature(dev
, EMAC_FTR_HAS_TAH
) &&
2694 (err
= tah_attach(dev
->tah_dev
, dev
->tah_port
)) != 0)
2695 goto err_detach_rgmii
;
2697 /* Set some link defaults before we can find out real parameters */
2698 dev
->phy
.speed
= SPEED_100
;
2699 dev
->phy
.duplex
= DUPLEX_FULL
;
2700 dev
->phy
.autoneg
= AUTONEG_DISABLE
;
2701 dev
->phy
.pause
= dev
->phy
.asym_pause
= 0;
2702 dev
->stop_timeout
= STOP_TIMEOUT_100
;
2703 INIT_DELAYED_WORK(&dev
->link_work
, emac_link_timer
);
2705 /* Find PHY if any */
2706 err
= emac_init_phy(dev
);
2708 goto err_detach_tah
;
2710 /* Fill in the driver function table */
2711 ndev
->open
= &emac_open
;
2713 ndev
->features
|= NETIF_F_IP_CSUM
| NETIF_F_SG
;
2714 ndev
->tx_timeout
= &emac_tx_timeout
;
2715 ndev
->watchdog_timeo
= 5 * HZ
;
2716 ndev
->stop
= &emac_close
;
2717 ndev
->get_stats
= &emac_stats
;
2718 ndev
->set_multicast_list
= &emac_set_multicast_list
;
2719 ndev
->do_ioctl
= &emac_ioctl
;
2720 if (emac_phy_supports_gige(dev
->phy_mode
)) {
2721 ndev
->hard_start_xmit
= &emac_start_xmit_sg
;
2722 ndev
->change_mtu
= &emac_change_mtu
;
2723 dev
->commac
.ops
= &emac_commac_sg_ops
;
2725 ndev
->hard_start_xmit
= &emac_start_xmit
;
2727 SET_ETHTOOL_OPS(ndev
, &emac_ethtool_ops
);
2729 netif_carrier_off(ndev
);
2730 netif_stop_queue(ndev
);
2732 err
= register_netdev(ndev
);
2734 printk(KERN_ERR
"%s: failed to register net device (%d)!\n",
2735 np
->full_name
, err
);
2736 goto err_detach_tah
;
2739 /* Set our drvdata last as we don't want them visible until we are
2743 dev_set_drvdata(&ofdev
->dev
, dev
);
2745 /* There's a new kid in town ! Let's tell everybody */
2746 wake_up_all(&emac_probe_wait
);
2750 "%s: EMAC-%d %s, MAC %02x:%02x:%02x:%02x:%02x:%02x\n",
2751 ndev
->name
, dev
->cell_index
, np
->full_name
,
2752 ndev
->dev_addr
[0], ndev
->dev_addr
[1], ndev
->dev_addr
[2],
2753 ndev
->dev_addr
[3], ndev
->dev_addr
[4], ndev
->dev_addr
[5]);
2755 if (dev
->phy
.address
>= 0)
2756 printk("%s: found %s PHY (0x%02x)\n", ndev
->name
,
2757 dev
->phy
.def
->name
, dev
->phy
.address
);
2759 emac_dbg_register(dev
);
2764 /* I have a bad feeling about this ... */
2767 if (emac_has_feature(dev
, EMAC_FTR_HAS_TAH
))
2768 tah_detach(dev
->tah_dev
, dev
->tah_port
);
2770 if (emac_has_feature(dev
, EMAC_FTR_HAS_RGMII
))
2771 rgmii_detach(dev
->rgmii_dev
, dev
->rgmii_port
);
2773 if (emac_has_feature(dev
, EMAC_FTR_HAS_ZMII
))
2774 zmii_detach(dev
->zmii_dev
, dev
->zmii_port
);
2776 mal_unregister_commac(dev
->mal
, &dev
->commac
);
2780 iounmap(dev
->emacp
);
2782 if (dev
->wol_irq
!= NO_IRQ
)
2783 irq_dispose_mapping(dev
->wol_irq
);
2784 if (dev
->emac_irq
!= NO_IRQ
)
2785 irq_dispose_mapping(dev
->emac_irq
);
2789 /* if we were on the bootlist, remove us as we won't show up and
2790 * wake up all waiters to notify them in case they were waiting
2795 wake_up_all(&emac_probe_wait
);
2800 static int __devexit
emac_remove(struct of_device
*ofdev
)
2802 struct emac_instance
*dev
= dev_get_drvdata(&ofdev
->dev
);
2804 DBG(dev
, "remove" NL
);
2806 dev_set_drvdata(&ofdev
->dev
, NULL
);
2808 unregister_netdev(dev
->ndev
);
2810 flush_scheduled_work();
2812 if (emac_has_feature(dev
, EMAC_FTR_HAS_TAH
))
2813 tah_detach(dev
->tah_dev
, dev
->tah_port
);
2814 if (emac_has_feature(dev
, EMAC_FTR_HAS_RGMII
))
2815 rgmii_detach(dev
->rgmii_dev
, dev
->rgmii_port
);
2816 if (emac_has_feature(dev
, EMAC_FTR_HAS_ZMII
))
2817 zmii_detach(dev
->zmii_dev
, dev
->zmii_port
);
2819 mal_unregister_commac(dev
->mal
, &dev
->commac
);
2822 emac_dbg_unregister(dev
);
2823 iounmap(dev
->emacp
);
2825 if (dev
->wol_irq
!= NO_IRQ
)
2826 irq_dispose_mapping(dev
->wol_irq
);
2827 if (dev
->emac_irq
!= NO_IRQ
)
2828 irq_dispose_mapping(dev
->emac_irq
);
2835 /* XXX Features in here should be replaced by properties... */
2836 static struct of_device_id emac_match
[] =
2840 .compatible
= "ibm,emac",
2844 .compatible
= "ibm,emac4",
2849 static struct of_platform_driver emac_driver
= {
2851 .match_table
= emac_match
,
2853 .probe
= emac_probe
,
2854 .remove
= emac_remove
,
2857 static void __init
emac_make_bootlist(void)
2859 struct device_node
*np
= NULL
;
2860 int j
, max
, i
= 0, k
;
2861 int cell_indices
[EMAC_BOOT_LIST_SIZE
];
2864 while((np
= of_find_all_nodes(np
)) != NULL
) {
2867 if (of_match_node(emac_match
, np
) == NULL
)
2869 if (of_get_property(np
, "unused", NULL
))
2871 idx
= of_get_property(np
, "cell-index", NULL
);
2874 cell_indices
[i
] = *idx
;
2875 emac_boot_list
[i
++] = of_node_get(np
);
2876 if (i
>= EMAC_BOOT_LIST_SIZE
) {
2883 /* Bubble sort them (doh, what a creative algorithm :-) */
2884 for (i
= 0; max
> 1 && (i
< (max
- 1)); i
++)
2885 for (j
= i
; j
< max
; j
++) {
2886 if (cell_indices
[i
] > cell_indices
[j
]) {
2887 np
= emac_boot_list
[i
];
2888 emac_boot_list
[i
] = emac_boot_list
[j
];
2889 emac_boot_list
[j
] = np
;
2890 k
= cell_indices
[i
];
2891 cell_indices
[i
] = cell_indices
[j
];
2892 cell_indices
[j
] = k
;
2897 static int __init
emac_init(void)
2901 printk(KERN_INFO DRV_DESC
", version " DRV_VERSION
"\n");
2903 /* Init debug stuff */
2906 /* Build EMAC boot list */
2907 emac_make_bootlist();
2909 /* Init submodules */
2922 rc
= of_register_platform_driver(&emac_driver
);
2940 static void __exit
emac_exit(void)
2944 of_unregister_platform_driver(&emac_driver
);
2952 /* Destroy EMAC boot list */
2953 for (i
= 0; i
< EMAC_BOOT_LIST_SIZE
; i
++)
2954 if (emac_boot_list
[i
])
2955 of_node_put(emac_boot_list
[i
]);
module_init(emac_init);
module_exit(emac_exit);