/*
 * drivers/net/ibm_newemac/core.c
 *
 * Driver for PowerPC 4xx on-chip ethernet controller.
 *
 * Copyright 2007 Benjamin Herrenschmidt, IBM Corp.
 *                <benh@kernel.crashing.org>
 *
 * Based on the arch/ppc version of the driver:
 *
 * Copyright (c) 2004, 2005 Zultys Technologies.
 * Eugene Surovegin <eugene.surovegin@zultys.com> or <ebs@ebshome.net>
 *
 * Based on original work by
 *	Matt Porter <mporter@kernel.crashing.org>
 *	(c) 2003 Benjamin Herrenschmidt <benh@kernel.crashing.org>
 *	Armin Kuster <akuster@mvista.com>
 *	Johnnie Peters <jpeters@mvista.com>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the
 * Free Software Foundation; either version 2 of the License, or (at your
 * option) any later version.
 */
#include <linux/sched.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/delay.h>
#include <linux/types.h>
#include <linux/pci.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/crc32.h>
#include <linux/ethtool.h>
#include <linux/mii.h>
#include <linux/bitops.h>
#include <linux/workqueue.h>

#include <asm/processor.h>
#include <asm/io.h>
#include <asm/dma.h>
#include <asm/uaccess.h>

#include "core.h"
/*
 * Lack of dma_unmap_???? calls is intentional.
 *
 * API-correct usage requires additional support state information to be
 * maintained for every RX and TX buffer descriptor (BD). Unfortunately, due to
 * EMAC design (e.g. TX buffer passed from network stack can be split into
 * several BDs, dma_map_single/dma_map_page can be used to map particular BD),
 * maintaining such information will add additional overhead.
 * Current DMA API implementation for 4xx processors only ensures cache coherency
 * and dma_unmap_???? routines are empty and are likely to stay this way.
 * I decided to omit dma_unmap_??? calls because I don't want to add additional
 * complexity just for the sake of following some abstract API, when it doesn't
 * add any real benefit to the driver. I understand that this decision may be
 * controversial, but I really tried to make code API-correct and efficient
 * at the same time and didn't come up with code I liked :(. --ebs
 */
#define DRV_NAME        "emac"
#define DRV_VERSION     "3.54"
#define DRV_DESC        "PPC 4xx OCP EMAC driver"

MODULE_DESCRIPTION(DRV_DESC);
MODULE_AUTHOR
    ("Eugene Surovegin <eugene.surovegin@zultys.com> or <ebs@ebshome.net>");
MODULE_LICENSE("GPL");
/*
 * PPC64 doesn't (yet) have a cacheable_memcpy
 */
#ifdef CONFIG_PPC64
#define cacheable_memcpy(d,s,n) memcpy((d),(s),(n))
#endif
/* minimum number of free TX descriptors required to wake up TX process */
#define EMAC_TX_WAKEUP_THRESH		(NUM_TX_BUFF / 4)

/* If packet size is less than this number, we allocate a small skb and copy
 * the packet contents into it instead of just sending the original big skb up
 */
#define EMAC_RX_COPY_THRESH		CONFIG_IBM_NEW_EMAC_RX_COPY_THRESHOLD
/* Since multiple EMACs share MDIO lines in various ways, we need
 * to avoid re-using the same PHY ID in cases where the arch didn't
 * set up precise phy_map entries.
 *
 * XXX This is something that needs to be reworked as we can have multiple
 * EMAC "sets" (multiple ASICs containing several EMACs) though we can
 * probably require in that case to have explicit PHY IDs in the device-tree
 */
static u32 busy_phy_map;
static DEFINE_MUTEX(emac_phy_map_lock);

/* This is the wait queue used to wait on any event related to probe, that
 * is discovery of MALs, other EMACs, ZMII/RGMIIs, etc...
 */
static DECLARE_WAIT_QUEUE_HEAD(emac_probe_wait);
/* Having stable interface names is a doomed idea. However, it would be nice
 * if we didn't have completely random interface names at boot too :-) It's
 * just a matter of making everybody's life easier. Since we are doing
 * threaded probing, it's a bit harder though. The base idea here is that
 * we make up a list of all emacs in the device-tree before we register the
 * driver. Every emac will then wait for the previous one in the list to
 * initialize before itself. We should also keep that list ordered by
 * cell_index.
 * That list is only 4 entries long, meaning that additional EMACs don't
 * get ordering guarantees unless EMAC_BOOT_LIST_SIZE is increased.
 */

#define EMAC_BOOT_LIST_SIZE	4
static struct device_node *emac_boot_list[EMAC_BOOT_LIST_SIZE];
/* How long should I wait for dependent devices? */
#define EMAC_PROBE_DEP_TIMEOUT	(HZ * 5)

/* I don't want to litter the system log with timeout errors
 * when we have a brain-damaged PHY.
 */
static inline void emac_report_timeout_error(struct emac_instance *dev,
					     const char *error)
{
	if (net_ratelimit())
		printk(KERN_ERR "%s: %s\n", dev->ndev->name, error);
}
/* PHY polling intervals */
#define PHY_POLL_LINK_ON	HZ
#define PHY_POLL_LINK_OFF	(HZ / 5)

/* Graceful stop timeouts in us.
 * We should allow up to 1 frame time (full-duplex, ignoring collisions)
 */
#define STOP_TIMEOUT_10		1230
#define STOP_TIMEOUT_100	124
#define STOP_TIMEOUT_1000	13
#define STOP_TIMEOUT_1000_JUMBO	73
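/* For reference, these values are presumably one worst-case frame time at
 * each speed: a 1518-byte frame plus 8-byte preamble and 12-byte IFG is
 * 1538 bytes = 12304 bit times, i.e. ~1230 us at 10 Mb/s, ~123 us at
 * 100 Mb/s and ~12.3 us at 1000 Mb/s (the jumbo value covering a ~9000-byte
 * frame), each rounded up.
 */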
static unsigned char default_mcast_addr[] = {
	0x01, 0x80, 0xC2, 0x00, 0x00, 0x01
};
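/* 01:80:C2:00:00:01 is the IEEE 802.3x PAUSE (flow control) multicast
 * address; it is subscribed to in emac_configure() so the MAC will accept
 * PAUSE frames from the link partner.
 */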
/* Please, keep in sync with struct ibm_emac_stats/ibm_emac_error_stats */
static const char emac_stats_keys[EMAC_ETHTOOL_STATS_COUNT][ETH_GSTRING_LEN] = {
	"rx_packets", "rx_bytes", "tx_packets", "tx_bytes", "rx_packets_csum",
	"tx_packets_csum", "tx_undo", "rx_dropped_stack", "rx_dropped_oom",
	"rx_dropped_error", "rx_dropped_resize", "rx_dropped_mtu",
	"rx_stopped", "rx_bd_errors", "rx_bd_overrun", "rx_bd_bad_packet",
	"rx_bd_runt_packet", "rx_bd_short_event", "rx_bd_alignment_error",
	"rx_bd_bad_fcs", "rx_bd_packet_too_long", "rx_bd_out_of_range",
	"rx_bd_in_range", "rx_parity", "rx_fifo_overrun", "rx_overrun",
	"rx_bad_packet", "rx_runt_packet", "rx_short_event",
	"rx_alignment_error", "rx_bad_fcs", "rx_packet_too_long",
	"rx_out_of_range", "rx_in_range", "tx_dropped", "tx_bd_errors",
	"tx_bd_bad_fcs", "tx_bd_carrier_loss", "tx_bd_excessive_deferral",
	"tx_bd_excessive_collisions", "tx_bd_late_collision",
	"tx_bd_multple_collisions", "tx_bd_single_collision",
	"tx_bd_underrun", "tx_bd_sqe", "tx_parity", "tx_underrun", "tx_sqe",
	"tx_errors"
};
static irqreturn_t emac_irq(int irq, void *dev_instance);
static void emac_clean_tx_ring(struct emac_instance *dev);
static void __emac_set_multicast_list(struct emac_instance *dev);
static inline int emac_phy_supports_gige(int phy_mode)
{
	return  phy_mode == PHY_MODE_GMII ||
		phy_mode == PHY_MODE_RGMII ||
		phy_mode == PHY_MODE_TBI ||
		phy_mode == PHY_MODE_RTBI;
}

static inline int emac_phy_gpcs(int phy_mode)
{
	return  phy_mode == PHY_MODE_TBI ||
		phy_mode == PHY_MODE_RTBI;
}
static inline void emac_tx_enable(struct emac_instance *dev)
{
	struct emac_regs __iomem *p = dev->emacp;
	u32 r;

	DBG(dev, "tx_enable" NL);

	r = in_be32(&p->mr0);
	if (!(r & EMAC_MR0_TXE))
		out_be32(&p->mr0, r | EMAC_MR0_TXE);
}
static void emac_tx_disable(struct emac_instance *dev)
{
	struct emac_regs __iomem *p = dev->emacp;
	u32 r;

	DBG(dev, "tx_disable" NL);

	r = in_be32(&p->mr0);
	if (r & EMAC_MR0_TXE) {
		int n = dev->stop_timeout;
		out_be32(&p->mr0, r & ~EMAC_MR0_TXE);
		while (!(in_be32(&p->mr0) & EMAC_MR0_TXI) && n) {
			udelay(1);
			--n;
		}
		if (unlikely(!n))
			emac_report_timeout_error(dev, "TX disable timeout");
	}
}
static void emac_rx_enable(struct emac_instance *dev)
{
	struct emac_regs __iomem *p = dev->emacp;
	u32 r;

	if (unlikely(test_bit(MAL_COMMAC_RX_STOPPED, &dev->commac.flags)))
		goto out;

	DBG(dev, "rx_enable" NL);

	r = in_be32(&p->mr0);
	if (!(r & EMAC_MR0_RXE)) {
		if (unlikely(!(r & EMAC_MR0_RXI))) {
			/* Wait if previous async disable is still in progress */
			int n = dev->stop_timeout;
			while (!(r = in_be32(&p->mr0) & EMAC_MR0_RXI) && n) {
				udelay(1);
				--n;
			}
			if (unlikely(!n))
				emac_report_timeout_error(dev,
							  "RX disable timeout");
		}
		out_be32(&p->mr0, r | EMAC_MR0_RXE);
	}
 out:
	;
}
static void emac_rx_disable(struct emac_instance *dev)
{
	struct emac_regs __iomem *p = dev->emacp;
	u32 r;

	DBG(dev, "rx_disable" NL);

	r = in_be32(&p->mr0);
	if (r & EMAC_MR0_RXE) {
		int n = dev->stop_timeout;
		out_be32(&p->mr0, r & ~EMAC_MR0_RXE);
		while (!(in_be32(&p->mr0) & EMAC_MR0_RXI) && n) {
			udelay(1);
			--n;
		}
		if (unlikely(!n))
			emac_report_timeout_error(dev, "RX disable timeout");
	}
}
static inline void emac_netif_stop(struct emac_instance *dev)
{
	netif_tx_lock_bh(dev->ndev);
	dev->no_mcast = 1;
	netif_tx_unlock_bh(dev->ndev);
	dev->ndev->trans_start = jiffies;	/* prevent tx timeout */
	mal_poll_disable(dev->mal, &dev->commac);
	netif_tx_disable(dev->ndev);
}
static inline void emac_netif_start(struct emac_instance *dev)
{
	netif_tx_lock_bh(dev->ndev);
	dev->no_mcast = 0;
	if (dev->mcast_pending && netif_running(dev->ndev))
		__emac_set_multicast_list(dev);
	netif_tx_unlock_bh(dev->ndev);

	netif_wake_queue(dev->ndev);

	/* NOTE: unconditional netif_wake_queue is only appropriate
	 * so long as all callers are assured to have free tx slots
	 * (taken from tg3... though the case where that is wrong is
	 *  not terribly harmful)
	 */
	mal_poll_enable(dev->mal, &dev->commac);
}
static inline void emac_rx_disable_async(struct emac_instance *dev)
{
	struct emac_regs __iomem *p = dev->emacp;
	u32 r;

	DBG(dev, "rx_disable_async" NL);

	r = in_be32(&p->mr0);
	if (r & EMAC_MR0_RXE)
		out_be32(&p->mr0, r & ~EMAC_MR0_RXE);
}
static int emac_reset(struct emac_instance *dev)
{
	struct emac_regs __iomem *p = dev->emacp;
	int n = 20;

	DBG(dev, "reset" NL);

	if (!dev->reset_failed) {
		/* 40x erratum suggests stopping RX channel before reset,
		 * we stop TX as well
		 */
		emac_rx_disable(dev);
		emac_tx_disable(dev);
	}

	out_be32(&p->mr0, EMAC_MR0_SRST);
	while ((in_be32(&p->mr0) & EMAC_MR0_SRST) && n)
		--n;

	if (n) {
		dev->reset_failed = 0;
		return 0;
	} else {
		emac_report_timeout_error(dev, "reset timeout");
		dev->reset_failed = 1;
		return -ETIMEDOUT;
	}
}
static void emac_hash_mc(struct emac_instance *dev)
{
	struct emac_regs __iomem *p = dev->emacp;
	u16 gaht[4] = { 0 };
	struct dev_mc_list *dmi;

	DBG(dev, "hash_mc %d" NL, dev->ndev->mc_count);

	for (dmi = dev->ndev->mc_list; dmi; dmi = dmi->next) {
		int bit;
		DBG2(dev, "mc %02x:%02x:%02x:%02x:%02x:%02x" NL,
		     dmi->dmi_addr[0], dmi->dmi_addr[1], dmi->dmi_addr[2],
		     dmi->dmi_addr[3], dmi->dmi_addr[4], dmi->dmi_addr[5]);

		bit = 63 - (ether_crc(ETH_ALEN, dmi->dmi_addr) >> 26);
		gaht[bit >> 4] |= 0x8000 >> (bit & 0x0f);
	}
	out_be32(&p->gaht1, gaht[0]);
	out_be32(&p->gaht2, gaht[1]);
	out_be32(&p->gaht3, gaht[2]);
	out_be32(&p->gaht4, gaht[3]);
}
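/* The group-address hash above works by taking the top six bits of the
 * Ethernet CRC of each multicast address to select one of 64 bucket bits,
 * spread across the four 16-bit hash tables GAHT1..GAHT4. A set bit tells
 * the MAC to accept any multicast frame whose address hashes to that bucket.
 */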
static inline u32 emac_iff2rmr(struct net_device *ndev)
{
	struct emac_instance *dev = netdev_priv(ndev);
	u32 r;

	r = EMAC_RMR_SP | EMAC_RMR_SFCS | EMAC_RMR_IAE | EMAC_RMR_BAE;

	if (emac_has_feature(dev, EMAC_FTR_EMAC4))
	    r |= EMAC4_RMR_BASE;
	else
	    r |= EMAC_RMR_BASE;

	if (ndev->flags & IFF_PROMISC)
		r |= EMAC_RMR_PME;
	else if (ndev->flags & IFF_ALLMULTI || ndev->mc_count > 32)
		r |= EMAC_RMR_PMME;
	else if (ndev->mc_count > 0)
		r |= EMAC_RMR_MAE;

	return r;
}
static u32 __emac_calc_base_mr1(struct emac_instance *dev, int tx_size, int rx_size)
{
	u32 ret = EMAC_MR1_VLE | EMAC_MR1_IST | EMAC_MR1_TR0_MULT;

	DBG2(dev, "__emac_calc_base_mr1" NL);

	switch(tx_size) {
	case 2048:
		ret |= EMAC_MR1_TFS_2K;
		break;
	default:
		printk(KERN_WARNING "%s: Unknown Tx FIFO size %d\n",
		       dev->ndev->name, tx_size);
	}

	switch(rx_size) {
	case 16384:
		ret |= EMAC_MR1_RFS_16K;
		break;
	case 4096:
		ret |= EMAC_MR1_RFS_4K;
		break;
	default:
		printk(KERN_WARNING "%s: Unknown Rx FIFO size %d\n",
		       dev->ndev->name, rx_size);
	}

	return ret;
}
static u32 __emac4_calc_base_mr1(struct emac_instance *dev, int tx_size, int rx_size)
{
	u32 ret = EMAC_MR1_VLE | EMAC_MR1_IST | EMAC4_MR1_TR |
		EMAC4_MR1_OBCI(dev->opb_bus_freq / 1000000);

	DBG2(dev, "__emac4_calc_base_mr1" NL);

	switch(tx_size) {
	case 4096:
		ret |= EMAC4_MR1_TFS_4K;
		break;
	case 2048:
		ret |= EMAC4_MR1_TFS_2K;
		break;
	default:
		printk(KERN_WARNING "%s: Unknown Tx FIFO size %d\n",
		       dev->ndev->name, tx_size);
	}

	switch(rx_size) {
	case 16384:
		ret |= EMAC4_MR1_RFS_16K;
		break;
	case 4096:
		ret |= EMAC4_MR1_RFS_4K;
		break;
	case 2048:
		ret |= EMAC4_MR1_RFS_2K;
		break;
	default:
		printk(KERN_WARNING "%s: Unknown Rx FIFO size %d\n",
		       dev->ndev->name, rx_size);
	}

	return ret;
}
static u32 emac_calc_base_mr1(struct emac_instance *dev, int tx_size, int rx_size)
{
	return emac_has_feature(dev, EMAC_FTR_EMAC4) ?
		__emac4_calc_base_mr1(dev, tx_size, rx_size) :
		__emac_calc_base_mr1(dev, tx_size, rx_size);
}
static inline u32 emac_calc_trtr(struct emac_instance *dev, unsigned int size)
{
	if (emac_has_feature(dev, EMAC_FTR_EMAC4))
		return ((size >> 6) - 1) << EMAC_TRTR_SHIFT_EMAC4;
	else
		return ((size >> 6) - 1) << EMAC_TRTR_SHIFT;
}
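/* TRTR holds the TX FIFO transmit-request threshold in 64-byte units,
 * encoded as (threshold / 64) - 1 and shifted into the EMAC- or
 * EMAC4-specific field position.
 */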
static inline u32 emac_calc_rwmr(struct emac_instance *dev,
				 unsigned int low, unsigned int high)
{
	if (emac_has_feature(dev, EMAC_FTR_EMAC4))
		return (low << 22) | ( (high & 0x3ff) << 6);
	else
		return (low << 23) | ( (high & 0x1ff) << 7);
}
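/* RWMR packs the RX FIFO low/high water marks (in FIFO-entry units) into
 * one register; judging from the shifts and masks above, EMAC4 uses wider
 * fields (high mark masked to 10 bits at shift 6, low mark at shift 22)
 * than the older core (9 bits at shifts 7 and 23). emac_configure() passes
 * marks of 1/8 and 1/4 of the RX FIFO, per the PAUSE headroom note there.
 */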
static int emac_configure(struct emac_instance *dev)
{
	struct emac_regs __iomem *p = dev->emacp;
	struct net_device *ndev = dev->ndev;
	int tx_size, rx_size, link = netif_carrier_ok(dev->ndev);
	u32 r, mr1 = 0;

	DBG(dev, "configure" NL);

	if (!link) {
		out_be32(&p->mr1, in_be32(&p->mr1)
			 | EMAC_MR1_FDE | EMAC_MR1_ILE);
		udelay(100);
	} else if (emac_reset(dev) < 0)
		return -ETIMEDOUT;

	if (emac_has_feature(dev, EMAC_FTR_HAS_TAH))
		tah_reset(dev->tah_dev);

	DBG(dev, " link = %d duplex = %d, pause = %d, asym_pause = %d\n",
	    link, dev->phy.duplex, dev->phy.pause, dev->phy.asym_pause);

	/* Default fifo sizes */
	tx_size = dev->tx_fifo_size;
	rx_size = dev->rx_fifo_size;

	/* No link, force loopback */
	if (!link)
		mr1 = EMAC_MR1_FDE | EMAC_MR1_ILE;

	/* Check for full duplex */
	else if (dev->phy.duplex == DUPLEX_FULL)
		mr1 |= EMAC_MR1_FDE | EMAC_MR1_MWSW_001;

	/* Adjust fifo sizes, mr1 and timeouts based on link speed */
	dev->stop_timeout = STOP_TIMEOUT_10;
	switch (dev->phy.speed) {
	case SPEED_1000:
		if (emac_phy_gpcs(dev->phy.mode)) {
			mr1 |= EMAC_MR1_MF_1000GPCS |
				EMAC_MR1_MF_IPPA(dev->phy.address);

			/* Put some arbitrary OUI, Manuf & Rev IDs so we can
			 * identify this GPCS PHY later.
			 */
			out_be32(&p->ipcr, 0xdeadbeef);
		} else
			mr1 |= EMAC_MR1_MF_1000;

		/* Extended fifo sizes */
		tx_size = dev->tx_fifo_size_gige;
		rx_size = dev->rx_fifo_size_gige;

		if (dev->ndev->mtu > ETH_DATA_LEN) {
			if (emac_has_feature(dev, EMAC_FTR_EMAC4))
				mr1 |= EMAC4_MR1_JPSM;
			else
				mr1 |= EMAC_MR1_JPSM;
			dev->stop_timeout = STOP_TIMEOUT_1000_JUMBO;
		} else
			dev->stop_timeout = STOP_TIMEOUT_1000;
		break;
	case SPEED_100:
		mr1 |= EMAC_MR1_MF_100;
		dev->stop_timeout = STOP_TIMEOUT_100;
		break;
	default: /* make gcc happy */
		break;
	}

	if (emac_has_feature(dev, EMAC_FTR_HAS_RGMII))
		rgmii_set_speed(dev->rgmii_dev, dev->rgmii_port,
				dev->phy.speed);
	if (emac_has_feature(dev, EMAC_FTR_HAS_ZMII))
		zmii_set_speed(dev->zmii_dev, dev->zmii_port, dev->phy.speed);

	/* An erratum on 40x forces us to NOT use integrated flow control,
	 * let's hope it works on 44x ;)
	 */
	if (!emac_has_feature(dev, EMAC_FTR_NO_FLOW_CONTROL_40x) &&
	    dev->phy.duplex == DUPLEX_FULL) {
		if (dev->phy.pause)
			mr1 |= EMAC_MR1_EIFC | EMAC_MR1_APP;
		else if (dev->phy.asym_pause)
			mr1 |= EMAC_MR1_APP;
	}

	/* Add base settings & fifo sizes & program MR1 */
	mr1 |= emac_calc_base_mr1(dev, tx_size, rx_size);
	out_be32(&p->mr1, mr1);

	/* Set individual MAC address */
	out_be32(&p->iahr, (ndev->dev_addr[0] << 8) | ndev->dev_addr[1]);
	out_be32(&p->ialr, (ndev->dev_addr[2] << 24) |
		 (ndev->dev_addr[3] << 16) | (ndev->dev_addr[4] << 8) |
		 ndev->dev_addr[5]);

	/* VLAN Tag Protocol ID */
	out_be32(&p->vtpid, 0x8100);

	/* Receive mode register */
	r = emac_iff2rmr(ndev);
	if (r & EMAC_RMR_MAE)
		emac_hash_mc(dev);
	out_be32(&p->rmr, r);

	/* FIFOs thresholds */
	if (emac_has_feature(dev, EMAC_FTR_EMAC4))
		r = EMAC4_TMR1((dev->mal_burst_size / dev->fifo_entry_size) + 1,
			       tx_size / 2 / dev->fifo_entry_size);
	else
		r = EMAC_TMR1((dev->mal_burst_size / dev->fifo_entry_size) + 1,
			      tx_size / 2 / dev->fifo_entry_size);
	out_be32(&p->tmr1, r);
	out_be32(&p->trtr, emac_calc_trtr(dev, tx_size / 2));

	/* PAUSE frame is sent when RX FIFO reaches its high-water mark;
	   there should still be enough space in the FIFO to allow our link
	   partner time to process this frame and also time to send a PAUSE
	   frame itself.

	   Here is the worst case scenario for the RX FIFO "headroom"
	   (from "The Switch Book") (100Mbps, without preamble, inter-frame gap):

	   1) One maximum-length frame on TX                    1522 bytes
	   2) One PAUSE frame time                                64 bytes
	   3) PAUSE frame decode time allowance                   64 bytes
	   4) One maximum-length frame on RX                    1522 bytes
	   5) Round-trip propagation delay of the link (100Mb)    15 bytes
	   ----------
	   3187 bytes

	   I chose to set high-water mark to RX_FIFO_SIZE / 4 (1024 bytes)
	   low-water mark  to RX_FIFO_SIZE / 8 (512 bytes)
	 */
	r = emac_calc_rwmr(dev, rx_size / 8 / dev->fifo_entry_size,
			   rx_size / 4 / dev->fifo_entry_size);
	out_be32(&p->rwmr, r);

	/* Set PAUSE timer to the maximum */
	out_be32(&p->ptr, 0xffff);

	/* Set interrupt mask */
	r = EMAC_ISR_OVR | EMAC_ISR_BP | EMAC_ISR_SE |
		EMAC_ISR_ALE | EMAC_ISR_BFCS | EMAC_ISR_PTLE | EMAC_ISR_ORE |
		EMAC_ISR_IRE | EMAC_ISR_TE;
	if (emac_has_feature(dev, EMAC_FTR_EMAC4))
	    r |= EMAC4_ISR_TXPE | EMAC4_ISR_RXPE /* | EMAC4_ISR_TXUE |
						  EMAC4_ISR_RXOE | */;
	out_be32(&p->iser, r);

	/* We need to take GPCS PHY out of isolate mode after EMAC reset */
	if (emac_phy_gpcs(dev->phy.mode))
		emac_mii_reset_phy(&dev->phy);

	/* Required for Pause packet support in EMAC */
	dev_mc_add(ndev, default_mcast_addr, sizeof(default_mcast_addr), 1);

	return 0;
}
static void emac_reinitialize(struct emac_instance *dev)
{
	DBG(dev, "reinitialize" NL);

	emac_netif_stop(dev);
	if (!emac_configure(dev)) {
		emac_tx_enable(dev);
		emac_rx_enable(dev);
	}
	emac_netif_start(dev);
}
static void emac_full_tx_reset(struct emac_instance *dev)
{
	DBG(dev, "full_tx_reset" NL);

	emac_tx_disable(dev);
	mal_disable_tx_channel(dev->mal, dev->mal_tx_chan);
	emac_clean_tx_ring(dev);
	dev->tx_cnt = dev->tx_slot = dev->ack_slot = 0;

	emac_configure(dev);

	mal_enable_tx_channel(dev->mal, dev->mal_tx_chan);
	emac_tx_enable(dev);
	emac_rx_enable(dev);
}
static void emac_reset_work(struct work_struct *work)
{
	struct emac_instance *dev = container_of(work, struct emac_instance, reset_work);

	DBG(dev, "reset_work" NL);

	mutex_lock(&dev->link_lock);
	if (dev->opened) {
		emac_netif_stop(dev);
		emac_full_tx_reset(dev);
		emac_netif_start(dev);
	}
	mutex_unlock(&dev->link_lock);
}
static void emac_tx_timeout(struct net_device *ndev)
{
	struct emac_instance *dev = netdev_priv(ndev);

	DBG(dev, "tx_timeout" NL);

	schedule_work(&dev->reset_work);
}
static inline int emac_phy_done(struct emac_instance *dev, u32 stacr)
{
	int done = !!(stacr & EMAC_STACR_OC);

	if (emac_has_feature(dev, EMAC_FTR_STACR_OC_INVERT))
		done = !done;

	return done;
}
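/* Note: EMAC_STACR_OC set normally means the MDIO transaction has
 * completed, but some chip variants report the bit with inverted
 * polarity, hence the EMAC_FTR_STACR_OC_INVERT feature test above.
 */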
static int __emac_mdio_read(struct emac_instance *dev, u8 id, u8 reg)
{
	struct emac_regs __iomem *p = dev->emacp;
	u32 r = 0;
	int n, err = -ETIMEDOUT;

	mutex_lock(&dev->mdio_lock);

	DBG2(dev, "mdio_read(%02x,%02x)" NL, id, reg);

	/* Enable proper MDIO port */
	if (emac_has_feature(dev, EMAC_FTR_HAS_ZMII))
		zmii_get_mdio(dev->zmii_dev, dev->zmii_port);
	if (emac_has_feature(dev, EMAC_FTR_HAS_RGMII))
		rgmii_get_mdio(dev->rgmii_dev, dev->rgmii_port);

	/* Wait for management interface to become idle */
	n = 10;
	while (!emac_phy_done(dev, in_be32(&p->stacr))) {
		udelay(1);
		if (!--n) {
			DBG2(dev, " -> timeout wait idle\n");
			goto bail;
		}
	}

	/* Issue read command */
	if (emac_has_feature(dev, EMAC_FTR_EMAC4))
		r = EMAC4_STACR_BASE(dev->opb_bus_freq);
	else
		r = EMAC_STACR_BASE(dev->opb_bus_freq);
	if (emac_has_feature(dev, EMAC_FTR_STACR_OC_INVERT))
		r |= EMAC_STACR_OC;
	if (emac_has_feature(dev, EMAC_FTR_HAS_NEW_STACR))
		r |= EMACX_STACR_STAC_READ;
	else
		r |= EMAC_STACR_STAC_READ;
	r |= (reg & EMAC_STACR_PRA_MASK)
		| ((id & EMAC_STACR_PCDA_MASK) << EMAC_STACR_PCDA_SHIFT);
	out_be32(&p->stacr, r);

	/* Wait for read to complete */
	n = 100;
	while (!emac_phy_done(dev, (r = in_be32(&p->stacr)))) {
		udelay(1);
		if (!--n) {
			DBG2(dev, " -> timeout wait complete\n");
			goto bail;
		}
	}

	if (unlikely(r & EMAC_STACR_PHYE)) {
		DBG(dev, "mdio_read(%02x, %02x) failed" NL, id, reg);
		err = -EREMOTEIO;
		goto bail;
	}

	r = ((r >> EMAC_STACR_PHYD_SHIFT) & EMAC_STACR_PHYD_MASK);

	DBG2(dev, "mdio_read -> %04x" NL, r);
	err = 0;
 bail:
	if (emac_has_feature(dev, EMAC_FTR_HAS_RGMII))
		rgmii_put_mdio(dev->rgmii_dev, dev->rgmii_port);
	if (emac_has_feature(dev, EMAC_FTR_HAS_ZMII))
		zmii_put_mdio(dev->zmii_dev, dev->zmii_port);
	mutex_unlock(&dev->mdio_lock);

	return err == 0 ? r : err;
}
static void __emac_mdio_write(struct emac_instance *dev, u8 id, u8 reg,
			      u16 val)
{
	struct emac_regs __iomem *p = dev->emacp;
	u32 r = 0;
	int n, err = -ETIMEDOUT;

	mutex_lock(&dev->mdio_lock);

	DBG2(dev, "mdio_write(%02x,%02x,%04x)" NL, id, reg, val);

	/* Enable proper MDIO port */
	if (emac_has_feature(dev, EMAC_FTR_HAS_ZMII))
		zmii_get_mdio(dev->zmii_dev, dev->zmii_port);
	if (emac_has_feature(dev, EMAC_FTR_HAS_RGMII))
		rgmii_get_mdio(dev->rgmii_dev, dev->rgmii_port);

	/* Wait for management interface to be idle */
	n = 10;
	while (!emac_phy_done(dev, in_be32(&p->stacr))) {
		udelay(1);
		if (!--n) {
			DBG2(dev, " -> timeout wait idle\n");
			goto bail;
		}
	}

	/* Issue write command */
	if (emac_has_feature(dev, EMAC_FTR_EMAC4))
		r = EMAC4_STACR_BASE(dev->opb_bus_freq);
	else
		r = EMAC_STACR_BASE(dev->opb_bus_freq);
	if (emac_has_feature(dev, EMAC_FTR_STACR_OC_INVERT))
		r |= EMAC_STACR_OC;
	if (emac_has_feature(dev, EMAC_FTR_HAS_NEW_STACR))
		r |= EMACX_STACR_STAC_WRITE;
	else
		r |= EMAC_STACR_STAC_WRITE;
	r |= (reg & EMAC_STACR_PRA_MASK) |
		((id & EMAC_STACR_PCDA_MASK) << EMAC_STACR_PCDA_SHIFT) |
		(val << EMAC_STACR_PHYD_SHIFT);
	out_be32(&p->stacr, r);

	/* Wait for write to complete */
	n = 100;
	while (!emac_phy_done(dev, in_be32(&p->stacr))) {
		udelay(1);
		if (!--n) {
			DBG2(dev, " -> timeout wait complete\n");
			goto bail;
		}
	}
	err = 0;
 bail:
	if (emac_has_feature(dev, EMAC_FTR_HAS_RGMII))
		rgmii_put_mdio(dev->rgmii_dev, dev->rgmii_port);
	if (emac_has_feature(dev, EMAC_FTR_HAS_ZMII))
		zmii_put_mdio(dev->zmii_dev, dev->zmii_port);
	mutex_unlock(&dev->mdio_lock);
}
static int emac_mdio_read(struct net_device *ndev, int id, int reg)
{
	struct emac_instance *dev = netdev_priv(ndev);
	int res;

	res = __emac_mdio_read(dev->mdio_instance ? dev->mdio_instance : dev,
			       (u8) id, (u8) reg);
	return res;
}

static void emac_mdio_write(struct net_device *ndev, int id, int reg, int val)
{
	struct emac_instance *dev = netdev_priv(ndev);

	__emac_mdio_write(dev->mdio_instance ? dev->mdio_instance : dev,
			  (u8) id, (u8) reg, (u16) val);
}
static void __emac_set_multicast_list(struct emac_instance *dev)
{
	struct emac_regs __iomem *p = dev->emacp;
	u32 rmr = emac_iff2rmr(dev->ndev);

	DBG(dev, "__multicast %08x" NL, rmr);

	/* I decided to relax register access rules here to avoid
	 * a full EMAC reset.
	 *
	 * There is a real problem with EMAC4 core if we use MWSW_001 bit
	 * in MR1 register and do a full EMAC reset.
	 * One TX BD status update is delayed and, after EMAC reset, it
	 * never happens, resulting in TX hang (it'll be recovered by TX
	 * timeout handler eventually, but this is just gross).
	 * So we either have to do full TX reset or try to cheat here :)
	 *
	 * The only required change is to RX mode register, so I *think* all
	 * we need is just to stop RX channel. This seems to work on all
	 * tested SoCs.  --ebs
	 *
	 * If we need the full reset, we might just trigger the workqueue
	 * and do it async... a bit nasty but should work --BenH
	 */
	dev->mcast_pending = 0;
	emac_rx_disable(dev);
	if (rmr & EMAC_RMR_MAE)
		emac_hash_mc(dev);
	out_be32(&p->rmr, rmr);
	emac_rx_enable(dev);
}
static void emac_set_multicast_list(struct net_device *ndev)
{
	struct emac_instance *dev = netdev_priv(ndev);

	DBG(dev, "multicast" NL);

	BUG_ON(!netif_running(dev->ndev));

	if (dev->no_mcast) {
		dev->mcast_pending = 1;
		return;
	}
	__emac_set_multicast_list(dev);
}
static int emac_resize_rx_ring(struct emac_instance *dev, int new_mtu)
{
	int rx_sync_size = emac_rx_sync_size(new_mtu);
	int rx_skb_size = emac_rx_skb_size(new_mtu);
	int i, ret = 0;

	mutex_lock(&dev->link_lock);
	emac_netif_stop(dev);
	emac_rx_disable(dev);
	mal_disable_rx_channel(dev->mal, dev->mal_rx_chan);

	if (dev->rx_sg_skb) {
		++dev->estats.rx_dropped_resize;
		dev_kfree_skb(dev->rx_sg_skb);
		dev->rx_sg_skb = NULL;
	}

	/* Make a first pass over RX ring and mark BDs ready, dropping
	 * non-processed packets on the way. We need this as a separate pass
	 * to simplify error recovery in the case of allocation failure later.
	 */
	for (i = 0; i < NUM_RX_BUFF; ++i) {
		if (dev->rx_desc[i].ctrl & MAL_RX_CTRL_FIRST)
			++dev->estats.rx_dropped_resize;

		dev->rx_desc[i].data_len = 0;
		dev->rx_desc[i].ctrl = MAL_RX_CTRL_EMPTY |
		    (i == (NUM_RX_BUFF - 1) ? MAL_RX_CTRL_WRAP : 0);
	}

	/* Reallocate RX ring only if bigger skb buffers are required */
	if (rx_skb_size <= dev->rx_skb_size)
		goto skip;

	/* Second pass, allocate new skbs */
	for (i = 0; i < NUM_RX_BUFF; ++i) {
		struct sk_buff *skb = alloc_skb(rx_skb_size, GFP_ATOMIC);
		if (!skb) {
			ret = -ENOMEM;
			goto oom;
		}

		BUG_ON(!dev->rx_skb[i]);
		dev_kfree_skb(dev->rx_skb[i]);

		skb_reserve(skb, EMAC_RX_SKB_HEADROOM + 2);
		dev->rx_desc[i].data_ptr =
		    dma_map_single(&dev->ofdev->dev, skb->data - 2, rx_sync_size,
				   DMA_FROM_DEVICE) + 2;
		dev->rx_skb[i] = skb;
	}
 skip:
	/* Check if we need to change "Jumbo" bit in MR1 */
	if ((new_mtu > ETH_DATA_LEN) ^ (dev->ndev->mtu > ETH_DATA_LEN)) {
		/* This is to prevent starting RX channel in emac_rx_enable() */
		set_bit(MAL_COMMAC_RX_STOPPED, &dev->commac.flags);

		dev->ndev->mtu = new_mtu;
		emac_full_tx_reset(dev);
	}

	mal_set_rcbs(dev->mal, dev->mal_rx_chan, emac_rx_size(new_mtu));
 oom:
	/* Restart RX */
	clear_bit(MAL_COMMAC_RX_STOPPED, &dev->commac.flags);
	dev->rx_slot = 0;
	mal_enable_rx_channel(dev->mal, dev->mal_rx_chan);
	emac_rx_enable(dev);
	emac_netif_start(dev);
	mutex_unlock(&dev->link_lock);

	return ret;
}
/* Process ctx, rtnl_lock semaphore */
static int emac_change_mtu(struct net_device *ndev, int new_mtu)
{
	struct emac_instance *dev = netdev_priv(ndev);
	int ret = 0;

	if (new_mtu < EMAC_MIN_MTU || new_mtu > dev->max_mtu)
		return -EINVAL;

	DBG(dev, "change_mtu(%d)" NL, new_mtu);

	if (netif_running(ndev)) {
		/* Check if we really need to reinitialize RX ring */
		if (emac_rx_skb_size(ndev->mtu) != emac_rx_skb_size(new_mtu))
			ret = emac_resize_rx_ring(dev, new_mtu);
	}

	if (!ret) {
		ndev->mtu = new_mtu;
		dev->rx_skb_size = emac_rx_skb_size(new_mtu);
		dev->rx_sync_size = emac_rx_sync_size(new_mtu);
	}

	return ret;
}
static void emac_clean_tx_ring(struct emac_instance *dev)
{
	int i;

	for (i = 0; i < NUM_TX_BUFF; ++i) {
		if (dev->tx_skb[i]) {
			dev_kfree_skb(dev->tx_skb[i]);
			dev->tx_skb[i] = NULL;
			if (dev->tx_desc[i].ctrl & MAL_TX_CTRL_READY)
				++dev->estats.tx_dropped;
		}
		dev->tx_desc[i].ctrl = 0;
		dev->tx_desc[i].data_ptr = 0;
	}
}
static void emac_clean_rx_ring(struct emac_instance *dev)
{
	int i;

	for (i = 0; i < NUM_RX_BUFF; ++i)
		if (dev->rx_skb[i]) {
			dev->rx_desc[i].ctrl = 0;
			dev_kfree_skb(dev->rx_skb[i]);
			dev->rx_skb[i] = NULL;
			dev->rx_desc[i].data_ptr = 0;
		}

	if (dev->rx_sg_skb) {
		dev_kfree_skb(dev->rx_sg_skb);
		dev->rx_sg_skb = NULL;
	}
}
static inline int emac_alloc_rx_skb(struct emac_instance *dev, int slot,
				    gfp_t flags)
{
	struct sk_buff *skb = alloc_skb(dev->rx_skb_size, flags);
	if (unlikely(!skb))
		return -ENOMEM;

	dev->rx_skb[slot] = skb;
	dev->rx_desc[slot].data_len = 0;

	skb_reserve(skb, EMAC_RX_SKB_HEADROOM + 2);
	dev->rx_desc[slot].data_ptr =
	    dma_map_single(&dev->ofdev->dev, skb->data - 2, dev->rx_sync_size,
			   DMA_FROM_DEVICE) + 2;
	wmb();
	dev->rx_desc[slot].ctrl = MAL_RX_CTRL_EMPTY |
	    (slot == (NUM_RX_BUFF - 1) ? MAL_RX_CTRL_WRAP : 0);

	return 0;
}
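/* The +2/-2 offsets above are the usual IP alignment trick: the skb data
 * pointer is advanced 2 bytes so the IP header following the 14-byte
 * Ethernet header lands on a word boundary, while the DMA mapping is
 * widened by 2 bytes to cover the start of the frame again.
 */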
static void emac_print_link_status(struct emac_instance *dev)
{
	if (netif_carrier_ok(dev->ndev))
		printk(KERN_INFO "%s: link is up, %d %s%s\n",
		       dev->ndev->name, dev->phy.speed,
		       dev->phy.duplex == DUPLEX_FULL ? "FDX" : "HDX",
		       dev->phy.pause ? ", pause enabled" :
		       dev->phy.asym_pause ? ", asymmetric pause enabled" : "");
	else
		printk(KERN_INFO "%s: link is down\n", dev->ndev->name);
}
/* Process ctx, rtnl_lock semaphore */
static int emac_open(struct net_device *ndev)
{
	struct emac_instance *dev = netdev_priv(ndev);
	int err, i;

	DBG(dev, "open" NL);

	/* Setup error IRQ handler */
	err = request_irq(dev->emac_irq, emac_irq, 0, "EMAC", dev);
	if (err) {
		printk(KERN_ERR "%s: failed to request IRQ %d\n",
		       ndev->name, dev->emac_irq);
		return err;
	}

	/* Allocate RX ring */
	for (i = 0; i < NUM_RX_BUFF; ++i)
		if (emac_alloc_rx_skb(dev, i, GFP_KERNEL)) {
			printk(KERN_ERR "%s: failed to allocate RX ring\n",
			       ndev->name);
			goto oom;
		}

	dev->tx_cnt = dev->tx_slot = dev->ack_slot = dev->rx_slot = 0;
	clear_bit(MAL_COMMAC_RX_STOPPED, &dev->commac.flags);
	dev->rx_sg_skb = NULL;

	mutex_lock(&dev->link_lock);
	dev->opened = 1;

	/* Start PHY polling now.
	 */
	if (dev->phy.address >= 0) {
		int link_poll_interval;
		if (dev->phy.def->ops->poll_link(&dev->phy)) {
			dev->phy.def->ops->read_link(&dev->phy);
			netif_carrier_on(dev->ndev);
			link_poll_interval = PHY_POLL_LINK_ON;
		} else {
			netif_carrier_off(dev->ndev);
			link_poll_interval = PHY_POLL_LINK_OFF;
		}
		dev->link_polling = 1;
		wmb();
		schedule_delayed_work(&dev->link_work, link_poll_interval);
		emac_print_link_status(dev);
	} else
		netif_carrier_on(dev->ndev);

	emac_configure(dev);
	mal_poll_add(dev->mal, &dev->commac);
	mal_enable_tx_channel(dev->mal, dev->mal_tx_chan);
	mal_set_rcbs(dev->mal, dev->mal_rx_chan, emac_rx_size(ndev->mtu));
	mal_enable_rx_channel(dev->mal, dev->mal_rx_chan);
	emac_tx_enable(dev);
	emac_rx_enable(dev);
	emac_netif_start(dev);

	mutex_unlock(&dev->link_lock);

	return 0;
 oom:
	emac_clean_rx_ring(dev);
	free_irq(dev->emac_irq, dev);

	return -ENOMEM;
}
static int emac_link_differs(struct emac_instance *dev)
{
	u32 r = in_be32(&dev->emacp->mr1);

	int duplex = r & EMAC_MR1_FDE ? DUPLEX_FULL : DUPLEX_HALF;
	int speed, pause, asym_pause;

	if (r & EMAC_MR1_MF_1000)
		speed = SPEED_1000;
	else if (r & EMAC_MR1_MF_100)
		speed = SPEED_100;
	else
		speed = SPEED_10;

	switch (r & (EMAC_MR1_EIFC | EMAC_MR1_APP)) {
	case (EMAC_MR1_EIFC | EMAC_MR1_APP):
		pause = 1;
		asym_pause = 0;
		break;
	case EMAC_MR1_APP:
		pause = 0;
		asym_pause = 1;
		break;
	default:
		pause = asym_pause = 0;
	}
	return speed != dev->phy.speed || duplex != dev->phy.duplex ||
	    pause != dev->phy.pause || asym_pause != dev->phy.asym_pause;
}
static void emac_link_timer(struct work_struct *work)
{
	struct emac_instance *dev =
		container_of((struct delayed_work *)work,
			     struct emac_instance, link_work);
	int link_poll_interval;

	mutex_lock(&dev->link_lock);
	DBG2(dev, "link timer" NL);

	if (!dev->opened)
		goto bail;

	if (dev->phy.def->ops->poll_link(&dev->phy)) {
		if (!netif_carrier_ok(dev->ndev)) {
			/* Get new link parameters */
			dev->phy.def->ops->read_link(&dev->phy);

			netif_carrier_on(dev->ndev);
			emac_netif_stop(dev);
			emac_full_tx_reset(dev);
			emac_netif_start(dev);
			emac_print_link_status(dev);
		}
		link_poll_interval = PHY_POLL_LINK_ON;
	} else {
		if (netif_carrier_ok(dev->ndev)) {
			netif_carrier_off(dev->ndev);
			netif_tx_disable(dev->ndev);
			emac_reinitialize(dev);
			emac_print_link_status(dev);
		}
		link_poll_interval = PHY_POLL_LINK_OFF;
	}
	schedule_delayed_work(&dev->link_work, link_poll_interval);
 bail:
	mutex_unlock(&dev->link_lock);
}
static void emac_force_link_update(struct emac_instance *dev)
{
	netif_carrier_off(dev->ndev);
	smp_rmb();
	if (dev->link_polling) {
		cancel_rearming_delayed_work(&dev->link_work);
		if (dev->link_polling)
			schedule_delayed_work(&dev->link_work,  PHY_POLL_LINK_OFF);
	}
}
/* Process ctx, rtnl_lock semaphore */
static int emac_close(struct net_device *ndev)
{
	struct emac_instance *dev = netdev_priv(ndev);

	DBG(dev, "close" NL);

	if (dev->phy.address >= 0) {
		dev->link_polling = 0;
		cancel_rearming_delayed_work(&dev->link_work);
	}
	mutex_lock(&dev->link_lock);
	emac_netif_stop(dev);
	dev->opened = 0;
	mutex_unlock(&dev->link_lock);

	emac_rx_disable(dev);
	emac_tx_disable(dev);
	mal_disable_rx_channel(dev->mal, dev->mal_rx_chan);
	mal_disable_tx_channel(dev->mal, dev->mal_tx_chan);
	mal_poll_del(dev->mal, &dev->commac);

	emac_clean_tx_ring(dev);
	emac_clean_rx_ring(dev);

	free_irq(dev->emac_irq, dev);

	return 0;
}
static inline u16 emac_tx_csum(struct emac_instance *dev,
			       struct sk_buff *skb)
{
	if (emac_has_feature(dev, EMAC_FTR_HAS_TAH) &&
		(skb->ip_summed == CHECKSUM_PARTIAL)) {
		++dev->stats.tx_packets_csum;
		return EMAC_TX_CTRL_TAH_CSUM;
	}
	return 0;
}
static inline int emac_xmit_finish(struct emac_instance *dev, int len)
{
	struct emac_regs __iomem *p = dev->emacp;
	struct net_device *ndev = dev->ndev;

	/* Send the packet out. If the if makes a significant perf
	 * difference, then we can store the TMR0 value in "dev"
	 * instead
	 */
	if (emac_has_feature(dev, EMAC_FTR_EMAC4))
		out_be32(&p->tmr0, EMAC4_TMR0_XMIT);
	else
		out_be32(&p->tmr0, EMAC_TMR0_XMIT);

	if (unlikely(++dev->tx_cnt == NUM_TX_BUFF)) {
		netif_stop_queue(ndev);
		DBG2(dev, "stopped TX queue" NL);
	}

	ndev->trans_start = jiffies;
	++dev->stats.tx_packets;
	dev->stats.tx_bytes += len;

	return 0;
}
static int emac_start_xmit(struct sk_buff *skb, struct net_device *ndev)
{
	struct emac_instance *dev = netdev_priv(ndev);
	unsigned int len = skb->len;
	int slot;

	u16 ctrl = EMAC_TX_CTRL_GFCS | EMAC_TX_CTRL_GP | MAL_TX_CTRL_READY |
	    MAL_TX_CTRL_LAST | emac_tx_csum(dev, skb);

	slot = dev->tx_slot++;
	if (dev->tx_slot == NUM_TX_BUFF) {
		dev->tx_slot = 0;
		ctrl |= MAL_TX_CTRL_WRAP;
	}

	DBG2(dev, "xmit(%u) %d" NL, len, slot);

	dev->tx_skb[slot] = skb;
	dev->tx_desc[slot].data_ptr = dma_map_single(&dev->ofdev->dev,
						     skb->data, len,
						     DMA_TO_DEVICE);
	dev->tx_desc[slot].data_len = (u16) len;
	wmb();
	dev->tx_desc[slot].ctrl = ctrl;

	return emac_xmit_finish(dev, len);
}
static inline int emac_xmit_split(struct emac_instance *dev, int slot,
				  u32 pd, int len, int last, u16 base_ctrl)
{
	while (1) {
		u16 ctrl = base_ctrl;
		int chunk = min(len, MAL_MAX_TX_SIZE);
		len -= chunk;

		slot = (slot + 1) % NUM_TX_BUFF;

		if (last && !len)
			ctrl |= MAL_TX_CTRL_LAST;
		if (slot == NUM_TX_BUFF - 1)
			ctrl |= MAL_TX_CTRL_WRAP;

		dev->tx_skb[slot] = NULL;
		dev->tx_desc[slot].data_ptr = pd;
		dev->tx_desc[slot].data_len = (u16) chunk;
		dev->tx_desc[slot].ctrl = ctrl;
		++dev->tx_cnt;

		if (!len)
			break;

		pd += chunk;
	}
	return slot;
}
/* Tx lock BH disabled (SG version for TAH equipped EMACs) */
static int emac_start_xmit_sg(struct sk_buff *skb, struct net_device *ndev)
{
	struct emac_instance *dev = netdev_priv(ndev);
	int nr_frags = skb_shinfo(skb)->nr_frags;
	int len = skb->len, chunk;
	int slot, i;
	u16 ctrl;
	u32 pd;

	/* This is common "fast" path */
	if (likely(!nr_frags && len <= MAL_MAX_TX_SIZE))
		return emac_start_xmit(skb, ndev);

	len -= skb->data_len;

	/* Note, this is only an *estimation*, we can still run out of empty
	 * slots because of the additional fragmentation into
	 * MAL_MAX_TX_SIZE-sized chunks
	 */
	if (unlikely(dev->tx_cnt + nr_frags + mal_tx_chunks(len) > NUM_TX_BUFF))
		goto stop_queue;

	ctrl = EMAC_TX_CTRL_GFCS | EMAC_TX_CTRL_GP | MAL_TX_CTRL_READY |
	    emac_tx_csum(dev, skb);
	slot = dev->tx_slot;

	/* skb data */
	dev->tx_skb[slot] = NULL;
	chunk = min(len, MAL_MAX_TX_SIZE);
	dev->tx_desc[slot].data_ptr = pd =
	    dma_map_single(&dev->ofdev->dev, skb->data, len, DMA_TO_DEVICE);
	dev->tx_desc[slot].data_len = (u16) chunk;
	len -= chunk;
	if (unlikely(len))
		slot = emac_xmit_split(dev, slot, pd + chunk, len, !nr_frags,
				       ctrl);
	/* skb fragments */
	for (i = 0; i < nr_frags; ++i) {
		struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[i];
		len = frag->size;

		if (unlikely(dev->tx_cnt + mal_tx_chunks(len) >= NUM_TX_BUFF))
			goto undo_frame;

		pd = dma_map_page(&dev->ofdev->dev, frag->page, frag->page_offset, len,
				  DMA_TO_DEVICE);

		slot = emac_xmit_split(dev, slot, pd, len, i == nr_frags - 1,
				       ctrl);
	}

	DBG2(dev, "xmit_sg(%u) %d - %d" NL, skb->len, dev->tx_slot, slot);

	/* Attach skb to the last slot so we don't release it too early */
	dev->tx_skb[slot] = skb;

	/* Send the packet out */
	if (dev->tx_slot == NUM_TX_BUFF - 1)
		ctrl |= MAL_TX_CTRL_WRAP;
	wmb();
	dev->tx_desc[dev->tx_slot].ctrl = ctrl;
	dev->tx_slot = (slot + 1) % NUM_TX_BUFF;

	return emac_xmit_finish(dev, skb->len);

 undo_frame:
	/* Well, too bad. Our previous estimation was overly optimistic.
	 * Undo everything.
	 */
	while (slot != dev->tx_slot) {
		dev->tx_desc[slot].ctrl = 0;
		--dev->tx_cnt;
		if (--slot < 0)
			slot = NUM_TX_BUFF - 1;
	}
	++dev->estats.tx_undo;

 stop_queue:
	netif_stop_queue(ndev);
	DBG2(dev, "stopped TX queue" NL);
	return 1;
}
static void emac_parse_tx_error(struct emac_instance *dev, u16 ctrl)
{
	struct emac_error_stats *st = &dev->estats;

	DBG(dev, "BD TX error %04x" NL, ctrl);

	++st->tx_bd_errors;
	if (ctrl & EMAC_TX_ST_BFCS)
		++st->tx_bd_bad_fcs;
	if (ctrl & EMAC_TX_ST_LCS)
		++st->tx_bd_carrier_loss;
	if (ctrl & EMAC_TX_ST_ED)
		++st->tx_bd_excessive_deferral;
	if (ctrl & EMAC_TX_ST_EC)
		++st->tx_bd_excessive_collisions;
	if (ctrl & EMAC_TX_ST_LC)
		++st->tx_bd_late_collision;
	if (ctrl & EMAC_TX_ST_MC)
		++st->tx_bd_multple_collisions;
	if (ctrl & EMAC_TX_ST_SC)
		++st->tx_bd_single_collision;
	if (ctrl & EMAC_TX_ST_UR)
		++st->tx_bd_underrun;
	if (ctrl & EMAC_TX_ST_SQE)
		++st->tx_bd_sqe;
}
static void emac_poll_tx(void *param)
{
	struct emac_instance *dev = param;
	u32 bad_mask;

	DBG2(dev, "poll_tx, %d %d" NL, dev->tx_cnt, dev->ack_slot);

	if (emac_has_feature(dev, EMAC_FTR_HAS_TAH))
		bad_mask = EMAC_IS_BAD_TX_TAH;
	else
		bad_mask = EMAC_IS_BAD_TX;

	netif_tx_lock_bh(dev->ndev);
	if (dev->tx_cnt) {
		u16 ctrl;
		int slot = dev->ack_slot, n = 0;
	again:
		ctrl = dev->tx_desc[slot].ctrl;
		if (!(ctrl & MAL_TX_CTRL_READY)) {
			struct sk_buff *skb = dev->tx_skb[slot];
			++n;

			if (skb) {
				dev_kfree_skb(skb);
				dev->tx_skb[slot] = NULL;
			}
			slot = (slot + 1) % NUM_TX_BUFF;

			if (unlikely(ctrl & bad_mask))
				emac_parse_tx_error(dev, ctrl);

			if (--dev->tx_cnt)
				goto again;
		}
		if (n) {
			dev->ack_slot = slot;
			if (netif_queue_stopped(dev->ndev) &&
			    dev->tx_cnt < EMAC_TX_WAKEUP_THRESH)
				netif_wake_queue(dev->ndev);

			DBG2(dev, "tx %d pkts" NL, n);
		}
	}
	netif_tx_unlock_bh(dev->ndev);
}
static inline void emac_recycle_rx_skb(struct emac_instance *dev, int slot,
				       int len)
{
	struct sk_buff *skb = dev->rx_skb[slot];

	DBG2(dev, "recycle %d %d" NL, slot, len);

	if (len)
		dma_map_single(&dev->ofdev->dev, skb->data - 2,
			       EMAC_DMA_ALIGN(len + 2), DMA_FROM_DEVICE);

	dev->rx_desc[slot].data_len = 0;
	wmb();
	dev->rx_desc[slot].ctrl = MAL_RX_CTRL_EMPTY |
	    (slot == (NUM_RX_BUFF - 1) ? MAL_RX_CTRL_WRAP : 0);
}
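/* The dma_map_single() above effectively serves as a cache flush/invalidate
 * for the recycled buffer: on 4xx the DMA API only manages cache coherency
 * (see the note at the top of this file), so the mapping is never unmapped
 * and re-mapping the same buffer is the cheap way to re-arm it for DMA.
 */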
static void emac_parse_rx_error(struct emac_instance *dev, u16 ctrl)
{
	struct emac_error_stats *st = &dev->estats;

	DBG(dev, "BD RX error %04x" NL, ctrl);

	++st->rx_bd_errors;
	if (ctrl & EMAC_RX_ST_OE)
		++st->rx_bd_overrun;
	if (ctrl & EMAC_RX_ST_BP)
		++st->rx_bd_bad_packet;
	if (ctrl & EMAC_RX_ST_RP)
		++st->rx_bd_runt_packet;
	if (ctrl & EMAC_RX_ST_SE)
		++st->rx_bd_short_event;
	if (ctrl & EMAC_RX_ST_AE)
		++st->rx_bd_alignment_error;
	if (ctrl & EMAC_RX_ST_BFCS)
		++st->rx_bd_bad_fcs;
	if (ctrl & EMAC_RX_ST_PTL)
		++st->rx_bd_packet_too_long;
	if (ctrl & EMAC_RX_ST_ORE)
		++st->rx_bd_out_of_range;
	if (ctrl & EMAC_RX_ST_IRE)
		++st->rx_bd_in_range;
}
static inline void emac_rx_csum(struct emac_instance *dev,
				struct sk_buff *skb, u16 ctrl)
{
#ifdef CONFIG_IBM_NEW_EMAC_TAH
	if (!ctrl && dev->tah_dev) {
		skb->ip_summed = CHECKSUM_UNNECESSARY;
		++dev->stats.rx_packets_csum;
	}
#endif
}
static inline int emac_rx_sg_append(struct emac_instance *dev, int slot)
{
	if (likely(dev->rx_sg_skb != NULL)) {
		int len = dev->rx_desc[slot].data_len;
		int tot_len = dev->rx_sg_skb->len + len;

		if (unlikely(tot_len + 2 > dev->rx_skb_size)) {
			++dev->estats.rx_dropped_mtu;
			dev_kfree_skb(dev->rx_sg_skb);
			dev->rx_sg_skb = NULL;
		} else {
			cacheable_memcpy(skb_tail_pointer(dev->rx_sg_skb),
					 dev->rx_skb[slot]->data, len);
			skb_put(dev->rx_sg_skb, len);
			emac_recycle_rx_skb(dev, slot, len);
			return 0;
		}
	}
	emac_recycle_rx_skb(dev, slot, 0);
	return -1;
}
/* NAPI poll context */
static int emac_poll_rx(void *param, int budget)
{
	struct emac_instance *dev = param;
	int slot = dev->rx_slot, received = 0;

	DBG2(dev, "poll_rx(%d)" NL, budget);

 again:
	while (budget > 0) {
		int len;
		struct sk_buff *skb;
		u16 ctrl = dev->rx_desc[slot].ctrl;

		if (ctrl & MAL_RX_CTRL_EMPTY)
			break;

		skb = dev->rx_skb[slot];
		mb();
		len = dev->rx_desc[slot].data_len;

		if (unlikely(!MAL_IS_SINGLE_RX(ctrl)))
			goto sg;

		ctrl &= EMAC_BAD_RX_MASK;
		if (unlikely(ctrl && ctrl != EMAC_RX_TAH_BAD_CSUM)) {
			emac_parse_rx_error(dev, ctrl);
			++dev->estats.rx_dropped_error;
			emac_recycle_rx_skb(dev, slot, 0);
			len = 0;
			goto next;
		}

		if (len && len < EMAC_RX_COPY_THRESH) {
			struct sk_buff *copy_skb =
			    alloc_skb(len + EMAC_RX_SKB_HEADROOM + 2, GFP_ATOMIC);
			if (unlikely(!copy_skb))
				goto oom;

			skb_reserve(copy_skb, EMAC_RX_SKB_HEADROOM + 2);
			cacheable_memcpy(copy_skb->data - 2, skb->data - 2,
					 len + 2);
			emac_recycle_rx_skb(dev, slot, len);
			skb = copy_skb;
		} else if (unlikely(emac_alloc_rx_skb(dev, slot, GFP_ATOMIC)))
			goto oom;

		skb_put(skb, len);
	push_packet:
		skb->dev = dev->ndev;
		skb->protocol = eth_type_trans(skb, dev->ndev);
		emac_rx_csum(dev, skb, ctrl);

		if (unlikely(netif_receive_skb(skb) == NET_RX_DROP))
			++dev->estats.rx_dropped_stack;
	next:
		++dev->stats.rx_packets;
	skip:
		dev->stats.rx_bytes += len;
		slot = (slot + 1) % NUM_RX_BUFF;
		--budget;
		++received;
		continue;
	sg:
		if (ctrl & MAL_RX_CTRL_FIRST) {
			BUG_ON(dev->rx_sg_skb);
			if (unlikely(emac_alloc_rx_skb(dev, slot, GFP_ATOMIC))) {
				DBG(dev, "rx OOM %d" NL, slot);
				++dev->estats.rx_dropped_oom;
				emac_recycle_rx_skb(dev, slot, 0);
			} else {
				dev->rx_sg_skb = skb;
				skb_put(skb, len);
			}
		} else if (!emac_rx_sg_append(dev, slot) &&
			   (ctrl & MAL_RX_CTRL_LAST)) {

			skb = dev->rx_sg_skb;
			dev->rx_sg_skb = NULL;

			ctrl &= EMAC_BAD_RX_MASK;
			if (unlikely(ctrl && ctrl != EMAC_RX_TAH_BAD_CSUM)) {
				emac_parse_rx_error(dev, ctrl);
				++dev->estats.rx_dropped_error;
				dev_kfree_skb(skb);
				len = 0;
			} else
				goto push_packet;
		}
		goto skip;
	oom:
		DBG(dev, "rx OOM %d" NL, slot);
		/* Drop the packet and recycle skb */
		++dev->estats.rx_dropped_oom;
		emac_recycle_rx_skb(dev, slot, 0);
		goto next;
	}

	if (received) {
		DBG2(dev, "rx %d BDs" NL, received);
		dev->rx_slot = slot;
	}

	if (unlikely(budget && test_bit(MAL_COMMAC_RX_STOPPED, &dev->commac.flags))) {
		mb();
		if (!(dev->rx_desc[slot].ctrl & MAL_RX_CTRL_EMPTY)) {
			DBG2(dev, "rx restart" NL);
			received = 0;
			goto again;
		}

		if (dev->rx_sg_skb) {
			DBG2(dev, "dropping partial rx packet" NL);
			++dev->estats.rx_dropped_error;
			dev_kfree_skb(dev->rx_sg_skb);
			dev->rx_sg_skb = NULL;
		}

		clear_bit(MAL_COMMAC_RX_STOPPED, &dev->commac.flags);
		mal_enable_rx_channel(dev->mal, dev->mal_rx_chan);
		emac_rx_enable(dev);
		dev->rx_slot = 0;
	}
	return received;
}
/* NAPI poll context */
static int emac_peek_rx(void *param)
{
	struct emac_instance *dev = param;

	return !(dev->rx_desc[dev->rx_slot].ctrl & MAL_RX_CTRL_EMPTY);
}

/* NAPI poll context */
static int emac_peek_rx_sg(void *param)
{
	struct emac_instance *dev = param;

	int slot = dev->rx_slot;
	while (1) {
		u16 ctrl = dev->rx_desc[slot].ctrl;
		if (ctrl & MAL_RX_CTRL_EMPTY)
			return 0;
		else if (ctrl & MAL_RX_CTRL_LAST)
			return 1;

		slot = (slot + 1) % NUM_RX_BUFF;

		/* I'm just being paranoid here :) */
		if (unlikely(slot == dev->rx_slot))
			return 0;
	}
}
static void emac_rxde(void *param)
{
	struct emac_instance *dev = param;

	++dev->estats.rx_stopped;
	emac_rx_disable_async(dev);
}
static irqreturn_t emac_irq(int irq, void *dev_instance)
{
	struct emac_instance *dev = dev_instance;
	struct emac_regs __iomem *p = dev->emacp;
	struct emac_error_stats *st = &dev->estats;
	u32 isr;

	spin_lock(&dev->lock);

	isr = in_be32(&p->isr);
	out_be32(&p->isr, isr);

	DBG(dev, "isr = %08x" NL, isr);

	if (isr & EMAC4_ISR_TXPE)
		++st->tx_parity;
	if (isr & EMAC4_ISR_RXPE)
		++st->rx_parity;
	if (isr & EMAC4_ISR_TXUE)
		++st->tx_underrun;
	if (isr & EMAC4_ISR_RXOE)
		++st->rx_fifo_overrun;
	if (isr & EMAC_ISR_OVR)
		++st->rx_overrun;
	if (isr & EMAC_ISR_BP)
		++st->rx_bad_packet;
	if (isr & EMAC_ISR_RP)
		++st->rx_runt_packet;
	if (isr & EMAC_ISR_SE)
		++st->rx_short_event;
	if (isr & EMAC_ISR_ALE)
		++st->rx_alignment_error;
	if (isr & EMAC_ISR_BFCS)
		++st->rx_bad_fcs;
	if (isr & EMAC_ISR_PTLE)
		++st->rx_packet_too_long;
	if (isr & EMAC_ISR_ORE)
		++st->rx_out_of_range;
	if (isr & EMAC_ISR_IRE)
		++st->rx_in_range;
	if (isr & EMAC_ISR_SQE)
		++st->tx_sqe;
	if (isr & EMAC_ISR_TE)
		++st->tx_errors;

	spin_unlock(&dev->lock);

	return IRQ_HANDLED;
}
static struct net_device_stats *emac_stats(struct net_device *ndev)
{
	struct emac_instance *dev = netdev_priv(ndev);
	struct emac_stats *st = &dev->stats;
	struct emac_error_stats *est = &dev->estats;
	struct net_device_stats *nst = &dev->nstats;
	unsigned long flags;

	DBG2(dev, "stats" NL);

	/* Compute "legacy" statistics */
	spin_lock_irqsave(&dev->lock, flags);
	nst->rx_packets = (unsigned long)st->rx_packets;
	nst->rx_bytes = (unsigned long)st->rx_bytes;
	nst->tx_packets = (unsigned long)st->tx_packets;
	nst->tx_bytes = (unsigned long)st->tx_bytes;
	nst->rx_dropped = (unsigned long)(est->rx_dropped_oom +
					  est->rx_dropped_error +
					  est->rx_dropped_resize +
					  est->rx_dropped_mtu);
	nst->tx_dropped = (unsigned long)est->tx_dropped;

	nst->rx_errors = (unsigned long)est->rx_bd_errors;
	nst->rx_fifo_errors = (unsigned long)(est->rx_bd_overrun +
					      est->rx_fifo_overrun +
					      est->rx_overrun);
	nst->rx_frame_errors = (unsigned long)(est->rx_bd_alignment_error +
					       est->rx_alignment_error);
	nst->rx_crc_errors = (unsigned long)(est->rx_bd_bad_fcs +
					     est->rx_bad_fcs);
	nst->rx_length_errors = (unsigned long)(est->rx_bd_runt_packet +
						est->rx_bd_short_event +
						est->rx_bd_packet_too_long +
						est->rx_bd_out_of_range +
						est->rx_bd_in_range +
						est->rx_runt_packet +
						est->rx_short_event +
						est->rx_packet_too_long +
						est->rx_out_of_range +
						est->rx_in_range);

	nst->tx_errors = (unsigned long)(est->tx_bd_errors + est->tx_errors);
	nst->tx_fifo_errors = (unsigned long)(est->tx_bd_underrun +
					      est->tx_underrun);
	nst->tx_carrier_errors = (unsigned long)est->tx_bd_carrier_loss;
	nst->collisions = (unsigned long)(est->tx_bd_excessive_deferral +
					  est->tx_bd_excessive_collisions +
					  est->tx_bd_late_collision +
					  est->tx_bd_multple_collisions);
	spin_unlock_irqrestore(&dev->lock, flags);
	return nst;
}
static struct mal_commac_ops emac_commac_ops = {
	.poll_tx = &emac_poll_tx,
	.poll_rx = &emac_poll_rx,
	.peek_rx = &emac_peek_rx,
	.rxde = &emac_rxde,
};

static struct mal_commac_ops emac_commac_sg_ops = {
	.poll_tx = &emac_poll_tx,
	.poll_rx = &emac_poll_rx,
	.peek_rx = &emac_peek_rx_sg,
	.rxde = &emac_rxde,
};
/* Ethtool support */
static int emac_ethtool_get_settings(struct net_device *ndev,
				     struct ethtool_cmd *cmd)
{
	struct emac_instance *dev = netdev_priv(ndev);

	cmd->supported = dev->phy.features;
	cmd->port = PORT_MII;
	cmd->phy_address = dev->phy.address;
	cmd->transceiver =
	    dev->phy.address >= 0 ? XCVR_EXTERNAL : XCVR_INTERNAL;

	mutex_lock(&dev->link_lock);
	cmd->advertising = dev->phy.advertising;
	cmd->autoneg = dev->phy.autoneg;
	cmd->speed = dev->phy.speed;
	cmd->duplex = dev->phy.duplex;
	mutex_unlock(&dev->link_lock);

	return 0;
}
static int emac_ethtool_set_settings(struct net_device *ndev,
				     struct ethtool_cmd *cmd)
{
	struct emac_instance *dev = netdev_priv(ndev);
	u32 f = dev->phy.features;

	DBG(dev, "set_settings(%d, %d, %d, 0x%08x)" NL,
	    cmd->autoneg, cmd->speed, cmd->duplex, cmd->advertising);

	/* Basic sanity checks */
	if (dev->phy.address < 0)
		return -EOPNOTSUPP;
	if (cmd->autoneg != AUTONEG_ENABLE && cmd->autoneg != AUTONEG_DISABLE)
		return -EINVAL;
	if (cmd->autoneg == AUTONEG_ENABLE && cmd->advertising == 0)
		return -EINVAL;
	if (cmd->duplex != DUPLEX_HALF && cmd->duplex != DUPLEX_FULL)
		return -EINVAL;

	if (cmd->autoneg == AUTONEG_DISABLE) {
		switch (cmd->speed) {
		case SPEED_10:
			if (cmd->duplex == DUPLEX_HALF
			    && !(f & SUPPORTED_10baseT_Half))
				return -EINVAL;
			if (cmd->duplex == DUPLEX_FULL
			    && !(f & SUPPORTED_10baseT_Full))
				return -EINVAL;
			break;
		case SPEED_100:
			if (cmd->duplex == DUPLEX_HALF
			    && !(f & SUPPORTED_100baseT_Half))
				return -EINVAL;
			if (cmd->duplex == DUPLEX_FULL
			    && !(f & SUPPORTED_100baseT_Full))
				return -EINVAL;
			break;
		case SPEED_1000:
			if (cmd->duplex == DUPLEX_HALF
			    && !(f & SUPPORTED_1000baseT_Half))
				return -EINVAL;
			if (cmd->duplex == DUPLEX_FULL
			    && !(f & SUPPORTED_1000baseT_Full))
				return -EINVAL;
			break;
		default:
			return -EINVAL;
		}

		mutex_lock(&dev->link_lock);
		dev->phy.def->ops->setup_forced(&dev->phy, cmd->speed,
						cmd->duplex);
		mutex_unlock(&dev->link_lock);

	} else {
		if (!(f & SUPPORTED_Autoneg))
			return -EINVAL;

		mutex_lock(&dev->link_lock);
		dev->phy.def->ops->setup_aneg(&dev->phy,
					      (cmd->advertising & f) |
					      (dev->phy.advertising &
					       (ADVERTISED_Pause |
						ADVERTISED_Asym_Pause)));
		mutex_unlock(&dev->link_lock);
	}
	emac_force_link_update(dev);

	return 0;
}
static void emac_ethtool_get_ringparam(struct net_device *ndev,
				       struct ethtool_ringparam *rp)
{
	rp->rx_max_pending = rp->rx_pending = NUM_RX_BUFF;
	rp->tx_max_pending = rp->tx_pending = NUM_TX_BUFF;
}
static void emac_ethtool_get_pauseparam(struct net_device *ndev,
					struct ethtool_pauseparam *pp)
{
	struct emac_instance *dev = netdev_priv(ndev);

	mutex_lock(&dev->link_lock);
	if ((dev->phy.features & SUPPORTED_Autoneg) &&
	    (dev->phy.advertising & (ADVERTISED_Pause | ADVERTISED_Asym_Pause)))
		pp->autoneg = 1;

	if (dev->phy.duplex == DUPLEX_FULL) {
		if (dev->phy.pause)
			pp->rx_pause = pp->tx_pause = 1;
		else if (dev->phy.asym_pause)
			pp->tx_pause = 1;
	}
	mutex_unlock(&dev->link_lock);
}
static u32 emac_ethtool_get_rx_csum(struct net_device *ndev)
{
	struct emac_instance *dev = netdev_priv(ndev);

	return dev->tah_dev != NULL;
}

static int emac_get_regs_len(struct emac_instance *dev)
{
	if (emac_has_feature(dev, EMAC_FTR_EMAC4))
		return sizeof(struct emac_ethtool_regs_subhdr) +
			EMAC4_ETHTOOL_REGS_SIZE;
	else
		return sizeof(struct emac_ethtool_regs_subhdr) +
			EMAC_ETHTOOL_REGS_SIZE;
}
static int emac_ethtool_get_regs_len(struct net_device *ndev)
{
	struct emac_instance *dev = netdev_priv(ndev);
	int size;

	size = sizeof(struct emac_ethtool_regs_hdr) +
		emac_get_regs_len(dev) + mal_get_regs_len(dev->mal);
	if (emac_has_feature(dev, EMAC_FTR_HAS_ZMII))
		size += zmii_get_regs_len(dev->zmii_dev);
	if (emac_has_feature(dev, EMAC_FTR_HAS_RGMII))
		size += rgmii_get_regs_len(dev->rgmii_dev);
	if (emac_has_feature(dev, EMAC_FTR_HAS_TAH))
		size += tah_get_regs_len(dev->tah_dev);

	return size;
}
static void *emac_dump_regs(struct emac_instance *dev, void *buf)
{
	struct emac_ethtool_regs_subhdr *hdr = buf;

	hdr->index = dev->cell_index;
	if (emac_has_feature(dev, EMAC_FTR_EMAC4)) {
		hdr->version = EMAC4_ETHTOOL_REGS_VER;
		memcpy_fromio(hdr + 1, dev->emacp, EMAC4_ETHTOOL_REGS_SIZE);
		return ((void *)(hdr + 1) + EMAC4_ETHTOOL_REGS_SIZE);
	} else {
		hdr->version = EMAC_ETHTOOL_REGS_VER;
		memcpy_fromio(hdr + 1, dev->emacp, EMAC_ETHTOOL_REGS_SIZE);
		return ((void *)(hdr + 1) + EMAC_ETHTOOL_REGS_SIZE);
	}
}
static void emac_ethtool_get_regs(struct net_device *ndev,
				  struct ethtool_regs *regs, void *buf)
{
	struct emac_instance *dev = netdev_priv(ndev);
	struct emac_ethtool_regs_hdr *hdr = buf;

	hdr->components = 0;
	buf = hdr + 1;

	buf = mal_dump_regs(dev->mal, buf);
	buf = emac_dump_regs(dev, buf);
	if (emac_has_feature(dev, EMAC_FTR_HAS_ZMII)) {
		hdr->components |= EMAC_ETHTOOL_REGS_ZMII;
		buf = zmii_dump_regs(dev->zmii_dev, buf);
	}
	if (emac_has_feature(dev, EMAC_FTR_HAS_RGMII)) {
		hdr->components |= EMAC_ETHTOOL_REGS_RGMII;
		buf = rgmii_dump_regs(dev->rgmii_dev, buf);
	}
	if (emac_has_feature(dev, EMAC_FTR_HAS_TAH)) {
		hdr->components |= EMAC_ETHTOOL_REGS_TAH;
		buf = tah_dump_regs(dev->tah_dev, buf);
	}
}
static int emac_ethtool_nway_reset(struct net_device *ndev)
{
	struct emac_instance *dev = netdev_priv(ndev);
	int res = 0;

	DBG(dev, "nway_reset" NL);

	if (dev->phy.address < 0)
		return -EOPNOTSUPP;

	mutex_lock(&dev->link_lock);
	if (!dev->phy.autoneg) {
		res = -EINVAL;
		goto out;
	}

	dev->phy.def->ops->setup_aneg(&dev->phy, dev->phy.advertising);
 out:
	mutex_unlock(&dev->link_lock);
	emac_force_link_update(dev);
	return res;
}

static int emac_ethtool_get_stats_count(struct net_device *ndev)
{
	return EMAC_ETHTOOL_STATS_COUNT;
}
static void emac_ethtool_get_strings(struct net_device *ndev, u32 stringset,
				     u8 * buf)
{
	if (stringset == ETH_SS_STATS)
		memcpy(buf, &emac_stats_keys, sizeof(emac_stats_keys));
}

static void emac_ethtool_get_ethtool_stats(struct net_device *ndev,
					   struct ethtool_stats *estats,
					   u64 * tmp_stats)
{
	struct emac_instance *dev = netdev_priv(ndev);

	memcpy(tmp_stats, &dev->stats, sizeof(dev->stats));
	tmp_stats += sizeof(dev->stats) / sizeof(u64);
	memcpy(tmp_stats, &dev->estats, sizeof(dev->estats));
}
static void emac_ethtool_get_drvinfo(struct net_device *ndev,
				     struct ethtool_drvinfo *info)
{
	struct emac_instance *dev = netdev_priv(ndev);

	strcpy(info->driver, "ibm_emac");
	strcpy(info->version, DRV_VERSION);
	info->fw_version[0] = '\0';
	sprintf(info->bus_info, "PPC 4xx EMAC-%d %s",
		dev->cell_index, dev->ofdev->node->full_name);
	info->n_stats = emac_ethtool_get_stats_count(ndev);
	info->regdump_len = emac_ethtool_get_regs_len(ndev);
}
static const struct ethtool_ops emac_ethtool_ops = {
	.get_settings = emac_ethtool_get_settings,
	.set_settings = emac_ethtool_set_settings,
	.get_drvinfo = emac_ethtool_get_drvinfo,

	.get_regs_len = emac_ethtool_get_regs_len,
	.get_regs = emac_ethtool_get_regs,

	.nway_reset = emac_ethtool_nway_reset,

	.get_ringparam = emac_ethtool_get_ringparam,
	.get_pauseparam = emac_ethtool_get_pauseparam,

	.get_rx_csum = emac_ethtool_get_rx_csum,

	.get_strings = emac_ethtool_get_strings,
	.get_stats_count = emac_ethtool_get_stats_count,
	.get_ethtool_stats = emac_ethtool_get_ethtool_stats,

	.get_link = ethtool_op_get_link,
	.get_tx_csum = ethtool_op_get_tx_csum,
	.get_sg = ethtool_op_get_sg,
};
static int emac_ioctl(struct net_device *ndev, struct ifreq *rq, int cmd)
{
	struct emac_instance *dev = netdev_priv(ndev);
	uint16_t *data = (uint16_t *) & rq->ifr_ifru;

	DBG(dev, "ioctl %08x" NL, cmd);

	if (dev->phy.address < 0)
		return -EOPNOTSUPP;

	switch (cmd) {
	case SIOCDEVPRIVATE:
		data[0] = dev->phy.address;
		/* Fall through */
	case SIOCDEVPRIVATE + 1:
		data[3] = emac_mdio_read(ndev, dev->phy.address, data[1]);
		return 0;

	case SIOCDEVPRIVATE + 2:
		if (!capable(CAP_NET_ADMIN))
			return -EPERM;
		emac_mdio_write(ndev, dev->phy.address, data[1], data[2]);
		return 0;
	default:
		return -EOPNOTSUPP;
	}
}
struct emac_depentry {
	u32 phandle;
	struct device_node *node;
	struct of_device *ofdev;
	void *drvdata;
};

#define	EMAC_DEP_MAL_IDX	0
#define	EMAC_DEP_ZMII_IDX	1
#define	EMAC_DEP_RGMII_IDX	2
#define	EMAC_DEP_TAH_IDX	3
#define	EMAC_DEP_MDIO_IDX	4
#define	EMAC_DEP_PREV_IDX	5
#define	EMAC_DEP_COUNT		6
static int __devinit emac_check_deps(struct emac_instance *dev,
				     struct emac_depentry *deps)
{
	int i, there = 0;
	struct device_node *np;

	for (i = 0; i < EMAC_DEP_COUNT; i++) {
		/* no dependency on that item, all right */
		if (deps[i].phandle == 0) {
			there++;
			continue;
		}
		/* special case for blist as the dependency might go away */
		if (i == EMAC_DEP_PREV_IDX) {
			np = *(dev->blist - 1);
			if (np == NULL) {
				deps[i].phandle = 0;
				there++;
				continue;
			}
			if (deps[i].node == NULL)
				deps[i].node = of_node_get(np);
		}
		if (deps[i].node == NULL)
			deps[i].node = of_find_node_by_phandle(deps[i].phandle);
		if (deps[i].node == NULL)
			continue;
		if (deps[i].ofdev == NULL)
			deps[i].ofdev = of_find_device_by_node(deps[i].node);
		if (deps[i].ofdev == NULL)
			continue;
		if (deps[i].drvdata == NULL)
			deps[i].drvdata = dev_get_drvdata(&deps[i].ofdev->dev);
		if (deps[i].drvdata != NULL)
			there++;
	}
	return (there == EMAC_DEP_COUNT);
}
static void emac_put_deps(struct emac_instance *dev)
{
	if (dev->mal_dev)
		of_dev_put(dev->mal_dev);
	if (dev->zmii_dev)
		of_dev_put(dev->zmii_dev);
	if (dev->rgmii_dev)
		of_dev_put(dev->rgmii_dev);
	if (dev->mdio_dev)
		of_dev_put(dev->mdio_dev);
	if (dev->tah_dev)
		of_dev_put(dev->tah_dev);
}
static int __devinit emac_of_bus_notify(struct notifier_block *nb,
					unsigned long action, void *data)
{
	/* We are only interested in device addition */
	if (action == BUS_NOTIFY_BOUND_DRIVER)
		wake_up_all(&emac_probe_wait);
	return 0;
}

static struct notifier_block emac_of_bus_notifier __devinitdata = {
	.notifier_call = emac_of_bus_notify
};
static int __devinit emac_wait_deps(struct emac_instance *dev)
{
	struct emac_depentry deps[EMAC_DEP_COUNT];
	int i, err;

	memset(&deps, 0, sizeof(deps));

	deps[EMAC_DEP_MAL_IDX].phandle = dev->mal_ph;
	deps[EMAC_DEP_ZMII_IDX].phandle = dev->zmii_ph;
	deps[EMAC_DEP_RGMII_IDX].phandle = dev->rgmii_ph;
	if (dev->tah_ph)
		deps[EMAC_DEP_TAH_IDX].phandle = dev->tah_ph;
	if (dev->mdio_ph)
		deps[EMAC_DEP_MDIO_IDX].phandle = dev->mdio_ph;
	if (dev->blist && dev->blist > emac_boot_list)
		deps[EMAC_DEP_PREV_IDX].phandle = 0xffffffffu;
	bus_register_notifier(&of_platform_bus_type, &emac_of_bus_notifier);
	wait_event_timeout(emac_probe_wait,
			   emac_check_deps(dev, deps),
			   EMAC_PROBE_DEP_TIMEOUT);
	bus_unregister_notifier(&of_platform_bus_type, &emac_of_bus_notifier);
	err = emac_check_deps(dev, deps) ? 0 : -ENODEV;
	for (i = 0; i < EMAC_DEP_COUNT; i++) {
		if (deps[i].node)
			of_node_put(deps[i].node);
		if (err && deps[i].ofdev)
			of_dev_put(deps[i].ofdev);
	}
	if (err == 0) {
		dev->mal_dev = deps[EMAC_DEP_MAL_IDX].ofdev;
		dev->zmii_dev = deps[EMAC_DEP_ZMII_IDX].ofdev;
		dev->rgmii_dev = deps[EMAC_DEP_RGMII_IDX].ofdev;
		dev->tah_dev = deps[EMAC_DEP_TAH_IDX].ofdev;
		dev->mdio_dev = deps[EMAC_DEP_MDIO_IDX].ofdev;
	}
	if (deps[EMAC_DEP_PREV_IDX].ofdev)
		of_dev_put(deps[EMAC_DEP_PREV_IDX].ofdev);
	return err;
}
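/*
 * Rationale for the rendezvous above: the MAL, ZMII/RGMII/TAH and MDIO
 * helpers are separate of_platform devices probed in an order we don't
 * control. Rather than failing outright, emac_wait_deps() parks on
 * emac_probe_wait and lets every BUS_NOTIFY_BOUND_DRIVER event re-run
 * emac_check_deps() until everything is bound or
 * EMAC_PROBE_DEP_TIMEOUT expires, in which case the probe bails out
 * with -ENODEV after dropping whatever references it managed to take.
 */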
static int __devinit emac_read_uint_prop(struct device_node *np,
					 const char *name, u32 *val, int fatal)
{
	int len;
	const u32 *prop = of_get_property(np, name, &len);
	if (prop == NULL || len < sizeof(u32)) {
		if (fatal)
			printk(KERN_ERR "%s: missing %s property\n",
			       np->full_name, name);
		return -ENODEV;
	}
	*val = *prop;
	return 0;
}
static int __devinit emac_init_phy(struct emac_instance *dev)
{
	struct device_node *np = dev->ofdev->node;
	struct net_device *ndev = dev->ndev;
	u32 phy_map, adv;
	int i;

	dev->phy.dev = ndev;
	dev->phy.mode = dev->phy_mode;

	/* PHY-less configuration.
	 * XXX I probably should move these settings to the dev tree
	 */
	if (dev->phy_address == 0xffffffff && dev->phy_map == 0xffffffff) {
		emac_reset(dev);

		dev->phy.address = -1;
		dev->phy.features = SUPPORTED_100baseT_Full | SUPPORTED_MII;
		dev->phy.pause = 1;

		return 0;
	}

	mutex_lock(&emac_phy_map_lock);
	phy_map = dev->phy_map | busy_phy_map;

	DBG(dev, "PHY maps %08x %08x" NL, dev->phy_map, busy_phy_map);

	dev->phy.mdio_read = emac_mdio_read;
	dev->phy.mdio_write = emac_mdio_write;

	/* Configure EMAC with defaults so we can at least use MDIO
	 * This is needed mostly for 440GX
	 */
	if (emac_phy_gpcs(dev->phy.mode)) {
		/* XXX
		 * Make GPCS PHY address equal to EMAC index.
		 * We probably should take into account busy_phy_map
		 * and/or phy_map here.
		 *
		 * Note that the busy_phy_map is currently global
		 * while it should probably be per-ASIC...
		 */
		dev->phy.address = dev->cell_index;
	}

	emac_configure(dev);

	if (dev->phy_address != 0xffffffff)
		phy_map = ~(1 << dev->phy_address);

	for (i = 0; i < 0x20; phy_map >>= 1, ++i)
		if (!(phy_map & 1)) {
			int r;
			busy_phy_map |= 1 << i;

			/* Quick check if there is a PHY at the address */
			r = emac_mdio_read(dev->ndev, i, MII_BMCR);
			if (r == 0xffff || r < 0)
				continue;
			if (!emac_mii_phy_probe(&dev->phy, i))
				break;
		}
	mutex_unlock(&emac_phy_map_lock);
	if (i == 0x20) {
		printk(KERN_WARNING "%s: can't find PHY!\n", np->full_name);
		return -ENXIO;
	}

	/* Init PHY */
	if (dev->phy.def->ops->init)
		dev->phy.def->ops->init(&dev->phy);

	/* Disable any PHY features not supported by the platform */
	dev->phy.def->features &= ~dev->phy_feat_exc;

	/* Setup initial link parameters */
	if (dev->phy.features & SUPPORTED_Autoneg) {
		adv = dev->phy.features;
		if (!emac_has_feature(dev, EMAC_FTR_NO_FLOW_CONTROL_40x))
			adv |= ADVERTISED_Pause | ADVERTISED_Asym_Pause;
		/* Restart autonegotiation */
		dev->phy.def->ops->setup_aneg(&dev->phy, adv);
	} else {
		u32 f = dev->phy.def->features;
		int speed = SPEED_10, fd = DUPLEX_HALF;

		/* Select highest supported speed/duplex */
		if (f & SUPPORTED_1000baseT_Full) {
			speed = SPEED_1000;
			fd = DUPLEX_FULL;
		} else if (f & SUPPORTED_1000baseT_Half)
			speed = SPEED_1000;
		else if (f & SUPPORTED_100baseT_Full) {
			speed = SPEED_100;
			fd = DUPLEX_FULL;
		} else if (f & SUPPORTED_100baseT_Half)
			speed = SPEED_100;
		else if (f & SUPPORTED_10baseT_Full)
			fd = DUPLEX_FULL;

		/* Force link parameters */
		dev->phy.def->ops->setup_forced(&dev->phy, speed, fd);
	}
	return 0;
}
static int __devinit emac_init_config(struct emac_instance *dev)
{
	struct device_node *np = dev->ofdev->node;
	const void *p;
	unsigned int plen;
	const char *pm, *phy_modes[] = {
		[PHY_MODE_NA] = "",
		[PHY_MODE_MII] = "mii",
		[PHY_MODE_RMII] = "rmii",
		[PHY_MODE_SMII] = "smii",
		[PHY_MODE_RGMII] = "rgmii",
		[PHY_MODE_TBI] = "tbi",
		[PHY_MODE_GMII] = "gmii",
		[PHY_MODE_RTBI] = "rtbi",
		[PHY_MODE_SGMII] = "sgmii",
	};

	/* Read config from device-tree */
	if (emac_read_uint_prop(np, "mal-device", &dev->mal_ph, 1))
		return -ENXIO;
	if (emac_read_uint_prop(np, "mal-tx-channel", &dev->mal_tx_chan, 1))
		return -ENXIO;
	if (emac_read_uint_prop(np, "mal-rx-channel", &dev->mal_rx_chan, 1))
		return -ENXIO;
	if (emac_read_uint_prop(np, "cell-index", &dev->cell_index, 1))
		return -ENXIO;
	if (emac_read_uint_prop(np, "max-frame-size", &dev->max_mtu, 0))
		dev->max_mtu = 1500;
	if (emac_read_uint_prop(np, "rx-fifo-size", &dev->rx_fifo_size, 0))
		dev->rx_fifo_size = 2048;
	if (emac_read_uint_prop(np, "tx-fifo-size", &dev->tx_fifo_size, 0))
		dev->tx_fifo_size = 2048;
	if (emac_read_uint_prop(np, "rx-fifo-size-gige", &dev->rx_fifo_size_gige, 0))
		dev->rx_fifo_size_gige = dev->rx_fifo_size;
	if (emac_read_uint_prop(np, "tx-fifo-size-gige", &dev->tx_fifo_size_gige, 0))
		dev->tx_fifo_size_gige = dev->tx_fifo_size;
	if (emac_read_uint_prop(np, "phy-address", &dev->phy_address, 0))
		dev->phy_address = 0xffffffff;
	if (emac_read_uint_prop(np, "phy-map", &dev->phy_map, 0))
		dev->phy_map = 0xffffffff;
	if (emac_read_uint_prop(np->parent, "clock-frequency", &dev->opb_bus_freq, 1))
		return -ENXIO;
	if (emac_read_uint_prop(np, "tah-device", &dev->tah_ph, 0))
		dev->tah_ph = 0;
	if (emac_read_uint_prop(np, "tah-channel", &dev->tah_port, 0))
		dev->tah_port = 0;
	if (emac_read_uint_prop(np, "mdio-device", &dev->mdio_ph, 0))
		dev->mdio_ph = 0;
	if (emac_read_uint_prop(np, "zmii-device", &dev->zmii_ph, 0))
		dev->zmii_ph = 0;
	if (emac_read_uint_prop(np, "zmii-channel", &dev->zmii_port, 0))
		dev->zmii_port = 0xffffffff;
	if (emac_read_uint_prop(np, "rgmii-device", &dev->rgmii_ph, 0))
		dev->rgmii_ph = 0;
	if (emac_read_uint_prop(np, "rgmii-channel", &dev->rgmii_port, 0))
		dev->rgmii_port = 0xffffffff;
	if (emac_read_uint_prop(np, "fifo-entry-size", &dev->fifo_entry_size, 0))
		dev->fifo_entry_size = 16;
	if (emac_read_uint_prop(np, "mal-burst-size", &dev->mal_burst_size, 0))
		dev->mal_burst_size = 256;

	/* PHY mode needs some decoding */
	dev->phy_mode = PHY_MODE_NA;
	pm = of_get_property(np, "phy-mode", &plen);
	if (pm != NULL) {
		int i;
		for (i = 0; i < ARRAY_SIZE(phy_modes); i++)
			if (!strcasecmp(pm, phy_modes[i])) {
				dev->phy_mode = i;
				break;
			}
	}

	/* Backward compat with non-final DT */
	if (dev->phy_mode == PHY_MODE_NA && pm != NULL && plen == 4) {
		u32 nmode = *(const u32 *)pm;
		if (nmode > PHY_MODE_NA && nmode <= PHY_MODE_SGMII)
			dev->phy_mode = nmode;
	}

	/* Check EMAC version */
	if (of_device_is_compatible(np, "ibm,emac4"))
		dev->features |= EMAC_FTR_EMAC4;

	/* Fixup some feature bits based on the device tree */
	if (of_get_property(np, "has-inverted-stacr-oc", NULL))
		dev->features |= EMAC_FTR_STACR_OC_INVERT;
	if (of_get_property(np, "has-new-stacr-staopc", NULL))
		dev->features |= EMAC_FTR_HAS_NEW_STACR;

	/* CAB lacks the appropriate properties */
	if (of_device_is_compatible(np, "ibm,emac-axon"))
		dev->features |= EMAC_FTR_HAS_NEW_STACR |
			EMAC_FTR_STACR_OC_INVERT;

	/* Enable TAH/ZMII/RGMII features as found */
	if (dev->tah_ph != 0) {
#ifdef CONFIG_IBM_NEW_EMAC_TAH
		dev->features |= EMAC_FTR_HAS_TAH;
#else
		printk(KERN_ERR "%s: TAH support not enabled !\n",
		       np->full_name);
		return -ENXIO;
#endif
	}
	if (dev->zmii_ph != 0) {
#ifdef CONFIG_IBM_NEW_EMAC_ZMII
		dev->features |= EMAC_FTR_HAS_ZMII;
#else
		printk(KERN_ERR "%s: ZMII support not enabled !\n",
		       np->full_name);
		return -ENXIO;
#endif
	}
	if (dev->rgmii_ph != 0) {
#ifdef CONFIG_IBM_NEW_EMAC_RGMII
		dev->features |= EMAC_FTR_HAS_RGMII;
#else
		printk(KERN_ERR "%s: RGMII support not enabled !\n",
		       np->full_name);
		return -ENXIO;
#endif
	}

	/* Read MAC-address */
	p = of_get_property(np, "local-mac-address", NULL);
	if (p == NULL) {
		printk(KERN_ERR "%s: Can't find local-mac-address property\n",
		       np->full_name);
		return -ENXIO;
	}
	memcpy(dev->ndev->dev_addr, p, 6);

	DBG(dev, "features     : 0x%08x / 0x%08x\n", dev->features, EMAC_FTRS_POSSIBLE);
	DBG(dev, "tx_fifo_size : %d (%d gige)\n", dev->tx_fifo_size, dev->tx_fifo_size_gige);
	DBG(dev, "rx_fifo_size : %d (%d gige)\n", dev->rx_fifo_size, dev->rx_fifo_size_gige);
	DBG(dev, "max_mtu      : %d\n", dev->max_mtu);
	DBG(dev, "OPB freq     : %d\n", dev->opb_bus_freq);

	return 0;
}
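/*
 * For reference, a sketch of the device-tree node shape this parser
 * expects. The property names match the of_get_property()/
 * emac_read_uint_prop() calls above; the unit address, labels and
 * values are purely illustrative and not taken from any real board
 * file:
 *
 *	EMAC0: ethernet@ef600800 {
 *		compatible = "ibm,emac4", "ibm,emac";
 *		cell-index = <0>;
 *		local-mac-address = [ 00 00 00 00 00 00 ];
 *		mal-device = <&MAL0>;
 *		mal-tx-channel = <0>;
 *		mal-rx-channel = <0>;
 *		max-frame-size = <1500>;
 *		rx-fifo-size = <2048>;
 *		tx-fifo-size = <2048>;
 *		phy-mode = "rgmii";
 *		phy-map = <0xffffffff>;
 *		rgmii-device = <&RGMII0>;
 *		rgmii-channel = <0>;
 *	};
 */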
static int __devinit emac_probe(struct of_device *ofdev,
				const struct of_device_id *match)
{
	struct net_device *ndev;
	struct emac_instance *dev;
	struct device_node *np = ofdev->node;
	struct device_node **blist = NULL;
	int err, i;

	/* Skip unused/unwired EMACs */
	if (of_get_property(np, "unused", NULL))
		return -ENODEV;

	/* Find ourselves in the bootlist if we are there */
	for (i = 0; i < EMAC_BOOT_LIST_SIZE; i++)
		if (emac_boot_list[i] == np)
			blist = &emac_boot_list[i];

	/* Allocate our net_device structure */
	err = -ENOMEM;
	ndev = alloc_etherdev(sizeof(struct emac_instance));
	if (!ndev) {
		printk(KERN_ERR "%s: could not allocate ethernet device!\n",
		       np->full_name);
		goto err_gone;
	}
	dev = netdev_priv(ndev);
	dev->ndev = ndev;
	dev->ofdev = ofdev;
	dev->blist = blist;
	SET_NETDEV_DEV(ndev, &ofdev->dev);

	/* Initialize some embedded data structures */
	mutex_init(&dev->mdio_lock);
	mutex_init(&dev->link_lock);
	spin_lock_init(&dev->lock);
	INIT_WORK(&dev->reset_work, emac_reset_work);

	/* Init various config data based on device-tree */
	err = emac_init_config(dev);
	if (err != 0)
		goto err_free;

	/* Get interrupts. EMAC irq is mandatory, WOL irq is optional */
	dev->emac_irq = irq_of_parse_and_map(np, 0);
	dev->wol_irq = irq_of_parse_and_map(np, 1);
	if (dev->emac_irq == NO_IRQ) {
		printk(KERN_ERR "%s: Can't map main interrupt\n", np->full_name);
		goto err_free;
	}
	ndev->irq = dev->emac_irq;

	/* Map EMAC regs */
	if (of_address_to_resource(np, 0, &dev->rsrc_regs)) {
		printk(KERN_ERR "%s: Can't get registers address\n",
		       np->full_name);
		goto err_irq_unmap;
	}
	// TODO : request_mem_region
	dev->emacp = ioremap(dev->rsrc_regs.start, sizeof(struct emac_regs));
	if (dev->emacp == NULL) {
		printk(KERN_ERR "%s: Can't map device registers!\n",
		       np->full_name);
		err = -ENOMEM;
		goto err_irq_unmap;
	}

	/* Wait for dependent devices */
	err = emac_wait_deps(dev);
	if (err) {
		printk(KERN_ERR
		       "%s: Timeout waiting for dependent devices\n",
		       np->full_name);
		/* display more info about what's missing ? */
		goto err_reg_unmap;
	}
	dev->mal = dev_get_drvdata(&dev->mal_dev->dev);
	if (dev->mdio_dev != NULL)
		dev->mdio_instance = dev_get_drvdata(&dev->mdio_dev->dev);

	/* Register with MAL */
	dev->commac.ops = &emac_commac_ops;
	dev->commac.dev = dev;
	dev->commac.tx_chan_mask = MAL_CHAN_MASK(dev->mal_tx_chan);
	dev->commac.rx_chan_mask = MAL_CHAN_MASK(dev->mal_rx_chan);
	err = mal_register_commac(dev->mal, &dev->commac);
	if (err) {
		printk(KERN_ERR "%s: failed to register with mal %s!\n",
		       np->full_name, dev->mal_dev->node->full_name);
		goto err_rel_deps;
	}
	dev->rx_skb_size = emac_rx_skb_size(ndev->mtu);
	dev->rx_sync_size = emac_rx_sync_size(ndev->mtu);

	/* Get pointers to BD rings */
	dev->tx_desc =
	    dev->mal->bd_virt + mal_tx_bd_offset(dev->mal, dev->mal_tx_chan);
	dev->rx_desc =
	    dev->mal->bd_virt + mal_rx_bd_offset(dev->mal, dev->mal_rx_chan);

	DBG(dev, "tx_desc %p" NL, dev->tx_desc);
	DBG(dev, "rx_desc %p" NL, dev->rx_desc);

	/* Clean rings */
	memset(dev->tx_desc, 0, NUM_TX_BUFF * sizeof(struct mal_descriptor));
	memset(dev->rx_desc, 0, NUM_RX_BUFF * sizeof(struct mal_descriptor));

	/* Attach to ZMII, if needed */
	if (emac_has_feature(dev, EMAC_FTR_HAS_ZMII) &&
	    (err = zmii_attach(dev->zmii_dev, dev->zmii_port, &dev->phy_mode)) != 0)
		goto err_unreg_commac;

	/* Attach to RGMII, if needed */
	if (emac_has_feature(dev, EMAC_FTR_HAS_RGMII) &&
	    (err = rgmii_attach(dev->rgmii_dev, dev->rgmii_port, dev->phy_mode)) != 0)
		goto err_detach_zmii;

	/* Attach to TAH, if needed */
	if (emac_has_feature(dev, EMAC_FTR_HAS_TAH) &&
	    (err = tah_attach(dev->tah_dev, dev->tah_port)) != 0)
		goto err_detach_rgmii;

	/* Set some link defaults before we can find out real parameters */
	dev->phy.speed = SPEED_100;
	dev->phy.duplex = DUPLEX_FULL;
	dev->phy.autoneg = AUTONEG_DISABLE;
	dev->phy.pause = dev->phy.asym_pause = 0;
	dev->stop_timeout = STOP_TIMEOUT_100;
	INIT_DELAYED_WORK(&dev->link_work, emac_link_timer);

	/* Find PHY if any */
	err = emac_init_phy(dev);
	if (err != 0)
		goto err_detach_tah;

	/* Fill in the driver function table */
	ndev->open = &emac_open;
	if (dev->tah_dev)
		ndev->features |= NETIF_F_IP_CSUM | NETIF_F_SG;
	ndev->tx_timeout = &emac_tx_timeout;
	ndev->watchdog_timeo = 5 * HZ;
	ndev->stop = &emac_close;
	ndev->get_stats = &emac_stats;
	ndev->set_multicast_list = &emac_set_multicast_list;
	ndev->do_ioctl = &emac_ioctl;
	if (emac_phy_supports_gige(dev->phy_mode)) {
		ndev->hard_start_xmit = &emac_start_xmit_sg;
		ndev->change_mtu = &emac_change_mtu;
		dev->commac.ops = &emac_commac_sg_ops;
	} else
		ndev->hard_start_xmit = &emac_start_xmit;
	SET_ETHTOOL_OPS(ndev, &emac_ethtool_ops);

	netif_carrier_off(ndev);
	netif_stop_queue(ndev);

	err = register_netdev(ndev);
	if (err) {
		printk(KERN_ERR "%s: failed to register net device (%d)!\n",
		       np->full_name, err);
		goto err_detach_tah;
	}

	/* Set our drvdata last as we don't want them visible until we are
	 * fully initialized
	 */
	wmb();
	dev_set_drvdata(&ofdev->dev, dev);

	/* There's a new kid in town ! Let's tell everybody */
	wake_up_all(&emac_probe_wait);

	printk(KERN_INFO
	       "%s: EMAC-%d %s, MAC %02x:%02x:%02x:%02x:%02x:%02x\n",
	       ndev->name, dev->cell_index, np->full_name,
	       ndev->dev_addr[0], ndev->dev_addr[1], ndev->dev_addr[2],
	       ndev->dev_addr[3], ndev->dev_addr[4], ndev->dev_addr[5]);

	if (dev->phy.address >= 0)
		printk("%s: found %s PHY (0x%02x)\n", ndev->name,
		       dev->phy.def->name, dev->phy.address);

	emac_dbg_register(dev);

	/* Life is good */
	return 0;

	/* I have a bad feeling about this ... */

 err_detach_tah:
	if (emac_has_feature(dev, EMAC_FTR_HAS_TAH))
		tah_detach(dev->tah_dev, dev->tah_port);
 err_detach_rgmii:
	if (emac_has_feature(dev, EMAC_FTR_HAS_RGMII))
		rgmii_detach(dev->rgmii_dev, dev->rgmii_port);
 err_detach_zmii:
	if (emac_has_feature(dev, EMAC_FTR_HAS_ZMII))
		zmii_detach(dev->zmii_dev, dev->zmii_port);
 err_unreg_commac:
	mal_unregister_commac(dev->mal, &dev->commac);
 err_rel_deps:
	emac_put_deps(dev);
 err_reg_unmap:
	iounmap(dev->emacp);
 err_irq_unmap:
	if (dev->wol_irq != NO_IRQ)
		irq_dispose_mapping(dev->wol_irq);
	if (dev->emac_irq != NO_IRQ)
		irq_dispose_mapping(dev->emac_irq);
 err_free:
	kfree(ndev);
 err_gone:
	/* if we were on the bootlist, remove us as we won't show up and
	 * wake up all waiters to notify them in case they were waiting
	 * on us
	 */
	if (blist) {
		*blist = NULL;
		wake_up_all(&emac_probe_wait);
	}
	return err;
}
static int __devexit emac_remove(struct of_device *ofdev)
{
	struct emac_instance *dev = dev_get_drvdata(&ofdev->dev);

	DBG(dev, "remove" NL);

	dev_set_drvdata(&ofdev->dev, NULL);

	unregister_netdev(dev->ndev);

	flush_scheduled_work();

	if (emac_has_feature(dev, EMAC_FTR_HAS_TAH))
		tah_detach(dev->tah_dev, dev->tah_port);
	if (emac_has_feature(dev, EMAC_FTR_HAS_RGMII))
		rgmii_detach(dev->rgmii_dev, dev->rgmii_port);
	if (emac_has_feature(dev, EMAC_FTR_HAS_ZMII))
		zmii_detach(dev->zmii_dev, dev->zmii_port);

	mal_unregister_commac(dev->mal, &dev->commac);
	emac_put_deps(dev);

	emac_dbg_unregister(dev);
	iounmap(dev->emacp);

	if (dev->wol_irq != NO_IRQ)
		irq_dispose_mapping(dev->wol_irq);
	if (dev->emac_irq != NO_IRQ)
		irq_dispose_mapping(dev->emac_irq);

	kfree(dev->ndev);

	return 0;
}
/* XXX Features in here should be replaced by properties... */
static struct of_device_id emac_match[] =
{
	{
		.type		= "network",
		.compatible	= "ibm,emac",
	},
	{
		.type		= "network",
		.compatible	= "ibm,emac4",
	},
	{},
};

static struct of_platform_driver emac_driver = {
	.name = "emac",
	.match_table = emac_match,

	.probe = emac_probe,
	.remove = emac_remove,
};
static void __init emac_make_bootlist(void)
{
	struct device_node *np = NULL;
	int j, max, i = 0, k;
	int cell_indices[EMAC_BOOT_LIST_SIZE];

	/* Collect EMACs */
	while((np = of_find_all_nodes(np)) != NULL) {
		const u32 *idx;

		if (of_match_node(emac_match, np) == NULL)
			continue;
		if (of_get_property(np, "unused", NULL))
			continue;
		idx = of_get_property(np, "cell-index", NULL);
		if (idx == NULL)
			continue;
		cell_indices[i] = *idx;
		emac_boot_list[i++] = of_node_get(np);
		if (i >= EMAC_BOOT_LIST_SIZE) {
			of_node_put(np);
			break;
		}
	}
	max = i;

	/* Bubble sort them (doh, what a creative algorithm :-) */
	for (i = 0; max > 1 && (i < (max - 1)); i++)
		for (j = i; j < max; j++) {
			if (cell_indices[i] > cell_indices[j]) {
				np = emac_boot_list[i];
				emac_boot_list[i] = emac_boot_list[j];
				emac_boot_list[j] = np;
				k = cell_indices[i];
				cell_indices[i] = cell_indices[j];
				cell_indices[j] = k;
			}
		}
}
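/*
 * Example of the resulting order: if the flat tree yields EMACs with
 * cell-index values 2, 0, 1 (discovery order), the sort above leaves
 * emac_boot_list[] ordered 0, 1, 2, so a later EMAC's dependency on
 * the "previous" one (EMAC_DEP_PREV_IDX in emac_wait_deps()) resolves
 * in cell-index order.
 */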
static int __init emac_init(void)
{
	int rc;

	printk(KERN_INFO DRV_DESC ", version " DRV_VERSION "\n");

	/* Init debug stuff */
	emac_init_debug();

	/* Build EMAC boot list */
	emac_make_bootlist();

	/* Init submodules */
	rc = mal_init();
	if (rc)
		goto err;
	rc = zmii_init();
	if (rc)
		goto err_mal;
	rc = rgmii_init();
	if (rc)
		goto err_zmii;
	rc = tah_init();
	if (rc)
		goto err_rgmii;
	rc = of_register_platform_driver(&emac_driver);
	if (rc)
		goto err_tah;

	return 0;

 err_tah:
	tah_exit();
 err_rgmii:
	rgmii_exit();
 err_zmii:
	zmii_exit();
 err_mal:
	mal_exit();
 err:
	return rc;
}

static void __exit emac_exit(void)
{
	int i;

	of_unregister_platform_driver(&emac_driver);

	tah_exit();
	rgmii_exit();
	zmii_exit();
	mal_exit();
	emac_fini_debug();

	/* Destroy EMAC boot list */
	for (i = 0; i < EMAC_BOOT_LIST_SIZE; i++)
		if (emac_boot_list[i])
			of_node_put(emac_boot_list[i]);
}

module_init(emac_init);
module_exit(emac_exit);