/*
 * drivers/net/ibm_emac/ibm_emac_core.c
 *
 * Driver for PowerPC 4xx on-chip ethernet controller.
 *
 * Copyright (c) 2004, 2005 Zultys Technologies.
 * Eugene Surovegin <eugene.surovegin@zultys.com> or <ebs@ebshome.net>
 *
 * Based on original work by
 *      Matt Porter <mporter@kernel.crashing.org>
 *      (c) 2003 Benjamin Herrenschmidt <benh@kernel.crashing.org>
 *      Armin Kuster <akuster@mvista.com>
 *      Johnnie Peters <jpeters@mvista.com>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the
 * Free Software Foundation; either version 2 of the License, or (at your
 * option) any later version.
 *
 */
#include <linux/config.h>
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/interrupt.h>
#include <linux/delay.h>
#include <linux/init.h>
#include <linux/types.h>
#include <linux/pci.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/crc32.h>
#include <linux/ethtool.h>
#include <linux/mii.h>
#include <linux/bitops.h>

#include <asm/processor.h>
#include <asm/io.h>
#include <asm/dma.h>
#include <asm/uaccess.h>
#include <asm/ocp.h>

#include "ibm_emac_core.h"
#include "ibm_emac_debug.h"
/*
 * Lack of dma_unmap_???? calls is intentional.
 *
 * API-correct usage requires additional support state information to be
 * maintained for every RX and TX buffer descriptor (BD). Unfortunately, due to
 * EMAC design (e.g. TX buffer passed from network stack can be split into
 * several BDs, dma_map_single/dma_map_page can be used to map particular BD),
 * maintaining such information will add additional overhead.
 * Current DMA API implementation for 4xx processors only ensures cache coherency
 * and dma_unmap_???? routines are empty and are likely to stay this way.
 * I decided to omit dma_unmap_??? calls because I don't want to add additional
 * complexity just for the sake of following some abstract API, when it doesn't
 * add any real benefit to the driver. I understand that this decision may be
 * controversial, but I really tried to make code API-correct and efficient
 * at the same time and didn't come up with code I liked :(.        --ebs
 */
#define DRV_NAME        "emac"
#define DRV_VERSION     "3.54"
#define DRV_DESC        "PPC 4xx OCP EMAC driver"

MODULE_DESCRIPTION(DRV_DESC);
MODULE_AUTHOR
    ("Eugene Surovegin <eugene.surovegin@zultys.com> or <ebs@ebshome.net>");
MODULE_LICENSE("GPL");
/* minimum number of free TX descriptors required to wake up TX process */
#define EMAC_TX_WAKEUP_THRESH		(NUM_TX_BUFF / 4)
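/* For example, with NUM_TX_BUFF configured to 64 (the actual value comes
 * from the driver's Kconfig and is board-specific, 64 is purely
 * illustrative), a stopped queue is woken once fewer than 16 TX BDs remain
 * in flight; see the check in emac_poll_tx() below.
 */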
/* If packet size is less than this number, we allocate small skb and copy packet
 * contents into it instead of just sending original big skb up
 */
#define EMAC_RX_COPY_THRESH		CONFIG_IBM_EMAC_RX_COPY_THRESHOLD

/* Since multiple EMACs share MDIO lines in various ways, we need
 * to avoid re-using the same PHY ID in cases where the arch didn't
 * setup precise phy_map entries
 */
static u32 busy_phy_map;
#if defined(CONFIG_IBM_EMAC_PHY_RX_CLK_FIX) && \
    (defined(CONFIG_405EP) || defined(CONFIG_440EP) || defined(CONFIG_440GR))
/* 405EP has "EMAC to PHY Control Register" (CPC0_EPCTL) which can help us
 * with PHY RX clock problem.
 * 440EP/440GR has more sane SDR0_MFR register implementation than 440GX, which
 * also allows controlling each EMAC clock
 */
static inline void EMAC_RX_CLK_TX(int idx)
{
	unsigned long flags;
	local_irq_save(flags);

#if defined(CONFIG_405EP)
	mtdcr(0xf3, mfdcr(0xf3) | (1 << idx));
#else /* CONFIG_440EP || CONFIG_440GR */
	SDR_WRITE(DCRN_SDR_MFR, SDR_READ(DCRN_SDR_MFR) | (0x08000000 >> idx));
#endif

	local_irq_restore(flags);
}

static inline void EMAC_RX_CLK_DEFAULT(int idx)
{
	unsigned long flags;
	local_irq_save(flags);

#if defined(CONFIG_405EP)
	mtdcr(0xf3, mfdcr(0xf3) & ~(1 << idx));
#else /* CONFIG_440EP */
	SDR_WRITE(DCRN_SDR_MFR, SDR_READ(DCRN_SDR_MFR) & ~(0x08000000 >> idx));
#endif

	local_irq_restore(flags);
}
#else
#define EMAC_RX_CLK_TX(idx)		((void)0)
#define EMAC_RX_CLK_DEFAULT(idx)	((void)0)
#endif
#if defined(CONFIG_IBM_EMAC_PHY_RX_CLK_FIX) && defined(CONFIG_440GX)
/* We can switch Ethernet clock to the internal source through SDR0_MFR[ECS],
 * unfortunately this is less flexible than 440EP case, because it's a global
 * setting for all EMACs, therefore we do this clock trick only during probe.
 */
#define EMAC_CLK_INTERNAL		SDR_WRITE(DCRN_SDR_MFR, \
					    SDR_READ(DCRN_SDR_MFR) | 0x08000000)
#define EMAC_CLK_EXTERNAL		SDR_WRITE(DCRN_SDR_MFR, \
					    SDR_READ(DCRN_SDR_MFR) & ~0x08000000)
#else
#define EMAC_CLK_INTERNAL		((void)0)
#define EMAC_CLK_EXTERNAL		((void)0)
#endif
/* I don't want to litter system log with timeout errors
 * when we have brain-damaged PHY.
 */
static inline void emac_report_timeout_error(struct ocp_enet_private *dev,
					     const char *error)
{
#if defined(CONFIG_IBM_EMAC_PHY_RX_CLK_FIX)
	DBG("%d: %s" NL, dev->def->index, error);
#else
	if (net_ratelimit())
		printk(KERN_ERR "emac%d: %s\n", dev->def->index, error);
#endif
}
/* PHY polling intervals */
#define PHY_POLL_LINK_ON	HZ
#define PHY_POLL_LINK_OFF	(HZ / 5)

/* Graceful stop timeouts in us.
 * We should allow up to 1 frame time (full-duplex, ignoring collisions)
 */
#define STOP_TIMEOUT_10		1230
#define STOP_TIMEOUT_100	124
#define STOP_TIMEOUT_1000	13
#define STOP_TIMEOUT_1000_JUMBO	73
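/* Sanity check of the values above, assuming a 1518-byte maximum frame plus
 * 8-byte preamble and 12-byte inter-frame gap, i.e. 1538 bytes or 12304 bits
 * on the wire:
 *   10 Mbps:  12304 / 10   = 1230.4 us -> 1230
 *   100 Mbps: 12304 / 100  =  123.0 us ->  124 (rounded up)
 *   1 Gbps:   12304 / 1000 =   12.3 us ->   13
 * and a ~9018-byte jumbo frame at 1 Gbps takes roughly 72-73 us -> 73.
 */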
/* Please, keep in sync with struct ibm_emac_stats/ibm_emac_error_stats */
static const char emac_stats_keys[EMAC_ETHTOOL_STATS_COUNT][ETH_GSTRING_LEN] = {
	"rx_packets", "rx_bytes", "tx_packets", "tx_bytes", "rx_packets_csum",
	"tx_packets_csum", "tx_undo", "rx_dropped_stack", "rx_dropped_oom",
	"rx_dropped_error", "rx_dropped_resize", "rx_dropped_mtu",
	"rx_stopped", "rx_bd_errors", "rx_bd_overrun", "rx_bd_bad_packet",
	"rx_bd_runt_packet", "rx_bd_short_event", "rx_bd_alignment_error",
	"rx_bd_bad_fcs", "rx_bd_packet_too_long", "rx_bd_out_of_range",
	"rx_bd_in_range", "rx_parity", "rx_fifo_overrun", "rx_overrun",
	"rx_bad_packet", "rx_runt_packet", "rx_short_event",
	"rx_alignment_error", "rx_bad_fcs", "rx_packet_too_long",
	"rx_out_of_range", "rx_in_range", "tx_dropped", "tx_bd_errors",
	"tx_bd_bad_fcs", "tx_bd_carrier_loss", "tx_bd_excessive_deferral",
	"tx_bd_excessive_collisions", "tx_bd_late_collision",
	"tx_bd_multple_collisions", "tx_bd_single_collision",
	"tx_bd_underrun", "tx_bd_sqe", "tx_parity", "tx_underrun", "tx_sqe",
	"tx_errors"
};
static irqreturn_t emac_irq(int irq, void *dev_instance, struct pt_regs *regs);
static void emac_clean_tx_ring(struct ocp_enet_private *dev);
static inline int emac_phy_supports_gige(int phy_mode)
{
	return phy_mode == PHY_MODE_GMII ||
	    phy_mode == PHY_MODE_RGMII ||
	    phy_mode == PHY_MODE_TBI ||
	    phy_mode == PHY_MODE_RTBI;
}
static inline int emac_phy_gpcs(int phy_mode)
{
	return phy_mode == PHY_MODE_TBI ||
	    phy_mode == PHY_MODE_RTBI;
}
static inline void emac_tx_enable(struct ocp_enet_private *dev)
{
	struct emac_regs *p = dev->emacp;
	unsigned long flags;
	u32 r;

	local_irq_save(flags);

	DBG("%d: tx_enable" NL, dev->def->index);

	r = in_be32(&p->mr0);
	if (!(r & EMAC_MR0_TXE))
		out_be32(&p->mr0, r | EMAC_MR0_TXE);
	local_irq_restore(flags);
}
static void emac_tx_disable(struct ocp_enet_private *dev)
{
	struct emac_regs *p = dev->emacp;
	unsigned long flags;
	u32 r;

	local_irq_save(flags);

	DBG("%d: tx_disable" NL, dev->def->index);

	r = in_be32(&p->mr0);
	if (r & EMAC_MR0_TXE) {
		int n = dev->stop_timeout;
		out_be32(&p->mr0, r & ~EMAC_MR0_TXE);
		while (!(in_be32(&p->mr0) & EMAC_MR0_TXI) && n) {
			udelay(1);
			--n;
		}
		if (unlikely(!n))
			emac_report_timeout_error(dev, "TX disable timeout");
	}
	local_irq_restore(flags);
}
static void emac_rx_enable(struct ocp_enet_private *dev)
{
	struct emac_regs *p = dev->emacp;
	unsigned long flags;
	u32 r;

	local_irq_save(flags);
	if (unlikely(dev->commac.rx_stopped))
		goto out;

	DBG("%d: rx_enable" NL, dev->def->index);

	r = in_be32(&p->mr0);
	if (!(r & EMAC_MR0_RXE)) {
		if (unlikely(!(r & EMAC_MR0_RXI))) {
			/* Wait if previous async disable is still in progress */
			int n = dev->stop_timeout;
			while (!(r = in_be32(&p->mr0) & EMAC_MR0_RXI) && n) {
				udelay(1);
				--n;
			}
			if (unlikely(!n))
				emac_report_timeout_error(dev,
							  "RX disable timeout");
		}
		out_be32(&p->mr0, r | EMAC_MR0_RXE);
	}
      out:
	local_irq_restore(flags);
}
static void emac_rx_disable(struct ocp_enet_private *dev)
{
	struct emac_regs *p = dev->emacp;
	unsigned long flags;
	u32 r;

	local_irq_save(flags);

	DBG("%d: rx_disable" NL, dev->def->index);

	r = in_be32(&p->mr0);
	if (r & EMAC_MR0_RXE) {
		int n = dev->stop_timeout;
		out_be32(&p->mr0, r & ~EMAC_MR0_RXE);
		while (!(in_be32(&p->mr0) & EMAC_MR0_RXI) && n) {
			udelay(1);
			--n;
		}
		if (unlikely(!n))
			emac_report_timeout_error(dev, "RX disable timeout");
	}
	local_irq_restore(flags);
}
static inline void emac_rx_disable_async(struct ocp_enet_private *dev)
{
	struct emac_regs *p = dev->emacp;
	unsigned long flags;
	u32 r;

	local_irq_save(flags);

	DBG("%d: rx_disable_async" NL, dev->def->index);

	r = in_be32(&p->mr0);
	if (r & EMAC_MR0_RXE)
		out_be32(&p->mr0, r & ~EMAC_MR0_RXE);
	local_irq_restore(flags);
}
static int emac_reset(struct ocp_enet_private *dev)
{
	struct emac_regs *p = dev->emacp;
	unsigned long flags;
	int n = 20;

	DBG("%d: reset" NL, dev->def->index);

	local_irq_save(flags);

	if (!dev->reset_failed) {
		/* 40x erratum suggests stopping RX channel before reset,
		 * so we do it here just to be safe on all 4xx cores.
		 */
		emac_rx_disable(dev);
		emac_tx_disable(dev);
	}

	out_be32(&p->mr0, EMAC_MR0_SRST);
	while ((in_be32(&p->mr0) & EMAC_MR0_SRST) && n)
		--n;
	local_irq_restore(flags);

	if (n) {
		dev->reset_failed = 0;
		return 0;
	} else {
		emac_report_timeout_error(dev, "reset timeout");
		dev->reset_failed = 1;
		return -ETIMEDOUT;
	}
}
static void emac_hash_mc(struct ocp_enet_private *dev)
{
	struct emac_regs *p = dev->emacp;
	u16 gaht[4] = { 0 };
	struct dev_mc_list *dmi;

	DBG("%d: hash_mc %d" NL, dev->def->index, dev->ndev->mc_count);

	for (dmi = dev->ndev->mc_list; dmi; dmi = dmi->next) {
		int bit;
		DBG2("%d: mc %02x:%02x:%02x:%02x:%02x:%02x" NL,
		     dev->def->index,
		     dmi->dmi_addr[0], dmi->dmi_addr[1], dmi->dmi_addr[2],
		     dmi->dmi_addr[3], dmi->dmi_addr[4], dmi->dmi_addr[5]);

		bit = 63 - (ether_crc(ETH_ALEN, dmi->dmi_addr) >> 26);
		gaht[bit >> 4] |= 0x8000 >> (bit & 0x0f);
	}
	out_be32(&p->gaht1, gaht[0]);
	out_be32(&p->gaht2, gaht[1]);
	out_be32(&p->gaht3, gaht[2]);
	out_be32(&p->gaht4, gaht[3]);
}
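/* A short walk-through of the hashing above: ether_crc() returns the
 * standard Ethernet CRC-32 of the 6-byte address; its top 6 bits
 * (crc >> 26) select one of 64 hash bins, and the "63 -" mirrors the bin
 * index to match the EMAC's bit numbering. Bin N lands in GAHT register
 * N >> 4 (one of four 16-bit registers) at bit position 0x8000 >> (N & 0x0f).
 * A hash match only means "possibly ours" - the stack still filters out
 * false positives.
 */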
static inline u32 emac_iff2rmr(struct net_device *ndev)
{
	u32 r = EMAC_RMR_SP | EMAC_RMR_SFCS | EMAC_RMR_IAE | EMAC_RMR_BAE |
	    EMAC_RMR_BASE;

	if (ndev->flags & IFF_PROMISC)
		r |= EMAC_RMR_PME;
	else if (ndev->flags & IFF_ALLMULTI || ndev->mc_count > 32)
		r |= EMAC_RMR_PMME;
	else if (ndev->mc_count > 0)
		r |= EMAC_RMR_MAE;

	return r;
}
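/* Example of the resulting policy: with, say, 5 multicast groups joined and
 * neither IFF_PROMISC nor IFF_ALLMULTI set, only EMAC_RMR_MAE is added and
 * the GAHT hash filter programmed by emac_hash_mc() is used; past 32 groups
 * the hash is abandoned in favour of multicast-promiscuous mode (PMME), and
 * IFF_PROMISC selects fully promiscuous mode (PME).
 */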
static inline int emac_opb_mhz(void)
{
	return (ocp_sys_info.opb_bus_freq + 500000) / 1000000;
}
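/* The +500000 makes the integer division round to the nearest MHz, e.g. an
 * OPB clock of 66666666 Hz yields (66666666 + 500000) / 1000000 = 67.
 */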
/* BHs disabled */
static int emac_configure(struct ocp_enet_private *dev)
{
	struct emac_regs *p = dev->emacp;
	struct net_device *ndev = dev->ndev;
	int gige = 0;
	u32 r;

	DBG("%d: configure" NL, dev->def->index);

	if (emac_reset(dev) < 0)
		return -ETIMEDOUT;

	tah_reset(dev->tah_dev);

	/* Mode register */
	r = EMAC_MR1_BASE(emac_opb_mhz()) | EMAC_MR1_VLE | EMAC_MR1_IST;
	if (dev->phy.duplex == DUPLEX_FULL)
		r |= EMAC_MR1_FDE | EMAC_MR1_MWSW_001;
	dev->stop_timeout = STOP_TIMEOUT_10;
	switch (dev->phy.speed) {
	case SPEED_1000:
		if (emac_phy_gpcs(dev->phy.mode)) {
			r |= EMAC_MR1_MF_1000GPCS |
			    EMAC_MR1_MF_IPPA(dev->phy.address);

			/* Put some arbitrary OUI, Manuf & Rev IDs so we can
			 * identify this GPCS PHY later.
			 */
			out_be32(&p->ipcr, 0xdeadbeef);
		} else
			r |= EMAC_MR1_MF_1000;
		r |= EMAC_MR1_RFS_16K;
		gige = 1;

		if (dev->ndev->mtu > ETH_DATA_LEN) {
			r |= EMAC_MR1_JPSM;
			dev->stop_timeout = STOP_TIMEOUT_1000_JUMBO;
		} else
			dev->stop_timeout = STOP_TIMEOUT_1000;
		break;
	case SPEED_100:
		r |= EMAC_MR1_MF_100;
		dev->stop_timeout = STOP_TIMEOUT_100;
		/* Fall through */
	default:
		r |= EMAC_MR1_RFS_4K;
		break;
	}

	if (dev->rgmii_dev)
		rgmii_set_speed(dev->rgmii_dev, dev->rgmii_input,
				dev->phy.speed);
	else
		zmii_set_speed(dev->zmii_dev, dev->zmii_input, dev->phy.speed);

#if !defined(CONFIG_40x)
	/* on 40x erratum forces us to NOT use integrated flow control,
	 * let's hope it works on 44x ;)
	 */
	if (dev->phy.duplex == DUPLEX_FULL) {
		if (dev->phy.pause)
			r |= EMAC_MR1_EIFC | EMAC_MR1_APP;
		else if (dev->phy.asym_pause)
			r |= EMAC_MR1_APP;
	}
#endif
	out_be32(&p->mr1, r);

	/* Set individual MAC address */
	out_be32(&p->iahr, (ndev->dev_addr[0] << 8) | ndev->dev_addr[1]);
	out_be32(&p->ialr, (ndev->dev_addr[2] << 24) |
		 (ndev->dev_addr[3] << 16) | (ndev->dev_addr[4] << 8) |
		 ndev->dev_addr[5]);

	/* VLAN Tag Protocol ID */
	out_be32(&p->vtpid, 0x8100);

	/* Receive mode register */
	r = emac_iff2rmr(ndev);
	if (r & EMAC_RMR_MAE)
		emac_hash_mc(dev);
	out_be32(&p->rmr, r);

	/* FIFOs thresholds */
	r = EMAC_TMR1((EMAC_MAL_BURST_SIZE / EMAC_FIFO_ENTRY_SIZE) + 1,
		      EMAC_TX_FIFO_SIZE / 2 / EMAC_FIFO_ENTRY_SIZE);
	out_be32(&p->tmr1, r);
	out_be32(&p->trtr, EMAC_TRTR(EMAC_TX_FIFO_SIZE / 2));

	/* PAUSE frame is sent when RX FIFO reaches its high-water mark,
	   there should be still enough space in FIFO to allow our link
	   partner time to process this frame and also time to send PAUSE
	   frame itself.

	   Here is the worst case scenario for the RX FIFO "headroom"
	   (from "The Switch Book") (100Mbps, without preamble, inter-frame gap):

	   1) One maximum-length frame on TX                    1522 bytes
	   2) One PAUSE frame time                                64 bytes
	   3) PAUSE frame decode time allowance                   64 bytes
	   4) One maximum-length frame on RX                    1522 bytes
	   5) Round-trip propagation delay of the link (100Mb)    15 bytes
	   ----------
	   3187 bytes

	   I chose to set high-water mark to RX_FIFO_SIZE / 4 (1024 bytes)
	   low-water mark  to RX_FIFO_SIZE / 8 (512 bytes)
	 */
	r = EMAC_RWMR(EMAC_RX_FIFO_SIZE(gige) / 8 / EMAC_FIFO_ENTRY_SIZE,
		      EMAC_RX_FIFO_SIZE(gige) / 4 / EMAC_FIFO_ENTRY_SIZE);
	out_be32(&p->rwmr, r);
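	/* Plugging in the sizes used here: with the 4K RX FIFO selected for
	 * 10/100 the watermarks come out at 512 (low) and 1024 (high) bytes,
	 * exactly as the comment above says; with the 16K FIFO selected for
	 * gigabit they scale to 2048/4096. EMAC_RWMR() expresses both in
	 * EMAC_FIFO_ENTRY_SIZE units.
	 */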
	/* Set PAUSE timer to the maximum */
	out_be32(&p->ptr, 0xffff);

	/* Enable interrupts */
	out_be32(&p->iser, EMAC_ISR_TXPE | EMAC_ISR_RXPE |
		 /* EMAC_ISR_TXUE | EMAC_ISR_RXOE | */
		 EMAC_ISR_OVR | EMAC_ISR_BP | EMAC_ISR_SE |
		 EMAC_ISR_ALE | EMAC_ISR_BFCS | EMAC_ISR_PTLE | EMAC_ISR_ORE |
		 EMAC_ISR_IRE | EMAC_ISR_TE);

	/* We need to take GPCS PHY out of isolate mode after EMAC reset */
	if (emac_phy_gpcs(dev->phy.mode))
		mii_reset_phy(&dev->phy);

	return 0;
}
/* BHs disabled */
static void emac_reinitialize(struct ocp_enet_private *dev)
{
	DBG("%d: reinitialize" NL, dev->def->index);

	if (!emac_configure(dev)) {
		emac_tx_enable(dev);
		emac_rx_enable(dev);
	}
}
/* BHs disabled */
static void emac_full_tx_reset(struct net_device *ndev)
{
	struct ocp_enet_private *dev = ndev->priv;
	struct ocp_func_emac_data *emacdata = dev->def->additions;

	DBG("%d: full_tx_reset" NL, dev->def->index);

	emac_tx_disable(dev);
	mal_disable_tx_channel(dev->mal, emacdata->mal_tx_chan);
	emac_clean_tx_ring(dev);
	dev->tx_cnt = dev->tx_slot = dev->ack_slot = 0;

	emac_configure(dev);

	mal_enable_tx_channel(dev->mal, emacdata->mal_tx_chan);
	emac_tx_enable(dev);
	emac_rx_enable(dev);

	netif_wake_queue(ndev);
}
static int __emac_mdio_read(struct ocp_enet_private *dev, u8 id, u8 reg)
{
	struct emac_regs *p = dev->emacp;
	u32 r;
	int n;

	DBG2("%d: mdio_read(%02x,%02x)" NL, dev->def->index, id, reg);

	/* Enable proper MDIO port */
	zmii_enable_mdio(dev->zmii_dev, dev->zmii_input);

	/* Wait for management interface to become idle */
	n = 10;
	while (!emac_phy_done(in_be32(&p->stacr))) {
		udelay(1);
		if (!--n)
			goto to;
	}

	/* Issue read command */
	out_be32(&p->stacr,
		 EMAC_STACR_BASE(emac_opb_mhz()) | EMAC_STACR_STAC_READ |
		 (reg & EMAC_STACR_PRA_MASK)
		 | ((id & EMAC_STACR_PCDA_MASK) << EMAC_STACR_PCDA_SHIFT)
		 | EMAC_STACR_START);

	/* Wait for read to complete */
	n = 100;
	while (!emac_phy_done(r = in_be32(&p->stacr))) {
		udelay(1);
		if (!--n)
			goto to;
	}

	if (unlikely(r & EMAC_STACR_PHYE)) {
		DBG("%d: mdio_read(%02x, %02x) failed" NL, dev->def->index,
		    id, reg);
		return -EREMOTEIO;
	}

	r = ((r >> EMAC_STACR_PHYD_SHIFT) & EMAC_STACR_PHYD_MASK);
	DBG2("%d: mdio_read -> %04x" NL, dev->def->index, r);
	return r;
      to:
	DBG("%d: MII management interface timeout (read)" NL, dev->def->index);
	return -ETIMEDOUT;
}
static void __emac_mdio_write(struct ocp_enet_private *dev, u8 id, u8 reg,
			      u16 val)
{
	struct emac_regs *p = dev->emacp;
	int n;

	DBG2("%d: mdio_write(%02x,%02x,%04x)" NL, dev->def->index, id, reg,
	     val);

	/* Enable proper MDIO port */
	zmii_enable_mdio(dev->zmii_dev, dev->zmii_input);

	/* Wait for management interface to be idle */
	n = 10;
	while (!emac_phy_done(in_be32(&p->stacr))) {
		udelay(1);
		if (!--n)
			goto to;
	}

	/* Issue write command */
	out_be32(&p->stacr,
		 EMAC_STACR_BASE(emac_opb_mhz()) | EMAC_STACR_STAC_WRITE |
		 (reg & EMAC_STACR_PRA_MASK) |
		 ((id & EMAC_STACR_PCDA_MASK) << EMAC_STACR_PCDA_SHIFT) |
		 (val << EMAC_STACR_PHYD_SHIFT) | EMAC_STACR_START);

	/* Wait for write to complete */
	n = 100;
	while (!emac_phy_done(in_be32(&p->stacr))) {
		udelay(1);
		if (!--n)
			goto to;
	}
	return;
      to:
	DBG("%d: MII management interface timeout (write)" NL, dev->def->index);
}
static int emac_mdio_read(struct net_device *ndev, int id, int reg)
{
	struct ocp_enet_private *dev = ndev->priv;
	int res;

	local_bh_disable();
	res = __emac_mdio_read(dev->mdio_dev ? dev->mdio_dev : dev, (u8) id,
			       (u8) reg);
	local_bh_enable();
	return res;
}

static void emac_mdio_write(struct net_device *ndev, int id, int reg, int val)
{
	struct ocp_enet_private *dev = ndev->priv;

	local_bh_disable();
	__emac_mdio_write(dev->mdio_dev ? dev->mdio_dev : dev, (u8) id,
			  (u8) reg, (u16) val);
	local_bh_enable();
}
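/* These two wrappers have the signature expected by the generic MII helpers,
 * so they could also be plugged into <linux/mii.h> code. A minimal sketch
 * (hypothetical usage only - this driver instead wires the pair into
 * dev->phy.mdio_read/mdio_write during probe):
 *
 *	struct mii_if_info mii = {
 *		.dev        = ndev,
 *		.phy_id     = dev->phy.address,
 *		.mdio_read  = emac_mdio_read,
 *		.mdio_write = emac_mdio_write,
 *	};
 *	if (mii_link_ok(&mii))
 *		...;
 */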
/* BHs disabled */
static void emac_set_multicast_list(struct net_device *ndev)
{
	struct ocp_enet_private *dev = ndev->priv;
	struct emac_regs *p = dev->emacp;
	u32 rmr = emac_iff2rmr(ndev);

	DBG("%d: multicast %08x" NL, dev->def->index, rmr);
	BUG_ON(!netif_running(dev->ndev));

	/* I decided to relax register access rules here to avoid
	 * a full EMAC reset.
	 *
	 * There is a real problem with EMAC4 core if we use MWSW_001 bit
	 * in MR1 register and do a full EMAC reset.
	 * One TX BD status update is delayed and, after EMAC reset, it
	 * never happens, resulting in TX hung (it'll be recovered by TX
	 * timeout handler eventually, but this is just gross).
	 * So we either have to do full TX reset or try to cheat here :)
	 *
	 * The only required change is to RX mode register, so I *think* all
	 * we need is just to stop RX channel. This seems to work on all
	 * tested SoCs.                                            --ebs
	 */
	emac_rx_disable(dev);
	if (rmr & EMAC_RMR_MAE)
		emac_hash_mc(dev);
	out_be32(&p->rmr, rmr);
	emac_rx_enable(dev);
}
/* BHs disabled */
static int emac_resize_rx_ring(struct ocp_enet_private *dev, int new_mtu)
{
	struct ocp_func_emac_data *emacdata = dev->def->additions;
	int rx_sync_size = emac_rx_sync_size(new_mtu);
	int rx_skb_size = emac_rx_skb_size(new_mtu);
	int i, ret = 0;

	emac_rx_disable(dev);
	mal_disable_rx_channel(dev->mal, emacdata->mal_rx_chan);

	if (dev->rx_sg_skb) {
		++dev->estats.rx_dropped_resize;
		dev_kfree_skb(dev->rx_sg_skb);
		dev->rx_sg_skb = NULL;
	}

	/* Make a first pass over RX ring and mark BDs ready, dropping
	 * non-processed packets on the way. We need this as a separate pass
	 * to simplify error recovery in the case of allocation failure later.
	 */
	for (i = 0; i < NUM_RX_BUFF; ++i) {
		if (dev->rx_desc[i].ctrl & MAL_RX_CTRL_FIRST)
			++dev->estats.rx_dropped_resize;

		dev->rx_desc[i].data_len = 0;
		dev->rx_desc[i].ctrl = MAL_RX_CTRL_EMPTY |
		    (i == (NUM_RX_BUFF - 1) ? MAL_RX_CTRL_WRAP : 0);
	}

	/* Reallocate RX ring only if bigger skb buffers are required */
	if (rx_skb_size <= dev->rx_skb_size)
		goto skip;

	/* Second pass, allocate new skbs */
	for (i = 0; i < NUM_RX_BUFF; ++i) {
		struct sk_buff *skb = alloc_skb(rx_skb_size, GFP_ATOMIC);
		if (!skb) {
			ret = -ENOMEM;
			goto oom;
		}

		BUG_ON(!dev->rx_skb[i]);
		dev_kfree_skb(dev->rx_skb[i]);

		skb_reserve(skb, EMAC_RX_SKB_HEADROOM + 2);
		dev->rx_desc[i].data_ptr =
		    dma_map_single(dev->ldev, skb->data - 2, rx_sync_size,
				   DMA_FROM_DEVICE) + 2;
		dev->rx_skb[i] = skb;
	}
      skip:
	/* Check if we need to change "Jumbo" bit in MR1 */
	if ((new_mtu > ETH_DATA_LEN) ^ (dev->ndev->mtu > ETH_DATA_LEN)) {
		/* This is to prevent starting RX channel in emac_rx_enable() */
		dev->commac.rx_stopped = 1;

		dev->ndev->mtu = new_mtu;
		emac_full_tx_reset(dev->ndev);
	}

	mal_set_rcbs(dev->mal, emacdata->mal_rx_chan, emac_rx_size(new_mtu));
      oom:
	/* Restart RX */
	dev->commac.rx_stopped = dev->rx_slot = 0;
	mal_enable_rx_channel(dev->mal, emacdata->mal_rx_chan);
	emac_rx_enable(dev);

	return ret;
}
/* Process ctx, rtnl_lock semaphore */
static int emac_change_mtu(struct net_device *ndev, int new_mtu)
{
	struct ocp_enet_private *dev = ndev->priv;
	int ret = 0;

	if (new_mtu < EMAC_MIN_MTU || new_mtu > EMAC_MAX_MTU)
		return -EINVAL;

	DBG("%d: change_mtu(%d)" NL, dev->def->index, new_mtu);

	local_bh_disable();
	if (netif_running(ndev)) {
		/* Check if we really need to reinitialize RX ring */
		if (emac_rx_skb_size(ndev->mtu) != emac_rx_skb_size(new_mtu))
			ret = emac_resize_rx_ring(dev, new_mtu);
	}

	if (!ret) {
		ndev->mtu = new_mtu;
		dev->rx_skb_size = emac_rx_skb_size(new_mtu);
		dev->rx_sync_size = emac_rx_sync_size(new_mtu);
	}
	local_bh_enable();

	return ret;
}
static void emac_clean_tx_ring(struct ocp_enet_private *dev)
{
	int i;
	for (i = 0; i < NUM_TX_BUFF; ++i) {
		if (dev->tx_skb[i]) {
			dev_kfree_skb(dev->tx_skb[i]);
			dev->tx_skb[i] = NULL;
			if (dev->tx_desc[i].ctrl & MAL_TX_CTRL_READY)
				++dev->estats.tx_dropped;
		}
		dev->tx_desc[i].ctrl = 0;
		dev->tx_desc[i].data_ptr = 0;
	}
}
*dev
)
813 for (i
= 0; i
< NUM_RX_BUFF
; ++i
)
814 if (dev
->rx_skb
[i
]) {
815 dev
->rx_desc
[i
].ctrl
= 0;
816 dev_kfree_skb(dev
->rx_skb
[i
]);
817 dev
->rx_skb
[i
] = NULL
;
818 dev
->rx_desc
[i
].data_ptr
= 0;
821 if (dev
->rx_sg_skb
) {
822 dev_kfree_skb(dev
->rx_sg_skb
);
823 dev
->rx_sg_skb
= NULL
;
static inline int emac_alloc_rx_skb(struct ocp_enet_private *dev, int slot,
				    int flags)
{
	struct sk_buff *skb = alloc_skb(dev->rx_skb_size, flags);
	if (unlikely(!skb))
		return -ENOMEM;

	dev->rx_skb[slot] = skb;
	dev->rx_desc[slot].data_len = 0;

	skb_reserve(skb, EMAC_RX_SKB_HEADROOM + 2);
	dev->rx_desc[slot].data_ptr =
	    dma_map_single(dev->ldev, skb->data - 2, dev->rx_sync_size,
			   DMA_FROM_DEVICE) + 2;

	dev->rx_desc[slot].ctrl = MAL_RX_CTRL_EMPTY |
	    (slot == (NUM_RX_BUFF - 1) ? MAL_RX_CTRL_WRAP : 0);

	return 0;
}
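/* The "+ 2" dance above is the usual IP-alignment trick: the BD's data
 * pointer is offset 2 bytes into the DMA-mapped area so that, after the
 * 14-byte Ethernet header, the IP header starts on a 4-byte boundary.
 * EMAC_RX_SKB_HEADROOM + 2 is reserved in the skb to keep skb->data
 * consistent with that offset.
 */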
static void emac_print_link_status(struct ocp_enet_private *dev)
{
	if (netif_carrier_ok(dev->ndev))
		printk(KERN_INFO "%s: link is up, %d %s%s\n",
		       dev->ndev->name, dev->phy.speed,
		       dev->phy.duplex == DUPLEX_FULL ? "FDX" : "HDX",
		       dev->phy.pause ? ", pause enabled" :
		       dev->phy.asym_pause ? ", asymmetric pause enabled" : "");
	else
		printk(KERN_INFO "%s: link is down\n", dev->ndev->name);
}
/* Process ctx, rtnl_lock semaphore */
static int emac_open(struct net_device *ndev)
{
	struct ocp_enet_private *dev = ndev->priv;
	struct ocp_func_emac_data *emacdata = dev->def->additions;
	int err, i;

	DBG("%d: open" NL, dev->def->index);

	/* Setup error IRQ handler */
	err = request_irq(dev->def->irq, emac_irq, 0, "EMAC", dev);
	if (err) {
		printk(KERN_ERR "%s: failed to request IRQ %d\n",
		       ndev->name, dev->def->irq);
		return err;
	}

	/* Allocate RX ring */
	for (i = 0; i < NUM_RX_BUFF; ++i)
		if (emac_alloc_rx_skb(dev, i, GFP_KERNEL)) {
			printk(KERN_ERR "%s: failed to allocate RX ring\n",
			       ndev->name);
			goto oom;
		}

	local_bh_disable();
	dev->tx_cnt = dev->tx_slot = dev->ack_slot = dev->rx_slot =
	    dev->commac.rx_stopped = 0;
	dev->rx_sg_skb = NULL;

	if (dev->phy.address >= 0) {
		int link_poll_interval;
		if (dev->phy.def->ops->poll_link(&dev->phy)) {
			dev->phy.def->ops->read_link(&dev->phy);
			EMAC_RX_CLK_DEFAULT(dev->def->index);
			netif_carrier_on(dev->ndev);
			link_poll_interval = PHY_POLL_LINK_ON;
		} else {
			EMAC_RX_CLK_TX(dev->def->index);
			netif_carrier_off(dev->ndev);
			link_poll_interval = PHY_POLL_LINK_OFF;
		}
		mod_timer(&dev->link_timer, jiffies + link_poll_interval);
		emac_print_link_status(dev);
	} else
		netif_carrier_on(dev->ndev);

	emac_configure(dev);
	mal_poll_add(dev->mal, &dev->commac);
	mal_enable_tx_channel(dev->mal, emacdata->mal_tx_chan);
	mal_set_rcbs(dev->mal, emacdata->mal_rx_chan, emac_rx_size(ndev->mtu));
	mal_enable_rx_channel(dev->mal, emacdata->mal_rx_chan);
	emac_tx_enable(dev);
	emac_rx_enable(dev);
	netif_start_queue(ndev);
	local_bh_enable();

	return 0;
      oom:
	emac_clean_rx_ring(dev);
	free_irq(dev->def->irq, dev);
	return -ENOMEM;
}
/* BHs disabled */
static int emac_link_differs(struct ocp_enet_private *dev)
{
	u32 r = in_be32(&dev->emacp->mr1);

	int duplex = r & EMAC_MR1_FDE ? DUPLEX_FULL : DUPLEX_HALF;
	int speed, pause, asym_pause;

	if (r & (EMAC_MR1_MF_1000 | EMAC_MR1_MF_1000GPCS))
		speed = SPEED_1000;
	else if (r & EMAC_MR1_MF_100)
		speed = SPEED_100;
	else
		speed = SPEED_10;

	switch (r & (EMAC_MR1_EIFC | EMAC_MR1_APP)) {
	case (EMAC_MR1_EIFC | EMAC_MR1_APP):
		pause = 1;
		asym_pause = 0;
		break;
	case EMAC_MR1_APP:
		pause = 0;
		asym_pause = 1;
		break;
	default:
		pause = asym_pause = 0;
	}
	return speed != dev->phy.speed || duplex != dev->phy.duplex ||
	    pause != dev->phy.pause || asym_pause != dev->phy.asym_pause;
}
/* BHs disabled */
static void emac_link_timer(unsigned long data)
{
	struct ocp_enet_private *dev = (struct ocp_enet_private *)data;
	int link_poll_interval;

	DBG2("%d: link timer" NL, dev->def->index);

	if (dev->phy.def->ops->poll_link(&dev->phy)) {
		if (!netif_carrier_ok(dev->ndev)) {
			EMAC_RX_CLK_DEFAULT(dev->def->index);

			/* Get new link parameters */
			dev->phy.def->ops->read_link(&dev->phy);

			if (dev->tah_dev || emac_link_differs(dev))
				emac_full_tx_reset(dev->ndev);

			netif_carrier_on(dev->ndev);
			emac_print_link_status(dev);
		}
		link_poll_interval = PHY_POLL_LINK_ON;
	} else {
		if (netif_carrier_ok(dev->ndev)) {
			EMAC_RX_CLK_TX(dev->def->index);
#if defined(CONFIG_IBM_EMAC_PHY_RX_CLK_FIX)
			emac_reinitialize(dev);
#endif
			netif_carrier_off(dev->ndev);
			emac_print_link_status(dev);
		}

		/* Retry reset if the previous attempt failed.
		 * This is needed mostly for CONFIG_IBM_EMAC_PHY_RX_CLK_FIX
		 * case, but I left it here because it shouldn't trigger for
		 * a fully functional adapter anyway.
		 */
		if (unlikely(dev->reset_failed))
			emac_reinitialize(dev);

		link_poll_interval = PHY_POLL_LINK_OFF;
	}
	mod_timer(&dev->link_timer, jiffies + link_poll_interval);
}
/* BHs disabled */
static void emac_force_link_update(struct ocp_enet_private *dev)
{
	netif_carrier_off(dev->ndev);
	if (timer_pending(&dev->link_timer))
		mod_timer(&dev->link_timer, jiffies + PHY_POLL_LINK_OFF);
}
/* Process ctx, rtnl_lock semaphore */
static int emac_close(struct net_device *ndev)
{
	struct ocp_enet_private *dev = ndev->priv;
	struct ocp_func_emac_data *emacdata = dev->def->additions;

	DBG("%d: close" NL, dev->def->index);

	local_bh_disable();

	if (dev->phy.address >= 0)
		del_timer_sync(&dev->link_timer);

	netif_stop_queue(ndev);
	emac_rx_disable(dev);
	emac_tx_disable(dev);
	mal_disable_rx_channel(dev->mal, emacdata->mal_rx_chan);
	mal_disable_tx_channel(dev->mal, emacdata->mal_tx_chan);
	mal_poll_del(dev->mal, &dev->commac);
	local_bh_enable();

	emac_clean_tx_ring(dev);
	emac_clean_rx_ring(dev);
	free_irq(dev->def->irq, dev);

	return 0;
}
static inline u16 emac_tx_csum(struct ocp_enet_private *dev,
			       struct sk_buff *skb)
{
#if defined(CONFIG_IBM_EMAC_TAH)
	if (skb->ip_summed == CHECKSUM_HW) {
		++dev->stats.tx_packets_csum;
		return EMAC_TX_CTRL_TAH_CSUM;
	}
#endif
	return 0;
}
static inline int emac_xmit_finish(struct ocp_enet_private *dev, int len)
{
	struct emac_regs *p = dev->emacp;
	struct net_device *ndev = dev->ndev;

	/* Send the packet out */
	out_be32(&p->tmr0, EMAC_TMR0_XMIT);

	if (unlikely(++dev->tx_cnt == NUM_TX_BUFF)) {
		netif_stop_queue(ndev);
		DBG2("%d: stopped TX queue" NL, dev->def->index);
	}

	ndev->trans_start = jiffies;
	++dev->stats.tx_packets;
	dev->stats.tx_bytes += len;

	return 0;
}
*skb
, struct net_device
*ndev
)
1071 struct ocp_enet_private
*dev
= ndev
->priv
;
1072 unsigned int len
= skb
->len
;
1075 u16 ctrl
= EMAC_TX_CTRL_GFCS
| EMAC_TX_CTRL_GP
| MAL_TX_CTRL_READY
|
1076 MAL_TX_CTRL_LAST
| emac_tx_csum(dev
, skb
);
1078 slot
= dev
->tx_slot
++;
1079 if (dev
->tx_slot
== NUM_TX_BUFF
) {
1081 ctrl
|= MAL_TX_CTRL_WRAP
;
1084 DBG2("%d: xmit(%u) %d" NL
, dev
->def
->index
, len
, slot
);
1086 dev
->tx_skb
[slot
] = skb
;
1087 dev
->tx_desc
[slot
].data_ptr
= dma_map_single(dev
->ldev
, skb
->data
, len
,
1089 dev
->tx_desc
[slot
].data_len
= (u16
) len
;
1091 dev
->tx_desc
[slot
].ctrl
= ctrl
;
1093 return emac_xmit_finish(dev
, len
);
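/* Note on the ordering above: data_ptr and data_len must be committed
 * before ctrl, because writing MAL_TX_CTRL_READY into ctrl is what hands
 * the descriptor over to the MAL; emac_xmit_finish() then kicks the
 * channel via TMR0.
 */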
#if defined(CONFIG_IBM_EMAC_TAH)
static inline int emac_xmit_split(struct ocp_enet_private *dev, int slot,
				  u32 pd, int len, int last, u16 base_ctrl)
{
	while (1) {
		u16 ctrl = base_ctrl;
		int chunk = min(len, MAL_MAX_TX_SIZE);
		len -= chunk;

		slot = (slot + 1) % NUM_TX_BUFF;

		if (last && !len)
			ctrl |= MAL_TX_CTRL_LAST;
		if (slot == NUM_TX_BUFF - 1)
			ctrl |= MAL_TX_CTRL_WRAP;

		dev->tx_skb[slot] = NULL;
		dev->tx_desc[slot].data_ptr = pd;
		dev->tx_desc[slot].data_len = (u16) chunk;
		dev->tx_desc[slot].ctrl = ctrl;
		++dev->tx_cnt;

		if (!len)
			break;

		pd += chunk;
	}
	return slot;
}
/* BHs disabled (SG version for TAH equipped EMACs) */
static int emac_start_xmit_sg(struct sk_buff *skb, struct net_device *ndev)
{
	struct ocp_enet_private *dev = ndev->priv;
	int nr_frags = skb_shinfo(skb)->nr_frags;
	int len = skb->len, chunk;
	int slot, i;
	u16 ctrl;
	u32 pd;

	/* This is common "fast" path */
	if (likely(!nr_frags && len <= MAL_MAX_TX_SIZE))
		return emac_start_xmit(skb, ndev);

	len -= skb->data_len;

	/* Note, this is only an *estimation*, we can still run out of empty
	 * slots because of the additional fragmentation into
	 * MAL_MAX_TX_SIZE-sized chunks
	 */
	if (unlikely(dev->tx_cnt + nr_frags + mal_tx_chunks(len) > NUM_TX_BUFF))
		goto stop_queue;

	ctrl = EMAC_TX_CTRL_GFCS | EMAC_TX_CTRL_GP | MAL_TX_CTRL_READY |
	    emac_tx_csum(dev, skb);
	slot = dev->tx_slot;

	/* skb data */
	dev->tx_skb[slot] = NULL;
	chunk = min(len, MAL_MAX_TX_SIZE);
	dev->tx_desc[slot].data_ptr = pd =
	    dma_map_single(dev->ldev, skb->data, len, DMA_TO_DEVICE);
	dev->tx_desc[slot].data_len = (u16) chunk;
	len -= chunk;
	if (unlikely(len))
		slot = emac_xmit_split(dev, slot, pd + chunk, len, !nr_frags,
				       ctrl);
	/* skb fragments */
	for (i = 0; i < nr_frags; ++i) {
		struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[i];
		len = frag->size;

		if (unlikely(dev->tx_cnt + mal_tx_chunks(len) >= NUM_TX_BUFF))
			goto undo_frame;

		pd = dma_map_page(dev->ldev, frag->page, frag->page_offset, len,
				  DMA_TO_DEVICE);

		slot = emac_xmit_split(dev, slot, pd, len, i == nr_frags - 1,
				       ctrl);
	}

	DBG2("%d: xmit_sg(%u) %d - %d" NL, dev->def->index, skb->len,
	     dev->tx_slot, slot);

	/* Attach skb to the last slot so we don't release it too early */
	dev->tx_skb[slot] = skb;

	/* Send the packet out */
	if (dev->tx_slot == NUM_TX_BUFF - 1)
		ctrl |= MAL_TX_CTRL_WRAP;
	dev->tx_desc[dev->tx_slot].ctrl = ctrl;
	dev->tx_slot = (slot + 1) % NUM_TX_BUFF;

	return emac_xmit_finish(dev, skb->len);

      undo_frame:
	/* Well, too bad. Our previous estimation was overly optimistic.
	 * Undo everything.
	 */
	while (slot != dev->tx_slot) {
		dev->tx_desc[slot].ctrl = 0;
		--dev->tx_cnt;
		if (--slot < 0)
			slot = NUM_TX_BUFF - 1;
	}
	++dev->estats.tx_undo;

      stop_queue:
	netif_stop_queue(ndev);
	DBG2("%d: stopped TX queue" NL, dev->def->index);
	return 1;
}
#else
# define emac_start_xmit_sg	emac_start_xmit
#endif	/* !defined(CONFIG_IBM_EMAC_TAH) */
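/* A worked example of the chunking above, taking MAL_MAX_TX_SIZE = 4096
 * purely for illustration: a 9000-byte linear skb splits into BDs of
 * 4096 + 4096 + 808 bytes, i.e. mal_tx_chunks(9000) = 3. This is why the
 * free-slot estimation counts chunks rather than skbs and can still be
 * off, hence the undo path in emac_start_xmit_sg() above.
 */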
static void emac_parse_tx_error(struct ocp_enet_private *dev, u16 ctrl)
{
	struct ibm_emac_error_stats *st = &dev->estats;
	DBG("%d: BD TX error %04x" NL, dev->def->index, ctrl);

	++st->tx_bd_errors;
	if (ctrl & EMAC_TX_ST_BFCS)
		++st->tx_bd_bad_fcs;
	if (ctrl & EMAC_TX_ST_LCS)
		++st->tx_bd_carrier_loss;
	if (ctrl & EMAC_TX_ST_ED)
		++st->tx_bd_excessive_deferral;
	if (ctrl & EMAC_TX_ST_EC)
		++st->tx_bd_excessive_collisions;
	if (ctrl & EMAC_TX_ST_LC)
		++st->tx_bd_late_collision;
	if (ctrl & EMAC_TX_ST_MC)
		++st->tx_bd_multple_collisions;
	if (ctrl & EMAC_TX_ST_SC)
		++st->tx_bd_single_collision;
	if (ctrl & EMAC_TX_ST_UR)
		++st->tx_bd_underrun;
	if (ctrl & EMAC_TX_ST_SQE)
		++st->tx_bd_sqe;
}
static void emac_poll_tx(void *param)
{
	struct ocp_enet_private *dev = param;
	DBG2("%d: poll_tx, %d %d" NL, dev->def->index, dev->tx_cnt,
	     dev->ack_slot);

	if (dev->tx_cnt) {
		u16 ctrl;
		int slot = dev->ack_slot, n = 0;
	      again:
		ctrl = dev->tx_desc[slot].ctrl;
		if (!(ctrl & MAL_TX_CTRL_READY)) {
			struct sk_buff *skb = dev->tx_skb[slot];
			++n;

			if (skb) {
				dev_kfree_skb(skb);
				dev->tx_skb[slot] = NULL;
			}
			slot = (slot + 1) % NUM_TX_BUFF;

			if (unlikely(EMAC_IS_BAD_TX(ctrl)))
				emac_parse_tx_error(dev, ctrl);

			if (--dev->tx_cnt)
				goto again;
		}
		if (n) {
			dev->ack_slot = slot;
			if (netif_queue_stopped(dev->ndev) &&
			    dev->tx_cnt < EMAC_TX_WAKEUP_THRESH)
				netif_wake_queue(dev->ndev);

			DBG2("%d: tx %d pkts" NL, dev->def->index, n);
		}
	}
}
static inline void emac_recycle_rx_skb(struct ocp_enet_private *dev, int slot,
				       int len)
{
	struct sk_buff *skb = dev->rx_skb[slot];
	DBG2("%d: recycle %d %d" NL, dev->def->index, slot, len);

	if (len)
		dma_map_single(dev->ldev, skb->data - 2,
			       EMAC_DMA_ALIGN(len + 2), DMA_FROM_DEVICE);

	dev->rx_desc[slot].data_len = 0;
	dev->rx_desc[slot].ctrl = MAL_RX_CTRL_EMPTY |
	    (slot == (NUM_RX_BUFF - 1) ? MAL_RX_CTRL_WRAP : 0);
}
static void emac_parse_rx_error(struct ocp_enet_private *dev, u16 ctrl)
{
	struct ibm_emac_error_stats *st = &dev->estats;
	DBG("%d: BD RX error %04x" NL, dev->def->index, ctrl);

	++st->rx_bd_errors;
	if (ctrl & EMAC_RX_ST_OE)
		++st->rx_bd_overrun;
	if (ctrl & EMAC_RX_ST_BP)
		++st->rx_bd_bad_packet;
	if (ctrl & EMAC_RX_ST_RP)
		++st->rx_bd_runt_packet;
	if (ctrl & EMAC_RX_ST_SE)
		++st->rx_bd_short_event;
	if (ctrl & EMAC_RX_ST_AE)
		++st->rx_bd_alignment_error;
	if (ctrl & EMAC_RX_ST_BFCS)
		++st->rx_bd_bad_fcs;
	if (ctrl & EMAC_RX_ST_PTL)
		++st->rx_bd_packet_too_long;
	if (ctrl & EMAC_RX_ST_ORE)
		++st->rx_bd_out_of_range;
	if (ctrl & EMAC_RX_ST_IRE)
		++st->rx_bd_in_range;
}
static inline void emac_rx_csum(struct ocp_enet_private *dev,
				struct sk_buff *skb, u16 ctrl)
{
#if defined(CONFIG_IBM_EMAC_TAH)
	if (!ctrl && dev->tah_dev) {
		skb->ip_summed = CHECKSUM_UNNECESSARY;
		++dev->stats.rx_packets_csum;
	}
#endif
}
static inline int emac_rx_sg_append(struct ocp_enet_private *dev, int slot)
{
	if (likely(dev->rx_sg_skb != NULL)) {
		int len = dev->rx_desc[slot].data_len;
		int tot_len = dev->rx_sg_skb->len + len;

		if (unlikely(tot_len + 2 > dev->rx_skb_size)) {
			++dev->estats.rx_dropped_mtu;
			dev_kfree_skb(dev->rx_sg_skb);
			dev->rx_sg_skb = NULL;
		} else {
			cacheable_memcpy(dev->rx_sg_skb->tail,
					 dev->rx_skb[slot]->data, len);
			skb_put(dev->rx_sg_skb, len);
			emac_recycle_rx_skb(dev, slot, len);
			return 0;
		}
	}
	emac_recycle_rx_skb(dev, slot, 0);
	return -1;
}
/* BHs disabled */
static int emac_poll_rx(void *param, int budget)
{
	struct ocp_enet_private *dev = param;
	int slot = dev->rx_slot, received = 0;

	DBG2("%d: poll_rx(%d)" NL, dev->def->index, budget);

      again:
	while (budget > 0) {
		int len;
		struct sk_buff *skb;
		u16 ctrl = dev->rx_desc[slot].ctrl;

		if (ctrl & MAL_RX_CTRL_EMPTY)
			break;

		skb = dev->rx_skb[slot];
		len = dev->rx_desc[slot].data_len;

		if (unlikely(!MAL_IS_SINGLE_RX(ctrl)))
			goto sg;

		ctrl &= EMAC_BAD_RX_MASK;
		if (unlikely(ctrl && ctrl != EMAC_RX_TAH_BAD_CSUM)) {
			emac_parse_rx_error(dev, ctrl);
			++dev->estats.rx_dropped_error;
			emac_recycle_rx_skb(dev, slot, 0);
			len = 0;
			goto skip;
		}

		if (len && len < EMAC_RX_COPY_THRESH) {
			struct sk_buff *copy_skb =
			    alloc_skb(len + EMAC_RX_SKB_HEADROOM + 2,
				      GFP_ATOMIC);
			if (unlikely(!copy_skb))
				goto oom;

			skb_reserve(copy_skb, EMAC_RX_SKB_HEADROOM + 2);
			cacheable_memcpy(copy_skb->data - 2, skb->data - 2,
					 len + 2);
			emac_recycle_rx_skb(dev, slot, len);
			skb = copy_skb;
		} else if (unlikely(emac_alloc_rx_skb(dev, slot, GFP_ATOMIC)))
			goto oom;

		skb_put(skb, len);
	      push_packet:
		skb->dev = dev->ndev;
		skb->protocol = eth_type_trans(skb, dev->ndev);
		emac_rx_csum(dev, skb, ctrl);

		if (unlikely(netif_receive_skb(skb) == NET_RX_DROP))
			++dev->estats.rx_dropped_stack;
	      next:
		++dev->stats.rx_packets;
	      skip:
		dev->stats.rx_bytes += len;
		slot = (slot + 1) % NUM_RX_BUFF;
		--budget;
		++received;
		continue;
	      sg:
		if (ctrl & MAL_RX_CTRL_FIRST) {
			BUG_ON(dev->rx_sg_skb);
			if (unlikely(emac_alloc_rx_skb(dev, slot, GFP_ATOMIC))) {
				DBG("%d: rx OOM %d" NL, dev->def->index, slot);
				++dev->estats.rx_dropped_oom;
				emac_recycle_rx_skb(dev, slot, 0);
			} else {
				dev->rx_sg_skb = skb;
				skb_put(skb, len);
			}
		} else if (!emac_rx_sg_append(dev, slot) &&
			   (ctrl & MAL_RX_CTRL_LAST)) {

			skb = dev->rx_sg_skb;
			dev->rx_sg_skb = NULL;

			ctrl &= EMAC_BAD_RX_MASK;
			if (unlikely(ctrl && ctrl != EMAC_RX_TAH_BAD_CSUM)) {
				emac_parse_rx_error(dev, ctrl);
				++dev->estats.rx_dropped_error;
				dev_kfree_skb(skb);
				len = 0;
			} else
				goto push_packet;
		}
		goto skip;
	      oom:
		DBG("%d: rx OOM %d" NL, dev->def->index, slot);
		/* Drop the packet and recycle skb */
		++dev->estats.rx_dropped_oom;
		emac_recycle_rx_skb(dev, slot, 0);
		len = 0;
		goto skip;
	}

	if (received) {
		DBG2("%d: rx %d BDs" NL, dev->def->index, received);
		dev->rx_slot = slot;
	}

	if (unlikely(budget && dev->commac.rx_stopped)) {
		struct ocp_func_emac_data *emacdata = dev->def->additions;

		if (!(dev->rx_desc[slot].ctrl & MAL_RX_CTRL_EMPTY)) {
			DBG2("%d: rx restart" NL, dev->def->index);
			received = 0;
			goto again;
		}

		if (dev->rx_sg_skb) {
			DBG2("%d: dropping partial rx packet" NL,
			     dev->def->index);
			++dev->estats.rx_dropped_error;
			dev_kfree_skb(dev->rx_sg_skb);
			dev->rx_sg_skb = NULL;
		}

		dev->commac.rx_stopped = 0;
		mal_enable_rx_channel(dev->mal, emacdata->mal_rx_chan);
		emac_rx_enable(dev);
		dev->rx_slot = 0;
	}
	return received;
}
/* BHs disabled */
static int emac_peek_rx(void *param)
{
	struct ocp_enet_private *dev = param;
	return !(dev->rx_desc[dev->rx_slot].ctrl & MAL_RX_CTRL_EMPTY);
}
/* BHs disabled */
static int emac_peek_rx_sg(void *param)
{
	struct ocp_enet_private *dev = param;
	int slot = dev->rx_slot;
	while (1) {
		u16 ctrl = dev->rx_desc[slot].ctrl;
		if (ctrl & MAL_RX_CTRL_EMPTY)
			return 0;
		else if (ctrl & MAL_RX_CTRL_LAST)
			return 1;

		slot = (slot + 1) % NUM_RX_BUFF;

		/* I'm just being paranoid here :) */
		if (unlikely(slot == dev->rx_slot))
			return 0;
	}
}
static void emac_rxde(void *param)
{
	struct ocp_enet_private *dev = param;
	++dev->estats.rx_stopped;
	emac_rx_disable_async(dev);
}
/* Hard IRQ */
static irqreturn_t emac_irq(int irq, void *dev_instance, struct pt_regs *regs)
{
	struct ocp_enet_private *dev = dev_instance;
	struct emac_regs *p = dev->emacp;
	struct ibm_emac_error_stats *st = &dev->estats;

	u32 isr = in_be32(&p->isr);
	out_be32(&p->isr, isr);

	DBG("%d: isr = %08x" NL, dev->def->index, isr);

	if (isr & EMAC_ISR_TXPE)
		++st->tx_parity;
	if (isr & EMAC_ISR_RXPE)
		++st->rx_parity;
	if (isr & EMAC_ISR_TXUE)
		++st->tx_underrun;
	if (isr & EMAC_ISR_RXOE)
		++st->rx_fifo_overrun;
	if (isr & EMAC_ISR_OVR)
		++st->rx_overrun;
	if (isr & EMAC_ISR_BP)
		++st->rx_bad_packet;
	if (isr & EMAC_ISR_RP)
		++st->rx_runt_packet;
	if (isr & EMAC_ISR_SE)
		++st->rx_short_event;
	if (isr & EMAC_ISR_ALE)
		++st->rx_alignment_error;
	if (isr & EMAC_ISR_BFCS)
		++st->rx_bad_fcs;
	if (isr & EMAC_ISR_PTLE)
		++st->rx_packet_too_long;
	if (isr & EMAC_ISR_ORE)
		++st->rx_out_of_range;
	if (isr & EMAC_ISR_IRE)
		++st->rx_in_range;
	if (isr & EMAC_ISR_SQE)
		++st->tx_sqe;
	if (isr & EMAC_ISR_TE)
		++st->tx_errors;

	return IRQ_HANDLED;
}
static struct net_device_stats *emac_stats(struct net_device *ndev)
{
	struct ocp_enet_private *dev = ndev->priv;
	struct ibm_emac_stats *st = &dev->stats;
	struct ibm_emac_error_stats *est = &dev->estats;
	struct net_device_stats *nst = &dev->nstats;

	DBG2("%d: stats" NL, dev->def->index);

	/* Compute "legacy" statistics */
	local_irq_disable();
	nst->rx_packets = (unsigned long)st->rx_packets;
	nst->rx_bytes = (unsigned long)st->rx_bytes;
	nst->tx_packets = (unsigned long)st->tx_packets;
	nst->tx_bytes = (unsigned long)st->tx_bytes;
	nst->rx_dropped = (unsigned long)(est->rx_dropped_oom +
					  est->rx_dropped_error +
					  est->rx_dropped_resize +
					  est->rx_dropped_mtu);
	nst->tx_dropped = (unsigned long)est->tx_dropped;

	nst->rx_errors = (unsigned long)est->rx_bd_errors;
	nst->rx_fifo_errors = (unsigned long)(est->rx_bd_overrun +
					      est->rx_fifo_overrun +
					      est->rx_overrun);
	nst->rx_frame_errors = (unsigned long)(est->rx_bd_alignment_error +
					       est->rx_alignment_error);
	nst->rx_crc_errors = (unsigned long)(est->rx_bd_bad_fcs +
					     est->rx_bad_fcs);
	nst->rx_length_errors = (unsigned long)(est->rx_bd_runt_packet +
						est->rx_bd_short_event +
						est->rx_bd_packet_too_long +
						est->rx_bd_out_of_range +
						est->rx_bd_in_range +
						est->rx_runt_packet +
						est->rx_short_event +
						est->rx_packet_too_long +
						est->rx_out_of_range +
						est->rx_in_range);

	nst->tx_errors = (unsigned long)(est->tx_bd_errors + est->tx_errors);
	nst->tx_fifo_errors = (unsigned long)(est->tx_bd_underrun +
					      est->tx_underrun);
	nst->tx_carrier_errors = (unsigned long)est->tx_bd_carrier_loss;
	nst->collisions = (unsigned long)(est->tx_bd_excessive_deferral +
					  est->tx_bd_excessive_collisions +
					  est->tx_bd_late_collision +
					  est->tx_bd_multple_collisions);
	local_irq_enable();
	return nst;
}
static void emac_remove(struct ocp_device *ocpdev)
{
	struct ocp_enet_private *dev = ocp_get_drvdata(ocpdev);

	DBG("%d: remove" NL, dev->def->index);

	ocp_set_drvdata(ocpdev, NULL);
	unregister_netdev(dev->ndev);

	tah_fini(dev->tah_dev);
	rgmii_fini(dev->rgmii_dev, dev->rgmii_input);
	zmii_fini(dev->zmii_dev, dev->zmii_input);

	emac_dbg_register(dev->def->index, NULL);

	mal_unregister_commac(dev->mal, &dev->commac);
	iounmap((void *)dev->emacp);
	kfree(dev->ndev);
}
static struct mal_commac_ops emac_commac_ops = {
	.poll_tx = &emac_poll_tx,
	.poll_rx = &emac_poll_rx,
	.peek_rx = &emac_peek_rx,
	.rxde = &emac_rxde,
};

static struct mal_commac_ops emac_commac_sg_ops = {
	.poll_tx = &emac_poll_tx,
	.poll_rx = &emac_poll_rx,
	.peek_rx = &emac_peek_rx_sg,
	.rxde = &emac_rxde,
};
/* Ethtool support */
static int emac_ethtool_get_settings(struct net_device *ndev,
				     struct ethtool_cmd *cmd)
{
	struct ocp_enet_private *dev = ndev->priv;

	cmd->supported = dev->phy.features;
	cmd->port = PORT_MII;
	cmd->phy_address = dev->phy.address;
	cmd->transceiver =
	    dev->phy.address >= 0 ? XCVR_EXTERNAL : XCVR_INTERNAL;

	local_bh_disable();
	cmd->advertising = dev->phy.advertising;
	cmd->autoneg = dev->phy.autoneg;
	cmd->speed = dev->phy.speed;
	cmd->duplex = dev->phy.duplex;
	local_bh_enable();

	return 0;
}
static int emac_ethtool_set_settings(struct net_device *ndev,
				     struct ethtool_cmd *cmd)
{
	struct ocp_enet_private *dev = ndev->priv;
	u32 f = dev->phy.features;

	DBG("%d: set_settings(%d, %d, %d, 0x%08x)" NL, dev->def->index,
	    cmd->autoneg, cmd->speed, cmd->duplex, cmd->advertising);

	/* Basic sanity checks */
	if (dev->phy.address < 0)
		return -EOPNOTSUPP;
	if (cmd->autoneg != AUTONEG_ENABLE && cmd->autoneg != AUTONEG_DISABLE)
		return -EINVAL;
	if (cmd->autoneg == AUTONEG_ENABLE && cmd->advertising == 0)
		return -EINVAL;
	if (cmd->duplex != DUPLEX_HALF && cmd->duplex != DUPLEX_FULL)
		return -EINVAL;

	if (cmd->autoneg == AUTONEG_DISABLE) {
		switch (cmd->speed) {
		case SPEED_10:
			if (cmd->duplex == DUPLEX_HALF
			    && !(f & SUPPORTED_10baseT_Half))
				return -EINVAL;
			if (cmd->duplex == DUPLEX_FULL
			    && !(f & SUPPORTED_10baseT_Full))
				return -EINVAL;
			break;
		case SPEED_100:
			if (cmd->duplex == DUPLEX_HALF
			    && !(f & SUPPORTED_100baseT_Half))
				return -EINVAL;
			if (cmd->duplex == DUPLEX_FULL
			    && !(f & SUPPORTED_100baseT_Full))
				return -EINVAL;
			break;
		case SPEED_1000:
			if (cmd->duplex == DUPLEX_HALF
			    && !(f & SUPPORTED_1000baseT_Half))
				return -EINVAL;
			if (cmd->duplex == DUPLEX_FULL
			    && !(f & SUPPORTED_1000baseT_Full))
				return -EINVAL;
			break;
		default:
			return -EINVAL;
		}

		local_bh_disable();
		dev->phy.def->ops->setup_forced(&dev->phy, cmd->speed,
						cmd->duplex);
	} else {
		if (!(f & SUPPORTED_Autoneg))
			return -EINVAL;

		local_bh_disable();
		dev->phy.def->ops->setup_aneg(&dev->phy,
					      (cmd->advertising & f) |
					      (dev->phy.advertising &
					       (ADVERTISED_Pause |
						ADVERTISED_Asym_Pause)));
	}
	emac_force_link_update(dev);
	local_bh_enable();

	return 0;
}
static void emac_ethtool_get_ringparam(struct net_device *ndev,
				       struct ethtool_ringparam *rp)
{
	rp->rx_max_pending = rp->rx_pending = NUM_RX_BUFF;
	rp->tx_max_pending = rp->tx_pending = NUM_TX_BUFF;
}
static void emac_ethtool_get_pauseparam(struct net_device *ndev,
					struct ethtool_pauseparam *pp)
{
	struct ocp_enet_private *dev = ndev->priv;

	local_bh_disable();
	if ((dev->phy.features & SUPPORTED_Autoneg) &&
	    (dev->phy.advertising & (ADVERTISED_Pause | ADVERTISED_Asym_Pause)))
		pp->autoneg = 1;

	if (dev->phy.duplex == DUPLEX_FULL) {
		if (dev->phy.pause)
			pp->rx_pause = pp->tx_pause = 1;
		else if (dev->phy.asym_pause)
			pp->tx_pause = 1;
	}
	local_bh_enable();
}
static u32 emac_ethtool_get_rx_csum(struct net_device *ndev)
{
	struct ocp_enet_private *dev = ndev->priv;
	return dev->tah_dev != 0;
}
static int emac_get_regs_len(struct ocp_enet_private *dev)
{
	return sizeof(struct emac_ethtool_regs_subhdr) + EMAC_ETHTOOL_REGS_SIZE;
}

static int emac_ethtool_get_regs_len(struct net_device *ndev)
{
	struct ocp_enet_private *dev = ndev->priv;
	return sizeof(struct emac_ethtool_regs_hdr) +
	    emac_get_regs_len(dev) + mal_get_regs_len(dev->mal) +
	    zmii_get_regs_len(dev->zmii_dev) +
	    rgmii_get_regs_len(dev->rgmii_dev) +
	    tah_get_regs_len(dev->tah_dev);
}
static void *emac_dump_regs(struct ocp_enet_private *dev, void *buf)
{
	struct emac_ethtool_regs_subhdr *hdr = buf;

	hdr->version = EMAC_ETHTOOL_REGS_VER;
	hdr->index = dev->def->index;
	memcpy_fromio(hdr + 1, dev->emacp, EMAC_ETHTOOL_REGS_SIZE);
	return ((void *)(hdr + 1) + EMAC_ETHTOOL_REGS_SIZE);
}
static void emac_ethtool_get_regs(struct net_device *ndev,
				  struct ethtool_regs *regs, void *buf)
{
	struct ocp_enet_private *dev = ndev->priv;
	struct emac_ethtool_regs_hdr *hdr = buf;

	hdr->components = 0;
	buf = hdr + 1;

	local_irq_disable();
	buf = mal_dump_regs(dev->mal, buf);
	buf = emac_dump_regs(dev, buf);
	if (dev->zmii_dev) {
		hdr->components |= EMAC_ETHTOOL_REGS_ZMII;
		buf = zmii_dump_regs(dev->zmii_dev, buf);
	}
	if (dev->rgmii_dev) {
		hdr->components |= EMAC_ETHTOOL_REGS_RGMII;
		buf = rgmii_dump_regs(dev->rgmii_dev, buf);
	}
	if (dev->tah_dev) {
		hdr->components |= EMAC_ETHTOOL_REGS_TAH;
		buf = tah_dump_regs(dev->tah_dev, buf);
	}
	local_irq_enable();
}
static int emac_ethtool_nway_reset(struct net_device *ndev)
{
	struct ocp_enet_private *dev = ndev->priv;
	int res = 0;

	DBG("%d: nway_reset" NL, dev->def->index);

	if (dev->phy.address < 0)
		return -EOPNOTSUPP;

	local_bh_disable();
	if (!dev->phy.autoneg) {
		res = -EINVAL;
		goto out;
	}

	dev->phy.def->ops->setup_aneg(&dev->phy, dev->phy.advertising);
	emac_force_link_update(dev);

      out:
	local_bh_enable();
	return res;
}
static int emac_ethtool_get_stats_count(struct net_device *ndev)
{
	return EMAC_ETHTOOL_STATS_COUNT;
}

static void emac_ethtool_get_strings(struct net_device *ndev, u32 stringset,
				     u8 * buf)
{
	if (stringset == ETH_SS_STATS)
		memcpy(buf, &emac_stats_keys, sizeof(emac_stats_keys));
}
static void emac_ethtool_get_ethtool_stats(struct net_device *ndev,
					   struct ethtool_stats *estats,
					   u64 * tmp_stats)
{
	struct ocp_enet_private *dev = ndev->priv;
	local_irq_disable();
	memcpy(tmp_stats, &dev->stats, sizeof(dev->stats));
	tmp_stats += sizeof(dev->stats) / sizeof(u64);
	memcpy(tmp_stats, &dev->estats, sizeof(dev->estats));
	local_irq_enable();
}
static void emac_ethtool_get_drvinfo(struct net_device *ndev,
				     struct ethtool_drvinfo *info)
{
	struct ocp_enet_private *dev = ndev->priv;

	strcpy(info->driver, "ibm_emac");
	strcpy(info->version, DRV_VERSION);
	info->fw_version[0] = '\0';
	sprintf(info->bus_info, "PPC 4xx EMAC %d", dev->def->index);
	info->n_stats = emac_ethtool_get_stats_count(ndev);
	info->regdump_len = emac_ethtool_get_regs_len(ndev);
}
static struct ethtool_ops emac_ethtool_ops = {
	.get_settings = emac_ethtool_get_settings,
	.set_settings = emac_ethtool_set_settings,
	.get_drvinfo = emac_ethtool_get_drvinfo,

	.get_regs_len = emac_ethtool_get_regs_len,
	.get_regs = emac_ethtool_get_regs,

	.nway_reset = emac_ethtool_nway_reset,

	.get_ringparam = emac_ethtool_get_ringparam,
	.get_pauseparam = emac_ethtool_get_pauseparam,

	.get_rx_csum = emac_ethtool_get_rx_csum,

	.get_strings = emac_ethtool_get_strings,
	.get_stats_count = emac_ethtool_get_stats_count,
	.get_ethtool_stats = emac_ethtool_get_ethtool_stats,

	.get_link = ethtool_op_get_link,
	.get_tx_csum = ethtool_op_get_tx_csum,
	.get_sg = ethtool_op_get_sg,
};
static int emac_ioctl(struct net_device *ndev, struct ifreq *rq, int cmd)
{
	struct ocp_enet_private *dev = ndev->priv;
	uint16_t *data = (uint16_t *) & rq->ifr_ifru;

	DBG("%d: ioctl %08x" NL, dev->def->index, cmd);

	if (dev->phy.address < 0)
		return -EOPNOTSUPP;

	switch (cmd) {
	case SIOCGMIIPHY:
	case SIOCDEVPRIVATE:
		data[0] = dev->phy.address;
		/* Fall through */
	case SIOCGMIIREG:
	case SIOCDEVPRIVATE + 1:
		data[3] = emac_mdio_read(ndev, dev->phy.address, data[1]);
		return 0;

	case SIOCSMIIREG:
	case SIOCDEVPRIVATE + 2:
		if (!capable(CAP_NET_ADMIN))
			return -EPERM;
		emac_mdio_write(ndev, dev->phy.address, data[1], data[2]);
		return 0;
	default:
		return -EOPNOTSUPP;
	}
}
static int __init emac_probe(struct ocp_device *ocpdev)
{
	struct ocp_func_emac_data *emacdata = ocpdev->def->additions;
	struct net_device *ndev;
	struct ocp_device *maldev;
	struct ocp_enet_private *dev;
	int err, i;

	DBG("%d: probe" NL, ocpdev->def->index);

	if (!emacdata) {
		printk(KERN_ERR "emac%d: Missing additional data!\n",
		       ocpdev->def->index);
		return -ENODEV;
	}

	/* Allocate our net_device structure */
	ndev = alloc_etherdev(sizeof(struct ocp_enet_private));
	if (!ndev) {
		printk(KERN_ERR "emac%d: could not allocate ethernet device!\n",
		       ocpdev->def->index);
		return -ENOMEM;
	}
	dev = ndev->priv;
	dev->ndev = ndev;
	dev->ldev = &ocpdev->dev;
	dev->def = ocpdev->def;
	SET_MODULE_OWNER(ndev);

	/* Find MAL device we are connected to */
	maldev =
	    ocp_find_device(OCP_VENDOR_IBM, OCP_FUNC_MAL, emacdata->mal_idx);
	if (!maldev) {
		printk(KERN_ERR "emac%d: unknown mal%d device!\n",
		       dev->def->index, emacdata->mal_idx);
		err = -ENODEV;
		goto out;
	}
	dev->mal = ocp_get_drvdata(maldev);
	if (!dev->mal) {
		printk(KERN_ERR "emac%d: mal%d hasn't been initialized yet!\n",
		       dev->def->index, emacdata->mal_idx);
		err = -ENODEV;
		goto out;
	}

	/* Register with MAL */
	dev->commac.ops = &emac_commac_ops;
	dev->commac.dev = dev;
	dev->commac.tx_chan_mask = MAL_CHAN_MASK(emacdata->mal_tx_chan);
	dev->commac.rx_chan_mask = MAL_CHAN_MASK(emacdata->mal_rx_chan);
	err = mal_register_commac(dev->mal, &dev->commac);
	if (err) {
		printk(KERN_ERR "emac%d: failed to register with mal%d!\n",
		       dev->def->index, emacdata->mal_idx);
		goto out;
	}
	dev->rx_skb_size = emac_rx_skb_size(ndev->mtu);
	dev->rx_sync_size = emac_rx_sync_size(ndev->mtu);

	/* Get pointers to BD rings */
	dev->tx_desc =
	    dev->mal->bd_virt + mal_tx_bd_offset(dev->mal,
						 emacdata->mal_tx_chan);
	dev->rx_desc =
	    dev->mal->bd_virt + mal_rx_bd_offset(dev->mal,
						 emacdata->mal_rx_chan);

	DBG("%d: tx_desc %p" NL, ocpdev->def->index, dev->tx_desc);
	DBG("%d: rx_desc %p" NL, ocpdev->def->index, dev->rx_desc);

	/* Clean rings */
	memset(dev->tx_desc, 0, NUM_TX_BUFF * sizeof(struct mal_descriptor));
	memset(dev->rx_desc, 0, NUM_RX_BUFF * sizeof(struct mal_descriptor));

	/* If we depend on another EMAC for MDIO, check whether it was probed already */
	if (emacdata->mdio_idx >= 0 && emacdata->mdio_idx != ocpdev->def->index) {
		struct ocp_device *mdiodev =
		    ocp_find_device(OCP_VENDOR_IBM, OCP_FUNC_EMAC,
				    emacdata->mdio_idx);
		if (!mdiodev) {
			printk(KERN_ERR "emac%d: unknown emac%d device!\n",
			       dev->def->index, emacdata->mdio_idx);
			err = -ENODEV;
			goto out2;
		}
		dev->mdio_dev = ocp_get_drvdata(mdiodev);
		if (!dev->mdio_dev) {
			printk(KERN_ERR
			       "emac%d: emac%d hasn't been initialized yet!\n",
			       dev->def->index, emacdata->mdio_idx);
			err = -ENODEV;
			goto out2;
		}
	}

	/* Attach to ZMII, if needed */
	if ((err = zmii_attach(dev)) != 0)
		goto out2;

	/* Attach to RGMII, if needed */
	if ((err = rgmii_attach(dev)) != 0)
		goto out3;

	/* Attach to TAH, if needed */
	if ((err = tah_attach(dev)) != 0)
		goto out4;

	/* Map EMAC regs */
	dev->emacp =
	    (struct emac_regs *)ioremap(dev->def->paddr,
					sizeof(struct emac_regs));
	if (!dev->emacp) {
		printk(KERN_ERR "emac%d: could not ioremap device registers!\n",
		       dev->def->index);
		err = -ENOMEM;
		goto out5;
	}

	/* Fill in MAC address */
	for (i = 0; i < 6; ++i)
		ndev->dev_addr[i] = emacdata->mac_addr[i];

	/* Set some link defaults before we can find out real parameters */
	dev->phy.speed = SPEED_100;
	dev->phy.duplex = DUPLEX_FULL;
	dev->phy.autoneg = AUTONEG_DISABLE;
	dev->phy.pause = dev->phy.asym_pause = 0;
	dev->stop_timeout = STOP_TIMEOUT_100;
	init_timer(&dev->link_timer);
	dev->link_timer.function = emac_link_timer;
	dev->link_timer.data = (unsigned long)dev;

	/* Find PHY if any */
	dev->phy.dev = ndev;
	dev->phy.mode = emacdata->phy_mode;
	if (emacdata->phy_map != 0xffffffff) {
		u32 phy_map = emacdata->phy_map | busy_phy_map;
		u32 adv;

		DBG("%d: PHY maps %08x %08x" NL, dev->def->index,
		    emacdata->phy_map, busy_phy_map);

		EMAC_RX_CLK_TX(dev->def->index);

		dev->phy.mdio_read = emac_mdio_read;
		dev->phy.mdio_write = emac_mdio_write;

		/* Configure EMAC with defaults so we can at least use MDIO
		 * This is needed mostly for 440GX
		 */
		if (emac_phy_gpcs(dev->phy.mode)) {
			/* XXX
			 * Make GPCS PHY address equal to EMAC index.
			 * We probably should take into account busy_phy_map
			 * and/or phy_map here.
			 */
			dev->phy.address = dev->def->index;
		}

		emac_configure(dev);

		for (i = 0; i < 0x20; phy_map >>= 1, ++i)
			if (!(phy_map & 1)) {
				int r;
				busy_phy_map |= 1 << i;

				/* Quick check if there is a PHY at the address */
				r = emac_mdio_read(dev->ndev, i, MII_BMCR);
				if (r == 0xffff || r < 0)
					continue;
				if (!mii_phy_probe(&dev->phy, i))
					break;
			}
		if (i == 0x20) {
			printk(KERN_WARNING "emac%d: can't find PHY!\n",
			       dev->def->index);
			goto out6;
		}

		/* Init PHY */
		if (dev->phy.def->ops->init)
			dev->phy.def->ops->init(&dev->phy);

		/* Disable any PHY features not supported by the platform */
		dev->phy.def->features &= ~emacdata->phy_feat_exc;

		/* Setup initial link parameters */
		if (dev->phy.features & SUPPORTED_Autoneg) {
			adv = dev->phy.features;
#if !defined(CONFIG_40x)
			adv |= ADVERTISED_Pause | ADVERTISED_Asym_Pause;
#endif
			/* Restart autonegotiation */
			dev->phy.def->ops->setup_aneg(&dev->phy, adv);
		} else {
			u32 f = dev->phy.def->features;
			int speed = SPEED_10, fd = DUPLEX_HALF;

			/* Select highest supported speed/duplex */
			if (f & SUPPORTED_1000baseT_Full) {
				speed = SPEED_1000;
				fd = DUPLEX_FULL;
			} else if (f & SUPPORTED_1000baseT_Half)
				speed = SPEED_1000;
			else if (f & SUPPORTED_100baseT_Full) {
				speed = SPEED_100;
				fd = DUPLEX_FULL;
			} else if (f & SUPPORTED_100baseT_Half)
				speed = SPEED_100;
			else if (f & SUPPORTED_10baseT_Full)
				fd = DUPLEX_FULL;

			/* Force link parameters */
			dev->phy.def->ops->setup_forced(&dev->phy, speed, fd);
		}
	} else {
		/* PHY-less configuration.
		 * XXX I probably should move these settings to emacdata
		 */
		dev->phy.address = -1;
		dev->phy.features = SUPPORTED_100baseT_Full | SUPPORTED_MII;
		dev->phy.pause = 1;
	}

	/* Fill in the driver function table */
	ndev->open = &emac_open;
	if (dev->tah_dev) {
		ndev->hard_start_xmit = &emac_start_xmit_sg;
		ndev->features |= NETIF_F_IP_CSUM | NETIF_F_SG;
	} else
		ndev->hard_start_xmit = &emac_start_xmit;
	ndev->tx_timeout = &emac_full_tx_reset;
	ndev->watchdog_timeo = 5 * HZ;
	ndev->stop = &emac_close;
	ndev->get_stats = &emac_stats;
	ndev->set_multicast_list = &emac_set_multicast_list;
	ndev->do_ioctl = &emac_ioctl;
	if (emac_phy_supports_gige(emacdata->phy_mode)) {
		ndev->change_mtu = &emac_change_mtu;
		dev->commac.ops = &emac_commac_sg_ops;
	}
	SET_ETHTOOL_OPS(ndev, &emac_ethtool_ops);

	netif_carrier_off(ndev);
	netif_stop_queue(ndev);

	err = register_netdev(ndev);
	if (err) {
		printk(KERN_ERR "emac%d: failed to register net device (%d)!\n",
		       dev->def->index, err);
		goto out6;
	}

	ocp_set_drvdata(ocpdev, dev);

	printk("%s: emac%d, MAC %02x:%02x:%02x:%02x:%02x:%02x\n",
	       ndev->name, dev->def->index,
	       ndev->dev_addr[0], ndev->dev_addr[1], ndev->dev_addr[2],
	       ndev->dev_addr[3], ndev->dev_addr[4], ndev->dev_addr[5]);

	if (dev->phy.address >= 0)
		printk("%s: found %s PHY (0x%02x)\n", ndev->name,
		       dev->phy.def->name, dev->phy.address);

	emac_dbg_register(dev->def->index, dev);

	return 0;
      out6:
	iounmap((void *)dev->emacp);
      out5:
	tah_fini(dev->tah_dev);
      out4:
	rgmii_fini(dev->rgmii_dev, dev->rgmii_input);
      out3:
	zmii_fini(dev->zmii_dev, dev->zmii_input);
      out2:
	mal_unregister_commac(dev->mal, &dev->commac);
      out:
	kfree(ndev);
	return err;
}
static struct ocp_device_id emac_ids[] = {
	{ .vendor = OCP_VENDOR_IBM, .function = OCP_FUNC_EMAC },
	{ .vendor = OCP_VENDOR_INVALID }
};

static struct ocp_driver emac_driver = {
	.name = "emac",
	.id_table = emac_ids,
	.probe = emac_probe,
	.remove = emac_remove,
};
static int __init emac_init(void)
{
	printk(KERN_INFO DRV_DESC ", version " DRV_VERSION "\n");

	/* Switch Ethernet clock to the internal source for the duration of
	 * probing (see the EMAC_CLK_* comment above), then back to external.
	 */
	EMAC_CLK_INTERNAL;
	if (ocp_register_driver(&emac_driver)) {
		EMAC_CLK_EXTERNAL;
		ocp_unregister_driver(&emac_driver);
		return -ENODEV;
	}
	EMAC_CLK_EXTERNAL;

	return 0;
}

static void __exit emac_exit(void)
{
	ocp_unregister_driver(&emac_driver);
}

module_init(emac_init);
module_exit(emac_exit);