/*
 * SuperH Ethernet device driver
 *
 * Copyright (C) 2006,2007 Nobuhiro Iwamatsu
 * Copyright (C) 2008 Renesas Solutions Corp.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc.,
 * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * The full GNU General Public License is included in this distribution in
 * the file called "COPYING".
 */
#include <linux/version.h>
#include <linux/init.h>
#include <linux/dma-mapping.h>
#include <linux/etherdevice.h>
#include <linux/delay.h>
#include <linux/platform_device.h>
#include <linux/mdio-bitbang.h>
#include <linux/netdevice.h>
#include <linux/phy.h>
#include <linux/cache.h>
#include <linux/io.h>

#include "sh_eth.h"
/*
 * Program the hardware MAC address from dev->dev_addr.
 */
static void update_mac_address(struct net_device *ndev)
{
	u32 ioaddr = ndev->base_addr;

	ctrl_outl((ndev->dev_addr[0] << 24) | (ndev->dev_addr[1] << 16) |
		  (ndev->dev_addr[2] << 8) | (ndev->dev_addr[3]),
		  ioaddr + MAHR);
	ctrl_outl((ndev->dev_addr[4] << 8) | (ndev->dev_addr[5]),
		  ioaddr + MALR);
}
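/*
 * Packing example, derived purely from the shifts above: for the MAC
 * address 00:11:22:33:44:55 the two writes are MAHR = 0x00112233 and
 * MALR = 0x00004455, the exact inverse of how read_mac_address() below
 * unpacks the registers.
 */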
/*
 * Get MAC address from SuperH MAC address register
 *
 * The SuperH Ethernet controller has no ROM for the MAC address; this
 * driver reads back whatever address the bootloader (U-Boot or sh-ipl+g)
 * programmed into the MAHR/MALR registers. To use this device, the
 * bootloader must therefore set a MAC address first.
 */
static void read_mac_address(struct net_device *ndev)
{
	u32 ioaddr = ndev->base_addr;

	ndev->dev_addr[0] = (ctrl_inl(ioaddr + MAHR) >> 24);
	ndev->dev_addr[1] = (ctrl_inl(ioaddr + MAHR) >> 16) & 0xFF;
	ndev->dev_addr[2] = (ctrl_inl(ioaddr + MAHR) >> 8) & 0xFF;
	ndev->dev_addr[3] = (ctrl_inl(ioaddr + MAHR) & 0xFF);
	ndev->dev_addr[4] = (ctrl_inl(ioaddr + MALR) >> 8) & 0xFF;
	ndev->dev_addr[5] = (ctrl_inl(ioaddr + MALR) & 0xFF);
}
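/*
 * Illustrative bootloader step (not part of this driver; exact syntax
 * depends on the U-Boot configuration):
 *	setenv ethaddr 00:11:22:33:44:55
 *	saveenv
 * Once the bootloader has used the interface with that address, MAHR/MALR
 * hold a usable value for the read above.
 */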
/* MDIO bit-banging state, wrapping the generic mdiobb control block */
struct bb_info {
	struct mdiobb_ctrl ctrl;
	u32 addr;
	u32 mmd_msk;	/* MMD (direction) */
	u32 mdo_msk;
	u32 mdi_msk;
	u32 mdc_msk;
};
/* PHY bit set */
static void bb_set(u32 addr, u32 msk)
{
	ctrl_outl(ctrl_inl(addr) | msk, addr);
}

/* PHY bit clear */
static void bb_clr(u32 addr, u32 msk)
{
	ctrl_outl((ctrl_inl(addr) & ~msk), addr);
}

/* PHY bit read */
static int bb_read(u32 addr, u32 msk)
{
	return (ctrl_inl(addr) & msk) != 0;
}
/* Data I/O pin control */
static void sh_mmd_ctrl(struct mdiobb_ctrl *ctrl, int bit)
{
	struct bb_info *bitbang = container_of(ctrl, struct bb_info, ctrl);

	if (bit)
		bb_set(bitbang->addr, bitbang->mmd_msk);
	else
		bb_clr(bitbang->addr, bitbang->mmd_msk);
}
/* Set bit data */
static void sh_set_mdio(struct mdiobb_ctrl *ctrl, int bit)
{
	struct bb_info *bitbang = container_of(ctrl, struct bb_info, ctrl);

	if (bit)
		bb_set(bitbang->addr, bitbang->mdo_msk);
	else
		bb_clr(bitbang->addr, bitbang->mdo_msk);
}
/* Get bit data */
static int sh_get_mdio(struct mdiobb_ctrl *ctrl)
{
	struct bb_info *bitbang = container_of(ctrl, struct bb_info, ctrl);

	return bb_read(bitbang->addr, bitbang->mdi_msk);
}
/* MDC pin control */
static void sh_mdc_ctrl(struct mdiobb_ctrl *ctrl, int bit)
{
	struct bb_info *bitbang = container_of(ctrl, struct bb_info, ctrl);

	if (bit)
		bb_set(bitbang->addr, bitbang->mdc_msk);
	else
		bb_clr(bitbang->addr, bitbang->mdc_msk);
}
/* mdio bus control struct */
static struct mdiobb_ops bb_ops = {
	.owner = THIS_MODULE,
	.set_mdc = sh_mdc_ctrl,
	.set_mdio_dir = sh_mmd_ctrl,
	.set_mdio_data = sh_set_mdio,
	.get_mdio_data = sh_get_mdio,
};
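/*
 * These callbacks plug into the generic mdio-bitbang layer: the mii_bus
 * created by alloc_mdio_bitbang() in sh_mdio_init() below uses them to
 * clock standard MII management frames out of the PIR register one bit
 * at a time, so this driver never implements MDIO framing itself.
 */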
/* Chip soft reset */
static void sh_eth_reset(struct net_device *ndev)
{
	u32 ioaddr = ndev->base_addr;

	ctrl_outl(ctrl_inl(ioaddr + EDMR) | EDMR_SRST, ioaddr + EDMR);
	mdelay(3);	/* give the E-DMAC time to finish its internal reset */
	ctrl_outl(ctrl_inl(ioaddr + EDMR) & ~EDMR_SRST, ioaddr + EDMR);
}
/* free skb and descriptor buffer */
static void sh_eth_ring_free(struct net_device *ndev)
{
	struct sh_eth_private *mdp = netdev_priv(ndev);
	int i;

	/* Free Rx skb ringbuffer */
	if (mdp->rx_skbuff) {
		for (i = 0; i < RX_RING_SIZE; i++) {
			if (mdp->rx_skbuff[i])
				dev_kfree_skb(mdp->rx_skbuff[i]);
		}
	}
	kfree(mdp->rx_skbuff);
	mdp->rx_skbuff = NULL;

	/* Free Tx skb ringbuffer */
	if (mdp->tx_skbuff) {
		for (i = 0; i < TX_RING_SIZE; i++) {
			if (mdp->tx_skbuff[i])
				dev_kfree_skb(mdp->tx_skbuff[i]);
		}
	}
	kfree(mdp->tx_skbuff);
	mdp->tx_skbuff = NULL;
}
/* format skb and descriptor buffer */
static void sh_eth_ring_format(struct net_device *ndev)
{
	struct sh_eth_private *mdp = netdev_priv(ndev);
	int i;
	struct sk_buff *skb;
	struct sh_eth_rxdesc *rxdesc = NULL;
	struct sh_eth_txdesc *txdesc = NULL;
	int rx_ringsize = sizeof(*rxdesc) * RX_RING_SIZE;
	int tx_ringsize = sizeof(*txdesc) * TX_RING_SIZE;

	mdp->cur_rx = mdp->cur_tx = 0;
	mdp->dirty_rx = mdp->dirty_tx = 0;

	memset(mdp->rx_ring, 0, rx_ringsize);

	/* build Rx ring buffer */
	for (i = 0; i < RX_RING_SIZE; i++) {
		/* skb */
		mdp->rx_skbuff[i] = NULL;
		skb = dev_alloc_skb(mdp->rx_buf_sz);
		mdp->rx_skbuff[i] = skb;
		if (skb == NULL)
			break;
		skb->dev = ndev;	/* Mark as being used by this device. */
		skb_reserve(skb, RX_OFFSET);

		/* RX descriptor */
		rxdesc = &mdp->rx_ring[i];
		rxdesc->addr = (u32)skb->data & ~0x3UL;
		rxdesc->status = cpu_to_le32(RD_RACT | RD_RFP);

		/* The buffer length is rounded to a 16-byte boundary. */
		rxdesc->buffer_length = (mdp->rx_buf_sz + 16) & ~0x0F;
	}

	mdp->dirty_rx = (u32) (i - RX_RING_SIZE);

	/* Mark the last entry as wrapping the ring. */
	rxdesc->status |= cpu_to_le32(RC_RDEL);

	memset(mdp->tx_ring, 0, tx_ringsize);

	/* build Tx ring buffer */
	for (i = 0; i < TX_RING_SIZE; i++) {
		mdp->tx_skbuff[i] = NULL;
		txdesc = &mdp->tx_ring[i];
		txdesc->status = cpu_to_le32(TD_TFP);
		txdesc->buffer_length = 0;
	}

	txdesc->status |= cpu_to_le32(TD_TDLE);
}
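/*
 * Ownership protocol implied by the ring setup above: a descriptor whose
 * RACT (Rx) or TACT (Tx) bit is set belongs to the E-DMAC, and the driver
 * may touch it again only once the hardware clears that bit. The
 * RC_RDEL/TD_TDLE flags on the final entry make the DMA engine wrap back
 * to descriptor 0 instead of running past the end of the array.
 */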
/* Get skb and descriptor buffer */
static int sh_eth_ring_init(struct net_device *ndev)
{
	struct sh_eth_private *mdp = netdev_priv(ndev);
	int rx_ringsize, tx_ringsize, ret = 0;

	/*
	 * +26 gets the maximum ethernet encapsulation, +7 & ~7 because the
	 * card needs room to do 8 byte alignment, +2 so we can reserve
	 * the first 2 bytes, and +16 gets room for the status word from the
	 * card.
	 */
	mdp->rx_buf_sz = (ndev->mtu <= 1492 ? PKT_BUF_SZ :
			  (((ndev->mtu + 26 + 7) & ~7) + 2 + 16));
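	/*
	 * Worked example of the expression above (plain arithmetic, nothing
	 * device-specific): with an MTU of 1500, (1500 + 26 + 7) & ~7 is
	 * 1528, and + 2 + 16 gives an rx_buf_sz of 1546 bytes. MTUs of
	 * 1492 and below just use the fixed PKT_BUF_SZ.
	 */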
	/* Allocate RX and TX skb rings */
	mdp->rx_skbuff = kmalloc(sizeof(*mdp->rx_skbuff) * RX_RING_SIZE,
				 GFP_KERNEL);
	if (!mdp->rx_skbuff) {
		printk(KERN_ERR "%s: Cannot allocate Rx skb\n", ndev->name);
		ret = -ENOMEM;
		return ret;
	}

	mdp->tx_skbuff = kmalloc(sizeof(*mdp->tx_skbuff) * TX_RING_SIZE,
				 GFP_KERNEL);
	if (!mdp->tx_skbuff) {
		printk(KERN_ERR "%s: Cannot allocate Tx skb\n", ndev->name);
		ret = -ENOMEM;
		goto skb_ring_free;
	}

	/* Allocate all Rx descriptors. */
	rx_ringsize = sizeof(struct sh_eth_rxdesc) * RX_RING_SIZE;
	mdp->rx_ring = dma_alloc_coherent(NULL, rx_ringsize, &mdp->rx_desc_dma,
					  GFP_KERNEL);
	if (!mdp->rx_ring) {
		printk(KERN_ERR "%s: Cannot allocate Rx Ring (size %d bytes)\n",
		       ndev->name, rx_ringsize);
		ret = -ENOMEM;
		goto skb_ring_free;
	}

	mdp->dirty_rx = 0;

	/* Allocate all Tx descriptors. */
	tx_ringsize = sizeof(struct sh_eth_txdesc) * TX_RING_SIZE;
	mdp->tx_ring = dma_alloc_coherent(NULL, tx_ringsize, &mdp->tx_desc_dma,
					  GFP_KERNEL);
	if (!mdp->tx_ring) {
		printk(KERN_ERR "%s: Cannot allocate Tx Ring (size %d bytes)\n",
		       ndev->name, tx_ringsize);
		ret = -ENOMEM;
		goto desc_ring_free;
	}
	return ret;

desc_ring_free:
	/* free DMA buffer */
	dma_free_coherent(NULL, rx_ringsize, mdp->rx_ring, mdp->rx_desc_dma);

skb_ring_free:
	/* Free Rx and Tx skb ring buffer */
	sh_eth_ring_free(ndev);

	return ret;
}
/* device init function */
static int sh_eth_dev_init(struct net_device *ndev)
{
	int ret = 0;
	struct sh_eth_private *mdp = netdev_priv(ndev);
	u32 ioaddr = ndev->base_addr;
	u_int32_t rx_int_var, tx_int_var;
	u32 val;

	/* Soft Reset */
	sh_eth_reset(ndev);

	ctrl_outl(RPADIR_PADS1, ioaddr + RPADIR);	/* SH7712-DMA-RX-PAD2 */

	/* all sh_eth int mask */
	ctrl_outl(0, ioaddr + EESIPR);

	ctrl_outl(0, ioaddr + EDMR);	/* Endian change */

	ctrl_outl((FIFO_SIZE_T | FIFO_SIZE_R), ioaddr + FDR);
	ctrl_outl(0, ioaddr + TFTR);

	ctrl_outl(0, ioaddr + RMCR);

	rx_int_var = mdp->rx_int_var = DESC_I_RINT8 | DESC_I_RINT5;
	tx_int_var = mdp->tx_int_var = DESC_I_TINT2;
	ctrl_outl(rx_int_var | tx_int_var, ioaddr + TRSCER);

	ctrl_outl((FIFO_F_D_RFF | FIFO_F_D_RFD), ioaddr + FCFTR);
	ctrl_outl(0, ioaddr + TRIMD);

	/* Descriptor format */
	sh_eth_ring_format(ndev);

	ctrl_outl((u32)mdp->rx_ring, ioaddr + RDLAR);
	ctrl_outl((u32)mdp->tx_ring, ioaddr + TDLAR);

	ctrl_outl(ctrl_inl(ioaddr + EESR), ioaddr + EESR);
	ctrl_outl((DMAC_M_RFRMER | DMAC_M_ECI | 0x003fffff), ioaddr + EESIPR);

	/* PAUSE Prohibition */
	val = (ctrl_inl(ioaddr + ECMR) & ECMR_DM) |
		ECMR_ZPF | (mdp->duplex ? ECMR_DM : 0) | ECMR_TE | ECMR_RE;

	ctrl_outl(val, ioaddr + ECMR);
	ctrl_outl(ECSR_BRCRX | ECSR_PSRTO | ECSR_LCHNG | ECSR_ICD |
		  ECSIPR_MPDIP, ioaddr + ECSR);
	ctrl_outl(ECSIPR_BRCRXIP | ECSIPR_PSRTOIP | ECSIPR_LCHNGIP |
		  ECSIPR_ICDIP | ECSIPR_MPDIP, ioaddr + ECSIPR);

	/* Set MAC address */
	update_mac_address(ndev);

#if defined(CONFIG_CPU_SUBTYPE_SH7710)
	ctrl_outl(APR_AP, ioaddr + APR);
	ctrl_outl(MPR_MP, ioaddr + MPR);
	ctrl_outl(TPAUSER_UNLIMITED, ioaddr + TPAUSER);
	ctrl_outl(BCFR_UNLIMITED, ioaddr + BCFR);
#endif

	/* Setting the Rx mode will start the Rx process. */
	ctrl_outl(EDRRR_R, ioaddr + EDRRR);

	netif_start_queue(ndev);

	return ret;
}
/* free Tx skb function */
static int sh_eth_txfree(struct net_device *ndev)
{
	struct sh_eth_private *mdp = netdev_priv(ndev);
	struct sh_eth_txdesc *txdesc;
	int freeNum = 0;
	int entry = 0;

	for (; mdp->cur_tx - mdp->dirty_tx > 0; mdp->dirty_tx++) {
		entry = mdp->dirty_tx % TX_RING_SIZE;
		txdesc = &mdp->tx_ring[entry];
		if (txdesc->status & cpu_to_le32(TD_TACT))
			break;
		/* Free the original skb. */
		if (mdp->tx_skbuff[entry]) {
			dev_kfree_skb_irq(mdp->tx_skbuff[entry]);
			mdp->tx_skbuff[entry] = NULL;
			freeNum++;
		}
		txdesc->status = cpu_to_le32(TD_TFP);
		if (entry >= TX_RING_SIZE - 1)
			txdesc->status |= cpu_to_le32(TD_TDLE);

		mdp->stats.tx_packets++;
		mdp->stats.tx_bytes += txdesc->buffer_length;
	}
	return freeNum;
}
/* Packet receive function */
static int sh_eth_rx(struct net_device *ndev)
{
	struct sh_eth_private *mdp = netdev_priv(ndev);
	struct sh_eth_rxdesc *rxdesc;

	int entry = mdp->cur_rx % RX_RING_SIZE;
	int boguscnt = (mdp->dirty_rx + RX_RING_SIZE) - mdp->cur_rx;
	struct sk_buff *skb;
	u16 pkt_len = 0;
	u32 desc_status;

	rxdesc = &mdp->rx_ring[entry];
	while (!(rxdesc->status & cpu_to_le32(RD_RACT))) {
		desc_status = le32_to_cpu(rxdesc->status);
		pkt_len = rxdesc->frame_length;

		if (--boguscnt < 0)
			break;

		if (!(desc_status & RDFEND))
			mdp->stats.rx_length_errors++;

		if (desc_status & (RD_RFS1 | RD_RFS2 | RD_RFS3 | RD_RFS4 |
				   RD_RFS5 | RD_RFS6 | RD_RFS10)) {
			mdp->stats.rx_errors++;
			if (desc_status & RD_RFS1)
				mdp->stats.rx_crc_errors++;
			if (desc_status & RD_RFS2)
				mdp->stats.rx_frame_errors++;
			if (desc_status & RD_RFS3)
				mdp->stats.rx_length_errors++;
			if (desc_status & RD_RFS4)
				mdp->stats.rx_length_errors++;
			if (desc_status & RD_RFS6)
				mdp->stats.rx_missed_errors++;
			if (desc_status & RD_RFS10)
				mdp->stats.rx_over_errors++;
		} else {
			swaps((char *)(rxdesc->addr & ~0x3), pkt_len + 2);
			skb = mdp->rx_skbuff[entry];
			mdp->rx_skbuff[entry] = NULL;
			skb_put(skb, pkt_len);
			skb->protocol = eth_type_trans(skb, ndev);
			netif_rx(skb);
			ndev->last_rx = jiffies;
			mdp->stats.rx_packets++;
			mdp->stats.rx_bytes += pkt_len;
		}
		rxdesc->status |= cpu_to_le32(RD_RACT);
		entry = (++mdp->cur_rx) % RX_RING_SIZE;
		rxdesc = &mdp->rx_ring[entry];
	}

	/* Refill the Rx ring buffers. */
	for (; mdp->cur_rx - mdp->dirty_rx > 0; mdp->dirty_rx++) {
		entry = mdp->dirty_rx % RX_RING_SIZE;
		rxdesc = &mdp->rx_ring[entry];
		if (mdp->rx_skbuff[entry] == NULL) {
			skb = dev_alloc_skb(mdp->rx_buf_sz);
			mdp->rx_skbuff[entry] = skb;
			if (skb == NULL)
				break;	/* Better luck next round. */
			skb->dev = ndev;
			skb_reserve(skb, RX_OFFSET);
			rxdesc->addr = (u32)skb->data & ~0x3UL;
		}
		/* The buffer length is rounded to a 16-byte boundary. */
		rxdesc->buffer_length = (mdp->rx_buf_sz + 16) & ~0x0F;
		if (entry >= RX_RING_SIZE - 1)
			rxdesc->status |=
				cpu_to_le32(RD_RACT | RD_RFP | RC_RDEL);
		else
			rxdesc->status |=
				cpu_to_le32(RD_RACT | RD_RFP);
	}

	/* Restart Rx engine if stopped. */
	/* If we don't need to check status, don't. -KDU */
	ctrl_outl(EDRRR_R, ndev->base_addr + EDRRR);

	return 0;
}
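/*
 * Note on the receive path above: completed frames go straight to the
 * stack via netif_rx() from interrupt context (no NAPI here), and
 * boguscnt bounds the number of descriptors examined per call so a flood
 * of received frames cannot keep the handler looping indefinitely.
 */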
/* error control function */
static void sh_eth_error(struct net_device *ndev, int intr_status)
{
	struct sh_eth_private *mdp = netdev_priv(ndev);
	u32 ioaddr = ndev->base_addr;
	u32 felic_stat;

	if (intr_status & EESR_ECI) {
		felic_stat = ctrl_inl(ioaddr + ECSR);
		ctrl_outl(felic_stat, ioaddr + ECSR);	/* clear int */
		if (felic_stat & ECSR_ICD)
			mdp->stats.tx_carrier_errors++;
		if (felic_stat & ECSR_LCHNG) {
			/* Link Changed */
			u32 link_stat = (ctrl_inl(ioaddr + PSR));
			if (!(link_stat & PHY_ST_LINK)) {
				/* Link Down : disable tx and rx */
				ctrl_outl(ctrl_inl(ioaddr + ECMR) &
					  ~(ECMR_RE | ECMR_TE), ioaddr + ECMR);
			} else {
				/* Link Up */
				ctrl_outl(ctrl_inl(ioaddr + EESIPR) &
					  ~DMAC_M_ECI, ioaddr + EESIPR);
				/* clear int */
				ctrl_outl(ctrl_inl(ioaddr + ECSR),
					  ioaddr + ECSR);
				ctrl_outl(ctrl_inl(ioaddr + EESIPR) |
					  DMAC_M_ECI, ioaddr + EESIPR);
				/* enable tx and rx */
				ctrl_outl(ctrl_inl(ioaddr + ECMR) |
					  (ECMR_RE | ECMR_TE), ioaddr + ECMR);
			}
		}
	}

	if (intr_status & EESR_TWB) {
		/* Write-back end; unused write-back interrupt */
		if (intr_status & EESR_TABT)	/* Transmit Abort int */
			mdp->stats.tx_aborted_errors++;
	}

	if (intr_status & EESR_RABT) {
		/* Receive Abort int */
		if (intr_status & EESR_RFRMER) {
			/* Receive Frame Overflow int */
			mdp->stats.rx_frame_errors++;
			printk(KERN_ERR "Receive Frame Overflow\n");
		}
	}

	if (intr_status & EESR_ADE) {
		if (intr_status & EESR_TDE) {
			if (intr_status & EESR_TFE)
				mdp->stats.tx_fifo_errors++;
		}
	}

	if (intr_status & EESR_RDE) {
		/* Receive Descriptor Empty int */
		mdp->stats.rx_over_errors++;

		if (ctrl_inl(ioaddr + EDRRR) ^ EDRRR_R)
			ctrl_outl(EDRRR_R, ioaddr + EDRRR);
		printk(KERN_ERR "Receive Descriptor Empty\n");
	}
	if (intr_status & EESR_RFE) {
		/* Receive FIFO Overflow int */
		mdp->stats.rx_fifo_errors++;
		printk(KERN_ERR "Receive FIFO Overflow\n");
	}
	if (intr_status &
	    (EESR_TWB | EESR_TABT | EESR_ADE | EESR_TDE | EESR_TFE)) {
		/* Tx error */
		u32 edtrr = ctrl_inl(ndev->base_addr + EDTRR);

		printk(KERN_ERR "%s:TX error. status=%8.8x cur_tx=%8.8x ",
		       ndev->name, intr_status, mdp->cur_tx);
		printk(KERN_ERR "dirty_tx=%8.8x state=%8.8x EDTRR=%8.8x.\n",
		       mdp->dirty_tx, (u32) ndev->state, edtrr);
		/* dirty buffer free */
		sh_eth_txfree(ndev);

		if (edtrr ^ EDTRR_TRNS) {
			/* restart the Tx DMA if it has stopped */
			ctrl_outl(EDTRR_TRNS, ndev->base_addr + EDTRR);
		}
		netif_wake_queue(ndev);
	}
}
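/*
 * The recovery strategy above is deliberately simple: bump the relevant
 * statistics counters, and whenever the hardware has dropped a run bit,
 * rewrite it (EDRRR_R for the Rx engine, EDTRR_TRNS for Tx). The code
 * assumes the E-DMAC halts cleanly on errors and can be restarted by
 * re-kicking these registers.
 */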
/* device interrupt handler */
static irqreturn_t sh_eth_interrupt(int irq, void *netdev)
{
	struct net_device *ndev = netdev;
	struct sh_eth_private *mdp = netdev_priv(ndev);
	u32 ioaddr, boguscnt = RX_RING_SIZE;
	u32 intr_status = 0;

	ioaddr = ndev->base_addr;
	spin_lock(&mdp->lock);

	/* Get interrupt status */
	intr_status = ctrl_inl(ioaddr + EESR);
	/* Clear interrupt */
	ctrl_outl(intr_status, ioaddr + EESR);

	if (intr_status & (EESR_FRC | EESR_RINT8 |
			   EESR_RINT5 | EESR_RINT4 | EESR_RINT3 | EESR_RINT2 |
			   EESR_RINT1))
		sh_eth_rx(ndev);
	if (intr_status & (EESR_FTC |
			   EESR_TINT4 | EESR_TINT3 | EESR_TINT2 | EESR_TINT1)) {
		sh_eth_txfree(ndev);
		netif_wake_queue(ndev);
	}

	if (intr_status & EESR_ERR_CHECK)
		sh_eth_error(ndev, intr_status);

	if (--boguscnt < 0) {
		printk(KERN_WARNING
		       "%s: Too much work at interrupt, status=0x%4.4x.\n",
		       ndev->name, intr_status);
	}

	spin_unlock(&mdp->lock);

	return IRQ_HANDLED;
}
/* timer function */
static void sh_eth_timer(unsigned long data)
{
	struct net_device *ndev = (struct net_device *)data;
	struct sh_eth_private *mdp = netdev_priv(ndev);

	mod_timer(&mdp->timer, jiffies + (10 * HZ));
}
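/*
 * Note that this timer only re-arms itself every 10 seconds and performs
 * no other work; the actual link state is tracked by phylib, which calls
 * sh_eth_adjust_link() below whenever the PHY reports a change.
 */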
/* PHY state control function */
static void sh_eth_adjust_link(struct net_device *ndev)
{
	struct sh_eth_private *mdp = netdev_priv(ndev);
	struct phy_device *phydev = mdp->phydev;
	u32 ioaddr = ndev->base_addr;
	int new_state = 0;

	if (phydev->link != PHY_DOWN) {
		if (phydev->duplex != mdp->duplex) {
			new_state = 1;
			mdp->duplex = phydev->duplex;
		}

		if (phydev->speed != mdp->speed) {
			new_state = 1;
			mdp->speed = phydev->speed;
		}
		if (mdp->link == PHY_DOWN) {
			ctrl_outl((ctrl_inl(ioaddr + ECMR) & ~ECMR_TXF)
				  | ECMR_DM, ioaddr + ECMR);
			new_state = 1;
			mdp->link = phydev->link;
			netif_tx_schedule_all(ndev);
			netif_carrier_on(ndev);
			netif_start_queue(ndev);
		}
	} else if (mdp->link) {
		new_state = 1;
		mdp->link = PHY_DOWN;
		mdp->speed = 0;
		mdp->duplex = -1;
		netif_stop_queue(ndev);
		netif_carrier_off(ndev);
	}

	if (new_state)
		phy_print_status(phydev);
}
/* PHY init function */
static int sh_eth_phy_init(struct net_device *ndev)
{
	struct sh_eth_private *mdp = netdev_priv(ndev);
	char phy_id[BUS_ID_SIZE];
	struct phy_device *phydev = NULL;

	snprintf(phy_id, BUS_ID_SIZE, PHY_ID_FMT,
		 mdp->mii_bus->id, mdp->phy_id);

	mdp->link = PHY_DOWN;
	mdp->speed = 0;
	mdp->duplex = -1;

	/* Try connect to PHY */
	phydev = phy_connect(ndev, phy_id, &sh_eth_adjust_link,
			     0, PHY_INTERFACE_MODE_MII);
	if (IS_ERR(phydev)) {
		dev_err(&ndev->dev, "phy_connect failed\n");
		return PTR_ERR(phydev);
	}
	dev_info(&ndev->dev, "attached phy %i to driver %s\n",
		 phydev->addr, phydev->drv->name);

	mdp->phydev = phydev;

	return 0;
}
/* PHY control start function */
static int sh_eth_phy_start(struct net_device *ndev)
{
	struct sh_eth_private *mdp = netdev_priv(ndev);
	int ret;

	ret = sh_eth_phy_init(ndev);
	if (ret)
		return ret;

	/* reset phy - this also wakes it from PDOWN */
	phy_write(mdp->phydev, MII_BMCR, BMCR_RESET);
	phy_start(mdp->phydev);

	return 0;
}
/* network device open function */
static int sh_eth_open(struct net_device *ndev)
{
	int ret = 0;
	struct sh_eth_private *mdp = netdev_priv(ndev);

	ret = request_irq(ndev->irq, &sh_eth_interrupt, 0, ndev->name, ndev);
	if (ret) {
		printk(KERN_ERR "Can not assign IRQ number to %s\n", CARDNAME);
		return ret;
	}

	/* Descriptor set */
	ret = sh_eth_ring_init(ndev);
	if (ret)
		goto out_free_irq;

	/* device init */
	ret = sh_eth_dev_init(ndev);
	if (ret)
		goto out_free_irq;

	/* PHY control start */
	ret = sh_eth_phy_start(ndev);
	if (ret)
		goto out_free_irq;

	/* Set the timer to check for link beat. */
	init_timer(&mdp->timer);
	mdp->timer.expires = jiffies + (24 * HZ) / 10;	/* 2.4 sec. */
	setup_timer(&mdp->timer, sh_eth_timer, ndev);

	return ret;

out_free_irq:
	free_irq(ndev->irq, ndev);
	return ret;
}
/* Timeout function */
static void sh_eth_tx_timeout(struct net_device *ndev)
{
	struct sh_eth_private *mdp = netdev_priv(ndev);
	u32 ioaddr = ndev->base_addr;
	struct sh_eth_rxdesc *rxdesc;
	int i;

	netif_stop_queue(ndev);

	/* warning message out */
	printk(KERN_WARNING "%s: transmit timed out, status %8.8x,"
	       " resetting...\n", ndev->name, (int)ctrl_inl(ioaddr + EESR));

	/* tx_errors count up */
	mdp->stats.tx_errors++;

	/* timer off */
	del_timer_sync(&mdp->timer);

	/* Free all the skbuffs in the Rx queue. */
	for (i = 0; i < RX_RING_SIZE; i++) {
		rxdesc = &mdp->rx_ring[i];
		rxdesc->status = 0;
		rxdesc->addr = 0xBADF00D0;
		if (mdp->rx_skbuff[i])
			dev_kfree_skb(mdp->rx_skbuff[i]);
		mdp->rx_skbuff[i] = NULL;
	}
	for (i = 0; i < TX_RING_SIZE; i++) {
		if (mdp->tx_skbuff[i])
			dev_kfree_skb(mdp->tx_skbuff[i]);
		mdp->tx_skbuff[i] = NULL;
	}

	/* device init */
	sh_eth_dev_init(ndev);

	/* timer on */
	mdp->timer.expires = jiffies + (24 * HZ) / 10;	/* 2.4 sec. */
	add_timer(&mdp->timer);
}
/* Packet transmit function */
static int sh_eth_start_xmit(struct sk_buff *skb, struct net_device *ndev)
{
	struct sh_eth_private *mdp = netdev_priv(ndev);
	struct sh_eth_txdesc *txdesc;
	u32 entry;
	unsigned long flags;

	spin_lock_irqsave(&mdp->lock, flags);
	if ((mdp->cur_tx - mdp->dirty_tx) >= (TX_RING_SIZE - 4)) {
		if (!sh_eth_txfree(ndev)) {
			netif_stop_queue(ndev);
			spin_unlock_irqrestore(&mdp->lock, flags);
			return 1;
		}
	}
	spin_unlock_irqrestore(&mdp->lock, flags);

	entry = mdp->cur_tx % TX_RING_SIZE;
	mdp->tx_skbuff[entry] = skb;
	txdesc = &mdp->tx_ring[entry];
	txdesc->addr = (u32)(skb->data);
	/* soft swap. */
	swaps((char *)(txdesc->addr & ~0x3), skb->len + 2);
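	/*
	 * swaps() is this driver's own byte-swapping helper (expected to
	 * come from sh_eth.h). The cache flush that follows matters because
	 * the descriptor points at skb->data directly: on SH cores with a
	 * write-back cache, the frame must reach memory before the E-DMAC
	 * is kicked, or stale data would go out on the wire.
	 */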
	/* write back */
	__flush_purge_region(skb->data, skb->len);
	if (skb->len < ETHERSMALL)
		txdesc->buffer_length = ETHERSMALL;
	else
		txdesc->buffer_length = skb->len;

	if (entry >= TX_RING_SIZE - 1)
		txdesc->status |= cpu_to_le32(TD_TACT | TD_TDLE);
	else
		txdesc->status |= cpu_to_le32(TD_TACT);

	mdp->cur_tx++;

	ctrl_outl(EDTRR_TRNS, ndev->base_addr + EDTRR);
	ndev->trans_start = jiffies;

	return 0;
}
/* device close function */
static int sh_eth_close(struct net_device *ndev)
{
	struct sh_eth_private *mdp = netdev_priv(ndev);
	u32 ioaddr = ndev->base_addr;
	int ringsize;

	netif_stop_queue(ndev);

	/* Disable interrupts by clearing the interrupt mask. */
	ctrl_outl(0x0000, ioaddr + EESIPR);

	/* Stop the chip's Tx and Rx processes. */
	ctrl_outl(0, ioaddr + EDTRR);
	ctrl_outl(0, ioaddr + EDRRR);

	/* PHY Disconnect */
	if (mdp->phydev) {
		phy_stop(mdp->phydev);
		phy_disconnect(mdp->phydev);
	}

	free_irq(ndev->irq, ndev);

	del_timer_sync(&mdp->timer);

	/* Free all the skbuffs in the Rx queue. */
	sh_eth_ring_free(ndev);

	/* free DMA buffer */
	ringsize = sizeof(struct sh_eth_rxdesc) * RX_RING_SIZE;
	dma_free_coherent(NULL, ringsize, mdp->rx_ring, mdp->rx_desc_dma);

	/* free DMA buffer */
	ringsize = sizeof(struct sh_eth_txdesc) * TX_RING_SIZE;
	dma_free_coherent(NULL, ringsize, mdp->tx_ring, mdp->tx_desc_dma);

	return 0;
}
/* Fold the hardware drop/collision counters into the software stats and
 * clear them (they are write-clear registers). */
static struct net_device_stats *sh_eth_get_stats(struct net_device *ndev)
{
	struct sh_eth_private *mdp = netdev_priv(ndev);
	u32 ioaddr = ndev->base_addr;

	mdp->stats.tx_dropped += ctrl_inl(ioaddr + TROCR);
	ctrl_outl(0, ioaddr + TROCR);	/* (write clear) */
	mdp->stats.collisions += ctrl_inl(ioaddr + CDCR);
	ctrl_outl(0, ioaddr + CDCR);	/* (write clear) */
	mdp->stats.tx_carrier_errors += ctrl_inl(ioaddr + LCCR);
	ctrl_outl(0, ioaddr + LCCR);	/* (write clear) */
	mdp->stats.tx_carrier_errors += ctrl_inl(ioaddr + CNDCR);
	ctrl_outl(0, ioaddr + CNDCR);	/* (write clear) */

	return &mdp->stats;
}
/* ioctl to device function */
static int sh_eth_do_ioctl(struct net_device *ndev, struct ifreq *rq,
			   int cmd)
{
	struct sh_eth_private *mdp = netdev_priv(ndev);
	struct phy_device *phydev = mdp->phydev;

	if (!netif_running(ndev))
		return -EINVAL;

	if (!phydev)
		return -ENODEV;

	return phy_mii_ioctl(phydev, if_mii(rq), cmd);
}
/* Multicast reception directions set */
static void sh_eth_set_multicast_list(struct net_device *ndev)
{
	u32 ioaddr = ndev->base_addr;

	if (ndev->flags & IFF_PROMISC) {
		/* Set promiscuous. */
		ctrl_outl((ctrl_inl(ioaddr + ECMR) & ~ECMR_MCT) | ECMR_PRM,
			  ioaddr + ECMR);
	} else {
		/* Normal, unicast/broadcast-only mode. */
		ctrl_outl((ctrl_inl(ioaddr + ECMR) & ~ECMR_PRM) | ECMR_MCT,
			  ioaddr + ECMR);
	}
}
/* SuperH's TSU register init function */
static void sh_eth_tsu_init(u32 ioaddr)
{
	ctrl_outl(0, ioaddr + TSU_FWEN0);	/* Disable forward(0->1) */
	ctrl_outl(0, ioaddr + TSU_FWEN1);	/* Disable forward(1->0) */
	ctrl_outl(0, ioaddr + TSU_FCM);		/* forward fifo 3k-3k */
	ctrl_outl(0xc, ioaddr + TSU_BSYSL0);
	ctrl_outl(0xc, ioaddr + TSU_BSYSL1);
	ctrl_outl(0, ioaddr + TSU_PRISL0);
	ctrl_outl(0, ioaddr + TSU_PRISL1);
	ctrl_outl(0, ioaddr + TSU_FWSL0);
	ctrl_outl(0, ioaddr + TSU_FWSL1);
	ctrl_outl(TSU_FWSLC_POSTENU | TSU_FWSLC_POSTENL, ioaddr + TSU_FWSLC);
	ctrl_outl(0, ioaddr + TSU_QTAGM0);	/* Disable QTAG(0->1) */
	ctrl_outl(0, ioaddr + TSU_QTAGM1);	/* Disable QTAG(1->0) */
	ctrl_outl(0, ioaddr + TSU_FWSR);	/* all interrupt status clear */
	ctrl_outl(0, ioaddr + TSU_FWINMK);	/* Disable all interrupt */
	ctrl_outl(0, ioaddr + TSU_TEN);		/* Disable all CAM entry */
	ctrl_outl(0, ioaddr + TSU_POST1);	/* Disable CAM entry [ 0- 7] */
	ctrl_outl(0, ioaddr + TSU_POST2);	/* Disable CAM entry [ 8-15] */
	ctrl_outl(0, ioaddr + TSU_POST3);	/* Disable CAM entry [16-23] */
	ctrl_outl(0, ioaddr + TSU_POST4);	/* Disable CAM entry [24-31] */
}
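/*
 * Rough picture of the block initialized above, inferred from the register
 * names and comments rather than a datasheet: the TSU can forward frames
 * between the controller's two ports (TSU_FWEN0/1), match VLAN tags
 * (TSU_QTAGM0/1), and filter through the 32 CAM entries enabled via
 * TSU_TEN and TSU_POST1-4. This init parks all of that in a disabled,
 * interrupt-masked state; sh_eth_drv_probe() calls it once, for device 0.
 */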
/* MDIO bus release function */
static int sh_mdio_release(struct net_device *ndev)
{
	struct mii_bus *bus = dev_get_drvdata(&ndev->dev);

	/* unregister mdio bus */
	mdiobus_unregister(bus);

	/* remove mdio bus info from net_device */
	dev_set_drvdata(&ndev->dev, NULL);

	/* free bitbang info */
	free_mdio_bitbang(bus);

	return 0;
}
/* MDIO bus init function */
static int sh_mdio_init(struct net_device *ndev, int id)
{
	int ret, i;
	struct bb_info *bitbang;
	struct sh_eth_private *mdp = netdev_priv(ndev);

	/* create bit control struct for PHY */
	bitbang = kzalloc(sizeof(struct bb_info), GFP_KERNEL);
	if (!bitbang) {
		ret = -ENOMEM;
		goto out;
	}

	/* bitbang init: bit masks within the PIR register */
	bitbang->addr = ndev->base_addr + PIR;
	bitbang->mdi_msk = 0x08;
	bitbang->mdo_msk = 0x04;
	bitbang->mmd_msk = 0x02;	/* MMD */
	bitbang->mdc_msk = 0x01;
	bitbang->ctrl.ops = &bb_ops;

	/* MII controller setting */
	mdp->mii_bus = alloc_mdio_bitbang(&bitbang->ctrl);
	if (!mdp->mii_bus) {
		ret = -ENOMEM;
		goto out_free_bitbang;
	}

	/* Hook up MII support for ethtool */
	mdp->mii_bus->name = "sh_mii";
	mdp->mii_bus->dev = &ndev->dev;
	mdp->mii_bus->id[0] = id;

	/* PHY IRQ */
	mdp->mii_bus->irq = kmalloc(sizeof(int) * PHY_MAX_ADDR, GFP_KERNEL);
	if (!mdp->mii_bus->irq) {
		ret = -ENOMEM;
		goto out_free_bus;
	}

	for (i = 0; i < PHY_MAX_ADDR; i++)
		mdp->mii_bus->irq[i] = PHY_POLL;

	/* register mdio bus */
	ret = mdiobus_register(mdp->mii_bus);
	if (ret)
		goto out_free_irq;

	dev_set_drvdata(&ndev->dev, mdp->mii_bus);

	return 0;

out_free_irq:
	kfree(mdp->mii_bus->irq);

out_free_bus:
	kfree(mdp->mii_bus);

out_free_bitbang:
	kfree(bitbang);

out:
	return ret;
}
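/*
 * The mask values above (MDC = bit 0, MMD = bit 1, MDO = bit 2,
 * MDI = bit 3) describe the PIR pin layout this driver assumes; they are
 * taken from the assignments in sh_mdio_init() itself rather than from a
 * datasheet, so treat them as driver convention, not specification.
 */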
static int sh_eth_drv_probe(struct platform_device *pdev)
{
	int ret, i, devno = 0;
	struct resource *res;
	struct net_device *ndev = NULL;
	struct sh_eth_private *mdp;

	/* get base addr */
	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (unlikely(res == NULL)) {
		dev_err(&pdev->dev, "invalid resource\n");
		ret = -EINVAL;
		goto out;
	}

	ndev = alloc_etherdev(sizeof(struct sh_eth_private));
	if (!ndev) {
		printk(KERN_ERR "%s: could not allocate device.\n", CARDNAME);
		ret = -ENOMEM;
		goto out;
	}

	/* The sh Ether-specific entries in the device structure. */
	ndev->base_addr = res->start;
	devno = pdev->id;
	if (devno < 0)
		devno = 0;

	ndev->irq = platform_get_irq(pdev, 0);
	if (ndev->irq < 0) {
		ret = -ENODEV;
		goto out_release;
	}

	SET_NETDEV_DEV(ndev, &pdev->dev);

	/* Fill in the fields of the device structure with ethernet values. */
	ether_setup(ndev);

	mdp = netdev_priv(ndev);
	spin_lock_init(&mdp->lock);

	/* get PHY ID */
	mdp->phy_id = (int)pdev->dev.platform_data;

	/* set function */
	ndev->open = sh_eth_open;
	ndev->hard_start_xmit = sh_eth_start_xmit;
	ndev->stop = sh_eth_close;
	ndev->get_stats = sh_eth_get_stats;
	ndev->set_multicast_list = sh_eth_set_multicast_list;
	ndev->do_ioctl = sh_eth_do_ioctl;
	ndev->tx_timeout = sh_eth_tx_timeout;
	ndev->watchdog_timeo = TX_TIMEOUT;

	mdp->post_rx = POST_RX >> (devno << 1);
	mdp->post_fw = POST_FW >> (devno << 1);

	/* read and set MAC address */
	read_mac_address(ndev);

	/* First device only init */
	if (!devno) {
		/* reset device */
		ctrl_outl(ARSTR_ARSTR, ndev->base_addr + ARSTR);
		mdelay(1);

		/* TSU init (Init only)*/
		sh_eth_tsu_init(SH_TSU_ADDR);
	}

	/* network device register */
	ret = register_netdev(ndev);
	if (ret)
		goto out_release;

	/* mdio bus init */
	ret = sh_mdio_init(ndev, pdev->id);
	if (ret)
		goto out_unregister;

	/* print device information */
	printk(KERN_INFO "%s: %s at 0x%x, ",
	       ndev->name, CARDNAME, (u32) ndev->base_addr);

	for (i = 0; i < 5; i++)
		printk(KERN_INFO "%2.2x:", ndev->dev_addr[i]);
	printk(KERN_INFO "%2.2x, IRQ %d.\n", ndev->dev_addr[i], ndev->irq);

	platform_set_drvdata(pdev, ndev);

	return ret;

out_unregister:
	unregister_netdev(ndev);

out_release:
	/* net_dev free */
	if (ndev)
		free_netdev(ndev);

out:
	return ret;
}
static int sh_eth_drv_remove(struct platform_device *pdev)
{
	struct net_device *ndev = platform_get_drvdata(pdev);

	sh_mdio_release(ndev);
	unregister_netdev(ndev);
	flush_scheduled_work();

	free_netdev(ndev);
	platform_set_drvdata(pdev, NULL);

	return 0;
}
static struct platform_driver sh_eth_driver = {
	.probe = sh_eth_drv_probe,
	.remove = sh_eth_drv_remove,
	.driver = {
		.name = CARDNAME,
	},
};
static int __init sh_eth_init(void)
{
	return platform_driver_register(&sh_eth_driver);
}

static void __exit sh_eth_cleanup(void)
{
	platform_driver_unregister(&sh_eth_driver);
}

module_init(sh_eth_init);
module_exit(sh_eth_cleanup);
MODULE_AUTHOR("Nobuhiro Iwamatsu, Yoshihiro Shimoda");
MODULE_DESCRIPTION("Renesas SuperH Ethernet driver");
MODULE_LICENSE("GPL v2");