/*
 * SuperH Ethernet device driver
 *
 * Copyright (C) 2006-2008 Nobuhiro Iwamatsu
 * Copyright (C) 2008 Renesas Solutions Corp.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc.,
 * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * The full GNU General Public License is included in this distribution in
 * the file called "COPYING".
 */
#include <linux/init.h>
#include <linux/dma-mapping.h>
#include <linux/etherdevice.h>
#include <linux/delay.h>
#include <linux/platform_device.h>
#include <linux/mdio-bitbang.h>
#include <linux/netdevice.h>
#include <linux/phy.h>
#include <linux/cache.h>
#include <linux/io.h>

#include "sh_eth.h"
/* CPU <-> EDMAC endian convert */
static inline __u32 cpu_to_edmac(struct sh_eth_private *mdp, u32 x)
{
        switch (mdp->edmac_endian) {
        case EDMAC_LITTLE_ENDIAN:
                return cpu_to_le32(x);
        case EDMAC_BIG_ENDIAN:
                return cpu_to_be32(x);
        }
        return x;
}
static inline __u32 edmac_to_cpu(struct sh_eth_private *mdp, u32 x)
{
        switch (mdp->edmac_endian) {
        case EDMAC_LITTLE_ENDIAN:
                return le32_to_cpu(x);
        case EDMAC_BIG_ENDIAN:
                return be32_to_cpu(x);
        }
        return x;
}
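/*
 * Illustrative usage (taken from the descriptor handling later in this
 * file): descriptor status words are kept in EDMAC byte order, so the
 * driver always goes through these helpers, e.g.
 *
 *	rxdesc->status = cpu_to_edmac(mdp, RD_RACT | RD_RFP);
 *	desc_status = edmac_to_cpu(mdp, rxdesc->status);
 *
 * This keeps the rest of the code independent of the edmac_endian value
 * supplied via the platform data.
 */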
/* Program the hardware MAC address from dev->dev_addr. */
static void update_mac_address(struct net_device *ndev)
{
        u32 ioaddr = ndev->base_addr;

        ctrl_outl((ndev->dev_addr[0] << 24) | (ndev->dev_addr[1] << 16) |
                  (ndev->dev_addr[2] << 8) | (ndev->dev_addr[3]),
                  ioaddr + MAHR);
        ctrl_outl((ndev->dev_addr[4] << 8) | (ndev->dev_addr[5]),
                  ioaddr + MALR);
}
/*
 * Get MAC address from SuperH MAC address register
 *
 * SuperH's Ethernet device doesn't have 'ROM' for the MAC address.
 * This driver picks up the MAC address that was set by the bootloader
 * (U-Boot or sh-ipl+g). To use this device, the MAC address must be set
 * in the bootloader.
 */
static void read_mac_address(struct net_device *ndev)
{
        u32 ioaddr = ndev->base_addr;

        ndev->dev_addr[0] = (ctrl_inl(ioaddr + MAHR) >> 24);
        ndev->dev_addr[1] = (ctrl_inl(ioaddr + MAHR) >> 16) & 0xFF;
        ndev->dev_addr[2] = (ctrl_inl(ioaddr + MAHR) >> 8) & 0xFF;
        ndev->dev_addr[3] = (ctrl_inl(ioaddr + MAHR) & 0xFF);
        ndev->dev_addr[4] = (ctrl_inl(ioaddr + MALR) >> 8) & 0xFF;
        ndev->dev_addr[5] = (ctrl_inl(ioaddr + MALR) & 0xFF);
}
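/*
 * Worked example (illustrative only): for the MAC address 00:11:22:33:44:55
 * the registers hold MAHR = 0x00112233 and MALR = 0x00004455, i.e. the first
 * four octets live in MAHR (MSB first) and the last two in the low half-word
 * of MALR, matching the shifts used above and in update_mac_address().
 */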
/* Bit-banged MDIO access over the PIR register */
struct bb_info {
        struct mdiobb_ctrl ctrl;
        u32 addr;
        u32 mmd_msk;    /* MMD */
        u32 mdo_msk;
        u32 mdi_msk;
        u32 mdc_msk;
};
/* PHY bit set */
static void bb_set(u32 addr, u32 msk)
{
        ctrl_outl(ctrl_inl(addr) | msk, addr);
}

/* PHY bit clear */
static void bb_clr(u32 addr, u32 msk)
{
        ctrl_outl((ctrl_inl(addr) & ~msk), addr);
}

/* PHY bit read */
static int bb_read(u32 addr, u32 msk)
{
        return (ctrl_inl(addr) & msk) != 0;
}
/* Data I/O pin control */
static void sh_mmd_ctrl(struct mdiobb_ctrl *ctrl, int bit)
{
        struct bb_info *bitbang = container_of(ctrl, struct bb_info, ctrl);

        if (bit)
                bb_set(bitbang->addr, bitbang->mmd_msk);
        else
                bb_clr(bitbang->addr, bitbang->mmd_msk);
}
/* Set bit data */
static void sh_set_mdio(struct mdiobb_ctrl *ctrl, int bit)
{
        struct bb_info *bitbang = container_of(ctrl, struct bb_info, ctrl);

        if (bit)
                bb_set(bitbang->addr, bitbang->mdo_msk);
        else
                bb_clr(bitbang->addr, bitbang->mdo_msk);
}
/* Get bit data */
static int sh_get_mdio(struct mdiobb_ctrl *ctrl)
{
        struct bb_info *bitbang = container_of(ctrl, struct bb_info, ctrl);

        return bb_read(bitbang->addr, bitbang->mdi_msk);
}
/* MDC pin control */
static void sh_mdc_ctrl(struct mdiobb_ctrl *ctrl, int bit)
{
        struct bb_info *bitbang = container_of(ctrl, struct bb_info, ctrl);

        if (bit)
                bb_set(bitbang->addr, bitbang->mdc_msk);
        else
                bb_clr(bitbang->addr, bitbang->mdc_msk);
}
/* mdio bus control struct */
static struct mdiobb_ops bb_ops = {
        .owner = THIS_MODULE,
        .set_mdc = sh_mdc_ctrl,
        .set_mdio_dir = sh_mmd_ctrl,
        .set_mdio_data = sh_set_mdio,
        .get_mdio_data = sh_get_mdio,
};
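/*
 * Note: the PIR bit masks these callbacks operate on (MDC = 0x01,
 * MMD/direction = 0x02, MDO = 0x04, MDI = 0x08) are assigned in
 * sh_mdio_init() below when the bb_info structure is filled in.
 */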
/* Chip reset */
static void sh_eth_reset(struct net_device *ndev)
{
        u32 ioaddr = ndev->base_addr;

#if defined(CONFIG_CPU_SUBTYPE_SH7763)
        int cnt = 100;

        ctrl_outl(EDSR_ENALL, ioaddr + EDSR);
        ctrl_outl(ctrl_inl(ioaddr + EDMR) | EDMR_SRST, ioaddr + EDMR);
        while (cnt > 0) {
                if (!(ctrl_inl(ioaddr + EDMR) & 0x3))
                        break;
                mdelay(1);
                cnt--;
        }
        if (cnt <= 0)
                printk(KERN_ERR "Device reset fail\n");

        /* Table Init */
        ctrl_outl(0x0, ioaddr + TDLAR);
        ctrl_outl(0x0, ioaddr + TDFAR);
        ctrl_outl(0x0, ioaddr + TDFXR);
        ctrl_outl(0x0, ioaddr + TDFFR);
        ctrl_outl(0x0, ioaddr + RDLAR);
        ctrl_outl(0x0, ioaddr + RDFAR);
        ctrl_outl(0x0, ioaddr + RDFXR);
        ctrl_outl(0x0, ioaddr + RDFFR);
#else
        ctrl_outl(ctrl_inl(ioaddr + EDMR) | EDMR_SRST, ioaddr + EDMR);
        mdelay(3);
        ctrl_outl(ctrl_inl(ioaddr + EDMR) & ~EDMR_SRST, ioaddr + EDMR);
#endif
}
/* free skb and descriptor buffer */
static void sh_eth_ring_free(struct net_device *ndev)
{
        struct sh_eth_private *mdp = netdev_priv(ndev);
        int i;

        /* Free Rx skb ringbuffer */
        if (mdp->rx_skbuff) {
                for (i = 0; i < RX_RING_SIZE; i++) {
                        if (mdp->rx_skbuff[i])
                                dev_kfree_skb(mdp->rx_skbuff[i]);
                }
        }
        kfree(mdp->rx_skbuff);
        mdp->rx_skbuff = NULL;

        /* Free Tx skb ringbuffer */
        if (mdp->tx_skbuff) {
                for (i = 0; i < TX_RING_SIZE; i++) {
                        if (mdp->tx_skbuff[i])
                                dev_kfree_skb(mdp->tx_skbuff[i]);
                }
        }
        kfree(mdp->tx_skbuff);
        mdp->tx_skbuff = NULL;
}
/* format skb and descriptor buffer */
static void sh_eth_ring_format(struct net_device *ndev)
{
        u32 ioaddr = ndev->base_addr, reserve = 0;
        struct sh_eth_private *mdp = netdev_priv(ndev);
        int i;
        struct sk_buff *skb;
        struct sh_eth_rxdesc *rxdesc = NULL;
        struct sh_eth_txdesc *txdesc = NULL;
        int rx_ringsize = sizeof(*rxdesc) * RX_RING_SIZE;
        int tx_ringsize = sizeof(*txdesc) * TX_RING_SIZE;

        mdp->cur_rx = mdp->cur_tx = 0;
        mdp->dirty_rx = mdp->dirty_tx = 0;

        memset(mdp->rx_ring, 0, rx_ringsize);

        /* build Rx ring buffer */
        for (i = 0; i < RX_RING_SIZE; i++) {
                /* skb */
                mdp->rx_skbuff[i] = NULL;
                skb = dev_alloc_skb(mdp->rx_buf_sz);
                mdp->rx_skbuff[i] = skb;
                if (skb == NULL)
                        break;
                dma_map_single(&ndev->dev, skb->tail, mdp->rx_buf_sz,
                               DMA_FROM_DEVICE);
                skb->dev = ndev; /* Mark as being used by this device. */
#if defined(CONFIG_CPU_SUBTYPE_SH7763)
                reserve = SH7763_SKB_ALIGN
                        - ((uint32_t)skb->data & (SH7763_SKB_ALIGN - 1));
                if (reserve)
                        skb_reserve(skb, reserve);
#else
                skb_reserve(skb, RX_OFFSET);
#endif
                /* RX descriptor */
                rxdesc = &mdp->rx_ring[i];
                rxdesc->addr = virt_to_phys(PTR_ALIGN(skb->data, 4));
                rxdesc->status = cpu_to_edmac(mdp, RD_RACT | RD_RFP);

                /* The size of the buffer is 16 byte boundary. */
                rxdesc->buffer_length = ALIGN(mdp->rx_buf_sz, 16);
                /* Rx descriptor address set */
                if (i == 0) {
                        ctrl_outl(mdp->rx_desc_dma, ioaddr + RDLAR);
#if defined(CONFIG_CPU_SUBTYPE_SH7763)
                        ctrl_outl(mdp->rx_desc_dma, ioaddr + RDFAR);
#endif
                }
        }

        mdp->dirty_rx = (u32) (i - RX_RING_SIZE);

        /* Mark the last entry as wrapping the ring. */
        rxdesc->status |= cpu_to_edmac(mdp, RD_RDEL);

        memset(mdp->tx_ring, 0, tx_ringsize);

        /* build Tx ring buffer */
        for (i = 0; i < TX_RING_SIZE; i++) {
                mdp->tx_skbuff[i] = NULL;
                txdesc = &mdp->tx_ring[i];
                txdesc->status = cpu_to_edmac(mdp, TD_TFP);
                txdesc->buffer_length = 0;
                if (i == 0) {
                        /* Tx descriptor address set */
                        ctrl_outl(mdp->tx_desc_dma, ioaddr + TDLAR);
#if defined(CONFIG_CPU_SUBTYPE_SH7763)
                        ctrl_outl(mdp->tx_desc_dma, ioaddr + TDFAR);
#endif
                }
        }

        txdesc->status |= cpu_to_edmac(mdp, TD_TDLE);
}
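/*
 * Note on descriptor ownership: the RD_RACT / TD_TACT "active" bits written
 * via cpu_to_edmac() act as the hand-over flag between driver and E-DMAC.
 * The driver sets them when a descriptor is ready for the hardware, and the
 * hardware clears them on completion, which is exactly what sh_eth_rx() and
 * sh_eth_txfree() poll for below.
 */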
/* Get skb and descriptor buffer */
static int sh_eth_ring_init(struct net_device *ndev)
{
        struct sh_eth_private *mdp = netdev_priv(ndev);
        int rx_ringsize, tx_ringsize, ret = 0;

        /*
         * +26 gets the maximum ethernet encapsulation, +7 & ~7 because the
         * card needs room to do 8 byte alignment, +2 so we can reserve
         * the first 2 bytes, and +16 gets room for the status word from the
         * card.
         */
        mdp->rx_buf_sz = (ndev->mtu <= 1492 ? PKT_BUF_SZ :
                          (((ndev->mtu + 26 + 7) & ~7) + 2 + 16));
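        /*
         * Worked example: with the default MTU of 1500 (> 1492) this gives
         * ((1500 + 26 + 7) & ~7) + 2 + 16 = 1528 + 18 = 1546 bytes per Rx
         * buffer; sh_eth_ring_format() then rounds the length programmed
         * into the descriptor up to a 16 byte boundary (1552).
         */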
        /* Allocate RX and TX skb rings */
        mdp->rx_skbuff = kmalloc(sizeof(*mdp->rx_skbuff) * RX_RING_SIZE,
                                 GFP_KERNEL);
        if (!mdp->rx_skbuff) {
                printk(KERN_ERR "%s: Cannot allocate Rx skb\n", ndev->name);
                ret = -ENOMEM;
                return ret;
        }

        mdp->tx_skbuff = kmalloc(sizeof(*mdp->tx_skbuff) * TX_RING_SIZE,
                                 GFP_KERNEL);
        if (!mdp->tx_skbuff) {
                printk(KERN_ERR "%s: Cannot allocate Tx skb\n", ndev->name);
                ret = -ENOMEM;
                goto skb_ring_free;
        }

        /* Allocate all Rx descriptors. */
        rx_ringsize = sizeof(struct sh_eth_rxdesc) * RX_RING_SIZE;
        mdp->rx_ring = dma_alloc_coherent(NULL, rx_ringsize, &mdp->rx_desc_dma,
                                          GFP_KERNEL);
        if (!mdp->rx_ring) {
                printk(KERN_ERR "%s: Cannot allocate Rx Ring (size %d bytes)\n",
                       ndev->name, rx_ringsize);
                ret = -ENOMEM;
                goto skb_ring_free;
        }

        mdp->dirty_rx = 0;

        /* Allocate all Tx descriptors. */
        tx_ringsize = sizeof(struct sh_eth_txdesc) * TX_RING_SIZE;
        mdp->tx_ring = dma_alloc_coherent(NULL, tx_ringsize, &mdp->tx_desc_dma,
                                          GFP_KERNEL);
        if (!mdp->tx_ring) {
                printk(KERN_ERR "%s: Cannot allocate Tx Ring (size %d bytes)\n",
                       ndev->name, tx_ringsize);
                ret = -ENOMEM;
                goto desc_ring_free;
        }
        return ret;

desc_ring_free:
        /* free DMA buffer */
        dma_free_coherent(NULL, rx_ringsize, mdp->rx_ring, mdp->rx_desc_dma);

skb_ring_free:
        /* Free Rx and Tx skb ring buffer */
        sh_eth_ring_free(ndev);

        return ret;
}
static int sh_eth_dev_init(struct net_device *ndev)
{
        int ret = 0;
        struct sh_eth_private *mdp = netdev_priv(ndev);
        u32 ioaddr = ndev->base_addr;
        u_int32_t rx_int_var, tx_int_var;
        u32 val;

        /* Soft Reset */
        sh_eth_reset(ndev);

        /* Descriptor format */
        sh_eth_ring_format(ndev);
        ctrl_outl(RPADIR_INIT, ioaddr + RPADIR);

        /* all sh_eth int mask */
        ctrl_outl(0, ioaddr + EESIPR);

#if defined(CONFIG_CPU_SUBTYPE_SH7763)
        ctrl_outl(EDMR_EL, ioaddr + EDMR);
#else
        ctrl_outl(0, ioaddr + EDMR);    /* Endian change */
#endif

        /* FIFO size set */
        ctrl_outl((FIFO_SIZE_T | FIFO_SIZE_R), ioaddr + FDR);
        ctrl_outl(0, ioaddr + TFTR);

        /* Frame recv control */
        ctrl_outl(0, ioaddr + RMCR);

        rx_int_var = mdp->rx_int_var = DESC_I_RINT8 | DESC_I_RINT5;
        tx_int_var = mdp->tx_int_var = DESC_I_TINT2;
        ctrl_outl(rx_int_var | tx_int_var, ioaddr + TRSCER);

#if defined(CONFIG_CPU_SUBTYPE_SH7763)
        /* Burst cycle set */
        ctrl_outl(0x800, ioaddr + BCULR);
#endif

        ctrl_outl((FIFO_F_D_RFF | FIFO_F_D_RFD), ioaddr + FCFTR);

#if !defined(CONFIG_CPU_SUBTYPE_SH7763)
        ctrl_outl(0, ioaddr + TRIMD);
#endif

        /* Recv frame limit set register */
        ctrl_outl(RFLR_VALUE, ioaddr + RFLR);

        ctrl_outl(ctrl_inl(ioaddr + EESR), ioaddr + EESR);
        ctrl_outl((DMAC_M_RFRMER | DMAC_M_ECI | 0x003fffff), ioaddr + EESIPR);

        /* PAUSE Prohibition */
        val = (ctrl_inl(ioaddr + ECMR) & ECMR_DM) |
                ECMR_ZPF | (mdp->duplex ? ECMR_DM : 0) | ECMR_TE | ECMR_RE;

        ctrl_outl(val, ioaddr + ECMR);

        /* E-MAC Status Register clear */
        ctrl_outl(ECSR_INIT, ioaddr + ECSR);

        /* E-MAC Interrupt Enable register */
        ctrl_outl(ECSIPR_INIT, ioaddr + ECSIPR);

        /* Set MAC address */
        update_mac_address(ndev);

#if defined(CONFIG_CPU_SUBTYPE_SH7710) || defined(CONFIG_CPU_SUBTYPE_SH7763)
        ctrl_outl(APR_AP, ioaddr + APR);
        ctrl_outl(MPR_MP, ioaddr + MPR);
        ctrl_outl(TPAUSER_UNLIMITED, ioaddr + TPAUSER);
#endif
#if defined(CONFIG_CPU_SUBTYPE_SH7710)
        ctrl_outl(BCFR_UNLIMITED, ioaddr + BCFR);
#endif

        /* Setting the Rx mode will start the Rx process. */
        ctrl_outl(EDRRR_R, ioaddr + EDRRR);

        netif_start_queue(ndev);

        return ret;
}
/* free Tx skb function */
static int sh_eth_txfree(struct net_device *ndev)
{
        struct sh_eth_private *mdp = netdev_priv(ndev);
        struct sh_eth_txdesc *txdesc;
        int free_num = 0;
        int entry = 0;

        for (; mdp->cur_tx - mdp->dirty_tx > 0; mdp->dirty_tx++) {
                entry = mdp->dirty_tx % TX_RING_SIZE;
                txdesc = &mdp->tx_ring[entry];
                if (txdesc->status & cpu_to_edmac(mdp, TD_TACT))
                        break;
                /* Free the original skb. */
                if (mdp->tx_skbuff[entry]) {
                        dev_kfree_skb_irq(mdp->tx_skbuff[entry]);
                        mdp->tx_skbuff[entry] = NULL;
                        free_num++;
                }
                txdesc->status = cpu_to_edmac(mdp, TD_TFP);
                if (entry >= TX_RING_SIZE - 1)
                        txdesc->status |= cpu_to_edmac(mdp, TD_TDLE);

                mdp->stats.tx_packets++;
                mdp->stats.tx_bytes += txdesc->buffer_length;
        }
        return free_num;
}
/* Packet receive function */
static int sh_eth_rx(struct net_device *ndev)
{
        struct sh_eth_private *mdp = netdev_priv(ndev);
        struct sh_eth_rxdesc *rxdesc;

        int entry = mdp->cur_rx % RX_RING_SIZE;
        int boguscnt = (mdp->dirty_rx + RX_RING_SIZE) - mdp->cur_rx;
        struct sk_buff *skb;
        u16 pkt_len = 0;
        u32 desc_status, reserve = 0;

        rxdesc = &mdp->rx_ring[entry];
        while (!(rxdesc->status & cpu_to_edmac(mdp, RD_RACT))) {
                desc_status = edmac_to_cpu(mdp, rxdesc->status);
                pkt_len = rxdesc->frame_length;

                if (--boguscnt < 0)
                        break;

                if (!(desc_status & RDFEND))
                        mdp->stats.rx_length_errors++;

                if (desc_status & (RD_RFS1 | RD_RFS2 | RD_RFS3 | RD_RFS4 |
                                   RD_RFS5 | RD_RFS6 | RD_RFS10)) {
                        mdp->stats.rx_errors++;
                        if (desc_status & RD_RFS1)
                                mdp->stats.rx_crc_errors++;
                        if (desc_status & RD_RFS2)
                                mdp->stats.rx_frame_errors++;
                        if (desc_status & RD_RFS3)
                                mdp->stats.rx_length_errors++;
                        if (desc_status & RD_RFS4)
                                mdp->stats.rx_length_errors++;
                        if (desc_status & RD_RFS6)
                                mdp->stats.rx_missed_errors++;
                        if (desc_status & RD_RFS10)
                                mdp->stats.rx_over_errors++;
                } else {
                        swaps(phys_to_virt(ALIGN(rxdesc->addr, 4)),
                              pkt_len + 2);
                        skb = mdp->rx_skbuff[entry];
                        mdp->rx_skbuff[entry] = NULL;
                        skb_put(skb, pkt_len);
                        skb->protocol = eth_type_trans(skb, ndev);
                        netif_rx(skb);
                        mdp->stats.rx_packets++;
                        mdp->stats.rx_bytes += pkt_len;
                }
                rxdesc->status |= cpu_to_edmac(mdp, RD_RACT);
                entry = (++mdp->cur_rx) % RX_RING_SIZE;
                rxdesc = &mdp->rx_ring[entry];
        }

        /* Refill the Rx ring buffers. */
        for (; mdp->cur_rx - mdp->dirty_rx > 0; mdp->dirty_rx++) {
                entry = mdp->dirty_rx % RX_RING_SIZE;
                rxdesc = &mdp->rx_ring[entry];
                /* The size of the buffer is 16 byte boundary. */
                rxdesc->buffer_length = ALIGN(mdp->rx_buf_sz, 16);

                if (mdp->rx_skbuff[entry] == NULL) {
                        skb = dev_alloc_skb(mdp->rx_buf_sz);
                        mdp->rx_skbuff[entry] = skb;
                        if (skb == NULL)
                                break;  /* Better luck next round. */
                        dma_map_single(&ndev->dev, skb->tail, mdp->rx_buf_sz,
                                       DMA_FROM_DEVICE);
                        skb->dev = ndev;
#if defined(CONFIG_CPU_SUBTYPE_SH7763)
                        reserve = SH7763_SKB_ALIGN
                                - ((uint32_t)skb->data & (SH7763_SKB_ALIGN - 1));
                        if (reserve)
                                skb_reserve(skb, reserve);
#else
                        skb_reserve(skb, RX_OFFSET);
#endif
                        skb->ip_summed = CHECKSUM_NONE;
                        rxdesc->addr = virt_to_phys(PTR_ALIGN(skb->data, 4));
                }
                if (entry >= RX_RING_SIZE - 1)
                        rxdesc->status |=
                                cpu_to_edmac(mdp, RD_RACT | RD_RFP | RD_RDEL);
                else
                        rxdesc->status |=
                                cpu_to_edmac(mdp, RD_RACT | RD_RFP);
        }

        /* Restart Rx engine if stopped. */
        /* If we don't need to check status, don't. -KDU */
        if (!(ctrl_inl(ndev->base_addr + EDRRR) & EDRRR_R))
                ctrl_outl(EDRRR_R, ndev->base_addr + EDRRR);

        return 0;
}
/* error control function */
static void sh_eth_error(struct net_device *ndev, int intr_status)
{
        struct sh_eth_private *mdp = netdev_priv(ndev);
        u32 ioaddr = ndev->base_addr;
        u32 felic_stat;

        if (intr_status & EESR_ECI) {
                felic_stat = ctrl_inl(ioaddr + ECSR);
                ctrl_outl(felic_stat, ioaddr + ECSR);  /* clear int */
                if (felic_stat & ECSR_ICD)
                        mdp->stats.tx_carrier_errors++;
                if (felic_stat & ECSR_LCHNG) {
                        /* Link Changed */
                        u32 link_stat = (ctrl_inl(ioaddr + PSR));
                        if (!(link_stat & PHY_ST_LINK)) {
                                /* Link Down : disable tx and rx */
                                ctrl_outl(ctrl_inl(ioaddr + ECMR) &
                                          ~(ECMR_RE | ECMR_TE), ioaddr + ECMR);
                        } else {
                                /* Link Up */
                                ctrl_outl(ctrl_inl(ioaddr + EESIPR) &
                                          ~DMAC_M_ECI, ioaddr + EESIPR);
                                /* clear int */
                                ctrl_outl(ctrl_inl(ioaddr + ECSR),
                                          ioaddr + ECSR);
                                ctrl_outl(ctrl_inl(ioaddr + EESIPR) |
                                          DMAC_M_ECI, ioaddr + EESIPR);
                                /* enable tx and rx */
                                ctrl_outl(ctrl_inl(ioaddr + ECMR) |
                                          (ECMR_RE | ECMR_TE), ioaddr + ECMR);
                        }
                }
        }

        if (intr_status & EESR_TWB) {
                /* Write back end. unused write back interrupt */
                if (intr_status & EESR_TABT)    /* Transmit Abort int */
                        mdp->stats.tx_aborted_errors++;
        }

        if (intr_status & EESR_RABT) {
                /* Receive Abort int */
                if (intr_status & EESR_RFRMER) {
                        /* Receive Frame Overflow int */
                        mdp->stats.rx_frame_errors++;
                        printk(KERN_ERR "Receive Frame Overflow\n");
                }
        }
#if !defined(CONFIG_CPU_SUBTYPE_SH7763)
        if (intr_status & EESR_ADE) {
                if (intr_status & EESR_TDE) {
                        if (intr_status & EESR_TFE)
                                mdp->stats.tx_fifo_errors++;
                }
        }
#endif

        if (intr_status & EESR_RDE) {
                /* Receive Descriptor Empty int */
                mdp->stats.rx_over_errors++;

                if (ctrl_inl(ioaddr + EDRRR) ^ EDRRR_R)
                        ctrl_outl(EDRRR_R, ioaddr + EDRRR);
                printk(KERN_ERR "Receive Descriptor Empty\n");
        }
        if (intr_status & EESR_RFE) {
                /* Receive FIFO Overflow int */
                mdp->stats.rx_fifo_errors++;
                printk(KERN_ERR "Receive FIFO Overflow\n");
        }
        if (intr_status & (EESR_TWB | EESR_TABT |
#if !defined(CONFIG_CPU_SUBTYPE_SH7763)
                           EESR_ADE |
#endif
                           EESR_TDE | EESR_TFE)) {
                /* Tx error */
                u32 edtrr = ctrl_inl(ndev->base_addr + EDTRR);
                /* dmesg */
                printk(KERN_ERR "%s:TX error. status=%8.8x cur_tx=%8.8x ",
                       ndev->name, intr_status, mdp->cur_tx);
                printk(KERN_ERR "dirty_tx=%8.8x state=%8.8x EDTRR=%8.8x.\n",
                       mdp->dirty_tx, (u32) ndev->state, edtrr);
                /* dirty buffer free */
                sh_eth_txfree(ndev);

                if (edtrr ^ EDTRR_TRNS) {
                        /* tx dmac start */
                        ctrl_outl(EDTRR_TRNS, ndev->base_addr + EDTRR);
                }
                /* wakeup */
                netif_wake_queue(ndev);
        }
}
static irqreturn_t sh_eth_interrupt(int irq, void *netdev)
{
        struct net_device *ndev = netdev;
        struct sh_eth_private *mdp = netdev_priv(ndev);
        irqreturn_t ret = IRQ_NONE;
        u32 ioaddr, boguscnt = RX_RING_SIZE;
        u32 intr_status = 0;

        ioaddr = ndev->base_addr;
        spin_lock(&mdp->lock);

        /* Get interrupt stat */
        intr_status = ctrl_inl(ioaddr + EESR);
        /* Clear interrupt */
        if (intr_status & (EESR_FRC | EESR_RMAF | EESR_RRF |
                           EESR_RTLF | EESR_RTSF | EESR_PRE | EESR_CERF |
                           TX_CHECK | EESR_ERR_CHECK)) {
                ctrl_outl(intr_status, ioaddr + EESR);
                ret = IRQ_HANDLED;
        } else
                goto other_irq;

        if (intr_status & (EESR_FRC | /* Frame recv*/
                           EESR_RMAF | /* Multi cast address recv*/
                           EESR_RRF  | /* Bit frame recv */
                           EESR_RTLF | /* Long frame recv*/
                           EESR_RTSF | /* short frame recv */
                           EESR_PRE  | /* PHY-LSI recv error */
                           EESR_CERF)) { /* recv frame CRC error */
                sh_eth_rx(ndev);
        }

        /* Tx Check */
        if (intr_status & TX_CHECK) {
                sh_eth_txfree(ndev);
                netif_wake_queue(ndev);
        }

        if (intr_status & EESR_ERR_CHECK)
                sh_eth_error(ndev, intr_status);

        if (--boguscnt < 0) {
                printk(KERN_WARNING
                       "%s: Too much work at interrupt, status=0x%4.4x.\n",
                       ndev->name, intr_status);
        }

other_irq:
        spin_unlock(&mdp->lock);

        return ret;
}
/* Link-check timer function */
static void sh_eth_timer(unsigned long data)
{
        struct net_device *ndev = (struct net_device *)data;
        struct sh_eth_private *mdp = netdev_priv(ndev);

        mod_timer(&mdp->timer, jiffies + (10 * HZ));
}
/* PHY state control function */
static void sh_eth_adjust_link(struct net_device *ndev)
{
        struct sh_eth_private *mdp = netdev_priv(ndev);
        struct phy_device *phydev = mdp->phydev;
        u32 ioaddr = ndev->base_addr;
        int new_state = 0;

        if (phydev->link != PHY_DOWN) {
                if (phydev->duplex != mdp->duplex) {
                        new_state = 1;
                        mdp->duplex = phydev->duplex;
#if defined(CONFIG_CPU_SUBTYPE_SH7763)
                        if (mdp->duplex) {      /* FULL */
                                ctrl_outl(ctrl_inl(ioaddr + ECMR) | ECMR_DM,
                                          ioaddr + ECMR);
                        } else {                /* Half */
                                ctrl_outl(ctrl_inl(ioaddr + ECMR) & ~ECMR_DM,
                                          ioaddr + ECMR);
                        }
#endif
                }

                if (phydev->speed != mdp->speed) {
                        new_state = 1;
                        mdp->speed = phydev->speed;
#if defined(CONFIG_CPU_SUBTYPE_SH7763)
                        switch (mdp->speed) {
                        case 10: /* 10BASE */
                                ctrl_outl(GECMR_10, ioaddr + GECMR); break;
                        case 100:/* 100BASE */
                                ctrl_outl(GECMR_100, ioaddr + GECMR); break;
                        case 1000: /* 1000BASE */
                                ctrl_outl(GECMR_1000, ioaddr + GECMR); break;
                        default:
                                break;
                        }
#endif
                }
                if (mdp->link == PHY_DOWN) {
                        ctrl_outl((ctrl_inl(ioaddr + ECMR) & ~ECMR_TXF)
                                  | ECMR_DM, ioaddr + ECMR);
                        new_state = 1;
                        mdp->link = phydev->link;
                }
        } else if (mdp->link) {
                new_state = 1;
                mdp->link = PHY_DOWN;
                mdp->speed = 0;
                mdp->duplex = -1;
        }

        if (new_state)
                phy_print_status(phydev);
}
/* PHY init function */
static int sh_eth_phy_init(struct net_device *ndev)
{
        struct sh_eth_private *mdp = netdev_priv(ndev);
        char phy_id[BUS_ID_SIZE];
        struct phy_device *phydev = NULL;

        snprintf(phy_id, sizeof(phy_id), PHY_ID_FMT,
                 mdp->mii_bus->id, mdp->phy_id);

        mdp->link = PHY_DOWN;
        mdp->speed = 0;
        mdp->duplex = -1;

        /* Try connect to PHY */
        phydev = phy_connect(ndev, phy_id, &sh_eth_adjust_link,
                             0, PHY_INTERFACE_MODE_MII);
        if (IS_ERR(phydev)) {
                dev_err(&ndev->dev, "phy_connect failed\n");
                return PTR_ERR(phydev);
        }
        dev_info(&ndev->dev, "attached phy %i to driver %s\n",
                 phydev->addr, phydev->drv->name);

        mdp->phydev = phydev;

        return 0;
}
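/*
 * Illustrative note on sh_eth_phy_init() above: assuming this kernel
 * generation's PHY_ID_FMT of "%s:%02x", an MII bus registered with
 * pdev->id == 0 and a platform phy id of 1 yields the string "0:01",
 * i.e. "<mii bus id>:<phy address>", which is what phy_connect() looks up.
 */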
/* PHY control start function */
static int sh_eth_phy_start(struct net_device *ndev)
{
        struct sh_eth_private *mdp = netdev_priv(ndev);
        int ret;

        ret = sh_eth_phy_init(ndev);
        if (ret)
                return ret;

        /* reset phy - this also wakes it from PDOWN */
        phy_write(mdp->phydev, MII_BMCR, BMCR_RESET);
        phy_start(mdp->phydev);

        return 0;
}
/* network device open function */
static int sh_eth_open(struct net_device *ndev)
{
        int ret = 0;
        struct sh_eth_private *mdp = netdev_priv(ndev);

        ret = request_irq(ndev->irq, &sh_eth_interrupt,
#if defined(CONFIG_CPU_SUBTYPE_SH7763) || defined(CONFIG_CPU_SUBTYPE_SH7764)
                          IRQF_SHARED,
#else
                          0,
#endif
                          ndev->name, ndev);
        if (ret) {
                printk(KERN_ERR "Can not assign IRQ number to %s\n", CARDNAME);
                return ret;
        }

        /* Descriptor set */
        ret = sh_eth_ring_init(ndev);
        if (ret)
                goto out_free_irq;

        /* device init */
        ret = sh_eth_dev_init(ndev);
        if (ret)
                goto out_free_irq;

        /* PHY control start*/
        ret = sh_eth_phy_start(ndev);
        if (ret)
                goto out_free_irq;

        /* Set the timer to check for link beat. */
        init_timer(&mdp->timer);
        mdp->timer.expires = (jiffies + (24 * HZ)) / 10;/* 2.4 sec. */
        setup_timer(&mdp->timer, sh_eth_timer, (unsigned long)ndev);

        return ret;

out_free_irq:
        free_irq(ndev->irq, ndev);
        return ret;
}
/* Timeout function */
static void sh_eth_tx_timeout(struct net_device *ndev)
{
        struct sh_eth_private *mdp = netdev_priv(ndev);
        u32 ioaddr = ndev->base_addr;
        struct sh_eth_rxdesc *rxdesc;
        int i;

        netif_stop_queue(ndev);

        /* warning message out. */
        printk(KERN_WARNING "%s: transmit timed out, status %8.8x,"
               " resetting...\n", ndev->name, (int)ctrl_inl(ioaddr + EESR));

        /* tx_errors count up */
        mdp->stats.tx_errors++;

        /* timer off */
        del_timer_sync(&mdp->timer);

        /* Free all the skbuffs in the Rx queue. */
        for (i = 0; i < RX_RING_SIZE; i++) {
                rxdesc = &mdp->rx_ring[i];
                rxdesc->status = 0;
                rxdesc->addr = 0xBADF00D0;
                if (mdp->rx_skbuff[i])
                        dev_kfree_skb(mdp->rx_skbuff[i]);
                mdp->rx_skbuff[i] = NULL;
        }
        for (i = 0; i < TX_RING_SIZE; i++) {
                if (mdp->tx_skbuff[i])
                        dev_kfree_skb(mdp->tx_skbuff[i]);
                mdp->tx_skbuff[i] = NULL;
        }

        /* device init */
        sh_eth_dev_init(ndev);

        /* timer on */
        mdp->timer.expires = (jiffies + (24 * HZ)) / 10;/* 2.4 sec. */
        add_timer(&mdp->timer);
}
/* Packet transmit function */
static int sh_eth_start_xmit(struct sk_buff *skb, struct net_device *ndev)
{
        struct sh_eth_private *mdp = netdev_priv(ndev);
        struct sh_eth_txdesc *txdesc;
        u32 entry;
        unsigned long flags;

        spin_lock_irqsave(&mdp->lock, flags);
        if ((mdp->cur_tx - mdp->dirty_tx) >= (TX_RING_SIZE - 4)) {
                if (!sh_eth_txfree(ndev)) {
                        netif_stop_queue(ndev);
                        spin_unlock_irqrestore(&mdp->lock, flags);
                        return NETDEV_TX_BUSY;
                }
        }
        spin_unlock_irqrestore(&mdp->lock, flags);

        entry = mdp->cur_tx % TX_RING_SIZE;
        mdp->tx_skbuff[entry] = skb;
        txdesc = &mdp->tx_ring[entry];
        txdesc->addr = virt_to_phys(skb->data);
        /* soft swap. */
        swaps(phys_to_virt(ALIGN(txdesc->addr, 4)), skb->len + 2);
        /* write back */
        __flush_purge_region(skb->data, skb->len);
        if (skb->len < ETHERSMALL)
                txdesc->buffer_length = ETHERSMALL;
        else
                txdesc->buffer_length = skb->len;

        if (entry >= TX_RING_SIZE - 1)
                txdesc->status |= cpu_to_edmac(mdp, TD_TACT | TD_TDLE);
        else
                txdesc->status |= cpu_to_edmac(mdp, TD_TACT);

        mdp->cur_tx++;

        if (!(ctrl_inl(ndev->base_addr + EDTRR) & EDTRR_TRNS))
                ctrl_outl(EDTRR_TRNS, ndev->base_addr + EDTRR);

        ndev->trans_start = jiffies;

        return NETDEV_TX_OK;
}
/* device close function */
static int sh_eth_close(struct net_device *ndev)
{
        struct sh_eth_private *mdp = netdev_priv(ndev);
        u32 ioaddr = ndev->base_addr;
        int ringsize;

        netif_stop_queue(ndev);

        /* Disable interrupts by clearing the interrupt mask. */
        ctrl_outl(0x0000, ioaddr + EESIPR);

        /* Stop the chip's Tx and Rx processes. */
        ctrl_outl(0, ioaddr + EDTRR);
        ctrl_outl(0, ioaddr + EDRRR);

        /* PHY Disconnect */
        if (mdp->phydev) {
                phy_stop(mdp->phydev);
                phy_disconnect(mdp->phydev);
        }

        free_irq(ndev->irq, ndev);

        del_timer_sync(&mdp->timer);

        /* Free all the skbuffs in the Rx queue. */
        sh_eth_ring_free(ndev);

        /* free DMA buffer */
        ringsize = sizeof(struct sh_eth_rxdesc) * RX_RING_SIZE;
        dma_free_coherent(NULL, ringsize, mdp->rx_ring, mdp->rx_desc_dma);

        /* free DMA buffer */
        ringsize = sizeof(struct sh_eth_txdesc) * TX_RING_SIZE;
        dma_free_coherent(NULL, ringsize, mdp->tx_ring, mdp->tx_desc_dma);

        return 0;
}
static struct net_device_stats *sh_eth_get_stats(struct net_device *ndev)
{
        struct sh_eth_private *mdp = netdev_priv(ndev);
        u32 ioaddr = ndev->base_addr;

        mdp->stats.tx_dropped += ctrl_inl(ioaddr + TROCR);
        ctrl_outl(0, ioaddr + TROCR);   /* (write clear) */
        mdp->stats.collisions += ctrl_inl(ioaddr + CDCR);
        ctrl_outl(0, ioaddr + CDCR);    /* (write clear) */
        mdp->stats.tx_carrier_errors += ctrl_inl(ioaddr + LCCR);
        ctrl_outl(0, ioaddr + LCCR);    /* (write clear) */
#if defined(CONFIG_CPU_SUBTYPE_SH7763)
        mdp->stats.tx_carrier_errors += ctrl_inl(ioaddr + CERCR);/* CERCR */
        ctrl_outl(0, ioaddr + CERCR);   /* (write clear) */
        mdp->stats.tx_carrier_errors += ctrl_inl(ioaddr + CEECR);/* CEECR */
        ctrl_outl(0, ioaddr + CEECR);   /* (write clear) */
#else
        mdp->stats.tx_carrier_errors += ctrl_inl(ioaddr + CNDCR);
        ctrl_outl(0, ioaddr + CNDCR);   /* (write clear) */
#endif
        return &mdp->stats;
}
/* ioctl to device function */
static int sh_eth_do_ioctl(struct net_device *ndev, struct ifreq *rq,
                           int cmd)
{
        struct sh_eth_private *mdp = netdev_priv(ndev);
        struct phy_device *phydev = mdp->phydev;

        if (!netif_running(ndev))
                return -EINVAL;

        if (!phydev)
                return -ENODEV;

        return phy_mii_ioctl(phydev, if_mii(rq), cmd);
}
/* Multicast reception directions set */
static void sh_eth_set_multicast_list(struct net_device *ndev)
{
        u32 ioaddr = ndev->base_addr;

        if (ndev->flags & IFF_PROMISC) {
                /* Set promiscuous. */
                ctrl_outl((ctrl_inl(ioaddr + ECMR) & ~ECMR_MCT) | ECMR_PRM,
                          ioaddr + ECMR);
        } else {
                /* Normal, unicast/broadcast-only mode. */
                ctrl_outl((ctrl_inl(ioaddr + ECMR) & ~ECMR_PRM) | ECMR_MCT,
                          ioaddr + ECMR);
        }
}
/* SuperH's TSU register init function */
static void sh_eth_tsu_init(u32 ioaddr)
{
        ctrl_outl(0, ioaddr + TSU_FWEN0);       /* Disable forward(0->1) */
        ctrl_outl(0, ioaddr + TSU_FWEN1);       /* Disable forward(1->0) */
        ctrl_outl(0, ioaddr + TSU_FCM);         /* forward fifo 3k-3k */
        ctrl_outl(0xc, ioaddr + TSU_BSYSL0);
        ctrl_outl(0xc, ioaddr + TSU_BSYSL1);
        ctrl_outl(0, ioaddr + TSU_PRISL0);
        ctrl_outl(0, ioaddr + TSU_PRISL1);
        ctrl_outl(0, ioaddr + TSU_FWSL0);
        ctrl_outl(0, ioaddr + TSU_FWSL1);
        ctrl_outl(TSU_FWSLC_POSTENU | TSU_FWSLC_POSTENL, ioaddr + TSU_FWSLC);
#if defined(CONFIG_CPU_SUBTYPE_SH7763)
        ctrl_outl(0, ioaddr + TSU_QTAG0);       /* Disable QTAG(0->1) */
        ctrl_outl(0, ioaddr + TSU_QTAG1);       /* Disable QTAG(1->0) */
#else
        ctrl_outl(0, ioaddr + TSU_QTAGM0);      /* Disable QTAG(0->1) */
        ctrl_outl(0, ioaddr + TSU_QTAGM1);      /* Disable QTAG(1->0) */
#endif
        ctrl_outl(0, ioaddr + TSU_FWSR);        /* all interrupt status clear */
        ctrl_outl(0, ioaddr + TSU_FWINMK);      /* Disable all interrupt */
        ctrl_outl(0, ioaddr + TSU_TEN);         /* Disable all CAM entry */
        ctrl_outl(0, ioaddr + TSU_POST1);       /* Disable CAM entry [ 0- 7] */
        ctrl_outl(0, ioaddr + TSU_POST2);       /* Disable CAM entry [ 8-15] */
        ctrl_outl(0, ioaddr + TSU_POST3);       /* Disable CAM entry [16-23] */
        ctrl_outl(0, ioaddr + TSU_POST4);       /* Disable CAM entry [24-31] */
}
/* MDIO bus release function */
static int sh_mdio_release(struct net_device *ndev)
{
        struct mii_bus *bus = dev_get_drvdata(&ndev->dev);

        /* unregister mdio bus */
        mdiobus_unregister(bus);

        /* remove mdio bus info from net_device */
        dev_set_drvdata(&ndev->dev, NULL);

        /* free bitbang info */
        free_mdio_bitbang(bus);

        return 0;
}
/* MDIO bus init function */
static int sh_mdio_init(struct net_device *ndev, int id)
{
        int ret, i;
        struct bb_info *bitbang;
        struct sh_eth_private *mdp = netdev_priv(ndev);

        /* create bit control struct for PHY */
        bitbang = kzalloc(sizeof(struct bb_info), GFP_KERNEL);
        if (!bitbang) {
                ret = -ENOMEM;
                goto out;
        }

        /* bitbang init */
        bitbang->addr = ndev->base_addr + PIR;
        bitbang->mdi_msk = 0x08;
        bitbang->mdo_msk = 0x04;
        bitbang->mmd_msk = 0x02;/* MMD */
        bitbang->mdc_msk = 0x01;
        bitbang->ctrl.ops = &bb_ops;

        /* MII controller setting */
        mdp->mii_bus = alloc_mdio_bitbang(&bitbang->ctrl);
        if (!mdp->mii_bus) {
                ret = -ENOMEM;
                goto out_free_bitbang;
        }

        /* Hook up MII support for ethtool */
        mdp->mii_bus->name = "sh_mii";
        mdp->mii_bus->parent = &ndev->dev;
        snprintf(mdp->mii_bus->id, MII_BUS_ID_SIZE, "%x", id);

        /* PHY IRQ */
        mdp->mii_bus->irq = kmalloc(sizeof(int)*PHY_MAX_ADDR, GFP_KERNEL);
        if (!mdp->mii_bus->irq) {
                ret = -ENOMEM;
                goto out_free_bus;
        }

        for (i = 0; i < PHY_MAX_ADDR; i++)
                mdp->mii_bus->irq[i] = PHY_POLL;

        /* register mdio bus */
        ret = mdiobus_register(mdp->mii_bus);
        if (ret)
                goto out_free_irq;

        dev_set_drvdata(&ndev->dev, mdp->mii_bus);

        return 0;

out_free_irq:
        kfree(mdp->mii_bus->irq);

out_free_bus:
        free_mdio_bitbang(mdp->mii_bus);

out_free_bitbang:
        kfree(bitbang);

out:
        return ret;
}
static const struct net_device_ops sh_eth_netdev_ops = {
        .ndo_open               = sh_eth_open,
        .ndo_stop               = sh_eth_close,
        .ndo_start_xmit         = sh_eth_start_xmit,
        .ndo_get_stats          = sh_eth_get_stats,
        .ndo_set_multicast_list = sh_eth_set_multicast_list,
        .ndo_tx_timeout         = sh_eth_tx_timeout,
        .ndo_do_ioctl           = sh_eth_do_ioctl,
        .ndo_validate_addr      = eth_validate_addr,
        .ndo_set_mac_address    = eth_mac_addr,
        .ndo_change_mtu         = eth_change_mtu,
};
static int sh_eth_drv_probe(struct platform_device *pdev)
{
        int ret, i, devno = 0;
        struct resource *res;
        struct net_device *ndev = NULL;
        struct sh_eth_private *mdp;
        struct sh_eth_plat_data *pd;

        /* get base addr */
        res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
        if (unlikely(res == NULL)) {
                dev_err(&pdev->dev, "invalid resource\n");
                ret = -EINVAL;
                goto out;
        }

        ndev = alloc_etherdev(sizeof(struct sh_eth_private));
        if (!ndev) {
                printk(KERN_ERR "%s: could not allocate device.\n", CARDNAME);
                ret = -ENOMEM;
                goto out;
        }

        /* The sh Ether-specific entries in the device structure. */
        ndev->base_addr = res->start;
        devno = pdev->id;
        if (devno < 0)
                devno = 0;

        ndev->dma = -1;
        ret = platform_get_irq(pdev, 0);
        if (ret < 0) {
                ret = -ENODEV;
                goto out_release;
        }
        ndev->irq = ret;

        SET_NETDEV_DEV(ndev, &pdev->dev);

        /* Fill in the fields of the device structure with ethernet values. */
        mdp = netdev_priv(ndev);
        spin_lock_init(&mdp->lock);

        pd = (struct sh_eth_plat_data *)(pdev->dev.platform_data);
        /* get PHY ID */
        mdp->phy_id = pd->phy;
        /* EDMAC endian */
        mdp->edmac_endian = pd->edmac_endian;

        /* set function */
        ndev->netdev_ops = &sh_eth_netdev_ops;
        ndev->watchdog_timeo = TX_TIMEOUT;

        mdp->post_rx = POST_RX >> (devno << 1);
        mdp->post_fw = POST_FW >> (devno << 1);

        /* read and set MAC address */
        read_mac_address(ndev);

        /* First device only init */
        if (!devno) {
                /* reset device */
                ctrl_outl(ARSTR_ARSTR, ARSTR);
                mdelay(1);

#if defined(SH_TSU_ADDR)
                /* TSU init (Init only)*/
                sh_eth_tsu_init(SH_TSU_ADDR);
#endif
        }

        /* network device register */
        ret = register_netdev(ndev);
        if (ret)
                goto out_release;

        /* mdio bus init */
        ret = sh_mdio_init(ndev, pdev->id);
        if (ret)
                goto out_unregister;

        /* print device information */
        printk(KERN_INFO "%s: %s at 0x%x, ",
               ndev->name, CARDNAME, (u32) ndev->base_addr);

        /* MAC address */
        for (i = 0; i < 5; i++)
                printk("%02X:", ndev->dev_addr[i]);
        printk("%02X, IRQ %d.\n", ndev->dev_addr[i], ndev->irq);

        platform_set_drvdata(pdev, ndev);

        return ret;

out_unregister:
        unregister_netdev(ndev);

out_release:
        /* net_dev free */
        if (ndev)
                free_netdev(ndev);

out:
        return ret;
}
static int sh_eth_drv_remove(struct platform_device *pdev)
{
        struct net_device *ndev = platform_get_drvdata(pdev);

        sh_mdio_release(ndev);
        unregister_netdev(ndev);
        flush_scheduled_work();

        free_netdev(ndev);
        platform_set_drvdata(pdev, NULL);

        return 0;
}
static struct platform_driver sh_eth_driver = {
        .probe = sh_eth_drv_probe,
        .remove = sh_eth_drv_remove,
        .driver = {
                .name = CARDNAME,
        },
};
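/*
 * Example board-file registration (illustrative sketch only, not part of
 * this driver; the resource addresses, IRQ number and the sh_eth_pdev /
 * sh_eth_pdata names are hypothetical, and the device name is assumed to
 * match CARDNAME from sh_eth.h, i.e. "sh-eth"):
 *
 *	static struct resource sh_eth_resources[] = {
 *		[0] = { .start = 0xA7000000, .end = 0xA70001FF,
 *			.flags = IORESOURCE_MEM },
 *		[1] = { .start = 57, .flags = IORESOURCE_IRQ },
 *	};
 *
 *	static struct sh_eth_plat_data sh_eth_pdata = {
 *		.phy = 1,
 *		.edmac_endian = EDMAC_LITTLE_ENDIAN,
 *	};
 *
 *	static struct platform_device sh_eth_pdev = {
 *		.name = "sh-eth",
 *		.id = 0,
 *		.dev = { .platform_data = &sh_eth_pdata },
 *		.num_resources = ARRAY_SIZE(sh_eth_resources),
 *		.resource = sh_eth_resources,
 *	};
 *
 *	platform_device_register(&sh_eth_pdev);
 */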
static int __init sh_eth_init(void)
{
        return platform_driver_register(&sh_eth_driver);
}
static void __exit sh_eth_cleanup(void)
{
        platform_driver_unregister(&sh_eth_driver);
}
module_init(sh_eth_init);
module_exit(sh_eth_cleanup);

MODULE_AUTHOR("Nobuhiro Iwamatsu, Yoshihiro Shimoda");
MODULE_DESCRIPTION("Renesas SuperH Ethernet driver");
MODULE_LICENSE("GPL v2");