/*
 * Driver for Xilinx TEMAC Ethernet device
 *
 * Copyright (c) 2008 Nissin Systems Co., Ltd.,  Yoshio Kashiwagi
 * Copyright (c) 2005-2008 DLA Systems,  David H. Lynch Jr. <dhlii@dlasys.net>
 * Copyright (c) 2008-2009 Secret Lab Technologies Ltd.
 *
 * This is a driver for the Xilinx ll_temac ipcore which is often used
 * in the Virtex and Spartan series of chips.
 *
 * Notes:
 * - The ll_temac hardware uses indirect access for many of the TEMAC
 *   registers, include the MDIO bus.  However, indirect access to MDIO
 *   registers take considerably more clock cycles than to TEMAC registers.
 *   MDIO accesses are long, so threads doing them should probably sleep
 *   rather than busywait.  However, since only one indirect access can be
 *   in progress at any given time, that means that *all* indirect accesses
 *   could end up sleeping (to wait for an MDIO access to complete).
 *   Fortunately none of the indirect accesses are on the 'hot' path for tx
 *   or rx, so this should be okay.
 *
 * TODO:
 * - Fix driver to work on more than just Virtex5.  Right now the driver
 *   assumes that the locallink DMA registers are accessed via DCR
 *   instructions.
 * - Factor out locallink DMA code into separate driver
 * - Fix multicast assignment.
 * - Fix support for hardware checksumming.
 * - Testing.  Lots and lots of testing.
 */
33 #include <linux/delay.h>
34 #include <linux/etherdevice.h>
35 #include <linux/init.h>
36 #include <linux/mii.h>
37 #include <linux/module.h>
38 #include <linux/mutex.h>
39 #include <linux/netdevice.h>
41 #include <linux/of_device.h>
42 #include <linux/of_mdio.h>
43 #include <linux/of_platform.h>
44 #include <linux/skbuff.h>
45 #include <linux/spinlock.h>
46 #include <linux/tcp.h> /* needed for sizeof(tcphdr) */
47 #include <linux/udp.h> /* needed for sizeof(udphdr) */
48 #include <linux/phy.h>
58 /* ---------------------------------------------------------------------
59 * Low level register access functions
62 u32
temac_ior(struct temac_local
*lp
, int offset
)
64 return in_be32((u32
*)(lp
->regs
+ offset
));
67 void temac_iow(struct temac_local
*lp
, int offset
, u32 value
)
69 out_be32((u32
*) (lp
->regs
+ offset
), value
);
72 int temac_indirect_busywait(struct temac_local
*lp
)
74 long end
= jiffies
+ 2;
76 while (!(temac_ior(lp
, XTE_RDY0_OFFSET
) & XTE_RDY0_HARD_ACS_RDY_MASK
)) {
77 if (end
- jiffies
<= 0) {
89 * lp->indirect_mutex must be held when calling this function
91 u32
temac_indirect_in32(struct temac_local
*lp
, int reg
)
95 if (temac_indirect_busywait(lp
))
97 temac_iow(lp
, XTE_CTL0_OFFSET
, reg
);
98 if (temac_indirect_busywait(lp
))
100 val
= temac_ior(lp
, XTE_LSW0_OFFSET
);
106 * temac_indirect_out32
108 * lp->indirect_mutex must be held when calling this function
110 void temac_indirect_out32(struct temac_local
*lp
, int reg
, u32 value
)
112 if (temac_indirect_busywait(lp
))
114 temac_iow(lp
, XTE_LSW0_OFFSET
, value
);
115 temac_iow(lp
, XTE_CTL0_OFFSET
, CNTLREG_WRITE_ENABLE_MASK
| reg
);
118 static u32
temac_dma_in32(struct temac_local
*lp
, int reg
)
120 return dcr_read(lp
->sdma_dcrs
, reg
);
123 static void temac_dma_out32(struct temac_local
*lp
, int reg
, u32 value
)
125 dcr_write(lp
->sdma_dcrs
, reg
, value
);
129 * temac_dma_bd_init - Setup buffer descriptor rings
131 static int temac_dma_bd_init(struct net_device
*ndev
)
133 struct temac_local
*lp
= netdev_priv(ndev
);
137 lp
->rx_skb
= kzalloc(sizeof(struct sk_buff
)*RX_BD_NUM
, GFP_KERNEL
);
138 /* allocate the tx and rx ring buffer descriptors. */
139 /* returns a virtual addres and a physical address. */
140 lp
->tx_bd_v
= dma_alloc_coherent(ndev
->dev
.parent
,
141 sizeof(*lp
->tx_bd_v
) * TX_BD_NUM
,
142 &lp
->tx_bd_p
, GFP_KERNEL
);
143 lp
->rx_bd_v
= dma_alloc_coherent(ndev
->dev
.parent
,
144 sizeof(*lp
->rx_bd_v
) * RX_BD_NUM
,
145 &lp
->rx_bd_p
, GFP_KERNEL
);
147 memset(lp
->tx_bd_v
, 0, sizeof(*lp
->tx_bd_v
) * TX_BD_NUM
);
148 for (i
= 0; i
< TX_BD_NUM
; i
++) {
149 lp
->tx_bd_v
[i
].next
= lp
->tx_bd_p
+
150 sizeof(*lp
->tx_bd_v
) * ((i
+ 1) % TX_BD_NUM
);
153 memset(lp
->rx_bd_v
, 0, sizeof(*lp
->rx_bd_v
) * RX_BD_NUM
);
154 for (i
= 0; i
< RX_BD_NUM
; i
++) {
155 lp
->rx_bd_v
[i
].next
= lp
->rx_bd_p
+
156 sizeof(*lp
->rx_bd_v
) * ((i
+ 1) % RX_BD_NUM
);
158 skb
= alloc_skb(XTE_MAX_JUMBO_FRAME_SIZE
159 + XTE_ALIGN
, GFP_ATOMIC
);
161 dev_err(&ndev
->dev
, "alloc_skb error %d\n", i
);
165 skb_reserve(skb
, BUFFER_ALIGN(skb
->data
));
166 /* returns physical address of skb->data */
167 lp
->rx_bd_v
[i
].phys
= dma_map_single(ndev
->dev
.parent
,
169 XTE_MAX_JUMBO_FRAME_SIZE
,
171 lp
->rx_bd_v
[i
].len
= XTE_MAX_JUMBO_FRAME_SIZE
;
172 lp
->rx_bd_v
[i
].app0
= STS_CTRL_APP0_IRQONEND
;
175 temac_dma_out32(lp
, TX_CHNL_CTRL
, 0x10220400 |
177 CHNL_CTRL_IRQ_DLY_EN
|
178 CHNL_CTRL_IRQ_COAL_EN
);
181 temac_dma_out32(lp
, RX_CHNL_CTRL
, 0xff010000 |
183 CHNL_CTRL_IRQ_DLY_EN
|
184 CHNL_CTRL_IRQ_COAL_EN
|
188 temac_dma_out32(lp
, RX_CURDESC_PTR
, lp
->rx_bd_p
);
189 temac_dma_out32(lp
, RX_TAILDESC_PTR
,
190 lp
->rx_bd_p
+ (sizeof(*lp
->rx_bd_v
) * (RX_BD_NUM
- 1)));
191 temac_dma_out32(lp
, TX_CURDESC_PTR
, lp
->tx_bd_p
);
196 /* ---------------------------------------------------------------------
200 static int temac_set_mac_address(struct net_device
*ndev
, void *address
)
202 struct temac_local
*lp
= netdev_priv(ndev
);
205 memcpy(ndev
->dev_addr
, address
, ETH_ALEN
);
207 if (!is_valid_ether_addr(ndev
->dev_addr
))
208 random_ether_addr(ndev
->dev_addr
);
210 /* set up unicast MAC address filter set its mac address */
211 mutex_lock(&lp
->indirect_mutex
);
212 temac_indirect_out32(lp
, XTE_UAW0_OFFSET
,
213 (ndev
->dev_addr
[0]) |
214 (ndev
->dev_addr
[1] << 8) |
215 (ndev
->dev_addr
[2] << 16) |
216 (ndev
->dev_addr
[3] << 24));
217 /* There are reserved bits in EUAW1
218 * so don't affect them Set MAC bits [47:32] in EUAW1 */
219 temac_indirect_out32(lp
, XTE_UAW1_OFFSET
,
220 (ndev
->dev_addr
[4] & 0x000000ff) |
221 (ndev
->dev_addr
[5] << 8));
222 mutex_unlock(&lp
->indirect_mutex
);
227 static void temac_set_multicast_list(struct net_device
*ndev
)
229 struct temac_local
*lp
= netdev_priv(ndev
);
230 u32 multi_addr_msw
, multi_addr_lsw
, val
;
233 mutex_lock(&lp
->indirect_mutex
);
234 if (ndev
->flags
& (IFF_ALLMULTI
| IFF_PROMISC
)
235 || ndev
->mc_count
> MULTICAST_CAM_TABLE_NUM
) {
237 * We must make the kernel realise we had to move
238 * into promisc mode or we start all out war on
239 * the cable. If it was a promisc request the
240 * flag is already set. If not we assert it.
242 ndev
->flags
|= IFF_PROMISC
;
243 temac_indirect_out32(lp
, XTE_AFM_OFFSET
, XTE_AFM_EPPRM_MASK
);
244 dev_info(&ndev
->dev
, "Promiscuous mode enabled.\n");
245 } else if (ndev
->mc_count
) {
246 struct dev_mc_list
*mclist
= ndev
->mc_list
;
247 for (i
= 0; mclist
&& i
< ndev
->mc_count
; i
++) {
249 if (i
>= MULTICAST_CAM_TABLE_NUM
)
251 multi_addr_msw
= ((mclist
->dmi_addr
[3] << 24) |
252 (mclist
->dmi_addr
[2] << 16) |
253 (mclist
->dmi_addr
[1] << 8) |
254 (mclist
->dmi_addr
[0]));
255 temac_indirect_out32(lp
, XTE_MAW0_OFFSET
,
257 multi_addr_lsw
= ((mclist
->dmi_addr
[5] << 8) |
258 (mclist
->dmi_addr
[4]) | (i
<< 16));
259 temac_indirect_out32(lp
, XTE_MAW1_OFFSET
,
261 mclist
= mclist
->next
;
264 val
= temac_indirect_in32(lp
, XTE_AFM_OFFSET
);
265 temac_indirect_out32(lp
, XTE_AFM_OFFSET
,
266 val
& ~XTE_AFM_EPPRM_MASK
);
267 temac_indirect_out32(lp
, XTE_MAW0_OFFSET
, 0);
268 temac_indirect_out32(lp
, XTE_MAW1_OFFSET
, 0);
269 dev_info(&ndev
->dev
, "Promiscuous mode disabled.\n");
271 mutex_unlock(&lp
->indirect_mutex
);
274 struct temac_option
{
280 } temac_options
[] = {
281 /* Turn on jumbo packet support for both Rx and Tx */
283 .opt
= XTE_OPTION_JUMBO
,
284 .reg
= XTE_TXC_OFFSET
,
285 .m_or
= XTE_TXC_TXJMBO_MASK
,
288 .opt
= XTE_OPTION_JUMBO
,
289 .reg
= XTE_RXC1_OFFSET
,
290 .m_or
=XTE_RXC1_RXJMBO_MASK
,
292 /* Turn on VLAN packet support for both Rx and Tx */
294 .opt
= XTE_OPTION_VLAN
,
295 .reg
= XTE_TXC_OFFSET
,
296 .m_or
=XTE_TXC_TXVLAN_MASK
,
299 .opt
= XTE_OPTION_VLAN
,
300 .reg
= XTE_RXC1_OFFSET
,
301 .m_or
=XTE_RXC1_RXVLAN_MASK
,
303 /* Turn on FCS stripping on receive packets */
305 .opt
= XTE_OPTION_FCS_STRIP
,
306 .reg
= XTE_RXC1_OFFSET
,
307 .m_or
=XTE_RXC1_RXFCS_MASK
,
309 /* Turn on FCS insertion on transmit packets */
311 .opt
= XTE_OPTION_FCS_INSERT
,
312 .reg
= XTE_TXC_OFFSET
,
313 .m_or
=XTE_TXC_TXFCS_MASK
,
315 /* Turn on length/type field checking on receive packets */
317 .opt
= XTE_OPTION_LENTYPE_ERR
,
318 .reg
= XTE_RXC1_OFFSET
,
319 .m_or
=XTE_RXC1_RXLT_MASK
,
321 /* Turn on flow control */
323 .opt
= XTE_OPTION_FLOW_CONTROL
,
324 .reg
= XTE_FCC_OFFSET
,
325 .m_or
=XTE_FCC_RXFLO_MASK
,
327 /* Turn on flow control */
329 .opt
= XTE_OPTION_FLOW_CONTROL
,
330 .reg
= XTE_FCC_OFFSET
,
331 .m_or
=XTE_FCC_TXFLO_MASK
,
333 /* Turn on promiscuous frame filtering (all frames are received ) */
335 .opt
= XTE_OPTION_PROMISC
,
336 .reg
= XTE_AFM_OFFSET
,
337 .m_or
=XTE_AFM_EPPRM_MASK
,
339 /* Enable transmitter if not already enabled */
341 .opt
= XTE_OPTION_TXEN
,
342 .reg
= XTE_TXC_OFFSET
,
343 .m_or
=XTE_TXC_TXEN_MASK
,
345 /* Enable receiver? */
347 .opt
= XTE_OPTION_RXEN
,
348 .reg
= XTE_RXC1_OFFSET
,
349 .m_or
=XTE_RXC1_RXEN_MASK
,
357 static u32
temac_setoptions(struct net_device
*ndev
, u32 options
)
359 struct temac_local
*lp
= netdev_priv(ndev
);
360 struct temac_option
*tp
= &temac_options
[0];
363 mutex_lock(&lp
->indirect_mutex
);
365 reg
= temac_indirect_in32(lp
, tp
->reg
) & ~tp
->m_or
;
366 if (options
& tp
->opt
)
368 temac_indirect_out32(lp
, tp
->reg
, reg
);
371 lp
->options
|= options
;
372 mutex_unlock(&lp
->indirect_mutex
);
377 /* Initilize temac */
378 static void temac_device_reset(struct net_device
*ndev
)
380 struct temac_local
*lp
= netdev_priv(ndev
);
384 /* Perform a software reset */
386 /* 0x300 host enable bit ? */
387 /* reset PHY through control register ?:1 */
389 dev_dbg(&ndev
->dev
, "%s()\n", __func__
);
391 mutex_lock(&lp
->indirect_mutex
);
392 /* Reset the receiver and wait for it to finish reset */
393 temac_indirect_out32(lp
, XTE_RXC1_OFFSET
, XTE_RXC1_RXRST_MASK
);
395 while (temac_indirect_in32(lp
, XTE_RXC1_OFFSET
) & XTE_RXC1_RXRST_MASK
) {
397 if (--timeout
== 0) {
399 "temac_device_reset RX reset timeout!!\n");
404 /* Reset the transmitter and wait for it to finish reset */
405 temac_indirect_out32(lp
, XTE_TXC_OFFSET
, XTE_TXC_TXRST_MASK
);
407 while (temac_indirect_in32(lp
, XTE_TXC_OFFSET
) & XTE_TXC_TXRST_MASK
) {
409 if (--timeout
== 0) {
411 "temac_device_reset TX reset timeout!!\n");
416 /* Disable the receiver */
417 val
= temac_indirect_in32(lp
, XTE_RXC1_OFFSET
);
418 temac_indirect_out32(lp
, XTE_RXC1_OFFSET
, val
& ~XTE_RXC1_RXEN_MASK
);
420 /* Reset Local Link (DMA) */
421 temac_dma_out32(lp
, DMA_CONTROL_REG
, DMA_CONTROL_RST
);
423 while (temac_dma_in32(lp
, DMA_CONTROL_REG
) & DMA_CONTROL_RST
) {
425 if (--timeout
== 0) {
427 "temac_device_reset DMA reset timeout!!\n");
431 temac_dma_out32(lp
, DMA_CONTROL_REG
, DMA_TAIL_ENABLE
);
433 temac_dma_bd_init(ndev
);
435 temac_indirect_out32(lp
, XTE_RXC0_OFFSET
, 0);
436 temac_indirect_out32(lp
, XTE_RXC1_OFFSET
, 0);
437 temac_indirect_out32(lp
, XTE_TXC_OFFSET
, 0);
438 temac_indirect_out32(lp
, XTE_FCC_OFFSET
, XTE_FCC_RXFLO_MASK
);
440 mutex_unlock(&lp
->indirect_mutex
);
442 /* Sync default options with HW
443 * but leave receiver and transmitter disabled. */
444 temac_setoptions(ndev
,
445 lp
->options
& ~(XTE_OPTION_TXEN
| XTE_OPTION_RXEN
));
447 temac_set_mac_address(ndev
, NULL
);
449 /* Set address filter table */
450 temac_set_multicast_list(ndev
);
451 if (temac_setoptions(ndev
, lp
->options
))
452 dev_err(&ndev
->dev
, "Error setting TEMAC options\n");
454 /* Init Driver variable */
455 ndev
->trans_start
= 0;
458 void temac_adjust_link(struct net_device
*ndev
)
460 struct temac_local
*lp
= netdev_priv(ndev
);
461 struct phy_device
*phy
= lp
->phy_dev
;
465 /* hash together the state values to decide if something has changed */
466 link_state
= phy
->speed
| (phy
->duplex
<< 1) | phy
->link
;
468 mutex_lock(&lp
->indirect_mutex
);
469 if (lp
->last_link
!= link_state
) {
470 mii_speed
= temac_indirect_in32(lp
, XTE_EMCFG_OFFSET
);
471 mii_speed
&= ~XTE_EMCFG_LINKSPD_MASK
;
473 switch (phy
->speed
) {
474 case SPEED_1000
: mii_speed
|= XTE_EMCFG_LINKSPD_1000
; break;
475 case SPEED_100
: mii_speed
|= XTE_EMCFG_LINKSPD_100
; break;
476 case SPEED_10
: mii_speed
|= XTE_EMCFG_LINKSPD_10
; break;
479 /* Write new speed setting out to TEMAC */
480 temac_indirect_out32(lp
, XTE_EMCFG_OFFSET
, mii_speed
);
481 lp
->last_link
= link_state
;
482 phy_print_status(phy
);
484 mutex_unlock(&lp
->indirect_mutex
);
487 static void temac_start_xmit_done(struct net_device
*ndev
)
489 struct temac_local
*lp
= netdev_priv(ndev
);
490 struct cdmac_bd
*cur_p
;
491 unsigned int stat
= 0;
493 cur_p
= &lp
->tx_bd_v
[lp
->tx_bd_ci
];
496 while (stat
& STS_CTRL_APP0_CMPLT
) {
497 dma_unmap_single(ndev
->dev
.parent
, cur_p
->phys
, cur_p
->len
,
500 dev_kfree_skb_irq((struct sk_buff
*)cur_p
->app4
);
503 ndev
->stats
.tx_packets
++;
504 ndev
->stats
.tx_bytes
+= cur_p
->len
;
507 if (lp
->tx_bd_ci
>= TX_BD_NUM
)
510 cur_p
= &lp
->tx_bd_v
[lp
->tx_bd_ci
];
514 netif_wake_queue(ndev
);
517 static int temac_start_xmit(struct sk_buff
*skb
, struct net_device
*ndev
)
519 struct temac_local
*lp
= netdev_priv(ndev
);
520 struct cdmac_bd
*cur_p
;
521 dma_addr_t start_p
, tail_p
;
523 unsigned long num_frag
;
526 num_frag
= skb_shinfo(skb
)->nr_frags
;
527 frag
= &skb_shinfo(skb
)->frags
[0];
528 start_p
= lp
->tx_bd_p
+ sizeof(*lp
->tx_bd_v
) * lp
->tx_bd_tail
;
529 cur_p
= &lp
->tx_bd_v
[lp
->tx_bd_tail
];
531 if (cur_p
->app0
& STS_CTRL_APP0_CMPLT
) {
532 if (!netif_queue_stopped(ndev
)) {
533 netif_stop_queue(ndev
);
534 return NETDEV_TX_BUSY
;
536 return NETDEV_TX_BUSY
;
540 if (skb
->ip_summed
== CHECKSUM_PARTIAL
) {
541 const struct iphdr
*ip
= ip_hdr(skb
);
542 int length
= 0, start
= 0, insert
= 0;
544 switch (ip
->protocol
) {
546 start
= sizeof(struct iphdr
) + ETH_HLEN
;
547 insert
= sizeof(struct iphdr
) + ETH_HLEN
+ 16;
548 length
= ip
->tot_len
- sizeof(struct iphdr
);
551 start
= sizeof(struct iphdr
) + ETH_HLEN
;
552 insert
= sizeof(struct iphdr
) + ETH_HLEN
+ 6;
553 length
= ip
->tot_len
- sizeof(struct iphdr
);
558 cur_p
->app1
= ((start
<< 16) | insert
);
559 cur_p
->app2
= csum_tcpudp_magic(ip
->saddr
, ip
->daddr
,
560 length
, ip
->protocol
, 0);
561 skb
->data
[insert
] = 0;
562 skb
->data
[insert
+ 1] = 0;
564 cur_p
->app0
|= STS_CTRL_APP0_SOP
;
565 cur_p
->len
= skb_headlen(skb
);
566 cur_p
->phys
= dma_map_single(ndev
->dev
.parent
, skb
->data
, skb
->len
,
568 cur_p
->app4
= (unsigned long)skb
;
570 for (ii
= 0; ii
< num_frag
; ii
++) {
572 if (lp
->tx_bd_tail
>= TX_BD_NUM
)
575 cur_p
= &lp
->tx_bd_v
[lp
->tx_bd_tail
];
576 cur_p
->phys
= dma_map_single(ndev
->dev
.parent
,
577 (void *)page_address(frag
->page
) +
579 frag
->size
, DMA_TO_DEVICE
);
580 cur_p
->len
= frag
->size
;
584 cur_p
->app0
|= STS_CTRL_APP0_EOP
;
586 tail_p
= lp
->tx_bd_p
+ sizeof(*lp
->tx_bd_v
) * lp
->tx_bd_tail
;
588 if (lp
->tx_bd_tail
>= TX_BD_NUM
)
591 /* Kick off the transfer */
592 temac_dma_out32(lp
, TX_TAILDESC_PTR
, tail_p
); /* DMA start */
598 static void ll_temac_recv(struct net_device
*ndev
)
600 struct temac_local
*lp
= netdev_priv(ndev
);
601 struct sk_buff
*skb
, *new_skb
;
603 struct cdmac_bd
*cur_p
;
606 unsigned long skb_vaddr
;
609 spin_lock_irqsave(&lp
->rx_lock
, flags
);
611 tail_p
= lp
->rx_bd_p
+ sizeof(*lp
->rx_bd_v
) * lp
->rx_bd_ci
;
612 cur_p
= &lp
->rx_bd_v
[lp
->rx_bd_ci
];
614 bdstat
= cur_p
->app0
;
615 while ((bdstat
& STS_CTRL_APP0_CMPLT
)) {
617 skb
= lp
->rx_skb
[lp
->rx_bd_ci
];
618 length
= cur_p
->app4
;
620 skb_vaddr
= virt_to_bus(skb
->data
);
621 dma_unmap_single(ndev
->dev
.parent
, skb_vaddr
, length
,
624 skb_put(skb
, length
);
626 skb
->protocol
= eth_type_trans(skb
, ndev
);
627 skb
->ip_summed
= CHECKSUM_NONE
;
631 ndev
->stats
.rx_packets
++;
632 ndev
->stats
.rx_bytes
+= length
;
634 new_skb
= alloc_skb(XTE_MAX_JUMBO_FRAME_SIZE
+ XTE_ALIGN
,
637 dev_err(&ndev
->dev
, "no memory for new sk_buff\n");
638 spin_unlock_irqrestore(&lp
->rx_lock
, flags
);
642 skb_reserve(new_skb
, BUFFER_ALIGN(new_skb
->data
));
644 cur_p
->app0
= STS_CTRL_APP0_IRQONEND
;
645 cur_p
->phys
= dma_map_single(ndev
->dev
.parent
, new_skb
->data
,
646 XTE_MAX_JUMBO_FRAME_SIZE
,
648 cur_p
->len
= XTE_MAX_JUMBO_FRAME_SIZE
;
649 lp
->rx_skb
[lp
->rx_bd_ci
] = new_skb
;
652 if (lp
->rx_bd_ci
>= RX_BD_NUM
)
655 cur_p
= &lp
->rx_bd_v
[lp
->rx_bd_ci
];
656 bdstat
= cur_p
->app0
;
658 temac_dma_out32(lp
, RX_TAILDESC_PTR
, tail_p
);
660 spin_unlock_irqrestore(&lp
->rx_lock
, flags
);
663 static irqreturn_t
ll_temac_tx_irq(int irq
, void *_ndev
)
665 struct net_device
*ndev
= _ndev
;
666 struct temac_local
*lp
= netdev_priv(ndev
);
669 status
= temac_dma_in32(lp
, TX_IRQ_REG
);
670 temac_dma_out32(lp
, TX_IRQ_REG
, status
);
672 if (status
& (IRQ_COAL
| IRQ_DLY
))
673 temac_start_xmit_done(lp
->ndev
);
675 dev_err(&ndev
->dev
, "DMA error 0x%x\n", status
);
680 static irqreturn_t
ll_temac_rx_irq(int irq
, void *_ndev
)
682 struct net_device
*ndev
= _ndev
;
683 struct temac_local
*lp
= netdev_priv(ndev
);
686 /* Read and clear the status registers */
687 status
= temac_dma_in32(lp
, RX_IRQ_REG
);
688 temac_dma_out32(lp
, RX_IRQ_REG
, status
);
690 if (status
& (IRQ_COAL
| IRQ_DLY
))
691 ll_temac_recv(lp
->ndev
);
696 static int temac_open(struct net_device
*ndev
)
698 struct temac_local
*lp
= netdev_priv(ndev
);
701 dev_dbg(&ndev
->dev
, "temac_open()\n");
704 lp
->phy_dev
= of_phy_connect(lp
->ndev
, lp
->phy_node
,
705 temac_adjust_link
, 0, 0);
707 dev_err(lp
->dev
, "of_phy_connect() failed\n");
711 phy_start(lp
->phy_dev
);
714 rc
= request_irq(lp
->tx_irq
, ll_temac_tx_irq
, 0, ndev
->name
, ndev
);
717 rc
= request_irq(lp
->rx_irq
, ll_temac_rx_irq
, 0, ndev
->name
, ndev
);
721 temac_device_reset(ndev
);
725 free_irq(lp
->tx_irq
, ndev
);
728 phy_disconnect(lp
->phy_dev
);
730 dev_err(lp
->dev
, "request_irq() failed\n");
734 static int temac_stop(struct net_device
*ndev
)
736 struct temac_local
*lp
= netdev_priv(ndev
);
738 dev_dbg(&ndev
->dev
, "temac_close()\n");
740 free_irq(lp
->tx_irq
, ndev
);
741 free_irq(lp
->rx_irq
, ndev
);
744 phy_disconnect(lp
->phy_dev
);
#ifdef CONFIG_NET_POLL_CONTROLLER
/*
 * Netpoll hook: service both DMA channels with interrupts masked.
 *
 * Fixed: the handlers take the net_device as their void* cookie
 * (they cast _ndev to struct net_device *), but the original passed
 * 'lp' (struct temac_local *) — a type-confused pointer — and also
 * paired the tx irq number with the rx handler and vice versa.  Pass
 * 'ndev' and the matching irq to each handler.
 */
static void
temac_poll_controller(struct net_device *ndev)
{
        struct temac_local *lp = netdev_priv(ndev);

        disable_irq(lp->tx_irq);
        disable_irq(lp->rx_irq);

        ll_temac_rx_irq(lp->rx_irq, ndev);
        ll_temac_tx_irq(lp->tx_irq, ndev);

        enable_irq(lp->tx_irq);
        enable_irq(lp->rx_irq);
}
#endif
767 static const struct net_device_ops temac_netdev_ops
= {
768 .ndo_open
= temac_open
,
769 .ndo_stop
= temac_stop
,
770 .ndo_start_xmit
= temac_start_xmit
,
771 .ndo_set_mac_address
= temac_set_mac_address
,
772 //.ndo_set_multicast_list = temac_set_multicast_list,
773 #ifdef CONFIG_NET_POLL_CONTROLLER
774 .ndo_poll_controller
= temac_poll_controller
,
778 /* ---------------------------------------------------------------------
779 * SYSFS device attributes
781 static ssize_t
temac_show_llink_regs(struct device
*dev
,
782 struct device_attribute
*attr
, char *buf
)
784 struct net_device
*ndev
= dev_get_drvdata(dev
);
785 struct temac_local
*lp
= netdev_priv(ndev
);
788 for (i
= 0; i
< 0x11; i
++)
789 len
+= sprintf(buf
+ len
, "%.8x%s", temac_dma_in32(lp
, i
),
790 (i
% 8) == 7 ? "\n" : " ");
791 len
+= sprintf(buf
+ len
, "\n");
796 static DEVICE_ATTR(llink_regs
, 0440, temac_show_llink_regs
, NULL
);
798 static struct attribute
*temac_device_attrs
[] = {
799 &dev_attr_llink_regs
.attr
,
803 static const struct attribute_group temac_attr_group
= {
804 .attrs
= temac_device_attrs
,
808 temac_of_probe(struct of_device
*op
, const struct of_device_id
*match
)
810 struct device_node
*np
;
811 struct temac_local
*lp
;
812 struct net_device
*ndev
;
817 /* Init network device structure */
818 ndev
= alloc_etherdev(sizeof(*lp
));
820 dev_err(&op
->dev
, "could not allocate device.\n");
824 dev_set_drvdata(&op
->dev
, ndev
);
825 SET_NETDEV_DEV(ndev
, &op
->dev
);
826 ndev
->flags
&= ~IFF_MULTICAST
; /* clear multicast */
827 ndev
->features
= NETIF_F_SG
| NETIF_F_FRAGLIST
;
828 ndev
->netdev_ops
= &temac_netdev_ops
;
830 ndev
->features
|= NETIF_F_IP_CSUM
; /* Can checksum TCP/UDP over IPv4. */
831 ndev
->features
|= NETIF_F_HW_CSUM
; /* Can checksum all the packets. */
832 ndev
->features
|= NETIF_F_IPV6_CSUM
; /* Can checksum IPV6 TCP/UDP */
833 ndev
->features
|= NETIF_F_HIGHDMA
; /* Can DMA to high memory. */
834 ndev
->features
|= NETIF_F_HW_VLAN_TX
; /* Transmit VLAN hw accel */
835 ndev
->features
|= NETIF_F_HW_VLAN_RX
; /* Receive VLAN hw acceleration */
836 ndev
->features
|= NETIF_F_HW_VLAN_FILTER
; /* Receive VLAN filtering */
837 ndev
->features
|= NETIF_F_VLAN_CHALLENGED
; /* cannot handle VLAN pkts */
838 ndev
->features
|= NETIF_F_GSO
; /* Enable software GSO. */
839 ndev
->features
|= NETIF_F_MULTI_QUEUE
; /* Has multiple TX/RX queues */
840 ndev
->features
|= NETIF_F_LRO
; /* large receive offload */
843 /* setup temac private info structure */
844 lp
= netdev_priv(ndev
);
847 lp
->options
= XTE_OPTION_DEFAULTS
;
848 spin_lock_init(&lp
->rx_lock
);
849 mutex_init(&lp
->indirect_mutex
);
851 /* map device registers */
852 lp
->regs
= of_iomap(op
->node
, 0);
854 dev_err(&op
->dev
, "could not map temac regs.\n");
858 /* Find the DMA node, map the DMA registers, and decode the DMA IRQs */
859 np
= of_parse_phandle(op
->node
, "llink-connected", 0);
861 dev_err(&op
->dev
, "could not find DMA node\n");
865 dcrs
= dcr_resource_start(np
, 0);
867 dev_err(&op
->dev
, "could not get DMA register address\n");
870 lp
->sdma_dcrs
= dcr_map(np
, dcrs
, dcr_resource_len(np
, 0));
871 dev_dbg(&op
->dev
, "DCR base: %x\n", dcrs
);
873 lp
->rx_irq
= irq_of_parse_and_map(np
, 0);
874 lp
->tx_irq
= irq_of_parse_and_map(np
, 1);
875 if (!lp
->rx_irq
|| !lp
->tx_irq
) {
876 dev_err(&op
->dev
, "could not determine irqs\n");
881 of_node_put(np
); /* Finished with the DMA node; drop the reference */
883 /* Retrieve the MAC address */
884 addr
= of_get_property(op
->node
, "local-mac-address", &size
);
885 if ((!addr
) || (size
!= 6)) {
886 dev_err(&op
->dev
, "could not find MAC address\n");
890 temac_set_mac_address(ndev
, (void *)addr
);
892 rc
= temac_mdio_setup(lp
, op
->node
);
894 dev_warn(&op
->dev
, "error registering MDIO bus\n");
896 lp
->phy_node
= of_parse_phandle(op
->node
, "phy-handle", 0);
898 dev_dbg(lp
->dev
, "using PHY node %s (%p)\n", np
->full_name
, np
);
900 /* Add the device attributes */
901 rc
= sysfs_create_group(&lp
->dev
->kobj
, &temac_attr_group
);
903 dev_err(lp
->dev
, "Error creating sysfs files\n");
907 rc
= register_netdev(lp
->ndev
);
909 dev_err(lp
->dev
, "register_netdev() error (%i)\n", rc
);
910 goto err_register_ndev
;
916 sysfs_remove_group(&lp
->dev
->kobj
, &temac_attr_group
);
923 static int __devexit
temac_of_remove(struct of_device
*op
)
925 struct net_device
*ndev
= dev_get_drvdata(&op
->dev
);
926 struct temac_local
*lp
= netdev_priv(ndev
);
928 temac_mdio_teardown(lp
);
929 unregister_netdev(ndev
);
930 sysfs_remove_group(&lp
->dev
->kobj
, &temac_attr_group
);
932 of_node_put(lp
->phy_node
);
934 dev_set_drvdata(&op
->dev
, NULL
);
939 static struct of_device_id temac_of_match
[] __devinitdata
= {
940 { .compatible
= "xlnx,xps-ll-temac-1.01.b", },
943 MODULE_DEVICE_TABLE(of
, temac_of_match
);
945 static struct of_platform_driver temac_of_driver
= {
946 .match_table
= temac_of_match
,
947 .probe
= temac_of_probe
,
948 .remove
= __devexit_p(temac_of_remove
),
950 .owner
= THIS_MODULE
,
951 .name
= "xilinx_temac",
955 static int __init
temac_init(void)
957 return of_register_platform_driver(&temac_of_driver
);
959 module_init(temac_init
);
961 static void __exit
temac_exit(void)
963 of_unregister_platform_driver(&temac_of_driver
);
965 module_exit(temac_exit
);
/* Module metadata. */
MODULE_DESCRIPTION("Xilinx LL_TEMAC Ethernet driver");
MODULE_AUTHOR("Yoshio Kashiwagi");
MODULE_LICENSE("GPL");