/*
 * Copyright (C) 2006-2007 PA Semi, Inc
 *
 * Driver for the PA Semi PWRficient onchip 1G/10G Ethernet MACs
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 */

#include <linux/init.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/interrupt.h>
#include <linux/dmaengine.h>
#include <linux/delay.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <asm/dma-mapping.h>
#include <linux/skbuff.h>

#include <linux/tcp.h>
#include <net/checksum.h>

#include "pasemi_mac.h"

/* TODO:
 * - Get rid of pci_{read,write}_config(), map registers with ioremap
 * - Other performance improvements
 */

/* Must be a power of two */
#define RX_RING_SIZE 512
#define TX_RING_SIZE 512

#define DEFAULT_MSG_ENABLE \

#define TX_DESC(mac, num)       ((mac)->tx->desc[(num) & (TX_RING_SIZE-1)])
#define TX_DESC_INFO(mac, num)  ((mac)->tx->desc_info[(num) & (TX_RING_SIZE-1)])
#define RX_DESC(mac, num)       ((mac)->rx->desc[(num) & (RX_RING_SIZE-1)])
#define RX_DESC_INFO(mac, num)  ((mac)->rx->desc_info[(num) & (RX_RING_SIZE-1)])
#define RX_BUFF(mac, num)       ((mac)->rx->buffers[(num) & (RX_RING_SIZE-1)])
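/* The index macros above rely on RX_RING_SIZE and TX_RING_SIZE being powers
 * of two: "(num) & (SIZE-1)" is then a cheap "(num) % SIZE". With
 * TX_RING_SIZE == 512, for example, TX_DESC(mac, 513) resolves to slot
 * 513 & 511 == 1, so the next_to_use/next_to_clean counters can keep
 * incrementing without ever being wrapped explicitly.
 */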
#define BUF_SIZE 1646   /* 1500 MTU + ETH_HLEN + VLAN_HLEN + 2 64B cachelines */
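/* BUF_SIZE breakdown: 1500 (MTU) + 14 (ETH_HLEN) + 4 (VLAN_HLEN) +
 * 2 * 64 (two 64-byte cachelines) = 1646 bytes per receive buffer.
 */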
MODULE_LICENSE("GPL");
MODULE_AUTHOR ("Olof Johansson <olof@lixom.net>");
MODULE_DESCRIPTION("PA Semi PWRficient Ethernet driver");

static int debug = -1;  /* -1 == use DEFAULT_MSG_ENABLE as value */
module_param(debug, int, 0);
MODULE_PARM_DESC(debug, "PA Semi MAC bitmapped debugging message enable value");

static struct pasdma_status *dma_status;
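/* dma_status points at a block of per-channel rx/tx status words kept up to
 * date by the DMA engine; it lives in the I/O bridge and is mapped
 * (cache-coherently) in pasemi_mac_probe() below.
 */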
static int pasemi_get_mac_addr(struct pasemi_mac *mac)
        struct pci_dev *pdev = mac->pdev;
        struct device_node *dn = pci_device_to_OF_node(pdev);

                        "No device node for mac, not configuring\n");

        maddr = of_get_property(dn, "local-mac-address", &len);

        if (maddr && len == 6) {
                memcpy(mac->mac_addr, maddr, 6);
        /* Some old versions of firmware mistakenly use mac-address
         * (and as a string) instead of a byte array in local-mac-address.
         */
        maddr = of_get_property(dn, "mac-address", NULL);

                        "no mac address in device tree, not configuring\n");

        if (sscanf(maddr, "%hhx:%hhx:%hhx:%hhx:%hhx:%hhx", &addr[0],
                   &addr[1], &addr[2], &addr[3], &addr[4], &addr[5]) != 6) {
                        "can't parse mac address, not configuring\n");

        memcpy(mac->mac_addr, addr, 6);
static int pasemi_mac_setup_rx_resources(struct net_device *dev)
        struct pasemi_mac_rxring *ring;
        struct pasemi_mac *mac = netdev_priv(dev);
        int chan_id = mac->dma_rxch;

        ring = kzalloc(sizeof(*ring), GFP_KERNEL);

        spin_lock_init(&ring->lock);

        ring->desc_info = kzalloc(sizeof(struct pasemi_mac_buffer) *
                                  RX_RING_SIZE, GFP_KERNEL);

        if (!ring->desc_info)
        /* Allocate descriptors */
        ring->desc = dma_alloc_coherent(&mac->dma_pdev->dev,
                                        RX_RING_SIZE *
                                        sizeof(struct pas_dma_xct_descr),
                                        &ring->dma, GFP_KERNEL);
        memset(ring->desc, 0, RX_RING_SIZE * sizeof(struct pas_dma_xct_descr));

        ring->buffers = dma_alloc_coherent(&mac->dma_pdev->dev,
                                           RX_RING_SIZE * sizeof(u64),
                                           &ring->buf_dma, GFP_KERNEL);

        memset(ring->buffers, 0, RX_RING_SIZE * sizeof(u64));

        pci_write_config_dword(mac->dma_pdev, PAS_DMA_RXCHAN_BASEL(chan_id),
                               PAS_DMA_RXCHAN_BASEL_BRBL(ring->dma));

        pci_write_config_dword(mac->dma_pdev, PAS_DMA_RXCHAN_BASEU(chan_id),
                               PAS_DMA_RXCHAN_BASEU_BRBH(ring->dma >> 32) |
                               PAS_DMA_RXCHAN_BASEU_SIZ(RX_RING_SIZE >> 2));

        pci_write_config_dword(mac->dma_pdev, PAS_DMA_RXCHAN_CFG(chan_id),
                               PAS_DMA_RXCHAN_CFG_HBU(1));

        pci_write_config_dword(mac->dma_pdev, PAS_DMA_RXINT_BASEL(mac->dma_if),
                               PAS_DMA_RXINT_BASEL_BRBL(__pa(ring->buffers)));

        pci_write_config_dword(mac->dma_pdev, PAS_DMA_RXINT_BASEU(mac->dma_if),
                               PAS_DMA_RXINT_BASEU_BRBH(__pa(ring->buffers) >> 32) |
                               PAS_DMA_RXINT_BASEU_SIZ(RX_RING_SIZE >> 3));

        ring->next_to_fill = 0;
        ring->next_to_clean = 0;

        snprintf(ring->irq_name, sizeof(ring->irq_name),

        dma_free_coherent(&mac->dma_pdev->dev,
                          RX_RING_SIZE * sizeof(struct pas_dma_xct_descr),
                          mac->rx->desc, mac->rx->dma);

        kfree(ring->desc_info);
static int pasemi_mac_setup_tx_resources(struct net_device *dev)
        struct pasemi_mac *mac = netdev_priv(dev);
        int chan_id = mac->dma_txch;
        struct pasemi_mac_txring *ring;

        ring = kzalloc(sizeof(*ring), GFP_KERNEL);

        spin_lock_init(&ring->lock);

        ring->desc_info = kzalloc(sizeof(struct pasemi_mac_buffer) *
                                  TX_RING_SIZE, GFP_KERNEL);
        if (!ring->desc_info)
        /* Allocate descriptors */
        ring->desc = dma_alloc_coherent(&mac->dma_pdev->dev,
                                        TX_RING_SIZE *
                                        sizeof(struct pas_dma_xct_descr),
                                        &ring->dma, GFP_KERNEL);
        memset(ring->desc, 0, TX_RING_SIZE * sizeof(struct pas_dma_xct_descr));

        pci_write_config_dword(mac->dma_pdev, PAS_DMA_TXCHAN_BASEL(chan_id),
                               PAS_DMA_TXCHAN_BASEL_BRBL(ring->dma));
        val = PAS_DMA_TXCHAN_BASEU_BRBH(ring->dma >> 32);
        val |= PAS_DMA_TXCHAN_BASEU_SIZ(TX_RING_SIZE >> 2);

        pci_write_config_dword(mac->dma_pdev, PAS_DMA_TXCHAN_BASEU(chan_id), val);

        pci_write_config_dword(mac->dma_pdev, PAS_DMA_TXCHAN_CFG(chan_id),
                               PAS_DMA_TXCHAN_CFG_TY_IFACE |
                               PAS_DMA_TXCHAN_CFG_TATTR(mac->dma_if) |
                               PAS_DMA_TXCHAN_CFG_UP |
                               PAS_DMA_TXCHAN_CFG_WT(2));

        ring->next_to_use = 0;
        ring->next_to_clean = 0;

        snprintf(ring->irq_name, sizeof(ring->irq_name),

        kfree(ring->desc_info);
static void pasemi_mac_free_tx_resources(struct net_device *dev)
        struct pasemi_mac *mac = netdev_priv(dev);
        struct pasemi_mac_buffer *info;
        struct pas_dma_xct_descr *dp;

        for (i = 0; i < TX_RING_SIZE; i++) {
                info = &TX_DESC_INFO(mac, i);
                dp = &TX_DESC(mac, i);

                        pci_unmap_single(mac->dma_pdev,

                        dev_kfree_skb_any(info->skb);

        dma_free_coherent(&mac->dma_pdev->dev,
                          TX_RING_SIZE * sizeof(struct pas_dma_xct_descr),
                          mac->tx->desc, mac->tx->dma);

        kfree(mac->tx->desc_info);
static void pasemi_mac_free_rx_resources(struct net_device *dev)
        struct pasemi_mac *mac = netdev_priv(dev);
        struct pasemi_mac_buffer *info;
        struct pas_dma_xct_descr *dp;

        for (i = 0; i < RX_RING_SIZE; i++) {
                info = &RX_DESC_INFO(mac, i);
                dp = &RX_DESC(mac, i);

                        pci_unmap_single(mac->dma_pdev,

                        dev_kfree_skb_any(info->skb);

        dma_free_coherent(&mac->dma_pdev->dev,
                          RX_RING_SIZE * sizeof(struct pas_dma_xct_descr),
                          mac->rx->desc, mac->rx->dma);
        dma_free_coherent(&mac->dma_pdev->dev, RX_RING_SIZE * sizeof(u64),
                          mac->rx->buffers, mac->rx->buf_dma);

        kfree(mac->rx->desc_info);
static void pasemi_mac_replenish_rx_ring(struct net_device *dev)
        struct pasemi_mac *mac = netdev_priv(dev);
        int start = mac->rx->next_to_fill;
        unsigned int limit, count;

        limit = (mac->rx->next_to_clean + RX_RING_SIZE -
                 mac->rx->next_to_fill) & (RX_RING_SIZE - 1);
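        /* limit is the number of free slots between the fill and clean
         * pointers. For example, with next_to_clean == 10 and
         * next_to_fill == 266 (256 buffers outstanding),
         * limit = (10 + 512 - 266) & 511 = 256 slots can be refilled.
         */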
        /* Check to see if we're doing first-time setup */
        if (unlikely(mac->rx->next_to_clean == 0 && mac->rx->next_to_fill == 0))
                limit = RX_RING_SIZE;

        for (count = limit; count; count--) {
                struct pasemi_mac_buffer *info = &RX_DESC_INFO(mac, i);
                u64 *buff = &RX_BUFF(mac, i);

                /* skb might still be in there for recycle on short receives */

                skb = dev_alloc_skb(BUF_SIZE);

                dma = pci_map_single(mac->dma_pdev, skb->data, skb->len,

                if (unlikely(dma_mapping_error(dma))) {
                        dev_kfree_skb_irq(info->skb);

                *buff = XCT_RXB_LEN(BUF_SIZE) | XCT_RXB_ADDR(dma);
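                /* Each buffer-table entry is one 64-bit word packing the
                 * buffer length (XCT_RXB_LEN) with its DMA address
                 * (XCT_RXB_ADDR) for the receive engine.
                 */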
        pci_write_config_dword(mac->dma_pdev,
                               PAS_DMA_RXCHAN_INCR(mac->dma_rxch),

        pci_write_config_dword(mac->dma_pdev,
                               PAS_DMA_RXINT_INCR(mac->dma_if),

        mac->rx->next_to_fill += limit - count;
static void pasemi_mac_restart_rx_intr(struct pasemi_mac *mac)
        unsigned int reg, pcnt;

        /* Re-enable packet count interrupts: finally
         * ack the packet count interrupt we got in rx_intr.
         */
        pcnt = *mac->rx_status & PAS_STATUS_PCNT_M;

        reg = PAS_IOB_DMA_RXCH_RESET_PCNT(pcnt) | PAS_IOB_DMA_RXCH_RESET_PINTC;

        pci_write_config_dword(mac->iob_pdev,
                               PAS_IOB_DMA_RXCH_RESET(mac->dma_rxch),

static void pasemi_mac_restart_tx_intr(struct pasemi_mac *mac)
        unsigned int reg, pcnt;

        /* Re-enable packet count interrupts */
        pcnt = *mac->tx_status & PAS_STATUS_PCNT_M;

        reg = PAS_IOB_DMA_TXCH_RESET_PCNT(pcnt) | PAS_IOB_DMA_TXCH_RESET_PINTC;

        pci_write_config_dword(mac->iob_pdev,
                               PAS_IOB_DMA_TXCH_RESET(mac->dma_txch), reg);
static int pasemi_mac_clean_rx(struct pasemi_mac *mac, int limit)
        struct pas_dma_xct_descr *dp;
        struct pasemi_mac_buffer *info;

        spin_lock(&mac->rx->lock);

        n = mac->rx->next_to_clean;

        for (count = limit; count; count--) {
                dp = &RX_DESC(mac, n);

                if (!(macrx & XCT_MACRX_O))
                /* We have to scan for our skb since there's no way
                 * to back-map them from the descriptor, and if we
                 * have several receive channels then they might not
                 * show up in the same order as they were put on the
                 * ring.
                 */
                dma = (dp->ptr & XCT_PTR_ADDR_M);
                for (i = n; i < (n + RX_RING_SIZE); i++) {
                        info = &RX_DESC_INFO(mac, i);
                        if (info->dma == dma)

                pci_unmap_single(mac->dma_pdev, dma, skb->len,

                len = (macrx & XCT_MACRX_LLEN_M) >> XCT_MACRX_LLEN_S;

                        struct sk_buff *new_skb =
                                netdev_alloc_skb(mac->netdev, len + NET_IP_ALIGN);

                                skb_reserve(new_skb, NET_IP_ALIGN);
                                memcpy(new_skb->data - NET_IP_ALIGN,
                                       skb->data - NET_IP_ALIGN,

                        /* save the skb in buffer_info as good */
                /* else just continue with the old one */

                skb->protocol = eth_type_trans(skb, mac->netdev);

                if ((macrx & XCT_MACRX_HTY_M) == XCT_MACRX_HTY_IPV4_OK) {
                        skb->ip_summed = CHECKSUM_COMPLETE;
                        skb->csum = (macrx & XCT_MACRX_CSUM_M) >>

                        skb->ip_summed = CHECKSUM_NONE;

                mac->stats.rx_bytes += len;
                mac->stats.rx_packets++;

                netif_receive_skb(skb);

        mac->rx->next_to_clean += limit - count;
        pasemi_mac_replenish_rx_ring(mac->netdev);

        spin_unlock(&mac->rx->lock);
static int pasemi_mac_clean_tx(struct pasemi_mac *mac)
        struct pasemi_mac_buffer *info;
        struct pas_dma_xct_descr *dp;

        spin_lock_irqsave(&mac->tx->lock, flags);

        start = mac->tx->next_to_clean;

        for (i = start; i < mac->tx->next_to_use; i++) {
                dp = &TX_DESC(mac, i);
                if (!dp || (dp->mactx & XCT_MACTX_O))

                info = &TX_DESC_INFO(mac, i);

                pci_unmap_single(mac->dma_pdev, info->dma,
                                 info->skb->len, PCI_DMA_TODEVICE);
                dev_kfree_skb_irq(info->skb);

        mac->tx->next_to_clean += count;
        spin_unlock_irqrestore(&mac->tx->lock, flags);

        netif_wake_queue(mac->netdev);
static irqreturn_t pasemi_mac_rx_intr(int irq, void *data)
        struct net_device *dev = data;
        struct pasemi_mac *mac = netdev_priv(dev);

        if (!(*mac->rx_status & PAS_STATUS_CAUSE_M))

        if (*mac->rx_status & PAS_STATUS_ERROR)
                printk("rx_status reported error\n");
        /* Don't reset packet count so it won't fire again but clear

        pci_read_config_dword(mac->dma_pdev, PAS_DMA_RXINT_RCMDSTA(mac->dma_if), &reg);
        if (*mac->rx_status & PAS_STATUS_SOFT)
                reg |= PAS_IOB_DMA_RXCH_RESET_SINTC;
        if (*mac->rx_status & PAS_STATUS_ERROR)
                reg |= PAS_IOB_DMA_RXCH_RESET_DINTC;
        if (*mac->rx_status & PAS_STATUS_TIMER)
                reg |= PAS_IOB_DMA_RXCH_RESET_TINTC;

        netif_rx_schedule(dev, &mac->napi);

        pci_write_config_dword(mac->iob_pdev,
                               PAS_IOB_DMA_RXCH_RESET(mac->dma_rxch), reg);

static irqreturn_t pasemi_mac_tx_intr(int irq, void *data)
        struct net_device *dev = data;
        struct pasemi_mac *mac = netdev_priv(dev);
        unsigned int reg, pcnt;

        if (!(*mac->tx_status & PAS_STATUS_CAUSE_M))

        pasemi_mac_clean_tx(mac);

        pcnt = *mac->tx_status & PAS_STATUS_PCNT_M;

        reg = PAS_IOB_DMA_TXCH_RESET_PCNT(pcnt) | PAS_IOB_DMA_TXCH_RESET_PINTC;

        if (*mac->tx_status & PAS_STATUS_SOFT)
                reg |= PAS_IOB_DMA_TXCH_RESET_SINTC;
        if (*mac->tx_status & PAS_STATUS_ERROR)
                reg |= PAS_IOB_DMA_TXCH_RESET_DINTC;

        pci_write_config_dword(mac->iob_pdev,
                               PAS_IOB_DMA_TXCH_RESET(mac->dma_txch),
static void pasemi_adjust_link(struct net_device *dev)
        struct pasemi_mac *mac = netdev_priv(dev);
        unsigned int new_flags;

        if (!mac->phydev->link) {
                /* If no link, MAC speed settings don't matter. Just report
                 * link down and return.
                 */
                if (mac->link && netif_msg_link(mac))
                        printk(KERN_INFO "%s: Link is down.\n", dev->name);

                netif_carrier_off(dev);

                netif_carrier_on(dev);
        pci_read_config_dword(mac->pdev, PAS_MAC_CFG_PCFG, &flags);
        new_flags = flags & ~(PAS_MAC_CFG_PCFG_HD | PAS_MAC_CFG_PCFG_SPD_M |
                              PAS_MAC_CFG_PCFG_TSR_M);

        if (!mac->phydev->duplex)
                new_flags |= PAS_MAC_CFG_PCFG_HD;

        switch (mac->phydev->speed) {
                new_flags |= PAS_MAC_CFG_PCFG_SPD_1G |
                             PAS_MAC_CFG_PCFG_TSR_1G;

                new_flags |= PAS_MAC_CFG_PCFG_SPD_100M |
                             PAS_MAC_CFG_PCFG_TSR_100M;

                new_flags |= PAS_MAC_CFG_PCFG_SPD_10M |
                             PAS_MAC_CFG_PCFG_TSR_10M;

                printk("Unsupported speed %d\n", mac->phydev->speed);

        /* Print on link or speed/duplex change */
        msg = mac->link != mac->phydev->link || flags != new_flags;

        mac->duplex = mac->phydev->duplex;
        mac->speed = mac->phydev->speed;
        mac->link = mac->phydev->link;

        if (new_flags != flags)
                pci_write_config_dword(mac->pdev, PAS_MAC_CFG_PCFG, new_flags);

        if (msg && netif_msg_link(mac))
                printk(KERN_INFO "%s: Link is up at %d Mbps, %s duplex.\n",
                       dev->name, mac->speed, mac->duplex ? "full" : "half");
static int pasemi_mac_phy_init(struct net_device *dev)
        struct pasemi_mac *mac = netdev_priv(dev);
        struct device_node *dn, *phy_dn;
        struct phy_device *phydev;
        const unsigned int *prop;

        dn = pci_device_to_OF_node(mac->pdev);
        ph = of_get_property(dn, "phy-handle", NULL);

        phy_dn = of_find_node_by_phandle(*ph);

        prop = of_get_property(phy_dn, "reg", NULL);
        ret = of_address_to_resource(phy_dn->parent, 0, &r);

        snprintf(mac->phy_id, BUS_ID_SIZE, PHY_ID_FMT, (int)r.start, phy_id);

        phydev = phy_connect(dev, mac->phy_id, &pasemi_adjust_link, 0,
                             PHY_INTERFACE_MODE_SGMII);

        if (IS_ERR(phydev)) {
                printk(KERN_ERR "%s: Could not attach to phy\n", dev->name);
                return PTR_ERR(phydev);

        mac->phydev = phydev;
static int pasemi_mac_open(struct net_device *dev)
        struct pasemi_mac *mac = netdev_priv(dev);

        /* enable rx section */
        pci_write_config_dword(mac->dma_pdev, PAS_DMA_COM_RXCMD,
                               PAS_DMA_COM_RXCMD_EN);

        /* enable tx section */
        pci_write_config_dword(mac->dma_pdev, PAS_DMA_COM_TXCMD,
                               PAS_DMA_COM_TXCMD_EN);

        flags = PAS_MAC_CFG_TXP_FCE | PAS_MAC_CFG_TXP_FPC(3) |
                PAS_MAC_CFG_TXP_SL(3) | PAS_MAC_CFG_TXP_COB(0xf) |
                PAS_MAC_CFG_TXP_TIFT(8) | PAS_MAC_CFG_TXP_TIFG(12);

        pci_write_config_dword(mac->pdev, PAS_MAC_CFG_TXP, flags);

        flags = PAS_MAC_CFG_PCFG_S1 | PAS_MAC_CFG_PCFG_PE |
                PAS_MAC_CFG_PCFG_PR | PAS_MAC_CFG_PCFG_CE;

        flags |= PAS_MAC_CFG_PCFG_TSR_1G | PAS_MAC_CFG_PCFG_SPD_1G;

        pci_write_config_dword(mac->iob_pdev, PAS_IOB_DMA_RXCH_CFG(mac->dma_rxch),
                               PAS_IOB_DMA_RXCH_CFG_CNTTH(0));

        pci_write_config_dword(mac->iob_pdev, PAS_IOB_DMA_TXCH_CFG(mac->dma_txch),
                               PAS_IOB_DMA_TXCH_CFG_CNTTH(32));

        /* Clear out any residual packet count state from firmware */
        pasemi_mac_restart_rx_intr(mac);
        pasemi_mac_restart_tx_intr(mac);

        /* 0xffffff is max value, about 16ms */
        pci_write_config_dword(mac->iob_pdev, PAS_IOB_DMA_COM_TIMEOUTCFG,
                               PAS_IOB_DMA_COM_TIMEOUTCFG_TCNT(0xffffff));
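        /* 0xffffff is roughly 16.8 million timeout counts; the "about 16ms"
         * figure above implies the counter ticks at roughly 1 GHz, though the
         * exact tick rate isn't spelled out here.
         */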
        pci_write_config_dword(mac->pdev, PAS_MAC_CFG_PCFG, flags);

        ret = pasemi_mac_setup_rx_resources(dev);
                goto out_rx_resources;

        ret = pasemi_mac_setup_tx_resources(dev);
                goto out_tx_resources;

        pci_write_config_dword(mac->pdev, PAS_MAC_IPC_CHNL,
                               PAS_MAC_IPC_CHNL_DCHNO(mac->dma_rxch) |
                               PAS_MAC_IPC_CHNL_BCH(mac->dma_rxch));

        pci_write_config_dword(mac->dma_pdev,
                               PAS_DMA_RXINT_RCMDSTA(mac->dma_if),
                               PAS_DMA_RXINT_RCMDSTA_EN);

        /* enable rx channel */
        pci_write_config_dword(mac->dma_pdev,
                               PAS_DMA_RXCHAN_CCMDSTA(mac->dma_rxch),
                               PAS_DMA_RXCHAN_CCMDSTA_EN |
                               PAS_DMA_RXCHAN_CCMDSTA_DU);

        /* enable tx channel */
        pci_write_config_dword(mac->dma_pdev,
                               PAS_DMA_TXCHAN_TCMDSTA(mac->dma_txch),
                               PAS_DMA_TXCHAN_TCMDSTA_EN);

        pasemi_mac_replenish_rx_ring(dev);

        ret = pasemi_mac_phy_init(dev);
        /* Some configs don't have PHYs (XAUI etc), so don't complain about
         * failed init due to -ENODEV.
         */
        if (ret && ret != -ENODEV)
                dev_warn(&mac->pdev->dev, "phy init failed: %d\n", ret);

        netif_start_queue(dev);
        napi_enable(&mac->napi);
        /* Interrupts are a bit different for our DMA controller: While
         * it's got a regular PCI device header, the interrupt there
         * is really the base of the range it's using. Each tx and rx
         * channel has its own interrupt source.
         */
        base_irq = virq_to_hw(mac->dma_pdev->irq);

        mac->tx_irq = irq_create_mapping(NULL, base_irq + mac->dma_txch);
        mac->rx_irq = irq_create_mapping(NULL, base_irq + 20 + mac->dma_txch);
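        /* The tx interrupt for this channel sits at base_irq + channel, and
         * the corresponding rx interrupt 20 sources further up the DMA
         * engine's interrupt range (base_irq + 20 + channel); rx and tx use
         * the same channel number here.
         */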
        ret = request_irq(mac->tx_irq, &pasemi_mac_tx_intr, IRQF_DISABLED,
                          mac->tx->irq_name, dev);
                dev_err(&mac->pdev->dev, "request_irq of irq %d failed: %d\n",
                        base_irq + mac->dma_txch, ret);

        ret = request_irq(mac->rx_irq, &pasemi_mac_rx_intr, IRQF_DISABLED,
                          mac->rx->irq_name, dev);
                dev_err(&mac->pdev->dev, "request_irq of irq %d failed: %d\n",
                        base_irq + 20 + mac->dma_rxch, ret);

        phy_start(mac->phydev);

        free_irq(mac->tx_irq, dev);

        napi_disable(&mac->napi);
        netif_stop_queue(dev);
        pasemi_mac_free_tx_resources(dev);

        pasemi_mac_free_rx_resources(dev);
#define MAX_RETRIES 5000

static int pasemi_mac_close(struct net_device *dev)
        struct pasemi_mac *mac = netdev_priv(dev);

        phy_stop(mac->phydev);
        phy_disconnect(mac->phydev);

        netif_stop_queue(dev);
        napi_disable(&mac->napi);
        /* Clean out any pending buffers */
        pasemi_mac_clean_tx(mac);
        pasemi_mac_clean_rx(mac, RX_RING_SIZE);

        /* Disable interface */
        pci_write_config_dword(mac->dma_pdev,
                               PAS_DMA_TXCHAN_TCMDSTA(mac->dma_txch),
                               PAS_DMA_TXCHAN_TCMDSTA_ST);
        pci_write_config_dword(mac->dma_pdev,
                               PAS_DMA_RXINT_RCMDSTA(mac->dma_if),
                               PAS_DMA_RXINT_RCMDSTA_ST);
        pci_write_config_dword(mac->dma_pdev,
                               PAS_DMA_RXCHAN_CCMDSTA(mac->dma_rxch),
                               PAS_DMA_RXCHAN_CCMDSTA_ST);
        for (retries = 0; retries < MAX_RETRIES; retries++) {
                pci_read_config_dword(mac->dma_pdev,
                                      PAS_DMA_TXCHAN_TCMDSTA(mac->dma_txch),

                if (!(stat & PAS_DMA_TXCHAN_TCMDSTA_ACT))

        if (stat & PAS_DMA_TXCHAN_TCMDSTA_ACT)
                dev_err(&mac->dma_pdev->dev, "Failed to stop tx channel\n");

        for (retries = 0; retries < MAX_RETRIES; retries++) {
                pci_read_config_dword(mac->dma_pdev,
                                      PAS_DMA_RXCHAN_CCMDSTA(mac->dma_rxch),

                if (!(stat & PAS_DMA_RXCHAN_CCMDSTA_ACT))

        if (stat & PAS_DMA_RXCHAN_CCMDSTA_ACT)
                dev_err(&mac->dma_pdev->dev, "Failed to stop rx channel\n");

        for (retries = 0; retries < MAX_RETRIES; retries++) {
                pci_read_config_dword(mac->dma_pdev,
                                      PAS_DMA_RXINT_RCMDSTA(mac->dma_if),

                if (!(stat & PAS_DMA_RXINT_RCMDSTA_ACT))

        if (stat & PAS_DMA_RXINT_RCMDSTA_ACT)
                dev_err(&mac->dma_pdev->dev, "Failed to stop rx interface\n");
        /* Then, disable the channel. This must be done separately from
         * stopping, since you can't disable when active.
         */
        pci_write_config_dword(mac->dma_pdev,
                               PAS_DMA_TXCHAN_TCMDSTA(mac->dma_txch), 0);
        pci_write_config_dword(mac->dma_pdev,
                               PAS_DMA_RXCHAN_CCMDSTA(mac->dma_rxch), 0);
        pci_write_config_dword(mac->dma_pdev,
                               PAS_DMA_RXINT_RCMDSTA(mac->dma_if), 0);

        free_irq(mac->tx_irq, dev);
        free_irq(mac->rx_irq, dev);

        pasemi_mac_free_rx_resources(dev);
        pasemi_mac_free_tx_resources(dev);
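/* Transmit path: build a single tx descriptor for the skb, requesting TCP/UDP
 * checksum insertion when the stack hands us CHECKSUM_PARTIAL, map the buffer
 * for DMA, and kick the channel by writing a count of 1 to the TXCHAN INCR
 * register. If the ring is full, try to reclaim completed descriptors first
 * and otherwise stop the queue until the tx interrupt frees space.
 */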
static int pasemi_mac_start_tx(struct sk_buff *skb, struct net_device *dev)
        struct pasemi_mac *mac = netdev_priv(dev);
        struct pasemi_mac_txring *txring;
        struct pasemi_mac_buffer *info;
        struct pas_dma_xct_descr *dp;

        dflags = XCT_MACTX_O | XCT_MACTX_ST | XCT_MACTX_SS | XCT_MACTX_CRC_PAD;

        if (skb->ip_summed == CHECKSUM_PARTIAL) {
                const unsigned char *nh = skb_network_header(skb);

                switch (ip_hdr(skb)->protocol) {
                        dflags |= XCT_MACTX_CSUM_TCP;
                        dflags |= XCT_MACTX_IPH(skb_network_header_len(skb) >> 2);
                        dflags |= XCT_MACTX_IPO(nh - skb->data);

                        dflags |= XCT_MACTX_CSUM_UDP;
                        dflags |= XCT_MACTX_IPH(skb_network_header_len(skb) >> 2);
                        dflags |= XCT_MACTX_IPO(nh - skb->data);

        map = pci_map_single(mac->dma_pdev, skb->data, skb->len,
                             PCI_DMA_TODEVICE);

        if (dma_mapping_error(map))
                return NETDEV_TX_BUSY;
        spin_lock_irqsave(&txring->lock, flags);

        if (txring->next_to_clean - txring->next_to_use == TX_RING_SIZE) {
                spin_unlock_irqrestore(&txring->lock, flags);
                pasemi_mac_clean_tx(mac);
                pasemi_mac_restart_tx_intr(mac);
                spin_lock_irqsave(&txring->lock, flags);

                if (txring->next_to_clean - txring->next_to_use ==
                        /* Still no room -- stop the queue and wait for tx
                         * intr when there's room.
                         */
                        netif_stop_queue(dev);

        dp = &TX_DESC(mac, txring->next_to_use);
        info = &TX_DESC_INFO(mac, txring->next_to_use);

        dp->mactx = dflags | XCT_MACTX_LLEN(skb->len);
        dp->ptr = XCT_PTR_LEN(skb->len) | XCT_PTR_ADDR(map);

        txring->next_to_use++;
        mac->stats.tx_packets++;
        mac->stats.tx_bytes += skb->len;

        spin_unlock_irqrestore(&txring->lock, flags);

        pci_write_config_dword(mac->dma_pdev,
                               PAS_DMA_TXCHAN_INCR(mac->dma_txch), 1);

        return NETDEV_TX_OK;

        spin_unlock_irqrestore(&txring->lock, flags);
        pci_unmap_single(mac->dma_pdev, map, skb->len, PCI_DMA_TODEVICE);
        return NETDEV_TX_BUSY;
static struct net_device_stats *pasemi_mac_get_stats(struct net_device *dev)
        struct pasemi_mac *mac = netdev_priv(dev);

static void pasemi_mac_set_rx_mode(struct net_device *dev)
        struct pasemi_mac *mac = netdev_priv(dev);

        pci_read_config_dword(mac->pdev, PAS_MAC_CFG_PCFG, &flags);

        /* Set promiscuous */
        if (dev->flags & IFF_PROMISC)
                flags |= PAS_MAC_CFG_PCFG_PR;
        else
                flags &= ~PAS_MAC_CFG_PCFG_PR;

        pci_write_config_dword(mac->pdev, PAS_MAC_CFG_PCFG, flags);
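/* NAPI poll callback: clean up to "budget" received packets; if fewer than
 * budget were processed the rx work is done, so leave polling mode and
 * re-enable the rx packet-count interrupt.
 */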
static int pasemi_mac_poll(struct napi_struct *napi, int budget)
        struct pasemi_mac *mac = container_of(napi, struct pasemi_mac, napi);
        struct net_device *dev = mac->netdev;

        pkts = pasemi_mac_clean_rx(mac, budget);
        if (pkts < budget) {
                /* all done, no more packets present */
                netif_rx_complete(dev, napi);

                pasemi_mac_restart_rx_intr(mac);
static int __devinit
pasemi_mac_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
        static int index = 0;
        struct net_device *dev;
        struct pasemi_mac *mac;

        err = pci_enable_device(pdev);

        dev = alloc_etherdev(sizeof(struct pasemi_mac));
                        "pasemi_mac: Could not allocate ethernet device.\n");
                goto out_disable_device;

        SET_MODULE_OWNER(dev);
        pci_set_drvdata(pdev, dev);
        SET_NETDEV_DEV(dev, &pdev->dev);

        mac = netdev_priv(dev);

        mac->dma_pdev = pci_get_device(PCI_VENDOR_ID_PASEMI, 0xa007, NULL);

        netif_napi_add(dev, &mac->napi, pasemi_mac_poll, 64);

        dev->features = NETIF_F_HW_CSUM;

        if (!mac->dma_pdev) {
                dev_err(&pdev->dev, "Can't find DMA Controller\n");
                goto out_free_netdev;

        mac->iob_pdev = pci_get_device(PCI_VENDOR_ID_PASEMI, 0xa001, NULL);

        if (!mac->iob_pdev) {
                dev_err(&pdev->dev, "Can't find I/O Bridge\n");
                goto out_put_dma_pdev;
        /* These should come out of the device tree eventually */
        mac->dma_txch = index;
        mac->dma_rxch = index;

        /* We probe GMAC before XAUI, but the DMA interfaces are
         * in XAUI, GMAC order.
         */
                mac->dma_if = index + 2;
                mac->dma_if = index - 4;

        switch (pdev->device) {
                mac->type = MAC_TYPE_GMAC;
                mac->type = MAC_TYPE_XAUI;

        /* get mac addr from device tree */
        if (pasemi_get_mac_addr(mac) || !is_valid_ether_addr(mac->mac_addr)) {
        memcpy(dev->dev_addr, mac->mac_addr, sizeof(mac->mac_addr));
        dev->open = pasemi_mac_open;
        dev->stop = pasemi_mac_close;
        dev->hard_start_xmit = pasemi_mac_start_tx;
        dev->get_stats = pasemi_mac_get_stats;
        dev->set_multicast_list = pasemi_mac_set_rx_mode;

        /* The dma status structure is located in the I/O bridge, and
         * is cache coherent.
         */
        /* XXXOJN This should come from the device tree */
        dma_status = __ioremap(0xfd800000, 0x1000, 0);

        mac->rx_status = &dma_status->rx_sta[mac->dma_rxch];
        mac->tx_status = &dma_status->tx_sta[mac->dma_txch];

        mac->msg_enable = netif_msg_init(debug, DEFAULT_MSG_ENABLE);

        /* Enable most messages by default */
        mac->msg_enable = (NETIF_MSG_IFUP << 1 ) - 1;
        err = register_netdev(dev);

                dev_err(&mac->pdev->dev, "register_netdev failed with error %d\n",

        printk(KERN_INFO "%s: PA Semi %s: intf %d, txch %d, rxch %d, "
               "hw addr %02x:%02x:%02x:%02x:%02x:%02x\n",
               dev->name, mac->type == MAC_TYPE_GMAC ? "GMAC" : "XAUI",
               mac->dma_if, mac->dma_txch, mac->dma_rxch,
               dev->dev_addr[0], dev->dev_addr[1], dev->dev_addr[2],
               dev->dev_addr[3], dev->dev_addr[4], dev->dev_addr[5]);

        pci_dev_put(mac->iob_pdev);

        pci_dev_put(mac->dma_pdev);

        pci_disable_device(pdev);
static void __devexit pasemi_mac_remove(struct pci_dev *pdev)
        struct net_device *netdev = pci_get_drvdata(pdev);
        struct pasemi_mac *mac;

        mac = netdev_priv(netdev);

        unregister_netdev(netdev);

        pci_disable_device(pdev);
        pci_dev_put(mac->dma_pdev);
        pci_dev_put(mac->iob_pdev);

        pci_set_drvdata(pdev, NULL);
        free_netdev(netdev);
static struct pci_device_id pasemi_mac_pci_tbl[] = {
        { PCI_DEVICE(PCI_VENDOR_ID_PASEMI, 0xa005) },
        { PCI_DEVICE(PCI_VENDOR_ID_PASEMI, 0xa006) },

MODULE_DEVICE_TABLE(pci, pasemi_mac_pci_tbl);

static struct pci_driver pasemi_mac_driver = {
        .name           = "pasemi_mac",
        .id_table       = pasemi_mac_pci_tbl,
        .probe          = pasemi_mac_probe,
        .remove         = __devexit_p(pasemi_mac_remove),

static void __exit pasemi_mac_cleanup_module(void)
        pci_unregister_driver(&pasemi_mac_driver);
        __iounmap(dma_status);

int pasemi_mac_init_module(void)
        return pci_register_driver(&pasemi_mac_driver);

module_init(pasemi_mac_init_module);
module_exit(pasemi_mac_cleanup_module);