/*
 * Copyright (C) 2006-2007 PA Semi, Inc
 *
 * Driver for the PA Semi PWRficient onchip 1G/10G Ethernet MACs
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307 USA
 */
#include <linux/init.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/dmaengine.h>
#include <linux/delay.h>
#include <linux/netdevice.h>
#include <linux/of_mdio.h>
#include <linux/etherdevice.h>
#include <asm/dma-mapping.h>
#include <linux/in.h>
#include <linux/skbuff.h>

#include <linux/ip.h>
#include <linux/tcp.h>
#include <net/checksum.h>
#include <linux/inet_lro.h>

#include <asm/firmware.h>
#include <asm/pasemi_dma.h>

#include "pasemi_mac.h"
/* We have our own align, since ppc64 in general has it at 0 because
 * of design flaws in some of the server bridge chips. However, for
 * PWRficient doing the unaligned copies is more expensive than doing
 * unaligned DMA, so make sure the data is aligned instead.
 */
#define LOCAL_SKB_ALIGN	2
#define LRO_MAX_AGGR 64

#define PE_MIN_MTU	64
#define PE_MAX_MTU	9000
#define PE_DEF_MTU	ETH_DATA_LEN

#define DEFAULT_MSG_ENABLE	  \
	(NETIF_MSG_DRV		| \
	 NETIF_MSG_PROBE	| \
	 NETIF_MSG_LINK		| \
	 NETIF_MSG_TIMER	| \
	 NETIF_MSG_IFDOWN	| \
	 NETIF_MSG_IFUP		| \
	 NETIF_MSG_RX_ERR	| \
	 NETIF_MSG_TX_ERR)
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Olof Johansson <olof@lixom.net>");
MODULE_DESCRIPTION("PA Semi PWRficient Ethernet driver");

static int debug = -1;	/* -1 == use DEFAULT_MSG_ENABLE as value */
module_param(debug, int, 0);
MODULE_PARM_DESC(debug, "PA Semi MAC bitmapped debugging message enable value");

extern const struct ethtool_ops pasemi_mac_ethtool_ops;
static int translation_enabled(void)
{
#if defined(CONFIG_PPC_PASEMI_IOMMU_DMA_FORCE)
	return 1;
#else
	return firmware_has_feature(FW_FEATURE_LPAR);
#endif
}
static void write_iob_reg(unsigned int reg, unsigned int val)
{
	pasemi_write_iob_reg(reg, val);
}
static unsigned int read_mac_reg(const struct pasemi_mac *mac, unsigned int reg)
{
	return pasemi_read_mac_reg(mac->dma_if, reg);
}
static void write_mac_reg(const struct pasemi_mac *mac, unsigned int reg,
			  unsigned int val)
{
	pasemi_write_mac_reg(mac->dma_if, reg, val);
}
static unsigned int read_dma_reg(unsigned int reg)
{
	return pasemi_read_dma_reg(reg);
}
static void write_dma_reg(unsigned int reg, unsigned int val)
{
	pasemi_write_dma_reg(reg, val);
}
static struct pasemi_mac_rxring *rx_ring(const struct pasemi_mac *mac)
{
	return mac->rx;
}
static struct pasemi_mac_txring *tx_ring(const struct pasemi_mac *mac)
{
	return mac->tx;
}
static inline void prefetch_skb(const struct sk_buff *skb)
{
	const void *d = skb;

	prefetch(d);
	prefetch(d+64);
	prefetch(d+128);
	prefetch(d+192);
}
static int mac_to_intf(struct pasemi_mac *mac)
{
	struct pci_dev *pdev = mac->pdev;
	u32 tmp;
	int nintf, off, i, j;
	int devfn = pdev->devfn;

	tmp = read_dma_reg(PAS_DMA_CAP_IFI);
	nintf = (tmp & PAS_DMA_CAP_IFI_NIN_M) >> PAS_DMA_CAP_IFI_NIN_S;
	off = (tmp & PAS_DMA_CAP_IFI_IOFF_M) >> PAS_DMA_CAP_IFI_IOFF_S;

	/* IOFF contains the offset to the registers containing the
	 * DMA interface-to-MAC-pci-id mappings, and NIN contains number
	 * of total interfaces. Each register contains 4 devfns.
	 * Just do a linear search until we find the devfn of the MAC
	 * we're trying to look up.
	 */

	for (i = 0; i < (nintf+3)/4; i++) {
		tmp = read_dma_reg(off+4*i);
		for (j = 0; j < 4; j++) {
			if (((tmp >> (8*j)) & 0xff) == devfn)
				return i*4 + j;
		}
	}
	return -1;
}
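
/* A worked example of the lookup above, with hypothetical register values:
 * if PAS_DMA_CAP_IFI read back NIN = 6 and IOFF = 0x180, the loop would scan
 * ceil(6/4) = 2 mapping registers at 0x180 and 0x184. A register value of
 * 0x00322820 packs devfns 0x20, 0x28, 0x32 and 0x00 into bytes 0-3, so a MAC
 * at devfn 0x28 resolves to interface 1. (Illustration only; these values
 * are made up, not read from real hardware.)
 */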
static void pasemi_mac_intf_disable(struct pasemi_mac *mac)
{
	unsigned int flags;

	flags = read_mac_reg(mac, PAS_MAC_CFG_PCFG);
	flags &= ~PAS_MAC_CFG_PCFG_PE;
	write_mac_reg(mac, PAS_MAC_CFG_PCFG, flags);
}
static void pasemi_mac_intf_enable(struct pasemi_mac *mac)
{
	unsigned int flags;

	flags = read_mac_reg(mac, PAS_MAC_CFG_PCFG);
	flags |= PAS_MAC_CFG_PCFG_PE;
	write_mac_reg(mac, PAS_MAC_CFG_PCFG, flags);
}
static int pasemi_get_mac_addr(struct pasemi_mac *mac)
{
	struct pci_dev *pdev = mac->pdev;
	struct device_node *dn = pci_device_to_OF_node(pdev);
	int len;
	const u8 *maddr;
	u8 addr[6];

	if (!dn) {
		dev_dbg(&pdev->dev,
			"No device node for mac, not configuring\n");
		return -ENOENT;
	}

	maddr = of_get_property(dn, "local-mac-address", &len);

	if (maddr && len == 6) {
		memcpy(mac->mac_addr, maddr, 6);
		return 0;
	}

	/* Some old versions of firmware mistakenly use mac-address
	 * (and as a string) instead of a byte array in local-mac-address.
	 */

	maddr = of_get_property(dn, "mac-address", NULL);
	if (!maddr) {
		dev_warn(&pdev->dev,
			 "no mac address in device tree, not configuring\n");
		return -ENOENT;
	}

	if (sscanf(maddr, "%hhx:%hhx:%hhx:%hhx:%hhx:%hhx", &addr[0],
		   &addr[1], &addr[2], &addr[3], &addr[4], &addr[5]) != 6) {
		dev_warn(&pdev->dev,
			 "can't parse mac address, not configuring\n");
		return -EINVAL;
	}

	memcpy(mac->mac_addr, addr, 6);

	return 0;
}
static int pasemi_mac_set_mac_addr(struct net_device *dev, void *p)
{
	struct pasemi_mac *mac = netdev_priv(dev);
	struct sockaddr *addr = p;
	unsigned int adr0, adr1;

	if (!is_valid_ether_addr(addr->sa_data))
		return -EADDRNOTAVAIL;

	memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);

	adr0 = dev->dev_addr[2] << 24 |
	       dev->dev_addr[3] << 16 |
	       dev->dev_addr[4] << 8 |
	       dev->dev_addr[5];
	adr1 = read_mac_reg(mac, PAS_MAC_CFG_ADR1);
	adr1 &= ~0xffff;
	adr1 |= dev->dev_addr[0] << 8 | dev->dev_addr[1];

	pasemi_mac_intf_disable(mac);
	write_mac_reg(mac, PAS_MAC_CFG_ADR0, adr0);
	write_mac_reg(mac, PAS_MAC_CFG_ADR1, adr1);
	pasemi_mac_intf_enable(mac);

	return 0;
}
static int get_skb_hdr(struct sk_buff *skb, void **iphdr,
		       void **tcph, u64 *hdr_flags, void *data)
{
	u64 macrx = (u64) data;
	unsigned int ip_len;
	struct iphdr *iph;

	/* IPv4 header checksum failed */
	if ((macrx & XCT_MACRX_HTY_M) != XCT_MACRX_HTY_IPV4_OK)
		return -1;

	/* non tcp packet */
	skb_reset_network_header(skb);
	iph = ip_hdr(skb);
	if (iph->protocol != IPPROTO_TCP)
		return -1;

	ip_len = ip_hdrlen(skb);
	skb_set_transport_header(skb, ip_len);
	*tcph = tcp_hdr(skb);

	/* check if ip header and tcp header are complete */
	if (ntohs(iph->tot_len) < ip_len + tcp_hdrlen(skb))
		return -1;

	*hdr_flags = LRO_IPV4 | LRO_TCP;
	*iphdr = iph;

	return 0;
}
static int pasemi_mac_unmap_tx_skb(struct pasemi_mac *mac,
				   const int nfrags,
				   struct sk_buff *skb,
				   const dma_addr_t *dmas)
{
	int f;
	struct pci_dev *pdev = mac->dma_pdev;

	pci_unmap_single(pdev, dmas[0], skb_headlen(skb), PCI_DMA_TODEVICE);

	for (f = 0; f < nfrags; f++) {
		skb_frag_t *frag = &skb_shinfo(skb)->frags[f];

		pci_unmap_page(pdev, dmas[f+1], frag->size, PCI_DMA_TODEVICE);
	}
	dev_kfree_skb_irq(skb);

	/* Freed descriptor slot + main SKB ptr + nfrags additional ptrs,
	 * aligned up to a power of 2
	 */
	return (nfrags + 3) & ~1;
}
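
/* Slot accounting sketch for the return value above (a reading of the code,
 * not an original comment): a packet occupies one mactx descriptor plus
 * (nfrags + 1) pointer descriptors, and the ring always advances by an even
 * count, so nfrags = 0 frees (0+3) & ~1 = 2 slots and nfrags = 3 frees
 * (3+3) & ~1 = 6 slots.
 */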
static struct pasemi_mac_csring *pasemi_mac_setup_csring(struct pasemi_mac *mac)
{
	struct pasemi_mac_csring *ring;
	u32 val;
	u64 cfg;
	int chno;

	ring = pasemi_dma_alloc_chan(TXCHAN, sizeof(struct pasemi_mac_csring),
				     offsetof(struct pasemi_mac_csring, chan));

	if (!ring) {
		dev_err(&mac->pdev->dev, "Can't allocate checksum channel\n");
		goto out_chan;
	}

	chno = ring->chan.chno;

	ring->size = CS_RING_SIZE;
	ring->next_to_fill = 0;

	/* Allocate descriptors */
	if (pasemi_dma_alloc_ring(&ring->chan, CS_RING_SIZE))
		goto out_ring_desc;

	write_dma_reg(PAS_DMA_TXCHAN_BASEL(chno),
		      PAS_DMA_TXCHAN_BASEL_BRBL(ring->chan.ring_dma));
	val = PAS_DMA_TXCHAN_BASEU_BRBH(ring->chan.ring_dma >> 32);
	val |= PAS_DMA_TXCHAN_BASEU_SIZ(CS_RING_SIZE >> 3);

	write_dma_reg(PAS_DMA_TXCHAN_BASEU(chno), val);

	ring->events[0] = pasemi_dma_alloc_flag();
	ring->events[1] = pasemi_dma_alloc_flag();
	if (ring->events[0] < 0 || ring->events[1] < 0)
		goto out_flags;

	pasemi_dma_clear_flag(ring->events[0]);
	pasemi_dma_clear_flag(ring->events[1]);

	ring->fun = pasemi_dma_alloc_fun();
	if (ring->fun < 0)
		goto out_flags;

	cfg = PAS_DMA_TXCHAN_CFG_TY_FUNC | PAS_DMA_TXCHAN_CFG_UP |
	      PAS_DMA_TXCHAN_CFG_TATTR(ring->fun) |
	      PAS_DMA_TXCHAN_CFG_LPSQ | PAS_DMA_TXCHAN_CFG_LPDQ;

	if (translation_enabled())
		cfg |= PAS_DMA_TXCHAN_CFG_TRD | PAS_DMA_TXCHAN_CFG_TRR;

	write_dma_reg(PAS_DMA_TXCHAN_CFG(chno), cfg);

	/* enable channel */
	pasemi_dma_start_chan(&ring->chan, PAS_DMA_TXCHAN_TCMDSTA_SZ |
					   PAS_DMA_TXCHAN_TCMDSTA_DB |
					   PAS_DMA_TXCHAN_TCMDSTA_DE |
					   PAS_DMA_TXCHAN_TCMDSTA_DA);

	return ring;

out_flags:
	if (ring->events[0] >= 0)
		pasemi_dma_free_flag(ring->events[0]);
	if (ring->events[1] >= 0)
		pasemi_dma_free_flag(ring->events[1]);
	pasemi_dma_free_ring(&ring->chan);
out_ring_desc:
	pasemi_dma_free_chan(&ring->chan);
out_chan:
	return NULL;
}
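
/* Note on the BASEU programming above (inferred from the shifts, not from
 * hardware documentation): the >> 3 suggests the SIZ field counts the ring
 * in units of 8 descriptors, so a hypothetical CS_RING_SIZE of 2048 would
 * be programmed as 256.
 */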
static void pasemi_mac_setup_csrings(struct pasemi_mac *mac)
{
	int i;

	mac->cs[0] = pasemi_mac_setup_csring(mac);
	if (mac->type == MAC_TYPE_XAUI)
		mac->cs[1] = pasemi_mac_setup_csring(mac);
	else
		mac->cs[1] = 0;

	for (i = 0; i < MAX_CS; i++)
		if (mac->cs[i])
			mac->num_cs++;
}
static void pasemi_mac_free_csring(struct pasemi_mac_csring *csring)
{
	pasemi_dma_stop_chan(&csring->chan);
	pasemi_dma_free_flag(csring->events[0]);
	pasemi_dma_free_flag(csring->events[1]);
	pasemi_dma_free_ring(&csring->chan);
	pasemi_dma_free_chan(&csring->chan);
	pasemi_dma_free_fun(csring->fun);
}
static int pasemi_mac_setup_rx_resources(const struct net_device *dev)
{
	struct pasemi_mac_rxring *ring;
	struct pasemi_mac *mac = netdev_priv(dev);
	int chno;
	u64 cfg;

	ring = pasemi_dma_alloc_chan(RXCHAN, sizeof(struct pasemi_mac_rxring),
				     offsetof(struct pasemi_mac_rxring, chan));

	if (!ring) {
		dev_err(&mac->pdev->dev, "Can't allocate RX channel\n");
		goto out_chan;
	}
	chno = ring->chan.chno;

	spin_lock_init(&ring->lock);

	ring->size = RX_RING_SIZE;
	ring->ring_info = kzalloc(sizeof(struct pasemi_mac_buffer) *
				  RX_RING_SIZE, GFP_KERNEL);

	if (!ring->ring_info)
		goto out_ring_info;

	/* Allocate descriptors */
	if (pasemi_dma_alloc_ring(&ring->chan, RX_RING_SIZE))
		goto out_ring_desc;

	ring->buffers = dma_alloc_coherent(&mac->dma_pdev->dev,
					   RX_RING_SIZE * sizeof(u64),
					   &ring->buf_dma, GFP_KERNEL);
	if (!ring->buffers)
		goto out_ring_desc;

	memset(ring->buffers, 0, RX_RING_SIZE * sizeof(u64));

	write_dma_reg(PAS_DMA_RXCHAN_BASEL(chno),
		      PAS_DMA_RXCHAN_BASEL_BRBL(ring->chan.ring_dma));

	write_dma_reg(PAS_DMA_RXCHAN_BASEU(chno),
		      PAS_DMA_RXCHAN_BASEU_BRBH(ring->chan.ring_dma >> 32) |
		      PAS_DMA_RXCHAN_BASEU_SIZ(RX_RING_SIZE >> 3));

	cfg = PAS_DMA_RXCHAN_CFG_HBU(2);

	if (translation_enabled())
		cfg |= PAS_DMA_RXCHAN_CFG_CTR;

	write_dma_reg(PAS_DMA_RXCHAN_CFG(chno), cfg);

	write_dma_reg(PAS_DMA_RXINT_BASEL(mac->dma_if),
		      PAS_DMA_RXINT_BASEL_BRBL(ring->buf_dma));

	write_dma_reg(PAS_DMA_RXINT_BASEU(mac->dma_if),
		      PAS_DMA_RXINT_BASEU_BRBH(ring->buf_dma >> 32) |
		      PAS_DMA_RXINT_BASEU_SIZ(RX_RING_SIZE >> 3));

	cfg = PAS_DMA_RXINT_CFG_DHL(2) | PAS_DMA_RXINT_CFG_L2 |
	      PAS_DMA_RXINT_CFG_LW | PAS_DMA_RXINT_CFG_RBP |
	      PAS_DMA_RXINT_CFG_HEN;

	if (translation_enabled())
		cfg |= PAS_DMA_RXINT_CFG_ITRR | PAS_DMA_RXINT_CFG_ITR;

	write_dma_reg(PAS_DMA_RXINT_CFG(mac->dma_if), cfg);

	ring->next_to_fill = 0;
	ring->next_to_clean = 0;
	ring->mac = mac;
	mac->rx = ring;

	return 0;

out_ring_desc:
	kfree(ring->ring_info);
out_ring_info:
	pasemi_dma_free_chan(&ring->chan);
out_chan:
	return -ENOMEM;
}
static struct pasemi_mac_txring *
pasemi_mac_setup_tx_resources(const struct net_device *dev)
{
	struct pasemi_mac *mac = netdev_priv(dev);
	u32 val;
	struct pasemi_mac_txring *ring;
	u64 cfg;
	int chno;

	ring = pasemi_dma_alloc_chan(TXCHAN, sizeof(struct pasemi_mac_txring),
				     offsetof(struct pasemi_mac_txring, chan));

	if (!ring) {
		dev_err(&mac->pdev->dev, "Can't allocate TX channel\n");
		goto out_chan;
	}
	chno = ring->chan.chno;

	spin_lock_init(&ring->lock);

	ring->size = TX_RING_SIZE;
	ring->ring_info = kzalloc(sizeof(struct pasemi_mac_buffer) *
				  TX_RING_SIZE, GFP_KERNEL);
	if (!ring->ring_info)
		goto out_ring_info;

	/* Allocate descriptors */
	if (pasemi_dma_alloc_ring(&ring->chan, TX_RING_SIZE))
		goto out_ring_desc;

	write_dma_reg(PAS_DMA_TXCHAN_BASEL(chno),
		      PAS_DMA_TXCHAN_BASEL_BRBL(ring->chan.ring_dma));
	val = PAS_DMA_TXCHAN_BASEU_BRBH(ring->chan.ring_dma >> 32);
	val |= PAS_DMA_TXCHAN_BASEU_SIZ(TX_RING_SIZE >> 3);

	write_dma_reg(PAS_DMA_TXCHAN_BASEU(chno), val);

	cfg = PAS_DMA_TXCHAN_CFG_TY_IFACE |
	      PAS_DMA_TXCHAN_CFG_TATTR(mac->dma_if) |
	      PAS_DMA_TXCHAN_CFG_UP |
	      PAS_DMA_TXCHAN_CFG_WT(4);

	if (translation_enabled())
		cfg |= PAS_DMA_TXCHAN_CFG_TRD | PAS_DMA_TXCHAN_CFG_TRR;

	write_dma_reg(PAS_DMA_TXCHAN_CFG(chno), cfg);

	ring->next_to_fill = 0;
	ring->next_to_clean = 0;
	ring->mac = mac;

	return ring;

out_ring_desc:
	kfree(ring->ring_info);
out_ring_info:
	pasemi_dma_free_chan(&ring->chan);
out_chan:
	return NULL;
}
static void pasemi_mac_free_tx_resources(struct pasemi_mac *mac)
{
	struct pasemi_mac_txring *txring = tx_ring(mac);
	unsigned int i, j;
	struct pasemi_mac_buffer *info;
	dma_addr_t dmas[MAX_SKB_FRAGS+1];
	int freed, nfrags;
	int start, limit;

	start = txring->next_to_clean;
	limit = txring->next_to_fill;

	/* Compensate for when fill has wrapped and clean has not */
	if (start > limit)
		limit += TX_RING_SIZE;

	for (i = start; i < limit; i += freed) {
		info = &txring->ring_info[(i+1) & (TX_RING_SIZE-1)];
		if (info->dma && info->skb) {
			nfrags = skb_shinfo(info->skb)->nr_frags;
			for (j = 0; j <= nfrags; j++)
				dmas[j] = txring->ring_info[(i+1+j) &
						(TX_RING_SIZE-1)].dma;
			freed = pasemi_mac_unmap_tx_skb(mac, nfrags,
							info->skb, dmas);
		} else {
			freed = 2;
		}
	}

	kfree(txring->ring_info);
	pasemi_dma_free_chan(&txring->chan);
}
static void pasemi_mac_free_rx_buffers(struct pasemi_mac *mac)
{
	struct pasemi_mac_rxring *rx = rx_ring(mac);
	unsigned int i;
	struct pasemi_mac_buffer *info;

	for (i = 0; i < RX_RING_SIZE; i++) {
		info = &RX_DESC_INFO(rx, i);
		if (info->skb && info->dma) {
			pci_unmap_single(mac->dma_pdev,
					 info->dma,
					 info->skb->len,
					 PCI_DMA_FROMDEVICE);
			dev_kfree_skb_any(info->skb);
		}
		info->dma = 0;
		info->skb = NULL;
	}

	for (i = 0; i < RX_RING_SIZE; i++)
		RX_BUFF(rx, i) = 0;
}
static void pasemi_mac_free_rx_resources(struct pasemi_mac *mac)
{
	pasemi_mac_free_rx_buffers(mac);

	dma_free_coherent(&mac->dma_pdev->dev, RX_RING_SIZE * sizeof(u64),
			  rx_ring(mac)->buffers, rx_ring(mac)->buf_dma);

	kfree(rx_ring(mac)->ring_info);
	pasemi_dma_free_chan(&rx_ring(mac)->chan);
	mac->rx = NULL;
}
static void pasemi_mac_replenish_rx_ring(const struct net_device *dev,
					 const int limit)
{
	const struct pasemi_mac *mac = netdev_priv(dev);
	struct pasemi_mac_rxring *rx = rx_ring(mac);
	int fill, count;

	if (limit <= 0)
		return;

	fill = rx_ring(mac)->next_to_fill;
	for (count = 0; count < limit; count++) {
		struct pasemi_mac_buffer *info = &RX_DESC_INFO(rx, fill);
		u64 *buff = &RX_BUFF(rx, fill);
		struct sk_buff *skb;
		dma_addr_t dma;

		skb = dev_alloc_skb(mac->bufsz);
		skb_reserve(skb, LOCAL_SKB_ALIGN);

		if (unlikely(!skb))
			break;

		dma = pci_map_single(mac->dma_pdev, skb->data,
				     mac->bufsz - LOCAL_SKB_ALIGN,
				     PCI_DMA_FROMDEVICE);

		if (unlikely(pci_dma_mapping_error(mac->dma_pdev, dma))) {
			dev_kfree_skb_irq(info->skb);
			break;
		}

		info->skb = skb;
		info->dma = dma;
		*buff = XCT_RXB_LEN(mac->bufsz) | XCT_RXB_ADDR(dma);
		fill++;
	}

	write_dma_reg(PAS_DMA_RXINT_INCR(mac->dma_if), count);

	rx_ring(mac)->next_to_fill = (rx_ring(mac)->next_to_fill + count) &
				     (RX_RING_SIZE - 1);
}
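
/* The RXINT_INCR write above appears to hand exactly 'count' newly filled
 * 8-byte buffer entries to the RX interface, while next_to_fill advances by
 * the same count modulo RX_RING_SIZE, keeping software and hardware in step
 * across ring wrap. (Summary of the code above, not an additional hardware
 * guarantee.)
 */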
static void pasemi_mac_restart_rx_intr(const struct pasemi_mac *mac)
{
	struct pasemi_mac_rxring *rx = rx_ring(mac);
	unsigned int reg, pcnt;
	/* Re-enable packet count interrupts: finally
	 * ack the packet count interrupt we got in rx_intr.
	 */

	pcnt = *rx->chan.status & PAS_STATUS_PCNT_M;

	reg = PAS_IOB_DMA_RXCH_RESET_PCNT(pcnt) | PAS_IOB_DMA_RXCH_RESET_PINTC;

	if (*rx->chan.status & PAS_STATUS_TIMER)
		reg |= PAS_IOB_DMA_RXCH_RESET_TINTC;

	write_iob_reg(PAS_IOB_DMA_RXCH_RESET(mac->rx->chan.chno), reg);
}
static void pasemi_mac_restart_tx_intr(const struct pasemi_mac *mac)
{
	unsigned int reg, pcnt;

	/* Re-enable packet count interrupts */
	pcnt = *tx_ring(mac)->chan.status & PAS_STATUS_PCNT_M;

	reg = PAS_IOB_DMA_TXCH_RESET_PCNT(pcnt) | PAS_IOB_DMA_TXCH_RESET_PINTC;

	write_iob_reg(PAS_IOB_DMA_TXCH_RESET(tx_ring(mac)->chan.chno), reg);
}
static inline void pasemi_mac_rx_error(const struct pasemi_mac *mac,
					const u64 macrx)
{
	unsigned int rcmdsta, ccmdsta;
	struct pasemi_dmachan *chan = &rx_ring(mac)->chan;

	if (!netif_msg_rx_err(mac))
		return;

	rcmdsta = read_dma_reg(PAS_DMA_RXINT_RCMDSTA(mac->dma_if));
	ccmdsta = read_dma_reg(PAS_DMA_RXCHAN_CCMDSTA(chan->chno));

	printk(KERN_ERR "pasemi_mac: rx error. macrx %016llx, rx status %llx\n",
	       macrx, *chan->status);

	printk(KERN_ERR "pasemi_mac: rcmdsta %08x ccmdsta %08x\n",
	       rcmdsta, ccmdsta);
}
static inline void pasemi_mac_tx_error(const struct pasemi_mac *mac,
					const u64 mactx)
{
	unsigned int cmdsta;
	struct pasemi_dmachan *chan = &tx_ring(mac)->chan;

	if (!netif_msg_tx_err(mac))
		return;

	cmdsta = read_dma_reg(PAS_DMA_TXCHAN_TCMDSTA(chan->chno));

	printk(KERN_ERR "pasemi_mac: tx error. mactx 0x%016llx, "
	       "tx status 0x%016llx\n", mactx, *chan->status);

	printk(KERN_ERR "pasemi_mac: tcmdsta 0x%08x\n", cmdsta);
}
static int pasemi_mac_clean_rx(struct pasemi_mac_rxring *rx,
			       const int limit)
{
	const struct pasemi_dmachan *chan = &rx->chan;
	struct pasemi_mac *mac = rx->mac;
	struct pci_dev *pdev = mac->dma_pdev;
	unsigned int n;
	int count, buf_index, tot_bytes, packets;
	struct pasemi_mac_buffer *info;
	struct sk_buff *skb;
	unsigned int len;
	u64 macrx, eval;
	dma_addr_t dma;

	tot_bytes = 0;
	packets = 0;

	spin_lock(&rx->lock);

	n = rx->next_to_clean;

	prefetch(&RX_DESC(rx, n));

	for (count = 0; count < limit; count++) {
		macrx = RX_DESC(rx, n);
		prefetch(&RX_DESC(rx, n+4));

		if ((macrx & XCT_MACRX_E) ||
		    (*chan->status & PAS_STATUS_ERROR))
			pasemi_mac_rx_error(mac, macrx);

		if (!(macrx & XCT_MACRX_O))
			break;

		info = NULL;

		BUG_ON(!(macrx & XCT_MACRX_RR_8BRES));

		eval = (RX_DESC(rx, n+1) & XCT_RXRES_8B_EVAL_M) >>
			XCT_RXRES_8B_EVAL_S;
		buf_index = eval-1;

		dma = (RX_DESC(rx, n+2) & XCT_PTR_ADDR_M);
		info = &RX_DESC_INFO(rx, buf_index);

		skb = info->skb;

		prefetch_skb(skb);

		len = (macrx & XCT_MACRX_LLEN_M) >> XCT_MACRX_LLEN_S;

		pci_unmap_single(pdev, dma, mac->bufsz - LOCAL_SKB_ALIGN,
				 PCI_DMA_FROMDEVICE);

		if (macrx & XCT_MACRX_CRC) {
			/* CRC error flagged */
			mac->netdev->stats.rx_errors++;
			mac->netdev->stats.rx_crc_errors++;
			/* No need to free skb, it'll be reused */
			goto next;
		}

		info->skb = NULL;
		info->dma = 0;

		if (likely((macrx & XCT_MACRX_HTY_M) == XCT_MACRX_HTY_IPV4_OK)) {
			skb->ip_summed = CHECKSUM_UNNECESSARY;
			skb->csum = (macrx & XCT_MACRX_CSUM_M) >>
				    XCT_MACRX_CSUM_S;
		} else {
			skb->ip_summed = CHECKSUM_NONE;
		}

		packets++;
		tot_bytes += len;

		/* Don't include CRC */
		skb_put(skb, len-4);

		skb->protocol = eth_type_trans(skb, mac->netdev);
		lro_receive_skb(&mac->lro_mgr, skb, (void *)macrx);

next:
		RX_DESC(rx, n) = 0;
		RX_DESC(rx, n+1) = 0;

		/* Need to zero it out since hardware doesn't, since the
		 * replenish loop uses it to tell when it's done.
		 */
		RX_BUFF(rx, buf_index) = 0;

		n += 4;
	}

	if (n > RX_RING_SIZE) {
		write_iob_reg(PAS_IOB_COM_PKTHDRCNT, 0);
		n &= (RX_RING_SIZE-1);
	}

	rx_ring(mac)->next_to_clean = n;

	lro_flush_all(&mac->lro_mgr);

	/* Increase is in number of 16-byte entries, and since each descriptor
	 * with an 8BRES takes up 3x8 bytes (padded to 4x8), increase with
	 * count*2.
	 */
	write_dma_reg(PAS_DMA_RXCHAN_INCR(mac->rx->chan.chno), count << 1);

	pasemi_mac_replenish_rx_ring(mac->netdev, count);

	mac->netdev->stats.rx_bytes += tot_bytes;
	mac->netdev->stats.rx_packets += packets;

	spin_unlock(&rx_ring(mac)->lock);

	return count;
}
/* Can't make this too large or we blow the kernel stack limits */
#define TX_CLEAN_BATCHSIZE (128/MAX_SKB_FRAGS)
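
/* Rough stack arithmetic behind the batch size, assuming a typical 4K-page
 * build where MAX_SKB_FRAGS is 18: 128/18 = 7 packets per pass, so
 * pasemi_mac_clean_tx keeps about 7 skb pointers, 7*19 dma_addr_t entries
 * and 7 ints on the stack -- roughly 1.2KB. (The exact numbers depend on
 * the kernel configuration.)
 */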
static int pasemi_mac_clean_tx(struct pasemi_mac_txring *txring)
{
	struct pasemi_dmachan *chan = &txring->chan;
	struct pasemi_mac *mac = txring->mac;
	unsigned int i, j;
	unsigned int start, descr_count, buf_count, batch_limit;
	unsigned int ring_limit;
	unsigned int total_count;
	unsigned long flags;
	struct sk_buff *skbs[TX_CLEAN_BATCHSIZE];
	dma_addr_t dmas[TX_CLEAN_BATCHSIZE][MAX_SKB_FRAGS+1];
	int nf[TX_CLEAN_BATCHSIZE];
	int nr_frags;

	total_count = 0;
	batch_limit = TX_CLEAN_BATCHSIZE;
restart:
	spin_lock_irqsave(&txring->lock, flags);

	start = txring->next_to_clean;
	ring_limit = txring->next_to_fill;

	prefetch(&TX_DESC_INFO(txring, start+1).skb);

	/* Compensate for when fill has wrapped but clean has not */
	if (start > ring_limit)
		ring_limit += TX_RING_SIZE;

	buf_count = 0;
	descr_count = 0;

	for (i = start;
	     descr_count < batch_limit && i < ring_limit;
	     i += buf_count) {
		u64 mactx = TX_DESC(txring, i);
		struct sk_buff *skb;

		if ((mactx & XCT_MACTX_E) ||
		    (*chan->status & PAS_STATUS_ERROR))
			pasemi_mac_tx_error(mac, mactx);

		/* Skip over control descriptors */
		if (!(mactx & XCT_MACTX_LLEN_M)) {
			TX_DESC(txring, i) = 0;
			TX_DESC(txring, i+1) = 0;
			buf_count = 2;
			continue;
		}

		skb = TX_DESC_INFO(txring, i+1).skb;
		nr_frags = TX_DESC_INFO(txring, i).dma;

		if (unlikely(mactx & XCT_MACTX_O))
			/* Not yet transmitted */
			break;

		buf_count = 2 + nr_frags;
		/* Since we always fill with an even number of entries, make
		 * sure we skip any unused one at the end as well.
		 */
		if (buf_count & 1)
			buf_count++;

		for (j = 0; j <= nr_frags; j++)
			dmas[descr_count][j] = TX_DESC_INFO(txring, i+1+j).dma;

		skbs[descr_count] = skb;
		nf[descr_count] = nr_frags;

		TX_DESC(txring, i) = 0;
		TX_DESC(txring, i+1) = 0;

		descr_count++;
	}
	txring->next_to_clean = i & (TX_RING_SIZE-1);

	spin_unlock_irqrestore(&txring->lock, flags);
	netif_wake_queue(mac->netdev);

	for (i = 0; i < descr_count; i++)
		pasemi_mac_unmap_tx_skb(mac, nf[i], skbs[i], dmas[i]);

	total_count += descr_count;

	/* If the batch was full, try to clean more */
	if (descr_count == batch_limit)
		goto restart;

	return total_count;
}
static irqreturn_t pasemi_mac_rx_intr(int irq, void *data)
{
	const struct pasemi_mac_rxring *rxring = data;
	struct pasemi_mac *mac = rxring->mac;
	const struct pasemi_dmachan *chan = &rxring->chan;
	unsigned int reg;

	if (!(*chan->status & PAS_STATUS_CAUSE_M))
		return IRQ_NONE;

	/* Don't reset packet count so it won't fire again but clear
	 * all interrupt sources.
	 */

	reg = 0;
	if (*chan->status & PAS_STATUS_SOFT)
		reg |= PAS_IOB_DMA_RXCH_RESET_SINTC;
	if (*chan->status & PAS_STATUS_ERROR)
		reg |= PAS_IOB_DMA_RXCH_RESET_DINTC;

	napi_schedule(&mac->napi);

	write_iob_reg(PAS_IOB_DMA_RXCH_RESET(chan->chno), reg);

	return IRQ_HANDLED;
}
#define TX_CLEAN_INTERVAL HZ

static void pasemi_mac_tx_timer(unsigned long data)
{
	struct pasemi_mac_txring *txring = (struct pasemi_mac_txring *)data;
	struct pasemi_mac *mac = txring->mac;

	pasemi_mac_clean_tx(txring);

	mod_timer(&txring->clean_timer, jiffies + TX_CLEAN_INTERVAL);

	pasemi_mac_restart_tx_intr(mac);
}
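
/* Design note, inferred from the code rather than stated in it: this timer
 * is a backstop for TX completions. pasemi_mac_tx_intr below pushes the
 * deadline out to 2*TX_CLEAN_INTERVAL whenever interrupts are flowing, so
 * the timer only does real work when the interrupt path has gone quiet.
 */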
static irqreturn_t pasemi_mac_tx_intr(int irq, void *data)
{
	struct pasemi_mac_txring *txring = data;
	const struct pasemi_dmachan *chan = &txring->chan;
	struct pasemi_mac *mac = txring->mac;
	unsigned int reg;

	if (!(*chan->status & PAS_STATUS_CAUSE_M))
		return IRQ_NONE;

	reg = 0;

	if (*chan->status & PAS_STATUS_SOFT)
		reg |= PAS_IOB_DMA_TXCH_RESET_SINTC;
	if (*chan->status & PAS_STATUS_ERROR)
		reg |= PAS_IOB_DMA_TXCH_RESET_DINTC;

	mod_timer(&txring->clean_timer, jiffies + (TX_CLEAN_INTERVAL)*2);

	napi_schedule(&mac->napi);

	if (reg)
		write_iob_reg(PAS_IOB_DMA_TXCH_RESET(chan->chno), reg);

	return IRQ_HANDLED;
}
static void pasemi_adjust_link(struct net_device *dev)
{
	struct pasemi_mac *mac = netdev_priv(dev);
	int msg;
	unsigned int flags;
	unsigned int new_flags;

	if (!mac->phydev->link) {
		/* If no link, MAC speed settings don't matter. Just report
		 * link down and return.
		 */
		if (mac->link && netif_msg_link(mac))
			printk(KERN_INFO "%s: Link is down.\n", dev->name);

		netif_carrier_off(dev);
		pasemi_mac_intf_disable(mac);
		mac->link = 0;

		return;
	} else {
		pasemi_mac_intf_enable(mac);
		netif_carrier_on(dev);
	}

	flags = read_mac_reg(mac, PAS_MAC_CFG_PCFG);
	new_flags = flags & ~(PAS_MAC_CFG_PCFG_HD | PAS_MAC_CFG_PCFG_SPD_M |
			      PAS_MAC_CFG_PCFG_TSR_M);

	if (!mac->phydev->duplex)
		new_flags |= PAS_MAC_CFG_PCFG_HD;

	switch (mac->phydev->speed) {
	case 1000:
		new_flags |= PAS_MAC_CFG_PCFG_SPD_1G |
			     PAS_MAC_CFG_PCFG_TSR_1G;
		break;
	case 100:
		new_flags |= PAS_MAC_CFG_PCFG_SPD_100M |
			     PAS_MAC_CFG_PCFG_TSR_100M;
		break;
	case 10:
		new_flags |= PAS_MAC_CFG_PCFG_SPD_10M |
			     PAS_MAC_CFG_PCFG_TSR_10M;
		break;
	default:
		printk("Unsupported speed %d\n", mac->phydev->speed);
	}

	/* Print on link or speed/duplex change */
	msg = mac->link != mac->phydev->link || flags != new_flags;

	mac->duplex = mac->phydev->duplex;
	mac->speed = mac->phydev->speed;
	mac->link = mac->phydev->link;

	if (new_flags != flags)
		write_mac_reg(mac, PAS_MAC_CFG_PCFG, new_flags);

	if (msg && netif_msg_link(mac))
		printk(KERN_INFO "%s: Link is up at %d Mbps, %s duplex.\n",
		       dev->name, mac->speed, mac->duplex ? "full" : "half");
}
static int pasemi_mac_phy_init(struct net_device *dev)
{
	struct pasemi_mac *mac = netdev_priv(dev);
	struct device_node *dn, *phy_dn;
	struct phy_device *phydev;

	dn = pci_device_to_OF_node(mac->pdev);
	phy_dn = of_parse_phandle(dn, "phy-handle", 0);
	of_node_put(phy_dn);

	mac->link = 0;
	mac->speed = 0;
	mac->duplex = -1;

	phydev = of_phy_connect(dev, phy_dn, &pasemi_adjust_link, 0,
				PHY_INTERFACE_MODE_SGMII);

	if (IS_ERR(phydev)) {
		printk(KERN_ERR "%s: Could not attach to phy\n", dev->name);
		return PTR_ERR(phydev);
	}

	mac->phydev = phydev;

	return 0;
}
static int pasemi_mac_open(struct net_device *dev)
{
	struct pasemi_mac *mac = netdev_priv(dev);
	unsigned int flags;
	int i, ret;

	flags = PAS_MAC_CFG_TXP_FCE | PAS_MAC_CFG_TXP_FPC(3) |
		PAS_MAC_CFG_TXP_SL(3) | PAS_MAC_CFG_TXP_COB(0xf) |
		PAS_MAC_CFG_TXP_TIFT(8) | PAS_MAC_CFG_TXP_TIFG(12);

	write_mac_reg(mac, PAS_MAC_CFG_TXP, flags);

	ret = pasemi_mac_setup_rx_resources(dev);
	if (ret)
		goto out_rx_resources;

	mac->tx = pasemi_mac_setup_tx_resources(dev);

	if (!mac->tx)
		goto out_tx_ring;

	/* We might already have allocated rings in case mtu was changed
	 * before interface was brought up.
	 */
	if (dev->mtu > 1500 && !mac->num_cs) {
		pasemi_mac_setup_csrings(mac);
		if (!mac->num_cs)
			goto out_tx_ring;
	}

	/* Zero out rmon counters */
	for (i = 0; i < 32; i++)
		write_mac_reg(mac, PAS_MAC_RMON(i), 0);

	/* 0x3ff with 33MHz clock is about 31us */
	write_iob_reg(PAS_IOB_DMA_COM_TIMEOUTCFG,
		      PAS_IOB_DMA_COM_TIMEOUTCFG_TCNT(0x3ff));

	write_iob_reg(PAS_IOB_DMA_RXCH_CFG(mac->rx->chan.chno),
		      PAS_IOB_DMA_RXCH_CFG_CNTTH(256));

	write_iob_reg(PAS_IOB_DMA_TXCH_CFG(mac->tx->chan.chno),
		      PAS_IOB_DMA_TXCH_CFG_CNTTH(32));

	write_mac_reg(mac, PAS_MAC_IPC_CHNL,
		      PAS_MAC_IPC_CHNL_DCHNO(mac->rx->chan.chno) |
		      PAS_MAC_IPC_CHNL_BCH(mac->rx->chan.chno));

	/* enable rx if */
	write_dma_reg(PAS_DMA_RXINT_RCMDSTA(mac->dma_if),
		      PAS_DMA_RXINT_RCMDSTA_EN |
		      PAS_DMA_RXINT_RCMDSTA_DROPS_M |
		      PAS_DMA_RXINT_RCMDSTA_BP |
		      PAS_DMA_RXINT_RCMDSTA_OO |
		      PAS_DMA_RXINT_RCMDSTA_BT);

	/* enable rx channel */
	pasemi_dma_start_chan(&rx_ring(mac)->chan, PAS_DMA_RXCHAN_CCMDSTA_DU |
						   PAS_DMA_RXCHAN_CCMDSTA_OD |
						   PAS_DMA_RXCHAN_CCMDSTA_FD |
						   PAS_DMA_RXCHAN_CCMDSTA_DT);

	/* enable tx channel */
	pasemi_dma_start_chan(&tx_ring(mac)->chan, PAS_DMA_TXCHAN_TCMDSTA_SZ |
						   PAS_DMA_TXCHAN_TCMDSTA_DB |
						   PAS_DMA_TXCHAN_TCMDSTA_DE |
						   PAS_DMA_TXCHAN_TCMDSTA_DA);

	pasemi_mac_replenish_rx_ring(dev, RX_RING_SIZE);

	write_dma_reg(PAS_DMA_RXCHAN_INCR(rx_ring(mac)->chan.chno),
		      RX_RING_SIZE>>1);

	/* Clear out any residual packet count state from firmware */
	pasemi_mac_restart_rx_intr(mac);
	pasemi_mac_restart_tx_intr(mac);

	flags = PAS_MAC_CFG_PCFG_S1 | PAS_MAC_CFG_PCFG_PR | PAS_MAC_CFG_PCFG_CE;

	if (mac->type == MAC_TYPE_GMAC)
		flags |= PAS_MAC_CFG_PCFG_TSR_1G | PAS_MAC_CFG_PCFG_SPD_1G;
	else
		flags |= PAS_MAC_CFG_PCFG_TSR_10G | PAS_MAC_CFG_PCFG_SPD_10G;

	/* Enable interface in MAC */
	write_mac_reg(mac, PAS_MAC_CFG_PCFG, flags);

	ret = pasemi_mac_phy_init(dev);
	if (ret) {
		/* Since we won't get link notification, just enable RX */
		pasemi_mac_intf_enable(mac);
		if (mac->type == MAC_TYPE_GMAC) {
			/* Warn for missing PHY on SGMII (1Gig) ports */
			dev_warn(&mac->pdev->dev,
				 "PHY init failed: %d.\n", ret);
			dev_warn(&mac->pdev->dev,
				 "Defaulting to 1Gbit full duplex\n");
		}
	}

	netif_start_queue(dev);
	napi_enable(&mac->napi);

	snprintf(mac->tx_irq_name, sizeof(mac->tx_irq_name), "%s tx",
		 dev->name);

	ret = request_irq(mac->tx->chan.irq, pasemi_mac_tx_intr, IRQF_DISABLED,
			  mac->tx_irq_name, mac->tx);
	if (ret) {
		dev_err(&mac->pdev->dev, "request_irq of irq %d failed: %d\n",
			mac->tx->chan.irq, ret);
		goto out_tx_int;
	}

	snprintf(mac->rx_irq_name, sizeof(mac->rx_irq_name), "%s rx",
		 dev->name);

	ret = request_irq(mac->rx->chan.irq, pasemi_mac_rx_intr, IRQF_DISABLED,
			  mac->rx_irq_name, mac->rx);
	if (ret) {
		dev_err(&mac->pdev->dev, "request_irq of irq %d failed: %d\n",
			mac->rx->chan.irq, ret);
		goto out_rx_int;
	}

	if (mac->phydev)
		phy_start(mac->phydev);

	init_timer(&mac->tx->clean_timer);
	mac->tx->clean_timer.function = pasemi_mac_tx_timer;
	mac->tx->clean_timer.data = (unsigned long)mac->tx;
	mac->tx->clean_timer.expires = jiffies+HZ;
	add_timer(&mac->tx->clean_timer);

	return 0;

out_rx_int:
	free_irq(mac->tx->chan.irq, mac->tx);
out_tx_int:
	napi_disable(&mac->napi);
	netif_stop_queue(dev);
out_tx_ring:
	if (mac->tx)
		pasemi_mac_free_tx_resources(mac);
	pasemi_mac_free_rx_resources(mac);
out_rx_resources:

	return ret;
}
#define MAX_RETRIES 5000

static void pasemi_mac_pause_txchan(struct pasemi_mac *mac)
{
	unsigned int sta, retries;
	int txch = tx_ring(mac)->chan.chno;

	write_dma_reg(PAS_DMA_TXCHAN_TCMDSTA(txch),
		      PAS_DMA_TXCHAN_TCMDSTA_ST);

	for (retries = 0; retries < MAX_RETRIES; retries++) {
		sta = read_dma_reg(PAS_DMA_TXCHAN_TCMDSTA(txch));
		if (!(sta & PAS_DMA_TXCHAN_TCMDSTA_ACT))
			break;
		cond_resched();
	}

	if (sta & PAS_DMA_TXCHAN_TCMDSTA_ACT)
		dev_err(&mac->dma_pdev->dev,
			"Failed to stop tx channel, tcmdsta %08x\n", sta);

	write_dma_reg(PAS_DMA_TXCHAN_TCMDSTA(txch), 0);
}
static void pasemi_mac_pause_rxchan(struct pasemi_mac *mac)
{
	unsigned int sta, retries;
	int rxch = rx_ring(mac)->chan.chno;

	write_dma_reg(PAS_DMA_RXCHAN_CCMDSTA(rxch),
		      PAS_DMA_RXCHAN_CCMDSTA_ST);
	for (retries = 0; retries < MAX_RETRIES; retries++) {
		sta = read_dma_reg(PAS_DMA_RXCHAN_CCMDSTA(rxch));
		if (!(sta & PAS_DMA_RXCHAN_CCMDSTA_ACT))
			break;
		cond_resched();
	}

	if (sta & PAS_DMA_RXCHAN_CCMDSTA_ACT)
		dev_err(&mac->dma_pdev->dev,
			"Failed to stop rx channel, ccmdsta %08x\n", sta);
	write_dma_reg(PAS_DMA_RXCHAN_CCMDSTA(rxch), 0);
}
static void pasemi_mac_pause_rxint(struct pasemi_mac *mac)
{
	unsigned int sta, retries;

	write_dma_reg(PAS_DMA_RXINT_RCMDSTA(mac->dma_if),
		      PAS_DMA_RXINT_RCMDSTA_ST);
	for (retries = 0; retries < MAX_RETRIES; retries++) {
		sta = read_dma_reg(PAS_DMA_RXINT_RCMDSTA(mac->dma_if));
		if (!(sta & PAS_DMA_RXINT_RCMDSTA_ACT))
			break;
		cond_resched();
	}

	if (sta & PAS_DMA_RXINT_RCMDSTA_ACT)
		dev_err(&mac->dma_pdev->dev,
			"Failed to stop rx interface, rcmdsta %08x\n", sta);
	write_dma_reg(PAS_DMA_RXINT_RCMDSTA(mac->dma_if), 0);
}
static int pasemi_mac_close(struct net_device *dev)
{
	struct pasemi_mac *mac = netdev_priv(dev);
	unsigned int sta;
	int rxch, txch, i;

	rxch = rx_ring(mac)->chan.chno;
	txch = tx_ring(mac)->chan.chno;

	if (mac->phydev) {
		phy_stop(mac->phydev);
		phy_disconnect(mac->phydev);
	}

	del_timer_sync(&mac->tx->clean_timer);

	netif_stop_queue(dev);
	napi_disable(&mac->napi);

	sta = read_dma_reg(PAS_DMA_RXINT_RCMDSTA(mac->dma_if));
	if (sta & (PAS_DMA_RXINT_RCMDSTA_BP |
		   PAS_DMA_RXINT_RCMDSTA_OO |
		   PAS_DMA_RXINT_RCMDSTA_BT))
		printk(KERN_DEBUG "pasemi_mac: rcmdsta error: 0x%08x\n", sta);

	sta = read_dma_reg(PAS_DMA_RXCHAN_CCMDSTA(rxch));
	if (sta & (PAS_DMA_RXCHAN_CCMDSTA_DU |
		   PAS_DMA_RXCHAN_CCMDSTA_OD |
		   PAS_DMA_RXCHAN_CCMDSTA_FD |
		   PAS_DMA_RXCHAN_CCMDSTA_DT))
		printk(KERN_DEBUG "pasemi_mac: ccmdsta error: 0x%08x\n", sta);

	sta = read_dma_reg(PAS_DMA_TXCHAN_TCMDSTA(txch));
	if (sta & (PAS_DMA_TXCHAN_TCMDSTA_SZ | PAS_DMA_TXCHAN_TCMDSTA_DB |
		   PAS_DMA_TXCHAN_TCMDSTA_DE | PAS_DMA_TXCHAN_TCMDSTA_DA))
		printk(KERN_DEBUG "pasemi_mac: tcmdsta error: 0x%08x\n", sta);

	/* Clean out any pending buffers */
	pasemi_mac_clean_tx(tx_ring(mac));
	pasemi_mac_clean_rx(rx_ring(mac), RX_RING_SIZE);

	pasemi_mac_pause_txchan(mac);
	pasemi_mac_pause_rxint(mac);
	pasemi_mac_pause_rxchan(mac);
	pasemi_mac_intf_disable(mac);

	free_irq(mac->tx->chan.irq, mac->tx);
	free_irq(mac->rx->chan.irq, mac->rx);

	for (i = 0; i < mac->num_cs; i++) {
		pasemi_mac_free_csring(mac->cs[i]);
		mac->cs[i] = NULL;
	}

	mac->num_cs = 0;

	/* Free resources */
	pasemi_mac_free_rx_resources(mac);
	pasemi_mac_free_tx_resources(mac);

	return 0;
}
static void pasemi_mac_queue_csdesc(const struct sk_buff *skb,
				    const dma_addr_t *map,
				    const unsigned int *map_size,
				    struct pasemi_mac_txring *txring,
				    struct pasemi_mac_csring *csring)
{
	u64 fund;
	dma_addr_t cs_dest;
	const int nh_off = skb_network_offset(skb);
	const int nh_len = skb_network_header_len(skb);
	const int nfrags = skb_shinfo(skb)->nr_frags;
	int cs_size, i, fill, hdr, cpyhdr, evt;
	dma_addr_t csdma;

	fund = XCT_FUN_ST | XCT_FUN_RR_8BRES |
	       XCT_FUN_O | XCT_FUN_FUN(csring->fun) |
	       XCT_FUN_CRM_SIG | XCT_FUN_LLEN(skb->len - nh_off) |
	       XCT_FUN_SHL(nh_len >> 2) | XCT_FUN_SE;

	switch (ip_hdr(skb)->protocol) {
	case IPPROTO_TCP:
		fund |= XCT_FUN_SIG_TCP4;
		/* TCP checksum is 16 bytes into the header */
		cs_dest = map[0] + skb_transport_offset(skb) + 16;
		break;
	case IPPROTO_UDP:
		fund |= XCT_FUN_SIG_UDP4;
		/* UDP checksum is 6 bytes into the header */
		cs_dest = map[0] + skb_transport_offset(skb) + 6;
		break;
	default:
		BUG();
	}

	/* Do the checksum offloaded */
	fill = csring->next_to_fill;
	hdr = fill;

	CS_DESC(csring, fill++) = fund;
	/* Room for 8BRES. Checksum result is really 2 bytes into it */
	csdma = csring->chan.ring_dma + (fill & (CS_RING_SIZE-1)) * 8 + 2;
	CS_DESC(csring, fill++) = 0;

	CS_DESC(csring, fill) = XCT_PTR_LEN(map_size[0]-nh_off) | XCT_PTR_ADDR(map[0]+nh_off);
	for (i = 1; i <= nfrags; i++)
		CS_DESC(csring, fill+i) = XCT_PTR_LEN(map_size[i]) | XCT_PTR_ADDR(map[i]);

	fill += i;

	if (fill & 1)
		fill++;

	/* Copy the result into the TCP packet */
	cpyhdr = fill;
	CS_DESC(csring, fill++) = XCT_FUN_O | XCT_FUN_FUN(csring->fun) |
				  XCT_FUN_LLEN(2) | XCT_FUN_SE;
	CS_DESC(csring, fill++) = XCT_PTR_LEN(2) | XCT_PTR_ADDR(cs_dest) | XCT_PTR_T;
	CS_DESC(csring, fill++) = XCT_PTR_LEN(2) | XCT_PTR_ADDR(csdma);
	fill++;

	evt = !csring->last_event;
	csring->last_event = evt;

	/* Event handshaking with MAC TX */
	CS_DESC(csring, fill++) = CTRL_CMD_T | CTRL_CMD_META_EVT | CTRL_CMD_O |
				  CTRL_CMD_ETYPE_SET | CTRL_CMD_REG(csring->events[evt]);
	CS_DESC(csring, fill++) = 0;
	CS_DESC(csring, fill++) = CTRL_CMD_T | CTRL_CMD_META_EVT | CTRL_CMD_O |
				  CTRL_CMD_ETYPE_WCLR | CTRL_CMD_REG(csring->events[!evt]);
	CS_DESC(csring, fill++) = 0;
	csring->next_to_fill = fill & (CS_RING_SIZE-1);

	cs_size = fill - hdr;
	write_dma_reg(PAS_DMA_TXCHAN_INCR(csring->chan.chno), (cs_size) >> 1);

	/* TX-side event handshaking */
	fill = txring->next_to_fill;
	TX_DESC(txring, fill++) = CTRL_CMD_T | CTRL_CMD_META_EVT | CTRL_CMD_O |
				  CTRL_CMD_ETYPE_WSET | CTRL_CMD_REG(csring->events[evt]);
	TX_DESC(txring, fill++) = 0;
	TX_DESC(txring, fill++) = CTRL_CMD_T | CTRL_CMD_META_EVT | CTRL_CMD_O |
				  CTRL_CMD_ETYPE_CLR | CTRL_CMD_REG(csring->events[!evt]);
	TX_DESC(txring, fill++) = 0;
	txring->next_to_fill = fill;

	write_dma_reg(PAS_DMA_TXCHAN_INCR(txring->chan.chno), 2);
}
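
/* One plausible trace of the handshake above (an interpretation of the
 * event descriptors, not taken from the original comments): the function
 * channel computes the checksum, copies the 2-byte result back into the
 * packet, then SETs flag events[evt]; the TX channel's WSET descriptor
 * waits on that same flag before the frame may be transmitted. The
 * WCLR/CLR pair on events[!evt] retires the previous packet's flag, so two
 * checksum operations can be in flight, alternating between the two flags.
 */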
static int pasemi_mac_start_tx(struct sk_buff *skb, struct net_device *dev)
{
	struct pasemi_mac * const mac = netdev_priv(dev);
	struct pasemi_mac_txring * const txring = tx_ring(mac);
	struct pasemi_mac_csring *csring;
	u64 dflags, mactx;
	dma_addr_t map[MAX_SKB_FRAGS+1];
	unsigned int map_size[MAX_SKB_FRAGS+1];
	unsigned long flags;
	int i, nfrags;
	int fill;
	const int nh_off = skb_network_offset(skb);
	const int nh_len = skb_network_header_len(skb);

	prefetch(&txring->ring_info);

	dflags = XCT_MACTX_O | XCT_MACTX_ST | XCT_MACTX_CRC_PAD;

	nfrags = skb_shinfo(skb)->nr_frags;

	map[0] = pci_map_single(mac->dma_pdev, skb->data, skb_headlen(skb),
				PCI_DMA_TODEVICE);
	map_size[0] = skb_headlen(skb);
	if (pci_dma_mapping_error(mac->dma_pdev, map[0]))
		goto out_err_nolock;

	for (i = 0; i < nfrags; i++) {
		skb_frag_t *frag = &skb_shinfo(skb)->frags[i];

		map[i+1] = pci_map_page(mac->dma_pdev, frag->page,
					frag->page_offset, frag->size,
					PCI_DMA_TODEVICE);
		map_size[i+1] = frag->size;
		if (pci_dma_mapping_error(mac->dma_pdev, map[i+1])) {
			nfrags = i;
			goto out_err_nolock;
		}
	}

	if (skb->ip_summed == CHECKSUM_PARTIAL && skb->len <= 1540) {
		switch (ip_hdr(skb)->protocol) {
		case IPPROTO_TCP:
			dflags |= XCT_MACTX_CSUM_TCP;
			dflags |= XCT_MACTX_IPH(nh_len >> 2);
			dflags |= XCT_MACTX_IPO(nh_off);
			break;
		case IPPROTO_UDP:
			dflags |= XCT_MACTX_CSUM_UDP;
			dflags |= XCT_MACTX_IPH(nh_len >> 2);
			dflags |= XCT_MACTX_IPO(nh_off);
			break;
		default:
			WARN_ON(1);
		}
	}

	mactx = dflags | XCT_MACTX_LLEN(skb->len);

	spin_lock_irqsave(&txring->lock, flags);

	/* Avoid stepping on the same cache line that the DMA controller
	 * is currently about to send, so leave at least 8 words available.
	 * Total free space needed is mactx + fragments + 8
	 */
	if (RING_AVAIL(txring) < nfrags + 14) {
		/* no room -- stop the queue and wait for tx intr */
		netif_stop_queue(dev);
		goto out_err;
	}

	/* Queue up checksum + event descriptors, if needed */
	if (mac->num_cs && skb->ip_summed == CHECKSUM_PARTIAL && skb->len > 1540) {
		csring = mac->cs[mac->last_cs];
		mac->last_cs = (mac->last_cs + 1) % mac->num_cs;

		pasemi_mac_queue_csdesc(skb, map, map_size, txring, csring);
	}

	fill = txring->next_to_fill;
	TX_DESC(txring, fill) = mactx;
	TX_DESC_INFO(txring, fill).dma = nfrags;
	fill++;
	TX_DESC_INFO(txring, fill).skb = skb;
	for (i = 0; i <= nfrags; i++) {
		TX_DESC(txring, fill+i) =
			XCT_PTR_LEN(map_size[i]) | XCT_PTR_ADDR(map[i]);
		TX_DESC_INFO(txring, fill+i).dma = map[i];
	}

	/* We have to add an even number of 8-byte entries to the ring
	 * even if the last one is unused. That means always an odd number
	 * of pointers + one mactx descriptor.
	 */
	if (nfrags & 1)
		nfrags++;

	txring->next_to_fill = (fill + nfrags + 1) & (TX_RING_SIZE-1);

	dev->stats.tx_packets++;
	dev->stats.tx_bytes += skb->len;

	spin_unlock_irqrestore(&txring->lock, flags);

	write_dma_reg(PAS_DMA_TXCHAN_INCR(txring->chan.chno), (nfrags+2) >> 1);

	return NETDEV_TX_OK;

out_err:
	spin_unlock_irqrestore(&txring->lock, flags);
out_err_nolock:
	while (nfrags--)
		pci_unmap_single(mac->dma_pdev, map[nfrags], map_size[nfrags],
				 PCI_DMA_TODEVICE);

	return NETDEV_TX_BUSY;
}
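
/* Worked example of the INCR arithmetic above, given the padding logic
 * shown: a linear skb (nfrags = 0) posts one mactx plus one pointer
 * descriptor, i.e. (0+2) >> 1 = 1 pair; a 3-fragment skb has nfrags bumped
 * to 4, posting (4+2) >> 1 = 3 pairs, which matches the next_to_fill
 * advance of fill + nfrags + 1.
 */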
static void pasemi_mac_set_rx_mode(struct net_device *dev)
{
	const struct pasemi_mac *mac = netdev_priv(dev);
	unsigned int flags;

	flags = read_mac_reg(mac, PAS_MAC_CFG_PCFG);

	/* Set promiscuous */
	if (dev->flags & IFF_PROMISC)
		flags |= PAS_MAC_CFG_PCFG_PR;
	else
		flags &= ~PAS_MAC_CFG_PCFG_PR;

	write_mac_reg(mac, PAS_MAC_CFG_PCFG, flags);
}
static int pasemi_mac_poll(struct napi_struct *napi, int budget)
{
	struct pasemi_mac *mac = container_of(napi, struct pasemi_mac, napi);
	int pkts;

	pasemi_mac_clean_tx(tx_ring(mac));
	pkts = pasemi_mac_clean_rx(rx_ring(mac), budget);
	if (pkts < budget) {
		/* all done, no more packets present */
		napi_complete(napi);

		pasemi_mac_restart_rx_intr(mac);
		pasemi_mac_restart_tx_intr(mac);
	}
	return pkts;
}
#ifdef CONFIG_NET_POLL_CONTROLLER
/*
 * Polling 'interrupt' - used by things like netconsole to send skbs
 * without having to re-enable interrupts. It's not called while
 * the interrupt routine is executing.
 */
static void pasemi_mac_netpoll(struct net_device *dev)
{
	const struct pasemi_mac *mac = netdev_priv(dev);

	disable_irq(mac->tx->chan.irq);
	pasemi_mac_tx_intr(mac->tx->chan.irq, mac->tx);
	enable_irq(mac->tx->chan.irq);

	disable_irq(mac->rx->chan.irq);
	pasemi_mac_rx_intr(mac->rx->chan.irq, mac->rx);
	enable_irq(mac->rx->chan.irq);
}
#endif
static int pasemi_mac_change_mtu(struct net_device *dev, int new_mtu)
{
	struct pasemi_mac *mac = netdev_priv(dev);
	unsigned int reg;
	unsigned int rcmdsta = 0;
	int running;
	int ret = 0;

	if (new_mtu < PE_MIN_MTU || new_mtu > PE_MAX_MTU)
		return -EINVAL;

	running = netif_running(dev);

	if (running) {
		/* Need to stop the interface, clean out all already
		 * received buffers, free all unused buffers on the RX
		 * interface ring, then finally re-fill the rx ring with
		 * the new-size buffers and restart.
		 */

		napi_disable(&mac->napi);
		netif_tx_disable(dev);
		pasemi_mac_intf_disable(mac);

		rcmdsta = read_dma_reg(PAS_DMA_RXINT_RCMDSTA(mac->dma_if));
		pasemi_mac_pause_rxint(mac);
		pasemi_mac_clean_rx(rx_ring(mac), RX_RING_SIZE);
		pasemi_mac_free_rx_buffers(mac);
	}

	/* Setup checksum channels if large MTU and none already allocated */
	if (new_mtu > 1500 && !mac->num_cs) {
		pasemi_mac_setup_csrings(mac);
		if (!mac->num_cs) {
			ret = -ENOMEM;
			goto out;
		}
	}

	/* Change maxf, i.e. what size frames are accepted.
	 * Need room for ethernet header and CRC word
	 */
	reg = read_mac_reg(mac, PAS_MAC_CFG_MACCFG);
	reg &= ~PAS_MAC_CFG_MACCFG_MAXF_M;
	reg |= PAS_MAC_CFG_MACCFG_MAXF(new_mtu + ETH_HLEN + 4);
	write_mac_reg(mac, PAS_MAC_CFG_MACCFG, reg);

	dev->mtu = new_mtu;
	/* MTU + ETH_HLEN + VLAN_HLEN + 2 64B cachelines */
	mac->bufsz = new_mtu + ETH_HLEN + ETH_FCS_LEN + LOCAL_SKB_ALIGN + 128;

out:
	if (running) {
		write_dma_reg(PAS_DMA_RXINT_RCMDSTA(mac->dma_if),
			      rcmdsta | PAS_DMA_RXINT_RCMDSTA_EN);

		rx_ring(mac)->next_to_fill = 0;
		pasemi_mac_replenish_rx_ring(dev, RX_RING_SIZE-1);

		napi_enable(&mac->napi);
		netif_start_queue(dev);
		pasemi_mac_intf_enable(mac);
	}

	return ret;
}
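
/* Worked example of the bufsz formula above, for the default 1500-byte
 * MTU: 1500 + 14 (ETH_HLEN) + 4 (ETH_FCS_LEN) + 2 (LOCAL_SKB_ALIGN) + 128
 * = 1648 bytes per RX buffer. The probe path below uses the same formula.
 */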
static const struct net_device_ops pasemi_netdev_ops = {
	.ndo_open		= pasemi_mac_open,
	.ndo_stop		= pasemi_mac_close,
	.ndo_start_xmit		= pasemi_mac_start_tx,
	.ndo_set_multicast_list	= pasemi_mac_set_rx_mode,
	.ndo_set_mac_address	= pasemi_mac_set_mac_addr,
	.ndo_change_mtu		= pasemi_mac_change_mtu,
	.ndo_validate_addr	= eth_validate_addr,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller	= pasemi_mac_netpoll,
#endif
};
static int __devinit
pasemi_mac_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
{
	struct net_device *dev;
	struct pasemi_mac *mac;
	int err, ret;

	err = pci_enable_device(pdev);
	if (err)
		return err;

	dev = alloc_etherdev(sizeof(struct pasemi_mac));
	if (dev == NULL) {
		dev_err(&pdev->dev,
			"pasemi_mac: Could not allocate ethernet device.\n");
		err = -ENOMEM;
		goto out_disable_device;
	}

	pci_set_drvdata(pdev, dev);
	SET_NETDEV_DEV(dev, &pdev->dev);

	mac = netdev_priv(dev);

	mac->pdev = pdev;
	mac->netdev = dev;

	netif_napi_add(dev, &mac->napi, pasemi_mac_poll, 64);

	dev->features = NETIF_F_IP_CSUM | NETIF_F_LLTX | NETIF_F_SG |
			NETIF_F_HIGHDMA | NETIF_F_GSO;

	mac->lro_mgr.max_aggr = LRO_MAX_AGGR;
	mac->lro_mgr.max_desc = MAX_LRO_DESCRIPTORS;
	mac->lro_mgr.lro_arr = mac->lro_desc;
	mac->lro_mgr.get_skb_header = get_skb_hdr;
	mac->lro_mgr.features = LRO_F_NAPI | LRO_F_EXTRACT_VLAN_ID;
	mac->lro_mgr.dev = mac->netdev;
	mac->lro_mgr.ip_summed = CHECKSUM_UNNECESSARY;
	mac->lro_mgr.ip_summed_aggr = CHECKSUM_UNNECESSARY;

	mac->dma_pdev = pci_get_device(PCI_VENDOR_ID_PASEMI, 0xa007, NULL);
	if (!mac->dma_pdev) {
		dev_err(&mac->pdev->dev, "Can't find DMA Controller\n");
		err = -ENODEV;
		goto out;
	}

	mac->iob_pdev = pci_get_device(PCI_VENDOR_ID_PASEMI, 0xa001, NULL);
	if (!mac->iob_pdev) {
		dev_err(&mac->pdev->dev, "Can't find I/O Bridge\n");
		err = -ENODEV;
		goto out;
	}

	/* get mac addr from device tree */
	if (pasemi_get_mac_addr(mac) || !is_valid_ether_addr(mac->mac_addr)) {
		err = -ENODEV;
		goto out;
	}

	memcpy(dev->dev_addr, mac->mac_addr, sizeof(mac->mac_addr));

	ret = mac_to_intf(mac);
	if (ret < 0) {
		dev_err(&mac->pdev->dev, "Can't map DMA interface\n");
		err = -ENODEV;
		goto out;
	}
	mac->dma_if = ret;

	switch (pdev->device) {
	case 0xa005:
		mac->type = MAC_TYPE_GMAC;
		break;
	case 0xa006:
		mac->type = MAC_TYPE_XAUI;
		break;
	default:
		err = -ENODEV;
		goto out;
	}

	dev->netdev_ops = &pasemi_netdev_ops;
	dev->mtu = PE_DEF_MTU;
	/* 1500 MTU + ETH_HLEN + VLAN_HLEN + 2 64B cachelines */
	mac->bufsz = dev->mtu + ETH_HLEN + ETH_FCS_LEN + LOCAL_SKB_ALIGN + 128;

	dev->ethtool_ops = &pasemi_mac_ethtool_ops;

	mac->msg_enable = netif_msg_init(debug, DEFAULT_MSG_ENABLE);

	/* Enable most messages by default */
	mac->msg_enable = (NETIF_MSG_IFUP << 1 ) - 1;

	err = register_netdev(dev);

	if (err) {
		dev_err(&mac->pdev->dev, "register_netdev failed with error %d\n",
			err);
		goto out;
	} else if (netif_msg_probe(mac))
		printk(KERN_INFO "%s: PA Semi %s: intf %d, hw addr %pM\n",
		       dev->name, mac->type == MAC_TYPE_GMAC ? "GMAC" : "XAUI",
		       mac->dma_if, dev->dev_addr);

	return err;

out:
	if (mac->iob_pdev)
		pci_dev_put(mac->iob_pdev);
	if (mac->dma_pdev)
		pci_dev_put(mac->dma_pdev);

	free_netdev(dev);
out_disable_device:
	pci_disable_device(pdev);
	return err;
}
static void __devexit pasemi_mac_remove(struct pci_dev *pdev)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct pasemi_mac *mac;

	if (!netdev)
		return;

	mac = netdev_priv(netdev);

	unregister_netdev(netdev);

	pci_disable_device(pdev);
	pci_dev_put(mac->dma_pdev);
	pci_dev_put(mac->iob_pdev);

	pasemi_dma_free_chan(&mac->tx->chan);
	pasemi_dma_free_chan(&mac->rx->chan);

	pci_set_drvdata(pdev, NULL);
	free_netdev(netdev);
}
static DEFINE_PCI_DEVICE_TABLE(pasemi_mac_pci_tbl) = {
	{ PCI_DEVICE(PCI_VENDOR_ID_PASEMI, 0xa005) },
	{ PCI_DEVICE(PCI_VENDOR_ID_PASEMI, 0xa006) },
	{ },
};

MODULE_DEVICE_TABLE(pci, pasemi_mac_pci_tbl);
static struct pci_driver pasemi_mac_driver = {
	.name		= "pasemi_mac",
	.id_table	= pasemi_mac_pci_tbl,
	.probe		= pasemi_mac_probe,
	.remove		= __devexit_p(pasemi_mac_remove),
};
static void __exit pasemi_mac_cleanup_module(void)
{
	pci_unregister_driver(&pasemi_mac_driver);
}

int pasemi_mac_init_module(void)
{
	int err;

	err = pasemi_dma_init();
	if (err)
		return err;

	return pci_register_driver(&pasemi_mac_driver);
}

module_init(pasemi_mac_init_module);
module_exit(pasemi_mac_cleanup_module);