2 * Driver for Marvell Discovery (MV643XX) and Marvell Orion ethernet ports
3 * Copyright (C) 2002 Matthew Dharm <mdharm@momenco.com>
5 * Based on the 64360 driver from:
6 * Copyright (C) 2002 Rabeeh Khoury <rabeeh@galileo.co.il>
7 * Rabeeh Khoury <rabeeh@marvell.com>
9 * Copyright (C) 2003 PMC-Sierra, Inc.,
10 * written by Manish Lachwani
12 * Copyright (C) 2003 Ralf Baechle <ralf@linux-mips.org>
14 * Copyright (C) 2004-2006 MontaVista Software, Inc.
15 * Dale Farnsworth <dale@farnsworth.org>
17 * Copyright (C) 2004 Steven J. Hill <sjhill1@rockwellcollins.com>
18 * <sjhill@realitydiluted.com>
20 * Copyright (C) 2007-2008 Marvell Semiconductor
21 * Lennert Buytenhek <buytenh@marvell.com>
23 * This program is free software; you can redistribute it and/or
24 * modify it under the terms of the GNU General Public License
25 * as published by the Free Software Foundation; either version 2
26 * of the License, or (at your option) any later version.
28 * This program is distributed in the hope that it will be useful,
29 * but WITHOUT ANY WARRANTY; without even the implied warranty of
30 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
31 * GNU General Public License for more details.
33 * You should have received a copy of the GNU General Public License
34 * along with this program; if not, write to the Free Software
35 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
38 #include <linux/init.h>
39 #include <linux/dma-mapping.h>
41 #include <linux/tcp.h>
42 #include <linux/udp.h>
43 #include <linux/etherdevice.h>
44 #include <linux/delay.h>
45 #include <linux/ethtool.h>
46 #include <linux/platform_device.h>
47 #include <linux/module.h>
48 #include <linux/kernel.h>
49 #include <linux/spinlock.h>
50 #include <linux/workqueue.h>
51 #include <linux/mii.h>
52 #include <linux/mv643xx_eth.h>
54 #include <asm/types.h>
55 #include <asm/system.h>
57 static char mv643xx_eth_driver_name
[] = "mv643xx_eth";
58 static char mv643xx_eth_driver_version
[] = "1.0";
60 #define MV643XX_ETH_CHECKSUM_OFFLOAD_TX
61 #define MV643XX_ETH_NAPI
62 #define MV643XX_ETH_TX_FAST_REFILL
63 #undef MV643XX_ETH_COAL
65 #define MV643XX_ETH_TX_COAL 100
66 #ifdef MV643XX_ETH_COAL
67 #define MV643XX_ETH_RX_COAL 100
70 #ifdef MV643XX_ETH_CHECKSUM_OFFLOAD_TX
71 #define MAX_DESCS_PER_SKB (MAX_SKB_FRAGS + 1)
73 #define MAX_DESCS_PER_SKB 1
76 #define ETH_VLAN_HLEN 4
78 #define ETH_HW_IP_ALIGN 2 /* hw aligns IP header */
79 #define ETH_WRAPPER_LEN (ETH_HW_IP_ALIGN + ETH_HLEN + \
80 ETH_VLAN_HLEN + ETH_FCS_LEN)
81 #define ETH_RX_SKB_SIZE (dev->mtu + ETH_WRAPPER_LEN + \
82 dma_get_cache_alignment())
85 * Registers shared between all ports.
87 #define PHY_ADDR 0x0000
88 #define SMI_REG 0x0004
89 #define WINDOW_BASE(w) (0x0200 + ((w) << 3))
90 #define WINDOW_SIZE(w) (0x0204 + ((w) << 3))
91 #define WINDOW_REMAP_HIGH(w) (0x0280 + ((w) << 2))
92 #define WINDOW_BAR_ENABLE 0x0290
93 #define WINDOW_PROTECT(w) (0x0294 + ((w) << 4))
98 #define PORT_CONFIG(p) (0x0400 + ((p) << 10))
99 #define UNICAST_PROMISCUOUS_MODE 0x00000001
100 #define PORT_CONFIG_EXT(p) (0x0404 + ((p) << 10))
101 #define MAC_ADDR_LOW(p) (0x0414 + ((p) << 10))
102 #define MAC_ADDR_HIGH(p) (0x0418 + ((p) << 10))
103 #define SDMA_CONFIG(p) (0x041c + ((p) << 10))
104 #define PORT_SERIAL_CONTROL(p) (0x043c + ((p) << 10))
105 #define PORT_STATUS(p) (0x0444 + ((p) << 10))
106 #define TX_FIFO_EMPTY 0x00000400
107 #define TXQ_COMMAND(p) (0x0448 + ((p) << 10))
108 #define TX_BW_MTU(p) (0x0458 + ((p) << 10))
109 #define INT_CAUSE(p) (0x0460 + ((p) << 10))
110 #define INT_RX 0x00000804
111 #define INT_EXT 0x00000002
112 #define INT_CAUSE_EXT(p) (0x0464 + ((p) << 10))
113 #define INT_EXT_LINK 0x00100000
114 #define INT_EXT_PHY 0x00010000
115 #define INT_EXT_TX_ERROR_0 0x00000100
116 #define INT_EXT_TX_0 0x00000001
117 #define INT_EXT_TX 0x00000101
118 #define INT_MASK(p) (0x0468 + ((p) << 10))
119 #define INT_MASK_EXT(p) (0x046c + ((p) << 10))
120 #define TX_FIFO_URGENT_THRESHOLD(p) (0x0474 + ((p) << 10))
121 #define RXQ_CURRENT_DESC_PTR(p) (0x060c + ((p) << 10))
122 #define RXQ_COMMAND(p) (0x0680 + ((p) << 10))
123 #define TXQ_CURRENT_DESC_PTR(p) (0x06c0 + ((p) << 10))
124 #define MIB_COUNTERS(p) (0x1000 + ((p) << 7))
125 #define SPECIAL_MCAST_TABLE(p) (0x1400 + ((p) << 10))
126 #define OTHER_MCAST_TABLE(p) (0x1500 + ((p) << 10))
127 #define UNICAST_TABLE(p) (0x1600 + ((p) << 10))
131 * SDMA configuration register.
133 #define RX_BURST_SIZE_4_64BIT (2 << 1)
134 #define BLM_RX_NO_SWAP (1 << 4)
135 #define BLM_TX_NO_SWAP (1 << 5)
136 #define TX_BURST_SIZE_4_64BIT (2 << 22)
138 #if defined(__BIG_ENDIAN)
139 #define PORT_SDMA_CONFIG_DEFAULT_VALUE \
140 RX_BURST_SIZE_4_64BIT | \
141 TX_BURST_SIZE_4_64BIT
142 #elif defined(__LITTLE_ENDIAN)
143 #define PORT_SDMA_CONFIG_DEFAULT_VALUE \
144 RX_BURST_SIZE_4_64BIT | \
147 TX_BURST_SIZE_4_64BIT
149 #error One of __BIG_ENDIAN or __LITTLE_ENDIAN must be defined
154 * Port serial control register.
156 #define SET_MII_SPEED_TO_100 (1 << 24)
157 #define SET_GMII_SPEED_TO_1000 (1 << 23)
158 #define SET_FULL_DUPLEX_MODE (1 << 21)
159 #define MAX_RX_PACKET_1522BYTE (1 << 17)
160 #define MAX_RX_PACKET_9700BYTE (5 << 17)
161 #define MAX_RX_PACKET_MASK (7 << 17)
162 #define DISABLE_AUTO_NEG_SPEED_GMII (1 << 13)
163 #define DO_NOT_FORCE_LINK_FAIL (1 << 10)
164 #define SERIAL_PORT_CONTROL_RESERVED (1 << 9)
165 #define DISABLE_AUTO_NEG_FOR_FLOW_CTRL (1 << 3)
166 #define DISABLE_AUTO_NEG_FOR_DUPLEX (1 << 2)
167 #define FORCE_LINK_PASS (1 << 1)
168 #define SERIAL_PORT_ENABLE (1 << 0)
170 #define DEFAULT_RX_QUEUE_SIZE 400
171 #define DEFAULT_TX_QUEUE_SIZE 800
174 #define SMI_BUSY 0x10000000 /* 0 - Write, 1 - Read */
175 #define SMI_READ_VALID 0x08000000 /* 0 - Write, 1 - Read */
176 #define SMI_OPCODE_WRITE 0 /* Completion of Read */
177 #define SMI_OPCODE_READ 0x04000000 /* Operation is in progress */
181 typedef enum _func_ret_status
{
182 ETH_OK
, /* Returned as expected. */
183 ETH_ERROR
, /* Fundamental error. */
184 ETH_RETRY
, /* Could not process request. Try later.*/
185 ETH_END_OF_JOB
, /* Ring has nothing to process. */
186 ETH_QUEUE_FULL
, /* Ring resource error. */
187 ETH_QUEUE_LAST_RESOURCE
/* Ring resources about to exhaust. */
193 #if defined(__BIG_ENDIAN)
195 u16 byte_cnt
; /* Descriptor buffer byte count */
196 u16 buf_size
; /* Buffer size */
197 u32 cmd_sts
; /* Descriptor command status */
198 u32 next_desc_ptr
; /* Next descriptor pointer */
199 u32 buf_ptr
; /* Descriptor buffer pointer */
203 u16 byte_cnt
; /* buffer byte count */
204 u16 l4i_chk
; /* CPU provided TCP checksum */
205 u32 cmd_sts
; /* Command/status field */
206 u32 next_desc_ptr
; /* Pointer to next descriptor */
207 u32 buf_ptr
; /* pointer to buffer for this descriptor*/
209 #elif defined(__LITTLE_ENDIAN)
211 u32 cmd_sts
; /* Descriptor command status */
212 u16 buf_size
; /* Buffer size */
213 u16 byte_cnt
; /* Descriptor buffer byte count */
214 u32 buf_ptr
; /* Descriptor buffer pointer */
215 u32 next_desc_ptr
; /* Next descriptor pointer */
219 u32 cmd_sts
; /* Command/status field */
220 u16 l4i_chk
; /* CPU provided TCP checksum */
221 u16 byte_cnt
; /* buffer byte count */
222 u32 buf_ptr
; /* pointer to buffer for this descriptor*/
223 u32 next_desc_ptr
; /* Pointer to next descriptor */
226 #error One of __BIG_ENDIAN or __LITTLE_ENDIAN must be defined
229 /* RX & TX descriptor command */
230 #define BUFFER_OWNED_BY_DMA 0x80000000
232 /* RX & TX descriptor status */
233 #define ERROR_SUMMARY 0x00000001
235 /* RX descriptor status */
236 #define LAYER_4_CHECKSUM_OK 0x40000000
237 #define RX_ENABLE_INTERRUPT 0x20000000
238 #define RX_FIRST_DESC 0x08000000
239 #define RX_LAST_DESC 0x04000000
241 /* TX descriptor command */
242 #define TX_ENABLE_INTERRUPT 0x00800000
243 #define GEN_CRC 0x00400000
244 #define TX_FIRST_DESC 0x00200000
245 #define TX_LAST_DESC 0x00100000
246 #define ZERO_PADDING 0x00080000
247 #define GEN_IP_V4_CHECKSUM 0x00040000
248 #define GEN_TCP_UDP_CHECKSUM 0x00020000
249 #define UDP_FRAME 0x00010000
251 #define TX_IHL_SHIFT 11
254 /* Unified struct for Rx and Tx operations. The user is not required to */
255 /* be familier with neither Tx nor Rx descriptors. */
257 unsigned short byte_cnt
; /* Descriptor buffer byte count */
258 unsigned short l4i_chk
; /* Tx CPU provided TCP Checksum */
259 unsigned int cmd_sts
; /* Descriptor command status */
260 dma_addr_t buf_ptr
; /* Descriptor buffer pointer */
261 struct sk_buff
*return_info
; /* User resource return information */
265 /* global *******************************************************************/
266 struct mv643xx_eth_shared_private
{
269 /* used to protect SMI_REG, which is shared across ports */
278 /* per-port *****************************************************************/
279 struct mib_counters
{
280 u64 good_octets_received
;
281 u32 bad_octets_received
;
282 u32 internal_mac_transmit_err
;
283 u32 good_frames_received
;
284 u32 bad_frames_received
;
285 u32 broadcast_frames_received
;
286 u32 multicast_frames_received
;
287 u32 frames_64_octets
;
288 u32 frames_65_to_127_octets
;
289 u32 frames_128_to_255_octets
;
290 u32 frames_256_to_511_octets
;
291 u32 frames_512_to_1023_octets
;
292 u32 frames_1024_to_max_octets
;
293 u64 good_octets_sent
;
294 u32 good_frames_sent
;
295 u32 excessive_collision
;
296 u32 multicast_frames_sent
;
297 u32 broadcast_frames_sent
;
298 u32 unrec_mac_control_received
;
300 u32 good_fc_received
;
302 u32 undersize_received
;
303 u32 fragments_received
;
304 u32 oversize_received
;
306 u32 mac_receive_error
;
312 struct mv643xx_eth_private
{
313 struct mv643xx_eth_shared_private
*shared
;
314 int port_num
; /* User Ethernet port number */
316 struct mv643xx_eth_shared_private
*shared_smi
;
318 u32 rx_sram_addr
; /* Base address of rx sram area */
319 u32 rx_sram_size
; /* Size of rx sram area */
320 u32 tx_sram_addr
; /* Base address of tx sram area */
321 u32 tx_sram_size
; /* Size of tx sram area */
323 /* Tx/Rx rings managment indexes fields. For driver use */
325 /* Next available and first returning Rx resource */
326 int rx_curr_desc
, rx_used_desc
;
328 /* Next available and first returning Tx resource */
329 int tx_curr_desc
, tx_used_desc
;
331 #ifdef MV643XX_ETH_TX_FAST_REFILL
332 u32 tx_clean_threshold
;
335 struct rx_desc
*rx_desc_area
;
336 dma_addr_t rx_desc_dma
;
337 int rx_desc_area_size
;
338 struct sk_buff
**rx_skb
;
340 struct tx_desc
*tx_desc_area
;
341 dma_addr_t tx_desc_dma
;
342 int tx_desc_area_size
;
343 struct sk_buff
**tx_skb
;
345 struct work_struct tx_timeout_task
;
347 struct net_device
*dev
;
348 struct napi_struct napi
;
349 struct net_device_stats stats
;
350 struct mib_counters mib_counters
;
352 /* Size of Tx Ring per queue */
354 /* Number of tx descriptors in use */
356 /* Size of Rx Ring per queue */
358 /* Number of rx descriptors in use */
362 * Used in case RX Ring is empty, which can be caused when
363 * system does not have resources (skb's)
365 struct timer_list timeout
;
369 struct mii_if_info mii
;
373 /* port register accessors **************************************************/
374 static inline u32
rdl(struct mv643xx_eth_private
*mp
, int offset
)
376 return readl(mp
->shared
->base
+ offset
);
379 static inline void wrl(struct mv643xx_eth_private
*mp
, int offset
, u32 data
)
381 writel(data
, mp
->shared
->base
+ offset
);
385 /* rxq/txq helper functions *************************************************/
386 static void mv643xx_eth_port_enable_rx(struct mv643xx_eth_private
*mp
,
389 wrl(mp
, RXQ_COMMAND(mp
->port_num
), queues
);
392 static unsigned int mv643xx_eth_port_disable_rx(struct mv643xx_eth_private
*mp
)
394 unsigned int port_num
= mp
->port_num
;
397 /* Stop Rx port activity. Check port Rx activity. */
398 queues
= rdl(mp
, RXQ_COMMAND(port_num
)) & 0xFF;
400 /* Issue stop command for active queues only */
401 wrl(mp
, RXQ_COMMAND(port_num
), (queues
<< 8));
403 /* Wait for all Rx activity to terminate. */
404 /* Check port cause register that all Rx queues are stopped */
405 while (rdl(mp
, RXQ_COMMAND(port_num
)) & 0xFF)
412 static void mv643xx_eth_port_enable_tx(struct mv643xx_eth_private
*mp
,
415 wrl(mp
, TXQ_COMMAND(mp
->port_num
), queues
);
418 static unsigned int mv643xx_eth_port_disable_tx(struct mv643xx_eth_private
*mp
)
420 unsigned int port_num
= mp
->port_num
;
423 /* Stop Tx port activity. Check port Tx activity. */
424 queues
= rdl(mp
, TXQ_COMMAND(port_num
)) & 0xFF;
426 /* Issue stop command for active queues only */
427 wrl(mp
, TXQ_COMMAND(port_num
), (queues
<< 8));
429 /* Wait for all Tx activity to terminate. */
430 /* Check port cause register that all Tx queues are stopped */
431 while (rdl(mp
, TXQ_COMMAND(port_num
)) & 0xFF)
434 /* Wait for Tx FIFO to empty */
435 while (rdl(mp
, PORT_STATUS(port_num
)) & TX_FIFO_EMPTY
)
443 /* rx ***********************************************************************/
444 static void mv643xx_eth_free_completed_tx_descs(struct net_device
*dev
);
446 static void mv643xx_eth_rx_refill_descs(struct net_device
*dev
)
448 struct mv643xx_eth_private
*mp
= netdev_priv(dev
);
451 spin_lock_irqsave(&mp
->lock
, flags
);
453 while (mp
->rx_desc_count
< mp
->rx_ring_size
) {
458 skb
= dev_alloc_skb(ETH_RX_SKB_SIZE
+ dma_get_cache_alignment());
462 unaligned
= (u32
)skb
->data
& (dma_get_cache_alignment() - 1);
464 skb_reserve(skb
, dma_get_cache_alignment() - unaligned
);
467 rx
= mp
->rx_used_desc
;
468 mp
->rx_used_desc
= (rx
+ 1) % mp
->rx_ring_size
;
470 mp
->rx_desc_area
[rx
].buf_ptr
= dma_map_single(NULL
,
474 mp
->rx_desc_area
[rx
].buf_size
= ETH_RX_SKB_SIZE
;
475 mp
->rx_skb
[rx
] = skb
;
477 mp
->rx_desc_area
[rx
].cmd_sts
= BUFFER_OWNED_BY_DMA
|
481 skb_reserve(skb
, ETH_HW_IP_ALIGN
);
484 if (mp
->rx_desc_count
== 0) {
485 mp
->timeout
.expires
= jiffies
+ (HZ
/ 10);
486 add_timer(&mp
->timeout
);
489 spin_unlock_irqrestore(&mp
->lock
, flags
);
492 static inline void mv643xx_eth_rx_refill_descs_timer_wrapper(unsigned long data
)
494 mv643xx_eth_rx_refill_descs((struct net_device
*)data
);
497 static int mv643xx_eth_receive_queue(struct net_device
*dev
, int budget
)
499 struct mv643xx_eth_private
*mp
= netdev_priv(dev
);
500 struct net_device_stats
*stats
= &dev
->stats
;
501 unsigned int received_packets
= 0;
503 while (budget
-- > 0) {
505 volatile struct rx_desc
*rx_desc
;
506 unsigned int cmd_sts
;
509 spin_lock_irqsave(&mp
->lock
, flags
);
511 rx_desc
= &mp
->rx_desc_area
[mp
->rx_curr_desc
];
513 cmd_sts
= rx_desc
->cmd_sts
;
514 if (cmd_sts
& BUFFER_OWNED_BY_DMA
) {
515 spin_unlock_irqrestore(&mp
->lock
, flags
);
520 skb
= mp
->rx_skb
[mp
->rx_curr_desc
];
521 mp
->rx_skb
[mp
->rx_curr_desc
] = NULL
;
523 mp
->rx_curr_desc
= (mp
->rx_curr_desc
+ 1) % mp
->rx_ring_size
;
525 spin_unlock_irqrestore(&mp
->lock
, flags
);
527 dma_unmap_single(NULL
, rx_desc
->buf_ptr
+ ETH_HW_IP_ALIGN
,
528 ETH_RX_SKB_SIZE
, DMA_FROM_DEVICE
);
534 * Note byte count includes 4 byte CRC count
537 stats
->rx_bytes
+= rx_desc
->byte_cnt
- ETH_HW_IP_ALIGN
;
540 * In case received a packet without first / last bits on OR
541 * the error summary bit is on, the packets needs to be dropeed.
543 if (((cmd_sts
& (RX_FIRST_DESC
| RX_LAST_DESC
)) !=
544 (RX_FIRST_DESC
| RX_LAST_DESC
))
545 || (cmd_sts
& ERROR_SUMMARY
)) {
547 if ((cmd_sts
& (RX_FIRST_DESC
| RX_LAST_DESC
)) !=
548 (RX_FIRST_DESC
| RX_LAST_DESC
)) {
551 "%s: Received packet spread "
552 "on multiple descriptors\n",
555 if (cmd_sts
& ERROR_SUMMARY
)
558 dev_kfree_skb_irq(skb
);
561 * The -4 is for the CRC in the trailer of the
564 skb_put(skb
, rx_desc
->byte_cnt
- ETH_HW_IP_ALIGN
- 4);
566 if (cmd_sts
& LAYER_4_CHECKSUM_OK
) {
567 skb
->ip_summed
= CHECKSUM_UNNECESSARY
;
569 (cmd_sts
& 0x0007fff8) >> 3);
571 skb
->protocol
= eth_type_trans(skb
, dev
);
572 #ifdef MV643XX_ETH_NAPI
573 netif_receive_skb(skb
);
578 dev
->last_rx
= jiffies
;
580 mv643xx_eth_rx_refill_descs(dev
); /* Fill RX ring with skb's */
582 return received_packets
;
585 #ifdef MV643XX_ETH_NAPI
586 static int mv643xx_eth_poll(struct napi_struct
*napi
, int budget
)
588 struct mv643xx_eth_private
*mp
= container_of(napi
, struct mv643xx_eth_private
, napi
);
589 struct net_device
*dev
= mp
->dev
;
590 unsigned int port_num
= mp
->port_num
;
593 #ifdef MV643XX_ETH_TX_FAST_REFILL
594 if (++mp
->tx_clean_threshold
> 5) {
595 mv643xx_eth_free_completed_tx_descs(dev
);
596 mp
->tx_clean_threshold
= 0;
601 if ((rdl(mp
, RXQ_CURRENT_DESC_PTR(port_num
)))
602 != (u32
) mp
->rx_used_desc
)
603 work_done
= mv643xx_eth_receive_queue(dev
, budget
);
605 if (work_done
< budget
) {
606 netif_rx_complete(dev
, napi
);
607 wrl(mp
, INT_CAUSE(port_num
), 0);
608 wrl(mp
, INT_CAUSE_EXT(port_num
), 0);
609 wrl(mp
, INT_MASK(port_num
), INT_RX
| INT_EXT
);
617 /* tx ***********************************************************************/
618 static inline unsigned int has_tiny_unaligned_frags(struct sk_buff
*skb
)
623 for (frag
= 0; frag
< skb_shinfo(skb
)->nr_frags
; frag
++) {
624 fragp
= &skb_shinfo(skb
)->frags
[frag
];
625 if (fragp
->size
<= 8 && fragp
->page_offset
& 0x7)
631 static int alloc_tx_desc_index(struct mv643xx_eth_private
*mp
)
635 BUG_ON(mp
->tx_desc_count
>= mp
->tx_ring_size
);
637 tx_desc_curr
= mp
->tx_curr_desc
;
638 mp
->tx_curr_desc
= (tx_desc_curr
+ 1) % mp
->tx_ring_size
;
640 BUG_ON(mp
->tx_curr_desc
== mp
->tx_used_desc
);
645 static void tx_fill_frag_descs(struct mv643xx_eth_private
*mp
,
650 struct tx_desc
*desc
;
652 for (frag
= 0; frag
< skb_shinfo(skb
)->nr_frags
; frag
++) {
653 skb_frag_t
*this_frag
= &skb_shinfo(skb
)->frags
[frag
];
655 tx_index
= alloc_tx_desc_index(mp
);
656 desc
= &mp
->tx_desc_area
[tx_index
];
658 desc
->cmd_sts
= BUFFER_OWNED_BY_DMA
;
659 /* Last Frag enables interrupt and frees the skb */
660 if (frag
== (skb_shinfo(skb
)->nr_frags
- 1)) {
661 desc
->cmd_sts
|= ZERO_PADDING
|
664 mp
->tx_skb
[tx_index
] = skb
;
666 mp
->tx_skb
[tx_index
] = NULL
;
668 desc
= &mp
->tx_desc_area
[tx_index
];
670 desc
->byte_cnt
= this_frag
->size
;
671 desc
->buf_ptr
= dma_map_page(NULL
, this_frag
->page
,
672 this_frag
->page_offset
,
678 static inline __be16
sum16_as_be(__sum16 sum
)
680 return (__force __be16
)sum
;
683 static void tx_submit_descs_for_skb(struct mv643xx_eth_private
*mp
,
687 struct tx_desc
*desc
;
690 int nr_frags
= skb_shinfo(skb
)->nr_frags
;
692 cmd_sts
= TX_FIRST_DESC
| GEN_CRC
| BUFFER_OWNED_BY_DMA
;
694 tx_index
= alloc_tx_desc_index(mp
);
695 desc
= &mp
->tx_desc_area
[tx_index
];
698 tx_fill_frag_descs(mp
, skb
);
700 length
= skb_headlen(skb
);
701 mp
->tx_skb
[tx_index
] = NULL
;
703 cmd_sts
|= ZERO_PADDING
| TX_LAST_DESC
| TX_ENABLE_INTERRUPT
;
705 mp
->tx_skb
[tx_index
] = skb
;
708 desc
->byte_cnt
= length
;
709 desc
->buf_ptr
= dma_map_single(NULL
, skb
->data
, length
, DMA_TO_DEVICE
);
711 if (skb
->ip_summed
== CHECKSUM_PARTIAL
) {
712 BUG_ON(skb
->protocol
!= htons(ETH_P_IP
));
714 cmd_sts
|= GEN_TCP_UDP_CHECKSUM
|
716 ip_hdr(skb
)->ihl
<< TX_IHL_SHIFT
;
718 switch (ip_hdr(skb
)->protocol
) {
720 cmd_sts
|= UDP_FRAME
;
721 desc
->l4i_chk
= ntohs(sum16_as_be(udp_hdr(skb
)->check
));
724 desc
->l4i_chk
= ntohs(sum16_as_be(tcp_hdr(skb
)->check
));
730 /* Errata BTS #50, IHL must be 5 if no HW checksum */
731 cmd_sts
|= 5 << TX_IHL_SHIFT
;
735 /* ensure all other descriptors are written before first cmd_sts */
737 desc
->cmd_sts
= cmd_sts
;
739 /* ensure all descriptors are written before poking hardware */
741 mv643xx_eth_port_enable_tx(mp
, 1);
743 mp
->tx_desc_count
+= nr_frags
+ 1;
746 static int mv643xx_eth_start_xmit(struct sk_buff
*skb
, struct net_device
*dev
)
748 struct mv643xx_eth_private
*mp
= netdev_priv(dev
);
749 struct net_device_stats
*stats
= &dev
->stats
;
752 BUG_ON(netif_queue_stopped(dev
));
754 if (has_tiny_unaligned_frags(skb
) && __skb_linearize(skb
)) {
756 printk(KERN_DEBUG
"%s: failed to linearize tiny "
757 "unaligned fragment\n", dev
->name
);
758 return NETDEV_TX_BUSY
;
761 spin_lock_irqsave(&mp
->lock
, flags
);
763 if (mp
->tx_ring_size
- mp
->tx_desc_count
< MAX_DESCS_PER_SKB
) {
764 printk(KERN_ERR
"%s: transmit with queue full\n", dev
->name
);
765 netif_stop_queue(dev
);
766 spin_unlock_irqrestore(&mp
->lock
, flags
);
767 return NETDEV_TX_BUSY
;
770 tx_submit_descs_for_skb(mp
, skb
);
771 stats
->tx_bytes
+= skb
->len
;
773 dev
->trans_start
= jiffies
;
775 if (mp
->tx_ring_size
- mp
->tx_desc_count
< MAX_DESCS_PER_SKB
)
776 netif_stop_queue(dev
);
778 spin_unlock_irqrestore(&mp
->lock
, flags
);
784 /* mii management interface *************************************************/
785 static int phy_addr_get(struct mv643xx_eth_private
*mp
);
787 static void read_smi_reg(struct mv643xx_eth_private
*mp
,
788 unsigned int phy_reg
, unsigned int *value
)
790 void __iomem
*smi_reg
= mp
->shared_smi
->base
+ SMI_REG
;
791 int phy_addr
= phy_addr_get(mp
);
795 /* the SMI register is a shared resource */
796 spin_lock_irqsave(&mp
->shared_smi
->phy_lock
, flags
);
798 /* wait for the SMI register to become available */
799 for (i
= 0; readl(smi_reg
) & SMI_BUSY
; i
++) {
801 printk("%s: PHY busy timeout\n", mp
->dev
->name
);
807 writel((phy_addr
<< 16) | (phy_reg
<< 21) | SMI_OPCODE_READ
, smi_reg
);
809 /* now wait for the data to be valid */
810 for (i
= 0; !(readl(smi_reg
) & SMI_READ_VALID
); i
++) {
812 printk("%s: PHY read timeout\n", mp
->dev
->name
);
818 *value
= readl(smi_reg
) & 0xffff;
820 spin_unlock_irqrestore(&mp
->shared_smi
->phy_lock
, flags
);
823 static void write_smi_reg(struct mv643xx_eth_private
*mp
,
824 unsigned int phy_reg
, unsigned int value
)
826 void __iomem
*smi_reg
= mp
->shared_smi
->base
+ SMI_REG
;
827 int phy_addr
= phy_addr_get(mp
);
831 /* the SMI register is a shared resource */
832 spin_lock_irqsave(&mp
->shared_smi
->phy_lock
, flags
);
834 /* wait for the SMI register to become available */
835 for (i
= 0; readl(smi_reg
) & SMI_BUSY
; i
++) {
837 printk("%s: PHY busy timeout\n", mp
->dev
->name
);
843 writel((phy_addr
<< 16) | (phy_reg
<< 21) |
844 SMI_OPCODE_WRITE
| (value
& 0xffff), smi_reg
);
846 spin_unlock_irqrestore(&mp
->shared_smi
->phy_lock
, flags
);
850 /* mib counters *************************************************************/
851 static void clear_mib_counters(struct mv643xx_eth_private
*mp
)
853 unsigned int port_num
= mp
->port_num
;
856 /* Perform dummy reads from MIB counters */
857 for (i
= 0; i
< 0x80; i
+= 4)
858 rdl(mp
, MIB_COUNTERS(port_num
) + i
);
861 static inline u32
read_mib(struct mv643xx_eth_private
*mp
, int offset
)
863 return rdl(mp
, MIB_COUNTERS(mp
->port_num
) + offset
);
866 static void update_mib_counters(struct mv643xx_eth_private
*mp
)
868 struct mib_counters
*p
= &mp
->mib_counters
;
870 p
->good_octets_received
+= read_mib(mp
, 0x00);
871 p
->good_octets_received
+= (u64
)read_mib(mp
, 0x04) << 32;
872 p
->bad_octets_received
+= read_mib(mp
, 0x08);
873 p
->internal_mac_transmit_err
+= read_mib(mp
, 0x0c);
874 p
->good_frames_received
+= read_mib(mp
, 0x10);
875 p
->bad_frames_received
+= read_mib(mp
, 0x14);
876 p
->broadcast_frames_received
+= read_mib(mp
, 0x18);
877 p
->multicast_frames_received
+= read_mib(mp
, 0x1c);
878 p
->frames_64_octets
+= read_mib(mp
, 0x20);
879 p
->frames_65_to_127_octets
+= read_mib(mp
, 0x24);
880 p
->frames_128_to_255_octets
+= read_mib(mp
, 0x28);
881 p
->frames_256_to_511_octets
+= read_mib(mp
, 0x2c);
882 p
->frames_512_to_1023_octets
+= read_mib(mp
, 0x30);
883 p
->frames_1024_to_max_octets
+= read_mib(mp
, 0x34);
884 p
->good_octets_sent
+= read_mib(mp
, 0x38);
885 p
->good_octets_sent
+= (u64
)read_mib(mp
, 0x3c) << 32;
886 p
->good_frames_sent
+= read_mib(mp
, 0x40);
887 p
->excessive_collision
+= read_mib(mp
, 0x44);
888 p
->multicast_frames_sent
+= read_mib(mp
, 0x48);
889 p
->broadcast_frames_sent
+= read_mib(mp
, 0x4c);
890 p
->unrec_mac_control_received
+= read_mib(mp
, 0x50);
891 p
->fc_sent
+= read_mib(mp
, 0x54);
892 p
->good_fc_received
+= read_mib(mp
, 0x58);
893 p
->bad_fc_received
+= read_mib(mp
, 0x5c);
894 p
->undersize_received
+= read_mib(mp
, 0x60);
895 p
->fragments_received
+= read_mib(mp
, 0x64);
896 p
->oversize_received
+= read_mib(mp
, 0x68);
897 p
->jabber_received
+= read_mib(mp
, 0x6c);
898 p
->mac_receive_error
+= read_mib(mp
, 0x70);
899 p
->bad_crc_event
+= read_mib(mp
, 0x74);
900 p
->collision
+= read_mib(mp
, 0x78);
901 p
->late_collision
+= read_mib(mp
, 0x7c);
905 /* ethtool ******************************************************************/
906 struct mv643xx_eth_stats
{
907 char stat_string
[ETH_GSTRING_LEN
];
912 #define MV643XX_ETH_STAT(m) FIELD_SIZEOF(struct mv643xx_eth_private, m), \
913 offsetof(struct mv643xx_eth_private, m)
915 static const struct mv643xx_eth_stats mv643xx_eth_gstrings_stats
[] = {
916 { "rx_packets", MV643XX_ETH_STAT(stats
.rx_packets
) },
917 { "tx_packets", MV643XX_ETH_STAT(stats
.tx_packets
) },
918 { "rx_bytes", MV643XX_ETH_STAT(stats
.rx_bytes
) },
919 { "tx_bytes", MV643XX_ETH_STAT(stats
.tx_bytes
) },
920 { "rx_errors", MV643XX_ETH_STAT(stats
.rx_errors
) },
921 { "tx_errors", MV643XX_ETH_STAT(stats
.tx_errors
) },
922 { "rx_dropped", MV643XX_ETH_STAT(stats
.rx_dropped
) },
923 { "tx_dropped", MV643XX_ETH_STAT(stats
.tx_dropped
) },
924 { "good_octets_received", MV643XX_ETH_STAT(mib_counters
.good_octets_received
) },
925 { "bad_octets_received", MV643XX_ETH_STAT(mib_counters
.bad_octets_received
) },
926 { "internal_mac_transmit_err", MV643XX_ETH_STAT(mib_counters
.internal_mac_transmit_err
) },
927 { "good_frames_received", MV643XX_ETH_STAT(mib_counters
.good_frames_received
) },
928 { "bad_frames_received", MV643XX_ETH_STAT(mib_counters
.bad_frames_received
) },
929 { "broadcast_frames_received", MV643XX_ETH_STAT(mib_counters
.broadcast_frames_received
) },
930 { "multicast_frames_received", MV643XX_ETH_STAT(mib_counters
.multicast_frames_received
) },
931 { "frames_64_octets", MV643XX_ETH_STAT(mib_counters
.frames_64_octets
) },
932 { "frames_65_to_127_octets", MV643XX_ETH_STAT(mib_counters
.frames_65_to_127_octets
) },
933 { "frames_128_to_255_octets", MV643XX_ETH_STAT(mib_counters
.frames_128_to_255_octets
) },
934 { "frames_256_to_511_octets", MV643XX_ETH_STAT(mib_counters
.frames_256_to_511_octets
) },
935 { "frames_512_to_1023_octets", MV643XX_ETH_STAT(mib_counters
.frames_512_to_1023_octets
) },
936 { "frames_1024_to_max_octets", MV643XX_ETH_STAT(mib_counters
.frames_1024_to_max_octets
) },
937 { "good_octets_sent", MV643XX_ETH_STAT(mib_counters
.good_octets_sent
) },
938 { "good_frames_sent", MV643XX_ETH_STAT(mib_counters
.good_frames_sent
) },
939 { "excessive_collision", MV643XX_ETH_STAT(mib_counters
.excessive_collision
) },
940 { "multicast_frames_sent", MV643XX_ETH_STAT(mib_counters
.multicast_frames_sent
) },
941 { "broadcast_frames_sent", MV643XX_ETH_STAT(mib_counters
.broadcast_frames_sent
) },
942 { "unrec_mac_control_received", MV643XX_ETH_STAT(mib_counters
.unrec_mac_control_received
) },
943 { "fc_sent", MV643XX_ETH_STAT(mib_counters
.fc_sent
) },
944 { "good_fc_received", MV643XX_ETH_STAT(mib_counters
.good_fc_received
) },
945 { "bad_fc_received", MV643XX_ETH_STAT(mib_counters
.bad_fc_received
) },
946 { "undersize_received", MV643XX_ETH_STAT(mib_counters
.undersize_received
) },
947 { "fragments_received", MV643XX_ETH_STAT(mib_counters
.fragments_received
) },
948 { "oversize_received", MV643XX_ETH_STAT(mib_counters
.oversize_received
) },
949 { "jabber_received", MV643XX_ETH_STAT(mib_counters
.jabber_received
) },
950 { "mac_receive_error", MV643XX_ETH_STAT(mib_counters
.mac_receive_error
) },
951 { "bad_crc_event", MV643XX_ETH_STAT(mib_counters
.bad_crc_event
) },
952 { "collision", MV643XX_ETH_STAT(mib_counters
.collision
) },
953 { "late_collision", MV643XX_ETH_STAT(mib_counters
.late_collision
) },
956 #define MV643XX_ETH_STATS_LEN ARRAY_SIZE(mv643xx_eth_gstrings_stats)
958 static int mv643xx_eth_get_settings(struct net_device
*dev
, struct ethtool_cmd
*cmd
)
960 struct mv643xx_eth_private
*mp
= netdev_priv(dev
);
963 spin_lock_irq(&mp
->lock
);
964 err
= mii_ethtool_gset(&mp
->mii
, cmd
);
965 spin_unlock_irq(&mp
->lock
);
967 /* The PHY may support 1000baseT_Half, but the mv643xx does not */
968 cmd
->supported
&= ~SUPPORTED_1000baseT_Half
;
969 cmd
->advertising
&= ~ADVERTISED_1000baseT_Half
;
974 static int mv643xx_eth_set_settings(struct net_device
*dev
, struct ethtool_cmd
*cmd
)
976 struct mv643xx_eth_private
*mp
= netdev_priv(dev
);
979 spin_lock_irq(&mp
->lock
);
980 err
= mii_ethtool_sset(&mp
->mii
, cmd
);
981 spin_unlock_irq(&mp
->lock
);
986 static void mv643xx_eth_get_drvinfo(struct net_device
*netdev
,
987 struct ethtool_drvinfo
*drvinfo
)
989 strncpy(drvinfo
->driver
, mv643xx_eth_driver_name
, 32);
990 strncpy(drvinfo
->version
, mv643xx_eth_driver_version
, 32);
991 strncpy(drvinfo
->fw_version
, "N/A", 32);
992 strncpy(drvinfo
->bus_info
, "mv643xx", 32);
993 drvinfo
->n_stats
= MV643XX_ETH_STATS_LEN
;
996 static int mv643xx_eth_nway_restart(struct net_device
*dev
)
998 struct mv643xx_eth_private
*mp
= netdev_priv(dev
);
1000 return mii_nway_restart(&mp
->mii
);
1003 static u32
mv643xx_eth_get_link(struct net_device
*dev
)
1005 struct mv643xx_eth_private
*mp
= netdev_priv(dev
);
1007 return mii_link_ok(&mp
->mii
);
1010 static void mv643xx_eth_get_strings(struct net_device
*netdev
, uint32_t stringset
,
1017 for (i
=0; i
< MV643XX_ETH_STATS_LEN
; i
++) {
1018 memcpy(data
+ i
* ETH_GSTRING_LEN
,
1019 mv643xx_eth_gstrings_stats
[i
].stat_string
,
1026 static void mv643xx_eth_get_ethtool_stats(struct net_device
*netdev
,
1027 struct ethtool_stats
*stats
, uint64_t *data
)
1029 struct mv643xx_eth_private
*mp
= netdev
->priv
;
1032 update_mib_counters(mp
);
1034 for (i
= 0; i
< MV643XX_ETH_STATS_LEN
; i
++) {
1035 char *p
= (char *)mp
+mv643xx_eth_gstrings_stats
[i
].stat_offset
;
1036 data
[i
] = (mv643xx_eth_gstrings_stats
[i
].sizeof_stat
==
1037 sizeof(uint64_t)) ? *(uint64_t *)p
: *(uint32_t *)p
;
1041 static int mv643xx_eth_get_sset_count(struct net_device
*netdev
, int sset
)
1045 return MV643XX_ETH_STATS_LEN
;
1051 static const struct ethtool_ops mv643xx_eth_ethtool_ops
= {
1052 .get_settings
= mv643xx_eth_get_settings
,
1053 .set_settings
= mv643xx_eth_set_settings
,
1054 .get_drvinfo
= mv643xx_eth_get_drvinfo
,
1055 .get_link
= mv643xx_eth_get_link
,
1056 .set_sg
= ethtool_op_set_sg
,
1057 .get_sset_count
= mv643xx_eth_get_sset_count
,
1058 .get_ethtool_stats
= mv643xx_eth_get_ethtool_stats
,
1059 .get_strings
= mv643xx_eth_get_strings
,
1060 .nway_reset
= mv643xx_eth_nway_restart
,
1064 /* address handling *********************************************************/
1065 static void uc_addr_get(struct mv643xx_eth_private
*mp
, unsigned char *addr
)
1067 unsigned int port_num
= mp
->port_num
;
1071 mac_h
= rdl(mp
, MAC_ADDR_HIGH(port_num
));
1072 mac_l
= rdl(mp
, MAC_ADDR_LOW(port_num
));
1074 addr
[0] = (mac_h
>> 24) & 0xff;
1075 addr
[1] = (mac_h
>> 16) & 0xff;
1076 addr
[2] = (mac_h
>> 8) & 0xff;
1077 addr
[3] = mac_h
& 0xff;
1078 addr
[4] = (mac_l
>> 8) & 0xff;
1079 addr
[5] = mac_l
& 0xff;
1082 static void init_mac_tables(struct mv643xx_eth_private
*mp
)
1084 unsigned int port_num
= mp
->port_num
;
1087 /* Clear DA filter unicast table (Ex_dFUT) */
1088 for (table_index
= 0; table_index
<= 0xC; table_index
+= 4)
1089 wrl(mp
, UNICAST_TABLE(port_num
) + table_index
, 0);
1091 for (table_index
= 0; table_index
<= 0xFC; table_index
+= 4) {
1092 /* Clear DA filter special multicast table (Ex_dFSMT) */
1093 wrl(mp
, SPECIAL_MCAST_TABLE(port_num
) + table_index
, 0);
1094 /* Clear DA filter other multicast table (Ex_dFOMT) */
1095 wrl(mp
, OTHER_MCAST_TABLE(port_num
) + table_index
, 0);
1099 static void set_filter_table_entry(struct mv643xx_eth_private
*mp
,
1100 int table
, unsigned char entry
)
1102 unsigned int table_reg
;
1103 unsigned int tbl_offset
;
1104 unsigned int reg_offset
;
1106 tbl_offset
= (entry
/ 4) * 4; /* Register offset of DA table entry */
1107 reg_offset
= entry
% 4; /* Entry offset within the register */
1109 /* Set "accepts frame bit" at specified table entry */
1110 table_reg
= rdl(mp
, table
+ tbl_offset
);
1111 table_reg
|= 0x01 << (8 * reg_offset
);
1112 wrl(mp
, table
+ tbl_offset
, table_reg
);
1115 static void uc_addr_set(struct mv643xx_eth_private
*mp
, unsigned char *addr
)
1117 unsigned int port_num
= mp
->port_num
;
1122 mac_l
= (addr
[4] << 8) | (addr
[5]);
1123 mac_h
= (addr
[0] << 24) | (addr
[1] << 16) | (addr
[2] << 8) |
1126 wrl(mp
, MAC_ADDR_LOW(port_num
), mac_l
);
1127 wrl(mp
, MAC_ADDR_HIGH(port_num
), mac_h
);
1129 /* Accept frames with this address */
1130 table
= UNICAST_TABLE(port_num
);
1131 set_filter_table_entry(mp
, table
, addr
[5] & 0x0f);
1134 static void mv643xx_eth_update_mac_address(struct net_device
*dev
)
1136 struct mv643xx_eth_private
*mp
= netdev_priv(dev
);
1138 init_mac_tables(mp
);
1139 uc_addr_set(mp
, dev
->dev_addr
);
1142 static int mv643xx_eth_set_mac_address(struct net_device
*dev
, void *addr
)
1146 for (i
= 0; i
< 6; i
++)
1147 /* +2 is for the offset of the HW addr type */
1148 dev
->dev_addr
[i
] = ((unsigned char *)addr
)[i
+ 2];
1149 mv643xx_eth_update_mac_address(dev
);
1153 static void mc_addr(struct mv643xx_eth_private
*mp
, unsigned char *addr
)
1155 unsigned int port_num
= mp
->port_num
;
1158 unsigned char crc_result
= 0;
1164 if ((addr
[0] == 0x01) && (addr
[1] == 0x00) &&
1165 (addr
[2] == 0x5E) && (addr
[3] == 0x00) && (addr
[4] == 0x00)) {
1166 table
= SPECIAL_MCAST_TABLE(port_num
);
1167 set_filter_table_entry(mp
, table
, addr
[5]);
1171 /* Calculate CRC-8 out of the given address */
1172 mac_h
= (addr
[0] << 8) | (addr
[1]);
1173 mac_l
= (addr
[2] << 24) | (addr
[3] << 16) |
1174 (addr
[4] << 8) | (addr
[5] << 0);
1176 for (i
= 0; i
< 32; i
++)
1177 mac_array
[i
] = (mac_l
>> i
) & 0x1;
1178 for (i
= 32; i
< 48; i
++)
1179 mac_array
[i
] = (mac_h
>> (i
- 32)) & 0x1;
1181 crc
[0] = mac_array
[45] ^ mac_array
[43] ^ mac_array
[40] ^ mac_array
[39] ^
1182 mac_array
[35] ^ mac_array
[34] ^ mac_array
[31] ^ mac_array
[30] ^
1183 mac_array
[28] ^ mac_array
[23] ^ mac_array
[21] ^ mac_array
[19] ^
1184 mac_array
[18] ^ mac_array
[16] ^ mac_array
[14] ^ mac_array
[12] ^
1185 mac_array
[8] ^ mac_array
[7] ^ mac_array
[6] ^ mac_array
[0];
1187 crc
[1] = mac_array
[46] ^ mac_array
[45] ^ mac_array
[44] ^ mac_array
[43] ^
1188 mac_array
[41] ^ mac_array
[39] ^ mac_array
[36] ^ mac_array
[34] ^
1189 mac_array
[32] ^ mac_array
[30] ^ mac_array
[29] ^ mac_array
[28] ^
1190 mac_array
[24] ^ mac_array
[23] ^ mac_array
[22] ^ mac_array
[21] ^
1191 mac_array
[20] ^ mac_array
[18] ^ mac_array
[17] ^ mac_array
[16] ^
1192 mac_array
[15] ^ mac_array
[14] ^ mac_array
[13] ^ mac_array
[12] ^
1193 mac_array
[9] ^ mac_array
[6] ^ mac_array
[1] ^ mac_array
[0];
1195 crc
[2] = mac_array
[47] ^ mac_array
[46] ^ mac_array
[44] ^ mac_array
[43] ^
1196 mac_array
[42] ^ mac_array
[39] ^ mac_array
[37] ^ mac_array
[34] ^
1197 mac_array
[33] ^ mac_array
[29] ^ mac_array
[28] ^ mac_array
[25] ^
1198 mac_array
[24] ^ mac_array
[22] ^ mac_array
[17] ^ mac_array
[15] ^
1199 mac_array
[13] ^ mac_array
[12] ^ mac_array
[10] ^ mac_array
[8] ^
1200 mac_array
[6] ^ mac_array
[2] ^ mac_array
[1] ^ mac_array
[0];
1202 crc
[3] = mac_array
[47] ^ mac_array
[45] ^ mac_array
[44] ^ mac_array
[43] ^
1203 mac_array
[40] ^ mac_array
[38] ^ mac_array
[35] ^ mac_array
[34] ^
1204 mac_array
[30] ^ mac_array
[29] ^ mac_array
[26] ^ mac_array
[25] ^
1205 mac_array
[23] ^ mac_array
[18] ^ mac_array
[16] ^ mac_array
[14] ^
1206 mac_array
[13] ^ mac_array
[11] ^ mac_array
[9] ^ mac_array
[7] ^
1207 mac_array
[3] ^ mac_array
[2] ^ mac_array
[1];
1209 crc
[4] = mac_array
[46] ^ mac_array
[45] ^ mac_array
[44] ^ mac_array
[41] ^
1210 mac_array
[39] ^ mac_array
[36] ^ mac_array
[35] ^ mac_array
[31] ^
1211 mac_array
[30] ^ mac_array
[27] ^ mac_array
[26] ^ mac_array
[24] ^
1212 mac_array
[19] ^ mac_array
[17] ^ mac_array
[15] ^ mac_array
[14] ^
1213 mac_array
[12] ^ mac_array
[10] ^ mac_array
[8] ^ mac_array
[4] ^
1214 mac_array
[3] ^ mac_array
[2];
1216 crc
[5] = mac_array
[47] ^ mac_array
[46] ^ mac_array
[45] ^ mac_array
[42] ^
1217 mac_array
[40] ^ mac_array
[37] ^ mac_array
[36] ^ mac_array
[32] ^
1218 mac_array
[31] ^ mac_array
[28] ^ mac_array
[27] ^ mac_array
[25] ^
1219 mac_array
[20] ^ mac_array
[18] ^ mac_array
[16] ^ mac_array
[15] ^
1220 mac_array
[13] ^ mac_array
[11] ^ mac_array
[9] ^ mac_array
[5] ^
1221 mac_array
[4] ^ mac_array
[3];
1223 crc
[6] = mac_array
[47] ^ mac_array
[46] ^ mac_array
[43] ^ mac_array
[41] ^
1224 mac_array
[38] ^ mac_array
[37] ^ mac_array
[33] ^ mac_array
[32] ^
1225 mac_array
[29] ^ mac_array
[28] ^ mac_array
[26] ^ mac_array
[21] ^
1226 mac_array
[19] ^ mac_array
[17] ^ mac_array
[16] ^ mac_array
[14] ^
1227 mac_array
[12] ^ mac_array
[10] ^ mac_array
[6] ^ mac_array
[5] ^
1230 crc
[7] = mac_array
[47] ^ mac_array
[44] ^ mac_array
[42] ^ mac_array
[39] ^
1231 mac_array
[38] ^ mac_array
[34] ^ mac_array
[33] ^ mac_array
[30] ^
1232 mac_array
[29] ^ mac_array
[27] ^ mac_array
[22] ^ mac_array
[20] ^
1233 mac_array
[18] ^ mac_array
[17] ^ mac_array
[15] ^ mac_array
[13] ^
1234 mac_array
[11] ^ mac_array
[7] ^ mac_array
[6] ^ mac_array
[5];
1236 for (i
= 0; i
< 8; i
++)
1237 crc_result
= crc_result
| (crc
[i
] << i
);
1239 table
= OTHER_MCAST_TABLE(port_num
);
1240 set_filter_table_entry(mp
, table
, crc_result
);
1243 static void set_multicast_list(struct net_device
*dev
)
1246 struct dev_mc_list
*mc_list
;
1249 struct mv643xx_eth_private
*mp
= netdev_priv(dev
);
1250 unsigned int port_num
= mp
->port_num
;
1252 /* If the device is in promiscuous mode or in all multicast mode,
1253 * we will fully populate both multicast tables with accept.
1254 * This is guaranteed to yield a match on all multicast addresses...
1256 if ((dev
->flags
& IFF_PROMISC
) || (dev
->flags
& IFF_ALLMULTI
)) {
1257 for (table_index
= 0; table_index
<= 0xFC; table_index
+= 4) {
1258 /* Set all entries in DA filter special multicast
1260 * Set for ETH_Q0 for now
1262 * 0 Accept=1, Drop=0
1263 * 3-1 Queue ETH_Q0=0
1266 wrl(mp
, SPECIAL_MCAST_TABLE(port_num
) + table_index
, 0x01010101);
1268 /* Set all entries in DA filter other multicast
1270 * Set for ETH_Q0 for now
1272 * 0 Accept=1, Drop=0
1273 * 3-1 Queue ETH_Q0=0
1276 wrl(mp
, OTHER_MCAST_TABLE(port_num
) + table_index
, 0x01010101);
1281 /* We will clear out multicast tables every time we get the list.
1282 * Then add the entire new list...
1284 for (table_index
= 0; table_index
<= 0xFC; table_index
+= 4) {
1285 /* Clear DA filter special multicast table (Ex_dFSMT) */
1286 wrl(mp
, SPECIAL_MCAST_TABLE(port_num
) + table_index
, 0);
1288 /* Clear DA filter other multicast table (Ex_dFOMT) */
1289 wrl(mp
, OTHER_MCAST_TABLE(port_num
) + table_index
, 0);
1292 /* Get pointer to net_device multicast list and add each one... */
1293 for (i
= 0, mc_list
= dev
->mc_list
;
1294 (i
< 256) && (mc_list
!= NULL
) && (i
< dev
->mc_count
);
1295 i
++, mc_list
= mc_list
->next
)
1296 if (mc_list
->dmi_addrlen
== 6)
1297 mc_addr(mp
, mc_list
->dmi_addr
);
1300 static void mv643xx_eth_set_rx_mode(struct net_device
*dev
)
1302 struct mv643xx_eth_private
*mp
= netdev_priv(dev
);
1305 config_reg
= rdl(mp
, PORT_CONFIG(mp
->port_num
));
1306 if (dev
->flags
& IFF_PROMISC
)
1307 config_reg
|= UNICAST_PROMISCUOUS_MODE
;
1309 config_reg
&= ~UNICAST_PROMISCUOUS_MODE
;
1310 wrl(mp
, PORT_CONFIG(mp
->port_num
), config_reg
);
1312 set_multicast_list(dev
);
1316 /* rx/tx queue initialisation ***********************************************/
1317 static void ether_init_rx_desc_ring(struct mv643xx_eth_private
*mp
)
1319 volatile struct rx_desc
*p_rx_desc
;
1320 int rx_desc_num
= mp
->rx_ring_size
;
1323 /* initialize the next_desc_ptr links in the Rx descriptors ring */
1324 p_rx_desc
= (struct rx_desc
*)mp
->rx_desc_area
;
1325 for (i
= 0; i
< rx_desc_num
; i
++) {
1326 p_rx_desc
[i
].next_desc_ptr
= mp
->rx_desc_dma
+
1327 ((i
+ 1) % rx_desc_num
) * sizeof(struct rx_desc
);
1330 /* Save Rx desc pointer to driver struct. */
1331 mp
->rx_curr_desc
= 0;
1332 mp
->rx_used_desc
= 0;
1334 mp
->rx_desc_area_size
= rx_desc_num
* sizeof(struct rx_desc
);
1337 static void mv643xx_eth_free_rx_rings(struct net_device
*dev
)
1339 struct mv643xx_eth_private
*mp
= netdev_priv(dev
);
1342 /* Stop RX Queues */
1343 mv643xx_eth_port_disable_rx(mp
);
1345 /* Free preallocated skb's on RX rings */
1346 for (curr
= 0; mp
->rx_desc_count
&& curr
< mp
->rx_ring_size
; curr
++) {
1347 if (mp
->rx_skb
[curr
]) {
1348 dev_kfree_skb(mp
->rx_skb
[curr
]);
1349 mp
->rx_desc_count
--;
1353 if (mp
->rx_desc_count
)
1355 "%s: Error in freeing Rx Ring. %d skb's still"
1356 " stuck in RX Ring - ignoring them\n", dev
->name
,
1359 if (mp
->rx_sram_size
)
1360 iounmap(mp
->rx_desc_area
);
1362 dma_free_coherent(NULL
, mp
->rx_desc_area_size
,
1363 mp
->rx_desc_area
, mp
->rx_desc_dma
);
1366 static void ether_init_tx_desc_ring(struct mv643xx_eth_private
*mp
)
1368 int tx_desc_num
= mp
->tx_ring_size
;
1369 struct tx_desc
*p_tx_desc
;
1372 /* Initialize the next_desc_ptr links in the Tx descriptors ring */
1373 p_tx_desc
= (struct tx_desc
*)mp
->tx_desc_area
;
1374 for (i
= 0; i
< tx_desc_num
; i
++) {
1375 p_tx_desc
[i
].next_desc_ptr
= mp
->tx_desc_dma
+
1376 ((i
+ 1) % tx_desc_num
) * sizeof(struct tx_desc
);
1379 mp
->tx_curr_desc
= 0;
1380 mp
->tx_used_desc
= 0;
1382 mp
->tx_desc_area_size
= tx_desc_num
* sizeof(struct tx_desc
);
1385 static int mv643xx_eth_free_tx_descs(struct net_device
*dev
, int force
)
1387 struct mv643xx_eth_private
*mp
= netdev_priv(dev
);
1388 struct tx_desc
*desc
;
1390 struct sk_buff
*skb
;
1391 unsigned long flags
;
1397 while (mp
->tx_desc_count
> 0) {
1398 spin_lock_irqsave(&mp
->lock
, flags
);
1400 /* tx_desc_count might have changed before acquiring the lock */
1401 if (mp
->tx_desc_count
<= 0) {
1402 spin_unlock_irqrestore(&mp
->lock
, flags
);
1406 tx_index
= mp
->tx_used_desc
;
1407 desc
= &mp
->tx_desc_area
[tx_index
];
1408 cmd_sts
= desc
->cmd_sts
;
1410 if (!force
&& (cmd_sts
& BUFFER_OWNED_BY_DMA
)) {
1411 spin_unlock_irqrestore(&mp
->lock
, flags
);
1415 mp
->tx_used_desc
= (tx_index
+ 1) % mp
->tx_ring_size
;
1416 mp
->tx_desc_count
--;
1418 addr
= desc
->buf_ptr
;
1419 count
= desc
->byte_cnt
;
1420 skb
= mp
->tx_skb
[tx_index
];
1422 mp
->tx_skb
[tx_index
] = NULL
;
1424 if (cmd_sts
& ERROR_SUMMARY
) {
1425 printk("%s: Error in TX\n", dev
->name
);
1426 dev
->stats
.tx_errors
++;
1429 spin_unlock_irqrestore(&mp
->lock
, flags
);
1431 if (cmd_sts
& TX_FIRST_DESC
)
1432 dma_unmap_single(NULL
, addr
, count
, DMA_TO_DEVICE
);
1434 dma_unmap_page(NULL
, addr
, count
, DMA_TO_DEVICE
);
1437 dev_kfree_skb_irq(skb
);
1445 static void mv643xx_eth_free_completed_tx_descs(struct net_device
*dev
)
1447 struct mv643xx_eth_private
*mp
= netdev_priv(dev
);
1449 if (mv643xx_eth_free_tx_descs(dev
, 0) &&
1450 mp
->tx_ring_size
- mp
->tx_desc_count
>= MAX_DESCS_PER_SKB
)
1451 netif_wake_queue(dev
);
1454 static void mv643xx_eth_free_all_tx_descs(struct net_device
*dev
)
1456 mv643xx_eth_free_tx_descs(dev
, 1);
1459 static void mv643xx_eth_free_tx_rings(struct net_device
*dev
)
1461 struct mv643xx_eth_private
*mp
= netdev_priv(dev
);
1463 /* Stop Tx Queues */
1464 mv643xx_eth_port_disable_tx(mp
);
1466 /* Free outstanding skb's on TX ring */
1467 mv643xx_eth_free_all_tx_descs(dev
);
1469 BUG_ON(mp
->tx_used_desc
!= mp
->tx_curr_desc
);
1472 if (mp
->tx_sram_size
)
1473 iounmap(mp
->tx_desc_area
);
1475 dma_free_coherent(NULL
, mp
->tx_desc_area_size
,
1476 mp
->tx_desc_area
, mp
->tx_desc_dma
);
1480 /* netdev ops and related ***************************************************/
1481 static void port_reset(struct mv643xx_eth_private
*mp
);
1483 static void mv643xx_eth_update_pscr(struct net_device
*dev
,
1484 struct ethtool_cmd
*ecmd
)
1486 struct mv643xx_eth_private
*mp
= netdev_priv(dev
);
1487 int port_num
= mp
->port_num
;
1489 unsigned int queues
;
1491 o_pscr
= rdl(mp
, PORT_SERIAL_CONTROL(port_num
));
1494 /* clear speed, duplex and rx buffer size fields */
1495 n_pscr
&= ~(SET_MII_SPEED_TO_100
|
1496 SET_GMII_SPEED_TO_1000
|
1497 SET_FULL_DUPLEX_MODE
|
1498 MAX_RX_PACKET_MASK
);
1500 if (ecmd
->duplex
== DUPLEX_FULL
)
1501 n_pscr
|= SET_FULL_DUPLEX_MODE
;
1503 if (ecmd
->speed
== SPEED_1000
)
1504 n_pscr
|= SET_GMII_SPEED_TO_1000
|
1505 MAX_RX_PACKET_9700BYTE
;
1507 if (ecmd
->speed
== SPEED_100
)
1508 n_pscr
|= SET_MII_SPEED_TO_100
;
1509 n_pscr
|= MAX_RX_PACKET_1522BYTE
;
1512 if (n_pscr
!= o_pscr
) {
1513 if ((o_pscr
& SERIAL_PORT_ENABLE
) == 0)
1514 wrl(mp
, PORT_SERIAL_CONTROL(port_num
), n_pscr
);
1516 queues
= mv643xx_eth_port_disable_tx(mp
);
1518 o_pscr
&= ~SERIAL_PORT_ENABLE
;
1519 wrl(mp
, PORT_SERIAL_CONTROL(port_num
), o_pscr
);
1520 wrl(mp
, PORT_SERIAL_CONTROL(port_num
), n_pscr
);
1521 wrl(mp
, PORT_SERIAL_CONTROL(port_num
), n_pscr
);
1523 mv643xx_eth_port_enable_tx(mp
, queues
);
1528 static irqreturn_t
mv643xx_eth_int_handler(int irq
, void *dev_id
)
1530 struct net_device
*dev
= (struct net_device
*)dev_id
;
1531 struct mv643xx_eth_private
*mp
= netdev_priv(dev
);
1532 u32 int_cause
, int_cause_ext
= 0;
1533 unsigned int port_num
= mp
->port_num
;
1535 /* Read interrupt cause registers */
1536 int_cause
= rdl(mp
, INT_CAUSE(port_num
)) & (INT_RX
| INT_EXT
);
1537 if (int_cause
& INT_EXT
) {
1538 int_cause_ext
= rdl(mp
, INT_CAUSE_EXT(port_num
))
1539 & (INT_EXT_LINK
| INT_EXT_PHY
| INT_EXT_TX
);
1540 wrl(mp
, INT_CAUSE_EXT(port_num
), ~int_cause_ext
);
1543 /* PHY status changed */
1544 if (int_cause_ext
& (INT_EXT_LINK
| INT_EXT_PHY
)) {
1545 struct ethtool_cmd cmd
;
1547 if (mii_link_ok(&mp
->mii
)) {
1548 mii_ethtool_gset(&mp
->mii
, &cmd
);
1549 mv643xx_eth_update_pscr(dev
, &cmd
);
1550 mv643xx_eth_port_enable_tx(mp
, 1);
1551 if (!netif_carrier_ok(dev
)) {
1552 netif_carrier_on(dev
);
1553 if (mp
->tx_ring_size
- mp
->tx_desc_count
>=
1555 netif_wake_queue(dev
);
1557 } else if (netif_carrier_ok(dev
)) {
1558 netif_stop_queue(dev
);
1559 netif_carrier_off(dev
);
1563 #ifdef MV643XX_ETH_NAPI
1564 if (int_cause
& INT_RX
) {
1565 /* schedule the NAPI poll routine to maintain port */
1566 wrl(mp
, INT_MASK(port_num
), 0x00000000);
1568 /* wait for previous write to complete */
1569 rdl(mp
, INT_MASK(port_num
));
1571 netif_rx_schedule(dev
, &mp
->napi
);
1574 if (int_cause
& INT_RX
)
1575 mv643xx_eth_receive_queue(dev
, INT_MAX
);
1577 if (int_cause_ext
& INT_EXT_TX
)
1578 mv643xx_eth_free_completed_tx_descs(dev
);
1581 * If no real interrupt occured, exit.
1582 * This can happen when using gigE interrupt coalescing mechanism.
1584 if ((int_cause
== 0x0) && (int_cause_ext
== 0x0))
1590 static void phy_reset(struct mv643xx_eth_private
*mp
)
1592 unsigned int phy_reg_data
;
1595 read_smi_reg(mp
, 0, &phy_reg_data
);
1596 phy_reg_data
|= 0x8000; /* Set bit 15 to reset the PHY */
1597 write_smi_reg(mp
, 0, phy_reg_data
);
1599 /* wait for PHY to come out of reset */
1602 read_smi_reg(mp
, 0, &phy_reg_data
);
1603 } while (phy_reg_data
& 0x8000);
1606 static void port_start(struct net_device
*dev
)
1608 struct mv643xx_eth_private
*mp
= netdev_priv(dev
);
1609 unsigned int port_num
= mp
->port_num
;
1610 int tx_curr_desc
, rx_curr_desc
;
1612 struct ethtool_cmd ethtool_cmd
;
1614 /* Assignment of Tx CTRP of given queue */
1615 tx_curr_desc
= mp
->tx_curr_desc
;
1616 wrl(mp
, TXQ_CURRENT_DESC_PTR(port_num
),
1617 (u32
)((struct tx_desc
*)mp
->tx_desc_dma
+ tx_curr_desc
));
1619 /* Assignment of Rx CRDP of given queue */
1620 rx_curr_desc
= mp
->rx_curr_desc
;
1621 wrl(mp
, RXQ_CURRENT_DESC_PTR(port_num
),
1622 (u32
)((struct rx_desc
*)mp
->rx_desc_dma
+ rx_curr_desc
));
1624 /* Add the assigned Ethernet address to the port's address table */
1625 uc_addr_set(mp
, dev
->dev_addr
);
1628 * Receive all unmatched unicast, TCP, UDP, BPDU and broadcast
1629 * frames to RX queue #0.
1631 wrl(mp
, PORT_CONFIG(port_num
), 0x00000000);
1634 * Treat BPDUs as normal multicasts, and disable partition mode.
1636 wrl(mp
, PORT_CONFIG_EXT(port_num
), 0x00000000);
1638 pscr
= rdl(mp
, PORT_SERIAL_CONTROL(port_num
));
1640 pscr
&= ~(SERIAL_PORT_ENABLE
| FORCE_LINK_PASS
);
1641 wrl(mp
, PORT_SERIAL_CONTROL(port_num
), pscr
);
1643 pscr
|= DISABLE_AUTO_NEG_FOR_FLOW_CTRL
|
1644 DISABLE_AUTO_NEG_SPEED_GMII
|
1645 DISABLE_AUTO_NEG_FOR_DUPLEX
|
1646 DO_NOT_FORCE_LINK_FAIL
|
1647 SERIAL_PORT_CONTROL_RESERVED
;
1649 wrl(mp
, PORT_SERIAL_CONTROL(port_num
), pscr
);
1651 pscr
|= SERIAL_PORT_ENABLE
;
1652 wrl(mp
, PORT_SERIAL_CONTROL(port_num
), pscr
);
1654 /* Assign port SDMA configuration */
1655 wrl(mp
, SDMA_CONFIG(port_num
), PORT_SDMA_CONFIG_DEFAULT_VALUE
);
1657 /* Enable port Rx. */
1658 mv643xx_eth_port_enable_rx(mp
, 1);
1660 /* Disable port bandwidth limits by clearing MTU register */
1661 wrl(mp
, TX_BW_MTU(port_num
), 0);
1663 /* save phy settings across reset */
1664 mv643xx_eth_get_settings(dev
, ðtool_cmd
);
1666 mv643xx_eth_set_settings(dev
, ðtool_cmd
);
1669 #ifdef MV643XX_ETH_COAL
1670 static unsigned int set_rx_coal(struct mv643xx_eth_private
*mp
,
1673 unsigned int port_num
= mp
->port_num
;
1674 unsigned int coal
= ((mp
->shared
->t_clk
/ 1000000) * delay
) / 64;
1676 /* Set RX Coalescing mechanism */
1677 wrl(mp
, SDMA_CONFIG(port_num
),
1678 ((coal
& 0x3fff) << 8) |
1679 (rdl(mp
, SDMA_CONFIG(port_num
))
1686 static unsigned int set_tx_coal(struct mv643xx_eth_private
*mp
,
1689 unsigned int coal
= ((mp
->shared
->t_clk
/ 1000000) * delay
) / 64;
1691 /* Set TX Coalescing mechanism */
1692 wrl(mp
, TX_FIFO_URGENT_THRESHOLD(mp
->port_num
), coal
<< 4);
1697 static void port_init(struct mv643xx_eth_private
*mp
)
1701 init_mac_tables(mp
);
1704 static int mv643xx_eth_open(struct net_device
*dev
)
1706 struct mv643xx_eth_private
*mp
= netdev_priv(dev
);
1707 unsigned int port_num
= mp
->port_num
;
1711 /* Clear any pending ethernet port interrupts */
1712 wrl(mp
, INT_CAUSE(port_num
), 0);
1713 wrl(mp
, INT_CAUSE_EXT(port_num
), 0);
1714 /* wait for previous write to complete */
1715 rdl(mp
, INT_CAUSE_EXT(port_num
));
1717 err
= request_irq(dev
->irq
, mv643xx_eth_int_handler
,
1718 IRQF_SHARED
| IRQF_SAMPLE_RANDOM
, dev
->name
, dev
);
1720 printk(KERN_ERR
"%s: Can not assign IRQ\n", dev
->name
);
1726 memset(&mp
->timeout
, 0, sizeof(struct timer_list
));
1727 mp
->timeout
.function
= mv643xx_eth_rx_refill_descs_timer_wrapper
;
1728 mp
->timeout
.data
= (unsigned long)dev
;
1730 /* Allocate RX and TX skb rings */
1731 mp
->rx_skb
= kmalloc(sizeof(*mp
->rx_skb
) * mp
->rx_ring_size
,
1734 printk(KERN_ERR
"%s: Cannot allocate Rx skb ring\n", dev
->name
);
1738 mp
->tx_skb
= kmalloc(sizeof(*mp
->tx_skb
) * mp
->tx_ring_size
,
1741 printk(KERN_ERR
"%s: Cannot allocate Tx skb ring\n", dev
->name
);
1743 goto out_free_rx_skb
;
1746 /* Allocate TX ring */
1747 mp
->tx_desc_count
= 0;
1748 size
= mp
->tx_ring_size
* sizeof(struct tx_desc
);
1749 mp
->tx_desc_area_size
= size
;
1751 if (mp
->tx_sram_size
) {
1752 mp
->tx_desc_area
= ioremap(mp
->tx_sram_addr
,
1754 mp
->tx_desc_dma
= mp
->tx_sram_addr
;
1756 mp
->tx_desc_area
= dma_alloc_coherent(NULL
, size
,
1760 if (!mp
->tx_desc_area
) {
1761 printk(KERN_ERR
"%s: Cannot allocate Tx Ring (size %d bytes)\n",
1764 goto out_free_tx_skb
;
1766 BUG_ON((u32
) mp
->tx_desc_area
& 0xf); /* check 16-byte alignment */
1767 memset((void *)mp
->tx_desc_area
, 0, mp
->tx_desc_area_size
);
1769 ether_init_tx_desc_ring(mp
);
1771 /* Allocate RX ring */
1772 mp
->rx_desc_count
= 0;
1773 size
= mp
->rx_ring_size
* sizeof(struct rx_desc
);
1774 mp
->rx_desc_area_size
= size
;
1776 if (mp
->rx_sram_size
) {
1777 mp
->rx_desc_area
= ioremap(mp
->rx_sram_addr
,
1779 mp
->rx_desc_dma
= mp
->rx_sram_addr
;
1781 mp
->rx_desc_area
= dma_alloc_coherent(NULL
, size
,
1785 if (!mp
->rx_desc_area
) {
1786 printk(KERN_ERR
"%s: Cannot allocate Rx ring (size %d bytes)\n",
1788 printk(KERN_ERR
"%s: Freeing previously allocated TX queues...",
1790 if (mp
->rx_sram_size
)
1791 iounmap(mp
->tx_desc_area
);
1793 dma_free_coherent(NULL
, mp
->tx_desc_area_size
,
1794 mp
->tx_desc_area
, mp
->tx_desc_dma
);
1796 goto out_free_tx_skb
;
1798 memset((void *)mp
->rx_desc_area
, 0, size
);
1800 ether_init_rx_desc_ring(mp
);
1802 mv643xx_eth_rx_refill_descs(dev
); /* Fill RX ring with skb's */
1804 #ifdef MV643XX_ETH_NAPI
1805 napi_enable(&mp
->napi
);
1810 /* Interrupt Coalescing */
1812 #ifdef MV643XX_ETH_COAL
1813 mp
->rx_int_coal
= set_rx_coal(mp
, MV643XX_ETH_RX_COAL
);
1816 mp
->tx_int_coal
= set_tx_coal(mp
, MV643XX_ETH_TX_COAL
);
1818 /* Unmask phy and link status changes interrupts */
1819 wrl(mp
, INT_MASK_EXT(port_num
), INT_EXT_LINK
| INT_EXT_PHY
| INT_EXT_TX
);
1821 /* Unmask RX buffer and TX end interrupt */
1822 wrl(mp
, INT_MASK(port_num
), INT_RX
| INT_EXT
);
1831 free_irq(dev
->irq
, dev
);
1836 static void port_reset(struct mv643xx_eth_private
*mp
)
1838 unsigned int port_num
= mp
->port_num
;
1839 unsigned int reg_data
;
1841 mv643xx_eth_port_disable_tx(mp
);
1842 mv643xx_eth_port_disable_rx(mp
);
1844 /* Clear all MIB counters */
1845 clear_mib_counters(mp
);
1847 /* Reset the Enable bit in the Configuration Register */
1848 reg_data
= rdl(mp
, PORT_SERIAL_CONTROL(port_num
));
1849 reg_data
&= ~(SERIAL_PORT_ENABLE
|
1850 DO_NOT_FORCE_LINK_FAIL
|
1852 wrl(mp
, PORT_SERIAL_CONTROL(port_num
), reg_data
);
1855 static int mv643xx_eth_stop(struct net_device
*dev
)
1857 struct mv643xx_eth_private
*mp
= netdev_priv(dev
);
1858 unsigned int port_num
= mp
->port_num
;
1860 /* Mask all interrupts on ethernet port */
1861 wrl(mp
, INT_MASK(port_num
), 0x00000000);
1862 /* wait for previous write to complete */
1863 rdl(mp
, INT_MASK(port_num
));
1865 #ifdef MV643XX_ETH_NAPI
1866 napi_disable(&mp
->napi
);
1868 netif_carrier_off(dev
);
1869 netif_stop_queue(dev
);
1873 mv643xx_eth_free_tx_rings(dev
);
1874 mv643xx_eth_free_rx_rings(dev
);
1876 free_irq(dev
->irq
, dev
);
1881 static int mv643xx_eth_do_ioctl(struct net_device
*dev
, struct ifreq
*ifr
, int cmd
)
1883 struct mv643xx_eth_private
*mp
= netdev_priv(dev
);
1885 return generic_mii_ioctl(&mp
->mii
, if_mii(ifr
), cmd
, NULL
);
1888 static int mv643xx_eth_change_mtu(struct net_device
*dev
, int new_mtu
)
1890 if ((new_mtu
> 9500) || (new_mtu
< 64))
1894 if (!netif_running(dev
))
1898 * Stop and then re-open the interface. This will allocate RX
1899 * skbs of the new MTU.
1900 * There is a possible danger that the open will not succeed,
1901 * due to memory being full, which might fail the open function.
1903 mv643xx_eth_stop(dev
);
1904 if (mv643xx_eth_open(dev
)) {
1905 printk(KERN_ERR
"%s: Fatal error on opening device\n",
1912 static void mv643xx_eth_tx_timeout_task(struct work_struct
*ugly
)
1914 struct mv643xx_eth_private
*mp
= container_of(ugly
, struct mv643xx_eth_private
,
1916 struct net_device
*dev
= mp
->dev
;
1918 if (!netif_running(dev
))
1921 netif_stop_queue(dev
);
1926 if (mp
->tx_ring_size
- mp
->tx_desc_count
>= MAX_DESCS_PER_SKB
)
1927 netif_wake_queue(dev
);
1930 static void mv643xx_eth_tx_timeout(struct net_device
*dev
)
1932 struct mv643xx_eth_private
*mp
= netdev_priv(dev
);
1934 printk(KERN_INFO
"%s: TX timeout ", dev
->name
);
1936 /* Do the reset outside of interrupt context */
1937 schedule_work(&mp
->tx_timeout_task
);
1940 #ifdef CONFIG_NET_POLL_CONTROLLER
1941 static void mv643xx_eth_netpoll(struct net_device
*netdev
)
1943 struct mv643xx_eth_private
*mp
= netdev_priv(netdev
);
1944 int port_num
= mp
->port_num
;
1946 wrl(mp
, INT_MASK(port_num
), 0x00000000);
1947 /* wait for previous write to complete */
1948 rdl(mp
, INT_MASK(port_num
));
1950 mv643xx_eth_int_handler(netdev
->irq
, netdev
);
1952 wrl(mp
, INT_MASK(port_num
), INT_RX
| INT_CAUSE_EXT
);
1956 static int mv643xx_eth_mdio_read(struct net_device
*dev
, int phy_id
, int location
)
1958 struct mv643xx_eth_private
*mp
= netdev_priv(dev
);
1961 read_smi_reg(mp
, location
, &val
);
1965 static void mv643xx_eth_mdio_write(struct net_device
*dev
, int phy_id
, int location
, int val
)
1967 struct mv643xx_eth_private
*mp
= netdev_priv(dev
);
1968 write_smi_reg(mp
, location
, val
);
1972 /* platform glue ************************************************************/
1974 mv643xx_eth_conf_mbus_windows(struct mv643xx_eth_shared_private
*msp
,
1975 struct mbus_dram_target_info
*dram
)
1977 void __iomem
*base
= msp
->base
;
1982 for (i
= 0; i
< 6; i
++) {
1983 writel(0, base
+ WINDOW_BASE(i
));
1984 writel(0, base
+ WINDOW_SIZE(i
));
1986 writel(0, base
+ WINDOW_REMAP_HIGH(i
));
1992 for (i
= 0; i
< dram
->num_cs
; i
++) {
1993 struct mbus_dram_window
*cs
= dram
->cs
+ i
;
1995 writel((cs
->base
& 0xffff0000) |
1996 (cs
->mbus_attr
<< 8) |
1997 dram
->mbus_dram_target_id
, base
+ WINDOW_BASE(i
));
1998 writel((cs
->size
- 1) & 0xffff0000, base
+ WINDOW_SIZE(i
));
2000 win_enable
&= ~(1 << i
);
2001 win_protect
|= 3 << (2 * i
);
2004 writel(win_enable
, base
+ WINDOW_BAR_ENABLE
);
2005 msp
->win_protect
= win_protect
;
static int mv643xx_eth_shared_probe(struct platform_device *pdev)
{
	static int mv643xx_eth_version_printed = 0;
	struct mv643xx_eth_shared_platform_data *pd = pdev->dev.platform_data;
	struct mv643xx_eth_shared_private *msp;
	struct resource *res;
	int ret;

	if (!mv643xx_eth_version_printed++)
		printk(KERN_NOTICE "MV-643xx 10/100/1000 Ethernet Driver\n");

	ret = -EINVAL;
	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (res == NULL)
		goto out;

	ret = -ENOMEM;
	msp = kmalloc(sizeof(*msp), GFP_KERNEL);
	if (msp == NULL)
		goto out;
	memset(msp, 0, sizeof(*msp));

	msp->base = ioremap(res->start, res->end - res->start + 1);
	if (msp->base == NULL)
		goto out_free;

	spin_lock_init(&msp->phy_lock);
	msp->t_clk = (pd != NULL && pd->t_clk != 0) ? pd->t_clk : 133000000;

	platform_set_drvdata(pdev, msp);

	/*
	 * (Re-)program MBUS remapping windows if we are asked to.
	 */
	if (pd != NULL && pd->dram != NULL)
		mv643xx_eth_conf_mbus_windows(msp, pd->dram);

	return 0;

out_free:
	kfree(msp);
out:
	return ret;
}
static int mv643xx_eth_shared_remove(struct platform_device *pdev)
{
	struct mv643xx_eth_shared_private *msp = platform_get_drvdata(pdev);

	iounmap(msp->base);
	kfree(msp);

	return 0;
}
static struct platform_driver mv643xx_eth_shared_driver = {
	.probe		= mv643xx_eth_shared_probe,
	.remove		= mv643xx_eth_shared_remove,
	.driver = {
		.name	= MV643XX_ETH_SHARED_NAME,
		.owner	= THIS_MODULE,
	},
};
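/*
 * A minimal board-file sketch of how this shared driver gets bound; the
 * device name comes from the platform data header, but the addresses
 * below are purely illustrative:
 *
 *	static struct resource shared_res[] = {
 *		{
 *			.start	= 0xf1002000,
 *			.end	= 0xf1002fff,
 *			.flags	= IORESOURCE_MEM,
 *		},
 *	};
 *
 *	static struct platform_device shared_dev = {
 *		.name		= MV643XX_ETH_SHARED_NAME,
 *		.id		= 0,
 *		.num_resources	= ARRAY_SIZE(shared_res),
 *		.resource	= shared_res,
 *	};
 *
 * mv643xx_eth_shared_probe() ioremaps that IORESOURCE_MEM range and
 * keeps the mapping in msp->base for the per-port devices to use.
 */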
static void phy_addr_set(struct mv643xx_eth_private *mp, int phy_addr)
{
	unsigned int reg_data;
	int addr_shift = 5 * mp->port_num;

	reg_data = rdl(mp, PHY_ADDR);
	reg_data &= ~(0x1f << addr_shift);
	reg_data |= (phy_addr & 0x1f) << addr_shift;
	wrl(mp, PHY_ADDR, reg_data);
}
static int phy_addr_get(struct mv643xx_eth_private *mp)
{
	unsigned int reg_data;

	reg_data = rdl(mp, PHY_ADDR);

	return (reg_data >> (5 * mp->port_num)) & 0x1f;
}
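/*
 * PHY_ADDR packs one 5-bit PHY address per port.  For port 2, for
 * example, addr_shift is 10, so phy_addr_set() rewrites bits 14:10 and
 * phy_addr_get() extracts them: a register value of 0x00001400 would
 * yield PHY address 5 for that port.
 */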
static int phy_detect(struct mv643xx_eth_private *mp)
{
	unsigned int phy_reg_data0;
	int auto_neg;

	read_smi_reg(mp, 0, &phy_reg_data0);
	auto_neg = phy_reg_data0 & 0x1000;
	phy_reg_data0 ^= 0x1000;	/* invert auto_neg */
	write_smi_reg(mp, 0, phy_reg_data0);

	read_smi_reg(mp, 0, &phy_reg_data0);
	if ((phy_reg_data0 & 0x1000) == auto_neg)
		return -ENODEV;		/* change didn't take */

	phy_reg_data0 ^= 0x1000;	/* restore auto_neg */
	write_smi_reg(mp, 0, phy_reg_data0);

	return 0;
}
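/*
 * The detection trick in phy_detect(): flip the auto-negotiation enable
 * bit (bit 12 of MII register 0) and read it back.  A present PHY
 * latches the new value; an absent one reads back unchanged, so the
 * change "didn't take" and -ENODEV is returned.  The second XOR/write
 * pair restores the PHY's original auto-negotiation setting.
 */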
static void mv643xx_init_ethtool_cmd(struct net_device *dev, int phy_address,
				     int speed, int duplex,
				     struct ethtool_cmd *cmd)
{
	struct mv643xx_eth_private *mp = netdev_priv(dev);

	memset(cmd, 0, sizeof(*cmd));

	cmd->port = PORT_MII;
	cmd->transceiver = XCVR_INTERNAL;
	cmd->phy_address = phy_address;

	if (speed == 0) {
		cmd->autoneg = AUTONEG_ENABLE;
		/* mii lib checks, but doesn't use speed on AUTONEG_ENABLE */
		cmd->speed = SPEED_100;
		cmd->advertising = ADVERTISED_10baseT_Half |
				   ADVERTISED_10baseT_Full |
				   ADVERTISED_100baseT_Half |
				   ADVERTISED_100baseT_Full;
		if (mp->mii.supports_gmii)
			cmd->advertising |= ADVERTISED_1000baseT_Full;
	} else {
		cmd->autoneg = AUTONEG_DISABLE;
		cmd->speed = speed;
		cmd->duplex = duplex;
	}
}
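/*
 * With speed == 0 the port is left to auto-negotiate and advertises the
 * full 10/100 mode set, plus 1000baseT-Full when the attached PHY
 * reported GMII support; the SPEED_100 assignment is only a placeholder
 * that the MII library validates but ignores while autoneg is enabled.
 * A non-zero speed forces the given speed/duplex pair instead.
 */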
static int mv643xx_eth_probe(struct platform_device *pdev)
{
	struct mv643xx_eth_platform_data *pd;
	int port_num;
	struct mv643xx_eth_private *mp;
	struct net_device *dev;
	u8 *p;
	struct resource *res;
	int err;
	struct ethtool_cmd cmd;
	int duplex = DUPLEX_HALF;
	int speed = 0;			/* default to auto-negotiation */
	DECLARE_MAC_BUF(mac);

	pd = pdev->dev.platform_data;
	if (pd == NULL) {
		printk(KERN_ERR "No mv643xx_eth_platform_data\n");
		return -ENODEV;
	}

	if (pd->shared == NULL) {
		printk(KERN_ERR "No mv643xx_eth_platform_data->shared\n");
		return -ENODEV;
	}

	dev = alloc_etherdev(sizeof(struct mv643xx_eth_private));
	if (!dev)
		return -ENOMEM;

	platform_set_drvdata(pdev, dev);

	mp = netdev_priv(dev);
	mp->dev = dev;
#ifdef MV643XX_ETH_NAPI
	netif_napi_add(dev, &mp->napi, mv643xx_eth_poll, 64);
#endif

	res = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
	BUG_ON(!res);
	dev->irq = res->start;

	dev->open = mv643xx_eth_open;
	dev->stop = mv643xx_eth_stop;
	dev->hard_start_xmit = mv643xx_eth_start_xmit;
	dev->set_mac_address = mv643xx_eth_set_mac_address;
	dev->set_multicast_list = mv643xx_eth_set_rx_mode;

	/* No need to Tx Timeout */
	dev->tx_timeout = mv643xx_eth_tx_timeout;

#ifdef CONFIG_NET_POLL_CONTROLLER
	dev->poll_controller = mv643xx_eth_netpoll;
#endif

	dev->watchdog_timeo = 2 * HZ;
	dev->base_addr = 0;
	dev->change_mtu = mv643xx_eth_change_mtu;
	dev->do_ioctl = mv643xx_eth_do_ioctl;
	SET_ETHTOOL_OPS(dev, &mv643xx_eth_ethtool_ops);

#ifdef MV643XX_ETH_CHECKSUM_OFFLOAD_TX
#ifdef MAX_SKB_FRAGS
	/*
	 * Zero copy can only work if we use Discovery II memory. Else, we will
	 * have to map the buffers to ISA memory which is only 16 MB
	 */
	dev->features = NETIF_F_SG | NETIF_F_IP_CSUM;
#endif
#endif

	/* Configure the timeout task */
	INIT_WORK(&mp->tx_timeout_task, mv643xx_eth_tx_timeout_task);

	spin_lock_init(&mp->lock);

	mp->shared = platform_get_drvdata(pd->shared);
	port_num = mp->port_num = pd->port_number;

	if (mp->shared->win_protect)
		wrl(mp, WINDOW_PROTECT(port_num), mp->shared->win_protect);

	mp->shared_smi = mp->shared;
	if (pd->shared_smi != NULL)
		mp->shared_smi = platform_get_drvdata(pd->shared_smi);

	/* set default config values */
	uc_addr_get(mp, dev->dev_addr);
	mp->rx_ring_size = DEFAULT_RX_QUEUE_SIZE;
	mp->tx_ring_size = DEFAULT_TX_QUEUE_SIZE;

	if (is_valid_ether_addr(pd->mac_addr))
		memcpy(dev->dev_addr, pd->mac_addr, 6);

	if (pd->phy_addr || pd->force_phy_addr)
		phy_addr_set(mp, pd->phy_addr);

	if (pd->rx_queue_size)
		mp->rx_ring_size = pd->rx_queue_size;

	if (pd->tx_queue_size)
		mp->tx_ring_size = pd->tx_queue_size;

	if (pd->tx_sram_size) {
		mp->tx_sram_size = pd->tx_sram_size;
		mp->tx_sram_addr = pd->tx_sram_addr;
	}

	if (pd->rx_sram_size) {
		mp->rx_sram_size = pd->rx_sram_size;
		mp->rx_sram_addr = pd->rx_sram_addr;
	}

	duplex = pd->duplex;
	speed = pd->speed;

	/* Hook up MII support for ethtool */
	mp->mii.dev = dev;
	mp->mii.mdio_read = mv643xx_eth_mdio_read;
	mp->mii.mdio_write = mv643xx_eth_mdio_write;
	mp->mii.phy_id = phy_addr_get(mp);
	mp->mii.phy_id_mask = 0x3f;
	mp->mii.reg_num_mask = 0x1f;

	err = phy_detect(mp);
	if (err) {
		pr_debug("%s: No PHY detected at addr %d\n",
				dev->name, phy_addr_get(mp));
		goto out;
	}

	phy_reset(mp);
	mp->mii.supports_gmii = mii_check_gmii_support(&mp->mii);
	mv643xx_init_ethtool_cmd(dev, mp->mii.phy_id, speed, duplex, &cmd);
	mv643xx_eth_update_pscr(dev, &cmd);
	mv643xx_eth_set_settings(dev, &cmd);

	SET_NETDEV_DEV(dev, &pdev->dev);
	err = register_netdev(dev);
	if (err)
		goto out;

	p = dev->dev_addr;
	printk(KERN_NOTICE
		"%s: port %d with MAC address %s\n",
		dev->name, port_num, print_mac(mac, p));

	if (dev->features & NETIF_F_SG)
		printk(KERN_NOTICE "%s: Scatter Gather Enabled\n", dev->name);

	if (dev->features & NETIF_F_IP_CSUM)
		printk(KERN_NOTICE "%s: TX TCP/IP Checksumming Supported\n",
							dev->name);

#ifdef MV643XX_ETH_CHECKSUM_OFFLOAD_TX
	printk(KERN_NOTICE "%s: RX TCP/UDP Checksum Offload ON\n", dev->name);
#endif

#ifdef MV643XX_ETH_COAL
	printk(KERN_NOTICE "%s: TX and RX Interrupt Coalescing ON\n",
							dev->name);
#endif

#ifdef MV643XX_ETH_NAPI
	printk(KERN_NOTICE "%s: RX NAPI Enabled\n", dev->name);
#endif

	if (mp->tx_sram_size > 0)
		printk(KERN_NOTICE "%s: Using SRAM\n", dev->name);

	return 0;

out:
	free_netdev(dev);

	return err;
}
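/*
 * A per-port board-file sketch to match the probe above (queue sizes
 * and the shared_dev reference are illustrative, not from this file):
 *
 *	static struct mv643xx_eth_platform_data port0_pd = {
 *		.shared		= &shared_dev,
 *		.port_number	= 0,
 *		.rx_queue_size	= 400,
 *		.tx_queue_size	= 800,
 *	};
 *
 * Leaving .speed at zero selects auto-negotiation, and an invalid
 * (all-zero) .mac_addr keeps the address already programmed into the
 * hardware, as read back by uc_addr_get().
 */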
static int mv643xx_eth_remove(struct platform_device *pdev)
{
	struct net_device *dev = platform_get_drvdata(pdev);

	unregister_netdev(dev);
	flush_scheduled_work();

	free_netdev(dev);
	platform_set_drvdata(pdev, NULL);

	return 0;
}
static void mv643xx_eth_shutdown(struct platform_device *pdev)
{
	struct net_device *dev = platform_get_drvdata(pdev);
	struct mv643xx_eth_private *mp = netdev_priv(dev);
	unsigned int port_num = mp->port_num;

	/* Mask all interrupts on ethernet port */
	wrl(mp, INT_MASK(port_num), 0);
	rdl(mp, INT_MASK(port_num));

	port_reset(mp);
}
static struct platform_driver mv643xx_eth_driver = {
	.probe		= mv643xx_eth_probe,
	.remove		= mv643xx_eth_remove,
	.shutdown	= mv643xx_eth_shutdown,
	.driver = {
		.name	= MV643XX_ETH_NAME,
		.owner	= THIS_MODULE,
	},
};
static int __init mv643xx_eth_init_module(void)
{
	int rc;

	rc = platform_driver_register(&mv643xx_eth_shared_driver);
	if (!rc) {
		rc = platform_driver_register(&mv643xx_eth_driver);
		if (rc)
			platform_driver_unregister(&mv643xx_eth_shared_driver);
	}

	return rc;
}
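/*
 * Registration order above matters: the shared driver is registered
 * first so that a per-port device's probe can rely on
 * platform_get_drvdata(pd->shared) returning an initialised
 * mv643xx_eth_shared_private; if per-port registration fails, the error
 * path unregisters the shared driver again before returning the error.
 */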
static void __exit mv643xx_eth_cleanup_module(void)
{
	platform_driver_unregister(&mv643xx_eth_driver);
	platform_driver_unregister(&mv643xx_eth_shared_driver);
}
module_init(mv643xx_eth_init_module);
module_exit(mv643xx_eth_cleanup_module);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Rabeeh Khoury, Assaf Hoffman, Matthew Dharm, Manish Lachwani"
	      " and Dale Farnsworth");
MODULE_DESCRIPTION("Ethernet driver for Marvell MV643XX");
MODULE_ALIAS("platform:" MV643XX_ETH_NAME);
MODULE_ALIAS("platform:" MV643XX_ETH_SHARED_NAME);