/*
 * Driver for Marvell Discovery (MV643XX) and Marvell Orion ethernet ports
 * Copyright (C) 2002 Matthew Dharm <mdharm@momenco.com>
 *
 * Based on the 64360 driver from:
 * Copyright (C) 2002 Rabeeh Khoury <rabeeh@galileo.co.il>
 *		      Rabeeh Khoury <rabeeh@marvell.com>
 *
 * Copyright (C) 2003 PMC-Sierra, Inc.,
 *	written by Manish Lachwani
 *
 * Copyright (C) 2003 Ralf Baechle <ralf@linux-mips.org>
 *
 * Copyright (C) 2004-2006 MontaVista Software, Inc.
 *			   Dale Farnsworth <dale@farnsworth.org>
 *
 * Copyright (C) 2004 Steven J. Hill <sjhill1@rockwellcollins.com>
 *				     <sjhill@realitydiluted.com>
 *
 * Copyright (C) 2007-2008 Marvell Semiconductor
 *			   Lennert Buytenhek <buytenh@marvell.com>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version 2
 * of the License, or (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA  02111-1307, USA.
 */
#include <linux/init.h>
#include <linux/dma-mapping.h>
#include <linux/in.h>
#include <linux/ip.h>
#include <linux/tcp.h>
#include <linux/udp.h>
#include <linux/etherdevice.h>
#include <linux/delay.h>
#include <linux/ethtool.h>
#include <linux/platform_device.h>
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/spinlock.h>
#include <linux/workqueue.h>
#include <linux/mii.h>
#include <linux/mv643xx_eth.h>

#include <asm/io.h>
#include <asm/types.h>
#include <asm/system.h>
static char mv643xx_eth_driver_name[] = "mv643xx_eth";
static char mv643xx_eth_driver_version[] = "1.0";
#define MV643XX_ETH_CHECKSUM_OFFLOAD_TX
#define MV643XX_ETH_NAPI
#define MV643XX_ETH_TX_FAST_REFILL
#undef	MV643XX_ETH_COAL

#define MV643XX_ETH_TX_COAL	100
#ifdef MV643XX_ETH_COAL
#define MV643XX_ETH_RX_COAL	100
#endif
#ifdef MV643XX_ETH_CHECKSUM_OFFLOAD_TX
#define MAX_DESCS_PER_SKB	(MAX_SKB_FRAGS + 1)
#else
#define MAX_DESCS_PER_SKB	1
#endif
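
/*
 * Receive buffer sizing, as used by ETH_RX_SKB_SIZE below: room for the
 * MTU plus a 2 byte gap for hardware IP alignment, the 14 byte Ethernet
 * header, a possible 4 byte VLAN tag and the 4 byte FCS, rounded out
 * with one cache line of slack so the buffer start can be aligned.
 * For the default MTU of 1500 this works out to 1524 bytes plus slack.
 */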
#define ETH_VLAN_HLEN		4
#define ETH_FCS_LEN		4
#define ETH_HW_IP_ALIGN		2		/* hw aligns IP header */
#define ETH_WRAPPER_LEN		(ETH_HW_IP_ALIGN + ETH_HLEN + \
					ETH_VLAN_HLEN + ETH_FCS_LEN)
#define ETH_RX_SKB_SIZE		(dev->mtu + ETH_WRAPPER_LEN + \
					dma_get_cache_alignment())
/*
 * Registers shared between all ports.
 */
#define PHY_ADDR			0x0000
#define SMI_REG				0x0004
#define WINDOW_BASE(w)			(0x0200 + ((w) << 3))
#define WINDOW_SIZE(w)			(0x0204 + ((w) << 3))
#define WINDOW_REMAP_HIGH(w)		(0x0280 + ((w) << 2))
#define WINDOW_BAR_ENABLE		0x0290
#define WINDOW_PROTECT(w)		(0x0294 + ((w) << 4))
/*
 * Per-port registers.
 */
#define PORT_CONFIG(p)			(0x0400 + ((p) << 10))
#define  UNICAST_PROMISCUOUS_MODE	0x00000001
#define PORT_CONFIG_EXT(p)		(0x0404 + ((p) << 10))
#define MAC_ADDR_LOW(p)			(0x0414 + ((p) << 10))
#define MAC_ADDR_HIGH(p)		(0x0418 + ((p) << 10))
#define SDMA_CONFIG(p)			(0x041c + ((p) << 10))
#define PORT_SERIAL_CONTROL(p)		(0x043c + ((p) << 10))
#define PORT_STATUS(p)			(0x0444 + ((p) << 10))
#define  TX_FIFO_EMPTY			0x00000400
#define TXQ_COMMAND(p)			(0x0448 + ((p) << 10))
#define TX_BW_MTU(p)			(0x0458 + ((p) << 10))
#define INT_CAUSE(p)			(0x0460 + ((p) << 10))
#define  INT_RX				0x00000804
#define  INT_EXT			0x00000002
#define INT_CAUSE_EXT(p)		(0x0464 + ((p) << 10))
#define  INT_EXT_LINK			0x00100000
#define  INT_EXT_PHY			0x00010000
#define  INT_EXT_TX_ERROR_0		0x00000100
#define  INT_EXT_TX_0			0x00000001
#define  INT_EXT_TX			0x00000101
#define INT_MASK(p)			(0x0468 + ((p) << 10))
#define INT_MASK_EXT(p)			(0x046c + ((p) << 10))
#define TX_FIFO_URGENT_THRESHOLD(p)	(0x0474 + ((p) << 10))
#define RXQ_CURRENT_DESC_PTR(p)		(0x060c + ((p) << 10))
#define RXQ_COMMAND(p)			(0x0680 + ((p) << 10))
#define TXQ_CURRENT_DESC_PTR(p)		(0x06c0 + ((p) << 10))
#define MIB_COUNTERS(p)			(0x1000 + ((p) << 7))
#define SPECIAL_MCAST_TABLE(p)		(0x1400 + ((p) << 10))
#define OTHER_MCAST_TABLE(p)		(0x1500 + ((p) << 10))
#define UNICAST_TABLE(p)		(0x1600 + ((p) << 10))
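
/*
 * Note on the macros above: each port decodes a 1 KiB window of
 * registers, hence the ((p) << 10) stride, while the MIB counter
 * block is 128 bytes per port, hence ((p) << 7).
 */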
/*
 * SDMA configuration register.
 */
#define RX_BURST_SIZE_4_64BIT		(2 << 1)
#define BLM_RX_NO_SWAP			(1 << 4)
#define BLM_TX_NO_SWAP			(1 << 5)
#define TX_BURST_SIZE_4_64BIT		(2 << 22)

#if defined(__BIG_ENDIAN)
#define PORT_SDMA_CONFIG_DEFAULT_VALUE		\
		RX_BURST_SIZE_4_64BIT	|	\
		TX_BURST_SIZE_4_64BIT
#elif defined(__LITTLE_ENDIAN)
#define PORT_SDMA_CONFIG_DEFAULT_VALUE		\
		RX_BURST_SIZE_4_64BIT	|	\
		BLM_RX_NO_SWAP		|	\
		BLM_TX_NO_SWAP		|	\
		TX_BURST_SIZE_4_64BIT
#else
#error One of __BIG_ENDIAN or __LITTLE_ENDIAN must be defined
#endif
/*
 * Port serial control register.
 */
#define SET_MII_SPEED_TO_100		(1 << 24)
#define SET_GMII_SPEED_TO_1000		(1 << 23)
#define SET_FULL_DUPLEX_MODE		(1 << 21)
#define MAX_RX_PACKET_1522BYTE		(1 << 17)
#define MAX_RX_PACKET_9700BYTE		(5 << 17)
#define MAX_RX_PACKET_MASK		(7 << 17)
#define DISABLE_AUTO_NEG_SPEED_GMII	(1 << 13)
#define DO_NOT_FORCE_LINK_FAIL		(1 << 10)
#define SERIAL_PORT_CONTROL_RESERVED	(1 << 9)
#define DISABLE_AUTO_NEG_FOR_FLOW_CTRL	(1 << 3)
#define DISABLE_AUTO_NEG_FOR_DUPLEX	(1 << 2)
#define FORCE_LINK_PASS			(1 << 1)
#define SERIAL_PORT_ENABLE		(1 << 0)
#define DEFAULT_RX_QUEUE_SIZE		400
#define DEFAULT_TX_QUEUE_SIZE		800
/*
 * SMI (MII management) register.
 */
#define SMI_BUSY		0x10000000	/* 1 - operation in progress */
#define SMI_READ_VALID		0x08000000	/* 1 - read data valid */
#define SMI_OPCODE_WRITE	0x00000000
#define SMI_OPCODE_READ		0x04000000
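
/*
 * read_smi_reg() and write_smi_reg() below assemble the SMI command
 * word as (phy_reg << 21) | (phy_addr << 16) | opcode, with the 16-bit
 * write data (or, for reads, the returned data) in bits 15:0 of SMI_REG.
 */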
typedef enum _func_ret_status {
	ETH_OK,			/* Returned as expected.		*/
	ETH_ERROR,		/* Fundamental error.			*/
	ETH_RETRY,		/* Could not process request. Try later.*/
	ETH_END_OF_JOB,		/* Ring has nothing to process.		*/
	ETH_QUEUE_FULL,		/* Ring resource error.			*/
	ETH_QUEUE_LAST_RESOURCE	/* Ring resources about to exhaust.	*/
} FUNC_RET_STATUS;
/*
 * RX/TX descriptors.
 */
#if defined(__BIG_ENDIAN)
struct rx_desc {
	u16 byte_cnt;		/* Descriptor buffer byte count		*/
	u16 buf_size;		/* Buffer size				*/
	u32 cmd_sts;		/* Descriptor command status		*/
	u32 next_desc_ptr;	/* Next descriptor pointer		*/
	u32 buf_ptr;		/* Descriptor buffer pointer		*/
};

struct tx_desc {
	u16 byte_cnt;		/* buffer byte count			*/
	u16 l4i_chk;		/* CPU provided TCP checksum		*/
	u32 cmd_sts;		/* Command/status field			*/
	u32 next_desc_ptr;	/* Pointer to next descriptor		*/
	u32 buf_ptr;		/* pointer to buffer for this descriptor*/
};
#elif defined(__LITTLE_ENDIAN)
struct rx_desc {
	u32 cmd_sts;		/* Descriptor command status		*/
	u16 buf_size;		/* Buffer size				*/
	u16 byte_cnt;		/* Descriptor buffer byte count		*/
	u32 buf_ptr;		/* Descriptor buffer pointer		*/
	u32 next_desc_ptr;	/* Next descriptor pointer		*/
};

struct tx_desc {
	u32 cmd_sts;		/* Command/status field			*/
	u16 l4i_chk;		/* CPU provided TCP checksum		*/
	u16 byte_cnt;		/* buffer byte count			*/
	u32 buf_ptr;		/* pointer to buffer for this descriptor*/
	u32 next_desc_ptr;	/* Pointer to next descriptor		*/
};
#else
#error One of __BIG_ENDIAN or __LITTLE_ENDIAN must be defined
#endif
/* RX & TX descriptor command */
#define BUFFER_OWNED_BY_DMA		0x80000000

/* RX & TX descriptor status */
#define ERROR_SUMMARY			0x00000001

/* RX descriptor status */
#define LAYER_4_CHECKSUM_OK		0x40000000
#define RX_ENABLE_INTERRUPT		0x20000000
#define RX_FIRST_DESC			0x08000000
#define RX_LAST_DESC			0x04000000

/* TX descriptor command */
#define TX_ENABLE_INTERRUPT		0x00800000
#define GEN_CRC				0x00400000
#define TX_FIRST_DESC			0x00200000
#define TX_LAST_DESC			0x00100000
#define ZERO_PADDING			0x00080000
#define GEN_IP_V4_CHECKSUM		0x00040000
#define GEN_TCP_UDP_CHECKSUM		0x00020000
#define UDP_FRAME			0x00010000

#define TX_IHL_SHIFT			11
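
/*
 * Descriptor ownership handshake: the CPU fills in a descriptor and then
 * sets BUFFER_OWNED_BY_DMA in cmd_sts (behind a wmb() so the other fields
 * are visible first); the controller clears the bit again when it is done
 * with the descriptor.  Neither side may touch a descriptor it does not
 * own, which is why the rx/tx paths below test this bit before reaping.
 */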
/*
 * Unified struct for Rx and Tx operations.  The user need not be
 * familiar with either the Rx or the Tx descriptor layout.
 */
struct pkt_info {
	unsigned short byte_cnt;	/* Descriptor buffer byte count	*/
	unsigned short l4i_chk;		/* Tx CPU provided TCP Checksum	*/
	unsigned int cmd_sts;		/* Descriptor command status	*/
	dma_addr_t buf_ptr;		/* Descriptor buffer pointer	*/
	struct sk_buff *return_info;	/* User resource return information */
};
/* global *******************************************************************/
struct mv643xx_eth_shared_private {
	void __iomem *base;

	/* used to protect SMI_REG, which is shared across ports */
	spinlock_t phy_lock;

	u32 win_protect;

	unsigned int t_clk;
};
/* per-port *****************************************************************/
struct mib_counters {
	u64 good_octets_received;
	u32 bad_octets_received;
	u32 internal_mac_transmit_err;
	u32 good_frames_received;
	u32 bad_frames_received;
	u32 broadcast_frames_received;
	u32 multicast_frames_received;
	u32 frames_64_octets;
	u32 frames_65_to_127_octets;
	u32 frames_128_to_255_octets;
	u32 frames_256_to_511_octets;
	u32 frames_512_to_1023_octets;
	u32 frames_1024_to_max_octets;
	u64 good_octets_sent;
	u32 good_frames_sent;
	u32 excessive_collision;
	u32 multicast_frames_sent;
	u32 broadcast_frames_sent;
	u32 unrec_mac_control_received;
	u32 fc_sent;
	u32 good_fc_received;
	u32 bad_fc_received;
	u32 undersize_received;
	u32 fragments_received;
	u32 oversize_received;
	u32 jabber_received;
	u32 mac_receive_error;
	u32 bad_crc_event;
	u32 collision;
	u32 late_collision;
};
struct mv643xx_eth_private {
	struct mv643xx_eth_shared_private *shared;
	int port_num;			/* User Ethernet port number	*/

	struct mv643xx_eth_shared_private *shared_smi;

	u32 rx_sram_addr;		/* Base address of rx sram area	*/
	u32 rx_sram_size;		/* Size of rx sram area		*/
	u32 tx_sram_addr;		/* Base address of tx sram area	*/
	u32 tx_sram_size;		/* Size of tx sram area		*/

	/* Tx/Rx rings management index fields. For driver use. */

	/* Next available and first returning Rx resource */
	int rx_curr_desc_q, rx_used_desc_q;

	/* Next available and first returning Tx resource */
	int tx_curr_desc_q, tx_used_desc_q;

#ifdef MV643XX_ETH_TX_FAST_REFILL
	u32 tx_clean_threshold;
#endif

	struct rx_desc *p_rx_desc_area;
	dma_addr_t rx_desc_dma;
	int rx_desc_area_size;
	struct sk_buff **rx_skb;

	struct tx_desc *p_tx_desc_area;
	dma_addr_t tx_desc_dma;
	int tx_desc_area_size;
	struct sk_buff **tx_skb;

	struct work_struct tx_timeout_task;

	struct net_device *dev;
	struct napi_struct napi;
	struct net_device_stats stats;
	struct mib_counters mib_counters;
	spinlock_t lock;

	/* Size of Tx Ring per queue */
	int tx_ring_size;
	/* Number of tx descriptors in use */
	int tx_desc_count;
	/* Size of Rx Ring per queue */
	int rx_ring_size;
	/* Number of rx descriptors in use */
	int rx_desc_count;

	/*
	 * Used in case RX Ring is empty, which can be caused when
	 * system does not have resources (skb's)
	 */
	struct timer_list timeout;

	u32 rx_int_coal;
	u32 tx_int_coal;

	struct mii_if_info mii;
};
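
/*
 * Ring index convention used throughout: *_curr_desc_q is the next
 * descriptor the driver will hand to the hardware (tx) or inspect for
 * a completed receive (rx), while *_used_desc_q is the next descriptor
 * to be reclaimed (tx) or refilled with a fresh buffer (rx).  Both
 * advance modulo the ring size.
 */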
/* port register accessors **************************************************/
static inline u32 rdl(struct mv643xx_eth_private *mp, int offset)
{
	return readl(mp->shared->base + offset);
}

static inline void wrl(struct mv643xx_eth_private *mp, int offset, u32 data)
{
	writel(data, mp->shared->base + offset);
}
/* rxq/txq helper functions *************************************************/
static void mv643xx_eth_port_enable_rx(struct mv643xx_eth_private *mp,
						unsigned int queues)
{
	wrl(mp, RXQ_COMMAND(mp->port_num), queues);
}

static unsigned int mv643xx_eth_port_disable_rx(struct mv643xx_eth_private *mp)
{
	unsigned int port_num = mp->port_num;
	u32 queues;

	/* Stop Rx port activity. Check port Rx activity. */
	queues = rdl(mp, RXQ_COMMAND(port_num)) & 0xFF;

	/* Issue stop command for active queues only */
	wrl(mp, RXQ_COMMAND(port_num), (queues << 8));

	/* Wait for all Rx activity to terminate. */
	/* Check port cause register that all Rx queues are stopped */
	while (rdl(mp, RXQ_COMMAND(port_num)) & 0xFF)
		udelay(10);

	return queues;
}

static void mv643xx_eth_port_enable_tx(struct mv643xx_eth_private *mp,
						unsigned int queues)
{
	wrl(mp, TXQ_COMMAND(mp->port_num), queues);
}

static unsigned int mv643xx_eth_port_disable_tx(struct mv643xx_eth_private *mp)
{
	unsigned int port_num = mp->port_num;
	u32 queues;

	/* Stop Tx port activity. Check port Tx activity. */
	queues = rdl(mp, TXQ_COMMAND(port_num)) & 0xFF;

	/* Issue stop command for active queues only */
	wrl(mp, TXQ_COMMAND(port_num), (queues << 8));

	/* Wait for all Tx activity to terminate. */
	/* Check port cause register that all Tx queues are stopped */
	while (rdl(mp, TXQ_COMMAND(port_num)) & 0xFF)
		udelay(10);

	/* Wait for Tx FIFO to empty */
	while (rdl(mp, PORT_STATUS(port_num)) & TX_FIFO_EMPTY)
		udelay(10);

	return queues;
}
/* rx ***********************************************************************/
static void mv643xx_eth_free_completed_tx_descs(struct net_device *dev);

static FUNC_RET_STATUS rx_return_buff(struct mv643xx_eth_private *mp,
						struct pkt_info *p_pkt_info)
{
	int used_rx_desc;	/* Where to return Rx resource */
	volatile struct rx_desc *p_used_rx_desc;
	unsigned long flags;

	spin_lock_irqsave(&mp->lock, flags);

	/* Get 'used' Rx descriptor */
	used_rx_desc = mp->rx_used_desc_q;
	p_used_rx_desc = &mp->p_rx_desc_area[used_rx_desc];

	p_used_rx_desc->buf_ptr = p_pkt_info->buf_ptr;
	p_used_rx_desc->buf_size = p_pkt_info->byte_cnt;
	mp->rx_skb[used_rx_desc] = p_pkt_info->return_info;

	/* Flush the write pipe */
	wmb();

	/* Return the descriptor to DMA ownership */
	p_used_rx_desc->cmd_sts = BUFFER_OWNED_BY_DMA | RX_ENABLE_INTERRUPT;
	wmb();

	/* Move the used descriptor pointer to the next descriptor */
	mp->rx_used_desc_q = (used_rx_desc + 1) % mp->rx_ring_size;

	spin_unlock_irqrestore(&mp->lock, flags);

	return ETH_OK;
}
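
/*
 * The refill path below allocates each skb one cache line larger than
 * needed and then uses skb_reserve() to push skb->data up to a cache
 * line boundary, so the DMA mapping never shares a cache line with
 * unrelated data.
 */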
static void mv643xx_eth_rx_refill_descs(struct net_device *dev)
{
	struct mv643xx_eth_private *mp = netdev_priv(dev);
	struct pkt_info pkt_info;
	struct sk_buff *skb;
	int unaligned;

	while (mp->rx_desc_count < mp->rx_ring_size) {
		skb = dev_alloc_skb(ETH_RX_SKB_SIZE + dma_get_cache_alignment());
		if (!skb)
			break;
		mp->rx_desc_count++;
		unaligned = (u32)skb->data & (dma_get_cache_alignment() - 1);
		if (unaligned)
			skb_reserve(skb, dma_get_cache_alignment() - unaligned);
		pkt_info.cmd_sts = RX_ENABLE_INTERRUPT;
		pkt_info.byte_cnt = ETH_RX_SKB_SIZE;
		pkt_info.buf_ptr = dma_map_single(NULL, skb->data,
					ETH_RX_SKB_SIZE, DMA_FROM_DEVICE);
		pkt_info.return_info = skb;
		if (rx_return_buff(mp, &pkt_info) != ETH_OK) {
			printk(KERN_ERR
				"%s: Error allocating RX Ring\n", dev->name);
			break;
		}
		skb_reserve(skb, ETH_HW_IP_ALIGN);
	}
	/*
	 * If the RX ring is empty of skb's, set a timer to try allocating
	 * again at a later time.
	 */
	if (mp->rx_desc_count == 0) {
		printk(KERN_INFO "%s: Rx ring is empty\n", dev->name);
		mp->timeout.expires = jiffies + (HZ / 10);	/* 100 mSec */
		add_timer(&mp->timeout);
	}
}
static inline void mv643xx_eth_rx_refill_descs_timer_wrapper(unsigned long data)
{
	mv643xx_eth_rx_refill_descs((struct net_device *)data);
}
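
/*
 * The controller stores each received frame ETH_HW_IP_ALIGN (2) bytes
 * into its buffer so the IP header ends up 32-bit aligned;
 * port_receive() below compensates by advancing buf_ptr and shrinking
 * byte_cnt by the same 2 bytes.
 */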
static FUNC_RET_STATUS port_receive(struct mv643xx_eth_private *mp,
						struct pkt_info *p_pkt_info)
{
	int rx_next_curr_desc, rx_curr_desc, rx_used_desc;
	volatile struct rx_desc *p_rx_desc;
	unsigned int command_status;
	unsigned long flags;

	spin_lock_irqsave(&mp->lock, flags);

	/* Get the Rx Desc ring 'curr' and 'used' indexes */
	rx_curr_desc = mp->rx_curr_desc_q;
	rx_used_desc = mp->rx_used_desc_q;

	p_rx_desc = &mp->p_rx_desc_area[rx_curr_desc];

	/* The following parameters are used to save readings from memory */
	command_status = p_rx_desc->cmd_sts;
	rmb();

	/* Nothing to receive... */
	if (command_status & BUFFER_OWNED_BY_DMA) {
		spin_unlock_irqrestore(&mp->lock, flags);
		return ETH_END_OF_JOB;
	}

	p_pkt_info->byte_cnt = p_rx_desc->byte_cnt - ETH_HW_IP_ALIGN;
	p_pkt_info->cmd_sts = command_status;
	p_pkt_info->buf_ptr = p_rx_desc->buf_ptr + ETH_HW_IP_ALIGN;
	p_pkt_info->return_info = mp->rx_skb[rx_curr_desc];
	p_pkt_info->l4i_chk = p_rx_desc->buf_size;

	/*
	 * Clean the return info field to indicate that the
	 * packet has been moved to the upper layers
	 */
	mp->rx_skb[rx_curr_desc] = NULL;

	/* Update current index in data structure */
	rx_next_curr_desc = (rx_curr_desc + 1) % mp->rx_ring_size;
	mp->rx_curr_desc_q = rx_next_curr_desc;

	spin_unlock_irqrestore(&mp->lock, flags);

	return ETH_OK;
}
static int mv643xx_eth_receive_queue(struct net_device *dev, int budget)
{
	struct mv643xx_eth_private *mp = netdev_priv(dev);
	struct net_device_stats *stats = &dev->stats;
	unsigned int received_packets = 0;
	struct sk_buff *skb;
	struct pkt_info pkt_info;

	while (budget-- > 0 && port_receive(mp, &pkt_info) == ETH_OK) {
		dma_unmap_single(NULL, pkt_info.buf_ptr, ETH_RX_SKB_SIZE,
							DMA_FROM_DEVICE);
		mp->rx_desc_count--;
		received_packets++;

		/*
		 * Update statistics.
		 * Note byte count includes 4 byte CRC count
		 */
		stats->rx_packets++;
		stats->rx_bytes += pkt_info.byte_cnt;
		skb = pkt_info.return_info;
		/*
		 * If a packet arrives without the first/last descriptor
		 * bits both set, or with the error summary bit set, drop it.
		 */
		if (((pkt_info.cmd_sts & (RX_FIRST_DESC | RX_LAST_DESC)) !=
					(RX_FIRST_DESC | RX_LAST_DESC))
				|| (pkt_info.cmd_sts & ERROR_SUMMARY)) {
			stats->rx_dropped++;
			if ((pkt_info.cmd_sts & (RX_FIRST_DESC |
						RX_LAST_DESC)) !=
				(RX_FIRST_DESC | RX_LAST_DESC)) {
				if (net_ratelimit())
					printk(KERN_ERR
						"%s: Received packet spread "
						"on multiple descriptors\n",
						dev->name);
			}
			if (pkt_info.cmd_sts & ERROR_SUMMARY)
				stats->rx_errors++;

			dev_kfree_skb_irq(skb);
		} else {
			/*
			 * The -4 is for the CRC in the trailer of the
			 * received packet
			 */
			skb_put(skb, pkt_info.byte_cnt - 4);

			if (pkt_info.cmd_sts & LAYER_4_CHECKSUM_OK) {
				skb->ip_summed = CHECKSUM_UNNECESSARY;
				skb->csum = htons(
					(pkt_info.cmd_sts & 0x0007fff8) >> 3);
			}
			skb->protocol = eth_type_trans(skb, dev);
#ifdef MV643XX_ETH_NAPI
			netif_receive_skb(skb);
#else
			netif_rx(skb);
#endif
		}
		dev->last_rx = jiffies;
	}
	mv643xx_eth_rx_refill_descs(dev);	/* Fill RX ring with skb's */

	return received_packets;
}
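
/*
 * NAPI flow: the interrupt handler masks INT_RX and schedules the poll
 * routine; mv643xx_eth_poll() below leaves INT_RX masked while it reaps
 * packets and only re-enables interrupts (INT_RX | INT_EXT) once it
 * finishes under budget.
 */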
#ifdef MV643XX_ETH_NAPI
static int mv643xx_eth_poll(struct napi_struct *napi, int budget)
{
	struct mv643xx_eth_private *mp = container_of(napi, struct mv643xx_eth_private, napi);
	struct net_device *dev = mp->dev;
	unsigned int port_num = mp->port_num;
	int work_done;

#ifdef MV643XX_ETH_TX_FAST_REFILL
	if (++mp->tx_clean_threshold > 5) {
		mv643xx_eth_free_completed_tx_descs(dev);
		mp->tx_clean_threshold = 0;
	}
#endif

	work_done = 0;
	if ((rdl(mp, RXQ_CURRENT_DESC_PTR(port_num)))
	    != (u32) mp->rx_used_desc_q)
		work_done = mv643xx_eth_receive_queue(dev, budget);

	if (work_done < budget) {
		netif_rx_complete(dev, napi);
		wrl(mp, INT_CAUSE(port_num), 0);
		wrl(mp, INT_CAUSE_EXT(port_num), 0);
		wrl(mp, INT_MASK(port_num), INT_RX | INT_EXT);
	}

	return work_done;
}
#endif
/* tx ***********************************************************************/
static inline unsigned int has_tiny_unaligned_frags(struct sk_buff *skb)
{
	unsigned int frag;
	skb_frag_t *fragp;

	for (frag = 0; frag < skb_shinfo(skb)->nr_frags; frag++) {
		fragp = &skb_shinfo(skb)->frags[frag];
		if (fragp->size <= 8 && fragp->page_offset & 0x7)
			return 1;
	}
	return 0;
}
static int alloc_tx_desc_index(struct mv643xx_eth_private *mp)
{
	int tx_desc_curr;

	BUG_ON(mp->tx_desc_count >= mp->tx_ring_size);

	tx_desc_curr = mp->tx_curr_desc_q;
	mp->tx_curr_desc_q = (tx_desc_curr + 1) % mp->tx_ring_size;

	BUG_ON(mp->tx_curr_desc_q == mp->tx_used_desc_q);

	return tx_desc_curr;
}
static void tx_fill_frag_descs(struct mv643xx_eth_private *mp,
					struct sk_buff *skb)
{
	int frag;
	int tx_index;
	struct tx_desc *desc;

	for (frag = 0; frag < skb_shinfo(skb)->nr_frags; frag++) {
		skb_frag_t *this_frag = &skb_shinfo(skb)->frags[frag];

		tx_index = alloc_tx_desc_index(mp);
		desc = &mp->p_tx_desc_area[tx_index];

		desc->cmd_sts = BUFFER_OWNED_BY_DMA;
		/* Last Frag enables interrupt and frees the skb */
		if (frag == (skb_shinfo(skb)->nr_frags - 1)) {
			desc->cmd_sts |= ZERO_PADDING |
					 TX_LAST_DESC |
					 TX_ENABLE_INTERRUPT;
			mp->tx_skb[tx_index] = skb;
		} else
			mp->tx_skb[tx_index] = NULL;

		desc->l4i_chk = 0;
		desc->byte_cnt = this_frag->size;
		desc->buf_ptr = dma_map_page(NULL, this_frag->page,
						this_frag->page_offset,
						this_frag->size,
						DMA_TO_DEVICE);
	}
}
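
/*
 * A __sum16 checksum is already in network byte order; the __force cast
 * below only re-annotates the type (for sparse) so the value can be
 * passed to ntohs() when filling in the descriptor's l4i_chk field.
 */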
static inline __be16 sum16_as_be(__sum16 sum)
{
	return (__force __be16)sum;
}
static void tx_submit_descs_for_skb(struct mv643xx_eth_private *mp,
					struct sk_buff *skb)
{
	int tx_index;
	struct tx_desc *desc;
	u32 cmd_sts;
	int length;
	int nr_frags = skb_shinfo(skb)->nr_frags;

	cmd_sts = TX_FIRST_DESC | GEN_CRC | BUFFER_OWNED_BY_DMA;

	tx_index = alloc_tx_desc_index(mp);
	desc = &mp->p_tx_desc_area[tx_index];

	if (nr_frags) {
		tx_fill_frag_descs(mp, skb);

		length = skb_headlen(skb);
		mp->tx_skb[tx_index] = NULL;
	} else {
		cmd_sts |= ZERO_PADDING | TX_LAST_DESC | TX_ENABLE_INTERRUPT;
		length = skb->len;
		mp->tx_skb[tx_index] = skb;
	}

	desc->byte_cnt = length;
	desc->buf_ptr = dma_map_single(NULL, skb->data, length, DMA_TO_DEVICE);

	if (skb->ip_summed == CHECKSUM_PARTIAL) {
		BUG_ON(skb->protocol != htons(ETH_P_IP));

		cmd_sts |= GEN_TCP_UDP_CHECKSUM |
			   GEN_IP_V4_CHECKSUM   |
			   ip_hdr(skb)->ihl << TX_IHL_SHIFT;

		switch (ip_hdr(skb)->protocol) {
		case IPPROTO_UDP:
			cmd_sts |= UDP_FRAME;
			desc->l4i_chk = ntohs(sum16_as_be(udp_hdr(skb)->check));
			break;
		case IPPROTO_TCP:
			desc->l4i_chk = ntohs(sum16_as_be(tcp_hdr(skb)->check));
			break;
		default:
			BUG();
		}
	} else {
		/* Errata BTS #50, IHL must be 5 if no HW checksum */
		cmd_sts |= 5 << TX_IHL_SHIFT;
		desc->l4i_chk = 0;
	}

	/* ensure all other descriptors are written before first cmd_sts */
	wmb();
	desc->cmd_sts = cmd_sts;

	/* ensure all descriptors are written before poking hardware */
	wmb();
	mv643xx_eth_port_enable_tx(mp, 1);

	mp->tx_desc_count += nr_frags + 1;
}
static int mv643xx_eth_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct mv643xx_eth_private *mp = netdev_priv(dev);
	struct net_device_stats *stats = &dev->stats;
	unsigned long flags;

	BUG_ON(netif_queue_stopped(dev));

	if (has_tiny_unaligned_frags(skb) && __skb_linearize(skb)) {
		stats->tx_dropped++;
		printk(KERN_DEBUG "%s: failed to linearize tiny "
				"unaligned fragment\n", dev->name);
		return NETDEV_TX_BUSY;
	}

	spin_lock_irqsave(&mp->lock, flags);

	if (mp->tx_ring_size - mp->tx_desc_count < MAX_DESCS_PER_SKB) {
		printk(KERN_ERR "%s: transmit with queue full\n", dev->name);
		netif_stop_queue(dev);
		spin_unlock_irqrestore(&mp->lock, flags);
		return NETDEV_TX_BUSY;
	}

	tx_submit_descs_for_skb(mp, skb);
	stats->tx_bytes += skb->len;
	stats->tx_packets++;
	dev->trans_start = jiffies;

	if (mp->tx_ring_size - mp->tx_desc_count < MAX_DESCS_PER_SKB)
		netif_stop_queue(dev);

	spin_unlock_irqrestore(&mp->lock, flags);

	return NETDEV_TX_OK;
}
/* mii management interface *************************************************/
static int phy_addr_get(struct mv643xx_eth_private *mp);

static void read_smi_reg(struct mv643xx_eth_private *mp,
				unsigned int phy_reg, unsigned int *value)
{
	void __iomem *smi_reg = mp->shared_smi->base + SMI_REG;
	int phy_addr = phy_addr_get(mp);
	unsigned long flags;
	int i;

	/* the SMI register is a shared resource */
	spin_lock_irqsave(&mp->shared_smi->phy_lock, flags);

	/* wait for the SMI register to become available */
	for (i = 0; readl(smi_reg) & SMI_BUSY; i++) {
		if (i == 1000) {
			printk("%s: PHY busy timeout\n", mp->dev->name);
			goto out;
		}
		udelay(10);
	}

	writel((phy_addr << 16) | (phy_reg << 21) | SMI_OPCODE_READ, smi_reg);

	/* now wait for the data to be valid */
	for (i = 0; !(readl(smi_reg) & SMI_READ_VALID); i++) {
		if (i == 1000) {
			printk("%s: PHY read timeout\n", mp->dev->name);
			goto out;
		}
		udelay(10);
	}

	*value = readl(smi_reg) & 0xffff;
out:
	spin_unlock_irqrestore(&mp->shared_smi->phy_lock, flags);
}

static void write_smi_reg(struct mv643xx_eth_private *mp,
				unsigned int phy_reg, unsigned int value)
{
	void __iomem *smi_reg = mp->shared_smi->base + SMI_REG;
	int phy_addr = phy_addr_get(mp);
	unsigned long flags;
	int i;

	/* the SMI register is a shared resource */
	spin_lock_irqsave(&mp->shared_smi->phy_lock, flags);

	/* wait for the SMI register to become available */
	for (i = 0; readl(smi_reg) & SMI_BUSY; i++) {
		if (i == 1000) {
			printk("%s: PHY busy timeout\n", mp->dev->name);
			goto out;
		}
		udelay(10);
	}

	writel((phy_addr << 16) | (phy_reg << 21) |
		SMI_OPCODE_WRITE | (value & 0xffff), smi_reg);
out:
	spin_unlock_irqrestore(&mp->shared_smi->phy_lock, flags);
}
/* mib counters *************************************************************/
static void clear_mib_counters(struct mv643xx_eth_private *mp)
{
	unsigned int port_num = mp->port_num;
	int i;

	/* The MIB counters are clear-on-read; dummy-read them all */
	for (i = 0; i < 0x80; i += 4)
		rdl(mp, MIB_COUNTERS(port_num) + i);
}

static inline u32 read_mib(struct mv643xx_eth_private *mp, int offset)
{
	return rdl(mp, MIB_COUNTERS(mp->port_num) + offset);
}

static void update_mib_counters(struct mv643xx_eth_private *mp)
{
	struct mib_counters *p = &mp->mib_counters;

	p->good_octets_received += read_mib(mp, 0x00);
	p->good_octets_received += (u64)read_mib(mp, 0x04) << 32;
	p->bad_octets_received += read_mib(mp, 0x08);
	p->internal_mac_transmit_err += read_mib(mp, 0x0c);
	p->good_frames_received += read_mib(mp, 0x10);
	p->bad_frames_received += read_mib(mp, 0x14);
	p->broadcast_frames_received += read_mib(mp, 0x18);
	p->multicast_frames_received += read_mib(mp, 0x1c);
	p->frames_64_octets += read_mib(mp, 0x20);
	p->frames_65_to_127_octets += read_mib(mp, 0x24);
	p->frames_128_to_255_octets += read_mib(mp, 0x28);
	p->frames_256_to_511_octets += read_mib(mp, 0x2c);
	p->frames_512_to_1023_octets += read_mib(mp, 0x30);
	p->frames_1024_to_max_octets += read_mib(mp, 0x34);
	p->good_octets_sent += read_mib(mp, 0x38);
	p->good_octets_sent += (u64)read_mib(mp, 0x3c) << 32;
	p->good_frames_sent += read_mib(mp, 0x40);
	p->excessive_collision += read_mib(mp, 0x44);
	p->multicast_frames_sent += read_mib(mp, 0x48);
	p->broadcast_frames_sent += read_mib(mp, 0x4c);
	p->unrec_mac_control_received += read_mib(mp, 0x50);
	p->fc_sent += read_mib(mp, 0x54);
	p->good_fc_received += read_mib(mp, 0x58);
	p->bad_fc_received += read_mib(mp, 0x5c);
	p->undersize_received += read_mib(mp, 0x60);
	p->fragments_received += read_mib(mp, 0x64);
	p->oversize_received += read_mib(mp, 0x68);
	p->jabber_received += read_mib(mp, 0x6c);
	p->mac_receive_error += read_mib(mp, 0x70);
	p->bad_crc_event += read_mib(mp, 0x74);
	p->collision += read_mib(mp, 0x78);
	p->late_collision += read_mib(mp, 0x7c);
}
/* ethtool ******************************************************************/
struct mv643xx_eth_stats {
	char stat_string[ETH_GSTRING_LEN];
	int sizeof_stat;
	int stat_offset;
};

#define MV643XX_ETH_STAT(m) FIELD_SIZEOF(struct mv643xx_eth_private, m), \
			    offsetof(struct mv643xx_eth_private, m)

static const struct mv643xx_eth_stats mv643xx_eth_gstrings_stats[] = {
	{ "rx_packets", MV643XX_ETH_STAT(stats.rx_packets) },
	{ "tx_packets", MV643XX_ETH_STAT(stats.tx_packets) },
	{ "rx_bytes", MV643XX_ETH_STAT(stats.rx_bytes) },
	{ "tx_bytes", MV643XX_ETH_STAT(stats.tx_bytes) },
	{ "rx_errors", MV643XX_ETH_STAT(stats.rx_errors) },
	{ "tx_errors", MV643XX_ETH_STAT(stats.tx_errors) },
	{ "rx_dropped", MV643XX_ETH_STAT(stats.rx_dropped) },
	{ "tx_dropped", MV643XX_ETH_STAT(stats.tx_dropped) },
	{ "good_octets_received", MV643XX_ETH_STAT(mib_counters.good_octets_received) },
	{ "bad_octets_received", MV643XX_ETH_STAT(mib_counters.bad_octets_received) },
	{ "internal_mac_transmit_err", MV643XX_ETH_STAT(mib_counters.internal_mac_transmit_err) },
	{ "good_frames_received", MV643XX_ETH_STAT(mib_counters.good_frames_received) },
	{ "bad_frames_received", MV643XX_ETH_STAT(mib_counters.bad_frames_received) },
	{ "broadcast_frames_received", MV643XX_ETH_STAT(mib_counters.broadcast_frames_received) },
	{ "multicast_frames_received", MV643XX_ETH_STAT(mib_counters.multicast_frames_received) },
	{ "frames_64_octets", MV643XX_ETH_STAT(mib_counters.frames_64_octets) },
	{ "frames_65_to_127_octets", MV643XX_ETH_STAT(mib_counters.frames_65_to_127_octets) },
	{ "frames_128_to_255_octets", MV643XX_ETH_STAT(mib_counters.frames_128_to_255_octets) },
	{ "frames_256_to_511_octets", MV643XX_ETH_STAT(mib_counters.frames_256_to_511_octets) },
	{ "frames_512_to_1023_octets", MV643XX_ETH_STAT(mib_counters.frames_512_to_1023_octets) },
	{ "frames_1024_to_max_octets", MV643XX_ETH_STAT(mib_counters.frames_1024_to_max_octets) },
	{ "good_octets_sent", MV643XX_ETH_STAT(mib_counters.good_octets_sent) },
	{ "good_frames_sent", MV643XX_ETH_STAT(mib_counters.good_frames_sent) },
	{ "excessive_collision", MV643XX_ETH_STAT(mib_counters.excessive_collision) },
	{ "multicast_frames_sent", MV643XX_ETH_STAT(mib_counters.multicast_frames_sent) },
	{ "broadcast_frames_sent", MV643XX_ETH_STAT(mib_counters.broadcast_frames_sent) },
	{ "unrec_mac_control_received", MV643XX_ETH_STAT(mib_counters.unrec_mac_control_received) },
	{ "fc_sent", MV643XX_ETH_STAT(mib_counters.fc_sent) },
	{ "good_fc_received", MV643XX_ETH_STAT(mib_counters.good_fc_received) },
	{ "bad_fc_received", MV643XX_ETH_STAT(mib_counters.bad_fc_received) },
	{ "undersize_received", MV643XX_ETH_STAT(mib_counters.undersize_received) },
	{ "fragments_received", MV643XX_ETH_STAT(mib_counters.fragments_received) },
	{ "oversize_received", MV643XX_ETH_STAT(mib_counters.oversize_received) },
	{ "jabber_received", MV643XX_ETH_STAT(mib_counters.jabber_received) },
	{ "mac_receive_error", MV643XX_ETH_STAT(mib_counters.mac_receive_error) },
	{ "bad_crc_event", MV643XX_ETH_STAT(mib_counters.bad_crc_event) },
	{ "collision", MV643XX_ETH_STAT(mib_counters.collision) },
	{ "late_collision", MV643XX_ETH_STAT(mib_counters.late_collision) },
};

#define MV643XX_ETH_STATS_LEN	ARRAY_SIZE(mv643xx_eth_gstrings_stats)
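
/*
 * Each MV643XX_ETH_STAT() entry expands to a (size, offset) pair into
 * struct mv643xx_eth_private; mv643xx_eth_get_ethtool_stats() below
 * uses the size to decide whether to copy the field out as a u32 or a u64.
 */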
static int mv643xx_eth_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
	struct mv643xx_eth_private *mp = netdev_priv(dev);
	int err;

	spin_lock_irq(&mp->lock);
	err = mii_ethtool_gset(&mp->mii, cmd);
	spin_unlock_irq(&mp->lock);

	/* The PHY may support 1000baseT_Half, but the mv643xx does not */
	cmd->supported &= ~SUPPORTED_1000baseT_Half;
	cmd->advertising &= ~ADVERTISED_1000baseT_Half;

	return err;
}
static int mv643xx_eth_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
	struct mv643xx_eth_private *mp = netdev_priv(dev);
	int err;

	spin_lock_irq(&mp->lock);
	err = mii_ethtool_sset(&mp->mii, cmd);
	spin_unlock_irq(&mp->lock);

	return err;
}
static void mv643xx_eth_get_drvinfo(struct net_device *netdev,
				struct ethtool_drvinfo *drvinfo)
{
	strncpy(drvinfo->driver, mv643xx_eth_driver_name, 32);
	strncpy(drvinfo->version, mv643xx_eth_driver_version, 32);
	strncpy(drvinfo->fw_version, "N/A", 32);
	strncpy(drvinfo->bus_info, "mv643xx", 32);
	drvinfo->n_stats = MV643XX_ETH_STATS_LEN;
}
static int mv643xx_eth_nway_restart(struct net_device *dev)
{
	struct mv643xx_eth_private *mp = netdev_priv(dev);

	return mii_nway_restart(&mp->mii);
}

static u32 mv643xx_eth_get_link(struct net_device *dev)
{
	struct mv643xx_eth_private *mp = netdev_priv(dev);

	return mii_link_ok(&mp->mii);
}
static void mv643xx_eth_get_strings(struct net_device *netdev, uint32_t stringset,
				uint8_t *data)
{
	int i;

	switch (stringset) {
	case ETH_SS_STATS:
		for (i = 0; i < MV643XX_ETH_STATS_LEN; i++) {
			memcpy(data + i * ETH_GSTRING_LEN,
				mv643xx_eth_gstrings_stats[i].stat_string,
				ETH_GSTRING_LEN);
		}
		break;
	}
}
static void mv643xx_eth_get_ethtool_stats(struct net_device *netdev,
		struct ethtool_stats *stats, uint64_t *data)
{
	struct mv643xx_eth_private *mp = netdev_priv(netdev);
	int i;

	update_mib_counters(mp);

	for (i = 0; i < MV643XX_ETH_STATS_LEN; i++) {
		char *p = (char *)mp + mv643xx_eth_gstrings_stats[i].stat_offset;
		data[i] = (mv643xx_eth_gstrings_stats[i].sizeof_stat ==
			sizeof(uint64_t)) ? *(uint64_t *)p : *(uint32_t *)p;
	}
}

static int mv643xx_eth_get_sset_count(struct net_device *netdev, int sset)
{
	switch (sset) {
	case ETH_SS_STATS:
		return MV643XX_ETH_STATS_LEN;
	default:
		return -EOPNOTSUPP;
	}
}
static const struct ethtool_ops mv643xx_eth_ethtool_ops = {
	.get_settings		= mv643xx_eth_get_settings,
	.set_settings		= mv643xx_eth_set_settings,
	.get_drvinfo		= mv643xx_eth_get_drvinfo,
	.get_link		= mv643xx_eth_get_link,
	.set_sg			= ethtool_op_set_sg,
	.get_sset_count		= mv643xx_eth_get_sset_count,
	.get_ethtool_stats	= mv643xx_eth_get_ethtool_stats,
	.get_strings		= mv643xx_eth_get_strings,
	.nway_reset		= mv643xx_eth_nway_restart,
};
/* address handling *********************************************************/
static void uc_addr_get(struct mv643xx_eth_private *mp, unsigned char *p_addr)
{
	unsigned int port_num = mp->port_num;
	unsigned int mac_h;
	unsigned int mac_l;

	mac_h = rdl(mp, MAC_ADDR_HIGH(port_num));
	mac_l = rdl(mp, MAC_ADDR_LOW(port_num));

	p_addr[0] = (mac_h >> 24) & 0xff;
	p_addr[1] = (mac_h >> 16) & 0xff;
	p_addr[2] = (mac_h >> 8) & 0xff;
	p_addr[3] = mac_h & 0xff;
	p_addr[4] = (mac_l >> 8) & 0xff;
	p_addr[5] = mac_l & 0xff;
}
static void init_mac_tables(struct mv643xx_eth_private *mp)
{
	unsigned int port_num = mp->port_num;
	int table_index;

	/* Clear DA filter unicast table (Ex_dFUT) */
	for (table_index = 0; table_index <= 0xC; table_index += 4)
		wrl(mp, UNICAST_TABLE(port_num) + table_index, 0);

	for (table_index = 0; table_index <= 0xFC; table_index += 4) {
		/* Clear DA filter special multicast table (Ex_dFSMT) */
		wrl(mp, SPECIAL_MCAST_TABLE(port_num) + table_index, 0);
		/* Clear DA filter other multicast table (Ex_dFOMT) */
		wrl(mp, OTHER_MCAST_TABLE(port_num) + table_index, 0);
	}
}
static void set_filter_table_entry(struct mv643xx_eth_private *mp,
					int table, unsigned char entry)
{
	unsigned int table_reg;
	unsigned int tbl_offset;
	unsigned int reg_offset;

	tbl_offset = (entry / 4) * 4;	/* Register offset of DA table entry */
	reg_offset = entry % 4;		/* Entry offset within the register */

	/* Set "accepts frame bit" at specified table entry */
	table_reg = rdl(mp, table + tbl_offset);
	table_reg |= 0x01 << (8 * reg_offset);
	wrl(mp, table + tbl_offset, table_reg);
}
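
/*
 * Example for the function above: entry 42 (0x2a) lands in the register
 * at table + 0x28, byte lane 2, so bit 16 of that register is set.
 */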
static void uc_addr_set(struct mv643xx_eth_private *mp, unsigned char *p_addr)
{
	unsigned int port_num = mp->port_num;
	unsigned int mac_h;
	unsigned int mac_l;
	int table;

	mac_l = (p_addr[4] << 8) | (p_addr[5]);
	mac_h = (p_addr[0] << 24) | (p_addr[1] << 16) | (p_addr[2] << 8) |
							(p_addr[3] << 0);

	wrl(mp, MAC_ADDR_LOW(port_num), mac_l);
	wrl(mp, MAC_ADDR_HIGH(port_num), mac_h);

	/* Accept frames with this address */
	table = UNICAST_TABLE(port_num);
	set_filter_table_entry(mp, table, p_addr[5] & 0x0f);
}
static void mv643xx_eth_update_mac_address(struct net_device *dev)
{
	struct mv643xx_eth_private *mp = netdev_priv(dev);

	init_mac_tables(mp);
	uc_addr_set(mp, dev->dev_addr);
}

static int mv643xx_eth_set_mac_address(struct net_device *dev, void *addr)
{
	int i;

	for (i = 0; i < 6; i++)
		/* +2 is for the offset of the HW addr type */
		dev->dev_addr[i] = ((unsigned char *)addr)[i + 2];
	mv643xx_eth_update_mac_address(dev);

	return 0;
}
static void mc_addr(struct mv643xx_eth_private *mp, unsigned char *p_addr)
{
	unsigned int port_num = mp->port_num;
	unsigned int mac_h;
	unsigned int mac_l;
	unsigned char crc_result = 0;
	int table;
	int mac_array[48];
	int crc[8];
	int i;

	if ((p_addr[0] == 0x01) && (p_addr[1] == 0x00) &&
	    (p_addr[2] == 0x5E) && (p_addr[3] == 0x00) && (p_addr[4] == 0x00)) {
		table = SPECIAL_MCAST_TABLE(port_num);
		set_filter_table_entry(mp, table, p_addr[5]);
		return;
	}

	/* Calculate CRC-8 out of the given address */
	mac_h = (p_addr[0] << 8) | (p_addr[1]);
	mac_l = (p_addr[2] << 24) | (p_addr[3] << 16) |
			(p_addr[4] << 8) | (p_addr[5] << 0);

	for (i = 0; i < 32; i++)
		mac_array[i] = (mac_l >> i) & 0x1;
	for (i = 32; i < 48; i++)
		mac_array[i] = (mac_h >> (i - 32)) & 0x1;

	crc[0] = mac_array[45] ^ mac_array[43] ^ mac_array[40] ^ mac_array[39] ^
		 mac_array[35] ^ mac_array[34] ^ mac_array[31] ^ mac_array[30] ^
		 mac_array[28] ^ mac_array[23] ^ mac_array[21] ^ mac_array[19] ^
		 mac_array[18] ^ mac_array[16] ^ mac_array[14] ^ mac_array[12] ^
		 mac_array[8] ^ mac_array[7] ^ mac_array[6] ^ mac_array[0];

	crc[1] = mac_array[46] ^ mac_array[45] ^ mac_array[44] ^ mac_array[43] ^
		 mac_array[41] ^ mac_array[39] ^ mac_array[36] ^ mac_array[34] ^
		 mac_array[32] ^ mac_array[30] ^ mac_array[29] ^ mac_array[28] ^
		 mac_array[24] ^ mac_array[23] ^ mac_array[22] ^ mac_array[21] ^
		 mac_array[20] ^ mac_array[18] ^ mac_array[17] ^ mac_array[16] ^
		 mac_array[15] ^ mac_array[14] ^ mac_array[13] ^ mac_array[12] ^
		 mac_array[9] ^ mac_array[6] ^ mac_array[1] ^ mac_array[0];

	crc[2] = mac_array[47] ^ mac_array[46] ^ mac_array[44] ^ mac_array[43] ^
		 mac_array[42] ^ mac_array[39] ^ mac_array[37] ^ mac_array[34] ^
		 mac_array[33] ^ mac_array[29] ^ mac_array[28] ^ mac_array[25] ^
		 mac_array[24] ^ mac_array[22] ^ mac_array[17] ^ mac_array[15] ^
		 mac_array[13] ^ mac_array[12] ^ mac_array[10] ^ mac_array[8] ^
		 mac_array[6] ^ mac_array[2] ^ mac_array[1] ^ mac_array[0];

	crc[3] = mac_array[47] ^ mac_array[45] ^ mac_array[44] ^ mac_array[43] ^
		 mac_array[40] ^ mac_array[38] ^ mac_array[35] ^ mac_array[34] ^
		 mac_array[30] ^ mac_array[29] ^ mac_array[26] ^ mac_array[25] ^
		 mac_array[23] ^ mac_array[18] ^ mac_array[16] ^ mac_array[14] ^
		 mac_array[13] ^ mac_array[11] ^ mac_array[9] ^ mac_array[7] ^
		 mac_array[3] ^ mac_array[2] ^ mac_array[1];

	crc[4] = mac_array[46] ^ mac_array[45] ^ mac_array[44] ^ mac_array[41] ^
		 mac_array[39] ^ mac_array[36] ^ mac_array[35] ^ mac_array[31] ^
		 mac_array[30] ^ mac_array[27] ^ mac_array[26] ^ mac_array[24] ^
		 mac_array[19] ^ mac_array[17] ^ mac_array[15] ^ mac_array[14] ^
		 mac_array[12] ^ mac_array[10] ^ mac_array[8] ^ mac_array[4] ^
		 mac_array[3] ^ mac_array[2];

	crc[5] = mac_array[47] ^ mac_array[46] ^ mac_array[45] ^ mac_array[42] ^
		 mac_array[40] ^ mac_array[37] ^ mac_array[36] ^ mac_array[32] ^
		 mac_array[31] ^ mac_array[28] ^ mac_array[27] ^ mac_array[25] ^
		 mac_array[20] ^ mac_array[18] ^ mac_array[16] ^ mac_array[15] ^
		 mac_array[13] ^ mac_array[11] ^ mac_array[9] ^ mac_array[5] ^
		 mac_array[4] ^ mac_array[3];

	crc[6] = mac_array[47] ^ mac_array[46] ^ mac_array[43] ^ mac_array[41] ^
		 mac_array[38] ^ mac_array[37] ^ mac_array[33] ^ mac_array[32] ^
		 mac_array[29] ^ mac_array[28] ^ mac_array[26] ^ mac_array[21] ^
		 mac_array[19] ^ mac_array[17] ^ mac_array[16] ^ mac_array[14] ^
		 mac_array[12] ^ mac_array[10] ^ mac_array[6] ^ mac_array[5] ^
		 mac_array[4];

	crc[7] = mac_array[47] ^ mac_array[44] ^ mac_array[42] ^ mac_array[39] ^
		 mac_array[38] ^ mac_array[34] ^ mac_array[33] ^ mac_array[30] ^
		 mac_array[29] ^ mac_array[27] ^ mac_array[22] ^ mac_array[20] ^
		 mac_array[18] ^ mac_array[17] ^ mac_array[15] ^ mac_array[13] ^
		 mac_array[11] ^ mac_array[7] ^ mac_array[6] ^ mac_array[5];

	for (i = 0; i < 8; i++)
		crc_result = crc_result | (crc[i] << i);

	table = OTHER_MCAST_TABLE(port_num);
	set_filter_table_entry(mp, table, crc_result);
}
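
/*
 * The XOR network above computes an 8-bit CRC of the 48-bit multicast
 * address, bit by bit; crc_result then indexes one of the 256 entries of
 * the "other" multicast filter table.  Addresses of the 01:00:5e:00:00:xx
 * form bypass the hash and are entered directly into the special table.
 */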
static void set_multicast_list(struct net_device *dev)
{
	struct dev_mc_list *mc_list;
	int i;
	int table_index;
	struct mv643xx_eth_private *mp = netdev_priv(dev);
	unsigned int port_num = mp->port_num;

	/* If the device is in promiscuous mode or in all multicast mode,
	 * we will fully populate both multicast tables with accept.
	 * This is guaranteed to yield a match on all multicast addresses...
	 */
	if ((dev->flags & IFF_PROMISC) || (dev->flags & IFF_ALLMULTI)) {
		for (table_index = 0; table_index <= 0xFC; table_index += 4) {
			/* Set all entries in DA filter special multicast
			 * table (Ex_dFSMT)
			 * Set for ETH_Q0 for now
			 * Bits
			 * 0	Accept=1, Drop=0
			 * 3-1	Queue	 ETH_Q0=0
			 */
			wrl(mp, SPECIAL_MCAST_TABLE(port_num) + table_index, 0x01010101);

			/* Set all entries in DA filter other multicast
			 * table (Ex_dFOMT)
			 * Set for ETH_Q0 for now
			 * Bits
			 * 0	Accept=1, Drop=0
			 * 3-1	Queue	 ETH_Q0=0
			 */
			wrl(mp, OTHER_MCAST_TABLE(port_num) + table_index, 0x01010101);
		}
		return;
	}

	/* We will clear out multicast tables every time we get the list.
	 * Then add the entire new list...
	 */
	for (table_index = 0; table_index <= 0xFC; table_index += 4) {
		/* Clear DA filter special multicast table (Ex_dFSMT) */
		wrl(mp, SPECIAL_MCAST_TABLE(port_num) + table_index, 0);

		/* Clear DA filter other multicast table (Ex_dFOMT) */
		wrl(mp, OTHER_MCAST_TABLE(port_num) + table_index, 0);
	}

	/* Get pointer to net_device multicast list and add each one... */
	for (i = 0, mc_list = dev->mc_list;
			(i < 256) && (mc_list != NULL) && (i < dev->mc_count);
			i++, mc_list = mc_list->next)
		if (mc_list->dmi_addrlen == 6)
			mc_addr(mp, mc_list->dmi_addr);
}
static void mv643xx_eth_set_rx_mode(struct net_device *dev)
{
	struct mv643xx_eth_private *mp = netdev_priv(dev);
	u32 config_reg;

	config_reg = rdl(mp, PORT_CONFIG(mp->port_num));
	if (dev->flags & IFF_PROMISC)
		config_reg |= UNICAST_PROMISCUOUS_MODE;
	else
		config_reg &= ~UNICAST_PROMISCUOUS_MODE;
	wrl(mp, PORT_CONFIG(mp->port_num), config_reg);

	set_multicast_list(dev);
}
/* rx/tx queue initialisation ***********************************************/
static void ether_init_rx_desc_ring(struct mv643xx_eth_private *mp)
{
	volatile struct rx_desc *p_rx_desc;
	int rx_desc_num = mp->rx_ring_size;
	int i;

	/* initialize the next_desc_ptr links in the Rx descriptors ring */
	p_rx_desc = (struct rx_desc *)mp->p_rx_desc_area;
	for (i = 0; i < rx_desc_num; i++) {
		p_rx_desc[i].next_desc_ptr = mp->rx_desc_dma +
			((i + 1) % rx_desc_num) * sizeof(struct rx_desc);
	}

	/* Save Rx desc pointer to driver struct. */
	mp->rx_curr_desc_q = 0;
	mp->rx_used_desc_q = 0;

	mp->rx_desc_area_size = rx_desc_num * sizeof(struct rx_desc);
}
static void mv643xx_eth_free_rx_rings(struct net_device *dev)
{
	struct mv643xx_eth_private *mp = netdev_priv(dev);
	int curr;

	/* Stop RX Queues */
	mv643xx_eth_port_disable_rx(mp);

	/* Free preallocated skb's on RX rings */
	for (curr = 0; mp->rx_desc_count && curr < mp->rx_ring_size; curr++) {
		if (mp->rx_skb[curr]) {
			dev_kfree_skb(mp->rx_skb[curr]);
			mp->rx_desc_count--;
		}
	}

	if (mp->rx_desc_count)
		printk(KERN_ERR
			"%s: Error in freeing Rx Ring. %d skb's still"
			" stuck in RX Ring - ignoring them\n", dev->name,
			mp->rx_desc_count);
	/* Free RX ring */
	if (mp->rx_sram_size)
		iounmap(mp->p_rx_desc_area);
	else
		dma_free_coherent(NULL, mp->rx_desc_area_size,
				mp->p_rx_desc_area, mp->rx_desc_dma);
}
static void ether_init_tx_desc_ring(struct mv643xx_eth_private *mp)
{
	int tx_desc_num = mp->tx_ring_size;
	struct tx_desc *p_tx_desc;
	int i;

	/* Initialize the next_desc_ptr links in the Tx descriptors ring */
	p_tx_desc = (struct tx_desc *)mp->p_tx_desc_area;
	for (i = 0; i < tx_desc_num; i++) {
		p_tx_desc[i].next_desc_ptr = mp->tx_desc_dma +
			((i + 1) % tx_desc_num) * sizeof(struct tx_desc);
	}

	mp->tx_curr_desc_q = 0;
	mp->tx_used_desc_q = 0;

	mp->tx_desc_area_size = tx_desc_num * sizeof(struct tx_desc);
}
static int mv643xx_eth_free_tx_descs(struct net_device *dev, int force)
{
	struct mv643xx_eth_private *mp = netdev_priv(dev);
	struct tx_desc *desc;
	u32 cmd_sts;
	struct sk_buff *skb;
	unsigned long flags;
	int tx_index;
	dma_addr_t addr;
	int count;
	int released = 0;

	while (mp->tx_desc_count > 0) {
		spin_lock_irqsave(&mp->lock, flags);

		/* tx_desc_count might have changed before acquiring the lock */
		if (mp->tx_desc_count <= 0) {
			spin_unlock_irqrestore(&mp->lock, flags);
			return released;
		}

		tx_index = mp->tx_used_desc_q;
		desc = &mp->p_tx_desc_area[tx_index];
		cmd_sts = desc->cmd_sts;

		if (!force && (cmd_sts & BUFFER_OWNED_BY_DMA)) {
			spin_unlock_irqrestore(&mp->lock, flags);
			return released;
		}

		mp->tx_used_desc_q = (tx_index + 1) % mp->tx_ring_size;
		mp->tx_desc_count--;

		addr = desc->buf_ptr;
		count = desc->byte_cnt;
		skb = mp->tx_skb[tx_index];
		if (skb)
			mp->tx_skb[tx_index] = NULL;

		if (cmd_sts & ERROR_SUMMARY) {
			printk("%s: Error in TX\n", dev->name);
			dev->stats.tx_errors++;
		}

		spin_unlock_irqrestore(&mp->lock, flags);

		if (cmd_sts & TX_FIRST_DESC)
			dma_unmap_single(NULL, addr, count, DMA_TO_DEVICE);
		else
			dma_unmap_page(NULL, addr, count, DMA_TO_DEVICE);

		if (skb)
			dev_kfree_skb_irq(skb);

		released = 1;
	}

	return released;
}
static void mv643xx_eth_free_completed_tx_descs(struct net_device *dev)
{
	struct mv643xx_eth_private *mp = netdev_priv(dev);

	if (mv643xx_eth_free_tx_descs(dev, 0) &&
	    mp->tx_ring_size - mp->tx_desc_count >= MAX_DESCS_PER_SKB)
		netif_wake_queue(dev);
}

static void mv643xx_eth_free_all_tx_descs(struct net_device *dev)
{
	mv643xx_eth_free_tx_descs(dev, 1);
}
static void mv643xx_eth_free_tx_rings(struct net_device *dev)
{
	struct mv643xx_eth_private *mp = netdev_priv(dev);

	/* Stop Tx Queues */
	mv643xx_eth_port_disable_tx(mp);

	/* Free outstanding skb's on TX ring */
	mv643xx_eth_free_all_tx_descs(dev);

	BUG_ON(mp->tx_used_desc_q != mp->tx_curr_desc_q);

	/* Free TX ring */
	if (mp->tx_sram_size)
		iounmap(mp->p_tx_desc_area);
	else
		dma_free_coherent(NULL, mp->tx_desc_area_size,
				mp->p_tx_desc_area, mp->tx_desc_dma);
}
/* netdev ops and related ***************************************************/
static void port_reset(struct mv643xx_eth_private *mp);

static void mv643xx_eth_update_pscr(struct net_device *dev,
					struct ethtool_cmd *ecmd)
{
	struct mv643xx_eth_private *mp = netdev_priv(dev);
	int port_num = mp->port_num;
	u32 o_pscr, n_pscr;
	unsigned int queues;

	o_pscr = rdl(mp, PORT_SERIAL_CONTROL(port_num));
	n_pscr = o_pscr;

	/* clear speed, duplex and rx buffer size fields */
	n_pscr &= ~(SET_MII_SPEED_TO_100	|
		    SET_GMII_SPEED_TO_1000	|
		    SET_FULL_DUPLEX_MODE	|
		    MAX_RX_PACKET_MASK);

	if (ecmd->duplex == DUPLEX_FULL)
		n_pscr |= SET_FULL_DUPLEX_MODE;

	if (ecmd->speed == SPEED_1000)
		n_pscr |= SET_GMII_SPEED_TO_1000 |
			  MAX_RX_PACKET_9700BYTE;
	else {
		if (ecmd->speed == SPEED_100)
			n_pscr |= SET_MII_SPEED_TO_100;
		n_pscr |= MAX_RX_PACKET_1522BYTE;
	}

	if (n_pscr != o_pscr) {
		if ((o_pscr & SERIAL_PORT_ENABLE) == 0)
			wrl(mp, PORT_SERIAL_CONTROL(port_num), n_pscr);
		else {
			queues = mv643xx_eth_port_disable_tx(mp);

			o_pscr &= ~SERIAL_PORT_ENABLE;
			wrl(mp, PORT_SERIAL_CONTROL(port_num), o_pscr);
			wrl(mp, PORT_SERIAL_CONTROL(port_num), n_pscr);
			wrl(mp, PORT_SERIAL_CONTROL(port_num), n_pscr);
			if (queues)
				mv643xx_eth_port_enable_tx(mp, queues);
		}
	}
}
static irqreturn_t mv643xx_eth_int_handler(int irq, void *dev_id)
{
	struct net_device *dev = (struct net_device *)dev_id;
	struct mv643xx_eth_private *mp = netdev_priv(dev);
	u32 int_cause, int_cause_ext = 0;
	unsigned int port_num = mp->port_num;

	/* Read interrupt cause registers */
	int_cause = rdl(mp, INT_CAUSE(port_num)) & (INT_RX | INT_EXT);
	if (int_cause & INT_EXT) {
		int_cause_ext = rdl(mp, INT_CAUSE_EXT(port_num))
				& (INT_EXT_LINK | INT_EXT_PHY | INT_EXT_TX);
		wrl(mp, INT_CAUSE_EXT(port_num), ~int_cause_ext);
	}

	/* PHY status changed */
	if (int_cause_ext & (INT_EXT_LINK | INT_EXT_PHY)) {
		struct ethtool_cmd cmd;

		if (mii_link_ok(&mp->mii)) {
			mii_ethtool_gset(&mp->mii, &cmd);
			mv643xx_eth_update_pscr(dev, &cmd);
			mv643xx_eth_port_enable_tx(mp, 1);
			if (!netif_carrier_ok(dev)) {
				netif_carrier_on(dev);
				if (mp->tx_ring_size - mp->tx_desc_count >=
							MAX_DESCS_PER_SKB)
					netif_wake_queue(dev);
			}
		} else if (netif_carrier_ok(dev)) {
			netif_stop_queue(dev);
			netif_carrier_off(dev);
		}
	}

#ifdef MV643XX_ETH_NAPI
	if (int_cause & INT_RX) {
		/* schedule the NAPI poll routine to maintain port */
		wrl(mp, INT_MASK(port_num), 0x00000000);

		/* wait for previous write to complete */
		rdl(mp, INT_MASK(port_num));

		netif_rx_schedule(dev, &mp->napi);
	}
#else
	if (int_cause & INT_RX)
		mv643xx_eth_receive_queue(dev, INT_MAX);
#endif
	if (int_cause_ext & INT_EXT_TX)
		mv643xx_eth_free_completed_tx_descs(dev);

	/*
	 * If no real interrupt occurred, exit.
	 * This can happen when using the gigE interrupt coalescing mechanism.
	 */
	if ((int_cause == 0x0) && (int_cause_ext == 0x0))
		return IRQ_NONE;

	return IRQ_HANDLED;
}
static void phy_reset(struct mv643xx_eth_private *mp)
{
	unsigned int phy_reg_data;

	/* Reset the PHY */
	read_smi_reg(mp, 0, &phy_reg_data);
	phy_reg_data |= 0x8000;	/* Set bit 15 to reset the PHY */
	write_smi_reg(mp, 0, phy_reg_data);

	/* wait for PHY to come out of reset */
	do {
		udelay(1);
		read_smi_reg(mp, 0, &phy_reg_data);
	} while (phy_reg_data & 0x8000);
}
static void port_start(struct net_device *dev)
{
	struct mv643xx_eth_private *mp = netdev_priv(dev);
	unsigned int port_num = mp->port_num;
	int tx_curr_desc, rx_curr_desc;
	u32 pscr;
	struct ethtool_cmd ethtool_cmd;

	/* Assignment of Tx CTRP of given queue */
	tx_curr_desc = mp->tx_curr_desc_q;
	wrl(mp, TXQ_CURRENT_DESC_PTR(port_num),
		(u32)((struct tx_desc *)mp->tx_desc_dma + tx_curr_desc));

	/* Assignment of Rx CRDP of given queue */
	rx_curr_desc = mp->rx_curr_desc_q;
	wrl(mp, RXQ_CURRENT_DESC_PTR(port_num),
		(u32)((struct rx_desc *)mp->rx_desc_dma + rx_curr_desc));

	/* Add the assigned Ethernet address to the port's address table */
	uc_addr_set(mp, dev->dev_addr);

	/*
	 * Receive all unmatched unicast, TCP, UDP, BPDU and broadcast
	 * frames to RX queue #0.
	 */
	wrl(mp, PORT_CONFIG(port_num), 0x00000000);

	/*
	 * Treat BPDUs as normal multicasts, and disable partition mode.
	 */
	wrl(mp, PORT_CONFIG_EXT(port_num), 0x00000000);

	pscr = rdl(mp, PORT_SERIAL_CONTROL(port_num));

	pscr &= ~(SERIAL_PORT_ENABLE | FORCE_LINK_PASS);
	wrl(mp, PORT_SERIAL_CONTROL(port_num), pscr);

	pscr |= DISABLE_AUTO_NEG_FOR_FLOW_CTRL	|
		DISABLE_AUTO_NEG_SPEED_GMII	|
		DISABLE_AUTO_NEG_FOR_DUPLEX	|
		DO_NOT_FORCE_LINK_FAIL		|
		SERIAL_PORT_CONTROL_RESERVED;

	wrl(mp, PORT_SERIAL_CONTROL(port_num), pscr);

	pscr |= SERIAL_PORT_ENABLE;
	wrl(mp, PORT_SERIAL_CONTROL(port_num), pscr);

	/* Assign port SDMA configuration */
	wrl(mp, SDMA_CONFIG(port_num), PORT_SDMA_CONFIG_DEFAULT_VALUE);

	/* Enable port Rx. */
	mv643xx_eth_port_enable_rx(mp, 1);

	/* Disable port bandwidth limits by clearing MTU register */
	wrl(mp, TX_BW_MTU(port_num), 0);

	/* save phy settings across reset */
	mv643xx_eth_get_settings(dev, &ethtool_cmd);
	phy_reset(mp);
	mv643xx_eth_set_settings(dev, &ethtool_cmd);
}
#ifdef MV643XX_ETH_COAL
static unsigned int set_rx_coal(struct mv643xx_eth_private *mp,
					unsigned int delay)
{
	unsigned int port_num = mp->port_num;
	unsigned int coal = ((mp->shared->t_clk / 1000000) * delay) / 64;

	/* Set RX Coalescing mechanism */
	wrl(mp, SDMA_CONFIG(port_num),
		((coal & 0x3fff) << 8) |
		(rdl(mp, SDMA_CONFIG(port_num))
			& 0xffc000ff));

	return coal;
}
#endif

static unsigned int set_tx_coal(struct mv643xx_eth_private *mp,
					unsigned int delay)
{
	unsigned int coal = ((mp->shared->t_clk / 1000000) * delay) / 64;

	/* Set TX Coalescing mechanism */
	wrl(mp, TX_FIFO_URGENT_THRESHOLD(mp->port_num), coal << 4);

	return coal;
}
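
/*
 * The coalescing arithmetic above converts a delay (apparently given in
 * microseconds) into units of 64 t_clk cycles: with the default 133 MHz
 * t_clk and a 100 usec delay, coal = ((133000000 / 1000000) * 100) / 64
 * = 207.
 */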
static void port_init(struct mv643xx_eth_private *mp)
{
	port_reset(mp);

	init_mac_tables(mp);
}
static int mv643xx_eth_open(struct net_device *dev)
{
	struct mv643xx_eth_private *mp = netdev_priv(dev);
	unsigned int port_num = mp->port_num;
	unsigned int size;
	int err;

	/* Clear any pending ethernet port interrupts */
	wrl(mp, INT_CAUSE(port_num), 0);
	wrl(mp, INT_CAUSE_EXT(port_num), 0);
	/* wait for previous write to complete */
	rdl(mp, INT_CAUSE_EXT(port_num));

	err = request_irq(dev->irq, mv643xx_eth_int_handler,
			IRQF_SHARED | IRQF_SAMPLE_RANDOM, dev->name, dev);
	if (err) {
		printk(KERN_ERR "%s: Can not assign IRQ\n", dev->name);
		return -EAGAIN;
	}

	port_init(mp);

	memset(&mp->timeout, 0, sizeof(struct timer_list));
	mp->timeout.function = mv643xx_eth_rx_refill_descs_timer_wrapper;
	mp->timeout.data = (unsigned long)dev;

	/* Allocate RX and TX skb rings */
	mp->rx_skb = kmalloc(sizeof(*mp->rx_skb) * mp->rx_ring_size,
								GFP_KERNEL);
	if (!mp->rx_skb) {
		printk(KERN_ERR "%s: Cannot allocate Rx skb ring\n", dev->name);
		err = -ENOMEM;
		goto out_free_irq;
	}
	mp->tx_skb = kmalloc(sizeof(*mp->tx_skb) * mp->tx_ring_size,
								GFP_KERNEL);
	if (!mp->tx_skb) {
		printk(KERN_ERR "%s: Cannot allocate Tx skb ring\n", dev->name);
		err = -ENOMEM;
		goto out_free_rx_skb;
	}

	/* Allocate TX ring */
	mp->tx_desc_count = 0;
	size = mp->tx_ring_size * sizeof(struct tx_desc);
	mp->tx_desc_area_size = size;

	if (mp->tx_sram_size) {
		mp->p_tx_desc_area = ioremap(mp->tx_sram_addr,
						mp->tx_sram_size);
		mp->tx_desc_dma = mp->tx_sram_addr;
	} else
		mp->p_tx_desc_area = dma_alloc_coherent(NULL, size,
							&mp->tx_desc_dma,
							GFP_KERNEL);

	if (!mp->p_tx_desc_area) {
		printk(KERN_ERR "%s: Cannot allocate Tx Ring (size %d bytes)\n",
							dev->name, size);
		err = -ENOMEM;
		goto out_free_tx_skb;
	}
	BUG_ON((u32) mp->p_tx_desc_area & 0xf);	/* check 16-byte alignment */
	memset((void *)mp->p_tx_desc_area, 0, mp->tx_desc_area_size);

	ether_init_tx_desc_ring(mp);

	/* Allocate RX ring */
	mp->rx_desc_count = 0;
	size = mp->rx_ring_size * sizeof(struct rx_desc);
	mp->rx_desc_area_size = size;

	if (mp->rx_sram_size) {
		mp->p_rx_desc_area = ioremap(mp->rx_sram_addr,
						mp->rx_sram_size);
		mp->rx_desc_dma = mp->rx_sram_addr;
	} else
		mp->p_rx_desc_area = dma_alloc_coherent(NULL, size,
							&mp->rx_desc_dma,
							GFP_KERNEL);

	if (!mp->p_rx_desc_area) {
		printk(KERN_ERR "%s: Cannot allocate Rx ring (size %d bytes)\n",
							dev->name, size);
		printk(KERN_ERR "%s: Freeing previously allocated TX queues...",
							dev->name);
		if (mp->tx_sram_size)
			iounmap(mp->p_tx_desc_area);
		else
			dma_free_coherent(NULL, mp->tx_desc_area_size,
					mp->p_tx_desc_area, mp->tx_desc_dma);
		err = -ENOMEM;
		goto out_free_tx_skb;
	}
	memset((void *)mp->p_rx_desc_area, 0, size);

	ether_init_rx_desc_ring(mp);

	mv643xx_eth_rx_refill_descs(dev);	/* Fill RX ring with skb's */

#ifdef MV643XX_ETH_NAPI
	napi_enable(&mp->napi);
#endif

	port_start(dev);

	/* Interrupt Coalescing */

#ifdef MV643XX_ETH_COAL
	mp->rx_int_coal = set_rx_coal(mp, MV643XX_ETH_RX_COAL);
#endif

	mp->tx_int_coal = set_tx_coal(mp, MV643XX_ETH_TX_COAL);

	/* Unmask phy and link status changes interrupts */
	wrl(mp, INT_MASK_EXT(port_num), INT_EXT_LINK | INT_EXT_PHY | INT_EXT_TX);

	/* Unmask RX buffer and TX end interrupt */
	wrl(mp, INT_MASK(port_num), INT_RX | INT_EXT);

	return 0;

out_free_tx_skb:
	kfree(mp->tx_skb);
out_free_rx_skb:
	kfree(mp->rx_skb);
out_free_irq:
	free_irq(dev->irq, dev);

	return err;
}
static void port_reset(struct mv643xx_eth_private *mp)
{
	unsigned int port_num = mp->port_num;
	unsigned int reg_data;

	mv643xx_eth_port_disable_tx(mp);
	mv643xx_eth_port_disable_rx(mp);

	/* Clear all MIB counters */
	clear_mib_counters(mp);

	/* Reset the Enable bit in the Configuration Register */
	reg_data = rdl(mp, PORT_SERIAL_CONTROL(port_num));
	reg_data &= ~(SERIAL_PORT_ENABLE	|
			DO_NOT_FORCE_LINK_FAIL	|
			FORCE_LINK_PASS);
	wrl(mp, PORT_SERIAL_CONTROL(port_num), reg_data);
}
static int mv643xx_eth_stop(struct net_device *dev)
{
	struct mv643xx_eth_private *mp = netdev_priv(dev);
	unsigned int port_num = mp->port_num;

	/* Mask all interrupts on ethernet port */
	wrl(mp, INT_MASK(port_num), 0x00000000);
	/* wait for previous write to complete */
	rdl(mp, INT_MASK(port_num));

#ifdef MV643XX_ETH_NAPI
	napi_disable(&mp->napi);
#endif
	netif_carrier_off(dev);
	netif_stop_queue(dev);

	port_reset(mp);

	mv643xx_eth_free_tx_rings(dev);
	mv643xx_eth_free_rx_rings(dev);

	free_irq(dev->irq, dev);

	return 0;
}
static int mv643xx_eth_do_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
{
	struct mv643xx_eth_private *mp = netdev_priv(dev);

	return generic_mii_ioctl(&mp->mii, if_mii(ifr), cmd, NULL);
}
static int mv643xx_eth_change_mtu(struct net_device *dev, int new_mtu)
{
	if ((new_mtu > 9500) || (new_mtu < 64))
		return -EINVAL;

	dev->mtu = new_mtu;
	if (!netif_running(dev))
		return 0;

	/*
	 * Stop and then re-open the interface.  This will allocate RX
	 * skbs of the new MTU.  There is a risk that the re-open will
	 * fail if memory is exhausted.
	 */
	mv643xx_eth_stop(dev);
	if (mv643xx_eth_open(dev)) {
		printk(KERN_ERR "%s: Fatal error on opening device\n",
			dev->name);
	}

	return 0;
}
static void mv643xx_eth_tx_timeout_task(struct work_struct *ugly)
{
	struct mv643xx_eth_private *mp = container_of(ugly, struct mv643xx_eth_private,
						  tx_timeout_task);
	struct net_device *dev = mp->dev;

	if (!netif_running(dev))
		return;

	netif_stop_queue(dev);

	port_reset(mp);
	port_start(dev);

	if (mp->tx_ring_size - mp->tx_desc_count >= MAX_DESCS_PER_SKB)
		netif_wake_queue(dev);
}
static void mv643xx_eth_tx_timeout(struct net_device *dev)
{
	struct mv643xx_eth_private *mp = netdev_priv(dev);

	printk(KERN_INFO "%s: TX timeout\n", dev->name);

	/* Do the reset outside of interrupt context */
	schedule_work(&mp->tx_timeout_task);
}
#ifdef CONFIG_NET_POLL_CONTROLLER
static void mv643xx_eth_netpoll(struct net_device *netdev)
{
	struct mv643xx_eth_private *mp = netdev_priv(netdev);
	int port_num = mp->port_num;

	wrl(mp, INT_MASK(port_num), 0x00000000);
	/* wait for previous write to complete */
	rdl(mp, INT_MASK(port_num));

	mv643xx_eth_int_handler(netdev->irq, netdev);

	wrl(mp, INT_MASK(port_num), INT_RX | INT_EXT);
}
#endif
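/*
 * Note on the netpoll path above: ->poll_controller may be invoked
 * with interrupts disabled (e.g. by netconsole), so the port's
 * interrupt sources are masked, the normal interrupt handler is
 * called synchronously, and the RX/extended summary bits are then
 * unmasked again.
 */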
static int mv643xx_eth_mdio_read(struct net_device *dev, int phy_id, int location)
{
	struct mv643xx_eth_private *mp = netdev_priv(dev);
	int val;

	read_smi_reg(mp, location, &val);

	return val;
}

static void mv643xx_eth_mdio_write(struct net_device *dev, int phy_id,
				   int location, int val)
{
	struct mv643xx_eth_private *mp = netdev_priv(dev);

	write_smi_reg(mp, location, val);
}
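/*
 * These two callbacks adapt the generic mii library (struct mii_if_info)
 * to the port's SMI interface; generic_mii_ioctl() and the mii ethtool
 * helpers reach the PHY through them.  The phy_id argument is unused
 * because read_smi_reg()/write_smi_reg() always address the PHY that is
 * bound to this port.
 */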
/* platform glue ************************************************************/
static void
mv643xx_eth_conf_mbus_windows(struct mv643xx_eth_shared_private *msp,
			      struct mbus_dram_target_info *dram)
{
	void __iomem *base = msp->base;
	u32 win_enable;
	u32 win_protect;
	int i;

	for (i = 0; i < 6; i++) {
		writel(0, base + WINDOW_BASE(i));
		writel(0, base + WINDOW_SIZE(i));
		if (i < 4)
			writel(0, base + WINDOW_REMAP_HIGH(i));
	}

	win_enable = 0x3f;
	win_protect = 0;

	for (i = 0; i < dram->num_cs; i++) {
		struct mbus_dram_window *cs = dram->cs + i;

		writel((cs->base & 0xffff0000) |
		       (cs->mbus_attr << 8) |
		       dram->mbus_dram_target_id, base + WINDOW_BASE(i));
		writel((cs->size - 1) & 0xffff0000, base + WINDOW_SIZE(i));

		win_enable &= ~(1 << i);
		win_protect |= 3 << (2 * i);
	}

	writel(win_enable, base + WINDOW_BAR_ENABLE);
	msp->win_protect = win_protect;
}
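/*
 * Worked example for the window programming above: with two DRAM chip
 * selects, windows 0 and 1 are programmed and win_enable ends up as
 * 0x3f & ~0x03 = 0x3c (a cleared bit enables the window), while
 * win_protect ends up as 0x0f (two bits of full read/write access per
 * window, later written to WINDOW_PROTECT() during the port probe).
 */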
static int mv643xx_eth_shared_probe(struct platform_device *pdev)
{
	static int mv643xx_eth_version_printed = 0;
	struct mv643xx_eth_shared_platform_data *pd = pdev->dev.platform_data;
	struct mv643xx_eth_shared_private *msp;
	struct resource *res;
	int ret;

	if (!mv643xx_eth_version_printed++)
		printk(KERN_NOTICE "MV-643xx 10/100/1000 Ethernet Driver\n");

	ret = -EINVAL;
	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (res == NULL)
		goto out;

	ret = -ENOMEM;
	msp = kmalloc(sizeof(*msp), GFP_KERNEL);
	if (msp == NULL)
		goto out;
	memset(msp, 0, sizeof(*msp));

	msp->base = ioremap(res->start, res->end - res->start + 1);
	if (msp->base == NULL)
		goto out_free;

	spin_lock_init(&msp->phy_lock);
	msp->t_clk = (pd != NULL && pd->t_clk != 0) ? pd->t_clk : 133000000;

	platform_set_drvdata(pdev, msp);

	/*
	 * (Re-)program MBUS remapping windows if we are asked to.
	 */
	if (pd != NULL && pd->dram != NULL)
		mv643xx_eth_conf_mbus_windows(msp, pd->dram);

	return 0;

out_free:
	kfree(msp);
out:
	return ret;
}
static int mv643xx_eth_shared_remove(struct platform_device *pdev)
{
	struct mv643xx_eth_shared_private *msp = platform_get_drvdata(pdev);

	iounmap(msp->base);
	kfree(msp);

	return 0;
}
static struct platform_driver mv643xx_eth_shared_driver = {
	.probe = mv643xx_eth_shared_probe,
	.remove = mv643xx_eth_shared_remove,
	.driver = {
		.name = MV643XX_ETH_SHARED_NAME,
		.owner = THIS_MODULE,
	},
};
static void phy_addr_set(struct mv643xx_eth_private *mp, int phy_addr)
{
	unsigned int reg_data;
	int addr_shift = 5 * mp->port_num;

	reg_data = rdl(mp, PHY_ADDR);
	reg_data &= ~(0x1f << addr_shift);
	reg_data |= (phy_addr & 0x1f) << addr_shift;
	wrl(mp, PHY_ADDR, reg_data);
}
static int phy_addr_get(struct mv643xx_eth_private *mp)
{
	unsigned int reg_data;

	reg_data = rdl(mp, PHY_ADDR);

	return (reg_data >> (5 * mp->port_num)) & 0x1f;
}
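/*
 * The PHY_ADDR register packs one 5-bit PHY address per port, shifted
 * by 5 * port_num: for example, phy_addr_set(mp, 8) on port 1 clears
 * bits 9:5 and writes 8 << 5 into them, and phy_addr_get() on port 2
 * extracts bits 14:10.
 */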
static int phy_detect(struct mv643xx_eth_private *mp)
{
	unsigned int phy_reg_data0;
	int auto_neg;

	read_smi_reg(mp, 0, &phy_reg_data0);
	auto_neg = phy_reg_data0 & 0x1000;
	phy_reg_data0 ^= 0x1000;	/* invert auto_neg */
	write_smi_reg(mp, 0, phy_reg_data0);

	read_smi_reg(mp, 0, &phy_reg_data0);
	if ((phy_reg_data0 & 0x1000) == auto_neg)
		return -ENODEV;		/* change didn't take */

	phy_reg_data0 ^= 0x1000;	/* restore the original value */
	write_smi_reg(mp, 0, phy_reg_data0);

	return 0;
}
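/*
 * phy_detect() probes for a PHY by flipping the auto-negotiation
 * enable bit (0x1000) in MII register 0 (BMCR): if no PHY responds on
 * the SMI bus, the inverted value does not read back and -ENODEV is
 * returned; otherwise the second XOR/write restores the original BMCR
 * contents.
 */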
static void mv643xx_init_ethtool_cmd(struct net_device *dev, int phy_address,
				     int speed, int duplex,
				     struct ethtool_cmd *cmd)
{
	struct mv643xx_eth_private *mp = netdev_priv(dev);

	memset(cmd, 0, sizeof(*cmd));

	cmd->port = PORT_MII;
	cmd->transceiver = XCVR_INTERNAL;
	cmd->phy_address = phy_address;

	if (speed == 0) {
		cmd->autoneg = AUTONEG_ENABLE;
		/* mii lib checks, but doesn't use speed on AUTONEG_ENABLE */
		cmd->speed = SPEED_100;
		cmd->advertising = ADVERTISED_10baseT_Half |
				   ADVERTISED_10baseT_Full |
				   ADVERTISED_100baseT_Half |
				   ADVERTISED_100baseT_Full;
		if (mp->mii.supports_gmii)
			cmd->advertising |= ADVERTISED_1000baseT_Full;
	} else {
		cmd->autoneg = AUTONEG_DISABLE;
		cmd->speed = speed;
		cmd->duplex = duplex;
	}
}
static int mv643xx_eth_probe(struct platform_device *pdev)
{
	struct mv643xx_eth_platform_data *pd;
	int port_num;
	struct mv643xx_eth_private *mp;
	struct net_device *dev;
	u8 *p;
	struct resource *res;
	int err;
	struct ethtool_cmd cmd;
	int duplex = DUPLEX_HALF;
	int speed = 0;			/* default to auto-negotiation */
	DECLARE_MAC_BUF(mac);

	pd = pdev->dev.platform_data;
	if (pd == NULL) {
		printk(KERN_ERR "No mv643xx_eth_platform_data\n");
		return -ENODEV;
	}

	if (pd->shared == NULL) {
		printk(KERN_ERR "No mv643xx_eth_platform_data->shared\n");
		return -ENODEV;
	}

	dev = alloc_etherdev(sizeof(struct mv643xx_eth_private));
	if (!dev)
		return -ENOMEM;

	platform_set_drvdata(pdev, dev);

	mp = netdev_priv(dev);
	mp->dev = dev;
#ifdef MV643XX_ETH_NAPI
	netif_napi_add(dev, &mp->napi, mv643xx_eth_poll, 64);
#endif

	res = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
	BUG_ON(!res);
	dev->irq = res->start;

	dev->open = mv643xx_eth_open;
	dev->stop = mv643xx_eth_stop;
	dev->hard_start_xmit = mv643xx_eth_start_xmit;
	dev->set_mac_address = mv643xx_eth_set_mac_address;
	dev->set_multicast_list = mv643xx_eth_set_rx_mode;

	/* TX timeouts are handled by a deferred reset task */
	dev->tx_timeout = mv643xx_eth_tx_timeout;

#ifdef CONFIG_NET_POLL_CONTROLLER
	dev->poll_controller = mv643xx_eth_netpoll;
#endif

	dev->watchdog_timeo = 2 * HZ;
	dev->base_addr = 0;
	dev->change_mtu = mv643xx_eth_change_mtu;
	dev->do_ioctl = mv643xx_eth_do_ioctl;
	SET_ETHTOOL_OPS(dev, &mv643xx_eth_ethtool_ops);

#ifdef MV643XX_ETH_CHECKSUM_OFFLOAD_TX
#ifdef MAX_SKB_FRAGS
	/*
	 * Zero copy can only work if we use Discovery II memory.  Else,
	 * we will have to map the buffers to ISA memory, which is only
	 * 16 MB.
	 */
	dev->features = NETIF_F_SG | NETIF_F_IP_CSUM;
#endif
#endif

	/* Configure the timeout task */
	INIT_WORK(&mp->tx_timeout_task, mv643xx_eth_tx_timeout_task);

	spin_lock_init(&mp->lock);

	mp->shared = platform_get_drvdata(pd->shared);
	port_num = mp->port_num = pd->port_number;

	if (mp->shared->win_protect)
		wrl(mp, WINDOW_PROTECT(port_num), mp->shared->win_protect);

	mp->shared_smi = mp->shared;
	if (pd->shared_smi != NULL)
		mp->shared_smi = platform_get_drvdata(pd->shared_smi);

	/* set default config values */
	uc_addr_get(mp, dev->dev_addr);
	mp->rx_ring_size = DEFAULT_RX_QUEUE_SIZE;
	mp->tx_ring_size = DEFAULT_TX_QUEUE_SIZE;

	if (is_valid_ether_addr(pd->mac_addr))
		memcpy(dev->dev_addr, pd->mac_addr, 6);

	if (pd->phy_addr || pd->force_phy_addr)
		phy_addr_set(mp, pd->phy_addr);

	if (pd->rx_queue_size)
		mp->rx_ring_size = pd->rx_queue_size;

	if (pd->tx_queue_size)
		mp->tx_ring_size = pd->tx_queue_size;

	if (pd->tx_sram_size) {
		mp->tx_sram_size = pd->tx_sram_size;
		mp->tx_sram_addr = pd->tx_sram_addr;
	}

	if (pd->rx_sram_size) {
		mp->rx_sram_size = pd->rx_sram_size;
		mp->rx_sram_addr = pd->rx_sram_addr;
	}

	duplex = pd->duplex;
	speed = pd->speed;

	/* Hook up MII support for ethtool */
	mp->mii.dev = dev;
	mp->mii.mdio_read = mv643xx_eth_mdio_read;
	mp->mii.mdio_write = mv643xx_eth_mdio_write;
	mp->mii.phy_id = phy_addr_get(mp);
	mp->mii.phy_id_mask = 0x3f;
	mp->mii.reg_num_mask = 0x1f;

	err = phy_detect(mp);
	if (err) {
		pr_debug("%s: No PHY detected at addr %d\n",
			 dev->name, phy_addr_get(mp));
		goto out;
	}

	phy_reset(mp);
	mp->mii.supports_gmii = mii_check_gmii_support(&mp->mii);
	mv643xx_init_ethtool_cmd(dev, mp->mii.phy_id, speed, duplex, &cmd);
	mv643xx_eth_update_pscr(dev, &cmd);
	mv643xx_eth_set_settings(dev, &cmd);

	SET_NETDEV_DEV(dev, &pdev->dev);
	err = register_netdev(dev);
	if (err)
		goto out;

	p = dev->dev_addr;
	printk(KERN_NOTICE "%s: port %d with MAC address %s\n",
	       dev->name, port_num, print_mac(mac, p));

	if (dev->features & NETIF_F_SG)
		printk(KERN_NOTICE "%s: Scatter Gather Enabled\n", dev->name);

	if (dev->features & NETIF_F_IP_CSUM)
		printk(KERN_NOTICE "%s: TX TCP/IP Checksumming Supported\n",
		       dev->name);

#ifdef MV643XX_ETH_CHECKSUM_OFFLOAD_TX
	printk(KERN_NOTICE "%s: RX TCP/UDP Checksum Offload ON\n", dev->name);
#endif

#ifdef MV643XX_ETH_COAL
	printk(KERN_NOTICE "%s: TX and RX Interrupt Coalescing ON\n",
	       dev->name);
#endif

#ifdef MV643XX_ETH_NAPI
	printk(KERN_NOTICE "%s: RX NAPI Enabled\n", dev->name);
#endif

	if (mp->tx_sram_size > 0)
		printk(KERN_NOTICE "%s: Using SRAM\n", dev->name);

	return 0;

out:
	free_netdev(dev);

	return err;
}
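#if 0
/*
 * Example only (not compiled): a board-support sketch, with hypothetical
 * values, of the platform data consumed by mv643xx_eth_probe() above.
 * "example_shared_pdev" stands in for the board's MV643XX_ETH_SHARED_NAME
 * platform device.
 */
static struct mv643xx_eth_platform_data example_eth_port_data = {
	.shared		= &example_shared_pdev,
	.port_number	= 0,
	.force_phy_addr	= 1,
	.phy_addr	= 8,
	.speed		= 0,		/* 0 selects auto-negotiation */
	.duplex		= DUPLEX_HALF,	/* ignored while speed == 0 */
	.rx_queue_size	= 400,
	.tx_queue_size	= 800,
};
#endif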
static int mv643xx_eth_remove(struct platform_device *pdev)
{
	struct net_device *dev = platform_get_drvdata(pdev);

	unregister_netdev(dev);
	flush_scheduled_work();

	free_netdev(dev);
	platform_set_drvdata(pdev, NULL);

	return 0;
}
static void mv643xx_eth_shutdown(struct platform_device *pdev)
{
	struct net_device *dev = platform_get_drvdata(pdev);
	struct mv643xx_eth_private *mp = netdev_priv(dev);
	unsigned int port_num = mp->port_num;

	/* Mask all interrupts on ethernet port */
	wrl(mp, INT_MASK(port_num), 0);
	/* wait for previous write to complete */
	rdl(mp, INT_MASK(port_num));

	port_reset(mp);
}
static struct platform_driver mv643xx_eth_driver = {
	.probe = mv643xx_eth_probe,
	.remove = mv643xx_eth_remove,
	.shutdown = mv643xx_eth_shutdown,
	.driver = {
		.name = MV643XX_ETH_NAME,
		.owner = THIS_MODULE,
	},
};
static int __init mv643xx_eth_init_module(void)
{
	int rc;

	rc = platform_driver_register(&mv643xx_eth_shared_driver);
	if (!rc) {
		rc = platform_driver_register(&mv643xx_eth_driver);
		if (rc)
			platform_driver_unregister(&mv643xx_eth_shared_driver);
	}

	return rc;
}
static void __exit mv643xx_eth_cleanup_module(void)
{
	platform_driver_unregister(&mv643xx_eth_driver);
	platform_driver_unregister(&mv643xx_eth_shared_driver);
}

module_init(mv643xx_eth_init_module);
module_exit(mv643xx_eth_cleanup_module);
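/*
 * Registration order matters: the shared (bus glue) driver is
 * registered first because each port's probe dereferences the shared
 * device's drvdata (see platform_get_drvdata(pd->shared) in
 * mv643xx_eth_probe()); the exit path unregisters in reverse order.
 */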
2425 MODULE_LICENSE("GPL");
2426 MODULE_AUTHOR( "Rabeeh Khoury, Assaf Hoffman, Matthew Dharm, Manish Lachwani"
2427 " and Dale Farnsworth");
2428 MODULE_DESCRIPTION("Ethernet driver for Marvell MV643XX");
2429 MODULE_ALIAS("platform:" MV643XX_ETH_NAME
);
2430 MODULE_ALIAS("platform:" MV643XX_ETH_SHARED_NAME
);