 * drivers/net/mv643xx_eth.c - Driver for MV643XX ethernet ports
 * Copyright (C) 2002 Matthew Dharm <mdharm@momenco.com>
 *
 * Based on the 64360 driver from:
 * Copyright (C) 2002 rabeeh@galileo.co.il
 *
 * Copyright (C) 2003 PMC-Sierra, Inc.,
 *	written by Manish Lachwani
 *
 * Copyright (C) 2003 Ralf Baechle <ralf@linux-mips.org>
 *
 * Copyright (C) 2004-2005 MontaVista Software, Inc.
 *			   Dale Farnsworth <dale@farnsworth.org>
 *
 * Copyright (C) 2004 Steven J. Hill <sjhill1@rockwellcollins.com>
 *				     <sjhill@realitydiluted.com>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version 2
 * of the License, or (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA  02111-1307, USA.
#include <linux/init.h>
#include <linux/dma-mapping.h>
#include <linux/tcp.h>
#include <linux/udp.h>
#include <linux/etherdevice.h>
#include <linux/bitops.h>
#include <linux/delay.h>
#include <linux/ethtool.h>
#include <linux/platform_device.h>
#include <asm/types.h>
#include <asm/pgtable.h>
#include <asm/system.h>
#include <asm/delay.h>
#include "mv643xx_eth.h"
 * The first part is the high level driver of the gigE ethernet ports.

#define DMA_ALIGN		8	/* hw requires 8-byte alignment */
#define HW_IP_ALIGN		2	/* hw aligns IP header */
#define WRAP			HW_IP_ALIGN + ETH_HLEN + VLAN_HLEN + FCS_LEN
#define RX_SKB_SIZE		((dev->mtu + WRAP + 7) & ~0x7)
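/*
 * Worked example (illustrative, not part of the driver): with the default
 * 1500-byte MTU, and assuming ETH_HLEN = 14, VLAN_HLEN = 4 and FCS_LEN = 4,
 * WRAP is 2 + 14 + 4 + 4 = 24, so RX_SKB_SIZE rounds 1524 up to the next
 * multiple of 8, i.e. 1528. The hypothetical helper below mirrors the macro
 * without needing a struct net_device in scope.
 */
static inline unsigned int mv643xx_example_rx_skb_size(unsigned int mtu)
{
	return (mtu + WRAP + 7) & ~0x7;		/* 1500 -> 1528 */
}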
#define INT_UNMASK_ALL			0x0007ffff
#define INT_UNMASK_ALL_EXT		0x0011ffff
#define INT_MASK_ALL			0x00000000
#define INT_MASK_ALL_EXT		0x00000000
#define INT_CAUSE_CHECK_BITS		INT_CAUSE_UNMASK_ALL
#define INT_CAUSE_CHECK_BITS_EXT	INT_CAUSE_UNMASK_ALL_EXT

#ifdef MV643XX_CHECKSUM_OFFLOAD_TX
#define MAX_DESCS_PER_SKB	(MAX_SKB_FRAGS + 1)
#else
#define MAX_DESCS_PER_SKB	1
#endif

#define PHY_WAIT_ITERATIONS	1000	/* 1000 iterations * 10uS = 10mS max */
#define PHY_WAIT_MICRO_SECONDS	10
/* Static function declarations */
static void eth_port_uc_addr_get(struct net_device *dev,
						unsigned char *MacAddr);
static void eth_port_set_multicast_list(struct net_device *);
static void mv643xx_eth_port_enable_tx(unsigned int port_num,
						unsigned int channels);
static void mv643xx_eth_port_enable_rx(unsigned int port_num,
						unsigned int channels);
static unsigned int mv643xx_eth_port_disable_tx(unsigned int port_num);
static unsigned int mv643xx_eth_port_disable_rx(unsigned int port_num);
static int mv643xx_eth_open(struct net_device *);
static int mv643xx_eth_stop(struct net_device *);
static int mv643xx_eth_change_mtu(struct net_device *, int);
static struct net_device_stats *mv643xx_eth_get_stats(struct net_device *);
static void eth_port_init_mac_tables(unsigned int eth_port_num);
static int mv643xx_poll(struct net_device *dev, int *budget);
static int ethernet_phy_get(unsigned int eth_port_num);
static void ethernet_phy_set(unsigned int eth_port_num, int phy_addr);
static int ethernet_phy_detect(unsigned int eth_port_num);
static int mv643xx_mdio_read(struct net_device *dev, int phy_id, int location);
static void mv643xx_mdio_write(struct net_device *dev, int phy_id, int location, int val);
static int mv643xx_eth_do_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd);
static struct ethtool_ops mv643xx_ethtool_ops;

static char mv643xx_driver_name[] = "mv643xx_eth";
static char mv643xx_driver_version[] = "1.0";

static void __iomem *mv643xx_eth_shared_base;

/* used to protect MV643XX_ETH_SMI_REG, which is shared across ports */
static DEFINE_SPINLOCK(mv643xx_eth_phy_lock);
static inline u32 mv_read(int offset)
	void __iomem *reg_base;

	reg_base = mv643xx_eth_shared_base - MV643XX_ETH_SHARED_REGS;

	return readl(reg_base + offset);

static inline void mv_write(int offset, u32 data)
	void __iomem *reg_base;

	reg_base = mv643xx_eth_shared_base - MV643XX_ETH_SHARED_REGS;
	writel(data, reg_base + offset);
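/*
 * Illustrative usage sketch (not part of the driver): because mv_read() and
 * mv_write() subtract MV643XX_ETH_SHARED_REGS from the remapped base, callers
 * pass full MV643XX register offsets. The hypothetical helper below toggles
 * unicast promiscuous mode the same way mv643xx_eth_set_rx_mode() does.
 */
static inline void mv643xx_example_toggle_promisc(unsigned int port_num, int on)
{
	u32 config = mv_read(MV643XX_ETH_PORT_CONFIG_REG(port_num));

	if (on)
		config |= MV643XX_ETH_UNICAST_PROMISCUOUS_MODE;
	else
		config &= ~(u32) MV643XX_ETH_UNICAST_PROMISCUOUS_MODE;
	mv_write(MV643XX_ETH_PORT_CONFIG_REG(port_num), config);
}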
 * Changes MTU (maximum transfer unit) of the gigabit ethernet port
 *
 * Input :	pointer to ethernet interface network device structure
 * Output :	0 upon success, -EINVAL upon failure

static int mv643xx_eth_change_mtu(struct net_device *dev, int new_mtu)
	if ((new_mtu > 9500) || (new_mtu < 64))

	 * Stop, then re-open the interface. This will allocate RX skb's with
	 * There is a possible danger that the open will not succeed if the
	 * system is out of memory, which would make the open function fail.
	if (netif_running(dev)) {
		mv643xx_eth_stop(dev);
		if (mv643xx_eth_open(dev))
				"%s: Fatal error on opening device\n",
 * mv643xx_eth_rx_task
 *
 * Fills / refills RX queue on a certain gigabit ethernet port
 *
 * Input :	pointer to ethernet interface network device structure

static void mv643xx_eth_rx_task(void *data)
	struct net_device *dev = (struct net_device *)data;
	struct mv643xx_private *mp = netdev_priv(dev);
	struct pkt_info pkt_info;

	if (test_and_set_bit(0, &mp->rx_task_busy))
		panic("%s: Error in test_set_bit / clear_bit", dev->name);

	while (mp->rx_desc_count < (mp->rx_ring_size - 5)) {
		skb = dev_alloc_skb(RX_SKB_SIZE + DMA_ALIGN);
		unaligned = (u32)skb->data & (DMA_ALIGN - 1);
		skb_reserve(skb, DMA_ALIGN - unaligned);
		pkt_info.cmd_sts = ETH_RX_ENABLE_INTERRUPT;
		pkt_info.byte_cnt = RX_SKB_SIZE;
		pkt_info.buf_ptr = dma_map_single(NULL, skb->data, RX_SKB_SIZE,
		pkt_info.return_info = skb;
		if (eth_rx_return_buff(mp, &pkt_info) != ETH_OK) {
				"%s: Error allocating RX Ring\n", dev->name);
		skb_reserve(skb, HW_IP_ALIGN);

	clear_bit(0, &mp->rx_task_busy);

	/*
	 * If the RX ring is empty of skb's, set a timer to try allocating
	 * again at a later time.
	 */
	if ((mp->rx_desc_count == 0) && (mp->rx_timer_flag == 0)) {
		printk(KERN_INFO "%s: Rx ring is empty\n", dev->name);
		mp->timeout.expires = jiffies + (HZ / 10);
		add_timer(&mp->timeout);
		mp->rx_timer_flag = 1;

#ifdef MV643XX_RX_QUEUE_FILL_ON_TASK
	/* Return interrupts */
	mv_write(MV643XX_ETH_INTERRUPT_MASK_REG(mp->port_num),
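/*
 * Worked example (illustrative, not part of the driver): if dev_alloc_skb()
 * returns data that starts 6 bytes past an 8-byte boundary, unaligned is 6
 * and skb_reserve(skb, 8 - 6) above moves skb->data onto the next DMA_ALIGN
 * boundary before the buffer is mapped. The helper name below is hypothetical.
 */
static inline unsigned int mv643xx_example_dma_align_pad(unsigned long data_addr)
{
	return DMA_ALIGN - (data_addr & (DMA_ALIGN - 1));
}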
 * mv643xx_eth_rx_task_timer_wrapper
 *
 * Timer routine to wake up the RX queue filling task. This function is
 * used only in case the RX queue is empty, and all skb allocations have
 * failed (due to an out of memory event).
 *
 * Input :	pointer to ethernet interface network device structure

static void mv643xx_eth_rx_task_timer_wrapper(unsigned long data)
	struct net_device *dev = (struct net_device *)data;
	struct mv643xx_private *mp = netdev_priv(dev);

	mp->rx_timer_flag = 0;
	mv643xx_eth_rx_task((void *)data);
 * mv643xx_eth_update_mac_address
 *
 * Update the MAC address of the port in the address table
 *
 * Input :	pointer to ethernet interface network device structure

static void mv643xx_eth_update_mac_address(struct net_device *dev)
	struct mv643xx_private *mp = netdev_priv(dev);
	unsigned int port_num = mp->port_num;

	eth_port_init_mac_tables(port_num);
	eth_port_uc_addr_set(port_num, dev->dev_addr);
 * mv643xx_eth_set_rx_mode
 *
 * Change from promiscuous to regular rx mode
 *
 * Input :	pointer to ethernet interface network device structure

static void mv643xx_eth_set_rx_mode(struct net_device *dev)
	struct mv643xx_private *mp = netdev_priv(dev);

	if (dev->flags & IFF_PROMISC)
		mp->port_config |= (u32) MV643XX_ETH_UNICAST_PROMISCUOUS_MODE;
	else
		mp->port_config &= ~(u32) MV643XX_ETH_UNICAST_PROMISCUOUS_MODE;

	mv_write(MV643XX_ETH_PORT_CONFIG_REG(mp->port_num), mp->port_config);

	eth_port_set_multicast_list(dev);
 * mv643xx_eth_set_mac_address
 *
 * Change the interface's mac address.
 * No special hardware action is needed because the interface is always
 * put in promiscuous mode.
 *
 * Input :	pointer to ethernet interface network device structure and
 *		a pointer to the designated entry to be added to the cache.
 * Output :	zero upon success, negative upon failure

static int mv643xx_eth_set_mac_address(struct net_device *dev, void *addr)
	for (i = 0; i < 6; i++)
		/* +2 is for the offset of the HW addr type */
		dev->dev_addr[i] = ((unsigned char *)addr)[i + 2];
	mv643xx_eth_update_mac_address(dev);
 * mv643xx_eth_tx_timeout
 *
 * Called upon a timeout on transmitting a packet
 *
 * Input :	pointer to ethernet interface network device structure.

static void mv643xx_eth_tx_timeout(struct net_device *dev)
	struct mv643xx_private *mp = netdev_priv(dev);

	printk(KERN_INFO "%s: TX timeout ", dev->name);

	/* Do the reset outside of interrupt context */
	schedule_work(&mp->tx_timeout_task);
 * mv643xx_eth_tx_timeout_task
 *
 * Actual routine to reset the adapter when a timeout on Tx has occurred

static void mv643xx_eth_tx_timeout_task(struct net_device *dev)
	struct mv643xx_private *mp = netdev_priv(dev);

	netif_device_detach(dev);
	eth_port_reset(mp->port_num);
	netif_device_attach(dev);
 * mv643xx_eth_free_tx_queue
 *
 * Input :	dev - a pointer to the required interface
 *
 * Output :	0 if it was able to release an skb, nonzero otherwise

static int mv643xx_eth_free_tx_queue(struct net_device *dev,
					unsigned int eth_int_cause_ext)
	struct mv643xx_private *mp = netdev_priv(dev);
	struct net_device_stats *stats = &mp->stats;
	struct pkt_info pkt_info;

	if (!(eth_int_cause_ext & (BIT0 | BIT8)))

	/* Check only queue 0 */
	while (eth_tx_return_desc(mp, &pkt_info) == ETH_OK) {
		if (pkt_info.cmd_sts & BIT0) {
			printk("%s: Error in TX\n", dev->name);

		if (pkt_info.cmd_sts & ETH_TX_FIRST_DESC)
			dma_unmap_single(NULL, pkt_info.buf_ptr,
			dma_unmap_page(NULL, pkt_info.buf_ptr,

		if (pkt_info.return_info) {
			dev_kfree_skb_irq(pkt_info.return_info);
 * mv643xx_eth_receive
 *
 * This function forwards packets received from the port's queues
 * toward the kernel core, or FastRoutes them to another interface.
 *
 * Input :	dev - a pointer to the required interface
 *		max - maximum number to receive (0 means unlimited)
 *
 * Output :	number of served packets

#ifdef MV643XX_NAPI
static int mv643xx_eth_receive_queue(struct net_device *dev, int budget)
#else
static int mv643xx_eth_receive_queue(struct net_device *dev)
#endif
	struct mv643xx_private *mp = netdev_priv(dev);
	struct net_device_stats *stats = &mp->stats;
	unsigned int received_packets = 0;
	struct pkt_info pkt_info;

#ifdef MV643XX_NAPI
	while (budget-- > 0 && eth_port_receive(mp, &pkt_info) == ETH_OK) {
#else
	while (eth_port_receive(mp, &pkt_info) == ETH_OK) {
#endif
		/* Update statistics. Note byte count includes 4 byte CRC count */
		stats->rx_bytes += pkt_info.byte_cnt;
		skb = pkt_info.return_info;
		/*
		 * In case we received a packet without the first / last bits
		 * set, or with the error summary bit set, the packet needs
		 * to be dropped.
		 */
		if (((pkt_info.cmd_sts
				& (ETH_RX_FIRST_DESC | ETH_RX_LAST_DESC)) !=
					(ETH_RX_FIRST_DESC | ETH_RX_LAST_DESC))
				|| (pkt_info.cmd_sts & ETH_ERROR_SUMMARY)) {

			if ((pkt_info.cmd_sts & (ETH_RX_FIRST_DESC |
						ETH_RX_LAST_DESC)) !=
					(ETH_RX_FIRST_DESC | ETH_RX_LAST_DESC)) {
					"%s: Received packet spread "
					"on multiple descriptors\n",

			if (pkt_info.cmd_sts & ETH_ERROR_SUMMARY)

			dev_kfree_skb_irq(skb);
			/*
			 * The -4 is for the CRC in the trailer of the
			 */
			skb_put(skb, pkt_info.byte_cnt - 4);

			if (pkt_info.cmd_sts & ETH_LAYER_4_CHECKSUM_OK) {
				skb->ip_summed = CHECKSUM_UNNECESSARY;
					(pkt_info.cmd_sts & 0x0007fff8) >> 3);
			skb->protocol = eth_type_trans(skb, dev);
			netif_receive_skb(skb);

	dev->last_rx = jiffies;

	return received_packets;
/* Set the mv643xx port configuration register for the speed/duplex mode. */
static void mv643xx_eth_update_pscr(struct net_device *dev,
					struct ethtool_cmd *ecmd)
	struct mv643xx_private *mp = netdev_priv(dev);
	int port_num = mp->port_num;
	unsigned int channels;

	o_pscr = mv_read(MV643XX_ETH_PORT_SERIAL_CONTROL_REG(port_num));

	/* clear speed, duplex and rx buffer size fields */
	n_pscr &= ~(MV643XX_ETH_SET_MII_SPEED_TO_100 |
			MV643XX_ETH_SET_GMII_SPEED_TO_1000 |
			MV643XX_ETH_SET_FULL_DUPLEX_MODE |
			MV643XX_ETH_MAX_RX_PACKET_MASK);

	if (ecmd->duplex == DUPLEX_FULL)
		n_pscr |= MV643XX_ETH_SET_FULL_DUPLEX_MODE;

	if (ecmd->speed == SPEED_1000)
		n_pscr |= MV643XX_ETH_SET_GMII_SPEED_TO_1000 |
				MV643XX_ETH_MAX_RX_PACKET_9700BYTE;
	if (ecmd->speed == SPEED_100)
		n_pscr |= MV643XX_ETH_SET_MII_SPEED_TO_100;
	n_pscr |= MV643XX_ETH_MAX_RX_PACKET_1522BYTE;

	if (n_pscr != o_pscr) {
		if ((o_pscr & MV643XX_ETH_SERIAL_PORT_ENABLE) == 0)
			mv_write(MV643XX_ETH_PORT_SERIAL_CONTROL_REG(port_num),

			channels = mv643xx_eth_port_disable_tx(port_num);

			o_pscr &= ~MV643XX_ETH_SERIAL_PORT_ENABLE;
			mv_write(MV643XX_ETH_PORT_SERIAL_CONTROL_REG(port_num),
			mv_write(MV643XX_ETH_PORT_SERIAL_CONTROL_REG(port_num),
			mv_write(MV643XX_ETH_PORT_SERIAL_CONTROL_REG(port_num),
			mv643xx_eth_port_enable_tx(port_num, channels);
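/*
 * Illustrative helper (not part of the driver, hypothetical name): the PSCR
 * bits that mv643xx_eth_update_pscr() above selects for a 1000 Mb/s
 * full-duplex link; the bit definitions come from mv643xx_eth.h.
 */
static inline u32 mv643xx_example_pscr_1000_full(void)
{
	return MV643XX_ETH_SET_GMII_SPEED_TO_1000 |
		MV643XX_ETH_MAX_RX_PACKET_9700BYTE |
		MV643XX_ETH_SET_FULL_DUPLEX_MODE;
}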
 * mv643xx_eth_int_handler
 *
 * Main interrupt handler for the gigabit ethernet ports
 *
 * Input :	irq	- irq number (not used)
 *		dev_id	- a pointer to the required interface's data structure

static irqreturn_t mv643xx_eth_int_handler(int irq, void *dev_id,
							struct pt_regs *regs)
	struct net_device *dev = (struct net_device *)dev_id;
	struct mv643xx_private *mp = netdev_priv(dev);
	u32 eth_int_cause, eth_int_cause_ext = 0;
	unsigned int port_num = mp->port_num;

	/* Read interrupt cause registers */
	eth_int_cause = mv_read(MV643XX_ETH_INTERRUPT_CAUSE_REG(port_num)) &
	if (eth_int_cause & BIT1)
		eth_int_cause_ext = mv_read(
			MV643XX_ETH_INTERRUPT_CAUSE_EXTEND_REG(port_num)) &

	if (!(eth_int_cause & 0x0007fffd)) {
		/* Don't ack the Rx interrupt */
		/*
		 * Clear specific ethernet port interrupt registers by
		 * acknowledging relevant bits.
		 */
		mv_write(MV643XX_ETH_INTERRUPT_CAUSE_REG(port_num),
		if (eth_int_cause_ext != 0x0)
			mv_write(MV643XX_ETH_INTERRUPT_CAUSE_EXTEND_REG
					(port_num), ~eth_int_cause_ext);

		/* UDP change : We may need this */
		if ((eth_int_cause_ext & 0x0000ffff) &&
		    (mv643xx_eth_free_tx_queue(dev, eth_int_cause_ext) == 0) &&
		    (mp->tx_ring_size > mp->tx_desc_count + MAX_DESCS_PER_SKB))
			netif_wake_queue(dev);

		if (netif_rx_schedule_prep(dev)) {
			/* Mask all the interrupts */
			mv_write(MV643XX_ETH_INTERRUPT_MASK_REG(port_num),
			/* wait for previous write to complete */
			mv_read(MV643XX_ETH_INTERRUPT_MASK_REG(port_num));
			__netif_rx_schedule(dev);

		if (eth_int_cause & (BIT2 | BIT11))
			mv643xx_eth_receive_queue(dev, 0);

		/*
		 * After forwarding received packets to the upper layer, add a
		 * task in an interrupts enabled context that refills the RX ring
		 */
#ifdef MV643XX_RX_QUEUE_FILL_ON_TASK
		/* Mask all interrupts on ethernet port */
		mv_write(MV643XX_ETH_INTERRUPT_MASK_REG(port_num),
		/* wait for previous write to take effect */
		mv_read(MV643XX_ETH_INTERRUPT_MASK_REG(port_num));

		queue_task(&mp->rx_task, &tq_immediate);
		mark_bh(IMMEDIATE_BH);
#else
		mp->rx_task.func(dev);
#endif

	/* PHY status changed */
	if (eth_int_cause_ext & (BIT16 | BIT20)) {
		struct ethtool_cmd cmd;

		if (mii_link_ok(&mp->mii)) {
			mii_ethtool_gset(&mp->mii, &cmd);
			mv643xx_eth_update_pscr(dev, &cmd);
			if (!netif_carrier_ok(dev)) {
				netif_carrier_on(dev);
				if (mp->tx_ring_size > mp->tx_desc_count +
					netif_wake_queue(dev);
				mv643xx_eth_port_enable_tx(port_num,
						mp->port_tx_queue_command);
		} else if (netif_carrier_ok(dev)) {
			netif_stop_queue(dev);
			netif_carrier_off(dev);

	/*
	 * If no real interrupt occurred, exit.
	 * This can happen when using the gigE interrupt coalescing mechanism.
	 */
	if ((eth_int_cause == 0x0) && (eth_int_cause_ext == 0x0))
 * eth_port_set_rx_coal - Sets coalescing interrupt mechanism on RX path
 *
 *	This routine sets the RX coalescing interrupt mechanism parameter.
 *	This parameter is a timeout counter, that counts in chunks of 64
 *	t_clk; when the timeout event occurs a maskable interrupt
 *	The parameter is calculated using the t_clk of the MV-643xx chip
 *	and the required delay of the interrupt in usec.
 *
 *	unsigned int eth_port_num	Ethernet port number
 *	unsigned int t_clk		t_clk of the MV-643xx chip in HZ units
 *	unsigned int delay		Delay in usec
 *
 *	Interrupt coalescing mechanism value is set in the MV-643xx chip.
 *
 *	The interrupt coalescing value set in the gigE port.

static unsigned int eth_port_set_rx_coal(unsigned int eth_port_num,
					unsigned int t_clk, unsigned int delay)
	unsigned int coal = ((t_clk / 1000000) * delay) / 64;

	/* Set RX Coalescing mechanism */
	mv_write(MV643XX_ETH_SDMA_CONFIG_REG(eth_port_num),
		((coal & 0x3fff) << 8) |
		(mv_read(MV643XX_ETH_SDMA_CONFIG_REG(eth_port_num))

 * eth_port_set_tx_coal - Sets coalescing interrupt mechanism on TX path
 *
 *	This routine sets the TX coalescing interrupt mechanism parameter.
 *	This parameter is a timeout counter, that counts in chunks of 64
 *	t_clk; when the timeout event occurs a maskable interrupt
 *	The parameter is calculated using the t_clk frequency of the
 *	MV-643xx chip and the required delay of the interrupt in usec.
 *
 *	unsigned int eth_port_num	Ethernet port number
 *	unsigned int t_clk		t_clk of the MV-643xx chip in HZ units
 *	unsigned int delay		Delay in uSeconds
 *
 *	Interrupt coalescing mechanism value is set in the MV-643xx chip.
 *
 *	The interrupt coalescing value set in the gigE port.

static unsigned int eth_port_set_tx_coal(unsigned int eth_port_num,
					unsigned int t_clk, unsigned int delay)
	coal = ((t_clk / 1000000) * delay) / 64;
	/* Set TX Coalescing mechanism */
	mv_write(MV643XX_ETH_TX_FIFO_URGENT_THRESHOLD_REG(eth_port_num),
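/*
 * Worked example (illustrative, not part of the driver): mv643xx_eth_open()
 * passes a t_clk of 133000000 (133 MHz), so for an assumed delay of 20 usec
 * the coalescing value is ((133000000 / 1000000) * 20) / 64 = 41 chunks of
 * 64 t_clk each. The helper name below is hypothetical.
 */
static inline unsigned int mv643xx_example_coal_value(unsigned int t_clk,
							unsigned int delay_usec)
{
	return ((t_clk / 1000000) * delay_usec) / 64;
}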
 * ether_init_rx_desc_ring - Carve a Rx chain desc list and buffer in memory.
 *
 *	This function prepares a Rx chained list of descriptors and packet
 *	buffers in a form of a ring. The routine must be called after port
 *	initialization routine and before port start routine.
 *	The Ethernet SDMA engine uses CPU bus addresses to access the various
 *	devices in the system (i.e. DRAM). This function uses the ethernet
 *	struct 'virtual to physical' routine (set by the user) to set the ring
 *	with physical addresses.
 *
 *	struct mv643xx_private *mp	Ethernet Port Control struct.
 *
 *	The routine updates the Ethernet port control struct with information
 *	regarding the Rx descriptors and buffers.

static void ether_init_rx_desc_ring(struct mv643xx_private *mp)
	volatile struct eth_rx_desc *p_rx_desc;
	int rx_desc_num = mp->rx_ring_size;

	/* initialize the next_desc_ptr links in the Rx descriptors ring */
	p_rx_desc = (struct eth_rx_desc *)mp->p_rx_desc_area;
	for (i = 0; i < rx_desc_num; i++) {
		p_rx_desc[i].next_desc_ptr = mp->rx_desc_dma +
			((i + 1) % rx_desc_num) * sizeof(struct eth_rx_desc);

	/* Save Rx desc pointer to driver struct. */
	mp->rx_curr_desc_q = 0;
	mp->rx_used_desc_q = 0;

	mp->rx_desc_area_size = rx_desc_num * sizeof(struct eth_rx_desc);

	/* Enable queue 0 for this port */
	mp->port_rx_queue_command = 1;
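/*
 * Illustrative helper (not part of the driver, hypothetical name): the link
 * arithmetic used in the loop above. Descriptor 'index' points at descriptor
 * 'index + 1'; for the last descriptor the modulo wraps the link back to the
 * first descriptor, closing the ring.
 */
static inline dma_addr_t mv643xx_example_rx_desc_link(dma_addr_t ring_dma,
							int index, int ring_size)
{
	return ring_dma + ((index + 1) % ring_size) * sizeof(struct eth_rx_desc);
}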
 * ether_init_tx_desc_ring - Carve a Tx chain desc list and buffer in memory.
 *
 *	This function prepares a Tx chained list of descriptors and packet
 *	buffers in a form of a ring. The routine must be called after port
 *	initialization routine and before port start routine.
 *	The Ethernet SDMA engine uses CPU bus addresses to access the various
 *	devices in the system (i.e. DRAM). This function uses the ethernet
 *	struct 'virtual to physical' routine (set by the user) to set the ring
 *	with physical addresses.
 *
 *	struct mv643xx_private *mp	Ethernet Port Control struct.
 *
 *	The routine updates the Ethernet port control struct with information
 *	regarding the Tx descriptors and buffers.

static void ether_init_tx_desc_ring(struct mv643xx_private *mp)
	int tx_desc_num = mp->tx_ring_size;
	struct eth_tx_desc *p_tx_desc;

	/* Initialize the next_desc_ptr links in the Tx descriptors ring */
	p_tx_desc = (struct eth_tx_desc *)mp->p_tx_desc_area;
	for (i = 0; i < tx_desc_num; i++) {
		p_tx_desc[i].next_desc_ptr = mp->tx_desc_dma +
			((i + 1) % tx_desc_num) * sizeof(struct eth_tx_desc);

	mp->tx_curr_desc_q = 0;
	mp->tx_used_desc_q = 0;
#ifdef MV643XX_CHECKSUM_OFFLOAD_TX
	mp->tx_first_desc_q = 0;
#endif

	mp->tx_desc_area_size = tx_desc_num * sizeof(struct eth_tx_desc);

	/* Enable queue 0 for this port */
	mp->port_tx_queue_command = 1;
static int mv643xx_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
	struct mv643xx_private *mp = netdev_priv(dev);

	spin_lock_irq(&mp->lock);
	err = mii_ethtool_sset(&mp->mii, cmd);
	spin_unlock_irq(&mp->lock);

static int mv643xx_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
	struct mv643xx_private *mp = netdev_priv(dev);

	spin_lock_irq(&mp->lock);
	err = mii_ethtool_gset(&mp->mii, cmd);
	spin_unlock_irq(&mp->lock);

	/* The PHY may support 1000baseT_Half, but the mv643xx does not */
	cmd->supported &= ~SUPPORTED_1000baseT_Half;
	cmd->advertising &= ~ADVERTISED_1000baseT_Half;
 * This function is called when opening the network device. The function
 * should initialize all the hardware, initialize cyclic Rx/Tx
 * descriptors chain and buffers and allocate an IRQ to the network
 *
 * Input :	a pointer to the network device structure
 *
 * Output :	zero on success, nonzero on failure

static int mv643xx_eth_open(struct net_device *dev)
	struct mv643xx_private *mp = netdev_priv(dev);
	unsigned int port_num = mp->port_num;

	err = request_irq(dev->irq, mv643xx_eth_int_handler,
			SA_SHIRQ | SA_SAMPLE_RANDOM, dev->name, dev);
		printk(KERN_ERR "Can not assign IRQ number to MV643XX_eth%d\n",

	INIT_WORK(&mp->rx_task, (void (*)(void *))mv643xx_eth_rx_task, dev);

	memset(&mp->timeout, 0, sizeof(struct timer_list));
	mp->timeout.function = mv643xx_eth_rx_task_timer_wrapper;
	mp->timeout.data = (unsigned long)dev;

	mp->rx_task_busy = 0;
	mp->rx_timer_flag = 0;

	/* Allocate RX and TX skb rings */
	mp->rx_skb = kmalloc(sizeof(*mp->rx_skb) * mp->rx_ring_size,
		printk(KERN_ERR "%s: Cannot allocate Rx skb ring\n", dev->name);

	mp->tx_skb = kmalloc(sizeof(*mp->tx_skb) * mp->tx_ring_size,
		printk(KERN_ERR "%s: Cannot allocate Tx skb ring\n", dev->name);
		goto out_free_rx_skb;

	/* Allocate TX ring */
	mp->tx_desc_count = 0;
	size = mp->tx_ring_size * sizeof(struct eth_tx_desc);
	mp->tx_desc_area_size = size;

	if (mp->tx_sram_size) {
		mp->p_tx_desc_area = ioremap(mp->tx_sram_addr,
		mp->tx_desc_dma = mp->tx_sram_addr;
		mp->p_tx_desc_area = dma_alloc_coherent(NULL, size,

	if (!mp->p_tx_desc_area) {
		printk(KERN_ERR "%s: Cannot allocate Tx Ring (size %d bytes)\n",
		goto out_free_tx_skb;

	BUG_ON((u32) mp->p_tx_desc_area & 0xf);	/* check 16-byte alignment */
	memset((void *)mp->p_tx_desc_area, 0, mp->tx_desc_area_size);

	ether_init_tx_desc_ring(mp);

	/* Allocate RX ring */
	mp->rx_desc_count = 0;
	size = mp->rx_ring_size * sizeof(struct eth_rx_desc);
	mp->rx_desc_area_size = size;

	if (mp->rx_sram_size) {
		mp->p_rx_desc_area = ioremap(mp->rx_sram_addr,
		mp->rx_desc_dma = mp->rx_sram_addr;
		mp->p_rx_desc_area = dma_alloc_coherent(NULL, size,

	if (!mp->p_rx_desc_area) {
		printk(KERN_ERR "%s: Cannot allocate Rx ring (size %d bytes)\n",
		printk(KERN_ERR "%s: Freeing previously allocated TX queues...",
		if (mp->rx_sram_size)
			iounmap(mp->p_tx_desc_area);
			dma_free_coherent(NULL, mp->tx_desc_area_size,
					mp->p_tx_desc_area, mp->tx_desc_dma);
		goto out_free_tx_skb;

	memset((void *)mp->p_rx_desc_area, 0, size);

	ether_init_rx_desc_ring(mp);

	mv643xx_eth_rx_task(dev);	/* Fill RX ring with skb's */

	/* Clear any pending ethernet port interrupts */
	mv_write(MV643XX_ETH_INTERRUPT_CAUSE_REG(port_num), 0);
	mv_write(MV643XX_ETH_INTERRUPT_CAUSE_EXTEND_REG(port_num), 0);

	/* Interrupt Coalescing */
	eth_port_set_rx_coal(port_num, 133000000, MV643XX_RX_COAL);
	eth_port_set_tx_coal(port_num, 133000000, MV643XX_TX_COAL);

	/* Unmask phy and link status changes interrupts */
	mv_write(MV643XX_ETH_INTERRUPT_EXTEND_MASK_REG(port_num),

	/* Unmask RX buffer and TX end interrupt */
	mv_write(MV643XX_ETH_INTERRUPT_MASK_REG(port_num), INT_UNMASK_ALL);

	free_irq(dev->irq, dev);
static void mv643xx_eth_free_tx_rings(struct net_device *dev)
	struct mv643xx_private *mp = netdev_priv(dev);
	unsigned int port_num = mp->port_num;

	mv643xx_eth_port_disable_tx(port_num);

	/* Free outstanding skb's on TX rings */
	for (curr = 0; mp->tx_desc_count && curr < mp->tx_ring_size; curr++) {
		skb = mp->tx_skb[curr];
			mp->tx_desc_count -= skb_shinfo(skb)->nr_frags;

	if (mp->tx_desc_count)
		printk("%s: Error on Tx descriptor free - could not free %d"
				" descriptors\n", dev->name, mp->tx_desc_count);

	if (mp->tx_sram_size)
		iounmap(mp->p_tx_desc_area);
	else
		dma_free_coherent(NULL, mp->tx_desc_area_size,
				mp->p_tx_desc_area, mp->tx_desc_dma);

static void mv643xx_eth_free_rx_rings(struct net_device *dev)
	struct mv643xx_private *mp = netdev_priv(dev);
	unsigned int port_num = mp->port_num;

	/* Stop RX Queues */
	mv643xx_eth_port_disable_rx(port_num);

	/* Free preallocated skb's on RX rings */
	for (curr = 0; mp->rx_desc_count && curr < mp->rx_ring_size; curr++) {
		if (mp->rx_skb[curr]) {
			dev_kfree_skb(mp->rx_skb[curr]);
			mp->rx_desc_count--;

	if (mp->rx_desc_count)
			"%s: Error in freeing Rx Ring. %d skb's still"
			" stuck in RX Ring - ignoring them\n", dev->name,

	if (mp->rx_sram_size)
		iounmap(mp->p_rx_desc_area);
	else
		dma_free_coherent(NULL, mp->rx_desc_area_size,
				mp->p_rx_desc_area, mp->rx_desc_dma);
 * This function is used when closing the network device.
 * It updates the hardware, releases all memory that holds buffers and
 * descriptors, and releases the IRQ.
 * Input :	a pointer to the device structure
 * Output :	zero on success, nonzero on failure

static int mv643xx_eth_stop(struct net_device *dev)
	struct mv643xx_private *mp = netdev_priv(dev);
	unsigned int port_num = mp->port_num;

	/* Mask all interrupts on ethernet port */
	mv_write(MV643XX_ETH_INTERRUPT_MASK_REG(port_num), INT_MASK_ALL);
	/* wait for previous write to complete */
	mv_read(MV643XX_ETH_INTERRUPT_MASK_REG(port_num));

	netif_poll_disable(dev);

	netif_carrier_off(dev);
	netif_stop_queue(dev);

	eth_port_reset(mp->port_num);

	mv643xx_eth_free_tx_rings(dev);
	mv643xx_eth_free_rx_rings(dev);

	netif_poll_enable(dev);

	free_irq(dev->irq, dev);
static void mv643xx_tx(struct net_device *dev)
	struct mv643xx_private *mp = netdev_priv(dev);
	struct pkt_info pkt_info;

	while (eth_tx_return_desc(mp, &pkt_info) == ETH_OK) {
		if (pkt_info.cmd_sts & ETH_TX_FIRST_DESC)
			dma_unmap_single(NULL, pkt_info.buf_ptr,
			dma_unmap_page(NULL, pkt_info.buf_ptr,

		if (pkt_info.return_info)
			dev_kfree_skb_irq(pkt_info.return_info);

	if (netif_queue_stopped(dev) &&
			mp->tx_desc_count + MAX_DESCS_PER_SKB)
		netif_wake_queue(dev);
 * This function is used in case of NAPI

static int mv643xx_poll(struct net_device *dev, int *budget)
	struct mv643xx_private *mp = netdev_priv(dev);
	int done = 1, orig_budget, work_done;
	unsigned int port_num = mp->port_num;

#ifdef MV643XX_TX_FAST_REFILL
	if (++mp->tx_clean_threshold > 5) {
		mp->tx_clean_threshold = 0;

	if ((mv_read(MV643XX_ETH_RX_CURRENT_QUEUE_DESC_PTR_0(port_num)))
					!= (u32) mp->rx_used_desc_q) {
		orig_budget = *budget;
		if (orig_budget > dev->quota)
			orig_budget = dev->quota;
		work_done = mv643xx_eth_receive_queue(dev, orig_budget);
		mp->rx_task.func(dev);
		*budget -= work_done;
		dev->quota -= work_done;
		if (work_done >= orig_budget)

		netif_rx_complete(dev);
		mv_write(MV643XX_ETH_INTERRUPT_CAUSE_REG(port_num), 0);
		mv_write(MV643XX_ETH_INTERRUPT_CAUSE_EXTEND_REG(port_num), 0);
		mv_write(MV643XX_ETH_INTERRUPT_MASK_REG(port_num),

	return done ? 0 : 1;
/* Hardware can't handle unaligned fragments smaller than 9 bytes.
 * This helper function detects that case.
 */

static inline unsigned int has_tiny_unaligned_frags(struct sk_buff *skb)
	for (frag = 0; frag < skb_shinfo(skb)->nr_frags; frag++) {
		fragp = &skb_shinfo(skb)->frags[frag];
		if (fragp->size <= 8 && fragp->page_offset & 0x7)
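/*
 * Illustrative check (not part of the driver, hypothetical name): a 4-byte
 * fragment starting at page offset 3 is both smaller than 9 bytes and not
 * 8-byte aligned, so has_tiny_unaligned_frags() above would report it and the
 * transmit path would linearize the skb first.
 */
static inline int mv643xx_example_frag_needs_linearize(unsigned int size,
							unsigned int page_offset)
{
	return size <= 8 && (page_offset & 0x7);
}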
 * mv643xx_eth_start_xmit
 *
 * This function queues a packet in the Tx descriptor for
 *
 * Input :	skb - a pointer to socket buffer
 *		dev - a pointer to the required port
 *
 * Output :	zero upon success

static int mv643xx_eth_start_xmit(struct sk_buff *skb, struct net_device *dev)
	struct mv643xx_private *mp = netdev_priv(dev);
	struct net_device_stats *stats = &mp->stats;
	ETH_FUNC_RET_STATUS status;
	unsigned long flags;
	struct pkt_info pkt_info;

	if (netif_queue_stopped(dev)) {
			"%s: Tried sending packet when interface is stopped\n",

	/* This is a hard error, log it. */
	if ((mp->tx_ring_size - mp->tx_desc_count) <=
					(skb_shinfo(skb)->nr_frags + 1)) {
		netif_stop_queue(dev);
			"%s: Bug in mv643xx_eth - Trying to transmit when"
			" queue full !\n", dev->name);

	/* Paranoid check - this shouldn't happen */
		stats->tx_dropped++;
		printk(KERN_ERR "mv643xx_eth paranoid check failed\n");

#ifdef MV643XX_CHECKSUM_OFFLOAD_TX
	if (has_tiny_unaligned_frags(skb)) {
		if ((skb_linearize(skb, GFP_ATOMIC) != 0)) {
			stats->tx_dropped++;
			printk(KERN_DEBUG "%s: failed to linearize tiny "
					"unaligned fragment\n", dev->name);

	spin_lock_irqsave(&mp->lock, flags);

	if (!skb_shinfo(skb)->nr_frags) {
		if (skb->ip_summed != CHECKSUM_HW) {
			/* Errata BTS #50, IHL must be 5 if no HW checksum */
			pkt_info.cmd_sts = ETH_TX_ENABLE_INTERRUPT |
						5 << ETH_TX_IHL_SHIFT;
			pkt_info.l4i_chk = 0;
		} else {
			pkt_info.cmd_sts = ETH_TX_ENABLE_INTERRUPT |
					ETH_GEN_TCP_UDP_CHECKSUM |
					ETH_GEN_IP_V_4_CHECKSUM |
					skb->nh.iph->ihl << ETH_TX_IHL_SHIFT;
			/* CPU already calculated pseudo header checksum. */
			if ((skb->protocol == ETH_P_IP) &&
					(skb->nh.iph->protocol == IPPROTO_UDP) ) {
				pkt_info.cmd_sts |= ETH_UDP_FRAME;
				pkt_info.l4i_chk = skb->h.uh->check;
			} else if ((skb->protocol == ETH_P_IP) &&
					(skb->nh.iph->protocol == IPPROTO_TCP))
				pkt_info.l4i_chk = skb->h.th->check;
				"%s: chksum proto != IPv4 TCP or UDP\n",
				spin_unlock_irqrestore(&mp->lock, flags);

		pkt_info.byte_cnt = skb->len;
		pkt_info.buf_ptr = dma_map_single(NULL, skb->data, skb->len,
		pkt_info.return_info = skb;
		status = eth_port_send(mp, &pkt_info);
		if ((status == ETH_ERROR) || (status == ETH_QUEUE_FULL))
			printk(KERN_ERR "%s: Error on transmitting packet\n",
		stats->tx_bytes += pkt_info.byte_cnt;
	} else {
		/* first frag which is skb header */
		pkt_info.byte_cnt = skb_headlen(skb);
		pkt_info.buf_ptr = dma_map_single(NULL, skb->data,
		pkt_info.l4i_chk = 0;
		pkt_info.return_info = 0;

		if (skb->ip_summed != CHECKSUM_HW)
			/* Errata BTS #50, IHL must be 5 if no HW checksum */
			pkt_info.cmd_sts = ETH_TX_FIRST_DESC |
						5 << ETH_TX_IHL_SHIFT;
		else {
			pkt_info.cmd_sts = ETH_TX_FIRST_DESC |
					ETH_GEN_TCP_UDP_CHECKSUM |
					ETH_GEN_IP_V_4_CHECKSUM |
					skb->nh.iph->ihl << ETH_TX_IHL_SHIFT;
			/* CPU already calculated pseudo header checksum. */
			if ((skb->protocol == ETH_P_IP) &&
					(skb->nh.iph->protocol == IPPROTO_UDP)) {
				pkt_info.cmd_sts |= ETH_UDP_FRAME;
				pkt_info.l4i_chk = skb->h.uh->check;
			} else if ((skb->protocol == ETH_P_IP) &&
					(skb->nh.iph->protocol == IPPROTO_TCP))
				pkt_info.l4i_chk = skb->h.th->check;
				"%s: chksum proto != IPv4 TCP or UDP\n",
				spin_unlock_irqrestore(&mp->lock, flags);

		status = eth_port_send(mp, &pkt_info);
		if (status != ETH_OK) {
			if ((status == ETH_ERROR))
				"%s: Error on transmitting packet\n",
			if (status == ETH_QUEUE_FULL)
				printk("Error on Queue Full \n");
			if (status == ETH_QUEUE_LAST_RESOURCE)
				printk("Tx resource error \n");
		stats->tx_bytes += pkt_info.byte_cnt;

		/* Check for the remaining frags */
		for (frag = 0; frag < skb_shinfo(skb)->nr_frags; frag++) {
			skb_frag_t *this_frag = &skb_shinfo(skb)->frags[frag];
			pkt_info.l4i_chk = 0x0000;
			pkt_info.cmd_sts = 0x00000000;

			/* Last Frag enables interrupt and frees the skb */
			if (frag == (skb_shinfo(skb)->nr_frags - 1)) {
				pkt_info.cmd_sts |= ETH_TX_ENABLE_INTERRUPT |
				pkt_info.return_info = skb;
			} else {
				pkt_info.return_info = 0;
			pkt_info.l4i_chk = 0;
			pkt_info.byte_cnt = this_frag->size;

			pkt_info.buf_ptr = dma_map_page(NULL, this_frag->page,
							this_frag->page_offset,

			status = eth_port_send(mp, &pkt_info);

			if (status != ETH_OK) {
				if ((status == ETH_ERROR))
					printk(KERN_ERR "%s: Error on "
						"transmitting packet\n",
				if (status == ETH_QUEUE_LAST_RESOURCE)
					printk("Tx resource error \n");
				if (status == ETH_QUEUE_FULL)
					printk("Queue is full \n");
			stats->tx_bytes += pkt_info.byte_cnt;

	spin_lock_irqsave(&mp->lock, flags);
	pkt_info.cmd_sts = ETH_TX_ENABLE_INTERRUPT | ETH_TX_FIRST_DESC |
	pkt_info.l4i_chk = 0;
	pkt_info.byte_cnt = skb->len;
	pkt_info.buf_ptr = dma_map_single(NULL, skb->data, skb->len,
	pkt_info.return_info = skb;
	status = eth_port_send(mp, &pkt_info);
	if ((status == ETH_ERROR) || (status == ETH_QUEUE_FULL))
		printk(KERN_ERR "%s: Error on transmitting packet\n",
	stats->tx_bytes += pkt_info.byte_cnt;

	/*
	 * Check if TX queue can handle another skb. If not, then
	 * signal higher layers to stop requesting TX
	 */
	if (mp->tx_ring_size <= (mp->tx_desc_count + MAX_DESCS_PER_SKB))
		/*
		 * Stop getting skb's from upper layers.
		 * Getting skb's from upper layers will be enabled again after
		 * packets are released.
		 */
		netif_stop_queue(dev);

	/* Update statistics and start of transmission time */
	stats->tx_packets++;
	dev->trans_start = jiffies;

	spin_unlock_irqrestore(&mp->lock, flags);

	return 0;	/* success */
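/*
 * Illustrative helper (not part of the driver, hypothetical name): the
 * stop-queue condition used above and in the interrupt handler. The queue is
 * stopped once fewer than MAX_DESCS_PER_SKB descriptors remain, so even a
 * maximally fragmented skb never finds the ring partially unavailable.
 */
static inline int mv643xx_example_tx_queue_should_stop(struct mv643xx_private *mp)
{
	return mp->tx_ring_size <= (mp->tx_desc_count + MAX_DESCS_PER_SKB);
}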
 * mv643xx_eth_get_stats
 *
 * Returns a pointer to the interface statistics.
 *
 * Input :	dev - a pointer to the required interface
 *
 * Output :	a pointer to the interface's statistics

static struct net_device_stats *mv643xx_eth_get_stats(struct net_device *dev)
	struct mv643xx_private *mp = netdev_priv(dev);
#ifdef CONFIG_NET_POLL_CONTROLLER
static void mv643xx_netpoll(struct net_device *netdev)
	struct mv643xx_private *mp = netdev_priv(netdev);
	int port_num = mp->port_num;

	mv_write(MV643XX_ETH_INTERRUPT_MASK_REG(port_num), INT_MASK_ALL);
	/* wait for previous write to complete */
	mv_read(MV643XX_ETH_INTERRUPT_MASK_REG(port_num));

	mv643xx_eth_int_handler(netdev->irq, netdev, NULL);

	mv_write(MV643XX_ETH_INTERRUPT_MASK_REG(port_num), INT_UNMASK_ALL);
static void mv643xx_init_ethtool_cmd(struct net_device *dev, int phy_address,
					int speed, int duplex,
					struct ethtool_cmd *cmd)
	struct mv643xx_private *mp = netdev_priv(dev);

	memset(cmd, 0, sizeof(*cmd));

	cmd->port = PORT_MII;
	cmd->transceiver = XCVR_INTERNAL;
	cmd->phy_address = phy_address;

		cmd->autoneg = AUTONEG_ENABLE;
		/* mii lib checks, but doesn't use speed on AUTONEG_ENABLE */
		cmd->speed = SPEED_100;
		cmd->advertising = ADVERTISED_10baseT_Half |
					ADVERTISED_10baseT_Full |
					ADVERTISED_100baseT_Half |
					ADVERTISED_100baseT_Full;
		if (mp->mii.supports_gmii)
			cmd->advertising |= ADVERTISED_1000baseT_Full;
		cmd->autoneg = AUTONEG_DISABLE;
		cmd->duplex = duplex;
 * First function called after registering the network device.
 * Its purpose is to initialize the device as an ethernet device,
 * fill the ethernet device structure with pointers to functions,
 * and set the MAC address of the interface
 *
 * Input :	struct device *
 * Output :	-ENOMEM if failed, 0 if success

static int mv643xx_eth_probe(struct platform_device *pdev)
	struct mv643xx_eth_platform_data *pd;
	int port_num = pdev->id;
	struct mv643xx_private *mp;
	struct net_device *dev;
	struct resource *res;
	struct ethtool_cmd cmd;

	dev = alloc_etherdev(sizeof(struct mv643xx_private));

	platform_set_drvdata(pdev, dev);

	mp = netdev_priv(dev);

	res = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
	dev->irq = res->start;

	mp->port_num = port_num;

	dev->open = mv643xx_eth_open;
	dev->stop = mv643xx_eth_stop;
	dev->hard_start_xmit = mv643xx_eth_start_xmit;
	dev->get_stats = mv643xx_eth_get_stats;
	dev->set_mac_address = mv643xx_eth_set_mac_address;
	dev->set_multicast_list = mv643xx_eth_set_rx_mode;

	/* No need to Tx Timeout */
	dev->tx_timeout = mv643xx_eth_tx_timeout;
	dev->poll = mv643xx_poll;

#ifdef CONFIG_NET_POLL_CONTROLLER
	dev->poll_controller = mv643xx_netpoll;
#endif

	dev->watchdog_timeo = 2 * HZ;
	dev->tx_queue_len = mp->tx_ring_size;
	dev->change_mtu = mv643xx_eth_change_mtu;
	dev->do_ioctl = mv643xx_eth_do_ioctl;
	SET_ETHTOOL_OPS(dev, &mv643xx_ethtool_ops);

#ifdef MV643XX_CHECKSUM_OFFLOAD_TX
#ifdef MAX_SKB_FRAGS
	/*
	 * Zero copy can only work if we use Discovery II memory. Else, we will
	 * have to map the buffers to ISA memory which is only 16 MB
	 */
	dev->features = NETIF_F_SG | NETIF_F_IP_CSUM;

	/* Configure the timeout task */
	INIT_WORK(&mp->tx_timeout_task,
			(void (*)(void *))mv643xx_eth_tx_timeout_task, dev);

	spin_lock_init(&mp->lock);

	/* set default config values */
	eth_port_uc_addr_get(dev, dev->dev_addr);
	mp->port_config = MV643XX_ETH_PORT_CONFIG_DEFAULT_VALUE;
	mp->port_config_extend = MV643XX_ETH_PORT_CONFIG_EXTEND_DEFAULT_VALUE;
	mp->port_sdma_config = MV643XX_ETH_PORT_SDMA_CONFIG_DEFAULT_VALUE;
	mp->port_serial_control = MV643XX_ETH_PORT_SERIAL_CONTROL_DEFAULT_VALUE;
	mp->rx_ring_size = MV643XX_ETH_PORT_DEFAULT_RECEIVE_QUEUE_SIZE;
	mp->tx_ring_size = MV643XX_ETH_PORT_DEFAULT_TRANSMIT_QUEUE_SIZE;

	pd = pdev->dev.platform_data;
	if (pd->mac_addr != NULL)
		memcpy(dev->dev_addr, pd->mac_addr, 6);

	if (pd->phy_addr || pd->force_phy_addr)
		ethernet_phy_set(port_num, pd->phy_addr);

	if (pd->port_config || pd->force_port_config)
		mp->port_config = pd->port_config;

	if (pd->port_config_extend || pd->force_port_config_extend)
		mp->port_config_extend = pd->port_config_extend;

	if (pd->port_sdma_config || pd->force_port_sdma_config)
		mp->port_sdma_config = pd->port_sdma_config;

	if (pd->port_serial_control || pd->force_port_serial_control)
		mp->port_serial_control = pd->port_serial_control;

	if (pd->rx_queue_size)
		mp->rx_ring_size = pd->rx_queue_size;

	if (pd->tx_queue_size)
		mp->tx_ring_size = pd->tx_queue_size;

	if (pd->tx_sram_size) {
		mp->tx_sram_size = pd->tx_sram_size;
		mp->tx_sram_addr = pd->tx_sram_addr;

	if (pd->rx_sram_size) {
		mp->rx_sram_size = pd->rx_sram_size;
		mp->rx_sram_addr = pd->rx_sram_addr;

	/* Hook up MII support for ethtool */
	mp->mii.mdio_read = mv643xx_mdio_read;
	mp->mii.mdio_write = mv643xx_mdio_write;
	mp->mii.phy_id = ethernet_phy_get(port_num);
	mp->mii.phy_id_mask = 0x3f;
	mp->mii.reg_num_mask = 0x1f;

	err = ethernet_phy_detect(port_num);
		pr_debug("MV643xx ethernet port %d: "
				"No PHY detected at addr %d\n",
				port_num, ethernet_phy_get(port_num));

	pscr = mv_read(MV643XX_ETH_PORT_SERIAL_CONTROL_REG(port_num));
	pscr &= ~MV643XX_ETH_SERIAL_PORT_ENABLE;
	mv_write(MV643XX_ETH_PORT_SERIAL_CONTROL_REG(port_num), pscr);
	pscr = mp->port_serial_control;
	mv_write(MV643XX_ETH_PORT_SERIAL_CONTROL_REG(port_num), pscr);

	if (!(pscr & MV643XX_ETH_DISABLE_AUTO_NEG_FOR_DUPLX) &&
	    !(pscr & MV643XX_ETH_DISABLE_AUTO_NEG_SPEED_GMII))
	else if (pscr & MV643XX_ETH_PORT_STATUS_GMII_1000)
	else if (pscr & MV643XX_ETH_PORT_STATUS_MII_100)

	if (pscr & MV643XX_ETH_PORT_STATUS_FULL_DUPLEX)
		duplex = DUPLEX_FULL;
	else
		duplex = DUPLEX_HALF;

	ethernet_phy_reset(mp->port_num);
	mp->mii.supports_gmii = mii_check_gmii_support(&mp->mii);
	mv643xx_init_ethtool_cmd(dev, mp->mii.phy_id, speed, duplex, &cmd);
	mv643xx_eth_update_pscr(dev, &cmd);
	mv643xx_set_settings(dev, &cmd);

	err = register_netdev(dev);

		"%s: port %d with MAC address %02x:%02x:%02x:%02x:%02x:%02x\n",
		dev->name, port_num, p[0], p[1], p[2], p[3], p[4], p[5]);

	if (dev->features & NETIF_F_SG)
		printk(KERN_NOTICE "%s: Scatter Gather Enabled\n", dev->name);

	if (dev->features & NETIF_F_IP_CSUM)
		printk(KERN_NOTICE "%s: TX TCP/IP Checksumming Supported\n",

#ifdef MV643XX_CHECKSUM_OFFLOAD_TX
	printk(KERN_NOTICE "%s: RX TCP/UDP Checksum Offload ON \n", dev->name);
#endif

	printk(KERN_NOTICE "%s: TX and RX Interrupt Coalescing ON \n",

	printk(KERN_NOTICE "%s: RX NAPI Enabled \n", dev->name);

	if (mp->tx_sram_size > 0)
		printk(KERN_NOTICE "%s: Using SRAM\n", dev->name);
static int mv643xx_eth_remove(struct platform_device *pdev)
	struct net_device *dev = platform_get_drvdata(pdev);

	unregister_netdev(dev);
	flush_scheduled_work();

	platform_set_drvdata(pdev, NULL);
static int mv643xx_eth_shared_probe(struct platform_device *pdev)
	struct resource *res;

	printk(KERN_NOTICE "MV-643xx 10/100/1000 Ethernet Driver\n");

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);

	mv643xx_eth_shared_base = ioremap(res->start,
						MV643XX_ETH_SHARED_REGS_SIZE);
	if (mv643xx_eth_shared_base == NULL)

static int mv643xx_eth_shared_remove(struct platform_device *pdev)
	iounmap(mv643xx_eth_shared_base);
	mv643xx_eth_shared_base = NULL;
static struct platform_driver mv643xx_eth_driver = {
	.probe = mv643xx_eth_probe,
	.remove = mv643xx_eth_remove,
		.name = MV643XX_ETH_NAME,

static struct platform_driver mv643xx_eth_shared_driver = {
	.probe = mv643xx_eth_shared_probe,
	.remove = mv643xx_eth_shared_remove,
		.name = MV643XX_ETH_SHARED_NAME,
 * mv643xx_init_module
 *
 * Registers the network drivers into the Linux kernel

static int __init mv643xx_init_module(void)
	rc = platform_driver_register(&mv643xx_eth_shared_driver);
		rc = platform_driver_register(&mv643xx_eth_driver);
			platform_driver_unregister(&mv643xx_eth_shared_driver);

 * mv643xx_cleanup_module
 *
 * Unregisters the network drivers from the Linux kernel

static void __exit mv643xx_cleanup_module(void)
	platform_driver_unregister(&mv643xx_eth_driver);
	platform_driver_unregister(&mv643xx_eth_shared_driver);

module_init(mv643xx_init_module);
module_exit(mv643xx_cleanup_module);

MODULE_LICENSE("GPL");
MODULE_AUTHOR(	"Rabeeh Khoury, Assaf Hoffman, Matthew Dharm, Manish Lachwani"
		" and Dale Farnsworth");
MODULE_DESCRIPTION("Ethernet driver for Marvell MV643XX");
 * The second part is the low level driver of the gigE ethernet ports.
 *
 * Marvell's Gigabit Ethernet controller low level driver
 *
 *	This file introduces a low level API to Marvell's Gigabit Ethernet
 *	controller. This Gigabit Ethernet Controller driver API controls
 *	1) Operations (i.e. port init, start, reset etc.).
 *	2) Data flow (i.e. port send, receive etc.).
 *	Each Gigabit Ethernet port is controlled via
 *	struct mv643xx_private.
 *	This struct includes user configuration information as well as
 *	driver internal data needed for its operations.
 *
 *	Supported Features:
 *	- This low level driver is OS independent. Allocating memory for
 *	  the descriptor rings and buffers is not within the scope of
 *	- The user is freed from Rx/Tx queue management.
 *	- This low level driver introduces a functional API that enables
 *	  the user to operate Marvell's Gigabit Ethernet Controller in a
 *	- Simple Gigabit Ethernet port operation API.
 *	- Simple Gigabit Ethernet port data flow API.
 *	- Data flow and operation API support per queue functionality.
 *	- Support cached descriptors for better performance.
 *	- Enable access to all four DRAM banks and internal SRAM memory
 *	- PHY access and control API.
 *	- Port control register configuration API.
 *	- Full control over Unicast and Multicast MAC configurations.
 *
 *	Initialization phase
 *		This phase completes the initialization of the
 *		mv643xx_private struct.
 *		User information regarding port configuration has to be set
 *		prior to calling the port initialization routine.
 *		In this phase any port Tx/Rx activity is halted, MIB counters
 *		are cleared, the PHY address is set according to user parameter
 *		and access to DRAM and internal SRAM memory spaces.
 *
 *	Driver ring initialization
 *		Allocating memory for the descriptor rings and buffers is not
 *		within the scope of this driver. Thus, the user is required to
 *		allocate memory for the descriptors ring and buffers. Those
 *		memory parameters are used by the Rx and Tx ring initialization
 *		routines in order to carve the descriptor linked list in a form
 *		Note: Pay special attention to alignment issues when using
 *		cached descriptors/buffers. In this phase the driver stores
 *		information in the mv643xx_private struct regarding each queue
 *
 *		This phase prepares the Ethernet port for Rx and Tx activity.
 *		It uses the information stored in the mv643xx_private struct to
 *		initialize the various port registers.
 *
 *		All packet references to/from the driver are done using
 *		This struct is a unified struct used with Rx and Tx operations.
 *		This way the user is not required to be familiar with either
 *		Tx or Rx descriptor structures.
 *		The driver's descriptor rings are managed by indexes.
 *		Those indexes control the ring resources and are used to
 *		indicate a SW resource error:
 *		'current'
 *		This index points to the currently available resource for use.
 *		For example in Rx process this index will point to the descriptor
 *		that will be passed to the user upon calling the receive
 *		routine. In Tx process, this index will point to the descriptor
 *		that will be assigned with the user packet info and transmitted.
 *		'used'
 *		This index points to the descriptor that needs to restore its
 *		resources. For example in Rx process, using the Rx buffer return
 *		API will attach the buffer returned in packet info to the
 *		descriptor pointed to by 'used'. In Tx process, using the Tx
 *		descriptor return will merely return the user packet info with
 *		the command status of the transmitted buffer pointed to by the
 *		'used' index. Nevertheless, it is essential to use this routine
 *		to update the 'used' index.
 *		'first'
 *		This index supports Tx Scatter-Gather. It points to the first
 *		descriptor of a packet assembled of multiple buffers. For
 *		example, when a Tx resource error occurs in the middle of such a
 *		packet, the 'curr' index gets the value of 'first' to indicate
 *		that the ring returned to its state before trying to transmit
 *
 *	Receive operation:
 *		The eth_port_receive API fills the packet information struct,
 *		passed by the caller, with received information from the
 *		'current' SDMA descriptor.
 *		It is the user's responsibility to return this resource back
 *		to the Rx descriptor ring to enable the reuse of this resource.
 *		Returning the Rx resource is done using the eth_rx_return_buff
 *		API.
 *
 *	Transmit operation:
 *		The eth_port_send API supports Scatter-Gather, which enables
 *		sending a packet spanned over multiple buffers. A packet info
 *		structure given by the user and put into the Tx descriptors
 *		ring will be transmitted only if the 'LAST' bit is set in its
 *		command status field. This
 *		API also considers restrictions regarding buffer alignments and
 *		The user must return a Tx resource after ensuring the buffer
 *		has been transmitted to enable the Tx ring indexes to update.
 *
 *		This device is on-board.  No jumper diagram is necessary.
 *
 *	EXTERNAL INTERFACE
 *
 *	Prior to calling the initialization routine eth_port_init() the user
 *	must set the following fields under mv643xx_private struct:
 *	port_num		User Ethernet port number.
 *	port_config		User port configuration value.
 *	port_config_extend	User port config extend value.
 *	port_sdma_config	User port SDMA config value.
 *	port_serial_control	User port serial control value.
 *
 *	This driver data flow is done using the struct pkt_info which
 *	is a unified struct for Rx and Tx operations:
 *
 *	byte_cnt	Tx/Rx descriptor buffer byte count.
 *	l4i_chk		CPU provided TCP Checksum. For Tx operation
 *	cmd_sts		Tx/Rx descriptor command status.
 *	buf_ptr		Tx/Rx descriptor buffer pointer.
 *	return_info	Tx/Rx user resource return information.
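/*
 * Illustrative sketch (not part of the driver): a minimal pkt_info setup for
 * a single-buffer transmit, mirroring what mv643xx_eth_start_xmit() does for
 * an unfragmented skb without hardware checksumming. The helper name is
 * hypothetical and the ETH_TX_* flags are assumed to come from mv643xx_eth.h
 * (ETH_TX_LAST_DESC is not shown in this excerpt).
 */
static inline void mv643xx_example_fill_tx_pkt_info(struct pkt_info *pkt_info,
							struct sk_buff *skb)
{
	pkt_info->cmd_sts = ETH_TX_ENABLE_INTERRUPT | ETH_TX_FIRST_DESC |
				ETH_TX_LAST_DESC | 5 << ETH_TX_IHL_SHIFT;
	pkt_info->l4i_chk = 0;
	pkt_info->byte_cnt = skb->len;
	pkt_info->buf_ptr = dma_map_single(NULL, skb->data, skb->len,
						DMA_TO_DEVICE);
	pkt_info->return_info = skb;
}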
static int ethernet_phy_get(unsigned int eth_port_num);
static void ethernet_phy_set(unsigned int eth_port_num, int phy_addr);

/* Ethernet Port routines */
static void eth_port_set_filter_table_entry(int table, unsigned char entry);
 * eth_port_init - Initialize the Ethernet port driver
 *
 *	This function prepares the ethernet port to start its activity:
 *	1) Completes the ethernet port driver struct initialization toward port
 *	2) Resets the device to a quiescent state in case of warm reboot.
 *	3) Enable SDMA access to all four DRAM banks as well as internal SRAM.
 *	4) Clean MAC tables. The reset status of those tables is unknown.
 *	5) Set PHY address.
 *	Note: Call this routine prior to eth_port_start routine and after
 *	setting user values in the user fields of Ethernet port control
 *
 *	struct mv643xx_private *mp	Ethernet port control struct

static void eth_port_init(struct mv643xx_private *mp)
	mp->rx_resource_err = 0;
	mp->tx_resource_err = 0;

	eth_port_reset(mp->port_num);

	eth_port_init_mac_tables(mp->port_num);
 * eth_port_start - Start the Ethernet port activity.
 *
 *	This routine prepares the Ethernet port for Rx and Tx activity:
 *	 1. Initialize Tx and Rx Current Descriptor Pointer for each queue
 *	    whose descriptor ring has been initialized (using
 *	    ether_init_tx_desc_ring for Tx and ether_init_rx_desc_ring for Rx)
 *	 2. Initialize and enable the Ethernet configuration port by writing to
 *	    the port's configuration and command registers.
 *	 3. Initialize and enable the SDMA by writing to the SDMA's
 *	    configuration and command registers. After completing these steps,
 *	    the ethernet port SDMA can start to perform Rx and Tx activities.
 *
 *	Note: Each Rx and Tx queue descriptor's list must be initialized prior
 *	to calling this function (use ether_init_tx_desc_ring for Tx queues
 *	and ether_init_rx_desc_ring for Rx queues).
 *
 *	dev - a pointer to the required interface
 *
 *	Ethernet port is ready to receive and transmit.

static void eth_port_start(struct net_device *dev)
	struct mv643xx_private *mp = netdev_priv(dev);
	unsigned int port_num = mp->port_num;
	int tx_curr_desc, rx_curr_desc;
	struct ethtool_cmd ethtool_cmd;

	/* Assignment of Tx CTRP of given queue */
	tx_curr_desc = mp->tx_curr_desc_q;
	mv_write(MV643XX_ETH_TX_CURRENT_QUEUE_DESC_PTR_0(port_num),
		(u32)((struct eth_tx_desc *)mp->tx_desc_dma + tx_curr_desc));

	/* Assignment of Rx CRDP of given queue */
	rx_curr_desc = mp->rx_curr_desc_q;
	mv_write(MV643XX_ETH_RX_CURRENT_QUEUE_DESC_PTR_0(port_num),
		(u32)((struct eth_rx_desc *)mp->rx_desc_dma + rx_curr_desc));

	/* Add the assigned Ethernet address to the port's address table */
	eth_port_uc_addr_set(port_num, dev->dev_addr);

	/* Assign port configuration and command. */
	mv_write(MV643XX_ETH_PORT_CONFIG_REG(port_num), mp->port_config);

	pscr = mv_read(MV643XX_ETH_PORT_SERIAL_CONTROL_REG(port_num));
	pscr &= ~MV643XX_ETH_SERIAL_PORT_ENABLE;
	mv_write(MV643XX_ETH_PORT_SERIAL_CONTROL_REG(port_num), pscr);

	pscr &= ~MV643XX_ETH_FORCE_LINK_PASS;
	pscr |= MV643XX_ETH_DISABLE_AUTO_NEG_FOR_FLOW_CTRL |
		MV643XX_ETH_DISABLE_AUTO_NEG_SPEED_GMII |
		MV643XX_ETH_DISABLE_AUTO_NEG_FOR_DUPLX |
		MV643XX_ETH_DO_NOT_FORCE_LINK_FAIL |
		MV643XX_ETH_SERIAL_PORT_CONTROL_RESERVED;

	mv_write(MV643XX_ETH_PORT_SERIAL_CONTROL_REG(port_num), pscr);

	pscr |= MV643XX_ETH_SERIAL_PORT_ENABLE;
	mv_write(MV643XX_ETH_PORT_SERIAL_CONTROL_REG(port_num), pscr);

	/* Assign port SDMA configuration */
	mv_write(MV643XX_ETH_SDMA_CONFIG_REG(port_num), mp->port_sdma_config);

	/* Enable port Rx. */
	mv643xx_eth_port_enable_rx(port_num, mp->port_rx_queue_command);

	/* Disable port bandwidth limits by clearing MTU register */
	mv_write(MV643XX_ETH_MAXIMUM_TRANSMIT_UNIT(port_num), 0);

	/* save phy settings across reset */
	mv643xx_get_settings(dev, &ethtool_cmd);
	ethernet_phy_reset(mp->port_num);
	mv643xx_set_settings(dev, &ethtool_cmd);
 * eth_port_uc_addr_set - This function sets the port Unicast address.
 *
 *	This function sets the port Ethernet MAC address.
 *
 *	unsigned int	eth_port_num	Port number.
 *	char *		p_addr		Address to be set
 *
 *	Sets the MAC address low and high registers. Also calls
 *	eth_port_set_filter_table_entry() to set the unicast
 *	table with the proper information.

static void eth_port_uc_addr_set(unsigned int eth_port_num,
						unsigned char *p_addr)
	mac_l = (p_addr[4] << 8) | (p_addr[5]);
	mac_h = (p_addr[0] << 24) | (p_addr[1] << 16) | (p_addr[2] << 8) |

	mv_write(MV643XX_ETH_MAC_ADDR_LOW(eth_port_num), mac_l);
	mv_write(MV643XX_ETH_MAC_ADDR_HIGH(eth_port_num), mac_h);

	/* Accept frames of this address */
	table = MV643XX_ETH_DA_FILTER_UNICAST_TABLE_BASE(eth_port_num);
	eth_port_set_filter_table_entry(table, p_addr[5] & 0x0f);
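/*
 * Worked example (illustrative, not part of the driver): for the MAC address
 * 00:11:22:33:44:55 the packing above yields mac_h = 0x00112233 and
 * mac_l = 0x00004455, matching what is written to the MAC address registers.
 * The helper name below is hypothetical.
 */
static inline void mv643xx_example_pack_mac(const unsigned char *p_addr,
					unsigned int *mac_h, unsigned int *mac_l)
{
	*mac_l = (p_addr[4] << 8) | p_addr[5];
	*mac_h = (p_addr[0] << 24) | (p_addr[1] << 16) |
			(p_addr[2] << 8) | p_addr[3];
}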
 * eth_port_uc_addr_get - This function retrieves the port Unicast address
 * (MAC address) from the ethernet hw registers.
 *
 *	This function retrieves the port Ethernet MAC address.
 *
 *	unsigned int	eth_port_num	Port number.
 *	char		*MacAddr	pointer where the MAC address is stored
 *
 *	Copy the MAC address to the location pointed to by MacAddr

static void eth_port_uc_addr_get(struct net_device *dev, unsigned char *p_addr)
	struct mv643xx_private *mp = netdev_priv(dev);

	mac_h = mv_read(MV643XX_ETH_MAC_ADDR_HIGH(mp->port_num));
	mac_l = mv_read(MV643XX_ETH_MAC_ADDR_LOW(mp->port_num));

	p_addr[0] = (mac_h >> 24) & 0xff;
	p_addr[1] = (mac_h >> 16) & 0xff;
	p_addr[2] = (mac_h >> 8) & 0xff;
	p_addr[3] = mac_h & 0xff;
	p_addr[4] = (mac_l >> 8) & 0xff;
	p_addr[5] = mac_l & 0xff;
/*
 * The entries in each table are indexed by a hash of a packet's MAC
 * address. One bit in each entry determines whether the packet is
 * accepted. There are 4 entries (each 8 bits wide) in each register
 * of the table. The bits in each entry are defined as follows:
 *	0	Accept=1, Drop=0
 *	3-1	Queue (ETH_Q0=0)
 */
static void eth_port_set_filter_table_entry(int table, unsigned char entry)
{
	unsigned int table_reg;
	unsigned int tbl_offset;
	unsigned int reg_offset;

	tbl_offset = (entry / 4) * 4;	/* Register offset of DA table entry */
	reg_offset = entry % 4;		/* Entry offset within the register */

	/* Set "accepts frame bit" at specified table entry */
	table_reg = mv_read(table + tbl_offset);
	table_reg |= 0x01 << (8 * reg_offset);
	mv_write(table + tbl_offset, table_reg);
}
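/*
 * Illustrative example: entry 0x1f yields tbl_offset = 0x1c and
 * reg_offset = 3, so the accept bit that gets set is bit 24 of the
 * table register at offset 0x1c.
 */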
/*
 * eth_port_mc_addr - Multicast address settings.
 *
 * The MV device supports multicast using two tables:
 * 1) Special Multicast Table for MAC addresses of the form
 *    0x01-00-5E-00-00-XX (where XX is between 0x00 and 0xFF).
 *    The MAC DA[7:0] bits are used as a pointer to the Special Multicast
 *    Table entries in the DA-Filter table.
 * 2) Other Multicast Table for multicast of another type. A CRC-8 value
 *    is used as an index to the Other Multicast Table entries in the
 *    DA-Filter table. This function calculates the CRC-8 value.
 * In either case, eth_port_set_filter_table_entry() is then called
 * to set the actual table entry.
 */
static void eth_port_mc_addr(unsigned int eth_port_num, unsigned char *p_addr)
{
	unsigned int mac_h;
	unsigned int mac_l;
	unsigned char crc_result = 0;
	int table;
	int mac_array[48];
	int crc[8];
	int i;

	if ((p_addr[0] == 0x01) && (p_addr[1] == 0x00) &&
	    (p_addr[2] == 0x5E) && (p_addr[3] == 0x00) && (p_addr[4] == 0x00)) {
		table = MV643XX_ETH_DA_FILTER_SPECIAL_MULTICAST_TABLE_BASE
							(eth_port_num);
		eth_port_set_filter_table_entry(table, p_addr[5]);
		return;
	}

	/* Calculate CRC-8 out of the given address */
	mac_h = (p_addr[0] << 8) | (p_addr[1]);
	mac_l = (p_addr[2] << 24) | (p_addr[3] << 16) |
			(p_addr[4] << 8) | (p_addr[5] << 0);

	for (i = 0; i < 32; i++)
		mac_array[i] = (mac_l >> i) & 0x1;
	for (i = 32; i < 48; i++)
		mac_array[i] = (mac_h >> (i - 32)) & 0x1;

	crc[0] = mac_array[45] ^ mac_array[43] ^ mac_array[40] ^ mac_array[39] ^
		 mac_array[35] ^ mac_array[34] ^ mac_array[31] ^ mac_array[30] ^
		 mac_array[28] ^ mac_array[23] ^ mac_array[21] ^ mac_array[19] ^
		 mac_array[18] ^ mac_array[16] ^ mac_array[14] ^ mac_array[12] ^
		 mac_array[8] ^ mac_array[7] ^ mac_array[6] ^ mac_array[0];

	crc[1] = mac_array[46] ^ mac_array[45] ^ mac_array[44] ^ mac_array[43] ^
		 mac_array[41] ^ mac_array[39] ^ mac_array[36] ^ mac_array[34] ^
		 mac_array[32] ^ mac_array[30] ^ mac_array[29] ^ mac_array[28] ^
		 mac_array[24] ^ mac_array[23] ^ mac_array[22] ^ mac_array[21] ^
		 mac_array[20] ^ mac_array[18] ^ mac_array[17] ^ mac_array[16] ^
		 mac_array[15] ^ mac_array[14] ^ mac_array[13] ^ mac_array[12] ^
		 mac_array[9] ^ mac_array[6] ^ mac_array[1] ^ mac_array[0];

	crc[2] = mac_array[47] ^ mac_array[46] ^ mac_array[44] ^ mac_array[43] ^
		 mac_array[42] ^ mac_array[39] ^ mac_array[37] ^ mac_array[34] ^
		 mac_array[33] ^ mac_array[29] ^ mac_array[28] ^ mac_array[25] ^
		 mac_array[24] ^ mac_array[22] ^ mac_array[17] ^ mac_array[15] ^
		 mac_array[13] ^ mac_array[12] ^ mac_array[10] ^ mac_array[8] ^
		 mac_array[6] ^ mac_array[2] ^ mac_array[1] ^ mac_array[0];

	crc[3] = mac_array[47] ^ mac_array[45] ^ mac_array[44] ^ mac_array[43] ^
		 mac_array[40] ^ mac_array[38] ^ mac_array[35] ^ mac_array[34] ^
		 mac_array[30] ^ mac_array[29] ^ mac_array[26] ^ mac_array[25] ^
		 mac_array[23] ^ mac_array[18] ^ mac_array[16] ^ mac_array[14] ^
		 mac_array[13] ^ mac_array[11] ^ mac_array[9] ^ mac_array[7] ^
		 mac_array[3] ^ mac_array[2] ^ mac_array[1];

	crc[4] = mac_array[46] ^ mac_array[45] ^ mac_array[44] ^ mac_array[41] ^
		 mac_array[39] ^ mac_array[36] ^ mac_array[35] ^ mac_array[31] ^
		 mac_array[30] ^ mac_array[27] ^ mac_array[26] ^ mac_array[24] ^
		 mac_array[19] ^ mac_array[17] ^ mac_array[15] ^ mac_array[14] ^
		 mac_array[12] ^ mac_array[10] ^ mac_array[8] ^ mac_array[4] ^
		 mac_array[3] ^ mac_array[2];

	crc[5] = mac_array[47] ^ mac_array[46] ^ mac_array[45] ^ mac_array[42] ^
		 mac_array[40] ^ mac_array[37] ^ mac_array[36] ^ mac_array[32] ^
		 mac_array[31] ^ mac_array[28] ^ mac_array[27] ^ mac_array[25] ^
		 mac_array[20] ^ mac_array[18] ^ mac_array[16] ^ mac_array[15] ^
		 mac_array[13] ^ mac_array[11] ^ mac_array[9] ^ mac_array[5] ^
		 mac_array[4] ^ mac_array[3];

	crc[6] = mac_array[47] ^ mac_array[46] ^ mac_array[43] ^ mac_array[41] ^
		 mac_array[38] ^ mac_array[37] ^ mac_array[33] ^ mac_array[32] ^
		 mac_array[29] ^ mac_array[28] ^ mac_array[26] ^ mac_array[21] ^
		 mac_array[19] ^ mac_array[17] ^ mac_array[16] ^ mac_array[14] ^
		 mac_array[12] ^ mac_array[10] ^ mac_array[6] ^ mac_array[5] ^
		 mac_array[4];

	crc[7] = mac_array[47] ^ mac_array[44] ^ mac_array[42] ^ mac_array[39] ^
		 mac_array[38] ^ mac_array[34] ^ mac_array[33] ^ mac_array[30] ^
		 mac_array[29] ^ mac_array[27] ^ mac_array[22] ^ mac_array[20] ^
		 mac_array[18] ^ mac_array[17] ^ mac_array[15] ^ mac_array[13] ^
		 mac_array[11] ^ mac_array[7] ^ mac_array[6] ^ mac_array[5];

	for (i = 0; i < 8; i++)
		crc_result = crc_result | (crc[i] << i);

	table = MV643XX_ETH_DA_FILTER_OTHER_MULTICAST_TABLE_BASE(eth_port_num);
	eth_port_set_filter_table_entry(table, crc_result);
}
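/*
 * Note: addresses of the form 01:00:5E:00:00:XX (for example the
 * all-hosts group 01:00:5E:00:00:01) go straight to Special Multicast
 * Table entry XX; every other multicast address is hashed with the
 * CRC-8 above and lands in the Other Multicast Table.
 */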
/*
 * Set the entire multicast list based on dev->mc_list.
 */
static void eth_port_set_multicast_list(struct net_device *dev)
{
	struct dev_mc_list *mc_list;
	int i;
	int table_index;
	struct mv643xx_private *mp = netdev_priv(dev);
	unsigned int eth_port_num = mp->port_num;

	/* If the device is in promiscuous mode or in all multicast mode,
	 * we will fully populate both multicast tables with accept.
	 * This is guaranteed to yield a match on all multicast addresses...
	 */
	if ((dev->flags & IFF_PROMISC) || (dev->flags & IFF_ALLMULTI)) {
		for (table_index = 0; table_index <= 0xFC; table_index += 4) {
			/* Set all entries in DA filter special multicast
			 * table (Ex_dFSMT)
			 * Set for ETH_Q0 for now
			 * Bits
			 * 0	Accept=1, Drop=0
			 * 3-1	Queue	ETH_Q0=0
			 */
			mv_write(MV643XX_ETH_DA_FILTER_SPECIAL_MULTICAST_TABLE_BASE(eth_port_num) + table_index, 0x01010101);

			/* Set all entries in DA filter other multicast
			 * table (Ex_dFOMT)
			 * Set for ETH_Q0 for now
			 * Bits
			 * 0	Accept=1, Drop=0
			 * 3-1	Queue	ETH_Q0=0
			 */
			mv_write(MV643XX_ETH_DA_FILTER_OTHER_MULTICAST_TABLE_BASE(eth_port_num) + table_index, 0x01010101);
		}
		return;
	}

	/* We will clear out multicast tables every time we get the list.
	 * Then add the entire new list...
	 */
	for (table_index = 0; table_index <= 0xFC; table_index += 4) {
		/* Clear DA filter special multicast table (Ex_dFSMT) */
		mv_write(MV643XX_ETH_DA_FILTER_SPECIAL_MULTICAST_TABLE_BASE
				(eth_port_num) + table_index, 0);

		/* Clear DA filter other multicast table (Ex_dFOMT) */
		mv_write(MV643XX_ETH_DA_FILTER_OTHER_MULTICAST_TABLE_BASE
				(eth_port_num) + table_index, 0);
	}

	/* Get pointer to net_device multicast list and add each one... */
	for (i = 0, mc_list = dev->mc_list;
			(i < 256) && (mc_list != NULL) && (i < dev->mc_count);
			i++, mc_list = mc_list->next)
		if (mc_list->dmi_addrlen == 6)
			eth_port_mc_addr(eth_port_num, mc_list->dmi_addr);
}
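/*
 * Note on the promiscuous/allmulti path above: each 32-bit table
 * register holds four 8-bit entries, so writing 0x01010101 to the
 * 64 registers (offsets 0x00-0xFC) sets the accept bit for all 256
 * hash values in one pass.
 */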
/*
 * eth_port_init_mac_tables - Clear all entries in the UC, SMC and OMC tables
 *
 * DESCRIPTION:
 *	Go through all the DA filter tables (Unicast, Special Multicast &
 *	Other Multicast) and set each entry to 0.
 *
 * INPUT:
 *	unsigned int	eth_port_num	Ethernet Port number.
 *
 * OUTPUT:
 *	Multicast and Unicast packets are rejected.
 *
 * RETURN:
 *	None.
 */
static void eth_port_init_mac_tables(unsigned int eth_port_num)
{
	int table_index;

	/* Clear DA filter unicast table (Ex_dFUT) */
	for (table_index = 0; table_index <= 0xC; table_index += 4)
		mv_write(MV643XX_ETH_DA_FILTER_UNICAST_TABLE_BASE
				(eth_port_num) + table_index, 0);

	for (table_index = 0; table_index <= 0xFC; table_index += 4) {
		/* Clear DA filter special multicast table (Ex_dFSMT) */
		mv_write(MV643XX_ETH_DA_FILTER_SPECIAL_MULTICAST_TABLE_BASE
				(eth_port_num) + table_index, 0);
		/* Clear DA filter other multicast table (Ex_dFOMT) */
		mv_write(MV643XX_ETH_DA_FILTER_OTHER_MULTICAST_TABLE_BASE
				(eth_port_num) + table_index, 0);
	}
}
/*
 * eth_clear_mib_counters - Clear all MIB counters
 *
 * DESCRIPTION:
 *	This function clears all MIB counters of a specific ethernet port.
 *	A read from a MIB counter resets that counter.
 *
 * INPUT:
 *	unsigned int	eth_port_num	Ethernet Port number.
 *
 * OUTPUT:
 *	After reading all MIB counters, the counters are reset.
 *
 * RETURN:
 *	None.
 */
static void eth_clear_mib_counters(unsigned int eth_port_num)
{
	int i;

	/* Perform dummy reads from MIB counters */
	for (i = ETH_MIB_GOOD_OCTETS_RECEIVED_LOW; i < ETH_MIB_LATE_COLLISION;
									i += 4)
		mv_read(MV643XX_ETH_MIB_COUNTERS_BASE(eth_port_num) + i);
}
static inline u32 read_mib(struct mv643xx_private *mp, int offset)
{
	return mv_read(MV643XX_ETH_MIB_COUNTERS_BASE(mp->port_num) + offset);
}

static void eth_update_mib_counters(struct mv643xx_private *mp)
{
	struct mv643xx_mib_counters *p = &mp->mib_counters;
	int offset;

	p->good_octets_received +=
			read_mib(mp, ETH_MIB_GOOD_OCTETS_RECEIVED_LOW);
	p->good_octets_received +=
			(u64)read_mib(mp, ETH_MIB_GOOD_OCTETS_RECEIVED_HIGH) << 32;

	for (offset = ETH_MIB_BAD_OCTETS_RECEIVED;
			offset <= ETH_MIB_FRAMES_1024_TO_MAX_OCTETS;
			offset += 4)
		*(u32 *)((char *)p + offset) = read_mib(mp, offset);

	p->good_octets_sent += read_mib(mp, ETH_MIB_GOOD_OCTETS_SENT_LOW);
	p->good_octets_sent +=
			(u64)read_mib(mp, ETH_MIB_GOOD_OCTETS_SENT_HIGH) << 32;

	for (offset = ETH_MIB_GOOD_FRAMES_SENT;
			offset <= ETH_MIB_LATE_COLLISION;
			offset += 4)
		*(u32 *)((char *)p + offset) = read_mib(mp, offset);
}
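/*
 * Note: the two good_octets counters are accumulated as 64-bit values
 * from their LOW/HIGH 32-bit register pairs; the remaining counters are
 * copied as 32-bit values. The copy loops assume that the field offsets
 * in struct mv643xx_mib_counters mirror the hardware counter offsets.
 */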
/*
 * ethernet_phy_detect - Detect whether a phy is present
 *
 * DESCRIPTION:
 *	This function tests whether there is a PHY present on
 *	the specified port.
 *
 * INPUT:
 *	unsigned int	eth_port_num	Ethernet Port number.
 *
 * OUTPUT:
 *	None
 *
 * RETURN:
 *	0 on success
 *	-ENODEV on failure
 */
static int ethernet_phy_detect(unsigned int port_num)
{
	unsigned int phy_reg_data0;
	int auto_neg;

	eth_port_read_smi_reg(port_num, 0, &phy_reg_data0);
	auto_neg = phy_reg_data0 & 0x1000;
	phy_reg_data0 ^= 0x1000;	/* invert auto_neg */
	eth_port_write_smi_reg(port_num, 0, phy_reg_data0);

	eth_port_read_smi_reg(port_num, 0, &phy_reg_data0);
	if ((phy_reg_data0 & 0x1000) == auto_neg)
		return -ENODEV;		/* change didn't take */

	phy_reg_data0 ^= 0x1000;
	eth_port_write_smi_reg(port_num, 0, phy_reg_data0);

	return 0;
}
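/*
 * The detection above works by toggling bit 12 (auto-negotiation enable)
 * of PHY register 0 and checking whether the change is reflected on
 * read-back; if the bit cannot be flipped, no PHY is answering at that
 * address. The original value is then written back.
 */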
/*
 * ethernet_phy_get - Get the ethernet port PHY address.
 *
 * DESCRIPTION:
 *	This routine returns the given ethernet port PHY address.
 *
 * INPUT:
 *	unsigned int	eth_port_num	Ethernet Port number.
 *
 * OUTPUT:
 *	None.
 *
 * RETURN:
 *	PHY address.
 */
static int ethernet_phy_get(unsigned int eth_port_num)
{
	unsigned int reg_data;

	reg_data = mv_read(MV643XX_ETH_PHY_ADDR_REG);

	return ((reg_data >> (5 * eth_port_num)) & 0x1f);
}

/*
 * ethernet_phy_set - Set the ethernet port PHY address.
 *
 * DESCRIPTION:
 *	This routine sets the given ethernet port PHY address.
 *
 * INPUT:
 *	unsigned int	eth_port_num	Ethernet Port number.
 *	int		phy_addr	PHY address.
 *
 * OUTPUT:
 *	None.
 *
 * RETURN:
 *	None.
 */
static void ethernet_phy_set(unsigned int eth_port_num, int phy_addr)
{
	u32 reg_data;
	int addr_shift = 5 * eth_port_num;

	reg_data = mv_read(MV643XX_ETH_PHY_ADDR_REG);
	reg_data &= ~(0x1f << addr_shift);
	reg_data |= (phy_addr & 0x1f) << addr_shift;
	mv_write(MV643XX_ETH_PHY_ADDR_REG, reg_data);
}
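/*
 * Note: MV643XX_ETH_PHY_ADDR_REG packs one 5-bit PHY address per port,
 * at bit offset 5 * port number; for example, port 1's PHY address
 * lives in bits 9:5 of the register.
 */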
/*
 * ethernet_phy_reset - Reset Ethernet port PHY.
 *
 * DESCRIPTION:
 *	This routine utilizes the SMI interface to reset the ethernet port PHY.
 *
 * INPUT:
 *	unsigned int	eth_port_num	Ethernet Port number.
 *
 * OUTPUT:
 *	The PHY is reset.
 *
 * RETURN:
 *	None.
 */
static void ethernet_phy_reset(unsigned int eth_port_num)
{
	unsigned int phy_reg_data;

	eth_port_read_smi_reg(eth_port_num, 0, &phy_reg_data);
	phy_reg_data |= 0x8000;	/* Set bit 15 to reset the PHY */
	eth_port_write_smi_reg(eth_port_num, 0, phy_reg_data);

	/* wait for PHY to come out of reset */
	do {
		udelay(1);
		eth_port_read_smi_reg(eth_port_num, 0, &phy_reg_data);
	} while (phy_reg_data & 0x8000);
}
static void mv643xx_eth_port_enable_tx(unsigned int port_num,
					unsigned int channels)
{
	mv_write(MV643XX_ETH_TRANSMIT_QUEUE_COMMAND_REG(port_num), channels);
}

static void mv643xx_eth_port_enable_rx(unsigned int port_num,
					unsigned int channels)
{
	mv_write(MV643XX_ETH_RECEIVE_QUEUE_COMMAND_REG(port_num), channels);
}

static unsigned int mv643xx_eth_port_disable_tx(unsigned int port_num)
{
	u32 channels;

	/* Stop Tx port activity. Check port Tx activity. */
	channels = mv_read(MV643XX_ETH_TRANSMIT_QUEUE_COMMAND_REG(port_num))
							& 0xff;
	if (channels) {
		/* Issue stop command for active channels only */
		mv_write(MV643XX_ETH_TRANSMIT_QUEUE_COMMAND_REG(port_num),
							(channels << 8));

		/* Wait for all Tx activity to terminate. */
		/* Check port cause register that all Tx queues are stopped */
		while (mv_read(MV643XX_ETH_TRANSMIT_QUEUE_COMMAND_REG(port_num))
							& 0xff)
			udelay(PHY_WAIT_MICRO_SECONDS);

		/* Wait for Tx FIFO to empty */
		while (mv_read(MV643XX_ETH_PORT_STATUS_REG(port_num)) &
							ETH_PORT_TX_FIFO_EMPTY)
			udelay(PHY_WAIT_MICRO_SECONDS);
	}

	return channels;
}

static unsigned int mv643xx_eth_port_disable_rx(unsigned int port_num)
{
	u32 channels;

	/* Stop Rx port activity. Check port Rx activity. */
	channels = mv_read(MV643XX_ETH_RECEIVE_QUEUE_COMMAND_REG(port_num))
							& 0xff;
	if (channels) {
		/* Issue stop command for active channels only */
		mv_write(MV643XX_ETH_RECEIVE_QUEUE_COMMAND_REG(port_num),
							(channels << 8));

		/* Wait for all Rx activity to terminate. */
		/* Check port cause register that all Rx queues are stopped */
		while (mv_read(MV643XX_ETH_RECEIVE_QUEUE_COMMAND_REG(port_num))
							& 0xff)
			udelay(PHY_WAIT_MICRO_SECONDS);
	}

	return channels;
}
/*
 * eth_port_reset - Reset Ethernet port
 *
 * DESCRIPTION:
 *	This routine resets the chip by aborting any SDMA engine activity and
 *	clearing the MIB counters. The Receiver and the Transmit unit are in
 *	idle state after this command is performed and the port is disabled.
 *
 * INPUT:
 *	unsigned int	eth_port_num	Ethernet Port number.
 *
 * OUTPUT:
 *	Channel activity is halted.
 *
 * RETURN:
 *	None.
 */
static void eth_port_reset(unsigned int port_num)
{
	unsigned int reg_data;

	mv643xx_eth_port_disable_tx(port_num);
	mv643xx_eth_port_disable_rx(port_num);

	/* Clear all MIB counters */
	eth_clear_mib_counters(port_num);

	/* Reset the Enable bit in the Configuration Register */
	reg_data = mv_read(MV643XX_ETH_PORT_SERIAL_CONTROL_REG(port_num));
	reg_data &= ~(MV643XX_ETH_SERIAL_PORT_ENABLE |
			MV643XX_ETH_DO_NOT_FORCE_LINK_FAIL |
			MV643XX_ETH_FORCE_LINK_PASS);
	mv_write(MV643XX_ETH_PORT_SERIAL_CONTROL_REG(port_num), reg_data);
}
/*
 * eth_port_read_smi_reg - Read PHY registers
 *
 * DESCRIPTION:
 *	This routine utilizes the SMI interface to interact with the PHY in
 *	order to perform PHY register read.
 *
 * INPUT:
 *	unsigned int	port_num	Ethernet Port number.
 *	unsigned int	phy_reg		PHY register address offset.
 *	unsigned int	*value		Register value buffer.
 *
 * OUTPUT:
 *	Write the value of a specified PHY register into given buffer.
 *
 * RETURN:
 *	None. The read is abandoned, and the buffer left unchanged, if the
 *	PHY stays busy or the read data does not become valid.
 */
static void eth_port_read_smi_reg(unsigned int port_num,
				unsigned int phy_reg, unsigned int *value)
{
	int phy_addr = ethernet_phy_get(port_num);
	unsigned long flags;
	int i;

	/* the SMI register is a shared resource */
	spin_lock_irqsave(&mv643xx_eth_phy_lock, flags);

	/* wait for the SMI register to become available */
	for (i = 0; mv_read(MV643XX_ETH_SMI_REG) & ETH_SMI_BUSY; i++) {
		if (i == PHY_WAIT_ITERATIONS) {
			printk("mv643xx PHY busy timeout, port %d\n", port_num);
			goto out;
		}
		udelay(PHY_WAIT_MICRO_SECONDS);
	}

	mv_write(MV643XX_ETH_SMI_REG,
		(phy_addr << 16) | (phy_reg << 21) | ETH_SMI_OPCODE_READ);

	/* now wait for the data to be valid */
	for (i = 0; !(mv_read(MV643XX_ETH_SMI_REG) & ETH_SMI_READ_VALID); i++) {
		if (i == PHY_WAIT_ITERATIONS) {
			printk("mv643xx PHY read timeout, port %d\n", port_num);
			goto out;
		}
		udelay(PHY_WAIT_MICRO_SECONDS);
	}

	*value = mv_read(MV643XX_ETH_SMI_REG) & 0xffff;
out:
	spin_unlock_irqrestore(&mv643xx_eth_phy_lock, flags);
}
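/*
 * Note on the SMI command word written above: the PHY register number is
 * placed in bits 25:21 and the PHY address in bits 20:16, combined with
 * the read opcode; once ETH_SMI_READ_VALID is set, the register data is
 * returned in the low 16 bits of MV643XX_ETH_SMI_REG.
 */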
/*
 * eth_port_write_smi_reg - Write to PHY registers
 *
 * DESCRIPTION:
 *	This routine utilizes the SMI interface to interact with the PHY in
 *	order to perform writes to PHY registers.
 *
 * INPUT:
 *	unsigned int	eth_port_num	Ethernet Port number.
 *	unsigned int	phy_reg		PHY register address offset.
 *	unsigned int	value		Register value.
 *
 * OUTPUT:
 *	Write the given value to the specified PHY register.
 *
 * RETURN:
 *	None. The write is abandoned if the PHY stays busy.
 */
static void eth_port_write_smi_reg(unsigned int eth_port_num,
				unsigned int phy_reg, unsigned int value)
{
	int phy_addr;
	int i;
	unsigned long flags;

	phy_addr = ethernet_phy_get(eth_port_num);

	/* the SMI register is a shared resource */
	spin_lock_irqsave(&mv643xx_eth_phy_lock, flags);

	/* wait for the SMI register to become available */
	for (i = 0; mv_read(MV643XX_ETH_SMI_REG) & ETH_SMI_BUSY; i++) {
		if (i == PHY_WAIT_ITERATIONS) {
			printk("mv643xx PHY busy timeout, port %d\n",
								eth_port_num);
			goto out;
		}
		udelay(PHY_WAIT_MICRO_SECONDS);
	}

	mv_write(MV643XX_ETH_SMI_REG, (phy_addr << 16) | (phy_reg << 21) |
				ETH_SMI_OPCODE_WRITE | (value & 0xffff));
out:
	spin_unlock_irqrestore(&mv643xx_eth_phy_lock, flags);
}
/*
 * Wrappers for MII support library.
 */
static int mv643xx_mdio_read(struct net_device *dev, int phy_id, int location)
{
	unsigned int val;
	struct mv643xx_private *mp = netdev_priv(dev);

	eth_port_read_smi_reg(mp->port_num, location, &val);
	return val;
}

static void mv643xx_mdio_write(struct net_device *dev, int phy_id,
						int location, int val)
{
	struct mv643xx_private *mp = netdev_priv(dev);

	eth_port_write_smi_reg(mp->port_num, location, val);
}
/*
 * eth_port_send - Send an Ethernet packet
 *
 * DESCRIPTION:
 *	This routine sends a given packet described by the p_pkt_info
 *	parameter. It supports transmitting a packet spanned over multiple
 *	buffers. The routine updates the 'curr' and 'first' indexes according
 *	to the packet segment passed to it. If the packet segment is the
 *	first, the 'first' index is updated. In any case, the 'curr' index is
 *	updated. If the routine runs into a Tx resource error, it assigns the
 *	'curr' index to 'first'. This way the function can abort the Tx
 *	process of a packet spread over multiple descriptors.
 *
 * INPUT:
 *	struct mv643xx_private	*mp		Ethernet Port Control struct.
 *	struct pkt_info		*p_pkt_info	User packet buffer.
 *
 * OUTPUT:
 *	Tx ring 'curr' and 'first' indexes are updated.
 *
 * RETURN:
 *	ETH_QUEUE_FULL in case of Tx resource error.
 *	ETH_ERROR in case the routine can not access Tx desc ring.
 *	ETH_QUEUE_LAST_RESOURCE if the routine uses the last Tx resource.
 *	ETH_OK otherwise.
 */
#ifdef MV643XX_CHECKSUM_OFFLOAD_TX
/*
 * Modified to include the first descriptor pointer in case of SG
 */
static ETH_FUNC_RET_STATUS eth_port_send(struct mv643xx_private *mp,
					struct pkt_info *p_pkt_info)
{
	int tx_desc_curr, tx_desc_used, tx_first_desc, tx_next_desc;
	struct eth_tx_desc *current_descriptor;
	struct eth_tx_desc *first_descriptor;
	u32 command;

	/* Do not process Tx ring in case of Tx ring resource error */
	if (mp->tx_resource_err)
		return ETH_QUEUE_FULL;

	/*
	 * The hardware requires that each buffer that is <= 8 bytes
	 * in length must be aligned on an 8 byte boundary.
	 */
	if (p_pkt_info->byte_cnt <= 8 && p_pkt_info->buf_ptr & 0x7) {
		printk(KERN_ERR
			"mv643xx_eth port %d: packet size <= 8 problem\n",
			mp->port_num);
		return ETH_ERROR;
	}

	mp->tx_desc_count++;
	BUG_ON(mp->tx_desc_count > mp->tx_ring_size);

	/* Get the Tx Desc ring indexes */
	tx_desc_curr = mp->tx_curr_desc_q;
	tx_desc_used = mp->tx_used_desc_q;

	current_descriptor = &mp->p_tx_desc_area[tx_desc_curr];

	tx_next_desc = (tx_desc_curr + 1) % mp->tx_ring_size;

	current_descriptor->buf_ptr = p_pkt_info->buf_ptr;
	current_descriptor->byte_cnt = p_pkt_info->byte_cnt;
	current_descriptor->l4i_chk = p_pkt_info->l4i_chk;
	mp->tx_skb[tx_desc_curr] = p_pkt_info->return_info;

	command = p_pkt_info->cmd_sts | ETH_ZERO_PADDING | ETH_GEN_CRC |
						ETH_BUFFER_OWNED_BY_DMA;
	if (command & ETH_TX_FIRST_DESC) {
		tx_first_desc = tx_desc_curr;
		mp->tx_first_desc_q = tx_first_desc;
		first_descriptor = current_descriptor;
		mp->tx_first_command = command;
	} else {
		tx_first_desc = mp->tx_first_desc_q;
		first_descriptor = &mp->p_tx_desc_area[tx_first_desc];
		BUG_ON(first_descriptor == NULL);
		current_descriptor->cmd_sts = command;
	}

	if (command & ETH_TX_LAST_DESC) {
		wmb();
		first_descriptor->cmd_sts = mp->tx_first_command;

		wmb();
		mv643xx_eth_port_enable_tx(mp->port_num,
						mp->port_tx_queue_command);

		/*
		 * Finish Tx packet. Update first desc in case of Tx resource
		 * error
		 */
		tx_first_desc = tx_next_desc;
		mp->tx_first_desc_q = tx_first_desc;
	}

	/* Check for ring index overlap in the Tx desc ring */
	if (tx_next_desc == tx_desc_used) {
		mp->tx_resource_err = 1;
		mp->tx_curr_desc_q = tx_first_desc;

		return ETH_QUEUE_LAST_RESOURCE;
	}

	mp->tx_curr_desc_q = tx_next_desc;

	return ETH_OK;
}
#else
static ETH_FUNC_RET_STATUS eth_port_send(struct mv643xx_private *mp,
					struct pkt_info *p_pkt_info)
{
	int tx_desc_curr;
	int tx_desc_used;
	struct eth_tx_desc *current_descriptor;
	unsigned int command_status;

	/* Do not process Tx ring in case of Tx ring resource error */
	if (mp->tx_resource_err)
		return ETH_QUEUE_FULL;

	mp->tx_desc_count++;
	BUG_ON(mp->tx_desc_count > mp->tx_ring_size);

	/* Get the Tx Desc ring indexes */
	tx_desc_curr = mp->tx_curr_desc_q;
	tx_desc_used = mp->tx_used_desc_q;
	current_descriptor = &mp->p_tx_desc_area[tx_desc_curr];

	command_status = p_pkt_info->cmd_sts | ETH_ZERO_PADDING | ETH_GEN_CRC;
	current_descriptor->buf_ptr = p_pkt_info->buf_ptr;
	current_descriptor->byte_cnt = p_pkt_info->byte_cnt;
	mp->tx_skb[tx_desc_curr] = p_pkt_info->return_info;

	/* Set last desc with DMA ownership and interrupt enable. */
	wmb();
	current_descriptor->cmd_sts = command_status |
			ETH_BUFFER_OWNED_BY_DMA | ETH_TX_ENABLE_INTERRUPT;

	wmb();
	mv643xx_eth_port_enable_tx(mp->port_num, mp->port_tx_queue_command);

	/* Finish Tx packet. Update first desc in case of Tx resource error */
	tx_desc_curr = (tx_desc_curr + 1) % mp->tx_ring_size;

	/* Update the current descriptor */
	mp->tx_curr_desc_q = tx_desc_curr;

	/* Check for ring index overlap in the Tx desc ring */
	if (tx_desc_curr == tx_desc_used) {
		mp->tx_resource_err = 1;
		return ETH_QUEUE_LAST_RESOURCE;
	}

	return ETH_OK;
}
#endif
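/*
 * Note on descriptor ownership in both eth_port_send() variants above:
 * the buffer pointer, byte count and return_info are written first, and
 * only then is cmd_sts updated with ETH_BUFFER_OWNED_BY_DMA (via the
 * first descriptor in the scatter/gather case), so the SDMA engine never
 * sees a partially initialized descriptor. The memory barriers enforce
 * that ordering before the Tx queue is kicked.
 */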
/*
 * eth_tx_return_desc - Free all used Tx descriptors
 *
 * DESCRIPTION:
 *	This routine returns the transmitted packet information to the caller.
 *	It uses the 'first' index to support Tx desc return in case a transmit
 *	of a packet spanned over multiple buffers is still in process.
 *	In case the Tx queue was in "resource error" condition, where there are
 *	no available Tx resources, the function resets the resource error flag.
 *
 * INPUT:
 *	struct mv643xx_private	*mp		Ethernet Port Control struct.
 *	struct pkt_info		*p_pkt_info	User packet buffer.
 *
 * OUTPUT:
 *	Tx ring 'first' and 'used' indexes are updated.
 *
 * RETURN:
 *	ETH_OK on success.
 *	ETH_ERROR otherwise.
 */
static ETH_FUNC_RET_STATUS eth_tx_return_desc(struct mv643xx_private *mp,
						struct pkt_info *p_pkt_info)
{
	int tx_desc_used;
	int tx_busy_desc;
	struct eth_tx_desc *p_tx_desc_used;
	unsigned int command_status;
	unsigned long flags;
	int err = ETH_OK;

	spin_lock_irqsave(&mp->lock, flags);

#ifdef MV643XX_CHECKSUM_OFFLOAD_TX
	tx_busy_desc = mp->tx_first_desc_q;
#else
	tx_busy_desc = mp->tx_curr_desc_q;
#endif

	/* Get the Tx Desc ring indexes */
	tx_desc_used = mp->tx_used_desc_q;

	p_tx_desc_used = &mp->p_tx_desc_area[tx_desc_used];

	if (p_tx_desc_used == NULL) {
		err = ETH_ERROR;
		goto out;
	}

	/* Stop release. About to overlap the current available Tx descriptor */
	if (tx_desc_used == tx_busy_desc && !mp->tx_resource_err) {
		err = ETH_ERROR;
		goto out;
	}

	command_status = p_tx_desc_used->cmd_sts;

	/* Still transmitting... */
	if (command_status & (ETH_BUFFER_OWNED_BY_DMA)) {
		err = ETH_ERROR;
		goto out;
	}

	/* Pass the packet information to the caller */
	p_pkt_info->cmd_sts = command_status;
	p_pkt_info->return_info = mp->tx_skb[tx_desc_used];
	p_pkt_info->buf_ptr = p_tx_desc_used->buf_ptr;
	p_pkt_info->byte_cnt = p_tx_desc_used->byte_cnt;
	mp->tx_skb[tx_desc_used] = NULL;

	/* Update the next descriptor to release. */
	mp->tx_used_desc_q = (tx_desc_used + 1) % mp->tx_ring_size;

	/* Any Tx return cancels the Tx resource error status */
	mp->tx_resource_err = 0;

	BUG_ON(mp->tx_desc_count == 0);
	mp->tx_desc_count--;

out:
	spin_unlock_irqrestore(&mp->lock, flags);

	return err;
}
/*
 * eth_port_receive - Get received information from Rx ring.
 *
 * DESCRIPTION:
 *	This routine returns the received data to the caller. There is no
 *	data copying during routine operation. All information is returned
 *	using a pointer to the packet information struct passed from the
 *	caller. If the routine exhausts Rx ring resources then the resource
 *	error flag is set.
 *
 * INPUT:
 *	struct mv643xx_private	*mp		Ethernet Port Control struct.
 *	struct pkt_info		*p_pkt_info	User packet buffer.
 *
 * OUTPUT:
 *	Rx ring current and used indexes are updated.
 *
 * RETURN:
 *	ETH_ERROR in case the routine can not access Rx desc ring.
 *	ETH_QUEUE_FULL if Rx ring resources are exhausted.
 *	ETH_END_OF_JOB if there is no received data.
 *	ETH_OK otherwise.
 */
static ETH_FUNC_RET_STATUS eth_port_receive(struct mv643xx_private *mp,
						struct pkt_info *p_pkt_info)
{
	int rx_next_curr_desc, rx_curr_desc, rx_used_desc;
	volatile struct eth_rx_desc *p_rx_desc;
	unsigned int command_status;
	unsigned long flags;

	/* Do not process Rx ring in case of Rx ring resource error */
	if (mp->rx_resource_err)
		return ETH_QUEUE_FULL;

	spin_lock_irqsave(&mp->lock, flags);

	/* Get the Rx Desc ring 'curr and 'used' indexes */
	rx_curr_desc = mp->rx_curr_desc_q;
	rx_used_desc = mp->rx_used_desc_q;

	p_rx_desc = &mp->p_rx_desc_area[rx_curr_desc];

	/* The following parameters are used to save readings from memory */
	command_status = p_rx_desc->cmd_sts;
	rmb();

	/* Nothing to receive... */
	if (command_status & (ETH_BUFFER_OWNED_BY_DMA)) {
		spin_unlock_irqrestore(&mp->lock, flags);
		return ETH_END_OF_JOB;
	}

	p_pkt_info->byte_cnt = (p_rx_desc->byte_cnt) - RX_BUF_OFFSET;
	p_pkt_info->cmd_sts = command_status;
	p_pkt_info->buf_ptr = (p_rx_desc->buf_ptr) + RX_BUF_OFFSET;
	p_pkt_info->return_info = mp->rx_skb[rx_curr_desc];
	p_pkt_info->l4i_chk = p_rx_desc->buf_size;

	/*
	 * Clean the return info field to indicate that the
	 * packet has been moved to the upper layers
	 */
	mp->rx_skb[rx_curr_desc] = NULL;

	/* Update current index in data structure */
	rx_next_curr_desc = (rx_curr_desc + 1) % mp->rx_ring_size;
	mp->rx_curr_desc_q = rx_next_curr_desc;

	/* Rx descriptors exhausted. Set the Rx ring resource error flag */
	if (rx_next_curr_desc == rx_used_desc)
		mp->rx_resource_err = 1;

	spin_unlock_irqrestore(&mp->lock, flags);

	return ETH_OK;
}
/*
 * eth_rx_return_buff - Returns a Rx buffer back to the Rx ring.
 *
 * DESCRIPTION:
 *	This routine returns a Rx buffer back to the Rx ring. It retrieves the
 *	next 'used' descriptor and attaches the returned buffer to it.
 *	In case the Rx ring was in "resource error" condition, where there are
 *	no available Rx resources, the function resets the resource error flag.
 *
 * INPUT:
 *	struct mv643xx_private	*mp		Ethernet Port Control struct.
 *	struct pkt_info		*p_pkt_info	Information on returned buffer.
 *
 * OUTPUT:
 *	New available Rx resource in Rx descriptor ring.
 *
 * RETURN:
 *	ETH_ERROR in case the routine can not access Rx desc ring.
 *	ETH_OK otherwise.
 */
static ETH_FUNC_RET_STATUS eth_rx_return_buff(struct mv643xx_private *mp,
						struct pkt_info *p_pkt_info)
{
	int used_rx_desc;	/* Where to return Rx resource */
	volatile struct eth_rx_desc *p_used_rx_desc;
	unsigned long flags;

	spin_lock_irqsave(&mp->lock, flags);

	/* Get 'used' Rx descriptor */
	used_rx_desc = mp->rx_used_desc_q;
	p_used_rx_desc = &mp->p_rx_desc_area[used_rx_desc];

	p_used_rx_desc->buf_ptr = p_pkt_info->buf_ptr;
	p_used_rx_desc->buf_size = p_pkt_info->byte_cnt;
	mp->rx_skb[used_rx_desc] = p_pkt_info->return_info;

	/* Flush the write pipe */

	/* Return the descriptor to DMA ownership */
	wmb();
	p_used_rx_desc->cmd_sts =
			ETH_BUFFER_OWNED_BY_DMA | ETH_RX_ENABLE_INTERRUPT;
	wmb();

	/* Move the used descriptor pointer to the next descriptor */
	mp->rx_used_desc_q = (used_rx_desc + 1) % mp->rx_ring_size;

	/* Any Rx return cancels the Rx resource error status */
	mp->rx_resource_err = 0;

	spin_unlock_irqrestore(&mp->lock, flags);

	return ETH_OK;
}
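/*
 * Note: cmd_sts is the last descriptor field written when recycling a
 * buffer, so ETH_BUFFER_OWNED_BY_DMA only becomes visible to the SDMA
 * engine after buf_ptr and buf_size are already in place.
 */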
/************* Begin ethtool support *************************/

struct mv643xx_stats {
	char stat_string[ETH_GSTRING_LEN];
	int sizeof_stat;
	int stat_offset;
};

#define MV643XX_STAT(m) sizeof(((struct mv643xx_private *)0)->m), \
					offsetof(struct mv643xx_private, m)

static const struct mv643xx_stats mv643xx_gstrings_stats[] = {
	{ "rx_packets", MV643XX_STAT(stats.rx_packets) },
	{ "tx_packets", MV643XX_STAT(stats.tx_packets) },
	{ "rx_bytes", MV643XX_STAT(stats.rx_bytes) },
	{ "tx_bytes", MV643XX_STAT(stats.tx_bytes) },
	{ "rx_errors", MV643XX_STAT(stats.rx_errors) },
	{ "tx_errors", MV643XX_STAT(stats.tx_errors) },
	{ "rx_dropped", MV643XX_STAT(stats.rx_dropped) },
	{ "tx_dropped", MV643XX_STAT(stats.tx_dropped) },
	{ "good_octets_received", MV643XX_STAT(mib_counters.good_octets_received) },
	{ "bad_octets_received", MV643XX_STAT(mib_counters.bad_octets_received) },
	{ "internal_mac_transmit_err", MV643XX_STAT(mib_counters.internal_mac_transmit_err) },
	{ "good_frames_received", MV643XX_STAT(mib_counters.good_frames_received) },
	{ "bad_frames_received", MV643XX_STAT(mib_counters.bad_frames_received) },
	{ "broadcast_frames_received", MV643XX_STAT(mib_counters.broadcast_frames_received) },
	{ "multicast_frames_received", MV643XX_STAT(mib_counters.multicast_frames_received) },
	{ "frames_64_octets", MV643XX_STAT(mib_counters.frames_64_octets) },
	{ "frames_65_to_127_octets", MV643XX_STAT(mib_counters.frames_65_to_127_octets) },
	{ "frames_128_to_255_octets", MV643XX_STAT(mib_counters.frames_128_to_255_octets) },
	{ "frames_256_to_511_octets", MV643XX_STAT(mib_counters.frames_256_to_511_octets) },
	{ "frames_512_to_1023_octets", MV643XX_STAT(mib_counters.frames_512_to_1023_octets) },
	{ "frames_1024_to_max_octets", MV643XX_STAT(mib_counters.frames_1024_to_max_octets) },
	{ "good_octets_sent", MV643XX_STAT(mib_counters.good_octets_sent) },
	{ "good_frames_sent", MV643XX_STAT(mib_counters.good_frames_sent) },
	{ "excessive_collision", MV643XX_STAT(mib_counters.excessive_collision) },
	{ "multicast_frames_sent", MV643XX_STAT(mib_counters.multicast_frames_sent) },
	{ "broadcast_frames_sent", MV643XX_STAT(mib_counters.broadcast_frames_sent) },
	{ "unrec_mac_control_received", MV643XX_STAT(mib_counters.unrec_mac_control_received) },
	{ "fc_sent", MV643XX_STAT(mib_counters.fc_sent) },
	{ "good_fc_received", MV643XX_STAT(mib_counters.good_fc_received) },
	{ "bad_fc_received", MV643XX_STAT(mib_counters.bad_fc_received) },
	{ "undersize_received", MV643XX_STAT(mib_counters.undersize_received) },
	{ "fragments_received", MV643XX_STAT(mib_counters.fragments_received) },
	{ "oversize_received", MV643XX_STAT(mib_counters.oversize_received) },
	{ "jabber_received", MV643XX_STAT(mib_counters.jabber_received) },
	{ "mac_receive_error", MV643XX_STAT(mib_counters.mac_receive_error) },
	{ "bad_crc_event", MV643XX_STAT(mib_counters.bad_crc_event) },
	{ "collision", MV643XX_STAT(mib_counters.collision) },
	{ "late_collision", MV643XX_STAT(mib_counters.late_collision) },
};

#define MV643XX_STATS_LEN	\
	sizeof(mv643xx_gstrings_stats) / sizeof(struct mv643xx_stats)
static void mv643xx_get_drvinfo(struct net_device *netdev,
				struct ethtool_drvinfo *drvinfo)
{
	strncpy(drvinfo->driver, mv643xx_driver_name, 32);
	strncpy(drvinfo->version, mv643xx_driver_version, 32);
	strncpy(drvinfo->fw_version, "N/A", 32);
	strncpy(drvinfo->bus_info, "mv643xx", 32);
	drvinfo->n_stats = MV643XX_STATS_LEN;
}

static int mv643xx_get_stats_count(struct net_device *netdev)
{
	return MV643XX_STATS_LEN;
}
static void mv643xx_get_ethtool_stats(struct net_device *netdev,
				struct ethtool_stats *stats, uint64_t *data)
{
	struct mv643xx_private *mp = netdev->priv;
	int i;

	eth_update_mib_counters(mp);

	for (i = 0; i < MV643XX_STATS_LEN; i++) {
		char *p = (char *)mp + mv643xx_gstrings_stats[i].stat_offset;
		data[i] = (mv643xx_gstrings_stats[i].sizeof_stat ==
			sizeof(uint64_t)) ? *(uint64_t *)p : *(uint32_t *)p;
	}
}
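/*
 * Note: MV643XX_STAT records both the size and the byte offset of each
 * field inside struct mv643xx_private, which is what lets the loop above
 * fetch 32-bit stats and 64-bit MIB counters through one table: data[i]
 * is read as a uint64_t when sizeof_stat says so, otherwise as uint32_t.
 */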
static void mv643xx_get_strings(struct net_device *netdev, uint32_t stringset,
				uint8_t *data)
{
	int i;

	switch (stringset) {
	case ETH_SS_STATS:
		for (i = 0; i < MV643XX_STATS_LEN; i++) {
			memcpy(data + i * ETH_GSTRING_LEN,
					mv643xx_gstrings_stats[i].stat_string,
					ETH_GSTRING_LEN);
		}
		break;
	}
}
static u32 mv643xx_eth_get_link(struct net_device *dev)
{
	struct mv643xx_private *mp = netdev_priv(dev);

	return mii_link_ok(&mp->mii);
}

static int mv643xx_eth_nway_restart(struct net_device *dev)
{
	struct mv643xx_private *mp = netdev_priv(dev);

	return mii_nway_restart(&mp->mii);
}

static int mv643xx_eth_do_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
{
	struct mv643xx_private *mp = netdev_priv(dev);

	return generic_mii_ioctl(&mp->mii, if_mii(ifr), cmd, NULL);
}
3207 static struct ethtool_ops mv643xx_ethtool_ops
= {
3208 .get_settings
= mv643xx_get_settings
,
3209 .set_settings
= mv643xx_set_settings
,
3210 .get_drvinfo
= mv643xx_get_drvinfo
,
3211 .get_link
= mv643xx_eth_get_link
,
3212 .get_sg
= ethtool_op_get_sg
,
3213 .set_sg
= ethtool_op_set_sg
,
3214 .get_strings
= mv643xx_get_strings
,
3215 .get_stats_count
= mv643xx_get_stats_count
,
3216 .get_ethtool_stats
= mv643xx_get_ethtool_stats
,
3217 .get_strings
= mv643xx_get_strings
,
3218 .get_stats_count
= mv643xx_get_stats_count
,
3219 .get_ethtool_stats
= mv643xx_get_ethtool_stats
,
3220 .nway_reset
= mv643xx_eth_nway_restart
,
3223 /************* End ethtool support *************************/