/*
 * drivers/net/mv643xx_eth.c - Driver for MV643XX ethernet ports
 * Copyright (C) 2002 Matthew Dharm <mdharm@momenco.com>
 *
 * Based on the 64360 driver from:
 * Copyright (C) 2002 rabeeh@galileo.co.il
 *
 * Copyright (C) 2003 PMC-Sierra, Inc.,
 *	written by Manish Lachwani (lachwani@pmc-sierra.com)
 *
 * Copyright (C) 2003 Ralf Baechle <ralf@linux-mips.org>
 *
 * Copyright (C) 2004-2005 MontaVista Software, Inc.
 *	Dale Farnsworth <dale@farnsworth.org>
 *
 * Copyright (C) 2004 Steven J. Hill <sjhill1@rockwellcollins.com>
 *	<sjhill@realitydiluted.com>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version 2
 * of the License, or (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 */
#include <linux/init.h>
#include <linux/dma-mapping.h>
#include <linux/tcp.h>
#include <linux/udp.h>
#include <linux/etherdevice.h>

#include <linux/bitops.h>
#include <linux/delay.h>
#include <linux/ethtool.h>

#include <asm/types.h>
#include <asm/pgtable.h>
#include <asm/system.h>
#include <asm/delay.h>
#include "mv643xx_eth.h"
/*
 * The first part is the high level driver of the gigE ethernet ports.
 */

#define WRAP		NET_IP_ALIGN + ETH_HLEN + VLAN_HLEN + FCS_LEN
#define RX_SKB_SIZE	((dev->mtu + WRAP + 7) & ~0x7)
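/*
 * Worked example (illustrative, assuming NET_IP_ALIGN == 2, ETH_HLEN == 14,
 * VLAN_HLEN == 4 and FCS_LEN == 4): WRAP adds 24 bytes of header/trailer
 * overhead, so for the default MTU of 1500 RX_SKB_SIZE rounds 1524 up to the
 * next multiple of 8, i.e. 1528 bytes per receive buffer.
 */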
#define INT_CAUSE_UNMASK_ALL		0x0007ffff
#define INT_CAUSE_UNMASK_ALL_EXT	0x0011ffff
#ifdef MV643XX_RX_QUEUE_FILL_ON_TASK
#define INT_CAUSE_MASK_ALL		0x00000000
#define INT_CAUSE_CHECK_BITS		INT_CAUSE_UNMASK_ALL
#define INT_CAUSE_CHECK_BITS_EXT	INT_CAUSE_UNMASK_ALL_EXT
#endif

#ifdef MV643XX_CHECKSUM_OFFLOAD_TX
#define MAX_DESCS_PER_SKB	(MAX_SKB_FRAGS + 1)
#else
#define MAX_DESCS_PER_SKB	1
#endif

#define PHY_WAIT_ITERATIONS	1000	/* 1000 iterations * 10uS = 10mS max */
#define PHY_WAIT_MICRO_SECONDS	10
/* Static function declarations */
static int eth_port_link_is_up(unsigned int eth_port_num);
static void eth_port_uc_addr_get(struct net_device *dev,
						unsigned char *MacAddr);
static int mv643xx_eth_real_open(struct net_device *);
static int mv643xx_eth_real_stop(struct net_device *);
static int mv643xx_eth_change_mtu(struct net_device *, int);
static struct net_device_stats *mv643xx_eth_get_stats(struct net_device *);
static void eth_port_init_mac_tables(unsigned int eth_port_num);
static int mv643xx_poll(struct net_device *dev, int *budget);
static void ethernet_phy_set(unsigned int eth_port_num, int phy_addr);
static int ethernet_phy_detect(unsigned int eth_port_num);
static struct ethtool_ops mv643xx_ethtool_ops;

static char mv643xx_driver_name[] = "mv643xx_eth";
static char mv643xx_driver_version[] = "1.0";

static void __iomem *mv643xx_eth_shared_base;

/* used to protect MV643XX_ETH_SMI_REG, which is shared across ports */
static spinlock_t mv643xx_eth_phy_lock = SPIN_LOCK_UNLOCKED;
static inline u32 mv_read(int offset)
{
        void __iomem *reg_base;

        reg_base = mv643xx_eth_shared_base - MV643XX_ETH_SHARED_REGS;

        return readl(reg_base + offset);
}

static inline void mv_write(int offset, u32 data)
{
        void __iomem *reg_base;

        reg_base = mv643xx_eth_shared_base - MV643XX_ETH_SHARED_REGS;
        writel(data, reg_base + offset);
}
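/*
 * Usage sketch (added for clarity, not part of the original source): the
 * per-port register macros expand to absolute offsets within the MV643xx
 * register space, and mv_read()/mv_write() translate them into the
 * ioremapped shared window by subtracting MV643XX_ETH_SHARED_REGS, e.g.:
 *
 *	mv_write(MV643XX_ETH_INTERRUPT_MASK_REG(port_num), 0);
 *	cause = mv_read(MV643XX_ETH_INTERRUPT_CAUSE_REG(port_num));
 */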
/*
 * Changes MTU (maximum transfer unit) of the gigabit ethernet port
 *
 * Input :	pointer to ethernet interface network device structure
 * Output :	0 upon success, -EINVAL upon failure
 */
static int mv643xx_eth_change_mtu(struct net_device *dev, int new_mtu)
{
        struct mv643xx_private *mp = netdev_priv(dev);
        unsigned long flags;

        spin_lock_irqsave(&mp->lock, flags);

        if ((new_mtu > 9500) || (new_mtu < 64)) {
                spin_unlock_irqrestore(&mp->lock, flags);
                return -EINVAL;
        }

        dev->mtu = new_mtu;

        /*
         * Stop then re-open the interface. This will allocate RX skb's with
         * the new MTU.
         * There is a possible danger that the open will not succeed if the
         * system is out of memory, which would make the open fail.
         */
        if (netif_running(dev)) {
                if (mv643xx_eth_real_stop(dev))
                        printk(KERN_ERR
                                "%s: Fatal error on stopping device\n",
                                dev->name);
                if (mv643xx_eth_real_open(dev))
                        printk(KERN_ERR
                                "%s: Fatal error on opening device\n",
                                dev->name);
        }

        spin_unlock_irqrestore(&mp->lock, flags);
        return 0;
}
/*
 * mv643xx_eth_rx_task
 *
 * Fills / refills RX queue on a certain gigabit ethernet port
 *
 * Input :	pointer to ethernet interface network device structure
 */
static void mv643xx_eth_rx_task(void *data)
{
        struct net_device *dev = (struct net_device *)data;
        struct mv643xx_private *mp = netdev_priv(dev);
        struct pkt_info pkt_info;
        struct sk_buff *skb;

        if (test_and_set_bit(0, &mp->rx_task_busy))
                panic("%s: Error in test_set_bit / clear_bit", dev->name);

        while (mp->rx_ring_skbs < (mp->rx_ring_size - 5)) {
                skb = dev_alloc_skb(RX_SKB_SIZE);
                if (!skb)
                        break;
                mp->rx_ring_skbs++;
                pkt_info.cmd_sts = ETH_RX_ENABLE_INTERRUPT;
                pkt_info.byte_cnt = RX_SKB_SIZE;
                pkt_info.buf_ptr = dma_map_single(NULL, skb->data, RX_SKB_SIZE,
                                                        DMA_FROM_DEVICE);
                pkt_info.return_info = skb;
                if (eth_rx_return_buff(mp, &pkt_info) != ETH_OK) {
                        printk(KERN_ERR
                                "%s: Error allocating RX Ring\n", dev->name);
                        break;
                }
        }
        clear_bit(0, &mp->rx_task_busy);
        /*
         * If the RX ring is empty of SKBs, set a timer to try allocating
         * again at a later time.
         */
        if ((mp->rx_ring_skbs == 0) && (mp->rx_timer_flag == 0)) {
                printk(KERN_INFO "%s: Rx ring is empty\n", dev->name);
                mp->timeout.expires = jiffies + (HZ / 10);	/* 100 ms */
                add_timer(&mp->timeout);
                mp->rx_timer_flag = 1;
        }
#ifdef MV643XX_RX_QUEUE_FILL_ON_TASK
        else {
                /* Return interrupts */
                mv_write(MV643XX_ETH_INTERRUPT_MASK_REG(mp->port_num),
                                                        INT_CAUSE_UNMASK_ALL);
        }
#endif
}
/*
 * mv643xx_eth_rx_task_timer_wrapper
 *
 * Timer routine to wake up the RX queue filling task. This function is
 * used only when the RX queue is empty and all calls to alloc_skb have
 * failed (due to an out of memory event).
 *
 * Input :	pointer to ethernet interface network device structure
 */
static void mv643xx_eth_rx_task_timer_wrapper(unsigned long data)
{
        struct net_device *dev = (struct net_device *)data;
        struct mv643xx_private *mp = netdev_priv(dev);

        mp->rx_timer_flag = 0;
        mv643xx_eth_rx_task((void *)data);
}
/*
 * mv643xx_eth_update_mac_address
 *
 * Update the MAC address of the port in the address table
 *
 * Input :	pointer to ethernet interface network device structure
 */
static void mv643xx_eth_update_mac_address(struct net_device *dev)
{
        struct mv643xx_private *mp = netdev_priv(dev);
        unsigned int port_num = mp->port_num;

        eth_port_init_mac_tables(port_num);
        memcpy(mp->port_mac_addr, dev->dev_addr, 6);
        eth_port_uc_addr_set(port_num, mp->port_mac_addr);
}
/*
 * mv643xx_eth_set_rx_mode
 *
 * Change from promiscuous to regular rx mode
 *
 * Input :	pointer to ethernet interface network device structure
 */
static void mv643xx_eth_set_rx_mode(struct net_device *dev)
{
        struct mv643xx_private *mp = netdev_priv(dev);
        u32 config_reg;

        config_reg = ethernet_get_config_reg(mp->port_num);
        if (dev->flags & IFF_PROMISC)
                config_reg |= (u32) MV643XX_ETH_UNICAST_PROMISCUOUS_MODE;
        else
                config_reg &= ~(u32) MV643XX_ETH_UNICAST_PROMISCUOUS_MODE;
        ethernet_set_config_reg(mp->port_num, config_reg);
}
/*
 * mv643xx_eth_set_mac_address
 *
 * Change the interface's MAC address.
 * No special hardware handling is needed because the interface is always
 * put in promiscuous mode.
 *
 * Input :	pointer to ethernet interface network device structure and
 *		a pointer to the designated entry to be added to the cache.
 * Output :	zero upon success, negative upon failure
 */
static int mv643xx_eth_set_mac_address(struct net_device *dev, void *addr)
{
        int i;

        for (i = 0; i < 6; i++)
                /* +2 is for the offset of the HW addr type */
                dev->dev_addr[i] = ((unsigned char *)addr)[i + 2];
        mv643xx_eth_update_mac_address(dev);
        return 0;
}
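/*
 * For clarity (added, not in the original source): 'addr' is really a
 * struct sockaddr, so the byte-wise copy above is equivalent to
 *
 *	struct sockaddr *sa = addr;
 *	memcpy(dev->dev_addr, sa->sa_data, 6);
 *
 * The +2 skips the leading 2-byte sa_family field.
 */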
/*
 * mv643xx_eth_tx_timeout
 *
 * Called upon a timeout on transmitting a packet
 *
 * Input :	pointer to ethernet interface network device structure.
 */
static void mv643xx_eth_tx_timeout(struct net_device *dev)
{
        struct mv643xx_private *mp = netdev_priv(dev);

        printk(KERN_INFO "%s: TX timeout  ", dev->name);

        /* Do the reset outside of interrupt context */
        schedule_work(&mp->tx_timeout_task);
}
/*
 * mv643xx_eth_tx_timeout_task
 *
 * Actual routine to reset the adapter when a timeout on Tx has occurred
 */
static void mv643xx_eth_tx_timeout_task(struct net_device *dev)
{
        struct mv643xx_private *mp = netdev_priv(dev);

        netif_device_detach(dev);
        eth_port_reset(mp->port_num);
        eth_port_start(mp);
        netif_device_attach(dev);
}
/*
 * mv643xx_eth_free_tx_queue
 *
 * Input :	dev - a pointer to the required interface
 *
 * Output :	0 if it was able to release an skb, nonzero otherwise
 */
static int mv643xx_eth_free_tx_queue(struct net_device *dev,
                                        unsigned int eth_int_cause_ext)
{
        struct mv643xx_private *mp = netdev_priv(dev);
        struct net_device_stats *stats = &mp->stats;
        struct pkt_info pkt_info;
        int released = 1;

        if (!(eth_int_cause_ext & (BIT0 | BIT8)))
                return released;

        spin_lock(&mp->lock);

        /* Check only queue 0 */
        while (eth_tx_return_desc(mp, &pkt_info) == ETH_OK) {
                if (pkt_info.cmd_sts & BIT0) {
                        printk("%s: Error in TX\n", dev->name);
                        stats->tx_errors++;
                }

                /*
                 * If return_info is non-zero, release the skb.
                 * return_info is only non-zero when a scatter/gather packet
                 * was transmitted, where only the last skb releases the
                 * whole chain.
                 */
                if (pkt_info.return_info) {
                        if (skb_shinfo(pkt_info.return_info)->nr_frags)
                                dma_unmap_page(NULL, pkt_info.buf_ptr,
                                                pkt_info.byte_cnt,
                                                DMA_TO_DEVICE);
                        else
                                dma_unmap_single(NULL, pkt_info.buf_ptr,
                                                pkt_info.byte_cnt,
                                                DMA_TO_DEVICE);

                        dev_kfree_skb_irq(pkt_info.return_info);
                        released = 0;

                        /*
                         * Decrement the number of outstanding skbs counter on
                         * the TX queue.
                         */
                        if (mp->tx_ring_skbs == 0)
                                panic("ERROR - TX outstanding SKBs"
                                                " counter is corrupted");
                        mp->tx_ring_skbs--;
                } else
                        dma_unmap_page(NULL, pkt_info.buf_ptr,
                                        pkt_info.byte_cnt, DMA_TO_DEVICE);
        }

        spin_unlock(&mp->lock);

        return released;
}
/*
 * mv643xx_eth_receive
 *
 * This function forwards packets that are received from the port's
 * queues toward the kernel core, or FastRoutes them to another interface.
 *
 * Input :	dev - a pointer to the required interface
 *		max - maximum number to receive (0 means unlimited)
 *
 * Output :	number of served packets
 */
#ifdef MV643XX_NAPI
static int mv643xx_eth_receive_queue(struct net_device *dev, int budget)
#else
static int mv643xx_eth_receive_queue(struct net_device *dev)
#endif
{
        struct mv643xx_private *mp = netdev_priv(dev);
        struct net_device_stats *stats = &mp->stats;
        unsigned int received_packets = 0;
        struct sk_buff *skb;
        struct pkt_info pkt_info;

#ifdef MV643XX_NAPI
        while (eth_port_receive(mp, &pkt_info) == ETH_OK && budget > 0) {
                budget--;
#else
        while (eth_port_receive(mp, &pkt_info) == ETH_OK) {
#endif
                received_packets++;

                /* Update statistics. Note byte count includes 4 byte CRC count */
                stats->rx_packets++;
                stats->rx_bytes += pkt_info.byte_cnt;
                skb = pkt_info.return_info;
                /*
                 * In case we received a packet without the first / last bits
                 * on, OR the error summary bit is on, the packet needs to be
                 * dropped.
                 */
                if (((pkt_info.cmd_sts
                                & (ETH_RX_FIRST_DESC | ETH_RX_LAST_DESC)) !=
                                        (ETH_RX_FIRST_DESC | ETH_RX_LAST_DESC))
                                || (pkt_info.cmd_sts & ETH_ERROR_SUMMARY)) {
                        if ((pkt_info.cmd_sts & (ETH_RX_FIRST_DESC |
                                                        ETH_RX_LAST_DESC)) !=
                                (ETH_RX_FIRST_DESC | ETH_RX_LAST_DESC)) {
                                printk(KERN_ERR
                                        "%s: Received packet spread "
                                        "on multiple descriptors\n",
                                        dev->name);
                        }
                        if (pkt_info.cmd_sts & ETH_ERROR_SUMMARY)
                                stats->rx_errors++;

                        dev_kfree_skb_irq(skb);
                } else {
                        /*
                         * The -4 is for the CRC in the trailer of the
                         * received packet
                         */
                        skb_put(skb, pkt_info.byte_cnt - 4);

                        if (pkt_info.cmd_sts & ETH_LAYER_4_CHECKSUM_OK) {
                                skb->ip_summed = CHECKSUM_UNNECESSARY;
                                skb->csum = htons(
                                        (pkt_info.cmd_sts & 0x0007fff8) >> 3);
                        }
                        skb->protocol = eth_type_trans(skb, dev);
                        netif_receive_skb(skb);
                }
        }

        return received_packets;
}
/*
 * mv643xx_eth_int_handler
 *
 * Main interrupt handler for the gigabit ethernet ports
 *
 * Input :	irq - irq number (not used)
 *		dev_id - a pointer to the required interface's data structure
 */
static irqreturn_t mv643xx_eth_int_handler(int irq, void *dev_id,
                                                        struct pt_regs *regs)
{
        struct net_device *dev = (struct net_device *)dev_id;
        struct mv643xx_private *mp = netdev_priv(dev);
        u32 eth_int_cause, eth_int_cause_ext = 0;
        unsigned int port_num = mp->port_num;

        /* Read interrupt cause registers */
        eth_int_cause = mv_read(MV643XX_ETH_INTERRUPT_CAUSE_REG(port_num)) &
                                                INT_CAUSE_UNMASK_ALL;

        if (eth_int_cause & BIT1)
                eth_int_cause_ext = mv_read(
                        MV643XX_ETH_INTERRUPT_CAUSE_EXTEND_REG(port_num)) &
                                                INT_CAUSE_UNMASK_ALL_EXT;

#ifdef MV643XX_NAPI
        if (!(eth_int_cause & 0x0007fffd)) {
                /* Don't ack the Rx interrupt */
#endif
                /*
                 * Clear specific ethernet port interrupt registers by
                 * acknowledging the relevant bits.
                 */
                mv_write(MV643XX_ETH_INTERRUPT_CAUSE_REG(port_num),
                                                        ~eth_int_cause);
                if (eth_int_cause_ext != 0x0)
                        mv_write(MV643XX_ETH_INTERRUPT_CAUSE_EXTEND_REG
                                        (port_num), ~eth_int_cause_ext);

                /* UDP change : We may need this */
                if ((eth_int_cause_ext & 0x0000ffff) &&
                    (mv643xx_eth_free_tx_queue(dev, eth_int_cause_ext) == 0) &&
                    (mp->tx_ring_size > mp->tx_ring_skbs + MAX_DESCS_PER_SKB))
                        netif_wake_queue(dev);
#ifdef MV643XX_NAPI
        } else {
                if (netif_rx_schedule_prep(dev)) {
                        /* Mask all the interrupts */
                        mv_write(MV643XX_ETH_INTERRUPT_MASK_REG(port_num), 0);
                        mv_write(MV643XX_ETH_INTERRUPT_EXTEND_MASK_REG
                                                        (port_num), 0);
                        __netif_rx_schedule(dev);
                }
        }
#else
        if (eth_int_cause & (BIT2 | BIT11))
                mv643xx_eth_receive_queue(dev, 0);

        /*
         * After forwarding the received packets to the upper layer, add a
         * task in an interrupts-enabled context that refills the RX ring
         * with skb's.
         */
#ifdef MV643XX_RX_QUEUE_FILL_ON_TASK
        /* Mask all interrupts on ethernet port; the rx task re-enables them */
        mv_write(MV643XX_ETH_INTERRUPT_MASK_REG(port_num),
                                                INT_CAUSE_MASK_ALL);
        queue_task(&mp->rx_task, &tq_immediate);
        mark_bh(IMMEDIATE_BH);
#else
        mp->rx_task.func(dev);
#endif
#endif

        /* PHY status changed */
        if (eth_int_cause_ext & (BIT16 | BIT20)) {
                if (eth_port_link_is_up(port_num)) {
                        netif_carrier_on(dev);
                        netif_wake_queue(dev);
                        /* Start TX queue */
                        mv_write(MV643XX_ETH_TRANSMIT_QUEUE_COMMAND_REG
                                                        (port_num), 1);
                } else {
                        netif_carrier_off(dev);
                        netif_stop_queue(dev);
                }
        }

        /*
         * If no real interrupt occurred, exit.
         * This can happen when using the gigE interrupt coalescing mechanism.
         */
        if ((eth_int_cause == 0x0) && (eth_int_cause_ext == 0x0))
                return IRQ_NONE;

        return IRQ_HANDLED;
}
/*
 * eth_port_set_rx_coal - Sets coalescing interrupt mechanism on RX path
 *
 * This routine sets the RX coalescing interrupt mechanism parameter.
 * The parameter is a timeout counter that counts in 64 t_clk chunks;
 * when the timeout event occurs, a maskable interrupt is asserted.
 * The parameter is calculated using the t_clk of the MV-643xx chip
 * and the required delay of the interrupt in usec.
 *
 * Input :	unsigned int eth_port_num	Ethernet port number
 *		unsigned int t_clk		t_clk of the MV-643xx chip in HZ units
 *		unsigned int delay		Delay in usec
 *
 * Output :	Interrupt coalescing mechanism value is set in the MV-643xx chip.
 *
 * Return :	The interrupt coalescing value set in the gigE port.
 */
static unsigned int eth_port_set_rx_coal(unsigned int eth_port_num,
                                        unsigned int t_clk, unsigned int delay)
{
        unsigned int coal = ((t_clk / 1000000) * delay) / 64;

        /* Set RX Coalescing mechanism */
        mv_write(MV643XX_ETH_SDMA_CONFIG_REG(eth_port_num),
                ((coal & 0x3fff) << 8) |
                (mv_read(MV643XX_ETH_SDMA_CONFIG_REG(eth_port_num))
                        & 0xffc000ff));

        return coal;
}
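/*
 * Worked example (illustrative): with t_clk = 133000000 (133 MHz) and a
 * requested delay of 20 usec, coal = ((133000000 / 1000000) * 20) / 64
 * = 2660 / 64 = 41, so the port raises the RX interrupt roughly every
 * 41 * 64 = 2624 t_clk cycles (about 20 usec) while packets are pending.
 * The same formula is used for the TX path below.
 */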
/*
 * eth_port_set_tx_coal - Sets coalescing interrupt mechanism on TX path
 *
 * This routine sets the TX coalescing interrupt mechanism parameter.
 * The parameter is a timeout counter that counts in 64 t_clk chunks;
 * when the timeout event occurs, a maskable interrupt is asserted.
 * The parameter is calculated using the t_clk frequency of the
 * MV-643xx chip and the required delay of the interrupt in usec.
 *
 * Input :	unsigned int eth_port_num	Ethernet port number
 *		unsigned int t_clk		t_clk of the MV-643xx chip in HZ units
 *		unsigned int delay		Delay in usec
 *
 * Output :	Interrupt coalescing mechanism value is set in the MV-643xx chip.
 *
 * Return :	The interrupt coalescing value set in the gigE port.
 */
static unsigned int eth_port_set_tx_coal(unsigned int eth_port_num,
                                        unsigned int t_clk, unsigned int delay)
{
        unsigned int coal;

        coal = ((t_clk / 1000000) * delay) / 64;
        /* Set TX Coalescing mechanism */
        mv_write(MV643XX_ETH_TX_FIFO_URGENT_THRESHOLD_REG(eth_port_num),
                                                                coal << 4);
        return coal;
}
/*
 * This function is called when opening the network device. The function
 * should initialize all the hardware, initialize the cyclic Rx/Tx
 * descriptor chains and buffers, and allocate an IRQ for the network
 * device.
 *
 * Input :	a pointer to the network device structure
 *
 * Output :	zero if successful, nonzero if it fails.
 */
static int mv643xx_eth_open(struct net_device *dev)
{
        struct mv643xx_private *mp = netdev_priv(dev);
        unsigned int port_num = mp->port_num;
        int err;

        spin_lock_irq(&mp->lock);

        err = request_irq(dev->irq, mv643xx_eth_int_handler,
                        SA_SHIRQ | SA_SAMPLE_RANDOM, dev->name, dev);
        if (err) {
                printk(KERN_ERR "Can not assign IRQ number to MV643XX_eth%d\n",
                                                                port_num);
                goto out;
        }

        if (mv643xx_eth_real_open(dev)) {
                printk("%s: Error opening interface\n", dev->name);
                err = -EBUSY;
                goto out_free;
        }

        spin_unlock_irq(&mp->lock);

        return 0;

out_free:
        free_irq(dev->irq, dev);

out:
        spin_unlock_irq(&mp->lock);

        return err;
}
/*
 * ether_init_rx_desc_ring - Carve a Rx chain desc list and buffer in memory.
 *
 * This function prepares a Rx chained list of descriptors and packet
 * buffers in the form of a ring. The routine must be called after the port
 * initialization routine and before the port start routine.
 * The Ethernet SDMA engine uses CPU bus addresses to access the various
 * devices in the system (i.e. DRAM). This function uses the ethernet
 * struct 'virtual to physical' routine (set by the user) to set the ring
 * with physical addresses.
 *
 * Input :	struct mv643xx_private *mp	Ethernet Port Control struct.
 *
 * Output :	The routine updates the Ethernet port control struct with
 *		information regarding the Rx descriptors and buffers.
 */
static void ether_init_rx_desc_ring(struct mv643xx_private *mp)
{
        volatile struct eth_rx_desc *p_rx_desc;
        int rx_desc_num = mp->rx_ring_size;
        int i;

        /* initialize the next_desc_ptr links in the Rx descriptors ring */
        p_rx_desc = (struct eth_rx_desc *)mp->p_rx_desc_area;
        for (i = 0; i < rx_desc_num; i++) {
                p_rx_desc[i].next_desc_ptr = mp->rx_desc_dma +
                        ((i + 1) % rx_desc_num) * sizeof(struct eth_rx_desc);
        }

        /* Save Rx desc pointer to driver struct. */
        mp->rx_curr_desc_q = 0;
        mp->rx_used_desc_q = 0;

        mp->rx_desc_area_size = rx_desc_num * sizeof(struct eth_rx_desc);

        /* Add the queue to the list of RX queues of this port */
        mp->port_rx_queue_command |= 1;
}
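/*
 * Illustration (added, not in the original source): with a ring of 4
 * descriptors the loop above produces the circular chain
 *
 *	desc[0].next_desc_ptr = rx_desc_dma + 1 * sizeof(struct eth_rx_desc)
 *	desc[1].next_desc_ptr = rx_desc_dma + 2 * sizeof(struct eth_rx_desc)
 *	desc[2].next_desc_ptr = rx_desc_dma + 3 * sizeof(struct eth_rx_desc)
 *	desc[3].next_desc_ptr = rx_desc_dma + 0	(wraps back to the head)
 *
 * so the SDMA engine can follow bus addresses around the ring without CPU
 * intervention. The Tx ring below is linked the same way.
 */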
/*
 * ether_init_tx_desc_ring - Carve a Tx chain desc list and buffer in memory.
 *
 * This function prepares a Tx chained list of descriptors and packet
 * buffers in the form of a ring. The routine must be called after the port
 * initialization routine and before the port start routine.
 * The Ethernet SDMA engine uses CPU bus addresses to access the various
 * devices in the system (i.e. DRAM). This function uses the ethernet
 * struct 'virtual to physical' routine (set by the user) to set the ring
 * with physical addresses.
 *
 * Input :	struct mv643xx_private *mp	Ethernet Port Control struct.
 *
 * Output :	The routine updates the Ethernet port control struct with
 *		information regarding the Tx descriptors and buffers.
 */
static void ether_init_tx_desc_ring(struct mv643xx_private *mp)
{
        int tx_desc_num = mp->tx_ring_size;
        struct eth_tx_desc *p_tx_desc;
        int i;

        /* Initialize the next_desc_ptr links in the Tx descriptors ring */
        p_tx_desc = (struct eth_tx_desc *)mp->p_tx_desc_area;
        for (i = 0; i < tx_desc_num; i++) {
                p_tx_desc[i].next_desc_ptr = mp->tx_desc_dma +
                        ((i + 1) % tx_desc_num) * sizeof(struct eth_tx_desc);
        }

        mp->tx_curr_desc_q = 0;
        mp->tx_used_desc_q = 0;
#ifdef MV643XX_CHECKSUM_OFFLOAD_TX
        mp->tx_first_desc_q = 0;
#endif

        mp->tx_desc_area_size = tx_desc_num * sizeof(struct eth_tx_desc);

        /* Add the queue to the list of Tx queues of this port */
        mp->port_tx_queue_command |= 1;
}
/* Helper function for mv643xx_eth_open */
static int mv643xx_eth_real_open(struct net_device *dev)
{
        struct mv643xx_private *mp = netdev_priv(dev);
        unsigned int port_num = mp->port_num;
        u32 size;

        /* Stop RX queues */
        mv_write(MV643XX_ETH_RECEIVE_QUEUE_COMMAND_REG(port_num), 0x0000ff00);

        /* Clear the ethernet port interrupts */
        mv_write(MV643XX_ETH_INTERRUPT_CAUSE_REG(port_num), 0);
        mv_write(MV643XX_ETH_INTERRUPT_CAUSE_EXTEND_REG(port_num), 0);

        /* Unmask RX buffer and TX end interrupt */
        mv_write(MV643XX_ETH_INTERRUPT_MASK_REG(port_num),
                                                INT_CAUSE_UNMASK_ALL);

        /* Unmask phy and link status changes interrupts */
        mv_write(MV643XX_ETH_INTERRUPT_EXTEND_MASK_REG(port_num),
                                                INT_CAUSE_UNMASK_ALL_EXT);

        /* Set the MAC Address */
        memcpy(mp->port_mac_addr, dev->dev_addr, 6);

        INIT_WORK(&mp->rx_task, (void (*)(void *))mv643xx_eth_rx_task, dev);

        memset(&mp->timeout, 0, sizeof(struct timer_list));
        mp->timeout.function = mv643xx_eth_rx_task_timer_wrapper;
        mp->timeout.data = (unsigned long)dev;

        mp->rx_task_busy = 0;
        mp->rx_timer_flag = 0;

        /* Allocate RX and TX skb rings */
        mp->rx_skb = kmalloc(sizeof(*mp->rx_skb) * mp->rx_ring_size,
                                                                GFP_KERNEL);
        if (!mp->rx_skb) {
                printk(KERN_ERR "%s: Cannot allocate Rx skb ring\n", dev->name);
                return -ENOMEM;
        }
        mp->tx_skb = kmalloc(sizeof(*mp->tx_skb) * mp->tx_ring_size,
                                                                GFP_KERNEL);
        if (!mp->tx_skb) {
                printk(KERN_ERR "%s: Cannot allocate Tx skb ring\n", dev->name);
                kfree(mp->rx_skb);
                return -ENOMEM;
        }

        /* Allocate TX ring */
        mp->tx_ring_skbs = 0;
        size = mp->tx_ring_size * sizeof(struct eth_tx_desc);
        mp->tx_desc_area_size = size;

        if (mp->tx_sram_size) {
                mp->p_tx_desc_area = ioremap(mp->tx_sram_addr,
                                                        mp->tx_sram_size);
                mp->tx_desc_dma = mp->tx_sram_addr;
        } else
                mp->p_tx_desc_area = dma_alloc_coherent(NULL, size,
                                                        &mp->tx_desc_dma,
                                                        GFP_KERNEL);

        if (!mp->p_tx_desc_area) {
                printk(KERN_ERR "%s: Cannot allocate Tx Ring (size %d bytes)\n",
                                                        dev->name, size);
                kfree(mp->rx_skb);
                kfree(mp->tx_skb);
                return -ENOMEM;
        }
        BUG_ON((u32) mp->p_tx_desc_area & 0xf);	/* check 16-byte alignment */
        memset((void *)mp->p_tx_desc_area, 0, mp->tx_desc_area_size);

        ether_init_tx_desc_ring(mp);

        /* Allocate RX ring */
        mp->rx_ring_skbs = 0;
        size = mp->rx_ring_size * sizeof(struct eth_rx_desc);
        mp->rx_desc_area_size = size;

        if (mp->rx_sram_size) {
                mp->p_rx_desc_area = ioremap(mp->rx_sram_addr,
                                                        mp->rx_sram_size);
                mp->rx_desc_dma = mp->rx_sram_addr;
        } else
                mp->p_rx_desc_area = dma_alloc_coherent(NULL, size,
                                                        &mp->rx_desc_dma,
                                                        GFP_KERNEL);

        if (!mp->p_rx_desc_area) {
                printk(KERN_ERR "%s: Cannot allocate Rx ring (size %d bytes)\n",
                                                        dev->name, size);
                printk(KERN_ERR "%s: Freeing previously allocated TX queues...",
                                                        dev->name);
                if (mp->tx_sram_size)
                        iounmap(mp->p_tx_desc_area);
                else
                        dma_free_coherent(NULL, mp->tx_desc_area_size,
                                        mp->p_tx_desc_area, mp->tx_desc_dma);
                kfree(mp->rx_skb);
                kfree(mp->tx_skb);
                return -ENOMEM;
        }
        memset((void *)mp->p_rx_desc_area, 0, size);

        ether_init_rx_desc_ring(mp);

        mv643xx_eth_rx_task(dev);	/* Fill RX ring with skb's */

        /* Interrupt Coalescing */
        eth_port_set_rx_coal(port_num, 133000000, MV643XX_RX_COAL);
        eth_port_set_tx_coal(port_num, 133000000, MV643XX_TX_COAL);

        netif_start_queue(dev);

        return 0;
}
static void mv643xx_eth_free_tx_rings(struct net_device *dev)
{
        struct mv643xx_private *mp = netdev_priv(dev);
        unsigned int port_num = mp->port_num;
        unsigned int curr;

        /* Stop Tx queues */
        mv_write(MV643XX_ETH_TRANSMIT_QUEUE_COMMAND_REG(port_num), 0x0000ff00);

        /* Free outstanding skb's on TX rings */
        for (curr = 0; mp->tx_ring_skbs && curr < mp->tx_ring_size; curr++) {
                if (mp->tx_skb[curr]) {
                        dev_kfree_skb(mp->tx_skb[curr]);
                        mp->tx_ring_skbs--;
                }
        }
        if (mp->tx_ring_skbs)
                printk("%s: Error on Tx descriptor free - could not free %d"
                                " descriptors\n", dev->name, mp->tx_ring_skbs);

        /* Free TX ring */
        if (mp->tx_sram_size)
                iounmap(mp->p_tx_desc_area);
        else
                dma_free_coherent(NULL, mp->tx_desc_area_size,
                                mp->p_tx_desc_area, mp->tx_desc_dma);
}
static void mv643xx_eth_free_rx_rings(struct net_device *dev)
{
        struct mv643xx_private *mp = netdev_priv(dev);
        unsigned int port_num = mp->port_num;
        unsigned int curr;

        /* Stop RX queues */
        mv_write(MV643XX_ETH_RECEIVE_QUEUE_COMMAND_REG(port_num), 0x0000ff00);

        /* Free preallocated skb's on RX rings */
        for (curr = 0; mp->rx_ring_skbs && curr < mp->rx_ring_size; curr++) {
                if (mp->rx_skb[curr]) {
                        dev_kfree_skb(mp->rx_skb[curr]);
                        mp->rx_ring_skbs--;
                }
        }

        if (mp->rx_ring_skbs)
                printk(KERN_ERR
                        "%s: Error in freeing Rx Ring. %d skb's still"
                        " stuck in RX Ring - ignoring them\n", dev->name,
                        mp->rx_ring_skbs);

        /* Free RX ring */
        if (mp->rx_sram_size)
                iounmap(mp->p_rx_desc_area);
        else
                dma_free_coherent(NULL, mp->rx_desc_area_size,
                                mp->p_rx_desc_area, mp->rx_desc_dma);
}
/*
 * This function is used when closing the network device.
 * It updates the hardware, releases all memory that holds buffers and
 * descriptors, and releases the IRQ.
 * Input :	a pointer to the device structure
 * Output :	zero if success , nonzero if fails
 */

/* Helper function for mv643xx_eth_stop */
static int mv643xx_eth_real_stop(struct net_device *dev)
{
        struct mv643xx_private *mp = netdev_priv(dev);
        unsigned int port_num = mp->port_num;

        netif_carrier_off(dev);
        netif_stop_queue(dev);

        mv643xx_eth_free_tx_rings(dev);
        mv643xx_eth_free_rx_rings(dev);

        eth_port_reset(mp->port_num);

        /* Disable ethernet port interrupts */
        mv_write(MV643XX_ETH_INTERRUPT_CAUSE_REG(port_num), 0);
        mv_write(MV643XX_ETH_INTERRUPT_CAUSE_EXTEND_REG(port_num), 0);

        /* Mask RX buffer and TX end interrupt */
        mv_write(MV643XX_ETH_INTERRUPT_MASK_REG(port_num), 0);

        /* Mask phy and link status changes interrupts */
        mv_write(MV643XX_ETH_INTERRUPT_EXTEND_MASK_REG(port_num), 0);

        return 0;
}

static int mv643xx_eth_stop(struct net_device *dev)
{
        struct mv643xx_private *mp = netdev_priv(dev);

        spin_lock_irq(&mp->lock);

        mv643xx_eth_real_stop(dev);

        free_irq(dev->irq, dev);
        spin_unlock_irq(&mp->lock);

        return 0;
}
static void mv643xx_tx(struct net_device *dev)
{
        struct mv643xx_private *mp = netdev_priv(dev);
        struct pkt_info pkt_info;

        while (eth_tx_return_desc(mp, &pkt_info) == ETH_OK) {
                if (pkt_info.return_info) {
                        if (skb_shinfo(pkt_info.return_info)->nr_frags)
                                dma_unmap_page(NULL, pkt_info.buf_ptr,
                                                pkt_info.byte_cnt,
                                                DMA_TO_DEVICE);
                        else
                                dma_unmap_single(NULL, pkt_info.buf_ptr,
                                                pkt_info.byte_cnt,
                                                DMA_TO_DEVICE);

                        dev_kfree_skb_irq(pkt_info.return_info);

                        if (mp->tx_ring_skbs)
                                mp->tx_ring_skbs--;
                } else
                        dma_unmap_page(NULL, pkt_info.buf_ptr,
                                        pkt_info.byte_cnt, DMA_TO_DEVICE);
        }

        if (netif_queue_stopped(dev) &&
                        mp->tx_ring_size > mp->tx_ring_skbs + MAX_DESCS_PER_SKB)
                netif_wake_queue(dev);
}
/*
 * This function is used in case of NAPI
 */
static int mv643xx_poll(struct net_device *dev, int *budget)
{
        struct mv643xx_private *mp = netdev_priv(dev);
        int done = 1, orig_budget, work_done;
        unsigned int port_num = mp->port_num;
        unsigned long flags;

#ifdef MV643XX_TX_FAST_REFILL
        if (++mp->tx_clean_threshold > 5) {
                spin_lock_irqsave(&mp->lock, flags);
                mv643xx_tx(dev);
                mp->tx_clean_threshold = 0;
                spin_unlock_irqrestore(&mp->lock, flags);
        }
#endif

        if ((mv_read(MV643XX_ETH_RX_CURRENT_QUEUE_DESC_PTR_0(port_num)))
                                        != (u32) mp->rx_used_desc_q) {
                orig_budget = *budget;
                if (orig_budget > dev->quota)
                        orig_budget = dev->quota;
                work_done = mv643xx_eth_receive_queue(dev, orig_budget);
                mp->rx_task.func(dev);
                *budget -= work_done;
                dev->quota -= work_done;
                if (work_done >= orig_budget)
                        done = 0;
        }

        if (done) {
                spin_lock_irqsave(&mp->lock, flags);
                __netif_rx_complete(dev);
                mv_write(MV643XX_ETH_INTERRUPT_CAUSE_REG(port_num), 0);
                mv_write(MV643XX_ETH_INTERRUPT_CAUSE_EXTEND_REG(port_num), 0);
                mv_write(MV643XX_ETH_INTERRUPT_MASK_REG(port_num),
                                                INT_CAUSE_UNMASK_ALL);
                mv_write(MV643XX_ETH_INTERRUPT_EXTEND_MASK_REG(port_num),
                                                INT_CAUSE_UNMASK_ALL_EXT);
                spin_unlock_irqrestore(&mp->lock, flags);
        }

        return done ? 0 : 1;
}
/*
 * mv643xx_eth_start_xmit
 *
 * This function queues a packet in the Tx descriptors of the required port.
 *
 * Input :	skb - a pointer to socket buffer
 *		dev - a pointer to the required port
 *
 * Output :	zero upon success
 */
static int mv643xx_eth_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
        struct mv643xx_private *mp = netdev_priv(dev);
        struct net_device_stats *stats = &mp->stats;
        ETH_FUNC_RET_STATUS status;
        unsigned long flags;
        struct pkt_info pkt_info;

        if (netif_queue_stopped(dev)) {
                printk(KERN_ERR
                        "%s: Tried sending packet when interface is stopped\n",
                        dev->name);
                return 1;
        }

        /* This is a hard error, log it. */
        if ((mp->tx_ring_size - mp->tx_ring_skbs) <=
                                        (skb_shinfo(skb)->nr_frags + 1)) {
                netif_stop_queue(dev);
                printk(KERN_ERR
                        "%s: Bug in mv643xx_eth - Trying to transmit when"
                        " queue full !\n", dev->name);
                return 1;
        }

        /* Paranoid check - this shouldn't happen */
        if (skb == NULL) {
                stats->tx_dropped++;
                printk(KERN_ERR "mv64320_eth paranoid check failed\n");
                return 1;
        }

        spin_lock_irqsave(&mp->lock, flags);

        /* Update packet info data structure -- DMA owned, first last */
#ifdef MV643XX_CHECKSUM_OFFLOAD_TX
        if (!skb_shinfo(skb)->nr_frags) {
linear:
                if (skb->ip_summed != CHECKSUM_HW) {
                        /* Errata BTS #50, IHL must be 5 if no HW checksum */
                        pkt_info.cmd_sts = ETH_TX_ENABLE_INTERRUPT |
                                        ETH_TX_FIRST_DESC |
                                        ETH_TX_LAST_DESC |
                                        5 << ETH_TX_IHL_SHIFT;
                        pkt_info.l4i_chk = 0;
                } else {
                        pkt_info.cmd_sts = ETH_TX_ENABLE_INTERRUPT |
                                        ETH_TX_FIRST_DESC |
                                        ETH_TX_LAST_DESC |
                                        ETH_GEN_TCP_UDP_CHECKSUM |
                                        ETH_GEN_IP_V_4_CHECKSUM |
                                        skb->nh.iph->ihl << ETH_TX_IHL_SHIFT;
                        /* CPU already calculated pseudo header checksum. */
                        if (skb->nh.iph->protocol == IPPROTO_UDP) {
                                pkt_info.cmd_sts |= ETH_UDP_FRAME;
                                pkt_info.l4i_chk = skb->h.uh->check;
                        } else if (skb->nh.iph->protocol == IPPROTO_TCP)
                                pkt_info.l4i_chk = skb->h.th->check;
                        else {
                                printk(KERN_ERR
                                        "%s: chksum proto != TCP or UDP\n",
                                        dev->name);
                                spin_unlock_irqrestore(&mp->lock, flags);
                                return 1;
                        }
                }
                pkt_info.byte_cnt = skb->len;
                pkt_info.buf_ptr = dma_map_single(NULL, skb->data, skb->len,
                                                        DMA_TO_DEVICE);
                pkt_info.return_info = skb;
                mp->tx_ring_skbs++;
                status = eth_port_send(mp, &pkt_info);
                if ((status == ETH_ERROR) || (status == ETH_QUEUE_FULL))
                        printk(KERN_ERR "%s: Error on transmitting packet\n",
                                                                dev->name);
                stats->tx_bytes += pkt_info.byte_cnt;
        } else {
                unsigned int frag;

                /* Since hardware can't handle unaligned fragments smaller
                 * than 9 bytes, if we find any, we linearize the skb
                 * and start again. When I've seen it, it's always been
                 * the first frag (probably near the end of the page),
                 * but we check all frags to be safe.
                 */
                for (frag = 0; frag < skb_shinfo(skb)->nr_frags; frag++) {
                        skb_frag_t *fragp;

                        fragp = &skb_shinfo(skb)->frags[frag];
                        if (fragp->size <= 8 && fragp->page_offset & 0x7) {
                                skb_linearize(skb, GFP_ATOMIC);
                                printk(KERN_DEBUG "%s: unaligned tiny fragment"
                                                " %d of %d, fixed\n",
                                                dev->name, frag,
                                                skb_shinfo(skb)->nr_frags);
                                goto linear;
                        }
                }

                /* first frag which is skb header */
                pkt_info.byte_cnt = skb_headlen(skb);
                pkt_info.buf_ptr = dma_map_single(NULL, skb->data,
                                                        skb_headlen(skb),
                                                        DMA_TO_DEVICE);
                pkt_info.l4i_chk = 0;
                pkt_info.return_info = 0;

                if (skb->ip_summed != CHECKSUM_HW)
                        /* Errata BTS #50, IHL must be 5 if no HW checksum */
                        pkt_info.cmd_sts = ETH_TX_FIRST_DESC |
                                        5 << ETH_TX_IHL_SHIFT;
                else {
                        pkt_info.cmd_sts = ETH_TX_FIRST_DESC |
                                        ETH_GEN_TCP_UDP_CHECKSUM |
                                        ETH_GEN_IP_V_4_CHECKSUM |
                                        skb->nh.iph->ihl << ETH_TX_IHL_SHIFT;
                        /* CPU already calculated pseudo header checksum. */
                        if (skb->nh.iph->protocol == IPPROTO_UDP) {
                                pkt_info.cmd_sts |= ETH_UDP_FRAME;
                                pkt_info.l4i_chk = skb->h.uh->check;
                        } else if (skb->nh.iph->protocol == IPPROTO_TCP)
                                pkt_info.l4i_chk = skb->h.th->check;
                        else {
                                printk(KERN_ERR
                                        "%s: chksum proto != TCP or UDP\n",
                                        dev->name);
                                spin_unlock_irqrestore(&mp->lock, flags);
                                return 1;
                        }
                }

                status = eth_port_send(mp, &pkt_info);
                if (status != ETH_OK) {
                        if ((status == ETH_ERROR))
                                printk(KERN_ERR
                                        "%s: Error on transmitting packet\n",
                                        dev->name);
                        if (status == ETH_QUEUE_FULL)
                                printk("Error on Queue Full \n");
                        if (status == ETH_QUEUE_LAST_RESOURCE)
                                printk("Tx resource error \n");
                }
                stats->tx_bytes += pkt_info.byte_cnt;

                /* Check for the remaining frags */
                for (frag = 0; frag < skb_shinfo(skb)->nr_frags; frag++) {
                        skb_frag_t *this_frag = &skb_shinfo(skb)->frags[frag];
                        pkt_info.l4i_chk = 0x0000;
                        pkt_info.cmd_sts = 0x00000000;

                        /* Last Frag enables interrupt and frees the skb */
                        if (frag == (skb_shinfo(skb)->nr_frags - 1)) {
                                pkt_info.cmd_sts |= ETH_TX_ENABLE_INTERRUPT |
                                                        ETH_TX_LAST_DESC;
                                pkt_info.return_info = skb;
                                mp->tx_ring_skbs++;
                        } else {
                                pkt_info.return_info = 0;
                        }
                        pkt_info.l4i_chk = 0;
                        pkt_info.byte_cnt = this_frag->size;

                        pkt_info.buf_ptr = dma_map_page(NULL, this_frag->page,
                                                this_frag->page_offset,
                                                this_frag->size,
                                                DMA_TO_DEVICE);

                        status = eth_port_send(mp, &pkt_info);

                        if (status != ETH_OK) {
                                if ((status == ETH_ERROR))
                                        printk(KERN_ERR "%s: Error on "
                                                "transmitting packet\n",
                                                dev->name);

                                if (status == ETH_QUEUE_LAST_RESOURCE)
                                        printk("Tx resource error \n");

                                if (status == ETH_QUEUE_FULL)
                                        printk("Queue is full \n");
                        }
                        stats->tx_bytes += pkt_info.byte_cnt;
                }
        }
#else
        pkt_info.cmd_sts = ETH_TX_ENABLE_INTERRUPT | ETH_TX_FIRST_DESC |
                                                        ETH_TX_LAST_DESC;
        pkt_info.l4i_chk = 0;
        pkt_info.byte_cnt = skb->len;
        pkt_info.buf_ptr = dma_map_single(NULL, skb->data, skb->len,
                                                        DMA_TO_DEVICE);
        pkt_info.return_info = skb;
        mp->tx_ring_skbs++;
        status = eth_port_send(mp, &pkt_info);
        if ((status == ETH_ERROR) || (status == ETH_QUEUE_FULL))
                printk(KERN_ERR "%s: Error on transmitting packet\n",
                                                        dev->name);
        stats->tx_bytes += pkt_info.byte_cnt;
#endif

        /* Check if TX queue can handle another skb. If not, then
         * signal higher layers to stop requesting TX
         */
        if (mp->tx_ring_size <= (mp->tx_ring_skbs + MAX_DESCS_PER_SKB))
                /*
                 * Stop getting skb's from upper layers.
                 * Getting skb's from upper layers will be enabled again after
                 * packets are released.
                 */
                netif_stop_queue(dev);

        /* Update statistics and start of transmission time */
        stats->tx_packets++;
        dev->trans_start = jiffies;

        spin_unlock_irqrestore(&mp->lock, flags);

        return 0;		/* success */
}
/*
 * mv643xx_eth_get_stats
 *
 * Returns a pointer to the interface statistics.
 *
 * Input :	dev - a pointer to the required interface
 *
 * Output :	a pointer to the interface's statistics
 */
static struct net_device_stats *mv643xx_eth_get_stats(struct net_device *dev)
{
        struct mv643xx_private *mp = netdev_priv(dev);

        return &mp->stats;
}
/*
 * First function called after registering the network device.
 * Its purpose is to initialize the device as an ethernet device,
 * fill the ethernet device structure with pointers to functions,
 * and set the MAC address of the interface.
 *
 * Input :	struct device *
 * Output :	-ENOMEM if failed , 0 if success
 */
static int mv643xx_eth_probe(struct device *ddev)
{
        struct platform_device *pdev = to_platform_device(ddev);
        struct mv643xx_eth_platform_data *pd;
        int port_num = pdev->id;
        struct mv643xx_private *mp;
        struct net_device *dev;
        unsigned char *p;
        struct resource *res;
        int err;

        dev = alloc_etherdev(sizeof(struct mv643xx_private));
        if (!dev)
                return -ENOMEM;

        dev_set_drvdata(ddev, dev);

        mp = netdev_priv(dev);

        res = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
        BUG_ON(!res);
        dev->irq = res->start;

        mp->port_num = port_num;

        dev->open = mv643xx_eth_open;
        dev->stop = mv643xx_eth_stop;
        dev->hard_start_xmit = mv643xx_eth_start_xmit;
        dev->get_stats = mv643xx_eth_get_stats;
        dev->set_mac_address = mv643xx_eth_set_mac_address;
        dev->set_multicast_list = mv643xx_eth_set_rx_mode;

        /* No need to Tx Timeout */
        dev->tx_timeout = mv643xx_eth_tx_timeout;
        dev->poll = mv643xx_poll;

        dev->watchdog_timeo = 2 * HZ;
        dev->tx_queue_len = mp->tx_ring_size;
        dev->change_mtu = mv643xx_eth_change_mtu;
        SET_ETHTOOL_OPS(dev, &mv643xx_ethtool_ops);

#ifdef MV643XX_CHECKSUM_OFFLOAD_TX
#ifdef MAX_SKB_FRAGS
        /*
         * Zero copy can only work if we use Discovery II memory. Else, we will
         * have to map the buffers to ISA memory which is only 16 MB
         */
        dev->features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_HW_CSUM;
#endif
#endif

        /* Configure the timeout task */
        INIT_WORK(&mp->tx_timeout_task,
                        (void (*)(void *))mv643xx_eth_tx_timeout_task, dev);

        spin_lock_init(&mp->lock);

        /* set default config values */
        eth_port_uc_addr_get(dev, dev->dev_addr);
        mp->port_config = MV643XX_ETH_PORT_CONFIG_DEFAULT_VALUE;
        mp->port_config_extend = MV643XX_ETH_PORT_CONFIG_EXTEND_DEFAULT_VALUE;
        mp->port_sdma_config = MV643XX_ETH_PORT_SDMA_CONFIG_DEFAULT_VALUE;
        mp->port_serial_control = MV643XX_ETH_PORT_SERIAL_CONTROL_DEFAULT_VALUE;
        mp->rx_ring_size = MV643XX_ETH_PORT_DEFAULT_RECEIVE_QUEUE_SIZE;
        mp->tx_ring_size = MV643XX_ETH_PORT_DEFAULT_TRANSMIT_QUEUE_SIZE;

        pd = pdev->dev.platform_data;

        if (pd->mac_addr != NULL)
                memcpy(dev->dev_addr, pd->mac_addr, 6);

        if (pd->phy_addr || pd->force_phy_addr)
                ethernet_phy_set(port_num, pd->phy_addr);

        if (pd->port_config || pd->force_port_config)
                mp->port_config = pd->port_config;

        if (pd->port_config_extend || pd->force_port_config_extend)
                mp->port_config_extend = pd->port_config_extend;

        if (pd->port_sdma_config || pd->force_port_sdma_config)
                mp->port_sdma_config = pd->port_sdma_config;

        if (pd->port_serial_control || pd->force_port_serial_control)
                mp->port_serial_control = pd->port_serial_control;

        if (pd->rx_queue_size)
                mp->rx_ring_size = pd->rx_queue_size;

        if (pd->tx_queue_size)
                mp->tx_ring_size = pd->tx_queue_size;

        if (pd->tx_sram_size) {
                mp->tx_sram_size = pd->tx_sram_size;
                mp->tx_sram_addr = pd->tx_sram_addr;
        }

        if (pd->rx_sram_size) {
                mp->rx_sram_size = pd->rx_sram_size;
                mp->rx_sram_addr = pd->rx_sram_addr;
        }

        err = ethernet_phy_detect(port_num);
        if (err) {
                pr_debug("MV643xx ethernet port %d: "
                                        "No PHY detected at addr %d\n",
                                        port_num, ethernet_phy_get(port_num));
                goto out;
        }

        err = register_netdev(dev);
        if (err)
                goto out;

        p = dev->dev_addr;
        printk(KERN_NOTICE
                "%s: port %d with MAC address %02x:%02x:%02x:%02x:%02x:%02x\n",
                dev->name, port_num, p[0], p[1], p[2], p[3], p[4], p[5]);

        if (dev->features & NETIF_F_SG)
                printk(KERN_NOTICE "%s: Scatter Gather Enabled\n", dev->name);

        if (dev->features & NETIF_F_IP_CSUM)
                printk(KERN_NOTICE "%s: TX TCP/IP Checksumming Supported\n",
                                                                dev->name);

#ifdef MV643XX_CHECKSUM_OFFLOAD_TX
        printk(KERN_NOTICE "%s: RX TCP/UDP Checksum Offload ON \n", dev->name);
#endif

#ifdef MV643XX_COAL
        printk(KERN_NOTICE "%s: TX and RX Interrupt Coalescing ON \n",
                                                                dev->name);
#endif

#ifdef MV643XX_NAPI
        printk(KERN_NOTICE "%s: RX NAPI Enabled \n", dev->name);
#endif

        return 0;

out:
        free_netdev(dev);

        return err;
}
static int mv643xx_eth_remove(struct device *ddev)
{
        struct net_device *dev = dev_get_drvdata(ddev);

        unregister_netdev(dev);
        flush_scheduled_work();

        free_netdev(dev);
        dev_set_drvdata(ddev, NULL);
        return 0;
}
static int mv643xx_eth_shared_probe(struct device *ddev)
{
        struct platform_device *pdev = to_platform_device(ddev);
        struct resource *res;

        printk(KERN_NOTICE "MV-643xx 10/100/1000 Ethernet Driver\n");

        res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
        if (res == NULL)
                return -ENODEV;

        mv643xx_eth_shared_base = ioremap(res->start,
                                                MV643XX_ETH_SHARED_REGS_SIZE);
        if (mv643xx_eth_shared_base == NULL)
                return -ENOMEM;

        return 0;
}

static int mv643xx_eth_shared_remove(struct device *ddev)
{
        iounmap(mv643xx_eth_shared_base);
        mv643xx_eth_shared_base = NULL;

        return 0;
}
static struct device_driver mv643xx_eth_driver = {
        .name = MV643XX_ETH_NAME,
        .bus = &platform_bus_type,
        .probe = mv643xx_eth_probe,
        .remove = mv643xx_eth_remove,
};

static struct device_driver mv643xx_eth_shared_driver = {
        .name = MV643XX_ETH_SHARED_NAME,
        .bus = &platform_bus_type,
        .probe = mv643xx_eth_shared_probe,
        .remove = mv643xx_eth_shared_remove,
};
/*
 * mv643xx_init_module
 *
 * Registers the network drivers into the Linux kernel
 */
static int __init mv643xx_init_module(void)
{
        int rc;

        rc = driver_register(&mv643xx_eth_shared_driver);
        if (!rc) {
                rc = driver_register(&mv643xx_eth_driver);
                if (rc)
                        driver_unregister(&mv643xx_eth_shared_driver);
        }
        return rc;
}

/*
 * mv643xx_cleanup_module
 *
 * Unregisters the network drivers from the Linux kernel
 */
static void __exit mv643xx_cleanup_module(void)
{
        driver_unregister(&mv643xx_eth_driver);
        driver_unregister(&mv643xx_eth_shared_driver);
}

module_init(mv643xx_init_module);
module_exit(mv643xx_cleanup_module);
MODULE_LICENSE("GPL");
MODULE_AUTHOR(	"Rabeeh Khoury, Assaf Hoffman, Matthew Dharm, Manish Lachwani"
		" and Dale Farnsworth");
MODULE_DESCRIPTION("Ethernet driver for Marvell MV643XX");
/*
 * The second part is the low level driver of the gigE ethernet ports.
 */

/*
 * Marvell's Gigabit Ethernet controller low level driver
 *
 * This file introduces a low level API to Marvell's Gigabit Ethernet
 * controller. This Gigabit Ethernet Controller driver API controls
 * 1) Operations (i.e. port init, start, reset etc').
 * 2) Data flow (i.e. port send, receive etc').
 * Each Gigabit Ethernet port is controlled via struct mv643xx_private.
 * This struct includes user configuration information as well as
 * driver internal data needed for its operations.
 *
 * Supported Features:
 * - This low level driver is OS independent. Allocating memory for
 *   the descriptor rings and buffers is not within the scope of this driver.
 * - The user is freed from Rx/Tx queue management.
 * - This low level driver introduces a functionality API that enables
 *   the user to operate Marvell's Gigabit Ethernet Controller in a
 *   convenient way.
 * - Simple Gigabit Ethernet port operation API.
 * - Simple Gigabit Ethernet port data flow API.
 * - Data flow and operation API support per queue functionality.
 * - Support cached descriptors for better performance.
 * - Enable access to all four DRAM banks and internal SRAM memory spaces.
 * - PHY access and control API.
 * - Port control register configuration API.
 * - Full control over Unicast and Multicast MAC configurations.
 *
 * Operation flow:
 *
 * Initialization phase
 * This phase completes the initialization of the mv643xx_private struct.
 * User information regarding port configuration has to be set
 * prior to calling the port initialization routine.
 *
 * In this phase any port Tx/Rx activity is halted, MIB counters
 * are cleared, the PHY address is set according to the user parameter,
 * and access to DRAM and internal SRAM memory spaces is enabled.
 *
 * Driver ring initialization
 * Allocating memory for the descriptor rings and buffers is not
 * within the scope of this driver. Thus, the user is required to
 * allocate memory for the descriptors ring and buffers. Those
 * memory parameters are used by the Rx and Tx ring initialization
 * routines in order to carve the descriptor linked list in the form
 * of a ring.
 * Note: Pay special attention to alignment issues when using
 * cached descriptors/buffers. In this phase the driver stores
 * information in the mv643xx_private struct regarding each queue ring.
 *
 * Driver start
 * This phase prepares the Ethernet port for Rx and Tx activity.
 * It uses the information stored in the mv643xx_private struct to
 * initialize the various port registers.
 *
 * Data flow:
 * All packet references to/from the driver are done using struct pkt_info.
 * This struct is a unified struct used with Rx and Tx operations.
 * This way the user is not required to be familiar with either
 * Tx or Rx descriptor structures.
 * The driver's descriptor rings are managed by indexes.
 * Those indexes control the ring resources and are used to indicate
 * a SW resource error:
 * 'current'
 * This index points to the current available resource for use. For
 * example in Rx process this index will point to the descriptor
 * that will be passed to the user upon calling the receive
 * routine. In Tx process, this index will point to the descriptor
 * that will be assigned with the user packet info and transmitted.
 * 'used'
 * This index points to the descriptor that needs to restore its
 * resources. For example in Rx process, using the Rx buffer return
 * API will attach the buffer returned in packet info to the
 * descriptor pointed by 'used'. In Tx process, using the Tx
 * descriptor return will merely return the user packet info with
 * the command status of the transmitted buffer pointed by the
 * 'used' index. Nevertheless, it is essential to use this routine
 * to update the 'used' index.
 * 'first'
 * This index supports Tx Scatter-Gather. It points to the first
 * descriptor of a packet assembled of multiple buffers. For
 * example, when in the middle of such a packet we have a Tx resource
 * error, the 'curr' index gets the value of 'first' to indicate
 * that the ring returned to its state before trying to transmit
 * this packet.
 *
 * Receive operation:
 * The eth_port_receive API sets the packet information struct,
 * passed by the caller, with received information from the
 * 'current' SDMA descriptor.
 * It is the user's responsibility to return this resource back
 * to the Rx descriptor ring to enable the reuse of this resource.
 * Returning an Rx resource is done using the eth_rx_return_buff API.
 *
 * Transmit operation:
 * The eth_port_send API supports Scatter-Gather, which enables
 * sending a packet spanned over multiple buffers. A packet info
 * structure given by the user and put into the Tx descriptors ring
 * will be transmitted only once the 'LAST' bit is set in its command
 * status field. This API also considers restrictions regarding buffer
 * alignments and sizes.
 * The user must return a Tx resource after ensuring the buffer
 * has been transmitted to enable the Tx ring indexes to update.
 *
 * BOARD LAYOUT
 * This device is on-board. No jumper diagram is necessary.
 *
 * EXTERNAL INTERFACE
 *
 * Prior to calling the initialization routine eth_port_init() the user
 * must set the following fields under mv643xx_private struct:
 * port_num		User Ethernet port number.
 * port_mac_addr[6]	User defined port MAC address.
 * port_config		User port configuration value.
 * port_config_extend	User port config extend value.
 * port_sdma_config	User port SDMA config value.
 * port_serial_control	User port serial control value.
 *
 * This driver data flow is done using the struct pkt_info which
 * is a unified struct for Rx and Tx operations:
 *
 * byte_cnt	Tx/Rx descriptor buffer byte count.
 * l4i_chk	CPU provided TCP Checksum. For Tx operation only.
 * cmd_sts	Tx/Rx descriptor command status.
 * buf_ptr	Tx/Rx descriptor buffer pointer.
 * return_info	Tx/Rx user resource return information.
 */

/* SDMA command macros */
#define ETH_ENABLE_TX_QUEUE(eth_port) \
	mv_write(MV643XX_ETH_TRANSMIT_QUEUE_COMMAND_REG(eth_port), 1)
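/*
 * Usage sketch (illustrative only, not part of the original source): a
 * typical non-scatter/gather receive/refill cycle with this low level API is
 *
 *	struct pkt_info pkt_info;
 *
 *	while (eth_port_receive(mp, &pkt_info) == ETH_OK) {
 *		// pkt_info.return_info carries the skb supplied at refill time
 *		deliver_to_stack(pkt_info.return_info, pkt_info.byte_cnt);
 *
 *		// hand a fresh buffer back so the 'used' index advances
 *		pkt_info.cmd_sts = ETH_RX_ENABLE_INTERRUPT;
 *		pkt_info.byte_cnt = RX_SKB_SIZE;
 *		pkt_info.buf_ptr = dma_map_single(NULL, new_skb->data,
 *					RX_SKB_SIZE, DMA_FROM_DEVICE);
 *		pkt_info.return_info = new_skb;
 *		eth_rx_return_buff(mp, &pkt_info);
 *	}
 *
 * deliver_to_stack() and new_skb are placeholders; the actual handling is
 * shown by mv643xx_eth_receive_queue() and mv643xx_eth_rx_task() above.
 */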
/* PHY routines */
static int ethernet_phy_get(unsigned int eth_port_num);
static void ethernet_phy_set(unsigned int eth_port_num, int phy_addr);

/* Ethernet Port routines */
static int eth_port_uc_addr(unsigned int eth_port_num, unsigned char uc_nibble,
								int option);
/*
 * eth_port_init - Initialize the Ethernet port driver
 *
 * This function prepares the ethernet port to start its activity:
 * 1) Completes the ethernet port driver struct initialization toward the
 *    port start routine.
 * 2) Resets the device to a quiescent state in case of warm reboot.
 * 3) Enables SDMA access to all four DRAM banks as well as internal SRAM.
 * 4) Cleans the MAC tables. The reset status of those tables is unknown.
 * 5) Sets the PHY address.
 * Note: Call this routine prior to the eth_port_start routine and after
 * setting user values in the user fields of the Ethernet port control
 * struct.
 *
 * Input :	struct mv643xx_private *mp	Ethernet port control struct
 */
static void eth_port_init(struct mv643xx_private *mp)
{
        mp->port_rx_queue_command = 0;
        mp->port_tx_queue_command = 0;

        mp->rx_resource_err = 0;
        mp->tx_resource_err = 0;

        eth_port_reset(mp->port_num);

        eth_port_init_mac_tables(mp->port_num);

        ethernet_phy_reset(mp->port_num);
}
/*
 * eth_port_start - Start the Ethernet port activity.
 *
 * This routine prepares the Ethernet port for Rx and Tx activity:
 * 1. Initializes the Tx and Rx Current Descriptor Pointer for each queue
 *    whose descriptor ring has been initialized (using
 *    ether_init_tx_desc_ring for Tx and ether_init_rx_desc_ring for Rx).
 * 2. Initializes and enables the Ethernet configuration port by writing to
 *    the port's configuration and command registers.
 * 3. Initializes and enables the SDMA by writing to the SDMA's
 *    configuration and command registers. After completing these steps,
 *    the ethernet port SDMA can start to perform Rx and Tx activities.
 *
 * Note: Each Rx and Tx queue descriptor's list must be initialized prior
 * to calling this function (use ether_init_tx_desc_ring for Tx queues
 * and ether_init_rx_desc_ring for Rx queues).
 *
 * Input :	struct mv643xx_private *mp	Ethernet port control struct
 *
 * Output :	Ethernet port is ready to receive and transmit.
 */
static void eth_port_start(struct mv643xx_private *mp)
{
        unsigned int port_num = mp->port_num;
        int tx_curr_desc, rx_curr_desc;

        /* Assignment of Tx CTRP of given queue */
        tx_curr_desc = mp->tx_curr_desc_q;
        mv_write(MV643XX_ETH_TX_CURRENT_QUEUE_DESC_PTR_0(port_num),
                (u32)((struct eth_tx_desc *)mp->tx_desc_dma + tx_curr_desc));

        /* Assignment of Rx CRDP of given queue */
        rx_curr_desc = mp->rx_curr_desc_q;
        mv_write(MV643XX_ETH_RX_CURRENT_QUEUE_DESC_PTR_0(port_num),
                (u32)((struct eth_rx_desc *)mp->rx_desc_dma + rx_curr_desc));

        /* Add the assigned Ethernet address to the port's address table */
        eth_port_uc_addr_set(port_num, mp->port_mac_addr);

        /* Assign port configuration and command. */
        mv_write(MV643XX_ETH_PORT_CONFIG_REG(port_num), mp->port_config);

        mv_write(MV643XX_ETH_PORT_CONFIG_EXTEND_REG(port_num),
                                                mp->port_config_extend);

        /* Increase the Rx side buffer size if supporting GigE */
        if (mp->port_serial_control & MV643XX_ETH_SET_GMII_SPEED_TO_1000)
                mv_write(MV643XX_ETH_PORT_SERIAL_CONTROL_REG(port_num),
                        (mp->port_serial_control & 0xfff1ffff) | (0x5 << 17));
        else
                mv_write(MV643XX_ETH_PORT_SERIAL_CONTROL_REG(port_num),
                                                mp->port_serial_control);

        mv_write(MV643XX_ETH_PORT_SERIAL_CONTROL_REG(port_num),
                mv_read(MV643XX_ETH_PORT_SERIAL_CONTROL_REG(port_num)) |
                                        MV643XX_ETH_SERIAL_PORT_ENABLE);

        /* Assign port SDMA configuration */
        mv_write(MV643XX_ETH_SDMA_CONFIG_REG(port_num),
                                                mp->port_sdma_config);

        /* Enable port Rx. */
        mv_write(MV643XX_ETH_RECEIVE_QUEUE_COMMAND_REG(port_num),
                                                mp->port_rx_queue_command);
}
/*
 * eth_port_uc_addr_set - This function sets the port Unicast address.
 *
 * This function sets the port Ethernet MAC address.
 *
 * Input :	unsigned int eth_port_num	Port number.
 *		char *p_addr			Address to be set
 *
 * Output :	Sets the MAC address low and high registers. Also calls
 *		eth_port_uc_addr() to set the unicast table with the proper
 *		information.
 */
static void eth_port_uc_addr_set(unsigned int eth_port_num,
                                                        unsigned char *p_addr)
{
        unsigned int mac_h;
        unsigned int mac_l;

        mac_l = (p_addr[4] << 8) | (p_addr[5]);
        mac_h = (p_addr[0] << 24) | (p_addr[1] << 16) | (p_addr[2] << 8) |
                                                        (p_addr[3] << 0);

        mv_write(MV643XX_ETH_MAC_ADDR_LOW(eth_port_num), mac_l);
        mv_write(MV643XX_ETH_MAC_ADDR_HIGH(eth_port_num), mac_h);

        /* Accept frames of this address */
        eth_port_uc_addr(eth_port_num, p_addr[5], ACCEPT_MAC_ADDR);
}
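/*
 * Worked example (illustrative): for the MAC address 00:50:43:12:34:56 the
 * packing above yields mac_h = 0x00504312 (address bytes 0..3) and
 * mac_l = 0x3456 (address bytes 4..5), which is how the hardware splits the
 * address across the high and low MAC address registers.
 */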
/*
 * eth_port_uc_addr_get - This function retrieves the port Unicast address
 * (MAC address) from the ethernet hw registers.
 *
 * This function retrieves the port Ethernet MAC address.
 *
 * Input :	unsigned int eth_port_num	Port number.
 *		char *MacAddr			pointer where the MAC address
 *						is stored
 *
 * Output :	Copy the MAC address to the location pointed to by MacAddr
 */
static void eth_port_uc_addr_get(struct net_device *dev, unsigned char *p_addr)
{
        struct mv643xx_private *mp = netdev_priv(dev);
        unsigned int mac_h;
        unsigned int mac_l;

        mac_h = mv_read(MV643XX_ETH_MAC_ADDR_HIGH(mp->port_num));
        mac_l = mv_read(MV643XX_ETH_MAC_ADDR_LOW(mp->port_num));

        p_addr[0] = (mac_h >> 24) & 0xff;
        p_addr[1] = (mac_h >> 16) & 0xff;
        p_addr[2] = (mac_h >> 8) & 0xff;
        p_addr[3] = mac_h & 0xff;
        p_addr[4] = (mac_l >> 8) & 0xff;
        p_addr[5] = mac_l & 0xff;
}
/*
 * eth_port_uc_addr - This function sets the port unicast address table
 *
 * This function locates the proper entry in the Unicast table for the
 * specified MAC nibble and sets its properties according to the function
 * parameters.
 *
 * Input :	unsigned int eth_port_num	Port number.
 *		unsigned char uc_nibble		Unicast MAC Address last nibble.
 *		int option			0 = Add, 1 = remove address.
 *
 * Output :	This function adds/removes MAC addresses from the port unicast
 *		address table.
 *
 * Return :	true if output succeeded.
 *		false if option parameter is invalid.
 */
static int eth_port_uc_addr(unsigned int eth_port_num, unsigned char uc_nibble,
								int option)
{
        unsigned int unicast_reg;
        unsigned int tbl_offset;
        unsigned int reg_offset;

        /* Locate the Unicast table entry */
        uc_nibble = (0xf & uc_nibble);
        tbl_offset = (uc_nibble / 4) * 4;	/* Register offset from unicast table base */
        reg_offset = uc_nibble % 4;		/* Entry offset within the above register */

        switch (option) {
        case REJECT_MAC_ADDR:
                /* Clear accepts frame bit at given unicast DA table entry */
                unicast_reg = mv_read((MV643XX_ETH_DA_FILTER_UNICAST_TABLE_BASE
                                        (eth_port_num) + tbl_offset));

                unicast_reg &= (0x0E << (8 * reg_offset));

                mv_write((MV643XX_ETH_DA_FILTER_UNICAST_TABLE_BASE
                                (eth_port_num) + tbl_offset), unicast_reg);
                break;

        case ACCEPT_MAC_ADDR:
                /* Set accepts frame bit at unicast DA filter table entry */
                unicast_reg =
                        mv_read((MV643XX_ETH_DA_FILTER_UNICAST_TABLE_BASE
                                        (eth_port_num) + tbl_offset));

                unicast_reg |= (0x01 << (8 * reg_offset));

                mv_write((MV643XX_ETH_DA_FILTER_UNICAST_TABLE_BASE
                                (eth_port_num) + tbl_offset), unicast_reg);
                break;

        default:
                return 0;
        }

        return 1;
}
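/*
 * Worked example (illustrative): for a MAC address ending in 0x2B the last
 * nibble is 0xB, so tbl_offset = (0xB / 4) * 4 = 8 (the third 32-bit register
 * of the unicast table) and reg_offset = 0xB % 4 = 3 (the fourth entry, i.e.
 * bits 24-31 of that register). ACCEPT_MAC_ADDR then ORs in 0x01 << 24.
 */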
/*
 * eth_port_init_mac_tables - Clear all entries in the UC, SMC and OMC tables
 *
 * Go through all the DA filter tables (Unicast, Special Multicast &
 * Other Multicast) and set each entry to 0.
 *
 * Input :	unsigned int eth_port_num	Ethernet Port number.
 *
 * Output :	Multicast and Unicast packets are rejected.
 */
static void eth_port_init_mac_tables(unsigned int eth_port_num)
{
        int table_index;

        /* Clear DA filter unicast table (Ex_dFUT) */
        for (table_index = 0; table_index <= 0xC; table_index += 4)
                mv_write((MV643XX_ETH_DA_FILTER_UNICAST_TABLE_BASE
                                        (eth_port_num) + table_index), 0);

        for (table_index = 0; table_index <= 0xFC; table_index += 4) {
                /* Clear DA filter special multicast table (Ex_dFSMT) */
                mv_write((MV643XX_ETH_DA_FILTER_SPECIAL_MULTICAST_TABLE_BASE
                                        (eth_port_num) + table_index), 0);
                /* Clear DA filter other multicast table (Ex_dFOMT) */
                mv_write((MV643XX_ETH_DA_FILTER_OTHER_MULTICAST_TABLE_BASE
                                        (eth_port_num) + table_index), 0);
        }
}
2062 * eth_clear_mib_counters - Clear all MIB counters
2065 * This function clears all MIB counters of a specific ethernet port.
2066 * A read from the MIB counter will reset the counter.
2069 * unsigned int eth_port_num Ethernet Port number.
2072 * After reading all MIB counters, the counters resets.
2075 * MIB counter value.
2078 static void eth_clear_mib_counters(unsigned int eth_port_num
)
2082 /* Perform dummy reads from MIB counters */
2083 for (i
= ETH_MIB_GOOD_OCTETS_RECEIVED_LOW
; i
< ETH_MIB_LATE_COLLISION
;
2085 mv_read(MV643XX_ETH_MIB_COUNTERS_BASE(eth_port_num
) + i
);
2088 static inline u32
read_mib(struct mv643xx_private
*mp
, int offset
)
2090 return mv_read(MV643XX_ETH_MIB_COUNTERS_BASE(mp
->port_num
) + offset
);
static void eth_update_mib_counters(struct mv643xx_private *mp)
{
	struct mv643xx_mib_counters *p = &mp->mib_counters;
	int offset;

	p->good_octets_received +=
			read_mib(mp, ETH_MIB_GOOD_OCTETS_RECEIVED_LOW);
	p->good_octets_received +=
			(u64)read_mib(mp, ETH_MIB_GOOD_OCTETS_RECEIVED_HIGH) << 32;

	for (offset = ETH_MIB_BAD_OCTETS_RECEIVED;
			offset <= ETH_MIB_FRAMES_1024_TO_MAX_OCTETS;
			offset += 4)
		*(u32 *)((char *)p + offset) = read_mib(mp, offset);

	p->good_octets_sent += read_mib(mp, ETH_MIB_GOOD_OCTETS_SENT_LOW);
	p->good_octets_sent +=
			(u64)read_mib(mp, ETH_MIB_GOOD_OCTETS_SENT_HIGH) << 32;

	for (offset = ETH_MIB_GOOD_FRAMES_SENT;
			offset <= ETH_MIB_LATE_COLLISION;
			offset += 4)
		*(u32 *)((char *)p + offset) = read_mib(mp, offset);
}

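/*
 * Note (illustrative): good_octets_received/sent are 64-bit software
 * counters assembled from two 32-bit MIB registers, accumulated above as
 * low + ((u64)high << 32).  For example, hardware readings high = 0x1 and
 * low = 0x10 contribute 0x100000010 octets.  All other counters are plain
 * 32-bit reads copied into struct mv643xx_mib_counters by structure offset.
 */
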
/*
 * ethernet_phy_detect - Detect whether a phy is present
 *
 * DESCRIPTION:
 *	This function tests whether there is a PHY present on
 *	the specified port.
 *
 * INPUT:
 *	unsigned int	eth_port_num	Ethernet Port number.
 *
 * OUTPUT:
 *	None.
 *
 * RETURN:
 *	0 on success
 *	-ENODEV on failure
 *
 */
static int ethernet_phy_detect(unsigned int port_num)
{
	unsigned int phy_reg_data0;
	int auto_neg;

	eth_port_read_smi_reg(port_num, 0, &phy_reg_data0);
	auto_neg = phy_reg_data0 & 0x1000;
	phy_reg_data0 ^= 0x1000;	/* invert auto_neg */
	eth_port_write_smi_reg(port_num, 0, phy_reg_data0);

	eth_port_read_smi_reg(port_num, 0, &phy_reg_data0);
	if ((phy_reg_data0 & 0x1000) == auto_neg)
		return -ENODEV;		/* change didn't take */

	phy_reg_data0 ^= 0x1000;	/* restore auto_neg */
	eth_port_write_smi_reg(port_num, 0, phy_reg_data0);

	return 0;
}

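/*
 * Probe sketch (derived from the code above): detection toggles the
 * auto-negotiation enable bit (0x1000) in PHY register 0 and reads it back;
 * an unchanged value is treated as "no PHY present".  On success the bit is
 * flipped back so the PHY configuration is left as it was found.
 */
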
/*
 * ethernet_phy_get - Get the ethernet port PHY address.
 *
 * DESCRIPTION:
 *	This routine returns the given ethernet port PHY address.
 *
 * INPUT:
 *	unsigned int	eth_port_num	Ethernet Port number.
 *
 * OUTPUT:
 *	None.
 *
 * RETURN:
 *	PHY address.
 *
 */
static int ethernet_phy_get(unsigned int eth_port_num)
{
	unsigned int reg_data;

	reg_data = mv_read(MV643XX_ETH_PHY_ADDR_REG);

	return ((reg_data >> (5 * eth_port_num)) & 0x1f);
}

/*
 * ethernet_phy_set - Set the ethernet port PHY address.
 *
 * DESCRIPTION:
 *	This routine sets the given ethernet port PHY address.
 *
 * INPUT:
 *	unsigned int	eth_port_num	Ethernet Port number.
 *	int		phy_addr	PHY address.
 *
 * OUTPUT:
 *	None.
 *
 * RETURN:
 *	None.
 *
 */
static void ethernet_phy_set(unsigned int eth_port_num, int phy_addr)
{
	u32 reg_data;
	int addr_shift = 5 * eth_port_num;

	reg_data = mv_read(MV643XX_ETH_PHY_ADDR_REG);
	reg_data &= ~(0x1f << addr_shift);
	reg_data |= (phy_addr & 0x1f) << addr_shift;
	mv_write(MV643XX_ETH_PHY_ADDR_REG, reg_data);
}

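/*
 * Example (illustrative): MV643XX_ETH_PHY_ADDR_REG packs one 5-bit PHY
 * address per port at bit offset 5 * eth_port_num.  For eth_port_num = 1
 * and phy_addr = 0x0c, the code above clears bits [9:5] and writes 0x0c
 * there, leaving the other ports' fields unchanged.
 */
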
/*
 * ethernet_phy_reset - Reset Ethernet port PHY.
 *
 * DESCRIPTION:
 *	This routine utilizes the SMI interface to reset the ethernet port PHY.
 *
 * INPUT:
 *	unsigned int	eth_port_num	Ethernet Port number.
 *
 * OUTPUT:
 *	The PHY is reset.
 *
 * RETURN:
 *	None.
 *
 */
static void ethernet_phy_reset(unsigned int eth_port_num)
{
	unsigned int phy_reg_data;

	/* Reset the PHY */
	eth_port_read_smi_reg(eth_port_num, 0, &phy_reg_data);
	phy_reg_data |= 0x8000;		/* Set bit 15 to reset the PHY */
	eth_port_write_smi_reg(eth_port_num, 0, phy_reg_data);
}

/*
 * eth_port_reset - Reset Ethernet port
 *
 * DESCRIPTION:
 *	This routine resets the chip by aborting any SDMA engine activity and
 *	clearing the MIB counters. The Receiver and the Transmit unit are in
 *	idle state after this command is performed and the port is disabled.
 *
 * INPUT:
 *	unsigned int	eth_port_num	Ethernet Port number.
 *
 * OUTPUT:
 *	Channel activity is halted.
 *
 * RETURN:
 *	None.
 *
 */
static void eth_port_reset(unsigned int port_num)
{
	unsigned int reg_data;

	/* Stop Tx port activity. Check port Tx activity. */
	reg_data = mv_read(MV643XX_ETH_TRANSMIT_QUEUE_COMMAND_REG(port_num));

	if (reg_data & 0xFF) {
		/* Issue stop command for active channels only */
		mv_write(MV643XX_ETH_TRANSMIT_QUEUE_COMMAND_REG(port_num),
							(reg_data << 8));

		/* Wait for all Tx activity to terminate. */
		/* Check port cause register that all Tx queues are stopped */
		while (mv_read(MV643XX_ETH_TRANSMIT_QUEUE_COMMAND_REG(port_num))
									& 0xFF)
			udelay(10);
	}

	/* Stop Rx port activity. Check port Rx activity. */
	reg_data = mv_read(MV643XX_ETH_RECEIVE_QUEUE_COMMAND_REG(port_num));

	if (reg_data & 0xFF) {
		/* Issue stop command for active channels only */
		mv_write(MV643XX_ETH_RECEIVE_QUEUE_COMMAND_REG(port_num),
							(reg_data << 8));

		/* Wait for all Rx activity to terminate. */
		/* Check port cause register that all Rx queues are stopped */
		while (mv_read(MV643XX_ETH_RECEIVE_QUEUE_COMMAND_REG(port_num))
									& 0xFF)
			udelay(10);
	}

	/* Clear all MIB counters */
	eth_clear_mib_counters(port_num);

	/* Reset the Enable bit in the Configuration Register */
	reg_data = mv_read(MV643XX_ETH_PORT_SERIAL_CONTROL_REG(port_num));
	reg_data &= ~MV643XX_ETH_SERIAL_PORT_ENABLE;
	mv_write(MV643XX_ETH_PORT_SERIAL_CONTROL_REG(port_num), reg_data);
}

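/*
 * Note (assumption about register layout): the queue command registers are
 * taken here to carry enable bits in [7:0] and disable bits in [15:8], so
 * writing the active-queue mask shifted left by 8 requests a stop of exactly
 * the running queues; the loops above then poll the enable bits until the
 * SDMA engines report idle.
 */
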
/*
 * ethernet_set_config_reg - Set specified bits in configuration register.
 *
 * DESCRIPTION:
 *	This function sets specified bits in the given ethernet
 *	configuration register.
 *
 * INPUT:
 *	unsigned int	eth_port_num	Ethernet Port number.
 *	unsigned int	value		32 bit value.
 *
 * OUTPUT:
 *	The set bits in the value parameter are set in the configuration
 *	register.
 *
 * RETURN:
 *	None.
 *
 */
static void ethernet_set_config_reg(unsigned int eth_port_num,
							unsigned int value)
{
	unsigned int eth_config_reg;

	eth_config_reg = mv_read(MV643XX_ETH_PORT_CONFIG_REG(eth_port_num));
	eth_config_reg |= value;
	mv_write(MV643XX_ETH_PORT_CONFIG_REG(eth_port_num), eth_config_reg);
}

static int eth_port_autoneg_supported(unsigned int eth_port_num)
{
	unsigned int phy_reg_data0;

	eth_port_read_smi_reg(eth_port_num, 0, &phy_reg_data0);

	return phy_reg_data0 & 0x1000;
}

static int eth_port_link_is_up(unsigned int eth_port_num)
{
	unsigned int phy_reg_data1;

	eth_port_read_smi_reg(eth_port_num, 1, &phy_reg_data1);

	if (eth_port_autoneg_supported(eth_port_num)) {
		if (phy_reg_data1 & 0x20)	/* auto-neg complete */
			return 1;
	} else if (phy_reg_data1 & 0x4)		/* link up */
		return 1;

	return 0;
}

/*
 * ethernet_get_config_reg - Get the port configuration register
 *
 * DESCRIPTION:
 *	This function returns the configuration register value of the given
 *	ethernet port.
 *
 * INPUT:
 *	unsigned int	eth_port_num	Ethernet Port number.
 *
 * OUTPUT:
 *	None.
 *
 * RETURN:
 *	Port configuration register value.
 */
static unsigned int ethernet_get_config_reg(unsigned int eth_port_num)
{
	unsigned int eth_config_reg;

	eth_config_reg = mv_read(MV643XX_ETH_PORT_CONFIG_EXTEND_REG
							(eth_port_num));
	return eth_config_reg;
}

/*
 * eth_port_read_smi_reg - Read PHY registers
 *
 * DESCRIPTION:
 *	This routine utilizes the SMI interface to interact with the PHY in
 *	order to perform PHY register reads.
 *
 * INPUT:
 *	unsigned int	port_num	Ethernet Port number.
 *	unsigned int	phy_reg		PHY register address offset.
 *	unsigned int	*value		Register value buffer.
 *
 * OUTPUT:
 *	Write the value of a specified PHY register into given buffer.
 *
 * RETURN:
 *	false if the PHY is busy or read data is not in valid state.
 *	true otherwise.
 *
 */
static void eth_port_read_smi_reg(unsigned int port_num,
				unsigned int phy_reg, unsigned int *value)
{
	int phy_addr = ethernet_phy_get(port_num);
	unsigned long flags;
	int i;

	/* the SMI register is a shared resource */
	spin_lock_irqsave(&mv643xx_eth_phy_lock, flags);

	/* wait for the SMI register to become available */
	for (i = 0; mv_read(MV643XX_ETH_SMI_REG) & ETH_SMI_BUSY; i++) {
		if (i == PHY_WAIT_ITERATIONS) {
			printk("mv643xx PHY busy timeout, port %d\n", port_num);
			goto out;
		}
		udelay(PHY_WAIT_MICRO_SECONDS);
	}

	mv_write(MV643XX_ETH_SMI_REG,
		(phy_addr << 16) | (phy_reg << 21) | ETH_SMI_OPCODE_READ);

	/* now wait for the data to be valid */
	for (i = 0; !(mv_read(MV643XX_ETH_SMI_REG) & ETH_SMI_READ_VALID); i++) {
		if (i == PHY_WAIT_ITERATIONS) {
			printk("mv643xx PHY read timeout, port %d\n", port_num);
			goto out;
		}
		udelay(PHY_WAIT_MICRO_SECONDS);
	}

	*value = mv_read(MV643XX_ETH_SMI_REG) & 0xffff;
out:
	spin_unlock_irqrestore(&mv643xx_eth_phy_lock, flags);
}

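/*
 * Usage sketch (illustrative, hypothetical caller): reading the PHY
 * identifier out of MII registers 2 and 3 with this helper would look like
 *
 *	unsigned int id1, id2;
 *
 *	eth_port_read_smi_reg(port_num, 2, &id1);
 *	eth_port_read_smi_reg(port_num, 3, &id2);
 *
 * The helper serializes on mv643xx_eth_phy_lock, so it may be called from
 * any context where taking that spinlock with IRQs disabled is acceptable.
 */
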
/*
 * eth_port_write_smi_reg - Write to PHY registers
 *
 * DESCRIPTION:
 *	This routine utilizes the SMI interface to interact with the PHY in
 *	order to perform writes to PHY registers.
 *
 * INPUT:
 *	unsigned int	eth_port_num	Ethernet Port number.
 *	unsigned int	phy_reg		PHY register address offset.
 *	unsigned int	value		Register value.
 *
 * OUTPUT:
 *	Write the given value to the specified PHY register.
 *
 * RETURN:
 *	false if the PHY is busy.
 *	true otherwise.
 *
 */
static void eth_port_write_smi_reg(unsigned int eth_port_num,
				unsigned int phy_reg, unsigned int value)
{
	int phy_addr;
	int i;
	unsigned long flags;

	phy_addr = ethernet_phy_get(eth_port_num);

	/* the SMI register is a shared resource */
	spin_lock_irqsave(&mv643xx_eth_phy_lock, flags);

	/* wait for the SMI register to become available */
	for (i = 0; mv_read(MV643XX_ETH_SMI_REG) & ETH_SMI_BUSY; i++) {
		if (i == PHY_WAIT_ITERATIONS) {
			printk("mv643xx PHY busy timeout, port %d\n",
								eth_port_num);
			goto out;
		}
		udelay(PHY_WAIT_MICRO_SECONDS);
	}

	mv_write(MV643XX_ETH_SMI_REG, (phy_addr << 16) | (phy_reg << 21) |
				ETH_SMI_OPCODE_WRITE | (value & 0xffff));
out:
	spin_unlock_irqrestore(&mv643xx_eth_phy_lock, flags);
}

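/*
 * Worked example (illustrative): for phy_addr = 8, phy_reg = 0 (control
 * register) and value = 0x1000, the command word written above is
 *
 *	(8 << 16) | (0 << 21) | ETH_SMI_OPCODE_WRITE | 0x1000
 *
 * i.e. the PHY address in bits [20:16], the register number in bits
 * [25:21], and the 16-bit data in the low half of MV643XX_ETH_SMI_REG.
 */
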
/*
 * eth_port_send - Send an Ethernet packet
 *
 * DESCRIPTION:
 *	This routine sends a given packet described by the p_pkt_info
 *	parameter. It supports transmitting a packet spanned over multiple
 *	buffers. The routine updates the 'curr' and 'first' indexes according
 *	to the packet segment passed to the routine. In case the packet
 *	segment is the first one, the 'first' index is updated. In any case,
 *	the 'curr' index is updated. If the routine gets into a Tx resource
 *	error, it assigns the 'curr' index as 'first'. This way the function
 *	can abort the Tx process of a packet spanning multiple descriptors.
 *
 * INPUT:
 *	struct mv643xx_private	*mp		Ethernet Port Control struct.
 *	struct pkt_info		*p_pkt_info	User packet buffer.
 *
 * OUTPUT:
 *	Tx ring 'curr' and 'first' indexes are updated.
 *
 * RETURN:
 *	ETH_QUEUE_FULL in case of Tx resource error.
 *	ETH_ERROR in case the routine can not access Tx desc ring.
 *	ETH_QUEUE_LAST_RESOURCE if the routine uses the last Tx resource.
 *
 */
#ifdef MV643XX_CHECKSUM_OFFLOAD_TX
/*
 * Modified to include the first descriptor pointer in case of SG
 */
static ETH_FUNC_RET_STATUS eth_port_send(struct mv643xx_private *mp,
					struct pkt_info *p_pkt_info)
{
	int tx_desc_curr, tx_desc_used, tx_first_desc, tx_next_desc;
	struct eth_tx_desc *current_descriptor;
	struct eth_tx_desc *first_descriptor;
	u32 command;

	/* Do not process Tx ring in case of Tx ring resource error */
	if (mp->tx_resource_err)
		return ETH_QUEUE_FULL;

	/*
	 * The hardware requires that each buffer that is <= 8 bytes
	 * in length must be aligned on an 8 byte boundary.
	 */
	if (p_pkt_info->byte_cnt <= 8 && p_pkt_info->buf_ptr & 0x7) {
		printk(KERN_ERR
			"mv643xx_eth port %d: packet size <= 8 problem\n",
			mp->port_num);
		return ETH_ERROR;
	}

	/* Get the Tx Desc ring indexes */
	tx_desc_curr = mp->tx_curr_desc_q;
	tx_desc_used = mp->tx_used_desc_q;

	current_descriptor = &mp->p_tx_desc_area[tx_desc_curr];

	tx_next_desc = (tx_desc_curr + 1) % mp->tx_ring_size;

	current_descriptor->buf_ptr = p_pkt_info->buf_ptr;
	current_descriptor->byte_cnt = p_pkt_info->byte_cnt;
	current_descriptor->l4i_chk = p_pkt_info->l4i_chk;
	mp->tx_skb[tx_desc_curr] = p_pkt_info->return_info;

	command = p_pkt_info->cmd_sts | ETH_ZERO_PADDING | ETH_GEN_CRC |
						ETH_BUFFER_OWNED_BY_DMA;
	if (command & ETH_TX_FIRST_DESC) {
		tx_first_desc = tx_desc_curr;
		mp->tx_first_desc_q = tx_first_desc;
		first_descriptor = current_descriptor;
		mp->tx_first_command = command;
	} else {
		tx_first_desc = mp->tx_first_desc_q;
		first_descriptor = &mp->p_tx_desc_area[tx_first_desc];
		BUG_ON(first_descriptor == NULL);
		current_descriptor->cmd_sts = command;
	}

	if (command & ETH_TX_LAST_DESC) {
		wmb();
		first_descriptor->cmd_sts = mp->tx_first_command;

		wmb();
		ETH_ENABLE_TX_QUEUE(mp->port_num);

		/*
		 * Finish Tx packet. Update first desc in case of Tx resource
		 * error.
		 */
		tx_first_desc = tx_next_desc;
		mp->tx_first_desc_q = tx_first_desc;
	}

	/* Check for ring index overlap in the Tx desc ring */
	if (tx_next_desc == tx_desc_used) {
		mp->tx_resource_err = 1;
		mp->tx_curr_desc_q = tx_first_desc;

		return ETH_QUEUE_LAST_RESOURCE;
	}

	mp->tx_curr_desc_q = tx_next_desc;

	return ETH_OK;
}
#else
static ETH_FUNC_RET_STATUS eth_port_send(struct mv643xx_private *mp,
					struct pkt_info *p_pkt_info)
{
	int tx_desc_curr;
	int tx_desc_used;
	struct eth_tx_desc *current_descriptor;
	unsigned int command_status;

	/* Do not process Tx ring in case of Tx ring resource error */
	if (mp->tx_resource_err)
		return ETH_QUEUE_FULL;

	/* Get the Tx Desc ring indexes */
	tx_desc_curr = mp->tx_curr_desc_q;
	tx_desc_used = mp->tx_used_desc_q;
	current_descriptor = &mp->p_tx_desc_area[tx_desc_curr];

	command_status = p_pkt_info->cmd_sts | ETH_ZERO_PADDING | ETH_GEN_CRC;
	current_descriptor->buf_ptr = p_pkt_info->buf_ptr;
	current_descriptor->byte_cnt = p_pkt_info->byte_cnt;
	mp->tx_skb[tx_desc_curr] = p_pkt_info->return_info;

	/* Set last desc with DMA ownership and interrupt enable. */
	wmb();
	current_descriptor->cmd_sts = command_status |
			ETH_BUFFER_OWNED_BY_DMA | ETH_TX_ENABLE_INTERRUPT;

	wmb();
	ETH_ENABLE_TX_QUEUE(mp->port_num);

	/* Finish Tx packet. Update first desc in case of Tx resource error */
	tx_desc_curr = (tx_desc_curr + 1) % mp->tx_ring_size;

	/* Update the current descriptor */
	mp->tx_curr_desc_q = tx_desc_curr;

	/* Check for ring index overlap in the Tx desc ring */
	if (tx_desc_curr == tx_desc_used) {
		mp->tx_resource_err = 1;
		return ETH_QUEUE_LAST_RESOURCE;
	}

	return ETH_OK;
}
#endif

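/*
 * Caller sketch (illustrative, hypothetical code): a transmit path would
 * typically map the skb data, describe it in a struct pkt_info and hand it
 * to eth_port_send(), checking the return status for ring exhaustion:
 *
 *	struct pkt_info pkt_info;
 *
 *	pkt_info.byte_cnt = skb->len;
 *	pkt_info.buf_ptr = dma_map_single(NULL, skb->data, skb->len,
 *							DMA_TO_DEVICE);
 *	pkt_info.cmd_sts = ETH_TX_FIRST_DESC | ETH_TX_LAST_DESC;
 *	pkt_info.return_info = skb;
 *
 *	if (eth_port_send(mp, &pkt_info) != ETH_OK)
 *		... stop the queue / handle ETH_QUEUE_LAST_RESOURCE ...
 *
 * The driver's real hard_start_xmit path adds checksum-offload flags and
 * per-fragment descriptors; this only shows the helper's calling contract.
 */
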
/*
 * eth_tx_return_desc - Free all used Tx descriptors
 *
 * DESCRIPTION:
 *	This routine returns the transmitted packet information to the caller.
 *	It uses the 'first' index to support Tx desc return in case a transmit
 *	of a packet spanned over multiple buffers is still in process.
 *	In case the Tx queue was in "resource error" condition, where there are
 *	no available Tx resources, the function resets the resource error flag.
 *
 * INPUT:
 *	struct mv643xx_private	*mp		Ethernet Port Control struct.
 *	struct pkt_info		*p_pkt_info	User packet buffer.
 *
 * OUTPUT:
 *	Tx ring 'first' and 'used' indexes are updated.
 *
 * RETURN:
 *	ETH_ERROR in case the routine can not access Tx desc ring.
 *	ETH_RETRY in case there is transmission in process.
 *	ETH_END_OF_JOB if the routine has nothing to release.
 *
 */
static ETH_FUNC_RET_STATUS eth_tx_return_desc(struct mv643xx_private *mp,
						struct pkt_info *p_pkt_info)
{
	int tx_desc_used;
#ifdef MV643XX_CHECKSUM_OFFLOAD_TX
	int tx_busy_desc = mp->tx_first_desc_q;
#else
	int tx_busy_desc = mp->tx_curr_desc_q;
#endif
	struct eth_tx_desc *p_tx_desc_used;
	unsigned int command_status;

	/* Get the Tx Desc ring indexes */
	tx_desc_used = mp->tx_used_desc_q;

	p_tx_desc_used = &mp->p_tx_desc_area[tx_desc_used];

	/* Sanity check */
	if (p_tx_desc_used == NULL)
		return ETH_ERROR;

	/* Stop release. About to overlap the current available Tx descriptor */
	if (tx_desc_used == tx_busy_desc && !mp->tx_resource_err)
		return ETH_END_OF_JOB;

	command_status = p_tx_desc_used->cmd_sts;

	/* Still transmitting... */
	if (command_status & (ETH_BUFFER_OWNED_BY_DMA))
		return ETH_RETRY;

	/* Pass the packet information to the caller */
	p_pkt_info->cmd_sts = command_status;
	p_pkt_info->return_info = mp->tx_skb[tx_desc_used];
	mp->tx_skb[tx_desc_used] = NULL;

	/* Update the next descriptor to release. */
	mp->tx_used_desc_q = (tx_desc_used + 1) % mp->tx_ring_size;

	/* Any Tx return cancels the Tx resource error status */
	mp->tx_resource_err = 0;

	return ETH_OK;
}

/*
 * eth_port_receive - Get received information from Rx ring.
 *
 * DESCRIPTION:
 *	This routine returns the received data to the caller. There is no
 *	data copying during routine operation. All information is returned
 *	using a pointer to the packet information struct passed from the
 *	caller. If the routine exhausts Rx ring resources, the resource error
 *	flag is set.
 *
 * INPUT:
 *	struct mv643xx_private	*mp		Ethernet Port Control struct.
 *	struct pkt_info		*p_pkt_info	User packet buffer.
 *
 * OUTPUT:
 *	Rx ring current and used indexes are updated.
 *
 * RETURN:
 *	ETH_ERROR in case the routine can not access Rx desc ring.
 *	ETH_QUEUE_FULL if Rx ring resources are exhausted.
 *	ETH_END_OF_JOB if there is no received data.
 *
 */
static ETH_FUNC_RET_STATUS eth_port_receive(struct mv643xx_private *mp,
						struct pkt_info *p_pkt_info)
{
	int rx_next_curr_desc, rx_curr_desc, rx_used_desc;
	volatile struct eth_rx_desc *p_rx_desc;
	unsigned int command_status;

	/* Do not process Rx ring in case of Rx ring resource error */
	if (mp->rx_resource_err)
		return ETH_QUEUE_FULL;

	/* Get the Rx Desc ring 'curr' and 'used' indexes */
	rx_curr_desc = mp->rx_curr_desc_q;
	rx_used_desc = mp->rx_used_desc_q;

	p_rx_desc = &mp->p_rx_desc_area[rx_curr_desc];

	/* The following parameters are used to save readings from memory */
	command_status = p_rx_desc->cmd_sts;
	rmb();

	/* Nothing to receive... */
	if (command_status & (ETH_BUFFER_OWNED_BY_DMA))
		return ETH_END_OF_JOB;

	p_pkt_info->byte_cnt = (p_rx_desc->byte_cnt) - RX_BUF_OFFSET;
	p_pkt_info->cmd_sts = command_status;
	p_pkt_info->buf_ptr = (p_rx_desc->buf_ptr) + RX_BUF_OFFSET;
	p_pkt_info->return_info = mp->rx_skb[rx_curr_desc];
	p_pkt_info->l4i_chk = p_rx_desc->buf_size;

	/* Clean the return info field to indicate that the packet has been */
	/* moved to the upper layers */
	mp->rx_skb[rx_curr_desc] = NULL;

	/* Update current index in data structure */
	rx_next_curr_desc = (rx_curr_desc + 1) % mp->rx_ring_size;
	mp->rx_curr_desc_q = rx_next_curr_desc;

	/* Rx descriptors exhausted. Set the Rx ring resource error flag */
	if (rx_next_curr_desc == rx_used_desc)
		mp->rx_resource_err = 1;

	return ETH_OK;
}

/*
 * eth_rx_return_buff - Returns a Rx buffer back to the Rx ring.
 *
 * DESCRIPTION:
 *	This routine returns a Rx buffer back to the Rx ring. It retrieves the
 *	next 'used' descriptor and attaches the returned buffer to it.
 *	In case the Rx ring was in "resource error" condition, where there are
 *	no available Rx resources, the function resets the resource error flag.
 *
 * INPUT:
 *	struct mv643xx_private	*mp		Ethernet Port Control struct.
 *	struct pkt_info		*p_pkt_info	Information on returned buffer.
 *
 * OUTPUT:
 *	New available Rx resource in Rx descriptor ring.
 *
 * RETURN:
 *	ETH_ERROR in case the routine can not access Rx desc ring.
 *
 */
static ETH_FUNC_RET_STATUS eth_rx_return_buff(struct mv643xx_private *mp,
						struct pkt_info *p_pkt_info)
{
	int used_rx_desc;	/* Where to return Rx resource */
	volatile struct eth_rx_desc *p_used_rx_desc;

	/* Get 'used' Rx descriptor */
	used_rx_desc = mp->rx_used_desc_q;
	p_used_rx_desc = &mp->p_rx_desc_area[used_rx_desc];

	p_used_rx_desc->buf_ptr = p_pkt_info->buf_ptr;
	p_used_rx_desc->buf_size = p_pkt_info->byte_cnt;
	mp->rx_skb[used_rx_desc] = p_pkt_info->return_info;

	/* Flush the write pipe */
	wmb();

	/* Return the descriptor to DMA ownership */
	p_used_rx_desc->cmd_sts =
			ETH_BUFFER_OWNED_BY_DMA | ETH_RX_ENABLE_INTERRUPT;
	wmb();

	/* Move the used descriptor pointer to the next descriptor */
	mp->rx_used_desc_q = (used_rx_desc + 1) % mp->rx_ring_size;

	/* Any Rx return cancels the Rx resource error status */
	mp->rx_resource_err = 0;

	return ETH_OK;
}

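/*
 * Refill sketch (illustrative, hypothetical code): returning a freshly
 * allocated receive buffer to the ring with this helper:
 *
 *	struct pkt_info pkt_info;
 *	struct sk_buff *skb = dev_alloc_skb(RX_SKB_SIZE);
 *
 *	if (skb) {
 *		pkt_info.byte_cnt = RX_SKB_SIZE;
 *		pkt_info.buf_ptr = dma_map_single(NULL, skb->data,
 *					RX_SKB_SIZE, DMA_FROM_DEVICE);
 *		pkt_info.return_info = skb;
 *		if (eth_rx_return_buff(mp, &pkt_info) != ETH_OK)
 *			... undo the mapping and free the skb ...
 *	}
 *
 * The driver's actual Rx refill path does additional skb alignment and
 * bookkeeping; this only shows the helper's calling contract.
 */
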
/************* Begin ethtool support *************************/

struct mv643xx_stats {
	char stat_string[ETH_GSTRING_LEN];
	int sizeof_stat;
	int stat_offset;
};

#define MV643XX_STAT(m) sizeof(((struct mv643xx_private *)0)->m), \
					offsetof(struct mv643xx_private, m)

static const struct mv643xx_stats mv643xx_gstrings_stats[] = {
	{ "rx_packets", MV643XX_STAT(stats.rx_packets) },
	{ "tx_packets", MV643XX_STAT(stats.tx_packets) },
	{ "rx_bytes", MV643XX_STAT(stats.rx_bytes) },
	{ "tx_bytes", MV643XX_STAT(stats.tx_bytes) },
	{ "rx_errors", MV643XX_STAT(stats.rx_errors) },
	{ "tx_errors", MV643XX_STAT(stats.tx_errors) },
	{ "rx_dropped", MV643XX_STAT(stats.rx_dropped) },
	{ "tx_dropped", MV643XX_STAT(stats.tx_dropped) },
	{ "good_octets_received", MV643XX_STAT(mib_counters.good_octets_received) },
	{ "bad_octets_received", MV643XX_STAT(mib_counters.bad_octets_received) },
	{ "internal_mac_transmit_err", MV643XX_STAT(mib_counters.internal_mac_transmit_err) },
	{ "good_frames_received", MV643XX_STAT(mib_counters.good_frames_received) },
	{ "bad_frames_received", MV643XX_STAT(mib_counters.bad_frames_received) },
	{ "broadcast_frames_received", MV643XX_STAT(mib_counters.broadcast_frames_received) },
	{ "multicast_frames_received", MV643XX_STAT(mib_counters.multicast_frames_received) },
	{ "frames_64_octets", MV643XX_STAT(mib_counters.frames_64_octets) },
	{ "frames_65_to_127_octets", MV643XX_STAT(mib_counters.frames_65_to_127_octets) },
	{ "frames_128_to_255_octets", MV643XX_STAT(mib_counters.frames_128_to_255_octets) },
	{ "frames_256_to_511_octets", MV643XX_STAT(mib_counters.frames_256_to_511_octets) },
	{ "frames_512_to_1023_octets", MV643XX_STAT(mib_counters.frames_512_to_1023_octets) },
	{ "frames_1024_to_max_octets", MV643XX_STAT(mib_counters.frames_1024_to_max_octets) },
	{ "good_octets_sent", MV643XX_STAT(mib_counters.good_octets_sent) },
	{ "good_frames_sent", MV643XX_STAT(mib_counters.good_frames_sent) },
	{ "excessive_collision", MV643XX_STAT(mib_counters.excessive_collision) },
	{ "multicast_frames_sent", MV643XX_STAT(mib_counters.multicast_frames_sent) },
	{ "broadcast_frames_sent", MV643XX_STAT(mib_counters.broadcast_frames_sent) },
	{ "unrec_mac_control_received", MV643XX_STAT(mib_counters.unrec_mac_control_received) },
	{ "fc_sent", MV643XX_STAT(mib_counters.fc_sent) },
	{ "good_fc_received", MV643XX_STAT(mib_counters.good_fc_received) },
	{ "bad_fc_received", MV643XX_STAT(mib_counters.bad_fc_received) },
	{ "undersize_received", MV643XX_STAT(mib_counters.undersize_received) },
	{ "fragments_received", MV643XX_STAT(mib_counters.fragments_received) },
	{ "oversize_received", MV643XX_STAT(mib_counters.oversize_received) },
	{ "jabber_received", MV643XX_STAT(mib_counters.jabber_received) },
	{ "mac_receive_error", MV643XX_STAT(mib_counters.mac_receive_error) },
	{ "bad_crc_event", MV643XX_STAT(mib_counters.bad_crc_event) },
	{ "collision", MV643XX_STAT(mib_counters.collision) },
	{ "late_collision", MV643XX_STAT(mib_counters.late_collision) },
};

#define MV643XX_STATS_LEN	\
	sizeof(mv643xx_gstrings_stats) / sizeof(struct mv643xx_stats)

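/*
 * Example (illustrative): MV643XX_STAT(stats.rx_packets) expands to the
 * two initializers
 *
 *	sizeof(((struct mv643xx_private *)0)->stats.rx_packets),
 *	offsetof(struct mv643xx_private, stats.rx_packets)
 *
 * which fill the sizeof_stat and stat_offset members of one
 * struct mv643xx_stats entry; mv643xx_get_ethtool_stats() later uses them
 * to fetch each counter as either a u32 or a u64 by offset from mp.
 */
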
static int
mv643xx_get_settings(struct net_device *netdev, struct ethtool_cmd *ecmd)
{
	struct mv643xx_private *mp = netdev->priv;
	int port_num = mp->port_num;
	int autoneg = eth_port_autoneg_supported(port_num);
	int mode_10_bit;
	int auto_duplex;
	int half_duplex = 0;
	int full_duplex = 0;
	int auto_speed;
	int speed_10 = 0;
	int speed_100 = 0;
	int speed_1000 = 0;

	u32 pcs = mv_read(MV643XX_ETH_PORT_SERIAL_CONTROL_REG(port_num));
	u32 psr = mv_read(MV643XX_ETH_PORT_STATUS_REG(port_num));

	mode_10_bit = psr & MV643XX_ETH_PORT_STATUS_MODE_10_BIT;

	if (mode_10_bit) {
		ecmd->supported = SUPPORTED_10baseT_Half;
	} else {
		ecmd->supported = (SUPPORTED_10baseT_Half		|
				SUPPORTED_10baseT_Full			|
				SUPPORTED_100baseT_Half			|
				SUPPORTED_100baseT_Full			|
				SUPPORTED_1000baseT_Full		|
				(autoneg ? SUPPORTED_Autoneg : 0)	|
				SUPPORTED_TP);

		auto_duplex = !(pcs & MV643XX_ETH_DISABLE_AUTO_NEG_FOR_DUPLX);
		auto_speed = !(pcs & MV643XX_ETH_DISABLE_AUTO_NEG_SPEED_GMII);

		ecmd->advertising = ADVERTISED_TP;

		if (autoneg) {
			ecmd->advertising |= ADVERTISED_Autoneg;

			if (auto_duplex) {
				half_duplex = 1;
				full_duplex = 1;
			} else {
				if (pcs & MV643XX_ETH_SET_FULL_DUPLEX_MODE)
					full_duplex = 1;
				else
					half_duplex = 1;
			}

			if (auto_speed) {
				speed_10 = 1;
				speed_100 = 1;
				speed_1000 = 1;
			} else {
				if (pcs & MV643XX_ETH_SET_GMII_SPEED_TO_1000)
					speed_1000 = 1;
				else if (pcs & MV643XX_ETH_SET_MII_SPEED_TO_100)
					speed_100 = 1;
				else
					speed_10 = 1;
			}

			if (speed_10 & half_duplex)
				ecmd->advertising |= ADVERTISED_10baseT_Half;
			if (speed_10 & full_duplex)
				ecmd->advertising |= ADVERTISED_10baseT_Full;
			if (speed_100 & half_duplex)
				ecmd->advertising |= ADVERTISED_100baseT_Half;
			if (speed_100 & full_duplex)
				ecmd->advertising |= ADVERTISED_100baseT_Full;
			if (speed_1000)
				ecmd->advertising |= ADVERTISED_1000baseT_Full;
		}
	}

	ecmd->port = PORT_TP;
	ecmd->phy_address = ethernet_phy_get(port_num);

	ecmd->transceiver = XCVR_EXTERNAL;

	if (netif_carrier_ok(netdev)) {
		if (mode_10_bit)
			ecmd->speed = SPEED_10;
		else {
			if (psr & MV643XX_ETH_PORT_STATUS_GMII_1000)
				ecmd->speed = SPEED_1000;
			else if (psr & MV643XX_ETH_PORT_STATUS_MII_100)
				ecmd->speed = SPEED_100;
			else
				ecmd->speed = SPEED_10;
		}

		if (psr & MV643XX_ETH_PORT_STATUS_FULL_DUPLEX)
			ecmd->duplex = DUPLEX_FULL;
		else
			ecmd->duplex = DUPLEX_HALF;
	} else {
		ecmd->speed = -1;
		ecmd->duplex = -1;
	}

	ecmd->autoneg = autoneg ? AUTONEG_ENABLE : AUTONEG_DISABLE;

	return 0;
}

static void
mv643xx_get_drvinfo(struct net_device *netdev,
			struct ethtool_drvinfo *drvinfo)
{
	strncpy(drvinfo->driver, mv643xx_driver_name, 32);
	strncpy(drvinfo->version, mv643xx_driver_version, 32);
	strncpy(drvinfo->fw_version, "N/A", 32);
	strncpy(drvinfo->bus_info, "mv643xx", 32);
	drvinfo->n_stats = MV643XX_STATS_LEN;
}

static int
mv643xx_get_stats_count(struct net_device *netdev)
{
	return MV643XX_STATS_LEN;
}

static void
mv643xx_get_ethtool_stats(struct net_device *netdev,
		struct ethtool_stats *stats, uint64_t *data)
{
	struct mv643xx_private *mp = netdev->priv;
	int i;

	eth_update_mib_counters(mp);

	for (i = 0; i < MV643XX_STATS_LEN; i++) {
		char *p = (char *)mp + mv643xx_gstrings_stats[i].stat_offset;
		data[i] = (mv643xx_gstrings_stats[i].sizeof_stat ==
			sizeof(uint64_t)) ? *(uint64_t *)p : *(uint32_t *)p;
	}
}

static void
mv643xx_get_strings(struct net_device *netdev, uint32_t stringset, uint8_t *data)
{
	int i;

	switch (stringset) {
	case ETH_SS_STATS:
		for (i = 0; i < MV643XX_STATS_LEN; i++) {
			memcpy(data + i * ETH_GSTRING_LEN,
				mv643xx_gstrings_stats[i].stat_string,
				ETH_GSTRING_LEN);
		}
		break;
	}
}

static struct ethtool_ops mv643xx_ethtool_ops = {
	.get_settings		= mv643xx_get_settings,
	.get_drvinfo		= mv643xx_get_drvinfo,
	.get_link		= ethtool_op_get_link,
	.get_sg			= ethtool_op_get_sg,
	.set_sg			= ethtool_op_set_sg,
	.get_strings		= mv643xx_get_strings,
	.get_stats_count	= mv643xx_get_stats_count,
	.get_ethtool_stats	= mv643xx_get_ethtool_stats,
};

/************* End ethtool support *************************/