/*
 * drivers/net/mv643xx_eth.c - Driver for MV643XX ethernet ports
 * Copyright (C) 2002 Matthew Dharm <mdharm@momenco.com>
 *
 * Based on the 64360 driver from:
 * Copyright (C) 2002 rabeeh@galileo.co.il
 *
 * Copyright (C) 2003 PMC-Sierra, Inc.,
 *	written by Manish Lachwani (lachwani@pmc-sierra.com)
 *
 * Copyright (C) 2003 Ralf Baechle <ralf@linux-mips.org>
 *
 * Copyright (C) 2004-2005 MontaVista Software, Inc.
 *			   Dale Farnsworth <dale@farnsworth.org>
 *
 * Copyright (C) 2004 Steven J. Hill <sjhill1@rockwellcollins.com>
 *				     <sjhill@realitydiluted.com>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version 2
 * of the License, or (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA  02111-1307, USA.
 */
#include <linux/init.h>
#include <linux/dma-mapping.h>
#include <linux/tcp.h>
#include <linux/udp.h>
#include <linux/etherdevice.h>

#include <linux/bitops.h>
#include <linux/delay.h>
#include <linux/ethtool.h>

#include <asm/types.h>
#include <asm/pgtable.h>
#include <asm/system.h>
#include <asm/delay.h>
#include "mv643xx_eth.h"
/*
 * The first part is the high level driver of the gigE ethernet ports.
 */

#define WRAP			(NET_IP_ALIGN + ETH_HLEN + VLAN_HLEN + FCS_LEN)
#define RX_SKB_SIZE		((dev->mtu + WRAP + 7) & ~0x7)

#define INT_CAUSE_UNMASK_ALL		0x0007ffff
#define INT_CAUSE_UNMASK_ALL_EXT	0x0011ffff
#ifdef MV643XX_RX_QUEUE_FILL_ON_TASK
#define INT_CAUSE_MASK_ALL		0x00000000
#define INT_CAUSE_CHECK_BITS		INT_CAUSE_UNMASK_ALL
#define INT_CAUSE_CHECK_BITS_EXT	INT_CAUSE_UNMASK_ALL_EXT
#endif

#ifdef MV643XX_CHECKSUM_OFFLOAD_TX
#define MAX_DESCS_PER_SKB	(MAX_SKB_FRAGS + 1)
#else
#define MAX_DESCS_PER_SKB	1
#endif

#define PHY_WAIT_ITERATIONS	1000	/* 1000 iterations * 10uS = 10mS max */
#define PHY_WAIT_MICRO_SECONDS	10
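/*
 * Example: with an MTU of 1500 and the usual header sizes (a default
 * NET_IP_ALIGN of 2, ETH_HLEN = 14, VLAN_HLEN = 4, FCS_LEN = 4),
 * WRAP = 24, so RX_SKB_SIZE = (1500 + 24 + 7) & ~0x7 = 1528; the buffer
 * size is rounded up to the next 8-byte boundary.
 */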
/* Static function declarations */
static int eth_port_link_is_up(unsigned int eth_port_num);
static void eth_port_uc_addr_get(struct net_device *dev,
						unsigned char *MacAddr);
static int mv643xx_eth_real_open(struct net_device *);
static int mv643xx_eth_real_stop(struct net_device *);
static int mv643xx_eth_change_mtu(struct net_device *, int);
static struct net_device_stats *mv643xx_eth_get_stats(struct net_device *);
static void eth_port_init_mac_tables(unsigned int eth_port_num);
#ifdef MV643XX_NAPI
static int mv643xx_poll(struct net_device *dev, int *budget);
#endif
static void ethernet_phy_set(unsigned int eth_port_num, int phy_addr);
static int ethernet_phy_detect(unsigned int eth_port_num);
static struct ethtool_ops mv643xx_ethtool_ops;
static char mv643xx_driver_name[] = "mv643xx_eth";
static char mv643xx_driver_version[] = "1.0";

static void __iomem *mv643xx_eth_shared_base;

/* used to protect MV643XX_ETH_SMI_REG, which is shared across ports */
static spinlock_t mv643xx_eth_phy_lock = SPIN_LOCK_UNLOCKED;
static inline u32 mv_read(int offset)
{
	void __iomem *reg_base;

	reg_base = mv643xx_eth_shared_base - MV643XX_ETH_SHARED_REGS;

	return readl(reg_base + offset);
}

static inline void mv_write(int offset, u32 data)
{
	void __iomem *reg_base;

	reg_base = mv643xx_eth_shared_base - MV643XX_ETH_SHARED_REGS;
	writel(data, reg_base + offset);
}
/*
 * Changes MTU (maximum transfer unit) of the gigabit ethernet port
 *
 * Input :	pointer to ethernet interface network device structure
 *		new mtu size
 * Output :	0 upon success, -EINVAL upon failure
 */
static int mv643xx_eth_change_mtu(struct net_device *dev, int new_mtu)
{
	struct mv643xx_private *mp = netdev_priv(dev);
	unsigned long flags;

	spin_lock_irqsave(&mp->lock, flags);

	if ((new_mtu > 9500) || (new_mtu < 64)) {
		spin_unlock_irqrestore(&mp->lock, flags);
		return -EINVAL;
	}

	dev->mtu = new_mtu;
	/*
	 * Stop then re-open the interface. This will allocate RX skb's with
	 * the new MTU.
	 * There is a possible danger that the open will not succeed, due
	 * to memory being full, which might fail the open function.
	 */
	if (netif_running(dev)) {
		if (mv643xx_eth_real_stop(dev))
			printk(KERN_ERR
				"%s: Fatal error on stopping device\n",
				dev->name);
		if (mv643xx_eth_real_open(dev))
			printk(KERN_ERR
				"%s: Fatal error on opening device\n",
				dev->name);
	}

	spin_unlock_irqrestore(&mp->lock, flags);
	return 0;
}
/*
 * mv643xx_eth_rx_task
 *
 * Fills / refills RX queue on a certain gigabit ethernet port
 *
 * Input :	pointer to ethernet interface network device structure
 * Output :	N/A
 */
static void mv643xx_eth_rx_task(void *data)
{
	struct net_device *dev = (struct net_device *)data;
	struct mv643xx_private *mp = netdev_priv(dev);
	struct pkt_info pkt_info;
	struct sk_buff *skb;

	if (test_and_set_bit(0, &mp->rx_task_busy))
		panic("%s: Error in test_set_bit / clear_bit", dev->name);

	while (mp->rx_ring_skbs < (mp->rx_ring_size - 5)) {
		skb = dev_alloc_skb(RX_SKB_SIZE);
		if (!skb)
			break;
		mp->rx_ring_skbs++;
		pkt_info.cmd_sts = ETH_RX_ENABLE_INTERRUPT;
		pkt_info.byte_cnt = RX_SKB_SIZE;
		pkt_info.buf_ptr = dma_map_single(NULL, skb->data, RX_SKB_SIZE,
							DMA_FROM_DEVICE);
		pkt_info.return_info = skb;
		if (eth_rx_return_buff(mp, &pkt_info) != ETH_OK) {
			printk(KERN_ERR
				"%s: Error allocating RX Ring\n", dev->name);
			break;
		}
	}
	clear_bit(0, &mp->rx_task_busy);
	/*
	 * If the RX ring is empty of SKBs, set a timer to try allocating
	 * again at a later time.
	 */
	if ((mp->rx_ring_skbs == 0) && (mp->rx_timer_flag == 0)) {
		printk(KERN_INFO "%s: Rx ring is empty\n", dev->name);
		/* After 100mSec */
		mp->timeout.expires = jiffies + (HZ / 10);
		add_timer(&mp->timeout);
		mp->rx_timer_flag = 1;
	}
#ifdef MV643XX_RX_QUEUE_FILL_ON_TASK
	else {
		/* Return interrupts */
		mv_write(MV643XX_ETH_INTERRUPT_MASK_REG(mp->port_num),
							INT_CAUSE_UNMASK_ALL);
	}
#endif
}
/*
 * mv643xx_eth_rx_task_timer_wrapper
 *
 * Timer routine to wake up the RX queue filling task. This function is
 * used only in case the RX queue is empty and all calls to alloc_skb have
 * failed (due to an out of memory event).
 *
 * Input :	pointer to ethernet interface network device structure
 * Output :	N/A
 */
static void mv643xx_eth_rx_task_timer_wrapper(unsigned long data)
{
	struct net_device *dev = (struct net_device *)data;
	struct mv643xx_private *mp = netdev_priv(dev);

	mp->rx_timer_flag = 0;
	mv643xx_eth_rx_task((void *)data);
}
/*
 * mv643xx_eth_update_mac_address
 *
 * Update the MAC address of the port in the address table
 *
 * Input :	pointer to ethernet interface network device structure
 * Output :	N/A
 */
static void mv643xx_eth_update_mac_address(struct net_device *dev)
{
	struct mv643xx_private *mp = netdev_priv(dev);
	unsigned int port_num = mp->port_num;

	eth_port_init_mac_tables(port_num);
	memcpy(mp->port_mac_addr, dev->dev_addr, 6);
	eth_port_uc_addr_set(port_num, mp->port_mac_addr);
}
/*
 * mv643xx_eth_set_rx_mode
 *
 * Change from promiscuous to regular rx mode
 *
 * Input :	pointer to ethernet interface network device structure
 * Output :	N/A
 */
static void mv643xx_eth_set_rx_mode(struct net_device *dev)
{
	struct mv643xx_private *mp = netdev_priv(dev);
	u32 config_reg;

	config_reg = ethernet_get_config_reg(mp->port_num);
	if (dev->flags & IFF_PROMISC)
		config_reg |= (u32) MV643XX_ETH_UNICAST_PROMISCUOUS_MODE;
	else
		config_reg &= ~(u32) MV643XX_ETH_UNICAST_PROMISCUOUS_MODE;
	ethernet_set_config_reg(mp->port_num, config_reg);
}
/*
 * mv643xx_eth_set_mac_address
 *
 * Change the interface's mac address.
 * Nothing special needs to be done in hardware because the interface is
 * always put in promiscuous mode.
 *
 * Input :	pointer to ethernet interface network device structure and
 *		a pointer to the designated entry to be added to the cache.
 * Output :	zero upon success, negative upon failure
 */
static int mv643xx_eth_set_mac_address(struct net_device *dev, void *addr)
{
	int i;

	for (i = 0; i < 6; i++)
		/* +2 is for the offset of the HW addr type */
		dev->dev_addr[i] = ((unsigned char *)addr)[i + 2];
	mv643xx_eth_update_mac_address(dev);
	return 0;
}
/*
 * mv643xx_eth_tx_timeout
 *
 * Called upon a timeout on transmitting a packet
 *
 * Input :	pointer to ethernet interface network device structure.
 * Output :	N/A
 */
static void mv643xx_eth_tx_timeout(struct net_device *dev)
{
	struct mv643xx_private *mp = netdev_priv(dev);

	printk(KERN_INFO "%s: TX timeout  ", dev->name);

	/* Do the reset outside of interrupt context */
	schedule_work(&mp->tx_timeout_task);
}
/*
 * mv643xx_eth_tx_timeout_task
 *
 * Actual routine to reset the adapter when a timeout on Tx has occurred
 */
static void mv643xx_eth_tx_timeout_task(struct net_device *dev)
{
	struct mv643xx_private *mp = netdev_priv(dev);

	netif_device_detach(dev);
	eth_port_reset(mp->port_num);
	eth_port_start(mp);
	netif_device_attach(dev);
}
/*
 * mv643xx_eth_free_tx_queue
 *
 * Input :	dev - a pointer to the required interface
 *
 * Output :	0 if was able to release skb , nonzero otherwise
 */
static int mv643xx_eth_free_tx_queue(struct net_device *dev,
					unsigned int eth_int_cause_ext)
{
	struct mv643xx_private *mp = netdev_priv(dev);
	struct net_device_stats *stats = &mp->stats;
	struct pkt_info pkt_info;
	int released = 1;

	if (!(eth_int_cause_ext & (BIT0 | BIT8)))
		return released;

	spin_lock(&mp->lock);

	/* Check only queue 0 */
	while (eth_tx_return_desc(mp, &pkt_info) == ETH_OK) {
		if (pkt_info.cmd_sts & BIT0) {
			printk("%s: Error in TX\n", dev->name);
			stats->tx_errors++;
		}

		/*
		 * If return_info is different than 0, release the skb.
		 * return_info is only non-zero when a scatter/gather packet
		 * was transmitted, where only the last skb releases the
		 * whole chain.
		 */
		if (pkt_info.return_info) {
			if (skb_shinfo(pkt_info.return_info)->nr_frags)
				dma_unmap_page(NULL, pkt_info.buf_ptr,
						pkt_info.byte_cnt,
						DMA_TO_DEVICE);
			else
				dma_unmap_single(NULL, pkt_info.buf_ptr,
						pkt_info.byte_cnt,
						DMA_TO_DEVICE);

			dev_kfree_skb_irq(pkt_info.return_info);
			released = 0;

			/*
			 * Decrement the number of outstanding skbs counter on
			 * the TX queue.
			 */
			if (mp->tx_ring_skbs == 0)
				panic("ERROR - TX outstanding SKBs"
						" counter is corrupted");
			mp->tx_ring_skbs--;
		} else
			dma_unmap_page(NULL, pkt_info.buf_ptr,
					pkt_info.byte_cnt, DMA_TO_DEVICE);
	}

	spin_unlock(&mp->lock);

	return released;
}
/*
 * mv643xx_eth_receive
 *
 * This function forwards packets that are received from the port's
 * queues toward kernel core or FastRoutes them to another interface.
 *
 * Input :	dev - a pointer to the required interface
 *		max - maximum number to receive (0 means unlimited)
 *
 * Output :	number of served packets
 */
#ifdef MV643XX_NAPI
static int mv643xx_eth_receive_queue(struct net_device *dev, int budget)
#else
static int mv643xx_eth_receive_queue(struct net_device *dev)
#endif
{
	struct mv643xx_private *mp = netdev_priv(dev);
	struct net_device_stats *stats = &mp->stats;
	unsigned int received_packets = 0;
	struct sk_buff *skb;
	struct pkt_info pkt_info;

#ifdef MV643XX_NAPI
	while (budget-- > 0 && eth_port_receive(mp, &pkt_info) == ETH_OK) {
#else
	while (eth_port_receive(mp, &pkt_info) == ETH_OK) {
#endif
		mp->rx_ring_skbs--;
		received_packets++;

		/* Update statistics. Note byte count includes 4 byte CRC count */
		stats->rx_packets++;
		stats->rx_bytes += pkt_info.byte_cnt;
		skb = pkt_info.return_info;
		/*
		 * In case we received a packet without the first / last bits
		 * on, OR the error summary bit is on, the packet needs to be
		 * dropped.
		 */
		if (((pkt_info.cmd_sts
				& (ETH_RX_FIRST_DESC | ETH_RX_LAST_DESC)) !=
					(ETH_RX_FIRST_DESC | ETH_RX_LAST_DESC))
				|| (pkt_info.cmd_sts & ETH_ERROR_SUMMARY)) {
			stats->rx_dropped++;
			if ((pkt_info.cmd_sts & (ETH_RX_FIRST_DESC |
							ETH_RX_LAST_DESC)) !=
				(ETH_RX_FIRST_DESC | ETH_RX_LAST_DESC)) {
				if (net_ratelimit())
					printk(KERN_ERR
						"%s: Received packet spread "
						"on multiple descriptors\n",
						dev->name);
			}
			if (pkt_info.cmd_sts & ETH_ERROR_SUMMARY)
				stats->rx_errors++;

			dev_kfree_skb_irq(skb);
		} else {
			/*
			 * The -4 is for the CRC in the trailer of the
			 * received packet
			 */
			skb_put(skb, pkt_info.byte_cnt - 4);
			skb->dev = dev;

			if (pkt_info.cmd_sts & ETH_LAYER_4_CHECKSUM_OK) {
				skb->ip_summed = CHECKSUM_UNNECESSARY;
				skb->csum = htons(
					(pkt_info.cmd_sts & 0x0007fff8) >> 3);
			}
			skb->protocol = eth_type_trans(skb, dev);
#ifdef MV643XX_NAPI
			netif_receive_skb(skb);
#else
			netif_rx(skb);
#endif
		}
	}

	return received_packets;
}
/*
 * mv643xx_eth_int_handler
 *
 * Main interrupt handler for the gigabit ethernet ports
 *
 * Input :	irq	- irq number (not used)
 *		dev_id	- a pointer to the required interface's data structure
 *		regs	- not used
 * Output :	N/A
 */

static irqreturn_t mv643xx_eth_int_handler(int irq, void *dev_id,
						struct pt_regs *regs)
{
	struct net_device *dev = (struct net_device *)dev_id;
	struct mv643xx_private *mp = netdev_priv(dev);
	u32 eth_int_cause, eth_int_cause_ext = 0;
	unsigned int port_num = mp->port_num;

	/* Read interrupt cause registers */
	eth_int_cause = mv_read(MV643XX_ETH_INTERRUPT_CAUSE_REG(port_num)) &
						INT_CAUSE_UNMASK_ALL;

	if (eth_int_cause & BIT1)
		eth_int_cause_ext = mv_read(
			MV643XX_ETH_INTERRUPT_CAUSE_EXTEND_REG(port_num)) &
						INT_CAUSE_UNMASK_ALL_EXT;

#ifdef MV643XX_NAPI
	if (!(eth_int_cause & 0x0007fffd)) {
		/* Dont ack the Rx interrupt */
#endif
		/*
		 * Clear specific ethernet port interrupt registers by
		 * acknowledging relevant bits.
		 */
		mv_write(MV643XX_ETH_INTERRUPT_CAUSE_REG(port_num),
							~eth_int_cause);
		if (eth_int_cause_ext != 0x0)
			mv_write(MV643XX_ETH_INTERRUPT_CAUSE_EXTEND_REG
					(port_num), ~eth_int_cause_ext);

		/* UDP change : We may need this */
		if ((eth_int_cause_ext & 0x0000ffff) &&
		    (mv643xx_eth_free_tx_queue(dev, eth_int_cause_ext) == 0) &&
		    (mp->tx_ring_size > mp->tx_ring_skbs + MAX_DESCS_PER_SKB))
			netif_wake_queue(dev);
#ifdef MV643XX_NAPI
	} else {
		if (netif_rx_schedule_prep(dev)) {
			/* Mask all the interrupts */
			mv_write(MV643XX_ETH_INTERRUPT_MASK_REG(port_num), 0);
			mv_write(MV643XX_ETH_INTERRUPT_EXTEND_MASK_REG
							(port_num), 0);
			__netif_rx_schedule(dev);
		}
	}
#else
	if (eth_int_cause & (BIT2 | BIT11))
		mv643xx_eth_receive_queue(dev);

	/*
	 * After forwarding the received packets to the upper layer, add a task
	 * in an interrupts enabled context that refills the RX ring with skb's.
	 */
#ifdef MV643XX_RX_QUEUE_FILL_ON_TASK
	/* Mask all interrupts on ethernet port */
	mv_write(MV643XX_ETH_INTERRUPT_MASK_REG(port_num),
						INT_CAUSE_MASK_ALL);
	queue_task(&mp->rx_task, &tq_immediate);
	mark_bh(IMMEDIATE_BH);
#else
	mp->rx_task.func(dev);
#endif
#endif

	/* PHY status changed */
	if (eth_int_cause_ext & (BIT16 | BIT20)) {
		if (eth_port_link_is_up(port_num)) {
			netif_carrier_on(dev);
			netif_wake_queue(dev);
			/* Start TX queue */
			mv_write(MV643XX_ETH_TRANSMIT_QUEUE_COMMAND_REG
							(port_num), 1);
		} else {
			netif_carrier_off(dev);
			netif_stop_queue(dev);
		}
	}

	/*
	 * If no real interrupt occurred, exit.
	 * This can happen when using the gigE interrupt coalescing mechanism.
	 */
	if ((eth_int_cause == 0x0) && (eth_int_cause_ext == 0x0))
		return IRQ_NONE;

	return IRQ_HANDLED;
}
/*
 * eth_port_set_rx_coal - Sets coalescing interrupt mechanism on RX path
 *
 * DESCRIPTION:
 *	This routine sets the RX coalescing interrupt mechanism parameter.
 *	This parameter is a timeout counter that counts in 64 t_clk
 *	chunks; when the timeout event occurs, a maskable interrupt
 *	is generated.
 *	The parameter is calculated using the tClk of the MV-643xx chip
 *	and the required delay of the interrupt in usec.
 *
 * INPUT:
 *	unsigned int eth_port_num	Ethernet port number
 *	unsigned int t_clk		t_clk of the MV-643xx chip in HZ units
 *	unsigned int delay		Delay in usec
 *
 * OUTPUT:
 *	Interrupt coalescing mechanism value is set in the MV-643xx chip.
 *
 * RETURN:
 *	The interrupt coalescing value set in the gigE port.
 */
static unsigned int eth_port_set_rx_coal(unsigned int eth_port_num,
					unsigned int t_clk, unsigned int delay)
{
	unsigned int coal = ((t_clk / 1000000) * delay) / 64;

	/* Set RX Coalescing mechanism */
	mv_write(MV643XX_ETH_SDMA_CONFIG_REG(eth_port_num),
		((coal & 0x3fff) << 8) |
		(mv_read(MV643XX_ETH_SDMA_CONFIG_REG(eth_port_num))
			& 0xffc000ff));

	return coal;
}
/*
 * eth_port_set_tx_coal - Sets coalescing interrupt mechanism on TX path
 *
 * DESCRIPTION:
 *	This routine sets the TX coalescing interrupt mechanism parameter.
 *	This parameter is a timeout counter that counts in 64 t_clk
 *	chunks; when the timeout event occurs, a maskable interrupt
 *	is generated.
 *	The parameter is calculated using the t_clk frequency of the
 *	MV-643xx chip and the required delay of the interrupt in uSec.
 *
 * INPUT:
 *	unsigned int eth_port_num	Ethernet port number
 *	unsigned int t_clk		t_clk of the MV-643xx chip in HZ units
 *	unsigned int delay		Delay in uSeconds
 *
 * OUTPUT:
 *	Interrupt coalescing mechanism value is set in the MV-643xx chip.
 *
 * RETURN:
 *	The interrupt coalescing value set in the gigE port.
 */
static unsigned int eth_port_set_tx_coal(unsigned int eth_port_num,
					unsigned int t_clk, unsigned int delay)
{
	unsigned int coal;

	coal = ((t_clk / 1000000) * delay) / 64;
	/* Set TX Coalescing mechanism */
	mv_write(MV643XX_ETH_TX_FIFO_URGENT_THRESHOLD_REG(eth_port_num),
								coal << 4);
	return coal;
}
/*
 * mv643xx_eth_open
 *
 * This function is called when opening the network device. The function
 * should initialize all the hardware, initialize the cyclic Rx/Tx
 * descriptors chain and buffers and allocate an IRQ to the network
 * device.
 *
 * Input :	a pointer to the network device structure
 *
 * Output :	zero on success, nonzero if it fails.
 */
static int mv643xx_eth_open(struct net_device *dev)
{
	struct mv643xx_private *mp = netdev_priv(dev);
	unsigned int port_num = mp->port_num;
	int err;

	spin_lock_irq(&mp->lock);

	err = request_irq(dev->irq, mv643xx_eth_int_handler,
			SA_SHIRQ | SA_SAMPLE_RANDOM, dev->name, dev);
	if (err) {
		printk(KERN_ERR "Can not assign IRQ number to MV643XX_eth%d\n",
								port_num);
		err = -EAGAIN;
		goto out;
	}

	if (mv643xx_eth_real_open(dev)) {
		printk("%s: Error opening interface\n", dev->name);
		err = -EBUSY;
		goto out_free;
	}

	spin_unlock_irq(&mp->lock);

	return 0;

out_free:
	free_irq(dev->irq, dev);

out:
	spin_unlock_irq(&mp->lock);

	return err;
}
/*
 * ether_init_rx_desc_ring - Curve a Rx chain desc list and buffer in memory.
 *
 * DESCRIPTION:
 *	This function prepares a Rx chained list of descriptors and packet
 *	buffers in a form of a ring. The routine must be called after port
 *	initialization routine and before port start routine.
 *	The Ethernet SDMA engine uses CPU bus addresses to access the various
 *	devices in the system (i.e. DRAM). This function uses the ethernet
 *	struct 'virtual to physical' routine (set by the user) to set the ring
 *	with physical addresses.
 *
 * INPUT:
 *	struct mv643xx_private *mp	Ethernet Port Control struct.
 *
 * OUTPUT:
 *	The routine updates the Ethernet port control struct with information
 *	regarding the Rx descriptors and buffers.
 *
 * RETURN:
 *	None.
 */
static void ether_init_rx_desc_ring(struct mv643xx_private *mp)
{
	volatile struct eth_rx_desc *p_rx_desc;
	int rx_desc_num = mp->rx_ring_size;
	int i;

	/* initialize the next_desc_ptr links in the Rx descriptors ring */
	p_rx_desc = (struct eth_rx_desc *)mp->p_rx_desc_area;
	for (i = 0; i < rx_desc_num; i++) {
		p_rx_desc[i].next_desc_ptr = mp->rx_desc_dma +
			((i + 1) % rx_desc_num) * sizeof(struct eth_rx_desc);
	}

	/* Save Rx desc pointer to driver struct. */
	mp->rx_curr_desc_q = 0;
	mp->rx_used_desc_q = 0;

	mp->rx_desc_area_size = rx_desc_num * sizeof(struct eth_rx_desc);

	/* Add the queue to the list of RX queues of this port */
	mp->port_rx_queue_command |= 1;
}
/*
 * ether_init_tx_desc_ring - Curve a Tx chain desc list and buffer in memory.
 *
 * DESCRIPTION:
 *	This function prepares a Tx chained list of descriptors and packet
 *	buffers in a form of a ring. The routine must be called after port
 *	initialization routine and before port start routine.
 *	The Ethernet SDMA engine uses CPU bus addresses to access the various
 *	devices in the system (i.e. DRAM). This function uses the ethernet
 *	struct 'virtual to physical' routine (set by the user) to set the ring
 *	with physical addresses.
 *
 * INPUT:
 *	struct mv643xx_private *mp	Ethernet Port Control struct.
 *
 * OUTPUT:
 *	The routine updates the Ethernet port control struct with information
 *	regarding the Tx descriptors and buffers.
 *
 * RETURN:
 *	None.
 */
static void ether_init_tx_desc_ring(struct mv643xx_private *mp)
{
	int tx_desc_num = mp->tx_ring_size;
	struct eth_tx_desc *p_tx_desc;
	int i;

	/* Initialize the next_desc_ptr links in the Tx descriptors ring */
	p_tx_desc = (struct eth_tx_desc *)mp->p_tx_desc_area;
	for (i = 0; i < tx_desc_num; i++) {
		p_tx_desc[i].next_desc_ptr = mp->tx_desc_dma +
			((i + 1) % tx_desc_num) * sizeof(struct eth_tx_desc);
	}

	mp->tx_curr_desc_q = 0;
	mp->tx_used_desc_q = 0;
#ifdef MV643XX_CHECKSUM_OFFLOAD_TX
	mp->tx_first_desc_q = 0;
#endif

	mp->tx_desc_area_size = tx_desc_num * sizeof(struct eth_tx_desc);

	/* Add the queue to the list of Tx queues of this port */
	mp->port_tx_queue_command |= 1;
}
/* Helper function for mv643xx_eth_open */
static int mv643xx_eth_real_open(struct net_device *dev)
{
	struct mv643xx_private *mp = netdev_priv(dev);
	unsigned int port_num = mp->port_num;
	unsigned int size;

	/* Stop RX Queues */
	mv_write(MV643XX_ETH_RECEIVE_QUEUE_COMMAND_REG(port_num), 0x0000ff00);

	/* Clear the ethernet port interrupts */
	mv_write(MV643XX_ETH_INTERRUPT_CAUSE_REG(port_num), 0);
	mv_write(MV643XX_ETH_INTERRUPT_CAUSE_EXTEND_REG(port_num), 0);

	/* Unmask RX buffer and TX end interrupt */
	mv_write(MV643XX_ETH_INTERRUPT_MASK_REG(port_num),
						INT_CAUSE_UNMASK_ALL);

	/* Unmask phy and link status changes interrupts */
	mv_write(MV643XX_ETH_INTERRUPT_EXTEND_MASK_REG(port_num),
						INT_CAUSE_UNMASK_ALL_EXT);

	/* Set the MAC Address */
	memcpy(mp->port_mac_addr, dev->dev_addr, 6);

	eth_port_init(mp);

	INIT_WORK(&mp->rx_task, (void (*)(void *))mv643xx_eth_rx_task, dev);

	memset(&mp->timeout, 0, sizeof(struct timer_list));
	mp->timeout.function = mv643xx_eth_rx_task_timer_wrapper;
	mp->timeout.data = (unsigned long)dev;

	mp->rx_task_busy = 0;
	mp->rx_timer_flag = 0;

	/* Allocate RX and TX skb rings */
	mp->rx_skb = kmalloc(sizeof(*mp->rx_skb) * mp->rx_ring_size,
								GFP_KERNEL);
	if (!mp->rx_skb) {
		printk(KERN_ERR "%s: Cannot allocate Rx skb ring\n",
								dev->name);
		return -ENOMEM;
	}
	mp->tx_skb = kmalloc(sizeof(*mp->tx_skb) * mp->tx_ring_size,
								GFP_KERNEL);
	if (!mp->tx_skb) {
		printk(KERN_ERR "%s: Cannot allocate Tx skb ring\n",
								dev->name);
		kfree(mp->rx_skb);
		return -ENOMEM;
	}

	/* Allocate TX ring */
	mp->tx_ring_skbs = 0;
	size = mp->tx_ring_size * sizeof(struct eth_tx_desc);
	mp->tx_desc_area_size = size;

	if (mp->tx_sram_size) {
		mp->p_tx_desc_area = ioremap(mp->tx_sram_addr,
							mp->tx_sram_size);
		mp->tx_desc_dma = mp->tx_sram_addr;
	} else
		mp->p_tx_desc_area = dma_alloc_coherent(NULL, size,
							&mp->tx_desc_dma,
							GFP_KERNEL);

	if (!mp->p_tx_desc_area) {
		printk(KERN_ERR "%s: Cannot allocate Tx Ring (size %d bytes)\n",
							dev->name, size);
		kfree(mp->rx_skb);
		kfree(mp->tx_skb);
		return -ENOMEM;
	}
	BUG_ON((u32) mp->p_tx_desc_area & 0xf);	/* check 16-byte alignment */
	memset((void *)mp->p_tx_desc_area, 0, mp->tx_desc_area_size);

	ether_init_tx_desc_ring(mp);

	/* Allocate RX ring */
	mp->rx_ring_skbs = 0;
	size = mp->rx_ring_size * sizeof(struct eth_rx_desc);
	mp->rx_desc_area_size = size;

	if (mp->rx_sram_size) {
		mp->p_rx_desc_area = ioremap(mp->rx_sram_addr,
							mp->rx_sram_size);
		mp->rx_desc_dma = mp->rx_sram_addr;
	} else
		mp->p_rx_desc_area = dma_alloc_coherent(NULL, size,
							&mp->rx_desc_dma,
							GFP_KERNEL);

	if (!mp->p_rx_desc_area) {
		printk(KERN_ERR "%s: Cannot allocate Rx ring (size %d bytes)\n",
							dev->name, size);
		printk(KERN_ERR "%s: Freeing previously allocated TX queues...",
							dev->name);
		if (mp->tx_sram_size)
			iounmap(mp->p_tx_desc_area);
		else
			dma_free_coherent(NULL, mp->tx_desc_area_size,
					mp->p_tx_desc_area, mp->tx_desc_dma);
		kfree(mp->rx_skb);
		kfree(mp->tx_skb);
		return -ENOMEM;
	}
	memset((void *)mp->p_rx_desc_area, 0, size);

	ether_init_rx_desc_ring(mp);

	mv643xx_eth_rx_task(dev);	/* Fill RX ring with skb's */

	eth_port_start(mp);

	/* Interrupt Coalescing */

#ifdef MV643XX_COAL
	mp->rx_int_coal =
		eth_port_set_rx_coal(port_num, 133000000, MV643XX_RX_COAL);
#endif

	mp->tx_int_coal =
		eth_port_set_tx_coal(port_num, 133000000, MV643XX_TX_COAL);

	netif_start_queue(dev);

	return 0;
}
static void mv643xx_eth_free_tx_rings(struct net_device *dev)
{
	struct mv643xx_private *mp = netdev_priv(dev);
	unsigned int port_num = mp->port_num;
	unsigned int curr;

	/* Stop Tx Queues */
	mv_write(MV643XX_ETH_TRANSMIT_QUEUE_COMMAND_REG(port_num), 0x0000ff00);

	/* Free outstanding skb's on TX rings */
	for (curr = 0; mp->tx_ring_skbs && curr < mp->tx_ring_size; curr++) {
		if (mp->tx_skb[curr]) {
			dev_kfree_skb(mp->tx_skb[curr]);
			mp->tx_ring_skbs--;
		}
	}
	if (mp->tx_ring_skbs)
		printk("%s: Error on Tx descriptor free - could not free %d"
				" descriptors\n", dev->name, mp->tx_ring_skbs);

	/* Free TX ring */
	if (mp->tx_sram_size)
		iounmap(mp->p_tx_desc_area);
	else
		dma_free_coherent(NULL, mp->tx_desc_area_size,
				mp->p_tx_desc_area, mp->tx_desc_dma);
}
static void mv643xx_eth_free_rx_rings(struct net_device *dev)
{
	struct mv643xx_private *mp = netdev_priv(dev);
	unsigned int port_num = mp->port_num;
	unsigned int curr;

	/* Stop RX Queues */
	mv_write(MV643XX_ETH_RECEIVE_QUEUE_COMMAND_REG(port_num), 0x0000ff00);

	/* Free preallocated skb's on RX rings */
	for (curr = 0; mp->rx_ring_skbs && curr < mp->rx_ring_size; curr++) {
		if (mp->rx_skb[curr]) {
			dev_kfree_skb(mp->rx_skb[curr]);
			mp->rx_ring_skbs--;
		}
	}

	if (mp->rx_ring_skbs)
		printk(KERN_ERR
			"%s: Error in freeing Rx Ring. %d skb's still"
			" stuck in RX Ring - ignoring them\n", dev->name,
			mp->rx_ring_skbs);
	/* Free RX ring */
	if (mp->rx_sram_size)
		iounmap(mp->p_rx_desc_area);
	else
		dma_free_coherent(NULL, mp->rx_desc_area_size,
				mp->p_rx_desc_area, mp->rx_desc_dma);
}
/*
 * mv643xx_eth_stop
 *
 * This function is used when closing the network device.
 * It updates the hardware, releases all memory that holds buffers and
 * descriptors, and releases the IRQ.
 * Input :	a pointer to the device structure
 * Output :	zero if success , nonzero if fails
 */

/* Helper function for mv643xx_eth_stop */
static int mv643xx_eth_real_stop(struct net_device *dev)
{
	struct mv643xx_private *mp = netdev_priv(dev);
	unsigned int port_num = mp->port_num;

	netif_carrier_off(dev);
	netif_stop_queue(dev);

	mv643xx_eth_free_tx_rings(dev);
	mv643xx_eth_free_rx_rings(dev);

	eth_port_reset(mp->port_num);

	/* Disable ethernet port interrupts */
	mv_write(MV643XX_ETH_INTERRUPT_CAUSE_REG(port_num), 0);
	mv_write(MV643XX_ETH_INTERRUPT_CAUSE_EXTEND_REG(port_num), 0);

	/* Mask RX buffer and TX end interrupt */
	mv_write(MV643XX_ETH_INTERRUPT_MASK_REG(port_num), 0);

	/* Mask phy and link status changes interrupts */
	mv_write(MV643XX_ETH_INTERRUPT_EXTEND_MASK_REG(port_num), 0);

	return 0;
}
static int mv643xx_eth_stop(struct net_device *dev)
{
	struct mv643xx_private *mp = netdev_priv(dev);

	spin_lock_irq(&mp->lock);

	mv643xx_eth_real_stop(dev);

	free_irq(dev->irq, dev);
	spin_unlock_irq(&mp->lock);

	return 0;
}
#ifdef MV643XX_NAPI
static void mv643xx_tx(struct net_device *dev)
{
	struct mv643xx_private *mp = netdev_priv(dev);
	struct pkt_info pkt_info;

	while (eth_tx_return_desc(mp, &pkt_info) == ETH_OK) {
		if (pkt_info.return_info) {
			if (skb_shinfo(pkt_info.return_info)->nr_frags)
				dma_unmap_page(NULL, pkt_info.buf_ptr,
						pkt_info.byte_cnt,
						DMA_TO_DEVICE);
			else
				dma_unmap_single(NULL, pkt_info.buf_ptr,
						pkt_info.byte_cnt,
						DMA_TO_DEVICE);

			dev_kfree_skb_irq(pkt_info.return_info);

			if (mp->tx_ring_skbs)
				mp->tx_ring_skbs--;
		} else
			dma_unmap_page(NULL, pkt_info.buf_ptr,
					pkt_info.byte_cnt, DMA_TO_DEVICE);
	}

	if (netif_queue_stopped(dev) &&
			mp->tx_ring_size > mp->tx_ring_skbs + MAX_DESCS_PER_SKB)
		netif_wake_queue(dev);
}
/*
 * mv643xx_poll
 *
 * This function is used in case of NAPI
 */
static int mv643xx_poll(struct net_device *dev, int *budget)
{
	struct mv643xx_private *mp = netdev_priv(dev);
	int done = 1, orig_budget, work_done;
	unsigned int port_num = mp->port_num;
	unsigned long flags;

#ifdef MV643XX_TX_FAST_REFILL
	if (++mp->tx_clean_threshold > 5) {
		spin_lock_irqsave(&mp->lock, flags);
		mv643xx_tx(dev);
		mp->tx_clean_threshold = 0;
		spin_unlock_irqrestore(&mp->lock, flags);
	}
#endif

	if ((mv_read(MV643XX_ETH_RX_CURRENT_QUEUE_DESC_PTR_0(port_num)))
						!= (u32) mp->rx_used_desc_q) {
		orig_budget = *budget;
		if (orig_budget > dev->quota)
			orig_budget = dev->quota;
		work_done = mv643xx_eth_receive_queue(dev, orig_budget);
		mp->rx_task.func(dev);
		*budget -= work_done;
		dev->quota -= work_done;
		if (work_done >= orig_budget)
			done = 0;
	}

	if (done) {
		spin_lock_irqsave(&mp->lock, flags);
		__netif_rx_complete(dev);
		mv_write(MV643XX_ETH_INTERRUPT_CAUSE_REG(port_num), 0);
		mv_write(MV643XX_ETH_INTERRUPT_CAUSE_EXTEND_REG(port_num), 0);
		mv_write(MV643XX_ETH_INTERRUPT_MASK_REG(port_num),
						INT_CAUSE_UNMASK_ALL);
		mv_write(MV643XX_ETH_INTERRUPT_EXTEND_MASK_REG(port_num),
						INT_CAUSE_UNMASK_ALL_EXT);
		spin_unlock_irqrestore(&mp->lock, flags);
	}

	return done ? 0 : 1;
}
#endif
/*
 * mv643xx_eth_start_xmit
 *
 * This function queues a packet in the Tx descriptor ring for the
 * required port.
 *
 * Input :	skb - a pointer to socket buffer
 *		dev - a pointer to the required port
 *
 * Output :	zero upon success
 */
static int mv643xx_eth_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct mv643xx_private *mp = netdev_priv(dev);
	struct net_device_stats *stats = &mp->stats;
	ETH_FUNC_RET_STATUS status;
	unsigned long flags;
	struct pkt_info pkt_info;

	if (netif_queue_stopped(dev)) {
		printk(KERN_ERR
			"%s: Tried sending packet when interface is stopped\n",
			dev->name);
		return 1;
	}

	/* This is a hard error, log it. */
	if ((mp->tx_ring_size - mp->tx_ring_skbs) <=
					(skb_shinfo(skb)->nr_frags + 1)) {
		netif_stop_queue(dev);
		printk(KERN_ERR
			"%s: Bug in mv643xx_eth - Trying to transmit when"
			" queue full !\n", dev->name);
		return 1;
	}

	/* Paranoid check - this shouldn't happen */
	if (skb == NULL) {
		stats->tx_dropped++;
		printk(KERN_ERR "mv64320_eth paranoid check failed\n");
		return 1;
	}

	spin_lock_irqsave(&mp->lock, flags);

	/* Update packet info data structure -- DMA owned, first last */
#ifdef MV643XX_CHECKSUM_OFFLOAD_TX
	if (!skb_shinfo(skb)->nr_frags) {
linear:
		if (skb->ip_summed != CHECKSUM_HW) {
			/* Errata BTS #50, IHL must be 5 if no HW checksum */
			pkt_info.cmd_sts = ETH_TX_ENABLE_INTERRUPT |
						ETH_TX_FIRST_DESC |
						ETH_TX_LAST_DESC |
						5 << ETH_TX_IHL_SHIFT;
			pkt_info.l4i_chk = 0;
		} else {
			pkt_info.cmd_sts = ETH_TX_ENABLE_INTERRUPT |
						ETH_TX_FIRST_DESC |
						ETH_TX_LAST_DESC |
						ETH_GEN_TCP_UDP_CHECKSUM |
						ETH_GEN_IP_V_4_CHECKSUM |
						skb->nh.iph->ihl <<
							ETH_TX_IHL_SHIFT;
			/* CPU already calculated pseudo header checksum. */
			if (skb->nh.iph->protocol == IPPROTO_UDP) {
				pkt_info.cmd_sts |= ETH_UDP_FRAME;
				pkt_info.l4i_chk = skb->h.uh->check;
			} else if (skb->nh.iph->protocol == IPPROTO_TCP)
				pkt_info.l4i_chk = skb->h.th->check;
			else {
				printk(KERN_ERR
					"%s: chksum proto != TCP or UDP\n",
					dev->name);
				spin_unlock_irqrestore(&mp->lock, flags);
				return 1;
			}
		}
		pkt_info.byte_cnt = skb->len;
		pkt_info.buf_ptr = dma_map_single(NULL, skb->data, skb->len,
							DMA_TO_DEVICE);
		pkt_info.return_info = skb;
		mp->tx_ring_skbs++;
		status = eth_port_send(mp, &pkt_info);
		if ((status == ETH_ERROR) || (status == ETH_QUEUE_FULL))
			printk(KERN_ERR "%s: Error on transmitting packet\n",
								dev->name);
		stats->tx_bytes += pkt_info.byte_cnt;
	} else {
		unsigned int frag;

		/* Since hardware can't handle unaligned fragments smaller
		 * than 9 bytes, if we find any, we linearize the skb
		 * and start again. When I've seen it, it's always been
		 * the first frag (probably near the end of the page),
		 * but we check all frags to be safe.
		 */
		for (frag = 0; frag < skb_shinfo(skb)->nr_frags; frag++) {
			skb_frag_t *fragp;

			fragp = &skb_shinfo(skb)->frags[frag];
			if (fragp->size <= 8 && fragp->page_offset & 0x7) {
				skb_linearize(skb, GFP_ATOMIC);
				printk(KERN_DEBUG "%s: unaligned tiny fragment"
						" %d of %d, fixed\n",
						dev->name, frag,
						skb_shinfo(skb)->nr_frags);
				goto linear;
			}
		}

		/* first frag which is skb header */
		pkt_info.byte_cnt = skb_headlen(skb);
		pkt_info.buf_ptr = dma_map_single(NULL, skb->data,
							skb_headlen(skb),
							DMA_TO_DEVICE);
		pkt_info.l4i_chk = 0;
		pkt_info.return_info = 0;

		if (skb->ip_summed != CHECKSUM_HW)
			/* Errata BTS #50, IHL must be 5 if no HW checksum */
			pkt_info.cmd_sts = ETH_TX_FIRST_DESC |
						5 << ETH_TX_IHL_SHIFT;
		else {
			pkt_info.cmd_sts = ETH_TX_FIRST_DESC |
						ETH_GEN_TCP_UDP_CHECKSUM |
						ETH_GEN_IP_V_4_CHECKSUM |
						skb->nh.iph->ihl <<
							ETH_TX_IHL_SHIFT;
			/* CPU already calculated pseudo header checksum. */
			if (skb->nh.iph->protocol == IPPROTO_UDP) {
				pkt_info.cmd_sts |= ETH_UDP_FRAME;
				pkt_info.l4i_chk = skb->h.uh->check;
			} else if (skb->nh.iph->protocol == IPPROTO_TCP)
				pkt_info.l4i_chk = skb->h.th->check;
			else {
				printk(KERN_ERR
					"%s: chksum proto != TCP or UDP\n",
					dev->name);
				spin_unlock_irqrestore(&mp->lock, flags);
				return 1;
			}
		}

		status = eth_port_send(mp, &pkt_info);
		if (status != ETH_OK) {
			if ((status == ETH_ERROR))
				printk(KERN_ERR
					"%s: Error on transmitting packet\n",
					dev->name);
			if (status == ETH_QUEUE_FULL)
				printk("Error on Queue Full \n");
			if (status == ETH_QUEUE_LAST_RESOURCE)
				printk("Tx resource error \n");
		}
		stats->tx_bytes += pkt_info.byte_cnt;

		/* Check for the remaining frags */
		for (frag = 0; frag < skb_shinfo(skb)->nr_frags; frag++) {
			skb_frag_t *this_frag = &skb_shinfo(skb)->frags[frag];
			pkt_info.l4i_chk = 0x0000;
			pkt_info.cmd_sts = 0x00000000;

			/* Last Frag enables interrupt and frees the skb */
			if (frag == (skb_shinfo(skb)->nr_frags - 1)) {
				pkt_info.cmd_sts |= ETH_TX_ENABLE_INTERRUPT |
							ETH_TX_LAST_DESC;
				pkt_info.return_info = skb;
				mp->tx_ring_skbs++;
			} else {
				pkt_info.return_info = 0;
			}
			pkt_info.l4i_chk = 0;
			pkt_info.byte_cnt = this_frag->size;

			pkt_info.buf_ptr = dma_map_page(NULL, this_frag->page,
					this_frag->page_offset,
					this_frag->size, DMA_TO_DEVICE);

			status = eth_port_send(mp, &pkt_info);

			if (status != ETH_OK) {
				if ((status == ETH_ERROR))
					printk(KERN_ERR "%s: Error on "
							"transmitting packet\n",
							dev->name);

				if (status == ETH_QUEUE_LAST_RESOURCE)
					printk("Tx resource error \n");

				if (status == ETH_QUEUE_FULL)
					printk("Queue is full \n");
			}
			stats->tx_bytes += pkt_info.byte_cnt;
		}
	}
#else
	pkt_info.cmd_sts = ETH_TX_ENABLE_INTERRUPT | ETH_TX_FIRST_DESC |
							ETH_TX_LAST_DESC;
	pkt_info.l4i_chk = 0;
	pkt_info.byte_cnt = skb->len;
	pkt_info.buf_ptr = dma_map_single(NULL, skb->data, skb->len,
							DMA_TO_DEVICE);
	pkt_info.return_info = skb;
	mp->tx_ring_skbs++;
	status = eth_port_send(mp, &pkt_info);
	if ((status == ETH_ERROR) || (status == ETH_QUEUE_FULL))
		printk(KERN_ERR "%s: Error on transmitting packet\n",
								dev->name);
	stats->tx_bytes += pkt_info.byte_cnt;
#endif

	/* Check if TX queue can handle another skb. If not, then
	 * signal higher layers to stop requesting TX
	 */
	if (mp->tx_ring_size <= (mp->tx_ring_skbs + MAX_DESCS_PER_SKB))
		/*
		 * Stop getting skb's from upper layers.
		 * Getting skb's from upper layers will be enabled again after
		 * packets are released.
		 */
		netif_stop_queue(dev);

	/* Update statistics and start of transmission time */
	stats->tx_packets++;
	dev->trans_start = jiffies;

	spin_unlock_irqrestore(&mp->lock, flags);

	return 0;		/* success */
}
/*
 * mv643xx_eth_get_stats
 *
 * Returns a pointer to the interface statistics.
 *
 * Input :	dev - a pointer to the required interface
 *
 * Output :	a pointer to the interface's statistics
 */
static struct net_device_stats *mv643xx_eth_get_stats(struct net_device *dev)
{
	struct mv643xx_private *mp = netdev_priv(dev);

	return &mp->stats;
}
/*
 * mv643xx_eth_probe
 *
 * First function called after registering the network device.
 * Its purpose is to initialize the device as an ethernet device,
 * fill the ethernet device structure with pointers to functions,
 * and set the MAC address of the interface
 *
 * Input :	struct device *
 * Output :	-ENOMEM if failed , 0 if success
 */
static int mv643xx_eth_probe(struct device *ddev)
{
	struct platform_device *pdev = to_platform_device(ddev);
	struct mv643xx_eth_platform_data *pd;
	int port_num = pdev->id;
	struct mv643xx_private *mp;
	struct net_device *dev;
	u8 *p;
	struct resource *res;
	int err;

	dev = alloc_etherdev(sizeof(struct mv643xx_private));
	if (!dev)
		return -ENOMEM;

	dev_set_drvdata(ddev, dev);

	mp = netdev_priv(dev);

	res = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
	BUG_ON(!res);
	dev->irq = res->start;

	mp->port_num = port_num;

	dev->open = mv643xx_eth_open;
	dev->stop = mv643xx_eth_stop;
	dev->hard_start_xmit = mv643xx_eth_start_xmit;
	dev->get_stats = mv643xx_eth_get_stats;
	dev->set_mac_address = mv643xx_eth_set_mac_address;
	dev->set_multicast_list = mv643xx_eth_set_rx_mode;

	/* No need to Tx Timeout */
	dev->tx_timeout = mv643xx_eth_tx_timeout;
#ifdef MV643XX_NAPI
	dev->poll = mv643xx_poll;
	dev->weight = 64;
#endif

	dev->watchdog_timeo = 2 * HZ;
	dev->tx_queue_len = mp->tx_ring_size;
	dev->base_addr = 0;
	dev->change_mtu = mv643xx_eth_change_mtu;
	SET_ETHTOOL_OPS(dev, &mv643xx_ethtool_ops);

#ifdef MV643XX_CHECKSUM_OFFLOAD_TX
#ifdef MAX_SKB_FRAGS
	/*
	 * Zero copy can only work if we use Discovery II memory. Else, we will
	 * have to map the buffers to ISA memory which is only 16 MB
	 */
	dev->features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_HW_CSUM;
#endif
#endif

	/* Configure the timeout task */
	INIT_WORK(&mp->tx_timeout_task,
			(void (*)(void *))mv643xx_eth_tx_timeout_task, dev);

	spin_lock_init(&mp->lock);

	/* set default config values */
	eth_port_uc_addr_get(dev, dev->dev_addr);
	mp->port_config = MV643XX_ETH_PORT_CONFIG_DEFAULT_VALUE;
	mp->port_config_extend = MV643XX_ETH_PORT_CONFIG_EXTEND_DEFAULT_VALUE;
	mp->port_sdma_config = MV643XX_ETH_PORT_SDMA_CONFIG_DEFAULT_VALUE;
	mp->port_serial_control = MV643XX_ETH_PORT_SERIAL_CONTROL_DEFAULT_VALUE;
	mp->rx_ring_size = MV643XX_ETH_PORT_DEFAULT_RECEIVE_QUEUE_SIZE;
	mp->tx_ring_size = MV643XX_ETH_PORT_DEFAULT_TRANSMIT_QUEUE_SIZE;

	pd = pdev->dev.platform_data;
	if (pd) {
		if (pd->mac_addr != NULL)
			memcpy(dev->dev_addr, pd->mac_addr, 6);

		if (pd->phy_addr || pd->force_phy_addr)
			ethernet_phy_set(port_num, pd->phy_addr);

		if (pd->port_config || pd->force_port_config)
			mp->port_config = pd->port_config;

		if (pd->port_config_extend || pd->force_port_config_extend)
			mp->port_config_extend = pd->port_config_extend;

		if (pd->port_sdma_config || pd->force_port_sdma_config)
			mp->port_sdma_config = pd->port_sdma_config;

		if (pd->port_serial_control || pd->force_port_serial_control)
			mp->port_serial_control = pd->port_serial_control;

		if (pd->rx_queue_size)
			mp->rx_ring_size = pd->rx_queue_size;

		if (pd->tx_queue_size)
			mp->tx_ring_size = pd->tx_queue_size;

		if (pd->tx_sram_size) {
			mp->tx_sram_size = pd->tx_sram_size;
			mp->tx_sram_addr = pd->tx_sram_addr;
		}

		if (pd->rx_sram_size) {
			mp->rx_sram_size = pd->rx_sram_size;
			mp->rx_sram_addr = pd->rx_sram_addr;
		}
	}

	err = ethernet_phy_detect(port_num);
	if (err) {
		pr_debug("MV643xx ethernet port %d: "
					"No PHY detected at addr %d\n",
					port_num, ethernet_phy_get(port_num));
		goto out;
	}

	err = register_netdev(dev);
	if (err)
		goto out;

	p = dev->dev_addr;
	printk(KERN_NOTICE
		"%s: port %d with MAC address %02x:%02x:%02x:%02x:%02x:%02x\n",
		dev->name, port_num, p[0], p[1], p[2], p[3], p[4], p[5]);

	if (dev->features & NETIF_F_SG)
		printk(KERN_NOTICE "%s: Scatter Gather Enabled\n", dev->name);

	if (dev->features & NETIF_F_IP_CSUM)
		printk(KERN_NOTICE "%s: TX TCP/IP Checksumming Supported\n",
								dev->name);

#ifdef MV643XX_CHECKSUM_OFFLOAD_TX
	printk(KERN_NOTICE "%s: RX TCP/UDP Checksum Offload ON \n", dev->name);
#endif

#ifdef MV643XX_COAL
	printk(KERN_NOTICE "%s: TX and RX Interrupt Coalescing ON \n",
								dev->name);
#endif

#ifdef MV643XX_NAPI
	printk(KERN_NOTICE "%s: RX NAPI Enabled \n", dev->name);
#endif

	return 0;

out:
	free_netdev(dev);

	return err;
}
static int mv643xx_eth_remove(struct device *ddev)
{
	struct net_device *dev = dev_get_drvdata(ddev);

	unregister_netdev(dev);
	flush_scheduled_work();

	free_netdev(dev);
	dev_set_drvdata(ddev, NULL);
	return 0;
}
static int mv643xx_eth_shared_probe(struct device *ddev)
{
	struct platform_device *pdev = to_platform_device(ddev);
	struct resource *res;

	printk(KERN_NOTICE "MV-643xx 10/100/1000 Ethernet Driver\n");

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (res == NULL)
		return -ENODEV;

	mv643xx_eth_shared_base = ioremap(res->start,
						MV643XX_ETH_SHARED_REGS_SIZE);
	if (mv643xx_eth_shared_base == NULL)
		return -ENOMEM;

	return 0;
}
static int mv643xx_eth_shared_remove(struct device *ddev)
{
	iounmap(mv643xx_eth_shared_base);
	mv643xx_eth_shared_base = NULL;

	return 0;
}
static struct device_driver mv643xx_eth_driver = {
	.name = MV643XX_ETH_NAME,
	.bus = &platform_bus_type,
	.probe = mv643xx_eth_probe,
	.remove = mv643xx_eth_remove,
};

static struct device_driver mv643xx_eth_shared_driver = {
	.name = MV643XX_ETH_SHARED_NAME,
	.bus = &platform_bus_type,
	.probe = mv643xx_eth_shared_probe,
	.remove = mv643xx_eth_shared_remove,
};
/*
 * mv643xx_init_module
 *
 * Registers the network drivers into the Linux kernel
 *
 * Input :	N/A
 *
 * Output :	N/A
 */
static int __init mv643xx_init_module(void)
{
	int rc;

	rc = driver_register(&mv643xx_eth_shared_driver);
	if (!rc) {
		rc = driver_register(&mv643xx_eth_driver);
		if (rc)
			driver_unregister(&mv643xx_eth_shared_driver);
	}
	return rc;
}
/*
 * mv643xx_cleanup_module
 *
 * Unregisters the network drivers from the Linux kernel
 *
 * Input :	N/A
 *
 * Output :	N/A
 */
static void __exit mv643xx_cleanup_module(void)
{
	driver_unregister(&mv643xx_eth_driver);
	driver_unregister(&mv643xx_eth_shared_driver);
}
module_init(mv643xx_init_module);
module_exit(mv643xx_cleanup_module);

MODULE_LICENSE("GPL");
MODULE_AUTHOR(	"Rabeeh Khoury, Assaf Hoffman, Matthew Dharm, Manish Lachwani"
		" and Dale Farnsworth");
MODULE_DESCRIPTION("Ethernet driver for Marvell MV643XX");
/*
 * The second part is the low level driver of the gigE ethernet ports.
 */

/*
 * Marvell's Gigabit Ethernet controller low level driver
 *
 * DESCRIPTION:
 *	This file introduces a low level API to Marvell's Gigabit Ethernet
 *	controller. This Gigabit Ethernet Controller driver API controls
 *	1) Operations (i.e. port init, start, reset etc').
 *	2) Data flow (i.e. port send, receive etc').
 *	Each Gigabit Ethernet port is controlled via
 *	struct mv643xx_private.
 *	This struct includes user configuration information as well as
 *	driver internal data needed for its operations.
 *
 *	Supported Features:
 *	- This low level driver is OS independent. Allocating memory for
 *	  the descriptor rings and buffers is not within the scope of
 *	  this driver.
 *	- The user is free from Rx/Tx queue managing.
 *	- This low level driver introduces a functionality API that enables
 *	  the user to operate Marvell's Gigabit Ethernet Controller in a
 *	  convenient way.
 *	- Simple Gigabit Ethernet port operation API.
 *	- Simple Gigabit Ethernet port data flow API.
 *	- Data flow and operation API support per queue functionality.
 *	- Support cached descriptors for better performance.
 *	- Enable access to all four DRAM banks and internal SRAM memory
 *	  spaces.
 *	- PHY access and control API.
 *	- Port control register configuration API.
 *	- Full control over Unicast and Multicast MAC configurations.
 *
 *	Operation flow:
 *
 *	Initialization phase
 *	This phase completes the initialization of the mv643xx_private
 *	struct.
 *	User information regarding port configuration has to be set
 *	prior to calling the port initialization routine.
 *
 *	In this phase any port Tx/Rx activity is halted, MIB counters
 *	are cleared, the PHY address is set according to the user parameter
 *	and access to the DRAM and internal SRAM memory spaces is enabled.
 *
 *	Driver ring initialization
 *	Allocating memory for the descriptor rings and buffers is not
 *	within the scope of this driver. Thus, the user is required to
 *	allocate memory for the descriptors ring and buffers. Those
 *	memory parameters are used by the Rx and Tx ring initialization
 *	routines in order to curve the descriptor linked list in a form
 *	of a ring.
 *	Note: Pay special attention to alignment issues when using
 *	cached descriptors/buffers. In this phase the driver stores
 *	information in the mv643xx_private struct regarding each queue
 *	ring.
 *
 *	Driver start
 *	This phase prepares the Ethernet port for Rx and Tx activity.
 *	It uses the information stored in the mv643xx_private struct to
 *	initialize the various port registers.
 *
 *	Data flow:
 *	All packet references to/from the driver are done using
 *	struct pkt_info.
 *	This struct is a unified struct used with Rx and Tx operations.
 *	This way the user is not required to be familiar with either
 *	the Tx or the Rx descriptors structures.
 *	The driver's descriptors rings are managed by indexes.
 *	Those indexes control the ring resources and are used to indicate
 *	a SW resource error:
 *		'current'
 *		This index points to the current available resource for use.
 *		For example in the Rx process this index will point to the
 *		descriptor that will be passed to the user upon calling the
 *		receive routine. In the Tx process, this index will point to
 *		the descriptor that will be assigned with the user packet
 *		info and transmitted.
 *		'used'
 *		This index points to the descriptor that needs to restore its
 *		resources. For example in the Rx process, using the Rx buffer
 *		return API will attach the buffer returned in the packet info
 *		to the descriptor pointed to by 'used'. In the Tx process,
 *		using the Tx descriptor return will merely return the user
 *		packet info with the command status of the transmitted buffer
 *		pointed to by the 'used' index. Nevertheless, it is essential
 *		to use this routine to update the 'used' index.
 *		'first'
 *		This index supports Tx Scatter-Gather. It points to the first
 *		descriptor of a packet assembled of multiple buffers. For
 *		example, when in the middle of such a packet we have a Tx
 *		resource error, the 'curr' index gets the value of 'first'
 *		to indicate that the ring returned to its state before trying
 *		to transmit this packet.
 *
 *	Receive operation:
 *	The eth_port_receive API sets the packet information struct,
 *	passed by the caller, with received information from the
 *	'current' SDMA descriptor.
 *	It is the user's responsibility to return this resource back
 *	to the Rx descriptor ring to enable the reuse of this source.
 *	Returning the Rx resource is done using the eth_rx_return_buff API.
 *
 *	Transmit operation:
 *	The eth_port_send API supports Scatter-Gather, which enables sending
 *	a packet spanned over multiple buffers. This means that each packet
 *	info structure given by the user and put into the Tx descriptors
 *	ring will be transmitted only if the 'LAST' bit is set in its
 *	command status field. This API also considers restrictions regarding
 *	buffer alignments and sizes.
 *	The user must return a Tx resource after ensuring the buffer
 *	has been transmitted to enable the Tx ring indexes to update.
 *
 *	BOARD LAYOUT
 *	This device is on-board.  No jumper diagram is necessary.
 *
 *	EXTERNAL INTERFACE
 *
 *	Prior to calling the initialization routine eth_port_init() the user
 *	must set the following fields under mv643xx_private struct:
 *	port_num		User Ethernet port number.
 *	port_mac_addr[6]	User defined port MAC address.
 *	port_config		User port configuration value.
 *	port_config_extend	User port config extend value.
 *	port_sdma_config	User port SDMA config value.
 *	port_serial_control	User port serial control value.
 *
 *	This driver data flow is done using the struct pkt_info which
 *	is a unified struct for Rx and Tx operations:
 *
 *	byte_cnt	Tx/Rx descriptor buffer byte count.
 *	l4i_chk		CPU provided TCP Checksum. For Tx operation
 *			only.
 *	cmd_sts		Tx/Rx descriptor command status.
 *	buf_ptr		Tx/Rx descriptor buffer pointer.
 *	return_info	Tx/Rx user resource return information.
 */
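/*
 * Illustration of the ring indexes (a hypothetical example): on a
 * 4-entry Tx ring that starts empty, 'curr' == 'used' == 0. Queueing one
 * 2-buffer scatter/gather packet sets 'first' = 0 and advances 'curr'
 * to 2; after the hardware has transmitted both buffers, two calls to
 * the Tx descriptor return routine advance 'used' to 2 as the resources
 * are reclaimed.
 */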
/* SDMA command macros */
#define ETH_ENABLE_TX_QUEUE(eth_port) \
	mv_write(MV643XX_ETH_TRANSMIT_QUEUE_COMMAND_REG(eth_port), 1)

/* PHY routines */
static int ethernet_phy_get(unsigned int eth_port_num);
static void ethernet_phy_set(unsigned int eth_port_num, int phy_addr);

/* Ethernet Port routines */
static int eth_port_uc_addr(unsigned int eth_port_num, unsigned char uc_nibble,
								int option);
/*
 * eth_port_init - Initialize the Ethernet port driver
 *
 * DESCRIPTION:
 *	This function prepares the ethernet port to start its activity:
 *	1) Completes the ethernet port driver struct initialization toward port
 *	   start routine.
 *	2) Resets the device to a quiescent state in case of warm reboot.
 *	3) Enable SDMA access to all four DRAM banks as well as internal SRAM.
 *	4) Clean MAC tables. The reset status of those tables is unknown.
 *	5) Set PHY address.
 *	Note: Call this routine prior to eth_port_start routine and after
 *	setting user values in the user fields of Ethernet port control
 *	struct.
 *
 * INPUT:
 *	struct mv643xx_private *mp	Ethernet port control struct
 *
 * OUTPUT:
 *	See description.
 *
 * RETURN:
 *	None.
 */
static void eth_port_init(struct mv643xx_private *mp)
{
	mp->port_rx_queue_command = 0;
	mp->port_tx_queue_command = 0;

	mp->rx_resource_err = 0;
	mp->tx_resource_err = 0;

	eth_port_reset(mp->port_num);

	eth_port_init_mac_tables(mp->port_num);

	ethernet_phy_reset(mp->port_num);
}
/*
 * eth_port_start - Start the Ethernet port activity.
 *
 * DESCRIPTION:
 *	This routine prepares the Ethernet port for Rx and Tx activity:
 *	1. Initialize Tx and Rx Current Descriptor Pointer for each queue
 *	   for which a descriptor ring has been initialized (using
 *	   ether_init_tx_desc_ring for Tx and ether_init_rx_desc_ring for Rx)
 *	2. Initialize and enable the Ethernet configuration port by writing to
 *	   the port's configuration and command registers.
 *	3. Initialize and enable the SDMA by writing to the SDMA's
 *	   configuration and command registers. After completing these steps,
 *	   the ethernet port SDMA can start to perform Rx and Tx activities.
 *
 *	Note: Each Rx and Tx queue descriptor's list must be initialized prior
 *	to calling this function (use ether_init_tx_desc_ring for Tx queues
 *	and ether_init_rx_desc_ring for Rx queues).
 *
 * INPUT:
 *	struct mv643xx_private *mp	Ethernet port control struct
 *
 * OUTPUT:
 *	Ethernet port is ready to receive and transmit.
 *
 * RETURN:
 *	None.
 */
static void eth_port_start(struct mv643xx_private *mp)
{
	unsigned int port_num = mp->port_num;
	int tx_curr_desc, rx_curr_desc;

	/* Assignment of Tx CTRP of given queue */
	tx_curr_desc = mp->tx_curr_desc_q;
	mv_write(MV643XX_ETH_TX_CURRENT_QUEUE_DESC_PTR_0(port_num),
		(u32)((struct eth_tx_desc *)mp->tx_desc_dma + tx_curr_desc));

	/* Assignment of Rx CRDP of given queue */
	rx_curr_desc = mp->rx_curr_desc_q;
	mv_write(MV643XX_ETH_RX_CURRENT_QUEUE_DESC_PTR_0(port_num),
		(u32)((struct eth_rx_desc *)mp->rx_desc_dma + rx_curr_desc));

	/* Add the assigned Ethernet address to the port's address table */
	eth_port_uc_addr_set(port_num, mp->port_mac_addr);

	/* Assign port configuration and command. */
	mv_write(MV643XX_ETH_PORT_CONFIG_REG(port_num), mp->port_config);

	mv_write(MV643XX_ETH_PORT_CONFIG_EXTEND_REG(port_num),
						mp->port_config_extend);

	/* Increase the Rx side buffer size if supporting GigE */
	if (mp->port_serial_control & MV643XX_ETH_SET_GMII_SPEED_TO_1000)
		mv_write(MV643XX_ETH_PORT_SERIAL_CONTROL_REG(port_num),
			(mp->port_serial_control & 0xfff1ffff) | (0x5 << 17));
	else
		mv_write(MV643XX_ETH_PORT_SERIAL_CONTROL_REG(port_num),
						mp->port_serial_control);

	mv_write(MV643XX_ETH_PORT_SERIAL_CONTROL_REG(port_num),
		mv_read(MV643XX_ETH_PORT_SERIAL_CONTROL_REG(port_num)) |
						MV643XX_ETH_SERIAL_PORT_ENABLE);

	/* Assign port SDMA configuration */
	mv_write(MV643XX_ETH_SDMA_CONFIG_REG(port_num),
						mp->port_sdma_config);

	/* Enable port Rx. */
	mv_write(MV643XX_ETH_RECEIVE_QUEUE_COMMAND_REG(port_num),
						mp->port_rx_queue_command);
}
/*
 * eth_port_uc_addr_set - This function sets the port Unicast address.
 *
 * DESCRIPTION:
 *	This function sets the port Ethernet MAC address.
 *
 * INPUT:
 *	unsigned int	eth_port_num	Port number.
 *	char *		p_addr		Address to be set
 *
 * OUTPUT:
 *	Sets the MAC address low and high registers. Also calls
 *	eth_port_uc_addr() to set the unicast table with the proper
 *	information.
 *
 * RETURN:
 *	N/A.
 */
static void eth_port_uc_addr_set(unsigned int eth_port_num,
						unsigned char *p_addr)
{
	unsigned int mac_h;
	unsigned int mac_l;

	mac_l = (p_addr[4] << 8) | (p_addr[5]);
	mac_h = (p_addr[0] << 24) | (p_addr[1] << 16) | (p_addr[2] << 8) |
							(p_addr[3] << 0);

	mv_write(MV643XX_ETH_MAC_ADDR_LOW(eth_port_num), mac_l);
	mv_write(MV643XX_ETH_MAC_ADDR_HIGH(eth_port_num), mac_h);

	/* Accept frames of this address */
	eth_port_uc_addr(eth_port_num, p_addr[5], ACCEPT_MAC_ADDR);
}
/*
 * eth_port_uc_addr_get - This function retrieves the port Unicast address
 * (MAC address) from the ethernet hw registers.
 *
 * DESCRIPTION:
 *	This function retrieves the port Ethernet MAC address.
 *
 * INPUT:
 *	unsigned int	eth_port_num	Port number.
 *	char		*MacAddr	pointer where the MAC address is stored
 *
 * OUTPUT:
 *	Copy the MAC address to the location pointed to by MacAddr
 *
 * RETURN:
 *	N/A.
 */
static void eth_port_uc_addr_get(struct net_device *dev, unsigned char *p_addr)
{
	struct mv643xx_private *mp = netdev_priv(dev);
	unsigned int mac_h;
	unsigned int mac_l;

	mac_h = mv_read(MV643XX_ETH_MAC_ADDR_HIGH(mp->port_num));
	mac_l = mv_read(MV643XX_ETH_MAC_ADDR_LOW(mp->port_num));

	p_addr[0] = (mac_h >> 24) & 0xff;
	p_addr[1] = (mac_h >> 16) & 0xff;
	p_addr[2] = (mac_h >> 8) & 0xff;
	p_addr[3] = mac_h & 0xff;
	p_addr[4] = (mac_l >> 8) & 0xff;
	p_addr[5] = mac_l & 0xff;
}
/*
 * eth_port_uc_addr - This function sets the port unicast address table
 *
 * DESCRIPTION:
 *	This function locates the proper entry in the Unicast table for the
 *	specified MAC nibble and sets its properties according to function
 *	parameters.
 *
 * INPUT:
 *	unsigned int	eth_port_num	Port number.
 *	unsigned char	uc_nibble	Unicast MAC Address last nibble.
 *	int		option		0 = Add, 1 = remove address.
 *
 * OUTPUT:
 *	This function adds/removes MAC addresses from the port unicast address
 *	table.
 *
 * RETURN:
 *	true if output succeeded.
 *	false if option parameter is invalid.
 */
static int eth_port_uc_addr(unsigned int eth_port_num, unsigned char uc_nibble,
								int option)
{
	unsigned int unicast_reg;
	unsigned int tbl_offset;
	unsigned int reg_offset;

	/* Locate the Unicast table entry */
	uc_nibble = (0xf & uc_nibble);
	tbl_offset = (uc_nibble / 4) * 4; /* Register offset from unicast table base */
	reg_offset = uc_nibble % 4;	  /* Entry offset within the above register */

	switch (option) {
	case REJECT_MAC_ADDR:
		/* Clear accepts frame bit at given unicast DA table entry */
		unicast_reg = mv_read((MV643XX_ETH_DA_FILTER_UNICAST_TABLE_BASE
						(eth_port_num) + tbl_offset));
		unicast_reg &= ~(0x01 << (8 * reg_offset));
		mv_write((MV643XX_ETH_DA_FILTER_UNICAST_TABLE_BASE
				(eth_port_num) + tbl_offset), unicast_reg);
		break;

	case ACCEPT_MAC_ADDR:
		/* Set accepts frame bit at unicast DA filter table entry */
		unicast_reg =
			mv_read((MV643XX_ETH_DA_FILTER_UNICAST_TABLE_BASE
						(eth_port_num) + tbl_offset));

		unicast_reg |= (0x01 << (8 * reg_offset));

		mv_write((MV643XX_ETH_DA_FILTER_UNICAST_TABLE_BASE
				(eth_port_num) + tbl_offset), unicast_reg);
		break;

	default:
		return 0;
	}

	return 1;
}
/*
 * eth_port_init_mac_tables - Clear all entries in the UC, SMC and OMC tables
 *
 * DESCRIPTION:
 *	Go through all the DA filter tables (Unicast, Special Multicast &
 *	Other Multicast) and set each entry to 0.
 *
 * INPUT:
 *	unsigned int	eth_port_num	Ethernet Port number.
 *
 * OUTPUT:
 *	Multicast and Unicast packets are rejected.
 *
 * RETURN:
 *	None.
 */
static void eth_port_init_mac_tables(unsigned int eth_port_num)
{
	int table_index;

	/* Clear DA filter unicast table (Ex_dFUT) */
	for (table_index = 0; table_index <= 0xC; table_index += 4)
		mv_write((MV643XX_ETH_DA_FILTER_UNICAST_TABLE_BASE
					(eth_port_num) + table_index), 0);

	for (table_index = 0; table_index <= 0xFC; table_index += 4) {
		/* Clear DA filter special multicast table (Ex_dFSMT) */
		mv_write((MV643XX_ETH_DA_FILTER_SPECIAL_MULTICAST_TABLE_BASE
					(eth_port_num) + table_index), 0);
		/* Clear DA filter other multicast table (Ex_dFOMT) */
		mv_write((MV643XX_ETH_DA_FILTER_OTHER_MULTICAST_TABLE_BASE
					(eth_port_num) + table_index), 0);
	}
}
/*
 * eth_clear_mib_counters - Clear all MIB counters
 *
 * DESCRIPTION:
 *	This function clears all MIB counters of a specific ethernet port.
 *	A read from the MIB counter will reset the counter.
 *
 * INPUT:
 *	unsigned int	eth_port_num	Ethernet Port number.
 *
 * OUTPUT:
 *	After reading all MIB counters, the counters reset.
 *
 * RETURN:
 *	None.
 */
static void eth_clear_mib_counters(unsigned int eth_port_num)
{
	int i;

	/* Perform dummy reads from MIB counters */
	for (i = ETH_MIB_GOOD_OCTETS_RECEIVED_LOW; i < ETH_MIB_LATE_COLLISION;
									i += 4)
		mv_read(MV643XX_ETH_MIB_COUNTERS_BASE(eth_port_num) + i);
}

static inline u32 read_mib(struct mv643xx_private *mp, int offset)
{
	return mv_read(MV643XX_ETH_MIB_COUNTERS_BASE(mp->port_num) + offset);
}
2091 static void eth_update_mib_counters(struct mv643xx_private
*mp
)
2093 struct mv643xx_mib_counters
*p
= &mp
->mib_counters
;
2096 p
->good_octets_received
+=
2097 read_mib(mp
, ETH_MIB_GOOD_OCTETS_RECEIVED_LOW
);
2098 p
->good_octets_received
+=
2099 (u64
)read_mib(mp
, ETH_MIB_GOOD_OCTETS_RECEIVED_HIGH
) << 32;
2101 for (offset
= ETH_MIB_BAD_OCTETS_RECEIVED
;
2102 offset
<= ETH_MIB_FRAMES_1024_TO_MAX_OCTETS
;
2104 *(u32
*)((char *)p
+ offset
) = read_mib(mp
, offset
);
2106 p
->good_octets_sent
+= read_mib(mp
, ETH_MIB_GOOD_OCTETS_SENT_LOW
);
2107 p
->good_octets_sent
+=
2108 (u64
)read_mib(mp
, ETH_MIB_GOOD_OCTETS_SENT_HIGH
) << 32;
2110 for (offset
= ETH_MIB_GOOD_FRAMES_SENT
;
2111 offset
<= ETH_MIB_LATE_COLLISION
;
2113 *(u32
*)((char *)p
+ offset
) = read_mib(mp
, offset
);
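/*
 * Worked example (illustrative only, not part of the driver): with
 * low = 0x00001000 and high = 0x00000002 read from the two halves of a
 * good-octets counter, the two additions above combine as
 *
 *	low | ((u64)high << 32) == 0x0000000200001000
 */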
/*
 * ethernet_phy_detect - Detect whether a PHY is present
 *
 * Description:
 *	This function tests whether there is a PHY present on
 *	the specified port.
 *
 * Input:
 *	unsigned int	port_num	Ethernet Port number.
 *
 * Output:
 *	None.
 *
 * Return:
 *	0 on success
 *	-ENODEV on failure
 */
static int ethernet_phy_detect(unsigned int port_num)
{
	unsigned int phy_reg_data0;
	int auto_neg;

	eth_port_read_smi_reg(port_num, 0, &phy_reg_data0);
	auto_neg = phy_reg_data0 & 0x1000;	/* sample auto_neg bit */
	phy_reg_data0 ^= 0x1000;		/* invert auto_neg */
	eth_port_write_smi_reg(port_num, 0, phy_reg_data0);

	eth_port_read_smi_reg(port_num, 0, &phy_reg_data0);
	if ((phy_reg_data0 & 0x1000) == auto_neg)
		return -ENODEV;			/* change didn't take */

	phy_reg_data0 ^= 0x1000;		/* restore auto_neg */
	eth_port_write_smi_reg(port_num, 0, phy_reg_data0);

	return 0;
}
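/*
 * Usage sketch (illustrative assumption, not driver code): probe for a
 * PHY before bringing a port up.
 *
 *	if (ethernet_phy_detect(port_num)) {
 *		printk(KERN_INFO "No PHY detected at addr 0x%x\n",
 *				ethernet_phy_get(port_num));
 *		return -ENODEV;
 *	}
 */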
/*
 * ethernet_phy_get - Get the ethernet port PHY address.
 *
 * Description:
 *	This routine returns the given ethernet port PHY address.
 *
 * Input:
 *	unsigned int	eth_port_num	Ethernet Port number.
 *
 * Output:
 *	None.
 *
 * Return:
 *	PHY address.
 */
static int ethernet_phy_get(unsigned int eth_port_num)
{
	unsigned int reg_data;

	reg_data = mv_read(MV643XX_ETH_PHY_ADDR_REG);

	return ((reg_data >> (5 * eth_port_num)) & 0x1f);
}
/*
 * ethernet_phy_set - Set the ethernet port PHY address.
 *
 * Description:
 *	This routine sets the given ethernet port PHY address.
 *
 * Input:
 *	unsigned int	eth_port_num	Ethernet Port number.
 *	int		phy_addr	PHY address.
 *
 * Output:
 *	None.
 *
 * Return:
 *	None.
 */
static void ethernet_phy_set(unsigned int eth_port_num, int phy_addr)
{
	u32 reg_data;
	int addr_shift = 5 * eth_port_num;

	reg_data = mv_read(MV643XX_ETH_PHY_ADDR_REG);
	reg_data &= ~(0x1f << addr_shift);
	reg_data |= (phy_addr & 0x1f) << addr_shift;
	mv_write(MV643XX_ETH_PHY_ADDR_REG, reg_data);
}
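/*
 * Worked example (illustrative only, not part of the driver): each
 * port's PHY address occupies a 5-bit field of the register, so for
 * eth_port_num = 1 the field is bits 9:5 (addr_shift = 5); setting
 * phy_addr = 8 performs
 *
 *	reg_data = (reg_data & ~(0x1f << 5)) | ((8 & 0x1f) << 5);
 */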
/*
 * ethernet_phy_reset - Reset Ethernet port PHY.
 *
 * Description:
 *	This routine utilizes the SMI interface to reset the ethernet port
 *	PHY.
 *
 * Input:
 *	unsigned int	eth_port_num	Ethernet Port number.
 *
 * Output:
 *	None.
 *
 * Return:
 *	None.
 */
static void ethernet_phy_reset(unsigned int eth_port_num)
{
	unsigned int phy_reg_data;

	/* Reset the PHY */
	eth_port_read_smi_reg(eth_port_num, 0, &phy_reg_data);
	phy_reg_data |= 0x8000;	/* Set bit 15 to reset the PHY */
	eth_port_write_smi_reg(eth_port_num, 0, phy_reg_data);
}
/*
 * eth_port_reset - Reset Ethernet port
 *
 * Description:
 *	This routine resets the chip by aborting any SDMA engine activity and
 *	clearing the MIB counters. The Receiver and the Transmit unit are in
 *	idle state after this command is performed and the port is disabled.
 *
 * Input:
 *	unsigned int	eth_port_num	Ethernet Port number.
 *
 * Output:
 *	Channel activity is halted.
 *
 * Return:
 *	None.
 */
static void eth_port_reset(unsigned int port_num)
{
	unsigned int reg_data;

	/* Stop Tx port activity. Check port Tx activity. */
	reg_data = mv_read(MV643XX_ETH_TRANSMIT_QUEUE_COMMAND_REG(port_num));

	if (reg_data & 0xFF) {
		/* Issue stop command for active channels only */
		mv_write(MV643XX_ETH_TRANSMIT_QUEUE_COMMAND_REG(port_num),
							(reg_data << 8));

		/* Wait for all Tx activity to terminate. */
		/* Check port cause register that all Tx queues are stopped */
		while (mv_read(MV643XX_ETH_TRANSMIT_QUEUE_COMMAND_REG(port_num))
									& 0xFF)
			;
	}

	/* Stop Rx port activity. Check port Rx activity. */
	reg_data = mv_read(MV643XX_ETH_RECEIVE_QUEUE_COMMAND_REG(port_num));

	if (reg_data & 0xFF) {
		/* Issue stop command for active channels only */
		mv_write(MV643XX_ETH_RECEIVE_QUEUE_COMMAND_REG(port_num),
							(reg_data << 8));

		/* Wait for all Rx activity to terminate. */
		/* Check port cause register that all Rx queues are stopped */
		while (mv_read(MV643XX_ETH_RECEIVE_QUEUE_COMMAND_REG(port_num))
									& 0xFF)
			;
	}

	/* Clear all MIB counters */
	eth_clear_mib_counters(port_num);

	/* Reset the Enable bit in the Configuration Register */
	reg_data = mv_read(MV643XX_ETH_PORT_SERIAL_CONTROL_REG(port_num));
	reg_data &= ~MV643XX_ETH_SERIAL_PORT_ENABLE;
	mv_write(MV643XX_ETH_PORT_SERIAL_CONTROL_REG(port_num), reg_data);
}
/*
 * ethernet_set_config_reg - Set specified bits in configuration register.
 *
 * Description:
 *	This function sets specified bits in the given ethernet
 *	configuration register.
 *
 * Input:
 *	unsigned int	eth_port_num	Ethernet Port number.
 *	unsigned int	value		32 bit value.
 *
 * Output:
 *	The set bits in the value parameter are set in the configuration
 *	register.
 *
 * Return:
 *	None.
 */
static void ethernet_set_config_reg(unsigned int eth_port_num,
					unsigned int value)
{
	unsigned int eth_config_reg;

	eth_config_reg = mv_read(MV643XX_ETH_PORT_CONFIG_REG(eth_port_num));
	eth_config_reg |= value;
	mv_write(MV643XX_ETH_PORT_CONFIG_REG(eth_port_num), eth_config_reg);
}
static int eth_port_autoneg_supported(unsigned int eth_port_num)
{
	unsigned int phy_reg_data0;

	eth_port_read_smi_reg(eth_port_num, 0, &phy_reg_data0);

	return phy_reg_data0 & 0x1000;	/* auto-negotiation enable bit */
}

static int eth_port_link_is_up(unsigned int eth_port_num)
{
	unsigned int phy_reg_data1;

	eth_port_read_smi_reg(eth_port_num, 1, &phy_reg_data1);

	if (eth_port_autoneg_supported(eth_port_num)) {
		if (phy_reg_data1 & 0x20)	/* auto-neg complete */
			return 1;
	} else if (phy_reg_data1 & 0x4)		/* link up */
		return 1;

	return 0;
}
/*
 * ethernet_get_config_reg - Get the port configuration register
 *
 * Description:
 *	This function returns the extended configuration register value of
 *	the given ethernet port.
 *
 * Input:
 *	unsigned int	eth_port_num	Ethernet Port number.
 *
 * Output:
 *	None.
 *
 * Return:
 *	Port extended configuration register value.
 */
static unsigned int ethernet_get_config_reg(unsigned int eth_port_num)
{
	unsigned int eth_config_reg;

	eth_config_reg = mv_read(MV643XX_ETH_PORT_CONFIG_EXTEND_REG
								(eth_port_num));
	return eth_config_reg;
}
/*
 * eth_port_read_smi_reg - Read PHY registers
 *
 * Description:
 *	This routine utilizes the SMI interface to interact with the PHY in
 *	order to perform a PHY register read.
 *
 * Input:
 *	unsigned int	port_num	Ethernet Port number.
 *	unsigned int	phy_reg		PHY register address offset.
 *	unsigned int	*value		Register value buffer.
 *
 * Output:
 *	Write the value of the specified PHY register into the given buffer.
 *
 * Return:
 *	None (the buffer is left unmodified if the PHY stays busy or the
 *	read data never becomes valid).
 */
static void eth_port_read_smi_reg(unsigned int port_num,
				unsigned int phy_reg, unsigned int *value)
{
	int phy_addr = ethernet_phy_get(port_num);
	unsigned long flags;
	int i;

	/* the SMI register is a shared resource */
	spin_lock_irqsave(&mv643xx_eth_phy_lock, flags);

	/* wait for the SMI register to become available */
	for (i = 0; mv_read(MV643XX_ETH_SMI_REG) & ETH_SMI_BUSY; i++) {
		if (i == PHY_WAIT_ITERATIONS) {
			printk("mv643xx PHY busy timeout, port %d\n",
								port_num);
			goto out;
		}
		udelay(PHY_WAIT_MICRO_SECONDS);
	}

	mv_write(MV643XX_ETH_SMI_REG,
		(phy_addr << 16) | (phy_reg << 21) | ETH_SMI_OPCODE_READ);

	/* now wait for the data to be valid */
	for (i = 0; !(mv_read(MV643XX_ETH_SMI_REG) & ETH_SMI_READ_VALID); i++) {
		if (i == PHY_WAIT_ITERATIONS) {
			printk("mv643xx PHY read timeout, port %d\n",
								port_num);
			goto out;
		}
		udelay(PHY_WAIT_MICRO_SECONDS);
	}

	*value = mv_read(MV643XX_ETH_SMI_REG) & 0xffff;
out:
	spin_unlock_irqrestore(&mv643xx_eth_phy_lock, flags);
}
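/*
 * Usage sketch (illustrative assumption, not driver code): fetch the MII
 * status register (register 1) and test the link bit.
 *
 *	unsigned int bmsr;
 *
 *	eth_port_read_smi_reg(port_num, 1, &bmsr);
 *	if (bmsr & 0x4)
 *		printk(KERN_DEBUG "mv643xx port %d: link up\n", port_num);
 */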
/*
 * eth_port_write_smi_reg - Write to PHY registers
 *
 * Description:
 *	This routine utilizes the SMI interface to interact with the PHY in
 *	order to perform writes to PHY registers.
 *
 * Input:
 *	unsigned int	eth_port_num	Ethernet Port number.
 *	unsigned int	phy_reg		PHY register address offset.
 *	unsigned int	value		Register value.
 *
 * Output:
 *	Write the given value to the specified PHY register.
 *
 * Return:
 *	None (the write is silently dropped if the PHY stays busy).
 */
static void eth_port_write_smi_reg(unsigned int eth_port_num,
				unsigned int phy_reg, unsigned int value)
{
	int phy_addr;
	int i;
	unsigned long flags;

	phy_addr = ethernet_phy_get(eth_port_num);

	/* the SMI register is a shared resource */
	spin_lock_irqsave(&mv643xx_eth_phy_lock, flags);

	/* wait for the SMI register to become available */
	for (i = 0; mv_read(MV643XX_ETH_SMI_REG) & ETH_SMI_BUSY; i++) {
		if (i == PHY_WAIT_ITERATIONS) {
			printk("mv643xx PHY busy timeout, port %d\n",
								eth_port_num);
			goto out;
		}
		udelay(PHY_WAIT_MICRO_SECONDS);
	}

	mv_write(MV643XX_ETH_SMI_REG, (phy_addr << 16) | (phy_reg << 21) |
				ETH_SMI_OPCODE_WRITE | (value & 0xffff));
out:
	spin_unlock_irqrestore(&mv643xx_eth_phy_lock, flags);
}
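/*
 * Usage sketch (illustrative assumption, not driver code): restart
 * auto-negotiation by setting bit 9 of MII control register 0.
 *
 *	unsigned int bmcr;
 *
 *	eth_port_read_smi_reg(port_num, 0, &bmcr);
 *	eth_port_write_smi_reg(port_num, 0, bmcr | 0x0200);
 */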
/*
 * eth_port_send - Send an Ethernet packet
 *
 * Description:
 *	This routine sends a given packet described by the p_pkt_info
 *	parameter. It supports transmitting a packet spanned over multiple
 *	buffers. The routine updates the 'curr' and 'first' indexes
 *	according to the packet segment passed to the routine. In case the
 *	packet segment is first, the 'first' index is updated. In any case,
 *	the 'curr' index is updated. If the routine runs into a Tx resource
 *	error, it assigns the 'curr' index as 'first'. This way the function
 *	can abort the Tx process of a packet spanning multiple descriptors.
 *
 * Input:
 *	struct mv643xx_private	*mp		Ethernet port control struct.
 *	struct pkt_info		*p_pkt_info	User packet buffer.
 *
 * Output:
 *	Tx ring 'curr' and 'first' indexes are updated.
 *
 * Return:
 *	ETH_QUEUE_FULL in case of Tx resource error.
 *	ETH_ERROR in case the routine can not access the Tx desc ring.
 *	ETH_QUEUE_LAST_RESOURCE if the routine uses the last Tx resource.
 *	ETH_OK otherwise.
 */
#ifdef MV643XX_CHECKSUM_OFFLOAD_TX
/*
 * Modified to include the first descriptor pointer in case of SG
 */
static ETH_FUNC_RET_STATUS eth_port_send(struct mv643xx_private *mp,
					struct pkt_info *p_pkt_info)
{
	int tx_desc_curr, tx_desc_used, tx_first_desc, tx_next_desc;
	struct eth_tx_desc *current_descriptor;
	struct eth_tx_desc *first_descriptor;
	u32 command;

	/* Do not process Tx ring in case of Tx ring resource error */
	if (mp->tx_resource_err)
		return ETH_QUEUE_FULL;

	/*
	 * The hardware requires that each buffer that is <= 8 bytes
	 * in length must be aligned on an 8 byte boundary.
	 */
	if (p_pkt_info->byte_cnt <= 8 && p_pkt_info->buf_ptr & 0x7) {
		printk(KERN_ERR
			"mv643xx_eth port %d: packet size <= 8 problem\n",
			mp->port_num);
		return ETH_ERROR;
	}

	/* Get the Tx Desc ring indexes */
	tx_desc_curr = mp->tx_curr_desc_q;
	tx_desc_used = mp->tx_used_desc_q;

	current_descriptor = &mp->p_tx_desc_area[tx_desc_curr];

	tx_next_desc = (tx_desc_curr + 1) % mp->tx_ring_size;

	current_descriptor->buf_ptr = p_pkt_info->buf_ptr;
	current_descriptor->byte_cnt = p_pkt_info->byte_cnt;
	current_descriptor->l4i_chk = p_pkt_info->l4i_chk;
	mp->tx_skb[tx_desc_curr] = p_pkt_info->return_info;

	command = p_pkt_info->cmd_sts | ETH_ZERO_PADDING | ETH_GEN_CRC |
						ETH_BUFFER_OWNED_BY_DMA;
	if (command & ETH_TX_FIRST_DESC) {
		tx_first_desc = tx_desc_curr;
		mp->tx_first_desc_q = tx_first_desc;
		first_descriptor = current_descriptor;
		mp->tx_first_command = command;
	} else {
		tx_first_desc = mp->tx_first_desc_q;
		first_descriptor = &mp->p_tx_desc_area[tx_first_desc];
		BUG_ON(first_descriptor == NULL);
		current_descriptor->cmd_sts = command;
	}

	if (command & ETH_TX_LAST_DESC) {
		wmb();
		first_descriptor->cmd_sts = mp->tx_first_command;

		wmb();
		ETH_ENABLE_TX_QUEUE(mp->port_num);

		/*
		 * Finish Tx packet. Update first desc in case of Tx resource
		 * error
		 */
		tx_first_desc = tx_next_desc;
		mp->tx_first_desc_q = tx_first_desc;
	}

	/* Check for ring index overlap in the Tx desc ring */
	if (tx_next_desc == tx_desc_used) {
		mp->tx_resource_err = 1;
		mp->tx_curr_desc_q = tx_first_desc;

		return ETH_QUEUE_LAST_RESOURCE;
	}

	mp->tx_curr_desc_q = tx_next_desc;

	return ETH_OK;
}
#else
static ETH_FUNC_RET_STATUS eth_port_send(struct mv643xx_private *mp,
					struct pkt_info *p_pkt_info)
{
	int tx_desc_curr;
	int tx_desc_used;
	struct eth_tx_desc *current_descriptor;
	unsigned int command_status;

	/* Do not process Tx ring in case of Tx ring resource error */
	if (mp->tx_resource_err)
		return ETH_QUEUE_FULL;

	/* Get the Tx Desc ring indexes */
	tx_desc_curr = mp->tx_curr_desc_q;
	tx_desc_used = mp->tx_used_desc_q;
	current_descriptor = &mp->p_tx_desc_area[tx_desc_curr];

	command_status = p_pkt_info->cmd_sts | ETH_ZERO_PADDING | ETH_GEN_CRC;
	current_descriptor->buf_ptr = p_pkt_info->buf_ptr;
	current_descriptor->byte_cnt = p_pkt_info->byte_cnt;
	mp->tx_skb[tx_desc_curr] = p_pkt_info->return_info;

	/* Set last desc with DMA ownership and interrupt enable. */
	wmb();
	current_descriptor->cmd_sts = command_status |
			ETH_BUFFER_OWNED_BY_DMA | ETH_TX_ENABLE_INTERRUPT;

	wmb();
	ETH_ENABLE_TX_QUEUE(mp->port_num);

	/* Finish Tx packet. Update first desc in case of Tx resource error */
	tx_desc_curr = (tx_desc_curr + 1) % mp->tx_ring_size;

	/* Update the current descriptor */
	mp->tx_curr_desc_q = tx_desc_curr;

	/* Check for ring index overlap in the Tx desc ring */
	if (tx_desc_curr == tx_desc_used) {
		mp->tx_resource_err = 1;
		return ETH_QUEUE_LAST_RESOURCE;
	}

	return ETH_OK;
}
#endif
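/*
 * Usage sketch (illustrative assumption, not driver code): a transmit
 * path would map the skb, describe it in a struct pkt_info and hand it
 * to eth_port_send(), stopping the queue when the ring fills up.
 *
 *	struct pkt_info pkt;
 *
 *	pkt.byte_cnt = skb->len;
 *	pkt.buf_ptr = dma_map_single(NULL, skb->data, skb->len,
 *							DMA_TO_DEVICE);
 *	pkt.return_info = skb;
 *	pkt.cmd_sts = ETH_TX_FIRST_DESC | ETH_TX_LAST_DESC;
 *
 *	if (eth_port_send(mp, &pkt) != ETH_OK)
 *		netif_stop_queue(dev);
 */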
/*
 * eth_tx_return_desc - Free all used Tx descriptors
 *
 * Description:
 *	This routine returns the transmitted packet information to the
 *	caller. It uses the 'first' index to support Tx desc return in case
 *	a transmit of a packet spanned over multiple buffers is still in
 *	process. In case the Tx queue was in "resource error" condition,
 *	where there are no available Tx resources, the function resets the
 *	resource error flag.
 *
 * Input:
 *	struct mv643xx_private	*mp		Ethernet port control struct.
 *	struct pkt_info		*p_pkt_info	User packet buffer.
 *
 * Output:
 *	Tx ring 'first' and 'used' indexes are updated.
 *
 * Return:
 *	ETH_ERROR in case the routine can not access the Tx desc ring.
 *	ETH_RETRY in case there is transmission in process.
 *	ETH_END_OF_JOB if the routine has nothing to release.
 *	ETH_OK otherwise.
 */
static ETH_FUNC_RET_STATUS eth_tx_return_desc(struct mv643xx_private *mp,
						struct pkt_info *p_pkt_info)
{
	int tx_desc_used;
#ifdef MV643XX_CHECKSUM_OFFLOAD_TX
	int tx_busy_desc = mp->tx_first_desc_q;
#else
	int tx_busy_desc = mp->tx_curr_desc_q;
#endif
	struct eth_tx_desc *p_tx_desc_used;
	unsigned int command_status;

	/* Get the Tx Desc ring indexes */
	tx_desc_used = mp->tx_used_desc_q;

	p_tx_desc_used = &mp->p_tx_desc_area[tx_desc_used];

	/* Sanity check */
	if (p_tx_desc_used == NULL)
		return ETH_ERROR;

	/* Stop release. About to overlap the current available Tx descriptor */
	if (tx_desc_used == tx_busy_desc && !mp->tx_resource_err)
		return ETH_END_OF_JOB;

	command_status = p_tx_desc_used->cmd_sts;

	/* Still transmitting... */
	if (command_status & (ETH_BUFFER_OWNED_BY_DMA))
		return ETH_RETRY;

	/* Pass the packet information to the caller */
	p_pkt_info->cmd_sts = command_status;
	p_pkt_info->return_info = mp->tx_skb[tx_desc_used];
	mp->tx_skb[tx_desc_used] = NULL;

	/* Update the next descriptor to release. */
	mp->tx_used_desc_q = (tx_desc_used + 1) % mp->tx_ring_size;

	/* Any Tx return cancels the Tx resource error status */
	mp->tx_resource_err = 0;

	return ETH_OK;
}
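/*
 * Usage sketch (illustrative assumption, not driver code): the Tx
 * completion path loops until nothing more can be released, freeing
 * each returned skb.
 *
 *	struct pkt_info pkt;
 *
 *	while (eth_tx_return_desc(mp, &pkt) == ETH_OK)
 *		if (pkt.return_info)
 *			dev_kfree_skb_irq(pkt.return_info);
 */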
/*
 * eth_port_receive - Get received information from Rx ring.
 *
 * Description:
 *	This routine returns the received data to the caller. There is no
 *	data copying during routine operation. All information is returned
 *	through a pointer to the packet information struct passed from the
 *	caller. If the routine exhausts Rx ring resources then the resource
 *	error flag is set.
 *
 * Input:
 *	struct mv643xx_private	*mp		Ethernet port control struct.
 *	struct pkt_info		*p_pkt_info	User packet buffer.
 *
 * Output:
 *	Rx ring current and used indexes are updated.
 *
 * Return:
 *	ETH_ERROR in case the routine can not access the Rx desc ring.
 *	ETH_QUEUE_FULL if Rx ring resources are exhausted.
 *	ETH_END_OF_JOB if there is no received data.
 *	ETH_OK otherwise.
 */
static ETH_FUNC_RET_STATUS eth_port_receive(struct mv643xx_private *mp,
						struct pkt_info *p_pkt_info)
{
	int rx_next_curr_desc, rx_curr_desc, rx_used_desc;
	volatile struct eth_rx_desc *p_rx_desc;
	unsigned int command_status;

	/* Do not process Rx ring in case of Rx ring resource error */
	if (mp->rx_resource_err)
		return ETH_QUEUE_FULL;

	/* Get the Rx Desc ring 'curr' and 'used' indexes */
	rx_curr_desc = mp->rx_curr_desc_q;
	rx_used_desc = mp->rx_used_desc_q;

	p_rx_desc = &mp->p_rx_desc_area[rx_curr_desc];

	/* The following parameters are used to save readings from memory */
	command_status = p_rx_desc->cmd_sts;
	rmb();

	/* Nothing to receive... */
	if (command_status & (ETH_BUFFER_OWNED_BY_DMA))
		return ETH_END_OF_JOB;

	p_pkt_info->byte_cnt = (p_rx_desc->byte_cnt) - RX_BUF_OFFSET;
	p_pkt_info->cmd_sts = command_status;
	p_pkt_info->buf_ptr = (p_rx_desc->buf_ptr) + RX_BUF_OFFSET;
	p_pkt_info->return_info = mp->rx_skb[rx_curr_desc];
	p_pkt_info->l4i_chk = p_rx_desc->buf_size;

	/*
	 * Clean the return info field to indicate that the packet has been
	 * moved to the upper layers
	 */
	mp->rx_skb[rx_curr_desc] = NULL;

	/* Update current index in data structure */
	rx_next_curr_desc = (rx_curr_desc + 1) % mp->rx_ring_size;
	mp->rx_curr_desc_q = rx_next_curr_desc;

	/* Rx descriptors exhausted. Set the Rx ring resource error flag */
	if (rx_next_curr_desc == rx_used_desc)
		mp->rx_resource_err = 1;

	return ETH_OK;
}
/*
 * eth_rx_return_buff - Returns a Rx buffer back to the Rx ring.
 *
 * Description:
 *	This routine returns a Rx buffer back to the Rx ring. It retrieves
 *	the next 'used' descriptor and attaches the returned buffer to it.
 *	In case the Rx ring was in "resource error" condition, where there
 *	are no available Rx resources, the function resets the resource
 *	error flag.
 *
 * Input:
 *	struct mv643xx_private	*mp		Ethernet port control struct.
 *	struct pkt_info		*p_pkt_info	Information on returned buffer.
 *
 * Output:
 *	New available Rx resource in Rx descriptor ring.
 *
 * Return:
 *	ETH_ERROR in case the routine can not access the Rx desc ring.
 *	ETH_OK otherwise.
 */
static ETH_FUNC_RET_STATUS eth_rx_return_buff(struct mv643xx_private *mp,
						struct pkt_info *p_pkt_info)
{
	int used_rx_desc;	/* Where to return Rx resource */
	volatile struct eth_rx_desc *p_used_rx_desc;

	/* Get 'used' Rx descriptor */
	used_rx_desc = mp->rx_used_desc_q;
	p_used_rx_desc = &mp->p_rx_desc_area[used_rx_desc];

	p_used_rx_desc->buf_ptr = p_pkt_info->buf_ptr;
	p_used_rx_desc->buf_size = p_pkt_info->byte_cnt;
	mp->rx_skb[used_rx_desc] = p_pkt_info->return_info;

	/* Flush the write pipe */
	wmb();

	/* Return the descriptor to DMA ownership */
	p_used_rx_desc->cmd_sts =
			ETH_BUFFER_OWNED_BY_DMA | ETH_RX_ENABLE_INTERRUPT;
	wmb();

	/* Move the used descriptor pointer to the next descriptor */
	mp->rx_used_desc_q = (used_rx_desc + 1) % mp->rx_ring_size;

	/* Any Rx return cancels the Rx resource error status */
	mp->rx_resource_err = 0;

	return ETH_OK;
}
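/*
 * Usage sketch (illustrative assumption, not driver code): an Rx refill
 * path allocates a fresh skb, maps it and gives the buffer back to the
 * ring; the exact flow shown here is an assumption for illustration.
 *
 *	struct pkt_info pkt;
 *	struct sk_buff *skb = dev_alloc_skb(RX_SKB_SIZE);
 *
 *	if (skb) {
 *		pkt.byte_cnt = RX_SKB_SIZE;
 *		pkt.buf_ptr = dma_map_single(NULL, skb->data, RX_SKB_SIZE,
 *							DMA_FROM_DEVICE);
 *		pkt.return_info = skb;
 *		if (eth_rx_return_buff(mp, &pkt) != ETH_OK)
 *			dev_kfree_skb(skb);
 *	}
 */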
/************* Begin ethtool support *************************/

struct mv643xx_stats {
	char stat_string[ETH_GSTRING_LEN];
	int sizeof_stat;
	int stat_offset;
};

#define MV643XX_STAT(m)	sizeof(((struct mv643xx_private *)0)->m), \
			offsetof(struct mv643xx_private, m)
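/*
 * Worked example (illustrative only, not part of the driver):
 * MV643XX_STAT(stats.rx_packets) expands to the two initializers
 *
 *	sizeof(((struct mv643xx_private *)0)->stats.rx_packets),
 *	offsetof(struct mv643xx_private, stats.rx_packets)
 *
 * which fill the sizeof_stat and stat_offset members of a
 * struct mv643xx_stats entry.
 */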
static const struct mv643xx_stats mv643xx_gstrings_stats[] = {
	{ "rx_packets", MV643XX_STAT(stats.rx_packets) },
	{ "tx_packets", MV643XX_STAT(stats.tx_packets) },
	{ "rx_bytes", MV643XX_STAT(stats.rx_bytes) },
	{ "tx_bytes", MV643XX_STAT(stats.tx_bytes) },
	{ "rx_errors", MV643XX_STAT(stats.rx_errors) },
	{ "tx_errors", MV643XX_STAT(stats.tx_errors) },
	{ "rx_dropped", MV643XX_STAT(stats.rx_dropped) },
	{ "tx_dropped", MV643XX_STAT(stats.tx_dropped) },
	{ "good_octets_received", MV643XX_STAT(mib_counters.good_octets_received) },
	{ "bad_octets_received", MV643XX_STAT(mib_counters.bad_octets_received) },
	{ "internal_mac_transmit_err", MV643XX_STAT(mib_counters.internal_mac_transmit_err) },
	{ "good_frames_received", MV643XX_STAT(mib_counters.good_frames_received) },
	{ "bad_frames_received", MV643XX_STAT(mib_counters.bad_frames_received) },
	{ "broadcast_frames_received", MV643XX_STAT(mib_counters.broadcast_frames_received) },
	{ "multicast_frames_received", MV643XX_STAT(mib_counters.multicast_frames_received) },
	{ "frames_64_octets", MV643XX_STAT(mib_counters.frames_64_octets) },
	{ "frames_65_to_127_octets", MV643XX_STAT(mib_counters.frames_65_to_127_octets) },
	{ "frames_128_to_255_octets", MV643XX_STAT(mib_counters.frames_128_to_255_octets) },
	{ "frames_256_to_511_octets", MV643XX_STAT(mib_counters.frames_256_to_511_octets) },
	{ "frames_512_to_1023_octets", MV643XX_STAT(mib_counters.frames_512_to_1023_octets) },
	{ "frames_1024_to_max_octets", MV643XX_STAT(mib_counters.frames_1024_to_max_octets) },
	{ "good_octets_sent", MV643XX_STAT(mib_counters.good_octets_sent) },
	{ "good_frames_sent", MV643XX_STAT(mib_counters.good_frames_sent) },
	{ "excessive_collision", MV643XX_STAT(mib_counters.excessive_collision) },
	{ "multicast_frames_sent", MV643XX_STAT(mib_counters.multicast_frames_sent) },
	{ "broadcast_frames_sent", MV643XX_STAT(mib_counters.broadcast_frames_sent) },
	{ "unrec_mac_control_received", MV643XX_STAT(mib_counters.unrec_mac_control_received) },
	{ "fc_sent", MV643XX_STAT(mib_counters.fc_sent) },
	{ "good_fc_received", MV643XX_STAT(mib_counters.good_fc_received) },
	{ "bad_fc_received", MV643XX_STAT(mib_counters.bad_fc_received) },
	{ "undersize_received", MV643XX_STAT(mib_counters.undersize_received) },
	{ "fragments_received", MV643XX_STAT(mib_counters.fragments_received) },
	{ "oversize_received", MV643XX_STAT(mib_counters.oversize_received) },
	{ "jabber_received", MV643XX_STAT(mib_counters.jabber_received) },
	{ "mac_receive_error", MV643XX_STAT(mib_counters.mac_receive_error) },
	{ "bad_crc_event", MV643XX_STAT(mib_counters.bad_crc_event) },
	{ "collision", MV643XX_STAT(mib_counters.collision) },
	{ "late_collision", MV643XX_STAT(mib_counters.late_collision) },
};
#define MV643XX_STATS_LEN	\
	sizeof(mv643xx_gstrings_stats) / sizeof(struct mv643xx_stats)
static int
mv643xx_get_settings(struct net_device *netdev, struct ethtool_cmd *ecmd)
{
	struct mv643xx_private *mp = netdev->priv;
	int port_num = mp->port_num;
	int autoneg = eth_port_autoneg_supported(port_num);
	int mode_10_bit;
	int auto_duplex;
	int half_duplex = 0;
	int full_duplex = 0;
	int auto_speed;
	int speed_10 = 0;
	int speed_100 = 0;
	int speed_1000 = 0;

	u32 pcs = mv_read(MV643XX_ETH_PORT_SERIAL_CONTROL_REG(port_num));
	u32 psr = mv_read(MV643XX_ETH_PORT_STATUS_REG(port_num));

	mode_10_bit = psr & MV643XX_ETH_PORT_STATUS_MODE_10_BIT;

	if (mode_10_bit) {
		ecmd->supported = SUPPORTED_10baseT_Half;
	} else {
		ecmd->supported = (SUPPORTED_10baseT_Half |
				SUPPORTED_10baseT_Full |
				SUPPORTED_100baseT_Half |
				SUPPORTED_100baseT_Full |
				SUPPORTED_1000baseT_Full |
				(autoneg ? SUPPORTED_Autoneg : 0) |
				SUPPORTED_TP);

		auto_duplex = !(pcs & MV643XX_ETH_DISABLE_AUTO_NEG_FOR_DUPLX);
		auto_speed = !(pcs & MV643XX_ETH_DISABLE_AUTO_NEG_SPEED_GMII);

		ecmd->advertising = ADVERTISED_TP;

		if (autoneg) {
			ecmd->advertising |= ADVERTISED_Autoneg;

			if (auto_duplex) {
				half_duplex = 1;
				full_duplex = 1;
			} else {
				if (pcs & MV643XX_ETH_SET_FULL_DUPLEX_MODE)
					full_duplex = 1;
				else
					half_duplex = 1;
			}

			if (auto_speed) {
				speed_10 = 1;
				speed_100 = 1;
				speed_1000 = 1;
			} else {
				if (pcs & MV643XX_ETH_SET_GMII_SPEED_TO_1000)
					speed_1000 = 1;
				else if (pcs & MV643XX_ETH_SET_MII_SPEED_TO_100)
					speed_100 = 1;
				else
					speed_10 = 1;
			}

			if (speed_10 & half_duplex)
				ecmd->advertising |= ADVERTISED_10baseT_Half;
			if (speed_10 & full_duplex)
				ecmd->advertising |= ADVERTISED_10baseT_Full;
			if (speed_100 & half_duplex)
				ecmd->advertising |= ADVERTISED_100baseT_Half;
			if (speed_100 & full_duplex)
				ecmd->advertising |= ADVERTISED_100baseT_Full;
			if (speed_1000)
				ecmd->advertising |= ADVERTISED_1000baseT_Full;
		}
	}

	ecmd->port = PORT_TP;
	ecmd->phy_address = ethernet_phy_get(port_num);

	ecmd->transceiver = XCVR_EXTERNAL;

	if (netif_carrier_ok(netdev)) {
		if (mode_10_bit)
			ecmd->speed = SPEED_10;
		else {
			if (psr & MV643XX_ETH_PORT_STATUS_GMII_1000)
				ecmd->speed = SPEED_1000;
			else if (psr & MV643XX_ETH_PORT_STATUS_MII_100)
				ecmd->speed = SPEED_100;
			else
				ecmd->speed = SPEED_10;
		}

		if (psr & MV643XX_ETH_PORT_STATUS_FULL_DUPLEX)
			ecmd->duplex = DUPLEX_FULL;
		else
			ecmd->duplex = DUPLEX_HALF;
	} else {
		ecmd->speed = -1;
		ecmd->duplex = -1;
	}

	ecmd->autoneg = autoneg ? AUTONEG_ENABLE : AUTONEG_DISABLE;
	return 0;
}
static void
mv643xx_get_drvinfo(struct net_device *netdev,
			struct ethtool_drvinfo *drvinfo)
{
	strncpy(drvinfo->driver, mv643xx_driver_name, 32);
	strncpy(drvinfo->version, mv643xx_driver_version, 32);
	strncpy(drvinfo->fw_version, "N/A", 32);
	strncpy(drvinfo->bus_info, "mv643xx", 32);
	drvinfo->n_stats = MV643XX_STATS_LEN;
}
static int
mv643xx_get_stats_count(struct net_device *netdev)
{
	return MV643XX_STATS_LEN;
}
static void
mv643xx_get_ethtool_stats(struct net_device *netdev,
		struct ethtool_stats *stats, uint64_t *data)
{
	struct mv643xx_private *mp = netdev->priv;
	int i;

	eth_update_mib_counters(mp);

	for (i = 0; i < MV643XX_STATS_LEN; i++) {
		char *p = (char *)mp + mv643xx_gstrings_stats[i].stat_offset;
		data[i] = (mv643xx_gstrings_stats[i].sizeof_stat ==
			sizeof(uint64_t)) ? *(uint64_t *)p : *(uint32_t *)p;
	}
}
static void
mv643xx_get_strings(struct net_device *netdev, uint32_t stringset,
			uint8_t *data)
{
	int i;

	switch (stringset) {
	case ETH_SS_STATS:
		for (i = 0; i < MV643XX_STATS_LEN; i++) {
			memcpy(data + i * ETH_GSTRING_LEN,
				mv643xx_gstrings_stats[i].stat_string,
				ETH_GSTRING_LEN);
		}
		break;
	}
}
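/*
 * From userspace these counters surface through "ethtool -S <iface>":
 * the ethtool core calls get_stats_count() to size its buffers,
 * get_strings() for the labels, and get_ethtool_stats() for the values,
 * all wired up through the ops table below.
 */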
static struct ethtool_ops mv643xx_ethtool_ops = {
	.get_settings		= mv643xx_get_settings,
	.get_drvinfo		= mv643xx_get_drvinfo,
	.get_link		= ethtool_op_get_link,
	.get_sg			= ethtool_op_get_sg,
	.set_sg			= ethtool_op_set_sg,
	.get_strings		= mv643xx_get_strings,
	.get_stats_count	= mv643xx_get_stats_count,
	.get_ethtool_stats	= mv643xx_get_ethtool_stats,
};

/************* End ethtool support *************************/