MOXA linux-2.6.x / linux-2.6.9-uc0 from sdlinux-moxaart.tgz
[linux-2.6.9-moxart.git] / drivers / net / mv643xx_eth.c
1 /*
2 * drivers/net/mv643xx_eth.c - Driver for MV64340X ethernet ports
3 * Copyright (C) 2002 Matthew Dharm <mdharm@momenco.com>
5 * Based on the 64360 driver from:
6 * Copyright (C) 2002 rabeeh@galileo.co.il
8 * Copyright (C) 2003 PMC-Sierra, Inc.,
9 * written by Manish Lachwani (lachwani@pmc-sierra.com)
11 * Copyright (C) 2003 Ralf Baechle <ralf@linux-mips.org>
13 * This program is free software; you can redistribute it and/or
14 * modify it under the terms of the GNU General Public License
15 * as published by the Free Software Foundation; either version 2
16 * of the License, or (at your option) any later version.
18 * This program is distributed in the hope that it will be useful,
19 * but WITHOUT ANY WARRANTY; without even the implied warranty of
20 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
21 * GNU General Public License for more details.
23 * You should have received a copy of the GNU General Public License
24 * along with this program; if not, write to the Free Software
25 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
27 #include <linux/config.h>
28 #include <linux/version.h>
29 #include <linux/module.h>
30 #include <linux/kernel.h>
32 #include <linux/sched.h>
33 #include <linux/ptrace.h>
34 #include <linux/fcntl.h>
35 #include <linux/ioport.h>
36 #include <linux/interrupt.h>
37 #include <linux/slab.h>
38 #include <linux/string.h>
39 #include <linux/errno.h>
40 #include <linux/ip.h>
41 #include <linux/init.h>
42 #include <linux/in.h>
43 #include <linux/pci.h>
44 #include <linux/workqueue.h>
45 #include <asm/smp.h>
46 #include <linux/skbuff.h>
47 #include <linux/tcp.h>
48 #include <linux/netdevice.h>
49 #include <linux/etherdevice.h>
50 #include <net/ip.h>
52 #include <asm/bitops.h>
53 #include <asm/io.h>
54 #include <asm/types.h>
55 #include <asm/pgtable.h>
56 #include <asm/system.h>
57 #include "mv643xx_eth.h"
60 * The first part is the high level driver of the gigE ethernet ports.
63 /* Definition for configuring driver */
64 #undef MV64340_RX_QUEUE_FILL_ON_TASK
66 /* Constants */
67 #define EXTRA_BYTES 32
68 #define WRAP		(ETH_HLEN + 2 + 4 + 16)
69 #define BUFFER_MTU	(dev->mtu + WRAP)
70 #define INT_CAUSE_UNMASK_ALL 0x0007ffff
71 #define INT_CAUSE_UNMASK_ALL_EXT 0x0011ffff
72 #ifdef MV64340_RX_QUEUE_FILL_ON_TASK
73 #define INT_CAUSE_MASK_ALL 0x00000000
74 #define INT_CAUSE_CHECK_BITS INT_CAUSE_UNMASK_ALL
75 #define INT_CAUSE_CHECK_BITS_EXT INT_CAUSE_UNMASK_ALL_EXT
76 #endif
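/*
 * Worked example of the sizing macros above (a sketch, assuming the
 * standard 1500-byte MTU): WRAP = ETH_HLEN(14) + 2 + 4 + 16 = 36, so
 * BUFFER_MTU = 1500 + 36 = 1536, and the RX refill task allocates
 * BUFFER_MTU + 8 + EXTRA_BYTES = 1536 + 8 + 32 = 1576 bytes per skb,
 * leaving room for 8-byte alignment plus 32 spare bytes.
 */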
78 /* Static function declarations */
79 static int mv64340_eth_real_open(struct net_device *);
80 static int mv64340_eth_real_stop(struct net_device *);
81 static int mv64340_eth_change_mtu(struct net_device *, int);
82 static struct net_device_stats *mv64340_eth_get_stats(struct net_device *);
83 static void eth_port_init_mac_tables(unsigned int eth_port_num);
84 #ifdef MV64340_NAPI
85 static int mv64340_poll(struct net_device *dev, int *budget);
86 #endif
88 unsigned char prom_mac_addr_base[6];
89 unsigned long mv64340_sram_base;
92 * Changes MTU (maximum transfer unit) of the gigabit ethernet port
94 * Input : pointer to ethernet interface network device structure
95 * new mtu size
96 * Output : 0 upon success, -EINVAL upon failure
98 static int mv64340_eth_change_mtu(struct net_device *dev, int new_mtu)
100 struct mv64340_private *mp = netdev_priv(dev);
101 unsigned long flags;
103 spin_lock_irqsave(&mp->lock, flags);
105 if ((new_mtu > 9500) || (new_mtu < 64)) {
106 spin_unlock_irqrestore(&mp->lock, flags);
107 return -EINVAL;
110 dev->mtu = new_mtu;
112 * Stop then re-open the interface. This will allocate RX skb's with
113 * the new MTU.
114 * There is a possible danger that the open will not succeed if
115 * memory is exhausted, which would make the reopen fail.
117 if (netif_running(dev)) {
118 if (mv64340_eth_real_stop(dev))
119 printk(KERN_ERR
120 "%s: Fatal error on stopping device\n",
121 dev->name);
122 if (mv64340_eth_real_open(dev))
123 printk(KERN_ERR
124 "%s: Fatal error on opening device\n",
125 dev->name);
128 spin_unlock_irqrestore(&mp->lock, flags);
129 return 0;
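/*
 * Example (a sketch): the SIOCSIFMTU ioctl path (e.g. "ifconfig eth0
 * mtu 9000") ends up calling dev->change_mtu, i.e. this routine, with
 * new_mtu == 9000; that passes the 64..9500 bounds check above, while
 * new_mtu == 9600 would be rejected with -EINVAL.
 */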
133 * mv64340_eth_rx_task
135 * Fills / refills RX queue on a certain gigabit ethernet port
137 * Input : pointer to ethernet interface network device structure
138 * Output : N/A
140 static void mv64340_eth_rx_task(void *data)
142 struct net_device *dev = (struct net_device *) data;
143 struct mv64340_private *mp = netdev_priv(dev);
144 struct pkt_info pkt_info;
145 struct sk_buff *skb;
147 if (test_and_set_bit(0, &mp->rx_task_busy))
148 panic("%s: Error in test_set_bit / clear_bit", dev->name);
150 while (mp->rx_ring_skbs < (mp->rx_ring_size - 5)) {
151 /* The +8 is for buffer alignment, plus another 32 bytes extra */
153 skb = dev_alloc_skb(BUFFER_MTU + 8 + EXTRA_BYTES);
154 if (!skb)
155 /* Better luck next time */
156 break;
157 mp->rx_ring_skbs++;
158 pkt_info.cmd_sts = ETH_RX_ENABLE_INTERRUPT;
159 pkt_info.byte_cnt = dev->mtu + ETH_HLEN + 4 + 2 + EXTRA_BYTES;
160 /* Align buffer to 8 bytes */
161 if (pkt_info.byte_cnt & ~0x7) {
162 pkt_info.byte_cnt &= ~0x7;
163 pkt_info.byte_cnt += 8;
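/*
 * Alignment example (a sketch): for byte_cnt == 1554 the test
 * above is true, 1554 & ~0x7 == 1552, plus 8 gives 1560, i.e.
 * the byte count is rounded up to the next multiple of 8.
 */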
165 pkt_info.buf_ptr =
166 pci_map_single(0, skb->data,
167 dev->mtu + ETH_HLEN + 4 + 2 + EXTRA_BYTES,
168 PCI_DMA_FROMDEVICE);
169 pkt_info.return_info = skb;
170 if (eth_rx_return_buff(mp, &pkt_info) != ETH_OK) {
171 printk(KERN_ERR
172 "%s: Error allocating RX Ring\n", dev->name);
173 break;
175 skb_reserve(skb, 2);
177 clear_bit(0, &mp->rx_task_busy);
179 * If the RX ring is empty of skbs, set a timer to retry the
180 * allocation at a later time.
182 if ((mp->rx_ring_skbs == 0) && (mp->rx_timer_flag == 0)) {
183 printk(KERN_INFO "%s: Rx ring is empty\n", dev->name);
184 /* After 100 msec */
185 mp->timeout.expires = jiffies + (HZ / 10);
186 add_timer(&mp->timeout);
187 mp->rx_timer_flag = 1;
189 #if MV64340_RX_QUEUE_FILL_ON_TASK
190 else {
191 /* Return interrupts */
192 MV_WRITE(MV64340_ETH_INTERRUPT_MASK_REG(mp->port_num),
193 INT_CAUSE_UNMASK_ALL);
195 #endif
199 * mv64340_eth_rx_task_timer_wrapper
201 * Timer routine to wake up RX queue filling task. This function is
202 * used only when the RX queue is empty and all skb allocations
203 * have failed (due to an out of memory event).
205 * Input : pointer to ethernet interface network device structure
206 * Output : N/A
208 static void mv64340_eth_rx_task_timer_wrapper(unsigned long data)
210 struct net_device *dev = (struct net_device *) data;
211 struct mv64340_private *mp = netdev_priv(dev);
213 mp->rx_timer_flag = 0;
214 mv64340_eth_rx_task((void *) data);
219 * mv64340_eth_update_mac_address
221 * Update the MAC address of the port in the address table
223 * Input : pointer to ethernet interface network device structure
224 * Output : N/A
226 static void mv64340_eth_update_mac_address(struct net_device *dev)
228 struct mv64340_private *mp = netdev_priv(dev);
229 unsigned int port_num = mp->port_num;
231 eth_port_init_mac_tables(port_num);
232 memcpy(mp->port_mac_addr, dev->dev_addr, 6);
233 eth_port_uc_addr_set(port_num, mp->port_mac_addr);
237 * mv64340_eth_set_rx_mode
239 * Change from promiscuous to regular rx mode
241 * Input : pointer to ethernet interface network device structure
242 * Output : N/A
244 static void mv64340_eth_set_rx_mode(struct net_device *dev)
246 struct mv64340_private *mp = netdev_priv(dev);
248 if (dev->flags & IFF_PROMISC) {
249 ethernet_set_config_reg
250 (mp->port_num,
251 ethernet_get_config_reg(mp->port_num) |
252 ETH_UNICAST_PROMISCUOUS_MODE);
253 } else {
254 ethernet_set_config_reg
255 (mp->port_num,
256 ethernet_get_config_reg(mp->port_num) &
257 ~(unsigned int) ETH_UNICAST_PROMISCUOUS_MODE);
263 * mv64340_eth_set_mac_address
265 * Change the interface's MAC address.
266 * No special hardware handling is needed because the interface is
267 * always put in promiscuous mode.
269 * Input : pointer to ethernet interface network device structure and
270 * a pointer to the designated entry to be added to the cache.
271 * Output : zero upon success, negative upon failure
273 static int mv64340_eth_set_mac_address(struct net_device *dev, void *addr)
275 int i;
277 for (i = 0; i < 6; i++)
278 /* +2 is for the offset of the HW addr type */
279 dev->dev_addr[i] = ((unsigned char *) addr)[i + 2];
280 mv64340_eth_update_mac_address(dev);
281 return 0;
285 * mv64340_eth_tx_timeout
287 * Called upon a timeout on transmitting a packet
289 * Input : pointer to ethernet interface network device structure.
290 * Output : N/A
292 static void mv64340_eth_tx_timeout(struct net_device *dev)
294 struct mv64340_private *mp = netdev_priv(dev);
296 printk(KERN_INFO "%s: TX timeout ", dev->name);
298 /* Do the reset outside of interrupt context */
299 schedule_work(&mp->tx_timeout_task);
303 * mv64340_eth_tx_timeout_task
305 * Actual routine to reset the adapter when a timeout on Tx has occurred
307 static void mv64340_eth_tx_timeout_task(struct net_device *dev)
309 struct mv64340_private *mp = netdev_priv(dev);
311 netif_device_detach(dev);
312 eth_port_reset(mp->port_num);
313 eth_port_start(mp);
314 netif_device_attach(dev);
318 * mv64340_eth_free_tx_queue
320 * Input : dev - a pointer to the required interface
322 * Output : 0 if it was able to release an skb, nonzero otherwise
324 static int mv64340_eth_free_tx_queue(struct net_device *dev,
325 unsigned int eth_int_cause_ext)
327 struct mv64340_private *mp = netdev_priv(dev);
328 struct net_device_stats *stats = &mp->stats;
329 struct pkt_info pkt_info;
330 int released = 1;
332 if (!(eth_int_cause_ext & (BIT0 | BIT8)))
333 return released;
335 spin_lock(&mp->lock);
337 /* Check only queue 0 */
338 while (eth_tx_return_desc(mp, &pkt_info) == ETH_OK) {
339 if (pkt_info.cmd_sts & BIT0) {
340 printk("%s: Error in TX\n", dev->name);
341 stats->tx_errors++;
345 * If return_info is non-zero, release the skb.
346 * return_info is non-zero only in the case where
347 * a scatter/gather packet was transmitted, and only the
348 * last skb releases the whole chain.
350 if (pkt_info.return_info) {
351 dev_kfree_skb_irq((struct sk_buff *)
352 pkt_info.return_info);
353 released = 0;
354 if (skb_shinfo(pkt_info.return_info)->nr_frags)
355 pci_unmap_page(NULL, pkt_info.buf_ptr,
356 pkt_info.byte_cnt, PCI_DMA_TODEVICE);
358 if (mp->tx_ring_skbs != 1)
359 mp->tx_ring_skbs--;
360 } else
361 pci_unmap_page(NULL, pkt_info.buf_ptr,
362 pkt_info.byte_cnt, PCI_DMA_TODEVICE);
365 * Decrement the number of outstanding skbs counter on
366 * the TX queue.
368 if (mp->tx_ring_skbs == 0)
369 panic("ERROR - TX outstanding SKBs counter is corrupted");
373 spin_unlock(&mp->lock);
375 return released;
379 * mv64340_eth_receive
381 * This function forwards packets received from the port's queues
382 * toward the kernel core, or FastRoutes them to another interface.
384 * Input : dev - a pointer to the required interface
385 * max - maximum number to receive (0 means unlimited)
387 * Output : number of served packets
389 #ifdef MV64340_NAPI
390 static int mv64340_eth_receive_queue(struct net_device *dev, unsigned int max,
391 int budget)
392 #else
393 static int mv64340_eth_receive_queue(struct net_device *dev, unsigned int max)
394 #endif
396 struct mv64340_private *mp = netdev_priv(dev);
397 struct net_device_stats *stats = &mp->stats;
398 unsigned int received_packets = 0;
399 struct sk_buff *skb;
400 struct pkt_info pkt_info;
402 #ifdef MV64340_NAPI
403 while (eth_port_receive(mp, &pkt_info) == ETH_OK && budget > 0) {
404 #else
405 while ((--max) && eth_port_receive(mp, &pkt_info) == ETH_OK) {
406 #endif
407 mp->rx_ring_skbs--;
408 received_packets++;
409 #ifdef MV64340_NAPI
410 budget--;
411 #endif
412 /* Update statistics. Note byte count includes 4 byte CRC count */
413 stats->rx_packets++;
414 stats->rx_bytes += pkt_info.byte_cnt;
415 skb = (struct sk_buff *) pkt_info.return_info;
417 * If a packet was received without the first/last bits set, or
418 * with the error summary bit set, the packet needs to be dropped.
420 if (((pkt_info.cmd_sts
421 & (ETH_RX_FIRST_DESC | ETH_RX_LAST_DESC)) !=
422 (ETH_RX_FIRST_DESC | ETH_RX_LAST_DESC))
423 || (pkt_info.cmd_sts & ETH_ERROR_SUMMARY)) {
424 stats->rx_dropped++;
425 if ((pkt_info.cmd_sts & (ETH_RX_FIRST_DESC |
426 ETH_RX_LAST_DESC)) !=
427 (ETH_RX_FIRST_DESC | ETH_RX_LAST_DESC)) {
428 if (net_ratelimit())
429 printk(KERN_ERR
430 "%s: Received packet spread on multiple"
431 " descriptors\n",
432 dev->name);
434 if (pkt_info.cmd_sts & ETH_ERROR_SUMMARY)
435 stats->rx_errors++;
437 dev_kfree_skb_irq(skb);
438 } else {
440 * The -4 is for the CRC in the trailer of the
441 * received packet
443 skb_put(skb, pkt_info.byte_cnt - 4);
444 skb->dev = dev;
446 if (pkt_info.cmd_sts & ETH_LAYER_4_CHECKSUM_OK) {
447 skb->ip_summed = CHECKSUM_UNNECESSARY;
448 skb->csum = htons((pkt_info.cmd_sts
449 & 0x0007fff8) >> 3);
451 skb->protocol = eth_type_trans(skb, dev);
452 #ifdef MV64340_NAPI
453 netif_receive_skb(skb);
454 #else
455 netif_rx(skb);
456 #endif
460 return received_packets;
464 * mv64340_eth_int_handler
466 * Main interrupt handler for the gigabit ethernet ports
468 * Input : irq - irq number (not used)
469 * dev_id - a pointer to the required interface's data structure
470 * regs - not used
471 * Output : N/A
474 static irqreturn_t mv64340_eth_int_handler(int irq, void *dev_id,
475 struct pt_regs *regs)
477 struct net_device *dev = (struct net_device *) dev_id;
478 struct mv64340_private *mp = netdev_priv(dev);
479 u32 eth_int_cause, eth_int_cause_ext = 0;
480 unsigned int port_num = mp->port_num;
482 /* Read interrupt cause registers */
483 eth_int_cause = MV_READ(MV64340_ETH_INTERRUPT_CAUSE_REG(port_num)) &
484 INT_CAUSE_UNMASK_ALL;
486 if (eth_int_cause & BIT1)
487 eth_int_cause_ext =
488 MV_READ(MV64340_ETH_INTERRUPT_CAUSE_EXTEND_REG(port_num)) &
489 INT_CAUSE_UNMASK_ALL_EXT;
491 #ifdef MV64340_NAPI
492 if (!(eth_int_cause & 0x0007fffd)) {
493 /* Don't ack the Rx interrupt */
494 #endif
496 * Clear specific ethernet port interrupt registers by
497 * acknowledging the relevant bits.
499 MV_WRITE(MV64340_ETH_INTERRUPT_CAUSE_REG(port_num),
500 ~eth_int_cause);
501 if (eth_int_cause_ext != 0x0)
502 MV_WRITE(MV64340_ETH_INTERRUPT_CAUSE_EXTEND_REG(port_num),
503 ~eth_int_cause_ext);
505 /* UDP change : We may need this */
506 if ((eth_int_cause_ext & 0x0000ffff) &&
507 (mv64340_eth_free_tx_queue(dev, eth_int_cause_ext) == 0) &&
508 (MV64340_TX_QUEUE_SIZE > mp->tx_ring_skbs + 1))
509 netif_wake_queue(dev);
510 #ifdef MV64340_NAPI
511 } else {
512 if (netif_rx_schedule_prep(dev)) {
513 /* Mask all the interrupts */
514 MV_WRITE(MV64340_ETH_INTERRUPT_MASK_REG(port_num),0);
515 MV_WRITE(MV64340_ETH_INTERRUPT_EXTEND_MASK_REG(port_num), 0);
516 __netif_rx_schedule(dev);
518 #else
520 if (eth_int_cause & (BIT2 | BIT11))
521 mv64340_eth_receive_queue(dev, 0);
524 * After forwarded received packets to upper layer, add a task
525 * in an interrupts enabled context that refills the RX ring
526 * with skb's.
528 #if MV64340_RX_QUEUE_FILL_ON_TASK
529 /* Unmask all interrupts on ethernet port */
530 MV_WRITE(MV64340_ETH_INTERRUPT_MASK_REG(port_num),
531 INT_CAUSE_MASK_ALL);
532 queue_task(&mp->rx_task, &tq_immediate);
533 mark_bh(IMMEDIATE_BH);
534 #else
535 mp->rx_task.func(dev);
536 #endif
537 #endif
539 /* PHY status changed */
540 if (eth_int_cause_ext & (BIT16 | BIT20)) {
541 unsigned int phy_reg_data;
543 /* Check Link status on ethernet port */
544 eth_port_read_smi_reg(port_num, 1, &phy_reg_data);
545 if (!(phy_reg_data & 0x20)) {
546 netif_stop_queue(dev);
547 } else {
548 netif_wake_queue(dev);
551 * Start all TX queues on the ethernet port. This is useful in
552 * case previous packets were not transmitted due to link down,
553 * as this command re-enables all TX
554 * queues.
555 * Note that it is possible to get a TX resource error
556 * interrupt after issuing this, since not all TX queues
557 * are enabled or have anything to send.
559 MV_WRITE(MV64340_ETH_TRANSMIT_QUEUE_COMMAND_REG(port_num), 1);
564 * If no real interrupt occurred, exit.
565 * This can happen when using the gigE interrupt coalescing mechanism.
567 if ((eth_int_cause == 0x0) && (eth_int_cause_ext == 0x0))
568 return IRQ_NONE;
570 return IRQ_HANDLED;
573 #ifdef MV64340_COAL
576 * eth_port_set_rx_coal - Sets coalescing interrupt mechanism on RX path
578 * DESCRIPTION:
579 * This routine sets the RX coalescing interrupt mechanism parameter.
580 * This parameter is a timeout counter that counts in chunks
581 * of 64 t_clk; when the timeout expires, a maskable interrupt
582 * is raised.
583 * The parameter is calculated using the t_clk of the MV-643xx chip
584 * and the required delay of the interrupt in usec.
586 * INPUT:
587 * unsigned int eth_port_num Ethernet port number
588 * unsigned int t_clk t_clk of the MV-643xx chip in HZ units
589 * unsigned int delay Delay in usec
591 * OUTPUT:
592 * Interrupt coalescing mechanism value is set in MV-643xx chip.
594 * RETURN:
595 * The interrupt coalescing value set in the gigE port.
598 static unsigned int eth_port_set_rx_coal(unsigned int eth_port_num,
599 unsigned int t_clk, unsigned int delay)
601 unsigned int coal = ((t_clk / 1000000) * delay) / 64;
603 /* Set RX Coalescing mechanism */
604 MV_WRITE(MV64340_ETH_SDMA_CONFIG_REG(eth_port_num),
605 ((coal & 0x3fff) << 8) |
606 (MV_READ(MV64340_ETH_SDMA_CONFIG_REG(eth_port_num))
607 & 0xffc000ff));
609 return coal;
611 #endif
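/*
 * Worked example for eth_port_set_rx_coal() (a sketch, assuming t_clk =
 * 133000000 Hz and delay = 20 usec): coal = ((133000000 / 1000000) * 20)
 * / 64 = 2660 / 64 = 41, and (41 & 0x3fff) << 8 is merged into the SDMA
 * config register while the other bits are preserved.
 */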
614 * eth_port_set_tx_coal - Sets coalescing interrupt mechanism on TX path
616 * DESCRIPTION:
617 * This routine sets the TX coalescing interrupt mechanism parameter.
618 * This parameter is a timeout counter that counts in chunks
619 * of 64 t_clk; when the timeout expires, a maskable interrupt
620 * is raised.
621 * The parameter is calculated using the t_clk frequency of the
622 * MV-643xx chip and the required delay of the interrupt in usec.
624 * INPUT:
625 * unsigned int eth_port_num Ethernet port number
626 * unsigned int t_clk t_clk of the MV-643xx chip in HZ units
627 * unsigned int delay Delay in uSeconds
629 * OUTPUT:
630 * Interrupt coalescing mechanism value is set in MV-643xx chip.
632 * RETURN:
633 * The interrupt coalescing value set in the gigE port.
636 static unsigned int eth_port_set_tx_coal(unsigned int eth_port_num,
637 unsigned int t_clk, unsigned int delay)
639 unsigned int coal;
640 coal = ((t_clk / 1000000) * delay) / 64;
641 /* Set TX Coalescing mechanism */
642 MV_WRITE(MV64340_ETH_TX_FIFO_URGENT_THRESHOLD_REG(eth_port_num),
643 coal << 4);
644 return coal;
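/*
 * The TX case uses the same arithmetic (a sketch): t_clk = 133 MHz and
 * delay = 20 usec again give coal = 41, here written to the TX FIFO
 * urgent threshold register shifted left by 4.
 */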
648 * mv64340_eth_open
650 * This function is called when opening the network device. The function
651 * should initialize all the hardware, initialize cyclic Rx/Tx
652 * descriptors chain and buffers and allocate an IRQ to the network
653 * device.
655 * Input : a pointer to the network device structure
657 * Output : zero on success, nonzero on failure.
660 static int mv64340_eth_open(struct net_device *dev)
662 struct mv64340_private *mp = netdev_priv(dev);
663 unsigned int port_num = mp->port_num;
664 int err;
666 spin_lock_irq(&mp->lock);
668 err = request_irq(dev->irq, mv64340_eth_int_handler,
669 SA_INTERRUPT | SA_SAMPLE_RANDOM, dev->name, dev);
671 if (err) {
672 printk(KERN_ERR "Can not assign IRQ number to MV64340_eth%d\n",
673 port_num);
674 err = -EAGAIN;
675 goto out;
678 if (mv64340_eth_real_open(dev)) {
679 printk("%s: Error opening interface\n", dev->name);
680 err = -EBUSY;
681 goto out_free;
684 spin_unlock_irq(&mp->lock);
686 return 0;
688 out_free:
689 free_irq(dev->irq, dev);
691 out:
692 spin_unlock_irq(&mp->lock);
694 return err;
698 * ether_init_rx_desc_ring - Curve a Rx chain desc list and buffer in memory.
700 * DESCRIPTION:
701 * This function prepares a Rx chained list of descriptors and packet
702 * buffers in a form of a ring. The routine must be called after port
703 * initialization routine and before port start routine.
704 * The Ethernet SDMA engine uses CPU bus addresses to access the various
705 * devices in the system (i.e. DRAM). This function uses the ethernet
706 * struct 'virtual to physical' routine (set by the user) to set the ring
707 * with physical addresses.
709 * INPUT:
710 * struct mv64340_private *mp Ethernet Port Control struct.
711 * int rx_desc_num Number of Rx descriptors
712 * int rx_buff_size Size of Rx buffer
713 * unsigned int rx_desc_base_addr Rx descriptors memory area base addr.
714 * unsigned int rx_buff_base_addr Rx buffer memory area base addr.
716 * OUTPUT:
717 * The routine updates the Ethernet port control struct with information
718 * regarding the Rx descriptors and buffers.
720 * RETURN:
721 * false if the given descriptors memory area is not aligned according to
722 * Ethernet SDMA specifications.
723 * true otherwise.
725 static int ether_init_rx_desc_ring(struct mv64340_private * mp,
726 unsigned long rx_buff_base_addr)
728 unsigned long buffer_addr = rx_buff_base_addr;
729 volatile struct eth_rx_desc *p_rx_desc;
730 int rx_desc_num = mp->rx_ring_size;
731 unsigned long rx_desc_base_addr = (unsigned long) mp->p_rx_desc_area;
732 int rx_buff_size = 1536; /* Dummy, will be replaced later */
733 int i;
735 p_rx_desc = (struct eth_rx_desc *) rx_desc_base_addr;
737 /* Rx desc Must be 4LW aligned (i.e. Descriptor_Address[3:0]=0000). */
738 if (rx_buff_base_addr & 0xf)
739 return 0;
741 /* Rx buffers are limited to 64K bytes and Minimum size is 8 bytes */
742 if ((rx_buff_size < 8) || (rx_buff_size > RX_BUFFER_MAX_SIZE))
743 return 0;
745 /* Rx buffers must be 64-bit aligned. */
746 if ((rx_buff_base_addr + rx_buff_size) & 0x7)
747 return 0;
749 /* initialize the Rx descriptors ring */
750 for (i = 0; i < rx_desc_num; i++) {
751 p_rx_desc[i].buf_size = rx_buff_size;
752 p_rx_desc[i].byte_cnt = 0x0000;
753 p_rx_desc[i].cmd_sts =
754 ETH_BUFFER_OWNED_BY_DMA | ETH_RX_ENABLE_INTERRUPT;
755 p_rx_desc[i].next_desc_ptr = mp->rx_desc_dma +
756 ((i + 1) % rx_desc_num) * sizeof(struct eth_rx_desc);
757 p_rx_desc[i].buf_ptr = buffer_addr;
759 mp->rx_skb[i] = NULL;
760 buffer_addr += rx_buff_size;
763 /* Save Rx desc pointer to driver struct. */
764 mp->rx_curr_desc_q = 0;
765 mp->rx_used_desc_q = 0;
767 mp->rx_desc_area_size = rx_desc_num * sizeof(struct eth_rx_desc);
769 mp->port_rx_queue_command |= 1;
771 return 1;
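/*
 * Ring-linkage example (a sketch, assuming rx_desc_num == 4): descriptor
 * i is linked to rx_desc_dma + ((i + 1) % 4) * sizeof(struct eth_rx_desc),
 * so the chain runs 0 -> 1 -> 2 -> 3 -> 0 and closes into a ring; the
 * Tx ring below is curved the same way.
 */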
775 * ether_init_tx_desc_ring - Curve a Tx chain desc list and buffer in memory.
777 * DESCRIPTION:
778 * This function prepares a Tx chained list of descriptors and packet
779 * buffers in a form of a ring. The routine must be called after port
780 * initialization routine and before port start routine.
781 * The Ethernet SDMA engine uses CPU bus addresses to access the various
782 * devices in the system (i.e. DRAM). This function uses the ethernet
783 * struct 'virtual to physical' routine (set by the user) to set the ring
784 * with physical addresses.
786 * INPUT:
787 * struct mv64340_private *mp Ethernet Port Control struct.
788 * int tx_desc_num Number of Tx descriptors
789 * int tx_buff_size Size of Tx buffer
790 * unsigned int tx_desc_base_addr Tx descriptors memory area base addr.
792 * OUTPUT:
793 * The routine updates the Ethernet port control struct with information
794 * regarding the Tx descriptors and buffers.
796 * RETURN:
797 * false if the given descriptors memory area is not aligned according to
798 * Ethernet SDMA specifications.
799 * true otherwise.
801 static int ether_init_tx_desc_ring(struct mv64340_private *mp)
803 unsigned long tx_desc_base_addr = (unsigned long) mp->p_tx_desc_area;
804 int tx_desc_num = mp->tx_ring_size;
805 struct eth_tx_desc *p_tx_desc;
806 int i;
808 /* Tx desc Must be 4LW aligned (i.e. Descriptor_Address[3:0]=0000). */
809 if (tx_desc_base_addr & 0xf)
810 return 0;
812 /* save the first desc pointer to link with the last descriptor */
813 p_tx_desc = (struct eth_tx_desc *) tx_desc_base_addr;
815 /* Initialize the Tx descriptors ring */
816 for (i = 0; i < tx_desc_num; i++) {
817 p_tx_desc[i].byte_cnt = 0x0000;
818 p_tx_desc[i].l4i_chk = 0x0000;
819 p_tx_desc[i].cmd_sts = 0x00000000;
820 p_tx_desc[i].next_desc_ptr = mp->tx_desc_dma +
821 ((i + 1) % tx_desc_num) * sizeof(struct eth_tx_desc);
822 p_tx_desc[i].buf_ptr = 0x00000000;
823 mp->tx_skb[i] = NULL;
826 /* Set Tx desc pointer in driver struct. */
827 mp->tx_curr_desc_q = 0;
828 mp->tx_used_desc_q = 0;
829 #ifdef MV64340_CHECKSUM_OFFLOAD_TX
830 mp->tx_first_desc_q = 0;
831 #endif
832 /* Init Tx ring base and size parameters */
833 mp->tx_desc_area_size = tx_desc_num * sizeof(struct eth_tx_desc);
835 /* Add the queue to the list of Tx queues of this port */
836 mp->port_tx_queue_command |= 1;
838 return 1;
841 /* Helper function for mv64340_eth_open */
842 static int mv64340_eth_real_open(struct net_device *dev)
844 struct mv64340_private *mp = netdev_priv(dev);
845 unsigned int port_num = mp->port_num;
846 u32 phy_reg_data;
847 unsigned int size;
849 /* Stop RX Queues */
850 MV_WRITE(MV64340_ETH_RECEIVE_QUEUE_COMMAND_REG(port_num),
851 0x0000ff00);
853 /* Clear the ethernet port interrupts */
854 MV_WRITE(MV64340_ETH_INTERRUPT_CAUSE_REG(port_num), 0);
855 MV_WRITE(MV64340_ETH_INTERRUPT_CAUSE_EXTEND_REG(port_num), 0);
857 /* Unmask RX buffer and TX end interrupt */
858 MV_WRITE(MV64340_ETH_INTERRUPT_MASK_REG(port_num),
859 INT_CAUSE_UNMASK_ALL);
861 /* Unmask phy and link status changes interrupts */
862 MV_WRITE(MV64340_ETH_INTERRUPT_EXTEND_MASK_REG(port_num),
863 INT_CAUSE_UNMASK_ALL_EXT);
865 /* Set the MAC Address */
866 memcpy(mp->port_mac_addr, dev->dev_addr, 6);
868 eth_port_init(mp);
870 INIT_WORK(&mp->rx_task, (void (*)(void *)) mv64340_eth_rx_task, dev);
872 memset(&mp->timeout, 0, sizeof(struct timer_list));
873 mp->timeout.function = mv64340_eth_rx_task_timer_wrapper;
874 mp->timeout.data = (unsigned long) dev;
876 mp->rx_task_busy = 0;
877 mp->rx_timer_flag = 0;
879 /* Allocate TX ring */
880 mp->tx_ring_skbs = 0;
881 mp->tx_ring_size = MV64340_TX_QUEUE_SIZE;
882 size = mp->tx_ring_size * sizeof(struct eth_tx_desc);
883 mp->tx_desc_area_size = size;
885 /* Assumes allocated ring is 16 bytes aligned */
886 mp->p_tx_desc_area = pci_alloc_consistent(NULL, size, &mp->tx_desc_dma);
887 if (!mp->p_tx_desc_area) {
888 printk(KERN_ERR "%s: Cannot allocate Tx Ring (size %d bytes)\n",
889 dev->name, size);
890 return -ENOMEM;
892 memset((void *) mp->p_tx_desc_area, 0, mp->tx_desc_area_size);
894 /* Dummy will be replaced upon real tx */
895 ether_init_tx_desc_ring(mp);
897 /* Allocate RX ring */
898 /* For now the RX ring size is fixed, but it should be user-configurable */
899 mp->rx_ring_size = MV64340_RX_QUEUE_SIZE;
900 mp->rx_ring_skbs = 0;
901 size = mp->rx_ring_size * sizeof(struct eth_rx_desc);
902 mp->rx_desc_area_size = size;
904 /* Assumes allocated ring is 16 bytes aligned */
906 mp->p_rx_desc_area = pci_alloc_consistent(NULL, size, &mp->rx_desc_dma);
908 if (!mp->p_rx_desc_area) {
909 printk(KERN_ERR "%s: Cannot allocate Rx ring (size %d bytes)\n",
910 dev->name, size);
911 printk(KERN_ERR "%s: Freeing previously allocated TX queues...",
912 dev->name);
913 pci_free_consistent(0, mp->tx_desc_area_size,
914 (void *) mp->p_tx_desc_area,
915 mp->tx_desc_dma);
916 return -ENOMEM;
918 memset(mp->p_rx_desc_area, 0, size);
920 if (!(ether_init_rx_desc_ring(mp, 0)))
921 panic("%s: Error initializing RX Ring", dev->name);
923 mv64340_eth_rx_task(dev); /* Fill RX ring with skb's */
925 eth_port_start(mp);
927 /* Interrupt Coalescing */
929 #ifdef MV64340_COAL
930 mp->rx_int_coal =
931 eth_port_set_rx_coal(port_num, 133000000, MV64340_RX_COAL);
932 #endif
934 mp->tx_int_coal =
935 eth_port_set_tx_coal (port_num, 133000000, MV64340_TX_COAL);
937 /* Increase the Rx side buffer size */
939 MV_WRITE (MV64340_ETH_PORT_SERIAL_CONTROL_REG(port_num), (0x5 << 17) |
940 (MV_READ(MV64340_ETH_PORT_SERIAL_CONTROL_REG(port_num))
941 & 0xfff1ffff));
943 /* Check Link status on phy */
944 eth_port_read_smi_reg(port_num, 1, &phy_reg_data);
945 if (!(phy_reg_data & 0x20))
946 netif_stop_queue(dev);
947 else
948 netif_start_queue(dev);
950 return 0;
953 static void mv64340_eth_free_tx_rings(struct net_device *dev)
955 struct mv64340_private *mp = netdev_priv(dev);
956 unsigned int port_num = mp->port_num;
957 unsigned int curr;
959 /* Stop Tx Queues */
960 MV_WRITE(MV64340_ETH_TRANSMIT_QUEUE_COMMAND_REG(port_num),
961 0x0000ff00);
963 /* Free TX rings */
964 /* Free outstanding skb's on TX rings */
965 for (curr = 0;
966 (mp->tx_ring_skbs) && (curr < MV64340_TX_QUEUE_SIZE);
967 curr++) {
968 if (mp->tx_skb[curr]) {
969 dev_kfree_skb(mp->tx_skb[curr]);
970 mp->tx_ring_skbs--;
973 if (mp->tx_ring_skbs != 0)
974 printk("%s: Error on Tx descriptor free - could not free %d"
975 " descriptors\n", dev->name,
976 mp->tx_ring_skbs);
977 pci_free_consistent(0, mp->tx_desc_area_size,
978 (void *) mp->p_tx_desc_area, mp->tx_desc_dma);
981 static void mv64340_eth_free_rx_rings(struct net_device *dev)
983 struct mv64340_private *mp = netdev_priv(dev);
984 unsigned int port_num = mp->port_num;
985 int curr;
987 /* Stop RX Queues */
988 MV_WRITE(MV64340_ETH_RECEIVE_QUEUE_COMMAND_REG(port_num),
989 0x0000ff00);
991 /* Free RX rings */
992 /* Free preallocated skb's on RX rings */
993 for (curr = 0;
994 mp->rx_ring_skbs && (curr < MV64340_RX_QUEUE_SIZE);
995 curr++) {
996 if (mp->rx_skb[curr]) {
997 dev_kfree_skb(mp->rx_skb[curr]);
998 mp->rx_ring_skbs--;
1002 if (mp->rx_ring_skbs != 0)
1003 printk(KERN_ERR
1004 "%s: Error in freeing Rx Ring. %d skb's still"
1005 " stuck in RX Ring - ignoring them\n", dev->name,
1006 mp->rx_ring_skbs);
1007 pci_free_consistent(0, mp->rx_desc_area_size,
1008 (void *) mp->p_rx_desc_area,
1009 mp->rx_desc_dma);
1013 * mv64340_eth_stop
1015 * This function is used when closing the network device.
1016 * It updates the hardware,
1017 * releases all memory that holds buffers and descriptors, and releases the IRQ.
1018 * Input : a pointer to the device structure
1019 * Output : zero on success, nonzero on failure
1022 /* Helper function for mv64340_eth_stop */
1024 static int mv64340_eth_real_stop(struct net_device *dev)
1026 struct mv64340_private *mp = netdev_priv(dev);
1027 unsigned int port_num = mp->port_num;
1029 netif_stop_queue(dev);
1031 mv64340_eth_free_tx_rings(dev);
1032 mv64340_eth_free_rx_rings(dev);
1034 eth_port_reset(mp->port_num);
1036 /* Disable ethernet port interrupts */
1037 MV_WRITE(MV64340_ETH_INTERRUPT_CAUSE_REG(port_num), 0);
1038 MV_WRITE(MV64340_ETH_INTERRUPT_CAUSE_EXTEND_REG(port_num), 0);
1040 /* Mask RX buffer and TX end interrupt */
1041 MV_WRITE(MV64340_ETH_INTERRUPT_MASK_REG(port_num), 0);
1043 /* Mask phy and link status changes interrupts */
1044 MV_WRITE(MV64340_ETH_INTERRUPT_EXTEND_MASK_REG(port_num), 0);
1046 return 0;
1049 static int mv64340_eth_stop(struct net_device *dev)
1051 struct mv64340_private *mp = netdev_priv(dev);
1053 spin_lock_irq(&mp->lock);
1055 mv64340_eth_real_stop(dev);
1057 free_irq(dev->irq, dev);
1058 spin_unlock_irq(&mp->lock);
1060 return 0;
1063 #ifdef MV64340_NAPI
1064 static void mv64340_tx(struct net_device *dev)
1066 struct mv64340_private *mp = netdev_priv(dev);
1067 struct pkt_info pkt_info;
1069 while (eth_tx_return_desc(mp, &pkt_info) == ETH_OK) {
1070 if (pkt_info.return_info) {
1071 dev_kfree_skb_irq((struct sk_buff *)
1072 pkt_info.return_info);
1073 if (skb_shinfo(pkt_info.return_info)->nr_frags)
1074 pci_unmap_page(NULL, pkt_info.buf_ptr,
1075 pkt_info.byte_cnt,
1076 PCI_DMA_TODEVICE);
1078 if (mp->tx_ring_skbs != 1)
1079 mp->tx_ring_skbs--;
1080 } else
1081 pci_unmap_page(NULL, pkt_info.buf_ptr, pkt_info.byte_cnt,
1082 PCI_DMA_TODEVICE);
1085 if (netif_queue_stopped(dev) &&
1086 MV64340_TX_QUEUE_SIZE > mp->tx_ring_skbs + 1)
1087 netif_wake_queue(dev);
1091 * mv64340_poll
1093 * This function is used in case of NAPI
1095 static int mv64340_poll(struct net_device *dev, int *budget)
1097 struct mv64340_private *mp = netdev_priv(dev);
1098 int done = 1, orig_budget, work_done;
1099 unsigned int port_num = mp->port_num;
1100 unsigned long flags;
1102 #ifdef MV64340_TX_FAST_REFILL
1103 if (++mp->tx_clean_threshold > 5) {
1104 spin_lock_irqsave(&mp->lock, flags);
1105 mv64340_tx(dev);
1106 mp->tx_clean_threshold = 0;
1107 spin_unlock_irqrestore(&mp->lock, flags);
1109 #endif
1111 if ((u32)(MV_READ(MV64340_ETH_RX_CURRENT_QUEUE_DESC_PTR_0(port_num))) != (u32)mp->rx_used_desc_q) {
1112 orig_budget = *budget;
1113 if (orig_budget > dev->quota)
1114 orig_budget = dev->quota;
1115 work_done = mv64340_eth_receive_queue(dev, 0, orig_budget);
1116 mp->rx_task.func(dev);
1117 *budget -= work_done;
1118 dev->quota -= work_done;
1119 if (work_done >= orig_budget)
1120 done = 0;
1123 if (done) {
1124 spin_lock_irqsave(&mp->lock, flags);
1125 __netif_rx_complete(dev);
1126 MV_WRITE(MV64340_ETH_INTERRUPT_CAUSE_REG(port_num),0);
1127 MV_WRITE(MV64340_ETH_INTERRUPT_CAUSE_EXTEND_REG(port_num),0);
1128 MV_WRITE(MV64340_ETH_INTERRUPT_MASK_REG(port_num),
1129 INT_CAUSE_UNMASK_ALL);
1130 MV_WRITE(MV64340_ETH_INTERRUPT_EXTEND_MASK_REG(port_num),
1131 INT_CAUSE_UNMASK_ALL_EXT);
1132 spin_unlock_irqrestore(&mp->lock, flags);
1135 return done ? 0 : 1;
1137 #endif
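/*
 * Note on the 2.6-era dev->poll contract used above: returning 0 tells
 * the core that polling is complete and interrupts were re-enabled;
 * returning 1 keeps the device on the poll list for another pass.
 */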
1140 * mv64340_eth_start_xmit
1142 * This function queues a packet in the Tx descriptors of the
1143 * required port.
1145 * Input : skb - a pointer to socket buffer
1146 * dev - a pointer to the required port
1148 * Output : zero upon success
1150 static int mv64340_eth_start_xmit(struct sk_buff *skb, struct net_device *dev)
1152 struct mv64340_private *mp = netdev_priv(dev);
1153 struct net_device_stats *stats = &mp->stats;
1154 ETH_FUNC_RET_STATUS status;
1155 unsigned long flags;
1156 struct pkt_info pkt_info;
1158 if (netif_queue_stopped(dev)) {
1159 printk(KERN_ERR
1160 "%s: Tried sending packet when interface is stopped\n",
1161 dev->name);
1162 return 1;
1165 /* This is a hard error, log it. */
1166 if ((MV64340_TX_QUEUE_SIZE - mp->tx_ring_skbs) <=
1167 (skb_shinfo(skb)->nr_frags + 1)) {
1168 netif_stop_queue(dev);
1169 printk(KERN_ERR
1170 "%s: Bug in mv64340_eth - Trying to transmit when"
1171 " queue full !\n", dev->name);
1172 return 1;
1175 /* Paranoid check - this shouldn't happen */
1176 if (skb == NULL) {
1177 stats->tx_dropped++;
1178 return 1;
1181 spin_lock_irqsave(&mp->lock, flags);
1183 /* Update packet info data structure -- DMA owned, first last */
1184 #ifdef MV64340_CHECKSUM_OFFLOAD_TX
1185 if (!skb_shinfo(skb)->nr_frags || (skb_shinfo(skb)->nr_frags > 3)) {
1186 #endif
1187 pkt_info.cmd_sts = ETH_TX_ENABLE_INTERRUPT |
1188 ETH_TX_FIRST_DESC | ETH_TX_LAST_DESC;
1190 pkt_info.byte_cnt = skb->len;
1191 pkt_info.buf_ptr = pci_map_single(0, skb->data, skb->len,
1192 PCI_DMA_TODEVICE);
1195 pkt_info.return_info = skb;
1196 status = eth_port_send(mp, &pkt_info);
1197 if ((status == ETH_ERROR) || (status == ETH_QUEUE_FULL))
1198 printk(KERN_ERR "%s: Error on transmitting packet\n",
1199 dev->name);
1200 mp->tx_ring_skbs++;
1201 #ifdef MV64340_CHECKSUM_OFFLOAD_TX
1202 } else {
1203 unsigned int frag;
1204 u32 ipheader;
1206 /* first frag which is skb header */
1207 pkt_info.byte_cnt = skb_headlen(skb);
1208 pkt_info.buf_ptr = pci_map_single(0, skb->data,
1209 skb_headlen(skb), PCI_DMA_TODEVICE);
1210 pkt_info.return_info = 0;
1211 ipheader = skb->nh.iph->ihl << 11;
1212 pkt_info.cmd_sts = ETH_TX_FIRST_DESC |
1213 ETH_GEN_TCP_UDP_CHECKSUM |
1214 ETH_GEN_IP_V_4_CHECKSUM |
1215 ipheader;
1216 /* CPU already calculated pseudo header checksum. So, use it */
1217 pkt_info.l4i_chk = skb->h.th->check;
1218 status = eth_port_send(mp, &pkt_info);
1219 if (status != ETH_OK) {
1220 if ((status == ETH_ERROR))
1221 printk(KERN_ERR "%s: Error on transmitting packet\n", dev->name);
1222 if (status == ETH_QUEUE_FULL)
1223 printk("Error on Queue Full \n");
1224 if (status == ETH_QUEUE_LAST_RESOURCE)
1225 printk("Tx resource error \n");
1228 /* Check for the remaining frags */
1229 for (frag = 0; frag < skb_shinfo(skb)->nr_frags; frag++) {
1230 skb_frag_t *this_frag = &skb_shinfo(skb)->frags[frag];
1231 pkt_info.l4i_chk = 0x0000;
1232 pkt_info.cmd_sts = 0x00000000;
1234 /* Last Frag enables interrupt and frees the skb */
1235 if (frag == (skb_shinfo(skb)->nr_frags - 1)) {
1236 pkt_info.cmd_sts |= ETH_TX_ENABLE_INTERRUPT |
1237 ETH_TX_LAST_DESC;
1238 pkt_info.return_info = skb;
1239 mp->tx_ring_skbs++;
1241 else {
1242 pkt_info.return_info = 0;
1244 pkt_info.byte_cnt = this_frag->size;
1245 if (this_frag->size < 8)
1246 printk("%d : \n", skb_shinfo(skb)->nr_frags);
1248 pkt_info.buf_ptr = pci_map_page(NULL, this_frag->page,
1249 this_frag->page_offset,
1250 this_frag->size, PCI_DMA_TODEVICE);
1252 status = eth_port_send(mp, &pkt_info);
1254 if (status != ETH_OK) {
1255 if ((status == ETH_ERROR))
1256 printk(KERN_ERR "%s: Error on transmitting packet\n", dev->name);
1258 if (status == ETH_QUEUE_LAST_RESOURCE)
1259 printk("Tx resource error \n");
1261 if (status == ETH_QUEUE_FULL)
1262 printk("Queue is full \n");
1266 #endif
1268 /* Check if TX queue can handle another skb. If not, then
1269 * signal higher layers to stop requesting TX
1271 if (MV64340_TX_QUEUE_SIZE <= (mp->tx_ring_skbs + 1))
1273 * Stop getting skb's from upper layers.
1274 * Getting skb's from upper layers will be enabled again after
1275 * packets are released.
1277 netif_stop_queue(dev);
1279 /* Update statistics and start of transmission time */
1280 stats->tx_bytes += skb->len;
1281 stats->tx_packets++;
1282 dev->trans_start = jiffies;
1284 spin_unlock_irqrestore(&mp->lock, flags);
1286 return 0; /* success */
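/*
 * Minimal pkt_info for a linear (non-fragmented) skb, mirroring the
 * non-SG path above (a sketch):
 *
 *	pkt_info.cmd_sts  = ETH_TX_ENABLE_INTERRUPT |
 *			    ETH_TX_FIRST_DESC | ETH_TX_LAST_DESC;
 *	pkt_info.byte_cnt = skb->len;
 *	pkt_info.buf_ptr  = pci_map_single(0, skb->data, skb->len,
 *					   PCI_DMA_TODEVICE);
 *	pkt_info.return_info = skb;	-- freed on Tx completion
 *	status = eth_port_send(mp, &pkt_info);
 */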
1290 * mv64340_eth_get_stats
1292 * Returns a pointer to the interface statistics.
1294 * Input : dev - a pointer to the required interface
1296 * Output : a pointer to the interface's statistics
1299 static struct net_device_stats *mv64340_eth_get_stats(struct net_device *dev)
1301 struct mv64340_private *mp = netdev_priv(dev);
1303 return &mp->stats;
1307 * mv64340_eth_init
1309 * First function called after registering the network device.
1310 * Its purpose is to initialize the device as an ethernet device,
1311 * fill the structure that was given at registration with pointers
1312 * to functions, and set the MAC address of the interface.
1314 * Input : number of port to initialize
1315 * Output : -ENOMEM if failed, 0 if success
1317 static struct net_device *mv64340_eth_init(int port_num)
1319 struct mv64340_private *mp;
1320 struct net_device *dev;
1321 int err;
1323 dev = alloc_etherdev(sizeof(struct mv64340_private));
1324 if (!dev)
1325 return NULL;
1327 mp = netdev_priv(dev);
1329 dev->irq = ETH_PORT0_IRQ_NUM + port_num;
1331 dev->open = mv64340_eth_open;
1332 dev->stop = mv64340_eth_stop;
1333 dev->hard_start_xmit = mv64340_eth_start_xmit;
1334 dev->get_stats = mv64340_eth_get_stats;
1335 dev->set_mac_address = mv64340_eth_set_mac_address;
1336 dev->set_multicast_list = mv64340_eth_set_rx_mode;
1338 /* No need to Tx Timeout */
1339 dev->tx_timeout = mv64340_eth_tx_timeout;
1340 #ifdef MV64340_NAPI
1341 dev->poll = mv64340_poll;
1342 dev->weight = 64;
1343 #endif
1345 dev->watchdog_timeo = 2 * HZ;
1346 dev->tx_queue_len = MV64340_TX_QUEUE_SIZE;
1347 dev->base_addr = 0;
1348 dev->change_mtu = mv64340_eth_change_mtu;
1350 #ifdef MV64340_CHECKSUM_OFFLOAD_TX
1351 #ifdef MAX_SKB_FRAGS
1352 #ifndef CONFIG_JAGUAR_DMALOW
1354 * Zero copy can only work if we use Discovery II memory. Else, we will
1355 * have to map the buffers to ISA memory which is only 16 MB
1357 dev->features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_HW_CSUM;
1358 #endif
1359 #endif
1360 #endif
1362 mp->port_num = port_num;
1364 /* Configure the timeout task */
1365 INIT_WORK(&mp->tx_timeout_task,
1366 (void (*)(void *))mv64340_eth_tx_timeout_task, dev);
1368 spin_lock_init(&mp->lock);
1370 /* set MAC addresses */
1371 memcpy(dev->dev_addr, prom_mac_addr_base, 6);
1372 dev->dev_addr[5] += port_num;
1374 err = register_netdev(dev);
1375 if (err)
1376 goto out_free_dev;
1378 printk(KERN_NOTICE "%s: port %d with MAC address %02x:%02x:%02x:%02x:%02x:%02x\n",
1379 dev->name, port_num,
1380 dev->dev_addr[0], dev->dev_addr[1], dev->dev_addr[2],
1381 dev->dev_addr[3], dev->dev_addr[4], dev->dev_addr[5]);
1383 if (dev->features & NETIF_F_SG)
1384 printk("Scatter Gather Enabled ");
1386 if (dev->features & NETIF_F_IP_CSUM)
1387 printk("TX TCP/IP Checksumming Supported \n");
1389 printk("RX TCP/UDP Checksum Offload ON, \n");
1390 printk("TX and RX Interrupt Coalescing ON \n");
1392 #ifdef MV64340_NAPI
1393 printk("RX NAPI Enabled \n");
1394 #endif
1396 return dev;
1398 out_free_dev:
1399 free_netdev(dev);
1401 return NULL;
1404 static void mv64340_eth_remove(struct net_device *dev)
1406 struct mv64340_private *mp = netdev_priv(dev);
1408 unregister_netdev(dev);
1409 flush_scheduled_work();
1410 free_netdev(dev);
1413 static struct net_device *mv64340_dev0;
1414 static struct net_device *mv64340_dev1;
1415 static struct net_device *mv64340_dev2;
1418 * mv64340_init_module
1420 * Registers the network drivers into the Linux kernel
1422 * Input : N/A
1424 * Output : N/A
1426 static int __init mv64340_init_module(void)
1428 printk(KERN_NOTICE "MV-643xx 10/100/1000 Ethernet Driver\n");
1430 #ifdef CONFIG_MV643XX_ETH_0
1431 mv64340_dev0 = mv64340_eth_init(0);
1432 if (!mv64340_dev0) {
1433 printk(KERN_ERR
1434 "Error registering MV-64360 ethernet port 0\n");
1436 #endif
1437 #ifdef CONFIG_MV643XX_ETH_1
1438 mv64340_dev1 = mv64340_eth_init(1);
1439 if (!mv64340_dev1) {
1440 printk(KERN_ERR
1441 "Error registering MV-64360 ethernet port 1\n");
1443 #endif
1444 #ifdef CONFIG_MV643XX_ETH_2
1445 mv64340_dev2 = mv64340_eth_init(2);
1446 if (!mv64340_dev2) {
1447 printk(KERN_ERR
1448 "Error registering MV-64360 ethernet port 2\n");
1450 #endif
1451 return 0;
1455 * mv64340_cleanup_module
1457 * Unregisters the network drivers from the Linux kernel
1459 * Input : N/A
1461 * Output : N/A
1463 static void __exit mv64340_cleanup_module(void)
1465 if (mv64340_dev2)
1466 mv64340_eth_remove(mv64340_dev2);
1467 if (mv64340_dev1)
1468 mv64340_eth_remove(mv64340_dev1);
1469 if (mv64340_dev0)
1470 mv64340_eth_remove(mv64340_dev0);
1473 module_init(mv64340_init_module);
1474 module_exit(mv64340_cleanup_module);
1476 MODULE_LICENSE("GPL");
1477 MODULE_AUTHOR("Rabeeh Khoury, Assaf Hoffman, Matthew Dharm and Manish Lachwani");
1478 MODULE_DESCRIPTION("Ethernet driver for Marvell MV64340");
1481 * The second part is the low level driver of the gigE ethernet ports.
1485 * Marvell's Gigabit Ethernet controller low level driver
1487 * DESCRIPTION:
1488 * This file introduces a low level API to Marvell's Gigabit Ethernet
1489 * controller. This Gigabit Ethernet Controller driver API controls
1490 * 1) Operations (i.e. port init, start, reset, etc.).
1491 * 2) Data flow (i.e. port send, receive etc').
1492 * Each Gigabit Ethernet port is controlled via
1493 * struct mv64340_private.
1494 * This struct includes user configuration information as well as
1495 * driver internal data needed for its operations.
1497 * Supported Features:
1498 * - This low level driver is OS independent. Allocating memory for
1499 * the descriptor rings and buffers is not within the scope of
1500 * this driver.
1501 * - The user is free from Rx/Tx queue managing.
1502 * - This low level driver introduces a functional API that enables
1503 * one to operate Marvell's Gigabit Ethernet Controller in a
1504 * convenient way.
1505 * - Simple Gigabit Ethernet port operation API.
1506 * - Simple Gigabit Ethernet port data flow API.
1507 * - Data flow and operation API support per queue functionality.
1508 * - Support cached descriptors for better performance.
1509 * - Enable access to all four DRAM banks and internal SRAM memory
1510 * spaces.
1511 * - PHY access and control API.
1512 * - Port control register configuration API.
1513 * - Full control over Unicast and Multicast MAC configurations.
1515 * Operation flow:
1517 * Initialization phase
1518 * This phase completes the initialization of the mv64340_private
1519 * struct.
1520 * User information regarding port configuration has to be set
1521 * prior to calling the port initialization routine.
1523 * In this phase any port Tx/Rx activity is halted, MIB counters
1524 * are cleared, the PHY address is set according to user parameters,
1525 * and access to DRAM and internal SRAM memory spaces is enabled.
1527 * Driver ring initialization
1528 * Allocating memory for the descriptor rings and buffers is not
1529 * within the scope of this driver. Thus, the user is required to
1530 * allocate memory for the descriptors ring and buffers. Those
1531 * memory parameters are used by the Rx and Tx ring initialization
1532 * routines in order to curve the descriptor linked list in a form
1533 * of a ring.
1534 * Note: Pay special attention to alignment issues when using
1535 * cached descriptors/buffers. In this phase the driver store
1536 * information in the mv64340_private struct regarding each queue
1537 * ring.
1539 * Driver start
1540 * This phase prepares the Ethernet port for Rx and Tx activity.
1541 * It uses the information stored in the mv64340_private struct to
1542 * initialize the various port registers.
1544 * Data flow:
1545 * All packet references to/from the driver are done using
1546 * struct pkt_info.
1547 * This struct is a unified struct used with Rx and Tx operations.
1548 * This way the user is not required to be familiar with either
1549 * the Tx or the Rx descriptor structure.
1550 * The driver's descriptor rings are managed by indexes.
1551 * Those indexes control the ring resources and are used to indicate
1552 * a SW resource error:
1553 * 'current'
1554 * This index points to the current available resource for use. For
1555 * example in Rx process this index will point to the descriptor
1556 * that will be passed to the user upon calling the receive routine.
1557 * In Tx process, this index will point to the descriptor
1558 * that will be assigned with the user packet info and transmitted.
1559 * 'used'
1560 * This index points to the descriptor that needs to restore its
1561 * resources. For example in Rx process, using the Rx buffer return
1562 * API will attach the buffer returned in packet info to the
1563 * descriptor pointed by 'used'. In Tx process, using the Tx
1564 * descriptor return will merely return the user packet info with
1565 * the command status of the transmitted buffer pointed by the
1566 * 'used' index. Nevertheless, it is essential to use this routine
1567 * to update the 'used' index.
1568 * 'first'
1569 * This index supports Tx Scatter-Gather. It points to the first
1570 * descriptor of a packet assembled of multiple buffers. For example
1571 * when a Tx resource error occurs in the middle of such a packet,
1572 * the 'curr' index gets the value of 'first' to indicate that the
1573 * ring returned to its state before trying to transmit this packet.
1575 * Receive operation:
1576 * The eth_port_receive API sets the packet information struct,
1577 * passed by the caller, with received information from the
1578 * 'current' SDMA descriptor.
1579 * It is the user's responsibility to return this resource back
1580 * to the Rx descriptor ring to enable its reuse.
1581 * Returning the Rx resource is done using the eth_rx_return_buff API.
1583 * Transmit operation:
1584 * The eth_port_send API supports Scatter-Gather, which enables
1585 * sending a packet spanned over multiple buffers. This means that
1586 * each packet info structure given by the user and put into
1587 * the Tx descriptors ring will be transmitted only once the 'LAST'
1588 * bit is set in the packet info command status field. This
1589 * API also considers restrictions regarding buffer alignments and
1590 * sizes.
1591 * The user must return a Tx resource after ensuring the buffer
1592 * has been transmitted, to enable the Tx ring indexes to update.
1594 * BOARD LAYOUT
1595 * This device is on-board. No jumper diagram is necessary.
1597 * EXTERNAL INTERFACE
1599 * Prior to calling the initialization routine eth_port_init() the user
1600 * must set the following fields under mv64340_private struct:
1601 * port_num User Ethernet port number.
1602 * port_mac_addr[6] User defined port MAC address.
1603 * port_config User port configuration value.
1604 * port_config_extend User port config extend value.
1605 * port_sdma_config User port SDMA config value.
1606 * port_serial_control User port serial control value.
1608 * This driver introduces a set of default values:
1609 * PORT_CONFIG_VALUE Default port configuration value
1610 * PORT_CONFIG_EXTEND_VALUE Default port extend configuration value
1611 * PORT_SDMA_CONFIG_VALUE Default sdma control value
1612 * PORT_SERIAL_CONTROL_VALUE Default port serial control value
1614 * This driver data flow is done using the struct pkt_info which
1615 * is a unified struct for Rx and Tx operations:
1617 * byte_cnt Tx/Rx descriptor buffer byte count.
1618 * l4i_chk CPU provided TCP Checksum. For Tx operation
1619 * only.
1620 * cmd_sts Tx/Rx descriptor command status.
1621 * buf_ptr Tx/Rx descriptor buffer pointer.
1622 * return_info Tx/Rx user resource return information.
1625 /* defines */
1626 /* SDMA command macros */
1627 #define ETH_ENABLE_TX_QUEUE(eth_port) \
1628 MV_WRITE(MV64340_ETH_TRANSMIT_QUEUE_COMMAND_REG(eth_port), 1)
1630 #define ETH_DISABLE_TX_QUEUE(eth_port) \
1631 MV_WRITE(MV64340_ETH_TRANSMIT_QUEUE_COMMAND_REG(eth_port), \
1632 (1 << 8))
1634 #define ETH_ENABLE_RX_QUEUE(rx_queue, eth_port) \
1635 MV_WRITE(MV64340_ETH_RECEIVE_QUEUE_COMMAND_REG(eth_port), \
1636 (1 << rx_queue))
1638 #define ETH_DISABLE_RX_QUEUE(rx_queue, eth_port) \
1639 MV_WRITE(MV64340_ETH_RECEIVE_QUEUE_COMMAND_REG(eth_port), \
1640 (1 << (8 + rx_queue)))
1642 #define LINK_UP_TIMEOUT 100000
1643 #define PHY_BUSY_TIMEOUT 10000000
1645 /* locals */
1647 /* PHY routines */
1648 static int ethernet_phy_get(unsigned int eth_port_num);
1650 /* Ethernet Port routines */
1651 static int eth_port_uc_addr(unsigned int eth_port_num, unsigned char uc_nibble,
1652 int option);
1655 * eth_port_init - Initialize the Ethernet port driver
1657 * DESCRIPTION:
1658 * This function prepares the ethernet port to start its activity:
1659 * 1) Completes the ethernet port driver struct initialization toward port
1660 * start routine.
1661 * 2) Resets the device to a quiescent state in case of warm reboot.
1662 * 3) Enable SDMA access to all four DRAM banks as well as internal SRAM.
1663 * 4) Clean MAC tables. The reset status of those tables is unknown.
1664 * 5) Set PHY address.
1665 * Note: Call this routine prior to eth_port_start routine and after
1666 * setting user values in the user fields of Ethernet port control
1667 * struct.
1669 * INPUT:
1670 * struct mv64340_private *mp Ethernet port control struct
1672 * OUTPUT:
1673 * See description.
1675 * RETURN:
1676 * None.
1678 static void eth_port_init(struct mv64340_private * mp)
1680 mp->port_config = PORT_CONFIG_VALUE;
1681 mp->port_config_extend = PORT_CONFIG_EXTEND_VALUE;
1682 #if defined(__BIG_ENDIAN)
1683 mp->port_sdma_config = PORT_SDMA_CONFIG_VALUE;
1684 #elif defined(__LITTLE_ENDIAN)
1685 mp->port_sdma_config = PORT_SDMA_CONFIG_VALUE |
1686 ETH_BLM_RX_NO_SWAP | ETH_BLM_TX_NO_SWAP;
1687 #else
1688 #error One of __LITTLE_ENDIAN or __BIG_ENDIAN must be defined!
1689 #endif
1690 mp->port_serial_control = PORT_SERIAL_CONTROL_VALUE;
1692 mp->port_rx_queue_command = 0;
1693 mp->port_tx_queue_command = 0;
1695 mp->rx_resource_err = 0;
1696 mp->tx_resource_err = 0;
1698 eth_port_reset(mp->port_num);
1700 eth_port_init_mac_tables(mp->port_num);
1702 ethernet_phy_reset(mp->port_num);
1706 * eth_port_start - Start the Ethernet port activity.
1708 * DESCRIPTION:
1709 * This routine prepares the Ethernet port for Rx and Tx activity:
1710 * 1. Initialize Tx and Rx Current Descriptor Pointer for each queue that
1711 * has been initialized a descriptor's ring (using
1712 * ether_init_tx_desc_ring for Tx and ether_init_rx_desc_ring for Rx)
1713 * 2. Initialize and enable the Ethernet configuration port by writing to
1714 * the port's configuration and command registers.
1715 * 3. Initialize and enable the SDMA by writing to the SDMA's
1716 * configuration and command registers. After completing these steps,
1717 * the ethernet port SDMA can start performing Rx and Tx activities.
1719 * Note: Each Rx and Tx queue descriptor's list must be initialized prior
1720 * to calling this function (use ether_init_tx_desc_ring for Tx queues
1721 * and ether_init_rx_desc_ring for Rx queues).
1723 * INPUT:
1724 * struct mv64340_private *mp Ethernet port control struct
1726 * OUTPUT:
1727 * Ethernet port is ready to receive and transmit.
1729 * RETURN:
1730 * false if the port PHY is not up.
1731 * true otherwise.
1733 static int eth_port_start(struct mv64340_private *mp)
1735 unsigned int eth_port_num = mp->port_num;
1736 int tx_curr_desc, rx_curr_desc;
1737 unsigned int phy_reg_data;
1739 /* Assignment of Tx CTRP of given queue */
1740 tx_curr_desc = mp->tx_curr_desc_q;
1741 MV_WRITE(MV64340_ETH_TX_CURRENT_QUEUE_DESC_PTR_0(eth_port_num),
1742 (struct eth_tx_desc *) mp->tx_desc_dma + tx_curr_desc);
1744 /* Assignment of Rx CRDP of given queue */
1745 rx_curr_desc = mp->rx_curr_desc_q;
1746 MV_WRITE(MV64340_ETH_RX_CURRENT_QUEUE_DESC_PTR_0(eth_port_num),
1747 (struct eth_rx_desc *) mp->rx_desc_dma + rx_curr_desc);
1749 /* Add the assigned Ethernet address to the port's address table */
1750 eth_port_uc_addr_set(mp->port_num, mp->port_mac_addr);
1752 /* Assign port configuration and command. */
1753 MV_WRITE(MV64340_ETH_PORT_CONFIG_REG(eth_port_num),
1754 mp->port_config);
1756 MV_WRITE(MV64340_ETH_PORT_CONFIG_EXTEND_REG(eth_port_num),
1757 mp->port_config_extend);
1759 MV_WRITE(MV64340_ETH_PORT_SERIAL_CONTROL_REG(eth_port_num),
1760 mp->port_serial_control);
1762 MV_SET_REG_BITS(MV64340_ETH_PORT_SERIAL_CONTROL_REG(eth_port_num),
1763 ETH_SERIAL_PORT_ENABLE);
1765 /* Assign port SDMA configuration */
1766 MV_WRITE(MV64340_ETH_SDMA_CONFIG_REG(eth_port_num),
1767 mp->port_sdma_config);
1769 /* Enable port Rx. */
1770 MV_WRITE(MV64340_ETH_RECEIVE_QUEUE_COMMAND_REG(eth_port_num),
1771 mp->port_rx_queue_command);
1773 /* Check if link is up */
1774 eth_port_read_smi_reg(eth_port_num, 1, &phy_reg_data);
1776 if (!(phy_reg_data & 0x20))
1777 return 0;
1779 return 1;
1783 * eth_port_uc_addr_set - This function sets the port unicast address.
1785 * DESCRIPTION:
1786 * This function sets the port Ethernet MAC address.
1788 * INPUT:
1789 * unsigned int eth_port_num Port number.
1790 * char * p_addr Address to be set
1792 * OUTPUT:
1793 * Set MAC address low and high registers. also calls eth_port_uc_addr()
1794 * To set the unicast table with the proper information.
1796 * RETURN:
1797 * N/A.
1800 static void eth_port_uc_addr_set(unsigned int eth_port_num,
1801 unsigned char *p_addr)
1803 unsigned int mac_h;
1804 unsigned int mac_l;
1806 mac_l = (p_addr[4] << 8) | (p_addr[5]);
1807 mac_h = (p_addr[0] << 24) | (p_addr[1] << 16) |
1808 (p_addr[2] << 8) | (p_addr[3] << 0);
1810 MV_WRITE(MV64340_ETH_MAC_ADDR_LOW(eth_port_num), mac_l);
1811 MV_WRITE(MV64340_ETH_MAC_ADDR_HIGH(eth_port_num), mac_h);
1813 /* Accept frames of this address */
1814 eth_port_uc_addr(eth_port_num, p_addr[5], ACCEPT_MAC_ADDR);
1816 return;
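/*
 * Packing example (a sketch): for MAC 00:50:43:12:34:56 the code above
 * yields mac_h = 0x00504312 and mac_l = 0x00003456, and the final
 * nibble 0x6 of p_addr[5] selects the unicast filter table entry.
 */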
1820 * eth_port_uc_addr - This function sets the port unicast address table
1822 * DESCRIPTION:
1823 * This function locates the proper entry in the Unicast table for the
1824 * specified MAC nibble and sets its properties according to function
1825 * parameters.
1827 * INPUT:
1828 * unsigned int eth_port_num Port number.
1829 * unsigned char uc_nibble Unicast MAC Address last nibble.
1830 * int option 0 = Add, 1 = remove address.
1832 * OUTPUT:
1833 * This function add/removes MAC addresses from the port unicast address
1834 * table.
1836 * RETURN:
1837 * true if the operation succeeded.
1838 * false if the option parameter is invalid.
1841 static int eth_port_uc_addr(unsigned int eth_port_num,
1842 unsigned char uc_nibble, int option)
1844 unsigned int unicast_reg;
1845 unsigned int tbl_offset;
1846 unsigned int reg_offset;
1848 /* Locate the Unicast table entry */
1849 uc_nibble = (0xf & uc_nibble);
1850 tbl_offset = (uc_nibble / 4) * 4; /* Register offset from unicast table base */
1851 reg_offset = uc_nibble % 4; /* Entry offset within the above register */
1853 switch (option) {
1854 case REJECT_MAC_ADDR:
1855 /* Clear accepts frame bit at specified unicast DA table entry */
1856 unicast_reg = MV_READ((MV64340_ETH_DA_FILTER_UNICAST_TABLE_BASE
1857 (eth_port_num) + tbl_offset));
1859 unicast_reg &= (0x0E << (8 * reg_offset));
1861 MV_WRITE(
1862 (MV64340_ETH_DA_FILTER_UNICAST_TABLE_BASE
1863 (eth_port_num) + tbl_offset), unicast_reg);
1864 break;
1866 case ACCEPT_MAC_ADDR:
1867 /* Set accepts frame bit at unicast DA filter table entry */
1868 unicast_reg =
1869 MV_READ(
1870 (MV64340_ETH_DA_FILTER_UNICAST_TABLE_BASE
1871 (eth_port_num) + tbl_offset));
1873 unicast_reg |= (0x01 << (8 * reg_offset));
1875 MV_WRITE(
1876 (MV64340_ETH_DA_FILTER_UNICAST_TABLE_BASE
1877 (eth_port_num) + tbl_offset), unicast_reg);
1879 break;
1881 default:
1882 return 0;
1885 return 1;
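/*
 * Editor's worked example (illustrative): for uc_nibble = 0xB,
 *
 *	tbl_offset = (0xB / 4) * 4 = 8	(third 32-bit register of the table)
 *	reg_offset = 0xB % 4 = 3	(highest byte lane of that register)
 *
 * so ACCEPT_MAC_ADDR sets bit 24 (0x01 << (8 * 3)) of the register at
 * DA_FILTER_UNICAST_TABLE_BASE + 8, i.e. bit 0 of that entry's byte.
 */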
1889 * eth_port_init_mac_tables - Clear all entries in the UC, SMC and OMC tables
1891 * DESCRIPTION:
1892 * Go through all the DA filter tables (Unicast, Special Multicast &
1893 * Other Multicast) and set each entry to 0.
1895 * INPUT:
1896 * unsigned int eth_port_num Ethernet Port number.
1898 * OUTPUT:
1899 * Multicast and Unicast packets are rejected.
1901 * RETURN:
1902 * None.
1904 static void eth_port_init_mac_tables(unsigned int eth_port_num)
1906 int table_index;
1908 /* Clear DA filter unicast table (Ex_dFUT) */
1909 for (table_index = 0; table_index <= 0xC; table_index += 4)
1910 MV_WRITE(
1911 (MV64340_ETH_DA_FILTER_UNICAST_TABLE_BASE
1912 (eth_port_num) + table_index), 0);
1914 for (table_index = 0; table_index <= 0xFC; table_index += 4) {
1915 /* Clear DA filter special multicast table (Ex_dFSMT) */
1916 MV_WRITE(
1917 (MV64340_ETH_DA_FILTER_SPECIAL_MULTICAST_TABLE_BASE
1918 (eth_port_num) + table_index), 0);
1919 /* Clear DA filter other multicast table (Ex_dFOMT) */
1920 MV_WRITE((MV64340_ETH_DA_FILTER_OTHER_MULTICAST_TABLE_BASE
1921 (eth_port_num) + table_index), 0);
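/*
 * Editor's note on table geometry, as implied by the loop bounds above:
 * the unicast table spans 4 registers (offsets 0x0..0xC) holding 16
 * one-byte entries, while each multicast table spans 64 registers
 * (offsets 0x0..0xFC) holding 256 one-byte entries, one per possible
 * value of the index byte.
 */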
1926 * eth_clear_mib_counters - Clear all MIB counters
1928 * DESCRIPTION:
1929 * This function clears all MIB counters of a specific ethernet port.
1930 * A read from the MIB counter will reset the counter.
1932 * INPUT:
1933 * unsigned int eth_port_num Ethernet Port number.
1935 * OUTPUT:
1936 * After reading all MIB counters, the counters reset.
1938 * RETURN:
1939 * None.
1942 static void eth_clear_mib_counters(unsigned int eth_port_num)
1944 int i;
1946 /* Perform dummy reads from MIB counters */
1947 for (i = ETH_MIB_GOOD_OCTETS_RECEIVED_LOW; i < ETH_MIB_LATE_COLLISION; i += 4)
1948 MV_READ(MV64340_ETH_MIB_COUNTERS_BASE(eth_port_num) + i);
1953 * ethernet_phy_get - Get the ethernet port PHY address.
1955 * DESCRIPTION:
1956 * This routine returns the given ethernet port PHY address.
1958 * INPUT:
1959 * unsigned int eth_port_num Ethernet Port number.
1961 * OUTPUT:
1962 * None.
1964 * RETURN:
1965 * PHY address.
1968 static int ethernet_phy_get(unsigned int eth_port_num)
1970 unsigned int reg_data;
1972 reg_data = MV_READ(MV64340_ETH_PHY_ADDR_REG);
1974 return ((reg_data >> (5 * eth_port_num)) & 0x1f);
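/*
 * Editor's note: MV64340_ETH_PHY_ADDR_REG packs one 5-bit PHY address
 * per port, so port 0 occupies bits 4:0, port 1 bits 9:5, port 2 bits
 * 14:10. E.g. with reg_data = 0x00000421, port 1's PHY address is
 * (0x421 >> 5) & 0x1f = 1.
 */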
1978 * ethernet_phy_reset - Reset Ethernet port PHY.
1980 * DESCRIPTION:
1981 * This routine utilizes the SMI interface to reset the ethernet port PHY.
1982 * The routine waits until the link is up again or the link-up wait times out.
1984 * INPUT:
1985 * unsigned int eth_port_num Ethernet Port number.
1987 * OUTPUT:
1988 * The ethernet port PHY renews its link.
1990 * RETURN:
1991 * 1 if the link came back up, 0 on timeout.
1994 static int ethernet_phy_reset(unsigned int eth_port_num)
1996 unsigned int time_out = 50;
1997 unsigned int phy_reg_data;
1999 /* Reset the PHY */
2000 eth_port_read_smi_reg(eth_port_num, 0, &phy_reg_data);
2001 phy_reg_data |= 0x8000; /* Set bit 15 to reset the PHY */
2002 eth_port_write_smi_reg(eth_port_num, 0, phy_reg_data);
2004 /* Poll on the PHY LINK */
2005 do {
2006 eth_port_read_smi_reg(eth_port_num, 1, &phy_reg_data);
2008 if (time_out-- == 0)
2009 return 0;
2010 } while (!(phy_reg_data & 0x20));
2012 return 1;
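/*
 * Editor's note: the magic numbers above are standard IEEE 802.3 MII
 * bits: register 0 (BMCR) bit 15 (0x8000) is the self-clearing PHY
 * reset bit, and register 1 (BMSR) bit 5 (0x0020) is "auto-negotiation
 * complete", which this driver uses as its link-up indication.
 */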
2016 * eth_port_reset - Reset Ethernet port
2018 * DESCRIPTION:
2019 * This routine resets the chip by aborting any SDMA engine activity and
2020 * clearing the MIB counters. The Receiver and the Transmit unit are in
2021 * idle state after this command is performed and the port is disabled.
2023 * INPUT:
2024 * unsigned int eth_port_num Ethernet Port number.
2026 * OUTPUT:
2027 * Channel activity is halted.
2029 * RETURN:
2030 * None.
2033 static void eth_port_reset(unsigned int eth_port_num)
2035 unsigned int reg_data;
2037 /* Stop Tx port activity. Check port Tx activity. */
2038 reg_data =
2039 MV_READ(MV64340_ETH_TRANSMIT_QUEUE_COMMAND_REG(eth_port_num));
2041 if (reg_data & 0xFF) {
2042 /* Issue stop command for active channels only */
2043 MV_WRITE(MV64340_ETH_TRANSMIT_QUEUE_COMMAND_REG
2044 (eth_port_num), (reg_data << 8));
2046 /* Wait for all Tx activity to terminate. */
2047 do {
2048 /* Check the queue command register until all Tx queues have stopped */
2049 reg_data =
2050 MV_READ
2051 (MV64340_ETH_TRANSMIT_QUEUE_COMMAND_REG
2052 (eth_port_num));
2054 while (reg_data & 0xFF);
2057 /* Stop Rx port activity. Check port Rx activity. */
2058 reg_data =
2059 MV_READ(MV64340_ETH_RECEIVE_QUEUE_COMMAND_REG
2060 (eth_port_num));
2062 if (reg_data & 0xFF) {
2063 /* Issue stop command for active channels only */
2064 MV_WRITE(MV64340_ETH_RECEIVE_QUEUE_COMMAND_REG
2065 (eth_port_num), (reg_data << 8));
2067 /* Wait for all Rx activity to terminate. */
2068 do {
2070 /* Check the queue command register until all Rx queues have stopped */
2070 reg_data =
2071 MV_READ
2072 (MV64340_ETH_RECEIVE_QUEUE_COMMAND_REG
2073 (eth_port_num));
2075 while (reg_data & 0xFF);
2079 /* Clear all MIB counters */
2080 eth_clear_mib_counters(eth_port_num);
2082 /* Reset the Enable bit in the Configuration Register */
2083 reg_data =
2084 MV_READ(MV64340_ETH_PORT_SERIAL_CONTROL_REG (eth_port_num));
2085 reg_data &= ~ETH_SERIAL_PORT_ENABLE;
2086 MV_WRITE(MV64340_ETH_PORT_SERIAL_CONTROL_REG(eth_port_num), reg_data);
2088 return;
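/*
 * Editor's note: as used above, the receive/transmit queue command
 * registers carry per-queue enable bits in [7:0] and disable bits in
 * [15:8]; writing the active-queue mask shifted left by 8 therefore
 * requests a stop of exactly the queues that are currently running.
 */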
2092 * ethernet_set_config_reg - Set specified bits in configuration register.
2094 * DESCRIPTION:
2095 * This function sets specified bits in the given ethernet
2096 * configuration register.
2098 * INPUT:
2099 * unsigned int eth_port_num Ethernet Port number.
2100 * unsigned int value 32 bit value.
2102 * OUTPUT:
2103 * The set bits in the value parameter are set in the configuration
2104 * register.
2106 * RETURN:
2107 * None.
2110 static void ethernet_set_config_reg(unsigned int eth_port_num,
2111 unsigned int value)
2113 unsigned int eth_config_reg;
2115 eth_config_reg =
2116 MV_READ(MV64340_ETH_PORT_CONFIG_REG(eth_port_num));
2117 eth_config_reg |= value;
2118 MV_WRITE(MV64340_ETH_PORT_CONFIG_REG(eth_port_num),
2119 eth_config_reg);
2123 * ethernet_get_config_reg - Get the port configuration register
2125 * DESCRIPTION:
2126 * This function returns the configuration register value of the given
2127 * ethernet port.
2129 * INPUT:
2130 * unsigned int eth_port_num Ethernet Port number.
2132 * OUTPUT:
2133 * None.
2135 * RETURN:
2136 * Port configuration register value.
2138 static unsigned int ethernet_get_config_reg(unsigned int eth_port_num)
2140 unsigned int eth_config_reg;
2142 eth_config_reg = MV_READ(MV64340_ETH_PORT_CONFIG_REG
2143 (eth_port_num));
2144 return eth_config_reg;
2149 * eth_port_read_smi_reg - Read PHY registers
2151 * DESCRIPTION:
2152 * This routine utilizes the SMI interface to interact with the PHY in
2153 * order to perform a PHY register read.
2155 * INPUT:
2156 * unsigned int eth_port_num Ethernet Port number.
2157 * unsigned int phy_reg PHY register address offset.
2158 * unsigned int *value Register value buffer.
2160 * OUTPUT:
2161 * Write the value of a specified PHY register into the given buffer.
2163 * RETURN:
2164 * false if the PHY is busy or read data is not in valid state.
2165 * true otherwise.
2168 static int eth_port_read_smi_reg(unsigned int eth_port_num,
2169 unsigned int phy_reg, unsigned int *value)
2171 int phy_addr = ethernet_phy_get(eth_port_num);
2172 unsigned int time_out = PHY_BUSY_TIMEOUT;
2173 unsigned int reg_value;
2175 /* first check that it is not busy */
2176 do {
2177 reg_value = MV_READ(MV64340_ETH_SMI_REG);
2178 if (time_out-- == 0)
2179 return 0;
2180 } while (reg_value & ETH_SMI_BUSY);
2182 /* not busy */
2184 MV_WRITE(MV64340_ETH_SMI_REG,
2185 (phy_addr << 16) | (phy_reg << 21) | ETH_SMI_OPCODE_READ);
2187 time_out = PHY_BUSY_TIMEOUT; /* initialize the time out var again */
2189 do {
2190 reg_value = MV_READ(MV64340_ETH_SMI_REG);
2191 if (time_out-- == 0)
2192 return 0;
2193 } while (!(reg_value & ETH_SMI_READ_VALID));
2195 /* Wait for the data to update in the SMI register */
2196 for (time_out = 0; time_out < PHY_BUSY_TIMEOUT; time_out++);
2198 reg_value = MV_READ(MV64340_ETH_SMI_REG);
2200 *value = reg_value & 0xffff;
2202 return 1;
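/*
 * Editor's note: per the field packing used in this file, an SMI
 * command word is laid out as
 *
 *	bits 25:21	PHY register address	(phy_reg << 21)
 *	bits 20:16	PHY device address	(phy_addr << 16)
 *	bits 15:0	read/write data
 *
 * with ETH_SMI_OPCODE_READ/ETH_SMI_OPCODE_WRITE selecting the operation
 * and ETH_SMI_BUSY / ETH_SMI_READ_VALID acting as status flags.
 */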
2206 * eth_port_write_smi_reg - Write to PHY registers
2208 * DESCRIPTION:
2209 * This routine utilizes the SMI interface to interact with the PHY in
2210 * order to perform writes to PHY registers.
2212 * INPUT:
2213 * unsigned int eth_port_num Ethernet Port number.
2214 * unsigned int phy_reg PHY register address offset.
2215 * unsigned int value Register value.
2217 * OUTPUT:
2218 * Write the given value to the specified PHY register.
2220 * RETURN:
2221 * false if the PHY is busy.
2222 * true otherwise.
2225 static int eth_port_write_smi_reg(unsigned int eth_port_num,
2226 unsigned int phy_reg, unsigned int value)
2228 unsigned int time_out = PHY_BUSY_TIMEOUT;
2229 unsigned int reg_value;
2230 int phy_addr;
2232 phy_addr = ethernet_phy_get(eth_port_num);
2234 /* first check that it is not busy */
2235 do {
2236 reg_value = MV_READ(MV64340_ETH_SMI_REG);
2237 if (time_out-- == 0)
2238 return 0;
2239 } while (reg_value & ETH_SMI_BUSY);
2241 /* not busy */
2242 MV_WRITE(MV64340_ETH_SMI_REG, (phy_addr << 16) | (phy_reg << 21) |
2243 ETH_SMI_OPCODE_WRITE | (value & 0xffff));
2245 return 1;
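/*
 * Usage sketch (editor's illustration; 'port_num' is assumed to be a
 * valid port index): PHY registers 2 and 3 are the IEEE-standard
 * identifier registers, so a caller could probe the PHY like this:
 *
 *	unsigned int id1, id2;
 *
 *	if (eth_port_read_smi_reg(port_num, 2, &id1) &&
 *	    eth_port_read_smi_reg(port_num, 3, &id2))
 *		printk(KERN_INFO "PHY ID %04x:%04x\n", id1, id2);
 */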
2249 * eth_port_send - Send an Ethernet packet
2251 * DESCRIPTION:
2252 * This routine sends a given packet described by the p_pkt_info parameter.
2253 * It supports transmitting a packet spanned over multiple buffers. The
2254 * routine updates 'curr' and 'first' indexes according to the packet
2255 * segment passed to the routine. In case the packet segment is the first,
2256 * the 'first' index is updated. In any case, the 'curr' index is updated.
2257 * If the routine runs into a Tx resource error, it assigns the 'curr' index
2258 * as 'first'. This way the function can abort the Tx process of a packet
2259 * spanning multiple descriptors.
2261 * INPUT:
2262 * struct mv64340_private *mp Ethernet Port Control struct.
2263 * struct pkt_info *p_pkt_info User packet buffer.
2265 * OUTPUT:
2266 * Tx ring 'curr' and 'first' indexes are updated.
2268 * RETURN:
2269 * ETH_QUEUE_FULL in case of Tx resource error.
2270 * ETH_ERROR in case the routine can not access Tx desc ring.
2271 * ETH_QUEUE_LAST_RESOURCE if the routine uses the last Tx resource.
2272 * ETH_OK otherwise.
2275 #ifdef MV64340_CHECKSUM_OFFLOAD_TX
2277 * Modified to include the first descriptor pointer in case of SG
2279 static ETH_FUNC_RET_STATUS eth_port_send(struct mv64340_private * mp,
2280 struct pkt_info * p_pkt_info)
2282 int tx_desc_curr, tx_desc_used, tx_first_desc, tx_next_desc;
2283 volatile struct eth_tx_desc *current_descriptor;
2284 volatile struct eth_tx_desc *first_descriptor;
2285 u32 command_status, first_chip_ptr;
2287 /* Do not process Tx ring in case of Tx ring resource error */
2288 if (mp->tx_resource_err)
2289 return ETH_QUEUE_FULL;
2291 /* Get the Tx Desc ring indexes */
2292 tx_desc_curr = mp->tx_curr_desc_q;
2293 tx_desc_used = mp->tx_used_desc_q;
2295 current_descriptor = &mp->p_tx_desc_area[tx_desc_curr];
2296 if (current_descriptor == NULL)
2297 return ETH_ERROR;
2299 tx_next_desc = (tx_desc_curr + 1) % MV64340_TX_QUEUE_SIZE;
2300 command_status = p_pkt_info->cmd_sts | ETH_ZERO_PADDING | ETH_GEN_CRC;
2302 if (command_status & ETH_TX_FIRST_DESC) {
2303 tx_first_desc = tx_desc_curr;
2304 mp->tx_first_desc_q = tx_first_desc;
2306 /* fill first descriptor */
2307 first_descriptor = &mp->p_tx_desc_area[tx_desc_curr];
2308 first_descriptor->l4i_chk = p_pkt_info->l4i_chk;
2309 first_descriptor->cmd_sts = command_status;
2310 first_descriptor->byte_cnt = p_pkt_info->byte_cnt;
2311 first_descriptor->buf_ptr = p_pkt_info->buf_ptr;
2312 first_descriptor->next_desc_ptr = mp->tx_desc_dma +
2313 tx_next_desc * sizeof(struct eth_tx_desc);
2314 wmb();
2315 } else {
2316 tx_first_desc = mp->tx_first_desc_q;
2317 first_descriptor = &mp->p_tx_desc_area[tx_first_desc];
2318 if (first_descriptor == NULL) {
2319 printk("First desc is NULL !!\n");
2320 return ETH_ERROR;
2322 if (command_status & ETH_TX_LAST_DESC)
2323 current_descriptor->next_desc_ptr = 0x00000000;
2324 else {
2325 command_status |= ETH_BUFFER_OWNED_BY_DMA;
2326 current_descriptor->next_desc_ptr = mp->tx_desc_dma +
2327 tx_next_desc * sizeof(struct eth_tx_desc);
2331 if (p_pkt_info->byte_cnt < 8) {
2332 printk(" < 8 problem \n");
2333 return ETH_ERROR;
2336 current_descriptor->buf_ptr = p_pkt_info->buf_ptr;
2337 current_descriptor->byte_cnt = p_pkt_info->byte_cnt;
2338 current_descriptor->l4i_chk = p_pkt_info->l4i_chk;
2339 current_descriptor->cmd_sts = command_status;
2341 mp->tx_skb[tx_desc_curr] = (struct sk_buff*) p_pkt_info->return_info;
2343 wmb();
2345 /* Set last desc with DMA ownership and interrupt enable. */
2346 if (command_status & ETH_TX_LAST_DESC) {
2347 current_descriptor->cmd_sts = command_status |
2348 ETH_TX_ENABLE_INTERRUPT |
2349 ETH_BUFFER_OWNED_BY_DMA;
2351 if (!(command_status & ETH_TX_FIRST_DESC))
2352 first_descriptor->cmd_sts |= ETH_BUFFER_OWNED_BY_DMA;
2353 wmb();
2355 first_chip_ptr = MV_READ(MV64340_ETH_CURRENT_SERVED_TX_DESC_PTR(mp->port_num));
2357 /* Apply send command */
2358 if (first_chip_ptr == 0x00000000)
2359 MV_WRITE(MV64340_ETH_TX_CURRENT_QUEUE_DESC_PTR_0(mp->port_num), mp->tx_desc_dma + tx_first_desc * sizeof(struct eth_tx_desc));
2361 ETH_ENABLE_TX_QUEUE(mp->port_num);
2364 * Finish Tx packet. Update first desc in case of Tx resource
2365 * error */
2366 tx_first_desc = tx_next_desc;
2367 mp->tx_first_desc_q = tx_first_desc;
2368 } else {
2369 if (! (command_status & ETH_TX_FIRST_DESC) ) {
2370 current_descriptor->cmd_sts = command_status;
2371 wmb();
2375 /* Check for ring index overlap in the Tx desc ring */
2376 if (tx_next_desc == tx_desc_used) {
2377 mp->tx_resource_err = 1;
2378 mp->tx_curr_desc_q = tx_first_desc;
2380 return ETH_QUEUE_LAST_RESOURCE;
2383 mp->tx_curr_desc_q = tx_next_desc;
2384 wmb();
2386 return ETH_OK;
2388 #else
2389 static ETH_FUNC_RET_STATUS eth_port_send(struct mv64340_private * mp,
2390 struct pkt_info * p_pkt_info)
2392 int tx_desc_curr;
2393 int tx_desc_used;
2394 volatile struct eth_tx_desc* current_descriptor;
2395 unsigned int command_status;
2397 /* Do not process Tx ring in case of Tx ring resource error */
2398 if (mp->tx_resource_err)
2399 return ETH_QUEUE_FULL;
2401 /* Get the Tx Desc ring indexes */
2402 tx_desc_curr = mp->tx_curr_desc_q;
2403 tx_desc_used = mp->tx_used_desc_q;
2404 current_descriptor = &mp->p_tx_desc_area[tx_desc_curr];
2406 if (current_descriptor == NULL)
2407 return ETH_ERROR;
2409 command_status = p_pkt_info->cmd_sts | ETH_ZERO_PADDING | ETH_GEN_CRC;
2411 /* XXX Is this for real ?!?!? */
2412 /* Buffers with a payload smaller than 8 bytes must be aligned to a
2413 * 64-bit boundary. We use the memory allocated for Tx descriptor.
2414 * This memory is located in TX_BUF_OFFSET_IN_DESC offset within the
2415 * Tx descriptor. */
2416 if (p_pkt_info->byte_cnt <= 8) {
2417 printk(KERN_ERR
2418 "You have failed in the < 8 bytes errata - fixme\n");
2419 return ETH_ERROR;
2421 current_descriptor->buf_ptr = p_pkt_info->buf_ptr;
2422 current_descriptor->byte_cnt = p_pkt_info->byte_cnt;
2423 mp->tx_skb[tx_desc_curr] = (struct sk_buff *) p_pkt_info->return_info;
2425 mb();
2427 /* Set last desc with DMA ownership and interrupt enable. */
2428 current_descriptor->cmd_sts = command_status |
2429 ETH_BUFFER_OWNED_BY_DMA | ETH_TX_ENABLE_INTERRUPT;
2431 /* Apply send command */
2432 ETH_ENABLE_TX_QUEUE(mp->port_num);
2434 /* Finish Tx packet. Update first desc in case of Tx resource error */
2435 tx_desc_curr = (tx_desc_curr + 1) % MV64340_TX_QUEUE_SIZE;
2437 /* Update the current descriptor */
2438 mp->tx_curr_desc_q = tx_desc_curr;
2440 /* Check for ring index overlap in the Tx desc ring */
2441 if (tx_desc_curr == tx_desc_used) {
2442 mp->tx_resource_err = 1;
2443 return ETH_QUEUE_LAST_RESOURCE;
2446 return ETH_OK;
2448 #endif
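/*
 * Usage sketch (editor's illustration of the contract documented above;
 * the skb/DMA mapping details are assumptions, not code from this file):
 *
 *	struct pkt_info pkt;
 *
 *	pkt.byte_cnt = skb->len;
 *	pkt.buf_ptr = dma_handle;	(mapped address of skb->data)
 *	pkt.return_info = skb;		(handed back by eth_tx_return_desc())
 *	pkt.cmd_sts = ETH_TX_FIRST_DESC | ETH_TX_LAST_DESC;
 *
 *	switch (eth_port_send(mp, &pkt)) {
 *	case ETH_OK:			(queued successfully)
 *	case ETH_QUEUE_LAST_RESOURCE:	(queued, but the ring is now full)
 *	case ETH_QUEUE_FULL:		(caller must stop the net queue)
 *	...
 *	}
 */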
2451 * eth_tx_return_desc - Free all used Tx descriptors
2453 * DESCRIPTION:
2454 * This routine returns the transmitted packet information to the caller.
2455 * It uses the 'first' index to support Tx desc return in case a transmit
2456 * of a packet spanned over multiple buffers is still in process.
2457 * In case the Tx queue was in "resource error" condition, where there are
2458 * no available Tx resources, the function resets the resource error flag.
2460 * INPUT:
2461 * struct mv64340_private *mp Ethernet Port Control struct.
2462 * struct pkt_info *p_pkt_info User packet buffer.
2464 * OUTPUT:
2465 * Tx ring 'first' and 'used' indexes are updated.
2467 * RETURN:
2468 * ETH_ERROR in case the routine can not access Tx desc ring.
2469 * ETH_RETRY in case there is a transmission in process.
2470 * ETH_END_OF_JOB if the routine has nothing to release.
2471 * ETH_OK otherwise.
2474 static ETH_FUNC_RET_STATUS eth_tx_return_desc(struct mv64340_private * mp,
2475 struct pkt_info * p_pkt_info)
2477 int tx_desc_used, tx_desc_curr;
2478 #ifdef MV64340_CHECKSUM_OFFLOAD_TX
2479 int tx_first_desc;
2480 #endif
2481 volatile struct eth_tx_desc *p_tx_desc_used;
2482 unsigned int command_status;
2484 /* Get the Tx Desc ring indexes */
2485 tx_desc_curr = mp->tx_curr_desc_q;
2486 tx_desc_used = mp->tx_used_desc_q;
2487 #ifdef MV64340_CHECKSUM_OFFLOAD_TX
2488 tx_first_desc = mp->tx_first_desc_q;
2489 #endif
2490 p_tx_desc_used = &mp->p_tx_desc_area[tx_desc_used];
2492 /* XXX Sanity check */
2493 if (p_tx_desc_used == NULL)
2494 return ETH_ERROR;
2496 command_status = p_tx_desc_used->cmd_sts;
2498 /* Still transmitting... */
2499 #ifndef MV64340_CHECKSUM_OFFLOAD_TX
2500 if (command_status & (ETH_BUFFER_OWNED_BY_DMA))
2501 return ETH_RETRY;
2502 #endif
2503 /* Stop release. About to overlap the current available Tx descriptor */
2504 #ifdef MV64340_CHECKSUM_OFFLOAD_TX
2505 if (tx_desc_used == tx_first_desc && !mp->tx_resource_err)
2506 return ETH_END_OF_JOB;
2507 #else
2508 if (tx_desc_used == tx_desc_curr && !mp->tx_resource_err)
2509 return ETH_END_OF_JOB;
2510 #endif
2512 /* Pass the packet information to the caller */
2513 p_pkt_info->cmd_sts = command_status;
2514 p_pkt_info->return_info = mp->tx_skb[tx_desc_used];
2515 mp->tx_skb[tx_desc_used] = NULL;
2517 /* Update the next descriptor to release. */
2518 mp->tx_used_desc_q = (tx_desc_used + 1) % MV64340_TX_QUEUE_SIZE;
2520 /* Any Tx return cancels the Tx resource error status */
2521 mp->tx_resource_err = 0;
2523 return ETH_OK;
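/*
 * Reclaim sketch (editor's illustration): callers typically loop until
 * ETH_END_OF_JOB, freeing each returned skb:
 *
 *	struct pkt_info pkt;
 *
 *	while (eth_tx_return_desc(mp, &pkt) == ETH_OK)
 *		if (pkt.return_info)
 *			dev_kfree_skb_irq((struct sk_buff *) pkt.return_info);
 */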
2527 * eth_port_receive - Get received information from Rx ring.
2529 * DESCRIPTION:
2530 * This routine returns the received data to the caller. There is no
2531 * data copying during routine operation. All information is returned
2532 * using the packet information struct pointer passed in by the caller.
2533 * If the routine exhausts Rx ring resources then the resource error flag
2534 * is set.
2536 * INPUT:
2537 * struct mv64340_private *mp Ethernet Port Control struct.
2538 * struct pkt_info *p_pkt_info User packet buffer.
2540 * OUTPUT:
2541 * Rx ring current and used indexes are updated.
2543 * RETURN:
2544 * ETH_ERROR in case the routine can not access Rx desc ring.
2545 * ETH_QUEUE_FULL if Rx ring resources are exhausted.
2546 * ETH_END_OF_JOB if there is no received data.
2547 * ETH_OK otherwise.
2549 static ETH_FUNC_RET_STATUS eth_port_receive(struct mv64340_private * mp,
2550 struct pkt_info * p_pkt_info)
2552 int rx_next_curr_desc, rx_curr_desc, rx_used_desc;
2553 volatile struct eth_rx_desc * p_rx_desc;
2554 unsigned int command_status;
2556 /* Do not process Rx ring in case of Rx ring resource error */
2557 if (mp->rx_resource_err)
2558 return ETH_QUEUE_FULL;
2560 /* Get the Rx Desc ring 'curr' and 'used' indexes */
2561 rx_curr_desc = mp->rx_curr_desc_q;
2562 rx_used_desc = mp->rx_used_desc_q;
2564 p_rx_desc = &mp->p_rx_desc_area[rx_curr_desc];
2566 /* Cache the command/status word to avoid repeated reads from memory */
2567 command_status = p_rx_desc->cmd_sts;
2569 /* Nothing to receive... */
2570 if (command_status & (ETH_BUFFER_OWNED_BY_DMA))
2571 return ETH_END_OF_JOB;
2573 p_pkt_info->byte_cnt = (p_rx_desc->byte_cnt) - RX_BUF_OFFSET;
2574 p_pkt_info->cmd_sts = command_status;
2575 p_pkt_info->buf_ptr = (p_rx_desc->buf_ptr) + RX_BUF_OFFSET;
2576 p_pkt_info->return_info = mp->rx_skb[rx_curr_desc];
2577 p_pkt_info->l4i_chk = p_rx_desc->buf_size;
2579 /* Clean the return info field to indicate that the packet has been */
2580 /* moved to the upper layers */
2581 mp->rx_skb[rx_curr_desc] = NULL;
2583 /* Update current index in data structure */
2584 rx_next_curr_desc = (rx_curr_desc + 1) % MV64340_RX_QUEUE_SIZE;
2585 mp->rx_curr_desc_q = rx_next_curr_desc;
2587 /* Rx descriptors exhausted. Set the Rx ring resource error flag */
2588 if (rx_next_curr_desc == rx_used_desc)
2589 mp->rx_resource_err = 1;
2591 mb();
2592 return ETH_OK;
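/*
 * Receive-path sketch (editor's illustration; 'dev' is assumed to be
 * the owning net_device): drain the ring until ETH_END_OF_JOB, passing
 * each buffer up the stack:
 *
 *	struct pkt_info pkt;
 *
 *	while (eth_port_receive(mp, &pkt) == ETH_OK) {
 *		struct sk_buff *skb = (struct sk_buff *) pkt.return_info;
 *
 *		skb_put(skb, pkt.byte_cnt);
 *		skb->protocol = eth_type_trans(skb, dev);
 *		netif_rx(skb);
 *	}
 *
 * (error bits in pkt.cmd_sts should be checked before handing the skb
 * upward.)
 */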
2596 * eth_rx_return_buff - Returns a Rx buffer back to the Rx ring.
2598 * DESCRIPTION:
2599 * This routine returns a Rx buffer back to the Rx ring. It retrieves the
2600 * next 'used' descriptor and attaches the returned buffer to it.
2601 * In case the Rx ring was in "resource error" condition, where there are
2602 * no available Rx resources, the function resets the resource error flag.
2604 * INPUT:
2605 * struct mv64340_private *mp Ethernet Port Control struct.
2606 * struct pkt_info *p_pkt_info Information on the returned buffer.
2608 * OUTPUT:
2609 * New available Rx resource in Rx descriptor ring.
2611 * RETURN:
2612 * ETH_ERROR in case the routine can not access Rx desc ring.
2613 * ETH_OK otherwise.
2615 static ETH_FUNC_RET_STATUS eth_rx_return_buff(struct mv64340_private * mp,
2616 struct pkt_info * p_pkt_info)
2618 int used_rx_desc; /* Where to return Rx resource */
2619 volatile struct eth_rx_desc* p_used_rx_desc;
2621 /* Get 'used' Rx descriptor */
2622 used_rx_desc = mp->rx_used_desc_q;
2623 p_used_rx_desc = &mp->p_rx_desc_area[used_rx_desc];
2625 p_used_rx_desc->buf_ptr = p_pkt_info->buf_ptr;
2626 p_used_rx_desc->buf_size = p_pkt_info->byte_cnt;
2627 mp->rx_skb[used_rx_desc] = p_pkt_info->return_info;
2629 /* Flush the write pipe */
2630 mb();
2632 /* Return the descriptor to DMA ownership */
2633 p_used_rx_desc->cmd_sts =
2634 ETH_BUFFER_OWNED_BY_DMA | ETH_RX_ENABLE_INTERRUPT;
2636 /* Flush descriptor and CPU pipe */
2637 mb();
2639 /* Move the used descriptor pointer to the next descriptor */
2640 mp->rx_used_desc_q = (used_rx_desc + 1) % MV64340_RX_QUEUE_SIZE;
2642 /* Any Rx return cancels the Rx resource error status */
2643 mp->rx_resource_err = 0;
2645 return ETH_OK;
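/*
 * Refill sketch (editor's illustration; buffer size and mapping call
 * are assumptions): allocate a fresh skb and return it to DMA ownership:
 *
 *	struct pkt_info pkt;
 *	struct sk_buff *skb = dev_alloc_skb(buf_size + EXTRA_BYTES);
 *
 *	if (skb) {
 *		pkt.buf_ptr = dma_map_single(NULL, skb->data, buf_size,
 *					     DMA_FROM_DEVICE);
 *		pkt.byte_cnt = buf_size;
 *		pkt.return_info = skb;
 *		eth_rx_return_buff(mp, &pkt);
 *	}
 */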