mv643xx_eth: OOM handling fixes
drivers/net/mv643xx_eth.c
/*
 * Driver for Marvell Discovery (MV643XX) and Marvell Orion ethernet ports
 * Copyright (C) 2002 Matthew Dharm <mdharm@momenco.com>
 *
 * Based on the 64360 driver from:
 * Copyright (C) 2002 Rabeeh Khoury <rabeeh@galileo.co.il>
 *		      Rabeeh Khoury <rabeeh@marvell.com>
 *
 * Copyright (C) 2003 PMC-Sierra, Inc.,
 *	written by Manish Lachwani
 *
 * Copyright (C) 2003 Ralf Baechle <ralf@linux-mips.org>
 *
 * Copyright (C) 2004-2006 MontaVista Software, Inc.
 *			   Dale Farnsworth <dale@farnsworth.org>
 *
 * Copyright (C) 2004 Steven J. Hill <sjhill1@rockwellcollins.com>
 *				     <sjhill@realitydiluted.com>
 *
 * Copyright (C) 2007-2008 Marvell Semiconductor
 *			   Lennert Buytenhek <buytenh@marvell.com>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version 2
 * of the License, or (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA  02111-1307, USA.
 */
#include <linux/init.h>
#include <linux/dma-mapping.h>
#include <linux/in.h>
#include <linux/ip.h>
#include <linux/tcp.h>
#include <linux/udp.h>
#include <linux/etherdevice.h>
#include <linux/delay.h>
#include <linux/ethtool.h>
#include <linux/platform_device.h>
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/spinlock.h>
#include <linux/workqueue.h>
#include <linux/phy.h>
#include <linux/mv643xx_eth.h>
#include <linux/io.h>
#include <linux/types.h>
#include <asm/system.h>

static char mv643xx_eth_driver_name[] = "mv643xx_eth";
static char mv643xx_eth_driver_version[] = "1.4";

/*
 * Registers shared between all ports.
 */
#define PHY_ADDR			0x0000
#define SMI_REG				0x0004
#define  SMI_BUSY			0x10000000
#define  SMI_READ_VALID			0x08000000
#define  SMI_OPCODE_READ		0x04000000
#define  SMI_OPCODE_WRITE		0x00000000
#define ERR_INT_CAUSE			0x0080
#define  ERR_INT_SMI_DONE		0x00000010
#define ERR_INT_MASK			0x0084
#define WINDOW_BASE(w)			(0x0200 + ((w) << 3))
#define WINDOW_SIZE(w)			(0x0204 + ((w) << 3))
#define WINDOW_REMAP_HIGH(w)		(0x0280 + ((w) << 2))
#define WINDOW_BAR_ENABLE		0x0290
#define WINDOW_PROTECT(w)		(0x0294 + ((w) << 4))

/*
 * Main per-port registers.  These live at offset 0x0400 for
 * port #0, 0x0800 for port #1, and 0x0c00 for port #2.
 */
#define PORT_CONFIG			0x0000
#define  UNICAST_PROMISCUOUS_MODE	0x00000001
#define PORT_CONFIG_EXT			0x0004
#define MAC_ADDR_LOW			0x0014
#define MAC_ADDR_HIGH			0x0018
#define SDMA_CONFIG			0x001c
#define PORT_SERIAL_CONTROL		0x003c
#define PORT_STATUS			0x0044
#define  TX_FIFO_EMPTY			0x00000400
#define  TX_IN_PROGRESS			0x00000080
#define  PORT_SPEED_MASK		0x00000030
#define  PORT_SPEED_1000		0x00000010
#define  PORT_SPEED_100			0x00000020
#define  PORT_SPEED_10			0x00000000
#define  FLOW_CONTROL_ENABLED		0x00000008
#define  FULL_DUPLEX			0x00000004
#define  LINK_UP			0x00000002
#define TXQ_COMMAND			0x0048
#define TXQ_FIX_PRIO_CONF		0x004c
#define TX_BW_RATE			0x0050
#define TX_BW_MTU			0x0058
#define TX_BW_BURST			0x005c
#define INT_CAUSE			0x0060
#define  INT_TX_END			0x07f80000
#define  INT_RX				0x000003fc
#define  INT_EXT			0x00000002
#define INT_CAUSE_EXT			0x0064
#define  INT_EXT_LINK_PHY		0x00110000
#define  INT_EXT_TX			0x000000ff
#define INT_MASK			0x0068
#define INT_MASK_EXT			0x006c
#define TX_FIFO_URGENT_THRESHOLD	0x0074
#define TXQ_FIX_PRIO_CONF_MOVED		0x00dc
#define TX_BW_RATE_MOVED		0x00e0
#define TX_BW_MTU_MOVED			0x00e8
#define TX_BW_BURST_MOVED		0x00ec
#define RXQ_CURRENT_DESC_PTR(q)		(0x020c + ((q) << 4))
#define RXQ_COMMAND			0x0280
#define TXQ_CURRENT_DESC_PTR(q)		(0x02c0 + ((q) << 2))
#define TXQ_BW_TOKENS(q)		(0x0300 + ((q) << 4))
#define TXQ_BW_CONF(q)			(0x0304 + ((q) << 4))
#define TXQ_BW_WRR_CONF(q)		(0x0308 + ((q) << 4))

/*
 * Misc per-port registers.
 */
#define MIB_COUNTERS(p)			(0x1000 + ((p) << 7))
#define SPECIAL_MCAST_TABLE(p)		(0x1400 + ((p) << 10))
#define OTHER_MCAST_TABLE(p)		(0x1500 + ((p) << 10))
#define UNICAST_TABLE(p)		(0x1600 + ((p) << 10))
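
/*
 * Editorial note (illustration, not from the original source): the
 * "main per-port" offsets above are relative to the per-port register
 * bank that rdlp()/wrlp() address (0x0400 + port * 0x0400 from the
 * shared base, per the comment above), while the MIB/filter table
 * macros embed the port number themselves and are used with the
 * shared-base accessors rdl()/wrl().  For example, assuming port #1:
 *
 *	wrlp(mp, PORT_CONFIG, v);	  (* shared base + 0x0800 + 0x0000 *)
 *	wrl(mp, UNICAST_TABLE(1) + 8, v); (* shared base + 0x1a08 *)
 */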

/*
 * SDMA configuration register.
 */
#define RX_BURST_SIZE_4_64BIT		(2 << 1)
#define RX_BURST_SIZE_16_64BIT		(4 << 1)
#define BLM_RX_NO_SWAP			(1 << 4)
#define BLM_TX_NO_SWAP			(1 << 5)
#define TX_BURST_SIZE_4_64BIT		(2 << 22)
#define TX_BURST_SIZE_16_64BIT		(4 << 22)

#if defined(__BIG_ENDIAN)
#define PORT_SDMA_CONFIG_DEFAULT_VALUE		\
		(RX_BURST_SIZE_4_64BIT	|	\
		 TX_BURST_SIZE_4_64BIT)
#elif defined(__LITTLE_ENDIAN)
#define PORT_SDMA_CONFIG_DEFAULT_VALUE		\
		(RX_BURST_SIZE_4_64BIT	|	\
		 BLM_RX_NO_SWAP		|	\
		 BLM_TX_NO_SWAP		|	\
		 TX_BURST_SIZE_4_64BIT)
#else
#error One of __BIG_ENDIAN or __LITTLE_ENDIAN must be defined
#endif
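
/*
 * Editorial note (reading of the code above, not from the original
 * source): on little-endian hosts the BLM_*_NO_SWAP bits are set so
 * the DMA engine does not byte-swap buffer data; big-endian hosts
 * leave them clear and rely on the controller's default swap.  With
 * the values above, the little-endian default evaluates to:
 *
 *	(2 << 1) | (1 << 4) | (1 << 5) | (2 << 22) == 0x00800034
 */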

/*
 * Port serial control register.
 */
#define SET_MII_SPEED_TO_100		(1 << 24)
#define SET_GMII_SPEED_TO_1000		(1 << 23)
#define SET_FULL_DUPLEX_MODE		(1 << 21)
#define MAX_RX_PACKET_9700BYTE		(5 << 17)
#define DISABLE_AUTO_NEG_SPEED_GMII	(1 << 13)
#define DO_NOT_FORCE_LINK_FAIL		(1 << 10)
#define SERIAL_PORT_CONTROL_RESERVED	(1 << 9)
#define DISABLE_AUTO_NEG_FOR_FLOW_CTRL	(1 << 3)
#define DISABLE_AUTO_NEG_FOR_DUPLEX	(1 << 2)
#define FORCE_LINK_PASS			(1 << 1)
#define SERIAL_PORT_ENABLE		(1 << 0)

#define DEFAULT_RX_QUEUE_SIZE		128
#define DEFAULT_TX_QUEUE_SIZE		256

/*
 * RX/TX descriptors.
 */
#if defined(__BIG_ENDIAN)
struct rx_desc {
	u16 byte_cnt;		/* Descriptor buffer byte count		*/
	u16 buf_size;		/* Buffer size				*/
	u32 cmd_sts;		/* Descriptor command status		*/
	u32 next_desc_ptr;	/* Next descriptor pointer		*/
	u32 buf_ptr;		/* Descriptor buffer pointer		*/
};

struct tx_desc {
	u16 byte_cnt;		/* buffer byte count			*/
	u16 l4i_chk;		/* CPU provided TCP checksum		*/
	u32 cmd_sts;		/* Command/status field			*/
	u32 next_desc_ptr;	/* Pointer to next descriptor		*/
	u32 buf_ptr;		/* pointer to buffer for this descriptor */
};
#elif defined(__LITTLE_ENDIAN)
struct rx_desc {
	u32 cmd_sts;		/* Descriptor command status		*/
	u16 buf_size;		/* Buffer size				*/
	u16 byte_cnt;		/* Descriptor buffer byte count		*/
	u32 buf_ptr;		/* Descriptor buffer pointer		*/
	u32 next_desc_ptr;	/* Next descriptor pointer		*/
};

struct tx_desc {
	u32 cmd_sts;		/* Command/status field			*/
	u16 l4i_chk;		/* CPU provided TCP checksum		*/
	u16 byte_cnt;		/* buffer byte count			*/
	u32 buf_ptr;		/* pointer to buffer for this descriptor */
	u32 next_desc_ptr;	/* Pointer to next descriptor		*/
};
#else
#error One of __BIG_ENDIAN or __LITTLE_ENDIAN must be defined
#endif
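
/*
 * Editorial note (illustration, not from the original source): the
 * descriptor rings are shared with the DMA engine, and the
 * BUFFER_OWNED_BY_DMA bit in cmd_sts arbitrates ownership.  The CPU
 * fills in a descriptor, flips the ownership bit last behind a write
 * barrier, and only touches the descriptor again once the bit is
 * clear -- the pattern used throughout this driver:
 *
 *	desc->buf_ptr = ...; desc->byte_cnt = ...;
 *	wmb();
 *	desc->cmd_sts = BUFFER_OWNED_BY_DMA | ...;
 */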

/* RX & TX descriptor command */
#define BUFFER_OWNED_BY_DMA		0x80000000

/* RX & TX descriptor status */
#define ERROR_SUMMARY			0x00000001

/* RX descriptor status */
#define LAYER_4_CHECKSUM_OK		0x40000000
#define RX_ENABLE_INTERRUPT		0x20000000
#define RX_FIRST_DESC			0x08000000
#define RX_LAST_DESC			0x04000000

/* TX descriptor command */
#define TX_ENABLE_INTERRUPT		0x00800000
#define GEN_CRC				0x00400000
#define TX_FIRST_DESC			0x00200000
#define TX_LAST_DESC			0x00100000
#define ZERO_PADDING			0x00080000
#define GEN_IP_V4_CHECKSUM		0x00040000
#define GEN_TCP_UDP_CHECKSUM		0x00020000
#define UDP_FRAME			0x00010000
#define MAC_HDR_EXTRA_4_BYTES		0x00008000
#define MAC_HDR_EXTRA_8_BYTES		0x00000200

#define TX_IHL_SHIFT			11

/* global *******************************************************************/
struct mv643xx_eth_shared_private {
	/*
	 * Ethernet controller base address.
	 */
	void __iomem *base;

	/*
	 * Points at the right SMI instance to use.
	 */
	struct mv643xx_eth_shared_private *smi;

	/*
	 * Provides access to local SMI interface.
	 */
	struct mii_bus *smi_bus;

	/*
	 * If we have access to the error interrupt pin (which is
	 * somewhat misnamed as it not only reflects internal errors
	 * but also reflects SMI completion), use that to wait for
	 * SMI access completion instead of polling the SMI busy bit.
	 */
	int err_interrupt;
	wait_queue_head_t smi_busy_wait;

	/*
	 * Per-port MBUS window access register value.
	 */
	u32 win_protect;

	/*
	 * Hardware-specific parameters.
	 */
	unsigned int t_clk;
	int extended_rx_coal_limit;
	int tx_bw_control;
};

#define TX_BW_CONTROL_ABSENT		0
#define TX_BW_CONTROL_OLD_LAYOUT	1
#define TX_BW_CONTROL_NEW_LAYOUT	2

/* per-port *****************************************************************/
struct mib_counters {
	u64 good_octets_received;
	u32 bad_octets_received;
	u32 internal_mac_transmit_err;
	u32 good_frames_received;
	u32 bad_frames_received;
	u32 broadcast_frames_received;
	u32 multicast_frames_received;
	u32 frames_64_octets;
	u32 frames_65_to_127_octets;
	u32 frames_128_to_255_octets;
	u32 frames_256_to_511_octets;
	u32 frames_512_to_1023_octets;
	u32 frames_1024_to_max_octets;
	u64 good_octets_sent;
	u32 good_frames_sent;
	u32 excessive_collision;
	u32 multicast_frames_sent;
	u32 broadcast_frames_sent;
	u32 unrec_mac_control_received;
	u32 fc_sent;
	u32 good_fc_received;
	u32 bad_fc_received;
	u32 undersize_received;
	u32 fragments_received;
	u32 oversize_received;
	u32 jabber_received;
	u32 mac_receive_error;
	u32 bad_crc_event;
	u32 collision;
	u32 late_collision;
};

struct rx_queue {
	int index;

	int rx_ring_size;

	int rx_desc_count;
	int rx_curr_desc;
	int rx_used_desc;

	struct rx_desc *rx_desc_area;
	dma_addr_t rx_desc_dma;
	int rx_desc_area_size;
	struct sk_buff **rx_skb;
};

struct tx_queue {
	int index;

	int tx_ring_size;

	int tx_desc_count;
	int tx_curr_desc;
	int tx_used_desc;

	struct tx_desc *tx_desc_area;
	dma_addr_t tx_desc_dma;
	int tx_desc_area_size;

	struct sk_buff_head tx_skb;

	unsigned long tx_packets;
	unsigned long tx_bytes;
	unsigned long tx_dropped;
};

struct mv643xx_eth_private {
	struct mv643xx_eth_shared_private *shared;
	void __iomem *base;
	int port_num;

	struct net_device *dev;

	struct phy_device *phy;

	struct timer_list mib_counters_timer;
	spinlock_t mib_counters_lock;
	struct mib_counters mib_counters;

	struct work_struct tx_timeout_task;

	struct napi_struct napi;
	u8 oom;
	u8 work_link;
	u8 work_tx;
	u8 work_tx_end;
	u8 work_rx;
	u8 work_rx_refill;

	int skb_size;
	struct sk_buff_head rx_recycle;

	/*
	 * RX state.
	 */
	int default_rx_ring_size;
	unsigned long rx_desc_sram_addr;
	int rx_desc_sram_size;
	int rxq_count;
	struct timer_list rx_oom;
	struct rx_queue rxq[8];

	/*
	 * TX state.
	 */
	int default_tx_ring_size;
	unsigned long tx_desc_sram_addr;
	int tx_desc_sram_size;
	int txq_count;
	struct tx_queue txq[8];
};

/* port register accessors **************************************************/
static inline u32 rdl(struct mv643xx_eth_private *mp, int offset)
{
	return readl(mp->shared->base + offset);
}

static inline u32 rdlp(struct mv643xx_eth_private *mp, int offset)
{
	return readl(mp->base + offset);
}

static inline void wrl(struct mv643xx_eth_private *mp, int offset, u32 data)
{
	writel(data, mp->shared->base + offset);
}

static inline void wrlp(struct mv643xx_eth_private *mp, int offset, u32 data)
{
	writel(data, mp->base + offset);
}
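
/*
 * Editorial note (illustration, not from the original source): the
 * plain rdl()/wrl() accessors address controller-wide registers via
 * mp->shared->base, while the "lp" variants address this port's own
 * register bank via mp->base.  E.g. enabling RX queue 0 on this port:
 *
 *	wrlp(mp, RXQ_COMMAND, 1 << 0);
 */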

/* rxq/txq helper functions *************************************************/
static struct mv643xx_eth_private *rxq_to_mp(struct rx_queue *rxq)
{
	return container_of(rxq, struct mv643xx_eth_private, rxq[rxq->index]);
}

static struct mv643xx_eth_private *txq_to_mp(struct tx_queue *txq)
{
	return container_of(txq, struct mv643xx_eth_private, txq[txq->index]);
}

static void rxq_enable(struct rx_queue *rxq)
{
	struct mv643xx_eth_private *mp = rxq_to_mp(rxq);

	wrlp(mp, RXQ_COMMAND, 1 << rxq->index);
}

static void rxq_disable(struct rx_queue *rxq)
{
	struct mv643xx_eth_private *mp = rxq_to_mp(rxq);
	u8 mask = 1 << rxq->index;

	wrlp(mp, RXQ_COMMAND, mask << 8);
	while (rdlp(mp, RXQ_COMMAND) & mask)
		udelay(10);
}

static void txq_reset_hw_ptr(struct tx_queue *txq)
{
	struct mv643xx_eth_private *mp = txq_to_mp(txq);
	u32 addr;

	addr = (u32)txq->tx_desc_dma;
	addr += txq->tx_curr_desc * sizeof(struct tx_desc);
	wrlp(mp, TXQ_CURRENT_DESC_PTR(txq->index), addr);
}

static void txq_enable(struct tx_queue *txq)
{
	struct mv643xx_eth_private *mp = txq_to_mp(txq);

	wrlp(mp, TXQ_COMMAND, 1 << txq->index);
}

static void txq_disable(struct tx_queue *txq)
{
	struct mv643xx_eth_private *mp = txq_to_mp(txq);
	u8 mask = 1 << txq->index;

	wrlp(mp, TXQ_COMMAND, mask << 8);
	while (rdlp(mp, TXQ_COMMAND) & mask)
		udelay(10);
}

static void txq_maybe_wake(struct tx_queue *txq)
{
	struct mv643xx_eth_private *mp = txq_to_mp(txq);
	struct netdev_queue *nq = netdev_get_tx_queue(mp->dev, txq->index);

	if (netif_tx_queue_stopped(nq)) {
		__netif_tx_lock(nq, smp_processor_id());
		if (txq->tx_ring_size - txq->tx_desc_count >= MAX_SKB_FRAGS + 1)
			netif_tx_wake_queue(nq);
		__netif_tx_unlock(nq);
	}
}

/* rx napi ******************************************************************/
static int rxq_process(struct rx_queue *rxq, int budget)
{
	struct mv643xx_eth_private *mp = rxq_to_mp(rxq);
	struct net_device_stats *stats = &mp->dev->stats;
	int rx;

	rx = 0;
	while (rx < budget && rxq->rx_desc_count) {
		struct rx_desc *rx_desc;
		unsigned int cmd_sts;
		struct sk_buff *skb;
		u16 byte_cnt;

		rx_desc = &rxq->rx_desc_area[rxq->rx_curr_desc];

		cmd_sts = rx_desc->cmd_sts;
		if (cmd_sts & BUFFER_OWNED_BY_DMA)
			break;
		rmb();

		skb = rxq->rx_skb[rxq->rx_curr_desc];
		rxq->rx_skb[rxq->rx_curr_desc] = NULL;

		rxq->rx_curr_desc++;
		if (rxq->rx_curr_desc == rxq->rx_ring_size)
			rxq->rx_curr_desc = 0;

		dma_unmap_single(NULL, rx_desc->buf_ptr,
				 rx_desc->buf_size, DMA_FROM_DEVICE);
		rxq->rx_desc_count--;
		rx++;

		mp->work_rx_refill |= 1 << rxq->index;

		byte_cnt = rx_desc->byte_cnt;

		/*
		 * Update statistics.
		 *
		 * Note that the descriptor byte count includes 2 dummy
		 * bytes automatically inserted by the hardware at the
		 * start of the packet (which we don't count), and a 4
		 * byte CRC at the end of the packet (which we do count).
		 */
		stats->rx_packets++;
		stats->rx_bytes += byte_cnt - 2;

		/*
		 * In case we received a packet without first / last bits
		 * on, or the error summary bit is set, the packet needs
		 * to be dropped.
		 */
		if ((cmd_sts & (RX_FIRST_DESC | RX_LAST_DESC | ERROR_SUMMARY))
			!= (RX_FIRST_DESC | RX_LAST_DESC))
			goto err;

		/*
		 * The -4 is for the CRC in the trailer of the
		 * received packet
		 */
		skb_put(skb, byte_cnt - 2 - 4);

		if (cmd_sts & LAYER_4_CHECKSUM_OK)
			skb->ip_summed = CHECKSUM_UNNECESSARY;
		skb->protocol = eth_type_trans(skb, mp->dev);
		netif_receive_skb(skb);

		continue;

err:
		stats->rx_dropped++;

		if ((cmd_sts & (RX_FIRST_DESC | RX_LAST_DESC)) !=
			(RX_FIRST_DESC | RX_LAST_DESC)) {
			if (net_ratelimit())
				dev_printk(KERN_ERR, &mp->dev->dev,
					   "received packet spanning "
					   "multiple descriptors\n");
		}

		if (cmd_sts & ERROR_SUMMARY)
			stats->rx_errors++;

		dev_kfree_skb(skb);
	}

	if (rx < budget)
		mp->work_rx &= ~(1 << rxq->index);

	return rx;
}
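
/*
 * Editorial note (worked example, not from the original source): for a
 * minimum-size 60-byte frame, the wire frame is 64 bytes including the
 * 4-byte FCS.  The hardware reports byte_cnt = 66 (2 dummy bytes + 64),
 * so rx_bytes above is incremented by 64 and skb_put() receives 60.
 */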

static int rxq_refill(struct rx_queue *rxq, int budget)
{
	struct mv643xx_eth_private *mp = rxq_to_mp(rxq);
	int refilled;

	refilled = 0;
	while (refilled < budget && rxq->rx_desc_count < rxq->rx_ring_size) {
		struct sk_buff *skb;
		int unaligned;
		int rx;
		struct rx_desc *rx_desc;

		skb = __skb_dequeue(&mp->rx_recycle);
		if (skb == NULL)
			skb = dev_alloc_skb(mp->skb_size +
					    dma_get_cache_alignment() - 1);

		if (skb == NULL) {
			mp->oom = 1;
			goto oom;
		}

		unaligned = (u32)skb->data & (dma_get_cache_alignment() - 1);
		if (unaligned)
			skb_reserve(skb, dma_get_cache_alignment() - unaligned);

		refilled++;
		rxq->rx_desc_count++;

		rx = rxq->rx_used_desc++;
		if (rxq->rx_used_desc == rxq->rx_ring_size)
			rxq->rx_used_desc = 0;

		rx_desc = rxq->rx_desc_area + rx;

		rx_desc->buf_ptr = dma_map_single(NULL, skb->data,
						  mp->skb_size, DMA_FROM_DEVICE);
		rx_desc->buf_size = mp->skb_size;
		rxq->rx_skb[rx] = skb;
		wmb();
		rx_desc->cmd_sts = BUFFER_OWNED_BY_DMA | RX_ENABLE_INTERRUPT;
		wmb();

		/*
		 * The hardware automatically prepends 2 bytes of
		 * dummy data to each received packet, so that the
		 * IP header ends up 16-byte aligned.
		 */
		skb_reserve(skb, 2);
	}

	if (refilled < budget)
		mp->work_rx_refill &= ~(1 << rxq->index);

oom:
	return refilled;
}
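
/*
 * Editorial note (worked example, not from the original source):
 * over-allocating by dma_get_cache_alignment() - 1 bytes leaves room
 * to slide skb->data up to a cache line boundary.  Assuming 32-byte
 * cache lines and skb->data ending in 0x...14, unaligned = 0x14, so
 * skb_reserve() advances the data pointer by 32 - 0x14 = 12 bytes.
 */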

/* tx ***********************************************************************/
static inline unsigned int has_tiny_unaligned_frags(struct sk_buff *skb)
{
	int frag;

	for (frag = 0; frag < skb_shinfo(skb)->nr_frags; frag++) {
		skb_frag_t *fragp = &skb_shinfo(skb)->frags[frag];

		if (fragp->size <= 8 && fragp->page_offset & 7)
			return 1;
	}

	return 0;
}

static void txq_submit_frag_skb(struct tx_queue *txq, struct sk_buff *skb)
{
	int nr_frags = skb_shinfo(skb)->nr_frags;
	int frag;

	for (frag = 0; frag < nr_frags; frag++) {
		skb_frag_t *this_frag;
		int tx_index;
		struct tx_desc *desc;

		this_frag = &skb_shinfo(skb)->frags[frag];
		tx_index = txq->tx_curr_desc++;
		if (txq->tx_curr_desc == txq->tx_ring_size)
			txq->tx_curr_desc = 0;
		desc = &txq->tx_desc_area[tx_index];

		/*
		 * The last fragment will generate an interrupt
		 * which will free the skb on TX completion.
		 */
		if (frag == nr_frags - 1) {
			desc->cmd_sts = BUFFER_OWNED_BY_DMA |
					ZERO_PADDING | TX_LAST_DESC |
					TX_ENABLE_INTERRUPT;
		} else {
			desc->cmd_sts = BUFFER_OWNED_BY_DMA;
		}

		desc->l4i_chk = 0;
		desc->byte_cnt = this_frag->size;
		desc->buf_ptr = dma_map_page(NULL, this_frag->page,
					     this_frag->page_offset,
					     this_frag->size,
					     DMA_TO_DEVICE);
	}
}

static inline __be16 sum16_as_be(__sum16 sum)
{
	return (__force __be16)sum;
}

static int txq_submit_skb(struct tx_queue *txq, struct sk_buff *skb)
{
	struct mv643xx_eth_private *mp = txq_to_mp(txq);
	int nr_frags = skb_shinfo(skb)->nr_frags;
	int tx_index;
	struct tx_desc *desc;
	u32 cmd_sts;
	u16 l4i_chk;
	int length;

	cmd_sts = TX_FIRST_DESC | GEN_CRC | BUFFER_OWNED_BY_DMA;
	l4i_chk = 0;

	if (skb->ip_summed == CHECKSUM_PARTIAL) {
		int tag_bytes;

		BUG_ON(skb->protocol != htons(ETH_P_IP) &&
		       skb->protocol != htons(ETH_P_8021Q));

		tag_bytes = (void *)ip_hdr(skb) - (void *)skb->data - ETH_HLEN;
		if (unlikely(tag_bytes & ~12)) {
			if (skb_checksum_help(skb) == 0)
				goto no_csum;
			kfree_skb(skb);
			return 1;
		}

		if (tag_bytes & 4)
			cmd_sts |= MAC_HDR_EXTRA_4_BYTES;
		if (tag_bytes & 8)
			cmd_sts |= MAC_HDR_EXTRA_8_BYTES;

		cmd_sts |= GEN_TCP_UDP_CHECKSUM |
			   GEN_IP_V4_CHECKSUM |
			   ip_hdr(skb)->ihl << TX_IHL_SHIFT;

		switch (ip_hdr(skb)->protocol) {
		case IPPROTO_UDP:
			cmd_sts |= UDP_FRAME;
			l4i_chk = ntohs(sum16_as_be(udp_hdr(skb)->check));
			break;
		case IPPROTO_TCP:
			l4i_chk = ntohs(sum16_as_be(tcp_hdr(skb)->check));
			break;
		default:
			BUG();
		}
	} else {
no_csum:
		/* Errata BTS #50, IHL must be 5 if no HW checksum */
		cmd_sts |= 5 << TX_IHL_SHIFT;
	}

	tx_index = txq->tx_curr_desc++;
	if (txq->tx_curr_desc == txq->tx_ring_size)
		txq->tx_curr_desc = 0;
	desc = &txq->tx_desc_area[tx_index];

	if (nr_frags) {
		txq_submit_frag_skb(txq, skb);
		length = skb_headlen(skb);
	} else {
		cmd_sts |= ZERO_PADDING | TX_LAST_DESC | TX_ENABLE_INTERRUPT;
		length = skb->len;
	}

	desc->l4i_chk = l4i_chk;
	desc->byte_cnt = length;
	desc->buf_ptr = dma_map_single(NULL, skb->data, length, DMA_TO_DEVICE);

	__skb_queue_tail(&txq->tx_skb, skb);

	/* ensure all other descriptors are written before first cmd_sts */
	wmb();
	desc->cmd_sts = cmd_sts;

	/* clear TX_END status */
	mp->work_tx_end &= ~(1 << txq->index);

	/* ensure all descriptors are written before poking hardware */
	wmb();
	txq_enable(txq);

	txq->tx_desc_count += nr_frags + 1;

	return 0;
}
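
/*
 * Editorial note (worked example, not from the original source):
 * tag_bytes measures how much MAC header sits between the standard
 * 14-byte Ethernet header and the IP header; the hardware can only
 * checksum frames where this is 0, 4, 8 or 12 bytes (hence the
 * "tag_bytes & ~12" test).  A single 802.1Q tag gives tag_bytes = 4,
 * which sets MAC_HDR_EXTRA_4_BYTES; anything else falls back to
 * software checksumming via skb_checksum_help().
 */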

static int mv643xx_eth_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct mv643xx_eth_private *mp = netdev_priv(dev);
	int queue;
	struct tx_queue *txq;
	struct netdev_queue *nq;

	queue = skb_get_queue_mapping(skb);
	txq = mp->txq + queue;
	nq = netdev_get_tx_queue(dev, queue);

	if (has_tiny_unaligned_frags(skb) && __skb_linearize(skb)) {
		txq->tx_dropped++;
		dev_printk(KERN_DEBUG, &dev->dev,
			   "failed to linearize skb with tiny "
			   "unaligned fragment\n");
		return NETDEV_TX_BUSY;
	}

	if (txq->tx_ring_size - txq->tx_desc_count < MAX_SKB_FRAGS + 1) {
		if (net_ratelimit())
			dev_printk(KERN_ERR, &dev->dev, "tx queue full?!\n");
		kfree_skb(skb);
		return NETDEV_TX_OK;
	}

	if (!txq_submit_skb(txq, skb)) {
		int entries_left;

		txq->tx_bytes += skb->len;
		txq->tx_packets++;
		dev->trans_start = jiffies;

		entries_left = txq->tx_ring_size - txq->tx_desc_count;
		if (entries_left < MAX_SKB_FRAGS + 1)
			netif_tx_stop_queue(nq);
	}

	return NETDEV_TX_OK;
}

/* tx napi ******************************************************************/
static void txq_kick(struct tx_queue *txq)
{
	struct mv643xx_eth_private *mp = txq_to_mp(txq);
	struct netdev_queue *nq = netdev_get_tx_queue(mp->dev, txq->index);
	u32 hw_desc_ptr;
	u32 expected_ptr;

	__netif_tx_lock(nq, smp_processor_id());

	if (rdlp(mp, TXQ_COMMAND) & (1 << txq->index))
		goto out;

	hw_desc_ptr = rdlp(mp, TXQ_CURRENT_DESC_PTR(txq->index));
	expected_ptr = (u32)txq->tx_desc_dma +
			txq->tx_curr_desc * sizeof(struct tx_desc);

	if (hw_desc_ptr != expected_ptr)
		txq_enable(txq);

out:
	__netif_tx_unlock(nq);

	mp->work_tx_end &= ~(1 << txq->index);
}

static int txq_reclaim(struct tx_queue *txq, int budget, int force)
{
	struct mv643xx_eth_private *mp = txq_to_mp(txq);
	struct netdev_queue *nq = netdev_get_tx_queue(mp->dev, txq->index);
	int reclaimed;

	__netif_tx_lock(nq, smp_processor_id());

	reclaimed = 0;
	while (reclaimed < budget && txq->tx_desc_count > 0) {
		int tx_index;
		struct tx_desc *desc;
		u32 cmd_sts;
		struct sk_buff *skb;

		tx_index = txq->tx_used_desc;
		desc = &txq->tx_desc_area[tx_index];
		cmd_sts = desc->cmd_sts;

		if (cmd_sts & BUFFER_OWNED_BY_DMA) {
			if (!force)
				break;
			desc->cmd_sts = cmd_sts & ~BUFFER_OWNED_BY_DMA;
		}

		txq->tx_used_desc = tx_index + 1;
		if (txq->tx_used_desc == txq->tx_ring_size)
			txq->tx_used_desc = 0;

		reclaimed++;
		txq->tx_desc_count--;

		skb = NULL;
		if (cmd_sts & TX_LAST_DESC)
			skb = __skb_dequeue(&txq->tx_skb);

		if (cmd_sts & ERROR_SUMMARY) {
			dev_printk(KERN_INFO, &mp->dev->dev, "tx error\n");
			mp->dev->stats.tx_errors++;
		}

		if (cmd_sts & TX_FIRST_DESC) {
			dma_unmap_single(NULL, desc->buf_ptr,
					 desc->byte_cnt, DMA_TO_DEVICE);
		} else {
			dma_unmap_page(NULL, desc->buf_ptr,
				       desc->byte_cnt, DMA_TO_DEVICE);
		}

		if (skb != NULL) {
			if (skb_queue_len(&mp->rx_recycle) <
					mp->default_rx_ring_size &&
			    skb_recycle_check(skb, mp->skb_size +
					dma_get_cache_alignment() - 1))
				__skb_queue_head(&mp->rx_recycle, skb);
			else
				dev_kfree_skb(skb);
		}
	}

	__netif_tx_unlock(nq);

	if (reclaimed < budget)
		mp->work_tx &= ~(1 << txq->index);

	return reclaimed;
}

/* tx rate control **********************************************************/
/*
 * Set total maximum TX rate (shared by all TX queues for this port)
 * to 'rate' bits per second, with a maximum burst of 'burst' bytes.
 */
static void tx_set_rate(struct mv643xx_eth_private *mp, int rate, int burst)
{
	int token_rate;
	int mtu;
	int bucket_size;

	token_rate = ((rate / 1000) * 64) / (mp->shared->t_clk / 1000);
	if (token_rate > 1023)
		token_rate = 1023;

	mtu = (mp->dev->mtu + 255) >> 8;
	if (mtu > 63)
		mtu = 63;

	bucket_size = (burst + 255) >> 8;
	if (bucket_size > 65535)
		bucket_size = 65535;

	switch (mp->shared->tx_bw_control) {
	case TX_BW_CONTROL_OLD_LAYOUT:
		wrlp(mp, TX_BW_RATE, token_rate);
		wrlp(mp, TX_BW_MTU, mtu);
		wrlp(mp, TX_BW_BURST, bucket_size);
		break;
	case TX_BW_CONTROL_NEW_LAYOUT:
		wrlp(mp, TX_BW_RATE_MOVED, token_rate);
		wrlp(mp, TX_BW_MTU_MOVED, mtu);
		wrlp(mp, TX_BW_BURST_MOVED, bucket_size);
		break;
	}
}
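
/*
 * Editorial note (worked example, not from the original source):
 * assuming t_clk = 133 MHz and rate = 1 Gb/s, the computation above
 * gives:
 *
 *	token_rate = ((1000000000 / 1000) * 64) / (133000000 / 1000)
 *	           = 64000000 / 133000
 *	           = 481	(integer division, below the 1023 cap)
 */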

static void txq_set_rate(struct tx_queue *txq, int rate, int burst)
{
	struct mv643xx_eth_private *mp = txq_to_mp(txq);
	int token_rate;
	int bucket_size;

	token_rate = ((rate / 1000) * 64) / (mp->shared->t_clk / 1000);
	if (token_rate > 1023)
		token_rate = 1023;

	bucket_size = (burst + 255) >> 8;
	if (bucket_size > 65535)
		bucket_size = 65535;

	wrlp(mp, TXQ_BW_TOKENS(txq->index), token_rate << 14);
	wrlp(mp, TXQ_BW_CONF(txq->index), (bucket_size << 10) | token_rate);
}

static void txq_set_fixed_prio_mode(struct tx_queue *txq)
{
	struct mv643xx_eth_private *mp = txq_to_mp(txq);
	int off;
	u32 val;

	/*
	 * Turn on fixed priority mode.
	 */
	off = 0;
	switch (mp->shared->tx_bw_control) {
	case TX_BW_CONTROL_OLD_LAYOUT:
		off = TXQ_FIX_PRIO_CONF;
		break;
	case TX_BW_CONTROL_NEW_LAYOUT:
		off = TXQ_FIX_PRIO_CONF_MOVED;
		break;
	}

	if (off) {
		val = rdlp(mp, off);
		val |= 1 << txq->index;
		wrlp(mp, off, val);
	}
}

static void txq_set_wrr(struct tx_queue *txq, int weight)
{
	struct mv643xx_eth_private *mp = txq_to_mp(txq);
	int off;
	u32 val;

	/*
	 * Turn off fixed priority mode.
	 */
	off = 0;
	switch (mp->shared->tx_bw_control) {
	case TX_BW_CONTROL_OLD_LAYOUT:
		off = TXQ_FIX_PRIO_CONF;
		break;
	case TX_BW_CONTROL_NEW_LAYOUT:
		off = TXQ_FIX_PRIO_CONF_MOVED;
		break;
	}

	if (off) {
		val = rdlp(mp, off);
		val &= ~(1 << txq->index);
		wrlp(mp, off, val);

		/*
		 * Configure WRR weight for this queue.
		 */
		val = rdlp(mp, off);
		val = (val & ~0xff) | (weight & 0xff);
		wrlp(mp, TXQ_BW_WRR_CONF(txq->index), val);
	}
}

/* mii management interface *************************************************/
static irqreturn_t mv643xx_eth_err_irq(int irq, void *dev_id)
{
	struct mv643xx_eth_shared_private *msp = dev_id;

	if (readl(msp->base + ERR_INT_CAUSE) & ERR_INT_SMI_DONE) {
		writel(~ERR_INT_SMI_DONE, msp->base + ERR_INT_CAUSE);
		wake_up(&msp->smi_busy_wait);
		return IRQ_HANDLED;
	}

	return IRQ_NONE;
}

static int smi_is_done(struct mv643xx_eth_shared_private *msp)
{
	return !(readl(msp->base + SMI_REG) & SMI_BUSY);
}

static int smi_wait_ready(struct mv643xx_eth_shared_private *msp)
{
	if (msp->err_interrupt == NO_IRQ) {
		int i;

		for (i = 0; !smi_is_done(msp); i++) {
			if (i == 10)
				return -ETIMEDOUT;
			msleep(10);
		}

		return 0;
	}

	if (!smi_is_done(msp)) {
		wait_event_timeout(msp->smi_busy_wait, smi_is_done(msp),
				   msecs_to_jiffies(100));
		if (!smi_is_done(msp))
			return -ETIMEDOUT;
	}

	return 0;
}

static int smi_bus_read(struct mii_bus *bus, int addr, int reg)
{
	struct mv643xx_eth_shared_private *msp = bus->priv;
	void __iomem *smi_reg = msp->base + SMI_REG;
	int ret;

	if (smi_wait_ready(msp)) {
		printk(KERN_WARNING "mv643xx_eth: SMI bus busy timeout\n");
		return -ETIMEDOUT;
	}

	writel(SMI_OPCODE_READ | (reg << 21) | (addr << 16), smi_reg);

	if (smi_wait_ready(msp)) {
		printk(KERN_WARNING "mv643xx_eth: SMI bus busy timeout\n");
		return -ETIMEDOUT;
	}

	ret = readl(smi_reg);
	if (!(ret & SMI_READ_VALID)) {
		printk(KERN_WARNING "mv643xx_eth: SMI bus read not valid\n");
		return -ENODEV;
	}

	return ret & 0xffff;
}

static int smi_bus_write(struct mii_bus *bus, int addr, int reg, u16 val)
{
	struct mv643xx_eth_shared_private *msp = bus->priv;
	void __iomem *smi_reg = msp->base + SMI_REG;

	if (smi_wait_ready(msp)) {
		printk(KERN_WARNING "mv643xx_eth: SMI bus busy timeout\n");
		return -ETIMEDOUT;
	}

	writel(SMI_OPCODE_WRITE | (reg << 21) |
		(addr << 16) | (val & 0xffff), smi_reg);

	if (smi_wait_ready(msp)) {
		printk(KERN_WARNING "mv643xx_eth: SMI bus busy timeout\n");
		return -ETIMEDOUT;
	}

	return 0;
}
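
/*
 * Editorial note (illustration, not from the original source): a
 * single SMI_REG write encodes the whole MDIO transaction -- the
 * opcode, the PHY address in bits [20:16], the register number in
 * bits [25:21], and (for writes) the 16-bit data.  E.g. reading
 * register 1 (BMSR) of a PHY at address 8 would be:
 *
 *	writel(SMI_OPCODE_READ | (1 << 21) | (8 << 16), smi_reg);
 */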

/* statistics ***************************************************************/
static struct net_device_stats *mv643xx_eth_get_stats(struct net_device *dev)
{
	struct mv643xx_eth_private *mp = netdev_priv(dev);
	struct net_device_stats *stats = &dev->stats;
	unsigned long tx_packets = 0;
	unsigned long tx_bytes = 0;
	unsigned long tx_dropped = 0;
	int i;

	for (i = 0; i < mp->txq_count; i++) {
		struct tx_queue *txq = mp->txq + i;

		tx_packets += txq->tx_packets;
		tx_bytes += txq->tx_bytes;
		tx_dropped += txq->tx_dropped;
	}

	stats->tx_packets = tx_packets;
	stats->tx_bytes = tx_bytes;
	stats->tx_dropped = tx_dropped;

	return stats;
}

static inline u32 mib_read(struct mv643xx_eth_private *mp, int offset)
{
	return rdl(mp, MIB_COUNTERS(mp->port_num) + offset);
}

static void mib_counters_clear(struct mv643xx_eth_private *mp)
{
	int i;

	for (i = 0; i < 0x80; i += 4)
		mib_read(mp, i);
}

static void mib_counters_update(struct mv643xx_eth_private *mp)
{
	struct mib_counters *p = &mp->mib_counters;

	spin_lock_bh(&mp->mib_counters_lock);
	p->good_octets_received += mib_read(mp, 0x00);
	p->bad_octets_received += mib_read(mp, 0x08);
	p->internal_mac_transmit_err += mib_read(mp, 0x0c);
	p->good_frames_received += mib_read(mp, 0x10);
	p->bad_frames_received += mib_read(mp, 0x14);
	p->broadcast_frames_received += mib_read(mp, 0x18);
	p->multicast_frames_received += mib_read(mp, 0x1c);
	p->frames_64_octets += mib_read(mp, 0x20);
	p->frames_65_to_127_octets += mib_read(mp, 0x24);
	p->frames_128_to_255_octets += mib_read(mp, 0x28);
	p->frames_256_to_511_octets += mib_read(mp, 0x2c);
	p->frames_512_to_1023_octets += mib_read(mp, 0x30);
	p->frames_1024_to_max_octets += mib_read(mp, 0x34);
	p->good_octets_sent += mib_read(mp, 0x38);
	p->good_frames_sent += mib_read(mp, 0x40);
	p->excessive_collision += mib_read(mp, 0x44);
	p->multicast_frames_sent += mib_read(mp, 0x48);
	p->broadcast_frames_sent += mib_read(mp, 0x4c);
	p->unrec_mac_control_received += mib_read(mp, 0x50);
	p->fc_sent += mib_read(mp, 0x54);
	p->good_fc_received += mib_read(mp, 0x58);
	p->bad_fc_received += mib_read(mp, 0x5c);
	p->undersize_received += mib_read(mp, 0x60);
	p->fragments_received += mib_read(mp, 0x64);
	p->oversize_received += mib_read(mp, 0x68);
	p->jabber_received += mib_read(mp, 0x6c);
	p->mac_receive_error += mib_read(mp, 0x70);
	p->bad_crc_event += mib_read(mp, 0x74);
	p->collision += mib_read(mp, 0x78);
	p->late_collision += mib_read(mp, 0x7c);
	spin_unlock_bh(&mp->mib_counters_lock);

	mod_timer(&mp->mib_counters_timer, jiffies + 30 * HZ);
}

static void mib_counters_timer_wrapper(unsigned long _mp)
{
	struct mv643xx_eth_private *mp = (void *)_mp;

	mib_counters_update(mp);
}
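
/*
 * Editorial note (assumption based on the code above, not stated in
 * the original source): the hardware MIB registers appear to be
 * clear-on-read (mib_counters_clear() simply reads them all), so
 * mib_counters_update() accumulates into wider software counters and
 * re-arms itself every 30 seconds to keep the 32-bit hardware
 * counters from wrapping unnoticed between reads.
 */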

/* ethtool ******************************************************************/
struct mv643xx_eth_stats {
	char stat_string[ETH_GSTRING_LEN];
	int sizeof_stat;
	int netdev_off;
	int mp_off;
};

#define SSTAT(m)						\
	{ #m, FIELD_SIZEOF(struct net_device_stats, m),		\
	  offsetof(struct net_device, stats.m), -1 }

#define MIBSTAT(m)						\
	{ #m, FIELD_SIZEOF(struct mib_counters, m),		\
	  -1, offsetof(struct mv643xx_eth_private, mib_counters.m) }

static const struct mv643xx_eth_stats mv643xx_eth_stats[] = {
	SSTAT(rx_packets),
	SSTAT(tx_packets),
	SSTAT(rx_bytes),
	SSTAT(tx_bytes),
	SSTAT(rx_errors),
	SSTAT(tx_errors),
	SSTAT(rx_dropped),
	SSTAT(tx_dropped),
	MIBSTAT(good_octets_received),
	MIBSTAT(bad_octets_received),
	MIBSTAT(internal_mac_transmit_err),
	MIBSTAT(good_frames_received),
	MIBSTAT(bad_frames_received),
	MIBSTAT(broadcast_frames_received),
	MIBSTAT(multicast_frames_received),
	MIBSTAT(frames_64_octets),
	MIBSTAT(frames_65_to_127_octets),
	MIBSTAT(frames_128_to_255_octets),
	MIBSTAT(frames_256_to_511_octets),
	MIBSTAT(frames_512_to_1023_octets),
	MIBSTAT(frames_1024_to_max_octets),
	MIBSTAT(good_octets_sent),
	MIBSTAT(good_frames_sent),
	MIBSTAT(excessive_collision),
	MIBSTAT(multicast_frames_sent),
	MIBSTAT(broadcast_frames_sent),
	MIBSTAT(unrec_mac_control_received),
	MIBSTAT(fc_sent),
	MIBSTAT(good_fc_received),
	MIBSTAT(bad_fc_received),
	MIBSTAT(undersize_received),
	MIBSTAT(fragments_received),
	MIBSTAT(oversize_received),
	MIBSTAT(jabber_received),
	MIBSTAT(mac_receive_error),
	MIBSTAT(bad_crc_event),
	MIBSTAT(collision),
	MIBSTAT(late_collision),
};
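
/*
 * Editorial note (illustration, not from the original source): each
 * table entry records where the value lives -- an offset into struct
 * net_device for SSTAT entries, or into struct mv643xx_eth_private
 * for MIBSTAT entries -- so mv643xx_eth_get_ethtool_stats() below can
 * fetch any statistic with plain pointer arithmetic.  For example,
 * MIBSTAT(collision) expands to roughly:
 *
 *	{ "collision", sizeof(u32), -1,
 *	  offsetof(struct mv643xx_eth_private, mib_counters.collision) }
 */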

static int
mv643xx_eth_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
	struct mv643xx_eth_private *mp = netdev_priv(dev);
	int err;

	err = phy_read_status(mp->phy);
	if (err == 0)
		err = phy_ethtool_gset(mp->phy, cmd);

	/*
	 * The MAC does not support 1000baseT_Half.
	 */
	cmd->supported &= ~SUPPORTED_1000baseT_Half;
	cmd->advertising &= ~ADVERTISED_1000baseT_Half;

	return err;
}

static int
mv643xx_eth_get_settings_phyless(struct net_device *dev,
				 struct ethtool_cmd *cmd)
{
	struct mv643xx_eth_private *mp = netdev_priv(dev);
	u32 port_status;

	port_status = rdlp(mp, PORT_STATUS);

	cmd->supported = SUPPORTED_MII;
	cmd->advertising = ADVERTISED_MII;
	switch (port_status & PORT_SPEED_MASK) {
	case PORT_SPEED_10:
		cmd->speed = SPEED_10;
		break;
	case PORT_SPEED_100:
		cmd->speed = SPEED_100;
		break;
	case PORT_SPEED_1000:
		cmd->speed = SPEED_1000;
		break;
	default:
		cmd->speed = -1;
		break;
	}
	cmd->duplex = (port_status & FULL_DUPLEX) ? DUPLEX_FULL : DUPLEX_HALF;
	cmd->port = PORT_MII;
	cmd->phy_address = 0;
	cmd->transceiver = XCVR_INTERNAL;
	cmd->autoneg = AUTONEG_DISABLE;
	cmd->maxtxpkt = 1;
	cmd->maxrxpkt = 1;

	return 0;
}

static int
mv643xx_eth_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
	struct mv643xx_eth_private *mp = netdev_priv(dev);

	/*
	 * The MAC does not support 1000baseT_Half.
	 */
	cmd->advertising &= ~ADVERTISED_1000baseT_Half;

	return phy_ethtool_sset(mp->phy, cmd);
}

static int
mv643xx_eth_set_settings_phyless(struct net_device *dev,
				 struct ethtool_cmd *cmd)
{
	return -EINVAL;
}

static void mv643xx_eth_get_drvinfo(struct net_device *dev,
				    struct ethtool_drvinfo *drvinfo)
{
	strncpy(drvinfo->driver, mv643xx_eth_driver_name, 32);
	strncpy(drvinfo->version, mv643xx_eth_driver_version, 32);
	strncpy(drvinfo->fw_version, "N/A", 32);
	strncpy(drvinfo->bus_info, "platform", 32);
	drvinfo->n_stats = ARRAY_SIZE(mv643xx_eth_stats);
}

static int mv643xx_eth_nway_reset(struct net_device *dev)
{
	struct mv643xx_eth_private *mp = netdev_priv(dev);

	return genphy_restart_aneg(mp->phy);
}

static int mv643xx_eth_nway_reset_phyless(struct net_device *dev)
{
	return -EINVAL;
}

static u32 mv643xx_eth_get_link(struct net_device *dev)
{
	return !!netif_carrier_ok(dev);
}

static void mv643xx_eth_get_strings(struct net_device *dev,
				    uint32_t stringset, uint8_t *data)
{
	int i;

	if (stringset == ETH_SS_STATS) {
		for (i = 0; i < ARRAY_SIZE(mv643xx_eth_stats); i++) {
			memcpy(data + i * ETH_GSTRING_LEN,
				mv643xx_eth_stats[i].stat_string,
				ETH_GSTRING_LEN);
		}
	}
}

static void mv643xx_eth_get_ethtool_stats(struct net_device *dev,
					  struct ethtool_stats *stats,
					  uint64_t *data)
{
	struct mv643xx_eth_private *mp = netdev_priv(dev);
	int i;

	mv643xx_eth_get_stats(dev);
	mib_counters_update(mp);

	for (i = 0; i < ARRAY_SIZE(mv643xx_eth_stats); i++) {
		const struct mv643xx_eth_stats *stat;
		void *p;

		stat = mv643xx_eth_stats + i;

		if (stat->netdev_off >= 0)
			p = ((void *)mp->dev) + stat->netdev_off;
		else
			p = ((void *)mp) + stat->mp_off;

		data[i] = (stat->sizeof_stat == 8) ?
				*(uint64_t *)p : *(uint32_t *)p;
	}
}

static int mv643xx_eth_get_sset_count(struct net_device *dev, int sset)
{
	if (sset == ETH_SS_STATS)
		return ARRAY_SIZE(mv643xx_eth_stats);

	return -EOPNOTSUPP;
}

static const struct ethtool_ops mv643xx_eth_ethtool_ops = {
	.get_settings		= mv643xx_eth_get_settings,
	.set_settings		= mv643xx_eth_set_settings,
	.get_drvinfo		= mv643xx_eth_get_drvinfo,
	.nway_reset		= mv643xx_eth_nway_reset,
	.get_link		= mv643xx_eth_get_link,
	.set_sg			= ethtool_op_set_sg,
	.get_strings		= mv643xx_eth_get_strings,
	.get_ethtool_stats	= mv643xx_eth_get_ethtool_stats,
	.get_sset_count		= mv643xx_eth_get_sset_count,
};

static const struct ethtool_ops mv643xx_eth_ethtool_ops_phyless = {
	.get_settings		= mv643xx_eth_get_settings_phyless,
	.set_settings		= mv643xx_eth_set_settings_phyless,
	.get_drvinfo		= mv643xx_eth_get_drvinfo,
	.nway_reset		= mv643xx_eth_nway_reset_phyless,
	.get_link		= mv643xx_eth_get_link,
	.set_sg			= ethtool_op_set_sg,
	.get_strings		= mv643xx_eth_get_strings,
	.get_ethtool_stats	= mv643xx_eth_get_ethtool_stats,
	.get_sset_count		= mv643xx_eth_get_sset_count,
};

/* address handling *********************************************************/
static void uc_addr_get(struct mv643xx_eth_private *mp, unsigned char *addr)
{
	unsigned int mac_h = rdlp(mp, MAC_ADDR_HIGH);
	unsigned int mac_l = rdlp(mp, MAC_ADDR_LOW);

	addr[0] = (mac_h >> 24) & 0xff;
	addr[1] = (mac_h >> 16) & 0xff;
	addr[2] = (mac_h >> 8) & 0xff;
	addr[3] = mac_h & 0xff;
	addr[4] = (mac_l >> 8) & 0xff;
	addr[5] = mac_l & 0xff;
}

static void uc_addr_set(struct mv643xx_eth_private *mp, unsigned char *addr)
{
	wrlp(mp, MAC_ADDR_HIGH,
		(addr[0] << 24) | (addr[1] << 16) | (addr[2] << 8) | addr[3]);
	wrlp(mp, MAC_ADDR_LOW, (addr[4] << 8) | addr[5]);
}

static u32 uc_addr_filter_mask(struct net_device *dev)
{
	struct dev_addr_list *uc_ptr;
	u32 nibbles;

	if (dev->flags & IFF_PROMISC)
		return 0;

	nibbles = 1 << (dev->dev_addr[5] & 0x0f);
	for (uc_ptr = dev->uc_list; uc_ptr != NULL; uc_ptr = uc_ptr->next) {
		if (memcmp(dev->dev_addr, uc_ptr->da_addr, 5))
			return 0;
		if ((dev->dev_addr[5] ^ uc_ptr->da_addr[5]) & 0xf0)
			return 0;

		nibbles |= 1 << (uc_ptr->da_addr[5] & 0x0f);
	}

	return nibbles;
}
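
/*
 * Editorial note (worked example, not from the original source): the
 * unicast filter can match up to 16 addresses that differ only in the
 * low nibble of the last byte.  Assuming dev_addr ends in ...:52 and
 * one extra address ends in ...:5e, the function returns
 * (1 << 0x2) | (1 << 0xe) = 0x4004; any address outside that 16-slot
 * block forces a return of 0, which drops the port into unicast
 * promiscuous mode instead.
 */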

static void mv643xx_eth_program_unicast_filter(struct net_device *dev)
{
	struct mv643xx_eth_private *mp = netdev_priv(dev);
	u32 port_config;
	u32 nibbles;
	int i;

	uc_addr_set(mp, dev->dev_addr);

	port_config = rdlp(mp, PORT_CONFIG);
	nibbles = uc_addr_filter_mask(dev);
	if (!nibbles) {
		port_config |= UNICAST_PROMISCUOUS_MODE;
		wrlp(mp, PORT_CONFIG, port_config);
		return;
	}

	for (i = 0; i < 16; i += 4) {
		int off = UNICAST_TABLE(mp->port_num) + i;
		u32 v;

		v = 0;
		if (nibbles & 1)
			v |= 0x00000001;
		if (nibbles & 2)
			v |= 0x00000100;
		if (nibbles & 4)
			v |= 0x00010000;
		if (nibbles & 8)
			v |= 0x01000000;
		nibbles >>= 4;

		wrl(mp, off, v);
	}

	port_config &= ~UNICAST_PROMISCUOUS_MODE;
	wrlp(mp, PORT_CONFIG, port_config);
}

static int addr_crc(unsigned char *addr)
{
	int crc = 0;
	int i;

	for (i = 0; i < 6; i++) {
		int j;

		crc = (crc ^ addr[i]) << 8;
		for (j = 7; j >= 0; j--) {
			if (crc & (0x100 << j))
				crc ^= 0x107 << j;
		}
	}

	return crc;
}
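
/*
 * Editorial note (illustration, not from the original source): this
 * is a bitwise CRC-8 over the six address bytes using the polynomial
 * 0x107 (x^8 + x^2 + x + 1); the 8-bit result indexes one of the 256
 * bit slots in the "other" multicast hash table programmed below.
 */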

static void mv643xx_eth_program_multicast_filter(struct net_device *dev)
{
	struct mv643xx_eth_private *mp = netdev_priv(dev);
	u32 *mc_spec;
	u32 *mc_other;
	struct dev_addr_list *addr;
	int i;

	if (dev->flags & (IFF_PROMISC | IFF_ALLMULTI)) {
		int port_num;
		u32 accept;
		int i;

oom:
		port_num = mp->port_num;
		accept = 0x01010101;
		for (i = 0; i < 0x100; i += 4) {
			wrl(mp, SPECIAL_MCAST_TABLE(port_num) + i, accept);
			wrl(mp, OTHER_MCAST_TABLE(port_num) + i, accept);
		}
		return;
	}

	mc_spec = kmalloc(0x200, GFP_ATOMIC);
	if (mc_spec == NULL)
		goto oom;
	mc_other = mc_spec + (0x100 >> 2);

	memset(mc_spec, 0, 0x100);
	memset(mc_other, 0, 0x100);

	for (addr = dev->mc_list; addr != NULL; addr = addr->next) {
		u8 *a = addr->da_addr;
		u32 *table;
		int entry;

		if (memcmp(a, "\x01\x00\x5e\x00\x00", 5) == 0) {
			table = mc_spec;
			entry = a[5];
		} else {
			table = mc_other;
			entry = addr_crc(a);
		}

		table[entry >> 2] |= 1 << (8 * (entry & 3));
	}

	for (i = 0; i < 0x100; i += 4) {
		wrl(mp, SPECIAL_MCAST_TABLE(mp->port_num) + i, mc_spec[i >> 2]);
		wrl(mp, OTHER_MCAST_TABLE(mp->port_num) + i, mc_other[i >> 2]);
	}

	kfree(mc_spec);
}

static void mv643xx_eth_set_rx_mode(struct net_device *dev)
{
	mv643xx_eth_program_unicast_filter(dev);
	mv643xx_eth_program_multicast_filter(dev);
}

static int mv643xx_eth_set_mac_address(struct net_device *dev, void *addr)
{
	struct sockaddr *sa = addr;

	memcpy(dev->dev_addr, sa->sa_data, ETH_ALEN);

	netif_addr_lock_bh(dev);
	mv643xx_eth_program_unicast_filter(dev);
	netif_addr_unlock_bh(dev);

	return 0;
}

/* rx/tx queue initialisation ***********************************************/
static int rxq_init(struct mv643xx_eth_private *mp, int index)
{
	struct rx_queue *rxq = mp->rxq + index;
	struct rx_desc *rx_desc;
	int size;
	int i;

	rxq->index = index;

	rxq->rx_ring_size = mp->default_rx_ring_size;

	rxq->rx_desc_count = 0;
	rxq->rx_curr_desc = 0;
	rxq->rx_used_desc = 0;

	size = rxq->rx_ring_size * sizeof(struct rx_desc);

	if (index == 0 && size <= mp->rx_desc_sram_size) {
		rxq->rx_desc_area = ioremap(mp->rx_desc_sram_addr,
						mp->rx_desc_sram_size);
		rxq->rx_desc_dma = mp->rx_desc_sram_addr;
	} else {
		rxq->rx_desc_area = dma_alloc_coherent(NULL, size,
							&rxq->rx_desc_dma,
							GFP_KERNEL);
	}

	if (rxq->rx_desc_area == NULL) {
		dev_printk(KERN_ERR, &mp->dev->dev,
			   "can't allocate rx ring (%d bytes)\n", size);
		goto out;
	}
	memset(rxq->rx_desc_area, 0, size);

	rxq->rx_desc_area_size = size;
	rxq->rx_skb = kmalloc(rxq->rx_ring_size * sizeof(*rxq->rx_skb),
								GFP_KERNEL);
	if (rxq->rx_skb == NULL) {
		dev_printk(KERN_ERR, &mp->dev->dev,
			   "can't allocate rx skb ring\n");
		goto out_free;
	}

	rx_desc = (struct rx_desc *)rxq->rx_desc_area;
	for (i = 0; i < rxq->rx_ring_size; i++) {
		int nexti;

		nexti = i + 1;
		if (nexti == rxq->rx_ring_size)
			nexti = 0;

		rx_desc[i].next_desc_ptr = rxq->rx_desc_dma +
					nexti * sizeof(struct rx_desc);
	}

	return 0;


out_free:
	if (index == 0 && size <= mp->rx_desc_sram_size)
		iounmap(rxq->rx_desc_area);
	else
		dma_free_coherent(NULL, size,
				  rxq->rx_desc_area,
				  rxq->rx_desc_dma);

out:
	return -ENOMEM;
}

static void rxq_deinit(struct rx_queue *rxq)
{
	struct mv643xx_eth_private *mp = rxq_to_mp(rxq);
	int i;

	rxq_disable(rxq);

	for (i = 0; i < rxq->rx_ring_size; i++) {
		if (rxq->rx_skb[i]) {
			dev_kfree_skb(rxq->rx_skb[i]);
			rxq->rx_desc_count--;
		}
	}

	if (rxq->rx_desc_count) {
		dev_printk(KERN_ERR, &mp->dev->dev,
			   "error freeing rx ring -- %d skbs stuck\n",
			   rxq->rx_desc_count);
	}

	if (rxq->index == 0 &&
	    rxq->rx_desc_area_size <= mp->rx_desc_sram_size)
		iounmap(rxq->rx_desc_area);
	else
		dma_free_coherent(NULL, rxq->rx_desc_area_size,
				  rxq->rx_desc_area, rxq->rx_desc_dma);

	kfree(rxq->rx_skb);
}

static int txq_init(struct mv643xx_eth_private *mp, int index)
{
	struct tx_queue *txq = mp->txq + index;
	struct tx_desc *tx_desc;
	int size;
	int i;

	txq->index = index;

	txq->tx_ring_size = mp->default_tx_ring_size;

	txq->tx_desc_count = 0;
	txq->tx_curr_desc = 0;
	txq->tx_used_desc = 0;

	size = txq->tx_ring_size * sizeof(struct tx_desc);

	if (index == 0 && size <= mp->tx_desc_sram_size) {
		txq->tx_desc_area = ioremap(mp->tx_desc_sram_addr,
						mp->tx_desc_sram_size);
		txq->tx_desc_dma = mp->tx_desc_sram_addr;
	} else {
		txq->tx_desc_area = dma_alloc_coherent(NULL, size,
							&txq->tx_desc_dma,
							GFP_KERNEL);
	}

	if (txq->tx_desc_area == NULL) {
		dev_printk(KERN_ERR, &mp->dev->dev,
			   "can't allocate tx ring (%d bytes)\n", size);
		return -ENOMEM;
	}
	memset(txq->tx_desc_area, 0, size);

	txq->tx_desc_area_size = size;

	tx_desc = (struct tx_desc *)txq->tx_desc_area;
	for (i = 0; i < txq->tx_ring_size; i++) {
		struct tx_desc *txd = tx_desc + i;
		int nexti;

		nexti = i + 1;
		if (nexti == txq->tx_ring_size)
			nexti = 0;

		txd->cmd_sts = 0;
		txd->next_desc_ptr = txq->tx_desc_dma +
					nexti * sizeof(struct tx_desc);
	}

	skb_queue_head_init(&txq->tx_skb);

	return 0;
}

static void txq_deinit(struct tx_queue *txq)
{
	struct mv643xx_eth_private *mp = txq_to_mp(txq);

	txq_disable(txq);
	txq_reclaim(txq, txq->tx_ring_size, 1);

	BUG_ON(txq->tx_used_desc != txq->tx_curr_desc);

	if (txq->index == 0 &&
	    txq->tx_desc_area_size <= mp->tx_desc_sram_size)
		iounmap(txq->tx_desc_area);
	else
		dma_free_coherent(NULL, txq->tx_desc_area_size,
				  txq->tx_desc_area, txq->tx_desc_dma);
}

/* netdev ops and related ***************************************************/
static int mv643xx_eth_collect_events(struct mv643xx_eth_private *mp)
{
	u32 int_cause;
	u32 int_cause_ext;

	int_cause = rdlp(mp, INT_CAUSE) & (INT_TX_END | INT_RX | INT_EXT);
	if (int_cause == 0)
		return 0;

	int_cause_ext = 0;
	if (int_cause & INT_EXT)
		int_cause_ext = rdlp(mp, INT_CAUSE_EXT);

	int_cause &= INT_TX_END | INT_RX;
	if (int_cause) {
		wrlp(mp, INT_CAUSE, ~int_cause);
		mp->work_tx_end |= ((int_cause & INT_TX_END) >> 19) &
				~(rdlp(mp, TXQ_COMMAND) & 0xff);
		mp->work_rx |= (int_cause & INT_RX) >> 2;
	}

	int_cause_ext &= INT_EXT_LINK_PHY | INT_EXT_TX;
	if (int_cause_ext) {
		wrlp(mp, INT_CAUSE_EXT, ~int_cause_ext);
		if (int_cause_ext & INT_EXT_LINK_PHY)
			mp->work_link = 1;
		mp->work_tx |= int_cause_ext & INT_EXT_TX;
	}

	return 1;
}
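
/*
 * Editorial note (worked example, not from the original source): the
 * shifts align the per-queue interrupt bits with bit 0.  INT_TX_END
 * covers cause-register bits [26:19] and INT_RX bits [9:2], so a
 * TX-end interrupt for queue 3 (bit 22) becomes bit 3 of work_tx_end:
 *
 *	(0x00400000 >> 19) == 0x08
 *
 * Queues whose TXQ_COMMAND enable bit is still set are masked back
 * out, since the hardware has not actually finished them.
 */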

static irqreturn_t mv643xx_eth_irq(int irq, void *dev_id)
{
	struct net_device *dev = (struct net_device *)dev_id;
	struct mv643xx_eth_private *mp = netdev_priv(dev);

	if (unlikely(!mv643xx_eth_collect_events(mp)))
		return IRQ_NONE;

	wrlp(mp, INT_MASK, 0);
	napi_schedule(&mp->napi);

	return IRQ_HANDLED;
}

static void handle_link_event(struct mv643xx_eth_private *mp)
{
	struct net_device *dev = mp->dev;
	u32 port_status;
	int speed;
	int duplex;
	int fc;

	port_status = rdlp(mp, PORT_STATUS);
	if (!(port_status & LINK_UP)) {
		if (netif_carrier_ok(dev)) {
			int i;

			printk(KERN_INFO "%s: link down\n", dev->name);

			netif_carrier_off(dev);

			for (i = 0; i < mp->txq_count; i++) {
				struct tx_queue *txq = mp->txq + i;

				txq_reclaim(txq, txq->tx_ring_size, 1);
				txq_reset_hw_ptr(txq);
			}
		}
		return;
	}

	switch (port_status & PORT_SPEED_MASK) {
	case PORT_SPEED_10:
		speed = 10;
		break;
	case PORT_SPEED_100:
		speed = 100;
		break;
	case PORT_SPEED_1000:
		speed = 1000;
		break;
	default:
		speed = -1;
		break;
	}

	duplex = (port_status & FULL_DUPLEX) ? 1 : 0;
	fc = (port_status & FLOW_CONTROL_ENABLED) ? 1 : 0;

	printk(KERN_INFO "%s: link up, %d Mb/s, %s duplex, "
			 "flow control %sabled\n", dev->name,
			 speed, duplex ? "full" : "half",
			 fc ? "en" : "dis");

	if (!netif_carrier_ok(dev))
		netif_carrier_on(dev);
}

static int mv643xx_eth_poll(struct napi_struct *napi, int budget)
{
	struct mv643xx_eth_private *mp;
	int work_done;

	mp = container_of(napi, struct mv643xx_eth_private, napi);

	if (unlikely(mp->oom)) {
		mp->oom = 0;
		del_timer(&mp->rx_oom);
	}

	work_done = 0;
	while (work_done < budget) {
		u8 queue_mask;
		int queue;
		int work_tbd;

		if (mp->work_link) {
			mp->work_link = 0;
			handle_link_event(mp);
			continue;
		}

		queue_mask = mp->work_tx | mp->work_tx_end | mp->work_rx;
		if (likely(!mp->oom))
			queue_mask |= mp->work_rx_refill;

		if (!queue_mask) {
			if (mv643xx_eth_collect_events(mp))
				continue;
			break;
		}

		queue = fls(queue_mask) - 1;
		queue_mask = 1 << queue;

		work_tbd = budget - work_done;
		if (work_tbd > 16)
			work_tbd = 16;

		if (mp->work_tx_end & queue_mask) {
			txq_kick(mp->txq + queue);
		} else if (mp->work_tx & queue_mask) {
			work_done += txq_reclaim(mp->txq + queue, work_tbd, 0);
			txq_maybe_wake(mp->txq + queue);
		} else if (mp->work_rx & queue_mask) {
			work_done += rxq_process(mp->rxq + queue, work_tbd);
		} else if (!mp->oom && (mp->work_rx_refill & queue_mask)) {
			work_done += rxq_refill(mp->rxq + queue, work_tbd);
		} else {
			BUG();
		}
	}

	if (work_done < budget) {
		if (mp->oom)
			mod_timer(&mp->rx_oom, jiffies + (HZ / 10));
		napi_complete(napi);
		wrlp(mp, INT_MASK, INT_TX_END | INT_RX | INT_EXT);
	}

	return work_done;
}

static inline void oom_timer_wrapper(unsigned long data)
{
	struct mv643xx_eth_private *mp = (void *)data;

	napi_schedule(&mp->napi);
}

static void phy_reset(struct mv643xx_eth_private *mp)
{
	int data;

	data = phy_read(mp->phy, MII_BMCR);
	if (data < 0)
		return;

	data |= BMCR_RESET;
	if (phy_write(mp->phy, MII_BMCR, data) < 0)
		return;

	do {
		data = phy_read(mp->phy, MII_BMCR);
	} while (data >= 0 && data & BMCR_RESET);
}

static void port_start(struct mv643xx_eth_private *mp)
{
	u32 pscr;
	int i;

	/*
	 * Perform PHY reset, if there is a PHY.
	 */
	if (mp->phy != NULL) {
		struct ethtool_cmd cmd;

		mv643xx_eth_get_settings(mp->dev, &cmd);
		phy_reset(mp);
		mv643xx_eth_set_settings(mp->dev, &cmd);
	}

	/*
	 * Configure basic link parameters.
	 */
	pscr = rdlp(mp, PORT_SERIAL_CONTROL);

	pscr |= SERIAL_PORT_ENABLE;
	wrlp(mp, PORT_SERIAL_CONTROL, pscr);

	pscr |= DO_NOT_FORCE_LINK_FAIL;
	if (mp->phy == NULL)
		pscr |= FORCE_LINK_PASS;
	wrlp(mp, PORT_SERIAL_CONTROL, pscr);

	wrlp(mp, SDMA_CONFIG, PORT_SDMA_CONFIG_DEFAULT_VALUE);

	/*
	 * Configure TX path and queues.
	 */
	tx_set_rate(mp, 1000000000, 16777216);
	for (i = 0; i < mp->txq_count; i++) {
		struct tx_queue *txq = mp->txq + i;

		txq_reset_hw_ptr(txq);
		txq_set_rate(txq, 1000000000, 16777216);
		txq_set_fixed_prio_mode(txq);
	}

	/*
	 * Receive all unmatched unicast, TCP, UDP, BPDU and broadcast
	 * frames to RX queue #0, and include the pseudo-header when
	 * calculating receive checksums.
	 */
	wrlp(mp, PORT_CONFIG, 0x02000000);

	/*
	 * Treat BPDUs as normal multicasts, and disable partition mode.
	 */
	wrlp(mp, PORT_CONFIG_EXT, 0x00000000);

	/*
	 * Add configured unicast addresses to address filter table.
	 */
	mv643xx_eth_program_unicast_filter(mp->dev);

	/*
	 * Enable the receive queues.
	 */
	for (i = 0; i < mp->rxq_count; i++) {
		struct rx_queue *rxq = mp->rxq + i;
		u32 addr;

		addr = (u32)rxq->rx_desc_dma;
		addr += rxq->rx_curr_desc * sizeof(struct rx_desc);
		wrlp(mp, RXQ_CURRENT_DESC_PTR(i), addr);

		rxq_enable(rxq);
	}
}

static void set_rx_coal(struct mv643xx_eth_private *mp, unsigned int delay)
{
	unsigned int coal = ((mp->shared->t_clk / 1000000) * delay) / 64;
	u32 val;

	val = rdlp(mp, SDMA_CONFIG);
	if (mp->shared->extended_rx_coal_limit) {
		if (coal > 0xffff)
			coal = 0xffff;
		val &= ~0x023fff80;
		val |= (coal & 0x8000) << 10;
		val |= (coal & 0x7fff) << 7;
	} else {
		if (coal > 0x3fff)
			coal = 0x3fff;
		val &= ~0x003fff00;
		val |= (coal & 0x3fff) << 8;
	}
	wrlp(mp, SDMA_CONFIG, val);
}

static void set_tx_coal(struct mv643xx_eth_private *mp, unsigned int delay)
{
	unsigned int coal = ((mp->shared->t_clk / 1000000) * delay) / 64;

	if (coal > 0x3fff)
		coal = 0x3fff;
	wrlp(mp, TX_FIFO_URGENT_THRESHOLD, (coal & 0x3fff) << 4);
}
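
/*
 * Editorial note (worked example, not from the original source):
 * 'delay' is in microseconds and the hardware counts in units of 64
 * t_clk cycles.  Assuming t_clk = 133 MHz and delay = 100 us:
 *
 *	coal = ((133000000 / 1000000) * 100) / 64 = 13300 / 64 = 207
 */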

static void mv643xx_eth_recalc_skb_size(struct mv643xx_eth_private *mp)
{
	int skb_size;

	/*
	 * Reserve 2+14 bytes for an ethernet header (the hardware
	 * automatically prepends 2 bytes of dummy data to each
	 * received packet), 16 bytes for up to four VLAN tags, and
	 * 4 bytes for the trailing FCS -- 36 bytes total.
	 */
	skb_size = mp->dev->mtu + 36;

	/*
	 * Make sure that the skb size is a multiple of 8 bytes, as
	 * the lower three bits of the receive descriptor's buffer
	 * size field are ignored by the hardware.
	 */
	mp->skb_size = (skb_size + 7) & ~7;
}
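
/*
 * Editorial note (worked example, not from the original source): for
 * the default MTU of 1500, skb_size = 1536, which is already a
 * multiple of 8; for an MTU of 1522, skb_size = 1558 and rounds up
 * to 1560.
 */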

static int mv643xx_eth_open(struct net_device *dev)
{
	struct mv643xx_eth_private *mp = netdev_priv(dev);
	int err;
	int i;

	wrlp(mp, INT_CAUSE, 0);
	wrlp(mp, INT_CAUSE_EXT, 0);
	rdlp(mp, INT_CAUSE_EXT);

	err = request_irq(dev->irq, mv643xx_eth_irq,
			  IRQF_SHARED, dev->name, dev);
	if (err) {
		dev_printk(KERN_ERR, &dev->dev, "can't assign irq\n");
		return -EAGAIN;
	}

	mv643xx_eth_recalc_skb_size(mp);

	napi_enable(&mp->napi);

	skb_queue_head_init(&mp->rx_recycle);

	for (i = 0; i < mp->rxq_count; i++) {
		err = rxq_init(mp, i);
		if (err) {
			while (--i >= 0)
				rxq_deinit(mp->rxq + i);
			goto out;
		}

		rxq_refill(mp->rxq + i, INT_MAX);
	}

	if (mp->oom) {
		mp->rx_oom.expires = jiffies + (HZ / 10);
		add_timer(&mp->rx_oom);
	}

	for (i = 0; i < mp->txq_count; i++) {
		err = txq_init(mp, i);
		if (err) {
			while (--i >= 0)
				txq_deinit(mp->txq + i);
			goto out_free;
		}
	}

	netif_carrier_off(dev);

	port_start(mp);

	set_rx_coal(mp, 0);
	set_tx_coal(mp, 0);

	wrlp(mp, INT_MASK_EXT, INT_EXT_LINK_PHY | INT_EXT_TX);
	wrlp(mp, INT_MASK, INT_TX_END | INT_RX | INT_EXT);

	return 0;


out_free:
	for (i = 0; i < mp->rxq_count; i++)
		rxq_deinit(mp->rxq + i);
out:
	free_irq(dev->irq, dev);

	return err;
}

static void port_reset(struct mv643xx_eth_private *mp)
{
	unsigned int data;
	int i;

	for (i = 0; i < mp->rxq_count; i++)
		rxq_disable(mp->rxq + i);
	for (i = 0; i < mp->txq_count; i++)
		txq_disable(mp->txq + i);

	while (1) {
		u32 ps = rdlp(mp, PORT_STATUS);

		if ((ps & (TX_IN_PROGRESS | TX_FIFO_EMPTY)) == TX_FIFO_EMPTY)
			break;
		udelay(10);
	}

	/* Reset the Enable bit in the Configuration Register */
	data = rdlp(mp, PORT_SERIAL_CONTROL);
	data &= ~(SERIAL_PORT_ENABLE		|
		  DO_NOT_FORCE_LINK_FAIL	|
		  FORCE_LINK_PASS);
	wrlp(mp, PORT_SERIAL_CONTROL, data);
}

static int mv643xx_eth_stop(struct net_device *dev)
{
	struct mv643xx_eth_private *mp = netdev_priv(dev);
	int i;

	wrlp(mp, INT_MASK_EXT, 0x00000000);
	wrlp(mp, INT_MASK, 0x00000000);
	rdlp(mp, INT_MASK);

	napi_disable(&mp->napi);

	del_timer_sync(&mp->rx_oom);

	netif_carrier_off(dev);

	free_irq(dev->irq, dev);

	port_reset(mp);
	mv643xx_eth_get_stats(dev);
	mib_counters_update(mp);
	del_timer_sync(&mp->mib_counters_timer);

	skb_queue_purge(&mp->rx_recycle);

	for (i = 0; i < mp->rxq_count; i++)
		rxq_deinit(mp->rxq + i);
	for (i = 0; i < mp->txq_count; i++)
		txq_deinit(mp->txq + i);

	return 0;
}
2244 static int mv643xx_eth_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
2246 struct mv643xx_eth_private *mp = netdev_priv(dev);
2248 if (mp->phy != NULL)
2249 return phy_mii_ioctl(mp->phy, if_mii(ifr), cmd);
2251 return -EOPNOTSUPP;
2254 static int mv643xx_eth_change_mtu(struct net_device *dev, int new_mtu)
2256 struct mv643xx_eth_private *mp = netdev_priv(dev);
2258 if (new_mtu < 64 || new_mtu > 9500)
2259 return -EINVAL;
2261 dev->mtu = new_mtu;
2262 mv643xx_eth_recalc_skb_size(mp);
2263 tx_set_rate(mp, 1000000000, 16777216);
2265 if (!netif_running(dev))
2266 return 0;
2269 * Stop and then re-open the interface. This will allocate RX
2270 * skbs of the new MTU.
2271	 * Note that the re-open may fail if the system is out of
2272	 * memory, in which case the interface is left non-functional.
2274 mv643xx_eth_stop(dev);
2275 if (mv643xx_eth_open(dev)) {
2276 dev_printk(KERN_ERR, &dev->dev,
2277 "fatal error on re-opening device after "
2278 "MTU change\n");
2281 return 0;
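/*
 * tx_timeout_task() runs from the shared workqueue (scheduled by
 * mv643xx_eth_tx_timeout() below), so it is allowed to sleep; it
 * recovers from a TX hang by resetting and restarting the port.
 */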
2284 static void tx_timeout_task(struct work_struct *ugly)
2286 struct mv643xx_eth_private *mp;
2288 mp = container_of(ugly, struct mv643xx_eth_private, tx_timeout_task);
2289 if (netif_running(mp->dev)) {
2290 netif_tx_stop_all_queues(mp->dev);
2291 port_reset(mp);
2292 port_start(mp);
2293 netif_tx_wake_all_queues(mp->dev);
2297 static void mv643xx_eth_tx_timeout(struct net_device *dev)
2299 struct mv643xx_eth_private *mp = netdev_priv(dev);
2301 dev_printk(KERN_INFO, &dev->dev, "tx timeout\n");
2303 schedule_work(&mp->tx_timeout_task);
2306 #ifdef CONFIG_NET_POLL_CONTROLLER
2307 static void mv643xx_eth_netpoll(struct net_device *dev)
2309 struct mv643xx_eth_private *mp = netdev_priv(dev);
2311 wrlp(mp, INT_MASK, 0x00000000);
2312 rdlp(mp, INT_MASK);
2314 mv643xx_eth_irq(dev->irq, dev);
2316 wrlp(mp, INT_MASK, INT_TX_END | INT_RX | INT_EXT);
2318 #endif
2321 /* platform glue ************************************************************/
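/*
 * Program the six address decoding windows so that DMA accesses
 * from the ethernet unit hit the right DRAM chip selects: all
 * windows are first disabled, then one window per chip select is
 * opened and its protection field set to allow access.
 */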
2322 static void
2323 mv643xx_eth_conf_mbus_windows(struct mv643xx_eth_shared_private *msp,
2324 struct mbus_dram_target_info *dram)
2326 void __iomem *base = msp->base;
2327 u32 win_enable;
2328 u32 win_protect;
2329 int i;
2331 for (i = 0; i < 6; i++) {
2332 writel(0, base + WINDOW_BASE(i));
2333 writel(0, base + WINDOW_SIZE(i));
2334 if (i < 4)
2335 writel(0, base + WINDOW_REMAP_HIGH(i));
2338 win_enable = 0x3f;
2339 win_protect = 0;
2341 for (i = 0; i < dram->num_cs; i++) {
2342 struct mbus_dram_window *cs = dram->cs + i;
2344 writel((cs->base & 0xffff0000) |
2345 (cs->mbus_attr << 8) |
2346 dram->mbus_dram_target_id, base + WINDOW_BASE(i));
2347 writel((cs->size - 1) & 0xffff0000, base + WINDOW_SIZE(i));
2349 win_enable &= ~(1 << i);
2350 win_protect |= 3 << (2 * i);
2353 writel(win_enable, base + WINDOW_BAR_ENABLE);
2354 msp->win_protect = win_protect;
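/*
 * Feature detection by probing: write a distinguishing bit to a
 * port #0 register and read it back -- if the bit sticks, the
 * corresponding hardware feature (extended RX coal limit, TX
 * rate control register layout) is present.
 */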
2357 static void infer_hw_params(struct mv643xx_eth_shared_private *msp)
2360 * Check whether we have a 14-bit coal limit field in bits
2361 * [21:8], or a 16-bit coal limit in bits [25,21:7] of the
2362 * SDMA config register.
2364 writel(0x02000000, msp->base + 0x0400 + SDMA_CONFIG);
2365 if (readl(msp->base + 0x0400 + SDMA_CONFIG) & 0x02000000)
2366 msp->extended_rx_coal_limit = 1;
2367 else
2368 msp->extended_rx_coal_limit = 0;
2371 * Check whether the MAC supports TX rate control, and if
2372 * yes, whether its associated registers are in the old or
2373 * the new place.
2375 writel(1, msp->base + 0x0400 + TX_BW_MTU_MOVED);
2376 if (readl(msp->base + 0x0400 + TX_BW_MTU_MOVED) & 1) {
2377 msp->tx_bw_control = TX_BW_CONTROL_NEW_LAYOUT;
2378 } else {
2379 writel(7, msp->base + 0x0400 + TX_BW_RATE);
2380 if (readl(msp->base + 0x0400 + TX_BW_RATE) & 7)
2381 msp->tx_bw_control = TX_BW_CONTROL_OLD_LAYOUT;
2382 else
2383 msp->tx_bw_control = TX_BW_CONTROL_ABSENT;
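/*
 * The shared probe maps the common register window, registers the
 * SMI (MDIO) bus unless we are told to piggyback on another
 * controller's bus, hooks up the optional error interrupt, and
 * programs the MBUS windows before detecting hardware parameters.
 */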
2387 static int mv643xx_eth_shared_probe(struct platform_device *pdev)
2389 static int mv643xx_eth_version_printed;
2390 struct mv643xx_eth_shared_platform_data *pd = pdev->dev.platform_data;
2391 struct mv643xx_eth_shared_private *msp;
2392 struct resource *res;
2393 int ret;
2395 if (!mv643xx_eth_version_printed++)
2396 printk(KERN_NOTICE "MV-643xx 10/100/1000 ethernet "
2397 "driver version %s\n", mv643xx_eth_driver_version);
2399 ret = -EINVAL;
2400 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
2401 if (res == NULL)
2402 goto out;
2404 ret = -ENOMEM;
2405	msp = kzalloc(sizeof(*msp), GFP_KERNEL);
2406	if (msp == NULL)
2407	goto out;
2410	msp->base = ioremap(res->start, resource_size(res));
2411 if (msp->base == NULL)
2412 goto out_free;
2415 * Set up and register SMI bus.
2417 if (pd == NULL || pd->shared_smi == NULL) {
2418 msp->smi_bus = mdiobus_alloc();
2419 if (msp->smi_bus == NULL)
2420 goto out_unmap;
2422 msp->smi_bus->priv = msp;
2423 msp->smi_bus->name = "mv643xx_eth smi";
2424 msp->smi_bus->read = smi_bus_read;
2425	msp->smi_bus->write = smi_bus_write;
2426 snprintf(msp->smi_bus->id, MII_BUS_ID_SIZE, "%d", pdev->id);
2427 msp->smi_bus->parent = &pdev->dev;
2428 msp->smi_bus->phy_mask = 0xffffffff;
2429 if (mdiobus_register(msp->smi_bus) < 0)
2430 goto out_free_mii_bus;
2431 msp->smi = msp;
2432 } else {
2433 msp->smi = platform_get_drvdata(pd->shared_smi);
2436 msp->err_interrupt = NO_IRQ;
2437 init_waitqueue_head(&msp->smi_busy_wait);
2440 * Check whether the error interrupt is hooked up.
2442 res = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
2443 if (res != NULL) {
2444 int err;
2446 err = request_irq(res->start, mv643xx_eth_err_irq,
2447 IRQF_SHARED, "mv643xx_eth", msp);
2448 if (!err) {
2449 writel(ERR_INT_SMI_DONE, msp->base + ERR_INT_MASK);
2450 msp->err_interrupt = res->start;
2455 * (Re-)program MBUS remapping windows if we are asked to.
2457 if (pd != NULL && pd->dram != NULL)
2458 mv643xx_eth_conf_mbus_windows(msp, pd->dram);
2461 * Detect hardware parameters.
2463 msp->t_clk = (pd != NULL && pd->t_clk != 0) ? pd->t_clk : 133000000;
2464 infer_hw_params(msp);
2466 platform_set_drvdata(pdev, msp);
2468 return 0;
2470 out_free_mii_bus:
2471 mdiobus_free(msp->smi_bus);
2472 out_unmap:
2473 iounmap(msp->base);
2474 out_free:
2475 kfree(msp);
2476 out:
2477 return ret;
2480 static int mv643xx_eth_shared_remove(struct platform_device *pdev)
2482 struct mv643xx_eth_shared_private *msp = platform_get_drvdata(pdev);
2483 struct mv643xx_eth_shared_platform_data *pd = pdev->dev.platform_data;
2485 if (pd == NULL || pd->shared_smi == NULL) {
2486 mdiobus_unregister(msp->smi_bus);
2487 mdiobus_free(msp->smi_bus);
2489 if (msp->err_interrupt != NO_IRQ)
2490 free_irq(msp->err_interrupt, msp);
2491 iounmap(msp->base);
2492 kfree(msp);
2494 return 0;
2497 static struct platform_driver mv643xx_eth_shared_driver = {
2498 .probe = mv643xx_eth_shared_probe,
2499 .remove = mv643xx_eth_shared_remove,
2500 .driver = {
2501 .name = MV643XX_ETH_SHARED_NAME,
2502 .owner = THIS_MODULE,
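/*
 * The shared PHY_ADDR register holds one 5-bit PHY address per
 * port, at bit offset 5 * port_num.
 */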
2506 static void phy_addr_set(struct mv643xx_eth_private *mp, int phy_addr)
2508 int addr_shift = 5 * mp->port_num;
2509 u32 data;
2511 data = rdl(mp, PHY_ADDR);
2512 data &= ~(0x1f << addr_shift);
2513 data |= (phy_addr & 0x1f) << addr_shift;
2514 wrl(mp, PHY_ADDR, data);
2517 static int phy_addr_get(struct mv643xx_eth_private *mp)
2519 unsigned int data;
2521 data = rdl(mp, PHY_ADDR);
2523 return (data >> (5 * mp->port_num)) & 0x1f;
2526 static void set_params(struct mv643xx_eth_private *mp,
2527 struct mv643xx_eth_platform_data *pd)
2529 struct net_device *dev = mp->dev;
2531 if (is_valid_ether_addr(pd->mac_addr))
2532	memcpy(dev->dev_addr, pd->mac_addr, ETH_ALEN);
2533 else
2534 uc_addr_get(mp, dev->dev_addr);
2536 mp->default_rx_ring_size = DEFAULT_RX_QUEUE_SIZE;
2537 if (pd->rx_queue_size)
2538 mp->default_rx_ring_size = pd->rx_queue_size;
2539 mp->rx_desc_sram_addr = pd->rx_sram_addr;
2540 mp->rx_desc_sram_size = pd->rx_sram_size;
2542 mp->rxq_count = pd->rx_queue_count ? : 1;
2544 mp->default_tx_ring_size = DEFAULT_TX_QUEUE_SIZE;
2545 if (pd->tx_queue_size)
2546 mp->default_tx_ring_size = pd->tx_queue_size;
2547 mp->tx_desc_sram_addr = pd->tx_sram_addr;
2548 mp->tx_desc_sram_size = pd->tx_sram_size;
2550 mp->txq_count = pd->tx_queue_count ? : 1;
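/*
 * Scan for a PHY.  If no explicit address was given, start at the
 * address currently programmed into the PHY_ADDR register and try
 * all 32 addresses, wrapping around; the first PHY found is
 * latched back into PHY_ADDR.
 */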
2553 static struct phy_device *phy_scan(struct mv643xx_eth_private *mp,
2554 int phy_addr)
2556 struct mii_bus *bus = mp->shared->smi->smi_bus;
2557 struct phy_device *phydev;
2558 int start;
2559 int num;
2560 int i;
2562 if (phy_addr == MV643XX_ETH_PHY_ADDR_DEFAULT) {
2563 start = phy_addr_get(mp) & 0x1f;
2564 num = 32;
2565 } else {
2566 start = phy_addr & 0x1f;
2567 num = 1;
2570 phydev = NULL;
2571 for (i = 0; i < num; i++) {
2572 int addr = (start + i) & 0x1f;
2574 if (bus->phy_map[addr] == NULL)
2575 mdiobus_scan(bus, addr);
2577 if (phydev == NULL) {
2578 phydev = bus->phy_map[addr];
2579 if (phydev != NULL)
2580 phy_addr_set(mp, addr);
2584 return phydev;
2587 static void phy_init(struct mv643xx_eth_private *mp, int speed, int duplex)
2589 struct phy_device *phy = mp->phy;
2591 phy_reset(mp);
2593 phy_attach(mp->dev, phy->dev.bus_id, 0, PHY_INTERFACE_MODE_GMII);
2595 if (speed == 0) {
2596 phy->autoneg = AUTONEG_ENABLE;
2597 phy->speed = 0;
2598 phy->duplex = 0;
2599 phy->advertising = phy->supported | ADVERTISED_Autoneg;
2600 } else {
2601 phy->autoneg = AUTONEG_DISABLE;
2602 phy->advertising = 0;
2603 phy->speed = speed;
2604 phy->duplex = duplex;
2606 phy_start_aneg(phy);
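/*
 * Program the port serial control register.  With no PHY attached
 * the port cannot autonegotiate, so speed, duplex and flow
 * control are forced from the platform data instead.
 */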
2609 static void init_pscr(struct mv643xx_eth_private *mp, int speed, int duplex)
2611 u32 pscr;
2613 pscr = rdlp(mp, PORT_SERIAL_CONTROL);
2614 if (pscr & SERIAL_PORT_ENABLE) {
2615 pscr &= ~SERIAL_PORT_ENABLE;
2616 wrlp(mp, PORT_SERIAL_CONTROL, pscr);
2619 pscr = MAX_RX_PACKET_9700BYTE | SERIAL_PORT_CONTROL_RESERVED;
2620 if (mp->phy == NULL) {
2621 pscr |= DISABLE_AUTO_NEG_SPEED_GMII;
2622 if (speed == SPEED_1000)
2623 pscr |= SET_GMII_SPEED_TO_1000;
2624 else if (speed == SPEED_100)
2625 pscr |= SET_MII_SPEED_TO_100;
2627 pscr |= DISABLE_AUTO_NEG_FOR_FLOW_CTRL;
2629 pscr |= DISABLE_AUTO_NEG_FOR_DUPLEX;
2630 if (duplex == DUPLEX_FULL)
2631 pscr |= SET_FULL_DUPLEX_MODE;
2634 wrlp(mp, PORT_SERIAL_CONTROL, pscr);
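/*
 * Per-port probe.  Each port's registers live at a 0x400-byte
 * stride after the first 0x400 bytes of the shared window, i.e.
 * at 0x0400 + (port_number << 10).
 */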
2637 static int mv643xx_eth_probe(struct platform_device *pdev)
2639 struct mv643xx_eth_platform_data *pd;
2640 struct mv643xx_eth_private *mp;
2641 struct net_device *dev;
2642 struct resource *res;
2643 int err;
2645 pd = pdev->dev.platform_data;
2646 if (pd == NULL) {
2647 dev_printk(KERN_ERR, &pdev->dev,
2648 "no mv643xx_eth_platform_data\n");
2649 return -ENODEV;
2652 if (pd->shared == NULL) {
2653 dev_printk(KERN_ERR, &pdev->dev,
2654 "no mv643xx_eth_platform_data->shared\n");
2655 return -ENODEV;
2658 dev = alloc_etherdev_mq(sizeof(struct mv643xx_eth_private), 8);
2659 if (!dev)
2660 return -ENOMEM;
2662 mp = netdev_priv(dev);
2663 platform_set_drvdata(pdev, mp);
2665 mp->shared = platform_get_drvdata(pd->shared);
2666 mp->base = mp->shared->base + 0x0400 + (pd->port_number << 10);
2667 mp->port_num = pd->port_number;
2669 mp->dev = dev;
2671 set_params(mp, pd);
2672 dev->real_num_tx_queues = mp->txq_count;
2674 if (pd->phy_addr != MV643XX_ETH_PHY_NONE)
2675 mp->phy = phy_scan(mp, pd->phy_addr);
2677 if (mp->phy != NULL) {
2678 phy_init(mp, pd->speed, pd->duplex);
2679 SET_ETHTOOL_OPS(dev, &mv643xx_eth_ethtool_ops);
2680 } else {
2681 SET_ETHTOOL_OPS(dev, &mv643xx_eth_ethtool_ops_phyless);
2684 init_pscr(mp, pd->speed, pd->duplex);
2687 mib_counters_clear(mp);
2689 init_timer(&mp->mib_counters_timer);
2690 mp->mib_counters_timer.data = (unsigned long)mp;
2691 mp->mib_counters_timer.function = mib_counters_timer_wrapper;
2692 mp->mib_counters_timer.expires = jiffies + 30 * HZ;
2693 add_timer(&mp->mib_counters_timer);
2695 spin_lock_init(&mp->mib_counters_lock);
2697 INIT_WORK(&mp->tx_timeout_task, tx_timeout_task);
2699 netif_napi_add(dev, &mp->napi, mv643xx_eth_poll, 128);
2701 init_timer(&mp->rx_oom);
2702 mp->rx_oom.data = (unsigned long)mp;
2703 mp->rx_oom.function = oom_timer_wrapper;
2706 res = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
2707 BUG_ON(!res);
2708 dev->irq = res->start;
2710 dev->get_stats = mv643xx_eth_get_stats;
2711 dev->hard_start_xmit = mv643xx_eth_xmit;
2712 dev->open = mv643xx_eth_open;
2713 dev->stop = mv643xx_eth_stop;
2714 dev->set_rx_mode = mv643xx_eth_set_rx_mode;
2715 dev->set_mac_address = mv643xx_eth_set_mac_address;
2716 dev->do_ioctl = mv643xx_eth_ioctl;
2717 dev->change_mtu = mv643xx_eth_change_mtu;
2718 dev->tx_timeout = mv643xx_eth_tx_timeout;
2719 #ifdef CONFIG_NET_POLL_CONTROLLER
2720 dev->poll_controller = mv643xx_eth_netpoll;
2721 #endif
2722 dev->watchdog_timeo = 2 * HZ;
2723 dev->base_addr = 0;
2725 dev->features = NETIF_F_SG | NETIF_F_IP_CSUM;
2726 dev->vlan_features = NETIF_F_SG | NETIF_F_IP_CSUM;
2728 SET_NETDEV_DEV(dev, &pdev->dev);
2730 if (mp->shared->win_protect)
2731 wrl(mp, WINDOW_PROTECT(mp->port_num), mp->shared->win_protect);
2733 err = register_netdev(dev);
2734 if (err)
2735 goto out;
2737 dev_printk(KERN_NOTICE, &dev->dev, "port %d with MAC address %pM\n",
2738 mp->port_num, dev->dev_addr);
2740 if (mp->tx_desc_sram_size > 0)
2741 dev_printk(KERN_NOTICE, &dev->dev, "configured with sram\n");
2743 return 0;
2745 out:
2746 free_netdev(dev);
2748 return err;
2751 static int mv643xx_eth_remove(struct platform_device *pdev)
2753 struct mv643xx_eth_private *mp = platform_get_drvdata(pdev);
2755 unregister_netdev(mp->dev);
2756 if (mp->phy != NULL)
2757 phy_detach(mp->phy);
2758 flush_scheduled_work();
2759 free_netdev(mp->dev);
2761 platform_set_drvdata(pdev, NULL);
2763 return 0;
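/*
 * On system shutdown or reboot, mask the port interrupts and, if
 * the interface is running, reset the port so that it stops
 * DMA'ing into memory.
 */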
2766 static void mv643xx_eth_shutdown(struct platform_device *pdev)
2768 struct mv643xx_eth_private *mp = platform_get_drvdata(pdev);
2770 /* Mask all interrupts on ethernet port */
2771 wrlp(mp, INT_MASK, 0);
2772 rdlp(mp, INT_MASK);
2774 if (netif_running(mp->dev))
2775 port_reset(mp);
2778 static struct platform_driver mv643xx_eth_driver = {
2779 .probe = mv643xx_eth_probe,
2780 .remove = mv643xx_eth_remove,
2781 .shutdown = mv643xx_eth_shutdown,
2782 .driver = {
2783 .name = MV643XX_ETH_NAME,
2784 .owner = THIS_MODULE,
2788 static int __init mv643xx_eth_init_module(void)
2790 int rc;
2792 rc = platform_driver_register(&mv643xx_eth_shared_driver);
2793 if (!rc) {
2794 rc = platform_driver_register(&mv643xx_eth_driver);
2795 if (rc)
2796 platform_driver_unregister(&mv643xx_eth_shared_driver);
2799 return rc;
2801 module_init(mv643xx_eth_init_module);
2803 static void __exit mv643xx_eth_cleanup_module(void)
2805 platform_driver_unregister(&mv643xx_eth_driver);
2806 platform_driver_unregister(&mv643xx_eth_shared_driver);
2808 module_exit(mv643xx_eth_cleanup_module);
2810 MODULE_AUTHOR("Rabeeh Khoury, Assaf Hoffman, Matthew Dharm, "
2811 "Manish Lachwani, Dale Farnsworth and Lennert Buytenhek");
2812 MODULE_DESCRIPTION("Ethernet driver for Marvell MV643XX");
2813 MODULE_LICENSE("GPL");
2814 MODULE_ALIAS("platform:" MV643XX_ETH_SHARED_NAME);
2815 MODULE_ALIAS("platform:" MV643XX_ETH_NAME);