/*
 * drivers/net/titan_ge.c - Driver for Titan ethernet ports
 *
 * Copyright (C) 2003 PMC-Sierra Inc.
 * Author : Manish Lachwani (lachwani@pmc-sierra.com)
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version 2
 * of the License, or (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 *
 * The MAC unit of the Titan consists of the following:
 *
 * -> XDMA Engine to move data to and from memory and the MAC packet FIFO
 * -> FIFO is where the incoming and outgoing data is placed
 * -> TRTG is the unit that pulls the data from the FIFO for Tx and pushes
 *    the data into the FIFO for Rx
 * -> TMAC is the outgoing MAC interface and RMAC is the incoming one
 * -> AFX is the address filtering block
 * -> GMII block to communicate with the PHY
 *
 * Rx looks like the following:
 * GMII --> RMAC --> AFX --> TRTG --> Rx FIFO --> XDMA --> CPU memory
 *
 * Tx looks like the following:
 * CPU memory --> XDMA --> Tx FIFO --> TRTG --> TMAC --> GMII
 *
 * The Titan driver has support for the following performance features:
 * -> Rx side checksumming
 * -> Jumbo Frames
 * -> Interrupt Coalescing
 * -> Rx NAPI
 * -> SKB Recycling
 * -> Transmit/Receive descriptors in SRAM
 * -> Fast routing for IP forwarding
 */

#include <linux/dma-mapping.h>
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/ioport.h>
#include <linux/interrupt.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/ip.h>
#include <linux/init.h>
#include <linux/in.h>
#include <linux/platform_device.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/mii.h>
#include <linux/delay.h>
#include <linux/prefetch.h>

/* For MII specific registers, titan_mdio.h should be included */
#include <net/ip.h>

#include <asm/bitops.h>
#include <asm/io.h>
#include <asm/types.h>
#include <asm/pgtable.h>
#include <asm/system.h>
#include <asm/titan_dep.h>

#include "titan_ge.h"
#include "titan_mdio.h"

/* Static Function Declarations */
static int titan_ge_eth_open(struct net_device *);
static void titan_ge_eth_stop(struct net_device *);
static struct net_device_stats *titan_ge_get_stats(struct net_device *);
static int titan_ge_init_rx_desc_ring(titan_ge_port_info *, int, int,
				      unsigned long, unsigned long,
				      unsigned long);
static int titan_ge_init_tx_desc_ring(titan_ge_port_info *, int,
				      unsigned long, unsigned long);

static int titan_ge_open(struct net_device *);
static int titan_ge_start_xmit(struct sk_buff *, struct net_device *);
static int titan_ge_stop(struct net_device *);

static unsigned long titan_ge_tx_coal(unsigned long, int);

static void titan_ge_port_reset(unsigned int);
static int titan_ge_free_tx_queue(titan_ge_port_info *);
static int titan_ge_rx_task(struct net_device *, titan_ge_port_info *);
static int titan_ge_port_start(struct net_device *, titan_ge_port_info *);

static int titan_ge_return_tx_desc(titan_ge_port_info *, int);

/*
 * Some configuration for the FIFO and the XDMA channel needs
 * to be done only once for all the ports.  This flag controls
 * that.
 */
static unsigned long config_done;

/*
 * One time out of memory flag
 */
static unsigned int oom_flag;

static int titan_ge_poll(struct net_device *netdev, int *budget);

static int titan_ge_receive_queue(struct net_device *, unsigned int);

static struct platform_device *titan_ge_device[3];

/* MAC Address */
extern unsigned char titan_ge_mac_addr_base[6];

unsigned long titan_ge_base;
static unsigned long titan_ge_sram;

static char titan_string[] = "titan";

/*
 * The Titan GE has two alignment requirements:
 * -> skb->data to be cacheline aligned (32 bytes)
 * -> IP header alignment to 16 bytes
 *
 * The latter is not implemented.  So, that results in an extra copy on
 * the Rx.  This is a big performance hog.  For the former case, the
 * dev_alloc_skb() has been replaced with titan_ge_alloc_skb().  The size
 * requested is calculated as follows:
 *
 * Ethernet Frame Size				: 1518
 * Ethernet Header				: 14
 * Future Titan change for IP header alignment	: 2
 *
 * Hence, we allocate (1518 + 14 + 2 + 64) = 1598 bytes.  For IP header
 * alignment, we use skb_reserve().
 */

#define ALIGNED_RX_SKB_ADDR(addr) \
	((((unsigned long)(addr) + (64UL - 1UL)) \
	& ~(64UL - 1UL)) - (unsigned long)(addr))
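
/*
 * ALIGNED_RX_SKB_ADDR() yields the number of pad bytes needed to bring
 * 'addr' up to the next 64-byte boundary; titan_ge_alloc_skb() below
 * over-allocates by 64 bytes and then skb_reserve()s that pad so that
 * skb->data ends up cacheline aligned.
 */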
#define titan_ge_alloc_skb(__length, __gfp_flags)			\
({	struct sk_buff *__skb;						\
	__skb = alloc_skb((__length) + 64, (__gfp_flags));		\
	if (__skb) {							\
		int __offset = (int) ALIGNED_RX_SKB_ADDR(__skb->data);	\
		if (__offset)						\
			skb_reserve(__skb, __offset);			\
	}								\
	__skb;								\
})

/*
 * Configure the GMII block of the Titan based on what the PHY tells us
 */
static void titan_ge_gmii_config(int port_num)
{
	unsigned int reg_data = 0, phy_reg;
	int err;

	err = titan_ge_mdio_read(port_num, TITAN_GE_MDIO_PHY_STATUS, &phy_reg);

	if (err == TITAN_GE_MDIO_ERROR) {
		printk(KERN_ERR
		       "Could not read PHY control register 0x11\n");
		printk(KERN_ERR
		       "Setting speed to 1000 Mbps and Duplex to Full\n");

		return;
	}

	err = titan_ge_mdio_write(port_num, TITAN_GE_MDIO_PHY_IE, 0);
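
	/*
	 * Decode the PHY-specific status word (as the cases below show):
	 * bit 15 indicates a resolved 1000 Mbps link, bit 14 a resolved
	 * 100 Mbps link, and bit 13 full duplex.
	 */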
	if (phy_reg & 0x8000) {
		if (phy_reg & 0x2000) {
			/* Full Duplex and 1000 Mbps */
			TITAN_GE_WRITE((TITAN_GE_GMII_CONFIG_MODE +
					(port_num << 12)), 0x201);
		} else {
			/* Half Duplex and 1000 Mbps */
			TITAN_GE_WRITE((TITAN_GE_GMII_CONFIG_MODE +
					(port_num << 12)), 0x2201);
		}
	}
	if (phy_reg & 0x4000) {
		if (phy_reg & 0x2000) {
			/* Full Duplex and 100 Mbps */
			TITAN_GE_WRITE((TITAN_GE_GMII_CONFIG_MODE +
					(port_num << 12)), 0x100);
		} else {
			/* Half Duplex and 100 Mbps */
			TITAN_GE_WRITE((TITAN_GE_GMII_CONFIG_MODE +
					(port_num << 12)), 0x2100);
		}
	}

	reg_data = TITAN_GE_READ(TITAN_GE_GMII_CONFIG_GENERAL +
				 (port_num << 12));
	reg_data |= 0x3;
	TITAN_GE_WRITE((TITAN_GE_GMII_CONFIG_GENERAL +
			(port_num << 12)), reg_data);
}

/*
 * Enable the TMAC if it is not
 */
static void titan_ge_enable_tx(unsigned int port_num)
{
	unsigned long reg_data;

	reg_data = TITAN_GE_READ(TITAN_GE_TMAC_CONFIG_1 + (port_num << 12));
	if (!(reg_data & 0x8000)) {
		printk("TMAC disabled for port %d!!\n", port_num);

		reg_data |= 0x0001;	/* Enable TMAC */
		reg_data |= 0x4000;	/* CRC Check Enable */
		reg_data |= 0x2000;	/* Padding enable */
		reg_data |= 0x0800;	/* CRC Add enable */
		reg_data |= 0x0080;	/* PAUSE frame */

		TITAN_GE_WRITE((TITAN_GE_TMAC_CONFIG_1 +
				(port_num << 12)), reg_data);
	}
}

/*
 * Tx timeout function
 */
static void titan_ge_tx_timeout(struct net_device *netdev)
{
	titan_ge_port_info *titan_ge_eth = netdev_priv(netdev);

	printk(KERN_INFO "%s: TX timeout, resetting card\n", netdev->name);

	/* Do the reset outside of interrupt context */
	schedule_work(&titan_ge_eth->tx_timeout_task);
}

/*
 * Update the AFX tables for UC and MC for slice 0 only
 */
static void titan_ge_update_afx(titan_ge_port_info * titan_ge_eth)
{
	int port = titan_ge_eth->port_num;
	unsigned int i;
	volatile unsigned long reg_data = 0;
	u8 p_addr[6];

	memcpy(p_addr, titan_ge_eth->port_mac_addr, 6);

	/* Set the MAC address here for TMAC and RMAC */
	TITAN_GE_WRITE((TITAN_GE_TMAC_STATION_HI + (port << 12)),
		       ((p_addr[5] << 8) | p_addr[4]));
	TITAN_GE_WRITE((TITAN_GE_TMAC_STATION_MID + (port << 12)),
		       ((p_addr[3] << 8) | p_addr[2]));
	TITAN_GE_WRITE((TITAN_GE_TMAC_STATION_LOW + (port << 12)),
		       ((p_addr[1] << 8) | p_addr[0]));

	TITAN_GE_WRITE((TITAN_GE_RMAC_STATION_HI + (port << 12)),
		       ((p_addr[5] << 8) | p_addr[4]));
	TITAN_GE_WRITE((TITAN_GE_RMAC_STATION_MID + (port << 12)),
		       ((p_addr[3] << 8) | p_addr[2]));
	TITAN_GE_WRITE((TITAN_GE_RMAC_STATION_LOW + (port << 12)),
		       ((p_addr[1] << 8) | p_addr[0]));
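
	/*
	 * Register 0x112c has no name in titan_ge.h; from its placement
	 * it appears to arm the AFX block before the eight filters are
	 * programmed below (an assumption; the register is not
	 * documented in this driver).
	 */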
	TITAN_GE_WRITE((0x112c | (port << 12)), 0x1);

	/* Configure the eight address filters */
	for (i = 0; i < 8; i++) {
		/* Select each of the eight filters */
		TITAN_GE_WRITE((TITAN_GE_AFX_ADDRS_FILTER_CTRL_2 +
				(port << 12)), i);

		/* Configure the match */
		reg_data = 0x9;	/* Forward Enable Bit */
		TITAN_GE_WRITE((TITAN_GE_AFX_ADDRS_FILTER_CTRL_0 +
				(port << 12)), reg_data);

		/* Finally, AFX Exact Match Address Registers */
		TITAN_GE_WRITE((TITAN_GE_AFX_EXACT_MATCH_LOW + (port << 12)),
			       ((p_addr[1] << 8) | p_addr[0]));
		TITAN_GE_WRITE((TITAN_GE_AFX_EXACT_MATCH_MID + (port << 12)),
			       ((p_addr[3] << 8) | p_addr[2]));
		TITAN_GE_WRITE((TITAN_GE_AFX_EXACT_MATCH_HIGH + (port << 12)),
			       ((p_addr[5] << 8) | p_addr[4]));

		/* VLAN id set to 0 */
		TITAN_GE_WRITE((TITAN_GE_AFX_EXACT_MATCH_VID +
				(port << 12)), 0);
	}
}

/*
 * Actual routine to reset the adapter when the timeout occurred
 */
static void titan_ge_tx_timeout_task(struct net_device *netdev)
{
	titan_ge_port_info *titan_ge_eth = netdev_priv(netdev);
	int port = titan_ge_eth->port_num;

	printk("Titan GE: Transmit timed out. Resetting ...\n");

	/* Dump debug info */
	printk(KERN_ERR "TRTG cause : %x\n",
	       TITAN_GE_READ(0x100c + (port << 12)));

	/* Fix this for the other ports */
	printk(KERN_ERR "FIFO cause : %x\n", TITAN_GE_READ(0x482c));
	printk(KERN_ERR "IE cause : %x\n", TITAN_GE_READ(0x0040));
	printk(KERN_ERR "XDMA GDI ERROR : %x\n",
	       TITAN_GE_READ(0x5008 + (port << 8)));
	printk(KERN_ERR "CHANNEL ERROR: %x\n",
	       TITAN_GE_READ(TITAN_GE_CHANNEL0_INTERRUPT
			     + (port << 8)));

	netif_device_detach(netdev);
	titan_ge_port_reset(titan_ge_eth->port_num);
	titan_ge_port_start(netdev, titan_ge_eth);
	netif_device_attach(netdev);
}

/*
 * Change the MTU of the Ethernet Device
 */
static int titan_ge_change_mtu(struct net_device *netdev, int new_mtu)
{
	titan_ge_port_info *titan_ge_eth = netdev_priv(netdev);
	unsigned long flags;

	if ((new_mtu > 9500) || (new_mtu < 64))
		return -EINVAL;

	spin_lock_irqsave(&titan_ge_eth->lock, flags);

	netdev->mtu = new_mtu;

	/* Now we have to reopen the interface so that SKBs with the new
	 * size will be allocated */

	if (netif_running(netdev)) {
		titan_ge_eth_stop(netdev);

		if (titan_ge_eth_open(netdev) != TITAN_OK) {
			printk(KERN_ERR
			       "%s: Fatal error on opening device\n",
			       netdev->name);
			spin_unlock_irqrestore(&titan_ge_eth->lock, flags);
			return -1;
		}
	}

	spin_unlock_irqrestore(&titan_ge_eth->lock, flags);
	return 0;
}

/*
 * Titan GbE Interrupt Handler.  All three ports send interrupts on a
 * single line.  Once an interrupt is triggered, figure out the port
 * and then check the channel.
 */
static irqreturn_t titan_ge_int_handler(int irq, void *dev_id)
{
	struct net_device *netdev = (struct net_device *) dev_id;
	titan_ge_port_info *titan_ge_eth = netdev_priv(netdev);
	unsigned int port_num = titan_ge_eth->port_num;
	unsigned int reg_data;
	unsigned int eth_int_cause_error = 0, is;
	unsigned long eth_int_cause1;
	int err = 0;
#ifdef CONFIG_SMP
	unsigned long eth_int_cause2;
#endif

	/* Ack the CPU interrupt */
	switch (port_num) {
	case 0:
		is = OCD_READ(RM9000x2_OCD_INTP0STATUS1);
		OCD_WRITE(RM9000x2_OCD_INTP0CLEAR1, is);

#ifdef CONFIG_SMP
		is = OCD_READ(RM9000x2_OCD_INTP1STATUS1);
		OCD_WRITE(RM9000x2_OCD_INTP1CLEAR1, is);
#endif
		break;

	case 1:
		is = OCD_READ(RM9000x2_OCD_INTP0STATUS0);
		OCD_WRITE(RM9000x2_OCD_INTP0CLEAR0, is);

#ifdef CONFIG_SMP
		is = OCD_READ(RM9000x2_OCD_INTP1STATUS0);
		OCD_WRITE(RM9000x2_OCD_INTP1CLEAR0, is);
#endif
		break;

	case 2:
		is = OCD_READ(RM9000x2_OCD_INTP0STATUS4);
		OCD_WRITE(RM9000x2_OCD_INTP0CLEAR4, is);

#ifdef CONFIG_SMP
		is = OCD_READ(RM9000x2_OCD_INTP1STATUS4);
		OCD_WRITE(RM9000x2_OCD_INTP1CLEAR4, is);
#endif
		break;
	}

	eth_int_cause1 = TITAN_GE_READ(TITAN_GE_INTR_XDMA_CORE_A);
#ifdef CONFIG_SMP
	eth_int_cause2 = TITAN_GE_READ(TITAN_GE_INTR_XDMA_CORE_B);
#endif

	/* Spurious interrupt */
#ifdef CONFIG_SMP
	if ((eth_int_cause1 == 0) && (eth_int_cause2 == 0)) {
#else
	if (eth_int_cause1 == 0) {
#endif
		eth_int_cause_error = TITAN_GE_READ(TITAN_GE_CHANNEL0_INTERRUPT +
						    (port_num << 8));

		if (eth_int_cause_error == 0)
			return IRQ_NONE;
	}
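
	/*
	 * Each port's XDMA cause bits occupy one byte of the cause
	 * register: bit 0 is Rx and bit 1 is Tx for port 0, bits 8/9
	 * for port 1 and bits 16/17 for port 2.  Hence 0x20202 masks
	 * the Tx bits of all three ports and 0x10101 the Rx bits.
	 */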

	/* Handle Tx first.  No need to ack interrupts */
#ifdef CONFIG_SMP
	if ((eth_int_cause1 & 0x20202) ||
	    (eth_int_cause2 & 0x20202))
#else
	if (eth_int_cause1 & 0x20202)
#endif
		titan_ge_free_tx_queue(titan_ge_eth);

	/* Handle the Rx next */
#ifdef CONFIG_SMP
	if ((eth_int_cause1 & 0x10101) ||
	    (eth_int_cause2 & 0x10101)) {
#else
	if (eth_int_cause1 & 0x10101) {
#endif
		if (netif_rx_schedule_prep(netdev)) {
			unsigned int ack;

			ack = TITAN_GE_READ(TITAN_GE_INTR_XDMA_IE);
			/* Disable Tx and Rx both */
			if (port_num == 0)
				ack &= ~(0x3);
			if (port_num == 1)
				ack &= ~(0x300);
			if (port_num == 2)
				ack &= ~(0x30000);

			/* Interrupts have been disabled */
			TITAN_GE_WRITE(TITAN_GE_INTR_XDMA_IE, ack);

			__netif_rx_schedule(netdev);
		}
	}

	/* Handle error interrupts */
	if (eth_int_cause_error && (eth_int_cause_error != 0x2)) {
		printk(KERN_ERR
		       "XDMA Channel Error : %x on port %d\n",
		       eth_int_cause_error, port_num);

		printk(KERN_ERR
		       "XDMA GDI Hardware error : %x on port %d\n",
		       TITAN_GE_READ(0x5008 + (port_num << 8)), port_num);

		printk(KERN_ERR
		       "XDMA currently has %d Rx descriptors\n",
		       TITAN_GE_READ(0x5048 + (port_num << 8)));

		printk(KERN_ERR
		       "XDMA currently has prefetched %d Rx descriptors\n",
		       TITAN_GE_READ(0x505c + (port_num << 8)));

		TITAN_GE_WRITE((TITAN_GE_CHANNEL0_INTERRUPT +
				(port_num << 8)), eth_int_cause_error);
	}

	/*
	 * PHY interrupt to inform about the changes.  Reading the
	 * PHY Status register will clear the interrupt.
	 */
	if ((!(eth_int_cause1 & 0x30303)) &&
	    (eth_int_cause_error == 0)) {
		err = titan_ge_mdio_read(port_num,
					 TITAN_GE_MDIO_PHY_IS, &reg_data);

		if (reg_data & 0x0400) {
			/* Link status change */
			titan_ge_mdio_read(port_num,
					   TITAN_GE_MDIO_PHY_STATUS, &reg_data);
			if (!(reg_data & 0x0400)) {
				/* Link is down */
				netif_carrier_off(netdev);
				netif_stop_queue(netdev);
			} else {
				/* Link is up */
				netif_carrier_on(netdev);
				netif_wake_queue(netdev);

				/* Enable the queue */
				titan_ge_enable_tx(port_num);
			}
		}
	}

	return IRQ_HANDLED;
}

/*
 * Multicast and Promiscuous mode set.  The
 * set_multi entry point is called whenever the
 * multicast address list or the network interface
 * flags are updated.
 */
static void titan_ge_set_multi(struct net_device *netdev)
{
	titan_ge_port_info *titan_ge_eth = netdev_priv(netdev);
	unsigned int port_num = titan_ge_eth->port_num;
	unsigned long reg_data;

	reg_data = TITAN_GE_READ(TITAN_GE_AFX_ADDRS_FILTER_CTRL_1 +
				 (port_num << 12));

	if (netdev->flags & IFF_PROMISC) {
		reg_data |= 0x2;
	}
	else if (netdev->flags & IFF_ALLMULTI) {
		reg_data |= 0x01;
		reg_data |= 0x400;	/* Use the 64-bit Multicast Hash bin */
	}
	else {
		reg_data = 0x2;
	}

	TITAN_GE_WRITE((TITAN_GE_AFX_ADDRS_FILTER_CTRL_1 +
			(port_num << 12)), reg_data);
	if (reg_data & 0x01) {
		TITAN_GE_WRITE((TITAN_GE_AFX_MULTICAST_HASH_LOW +
				(port_num << 12)), 0xffff);
		TITAN_GE_WRITE((TITAN_GE_AFX_MULTICAST_HASH_MIDLOW +
				(port_num << 12)), 0xffff);
		TITAN_GE_WRITE((TITAN_GE_AFX_MULTICAST_HASH_MIDHI +
				(port_num << 12)), 0xffff);
		TITAN_GE_WRITE((TITAN_GE_AFX_MULTICAST_HASH_HI +
				(port_num << 12)), 0xffff);
	}
}

/*
 * Open the network device
 */
static int titan_ge_open(struct net_device *netdev)
{
	titan_ge_port_info *titan_ge_eth = netdev_priv(netdev);
	unsigned int port_num = titan_ge_eth->port_num;
	unsigned int irq = TITAN_ETH_PORT_IRQ - port_num;
	int retval;

	retval = request_irq(irq, titan_ge_int_handler,
			     SA_INTERRUPT | SA_SAMPLE_RANDOM, netdev->name, netdev);

	if (retval != 0) {
		printk(KERN_ERR "Cannot assign IRQ number to TITAN GE\n");
		return -1;
	}

	netdev->irq = irq;
	printk(KERN_INFO "Assigned IRQ %d to port %d\n", irq, port_num);

	spin_lock_irq(&(titan_ge_eth->lock));

	if (titan_ge_eth_open(netdev) != TITAN_OK) {
		spin_unlock_irq(&(titan_ge_eth->lock));
		printk("%s: Error opening interface\n", netdev->name);
		free_irq(netdev->irq, netdev);
		return -EBUSY;
	}

	spin_unlock_irq(&(titan_ge_eth->lock));

	return 0;
}

/*
 * Allocate the SKBs for the Rx ring.  Also used
 * for refilling the queue
 */
static int titan_ge_rx_task(struct net_device *netdev,
			    titan_ge_port_info *titan_ge_port)
{
	struct device *device = &titan_ge_device[titan_ge_port->port_num]->dev;
	volatile titan_ge_rx_desc *rx_desc;
	struct sk_buff *skb;
	int rx_used_desc;
	int count = 0;

	while (titan_ge_port->rx_ring_skbs < titan_ge_port->rx_ring_size) {

		/* First try to get the skb from the recycler */
#ifdef TITAN_GE_JUMBO_FRAMES
		skb = titan_ge_alloc_skb(TITAN_GE_JUMBO_BUFSIZE, GFP_ATOMIC);
#else
		skb = titan_ge_alloc_skb(TITAN_GE_STD_BUFSIZE, GFP_ATOMIC);
#endif
		if (unlikely(!skb)) {
			/* OOM, set the flag */
			printk("OOM\n");
			oom_flag = 1;
			break;
		}

		count++;
		skb->dev = netdev;

		titan_ge_port->rx_ring_skbs++;
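
		/*
		 * Hand the fresh buffer to the next unused Rx descriptor;
		 * setting cmd_sts below passes ownership of the buffer to
		 * the hardware.
		 */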
		rx_used_desc = titan_ge_port->rx_used_desc_q;
		rx_desc = &(titan_ge_port->rx_desc_area[rx_used_desc]);

#ifdef TITAN_GE_JUMBO_FRAMES
		rx_desc->buffer_addr = dma_map_single(device, skb->data,
					TITAN_GE_JUMBO_BUFSIZE - 2, DMA_FROM_DEVICE);
#else
		rx_desc->buffer_addr = dma_map_single(device, skb->data,
					TITAN_GE_STD_BUFSIZE - 2, DMA_FROM_DEVICE);
#endif

		titan_ge_port->rx_skb[rx_used_desc] = skb;
		rx_desc->cmd_sts = TITAN_GE_RX_BUFFER_OWNED;

		titan_ge_port->rx_used_desc_q =
			(rx_used_desc + 1) % TITAN_GE_RX_QUEUE;
	}

	return count;
}

/*
 * Actual init of the Titan GE port.  There is one register for
 * the channel configuration
 */
static void titan_port_init(struct net_device *netdev,
			    titan_ge_port_info * titan_ge_eth)
{
	unsigned long reg_data;

	titan_ge_port_reset(titan_ge_eth->port_num);

	/* First reset the TMAC */
	reg_data = TITAN_GE_READ(TITAN_GE_CHANNEL0_CONFIG);
	reg_data |= 0x80000000;
	TITAN_GE_WRITE(TITAN_GE_CHANNEL0_CONFIG, reg_data);

	udelay(30);

	reg_data = TITAN_GE_READ(TITAN_GE_CHANNEL0_CONFIG);
	reg_data &= ~(0xc0000000);
	TITAN_GE_WRITE(TITAN_GE_CHANNEL0_CONFIG, reg_data);

	/* Now reset the RMAC */
	reg_data = TITAN_GE_READ(TITAN_GE_CHANNEL0_CONFIG);
	reg_data |= 0x00080000;
	TITAN_GE_WRITE(TITAN_GE_CHANNEL0_CONFIG, reg_data);

	udelay(30);

	reg_data = TITAN_GE_READ(TITAN_GE_CHANNEL0_CONFIG);
	reg_data &= ~(0x000c0000);
	TITAN_GE_WRITE(TITAN_GE_CHANNEL0_CONFIG, reg_data);
}

/*
 * Start the port.  All the hardware specific configuration
 * for the XDMA, Tx FIFO, Rx FIFO, TMAC, RMAC, TRTG and AFX
 * goes here.
 */
static int titan_ge_port_start(struct net_device *netdev,
			       titan_ge_port_info * titan_port)
{
	volatile unsigned long reg_data, reg_data1;
	int port_num = titan_port->port_num;
	int count = 0;
	unsigned long reg_data_1;
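
	/*
	 * One-time, chip-global setup (guarded by config_done): pulse
	 * the reset bit, select GMII/MII mode over TBI, and program the
	 * CPU resource limit and BIU buffer registers.
	 */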
	if (config_done == 0) {
		reg_data = TITAN_GE_READ(0x0004);
		reg_data |= 0x100;
		TITAN_GE_WRITE(0x0004, reg_data);

		reg_data &= ~(0x100);
		TITAN_GE_WRITE(0x0004, reg_data);

		/* Turn on GMII/MII mode and turn off TBI mode */
		reg_data = TITAN_GE_READ(TITAN_GE_TSB_CTRL_1);
		reg_data |= 0x00000700;
		reg_data &= ~(0x00800000);	/* Fencing */

		TITAN_GE_WRITE(0x000c, 0x00001100);

		TITAN_GE_WRITE(TITAN_GE_TSB_CTRL_1, reg_data);

		/* Set the CPU Resource Limit register */
		TITAN_GE_WRITE(0x00f8, 0x8);

		/* Be conservative when using the BIU buffers */
		TITAN_GE_WRITE(0x0068, 0x4);
	}

	titan_port->tx_threshold = 0;
	titan_port->rx_threshold = 0;

	/* We need to write the descriptors for Tx and Rx */
	TITAN_GE_WRITE((TITAN_GE_CHANNEL0_TX_DESC + (port_num << 8)),
		       (unsigned long) titan_port->tx_dma);
	TITAN_GE_WRITE((TITAN_GE_CHANNEL0_RX_DESC + (port_num << 8)),
		       (unsigned long) titan_port->rx_dma);

	if (config_done == 0) {
		/* Step 1:  XDMA config */
		reg_data = TITAN_GE_READ(TITAN_GE_XDMA_CONFIG);
		reg_data &= ~(0x80000000);	/* clear reset */
		reg_data |= 0x1 << 29;	/* sparse tx descriptor spacing */
		reg_data |= 0x1 << 28;	/* sparse rx descriptor spacing */
		reg_data |= (0x1 << 23) | (0x1 << 24);	/* Descriptor Coherency */
		reg_data |= (0x1 << 21) | (0x1 << 22);	/* Data Coherency */
		TITAN_GE_WRITE(TITAN_GE_XDMA_CONFIG, reg_data);
	}

	/* IR register for the XDMA */
	reg_data = TITAN_GE_READ(TITAN_GE_GDI_INTERRUPT_ENABLE + (port_num << 8));
	reg_data |= 0x80068000;	/* No Rx_OOD */
	TITAN_GE_WRITE((TITAN_GE_GDI_INTERRUPT_ENABLE + (port_num << 8)), reg_data);

	/* Start the Tx and Rx XDMA controller */
	reg_data = TITAN_GE_READ(TITAN_GE_CHANNEL0_CONFIG + (port_num << 8));
	reg_data &= 0x4fffffff;	/* Clear tx reset */
	reg_data &= 0xfff4ffff;	/* Clear rx reset */
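
	/*
	 * Burst and threshold values differ between jumbo and standard
	 * frame operation; the constants below are magic values carried
	 * over from the original PMC-Sierra configuration.
	 */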
#ifdef TITAN_GE_JUMBO_FRAMES
	reg_data |= 0xa0 | 0x30030000;
#else
	reg_data |= 0x40 | 0x20030000;
#endif

#ifndef CONFIG_SMP
	reg_data &= ~(0x10);
	reg_data |= 0x0f;	/* All of the packet */
#endif

	TITAN_GE_WRITE((TITAN_GE_CHANNEL0_CONFIG + (port_num << 8)), reg_data);

	/* Rx desc count */
	count = titan_ge_rx_task(netdev, titan_port);
	TITAN_GE_WRITE((0x5048 + (port_num << 8)), count);
	count = TITAN_GE_READ(0x5048 + (port_num << 8));

	udelay(30);

	/*
	 * Step 2:  Configure the SDQPF, i.e. FIFO
	 */
	if (config_done == 0) {
		reg_data = TITAN_GE_READ(TITAN_GE_SDQPF_RXFIFO_CTL);
		reg_data = 0x1;
		TITAN_GE_WRITE(TITAN_GE_SDQPF_RXFIFO_CTL, reg_data);
		reg_data &= ~(0x1);
		TITAN_GE_WRITE(TITAN_GE_SDQPF_RXFIFO_CTL, reg_data);
		reg_data = TITAN_GE_READ(TITAN_GE_SDQPF_RXFIFO_CTL);
		TITAN_GE_WRITE(TITAN_GE_SDQPF_RXFIFO_CTL, reg_data);

		reg_data = TITAN_GE_READ(TITAN_GE_SDQPF_TXFIFO_CTL);
		reg_data = 0x1;
		TITAN_GE_WRITE(TITAN_GE_SDQPF_TXFIFO_CTL, reg_data);
		reg_data &= ~(0x1);
		TITAN_GE_WRITE(TITAN_GE_SDQPF_TXFIFO_CTL, reg_data);
		reg_data = TITAN_GE_READ(TITAN_GE_SDQPF_TXFIFO_CTL);
		TITAN_GE_WRITE(TITAN_GE_SDQPF_TXFIFO_CTL, reg_data);
	}

	/*
	 * Enable RX FIFO 0, 4 and 8
	 */
	if (port_num == 0) {
		reg_data = TITAN_GE_READ(TITAN_GE_SDQPF_RXFIFO_0);

		reg_data |= 0x100000;
		reg_data |= (0xff << 10);

		TITAN_GE_WRITE(TITAN_GE_SDQPF_RXFIFO_0, reg_data);

		/*
		 * BAV2, BAV and DAV settings for the Rx FIFO
		 */
		reg_data1 = TITAN_GE_READ(0x4844);
		reg_data1 |= ((0x10 << 20) | (0x10 << 10) | 0x1);
		TITAN_GE_WRITE(0x4844, reg_data1);

		reg_data &= ~(0x00100000);
		reg_data |= 0x200000;

		TITAN_GE_WRITE(TITAN_GE_SDQPF_RXFIFO_0, reg_data);

		reg_data = TITAN_GE_READ(TITAN_GE_SDQPF_TXFIFO_0);
		reg_data |= 0x100000;

		TITAN_GE_WRITE(TITAN_GE_SDQPF_TXFIFO_0, reg_data);

		reg_data |= (0xff << 10);

		TITAN_GE_WRITE(TITAN_GE_SDQPF_TXFIFO_0, reg_data);

		/*
		 * BAV2, BAV and DAV settings for the Tx FIFO
		 */
		reg_data1 = TITAN_GE_READ(0x4944);
		reg_data1 = ((0x1 << 20) | (0x1 << 10) | 0x10);

		TITAN_GE_WRITE(0x4944, reg_data1);

		reg_data &= ~(0x00100000);
		reg_data |= 0x200000;

		TITAN_GE_WRITE(TITAN_GE_SDQPF_TXFIFO_0, reg_data);
	}

	if (port_num == 1) {
		reg_data = TITAN_GE_READ(0x4870);

		reg_data |= 0x100000;
		reg_data |= (0xff << 10) | (0xff + 1);

		TITAN_GE_WRITE(0x4870, reg_data);

		/*
		 * BAV2, BAV and DAV settings for the Rx FIFO
		 */
		reg_data1 = TITAN_GE_READ(0x4874);
		reg_data1 |= ((0x10 << 20) | (0x10 << 10) | 0x1);
		TITAN_GE_WRITE(0x4874, reg_data1);

		reg_data &= ~(0x00100000);
		reg_data |= 0x200000;

		TITAN_GE_WRITE(0x4870, reg_data);

		reg_data = TITAN_GE_READ(0x494c);
		reg_data |= 0x100000;

		TITAN_GE_WRITE(0x494c, reg_data);
		reg_data |= (0xff << 10) | (0xff + 1);
		TITAN_GE_WRITE(0x494c, reg_data);

		/*
		 * BAV2, BAV and DAV settings for the Tx FIFO
		 */
		reg_data1 = TITAN_GE_READ(0x4950);
		reg_data1 = ((0x1 << 20) | (0x1 << 10) | 0x10);

		TITAN_GE_WRITE(0x4950, reg_data1);

		reg_data &= ~(0x00100000);
		reg_data |= 0x200000;

		TITAN_GE_WRITE(0x494c, reg_data);
	}

	/*
	 * The Titan 1.2 revision does support port #2
	 */
	if (port_num == 2) {
		/*
		 * Put the descriptors in the SRAM
		 */
		reg_data = TITAN_GE_READ(0x48a0);

		reg_data |= 0x100000;
		reg_data |= (0xff << 10) | (2*(0xff + 1));

		TITAN_GE_WRITE(0x48a0, reg_data);

		/*
		 * BAV2, BAV and DAV settings for the Rx FIFO
		 */
		reg_data1 = TITAN_GE_READ(0x48a4);
		reg_data1 |= ((0x10 << 20) | (0x10 << 10) | 0x1);
		TITAN_GE_WRITE(0x48a4, reg_data1);

		reg_data &= ~(0x00100000);
		reg_data |= 0x200000;

		TITAN_GE_WRITE(0x48a0, reg_data);

		reg_data = TITAN_GE_READ(0x4958);
		reg_data |= 0x100000;

		TITAN_GE_WRITE(0x4958, reg_data);
		reg_data |= (0xff << 10) | (2*(0xff + 1));
		TITAN_GE_WRITE(0x4958, reg_data);

		/*
		 * BAV2, BAV and DAV settings for the Tx FIFO
		 */
		reg_data1 = TITAN_GE_READ(0x495c);
		reg_data1 = ((0x1 << 20) | (0x1 << 10) | 0x10);

		TITAN_GE_WRITE(0x495c, reg_data1);

		reg_data &= ~(0x00100000);
		reg_data |= 0x200000;

		TITAN_GE_WRITE(0x4958, reg_data);
	}

	/*
	 * Step 3:  TRTG block enable
	 */
	reg_data = TITAN_GE_READ(TITAN_GE_TRTG_CONFIG + (port_num << 12));

	/*
	 * This is the 1.2 revision of the chip.  It has a fix for the
	 * IP header alignment.  Now, the IP header begins at an
	 * aligned address and this won't need an extra copy in the
	 * driver.  This performance drawback existed in the previous
	 * versions of the silicon.
	 */
	reg_data_1 = TITAN_GE_READ(0x103c + (port_num << 12));
	reg_data_1 |= 0x40000000;
	TITAN_GE_WRITE((0x103c + (port_num << 12)), reg_data_1);

	reg_data_1 |= 0x04000000;
	TITAN_GE_WRITE((0x103c + (port_num << 12)), reg_data_1);

	mdelay(5);

	reg_data_1 &= ~(0x04000000);
	TITAN_GE_WRITE((0x103c + (port_num << 12)), reg_data_1);

	mdelay(5);

	reg_data |= 0x0001;
	TITAN_GE_WRITE((TITAN_GE_TRTG_CONFIG + (port_num << 12)), reg_data);

	/*
	 * Step 4:  Start the Tx activity
	 */
	TITAN_GE_WRITE((TITAN_GE_TMAC_CONFIG_2 + (port_num << 12)), 0xe197);
#ifdef TITAN_GE_JUMBO_FRAMES
	TITAN_GE_WRITE((0x1258 + (port_num << 12)), 0x4000);
#endif
	reg_data = TITAN_GE_READ(TITAN_GE_TMAC_CONFIG_1 + (port_num << 12));
	reg_data |= 0x0001;	/* Enable TMAC */
	reg_data |= 0x6c70;	/* PAUSE also set */

	TITAN_GE_WRITE((TITAN_GE_TMAC_CONFIG_1 + (port_num << 12)), reg_data);

	udelay(30);

	/* Destination Address drop bit */
	reg_data = TITAN_GE_READ(TITAN_GE_RMAC_CONFIG_2 + (port_num << 12));
	reg_data |= 0x218;	/* DA_DROP bit and pause */
	TITAN_GE_WRITE((TITAN_GE_RMAC_CONFIG_2 + (port_num << 12)), reg_data);

	TITAN_GE_WRITE((0x1218 + (port_num << 12)), 0x3);

#ifdef TITAN_GE_JUMBO_FRAMES
	TITAN_GE_WRITE((0x1208 + (port_num << 12)), 0x4000);
#endif

	/* Start the Rx activity */
	reg_data = TITAN_GE_READ(TITAN_GE_RMAC_CONFIG_1 + (port_num << 12));
	reg_data |= 0x0001;	/* RMAC Enable */
	reg_data |= 0x0010;	/* CRC Check enable */
	reg_data |= 0x0040;	/* Min Frame check enable */
	reg_data |= 0x4400;	/* Max Frame check enable */

	TITAN_GE_WRITE((TITAN_GE_RMAC_CONFIG_1 + (port_num << 12)), reg_data);

	udelay(30);

	/*
	 * Enable the Interrupts for Tx and Rx
	 */
	reg_data1 = TITAN_GE_READ(TITAN_GE_INTR_XDMA_IE);

	if (port_num == 0) {
		reg_data1 |= 0x3;
#ifdef CONFIG_SMP
		TITAN_GE_WRITE(0x0038, 0x003);
#else
		TITAN_GE_WRITE(0x0038, 0x303);
#endif
	}

	if (port_num == 1) {
		reg_data1 |= 0x300;
	}

	if (port_num == 2)
		reg_data1 |= 0x30000;

	TITAN_GE_WRITE(TITAN_GE_INTR_XDMA_IE, reg_data1);
	TITAN_GE_WRITE(0x003c, 0x300);

	if (config_done == 0) {
		TITAN_GE_WRITE(0x0024, 0x04000024);	/* IRQ vector */
		TITAN_GE_WRITE(0x0020, 0x000fb000);	/* INTMSG base */
	}

	/* Priority */
	reg_data = TITAN_GE_READ(0x1038 + (port_num << 12));
	reg_data &= ~(0x00f00000);
	TITAN_GE_WRITE((0x1038 + (port_num << 12)), reg_data);

	/* Step 5:  GMII config */
	titan_ge_gmii_config(port_num);

	if (config_done == 0) {
		TITAN_GE_WRITE(0x1a80, 0);
		config_done = 1;
	}

	return TITAN_OK;
}

/*
 * Function to queue the packet for the Ethernet device
 */
static void titan_ge_tx_queue(titan_ge_port_info * titan_ge_eth,
			      struct sk_buff * skb)
{
	struct device *device = &titan_ge_device[titan_ge_eth->port_num]->dev;
	unsigned int curr_desc = titan_ge_eth->tx_curr_desc_q;
	volatile titan_ge_tx_desc *tx_curr;
	int port_num = titan_ge_eth->port_num;

	tx_curr = &(titan_ge_eth->tx_desc_area[curr_desc]);
	tx_curr->buffer_addr =
		dma_map_single(device, skb->data, skb_headlen(skb),
			       DMA_TO_DEVICE);

	titan_ge_eth->tx_skb[curr_desc] = (struct sk_buff *) skb;
	tx_curr->buffer_len = skb_headlen(skb);

	/* Last descriptor enables interrupt and changes ownership */
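	/*
	 * Bit 0 passes ownership of the descriptor to the XDMA, bit 15
	 * requests a completion interrupt, and bit 5 marks the last
	 * descriptor of the frame.
	 */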
	tx_curr->cmd_sts = 0x1 | (1 << 15) | (1 << 5);

	/* Kick the XDMA to start the transfer from memory to the FIFO */
	TITAN_GE_WRITE((0x5044 + (port_num << 8)), 0x1);

	/* Current descriptor updated */
	titan_ge_eth->tx_curr_desc_q = (curr_desc + 1) % TITAN_GE_TX_QUEUE;

	/* Prefetch the next descriptor */
	prefetch((const void *)
		 &titan_ge_eth->tx_desc_area[titan_ge_eth->tx_curr_desc_q]);
}

/*
 * Actually does the open of the Ethernet device
 */
static int titan_ge_eth_open(struct net_device *netdev)
{
	titan_ge_port_info *titan_ge_eth = netdev_priv(netdev);
	unsigned int port_num = titan_ge_eth->port_num;
	struct device *device = &titan_ge_device[port_num]->dev;
	unsigned long reg_data;
	unsigned int phy_reg;
	int err = 0;

	/* Stop the Rx activity */
	reg_data = TITAN_GE_READ(TITAN_GE_RMAC_CONFIG_1 + (port_num << 12));
	reg_data &= ~(0x00000001);
	TITAN_GE_WRITE((TITAN_GE_RMAC_CONFIG_1 + (port_num << 12)), reg_data);

	/* Clear the port interrupts */
	TITAN_GE_WRITE((TITAN_GE_CHANNEL0_INTERRUPT + (port_num << 8)), 0x0);

	if (config_done == 0) {
		TITAN_GE_WRITE(TITAN_GE_INTR_XDMA_CORE_A, 0);
		TITAN_GE_WRITE(TITAN_GE_INTR_XDMA_CORE_B, 0);
	}

	/* Set the MAC Address */
	memcpy(titan_ge_eth->port_mac_addr, netdev->dev_addr, 6);

	if (config_done == 0)
		titan_port_init(netdev, titan_ge_eth);

	titan_ge_update_afx(titan_ge_eth);

	/* Allocate the Tx ring now */
	titan_ge_eth->tx_ring_skbs = 0;
	titan_ge_eth->tx_ring_size = TITAN_GE_TX_QUEUE;

	/* Allocate space in the SRAM for the descriptors */
	titan_ge_eth->tx_desc_area = (titan_ge_tx_desc *)
		(titan_ge_sram + TITAN_TX_RING_BYTES * port_num);
	titan_ge_eth->tx_dma = TITAN_SRAM_BASE + TITAN_TX_RING_BYTES * port_num;
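
	/*
	 * tx_desc_area is a CPU pointer into the ioremap()ed on-chip
	 * SRAM, while tx_dma is the corresponding physical SRAM address
	 * that the XDMA engine is given.
	 */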

	if (!titan_ge_eth->tx_desc_area) {
		printk(KERN_ERR
		       "%s: Cannot allocate Tx Ring (size %d bytes) for port %d\n",
		       netdev->name, TITAN_TX_RING_BYTES, port_num);
		return -ENOMEM;
	}

	memset(titan_ge_eth->tx_desc_area, 0, titan_ge_eth->tx_desc_area_size);

	/* Now initialize the Tx descriptor ring */
	titan_ge_init_tx_desc_ring(titan_ge_eth,
				   titan_ge_eth->tx_ring_size,
				   (unsigned long) titan_ge_eth->tx_desc_area,
				   (unsigned long) titan_ge_eth->tx_dma);

	/* Allocate the Rx ring now */
	titan_ge_eth->rx_ring_size = TITAN_GE_RX_QUEUE;
	titan_ge_eth->rx_ring_skbs = 0;

	titan_ge_eth->rx_desc_area =
		(titan_ge_rx_desc *)(titan_ge_sram + 0x1000 + TITAN_RX_RING_BYTES * port_num);

	titan_ge_eth->rx_dma = TITAN_SRAM_BASE + 0x1000 + TITAN_RX_RING_BYTES * port_num;

	if (!titan_ge_eth->rx_desc_area) {
		printk(KERN_ERR "%s: Cannot allocate Rx Ring (size %d bytes)\n",
		       netdev->name, TITAN_RX_RING_BYTES);

		printk(KERN_ERR "%s: Freeing previously allocated TX queues...",
		       netdev->name);

		dma_free_coherent(device, titan_ge_eth->tx_desc_area_size,
				  (void *) titan_ge_eth->tx_desc_area,
				  titan_ge_eth->tx_dma);

		return -ENOMEM;
	}

	memset(titan_ge_eth->rx_desc_area, 0, titan_ge_eth->rx_desc_area_size);

	/* Now initialize the Rx ring */
#ifdef TITAN_GE_JUMBO_FRAMES
	if ((titan_ge_init_rx_desc_ring
	     (titan_ge_eth, titan_ge_eth->rx_ring_size, TITAN_GE_JUMBO_BUFSIZE,
	      (unsigned long) titan_ge_eth->rx_desc_area, 0,
	      (unsigned long) titan_ge_eth->rx_dma)) == 0)
#else
	if ((titan_ge_init_rx_desc_ring
	     (titan_ge_eth, titan_ge_eth->rx_ring_size, TITAN_GE_STD_BUFSIZE,
	      (unsigned long) titan_ge_eth->rx_desc_area, 0,
	      (unsigned long) titan_ge_eth->rx_dma)) == 0)
#endif
		panic("%s: Error initializing RX Ring\n", netdev->name);

	/* Fill the Rx ring with the SKBs */
	titan_ge_port_start(netdev, titan_ge_eth);

	/*
	 * Check if Interrupt Coalescing needs to be turned on.  The
	 * values specified in the register are multiplied by
	 * (8 x 64 nanoseconds) to determine when an interrupt should
	 * be sent to the CPU.
	 */
	if (TITAN_GE_TX_COAL) {
		titan_ge_eth->tx_int_coal =
			titan_ge_tx_coal(TITAN_GE_TX_COAL, port_num);
	}

	err = titan_ge_mdio_read(port_num, TITAN_GE_MDIO_PHY_STATUS, &phy_reg);
	if (err == TITAN_GE_MDIO_ERROR) {
		printk(KERN_ERR
		       "Could not read PHY control register 0x11\n");
		return TITAN_ERROR;
	}
	if (!(phy_reg & 0x0400)) {
		netif_carrier_off(netdev);
		netif_stop_queue(netdev);
		return TITAN_ERROR;
	} else {
		netif_carrier_on(netdev);
		netif_start_queue(netdev);
	}

	return TITAN_OK;
}

/*
 * Queue the packet for Tx.  Currently no support for zero copy,
 * checksum offload and Scatter Gather.  The chip does support
 * Scatter Gather only.  But, that won't help here since zero copy
 * requires support for Tx checksumming also.
 */
int titan_ge_start_xmit(struct sk_buff *skb, struct net_device *netdev)
{
	titan_ge_port_info *titan_ge_eth = netdev_priv(netdev);
	unsigned long flags;
	struct net_device_stats *stats;

	stats = &titan_ge_eth->stats;
	spin_lock_irqsave(&titan_ge_eth->lock, flags);

	if ((TITAN_GE_TX_QUEUE - titan_ge_eth->tx_ring_skbs) <=
	    (skb_shinfo(skb)->nr_frags + 1)) {
		netif_stop_queue(netdev);
		spin_unlock_irqrestore(&titan_ge_eth->lock, flags);
		printk(KERN_ERR "Tx OOD\n");
		return 1;
	}

	titan_ge_tx_queue(titan_ge_eth, skb);
	titan_ge_eth->tx_ring_skbs++;
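
	/*
	 * When the ring is nearly full, reap completed descriptors.
	 * titan_ge_free_tx_queue() acquires this same lock, so it is
	 * dropped around the call to avoid a self-deadlock.
	 */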
	if (TITAN_GE_TX_QUEUE <= (titan_ge_eth->tx_ring_skbs + 4)) {
		spin_unlock_irqrestore(&titan_ge_eth->lock, flags);
		titan_ge_free_tx_queue(titan_ge_eth);
		spin_lock_irqsave(&titan_ge_eth->lock, flags);
	}

	stats->tx_bytes += skb->len;
	stats->tx_packets++;

	spin_unlock_irqrestore(&titan_ge_eth->lock, flags);

	netdev->trans_start = jiffies;

	return 0;
}

/*
 * Actually does the Rx.  Rx side checksumming supported.
 */
static int titan_ge_rx(struct net_device *netdev, int port_num,
		       titan_ge_port_info * titan_ge_port,
		       titan_ge_packet * packet)
{
	int rx_curr_desc, rx_used_desc;
	volatile titan_ge_rx_desc *rx_desc;

	rx_curr_desc = titan_ge_port->rx_curr_desc_q;
	rx_used_desc = titan_ge_port->rx_used_desc_q;

	if (((rx_curr_desc + 1) % TITAN_GE_RX_QUEUE) == rx_used_desc)
		return TITAN_ERROR;

	rx_desc = &(titan_ge_port->rx_desc_area[rx_curr_desc]);

	if (rx_desc->cmd_sts & TITAN_GE_RX_BUFFER_OWNED)
		return TITAN_ERROR;

	packet->skb = titan_ge_port->rx_skb[rx_curr_desc];
	packet->len = (rx_desc->cmd_sts & 0x7fff);

	/*
	 * At this point, we don't know if the checksumming
	 * actually helps relieve CPU.  So, keep it for
	 * port 0 only.
	 */
	packet->checksum = ntohs((rx_desc->buffer & 0xffff0000) >> 16);
	packet->cmd_sts = rx_desc->cmd_sts;

	titan_ge_port->rx_curr_desc_q = (rx_curr_desc + 1) % TITAN_GE_RX_QUEUE;

	/* Prefetch the next descriptor */
	prefetch((const void *)
		 &titan_ge_port->rx_desc_area[titan_ge_port->rx_curr_desc_q + 1]);

	return TITAN_OK;
}

/*
 * Free the Tx queue of the used SKBs
 */
static int titan_ge_free_tx_queue(titan_ge_port_info *titan_ge_eth)
{
	unsigned long flags;

	/* Take the lock */
	spin_lock_irqsave(&(titan_ge_eth->lock), flags);

	while (titan_ge_return_tx_desc(titan_ge_eth, titan_ge_eth->port_num) == 0)
		if (titan_ge_eth->tx_ring_skbs != 1)
			titan_ge_eth->tx_ring_skbs--;

	spin_unlock_irqrestore(&titan_ge_eth->lock, flags);

	return TITAN_OK;
}

/*
 * Threshold beyond which we do the cleaning of the
 * Tx queue and new allocation for the Rx queue
 */
#define	TX_THRESHOLD	4
#define	RX_THRESHOLD	10

/*
 * Receive the packets and send them to the kernel.
 */
static int titan_ge_receive_queue(struct net_device *netdev, unsigned int max)
{
	titan_ge_port_info *titan_ge_eth = netdev_priv(netdev);
	unsigned int port_num = titan_ge_eth->port_num;
	titan_ge_packet packet;
	struct net_device_stats *stats;
	struct sk_buff *skb;
	unsigned long received_packets = 0;
	unsigned int ack;

	stats = &titan_ge_eth->stats;

	while ((--max)
	       && (titan_ge_rx(netdev, port_num, titan_ge_eth, &packet) == TITAN_OK)) {
		skb = (struct sk_buff *) packet.skb;

		titan_ge_eth->rx_ring_skbs--;

		if (--titan_ge_eth->rx_work_limit < 0)
			break;

		received_packets++;

		stats->rx_packets++;
		stats->rx_bytes += packet.len;

		if ((packet.cmd_sts & TITAN_GE_RX_PERR) ||
		    (packet.cmd_sts & TITAN_GE_RX_OVERFLOW_ERROR) ||
		    (packet.cmd_sts & TITAN_GE_RX_TRUNC) ||
		    (packet.cmd_sts & TITAN_GE_RX_CRC_ERROR)) {
			stats->rx_dropped++;
			dev_kfree_skb_any(skb);

			continue;
		}

		/*
		 * Either support fast path or slow path.  Decision
		 * making can really slow down the performance.  The
		 * idea is to cut down the number of checks and improve
		 * the fast path.
		 */
		skb_put(skb, packet.len - 2);

		/*
		 * Increment data pointer by two since that's where
		 * the MAC starts
		 */
		skb_reserve(skb, 2);
		skb->protocol = eth_type_trans(skb, netdev);
		netif_receive_skb(skb);

		if (titan_ge_eth->rx_threshold > RX_THRESHOLD) {
			ack = titan_ge_rx_task(netdev, titan_ge_eth);
			TITAN_GE_WRITE((0x5048 + (port_num << 8)), ack);
			titan_ge_eth->rx_threshold = 0;
		} else
			titan_ge_eth->rx_threshold++;

		if (titan_ge_eth->tx_threshold > TX_THRESHOLD) {
			titan_ge_eth->tx_threshold = 0;
			titan_ge_free_tx_queue(titan_ge_eth);
		}
		else
			titan_ge_eth->tx_threshold++;
	}

	return received_packets;
}

/*
 * Enable the Rx side interrupts
 */
static void titan_ge_enable_int(unsigned int port_num,
				titan_ge_port_info *titan_ge_eth,
				struct net_device *netdev)
{
	unsigned long reg_data = TITAN_GE_READ(TITAN_GE_INTR_XDMA_IE);

	if (port_num == 0)
		reg_data |= 0x3;
	if (port_num == 1)
		reg_data |= 0x300;
	if (port_num == 2)
		reg_data |= 0x30000;

	/* Re-enable interrupts */
	TITAN_GE_WRITE(TITAN_GE_INTR_XDMA_IE, reg_data);
}

/*
 * Main function to handle the polling for Rx side NAPI.
 * Receive interrupts have been disabled at this point.
 * The poll does the transmit cleanup followed by the receive.
 */
static int titan_ge_poll(struct net_device *netdev, int *budget)
{
	titan_ge_port_info *titan_ge_eth = netdev_priv(netdev);
	int port_num = titan_ge_eth->port_num;
	int work_done = 0;
	unsigned long flags, status;
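
	/*
	 * The effective NAPI work limit is the lesser of the global
	 * *budget and this device's quota.
	 */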
	titan_ge_eth->rx_work_limit = *budget;
	if (titan_ge_eth->rx_work_limit > netdev->quota)
		titan_ge_eth->rx_work_limit = netdev->quota;

	do {
		/* Do the transmit cleaning work here */
		titan_ge_free_tx_queue(titan_ge_eth);

		/* Ack the Rx interrupts */
		if (port_num == 0)
			TITAN_GE_WRITE(TITAN_GE_INTR_XDMA_CORE_A, 0x3);
		if (port_num == 1)
			TITAN_GE_WRITE(TITAN_GE_INTR_XDMA_CORE_A, 0x300);
		if (port_num == 2)
			TITAN_GE_WRITE(TITAN_GE_INTR_XDMA_CORE_A, 0x30000);

		work_done += titan_ge_receive_queue(netdev, 0);

		/* Out of quota and there is work to be done */
		if (titan_ge_eth->rx_work_limit < 0)
			goto not_done;

		/* Receive alloc_skb could lead to OOM */
		if (oom_flag == 1) {
			oom_flag = 0;
			goto oom;
		}

		status = TITAN_GE_READ(TITAN_GE_INTR_XDMA_CORE_A);
	} while (status & 0x30300);

	/* If we are here, then no more interrupts to process */
	goto done;

not_done:
	*budget -= work_done;
	netdev->quota -= work_done;
	return 1;

oom:
	printk(KERN_ERR "OOM\n");
	netif_rx_complete(netdev);
	return 0;

done:
	/*
	 * No more packets on the poll list.  Turn the interrupts
	 * back on and we should be able to catch the new
	 * packets in the interrupt handler.
	 */
	if (!work_done)
		work_done = 1;

	*budget -= work_done;
	netdev->quota -= work_done;

	spin_lock_irqsave(&titan_ge_eth->lock, flags);

	/* Remove us from the poll list */
	netif_rx_complete(netdev);

	/* Re-enable interrupts */
	titan_ge_enable_int(port_num, titan_ge_eth, netdev);

	spin_unlock_irqrestore(&titan_ge_eth->lock, flags);

	return 0;
}

/*
 * Close the network device
 */
int titan_ge_stop(struct net_device *netdev)
{
	titan_ge_port_info *titan_ge_eth = netdev_priv(netdev);

	spin_lock_irq(&(titan_ge_eth->lock));
	titan_ge_eth_stop(netdev);
	free_irq(netdev->irq, netdev);
	spin_unlock_irq(&titan_ge_eth->lock);

	return TITAN_OK;
}

/*
 * Free the Tx ring
 */
static void titan_ge_free_tx_rings(struct net_device *netdev)
{
	titan_ge_port_info *titan_ge_eth = netdev_priv(netdev);
	unsigned int port_num = titan_ge_eth->port_num;
	unsigned int curr;
	unsigned long reg_data;

	/* Stop the Tx DMA */
	reg_data = TITAN_GE_READ(TITAN_GE_CHANNEL0_CONFIG +
				 (port_num << 8));
	reg_data |= 0xc0000000;
	TITAN_GE_WRITE((TITAN_GE_CHANNEL0_CONFIG +
			(port_num << 8)), reg_data);

	/* Disable the TMAC */
	reg_data = TITAN_GE_READ(TITAN_GE_TMAC_CONFIG_1 +
				 (port_num << 12));
	reg_data &= ~(0x00000001);
	TITAN_GE_WRITE((TITAN_GE_TMAC_CONFIG_1 +
			(port_num << 12)), reg_data);

	for (curr = 0;
	     (titan_ge_eth->tx_ring_skbs) && (curr < TITAN_GE_TX_QUEUE);
	     curr++) {
		if (titan_ge_eth->tx_skb[curr]) {
			dev_kfree_skb(titan_ge_eth->tx_skb[curr]);
			titan_ge_eth->tx_ring_skbs--;
		}
	}

	if (titan_ge_eth->tx_ring_skbs != 0)
		printk
		    ("%s: Error on Tx descriptor free - could not free %d"
		     " descriptors\n", netdev->name,
		     titan_ge_eth->tx_ring_skbs);

#ifndef TITAN_RX_RING_IN_SRAM
	dma_free_coherent(&titan_ge_device[port_num]->dev,
			  titan_ge_eth->tx_desc_area_size,
			  (void *) titan_ge_eth->tx_desc_area,
			  titan_ge_eth->tx_dma);
#endif
}

/*
 * Free the Rx ring
 */
static void titan_ge_free_rx_rings(struct net_device *netdev)
{
	titan_ge_port_info *titan_ge_eth = netdev_priv(netdev);
	unsigned int port_num = titan_ge_eth->port_num;
	unsigned int curr;
	unsigned long reg_data;

	/* Stop the Rx DMA */
	reg_data = TITAN_GE_READ(TITAN_GE_CHANNEL0_CONFIG +
				 (port_num << 8));
	reg_data |= 0x000c0000;
	TITAN_GE_WRITE((TITAN_GE_CHANNEL0_CONFIG +
			(port_num << 8)), reg_data);

	/* Disable the RMAC */
	reg_data = TITAN_GE_READ(TITAN_GE_RMAC_CONFIG_1 +
				 (port_num << 12));
	reg_data &= ~(0x00000001);
	TITAN_GE_WRITE((TITAN_GE_RMAC_CONFIG_1 +
			(port_num << 12)), reg_data);

	for (curr = 0;
	     titan_ge_eth->rx_ring_skbs && (curr < TITAN_GE_RX_QUEUE);
	     curr++) {
		if (titan_ge_eth->rx_skb[curr]) {
			dev_kfree_skb(titan_ge_eth->rx_skb[curr]);
			titan_ge_eth->rx_ring_skbs--;
		}
	}

	if (titan_ge_eth->rx_ring_skbs != 0)
		printk(KERN_ERR
		       "%s: Error in freeing Rx Ring. %d skb's still"
		       " stuck in RX Ring - ignoring them\n", netdev->name,
		       titan_ge_eth->rx_ring_skbs);

#ifndef TITAN_RX_RING_IN_SRAM
	dma_free_coherent(&titan_ge_device[port_num]->dev,
			  titan_ge_eth->rx_desc_area_size,
			  (void *) titan_ge_eth->rx_desc_area,
			  titan_ge_eth->rx_dma);
#endif
}

/*
 * Actually does the stop of the Ethernet device
 */
static void titan_ge_eth_stop(struct net_device *netdev)
{
	titan_ge_port_info *titan_ge_eth = netdev_priv(netdev);

	netif_stop_queue(netdev);

	titan_ge_port_reset(titan_ge_eth->port_num);

	titan_ge_free_tx_rings(netdev);
	titan_ge_free_rx_rings(netdev);

	/* Disable the Tx and Rx Interrupts for all channels */
	TITAN_GE_WRITE(TITAN_GE_INTR_XDMA_IE, 0x0);
}

/*
 * Update the MAC address.  Note that we have to write the
 * address in three station registers, 16 bits each.  And this
 * has to be done for both the TMAC and the RMAC.
 */
static void titan_ge_update_mac_address(struct net_device *netdev)
{
	titan_ge_port_info *titan_ge_eth = netdev_priv(netdev);
	unsigned int port_num = titan_ge_eth->port_num;
	u8 p_addr[6];

	memcpy(titan_ge_eth->port_mac_addr, netdev->dev_addr, 6);
	memcpy(p_addr, netdev->dev_addr, 6);

	/* Update the Address Filtering Match tables */
	titan_ge_update_afx(titan_ge_eth);

	printk("Station MAC : %02x:%02x:%02x:%02x:%02x:%02x\n",
	       p_addr[0], p_addr[1], p_addr[2],
	       p_addr[3], p_addr[4], p_addr[5]);

	/* Set the MAC address here for TMAC and RMAC */
	TITAN_GE_WRITE((TITAN_GE_TMAC_STATION_HI + (port_num << 12)),
		       ((p_addr[5] << 8) | p_addr[4]));
	TITAN_GE_WRITE((TITAN_GE_TMAC_STATION_MID + (port_num << 12)),
		       ((p_addr[3] << 8) | p_addr[2]));
	TITAN_GE_WRITE((TITAN_GE_TMAC_STATION_LOW + (port_num << 12)),
		       ((p_addr[1] << 8) | p_addr[0]));

	TITAN_GE_WRITE((TITAN_GE_RMAC_STATION_HI + (port_num << 12)),
		       ((p_addr[5] << 8) | p_addr[4]));
	TITAN_GE_WRITE((TITAN_GE_RMAC_STATION_MID + (port_num << 12)),
		       ((p_addr[3] << 8) | p_addr[2]));
	TITAN_GE_WRITE((TITAN_GE_RMAC_STATION_LOW + (port_num << 12)),
		       ((p_addr[1] << 8) | p_addr[0]));
}

/*
 * Set the MAC address of the Ethernet device
 */
static int titan_ge_set_mac_address(struct net_device *dev, void *addr)
{
	titan_ge_port_info *tp = netdev_priv(dev);
	struct sockaddr *sa = addr;

	memcpy(dev->dev_addr, sa->sa_data, dev->addr_len);

	spin_lock_irq(&tp->lock);
	titan_ge_update_mac_address(dev);
	spin_unlock_irq(&tp->lock);

	return 0;
}

/*
 * Get the Ethernet device stats
 */
static struct net_device_stats *titan_ge_get_stats(struct net_device *netdev)
{
	titan_ge_port_info *titan_ge_eth = netdev_priv(netdev);

	return &titan_ge_eth->stats;
}

/*
 * Initialize the Rx descriptor ring for the Titan GE
 */
static int titan_ge_init_rx_desc_ring(titan_ge_port_info * titan_eth_port,
				      int rx_desc_num,
				      int rx_buff_size,
				      unsigned long rx_desc_base_addr,
				      unsigned long rx_buff_base_addr,
				      unsigned long rx_dma)
{
	volatile titan_ge_rx_desc *rx_desc;
	unsigned long buffer_addr;
	int index;
	unsigned long titan_ge_rx_desc_bus = rx_dma;

	buffer_addr = rx_buff_base_addr;
	rx_desc = (titan_ge_rx_desc *) rx_desc_base_addr;

	/* Check alignment */
	if (rx_buff_base_addr & 0xF)
		return 0;

	/* Check Rx buffer size */
	if ((rx_buff_size < 8) || (rx_buff_size > TITAN_GE_MAX_RX_BUFFER))
		return 0;

	/* 64-bit alignment
	if ((rx_buff_base_addr + rx_buff_size) & 0x7)
		return 0; */

	/* Initialize the Rx desc ring */
	for (index = 0; index < rx_desc_num; index++) {
		titan_ge_rx_desc_bus += sizeof(titan_ge_rx_desc);
		rx_desc[index].cmd_sts = 0;
		rx_desc[index].buffer_addr = buffer_addr;
		titan_eth_port->rx_skb[index] = NULL;
		buffer_addr += rx_buff_size;
	}

	titan_eth_port->rx_curr_desc_q = 0;
	titan_eth_port->rx_used_desc_q = 0;

	titan_eth_port->rx_desc_area = (titan_ge_rx_desc *) rx_desc_base_addr;
	titan_eth_port->rx_desc_area_size =
		rx_desc_num * sizeof(titan_ge_rx_desc);

	titan_eth_port->rx_dma = rx_dma;

	return TITAN_OK;
}

/*
 * Initialize the Tx descriptor ring.  Descriptors are in the SRAM.
 */
static int titan_ge_init_tx_desc_ring(titan_ge_port_info * titan_ge_port,
				      int tx_desc_num,
				      unsigned long tx_desc_base_addr,
				      unsigned long tx_dma)
{
	titan_ge_tx_desc *tx_desc;
	int index;
	unsigned long titan_ge_tx_desc_bus = tx_dma;

	if (tx_desc_base_addr & 0xF)
		return 0;

	tx_desc = (titan_ge_tx_desc *) tx_desc_base_addr;

	for (index = 0; index < tx_desc_num; index++) {
		titan_ge_port->tx_dma_array[index] =
			(dma_addr_t) titan_ge_tx_desc_bus;
		titan_ge_tx_desc_bus += sizeof(titan_ge_tx_desc);
		tx_desc[index].cmd_sts = 0x0000;
		tx_desc[index].buffer_len = 0;
		tx_desc[index].buffer_addr = 0x00000000;
		titan_ge_port->tx_skb[index] = NULL;
	}

	titan_ge_port->tx_curr_desc_q = 0;
	titan_ge_port->tx_used_desc_q = 0;

	titan_ge_port->tx_desc_area = (titan_ge_tx_desc *) tx_desc_base_addr;
	titan_ge_port->tx_desc_area_size =
		tx_desc_num * sizeof(titan_ge_tx_desc);

	titan_ge_port->tx_dma = tx_dma;
	return TITAN_OK;
}

/*
 * Initialize the device as an Ethernet device
 */
static int __init titan_ge_probe(struct device *device)
{
	titan_ge_port_info *titan_ge_eth;
	struct net_device *netdev;
	int port = to_platform_device(device)->id;
	int err;

	netdev = alloc_etherdev(sizeof(titan_ge_port_info));
	if (!netdev) {
		err = -ENODEV;
		goto out;
	}

	netdev->open = titan_ge_open;
	netdev->stop = titan_ge_stop;
	netdev->hard_start_xmit = titan_ge_start_xmit;
	netdev->get_stats = titan_ge_get_stats;
	netdev->set_multicast_list = titan_ge_set_multi;
	netdev->set_mac_address = titan_ge_set_mac_address;

	/* Tx timeout */
	netdev->tx_timeout = titan_ge_tx_timeout;
	netdev->watchdog_timeo = 2 * HZ;

	/* NAPI poll routine and weight */
	netdev->poll = titan_ge_poll;
	netdev->weight = 64;

	netdev->tx_queue_len = TITAN_GE_TX_QUEUE;
	netif_carrier_off(netdev);
	netdev->base_addr = 0;

	netdev->change_mtu = titan_ge_change_mtu;

	titan_ge_eth = netdev_priv(netdev);
	/* Allocation of memory for the driver structures */

	titan_ge_eth->port_num = port;

	/* Configure the Tx timeout handler */
	INIT_WORK(&titan_ge_eth->tx_timeout_task,
		  (void (*)(void *)) titan_ge_tx_timeout_task, netdev);

	spin_lock_init(&titan_ge_eth->lock);

	/* Set the MAC address: the base address plus the port number */
	memcpy(netdev->dev_addr, titan_ge_mac_addr_base, 6);
	netdev->dev_addr[5] += port;

	err = register_netdev(netdev);

	if (err)
		goto out_free_netdev;

	printk(KERN_NOTICE
	       "%s: port %d with MAC address %02x:%02x:%02x:%02x:%02x:%02x\n",
	       netdev->name, port, netdev->dev_addr[0],
	       netdev->dev_addr[1], netdev->dev_addr[2],
	       netdev->dev_addr[3], netdev->dev_addr[4],
	       netdev->dev_addr[5]);

	printk(KERN_NOTICE "Rx NAPI supported, Tx Coalescing ON\n");

	return 0;

out_free_netdev:
	free_netdev(netdev);

out:
	return err;
}

static int __devexit titan_device_remove(struct device *device)
{
	return 0;
}

/*
 * Reset the Ethernet port
 */
static void titan_ge_port_reset(unsigned int port_num)
{
	unsigned int reg_data;

	/* Stop the Tx port activity */
	reg_data = TITAN_GE_READ(TITAN_GE_TMAC_CONFIG_1 +
				 (port_num << 12));
	reg_data &= ~(0x0001);
	TITAN_GE_WRITE((TITAN_GE_TMAC_CONFIG_1 +
			(port_num << 12)), reg_data);

	/* Stop the Rx port activity */
	reg_data = TITAN_GE_READ(TITAN_GE_RMAC_CONFIG_1 +
				 (port_num << 12));
	reg_data &= ~(0x0001);
	TITAN_GE_WRITE((TITAN_GE_RMAC_CONFIG_1 +
			(port_num << 12)), reg_data);

	return;
}

/*
 * Return the Tx desc after use by the XDMA
 */
static int titan_ge_return_tx_desc(titan_ge_port_info * titan_ge_eth, int port)
{
	int tx_desc_used;
	struct sk_buff *skb;

	tx_desc_used = titan_ge_eth->tx_used_desc_q;

	/* return right away */
	if (tx_desc_used == titan_ge_eth->tx_curr_desc_q)
		return TITAN_ERROR;

	/* Now the critical stuff */
	skb = titan_ge_eth->tx_skb[tx_desc_used];

	dev_kfree_skb_any(skb);

	titan_ge_eth->tx_skb[tx_desc_used] = NULL;
	titan_ge_eth->tx_used_desc_q =
		(tx_desc_used + 1) % TITAN_GE_TX_QUEUE;

	return 0;
}

/*
 * Coalescing for the Tx path: the Tx delay goes in the upper 16 bits
 * of the coalescing register, preserving the Rx delay in the lower 16.
 */
static unsigned long titan_ge_tx_coal(unsigned long delay, int port)
{
	unsigned long rx_delay;

	rx_delay = TITAN_GE_READ(TITAN_GE_INT_COALESCING);
	delay = (delay << 16) | rx_delay;

	TITAN_GE_WRITE(TITAN_GE_INT_COALESCING, delay);
	TITAN_GE_WRITE(0x5038, delay);

	return delay;
}

static struct device_driver titan_soc_driver = {
	.name	= titan_string,
	.bus	= &platform_bus_type,
	.probe	= titan_ge_probe,
	.remove	= __devexit_p(titan_device_remove),
};

static void titan_platform_release(struct device *device)
{
	struct platform_device *pldev;

	/* free device */
	pldev = to_platform_device(device);
	kfree(pldev);
}

/*
 * Register the Titan GE with the kernel
 */
static int __init titan_ge_init_module(void)
{
	struct platform_device *pldev;
	unsigned int version, device;
	int i;

	printk(KERN_NOTICE
	       "PMC-Sierra TITAN 10/100/1000 Ethernet Driver\n");

	titan_ge_base = (unsigned long) ioremap(TITAN_GE_BASE, TITAN_GE_SIZE);
	if (!titan_ge_base) {
		printk("Mapping Titan GE failed\n");
		goto out;
	}

	device = TITAN_GE_READ(TITAN_GE_DEVICE_ID);
	version = (device & 0x000f0000) >> 16;
	device &= 0x0000ffff;

	printk(KERN_NOTICE "Device Id : %x,  Version : %x\n", device, version);

#ifdef TITAN_RX_RING_IN_SRAM
	titan_ge_sram = (unsigned long) ioremap(TITAN_SRAM_BASE,
						TITAN_SRAM_SIZE);
	if (!titan_ge_sram) {
		printk("Mapping Titan SRAM failed\n");
		goto out_unmap_ge;
	}
#endif
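
	/*
	 * Note: titan_ge_sram is mapped only when TITAN_RX_RING_IN_SRAM
	 * is defined, yet titan_ge_eth_open() always places the rings in
	 * SRAM; the driver is expected to be built with that option on.
	 */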

	if (driver_register(&titan_soc_driver)) {
		printk(KERN_ERR "Driver registration failed\n");
		goto out_unmap_sram;
	}

	for (i = 0; i < 3; i++) {
		titan_ge_device[i] = NULL;

		if (!(pldev = kmalloc(sizeof (*pldev), GFP_KERNEL)))
			continue;

		memset(pldev, 0, sizeof (*pldev));
		pldev->name		= titan_string;
		pldev->id		= i;
		pldev->dev.release	= titan_platform_release;
		titan_ge_device[i]	= pldev;

		if (platform_device_register(pldev)) {
			kfree(pldev);
			titan_ge_device[i] = NULL;
			continue;
		}

		if (!pldev->dev.driver) {
			/*
			 * The driver was not bound to this device; there was
			 * no hardware at this address.  Unregister it, as the
			 * release function will take care of freeing the
			 * allocated structure.
			 */
			titan_ge_device[i] = NULL;
			platform_device_unregister(pldev);
		}
	}

	return 0;

out_unmap_sram:
	iounmap((void *)titan_ge_sram);

out_unmap_ge:
	iounmap((void *)titan_ge_base);

out:
	return -ENOMEM;
}

/*
 * Unregister the Titan GE from the kernel
 */
static void __exit titan_ge_cleanup_module(void)
{
	int i;

	driver_unregister(&titan_soc_driver);

	for (i = 0; i < 3; i++) {
		if (titan_ge_device[i]) {
			platform_device_unregister(titan_ge_device[i]);
			titan_ge_device[i] = NULL;
		}
	}

	iounmap((void *)titan_ge_sram);
	iounmap((void *)titan_ge_base);
}

MODULE_AUTHOR("Manish Lachwani <lachwani@pmc-sierra.com>");
MODULE_DESCRIPTION("Titan GE Ethernet driver");
MODULE_LICENSE("GPL");

module_init(titan_ge_init_module);
module_exit(titan_ge_cleanup_module);