/*
 * linux/drivers/net/arm/eth_s3c4510b.c
 *
 * Copyright (c) 2004 Cucy Systems (http://www.cucy.com)
 * Curt Brune <curt@cucy.com>
 *
 * Re-written from scratch for 2.6.x after studying the original 2.4.x
 * driver by Mac Wang.
 *
 * Copyright (C) 2002 Mac Wang <mac@os.nctu.edu.tw>
 */
#include <linux/config.h>
#include <linux/module.h>
#include <linux/init.h>

#include <linux/interrupt.h>
#include <linux/skbuff.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>

#include <asm/irq.h>
#include <asm/arch/hardware.h>
#include "eth_s3c4510b.h"
#define __DRIVER_NAME "Samsung S3C4510B Ethernet Driver version 0.2 (2004-06-13) <curt@cucy.com>"
#define _SDEBUG
#ifdef _SDEBUG
# define _DPRINTK(format, args...) \
        printk (KERN_INFO "%s():%05d "format".\n" , __FUNCTION__ , __LINE__ , ## args)
#else
# define _DPRINTK(format, args...)
#endif

#define _EPRINTK(format, args...) \
        printk (KERN_ERR "%s():%05d "format".\n" , __FUNCTION__ , __LINE__ , ## args)
struct eth_priv {

        /* Frame Descriptors */
        TX_FrameDesc  m_txFDbase[ETH_NTxFrames]; /* array of TX frame descriptors */
        RX_FrameDesc  m_rxFDbase[ETH_NRxFrames]; /* array of RX frame descriptors */
        volatile TX_FrameDesc  *m_curTX_FD;      /* current TX FD to queue */
        volatile TX_FrameDesc  *m_oldTX_FD;      /* oldest TX FD queued, but not transmitted */
        volatile RX_FrameDesc  *m_curRX_FD;      /* current RX FD to receive */

        struct net_device_stats  stats;
        spinlock_t               lock;
};
/* This struct must be 16 byte aligned */
struct skb_priv {
        volatile RX_FrameDesc  *m_RxFD;
        struct net_device      *m_dev;
        u32                     m_pad[2];
};
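/*
 * NOTE: skb_priv and __skb_head_offset appear to be scaffolding for an
 * skb-recycling destructor (see the commented-out __skb_destruct
 * declaration below); with that path disabled, skb_priv is currently
 * unused.
 */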
static s32 __skb_head_offset;
/**
 ** Avoid memcpy in RX handler by pre-allocating the socket buffers
 **/
// static void __skb_destruct( struct sk_buff *skb);
static void __skb_prepare( struct net_device *dev, volatile RX_FrameDesc *pRxFD)
{
        struct sk_buff *skb;

        skb = dev_alloc_skb( sizeof(ETHFrame) + 16 + 2);
        if ( unlikely(!skb)) {
                _EPRINTK(" unable to allocate skb...");
                return;
        }
        // _DPRINTK("allocate skb: 0x%08x", (u32)skb);

        skb->dev = dev;

        /* attach skb to FD */
        pRxFD->skb = skb;
        pRxFD->m_frameDataPtr.bf.dataPtr = (u32)skb->data | CACHE_DISABLE_MASK;
        pRxFD->m_frameDataPtr.bf.owner   = 0x1; /* BDMA owner */
}
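/*
 * Ownership convention used throughout this driver: owner == 1 means the
 * BDMA engine owns the descriptor/buffer, owner == 0 means the CPU does.
 * Buffer and descriptor addresses are handed to the BDMA through an
 * uncached alias (CACHE_DISABLE_MASK), presumably so the engine and the
 * CPU agree on memory contents without explicit cache maintenance.
 */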
static s32 RxFDinit( struct net_device *dev) {

        struct eth_priv *priv = (struct eth_priv *) dev->priv;
        s32 i;
        volatile RX_FrameDesc *rxFDbase;
        struct sk_buff *skb;

        /* determine skb initial headroom for later use in the skb destructor */
        skb = dev_alloc_skb(256);
        __skb_head_offset = skb_headroom( skb);
        dev_kfree_skb( skb);

        /* store start of Rx descriptors and set current */
        rxFDbase = priv->m_curRX_FD =
                (RX_FrameDesc *)((u32)priv->m_rxFDbase | CACHE_DISABLE_MASK);

        for ( i = 0; i < ETH_NRxFrames; i++) {
                __skb_prepare( dev, &rxFDbase[i]);
                priv->m_rxFDbase[i].m_reserved  = 0x0;
                priv->m_rxFDbase[i].m_status.ui = 0x0;
                priv->m_rxFDbase[i].m_nextFD    = &rxFDbase[i+1];
                // _DPRINTK("rxFDbase[%d]: 0x%08x", i, (u32)&rxFDbase[i]);
        }

        /* make the list circular */
        priv->m_rxFDbase[i-1].m_nextFD = &rxFDbase[0];

        outl( (unsigned int)rxFDbase, REG_BDMARXPTR);

        return 0;
}
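/*
 * Both FD init routines link descriptor i to i+1 inside the loop, which
 * briefly forms a one-past-the-end pointer for the last element; the
 * "make the list circular" fixup after each loop closes the ring before
 * the base address is written to the BDMA pointer register, so the
 * out-of-range link is never followed.
 */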
static s32 TxFDinit( struct net_device *dev) {

        struct eth_priv *priv = (struct eth_priv *) dev->priv;
        s32 i;
        volatile TX_FrameDesc *txFDbase;

        /* store start of Tx descriptors and set current */
        txFDbase = priv->m_curTX_FD = priv->m_oldTX_FD =
                (TX_FrameDesc *) ((u32)priv->m_txFDbase | CACHE_DISABLE_MASK);

        for ( i = 0; i < ETH_NTxFrames; i++) {
                priv->m_txFDbase[i].m_frameDataPtr.ui = 0x0; /* CPU owner */
                priv->m_txFDbase[i].m_opt.ui          = 0x0;
                priv->m_txFDbase[i].m_status.ui       = 0x0;
                priv->m_txFDbase[i].m_nextFD          = &txFDbase[i+1];
                // _DPRINTK("txFDbase[%d]: 0x%08x", i, (u32)&txFDbase[i]);
        }

        /* make the list circular */
        priv->m_txFDbase[i-1].m_nextFD = &txFDbase[0];

        outl( (unsigned int)txFDbase, REG_BDMATXPTR);

        return 0;
}
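/*
 * After init the two rings start in opposite states: every RX descriptor
 * is BDMA-owned with a fresh skb attached, while every TX descriptor is
 * CPU-owned and empty until __s3c4510b_start_xmit() queues a frame.
 */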
static irqreturn_t __s3c4510b_rx_int(int irq, void *dev_id, struct pt_regs *regs)
{
        struct sk_buff *skb;
        struct net_device *dev = (struct net_device *) dev_id;
        struct eth_priv *priv = (struct eth_priv *) dev->priv;
        volatile RX_FrameDesc *pRxFD;
        volatile RX_FrameDesc *cRxFD;

        spin_lock(&priv->lock);

        LED_SET(4);

        pRxFD = priv->m_curRX_FD;
        cRxFD = (RX_FrameDesc *)inl(REG_BDMARXPTR);

        /* clear received frame bit */
        outl( ETH_S_BRxRDF, REG_BDMASTAT);

        do {
                if ( likely( pRxFD->m_status.bf.good)) {
                        skb = pRxFD->skb;

                        __skb_prepare( dev, pRxFD);

                        /* skip the two invalid alignment bytes inserted by
                           the BDMA (see ETH_BRxWA10 in __s3c4510b_open) */
                        skb_reserve(skb, 2);
                        skb_put(skb, pRxFD->m_status.bf.len);
                        skb->protocol = eth_type_trans(skb, dev);
                        priv->stats.rx_packets++;
                        priv->stats.rx_bytes += pRxFD->m_status.bf.len;
                        netif_rx(skb);
                }
                else {
                        priv->stats.rx_errors++;
                        if( pRxFD->m_status.bf.overFlow)
                                priv->stats.rx_fifo_errors++;
                        if( pRxFD->m_status.bf.overMax)
                                priv->stats.rx_length_errors++;
                        if( pRxFD->m_status.bf.crcErr)
                                priv->stats.rx_crc_errors++;
                        if( pRxFD->m_status.bf.longErr)
                                priv->stats.rx_length_errors++;
                        if( pRxFD->m_status.bf.alignErr)
                                priv->stats.rx_frame_errors++;
                        /**
                         ** No good category for these errors
                         if( pRxFD->m_status.bf.parityErr)
                         **/
                }

                /* hand ownership back to the BDMA */
                pRxFD->m_frameDataPtr.bf.owner = 1;
                /* clear status */
                pRxFD->m_status.ui = 0x0;
                /* advance to next descriptor */
                pRxFD = pRxFD->m_nextFD;

        } while ( pRxFD != cRxFD);

        priv->m_curRX_FD = pRxFD;

        LED_CLR(4);

        spin_unlock(&priv->lock);

        return IRQ_HANDLED;
}
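/*
 * The RX handler runs in hard-IRQ context and feeds frames straight to
 * the stack with netif_rx(), the pre-NAPI input path; it walks the ring
 * from the driver's cursor up to the BDMA's current pointer
 * (REG_BDMARXPTR), so every completed frame is drained in one pass.
 */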
static irqreturn_t __s3c4510b_tx_int(int irq, void *dev_id, struct pt_regs *regs)
{
        struct net_device *dev = (struct net_device *) dev_id;
        struct eth_priv *priv = (struct eth_priv *) dev->priv;
        volatile TX_FrameDesc *pTxFD;
        volatile TX_FrameDesc *cTxFD;

        spin_lock(&priv->lock);

        pTxFD = priv->m_oldTX_FD;
        cTxFD = (TX_FrameDesc *)inl(REG_BDMATXPTR);

        while ( pTxFD != cTxFD) {

                if ( likely(pTxFD->m_status.bf.complete)) {
                        priv->stats.tx_packets++;
                }
                if( pTxFD->m_status.bf.exColl) {
                        _EPRINTK("TX excessive collisions detected");
                        priv->stats.tx_errors++;
                        priv->stats.collisions++;
                }
                if( pTxFD->m_status.bf.underRun) {
                        _EPRINTK("TX underrun detected");
                        priv->stats.tx_errors++;
                        priv->stats.tx_fifo_errors++;
                }
                if( pTxFD->m_status.bf.noCarrier) {
                        _EPRINTK("TX no carrier detected");
                        priv->stats.tx_errors++;
                        priv->stats.tx_carrier_errors++;
                }
                if( pTxFD->m_status.bf.lateColl) {
                        _EPRINTK("TX late collision detected");
                        priv->stats.tx_errors++;
                        priv->stats.tx_window_errors++;
                }
                if( pTxFD->m_status.bf.parityErr) {
                        _EPRINTK("TX parity error detected");
                        priv->stats.tx_errors++;
                        priv->stats.tx_aborted_errors++;
                }

                dev_kfree_skb_irq( pTxFD->skb);
                pTxFD = pTxFD->m_nextFD;
        }

        priv->m_oldTX_FD = pTxFD;

        LED_CLR(3);

        spin_unlock(&priv->lock);

        return IRQ_HANDLED;
}
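/*
 * TX completion walks the ring from the oldest queued descriptor up to
 * the BDMA's current position and releases each skb with
 * dev_kfree_skb_irq(), the variant that is safe in interrupt context.
 */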
static int __s3c4510b_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
        int len;
        u32 addr;
        struct eth_priv *priv = (struct eth_priv *) dev->priv;

        // _DPRINTK("entered with dev = 0x%08x", (unsigned int)dev);

        len = skb->len < ETH_ZLEN ? ETH_ZLEN : skb->len;
        dev->trans_start = jiffies;

        if ( unlikely( priv->m_curTX_FD->m_frameDataPtr.bf.owner)) {
                _EPRINTK("Ethernet TX Frame. CPU not owner");
                return -EBUSY;
        }

        /* this needs to be word aligned for the BDMA -- round down */
        addr = ((u32)skb->data & ~0x3) | CACHE_DISABLE_MASK;
        priv->m_curTX_FD->m_frameDataPtr.bf.dataPtr = addr;

        /* Set TX Frame flags */
        priv->m_curTX_FD->m_opt.bf.widgetAlign  = (u32)skb->data - addr; /* compensate for alignment */
        priv->m_curTX_FD->m_opt.bf.frameDataDir = 1;
        priv->m_curTX_FD->m_opt.bf.littleEndian = 1;
        priv->m_curTX_FD->m_opt.bf.macTxIrqEnbl = 1;
        priv->m_curTX_FD->m_opt.bf.no_crc       = 0;
        priv->m_curTX_FD->m_opt.bf.no_padding   = 0;

        /* Set TX Frame length */
        priv->m_curTX_FD->m_status.bf.len = len;

        priv->m_curTX_FD->skb = skb;

        /* Change ownership to BDMA */
        priv->m_curTX_FD->m_frameDataPtr.bf.owner = 1;

        /* Advance to the next Tx frame descriptor */
        priv->m_curTX_FD = priv->m_curTX_FD->m_nextFD;

        LED_SET(3);

        /* Enable MAC and BDMA Tx control register */
        outl( ETH_BTxBRST   |   /* BDMA Tx burst size 16 words */
              ETH_BTxMSL110 |   /* BDMA Tx wait to fill 6/8 of the BDMA */
              ETH_BTxSTSKO  |   /* BDMA Tx interrupt (stop) on non-owner TX FD */
              ETH_BTxEn,        /* BDMA Tx enable */
              REG_BDMATXCON);

        outl( ETH_EnComp    |   /* interrupt when the MAC transmits or discards packet */
              ETH_TxEn      |   /* MAC transmit enable */
              ETH_EnUnder   |   /* interrupt on underrun */
              ETH_EnNCarr   |   /* interrupt on no carrier */
              ETH_EnExColl  |   /* interrupt if 16 collisions occur */
              ETH_EnLateColl|   /* interrupt if collision occurs after 512 bit times (64 byte times) */
              ETH_EnTxPar,      /* interrupt if the MAC transmit FIFO has a parity error */
              REG_MACTXCON);

        return 0;
}
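/*
 * The BDMA can only fetch from word-aligned addresses, so the data
 * pointer is rounded down and the 0-3 byte remainder goes in widgetAlign
 * for the engine to skip.  The subtraction also picks up the
 * CACHE_DISABLE_MASK bits, but the narrow bitfield assignment truncates
 * them away, leaving just the low alignment bits.
 */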
static struct irqaction __rx_irqaction = {
        name:    "eth_rx",
        flags:   SA_INTERRUPT,
        handler: __s3c4510b_rx_int,
};

static struct irqaction __tx_irqaction = {
        name:    "eth_tx",
        flags:   SA_INTERRUPT,
        handler: __s3c4510b_tx_int,
};
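/*
 * The GNU "field:" initializer style above was still common in 2.6.x
 * code; dev_id is filled in at open time, just before each action is
 * installed with setup_irq().
 */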
static int __s3c4510b_open(struct net_device *dev)
{
        int status;

        /* Disable interrupts */
        INT_DISABLE(INT_BDMARX);
        INT_DISABLE(INT_MACTX);

        /**
         ** install RX ISR
         **/
        __rx_irqaction.dev_id = (void *)dev;
        status = setup_irq( INT_BDMARX, &__rx_irqaction);
        if ( unlikely(status)) {
                printk( KERN_ERR "Unable to hook irq %d for ethernet RX\n", INT_BDMARX);
                return status;
        }

        /**
         ** install TX ISR
         **/
        __tx_irqaction.dev_id = (void *)dev;
        status = setup_irq( INT_MACTX, &__tx_irqaction);
        if ( unlikely(status)) {
                printk( KERN_ERR "Unable to hook irq %d for ethernet TX\n", INT_MACTX);
                return status;
        }
        /* setup BDMA and MAC */
        outl( ETH_BRxRS, REG_BDMARXCON);    /* reset BDMA RX machine */
        outl( ETH_BTxRS, REG_BDMATXCON);    /* reset BDMA TX machine */
        outl( ETH_SwReset, REG_MACCON);     /* reset MAC machine */
        outl( sizeof( ETHFrame), REG_BDMARXLSZ);
        outl( ETH_FullDup, REG_MACCON);     /* enable full duplex */

        /* init frame descriptors */
        TxFDinit( dev);
        RxFDinit( dev);
        outl( (dev->dev_addr[0] << 24) |
              (dev->dev_addr[1] << 16) |
              (dev->dev_addr[2] <<  8) |
              (dev->dev_addr[3])       , REG_CAM_BASE);
        outl( (dev->dev_addr[4] << 24) |
              (dev->dev_addr[5] << 16) , REG_CAM_BASE + 4);

        outl( 0x0001, REG_CAMEN);
        outl( ETH_CompEn |      /* enable compare mode (check against the CAM) */
              ETH_BroadAcc,     /* accept broadcast packets */
              REG_CAMCON);
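        /*
         * The six address octets are packed big-endian into the first
         * six bytes of CAM entry 0; the REG_CAMEN write apparently
         * enables just that entry for the compare-mode match configured
         * above.
         */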
        INT_ENABLE(INT_BDMARX);
        INT_ENABLE(INT_MACTX);

        /* enable RX machinery */
        outl( ETH_BRxBRST  |    /* BDMA Rx burst size 16 words */
              ETH_BRxSTSKO |    /* BDMA Rx interrupt (stop) on non-owner RX FD */
              ETH_BRxMAINC |    /* BDMA Rx memory address increment */
              ETH_BRxDIE   |    /* BDMA Rx every received frame interrupt enable */
              ETH_BRxNLIE  |    /* BDMA Rx NULL list interrupt enable */
              ETH_BRxNOIE  |    /* BDMA Rx not owner interrupt enable */
              ETH_BRxLittle|    /* BDMA Rx little endian */
              ETH_BRxWA10  |    /* BDMA Rx word alignment - two invalid bytes */
              ETH_BRxEn,        /* BDMA Rx enable */
              REG_BDMARXCON);

        outl( ETH_RxEn     |    /* enable MAC RX */
              ETH_StripCRC |    /* check and strip CRC */
              ETH_EnCRCErr |    /* interrupt on CRC error */
              ETH_EnOver   |    /* interrupt on overflow error */
              ETH_EnLongErr|    /* interrupt on long frame error */
              ETH_EnRxPar,      /* interrupt on MAC FIFO parity error */
              REG_MACRXCON);

        netif_start_queue(dev);

        return 0;
}
static int __s3c4510b_stop(struct net_device *dev)
{
        /* disable IRQs */
        INT_DISABLE(INT_BDMARX);
        INT_DISABLE(INT_MACTX);

        outl( 0, REG_BDMATXCON);
        outl( 0, REG_BDMARXCON);
        outl( 0, REG_MACTXCON);
        outl( 0, REG_MACRXCON);
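        /*
         * NOTE: these handlers were installed with setup_irq() on
         * statically allocated irqactions; on kernels of this vintage
         * free_irq() may kfree() the irqaction it removes, so the
         * conventional request_irq()/free_irq() pairing would be the
         * safer combination here.
         */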
        free_irq(INT_BDMARX, dev);
        free_irq(INT_MACTX, dev);

        netif_stop_queue(dev);

        return 0;
}
struct net_device_stats *__s3c4510b_get_stats(struct net_device *dev)
{
        return &((struct eth_priv *)dev->priv)->stats;
}
/**
 * The init function, invoked by register_netdev()
 */
static int __s3c4510b_init(struct net_device *dev)
{
        ether_setup(dev);

        /* assign net_device methods */
        dev->open            = __s3c4510b_open;
        dev->stop            = __s3c4510b_stop;
        // dev->ioctl        = __s3c4510b_ioctl;
        dev->get_stats       = __s3c4510b_get_stats;
        // dev->tx_timeout   = __s3c4510b_tx_timeout;
        dev->hard_start_xmit = __s3c4510b_start_xmit;

        dev->irq            = INT_BDMARX;
        dev->tx_queue_len   = ETH_NTxFrames;
        dev->dma            = 0;
        dev->watchdog_timeo = HZ;

        /* set MAC address (hard-coded station address) */
        dev->dev_addr[0] = 0x00;
        dev->dev_addr[1] = 0x40;
        dev->dev_addr[2] = 0x95;
        dev->dev_addr[3] = 0x36;
        dev->dev_addr[4] = 0x35;
        dev->dev_addr[5] = 0x33;
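        /*
         * The station address above is a fixed default; a production
         * driver would normally take it from the bootloader or from
         * non-volatile storage instead.
         */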
        SET_MODULE_OWNER(dev);

        dev->priv = kmalloc(sizeof(struct eth_priv), GFP_KERNEL);
        if( dev->priv == NULL)
                return -ENOMEM;
        memset(dev->priv, 0, sizeof(struct eth_priv));
        spin_lock_init(&((struct eth_priv *) dev->priv)->lock);
        return 0;
}
struct net_device __s3c4510b_netdevs = {
        init: __s3c4510b_init,
};
static int __init __s3c4510b_init_module(void)
{
        int status = 0;

        printk(KERN_INFO "%s\n", __DRIVER_NAME);

        if( (status = register_netdev( &__s3c4510b_netdevs)))
                printk("S3C4510 eth: Error %i registering interface %s\n",
                       status, __s3c4510b_netdevs.name);

        return status;
}
static void __exit __s3c4510b_cleanup(void)
{
        unregister_netdev( &__s3c4510b_netdevs);
        kfree( __s3c4510b_netdevs.priv);  /* free priv only after the device is unregistered */
}
module_init(__s3c4510b_init_module);
module_exit(__s3c4510b_cleanup);

MODULE_DESCRIPTION("Samsung S3C4510B ethernet driver");
MODULE_AUTHOR("Curt Brune <curt@cucy.com>");
MODULE_LICENSE("GPL");