/*
 * linux/drivers/net/arm/eth_s3c4510b.c
 *
 * Copyright (c) 2004 Cucy Systems (http://www.cucy.com)
 * Curt Brune <curt@cucy.com>
 *
 * Re-written from scratch for 2.6.x after studying the original 2.4.x
 * driver by Mac Wang.
 *
 * Copyright (C) 2002 Mac Wang <mac@os.nctu.edu.tw>
 */
#include <linux/module.h>
#include <linux/init.h>

#include <linux/interrupt.h>
#include <linux/skbuff.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>

#include <asm/irq.h>
#include <asm/arch/hardware.h>
#include "eth_s3c4510b.h"
#define __DRIVER_NAME "Samsung S3C4510B Ethernet Driver version 0.2 (2004-06-13) <curt@cucy.com>"
#define _SDEBUG
#ifdef _SDEBUG
# define _DPRINTK(format, args...) \
        printk(KERN_INFO "%s():%05d " format ".\n", __FUNCTION__, __LINE__, ## args)
#else
# define _DPRINTK(format, args...)
#endif

#define _EPRINTK(format, args...) \
        printk(KERN_ERR "%s():%05d " format ".\n", __FUNCTION__, __LINE__, ## args)
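
/*
 * Per-device private data.  The TX and RX frame descriptor rings are
 * embedded here; the BDMA engine is handed their addresses OR'd with
 * CACHE_DISABLE_MASK, which appears to select an uncached alias so the
 * CPU and the BDMA engine see a coherent view of the descriptors.
 */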
struct eth_priv {

        /* Frame Descriptors */
        TX_FrameDesc m_txFDbase[ETH_NTxFrames]; /* array of TX frame descriptors */
        RX_FrameDesc m_rxFDbase[ETH_NRxFrames]; /* array of RX frame descriptors */
        volatile TX_FrameDesc *m_curTX_FD;      /* current TX FD to queue */
        volatile TX_FrameDesc *m_oldTX_FD;      /* oldest TX FD queued, but not transmitted */
        volatile RX_FrameDesc *m_curRX_FD;      /* current RX FD to receive */

        struct net_device_stats stats;
        spinlock_t lock;
};
/* This struct must be 16 byte aligned */
struct skb_priv {
        volatile RX_FrameDesc *m_RxFD;
        struct net_device     *m_dev;
        u32                    m_pad[2];
};

static s32 __skb_head_offset;
/**
 ** Avoid memcpy in RX handler by pre-allocating the socket buffers
 **/
// static void __skb_destruct( struct sk_buff *skb);
static void __skb_prepare( struct net_device *dev, volatile RX_FrameDesc *pRxFD)
{
        struct sk_buff *skb;

        skb = dev_alloc_skb( sizeof(ETHFrame) + 16 + 2);
        if ( unlikely(!skb)) {
                _EPRINTK(" unable to allocate skb...");
                return;
        }

        // _DPRINTK("allocate skb: 0x%08x", (u32)skb);

        skb->dev = dev;

        /* attach skb to FD */
        pRxFD->skb = skb;
        pRxFD->m_frameDataPtr.bf.dataPtr = (u32)skb->data | CACHE_DISABLE_MASK;
        pRxFD->m_frameDataPtr.bf.owner   = 0x1; /* BDMA owner */
}
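
/*
 * Build the RX descriptor ring: attach a fresh skb to every descriptor,
 * link the descriptors into a circular list through the uncached alias,
 * and point the BDMA RX engine at the base of the ring.
 */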
static s32 RxFDinit( struct net_device *dev) {

        struct eth_priv *priv = (struct eth_priv *) dev->priv;
        s32 i;
        volatile RX_FrameDesc *rxFDbase;
        struct sk_buff *skb;

        /* determine skb initial headroom for later use in the skb destructor */
        skb = dev_alloc_skb(256);
        __skb_head_offset = skb_headroom( skb);
        dev_kfree_skb( skb);

        /* store start of Rx descriptors and set current */
        rxFDbase = priv->m_curRX_FD =
                (RX_FrameDesc *)((u32)priv->m_rxFDbase | CACHE_DISABLE_MASK);

        for ( i = 0; i < ETH_NRxFrames; i++) {
                __skb_prepare( dev, &rxFDbase[i]);
                priv->m_rxFDbase[i].m_reserved  = 0x0;
                priv->m_rxFDbase[i].m_status.ui = 0x0;
                priv->m_rxFDbase[i].m_nextFD    = &rxFDbase[i+1];
                // _DPRINTK("rxFDbase[%d]: 0x%08x", i, (u32)&rxFDbase[i]);
        }

        /* make the list circular */
        priv->m_rxFDbase[i-1].m_nextFD = &rxFDbase[0];

        outl( (unsigned int)rxFDbase, REG_BDMARXPTR);

        return 0;
}
static s32 TxFDinit( struct net_device *dev) {

        struct eth_priv *priv = (struct eth_priv *) dev->priv;
        s32 i;
        volatile TX_FrameDesc *txFDbase;

        /* store start of Tx descriptors and set current */
        txFDbase = priv->m_curTX_FD = priv->m_oldTX_FD =
                (TX_FrameDesc *) ((u32)priv->m_txFDbase | CACHE_DISABLE_MASK);

        for ( i = 0; i < ETH_NTxFrames; i++) {
                priv->m_txFDbase[i].m_frameDataPtr.ui = 0x0; /* CPU owner */
                priv->m_txFDbase[i].m_opt.ui          = 0x0;
                priv->m_txFDbase[i].m_status.ui       = 0x0;
                priv->m_txFDbase[i].m_nextFD          = &txFDbase[i+1];
                // _DPRINTK("txFDbase[%d]: 0x%08x", i, (u32)&txFDbase[i]);
        }

        /* make the list circular */
        priv->m_txFDbase[i-1].m_nextFD = &txFDbase[0];

        outl( (unsigned int)txFDbase, REG_BDMATXPTR);

        return 0;
}
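
/*
 * RX interrupt: walk the ring from the last descriptor we serviced up to
 * the descriptor the BDMA engine is currently using (read back from
 * REG_BDMARXPTR).  Good frames are handed to the stack via netif_rx()
 * and immediately replaced with a fresh skb, so no memcpy is needed.
 */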
static irqreturn_t __s3c4510b_rx_int(int irq, void *dev_id, struct pt_regs *regs)
{
        struct sk_buff *skb;
        struct net_device *dev = (struct net_device *) dev_id;
        struct eth_priv *priv = (struct eth_priv *) dev->priv;
        volatile RX_FrameDesc *pRxFD;
        volatile RX_FrameDesc *cRxFD;

        spin_lock(&priv->lock);

        LED_SET(4);

        pRxFD = priv->m_curRX_FD;
        cRxFD = (RX_FrameDesc *)inl(REG_BDMARXPTR);

        /* clear received frame bit */
        outl( ETH_S_BRxRDF, REG_BDMASTAT);

        do {
                if ( likely( pRxFD->m_status.bf.good)) {
                        skb = pRxFD->skb;

                        /* attach a fresh skb to this FD before handing the old one up */
                        __skb_prepare( dev, pRxFD);

                        /* reserve two bytes used by protocol layers */
                        skb_reserve(skb, 2);
                        skb_put(skb, pRxFD->m_status.bf.len);
                        skb->protocol = eth_type_trans(skb, dev);
                        priv->stats.rx_packets++;
                        priv->stats.rx_bytes += pRxFD->m_status.bf.len;
                        netif_rx(skb);
                }
                else {
                        priv->stats.rx_errors++;
                        if( pRxFD->m_status.bf.overFlow)
                                priv->stats.rx_fifo_errors++;
                        if( pRxFD->m_status.bf.overMax)
                                priv->stats.rx_length_errors++;
                        if( pRxFD->m_status.bf.crcErr)
                                priv->stats.rx_crc_errors++;
                        if( pRxFD->m_status.bf.longErr)
                                priv->stats.rx_length_errors++;
                        if( pRxFD->m_status.bf.alignErr)
                                priv->stats.rx_frame_errors++;
                        /**
                         ** No good category for these errors:
                         ** if( pRxFD->m_status.bf.parityErr)
                         **/
                }

                /* hand the descriptor back to the BDMA engine */
                pRxFD->m_frameDataPtr.bf.owner = 1;
                /* clear status */
                pRxFD->m_status.ui = 0x0;
                /* advance to next descriptor */
                pRxFD = pRxFD->m_nextFD;

        } while ( pRxFD != cRxFD);

        priv->m_curRX_FD = pRxFD;

        LED_CLR(4);

        spin_unlock(&priv->lock);

        return IRQ_HANDLED;
}
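
/*
 * TX completion interrupt: reclaim descriptors from the oldest queued
 * frame up to the descriptor the BDMA engine is currently working on,
 * updating statistics and freeing each transmitted skb.
 */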
static irqreturn_t __s3c4510b_tx_int(int irq, void *dev_id, struct pt_regs *regs)
{
        struct net_device *dev = (struct net_device *) dev_id;
        struct eth_priv *priv = (struct eth_priv *) dev->priv;
        volatile TX_FrameDesc *pTxFD;
        volatile TX_FrameDesc *cTxFD;

        spin_lock(&priv->lock);

        pTxFD = priv->m_oldTX_FD;
        cTxFD = (TX_FrameDesc *)inl(REG_BDMATXPTR);

        while ( pTxFD != cTxFD) {

                if ( likely(pTxFD->m_status.bf.complete)) {
                        priv->stats.tx_packets++;
                }
                if( pTxFD->m_status.bf.exColl) {
                        _EPRINTK("TX collision detected");
                        priv->stats.tx_errors++;
                        priv->stats.collisions++;
                }
                if( pTxFD->m_status.bf.underRun) {
                        _EPRINTK("TX underrun detected");
                        priv->stats.tx_errors++;
                        priv->stats.tx_fifo_errors++;
                }
                if( pTxFD->m_status.bf.noCarrier) {
                        _EPRINTK("TX no carrier detected");
                        priv->stats.tx_errors++;
                        priv->stats.tx_carrier_errors++;
                }
                if( pTxFD->m_status.bf.lateColl) {
                        _EPRINTK("TX late collision detected");
                        priv->stats.tx_errors++;
                        priv->stats.tx_window_errors++;
                }
                if( pTxFD->m_status.bf.parityErr) {
                        _EPRINTK("TX parity error detected");
                        priv->stats.tx_errors++;
                        priv->stats.tx_aborted_errors++;
                }

                dev_kfree_skb_irq( pTxFD->skb);
                pTxFD = pTxFD->m_nextFD;
        }

        priv->m_oldTX_FD = pTxFD;

        LED_CLR(3);

        spin_unlock(&priv->lock);

        return IRQ_HANDLED;
}
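
/*
 * Queue one frame for transmission.  The BDMA engine reads the frame
 * directly out of the skb (no copy); the data pointer is rounded down
 * to a word boundary and the widgetAlign field tells the engine how
 * many leading bytes to skip.
 */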
static int __s3c4510b_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
        int len;
        u32 addr;
        struct eth_priv *priv = (struct eth_priv *) dev->priv;

        // _DPRINTK("entered with dev = 0x%08x", (unsigned int)dev);

        len = skb->len < ETH_ZLEN ? ETH_ZLEN : skb->len;
        dev->trans_start = jiffies;

        if ( unlikely( priv->m_curTX_FD->m_frameDataPtr.bf.owner)) {
                _EPRINTK("Ethernet TX Frame. CPU not owner");
                return -EBUSY;
        }

        /* this needs to be word aligned for the BDMA -- round down */
        addr = ((u32)skb->data & ~0x3) | CACHE_DISABLE_MASK;
        priv->m_curTX_FD->m_frameDataPtr.bf.dataPtr = addr;

        /* Set TX Frame flags */
        priv->m_curTX_FD->m_opt.bf.widgetAlign  = (u32)skb->data - addr; /* compensate for alignment */
        priv->m_curTX_FD->m_opt.bf.frameDataDir = 1;
        priv->m_curTX_FD->m_opt.bf.littleEndian = 1;
        priv->m_curTX_FD->m_opt.bf.macTxIrqEnbl = 1;
        priv->m_curTX_FD->m_opt.bf.no_crc       = 0;
        priv->m_curTX_FD->m_opt.bf.no_padding   = 0;

        /* Set TX Frame length */
        priv->m_curTX_FD->m_status.bf.len = len;

        priv->m_curTX_FD->skb = skb;

        /* Change ownership to BDMA */
        priv->m_curTX_FD->m_frameDataPtr.bf.owner = 1;

        /* Advance to the next Tx frame descriptor for the next caller */
        priv->m_curTX_FD = priv->m_curTX_FD->m_nextFD;

        LED_SET(3);

        /* Enable MAC and BDMA Tx control registers */
        outl( ETH_BTxBRST   |   /* BDMA Tx burst size 16 words */
              ETH_BTxMSL110 |   /* BDMA Tx wait to fill 6/8 of the BDMA */
              ETH_BTxSTSKO  |   /* BDMA Tx interrupt(Stop) on non-owner TX FD */
              ETH_BTxEn,        /* BDMA Tx Enable */
              REG_BDMATXCON);

        outl( ETH_EnComp    |   /* interrupt when the MAC transmits or discards packet */
              ETH_TxEn      |   /* MAC transmit enable */
              ETH_EnUnder   |   /* interrupt on underrun */
              ETH_EnNCarr   |   /* interrupt on no carrier */
              ETH_EnExColl  |   /* interrupt if 16 collisions occur */
              ETH_EnLateColl|   /* interrupt if collision occurs after 512 bit times (64 byte times) */
              ETH_EnTxPar,      /* interrupt if the MAC transmit FIFO has a parity error */
              REG_MACTXCON);

        return 0;
}
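
/*
 * The RX and TX ISRs are registered with setup_irq() using statically
 * allocated irqaction structures (old-style designated initializers).
 */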
static struct irqaction __rx_irqaction = {
        name:    "eth_rx",
        flags:   SA_INTERRUPT,
        handler: __s3c4510b_rx_int,
};

static struct irqaction __tx_irqaction = {
        name:    "eth_tx",
        flags:   SA_INTERRUPT,
        handler: __s3c4510b_tx_int,
};
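
/*
 * Open the device: hook the RX/TX interrupts, reset and program the
 * BDMA and MAC engines, build the descriptor rings, load the station
 * address into the CAM, and finally enable reception.
 */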
static int __s3c4510b_open(struct net_device *dev)
{
        unsigned long status;

        /* Disable interrupts */
        INT_DISABLE(INT_BDMARX);
        INT_DISABLE(INT_MACTX);

        /**
         ** install RX ISR
         **/
        __rx_irqaction.dev_id = (void *)dev;
        status = setup_irq( INT_BDMARX, &__rx_irqaction);
        if ( unlikely(status)) {
                printk( KERN_ERR "Unable to hook irq %d for ethernet RX\n", INT_BDMARX);
                return status;
        }

        /**
         ** install TX ISR
         **/
        __tx_irqaction.dev_id = (void *)dev;
        status = setup_irq( INT_MACTX, &__tx_irqaction);
        if ( unlikely(status)) {
                printk( KERN_ERR "Unable to hook irq %d for ethernet TX\n", INT_MACTX);
                return status;
        }

        /* setup BDMA and MAC */
        outl( ETH_BRxRS,   REG_BDMARXCON);      /* reset BDMA RX machine */
        outl( ETH_BTxRS,   REG_BDMATXCON);      /* reset BDMA TX machine */
        outl( ETH_SwReset, REG_MACCON);         /* reset MAC machine */
        outl( sizeof( ETHFrame), REG_BDMARXLSZ);
        outl( ETH_FullDup, REG_MACCON);         /* enable full duplex */

        /* init frame descriptors */
        TxFDinit( dev);
        RxFDinit( dev);

        outl( (dev->dev_addr[0] << 24) |
              (dev->dev_addr[1] << 16) |
              (dev->dev_addr[2] <<  8) |
              (dev->dev_addr[3]), REG_CAM_BASE);
        outl( (dev->dev_addr[4] << 24) |
              (dev->dev_addr[5] << 16), REG_CAM_BASE + 4);

        outl( 0x0001, REG_CAMEN);
        outl( ETH_CompEn |      /* enable compare mode (check against the CAM) */
              ETH_BroadAcc,     /* accept broadcast packets */
              REG_CAMCON);

        INT_ENABLE(INT_BDMARX);
        INT_ENABLE(INT_MACTX);

        /* enable RX machinery */
        outl( ETH_BRxBRST  |    /* BDMA Rx burst size 16 words */
              ETH_BRxSTSKO |    /* BDMA Rx interrupt(Stop) on non-owner RX FD */
              ETH_BRxMAINC |    /* BDMA Rx memory address increment */
              ETH_BRxDIE   |    /* BDMA Rx every received frame interrupt enable */
              ETH_BRxNLIE  |    /* BDMA Rx NULL list interrupt enable */
              ETH_BRxNOIE  |    /* BDMA Rx not-owner interrupt enable */
              ETH_BRxLittle|    /* BDMA Rx little endian */
              ETH_BRxWA10  |    /* BDMA Rx word alignment - two invalid bytes */
              ETH_BRxEn,        /* BDMA Rx enable */
              REG_BDMARXCON);

        outl( ETH_RxEn     |    /* enable MAC RX */
              ETH_StripCRC |    /* check and strip CRC */
              ETH_EnCRCErr |    /* interrupt on CRC error */
              ETH_EnOver   |    /* interrupt on overflow error */
              ETH_EnLongErr|    /* interrupt on long frame error */
              ETH_EnRxPar,      /* interrupt on MAC FIFO parity error */
              REG_MACRXCON);

        netif_start_queue(dev);

        return 0;
}
static int __s3c4510b_stop(struct net_device *dev)
{
        // Disable irqs
        INT_DISABLE(INT_BDMARX);
        INT_DISABLE(INT_MACTX);

        outl( 0, REG_BDMATXCON);
        outl( 0, REG_BDMARXCON);
        outl( 0, REG_MACTXCON);
        outl( 0, REG_MACRXCON);

        free_irq(INT_BDMARX, dev);
        free_irq(INT_MACTX, dev);

        netif_stop_queue(dev);

        return 0;
}
struct net_device_stats *__s3c4510b_get_stats(struct net_device *dev)
{
        return &((struct eth_priv *)dev->priv)->stats;
}
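
/*
 * Note: the station address assigned below is hardcoded; presumably a
 * production build would take the MAC address from the bootloader
 * environment instead.
 */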
/*
 * The init function, invoked by register_netdev()
 */
static int __s3c4510b_init(struct net_device *dev)
{
        ether_setup(dev);

        /* assign net_device methods */
        dev->open            = __s3c4510b_open;
        dev->stop            = __s3c4510b_stop;
        // dev->ioctl        = __s3c4510b_ioctl;
        dev->get_stats       = __s3c4510b_get_stats;
        // dev->tx_timeout   = __s3c4510b_tx_timeout;
        dev->hard_start_xmit = __s3c4510b_start_xmit;

        dev->irq             = INT_BDMARX;
        dev->tx_queue_len    = ETH_NTxFrames;
        dev->dma             = 0;
        dev->watchdog_timeo  = HZ;

        /* set MAC address */
        dev->dev_addr[0] = 0x00;
        dev->dev_addr[1] = 0x40;
        dev->dev_addr[2] = 0x95;
        dev->dev_addr[3] = 0x36;
        dev->dev_addr[4] = 0x35;
        dev->dev_addr[5] = 0x33;

        SET_MODULE_OWNER(dev);

        dev->priv = kmalloc(sizeof(struct eth_priv), GFP_KERNEL);
        if( dev->priv == NULL)
                return -ENOMEM;
        memset(dev->priv, 0, sizeof(struct eth_priv));
        spin_lock_init(&((struct eth_priv *) dev->priv)->lock);

        return 0;
}
struct net_device __s3c4510b_netdevs = {
        init: __s3c4510b_init,
};
static int __init __s3c4510b_init_module(void)
{
        int status = 0;

        printk(KERN_INFO "%s\n", __DRIVER_NAME);

        if( (status = register_netdev( &__s3c4510b_netdevs)))
                printk("S3C4510 eth: Error %i registering interface %s\n",
                       status, __s3c4510b_netdevs.name);

        return status;
}
static void __exit __s3c4510b_cleanup(void)
{
        /* unregister the device before freeing its private data */
        unregister_netdev( &__s3c4510b_netdevs);
        kfree( __s3c4510b_netdevs.priv);
        return;
}
module_init(__s3c4510b_init_module);
module_exit(__s3c4510b_cleanup);

MODULE_DESCRIPTION("Samsung S3C4510B ethernet driver");
MODULE_AUTHOR("Curt Brune <curt@cucy.com>");
MODULE_LICENSE("GPL");