Import 2.3.18pre1
[davej-history.git] / drivers / net / z85230.c
bloba802170cefda605300ecc8aab0c5d6b60477deab
/*
 *	This program is free software; you can redistribute it and/or
 *	modify it under the terms of the GNU General Public License
 *	as published by the Free Software Foundation; either version
 *	2 of the License, or (at your option) any later version.
 *
 *	(c) Copyright 1998 Building Number Three Ltd
 *
 *	Development of this driver was funded by Equiinet Ltd
 *			http://www.equiinet.com
 *
 *	ChangeLog:
 *
 *	Asynchronous mode dropped for 2.2. For 2.3 we will attempt the
 *	unification of all the Z85x30 asynchronous drivers for real.
 *
 *	To Do:
 *
 *	Finish DMA mode support.
 *
 *	Performance
 *
 *	Z85230:
 *	Non DMA you want a 486DX50 or better to do 64Kbits. 9600 baud
 *	X.25 is not unrealistic on all machines. DMA mode can in theory
 *	handle T1/E1 quite nicely. In practice the limit seems to be about
 *	512Kbit->1Mbit depending on motherboard.
 *
 *	Z85C30:
 *	64K will take DMA, 9600 baud X.25 should be ok.
 *
 *	Z8530:
 *	Synchronous mode without DMA is unlikely to pass about 2400 baud.
 */
36 #include <linux/module.h>
37 #include <linux/kernel.h>
38 #include <linux/mm.h>
39 #include <linux/net.h>
40 #include <linux/skbuff.h>
41 #include <linux/netdevice.h>
42 #include <linux/if_arp.h>
43 #include <linux/delay.h>
44 #include <linux/ioport.h>
45 #include <asm/dma.h>
46 #include <asm/io.h>
47 #define RT_LOCK
48 #define RT_UNLOCK
49 #include <linux/spinlock.h>
51 #include "z85230.h"
52 #include "syncppp.h"
55 static spinlock_t z8530_buffer_lock = SPIN_LOCK_UNLOCKED;
58 * Provided port access methods. The Comtrol SV11 requires no delays
59 * between accesses and uses PC I/O. Some drivers may need a 5uS delay
62 extern __inline__ int z8530_read_port(int p)
64 u8 r=inb(Z8530_PORT_OF(p));
65 if(p&Z8530_PORT_SLEEP) /* gcc should figure this out efficiently ! */
66 udelay(5);
67 return r;
70 extern __inline__ void z8530_write_port(int p, u8 d)
72 outb(d,Z8530_PORT_OF(p));
73 if(p&Z8530_PORT_SLEEP)
74 udelay(5);
/* Forward declarations: frame completion handlers used by the IRQ paths */
static void z8530_rx_done(struct z8530_channel *c);
static void z8530_tx_done(struct z8530_channel *c);
84 * Port accesses
87 extern inline u8 read_zsreg(struct z8530_channel *c, u8 reg)
89 u8 r;
90 unsigned long flags;
91 save_flags(flags);
92 cli();
93 if(reg)
94 z8530_write_port(c->ctrlio, reg);
95 r=z8530_read_port(c->ctrlio);
96 restore_flags(flags);
97 return r;
100 extern inline u8 read_zsdata(struct z8530_channel *c)
102 u8 r;
103 r=z8530_read_port(c->dataio);
104 return r;
107 extern inline void write_zsreg(struct z8530_channel *c, u8 reg, u8 val)
109 unsigned long flags;
110 save_flags(flags);
111 cli();
112 if(reg)
113 z8530_write_port(c->ctrlio, reg);
114 z8530_write_port(c->ctrlio, val);
115 restore_flags(flags);
118 extern inline void write_zsctrl(struct z8530_channel *c, u8 val)
120 z8530_write_port(c->ctrlio, val);
123 extern inline void write_zsdata(struct z8530_channel *c, u8 val)
125 z8530_write_port(c->dataio, val);
129 * Register loading parameters for a dead port
132 u8 z8530_dead_port[]=
137 EXPORT_SYMBOL(z8530_dead_port);
140 * Register loading parameters for currently supported circuit types
145 * Data clocked by telco end. This is the correct data for the UK
146 * "kilostream" service, and most other similar services.
149 u8 z8530_hdlc_kilostream[]=
151 4, SYNC_ENAB|SDLC|X1CLK,
152 2, 0, /* No vector */
153 1, 0,
154 3, ENT_HM|RxCRC_ENAB|Rx8,
155 5, TxCRC_ENAB|RTS|TxENAB|Tx8|DTR,
156 9, 0, /* Disable interrupts */
157 6, 0xFF,
158 7, FLAG,
159 10, ABUNDER|NRZ|CRCPS,/*MARKIDLE ??*/
160 11, TCTRxCP,
161 14, DISDPLL,
162 15, DCDIE|SYNCIE|CTSIE|TxUIE|BRKIE,
163 1, EXT_INT_ENAB|TxINT_ENAB|INT_ALL_Rx,
164 9, NV|MIE|NORESET,
168 EXPORT_SYMBOL(z8530_hdlc_kilostream);
171 * As above but for enhanced chips.
174 u8 z8530_hdlc_kilostream_85230[]=
176 4, SYNC_ENAB|SDLC|X1CLK,
177 2, 0, /* No vector */
178 1, 0,
179 3, ENT_HM|RxCRC_ENAB|Rx8,
180 5, TxCRC_ENAB|RTS|TxENAB|Tx8|DTR,
181 9, 0, /* Disable interrupts */
182 6, 0xFF,
183 7, FLAG,
184 10, ABUNDER|NRZ|CRCPS, /* MARKIDLE?? */
185 11, TCTRxCP,
186 14, DISDPLL,
187 15, DCDIE|SYNCIE|CTSIE|TxUIE|BRKIE,
188 1, EXT_INT_ENAB|TxINT_ENAB|INT_ALL_Rx,
189 9, NV|MIE|NORESET,
190 23, 3, /* Extended mode AUTO TX and EOM*/
195 EXPORT_SYMBOL(z8530_hdlc_kilostream_85230);
198 * Flush the FIFO
201 static void z8530_flush_fifo(struct z8530_channel *c)
203 read_zsreg(c, R1);
204 read_zsreg(c, R1);
205 read_zsreg(c, R1);
206 read_zsreg(c, R1);
207 if(c->dev->type==Z85230)
209 read_zsreg(c, R1);
210 read_zsreg(c, R1);
211 read_zsreg(c, R1);
212 read_zsreg(c, R1);
216 /* Sets or clears DTR/RTS on the requested line */
218 static void z8530_rtsdtr(struct z8530_channel *c, int set)
220 if (set)
221 c->regs[5] |= (RTS | DTR);
222 else
223 c->regs[5] &= ~(RTS | DTR);
224 write_zsreg(c, R5, c->regs[5]);
228 * Receive handler. This is much like the async one but not quite the
229 * same or as complex
231 * Note: Its intended that this handler can easily be separated from
232 * the main code to run realtime. That'll be needed for some machines
233 * (eg to ever clock 64kbits on a sparc ;)).
235 * The RT_LOCK macros don't do anything now. Keep the code covered
236 * by them as short as possible in all circumstances - clocks cost
237 * baud. The interrupt handler is assumed to be atomic w.r.t. to
238 * other code - this is true in the RT case too.
240 * We only cover the sync cases for this. If you want 2Mbit async
241 * do it yourself but consider medical assistance first.
243 * This non DMA synchronous mode is portable code.
246 static void z8530_rx(struct z8530_channel *c)
248 u8 ch,stat;
250 while(1)
252 /* FIFO empty ? */
253 if(!(read_zsreg(c, R0)&1))
254 break;
255 ch=read_zsdata(c);
256 stat=read_zsreg(c, R1);
259 * Overrun ?
261 if(c->count < c->max)
263 *c->dptr++=ch;
264 c->count++;
267 if(stat&END_FR)
271 * Error ?
273 if(stat&(Rx_OVR|CRC_ERR))
275 /* Rewind the buffer and return */
276 if(c->skb)
277 c->dptr=c->skb->data;
278 c->count=0;
279 if(stat&Rx_OVR)
281 printk(KERN_WARNING "%s: overrun\n", c->dev->name);
282 c->rx_overrun++;
284 if(stat&CRC_ERR)
286 c->rx_crc_err++;
287 /* printk("crc error\n"); */
289 /* Shove the frame upstream */
291 else
293 z8530_rx_done(c);
294 write_zsctrl(c, RES_Rx_CRC);
299 * Clear irq
301 write_zsctrl(c, ERR_RES);
302 write_zsctrl(c, RES_H_IUS);
307 * Z8530 transmit interrupt handler
310 static void z8530_tx(struct z8530_channel *c)
312 while(c->txcount)
314 /* FIFO full ? */
315 if(!(read_zsreg(c, R0)&4))
316 break;
317 c->txcount--;
319 * Shovel out the byte
321 write_zsreg(c, R8, *c->tx_ptr++);
322 write_zsctrl(c, RES_H_IUS);
323 /* We are about to underflow */
324 if(c->txcount==0)
326 write_zsctrl(c, RES_EOM_L);
327 write_zsreg(c, R10, c->regs[10]&~ABUNDER);
329 return;
333 * End of frame TX - fire another one
336 write_zsctrl(c, RES_Tx_P);
338 z8530_tx_done(c);
339 /* write_zsreg(c, R8, *c->tx_ptr++); */
340 write_zsctrl(c, RES_H_IUS);
343 static void z8530_status(struct z8530_channel *chan)
345 u8 status=read_zsreg(chan, R0);
346 u8 altered=chan->status^status;
348 chan->status=status;
350 if(status&TxEOM)
352 /* printk("%s: Tx underrun.\n", chan->dev->name); */
353 chan->stats.tx_fifo_errors++;
354 write_zsctrl(chan, ERR_RES);
355 z8530_tx_done(chan);
358 if(altered&DCD)
360 if(status&DCD)
362 printk(KERN_INFO "%s: DCD raised\n", chan->dev->name);
363 write_zsreg(chan, R3, chan->regs[3]|RxENABLE);
364 if(chan->netdevice)
365 sppp_reopen(chan->netdevice);
367 else
369 printk(KERN_INFO "%s: DCD lost\n", chan->dev->name);
370 write_zsreg(chan, R3, chan->regs[3]&~RxENABLE);
371 z8530_flush_fifo(chan);
375 write_zsctrl(chan, RES_EXT_INT);
376 write_zsctrl(chan, RES_H_IUS);
379 struct z8530_irqhandler z8530_sync=
381 z8530_rx,
382 z8530_tx,
383 z8530_status
386 EXPORT_SYMBOL(z8530_sync);
389 * Non bus mastering DMA interfaces for the Z8x30 devices. This
390 * is really pretty PC specific.
393 static void z8530_dma_rx(struct z8530_channel *chan)
395 if(chan->rxdma_on)
397 /* Special condition check only */
398 u8 status;
400 read_zsreg(chan, R7);
401 read_zsreg(chan, R6);
403 status=read_zsreg(chan, R1);
404 if(status&END_FR)
406 z8530_rx_done(chan); /* Fire up the next one */
408 write_zsctrl(chan, ERR_RES);
409 write_zsctrl(chan, RES_H_IUS);
411 else
413 /* DMA is off right now, drain the slow way */
414 z8530_rx(chan);
418 static void z8530_dma_tx(struct z8530_channel *chan)
420 if(!chan->dma_tx)
422 printk("Hey who turned the DMA off?\n");
423 z8530_tx(chan);
424 return;
426 /* This shouldnt occur in DMA mode */
427 printk(KERN_ERR "DMA tx ??\n");
428 z8530_tx(chan);
431 static void z8530_dma_status(struct z8530_channel *chan)
433 unsigned long flags;
434 u8 status=read_zsreg(chan, R0);
435 u8 altered=chan->status^status;
437 chan->status=status;
439 if(chan->dma_tx)
441 if(status&TxEOM)
443 flags=claim_dma_lock();
444 /* Transmit underrun */
445 disable_dma(chan->txdma);
446 clear_dma_ff(chan->txdma);
447 chan->txdma_on=0;
448 release_dma_lock(flags);
449 z8530_tx_done(chan);
452 if(altered&DCD)
454 if(status&DCD)
456 printk(KERN_INFO "%s: DCD raised\n", chan->dev->name);
457 write_zsreg(chan, R3, chan->regs[3]|RxENABLE);
458 if(chan->netdevice)
459 sppp_reopen(chan->netdevice);
461 else
463 printk(KERN_INFO "%s:DCD lost\n", chan->dev->name);
464 write_zsreg(chan, R3, chan->regs[3]&~RxENABLE);
465 z8530_flush_fifo(chan);
468 write_zsctrl(chan, RES_EXT_INT);
469 write_zsctrl(chan, RES_H_IUS);
472 struct z8530_irqhandler z8530_dma_sync=
474 z8530_dma_rx,
475 z8530_dma_tx,
476 z8530_dma_status
479 EXPORT_SYMBOL(z8530_dma_sync);
481 struct z8530_irqhandler z8530_txdma_sync=
483 z8530_rx,
484 z8530_dma_tx,
485 z8530_dma_status
488 EXPORT_SYMBOL(z8530_txdma_sync);
491 * Interrupt vectors for a Z8530 that is in 'parked' mode.
492 * For machines with PCI Z85x30 cards, or level triggered interrupts
493 * (eg the MacII) we must clear the interrupt cause or die.
497 static void z8530_rx_clear(struct z8530_channel *c)
500 * Data and status bytes
502 u8 stat;
504 read_zsdata(c);
505 stat=read_zsreg(c, R1);
507 if(stat&END_FR)
508 write_zsctrl(c, RES_Rx_CRC);
510 * Clear irq
512 write_zsctrl(c, ERR_RES);
513 write_zsctrl(c, RES_H_IUS);
516 static void z8530_tx_clear(struct z8530_channel *c)
518 write_zsctrl(c, RES_Tx_P);
519 write_zsctrl(c, RES_H_IUS);
522 static void z8530_status_clear(struct z8530_channel *chan)
524 u8 status=read_zsreg(chan, R0);
525 if(status&TxEOM)
526 write_zsctrl(chan, ERR_RES);
527 write_zsctrl(chan, RES_EXT_INT);
528 write_zsctrl(chan, RES_H_IUS);
531 struct z8530_irqhandler z8530_nop=
533 z8530_rx_clear,
534 z8530_tx_clear,
535 z8530_status_clear
539 EXPORT_SYMBOL(z8530_nop);
542 * A Z85[2]30 device has stuck its hand in the air for attention
545 void z8530_interrupt(int irq, void *dev_id, struct pt_regs *regs)
547 struct z8530_dev *dev=dev_id;
548 u8 intr;
549 static volatile int locker=0;
550 int work=0;
552 if(locker)
554 printk(KERN_ERR "IRQ re-enter\n");
555 return;
557 locker=1;
559 while(++work<5000)
561 struct z8530_irqhandler *irqs=dev->chanA.irqs;
563 intr = read_zsreg(&dev->chanA, R3);
564 if(!(intr & (CHARxIP|CHATxIP|CHAEXT|CHBRxIP|CHBTxIP|CHBEXT)))
565 break;
567 /* This holds the IRQ status. On the 8530 you must read it from chan
568 A even though it applies to the whole chip */
570 /* Now walk the chip and see what it is wanting - it may be
571 an IRQ for someone else remember */
573 if(intr & (CHARxIP|CHATxIP|CHAEXT))
575 if(intr&CHARxIP)
576 irqs->rx(&dev->chanA);
577 if(intr&CHATxIP)
578 irqs->tx(&dev->chanA);
579 if(intr&CHAEXT)
580 irqs->status(&dev->chanA);
583 irqs=dev->chanB.irqs;
585 if(intr & (CHBRxIP|CHBTxIP|CHBEXT))
587 if(intr&CHBRxIP)
588 irqs->rx(&dev->chanB);
589 if(intr&CHBTxIP)
590 irqs->tx(&dev->chanB);
591 if(intr&CHBEXT)
592 irqs->status(&dev->chanB);
595 if(work==5000)
596 printk(KERN_ERR "%s: interrupt jammed - abort(0x%X)!\n", dev->name, intr);
597 /* Ok all done */
598 locker=0;
601 EXPORT_SYMBOL(z8530_interrupt);
/* Power-on register image copied into each channel by z8530_init() */
static char reg_init[16]=
{
	0,0,0,0,
	0,0,0,0,
	0,0,0,0,
	0x55,0,0,0
};
612 int z8530_sync_open(struct net_device *dev, struct z8530_channel *c)
614 c->sync = 1;
615 c->mtu = dev->mtu+64;
616 c->count = 0;
617 c->skb = NULL;
618 c->skb2 = NULL;
619 c->irqs = &z8530_sync;
620 /* This loads the double buffer up */
621 z8530_rx_done(c); /* Load the frame ring */
622 z8530_rx_done(c); /* Load the backup frame */
623 z8530_rtsdtr(c,1);
624 c->dma_tx = 0;
625 c->regs[R1]|=TxINT_ENAB;
626 write_zsreg(c, R1, c->regs[R1]);
627 write_zsreg(c, R3, c->regs[R3]|RxENABLE);
628 return 0;
632 EXPORT_SYMBOL(z8530_sync_open);
634 int z8530_sync_close(struct net_device *dev, struct z8530_channel *c)
636 u8 chk;
637 c->irqs = &z8530_nop;
638 c->max = 0;
639 c->sync = 0;
641 chk=read_zsreg(c,R0);
642 write_zsreg(c, R3, c->regs[R3]);
643 z8530_rtsdtr(c,0);
644 return 0;
647 EXPORT_SYMBOL(z8530_sync_close);
649 int z8530_sync_dma_open(struct net_device *dev, struct z8530_channel *c)
651 unsigned long flags;
653 c->sync = 1;
654 c->mtu = dev->mtu+64;
655 c->count = 0;
656 c->skb = NULL;
657 c->skb2 = NULL;
659 * Load the DMA interfaces up
661 c->rxdma_on = 0;
662 c->txdma_on = 0;
665 * Allocate the DMA flip buffers
668 c->rx_buf[0]=kmalloc(c->mtu, GFP_KERNEL|GFP_DMA);
669 if(c->rx_buf[0]==NULL)
670 return -ENOBUFS;
671 c->rx_buf[1]=kmalloc(c->mtu, GFP_KERNEL|GFP_DMA);
672 if(c->rx_buf[1]==NULL)
674 kfree(c->rx_buf[0]);
675 c->rx_buf[0]=NULL;
676 return -ENOBUFS;
679 c->tx_dma_buf[0]=kmalloc(c->mtu, GFP_KERNEL|GFP_DMA);
680 if(c->tx_dma_buf[0]==NULL)
682 kfree(c->rx_buf[0]);
683 kfree(c->rx_buf[1]);
684 c->rx_buf[0]=NULL;
685 return -ENOBUFS;
687 c->tx_dma_buf[1]=kmalloc(c->mtu, GFP_KERNEL|GFP_DMA);
688 if(c->tx_dma_buf[1]==NULL)
690 kfree(c->tx_dma_buf[0]);
691 kfree(c->rx_buf[0]);
692 kfree(c->rx_buf[1]);
693 c->rx_buf[0]=NULL;
694 c->rx_buf[1]=NULL;
695 c->tx_dma_buf[0]=NULL;
696 return -ENOBUFS;
698 c->tx_dma_used=0;
699 c->dma_tx = 1;
700 c->dma_num=0;
701 c->dma_ready=1;
704 * Enable DMA control mode
708 * TX DMA via DIR/REQ
711 c->regs[R14]|= DTRREQ;
712 write_zsreg(c, R14, c->regs[R14]);
714 c->regs[R1]&= ~TxINT_ENAB;
715 write_zsreg(c, R1, c->regs[R1]);
718 * RX DMA via W/Req
721 c->regs[R1]|= WT_FN_RDYFN;
722 c->regs[R1]|= WT_RDY_RT;
723 c->regs[R1]|= INT_ERR_Rx;
724 c->regs[R1]&= ~TxINT_ENAB;
725 write_zsreg(c, R1, c->regs[R1]);
726 c->regs[R1]|= WT_RDY_ENAB;
727 write_zsreg(c, R1, c->regs[R1]);
730 * DMA interrupts
734 * Set up the DMA configuration
737 flags=claim_dma_lock();
739 disable_dma(c->rxdma);
740 clear_dma_ff(c->rxdma);
741 set_dma_mode(c->rxdma, DMA_MODE_READ|0x10);
742 set_dma_addr(c->rxdma, virt_to_bus(c->rx_buf[0]));
743 set_dma_count(c->rxdma, c->mtu);
744 enable_dma(c->rxdma);
746 disable_dma(c->txdma);
747 clear_dma_ff(c->txdma);
748 set_dma_mode(c->txdma, DMA_MODE_WRITE);
749 disable_dma(c->txdma);
751 release_dma_lock(flags);
754 * Select the DMA interrupt handlers
757 c->rxdma_on = 1;
758 c->txdma_on = 1;
759 c->tx_dma_used = 1;
761 c->irqs = &z8530_dma_sync;
762 z8530_rtsdtr(c,1);
763 write_zsreg(c, R3, c->regs[R3]|RxENABLE);
764 return 0;
767 EXPORT_SYMBOL(z8530_sync_dma_open);
769 int z8530_sync_dma_close(struct net_device *dev, struct z8530_channel *c)
771 u8 chk;
772 unsigned long flags;
774 c->irqs = &z8530_nop;
775 c->max = 0;
776 c->sync = 0;
779 * Disable the PC DMA channels
782 flags=claim_dma_lock();
783 disable_dma(c->rxdma);
784 clear_dma_ff(c->rxdma);
786 c->rxdma_on = 0;
788 disable_dma(c->txdma);
789 clear_dma_ff(c->txdma);
790 release_dma_lock(flags);
792 c->txdma_on = 0;
793 c->tx_dma_used = 0;
796 * Disable DMA control mode
799 c->regs[R1]&= ~WT_RDY_ENAB;
800 write_zsreg(c, R1, c->regs[R1]);
801 c->regs[R1]&= ~(WT_RDY_RT|WT_FN_RDYFN|INT_ERR_Rx);
802 c->regs[R1]|= INT_ALL_Rx;
803 write_zsreg(c, R1, c->regs[R1]);
804 c->regs[R14]&= ~DTRREQ;
805 write_zsreg(c, R14, c->regs[R14]);
807 if(c->rx_buf[0])
809 kfree(c->rx_buf[0]);
810 c->rx_buf[0]=NULL;
812 if(c->rx_buf[1])
814 kfree(c->rx_buf[1]);
815 c->rx_buf[1]=NULL;
817 if(c->tx_dma_buf[0])
819 kfree(c->tx_dma_buf[0]);
820 c->tx_dma_buf[0]=NULL;
822 if(c->tx_dma_buf[1])
824 kfree(c->tx_dma_buf[1]);
825 c->tx_dma_buf[1]=NULL;
827 chk=read_zsreg(c,R0);
828 write_zsreg(c, R3, c->regs[R3]);
829 z8530_rtsdtr(c,0);
830 return 0;
833 EXPORT_SYMBOL(z8530_sync_dma_close);
835 int z8530_sync_txdma_open(struct net_device *dev, struct z8530_channel *c)
837 unsigned long flags;
839 printk("Opening sync interface for TX-DMA\n");
840 c->sync = 1;
841 c->mtu = dev->mtu+64;
842 c->count = 0;
843 c->skb = NULL;
844 c->skb2 = NULL;
847 * Load the PIO receive ring
850 z8530_rx_done(c);
851 z8530_rx_done(c);
854 * Load the DMA interfaces up
857 c->rxdma_on = 0;
858 c->txdma_on = 0;
860 c->tx_dma_buf[0]=kmalloc(c->mtu, GFP_KERNEL|GFP_DMA);
861 if(c->tx_dma_buf[0]==NULL)
863 kfree(c->rx_buf[0]);
864 kfree(c->rx_buf[1]);
865 c->rx_buf[0]=NULL;
866 return -ENOBUFS;
868 c->tx_dma_buf[1]=kmalloc(c->mtu, GFP_KERNEL|GFP_DMA);
869 if(c->tx_dma_buf[1]==NULL)
871 kfree(c->tx_dma_buf[0]);
872 kfree(c->rx_buf[0]);
873 kfree(c->rx_buf[1]);
874 c->rx_buf[0]=NULL;
875 c->rx_buf[1]=NULL;
876 c->tx_dma_buf[0]=NULL;
877 return -ENOBUFS;
879 c->tx_dma_used=0;
880 c->dma_num=0;
881 c->dma_ready=1;
882 c->dma_tx = 1;
885 * Enable DMA control mode
889 * TX DMA via DIR/REQ
891 c->regs[R14]|= DTRREQ;
892 write_zsreg(c, R14, c->regs[R14]);
894 c->regs[R1]&= ~TxINT_ENAB;
895 write_zsreg(c, R1, c->regs[R1]);
898 * Set up the DMA configuration
901 flags = claim_dma_lock();
903 disable_dma(c->txdma);
904 clear_dma_ff(c->txdma);
905 set_dma_mode(c->txdma, DMA_MODE_WRITE);
906 disable_dma(c->txdma);
908 release_dma_lock(flags);
911 * Select the DMA interrupt handlers
914 c->rxdma_on = 0;
915 c->txdma_on = 1;
916 c->tx_dma_used = 1;
918 c->irqs = &z8530_txdma_sync;
919 printk("Loading RX\n");
920 z8530_rtsdtr(c,1);
921 printk("Rx interrupts ON\n");
922 write_zsreg(c, R3, c->regs[R3]|RxENABLE);
923 return 0;
926 EXPORT_SYMBOL(z8530_sync_txdma_open);
928 int z8530_sync_txdma_close(struct net_device *dev, struct z8530_channel *c)
930 unsigned long flags;
931 u8 chk;
932 c->irqs = &z8530_nop;
933 c->max = 0;
934 c->sync = 0;
937 * Disable the PC DMA channels
940 flags = claim_dma_lock();
942 disable_dma(c->txdma);
943 clear_dma_ff(c->txdma);
944 c->txdma_on = 0;
945 c->tx_dma_used = 0;
947 release_dma_lock(flags);
950 * Disable DMA control mode
953 c->regs[R1]&= ~WT_RDY_ENAB;
954 write_zsreg(c, R1, c->regs[R1]);
955 c->regs[R1]&= ~(WT_RDY_RT|WT_FN_RDYFN|INT_ERR_Rx);
956 c->regs[R1]|= INT_ALL_Rx;
957 write_zsreg(c, R1, c->regs[R1]);
958 c->regs[R14]&= ~DTRREQ;
959 write_zsreg(c, R14, c->regs[R14]);
961 if(c->tx_dma_buf[0])
963 kfree(c->tx_dma_buf[0]);
964 c->tx_dma_buf[0]=NULL;
966 if(c->tx_dma_buf[1])
968 kfree(c->tx_dma_buf[1]);
969 c->tx_dma_buf[1]=NULL;
971 chk=read_zsreg(c,R0);
972 write_zsreg(c, R3, c->regs[R3]);
973 z8530_rtsdtr(c,0);
974 return 0;
978 EXPORT_SYMBOL(z8530_sync_txdma_close);
/*
 *	Describe a Z8530 in a standard format. We must pass the I/O as
 *	the port offset isnt predictable. The main reason for this function
 *	is to try and get a common format of report.
 *
 *	Indexed by the Z8530/Z85C30/Z85230 type enumeration.
 */
static char *z8530_type_name[]={
	"Z8530",
	"Z85C30",
	"Z85230"
};
992 void z8530_describe(struct z8530_dev *dev, char *mapping, int io)
994 printk(KERN_INFO "%s: %s found at %s 0x%X, IRQ %d.\n",
995 dev->name,
996 z8530_type_name[dev->type],
997 mapping,
998 Z8530_PORT_OF(io),
999 dev->irq);
1002 EXPORT_SYMBOL(z8530_describe);
1005 * Configure up a Z8530
1009 int z8530_init(struct z8530_dev *dev)
1011 /* NOP the interrupt handlers first - we might get a
1012 floating IRQ transition when we reset the chip */
1013 dev->chanA.irqs=&z8530_nop;
1014 dev->chanB.irqs=&z8530_nop;
1015 /* Reset the chip */
1016 write_zsreg(&dev->chanA, R9, 0xC0);
1017 udelay(200);
1018 /* Now check its valid */
1019 write_zsreg(&dev->chanA, R12, 0xAA);
1020 if(read_zsreg(&dev->chanA, R12)!=0xAA)
1021 return -ENODEV;
1022 write_zsreg(&dev->chanA, R12, 0x55);
1023 if(read_zsreg(&dev->chanA, R12)!=0x55)
1024 return -ENODEV;
1026 dev->type=Z8530;
1029 * See the application note.
1032 write_zsreg(&dev->chanA, R15, 0x01);
1035 * If we can set the low bit of R15 then
1036 * the chip is enhanced.
1039 if(read_zsreg(&dev->chanA, R15)==0x01)
1041 /* This C30 versus 230 detect is from Klaus Kudielka's dmascc */
1042 /* Put a char in the fifo */
1043 write_zsreg(&dev->chanA, R8, 0);
1044 if(read_zsreg(&dev->chanA, R0)&Tx_BUF_EMP)
1045 dev->type = Z85230; /* Has a FIFO */
1046 else
1047 dev->type = Z85C30; /* Z85C30, 1 byte FIFO */
1051 * The code assumes R7' and friends are
1052 * off. Use write_zsext() for these and keep
1053 * this bit clear.
1056 write_zsreg(&dev->chanA, R15, 0);
1059 * At this point it looks like the chip is behaving
1062 memcpy(dev->chanA.regs, reg_init, 16);
1063 memcpy(dev->chanB.regs, reg_init ,16);
1065 return 0;
1069 EXPORT_SYMBOL(z8530_init);
1071 int z8530_shutdown(struct z8530_dev *dev)
1073 /* Reset the chip */
1074 dev->chanA.irqs=&z8530_nop;
1075 dev->chanB.irqs=&z8530_nop;
1076 write_zsreg(&dev->chanA, R9, 0xC0);
1077 udelay(100);
1078 return 0;
1081 EXPORT_SYMBOL(z8530_shutdown);
1084 * Load a Z8530 channel up from the system data
1085 * We use +16 to indicate the 'prime' registers
1088 int z8530_channel_load(struct z8530_channel *c, u8 *rtable)
1090 while(*rtable!=255)
1092 int reg=*rtable++;
1093 if(reg>0x0F)
1094 write_zsreg(c, R15, c->regs[15]|1);
1095 write_zsreg(c, reg&0x0F, *rtable);
1096 if(reg>0x0F)
1097 write_zsreg(c, R15, c->regs[15]&~1);
1098 c->regs[reg]=*rtable++;
1100 c->rx_function=z8530_null_rx;
1101 c->skb=NULL;
1102 c->tx_skb=NULL;
1103 c->tx_next_skb=NULL;
1104 c->mtu=1500;
1105 c->max=0;
1106 c->count=0;
1107 c->status=0; /* Fixme - check DCD now */
1108 c->sync=1;
1109 write_zsreg(c, R3, c->regs[R3]|RxENABLE);
1110 return 0;
1113 EXPORT_SYMBOL(z8530_channel_load);
1117 * Higher level shovelling - transmit chains
1120 static void z8530_tx_begin(struct z8530_channel *c)
1122 unsigned long flags;
1123 if(c->tx_skb)
1124 return;
1126 c->tx_skb=c->tx_next_skb;
1127 c->tx_next_skb=NULL;
1128 c->tx_ptr=c->tx_next_ptr;
1130 mark_bh(NET_BH);
1131 if(c->tx_skb==NULL)
1133 /* Idle on */
1134 if(c->dma_tx)
1136 flags=claim_dma_lock();
1137 disable_dma(c->txdma);
1139 * Check if we crapped out.
1141 if(get_dma_residue(c->txdma))
1143 c->stats.tx_dropped++;
1144 c->stats.tx_fifo_errors++;
1146 release_dma_lock(flags);
1148 c->txcount=0;
1150 else
1152 c->txcount=c->tx_skb->len;
1155 if(c->dma_tx)
1158 * FIXME. DMA is broken for the original 8530,
1159 * on the older parts we need to set a flag and
1160 * wait for a further TX interrupt to fire this
1161 * stage off
1164 flags=claim_dma_lock();
1165 disable_dma(c->txdma);
1168 * These two are needed by the 8530/85C30
1169 * and must be issued when idling.
1172 if(c->dev->type!=Z85230)
1174 write_zsctrl(c, RES_Tx_CRC);
1175 write_zsctrl(c, RES_EOM_L);
1177 write_zsreg(c, R10, c->regs[10]&~ABUNDER);
1178 clear_dma_ff(c->txdma);
1179 set_dma_addr(c->txdma, virt_to_bus(c->tx_ptr));
1180 set_dma_count(c->txdma, c->txcount);
1181 enable_dma(c->txdma);
1182 release_dma_lock(flags);
1183 write_zsctrl(c, RES_EOM_L);
1184 write_zsreg(c, R5, c->regs[R5]|TxENAB);
1186 else
1188 save_flags(flags);
1189 cli();
1190 /* ABUNDER off */
1191 write_zsreg(c, R10, c->regs[10]);
1192 write_zsctrl(c, RES_Tx_CRC);
1193 //??? write_zsctrl(c, RES_EOM_L);
1195 while(c->txcount && (read_zsreg(c,R0)&Tx_BUF_EMP))
1197 write_zsreg(c, R8, *c->tx_ptr++);
1198 c->txcount--;
1200 restore_flags(flags);
1206 static void z8530_tx_done(struct z8530_channel *c)
1208 unsigned long flags;
1209 struct sk_buff *skb;
1211 spin_lock_irqsave(&z8530_buffer_lock, flags);
1212 c->netdevice->tbusy=0;
1213 /* Actually this can happen.*/
1214 if(c->tx_skb==NULL)
1216 spin_unlock_irqrestore(&z8530_buffer_lock, flags);
1217 return;
1219 skb=c->tx_skb;
1220 c->tx_skb=NULL;
1221 z8530_tx_begin(c);
1222 spin_unlock_irqrestore(&z8530_buffer_lock, flags);
1223 c->stats.tx_packets++;
1224 c->stats.tx_bytes+=skb->len;
1225 dev_kfree_skb(skb);
/*
 *	Higher level shovelling - receive chains
 *
 *	Default receive callback: nobody is listening, drop the frame.
 */
void z8530_null_rx(struct z8530_channel *c, struct sk_buff *skb)
{
	kfree_skb(skb);
}

EXPORT_SYMBOL(z8530_null_rx);
1239 static void z8530_rx_done(struct z8530_channel *c)
1241 struct sk_buff *skb;
1242 int ct;
1245 * Is our receive engine in DMA mode
1248 if(c->rxdma_on)
1251 * Save the ready state and the buffer currently
1252 * being used as the DMA target
1255 int ready=c->dma_ready;
1256 unsigned char *rxb=c->rx_buf[c->dma_num];
1257 unsigned long flags;
1260 * Complete this DMA. Neccessary to find the length
1263 flags=claim_dma_lock();
1265 disable_dma(c->rxdma);
1266 clear_dma_ff(c->rxdma);
1267 c->rxdma_on=0;
1268 ct=c->mtu-get_dma_residue(c->rxdma);
1269 if(ct<0)
1270 ct=2; /* Shit happens.. */
1271 c->dma_ready=0;
1274 * Normal case: the other slot is free, start the next DMA
1275 * into it immediately.
1278 if(ready)
1280 c->dma_num^=1;
1281 set_dma_mode(c->rxdma, DMA_MODE_READ|0x10);
1282 set_dma_addr(c->rxdma, virt_to_bus(c->rx_buf[c->dma_num]));
1283 set_dma_count(c->rxdma, c->mtu);
1284 c->rxdma_on = 1;
1285 enable_dma(c->rxdma);
1286 /* Stop any frames that we missed the head of
1287 from passing */
1288 write_zsreg(c, R0, RES_Rx_CRC);
1290 else
1291 /* Can't occur as we dont reenable the DMA irq until
1292 after the flip is done */
1293 printk("DMA flip overrun!\n");
1295 release_dma_lock(flags);
1298 * Shove the old buffer into an sk_buff. We can't DMA
1299 * directly into one on a PC - it might be above the 16Mb
1300 * boundary. Optimisation - we could check to see if we
1301 * can avoid the copy. Optimisation 2 - make the memcpy
1302 * a copychecksum.
1305 skb=dev_alloc_skb(ct);
1306 if(skb==NULL)
1308 c->stats.rx_dropped++;
1309 printk(KERN_WARNING "%s: Memory squeeze.\n", c->netdevice->name);
1311 else
1313 skb_put(skb, ct);
1314 memcpy(skb->data, rxb, ct);
1315 c->stats.rx_packets++;
1316 c->stats.rx_bytes+=ct;
1318 c->dma_ready=1;
1320 else
1322 RT_LOCK;
1323 skb=c->skb;
1326 * The game we play for non DMA is similar. We want to
1327 * get the controller set up for the next packet as fast
1328 * as possible. We potentially only have one byte + the
1329 * fifo length for this. Thus we want to flip to the new
1330 * buffer and then mess around copying and allocating
1331 * things. For the current case it doesn't matter but
1332 * if you build a system where the sync irq isnt blocked
1333 * by the kernel IRQ disable then you need only block the
1334 * sync IRQ for the RT_LOCK area.
1337 ct=c->count;
1339 c->skb = c->skb2;
1340 c->count = 0;
1341 c->max = c->mtu;
1342 if(c->skb)
1344 c->dptr = c->skb->data;
1345 c->max = c->mtu;
1347 else
1349 c->count= 0;
1350 c->max = 0;
1352 RT_UNLOCK;
1354 c->skb2 = dev_alloc_skb(c->mtu);
1355 if(c->skb2==NULL)
1356 printk(KERN_WARNING "%s: memory squeeze.\n",
1357 c->netdevice->name);
1358 else
1360 skb_put(c->skb2,c->mtu);
1362 c->stats.rx_packets++;
1363 c->stats.rx_bytes+=ct;
1367 * If we received a frame we must now process it.
1369 if(skb)
1371 skb_trim(skb, ct);
1372 c->rx_function(c,skb);
1374 else
1376 c->stats.rx_dropped++;
1377 printk(KERN_ERR "%s: Lost a frame\n", c->netdevice->name);
1382 * Cannot DMA over a 64K boundary on a PC
1385 extern inline int spans_boundary(struct sk_buff *skb)
1387 unsigned long a=(unsigned long)skb->data;
1388 a^=(a+skb->len);
1389 if(a&0x00010000) /* If the 64K bit is different.. */
1391 printk("spanner\n");
1392 return 1;
1394 return 0;
1398 * Queue a packet for transmission. Because we have rather
1399 * hard to hit interrupt latencies for the Z85230 per packet
1400 * even in DMA mode we do the flip to DMA buffer if needed here
1401 * not in the IRQ.
1404 int z8530_queue_xmit(struct z8530_channel *c, struct sk_buff *skb)
1406 unsigned long flags;
1407 if(c->tx_next_skb)
1409 skb->dev->tbusy=1;
1410 return 1;
1413 /* PC SPECIFIC - DMA limits */
1416 * If we will DMA the transmit and its gone over the ISA bus
1417 * limit, then copy to the flip buffer
1420 if(c->dma_tx && ((unsigned long)(virt_to_bus(skb->data+skb->len))>=16*1024*1024 || spans_boundary(skb)))
1423 * Send the flip buffer, and flip the flippy bit.
1424 * We don't care which is used when just so long as
1425 * we never use the same buffer twice in a row. Since
1426 * only one buffer can be going out at a time the other
1427 * has to be safe.
1429 c->tx_next_ptr=c->tx_dma_buf[c->tx_dma_used];
1430 c->tx_dma_used^=1; /* Flip temp buffer */
1431 memcpy(c->tx_next_ptr, skb->data, skb->len);
1433 else
1434 c->tx_next_ptr=skb->data;
1435 RT_LOCK;
1436 c->tx_next_skb=skb;
1437 RT_UNLOCK;
1439 spin_lock_irqsave(&z8530_buffer_lock, flags);
1440 z8530_tx_begin(c);
1441 spin_unlock_irqrestore(&z8530_buffer_lock, flags);
1442 return 0;
1445 EXPORT_SYMBOL(z8530_queue_xmit);
1447 struct net_device_stats *z8530_get_stats(struct z8530_channel *c)
1449 return &c->stats;
1452 EXPORT_SYMBOL(z8530_get_stats);
#ifdef MODULE

/*
 *	Module support
 */
int init_module(void)
{
	printk(KERN_INFO "Generic Z85C30/Z85230 interface driver v0.02\n");
	return 0;
}

void cleanup_module(void)
{
}

#endif