Linux-2.6.12-rc2
[linux-2.6/linux-acpi-2.6/ibm-acpi-2.6.git] / drivers / net / hamradio / dmascc.c
blobf3269b70a8c59e10b44961038f796640c85c8998
1 /*
2 * Driver for high-speed SCC boards (those with DMA support)
3 * Copyright (C) 1997-2000 Klaus Kudielka
5 * S5SCC/DMA support by Janko Koleznik S52HI
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License as published by
9 * the Free Software Foundation; either version 2 of the License, or
10 * (at your option) any later version.
12 * This program is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 * GNU General Public License for more details.
17 * You should have received a copy of the GNU General Public License
18 * along with this program; if not, write to the Free Software
19 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
23 #include <linux/module.h>
24 #include <linux/delay.h>
25 #include <linux/errno.h>
26 #include <linux/if_arp.h>
27 #include <linux/in.h>
28 #include <linux/init.h>
29 #include <linux/interrupt.h>
30 #include <linux/ioport.h>
31 #include <linux/kernel.h>
32 #include <linux/mm.h>
33 #include <linux/netdevice.h>
34 #include <linux/rtnetlink.h>
35 #include <linux/sockios.h>
36 #include <linux/workqueue.h>
37 #include <asm/atomic.h>
38 #include <asm/bitops.h>
39 #include <asm/dma.h>
40 #include <asm/io.h>
41 #include <asm/irq.h>
42 #include <asm/uaccess.h>
43 #include <net/ax25.h>
44 #include "z8530.h"
47 /* Number of buffers per channel */
49 #define NUM_TX_BUF 2 /* NUM_TX_BUF >= 1 (min. 2 recommended) */
50 #define NUM_RX_BUF 6 /* NUM_RX_BUF >= 1 (min. 2 recommended) */
51 #define BUF_SIZE 1576 /* BUF_SIZE >= mtu + hard_header_len */
54 /* Cards supported */
56 #define HW_PI { "Ottawa PI", 0x300, 0x20, 0x10, 8, \
57 0, 8, 1843200, 3686400 }
58 #define HW_PI2 { "Ottawa PI2", 0x300, 0x20, 0x10, 8, \
59 0, 8, 3686400, 7372800 }
60 #define HW_TWIN { "Gracilis PackeTwin", 0x200, 0x10, 0x10, 32, \
61 0, 4, 6144000, 6144000 }
62 #define HW_S5 { "S5SCC/DMA", 0x200, 0x10, 0x10, 32, \
63 0, 8, 4915200, 9830400 }
65 #define HARDWARE { HW_PI, HW_PI2, HW_TWIN, HW_S5 }
67 #define TMR_0_HZ 25600 /* Frequency of timer 0 */
69 #define TYPE_PI 0
70 #define TYPE_PI2 1
71 #define TYPE_TWIN 2
72 #define TYPE_S5 3
73 #define NUM_TYPES 4
75 #define MAX_NUM_DEVS 32
78 /* SCC chips supported */
80 #define Z8530 0
81 #define Z85C30 1
82 #define Z85230 2
84 #define CHIPNAMES { "Z8530", "Z85C30", "Z85230" }
87 /* I/O registers */
89 /* 8530 registers relative to card base */
90 #define SCCB_CMD 0x00
91 #define SCCB_DATA 0x01
92 #define SCCA_CMD 0x02
93 #define SCCA_DATA 0x03
95 /* 8253/8254 registers relative to card base */
96 #define TMR_CNT0 0x00
97 #define TMR_CNT1 0x01
98 #define TMR_CNT2 0x02
99 #define TMR_CTRL 0x03
101 /* Additional PI/PI2 registers relative to card base */
102 #define PI_DREQ_MASK 0x04
104 /* Additional PackeTwin registers relative to card base */
105 #define TWIN_INT_REG 0x08
106 #define TWIN_CLR_TMR1 0x09
107 #define TWIN_CLR_TMR2 0x0a
108 #define TWIN_SPARE_1 0x0b
109 #define TWIN_DMA_CFG 0x08
110 #define TWIN_SERIAL_CFG 0x09
111 #define TWIN_DMA_CLR_FF 0x0a
112 #define TWIN_SPARE_2 0x0b
115 /* PackeTwin I/O register values */
117 /* INT_REG */
118 #define TWIN_SCC_MSK 0x01
119 #define TWIN_TMR1_MSK 0x02
120 #define TWIN_TMR2_MSK 0x04
121 #define TWIN_INT_MSK 0x07
123 /* SERIAL_CFG */
124 #define TWIN_DTRA_ON 0x01
125 #define TWIN_DTRB_ON 0x02
126 #define TWIN_EXTCLKA 0x04
127 #define TWIN_EXTCLKB 0x08
128 #define TWIN_LOOPA_ON 0x10
129 #define TWIN_LOOPB_ON 0x20
130 #define TWIN_EI 0x80
132 /* DMA_CFG */
133 #define TWIN_DMA_HDX_T1 0x08
134 #define TWIN_DMA_HDX_R1 0x0a
135 #define TWIN_DMA_HDX_T3 0x14
136 #define TWIN_DMA_HDX_R3 0x16
137 #define TWIN_DMA_FDX_T3R1 0x1b
138 #define TWIN_DMA_FDX_T1R3 0x1d
141 /* Status values */
143 #define IDLE 0
144 #define TX_HEAD 1
145 #define TX_DATA 2
146 #define TX_PAUSE 3
147 #define TX_TAIL 4
148 #define RTS_OFF 5
149 #define WAIT 6
150 #define DCD_ON 7
151 #define RX_ON 8
152 #define DCD_OFF 9
155 /* Ioctls */
157 #define SIOCGSCCPARAM SIOCDEVPRIVATE
158 #define SIOCSSCCPARAM (SIOCDEVPRIVATE+1)
161 /* Data types */
163 struct scc_param {
/* Per-channel tunables, read/written from user space via the
   SIOCGSCCPARAM / SIOCSSCCPARAM ioctls (see scc_ioctl). */
164 int pclk_hz; /* frequency of BRG input (don't change) */
165 int brg_tc; /* BRG terminal count; BRG disabled if < 0 */
166 int nrzi; /* 0 (nrz), 1 (nrzi) */
167 int clocks; /* see dmascc_cfg documentation */
168 int txdelay; /* [1/TMR_0_HZ] */
169 int txtimeout; /* [1/HZ] */
170 int txtail; /* [1/TMR_0_HZ] */
171 int waittime; /* [1/TMR_0_HZ] */
172 int slottime; /* [1/TMR_0_HZ] */
173 int persist; /* 1 ... 256 */
174 int dma; /* -1 (disable), 0, 1, 3 */
175 int txpause; /* [1/TMR_0_HZ] */
176 int rtsoff; /* [1/TMR_0_HZ] */
177 int dcdon; /* [1/TMR_0_HZ] */
178 int dcdoff; /* [1/TMR_0_HZ] */
181 struct scc_hardware {
/* Static description of one supported board type (see HARDWARE). */
182 char *name; /* board name used in probe/log messages */
183 int io_region; /* first possible I/O base address */
184 int io_delta; /* spacing between consecutive base addresses */
185 int io_size; /* size of the I/O region to request/release */
186 int num_devs; /* max number of boards of this type */
187 int scc_offset; /* SCC registers relative to card base */
188 int tmr_offset; /* 8253/8254 timer registers relative to card base */
189 int tmr_hz; /* timer input clock frequency */
190 int pclk_hz; /* SCC PCLK frequency (copied to param.pclk_hz) */
193 struct scc_priv {
/* Per-channel driver state; two of these live in each scc_info. */
194 int type; /* adapter type: TYPE_PI ... TYPE_S5 */
195 int chip; /* SCC variant: Z8530, Z85C30 or Z85230 */
196 struct net_device *dev; /* back pointer to our net device */
197 struct scc_info *info; /* shared per-board state */
198 struct net_device_stats stats;
199 int channel; /* 0 = SCC channel A, 1 = channel B */
200 int card_base, scc_cmd, scc_data;
201 int tmr_cnt, tmr_ctrl, tmr_mode; /* timer ports + mode byte for start_timer */
202 struct scc_param param; /* user-tunable parameters (ioctl) */
203 char rx_buf[NUM_RX_BUF][BUF_SIZE]; /* RX ring of DMA-able buffers */
204 int rx_len[NUM_RX_BUF]; /* payload length of each RX buffer */
205 int rx_ptr; /* write offset into current RX buffer (PIO mode) */
206 struct work_struct rx_work; /* bottom half delivering RX frames (rx_bh) */
207 int rx_head, rx_tail, rx_count; /* RX ring indices / fill count */
208 int rx_over; /* overrun flag: 1 = FIFO overrun, 2 = frame too long */
209 char tx_buf[NUM_TX_BUF][BUF_SIZE]; /* TX ring of DMA-able buffers */
210 int tx_len[NUM_TX_BUF]; /* payload length of each TX buffer */
211 int tx_ptr; /* read offset into current TX buffer (PIO mode) */
212 int tx_head, tx_tail, tx_count; /* TX ring indices / fill count */
213 int state; /* TX state machine: IDLE ... DCD_OFF */
214 unsigned long tx_start; /* jiffies when current TX began */
215 int rr0; /* last value read from SCC RR0 (status) */
216 spinlock_t *register_lock; /* Per scc_info */
217 spinlock_t ring_lock; /* protects the rx/tx ring indices */
220 struct scc_info {
/* Per-board state shared by the two SCC channels. */
221 int irq_used; /* refcount of open channels holding the IRQ */
222 int twin_serial_cfg; /* shadow of the PackeTwin SERIAL_CFG register */
223 struct net_device *dev[2]; /* one net device per channel */
224 struct scc_priv priv[2]; /* per-channel private state */
225 struct scc_info *next; /* next board in the global `first` list */
226 spinlock_t register_lock; /* Per device register lock */
230 /* Function declarations */
231 static int setup_adapter(int card_base, int type, int n) __init;
233 static void write_scc(struct scc_priv *priv, int reg, int val);
234 static void write_scc_data(struct scc_priv *priv, int val, int fast);
235 static int read_scc(struct scc_priv *priv, int reg);
236 static int read_scc_data(struct scc_priv *priv);
238 static int scc_open(struct net_device *dev);
239 static int scc_close(struct net_device *dev);
240 static int scc_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd);
241 static int scc_send_packet(struct sk_buff *skb, struct net_device *dev);
242 static struct net_device_stats *scc_get_stats(struct net_device *dev);
243 static int scc_set_mac_address(struct net_device *dev, void *sa);
245 static inline void tx_on(struct scc_priv *priv);
246 static inline void rx_on(struct scc_priv *priv);
247 static inline void rx_off(struct scc_priv *priv);
248 static void start_timer(struct scc_priv *priv, int t, int r15);
249 static inline unsigned char random(void);
251 static inline void z8530_isr(struct scc_info *info);
252 static irqreturn_t scc_isr(int irq, void *dev_id, struct pt_regs *regs);
253 static void rx_isr(struct scc_priv *priv);
254 static void special_condition(struct scc_priv *priv, int rc);
255 static void rx_bh(void *arg);
256 static void tx_isr(struct scc_priv *priv);
257 static void es_isr(struct scc_priv *priv);
258 static void tm_isr(struct scc_priv *priv);
261 /* Initialization variables */
263 static int io[MAX_NUM_DEVS] __initdata = { 0, };
265 /* Beware! hw[] is also used in cleanup_module(). */
266 static struct scc_hardware hw[NUM_TYPES] __initdata_or_module = HARDWARE;
267 static char ax25_broadcast[7] __initdata =
268 { 'Q' << 1, 'S' << 1, 'T' << 1, ' ' << 1, ' ' << 1, ' ' << 1,
269 '0' << 1 };
270 static char ax25_test[7] __initdata =
271 { 'L' << 1, 'I' << 1, 'N' << 1, 'U' << 1, 'X' << 1, ' ' << 1,
272 '1' << 1 };
275 /* Global variables */
277 static struct scc_info *first;
278 static unsigned long rand;
281 MODULE_AUTHOR("Klaus Kudielka");
282 MODULE_DESCRIPTION("Driver for high-speed SCC boards");
283 MODULE_PARM(io, "1-" __MODULE_STRING(MAX_NUM_DEVS) "i");
284 MODULE_LICENSE("GPL");
286 static void __exit dmascc_exit(void)
/* Module unload: walk the global adapter list, unregister both net
   devices of every board, reset the hardware and free all memory. */
288 int i;
289 struct scc_info *info;
291 while (first) {
292 info = first;
294 /* Unregister devices */
295 for (i = 0; i < 2; i++)
296 unregister_netdev(info->dev[i]);
298 /* Reset board */
/* PackeTwin: clear SERIAL_CFG (drops DTR / interrupt enable)
   before the SCC hardware reset */
299 if (info->priv[0].type == TYPE_TWIN)
300 outb(0, info->dev[0]->base_addr + TWIN_SERIAL_CFG);
301 write_scc(&info->priv[0], R9, FHWRES);
302 release_region(info->dev[0]->base_addr,
303 hw[info->priv[0].type].io_size);
305 for (i = 0; i < 2; i++)
306 free_netdev(info->dev[i]);
308 /* Free memory */
309 first = info->next;
310 kfree(info);
314 #ifndef MODULE
/* Kernel command line parser: copies the user-supplied I/O base
   addresses into io[].  ints[0] holds the number of parsed values. */
315 void __init dmascc_setup(char *str, int *ints)
317 int i;
319 for (i = 0; i < MAX_NUM_DEVS && i < ints[0]; i++)
320 io[i] = ints[i + 1];
322 #endif
324 static int __init dmascc_init(void)
/* Probe for all supported board types.  Presence is verified by
   programming the on-board 8253/8254 timer and checking that it
   counts down at the expected rate.  Every verified board is handed
   to setup_adapter().  Returns 0 if at least one adapter was found. */
326 int h, i, j, n;
327 int base[MAX_NUM_DEVS], tcmd[MAX_NUM_DEVS], t0[MAX_NUM_DEVS],
328 t1[MAX_NUM_DEVS];
329 unsigned t_val;
330 unsigned long time, start[MAX_NUM_DEVS], delay[MAX_NUM_DEVS],
331 counting[MAX_NUM_DEVS];
333 /* Initialize random number generator */
334 rand = jiffies;
335 /* Cards found = 0 */
336 n = 0;
337 /* Warning message */
338 if (!io[0])
339 printk(KERN_INFO "dmascc: autoprobing (dangerous)\n");
341 /* Run autodetection for each card type */
342 for (h = 0; h < NUM_TYPES; h++) {
344 if (io[0]) {
345 /* User-specified I/O address regions */
346 for (i = 0; i < hw[h].num_devs; i++)
347 base[i] = 0;
348 for (i = 0; i < MAX_NUM_DEVS && io[i]; i++) {
/* Accept io[i] only if it falls exactly on one of this
   type's possible base addresses */
349 j = (io[i] -
350 hw[h].io_region) / hw[h].io_delta;
351 if (j >= 0 && j < hw[h].num_devs
352 && hw[h].io_region +
353 j * hw[h].io_delta == io[i]) {
354 base[j] = io[i];
357 } else {
358 /* Default I/O address regions */
359 for (i = 0; i < hw[h].num_devs; i++) {
360 base[i] =
361 hw[h].io_region + i * hw[h].io_delta;
365 /* Check valid I/O address regions */
366 for (i = 0; i < hw[h].num_devs; i++)
367 if (base[i]) {
368 if (!request_region
369 (base[i], hw[h].io_size, "dmascc"))
370 base[i] = 0;
371 else {
/* Precompute timer control and counter port addresses */
372 tcmd[i] =
373 base[i] + hw[h].tmr_offset +
374 TMR_CTRL;
375 t0[i] =
376 base[i] + hw[h].tmr_offset +
377 TMR_CNT0;
378 t1[i] =
379 base[i] + hw[h].tmr_offset +
380 TMR_CNT1;
384 /* Start timers */
385 for (i = 0; i < hw[h].num_devs; i++)
386 if (base[i]) {
387 /* Timer 0: LSB+MSB, Mode 3, TMR_0_HZ */
388 outb(0x36, tcmd[i]);
389 outb((hw[h].tmr_hz / TMR_0_HZ) & 0xFF,
390 t0[i]);
391 outb((hw[h].tmr_hz / TMR_0_HZ) >> 8,
392 t0[i]);
393 /* Timer 1: LSB+MSB, Mode 0, HZ/10 */
394 outb(0x70, tcmd[i]);
395 outb((TMR_0_HZ / HZ * 10) & 0xFF, t1[i]);
396 outb((TMR_0_HZ / HZ * 10) >> 8, t1[i]);
397 start[i] = jiffies;
398 delay[i] = 0;
399 counting[i] = 1;
400 /* Timer 2: LSB+MSB, Mode 0 */
401 outb(0xb0, tcmd[i]);
403 time = jiffies;
404 /* Wait until counter registers are loaded */
405 udelay(2000000 / TMR_0_HZ);
407 /* Timing loop */
/* Sample timer 1 for ~13 jiffies; a real card's counter
   (preset to HZ/10) decrements at TMR_0_HZ and should expire
   after about 10 jiffies */
408 while (jiffies - time < 13) {
409 for (i = 0; i < hw[h].num_devs; i++)
410 if (base[i] && counting[i]) {
411 /* Read back Timer 1: latch; read LSB; read MSB */
412 outb(0x40, tcmd[i]);
413 t_val =
414 inb(t1[i]) + (inb(t1[i]) << 8);
415 /* Also check whether counter did wrap */
416 if (t_val == 0
417 || t_val > TMR_0_HZ / HZ * 10)
418 counting[i] = 0;
419 delay[i] = jiffies - start[i];
423 /* Evaluate measurements */
424 for (i = 0; i < hw[h].num_devs; i++)
425 if (base[i]) {
426 if ((delay[i] >= 9 && delay[i] <= 11) &&
427 /* Ok, we have found an adapter */
428 (setup_adapter(base[i], h, n) == 0))
429 n++;
430 else
431 release_region(base[i],
432 hw[h].io_size);
435 } /* NUM_TYPES */
437 /* If any adapter was successfully initialized, return ok */
438 if (n)
439 return 0;
441 /* If no adapter found, return error */
442 printk(KERN_INFO "dmascc: no adapters found\n");
443 return -EIO;
446 module_init(dmascc_init);
447 module_exit(dmascc_exit);
449 static void dev_setup(struct net_device *dev)
/* alloc_netdev() setup callback: AX.25 link-level defaults. */
451 dev->type = ARPHRD_AX25;
452 dev->hard_header_len = 73;
453 dev->mtu = 1500;
454 dev->addr_len = 7;
455 dev->tx_queue_len = 64;
/* Default broadcast and device addresses are AX.25 callsigns
   (shifted-left-by-one ASCII, see ax25_broadcast / ax25_test) */
456 memcpy(dev->broadcast, ax25_broadcast, 7);
457 memcpy(dev->dev_addr, ax25_test, 7);
460 static int __init setup_adapter(int card_base, int type, int n)
/* Bring up one adapter: allocate state, detect the SCC chip variant,
   auto-detect the IRQ with a timer-triggered interrupt, fill in both
   channels' data structures and register two net devices.
   Returns 0 on success, -1 on any failure (all resources released). */
462 int i, irq, chip;
463 struct scc_info *info;
464 struct net_device *dev;
465 struct scc_priv *priv;
466 unsigned long time;
467 unsigned int irqs;
468 int tmr_base = card_base + hw[type].tmr_offset;
469 int scc_base = card_base + hw[type].scc_offset;
470 char *chipnames[] = CHIPNAMES;
472 /* Allocate memory */
473 info = kmalloc(sizeof(struct scc_info), GFP_KERNEL | GFP_DMA);
474 if (!info) {
475 printk(KERN_ERR "dmascc: "
476 "could not allocate memory for %s at %#3x\n",
477 hw[type].name, card_base);
478 goto out;
481 /* Initialize what is necessary for write_scc and write_scc_data */
482 memset(info, 0, sizeof(struct scc_info));
484 info->dev[0] = alloc_netdev(0, "", dev_setup);
485 if (!info->dev[0]) {
486 printk(KERN_ERR "dmascc: "
487 "could not allocate memory for %s at %#3x\n",
488 hw[type].name, card_base);
489 goto out1;
492 info->dev[1] = alloc_netdev(0, "", dev_setup);
493 if (!info->dev[1]) {
494 printk(KERN_ERR "dmascc: "
495 "could not allocate memory for %s at %#3x\n",
496 hw[type].name, card_base);
497 goto out2;
499 spin_lock_init(&info->register_lock);
501 priv = &info->priv[0];
502 priv->type = type;
503 priv->card_base = card_base;
504 priv->scc_cmd = scc_base + SCCA_CMD;
505 priv->scc_data = scc_base + SCCA_DATA;
506 priv->register_lock = &info->register_lock;
508 /* Reset SCC */
509 write_scc(priv, R9, FHWRES | MIE | NV);
511 /* Determine type of chip by enabling SDLC/HDLC enhancements */
512 write_scc(priv, R15, SHDLCE);
513 if (!read_scc(priv, R15)) {
514 /* WR7' not present. This is an ordinary Z8530 SCC. */
515 chip = Z8530;
516 } else {
517 /* Put one character in TX FIFO */
518 write_scc_data(priv, 0, 0);
519 if (read_scc(priv, R0) & Tx_BUF_EMP) {
520 /* TX FIFO not full. This is a Z85230 ESCC with a 4-byte FIFO. */
521 chip = Z85230;
522 } else {
523 /* TX FIFO full. This is a Z85C30 SCC with a 1-byte FIFO. */
524 chip = Z85C30;
527 write_scc(priv, R15, 0);
529 /* Start IRQ auto-detection */
530 irqs = probe_irq_on();
532 /* Enable interrupts */
533 if (type == TYPE_TWIN) {
534 outb(0, card_base + TWIN_DMA_CFG);
535 inb(card_base + TWIN_CLR_TMR1);
536 inb(card_base + TWIN_CLR_TMR2);
537 info->twin_serial_cfg = TWIN_EI;
538 outb(info->twin_serial_cfg, card_base + TWIN_SERIAL_CFG);
539 } else {
540 write_scc(priv, R15, CTSIE);
541 write_scc(priv, R0, RES_EXT_INT);
542 write_scc(priv, R1, EXT_INT_ENAB);
545 /* Start timer */
/* Load timer 1 with count 1 (LSB then MSB) so it fires at once */
546 outb(1, tmr_base + TMR_CNT1);
547 outb(0, tmr_base + TMR_CNT1);
549 /* Wait and detect IRQ */
550 time = jiffies;
551 while (jiffies - time < 2 + HZ / TMR_0_HZ);
552 irq = probe_irq_off(irqs);
554 /* Clear pending interrupt, disable interrupts */
555 if (type == TYPE_TWIN) {
556 inb(card_base + TWIN_CLR_TMR1);
557 } else {
558 write_scc(priv, R1, 0);
559 write_scc(priv, R15, 0);
560 write_scc(priv, R0, RES_EXT_INT);
563 if (irq <= 0) {
564 printk(KERN_ERR
565 "dmascc: could not find irq of %s at %#3x (irq=%d)\n",
566 hw[type].name, card_base, irq);
567 goto out3;
570 /* Set up data structures */
571 for (i = 0; i < 2; i++) {
572 dev = info->dev[i];
573 priv = &info->priv[i];
574 priv->type = type;
575 priv->chip = chip;
576 priv->dev = dev;
577 priv->info = info;
578 priv->channel = i;
579 spin_lock_init(&priv->ring_lock);
580 priv->register_lock = &info->register_lock;
581 priv->card_base = card_base;
582 priv->scc_cmd = scc_base + (i ? SCCB_CMD : SCCA_CMD);
583 priv->scc_data = scc_base + (i ? SCCB_DATA : SCCA_DATA);
584 priv->tmr_cnt = tmr_base + (i ? TMR_CNT2 : TMR_CNT1);
585 priv->tmr_ctrl = tmr_base + TMR_CTRL;
/* Timer mode bytes: channel A uses timer 1, channel B timer 2 */
586 priv->tmr_mode = i ? 0xb0 : 0x70;
587 priv->param.pclk_hz = hw[type].pclk_hz;
588 priv->param.brg_tc = -1;
589 priv->param.clocks = TCTRxCP | RCRTxCP;
590 priv->param.persist = 256;
591 priv->param.dma = -1;
592 INIT_WORK(&priv->rx_work, rx_bh, priv);
593 dev->priv = priv;
594 sprintf(dev->name, "dmascc%i", 2 * n + i);
595 SET_MODULE_OWNER(dev);
596 dev->base_addr = card_base;
597 dev->irq = irq;
598 dev->open = scc_open;
599 dev->stop = scc_close;
600 dev->do_ioctl = scc_ioctl;
601 dev->hard_start_xmit = scc_send_packet;
602 dev->get_stats = scc_get_stats;
603 dev->hard_header = ax25_encapsulate;
604 dev->rebuild_header = ax25_rebuild_header;
605 dev->set_mac_address = scc_set_mac_address;
607 if (register_netdev(info->dev[0])) {
608 printk(KERN_ERR "dmascc: could not register %s\n",
609 info->dev[0]->name);
610 goto out3;
612 if (register_netdev(info->dev[1])) {
613 printk(KERN_ERR "dmascc: could not register %s\n",
614 info->dev[1]->name);
615 goto out4;
619 info->next = first;
620 first = info;
621 printk(KERN_INFO "dmascc: found %s (%s) at %#3x, irq %d\n",
622 hw[type].name, chipnames[chip], card_base, irq);
623 return 0;
/* Error unwinding, in reverse order of acquisition */
625 out4:
626 unregister_netdev(info->dev[0]);
627 out3:
628 if (info->priv[0].type == TYPE_TWIN)
629 outb(0, info->dev[0]->base_addr + TWIN_SERIAL_CFG);
630 write_scc(&info->priv[0], R9, FHWRES);
631 free_netdev(info->dev[1]);
632 out2:
633 free_netdev(info->dev[0]);
634 out1:
635 kfree(info);
636 out:
637 return -1;
641 /* Driver functions */
643 static void write_scc(struct scc_priv *priv, int reg, int val)
/* Write an SCC register.  reg 0 needs no select cycle.  On PI/PI2
   (default case) the card's DMA request line must be masked off via
   PI_DREQ_MASK while the SCC is accessed, under the shared register
   lock, so a DMA transfer cannot collide with register access. */
645 unsigned long flags;
646 switch (priv->type) {
647 case TYPE_S5:
/* S5SCC/DMA: plain outb, no extra pausing needed */
648 if (reg)
649 outb(reg, priv->scc_cmd);
650 outb(val, priv->scc_cmd);
651 return;
652 case TYPE_TWIN:
/* PackeTwin: paced accesses (outb_p) */
653 if (reg)
654 outb_p(reg, priv->scc_cmd);
655 outb_p(val, priv->scc_cmd);
656 return;
657 default:
658 spin_lock_irqsave(priv->register_lock, flags);
659 outb_p(0, priv->card_base + PI_DREQ_MASK);
660 if (reg)
661 outb_p(reg, priv->scc_cmd);
662 outb_p(val, priv->scc_cmd);
663 outb(1, priv->card_base + PI_DREQ_MASK);
664 spin_unlock_irqrestore(priv->register_lock, flags);
665 return;
670 static void write_scc_data(struct scc_priv *priv, int val, int fast)
/* Write one byte to the SCC data register.  `fast` (PI/PI2 only)
   skips the DREQ-mask/lock sequence for callers that already hold
   the register lock with DREQ masked. */
672 unsigned long flags;
673 switch (priv->type) {
674 case TYPE_S5:
675 outb(val, priv->scc_data);
676 return;
677 case TYPE_TWIN:
678 outb_p(val, priv->scc_data);
679 return;
680 default:
681 if (fast)
682 outb_p(val, priv->scc_data);
683 else {
684 spin_lock_irqsave(priv->register_lock, flags);
685 outb_p(0, priv->card_base + PI_DREQ_MASK);
686 outb_p(val, priv->scc_data);
687 outb(1, priv->card_base + PI_DREQ_MASK);
688 spin_unlock_irqrestore(priv->register_lock, flags);
690 return;
695 static int read_scc(struct scc_priv *priv, int reg)
/* Read an SCC register; same per-board access rules as write_scc. */
697 int rc;
698 unsigned long flags;
699 switch (priv->type) {
700 case TYPE_S5:
701 if (reg)
702 outb(reg, priv->scc_cmd);
703 return inb(priv->scc_cmd);
704 case TYPE_TWIN:
705 if (reg)
706 outb_p(reg, priv->scc_cmd);
707 return inb_p(priv->scc_cmd);
708 default:
/* PI/PI2: mask DREQ around the access, under the register lock */
709 spin_lock_irqsave(priv->register_lock, flags);
710 outb_p(0, priv->card_base + PI_DREQ_MASK);
711 if (reg)
712 outb_p(reg, priv->scc_cmd);
713 rc = inb_p(priv->scc_cmd);
714 outb(1, priv->card_base + PI_DREQ_MASK);
715 spin_unlock_irqrestore(priv->register_lock, flags);
716 return rc;
721 static int read_scc_data(struct scc_priv *priv)
/* Read one byte from the SCC data register; same access rules as
   write_scc_data (PI/PI2 must mask DREQ under the register lock). */
723 int rc;
724 unsigned long flags;
725 switch (priv->type) {
726 case TYPE_S5:
727 return inb(priv->scc_data);
728 case TYPE_TWIN:
729 return inb_p(priv->scc_data);
730 default:
731 spin_lock_irqsave(priv->register_lock, flags);
732 outb_p(0, priv->card_base + PI_DREQ_MASK);
733 rc = inb_p(priv->scc_data);
734 outb(1, priv->card_base + PI_DREQ_MASK);
735 spin_unlock_irqrestore(priv->register_lock, flags);
736 return rc;
741 static int scc_open(struct net_device *dev)
/* net_device open(): acquire IRQ (shared between the two channels)
   and optional DMA channel, reset ring state, and program the SCC
   for SDLC operation.  Returns 0 on success, -EAGAIN on resource
   allocation failure. */
743 struct scc_priv *priv = dev->priv;
744 struct scc_info *info = priv->info;
745 int card_base = priv->card_base;
747 /* Request IRQ if not already used by other channel */
748 if (!info->irq_used) {
749 if (request_irq(dev->irq, scc_isr, 0, "dmascc", info)) {
750 return -EAGAIN;
753 info->irq_used++;
755 /* Request DMA if required */
756 if (priv->param.dma >= 0) {
757 if (request_dma(priv->param.dma, "dmascc")) {
/* Roll back the IRQ refcount on failure */
758 if (--info->irq_used == 0)
759 free_irq(dev->irq, info);
760 return -EAGAIN;
761 } else {
762 unsigned long flags = claim_dma_lock();
763 clear_dma_ff(priv->param.dma);
764 release_dma_lock(flags);
768 /* Initialize local variables */
769 priv->rx_ptr = 0;
770 priv->rx_over = 0;
771 priv->rx_head = priv->rx_tail = priv->rx_count = 0;
772 priv->state = IDLE;
773 priv->tx_head = priv->tx_tail = priv->tx_count = 0;
774 priv->tx_ptr = 0;
776 /* Reset channel */
777 write_scc(priv, R9, (priv->channel ? CHRB : CHRA) | MIE | NV);
778 /* X1 clock, SDLC mode */
779 write_scc(priv, R4, SDLC | X1CLK);
780 /* DMA */
781 write_scc(priv, R1, EXT_INT_ENAB | WT_FN_RDYFN);
782 /* 8 bit RX char, RX disable */
783 write_scc(priv, R3, Rx8);
784 /* 8 bit TX char, TX disable */
785 write_scc(priv, R5, Tx8);
786 /* SDLC address field */
787 write_scc(priv, R6, 0);
788 /* SDLC flag */
789 write_scc(priv, R7, FLAG);
790 switch (priv->chip) {
791 case Z85C30:
792 /* Select WR7' */
793 write_scc(priv, R15, SHDLCE);
794 /* Auto EOM reset */
795 write_scc(priv, R7, AUTOEOM);
796 write_scc(priv, R15, 0);
797 break;
798 case Z85230:
799 /* Select WR7' */
800 write_scc(priv, R15, SHDLCE);
801 /* The following bits are set (see 2.5.2.1):
802 - Automatic EOM reset
803 - Interrupt request if RX FIFO is half full
804 This bit should be ignored in DMA mode (according to the
805 documentation), but actually isn't. The receiver doesn't work if
806 it is set. Thus, we have to clear it in DMA mode.
807 - Interrupt/DMA request if TX FIFO is completely empty
808 a) If set, the ESCC behaves as if it had no TX FIFO (Z85C30
809 compatibility).
810 b) If cleared, DMA requests may follow each other very quickly,
811 filling up the TX FIFO.
812 Advantage: TX works even in case of high bus latency.
813 Disadvantage: Edge-triggered DMA request circuitry may miss
814 a request. No more data is delivered, resulting
815 in a TX FIFO underrun.
816 Both PI2 and S5SCC/DMA seem to work fine with TXFIFOE cleared.
817 The PackeTwin doesn't. I don't know about the PI, but let's
818 assume it behaves like the PI2.
820 if (priv->param.dma >= 0) {
821 if (priv->type == TYPE_TWIN)
822 write_scc(priv, R7, AUTOEOM | TXFIFOE);
823 else
824 write_scc(priv, R7, AUTOEOM);
825 } else {
826 write_scc(priv, R7, AUTOEOM | RXFIFOH);
828 write_scc(priv, R15, 0);
829 break;
831 /* Preset CRC, NRZ(I) encoding */
832 write_scc(priv, R10, CRCPS | (priv->param.nrzi ? NRZI : NRZ));
834 /* Configure baud rate generator */
835 if (priv->param.brg_tc >= 0) {
836 /* Program BR generator */
837 write_scc(priv, R12, priv->param.brg_tc & 0xFF);
838 write_scc(priv, R13, (priv->param.brg_tc >> 8) & 0xFF);
839 /* BRG source = SYS CLK; enable BRG; DTR REQ function (required by
840 PackeTwin, not connected on the PI2); set DPLL source to BRG */
841 write_scc(priv, R14, SSBR | DTRREQ | BRSRC | BRENABL);
842 /* Enable DPLL */
843 write_scc(priv, R14, SEARCH | DTRREQ | BRSRC | BRENABL);
844 } else {
845 /* Disable BR generator */
846 write_scc(priv, R14, DTRREQ | BRSRC);
849 /* Configure clocks */
850 if (priv->type == TYPE_TWIN) {
851 /* Disable external TX clock receiver */
852 outb((info->twin_serial_cfg &=
853 ~(priv->channel ? TWIN_EXTCLKB : TWIN_EXTCLKA)),
854 card_base + TWIN_SERIAL_CFG);
856 write_scc(priv, R11, priv->param.clocks);
857 if ((priv->type == TYPE_TWIN) && !(priv->param.clocks & TRxCOI)) {
858 /* Enable external TX clock receiver */
859 outb((info->twin_serial_cfg |=
860 (priv->channel ? TWIN_EXTCLKB : TWIN_EXTCLKA)),
861 card_base + TWIN_SERIAL_CFG);
864 /* Configure PackeTwin */
865 if (priv->type == TYPE_TWIN) {
866 /* Assert DTR, enable interrupts */
867 outb((info->twin_serial_cfg |= TWIN_EI |
868 (priv->channel ? TWIN_DTRB_ON : TWIN_DTRA_ON)),
869 card_base + TWIN_SERIAL_CFG);
872 /* Read current status */
873 priv->rr0 = read_scc(priv, R0);
874 /* Enable DCD interrupt */
875 write_scc(priv, R15, DCDIE);
877 netif_start_queue(dev);
879 return 0;
883 static int scc_close(struct net_device *dev)
/* net_device stop(): quiesce the queue, reset the channel, and
   release the DMA channel and (when the last channel closes) the
   shared IRQ. */
885 struct scc_priv *priv = dev->priv;
886 struct scc_info *info = priv->info;
887 int card_base = priv->card_base;
889 netif_stop_queue(dev);
891 if (priv->type == TYPE_TWIN) {
892 /* Drop DTR */
893 outb((info->twin_serial_cfg &=
894 (priv->channel ? ~TWIN_DTRB_ON : ~TWIN_DTRA_ON)),
895 card_base + TWIN_SERIAL_CFG);
898 /* Reset channel, free DMA and IRQ */
899 write_scc(priv, R9, (priv->channel ? CHRB : CHRA) | MIE | NV);
900 if (priv->param.dma >= 0) {
901 if (priv->type == TYPE_TWIN)
902 outb(0, card_base + TWIN_DMA_CFG);
903 free_dma(priv->param.dma);
/* IRQ is shared by both channels; free on last close only */
905 if (--info->irq_used == 0)
906 free_irq(dev->irq, info);
908 return 0;
912 static int scc_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
/* Private ioctls: get/set the channel's scc_param block.  Setting
   requires CAP_NET_ADMIN and is refused while the interface is up,
   since scc_open programs the hardware from these values. */
914 struct scc_priv *priv = dev->priv;
916 switch (cmd) {
917 case SIOCGSCCPARAM:
918 if (copy_to_user
919 (ifr->ifr_data, &priv->param,
920 sizeof(struct scc_param)))
921 return -EFAULT;
922 return 0;
923 case SIOCSSCCPARAM:
924 if (!capable(CAP_NET_ADMIN))
925 return -EPERM;
926 if (netif_running(dev))
927 return -EAGAIN;
928 if (copy_from_user
929 (&priv->param, ifr->ifr_data,
930 sizeof(struct scc_param)))
931 return -EFAULT;
932 return 0;
933 default:
934 return -EINVAL;
939 static int scc_send_packet(struct sk_buff *skb, struct net_device *dev)
/* hard_start_xmit: copy the frame into the TX ring and, if the
   transmitter is idle, kick off the TX state machine. */
941 struct scc_priv *priv = dev->priv;
942 unsigned long flags;
943 int i;
945 /* Temporarily stop the scheduler feeding us packets */
946 netif_stop_queue(dev);
948 /* Transfer data to DMA buffer */
/* The first byte of skb->data is skipped — presumably the KISS
   command byte prepended by the AX.25 stack; TODO confirm. */
949 i = priv->tx_head;
950 memcpy(priv->tx_buf[i], skb->data + 1, skb->len - 1);
951 priv->tx_len[i] = skb->len - 1;
953 /* Clear interrupts while we touch our circular buffers */
955 spin_lock_irqsave(&priv->ring_lock, flags);
956 /* Move the ring buffer's head */
957 priv->tx_head = (i + 1) % NUM_TX_BUF;
958 priv->tx_count++;
960 /* If we just filled up the last buffer, leave queue stopped.
961 The higher layers must wait until we have a DMA buffer
962 to accept the data. */
963 if (priv->tx_count < NUM_TX_BUF)
964 netif_wake_queue(dev);
966 /* Set new TX state */
967 if (priv->state == IDLE) {
968 /* Assert RTS, start timer */
969 priv->state = TX_HEAD;
970 priv->tx_start = jiffies;
971 write_scc(priv, R5, TxCRC_ENAB | RTS | TxENAB | Tx8);
972 write_scc(priv, R15, 0);
973 start_timer(priv, priv->param.txdelay, 0);
976 /* Turn interrupts back on and free buffer */
977 spin_unlock_irqrestore(&priv->ring_lock, flags);
978 dev_kfree_skb(skb);
980 return 0;
984 static struct net_device_stats *scc_get_stats(struct net_device *dev)
986 struct scc_priv *priv = dev->priv;
988 return &priv->stats;
992 static int scc_set_mac_address(struct net_device *dev, void *sa)
994 memcpy(dev->dev_addr, ((struct sockaddr *) sa)->sa_data,
995 dev->addr_len);
996 return 0;
1000 static inline void tx_on(struct scc_priv *priv)
/* Start transmitting the buffer at tx_tail.  In DMA mode the first
   byte(s) are written by hand to prime the SCC FIFO (3 on the
   Z85230's 4-byte FIFO, 1 otherwise) and the DMA controller sends
   the rest; otherwise TX interrupts drive tx_isr. */
1002 int i, n;
1003 unsigned long flags;
1005 if (priv->param.dma >= 0) {
1006 n = (priv->chip == Z85230) ? 3 : 1;
1007 /* Program DMA controller */
1008 flags = claim_dma_lock();
1009 set_dma_mode(priv->param.dma, DMA_MODE_WRITE);
1010 set_dma_addr(priv->param.dma,
1011 (int) priv->tx_buf[priv->tx_tail] + n);
1012 set_dma_count(priv->param.dma,
1013 priv->tx_len[priv->tx_tail] - n);
1014 release_dma_lock(flags);
1015 /* Enable TX underrun interrupt */
1016 write_scc(priv, R15, TxUIE);
1017 /* Configure DREQ */
1018 if (priv->type == TYPE_TWIN)
1019 outb((priv->param.dma ==
1020 1) ? TWIN_DMA_HDX_T1 : TWIN_DMA_HDX_T3,
1021 priv->card_base + TWIN_DMA_CFG);
1022 else
1023 write_scc(priv, R1,
1024 EXT_INT_ENAB | WT_FN_RDYFN |
1025 WT_RDY_ENAB);
1026 /* Write first byte(s) */
1027 spin_lock_irqsave(priv->register_lock, flags);
1028 for (i = 0; i < n; i++)
1029 write_scc_data(priv,
1030 priv->tx_buf[priv->tx_tail][i], 1);
1031 enable_dma(priv->param.dma);
1032 spin_unlock_irqrestore(priv->register_lock, flags);
1033 } else {
/* PIO mode: enable TX interrupts and push the first chunk now */
1034 write_scc(priv, R15, TxUIE);
1035 write_scc(priv, R1,
1036 EXT_INT_ENAB | WT_FN_RDYFN | TxINT_ENAB);
1037 tx_isr(priv);
1039 /* Reset EOM latch if we do not have the AUTOEOM feature */
1040 if (priv->chip == Z8530)
1041 write_scc(priv, R0, RES_EOM_L);
1045 static inline void rx_on(struct scc_priv *priv)
/* Enable the receiver: drain the RX FIFO, then either arm the DMA
   controller for the buffer at rx_head (special-condition interrupts
   only) or enable per-character RX interrupts for PIO mode. */
1047 unsigned long flags;
1049 /* Clear RX FIFO */
1050 while (read_scc(priv, R0) & Rx_CH_AV)
1051 read_scc_data(priv);
1052 priv->rx_over = 0;
1053 if (priv->param.dma >= 0) {
1054 /* Program DMA controller */
1055 flags = claim_dma_lock();
1056 set_dma_mode(priv->param.dma, DMA_MODE_READ);
1057 set_dma_addr(priv->param.dma,
1058 (int) priv->rx_buf[priv->rx_head]);
1059 set_dma_count(priv->param.dma, BUF_SIZE);
1060 release_dma_lock(flags);
1061 enable_dma(priv->param.dma);
1062 /* Configure PackeTwin DMA */
1063 if (priv->type == TYPE_TWIN) {
1064 outb((priv->param.dma ==
1065 1) ? TWIN_DMA_HDX_R1 : TWIN_DMA_HDX_R3,
1066 priv->card_base + TWIN_DMA_CFG);
1068 /* Sp. cond. intr. only, ext int enable, RX DMA enable */
1069 write_scc(priv, R1, EXT_INT_ENAB | INT_ERR_Rx |
1070 WT_RDY_RT | WT_FN_RDYFN | WT_RDY_ENAB);
1071 } else {
1072 /* Reset current frame */
1073 priv->rx_ptr = 0;
1074 /* Intr. on all Rx characters and Sp. cond., ext int enable */
1075 write_scc(priv, R1, EXT_INT_ENAB | INT_ALL_Rx | WT_RDY_RT |
1076 WT_FN_RDYFN);
1078 write_scc(priv, R0, ERR_RES);
1079 write_scc(priv, R3, RxENABLE | Rx8 | RxCRC_ENAB);
1083 static inline void rx_off(struct scc_priv *priv)
/* Disable the receiver and stop RX DMA / RX interrupt delivery. */
1085 /* Disable receiver */
1086 write_scc(priv, R3, Rx8);
1087 /* Disable DREQ / RX interrupt */
1088 if (priv->param.dma >= 0 && priv->type == TYPE_TWIN)
1089 outb(0, priv->card_base + TWIN_DMA_CFG);
1090 else
1091 write_scc(priv, R1, EXT_INT_ENAB | WT_FN_RDYFN);
1092 /* Disable DMA */
1093 if (priv->param.dma >= 0)
1094 disable_dma(priv->param.dma);
1098 static void start_timer(struct scc_priv *priv, int t, int r15)
/* Arm this channel's one-shot 8253/8254 timer with count t (in
   1/TMR_0_HZ units).  t == 0 dispatches tm_isr() immediately;
   writing the mode byte alone (t < 0) leaves the counter halted.
   On non-PackeTwin boards timer expiry is signalled through the
   SCC CTS pin, so CTSIE is enabled alongside the caller's r15 bits.
   NOTE(review): save_flags()/cli() is the pre-2.6 global interrupt
   API — long removed from modern kernels. */
1100 unsigned long flags;
1102 outb(priv->tmr_mode, priv->tmr_ctrl);
1103 if (t == 0) {
1104 tm_isr(priv);
1105 } else if (t > 0) {
1106 save_flags(flags);
1107 cli();
1108 outb(t & 0xFF, priv->tmr_cnt);
1109 outb((t >> 8) & 0xFF, priv->tmr_cnt);
1110 if (priv->type != TYPE_TWIN) {
1111 write_scc(priv, R15, r15 | CTSIE);
1112 priv->rr0 |= CTS;
1114 restore_flags(flags);
1119 static inline unsigned char random(void)
1121 /* See "Numerical Recipes in C", second edition, p. 284 */
/* Linear congruential generator on the global `rand`; the top byte
   is returned because the high-order bits are the most random. */
1122 rand = rand * 1664525L + 1013904223L;
1123 return (unsigned char) (rand >> 24);
1126 static inline void z8530_isr(struct scc_info *info)
/* Service all pending interrupts of one Z8530.  RR3 (read on channel
   A) reports both channels' pending sources; dispatch in the chip's
   priority order, highest (channel A RX) first. */
1128 int is, i = 100;
1130 while ((is = read_scc(&info->priv[0], R3)) && i--) {
1131 if (is & CHARxIP) {
1132 rx_isr(&info->priv[0]);
1133 } else if (is & CHATxIP) {
1134 tx_isr(&info->priv[0]);
1135 } else if (is & CHAEXT) {
1136 es_isr(&info->priv[0]);
1137 } else if (is & CHBRxIP) {
1138 rx_isr(&info->priv[1]);
1139 } else if (is & CHBTxIP) {
1140 tx_isr(&info->priv[1]);
1141 } else {
1142 es_isr(&info->priv[1]);
1144 write_scc(&info->priv[0], R0, RES_H_IUS);
/* NOTE(review): this i++ cancels the i-- in the loop condition, so
   the 100-iteration watchdog never counts down and the "stuck in
   ISR" printk below appears unreachable — confirm whether the
   increment is intentional. */
1145 i++;
1147 if (i < 0) {
1148 printk(KERN_ERR "dmascc: stuck in ISR with RR3=0x%02x.\n",
1149 is);
1151 /* Ok, no interrupts pending from this 8530. The INT line should
1152 be inactive now. */
1156 static irqreturn_t scc_isr(int irq, void *dev_id, struct pt_regs *regs)
/* Top-level interrupt handler, shared by both channels of a board.
   On the PackeTwin, demultiplex SCC vs. timer interrupts via
   TWIN_INT_REG (active-low, hence the ~inb); otherwise everything
   comes from the Z8530 itself. */
1158 struct scc_info *info = dev_id;
1160 spin_lock(info->priv[0].register_lock);
1161 /* At this point interrupts are enabled, and the interrupt under service
1162 is already acknowledged, but masked off.
1164 Interrupt processing: We loop until we know that the IRQ line is
1165 low. If another positive edge occurs afterwards during the ISR,
1166 another interrupt will be triggered by the interrupt controller
1167 as soon as the IRQ level is enabled again (see asm/irq.h).
1169 Bottom-half handlers will be processed after scc_isr(). This is
1170 important, since we only have small ringbuffers and want new data
1171 to be fetched/delivered immediately. */
1173 if (info->priv[0].type == TYPE_TWIN) {
1174 int is, card_base = info->priv[0].card_base;
1175 while ((is = ~inb(card_base + TWIN_INT_REG)) &
1176 TWIN_INT_MSK) {
1177 if (is & TWIN_SCC_MSK) {
1178 z8530_isr(info);
1179 } else if (is & TWIN_TMR1_MSK) {
/* Reading the CLR_TMR port acknowledges the timer interrupt */
1180 inb(card_base + TWIN_CLR_TMR1);
1181 tm_isr(&info->priv[0]);
1182 } else {
1183 inb(card_base + TWIN_CLR_TMR2);
1184 tm_isr(&info->priv[1]);
1187 } else
1188 z8530_isr(info);
1189 spin_unlock(info->priv[0].register_lock);
1190 return IRQ_HANDLED;
1194 static void rx_isr(struct scc_priv *priv)
1196 if (priv->param.dma >= 0) {
1197 /* Check special condition and perform error reset. See 2.4.7.5. */
1198 special_condition(priv, read_scc(priv, R1));
1199 write_scc(priv, R0, ERR_RES);
1200 } else {
1201 /* Check special condition for each character. Error reset not necessary.
1202 Same algorithm for SCC and ESCC. See 2.4.7.1 and 2.4.7.4. */
1203 int rc;
1204 while (read_scc(priv, R0) & Rx_CH_AV) {
1205 rc = read_scc(priv, R1);
1206 if (priv->rx_ptr < BUF_SIZE)
1207 priv->rx_buf[priv->rx_head][priv->
1208 rx_ptr++] =
1209 read_scc_data(priv);
1210 else {
1211 priv->rx_over = 2;
1212 read_scc_data(priv);
1214 special_condition(priv, rc);
1220 static void special_condition(struct scc_priv *priv, int rc)
1222 int cb;
1223 unsigned long flags;
1225 /* See Figure 2-15. Only overrun and EOF need to be checked. */
1227 if (rc & Rx_OVR) {
1228 /* Receiver overrun */
1229 priv->rx_over = 1;
1230 if (priv->param.dma < 0)
1231 write_scc(priv, R0, ERR_RES);
1232 } else if (rc & END_FR) {
1233 /* End of frame. Get byte count */
1234 if (priv->param.dma >= 0) {
1235 flags = claim_dma_lock();
1236 cb = BUF_SIZE - get_dma_residue(priv->param.dma) -
1238 release_dma_lock(flags);
1239 } else {
1240 cb = priv->rx_ptr - 2;
1242 if (priv->rx_over) {
1243 /* We had an overrun */
1244 priv->stats.rx_errors++;
1245 if (priv->rx_over == 2)
1246 priv->stats.rx_length_errors++;
1247 else
1248 priv->stats.rx_fifo_errors++;
1249 priv->rx_over = 0;
1250 } else if (rc & CRC_ERR) {
1251 /* Count invalid CRC only if packet length >= minimum */
1252 if (cb >= 15) {
1253 priv->stats.rx_errors++;
1254 priv->stats.rx_crc_errors++;
1256 } else {
1257 if (cb >= 15) {
1258 if (priv->rx_count < NUM_RX_BUF - 1) {
1259 /* Put good frame in FIFO */
1260 priv->rx_len[priv->rx_head] = cb;
1261 priv->rx_head =
1262 (priv->rx_head +
1263 1) % NUM_RX_BUF;
1264 priv->rx_count++;
1265 schedule_work(&priv->rx_work);
1266 } else {
1267 priv->stats.rx_errors++;
1268 priv->stats.rx_over_errors++;
1272 /* Get ready for new frame */
1273 if (priv->param.dma >= 0) {
1274 flags = claim_dma_lock();
1275 set_dma_addr(priv->param.dma,
1276 (int) priv->rx_buf[priv->rx_head]);
1277 set_dma_count(priv->param.dma, BUF_SIZE);
1278 release_dma_lock(flags);
1279 } else {
1280 priv->rx_ptr = 0;
1286 static void rx_bh(void *arg)
1288 struct scc_priv *priv = arg;
1289 int i = priv->rx_tail;
1290 int cb;
1291 unsigned long flags;
1292 struct sk_buff *skb;
1293 unsigned char *data;
1295 spin_lock_irqsave(&priv->ring_lock, flags);
1296 while (priv->rx_count) {
1297 spin_unlock_irqrestore(&priv->ring_lock, flags);
1298 cb = priv->rx_len[i];
1299 /* Allocate buffer */
1300 skb = dev_alloc_skb(cb + 1);
1301 if (skb == NULL) {
1302 /* Drop packet */
1303 priv->stats.rx_dropped++;
1304 } else {
1305 /* Fill buffer */
1306 data = skb_put(skb, cb + 1);
1307 data[0] = 0;
1308 memcpy(&data[1], priv->rx_buf[i], cb);
1309 skb->dev = priv->dev;
1310 skb->protocol = ntohs(ETH_P_AX25);
1311 skb->mac.raw = skb->data;
1312 netif_rx(skb);
1313 priv->dev->last_rx = jiffies;
1314 priv->stats.rx_packets++;
1315 priv->stats.rx_bytes += cb;
1317 spin_lock_irqsave(&priv->ring_lock, flags);
1318 /* Move tail */
1319 priv->rx_tail = i = (i + 1) % NUM_RX_BUF;
1320 priv->rx_count--;
1322 spin_unlock_irqrestore(&priv->ring_lock, flags);
1326 static void tx_isr(struct scc_priv *priv)
1328 int i = priv->tx_tail, p = priv->tx_ptr;
1330 /* Suspend TX interrupts if we don't want to send anything.
1331 See Figure 2-22. */
1332 if (p == priv->tx_len[i]) {
1333 write_scc(priv, R0, RES_Tx_P);
1334 return;
1337 /* Write characters */
1338 while ((read_scc(priv, R0) & Tx_BUF_EMP) && p < priv->tx_len[i]) {
1339 write_scc_data(priv, priv->tx_buf[i][p++], 0);
1342 /* Reset EOM latch of Z8530 */
1343 if (!priv->tx_ptr && p && priv->chip == Z8530)
1344 write_scc(priv, R0, RES_EOM_L);
1346 priv->tx_ptr = p;
1350 static void es_isr(struct scc_priv *priv)
1352 int i, rr0, drr0, res;
1353 unsigned long flags;
1355 /* Read status, reset interrupt bit (open latches) */
1356 rr0 = read_scc(priv, R0);
1357 write_scc(priv, R0, RES_EXT_INT);
1358 drr0 = priv->rr0 ^ rr0;
1359 priv->rr0 = rr0;
1361 /* Transmit underrun (2.4.9.6). We can't check the TxEOM flag, since
1362 it might have already been cleared again by AUTOEOM. */
1363 if (priv->state == TX_DATA) {
1364 /* Get remaining bytes */
1365 i = priv->tx_tail;
1366 if (priv->param.dma >= 0) {
1367 disable_dma(priv->param.dma);
1368 flags = claim_dma_lock();
1369 res = get_dma_residue(priv->param.dma);
1370 release_dma_lock(flags);
1371 } else {
1372 res = priv->tx_len[i] - priv->tx_ptr;
1373 priv->tx_ptr = 0;
1375 /* Disable DREQ / TX interrupt */
1376 if (priv->param.dma >= 0 && priv->type == TYPE_TWIN)
1377 outb(0, priv->card_base + TWIN_DMA_CFG);
1378 else
1379 write_scc(priv, R1, EXT_INT_ENAB | WT_FN_RDYFN);
1380 if (res) {
1381 /* Update packet statistics */
1382 priv->stats.tx_errors++;
1383 priv->stats.tx_fifo_errors++;
1384 /* Other underrun interrupts may already be waiting */
1385 write_scc(priv, R0, RES_EXT_INT);
1386 write_scc(priv, R0, RES_EXT_INT);
1387 } else {
1388 /* Update packet statistics */
1389 priv->stats.tx_packets++;
1390 priv->stats.tx_bytes += priv->tx_len[i];
1391 /* Remove frame from FIFO */
1392 priv->tx_tail = (i + 1) % NUM_TX_BUF;
1393 priv->tx_count--;
1394 /* Inform upper layers */
1395 netif_wake_queue(priv->dev);
1397 /* Switch state */
1398 write_scc(priv, R15, 0);
1399 if (priv->tx_count &&
1400 (jiffies - priv->tx_start) < priv->param.txtimeout) {
1401 priv->state = TX_PAUSE;
1402 start_timer(priv, priv->param.txpause, 0);
1403 } else {
1404 priv->state = TX_TAIL;
1405 start_timer(priv, priv->param.txtail, 0);
1409 /* DCD transition */
1410 if (drr0 & DCD) {
1411 if (rr0 & DCD) {
1412 switch (priv->state) {
1413 case IDLE:
1414 case WAIT:
1415 priv->state = DCD_ON;
1416 write_scc(priv, R15, 0);
1417 start_timer(priv, priv->param.dcdon, 0);
1419 } else {
1420 switch (priv->state) {
1421 case RX_ON:
1422 rx_off(priv);
1423 priv->state = DCD_OFF;
1424 write_scc(priv, R15, 0);
1425 start_timer(priv, priv->param.dcdoff, 0);
1430 /* CTS transition */
1431 if ((drr0 & CTS) && (~rr0 & CTS) && priv->type != TYPE_TWIN)
1432 tm_isr(priv);
1437 static void tm_isr(struct scc_priv *priv)
1439 switch (priv->state) {
1440 case TX_HEAD:
1441 case TX_PAUSE:
1442 tx_on(priv);
1443 priv->state = TX_DATA;
1444 break;
1445 case TX_TAIL:
1446 write_scc(priv, R5, TxCRC_ENAB | Tx8);
1447 priv->state = RTS_OFF;
1448 if (priv->type != TYPE_TWIN)
1449 write_scc(priv, R15, 0);
1450 start_timer(priv, priv->param.rtsoff, 0);
1451 break;
1452 case RTS_OFF:
1453 write_scc(priv, R15, DCDIE);
1454 priv->rr0 = read_scc(priv, R0);
1455 if (priv->rr0 & DCD) {
1456 priv->stats.collisions++;
1457 rx_on(priv);
1458 priv->state = RX_ON;
1459 } else {
1460 priv->state = WAIT;
1461 start_timer(priv, priv->param.waittime, DCDIE);
1463 break;
1464 case WAIT:
1465 if (priv->tx_count) {
1466 priv->state = TX_HEAD;
1467 priv->tx_start = jiffies;
1468 write_scc(priv, R5,
1469 TxCRC_ENAB | RTS | TxENAB | Tx8);
1470 write_scc(priv, R15, 0);
1471 start_timer(priv, priv->param.txdelay, 0);
1472 } else {
1473 priv->state = IDLE;
1474 if (priv->type != TYPE_TWIN)
1475 write_scc(priv, R15, DCDIE);
1477 break;
1478 case DCD_ON:
1479 case DCD_OFF:
1480 write_scc(priv, R15, DCDIE);
1481 priv->rr0 = read_scc(priv, R0);
1482 if (priv->rr0 & DCD) {
1483 rx_on(priv);
1484 priv->state = RX_ON;
1485 } else {
1486 priv->state = WAIT;
1487 start_timer(priv,
1488 random() / priv->param.persist *
1489 priv->param.slottime, DCDIE);
1491 break;