/*
 * $Id: dmascc.c,v 1.27 2000/06/01 14:46:23 oe1kib Exp $
 *
 * Driver for high-speed SCC boards (those with DMA support)
 * Copyright (C) 1997-2000 Klaus Kudielka
 *
 * S5SCC/DMA support by Janko Koleznik S52HI
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 */

#include <linux/module.h>
#include <linux/delay.h>
#include <linux/errno.h>
#include <linux/if_arp.h>
#include <linux/in.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/ioport.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/netdevice.h>
#include <linux/rtnetlink.h>
#include <linux/sockios.h>
#include <linux/workqueue.h>
#include <linux/version.h>
#include <asm/atomic.h>
#include <asm/bitops.h>
#include <asm/dma.h>
#include <asm/io.h>
#include <asm/irq.h>
#include <asm/uaccess.h>
#include <net/ax25.h>
#include "z8530.h"

/* Number of buffers per channel */

#define NUM_TX_BUF    2     /* NUM_TX_BUF >= 1 (min. 2 recommended) */
#define NUM_RX_BUF    6     /* NUM_RX_BUF >= 1 (min. 2 recommended) */
#define BUF_SIZE      1576  /* BUF_SIZE >= mtu + hard_header_len */

/* Cards supported */

#define HW_PI    { "Ottawa PI", 0x300, 0x20, 0x10, 8, \
                   0, 8, 1843200, 3686400 }
#define HW_PI2   { "Ottawa PI2", 0x300, 0x20, 0x10, 8, \
                   0, 8, 3686400, 7372800 }
#define HW_TWIN  { "Gracilis PackeTwin", 0x200, 0x10, 0x10, 32, \
                   0, 4, 6144000, 6144000 }
#define HW_S5    { "S5SCC/DMA", 0x200, 0x10, 0x10, 32, \
                   0, 8, 4915200, 9830400 }

#define HARDWARE { HW_PI, HW_PI2, HW_TWIN, HW_S5 }
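
/* Each HW_* entry above initializes one struct scc_hardware (defined below),
   in field order: name, I/O region base, delta between cards, region size,
   maximum number of cards, SCC offset, timer offset, timer clock [Hz],
   SCC clock (pclk) [Hz]. */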

#define TMR_0_HZ      25600  /* Frequency of timer 0 */

#define TYPE_PI       0
#define TYPE_PI2      1
#define TYPE_TWIN     2
#define TYPE_S5       3
#define NUM_TYPES     4

#define MAX_NUM_DEVS  32

/* SCC chips supported */

#define Z8530         0
#define Z85C30        1
#define Z85230        2

#define CHIPNAMES     { "Z8530", "Z85C30", "Z85230" }

/* I/O registers */

/* 8530 registers relative to card base */
#define SCCB_CMD      0x00
#define SCCB_DATA     0x01
#define SCCA_CMD      0x02
#define SCCA_DATA     0x03

/* 8253/8254 registers relative to card base */
#define TMR_CNT0      0x00
#define TMR_CNT1      0x01
#define TMR_CNT2      0x02
#define TMR_CTRL      0x03

/* Additional PI/PI2 registers relative to card base */
#define PI_DREQ_MASK  0x04

/* Additional PackeTwin registers relative to card base */
#define TWIN_INT_REG    0x08
#define TWIN_CLR_TMR1   0x09
#define TWIN_CLR_TMR2   0x0a
#define TWIN_SPARE_1    0x0b
#define TWIN_DMA_CFG    0x08
#define TWIN_SERIAL_CFG 0x09
#define TWIN_DMA_CLR_FF 0x0a
#define TWIN_SPARE_2    0x0b

/* PackeTwin I/O register values */

/* INT_REG */
#define TWIN_SCC_MSK       0x01
#define TWIN_TMR1_MSK      0x02
#define TWIN_TMR2_MSK      0x04
#define TWIN_INT_MSK       0x07

/* SERIAL_CFG */
#define TWIN_DTRA_ON       0x01
#define TWIN_DTRB_ON       0x02
#define TWIN_EXTCLKA       0x04
#define TWIN_EXTCLKB       0x08
#define TWIN_LOOPA_ON      0x10
#define TWIN_LOOPB_ON      0x20
#define TWIN_EI            0x80

/* DMA_CFG */
#define TWIN_DMA_HDX_T1    0x08
#define TWIN_DMA_HDX_R1    0x0a
#define TWIN_DMA_HDX_T3    0x14
#define TWIN_DMA_HDX_R3    0x16
#define TWIN_DMA_FDX_T3R1  0x1b
#define TWIN_DMA_FDX_T1R3  0x1d

/* Status values */

#define IDLE      0
#define TX_HEAD   1
#define TX_DATA   2
#define TX_PAUSE  3
#define TX_TAIL   4
#define RTS_OFF   5
#define WAIT      6
#define DCD_ON    7
#define RX_ON     8
#define DCD_OFF   9
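
/* These are the states of the per-channel half-duplex state machine.
   Transitions are driven by scc_send_packet() (IDLE -> TX_HEAD), by
   tm_isr() on timer expiry, and by es_isr() on DCD/CTS changes and on
   TX underrun. */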

/* Ioctls */

#define SIOCGSCCPARAM SIOCDEVPRIVATE
#define SIOCSSCCPARAM (SIOCDEVPRIVATE+1)
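
/* Illustrative sketch only (not part of the driver): a userspace
   configuration tool such as dmascc_cfg would typically pass a
   struct scc_param through these private ioctls on any socket, roughly:
 *
 *     struct ifreq ifr;
 *     struct scc_param param;
 *     int fd = socket(AF_INET, SOCK_DGRAM, 0);
 *
 *     strncpy(ifr.ifr_name, "dmascc0", IFNAMSIZ);
 *     ifr.ifr_data = (caddr_t) &param;
 *     ioctl(fd, SIOCGSCCPARAM, &ifr);   /- read current parameters -/
 *     param.txdelay = 25;               /- example value, [1/TMR_0_HZ] -/
 *     ioctl(fd, SIOCSSCCPARAM, &ifr);   /- needs CAP_NET_ADMIN, iface down -/
 *
   The interface name and values are examples; see scc_ioctl() below for
   the exact semantics. */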

/* Data types */

struct scc_param {
  int pclk_hz;    /* frequency of BRG input (don't change) */
  int brg_tc;     /* BRG terminal count; BRG disabled if < 0 */
  int nrzi;       /* 0 (nrz), 1 (nrzi) */
  int clocks;     /* see dmascc_cfg documentation */
  int txdelay;    /* [1/TMR_0_HZ] */
  int txtimeout;  /* [1/HZ] */
  int txtail;     /* [1/TMR_0_HZ] */
  int waittime;   /* [1/TMR_0_HZ] */
  int slottime;   /* [1/TMR_0_HZ] */
  int persist;    /* 1 ... 256 */
  int dma;        /* -1 (disable), 0, 1, 3 */
  int txpause;    /* [1/TMR_0_HZ] */
  int rtsoff;     /* [1/TMR_0_HZ] */
  int dcdon;      /* [1/TMR_0_HZ] */
  int dcdoff;     /* [1/TMR_0_HZ] */
};

struct scc_hardware {
  char *name;
  int io_region;
  int io_delta;
  int io_size;
  int num_devs;
  int scc_offset;
  int tmr_offset;
  int tmr_hz;
  int pclk_hz;
};

struct scc_priv {
  int type;
  int chip;
  struct net_device *dev;
  struct scc_info *info;
  struct net_device_stats stats;
  int channel;
  int card_base, scc_cmd, scc_data;
  int tmr_cnt, tmr_ctrl, tmr_mode;
  struct scc_param param;
  char rx_buf[NUM_RX_BUF][BUF_SIZE];
  int rx_len[NUM_RX_BUF];
  int rx_ptr;
  struct work_struct rx_work;
  int rx_head, rx_tail, rx_count;
  int rx_over;
  char tx_buf[NUM_TX_BUF][BUF_SIZE];
  int tx_len[NUM_TX_BUF];
  int tx_ptr;
  int tx_head, tx_tail, tx_count;
  int state;
  unsigned long tx_start;
  int rr0;
  spinlock_t *register_lock;  /* Per scc_info */
  spinlock_t ring_lock;
};

struct scc_info {
  int irq_used;
  int twin_serial_cfg;
  struct net_device *dev[2];
  struct scc_priv priv[2];
  struct scc_info *next;
  spinlock_t register_lock;  /* Per device register lock */
};
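
/* One scc_info is allocated per card; it holds the two per-channel
   scc_priv structures, which share the card-wide register_lock. */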

/* Function declarations */

static int setup_adapter(int card_base, int type, int n) __init;

static void write_scc(struct scc_priv *priv, int reg, int val);
static void write_scc_data(struct scc_priv *priv, int val, int fast);
static int read_scc(struct scc_priv *priv, int reg);
static int read_scc_data(struct scc_priv *priv);

static int scc_open(struct net_device *dev);
static int scc_close(struct net_device *dev);
static int scc_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd);
static int scc_send_packet(struct sk_buff *skb, struct net_device *dev);
static struct net_device_stats *scc_get_stats(struct net_device *dev);
static int scc_set_mac_address(struct net_device *dev, void *sa);

static inline void tx_on(struct scc_priv *priv);
static inline void rx_on(struct scc_priv *priv);
static inline void rx_off(struct scc_priv *priv);
static void start_timer(struct scc_priv *priv, int t, int r15);
static inline unsigned char random(void);

static inline void z8530_isr(struct scc_info *info);
static irqreturn_t scc_isr(int irq, void *dev_id, struct pt_regs *regs);
static void rx_isr(struct scc_priv *priv);
static void special_condition(struct scc_priv *priv, int rc);
static void rx_bh(void *arg);
static void tx_isr(struct scc_priv *priv);
static void es_isr(struct scc_priv *priv);
static void tm_isr(struct scc_priv *priv);

/* Initialization variables */

static int io[MAX_NUM_DEVS] __initdata = { 0, };
/* Beware! hw[] is also used in cleanup_module(). */
static struct scc_hardware hw[NUM_TYPES] __initdata_or_module = HARDWARE;
static char ax25_broadcast[7] __initdata =
  { 'Q'<<1, 'S'<<1, 'T'<<1, ' '<<1, ' '<<1, ' '<<1, '0'<<1 };
static char ax25_test[7] __initdata =
  { 'L'<<1, 'I'<<1, 'N'<<1, 'U'<<1, 'X'<<1, ' '<<1, '1'<<1 };
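
/* AX.25 addresses are ASCII characters shifted left by one bit; the two
   arrays above decode to the callsigns "QST-0" (broadcast address) and
   "LINUX-1" (the default device address set in dev_setup()). */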

/* Global variables */

static struct scc_info *first;
static unsigned long rand;


MODULE_AUTHOR("Klaus Kudielka");
MODULE_DESCRIPTION("Driver for high-speed SCC boards");
MODULE_PARM(io, "1-" __MODULE_STRING(MAX_NUM_DEVS) "i");
MODULE_LICENSE("GPL");

static void __exit dmascc_exit(void) {
  int i;
  struct scc_info *info;

  while (first) {
    info = first;

    /* Unregister devices */
    for (i = 0; i < 2; i++)
      unregister_netdev(info->dev[i]);

    /* Reset board */
    if (info->priv[0].type == TYPE_TWIN)
      outb(0, info->dev[0]->base_addr + TWIN_SERIAL_CFG);
    write_scc(&info->priv[0], R9, FHWRES);
    release_region(info->dev[0]->base_addr,
                   hw[info->priv[0].type].io_size);

    for (i = 0; i < 2; i++)
      free_netdev(info->dev[i]);

    /* Free memory */
    first = info->next;
    kfree(info);
  }
}

#ifndef MODULE
void __init dmascc_setup(char *str, int *ints) {
  int i;

  for (i = 0; i < MAX_NUM_DEVS && i < ints[0]; i++)
    io[i] = ints[i+1];
}
#endif
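
/* Autoprobing: for each candidate I/O region we program 8253/8254 timer 1
   to count down TMR_0_HZ/HZ*10 ticks of the TMR_0_HZ clock (i.e. 10 jiffies'
   worth) and measure how many jiffies pass until the count expires or wraps.
   Only a result of roughly 10 jiffies (9..11 accepted) makes us call
   setup_adapter() on that address. */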

static int __init dmascc_init(void) {
  int h, i, j, n;
  int base[MAX_NUM_DEVS], tcmd[MAX_NUM_DEVS], t0[MAX_NUM_DEVS],
    t1[MAX_NUM_DEVS];
  unsigned t_val;
  unsigned long time, start[MAX_NUM_DEVS], delay[MAX_NUM_DEVS],
    counting[MAX_NUM_DEVS];

  /* Initialize random number generator */
  rand = jiffies;
  /* Cards found = 0 */
  n = 0;
  /* Warning message */
  if (!io[0]) printk(KERN_INFO "dmascc: autoprobing (dangerous)\n");

  /* Run autodetection for each card type */
  for (h = 0; h < NUM_TYPES; h++) {

    if (io[0]) {
      /* User-specified I/O address regions */
      for (i = 0; i < hw[h].num_devs; i++) base[i] = 0;
      for (i = 0; i < MAX_NUM_DEVS && io[i]; i++) {
        j = (io[i] - hw[h].io_region) / hw[h].io_delta;
        if (j >= 0 &&
            j < hw[h].num_devs &&
            hw[h].io_region + j * hw[h].io_delta == io[i]) {
          base[j] = io[i];
        }
      }
    } else {
      /* Default I/O address regions */
      for (i = 0; i < hw[h].num_devs; i++) {
        base[i] = hw[h].io_region + i * hw[h].io_delta;
      }
    }

    /* Check valid I/O address regions */
    for (i = 0; i < hw[h].num_devs; i++)
      if (base[i]) {
        if (!request_region(base[i], hw[h].io_size, "dmascc"))
          base[i] = 0;
        else {
          tcmd[i] = base[i] + hw[h].tmr_offset + TMR_CTRL;
          t0[i] = base[i] + hw[h].tmr_offset + TMR_CNT0;
          t1[i] = base[i] + hw[h].tmr_offset + TMR_CNT1;
        }
      }

    /* Start timers */
    for (i = 0; i < hw[h].num_devs; i++)
      if (base[i]) {
        /* Timer 0: LSB+MSB, Mode 3, TMR_0_HZ */
        outb(0x36, tcmd[i]);
        outb((hw[h].tmr_hz/TMR_0_HZ) & 0xFF, t0[i]);
        outb((hw[h].tmr_hz/TMR_0_HZ) >> 8, t0[i]);
        /* Timer 1: LSB+MSB, Mode 0, HZ/10 */
        outb(0x70, tcmd[i]);
        outb((TMR_0_HZ/HZ*10) & 0xFF, t1[i]);
        outb((TMR_0_HZ/HZ*10) >> 8, t1[i]);
        start[i] = jiffies;
        delay[i] = 0;
        counting[i] = 1;
        /* Timer 2: LSB+MSB, Mode 0 */
        outb(0xb0, tcmd[i]);
      }
    time = jiffies;
    /* Wait until counter registers are loaded */
    udelay(2000000/TMR_0_HZ);

    /* Timing loop */
    while (jiffies - time < 13) {
      for (i = 0; i < hw[h].num_devs; i++)
        if (base[i] && counting[i]) {
          /* Read back Timer 1: latch; read LSB; read MSB */
          outb(0x40, tcmd[i]);
          t_val = inb(t1[i]) + (inb(t1[i]) << 8);
          /* Also check whether counter did wrap */
          if (t_val == 0 || t_val > TMR_0_HZ/HZ*10) counting[i] = 0;
          delay[i] = jiffies - start[i];
        }
    }

    /* Evaluate measurements */
    for (i = 0; i < hw[h].num_devs; i++)
      if (base[i]) {
        if ((delay[i] >= 9 && delay[i] <= 11) &&
            /* Ok, we have found an adapter */
            (setup_adapter(base[i], h, n) == 0))
          n++;
        else
          release_region(base[i], hw[h].io_size);
      }

  } /* NUM_TYPES */

  /* If any adapter was successfully initialized, return ok */
  if (n) return 0;

  /* If no adapter found, return error */
  printk(KERN_INFO "dmascc: no adapters found\n");
  return -EIO;
}

module_init(dmascc_init);
module_exit(dmascc_exit);

static void dev_setup(struct net_device *dev)
{
  dev->type = ARPHRD_AX25;
  dev->hard_header_len = 73;
  dev->mtu = 1500;
  dev->addr_len = 7;
  dev->tx_queue_len = 64;
  memcpy(dev->broadcast, ax25_broadcast, 7);
  memcpy(dev->dev_addr, ax25_test, 7);
}

static int __init setup_adapter(int card_base, int type, int n)
{
  int i, irq, chip;
  struct scc_info *info;
  struct net_device *dev;
  struct scc_priv *priv;
  unsigned long time;
  unsigned int irqs;
  int tmr_base = card_base + hw[type].tmr_offset;
  int scc_base = card_base + hw[type].scc_offset;
  char *chipnames[] = CHIPNAMES;

  /* Allocate memory */
  info = kmalloc(sizeof(struct scc_info), GFP_KERNEL | GFP_DMA);
  if (!info) {
    printk(KERN_ERR "dmascc: "
           "could not allocate memory for %s at %#3x\n",
           hw[type].name, card_base);
    goto out;
  }

  /* Initialize what is necessary for write_scc and write_scc_data */
  memset(info, 0, sizeof(struct scc_info));

  info->dev[0] = alloc_netdev(0, "", dev_setup);
  if (!info->dev[0]) {
    printk(KERN_ERR "dmascc: "
           "could not allocate memory for %s at %#3x\n",
           hw[type].name, card_base);
    goto out1;
  }

  info->dev[1] = alloc_netdev(0, "", dev_setup);
  if (!info->dev[1]) {
    printk(KERN_ERR "dmascc: "
           "could not allocate memory for %s at %#3x\n",
           hw[type].name, card_base);
    goto out2;
  }
  spin_lock_init(&info->register_lock);

  priv = &info->priv[0];
  priv->type = type;
  priv->card_base = card_base;
  priv->scc_cmd = scc_base + SCCA_CMD;
  priv->scc_data = scc_base + SCCA_DATA;
  priv->register_lock = &info->register_lock;

  /* Reset SCC */
  write_scc(priv, R9, FHWRES | MIE | NV);

  /* Determine type of chip by enabling SDLC/HDLC enhancements */
  write_scc(priv, R15, SHDLCE);
  if (!read_scc(priv, R15)) {
    /* WR7' not present. This is an ordinary Z8530 SCC. */
    chip = Z8530;
  } else {
    /* Put one character in TX FIFO */
    write_scc_data(priv, 0, 0);
    if (read_scc(priv, R0) & Tx_BUF_EMP) {
      /* TX FIFO not full. This is a Z85230 ESCC with a 4-byte FIFO. */
      chip = Z85230;
    } else {
      /* TX FIFO full. This is a Z85C30 SCC with a 1-byte FIFO. */
      chip = Z85C30;
    }
  }
  write_scc(priv, R15, 0);

  /* Start IRQ auto-detection */
  irqs = probe_irq_on();

  /* Enable interrupts */
  if (type == TYPE_TWIN) {
    outb(0, card_base + TWIN_DMA_CFG);
    inb(card_base + TWIN_CLR_TMR1);
    inb(card_base + TWIN_CLR_TMR2);
    info->twin_serial_cfg = TWIN_EI;
    outb(info->twin_serial_cfg, card_base + TWIN_SERIAL_CFG);
  } else {
    write_scc(priv, R15, CTSIE);
    write_scc(priv, R0, RES_EXT_INT);
    write_scc(priv, R1, EXT_INT_ENAB);
  }

  /* Start timer */
  outb(1, tmr_base + TMR_CNT1);
  outb(0, tmr_base + TMR_CNT1);

  /* Wait and detect IRQ */
  time = jiffies; while (jiffies - time < 2 + HZ / TMR_0_HZ);
  irq = probe_irq_off(irqs);

  /* Clear pending interrupt, disable interrupts */
  if (type == TYPE_TWIN) {
    inb(card_base + TWIN_CLR_TMR1);
  } else {
    write_scc(priv, R1, 0);
    write_scc(priv, R15, 0);
    write_scc(priv, R0, RES_EXT_INT);
  }

  if (irq <= 0) {
    printk(KERN_ERR "dmascc: could not find irq of %s at %#3x (irq=%d)\n",
           hw[type].name, card_base, irq);
    goto out3;
  }

  /* Set up data structures */
  for (i = 0; i < 2; i++) {
    dev = info->dev[i];
    priv = &info->priv[i];
    priv->type = type;
    priv->chip = chip;
    priv->dev = dev;
    priv->info = info;
    priv->channel = i;
    spin_lock_init(&priv->ring_lock);
    priv->register_lock = &info->register_lock;
    priv->card_base = card_base;
    priv->scc_cmd = scc_base + (i ? SCCB_CMD : SCCA_CMD);
    priv->scc_data = scc_base + (i ? SCCB_DATA : SCCA_DATA);
    priv->tmr_cnt = tmr_base + (i ? TMR_CNT2 : TMR_CNT1);
    priv->tmr_ctrl = tmr_base + TMR_CTRL;
    priv->tmr_mode = i ? 0xb0 : 0x70;
    priv->param.pclk_hz = hw[type].pclk_hz;
    priv->param.brg_tc = -1;
    priv->param.clocks = TCTRxCP | RCRTxCP;
    priv->param.persist = 256;
    priv->param.dma = -1;
    INIT_WORK(&priv->rx_work, rx_bh, priv);
    dev->priv = priv;
    sprintf(dev->name, "dmascc%i", 2*n+i);
    SET_MODULE_OWNER(dev);
    dev->base_addr = card_base;
    dev->irq = irq;
    dev->open = scc_open;
    dev->stop = scc_close;
    dev->do_ioctl = scc_ioctl;
    dev->hard_start_xmit = scc_send_packet;
    dev->get_stats = scc_get_stats;
    dev->hard_header = ax25_encapsulate;
    dev->rebuild_header = ax25_rebuild_header;
    dev->set_mac_address = scc_set_mac_address;
  }
  if (register_netdev(info->dev[0])) {
    printk(KERN_ERR "dmascc: could not register %s\n",
           info->dev[0]->name);
    goto out3;
  }
  if (register_netdev(info->dev[1])) {
    printk(KERN_ERR "dmascc: could not register %s\n",
           info->dev[1]->name);
    goto out4;
  }

  info->next = first;
  first = info;
  printk(KERN_INFO "dmascc: found %s (%s) at %#3x, irq %d\n", hw[type].name,
         chipnames[chip], card_base, irq);
  return 0;

out4:
  unregister_netdev(info->dev[0]);
out3:
  if (info->priv[0].type == TYPE_TWIN)
    outb(0, info->dev[0]->base_addr + TWIN_SERIAL_CFG);
  write_scc(&info->priv[0], R9, FHWRES);
  free_netdev(info->dev[1]);
out2:
  free_netdev(info->dev[0]);
out1:
  kfree(info);
out:
  return -1;
}

/* Driver functions */
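
/* Low-level SCC register access. On the PI/PI2 (the default case below) the
   DMA request must be masked via PI_DREQ_MASK around every register access,
   so those paths take the card-wide register lock; the S5SCC/DMA and
   PackeTwin ports can be accessed directly. */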

static void write_scc(struct scc_priv *priv, int reg, int val) {
  unsigned long flags;
  switch (priv->type) {
  case TYPE_S5:
    if (reg) outb(reg, priv->scc_cmd);
    outb(val, priv->scc_cmd);
    return;
  case TYPE_TWIN:
    if (reg) outb_p(reg, priv->scc_cmd);
    outb_p(val, priv->scc_cmd);
    return;
  default:
    spin_lock_irqsave(priv->register_lock, flags);
    outb_p(0, priv->card_base + PI_DREQ_MASK);
    if (reg) outb_p(reg, priv->scc_cmd);
    outb_p(val, priv->scc_cmd);
    outb(1, priv->card_base + PI_DREQ_MASK);
    spin_unlock_irqrestore(priv->register_lock, flags);
    return;
  }
}

static void write_scc_data(struct scc_priv *priv, int val, int fast) {
  unsigned long flags;
  switch (priv->type) {
  case TYPE_S5:
    outb(val, priv->scc_data);
    return;
  case TYPE_TWIN:
    outb_p(val, priv->scc_data);
    return;
  default:
    if (fast) outb_p(val, priv->scc_data);
    else {
      spin_lock_irqsave(priv->register_lock, flags);
      outb_p(0, priv->card_base + PI_DREQ_MASK);
      outb_p(val, priv->scc_data);
      outb(1, priv->card_base + PI_DREQ_MASK);
      spin_unlock_irqrestore(priv->register_lock, flags);
    }
    return;
  }
}

static int read_scc(struct scc_priv *priv, int reg) {
  int rc;
  unsigned long flags;
  switch (priv->type) {
  case TYPE_S5:
    if (reg) outb(reg, priv->scc_cmd);
    return inb(priv->scc_cmd);
  case TYPE_TWIN:
    if (reg) outb_p(reg, priv->scc_cmd);
    return inb_p(priv->scc_cmd);
  default:
    spin_lock_irqsave(priv->register_lock, flags);
    outb_p(0, priv->card_base + PI_DREQ_MASK);
    if (reg) outb_p(reg, priv->scc_cmd);
    rc = inb_p(priv->scc_cmd);
    outb(1, priv->card_base + PI_DREQ_MASK);
    spin_unlock_irqrestore(priv->register_lock, flags);
    return rc;
  }
}

static int read_scc_data(struct scc_priv *priv) {
  int rc;
  unsigned long flags;
  switch (priv->type) {
  case TYPE_S5:
    return inb(priv->scc_data);
  case TYPE_TWIN:
    return inb_p(priv->scc_data);
  default:
    spin_lock_irqsave(priv->register_lock, flags);
    outb_p(0, priv->card_base + PI_DREQ_MASK);
    rc = inb_p(priv->scc_data);
    outb(1, priv->card_base + PI_DREQ_MASK);
    spin_unlock_irqrestore(priv->register_lock, flags);
    return rc;
  }
}

static int scc_open(struct net_device *dev) {
  struct scc_priv *priv = dev->priv;
  struct scc_info *info = priv->info;
  int card_base = priv->card_base;

  /* Request IRQ if not already used by other channel */
  if (!info->irq_used) {
    if (request_irq(dev->irq, scc_isr, 0, "dmascc", info)) {
      return -EAGAIN;
    }
  }
  info->irq_used++;

  /* Request DMA if required */
  if (priv->param.dma >= 0) {
    if (request_dma(priv->param.dma, "dmascc")) {
      if (--info->irq_used == 0) free_irq(dev->irq, info);
      return -EAGAIN;
    } else {
      unsigned long flags = claim_dma_lock();
      clear_dma_ff(priv->param.dma);
      release_dma_lock(flags);
    }
  }

  /* Initialize local variables */
  priv->rx_ptr = 0;
  priv->rx_over = 0;
  priv->rx_head = priv->rx_tail = priv->rx_count = 0;
  priv->state = IDLE;
  priv->tx_head = priv->tx_tail = priv->tx_count = 0;
  priv->tx_ptr = 0;

  /* Reset channel */
  write_scc(priv, R9, (priv->channel ? CHRB : CHRA) | MIE | NV);
  /* X1 clock, SDLC mode */
  write_scc(priv, R4, SDLC | X1CLK);
  /* DMA */
  write_scc(priv, R1, EXT_INT_ENAB | WT_FN_RDYFN);
  /* 8 bit RX char, RX disable */
  write_scc(priv, R3, Rx8);
  /* 8 bit TX char, TX disable */
  write_scc(priv, R5, Tx8);
  /* SDLC address field */
  write_scc(priv, R6, 0);
  /* SDLC flag */
  write_scc(priv, R7, FLAG);
  switch (priv->chip) {
  case Z85C30:
    /* Select WR7' */
    write_scc(priv, R15, SHDLCE);
    /* Auto EOM reset */
    write_scc(priv, R7, AUTOEOM);
    write_scc(priv, R15, 0);
    break;
  case Z85230:
    /* Select WR7' */
    write_scc(priv, R15, SHDLCE);
    /* The following bits are set (see 2.5.2.1):
       - Automatic EOM reset
       - Interrupt request if RX FIFO is half full
         This bit should be ignored in DMA mode (according to the
         documentation), but actually isn't. The receiver doesn't work if
         it is set. Thus, we have to clear it in DMA mode.
       - Interrupt/DMA request if TX FIFO is completely empty
         a) If set, the ESCC behaves as if it had no TX FIFO (Z85C30
            compatibility).
         b) If cleared, DMA requests may follow each other very quickly,
            filling up the TX FIFO.
            Advantage: TX works even in case of high bus latency.
            Disadvantage: Edge-triggered DMA request circuitry may miss
                          a request. No more data is delivered, resulting
                          in a TX FIFO underrun.
       Both PI2 and S5SCC/DMA seem to work fine with TXFIFOE cleared.
       The PackeTwin doesn't. I don't know about the PI, but let's
       assume it behaves like the PI2.
    */
    if (priv->param.dma >= 0) {
      if (priv->type == TYPE_TWIN) write_scc(priv, R7, AUTOEOM | TXFIFOE);
      else write_scc(priv, R7, AUTOEOM);
    } else {
      write_scc(priv, R7, AUTOEOM | RXFIFOH);
    }
    write_scc(priv, R15, 0);
    break;
  }
  /* Preset CRC, NRZ(I) encoding */
  write_scc(priv, R10, CRCPS | (priv->param.nrzi ? NRZI : NRZ));

  /* Configure baud rate generator */
  if (priv->param.brg_tc >= 0) {
    /* Program BR generator */
    write_scc(priv, R12, priv->param.brg_tc & 0xFF);
    write_scc(priv, R13, (priv->param.brg_tc>>8) & 0xFF);
    /* BRG source = SYS CLK; enable BRG; DTR REQ function (required by
       PackeTwin, not connected on the PI2); set DPLL source to BRG */
    write_scc(priv, R14, SSBR | DTRREQ | BRSRC | BRENABL);
    /* Enable DPLL */
    write_scc(priv, R14, SEARCH | DTRREQ | BRSRC | BRENABL);
  } else {
    /* Disable BR generator */
    write_scc(priv, R14, DTRREQ | BRSRC);
  }

  /* Configure clocks */
  if (priv->type == TYPE_TWIN) {
    /* Disable external TX clock receiver */
    outb((info->twin_serial_cfg &=
          ~(priv->channel ? TWIN_EXTCLKB : TWIN_EXTCLKA)),
         card_base + TWIN_SERIAL_CFG);
  }
  write_scc(priv, R11, priv->param.clocks);
  if ((priv->type == TYPE_TWIN) && !(priv->param.clocks & TRxCOI)) {
    /* Enable external TX clock receiver */
    outb((info->twin_serial_cfg |=
          (priv->channel ? TWIN_EXTCLKB : TWIN_EXTCLKA)),
         card_base + TWIN_SERIAL_CFG);
  }

  /* Configure PackeTwin */
  if (priv->type == TYPE_TWIN) {
    /* Assert DTR, enable interrupts */
    outb((info->twin_serial_cfg |= TWIN_EI |
          (priv->channel ? TWIN_DTRB_ON : TWIN_DTRA_ON)),
         card_base + TWIN_SERIAL_CFG);
  }

  /* Read current status */
  priv->rr0 = read_scc(priv, R0);
  /* Enable DCD interrupt */
  write_scc(priv, R15, DCDIE);

  netif_start_queue(dev);

  return 0;
}

static int scc_close(struct net_device *dev) {
  struct scc_priv *priv = dev->priv;
  struct scc_info *info = priv->info;
  int card_base = priv->card_base;

  netif_stop_queue(dev);

  if (priv->type == TYPE_TWIN) {
    /* Drop DTR */
    outb((info->twin_serial_cfg &=
          (priv->channel ? ~TWIN_DTRB_ON : ~TWIN_DTRA_ON)),
         card_base + TWIN_SERIAL_CFG);
  }

  /* Reset channel, free DMA and IRQ */
  write_scc(priv, R9, (priv->channel ? CHRB : CHRA) | MIE | NV);
  if (priv->param.dma >= 0) {
    if (priv->type == TYPE_TWIN) outb(0, card_base + TWIN_DMA_CFG);
    free_dma(priv->param.dma);
  }
  if (--info->irq_used == 0) free_irq(dev->irq, info);

  return 0;
}

static int scc_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd) {
  struct scc_priv *priv = dev->priv;

  switch (cmd) {
  case SIOCGSCCPARAM:
    if (copy_to_user(ifr->ifr_data, &priv->param, sizeof(struct scc_param)))
      return -EFAULT;
    return 0;
  case SIOCSSCCPARAM:
    if (!capable(CAP_NET_ADMIN)) return -EPERM;
    if (netif_running(dev)) return -EAGAIN;
    if (copy_from_user(&priv->param, ifr->ifr_data, sizeof(struct scc_param)))
      return -EFAULT;
    return 0;
  default:
    return -EINVAL;
  }
}

static int scc_send_packet(struct sk_buff *skb, struct net_device *dev) {
  struct scc_priv *priv = dev->priv;
  unsigned long flags;
  int i;

  /* Temporarily stop the scheduler feeding us packets */
  netif_stop_queue(dev);

  /* Transfer data to DMA buffer */
  i = priv->tx_head;
  memcpy(priv->tx_buf[i], skb->data+1, skb->len-1);
  priv->tx_len[i] = skb->len-1;

  /* Clear interrupts while we touch our circular buffers */
  spin_lock_irqsave(&priv->ring_lock, flags);

  /* Move the ring buffer's head */
  priv->tx_head = (i + 1) % NUM_TX_BUF;
  priv->tx_count++;

  /* If we just filled up the last buffer, leave queue stopped.
     The higher layers must wait until we have a DMA buffer
     to accept the data. */
  if (priv->tx_count < NUM_TX_BUF) netif_wake_queue(dev);

  /* Set new TX state */
  if (priv->state == IDLE) {
    /* Assert RTS, start timer */
    priv->state = TX_HEAD;
    priv->tx_start = jiffies;
    write_scc(priv, R5, TxCRC_ENAB | RTS | TxENAB | Tx8);
    write_scc(priv, R15, 0);
    start_timer(priv, priv->param.txdelay, 0);
  }

  /* Turn interrupts back on and free buffer */
  spin_unlock_irqrestore(&priv->ring_lock, flags);
  dev_kfree_skb(skb);

  return 0;
}
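
/* Note: the first byte of the skb is a flag/KISS byte supplied by the AX.25
   stack, which is why the copy above starts at skb->data+1; rx_bh() prepends
   a zero byte to received frames to match. */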

static struct net_device_stats *scc_get_stats(struct net_device *dev) {
  struct scc_priv *priv = dev->priv;

  return &priv->stats;
}

static int scc_set_mac_address(struct net_device *dev, void *sa) {
  memcpy(dev->dev_addr, ((struct sockaddr *)sa)->sa_data, dev->addr_len);
  return 0;
}

static inline void tx_on(struct scc_priv *priv) {
  int i, n;
  unsigned long flags;

  if (priv->param.dma >= 0) {
    n = (priv->chip == Z85230) ? 3 : 1;
    /* Program DMA controller */
    flags = claim_dma_lock();
    set_dma_mode(priv->param.dma, DMA_MODE_WRITE);
    set_dma_addr(priv->param.dma, (int) priv->tx_buf[priv->tx_tail]+n);
    set_dma_count(priv->param.dma, priv->tx_len[priv->tx_tail]-n);
    release_dma_lock(flags);
    /* Enable TX underrun interrupt */
    write_scc(priv, R15, TxUIE);
    /* Configure DREQ */
    if (priv->type == TYPE_TWIN)
      outb((priv->param.dma == 1) ? TWIN_DMA_HDX_T1 : TWIN_DMA_HDX_T3,
           priv->card_base + TWIN_DMA_CFG);
    else
      write_scc(priv, R1, EXT_INT_ENAB | WT_FN_RDYFN | WT_RDY_ENAB);
    /* Write first byte(s) */
    spin_lock_irqsave(priv->register_lock, flags);
    for (i = 0; i < n; i++)
      write_scc_data(priv, priv->tx_buf[priv->tx_tail][i], 1);
    enable_dma(priv->param.dma);
    spin_unlock_irqrestore(priv->register_lock, flags);
  } else {
    write_scc(priv, R15, TxUIE);
    write_scc(priv, R1, EXT_INT_ENAB | WT_FN_RDYFN | TxINT_ENAB);
    tx_isr(priv);
  }
  /* Reset EOM latch if we do not have the AUTOEOM feature */
  if (priv->chip == Z8530) write_scc(priv, R0, RES_EOM_L);
}

static inline void rx_on(struct scc_priv *priv) {
  unsigned long flags;

  /* Clear RX FIFO */
  while (read_scc(priv, R0) & Rx_CH_AV) read_scc_data(priv);
  priv->rx_over = 0;
  if (priv->param.dma >= 0) {
    /* Program DMA controller */
    flags = claim_dma_lock();
    set_dma_mode(priv->param.dma, DMA_MODE_READ);
    set_dma_addr(priv->param.dma, (int) priv->rx_buf[priv->rx_head]);
    set_dma_count(priv->param.dma, BUF_SIZE);
    release_dma_lock(flags);
    enable_dma(priv->param.dma);
    /* Configure PackeTwin DMA */
    if (priv->type == TYPE_TWIN) {
      outb((priv->param.dma == 1) ? TWIN_DMA_HDX_R1 : TWIN_DMA_HDX_R3,
           priv->card_base + TWIN_DMA_CFG);
    }
    /* Sp. cond. intr. only, ext int enable, RX DMA enable */
    write_scc(priv, R1, EXT_INT_ENAB | INT_ERR_Rx |
              WT_RDY_RT | WT_FN_RDYFN | WT_RDY_ENAB);
  } else {
    /* Reset current frame */
    priv->rx_ptr = 0;
    /* Intr. on all Rx characters and Sp. cond., ext int enable */
    write_scc(priv, R1, EXT_INT_ENAB | INT_ALL_Rx | WT_RDY_RT |
              WT_FN_RDYFN);
  }
  write_scc(priv, R0, ERR_RES);
  write_scc(priv, R3, RxENABLE | Rx8 | RxCRC_ENAB);
}

static inline void rx_off(struct scc_priv *priv) {
  /* Disable receiver */
  write_scc(priv, R3, Rx8);
  /* Disable DREQ / RX interrupt */
  if (priv->param.dma >= 0 && priv->type == TYPE_TWIN)
    outb(0, priv->card_base + TWIN_DMA_CFG);
  else
    write_scc(priv, R1, EXT_INT_ENAB | WT_FN_RDYFN);
  /* Disable DMA */
  if (priv->param.dma >= 0) disable_dma(priv->param.dma);
}

static void start_timer(struct scc_priv *priv, int t, int r15) {
  unsigned long flags;

  outb(priv->tmr_mode, priv->tmr_ctrl);
  if (t == 0) {
    tm_isr(priv);
  } else if (t > 0) {
    save_flags(flags);
    cli();
    outb(t & 0xFF, priv->tmr_cnt);
    outb((t >> 8) & 0xFF, priv->tmr_cnt);
    if (priv->type != TYPE_TWIN) {
      write_scc(priv, R15, r15 | CTSIE);
      priv->rr0 |= CTS;
    }
    restore_flags(flags);
  }
}

static inline unsigned char random(void) {
  /* See "Numerical Recipes in C", second edition, p. 284 */
  rand = rand * 1664525L + 1013904223L;
  return (unsigned char) (rand >> 24);
}
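
/* The pseudo-random byte above implements p-persistence: tm_isr() divides it
   by param.persist and multiplies by param.slottime to pick the back-off
   before the next channel probe. */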

static inline void z8530_isr(struct scc_info *info) {
  int is, i = 100;

  while ((is = read_scc(&info->priv[0], R3)) && i--) {
    if (is & CHARxIP) {
      rx_isr(&info->priv[0]);
    } else if (is & CHATxIP) {
      tx_isr(&info->priv[0]);
    } else if (is & CHAEXT) {
      es_isr(&info->priv[0]);
    } else if (is & CHBRxIP) {
      rx_isr(&info->priv[1]);
    } else if (is & CHBTxIP) {
      tx_isr(&info->priv[1]);
    } else {
      es_isr(&info->priv[1]);
    }
    write_scc(&info->priv[0], R0, RES_H_IUS);
    i++;
  }
  if (i < 0) {
    printk(KERN_ERR "dmascc: stuck in ISR with RR3=0x%02x.\n", is);
  }
  /* Ok, no interrupts pending from this 8530. The INT line should
     be inactive now. */
}

static irqreturn_t scc_isr(int irq, void *dev_id, struct pt_regs *regs) {
  struct scc_info *info = dev_id;

  spin_lock(info->priv[0].register_lock);
  /* At this point interrupts are enabled, and the interrupt under service
     is already acknowledged, but masked off.

     Interrupt processing: We loop until we know that the IRQ line is
     low. If another positive edge occurs afterwards during the ISR,
     another interrupt will be triggered by the interrupt controller
     as soon as the IRQ level is enabled again (see asm/irq.h).

     Bottom-half handlers will be processed after scc_isr(). This is
     important, since we only have small ringbuffers and want new data
     to be fetched/delivered immediately. */

  if (info->priv[0].type == TYPE_TWIN) {
    int is, card_base = info->priv[0].card_base;
    while ((is = ~inb(card_base + TWIN_INT_REG)) &
           TWIN_INT_MSK) {
      if (is & TWIN_SCC_MSK) {
        z8530_isr(info);
      } else if (is & TWIN_TMR1_MSK) {
        inb(card_base + TWIN_CLR_TMR1);
        tm_isr(&info->priv[0]);
      } else {
        inb(card_base + TWIN_CLR_TMR2);
        tm_isr(&info->priv[1]);
      }
    }
  } else z8530_isr(info);
  spin_unlock(info->priv[0].register_lock);
  return IRQ_HANDLED;
}

static void rx_isr(struct scc_priv *priv) {
  if (priv->param.dma >= 0) {
    /* Check special condition and perform error reset. See 2.4.7.5. */
    special_condition(priv, read_scc(priv, R1));
    write_scc(priv, R0, ERR_RES);
  } else {
    /* Check special condition for each character. Error reset not necessary.
       Same algorithm for SCC and ESCC. See 2.4.7.1 and 2.4.7.4. */
    int rc;
    while (read_scc(priv, R0) & Rx_CH_AV) {
      rc = read_scc(priv, R1);
      if (priv->rx_ptr < BUF_SIZE)
        priv->rx_buf[priv->rx_head][priv->rx_ptr++] =
          read_scc_data(priv);
      else {
        priv->rx_over = 2;
        read_scc_data(priv);
      }
      special_condition(priv, rc);
    }
  }
}

static void special_condition(struct scc_priv *priv, int rc) {
  int cb;
  unsigned long flags;

  /* See Figure 2-15. Only overrun and EOF need to be checked. */

  if (rc & Rx_OVR) {
    /* Receiver overrun */
    priv->rx_over = 1;
    if (priv->param.dma < 0) write_scc(priv, R0, ERR_RES);
  } else if (rc & END_FR) {
    /* End of frame. Get byte count */
    if (priv->param.dma >= 0) {
      flags = claim_dma_lock();
      cb = BUF_SIZE - get_dma_residue(priv->param.dma) - 2;
      release_dma_lock(flags);
    } else {
      cb = priv->rx_ptr - 2;
    }
    if (priv->rx_over) {
      /* We had an overrun */
      priv->stats.rx_errors++;
      if (priv->rx_over == 2) priv->stats.rx_length_errors++;
      else priv->stats.rx_fifo_errors++;
      priv->rx_over = 0;
    } else if (rc & CRC_ERR) {
      /* Count invalid CRC only if packet length >= minimum */
      if (cb >= 15) {
        priv->stats.rx_errors++;
        priv->stats.rx_crc_errors++;
      }
    } else {
      if (cb >= 15) {
        if (priv->rx_count < NUM_RX_BUF - 1) {
          /* Put good frame in FIFO */
          priv->rx_len[priv->rx_head] = cb;
          priv->rx_head = (priv->rx_head + 1) % NUM_RX_BUF;
          priv->rx_count++;
          schedule_work(&priv->rx_work);
        } else {
          priv->stats.rx_errors++;
          priv->stats.rx_over_errors++;
        }
      }
    }
    /* Get ready for new frame */
    if (priv->param.dma >= 0) {
      flags = claim_dma_lock();
      set_dma_addr(priv->param.dma, (int) priv->rx_buf[priv->rx_head]);
      set_dma_count(priv->param.dma, BUF_SIZE);
      release_dma_lock(flags);
    } else {
      priv->rx_ptr = 0;
    }
  }
}

static void rx_bh(void *arg) {
  struct scc_priv *priv = arg;
  int i = priv->rx_tail;
  int cb;
  unsigned long flags;
  struct sk_buff *skb;
  unsigned char *data;

  spin_lock_irqsave(&priv->ring_lock, flags);
  while (priv->rx_count) {
    spin_unlock_irqrestore(&priv->ring_lock, flags);
    cb = priv->rx_len[i];
    /* Allocate buffer */
    skb = dev_alloc_skb(cb+1);
    if (skb == NULL) {
      /* Drop packet */
      priv->stats.rx_dropped++;
    } else {
      /* Fill buffer */
      data = skb_put(skb, cb+1);
      data[0] = 0;
      memcpy(&data[1], priv->rx_buf[i], cb);
      skb->dev = priv->dev;
      skb->protocol = ntohs(ETH_P_AX25);
      skb->mac.raw = skb->data;
      netif_rx(skb);
      priv->dev->last_rx = jiffies;
      priv->stats.rx_packets++;
      priv->stats.rx_bytes += cb;
    }
    spin_lock_irqsave(&priv->ring_lock, flags);
    /* Move tail */
    priv->rx_tail = i = (i + 1) % NUM_RX_BUF;
    priv->rx_count--;
  }
  spin_unlock_irqrestore(&priv->ring_lock, flags);
}

static void tx_isr(struct scc_priv *priv) {
  int i = priv->tx_tail, p = priv->tx_ptr;

  /* Suspend TX interrupts if we don't want to send anything.
     See Figure 2-22. */
  if (p == priv->tx_len[i]) {
    write_scc(priv, R0, RES_Tx_P);
    return;
  }

  /* Write characters */
  while ((read_scc(priv, R0) & Tx_BUF_EMP) && p < priv->tx_len[i]) {
    write_scc_data(priv, priv->tx_buf[i][p++], 0);
  }

  /* Reset EOM latch of Z8530 */
  if (!priv->tx_ptr && p && priv->chip == Z8530)
    write_scc(priv, R0, RES_EOM_L);

  priv->tx_ptr = p;
}

static void es_isr(struct scc_priv *priv) {
  int i, rr0, drr0, res;
  unsigned long flags;

  /* Read status, reset interrupt bit (open latches) */
  rr0 = read_scc(priv, R0);
  write_scc(priv, R0, RES_EXT_INT);
  drr0 = priv->rr0 ^ rr0;
  priv->rr0 = rr0;

  /* Transmit underrun (2.4.9.6). We can't check the TxEOM flag, since
     it might have already been cleared again by AUTOEOM. */
  if (priv->state == TX_DATA) {
    /* Get remaining bytes */
    i = priv->tx_tail;
    if (priv->param.dma >= 0) {
      disable_dma(priv->param.dma);
      flags = claim_dma_lock();
      res = get_dma_residue(priv->param.dma);
      release_dma_lock(flags);
    } else {
      res = priv->tx_len[i] - priv->tx_ptr;
      priv->tx_ptr = 0;
    }
    /* Disable DREQ / TX interrupt */
    if (priv->param.dma >= 0 && priv->type == TYPE_TWIN)
      outb(0, priv->card_base + TWIN_DMA_CFG);
    else
      write_scc(priv, R1, EXT_INT_ENAB | WT_FN_RDYFN);
    if (res) {
      /* Update packet statistics */
      priv->stats.tx_errors++;
      priv->stats.tx_fifo_errors++;
      /* Other underrun interrupts may already be waiting */
      write_scc(priv, R0, RES_EXT_INT);
      write_scc(priv, R0, RES_EXT_INT);
    } else {
      /* Update packet statistics */
      priv->stats.tx_packets++;
      priv->stats.tx_bytes += priv->tx_len[i];
      /* Remove frame from FIFO */
      priv->tx_tail = (i + 1) % NUM_TX_BUF;
      priv->tx_count--;
      /* Inform upper layers */
      netif_wake_queue(priv->dev);
    }
    /* Switch state */
    write_scc(priv, R15, 0);
    if (priv->tx_count &&
        (jiffies - priv->tx_start) < priv->param.txtimeout) {
      priv->state = TX_PAUSE;
      start_timer(priv, priv->param.txpause, 0);
    } else {
      priv->state = TX_TAIL;
      start_timer(priv, priv->param.txtail, 0);
    }
  }

  /* DCD transition */
  if (drr0 & DCD) {
    if (rr0 & DCD) {
      switch (priv->state) {
      case IDLE:
      case WAIT:
        priv->state = DCD_ON;
        write_scc(priv, R15, 0);
        start_timer(priv, priv->param.dcdon, 0);
      }
    } else {
      switch (priv->state) {
      case RX_ON:
        rx_off(priv);
        priv->state = DCD_OFF;
        write_scc(priv, R15, 0);
        start_timer(priv, priv->param.dcdoff, 0);
      }
    }
  }

  /* CTS transition */
  if ((drr0 & CTS) && (~rr0 & CTS) && priv->type != TYPE_TWIN)
    tm_isr(priv);
}
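
/* tm_isr() is entered when the 8253/8254 timer channel assigned to this SCC
   channel expires (reported via the CTS transition above on PI/PI2/S5, or via
   the PackeTwin's dedicated timer interrupts in scc_isr()) and advances the
   half-duplex state machine. */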

static void tm_isr(struct scc_priv *priv) {
  switch (priv->state) {
  case TX_HEAD:
  case TX_PAUSE:
    tx_on(priv);
    priv->state = TX_DATA;
    break;
  case TX_TAIL:
    write_scc(priv, R5, TxCRC_ENAB | Tx8);
    priv->state = RTS_OFF;
    if (priv->type != TYPE_TWIN) write_scc(priv, R15, 0);
    start_timer(priv, priv->param.rtsoff, 0);
    break;
  case RTS_OFF:
    write_scc(priv, R15, DCDIE);
    priv->rr0 = read_scc(priv, R0);
    if (priv->rr0 & DCD) {
      priv->stats.collisions++;
      rx_on(priv);
      priv->state = RX_ON;
    } else {
      priv->state = WAIT;
      start_timer(priv, priv->param.waittime, DCDIE);
    }
    break;
  case WAIT:
    if (priv->tx_count) {
      priv->state = TX_HEAD;
      priv->tx_start = jiffies;
      write_scc(priv, R5, TxCRC_ENAB | RTS | TxENAB | Tx8);
      write_scc(priv, R15, 0);
      start_timer(priv, priv->param.txdelay, 0);
    } else {
      priv->state = IDLE;
      if (priv->type != TYPE_TWIN) write_scc(priv, R15, DCDIE);
    }
    break;
  case DCD_ON:
  case DCD_OFF:
    write_scc(priv, R15, DCDIE);
    priv->rr0 = read_scc(priv, R0);
    if (priv->rr0 & DCD) {
      rx_on(priv);
      priv->state = RX_ON;
    } else {
      priv->state = WAIT;
      start_timer(priv,
                  random()/priv->param.persist*priv->param.slottime,
                  DCDIE);
    }
    break;