/* $Id: plip.c,v 1.3.6.2 1997/04/16 15:07:56 phil Exp $ */
/* PLIP: A parallel port "network" driver for Linux. */
/* This driver is for parallel ports with a 5-bit cable (LapLink (R) cable). */
/*
 * Authors:     Donald Becker <becker@super.org>
 *              Tommy Thorn <thorn@daimi.aau.dk>
 *              Tanabe Hiroyasu <hiro@sanpo.t.u-tokyo.ac.jp>
 *              Alan Cox <gw4pts@gw4pts.ampr.org>
 *              Peter Bauer <100136.3530@compuserve.com>
 *              Niibe Yutaka <gniibe@mri.co.jp>
 *              Nimrod Zimerman <zimerman@mailandnews.com>
 *
 * Enhancements:
 *              Modularization and ifreq/ifmap support by Alan Cox.
 *              Rewritten by Niibe Yutaka.
 *              parport-sharing awareness code by Philip Blundell.
 *              SMP locking by Niibe Yutaka.
 *              Support for parallel ports with no IRQ (poll mode),
 *              and modifications to use the parallel port API,
 *              by Nimrod Zimerman.
 *
 * Fixes:
 *              Niibe Yutaka
 *                - Module initialization.
 *                - MTU fix.
 *                - Make sure the other end is OK before sending a packet.
 *                - Fix immediate timer problem.
 *
 *              Al Viro
 *                - Changed {enable,disable}_irq handling to make it work
 *                  with the new ("stack") semantics.
 *
 *              This program is free software; you can redistribute it and/or
 *              modify it under the terms of the GNU General Public License
 *              as published by the Free Software Foundation; either version
 *              2 of the License, or (at your option) any later version.
 */

/*
 * Original version and the name 'PLIP' from Donald Becker <becker@super.org>,
 * inspired by Russ Nelson's parallel port packet driver.
 *
 * NOTE:
 *     Tanabe Hiroyasu changed the protocol, and that version shipped in
 *     Linux v1.0.  Because of the need to interoperate with DOS machines
 *     running the Crynwr packet driver, Peter Bauer later changed the
 *     protocol back to the original one.
 *
 *     This version follows the original PLIP protocol, so it cannot
 *     communicate with the PLIP of Linux v1.0.
 *
 *     To use this driver with a DOS box, turn on the ARP switch:
 *      # ifconfig plip[0-2] arp
 */
static const char *version = "NET3 PLIP version 2.4-parport gniibe@mri.co.jp\n";

/*
  Sources:
        Ideas and protocols came from Russ Nelson's <nelson@crynwr.com>
        "parallel.asm" parallel port packet driver.

  The "Crynwr" parallel port standard specifies the following protocol:
        Trigger by sending nibble '0x8' (this causes interrupt on other end)
        count-low octet
        count-high octet
        ... data octets
        checksum octet
  Each octet is sent as <wait for rx. '0x1?'> <send 0x10+(octet&0x0F)>
                        <wait for rx. '0x0?'> <send 0x00+((octet>>4)&0x0F)>
  (An illustrative nibble-decoding sketch follows this comment.)

  The packet is encapsulated as if it were ethernet.

  The cable used is a de facto standard parallel null cable -- sold as
  a "LapLink" cable by various places.  You'll need a 12-conductor cable to
  make one yourself.  The wiring is:
        SLCTIN          17 - 17
        GROUND          25 - 25
        D0->ERROR        2 - 15         15 - 2
        D1->SLCT         3 - 13         13 - 3
        D2->PAPOUT       4 - 12         12 - 4
        D3->ACK          5 - 10         10 - 5
        D4->BUSY         6 - 11         11 - 6
  Do not connect the other pins.  They are:
        D5,D6,D7 are 7,8,9
        STROBE is 1, FEED is 14, INIT is 16
        extra grounds are 18,19,20,21,22,23,24
*/
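
/*
 * The sketch below is an editor's illustration, not part of the driver: it
 * mirrors the nibble handshake described above, using the same shifts as
 * plip_receive() further down.  The sender presents a nibble on D0-D3 and
 * toggles D4 as the handshake bit; through the null cable those five lines
 * show up on the peer's status port as bits 3-7 (ERROR, SLCT, PAPOUT, ACK,
 * BUSY).  BUSY reads inverted by the port hardware, which is why the receive
 * loops below test bit 7 with the opposite sense.  The function and variable
 * names here are made up for the example.
 */
#if 0   /* illustration only, never compiled */
static unsigned char example_decode_octet(unsigned char status_first,
                                          unsigned char status_second)
{
        unsigned char octet;

        /* First status read (BUSY bit clear): bits 3-6 carry the low nibble. */
        octet  = (status_first >> 3) & 0x0f;
        /* Second status read (BUSY bit set): bits 3-6 carry the high nibble. */
        octet |= (status_second << 1) & 0xf0;
        return octet;
}
#endif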
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/types.h>
#include <linux/fcntl.h>
#include <linux/interrupt.h>
#include <linux/string.h>
#include <linux/ptrace.h>
#include <linux/if_ether.h>
#include <asm/system.h>
#include <linux/in.h>
#include <linux/errno.h>
#include <linux/delay.h>
#include <linux/lp.h>
#include <linux/init.h>

#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/inetdevice.h>
#include <linux/skbuff.h>
#include <linux/if_plip.h>
#include <net/neighbour.h>

#include <linux/tqueue.h>
#include <linux/ioport.h>
#include <linux/spinlock.h>
#include <asm/bitops.h>
#include <asm/irq.h>
#include <asm/byteorder.h>
#include <asm/semaphore.h>

#include <linux/parport.h>
/* Maximum number of devices to support. */
#define PLIP_MAX        8

/* Use 0 for production, 1 for verification, >2 for debug */
#ifndef NET_DEBUG
#define NET_DEBUG       1
#endif
static unsigned int net_debug = NET_DEBUG;

#define ENABLE(irq)     if (irq != -1) enable_irq(irq)
#define DISABLE(irq)    if (irq != -1) disable_irq(irq)

/* In microseconds */
#define PLIP_DELAY_UNIT            1

/* Connection time out = PLIP_TRIGGER_WAIT * PLIP_DELAY_UNIT usec */
#define PLIP_TRIGGER_WAIT        500

/* Nibble time out = PLIP_NIBBLE_WAIT * PLIP_DELAY_UNIT usec */
#define PLIP_NIBBLE_WAIT        3000
/* Bottom halves */
static void plip_kick_bh(struct net_device *dev);
static void plip_bh(struct net_device *dev);
static void plip_timer_bh(struct net_device *dev);

/* Interrupt handler */
static void plip_interrupt(int irq, void *dev_id, struct pt_regs *regs);

/* Functions for DEV methods */
static int plip_tx_packet(struct sk_buff *skb, struct net_device *dev);
static int plip_hard_header(struct sk_buff *skb, struct net_device *dev,
                            unsigned short type, void *daddr,
                            void *saddr, unsigned len);
static int plip_hard_header_cache(struct neighbour *neigh,
                                  struct hh_cache *hh);
static int plip_open(struct net_device *dev);
static int plip_close(struct net_device *dev);
static struct net_device_stats *plip_get_stats(struct net_device *dev);
static int plip_config(struct net_device *dev, struct ifmap *map);
static int plip_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd);
static int plip_preempt(void *handle);
static void plip_wakeup(void *handle);
enum plip_connection_state {
        PLIP_CN_NONE=0,
        PLIP_CN_RECEIVE,
        PLIP_CN_SEND,
        PLIP_CN_CLOSING,
        PLIP_CN_ERROR
};

enum plip_packet_state {
        PLIP_PK_DONE=0,
        PLIP_PK_TRIGGER,
        PLIP_PK_LENGTH_LSB,
        PLIP_PK_LENGTH_MSB,
        PLIP_PK_DATA,
        PLIP_PK_CHECKSUM
};

enum plip_nibble_state {
        PLIP_NB_BEGIN,
        PLIP_NB_1,
        PLIP_NB_2,
};

struct plip_local {
        enum plip_packet_state state;
        enum plip_nibble_state nibble;
        union {
                struct {
#if defined(__LITTLE_ENDIAN)
                        unsigned char lsb;
                        unsigned char msb;
#elif defined(__BIG_ENDIAN)
                        unsigned char msb;
                        unsigned char lsb;
#else
#error  "Please fix the endianness defines in <asm/byteorder.h>"
#endif
                } b;
                unsigned short h;
        } length;
        unsigned short byte;
        unsigned char  checksum;
        unsigned char  data;
        struct sk_buff *skb;
};
struct net_local {
        struct net_device_stats enet_stats;
        struct tq_struct immediate;
        struct tq_struct deferred;
        struct tq_struct timer;
        struct plip_local snd_data;
        struct plip_local rcv_data;
        struct pardevice *pardev;
        unsigned long trigger;
        unsigned long nibble;
        enum plip_connection_state connection;
        unsigned short timeout_count;
        int is_deferred;
        int port_owner;
        int should_relinquish;
        int (*orig_hard_header)(struct sk_buff *skb, struct net_device *dev,
                                unsigned short type, void *daddr,
                                void *saddr, unsigned len);
        int (*orig_hard_header_cache)(struct neighbour *neigh,
                                      struct hh_cache *hh);
        spinlock_t lock;
        atomic_t kill_timer;
        struct semaphore killed_timer_sem;
};
static inline void enable_parport_interrupts (struct net_device *dev)
{
        if (dev->irq != -1)
        {
                struct parport *port =
                        ((struct net_local *)dev->priv)->pardev->port;
                port->ops->enable_irq (port);
        }
}

static inline void disable_parport_interrupts (struct net_device *dev)
{
        if (dev->irq != -1)
        {
                struct parport *port =
                        ((struct net_local *)dev->priv)->pardev->port;
                port->ops->disable_irq (port);
        }
}

static inline void write_data (struct net_device *dev, unsigned char data)
{
        struct parport *port =
                ((struct net_local *)dev->priv)->pardev->port;

        port->ops->write_data (port, data);
}

static inline unsigned char read_status (struct net_device *dev)
{
        struct parport *port =
                ((struct net_local *)dev->priv)->pardev->port;

        return port->ops->read_status (port);
}
/* Entry point of PLIP driver.
   Probe the hardware, and register/initialize the driver.

   PLIP is rather weird, because of the way it interacts with the parport
   system.  It is _not_ initialised from Space.c.  Instead, plip_init()
   is called, and that function makes up a "struct net_device" for each port
   and then calls us here.
 */
int __init
plip_init_dev(struct net_device *dev, struct parport *pb)
{
        struct net_local *nl;
        struct pardevice *pardev;

        dev->irq = pb->irq;
        dev->base_addr = pb->base;

        if (pb->irq == -1) {
                printk(KERN_INFO "plip: %s has no IRQ. Using IRQ-less mode, "
                                 "which is fairly inefficient!\n", pb->name);
        }

        pardev = parport_register_device(pb, dev->name, plip_preempt,
                                         plip_wakeup, plip_interrupt,
                                         0, dev);

        if (!pardev)
                return -ENODEV;

        printk(KERN_INFO "%s", version);
        if (dev->irq != -1)
                printk(KERN_INFO "%s: Parallel port at %#3lx, using IRQ %d.\n",
                       dev->name, dev->base_addr, dev->irq);
        else
                printk(KERN_INFO "%s: Parallel port at %#3lx, not using IRQ.\n",
                       dev->name, dev->base_addr);

        /* Fill in the generic fields of the device structure. */
        ether_setup(dev);

        /* Then, override parts of it */
        dev->hard_start_xmit     = plip_tx_packet;
        dev->open                = plip_open;
        dev->stop                = plip_close;
        dev->get_stats           = plip_get_stats;
        dev->set_config          = plip_config;
        dev->do_ioctl            = plip_ioctl;
        dev->header_cache_update = NULL;
        dev->tx_queue_len        = 10;
        dev->flags               = IFF_POINTOPOINT|IFF_NOARP;
        memset(dev->dev_addr, 0xfc, ETH_ALEN);

        /* Set the private structure */
        dev->priv = kmalloc(sizeof (struct net_local), GFP_KERNEL);
        if (dev->priv == NULL) {
                printk(KERN_ERR "%s: out of memory\n", dev->name);
                parport_unregister_device(pardev);
                return -ENOMEM;
        }
        memset(dev->priv, 0, sizeof(struct net_local));
        nl = (struct net_local *) dev->priv;

        nl->orig_hard_header = dev->hard_header;
        dev->hard_header     = plip_hard_header;

        nl->orig_hard_header_cache = dev->hard_header_cache;
        dev->hard_header_cache     = plip_hard_header_cache;

        nl->pardev = pardev;

        nl->port_owner = 0;

        /* Initialize constants */
        nl->trigger = PLIP_TRIGGER_WAIT;
        nl->nibble  = PLIP_NIBBLE_WAIT;

        /* Initialize task queue structures */
        nl->immediate.next = NULL;
        nl->immediate.sync = 0;
        nl->immediate.routine = (void (*)(void *))plip_bh;
        nl->immediate.data = dev;

        nl->deferred.next = NULL;
        nl->deferred.sync = 0;
        nl->deferred.routine = (void (*)(void *))plip_kick_bh;
        nl->deferred.data = dev;

        if (dev->irq == -1) {
                nl->timer.next = NULL;
                nl->timer.sync = 0;
                nl->timer.routine = (void (*)(void *))plip_timer_bh;
                nl->timer.data = dev;
        }

        spin_lock_init(&nl->lock);

        return 0;
}
/* Bottom half handler for the delayed request.
   This routine is kicked by do_timer().
   Request `plip_bh' to be invoked. */
static void
plip_kick_bh(struct net_device *dev)
{
        struct net_local *nl = (struct net_local *)dev->priv;

        if (nl->is_deferred) {
                queue_task(&nl->immediate, &tq_immediate);
                mark_bh(IMMEDIATE_BH);
        }
}
/* Forward declarations of internal routines */
static int plip_none(struct net_device *, struct net_local *,
                     struct plip_local *, struct plip_local *);
static int plip_receive_packet(struct net_device *, struct net_local *,
                               struct plip_local *, struct plip_local *);
static int plip_send_packet(struct net_device *, struct net_local *,
                            struct plip_local *, struct plip_local *);
static int plip_connection_close(struct net_device *, struct net_local *,
                                 struct plip_local *, struct plip_local *);
static int plip_error(struct net_device *, struct net_local *,
                      struct plip_local *, struct plip_local *);
static int plip_bh_timeout_error(struct net_device *dev, struct net_local *nl,
                                 struct plip_local *snd,
                                 struct plip_local *rcv,
                                 int error);

#define OK         0
#define TIMEOUT    1
#define ERROR      2
#define HS_TIMEOUT 3

typedef int (*plip_func)(struct net_device *dev, struct net_local *nl,
                         struct plip_local *snd, struct plip_local *rcv);

static plip_func connection_state_table[] =
{
        plip_none,
        plip_receive_packet,
        plip_send_packet,
        plip_connection_close,
        plip_error
};
/* Bottom half handler of PLIP. */
static void
plip_bh(struct net_device *dev)
{
        struct net_local *nl = (struct net_local *)dev->priv;
        struct plip_local *snd = &nl->snd_data;
        struct plip_local *rcv = &nl->rcv_data;
        plip_func f;
        int r;

        nl->is_deferred = 0;
        f = connection_state_table[nl->connection];
        if ((r = (*f)(dev, nl, snd, rcv)) != OK
            && (r = plip_bh_timeout_error(dev, nl, snd, rcv, r)) != OK) {
                nl->is_deferred = 1;
                queue_task(&nl->deferred, &tq_timer);
        }
}

static void
plip_timer_bh(struct net_device *dev)
{
        struct net_local *nl = (struct net_local *)dev->priv;

        if (!(atomic_read (&nl->kill_timer))) {
                if (!dev->interrupt)
                        plip_interrupt (-1, dev, NULL);

                queue_task (&nl->timer, &tq_timer);
        }
        else {
                up (&nl->killed_timer_sem);
        }
}
static int
plip_bh_timeout_error(struct net_device *dev, struct net_local *nl,
                      struct plip_local *snd, struct plip_local *rcv,
                      int error)
{
        unsigned char c0;
        /*
         * This is tricky. If we got here from the beginning of send (either
         * with ERROR or HS_TIMEOUT) we have IRQ enabled. Otherwise it's
         * already disabled. With the old variant of {enable,disable}_irq()
         * extra disable_irq() was a no-op. Now it became mortal - it's
         * unbalanced and thus we'll never re-enable IRQ (until rmmod plip,
         * that is). So we have to treat HS_TIMEOUT and ERROR from send
         * in a special way.
         */

        spin_lock_irq(&nl->lock);
        if (nl->connection == PLIP_CN_SEND) {

                if (error != ERROR) { /* Timeout */
                        nl->timeout_count++;
                        if ((error == HS_TIMEOUT
                             && nl->timeout_count <= 10)
                            || nl->timeout_count <= 3) {
                                spin_unlock_irq(&nl->lock);
                                /* Try again later */
                                return TIMEOUT;
                        }
                        c0 = read_status(dev);
                        printk(KERN_WARNING "%s: transmit timeout(%d,%02x)\n",
                               dev->name, snd->state, c0);
                } else
                        error = HS_TIMEOUT;
                nl->enet_stats.tx_errors++;
                nl->enet_stats.tx_aborted_errors++;
        } else if (nl->connection == PLIP_CN_RECEIVE) {
                if (rcv->state == PLIP_PK_TRIGGER) {
                        /* Transmission was interrupted. */
                        spin_unlock_irq(&nl->lock);
                        return OK;
                }
                if (error != ERROR) { /* Timeout */
                        if (++nl->timeout_count <= 3) {
                                spin_unlock_irq(&nl->lock);
                                /* Try again later */
                                return TIMEOUT;
                        }
                        c0 = read_status(dev);
                        printk(KERN_WARNING "%s: receive timeout(%d,%02x)\n",
                               dev->name, rcv->state, c0);
                }
                nl->enet_stats.rx_dropped++;
        }
        rcv->state = PLIP_PK_DONE;
        if (rcv->skb) {
                kfree_skb(rcv->skb);
                rcv->skb = NULL;
        }
        snd->state = PLIP_PK_DONE;
        if (snd->skb) {
                dev_kfree_skb(snd->skb);
                snd->skb = NULL;
        }
        spin_unlock_irq(&nl->lock);
        if (error == HS_TIMEOUT) {
                DISABLE(dev->irq);
                synchronize_irq();
        }
        disable_parport_interrupts (dev);
        dev->tbusy = 1;
        nl->connection = PLIP_CN_ERROR;
        write_data (dev, 0x00);

        return TIMEOUT;
}
static int
plip_none(struct net_device *dev, struct net_local *nl,
          struct plip_local *snd, struct plip_local *rcv)
{
        return OK;
}
/* PLIP_RECEIVE --- receive a byte (two nibbles)
   Returns OK on success, TIMEOUT on timeout */
static inline int
plip_receive(unsigned short nibble_timeout, struct net_device *dev,
             enum plip_nibble_state *ns_p, unsigned char *data_p)
{
        unsigned char c0, c1;
        unsigned int cx;

        switch (*ns_p) {
        case PLIP_NB_BEGIN:
                cx = nibble_timeout;
                while (1) {
                        c0 = read_status(dev);
                        udelay(PLIP_DELAY_UNIT);
                        if ((c0 & 0x80) == 0) {
                                c1 = read_status(dev);
                                if (c0 == c1)
                                        break;
                        }
                        if (--cx == 0)
                                return TIMEOUT;
                }
                *data_p = (c0 >> 3) & 0x0f;
                write_data (dev, 0x10); /* send ACK */
                *ns_p = PLIP_NB_1;

        case PLIP_NB_1:
                cx = nibble_timeout;
                while (1) {
                        c0 = read_status(dev);
                        udelay(PLIP_DELAY_UNIT);
                        if (c0 & 0x80) {
                                c1 = read_status(dev);
                                if (c0 == c1)
                                        break;
                        }
                        if (--cx == 0)
                                return TIMEOUT;
                }
                *data_p |= (c0 << 1) & 0xf0;
                write_data (dev, 0x00); /* send ACK */
                *ns_p = PLIP_NB_BEGIN;

        case PLIP_NB_2:
                break;
        }
        return OK;
}
/* PLIP_RECEIVE_PACKET --- receive a packet */
static int
plip_receive_packet(struct net_device *dev, struct net_local *nl,
                    struct plip_local *snd, struct plip_local *rcv)
{
        unsigned short nibble_timeout = nl->nibble;
        unsigned char *lbuf;

        switch (rcv->state) {
        case PLIP_PK_TRIGGER:
                DISABLE(dev->irq);
                /* Don't need to synchronize irq, as we can safely ignore it */
                disable_parport_interrupts (dev);
                dev->interrupt = 0;
                write_data (dev, 0x01); /* send ACK */
                if (net_debug > 2)
                        printk(KERN_DEBUG "%s: receive start\n", dev->name);
                rcv->state = PLIP_PK_LENGTH_LSB;
                rcv->nibble = PLIP_NB_BEGIN;

        case PLIP_PK_LENGTH_LSB:
                if (snd->state != PLIP_PK_DONE) {
                        if (plip_receive(nl->trigger, dev,
                                         &rcv->nibble, &rcv->length.b.lsb)) {
                                /* collision, here dev->tbusy == 1 */
                                rcv->state = PLIP_PK_DONE;
                                nl->is_deferred = 1;
                                nl->connection = PLIP_CN_SEND;
                                queue_task(&nl->deferred, &tq_timer);
                                enable_parport_interrupts (dev);
                                ENABLE(dev->irq);
                                return OK;
                        }
                } else {
                        if (plip_receive(nibble_timeout, dev,
                                         &rcv->nibble, &rcv->length.b.lsb))
                                return TIMEOUT;
                }
                rcv->state = PLIP_PK_LENGTH_MSB;

        case PLIP_PK_LENGTH_MSB:
                if (plip_receive(nibble_timeout, dev,
                                 &rcv->nibble, &rcv->length.b.msb))
                        return TIMEOUT;
                if (rcv->length.h > dev->mtu + dev->hard_header_len
                    || rcv->length.h < 8) {
                        printk(KERN_WARNING "%s: bogus packet size %d.\n", dev->name, rcv->length.h);
                        return ERROR;
                }
                /* Malloc up new buffer. */
                rcv->skb = dev_alloc_skb(rcv->length.h);
                if (rcv->skb == NULL) {
                        printk(KERN_ERR "%s: Memory squeeze.\n", dev->name);
                        return ERROR;
                }
                skb_put(rcv->skb,rcv->length.h);
                rcv->skb->dev = dev;
                rcv->state = PLIP_PK_DATA;
                rcv->byte = 0;
                rcv->checksum = 0;

        case PLIP_PK_DATA:
                lbuf = rcv->skb->data;
                do
                        if (plip_receive(nibble_timeout, dev,
                                         &rcv->nibble, &lbuf[rcv->byte]))
                                return TIMEOUT;
                while (++rcv->byte < rcv->length.h);
                do
                        rcv->checksum += lbuf[--rcv->byte];
                while (rcv->byte);
                rcv->state = PLIP_PK_CHECKSUM;

        case PLIP_PK_CHECKSUM:
                if (plip_receive(nibble_timeout, dev,
                                 &rcv->nibble, &rcv->data))
                        return TIMEOUT;
                if (rcv->data != rcv->checksum) {
                        nl->enet_stats.rx_crc_errors++;
                        if (net_debug)
                                printk(KERN_DEBUG "%s: checksum error\n", dev->name);
                        return ERROR;
                }
                rcv->state = PLIP_PK_DONE;

        case PLIP_PK_DONE:
                /* Inform the upper layer of the arrival of a packet. */
                rcv->skb->protocol=eth_type_trans(rcv->skb, dev);
                netif_rx(rcv->skb);
                nl->enet_stats.rx_bytes += rcv->length.h;
                nl->enet_stats.rx_packets++;
                rcv->skb = NULL;
                if (net_debug > 2)
                        printk(KERN_DEBUG "%s: receive end\n", dev->name);

                /* Close the connection. */
                write_data (dev, 0x00);
                spin_lock_irq(&nl->lock);
                if (snd->state != PLIP_PK_DONE) {
                        nl->connection = PLIP_CN_SEND;
                        spin_unlock_irq(&nl->lock);
                        queue_task(&nl->immediate, &tq_immediate);
                        mark_bh(IMMEDIATE_BH);
                        enable_parport_interrupts (dev);
                        ENABLE(dev->irq);
                        return OK;
                } else {
                        nl->connection = PLIP_CN_NONE;
                        spin_unlock_irq(&nl->lock);
                        enable_parport_interrupts (dev);
                        ENABLE(dev->irq);
                        return OK;
                }
        }
        return OK;
}
/* PLIP_SEND --- send a byte (two nibbles)
   Returns OK on success, TIMEOUT on timeout */
static inline int
plip_send(unsigned short nibble_timeout, struct net_device *dev,
          enum plip_nibble_state *ns_p, unsigned char data)
{
        unsigned char c0;
        unsigned int cx;

        switch (*ns_p) {
        case PLIP_NB_BEGIN:
                write_data (dev, data & 0x0f);
                *ns_p = PLIP_NB_1;

        case PLIP_NB_1:
                write_data (dev, 0x10 | (data & 0x0f));
                cx = nibble_timeout;
                while (1) {
                        c0 = read_status(dev);
                        if ((c0 & 0x80) == 0)
                                break;
                        if (--cx == 0)
                                return TIMEOUT;
                        udelay(PLIP_DELAY_UNIT);
                }
                write_data (dev, 0x10 | (data >> 4));
                *ns_p = PLIP_NB_2;

        case PLIP_NB_2:
                write_data (dev, (data >> 4));
                cx = nibble_timeout;
                while (1) {
                        c0 = read_status(dev);
                        if (c0 & 0x80)
                                break;
                        if (--cx == 0)
                                return TIMEOUT;
                        udelay(PLIP_DELAY_UNIT);
                }
                *ns_p = PLIP_NB_BEGIN;
                return OK;
        }
        return OK;
}
/* PLIP_SEND_PACKET --- send a packet */
static int
plip_send_packet(struct net_device *dev, struct net_local *nl,
                 struct plip_local *snd, struct plip_local *rcv)
{
        unsigned short nibble_timeout = nl->nibble;
        unsigned char *lbuf;
        unsigned char c0;
        unsigned int cx;

        if (snd->skb == NULL || (lbuf = snd->skb->data) == NULL) {
                printk(KERN_DEBUG "%s: send skb lost\n", dev->name);
                snd->state = PLIP_PK_DONE;
                snd->skb = NULL;
                return ERROR;
        }

        switch (snd->state) {
        case PLIP_PK_TRIGGER:
                if ((read_status(dev) & 0xf8) != 0x80)
                        return HS_TIMEOUT;

                /* Trigger remote rx interrupt. */
                write_data (dev, 0x08);
                cx = nl->trigger;
                while (1) {
                        udelay(PLIP_DELAY_UNIT);
                        spin_lock_irq(&nl->lock);
                        if (nl->connection == PLIP_CN_RECEIVE) {
                                spin_unlock_irq(&nl->lock);
                                /* Interrupted. */
                                nl->enet_stats.collisions++;
                                return OK;
                        }
                        c0 = read_status(dev);
                        if (c0 & 0x08) {
                                spin_unlock_irq(&nl->lock);
                                DISABLE(dev->irq);
                                synchronize_irq();
                                if (nl->connection == PLIP_CN_RECEIVE) {
                                        /* Interrupted.
                                           We don't need to enable irq,
                                           as it is soon disabled. */
                                        /* Yes, we do. New variant of
                                           {enable,disable}_irq *counts*
                                           them.  -- AV  */
                                        ENABLE(dev->irq);
                                        nl->enet_stats.collisions++;
                                        return OK;
                                }
                                disable_parport_interrupts (dev);
                                if (net_debug > 2)
                                        printk(KERN_DEBUG "%s: send start\n", dev->name);
                                snd->state = PLIP_PK_LENGTH_LSB;
                                snd->nibble = PLIP_NB_BEGIN;
                                nl->timeout_count = 0;
                                break;
                        }
                        spin_unlock_irq(&nl->lock);
                        if (--cx == 0) {
                                write_data (dev, 0x00);
                                return HS_TIMEOUT;
                        }
                }

        case PLIP_PK_LENGTH_LSB:
                if (plip_send(nibble_timeout, dev,
                              &snd->nibble, snd->length.b.lsb))
                        return TIMEOUT;
                snd->state = PLIP_PK_LENGTH_MSB;

        case PLIP_PK_LENGTH_MSB:
                if (plip_send(nibble_timeout, dev,
                              &snd->nibble, snd->length.b.msb))
                        return TIMEOUT;
                snd->state = PLIP_PK_DATA;
                snd->byte = 0;
                snd->checksum = 0;

        case PLIP_PK_DATA:
                do
                        if (plip_send(nibble_timeout, dev,
                                      &snd->nibble, lbuf[snd->byte]))
                                return TIMEOUT;
                while (++snd->byte < snd->length.h);
                do
                        snd->checksum += lbuf[--snd->byte];
                while (snd->byte);
                snd->state = PLIP_PK_CHECKSUM;

        case PLIP_PK_CHECKSUM:
                if (plip_send(nibble_timeout, dev,
                              &snd->nibble, snd->checksum))
                        return TIMEOUT;

                nl->enet_stats.tx_bytes += snd->skb->len;
                dev_kfree_skb(snd->skb);
                nl->enet_stats.tx_packets++;
                snd->state = PLIP_PK_DONE;

        case PLIP_PK_DONE:
                /* Close the connection */
                write_data (dev, 0x00);
                snd->skb = NULL;
                if (net_debug > 2)
                        printk(KERN_DEBUG "%s: send end\n", dev->name);
                nl->connection = PLIP_CN_CLOSING;
                nl->is_deferred = 1;
                queue_task(&nl->deferred, &tq_timer);
                enable_parport_interrupts (dev);
                ENABLE(dev->irq);
                return OK;
        }
        return OK;
}
static int
plip_connection_close(struct net_device *dev, struct net_local *nl,
                      struct plip_local *snd, struct plip_local *rcv)
{
        spin_lock_irq(&nl->lock);
        if (nl->connection == PLIP_CN_CLOSING) {
                nl->connection = PLIP_CN_NONE;
                dev->tbusy = 0;
                mark_bh(NET_BH);
        }
        spin_unlock_irq(&nl->lock);
        if (nl->should_relinquish) {
                nl->should_relinquish = nl->port_owner = 0;
                parport_release(nl->pardev);
        }
        return OK;
}
/* PLIP_ERROR --- wait till other end settled */
static int
plip_error(struct net_device *dev, struct net_local *nl,
           struct plip_local *snd, struct plip_local *rcv)
{
        unsigned char status;

        status = read_status(dev);
        if ((status & 0xf8) == 0x80) {
                if (net_debug > 2)
                        printk(KERN_DEBUG "%s: reset interface.\n", dev->name);
                nl->connection = PLIP_CN_NONE;
                nl->should_relinquish = 0;
                dev->tbusy = 0;
                dev->interrupt = 0;
                enable_parport_interrupts (dev);
                ENABLE(dev->irq);
                mark_bh(NET_BH);
        } else {
                nl->is_deferred = 1;
                queue_task(&nl->deferred, &tq_timer);
        }

        return OK;
}
/* Handle the parallel port interrupts. */
static void
plip_interrupt(int irq, void *dev_id, struct pt_regs * regs)
{
        struct net_device *dev = dev_id;
        struct net_local *nl;
        struct plip_local *rcv;
        unsigned char c0;

        if (dev == NULL) {
                printk(KERN_DEBUG "plip_interrupt: irq %d for unknown device.\n", irq);
                return;
        }

        nl = (struct net_local *)dev->priv;
        rcv = &nl->rcv_data;

        if (dev->interrupt)
                return;

        c0 = read_status(dev);
        if ((c0 & 0xf8) != 0xc0) {
                if ((dev->irq != -1) && (net_debug > 1))
                        printk(KERN_DEBUG "%s: spurious interrupt\n", dev->name);
                return;
        }
        dev->interrupt = 1;
        if (net_debug > 3)
                printk(KERN_DEBUG "%s: interrupt.\n", dev->name);

        spin_lock_irq(&nl->lock);
        switch (nl->connection) {
        case PLIP_CN_CLOSING:
                dev->tbusy = 0;
        case PLIP_CN_NONE:
        case PLIP_CN_SEND:
                dev->last_rx = jiffies;
                rcv->state = PLIP_PK_TRIGGER;
                nl->connection = PLIP_CN_RECEIVE;
                nl->timeout_count = 0;
                queue_task(&nl->immediate, &tq_immediate);
                mark_bh(IMMEDIATE_BH);
                spin_unlock_irq(&nl->lock);
                break;

        case PLIP_CN_RECEIVE:
                /* May occur because there is a race condition
                   around the test and set of dev->interrupt.
                   Ignore this interrupt. */
                spin_unlock_irq(&nl->lock);
                break;

        case PLIP_CN_ERROR:
                spin_unlock_irq(&nl->lock);
                printk(KERN_ERR "%s: receive interrupt in error state\n", dev->name);
                break;
        }
}
static int
plip_tx_packet(struct sk_buff *skb, struct net_device *dev)
{
        struct net_local *nl = (struct net_local *)dev->priv;
        struct plip_local *snd = &nl->snd_data;

        if (dev->tbusy)
                return 1;

        /* We may need to grab the bus */
        if (!nl->port_owner) {
                if (parport_claim(nl->pardev))
                        return 1;
                nl->port_owner = 1;
        }

        if (test_and_set_bit(0, (void*)&dev->tbusy) != 0) {
                printk(KERN_WARNING "%s: Transmitter access conflict.\n", dev->name);
                return 1;
        }

        if (skb->len > dev->mtu + dev->hard_header_len) {
                printk(KERN_WARNING "%s: packet too big, %d.\n", dev->name, (int)skb->len);
                dev->tbusy = 0;
                return 0;
        }

        if (net_debug > 2)
                printk(KERN_DEBUG "%s: send request\n", dev->name);

        spin_lock_irq(&nl->lock);
        dev->trans_start = jiffies;
        snd->skb = skb;
        snd->length.h = skb->len;
        snd->state = PLIP_PK_TRIGGER;
        if (nl->connection == PLIP_CN_NONE) {
                nl->connection = PLIP_CN_SEND;
                nl->timeout_count = 0;
        }
        queue_task(&nl->immediate, &tq_immediate);
        mark_bh(IMMEDIATE_BH);
        spin_unlock_irq(&nl->lock);

        return 0;
}
static void
plip_rewrite_address(struct net_device *dev, struct ethhdr *eth)
{
        struct in_device *in_dev;

        if ((in_dev=dev->ip_ptr) != NULL) {
                /* Any address will do - we take the first */
                struct in_ifaddr *ifa=in_dev->ifa_list;
                if (ifa != NULL) {
                        memcpy(eth->h_source, dev->dev_addr, 6);
                        memset(eth->h_dest, 0xfc, 2);
                        memcpy(eth->h_dest+2, &ifa->ifa_address, 4);
                }
        }
}
static int
plip_hard_header(struct sk_buff *skb, struct net_device *dev,
                 unsigned short type, void *daddr,
                 void *saddr, unsigned len)
{
        struct net_local *nl = (struct net_local *)dev->priv;
        int ret;

        if ((ret = nl->orig_hard_header(skb, dev, type, daddr, saddr, len)) >= 0)
                plip_rewrite_address (dev, (struct ethhdr *)skb->data);

        return ret;
}

static int plip_hard_header_cache(struct neighbour *neigh,
                                  struct hh_cache *hh)
{
        struct net_local *nl = (struct net_local *)neigh->dev->priv;
        int ret;

        if ((ret = nl->orig_hard_header_cache(neigh, hh)) == 0)
        {
                struct ethhdr *eth = (struct ethhdr*)(((u8*)hh->hh_data) + 2);
                plip_rewrite_address (neigh->dev, eth);
        }

        return ret;
}
/* Open/initialize the board.  This is called (in the current kernel)
   sometime after booting when the 'ifconfig' program is run.

   This routine gets exclusive access to the parallel port by allocating
   its IRQ line.
 */
static int
plip_open(struct net_device *dev)
{
        struct net_local *nl = (struct net_local *)dev->priv;
        struct in_device *in_dev;

        /* Grab the port */
        if (!nl->port_owner) {
                if (parport_claim(nl->pardev)) return -EAGAIN;
                nl->port_owner = 1;
        }

        nl->should_relinquish = 0;

        /* Clear the data port. */
        write_data (dev, 0x00);

        /* Enable rx interrupt. */
        enable_parport_interrupts (dev);
        if (dev->irq == -1)
        {
                atomic_set (&nl->kill_timer, 0);
                queue_task (&nl->timer, &tq_timer);
        }

        /* Initialize the state machine. */
        nl->rcv_data.state = nl->snd_data.state = PLIP_PK_DONE;
        nl->rcv_data.skb = nl->snd_data.skb = NULL;
        nl->connection = PLIP_CN_NONE;
        nl->is_deferred = 0;

        /* Fill in the MAC-level header.
           We used to abuse dev->broadcast to store the point-to-point
           MAC address, but we no longer do it. Instead, we fetch the
           interface address whenever it is needed, which is cheap enough
           because we use the hh_cache. Actually, abusing dev->broadcast
           didn't work, because when using plip_open the point-to-point
           address isn't yet known.
           PLIP doesn't have a real MAC address, but we need it to be
           DOS compatible, and to properly support taps (otherwise,
           when the device address isn't identical to the address of a
           received frame, the kernel incorrectly drops it). */

        if ((in_dev=dev->ip_ptr) != NULL) {
                /* Any address will do - we take the first. We already
                   have the first two bytes filled with 0xfc, from
                   plip_init_dev(). */
                struct in_ifaddr *ifa=in_dev->ifa_list;
                if (ifa != NULL) {
                        memcpy(dev->dev_addr+2, &ifa->ifa_local, 4);
                }
        }

        dev->interrupt = 0;
        dev->start = 1;
        dev->tbusy = 0;

        MOD_INC_USE_COUNT;
        return 0;
}
/* The inverse routine to plip_open (). */
static int
plip_close(struct net_device *dev)
{
        struct net_local *nl = (struct net_local *)dev->priv;
        struct plip_local *snd = &nl->snd_data;
        struct plip_local *rcv = &nl->rcv_data;

        dev->tbusy = 1;
        dev->start = 0;
        DISABLE(dev->irq);
        synchronize_irq();

        if (dev->irq == -1)
        {
                init_MUTEX_LOCKED (&nl->killed_timer_sem);
                atomic_set (&nl->kill_timer, 1);
                down (&nl->killed_timer_sem);
        }

#ifdef NOTDEF
        outb(0x00, PAR_DATA(dev));
#endif
        nl->is_deferred = 0;
        nl->connection = PLIP_CN_NONE;
        if (nl->port_owner) {
                parport_release(nl->pardev);
                nl->port_owner = 0;
        }

        snd->state = PLIP_PK_DONE;
        if (snd->skb) {
                dev_kfree_skb(snd->skb);
                snd->skb = NULL;
        }
        rcv->state = PLIP_PK_DONE;
        if (rcv->skb) {
                kfree_skb(rcv->skb);
                rcv->skb = NULL;
        }

#ifdef NOTDEF
        /* Reset. */
        outb(0x00, PAR_CONTROL(dev));
#endif
        MOD_DEC_USE_COUNT;
        return 0;
}
static int
plip_preempt(void *handle)
{
        struct net_device *dev = (struct net_device *)handle;
        struct net_local *nl = (struct net_local *)dev->priv;

        /* Stand our ground if a datagram is on the wire */
        if (nl->connection != PLIP_CN_NONE) {
                nl->should_relinquish = 1;
                return 1;
        }

        nl->port_owner = 0;     /* Remember that we released the bus */
        return 0;
}

static void
plip_wakeup(void *handle)
{
        struct net_device *dev = (struct net_device *)handle;
        struct net_local *nl = (struct net_local *)dev->priv;

        if (nl->port_owner) {
                /* Why are we being woken up? */
                printk(KERN_DEBUG "%s: why am I being woken up?\n", dev->name);
                if (!parport_claim(nl->pardev))
                        /* bus_owner is already set (but why?) */
                        printk(KERN_DEBUG "%s: I'm broken.\n", dev->name);
                else
                        return;
        }

        if (!(dev->flags & IFF_UP))
                /* Don't need the port when the interface is down */
                return;

        if (!parport_claim(nl->pardev)) {
                nl->port_owner = 1;
                /* Clear the data port. */
                write_data (dev, 0x00);
        }

        return;
}
static struct net_device_stats *
plip_get_stats(struct net_device *dev)
{
        struct net_local *nl = (struct net_local *)dev->priv;
        struct net_device_stats *r = &nl->enet_stats;

        return r;
}
static int
plip_config(struct net_device *dev, struct ifmap *map)
{
        struct net_local *nl = (struct net_local *) dev->priv;
        struct pardevice *pardev = nl->pardev;

        if (dev->flags & IFF_UP)
                return -EBUSY;

        printk(KERN_WARNING "plip: Warning, changing the IRQ with ifconfig is deprecated.\n");
        printk(KERN_WARNING "plip: Next time, please set it with /proc/parport/*/irq instead.\n");

        if (map->irq != (unsigned char)-1) {
                pardev->port->irq = dev->irq = map->irq;
                /* Dummy request */
                request_irq(dev->irq, plip_interrupt, SA_INTERRUPT,
                            pardev->name, NULL);
        }
        return 0;
}
static int
plip_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
{
        struct net_local *nl = (struct net_local *) dev->priv;
        struct plipconf *pc = (struct plipconf *) &rq->ifr_data;

        switch(pc->pcmd) {
        case PLIP_GET_TIMEOUT:
                pc->trigger = nl->trigger;
                pc->nibble  = nl->nibble;
                break;
        case PLIP_SET_TIMEOUT:
                nl->trigger = pc->trigger;
                nl->nibble  = pc->nibble;
                break;
        default:
                return -EOPNOTSUPP;
        }
        return 0;
}
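
/*
 * Editor's illustration, not part of the driver: a minimal user-space sketch
 * of driving the ioctl above.  It assumes the PLIP_GET_TIMEOUT and
 * SIOCDEVPLIP definitions from <linux/if_plip.h>; the function name and the
 * error handling are made up for the example.
 */
#if 0   /* illustration only, never compiled */
#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <net/if.h>
#include <linux/if_plip.h>

static int example_show_plip_timeouts(const char *ifname)
{
        struct ifreq ifr;
        /* The driver reads the plipconf out of the ifreq storage itself. */
        struct plipconf *pc = (struct plipconf *)&ifr.ifr_data;
        int fd = socket(AF_INET, SOCK_DGRAM, 0);

        if (fd < 0)
                return -1;
        memset(&ifr, 0, sizeof(ifr));
        strncpy(ifr.ifr_name, ifname, IFNAMSIZ - 1);
        pc->pcmd = PLIP_GET_TIMEOUT;
        if (ioctl(fd, SIOCDEVPLIP, &ifr) == 0)
                printf("%s: trigger=%lu nibble=%lu (units of PLIP_DELAY_UNIT usec)\n",
                       ifname, (unsigned long)pc->trigger,
                       (unsigned long)pc->nibble);
        close(fd);
        return 0;
}
#endif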
static int parport[PLIP_MAX] = { [0 ... PLIP_MAX-1] = -1 };
static int timid = 0;

MODULE_PARM(parport, "1-" __MODULE_STRING(PLIP_MAX) "i");
MODULE_PARM(timid, "1i");

static struct net_device *dev_plip[PLIP_MAX] = { NULL, };
#ifdef MODULE
void
cleanup_module(void)
{
        int i;

        for (i=0; i < PLIP_MAX; i++) {
                if (dev_plip[i]) {
                        struct net_local *nl =
                                (struct net_local *)dev_plip[i]->priv;
                        unregister_netdev(dev_plip[i]);
                        if (nl->port_owner)
                                parport_release(nl->pardev);
                        parport_unregister_device(nl->pardev);
                        kfree(dev_plip[i]->priv);
                        kfree(dev_plip[i]->name);
                        kfree(dev_plip[i]);
                        dev_plip[i] = NULL;
                }
        }
}

#define plip_init init_module
#else /* !MODULE */

static int parport_ptr = 0;

void plip_setup(char *str, int *ints)
{
        /* Ugh. */
        if (!strncmp(str, "parport", 7)) {
                int n = simple_strtoul(str+7, NULL, 10);
                if (parport_ptr < PLIP_MAX)
                        parport[parport_ptr++] = n;
                else
                        printk(KERN_INFO "plip: too many ports, %s ignored.\n",
                               str);
        } else if (!strcmp(str, "timid")) {
                timid = 1;
        } else {
                if (ints[0] == 0 || ints[1] == 0) {
                        /* disable driver on "plip=" or "plip=0" */
                        parport[0] = -2;
                } else {
                        printk(KERN_WARNING "warning: 'plip=0x%x' ignored\n",
                               ints[1]);
                }
        }
}

#endif /* MODULE */
static inline int
plip_searchfor(int list[], int a)
{
        int i;
        for (i = 0; i < PLIP_MAX && list[i] != -1; i++) {
                if (list[i] == a) return 1;
        }
        return 0;
}
int __init
plip_init(void)
{
        struct parport *pb = parport_enumerate();
        int i=0;

        if (parport[0] == -2)
                return 0;

        if (parport[0] != -1 && timid) {
                printk(KERN_WARNING "plip: warning, ignoring `timid' since specific ports given.\n");
                timid = 0;
        }

        /* If the user feeds parameters, use them */
        while (pb) {
                if ((parport[0] == -1 && (!timid || !pb->devices)) ||
                    plip_searchfor(parport, pb->number)) {
                        if (i == PLIP_MAX) {
                                printk(KERN_ERR "plip: too many devices\n");
                                break;
                        }
                        dev_plip[i] = kmalloc(sizeof(struct net_device),
                                              GFP_KERNEL);
                        if (!dev_plip[i]) {
                                printk(KERN_ERR "plip: memory squeeze\n");
                                break;
                        }
                        memset(dev_plip[i], 0, sizeof(struct net_device));
                        dev_plip[i]->name =
                                kmalloc(strlen("plipXXX"), GFP_KERNEL);
                        if (!dev_plip[i]->name) {
                                printk(KERN_ERR "plip: memory squeeze.\n");
                                kfree(dev_plip[i]);
                                break;
                        }
                        sprintf(dev_plip[i]->name, "plip%d", i);
                        dev_plip[i]->priv = pb;
                        if (plip_init_dev(dev_plip[i],pb) ||
                            register_netdev(dev_plip[i])) {
                                kfree(dev_plip[i]->name);
                                kfree(dev_plip[i]);
                        } else {
                                i++;
                        }
                }
                pb = pb->next;
        }

        if (i == 0) {
                printk(KERN_INFO "plip: no devices registered\n");
                return -EIO;
        }
        return 0;
}
/*
 * Local variables:
 * compile-command: "gcc -DMODULE -DMODVERSIONS -D__KERNEL__ -Wall -Wstrict-prototypes -O2 -g -fomit-frame-pointer -pipe -c plip.c"
 * End:
 */