Merge with Linux 2.3.40.
[linux-2.6/linux-mips.git] / drivers / net / plip.c
blob1058341b681cb786e92e86220456b2da8c1aead7
1 /* $Id: plip.c,v 1.3.6.2 1997/04/16 15:07:56 phil Exp $ */
2 /* PLIP: A parallel port "network" driver for Linux. */
3 /* This driver is for parallel port with 5-bit cable (LapLink (R) cable). */
4 /*
5 * Authors: Donald Becker <becker@super.org>
6 * Tommy Thorn <thorn@daimi.aau.dk>
7 * Tanabe Hiroyasu <hiro@sanpo.t.u-tokyo.ac.jp>
8 * Alan Cox <gw4pts@gw4pts.ampr.org>
9 * Peter Bauer <100136.3530@compuserve.com>
10 * Niibe Yutaka <gniibe@mri.co.jp>
11 * Nimrod Zimerman <zimerman@mailandnews.com>
13 * Enhancements:
14 * Modularization and ifreq/ifmap support by Alan Cox.
15 * Rewritten by Niibe Yutaka.
16 * parport-sharing awareness code by Philip Blundell.
17 * SMP locking by Niibe Yutaka.
18 * Support for parallel ports with no IRQ (poll mode),
19 * Modifications to use the parallel port API
20 * by Nimrod Zimerman.
22 * Fixes:
23 * Niibe Yutaka
24 * - Module initialization.
25 * - MTU fix.
26 * - Make sure other end is OK, before sending a packet.
27 * - Fix immediate timer problem.
29 * Al Viro
30 * - Changed {enable,disable}_irq handling to make it work
31 * with new ("stack") semantics.
33 * This program is free software; you can redistribute it and/or
34 * modify it under the terms of the GNU General Public License
35 * as published by the Free Software Foundation; either version
36 * 2 of the License, or (at your option) any later version.
40 * Original version and the name 'PLIP' from Donald Becker <becker@super.org>
41 * inspired by Russ Nelson's parallel port packet driver.
43 * NOTE:
44 * Tanabe Hiroyasu had changed the protocol, and it was in Linux v1.0.
45 * Because of the necessity to communicate to DOS machines with the
46 * Crynwr packet driver, Peter Bauer changed the protocol again
47 * back to original protocol.
49 * This version follows original PLIP protocol.
 50  * So, this PLIP can't communicate with the PLIP of Linux v1.0.
54 * To use with DOS box, please do (Turn on ARP switch):
55 * # ifconfig plip[0-2] arp
57 static const char *version = "NET3 PLIP version 2.4-parport gniibe@mri.co.jp\n";
60 Sources:
61 Ideas and protocols came from Russ Nelson's <nelson@crynwr.com>
62 "parallel.asm" parallel port packet driver.
64 The "Crynwr" parallel port standard specifies the following protocol:
65 Trigger by sending nibble '0x8' (this causes interrupt on other end)
66 count-low octet
67 count-high octet
68 ... data octets
69 checksum octet
70 Each octet is sent as <wait for rx. '0x1?'> <send 0x10+(octet&0x0F)>
71 <wait for rx. '0x0?'> <send 0x00+((octet>>4)&0x0F)>
73 The packet is encapsulated as if it were ethernet.
75 The cable used is a de facto standard parallel null cable -- sold as
76 a "LapLink" cable by various places. You'll need a 12-conductor cable to
77 make one yourself. The wiring is:
78 SLCTIN 17 - 17
79 GROUND 25 - 25
80 D0->ERROR 2 - 15 15 - 2
81 D1->SLCT 3 - 13 13 - 3
82 D2->PAPOUT 4 - 12 12 - 4
83 D3->ACK 5 - 10 10 - 5
84 D4->BUSY 6 - 11 11 - 6
85 Do not connect the other pins. They are
86 D5,D6,D7 are 7,8,9
87 STROBE is 1, FEED is 14, INIT is 16
88 extra grounds are 18,19,20,21,22,23,24
91 #include <linux/module.h>
92 #include <linux/kernel.h>
93 #include <linux/sched.h>
94 #include <linux/types.h>
95 #include <linux/fcntl.h>
96 #include <linux/interrupt.h>
97 #include <linux/string.h>
98 #include <linux/ptrace.h>
99 #include <linux/if_ether.h>
100 #include <asm/system.h>
101 #include <linux/in.h>
102 #include <linux/errno.h>
103 #include <linux/delay.h>
104 #include <linux/lp.h>
105 #include <linux/init.h>
107 #include <linux/netdevice.h>
108 #include <linux/etherdevice.h>
109 #include <linux/inetdevice.h>
110 #include <linux/skbuff.h>
111 #include <linux/if_plip.h>
112 #include <net/neighbour.h>
114 #include <linux/tqueue.h>
115 #include <linux/ioport.h>
116 #include <linux/spinlock.h>
117 #include <asm/bitops.h>
118 #include <asm/irq.h>
119 #include <asm/byteorder.h>
120 #include <asm/semaphore.h>
122 #include <linux/parport.h>
/* Maximum number of devices to support. */
#define PLIP_MAX 8

/* Use 0 for production, 1 for verification, >2 for debug */
#ifndef NET_DEBUG
#define NET_DEBUG 1
#endif
static unsigned int net_debug = NET_DEBUG;

/* Enable/disable the port IRQ only when the port actually has one
   (irq == -1 means IRQ-less polling mode). */
#define ENABLE(irq)	if (irq != -1) enable_irq(irq)
#define DISABLE(irq)	if (irq != -1) disable_irq(irq)

/* In micro second */
#define PLIP_DELAY_UNIT		1

/* Connection time out = PLIP_TRIGGER_WAIT * PLIP_DELAY_UNIT usec */
#define PLIP_TRIGGER_WAIT	500

/* Nibble time out = PLIP_NIBBLE_WAIT * PLIP_DELAY_UNIT usec */
#define PLIP_NIBBLE_WAIT	3000

/* Bottom halves */
static void plip_kick_bh(struct net_device *dev);
static void plip_bh(struct net_device *dev);
static void plip_timer_bh(struct net_device *dev);

/* Interrupt handler */
static void plip_interrupt(int irq, void *dev_id, struct pt_regs *regs);

/* Functions for DEV methods */
static int plip_tx_packet(struct sk_buff *skb, struct net_device *dev);
static int plip_hard_header(struct sk_buff *skb, struct net_device *dev,
                            unsigned short type, void *daddr,
                            void *saddr, unsigned len);
static int plip_hard_header_cache(struct neighbour *neigh,
                                  struct hh_cache *hh);
static int plip_open(struct net_device *dev);
static int plip_close(struct net_device *dev);
static struct net_device_stats *plip_get_stats(struct net_device *dev);
static int plip_config(struct net_device *dev, struct ifmap *map);
static int plip_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd);
static int plip_preempt(void *handle);
static void plip_wakeup(void *handle);
168 enum plip_connection_state {
169 PLIP_CN_NONE=0,
170 PLIP_CN_RECEIVE,
171 PLIP_CN_SEND,
172 PLIP_CN_CLOSING,
173 PLIP_CN_ERROR
176 enum plip_packet_state {
177 PLIP_PK_DONE=0,
178 PLIP_PK_TRIGGER,
179 PLIP_PK_LENGTH_LSB,
180 PLIP_PK_LENGTH_MSB,
181 PLIP_PK_DATA,
182 PLIP_PK_CHECKSUM
185 enum plip_nibble_state {
186 PLIP_NB_BEGIN,
187 PLIP_NB_1,
188 PLIP_NB_2,
191 struct plip_local {
192 enum plip_packet_state state;
193 enum plip_nibble_state nibble;
194 union {
195 struct {
196 #if defined(__LITTLE_ENDIAN)
197 unsigned char lsb;
198 unsigned char msb;
199 #elif defined(__BIG_ENDIAN)
200 unsigned char msb;
201 unsigned char lsb;
202 #else
203 #error "Please fix the endianness defines in <asm/byteorder.h>"
204 #endif
205 } b;
206 unsigned short h;
207 } length;
208 unsigned short byte;
209 unsigned char checksum;
210 unsigned char data;
211 struct sk_buff *skb;
214 struct net_local {
215 struct net_device_stats enet_stats;
216 struct tq_struct immediate;
217 struct tq_struct deferred;
218 struct tq_struct timer;
219 struct plip_local snd_data;
220 struct plip_local rcv_data;
221 struct pardevice *pardev;
222 unsigned long trigger;
223 unsigned long nibble;
224 enum plip_connection_state connection;
225 unsigned short timeout_count;
226 int is_deferred;
227 int port_owner;
228 int should_relinquish;
229 int (*orig_hard_header)(struct sk_buff *skb, struct net_device *dev,
230 unsigned short type, void *daddr,
231 void *saddr, unsigned len);
232 int (*orig_hard_header_cache)(struct neighbour *neigh,
233 struct hh_cache *hh);
234 spinlock_t lock;
235 atomic_t kill_timer;
236 struct semaphore killed_timer_sem;
239 inline static void enable_parport_interrupts (struct net_device *dev)
241 if (dev->irq != -1)
243 struct parport *port =
244 ((struct net_local *)dev->priv)->pardev->port;
245 port->ops->enable_irq (port);
249 inline static void disable_parport_interrupts (struct net_device *dev)
251 if (dev->irq != -1)
253 struct parport *port =
254 ((struct net_local *)dev->priv)->pardev->port;
255 port->ops->disable_irq (port);
259 inline static void write_data (struct net_device *dev, unsigned char data)
261 struct parport *port =
262 ((struct net_local *)dev->priv)->pardev->port;
264 port->ops->write_data (port, data);
267 inline static unsigned char read_status (struct net_device *dev)
269 struct parport *port =
270 ((struct net_local *)dev->priv)->pardev->port;
272 return port->ops->read_status (port);
275 /* Entry point of PLIP driver.
276 Probe the hardware, and register/initialize the driver.
278 PLIP is rather weird, because of the way it interacts with the parport
279 system. It is _not_ initialised from Space.c. Instead, plip_init()
280 is called, and that function makes up a "struct net_device" for each port, and
281 then calls us here.
284 int __init
285 plip_init_dev(struct net_device *dev, struct parport *pb)
287 struct net_local *nl;
288 struct pardevice *pardev;
290 dev->irq = pb->irq;
291 dev->base_addr = pb->base;
293 if (pb->irq == -1) {
294 printk(KERN_INFO "plip: %s has no IRQ. Using IRQ-less mode,"
295 "which is fairly inefficient!\n", pb->name);
298 pardev = parport_register_device(pb, dev->name, plip_preempt,
299 plip_wakeup, plip_interrupt,
300 0, dev);
302 if (!pardev)
303 return -ENODEV;
305 printk(KERN_INFO "%s", version);
306 if (dev->irq != -1)
307 printk(KERN_INFO "%s: Parallel port at %#3lx, using IRQ %d.\n",
308 dev->name, dev->base_addr, dev->irq);
309 else
310 printk(KERN_INFO "%s: Parallel port at %#3lx, not using IRQ.\n",
311 dev->name, dev->base_addr);
313 /* Fill in the generic fields of the device structure. */
314 ether_setup(dev);
316 /* Then, override parts of it */
317 dev->hard_start_xmit = plip_tx_packet;
318 dev->open = plip_open;
319 dev->stop = plip_close;
320 dev->get_stats = plip_get_stats;
321 dev->do_ioctl = plip_ioctl;
322 dev->header_cache_update = NULL;
323 dev->tx_queue_len = 10;
324 dev->flags = IFF_POINTOPOINT|IFF_NOARP;
325 memset(dev->dev_addr, 0xfc, ETH_ALEN);
327 /* Set the private structure */
328 dev->priv = kmalloc(sizeof (struct net_local), GFP_KERNEL);
329 if (dev->priv == NULL) {
330 printk(KERN_ERR "%s: out of memory\n", dev->name);
331 parport_unregister_device(pardev);
332 return -ENOMEM;
334 memset(dev->priv, 0, sizeof(struct net_local));
335 nl = (struct net_local *) dev->priv;
337 nl->orig_hard_header = dev->hard_header;
338 dev->hard_header = plip_hard_header;
340 nl->orig_hard_header_cache = dev->hard_header_cache;
341 dev->hard_header_cache = plip_hard_header_cache;
343 nl->pardev = pardev;
345 nl->port_owner = 0;
347 /* Initialize constants */
348 nl->trigger = PLIP_TRIGGER_WAIT;
349 nl->nibble = PLIP_NIBBLE_WAIT;
351 /* Initialize task queue structures */
352 nl->immediate.next = NULL;
353 nl->immediate.sync = 0;
354 nl->immediate.routine = (void (*)(void *))plip_bh;
355 nl->immediate.data = dev;
357 nl->deferred.next = NULL;
358 nl->deferred.sync = 0;
359 nl->deferred.routine = (void (*)(void *))plip_kick_bh;
360 nl->deferred.data = dev;
362 if (dev->irq == -1) {
363 nl->timer.next = NULL;
364 nl->timer.sync = 0;
365 nl->timer.routine = (void (*)(void *))plip_timer_bh;
366 nl->timer.data = dev;
369 spin_lock_init(&nl->lock);
371 return 0;
374 /* Bottom half handler for the delayed request.
375 This routine is kicked by do_timer().
376 Request `plip_bh' to be invoked. */
377 static void
378 plip_kick_bh(struct net_device *dev)
380 struct net_local *nl = (struct net_local *)dev->priv;
382 if (nl->is_deferred) {
383 queue_task(&nl->immediate, &tq_immediate);
384 mark_bh(IMMEDIATE_BH);
/* Forward declarations of internal routines */
static int plip_none(struct net_device *, struct net_local *,
		     struct plip_local *, struct plip_local *);
static int plip_receive_packet(struct net_device *, struct net_local *,
			       struct plip_local *, struct plip_local *);
static int plip_send_packet(struct net_device *, struct net_local *,
			    struct plip_local *, struct plip_local *);
static int plip_connection_close(struct net_device *, struct net_local *,
				 struct plip_local *, struct plip_local *);
static int plip_error(struct net_device *, struct net_local *,
		      struct plip_local *, struct plip_local *);
static int plip_bh_timeout_error(struct net_device *dev, struct net_local *nl,
				 struct plip_local *snd,
				 struct plip_local *rcv,
				 int error);

/* Return codes of the state handlers. */
#define OK         0
#define TIMEOUT    1
#define ERROR      2
#define HS_TIMEOUT 3

typedef int (*plip_func)(struct net_device *dev, struct net_local *nl,
			 struct plip_local *snd, struct plip_local *rcv);

/* Indexed by enum plip_connection_state — order must match. */
static plip_func connection_state_table[] =
{
	plip_none,
	plip_receive_packet,
	plip_send_packet,
	plip_connection_close,
	plip_error
};
421 /* Bottom half handler of PLIP. */
422 static void
423 plip_bh(struct net_device *dev)
425 struct net_local *nl = (struct net_local *)dev->priv;
426 struct plip_local *snd = &nl->snd_data;
427 struct plip_local *rcv = &nl->rcv_data;
428 plip_func f;
429 int r;
431 nl->is_deferred = 0;
432 f = connection_state_table[nl->connection];
433 if ((r = (*f)(dev, nl, snd, rcv)) != OK
434 && (r = plip_bh_timeout_error(dev, nl, snd, rcv, r)) != OK) {
435 nl->is_deferred = 1;
436 queue_task(&nl->deferred, &tq_timer);
440 static void
441 plip_timer_bh(struct net_device *dev)
443 struct net_local *nl = (struct net_local *)dev->priv;
445 if (!(atomic_read (&nl->kill_timer))) {
446 if (!dev->interrupt)
447 plip_interrupt (-1, dev, NULL);
449 queue_task (&nl->timer, &tq_timer);
451 else {
452 up (&nl->killed_timer_sem);
456 static int
457 plip_bh_timeout_error(struct net_device *dev, struct net_local *nl,
458 struct plip_local *snd, struct plip_local *rcv,
459 int error)
461 unsigned char c0;
463 * This is tricky. If we got here from the beginning of send (either
464 * with ERROR or HS_TIMEOUT) we have IRQ enabled. Otherwise it's
465 * already disabled. With the old variant of {enable,disable}_irq()
466 * extra disable_irq() was a no-op. Now it became mortal - it's
467 * unbalanced and thus we'll never re-enable IRQ (until rmmod plip,
468 * that is). So we have to treat HS_TIMEOUT and ERROR from send
469 * in a special way.
472 spin_lock_irq(&nl->lock);
473 if (nl->connection == PLIP_CN_SEND) {
475 if (error != ERROR) { /* Timeout */
476 nl->timeout_count++;
477 if ((error == HS_TIMEOUT
478 && nl->timeout_count <= 10)
479 || nl->timeout_count <= 3) {
480 spin_unlock_irq(&nl->lock);
481 /* Try again later */
482 return TIMEOUT;
484 c0 = read_status(dev);
485 printk(KERN_WARNING "%s: transmit timeout(%d,%02x)\n",
486 dev->name, snd->state, c0);
487 } else
488 error = HS_TIMEOUT;
489 nl->enet_stats.tx_errors++;
490 nl->enet_stats.tx_aborted_errors++;
491 } else if (nl->connection == PLIP_CN_RECEIVE) {
492 if (rcv->state == PLIP_PK_TRIGGER) {
493 /* Transmission was interrupted. */
494 spin_unlock_irq(&nl->lock);
495 return OK;
497 if (error != ERROR) { /* Timeout */
498 if (++nl->timeout_count <= 3) {
499 spin_unlock_irq(&nl->lock);
500 /* Try again later */
501 return TIMEOUT;
503 c0 = read_status(dev);
504 printk(KERN_WARNING "%s: receive timeout(%d,%02x)\n",
505 dev->name, rcv->state, c0);
507 nl->enet_stats.rx_dropped++;
509 rcv->state = PLIP_PK_DONE;
510 if (rcv->skb) {
511 kfree_skb(rcv->skb);
512 rcv->skb = NULL;
514 snd->state = PLIP_PK_DONE;
515 if (snd->skb) {
516 dev_kfree_skb(snd->skb);
517 snd->skb = NULL;
519 spin_unlock_irq(&nl->lock);
520 if (error == HS_TIMEOUT) {
521 DISABLE(dev->irq);
522 synchronize_irq();
524 disable_parport_interrupts (dev);
525 dev->tbusy = 1;
526 nl->connection = PLIP_CN_ERROR;
527 write_data (dev, 0x00);
529 return TIMEOUT;
532 static int
533 plip_none(struct net_device *dev, struct net_local *nl,
534 struct plip_local *snd, struct plip_local *rcv)
536 return OK;
539 /* PLIP_RECEIVE --- receive a byte(two nibbles)
540 Returns OK on success, TIMEOUT on timeout */
541 inline static int
542 plip_receive(unsigned short nibble_timeout, struct net_device *dev,
543 enum plip_nibble_state *ns_p, unsigned char *data_p)
545 unsigned char c0, c1;
546 unsigned int cx;
548 switch (*ns_p) {
549 case PLIP_NB_BEGIN:
550 cx = nibble_timeout;
551 while (1) {
552 c0 = read_status(dev);
553 udelay(PLIP_DELAY_UNIT);
554 if ((c0 & 0x80) == 0) {
555 c1 = read_status(dev);
556 if (c0 == c1)
557 break;
559 if (--cx == 0)
560 return TIMEOUT;
562 *data_p = (c0 >> 3) & 0x0f;
563 write_data (dev, 0x10); /* send ACK */
564 *ns_p = PLIP_NB_1;
566 case PLIP_NB_1:
567 cx = nibble_timeout;
568 while (1) {
569 c0 = read_status(dev);
570 udelay(PLIP_DELAY_UNIT);
571 if (c0 & 0x80) {
572 c1 = read_status(dev);
573 if (c0 == c1)
574 break;
576 if (--cx == 0)
577 return TIMEOUT;
579 *data_p |= (c0 << 1) & 0xf0;
580 write_data (dev, 0x00); /* send ACK */
581 *ns_p = PLIP_NB_BEGIN;
582 case PLIP_NB_2:
583 break;
585 return OK;
588 /* PLIP_RECEIVE_PACKET --- receive a packet */
589 static int
590 plip_receive_packet(struct net_device *dev, struct net_local *nl,
591 struct plip_local *snd, struct plip_local *rcv)
593 unsigned short nibble_timeout = nl->nibble;
594 unsigned char *lbuf;
596 switch (rcv->state) {
597 case PLIP_PK_TRIGGER:
598 DISABLE(dev->irq);
599 /* Don't need to synchronize irq, as we can safely ignore it */
600 disable_parport_interrupts (dev);
601 dev->interrupt = 0;
602 write_data (dev, 0x01); /* send ACK */
603 if (net_debug > 2)
604 printk(KERN_DEBUG "%s: receive start\n", dev->name);
605 rcv->state = PLIP_PK_LENGTH_LSB;
606 rcv->nibble = PLIP_NB_BEGIN;
608 case PLIP_PK_LENGTH_LSB:
609 if (snd->state != PLIP_PK_DONE) {
610 if (plip_receive(nl->trigger, dev,
611 &rcv->nibble, &rcv->length.b.lsb)) {
612 /* collision, here dev->tbusy == 1 */
613 rcv->state = PLIP_PK_DONE;
614 nl->is_deferred = 1;
615 nl->connection = PLIP_CN_SEND;
616 queue_task(&nl->deferred, &tq_timer);
617 enable_parport_interrupts (dev);
618 ENABLE(dev->irq);
619 return OK;
621 } else {
622 if (plip_receive(nibble_timeout, dev,
623 &rcv->nibble, &rcv->length.b.lsb))
624 return TIMEOUT;
626 rcv->state = PLIP_PK_LENGTH_MSB;
628 case PLIP_PK_LENGTH_MSB:
629 if (plip_receive(nibble_timeout, dev,
630 &rcv->nibble, &rcv->length.b.msb))
631 return TIMEOUT;
632 if (rcv->length.h > dev->mtu + dev->hard_header_len
633 || rcv->length.h < 8) {
634 printk(KERN_WARNING "%s: bogus packet size %d.\n", dev->name, rcv->length.h);
635 return ERROR;
637 /* Malloc up new buffer. */
638 rcv->skb = dev_alloc_skb(rcv->length.h);
639 if (rcv->skb == NULL) {
640 printk(KERN_ERR "%s: Memory squeeze.\n", dev->name);
641 return ERROR;
643 skb_put(rcv->skb,rcv->length.h);
644 rcv->skb->dev = dev;
645 rcv->state = PLIP_PK_DATA;
646 rcv->byte = 0;
647 rcv->checksum = 0;
649 case PLIP_PK_DATA:
650 lbuf = rcv->skb->data;
652 if (plip_receive(nibble_timeout, dev,
653 &rcv->nibble, &lbuf[rcv->byte]))
654 return TIMEOUT;
655 while (++rcv->byte < rcv->length.h);
657 rcv->checksum += lbuf[--rcv->byte];
658 while (rcv->byte);
659 rcv->state = PLIP_PK_CHECKSUM;
661 case PLIP_PK_CHECKSUM:
662 if (plip_receive(nibble_timeout, dev,
663 &rcv->nibble, &rcv->data))
664 return TIMEOUT;
665 if (rcv->data != rcv->checksum) {
666 nl->enet_stats.rx_crc_errors++;
667 if (net_debug)
668 printk(KERN_DEBUG "%s: checksum error\n", dev->name);
669 return ERROR;
671 rcv->state = PLIP_PK_DONE;
673 case PLIP_PK_DONE:
674 /* Inform the upper layer for the arrival of a packet. */
675 rcv->skb->protocol=eth_type_trans(rcv->skb, dev);
676 netif_rx(rcv->skb);
677 nl->enet_stats.rx_bytes += rcv->length.h;
678 nl->enet_stats.rx_packets++;
679 rcv->skb = NULL;
680 if (net_debug > 2)
681 printk(KERN_DEBUG "%s: receive end\n", dev->name);
683 /* Close the connection. */
684 write_data (dev, 0x00);
685 spin_lock_irq(&nl->lock);
686 if (snd->state != PLIP_PK_DONE) {
687 nl->connection = PLIP_CN_SEND;
688 spin_unlock_irq(&nl->lock);
689 queue_task(&nl->immediate, &tq_immediate);
690 mark_bh(IMMEDIATE_BH);
691 enable_parport_interrupts (dev);
692 ENABLE(dev->irq);
693 return OK;
694 } else {
695 nl->connection = PLIP_CN_NONE;
696 spin_unlock_irq(&nl->lock);
697 enable_parport_interrupts (dev);
698 ENABLE(dev->irq);
699 return OK;
702 return OK;
705 /* PLIP_SEND --- send a byte (two nibbles)
706 Returns OK on success, TIMEOUT when timeout */
707 inline static int
708 plip_send(unsigned short nibble_timeout, struct net_device *dev,
709 enum plip_nibble_state *ns_p, unsigned char data)
711 unsigned char c0;
712 unsigned int cx;
714 switch (*ns_p) {
715 case PLIP_NB_BEGIN:
716 write_data (dev, data & 0x0f);
717 *ns_p = PLIP_NB_1;
719 case PLIP_NB_1:
720 write_data (dev, 0x10 | (data & 0x0f));
721 cx = nibble_timeout;
722 while (1) {
723 c0 = read_status(dev);
724 if ((c0 & 0x80) == 0)
725 break;
726 if (--cx == 0)
727 return TIMEOUT;
728 udelay(PLIP_DELAY_UNIT);
730 write_data (dev, 0x10 | (data >> 4));
731 *ns_p = PLIP_NB_2;
733 case PLIP_NB_2:
734 write_data (dev, (data >> 4));
735 cx = nibble_timeout;
736 while (1) {
737 c0 = read_status(dev);
738 if (c0 & 0x80)
739 break;
740 if (--cx == 0)
741 return TIMEOUT;
742 udelay(PLIP_DELAY_UNIT);
744 *ns_p = PLIP_NB_BEGIN;
745 return OK;
747 return OK;
750 /* PLIP_SEND_PACKET --- send a packet */
751 static int
752 plip_send_packet(struct net_device *dev, struct net_local *nl,
753 struct plip_local *snd, struct plip_local *rcv)
755 unsigned short nibble_timeout = nl->nibble;
756 unsigned char *lbuf;
757 unsigned char c0;
758 unsigned int cx;
760 if (snd->skb == NULL || (lbuf = snd->skb->data) == NULL) {
761 printk(KERN_DEBUG "%s: send skb lost\n", dev->name);
762 snd->state = PLIP_PK_DONE;
763 snd->skb = NULL;
764 return ERROR;
767 switch (snd->state) {
768 case PLIP_PK_TRIGGER:
769 if ((read_status(dev) & 0xf8) != 0x80)
770 return HS_TIMEOUT;
772 /* Trigger remote rx interrupt. */
773 write_data (dev, 0x08);
774 cx = nl->trigger;
775 while (1) {
776 udelay(PLIP_DELAY_UNIT);
777 spin_lock_irq(&nl->lock);
778 if (nl->connection == PLIP_CN_RECEIVE) {
779 spin_unlock_irq(&nl->lock);
780 /* Interrupted. */
781 nl->enet_stats.collisions++;
782 return OK;
784 c0 = read_status(dev);
785 if (c0 & 0x08) {
786 spin_unlock_irq(&nl->lock);
787 DISABLE(dev->irq);
788 synchronize_irq();
789 if (nl->connection == PLIP_CN_RECEIVE) {
790 /* Interrupted.
791 We don't need to enable irq,
792 as it is soon disabled. */
793 /* Yes, we do. New variant of
794 {enable,disable}_irq *counts*
795 them. -- AV */
796 ENABLE(dev->irq);
797 nl->enet_stats.collisions++;
798 return OK;
800 disable_parport_interrupts (dev);
801 if (net_debug > 2)
802 printk(KERN_DEBUG "%s: send start\n", dev->name);
803 snd->state = PLIP_PK_LENGTH_LSB;
804 snd->nibble = PLIP_NB_BEGIN;
805 nl->timeout_count = 0;
806 break;
808 spin_unlock_irq(&nl->lock);
809 if (--cx == 0) {
810 write_data (dev, 0x00);
811 return HS_TIMEOUT;
815 case PLIP_PK_LENGTH_LSB:
816 if (plip_send(nibble_timeout, dev,
817 &snd->nibble, snd->length.b.lsb))
818 return TIMEOUT;
819 snd->state = PLIP_PK_LENGTH_MSB;
821 case PLIP_PK_LENGTH_MSB:
822 if (plip_send(nibble_timeout, dev,
823 &snd->nibble, snd->length.b.msb))
824 return TIMEOUT;
825 snd->state = PLIP_PK_DATA;
826 snd->byte = 0;
827 snd->checksum = 0;
829 case PLIP_PK_DATA:
831 if (plip_send(nibble_timeout, dev,
832 &snd->nibble, lbuf[snd->byte]))
833 return TIMEOUT;
834 while (++snd->byte < snd->length.h);
836 snd->checksum += lbuf[--snd->byte];
837 while (snd->byte);
838 snd->state = PLIP_PK_CHECKSUM;
840 case PLIP_PK_CHECKSUM:
841 if (plip_send(nibble_timeout, dev,
842 &snd->nibble, snd->checksum))
843 return TIMEOUT;
845 nl->enet_stats.tx_bytes += snd->skb->len;
846 dev_kfree_skb(snd->skb);
847 nl->enet_stats.tx_packets++;
848 snd->state = PLIP_PK_DONE;
850 case PLIP_PK_DONE:
851 /* Close the connection */
852 write_data (dev, 0x00);
853 snd->skb = NULL;
854 if (net_debug > 2)
855 printk(KERN_DEBUG "%s: send end\n", dev->name);
856 nl->connection = PLIP_CN_CLOSING;
857 nl->is_deferred = 1;
858 queue_task(&nl->deferred, &tq_timer);
859 enable_parport_interrupts (dev);
860 ENABLE(dev->irq);
861 return OK;
863 return OK;
866 static int
867 plip_connection_close(struct net_device *dev, struct net_local *nl,
868 struct plip_local *snd, struct plip_local *rcv)
870 spin_lock_irq(&nl->lock);
871 if (nl->connection == PLIP_CN_CLOSING) {
872 nl->connection = PLIP_CN_NONE;
873 dev->tbusy = 0;
874 mark_bh(NET_BH);
876 spin_unlock_irq(&nl->lock);
877 if (nl->should_relinquish) {
878 nl->should_relinquish = nl->port_owner = 0;
879 parport_release(nl->pardev);
881 return OK;
884 /* PLIP_ERROR --- wait till other end settled */
885 static int
886 plip_error(struct net_device *dev, struct net_local *nl,
887 struct plip_local *snd, struct plip_local *rcv)
889 unsigned char status;
891 status = read_status(dev);
892 if ((status & 0xf8) == 0x80) {
893 if (net_debug > 2)
894 printk(KERN_DEBUG "%s: reset interface.\n", dev->name);
895 nl->connection = PLIP_CN_NONE;
896 nl->should_relinquish = 0;
897 dev->tbusy = 0;
898 dev->interrupt = 0;
899 enable_parport_interrupts (dev);
900 ENABLE(dev->irq);
901 mark_bh(NET_BH);
902 } else {
903 nl->is_deferred = 1;
904 queue_task(&nl->deferred, &tq_timer);
907 return OK;
910 /* Handle the parallel port interrupts. */
911 static void
912 plip_interrupt(int irq, void *dev_id, struct pt_regs * regs)
914 struct net_device *dev = dev_id;
915 struct net_local *nl;
916 struct plip_local *rcv;
917 unsigned char c0;
919 if (dev == NULL) {
920 printk(KERN_DEBUG "plip_interrupt: irq %d for unknown device.\n", irq);
921 return;
924 nl = (struct net_local *)dev->priv;
925 rcv = &nl->rcv_data;
927 if (dev->interrupt)
928 return;
930 c0 = read_status(dev);
931 if ((c0 & 0xf8) != 0xc0) {
932 if ((dev->irq != -1) && (net_debug > 1))
933 printk(KERN_DEBUG "%s: spurious interrupt\n", dev->name);
934 return;
936 dev->interrupt = 1;
937 if (net_debug > 3)
938 printk(KERN_DEBUG "%s: interrupt.\n", dev->name);
940 spin_lock_irq(&nl->lock);
941 switch (nl->connection) {
942 case PLIP_CN_CLOSING:
943 dev->tbusy = 0;
944 case PLIP_CN_NONE:
945 case PLIP_CN_SEND:
946 dev->last_rx = jiffies;
947 rcv->state = PLIP_PK_TRIGGER;
948 nl->connection = PLIP_CN_RECEIVE;
949 nl->timeout_count = 0;
950 queue_task(&nl->immediate, &tq_immediate);
951 mark_bh(IMMEDIATE_BH);
952 spin_unlock_irq(&nl->lock);
953 break;
955 case PLIP_CN_RECEIVE:
956 /* May occur because there is race condition
957 around test and set of dev->interrupt.
958 Ignore this interrupt. */
959 spin_unlock_irq(&nl->lock);
960 break;
962 case PLIP_CN_ERROR:
963 spin_unlock_irq(&nl->lock);
964 printk(KERN_ERR "%s: receive interrupt in error state\n", dev->name);
965 break;
969 static int
970 plip_tx_packet(struct sk_buff *skb, struct net_device *dev)
972 struct net_local *nl = (struct net_local *)dev->priv;
973 struct plip_local *snd = &nl->snd_data;
975 if (dev->tbusy)
976 return 1;
978 /* We may need to grab the bus */
979 if (!nl->port_owner) {
980 if (parport_claim(nl->pardev))
981 return 1;
982 nl->port_owner = 1;
985 if (test_and_set_bit(0, (void*)&dev->tbusy) != 0) {
986 printk(KERN_WARNING "%s: Transmitter access conflict.\n", dev->name);
987 return 1;
990 if (skb->len > dev->mtu + dev->hard_header_len) {
991 printk(KERN_WARNING "%s: packet too big, %d.\n", dev->name, (int)skb->len);
992 dev->tbusy = 0;
993 return 0;
996 if (net_debug > 2)
997 printk(KERN_DEBUG "%s: send request\n", dev->name);
999 spin_lock_irq(&nl->lock);
1000 dev->trans_start = jiffies;
1001 snd->skb = skb;
1002 snd->length.h = skb->len;
1003 snd->state = PLIP_PK_TRIGGER;
1004 if (nl->connection == PLIP_CN_NONE) {
1005 nl->connection = PLIP_CN_SEND;
1006 nl->timeout_count = 0;
1008 queue_task(&nl->immediate, &tq_immediate);
1009 mark_bh(IMMEDIATE_BH);
1010 spin_unlock_irq(&nl->lock);
1012 return 0;
1015 static void
1016 plip_rewrite_address(struct net_device *dev, struct ethhdr *eth)
1018 struct in_device *in_dev;
1020 if ((in_dev=dev->ip_ptr) != NULL) {
1021 /* Any address will do - we take the first */
1022 struct in_ifaddr *ifa=in_dev->ifa_list;
1023 if (ifa != NULL) {
1024 memcpy(eth->h_source, dev->dev_addr, 6);
1025 memset(eth->h_dest, 0xfc, 2);
1026 memcpy(eth->h_dest+2, &ifa->ifa_address, 4);
1031 static int
1032 plip_hard_header(struct sk_buff *skb, struct net_device *dev,
1033 unsigned short type, void *daddr,
1034 void *saddr, unsigned len)
1036 struct net_local *nl = (struct net_local *)dev->priv;
1037 int ret;
1039 if ((ret = nl->orig_hard_header(skb, dev, type, daddr, saddr, len)) >= 0)
1040 plip_rewrite_address (dev, (struct ethhdr *)skb->data);
1042 return ret;
1045 int plip_hard_header_cache(struct neighbour *neigh,
1046 struct hh_cache *hh)
1048 struct net_local *nl = (struct net_local *)neigh->dev->priv;
1049 int ret;
1051 if ((ret = nl->orig_hard_header_cache(neigh, hh)) == 0)
1053 struct ethhdr *eth = (struct ethhdr*)(((u8*)hh->hh_data) + 2);
1054 plip_rewrite_address (neigh->dev, eth);
1057 return ret;
1060 /* Open/initialize the board. This is called (in the current kernel)
1061 sometime after booting when the 'ifconfig' program is run.
1063 This routine gets exclusive access to the parallel port by allocating
1064 its IRQ line.
1066 static int
1067 plip_open(struct net_device *dev)
1069 struct net_local *nl = (struct net_local *)dev->priv;
1070 struct in_device *in_dev;
1072 /* Grab the port */
1073 if (!nl->port_owner) {
1074 if (parport_claim(nl->pardev)) return -EAGAIN;
1075 nl->port_owner = 1;
1078 nl->should_relinquish = 0;
1080 /* Clear the data port. */
1081 write_data (dev, 0x00);
1083 /* Enable rx interrupt. */
1084 enable_parport_interrupts (dev);
1085 if (dev->irq == -1)
1087 atomic_set (&nl->kill_timer, 0);
1088 queue_task (&nl->timer, &tq_timer);
1091 /* Initialize the state machine. */
1092 nl->rcv_data.state = nl->snd_data.state = PLIP_PK_DONE;
1093 nl->rcv_data.skb = nl->snd_data.skb = NULL;
1094 nl->connection = PLIP_CN_NONE;
1095 nl->is_deferred = 0;
1097 /* Fill in the MAC-level header.
1098 We used to abuse dev->broadcast to store the point-to-point
1099 MAC address, but we no longer do it. Instead, we fetch the
1100 interface address whenever it is needed, which is cheap enough
1101 because we use the hh_cache. Actually, abusing dev->broadcast
1102 didn't work, because when using plip_open the point-to-point
1103 address isn't yet known.
1104 PLIP doesn't have a real MAC address, but we need it to be
1105 DOS compatible, and to properly support taps (otherwise,
1106 when the device address isn't identical to the address of a
1107 received frame, the kernel incorrectly drops it). */
1109 if ((in_dev=dev->ip_ptr) != NULL) {
1110 /* Any address will do - we take the first. We already
1111 have the first two bytes filled with 0xfc, from
1112 plip_init_dev(). */
1113 struct in_ifaddr *ifa=in_dev->ifa_list;
1114 if (ifa != NULL) {
1115 memcpy(dev->dev_addr+2, &ifa->ifa_local, 4);
1119 dev->interrupt = 0;
1120 dev->start = 1;
1121 dev->tbusy = 0;
1123 MOD_INC_USE_COUNT;
1124 return 0;
1127 /* The inverse routine to plip_open (). */
1128 static int
1129 plip_close(struct net_device *dev)
1131 struct net_local *nl = (struct net_local *)dev->priv;
1132 struct plip_local *snd = &nl->snd_data;
1133 struct plip_local *rcv = &nl->rcv_data;
1135 dev->tbusy = 1;
1136 dev->start = 0;
1137 DISABLE(dev->irq);
1138 synchronize_irq();
1140 if (dev->irq == -1)
1142 init_MUTEX_LOCKED (&nl->killed_timer_sem);
1143 atomic_set (&nl->kill_timer, 1);
1144 down (&nl->killed_timer_sem);
1147 #ifdef NOTDEF
1148 outb(0x00, PAR_DATA(dev));
1149 #endif
1150 nl->is_deferred = 0;
1151 nl->connection = PLIP_CN_NONE;
1152 if (nl->port_owner) {
1153 parport_release(nl->pardev);
1154 nl->port_owner = 0;
1157 snd->state = PLIP_PK_DONE;
1158 if (snd->skb) {
1159 dev_kfree_skb(snd->skb);
1160 snd->skb = NULL;
1162 rcv->state = PLIP_PK_DONE;
1163 if (rcv->skb) {
1164 kfree_skb(rcv->skb);
1165 rcv->skb = NULL;
1168 #ifdef NOTDEF
1169 /* Reset. */
1170 outb(0x00, PAR_CONTROL(dev));
1171 #endif
1172 MOD_DEC_USE_COUNT;
1173 return 0;
1176 static int
1177 plip_preempt(void *handle)
1179 struct net_device *dev = (struct net_device *)handle;
1180 struct net_local *nl = (struct net_local *)dev->priv;
1182 /* Stand our ground if a datagram is on the wire */
1183 if (nl->connection != PLIP_CN_NONE) {
1184 nl->should_relinquish = 1;
1185 return 1;
1188 nl->port_owner = 0; /* Remember that we released the bus */
1189 return 0;
1192 static void
1193 plip_wakeup(void *handle)
1195 struct net_device *dev = (struct net_device *)handle;
1196 struct net_local *nl = (struct net_local *)dev->priv;
1198 if (nl->port_owner) {
1199 /* Why are we being woken up? */
1200 printk(KERN_DEBUG "%s: why am I being woken up?\n", dev->name);
1201 if (!parport_claim(nl->pardev))
1202 /* bus_owner is already set (but why?) */
1203 printk(KERN_DEBUG "%s: I'm broken.\n", dev->name);
1204 else
1205 return;
1208 if (!(dev->flags & IFF_UP))
1209 /* Don't need the port when the interface is down */
1210 return;
1212 if (!parport_claim(nl->pardev)) {
1213 nl->port_owner = 1;
1214 /* Clear the data port. */
1215 write_data (dev, 0x00);
1218 return;
1221 static struct net_device_stats *
1222 plip_get_stats(struct net_device *dev)
1224 struct net_local *nl = (struct net_local *)dev->priv;
1225 struct net_device_stats *r = &nl->enet_stats;
1227 return r;
1230 static int
1231 plip_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
1233 struct net_local *nl = (struct net_local *) dev->priv;
1234 struct plipconf *pc = (struct plipconf *) &rq->ifr_data;
1236 switch(pc->pcmd) {
1237 case PLIP_GET_TIMEOUT:
1238 pc->trigger = nl->trigger;
1239 pc->nibble = nl->nibble;
1240 break;
1241 case PLIP_SET_TIMEOUT:
1242 nl->trigger = pc->trigger;
1243 nl->nibble = pc->nibble;
1244 break;
1245 default:
1246 return -EOPNOTSUPP;
1248 return 0;
/* Port numbers to attach to, from the "parport=" module parameter.
   -1 (the default) means autodetect; -2 means the driver is disabled
   (set by "plip=" / "plip=0" on the kernel command line). */
static int parport[PLIP_MAX] = { [0 ... PLIP_MAX-1] = -1 };
/* When non-zero, skip ports that already have other devices attached. */
static int timid = 0;

MODULE_PARM(parport, "1-" __MODULE_STRING(PLIP_MAX) "i");
MODULE_PARM(timid, "1i");

/* Devices registered by plip_init(); NULL slots are unused. */
static struct net_device *dev_plip[PLIP_MAX] = { NULL, };
1259 #ifdef MODULE
1260 void
1261 cleanup_module(void)
1263 int i;
1265 for (i=0; i < PLIP_MAX; i++) {
1266 if (dev_plip[i]) {
1267 struct net_local *nl =
1268 (struct net_local *)dev_plip[i]->priv;
1269 unregister_netdev(dev_plip[i]);
1270 if (nl->port_owner)
1271 parport_release(nl->pardev);
1272 parport_unregister_device(nl->pardev);
1273 kfree(dev_plip[i]->priv);
1274 kfree(dev_plip[i]->name);
1275 kfree(dev_plip[i]);
1276 dev_plip[i] = NULL;
1281 #define plip_init init_module
1283 #else /* !MODULE */
1285 static int parport_ptr = 0;
1287 void plip_setup(char *str, int *ints)
1289 /* Ugh. */
1290 if (!strncmp(str, "parport", 7)) {
1291 int n = simple_strtoul(str+7, NULL, 10);
1292 if (parport_ptr < PLIP_MAX)
1293 parport[parport_ptr++] = n;
1294 else
1295 printk(KERN_INFO "plip: too many ports, %s ignored.\n",
1296 str);
1297 } else if (!strcmp(str, "timid")) {
1298 timid = 1;
1299 } else {
1300 if (ints[0] == 0 || ints[1] == 0) {
1301 /* disable driver on "plip=" or "plip=0" */
1302 parport[0] = -2;
1303 } else {
1304 printk(KERN_WARNING "warning: 'plip=0x%x' ignored\n",
1305 ints[1]);
1310 #endif /* MODULE */
1312 static int inline
1313 plip_searchfor(int list[], int a)
1315 int i;
1316 for (i = 0; i < PLIP_MAX && list[i] != -1; i++) {
1317 if (list[i] == a) return 1;
1319 return 0;
1322 int __init
1323 plip_init(void)
1325 struct parport *pb = parport_enumerate();
1326 int i=0;
1328 if (parport[0] == -2)
1329 return 0;
1331 if (parport[0] != -1 && timid) {
1332 printk(KERN_WARNING "plip: warning, ignoring `timid' since specific ports given.\n");
1333 timid = 0;
1336 /* If the user feeds parameters, use them */
1337 while (pb) {
1338 if ((parport[0] == -1 && (!timid || !pb->devices)) ||
1339 plip_searchfor(parport, pb->number)) {
1340 if (i == PLIP_MAX) {
1341 printk(KERN_ERR "plip: too many devices\n");
1342 break;
1344 dev_plip[i] = kmalloc(sizeof(struct net_device),
1345 GFP_KERNEL);
1346 if (!dev_plip[i]) {
1347 printk(KERN_ERR "plip: memory squeeze\n");
1348 break;
1350 memset(dev_plip[i], 0, sizeof(struct net_device));
1351 dev_plip[i]->name =
1352 kmalloc(strlen("plipXXX"), GFP_KERNEL);
1353 if (!dev_plip[i]->name) {
1354 printk(KERN_ERR "plip: memory squeeze.\n");
1355 kfree(dev_plip[i]);
1356 dev_plip[i] = NULL;
1357 break;
1359 sprintf(dev_plip[i]->name, "plip%d", i);
1360 dev_plip[i]->priv = pb;
1361 if (plip_init_dev(dev_plip[i],pb) || register_netdev(dev_plip[i])) {
1362 kfree(dev_plip[i]->name);
1363 kfree(dev_plip[i]);
1364 dev_plip[i] = NULL;
1365 } else {
1366 i++;
1369 pb = pb->next;
1372 if (i == 0) {
1373 printk(KERN_INFO "plip: no devices registered\n");
1374 return -EIO;
1376 return 0;
/*
 * Local variables:
 * compile-command: "gcc -DMODULE -DMODVERSIONS -D__KERNEL__ -Wall -Wstrict-prototypes -O2 -g -fomit-frame-pointer -pipe -c plip.c"
 * End:
 */