Ok. I didn't make 2.4.0 in 2000. Tough. I tried, but we had some
[davej-history.git] / drivers / net / plip.c
blob61cdd84d60d783624d082055705456ca4e70c50a
1 /* $Id: plip.c,v 1.3.6.2 1997/04/16 15:07:56 phil Exp $ */
2 /* PLIP: A parallel port "network" driver for Linux. */
3 /* This driver is for parallel port with 5-bit cable (LapLink (R) cable). */
4 /*
5 * Authors: Donald Becker <becker@super.org>
6 * Tommy Thorn <thorn@daimi.aau.dk>
7 * Tanabe Hiroyasu <hiro@sanpo.t.u-tokyo.ac.jp>
8 * Alan Cox <gw4pts@gw4pts.ampr.org>
9 * Peter Bauer <100136.3530@compuserve.com>
10 * Niibe Yutaka <gniibe@mri.co.jp>
11 * Nimrod Zimerman <zimerman@mailandnews.com>
13 * Enhancements:
14 * Modularization and ifreq/ifmap support by Alan Cox.
15 * Rewritten by Niibe Yutaka.
16 * parport-sharing awareness code by Philip Blundell.
17 * SMP locking by Niibe Yutaka.
18 * Support for parallel ports with no IRQ (poll mode),
19 * Modifications to use the parallel port API
20 * by Nimrod Zimerman.
22 * Fixes:
23 * Niibe Yutaka
24 * - Module initialization.
25 * - MTU fix.
26 * - Make sure other end is OK, before sending a packet.
27 * - Fix immediate timer problem.
29 * Al Viro
30 * - Changed {enable,disable}_irq handling to make it work
31 * with new ("stack") semantics.
33 * This program is free software; you can redistribute it and/or
34 * modify it under the terms of the GNU General Public License
35 * as published by the Free Software Foundation; either version
36 * 2 of the License, or (at your option) any later version.
40 * Original version and the name 'PLIP' from Donald Becker <becker@super.org>
41 * inspired by Russ Nelson's parallel port packet driver.
43 * NOTE:
44 * Tanabe Hiroyasu had changed the protocol, and it was in Linux v1.0.
45 * Because of the necessity to communicate to DOS machines with the
46 * Crynwr packet driver, Peter Bauer changed the protocol again
47 * back to original protocol.
49 * This version follows original PLIP protocol.
50 * So, this PLIP can't communicate with the PLIP of Linux v1.0.
54 * To use with DOS box, please do (Turn on ARP switch):
55 * # ifconfig plip[0-2] arp
57 static const char *version = "NET3 PLIP version 2.4-parport gniibe@mri.co.jp\n";
60 Sources:
61 Ideas and protocols came from Russ Nelson's <nelson@crynwr.com>
62 "parallel.asm" parallel port packet driver.
64 The "Crynwr" parallel port standard specifies the following protocol:
65 Trigger by sending nibble '0x8' (this causes interrupt on other end)
66 count-low octet
67 count-high octet
68 ... data octets
69 checksum octet
70 Each octet is sent as <wait for rx. '0x1?'> <send 0x10+(octet&0x0F)>
71 <wait for rx. '0x0?'> <send 0x00+((octet>>4)&0x0F)>
73 The packet is encapsulated as if it were ethernet.
75 The cable used is a de facto standard parallel null cable -- sold as
76 a "LapLink" cable by various places. You'll need a 12-conductor cable to
77 make one yourself. The wiring is:
78 SLCTIN 17 - 17
79 GROUND 25 - 25
80 D0->ERROR 2 - 15 15 - 2
81 D1->SLCT 3 - 13 13 - 3
82 D2->PAPOUT 4 - 12 12 - 4
83 D3->ACK 5 - 10 10 - 5
84 D4->BUSY 6 - 11 11 - 6
85 Do not connect the other pins. They are
86 D5,D6,D7 are 7,8,9
87 STROBE is 1, FEED is 14, INIT is 16
88 extra grounds are 18,19,20,21,22,23,24
91 #include <linux/module.h>
92 #include <linux/kernel.h>
93 #include <linux/sched.h>
94 #include <linux/types.h>
95 #include <linux/fcntl.h>
96 #include <linux/interrupt.h>
97 #include <linux/string.h>
98 #include <linux/ptrace.h>
99 #include <linux/if_ether.h>
100 #include <asm/system.h>
101 #include <linux/in.h>
102 #include <linux/errno.h>
103 #include <linux/delay.h>
104 #include <linux/lp.h>
105 #include <linux/init.h>
107 #include <linux/netdevice.h>
108 #include <linux/etherdevice.h>
109 #include <linux/inetdevice.h>
110 #include <linux/skbuff.h>
111 #include <linux/if_plip.h>
112 #include <net/neighbour.h>
114 #include <linux/tqueue.h>
115 #include <linux/ioport.h>
116 #include <linux/spinlock.h>
117 #include <asm/bitops.h>
118 #include <asm/irq.h>
119 #include <asm/byteorder.h>
120 #include <asm/semaphore.h>
122 #include <linux/parport.h>
124 /* Maximum number of devices to support. */
125 #define PLIP_MAX 8
127 /* Use 0 for production, 1 for verification, >2 for debug */
128 #ifndef NET_DEBUG
129 #define NET_DEBUG 1
130 #endif
131 static unsigned int net_debug = NET_DEBUG;
133 #define ENABLE(irq) if (irq != -1) enable_irq(irq)
134 #define DISABLE(irq) if (irq != -1) disable_irq(irq)
136 /* In micro second */
137 #define PLIP_DELAY_UNIT 1
139 /* Connection time out = PLIP_TRIGGER_WAIT * PLIP_DELAY_UNIT usec */
140 #define PLIP_TRIGGER_WAIT 500
142 /* Nibble time out = PLIP_NIBBLE_WAIT * PLIP_DELAY_UNIT usec */
143 #define PLIP_NIBBLE_WAIT 3000
145 /* Bottom halves */
146 static void plip_kick_bh(struct net_device *dev);
147 static void plip_bh(struct net_device *dev);
148 static void plip_timer_bh(struct net_device *dev);
150 /* Interrupt handler */
151 static void plip_interrupt(int irq, void *dev_id, struct pt_regs *regs);
153 /* Functions for DEV methods */
154 static int plip_tx_packet(struct sk_buff *skb, struct net_device *dev);
155 static int plip_hard_header(struct sk_buff *skb, struct net_device *dev,
156 unsigned short type, void *daddr,
157 void *saddr, unsigned len);
158 static int plip_hard_header_cache(struct neighbour *neigh,
159 struct hh_cache *hh);
160 static int plip_open(struct net_device *dev);
161 static int plip_close(struct net_device *dev);
162 static struct net_device_stats *plip_get_stats(struct net_device *dev);
163 static int plip_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd);
164 static int plip_preempt(void *handle);
165 static void plip_wakeup(void *handle);
/* Overall state of the link: idle, receiving, sending, winding down, or errored. */
167 enum plip_connection_state {
168 PLIP_CN_NONE=0,
169 PLIP_CN_RECEIVE,
170 PLIP_CN_SEND,
171 PLIP_CN_CLOSING,
172 PLIP_CN_ERROR
/* Progress of the packet currently in flight (length bytes, payload, checksum). */
175 enum plip_packet_state {
176 PLIP_PK_DONE=0,
177 PLIP_PK_TRIGGER,
178 PLIP_PK_LENGTH_LSB,
179 PLIP_PK_LENGTH_MSB,
180 PLIP_PK_DATA,
181 PLIP_PK_CHECKSUM
/* Which half of the current octet the nibble handshake is on (each byte is sent as two 4-bit nibbles). */
184 enum plip_nibble_state {
185 PLIP_NB_BEGIN,
186 PLIP_NB_1,
187 PLIP_NB_2,
/* Per-direction transfer state: one instance each for send and receive.
   The length union lets the 16-bit packet length be filled one byte at a
   time (lsb/msb) while being read as a whole (h), with the byte order
   selected at compile time from <asm/byteorder.h>. */
190 struct plip_local {
191 enum plip_packet_state state;
192 enum plip_nibble_state nibble;
193 union {
194 struct {
195 #if defined(__LITTLE_ENDIAN)
196 unsigned char lsb;
197 unsigned char msb;
198 #elif defined(__BIG_ENDIAN)
199 unsigned char msb;
200 unsigned char lsb;
201 #else
202 #error "Please fix the endianness defines in <asm/byteorder.h>"
203 #endif
204 } b;
205 unsigned short h;
206 } length;
/* byte: index into the skb data currently being transferred. */
207 unsigned short byte;
/* checksum: running 8-bit sum over the payload. */
208 unsigned char checksum;
209 unsigned char data;
210 struct sk_buff *skb;
/* Device-private state hung off dev->priv: statistics, the three bottom-half
   task queue entries (immediate BH, deferred retry, poll timer for IRQ-less
   mode), send/receive transfer state, parport handle, timeouts, and the
   saved original hard_header hooks that plip wraps. */
213 struct net_local {
214 struct net_device_stats enet_stats;
215 struct tq_struct immediate;
216 struct tq_struct deferred;
217 struct tq_struct timer;
218 struct plip_local snd_data;
219 struct plip_local rcv_data;
220 struct pardevice *pardev;
/* trigger/nibble: timeout budgets in PLIP_DELAY_UNIT usec steps. */
221 unsigned long trigger;
222 unsigned long nibble;
223 enum plip_connection_state connection;
224 unsigned short timeout_count;
225 int is_deferred;
/* port_owner: nonzero while we hold the parport via parport_claim(). */
226 int port_owner;
227 int should_relinquish;
228 int (*orig_hard_header)(struct sk_buff *skb, struct net_device *dev,
229 unsigned short type, void *daddr,
230 void *saddr, unsigned len);
231 int (*orig_hard_header_cache)(struct neighbour *neigh,
232 struct hh_cache *hh);
233 spinlock_t lock;
/* kill_timer/killed_timer_sem: shutdown handshake for the poll-mode timer. */
234 atomic_t kill_timer;
235 struct semaphore killed_timer_sem;
/* Enable the parallel port's rx interrupt; no-op in IRQ-less (poll) mode. */
238 inline static void enable_parport_interrupts (struct net_device *dev)
240 if (dev->irq != -1)
242 struct parport *port =
243 ((struct net_local *)dev->priv)->pardev->port;
244 port->ops->enable_irq (port);
/* Disable the parallel port's rx interrupt; no-op in IRQ-less (poll) mode. */
248 inline static void disable_parport_interrupts (struct net_device *dev)
250 if (dev->irq != -1)
252 struct parport *port =
253 ((struct net_local *)dev->priv)->pardev->port;
254 port->ops->disable_irq (port);
/* Write a byte to the port's data register through the parport ops vector. */
258 inline static void write_data (struct net_device *dev, unsigned char data)
260 struct parport *port =
261 ((struct net_local *)dev->priv)->pardev->port;
263 port->ops->write_data (port, data);
/* Read the port's status register through the parport ops vector. */
266 inline static unsigned char read_status (struct net_device *dev)
268 struct parport *port =
269 ((struct net_local *)dev->priv)->pardev->port;
271 return port->ops->read_status (port);
274 /* Entry point of PLIP driver.
275 Probe the hardware, and register/initialize the driver.
277 PLIP is rather weird, because of the way it interacts with the parport
278 system. It is _not_ initialised from Space.c. Instead, plip_init()
279 is called, and that function makes up a "struct net_device" for each port, and
280 then calls us here.
/* Set up one PLIP net_device on the given parport: register with parport,
   fill in the ethernet-style device fields, install plip's method pointers
   (saving the originals so plip_hard_header can chain to them), allocate and
   zero the net_local, and wire up the bottom-half task queue entries.
   Returns 0 on success, -ENODEV/-ENOMEM on failure. */
283 int __init
284 plip_init_dev(struct net_device *dev, struct parport *pb)
286 struct net_local *nl;
287 struct pardevice *pardev;
289 SET_MODULE_OWNER(dev);
290 dev->irq = pb->irq;
291 dev->base_addr = pb->base;
/* irq == -1 means the port has no interrupt line; a poll timer is used instead. */
293 if (pb->irq == -1) {
/* NOTE(review): the split string literal concatenates to "mode,which" —
   missing a space between the two halves. */
294 printk(KERN_INFO "plip: %s has no IRQ. Using IRQ-less mode,"
295 "which is fairly inefficient!\n", pb->name);
298 pardev = parport_register_device(pb, dev->name, plip_preempt,
299 plip_wakeup, plip_interrupt,
300 0, dev);
302 if (!pardev)
303 return -ENODEV;
305 printk(KERN_INFO "%s", version);
306 if (dev->irq != -1)
307 printk(KERN_INFO "%s: Parallel port at %#3lx, using IRQ %d.\n",
308 dev->name, dev->base_addr, dev->irq);
309 else
310 printk(KERN_INFO "%s: Parallel port at %#3lx, not using IRQ.\n",
311 dev->name, dev->base_addr);
313 /* Fill in the generic fields of the device structure. */
314 ether_setup(dev);
316 /* Then, override parts of it */
317 dev->hard_start_xmit = plip_tx_packet;
318 dev->open = plip_open;
319 dev->stop = plip_close;
320 dev->get_stats = plip_get_stats;
321 dev->do_ioctl = plip_ioctl;
322 dev->header_cache_update = NULL;
323 dev->tx_queue_len = 10;
324 dev->flags = IFF_POINTOPOINT|IFF_NOARP;
/* 0xfc prefix is PLIP's fake MAC; the tail is filled from the IP address in plip_open(). */
325 memset(dev->dev_addr, 0xfc, ETH_ALEN);
327 /* Set the private structure */
328 dev->priv = kmalloc(sizeof (struct net_local), GFP_KERNEL);
329 if (dev->priv == NULL) {
330 printk(KERN_ERR "%s: out of memory\n", dev->name);
331 parport_unregister_device(pardev);
332 return -ENOMEM;
334 memset(dev->priv, 0, sizeof(struct net_local));
335 nl = (struct net_local *) dev->priv;
/* Save the ethernet header builders so our wrappers can call through to them. */
337 nl->orig_hard_header = dev->hard_header;
338 dev->hard_header = plip_hard_header;
340 nl->orig_hard_header_cache = dev->hard_header_cache;
341 dev->hard_header_cache = plip_hard_header_cache;
343 nl->pardev = pardev;
345 nl->port_owner = 0;
347 /* Initialize constants */
348 nl->trigger = PLIP_TRIGGER_WAIT;
349 nl->nibble = PLIP_NIBBLE_WAIT;
351 /* Initialize task queue structures */
352 INIT_LIST_HEAD(&nl->immediate.list);
353 nl->immediate.sync = 0;
354 nl->immediate.routine = (void (*)(void *))plip_bh;
355 nl->immediate.data = dev;
357 INIT_LIST_HEAD(&nl->deferred.list);
358 nl->deferred.sync = 0;
359 nl->deferred.routine = (void (*)(void *))plip_kick_bh;
360 nl->deferred.data = dev;
/* Poll-mode only: a timer-queue task stands in for the missing IRQ. */
362 if (dev->irq == -1) {
363 INIT_LIST_HEAD(&nl->timer.list);
364 nl->timer.sync = 0;
365 nl->timer.routine = (void (*)(void *))plip_timer_bh;
366 nl->timer.data = dev;
369 spin_lock_init(&nl->lock);
371 return 0;
374 /* Bottom half handler for the delayed request.
375 This routine is kicked by do_timer().
376 Request `plip_bh' to be invoked. */
/* Timer-queue handler: if a retry was deferred, requeue plip_bh on the
   immediate queue and mark the immediate bottom half to run. */
377 static void
378 plip_kick_bh(struct net_device *dev)
380 struct net_local *nl = (struct net_local *)dev->priv;
382 if (nl->is_deferred) {
383 queue_task(&nl->immediate, &tq_immediate);
384 mark_bh(IMMEDIATE_BH);
388 /* Forward declarations of internal routines */
389 static int plip_none(struct net_device *, struct net_local *,
390 struct plip_local *, struct plip_local *);
391 static int plip_receive_packet(struct net_device *, struct net_local *,
392 struct plip_local *, struct plip_local *);
393 static int plip_send_packet(struct net_device *, struct net_local *,
394 struct plip_local *, struct plip_local *);
395 static int plip_connection_close(struct net_device *, struct net_local *,
396 struct plip_local *, struct plip_local *);
397 static int plip_error(struct net_device *, struct net_local *,
398 struct plip_local *, struct plip_local *);
399 static int plip_bh_timeout_error(struct net_device *dev, struct net_local *nl,
400 struct plip_local *snd,
401 struct plip_local *rcv,
402 int error);
404 #define OK 0
405 #define TIMEOUT 1
406 #define ERROR 2
407 #define HS_TIMEOUT 3
409 typedef int (*plip_func)(struct net_device *dev, struct net_local *nl,
410 struct plip_local *snd, struct plip_local *rcv);
/* State dispatch table, indexed by enum plip_connection_state; entries must
   stay in the same order as the enum (NONE, RECEIVE, SEND, CLOSING, ERROR). */
412 static plip_func connection_state_table[] =
414 plip_none,
415 plip_receive_packet,
416 plip_send_packet,
417 plip_connection_close,
418 plip_error
421 /* Bottom half handler of PLIP. */
/* Main bottom half: dispatch on the connection state; if the handler and
   the subsequent timeout-error handler both report trouble, defer a retry
   via the timer queue (picked up later by plip_kick_bh). */
422 static void
423 plip_bh(struct net_device *dev)
425 struct net_local *nl = (struct net_local *)dev->priv;
426 struct plip_local *snd = &nl->snd_data;
427 struct plip_local *rcv = &nl->rcv_data;
428 plip_func f;
429 int r;
431 nl->is_deferred = 0;
432 f = connection_state_table[nl->connection];
433 if ((r = (*f)(dev, nl, snd, rcv)) != OK
434 && (r = plip_bh_timeout_error(dev, nl, snd, rcv, r)) != OK) {
435 nl->is_deferred = 1;
436 queue_task(&nl->deferred, &tq_timer);
/* Poll-mode substitute for the IRQ: periodically fake an interrupt (irq -1)
   and re-arm; when kill_timer is set, signal plip_close via the semaphore
   instead of re-arming. */
440 static void
441 plip_timer_bh(struct net_device *dev)
443 struct net_local *nl = (struct net_local *)dev->priv;
445 if (!(atomic_read (&nl->kill_timer))) {
446 plip_interrupt (-1, dev, NULL);
448 queue_task (&nl->timer, &tq_timer);
450 else {
451 up (&nl->killed_timer_sem);
/* Handle a TIMEOUT/ERROR/HS_TIMEOUT result from a state handler: retry a
   bounded number of times, otherwise log, drop both in-flight skbs, account
   the error, and put the link into PLIP_CN_ERROR. Returns TIMEOUT (retry
   later via the deferred queue) or OK (nothing more to do). */
455 static int
456 plip_bh_timeout_error(struct net_device *dev, struct net_local *nl,
457 struct plip_local *snd, struct plip_local *rcv,
458 int error)
460 unsigned char c0;
462 * This is tricky. If we got here from the beginning of send (either
463 * with ERROR or HS_TIMEOUT) we have IRQ enabled. Otherwise it's
464 * already disabled. With the old variant of {enable,disable}_irq()
465 * extra disable_irq() was a no-op. Now it became mortal - it's
466 * unbalanced and thus we'll never re-enable IRQ (until rmmod plip,
467 * that is). So we have to treat HS_TIMEOUT and ERROR from send
468 * in a special way.
471 spin_lock_irq(&nl->lock);
472 if (nl->connection == PLIP_CN_SEND) {
474 if (error != ERROR) { /* Timeout */
475 nl->timeout_count++;
/* Handshake timeouts get more retries (10) than data timeouts (3). */
476 if ((error == HS_TIMEOUT
477 && nl->timeout_count <= 10)
478 || nl->timeout_count <= 3) {
479 spin_unlock_irq(&nl->lock);
480 /* Try again later */
481 return TIMEOUT;
483 c0 = read_status(dev);
484 printk(KERN_WARNING "%s: transmit timeout(%d,%02x)\n",
485 dev->name, snd->state, c0);
486 } else
/* ERROR during send: recast as HS_TIMEOUT so the IRQ-disable below fires
   exactly once (see the comment above). */
487 error = HS_TIMEOUT;
488 nl->enet_stats.tx_errors++;
489 nl->enet_stats.tx_aborted_errors++;
490 } else if (nl->connection == PLIP_CN_RECEIVE) {
491 if (rcv->state == PLIP_PK_TRIGGER) {
492 /* Transmission was interrupted. */
493 spin_unlock_irq(&nl->lock);
494 return OK;
496 if (error != ERROR) { /* Timeout */
497 if (++nl->timeout_count <= 3) {
498 spin_unlock_irq(&nl->lock);
499 /* Try again later */
500 return TIMEOUT;
502 c0 = read_status(dev);
503 printk(KERN_WARNING "%s: receive timeout(%d,%02x)\n",
504 dev->name, rcv->state, c0);
506 nl->enet_stats.rx_dropped++;
/* Give up: free both pending skbs and reset both state machines. */
508 rcv->state = PLIP_PK_DONE;
509 if (rcv->skb) {
510 kfree_skb(rcv->skb);
511 rcv->skb = NULL;
513 snd->state = PLIP_PK_DONE;
514 if (snd->skb) {
515 dev_kfree_skb(snd->skb);
516 snd->skb = NULL;
518 spin_unlock_irq(&nl->lock);
519 if (error == HS_TIMEOUT) {
520 DISABLE(dev->irq);
521 synchronize_irq();
523 disable_parport_interrupts (dev);
524 netif_stop_queue (dev);
525 nl->connection = PLIP_CN_ERROR;
/* Drop the data lines so the peer sees an idle wire. */
526 write_data (dev, 0x00);
528 return TIMEOUT;
/* Handler for PLIP_CN_NONE: nothing to do while the link is idle. */
531 static int
532 plip_none(struct net_device *dev, struct net_local *nl,
533 struct plip_local *snd, struct plip_local *rcv)
535 return OK;
538 /* PLIP_RECEIVE --- receive a byte(two nibbles)
539 Returns OK on success, TIMEOUT on timeout */
/* Receive one byte as two nibbles, resuming from *ns_p so a TIMEOUT return
   can be retried mid-byte. Per the Crynwr protocol (see file header): wait
   for status bit 7 low, take the low nibble from status bits 3-6, ACK with
   0x10; then wait for bit 7 high, take the high nibble, ACK with 0x00.
   Returns OK or TIMEOUT. */
540 inline static int
541 plip_receive(unsigned short nibble_timeout, struct net_device *dev,
542 enum plip_nibble_state *ns_p, unsigned char *data_p)
544 unsigned char c0, c1;
545 unsigned int cx;
547 switch (*ns_p) {
548 case PLIP_NB_BEGIN:
549 cx = nibble_timeout;
550 while (1) {
551 c0 = read_status(dev);
552 udelay(PLIP_DELAY_UNIT);
553 if ((c0 & 0x80) == 0) {
/* Double-read to make sure the status lines have settled. */
554 c1 = read_status(dev);
555 if (c0 == c1)
556 break;
558 if (--cx == 0)
559 return TIMEOUT;
561 *data_p = (c0 >> 3) & 0x0f;
562 write_data (dev, 0x10); /* send ACK */
563 *ns_p = PLIP_NB_1;
/* falls through to PLIP_NB_1 */
565 case PLIP_NB_1:
566 cx = nibble_timeout;
567 while (1) {
568 c0 = read_status(dev);
569 udelay(PLIP_DELAY_UNIT);
570 if (c0 & 0x80) {
571 c1 = read_status(dev);
572 if (c0 == c1)
573 break;
575 if (--cx == 0)
576 return TIMEOUT;
578 *data_p |= (c0 << 1) & 0xf0;
579 write_data (dev, 0x00); /* send ACK */
580 *ns_p = PLIP_NB_BEGIN;
581 case PLIP_NB_2:
582 break;
584 return OK;
588 * Determine the packet's protocol ID. The rule here is that we
589 * assume 802.3 if the type field is short enough to be a length.
590 * This is normal practice and works for any 'now in use' protocol.
592 * PLIP is ethernet ish but the daddr might not be valid if unicast.
593 * PLIP fortunately has no bus architecture (it's point-to-point).
595 * We can't fix the daddr thing as that quirk (more bug) is embedded
596 * in far too many old systems not all even running Linux.
/* Classify a received frame like eth_type_trans does: strip the link header,
   set pkt_type for broadcast/multicast, and return the protocol id in
   network byte order (Ethernet II if the type field >= 1536, else raw
   802.3 for Novell's 0xFFFF marker, else 802.2 LLC). */
599 static unsigned short plip_type_trans(struct sk_buff *skb, struct net_device *dev)
601 struct ethhdr *eth;
602 unsigned char *rawp;
604 skb->mac.raw=skb->data;
605 skb_pull(skb,dev->hard_header_len);
606 eth= skb->mac.ethernet;
/* Low bit of the first dest byte marks a group (multicast/broadcast) address. */
608 if(*eth->h_dest&1)
610 if(memcmp(eth->h_dest,dev->broadcast, ETH_ALEN)==0)
611 skb->pkt_type=PACKET_BROADCAST;
612 else
613 skb->pkt_type=PACKET_MULTICAST;
617 * This ALLMULTI check should be redundant by 1.4
618 * so don't forget to remove it.
/* Values >= 1536 are Ethernet II protocol ids; below that it's an 802.3 length. */
621 if (ntohs(eth->h_proto) >= 1536)
622 return eth->h_proto;
624 rawp = skb->data;
627 * This is a magic hack to spot IPX packets. Older Novell breaks
628 * the protocol design and runs IPX over 802.3 without an 802.2 LLC
629 * layer. We look for FFFF which isn't a used 802.2 SSAP/DSAP. This
630 * won't work for fault tolerant netware but does for the rest.
632 if (*(unsigned short *)rawp == 0xFFFF)
633 return htons(ETH_P_802_3);
636 * Real 802.2 LLC
638 return htons(ETH_P_802_2);
642 /* PLIP_RECEIVE_PACKET --- receive a packet */
/* Handler for PLIP_CN_RECEIVE: a resumable state machine that pulls in the
   triggered packet — ACK the trigger, read the 16-bit length, allocate an
   skb, read the payload while summing a checksum, verify the checksum, then
   hand the skb to netif_rx and either switch to SEND (if a transmit is
   pending) or go idle. Returns OK, TIMEOUT (retry from the same state), or
   ERROR. Each case falls through to the next on success. */
643 static int
644 plip_receive_packet(struct net_device *dev, struct net_local *nl,
645 struct plip_local *snd, struct plip_local *rcv)
647 unsigned short nibble_timeout = nl->nibble;
648 unsigned char *lbuf;
650 switch (rcv->state) {
651 case PLIP_PK_TRIGGER:
652 DISABLE(dev->irq);
653 /* Don't need to synchronize irq, as we can safely ignore it */
654 disable_parport_interrupts (dev);
655 write_data (dev, 0x01); /* send ACK */
656 if (net_debug > 2)
657 printk(KERN_DEBUG "%s: receive start\n", dev->name);
658 rcv->state = PLIP_PK_LENGTH_LSB;
659 rcv->nibble = PLIP_NB_BEGIN;
661 case PLIP_PK_LENGTH_LSB:
/* A pending send means we raced the peer: use the short trigger timeout
   and on failure yield to the send path (collision). */
662 if (snd->state != PLIP_PK_DONE) {
663 if (plip_receive(nl->trigger, dev,
664 &rcv->nibble, &rcv->length.b.lsb)) {
665 /* collision, here dev->tbusy == 1 */
666 rcv->state = PLIP_PK_DONE;
667 nl->is_deferred = 1;
668 nl->connection = PLIP_CN_SEND;
669 queue_task(&nl->deferred, &tq_timer);
670 enable_parport_interrupts (dev);
671 ENABLE(dev->irq);
672 return OK;
674 } else {
675 if (plip_receive(nibble_timeout, dev,
676 &rcv->nibble, &rcv->length.b.lsb))
677 return TIMEOUT;
679 rcv->state = PLIP_PK_LENGTH_MSB;
681 case PLIP_PK_LENGTH_MSB:
682 if (plip_receive(nibble_timeout, dev,
683 &rcv->nibble, &rcv->length.b.msb))
684 return TIMEOUT;
/* Sanity-check the announced length before allocating. */
685 if (rcv->length.h > dev->mtu + dev->hard_header_len
686 || rcv->length.h < 8) {
687 printk(KERN_WARNING "%s: bogus packet size %d.\n", dev->name, rcv->length.h);
688 return ERROR;
690 /* Malloc up new buffer. */
691 rcv->skb = dev_alloc_skb(rcv->length.h + 2);
692 if (rcv->skb == NULL) {
693 printk(KERN_ERR "%s: Memory squeeze.\n", dev->name);
694 return ERROR;
696 skb_reserve(rcv->skb, 2); /* Align IP on 16 byte boundaries */
697 skb_put(rcv->skb,rcv->length.h);
698 rcv->skb->dev = dev;
699 rcv->state = PLIP_PK_DATA;
700 rcv->byte = 0;
701 rcv->checksum = 0;
703 case PLIP_PK_DATA:
704 lbuf = rcv->skb->data;
/* do/while pair: first loop receives all bytes, second sums the checksum
   backwards over the buffer (the do/while keywords fell on dropped lines). */
706 if (plip_receive(nibble_timeout, dev,
707 &rcv->nibble, &lbuf[rcv->byte]))
708 return TIMEOUT;
709 while (++rcv->byte < rcv->length.h);
711 rcv->checksum += lbuf[--rcv->byte];
712 while (rcv->byte);
713 rcv->state = PLIP_PK_CHECKSUM;
715 case PLIP_PK_CHECKSUM:
716 if (plip_receive(nibble_timeout, dev,
717 &rcv->nibble, &rcv->data))
718 return TIMEOUT;
719 if (rcv->data != rcv->checksum) {
720 nl->enet_stats.rx_crc_errors++;
721 if (net_debug)
722 printk(KERN_DEBUG "%s: checksum error\n", dev->name);
723 return ERROR;
725 rcv->state = PLIP_PK_DONE;
727 case PLIP_PK_DONE:
728 /* Inform the upper layer for the arrival of a packet. */
729 rcv->skb->protocol=plip_type_trans(rcv->skb, dev);
730 netif_rx(rcv->skb);
731 nl->enet_stats.rx_bytes += rcv->length.h;
732 nl->enet_stats.rx_packets++;
733 rcv->skb = NULL;
734 if (net_debug > 2)
735 printk(KERN_DEBUG "%s: receive end\n", dev->name);
737 /* Close the connection. */
738 write_data (dev, 0x00);
739 spin_lock_irq(&nl->lock);
740 if (snd->state != PLIP_PK_DONE) {
/* A transmit is queued: switch straight to SEND and kick the BH. */
741 nl->connection = PLIP_CN_SEND;
742 spin_unlock_irq(&nl->lock);
743 queue_task(&nl->immediate, &tq_immediate);
744 mark_bh(IMMEDIATE_BH);
745 enable_parport_interrupts (dev);
746 ENABLE(dev->irq);
747 return OK;
748 } else {
749 nl->connection = PLIP_CN_NONE;
750 spin_unlock_irq(&nl->lock);
751 enable_parport_interrupts (dev);
752 ENABLE(dev->irq);
753 return OK;
756 return OK;
759 /* PLIP_SEND --- send a byte (two nibbles)
760 Returns OK on success, TIMEOUT when timeout */
/* Send one byte as two nibbles, resuming from *ns_p so a TIMEOUT return can
   be retried mid-byte. Mirrors plip_receive: put the low nibble on the data
   lines, raise bit 4 and wait for the peer's ACK (status bit 7 low), then
   the high nibble and wait for bit 7 high. Returns OK or TIMEOUT. */
761 inline static int
762 plip_send(unsigned short nibble_timeout, struct net_device *dev,
763 enum plip_nibble_state *ns_p, unsigned char data)
765 unsigned char c0;
766 unsigned int cx;
768 switch (*ns_p) {
769 case PLIP_NB_BEGIN:
770 write_data (dev, data & 0x0f);
771 *ns_p = PLIP_NB_1;
/* falls through */
773 case PLIP_NB_1:
774 write_data (dev, 0x10 | (data & 0x0f));
775 cx = nibble_timeout;
776 while (1) {
777 c0 = read_status(dev);
778 if ((c0 & 0x80) == 0)
779 break;
780 if (--cx == 0)
781 return TIMEOUT;
782 udelay(PLIP_DELAY_UNIT);
784 write_data (dev, 0x10 | (data >> 4));
785 *ns_p = PLIP_NB_2;
/* falls through */
787 case PLIP_NB_2:
788 write_data (dev, (data >> 4));
789 cx = nibble_timeout;
790 while (1) {
791 c0 = read_status(dev);
792 if (c0 & 0x80)
793 break;
794 if (--cx == 0)
795 return TIMEOUT;
796 udelay(PLIP_DELAY_UNIT);
798 *ns_p = PLIP_NB_BEGIN;
799 return OK;
801 return OK;
804 /* PLIP_SEND_PACKET --- send a packet */
/* Handler for PLIP_CN_SEND: a resumable state machine that pushes snd->skb
   out — verify the peer is idle, raise the trigger (0x08) and wait for its
   echo while watching for an incoming-receive collision, then send length
   LSB/MSB, payload (summing the checksum), and checksum, and finally go to
   CLOSING. Returns OK, TIMEOUT, ERROR, or HS_TIMEOUT (handshake failure).
   Each case falls through to the next on success. */
805 static int
806 plip_send_packet(struct net_device *dev, struct net_local *nl,
807 struct plip_local *snd, struct plip_local *rcv)
809 unsigned short nibble_timeout = nl->nibble;
810 unsigned char *lbuf;
811 unsigned char c0;
812 unsigned int cx;
814 if (snd->skb == NULL || (lbuf = snd->skb->data) == NULL) {
815 printk(KERN_DEBUG "%s: send skb lost\n", dev->name);
816 snd->state = PLIP_PK_DONE;
817 snd->skb = NULL;
818 return ERROR;
821 switch (snd->state) {
822 case PLIP_PK_TRIGGER:
/* Status 0x80 with the low handshake bits clear means the peer is idle. */
823 if ((read_status(dev) & 0xf8) != 0x80)
824 return HS_TIMEOUT;
826 /* Trigger remote rx interrupt. */
827 write_data (dev, 0x08);
828 cx = nl->trigger;
829 while (1) {
830 udelay(PLIP_DELAY_UNIT);
831 spin_lock_irq(&nl->lock);
832 if (nl->connection == PLIP_CN_RECEIVE) {
833 spin_unlock_irq(&nl->lock);
834 /* Interrupted. */
835 nl->enet_stats.collisions++;
836 return OK;
838 c0 = read_status(dev);
839 if (c0 & 0x08) {
840 spin_unlock_irq(&nl->lock);
841 DISABLE(dev->irq);
842 synchronize_irq();
/* Re-check after quiescing the IRQ: the peer may have triggered us
   in the window before disable took effect. */
843 if (nl->connection == PLIP_CN_RECEIVE) {
844 /* Interrupted.
845 We don't need to enable irq,
846 as it is soon disabled. */
847 /* Yes, we do. New variant of
848 {enable,disable}_irq *counts*
849 them. -- AV */
850 ENABLE(dev->irq);
851 nl->enet_stats.collisions++;
852 return OK;
854 disable_parport_interrupts (dev);
855 if (net_debug > 2)
856 printk(KERN_DEBUG "%s: send start\n", dev->name);
857 snd->state = PLIP_PK_LENGTH_LSB;
858 snd->nibble = PLIP_NB_BEGIN;
859 nl->timeout_count = 0;
860 break;
862 spin_unlock_irq(&nl->lock);
863 if (--cx == 0) {
864 write_data (dev, 0x00);
865 return HS_TIMEOUT;
869 case PLIP_PK_LENGTH_LSB:
870 if (plip_send(nibble_timeout, dev,
871 &snd->nibble, snd->length.b.lsb))
872 return TIMEOUT;
873 snd->state = PLIP_PK_LENGTH_MSB;
875 case PLIP_PK_LENGTH_MSB:
876 if (plip_send(nibble_timeout, dev,
877 &snd->nibble, snd->length.b.msb))
878 return TIMEOUT;
879 snd->state = PLIP_PK_DATA;
880 snd->byte = 0;
881 snd->checksum = 0;
883 case PLIP_PK_DATA:
/* do/while pair: first loop sends all bytes, second sums the checksum
   backwards (the do/while keywords fell on dropped lines). */
885 if (plip_send(nibble_timeout, dev,
886 &snd->nibble, lbuf[snd->byte]))
887 return TIMEOUT;
888 while (++snd->byte < snd->length.h);
890 snd->checksum += lbuf[--snd->byte];
891 while (snd->byte);
892 snd->state = PLIP_PK_CHECKSUM;
894 case PLIP_PK_CHECKSUM:
895 if (plip_send(nibble_timeout, dev,
896 &snd->nibble, snd->checksum))
897 return TIMEOUT;
899 nl->enet_stats.tx_bytes += snd->skb->len;
900 dev_kfree_skb(snd->skb);
901 nl->enet_stats.tx_packets++;
902 snd->state = PLIP_PK_DONE;
904 case PLIP_PK_DONE:
905 /* Close the connection */
906 write_data (dev, 0x00);
907 snd->skb = NULL;
908 if (net_debug > 2)
909 printk(KERN_DEBUG "%s: send end\n", dev->name);
910 nl->connection = PLIP_CN_CLOSING;
911 nl->is_deferred = 1;
912 queue_task(&nl->deferred, &tq_timer);
913 enable_parport_interrupts (dev);
914 ENABLE(dev->irq);
915 return OK;
917 return OK;
/* Handler for PLIP_CN_CLOSING: return the link to idle, restart the tx
   queue, and release the parport if another driver asked for it while we
   were busy (should_relinquish set by plip_preempt). */
920 static int
921 plip_connection_close(struct net_device *dev, struct net_local *nl,
922 struct plip_local *snd, struct plip_local *rcv)
924 spin_lock_irq(&nl->lock);
925 if (nl->connection == PLIP_CN_CLOSING) {
926 nl->connection = PLIP_CN_NONE;
927 netif_wake_queue (dev);
929 spin_unlock_irq(&nl->lock);
930 if (nl->should_relinquish) {
931 nl->should_relinquish = nl->port_owner = 0;
932 parport_release(nl->pardev);
934 return OK;
937 /* PLIP_ERROR --- wait till other end settled */
/* Handler for PLIP_CN_ERROR: poll until the peer's status lines read idle
   (0x80), then reset to PLIP_CN_NONE and re-enable the interface; otherwise
   defer another check via the timer queue. */
938 static int
939 plip_error(struct net_device *dev, struct net_local *nl,
940 struct plip_local *snd, struct plip_local *rcv)
942 unsigned char status;
944 status = read_status(dev);
945 if ((status & 0xf8) == 0x80) {
946 if (net_debug > 2)
947 printk(KERN_DEBUG "%s: reset interface.\n", dev->name);
948 nl->connection = PLIP_CN_NONE;
949 nl->should_relinquish = 0;
950 netif_start_queue (dev);
951 enable_parport_interrupts (dev);
952 ENABLE(dev->irq);
953 netif_wake_queue (dev);
954 } else {
955 nl->is_deferred = 1;
956 queue_task(&nl->deferred, &tq_timer);
959 return OK;
962 /* Handle the parallel port interrupts. */
/* Parport interrupt handler (also called with irq == -1 from the poll-mode
   timer): on a valid trigger pattern (status 0xc0), start a receive by
   setting PLIP_CN_RECEIVE and queueing plip_bh on the immediate BH. */
963 static void
964 plip_interrupt(int irq, void *dev_id, struct pt_regs * regs)
966 struct net_device *dev = dev_id;
967 struct net_local *nl;
968 struct plip_local *rcv;
969 unsigned char c0;
971 if (dev == NULL) {
972 printk(KERN_DEBUG "plip_interrupt: irq %d for unknown device.\n", irq);
973 return;
976 nl = (struct net_local *)dev->priv;
977 rcv = &nl->rcv_data;
/* NOTE(review): spin_lock_irq (not irqsave) in a handler that can run in
   hard-irq context unconditionally re-enables interrupts on unlock —
   confirm against the 2.4 locking rules for this driver. */
979 spin_lock_irq (&nl->lock);
981 c0 = read_status(dev);
982 if ((c0 & 0xf8) != 0xc0) {
983 if ((dev->irq != -1) && (net_debug > 1))
984 printk(KERN_DEBUG "%s: spurious interrupt\n", dev->name);
985 spin_unlock_irq (&nl->lock);
986 return;
989 if (net_debug > 3)
990 printk(KERN_DEBUG "%s: interrupt.\n", dev->name);
992 switch (nl->connection) {
993 case PLIP_CN_CLOSING:
994 netif_wake_queue (dev);
/* falls through: a trigger while closing starts a fresh receive. */
995 case PLIP_CN_NONE:
996 case PLIP_CN_SEND:
997 dev->last_rx = jiffies;
998 rcv->state = PLIP_PK_TRIGGER;
999 nl->connection = PLIP_CN_RECEIVE;
1000 nl->timeout_count = 0;
1001 queue_task(&nl->immediate, &tq_immediate);
1002 mark_bh(IMMEDIATE_BH);
1003 break;
1005 case PLIP_CN_RECEIVE:
1006 /* May occur because there is race condition
1007 around test and set of dev->interrupt.
1008 Ignore this interrupt. */
1009 break;
1011 case PLIP_CN_ERROR:
1012 printk(KERN_ERR "%s: receive interrupt in error state\n", dev->name);
1013 break;
1016 spin_unlock_irq(&nl->lock);
/* hard_start_xmit method: claim the parport if needed, stash the skb in
   snd_data, move to PLIP_CN_SEND, and kick the bottom half. Returns 0 on
   queued, 1 to ask the stack to retry later. */
1019 static int
1020 plip_tx_packet(struct sk_buff *skb, struct net_device *dev)
1022 struct net_local *nl = (struct net_local *)dev->priv;
1023 struct plip_local *snd = &nl->snd_data;
1025 if (netif_queue_stopped(dev))
1026 return 1;
1028 /* We may need to grab the bus */
1029 if (!nl->port_owner) {
1030 if (parport_claim(nl->pardev))
1031 return 1;
1032 nl->port_owner = 1;
/* Stop the queue: only one packet is in flight at a time. */
1035 netif_stop_queue (dev);
1037 if (skb->len > dev->mtu + dev->hard_header_len) {
1038 printk(KERN_WARNING "%s: packet too big, %d.\n", dev->name, (int)skb->len);
1039 netif_start_queue (dev);
1040 return 1;
1043 if (net_debug > 2)
1044 printk(KERN_DEBUG "%s: send request\n", dev->name);
1046 spin_lock_irq(&nl->lock);
1047 dev->trans_start = jiffies;
1048 snd->skb = skb;
1049 snd->length.h = skb->len;
1050 snd->state = PLIP_PK_TRIGGER;
1051 if (nl->connection == PLIP_CN_NONE) {
1052 nl->connection = PLIP_CN_SEND;
1053 nl->timeout_count = 0;
1055 queue_task(&nl->immediate, &tq_immediate);
1056 mark_bh(IMMEDIATE_BH);
1057 spin_unlock_irq(&nl->lock);
1059 return 0;
/* Rewrite the ethernet header addresses for the point-to-point link: source
   becomes our fake MAC, dest becomes 0xfcfc followed by the interface's
   first IPv4 address (so the peer's plip recognizes the frame). No-op if
   the device has no IP address yet. */
1062 static void
1063 plip_rewrite_address(struct net_device *dev, struct ethhdr *eth)
1065 struct in_device *in_dev;
1067 if ((in_dev=dev->ip_ptr) != NULL) {
1068 /* Any address will do - we take the first */
1069 struct in_ifaddr *ifa=in_dev->ifa_list;
1070 if (ifa != NULL) {
1071 memcpy(eth->h_source, dev->dev_addr, 6);
1072 memset(eth->h_dest, 0xfc, 2);
1073 memcpy(eth->h_dest+2, &ifa->ifa_address, 4);
/* hard_header method: build the normal ethernet header via the saved
   original builder, then rewrite the addresses for PLIP. Returns the
   original builder's result. */
1078 static int
1079 plip_hard_header(struct sk_buff *skb, struct net_device *dev,
1080 unsigned short type, void *daddr,
1081 void *saddr, unsigned len)
1083 struct net_local *nl = (struct net_local *)dev->priv;
1084 int ret;
1086 if ((ret = nl->orig_hard_header(skb, dev, type, daddr, saddr, len)) >= 0)
1087 plip_rewrite_address (dev, (struct ethhdr *)skb->data);
1089 return ret;
/* hard_header_cache method: let the original fill the hh_cache entry, then
   rewrite the cached header's addresses the same way plip_hard_header does.
   The +2 offset skips hh_data's alignment padding to reach the ethhdr. */
1092 int plip_hard_header_cache(struct neighbour *neigh,
1093 struct hh_cache *hh)
1095 struct net_local *nl = (struct net_local *)neigh->dev->priv;
1096 int ret;
1098 if ((ret = nl->orig_hard_header_cache(neigh, hh)) == 0)
1100 struct ethhdr *eth = (struct ethhdr*)(((u8*)hh->hh_data) + 2);
1101 plip_rewrite_address (neigh->dev, eth);
1104 return ret;
1107 /* Open/initialize the board. This is called (in the current kernel)
1108 sometime after booting when the 'ifconfig' program is run.
1110 This routine gets exclusive access to the parallel port by allocating
1111 its IRQ line.
/* dev->open method: claim the parport, clear the data lines, enable rx
   interrupts (or start the poll timer in IRQ-less mode), reset the state
   machines, and fill the tail of the fake MAC from the interface's first
   IPv4 address. Returns 0 or -EAGAIN if the port can't be claimed. */
1113 static int
1114 plip_open(struct net_device *dev)
1116 struct net_local *nl = (struct net_local *)dev->priv;
1117 struct in_device *in_dev;
1119 /* Grab the port */
1120 if (!nl->port_owner) {
1121 if (parport_claim(nl->pardev)) return -EAGAIN;
1122 nl->port_owner = 1;
1125 nl->should_relinquish = 0;
1127 /* Clear the data port. */
1128 write_data (dev, 0x00);
1130 /* Enable rx interrupt. */
1131 enable_parport_interrupts (dev);
1132 if (dev->irq == -1)
1134 atomic_set (&nl->kill_timer, 0);
1135 queue_task (&nl->timer, &tq_timer);
1138 /* Initialize the state machine. */
1139 nl->rcv_data.state = nl->snd_data.state = PLIP_PK_DONE;
1140 nl->rcv_data.skb = nl->snd_data.skb = NULL;
1141 nl->connection = PLIP_CN_NONE;
1142 nl->is_deferred = 0;
1144 /* Fill in the MAC-level header.
1145 We used to abuse dev->broadcast to store the point-to-point
1146 MAC address, but we no longer do it. Instead, we fetch the
1147 interface address whenever it is needed, which is cheap enough
1148 because we use the hh_cache. Actually, abusing dev->broadcast
1149 didn't work, because when using plip_open the point-to-point
1150 address isn't yet known.
1151 PLIP doesn't have a real MAC address, but we need it to be
1152 DOS compatible, and to properly support taps (otherwise,
1153 when the device address isn't identical to the address of a
1154 received frame, the kernel incorrectly drops it). */
1156 if ((in_dev=dev->ip_ptr) != NULL) {
1157 /* Any address will do - we take the first. We already
1158 have the first two bytes filled with 0xfc, from
1159 plip_init_dev(). */
1160 struct in_ifaddr *ifa=in_dev->ifa_list;
1161 if (ifa != NULL) {
1162 memcpy(dev->dev_addr+2, &ifa->ifa_local, 4);
1166 netif_start_queue (dev);
1168 return 0;
1171 /* The inverse routine to plip_open (). */
/* dev->stop method: stop the queue and IRQ, wait for the poll timer to die
   (semaphore handshake with plip_timer_bh), release the parport, and free
   any in-flight skbs in both directions. Always returns 0. */
1172 static int
1173 plip_close(struct net_device *dev)
1175 struct net_local *nl = (struct net_local *)dev->priv;
1176 struct plip_local *snd = &nl->snd_data;
1177 struct plip_local *rcv = &nl->rcv_data;
1179 netif_stop_queue (dev);
1180 DISABLE(dev->irq);
1181 synchronize_irq();
/* Poll mode: signal the timer task and block until it acknowledges. */
1183 if (dev->irq == -1)
1185 init_MUTEX_LOCKED (&nl->killed_timer_sem);
1186 atomic_set (&nl->kill_timer, 1);
1187 down (&nl->killed_timer_sem);
1190 #ifdef NOTDEF
1191 outb(0x00, PAR_DATA(dev));
1192 #endif
1193 nl->is_deferred = 0;
1194 nl->connection = PLIP_CN_NONE;
1195 if (nl->port_owner) {
1196 parport_release(nl->pardev);
1197 nl->port_owner = 0;
1200 snd->state = PLIP_PK_DONE;
1201 if (snd->skb) {
1202 dev_kfree_skb(snd->skb);
1203 snd->skb = NULL;
1205 rcv->state = PLIP_PK_DONE;
1206 if (rcv->skb) {
1207 kfree_skb(rcv->skb);
1208 rcv->skb = NULL;
1211 #ifdef NOTDEF
1212 /* Reset. */
1213 outb(0x00, PAR_CONTROL(dev));
1214 #endif
1215 return 0;
1218 static int
1219 plip_preempt(void *handle)
1221 struct net_device *dev = (struct net_device *)handle;
1222 struct net_local *nl = (struct net_local *)dev->priv;
1224 /* Stand our ground if a datagram is on the wire */
1225 if (nl->connection != PLIP_CN_NONE) {
1226 nl->should_relinquish = 1;
1227 return 1;
1230 nl->port_owner = 0; /* Remember that we released the bus */
1231 return 0;
1234 static void
1235 plip_wakeup(void *handle)
1237 struct net_device *dev = (struct net_device *)handle;
1238 struct net_local *nl = (struct net_local *)dev->priv;
1240 if (nl->port_owner) {
1241 /* Why are we being woken up? */
1242 printk(KERN_DEBUG "%s: why am I being woken up?\n", dev->name);
1243 if (!parport_claim(nl->pardev))
1244 /* bus_owner is already set (but why?) */
1245 printk(KERN_DEBUG "%s: I'm broken.\n", dev->name);
1246 else
1247 return;
1250 if (!(dev->flags & IFF_UP))
1251 /* Don't need the port when the interface is down */
1252 return;
1254 if (!parport_claim(nl->pardev)) {
1255 nl->port_owner = 1;
1256 /* Clear the data port. */
1257 write_data (dev, 0x00);
1260 return;
1263 static struct net_device_stats *
1264 plip_get_stats(struct net_device *dev)
1266 struct net_local *nl = (struct net_local *)dev->priv;
1267 struct net_device_stats *r = &nl->enet_stats;
1269 return r;
1272 static int
1273 plip_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
1275 struct net_local *nl = (struct net_local *) dev->priv;
1276 struct plipconf *pc = (struct plipconf *) &rq->ifr_data;
1278 switch(pc->pcmd) {
1279 case PLIP_GET_TIMEOUT:
1280 pc->trigger = nl->trigger;
1281 pc->nibble = nl->nibble;
1282 break;
1283 case PLIP_SET_TIMEOUT:
1284 if(!capable(CAP_NET_ADMIN))
1285 return -EPERM;
1286 nl->trigger = pc->trigger;
1287 nl->nibble = pc->nibble;
1288 break;
1289 default:
1290 return -EOPNOTSUPP;
1292 return 0;
1295 static int parport[PLIP_MAX] = { [0 ... PLIP_MAX-1] = -1 };
1296 static int timid = 0;
1298 MODULE_PARM(parport, "1-" __MODULE_STRING(PLIP_MAX) "i");
1299 MODULE_PARM(timid, "1i");
1301 static struct net_device *dev_plip[PLIP_MAX] = { NULL, };
1303 static int inline
1304 plip_searchfor(int list[], int a)
1306 int i;
1307 for (i = 0; i < PLIP_MAX && list[i] != -1; i++) {
1308 if (list[i] == a) return 1;
1310 return 0;
1313 /* plip_attach() is called (by the parport code) when a port is
1314 * available to use. */
1315 static void plip_attach (struct parport *port)
1317 static int i = 0;
1319 if ((parport[0] == -1 && (!timid || !port->devices)) ||
1320 plip_searchfor(parport, port->number)) {
1321 if (i == PLIP_MAX) {
1322 printk(KERN_ERR "plip: too many devices\n");
1323 return;
1325 dev_plip[i] = kmalloc(sizeof(struct net_device),
1326 GFP_KERNEL);
1327 if (!dev_plip[i]) {
1328 printk(KERN_ERR "plip: memory squeeze\n");
1329 return;
1331 memset(dev_plip[i], 0, sizeof(struct net_device));
1332 sprintf(dev_plip[i]->name, "plip%d", i);
1333 dev_plip[i]->priv = port;
1334 if (plip_init_dev(dev_plip[i],port) ||
1335 register_netdev(dev_plip[i])) {
1336 kfree(dev_plip[i]);
1337 dev_plip[i] = NULL;
1338 } else {
1339 i++;
/* plip_detach() is called (by the parport code) when a port is
 * no longer available to use. */
static void plip_detach (struct parport *port)
{
	/* Nothing to do: devices are torn down at module unload. */
}
1351 static struct parport_driver plip_driver = {
1352 name: "plip",
1353 attach: plip_attach,
1354 detach: plip_detach
1357 static void __exit plip_cleanup_module (void)
1359 int i;
1361 parport_unregister_driver (&plip_driver);
1363 for (i=0; i < PLIP_MAX; i++) {
1364 if (dev_plip[i]) {
1365 struct net_local *nl =
1366 (struct net_local *)dev_plip[i]->priv;
1367 unregister_netdev(dev_plip[i]);
1368 if (nl->port_owner)
1369 parport_release(nl->pardev);
1370 parport_unregister_device(nl->pardev);
1371 kfree(dev_plip[i]->priv);
1372 kfree(dev_plip[i]);
1373 dev_plip[i] = NULL;
1378 #ifndef MODULE
1380 static int parport_ptr = 0;
1382 static int __init plip_setup(char *str)
1384 int ints[4];
1386 str = get_options(str, ARRAY_SIZE(ints), ints);
1388 /* Ugh. */
1389 if (!strncmp(str, "parport", 7)) {
1390 int n = simple_strtoul(str+7, NULL, 10);
1391 if (parport_ptr < PLIP_MAX)
1392 parport[parport_ptr++] = n;
1393 else
1394 printk(KERN_INFO "plip: too many ports, %s ignored.\n",
1395 str);
1396 } else if (!strcmp(str, "timid")) {
1397 timid = 1;
1398 } else {
1399 if (ints[0] == 0 || ints[1] == 0) {
1400 /* disable driver on "plip=" or "plip=0" */
1401 parport[0] = -2;
1402 } else {
1403 printk(KERN_WARNING "warning: 'plip=0x%x' ignored\n",
1404 ints[1]);
1407 return 1;
1410 __setup("plip=", plip_setup);
1412 #endif /* !MODULE */
1414 static int __init plip_init (void)
1416 if (parport[0] == -2)
1417 return 0;
1419 if (parport[0] != -1 && timid) {
1420 printk(KERN_WARNING "plip: warning, ignoring `timid' since specific ports given.\n");
1421 timid = 0;
1424 if (parport_register_driver (&plip_driver)) {
1425 printk (KERN_WARNING "plip: couldn't register driver\n");
1426 return 1;
1429 return 0;
1432 module_init(plip_init);
1433 module_exit(plip_cleanup_module);
1436 * Local variables:
1437 * compile-command: "gcc -DMODULE -DMODVERSIONS -D__KERNEL__ -Wall -Wstrict-prototypes -O2 -g -fomit-frame-pointer -pipe -c plip.c"
1438 * End: