drivers/net/plip.c
1 /* $Id: plip.c,v 1.3.6.2 1997/04/16 15:07:56 phil Exp $ */
2 /* PLIP: A parallel port "network" driver for Linux. */
3 /* This driver is for parallel port with 5-bit cable (LapLink (R) cable). */
4 /*
5 * Authors: Donald Becker <becker@scyld.com>
6 * Tommy Thorn <thorn@daimi.aau.dk>
7 * Tanabe Hiroyasu <hiro@sanpo.t.u-tokyo.ac.jp>
8 * Alan Cox <gw4pts@gw4pts.ampr.org>
9 * Peter Bauer <100136.3530@compuserve.com>
10 * Niibe Yutaka <gniibe@mri.co.jp>
11 * Nimrod Zimerman <zimerman@mailandnews.com>
13 * Enhancements:
14 * Modularization and ifreq/ifmap support by Alan Cox.
15 * Rewritten by Niibe Yutaka.
16 * parport-sharing awareness code by Philip Blundell.
17 * SMP locking by Niibe Yutaka.
18 * Support for parallel ports with no IRQ (poll mode),
19 * Modifications to use the parallel port API
20 * by Nimrod Zimerman.
22 * Fixes:
23 * Niibe Yutaka
24 * - Module initialization.
25 * - MTU fix.
26 * - Make sure other end is OK, before sending a packet.
27 * - Fix immediate timer problem.
29 * Al Viro
30 * - Changed {enable,disable}_irq handling to make it work
31 * with new ("stack") semantics.
33 * This program is free software; you can redistribute it and/or
34 * modify it under the terms of the GNU General Public License
35 * as published by the Free Software Foundation; either version
36 * 2 of the License, or (at your option) any later version.
40 * Original version and the name 'PLIP' from Donald Becker <becker@scyld.com>
41 * inspired by Russ Nelson's parallel port packet driver.
43 * NOTE:
44 * Tanabe Hiroyasu changed the protocol, and that version was in Linux v1.0.
45 * Because of the need to communicate with DOS machines running the
46 * Crynwr packet driver, Peter Bauer changed the protocol
47 * back to the original one.
49 * This version follows the original PLIP protocol,
50 * so it cannot talk to the PLIP of Linux v1.0.
54 * To use with a DOS box, turn on the ARP switch:
55 * # ifconfig plip[0-2] arp
57 static const char version[] = "NET3 PLIP version 2.4-parport gniibe@mri.co.jp\n";
60 Sources:
61 Ideas and protocols came from Russ Nelson's <nelson@crynwr.com>
62 "parallel.asm" parallel port packet driver.
64 The "Crynwr" parallel port standard specifies the following protocol:
65 Trigger by sending nibble '0x8' (this causes interrupt on other end)
66 count-low octet
67 count-high octet
68 ... data octets
69 checksum octet
70 Each octet is sent as <wait for rx. '0x1?'> <send 0x10+(octet&0x0F)>
71 <wait for rx. '0x0?'> <send 0x00+((octet>>4)&0x0F)>
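   For example, to send the octet 0xA5: wait for a status nibble of the
   form 0x1?, write 0x15 (0x10 + the low nibble 0x5), then wait for 0x0?
   and write 0x0A (the high nibble).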
73 The packet is encapsulated as if it were ethernet.
75 The cable used is a de facto standard parallel null cable -- sold as
76 a "LapLink" cable by various places. You'll need a 12-conductor cable to
77 make one yourself. The wiring is:
78 SLCTIN 17 - 17
79 GROUND 25 - 25
80 D0->ERROR 2 - 15 15 - 2
81 D1->SLCT 3 - 13 13 - 3
82 D2->PAPOUT 4 - 12 12 - 4
83 D3->ACK 5 - 10 10 - 5
84 D4->BUSY 6 - 11 11 - 6
85 Do not connect the other pins. The remaining pins are:
86 D5,D6,D7 are 7,8,9
87 STROBE is 1, FEED is 14, INIT is 16
88 extra grounds are 18,19,20,21,22,23,24
91 #include <linux/module.h>
92 #include <linux/kernel.h>
93 #include <linux/types.h>
94 #include <linux/fcntl.h>
95 #include <linux/interrupt.h>
96 #include <linux/string.h>
97 #include <linux/if_ether.h>
98 #include <linux/in.h>
99 #include <linux/errno.h>
100 #include <linux/delay.h>
101 #include <linux/lp.h>
102 #include <linux/init.h>
103 #include <linux/netdevice.h>
104 #include <linux/etherdevice.h>
105 #include <linux/inetdevice.h>
106 #include <linux/skbuff.h>
107 #include <linux/if_plip.h>
108 #include <linux/workqueue.h>
109 #include <linux/ioport.h>
110 #include <linux/spinlock.h>
111 #include <linux/parport.h>
113 #include <net/neighbour.h>
115 #include <asm/system.h>
116 #include <asm/bitops.h>
117 #include <asm/irq.h>
118 #include <asm/byteorder.h>
119 #include <asm/semaphore.h>
121 /* Maximum number of devices to support. */
122 #define PLIP_MAX 8
124 /* Use 0 for production, 1 for verification, >2 for debug */
125 #ifndef NET_DEBUG
126 #define NET_DEBUG 1
127 #endif
128 static unsigned int net_debug = NET_DEBUG;
130 #define ENABLE(irq) if (irq != -1) enable_irq(irq)
131 #define DISABLE(irq) if (irq != -1) disable_irq(irq)
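/* dev->irq == -1 means the parport has no interrupt line; the driver then
   runs in polled mode via plip_timer_bh(), so these wrappers simply skip
   enable_irq()/disable_irq() in that case. */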
133 /* In microseconds */
134 #define PLIP_DELAY_UNIT 1
136 /* Connection time out = PLIP_TRIGGER_WAIT * PLIP_DELAY_UNIT usec */
137 #define PLIP_TRIGGER_WAIT 500
139 /* Nibble time out = PLIP_NIBBLE_WAIT * PLIP_DELAY_UNIT usec */
140 #define PLIP_NIBBLE_WAIT 3000
142 /* Bottom halves */
143 static void plip_kick_bh(struct net_device *dev);
144 static void plip_bh(struct net_device *dev);
145 static void plip_timer_bh(struct net_device *dev);
147 /* Interrupt handler */
148 static void plip_interrupt(int irq, void *dev_id, struct pt_regs *regs);
150 /* Functions for DEV methods */
151 static int plip_tx_packet(struct sk_buff *skb, struct net_device *dev);
152 static int plip_hard_header(struct sk_buff *skb, struct net_device *dev,
153 unsigned short type, void *daddr,
154 void *saddr, unsigned len);
155 static int plip_hard_header_cache(struct neighbour *neigh,
156 struct hh_cache *hh);
157 static int plip_open(struct net_device *dev);
158 static int plip_close(struct net_device *dev);
159 static struct net_device_stats *plip_get_stats(struct net_device *dev);
160 static int plip_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd);
161 static int plip_preempt(void *handle);
162 static void plip_wakeup(void *handle);
164 enum plip_connection_state {
165 PLIP_CN_NONE=0,
166 PLIP_CN_RECEIVE,
167 PLIP_CN_SEND,
168 PLIP_CN_CLOSING,
169 PLIP_CN_ERROR
172 enum plip_packet_state {
173 PLIP_PK_DONE=0,
174 PLIP_PK_TRIGGER,
175 PLIP_PK_LENGTH_LSB,
176 PLIP_PK_LENGTH_MSB,
177 PLIP_PK_DATA,
178 PLIP_PK_CHECKSUM
181 enum plip_nibble_state {
182 PLIP_NB_BEGIN,
183 PLIP_NB_1,
184 PLIP_NB_2,
187 struct plip_local {
188 enum plip_packet_state state;
189 enum plip_nibble_state nibble;
190 union {
191 struct {
192 #if defined(__LITTLE_ENDIAN)
193 unsigned char lsb;
194 unsigned char msb;
195 #elif defined(__BIG_ENDIAN)
196 unsigned char msb;
197 unsigned char lsb;
198 #else
199 #error "Please fix the endianness defines in <asm/byteorder.h>"
200 #endif
201 } b;
202 unsigned short h;
203 } length;
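/* length.h is the full 16-bit packet length; b.lsb/b.msb let the nibble
   state machine fill it one byte at a time regardless of host byte order. */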
204 unsigned short byte;
205 unsigned char checksum;
206 unsigned char data;
207 struct sk_buff *skb;
210 struct net_local {
211 struct net_device_stats enet_stats;
212 struct work_struct immediate;
213 struct work_struct deferred;
214 struct work_struct timer;
215 struct plip_local snd_data;
216 struct plip_local rcv_data;
217 struct pardevice *pardev;
218 unsigned long trigger;
219 unsigned long nibble;
220 enum plip_connection_state connection;
221 unsigned short timeout_count;
222 int is_deferred;
223 int port_owner;
224 int should_relinquish;
225 int (*orig_hard_header)(struct sk_buff *skb, struct net_device *dev,
226 unsigned short type, void *daddr,
227 void *saddr, unsigned len);
228 int (*orig_hard_header_cache)(struct neighbour *neigh,
229 struct hh_cache *hh);
230 spinlock_t lock;
231 atomic_t kill_timer;
232 struct semaphore killed_timer_sem;
235 inline static void enable_parport_interrupts (struct net_device *dev)
237 if (dev->irq != -1)
239 struct parport *port =
240 ((struct net_local *)dev->priv)->pardev->port;
241 port->ops->enable_irq (port);
245 inline static void disable_parport_interrupts (struct net_device *dev)
247 if (dev->irq != -1)
249 struct parport *port =
250 ((struct net_local *)dev->priv)->pardev->port;
251 port->ops->disable_irq (port);
255 inline static void write_data (struct net_device *dev, unsigned char data)
257 struct parport *port =
258 ((struct net_local *)dev->priv)->pardev->port;
260 port->ops->write_data (port, data);
263 inline static unsigned char read_status (struct net_device *dev)
265 struct parport *port =
266 ((struct net_local *)dev->priv)->pardev->port;
268 return port->ops->read_status (port);
271 /* Entry point of PLIP driver.
272 Probe the hardware, and register/initialize the driver.
274 PLIP is rather weird, because of the way it interacts with the parport
275 system. It is _not_ initialised from Space.c. Instead, plip_init()
276 is called, and that function makes up a "struct net_device" for each port, and
277 then calls us here.
280 int __init
281 plip_init_dev(struct net_device *dev, struct parport *pb)
283 struct net_local *nl;
284 struct pardevice *pardev;
286 SET_MODULE_OWNER(dev);
287 dev->irq = pb->irq;
288 dev->base_addr = pb->base;
290 if (pb->irq == -1) {
291 printk(KERN_INFO "plip: %s has no IRQ. Using IRQ-less mode,"
292 "which is fairly inefficient!\n", pb->name);
295 pardev = parport_register_device(pb, dev->name, plip_preempt,
296 plip_wakeup, plip_interrupt,
297 0, dev);
299 if (!pardev)
300 return -ENODEV;
302 printk(KERN_INFO "%s", version);
303 if (dev->irq != -1)
304 printk(KERN_INFO "%s: Parallel port at %#3lx, using IRQ %d.\n",
305 dev->name, dev->base_addr, dev->irq);
306 else
307 printk(KERN_INFO "%s: Parallel port at %#3lx, not using IRQ.\n",
308 dev->name, dev->base_addr);
310 /* Fill in the generic fields of the device structure. */
311 ether_setup(dev);
313 /* Then, override parts of it */
314 dev->hard_start_xmit = plip_tx_packet;
315 dev->open = plip_open;
316 dev->stop = plip_close;
317 dev->get_stats = plip_get_stats;
318 dev->do_ioctl = plip_ioctl;
319 dev->header_cache_update = NULL;
320 dev->tx_queue_len = 10;
321 dev->flags = IFF_POINTOPOINT|IFF_NOARP;
322 memset(dev->dev_addr, 0xfc, ETH_ALEN);
324 /* Set the private structure */
325 dev->priv = kmalloc(sizeof (struct net_local), GFP_KERNEL);
326 if (dev->priv == NULL) {
327 printk(KERN_ERR "%s: out of memory\n", dev->name);
328 parport_unregister_device(pardev);
329 return -ENOMEM;
331 memset(dev->priv, 0, sizeof(struct net_local));
332 nl = (struct net_local *) dev->priv;
334 nl->orig_hard_header = dev->hard_header;
335 dev->hard_header = plip_hard_header;
337 nl->orig_hard_header_cache = dev->hard_header_cache;
338 dev->hard_header_cache = plip_hard_header_cache;
340 nl->pardev = pardev;
342 nl->port_owner = 0;
344 /* Initialize constants */
345 nl->trigger = PLIP_TRIGGER_WAIT;
346 nl->nibble = PLIP_NIBBLE_WAIT;
348 /* Initialize task queue structures */
349 INIT_WORK(&nl->immediate, (void (*)(void *))plip_bh, dev);
350 INIT_WORK(&nl->deferred, (void (*)(void *))plip_kick_bh, dev);
352 if (dev->irq == -1)
353 INIT_WORK(&nl->timer, (void (*)(void *))plip_timer_bh, dev);
355 spin_lock_init(&nl->lock);
357 return 0;
360 /* Bottom half handler for the delayed request.
361 This routine is kicked by do_timer().
362 Request `plip_bh' to be invoked. */
363 static void
364 plip_kick_bh(struct net_device *dev)
366 struct net_local *nl = (struct net_local *)dev->priv;
368 if (nl->is_deferred)
369 schedule_work(&nl->immediate);
372 /* Forward declarations of internal routines */
373 static int plip_none(struct net_device *, struct net_local *,
374 struct plip_local *, struct plip_local *);
375 static int plip_receive_packet(struct net_device *, struct net_local *,
376 struct plip_local *, struct plip_local *);
377 static int plip_send_packet(struct net_device *, struct net_local *,
378 struct plip_local *, struct plip_local *);
379 static int plip_connection_close(struct net_device *, struct net_local *,
380 struct plip_local *, struct plip_local *);
381 static int plip_error(struct net_device *, struct net_local *,
382 struct plip_local *, struct plip_local *);
383 static int plip_bh_timeout_error(struct net_device *dev, struct net_local *nl,
384 struct plip_local *snd,
385 struct plip_local *rcv,
386 int error);
388 #define OK 0
389 #define TIMEOUT 1
390 #define ERROR 2
391 #define HS_TIMEOUT 3
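/* Return codes for the connection-state handlers below.  HS_TIMEOUT is a
   handshake timeout while triggering the other end and gets special
   treatment in plip_bh_timeout_error(). */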
393 typedef int (*plip_func)(struct net_device *dev, struct net_local *nl,
394 struct plip_local *snd, struct plip_local *rcv);
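/* Handlers below, indexed by enum plip_connection_state (nl->connection). */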
396 static plip_func connection_state_table[] =
398 plip_none,
399 plip_receive_packet,
400 plip_send_packet,
401 plip_connection_close,
402 plip_error
405 /* Bottom half handler of PLIP. */
406 static void
407 plip_bh(struct net_device *dev)
409 struct net_local *nl = (struct net_local *)dev->priv;
410 struct plip_local *snd = &nl->snd_data;
411 struct plip_local *rcv = &nl->rcv_data;
412 plip_func f;
413 int r;
415 nl->is_deferred = 0;
416 f = connection_state_table[nl->connection];
417 if ((r = (*f)(dev, nl, snd, rcv)) != OK
418 && (r = plip_bh_timeout_error(dev, nl, snd, rcv, r)) != OK) {
419 nl->is_deferred = 1;
420 schedule_delayed_work(&nl->deferred, 1);
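/* Polled-mode "interrupt": when the port has no IRQ line this work is
   rescheduled one jiffy later each time and fakes plip_interrupt(), until
   plip_close() sets kill_timer; then we signal killed_timer_sem and stop. */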
424 static void
425 plip_timer_bh(struct net_device *dev)
427 struct net_local *nl = (struct net_local *)dev->priv;
429 if (!(atomic_read (&nl->kill_timer))) {
430 plip_interrupt (-1, dev, NULL);
432 schedule_delayed_work(&nl->timer, 1);
434 else {
435 up (&nl->killed_timer_sem);
439 static int
440 plip_bh_timeout_error(struct net_device *dev, struct net_local *nl,
441 struct plip_local *snd, struct plip_local *rcv,
442 int error)
444 unsigned char c0;
446 * This is tricky. If we got here from the beginning of send (either
447 * with ERROR or HS_TIMEOUT) we have IRQ enabled. Otherwise it's
448 * already disabled. With the old variant of {enable,disable}_irq()
449 * extra disable_irq() was a no-op. Now it became mortal - it's
450 * unbalanced and thus we'll never re-enable IRQ (until rmmod plip,
451 * that is). So we have to treat HS_TIMEOUT and ERROR from send
452 * in a special way.
455 spin_lock_irq(&nl->lock);
456 if (nl->connection == PLIP_CN_SEND) {
458 if (error != ERROR) { /* Timeout */
459 nl->timeout_count++;
460 if ((error == HS_TIMEOUT
461 && nl->timeout_count <= 10)
462 || nl->timeout_count <= 3) {
463 spin_unlock_irq(&nl->lock);
464 /* Try again later */
465 return TIMEOUT;
467 c0 = read_status(dev);
468 printk(KERN_WARNING "%s: transmit timeout(%d,%02x)\n",
469 dev->name, snd->state, c0);
470 } else
471 error = HS_TIMEOUT;
472 nl->enet_stats.tx_errors++;
473 nl->enet_stats.tx_aborted_errors++;
474 } else if (nl->connection == PLIP_CN_RECEIVE) {
475 if (rcv->state == PLIP_PK_TRIGGER) {
476 /* Transmission was interrupted. */
477 spin_unlock_irq(&nl->lock);
478 return OK;
480 if (error != ERROR) { /* Timeout */
481 if (++nl->timeout_count <= 3) {
482 spin_unlock_irq(&nl->lock);
483 /* Try again later */
484 return TIMEOUT;
486 c0 = read_status(dev);
487 printk(KERN_WARNING "%s: receive timeout(%d,%02x)\n",
488 dev->name, rcv->state, c0);
490 nl->enet_stats.rx_dropped++;
492 rcv->state = PLIP_PK_DONE;
493 if (rcv->skb) {
494 kfree_skb(rcv->skb);
495 rcv->skb = NULL;
497 snd->state = PLIP_PK_DONE;
498 if (snd->skb) {
499 dev_kfree_skb(snd->skb);
500 snd->skb = NULL;
502 spin_unlock_irq(&nl->lock);
503 if (error == HS_TIMEOUT) {
504 DISABLE(dev->irq);
505 synchronize_irq(dev->irq);
507 disable_parport_interrupts (dev);
508 netif_stop_queue (dev);
509 nl->connection = PLIP_CN_ERROR;
510 write_data (dev, 0x00);
512 return TIMEOUT;
515 static int
516 plip_none(struct net_device *dev, struct net_local *nl,
517 struct plip_local *snd, struct plip_local *rcv)
519 return OK;
522 /* PLIP_RECEIVE --- receive a byte (two nibbles)
523 Returns OK on success, TIMEOUT on timeout */
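/* Per the cable wiring above, the peer's data bits D0-D3 show up on our
   ERROR/SLCT/PAPOUT/ACK status lines (bits 3-6), and its D4 on BUSY
   (bit 7, which the port reads back inverted), so (status >> 3) & 0x0f
   recovers a nibble and bit 7 serves as the handshake strobe. */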
524 inline static int
525 plip_receive(unsigned short nibble_timeout, struct net_device *dev,
526 enum plip_nibble_state *ns_p, unsigned char *data_p)
528 unsigned char c0, c1;
529 unsigned int cx;
531 switch (*ns_p) {
532 case PLIP_NB_BEGIN:
533 cx = nibble_timeout;
534 while (1) {
535 c0 = read_status(dev);
536 udelay(PLIP_DELAY_UNIT);
537 if ((c0 & 0x80) == 0) {
538 c1 = read_status(dev);
539 if (c0 == c1)
540 break;
542 if (--cx == 0)
543 return TIMEOUT;
545 *data_p = (c0 >> 3) & 0x0f;
546 write_data (dev, 0x10); /* send ACK */
547 *ns_p = PLIP_NB_1;
549 case PLIP_NB_1:
550 cx = nibble_timeout;
551 while (1) {
552 c0 = read_status(dev);
553 udelay(PLIP_DELAY_UNIT);
554 if (c0 & 0x80) {
555 c1 = read_status(dev);
556 if (c0 == c1)
557 break;
559 if (--cx == 0)
560 return TIMEOUT;
562 *data_p |= (c0 << 1) & 0xf0;
563 write_data (dev, 0x00); /* send ACK */
564 *ns_p = PLIP_NB_BEGIN;
565 case PLIP_NB_2:
566 break;
568 return OK;
572 * Determine the packet's protocol ID. The rule here is that we
573 * assume 802.3 if the type field is short enough to be a length.
574 * This is normal practice and works for any 'now in use' protocol.
576 * PLIP is ethernet-ish, but the daddr might not be valid if unicast.
577 * PLIP fortunately has no bus architecture (it's point-to-point).
579 * We can't fix the daddr thing, as that quirk (more a bug) is embedded
580 * in far too many old systems, not all of them even running Linux.
583 static unsigned short plip_type_trans(struct sk_buff *skb, struct net_device *dev)
585 struct ethhdr *eth;
586 unsigned char *rawp;
588 skb->mac.raw=skb->data;
589 skb_pull(skb,dev->hard_header_len);
590 eth= skb->mac.ethernet;
592 if(*eth->h_dest&1)
594 if(memcmp(eth->h_dest,dev->broadcast, ETH_ALEN)==0)
595 skb->pkt_type=PACKET_BROADCAST;
596 else
597 skb->pkt_type=PACKET_MULTICAST;
601 * This ALLMULTI check should be redundant by 1.4
602 * so don't forget to remove it.
605 if (ntohs(eth->h_proto) >= 1536)
606 return eth->h_proto;
608 rawp = skb->data;
611 * This is a magic hack to spot IPX packets. Older Novell breaks
612 * the protocol design and runs IPX over 802.3 without an 802.2 LLC
613 * layer. We look for FFFF which isn't a used 802.2 SSAP/DSAP. This
614 * won't work for fault tolerant netware but does for the rest.
616 if (*(unsigned short *)rawp == 0xFFFF)
617 return htons(ETH_P_802_3);
620 * Real 802.2 LLC
622 return htons(ETH_P_802_2);
626 /* PLIP_RECEIVE_PACKET --- receive a packet */
627 static int
628 plip_receive_packet(struct net_device *dev, struct net_local *nl,
629 struct plip_local *snd, struct plip_local *rcv)
631 unsigned short nibble_timeout = nl->nibble;
632 unsigned char *lbuf;
634 switch (rcv->state) {
635 case PLIP_PK_TRIGGER:
636 DISABLE(dev->irq);
637 /* Don't need to synchronize irq, as we can safely ignore it */
638 disable_parport_interrupts (dev);
639 write_data (dev, 0x01); /* send ACK */
640 if (net_debug > 2)
641 printk(KERN_DEBUG "%s: receive start\n", dev->name);
642 rcv->state = PLIP_PK_LENGTH_LSB;
643 rcv->nibble = PLIP_NB_BEGIN;
645 case PLIP_PK_LENGTH_LSB:
646 if (snd->state != PLIP_PK_DONE) {
647 if (plip_receive(nl->trigger, dev,
648 &rcv->nibble, &rcv->length.b.lsb)) {
649 /* collision, here dev->tbusy == 1 */
650 rcv->state = PLIP_PK_DONE;
651 nl->is_deferred = 1;
652 nl->connection = PLIP_CN_SEND;
653 schedule_delayed_work(&nl->deferred, 1);
654 enable_parport_interrupts (dev);
655 ENABLE(dev->irq);
656 return OK;
658 } else {
659 if (plip_receive(nibble_timeout, dev,
660 &rcv->nibble, &rcv->length.b.lsb))
661 return TIMEOUT;
663 rcv->state = PLIP_PK_LENGTH_MSB;
665 case PLIP_PK_LENGTH_MSB:
666 if (plip_receive(nibble_timeout, dev,
667 &rcv->nibble, &rcv->length.b.msb))
668 return TIMEOUT;
669 if (rcv->length.h > dev->mtu + dev->hard_header_len
670 || rcv->length.h < 8) {
671 printk(KERN_WARNING "%s: bogus packet size %d.\n", dev->name, rcv->length.h);
672 return ERROR;
674 /* Malloc up new buffer. */
675 rcv->skb = dev_alloc_skb(rcv->length.h + 2);
676 if (rcv->skb == NULL) {
677 printk(KERN_ERR "%s: Memory squeeze.\n", dev->name);
678 return ERROR;
680 skb_reserve(rcv->skb, 2); /* Align IP on 16 byte boundaries */
681 skb_put(rcv->skb,rcv->length.h);
682 rcv->skb->dev = dev;
683 rcv->state = PLIP_PK_DATA;
684 rcv->byte = 0;
685 rcv->checksum = 0;
687 case PLIP_PK_DATA:
688 lbuf = rcv->skb->data;
690 if (plip_receive(nibble_timeout, dev,
691 &rcv->nibble, &lbuf[rcv->byte]))
692 return TIMEOUT;
693 while (++rcv->byte < rcv->length.h);
695 rcv->checksum += lbuf[--rcv->byte];
696 while (rcv->byte);
697 rcv->state = PLIP_PK_CHECKSUM;
699 case PLIP_PK_CHECKSUM:
700 if (plip_receive(nibble_timeout, dev,
701 &rcv->nibble, &rcv->data))
702 return TIMEOUT;
703 if (rcv->data != rcv->checksum) {
704 nl->enet_stats.rx_crc_errors++;
705 if (net_debug)
706 printk(KERN_DEBUG "%s: checksum error\n", dev->name);
707 return ERROR;
709 rcv->state = PLIP_PK_DONE;
711 case PLIP_PK_DONE:
712 /* Inform the upper layer of the arrival of a packet. */
713 rcv->skb->protocol=plip_type_trans(rcv->skb, dev);
714 netif_rx(rcv->skb);
715 dev->last_rx = jiffies;
716 nl->enet_stats.rx_bytes += rcv->length.h;
717 nl->enet_stats.rx_packets++;
718 rcv->skb = NULL;
719 if (net_debug > 2)
720 printk(KERN_DEBUG "%s: receive end\n", dev->name);
722 /* Close the connection. */
723 write_data (dev, 0x00);
724 spin_lock_irq(&nl->lock);
725 if (snd->state != PLIP_PK_DONE) {
726 nl->connection = PLIP_CN_SEND;
727 spin_unlock_irq(&nl->lock);
728 schedule_work(&nl->immediate);
729 enable_parport_interrupts (dev);
730 ENABLE(dev->irq);
731 return OK;
732 } else {
733 nl->connection = PLIP_CN_NONE;
734 spin_unlock_irq(&nl->lock);
735 enable_parport_interrupts (dev);
736 ENABLE(dev->irq);
737 return OK;
740 return OK;
743 /* PLIP_SEND --- send a byte (two nibbles)
744 Returns OK on success, TIMEOUT on timeout */
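/* The sender drives D0-D4: D0-D3 carry the nibble and D4 is the strobe.
   The receiver acknowledges by toggling its own D4, which we see here as
   status bit 7 (BUSY, inverted by the port). */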
745 inline static int
746 plip_send(unsigned short nibble_timeout, struct net_device *dev,
747 enum plip_nibble_state *ns_p, unsigned char data)
749 unsigned char c0;
750 unsigned int cx;
752 switch (*ns_p) {
753 case PLIP_NB_BEGIN:
754 write_data (dev, data & 0x0f);
755 *ns_p = PLIP_NB_1;
757 case PLIP_NB_1:
758 write_data (dev, 0x10 | (data & 0x0f));
759 cx = nibble_timeout;
760 while (1) {
761 c0 = read_status(dev);
762 if ((c0 & 0x80) == 0)
763 break;
764 if (--cx == 0)
765 return TIMEOUT;
766 udelay(PLIP_DELAY_UNIT);
768 write_data (dev, 0x10 | (data >> 4));
769 *ns_p = PLIP_NB_2;
771 case PLIP_NB_2:
772 write_data (dev, (data >> 4));
773 cx = nibble_timeout;
774 while (1) {
775 c0 = read_status(dev);
776 if (c0 & 0x80)
777 break;
778 if (--cx == 0)
779 return TIMEOUT;
780 udelay(PLIP_DELAY_UNIT);
782 *ns_p = PLIP_NB_BEGIN;
783 return OK;
785 return OK;
788 /* PLIP_SEND_PACKET --- send a packet */
789 static int
790 plip_send_packet(struct net_device *dev, struct net_local *nl,
791 struct plip_local *snd, struct plip_local *rcv)
793 unsigned short nibble_timeout = nl->nibble;
794 unsigned char *lbuf;
795 unsigned char c0;
796 unsigned int cx;
798 if (snd->skb == NULL || (lbuf = snd->skb->data) == NULL) {
799 printk(KERN_DEBUG "%s: send skb lost\n", dev->name);
800 snd->state = PLIP_PK_DONE;
801 snd->skb = NULL;
802 return ERROR;
805 switch (snd->state) {
806 case PLIP_PK_TRIGGER:
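		/* 0x80 means the other end is idle (0x00 on its data lines):
		   status bits 3-6 are clear and only the inverted BUSY bit (7)
		   is set.  Otherwise the line is busy and we report HS_TIMEOUT. */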
807 if ((read_status(dev) & 0xf8) != 0x80)
808 return HS_TIMEOUT;
810 /* Trigger remote rx interrupt. */
811 write_data (dev, 0x08);
812 cx = nl->trigger;
813 while (1) {
814 udelay(PLIP_DELAY_UNIT);
815 spin_lock_irq(&nl->lock);
816 if (nl->connection == PLIP_CN_RECEIVE) {
817 spin_unlock_irq(&nl->lock);
818 /* Interrupted. */
819 nl->enet_stats.collisions++;
820 return OK;
822 c0 = read_status(dev);
823 if (c0 & 0x08) {
824 spin_unlock_irq(&nl->lock);
825 DISABLE(dev->irq);
826 synchronize_irq(dev->irq);
827 if (nl->connection == PLIP_CN_RECEIVE) {
828 /* Interrupted.
829 We don't need to enable irq,
830 as it is soon disabled. */
831 /* Yes, we do. New variant of
832 {enable,disable}_irq *counts*
833 them. -- AV */
834 ENABLE(dev->irq);
835 nl->enet_stats.collisions++;
836 return OK;
838 disable_parport_interrupts (dev);
839 if (net_debug > 2)
840 printk(KERN_DEBUG "%s: send start\n", dev->name);
841 snd->state = PLIP_PK_LENGTH_LSB;
842 snd->nibble = PLIP_NB_BEGIN;
843 nl->timeout_count = 0;
844 break;
846 spin_unlock_irq(&nl->lock);
847 if (--cx == 0) {
848 write_data (dev, 0x00);
849 return HS_TIMEOUT;
853 case PLIP_PK_LENGTH_LSB:
854 if (plip_send(nibble_timeout, dev,
855 &snd->nibble, snd->length.b.lsb))
856 return TIMEOUT;
857 snd->state = PLIP_PK_LENGTH_MSB;
859 case PLIP_PK_LENGTH_MSB:
860 if (plip_send(nibble_timeout, dev,
861 &snd->nibble, snd->length.b.msb))
862 return TIMEOUT;
863 snd->state = PLIP_PK_DATA;
864 snd->byte = 0;
865 snd->checksum = 0;
867 case PLIP_PK_DATA:
869 if (plip_send(nibble_timeout, dev,
870 &snd->nibble, lbuf[snd->byte]))
871 return TIMEOUT;
872 while (++snd->byte < snd->length.h);
874 snd->checksum += lbuf[--snd->byte];
875 while (snd->byte);
876 snd->state = PLIP_PK_CHECKSUM;
878 case PLIP_PK_CHECKSUM:
879 if (plip_send(nibble_timeout, dev,
880 &snd->nibble, snd->checksum))
881 return TIMEOUT;
883 nl->enet_stats.tx_bytes += snd->skb->len;
884 dev_kfree_skb(snd->skb);
885 nl->enet_stats.tx_packets++;
886 snd->state = PLIP_PK_DONE;
888 case PLIP_PK_DONE:
889 /* Close the connection */
890 write_data (dev, 0x00);
891 snd->skb = NULL;
892 if (net_debug > 2)
893 printk(KERN_DEBUG "%s: send end\n", dev->name);
894 nl->connection = PLIP_CN_CLOSING;
895 nl->is_deferred = 1;
896 schedule_delayed_work(&nl->deferred, 1);
897 enable_parport_interrupts (dev);
898 ENABLE(dev->irq);
899 return OK;
901 return OK;
904 static int
905 plip_connection_close(struct net_device *dev, struct net_local *nl,
906 struct plip_local *snd, struct plip_local *rcv)
908 spin_lock_irq(&nl->lock);
909 if (nl->connection == PLIP_CN_CLOSING) {
910 nl->connection = PLIP_CN_NONE;
911 netif_wake_queue (dev);
913 spin_unlock_irq(&nl->lock);
914 if (nl->should_relinquish) {
915 nl->should_relinquish = nl->port_owner = 0;
916 parport_release(nl->pardev);
918 return OK;
921 /* PLIP_ERROR --- wait till other end settled */
922 static int
923 plip_error(struct net_device *dev, struct net_local *nl,
924 struct plip_local *snd, struct plip_local *rcv)
926 unsigned char status;
928 status = read_status(dev);
929 if ((status & 0xf8) == 0x80) {
930 if (net_debug > 2)
931 printk(KERN_DEBUG "%s: reset interface.\n", dev->name);
932 nl->connection = PLIP_CN_NONE;
933 nl->should_relinquish = 0;
934 netif_start_queue (dev);
935 enable_parport_interrupts (dev);
936 ENABLE(dev->irq);
937 netif_wake_queue (dev);
938 } else {
939 nl->is_deferred = 1;
940 schedule_delayed_work(&nl->deferred, 1);
943 return OK;
946 /* Handle the parallel port interrupts. */
947 static void
948 plip_interrupt(int irq, void *dev_id, struct pt_regs * regs)
950 struct net_device *dev = dev_id;
951 struct net_local *nl;
952 struct plip_local *rcv;
953 unsigned char c0;
955 if (dev == NULL) {
956 printk(KERN_DEBUG "plip_interrupt: irq %d for unknown device.\n", irq);
957 return;
960 nl = (struct net_local *)dev->priv;
961 rcv = &nl->rcv_data;
963 spin_lock_irq (&nl->lock);
965 c0 = read_status(dev);
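	/* 0xc0 is what the trigger nibble 0x08 from the other end looks like
	   on our status lines: its D3 drives ACK (bit 6) high, and its D4 is
	   low, which the inverted BUSY line reports as bit 7 set.  Anything
	   else is not a PLIP trigger. */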
966 if ((c0 & 0xf8) != 0xc0) {
967 if ((dev->irq != -1) && (net_debug > 1))
968 printk(KERN_DEBUG "%s: spurious interrupt\n", dev->name);
969 spin_unlock_irq (&nl->lock);
970 return;
973 if (net_debug > 3)
974 printk(KERN_DEBUG "%s: interrupt.\n", dev->name);
976 switch (nl->connection) {
977 case PLIP_CN_CLOSING:
978 netif_wake_queue (dev);
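		/* fall through: the peer triggered while we were closing, so
		   start receiving just as in the idle case. */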
979 case PLIP_CN_NONE:
980 case PLIP_CN_SEND:
981 rcv->state = PLIP_PK_TRIGGER;
982 nl->connection = PLIP_CN_RECEIVE;
983 nl->timeout_count = 0;
984 schedule_work(&nl->immediate);
985 break;
987 case PLIP_CN_RECEIVE:
988 /* May occur because there is a race condition
989 around the test and set of dev->interrupt.
990 Ignore this interrupt. */
991 break;
993 case PLIP_CN_ERROR:
994 printk(KERN_ERR "%s: receive interrupt in error state\n", dev->name);
995 break;
998 spin_unlock_irq(&nl->lock);
1001 static int
1002 plip_tx_packet(struct sk_buff *skb, struct net_device *dev)
1004 struct net_local *nl = (struct net_local *)dev->priv;
1005 struct plip_local *snd = &nl->snd_data;
1007 if (netif_queue_stopped(dev))
1008 return 1;
1010 /* We may need to grab the bus */
1011 if (!nl->port_owner) {
1012 if (parport_claim(nl->pardev))
1013 return 1;
1014 nl->port_owner = 1;
1017 netif_stop_queue (dev);
1019 if (skb->len > dev->mtu + dev->hard_header_len) {
1020 printk(KERN_WARNING "%s: packet too big, %d.\n", dev->name, (int)skb->len);
1021 netif_start_queue (dev);
1022 return 1;
1025 if (net_debug > 2)
1026 printk(KERN_DEBUG "%s: send request\n", dev->name);
1028 spin_lock_irq(&nl->lock);
1029 dev->trans_start = jiffies;
1030 snd->skb = skb;
1031 snd->length.h = skb->len;
1032 snd->state = PLIP_PK_TRIGGER;
1033 if (nl->connection == PLIP_CN_NONE) {
1034 nl->connection = PLIP_CN_SEND;
1035 nl->timeout_count = 0;
1037 schedule_work(&nl->immediate);
1038 spin_unlock_irq(&nl->lock);
1040 return 0;
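/* Build the pseudo MAC addresses used on the wire: two 0xfc bytes followed
   by the four bytes of an IPv4 address.  The source comes from dev->dev_addr
   (filled the same way by plip_init_dev()/plip_open()); the destination uses
   the first address configured on the interface. */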
1043 static void
1044 plip_rewrite_address(struct net_device *dev, struct ethhdr *eth)
1046 struct in_device *in_dev;
1048 if ((in_dev=dev->ip_ptr) != NULL) {
1049 /* Any address will do - we take the first */
1050 struct in_ifaddr *ifa=in_dev->ifa_list;
1051 if (ifa != NULL) {
1052 memcpy(eth->h_source, dev->dev_addr, 6);
1053 memset(eth->h_dest, 0xfc, 2);
1054 memcpy(eth->h_dest+2, &ifa->ifa_address, 4);
1059 static int
1060 plip_hard_header(struct sk_buff *skb, struct net_device *dev,
1061 unsigned short type, void *daddr,
1062 void *saddr, unsigned len)
1064 struct net_local *nl = (struct net_local *)dev->priv;
1065 int ret;
1067 if ((ret = nl->orig_hard_header(skb, dev, type, daddr, saddr, len)) >= 0)
1068 plip_rewrite_address (dev, (struct ethhdr *)skb->data);
1070 return ret;
1073 int plip_hard_header_cache(struct neighbour *neigh,
1074 struct hh_cache *hh)
1076 struct net_local *nl = (struct net_local *)neigh->dev->priv;
1077 int ret;
1079 if ((ret = nl->orig_hard_header_cache(neigh, hh)) == 0)
1081 struct ethhdr *eth;
1083 eth = (struct ethhdr*)(((u8*)hh->hh_data) +
1084 HH_DATA_OFF(sizeof(*eth)));
1085 plip_rewrite_address (neigh->dev, eth);
1088 return ret;
1091 /* Open/initialize the board. This is called (in the current kernel)
1092 sometime after booting when the 'ifconfig' program is run.
1094 This routine gets exclusive access to the parallel port by allocating
1095 its IRQ line.
1097 static int
1098 plip_open(struct net_device *dev)
1100 struct net_local *nl = (struct net_local *)dev->priv;
1101 struct in_device *in_dev;
1103 /* Grab the port */
1104 if (!nl->port_owner) {
1105 if (parport_claim(nl->pardev)) return -EAGAIN;
1106 nl->port_owner = 1;
1109 nl->should_relinquish = 0;
1111 /* Clear the data port. */
1112 write_data (dev, 0x00);
1114 /* Enable rx interrupt. */
1115 enable_parport_interrupts (dev);
1116 if (dev->irq == -1)
1118 atomic_set (&nl->kill_timer, 0);
1119 schedule_delayed_work(&nl->timer, 1);
1122 /* Initialize the state machine. */
1123 nl->rcv_data.state = nl->snd_data.state = PLIP_PK_DONE;
1124 nl->rcv_data.skb = nl->snd_data.skb = NULL;
1125 nl->connection = PLIP_CN_NONE;
1126 nl->is_deferred = 0;
1128 /* Fill in the MAC-level header.
1129 We used to abuse dev->broadcast to store the point-to-point
1130 MAC address, but we no longer do it. Instead, we fetch the
1131 interface address whenever it is needed, which is cheap enough
1132 because we use the hh_cache. Actually, abusing dev->broadcast
1133 didn't work, because when using plip_open the point-to-point
1134 address isn't yet known.
1135 PLIP doesn't have a real MAC address, but we need it to be
1136 DOS compatible, and to properly support taps (otherwise,
1137 when the device address isn't identical to the address of a
1138 received frame, the kernel incorrectly drops it). */
1140 if ((in_dev=dev->ip_ptr) != NULL) {
1141 /* Any address will do - we take the first. We already
1142 have the first two bytes filled with 0xfc, from
1143 plip_init_dev(). */
1144 struct in_ifaddr *ifa=in_dev->ifa_list;
1145 if (ifa != NULL) {
1146 memcpy(dev->dev_addr+2, &ifa->ifa_local, 4);
1150 netif_start_queue (dev);
1152 return 0;
1155 /* The inverse routine to plip_open (). */
1156 static int
1157 plip_close(struct net_device *dev)
1159 struct net_local *nl = (struct net_local *)dev->priv;
1160 struct plip_local *snd = &nl->snd_data;
1161 struct plip_local *rcv = &nl->rcv_data;
1163 netif_stop_queue (dev);
1164 DISABLE(dev->irq);
1165 synchronize_irq(dev->irq);
1167 if (dev->irq == -1)
1169 init_MUTEX_LOCKED (&nl->killed_timer_sem);
1170 atomic_set (&nl->kill_timer, 1);
1171 down (&nl->killed_timer_sem);
1174 #ifdef NOTDEF
1175 outb(0x00, PAR_DATA(dev));
1176 #endif
1177 nl->is_deferred = 0;
1178 nl->connection = PLIP_CN_NONE;
1179 if (nl->port_owner) {
1180 parport_release(nl->pardev);
1181 nl->port_owner = 0;
1184 snd->state = PLIP_PK_DONE;
1185 if (snd->skb) {
1186 dev_kfree_skb(snd->skb);
1187 snd->skb = NULL;
1189 rcv->state = PLIP_PK_DONE;
1190 if (rcv->skb) {
1191 kfree_skb(rcv->skb);
1192 rcv->skb = NULL;
1195 #ifdef NOTDEF
1196 /* Reset. */
1197 outb(0x00, PAR_CONTROL(dev));
1198 #endif
1199 return 0;
1202 static int
1203 plip_preempt(void *handle)
1205 struct net_device *dev = (struct net_device *)handle;
1206 struct net_local *nl = (struct net_local *)dev->priv;
1208 /* Stand our ground if a datagram is on the wire */
1209 if (nl->connection != PLIP_CN_NONE) {
1210 nl->should_relinquish = 1;
1211 return 1;
1214 nl->port_owner = 0; /* Remember that we released the bus */
1215 return 0;
1218 static void
1219 plip_wakeup(void *handle)
1221 struct net_device *dev = (struct net_device *)handle;
1222 struct net_local *nl = (struct net_local *)dev->priv;
1224 if (nl->port_owner) {
1225 /* Why are we being woken up? */
1226 printk(KERN_DEBUG "%s: why am I being woken up?\n", dev->name);
1227 if (!parport_claim(nl->pardev))
1228 /* bus_owner is already set (but why?) */
1229 printk(KERN_DEBUG "%s: I'm broken.\n", dev->name);
1230 else
1231 return;
1234 if (!(dev->flags & IFF_UP))
1235 /* Don't need the port when the interface is down */
1236 return;
1238 if (!parport_claim(nl->pardev)) {
1239 nl->port_owner = 1;
1240 /* Clear the data port. */
1241 write_data (dev, 0x00);
1244 return;
1247 static struct net_device_stats *
1248 plip_get_stats(struct net_device *dev)
1250 struct net_local *nl = (struct net_local *)dev->priv;
1251 struct net_device_stats *r = &nl->enet_stats;
1253 return r;
1256 static int
1257 plip_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
1259 struct net_local *nl = (struct net_local *) dev->priv;
1260 struct plipconf *pc = (struct plipconf *) &rq->ifr_data;
1262 switch(pc->pcmd) {
1263 case PLIP_GET_TIMEOUT:
1264 pc->trigger = nl->trigger;
1265 pc->nibble = nl->nibble;
1266 break;
1267 case PLIP_SET_TIMEOUT:
1268 if(!capable(CAP_NET_ADMIN))
1269 return -EPERM;
1270 nl->trigger = pc->trigger;
1271 nl->nibble = pc->nibble;
1272 break;
1273 default:
1274 return -EOPNOTSUPP;
1276 return 0;
1279 static int parport[PLIP_MAX] = { [0 ... PLIP_MAX-1] = -1 };
1280 static int timid;
1282 MODULE_PARM(parport, "1-" __MODULE_STRING(PLIP_MAX) "i");
1283 MODULE_PARM(timid, "1i");
1284 MODULE_PARM_DESC(parport, "List of parport device numbers for plip to use");
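/* Typical module usage (illustrative): "modprobe plip parport=0" binds a
   plip device to parport0; "timid=1" makes the driver skip ports that
   already have other parport devices registered. */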
1286 static struct net_device *dev_plip[PLIP_MAX] = { NULL, };
1288 static inline int
1289 plip_searchfor(int list[], int a)
1291 int i;
1292 for (i = 0; i < PLIP_MAX && list[i] != -1; i++) {
1293 if (list[i] == a) return 1;
1295 return 0;
1298 /* plip_attach() is called (by the parport code) when a port is
1299 * available to use. */
1300 static void plip_attach (struct parport *port)
1302 static int i;
1304 if ((parport[0] == -1 && (!timid || !port->devices)) ||
1305 plip_searchfor(parport, port->number)) {
1306 if (i == PLIP_MAX) {
1307 printk(KERN_ERR "plip: too many devices\n");
1308 return;
1310 dev_plip[i] = kmalloc(sizeof(struct net_device),
1311 GFP_KERNEL);
1312 if (!dev_plip[i]) {
1313 printk(KERN_ERR "plip: memory squeeze\n");
1314 return;
1316 memset(dev_plip[i], 0, sizeof(struct net_device));
1317 sprintf(dev_plip[i]->name, "plip%d", i);
1318 dev_plip[i]->priv = port;
1319 if (plip_init_dev(dev_plip[i],port) ||
1320 register_netdev(dev_plip[i])) {
1321 kfree(dev_plip[i]);
1322 dev_plip[i] = NULL;
1323 } else {
1324 i++;
1329 /* plip_detach() is called (by the parport code) when a port is
1330 * no longer available to use. */
1331 static void plip_detach (struct parport *port)
1333 /* Nothing to do */
1336 static struct parport_driver plip_driver = {
1337 .name = "plip",
1338 .attach = plip_attach,
1339 .detach = plip_detach
1342 static void __exit plip_cleanup_module (void)
1344 int i;
1346 parport_unregister_driver (&plip_driver);
1348 for (i=0; i < PLIP_MAX; i++) {
1349 if (dev_plip[i]) {
1350 struct net_local *nl =
1351 (struct net_local *)dev_plip[i]->priv;
1352 unregister_netdev(dev_plip[i]);
1353 if (nl->port_owner)
1354 parport_release(nl->pardev);
1355 parport_unregister_device(nl->pardev);
1356 kfree(dev_plip[i]->priv);
1357 kfree(dev_plip[i]);
1358 dev_plip[i] = NULL;
1363 #ifndef MODULE
1365 static int parport_ptr;
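/* Built-in (non-modular) configuration comes from the kernel command line,
   e.g. "plip=parport1" to bind to a specific port, "plip=timid" to avoid
   ports that already have devices, or "plip=0" to disable the driver. */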
1367 static int __init plip_setup(char *str)
1369 int ints[4];
1371 str = get_options(str, ARRAY_SIZE(ints), ints);
1373 /* Ugh. */
1374 if (!strncmp(str, "parport", 7)) {
1375 int n = simple_strtoul(str+7, NULL, 10);
1376 if (parport_ptr < PLIP_MAX)
1377 parport[parport_ptr++] = n;
1378 else
1379 printk(KERN_INFO "plip: too many ports, %s ignored.\n",
1380 str);
1381 } else if (!strcmp(str, "timid")) {
1382 timid = 1;
1383 } else {
1384 if (ints[0] == 0 || ints[1] == 0) {
1385 /* disable driver on "plip=" or "plip=0" */
1386 parport[0] = -2;
1387 } else {
1388 printk(KERN_WARNING "warning: 'plip=0x%x' ignored\n",
1389 ints[1]);
1392 return 1;
1395 __setup("plip=", plip_setup);
1397 #endif /* !MODULE */
1399 static int __init plip_init (void)
1401 if (parport[0] == -2)
1402 return 0;
1404 if (parport[0] != -1 && timid) {
1405 printk(KERN_WARNING "plip: warning, ignoring `timid' since specific ports given.\n");
1406 timid = 0;
1409 if (parport_register_driver (&plip_driver)) {
1410 printk (KERN_WARNING "plip: couldn't register driver\n");
1411 return 1;
1414 return 0;
1417 module_init(plip_init);
1418 module_exit(plip_cleanup_module);
1419 MODULE_LICENSE("GPL");
1422 * Local variables:
1423 * compile-command: "gcc -DMODULE -DMODVERSIONS -D__KERNEL__ -Wall -Wstrict-prototypes -O2 -g -fomit-frame-pointer -pipe -c plip.c"
1424 * End: