1 /* $Id: plip.c,v 1.3.6.2 1997/04/16 15:07:56 phil Exp $ */
2 /* PLIP: A parallel port "network" driver for Linux. */
3 /* This driver is for parallel port with 5-bit cable (LapLink (R) cable). */
5 * Authors: Donald Becker <becker@super.org>
6 * Tommy Thorn <thorn@daimi.aau.dk>
7 * Tanabe Hiroyasu <hiro@sanpo.t.u-tokyo.ac.jp>
8 * Alan Cox <gw4pts@gw4pts.ampr.org>
9 * Peter Bauer <100136.3530@compuserve.com>
10 * Niibe Yutaka <gniibe@mri.co.jp>
11 * Nimrod Zimerman <zimerman@mailandnews.com>
14 * Modularization and ifreq/ifmap support by Alan Cox.
15 * Rewritten by Niibe Yutaka.
16 * parport-sharing awareness code by Philip Blundell.
17 * SMP locking by Niibe Yutaka.
18 * Support for parallel ports with no IRQ (poll mode),
19 * Modifications to use the parallel port API
24 * - Module initialization.
26 * - Make sure other end is OK, before sending a packet.
27 * - Fix immediate timer problem.
30 * - Changed {enable,disable}_irq handling to make it work
31 * with new ("stack") semantics.
33 * This program is free software; you can redistribute it and/or
34 * modify it under the terms of the GNU General Public License
35 * as published by the Free Software Foundation; either version
36 * 2 of the License, or (at your option) any later version.
40 * Original version and the name 'PLIP' from Donald Becker <becker@super.org>
41 * inspired by Russ Nelson's parallel port packet driver.
44 * Tanabe Hiroyasu had changed the protocol, and it was in Linux v1.0.
45 * Because of the necessity to communicate to DOS machines with the
46 * Crynwr packet driver, Peter Bauer changed the protocol again
47 * back to original protocol.
49 * This version follows original PLIP protocol.
50 * So, this PLIP can't communicate with the PLIP of Linux v1.0.
54 * To use with DOS box, please do (Turn on ARP switch):
55 * # ifconfig plip[0-2] arp
57 static const char *version
= "NET3 PLIP version 2.4-parport gniibe@mri.co.jp\n";
61 Ideas and protocols came from Russ Nelson's <nelson@crynwr.com>
62 "parallel.asm" parallel port packet driver.
64 The "Crynwr" parallel port standard specifies the following protocol:
65 Trigger by sending nibble '0x8' (this causes interrupt on other end)
70 Each octet is sent as <wait for rx. '0x1?'> <send 0x10+(octet&0x0F)>
71 <wait for rx. '0x0?'> <send 0x00+((octet>>4)&0x0F)>
73 The packet is encapsulated as if it were ethernet.
75 The cable used is a de facto standard parallel null cable -- sold as
76 a "LapLink" cable by various places. You'll need a 12-conductor cable to
77 make one yourself. The wiring is:
80 D0->ERROR 2 - 15 15 - 2
81 D1->SLCT 3 - 13 13 - 3
82 D2->PAPOUT 4 - 12 12 - 4
84 D4->BUSY 6 - 11 11 - 6
85 Do not connect the other pins. They are
87 STROBE is 1, FEED is 14, INIT is 16
88 extra grounds are 18,19,20,21,22,23,24
91 #include <linux/module.h>
92 #include <linux/kernel.h>
93 #include <linux/sched.h>
94 #include <linux/types.h>
95 #include <linux/fcntl.h>
96 #include <linux/interrupt.h>
97 #include <linux/string.h>
98 #include <linux/ptrace.h>
99 #include <linux/if_ether.h>
100 #include <asm/system.h>
101 #include <linux/in.h>
102 #include <linux/errno.h>
103 #include <linux/delay.h>
104 #include <linux/lp.h>
105 #include <linux/init.h>
107 #include <linux/netdevice.h>
108 #include <linux/etherdevice.h>
109 #include <linux/inetdevice.h>
110 #include <linux/skbuff.h>
111 #include <linux/if_plip.h>
112 #include <net/neighbour.h>
114 #include <linux/tqueue.h>
115 #include <linux/ioport.h>
116 #include <linux/spinlock.h>
117 #include <asm/bitops.h>
119 #include <asm/byteorder.h>
120 #include <asm/semaphore.h>
122 #include <linux/parport.h>
124 /* Maximum number of devices to support. */
127 /* Use 0 for production, 1 for verification, >2 for debug */
131 static unsigned int net_debug
= NET_DEBUG
;
133 #define ENABLE(irq) if (irq != -1) enable_irq(irq)
134 #define DISABLE(irq) if (irq != -1) disable_irq(irq)
136 /* In micro second */
137 #define PLIP_DELAY_UNIT 1
139 /* Connection time out = PLIP_TRIGGER_WAIT * PLIP_DELAY_UNIT usec */
140 #define PLIP_TRIGGER_WAIT 500
142 /* Nibble time out = PLIP_NIBBLE_WAIT * PLIP_DELAY_UNIT usec */
143 #define PLIP_NIBBLE_WAIT 3000
146 static void plip_kick_bh(struct net_device
*dev
);
147 static void plip_bh(struct net_device
*dev
);
148 static void plip_timer_bh(struct net_device
*dev
);
150 /* Interrupt handler */
151 static void plip_interrupt(int irq
, void *dev_id
, struct pt_regs
*regs
);
153 /* Functions for DEV methods */
154 static int plip_tx_packet(struct sk_buff
*skb
, struct net_device
*dev
);
155 static int plip_hard_header(struct sk_buff
*skb
, struct net_device
*dev
,
156 unsigned short type
, void *daddr
,
157 void *saddr
, unsigned len
);
158 static int plip_hard_header_cache(struct neighbour
*neigh
,
159 struct hh_cache
*hh
);
160 static int plip_open(struct net_device
*dev
);
161 static int plip_close(struct net_device
*dev
);
162 static struct net_device_stats
*plip_get_stats(struct net_device
*dev
);
163 static int plip_ioctl(struct net_device
*dev
, struct ifreq
*ifr
, int cmd
);
164 static int plip_preempt(void *handle
);
165 static void plip_wakeup(void *handle
);
167 enum plip_connection_state
{
175 enum plip_packet_state
{
184 enum plip_nibble_state
{
191 enum plip_packet_state state
;
192 enum plip_nibble_state nibble
;
195 #if defined(__LITTLE_ENDIAN)
198 #elif defined(__BIG_ENDIAN)
202 #error "Please fix the endianness defines in <asm/byteorder.h>"
208 unsigned char checksum
;
214 struct net_device_stats enet_stats
;
215 struct tq_struct immediate
;
216 struct tq_struct deferred
;
217 struct tq_struct timer
;
218 struct plip_local snd_data
;
219 struct plip_local rcv_data
;
220 struct pardevice
*pardev
;
221 unsigned long trigger
;
222 unsigned long nibble
;
223 enum plip_connection_state connection
;
224 unsigned short timeout_count
;
227 int should_relinquish
;
228 int (*orig_hard_header
)(struct sk_buff
*skb
, struct net_device
*dev
,
229 unsigned short type
, void *daddr
,
230 void *saddr
, unsigned len
);
231 int (*orig_hard_header_cache
)(struct neighbour
*neigh
,
232 struct hh_cache
*hh
);
235 struct semaphore killed_timer_sem
;
238 inline static void enable_parport_interrupts (struct net_device
*dev
)
242 struct parport
*port
=
243 ((struct net_local
*)dev
->priv
)->pardev
->port
;
244 port
->ops
->enable_irq (port
);
248 inline static void disable_parport_interrupts (struct net_device
*dev
)
252 struct parport
*port
=
253 ((struct net_local
*)dev
->priv
)->pardev
->port
;
254 port
->ops
->disable_irq (port
);
258 inline static void write_data (struct net_device
*dev
, unsigned char data
)
260 struct parport
*port
=
261 ((struct net_local
*)dev
->priv
)->pardev
->port
;
263 port
->ops
->write_data (port
, data
);
266 inline static unsigned char read_status (struct net_device
*dev
)
268 struct parport
*port
=
269 ((struct net_local
*)dev
->priv
)->pardev
->port
;
271 return port
->ops
->read_status (port
);
274 /* Entry point of PLIP driver.
275 Probe the hardware, and register/initialize the driver.
277 PLIP is rather weird, because of the way it interacts with the parport
278 system. It is _not_ initialised from Space.c. Instead, plip_init()
279 is called, and that function makes up a "struct net_device" for each port, and
284 plip_init_dev(struct net_device
*dev
, struct parport
*pb
)
286 struct net_local
*nl
;
287 struct pardevice
*pardev
;
289 SET_MODULE_OWNER(dev
);
291 dev
->base_addr
= pb
->base
;
294 printk(KERN_INFO
"plip: %s has no IRQ. Using IRQ-less mode,"
295 "which is fairly inefficient!\n", pb
->name
);
298 pardev
= parport_register_device(pb
, dev
->name
, plip_preempt
,
299 plip_wakeup
, plip_interrupt
,
305 printk(KERN_INFO
"%s", version
);
307 printk(KERN_INFO
"%s: Parallel port at %#3lx, using IRQ %d.\n",
308 dev
->name
, dev
->base_addr
, dev
->irq
);
310 printk(KERN_INFO
"%s: Parallel port at %#3lx, not using IRQ.\n",
311 dev
->name
, dev
->base_addr
);
313 /* Fill in the generic fields of the device structure. */
316 /* Then, override parts of it */
317 dev
->hard_start_xmit
= plip_tx_packet
;
318 dev
->open
= plip_open
;
319 dev
->stop
= plip_close
;
320 dev
->get_stats
= plip_get_stats
;
321 dev
->do_ioctl
= plip_ioctl
;
322 dev
->header_cache_update
= NULL
;
323 dev
->tx_queue_len
= 10;
324 dev
->flags
= IFF_POINTOPOINT
|IFF_NOARP
;
325 memset(dev
->dev_addr
, 0xfc, ETH_ALEN
);
327 /* Set the private structure */
328 dev
->priv
= kmalloc(sizeof (struct net_local
), GFP_KERNEL
);
329 if (dev
->priv
== NULL
) {
330 printk(KERN_ERR
"%s: out of memory\n", dev
->name
);
331 parport_unregister_device(pardev
);
334 memset(dev
->priv
, 0, sizeof(struct net_local
));
335 nl
= (struct net_local
*) dev
->priv
;
337 nl
->orig_hard_header
= dev
->hard_header
;
338 dev
->hard_header
= plip_hard_header
;
340 nl
->orig_hard_header_cache
= dev
->hard_header_cache
;
341 dev
->hard_header_cache
= plip_hard_header_cache
;
347 /* Initialize constants */
348 nl
->trigger
= PLIP_TRIGGER_WAIT
;
349 nl
->nibble
= PLIP_NIBBLE_WAIT
;
351 /* Initialize task queue structures */
352 INIT_LIST_HEAD(&nl
->immediate
.list
);
353 nl
->immediate
.sync
= 0;
354 nl
->immediate
.routine
= (void (*)(void *))plip_bh
;
355 nl
->immediate
.data
= dev
;
357 INIT_LIST_HEAD(&nl
->deferred
.list
);
358 nl
->deferred
.sync
= 0;
359 nl
->deferred
.routine
= (void (*)(void *))plip_kick_bh
;
360 nl
->deferred
.data
= dev
;
362 if (dev
->irq
== -1) {
363 INIT_LIST_HEAD(&nl
->timer
.list
);
365 nl
->timer
.routine
= (void (*)(void *))plip_timer_bh
;
366 nl
->timer
.data
= dev
;
369 spin_lock_init(&nl
->lock
);
374 /* Bottom half handler for the delayed request.
375 This routine is kicked by do_timer().
376 Request `plip_bh' to be invoked. */
378 plip_kick_bh(struct net_device
*dev
)
380 struct net_local
*nl
= (struct net_local
*)dev
->priv
;
382 if (nl
->is_deferred
) {
383 queue_task(&nl
->immediate
, &tq_immediate
);
384 mark_bh(IMMEDIATE_BH
);
388 /* Forward declarations of internal routines */
389 static int plip_none(struct net_device
*, struct net_local
*,
390 struct plip_local
*, struct plip_local
*);
391 static int plip_receive_packet(struct net_device
*, struct net_local
*,
392 struct plip_local
*, struct plip_local
*);
393 static int plip_send_packet(struct net_device
*, struct net_local
*,
394 struct plip_local
*, struct plip_local
*);
395 static int plip_connection_close(struct net_device
*, struct net_local
*,
396 struct plip_local
*, struct plip_local
*);
397 static int plip_error(struct net_device
*, struct net_local
*,
398 struct plip_local
*, struct plip_local
*);
399 static int plip_bh_timeout_error(struct net_device
*dev
, struct net_local
*nl
,
400 struct plip_local
*snd
,
401 struct plip_local
*rcv
,
409 typedef int (*plip_func
)(struct net_device
*dev
, struct net_local
*nl
,
410 struct plip_local
*snd
, struct plip_local
*rcv
);
412 static plip_func connection_state_table
[] =
417 plip_connection_close
,
421 /* Bottom half handler of PLIP. */
423 plip_bh(struct net_device
*dev
)
425 struct net_local
*nl
= (struct net_local
*)dev
->priv
;
426 struct plip_local
*snd
= &nl
->snd_data
;
427 struct plip_local
*rcv
= &nl
->rcv_data
;
432 f
= connection_state_table
[nl
->connection
];
433 if ((r
= (*f
)(dev
, nl
, snd
, rcv
)) != OK
434 && (r
= plip_bh_timeout_error(dev
, nl
, snd
, rcv
, r
)) != OK
) {
436 queue_task(&nl
->deferred
, &tq_timer
);
441 plip_timer_bh(struct net_device
*dev
)
443 struct net_local
*nl
= (struct net_local
*)dev
->priv
;
445 if (!(atomic_read (&nl
->kill_timer
))) {
446 plip_interrupt (-1, dev
, NULL
);
448 queue_task (&nl
->timer
, &tq_timer
);
451 up (&nl
->killed_timer_sem
);
456 plip_bh_timeout_error(struct net_device
*dev
, struct net_local
*nl
,
457 struct plip_local
*snd
, struct plip_local
*rcv
,
462 * This is tricky. If we got here from the beginning of send (either
463 * with ERROR or HS_TIMEOUT) we have IRQ enabled. Otherwise it's
464 * already disabled. With the old variant of {enable,disable}_irq()
465 * extra disable_irq() was a no-op. Now it became mortal - it's
466 * unbalanced and thus we'll never re-enable IRQ (until rmmod plip,
467 * that is). So we have to treat HS_TIMEOUT and ERROR from send
471 spin_lock_irq(&nl
->lock
);
472 if (nl
->connection
== PLIP_CN_SEND
) {
474 if (error
!= ERROR
) { /* Timeout */
476 if ((error
== HS_TIMEOUT
477 && nl
->timeout_count
<= 10)
478 || nl
->timeout_count
<= 3) {
479 spin_unlock_irq(&nl
->lock
);
480 /* Try again later */
483 c0
= read_status(dev
);
484 printk(KERN_WARNING
"%s: transmit timeout(%d,%02x)\n",
485 dev
->name
, snd
->state
, c0
);
488 nl
->enet_stats
.tx_errors
++;
489 nl
->enet_stats
.tx_aborted_errors
++;
490 } else if (nl
->connection
== PLIP_CN_RECEIVE
) {
491 if (rcv
->state
== PLIP_PK_TRIGGER
) {
492 /* Transmission was interrupted. */
493 spin_unlock_irq(&nl
->lock
);
496 if (error
!= ERROR
) { /* Timeout */
497 if (++nl
->timeout_count
<= 3) {
498 spin_unlock_irq(&nl
->lock
);
499 /* Try again later */
502 c0
= read_status(dev
);
503 printk(KERN_WARNING
"%s: receive timeout(%d,%02x)\n",
504 dev
->name
, rcv
->state
, c0
);
506 nl
->enet_stats
.rx_dropped
++;
508 rcv
->state
= PLIP_PK_DONE
;
513 snd
->state
= PLIP_PK_DONE
;
515 dev_kfree_skb(snd
->skb
);
518 spin_unlock_irq(&nl
->lock
);
519 if (error
== HS_TIMEOUT
) {
523 disable_parport_interrupts (dev
);
524 netif_stop_queue (dev
);
525 nl
->connection
= PLIP_CN_ERROR
;
526 write_data (dev
, 0x00);
532 plip_none(struct net_device
*dev
, struct net_local
*nl
,
533 struct plip_local
*snd
, struct plip_local
*rcv
)
538 /* PLIP_RECEIVE --- receive a byte(two nibbles)
539 Returns OK on success, TIMEOUT on timeout */
541 plip_receive(unsigned short nibble_timeout
, struct net_device
*dev
,
542 enum plip_nibble_state
*ns_p
, unsigned char *data_p
)
544 unsigned char c0
, c1
;
551 c0
= read_status(dev
);
552 udelay(PLIP_DELAY_UNIT
);
553 if ((c0
& 0x80) == 0) {
554 c1
= read_status(dev
);
561 *data_p
= (c0
>> 3) & 0x0f;
562 write_data (dev
, 0x10); /* send ACK */
568 c0
= read_status(dev
);
569 udelay(PLIP_DELAY_UNIT
);
571 c1
= read_status(dev
);
578 *data_p
|= (c0
<< 1) & 0xf0;
579 write_data (dev
, 0x00); /* send ACK */
580 *ns_p
= PLIP_NB_BEGIN
;
588 * Determine the packet's protocol ID. The rule here is that we
589 * assume 802.3 if the type field is short enough to be a length.
590 * This is normal practice and works for any 'now in use' protocol.
592 * PLIP is ethernet ish but the daddr might not be valid if unicast.
593 * PLIP fortunately has no bus architecture (its Point-to-point).
595 * We can't fix the daddr thing as that quirk (more bug) is embedded
596 * in far too many old systems not all even running Linux.
599 static unsigned short plip_type_trans(struct sk_buff
*skb
, struct net_device
*dev
)
604 skb
->mac
.raw
=skb
->data
;
605 skb_pull(skb
,dev
->hard_header_len
);
606 eth
= skb
->mac
.ethernet
;
610 if(memcmp(eth
->h_dest
,dev
->broadcast
, ETH_ALEN
)==0)
611 skb
->pkt_type
=PACKET_BROADCAST
;
613 skb
->pkt_type
=PACKET_MULTICAST
;
617 * This ALLMULTI check should be redundant by 1.4
618 * so don't forget to remove it.
621 if (ntohs(eth
->h_proto
) >= 1536)
627 * This is a magic hack to spot IPX packets. Older Novell breaks
628 * the protocol design and runs IPX over 802.3 without an 802.2 LLC
629 * layer. We look for FFFF which isn't a used 802.2 SSAP/DSAP. This
630 * won't work for fault tolerant netware but does for the rest.
632 if (*(unsigned short *)rawp
== 0xFFFF)
633 return htons(ETH_P_802_3
);
638 return htons(ETH_P_802_2
);
642 /* PLIP_RECEIVE_PACKET --- receive a packet */
644 plip_receive_packet(struct net_device
*dev
, struct net_local
*nl
,
645 struct plip_local
*snd
, struct plip_local
*rcv
)
647 unsigned short nibble_timeout
= nl
->nibble
;
650 switch (rcv
->state
) {
651 case PLIP_PK_TRIGGER
:
653 /* Don't need to synchronize irq, as we can safely ignore it */
654 disable_parport_interrupts (dev
);
655 write_data (dev
, 0x01); /* send ACK */
657 printk(KERN_DEBUG
"%s: receive start\n", dev
->name
);
658 rcv
->state
= PLIP_PK_LENGTH_LSB
;
659 rcv
->nibble
= PLIP_NB_BEGIN
;
661 case PLIP_PK_LENGTH_LSB
:
662 if (snd
->state
!= PLIP_PK_DONE
) {
663 if (plip_receive(nl
->trigger
, dev
,
664 &rcv
->nibble
, &rcv
->length
.b
.lsb
)) {
665 /* collision, here dev->tbusy == 1 */
666 rcv
->state
= PLIP_PK_DONE
;
668 nl
->connection
= PLIP_CN_SEND
;
669 queue_task(&nl
->deferred
, &tq_timer
);
670 enable_parport_interrupts (dev
);
675 if (plip_receive(nibble_timeout
, dev
,
676 &rcv
->nibble
, &rcv
->length
.b
.lsb
))
679 rcv
->state
= PLIP_PK_LENGTH_MSB
;
681 case PLIP_PK_LENGTH_MSB
:
682 if (plip_receive(nibble_timeout
, dev
,
683 &rcv
->nibble
, &rcv
->length
.b
.msb
))
685 if (rcv
->length
.h
> dev
->mtu
+ dev
->hard_header_len
686 || rcv
->length
.h
< 8) {
687 printk(KERN_WARNING
"%s: bogus packet size %d.\n", dev
->name
, rcv
->length
.h
);
690 /* Malloc up new buffer. */
691 rcv
->skb
= dev_alloc_skb(rcv
->length
.h
+ 2);
692 if (rcv
->skb
== NULL
) {
693 printk(KERN_ERR
"%s: Memory squeeze.\n", dev
->name
);
696 skb_reserve(rcv
->skb
, 2); /* Align IP on 16 byte boundaries */
697 skb_put(rcv
->skb
,rcv
->length
.h
);
699 rcv
->state
= PLIP_PK_DATA
;
704 lbuf
= rcv
->skb
->data
;
706 if (plip_receive(nibble_timeout
, dev
,
707 &rcv
->nibble
, &lbuf
[rcv
->byte
]))
709 while (++rcv
->byte
< rcv
->length
.h
);
711 rcv
->checksum
+= lbuf
[--rcv
->byte
];
713 rcv
->state
= PLIP_PK_CHECKSUM
;
715 case PLIP_PK_CHECKSUM
:
716 if (plip_receive(nibble_timeout
, dev
,
717 &rcv
->nibble
, &rcv
->data
))
719 if (rcv
->data
!= rcv
->checksum
) {
720 nl
->enet_stats
.rx_crc_errors
++;
722 printk(KERN_DEBUG
"%s: checksum error\n", dev
->name
);
725 rcv
->state
= PLIP_PK_DONE
;
728 /* Inform the upper layer for the arrival of a packet. */
729 rcv
->skb
->protocol
=plip_type_trans(rcv
->skb
, dev
);
731 nl
->enet_stats
.rx_bytes
+= rcv
->length
.h
;
732 nl
->enet_stats
.rx_packets
++;
735 printk(KERN_DEBUG
"%s: receive end\n", dev
->name
);
737 /* Close the connection. */
738 write_data (dev
, 0x00);
739 spin_lock_irq(&nl
->lock
);
740 if (snd
->state
!= PLIP_PK_DONE
) {
741 nl
->connection
= PLIP_CN_SEND
;
742 spin_unlock_irq(&nl
->lock
);
743 queue_task(&nl
->immediate
, &tq_immediate
);
744 mark_bh(IMMEDIATE_BH
);
745 enable_parport_interrupts (dev
);
749 nl
->connection
= PLIP_CN_NONE
;
750 spin_unlock_irq(&nl
->lock
);
751 enable_parport_interrupts (dev
);
759 /* PLIP_SEND --- send a byte (two nibbles)
760 Returns OK on success, TIMEOUT when timeout */
762 plip_send(unsigned short nibble_timeout
, struct net_device
*dev
,
763 enum plip_nibble_state
*ns_p
, unsigned char data
)
770 write_data (dev
, data
& 0x0f);
774 write_data (dev
, 0x10 | (data
& 0x0f));
777 c0
= read_status(dev
);
778 if ((c0
& 0x80) == 0)
782 udelay(PLIP_DELAY_UNIT
);
784 write_data (dev
, 0x10 | (data
>> 4));
788 write_data (dev
, (data
>> 4));
791 c0
= read_status(dev
);
796 udelay(PLIP_DELAY_UNIT
);
798 *ns_p
= PLIP_NB_BEGIN
;
804 /* PLIP_SEND_PACKET --- send a packet */
806 plip_send_packet(struct net_device
*dev
, struct net_local
*nl
,
807 struct plip_local
*snd
, struct plip_local
*rcv
)
809 unsigned short nibble_timeout
= nl
->nibble
;
814 if (snd
->skb
== NULL
|| (lbuf
= snd
->skb
->data
) == NULL
) {
815 printk(KERN_DEBUG
"%s: send skb lost\n", dev
->name
);
816 snd
->state
= PLIP_PK_DONE
;
821 switch (snd
->state
) {
822 case PLIP_PK_TRIGGER
:
823 if ((read_status(dev
) & 0xf8) != 0x80)
826 /* Trigger remote rx interrupt. */
827 write_data (dev
, 0x08);
830 udelay(PLIP_DELAY_UNIT
);
831 spin_lock_irq(&nl
->lock
);
832 if (nl
->connection
== PLIP_CN_RECEIVE
) {
833 spin_unlock_irq(&nl
->lock
);
835 nl
->enet_stats
.collisions
++;
838 c0
= read_status(dev
);
840 spin_unlock_irq(&nl
->lock
);
843 if (nl
->connection
== PLIP_CN_RECEIVE
) {
845 We don't need to enable irq,
846 as it is soon disabled. */
847 /* Yes, we do. New variant of
848 {enable,disable}_irq *counts*
851 nl
->enet_stats
.collisions
++;
854 disable_parport_interrupts (dev
);
856 printk(KERN_DEBUG
"%s: send start\n", dev
->name
);
857 snd
->state
= PLIP_PK_LENGTH_LSB
;
858 snd
->nibble
= PLIP_NB_BEGIN
;
859 nl
->timeout_count
= 0;
862 spin_unlock_irq(&nl
->lock
);
864 write_data (dev
, 0x00);
869 case PLIP_PK_LENGTH_LSB
:
870 if (plip_send(nibble_timeout
, dev
,
871 &snd
->nibble
, snd
->length
.b
.lsb
))
873 snd
->state
= PLIP_PK_LENGTH_MSB
;
875 case PLIP_PK_LENGTH_MSB
:
876 if (plip_send(nibble_timeout
, dev
,
877 &snd
->nibble
, snd
->length
.b
.msb
))
879 snd
->state
= PLIP_PK_DATA
;
885 if (plip_send(nibble_timeout
, dev
,
886 &snd
->nibble
, lbuf
[snd
->byte
]))
888 while (++snd
->byte
< snd
->length
.h
);
890 snd
->checksum
+= lbuf
[--snd
->byte
];
892 snd
->state
= PLIP_PK_CHECKSUM
;
894 case PLIP_PK_CHECKSUM
:
895 if (plip_send(nibble_timeout
, dev
,
896 &snd
->nibble
, snd
->checksum
))
899 nl
->enet_stats
.tx_bytes
+= snd
->skb
->len
;
900 dev_kfree_skb(snd
->skb
);
901 nl
->enet_stats
.tx_packets
++;
902 snd
->state
= PLIP_PK_DONE
;
905 /* Close the connection */
906 write_data (dev
, 0x00);
909 printk(KERN_DEBUG
"%s: send end\n", dev
->name
);
910 nl
->connection
= PLIP_CN_CLOSING
;
912 queue_task(&nl
->deferred
, &tq_timer
);
913 enable_parport_interrupts (dev
);
921 plip_connection_close(struct net_device
*dev
, struct net_local
*nl
,
922 struct plip_local
*snd
, struct plip_local
*rcv
)
924 spin_lock_irq(&nl
->lock
);
925 if (nl
->connection
== PLIP_CN_CLOSING
) {
926 nl
->connection
= PLIP_CN_NONE
;
927 netif_wake_queue (dev
);
929 spin_unlock_irq(&nl
->lock
);
930 if (nl
->should_relinquish
) {
931 nl
->should_relinquish
= nl
->port_owner
= 0;
932 parport_release(nl
->pardev
);
937 /* PLIP_ERROR --- wait till other end settled */
939 plip_error(struct net_device
*dev
, struct net_local
*nl
,
940 struct plip_local
*snd
, struct plip_local
*rcv
)
942 unsigned char status
;
944 status
= read_status(dev
);
945 if ((status
& 0xf8) == 0x80) {
947 printk(KERN_DEBUG
"%s: reset interface.\n", dev
->name
);
948 nl
->connection
= PLIP_CN_NONE
;
949 nl
->should_relinquish
= 0;
950 netif_start_queue (dev
);
951 enable_parport_interrupts (dev
);
953 netif_wake_queue (dev
);
956 queue_task(&nl
->deferred
, &tq_timer
);
962 /* Handle the parallel port interrupts. */
964 plip_interrupt(int irq
, void *dev_id
, struct pt_regs
* regs
)
966 struct net_device
*dev
= dev_id
;
967 struct net_local
*nl
;
968 struct plip_local
*rcv
;
972 printk(KERN_DEBUG
"plip_interrupt: irq %d for unknown device.\n", irq
);
976 nl
= (struct net_local
*)dev
->priv
;
979 spin_lock_irq (&nl
->lock
);
981 c0
= read_status(dev
);
982 if ((c0
& 0xf8) != 0xc0) {
983 if ((dev
->irq
!= -1) && (net_debug
> 1))
984 printk(KERN_DEBUG
"%s: spurious interrupt\n", dev
->name
);
985 spin_unlock_irq (&nl
->lock
);
990 printk(KERN_DEBUG
"%s: interrupt.\n", dev
->name
);
992 switch (nl
->connection
) {
993 case PLIP_CN_CLOSING
:
994 netif_wake_queue (dev
);
997 dev
->last_rx
= jiffies
;
998 rcv
->state
= PLIP_PK_TRIGGER
;
999 nl
->connection
= PLIP_CN_RECEIVE
;
1000 nl
->timeout_count
= 0;
1001 queue_task(&nl
->immediate
, &tq_immediate
);
1002 mark_bh(IMMEDIATE_BH
);
1005 case PLIP_CN_RECEIVE
:
1006 /* May occur because there is race condition
1007 around test and set of dev->interrupt.
1008 Ignore this interrupt. */
1012 printk(KERN_ERR
"%s: receive interrupt in error state\n", dev
->name
);
1016 spin_unlock_irq(&nl
->lock
);
1020 plip_tx_packet(struct sk_buff
*skb
, struct net_device
*dev
)
1022 struct net_local
*nl
= (struct net_local
*)dev
->priv
;
1023 struct plip_local
*snd
= &nl
->snd_data
;
1025 if (netif_queue_stopped(dev
))
1028 /* We may need to grab the bus */
1029 if (!nl
->port_owner
) {
1030 if (parport_claim(nl
->pardev
))
1035 netif_stop_queue (dev
);
1037 if (skb
->len
> dev
->mtu
+ dev
->hard_header_len
) {
1038 printk(KERN_WARNING
"%s: packet too big, %d.\n", dev
->name
, (int)skb
->len
);
1039 netif_start_queue (dev
);
1044 printk(KERN_DEBUG
"%s: send request\n", dev
->name
);
1046 spin_lock_irq(&nl
->lock
);
1047 dev
->trans_start
= jiffies
;
1049 snd
->length
.h
= skb
->len
;
1050 snd
->state
= PLIP_PK_TRIGGER
;
1051 if (nl
->connection
== PLIP_CN_NONE
) {
1052 nl
->connection
= PLIP_CN_SEND
;
1053 nl
->timeout_count
= 0;
1055 queue_task(&nl
->immediate
, &tq_immediate
);
1056 mark_bh(IMMEDIATE_BH
);
1057 spin_unlock_irq(&nl
->lock
);
1063 plip_rewrite_address(struct net_device
*dev
, struct ethhdr
*eth
)
1065 struct in_device
*in_dev
;
1067 if ((in_dev
=dev
->ip_ptr
) != NULL
) {
1068 /* Any address will do - we take the first */
1069 struct in_ifaddr
*ifa
=in_dev
->ifa_list
;
1071 memcpy(eth
->h_source
, dev
->dev_addr
, 6);
1072 memset(eth
->h_dest
, 0xfc, 2);
1073 memcpy(eth
->h_dest
+2, &ifa
->ifa_address
, 4);
1079 plip_hard_header(struct sk_buff
*skb
, struct net_device
*dev
,
1080 unsigned short type
, void *daddr
,
1081 void *saddr
, unsigned len
)
1083 struct net_local
*nl
= (struct net_local
*)dev
->priv
;
1086 if ((ret
= nl
->orig_hard_header(skb
, dev
, type
, daddr
, saddr
, len
)) >= 0)
1087 plip_rewrite_address (dev
, (struct ethhdr
*)skb
->data
);
1092 int plip_hard_header_cache(struct neighbour
*neigh
,
1093 struct hh_cache
*hh
)
1095 struct net_local
*nl
= (struct net_local
*)neigh
->dev
->priv
;
1098 if ((ret
= nl
->orig_hard_header_cache(neigh
, hh
)) == 0)
1100 struct ethhdr
*eth
= (struct ethhdr
*)(((u8
*)hh
->hh_data
) + 2);
1101 plip_rewrite_address (neigh
->dev
, eth
);
1107 /* Open/initialize the board. This is called (in the current kernel)
1108 sometime after booting when the 'ifconfig' program is run.
1110 This routine gets exclusive access to the parallel port by allocating
1114 plip_open(struct net_device
*dev
)
1116 struct net_local
*nl
= (struct net_local
*)dev
->priv
;
1117 struct in_device
*in_dev
;
1120 if (!nl
->port_owner
) {
1121 if (parport_claim(nl
->pardev
)) return -EAGAIN
;
1125 nl
->should_relinquish
= 0;
1127 /* Clear the data port. */
1128 write_data (dev
, 0x00);
1130 /* Enable rx interrupt. */
1131 enable_parport_interrupts (dev
);
1134 atomic_set (&nl
->kill_timer
, 0);
1135 queue_task (&nl
->timer
, &tq_timer
);
1138 /* Initialize the state machine. */
1139 nl
->rcv_data
.state
= nl
->snd_data
.state
= PLIP_PK_DONE
;
1140 nl
->rcv_data
.skb
= nl
->snd_data
.skb
= NULL
;
1141 nl
->connection
= PLIP_CN_NONE
;
1142 nl
->is_deferred
= 0;
1144 /* Fill in the MAC-level header.
1145 We used to abuse dev->broadcast to store the point-to-point
1146 MAC address, but we no longer do it. Instead, we fetch the
1147 interface address whenever it is needed, which is cheap enough
1148 because we use the hh_cache. Actually, abusing dev->broadcast
1149 didn't work, because when using plip_open the point-to-point
1150 address isn't yet known.
1151 PLIP doesn't have a real MAC address, but we need it to be
1152 DOS compatible, and to properly support taps (otherwise,
1153 when the device address isn't identical to the address of a
1154 received frame, the kernel incorrectly drops it). */
1156 if ((in_dev
=dev
->ip_ptr
) != NULL
) {
1157 /* Any address will do - we take the first. We already
1158 have the first two bytes filled with 0xfc, from
1160 struct in_ifaddr
*ifa
=in_dev
->ifa_list
;
1162 memcpy(dev
->dev_addr
+2, &ifa
->ifa_local
, 4);
1166 netif_start_queue (dev
);
1171 /* The inverse routine to plip_open (). */
1173 plip_close(struct net_device
*dev
)
1175 struct net_local
*nl
= (struct net_local
*)dev
->priv
;
1176 struct plip_local
*snd
= &nl
->snd_data
;
1177 struct plip_local
*rcv
= &nl
->rcv_data
;
1179 netif_stop_queue (dev
);
1185 init_MUTEX_LOCKED (&nl
->killed_timer_sem
);
1186 atomic_set (&nl
->kill_timer
, 1);
1187 down (&nl
->killed_timer_sem
);
1191 outb(0x00, PAR_DATA(dev
));
1193 nl
->is_deferred
= 0;
1194 nl
->connection
= PLIP_CN_NONE
;
1195 if (nl
->port_owner
) {
1196 parport_release(nl
->pardev
);
1200 snd
->state
= PLIP_PK_DONE
;
1202 dev_kfree_skb(snd
->skb
);
1205 rcv
->state
= PLIP_PK_DONE
;
1207 kfree_skb(rcv
->skb
);
1213 outb(0x00, PAR_CONTROL(dev
));
1219 plip_preempt(void *handle
)
1221 struct net_device
*dev
= (struct net_device
*)handle
;
1222 struct net_local
*nl
= (struct net_local
*)dev
->priv
;
1224 /* Stand our ground if a datagram is on the wire */
1225 if (nl
->connection
!= PLIP_CN_NONE
) {
1226 nl
->should_relinquish
= 1;
1230 nl
->port_owner
= 0; /* Remember that we released the bus */
1235 plip_wakeup(void *handle
)
1237 struct net_device
*dev
= (struct net_device
*)handle
;
1238 struct net_local
*nl
= (struct net_local
*)dev
->priv
;
1240 if (nl
->port_owner
) {
1241 /* Why are we being woken up? */
1242 printk(KERN_DEBUG
"%s: why am I being woken up?\n", dev
->name
);
1243 if (!parport_claim(nl
->pardev
))
1244 /* bus_owner is already set (but why?) */
1245 printk(KERN_DEBUG
"%s: I'm broken.\n", dev
->name
);
1250 if (!(dev
->flags
& IFF_UP
))
1251 /* Don't need the port when the interface is down */
1254 if (!parport_claim(nl
->pardev
)) {
1256 /* Clear the data port. */
1257 write_data (dev
, 0x00);
1263 static struct net_device_stats
*
1264 plip_get_stats(struct net_device
*dev
)
1266 struct net_local
*nl
= (struct net_local
*)dev
->priv
;
1267 struct net_device_stats
*r
= &nl
->enet_stats
;
1273 plip_ioctl(struct net_device
*dev
, struct ifreq
*rq
, int cmd
)
1275 struct net_local
*nl
= (struct net_local
*) dev
->priv
;
1276 struct plipconf
*pc
= (struct plipconf
*) &rq
->ifr_data
;
1279 case PLIP_GET_TIMEOUT
:
1280 pc
->trigger
= nl
->trigger
;
1281 pc
->nibble
= nl
->nibble
;
1283 case PLIP_SET_TIMEOUT
:
1284 if(!capable(CAP_NET_ADMIN
))
1286 nl
->trigger
= pc
->trigger
;
1287 nl
->nibble
= pc
->nibble
;
1295 static int parport
[PLIP_MAX
] = { [0 ... PLIP_MAX
-1] = -1 };
1296 static int timid
= 0;
1298 MODULE_PARM(parport
, "1-" __MODULE_STRING(PLIP_MAX
) "i");
1299 MODULE_PARM(timid
, "1i");
1301 static struct net_device
*dev_plip
[PLIP_MAX
] = { NULL
, };
1304 plip_searchfor(int list
[], int a
)
1307 for (i
= 0; i
< PLIP_MAX
&& list
[i
] != -1; i
++) {
1308 if (list
[i
] == a
) return 1;
1313 /* plip_attach() is called (by the parport code) when a port is
1314 * available to use. */
1315 static void plip_attach (struct parport
*port
)
1319 if ((parport
[0] == -1 && (!timid
|| !port
->devices
)) ||
1320 plip_searchfor(parport
, port
->number
)) {
1321 if (i
== PLIP_MAX
) {
1322 printk(KERN_ERR
"plip: too many devices\n");
1325 dev_plip
[i
] = kmalloc(sizeof(struct net_device
),
1328 printk(KERN_ERR
"plip: memory squeeze\n");
1331 memset(dev_plip
[i
], 0, sizeof(struct net_device
));
1332 sprintf(dev_plip
[i
]->name
, "plip%d", i
);
1333 dev_plip
[i
]->priv
= port
;
1334 if (plip_init_dev(dev_plip
[i
],port
) ||
1335 register_netdev(dev_plip
[i
])) {
1344 /* plip_detach() is called (by the parport code) when a port is
1345 * no longer available to use. */
1346 static void plip_detach (struct parport
*port
)
1351 static struct parport_driver plip_driver
= {
1353 attach
: plip_attach
,
1357 static void __exit
plip_cleanup_module (void)
1361 parport_unregister_driver (&plip_driver
);
1363 for (i
=0; i
< PLIP_MAX
; i
++) {
1365 struct net_local
*nl
=
1366 (struct net_local
*)dev_plip
[i
]->priv
;
1367 unregister_netdev(dev_plip
[i
]);
1369 parport_release(nl
->pardev
);
1370 parport_unregister_device(nl
->pardev
);
1371 kfree(dev_plip
[i
]->priv
);
1380 static int parport_ptr
= 0;
1382 static int __init
plip_setup(char *str
)
1386 str
= get_options(str
, ARRAY_SIZE(ints
), ints
);
1389 if (!strncmp(str
, "parport", 7)) {
1390 int n
= simple_strtoul(str
+7, NULL
, 10);
1391 if (parport_ptr
< PLIP_MAX
)
1392 parport
[parport_ptr
++] = n
;
1394 printk(KERN_INFO
"plip: too many ports, %s ignored.\n",
1396 } else if (!strcmp(str
, "timid")) {
1399 if (ints
[0] == 0 || ints
[1] == 0) {
1400 /* disable driver on "plip=" or "plip=0" */
1403 printk(KERN_WARNING
"warning: 'plip=0x%x' ignored\n",
1410 __setup("plip=", plip_setup
);
1412 #endif /* !MODULE */
1414 static int __init
plip_init (void)
1416 if (parport
[0] == -2)
1419 if (parport
[0] != -1 && timid
) {
1420 printk(KERN_WARNING
"plip: warning, ignoring `timid' since specific ports given.\n");
1424 if (parport_register_driver (&plip_driver
)) {
1425 printk (KERN_WARNING
"plip: couldn't register driver\n");
1432 module_init(plip_init
);
1433 module_exit(plip_cleanup_module
);
1437 * compile-command: "gcc -DMODULE -DMODVERSIONS -D__KERNEL__ -Wall -Wstrict-prototypes -O2 -g -fomit-frame-pointer -pipe -c plip.c"