1 /* $Id: plip.c,v 1.3.6.2 1997/04/16 15:07:56 phil Exp $ */
2 /* PLIP: A parallel port "network" driver for Linux. */
3 /* This driver is for parallel port with 5-bit cable (LapLink (R) cable). */
5 * Authors: Donald Becker <becker@super.org>
6 * Tommy Thorn <thorn@daimi.aau.dk>
7 * Tanabe Hiroyasu <hiro@sanpo.t.u-tokyo.ac.jp>
8 * Alan Cox <gw4pts@gw4pts.ampr.org>
9 * Peter Bauer <100136.3530@compuserve.com>
10 * Niibe Yutaka <gniibe@mri.co.jp>
11 * Nimrod Zimerman <zimerman@mailandnews.com>
14 * Modularization and ifreq/ifmap support by Alan Cox.
15 * Rewritten by Niibe Yutaka.
16 * parport-sharing awareness code by Philip Blundell.
17 * SMP locking by Niibe Yutaka.
18 * Support for parallel ports with no IRQ (poll mode),
19 * Modifications to use the parallel port API
24 * - Module initialization.
26 * - Make sure other end is OK, before sending a packet.
27 * - Fix immediate timer problem.
30 * - Changed {enable,disable}_irq handling to make it work
31 * with new ("stack") semantics.
33 * This program is free software; you can redistribute it and/or
34 * modify it under the terms of the GNU General Public License
35 * as published by the Free Software Foundation; either version
36 * 2 of the License, or (at your option) any later version.
40 * Original version and the name 'PLIP' from Donald Becker <becker@super.org>
41 * inspired by Russ Nelson's parallel port packet driver.
44 * Tanabe Hiroyasu had changed the protocol, and it was in Linux v1.0.
45 * Because of the necessity to communicate to DOS machines with the
46 * Crynwr packet driver, Peter Bauer changed the protocol again
47 * back to original protocol.
49 * This version follows original PLIP protocol.
50 * So, this PLIP can't communicate with the PLIP of Linux v1.0.
54 * To use with DOS box, please do (Turn on ARP switch):
55 * # ifconfig plip[0-2] arp
/* Driver identification banner, printed once at init (see plip_init_dev). */
57 static const char *version
= "NET3 PLIP version 2.4-parport gniibe@mri.co.jp\n";
61 Ideas and protocols came from Russ Nelson's <nelson@crynwr.com>
62 "parallel.asm" parallel port packet driver.
64 The "Crynwr" parallel port standard specifies the following protocol:
65 Trigger by sending nibble '0x8' (this causes interrupt on other end)
70 Each octet is sent as <wait for rx. '0x1?'> <send 0x10+(octet&0x0F)>
71 <wait for rx. '0x0?'> <send 0x00+((octet>>4)&0x0F)>
73 The packet is encapsulated as if it were ethernet.
75 The cable used is a de facto standard parallel null cable -- sold as
76 a "LapLink" cable by various places. You'll need a 12-conductor cable to
77 make one yourself. The wiring is:
80 D0->ERROR 2 - 15 15 - 2
81 D1->SLCT 3 - 13 13 - 3
82 D2->PAPOUT 4 - 12 12 - 4
84 D4->BUSY 6 - 11 11 - 6
85 Do not connect the other pins. They are
87 STROBE is 1, FEED is 14, INIT is 16
88 extra grounds are 18,19,20,21,22,23,24
91 #include <linux/module.h>
92 #include <linux/kernel.h>
93 #include <linux/sched.h>
94 #include <linux/types.h>
95 #include <linux/fcntl.h>
96 #include <linux/interrupt.h>
97 #include <linux/string.h>
98 #include <linux/ptrace.h>
99 #include <linux/if_ether.h>
100 #include <asm/system.h>
101 #include <linux/in.h>
102 #include <linux/errno.h>
103 #include <linux/delay.h>
104 #include <linux/lp.h>
105 #include <linux/init.h>
107 #include <linux/netdevice.h>
108 #include <linux/etherdevice.h>
109 #include <linux/inetdevice.h>
110 #include <linux/skbuff.h>
111 #include <linux/if_plip.h>
112 #include <net/neighbour.h>
114 #include <linux/tqueue.h>
115 #include <linux/ioport.h>
116 #include <linux/spinlock.h>
117 #include <asm/bitops.h>
119 #include <asm/byteorder.h>
120 #include <asm/semaphore.h>
122 #include <linux/parport.h>
124 /* Maximum number of devices to support. */
127 /* Use 0 for production, 1 for verification, >2 for debug */
/* Runtime debug verbosity; initialized from NET_DEBUG, whose #define is not
   visible in this extraction (original line numbering jumps here). */
131 static unsigned int net_debug
= NET_DEBUG
;
/* Enable/disable the device IRQ; irq == -1 means IRQ-less (poll) mode, in
   which case these are no-ops.  Wrapped in do { } while (0) so each macro
   behaves as a single statement: the original bare `if` would silently
   capture a following `else` (dangling-else hazard) and could not be used
   safely as the body of an unbraced conditional.  The argument is also
   parenthesized in the comparison to survive expression arguments. */
#define ENABLE(irq) do { if ((irq) != -1) enable_irq(irq); } while (0)
#define DISABLE(irq) do { if ((irq) != -1) disable_irq(irq); } while (0)
136 /* Unit of busy-wait delay, in microseconds (passed to udelay()). */
137 #define PLIP_DELAY_UNIT 1
139 /* Connection time out = PLIP_TRIGGER_WAIT * PLIP_DELAY_UNIT usec */
140 #define PLIP_TRIGGER_WAIT 500
142 /* Nibble time out = PLIP_NIBBLE_WAIT * PLIP_DELAY_UNIT usec */
143 #define PLIP_NIBBLE_WAIT 3000
/* Forward declarations: bottom-half handlers, the interrupt handler, and the
   net_device method implementations installed in plip_init_dev(). */
146 static void plip_kick_bh(struct net_device
*dev
);
147 static void plip_bh(struct net_device
*dev
);
148 static void plip_timer_bh(struct net_device
*dev
);
150 /* Interrupt handler */
151 static void plip_interrupt(int irq
, void *dev_id
, struct pt_regs
*regs
);
153 /* Functions for DEV methods */
154 static int plip_tx_packet(struct sk_buff
*skb
, struct net_device
*dev
);
155 static int plip_hard_header(struct sk_buff
*skb
, struct net_device
*dev
,
156 unsigned short type
, void *daddr
,
157 void *saddr
, unsigned len
);
158 static int plip_hard_header_cache(struct neighbour
*neigh
,
159 struct hh_cache
*hh
);
160 static int plip_open(struct net_device
*dev
);
161 static int plip_close(struct net_device
*dev
);
162 static struct net_device_stats
*plip_get_stats(struct net_device
*dev
);
/* NOTE(review): plip_config is declared here but no definition is visible in
   this extraction. */
163 static int plip_config(struct net_device
*dev
, struct ifmap
*map
);
164 static int plip_ioctl(struct net_device
*dev
, struct ifreq
*ifr
, int cmd
);
/* parport preemption callbacks (registered in plip_init_dev). */
165 static int plip_preempt(void *handle
);
166 static void plip_wakeup(void *handle
);
/* State-machine enums and per-device private data.
   NOTE(review): the enumerator lists, the struct plip_local declaration and
   the struct net_local declaration are incomplete in this extraction (the
   original line numbering jumps); only the field declarations below are
   visible.  Recover the full definitions from upstream plip.c. */
168 enum plip_connection_state
{
176 enum plip_packet_state
{
185 enum plip_nibble_state
{
/* Fields of struct plip_local: per-direction transfer state. */
192 enum plip_packet_state state
;
193 enum plip_nibble_state nibble
;
/* Packet-length union: byte order of lsb/msb selected per endianness. */
196 #if defined(__LITTLE_ENDIAN)
199 #elif defined(__BIG_ENDIAN)
203 #error "Please fix the endianness defines in <asm/byteorder.h>"
209 unsigned char checksum
;
/* Fields of struct net_local: device statistics, task-queue entries for the
   immediate/deferred/poll-timer bottom halves, and send/receive state. */
215 struct net_device_stats enet_stats
;
216 struct tq_struct immediate
;
217 struct tq_struct deferred
;
218 struct tq_struct timer
;
219 struct plip_local snd_data
;
220 struct plip_local rcv_data
;
221 struct pardevice
*pardev
;
/* Timeouts, in PLIP_DELAY_UNIT microsecond units (see defines above). */
222 unsigned long trigger
;
223 unsigned long nibble
;
224 enum plip_connection_state connection
;
225 unsigned short timeout_count
;
/* Set by plip_preempt() when another parport client wants the port. */
228 int should_relinquish
;
/* Saved eth hard_header/hard_header_cache methods, wrapped by the plip_*
   versions which rewrite addresses afterwards. */
229 int (*orig_hard_header
)(struct sk_buff
*skb
, struct net_device
*dev
,
230 unsigned short type
, void *daddr
,
231 void *saddr
, unsigned len
);
232 int (*orig_hard_header_cache
)(struct neighbour
*neigh
,
233 struct hh_cache
*hh
);
/* Used by plip_close() to wait for the poll-mode timer to terminate. */
236 struct semaphore killed_timer_sem
;
/* Enable the parallel-port interrupt via the port's operations vector.
   NOTE(review): the function's braces are missing from this extraction. */
239 inline static void enable_parport_interrupts (struct net_device
*dev
)
243 struct parport
*port
=
244 ((struct net_local
*)dev
->priv
)->pardev
->port
;
245 port
->ops
->enable_irq (port
);
/* Disable the parallel-port interrupt via the port's operations vector.
   NOTE(review): the function's braces are missing from this extraction. */
249 inline static void disable_parport_interrupts (struct net_device
*dev
)
253 struct parport
*port
=
254 ((struct net_local
*)dev
->priv
)->pardev
->port
;
255 port
->ops
->disable_irq (port
);
/* Write a byte to the parallel-port data register through the parport ops.
   NOTE(review): the function's braces are missing from this extraction. */
259 inline static void write_data (struct net_device
*dev
, unsigned char data
)
261 struct parport
*port
=
262 ((struct net_local
*)dev
->priv
)->pardev
->port
;
264 port
->ops
->write_data (port
, data
);
/* Read the parallel-port status register through the parport ops.
   NOTE(review): the function's braces are missing from this extraction. */
267 inline static unsigned char read_status (struct net_device
*dev
)
269 struct parport
*port
=
270 ((struct net_local
*)dev
->priv
)->pardev
->port
;
272 return port
->ops
->read_status (port
);
275 /* Entry point of PLIP driver.
276 Probe the hardware, and register/initialize the driver.
278 PLIP is rather weird, because of the way it interacts with the parport
279 system. It is _not_ initialised from Space.c. Instead, plip_init()
280 is called, and that function makes up a "struct net_device" for each port, and
NOTE(review): this function is incomplete in this extraction — the opening
brace, several statements (e.g. the irq checks, error returns, and the
parport_register_device argument list tail) are missing; the visible lines
set up dev fields, register with parport, allocate/zero the net_local
private area, install the plip_* net_device methods (saving the originals
for address rewriting), and initialize timeouts and task-queue entries. */
285 plip_init_dev(struct net_device
*dev
, struct parport
*pb
)
287 struct net_local
*nl
;
288 struct pardevice
*pardev
;
291 dev
->base_addr
= pb
->base
;
/* Warn when the port has no IRQ; the driver then falls back to the
   timer-driven poll mode (see plip_timer_bh). */
294 printk(KERN_INFO
"plip: %s has no IRQ. Using IRQ-less mode,"
295 "which is fairly inefficient!\n", pb
->name
);
/* Register with the parport layer; plip_preempt/plip_wakeup implement
   port sharing, plip_interrupt handles rx interrupts. */
298 pardev
= parport_register_device(pb
, dev
->name
, plip_preempt
,
299 plip_wakeup
, plip_interrupt
,
305 printk(KERN_INFO
"%s", version
);
307 printk(KERN_INFO
"%s: Parallel port at %#3lx, using IRQ %d.\n",
308 dev
->name
, dev
->base_addr
, dev
->irq
);
310 printk(KERN_INFO
"%s: Parallel port at %#3lx, not using IRQ.\n",
311 dev
->name
, dev
->base_addr
);
313 /* Fill in the generic fields of the device structure. */
316 /* Then, override parts of it */
317 dev
->hard_start_xmit
= plip_tx_packet
;
318 dev
->open
= plip_open
;
319 dev
->stop
= plip_close
;
320 dev
->get_stats
= plip_get_stats
;
321 dev
->do_ioctl
= plip_ioctl
;
322 dev
->header_cache_update
= NULL
;
323 dev
->tx_queue_len
= 10;
324 dev
->flags
= IFF_POINTOPOINT
|IFF_NOARP
;
/* Fake MAC address fc:fc:...; the low 4 bytes are later overwritten with
   the interface's IP address (see plip_open). */
325 memset(dev
->dev_addr
, 0xfc, ETH_ALEN
);
327 /* Set the private structure */
328 dev
->priv
= kmalloc(sizeof (struct net_local
), GFP_KERNEL
);
329 if (dev
->priv
== NULL
) {
330 printk(KERN_ERR
"%s: out of memory\n", dev
->name
);
331 parport_unregister_device(pardev
);
334 memset(dev
->priv
, 0, sizeof(struct net_local
));
335 nl
= (struct net_local
*) dev
->priv
;
/* Save the generic eth header builders so plip_hard_header{,_cache} can
   call them and then rewrite the addresses. */
337 nl
->orig_hard_header
= dev
->hard_header
;
338 dev
->hard_header
= plip_hard_header
;
340 nl
->orig_hard_header_cache
= dev
->hard_header_cache
;
341 dev
->hard_header_cache
= plip_hard_header_cache
;
347 /* Initialize constants */
348 nl
->trigger
= PLIP_TRIGGER_WAIT
;
349 nl
->nibble
= PLIP_NIBBLE_WAIT
;
351 /* Initialize task queue structures */
352 nl
->immediate
.next
= NULL
;
353 nl
->immediate
.sync
= 0;
354 nl
->immediate
.routine
= (void (*)(void *))plip_bh
;
355 nl
->immediate
.data
= dev
;
357 nl
->deferred
.next
= NULL
;
358 nl
->deferred
.sync
= 0;
359 nl
->deferred
.routine
= (void (*)(void *))plip_kick_bh
;
360 nl
->deferred
.data
= dev
;
/* Poll-mode only: a periodic task that simulates interrupts. */
362 if (dev
->irq
== -1) {
363 nl
->timer
.next
= NULL
;
365 nl
->timer
.routine
= (void (*)(void *))plip_timer_bh
;
366 nl
->timer
.data
= dev
;
369 spin_lock_init(&nl
->lock
);
374 /* Bottom half handler for the delayed request.
375 This routine is kicked by do_timer().
376 Request `plip_bh' to be invoked. */
/* NOTE(review): opening/closing braces are missing from this extraction.
   If a retry was deferred (nl->is_deferred), re-queue the immediate task so
   plip_bh runs again. */
378 plip_kick_bh(struct net_device
*dev
)
380 struct net_local
*nl
= (struct net_local
*)dev
->priv
;
382 if (nl
->is_deferred
) {
383 queue_task(&nl
->immediate
, &tq_immediate
);
384 mark_bh(IMMEDIATE_BH
);
388 /* Forward declarations of internal routines */
/* These are the per-connection-state handlers dispatched by plip_bh via
   connection_state_table below; each returns OK or an error/timeout code. */
389 static int plip_none(struct net_device
*, struct net_local
*,
390 struct plip_local
*, struct plip_local
*);
391 static int plip_receive_packet(struct net_device
*, struct net_local
*,
392 struct plip_local
*, struct plip_local
*);
393 static int plip_send_packet(struct net_device
*, struct net_local
*,
394 struct plip_local
*, struct plip_local
*);
395 static int plip_connection_close(struct net_device
*, struct net_local
*,
396 struct plip_local
*, struct plip_local
*);
397 static int plip_error(struct net_device
*, struct net_local
*,
398 struct plip_local
*, struct plip_local
*);
399 static int plip_bh_timeout_error(struct net_device
*dev
, struct net_local
*nl
,
400 struct plip_local
*snd
,
401 struct plip_local
*rcv
,
/* Function-pointer type of a state handler. */
409 typedef int (*plip_func
)(struct net_device
*dev
, struct net_local
*nl
,
410 struct plip_local
*snd
, struct plip_local
*rcv
);
/* Dispatch table indexed by nl->connection (enum plip_connection_state).
   NOTE(review): most initializer entries are missing from this extraction;
   only plip_connection_close is visible. */
412 static plip_func connection_state_table
[] =
417 plip_connection_close
,
421 /* Bottom half handler of PLIP. */
/* Dispatches to the handler for the current connection state; on a non-OK
   result runs the timeout-error path, and if that too is non-OK, defers a
   retry via the timer queue.  NOTE(review): braces and the declarations of
   f and r are missing from this extraction. */
423 plip_bh(struct net_device
*dev
)
425 struct net_local
*nl
= (struct net_local
*)dev
->priv
;
426 struct plip_local
*snd
= &nl
->snd_data
;
427 struct plip_local
*rcv
= &nl
->rcv_data
;
432 f
= connection_state_table
[nl
->connection
];
433 if ((r
= (*f
)(dev
, nl
, snd
, rcv
)) != OK
434 && (r
= plip_bh_timeout_error(dev
, nl
, snd
, rcv
, r
)) != OK
) {
436 queue_task(&nl
->deferred
, &tq_timer
);
/* Poll-mode bottom half: while the kill flag is clear, fake an interrupt
   (irq == -1) and re-arm itself on the timer queue; once kill_timer is set,
   signal plip_close via the semaphore.  NOTE(review): braces are missing
   from this extraction. */
441 plip_timer_bh(struct net_device
*dev
)
443 struct net_local
*nl
= (struct net_local
*)dev
->priv
;
445 if (!(atomic_read (&nl
->kill_timer
))) {
447 plip_interrupt (-1, dev
, NULL
);
449 queue_task (&nl
->timer
, &tq_timer
);
452 up (&nl
->killed_timer_sem
);
/* Error/timeout recovery for plip_bh: retries a few times before giving up,
   updates error statistics, frees the pending tx skb, and moves the
   connection to PLIP_CN_ERROR.  NOTE(review): multiple lines (braces, the
   `error` parameter declaration, some returns) are missing from this
   extraction; see upstream plip.c for the complete body. */
457 plip_bh_timeout_error(struct net_device
*dev
, struct net_local
*nl
,
458 struct plip_local
*snd
, struct plip_local
*rcv
,
463 * This is tricky. If we got here from the beginning of send (either
464 * with ERROR or HS_TIMEOUT) we have IRQ enabled. Otherwise it's
465 * already disabled. With the old variant of {enable,disable}_irq()
466 * extra disable_irq() was a no-op. Now it became mortal - it's
467 * unbalanced and thus we'll never re-enable IRQ (until rmmod plip,
468 * that is). So we have to treat HS_TIMEOUT and ERROR from send
472 spin_lock_irq(&nl
->lock
);
473 if (nl
->connection
== PLIP_CN_SEND
) {
475 if (error
!= ERROR
) { /* Timeout */
/* Allow more retries for handshake timeouts than for other errors. */
477 if ((error
== HS_TIMEOUT
478 && nl
->timeout_count
<= 10)
479 || nl
->timeout_count
<= 3) {
480 spin_unlock_irq(&nl
->lock
);
481 /* Try again later */
484 c0
= read_status(dev
);
485 printk(KERN_WARNING
"%s: transmit timeout(%d,%02x)\n",
486 dev
->name
, snd
->state
, c0
);
489 nl
->enet_stats
.tx_errors
++;
490 nl
->enet_stats
.tx_aborted_errors
++;
491 } else if (nl
->connection
== PLIP_CN_RECEIVE
) {
492 if (rcv
->state
== PLIP_PK_TRIGGER
) {
493 /* Transmission was interrupted. */
494 spin_unlock_irq(&nl
->lock
);
497 if (error
!= ERROR
) { /* Timeout */
498 if (++nl
->timeout_count
<= 3) {
499 spin_unlock_irq(&nl
->lock
);
500 /* Try again later */
503 c0
= read_status(dev
);
504 printk(KERN_WARNING
"%s: receive timeout(%d,%02x)\n",
505 dev
->name
, rcv
->state
, c0
);
507 nl
->enet_stats
.rx_dropped
++;
509 rcv
->state
= PLIP_PK_DONE
;
/* Abandon the pending transmit and release its skb. */
514 snd
->state
= PLIP_PK_DONE
;
516 dev_kfree_skb(snd
->skb
);
519 spin_unlock_irq(&nl
->lock
);
520 if (error
== HS_TIMEOUT
) {
524 disable_parport_interrupts (dev
);
526 nl
->connection
= PLIP_CN_ERROR
;
527 write_data (dev
, 0x00);
/* State handler for PLIP_CN_NONE: nothing to do.  NOTE(review): the body
   (presumably `return OK;`) is missing from this extraction. */
533 plip_none(struct net_device
*dev
, struct net_local
*nl
,
534 struct plip_local
*snd
, struct plip_local
*rcv
)
539 /* PLIP_RECEIVE --- receive a byte(two nibbles)
540 Returns OK on success, TIMEOUT on timeout */
/* Low nibble arrives in status bits 3-6 ((c0 >> 3) & 0x0f), high nibble in
   bits 3-6 shifted up ((c0 << 1) & 0xf0); each nibble is acknowledged by
   toggling data bit 4.  NOTE(review): the nibble-state switch, timeout
   loops and several statements are missing from this extraction. */
542 plip_receive(unsigned short nibble_timeout
, struct net_device
*dev
,
543 enum plip_nibble_state
*ns_p
, unsigned char *data_p
)
545 unsigned char c0
, c1
;
552 c0
= read_status(dev
);
553 udelay(PLIP_DELAY_UNIT
);
554 if ((c0
& 0x80) == 0) {
/* Re-read to confirm the status is stable before accepting the nibble. */
555 c1
= read_status(dev
);
562 *data_p
= (c0
>> 3) & 0x0f;
563 write_data (dev
, 0x10); /* send ACK */
569 c0
= read_status(dev
);
570 udelay(PLIP_DELAY_UNIT
);
572 c1
= read_status(dev
);
579 *data_p
|= (c0
<< 1) & 0xf0;
580 write_data (dev
, 0x00); /* send ACK */
581 *ns_p
= PLIP_NB_BEGIN
;
588 /* PLIP_RECEIVE_PACKET --- receive a packet */
/* State machine over rcv->state: TRIGGER -> LENGTH_LSB -> LENGTH_MSB ->
   DATA -> CHECKSUM -> DONE.  Allocates the skb once the length is known,
   validates length against mtu+hard_header_len and a minimum of 8, verifies
   the additive checksum, and hands the packet up via eth_type_trans.
   NOTE(review): many lines (braces, returns, netif calls) are missing from
   this extraction; the visible case fallthroughs are intentional in the
   original state machine. */
590 plip_receive_packet(struct net_device
*dev
, struct net_local
*nl
,
591 struct plip_local
*snd
, struct plip_local
*rcv
)
593 unsigned short nibble_timeout
= nl
->nibble
;
596 switch (rcv
->state
) {
597 case PLIP_PK_TRIGGER
:
599 /* Don't need to synchronize irq, as we can safely ignore it */
600 disable_parport_interrupts (dev
);
602 write_data (dev
, 0x01); /* send ACK */
604 printk(KERN_DEBUG
"%s: receive start\n", dev
->name
);
605 rcv
->state
= PLIP_PK_LENGTH_LSB
;
606 rcv
->nibble
= PLIP_NB_BEGIN
;
608 case PLIP_PK_LENGTH_LSB
:
/* If a send is pending, use the (shorter) trigger timeout so a
   collision is detected quickly and the send can win. */
609 if (snd
->state
!= PLIP_PK_DONE
) {
610 if (plip_receive(nl
->trigger
, dev
,
611 &rcv
->nibble
, &rcv
->length
.b
.lsb
)) {
612 /* collision, here dev->tbusy == 1 */
613 rcv
->state
= PLIP_PK_DONE
;
615 nl
->connection
= PLIP_CN_SEND
;
616 queue_task(&nl
->deferred
, &tq_timer
);
617 enable_parport_interrupts (dev
);
622 if (plip_receive(nibble_timeout
, dev
,
623 &rcv
->nibble
, &rcv
->length
.b
.lsb
))
626 rcv
->state
= PLIP_PK_LENGTH_MSB
;
628 case PLIP_PK_LENGTH_MSB
:
629 if (plip_receive(nibble_timeout
, dev
,
630 &rcv
->nibble
, &rcv
->length
.b
.msb
))
/* Sanity-check the announced packet length. */
632 if (rcv
->length
.h
> dev
->mtu
+ dev
->hard_header_len
633 || rcv
->length
.h
< 8) {
634 printk(KERN_WARNING
"%s: bogus packet size %d.\n", dev
->name
, rcv
->length
.h
);
637 /* Malloc up new buffer. */
638 rcv
->skb
= dev_alloc_skb(rcv
->length
.h
);
639 if (rcv
->skb
== NULL
) {
640 printk(KERN_ERR
"%s: Memory squeeze.\n", dev
->name
);
643 skb_put(rcv
->skb
,rcv
->length
.h
);
645 rcv
->state
= PLIP_PK_DATA
;
650 lbuf
= rcv
->skb
->data
;
/* Receive the payload byte-by-byte, accumulating the checksum. */
652 if (plip_receive(nibble_timeout
, dev
,
653 &rcv
->nibble
, &lbuf
[rcv
->byte
]))
655 while (++rcv
->byte
< rcv
->length
.h
);
657 rcv
->checksum
+= lbuf
[--rcv
->byte
];
659 rcv
->state
= PLIP_PK_CHECKSUM
;
661 case PLIP_PK_CHECKSUM
:
662 if (plip_receive(nibble_timeout
, dev
,
663 &rcv
->nibble
, &rcv
->data
))
665 if (rcv
->data
!= rcv
->checksum
) {
666 nl
->enet_stats
.rx_crc_errors
++;
668 printk(KERN_DEBUG
"%s: checksum error\n", dev
->name
);
671 rcv
->state
= PLIP_PK_DONE
;
674 /* Inform the upper layer for the arrival of a packet. */
675 rcv
->skb
->protocol
=eth_type_trans(rcv
->skb
, dev
);
677 nl
->enet_stats
.rx_bytes
+= rcv
->length
.h
;
678 nl
->enet_stats
.rx_packets
++;
681 printk(KERN_DEBUG
"%s: receive end\n", dev
->name
);
683 /* Close the connection. */
684 write_data (dev
, 0x00);
685 spin_lock_irq(&nl
->lock
);
686 if (snd
->state
!= PLIP_PK_DONE
) {
/* A transmit is queued: switch straight to send. */
687 nl
->connection
= PLIP_CN_SEND
;
688 spin_unlock_irq(&nl
->lock
);
689 queue_task(&nl
->immediate
, &tq_immediate
);
690 mark_bh(IMMEDIATE_BH
);
691 enable_parport_interrupts (dev
);
695 nl
->connection
= PLIP_CN_NONE
;
696 spin_unlock_irq(&nl
->lock
);
697 enable_parport_interrupts (dev
);
705 /* PLIP_SEND --- send a byte (two nibbles)
706 Returns OK on success, TIMEOUT when timeout */
/* Mirror of plip_receive: presents each nibble on the data lines, strobes
   with data bit 4 (0x10), and waits for the peer's ACK on status bit 7.
   NOTE(review): the nibble-state switch, timeout loops and braces are
   missing from this extraction; c0's declaration is not visible. */
708 plip_send(unsigned short nibble_timeout
, struct net_device
*dev
,
709 enum plip_nibble_state
*ns_p
, unsigned char data
)
716 write_data (dev
, data
& 0x0f);
720 write_data (dev
, 0x10 | (data
& 0x0f));
723 c0
= read_status(dev
);
724 if ((c0
& 0x80) == 0)
728 udelay(PLIP_DELAY_UNIT
);
730 write_data (dev
, 0x10 | (data
>> 4));
734 write_data (dev
, (data
>> 4));
737 c0
= read_status(dev
);
742 udelay(PLIP_DELAY_UNIT
);
744 *ns_p
= PLIP_NB_BEGIN
;
750 /* PLIP_SEND_PACKET --- send a packet */
/* State machine over snd->state: TRIGGER (raise 0x08 to interrupt the peer,
   detecting collisions with an incoming receive) -> LENGTH_LSB ->
   LENGTH_MSB -> DATA -> CHECKSUM -> DONE, then moves the connection to
   CLOSING.  NOTE(review): many lines (braces, returns, timeout handling)
   are missing from this extraction; the visible case fallthroughs are
   intentional in the original state machine. */
752 plip_send_packet(struct net_device
*dev
, struct net_local
*nl
,
753 struct plip_local
*snd
, struct plip_local
*rcv
)
755 unsigned short nibble_timeout
= nl
->nibble
;
760 if (snd
->skb
== NULL
|| (lbuf
= snd
->skb
->data
) == NULL
) {
761 printk(KERN_DEBUG
"%s: send skb lost\n", dev
->name
);
762 snd
->state
= PLIP_PK_DONE
;
767 switch (snd
->state
) {
768 case PLIP_PK_TRIGGER
:
/* Line must be idle (status 0x80) before we may trigger. */
769 if ((read_status(dev
) & 0xf8) != 0x80)
772 /* Trigger remote rx interrupt. */
773 write_data (dev
, 0x08);
776 udelay(PLIP_DELAY_UNIT
);
777 spin_lock_irq(&nl
->lock
);
778 if (nl
->connection
== PLIP_CN_RECEIVE
) {
/* Peer started sending first: record a collision and yield. */
779 spin_unlock_irq(&nl
->lock
);
781 nl
->enet_stats
.collisions
++;
784 c0
= read_status(dev
);
786 spin_unlock_irq(&nl
->lock
);
789 if (nl
->connection
== PLIP_CN_RECEIVE
) {
791 We don't need to enable irq,
792 as it is soon disabled. */
793 /* Yes, we do. New variant of
794 {enable,disable}_irq *counts*
797 nl
->enet_stats
.collisions
++;
800 disable_parport_interrupts (dev
);
802 printk(KERN_DEBUG
"%s: send start\n", dev
->name
);
803 snd
->state
= PLIP_PK_LENGTH_LSB
;
804 snd
->nibble
= PLIP_NB_BEGIN
;
805 nl
->timeout_count
= 0;
808 spin_unlock_irq(&nl
->lock
);
810 write_data (dev
, 0x00);
815 case PLIP_PK_LENGTH_LSB
:
816 if (plip_send(nibble_timeout
, dev
,
817 &snd
->nibble
, snd
->length
.b
.lsb
))
819 snd
->state
= PLIP_PK_LENGTH_MSB
;
821 case PLIP_PK_LENGTH_MSB
:
822 if (plip_send(nibble_timeout
, dev
,
823 &snd
->nibble
, snd
->length
.b
.msb
))
825 snd
->state
= PLIP_PK_DATA
;
/* Send the payload byte-by-byte, accumulating the checksum. */
831 if (plip_send(nibble_timeout
, dev
,
832 &snd
->nibble
, lbuf
[snd
->byte
]))
834 while (++snd
->byte
< snd
->length
.h
);
836 snd
->checksum
+= lbuf
[--snd
->byte
];
838 snd
->state
= PLIP_PK_CHECKSUM
;
840 case PLIP_PK_CHECKSUM
:
841 if (plip_send(nibble_timeout
, dev
,
842 &snd
->nibble
, snd
->checksum
))
845 nl
->enet_stats
.tx_bytes
+= snd
->skb
->len
;
846 dev_kfree_skb(snd
->skb
);
847 nl
->enet_stats
.tx_packets
++;
848 snd
->state
= PLIP_PK_DONE
;
851 /* Close the connection */
852 write_data (dev
, 0x00);
855 printk(KERN_DEBUG
"%s: send end\n", dev
->name
);
856 nl
->connection
= PLIP_CN_CLOSING
;
858 queue_task(&nl
->deferred
, &tq_timer
);
859 enable_parport_interrupts (dev
);
/* State handler for PLIP_CN_CLOSING: return to the idle state and, if
   another parport client asked for the port while we were busy
   (should_relinquish), release it now.  NOTE(review): braces and some
   statements are missing from this extraction. */
867 plip_connection_close(struct net_device
*dev
, struct net_local
*nl
,
868 struct plip_local
*snd
, struct plip_local
*rcv
)
870 spin_lock_irq(&nl
->lock
);
871 if (nl
->connection
== PLIP_CN_CLOSING
) {
872 nl
->connection
= PLIP_CN_NONE
;
876 spin_unlock_irq(&nl
->lock
);
877 if (nl
->should_relinquish
) {
878 nl
->should_relinquish
= nl
->port_owner
= 0;
879 parport_release(nl
->pardev
);
884 /* PLIP_ERROR --- wait till other end settled */
/* Poll the status lines; once the peer is idle again (status 0x80), reset
   to PLIP_CN_NONE and re-enable interrupts, otherwise keep retrying via
   the deferred timer task.  NOTE(review): braces and some statements are
   missing from this extraction. */
886 plip_error(struct net_device
*dev
, struct net_local
*nl
,
887 struct plip_local
*snd
, struct plip_local
*rcv
)
889 unsigned char status
;
891 status
= read_status(dev
);
892 if ((status
& 0xf8) == 0x80) {
894 printk(KERN_DEBUG
"%s: reset interface.\n", dev
->name
);
895 nl
->connection
= PLIP_CN_NONE
;
896 nl
->should_relinquish
= 0;
899 enable_parport_interrupts (dev
);
904 queue_task(&nl
->deferred
, &tq_timer
);
910 /* Handle the parallel port interrupts. */
/* Also invoked with irq == -1 from plip_timer_bh in poll mode.  Ignores
   spurious interrupts (status not 0xc0); on a genuine trigger, moves the
   connection to PLIP_CN_RECEIVE and schedules plip_bh.  NOTE(review):
   braces, the dev_id NULL check, the rcv assignment and some case bodies
   are missing from this extraction. */
912 plip_interrupt(int irq
, void *dev_id
, struct pt_regs
* regs
)
914 struct net_device
*dev
= dev_id
;
915 struct net_local
*nl
;
916 struct plip_local
*rcv
;
920 printk(KERN_DEBUG
"plip_interrupt: irq %d for unknown device.\n", irq
);
924 nl
= (struct net_local
*)dev
->priv
;
930 c0
= read_status(dev
);
931 if ((c0
& 0xf8) != 0xc0) {
/* Only complain in interrupt mode; polling sees this constantly. */
932 if ((dev
->irq
!= -1) && (net_debug
> 1))
933 printk(KERN_DEBUG
"%s: spurious interrupt\n", dev
->name
);
938 printk(KERN_DEBUG
"%s: interrupt.\n", dev
->name
);
940 spin_lock_irq(&nl
->lock
);
941 switch (nl
->connection
) {
942 case PLIP_CN_CLOSING
:
946 dev
->last_rx
= jiffies
;
947 rcv
->state
= PLIP_PK_TRIGGER
;
948 nl
->connection
= PLIP_CN_RECEIVE
;
949 nl
->timeout_count
= 0;
950 queue_task(&nl
->immediate
, &tq_immediate
);
951 mark_bh(IMMEDIATE_BH
);
952 spin_unlock_irq(&nl
->lock
);
955 case PLIP_CN_RECEIVE
:
956 /* May occur because there is race condition
957 around test and set of dev->interrupt.
958 Ignore this interrupt. */
959 spin_unlock_irq(&nl
->lock
);
963 spin_unlock_irq(&nl
->lock
);
964 printk(KERN_ERR
"%s: receive interrupt in error state\n", dev
->name
);
/* hard_start_xmit method: claims the parport if needed, guards against
   concurrent transmits via dev->tbusy, rejects oversized packets, queues
   the skb as snd_data and kicks the bottom half.  NOTE(review): braces,
   returns and the snd->skb assignment are missing from this extraction. */
970 plip_tx_packet(struct sk_buff
*skb
, struct net_device
*dev
)
972 struct net_local
*nl
= (struct net_local
*)dev
->priv
;
973 struct plip_local
*snd
= &nl
->snd_data
;
978 /* We may need to grab the bus */
979 if (!nl
->port_owner
) {
980 if (parport_claim(nl
->pardev
))
985 if (test_and_set_bit(0, (void*)&dev
->tbusy
) != 0) {
986 printk(KERN_WARNING
"%s: Transmitter access conflict.\n", dev
->name
);
990 if (skb
->len
> dev
->mtu
+ dev
->hard_header_len
) {
991 printk(KERN_WARNING
"%s: packet too big, %d.\n", dev
->name
, (int)skb
->len
);
997 printk(KERN_DEBUG
"%s: send request\n", dev
->name
);
999 spin_lock_irq(&nl
->lock
);
1000 dev
->trans_start
= jiffies
;
1002 snd
->length
.h
= skb
->len
;
1003 snd
->state
= PLIP_PK_TRIGGER
;
/* Only start a new connection when idle; otherwise the pending state
   machine will pick the packet up. */
1004 if (nl
->connection
== PLIP_CN_NONE
) {
1005 nl
->connection
= PLIP_CN_SEND
;
1006 nl
->timeout_count
= 0;
1008 queue_task(&nl
->immediate
, &tq_immediate
);
1009 mark_bh(IMMEDIATE_BH
);
1010 spin_unlock_irq(&nl
->lock
);
/* Rewrite an ethernet header for PLIP: source is our fake MAC, destination
   is fc:fc followed by the peer's 4-byte IP address taken from the first
   address on the interface.  NOTE(review): braces and the else branch are
   missing from this extraction; presumably ifa could be NULL — confirm
   against upstream plip.c. */
1016 plip_rewrite_address(struct net_device
*dev
, struct ethhdr
*eth
)
1018 struct in_device
*in_dev
;
1020 if ((in_dev
=dev
->ip_ptr
) != NULL
) {
1021 /* Any address will do - we take the first */
1022 struct in_ifaddr
*ifa
=in_dev
->ifa_list
;
1024 memcpy(eth
->h_source
, dev
->dev_addr
, 6);
1025 memset(eth
->h_dest
, 0xfc, 2);
1026 memcpy(eth
->h_dest
+2, &ifa
->ifa_address
, 4);
/* hard_header method: build the header with the saved generic eth routine,
   then rewrite the addresses for PLIP.  NOTE(review): braces, ret's
   declaration and the return statement are missing from this extraction. */
1032 plip_hard_header(struct sk_buff
*skb
, struct net_device
*dev
,
1033 unsigned short type
, void *daddr
,
1034 void *saddr
, unsigned len
)
1036 struct net_local
*nl
= (struct net_local
*)dev
->priv
;
1039 if ((ret
= nl
->orig_hard_header(skb
, dev
, type
, daddr
, saddr
, len
)) >= 0)
1040 plip_rewrite_address (dev
, (struct ethhdr
*)skb
->data
);
/* hard_header_cache method: fill the hh_cache entry with the saved generic
   routine, then rewrite the cached header's addresses (the eth header sits
   2 bytes into hh_data for alignment).  NOTE(review): the `static` on the
   definition, braces, ret's declaration and the return are missing from
   this extraction. */
1045 int plip_hard_header_cache(struct neighbour
*neigh
,
1046 struct hh_cache
*hh
)
1048 struct net_local
*nl
= (struct net_local
*)neigh
->dev
->priv
;
1051 if ((ret
= nl
->orig_hard_header_cache(neigh
, hh
)) == 0)
1053 struct ethhdr
*eth
= (struct ethhdr
*)(((u8
*)hh
->hh_data
) + 2);
1054 plip_rewrite_address (neigh
->dev
, eth
);
1060 /* Open/initialize the board. This is called (in the current kernel)
1061 sometime after booting when the 'ifconfig' program is run.
1063 This routine gets exclusive access to the parallel port by allocating
NOTE(review): braces, the poll-mode irq check around the timer start, the
MOD_INC_USE_COUNT/netif start calls and the return are missing from this
extraction. */
1067 plip_open(struct net_device
*dev
)
1069 struct net_local
*nl
= (struct net_local
*)dev
->priv
;
1070 struct in_device
*in_dev
;
/* Claim the port if a preempt previously made us release it. */
1073 if (!nl
->port_owner
) {
1074 if (parport_claim(nl
->pardev
)) return -EAGAIN
;
1078 nl
->should_relinquish
= 0;
1080 /* Clear the data port. */
1081 write_data (dev
, 0x00);
1083 /* Enable rx interrupt. */
1084 enable_parport_interrupts (dev
);
/* Poll mode: start the fake-interrupt timer task. */
1087 atomic_set (&nl
->kill_timer
, 0);
1088 queue_task (&nl
->timer
, &tq_timer
);
1091 /* Initialize the state machine. */
1092 nl
->rcv_data
.state
= nl
->snd_data
.state
= PLIP_PK_DONE
;
1093 nl
->rcv_data
.skb
= nl
->snd_data
.skb
= NULL
;
1094 nl
->connection
= PLIP_CN_NONE
;
1095 nl
->is_deferred
= 0;
1097 /* Fill in the MAC-level header.
1098 We used to abuse dev->broadcast to store the point-to-point
1099 MAC address, but we no longer do it. Instead, we fetch the
1100 interface address whenever it is needed, which is cheap enough
1101 because we use the hh_cache. Actually, abusing dev->broadcast
1102 didn't work, because when using plip_open the point-to-point
1103 address isn't yet known.
1104 PLIP doesn't have a real MAC address, but we need it to be
1105 DOS compatible, and to properly support taps (otherwise,
1106 when the device address isn't identical to the address of a
1107 received frame, the kernel incorrectly drops it). */
1109 if ((in_dev
=dev
->ip_ptr
) != NULL
) {
1110 /* Any address will do - we take the first. We already
1111 have the first two bytes filled with 0xfc, from
1113 struct in_ifaddr
*ifa
=in_dev
->ifa_list
;
1115 memcpy(dev
->dev_addr
+2, &ifa
->ifa_local
, 4);
1127 /* The inverse routine to plip_open (). */
/* Stops the poll timer (waiting on killed_timer_sem for it to finish),
   quiesces the port, releases parport ownership, drops any in-flight skbs
   and clears the control lines.  NOTE(review): braces, the netif stop and
   irq-mode conditionals, and the return are missing from this
   extraction. */
1129 plip_close(struct net_device
*dev
)
1131 struct net_local
*nl
= (struct net_local
*)dev
->priv
;
1132 struct plip_local
*snd
= &nl
->snd_data
;
1133 struct plip_local
*rcv
= &nl
->rcv_data
;
/* Poll mode: ask the timer task to die and wait until it has. */
1142 init_MUTEX_LOCKED (&nl
->killed_timer_sem
);
1143 atomic_set (&nl
->kill_timer
, 1);
1144 down (&nl
->killed_timer_sem
);
1148 outb(0x00, PAR_DATA(dev
));
1150 nl
->is_deferred
= 0;
1151 nl
->connection
= PLIP_CN_NONE
;
1152 if (nl
->port_owner
) {
1153 parport_release(nl
->pardev
);
1157 snd
->state
= PLIP_PK_DONE
;
1159 dev_kfree_skb(snd
->skb
);
1162 rcv
->state
= PLIP_PK_DONE
;
1164 kfree_skb(rcv
->skb
);
1170 outb(0x00, PAR_CONTROL(dev
));
/* parport preemption callback: refuse to give up the port while a datagram
   is in flight (mark should_relinquish so plip_connection_close releases it
   later); otherwise note that we no longer own the bus.  NOTE(review):
   braces and the return values are missing from this extraction. */
1177 plip_preempt(void *handle
)
1179 struct net_device
*dev
= (struct net_device
*)handle
;
1180 struct net_local
*nl
= (struct net_local
*)dev
->priv
;
1182 /* Stand our ground if a datagram is on the wire */
1183 if (nl
->connection
!= PLIP_CN_NONE
) {
1184 nl
->should_relinquish
= 1;
1188 nl
->port_owner
= 0; /* Remember that we released the bus */
/* parport wakeup callback: re-claim the port when it becomes available
   again (only if the interface is still up), then quiesce the data lines.
   NOTE(review): braces, returns and the port_owner update after the
   re-claim are missing from this extraction. */
1193 plip_wakeup(void *handle
)
1195 struct net_device
*dev
= (struct net_device
*)handle
;
1196 struct net_local
*nl
= (struct net_local
*)dev
->priv
;
1198 if (nl
->port_owner
) {
1199 /* Why are we being woken up? */
1200 printk(KERN_DEBUG
"%s: why am I being woken up?\n", dev
->name
);
1201 if (!parport_claim(nl
->pardev
))
1202 /* bus_owner is already set (but why?) */
1203 printk(KERN_DEBUG
"%s: I'm broken.\n", dev
->name
);
1208 if (!(dev
->flags
& IFF_UP
))
1209 /* Don't need the port when the interface is down */
1212 if (!parport_claim(nl
->pardev
)) {
1214 /* Clear the data port. */
1215 write_data (dev
, 0x00);
/* get_stats method: returns the driver's statistics block.  NOTE(review):
   braces and the return statement are missing from this extraction. */
1221 static struct net_device_stats
*
1222 plip_get_stats(struct net_device
*dev
)
1224 struct net_local
*nl
= (struct net_local
*)dev
->priv
;
1225 struct net_device_stats
*r
= &nl
->enet_stats
;
/* do_ioctl method: get/set the trigger and nibble timeouts through struct
   plipconf embedded in the ifreq.  NOTE(review): braces, the switch
   statement itself, the default case and returns are missing from this
   extraction. */
1231 plip_ioctl(struct net_device
*dev
, struct ifreq
*rq
, int cmd
)
1233 struct net_local
*nl
= (struct net_local
*) dev
->priv
;
1234 struct plipconf
*pc
= (struct plipconf
*) &rq
->ifr_data
;
1237 case PLIP_GET_TIMEOUT
:
1238 pc
->trigger
= nl
->trigger
;
1239 pc
->nibble
= nl
->nibble
;
1241 case PLIP_SET_TIMEOUT
:
1242 nl
->trigger
= pc
->trigger
;
1243 nl
->nibble
= pc
->nibble
;
/* Module parameters: `parport` lists port numbers to attach to (-1 = auto,
   -2 = disabled); `timid` restricts attachment to ports with no other
   devices.  dev_plip holds the registered net_devices. */
1251 static int parport
[PLIP_MAX
] = { [0 ... PLIP_MAX
-1] = -1 };
1252 static int timid
= 0;
1254 MODULE_PARM(parport
, "1-" __MODULE_STRING(PLIP_MAX
) "i");
1255 MODULE_PARM(timid
, "1i");
1257 static struct net_device
*dev_plip
[PLIP_MAX
] = { NULL
, };
/* Module unload: for each registered device, unregister it, release and
   unregister its pardevice, and free the private area and name buffer.
   NOTE(review): braces, i's declaration and the NULL/port_owner guards are
   missing from this extraction. */
1261 cleanup_module(void)
1265 for (i
=0; i
< PLIP_MAX
; i
++) {
1267 struct net_local
*nl
=
1268 (struct net_local
*)dev_plip
[i
]->priv
;
1269 unregister_netdev(dev_plip
[i
]);
1271 parport_release(nl
->pardev
);
1272 parport_unregister_device(nl
->pardev
);
1273 kfree(dev_plip
[i
]->priv
);
1274 kfree(dev_plip
[i
]->name
);
/* When built as a module, plip_init is the module init entry point. */
1281 #define plip_init init_module
/* Next free slot in the parport[] boot-parameter array. */
1285 static int parport_ptr
= 0;
/* Boot-time "plip=" option parser: "parportN" selects a port, "timid"
   enables timid mode, "plip=0"/"plip=" disables the driver.  NOTE(review):
   braces and several branch bodies are missing from this extraction. */
1287 void plip_setup(char *str
, int *ints
)
1290 if (!strncmp(str
, "parport", 7)) {
1291 int n
= simple_strtoul(str
+7, NULL
, 10);
1292 if (parport_ptr
< PLIP_MAX
)
1293 parport
[parport_ptr
++] = n
;
1295 printk(KERN_INFO
"plip: too many ports, %s ignored.\n",
1297 } else if (!strcmp(str
, "timid")) {
1300 if (ints
[0] == 0 || ints
[1] == 0) {
1301 /* disable driver on "plip=" or "plip=0" */
1304 printk(KERN_WARNING
"warning: 'plip=0x%x' ignored\n",
/* Return 1 if port number `a` appears in `list` (terminated by -1 or
   PLIP_MAX entries), else 0.  NOTE(review): braces, i's declaration and
   the final return are missing from this extraction. */
1313 plip_searchfor(int list
[], int a
)
1316 for (i
= 0; i
< PLIP_MAX
&& list
[i
] != -1; i
++) {
1317 if (list
[i
] == a
) return 1;
/* Driver init (init_module when built modular): walk the enumerated
   parports, and for each selected port allocate a net_device, name it
   "plip%d", stash the parport in dev->priv for plip_init_dev, and register
   it.  NOTE(review): the function header, loop structure, braces and many
   error-path statements are missing from this extraction. */
1325 struct parport
*pb
= parport_enumerate();
/* parport[0] == -2 means the driver was disabled via "plip=0". */
1328 if (parport
[0] == -2)
1331 if (parport
[0] != -1 && timid
) {
1332 printk(KERN_WARNING
"plip: warning, ignoring `timid' since specific ports given.\n");
1336 /* If the user feeds parameters, use them */
1338 if ((parport
[0] == -1 && (!timid
|| !pb
->devices
)) ||
1339 plip_searchfor(parport
, pb
->number
)) {
1340 if (i
== PLIP_MAX
) {
1341 printk(KERN_ERR
"plip: too many devices\n");
1344 dev_plip
[i
] = kmalloc(sizeof(struct net_device
),
1347 printk(KERN_ERR
"plip: memory squeeze\n");
1350 memset(dev_plip
[i
], 0, sizeof(struct net_device
));
/* NOTE(review): strlen("plipXXX") == 7 does not include space for the
   NUL terminator that sprintf writes below; this looks like an
   off-by-one for large device indices — verify against upstream. */
1352 kmalloc(strlen("plipXXX"), GFP_KERNEL
);
1353 if (!dev_plip
[i
]->name
) {
1354 printk(KERN_ERR
"plip: memory squeeze.\n");
1359 sprintf(dev_plip
[i
]->name
, "plip%d", i
);
/* Temporarily park the parport pointer in priv; plip_init_dev
   replaces it with the allocated net_local. */
1360 dev_plip
[i
]->priv
= pb
;
1361 if (plip_init_dev(dev_plip
[i
],pb
) || register_netdev(dev_plip
[i
])) {
1362 kfree(dev_plip
[i
]->name
);
1373 printk(KERN_INFO
"plip: no devices registered\n");
1381 * compile-command: "gcc -DMODULE -DMODVERSIONS -D__KERNEL__ -Wall -Wstrict-prototypes -O2 -g -fomit-frame-pointer -pipe -c plip.c"