/*
 * PPP async serial channel driver for Linux.
 *
 * Copyright 1999 Paul Mackerras.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 *
 * This driver provides the encapsulation and framing for sending
 * and receiving PPP frames over async serial lines.  It relies on
 * the generic PPP layer to give it frames to send and to process
 * received frames.  It implements the PPP line discipline.
 *
 * Part of the code in this driver was inspired by the old async-only
 * PPP driver, written by Michael Callahan and Al Longyear, and
 * subsequently hacked by Paul Mackerras.
 *
 * ==FILEVERSION 20020125==
 */
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/skbuff.h>
#include <linux/tty.h>
#include <linux/netdevice.h>
#include <linux/poll.h>
#include <linux/ppp_defs.h>
#include <linux/if_ppp.h>
#include <linux/ppp_channel.h>
#include <linux/spinlock.h>
#include <linux/init.h>
#include <asm/uaccess.h>

#define PPP_VERSION	"2.4.2"
/* Structure for storing local state. */
struct asyncppp {
	struct tty_struct *tty;
	unsigned int	flags;
	unsigned int	state;
	unsigned int	rbits;
	int		mru;
	spinlock_t	xmit_lock;
	spinlock_t	recv_lock;
	unsigned long	xmit_flags;
	u32		xaccm[8];
	u32		raccm;
	unsigned int	bytes_sent;
	unsigned int	bytes_rcvd;

	struct sk_buff	*tpkt;
	unsigned char	*optr;
	unsigned char	*olim;
	unsigned long	last_xmit;

	struct sk_buff	*rpkt;

	atomic_t	refcnt;
	struct semaphore dead_sem;
	struct ppp_channel chan;	/* interface to generic ppp layer */
	unsigned char	obuf[OBUFSIZE];
};
/* Bit numbers in xmit_flags */
#define XMIT_WAKEUP	0	/* there may be more for the push loop to do */
#define XMIT_FULL	1	/* ap->tpkt holds a packet awaiting encoding */
#define XMIT_BUSY	2	/* some cpu is inside ppp_async_push() */

/* State bits */
#define SC_TOSS		0x20000000
#define SC_ESCAPE	0x40000000

/* Bits in rbits */
#define SC_RCV_BITS	(SC_RCV_B7_1|SC_RCV_B7_0|SC_RCV_ODDP|SC_RCV_EVNP)
static int flag_time = HZ;
MODULE_PARM(flag_time, "i");
MODULE_PARM_DESC(flag_time, "ppp_async: interval between flagged packets (in clock ticks)");
MODULE_LICENSE("GPL");
static int ppp_async_encode(struct asyncppp *ap);
static int ppp_async_send(struct ppp_channel *chan, struct sk_buff *skb);
static int ppp_async_push(struct asyncppp *ap);
static void ppp_async_flush_output(struct asyncppp *ap);
static void ppp_async_input(struct asyncppp *ap, const unsigned char *buf,
			    char *flags, int count);
static int ppp_async_ioctl(struct ppp_channel *chan, unsigned int cmd,
			   unsigned long arg);
static void async_lcp_peek(struct asyncppp *ap, unsigned char *data,
			   int len, int inbound);

static struct ppp_channel_ops async_ops = {
	ppp_async_send,
	ppp_async_ioctl
};
/*
 * Routines implementing the PPP line discipline.
 */

/*
 * We have a potential race on dereferencing tty->disc_data,
 * because the tty layer provides no locking at all - thus one
 * cpu could be running ppp_asynctty_receive while another
 * calls ppp_asynctty_close, which zeroes tty->disc_data and
 * frees the memory that ppp_asynctty_receive is using.  The best
 * way to fix this is to use a rwlock in the tty struct, but for now
 * we use a single global rwlock for all ttys in ppp line discipline.
 */
static rwlock_t disc_data_lock = RW_LOCK_UNLOCKED;

static struct asyncppp *ap_get(struct tty_struct *tty)
{
	struct asyncppp *ap;

	read_lock(&disc_data_lock);
	ap = tty->disc_data;
	if (ap != NULL)
		atomic_inc(&ap->refcnt);
	read_unlock(&disc_data_lock);
	return ap;
}

static void ap_put(struct asyncppp *ap)
{
	if (atomic_dec_and_test(&ap->refcnt))
		up(&ap->dead_sem);
}
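/*
 * Illustrative sketch (not part of the original source): the pattern the
 * ppp_asynctty_* callbacks below follow.  example_tty_callback() is a
 * made-up name, not a real line-discipline hook.
 */
static void example_tty_callback(struct tty_struct *tty)
{
	struct asyncppp *ap = ap_get(tty);	/* take a counted reference */

	if (ap == NULL)
		return;		/* discipline already detached from this tty */
	/* ... ap is safe to use here; ppp_asynctty_close() waits for us ... */
	ap_put(ap);		/* the last ap_put() after close releases dead_sem */
}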
/*
 * Called when a tty is put into PPP line discipline.
 */
static int
ppp_asynctty_open(struct tty_struct *tty)
{
	struct asyncppp *ap;
	int err;

	err = -ENOMEM;
	ap = kmalloc(sizeof(*ap), GFP_KERNEL);
	if (ap == NULL)
		goto out;

	/* initialize the asyncppp structure */
	memset(ap, 0, sizeof(*ap));
	spin_lock_init(&ap->xmit_lock);
	spin_lock_init(&ap->recv_lock);
	ap->xaccm[3] = 0x60000000U;

	atomic_set(&ap->refcnt, 1);
	init_MUTEX_LOCKED(&ap->dead_sem);

	ap->chan.private = ap;
	ap->chan.ops = &async_ops;
	ap->chan.mtu = PPP_MRU;
	err = ppp_register_channel(&ap->chan);
	if (err)
		goto out_free;

	tty->disc_data = ap;
	return 0;

 out_free:
	kfree(ap);
 out:
	return err;
}
/*
 * Called when the tty is put into another line discipline
 * or it hangs up.  We have to wait for any cpu currently
 * executing in any of the other ppp_asynctty_* routines to
 * finish before we can call ppp_unregister_channel and free
 * the asyncppp struct.  This routine must be called from
 * process context, not interrupt or softirq context.
 */
static void
ppp_asynctty_close(struct tty_struct *tty)
{
	struct asyncppp *ap;

	write_lock(&disc_data_lock);
	ap = tty->disc_data;
	tty->disc_data = NULL;
	write_unlock(&disc_data_lock);
	if (ap == NULL)
		return;
	/*
	 * We have now ensured that nobody can start using ap from now
	 * on, but we have to wait for all existing users to finish.
	 * Note that ppp_unregister_channel ensures that no calls to
	 * our channel ops (i.e. ppp_async_send/ioctl) are in progress
	 * by the time it returns.
	 */
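	/*
	 * ap->refcnt starts at 1 in ppp_asynctty_open(), so the
	 * atomic_dec_and_test() below drops the tty's own reference; if any
	 * other cpu still holds a reference from ap_get(), we sleep on
	 * dead_sem until the final ap_put() does up(&ap->dead_sem).
	 */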
	if (!atomic_dec_and_test(&ap->refcnt))
		down(&ap->dead_sem);

	ppp_unregister_channel(&ap->chan);
/*
 * Read does nothing - no data is ever available this way.
 * Pppd reads and writes packets via /dev/ppp instead.
 */
static ssize_t
ppp_asynctty_read(struct tty_struct *tty, struct file *file,
		  unsigned char *buf, size_t count)
{
	return -EAGAIN;
}
/*
 * Write on the tty does nothing, the packets all come in
 * from the ppp generic stuff.
 */
static ssize_t
ppp_asynctty_write(struct tty_struct *tty, struct file *file,
		   const unsigned char *buf, size_t count)
{
	return -EAGAIN;
}
static int
ppp_asynctty_ioctl(struct tty_struct *tty, struct file *file,
		   unsigned int cmd, unsigned long arg)
{
	struct asyncppp *ap = ap_get(tty);
	int err, val;

	if (ap == NULL)
		return -ENXIO;
	err = -EFAULT;
	switch (cmd) {
	case PPPIOCGCHAN:
		if (put_user(ppp_channel_index(&ap->chan), (int *) arg))
			break;
		err = 0;
		break;

	case PPPIOCGUNIT:
		if (put_user(ppp_unit_number(&ap->chan), (int *) arg))
			break;
		err = 0;
		break;

	case TCGETS:
	case TCGETA:
		err = n_tty_ioctl(tty, file, cmd, arg);
		break;

	case TCFLSH:
		/* flush our buffers and the serial port's buffer */
		if (arg == TCIOFLUSH || arg == TCOFLUSH)
			ppp_async_flush_output(ap);
		err = n_tty_ioctl(tty, file, cmd, arg);
		break;

	case FIONREAD:
		val = 0;
		if (put_user(val, (int *) arg))
			break;
		err = 0;
		break;

	default:
		err = -ENOIOCTLCMD;
	}

	ap_put(ap);
	return err;
}
/* No kernel lock - fine */
static unsigned int
ppp_asynctty_poll(struct tty_struct *tty, struct file *file, poll_table *wait)
{
	return 0;
}

static int
ppp_asynctty_room(struct tty_struct *tty)
{
	return 65535;
}
static void
ppp_asynctty_receive(struct tty_struct *tty, const unsigned char *buf,
		     char *flags, int count)
{
	struct asyncppp *ap = ap_get(tty);

	if (ap == NULL)
		return;
	spin_lock_bh(&ap->recv_lock);
	ppp_async_input(ap, buf, flags, count);
	spin_unlock_bh(&ap->recv_lock);
	ap_put(ap);

	if (test_and_clear_bit(TTY_THROTTLED, &tty->flags)
	    && tty->driver->unthrottle)
		tty->driver->unthrottle(tty);
}
static void
ppp_asynctty_wakeup(struct tty_struct *tty)
{
	struct asyncppp *ap = ap_get(tty);

	clear_bit(TTY_DO_WRITE_WAKEUP, &tty->flags);
	if (ap == NULL)
		return;
	if (ppp_async_push(ap))
		ppp_output_wakeup(&ap->chan);
	ap_put(ap);
}
static struct tty_ldisc ppp_ldisc = {
	.owner		= THIS_MODULE,
	.magic		= TTY_LDISC_MAGIC,
	.name		= "ppp",
	.open		= ppp_asynctty_open,
	.close		= ppp_asynctty_close,
	.read		= ppp_asynctty_read,
	.write		= ppp_asynctty_write,
	.ioctl		= ppp_asynctty_ioctl,
	.poll		= ppp_asynctty_poll,
	.receive_room	= ppp_asynctty_room,
	.receive_buf	= ppp_asynctty_receive,
	.write_wakeup	= ppp_asynctty_wakeup,
};
static int __init
ppp_async_init(void)
{
	int err;

	err = tty_register_ldisc(N_PPP, &ppp_ldisc);
	if (err != 0)
		printk(KERN_ERR "PPP_async: error %d registering line disc.\n",
		       err);
	return err;
}
/*
 * The following routines provide the PPP channel interface.
 */
static int
ppp_async_ioctl(struct ppp_channel *chan, unsigned int cmd, unsigned long arg)
{
	struct asyncppp *ap = chan->private;
	int err, val;
	u32 accm[8];

	err = -EFAULT;
	switch (cmd) {
	case PPPIOCGFLAGS:
		val = ap->flags | ap->rbits;
		if (put_user(val, (int *) arg))
			break;
		err = 0;
		break;
	case PPPIOCSFLAGS:
		if (get_user(val, (int *) arg))
			break;
		ap->flags = val & ~SC_RCV_BITS;
		spin_lock_bh(&ap->recv_lock);
		ap->rbits = val & SC_RCV_BITS;
		spin_unlock_bh(&ap->recv_lock);
		err = 0;
		break;

	case PPPIOCGASYNCMAP:
		if (put_user(ap->xaccm[0], (u32 *) arg))
			break;
		err = 0;
		break;
	case PPPIOCSASYNCMAP:
		if (get_user(ap->xaccm[0], (u32 *) arg))
			break;
		err = 0;
		break;

	case PPPIOCGRASYNCMAP:
		if (put_user(ap->raccm, (u32 *) arg))
			break;
		err = 0;
		break;
	case PPPIOCSRASYNCMAP:
		if (get_user(ap->raccm, (u32 *) arg))
			break;
		err = 0;
		break;

	case PPPIOCGXASYNCMAP:
		if (copy_to_user((void __user *) arg, ap->xaccm, sizeof(ap->xaccm)))
			break;
		err = 0;
		break;
	case PPPIOCSXASYNCMAP:
		if (copy_from_user(accm, (void __user *) arg, sizeof(accm)))
			break;
		accm[2] &= ~0x40000000U;	/* can't escape 0x5e */
		accm[3] |= 0x60000000U;		/* must escape 0x7d, 0x7e */
		memcpy(ap->xaccm, accm, sizeof(ap->xaccm));
		err = 0;
		break;

	case PPPIOCGMRU:
		if (put_user(ap->mru, (int *) arg))
			break;
		err = 0;
		break;
	case PPPIOCSMRU:
		if (get_user(val, (int *) arg))
			break;
		ap->mru = val;
		err = 0;
		break;
	}

	return err;
}
/*
 * Procedures for encapsulation and framing.
 */

u16 ppp_crc16_table[256] = {
	0x0000, 0x1189, 0x2312, 0x329b, 0x4624, 0x57ad, 0x6536, 0x74bf,
	0x8c48, 0x9dc1, 0xaf5a, 0xbed3, 0xca6c, 0xdbe5, 0xe97e, 0xf8f7,
	0x1081, 0x0108, 0x3393, 0x221a, 0x56a5, 0x472c, 0x75b7, 0x643e,
	0x9cc9, 0x8d40, 0xbfdb, 0xae52, 0xdaed, 0xcb64, 0xf9ff, 0xe876,
	0x2102, 0x308b, 0x0210, 0x1399, 0x6726, 0x76af, 0x4434, 0x55bd,
	0xad4a, 0xbcc3, 0x8e58, 0x9fd1, 0xeb6e, 0xfae7, 0xc87c, 0xd9f5,
	0x3183, 0x200a, 0x1291, 0x0318, 0x77a7, 0x662e, 0x54b5, 0x453c,
	0xbdcb, 0xac42, 0x9ed9, 0x8f50, 0xfbef, 0xea66, 0xd8fd, 0xc974,
	0x4204, 0x538d, 0x6116, 0x709f, 0x0420, 0x15a9, 0x2732, 0x36bb,
	0xce4c, 0xdfc5, 0xed5e, 0xfcd7, 0x8868, 0x99e1, 0xab7a, 0xbaf3,
	0x5285, 0x430c, 0x7197, 0x601e, 0x14a1, 0x0528, 0x37b3, 0x263a,
	0xdecd, 0xcf44, 0xfddf, 0xec56, 0x98e9, 0x8960, 0xbbfb, 0xaa72,
	0x6306, 0x728f, 0x4014, 0x519d, 0x2522, 0x34ab, 0x0630, 0x17b9,
	0xef4e, 0xfec7, 0xcc5c, 0xddd5, 0xa96a, 0xb8e3, 0x8a78, 0x9bf1,
	0x7387, 0x620e, 0x5095, 0x411c, 0x35a3, 0x242a, 0x16b1, 0x0738,
	0xffcf, 0xee46, 0xdcdd, 0xcd54, 0xb9eb, 0xa862, 0x9af9, 0x8b70,
	0x8408, 0x9581, 0xa71a, 0xb693, 0xc22c, 0xd3a5, 0xe13e, 0xf0b7,
	0x0840, 0x19c9, 0x2b52, 0x3adb, 0x4e64, 0x5fed, 0x6d76, 0x7cff,
	0x9489, 0x8500, 0xb79b, 0xa612, 0xd2ad, 0xc324, 0xf1bf, 0xe036,
	0x18c1, 0x0948, 0x3bd3, 0x2a5a, 0x5ee5, 0x4f6c, 0x7df7, 0x6c7e,
	0xa50a, 0xb483, 0x8618, 0x9791, 0xe32e, 0xf2a7, 0xc03c, 0xd1b5,
	0x2942, 0x38cb, 0x0a50, 0x1bd9, 0x6f66, 0x7eef, 0x4c74, 0x5dfd,
	0xb58b, 0xa402, 0x9699, 0x8710, 0xf3af, 0xe226, 0xd0bd, 0xc134,
	0x39c3, 0x284a, 0x1ad1, 0x0b58, 0x7fe7, 0x6e6e, 0x5cf5, 0x4d7c,
	0xc60c, 0xd785, 0xe51e, 0xf497, 0x8028, 0x91a1, 0xa33a, 0xb2b3,
	0x4a44, 0x5bcd, 0x6956, 0x78df, 0x0c60, 0x1de9, 0x2f72, 0x3efb,
	0xd68d, 0xc704, 0xf59f, 0xe416, 0x90a9, 0x8120, 0xb3bb, 0xa232,
	0x5ac5, 0x4b4c, 0x79d7, 0x685e, 0x1ce1, 0x0d68, 0x3ff3, 0x2e7a,
	0xe70e, 0xf687, 0xc41c, 0xd595, 0xa12a, 0xb0a3, 0x8238, 0x93b1,
	0x6b46, 0x7acf, 0x4854, 0x59dd, 0x2d62, 0x3ceb, 0x0e70, 0x1ff9,
	0xf78f, 0xe606, 0xd49d, 0xc514, 0xb1ab, 0xa022, 0x92b9, 0x8330,
	0x7bc7, 0x6a4e, 0x58d5, 0x495c, 0x3de3, 0x2c6a, 0x1ef1, 0x0f78
};
EXPORT_SYMBOL(ppp_crc16_table);
#define fcstab	ppp_crc16_table		/* for PPP_FCS macro */
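/*
 * Illustrative sketch (not part of the original driver): how the table
 * above is consumed through the PPP_FCS() macro from <linux/ppp_defs.h>,
 * which computes (((fcs) >> 8) ^ fcstab[((fcs) ^ (c)) & 0xff]).  The
 * sender runs the FCS over the unescaped frame and appends its ones'
 * complement, low byte first; a receiver that runs the FCS over the data
 * plus the two trailing FCS bytes ends up with PPP_GOODFCS (0xf0b8).
 */
static int example_fcs_ok(const unsigned char *p, int len)
{
	u16 fcs = PPP_INITFCS;		/* 0xffff */

	while (len-- > 0)
		fcs = PPP_FCS(fcs, *p++);
	return fcs == PPP_GOODFCS;
}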
/*
 * Procedure to encode the data for async serial transmission.
 * Does octet stuffing (escaping), puts the address/control bytes
 * on if A/C compression is disabled, and does protocol compression.
 * Assumes ap->tpkt != 0 on entry.
 * Returns 1 if we finished the current frame, 0 otherwise.
 */

#define PUT_BYTE(ap, buf, c, islcp)	do {		\
	if ((islcp && c < 0x20) || (ap->xaccm[c >> 5] & (1 << (c & 0x1f)))) {\
		*buf++ = PPP_ESCAPE;			\
		*buf++ = c ^ 0x20;			\
	} else						\
		*buf++ = c;				\
} while (0)
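/*
 * Worked example (illustrative): with the default xaccm and A/C and
 * protocol compression disabled, an IP frame (protocol 0x0021) whose
 * single data byte is 0x7e goes out as
 *
 *	7e ff 03 00 21 7d 5e <fcs-lo> <fcs-hi> 7e
 *
 * 0x7e falls in xaccm[3] (bit 0x40000000), so PUT_BYTE emits PPP_ESCAPE
 * (0x7d) followed by 0x7e ^ 0x20 = 0x5e; the two FCS bytes also pass
 * through PUT_BYTE and may themselves be escaped.
 */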
static int
ppp_async_encode(struct asyncppp *ap)
{
	int fcs, i, count, c, proto;
	unsigned char *buf, *buflim;
	unsigned char *data;
	int islcp;

	data = ap->tpkt->data;
	count = ap->tpkt->len;

	proto = (data[0] << 8) + data[1];

	/*
	 * LCP packets with code values between 1 (configure-request)
	 * and 7 (code-reject) must be sent as though no options
	 * had been negotiated.
	 */
	islcp = proto == PPP_LCP && 1 <= data[2] && data[2] <= 7;
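	/*
	 * For reference (RFC 1661), the code values covered by the test
	 * above are: 1 Configure-Request, 2 Configure-Ack, 3 Configure-Nak,
	 * 4 Configure-Reject, 5 Terminate-Request, 6 Terminate-Ack,
	 * 7 Code-Reject.
	 */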
	if (islcp)
		async_lcp_peek(ap, data, count, 0);

	/*
	 * Start of a new packet - insert the leading FLAG
	 * character if necessary.
	 */
	if (islcp || flag_time == 0
	    || jiffies - ap->last_xmit >= flag_time)
		*buf++ = PPP_FLAG;
	ap->last_xmit = jiffies;
	fcs = PPP_INITFCS;
	/*
	 * Put in the address/control bytes if necessary
	 */
	if ((ap->flags & SC_COMP_AC) == 0 || islcp) {
		PUT_BYTE(ap, buf, 0xff, islcp);
		fcs = PPP_FCS(fcs, 0xff);
		PUT_BYTE(ap, buf, 0x03, islcp);
		fcs = PPP_FCS(fcs, 0x03);
	}

	/*
	 * Once we put in the last byte, we need to put in the FCS
	 * and closing flag, so make sure there is at least 7 bytes
	 * of free space in the output buffer.
	 */
	buflim = ap->obuf + OBUFSIZE - 6;
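	/*
	 * Worst case while stuffing: the data byte itself may be escaped
	 * (2 bytes), and after the last data byte we still emit the two
	 * FCS bytes, each possibly escaped (4 bytes), plus the closing
	 * flag (1 byte): 7 bytes in all.  Stopping while buf is below
	 * obuf + OBUFSIZE - 6 leaves at least that much room.
	 */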
	while (i < count && buf < buflim) {
		c = data[i++];
		if (i == 1 && c == 0 && (ap->flags & SC_COMP_PROT))
			continue;	/* compress protocol field */
		fcs = PPP_FCS(fcs, c);
		PUT_BYTE(ap, buf, c, islcp);
	}
	/*
	 * Remember where we are up to in this packet.
	 */

	/*
	 * We have finished the packet.  Add the FCS and flag.
	 */
	fcs = ~fcs;
	c = fcs & 0xff;
	PUT_BYTE(ap, buf, c, islcp);
	c = (fcs >> 8) & 0xff;
	PUT_BYTE(ap, buf, c, islcp);
	*buf++ = PPP_FLAG;
	ap->olim = buf;

	kfree_skb(ap->tpkt);
	ap->tpkt = NULL;
	return 1;
}
/*
 * Transmit-side routines.
 */

/*
 * Send a packet to the peer over an async tty line.
 * Returns 1 iff the packet was accepted.
 * If the packet was not accepted, we will call ppp_output_wakeup
 * at some later time.
 */
static int
ppp_async_send(struct ppp_channel *chan, struct sk_buff *skb)
{
	struct asyncppp *ap = chan->private;

	ppp_async_push(ap);

	if (test_and_set_bit(XMIT_FULL, &ap->xmit_flags))
		return 0;	/* already full */
	ap->tpkt = skb;
	ppp_async_push(ap);
	return 1;
}
/*
 * Push as much data as possible out to the tty.
 */
static int
ppp_async_push(struct asyncppp *ap)
{
	int avail, sent, done = 0;
	struct tty_struct *tty = ap->tty;
	int tty_stuffed = 0;

	set_bit(XMIT_WAKEUP, &ap->xmit_flags);
	/*
	 * We can get called recursively here if the tty write
	 * function calls our wakeup function.  This can happen
	 * for example on a pty with both the master and slave
	 * set to PPP line discipline.
	 * We use the XMIT_BUSY bit to detect this and get out,
	 * leaving the XMIT_WAKEUP bit set to tell the other
	 * instance that it may now be able to write more.
	 */
	if (test_and_set_bit(XMIT_BUSY, &ap->xmit_flags))
		return 0;
	spin_lock_bh(&ap->xmit_lock);
	for (;;) {
		if (test_and_clear_bit(XMIT_WAKEUP, &ap->xmit_flags))
			tty_stuffed = 0;
		if (!tty_stuffed && ap->optr < ap->olim) {
			avail = ap->olim - ap->optr;
			set_bit(TTY_DO_WRITE_WAKEUP, &tty->flags);
			sent = tty->driver->write(tty, 0, ap->optr, avail);
			if (sent < 0)
				goto flush;	/* error, e.g. loss of CD */
			ap->optr += sent;
			if (sent < avail)
				tty_stuffed = 1;
			continue;
		}
		if (ap->optr >= ap->olim && ap->tpkt != 0) {
			if (ppp_async_encode(ap)) {
				/* finished processing ap->tpkt */
				clear_bit(XMIT_FULL, &ap->xmit_flags);
				done = 1;
			}
			continue;
		}
		/*
		 * We haven't made any progress this time around.
		 * Clear XMIT_BUSY to let other callers in, but
		 * after doing so we have to check if anyone set
		 * XMIT_WAKEUP since we last checked it.  If they
		 * did, we should try again to set XMIT_BUSY and go
		 * around again in case XMIT_BUSY was still set when
		 * the other caller tried.
		 */
		clear_bit(XMIT_BUSY, &ap->xmit_flags);
		/* any more work to do? if not, exit the loop */
		if (!(test_bit(XMIT_WAKEUP, &ap->xmit_flags)
		      || (!tty_stuffed && ap->tpkt != 0)))
			break;
		/* more work to do, see if we can do it now */
		if (test_and_set_bit(XMIT_BUSY, &ap->xmit_flags))
			break;
	}
	spin_unlock_bh(&ap->xmit_lock);
	return done;

 flush:
	clear_bit(XMIT_BUSY, &ap->xmit_flags);
	if (ap->tpkt != 0) {
		kfree_skb(ap->tpkt);
		ap->tpkt = 0;
		clear_bit(XMIT_FULL, &ap->xmit_flags);
		done = 1;
	}
	ap->optr = ap->olim = ap->obuf;
	spin_unlock_bh(&ap->xmit_lock);
	return done;
}
/*
 * Flush output from our internal buffers.
 * Called for the TCFLSH ioctl.
 */
static void
ppp_async_flush_output(struct asyncppp *ap)
{
	int done = 0;

	spin_lock_bh(&ap->xmit_lock);
	ap->optr = ap->olim;
	if (ap->tpkt != NULL) {
		kfree_skb(ap->tpkt);
		ap->tpkt = NULL;
		clear_bit(XMIT_FULL, &ap->xmit_flags);
		done = 1;
	}
	spin_unlock_bh(&ap->xmit_lock);
	if (done)
		ppp_output_wakeup(&ap->chan);
}
/*
 * Receive-side routines.
 */

/* see how many ordinary chars there are at the start of buf */
static int
scan_ordinary(struct asyncppp *ap, const unsigned char *buf, int count)
{
	int i, c;

	for (i = 0; i < count; ++i) {
		c = buf[i];
		if (c == PPP_ESCAPE || c == PPP_FLAG
		    || (c < 0x20 && (ap->raccm & (1 << c)) != 0))
			break;
	}
	return i;
}
/* called when a flag is seen - do end-of-packet processing */
static void
process_input_packet(struct asyncppp *ap)
{
	struct sk_buff *skb;
	unsigned char *p;
	unsigned int len, fcs, proto;
	int code = 0;

	skb = ap->rpkt;
	ap->rpkt = NULL;
	if ((ap->state & (SC_TOSS | SC_ESCAPE)) || skb == 0) {
		ap->state &= ~(SC_TOSS | SC_ESCAPE);
		if (skb != 0)
			kfree_skb(skb);
		return;
	}

	/* check the FCS */
	p = skb->data;
	len = skb->len;
	if (len < 3)
		goto err;	/* too short */
	fcs = PPP_INITFCS;
	for (; len > 0; --len)
		fcs = PPP_FCS(fcs, *p++);
	if (fcs != PPP_GOODFCS)
		goto err;	/* bad FCS */
	skb_trim(skb, skb->len - 2);

	/* check for address/control and protocol compression */
	p = skb->data;
	if (p[0] == PPP_ALLSTATIONS && p[1] == PPP_UI) {
		/* chop off address/control */
		p = skb_pull(skb, 2);
	}

	proto = p[0];
	if (proto & 1) {
		/* protocol is compressed */
		skb_push(skb, 1)[0] = 0;
	} else {
		proto = (proto << 8) + p[1];
	}
	if (proto == PPP_LCP)
		async_lcp_peek(ap, p, skb->len, 1);

	/* all OK, give it to the generic layer */
	ppp_input(&ap->chan, skb);
	return;

 err:
	kfree_skb(skb);
	ppp_input_error(&ap->chan, code);
}
static void
input_error(struct asyncppp *ap, int code)
{
	ap->state |= SC_TOSS;
	ppp_input_error(&ap->chan, code);
}
/* called when the tty driver has data for us. */
static void
ppp_async_input(struct asyncppp *ap, const unsigned char *buf,
		char *flags, int count)
{
	struct sk_buff *skb;
	int c, i, j, n, s, f;
	unsigned char *sp;

	/* update bits used for 8-bit cleanness detection */
	if (~ap->rbits & SC_RCV_BITS) {
		s = 0;
		for (i = 0; i < count; ++i) {
			c = buf[i];
			if (flags != 0 && flags[i] != 0)
				continue;
			s |= (c & 0x80)? SC_RCV_B7_1: SC_RCV_B7_0;
			/*
			 * Fold the byte to a nibble and look up its parity
			 * in the 16-bit constant 0x6996, which has bit c set
			 * exactly when c (0..15) has odd parity.
			 */
			c = ((c >> 4) ^ c) & 0xf;
			s |= (0x6996 & (1 << c))? SC_RCV_ODDP: SC_RCV_EVNP;
		}
	}
	/* scan through and see how many chars we can do in bulk */
	if ((ap->state & SC_ESCAPE) && buf[0] == PPP_ESCAPE)
		n = 1;
	else
		n = scan_ordinary(ap, buf, count);

	f = 0;
	if (flags != 0 && (ap->state & SC_TOSS) == 0) {
		/* check the flags to see if any char had an error */
		for (j = 0; j < n; ++j)
			if ((f = flags[j]) != 0)
				break;
	}
	if (f != 0) {
		/* start tossing this packet */
		input_error(ap, f);

	} else if (n > 0 && (ap->state & SC_TOSS) == 0) {
		/* stuff the chars in the skb */
		skb = ap->rpkt;
		if (skb == NULL) {
			skb = dev_alloc_skb(ap->mru + PPP_HDRLEN + 2);
			if (skb == NULL)
				goto nomem;
			ap->rpkt = skb;
		}

		/* Try to get the payload 4-byte aligned */
		if (buf[0] != PPP_ALLSTATIONS)
			skb_reserve(skb, 2 + (buf[0] & 1));

		if (n > skb_tailroom(skb)) {
			/* packet overflowed MRU */
			ap->state |= SC_TOSS;
		} else {
			sp = skb_put(skb, n);
			memcpy(sp, buf, n);
			if (ap->state & SC_ESCAPE) {
				sp[0] ^= 0x20;
				ap->state &= ~SC_ESCAPE;
			}
		}
	}

	if (n >= count)
		return;

	c = buf[n];
	if (c == PPP_FLAG) {
		process_input_packet(ap);
	} else if (c == PPP_ESCAPE) {
		ap->state |= SC_ESCAPE;
	}
	/* otherwise it's a char in the recv ACCM */
	return;

 nomem:
	printk(KERN_ERR "PPPasync: no memory (input pkt)\n");
	ap->state |= SC_TOSS;
}
/*
 * We look at LCP frames going past so that we can notice
 * and react to the LCP configure-ack from the peer.
 * In the situation where the peer has been sent a configure-ack
 * already, LCP is up once it has sent its configure-ack
 * so the immediately following packet can be sent with the
 * configured LCP options.  This allows us to process the following
 * packet correctly without pppd needing to respond quickly.
 *
 * We only respond to the received configure-ack if we have just
 * sent a configure-request, and the configure-ack contains the
 * same data (this is checked using a 16-bit crc of the data).
 */
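/*
 * Illustrative example: an LCP Configure-Ack carrying MRU 1500 and
 * asyncmap 0x000a0000 arrives here as
 *
 *	c0 21  02 01 00 0e  01 04 05 dc  02 06 00 0a 00 00
 *	proto  code id len  MRU option   ASYNCMAP option
 *
 * async_lcp_peek() skips the two protocol bytes, checks the code and
 * length fields, and then walks the (type, length, value) options in
 * the loop below.
 */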
#define CONFREQ		1	/* LCP code field values */
#define CONFACK		2
#define LCP_MRU		1	/* LCP option numbers */
#define LCP_ASYNCMAP	2
static void async_lcp_peek(struct asyncppp *ap, unsigned char *data,
			   int len, int inbound)
{
	int dlen, fcs, i, code;
	u32 val;

	data += 2;		/* skip protocol bytes */
	len -= 2;
	if (len < 4)		/* 4 = code, ID, length */
		return;
	code = data[0];
	if (code != CONFACK && code != CONFREQ)
		return;
	dlen = (data[2] << 8) + data[3];
	if (len < dlen)
		return;		/* packet got truncated or length is bogus */

	if (code == (inbound? CONFACK: CONFREQ)) {
		/*
		 * sent confreq or received confack:
		 * calculate the crc of the data from the ID field on.
		 */
		fcs = PPP_INITFCS;
		for (i = 1; i < dlen; ++i)
			fcs = PPP_FCS(fcs, data[i]);

		/* outbound confreq - remember the crc for later */

		/* received confack, check the crc */

	} else if (inbound)
		return;		/* not interested in received confreq */

	/* process the options in the confack */
	data += 4;
	dlen -= 4;
	/* data[0] is code, data[1] is length */
	while (dlen >= 2 && dlen >= data[1]) {
		switch (data[0]) {
		case LCP_MRU:
			val = (data[2] << 8) + data[3];
			if (inbound)
				ap->mru = val;
			else
				ap->chan.mtu = val;
			break;
		case LCP_ASYNCMAP:
			val = (data[2] << 24) + (data[3] << 16)
				+ (data[4] << 8) + data[5];
			if (inbound)
				ap->raccm = val;
			else
				ap->xaccm[0] = val;
			break;
		}
		dlen -= data[1];
		data += data[1];
	}
}
static void __exit
ppp_async_cleanup(void)
{
	if (tty_register_ldisc(N_PPP, NULL) != 0)
		printk(KERN_ERR "failed to unregister PPP line discipline\n");
}

module_init(ppp_async_init);
module_exit(ppp_async_cleanup);