/*
 * PPP async serial channel driver for Linux.
 *
 * Copyright 1999 Paul Mackerras.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 *
 * This driver provides the encapsulation and framing for sending
 * and receiving PPP frames over async serial lines.  It relies on
 * the generic PPP layer to give it frames to send and to process
 * received frames.  It implements the PPP line discipline.
 *
 * Part of the code in this driver was inspired by the old async-only
 * PPP driver, written by Michael Callahan and Al Longyear, and
 * subsequently hacked by Paul Mackerras.
 */

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/skbuff.h>
#include <linux/tty.h>
#include <linux/netdevice.h>
#include <linux/poll.h>
#include <linux/crc-ccitt.h>
#include <linux/ppp_defs.h>
#include <linux/if_ppp.h>
#include <linux/ppp_channel.h>
#include <linux/spinlock.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/jiffies.h>
#include <linux/slab.h>
#include <asm/unaligned.h>
#include <asm/uaccess.h>
#include <asm/string.h>

#define PPP_VERSION	"2.4.2"

#define OBUFSIZE	4096

/* Structure for storing local state. */
struct asyncppp {
	struct tty_struct *tty;
	unsigned int	flags;
	unsigned int	state;
	unsigned int	rbits;
	int		mru;
	spinlock_t	xmit_lock;
	spinlock_t	recv_lock;
	unsigned long	xmit_flags;
	u32		xaccm[8];
	u32		raccm;
	unsigned int	bytes_sent;
	unsigned int	bytes_rcvd;

	struct sk_buff	*tpkt;
	int		tpkt_pos;
	u16		tfcs;
	unsigned char	*optr;
	unsigned char	*olim;
	unsigned long	last_xmit;

	struct sk_buff	*rpkt;
	int		lcp_fcs;
	struct sk_buff_head rqueue;

	struct tasklet_struct tsk;

	atomic_t	refcnt;
	struct semaphore dead_sem;
	struct ppp_channel chan;	/* interface to generic ppp layer */
	unsigned char	obuf[OBUFSIZE];
};

/* Bit numbers in xmit_flags */
#define XMIT_WAKEUP	0
#define XMIT_FULL	1
#define XMIT_BUSY	2

/* State bits */
#define SC_TOSS		1
#define SC_ESCAPE	2
#define SC_PREV_ERROR	4

/* Bits in rbits */
#define SC_RCV_BITS	(SC_RCV_B7_1|SC_RCV_B7_0|SC_RCV_ODDP|SC_RCV_EVNP)

static int flag_time = HZ;
module_param(flag_time, int, 0);
MODULE_PARM_DESC(flag_time, "ppp_async: interval between flagged packets (in clock ticks)");
MODULE_LICENSE("GPL");
MODULE_ALIAS_LDISC(N_PPP);

/*
 * Prototypes.
 */
static int ppp_async_encode(struct asyncppp *ap);
static int ppp_async_send(struct ppp_channel *chan, struct sk_buff *skb);
static int ppp_async_push(struct asyncppp *ap);
static void ppp_async_flush_output(struct asyncppp *ap);
static void ppp_async_input(struct asyncppp *ap, const unsigned char *buf,
			    char *flags, int count);
static int ppp_async_ioctl(struct ppp_channel *chan, unsigned int cmd,
			   unsigned long arg);
static void ppp_async_process(unsigned long arg);

static void async_lcp_peek(struct asyncppp *ap, unsigned char *data,
			   int len, int inbound);

static const struct ppp_channel_ops async_ops = {
	.start_xmit = ppp_async_send,
	.ioctl      = ppp_async_ioctl,
};

/*
 * Routines implementing the PPP line discipline.
 */

/*
 * We have a potential race on dereferencing tty->disc_data,
 * because the tty layer provides no locking at all - thus one
 * cpu could be running ppp_asynctty_receive while another
 * calls ppp_asynctty_close, which zeroes tty->disc_data and
 * frees the memory that ppp_asynctty_receive is using.  The best
 * way to fix this is to use a rwlock in the tty struct, but for now
 * we use a single global rwlock for all ttys in ppp line discipline.
 *
 * FIXME: this is no longer true.  The _close path for the ldisc is
 * now guaranteed to be sane.
 */
static DEFINE_RWLOCK(disc_data_lock);

static struct asyncppp *ap_get(struct tty_struct *tty)
{
	struct asyncppp *ap;

	read_lock(&disc_data_lock);
	ap = tty->disc_data;
	if (ap != NULL)
		atomic_inc(&ap->refcnt);
	read_unlock(&disc_data_lock);
	return ap;
}

static void ap_put(struct asyncppp *ap)
{
	if (atomic_dec_and_test(&ap->refcnt))
		up(&ap->dead_sem);
}

/*
 * Called when a tty is put into PPP line discipline.  Called in process
 * context.
 */
static int
ppp_asynctty_open(struct tty_struct *tty)
{
	struct asyncppp *ap;
	int err;
	int speed;

	if (tty->ops->write == NULL)
		return -EOPNOTSUPP;

	err = -ENOMEM;
	ap = kzalloc(sizeof(*ap), GFP_KERNEL);
	if (!ap)
		goto out;

	/* initialize the asyncppp structure */
	ap->tty = tty;
	ap->mru = PPP_MRU;
	spin_lock_init(&ap->xmit_lock);
	spin_lock_init(&ap->recv_lock);
	ap->xaccm[0] = ~0U;
	ap->xaccm[3] = 0x60000000U;
	ap->raccm = ~0U;
	ap->optr = ap->obuf;
	ap->olim = ap->obuf;
	ap->lcp_fcs = -1;

	skb_queue_head_init(&ap->rqueue);
	tasklet_init(&ap->tsk, ppp_async_process, (unsigned long) ap);

	atomic_set(&ap->refcnt, 1);
	sema_init(&ap->dead_sem, 0);

	ap->chan.private = ap;
	ap->chan.ops = &async_ops;
	ap->chan.mtu = PPP_MRU;
	speed = tty_get_baud_rate(tty);
	ap->chan.speed = speed;
	err = ppp_register_channel(&ap->chan);
	if (err)
		goto out_free;

	tty->disc_data = ap;
	tty->receive_room = 65536;
	return 0;

 out_free:
	kfree(ap);
 out:
	return err;
}

/*
 * Called when the tty is put into another line discipline
 * or it hangs up.  We have to wait for any cpu currently
 * executing in any of the other ppp_asynctty_* routines to
 * finish before we can call ppp_unregister_channel and free
 * the asyncppp struct.  This routine must be called from
 * process context, not interrupt or softirq context.
 */
static void
ppp_asynctty_close(struct tty_struct *tty)
{
	struct asyncppp *ap;

	write_lock_irq(&disc_data_lock);
	ap = tty->disc_data;
	tty->disc_data = NULL;
	write_unlock_irq(&disc_data_lock);
	if (!ap)
		return;

	/*
	 * We have now ensured that nobody can start using ap from now
	 * on, but we have to wait for all existing users to finish.
	 * Note that ppp_unregister_channel ensures that no calls to
	 * our channel ops (i.e. ppp_async_send/ioctl) are in progress
	 * by the time it returns.
	 */
	if (!atomic_dec_and_test(&ap->refcnt))
		down(&ap->dead_sem);
	tasklet_kill(&ap->tsk);

	ppp_unregister_channel(&ap->chan);
	kfree_skb(ap->rpkt);
	skb_queue_purge(&ap->rqueue);
	kfree_skb(ap->tpkt);
	kfree(ap);
}

/*
 * Called on tty hangup in process context.
 *
 * Wait for I/O to driver to complete and unregister PPP channel.
 * This is already done by the close routine, so just call that.
 */
static int ppp_asynctty_hangup(struct tty_struct *tty)
{
	ppp_asynctty_close(tty);
	return 0;
}

/*
 * Read does nothing - no data is ever available this way.
 * Pppd reads and writes packets via /dev/ppp instead.
 */
static ssize_t
ppp_asynctty_read(struct tty_struct *tty, struct file *file,
		  unsigned char __user *buf, size_t count)
{
	return -EAGAIN;
}

/*
 * Write on the tty does nothing, the packets all come in
 * from the ppp generic stuff.
 */
static ssize_t
ppp_asynctty_write(struct tty_struct *tty, struct file *file,
		   const unsigned char *buf, size_t count)
{
	return -EAGAIN;
}

/*
 * Called in process context only.  May be re-entered by multiple
 * ioctl calling threads.
 */
static int
ppp_asynctty_ioctl(struct tty_struct *tty, struct file *file,
		   unsigned int cmd, unsigned long arg)
{
	struct asyncppp *ap = ap_get(tty);
	int err, val;
	int __user *p = (int __user *)arg;

	if (!ap)
		return -ENXIO;
	err = -EFAULT;
	switch (cmd) {
	case PPPIOCGCHAN:
		err = -EFAULT;
		if (put_user(ppp_channel_index(&ap->chan), p))
			break;
		err = 0;
		break;

	case PPPIOCGUNIT:
		err = -EFAULT;
		if (put_user(ppp_unit_number(&ap->chan), p))
			break;
		err = 0;
		break;

	case TCFLSH:
		/* flush our buffers and the serial port's buffer */
		if (arg == TCIOFLUSH || arg == TCOFLUSH)
			ppp_async_flush_output(ap);
		err = tty_perform_flush(tty, arg);
		break;

	case FIONREAD:
		val = 0;
		if (put_user(val, p))
			break;
		err = 0;
		break;

	default:
		/* Try the various mode ioctls */
		err = tty_mode_ioctl(tty, file, cmd, arg);
	}

	ap_put(ap);
	return err;
}

/* No kernel lock - fine */
static unsigned int
ppp_asynctty_poll(struct tty_struct *tty, struct file *file, poll_table *wait)
{
	return 0;
}

/* May sleep, don't call from interrupt level or with interrupts disabled */
static void
ppp_asynctty_receive(struct tty_struct *tty, const unsigned char *buf,
		     char *cflags, int count)
{
	struct asyncppp *ap = ap_get(tty);
	unsigned long flags;

	if (!ap)
		return;
	spin_lock_irqsave(&ap->recv_lock, flags);
	ppp_async_input(ap, buf, cflags, count);
	spin_unlock_irqrestore(&ap->recv_lock, flags);
	if (!skb_queue_empty(&ap->rqueue))
		tasklet_schedule(&ap->tsk);
	ap_put(ap);
	tty_unthrottle(tty);
}

static void
ppp_asynctty_wakeup(struct tty_struct *tty)
{
	struct asyncppp *ap = ap_get(tty);

	clear_bit(TTY_DO_WRITE_WAKEUP, &tty->flags);
	if (!ap)
		return;
	set_bit(XMIT_WAKEUP, &ap->xmit_flags);
	tasklet_schedule(&ap->tsk);
	ap_put(ap);
}

static struct tty_ldisc_ops ppp_ldisc = {
	.owner		= THIS_MODULE,
	.magic		= TTY_LDISC_MAGIC,
	.name		= "ppp",
	.open		= ppp_asynctty_open,
	.close		= ppp_asynctty_close,
	.hangup		= ppp_asynctty_hangup,
	.read		= ppp_asynctty_read,
	.write		= ppp_asynctty_write,
	.ioctl		= ppp_asynctty_ioctl,
	.poll		= ppp_asynctty_poll,
	.receive_buf	= ppp_asynctty_receive,
	.write_wakeup	= ppp_asynctty_wakeup,
};

static int __init
ppp_async_init(void)
{
	int err;

	err = tty_register_ldisc(N_PPP, &ppp_ldisc);
	if (err != 0)
		printk(KERN_ERR "PPP_async: error %d registering line disc.\n",
		       err);
	return err;
}

/*
 * The following routines provide the PPP channel interface.
 */
static int
ppp_async_ioctl(struct ppp_channel *chan, unsigned int cmd, unsigned long arg)
{
	struct asyncppp *ap = chan->private;
	void __user *argp = (void __user *)arg;
	int __user *p = argp;
	int err, val;
	u32 accm[8];

	err = -EFAULT;
	switch (cmd) {
	case PPPIOCGFLAGS:
		val = ap->flags | ap->rbits;
		if (put_user(val, p))
			break;
		err = 0;
		break;
	case PPPIOCSFLAGS:
		if (get_user(val, p))
			break;
		ap->flags = val & ~SC_RCV_BITS;
		spin_lock_irq(&ap->recv_lock);
		ap->rbits = val & SC_RCV_BITS;
		spin_unlock_irq(&ap->recv_lock);
		err = 0;
		break;

	case PPPIOCGASYNCMAP:
		if (put_user(ap->xaccm[0], (u32 __user *)argp))
			break;
		err = 0;
		break;
	case PPPIOCSASYNCMAP:
		if (get_user(ap->xaccm[0], (u32 __user *)argp))
			break;
		err = 0;
		break;

	case PPPIOCGRASYNCMAP:
		if (put_user(ap->raccm, (u32 __user *)argp))
			break;
		err = 0;
		break;
	case PPPIOCSRASYNCMAP:
		if (get_user(ap->raccm, (u32 __user *)argp))
			break;
		err = 0;
		break;

	case PPPIOCGXASYNCMAP:
		if (copy_to_user(argp, ap->xaccm, sizeof(ap->xaccm)))
			break;
		err = 0;
		break;
	case PPPIOCSXASYNCMAP:
		if (copy_from_user(accm, argp, sizeof(accm)))
			break;
		accm[2] &= ~0x40000000U;	/* can't escape 0x5e */
		accm[3] |= 0x60000000U;		/* must escape 0x7d, 0x7e */
		memcpy(ap->xaccm, accm, sizeof(ap->xaccm));
		err = 0;
		break;

	case PPPIOCGMRU:
		if (put_user(ap->mru, p))
			break;
		err = 0;
		break;
	case PPPIOCSMRU:
		if (get_user(val, p))
			break;
		if (val < PPP_MRU)
			val = PPP_MRU;
		ap->mru = val;
		err = 0;
		break;

	default:
		err = -ENOTTY;
	}

	return err;
}

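/*
 * Note on the extended asyncmap layout (added commentary, not from
 * the original source): xaccm[] is a 256-bit map indexed by character
 * value, word c >> 5, bit c & 0x1f, as used by PUT_BYTE below.  Thus
 * 0x7d is bit 0x20000000 of word 3 and 0x7e is bit 0x40000000 of
 * word 3, which is where the mandatory 0x60000000 mask comes from,
 * while 0x5e is bit 0x40000000 of word 2: escaping it would emit
 * 0x7d (0x5e ^ 0x20), i.e. 0x7d 0x7e, embedding a flag byte and
 * breaking framing, hence that bit is forced clear.
 */
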
/*
 * This is called at softirq level to deliver received packets
 * to the ppp_generic code, and to tell the ppp_generic code
 * if we can accept more output now.
 */
static void ppp_async_process(unsigned long arg)
{
	struct asyncppp *ap = (struct asyncppp *) arg;
	struct sk_buff *skb;

	/* process received packets */
	while ((skb = skb_dequeue(&ap->rqueue)) != NULL) {
		if (skb->cb[0])
			ppp_input_error(&ap->chan, 0);
		ppp_input(&ap->chan, skb);
	}

	/* try to push more stuff out */
	if (test_bit(XMIT_WAKEUP, &ap->xmit_flags) && ppp_async_push(ap))
		ppp_output_wakeup(&ap->chan);
}

/*
 * Procedures for encapsulation and framing.
 */

/*
 * Procedure to encode the data for async serial transmission.
 * Does octet stuffing (escaping), puts the address/control bytes
 * on if A/C compression is disabled, and does protocol compression.
 * Assumes ap->tpkt != 0 on entry.
 * Returns 1 if we finished the current frame, 0 otherwise.
 */

#define PUT_BYTE(ap, buf, c, islcp)	do {		\
	if ((islcp && c < 0x20) || (ap->xaccm[c >> 5] & (1 << (c & 0x1f)))) {\
		*buf++ = PPP_ESCAPE;			\
		*buf++ = c ^ PPP_TRANS;			\
	} else						\
		*buf++ = c;				\
} while (0)

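/*
 * Worked example (added commentary, not from the original source):
 * with the power-up xaccm of ~0 in word 0, a control byte such as
 * 0x11 (XON) has its bit set in word 0, so PUT_BYTE emits the pair
 * 0x7d 0x31 (PPP_ESCAPE, then 0x11 ^ PPP_TRANS).  A printable byte
 * such as 0x41 ('A') indexes word 2, which is normally clear, and is
 * emitted unchanged.  While islcp is set, every byte below 0x20 is
 * escaped regardless of the map, because the asyncmap itself is
 * still being negotiated at that point.
 */
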
static int
ppp_async_encode(struct asyncppp *ap)
{
	int fcs, i, count, c, proto;
	unsigned char *buf, *buflim;
	unsigned char *data;
	int islcp;

	buf = ap->obuf;
	ap->olim = buf;
	ap->optr = buf;
	i = ap->tpkt_pos;
	data = ap->tpkt->data;
	count = ap->tpkt->len;
	fcs = ap->tfcs;
	proto = get_unaligned_be16(data);

	/*
	 * LCP packets with code values between 1 (configure-request)
	 * and 7 (code-reject) must be sent as though no options
	 * had been negotiated.
	 */
	islcp = proto == PPP_LCP && 1 <= data[2] && data[2] <= 7;

	if (i == 0) {
		if (islcp)
			async_lcp_peek(ap, data, count, 0);

		/*
		 * Start of a new packet - insert the leading FLAG
		 * character if necessary.
		 */
		if (islcp || flag_time == 0 ||
		    time_after_eq(jiffies, ap->last_xmit + flag_time))
			*buf++ = PPP_FLAG;
		ap->last_xmit = jiffies;
		fcs = PPP_INITFCS;

		/*
		 * Put in the address/control bytes if necessary
		 */
		if ((ap->flags & SC_COMP_AC) == 0 || islcp) {
			PUT_BYTE(ap, buf, 0xff, islcp);
			fcs = PPP_FCS(fcs, 0xff);
			PUT_BYTE(ap, buf, 0x03, islcp);
			fcs = PPP_FCS(fcs, 0x03);
		}
	}

	/*
	 * Once we put in the last byte, we need to put in the FCS
	 * and closing flag, so make sure there are at least 7 bytes
	 * of free space in the output buffer.
	 */
	buflim = ap->obuf + OBUFSIZE - 6;
	while (i < count && buf < buflim) {
		c = data[i++];
		if (i == 1 && c == 0 && (ap->flags & SC_COMP_PROT))
			continue;	/* compress protocol field */
		fcs = PPP_FCS(fcs, c);
		PUT_BYTE(ap, buf, c, islcp);
	}

	if (i < count) {
		/*
		 * Remember where we are up to in this packet.
		 */
		ap->olim = buf;
		ap->tpkt_pos = i;
		ap->tfcs = fcs;
		return 0;
	}

	/*
	 * We have finished the packet.  Add the FCS and flag.
	 */
	fcs = ~fcs;
	c = fcs & 0xff;
	PUT_BYTE(ap, buf, c, islcp);
	c = (fcs >> 8) & 0xff;
	PUT_BYTE(ap, buf, c, islcp);
	*buf++ = PPP_FLAG;
	ap->olim = buf;

	kfree_skb(ap->tpkt);
	ap->tpkt = NULL;
	return 1;
}

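/*
 * For illustration (added commentary, not from the original source),
 * an uncompressed LCP frame leaves ppp_async_encode looking like:
 *
 *   7e ff 03 c0 21 <code> <id> <len> <data...> <fcs-lo> <fcs-hi> 7e
 *
 * flag, address/control (never compressed for LCP), the 0xc021
 * protocol field, the payload, the complemented FCS sent low byte
 * first, and the closing flag, with everything between the flags
 * subject to PUT_BYTE escaping.
 */
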
/*
 * Transmit-side routines.
 */

/*
 * Send a packet to the peer over an async tty line.
 * Returns 1 iff the packet was accepted.
 * If the packet was not accepted, we will call ppp_output_wakeup
 * at some later time.
 */
static int
ppp_async_send(struct ppp_channel *chan, struct sk_buff *skb)
{
	struct asyncppp *ap = chan->private;

	ppp_async_push(ap);

	if (test_and_set_bit(XMIT_FULL, &ap->xmit_flags))
		return 0;	/* already full */
	ap->tpkt = skb;
	ap->tpkt_pos = 0;

	ppp_async_push(ap);
	return 1;
}

/*
 * Push as much data as possible out to the tty.
 */
static int
ppp_async_push(struct asyncppp *ap)
{
	int avail, sent, done = 0;
	struct tty_struct *tty = ap->tty;
	int tty_stuffed = 0;

	/*
	 * We can get called recursively here if the tty write
	 * function calls our wakeup function.  This can happen
	 * for example on a pty with both the master and slave
	 * set to PPP line discipline.
	 * We use the XMIT_BUSY bit to detect this and get out,
	 * leaving the XMIT_WAKEUP bit set to tell the other
	 * instance that it may be able to write more now.
	 */
	if (test_and_set_bit(XMIT_BUSY, &ap->xmit_flags))
		return 0;
	spin_lock_bh(&ap->xmit_lock);
	for (;;) {
		if (test_and_clear_bit(XMIT_WAKEUP, &ap->xmit_flags))
			tty_stuffed = 0;
		if (!tty_stuffed && ap->optr < ap->olim) {
			avail = ap->olim - ap->optr;
			set_bit(TTY_DO_WRITE_WAKEUP, &tty->flags);
			sent = tty->ops->write(tty, ap->optr, avail);
			if (sent < 0)
				goto flush;	/* error, e.g. loss of CD */
			ap->optr += sent;
			if (sent < avail)
				tty_stuffed = 1;
			continue;
		}
		if (ap->optr >= ap->olim && ap->tpkt) {
			if (ppp_async_encode(ap)) {
				/* finished processing ap->tpkt */
				clear_bit(XMIT_FULL, &ap->xmit_flags);
				done = 1;
			}
			continue;
		}
		/*
		 * We haven't made any progress this time around.
		 * Clear XMIT_BUSY to let other callers in, but
		 * after doing so we have to check if anyone set
		 * XMIT_WAKEUP since we last checked it.  If they
		 * did, we should try again to set XMIT_BUSY and go
		 * around again in case XMIT_BUSY was still set when
		 * the other caller tried.
		 */
		clear_bit(XMIT_BUSY, &ap->xmit_flags);
		/* any more work to do? if not, exit the loop */
		if (!(test_bit(XMIT_WAKEUP, &ap->xmit_flags) ||
		      (!tty_stuffed && ap->tpkt)))
			break;
		/* more work to do, see if we can do it now */
		if (test_and_set_bit(XMIT_BUSY, &ap->xmit_flags))
			break;
	}
	spin_unlock_bh(&ap->xmit_lock);
	return done;

 flush:
	clear_bit(XMIT_BUSY, &ap->xmit_flags);
	if (ap->tpkt) {
		kfree_skb(ap->tpkt);
		ap->tpkt = NULL;
		clear_bit(XMIT_FULL, &ap->xmit_flags);
		done = 1;
	}
	ap->optr = ap->olim;
	spin_unlock_bh(&ap->xmit_lock);
	return done;
}

/*
 * Flush output from our internal buffers.
 * Called for the TCFLSH ioctl.  Can be entered in parallel
 * but this is covered by the xmit_lock.
 */
static void
ppp_async_flush_output(struct asyncppp *ap)
{
	int done = 0;

	spin_lock_bh(&ap->xmit_lock);
	ap->optr = ap->olim;
	if (ap->tpkt != NULL) {
		kfree_skb(ap->tpkt);
		ap->tpkt = NULL;
		clear_bit(XMIT_FULL, &ap->xmit_flags);
		done = 1;
	}
	spin_unlock_bh(&ap->xmit_lock);
	if (done)
		ppp_output_wakeup(&ap->chan);
}

/*
 * Receive-side routines.
 */

/* see how many ordinary chars there are at the start of buf */
static inline int
scan_ordinary(struct asyncppp *ap, const unsigned char *buf, int count)
{
	int i, c;

	for (i = 0; i < count; ++i) {
		c = buf[i];
		if (c == PPP_ESCAPE || c == PPP_FLAG ||
		    (c < 0x20 && (ap->raccm & (1 << c)) != 0))
			break;
	}
	return i;
}

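/*
 * Added note (not from the original source): raccm is the 32-bit
 * receive asyncmap covering control characters 0x00-0x1f.  The peer
 * escapes any of these it sends as data, so a bare occurrence was
 * most likely injected by the link itself (XON/XOFF flow control,
 * for instance); scan_ordinary stops the bulk copy there and
 * ppp_async_input discards the character instead of treating it as
 * frame data.
 */
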
/* called when a flag is seen - do end-of-packet processing */
static void
process_input_packet(struct asyncppp *ap)
{
	struct sk_buff *skb;
	unsigned char *p;
	unsigned int len, fcs, proto;

	skb = ap->rpkt;
	if (ap->state & (SC_TOSS | SC_ESCAPE))
		goto err;

	if (skb == NULL)
		return;		/* 0-length packet */

	/* check the FCS */
	p = skb->data;
	len = skb->len;
	if (len < 3)
		goto err;	/* too short */
	fcs = PPP_INITFCS;
	for (; len > 0; --len)
		fcs = PPP_FCS(fcs, *p++);
	if (fcs != PPP_GOODFCS)
		goto err;	/* bad FCS */
	skb_trim(skb, skb->len - 2);

	/* check for address/control and protocol compression */
	p = skb->data;
	if (p[0] == PPP_ALLSTATIONS) {
		/* chop off address/control */
		if (p[1] != PPP_UI || skb->len < 3)
			goto err;
		p = skb_pull(skb, 2);
	}

	/* decode protocol field */
	proto = p[0];
	if (proto & 1) {
		/* protocol is compressed */
		skb_push(skb, 1)[0] = 0;
	} else {
		if (skb->len < 2)
			goto err;
		proto = (proto << 8) + p[1];
		if (proto == PPP_LCP)
			async_lcp_peek(ap, p, skb->len, 1);
	}

	/* queue the frame to be processed */
	skb->cb[0] = ap->state;
	ap->rpkt = NULL;
	ap->state = 0;
	skb_queue_tail(&ap->rqueue, skb);
	return;

 err:
	/* frame had an error, remember that, reset SC_TOSS & SC_ESCAPE */
	ap->state = SC_PREV_ERROR;
	if (skb) {
		/* make skb appear as freshly allocated */
		skb_trim(skb, 0);
		skb_reserve(skb, - skb_headroom(skb));
	}
}

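/*
 * Added note (not from the original source): the FCS check above uses
 * the standard CRC residue trick - running the CRC over the payload
 * plus the two appended FCS bytes leaves the fixed value PPP_GOODFCS
 * (0xf0b8) for an undamaged frame, so no separate comparison against
 * a recomputed checksum is needed before the two FCS bytes are
 * trimmed off.
 */
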
/*
 * Called when the tty driver has data for us.  Runs parallel with the
 * other ldisc functions but will not be re-entered.
 */
static void
ppp_async_input(struct asyncppp *ap, const unsigned char *buf,
		char *flags, int count)
{
	struct sk_buff *skb;
	int c, i, j, n, s, f;
	unsigned char *sp;

	/* update bits used for 8-bit cleanness detection */
	if (~ap->rbits & SC_RCV_BITS) {
		s = 0;
		for (i = 0; i < count; ++i) {
			c = buf[i];
			if (flags && flags[i] != 0)
				continue;
			s |= (c & 0x80)? SC_RCV_B7_1: SC_RCV_B7_0;
			/* bit k of 0x6996 is the parity of nibble k, so
			   folding the byte to a nibble and indexing gives
			   the byte's overall parity */
			c = ((c >> 4) ^ c) & 0xf;
			s |= (0x6996 & (1 << c))? SC_RCV_ODDP: SC_RCV_EVNP;
		}
		ap->state |= s;
	}

	while (count > 0) {
		/* scan through and see how many chars we can do in bulk */
		if ((ap->state & SC_ESCAPE) && buf[0] == PPP_ESCAPE)
			n = 1;
		else
			n = scan_ordinary(ap, buf, count);

		f = 0;
		if (flags && (ap->state & SC_TOSS) == 0) {
			/* check the flags to see if any char had an error */
			for (j = 0; j < n; ++j)
				if ((f = flags[j]) != 0)
					break;
		}
		if (f != 0) {
			/* start tossing */
			ap->state |= SC_TOSS;

		} else if (n > 0 && (ap->state & SC_TOSS) == 0) {
			/* stuff the chars in the skb */
			skb = ap->rpkt;
			if (!skb) {
				skb = dev_alloc_skb(ap->mru + PPP_HDRLEN + 2);
				if (!skb)
					goto nomem;
				ap->rpkt = skb;
			}
			if (skb->len == 0) {
				/* Try to get the payload 4-byte aligned.
				 * This should match the
				 * PPP_ALLSTATIONS/PPP_UI/compressed tests in
				 * process_input_packet, but we do not have
				 * enough chars here to test buf[1] and buf[2].
				 */
				if (buf[0] != PPP_ALLSTATIONS)
					skb_reserve(skb, 2 + (buf[0] & 1));
			}
			if (n > skb_tailroom(skb)) {
				/* packet overflowed MRU */
				ap->state |= SC_TOSS;
			} else {
				sp = skb_put(skb, n);
				memcpy(sp, buf, n);
				if (ap->state & SC_ESCAPE) {
					sp[0] ^= PPP_TRANS;
					ap->state &= ~SC_ESCAPE;
				}
			}
		}

		if (n >= count)
			break;

		c = buf[n];
		if (flags != NULL && flags[n] != 0) {
			ap->state |= SC_TOSS;
		} else if (c == PPP_FLAG) {
			process_input_packet(ap);
		} else if (c == PPP_ESCAPE) {
			ap->state |= SC_ESCAPE;
		} else if (I_IXON(ap->tty)) {
			if (c == START_CHAR(ap->tty))
				start_tty(ap->tty);
			else if (c == STOP_CHAR(ap->tty))
				stop_tty(ap->tty);
		}
		/* otherwise it's a char in the recv ACCM */
		++n;

		buf += n;
		if (flags)
			flags += n;
		count -= n;
	}
	return;

 nomem:
	printk(KERN_ERR "PPPasync: no memory (input pkt)\n");
	ap->state |= SC_TOSS;
}

/*
 * We look at LCP frames going past so that we can notice
 * and react to the LCP configure-ack from the peer.
 * In the situation where the peer has been sent a configure-ack
 * already, LCP is up once it has sent its configure-ack
 * so the immediately following packet can be sent with the
 * configured LCP options.  This allows us to process the following
 * packet correctly without pppd needing to respond quickly.
 *
 * We only respond to the received configure-ack if we have just
 * sent a configure-request, and the configure-ack contains the
 * same data (this is checked using a 16-bit crc of the data).
 */
#define CONFREQ		1	/* LCP code field values */
#define CONFACK		2
#define LCP_MRU		1	/* LCP option numbers */
#define LCP_ASYNCMAP	2

static void async_lcp_peek(struct asyncppp *ap, unsigned char *data,
			   int len, int inbound)
{
	int dlen, fcs, i, code;
	u32 val;

	data += 2;		/* skip protocol bytes */
	len -= 2;
	if (len < 4)		/* 4 = code, ID, length */
		return;
	code = data[0];
	if (code != CONFACK && code != CONFREQ)
		return;
	dlen = get_unaligned_be16(data + 2);
	if (len < dlen)
		return;		/* packet got truncated or length is bogus */

	if (code == (inbound ? CONFACK : CONFREQ)) {
		/*
		 * sent confreq or received confack:
		 * calculate the crc of the data from the ID field on.
		 */
		fcs = PPP_INITFCS;
		for (i = 1; i < dlen; ++i)
			fcs = PPP_FCS(fcs, data[i]);

		if (!inbound) {
			/* outbound confreq - remember the crc for later */
			ap->lcp_fcs = fcs;
			return;
		}

		/* received confack, check the crc */
		if (fcs != ap->lcp_fcs)
			return;
		ap->lcp_fcs = -1;
	} else if (inbound)
		return;	/* not interested in received confreq */

	/* process the options in the confack */
	data += 4;
	dlen -= 4;
	/* data[0] is code, data[1] is length */
	while (dlen >= 2 && dlen >= data[1] && data[1] >= 2) {
		switch (data[0]) {
		case LCP_MRU:
			val = get_unaligned_be16(data + 2);
			if (inbound)
				ap->mru = val;
			else
				ap->chan.mtu = val;
			break;
		case LCP_ASYNCMAP:
			val = get_unaligned_be32(data + 2);
			if (inbound)
				ap->raccm = val;
			else
				ap->xaccm[0] = val;
			break;
		}
		dlen -= data[1];
		data += data[1];
	}
}

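/*
 * Worked example (added commentary, not from the original source):
 * suppose pppd sends an LCP configure-request asking for MRU 1500 and
 * asyncmap 0.  On the way out, async_lcp_peek records the FCS-16 of
 * the bytes from the ID field onward in ap->lcp_fcs.  When the peer's
 * configure-ack arrives carrying identical option data, the recomputed
 * FCS matches, and the driver applies MRU 1500 to ap->mru and asyncmap
 * 0 to ap->raccm immediately - before pppd has even read the ack - so
 * the very next frame received is parsed with the negotiated settings.
 */
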
static void __exit ppp_async_cleanup(void)
{
	if (tty_unregister_ldisc(N_PPP) != 0)
		printk(KERN_ERR "failed to unregister PPP line discipline\n");
}

module_init(ppp_async_init);
module_exit(ppp_async_cleanup);