More meth updates.
[linux-2.6/linux-mips.git] / drivers / net / ppp_async.c
blob5edde59242059ce88077f7726edc250bc19621d9
/*
 * PPP async serial channel driver for Linux.
 *
 * Copyright 1999 Paul Mackerras.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 *
 * This driver provides the encapsulation and framing for sending
 * and receiving PPP frames over async serial lines.  It relies on
 * the generic PPP layer to give it frames to send and to process
 * received frames.  It implements the PPP line discipline.
 *
 * Part of the code in this driver was inspired by the old async-only
 * PPP driver, written by Michael Callahan and Al Longyear, and
 * subsequently hacked by Paul Mackerras.
 *
 * ==FILEVERSION 20020125==
 */
23 #include <linux/module.h>
24 #include <linux/kernel.h>
25 #include <linux/skbuff.h>
26 #include <linux/tty.h>
27 #include <linux/netdevice.h>
28 #include <linux/poll.h>
29 #include <linux/ppp_defs.h>
30 #include <linux/if_ppp.h>
31 #include <linux/ppp_channel.h>
32 #include <linux/spinlock.h>
33 #include <linux/init.h>
34 #include <asm/uaccess.h>
#define PPP_VERSION	"2.4.2"

/* Size of the per-channel output (encoding) buffer. */
#define OBUFSIZE	256
40 /* Structure for storing local state. */
41 struct asyncppp {
42 struct tty_struct *tty;
43 unsigned int flags;
44 unsigned int state;
45 unsigned int rbits;
46 int mru;
47 spinlock_t xmit_lock;
48 spinlock_t recv_lock;
49 unsigned long xmit_flags;
50 u32 xaccm[8];
51 u32 raccm;
52 unsigned int bytes_sent;
53 unsigned int bytes_rcvd;
55 struct sk_buff *tpkt;
56 int tpkt_pos;
57 u16 tfcs;
58 unsigned char *optr;
59 unsigned char *olim;
60 unsigned long last_xmit;
62 struct sk_buff *rpkt;
63 int lcp_fcs;
65 atomic_t refcnt;
66 struct semaphore dead_sem;
67 struct ppp_channel chan; /* interface to generic ppp layer */
68 unsigned char obuf[OBUFSIZE];
/* Bit numbers in xmit_flags */
#define XMIT_WAKEUP	0	/* tty may be able to take more output */
#define XMIT_FULL	1	/* a packet (tpkt) is queued for transmit */
#define XMIT_BUSY	2	/* ppp_async_push is running (recursion guard) */

/* State bits */
#define SC_TOSS		0x20000000	/* discard the current input frame */
#define SC_ESCAPE	0x40000000	/* last char received was PPP_ESCAPE */

/* Bits in rbits */
#define SC_RCV_BITS	(SC_RCV_B7_1|SC_RCV_B7_0|SC_RCV_ODDP|SC_RCV_EVNP)
83 static int flag_time = HZ;
84 MODULE_PARM(flag_time, "i");
85 MODULE_PARM_DESC(flag_time, "ppp_async: interval between flagged packets (in clock ticks)");
86 MODULE_LICENSE("GPL");
/*
 * Prototypes.
 */
static int ppp_async_encode(struct asyncppp *ap);
static int ppp_async_send(struct ppp_channel *chan, struct sk_buff *skb);
static int ppp_async_push(struct asyncppp *ap);
static void ppp_async_flush_output(struct asyncppp *ap);
static void ppp_async_input(struct asyncppp *ap, const unsigned char *buf,
			    char *flags, int count);
static int ppp_async_ioctl(struct ppp_channel *chan, unsigned int cmd,
			   unsigned long arg);
static void async_lcp_peek(struct asyncppp *ap, unsigned char *data,
			   int len, int inbound);
103 static struct ppp_channel_ops async_ops = {
104 ppp_async_send,
105 ppp_async_ioctl
109 * Routines implementing the PPP line discipline.
113 * We have a potential race on dereferencing tty->disc_data,
114 * because the tty layer provides no locking at all - thus one
115 * cpu could be running ppp_asynctty_receive while another
116 * calls ppp_asynctty_close, which zeroes tty->disc_data and
117 * frees the memory that ppp_asynctty_receive is using. The best
118 * way to fix this is to use a rwlock in the tty struct, but for now
119 * we use a single global rwlock for all ttys in ppp line discipline.
121 static rwlock_t disc_data_lock = RW_LOCK_UNLOCKED;
123 static struct asyncppp *ap_get(struct tty_struct *tty)
125 struct asyncppp *ap;
127 read_lock(&disc_data_lock);
128 ap = tty->disc_data;
129 if (ap != NULL)
130 atomic_inc(&ap->refcnt);
131 read_unlock(&disc_data_lock);
132 return ap;
135 static void ap_put(struct asyncppp *ap)
137 if (atomic_dec_and_test(&ap->refcnt))
138 up(&ap->dead_sem);
142 * Called when a tty is put into PPP line discipline.
144 static int
145 ppp_asynctty_open(struct tty_struct *tty)
147 struct asyncppp *ap;
148 int err;
150 err = -ENOMEM;
151 ap = kmalloc(sizeof(*ap), GFP_KERNEL);
152 if (ap == 0)
153 goto out;
155 /* initialize the asyncppp structure */
156 memset(ap, 0, sizeof(*ap));
157 ap->tty = tty;
158 ap->mru = PPP_MRU;
159 spin_lock_init(&ap->xmit_lock);
160 spin_lock_init(&ap->recv_lock);
161 ap->xaccm[0] = ~0U;
162 ap->xaccm[3] = 0x60000000U;
163 ap->raccm = ~0U;
164 ap->optr = ap->obuf;
165 ap->olim = ap->obuf;
166 ap->lcp_fcs = -1;
168 atomic_set(&ap->refcnt, 1);
169 init_MUTEX_LOCKED(&ap->dead_sem);
171 ap->chan.private = ap;
172 ap->chan.ops = &async_ops;
173 ap->chan.mtu = PPP_MRU;
174 err = ppp_register_channel(&ap->chan);
175 if (err)
176 goto out_free;
178 tty->disc_data = ap;
180 return 0;
182 out_free:
183 kfree(ap);
184 out:
185 return err;
189 * Called when the tty is put into another line discipline
190 * or it hangs up. We have to wait for any cpu currently
191 * executing in any of the other ppp_asynctty_* routines to
192 * finish before we can call ppp_unregister_channel and free
193 * the asyncppp struct. This routine must be called from
194 * process context, not interrupt or softirq context.
196 static void
197 ppp_asynctty_close(struct tty_struct *tty)
199 struct asyncppp *ap;
201 write_lock(&disc_data_lock);
202 ap = tty->disc_data;
203 tty->disc_data = 0;
204 write_unlock(&disc_data_lock);
205 if (ap == 0)
206 return;
209 * We have now ensured that nobody can start using ap from now
210 * on, but we have to wait for all existing users to finish.
211 * Note that ppp_unregister_channel ensures that no calls to
212 * our channel ops (i.e. ppp_async_send/ioctl) are in progress
213 * by the time it returns.
215 if (!atomic_dec_and_test(&ap->refcnt))
216 down(&ap->dead_sem);
218 ppp_unregister_channel(&ap->chan);
219 if (ap->rpkt != 0)
220 kfree_skb(ap->rpkt);
221 if (ap->tpkt != 0)
222 kfree_skb(ap->tpkt);
223 kfree(ap);
227 * Read does nothing - no data is ever available this way.
228 * Pppd reads and writes packets via /dev/ppp instead.
230 static ssize_t
231 ppp_asynctty_read(struct tty_struct *tty, struct file *file,
232 unsigned char *buf, size_t count)
234 return -EAGAIN;
238 * Write on the tty does nothing, the packets all come in
239 * from the ppp generic stuff.
241 static ssize_t
242 ppp_asynctty_write(struct tty_struct *tty, struct file *file,
243 const unsigned char *buf, size_t count)
245 return -EAGAIN;
248 static int
249 ppp_asynctty_ioctl(struct tty_struct *tty, struct file *file,
250 unsigned int cmd, unsigned long arg)
252 struct asyncppp *ap = ap_get(tty);
253 int err, val;
255 if (ap == 0)
256 return -ENXIO;
257 err = -EFAULT;
258 switch (cmd) {
259 case PPPIOCGCHAN:
260 err = -ENXIO;
261 if (ap == 0)
262 break;
263 err = -EFAULT;
264 if (put_user(ppp_channel_index(&ap->chan), (int *) arg))
265 break;
266 err = 0;
267 break;
269 case PPPIOCGUNIT:
270 err = -ENXIO;
271 if (ap == 0)
272 break;
273 err = -EFAULT;
274 if (put_user(ppp_unit_number(&ap->chan), (int *) arg))
275 break;
276 err = 0;
277 break;
279 case TCGETS:
280 case TCGETA:
281 err = n_tty_ioctl(tty, file, cmd, arg);
282 break;
284 case TCFLSH:
285 /* flush our buffers and the serial port's buffer */
286 if (arg == TCIOFLUSH || arg == TCOFLUSH)
287 ppp_async_flush_output(ap);
288 err = n_tty_ioctl(tty, file, cmd, arg);
289 break;
291 case FIONREAD:
292 val = 0;
293 if (put_user(val, (int *) arg))
294 break;
295 err = 0;
296 break;
298 default:
299 err = -ENOIOCTLCMD;
302 ap_put(ap);
303 return err;
306 /* No kernel lock - fine */
307 static unsigned int
308 ppp_asynctty_poll(struct tty_struct *tty, struct file *file, poll_table *wait)
310 return 0;
/* Tell the tty layer how much receive data we can accept at once. */
static int
ppp_asynctty_room(struct tty_struct *tty)
{
	return 65535;
}
319 static void
320 ppp_asynctty_receive(struct tty_struct *tty, const unsigned char *buf,
321 char *flags, int count)
323 struct asyncppp *ap = ap_get(tty);
325 if (ap == 0)
326 return;
327 spin_lock_bh(&ap->recv_lock);
328 ppp_async_input(ap, buf, flags, count);
329 spin_unlock_bh(&ap->recv_lock);
330 ap_put(ap);
331 if (test_and_clear_bit(TTY_THROTTLED, &tty->flags)
332 && tty->driver->unthrottle)
333 tty->driver->unthrottle(tty);
336 static void
337 ppp_asynctty_wakeup(struct tty_struct *tty)
339 struct asyncppp *ap = ap_get(tty);
341 clear_bit(TTY_DO_WRITE_WAKEUP, &tty->flags);
342 if (ap == 0)
343 return;
344 if (ppp_async_push(ap))
345 ppp_output_wakeup(&ap->chan);
346 ap_put(ap);
350 static struct tty_ldisc ppp_ldisc = {
351 .owner = THIS_MODULE,
352 .magic = TTY_LDISC_MAGIC,
353 .name = "ppp",
354 .open = ppp_asynctty_open,
355 .close = ppp_asynctty_close,
356 .read = ppp_asynctty_read,
357 .write = ppp_asynctty_write,
358 .ioctl = ppp_asynctty_ioctl,
359 .poll = ppp_asynctty_poll,
360 .receive_room = ppp_asynctty_room,
361 .receive_buf = ppp_asynctty_receive,
362 .write_wakeup = ppp_asynctty_wakeup,
365 static int __init
366 ppp_async_init(void)
368 int err;
370 err = tty_register_ldisc(N_PPP, &ppp_ldisc);
371 if (err != 0)
372 printk(KERN_ERR "PPP_async: error %d registering line disc.\n",
373 err);
374 return err;
378 * The following routines provide the PPP channel interface.
380 static int
381 ppp_async_ioctl(struct ppp_channel *chan, unsigned int cmd, unsigned long arg)
383 struct asyncppp *ap = chan->private;
384 int err, val;
385 u32 accm[8];
387 err = -EFAULT;
388 switch (cmd) {
389 case PPPIOCGFLAGS:
390 val = ap->flags | ap->rbits;
391 if (put_user(val, (int *) arg))
392 break;
393 err = 0;
394 break;
395 case PPPIOCSFLAGS:
396 if (get_user(val, (int *) arg))
397 break;
398 ap->flags = val & ~SC_RCV_BITS;
399 spin_lock_bh(&ap->recv_lock);
400 ap->rbits = val & SC_RCV_BITS;
401 spin_unlock_bh(&ap->recv_lock);
402 err = 0;
403 break;
405 case PPPIOCGASYNCMAP:
406 if (put_user(ap->xaccm[0], (u32 *) arg))
407 break;
408 err = 0;
409 break;
410 case PPPIOCSASYNCMAP:
411 if (get_user(ap->xaccm[0], (u32 *) arg))
412 break;
413 err = 0;
414 break;
416 case PPPIOCGRASYNCMAP:
417 if (put_user(ap->raccm, (u32 *) arg))
418 break;
419 err = 0;
420 break;
421 case PPPIOCSRASYNCMAP:
422 if (get_user(ap->raccm, (u32 *) arg))
423 break;
424 err = 0;
425 break;
427 case PPPIOCGXASYNCMAP:
428 if (copy_to_user((void __user *) arg, ap->xaccm, sizeof(ap->xaccm)))
429 break;
430 err = 0;
431 break;
432 case PPPIOCSXASYNCMAP:
433 if (copy_from_user(accm, (void __user *) arg, sizeof(accm)))
434 break;
435 accm[2] &= ~0x40000000U; /* can't escape 0x5e */
436 accm[3] |= 0x60000000U; /* must escape 0x7d, 0x7e */
437 memcpy(ap->xaccm, accm, sizeof(ap->xaccm));
438 err = 0;
439 break;
441 case PPPIOCGMRU:
442 if (put_user(ap->mru, (int *) arg))
443 break;
444 err = 0;
445 break;
446 case PPPIOCSMRU:
447 if (get_user(val, (int *) arg))
448 break;
449 if (val < PPP_MRU)
450 val = PPP_MRU;
451 ap->mru = val;
452 err = 0;
453 break;
455 default:
456 err = -ENOTTY;
459 return err;
463 * Procedures for encapsulation and framing.
466 u16 ppp_crc16_table[256] = {
467 0x0000, 0x1189, 0x2312, 0x329b, 0x4624, 0x57ad, 0x6536, 0x74bf,
468 0x8c48, 0x9dc1, 0xaf5a, 0xbed3, 0xca6c, 0xdbe5, 0xe97e, 0xf8f7,
469 0x1081, 0x0108, 0x3393, 0x221a, 0x56a5, 0x472c, 0x75b7, 0x643e,
470 0x9cc9, 0x8d40, 0xbfdb, 0xae52, 0xdaed, 0xcb64, 0xf9ff, 0xe876,
471 0x2102, 0x308b, 0x0210, 0x1399, 0x6726, 0x76af, 0x4434, 0x55bd,
472 0xad4a, 0xbcc3, 0x8e58, 0x9fd1, 0xeb6e, 0xfae7, 0xc87c, 0xd9f5,
473 0x3183, 0x200a, 0x1291, 0x0318, 0x77a7, 0x662e, 0x54b5, 0x453c,
474 0xbdcb, 0xac42, 0x9ed9, 0x8f50, 0xfbef, 0xea66, 0xd8fd, 0xc974,
475 0x4204, 0x538d, 0x6116, 0x709f, 0x0420, 0x15a9, 0x2732, 0x36bb,
476 0xce4c, 0xdfc5, 0xed5e, 0xfcd7, 0x8868, 0x99e1, 0xab7a, 0xbaf3,
477 0x5285, 0x430c, 0x7197, 0x601e, 0x14a1, 0x0528, 0x37b3, 0x263a,
478 0xdecd, 0xcf44, 0xfddf, 0xec56, 0x98e9, 0x8960, 0xbbfb, 0xaa72,
479 0x6306, 0x728f, 0x4014, 0x519d, 0x2522, 0x34ab, 0x0630, 0x17b9,
480 0xef4e, 0xfec7, 0xcc5c, 0xddd5, 0xa96a, 0xb8e3, 0x8a78, 0x9bf1,
481 0x7387, 0x620e, 0x5095, 0x411c, 0x35a3, 0x242a, 0x16b1, 0x0738,
482 0xffcf, 0xee46, 0xdcdd, 0xcd54, 0xb9eb, 0xa862, 0x9af9, 0x8b70,
483 0x8408, 0x9581, 0xa71a, 0xb693, 0xc22c, 0xd3a5, 0xe13e, 0xf0b7,
484 0x0840, 0x19c9, 0x2b52, 0x3adb, 0x4e64, 0x5fed, 0x6d76, 0x7cff,
485 0x9489, 0x8500, 0xb79b, 0xa612, 0xd2ad, 0xc324, 0xf1bf, 0xe036,
486 0x18c1, 0x0948, 0x3bd3, 0x2a5a, 0x5ee5, 0x4f6c, 0x7df7, 0x6c7e,
487 0xa50a, 0xb483, 0x8618, 0x9791, 0xe32e, 0xf2a7, 0xc03c, 0xd1b5,
488 0x2942, 0x38cb, 0x0a50, 0x1bd9, 0x6f66, 0x7eef, 0x4c74, 0x5dfd,
489 0xb58b, 0xa402, 0x9699, 0x8710, 0xf3af, 0xe226, 0xd0bd, 0xc134,
490 0x39c3, 0x284a, 0x1ad1, 0x0b58, 0x7fe7, 0x6e6e, 0x5cf5, 0x4d7c,
491 0xc60c, 0xd785, 0xe51e, 0xf497, 0x8028, 0x91a1, 0xa33a, 0xb2b3,
492 0x4a44, 0x5bcd, 0x6956, 0x78df, 0x0c60, 0x1de9, 0x2f72, 0x3efb,
493 0xd68d, 0xc704, 0xf59f, 0xe416, 0x90a9, 0x8120, 0xb3bb, 0xa232,
494 0x5ac5, 0x4b4c, 0x79d7, 0x685e, 0x1ce1, 0x0d68, 0x3ff3, 0x2e7a,
495 0xe70e, 0xf687, 0xc41c, 0xd595, 0xa12a, 0xb0a3, 0x8238, 0x93b1,
496 0x6b46, 0x7acf, 0x4854, 0x59dd, 0x2d62, 0x3ceb, 0x0e70, 0x1ff9,
497 0xf78f, 0xe606, 0xd49d, 0xc514, 0xb1ab, 0xa022, 0x92b9, 0x8330,
498 0x7bc7, 0x6a4e, 0x58d5, 0x495c, 0x3de3, 0x2c6a, 0x1ef1, 0x0f78
500 EXPORT_SYMBOL(ppp_crc16_table);
501 #define fcstab ppp_crc16_table /* for PPP_FCS macro */
/*
 * Procedure to encode the data for async serial transmission.
 * Does octet stuffing (escaping), puts the address/control bytes
 * on if A/C compression is disabled, and does protocol compression.
 * Assumes ap->tpkt != 0 on entry.
 * Returns 1 if we finished the current frame, 0 otherwise.
 */

/*
 * Emit one byte into buf, escaping it (PPP_ESCAPE + c^0x20) when it
 * appears in the transmit ACCM, or unconditionally for chars < 0x20
 * in LCP frames.  May write up to 2 bytes.
 */
#define PUT_BYTE(ap, buf, c, islcp)	do {		\
	if ((islcp && c < 0x20) || (ap->xaccm[c >> 5] & (1 << (c & 0x1f)))) {\
		*buf++ = PPP_ESCAPE;			\
		*buf++ = c ^ 0x20;			\
	} else						\
		*buf++ = c;				\
} while (0)
519 static int
520 ppp_async_encode(struct asyncppp *ap)
522 int fcs, i, count, c, proto;
523 unsigned char *buf, *buflim;
524 unsigned char *data;
525 int islcp;
527 buf = ap->obuf;
528 ap->olim = buf;
529 ap->optr = buf;
530 i = ap->tpkt_pos;
531 data = ap->tpkt->data;
532 count = ap->tpkt->len;
533 fcs = ap->tfcs;
534 proto = (data[0] << 8) + data[1];
537 * LCP packets with code values between 1 (configure-reqest)
538 * and 7 (code-reject) must be sent as though no options
539 * had been negotiated.
541 islcp = proto == PPP_LCP && 1 <= data[2] && data[2] <= 7;
543 if (i == 0) {
544 if (islcp)
545 async_lcp_peek(ap, data, count, 0);
548 * Start of a new packet - insert the leading FLAG
549 * character if necessary.
551 if (islcp || flag_time == 0
552 || jiffies - ap->last_xmit >= flag_time)
553 *buf++ = PPP_FLAG;
554 ap->last_xmit = jiffies;
555 fcs = PPP_INITFCS;
558 * Put in the address/control bytes if necessary
560 if ((ap->flags & SC_COMP_AC) == 0 || islcp) {
561 PUT_BYTE(ap, buf, 0xff, islcp);
562 fcs = PPP_FCS(fcs, 0xff);
563 PUT_BYTE(ap, buf, 0x03, islcp);
564 fcs = PPP_FCS(fcs, 0x03);
569 * Once we put in the last byte, we need to put in the FCS
570 * and closing flag, so make sure there is at least 7 bytes
571 * of free space in the output buffer.
573 buflim = ap->obuf + OBUFSIZE - 6;
574 while (i < count && buf < buflim) {
575 c = data[i++];
576 if (i == 1 && c == 0 && (ap->flags & SC_COMP_PROT))
577 continue; /* compress protocol field */
578 fcs = PPP_FCS(fcs, c);
579 PUT_BYTE(ap, buf, c, islcp);
582 if (i < count) {
584 * Remember where we are up to in this packet.
586 ap->olim = buf;
587 ap->tpkt_pos = i;
588 ap->tfcs = fcs;
589 return 0;
593 * We have finished the packet. Add the FCS and flag.
595 fcs = ~fcs;
596 c = fcs & 0xff;
597 PUT_BYTE(ap, buf, c, islcp);
598 c = (fcs >> 8) & 0xff;
599 PUT_BYTE(ap, buf, c, islcp);
600 *buf++ = PPP_FLAG;
601 ap->olim = buf;
603 kfree_skb(ap->tpkt);
604 ap->tpkt = 0;
605 return 1;
609 * Transmit-side routines.
613 * Send a packet to the peer over an async tty line.
614 * Returns 1 iff the packet was accepted.
615 * If the packet was not accepted, we will call ppp_output_wakeup
616 * at some later time.
618 static int
619 ppp_async_send(struct ppp_channel *chan, struct sk_buff *skb)
621 struct asyncppp *ap = chan->private;
623 ppp_async_push(ap);
625 if (test_and_set_bit(XMIT_FULL, &ap->xmit_flags))
626 return 0; /* already full */
627 ap->tpkt = skb;
628 ap->tpkt_pos = 0;
630 ppp_async_push(ap);
631 return 1;
635 * Push as much data as possible out to the tty.
637 static int
638 ppp_async_push(struct asyncppp *ap)
640 int avail, sent, done = 0;
641 struct tty_struct *tty = ap->tty;
642 int tty_stuffed = 0;
644 set_bit(XMIT_WAKEUP, &ap->xmit_flags);
646 * We can get called recursively here if the tty write
647 * function calls our wakeup function. This can happen
648 * for example on a pty with both the master and slave
649 * set to PPP line discipline.
650 * We use the XMIT_BUSY bit to detect this and get out,
651 * leaving the XMIT_WAKEUP bit set to tell the other
652 * instance that it may now be able to write more now.
654 if (test_and_set_bit(XMIT_BUSY, &ap->xmit_flags))
655 return 0;
656 spin_lock_bh(&ap->xmit_lock);
657 for (;;) {
658 if (test_and_clear_bit(XMIT_WAKEUP, &ap->xmit_flags))
659 tty_stuffed = 0;
660 if (!tty_stuffed && ap->optr < ap->olim) {
661 avail = ap->olim - ap->optr;
662 set_bit(TTY_DO_WRITE_WAKEUP, &tty->flags);
663 sent = tty->driver->write(tty, 0, ap->optr, avail);
664 if (sent < 0)
665 goto flush; /* error, e.g. loss of CD */
666 ap->optr += sent;
667 if (sent < avail)
668 tty_stuffed = 1;
669 continue;
671 if (ap->optr >= ap->olim && ap->tpkt != 0) {
672 if (ppp_async_encode(ap)) {
673 /* finished processing ap->tpkt */
674 clear_bit(XMIT_FULL, &ap->xmit_flags);
675 done = 1;
677 continue;
680 * We haven't made any progress this time around.
681 * Clear XMIT_BUSY to let other callers in, but
682 * after doing so we have to check if anyone set
683 * XMIT_WAKEUP since we last checked it. If they
684 * did, we should try again to set XMIT_BUSY and go
685 * around again in case XMIT_BUSY was still set when
686 * the other caller tried.
688 clear_bit(XMIT_BUSY, &ap->xmit_flags);
689 /* any more work to do? if not, exit the loop */
690 if (!(test_bit(XMIT_WAKEUP, &ap->xmit_flags)
691 || (!tty_stuffed && ap->tpkt != 0)))
692 break;
693 /* more work to do, see if we can do it now */
694 if (test_and_set_bit(XMIT_BUSY, &ap->xmit_flags))
695 break;
697 spin_unlock_bh(&ap->xmit_lock);
698 return done;
700 flush:
701 clear_bit(XMIT_BUSY, &ap->xmit_flags);
702 if (ap->tpkt != 0) {
703 kfree_skb(ap->tpkt);
704 ap->tpkt = 0;
705 clear_bit(XMIT_FULL, &ap->xmit_flags);
706 done = 1;
708 ap->optr = ap->olim;
709 spin_unlock_bh(&ap->xmit_lock);
710 return done;
714 * Flush output from our internal buffers.
715 * Called for the TCFLSH ioctl.
717 static void
718 ppp_async_flush_output(struct asyncppp *ap)
720 int done = 0;
722 spin_lock_bh(&ap->xmit_lock);
723 ap->optr = ap->olim;
724 if (ap->tpkt != NULL) {
725 kfree_skb(ap->tpkt);
726 ap->tpkt = 0;
727 clear_bit(XMIT_FULL, &ap->xmit_flags);
728 done = 1;
730 spin_unlock_bh(&ap->xmit_lock);
731 if (done)
732 ppp_output_wakeup(&ap->chan);
736 * Receive-side routines.
739 /* see how many ordinary chars there are at the start of buf */
740 static inline int
741 scan_ordinary(struct asyncppp *ap, const unsigned char *buf, int count)
743 int i, c;
745 for (i = 0; i < count; ++i) {
746 c = buf[i];
747 if (c == PPP_ESCAPE || c == PPP_FLAG
748 || (c < 0x20 && (ap->raccm & (1 << c)) != 0))
749 break;
751 return i;
754 /* called when a flag is seen - do end-of-packet processing */
755 static inline void
756 process_input_packet(struct asyncppp *ap)
758 struct sk_buff *skb;
759 unsigned char *p;
760 unsigned int len, fcs, proto;
761 int code = 0;
763 skb = ap->rpkt;
764 ap->rpkt = 0;
765 if ((ap->state & (SC_TOSS | SC_ESCAPE)) || skb == 0) {
766 ap->state &= ~(SC_TOSS | SC_ESCAPE);
767 if (skb != 0)
768 kfree_skb(skb);
769 return;
772 /* check the FCS */
773 p = skb->data;
774 len = skb->len;
775 if (len < 3)
776 goto err; /* too short */
777 fcs = PPP_INITFCS;
778 for (; len > 0; --len)
779 fcs = PPP_FCS(fcs, *p++);
780 if (fcs != PPP_GOODFCS)
781 goto err; /* bad FCS */
782 skb_trim(skb, skb->len - 2);
784 /* check for address/control and protocol compression */
785 p = skb->data;
786 if (p[0] == PPP_ALLSTATIONS && p[1] == PPP_UI) {
787 /* chop off address/control */
788 if (skb->len < 3)
789 goto err;
790 p = skb_pull(skb, 2);
792 proto = p[0];
793 if (proto & 1) {
794 /* protocol is compressed */
795 skb_push(skb, 1)[0] = 0;
796 } else {
797 if (skb->len < 2)
798 goto err;
799 proto = (proto << 8) + p[1];
800 if (proto == PPP_LCP)
801 async_lcp_peek(ap, p, skb->len, 1);
804 /* all OK, give it to the generic layer */
805 ppp_input(&ap->chan, skb);
806 return;
808 err:
809 kfree_skb(skb);
810 ppp_input_error(&ap->chan, code);
813 static inline void
814 input_error(struct asyncppp *ap, int code)
816 ap->state |= SC_TOSS;
817 ppp_input_error(&ap->chan, code);
820 /* called when the tty driver has data for us. */
821 static void
822 ppp_async_input(struct asyncppp *ap, const unsigned char *buf,
823 char *flags, int count)
825 struct sk_buff *skb;
826 int c, i, j, n, s, f;
827 unsigned char *sp;
829 /* update bits used for 8-bit cleanness detection */
830 if (~ap->rbits & SC_RCV_BITS) {
831 s = 0;
832 for (i = 0; i < count; ++i) {
833 c = buf[i];
834 if (flags != 0 && flags[i] != 0)
835 continue;
836 s |= (c & 0x80)? SC_RCV_B7_1: SC_RCV_B7_0;
837 c = ((c >> 4) ^ c) & 0xf;
838 s |= (0x6996 & (1 << c))? SC_RCV_ODDP: SC_RCV_EVNP;
840 ap->rbits |= s;
843 while (count > 0) {
844 /* scan through and see how many chars we can do in bulk */
845 if ((ap->state & SC_ESCAPE) && buf[0] == PPP_ESCAPE)
846 n = 1;
847 else
848 n = scan_ordinary(ap, buf, count);
850 f = 0;
851 if (flags != 0 && (ap->state & SC_TOSS) == 0) {
852 /* check the flags to see if any char had an error */
853 for (j = 0; j < n; ++j)
854 if ((f = flags[j]) != 0)
855 break;
857 if (f != 0) {
858 /* start tossing */
859 input_error(ap, f);
861 } else if (n > 0 && (ap->state & SC_TOSS) == 0) {
862 /* stuff the chars in the skb */
863 skb = ap->rpkt;
864 if (skb == 0) {
865 skb = dev_alloc_skb(ap->mru + PPP_HDRLEN + 2);
866 if (skb == 0)
867 goto nomem;
868 /* Try to get the payload 4-byte aligned */
869 if (buf[0] != PPP_ALLSTATIONS)
870 skb_reserve(skb, 2 + (buf[0] & 1));
871 ap->rpkt = skb;
873 if (n > skb_tailroom(skb)) {
874 /* packet overflowed MRU */
875 input_error(ap, 1);
876 } else {
877 sp = skb_put(skb, n);
878 memcpy(sp, buf, n);
879 if (ap->state & SC_ESCAPE) {
880 sp[0] ^= 0x20;
881 ap->state &= ~SC_ESCAPE;
886 if (n >= count)
887 break;
889 c = buf[n];
890 if (c == PPP_FLAG) {
891 process_input_packet(ap);
892 } else if (c == PPP_ESCAPE) {
893 ap->state |= SC_ESCAPE;
895 /* otherwise it's a char in the recv ACCM */
896 ++n;
898 buf += n;
899 if (flags != 0)
900 flags += n;
901 count -= n;
903 return;
905 nomem:
906 printk(KERN_ERR "PPPasync: no memory (input pkt)\n");
907 input_error(ap, 0);
911 * We look at LCP frames going past so that we can notice
912 * and react to the LCP configure-ack from the peer.
913 * In the situation where the peer has been sent a configure-ack
914 * already, LCP is up once it has sent its configure-ack
915 * so the immediately following packet can be sent with the
916 * configured LCP options. This allows us to process the following
917 * packet correctly without pppd needing to respond quickly.
919 * We only respond to the received configure-ack if we have just
920 * sent a configure-request, and the configure-ack contains the
921 * same data (this is checked using a 16-bit crc of the data).
923 #define CONFREQ 1 /* LCP code field values */
924 #define CONFACK 2
925 #define LCP_MRU 1 /* LCP option numbers */
926 #define LCP_ASYNCMAP 2
928 static void async_lcp_peek(struct asyncppp *ap, unsigned char *data,
929 int len, int inbound)
931 int dlen, fcs, i, code;
932 u32 val;
934 data += 2; /* skip protocol bytes */
935 len -= 2;
936 if (len < 4) /* 4 = code, ID, length */
937 return;
938 code = data[0];
939 if (code != CONFACK && code != CONFREQ)
940 return;
941 dlen = (data[2] << 8) + data[3];
942 if (len < dlen)
943 return; /* packet got truncated or length is bogus */
945 if (code == (inbound? CONFACK: CONFREQ)) {
947 * sent confreq or received confack:
948 * calculate the crc of the data from the ID field on.
950 fcs = PPP_INITFCS;
951 for (i = 1; i < dlen; ++i)
952 fcs = PPP_FCS(fcs, data[i]);
954 if (!inbound) {
955 /* outbound confreq - remember the crc for later */
956 ap->lcp_fcs = fcs;
957 return;
960 /* received confack, check the crc */
961 fcs ^= ap->lcp_fcs;
962 ap->lcp_fcs = -1;
963 if (fcs != 0)
964 return;
965 } else if (inbound)
966 return; /* not interested in received confreq */
968 /* process the options in the confack */
969 data += 4;
970 dlen -= 4;
971 /* data[0] is code, data[1] is length */
972 while (dlen >= 2 && dlen >= data[1]) {
973 switch (data[0]) {
974 case LCP_MRU:
975 val = (data[2] << 8) + data[3];
976 if (inbound)
977 ap->mru = val;
978 else
979 ap->chan.mtu = val;
980 break;
981 case LCP_ASYNCMAP:
982 val = (data[2] << 24) + (data[3] << 16)
983 + (data[4] << 8) + data[5];
984 if (inbound)
985 ap->raccm = val;
986 else
987 ap->xaccm[0] = val;
988 break;
990 dlen -= data[1];
991 data += data[1];
995 static void __exit ppp_async_cleanup(void)
997 if (tty_register_ldisc(N_PPP, NULL) != 0)
998 printk(KERN_ERR "failed to unregister PPP line discipline\n");
module_init(ppp_async_init);
module_exit(ppp_async_cleanup);