Import 2.3.18pre1
[davej-history.git] / drivers / net / ppp_generic.c
blob32851a5d8d4e4025431dc521a82433de3e526294
1 /*
2 * Generic PPP layer for Linux.
4 * Copyright 1999 Paul Mackerras.
6 * This program is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU General Public License
8 * as published by the Free Software Foundation; either version
9 * 2 of the License, or (at your option) any later version.
11 * The generic PPP layer handles the PPP network interfaces, the
12 * /dev/ppp device, packet and VJ compression, and multilink.
13 * It talks to PPP `channels' via the interface defined in
14 * include/linux/ppp_channel.h. Channels provide the basic means for
15 * sending and receiving PPP frames on some kind of communications
16 * channel.
18 * Part of the code in this driver was inspired by the old async-only
19 * PPP driver, written by Michael Callahan and Al Longyear, and
20 * subsequently hacked by Paul Mackerras.
22 * ==FILEVERSION 990806==
25 /* $Id: ppp_generic.c,v 1.3 1999/09/02 05:30:12 paulus Exp $ */
27 #include <linux/config.h>
28 #include <linux/module.h>
29 #include <linux/kernel.h>
30 #include <linux/kmod.h>
31 #include <linux/list.h>
32 #include <linux/netdevice.h>
33 #include <linux/poll.h>
34 #include <linux/ppp_defs.h>
35 #include <linux/if_ppp.h>
36 #include <linux/ppp_channel.h>
37 #include <linux/ppp-comp.h>
38 #include <linux/skbuff.h>
39 #include <linux/rtnetlink.h>
40 #include <linux/if_arp.h>
41 #include <linux/ip.h>
42 #include <linux/tcp.h>
43 #include <linux/spinlock.h>
44 #include <net/slhc_vj.h>
46 #define PPP_VERSION "2.4.0"
48 EXPORT_SYMBOL(ppp_register_channel);
49 EXPORT_SYMBOL(ppp_unregister_channel);
50 EXPORT_SYMBOL(ppp_input);
51 EXPORT_SYMBOL(ppp_input_error);
52 EXPORT_SYMBOL(ppp_output_wakeup);
53 EXPORT_SYMBOL(ppp_register_compressor);
54 EXPORT_SYMBOL(ppp_unregister_compressor);
57 * Network protocols we support.
59 #define NP_IP 0 /* Internet Protocol V4 */
60 #define NP_IPV6 1 /* Internet Protocol V6 */
61 #define NP_IPX 2 /* IPX protocol */
62 #define NP_AT 3 /* Appletalk protocol */
63 #define NUM_NP 4 /* Number of NPs. */
66 * Data structure describing one ppp unit.
67 * A ppp unit corresponds to a ppp network interface device
68 * and represents a multilink bundle.
69 * It may have 0 or more ppp channels connected to it.
/*
 * One PPP unit == one network interface == one (future) multilink bundle.
 * 'busy' is both a lock word (XMIT_BUSY/RECV_BUSY bits, taken via
 * lock_path()/trylock_path()) and a wakeup flag (XMIT_WAKEUP).
 * NOTE(review): the closing brace/trailing lines of this struct were lost
 * in extraction; the fields below are as in the original.
 */
71 struct ppp {
72 struct list_head list; /* link in list of ppp units */
73 int index; /* interface unit number */
74 char name[16]; /* unit name */
75 int refcnt; /* # open /dev/ppp attached */
76 unsigned long busy; /* lock and other bits */
77 struct list_head channels; /* list of attached channels */
78 int n_channels; /* how many channels are attached */
79 int mru; /* max receive unit */
80 unsigned int flags; /* control bits */
81 unsigned int xstate; /* transmit state bits */
82 unsigned int rstate; /* receive state bits */
83 int debug; /* debug flags */
84 struct slcompress *vj; /* state for VJ header compression */
85 struct sk_buff_head xq; /* pppd transmit queue */
86 struct sk_buff_head rq; /* receive queue for pppd */
87 wait_queue_head_t rwait; /* for poll on reading /dev/ppp */
88 enum NPmode npmode[NUM_NP]; /* what to do with each net proto */
89 struct sk_buff *xmit_pending; /* a packet ready to go out */
90 struct sk_buff_head recv_pending;/* pending input packets */
91 struct compressor *xcomp; /* transmit packet compressor */
92 void *xc_state; /* its internal state */
93 struct compressor *rcomp; /* receive decompressor */
94 void *rc_state; /* its internal state */
95 unsigned long last_xmit; /* jiffies when last pkt sent */
96 unsigned long last_recv; /* jiffies when last pkt rcvd */
97 struct net_device *dev; /* network interface device */
98 struct net_device_stats stats; /* statistics */
101 static LIST_HEAD(all_ppp_units);
102 static spinlock_t all_ppp_lock = SPIN_LOCK_UNLOCKED;
105 * Private data structure for each channel.
106 * Ultimately this will have multilink stuff etc. in it.
/*
 * Per-channel bookkeeping; 'chan' is the channel driver's public struct,
 * whose ->ppp back-pointer points at this wrapper.
 */
108 struct channel {
109 struct list_head list; /* link in list of channels per unit */
110 struct ppp_channel *chan; /* public channel data structure */
111 int blocked; /* if channel refused last packet */
112 struct ppp *ppp; /* ppp unit we're connected to */
115 /* Bit numbers in busy */
116 #define XMIT_BUSY 0
117 #define RECV_BUSY 1
118 #define XMIT_WAKEUP 2
121 * Bits in flags: SC_NO_TCP_CCID, SC_CCP_OPEN, SC_CCP_UP, SC_LOOP_TRAFFIC.
122 * Bits in rstate: SC_DECOMP_RUN, SC_DC_ERROR, SC_DC_FERROR.
123 * Bits in xstate: SC_COMP_RUN
125 #define SC_FLAG_BITS (SC_NO_TCP_CCID|SC_CCP_OPEN|SC_CCP_UP|SC_LOOP_TRAFFIC)
127 /* Get the PPP protocol number from a skb */
128 #define PPP_PROTO(skb) (((skb)->data[0] << 8) + (skb)->data[1])
130 /* We limit the length of ppp->rq to this (arbitrary) value */
131 #define PPP_MAX_RQLEN 32
133 /* Prototypes. */
134 static void ppp_xmit_unlock(struct ppp *ppp);
135 static void ppp_send_frame(struct ppp *ppp, struct sk_buff *skb);
136 static void ppp_push(struct ppp *ppp);
137 static void ppp_recv_unlock(struct ppp *ppp);
138 static void ppp_receive_frame(struct ppp *ppp, struct sk_buff *skb);
139 static struct sk_buff *ppp_decompress_frame(struct ppp *ppp,
140 struct sk_buff *skb);
141 static int ppp_set_compress(struct ppp *ppp, unsigned long arg);
142 static void ppp_ccp_peek(struct ppp *ppp, struct sk_buff *skb, int inbound);
143 static void ppp_ccp_closed(struct ppp *ppp);
144 static struct compressor *find_compressor(int type);
145 static void ppp_get_stats(struct ppp *ppp, struct ppp_stats *st);
146 static struct ppp *ppp_create_unit(int unit, int *retp);
147 static void ppp_release_unit(struct ppp *ppp);
148 static struct ppp *ppp_find_unit(int unit);
150 /* Translates a PPP protocol number to a NP index (NP == network protocol) */
151 static inline int proto_to_npindex(int proto)
153 switch (proto) {
154 case PPP_IP:
155 return NP_IP;
156 case PPP_IPV6:
157 return NP_IPV6;
158 case PPP_IPX:
159 return NP_IPX;
160 case PPP_AT:
161 return NP_AT;
163 return -EINVAL;
166 /* Translates an NP index into a PPP protocol number */
167 static const int npindex_to_proto[NUM_NP] = {
168 PPP_IP,
169 PPP_IPV6,
170 PPP_IPX,
171 PPP_AT,
174 /* Translates an ethertype into an NP index */
175 static inline int ethertype_to_npindex(int ethertype)
177 switch (ethertype) {
178 case ETH_P_IP:
179 return NP_IP;
180 case ETH_P_IPV6:
181 return NP_IPV6;
182 case ETH_P_IPX:
183 return NP_IPX;
184 case ETH_P_PPPTALK:
185 case ETH_P_ATALK:
186 return NP_AT;
188 return -1;
191 /* Translates an NP index into an ethertype */
192 static const int npindex_to_ethertype[NUM_NP] = {
193 ETH_P_IP,
194 ETH_P_IPV6,
195 ETH_P_IPX,
196 ETH_P_PPPTALK,
200 * Routines for locking and unlocking the transmit and receive paths
201 * of each unit.
/*
 * Take one of the XMIT_BUSY/RECV_BUSY bit-locks in ppp->busy.
 * Spins (with a bounded iteration count, ~1e6) rather than forever;
 * on timeout it logs and returns WITHOUT the lock held — callers are
 * not told, so this is strictly a last-ditch anti-deadlock escape.
 * The mb() calls order the lock acquisition against the protected data.
 */
203 static inline void
204 lock_path(struct ppp *ppp, int bit)
206 int timeout = 1000000;
208 do {
209 while (test_bit(bit, &ppp->busy)) {
210 mb();
211 if (--timeout == 0) {
212 printk(KERN_ERR "lock_path timeout ppp=%p bit=%x\n", ppp, bit);
213 return;
216 } while (test_and_set_bit(bit, &ppp->busy));
217 mb();
/*
 * Try to take a busy-bit lock without spinning.
 * Returns 1 with the lock held, 0 if it was already taken.
 */
220 static inline int
221 trylock_path(struct ppp *ppp, int bit)
223 if (test_and_set_bit(bit, &ppp->busy))
224 return 0;
225 mb();
226 return 1;
/*
 * Release a busy-bit lock; the mb() makes prior stores visible
 * before the bit is cleared.
 */
229 static inline void
230 unlock_path(struct ppp *ppp, int bit)
232 mb();
233 clear_bit(bit, &ppp->busy);
236 #define lock_xmit_path(ppp) lock_path(ppp, XMIT_BUSY)
237 #define trylock_xmit_path(ppp) trylock_path(ppp, XMIT_BUSY)
238 #define unlock_xmit_path(ppp) unlock_path(ppp, XMIT_BUSY)
239 #define lock_recv_path(ppp) lock_path(ppp, RECV_BUSY)
240 #define trylock_recv_path(ppp) trylock_path(ppp, RECV_BUSY)
241 #define unlock_recv_path(ppp) unlock_path(ppp, RECV_BUSY)
/* Drain a socket-buffer queue, freeing every packet on it. */
static inline void
free_skbs(struct sk_buff_head *head)
{
	for (;;) {
		struct sk_buff *skb = skb_dequeue(head);

		if (skb == 0)
			break;
		kfree_skb(skb);
	}
}
253 * /dev/ppp device routines.
254 * The /dev/ppp device is used by pppd to control the ppp unit.
255 * It supports the read, write, ioctl and poll functions.
/*
 * open() on /dev/ppp: only admin-capable processes may control PPP units.
 * No unit is attached yet; that happens via PPPIOCNEWUNIT/PPPIOCATTACH.
 */
257 static int ppp_open(struct inode *inode, struct file *file)
260 * This could (should?) be enforced by the permissions on /dev/ppp.
262 if (!capable(CAP_NET_ADMIN))
263 return -EPERM;
264 MOD_INC_USE_COUNT;
265 return 0;
/*
 * close() on /dev/ppp: drop our reference on the attached unit (if any)
 * and balance the module use count taken in ppp_open().
 */
268 static int ppp_release(struct inode *inode, struct file *file)
270 struct ppp *ppp = (struct ppp *) file->private_data;
272 if (ppp != 0) {
273 file->private_data = 0;
274 ppp_release_unit(ppp);
276 MOD_DEC_USE_COUNT;
277 return 0;
/*
 * read() on /dev/ppp: hand pppd the next frame from ppp->rq.
 * Blocks (interruptibly) unless O_NONBLOCK; the classic
 * add_wait_queue / set state / check / schedule pattern below is
 * order-sensitive and must not be rearranged.
 * Returns frame length, or -EAGAIN / -ERESTARTSYS / -EOVERFLOW
 * (frame bigger than the user buffer; frame is then discarded) /
 * -EFAULT / -ENXIO (no unit attached).
 */
280 static ssize_t ppp_read(struct file *file, char *buf,
281 size_t count, loff_t *ppos)
283 struct ppp *ppp = (struct ppp *) file->private_data;
284 DECLARE_WAITQUEUE(wait, current);
285 ssize_t ret;
286 struct sk_buff *skb = 0;
288 ret = -ENXIO;
289 if (ppp == 0)
290 goto out; /* not currently attached */
292 add_wait_queue(&ppp->rwait, &wait);
293 current->state = TASK_INTERRUPTIBLE;
294 for (;;) {
295 ret = -EAGAIN;
296 skb = skb_dequeue(&ppp->rq);
297 if (skb)
298 break;
299 if (file->f_flags & O_NONBLOCK)
300 break;
301 ret = -ERESTARTSYS;
302 if (signal_pending(current))
303 break;
304 schedule();
306 current->state = TASK_RUNNING;
307 remove_wait_queue(&ppp->rwait, &wait);
309 if (skb == 0)
310 goto out;
312 ret = -EOVERFLOW;
313 if (skb->len > count)
314 goto outf;
315 ret = -EFAULT;
316 if (copy_to_user(buf, skb->data, skb->len))
317 goto outf;
318 ret = skb->len;
320 outf:
321 kfree_skb(skb);
322 out:
323 return ret;
/*
 * write() on /dev/ppp: pppd injects a complete PPP frame (proto bytes
 * first).  The frame is queued on ppp->xq and the transmit side is
 * kicked if we can take the xmit lock; otherwise whoever holds the
 * lock will drain xq in ppp_xmit_unlock().
 * The 2 reserved headroom bytes leave room for the A/C bytes that the
 * compressors expect in front of the frame.
 */
326 static ssize_t ppp_write(struct file *file, const char *buf,
327 size_t count, loff_t *ppos)
329 struct ppp *ppp = (struct ppp *) file->private_data;
330 struct sk_buff *skb;
331 ssize_t ret;
333 ret = -ENXIO;
334 if (ppp == 0)
335 goto out;
337 ret = -ENOMEM;
338 skb = alloc_skb(count + 2, GFP_KERNEL);
339 if (skb == 0)
340 goto out;
341 skb_reserve(skb, 2);
342 ret = -EFAULT;
343 if (copy_from_user(skb_put(skb, count), buf, count)) {
344 kfree_skb(skb);
345 goto out;
348 skb_queue_tail(&ppp->xq, skb);
349 if (trylock_xmit_path(ppp))
350 ppp_xmit_unlock(ppp);
352 ret = count;
354 out:
355 return ret;
/*
 * poll() on /dev/ppp: always writable; readable when ppp->rq is
 * non-empty.  Returns 0 if no unit is attached.
 */
358 static unsigned int ppp_poll(struct file *file, poll_table *wait)
360 struct ppp *ppp = (struct ppp *) file->private_data;
361 unsigned int mask;
363 if (ppp == 0)
364 return 0;
365 poll_wait(file, &ppp->rwait, wait);
366 mask = POLLOUT | POLLWRNORM;
367 if (skb_peek(&ppp->rq) != 0)
368 mask |= POLLIN | POLLRDNORM;
369 return mask;
/*
 * ioctl() on /dev/ppp.  PPPIOCNEWUNIT / PPPIOCATTACH work before a
 * unit is attached (and write the resulting unit number back through
 * 'arg'); everything else requires an attached unit and falls through
 * to the big switch.  err defaults to -EFAULT so every get_user/
 * put_user/copy_* 'break'-on-failure path reports it.
 */
372 static int ppp_ioctl(struct inode *inode, struct file *file,
373 unsigned int cmd, unsigned long arg)
375 struct ppp *ppp = (struct ppp *) file->private_data;
376 int err, val, val2, i;
377 struct ppp_idle idle;
378 struct npioctl npi;
380 if (cmd == PPPIOCNEWUNIT) {
381 /* Create a new ppp unit */
382 int unit, ret;
384 if (ppp != 0)
385 return -EINVAL;
386 if (get_user(unit, (int *) arg))
387 return -EFAULT;
388 ppp = ppp_create_unit(unit, &ret);
389 if (ppp == 0)
390 return ret;
391 file->private_data = ppp;
392 if (put_user(ppp->index, (int *) arg))
393 return -EFAULT;
394 return 0;
396 if (cmd == PPPIOCATTACH) {
397 /* Attach to an existing ppp unit */
398 int unit;
400 if (ppp != 0)
401 return -EINVAL;
402 if (get_user(unit, (int *) arg))
403 return -EFAULT;
404 spin_lock(&all_ppp_lock);
405 ppp = ppp_find_unit(unit);
406 if (ppp != 0)
407 ++ppp->refcnt;
408 spin_unlock(&all_ppp_lock);
409 if (ppp == 0)
410 return -ENXIO;
411 file->private_data = ppp;
412 return 0;
415 if (ppp == 0)
416 return -ENXIO;
417 err = -EFAULT;
418 switch (cmd) {
419 case PPPIOCDETACH:
420 file->private_data = 0;
421 ppp_release_unit(ppp);
422 err = 0;
423 break;
425 case PPPIOCSMRU:
426 if (get_user(val, (int *) arg))
427 break;
428 ppp->mru = val;
429 err = 0;
430 break;
432 case PPPIOCSFLAGS:
/* Clearing SC_CCP_OPEN while it was set tears down compression state. */
433 if (get_user(val, (int *) arg))
434 break;
435 if (ppp->flags & ~val & SC_CCP_OPEN)
436 ppp_ccp_closed(ppp);
437 ppp->flags = val & SC_FLAG_BITS;
438 err = 0;
439 break;
441 case PPPIOCGFLAGS:
442 val = ppp->flags | ppp->xstate | ppp->rstate;
443 if (put_user(val, (int *) arg))
444 break;
445 err = 0;
446 break;
448 case PPPIOCSCOMPRESS:
449 err = ppp_set_compress(ppp, arg);
450 break;
452 case PPPIOCGUNIT:
453 if (put_user(ppp->index, (int *) arg))
454 break;
455 err = 0;
456 break;
458 case PPPIOCSDEBUG:
459 if (get_user(val, (int *) arg))
460 break;
461 ppp->debug = val;
462 err = 0;
463 break;
465 case PPPIOCGDEBUG:
466 if (put_user(ppp->debug, (int *) arg))
467 break;
468 err = 0;
469 break;
471 case PPPIOCGIDLE:
472 idle.xmit_idle = (jiffies - ppp->last_xmit) / HZ;
473 idle.recv_idle = (jiffies - ppp->last_recv) / HZ;
474 if (copy_to_user((void *) arg, &idle, sizeof(idle)))
475 break;
476 err = 0;
477 break;
479 case PPPIOCSMAXCID:
/* low 16 bits: max xmit slot id; high 16 bits (if set): max recv slot id.
   Both paths are locked while the VJ state is swapped out. */
480 if (get_user(val, (int *) arg))
481 break;
482 val2 = 15;
483 if ((val >> 16) != 0) {
484 val2 = val >> 16;
485 val &= 0xffff;
487 lock_xmit_path(ppp);
488 lock_recv_path(ppp);
489 if (ppp->vj != 0)
490 slhc_free(ppp->vj);
491 ppp->vj = slhc_init(val2+1, val+1);
492 ppp_recv_unlock(ppp);
493 ppp_xmit_unlock(ppp);
494 err = -ENOMEM;
495 if (ppp->vj == 0) {
496 printk(KERN_ERR "PPP: no memory (VJ compressor)\n");
497 break;
499 err = 0;
500 break;
502 case PPPIOCGNPMODE:
503 case PPPIOCSNPMODE:
504 if (copy_from_user(&npi, (void *) arg, sizeof(npi)))
505 break;
506 err = proto_to_npindex(npi.protocol);
507 if (err < 0)
508 break;
509 i = err;
510 if (cmd == PPPIOCGNPMODE) {
511 err = -EFAULT;
512 npi.mode = ppp->npmode[i];
513 if (copy_to_user((void *) arg, &npi, sizeof(npi)))
514 break;
515 } else {
516 ppp->npmode[i] = npi.mode;
517 /* we may be able to transmit more packets now (??) */
518 mark_bh(NET_BH);
520 err = 0;
521 break;
523 default:
524 err = -ENOTTY;
526 return err;
/*
 * file_operations for /dev/ppp, in the 2.3-era positional-initializer
 * layout: lseek, read, write, readdir, poll, ioctl, mmap, open,
 * flush, release.
 */
529 static struct file_operations ppp_device_fops = {
530 NULL, /* seek */
531 ppp_read,
532 ppp_write,
533 NULL, /* readdir */
534 ppp_poll,
535 ppp_ioctl,
536 NULL, /* mmap */
537 ppp_open,
538 NULL, /* flush */
539 ppp_release
542 #define PPP_MAJOR 108
544 /* Called at boot time if ppp is compiled into the kernel,
545 or at module load time (from init_module) if compiled as a module. */
/*
 * One-time driver init: register the /dev/ppp char device (major 108)
 * and, in the built-in (non-module) case, register the statically
 * linked async channel and deflate compressors.
 * NOTE(review): returns -ENODEV unconditionally — presumably so the
 * dummy net_device used to trigger this init is not kept; confirm
 * against the boot-time caller.
 */
547 ppp_init(struct net_device *dev)
549 int err;
550 #ifndef MODULE
551 extern struct compressor ppp_deflate, ppp_deflate_draft;
552 extern int ppp_async_init(void);
553 #endif
555 printk(KERN_INFO "PPP generic driver version " PPP_VERSION "\n");
556 err = register_chrdev(PPP_MAJOR, "ppp", &ppp_device_fops);
557 if (err)
558 printk(KERN_ERR "failed to register PPP device (%d)\n", err);
559 #ifndef MODULE
560 #ifdef CONFIG_PPP_ASYNC
561 ppp_async_init();
562 #endif
563 #ifdef CONFIG_PPP_DEFLATE
564 if (ppp_register_compressor(&ppp_deflate) == 0)
565 printk(KERN_INFO "PPP Deflate compression module registered\n");
566 ppp_register_compressor(&ppp_deflate_draft);
567 #endif
568 #endif /* MODULE */
570 return -ENODEV;
574 * Network interface unit routines.
/*
 * Network-stack transmit entry point (dev->hard_start_xmit).
 * Maps the ethertype to an NP index, applies the per-protocol NPmode
 * policy, serializes on the XMIT_BUSY bit (returning 1 with tbusy set
 * when the path is busy or a packet is still pending), then prepends
 * the 2-byte PPP protocol number and hands off to ppp_send_frame().
 * Returns 0 when the packet was consumed (sent or dropped).
 */
576 static int
577 ppp_start_xmit(struct sk_buff *skb, struct net_device *dev)
579 struct ppp *ppp = (struct ppp *) dev->priv;
580 int npi, proto;
581 unsigned char *pp;
583 if (skb == 0)
584 return 0;
585 /* can skb->data ever be 0? */
587 npi = ethertype_to_npindex(ntohs(skb->protocol));
588 if (npi < 0)
589 goto outf;
591 /* Drop, accept or reject the packet */
592 switch (ppp->npmode[npi]) {
593 case NPMODE_PASS:
594 break;
595 case NPMODE_QUEUE:
596 /* it would be nice to have a way to tell the network
597 system to queue this one up for later. */
598 goto outf;
599 case NPMODE_DROP:
600 case NPMODE_ERROR:
601 goto outf;
604 /* The transmit side of the ppp interface is serialized by
605 the XMIT_BUSY bit in ppp->busy. */
606 if (!trylock_xmit_path(ppp)) {
607 dev->tbusy = 1;
608 return 1;
610 if (ppp->xmit_pending)
611 ppp_push(ppp);
612 if (ppp->xmit_pending) {
613 dev->tbusy = 1;
614 ppp_xmit_unlock(ppp);
615 return 1;
617 dev->tbusy = 0;
619 /* Put the 2-byte PPP protocol number on the front,
620 making sure there is room for the address and control fields. */
621 if (skb_headroom(skb) < PPP_HDRLEN) {
622 struct sk_buff *ns;
624 ns = alloc_skb(skb->len + PPP_HDRLEN, GFP_ATOMIC);
625 if (ns == 0)
626 goto outnbusy;
627 skb_reserve(ns, PPP_HDRLEN);
628 memcpy(skb_put(ns, skb->len), skb->data, skb->len);
629 kfree_skb(skb);
630 skb = ns;
632 pp = skb_push(skb, 2);
633 proto = npindex_to_proto[npi];
634 pp[0] = proto >> 8;
635 pp[1] = proto;
637 ppp_send_frame(ppp, skb);
638 ppp_xmit_unlock(ppp);
639 return 0;
641 outnbusy:
642 ppp_xmit_unlock(ppp);
644 outf:
645 kfree_skb(skb);
646 return 0;
649 static struct net_device_stats *
650 ppp_net_stats(struct net_device *dev)
652 struct ppp *ppp = (struct ppp *) dev->priv;
654 return &ppp->stats;
/*
 * Network-device ioctl (dev->do_ioctl): read-only statistics and
 * version queries.  All results are copied through ifr->ifr_ifru.
 * ifru_data; err defaults to -EFAULT for failed copies.
 */
657 static int
658 ppp_net_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
660 struct ppp *ppp = dev->priv;
661 int err = -EFAULT;
662 void *addr = (void *) ifr->ifr_ifru.ifru_data;
663 struct ppp_stats stats;
664 struct ppp_comp_stats cstats;
665 char *vers;
667 switch (cmd) {
668 case SIOCGPPPSTATS:
669 ppp_get_stats(ppp, &stats);
670 if (copy_to_user(addr, &stats, sizeof(stats)))
671 break;
672 err = 0;
673 break;
675 case SIOCGPPPCSTATS:
676 memset(&cstats, 0, sizeof(cstats));
677 if (ppp->xc_state != 0)
678 ppp->xcomp->comp_stat(ppp->xc_state, &cstats.c);
679 if (ppp->rc_state != 0)
680 ppp->rcomp->decomp_stat(ppp->rc_state, &cstats.d);
681 if (copy_to_user(addr, &cstats, sizeof(cstats)))
682 break;
683 err = 0;
684 break;
686 case SIOCGPPPVER:
687 vers = PPP_VERSION;
688 if (copy_to_user(addr, vers, strlen(vers) + 1))
689 break;
690 err = 0;
691 break;
693 default:
694 err = -EINVAL;
697 return err;
/*
 * net_device init hook: fill in the PPP interface's parameters
 * (point-to-point, no ARP, no hardware address) and method pointers.
 */
701 ppp_net_init(struct net_device *dev)
703 dev->hard_header_len = PPP_HDRLEN;
704 dev->mtu = PPP_MTU;
705 dev->hard_start_xmit = ppp_start_xmit;
706 dev->get_stats = ppp_net_stats;
707 dev->do_ioctl = ppp_net_ioctl;
708 dev->addr_len = 0;
709 dev->tx_queue_len = 3;
710 dev->type = ARPHRD_PPP;
711 dev->flags = IFF_POINTOPOINT | IFF_NOARP | IFF_MULTICAST;
713 dev_init_buffers(dev);
714 return 0;
718 * Transmit-side routines.
722 * Called to unlock the transmit side of the ppp unit,
723 * making sure that any work queued up gets done.
/*
 * Release the xmit path after doing any queued-up transmit work:
 * push the pending packet on XMIT_WAKEUP, then drain ppp->xq while
 * nothing is pending.  The unlock-recheck-trylock loop closes the
 * race where new work arrives between draining and unlocking; if the
 * re-trylock fails, whoever now holds the lock will do the work.
 * The exact ordering here is load-bearing — do not rearrange.
 */
725 static void
726 ppp_xmit_unlock(struct ppp *ppp)
728 struct sk_buff *skb;
730 for (;;) {
731 if (test_and_clear_bit(XMIT_WAKEUP, &ppp->busy))
732 ppp_push(ppp);
733 while (ppp->xmit_pending == 0
734 && (skb = skb_dequeue(&ppp->xq)) != 0)
735 ppp_send_frame(ppp, skb);
736 unlock_xmit_path(ppp);
737 if (!(test_bit(XMIT_WAKEUP, &ppp->busy)
738 || (ppp->xmit_pending == 0 && skb_peek(&ppp->xq))))
739 break;
740 if (!trylock_xmit_path(ppp))
741 break;
746 * Compress and send a frame.
747 * The caller should have locked the xmit path,
748 * and xmit_pending should be 0.
/*
 * Compress and send one frame (caller holds the xmit path and
 * xmit_pending is 0).  Applies VJ TCP header compression to PPP_IP
 * frames, lets CCP negotiation frames update our state, then runs the
 * packet compressor (except on LCP/CCP frames).  In demand-dial mode
 * (SC_LOOP_TRAFFIC) the frame is diverted to pppd's read queue
 * instead of a channel.  Consumes 'skb' on every path.
 */
750 static void
751 ppp_send_frame(struct ppp *ppp, struct sk_buff *skb)
753 int proto = PPP_PROTO(skb);
754 struct sk_buff *new_skb;
755 int len;
756 unsigned char *cp;
758 ++ppp->stats.tx_packets;
759 ppp->stats.tx_bytes += skb->len - 2;
761 switch (proto) {
762 case PPP_IP:
763 if (ppp->vj == 0 || (ppp->flags & SC_COMP_TCP) == 0)
764 break;
765 /* try to do VJ TCP header compression */
766 new_skb = alloc_skb(skb->len + 2, GFP_ATOMIC);
767 if (new_skb == 0) {
768 printk(KERN_ERR "PPP: no memory (VJ comp pkt)\n");
769 goto drop;
771 skb_reserve(new_skb, 2);
772 cp = skb->data + 2;
773 len = slhc_compress(ppp->vj, cp, skb->len - 2,
774 new_skb->data + 2, &cp,
775 !(ppp->flags & SC_NO_TCP_CCID));
776 if (cp == skb->data + 2) {
777 /* didn't compress */
778 kfree_skb(new_skb);
779 } else {
/* slhc_compress tags the first byte with the packet type; we move
   that tag into the PPP protocol number instead. */
780 if (cp[0] & SL_TYPE_COMPRESSED_TCP) {
781 proto = PPP_VJC_COMP;
782 cp[0] &= ~SL_TYPE_COMPRESSED_TCP;
783 } else {
784 proto = PPP_VJC_UNCOMP;
785 cp[0] = skb->data[2];
787 kfree_skb(skb);
788 skb = new_skb;
789 cp = skb_put(skb, len + 2);
790 cp[0] = 0;
791 cp[1] = proto;
793 break;
795 case PPP_CCP:
796 /* peek at outbound CCP frames */
797 ppp_ccp_peek(ppp, skb, 0);
798 break;
801 /* try to do packet compression */
802 if ((ppp->xstate & SC_COMP_RUN) && ppp->xc_state != 0
803 && proto != PPP_LCP && proto != PPP_CCP) {
804 new_skb = alloc_skb(ppp->dev->mtu + PPP_HDRLEN, GFP_ATOMIC);
805 if (new_skb == 0) {
806 printk(KERN_ERR "PPP: no memory (comp pkt)\n");
807 goto drop;
810 /* compressor still expects A/C bytes in hdr */
811 len = ppp->xcomp->compress(ppp->xc_state, skb->data - 2,
812 new_skb->data, skb->len + 2,
813 ppp->dev->mtu + PPP_HDRLEN);
814 if (len > 0 && (ppp->flags & SC_CCP_UP)) {
815 kfree_skb(skb);
816 skb = new_skb;
817 skb_put(skb, len);
818 skb_pull(skb, 2); /* pull off A/C bytes */
819 } else {
820 /* didn't compress, or CCP not up yet */
821 kfree_skb(new_skb);
825 /* for data packets, record the time */
826 if (proto < 0x8000)
827 ppp->last_xmit = jiffies;
830 * If we are waiting for traffic (demand dialling),
831 * queue it up for pppd to receive.
833 if (ppp->flags & SC_LOOP_TRAFFIC) {
834 if (ppp->rq.qlen > PPP_MAX_RQLEN)
835 goto drop;
836 skb_queue_tail(&ppp->rq, skb);
837 wake_up_interruptible(&ppp->rwait);
838 return;
841 ppp->xmit_pending = skb;
842 ppp_push(ppp);
843 return;
845 drop:
846 kfree_skb(skb);
847 ++ppp->stats.tx_errors;
851 * Try to send the frame in xmit_pending.
852 * The caller should have the xmit path locked.
/*
 * Try to hand xmit_pending to a channel (caller holds the xmit path).
 * No channels: drop the packet.  Multilink is not implemented yet;
 * we always use the first channel.  If the channel accepts the
 * packet, ownership passes to it and xmit_pending is cleared;
 * otherwise the packet stays pending and the channel is marked
 * blocked until ppp_output_wakeup().
 */
854 static void
855 ppp_push(struct ppp *ppp)
857 struct list_head *list;
858 struct channel *chan;
859 struct sk_buff *skb = ppp->xmit_pending;
861 if (skb == 0)
862 return;
864 list = &ppp->channels;
865 if (list_empty(list)) {
866 /* nowhere to send the packet, just drop it */
867 ppp->xmit_pending = 0;
868 kfree_skb(skb);
869 return;
872 /* If we are doing multilink, decide which channel gets the
873 packet, and/or fragment the packet over several links. */
874 /* XXX for now, just take the first channel */
875 list = list->next;
876 chan = list_entry(list, struct channel, list);
878 if (chan->chan->ops->start_xmit(chan->chan, skb)) {
879 ppp->xmit_pending = 0;
880 chan->blocked = 0;
881 } else
882 chan->blocked = 1;
886 * Receive-side routines.
/*
 * Queue an incoming frame and process the receive queue if we can
 * take the recv lock; otherwise the current lock holder will see it.
 */
888 static inline void
889 ppp_do_recv(struct ppp *ppp, struct sk_buff *skb)
891 skb_queue_tail(&ppp->recv_pending, skb);
892 if (trylock_recv_path(ppp))
893 ppp_recv_unlock(ppp);
/*
 * Channel-driver entry point: a complete PPP frame arrived on 'chan'.
 * Frames on unattached channels, and 0-length frames (reserved as the
 * error indication — see ppp_input_error), are discarded.
 */
896 void
897 ppp_input(struct ppp_channel *chan, struct sk_buff *skb)
899 struct channel *pch = chan->ppp;
901 if (pch == 0 || skb->len == 0) {
902 kfree_skb(skb);
903 return;
905 ppp_do_recv(pch->ppp, skb);
908 /* Put a 0-length skb in the receive queue as an error indication */
/* Put a 0-length skb in the receive queue as an error indication;
   the error code rides in skb->cb[0] (read in ppp_receive_frame). */
909 void
910 ppp_input_error(struct ppp_channel *chan, int code)
912 struct channel *pch = chan->ppp;
913 struct sk_buff *skb;
915 if (pch == 0)
916 return;
917 skb = alloc_skb(0, GFP_ATOMIC);
918 if (skb == 0)
919 return;
920 skb->len = 0; /* probably unnecessary */
921 skb->cb[0] = code;
922 ppp_do_recv(pch->ppp, skb);
/*
 * Drain recv_pending and release the recv path; mirror image of
 * ppp_xmit_unlock() with the same unlock-recheck-trylock race closer.
 */
925 static void
926 ppp_recv_unlock(struct ppp *ppp)
928 struct sk_buff *skb;
930 for (;;) {
931 while ((skb = skb_dequeue(&ppp->recv_pending)) != 0)
932 ppp_receive_frame(ppp, skb);
933 unlock_recv_path(ppp);
934 if (skb_peek(&ppp->recv_pending) == 0)
935 break;
936 if (!trylock_recv_path(ppp))
937 break;
/*
 * Process one received frame (caller holds the recv path).  In order:
 * treat 0-length skbs as channel error indications; run the packet
 * decompressor; expand VJ-compressed TCP headers; let inbound CCP
 * frames update compression state.  Control/unknown protocols go to
 * pppd's read queue (bounded by PPP_MAX_RQLEN, dropping oldest);
 * network-protocol frames go up the stack via netif_rx() if the
 * interface is up and the NPmode allows.  Consumes 'skb'.
 */
941 static void
942 ppp_receive_frame(struct ppp *ppp, struct sk_buff *skb)
944 struct sk_buff *ns;
945 int proto, len, npi;
947 if (skb->len == 0) {
948 /* XXX should do something with code in skb->cb[0] */
949 goto err; /* error indication */
952 if (skb->len < 2) {
953 ++ppp->stats.rx_length_errors;
954 goto err;
957 /* Decompress the frame, if compressed. */
958 if (ppp->rc_state != 0 && (ppp->rstate & SC_DECOMP_RUN)
959 && (ppp->rstate & (SC_DC_FERROR | SC_DC_ERROR)) == 0)
960 skb = ppp_decompress_frame(ppp, skb);
962 proto = PPP_PROTO(skb);
963 switch (proto) {
964 case PPP_VJC_COMP:
965 /* decompress VJ compressed packets */
966 if (ppp->vj == 0 || (ppp->flags & SC_REJ_COMP_TCP))
967 goto err;
/* slhc_uncompress may grow the packet in place; make sure there is
   tailroom for the expanded TCP/IP header. */
968 if (skb_tailroom(skb) < 124) {
969 /* copy to a new sk_buff with more tailroom */
970 ns = dev_alloc_skb(skb->len + 128);
971 if (ns == 0) {
972 printk(KERN_ERR"PPP: no memory (VJ decomp)\n");
973 goto err;
975 skb_reserve(ns, 2);
976 memcpy(skb_put(ns, skb->len), skb->data, skb->len);
977 kfree_skb(skb);
978 skb = ns;
980 len = slhc_uncompress(ppp->vj, skb->data + 2, skb->len - 2);
981 if (len <= 0) {
982 printk(KERN_ERR "PPP: VJ decompression error\n");
983 goto err;
985 len += 2;
986 if (len > skb->len)
987 skb_put(skb, len - skb->len);
988 else if (len < skb->len)
989 skb_trim(skb, len);
990 proto = PPP_IP;
991 break;
993 case PPP_VJC_UNCOMP:
994 if (ppp->vj == 0 || (ppp->flags & SC_REJ_COMP_TCP))
995 goto err;
996 if (slhc_remember(ppp->vj, skb->data + 2, skb->len - 2) <= 0) {
997 printk(KERN_ERR "PPP: VJ uncompressed error\n");
998 goto err;
1000 proto = PPP_IP;
1001 break;
1003 case PPP_CCP:
1004 ppp_ccp_peek(ppp, skb, 1);
1005 break;
1008 ++ppp->stats.rx_packets;
1009 ppp->stats.rx_bytes += skb->len - 2;
1011 npi = proto_to_npindex(proto);
1012 if (npi < 0) {
1013 /* control or unknown frame - pass it to pppd */
1014 skb_queue_tail(&ppp->rq, skb);
1015 /* limit queue length by dropping old frames */
1016 while (ppp->rq.qlen > PPP_MAX_RQLEN) {
1017 skb = skb_dequeue(&ppp->rq);
1018 if (skb)
1019 kfree_skb(skb);
1021 /* wake up any process polling or blocking on read */
1022 wake_up_interruptible(&ppp->rwait);
1024 } else {
1025 /* network protocol frame - give it to the kernel */
1026 ppp->last_recv = jiffies;
1027 if ((ppp->dev->flags & IFF_UP) == 0
1028 || ppp->npmode[npi] != NPMODE_PASS) {
1029 kfree_skb(skb);
1030 } else {
1031 skb_pull(skb, 2); /* chop off protocol */
1032 skb->dev = ppp->dev;
1033 skb->protocol = htons(npindex_to_ethertype[npi]);
1034 skb->mac.raw = skb->data;
1035 netif_rx(skb);
1038 return;
1040 err:
1041 ++ppp->stats.rx_errors;
1042 if (ppp->vj != 0)
1043 slhc_toss(ppp->vj);
1044 kfree_skb(skb);
/*
 * Run the receive decompressor on a frame.  PPP_COMP frames are
 * decompressed into a fresh skb (the original is freed); other frames
 * are fed to the decompressor's incomp() hook so its dictionary stays
 * in sync.  On decompression failure the ORIGINAL (still-compressed)
 * skb is returned with SC_DC_ERROR (and SC_DC_FERROR if fatal) set,
 * so ppp_receive_frame() forwards it to pppd as an error indication.
 */
1047 static struct sk_buff *
1048 ppp_decompress_frame(struct ppp *ppp, struct sk_buff *skb)
1050 int proto = PPP_PROTO(skb);
1051 struct sk_buff *ns;
1052 int len;
1054 if (proto == PPP_COMP) {
1055 ns = dev_alloc_skb(ppp->mru + PPP_HDRLEN);
1056 if (ns == 0) {
1057 printk(KERN_ERR "ppp_receive: no memory\n");
1058 goto err;
1060 /* the decompressor still expects the A/C bytes in the hdr */
1061 len = ppp->rcomp->decompress(ppp->rc_state, skb->data - 2,
1062 skb->len + 2, ns->data, ppp->mru + PPP_HDRLEN);
1063 if (len < 0) {
1064 /* Pass the compressed frame to pppd as an
1065 error indication. */
1066 if (len == DECOMP_FATALERROR)
1067 ppp->rstate |= SC_DC_FERROR;
1068 goto err;
1071 kfree_skb(skb);
1072 skb = ns;
1073 skb_put(skb, len);
1074 skb_pull(skb, 2); /* pull off the A/C bytes */
1076 } else {
1077 /* Uncompressed frame - pass to decompressor so it
1078 can update its dictionary if necessary. */
1079 if (ppp->rcomp->incomp)
1080 ppp->rcomp->incomp(ppp->rc_state, skb->data - 2,
1081 skb->len + 2);
1084 return skb;
1086 err:
1087 ppp->rstate |= SC_DC_ERROR;
1088 if (ppp->vj != 0)
1089 slhc_toss(ppp->vj);
1090 ++ppp->stats.rx_errors;
1091 return skb;
1095 * Channel interface.
1099 * Connect a channel to a given PPP unit.
1100 * The channel MUST NOT be connected to a PPP unit already.
/*
 * Connect a channel to the PPP unit numbered 'unit'.
 * The channel MUST NOT be connected to a unit already.
 * Allocates the struct channel wrapper with GFP_ATOMIC because
 * all_ppp_lock is held.  Returns 0, -ENXIO (no such unit) or -ENOMEM.
 */
1103 ppp_register_channel(struct ppp_channel *chan, int unit)
1105 struct ppp *ppp;
1106 struct channel *pch;
1107 int ret = -ENXIO;
1109 spin_lock(&all_ppp_lock);
1110 ppp = ppp_find_unit(unit);
1111 if (ppp == 0)
1112 goto out;
1113 pch = kmalloc(sizeof(struct channel), GFP_ATOMIC);
1114 ret = -ENOMEM;
1115 if (pch == 0)
1116 goto out;
1117 memset(pch, 0, sizeof(struct channel));
1118 pch->ppp = ppp;
1119 pch->chan = chan;
1120 list_add(&pch->list, &ppp->channels);
1121 chan->ppp = pch;
1122 ++ppp->n_channels;
1123 ret = 0;
1124 out:
1125 spin_unlock(&all_ppp_lock);
1126 return ret;
1130 * Disconnect a channel from its PPP unit.
/*
 * Disconnect a channel from its PPP unit and free the wrapper.
 * Safe to call on a channel that was never registered (chan->ppp == 0).
 */
1132 void
1133 ppp_unregister_channel(struct ppp_channel *chan)
1135 struct channel *pch;
1137 spin_lock(&all_ppp_lock);
1138 if ((pch = chan->ppp) != 0) {
1139 chan->ppp = 0;
1140 list_del(&pch->list);
1141 --pch->ppp->n_channels;
1142 kfree(pch);
1144 spin_unlock(&all_ppp_lock);
1148 * Callback from a channel when it can accept more to transmit.
1149 * This should ideally be called at BH level, not interrupt level.
/*
 * Callback from a channel when it can accept more to transmit.
 * Sets XMIT_WAKEUP so that whoever owns the xmit path retries the
 * pending packet; if we can take the path ourselves we do the work
 * here, then clear dev->tbusy and poke the net BH if nothing remains
 * pending.  Ideally called at BH level, not interrupt level.
 */
1151 void
1152 ppp_output_wakeup(struct ppp_channel *chan)
1154 struct channel *pch = chan->ppp;
1155 struct ppp *ppp;
1157 if (pch == 0)
1158 return;
1159 ppp = pch->ppp;
1160 pch->blocked = 0;
1161 set_bit(XMIT_WAKEUP, &ppp->busy);
1162 if (trylock_xmit_path(ppp))
1163 ppp_xmit_unlock(ppp);
1164 if (ppp->xmit_pending == 0) {
1165 ppp->dev->tbusy = 0;
1166 mark_bh(NET_BH);
1171 * Compression control.
1174 /* Process the PPPIOCSCOMPRESS ioctl. */
1175 static int
1176 ppp_set_compress(struct ppp *ppp, unsigned long arg)
1178 int err;
1179 struct compressor *cp;
1180 struct ppp_option_data data;
1181 unsigned char ccp_option[CCP_MAX_OPTION_LENGTH];
1182 #ifdef CONFIG_KMOD
1183 char modname[32];
1184 #endif
1186 err = -EFAULT;
1187 if (copy_from_user(&data, (void *) arg, sizeof(data))
1188 || (data.length <= CCP_MAX_OPTION_LENGTH
1189 && copy_from_user(ccp_option, data.ptr, data.length)))
1190 goto out;
1191 err = -EINVAL;
1192 if (data.length > CCP_MAX_OPTION_LENGTH
1193 || ccp_option[1] < 2 || ccp_option[1] > data.length)
1194 goto out;
1196 cp = find_compressor(ccp_option[0]);
1197 #ifdef CONFIG_KMOD
1198 if (cp == 0) {
1199 sprintf(modname, "ppp-compress-%d", ccp_option[0]);
1200 request_module(modname);
1201 cp = find_compressor(ccp_option[0]);
1203 #endif /* CONFIG_KMOD */
1204 if (cp == 0)
1205 goto out;
1207 err = -ENOBUFS;
1208 if (data.transmit) {
1209 lock_xmit_path(ppp);
1210 ppp->xstate &= ~SC_COMP_RUN;
1211 if (ppp->xc_state != 0) {
1212 ppp->xcomp->comp_free(ppp->xc_state);
1213 ppp->xc_state = 0;
1216 ppp->xcomp = cp;
1217 ppp->xc_state = cp->comp_alloc(ccp_option, data.length);
1218 ppp_xmit_unlock(ppp);
1219 if (ppp->xc_state == 0)
1220 goto out;
1222 } else {
1223 lock_recv_path(ppp);
1224 ppp->rstate &= ~SC_DECOMP_RUN;
1225 if (ppp->rc_state != 0) {
1226 ppp->rcomp->decomp_free(ppp->rc_state);
1227 ppp->rc_state = 0;
1230 ppp->rcomp = cp;
1231 ppp->rc_state = cp->decomp_alloc(ccp_option, data.length);
1232 ppp_recv_unlock(ppp);
1233 if (ppp->rc_state == 0)
1234 goto out;
1236 err = 0;
1238 out:
1239 return err;
1243 * Look at a CCP packet and update our state accordingly.
1244 * We assume the caller has the xmit or recv path locked.
/*
 * Look at a CCP packet and update our state accordingly.
 * We assume the caller has the xmit or recv path locked.
 * 'inbound' selects the receive (decompressor) side vs transmit side.
 */
1246 static void
1247 ppp_ccp_peek(struct ppp *ppp, struct sk_buff *skb, int inbound)
1249 unsigned char *dp = skb->data + 2;
1250 int len;
1252 if (skb->len < CCP_HDRLEN + 2
1253 || skb->len < (len = CCP_LENGTH(dp)) + 2)
1254 return; /* too short */
1256 switch (CCP_CODE(dp)) {
1257 case CCP_CONFREQ:
1258 case CCP_TERMREQ:
1259 case CCP_TERMACK:
1261 * CCP is going down - disable compression.
1263 if (inbound)
1264 ppp->rstate &= ~SC_DECOMP_RUN;
1265 else
1266 ppp->xstate &= ~SC_COMP_RUN;
1267 break;
1269 case CCP_CONFACK:
1270 if ((ppp->flags & (SC_CCP_OPEN | SC_CCP_UP)) != SC_CCP_OPEN)
1271 break;
/* point dp/len at the first (only) CCP option in the ack */
1272 dp += CCP_HDRLEN;
1273 len -= CCP_HDRLEN;
1274 if (len < CCP_OPT_MINLEN || len < CCP_OPT_LENGTH(dp))
1275 break;
1276 if (inbound) {
1277 /* we will start receiving compressed packets */
1278 if (ppp->rc_state == 0)
1279 break;
1280 if (ppp->rcomp->decomp_init(ppp->rc_state, dp, len,
1281 ppp->index, 0, ppp->mru, ppp->debug)) {
1282 ppp->rstate |= SC_DECOMP_RUN;
1283 ppp->rstate &= ~(SC_DC_ERROR | SC_DC_FERROR);
1285 } else {
1286 /* we will soon start sending compressed packets */
1287 if (ppp->xc_state == 0)
1288 break;
1289 if (ppp->xcomp->comp_init(ppp->xc_state, dp, len,
1290 ppp->index, 0, ppp->debug))
1291 ppp->xstate |= SC_COMP_RUN;
1293 break;
1295 case CCP_RESETACK:
1296 /* reset the [de]compressor */
1297 if ((ppp->flags & SC_CCP_UP) == 0)
1298 break;
1299 if (inbound) {
1300 if (ppp->rc_state && (ppp->rstate & SC_DECOMP_RUN)) {
1301 ppp->rcomp->decomp_reset(ppp->rc_state);
1302 ppp->rstate &= ~SC_DC_ERROR;
1304 } else {
1305 if (ppp->xc_state && (ppp->xstate & SC_COMP_RUN))
1306 ppp->xcomp->comp_reset(ppp->xc_state);
1308 break;
1312 /* Free up compression resources. */
1313 static void
1314 ppp_ccp_closed(struct ppp *ppp)
1316 ppp->flags &= ~(SC_CCP_OPEN | SC_CCP_UP);
1318 lock_xmit_path(ppp);
1319 ppp->xstate &= ~SC_COMP_RUN;
1320 if (ppp->xc_state) {
1321 ppp->xcomp->comp_free(ppp->xc_state);
1322 ppp->xc_state = 0;
1324 ppp_xmit_unlock(ppp);
1326 lock_recv_path(ppp);
1327 ppp->xstate &= ~SC_DECOMP_RUN;
1328 if (ppp->rc_state) {
1329 ppp->rcomp->decomp_free(ppp->rc_state);
1330 ppp->rc_state = 0;
1332 ppp_recv_unlock(ppp);
/* List of registered compressors, protected by compressor_list_lock. */
static LIST_HEAD(compressor_list);
static spinlock_t compressor_list_lock = SPIN_LOCK_UNLOCKED;

/* One node on compressor_list per registered compressor. */
struct compressor_entry {
	struct list_head list;		/* link in compressor_list */
	struct compressor *comp;	/* the registered compressor */
};
1344 static struct compressor_entry *
1345 find_comp_entry(int proto)
1347 struct compressor_entry *ce;
1348 struct list_head *list = &compressor_list;
1350 while ((list = list->next) != &compressor_list) {
1351 ce = list_entry(list, struct compressor_entry, list);
1352 if (ce->comp->compress_proto == proto)
1353 return ce;
1355 return 0;
1358 /* Register a compressor */
1360 ppp_register_compressor(struct compressor *cp)
1362 struct compressor_entry *ce;
1363 int ret;
1365 spin_lock(&compressor_list_lock);
1366 ret = -EEXIST;
1367 if (find_comp_entry(cp->compress_proto) != 0)
1368 goto out;
1369 ret = -ENOMEM;
1370 ce = kmalloc(sizeof(struct compressor_entry), GFP_KERNEL);
1371 if (ce == 0)
1372 goto out;
1373 ret = 0;
1374 ce->comp = cp;
1375 list_add(&ce->list, &compressor_list);
1376 out:
1377 spin_unlock(&compressor_list_lock);
1378 return ret;
1381 /* Unregister a compressor */
1382 void
1383 ppp_unregister_compressor(struct compressor *cp)
1385 struct compressor_entry *ce;
1387 spin_lock(&compressor_list_lock);
1388 ce = find_comp_entry(cp->compress_proto);
1389 if (ce != 0 && ce->comp == cp) {
1390 list_del(&ce->list);
1391 kfree(ce);
1393 spin_unlock(&compressor_list_lock);
1396 /* Find a compressor. */
1397 static struct compressor *
1398 find_compressor(int type)
1400 struct compressor_entry *ce;
1401 struct compressor *cp = 0;
1403 spin_lock(&compressor_list_lock);
1404 ce = find_comp_entry(type);
1405 if (ce != 0)
1406 cp = ce->comp;
1407 spin_unlock(&compressor_list_lock);
1408 return cp;
 * Miscellaneous stuff.
1415 static void
1416 ppp_get_stats(struct ppp *ppp, struct ppp_stats *st)
1418 struct slcompress *vj = ppp->vj;
1420 memset(st, 0, sizeof(*st));
1421 st->p.ppp_ipackets = ppp->stats.rx_packets;
1422 st->p.ppp_ierrors = ppp->stats.rx_errors;
1423 st->p.ppp_ibytes = ppp->stats.rx_bytes;
1424 st->p.ppp_opackets = ppp->stats.tx_packets;
1425 st->p.ppp_oerrors = ppp->stats.tx_errors;
1426 st->p.ppp_obytes = ppp->stats.tx_bytes;
1427 if (vj == 0)
1428 return;
1429 st->vj.vjs_packets = vj->sls_o_compressed + vj->sls_o_uncompressed;
1430 st->vj.vjs_compressed = vj->sls_o_compressed;
1431 st->vj.vjs_searches = vj->sls_o_searches;
1432 st->vj.vjs_misses = vj->sls_o_misses;
1433 st->vj.vjs_errorin = vj->sls_i_error;
1434 st->vj.vjs_tossed = vj->sls_i_tossed;
1435 st->vj.vjs_uncompressedin = vj->sls_i_uncompressed;
1436 st->vj.vjs_compressedin = vj->sls_i_compressed;
1440 * Stuff for handling the list of ppp units and for initialization.
/*
 * Create a new ppp unit.  Fails if it can't allocate memory or
 * if there is already a unit with the requested number.
 * unit == -1 means allocate a new number.
 *
 * On success returns the new unit (refcnt 1, inserted in the sorted
 * all_ppp_units list) with *retp == 0; on failure returns NULL with
 * *retp set to a negative errno value (-EEXIST or -ENOMEM or the
 * register_netdevice error).
 */
static struct ppp *
ppp_create_unit(int unit, int *retp)
{
	struct ppp *ppp;
	struct net_device *dev;
	struct list_head *list;
	int last_unit = -1;
	int ret = -EEXIST;
	int i;

	spin_lock(&all_ppp_lock);
	/* Walk the sorted unit list to find both the insertion point and,
	   when unit < 0, the lowest free unit number. */
	list = &all_ppp_units;
	while ((list = list->next) != &all_ppp_units) {
		ppp = list_entry(list, struct ppp, list);
		if ((unit < 0 && ppp->index > last_unit + 1)
		    || (unit >= 0 && unit < ppp->index))
			break;
		if (unit == ppp->index)
			goto out;	/* unit already exists */
		last_unit = ppp->index;
	}
	if (unit < 0)
		unit = last_unit + 1;	/* first gap in the numbering */

	/* Create a new ppp structure and link it before `list'. */
	ret = -ENOMEM;
	/* NOTE(review): GFP_KERNEL allocations (and register_netdevice
	   below) happen while holding the all_ppp_lock spinlock and may
	   sleep - pre-existing behavior, left unchanged here. */
	ppp = kmalloc(sizeof(struct ppp), GFP_KERNEL);
	if (ppp == 0)
		goto out;
	memset(ppp, 0, sizeof(struct ppp));
	dev = kmalloc(sizeof(struct net_device), GFP_KERNEL);
	if (dev == 0) {
		kfree(ppp);
		goto out;
	}
	memset(dev, 0, sizeof(struct net_device));

	/* Initialize the unit's queues, wait queue and per-protocol modes. */
	ppp->index = unit;
	sprintf(ppp->name, "ppp%d", unit);
	ppp->mru = PPP_MRU;
	skb_queue_head_init(&ppp->xq);
	skb_queue_head_init(&ppp->rq);
	init_waitqueue_head(&ppp->rwait);
	ppp->refcnt = 1;	/* reference held by the creator */
	for (i = 0; i < NUM_NP; ++i)
		ppp->npmode[i] = NPMODE_PASS;
	INIT_LIST_HEAD(&ppp->channels);
	skb_queue_head_init(&ppp->recv_pending);

	/* Hook up the network device half of the unit. */
	ppp->dev = dev;
	dev->init = ppp_net_init;
	dev->name = ppp->name;
	dev->priv = ppp;
	dev->new_style = 1;

	rtnl_lock();
	ret = register_netdevice(dev);
	rtnl_unlock();
	if (ret != 0) {
		printk(KERN_ERR "PPP: couldn't register device (%d)\n", ret);
		kfree(dev);
		kfree(ppp);
		goto out;
	}

	list_add(&ppp->list, list->prev);	/* insert, keeping list sorted */
 out:
	spin_unlock(&all_ppp_lock);
	*retp = ret;
	if (ret != 0)
		ppp = 0;
	return ppp;
}
/*
 * Remove a reference to a ppp unit, and destroy it if
 * the reference count goes to 0.
 */
static void ppp_release_unit(struct ppp *ppp)
{
	struct list_head *list, *next;
	int ref;

	spin_lock(&all_ppp_lock);
	ref = --ppp->refcnt;
	if (ref == 0)
		list_del(&ppp->list);	/* unlink while still holding the lock */
	spin_unlock(&all_ppp_lock);
	if (ref != 0)
		return;		/* other references remain - nothing to do */

	/* Last fd open to this ppp unit is being closed or detached:
	   mark the interface down, free the ppp unit */
	if (ppp->dev) {
		rtnl_lock();
		dev_close(ppp->dev);
		rtnl_unlock();
	}
	for (list = ppp->channels.next; list != &ppp->channels; list = next) {
		/* forcibly detach this channel */
		struct channel *chan;
		chan = list_entry(list, struct channel, list);
		chan->chan->ppp = 0;	/* channel must not point at a dead unit */
		next = list->next;	/* save before freeing the node */
		kfree(chan);
	}

	/* Free up resources. */
	ppp_ccp_closed(ppp);
	lock_xmit_path(ppp);
	lock_recv_path(ppp);
	if (ppp->vj) {
		slhc_free(ppp->vj);
		ppp->vj = 0;
	}
	free_skbs(&ppp->xq);
	free_skbs(&ppp->rq);
	free_skbs(&ppp->recv_pending);
	if (ppp->dev) {
		rtnl_lock();
		unregister_netdevice(ppp->dev);
		ppp->dev = 0;
		rtnl_unlock();
	}
	/* NOTE(review): ppp is freed without releasing the xmit/recv path
	   locks taken above - pre-existing behavior, left unchanged. */
	kfree(ppp);
}
1576 * Locate an existing ppp unit.
1577 * The caller should have locked the all_ppp_lock.
1579 static struct ppp *
1580 ppp_find_unit(int unit)
1582 struct ppp *ppp;
1583 struct list_head *list;
1585 list = &all_ppp_units;
1586 while ((list = list->next) != &all_ppp_units) {
1587 ppp = list_entry(list, struct ppp, list);
1588 if (ppp->index == unit)
1589 return ppp;
1591 return 0;
1595 * Module stuff.
1597 #ifdef MODULE
/* Module entry point: run the generic PPP initialization. */
int
init_module(void)
{
	ppp_init(0);
	return 0;
}
1605 void
1606 cleanup_module(void)
1608 /* should never happen */
1609 if (!list_empty(&all_ppp_units))
1610 printk(KERN_ERR "PPP: removing module but units remain!\n");
1611 if (unregister_chrdev(PPP_MAJOR, "ppp") != 0)
1612 printk(KERN_ERR "PPP: failed to unregister PPP device\n");
1614 #endif /* MODULE */