/*
 * Generic PPP layer for Linux.
 *
 * Copyright 1999-2002 Paul Mackerras.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 *
 * The generic PPP layer handles the PPP network interfaces, the
 * /dev/ppp device, packet and VJ compression, and multilink.
 * It talks to PPP `channels' via the interface defined in
 * include/linux/ppp_channel.h.  Channels provide the basic means for
 * sending and receiving PPP frames on some kind of communications
 * channel.
 *
 * Part of the code in this driver was inspired by the old async-only
 * PPP driver, written by Michael Callahan and Al Longyear, and
 * subsequently hacked by Paul Mackerras.
 *
 * ==FILEVERSION 20050110==
 */
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/kmod.h>
#include <linux/init.h>
#include <linux/list.h>
#include <linux/netdevice.h>
#include <linux/poll.h>
#include <linux/ppp_defs.h>
#include <linux/filter.h>
#include <linux/if_ppp.h>
#include <linux/ppp_channel.h>
#include <linux/ppp-comp.h>
#include <linux/skbuff.h>
#include <linux/rtnetlink.h>
#include <linux/if_arp.h>
#include <linux/tcp.h>
#include <linux/spinlock.h>
#include <linux/smp_lock.h>
#include <linux/rwsem.h>
#include <linux/stddef.h>
#include <linux/device.h>
#include <linux/mutex.h>
#include <net/slhc_vj.h>
#include <asm/atomic.h>
#define PPP_VERSION	"2.4.2"

/*
 * Network protocols we support.
 */
#define NP_IP	0		/* Internet Protocol V4 */
#define NP_IPV6	1		/* Internet Protocol V6 */
#define NP_IPX	2		/* IPX protocol */
#define NP_AT	3		/* Appletalk protocol */
#define NP_MPLS_UC 4		/* MPLS unicast */
#define NP_MPLS_MC 5		/* MPLS multicast */
#define NUM_NP	6		/* Number of NPs. */

#define MPHDRLEN	6	/* multilink protocol header length */
#define MPHDRLEN_SSN	4	/* ditto with short sequence numbers */
#define MIN_FRAG_SIZE	64
/*
 * An instance of /dev/ppp can be associated with either a ppp
 * interface unit or a ppp channel.  In both cases, file->private_data
 * points to one of these.
 */
struct ppp_file {
	enum {
		INTERFACE = 1, CHANNEL
	}		kind;
	struct sk_buff_head xq;		/* pppd transmit queue */
	struct sk_buff_head rq;		/* receive queue for pppd */
	wait_queue_head_t rwait;	/* for poll on reading /dev/ppp */
	atomic_t	refcnt;		/* # refs (incl /dev/ppp attached) */
	int		hdrlen;		/* space to leave for headers */
	int		index;		/* interface unit / channel number */
	int		dead;		/* unit/channel has been shut down */
};
#define PF_TO_X(pf, X)		((X *)((char *)(pf) - offsetof(X, file)))

#define PF_TO_PPP(pf)		PF_TO_X(pf, struct ppp)
#define PF_TO_CHANNEL(pf)	PF_TO_X(pf, struct channel)

#define ROUNDUP(n, x)		(((n) + (x) - 1) / (x))
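/*
 * Illustrative sketch (not part of the driver): PF_TO_X() recovers the
 * enclosing structure from a pointer to its embedded `file' member, the
 * same idea as the kernel's container_of().  The stand-alone user-space
 * demo below uses made-up structure names (demo_*) to show the pointer
 * arithmetic; the round trip returns the original pointer.
 */
#if 0
#include <stddef.h>
#include <stdio.h>

struct demo_file { int index; };			/* stands in for struct ppp_file */
struct demo_unit { int mru; struct demo_file file; };	/* stands in for struct ppp */

#define DEMO_PF_TO_UNIT(pf) \
	((struct demo_unit *)((char *)(pf) - offsetof(struct demo_unit, file)))

int main(void)
{
	struct demo_unit unit = { .mru = 1500, .file = { .index = 0 } };
	struct demo_file *pf = &unit.file;	/* what file->private_data would hold */
	struct demo_unit *back = DEMO_PF_TO_UNIT(pf);

	printf("unit=%p recovered=%p mru=%d\n",
	       (void *)&unit, (void *)back, back->mru);
	return 0;
}
#endif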
/*
 * Data structure describing one ppp unit.
 * A ppp unit corresponds to a ppp network interface device
 * and represents a multilink bundle.
 * It can have 0 or more ppp channels connected to it.
 */
struct ppp {
	struct ppp_file	file;		/* stuff for read/write/poll 0 */
	struct file	*owner;		/* file that owns this unit 48 */
	struct list_head channels;	/* list of attached channels 4c */
	int		n_channels;	/* how many channels are attached 54 */
	spinlock_t	rlock;		/* lock for receive side 58 */
	spinlock_t	wlock;		/* lock for transmit side 5c */
	int		mru;		/* max receive unit 60 */
	int		mru_alloc;	/* MAX(1500,MRU) for dev_alloc_skb() */
	unsigned int	flags;		/* control bits 64 */
	unsigned int	xstate;		/* transmit state bits 68 */
	unsigned int	rstate;		/* receive state bits 6c */
	int		debug;		/* debug flags 70 */
	struct slcompress *vj;		/* state for VJ header compression */
	enum NPmode	npmode[NUM_NP];	/* what to do with each net proto 78 */
	struct sk_buff	*xmit_pending;	/* a packet ready to go out 88 */
	struct compressor *xcomp;	/* transmit packet compressor 8c */
	void		*xc_state;	/* its internal state 90 */
	struct compressor *rcomp;	/* receive decompressor 94 */
	void		*rc_state;	/* its internal state 98 */
	unsigned long	last_xmit;	/* jiffies when last pkt sent 9c */
	unsigned long	last_recv;	/* jiffies when last pkt rcvd a0 */
	struct net_device *dev;		/* network interface device a4 */
#ifdef CONFIG_PPP_MULTILINK
	int		nxchan;		/* next channel to send something on */
	u32		nxseq;		/* next sequence number to send */
	int		mrru;		/* MP: max reconst. receive unit */
	u32		nextseq;	/* MP: seq no of next packet */
	u32		minseq;		/* MP: min of most recent seqnos */
	struct sk_buff_head mrq;	/* MP: receive reconstruction queue */
#endif /* CONFIG_PPP_MULTILINK */
	struct net_device_stats stats;	/* statistics */
#ifdef CONFIG_PPP_FILTER
	struct sock_filter *pass_filter;	/* filter for packets to pass */
	struct sock_filter *active_filter;	/* filter for pkts to reset idle */
	unsigned pass_len, active_len;
#endif /* CONFIG_PPP_FILTER */
};
/*
 * Bits in flags: SC_NO_TCP_CCID, SC_CCP_OPEN, SC_CCP_UP, SC_LOOP_TRAFFIC,
 * SC_MULTILINK, SC_MP_SHORTSEQ, SC_MP_XSHORTSEQ, SC_COMP_TCP, SC_REJ_COMP_TCP.
 * Bits in rstate: SC_DECOMP_RUN, SC_DC_ERROR, SC_DC_FERROR.
 * Bits in xstate: SC_COMP_RUN
 */
#define SC_FLAG_BITS	(SC_NO_TCP_CCID|SC_CCP_OPEN|SC_CCP_UP|SC_LOOP_TRAFFIC \
			 |SC_MULTILINK|SC_MP_SHORTSEQ|SC_MP_XSHORTSEQ \
			 |SC_COMP_TCP|SC_REJ_COMP_TCP)
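/*
 * Illustrative sketch (not part of the driver): the PPPIOCSFLAGS handler
 * later in this file does "cflags = ppp->flags & ~val; ppp->flags = val &
 * SC_FLAG_BITS;", so only the control bits listed above can be set from
 * user space, and cflags records which bits were just cleared (used to
 * notice CCP being closed).  Stand-alone demo of that masking pattern,
 * with made-up placeholder bit values:
 */
#if 0
#include <stdio.h>

#define DEMO_FLAG_BITS	0x00000fffu	/* placeholder for SC_FLAG_BITS */
#define DEMO_CCP_OPEN	0x00000040u	/* placeholder for SC_CCP_OPEN */

int main(void)
{
	unsigned int flags = DEMO_CCP_OPEN;	/* current ppp->flags */
	unsigned int val = 0xffff0000u;		/* word supplied by the ioctl */
	unsigned int cflags = flags & ~val;	/* bits being turned off */

	flags = val & DEMO_FLAG_BITS;		/* unknown bits are discarded */
	if (cflags & DEMO_CCP_OPEN)
		printf("CCP was just closed\n");
	printf("new flags=%#x\n", flags);
	return 0;
}
#endif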
/*
 * Private data structure for each channel.
 * This includes the data structure used for multilink.
 */
struct channel {
	struct ppp_file	file;		/* stuff for read/write/poll */
	struct list_head list;		/* link in all/new_channels list */
	struct ppp_channel *chan;	/* public channel data structure */
	struct rw_semaphore chan_sem;	/* protects `chan' during chan ioctl */
	spinlock_t	downl;		/* protects `chan', file.xq dequeue */
	struct ppp	*ppp;		/* ppp unit we're connected to */
	struct list_head clist;		/* link in list of channels per unit */
	rwlock_t	upl;		/* protects `ppp' */
#ifdef CONFIG_PPP_MULTILINK
	u8		avail;		/* flag used in multilink stuff */
	u8		had_frag;	/* >= 1 fragments have been sent */
	u32		lastseq;	/* MP: last sequence # received */
#endif /* CONFIG_PPP_MULTILINK */
};
/*
 * SMP locking issues:
 * Both the ppp.rlock and ppp.wlock locks protect the ppp.channels
 * list and the ppp.n_channels field; you need to take both locks
 * before you modify them.
 * The lock ordering is: channel.upl -> ppp.wlock -> ppp.rlock ->
 * channel.downl.
 */
/*
 * A cardmap represents a mapping from unsigned integers to pointers,
 * and provides a fast "find lowest unused number" operation.
 * It uses a broad (32-way) tree with a bitmap at each level.
 * It is designed to be space-efficient for small numbers of entries
 * and time-efficient for large numbers of entries.
 */
#define CARDMAP_ORDER	5
#define CARDMAP_WIDTH	(1U << CARDMAP_ORDER)
#define CARDMAP_MASK	(CARDMAP_WIDTH - 1)

struct cardmap {
	int shift;
	unsigned long inuse;
	struct cardmap *parent;
	void *ptr[CARDMAP_WIDTH];
};

static void *cardmap_get(struct cardmap *map, unsigned int nr);
static int cardmap_set(struct cardmap **map, unsigned int nr, void *ptr);
static unsigned int cardmap_find_first_free(struct cardmap *map);
static void cardmap_destroy(struct cardmap **map);
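/*
 * Illustrative sketch (not part of the driver): a single-level, 32-entry
 * version of the "find lowest unused number" idea behind the cardmap.
 * The real cardmap implemented at the end of this file is a 32-way tree
 * of such levels so it scales past 32 units; this stand-alone demo only
 * shows the bitmap bookkeeping that each level performs.
 */
#if 0
#include <stdio.h>

#define DEMO_WIDTH 32u

static unsigned int demo_inuse;		/* one bit per allocated unit number */
static void *demo_ptr[DEMO_WIDTH];	/* unit number -> pointer */

static int demo_alloc(void *obj)
{
	unsigned int nr;

	for (nr = 0; nr < DEMO_WIDTH; nr++) {
		if (!(demo_inuse & (1u << nr))) {
			demo_inuse |= 1u << nr;
			demo_ptr[nr] = obj;
			return (int)nr;		/* lowest free number */
		}
	}
	return -1;				/* map full */
}

int main(void)
{
	int a = demo_alloc(&demo_inuse);
	int b = demo_alloc(&demo_ptr);

	demo_inuse &= ~(1u << a);		/* free unit a again */
	printf("first=%d second=%d next=%d\n", a, b, demo_alloc(&demo_inuse));
	return 0;
}
#endif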
/*
 * all_ppp_mutex protects the all_ppp_units mapping.
 * It also ensures that finding a ppp unit in the all_ppp_units map
 * and updating its file.refcnt field is atomic.
 */
static DEFINE_MUTEX(all_ppp_mutex);
static struct cardmap *all_ppp_units;
static atomic_t ppp_unit_count = ATOMIC_INIT(0);

/*
 * all_channels_lock protects all_channels and last_channel_index,
 * and makes finding a channel and updating its file.refcnt field atomic.
 */
static DEFINE_SPINLOCK(all_channels_lock);
static LIST_HEAD(all_channels);
static LIST_HEAD(new_channels);
static int last_channel_index;
static atomic_t channel_count = ATOMIC_INIT(0);
/* Get the PPP protocol number from a skb */
#define PPP_PROTO(skb)	(((skb)->data[0] << 8) + (skb)->data[1])

#define IP_PROTO(skb)	(skb)->data[11]
#define SRC_PORT(skb)	(((skb)->data[22] << 8) + (skb)->data[23])
#define DST_PORT(skb)	(((skb)->data[24] << 8) + (skb)->data[25])
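/*
 * Illustrative sketch (not part of the driver): IP_PROTO/SRC_PORT/DST_PORT
 * index into a PPP frame laid out as 2 bytes of PPP protocol, then a
 * 20-byte IPv4 header (protocol field at IP offset 9 -> frame offset 11),
 * then the TCP/UDP header (ports at offsets 0-3 of that header -> frame
 * offsets 22-25).  The stand-alone demo below decodes a hand-built frame
 * with those macros rewritten for a plain byte array; it assumes, as the
 * driver does here, an uncompressed 20-byte IP header.
 */
#if 0
#include <stdio.h>

#define DEMO_IP_PROTO(d)	(d)[11]
#define DEMO_SRC_PORT(d)	(((d)[22] << 8) + (d)[23])
#define DEMO_DST_PORT(d)	(((d)[24] << 8) + (d)[25])

int main(void)
{
	unsigned char frame[26] = { 0 };

	frame[0] = 0x00; frame[1] = 0x21;	/* PPP protocol: IPv4 */
	frame[2] = 0x45;			/* IPv4, 20-byte header */
	frame[11] = 6;				/* IP protocol: TCP */
	frame[22] = 0x04; frame[23] = 0xd2;	/* source port 1234 */
	frame[24] = 0x01; frame[25] = 0xbd;	/* dest port 445 (microsoft-ds) */

	printf("proto=%u src=%u dst=%u\n", DEMO_IP_PROTO(frame),
	       DEMO_SRC_PORT(frame), DEMO_DST_PORT(frame));
	return 0;
}
#endif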
/* We limit the length of ppp->file.rq to this (arbitrary) value */
#define PPP_MAX_RQLEN	32

/*
 * Maximum number of multilink fragments queued up.
 * This has to be large enough to cope with the maximum latency of
 * the slowest channel relative to the others.  Strictly it should
 * depend on the number of channels and their characteristics.
 */
#define PPP_MP_MAX_QLEN	128

/* Multilink header bits. */
#define B	0x80		/* this fragment begins a packet */
#define E	0x40		/* this fragment ends a packet */

/* Compare multilink sequence numbers (assumed to be 32 bits wide) */
#define seq_before(a, b)	((s32)((a) - (b)) < 0)
#define seq_after(a, b)		((s32)((a) - (b)) > 0)
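/*
 * Illustrative sketch (not part of the driver): seq_before()/seq_after()
 * compare sequence numbers modulo 2^32 by looking at the sign of the
 * difference, so they give the right answer even across a wraparound.
 * Stand-alone demo using int32_t in place of the kernel's s32:
 */
#if 0
#include <stdint.h>
#include <stdio.h>

#define demo_seq_before(a, b)	((int32_t)((a) - (b)) < 0)
#define demo_seq_after(a, b)	((int32_t)((a) - (b)) > 0)

int main(void)
{
	uint32_t old = 0xfffffffeu;	/* just before the 32-bit wrap */
	uint32_t new = 3;		/* just after the wrap */

	printf("before=%d after=%d\n",
	       demo_seq_before(old, new),	/* 1: old really is earlier */
	       demo_seq_after(new, old));	/* 1: new really is later */
	return 0;
}
#endif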
static int ppp_unattached_ioctl(struct ppp_file *pf, struct file *file,
			unsigned int cmd, unsigned long arg);
static void ppp_xmit_process(struct ppp *ppp);
static void ppp_send_frame(struct ppp *ppp, struct sk_buff *skb);
static void ppp_push(struct ppp *ppp);
static void ppp_channel_push(struct channel *pch);
static void ppp_receive_frame(struct ppp *ppp, struct sk_buff *skb,
			      struct channel *pch);
static void ppp_receive_error(struct ppp *ppp);
static void ppp_receive_nonmp_frame(struct ppp *ppp, struct sk_buff *skb);
static struct sk_buff *ppp_decompress_frame(struct ppp *ppp,
					    struct sk_buff *skb);
#ifdef CONFIG_PPP_MULTILINK
static void ppp_receive_mp_frame(struct ppp *ppp, struct sk_buff *skb,
				 struct channel *pch);
static void ppp_mp_insert(struct ppp *ppp, struct sk_buff *skb);
static struct sk_buff *ppp_mp_reconstruct(struct ppp *ppp);
static int ppp_mp_explode(struct ppp *ppp, struct sk_buff *skb);
#endif /* CONFIG_PPP_MULTILINK */
static int ppp_set_compress(struct ppp *ppp, unsigned long arg);
static void ppp_ccp_peek(struct ppp *ppp, struct sk_buff *skb, int inbound);
static void ppp_ccp_closed(struct ppp *ppp);
static struct compressor *find_compressor(int type);
static void ppp_get_stats(struct ppp *ppp, struct ppp_stats *st);
static struct ppp *ppp_create_interface(int unit, int *retp);
static void init_ppp_file(struct ppp_file *pf, int kind);
static void ppp_shutdown_interface(struct ppp *ppp);
static void ppp_destroy_interface(struct ppp *ppp);
static struct ppp *ppp_find_unit(int unit);
static struct channel *ppp_find_channel(int unit);
static int ppp_connect_channel(struct channel *pch, int unit);
static int ppp_disconnect_channel(struct channel *pch);
static void ppp_destroy_channel(struct channel *pch);

static struct class *ppp_class;
/* Translates a PPP protocol number to a NP index (NP == network protocol) */
static inline int proto_to_npindex(int proto)

/* Translates an NP index into a PPP protocol number */
static const int npindex_to_proto[NUM_NP] = {
	PPP_IP,
	PPP_IPV6,
	PPP_IPX,
	PPP_AT,
	PPP_MPLS_UC,
	PPP_MPLS_MC,
};

/* Translates an ethertype into an NP index */
static inline int ethertype_to_npindex(int ethertype)

/* Translates an NP index into an ethertype */
static const int npindex_to_ethertype[NUM_NP] = {
	ETH_P_IP,
	ETH_P_IPV6,
	ETH_P_IPX,
	ETH_P_PPPTALK,
	ETH_P_MPLS_UC,
	ETH_P_MPLS_MC,
};
#define ppp_xmit_lock(ppp)	spin_lock_bh(&(ppp)->wlock)
#define ppp_xmit_unlock(ppp)	spin_unlock_bh(&(ppp)->wlock)
#define ppp_recv_lock(ppp)	spin_lock_bh(&(ppp)->rlock)
#define ppp_recv_unlock(ppp)	spin_unlock_bh(&(ppp)->rlock)
#define ppp_lock(ppp)		do { ppp_xmit_lock(ppp); \
				     ppp_recv_lock(ppp); } while (0)
#define ppp_unlock(ppp)		do { ppp_recv_unlock(ppp); \
				     ppp_xmit_unlock(ppp); } while (0)
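/*
 * Illustrative sketch (not part of the driver): any modification of
 * ppp->channels or ppp->n_channels takes both per-unit locks through
 * ppp_lock(), and when a channel's upl lock is also needed it is taken
 * first, matching the ordering documented above (upl -> wlock -> rlock).
 * This mirrors what ppp_connect_channel() does further down; the fragment
 * below is a sketch only, with error handling and hdrlen fixups omitted.
 */
#if 0
static void example_attach(struct channel *pch, struct ppp *ppp)
{
	write_lock_bh(&pch->upl);	/* channel.upl first */
	ppp_lock(ppp);			/* then ppp.wlock, then ppp.rlock */

	list_add_tail(&pch->clist, &ppp->channels);
	++ppp->n_channels;
	pch->ppp = ppp;

	ppp_unlock(ppp);		/* release in the reverse order */
	write_unlock_bh(&pch->upl);
}
#endif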
/*
 * /dev/ppp device routines.
 * The /dev/ppp device is used by pppd to control the ppp unit.
 * It supports the read, write, ioctl and poll functions.
 * Open instances of /dev/ppp can be in one of three states:
 * unattached, attached to a ppp unit, or attached to a ppp channel.
 */
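/*
 * Illustrative sketch (not part of the driver): roughly how pppd drives
 * this interface from user space - open /dev/ppp, create a unit with
 * PPPIOCNEWUNIT, then poll/read the frames the kernel queues for it.
 * The program must run with CAP_NET_ADMIN (see ppp_open() below), error
 * handling is minimal, and on a unit with no channels attached read()
 * returns 0 (see the EOF comment in ppp_read() below).
 */
#if 0
#include <fcntl.h>
#include <poll.h>
#include <stdio.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/if_ppp.h>	/* PPPIOCNEWUNIT and friends */

int main(void)
{
	unsigned char buf[2048];
	int unit = -1;		/* -1: let the kernel pick a unit number */
	int fd = open("/dev/ppp", O_RDWR);

	if (fd < 0 || ioctl(fd, PPPIOCNEWUNIT, &unit) < 0) {
		perror("ppp");
		return 1;
	}
	printf("attached to ppp%d\n", unit);

	for (;;) {
		struct pollfd pfd = { .fd = fd, .events = POLLIN };
		ssize_t n;

		if (poll(&pfd, 1, -1) < 0)
			break;
		n = read(fd, buf, sizeof(buf));	/* one queued PPP frame */
		if (n <= 0)
			break;
		printf("got %zd-byte frame, proto %#x\n",
		       n, (buf[0] << 8) | buf[1]);
	}
	close(fd);
	return 0;
}
#endif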
363 static int ppp_open(struct inode
*inode
, struct file
*file
)
366 * This could (should?) be enforced by the permissions on /dev/ppp.
368 if (!capable(CAP_NET_ADMIN
))
373 static int ppp_release(struct inode
*inode
, struct file
*file
)
375 struct ppp_file
*pf
= file
->private_data
;
379 file
->private_data
= NULL
;
380 if (pf
->kind
== INTERFACE
) {
382 if (file
== ppp
->owner
)
383 ppp_shutdown_interface(ppp
);
385 if (atomic_dec_and_test(&pf
->refcnt
)) {
388 ppp_destroy_interface(PF_TO_PPP(pf
));
391 ppp_destroy_channel(PF_TO_CHANNEL(pf
));
399 static ssize_t
ppp_read(struct file
*file
, char __user
*buf
,
400 size_t count
, loff_t
*ppos
)
402 struct ppp_file
*pf
= file
->private_data
;
403 DECLARE_WAITQUEUE(wait
, current
);
405 struct sk_buff
*skb
= NULL
;
412 add_wait_queue(&pf
->rwait
, &wait
);
414 set_current_state(TASK_INTERRUPTIBLE
);
415 skb
= skb_dequeue(&pf
->rq
);
421 if (pf
->kind
== INTERFACE
) {
423 * Return 0 (EOF) on an interface that has no
424 * channels connected, unless it is looping
425 * network traffic (demand mode).
427 struct ppp
*ppp
= PF_TO_PPP(pf
);
428 if (ppp
->n_channels
== 0
429 && (ppp
->flags
& SC_LOOP_TRAFFIC
) == 0)
433 if (file
->f_flags
& O_NONBLOCK
)
436 if (signal_pending(current
))
440 set_current_state(TASK_RUNNING
);
441 remove_wait_queue(&pf
->rwait
, &wait
);
447 if (skb
->len
> count
)
452 if (skb_copy_datagram_iovec(skb
, 0, &iov
, skb
->len
))
462 static ssize_t
ppp_write(struct file
*file
, const char __user
*buf
,
463 size_t count
, loff_t
*ppos
)
465 struct ppp_file
*pf
= file
->private_data
;
472 skb
= alloc_skb(count
+ pf
->hdrlen
, GFP_KERNEL
);
475 skb_reserve(skb
, pf
->hdrlen
);
477 if (copy_from_user(skb_put(skb
, count
), buf
, count
)) {
482 skb_queue_tail(&pf
->xq
, skb
);
486 ppp_xmit_process(PF_TO_PPP(pf
));
489 ppp_channel_push(PF_TO_CHANNEL(pf
));
499 /* No kernel lock - fine */
500 static unsigned int ppp_poll(struct file
*file
, poll_table
*wait
)
502 struct ppp_file
*pf
= file
->private_data
;
507 poll_wait(file
, &pf
->rwait
, wait
);
508 mask
= POLLOUT
| POLLWRNORM
;
509 if (skb_peek(&pf
->rq
) != 0)
510 mask
|= POLLIN
| POLLRDNORM
;
513 else if (pf
->kind
== INTERFACE
) {
514 /* see comment in ppp_read */
515 struct ppp
*ppp
= PF_TO_PPP(pf
);
516 if (ppp
->n_channels
== 0
517 && (ppp
->flags
& SC_LOOP_TRAFFIC
) == 0)
518 mask
|= POLLIN
| POLLRDNORM
;
524 #ifdef CONFIG_PPP_FILTER
525 static int get_filter(void __user
*arg
, struct sock_filter
**p
)
527 struct sock_fprog uprog
;
528 struct sock_filter
*code
= NULL
;
531 if (copy_from_user(&uprog
, arg
, sizeof(uprog
)))
539 len
= uprog
.len
* sizeof(struct sock_filter
);
540 code
= kmalloc(len
, GFP_KERNEL
);
544 if (copy_from_user(code
, uprog
.filter
, len
)) {
549 err
= sk_chk_filter(code
, uprog
.len
);
558 #endif /* CONFIG_PPP_FILTER */
560 static int ppp_ioctl(struct inode
*inode
, struct file
*file
,
561 unsigned int cmd
, unsigned long arg
)
563 struct ppp_file
*pf
= file
->private_data
;
565 int err
= -EFAULT
, val
, val2
, i
;
566 struct ppp_idle idle
;
569 struct slcompress
*vj
;
570 void __user
*argp
= (void __user
*)arg
;
571 int __user
*p
= argp
;
574 return ppp_unattached_ioctl(pf
, file
, cmd
, arg
);
576 if (cmd
== PPPIOCDETACH
) {
578 * We have to be careful here... if the file descriptor
579 * has been dup'd, we could have another process in the
580 * middle of a poll using the same file *, so we had
581 * better not free the interface data structures -
582 * instead we fail the ioctl. Even in this case, we
583 * shut down the interface if we are the owner of it.
584 * Actually, we should get rid of PPPIOCDETACH, userland
585 * (i.e. pppd) could achieve the same effect by closing
586 * this fd and reopening /dev/ppp.
589 if (pf
->kind
== INTERFACE
) {
591 if (file
== ppp
->owner
)
592 ppp_shutdown_interface(ppp
);
594 if (atomic_read(&file
->f_count
) <= 2) {
595 ppp_release(inode
, file
);
598 printk(KERN_DEBUG
"PPPIOCDETACH file->f_count=%d\n",
599 atomic_read(&file
->f_count
));
603 if (pf
->kind
== CHANNEL
) {
604 struct channel
*pch
= PF_TO_CHANNEL(pf
);
605 struct ppp_channel
*chan
;
609 if (get_user(unit
, p
))
611 err
= ppp_connect_channel(pch
, unit
);
615 err
= ppp_disconnect_channel(pch
);
619 down_read(&pch
->chan_sem
);
622 if (chan
&& chan
->ops
->ioctl
)
623 err
= chan
->ops
->ioctl(chan
, cmd
, arg
);
624 up_read(&pch
->chan_sem
);
629 if (pf
->kind
!= INTERFACE
) {
631 printk(KERN_ERR
"PPP: not interface or channel??\n");
638 if (get_user(val
, p
))
640 ppp
->mru_alloc
= ppp
->mru
= val
;
641 if (ppp
->mru_alloc
< PPP_MRU
)
642 ppp
->mru_alloc
= PPP_MRU
; /* increase for broken peers */
647 if (get_user(val
, p
))
650 cflags
= ppp
->flags
& ~val
;
651 ppp
->flags
= val
& SC_FLAG_BITS
;
653 if (cflags
& SC_CCP_OPEN
)
659 val
= ppp
->flags
| ppp
->xstate
| ppp
->rstate
;
660 if (put_user(val
, p
))
665 case PPPIOCSCOMPRESS
:
666 err
= ppp_set_compress(ppp
, arg
);
670 if (put_user(ppp
->file
.index
, p
))
676 if (get_user(val
, p
))
683 if (put_user(ppp
->debug
, p
))
689 idle
.xmit_idle
= (jiffies
- ppp
->last_xmit
) / HZ
;
690 idle
.recv_idle
= (jiffies
- ppp
->last_recv
) / HZ
;
691 if (copy_to_user(argp
, &idle
, sizeof(idle
)))
697 if (get_user(val
, p
))
700 if ((val
>> 16) != 0) {
704 vj
= slhc_init(val2
+1, val
+1);
706 printk(KERN_ERR
"PPP: no memory (VJ compressor)\n");
720 if (copy_from_user(&npi
, argp
, sizeof(npi
)))
722 err
= proto_to_npindex(npi
.protocol
);
726 if (cmd
== PPPIOCGNPMODE
) {
728 npi
.mode
= ppp
->npmode
[i
];
729 if (copy_to_user(argp
, &npi
, sizeof(npi
)))
732 ppp
->npmode
[i
] = npi
.mode
;
733 /* we may be able to transmit more packets now (??) */
734 netif_wake_queue(ppp
->dev
);
739 #ifdef CONFIG_PPP_FILTER
742 struct sock_filter
*code
;
743 err
= get_filter(argp
, &code
);
746 kfree(ppp
->pass_filter
);
747 ppp
->pass_filter
= code
;
756 struct sock_filter
*code
;
757 err
= get_filter(argp
, &code
);
760 kfree(ppp
->active_filter
);
761 ppp
->active_filter
= code
;
762 ppp
->active_len
= err
;
768 #endif /* CONFIG_PPP_FILTER */
770 #ifdef CONFIG_PPP_MULTILINK
772 if (get_user(val
, p
))
776 ppp_recv_unlock(ppp
);
779 #endif /* CONFIG_PPP_MULTILINK */
788 static int ppp_unattached_ioctl(struct ppp_file
*pf
, struct file
*file
,
789 unsigned int cmd
, unsigned long arg
)
791 int unit
, err
= -EFAULT
;
793 struct channel
*chan
;
794 int __user
*p
= (int __user
*)arg
;
798 /* Create a new ppp unit */
799 if (get_user(unit
, p
))
801 ppp
= ppp_create_interface(unit
, &err
);
804 file
->private_data
= &ppp
->file
;
807 if (put_user(ppp
->file
.index
, p
))
813 /* Attach to an existing ppp unit */
814 if (get_user(unit
, p
))
816 mutex_lock(&all_ppp_mutex
);
818 ppp
= ppp_find_unit(unit
);
820 atomic_inc(&ppp
->file
.refcnt
);
821 file
->private_data
= &ppp
->file
;
824 mutex_unlock(&all_ppp_mutex
);
828 if (get_user(unit
, p
))
830 spin_lock_bh(&all_channels_lock
);
832 chan
= ppp_find_channel(unit
);
834 atomic_inc(&chan
->file
.refcnt
);
835 file
->private_data
= &chan
->file
;
838 spin_unlock_bh(&all_channels_lock
);
847 static const struct file_operations ppp_device_fops
= {
848 .owner
= THIS_MODULE
,
854 .release
= ppp_release
857 #define PPP_MAJOR 108
859 /* Called at boot time if ppp is compiled into the kernel,
860 or at module load time (from init_module) if compiled as a module. */
861 static int __init
ppp_init(void)
865 printk(KERN_INFO
"PPP generic driver version " PPP_VERSION
"\n");
866 err
= register_chrdev(PPP_MAJOR
, "ppp", &ppp_device_fops
);
868 ppp_class
= class_create(THIS_MODULE
, "ppp");
869 if (IS_ERR(ppp_class
)) {
870 err
= PTR_ERR(ppp_class
);
873 device_create(ppp_class
, NULL
, MKDEV(PPP_MAJOR
, 0), "ppp");
878 printk(KERN_ERR
"failed to register PPP device (%d)\n", err
);
882 unregister_chrdev(PPP_MAJOR
, "ppp");
887 * Network interface unit routines.
890 ppp_start_xmit(struct sk_buff
*skb
, struct net_device
*dev
)
892 struct ppp
*ppp
= (struct ppp
*) dev
->priv
;
896 npi
= ethertype_to_npindex(ntohs(skb
->protocol
));
900 /* Drop, accept or reject the packet */
901 switch (ppp
->npmode
[npi
]) {
905 /* it would be nice to have a way to tell the network
906 system to queue this one up for later. */
913 /* Put the 2-byte PPP protocol number on the front,
914 making sure there is room for the address and control fields. */
915 if (skb_cow_head(skb
, PPP_HDRLEN
))
918 pp
= skb_push(skb
, 2);
919 proto
= npindex_to_proto
[npi
];
923 netif_stop_queue(dev
);
924 skb_queue_tail(&ppp
->file
.xq
, skb
);
925 ppp_xmit_process(ppp
);
930 ++ppp
->stats
.tx_dropped
;
934 static struct net_device_stats
*
935 ppp_net_stats(struct net_device
*dev
)
937 struct ppp
*ppp
= (struct ppp
*) dev
->priv
;
943 ppp_net_ioctl(struct net_device
*dev
, struct ifreq
*ifr
, int cmd
)
945 struct ppp
*ppp
= dev
->priv
;
947 void __user
*addr
= (void __user
*) ifr
->ifr_ifru
.ifru_data
;
948 struct ppp_stats stats
;
949 struct ppp_comp_stats cstats
;
954 ppp_get_stats(ppp
, &stats
);
955 if (copy_to_user(addr
, &stats
, sizeof(stats
)))
961 memset(&cstats
, 0, sizeof(cstats
));
962 if (ppp
->xc_state
!= 0)
963 ppp
->xcomp
->comp_stat(ppp
->xc_state
, &cstats
.c
);
964 if (ppp
->rc_state
!= 0)
965 ppp
->rcomp
->decomp_stat(ppp
->rc_state
, &cstats
.d
);
966 if (copy_to_user(addr
, &cstats
, sizeof(cstats
)))
973 if (copy_to_user(addr
, vers
, strlen(vers
) + 1))
985 static void ppp_setup(struct net_device
*dev
)
987 dev
->hard_header_len
= PPP_HDRLEN
;
990 dev
->tx_queue_len
= 3;
991 dev
->type
= ARPHRD_PPP
;
992 dev
->flags
= IFF_POINTOPOINT
| IFF_NOARP
| IFF_MULTICAST
;
996 * Transmit-side routines.
1000 * Called to do any work queued up on the transmit side
1001 * that can now be done.
1004 ppp_xmit_process(struct ppp
*ppp
)
1006 struct sk_buff
*skb
;
1009 if (ppp
->dev
!= 0) {
1011 while (ppp
->xmit_pending
== 0
1012 && (skb
= skb_dequeue(&ppp
->file
.xq
)) != 0)
1013 ppp_send_frame(ppp
, skb
);
1014 /* If there's no work left to do, tell the core net
1015 code that we can accept some more. */
1016 if (ppp
->xmit_pending
== 0 && skb_peek(&ppp
->file
.xq
) == 0)
1017 netif_wake_queue(ppp
->dev
);
1019 ppp_xmit_unlock(ppp
);
1023 * Compress and send a frame.
1024 * The caller should have locked the xmit path,
1025 * and xmit_pending should be 0.
1028 ppp_send_frame(struct ppp
*ppp
, struct sk_buff
*skb
)
1030 int proto
= PPP_PROTO(skb
);
1031 struct sk_buff
*new_skb
;
1035 if (proto
< 0x8000) {
1036 #ifdef CONFIG_PPP_FILTER
1037 /* check if we should pass this packet */
1038 /* the filter instructions are constructed assuming
1039 a four-byte PPP header on each packet */
1040 *skb_push(skb
, 2) = 1;
1041 if (ppp
->pass_filter
1042 && sk_run_filter(skb
, ppp
->pass_filter
,
1043 ppp
->pass_len
) == 0) {
1045 printk(KERN_DEBUG
"PPP: outbound frame not passed\n");
1049 /* if this packet passes the active filter, record the time */
1050 if (!(ppp
->active_filter
1051 && sk_run_filter(skb
, ppp
->active_filter
,
1052 ppp
->active_len
) == 0))
1053 ppp
->last_xmit
= jiffies
;
1058 switch (IP_PROTO(skb
)) {
1060 switch (DST_PORT(skb
)) {
1061 case 139: // netbios-ssn
1062 case 445: // microsoft-ds
1065 ppp
->last_xmit
= jiffies
;
1070 switch (DST_PORT(skb
)) {
1071 case 137: // netbios-ns
1072 case 138: // netbios-dgm
1075 ppp
->last_xmit
= jiffies
;
1080 ppp
->last_xmit
= jiffies
;
1084 /* for data packets, record the time */
1085 ppp
->last_xmit
= jiffies
;
1087 #endif /* CONFIG_PPP_FILTER */
1090 ++ppp
->stats
.tx_packets
;
1091 ppp
->stats
.tx_bytes
+= skb
->len
- 2;
1095 if (ppp
->vj
== 0 || (ppp
->flags
& SC_COMP_TCP
) == 0)
1097 /* try to do VJ TCP header compression */
1098 new_skb
= alloc_skb(skb
->len
+ ppp
->dev
->hard_header_len
- 2,
1101 printk(KERN_ERR
"PPP: no memory (VJ comp pkt)\n");
1104 skb_reserve(new_skb
, ppp
->dev
->hard_header_len
- 2);
1106 len
= slhc_compress(ppp
->vj
, cp
, skb
->len
- 2,
1107 new_skb
->data
+ 2, &cp
,
1108 !(ppp
->flags
& SC_NO_TCP_CCID
));
1109 if (cp
== skb
->data
+ 2) {
1110 /* didn't compress */
1113 if (cp
[0] & SL_TYPE_COMPRESSED_TCP
) {
1114 proto
= PPP_VJC_COMP
;
1115 cp
[0] &= ~SL_TYPE_COMPRESSED_TCP
;
1117 proto
= PPP_VJC_UNCOMP
;
1118 cp
[0] = skb
->data
[2];
1122 cp
= skb_put(skb
, len
+ 2);
1129 /* peek at outbound CCP frames */
1130 ppp_ccp_peek(ppp
, skb
, 0);
1132 * When LZS or MPPE/MPPC has been negotiated we don't send
1133 * CCP_RESETACK after receiving CCP_RESETREQ; in fact pppd
1134 * sends such a packet but we silently discard it here
1136 if (CCP_CODE(skb
->data
+2) == CCP_RESETACK
1137 && (ppp
->xcomp
->compress_proto
== CI_MPPE
1138 || ppp
->xcomp
->compress_proto
== CI_LZS
)) {
1139 --ppp
->stats
.tx_packets
;
1140 ppp
->stats
.tx_bytes
-= skb
->len
- 2;
1147 /* try to do packet compression */
1148 if ((ppp
->xstate
& SC_COMP_RUN
) && ppp
->xc_state
!= 0
1149 && proto
!= PPP_LCP
&& proto
!= PPP_CCP
) {
1152 * because of possible data expansion when MPPC or LZS
1153 * is used, allocate compressor's buffer 12.5% bigger
1156 if (ppp
->xcomp
->compress_proto
== CI_MPPE
)
1157 comp_ovhd
= ((ppp
->dev
->mtu
* 9) / 8) + 1 + MPPE_OVHD
;
1158 else if (ppp
->xcomp
->compress_proto
== CI_LZS
)
1159 comp_ovhd
= ((ppp
->dev
->mtu
* 9) / 8) + 1 + LZS_OVHD
;
1160 new_skb
= alloc_skb(ppp
->dev
->mtu
+ ppp
->dev
->hard_header_len
1161 + comp_ovhd
, GFP_ATOMIC
);
1163 printk(KERN_ERR
"PPP: no memory (comp pkt)\n");
1166 if (ppp
->dev
->hard_header_len
> PPP_HDRLEN
)
1167 skb_reserve(new_skb
,
1168 ppp
->dev
->hard_header_len
- PPP_HDRLEN
);
1170 /* compressor still expects A/C bytes in hdr */
1171 len
= ppp
->xcomp
->compress(ppp
->xc_state
, skb
->data
- 2,
1172 new_skb
->data
, skb
->len
+ 2,
1173 ppp
->dev
->mtu
+ PPP_HDRLEN
);
1174 if (len
> 0 && (ppp
->flags
& SC_CCP_UP
)) {
1178 skb_pull(skb
, 2); /* pull off A/C bytes */
1179 } else if (len
== 0) {
1180 /* didn't compress, or CCP not up yet */
1185 * MPPE requires that we do not send unencrypted
1186 * frames. The compressor will return -1 if we
1187 * should drop the frame. We cannot simply test
1188 * the compress_proto because MPPE and MPPC share
1191 printk(KERN_ERR
"ppp: compressor dropped pkt\n");
1198 * If we are waiting for traffic (demand dialling),
1199 * queue it up for pppd to receive.
1201 if (ppp
->flags
& SC_LOOP_TRAFFIC
) {
1202 if (ppp
->file
.rq
.qlen
> PPP_MAX_RQLEN
)
1204 skb_queue_tail(&ppp
->file
.rq
, skb
);
1205 wake_up_interruptible(&ppp
->file
.rwait
);
1209 ppp
->xmit_pending
= skb
;
1215 ++ppp
->stats
.tx_errors
;
1219 * Try to send the frame in xmit_pending.
1220 * The caller should have the xmit path locked.
1223 ppp_push(struct ppp
*ppp
)
1225 struct list_head
*list
;
1226 struct channel
*pch
;
1227 struct sk_buff
*skb
= ppp
->xmit_pending
;
1232 list
= &ppp
->channels
;
1233 if (list_empty(list
)) {
1234 /* nowhere to send the packet, just drop it */
1235 ppp
->xmit_pending
= NULL
;
1240 if ((ppp
->flags
& SC_MULTILINK
) == 0) {
1241 /* not doing multilink: send it down the first channel */
1243 pch
= list_entry(list
, struct channel
, clist
);
1245 spin_lock_bh(&pch
->downl
);
1247 if (pch
->chan
->ops
->start_xmit(pch
->chan
, skb
))
1248 ppp
->xmit_pending
= NULL
;
1250 /* channel got unregistered */
1252 ppp
->xmit_pending
= NULL
;
1254 spin_unlock_bh(&pch
->downl
);
1258 #ifdef CONFIG_PPP_MULTILINK
1259 /* Multilink: fragment the packet over as many links
1260 as can take the packet at the moment. */
1261 if (!ppp_mp_explode(ppp
, skb
))
1263 #endif /* CONFIG_PPP_MULTILINK */
1265 ppp
->xmit_pending
= NULL
;
1269 #ifdef CONFIG_PPP_MULTILINK
1271 * Divide a packet to be transmitted into fragments and
1272 * send them out the individual links.
1274 static int ppp_mp_explode(struct ppp
*ppp
, struct sk_buff
*skb
)
1277 int i
, bits
, hdrlen
, mtu
;
1281 unsigned char *p
, *q
;
1282 struct list_head
*list
;
1283 struct channel
*pch
;
1284 struct sk_buff
*frag
;
1285 struct ppp_channel
*chan
;
1287 nfree
= 0; /* # channels which have no packet already queued */
1288 navail
= 0; /* total # of usable channels (not deregistered) */
1289 hdrlen
= (ppp
->flags
& SC_MP_XSHORTSEQ
)? MPHDRLEN_SSN
: MPHDRLEN
;
1291 list_for_each_entry(pch
, &ppp
->channels
, clist
) {
1292 navail
+= pch
->avail
= (pch
->chan
!= NULL
);
1294 if (skb_queue_empty(&pch
->file
.xq
) ||
1299 if (!pch
->had_frag
&& i
< ppp
->nxchan
)
1306 * Don't start sending this packet unless at least half of
1307 * the channels are free. This gives much better TCP
1308 * performance if we have a lot of channels.
1310 if (nfree
== 0 || nfree
< navail
/ 2)
1311 return 0; /* can't take now, leave it in xmit_pending */
	/* Do protocol field compression (XXX this should be optional) */

	/*
	 * Decide on fragment size.
	 * We create a fragment for each free channel regardless of
	 * how small they are (i.e. even 0 length) in order to minimize
	 * the time that it will take to detect when a channel drops
	 * a link.
	 */
	fragsize = DIV_ROUND_UP(fragsize, nfree);
	/* nbigger channels get fragsize bytes, the rest get fragsize-1,
	   except if nbigger==0, then they all get fragsize. */
	nbigger = len % nfree;
1335 /* skip to the channel after the one we last used
1336 and start at that one */
1337 list
= &ppp
->channels
;
1338 for (i
= 0; i
< ppp
->nxchan
; ++i
) {
1340 if (list
== &ppp
->channels
) {
1346 /* create a fragment for each channel */
1348 while (nfree
> 0 || len
> 0) {
1350 if (list
== &ppp
->channels
) {
1354 pch
= list_entry(list
, struct channel
, clist
);
1360 * Skip this channel if it has a fragment pending already and
1361 * we haven't given a fragment to all of the free channels.
1363 if (pch
->avail
== 1) {
1371 /* check the channel's mtu and whether it is still attached. */
1372 spin_lock_bh(&pch
->downl
);
1373 if (pch
->chan
== NULL
) {
1374 /* can't use this channel, it's being deregistered */
1375 spin_unlock_bh(&pch
->downl
);
1383 * Create a fragment for this channel of
1384 * min(max(mtu+2-hdrlen, 4), fragsize, len) bytes.
1385 * If mtu+2-hdrlen < 4, that is a ridiculously small
1386 * MTU, so we use mtu = 2 + hdrlen.
1391 mtu
= pch
->chan
->mtu
+ 2 - hdrlen
;
1396 if (flen
== len
&& nfree
== 0)
1398 frag
= alloc_skb(flen
+ hdrlen
+ (flen
== 0), GFP_ATOMIC
);
1401 q
= skb_put(frag
, flen
+ hdrlen
);
1403 /* make the MP header */
1406 if (ppp
->flags
& SC_MP_XSHORTSEQ
) {
1407 q
[2] = bits
+ ((ppp
->nxseq
>> 8) & 0xf);
1411 q
[3] = ppp
->nxseq
>> 16;
1412 q
[4] = ppp
->nxseq
>> 8;
1418 * Unfortunately there is a bug in older versions of
1419 * the Linux PPP multilink reconstruction code where it
1420 * drops 0-length fragments. Therefore we make sure the
1421 * fragment has at least one byte of data. Any bytes
1422 * we add in this situation will end up as padding on the
1423 * end of the reconstructed packet.
1426 *skb_put(frag
, 1) = 0;
1428 memcpy(q
+ hdrlen
, p
, flen
);
1430 /* try to send it down the channel */
1432 if (!skb_queue_empty(&pch
->file
.xq
) ||
1433 !chan
->ops
->start_xmit(chan
, frag
))
1434 skb_queue_tail(&pch
->file
.xq
, frag
);
1440 spin_unlock_bh(&pch
->downl
);
1442 if (--nbigger
== 0 && fragsize
> 0)
1450 spin_unlock_bh(&pch
->downl
);
1452 printk(KERN_ERR
"PPP: no memory (fragment)\n");
1453 ++ppp
->stats
.tx_errors
;
1455 return 1; /* abandon the frame */
1457 #endif /* CONFIG_PPP_MULTILINK */
1460 * Try to send data out on a channel.
1463 ppp_channel_push(struct channel
*pch
)
1465 struct sk_buff
*skb
;
1468 spin_lock_bh(&pch
->downl
);
1469 if (pch
->chan
!= 0) {
1470 while (!skb_queue_empty(&pch
->file
.xq
)) {
1471 skb
= skb_dequeue(&pch
->file
.xq
);
1472 if (!pch
->chan
->ops
->start_xmit(pch
->chan
, skb
)) {
1473 /* put the packet back and try again later */
1474 skb_queue_head(&pch
->file
.xq
, skb
);
1479 /* channel got deregistered */
1480 skb_queue_purge(&pch
->file
.xq
);
1482 spin_unlock_bh(&pch
->downl
);
1483 /* see if there is anything from the attached unit to be sent */
1484 if (skb_queue_empty(&pch
->file
.xq
)) {
1485 read_lock_bh(&pch
->upl
);
1488 ppp_xmit_process(ppp
);
1489 read_unlock_bh(&pch
->upl
);
/*
 * Receive-side routines.
 */

/* misuse a few fields of the skb for MP reconstruction */
#define sequence priority
#define BEbits cb[0]
1502 ppp_do_recv(struct ppp
*ppp
, struct sk_buff
*skb
, struct channel
*pch
)
1505 /* ppp->dev == 0 means interface is closing down */
1507 ppp_receive_frame(ppp
, skb
, pch
);
1510 ppp_recv_unlock(ppp
);
1514 ppp_input(struct ppp_channel
*chan
, struct sk_buff
*skb
)
1516 struct channel
*pch
= chan
->ppp
;
1524 read_lock_bh(&pch
->upl
);
1525 if (!pskb_may_pull(skb
, 2)) {
1528 ++pch
->ppp
->stats
.rx_length_errors
;
1529 ppp_receive_error(pch
->ppp
);
1534 proto
= PPP_PROTO(skb
);
1535 if (pch
->ppp
== 0 || proto
>= 0xc000 || proto
== PPP_CCPFRAG
) {
1536 /* put it on the channel queue */
1537 skb_queue_tail(&pch
->file
.rq
, skb
);
1538 /* drop old frames if queue too long */
1539 while (pch
->file
.rq
.qlen
> PPP_MAX_RQLEN
1540 && (skb
= skb_dequeue(&pch
->file
.rq
)) != 0)
1542 wake_up_interruptible(&pch
->file
.rwait
);
1544 ppp_do_recv(pch
->ppp
, skb
, pch
);
1548 read_unlock_bh(&pch
->upl
);
1551 /* Put a 0-length skb in the receive queue as an error indication */
1553 ppp_input_error(struct ppp_channel
*chan
, int code
)
1555 struct channel
*pch
= chan
->ppp
;
1556 struct sk_buff
*skb
;
1561 read_lock_bh(&pch
->upl
);
1562 if (pch
->ppp
!= 0) {
1563 skb
= alloc_skb(0, GFP_ATOMIC
);
1565 skb
->len
= 0; /* probably unnecessary */
1567 ppp_do_recv(pch
->ppp
, skb
, pch
);
1570 read_unlock_bh(&pch
->upl
);
1574 * We come in here to process a received frame.
1575 * The receive side of the ppp unit is locked.
1578 ppp_receive_frame(struct ppp
*ppp
, struct sk_buff
*skb
, struct channel
*pch
)
1580 /* note: a 0-length skb is used as an error indication */
1582 #ifdef CONFIG_PPP_MULTILINK
1583 /* XXX do channel-level decompression here */
1584 if (PPP_PROTO(skb
) == PPP_MP
)
1585 ppp_receive_mp_frame(ppp
, skb
, pch
);
1587 #endif /* CONFIG_PPP_MULTILINK */
1588 ppp_receive_nonmp_frame(ppp
, skb
);
1591 ppp_receive_error(ppp
);
1596 ppp_receive_error(struct ppp
*ppp
)
1598 ++ppp
->stats
.rx_errors
;
1604 ppp_receive_nonmp_frame(struct ppp
*ppp
, struct sk_buff
*skb
)
1607 int proto
, len
, npi
;
1610 * Decompress the frame, if compressed.
1611 * Note that some decompressors need to see uncompressed frames
1612 * that come in as well as compressed frames.
1614 if (ppp
->rc_state
!= 0 && (ppp
->rstate
& SC_DECOMP_RUN
)
1615 && (ppp
->rstate
& (SC_DC_FERROR
| SC_DC_ERROR
)) == 0)
1616 skb
= ppp_decompress_frame(ppp
, skb
);
1618 proto
= PPP_PROTO(skb
);
1621 /* decompress VJ compressed packets */
1622 if (ppp
->vj
== 0 || (ppp
->flags
& SC_REJ_COMP_TCP
))
1625 if (skb_tailroom(skb
) < 124 || skb_cloned(skb
)) {
1626 /* copy to a new sk_buff with more tailroom */
1627 ns
= dev_alloc_skb(skb
->len
+ 128);
1629 printk(KERN_ERR
"PPP: no memory (VJ decomp)\n");
1633 skb_copy_bits(skb
, 0, skb_put(ns
, skb
->len
), skb
->len
);
1638 skb
->ip_summed
= CHECKSUM_NONE
;
1640 len
= slhc_uncompress(ppp
->vj
, skb
->data
+ 2, skb
->len
- 2);
1642 printk(KERN_DEBUG
"PPP: VJ decompression error\n");
1647 skb_put(skb
, len
- skb
->len
);
1648 else if (len
< skb
->len
)
1653 case PPP_VJC_UNCOMP
:
1654 if (ppp
->vj
== 0 || (ppp
->flags
& SC_REJ_COMP_TCP
))
		/* Until we fix the decompressor, we need to make sure
		 * the data portion is linear.
		 */
1660 if (!pskb_may_pull(skb
, skb
->len
))
1663 if (slhc_remember(ppp
->vj
, skb
->data
+ 2, skb
->len
- 2) <= 0) {
1664 printk(KERN_ERR
"PPP: VJ uncompressed error\n");
1671 ppp_ccp_peek(ppp
, skb
, 1);
1675 ++ppp
->stats
.rx_packets
;
1676 ppp
->stats
.rx_bytes
+= skb
->len
- 2;
1678 npi
= proto_to_npindex(proto
);
1680 /* control or unknown frame - pass it to pppd */
1681 skb_queue_tail(&ppp
->file
.rq
, skb
);
1682 /* limit queue length by dropping old frames */
1683 while (ppp
->file
.rq
.qlen
> PPP_MAX_RQLEN
1684 && (skb
= skb_dequeue(&ppp
->file
.rq
)) != 0)
1686 /* wake up any process polling or blocking on read */
1687 wake_up_interruptible(&ppp
->file
.rwait
);
1690 /* network protocol frame - give it to the kernel */
1692 #ifdef CONFIG_PPP_FILTER
1693 /* check if the packet passes the pass and active filters */
1694 /* the filter instructions are constructed assuming
1695 a four-byte PPP header on each packet */
1696 if (ppp
->pass_filter
|| ppp
->active_filter
) {
1697 if (skb_cloned(skb
) &&
1698 pskb_expand_head(skb
, 0, 0, GFP_ATOMIC
))
1701 *skb_push(skb
, 2) = 0;
1702 if (ppp
->pass_filter
1703 && sk_run_filter(skb
, ppp
->pass_filter
,
1704 ppp
->pass_len
) == 0) {
1706 printk(KERN_DEBUG
"PPP: inbound frame "
1711 if (!(ppp
->active_filter
1712 && sk_run_filter(skb
, ppp
->active_filter
,
1713 ppp
->active_len
) == 0))
1714 ppp
->last_recv
= jiffies
;
1717 #endif /* CONFIG_PPP_FILTER */
1718 ppp
->last_recv
= jiffies
;
1720 if ((ppp
->dev
->flags
& IFF_UP
) == 0
1721 || ppp
->npmode
[npi
] != NPMODE_PASS
) {
1724 /* chop off protocol */
1725 skb_pull_rcsum(skb
, 2);
1726 skb
->dev
= ppp
->dev
;
1727 skb
->protocol
= htons(npindex_to_ethertype
[npi
]);
1728 skb_reset_mac_header(skb
);
1730 ppp
->dev
->last_rx
= jiffies
;
1737 ppp_receive_error(ppp
);
1740 static struct sk_buff
*
1741 ppp_decompress_frame(struct ppp
*ppp
, struct sk_buff
*skb
)
1743 int proto
= PPP_PROTO(skb
);
	/* Until we fix all the decompressors, we need to make sure
	 * the data portion is linear.
	 */
1750 if (!pskb_may_pull(skb
, skb
->len
))
1753 if (proto
== PPP_COMP
) {
1756 switch(ppp
->rcomp
->compress_proto
) {
1758 obuff_size
= ppp
->mru_alloc
+ PPP_HDRLEN
+ 1;
1761 obuff_size
= ppp
->mru_alloc
+ PPP_HDRLEN
;
1765 ns
= dev_alloc_skb(obuff_size
);
1767 printk(KERN_ERR
"ppp_decompress_frame: no memory\n");
1770 /* the decompressor still expects the A/C bytes in the hdr */
1771 len
= ppp
->rcomp
->decompress(ppp
->rc_state
, skb
->data
- 2,
1772 skb
->len
+ 2, ns
->data
, obuff_size
);
1774 /* Pass the compressed frame to pppd as an
1775 error indication. */
1776 if (len
== DECOMP_FATALERROR
)
1777 ppp
->rstate
|= SC_DC_FERROR
;
1785 skb_pull(skb
, 2); /* pull off the A/C bytes */
1788 /* Uncompressed frame - pass to decompressor so it
1789 can update its dictionary if necessary. */
1790 if (ppp
->rcomp
->incomp
)
1791 ppp
->rcomp
->incomp(ppp
->rc_state
, skb
->data
- 2,
1798 if (ppp
->rcomp
->compress_proto
!= CI_MPPE
1799 && ppp
->rcomp
->compress_proto
!= CI_LZS
) {
1801 * If decompression protocol isn't MPPE/MPPC or LZS, we set
1802 * SC_DC_ERROR flag and wait for CCP_RESETACK
1804 ppp
->rstate
|= SC_DC_ERROR
;
1806 ppp_receive_error(ppp
);
1810 #ifdef CONFIG_PPP_MULTILINK
1812 * Receive a multilink frame.
1813 * We put it on the reconstruction queue and then pull off
1814 * as many completed frames as we can.
1817 ppp_receive_mp_frame(struct ppp
*ppp
, struct sk_buff
*skb
, struct channel
*pch
)
1821 int mphdrlen
= (ppp
->flags
& SC_MP_SHORTSEQ
)? MPHDRLEN_SSN
: MPHDRLEN
;
1823 if (!pskb_may_pull(skb
, mphdrlen
+ 1) || ppp
->mrru
== 0)
1824 goto err
; /* no good, throw it away */
1826 /* Decode sequence number and begin/end bits */
1827 if (ppp
->flags
& SC_MP_SHORTSEQ
) {
1828 seq
= ((skb
->data
[2] & 0x0f) << 8) | skb
->data
[3];
1831 seq
= (skb
->data
[3] << 16) | (skb
->data
[4] << 8)| skb
->data
[5];
1834 skb
->BEbits
= skb
->data
[2];
1835 skb_pull(skb
, mphdrlen
); /* pull off PPP and MP headers */
1838 * Do protocol ID decompression on the first fragment of each packet.
1840 if ((skb
->BEbits
& B
) && (skb
->data
[0] & 1))
1841 *skb_push(skb
, 1) = 0;
	/*
	 * Expand sequence number to 32 bits, making it as close
	 * as possible to ppp->minseq.
	 */
	seq |= ppp->minseq & ~mask;
	if ((int)(ppp->minseq - seq) > (int)(mask >> 1))
		seq += mask + 1;
	else if ((int)(seq - ppp->minseq) > (int)(mask >> 1))
		seq -= mask + 1;	/* should never happen */
	skb->sequence = seq;
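	/*
	 * Illustrative sketch (not part of the driver): a short MP sequence
	 * number is widened to 32 bits by splicing in the high bits of
	 * minseq and then correcting by one wrap in whichever direction
	 * leaves the result closest to minseq.  Stand-alone demo with a
	 * 12-bit field (mask = 0xfff), as used with short sequence numbers:
	 */
#if 0
#include <stdint.h>
#include <stdio.h>

static uint32_t expand_seq(uint32_t short_seq, uint32_t minseq, uint32_t mask)
{
	uint32_t seq = short_seq | (minseq & ~mask);

	if ((int32_t)(minseq - seq) > (int32_t)(mask >> 1))
		seq += mask + 1;	/* fragment is in the next window */
	else if ((int32_t)(seq - minseq) > (int32_t)(mask >> 1))
		seq -= mask + 1;	/* fragment is in the previous window */
	return seq;
}

int main(void)
{
	uint32_t mask = 0xfff;
	uint32_t minseq = 0x12ffe;	/* last seq seen on the slowest channel */

	/* 0x003 is just past a 12-bit wrap, so it expands to 0x13003 */
	printf("%#x -> %#x\n", 0x003u, expand_seq(0x003, minseq, mask));
	printf("%#x -> %#x\n", 0xffdu, expand_seq(0xffd, minseq, mask));
	return 0;
}
#endif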
1856 * If this packet comes before the next one we were expecting,
1859 if (seq_before(seq
, ppp
->nextseq
)) {
1861 ++ppp
->stats
.rx_dropped
;
1862 ppp_receive_error(ppp
);
1867 * Reevaluate minseq, the minimum over all channels of the
1868 * last sequence number received on each channel. Because of
1869 * the increasing sequence number rule, we know that any fragment
1870 * before `minseq' which hasn't arrived is never going to arrive.
1871 * The list of channels can't change because we have the receive
1872 * side of the ppp unit locked.
1874 list_for_each_entry(ch
, &ppp
->channels
, clist
) {
1875 if (seq_before(ch
->lastseq
, seq
))
1878 if (seq_before(ppp
->minseq
, seq
))
1881 /* Put the fragment on the reconstruction queue */
1882 ppp_mp_insert(ppp
, skb
);
1884 /* If the queue is getting long, don't wait any longer for packets
1885 before the start of the queue. */
1886 if (skb_queue_len(&ppp
->mrq
) >= PPP_MP_MAX_QLEN
1887 && seq_before(ppp
->minseq
, ppp
->mrq
.next
->sequence
))
1888 ppp
->minseq
= ppp
->mrq
.next
->sequence
;
1890 /* Pull completed packets off the queue and receive them. */
1891 while ((skb
= ppp_mp_reconstruct(ppp
)) != 0) {
1892 if (pskb_may_pull(skb
, 2))
1893 ppp_receive_nonmp_frame(ppp
, skb
);
1895 ++ppp
->dev
->stats
.rx_length_errors
;
1897 ppp_receive_error(ppp
);
1905 ppp_receive_error(ppp
);
1909 * Insert a fragment on the MP reconstruction queue.
1910 * The queue is ordered by increasing sequence number.
1913 ppp_mp_insert(struct ppp
*ppp
, struct sk_buff
*skb
)
1916 struct sk_buff_head
*list
= &ppp
->mrq
;
1917 u32 seq
= skb
->sequence
;
1919 /* N.B. we don't need to lock the list lock because we have the
1920 ppp unit receive-side lock. */
1921 for (p
= list
->next
; p
!= (struct sk_buff
*)list
; p
= p
->next
)
1922 if (seq_before(seq
, p
->sequence
))
1924 __skb_insert(skb
, p
->prev
, p
, list
);
1928 * Reconstruct a packet from the MP fragment queue.
1929 * We go through increasing sequence numbers until we find a
1930 * complete packet, or we get to the sequence number for a fragment
1931 * which hasn't arrived but might still do so.
1934 ppp_mp_reconstruct(struct ppp
*ppp
)
1936 u32 seq
= ppp
->nextseq
;
1937 u32 minseq
= ppp
->minseq
;
1938 struct sk_buff_head
*list
= &ppp
->mrq
;
1939 struct sk_buff
*p
, *next
;
1940 struct sk_buff
*head
, *tail
;
1941 struct sk_buff
*skb
= NULL
;
1942 int lost
= 0, len
= 0;
1944 if (ppp
->mrru
== 0) /* do nothing until mrru is set */
1948 for (p
= head
; p
!= (struct sk_buff
*) list
; p
= next
) {
1950 if (seq_before(p
->sequence
, seq
)) {
1951 /* this can't happen, anyway ignore the skb */
1952 printk(KERN_ERR
"ppp_mp_reconstruct bad seq %u < %u\n",
1957 if (p
->sequence
!= seq
) {
1958 /* Fragment `seq' is missing. If it is after
1959 minseq, it might arrive later, so stop here. */
1960 if (seq_after(seq
, minseq
))
1962 /* Fragment `seq' is lost, keep going. */
1964 seq
= seq_before(minseq
, p
->sequence
)?
1965 minseq
+ 1: p
->sequence
;
1971 * At this point we know that all the fragments from
1972 * ppp->nextseq to seq are either present or lost.
1973 * Also, there are no complete packets in the queue
1974 * that have no missing fragments and end before this
1978 /* B bit set indicates this fragment starts a packet */
1979 if (p
->BEbits
& B
) {
1987 /* Got a complete packet yet? */
1988 if (lost
== 0 && (p
->BEbits
& E
) && (head
->BEbits
& B
)) {
1989 if (len
> ppp
->mrru
+ 2) {
1990 ++ppp
->stats
.rx_length_errors
;
1991 printk(KERN_DEBUG
"PPP: reconstructed packet"
1992 " is too long (%d)\n", len
);
1993 } else if (p
== head
) {
1994 /* fragment is complete packet - reuse skb */
1998 } else if ((skb
= dev_alloc_skb(len
)) == NULL
) {
1999 ++ppp
->stats
.rx_missed_errors
;
2000 printk(KERN_DEBUG
"PPP: no memory for "
2001 "reconstructed packet");
2006 ppp
->nextseq
= seq
+ 1;
2010 * If this is the ending fragment of a packet,
2011 * and we haven't found a complete valid packet yet,
2012 * we can discard up to and including this fragment.
2020 /* If we have a complete packet, copy it all into one skb. */
2022 /* If we have discarded any fragments,
2023 signal a receive error. */
2024 if (head
->sequence
!= ppp
->nextseq
) {
2026 printk(KERN_DEBUG
" missed pkts %u..%u\n",
2027 ppp
->nextseq
, head
->sequence
-1);
2028 ++ppp
->stats
.rx_dropped
;
2029 ppp_receive_error(ppp
);
2033 /* copy to a single skb */
2034 for (p
= head
; p
!= tail
->next
; p
= p
->next
)
2035 skb_copy_bits(p
, 0, skb_put(skb
, p
->len
), p
->len
);
2036 ppp
->nextseq
= tail
->sequence
+ 1;
2040 /* Discard all the skbuffs that we have copied the data out of
2041 or that we can't use. */
2042 while ((p
= list
->next
) != head
) {
2043 __skb_unlink(p
, list
);
2049 #endif /* CONFIG_PPP_MULTILINK */
2052 * Channel interface.
2056 * Create a new, unattached ppp channel.
2059 ppp_register_channel(struct ppp_channel
*chan
)
2061 struct channel
*pch
;
2063 pch
= kzalloc(sizeof(struct channel
), GFP_KERNEL
);
2069 init_ppp_file(&pch
->file
, CHANNEL
);
2070 pch
->file
.hdrlen
= chan
->hdrlen
;
2071 #ifdef CONFIG_PPP_MULTILINK
2073 #endif /* CONFIG_PPP_MULTILINK */
2074 init_rwsem(&pch
->chan_sem
);
2075 spin_lock_init(&pch
->downl
);
2076 rwlock_init(&pch
->upl
);
2077 spin_lock_bh(&all_channels_lock
);
2078 pch
->file
.index
= ++last_channel_index
;
2079 list_add(&pch
->list
, &new_channels
);
2080 atomic_inc(&channel_count
);
2081 spin_unlock_bh(&all_channels_lock
);
2086 * Return the index of a channel.
2088 int ppp_channel_index(struct ppp_channel
*chan
)
2090 struct channel
*pch
= chan
->ppp
;
2093 return pch
->file
.index
;
2098 * Return the PPP unit number to which a channel is connected.
2100 int ppp_unit_number(struct ppp_channel
*chan
)
2102 struct channel
*pch
= chan
->ppp
;
2106 read_lock_bh(&pch
->upl
);
2108 unit
= pch
->ppp
->file
.index
;
2109 read_unlock_bh(&pch
->upl
);
2115 * Disconnect a channel from the generic layer.
2116 * This must be called in process context.
2119 ppp_unregister_channel(struct ppp_channel
*chan
)
2121 struct channel
*pch
= chan
->ppp
;
2124 return; /* should never happen */
2128 * This ensures that we have returned from any calls into the
2129 * the channel's start_xmit or ioctl routine before we proceed.
2131 down_write(&pch
->chan_sem
);
2132 spin_lock_bh(&pch
->downl
);
2134 spin_unlock_bh(&pch
->downl
);
2135 up_write(&pch
->chan_sem
);
2136 ppp_disconnect_channel(pch
);
2137 spin_lock_bh(&all_channels_lock
);
2138 list_del(&pch
->list
);
2139 spin_unlock_bh(&all_channels_lock
);
2141 wake_up_interruptible(&pch
->file
.rwait
);
2142 if (atomic_dec_and_test(&pch
->file
.refcnt
))
2143 ppp_destroy_channel(pch
);
2147 * Callback from a channel when it can accept more to transmit.
2148 * This should be called at BH/softirq level, not interrupt level.
2151 ppp_output_wakeup(struct ppp_channel
*chan
)
2153 struct channel
*pch
= chan
->ppp
;
2157 ppp_channel_push(pch
);
2161 * Compression control.
2164 /* Process the PPPIOCSCOMPRESS ioctl. */
2166 ppp_set_compress(struct ppp
*ppp
, unsigned long arg
)
2169 struct compressor
*cp
, *ocomp
;
2170 struct ppp_option_data data
;
2171 void *state
, *ostate
;
2172 unsigned char ccp_option
[CCP_MAX_OPTION_LENGTH
];
2175 if (copy_from_user(&data
, (void __user
*) arg
, sizeof(data
))
2176 || (data
.length
<= CCP_MAX_OPTION_LENGTH
2177 && copy_from_user(ccp_option
, (void __user
*) data
.ptr
, data
.length
)))
2180 if (data
.length
> CCP_MAX_OPTION_LENGTH
2181 || ccp_option
[1] < 2 || ccp_option
[1] > data
.length
)
2184 cp
= find_compressor(ccp_option
[0]);
2187 request_module("ppp-compress-%d", ccp_option
[0]);
2188 cp
= find_compressor(ccp_option
[0]);
2190 #endif /* CONFIG_KMOD */
2195 if (data
.transmit
) {
2196 state
= cp
->comp_alloc(ccp_option
, data
.length
);
2199 ppp
->xstate
&= ~SC_COMP_RUN
;
2201 ostate
= ppp
->xc_state
;
2203 ppp
->xc_state
= state
;
2204 ppp_xmit_unlock(ppp
);
2206 ocomp
->comp_free(ostate
);
2207 module_put(ocomp
->owner
);
2211 module_put(cp
->owner
);
2214 state
= cp
->decomp_alloc(ccp_option
, data
.length
);
2217 ppp
->rstate
&= ~SC_DECOMP_RUN
;
2219 ostate
= ppp
->rc_state
;
2221 ppp
->rc_state
= state
;
2222 ppp_recv_unlock(ppp
);
2224 ocomp
->decomp_free(ostate
);
2225 module_put(ocomp
->owner
);
2229 module_put(cp
->owner
);
2237 * Look at a CCP packet and update our state accordingly.
2238 * We assume the caller has the xmit or recv path locked.
2241 ppp_ccp_peek(struct ppp
*ppp
, struct sk_buff
*skb
, int inbound
)
2246 if (!pskb_may_pull(skb
, CCP_HDRLEN
+ 2))
2247 return; /* no header */
2250 switch (CCP_CODE(dp
)) {
2253 /* A ConfReq starts negotiation of compression
2254 * in one direction of transmission,
2255 * and hence brings it down...but which way?
2258 * A ConfReq indicates what the sender would like to receive
2261 /* He is proposing what I should send */
2262 ppp
->xstate
&= ~SC_COMP_RUN
;
			/* I am proposing what he should send */
2265 ppp
->rstate
&= ~SC_DECOMP_RUN
;
2272 * CCP is going down, both directions of transmission
2274 ppp
->rstate
&= ~SC_DECOMP_RUN
;
2275 ppp
->xstate
&= ~SC_COMP_RUN
;
2279 if ((ppp
->flags
& (SC_CCP_OPEN
| SC_CCP_UP
)) != SC_CCP_OPEN
)
2281 len
= CCP_LENGTH(dp
);
2282 if (!pskb_may_pull(skb
, len
+ 2))
2283 return; /* too short */
2286 if (len
< CCP_OPT_MINLEN
|| len
< CCP_OPT_LENGTH(dp
))
2289 /* we will start receiving compressed packets */
2290 if (ppp
->rc_state
== 0)
2292 if (ppp
->rcomp
->decomp_init(ppp
->rc_state
, dp
, len
,
2293 ppp
->file
.index
, 0, ppp
->mru
, ppp
->debug
)) {
2294 ppp
->rstate
|= SC_DECOMP_RUN
;
2295 ppp
->rstate
&= ~(SC_DC_ERROR
| SC_DC_FERROR
);
2298 /* we will soon start sending compressed packets */
2299 if (ppp
->xc_state
== 0)
2301 if (ppp
->xcomp
->comp_init(ppp
->xc_state
, dp
, len
,
2302 ppp
->file
.index
, 0, ppp
->debug
))
2303 ppp
->xstate
|= SC_COMP_RUN
;
2308 /* reset the [de]compressor */
2309 if ((ppp
->flags
& SC_CCP_UP
) == 0)
2312 if (ppp
->rc_state
&& (ppp
->rstate
& SC_DECOMP_RUN
)) {
2313 ppp
->rcomp
->decomp_reset(ppp
->rc_state
);
2314 ppp
->rstate
&= ~SC_DC_ERROR
;
2317 if (ppp
->xc_state
&& (ppp
->xstate
& SC_COMP_RUN
))
2318 ppp
->xcomp
->comp_reset(ppp
->xc_state
);
2324 /* Free up compression resources. */
2326 ppp_ccp_closed(struct ppp
*ppp
)
2328 void *xstate
, *rstate
;
2329 struct compressor
*xcomp
, *rcomp
;
2332 ppp
->flags
&= ~(SC_CCP_OPEN
| SC_CCP_UP
);
2335 xstate
= ppp
->xc_state
;
2336 ppp
->xc_state
= NULL
;
2339 rstate
= ppp
->rc_state
;
2340 ppp
->rc_state
= NULL
;
2344 xcomp
->comp_free(xstate
);
2345 module_put(xcomp
->owner
);
2348 rcomp
->decomp_free(rstate
);
2349 module_put(rcomp
->owner
);
/* List of compressors. */
static LIST_HEAD(compressor_list);
static DEFINE_SPINLOCK(compressor_list_lock);

struct compressor_entry {
	struct list_head list;
	struct compressor *comp;
};
2362 static struct compressor_entry
*
2363 find_comp_entry(int proto
)
2365 struct compressor_entry
*ce
;
2367 list_for_each_entry(ce
, &compressor_list
, list
) {
2368 if (ce
->comp
->compress_proto
== proto
)
2374 /* Register a compressor */
2376 ppp_register_compressor(struct compressor
*cp
)
2378 struct compressor_entry
*ce
;
2380 spin_lock(&compressor_list_lock
);
2382 if (find_comp_entry(cp
->compress_proto
) != 0)
2385 ce
= kmalloc(sizeof(struct compressor_entry
), GFP_ATOMIC
);
2390 list_add(&ce
->list
, &compressor_list
);
2392 spin_unlock(&compressor_list_lock
);
2396 /* Unregister a compressor */
2398 ppp_unregister_compressor(struct compressor
*cp
)
2400 struct compressor_entry
*ce
;
2402 spin_lock(&compressor_list_lock
);
2403 ce
= find_comp_entry(cp
->compress_proto
);
2404 if (ce
!= 0 && ce
->comp
== cp
) {
2405 list_del(&ce
->list
);
2408 spin_unlock(&compressor_list_lock
);
2411 /* Find a compressor. */
2412 static struct compressor
*
2413 find_compressor(int type
)
2415 struct compressor_entry
*ce
;
2416 struct compressor
*cp
= NULL
;
2418 spin_lock(&compressor_list_lock
);
2419 ce
= find_comp_entry(type
);
2422 if (!try_module_get(cp
->owner
))
2425 spin_unlock(&compressor_list_lock
);
 * Miscellaneous stuff.
2434 ppp_get_stats(struct ppp
*ppp
, struct ppp_stats
*st
)
2436 struct slcompress
*vj
= ppp
->vj
;
2438 memset(st
, 0, sizeof(*st
));
2439 st
->p
.ppp_ipackets
= ppp
->stats
.rx_packets
;
2440 st
->p
.ppp_ierrors
= ppp
->stats
.rx_errors
;
2441 st
->p
.ppp_ibytes
= ppp
->stats
.rx_bytes
;
2442 st
->p
.ppp_opackets
= ppp
->stats
.tx_packets
;
2443 st
->p
.ppp_oerrors
= ppp
->stats
.tx_errors
;
2444 st
->p
.ppp_obytes
= ppp
->stats
.tx_bytes
;
2447 st
->vj
.vjs_packets
= vj
->sls_o_compressed
+ vj
->sls_o_uncompressed
;
2448 st
->vj
.vjs_compressed
= vj
->sls_o_compressed
;
2449 st
->vj
.vjs_searches
= vj
->sls_o_searches
;
2450 st
->vj
.vjs_misses
= vj
->sls_o_misses
;
2451 st
->vj
.vjs_errorin
= vj
->sls_i_error
;
2452 st
->vj
.vjs_tossed
= vj
->sls_i_tossed
;
2453 st
->vj
.vjs_uncompressedin
= vj
->sls_i_uncompressed
;
2454 st
->vj
.vjs_compressedin
= vj
->sls_i_compressed
;
2458 * Stuff for handling the lists of ppp units and channels
2459 * and for initialization.
2463 * Create a new ppp interface unit. Fails if it can't allocate memory
2464 * or if there is already a unit with the requested number.
2465 * unit == -1 means allocate a new number.
2468 ppp_create_interface(int unit
, int *retp
)
2471 struct net_device
*dev
= NULL
;
2475 ppp
= kzalloc(sizeof(struct ppp
), GFP_KERNEL
);
2478 dev
= alloc_netdev(0, "", ppp_setup
);
2483 ppp
->mru_alloc
= PPP_MRU
;
2484 init_ppp_file(&ppp
->file
, INTERFACE
);
2485 ppp
->file
.hdrlen
= PPP_HDRLEN
- 2; /* don't count proto bytes */
2486 for (i
= 0; i
< NUM_NP
; ++i
)
2487 ppp
->npmode
[i
] = NPMODE_PASS
;
2488 INIT_LIST_HEAD(&ppp
->channels
);
2489 spin_lock_init(&ppp
->rlock
);
2490 spin_lock_init(&ppp
->wlock
);
2491 #ifdef CONFIG_PPP_MULTILINK
2493 skb_queue_head_init(&ppp
->mrq
);
2494 #endif /* CONFIG_PPP_MULTILINK */
2498 dev
->hard_start_xmit
= ppp_start_xmit
;
2499 dev
->get_stats
= ppp_net_stats
;
2500 dev
->do_ioctl
= ppp_net_ioctl
;
2503 mutex_lock(&all_ppp_mutex
);
2505 unit
= cardmap_find_first_free(all_ppp_units
);
2506 else if (cardmap_get(all_ppp_units
, unit
) != NULL
)
2507 goto out2
; /* unit already exists */
2509 /* Initialize the new ppp unit */
2510 ppp
->file
.index
= unit
;
2511 sprintf(dev
->name
, "ppp%d", unit
);
2513 ret
= register_netdev(dev
);
2515 printk(KERN_ERR
"PPP: couldn't register device %s (%d)\n",
2520 atomic_inc(&ppp_unit_count
);
2521 ret
= cardmap_set(&all_ppp_units
, unit
, ppp
);
2525 mutex_unlock(&all_ppp_mutex
);
2530 atomic_dec(&ppp_unit_count
);
2532 mutex_unlock(&all_ppp_mutex
);
2542 * Initialize a ppp_file structure.
2545 init_ppp_file(struct ppp_file
*pf
, int kind
)
2548 skb_queue_head_init(&pf
->xq
);
2549 skb_queue_head_init(&pf
->rq
);
2550 atomic_set(&pf
->refcnt
, 1);
2551 init_waitqueue_head(&pf
->rwait
);
2555 * Take down a ppp interface unit - called when the owning file
2556 * (the one that created the unit) is closed or detached.
2558 static void ppp_shutdown_interface(struct ppp
*ppp
)
2560 struct net_device
*dev
;
2562 mutex_lock(&all_ppp_mutex
);
2567 /* This will call dev_close() for us. */
2569 unregister_netdev(dev
);
2572 cardmap_set(&all_ppp_units
, ppp
->file
.index
, NULL
);
2575 wake_up_interruptible(&ppp
->file
.rwait
);
2576 mutex_unlock(&all_ppp_mutex
);
2580 * Free the memory used by a ppp unit. This is only called once
2581 * there are no channels connected to the unit and no file structs
2582 * that reference the unit.
2584 static void ppp_destroy_interface(struct ppp
*ppp
)
2586 atomic_dec(&ppp_unit_count
);
2588 if (!ppp
->file
.dead
|| ppp
->n_channels
) {
2589 /* "can't happen" */
2590 printk(KERN_ERR
"ppp: destroying ppp struct %p but dead=%d "
2591 "n_channels=%d !\n", ppp
, ppp
->file
.dead
,
2596 ppp_ccp_closed(ppp
);
2601 skb_queue_purge(&ppp
->file
.xq
);
2602 skb_queue_purge(&ppp
->file
.rq
);
2603 #ifdef CONFIG_PPP_MULTILINK
2604 skb_queue_purge(&ppp
->mrq
);
2605 #endif /* CONFIG_PPP_MULTILINK */
2606 #ifdef CONFIG_PPP_FILTER
2607 kfree(ppp
->pass_filter
);
2608 ppp
->pass_filter
= NULL
;
2609 kfree(ppp
->active_filter
);
2610 ppp
->active_filter
= NULL
;
2611 #endif /* CONFIG_PPP_FILTER */
2613 if (ppp
->xmit_pending
)
2614 kfree_skb(ppp
->xmit_pending
);
2620 * Locate an existing ppp unit.
2621 * The caller should have locked the all_ppp_mutex.
2624 ppp_find_unit(int unit
)
2626 return cardmap_get(all_ppp_units
, unit
);
2630 * Locate an existing ppp channel.
2631 * The caller should have locked the all_channels_lock.
2632 * First we look in the new_channels list, then in the
2633 * all_channels list. If found in the new_channels list,
2634 * we move it to the all_channels list. This is for speed
2635 * when we have a lot of channels in use.
2637 static struct channel
*
2638 ppp_find_channel(int unit
)
2640 struct channel
*pch
;
2642 list_for_each_entry(pch
, &new_channels
, list
) {
2643 if (pch
->file
.index
== unit
) {
2644 list_move(&pch
->list
, &all_channels
);
2648 list_for_each_entry(pch
, &all_channels
, list
) {
2649 if (pch
->file
.index
== unit
)
2656 * Connect a PPP channel to a PPP interface unit.
2659 ppp_connect_channel(struct channel
*pch
, int unit
)
2665 mutex_lock(&all_ppp_mutex
);
2666 ppp
= ppp_find_unit(unit
);
2669 write_lock_bh(&pch
->upl
);
2675 if (pch
->file
.hdrlen
> ppp
->file
.hdrlen
)
2676 ppp
->file
.hdrlen
= pch
->file
.hdrlen
;
2677 hdrlen
= pch
->file
.hdrlen
+ 2; /* for protocol bytes */
2678 if (ppp
->dev
&& hdrlen
> ppp
->dev
->hard_header_len
)
2679 ppp
->dev
->hard_header_len
= hdrlen
;
2680 list_add_tail(&pch
->clist
, &ppp
->channels
);
2683 atomic_inc(&ppp
->file
.refcnt
);
2688 write_unlock_bh(&pch
->upl
);
2690 mutex_unlock(&all_ppp_mutex
);
2695 * Disconnect a channel from its ppp unit.
2698 ppp_disconnect_channel(struct channel
*pch
)
2703 write_lock_bh(&pch
->upl
);
2706 write_unlock_bh(&pch
->upl
);
2708 /* remove it from the ppp unit's list */
2710 list_del(&pch
->clist
);
2711 if (--ppp
->n_channels
== 0)
2712 wake_up_interruptible(&ppp
->file
.rwait
);
2714 if (atomic_dec_and_test(&ppp
->file
.refcnt
))
2715 ppp_destroy_interface(ppp
);
2722 * Free up the resources used by a ppp channel.
2724 static void ppp_destroy_channel(struct channel
*pch
)
2726 atomic_dec(&channel_count
);
2728 if (!pch
->file
.dead
) {
2729 /* "can't happen" */
2730 printk(KERN_ERR
"ppp: destroying undead channel %p !\n",
2734 skb_queue_purge(&pch
->file
.xq
);
2735 skb_queue_purge(&pch
->file
.rq
);
2739 static void __exit
ppp_cleanup(void)
2741 /* should never happen */
2742 if (atomic_read(&ppp_unit_count
) || atomic_read(&channel_count
))
2743 printk(KERN_ERR
"PPP: removing module but units remain!\n");
2744 cardmap_destroy(&all_ppp_units
);
2745 if (unregister_chrdev(PPP_MAJOR
, "ppp") != 0)
2746 printk(KERN_ERR
"PPP: failed to unregister PPP device\n");
2747 device_destroy(ppp_class
, MKDEV(PPP_MAJOR
, 0));
2748 class_destroy(ppp_class
);
2752 * Cardmap implementation.
2754 static void *cardmap_get(struct cardmap
*map
, unsigned int nr
)
2759 for (p
= map
; p
!= NULL
; ) {
2760 if ((i
= nr
>> p
->shift
) >= CARDMAP_WIDTH
)
2764 nr
&= ~(CARDMAP_MASK
<< p
->shift
);
2770 static int cardmap_set(struct cardmap
**pmap
, unsigned int nr
, void *ptr
)
2776 if (p
== NULL
|| (nr
>> p
->shift
) >= CARDMAP_WIDTH
) {
2778 /* need a new top level */
2779 struct cardmap
*np
= kzalloc(sizeof(*np
), GFP_KERNEL
);
2784 np
->shift
= p
->shift
+ CARDMAP_ORDER
;
2789 } while ((nr
>> p
->shift
) >= CARDMAP_WIDTH
);
2792 while (p
->shift
> 0) {
2793 i
= (nr
>> p
->shift
) & CARDMAP_MASK
;
2794 if (p
->ptr
[i
] == NULL
) {
2795 struct cardmap
*np
= kzalloc(sizeof(*np
), GFP_KERNEL
);
2798 np
->shift
= p
->shift
- CARDMAP_ORDER
;
2803 clear_bit(i
, &p
->inuse
);
2806 i
= nr
& CARDMAP_MASK
;
2809 set_bit(i
, &p
->inuse
);
2811 clear_bit(i
, &p
->inuse
);
2817 static unsigned int cardmap_find_first_free(struct cardmap
*map
)
2820 unsigned int nr
= 0;
2823 if ((p
= map
) == NULL
)
2826 i
= find_first_zero_bit(&p
->inuse
, CARDMAP_WIDTH
);
2827 if (i
>= CARDMAP_WIDTH
) {
2828 if (p
->parent
== NULL
)
2829 return CARDMAP_WIDTH
<< p
->shift
;
2831 i
= (nr
>> p
->shift
) & CARDMAP_MASK
;
2832 set_bit(i
, &p
->inuse
);
2835 nr
= (nr
& (~CARDMAP_MASK
<< p
->shift
)) | (i
<< p
->shift
);
2836 if (p
->shift
== 0 || p
->ptr
[i
] == NULL
)
2842 static void cardmap_destroy(struct cardmap
**pmap
)
2844 struct cardmap
*p
, *np
;
2847 for (p
= *pmap
; p
!= NULL
; p
= np
) {
2848 if (p
->shift
!= 0) {
2849 for (i
= 0; i
< CARDMAP_WIDTH
; ++i
)
2850 if (p
->ptr
[i
] != NULL
)
2852 if (i
< CARDMAP_WIDTH
) {
/* Module/initialization stuff */

module_init(ppp_init);
module_exit(ppp_cleanup);

EXPORT_SYMBOL(ppp_register_channel);
EXPORT_SYMBOL(ppp_unregister_channel);
EXPORT_SYMBOL(ppp_channel_index);
EXPORT_SYMBOL(ppp_unit_number);
EXPORT_SYMBOL(ppp_input);
EXPORT_SYMBOL(ppp_input_error);
EXPORT_SYMBOL(ppp_output_wakeup);
EXPORT_SYMBOL(ppp_register_compressor);
EXPORT_SYMBOL(ppp_unregister_compressor);
MODULE_LICENSE("GPL");
MODULE_ALIAS_CHARDEV_MAJOR(PPP_MAJOR);
MODULE_ALIAS("/dev/ppp");