/*
 *	X.25 Packet Layer release 002
 *
 *	This is ALPHA test software. This code may break your machine,
 *	randomly fail to work with new releases, misbehave and/or generally
 *	screw up. It might even work.
 *
 *	This code REQUIRES 2.1.15 or higher
 *
 *	This module is free software; you can redistribute it and/or
 *	modify it under the terms of the GNU General Public License
 *	as published by the Free Software Foundation; either version
 *	2 of the License, or (at your option) any later version.
 *
 *	History
 *	X.25 001	Jonathan Naylor	  Started coding.
 *	X.25 002	Jonathan Naylor	  Centralised disconnection processing.
 *	mar/20/00	Daniela Squassoni Disabling/enabling of facilities
 *					  negotiation.
 *	jun/24/01	Arnaldo C. Melo	  use skb_queue_purge, cleanups
 *	apr/04/15	Shaun Pereira	  Fast select with no
 *					  restriction on response.
 */
#include <linux/slab.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/skbuff.h>
#include <net/sock.h>
#include <net/tcp_states.h>
#include <net/x25.h>
/*
 *	This routine purges all of the queues of frames.
 */
void x25_clear_queues(struct sock *sk)
{
	struct x25_sock *x25 = x25_sk(sk);

	skb_queue_purge(&sk->sk_write_queue);
	skb_queue_purge(&x25->ack_queue);
	skb_queue_purge(&x25->interrupt_in_queue);
	skb_queue_purge(&x25->interrupt_out_queue);
	skb_queue_purge(&x25->fragment_queue);
}
/*
 * This routine purges the input queue of those frames that have been
 * acknowledged. This replaces the boxes labelled "V(a) <- N(r)" on the
 * SDL diagram.
 */
void x25_frames_acked(struct sock *sk, unsigned short nr)
{
	struct sk_buff *skb;
	struct x25_sock *x25 = x25_sk(sk);
	int modulus = x25->neighbour->extended ? X25_EMODULUS : X25_SMODULUS;

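	/*
	 * Extended operation uses modulo-128 sequence numbering
	 * (X25_EMODULUS), standard operation modulo-8 (X25_SMODULUS),
	 * so V(A) must wrap with the same modulus the link is using.
	 */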
	/*
	 * Remove all the ack-ed frames from the ack queue.
	 */
	if (x25->va != nr)
		while (skb_peek(&x25->ack_queue) && x25->va != nr) {
			skb = skb_dequeue(&x25->ack_queue);
			kfree_skb(skb);
			x25->va = (x25->va + 1) % modulus;
		}
}
void x25_requeue_frames(struct sock *sk)
{
	struct sk_buff *skb, *skb_prev = NULL;

	/*
	 * Requeue all the un-ack-ed frames on the output queue to be picked
	 * up by x25_kick. This arrangement handles the possibility of an
	 * empty output queue.
	 */
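	/*
	 * The first frame dequeued goes back to the head of the write
	 * queue; every subsequent frame is appended behind the previous
	 * one, so the original transmission order is preserved.
	 */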
	while ((skb = skb_dequeue(&x25_sk(sk)->ack_queue)) != NULL) {
		if (!skb_prev)
			skb_queue_head(&sk->sk_write_queue, skb);
		else
			skb_append(skb_prev, skb, &sk->sk_write_queue);
		skb_prev = skb;
	}
}
/*
 *	Validate that the value of nr is between va and vs. Return true or
 *	false for testing.
 */
int x25_validate_nr(struct sock *sk, unsigned short nr)
{
	struct x25_sock *x25 = x25_sk(sk);
	unsigned short vc = x25->va;
	int modulus = x25->neighbour->extended ? X25_EMODULUS : X25_SMODULUS;

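	/*
	 * Walk the window from V(A) up to (but not including) V(S); any
	 * value in that range, or V(S) itself, is a legal N(R).
	 */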
	while (vc != x25->vs) {
		if (nr == vc)
			return 1;
		vc = (vc + 1) % modulus;
	}

	return nr == x25->vs ? 1 : 0;
}
/*
 *	This routine is called when the packet layer internally generates a
 *	control frame.
 */
void x25_write_internal(struct sock *sk, int frametype)
{
	struct x25_sock *x25 = x25_sk(sk);
	struct sk_buff *skb;
	unsigned char *dptr;
	unsigned char facilities[X25_MAX_FAC_LEN];
	unsigned char addresses[1 + X25_ADDR_LEN];
	unsigned char lci1, lci2;
	/*
	 *	Default safe frame size.
	 */
	int len = X25_MAX_L2_LEN + X25_EXT_MIN_LEN;

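	/*
	 * The default budget covers the largest link layer header
	 * (X25_MAX_L2_LEN) plus the larger, extended-modulus minimum PLP
	 * header (X25_EXT_MIN_LEN); the switch below enlarges it for
	 * packet types that also carry addresses, facilities or call
	 * user data.
	 */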
	/*
	 *	Adjust frame size.
	 */
	switch (frametype) {
	case X25_CALL_REQUEST:
		len += 1 + X25_ADDR_LEN + X25_MAX_FAC_LEN + X25_MAX_CUD_LEN;
		break;
	case X25_CALL_ACCEPTED: /* fast sel with no restr on resp */
		if (x25->facilities.reverse & 0x80) {
			len += 1 + X25_MAX_FAC_LEN + X25_MAX_CUD_LEN;
		} else {
			len += 1 + X25_MAX_FAC_LEN;
		}
		break;
	case X25_CLEAR_REQUEST:
	case X25_RESET_REQUEST:
		len += 2;
		break;
	case X25_RR:
	case X25_RNR:
	case X25_REJ:
	case X25_CLEAR_CONFIRMATION:
	case X25_INTERRUPT_CONFIRMATION:
	case X25_RESET_CONFIRMATION:
		break;
	default:
		printk(KERN_ERR "X.25: invalid frame type %02X\n", frametype);
		return;
	}

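	/*
	 * The allocation is atomic because this routine may run outside
	 * process context, e.g. from the protocol timers or the receive
	 * path.
	 */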
	if ((skb = alloc_skb(len, GFP_ATOMIC)) == NULL)
		return;

	/*
	 *	Space for Ethernet and 802.2 LLC headers.
	 */
	skb_reserve(skb, X25_MAX_L2_LEN);

	/*
	 *	Make space for the GFI and LCI, and fill them in.
	 */
	dptr = skb_put(skb, 2);

	lci1 = (x25->lci >> 8) & 0x0F;
	lci2 = (x25->lci >> 0) & 0xFF;

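	/*
	 * The first octet carries the GFI (including the modulus
	 * indication) in its high nibble and the top four bits of the
	 * 12-bit logical channel identifier in its low nibble; the
	 * second octet holds the remaining eight LCI bits.
	 */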
	if (x25->neighbour->extended) {
		*dptr++ = lci1 | X25_GFI_EXTSEQ;
		*dptr++ = lci2;
	} else {
		*dptr++ = lci1 | X25_GFI_STDSEQ;
		*dptr++ = lci2;
	}

	/*
	 *	Now fill in the frame type specific information.
	 */
	switch (frametype) {

	case X25_CALL_REQUEST:
		dptr    = skb_put(skb, 1);
		*dptr++ = X25_CALL_REQUEST;
		len     = x25_addr_aton(addresses, &x25->dest_addr,
					&x25->source_addr);
		dptr    = skb_put(skb, len);
		memcpy(dptr, addresses, len);
		len     = x25_create_facilities(facilities,
						&x25->facilities,
						&x25->dte_facilities,
						x25->neighbour->global_facil_mask);
		dptr    = skb_put(skb, len);
		memcpy(dptr, facilities, len);
		dptr    = skb_put(skb, x25->calluserdata.cudlength);
		memcpy(dptr, x25->calluserdata.cuddata,
		       x25->calluserdata.cudlength);
		x25->calluserdata.cudlength = 0;
		break;

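	/*
	 * A Call Accepted packet echoes no addresses (a single zero
	 * length octet), carries the negotiated facilities and, only
	 * when fast select with no restriction on response was offered,
	 * any call user data supplied by userland.
	 */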
	case X25_CALL_ACCEPTED:
		dptr    = skb_put(skb, 2);
		*dptr++ = X25_CALL_ACCEPTED;
		*dptr++ = 0x00;		/* Address lengths */
		len     = x25_create_facilities(facilities,
						&x25->facilities,
						&x25->dte_facilities,
						x25->vc_facil_mask);
		dptr    = skb_put(skb, len);
		memcpy(dptr, facilities, len);

		/* fast select with no restriction on response
		   allows call user data. Userland must
		   ensure it is ours and not theirs */
		if (x25->facilities.reverse & 0x80) {
			dptr = skb_put(skb,
				       x25->calluserdata.cudlength);
			memcpy(dptr, x25->calluserdata.cuddata,
			       x25->calluserdata.cudlength);
		}
		x25->calluserdata.cudlength = 0;
		break;

	case X25_CLEAR_REQUEST:
		dptr    = skb_put(skb, 3);
		*dptr++ = frametype;
		*dptr++ = x25->causediag.cause;
		*dptr++ = x25->causediag.diagnostic;
		break;

	case X25_RESET_REQUEST:
		dptr    = skb_put(skb, 3);
		*dptr++ = frametype;
		*dptr++ = 0x00;		/* XXX */
		*dptr++ = 0x00;		/* XXX */
		break;

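	/*
	 * Supervisory packets (RR, RNR, REJ) carry only N(R): in extended
	 * mode it occupies bits 1-7 of an extra octet, in standard mode
	 * the top three bits of the packet type octet itself.
	 */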
	case X25_RR:
	case X25_RNR:
	case X25_REJ:
		if (x25->neighbour->extended) {
			dptr     = skb_put(skb, 2);
			*dptr++  = frametype;
			*dptr++  = (x25->vr << 1) & 0xFE;
		} else {
			dptr     = skb_put(skb, 1);
			*dptr    = frametype;
			*dptr++ |= (x25->vr << 5) & 0xE0;
		}
		break;

	case X25_CLEAR_CONFIRMATION:
	case X25_INTERRUPT_CONFIRMATION:
	case X25_RESET_CONFIRMATION:
		dptr  = skb_put(skb, 1);
		*dptr = frametype;
		break;
	}

	x25_transmit_link(skb, x25->neighbour);
}
/*
 *	Unpick the contents of the passed X.25 Packet Layer frame.
 */
int x25_decode(struct sock *sk, struct sk_buff *skb, int *ns, int *nr, int *q,
	       int *d, int *m)
{
	struct x25_sock *x25 = x25_sk(sk);
	unsigned char *frame;

	if (!pskb_may_pull(skb, X25_STD_MIN_LEN))
		return X25_ILLEGAL;
	frame = skb->data;

	*ns = *nr = *q = *d = *m = 0;

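	/*
	 * Packet types that carry no sequence numbers are identified by
	 * the full third octet and handed back to the caller unchanged.
	 */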
	switch (frame[2]) {
	case X25_CALL_REQUEST:
	case X25_CALL_ACCEPTED:
	case X25_CLEAR_REQUEST:
	case X25_CLEAR_CONFIRMATION:
	case X25_INTERRUPT:
	case X25_INTERRUPT_CONFIRMATION:
	case X25_RESET_REQUEST:
	case X25_RESET_CONFIRMATION:
	case X25_RESTART_REQUEST:
	case X25_RESTART_CONFIRMATION:
	case X25_REGISTRATION_REQUEST:
	case X25_REGISTRATION_CONFIRMATION:
	case X25_DIAGNOSTIC:
		return frame[2];
	}

	if (x25->neighbour->extended) {
		if (frame[2] == X25_RR  ||
		    frame[2] == X25_RNR ||
		    frame[2] == X25_REJ) {
			if (!pskb_may_pull(skb, X25_EXT_MIN_LEN))
				return X25_ILLEGAL;
			frame = skb->data;

			*nr = (frame[3] >> 1) & 0x7F;
			return frame[2];
		}
	} else {
		if ((frame[2] & 0x1F) == X25_RR  ||
		    (frame[2] & 0x1F) == X25_RNR ||
		    (frame[2] & 0x1F) == X25_REJ) {
			*nr = (frame[2] >> 5) & 0x07;
			return frame[2] & 0x1F;
		}
	}

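	/*
	 * Data packets: Q and D come from the GFI octet, while M, N(R)
	 * and N(S) live in the sequence octets, modulo 128 in extended
	 * mode and modulo 8 otherwise.
	 */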
	if (x25->neighbour->extended) {
		if ((frame[2] & 0x01) == X25_DATA) {
			if (!pskb_may_pull(skb, X25_EXT_MIN_LEN))
				return X25_ILLEGAL;
			frame = skb->data;

			*q  = (frame[0] & X25_Q_BIT) == X25_Q_BIT;
			*d  = (frame[0] & X25_D_BIT) == X25_D_BIT;
			*m  = (frame[3] & X25_EXT_M_BIT) == X25_EXT_M_BIT;
			*nr = (frame[3] >> 1) & 0x7F;
			*ns = (frame[2] >> 1) & 0x7F;
			return X25_DATA;
		}
	} else {
		if ((frame[2] & 0x01) == X25_DATA) {
			*q  = (frame[0] & X25_Q_BIT) == X25_Q_BIT;
			*d  = (frame[0] & X25_D_BIT) == X25_D_BIT;
			*m  = (frame[2] & X25_STD_M_BIT) == X25_STD_M_BIT;
			*nr = (frame[2] >> 5) & 0x07;
			*ns = (frame[2] >> 1) & 0x07;
			return X25_DATA;
		}
	}

	printk(KERN_DEBUG "X.25: invalid PLP frame %02X %02X %02X\n",
	       frame[0], frame[1], frame[2]);

	return X25_ILLEGAL;
}
void x25_disconnect(struct sock *sk, int reason, unsigned char cause,
		    unsigned char diagnostic)
{
	struct x25_sock *x25 = x25_sk(sk);

	x25_clear_queues(sk);
	x25_stop_timer(sk);

	x25->lci   = 0;
	x25->state = X25_STATE_0;

	x25->causediag.cause      = cause;
	x25->causediag.diagnostic = diagnostic;

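	/*
	 * Report the reason through sk_err, flag the socket closed and
	 * shut down further sends, then wake anyone sleeping on the
	 * socket before marking it dead.
	 */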
	sk->sk_state     = TCP_CLOSE;
	sk->sk_err       = reason;
	sk->sk_shutdown |= SEND_SHUTDOWN;

	if (!sock_flag(sk, SOCK_DEAD)) {
		sk->sk_state_change(sk);
		sock_set_flag(sk, SOCK_DEAD);
	}
}
/*
 * Clear an own-rx-busy condition and tell the peer about this, provided
 * that there is a significant amount of free receive buffer space available.
 */
void x25_check_rbuf(struct sock *sk)
{
	struct x25_sock *x25 = x25_sk(sk);

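	/*
	 * Only lift our own receiver-busy condition once less than half
	 * of the receive buffer is in use; the RR sent below then invites
	 * the peer to resume sending.
	 */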
	if (atomic_read(&sk->sk_rmem_alloc) < (sk->sk_rcvbuf >> 1) &&
	    (x25->condition & X25_COND_OWN_RX_BUSY)) {
		x25->condition &= ~X25_COND_OWN_RX_BUSY;
		x25->condition &= ~X25_COND_ACK_PENDING;
		x25->vl         = x25->vr;
		x25_write_internal(sk, X25_RR);
		x25_stop_timer(sk);
	}
}
);