/*
 *	X.25 Packet Layer release 002
 *
 *	This is ALPHA test software. This code may break your machine,
 *	randomly fail to work with new releases, misbehave and/or generally
 *	screw up. It might even work.
 *
 *	This code REQUIRES 2.1.15 or higher
 *
 *	This module:
 *		This module is free software; you can redistribute it and/or
 *		modify it under the terms of the GNU General Public License
 *		as published by the Free Software Foundation; either version
 *		2 of the License, or (at your option) any later version.
 *
 *	History
 *	X.25 001	Jonathan Naylor	  Started coding.
 *	X.25 002	Jonathan Naylor	  New timer architecture.
 *	mar/20/00	Daniela Squassoni Disabling/enabling of facilities
 *					  negotiation.
 *	2000-09-04	Henner Eisen	  dev_hold() / dev_put() for x25_neigh.
 */
#include <linux/kernel.h>
#include <linux/jiffies.h>
#include <linux/timer.h>
#include <linux/slab.h>
#include <linux/netdevice.h>
#include <linux/skbuff.h>
#include <asm/uaccess.h>
#include <linux/init.h>
#include <net/x25.h>

LIST_HEAD(x25_neigh_list);
DEFINE_RWLOCK(x25_neigh_list_lock);
static void x25_t20timer_expiry(unsigned long);

static void x25_transmit_restart_confirmation(struct x25_neigh *nb);
static void x25_transmit_restart_request(struct x25_neigh *nb);
/*
 *	Linux set/reset timer routines
 */
static inline void x25_start_t20timer(struct x25_neigh *nb)
{
	mod_timer(&nb->t20timer, jiffies + nb->t20);
}

static void x25_t20timer_expiry(unsigned long param)
{
	struct x25_neigh *nb = (struct x25_neigh *)param;

	x25_transmit_restart_request(nb);

	x25_start_t20timer(nb);
}

static inline void x25_stop_t20timer(struct x25_neigh *nb)
{
	del_timer(&nb->t20timer);
}

static inline int x25_t20timer_pending(struct x25_neigh *nb)
{
	return timer_pending(&nb->t20timer);
}
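
/*
 * T20 is the Restart Request retransmission timer.  x25_t20timer_expiry()
 * resends the Restart Request and re-arms itself; the timer is stopped by
 * x25_link_control() when a Restart Request or Restart Confirmation is
 * received.  nb->t20 is initialised from sysctl_x25_restart_request_timeout
 * in x25_link_device_up() below.
 */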
/*
 *	This handles all restart and diagnostic frames.
 */
void x25_link_control(struct sk_buff *skb, struct x25_neigh *nb,
		      unsigned short frametype)
{
	struct sk_buff *skbn;
	int confirm;

	switch (frametype) {
	case X25_RESTART_REQUEST:
		confirm = !x25_t20timer_pending(nb);
		x25_stop_t20timer(nb);
		nb->state = X25_LINK_STATE_3;
		if (confirm)
			x25_transmit_restart_confirmation(nb);
		break;

	case X25_RESTART_CONFIRMATION:
		x25_stop_t20timer(nb);
		nb->state = X25_LINK_STATE_3;
		break;

	case X25_DIAGNOSTIC:
		if (!pskb_may_pull(skb, X25_STD_MIN_LEN + 4))
			break;

		printk(KERN_WARNING "x25: diagnostic #%d - %02X %02X %02X\n",
		       skb->data[3], skb->data[4],
		       skb->data[5], skb->data[6]);
		break;

	default:
		printk(KERN_WARNING "x25: received unknown %02X with LCI 000\n",
		       frametype);
		break;
	}

	/* Link is up: flush any frames queued while it was being restarted. */
	if (nb->state == X25_LINK_STATE_3)
		while ((skbn = skb_dequeue(&nb->queue)) != NULL)
			x25_send_frame(skbn, nb);
}
/*
 *	This routine is called when a Restart Request is needed
 */
static void x25_transmit_restart_request(struct x25_neigh *nb)
{
	unsigned char *dptr;
	int len = X25_MAX_L2_LEN + X25_STD_MIN_LEN + 2;
	struct sk_buff *skb = alloc_skb(len, GFP_ATOMIC);

	if (!skb)
		return;

	skb_reserve(skb, X25_MAX_L2_LEN);

	dptr = skb_put(skb, X25_STD_MIN_LEN + 2);

	*dptr++ = nb->extended ? X25_GFI_EXTSEQ : X25_GFI_STDSEQ;
	*dptr++ = 0x00;			/* LCI 0 for restart packets */
	*dptr++ = X25_RESTART_REQUEST;
	*dptr++ = 0x00;			/* cause */
	*dptr++ = 0;			/* diagnostic */

	skb->sk = NULL;

	x25_send_frame(skb, nb);
}
/*
 *	This routine is called when a Restart Confirmation is needed
 */
static void x25_transmit_restart_confirmation(struct x25_neigh *nb)
{
	unsigned char *dptr;
	int len = X25_MAX_L2_LEN + X25_STD_MIN_LEN;
	struct sk_buff *skb = alloc_skb(len, GFP_ATOMIC);

	if (!skb)
		return;

	skb_reserve(skb, X25_MAX_L2_LEN);

	dptr = skb_put(skb, X25_STD_MIN_LEN);

	*dptr++ = nb->extended ? X25_GFI_EXTSEQ : X25_GFI_STDSEQ;
	*dptr++ = 0x00;			/* LCI 0 for restart packets */
	*dptr++ = X25_RESTART_CONFIRMATION;

	skb->sk = NULL;

	x25_send_frame(skb, nb);
}
/*
 *	This routine is called when a Clear Request is needed outside of the context
 *	of a connected socket.
 */
void x25_transmit_clear_request(struct x25_neigh *nb, unsigned int lci,
				unsigned char cause)
{
	unsigned char *dptr;
	int len = X25_MAX_L2_LEN + X25_STD_MIN_LEN + 2;
	struct sk_buff *skb = alloc_skb(len, GFP_ATOMIC);

	if (!skb)
		return;

	skb_reserve(skb, X25_MAX_L2_LEN);

	dptr = skb_put(skb, X25_STD_MIN_LEN + 2);

	*dptr++ = ((lci >> 8) & 0x0F) | (nb->extended ?
					 X25_GFI_EXTSEQ :
					 X25_GFI_STDSEQ);
	*dptr++ = (lci >> 0) & 0xFF;
	*dptr++ = X25_CLEAR_REQUEST;
	*dptr++ = cause;
	*dptr++ = 0x00;			/* diagnostic */

	skb->sk = NULL;

	x25_send_frame(skb, nb);
}
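
/*
 * Header layout used by the three transmit routines above (standard,
 * modulo-8 header): byte 0 carries the GFI in the high nibble plus the
 * top four bits of the LCI, byte 1 the low eight bits of the LCI, and
 * byte 2 the packet type, followed where applicable by cause and
 * diagnostic octets.  Restart packets always use LCI 0.
 */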
void x25_transmit_link(struct sk_buff *skb, struct x25_neigh *nb)
{
	switch (nb->state) {
	case X25_LINK_STATE_0:
		skb_queue_tail(&nb->queue, skb);
		nb->state = X25_LINK_STATE_1;
		x25_establish_link(nb);
		break;
	case X25_LINK_STATE_1:
	case X25_LINK_STATE_2:
		skb_queue_tail(&nb->queue, skb);
		break;
	case X25_LINK_STATE_3:
		x25_send_frame(skb, nb);
		break;
	}
}
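
/*
 * Link state summary, as used by the routines above and below:
 * X25_LINK_STATE_0 - idle, no link; X25_LINK_STATE_1 - layer 2
 * establishment requested, frames are queued; X25_LINK_STATE_2 - layer 2
 * up, restart exchange in progress; X25_LINK_STATE_3 - packet layer
 * ready, frames are sent directly.
 */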
/*
 *	Called when the link layer has become established.
 */
void x25_link_established(struct x25_neigh *nb)
{
	switch (nb->state) {
	case X25_LINK_STATE_0:
		nb->state = X25_LINK_STATE_2;
		break;
	case X25_LINK_STATE_1:
		x25_transmit_restart_request(nb);
		nb->state = X25_LINK_STATE_2;
		x25_start_t20timer(nb);
		break;
	}
}
/*
 *	Called when the link layer has terminated, or an establishment
 *	request has failed.
 */
void x25_link_terminated(struct x25_neigh *nb)
{
	nb->state = X25_LINK_STATE_0;
	/* Out of order: clear existing virtual calls (X.25 03/93 4.6.3) */
	x25_kill_by_neigh(nb);
}
/*
 *	Add a new device.
 */
void x25_link_device_up(struct net_device *dev)
{
	struct x25_neigh *nb = kmalloc(sizeof(*nb), GFP_ATOMIC);

	if (!nb)
		return;

	skb_queue_head_init(&nb->queue);
	setup_timer(&nb->t20timer, x25_t20timer_expiry, (unsigned long)nb);

	dev_hold(dev);
	nb->dev      = dev;
	nb->state    = X25_LINK_STATE_0;
	nb->extended = 0;
	/*
	 * Enables negotiation
	 */
	nb->global_facil_mask = X25_MASK_REVERSE |
				X25_MASK_THROUGHPUT |
				X25_MASK_PACKET_SIZE |
				X25_MASK_WINDOW_SIZE;
	nb->t20      = sysctl_x25_restart_request_timeout;
	atomic_set(&nb->refcnt, 1);

	write_lock_bh(&x25_neigh_list_lock);
	list_add(&nb->node, &x25_neigh_list);
	write_unlock_bh(&x25_neigh_list_lock);
}
/**
 *	__x25_remove_neigh - remove neighbour from x25_neigh_list
 *	@nb: neigh to remove
 *
 *	Remove neighbour from x25_neigh_list, if it is there.
 *	Caller must hold x25_neigh_list_lock.
 */
static void __x25_remove_neigh(struct x25_neigh *nb)
{
	skb_queue_purge(&nb->queue);
	x25_stop_t20timer(nb);

	if (nb->node.next) {
		list_del(&nb->node);
		x25_neigh_put(nb);
	}
}
/*
 *	A device has been removed, remove its links.
 */
void x25_link_device_down(struct net_device *dev)
{
	struct x25_neigh *nb;
	struct list_head *entry, *tmp;

	write_lock_bh(&x25_neigh_list_lock);

	list_for_each_safe(entry, tmp, &x25_neigh_list) {
		nb = list_entry(entry, struct x25_neigh, node);

		if (nb->dev == dev) {
			__x25_remove_neigh(nb);
			dev_put(dev);
		}
	}

	write_unlock_bh(&x25_neigh_list_lock);
}
/*
 *	Given a device, return its neighbour structure (with a reference held).
 */
struct x25_neigh *x25_get_neigh(struct net_device *dev)
{
	struct x25_neigh *nb, *use = NULL;
	struct list_head *entry;

	read_lock_bh(&x25_neigh_list_lock);
	list_for_each(entry, &x25_neigh_list) {
		nb = list_entry(entry, struct x25_neigh, node);

		if (nb->dev == dev) {
			use = nb;
			break;
		}
	}

	if (use)
		x25_neigh_hold(use);
	read_unlock_bh(&x25_neigh_list_lock);
	return use;
}
/*
 *	Handle the ioctls that control the subscription functions.
 */
int x25_subscr_ioctl(unsigned int cmd, void __user *arg)
{
	struct x25_subscrip_struct x25_subscr;
	struct x25_neigh *nb;
	struct net_device *dev;
	int rc = -EINVAL;

	if (cmd != SIOCX25GSUBSCRIP && cmd != SIOCX25SSUBSCRIP)
		goto out;

	rc = -EFAULT;
	if (copy_from_user(&x25_subscr, arg, sizeof(x25_subscr)))
		goto out;

	rc = -EINVAL;
	if ((dev = x25_dev_get(x25_subscr.device)) == NULL)
		goto out;

	if ((nb = x25_get_neigh(dev)) == NULL)
		goto out_dev_put;

	dev_put(dev);

	if (cmd == SIOCX25GSUBSCRIP) {
		read_lock_bh(&x25_neigh_list_lock);
		x25_subscr.extended	     = nb->extended;
		x25_subscr.global_facil_mask = nb->global_facil_mask;
		read_unlock_bh(&x25_neigh_list_lock);
		rc = copy_to_user(arg, &x25_subscr,
				  sizeof(x25_subscr)) ? -EFAULT : 0;
	} else {
		rc = -EINVAL;
		if (!(x25_subscr.extended && x25_subscr.extended != 1)) {
			rc = 0;
			write_lock_bh(&x25_neigh_list_lock);
			nb->extended	     = x25_subscr.extended;
			nb->global_facil_mask = x25_subscr.global_facil_mask;
			write_unlock_bh(&x25_neigh_list_lock);
		}
	}
	x25_neigh_put(nb);
out:
	return rc;
out_dev_put:
	dev_put(dev);
	goto out;
}
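
/*
 * Sketch of how userspace is expected to drive the ioctls above, for
 * reference only (not compiled here).  The device name "x25tap0" is a
 * made-up example and error handling is omitted:
 *
 *	#include <string.h>
 *	#include <sys/ioctl.h>
 *	#include <sys/socket.h>
 *	#include <linux/x25.h>
 *
 *	int fd = socket(AF_X25, SOCK_SEQPACKET, 0);
 *	struct x25_subscrip_struct subscr;
 *
 *	memset(&subscr, 0, sizeof(subscr));
 *	strncpy(subscr.device, "x25tap0", sizeof(subscr.device) - 1);
 *	ioctl(fd, SIOCX25GSUBSCRIP, &subscr);	(query current settings)
 *	subscr.extended = 1;			(request modulo-128 sequencing)
 *	ioctl(fd, SIOCX25SSUBSCRIP, &subscr);	(write them back)
 */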
/*
 *	Release all memory associated with X.25 neighbour structures.
 */
void __exit x25_link_free(void)
{
	struct x25_neigh *nb;
	struct list_head *entry, *tmp;

	write_lock_bh(&x25_neigh_list_lock);

	list_for_each_safe(entry, tmp, &x25_neigh_list) {
		struct net_device *dev;

		nb = list_entry(entry, struct x25_neigh, node);
		dev = nb->dev;
		__x25_remove_neigh(nb);
		dev_put(dev);
	}
	write_unlock_bh(&x25_neigh_list_lock);
}