/* net/ax25/ax25_in.c — AX.25 frame reception and input demultiplexing. */
/*
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * Copyright (C) Alan Cox GW4PTS (alan@lxorguk.ukuu.org.uk)
 * Copyright (C) Jonathan Naylor G4KLX (g4klx@g4klx.demon.co.uk)
 * Copyright (C) Joerg Reuter DL1BKE (jreuter@yaina.de)
 * Copyright (C) Hans-Joachim Hetscher DD8NE (dd8ne@bnv-bamberg.de)
 */
#include <linux/errno.h>
#include <linux/types.h>
#include <linux/socket.h>
#include <linux/in.h>
#include <linux/kernel.h>
#include <linux/timer.h>
#include <linux/string.h>
#include <linux/sockios.h>
#include <linux/net.h>
#include <net/ax25.h>
#include <linux/inet.h>
#include <linux/netdevice.h>
#include <linux/skbuff.h>
#include <linux/netfilter.h>
#include <net/sock.h>
#include <net/tcp_states.h>
#include <asm/uaccess.h>
#include <asm/system.h>
#include <linux/fcntl.h>
#include <linux/mm.h>
#include <linux/interrupt.h>
35 * Given a fragment, queue it on the fragment queue and if the fragment
36 * is complete, send it back to ax25_rx_iframe.
38 static int ax25_rx_fragment(ax25_cb *ax25, struct sk_buff *skb)
40 struct sk_buff *skbn, *skbo;
42 if (ax25->fragno != 0) {
43 if (!(*skb->data & AX25_SEG_FIRST)) {
44 if ((ax25->fragno - 1) == (*skb->data & AX25_SEG_REM)) {
45 /* Enqueue fragment */
46 ax25->fragno = *skb->data & AX25_SEG_REM;
47 skb_pull(skb, 1); /* skip fragno */
48 ax25->fraglen += skb->len;
49 skb_queue_tail(&ax25->frag_queue, skb);
51 /* Last fragment received ? */
52 if (ax25->fragno == 0) {
53 skbn = alloc_skb(AX25_MAX_HEADER_LEN +
54 ax25->fraglen,
55 GFP_ATOMIC);
56 if (!skbn) {
57 skb_queue_purge(&ax25->frag_queue);
58 return 1;
61 skb_reserve(skbn, AX25_MAX_HEADER_LEN);
63 skbn->dev = ax25->ax25_dev->dev;
64 skb_reset_network_header(skbn);
65 skb_reset_transport_header(skbn);
67 /* Copy data from the fragments */
68 while ((skbo = skb_dequeue(&ax25->frag_queue)) != NULL) {
69 skb_copy_from_linear_data(skbo,
70 skb_put(skbn, skbo->len),
71 skbo->len);
72 kfree_skb(skbo);
75 ax25->fraglen = 0;
77 if (ax25_rx_iframe(ax25, skbn) == 0)
78 kfree_skb(skbn);
81 return 1;
84 } else {
85 /* First fragment received */
86 if (*skb->data & AX25_SEG_FIRST) {
87 skb_queue_purge(&ax25->frag_queue);
88 ax25->fragno = *skb->data & AX25_SEG_REM;
89 skb_pull(skb, 1); /* skip fragno */
90 ax25->fraglen = skb->len;
91 skb_queue_tail(&ax25->frag_queue, skb);
92 return 1;
96 return 0;
100 * This is where all valid I frames are sent to, to be dispatched to
101 * whichever protocol requires them.
103 int ax25_rx_iframe(ax25_cb *ax25, struct sk_buff *skb)
105 int (*func)(struct sk_buff *, ax25_cb *);
106 unsigned char pid;
107 int queued = 0;
109 if (skb == NULL) return 0;
111 ax25_start_idletimer(ax25);
113 pid = *skb->data;
115 if (pid == AX25_P_IP) {
116 /* working around a TCP bug to keep additional listeners
117 * happy. TCP re-uses the buffer and destroys the original
118 * content.
120 struct sk_buff *skbn = skb_copy(skb, GFP_ATOMIC);
121 if (skbn != NULL) {
122 kfree_skb(skb);
123 skb = skbn;
126 skb_pull(skb, 1); /* Remove PID */
127 skb->mac_header = skb->network_header;
128 skb_reset_network_header(skb);
129 skb->dev = ax25->ax25_dev->dev;
130 skb->pkt_type = PACKET_HOST;
131 skb->protocol = htons(ETH_P_IP);
132 netif_rx(skb);
133 return 1;
135 if (pid == AX25_P_SEGMENT) {
136 skb_pull(skb, 1); /* Remove PID */
137 return ax25_rx_fragment(ax25, skb);
140 if ((func = ax25_protocol_function(pid)) != NULL) {
141 skb_pull(skb, 1); /* Remove PID */
142 return (*func)(skb, ax25);
145 if (ax25->sk != NULL && ax25->ax25_dev->values[AX25_VALUES_CONMODE] == 2) {
146 if ((!ax25->pidincl && ax25->sk->sk_protocol == pid) ||
147 ax25->pidincl) {
148 if (sock_queue_rcv_skb(ax25->sk, skb) == 0)
149 queued = 1;
150 else
151 ax25->condition |= AX25_COND_OWN_RX_BUSY;
155 return queued;
159 * Higher level upcall for a LAPB frame
161 static int ax25_process_rx_frame(ax25_cb *ax25, struct sk_buff *skb, int type, int dama)
163 int queued = 0;
165 if (ax25->state == AX25_STATE_0)
166 return 0;
168 switch (ax25->ax25_dev->values[AX25_VALUES_PROTOCOL]) {
169 case AX25_PROTO_STD_SIMPLEX:
170 case AX25_PROTO_STD_DUPLEX:
171 queued = ax25_std_frame_in(ax25, skb, type);
172 break;
174 #ifdef CONFIG_AX25_DAMA_SLAVE
175 case AX25_PROTO_DAMA_SLAVE:
176 if (dama || ax25->ax25_dev->dama.slave)
177 queued = ax25_ds_frame_in(ax25, skb, type);
178 else
179 queued = ax25_std_frame_in(ax25, skb, type);
180 break;
181 #endif
184 return queued;
187 static int ax25_rcv(struct sk_buff *skb, struct net_device *dev,
188 ax25_address *dev_addr, struct packet_type *ptype)
190 ax25_address src, dest, *next_digi = NULL;
191 int type = 0, mine = 0, dama;
192 struct sock *make, *sk;
193 ax25_digi dp, reverse_dp;
194 ax25_cb *ax25;
195 ax25_dev *ax25_dev;
198 * Process the AX.25/LAPB frame.
201 skb_reset_transport_header(skb);
203 if ((ax25_dev = ax25_dev_ax25dev(dev)) == NULL) {
204 kfree_skb(skb);
205 return 0;
209 * Parse the address header.
212 if (ax25_addr_parse(skb->data, skb->len, &src, &dest, &dp, &type, &dama) == NULL) {
213 kfree_skb(skb);
214 return 0;
218 * Ours perhaps ?
220 if (dp.lastrepeat + 1 < dp.ndigi) /* Not yet digipeated completely */
221 next_digi = &dp.calls[dp.lastrepeat + 1];
224 * Pull of the AX.25 headers leaving the CTRL/PID bytes
226 skb_pull(skb, ax25_addr_size(&dp));
228 /* For our port addresses ? */
229 if (ax25cmp(&dest, dev_addr) == 0 && dp.lastrepeat + 1 == dp.ndigi)
230 mine = 1;
232 /* Also match on any registered callsign from L3/4 */
233 if (!mine && ax25_listen_mine(&dest, dev) && dp.lastrepeat + 1 == dp.ndigi)
234 mine = 1;
236 /* UI frame - bypass LAPB processing */
237 if ((*skb->data & ~0x10) == AX25_UI && dp.lastrepeat + 1 == dp.ndigi) {
238 skb_set_transport_header(skb, 2); /* skip control and pid */
240 ax25_send_to_raw(&dest, skb, skb->data[1]);
242 if (!mine && ax25cmp(&dest, (ax25_address *)dev->broadcast) != 0) {
243 kfree_skb(skb);
244 return 0;
247 /* Now we are pointing at the pid byte */
248 switch (skb->data[1]) {
249 case AX25_P_IP:
250 skb_pull(skb,2); /* drop PID/CTRL */
251 skb_reset_transport_header(skb);
252 skb_reset_network_header(skb);
253 skb->dev = dev;
254 skb->pkt_type = PACKET_HOST;
255 skb->protocol = htons(ETH_P_IP);
256 netif_rx(skb);
257 break;
259 case AX25_P_ARP:
260 skb_pull(skb,2);
261 skb_reset_transport_header(skb);
262 skb_reset_network_header(skb);
263 skb->dev = dev;
264 skb->pkt_type = PACKET_HOST;
265 skb->protocol = htons(ETH_P_ARP);
266 netif_rx(skb);
267 break;
268 case AX25_P_TEXT:
269 /* Now find a suitable dgram socket */
270 sk = ax25_get_socket(&dest, &src, SOCK_DGRAM);
271 if (sk != NULL) {
272 bh_lock_sock(sk);
273 if (atomic_read(&sk->sk_rmem_alloc) >=
274 sk->sk_rcvbuf) {
275 kfree_skb(skb);
276 } else {
278 * Remove the control and PID.
280 skb_pull(skb, 2);
281 if (sock_queue_rcv_skb(sk, skb) != 0)
282 kfree_skb(skb);
284 bh_unlock_sock(sk);
285 sock_put(sk);
286 } else {
287 kfree_skb(skb);
289 break;
291 default:
292 kfree_skb(skb); /* Will scan SOCK_AX25 RAW sockets */
293 break;
296 return 0;
300 * Is connected mode supported on this device ?
301 * If not, should we DM the incoming frame (except DMs) or
302 * silently ignore them. For now we stay quiet.
304 if (ax25_dev->values[AX25_VALUES_CONMODE] == 0) {
305 kfree_skb(skb);
306 return 0;
309 /* LAPB */
311 /* AX.25 state 1-4 */
313 ax25_digi_invert(&dp, &reverse_dp);
315 if ((ax25 = ax25_find_cb(&dest, &src, &reverse_dp, dev)) != NULL) {
317 * Process the frame. If it is queued up internally it
318 * returns one otherwise we free it immediately. This
319 * routine itself wakes the user context layers so we do
320 * no further work
322 if (ax25_process_rx_frame(ax25, skb, type, dama) == 0)
323 kfree_skb(skb);
325 ax25_cb_put(ax25);
326 return 0;
329 /* AX.25 state 0 (disconnected) */
331 /* a) received not a SABM(E) */
333 if ((*skb->data & ~AX25_PF) != AX25_SABM &&
334 (*skb->data & ~AX25_PF) != AX25_SABME) {
336 * Never reply to a DM. Also ignore any connects for
337 * addresses that are not our interfaces and not a socket.
339 if ((*skb->data & ~AX25_PF) != AX25_DM && mine)
340 ax25_return_dm(dev, &src, &dest, &dp);
342 kfree_skb(skb);
343 return 0;
346 /* b) received SABM(E) */
348 if (dp.lastrepeat + 1 == dp.ndigi)
349 sk = ax25_find_listener(&dest, 0, dev, SOCK_SEQPACKET);
350 else
351 sk = ax25_find_listener(next_digi, 1, dev, SOCK_SEQPACKET);
353 if (sk != NULL) {
354 bh_lock_sock(sk);
355 if (sk_acceptq_is_full(sk) ||
356 (make = ax25_make_new(sk, ax25_dev)) == NULL) {
357 if (mine)
358 ax25_return_dm(dev, &src, &dest, &dp);
359 kfree_skb(skb);
360 bh_unlock_sock(sk);
361 sock_put(sk);
363 return 0;
366 ax25 = ax25_sk(make);
367 skb_set_owner_r(skb, make);
368 skb_queue_head(&sk->sk_receive_queue, skb);
370 make->sk_state = TCP_ESTABLISHED;
372 sk->sk_ack_backlog++;
373 bh_unlock_sock(sk);
374 } else {
375 if (!mine) {
376 kfree_skb(skb);
377 return 0;
380 if ((ax25 = ax25_create_cb()) == NULL) {
381 ax25_return_dm(dev, &src, &dest, &dp);
382 kfree_skb(skb);
383 return 0;
386 ax25_fillin_cb(ax25, ax25_dev);
389 ax25->source_addr = dest;
390 ax25->dest_addr = src;
393 * Sort out any digipeated paths.
395 if (dp.ndigi && !ax25->digipeat &&
396 (ax25->digipeat = kmalloc(sizeof(ax25_digi), GFP_ATOMIC)) == NULL) {
397 kfree_skb(skb);
398 ax25_destroy_socket(ax25);
399 if (sk)
400 sock_put(sk);
401 return 0;
404 if (dp.ndigi == 0) {
405 kfree(ax25->digipeat);
406 ax25->digipeat = NULL;
407 } else {
408 /* Reverse the source SABM's path */
409 memcpy(ax25->digipeat, &reverse_dp, sizeof(ax25_digi));
412 if ((*skb->data & ~AX25_PF) == AX25_SABME) {
413 ax25->modulus = AX25_EMODULUS;
414 ax25->window = ax25_dev->values[AX25_VALUES_EWINDOW];
415 } else {
416 ax25->modulus = AX25_MODULUS;
417 ax25->window = ax25_dev->values[AX25_VALUES_WINDOW];
420 ax25_send_control(ax25, AX25_UA, AX25_POLLON, AX25_RESPONSE);
422 #ifdef CONFIG_AX25_DAMA_SLAVE
423 if (dama && ax25->ax25_dev->values[AX25_VALUES_PROTOCOL] == AX25_PROTO_DAMA_SLAVE)
424 ax25_dama_on(ax25);
425 #endif
427 ax25->state = AX25_STATE_3;
429 ax25_cb_add(ax25);
431 ax25_start_heartbeat(ax25);
432 ax25_start_t3timer(ax25);
433 ax25_start_idletimer(ax25);
435 if (sk) {
436 if (!sock_flag(sk, SOCK_DEAD))
437 sk->sk_data_ready(sk, skb->len);
438 sock_put(sk);
439 } else
440 kfree_skb(skb);
442 return 0;
446 * Receive an AX.25 frame via a SLIP interface.
448 int ax25_kiss_rcv(struct sk_buff *skb, struct net_device *dev,
449 struct packet_type *ptype, struct net_device *orig_dev)
451 skb->sk = NULL; /* Initially we don't know who it's for */
452 skb->destructor = NULL; /* Who initializes this, dammit?! */
454 if (!net_eq(dev_net(dev), &init_net)) {
455 kfree_skb(skb);
456 return 0;
459 if ((*skb->data & 0x0F) != 0) {
460 kfree_skb(skb); /* Not a KISS data frame */
461 return 0;
464 skb_pull(skb, AX25_KISS_HEADER_LEN); /* Remove the KISS byte */
466 return ax25_rcv(skb, dev, (ax25_address *)dev->dev_addr, ptype);