kpacket_gen: use constants for cmdlength
[cor.git] / net / cor / kpacket_parse.c
blob b3d3aa844582674401dea3c0f9937c7813dc95ed
/**
 * Connection oriented routing
 * Copyright (C) 2007-2012 Michael Blizek
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version 2
 * of the License, or (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
 * 02110-1301, USA.
 */

#include <asm/byteorder.h>

#include "cor.h"

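/*
 * KP_ACK_CONN: acknowledgement for a single connection. Depending on the
 * flags byte this carries a cumulative seqno (optionally with a window
 * update), an out-of-order ack range and/or a priority update. The conn
 * is looked up by conn_id; if no matching conn exists, a RESET_CONN is
 * sent back.
 */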
static void parse_ack_conn(struct neighbor *nb, struct sk_buff *skb,
		__u64 *bytes_acked)
{
	__u32 conn_id = pull_u32(skb);
	__u8 flags = pull_u8(skb);

	struct conn *src_in = get_conn(nb, conn_id);

	if ((flags & KP_ACK_CONN_FLAGS_SEQNO) != 0) {
		__u64 seqno = pull_u48(skb);
		int setwindow = 0;
		__u8 window = 0;

		if ((flags & KP_ACK_CONN_FLAGS_WINDOW) != 0) {
			setwindow = 1;
			window = pull_u8(skb);
		}

		if (likely(src_in != 0))
			conn_ack_rcvd(nb, conn_id, src_in->reversedir, seqno,
					setwindow, window, bytes_acked);
	}

	if (ooolen(flags) != 0) {
		__u64 seqno_ooo = pull_u48(skb);
		__u32 ooo_len;

		if (ooolen(flags) == 1) {
			ooo_len = pull_u8(skb);
		} else if (ooolen(flags) == 2) {
			ooo_len = pull_u16(skb);
		} else if (ooolen(flags) == 4) {
			ooo_len = pull_u32(skb);
		} else {
			BUG();
		}

		if (likely(src_in != 0))
			conn_ack_ooo_rcvd(nb, conn_id, src_in->reversedir,
					seqno_ooo, ooo_len, bytes_acked);
	}

	if (flags & KP_ACK_CONN_FLAGS_PRIORITY) {
		__u8 priority_seqno = pull_u8(skb);
		__u8 priority = pull_u8(skb);

		if (likely(src_in != 0))
			set_conn_in_priority(nb, conn_id, src_in,
					priority_seqno, priority);
	}

	if (unlikely(src_in == 0)) {
		send_reset_conn(nb, conn_id ^ (conn_id & (1 << 31)), 0);
		return;
	}

	spin_lock_bh(&(src_in->rcv_lock));

	if (unlikely(is_conn_in(src_in, nb, conn_id) == 0)) {
		send_reset_conn(nb, conn_id ^ (conn_id & (1 << 31)), 0);
	} else {
		set_last_act(src_in);
	}

	spin_unlock_bh(&(src_in->rcv_lock));

	kref_put(&(src_in->ref), free_conn);
}

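/*
 * KP_CONNECT: the neighbor requests a new connection. A conn pair is
 * allocated, the outgoing direction is initialized from the conn id,
 * seqnos and window in the packet, and a CONNECT_SUCCESS control message
 * is queued; on any failure a RESET_CONN is sent instead.
 */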
static void parse_connect(struct neighbor *nb, struct sk_buff *skb)
{
	struct conn *src_in;
	__u32 rcv_conn_id = pull_u32(skb);
	__u32 snd_conn_id = (rcv_conn_id & ~(1 << 31));
	__u64 rcv_seqno = pull_u48(skb);
	__u64 snd_seqno = pull_u48(skb);
	__u8 window = pull_u8(skb);
	__u8 priority_seqno = pull_u8(skb);
	__u8 priority = pull_u8(skb);
	struct control_msg_out *cm = 0;

	/* do not send reset - doing so would only corrupt things further */
	if (unlikely((rcv_conn_id & (1 << 31)) == 0))
		return;

	BUG_ON((snd_conn_id & (1 << 31)) != 0);

	if (unlikely(snd_conn_id == 0))
		return;

	if (is_clientmode())
		goto err;

	if (newconn_checkpriority(nb, priority) != 0)
		goto err;

	cm = alloc_control_msg(nb, ACM_PRIORITY_MED);
	if (unlikely(cm == 0))
		goto err;

	src_in = alloc_conn(GFP_ATOMIC);

	if (unlikely(src_in == 0))
		goto err;

	src_in->is_client = 1;

	spin_lock_bh(&(src_in->rcv_lock));
	spin_lock_bh(&(src_in->reversedir->rcv_lock));
	if (unlikely(conn_init_out(src_in->reversedir, nb, rcv_conn_id, 1))) {
		src_in->isreset = 2;
		src_in->reversedir->isreset = 2;
		spin_unlock_bh(&(src_in->reversedir->rcv_lock));
		spin_unlock_bh(&(src_in->rcv_lock));
		kref_put(&(src_in->reversedir->ref), free_conn);
		kref_put(&(src_in->ref), free_conn);
		src_in = 0;
		goto err;
	}

	src_in->source.in.established = 1;
	src_in->reversedir->target.out.established = 1;

	src_in->source.in.next_seqno = rcv_seqno;
	src_in->source.in.window_seqnolimit = rcv_seqno;
	src_in->source.in.window_seqnolimit_remote = rcv_seqno;

	update_windowlimit(src_in);

	src_in->reversedir->target.out.conn_id = snd_conn_id;

	src_in->reversedir->target.out.seqno_nextsend = snd_seqno;
	src_in->reversedir->target.out.seqno_acked = snd_seqno;
	reset_seqno(src_in->reversedir, snd_seqno);

	src_in->reversedir->target.out.seqno_windowlimit =
			src_in->reversedir->target.out.seqno_nextsend +
			dec_log_64_7(window);

	spin_unlock_bh(&(src_in->reversedir->rcv_lock));
	spin_unlock_bh(&(src_in->rcv_lock));

	src_in->source.in.priority_seqno = priority_seqno;
	set_conn_in_priority(nb, rcv_conn_id, src_in, priority_seqno, priority);

	send_connect_success(cm, snd_conn_id, src_in);

	if (0) {
err:
		if (cm != 0) {
			free_control_msg(cm);
		}
		send_reset_conn(nb, snd_conn_id, 0);
	}
}

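/*
 * KP_CONNECT_SUCCESS: the neighbor accepted a connection we initiated.
 * Both directions are marked established, the initial send window is
 * taken over and the sender is woken up.
 */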
static void parse_conn_success(struct neighbor *nb, struct sk_buff *skb)
{
	__u32 conn_id = pull_u32(skb);
	__u8 window = pull_u8(skb);

	struct conn *src_in = get_conn(nb, conn_id);

	if (unlikely(src_in == 0))
		goto err;

	if (unlikely(src_in->is_client))
		goto err;

	spin_lock_bh(&(src_in->reversedir->rcv_lock));
	spin_lock_bh(&(src_in->rcv_lock));

	if (unlikely(is_conn_in(src_in, nb, conn_id) == 0))
		goto err_unlock;

	BUG_ON(src_in->reversedir->targettype != TARGET_OUT);
	BUG_ON(src_in->reversedir->target.out.nb != nb);

	if (unlikely(src_in->reversedir->isreset != 0))
		goto err_unlock;

	set_last_act(src_in);

	src_in->source.in.established = 1;

	if (likely(src_in->reversedir->target.out.established == 0)) {
		src_in->reversedir->target.out.established = 1;
		src_in->reversedir->target.out.priority_send_allowed = 1;

		src_in->reversedir->target.out.seqno_windowlimit =
				src_in->reversedir->target.out.seqno_nextsend +
				dec_log_64_7(window);
	}

	spin_unlock_bh(&(src_in->rcv_lock));

	flush_buf(src_in->reversedir);

	spin_unlock_bh(&(src_in->reversedir->rcv_lock));

	wake_sender(src_in->reversedir);

	if (0) {
err_unlock:
		spin_unlock_bh(&(src_in->reversedir->rcv_lock));
		spin_unlock_bh(&(src_in->rcv_lock));
err:
		send_reset_conn(nb, conn_id ^ (1 << 31), 0);
	}

	if (likely(src_in != 0))
		kref_put(&(src_in->ref), free_conn);
}

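/*
 * KP_CONN_DATA*: payload for a connection. The data is handed to
 * conn_rcv() together with the rcv_delayed_lowbuf/flush hints encoded in
 * the command code.
 */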
static void parse_conndata(struct neighbor *nb, struct sk_buff *skb,
		int rcv_delayed_lowbuf, __u8 flush)
{
	__u32 conn_id = pull_u32(skb);
	__u64 seqno = pull_u48(skb);
	__u16 datalength = pull_u16(skb);
	char *data = cor_pull_skb(skb, datalength);

	BUG_ON(data == 0);

	if (unlikely(datalength == 0))
		return;

	conn_rcv(nb, 0, data, datalength, conn_id, seqno, rcv_delayed_lowbuf,
			flush);
}

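/*
 * KP_RESET_CONN: the neighbor resets a connection. Both directions are
 * locked (in an order depending on is_client), the reverse direction is
 * marked as reset and the connection is torn down.
 */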
static void parse_reset(struct neighbor *nb, struct sk_buff *skb)
{
	__u32 conn_id = pull_u32(skb);

	int send;

	struct conn *src_in = get_conn(nb, conn_id);
	if (src_in == 0)
		return;

	if (src_in->is_client) {
		spin_lock_bh(&(src_in->rcv_lock));
		spin_lock_bh(&(src_in->reversedir->rcv_lock));
	} else {
		spin_lock_bh(&(src_in->reversedir->rcv_lock));
		spin_lock_bh(&(src_in->rcv_lock));
	}

	send = unlikely(is_conn_in(src_in, nb, conn_id));
	if (send && src_in->reversedir->isreset == 0)
		src_in->reversedir->isreset = 1;

	if (src_in->is_client) {
		spin_unlock_bh(&(src_in->rcv_lock));
		spin_unlock_bh(&(src_in->reversedir->rcv_lock));
	} else {
		spin_unlock_bh(&(src_in->reversedir->rcv_lock));
		spin_unlock_bh(&(src_in->rcv_lock));
	}

	if (send)
		reset_conn(src_in);

	kref_put(&(src_in->ref), free_conn);
}

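/*
 * Second pass over an already length-checked kernel packet: walk the
 * command codes and dispatch to the parse_* handlers above. If a session
 * id is still expected, the packet must start with a matching
 * KP_INIT_SESSION, otherwise it is ignored. An ack for this packet and a
 * pong for a received ping are sent after the whole packet has been
 * processed.
 */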
static void kernel_packet2(struct neighbor *nb, struct sk_buff *skb,
		__u64 seqno1)
{
	int ack = 0;
	int ping_rcvd = 0;
	__u32 pingcookie = 0;
	__u64 bytes_acked = 0;

	if (unlikely(atomic_read(&(nb->sessionid_rcv_needed)) != 0)) {
		__u8 *codeptr = cor_pull_skb(skb, 1);
		__u8 code;
		__be32 sessionid;

		if (codeptr == 0)
			return;

		code = *codeptr;
		if (code != KP_INIT_SESSION)
			return;

		sessionid = pull_be32(skb);
		if (sessionid == nb->sessionid) {
			atomic_set(&(nb->sessionid_rcv_needed), 0);
			announce_send_stop(nb->dev, nb->mac,
					ANNOUNCE_TYPE_UNICAST);
		} else {
			return;
		}
	}

	while (1) {
		__be32 sessionid;
		__u64 seqno2;

		__u32 cookie;
		__u32 respdelay;

		__u8 *codeptr = cor_pull_skb(skb, 1);
		__u8 code;

		if (codeptr == 0)
			break;

		code = *codeptr;

		switch (code) {
		case KP_PADDING:
			break;
		case KP_INIT_SESSION:
			/* ignore if sessionid_rcv_needed is false */
			sessionid = pull_be32(skb);
			if (sessionid != nb->sessionid) {
				return;
			}
			break;
		case KP_PING:
			ping_rcvd = 1;
			pingcookie = pull_u32(skb);
			break;
		case KP_PONG:
			cookie = pull_u32(skb);
			respdelay = pull_u32(skb);
			ping_resp(nb, cookie, respdelay);
			if (get_neigh_state(nb) == NEIGHBOR_STATE_ACTIVE)
				ack = 1;
			break;
		case KP_ACK:
			seqno2 = pull_u48(skb);
			kern_ack_rcvd(nb, seqno2);
			break;
		case KP_ACK_CONN:
			parse_ack_conn(nb, skb, &bytes_acked);
			ack = 1;
			break;
		case KP_CONNECT:
			parse_connect(nb, skb);
			ack = 1;
			break;
		case KP_CONNECT_SUCCESS:
			parse_conn_success(nb, skb);
			ack = 1;
			break;
		case KP_CONN_DATA:
			parse_conndata(nb, skb, 0, 0);
			break;
		case KP_CONN_DATA_LOWBUFDELAYED:
			parse_conndata(nb, skb, 1, 0);
			break;
		case KP_CONN_DATA_FLUSH:
			parse_conndata(nb, skb, 0, 1);
			break;
		case KP_CONN_DATA_LOWBUFDELAYED_FLUSH:
			parse_conndata(nb, skb, 1, 1);
			break;
		case KP_RESET_CONN:
			parse_reset(nb, skb);
			ack = 1;
			break;
		case KP_SET_MAX_CMSG_DELAY:
			atomic_set(&(nb->max_remote_ack_delay_us),
					pull_u32(skb));
			atomic_set(&(nb->max_remote_ackconn_delay_us),
					pull_u32(skb));
			atomic_set(&(nb->max_remote_other_delay_us),
					pull_u32(skb));
			ack = 1;
			break;
		default:
			BUG();
		}
	}

#ifdef COR_NBCONGWIN
	if (bytes_acked > 0)
		nbcongwin_data_acked(nb, bytes_acked);
#endif

	if (ack)
		send_ack(nb, seqno1);
	if (ping_rcvd)
		send_pong(nb, pingcookie);
}

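/*
 * First pass: check that every command in the packet is complete by
 * pulling its fixed length (plus the variable part for KP_ACK_CONN and
 * the KP_CONN_DATA* commands) without interpreting the contents.
 * Returns 1 if the packet is truncated or contains an unknown command,
 * 0 otherwise.
 */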
static int kernel_packet_checklen(struct sk_buff *skb)
{
	while (1) {
		__u8 *codeptr = cor_pull_skb(skb, 1);
		__u8 code;
		__u8 *flags;

		char *lengthptr;
		__u32 length;

		if (codeptr == 0)
			break;
		code = *codeptr;

		switch (code) {
		case KP_PADDING:
			break;
		case KP_INIT_SESSION:
		case KP_PING:
			if (cor_pull_skb(skb, 4) == 0)
				return 1;
			break;
		case KP_PONG:
			if (cor_pull_skb(skb, 8) == 0)
				return 1;
			break;
		case KP_ACK:
			if (cor_pull_skb(skb, 6) == 0)
				return 1;
			break;
		case KP_ACK_CONN:
			if (cor_pull_skb(skb, 4) == 0)
				return 1;
			flags = cor_pull_skb(skb, 1);
			if (flags == 0)
				return 1;
			if (cor_pull_skb(skb, ack_conn_len(*flags)) == 0)
				return 1;
			break;
		case KP_CONNECT:
			if (cor_pull_skb(skb, 19) == 0)
				return 1;
			break;
		case KP_CONNECT_SUCCESS:
			if (cor_pull_skb(skb, 5) == 0)
				return 1;
			break;
		case KP_CONN_DATA:
		case KP_CONN_DATA_LOWBUFDELAYED:
		case KP_CONN_DATA_FLUSH:
		case KP_CONN_DATA_LOWBUFDELAYED_FLUSH:
			if (cor_pull_skb(skb, 10) == 0)
				return 1;
			lengthptr = cor_pull_skb(skb, 2);
			if (lengthptr == 0)
				return 1;
			length = ntohs(*((__u16 *)lengthptr));
			if (cor_pull_skb(skb, length) == 0)
				return 1;
			break;
		case KP_RESET_CONN:
			if (cor_pull_skb(skb, 4) == 0)
				return 1;
			break;
		case KP_SET_MAX_CMSG_DELAY:
			if (cor_pull_skb(skb, 12) == 0)
				return 1;
			break;
		default:
			return 1;
		}
	}

	return 0;
}

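/*
 * Entry point for a received kernel packet: run the length check first,
 * restore skb->data/skb->len afterwards and only then parse for real.
 * The skb is freed in both the success and the discard path.
 */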
void kernel_packet(struct neighbor *nb, struct sk_buff *skb, __u64 seqno)
{
	unsigned char *data = skb->data;
	unsigned int len = skb->len;

	if (unlikely(kernel_packet_checklen(skb) != 0)) {
		/* printk(KERN_ERR "kpacket discard"); */
		goto discard;
	}

	skb->data = data;
	skb->len = len;

	kernel_packet2(nb, skb, seqno);

discard:
	kfree_skb(skb);
}

MODULE_LICENSE("GPL");