convert rcv_conn_list to snd_conn_list
[cor.git] / net / cor / neigh_rcv.c
blob 43aef8abd89032041de8382c332082d6b2e47868

/**
 * Connection oriented routing
 * Copyright (C) 2007-2021 Michael Blizek
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version 2
 * of the License, or (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
 * 02110-1301, USA.
 */

#include <asm/byteorder.h>

#include "cor.h"
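
/*
 * Receive path for "kernel packets" from a neighbor: session init,
 * ping/pong, acks, CONNECT/CONNECT_SUCCESS/RESET_CONN, per-connection
 * acks and connection data. cor_kernel_packet() at the bottom of this
 * file is the entry point; every packet is length-checked before it is
 * parsed.
 */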

static void cor_parse_connect(struct cor_neighbor *nb, struct sk_buff *skb)
{
	struct cor_conn_bidir *cnb;
	struct cor_conn *src_in;
	struct cor_conn *trgt_out;
	__u32 rcv_conn_id = cor_pull_u32(skb);
	__u32 snd_conn_id = cor_get_connid_reverse(rcv_conn_id);
	__u64 rcv_seqno = cor_pull_u48(skb);
	__u64 snd_seqno = cor_pull_u48(skb);
	__u16 window = cor_pull_u16(skb);
	__u16 priority_raw = cor_pull_u16(skb);
	__u8 priority_seqno = (priority_raw >> 12);
	__u16 priority = (priority_raw & 4095);
	__u8 is_highlatency = cor_pull_u8(skb);
	struct cor_control_msg_out *cm = 0;
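
	/*
	 * The CONNECT payload pulled above is 21 bytes: conn_id (4),
	 * rcv/snd seqno (6 + 6), window (2), priority_seqno/priority
	 * packed into 4 + 12 bits (2) and is_highlatency (1), matching
	 * the KP_MISC_CONNECT length check in
	 * _cor_kernel_packet_checklen_misc().
	 */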

	/* do not send reset - doing so would only corrupt things further */
	if (unlikely((rcv_conn_id & (1 << 31)) == 0))
		return;

	BUG_ON((snd_conn_id & (1 << 31)) != 0);

	if (unlikely(snd_conn_id == 0))
		return;

	if (cor_is_clientmode())
		goto err;

	if (cor_new_incoming_conn_allowed(nb) == 0)
		goto err;

	cm = cor_alloc_control_msg(nb, ACM_PRIORITY_MED);
	if (unlikely(cm == 0))
		goto err;

	if (is_highlatency != 1)
		is_highlatency = 0;

	cnb = cor_alloc_conn(GFP_ATOMIC, is_highlatency);
	if (unlikely(cnb == 0))
		goto err;

	src_in = &(cnb->cli);
	trgt_out = &(cnb->srv);

	spin_lock_bh(&(src_in->rcv_lock));
	spin_lock_bh(&(trgt_out->rcv_lock));

	if (unlikely(cor_conn_init_out(trgt_out, nb, rcv_conn_id, 1) != 0)) {
		src_in->isreset = 2;
		trgt_out->isreset = 2;
		spin_unlock_bh(&(trgt_out->rcv_lock));
		spin_unlock_bh(&(src_in->rcv_lock));
		cor_conn_kref_put(src_in, "alloc_conn");
		src_in = 0;
		goto err;
	}

	src_in->source.in.established = 1;
	trgt_out->target.out.established = 1;

	src_in->source.in.next_seqno = rcv_seqno;
	src_in->source.in.window_seqnolimit = rcv_seqno;
	src_in->source.in.window_seqnolimit_remote = rcv_seqno;

	cor_update_windowlimit(src_in);

	trgt_out->target.out.conn_id = snd_conn_id;

	trgt_out->target.out.seqno_nextsend = snd_seqno;
	trgt_out->target.out.seqno_acked = snd_seqno;
	cor_reset_seqno(trgt_out, snd_seqno);

	trgt_out->target.out.seqno_windowlimit =
			trgt_out->target.out.seqno_nextsend +
			cor_dec_window(window);

	spin_unlock_bh(&(trgt_out->rcv_lock));
	spin_unlock_bh(&(src_in->rcv_lock));

	src_in->source.in.priority_seqno = priority_seqno;
	cor_set_conn_in_priority(nb, rcv_conn_id, src_in, priority_seqno,
			priority);

	cor_send_connect_success(cm, snd_conn_id, src_in);

	cor_conn_kref_put(src_in, "alloc_conn");
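
	/*
	 * Error path: the if (0) block below is only reachable through the
	 * err gotos above. If a connection with this conn_id already
	 * exists, only the reference taken by cor_get_conn() is dropped;
	 * otherwise a RESET_CONN is sent back for the failed connect.
	 */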

	if (0) {
		struct cor_conn *src_in;
err:
		if (cm != 0)
			cor_free_control_msg(cm);

		src_in = cor_get_conn(nb, rcv_conn_id);
		if (src_in == 0) {
			cor_send_reset_conn(nb, snd_conn_id, 0);
		} else {
			cor_conn_kref_put(src_in, "stack");
		}
	}
}

static void cor_parse_conn_success(struct cor_neighbor *nb, struct sk_buff *skb)
{
	__u32 rcv_conn_id = cor_pull_u32(skb);
	__u16 window = cor_pull_u16(skb);

	struct cor_conn *src_in = cor_get_conn(nb, rcv_conn_id);
	struct cor_conn *trgt_out;

	if (unlikely(src_in == 0))
		goto err;

	if (unlikely(src_in->is_client))
		goto err;

	trgt_out = cor_get_conn_reversedir(src_in);

	spin_lock_bh(&(trgt_out->rcv_lock));
	spin_lock_bh(&(src_in->rcv_lock));

	if (unlikely(cor_is_conn_in(src_in, nb, rcv_conn_id) == 0))
		goto err_unlock;

	BUG_ON(trgt_out->targettype != TARGET_OUT);
	BUG_ON(trgt_out->target.out.nb != nb);

	if (unlikely(trgt_out->isreset != 0))
		goto err_unlock;

	_cor_set_last_act(trgt_out);

	src_in->source.in.established = 1;

	if (likely(trgt_out->target.out.established == 0)) {
		trgt_out->target.out.established = 1;
		trgt_out->target.out.priority_send_allowed = 1;
		cor_conn_refresh_priority(trgt_out, 1);

		trgt_out->target.out.seqno_windowlimit =
				trgt_out->target.out.seqno_nextsend +
				cor_dec_window(window);
	}

	spin_unlock_bh(&(src_in->rcv_lock));

	cor_flush_buf(trgt_out);

	spin_unlock_bh(&(trgt_out->rcv_lock));

	cor_wake_sender(trgt_out);

	if (0) {
err_unlock:
		spin_unlock_bh(&(trgt_out->rcv_lock));
		spin_unlock_bh(&(src_in->rcv_lock));
err:
		if (src_in == 0)
			cor_send_reset_conn(nb,
					cor_get_connid_reverse(rcv_conn_id), 0);
	}

	if (likely(src_in != 0))
		cor_conn_kref_put(src_in, "stack");
}
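
/*
 * RESET_CONN from the neighbor: if the conn_id still belongs to a
 * connection coming in from this neighbor, the reverse direction is
 * marked as reset and the connection is torn down via cor_reset_conn().
 */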
static void cor_parse_reset(struct cor_neighbor *nb, struct sk_buff *skb)
{
	__u32 conn_id = cor_pull_u32(skb);

	int send;

	struct cor_conn_bidir *cnb;

	struct cor_conn *src_in = cor_get_conn(nb, conn_id);

	if (src_in == 0)
		return;

	cnb = cor_get_conn_bidir(src_in);

	spin_lock_bh(&(cnb->cli.rcv_lock));
	spin_lock_bh(&(cnb->srv.rcv_lock));

	send = unlikely(cor_is_conn_in(src_in, nb, conn_id));
	if (send && cor_get_conn_reversedir(src_in)->isreset == 0)
		cor_get_conn_reversedir(src_in)->isreset = 1;

	spin_unlock_bh(&(cnb->srv.rcv_lock));
	spin_unlock_bh(&(cnb->cli.rcv_lock));

	if (send)
		cor_reset_conn(src_in);

	cor_conn_kref_put(src_in, "stack");
}
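
/*
 * Dispatch a single KP_MISC command. Padding is skipped, INIT_SESSION,
 * PING/PONG and ACK are handled inline, CONNECT/CONNECT_SUCCESS/
 * RESET_CONN are forwarded to the parsers above and SET_MAX_CMSG_DELAY
 * updates the neighbor's control-message delay limits. A nonzero return
 * aborts parsing of the rest of the packet.
 */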
static int _cor_kernel_packet_misc(struct cor_neighbor *nb,
		struct sk_buff *skb, int *ping_rcvd,
		__u32 *pingcookie, __u8 code_min)
{
	if (code_min == KP_MISC_PADDING) {
	} else if (code_min == KP_MISC_INIT_SESSION) {
		/* ignore if sessionid_rcv_needed is false */
		__be32 sessionid = cor_pull_be32(skb);

		if (sessionid != nb->sessionid) {
			return 1;
		}
	} else if (code_min == KP_MISC_PING) {
		*ping_rcvd = 1;
		*pingcookie = cor_pull_u32(skb);
	} else if (code_min == KP_MISC_PONG) {
		__u32 cookie = cor_pull_u32(skb);
		__u32 respdelay_full = cor_pull_u32(skb);

		cor_pull_u32(skb); /* respdelay_netonly */
		cor_ping_resp(nb, cookie, respdelay_full);
	} else if (code_min == KP_MISC_ACK) {
		__u64 seqno = cor_pull_u48(skb);

		cor_kern_ack_rcvd(nb, seqno);
	} else if (code_min == KP_MISC_CONNECT) {
		cor_parse_connect(nb, skb);
	} else if (code_min == KP_MISC_CONNECT_SUCCESS) {
		cor_parse_conn_success(nb, skb);
	} else if (code_min == KP_MISC_RESET_CONN) {
		cor_parse_reset(nb, skb);
	} else if (code_min == KP_MISC_SET_MAX_CMSG_DELAY) {
		atomic_set(&(nb->max_remote_ack_fast_delay_us),
				cor_pull_u32(skb));
		atomic_set(&(nb->max_remote_ack_slow_delay_us),
				cor_pull_u32(skb));
		atomic_set(&(nb->max_remote_ackconn_lowlat_delay_us),
				cor_pull_u32(skb));
		atomic_set(&(nb->max_remote_ackconn_highlat_delay_us),
				cor_pull_u32(skb));
		atomic_set(&(nb->max_remote_pong_delay_us), cor_pull_u32(skb));
	} else {
		BUG();
	}

	return 0;
}
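
/*
 * KP_ACK_CONN: the flag bits in code_min determine which fields follow -
 * an acked seqno (optionally with a window update), an out-of-order
 * received range and/or an updated priority. If the conn_id is unknown,
 * a RESET_CONN is sent back.
 */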
static void cor_parse_ack_conn(struct cor_neighbor *nb, struct sk_buff *skb,
		__u8 code_min, __u64 *bytes_acked)
{
	__u32 conn_id = cor_pull_u32(skb);
	__u8 delay_remaining = 0;

	struct cor_conn *src_in = cor_get_conn(nb, conn_id);

	if ((code_min & KP_ACK_CONN_FLAGS_SEQNO) != 0 ||
			cor_ooolen(code_min) != 0)
		delay_remaining = cor_pull_u8(skb);

	if ((code_min & KP_ACK_CONN_FLAGS_SEQNO) != 0) {
		__u64 seqno = cor_pull_u48(skb);
		int setwindow = 0;
		__u16 window = 0;
		__u8 bufsize_changerate = 0;

		if ((code_min & KP_ACK_CONN_FLAGS_WINDOW) != 0) {
			setwindow = 1;
			window = cor_pull_u16(skb);
			bufsize_changerate = cor_pull_u8(skb);
		}

		if (likely(src_in != 0))
			cor_conn_ack_rcvd(nb, conn_id,
					cor_get_conn_reversedir(src_in),
					seqno, setwindow, window,
					bufsize_changerate, bytes_acked);
	}

	if (cor_ooolen(code_min) != 0) {
		__u64 seqno_ooo = cor_pull_u48(skb);
		__u32 ooo_len;

		if (cor_ooolen(code_min) == 1) {
			ooo_len = cor_pull_u8(skb);
		} else if (cor_ooolen(code_min) == 2) {
			ooo_len = cor_pull_u16(skb);
		} else if (cor_ooolen(code_min) == 4) {
			ooo_len = cor_pull_u32(skb);
		} else {
			BUG();
		}

		if (likely(src_in != 0))
			cor_conn_ack_ooo_rcvd(nb, conn_id,
					cor_get_conn_reversedir(src_in),
					seqno_ooo, ooo_len, bytes_acked);
	}

	if (code_min & KP_ACK_CONN_FLAGS_PRIORITY) {
		__u16 priority_raw = cor_pull_u16(skb);
		__u8 priority_seqno = (priority_raw >> 12);
		__u16 priority = (priority_raw & 4095);

		if (likely(src_in != 0))
			cor_set_conn_in_priority(nb, conn_id, src_in,
					priority_seqno, priority);
	}

	if (unlikely(src_in == 0)) {
		cor_send_reset_conn(nb, cor_get_connid_reverse(conn_id), 0);
		return;
	}

	cor_conn_kref_put(src_in, "stack");
}
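
/*
 * Connection data carries a 1 or 2 byte length field: values below 128
 * fit into the first byte, larger values are encoded as
 * 128 + (first_byte - 128) * 256 + second_byte. Returns nonzero if the
 * skb is too short.
 */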
static int cor_parse_conndata_length(struct sk_buff *skb, __u32 *ret)
{
	char *highptr = cor_pull_skb(skb, 1);

	if (highptr == 0)
		return 1;

	if (cor_parse_u8(highptr) < 128) {
		*ret = cor_parse_u8(highptr);
	} else {
		char *lowptr = cor_pull_skb(skb, 1);

		if (lowptr == 0)
			return 1;

		*ret = 128 +
				((__u32) ((cor_parse_u8(highptr)-128))*256) +
				((__u32) cor_parse_u8(lowptr));
	}

	return 0;
}

static void cor_parse_conndata(struct cor_neighbor *nb, struct sk_buff *skb,
		__u8 code_min)
{
	__u8 flush = ((code_min & KP_CONN_DATA_FLAGS_FLUSH) != 0) ? 1 : 0;
	__u8 windowused = (code_min & KP_CONN_DATA_FLAGS_WINDOWUSED);
	__u32 conn_id = cor_pull_u32(skb);
	__u64 seqno = cor_pull_u48(skb);
	__u32 datalength = 0;
	char *data;

	if (unlikely(cor_parse_conndata_length(skb, &datalength) != 0))
		BUG();

	if (unlikely(datalength == 0))
		return;

	data = cor_pull_skb(skb, datalength);
	BUG_ON(data == 0);

	cor_conn_rcv(nb, 0, data, datalength, conn_id, seqno,
			windowused, flush);
}
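
/*
 * Parse one command of a kernel packet: the high bits of the code byte
 * select the parser (KP_MISC, KP_ACK_CONN or KP_CONN_DATA). A nonzero
 * return aborts parsing of the remaining commands.
 */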
static int __cor_kernel_packet(struct cor_neighbor *nb, struct sk_buff *skb,
		int *ping_rcvd, __u32 *pingcookie, __u64 *bytes_acked,
		__u8 code)
{
	__u8 code_maj = kp_maj(code);
	__u8 code_min = kp_min(code);

	if (code_maj == KP_MISC) {
		return _cor_kernel_packet_misc(nb, skb, ping_rcvd,
				pingcookie, code_min);
	} else if (code_maj == KP_ACK_CONN) {
		cor_parse_ack_conn(nb, skb, code_min, bytes_acked);
		return 0;
	} else if (code_maj == KP_CONN_DATA) {
		cor_parse_conndata(nb, skb, code_min);
		return 0;
	} else {
		BUG();
		return 1;
	}
}
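
/*
 * Parse a whole kernel packet after its length has been checked. While
 * sessionid_rcv_needed is set, only an INIT_SESSION command with a
 * matching sessionid is accepted; it also stops the unicast
 * announcements to this neighbor. Afterwards the commands are parsed
 * one by one, the packet is acked according to ackneeded and, if a ping
 * was seen, answered with a pong.
 */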
static void _cor_kernel_packet(struct cor_neighbor *nb, struct sk_buff *skb,
		int ackneeded, ktime_t pkg_rcv_start)
{
	int ping_rcvd = 0;
	__u32 pingcookie = 0;
	__u64 bytes_acked = 0;

	__u64 seqno = cor_parse_u48(cor_pull_skb(skb, 6));

	if (unlikely(atomic_read(&(nb->sessionid_rcv_needed)) != 0)) {
		__u8 *codeptr = cor_pull_skb(skb, 1);
		__u8 code;
		__be32 sessionid;

		if (codeptr == 0)
			return;

		code = *codeptr;
		if (kp_maj(code) != KP_MISC ||
				kp_min(code) != KP_MISC_INIT_SESSION)
			return;

		sessionid = cor_pull_be32(skb);
		if (sessionid == nb->sessionid) {
			atomic_set(&(nb->sessionid_rcv_needed), 0);
			cor_announce_send_stop(nb->dev, nb->mac,
					ANNOUNCE_TYPE_UNICAST);
		} else {
			return;
		}
	}

	while (1) {
		__u8 *codeptr = cor_pull_skb(skb, 1);
		__u8 code;

		if (codeptr == 0)
			break;

		code = *codeptr;

		if (__cor_kernel_packet(nb, skb, &ping_rcvd,
				&pingcookie, &bytes_acked, code) != 0)
			return;
	}

	if (bytes_acked > 0)
		cor_nbcongwin_data_acked(nb, bytes_acked);

	if (ackneeded == ACK_NEEDED_SLOW)
		cor_send_ack(nb, seqno, 0);
	else if (ackneeded == ACK_NEEDED_FAST)
		cor_send_ack(nb, seqno, 1);

	if (ping_rcvd)
		cor_send_pong(nb, pingcookie, pkg_rcv_start);
}
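
/*
 * Length-checking pass: the _checklen functions below pull the same
 * fields as the parsers above, but only verify that every command is
 * complete, so the parse pass can pull fields without having to handle
 * truncated packets.
 */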
static int _cor_kernel_packet_checklen_misc(struct sk_buff *skb, __u8 code_min)
{
	if (code_min == KP_MISC_PADDING) {
	} else if (code_min == KP_MISC_INIT_SESSION ||
			code_min == KP_MISC_PING) {
		if (cor_pull_skb(skb, 4) == 0)
			return 1;
	} else if (code_min == KP_MISC_PONG) {
		if (cor_pull_skb(skb, 12) == 0)
			return 1;
	} else if (code_min == KP_MISC_ACK) {
		if (cor_pull_skb(skb, 6) == 0)
			return 1;
	} else if (code_min == KP_MISC_CONNECT) {
		if (cor_pull_skb(skb, 21) == 0)
			return 1;
	} else if (code_min == KP_MISC_CONNECT_SUCCESS) {
		if (cor_pull_skb(skb, 6) == 0)
			return 1;
	} else if (code_min == KP_MISC_RESET_CONN) {
		if (cor_pull_skb(skb, 4) == 0)
			return 1;
	} else if (code_min == KP_MISC_SET_MAX_CMSG_DELAY) {
		if (cor_pull_skb(skb, 20) == 0)
			return 1;
	} else {
		return 1;
	}

	return 0;
}

static int _cor_kernel_packet_checklen_ackconn(struct sk_buff *skb,
		__u8 code_min)
{
	if (cor_pull_skb(skb, 4 + cor_ack_conn_len(code_min)) == 0)
		return 1;
	return 0;
}

static int _cor_kernel_packet_checklen_conndata(struct sk_buff *skb,
		__u8 code_min)
{
	__u32 datalength;

	if (cor_pull_skb(skb, 10) == 0)
		return 1;

	if (unlikely(cor_parse_conndata_length(skb, &datalength) != 0))
		return 1;

	if (cor_pull_skb(skb, datalength) == 0)
		return 1;

	return 0;
}

static int _cor_kernel_packet_checklen(struct sk_buff *skb, __u8 code)
{
	__u8 code_maj = kp_maj(code);
	__u8 code_min = kp_min(code);

	if (code_maj == KP_MISC)
		return _cor_kernel_packet_checklen_misc(skb, code_min);
	else if (code_maj == KP_ACK_CONN)
		return _cor_kernel_packet_checklen_ackconn(skb, code_min);
	else if (code_maj == KP_CONN_DATA)
		return _cor_kernel_packet_checklen_conndata(skb, code_min);
	else
		return 1;
}

static int cor_kernel_packet_checklen(struct sk_buff *skb)
{
	if (cor_pull_skb(skb, 6) == 0) /* seqno */
		return 1;

	while (1) {
		__u8 *codeptr = cor_pull_skb(skb, 1);
		__u8 code;

		if (codeptr == 0)
			break;
		code = *codeptr;

		if (unlikely(_cor_kernel_packet_checklen(skb, code) != 0))
			return 1;
	}

	return 0;
}
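
/*
 * Entry point for a received kernel packet. The length-check pass pulls
 * through the skb, so skb->data and skb->len are saved beforehand and
 * restored before the actual parse. The skb is always freed here.
 */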
void cor_kernel_packet(struct cor_neighbor *nb, struct sk_buff *skb,
		int ackneeded)
{
	unsigned char *data = skb->data;
	unsigned int len = skb->len;

	ktime_t pkg_rcv_start = ktime_get();

	if (unlikely(cor_kernel_packet_checklen(skb) != 0)) {
		/* printk(KERN_ERR "kpacket discard\n"); */
		goto discard;
	}

	skb->data = data;
	skb->len = len;

	_cor_kernel_packet(nb, skb, ackneeded, pkg_rcv_start);

discard:
	kfree_skb(skb);
}

MODULE_LICENSE("GPL");