/*
 * net/cor/neigh_rcv.c — Connection oriented routing (cor.git)
 * Commit: "send windowused instead of senddelayed flag"
 * blob 8636cb6455cb8e5609c37f6123d66e03194274d0
 */
/**
 * Connection oriented routing
 * Copyright (C) 2007-2021 Michael Blizek
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version 2
 * of the License, or (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
 * 02110-1301, USA.
 */
#include <asm/byteorder.h>

#include "cor.h"
25 static void cor_parse_connect(struct cor_neighbor *nb, struct sk_buff *skb)
27 struct cor_conn_bidir *cnb;
28 struct cor_conn *src_in;
29 struct cor_conn *trgt_out;
30 __u32 rcv_conn_id = cor_pull_u32(skb);
31 __u32 snd_conn_id = cor_get_connid_reverse(rcv_conn_id);
32 __u64 rcv_seqno = cor_pull_u48(skb);
33 __u64 snd_seqno = cor_pull_u48(skb);
34 __u8 window = cor_pull_u8(skb);
35 __u16 priority_raw = cor_pull_u16(skb);
36 __u8 priority_seqno = (priority_raw >> 12);
37 __u16 priority = (priority_raw & 4095);
38 __u8 is_highlatency = cor_pull_u8(skb);
39 struct cor_control_msg_out *cm = 0;
41 /* do not send reset - doing so would only corrupt things further */
42 if (unlikely((rcv_conn_id & (1 << 31)) == 0))
43 return;
45 BUG_ON((snd_conn_id & (1 << 31)) != 0);
47 if (unlikely(snd_conn_id == 0))
48 return;
50 if (cor_is_clientmode())
51 goto err;
53 if (cor_newconn_checkpriority(nb, priority) != 0)
54 goto err;
56 cm = cor_alloc_control_msg(nb, ACM_PRIORITY_MED);
57 if (unlikely(cm == 0))
58 goto err;
60 if (is_highlatency != 1)
61 is_highlatency = 0;
63 cnb = cor_alloc_conn(GFP_ATOMIC, is_highlatency);
64 if (unlikely(cnb == 0))
65 goto err;
67 src_in = &(cnb->cli);
68 trgt_out = &(cnb->srv);
70 spin_lock_bh(&(src_in->rcv_lock));
71 spin_lock_bh(&(trgt_out->rcv_lock));
72 if (unlikely(cor_conn_init_out(trgt_out, nb, rcv_conn_id, 1))) {
73 src_in->isreset = 2;
74 trgt_out->isreset = 2;
75 spin_unlock_bh(&(trgt_out->rcv_lock));
76 spin_unlock_bh(&(src_in->rcv_lock));
77 cor_conn_kref_put(src_in, "alloc_conn");
78 src_in = 0;
79 goto err;
82 src_in->source.in.established = 1;
83 trgt_out->target.out.established = 1;
85 src_in->source.in.next_seqno = rcv_seqno;
86 src_in->source.in.window_seqnolimit = rcv_seqno;
87 src_in->source.in.window_seqnolimit_remote = rcv_seqno;
89 cor_update_windowlimit(src_in);
91 trgt_out->target.out.conn_id = snd_conn_id;
93 trgt_out->target.out.seqno_nextsend = snd_seqno;
94 trgt_out->target.out.seqno_acked = snd_seqno;
95 cor_reset_seqno(trgt_out, snd_seqno);
97 trgt_out->target.out.seqno_windowlimit =
98 trgt_out->target.out.seqno_nextsend +
99 cor_dec_log_64_7(window);
101 spin_unlock_bh(&(trgt_out->rcv_lock));
102 spin_unlock_bh(&(src_in->rcv_lock));
104 src_in->source.in.priority_seqno = priority_seqno;
105 cor_set_conn_in_priority(nb, rcv_conn_id, src_in, priority_seqno,
106 priority);
108 cor_send_connect_success(cm, snd_conn_id, src_in);
110 cor_conn_kref_put(src_in, "alloc_conn");
112 if (0) {
113 struct cor_conn *src_in;
114 err:
115 if (cm != 0)
116 cor_free_control_msg(cm);
118 src_in = cor_get_conn(nb, rcv_conn_id);
119 if (src_in == 0) {
120 cor_send_reset_conn(nb, snd_conn_id, 0);
121 } else {
122 cor_conn_kref_put(src_in, "stack");
127 static void cor_parse_conn_success(struct cor_neighbor *nb, struct sk_buff *skb)
129 __u32 rcv_conn_id = cor_pull_u32(skb);
130 __u8 window = cor_pull_u8(skb);
132 struct cor_conn *src_in = cor_get_conn(nb, rcv_conn_id);
133 struct cor_conn *trgt_out;
135 if (unlikely(src_in == 0))
136 goto err;
138 if (unlikely(src_in->is_client))
139 goto err;
141 trgt_out = cor_get_conn_reversedir(src_in);
144 spin_lock_bh(&(trgt_out->rcv_lock));
145 spin_lock_bh(&(src_in->rcv_lock));
147 if (unlikely(cor_is_conn_in(src_in, nb, rcv_conn_id) == 0))
148 goto err_unlock;
150 BUG_ON(trgt_out->targettype != TARGET_OUT);
151 BUG_ON(trgt_out->target.out.nb != nb);
153 if (unlikely(trgt_out->isreset != 0))
154 goto err_unlock;
156 cor_set_last_act(src_in);
158 src_in->source.in.established = 1;
160 if (likely(trgt_out->target.out.established == 0)) {
161 trgt_out->target.out.established = 1;
162 trgt_out->target.out.priority_send_allowed = 1;
163 cor_conn_refresh_priority(trgt_out, 1);
165 trgt_out->target.out.seqno_windowlimit =
166 trgt_out->target.out.seqno_nextsend +
167 cor_dec_log_64_7(window);
171 spin_unlock_bh(&(src_in->rcv_lock));
173 cor_flush_buf(trgt_out);
175 spin_unlock_bh(&(trgt_out->rcv_lock));
179 cor_wake_sender(trgt_out);
181 if (0) {
182 err_unlock:
183 spin_unlock_bh(&(trgt_out->rcv_lock));
184 spin_unlock_bh(&(src_in->rcv_lock));
185 err:
186 if (src_in == 0)
187 cor_send_reset_conn(nb,
188 cor_get_connid_reverse(rcv_conn_id), 0);
191 if (likely(src_in != 0))
192 cor_conn_kref_put(src_in, "stack");
195 static void cor_parse_reset(struct cor_neighbor *nb, struct sk_buff *skb)
197 __u32 conn_id = cor_pull_u32(skb);
199 int send;
201 struct cor_conn_bidir *cnb;
203 struct cor_conn *src_in = cor_get_conn(nb, conn_id);
204 if (src_in == 0)
205 return;
207 cnb = cor_get_conn_bidir(src_in);
209 spin_lock_bh(&(cnb->cli.rcv_lock));
210 spin_lock_bh(&(cnb->srv.rcv_lock));
212 send = unlikely(cor_is_conn_in(src_in, nb, conn_id));
213 if (send && cor_get_conn_reversedir(src_in)->isreset == 0)
214 cor_get_conn_reversedir(src_in)->isreset = 1;
216 spin_unlock_bh(&(cnb->srv.rcv_lock));
217 spin_unlock_bh(&(cnb->cli.rcv_lock));
219 if (send)
220 cor_reset_conn(src_in);
222 cor_conn_kref_put(src_in, "stack");
225 static int _cor_kernel_packet_misc(struct cor_neighbor *nb,
226 struct sk_buff *skb, int *ping_rcvd,
227 __u32 *pingcookie, __u8 code_min)
229 if (code_min == KP_MISC_PADDING) {
230 } else if (code_min == KP_MISC_INIT_SESSION) {
231 /* ignore if sessionid_rcv_needed is false */
232 __be32 sessionid = cor_pull_be32(skb);
233 if (sessionid != nb->sessionid) {
234 return 1;
236 } else if (code_min == KP_MISC_PING) {
237 *ping_rcvd = 1;
238 *pingcookie = cor_pull_u32(skb);
239 } else if (code_min == KP_MISC_PONG) {
240 __u32 cookie = cor_pull_u32(skb);
241 __u32 respdelay_full = cor_pull_u32(skb);
242 cor_pull_u32(skb); /* respdelay_netonly */
243 cor_ping_resp(nb, cookie, respdelay_full);
244 } else if (code_min == KP_MISC_ACK) {
245 __u64 seqno = cor_pull_u48(skb);
246 cor_kern_ack_rcvd(nb, seqno);
247 } else if (code_min == KP_MISC_CONNECT) {
248 cor_parse_connect(nb, skb);
249 } else if (code_min == KP_MISC_CONNECT_SUCCESS) {
250 cor_parse_conn_success(nb, skb);
251 } else if (code_min == KP_MISC_RESET_CONN) {
252 cor_parse_reset(nb, skb);
253 } else if (code_min == KP_MISC_SET_MAX_CMSG_DELAY) {
254 atomic_set(&(nb->max_remote_ack_fast_delay_us),
255 cor_pull_u32(skb));
256 atomic_set(&(nb->max_remote_ack_slow_delay_us),
257 cor_pull_u32(skb));
258 atomic_set(&(nb->max_remote_ackconn_lowlat_delay_us),
259 cor_pull_u32(skb));
260 atomic_set(&(nb->max_remote_ackconn_highlat_delay_us),
261 cor_pull_u32(skb));
262 atomic_set(&(nb->max_remote_pong_delay_us), cor_pull_u32(skb));
263 } else {
264 BUG();
266 return 0;
269 static void cor_parse_ack_conn(struct cor_neighbor *nb, struct sk_buff *skb,
270 __u8 code_min, __u64 *bytes_acked)
272 __u32 conn_id = cor_pull_u32(skb);
273 __u8 delay_remaining = 0;
275 struct cor_conn *src_in = cor_get_conn(nb, conn_id);
277 if ((code_min & KP_ACK_CONN_FLAGS_SEQNO) != 0 ||
278 cor_ooolen(code_min) != 0)
279 delay_remaining = cor_pull_u8(skb);
281 if ((code_min & KP_ACK_CONN_FLAGS_SEQNO) != 0) {
282 __u64 seqno = cor_pull_u48(skb);
283 int setwindow = 0;
284 __u8 window = 0;
285 __u8 bufsize_changerate = 0;
287 if ((code_min & KP_ACK_CONN_FLAGS_WINDOW) != 0) {
288 setwindow = 1;
289 window = cor_pull_u8(skb);
290 bufsize_changerate = cor_pull_u8(skb);
293 if (likely(src_in != 0))
294 cor_conn_ack_rcvd(nb, conn_id,
295 cor_get_conn_reversedir(src_in),
296 seqno, setwindow, window,
297 bufsize_changerate, bytes_acked);
300 if (cor_ooolen(code_min) != 0) {
301 __u64 seqno_ooo = cor_pull_u48(skb);
302 __u32 ooo_len;
304 if (cor_ooolen(code_min) == 1) {
305 ooo_len = cor_pull_u8(skb);
306 } else if (cor_ooolen(code_min) == 2) {
307 ooo_len = cor_pull_u16(skb);
308 } else if (cor_ooolen(code_min) == 4) {
309 ooo_len = cor_pull_u32(skb);
310 } else {
311 BUG();
314 if (likely(src_in != 0))
315 cor_conn_ack_ooo_rcvd(nb, conn_id,
316 cor_get_conn_reversedir(src_in),
317 seqno_ooo, ooo_len, bytes_acked);
320 if (code_min & KP_ACK_CONN_FLAGS_PRIORITY) {
321 __u16 priority_raw = cor_pull_u16(skb);
322 __u8 priority_seqno = (priority_raw >> 12);
323 __u16 priority = (priority_raw & 4095);
325 if (likely(src_in != 0))
326 cor_set_conn_in_priority(nb, conn_id, src_in,
327 priority_seqno, priority);
330 if (unlikely(src_in == 0)) {
331 cor_send_reset_conn(nb, cor_get_connid_reverse(conn_id), 0);
332 return;
335 spin_lock_bh(&(src_in->rcv_lock));
337 if (unlikely(cor_is_conn_in(src_in, nb, conn_id) == 0)) {
338 cor_send_reset_conn(nb, cor_get_connid_reverse(conn_id), 0);
339 } else {
340 cor_set_last_act(src_in);
343 spin_unlock_bh(&(src_in->rcv_lock));
345 cor_conn_kref_put(src_in, "stack");
348 static int cor_parse_conndata_length(struct sk_buff *skb, __u32 *ret)
350 char *highptr = cor_pull_skb(skb, 1);
351 if (highptr == 0)
352 return 1;
354 if (cor_parse_u8(highptr) < 128) {
355 *ret = cor_parse_u8(highptr);
356 } else {
357 char *lowptr = cor_pull_skb(skb, 1);
358 if (lowptr == 0)
359 return 1;
360 *ret = 128 +
361 ((__u32) ((cor_parse_u8(highptr)-128))*256) +
362 ((__u32) cor_parse_u8(lowptr));
365 return 0;
368 static void cor_parse_conndata(struct cor_neighbor *nb, struct sk_buff *skb,
369 __u8 code_min)
371 __u8 flush = ((code_min & KP_CONN_DATA_FLAGS_FLUSH) != 0) ? 1 : 0;
372 __u8 windowused = (code_min & KP_CONN_DATA_FLAGS_WINDOWUSED);
373 __u32 conn_id = cor_pull_u32(skb);
374 __u64 seqno = cor_pull_u48(skb);
375 __u32 datalength = 0;
376 char *data;
378 if (unlikely(cor_parse_conndata_length(skb, &datalength) != 0))
379 BUG();
381 if (unlikely(datalength == 0))
382 return;
384 data = cor_pull_skb(skb, datalength);
385 BUG_ON(data == 0);
387 cor_conn_rcv(nb, 0, data, datalength, conn_id, seqno,
388 windowused, flush);
391 static int __cor_kernel_packet(struct cor_neighbor *nb, struct sk_buff *skb,
392 int *ping_rcvd, __u32 *pingcookie, __u64 *bytes_acked,
393 __u8 code)
395 __u8 code_maj = kp_maj(code);
396 __u8 code_min = kp_min(code);
398 if (code_maj == KP_MISC) {
399 return _cor_kernel_packet_misc(nb, skb, ping_rcvd,
400 pingcookie, code_min);
401 } else if (code_maj == KP_ACK_CONN) {
402 cor_parse_ack_conn(nb, skb, code_min, bytes_acked);
403 return 0;
404 } else if (code_maj == KP_CONN_DATA) {
405 cor_parse_conndata(nb, skb, code_min);
406 return 0;
407 } else {
408 BUG();
409 return 1;
413 static void _cor_kernel_packet(struct cor_neighbor *nb, struct sk_buff *skb,
414 int ackneeded, ktime_t pkg_rcv_start)
416 int ping_rcvd = 0;
417 __u32 pingcookie = 0;
418 __u64 bytes_acked = 0;
420 __u64 seqno = cor_parse_u48(cor_pull_skb(skb, 6));
422 if (unlikely(atomic_read(&(nb->sessionid_rcv_needed)) != 0)) {
423 __u8 *codeptr = cor_pull_skb(skb, 1);
424 __u8 code;
425 __be32 sessionid;
427 if (codeptr == 0)
428 return;
430 code = *codeptr;
431 if (kp_maj(code) != KP_MISC ||
432 kp_min(code) != KP_MISC_INIT_SESSION)
433 return;
435 sessionid = cor_pull_be32(skb);
436 if (sessionid == nb->sessionid) {
437 atomic_set(&(nb->sessionid_rcv_needed), 0);
438 cor_announce_send_stop(nb->dev, nb->mac,
439 ANNOUNCE_TYPE_UNICAST);
440 } else {
441 return;
445 while (1) {
446 __u8 *codeptr = cor_pull_skb(skb, 1);
447 __u8 code;
449 if (codeptr == 0)
450 break;
452 code = *codeptr;
454 if (__cor_kernel_packet(nb, skb, &ping_rcvd,
455 &pingcookie, &bytes_acked, code) != 0)
456 return;
459 if (bytes_acked > 0)
460 cor_nbcongwin_data_acked(nb, bytes_acked);
462 if (ackneeded == ACK_NEEDED_SLOW)
463 cor_send_ack(nb, seqno, 0);
464 else if (ackneeded == ACK_NEEDED_FAST)
465 cor_send_ack(nb, seqno, 1);
467 if (ping_rcvd)
468 cor_send_pong(nb, pingcookie, pkg_rcv_start);
471 static int _cor_kernel_packet_checklen_misc(struct sk_buff *skb, __u8 code_min)
473 if (code_min == KP_MISC_PADDING) {
474 } else if (code_min == KP_MISC_INIT_SESSION ||
475 code_min == KP_MISC_PING) {
476 if (cor_pull_skb(skb, 4) == 0)
477 return 1;
478 } else if (code_min == KP_MISC_PONG) {
479 if (cor_pull_skb(skb, 12) == 0)
480 return 1;
481 } else if (code_min == KP_MISC_ACK) {
482 if (cor_pull_skb(skb, 6) == 0)
483 return 1;
484 } else if (code_min == KP_MISC_CONNECT) {
485 if (cor_pull_skb(skb, 20) == 0)
486 return 1;
487 } else if (code_min == KP_MISC_CONNECT_SUCCESS) {
488 if (cor_pull_skb(skb, 5) == 0)
489 return 1;
490 } else if (code_min == KP_MISC_RESET_CONN) {
491 if (cor_pull_skb(skb, 4) == 0)
492 return 1;
493 } else if (code_min == KP_MISC_SET_MAX_CMSG_DELAY) {
494 if (cor_pull_skb(skb, 20) == 0)
495 return 1;
496 } else {
497 return 1;
499 return 0;
503 static int _cor_kernel_packet_checklen_ackconn(struct sk_buff *skb,
504 __u8 code_min)
506 if (cor_pull_skb(skb, 4 + cor_ack_conn_len(code_min)) == 0)
507 return 1;
508 return 0;
511 static int _cor_kernel_packet_checklen_conndata(struct sk_buff *skb,
512 __u8 code_min)
514 __u32 datalength;
516 if (cor_pull_skb(skb, 10) == 0)
517 return 1;
519 if (unlikely(cor_parse_conndata_length(skb, &datalength) != 0))
520 return 1;
522 if (cor_pull_skb(skb, datalength) == 0)
523 return 1;
525 return 0;
528 static int _cor_kernel_packet_checklen(struct sk_buff *skb, __u8 code)
530 __u8 code_maj = kp_maj(code);
531 __u8 code_min = kp_min(code);
533 if (code_maj == KP_MISC)
534 return _cor_kernel_packet_checklen_misc(skb, code_min);
535 else if (code_maj == KP_ACK_CONN)
536 return _cor_kernel_packet_checklen_ackconn(skb, code_min);
537 else if (code_maj == KP_CONN_DATA)
538 return _cor_kernel_packet_checklen_conndata(skb, code_min);
539 else
540 return 1;
543 static int cor_kernel_packet_checklen(struct sk_buff *skb)
545 if (cor_pull_skb(skb, 6) == 0) /* seqno */
546 return 1;
548 while (1) {
549 __u8 *codeptr = cor_pull_skb(skb, 1);
550 __u8 code;
552 if (codeptr == 0)
553 break;
554 code = *codeptr;
556 if (unlikely(_cor_kernel_packet_checklen(skb, code) != 0))
557 return 1;
559 return 0;
562 void cor_kernel_packet(struct cor_neighbor *nb, struct sk_buff *skb,
563 int ackneeded)
565 unsigned char *data = skb->data;
566 unsigned int len = skb->len;
568 ktime_t pkg_rcv_start = ktime_get();
570 if (unlikely(cor_kernel_packet_checklen(skb) != 0)) {
571 /* printk(KERN_ERR "kpacket discard"); */
572 goto discard;
575 skb->data = data;
576 skb->len = len;
578 _cor_kernel_packet(nb, skb, ackneeded, pkg_rcv_start);
579 discard:
580 kfree_skb(skb);
583 MODULE_LICENSE("GPL");