/**
 * Connection oriented routing
 * Copyright (C) 2007-2021 Michael Blizek
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version 2
 * of the License, or (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 */

#include <asm/byteorder.h>

#include "cor.h"

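/*
 * Parse an incoming CONNECT message: allocate a new bidirectional conn,
 * initialise both directions (conn ids, sequence numbers, send window,
 * priority) from the message fields and queue a CONNECT_SUCCESS reply.
 * On failure the control message is freed and a RESET_CONN is sent back,
 * unless a conn for this conn_id already exists.
 */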
static void cor_parse_connect(struct cor_neighbor *nb, struct sk_buff *skb)
{
	struct cor_conn_bidir *cnb;
	struct cor_conn *src_in;
	struct cor_conn *trgt_out;
	__u32 rcv_conn_id = cor_pull_u32(skb);
	__u32 snd_conn_id = cor_get_connid_reverse(rcv_conn_id);
	__u32 rcv_seqno = cor_pull_u32(skb);
	__u32 snd_seqno = cor_pull_u32(skb);
	__u8 window = cor_pull_u8(skb);
	__u16 priority_raw = cor_pull_u16(skb);
	__u8 priority_seqno = (priority_raw >> 12);
	__u16 priority = (priority_raw & 4095);
	__u8 is_highlatency = cor_pull_u8(skb);
	struct cor_control_msg_out *cm = 0;

	/* do not send reset - doing so would only corrupt things further */
	if (unlikely((rcv_conn_id & (1 << 31)) == 0))
		return;

	BUG_ON((snd_conn_id & (1 << 31)) != 0);

	if (unlikely(snd_conn_id == 0))
		return;

	if (cor_is_clientmode())
		goto err;

	if (cor_new_incoming_conn_allowed(nb) == 0)
		goto err;

	cm = cor_alloc_control_msg(nb, ACM_PRIORITY_MED);
	if (unlikely(cm == 0))
		goto err;

	if (is_highlatency != 1)
		is_highlatency = 0;

	cnb = cor_alloc_conn(GFP_ATOMIC, is_highlatency);
	if (unlikely(cnb == 0))
		goto err;

	src_in = &cnb->cli;
	trgt_out = &cnb->srv;

	spin_lock_bh(&src_in->rcv_lock);
	spin_lock_bh(&trgt_out->rcv_lock);
	if (unlikely(cor_conn_init_out(trgt_out, nb, rcv_conn_id, 1) != 0)) {
		src_in->isreset = 2;
		trgt_out->isreset = 2;
		spin_unlock_bh(&trgt_out->rcv_lock);
		spin_unlock_bh(&src_in->rcv_lock);
		cor_conn_kref_put(src_in, "alloc_conn");
		src_in = 0;
		goto err;
	}

	src_in->src.in.established = 1;
	trgt_out->trgt.out.established = 1;

	src_in->src.in.next_seqno = rcv_seqno;
	src_in->src.in.window_seqnolimit = rcv_seqno;
	src_in->src.in.window_seqnolimit_remote = rcv_seqno;

	cor_update_windowlimit(src_in);

	trgt_out->trgt.out.conn_id = snd_conn_id;

	trgt_out->trgt.out.seqno_nextsend = snd_seqno;
	trgt_out->trgt.out.seqno_acked = snd_seqno;
	cor_reset_seqno(trgt_out, snd_seqno);

	trgt_out->trgt.out.seqno_windowlimit =
			trgt_out->trgt.out.seqno_nextsend +
			cor_dec_log_64_11(window);

	spin_unlock_bh(&trgt_out->rcv_lock);
	spin_unlock_bh(&src_in->rcv_lock);

	src_in->src.in.priority_seqno = priority_seqno;
	cor_set_conn_in_priority(nb, rcv_conn_id, src_in, priority_seqno,
			priority, is_highlatency);

	cor_send_connect_success(cm, snd_conn_id, src_in);

	cor_conn_kref_put(src_in, "alloc_conn");

	if (0) {
		struct cor_conn *src_in;
err:
		if (cm != 0)
			cor_free_control_msg(cm);

		src_in = cor_get_conn(nb, rcv_conn_id);
		if (src_in == 0) {
			cor_send_reset_conn(nb, snd_conn_id, 0);
		} else {
			cor_conn_kref_put(src_in, "stack");
		}
	}
}

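/*
 * Parse a CONNECT_SUCCESS reply: mark both directions of the conn as
 * established, allow priority updates and open the send window from the
 * one-byte window field, then flush buffered data and wake the sender.
 * An unknown conn_id is answered with a RESET_CONN.
 */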
static void cor_parse_conn_success(struct cor_neighbor *nb, struct sk_buff *skb)
{
	__u32 rcv_conn_id = cor_pull_u32(skb);
	__u8 window = cor_pull_u8(skb);

	struct cor_conn *src_in = cor_get_conn(nb, rcv_conn_id);
	struct cor_conn *trgt_out;

	if (unlikely(src_in == 0))
		goto err;

	if (unlikely(src_in->is_client))
		goto err;

	trgt_out = cor_get_conn_reversedir(src_in);

	spin_lock_bh(&trgt_out->rcv_lock);
	spin_lock_bh(&src_in->rcv_lock);

	if (unlikely(cor_is_conn_in(src_in, nb, rcv_conn_id) == 0))
		goto err_unlock;

	BUG_ON(trgt_out->targettype != TARGET_OUT);
	BUG_ON(trgt_out->trgt.out.nb != nb);

	if (unlikely(trgt_out->isreset != 0))
		goto err_unlock;

	cor_conn_set_last_act(trgt_out);

	src_in->src.in.established = 1;

	if (likely(trgt_out->trgt.out.established == 0)) {
		trgt_out->trgt.out.established = 1;
		trgt_out->trgt.out.priority_send_allowed = 1;
		cor_conn_refresh_priority(trgt_out, 1);

		trgt_out->trgt.out.seqno_windowlimit =
				trgt_out->trgt.out.seqno_nextsend +
				cor_dec_log_64_11(window);
	}

	spin_unlock_bh(&src_in->rcv_lock);

	cor_flush_buf(trgt_out);

	spin_unlock_bh(&trgt_out->rcv_lock);

	cor_wake_sender(trgt_out);

	if (0) {
err_unlock:
		spin_unlock_bh(&trgt_out->rcv_lock);
		spin_unlock_bh(&src_in->rcv_lock);
err:
		if (src_in == 0)
			cor_send_reset_conn(nb,
					cor_get_connid_reverse(rcv_conn_id), 0);
	}

	if (likely(src_in != 0))
		cor_conn_kref_put(src_in, "stack");
}

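/*
 * Parse a RESET_CONN message: look up the conn by its incoming conn_id,
 * mark the reverse direction as reset while holding both rcv_locks and,
 * if the conn was still valid for this neighbor, tear it down.
 */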
static void cor_parse_reset(struct cor_neighbor *nb, struct sk_buff *skb)
{
	__u32 conn_id = cor_pull_u32(skb);

	int send;

	struct cor_conn_bidir *cnb;

	struct cor_conn *src_in = cor_get_conn(nb, conn_id);

	if (src_in == 0)
		return;

	cnb = cor_get_conn_bidir(src_in);

	spin_lock_bh(&cnb->cli.rcv_lock);
	spin_lock_bh(&cnb->srv.rcv_lock);

	send = unlikely(cor_is_conn_in(src_in, nb, conn_id));
	if (send && cor_get_conn_reversedir(src_in)->isreset == 0)
		cor_get_conn_reversedir(src_in)->isreset = 1;

	spin_unlock_bh(&cnb->srv.rcv_lock);
	spin_unlock_bh(&cnb->cli.rcv_lock);

	if (send)
		cor_reset_conn(src_in);

	cor_conn_kref_put(src_in, "stack");
}

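/*
 * Dispatch a single KP_MISC command. Returns 0 to continue parsing the
 * rest of the packet; 1 aborts parsing (INIT_SESSION with a session id
 * that does not match this neighbor).
 */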
static int _cor_kernel_packet_misc(struct cor_neighbor *nb,
		struct sk_buff *skb, int *ping_rcvd,
		__u32 *pingcookie, __u8 code_min)
{
	if (code_min == KP_MISC_PADDING) {
	} else if (code_min == KP_MISC_INIT_SESSION) {
		/* ignore if sessionid_rcv_needed is false */
		__be32 sessionid = cor_pull_be32(skb);

		if (sessionid != nb->sessionid) {
			return 1;
		}
	} else if (code_min == KP_MISC_PING) {
		*ping_rcvd = 1;
		*pingcookie = cor_pull_u32(skb);
	} else if (code_min == KP_MISC_PONG) {
		__u32 cookie = cor_pull_u32(skb);
		__u32 respdelay_full = cor_pull_u32(skb);

		cor_pull_u32(skb); /* respdelay_netonly */
		cor_ping_resp(nb, cookie, respdelay_full);
	} else if (code_min == KP_MISC_ACK) {
		__u32 seqno = cor_pull_u32(skb);

		cor_kern_ack_rcvd(nb, seqno);
	} else if (code_min == KP_MISC_CONNECT) {
		cor_parse_connect(nb, skb);
	} else if (code_min == KP_MISC_CONNECT_SUCCESS) {
		cor_parse_conn_success(nb, skb);
	} else if (code_min == KP_MISC_RESET_CONN) {
		cor_parse_reset(nb, skb);
	} else if (code_min == KP_MISC_SET_MAX_CMSG_DELAY) {
		atomic_set(&nb->max_remote_ack_fast_delay_us,
				cor_pull_u32(skb));
		atomic_set(&nb->max_remote_ack_slow_delay_us,
				cor_pull_u32(skb));
		atomic_set(&nb->max_remote_ackconn_delay_us, cor_pull_u32(skb));
		atomic_set(&nb->max_remote_pong_delay_us, cor_pull_u32(skb));
	} else if (code_min == KP_MISC_SET_RECEIVE_MTU) {
		atomic_set(&nb->remote_rcvmtu, cor_pull_u32(skb));
	} else {
		BUG();
	}

	return 0;
}

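/*
 * Parse an ACK_CONN command. Depending on the flag bits in code_min it may
 * carry an in-order ack (seqno plus optional window and bufsize change
 * rate), an out-of-order ack (seqno_ooo and length) and/or a priority
 * update. An unknown conn_id is answered with a RESET_CONN.
 */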
static void cor_parse_ack_conn(struct cor_neighbor *nb, struct sk_buff *skb,
		__u8 code_min, __u64 *bytes_acked)
{
	__u32 conn_id = cor_pull_u32(skb);
	__u8 delay_remaining = 0;

	struct cor_conn *src_in = cor_get_conn(nb, conn_id);

	if ((code_min & KP_ACK_CONN_FLAGS_SEQNO) != 0 ||
			cor_ooolen(code_min) != 0)
		delay_remaining = cor_pull_u8(skb);

	if ((code_min & KP_ACK_CONN_FLAGS_SEQNO) != 0) {
		__u32 seqno = cor_pull_u32(skb);
		int setwindow = 0;
		__u8 window = 0;
		__u8 bufsize_changerate = 0;

		if ((code_min & KP_ACK_CONN_FLAGS_WINDOW) != 0) {
			setwindow = 1;
			window = cor_pull_u8(skb);
			bufsize_changerate = cor_pull_u8(skb);
		}

		if (likely(src_in != 0))
			cor_conn_ack_rcvd(nb, conn_id,
					cor_get_conn_reversedir(src_in),
					seqno, setwindow, window,
					bufsize_changerate, bytes_acked);
	}

	if (cor_ooolen(code_min) != 0) {
		__u32 seqno_ooo = cor_pull_u32(skb);
		__u32 ooo_len;

		if (cor_ooolen(code_min) == 1) {
			ooo_len = cor_pull_u8(skb);
		} else if (cor_ooolen(code_min) == 2) {
			ooo_len = cor_pull_u16(skb);
		} else if (cor_ooolen(code_min) == 4) {
			ooo_len = cor_pull_u32(skb);
		} else {
			BUG();
		}

		if (likely(src_in != 0))
			cor_conn_ack_ooo_rcvd(nb, conn_id,
					cor_get_conn_reversedir(src_in),
					seqno_ooo, ooo_len, bytes_acked);
	}

	if (code_min & KP_ACK_CONN_FLAGS_PRIORITY) {
		__u16 priority_raw = cor_pull_u16(skb);
		__u8 priority_seqno = (priority_raw >> 12);
		__u16 priority = (priority_raw & 4095);
		__u8 is_highlatency = cor_pull_u8(skb);

		if (likely(src_in != 0))
			cor_set_conn_in_priority(nb, conn_id, src_in,
					priority_seqno, priority,
					is_highlatency);
	}

	if (unlikely(src_in == 0)) {
		cor_send_reset_conn(nb, cor_get_connid_reverse(conn_id), 0);
		return;
	}

	cor_conn_kref_put(src_in, "stack");
}

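/*
 * Decode the variable-length data length field of CONN_DATA: a single byte
 * for values below 128, otherwise two bytes encoding
 * 128 + (high - 128) * 256 + low. Returns non-zero if the skb is too short.
 */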
static int cor_parse_conndata_length(struct sk_buff *skb, __u32 *ret)
{
	char *highptr = cor_pull_skb(skb, 1);
	__u8 high;

	if (highptr == 0)
		return 1;

	high = cor_parse_u8(highptr);

	if (high < 128) {
		*ret = cor_parse_u8(highptr);
	} else {
		char *lowptr = cor_pull_skb(skb, 1);

		if (lowptr == 0)
			return 1;

		*ret = 128 +
				((__u32) (high - 128)) * 256 +
				((__u32) cor_parse_u8(lowptr));
	}

	return 0;
}

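/*
 * Parse a CONN_DATA command and hand the payload to cor_conn_rcv(). The
 * packet length has already been verified by the checklen pass below, so
 * running out of data here is treated as a bug.
 */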
static void cor_parse_conndata(struct cor_neighbor *nb, struct sk_buff *skb,
		__u8 code_min)
{
	__u8 flush = ((code_min & KP_CONN_DATA_FLAGS_FLUSH) != 0) ? 1 : 0;
	__u8 windowused = (code_min & KP_CONN_DATA_FLAGS_WINDOWUSED);
	__u32 conn_id = cor_pull_u32(skb);
	__u32 seqno = cor_pull_u32(skb);
	__u32 datalength = 0;
	char *data;

	if (unlikely(cor_parse_conndata_length(skb, &datalength) != 0))
		BUG();

	if (unlikely(datalength == 0))
		return;

	data = cor_pull_skb(skb, datalength);
	BUG_ON(data == 0);

	cor_conn_rcv(nb, 0, data, datalength, conn_id, seqno,
			windowused, flush);
}

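/*
 * Dispatch one command by its major code. Returns non-zero if parsing of
 * the remaining packet has to be aborted.
 */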
static int __cor_kernel_packet(struct cor_neighbor *nb, struct sk_buff *skb,
		int *ping_rcvd, __u32 *pingcookie, __u64 *bytes_acked,
		__u8 code)
{
	__u8 code_maj = kp_maj(code);
	__u8 code_min = kp_min(code);

	if (code_maj == KP_MISC) {
		return _cor_kernel_packet_misc(nb, skb, ping_rcvd,
				pingcookie, code_min);
	} else if (code_maj == KP_ACK_CONN) {
		cor_parse_ack_conn(nb, skb, code_min, bytes_acked);
		return 0;
	} else if (code_maj == KP_CONN_DATA) {
		cor_parse_conndata(nb, skb, code_min);
		return 0;
	} else {
		BUG();
		return 1;
	}
}

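/*
 * Parse all commands of an already length-checked kernel packet. While the
 * session id is still unconfirmed, only an INIT_SESSION carrying the
 * expected session id is accepted. Afterwards the packet is acked if
 * requested, and any pong is sent last so that it accounts for the packet
 * processing time.
 */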
static void _cor_kernel_packet(struct cor_neighbor *nb, struct sk_buff *skb,
		int ackneeded, ktime_t pkg_rcv_start)
{
	int ping_rcvd = 0;
	__u32 pingcookie = 0;
	__u64 bytes_acked = 0;

	__u32 seqno = cor_pull_u32(skb);

	if (unlikely(atomic_read(&nb->sessionid_rcv_needed) != 0)) {
		__u8 *codeptr = cor_pull_skb(skb, 1);
		__u8 code;
		__be32 sessionid;

		if (codeptr == 0)
			return;

		code = *codeptr;
		if (kp_maj(code) != KP_MISC ||
				kp_min(code) != KP_MISC_INIT_SESSION)
			return;

		sessionid = cor_pull_be32(skb);
		if (sessionid == nb->sessionid) {
			atomic_set(&nb->sessionid_rcv_needed, 0);
			cor_announce_send_stop(nb->dev, nb->mac,
					ANNOUNCE_TYPE_UNICAST);
		} else {
			return;
		}
	}

	while (1) {
		__u8 *codeptr = cor_pull_skb(skb, 1);
		__u8 code;

		if (codeptr == 0)
			break;

		code = *codeptr;

		if (__cor_kernel_packet(nb, skb, &ping_rcvd,
				&pingcookie, &bytes_acked, code) != 0)
			return;
	}

	if (bytes_acked > 0)
		cor_nbcongwin_data_acked(nb, bytes_acked);

	if (ackneeded == ACK_NEEDED_SLOW)
		cor_send_ack(nb, seqno, 0);
	else if (ackneeded == ACK_NEEDED_FAST)
		cor_send_ack(nb, seqno, 1);

	/* do this at the end to include packet processing time */
	if (ping_rcvd)
		cor_send_pong(nb, pingcookie, pkg_rcv_start);
}

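/*
 * The *_checklen helpers below walk the packet once without interpreting
 * it, only verifying that every command is complete, so that the parsing
 * functions above can pull their fields without further length checks.
 */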
static int _cor_kernel_packet_checklen_misc(struct sk_buff *skb, __u8 code_min)
{
	if (code_min == KP_MISC_PADDING) {
	} else if (code_min == KP_MISC_INIT_SESSION ||
			code_min == KP_MISC_PING) {
		if (cor_pull_skb(skb, 4) == 0)
			return 1;
	} else if (code_min == KP_MISC_PONG) {
		if (cor_pull_skb(skb, 12) == 0)
			return 1;
	} else if (code_min == KP_MISC_ACK) {
		if (cor_pull_skb(skb, 4) == 0)
			return 1;
	} else if (code_min == KP_MISC_CONNECT) {
		if (cor_pull_skb(skb, 16) == 0)
			return 1;
	} else if (code_min == KP_MISC_CONNECT_SUCCESS) {
		if (cor_pull_skb(skb, 5) == 0)
			return 1;
	} else if (code_min == KP_MISC_RESET_CONN) {
		if (cor_pull_skb(skb, 4) == 0)
			return 1;
	} else if (code_min == KP_MISC_SET_MAX_CMSG_DELAY) {
		if (cor_pull_skb(skb, 16) == 0)
			return 1;
	} else if (code_min == KP_MISC_SET_RECEIVE_MTU) {
		if (cor_pull_skb(skb, 4) == 0)
			return 1;
	} else {
		return 1;
	}

	return 0;
}

static int _cor_kernel_packet_checklen_ackconn(struct sk_buff *skb,
		__u8 code_min)
{
	if (cor_pull_skb(skb, 4 + cor_ack_conn_len(code_min)) == 0)
		return 1;
	return 0;
}

static int _cor_kernel_packet_checklen_conndata(struct sk_buff *skb,
		__u8 code_min)
{
	__u32 datalength;

	if (cor_pull_skb(skb, 8) == 0)
		return 1;

	if (unlikely(cor_parse_conndata_length(skb, &datalength) != 0))
		return 1;

	if (cor_pull_skb(skb, datalength) == 0)
		return 1;

	return 0;
}

static int _cor_kernel_packet_checklen(struct sk_buff *skb, __u8 code)
{
	__u8 code_maj = kp_maj(code);
	__u8 code_min = kp_min(code);

	if (code_maj == KP_MISC)
		return _cor_kernel_packet_checklen_misc(skb, code_min);
	else if (code_maj == KP_ACK_CONN)
		return _cor_kernel_packet_checklen_ackconn(skb, code_min);
	else if (code_maj == KP_CONN_DATA)
		return _cor_kernel_packet_checklen_conndata(skb, code_min);
	else
		return 1;
}

static int cor_kernel_packet_checklen(struct sk_buff *skb)
{
	if (cor_pull_skb(skb, 4) == 0) /* seqno */
		return 1;

	while (1) {
		__u8 *codeptr = cor_pull_skb(skb, 1);
		__u8 code;

		if (codeptr == 0)
			break;
		code = *codeptr;

		if (unlikely(_cor_kernel_packet_checklen(skb, code) != 0))
			return 1;
	}

	return 0;
}

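/*
 * Entry point for received kernel packets: run the length check (which
 * consumes the skb data pointer), restore skb->data and skb->len, parse
 * the packet and finally free the skb.
 */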
void cor_kernel_packet(struct cor_neighbor *nb, struct sk_buff *skb,
		int ackneeded)
{
	unsigned char *data = skb->data;
	unsigned int len = skb->len;

	ktime_t pkg_rcv_start = ktime_get();

	if (unlikely(cor_kernel_packet_checklen(skb) != 0)) {
		/* printk(KERN_ERR "kpacket discard\n"); */
		goto discard;
	}

	skb->data = data;
	skb->len = len;

	_cor_kernel_packet(nb, skb, ackneeded, pkg_rcv_start);
discard:
	kfree_skb(skb);
}

MODULE_LICENSE("GPL");