/*
 * Connection oriented routing
 * Copyright (C) 2007-2021 Michael Blizek
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version 2
 * of the License, or (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
 * 02110-1301, USA.
 */
21 #include <asm/byteorder.h>
/*
 * cor_parse_connect() - handle a received KP_MISC_CONNECT message from
 * neighbor @nb, carried in @skb.
 *
 * Pulls the connect request fields off the skb (conn id, both sequence
 * numbers, window, packed priority, latency flag), allocates a new
 * bidirectional connection, initialises both directions under their
 * rcv_locks and finally replies with a "connect success" control msg.
 *
 * NOTE(review): this extraction is missing source lines (the embedded
 * original line numbers skip, e.g. 42->45, 105->108); braces, goto
 * labels and several error branches are not visible here, so only the
 * visible statements are documented.
 */
25 static void cor_parse_connect(struct cor_neighbor
*nb
, struct sk_buff
*skb
)
27 struct cor_conn_bidir
*cnb
;
28 struct cor_conn
*src_in
;
29 struct cor_conn
*trgt_out
;
/* wire format: u32 conn_id, u48 rcv/snd seqno, u8 window, u16 priority,
 * u8 is_highlatency (pulled in this order) */
30 __u32 rcv_conn_id
= cor_pull_u32(skb
);
31 __u32 snd_conn_id
= cor_get_connid_reverse(rcv_conn_id
);
32 __u64 rcv_seqno
= cor_pull_u48(skb
);
33 __u64 snd_seqno
= cor_pull_u48(skb
);
34 __u8 window
= cor_pull_u8(skb
);
35 __u16 priority_raw
= cor_pull_u16(skb
);
/* priority_raw packs a 4-bit seqno (high nibble) and a 12-bit priority */
36 __u8 priority_seqno
= (priority_raw
>> 12);
37 __u16 priority
= (priority_raw
& 4095);
38 __u8 is_highlatency
= cor_pull_u8(skb
);
39 struct cor_control_msg_out
*cm
= 0;
41 /* do not send reset - doing so would only corrupt things further */
/* bit 31 must be set on the received id and clear on its reverse */
42 if (unlikely((rcv_conn_id
& (1 << 31)) == 0))
45 BUG_ON((snd_conn_id
& (1 << 31)) != 0);
47 if (unlikely(snd_conn_id
== 0))
50 if (cor_is_clientmode())
53 if (cor_newconn_checkpriority(nb
, priority
) != 0)
/* reserve the control message for the success reply up front */
56 cm
= cor_alloc_control_msg(nb
, ACM_PRIORITY_MED
);
57 if (unlikely(cm
== 0))
60 if (is_highlatency
!= 1)
63 cnb
= cor_alloc_conn(GFP_ATOMIC
, is_highlatency
);
64 if (unlikely(cnb
== 0))
/* presumably src_in = &(cnb->cli) on a missing line -- TODO confirm */
68 trgt_out
= &(cnb
->srv
);
70 spin_lock_bh(&(src_in
->rcv_lock
));
71 spin_lock_bh(&(trgt_out
->rcv_lock
));
72 if (unlikely(cor_conn_init_out(trgt_out
, nb
, rcv_conn_id
, 1))) {
/* init failed: mark reset, drop locks and the allocation reference */
74 trgt_out
->isreset
= 2;
75 spin_unlock_bh(&(trgt_out
->rcv_lock
));
76 spin_unlock_bh(&(src_in
->rcv_lock
));
77 cor_conn_kref_put(src_in
, "alloc_conn");
82 src_in
->source
.in
.established
= 1;
83 trgt_out
->target
.out
.established
= 1;
/* start the incoming window at the peer's announced receive seqno */
85 src_in
->source
.in
.next_seqno
= rcv_seqno
;
86 src_in
->source
.in
.window_seqnolimit
= rcv_seqno
;
87 src_in
->source
.in
.window_seqnolimit_remote
= rcv_seqno
;
89 cor_update_windowlimit(src_in
);
91 trgt_out
->target
.out
.conn_id
= snd_conn_id
;
93 trgt_out
->target
.out
.seqno_nextsend
= snd_seqno
;
94 trgt_out
->target
.out
.seqno_acked
= snd_seqno
;
95 cor_reset_seqno(trgt_out
, snd_seqno
);
/* window byte appears to be log-encoded (cor_dec_log_64_7 expands it)
 * -- inferred from the helper name, confirm against its definition */
97 trgt_out
->target
.out
.seqno_windowlimit
=
98 trgt_out
->target
.out
.seqno_nextsend
+
99 cor_dec_log_64_7(window
);
101 spin_unlock_bh(&(trgt_out
->rcv_lock
));
102 spin_unlock_bh(&(src_in
->rcv_lock
));
104 src_in
->source
.in
.priority_seqno
= priority_seqno
;
105 cor_set_conn_in_priority(nb
, rcv_conn_id
, src_in
, priority_seqno
,
108 cor_send_connect_success(cm
, snd_conn_id
, src_in
);
110 cor_conn_kref_put(src_in
, "alloc_conn");
/* error path (its labels are missing from this extraction): free the
 * reserved control msg and reset the half-created connection, if any */
113 struct cor_conn
*src_in
;
116 cor_free_control_msg(cm
);
118 src_in
= cor_get_conn(nb
, rcv_conn_id
);
120 cor_send_reset_conn(nb
, snd_conn_id
, 0);
122 cor_conn_kref_put(src_in
, "stack");
/*
 * cor_parse_conn_success() - handle KP_MISC_CONNECT_SUCCESS from @nb.
 *
 * Looks up the local connection addressed by the pulled conn id, marks
 * both directions established, installs the announced send window and
 * flushes buffered outgoing data.  On validation failure a reset is
 * sent back for the reverse conn id.
 *
 * NOTE(review): the extraction skips original lines (goto labels and
 * several branch bodies are missing); only visible code is documented.
 */
127 static void cor_parse_conn_success(struct cor_neighbor
*nb
, struct sk_buff
*skb
)
/* wire format: u32 conn_id followed by u8 window */
129 __u32 rcv_conn_id
= cor_pull_u32(skb
);
130 __u8 window
= cor_pull_u8(skb
);
132 struct cor_conn
*src_in
= cor_get_conn(nb
, rcv_conn_id
);
133 struct cor_conn
*trgt_out
;
135 if (unlikely(src_in
== 0))
138 if (unlikely(src_in
->is_client
))
141 trgt_out
= cor_get_conn_reversedir(src_in
);
/* lock order here: trgt_out first, then src_in */
144 spin_lock_bh(&(trgt_out
->rcv_lock
));
145 spin_lock_bh(&(src_in
->rcv_lock
));
/* the conn must still be the incoming conn for this neighbor/id */
147 if (unlikely(cor_is_conn_in(src_in
, nb
, rcv_conn_id
) == 0))
150 BUG_ON(trgt_out
->targettype
!= TARGET_OUT
);
151 BUG_ON(trgt_out
->target
.out
.nb
!= nb
);
153 if (unlikely(trgt_out
->isreset
!= 0))
156 cor_set_last_act(src_in
);
158 src_in
->source
.in
.established
= 1;
/* only the first success message establishes the outgoing side and
 * installs the (log-encoded, see cor_dec_log_64_7) send window */
160 if (likely(trgt_out
->target
.out
.established
== 0)) {
161 trgt_out
->target
.out
.established
= 1;
162 trgt_out
->target
.out
.priority_send_allowed
= 1;
163 cor_conn_refresh_priority(trgt_out
, 1);
165 trgt_out
->target
.out
.seqno_windowlimit
=
166 trgt_out
->target
.out
.seqno_nextsend
+
167 cor_dec_log_64_7(window
);
/* src_in lock is dropped before flushing the outgoing buffer */
171 spin_unlock_bh(&(src_in
->rcv_lock
));
173 cor_flush_buf(trgt_out
);
175 spin_unlock_bh(&(trgt_out
->rcv_lock
));
179 cor_wake_sender(trgt_out
);
183 spin_unlock_bh(&(trgt_out
->rcv_lock
));
184 spin_unlock_bh(&(src_in
->rcv_lock
));
/* failure path: tell the peer to reset its side of the connection */
187 cor_send_reset_conn(nb
,
188 cor_get_connid_reverse(rcv_conn_id
), 0);
191 if (likely(src_in
!= 0))
192 cor_conn_kref_put(src_in
, "stack");
/*
 * cor_parse_reset() - handle KP_MISC_RESET_CONN from @nb.
 *
 * Looks up the connection for the pulled conn id, marks the reverse
 * direction as reset (under both per-direction rcv_locks of the
 * bidirectional pair) and then resets the connection.
 *
 * NOTE(review): the declaration of `send` and several branches are on
 * lines missing from this extraction -- TODO confirm against original.
 */
195 static void cor_parse_reset(struct cor_neighbor
*nb
, struct sk_buff
*skb
)
197 __u32 conn_id
= cor_pull_u32(skb
);
201 struct cor_conn_bidir
*cnb
;
203 struct cor_conn
*src_in
= cor_get_conn(nb
, conn_id
);
207 cnb
= cor_get_conn_bidir(src_in
);
/* both direction locks taken: cli first, then srv */
209 spin_lock_bh(&(cnb
->cli
.rcv_lock
));
210 spin_lock_bh(&(cnb
->srv
.rcv_lock
));
/* only mark the reverse side reset if the id still maps to this nb */
212 send
= unlikely(cor_is_conn_in(src_in
, nb
, conn_id
));
213 if (send
&& cor_get_conn_reversedir(src_in
)->isreset
== 0)
214 cor_get_conn_reversedir(src_in
)->isreset
= 1;
216 spin_unlock_bh(&(cnb
->srv
.rcv_lock
));
217 spin_unlock_bh(&(cnb
->cli
.rcv_lock
));
220 cor_reset_conn(src_in
);
222 cor_conn_kref_put(src_in
, "stack");
/*
 * _cor_kernel_packet_misc() - dispatch one KP_MISC_* message by its
 * minor code @code_min, consuming its payload from @skb.
 *
 * Stores the received ping cookie through @pingcookie (and presumably
 * sets *ping_rcvd on a line missing here -- TODO confirm); pong/ack
 * messages are forwarded to cor_ping_resp()/cor_kern_ack_rcvd(), and
 * connect/success/reset are delegated to their parser functions.
 * Return value: int; the success/failure convention is not visible in
 * this extraction.
 */
225 static int _cor_kernel_packet_misc(struct cor_neighbor
*nb
,
226 struct sk_buff
*skb
, int *ping_rcvd
,
227 __u32
*pingcookie
, __u8 code_min
)
/* padding carries no payload */
229 if (code_min
== KP_MISC_PADDING
) {
230 } else if (code_min
== KP_MISC_INIT_SESSION
) {
231 /* ignore if sessionid_rcv_needed is false */
232 __be32 sessionid
= cor_pull_be32(skb
);
233 if (sessionid
!= nb
->sessionid
) {
236 } else if (code_min
== KP_MISC_PING
) {
238 *pingcookie
= cor_pull_u32(skb
);
239 } else if (code_min
== KP_MISC_PONG
) {
/* pong payload: cookie, full response delay, net-only delay */
240 __u32 cookie
= cor_pull_u32(skb
);
241 __u32 respdelay_full
= cor_pull_u32(skb
);
242 cor_pull_u32(skb
); /* respdelay_netonly */
243 cor_ping_resp(nb
, cookie
, respdelay_full
);
244 } else if (code_min
== KP_MISC_ACK
) {
245 __u64 seqno
= cor_pull_u48(skb
);
246 cor_kern_ack_rcvd(nb
, seqno
);
247 } else if (code_min
== KP_MISC_CONNECT
) {
248 cor_parse_connect(nb
, skb
);
249 } else if (code_min
== KP_MISC_CONNECT_SUCCESS
) {
250 cor_parse_conn_success(nb
, skb
);
251 } else if (code_min
== KP_MISC_RESET_CONN
) {
252 cor_parse_reset(nb
, skb
);
253 } else if (code_min
== KP_MISC_SET_MAX_CMSG_DELAY
) {
/* five remote delay limits; the cor_pull_u32(skb) argument lines for
 * the first four atomic_set calls are missing from this extraction */
254 atomic_set(&(nb
->max_remote_ack_fast_delay_us
),
256 atomic_set(&(nb
->max_remote_ack_slow_delay_us
),
258 atomic_set(&(nb
->max_remote_ackconn_lowlat_delay_us
),
260 atomic_set(&(nb
->max_remote_ackconn_highlat_delay_us
),
262 atomic_set(&(nb
->max_remote_pong_delay_us
), cor_pull_u32(skb
));
/*
 * cor_parse_ack_conn() - handle a KP_ACK_CONN message from @nb.
 *
 * The minor code @code_min is a flag byte: KP_ACK_CONN_FLAGS_SEQNO
 * selects a cumulative ack (optionally with window/bufsize-changerate
 * via KP_ACK_CONN_FLAGS_WINDOW), cor_ooolen(code_min) selects an
 * out-of-order ack with a 1/2/4-byte length field, and
 * KP_ACK_CONN_FLAGS_PRIORITY carries a packed priority update.
 * Acked byte counts are accumulated through @bytes_acked.
 *
 * NOTE(review): declarations of `setwindow`, `window` and `ooo_len`,
 * plus braces/labels, are on lines missing from this extraction.
 */
269 static void cor_parse_ack_conn(struct cor_neighbor
*nb
, struct sk_buff
*skb
,
270 __u8 code_min
, __u64
*bytes_acked
)
272 __u32 conn_id
= cor_pull_u32(skb
);
273 __u8 delay_remaining
= 0;
275 struct cor_conn
*src_in
= cor_get_conn(nb
, conn_id
);
/* the delay byte is present whenever a seqno or ooo part follows */
277 if ((code_min
& KP_ACK_CONN_FLAGS_SEQNO
) != 0 ||
278 cor_ooolen(code_min
) != 0)
279 delay_remaining
= cor_pull_u8(skb
);
281 if ((code_min
& KP_ACK_CONN_FLAGS_SEQNO
) != 0) {
282 __u64 seqno
= cor_pull_u48(skb
);
285 __u8 bufsize_changerate
= 0;
287 if ((code_min
& KP_ACK_CONN_FLAGS_WINDOW
) != 0) {
289 window
= cor_pull_u8(skb
);
290 bufsize_changerate
= cor_pull_u8(skb
);
/* parse unconditionally, but only deliver if the conn was found */
293 if (likely(src_in
!= 0))
294 cor_conn_ack_rcvd(nb
, conn_id
,
295 cor_get_conn_reversedir(src_in
),
296 seqno
, setwindow
, window
,
297 bufsize_changerate
, bytes_acked
);
300 if (cor_ooolen(code_min
) != 0) {
301 __u64 seqno_ooo
= cor_pull_u48(skb
);
/* ooo length field width (1/2/4 bytes) is encoded in the flag byte */
304 if (cor_ooolen(code_min
) == 1) {
305 ooo_len
= cor_pull_u8(skb
);
306 } else if (cor_ooolen(code_min
) == 2) {
307 ooo_len
= cor_pull_u16(skb
);
308 } else if (cor_ooolen(code_min
) == 4) {
309 ooo_len
= cor_pull_u32(skb
);
314 if (likely(src_in
!= 0))
315 cor_conn_ack_ooo_rcvd(nb
, conn_id
,
316 cor_get_conn_reversedir(src_in
),
317 seqno_ooo
, ooo_len
, bytes_acked
);
320 if (code_min
& KP_ACK_CONN_FLAGS_PRIORITY
) {
/* packed as in cor_parse_connect: 4-bit seqno, 12-bit priority */
321 __u16 priority_raw
= cor_pull_u16(skb
);
322 __u8 priority_seqno
= (priority_raw
>> 12);
323 __u16 priority
= (priority_raw
& 4095);
325 if (likely(src_in
!= 0))
326 cor_set_conn_in_priority(nb
, conn_id
, src_in
,
327 priority_seqno
, priority
);
/* unknown or stale conn id: ask the peer to reset its side */
330 if (unlikely(src_in
== 0)) {
331 cor_send_reset_conn(nb
, cor_get_connid_reverse(conn_id
), 0);
335 spin_lock_bh(&(src_in
->rcv_lock
));
337 if (unlikely(cor_is_conn_in(src_in
, nb
, conn_id
) == 0)) {
338 cor_send_reset_conn(nb
, cor_get_connid_reverse(conn_id
), 0);
340 cor_set_last_act(src_in
);
343 spin_unlock_bh(&(src_in
->rcv_lock
));
345 cor_conn_kref_put(src_in
, "stack");
/*
 * cor_parse_conndata_length() - decode the variable-width conn-data
 * length field from @skb into *@ret.
 *
 * Encoding: a first byte < 128 is the length itself; otherwise a
 * second byte follows and the length is (first - 128) * 256 + second.
 * Returns int; the failure return for a short skb is on lines missing
 * from this extraction -- TODO confirm.
 */
348 static int cor_parse_conndata_length(struct sk_buff
*skb
, __u32
*ret
)
350 char *highptr
= cor_pull_skb(skb
, 1);
/* single-byte form: values 0..127 */
354 if (cor_parse_u8(highptr
) < 128) {
355 *ret
= cor_parse_u8(highptr
);
/* two-byte form: high byte has bit 7 set */
357 char *lowptr
= cor_pull_skb(skb
, 1);
361 ((__u32
) ((cor_parse_u8(highptr
)-128))*256) +
362 ((__u32
) cor_parse_u8(lowptr
));
/*
 * cor_parse_conndata() - handle a KP_CONN_DATA message from @nb.
 *
 * The minor code carries the FLUSH flag and the WINDOWUSED bits; the
 * payload is conn id (u32), seqno (u48), a variable-width length and
 * the data itself, which is passed on to cor_conn_rcv().
 *
 * NOTE(review): the declaration of `data`, the trailing arguments of
 * the cor_conn_rcv() call and the error branches are on lines missing
 * from this extraction.
 */
368 static void cor_parse_conndata(struct cor_neighbor
*nb
, struct sk_buff
*skb
,
371 __u8 flush
= ((code_min
& KP_CONN_DATA_FLAGS_FLUSH
) != 0) ? 1 : 0;
372 __u8 windowused
= (code_min
& KP_CONN_DATA_FLAGS_WINDOWUSED
);
373 __u32 conn_id
= cor_pull_u32(skb
);
374 __u64 seqno
= cor_pull_u48(skb
);
375 __u32 datalength
= 0;
378 if (unlikely(cor_parse_conndata_length(skb
, &datalength
) != 0))
/* zero-length data messages are rejected */
381 if (unlikely(datalength
== 0))
384 data
= cor_pull_skb(skb
, datalength
);
387 cor_conn_rcv(nb
, 0, data
, datalength
, conn_id
, seqno
,
/*
 * __cor_kernel_packet() - dispatch one message inside a kernel packet
 * by its major code (KP_MISC / KP_ACK_CONN / KP_CONN_DATA).
 *
 * Splits the code byte into major/minor parts and forwards to the
 * matching parser; misc messages also thread through @ping_rcvd and
 * @pingcookie, ack messages accumulate into @bytes_acked.
 * Returns the misc parser's result for KP_MISC; the returns for the
 * other branches are on lines missing from this extraction.
 */
391 static int __cor_kernel_packet(struct cor_neighbor
*nb
, struct sk_buff
*skb
,
392 int *ping_rcvd
, __u32
*pingcookie
, __u64
*bytes_acked
,
395 __u8 code_maj
= kp_maj(code
);
396 __u8 code_min
= kp_min(code
);
398 if (code_maj
== KP_MISC
) {
399 return _cor_kernel_packet_misc(nb
, skb
, ping_rcvd
,
400 pingcookie
, code_min
);
401 } else if (code_maj
== KP_ACK_CONN
) {
402 cor_parse_ack_conn(nb
, skb
, code_min
, bytes_acked
);
404 } else if (code_maj
== KP_CONN_DATA
) {
405 cor_parse_conndata(nb
, skb
, code_min
);
/*
 * _cor_kernel_packet() - parse the body of a validated kernel packet.
 *
 * Pulls the 48-bit packet seqno, performs the session-id handshake
 * first if sessionid_rcv_needed is set, then pulls code bytes and
 * dispatches each message via __cor_kernel_packet().  Afterwards,
 * acked bytes are reported to the congestion window, an ack is sent
 * according to @ackneeded, and a pong is sent if a ping was received.
 *
 * NOTE(review): the message loop, `code`/`ping_rcvd`/`sessionid`
 * declarations and several branch bodies are on lines missing from
 * this extraction.
 */
413 static void _cor_kernel_packet(struct cor_neighbor
*nb
, struct sk_buff
*skb
,
414 int ackneeded
, ktime_t pkg_rcv_start
)
417 __u32 pingcookie
= 0;
418 __u64 bytes_acked
= 0;
420 __u64 seqno
= cor_parse_u48(cor_pull_skb(skb
, 6));
/* until the session id is confirmed, only INIT_SESSION is accepted */
422 if (unlikely(atomic_read(&(nb
->sessionid_rcv_needed
)) != 0)) {
423 __u8
*codeptr
= cor_pull_skb(skb
, 1);
431 if (kp_maj(code
) != KP_MISC
||
432 kp_min(code
) != KP_MISC_INIT_SESSION
)
435 sessionid
= cor_pull_be32(skb
);
/* matching session id: stop announcing to this neighbor */
436 if (sessionid
== nb
->sessionid
) {
437 atomic_set(&(nb
->sessionid_rcv_needed
), 0);
438 cor_announce_send_stop(nb
->dev
, nb
->mac
,
439 ANNOUNCE_TYPE_UNICAST
);
/* main dispatch: pull a code byte and parse one message at a time */
446 __u8
*codeptr
= cor_pull_skb(skb
, 1);
454 if (__cor_kernel_packet(nb
, skb
, &ping_rcvd
,
455 &pingcookie
, &bytes_acked
, code
) != 0)
460 cor_nbcongwin_data_acked(nb
, bytes_acked
);
/* slow ack = not urgent, fast ack = urgent (third argument) */
462 if (ackneeded
== ACK_NEEDED_SLOW
)
463 cor_send_ack(nb
, seqno
, 0);
464 else if (ackneeded
== ACK_NEEDED_FAST
)
465 cor_send_ack(nb
, seqno
, 1);
468 cor_send_pong(nb
, pingcookie
, pkg_rcv_start
);
/*
 * _cor_kernel_packet_checklen_misc() - verify @skb holds the full
 * fixed-size payload of a KP_MISC_* message before it is parsed.
 *
 * Uses cor_pull_skb() to consume exactly the expected byte count per
 * minor code (e.g. 4 for ping, 12 for pong, 6 for ack, 20 for
 * connect); a NULL result means the packet is short.  The return
 * statements are on lines missing from this extraction -- presumably
 * 0 = ok, nonzero = too short; TODO confirm.
 */
471 static int _cor_kernel_packet_checklen_misc(struct sk_buff
*skb
, __u8 code_min
)
473 if (code_min
== KP_MISC_PADDING
) {
474 } else if (code_min
== KP_MISC_INIT_SESSION
||
475 code_min
== KP_MISC_PING
) {
476 if (cor_pull_skb(skb
, 4) == 0)
478 } else if (code_min
== KP_MISC_PONG
) {
479 if (cor_pull_skb(skb
, 12) == 0)
481 } else if (code_min
== KP_MISC_ACK
) {
482 if (cor_pull_skb(skb
, 6) == 0)
484 } else if (code_min
== KP_MISC_CONNECT
) {
485 if (cor_pull_skb(skb
, 20) == 0)
487 } else if (code_min
== KP_MISC_CONNECT_SUCCESS
) {
488 if (cor_pull_skb(skb
, 5) == 0)
490 } else if (code_min
== KP_MISC_RESET_CONN
) {
491 if (cor_pull_skb(skb
, 4) == 0)
493 } else if (code_min
== KP_MISC_SET_MAX_CMSG_DELAY
) {
494 if (cor_pull_skb(skb
, 20) == 0)
/*
 * _cor_kernel_packet_checklen_ackconn() - verify @skb holds a full
 * KP_ACK_CONN message: 4 bytes of conn id plus the flag-dependent
 * length computed by cor_ack_conn_len(code_min).  The `code_min`
 * parameter and return statements are on lines missing here.
 */
503 static int _cor_kernel_packet_checklen_ackconn(struct sk_buff
*skb
,
506 if (cor_pull_skb(skb
, 4 + cor_ack_conn_len(code_min
)) == 0)
/*
 * _cor_kernel_packet_checklen_conndata() - verify @skb holds a full
 * KP_CONN_DATA message: the 10-byte fixed header (u32 conn id +
 * u48 seqno), the variable-width length field and the data bytes
 * themselves.  Return statements are on lines missing here.
 */
511 static int _cor_kernel_packet_checklen_conndata(struct sk_buff
*skb
,
516 if (cor_pull_skb(skb
, 10) == 0)
519 if (unlikely(cor_parse_conndata_length(skb
, &datalength
) != 0))
522 if (cor_pull_skb(skb
, datalength
) == 0)
/*
 * _cor_kernel_packet_checklen() - length-check one message by major
 * code, delegating to the misc/ackconn/conndata checkers.  The
 * fall-through return for an unknown major code is on a line missing
 * from this extraction.
 */
528 static int _cor_kernel_packet_checklen(struct sk_buff
*skb
, __u8 code
)
530 __u8 code_maj
= kp_maj(code
);
531 __u8 code_min
= kp_min(code
);
533 if (code_maj
== KP_MISC
)
534 return _cor_kernel_packet_checklen_misc(skb
, code_min
);
535 else if (code_maj
== KP_ACK_CONN
)
536 return _cor_kernel_packet_checklen_ackconn(skb
, code_min
);
537 else if (code_maj
== KP_CONN_DATA
)
538 return _cor_kernel_packet_checklen_conndata(skb
, code_min
);
/*
 * cor_kernel_packet_checklen() - pre-validate a whole kernel packet on
 * a clone of @skb: skip the 6-byte seqno, then length-check every
 * message via _cor_kernel_packet_checklen().  The loop construct,
 * `code` extraction and return statements are on lines missing from
 * this extraction.
 */
543 static int cor_kernel_packet_checklen(struct sk_buff
*skb
)
545 if (cor_pull_skb(skb
, 6) == 0) /* seqno */
549 __u8
*codeptr
= cor_pull_skb(skb
, 1);
556 if (unlikely(_cor_kernel_packet_checklen(skb
, code
) != 0))
562 void cor_kernel_packet(struct cor_neighbor
*nb
, struct sk_buff
*skb
,
565 unsigned char *data
= skb
->data
;
566 unsigned int len
= skb
->len
;
568 ktime_t pkg_rcv_start
= ktime_get();
570 if (unlikely(cor_kernel_packet_checklen(skb
) != 0)) {
571 /* printk(KERN_ERR "kpacket discard"); */
578 _cor_kernel_packet(nb
, skb
, ackneeded
, pkg_rcv_start
);
/* module license tag, matching the GPL v2 header above */
583 MODULE_LICENSE("GPL");