2 * Connection oriented routing
3 * Copyright (C) 2007-2021 Michael Blizek
5 * This program is free software; you can redistribute it and/or
6 * modify it under the terms of the GNU General Public License
7 * as published by the Free Software Foundation; either version 2
8 * of the License, or (at your option) any later version.
10 * This program is distributed in the hope that it will be useful,
11 * but WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
13 * GNU General Public License for more details.
15 * You should have received a copy of the GNU General Public License
16 * along with this program; if not, write to the Free Software
17 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
21 #include <asm/byteorder.h>
/*
 * Handle an incoming KP_MISC_CONNECT kernel packet from neighbor nb.
 * Pulls the connect fields (conn ids, 48-bit seqnos, 16-bit window,
 * priority, latency flag) from skb, allocates a bidirectional conn pair,
 * initializes the outgoing side, and replies with a connect-success
 * control message. On any failure it falls through to the error tail
 * that frees cm and sends a reset for the reversed conn id.
 *
 * NOTE(review): this extract is incomplete — the embedded original line
 * numbering has gaps, so braces, goto statements and error labels are
 * not visible here. Comments below describe only the visible statements.
 */
25 static void cor_parse_connect(struct cor_neighbor
*nb
, struct sk_buff
*skb
)
27 struct cor_conn_bidir
*cnb
;
28 struct cor_conn
*src_in
;
29 struct cor_conn
*trgt_out
;
/* wire format: conn_id(4) seqno_rcv(6) seqno_snd(6) window(2)
 * priority(2) is_highlatency(1) — 21 bytes, matching the length
 * check in _cor_kernel_packet_checklen_misc() */
30 __u32 rcv_conn_id
= cor_pull_u32(skb
);
31 __u32 snd_conn_id
= cor_get_connid_reverse(rcv_conn_id
);
32 __u64 rcv_seqno
= cor_pull_u48(skb
);
33 __u64 snd_seqno
= cor_pull_u48(skb
);
34 __u16 window
= cor_pull_u16(skb
);
35 __u16 priority_raw
= cor_pull_u16(skb
);
/* priority field packs a 4-bit seqno (high nibble) and a 12-bit value */
36 __u8 priority_seqno
= (priority_raw
>> 12);
37 __u16 priority
= (priority_raw
& 4095);
38 __u8 is_highlatency
= cor_pull_u8(skb
);
39 struct cor_control_msg_out
*cm
= 0;
41 /* do not send reset - doing so would only corrupt things further */
/* high bit of rcv_conn_id must be set; its reverse must have it clear */
42 if (unlikely((rcv_conn_id
& (1 << 31)) == 0))
45 BUG_ON((snd_conn_id
& (1 << 31)) != 0);
47 if (unlikely(snd_conn_id
== 0))
/* presumably clientmode nodes refuse incoming connects — the rejected
 * branch body is not visible in this extract; TODO confirm */
50 if (cor_is_clientmode())
53 if (cor_new_incoming_conn_allowed(nb
) == 0)
/* reserve the connect-success control message up front so the reply
 * cannot fail after the conn is established */
56 cm
= cor_alloc_control_msg(nb
, ACM_PRIORITY_MED
);
57 if (unlikely(cm
== 0))
60 if (is_highlatency
!= 1)
63 cnb
= cor_alloc_conn(GFP_ATOMIC
, is_highlatency
);
64 if (unlikely(cnb
== 0))
/* the srv half of the pair is the outgoing direction back to nb;
 * assignment of src_in is not visible in this extract */
68 trgt_out
= &(cnb
->srv
);
/* lock order here: src_in before trgt_out */
70 spin_lock_bh(&(src_in
->rcv_lock
));
71 spin_lock_bh(&(trgt_out
->rcv_lock
));
72 if (unlikely(cor_conn_init_out(trgt_out
, nb
, rcv_conn_id
, 1) != 0)) {
/* init failed: mark reset, drop locks and the alloc reference */
74 trgt_out
->isreset
= 2;
75 spin_unlock_bh(&(trgt_out
->rcv_lock
));
76 spin_unlock_bh(&(src_in
->rcv_lock
));
77 cor_conn_kref_put(src_in
, "alloc_conn");
82 src_in
->source
.in
.established
= 1;
83 trgt_out
->target
.out
.established
= 1;
/* seed all incoming-window bookkeeping from the peer's receive seqno */
85 src_in
->source
.in
.next_seqno
= rcv_seqno
;
86 src_in
->source
.in
.window_seqnolimit
= rcv_seqno
;
87 src_in
->source
.in
.window_seqnolimit_remote
= rcv_seqno
;
89 cor_update_windowlimit(src_in
);
91 trgt_out
->target
.out
.conn_id
= snd_conn_id
;
/* outgoing side starts at the peer-announced send seqno */
93 trgt_out
->target
.out
.seqno_nextsend
= snd_seqno
;
94 trgt_out
->target
.out
.seqno_acked
= snd_seqno
;
95 cor_reset_seqno(trgt_out
, snd_seqno
);
/* window limit = next send seqno + decoded (compressed) window */
97 trgt_out
->target
.out
.seqno_windowlimit
=
98 trgt_out
->target
.out
.seqno_nextsend
+
99 cor_dec_window(window
);
101 spin_unlock_bh(&(trgt_out
->rcv_lock
));
102 spin_unlock_bh(&(src_in
->rcv_lock
));
104 src_in
->source
.in
.priority_seqno
= priority_seqno
;
/* trailing argument(s) of this call are not visible in this extract */
105 cor_set_conn_in_priority(nb
, rcv_conn_id
, src_in
, priority_seqno
,
108 cor_send_connect_success(cm
, snd_conn_id
, src_in
);
110 cor_conn_kref_put(src_in
, "alloc_conn");
/* error tail (labels not visible): free the reserved control message,
 * look up any conn already created for this id, and reset it remotely */
113 struct cor_conn
*src_in
;
116 cor_free_control_msg(cm
);
118 src_in
= cor_get_conn(nb
, rcv_conn_id
);
120 cor_send_reset_conn(nb
, snd_conn_id
, 0);
122 cor_conn_kref_put(src_in
, "stack");
/*
 * Handle a KP_MISC_CONNECT_SUCCESS kernel packet: the peer accepted a
 * connect we sent earlier. Marks both directions established, enables
 * priority sending, updates the outgoing window limit from the 16-bit
 * compressed window field, then flushes buffered data and wakes the
 * sender. Unknown/invalid conn ids trigger a reset of the reversed id.
 *
 * NOTE(review): extract is incomplete — gaps in the embedded line
 * numbering hide goto/label/brace lines; comments cover only what is
 * visible.
 */
127 static void cor_parse_conn_success(struct cor_neighbor
*nb
, struct sk_buff
*skb
)
129 __u32 rcv_conn_id
= cor_pull_u32(skb
);
130 __u16 window
= cor_pull_u16(skb
);
132 struct cor_conn
*src_in
= cor_get_conn(nb
, rcv_conn_id
);
133 struct cor_conn
*trgt_out
;
135 if (unlikely(src_in
== 0))
/* only the server side of a conn pair may receive connect-success */
138 if (unlikely(src_in
->is_client
))
141 trgt_out
= cor_get_conn_reversedir(src_in
);
/* lock order here: trgt_out before src_in (opposite of parse_connect) */
144 spin_lock_bh(&(trgt_out
->rcv_lock
));
145 spin_lock_bh(&(src_in
->rcv_lock
));
147 if (unlikely(cor_is_conn_in(src_in
, nb
, rcv_conn_id
) == 0))
150 BUG_ON(trgt_out
->targettype
!= TARGET_OUT
);
151 BUG_ON(trgt_out
->target
.out
.nb
!= nb
);
153 if (unlikely(trgt_out
->isreset
!= 0))
156 _cor_set_last_act(trgt_out
);
158 src_in
->source
.in
.established
= 1;
/* first connect-success for this conn: finish outgoing-side setup;
 * duplicates skip this block */
160 if (likely(trgt_out
->target
.out
.established
== 0)) {
161 trgt_out
->target
.out
.established
= 1;
162 trgt_out
->target
.out
.priority_send_allowed
= 1;
163 cor_conn_refresh_priority(trgt_out
, 1);
/* window limit = next send seqno + decoded (compressed) window */
165 trgt_out
->target
.out
.seqno_windowlimit
=
166 trgt_out
->target
.out
.seqno_nextsend
+
167 cor_dec_window(window
);
/* src_in lock dropped first; trgt_out lock held across the flush */
171 spin_unlock_bh(&(src_in
->rcv_lock
));
173 cor_flush_buf(trgt_out
);
175 spin_unlock_bh(&(trgt_out
->rcv_lock
));
179 cor_wake_sender(trgt_out
);
/* error path (label not visible): drop both locks and reset the
 * reversed conn id at the peer */
183 spin_unlock_bh(&(trgt_out
->rcv_lock
));
184 spin_unlock_bh(&(src_in
->rcv_lock
));
187 cor_send_reset_conn(nb
,
188 cor_get_connid_reverse(rcv_conn_id
), 0);
191 if (likely(src_in
!= 0))
192 cor_conn_kref_put(src_in
, "stack");
/*
 * Handle a KP_MISC_RESET_CONN kernel packet: tear down the conn named
 * by the 32-bit conn id. Under both per-direction rcv_locks of the
 * bidir pair it decides whether a reset must be echoed back (conn still
 * valid and reverse direction not yet reset), marks the reverse
 * direction reset, then resets the conn.
 *
 * NOTE(review): extract is incomplete — the declaration of `send`, the
 * early-exit for an unknown conn id, and the echo-send of the reset are
 * not visible here; comments cover only the visible statements.
 */
195 static void cor_parse_reset(struct cor_neighbor
*nb
, struct sk_buff
*skb
)
197 __u32 conn_id
= cor_pull_u32(skb
);
201 struct cor_conn_bidir
*cnb
;
203 struct cor_conn
*src_in
= cor_get_conn(nb
, conn_id
);
207 cnb
= cor_get_conn_bidir(src_in
);
/* lock order: cli before srv */
209 spin_lock_bh(&(cnb
->cli
.rcv_lock
));
210 spin_lock_bh(&(cnb
->srv
.rcv_lock
));
/* echo a reset only if the conn id still maps to this conn */
212 send
= unlikely(cor_is_conn_in(src_in
, nb
, conn_id
));
213 if (send
&& cor_get_conn_reversedir(src_in
)->isreset
== 0)
214 cor_get_conn_reversedir(src_in
)->isreset
= 1;
216 spin_unlock_bh(&(cnb
->srv
.rcv_lock
));
217 spin_unlock_bh(&(cnb
->cli
.rcv_lock
));
220 cor_reset_conn(src_in
);
222 cor_conn_kref_put(src_in
, "stack");
/*
 * Dispatch a KP_MISC kernel-packet sub-command identified by code_min.
 * Payload lengths were validated beforehand by
 * _cor_kernel_packet_checklen_misc(), so the cor_pull_* calls here
 * cannot run past the skb. Returns int — the return statements are not
 * visible in this extract (presumably 0 = ok; TODO confirm).
 *
 * NOTE(review): extract is incomplete — gaps hide braces, returns and
 * the second arguments of the SET_MAX_CMSG_DELAY atomic_set calls.
 */
225 static int _cor_kernel_packet_misc(struct cor_neighbor
*nb
,
226 struct sk_buff
*skb
, int *ping_rcvd
,
227 __u32
*pingcookie
, __u8 code_min
)
/* padding: nothing to parse */
229 if (code_min
== KP_MISC_PADDING
) {
230 } else if (code_min
== KP_MISC_INIT_SESSION
) {
231 /* ignore if sessionid_rcv_needed is false */
232 __be32 sessionid
= cor_pull_be32(skb
);
233 if (sessionid
!= nb
->sessionid
) {
/* ping: remember the cookie so the caller sends a pong afterwards */
236 } else if (code_min
== KP_MISC_PING
) {
238 *pingcookie
= cor_pull_u32(skb
);
239 } else if (code_min
== KP_MISC_PONG
) {
240 __u32 cookie
= cor_pull_u32(skb
);
241 __u32 respdelay_full
= cor_pull_u32(skb
);
242 cor_pull_u32(skb
); /* respdelay_netonly */
243 cor_ping_resp(nb
, cookie
, respdelay_full
);
244 } else if (code_min
== KP_MISC_ACK
) {
245 __u64 seqno
= cor_pull_u48(skb
);
246 cor_kern_ack_rcvd(nb
, seqno
);
247 } else if (code_min
== KP_MISC_CONNECT
) {
248 cor_parse_connect(nb
, skb
);
249 } else if (code_min
== KP_MISC_CONNECT_SUCCESS
) {
250 cor_parse_conn_success(nb
, skb
);
251 } else if (code_min
== KP_MISC_RESET_CONN
) {
252 cor_parse_reset(nb
, skb
);
/* five 4-byte delay values (20 bytes total, see checklen); the pulled
 * values for the first four atomic_set calls are on lines not visible
 * in this extract */
253 } else if (code_min
== KP_MISC_SET_MAX_CMSG_DELAY
) {
254 atomic_set(&(nb
->max_remote_ack_fast_delay_us
),
256 atomic_set(&(nb
->max_remote_ack_slow_delay_us
),
258 atomic_set(&(nb
->max_remote_ackconn_lowlat_delay_us
),
260 atomic_set(&(nb
->max_remote_ackconn_highlat_delay_us
),
262 atomic_set(&(nb
->max_remote_pong_delay_us
), cor_pull_u32(skb
));
/*
 * Handle a KP_ACK_CONN kernel packet. The low code bits (code_min)
 * select which optional sections follow: a cumulative-ack seqno
 * (KP_ACK_CONN_FLAGS_SEQNO, optionally with a window + bufsize change
 * rate), an out-of-order ack range (length encoded in 1/2/4 bytes per
 * cor_ooolen()), and a priority update. bytes_acked is an out-parameter
 * accumulated by the ack handlers for congestion accounting.
 *
 * NOTE(review): extract is incomplete — declarations of `window`,
 * `setwindow` and `ooo_len`, plus some braces/returns, are on lines not
 * visible here.
 */
269 static void cor_parse_ack_conn(struct cor_neighbor
*nb
, struct sk_buff
*skb
,
270 __u8 code_min
, __u64
*bytes_acked
)
272 __u32 conn_id
= cor_pull_u32(skb
);
273 __u8 delay_remaining
= 0;
275 struct cor_conn
*src_in
= cor_get_conn(nb
, conn_id
);
/* a delay_remaining byte precedes both the seqno and the ooo section */
277 if ((code_min
& KP_ACK_CONN_FLAGS_SEQNO
) != 0 ||
278 cor_ooolen(code_min
) != 0)
279 delay_remaining
= cor_pull_u8(skb
);
281 if ((code_min
& KP_ACK_CONN_FLAGS_SEQNO
) != 0) {
282 __u64 seqno
= cor_pull_u48(skb
);
285 __u8 bufsize_changerate
= 0;
287 if ((code_min
& KP_ACK_CONN_FLAGS_WINDOW
) != 0) {
289 window
= cor_pull_u16(skb
);
290 bufsize_changerate
= cor_pull_u8(skb
);
/* note: the fields above are always pulled (to keep skb position
 * correct) even when the conn id is unknown */
293 if (likely(src_in
!= 0))
294 cor_conn_ack_rcvd(nb
, conn_id
,
295 cor_get_conn_reversedir(src_in
),
296 seqno
, setwindow
, window
,
297 bufsize_changerate
, bytes_acked
);
300 if (cor_ooolen(code_min
) != 0) {
301 __u64 seqno_ooo
= cor_pull_u48(skb
);
/* ooo range length is wire-encoded in 1, 2 or 4 bytes */
304 if (cor_ooolen(code_min
) == 1) {
305 ooo_len
= cor_pull_u8(skb
);
306 } else if (cor_ooolen(code_min
) == 2) {
307 ooo_len
= cor_pull_u16(skb
);
308 } else if (cor_ooolen(code_min
) == 4) {
309 ooo_len
= cor_pull_u32(skb
);
314 if (likely(src_in
!= 0))
315 cor_conn_ack_ooo_rcvd(nb
, conn_id
,
316 cor_get_conn_reversedir(src_in
),
317 seqno_ooo
, ooo_len
, bytes_acked
);
320 if (code_min
& KP_ACK_CONN_FLAGS_PRIORITY
) {
/* same 4-bit seqno / 12-bit value packing as in cor_parse_connect */
321 __u16 priority_raw
= cor_pull_u16(skb
);
322 __u8 priority_seqno
= (priority_raw
>> 12);
323 __u16 priority
= (priority_raw
& 4095);
325 if (likely(src_in
!= 0))
326 cor_set_conn_in_priority(nb
, conn_id
, src_in
,
327 priority_seqno
, priority
);
/* ack for an unknown conn id: tell the peer to reset it */
330 if (unlikely(src_in
== 0)) {
331 cor_send_reset_conn(nb
, cor_get_connid_reverse(conn_id
), 0);
335 cor_conn_kref_put(src_in
, "stack");
/*
 * Decode the variable-length conndata length field into *ret.
 * Wire format: one byte < 128 encodes the length directly; a first byte
 * >= 128 means a second byte follows and the length is
 * (first - 128) * 256 + second (i.e. up to 32767 with the high bit as
 * the continuation flag). Returns nonzero on a truncated skb (the
 * failure returns are on lines not visible in this extract).
 */
338 static int cor_parse_conndata_length(struct sk_buff
*skb
, __u32
*ret
)
340 char *highptr
= cor_pull_skb(skb
, 1);
344 if (cor_parse_u8(highptr
) < 128) {
345 *ret
= cor_parse_u8(highptr
);
347 char *lowptr
= cor_pull_skb(skb
, 1);
/* two-byte form: assignment target (*ret =) is on a line not visible */
351 ((__u32
) ((cor_parse_u8(highptr
)-128))*256) +
352 ((__u32
) cor_parse_u8(lowptr
));
/*
 * Handle a KP_CONN_DATA kernel packet: extract flush/windowused flags
 * from code_min, pull conn id, 48-bit seqno and the variable-length
 * data length, then hand the payload to cor_conn_rcv().
 *
 * NOTE(review): extract is incomplete — the code_min parameter
 * declaration, the `data` declaration and the trailing arguments of
 * cor_conn_rcv() are on lines not visible here.
 */
358 static void cor_parse_conndata(struct cor_neighbor
*nb
, struct sk_buff
*skb
,
361 __u8 flush
= ((code_min
& KP_CONN_DATA_FLAGS_FLUSH
) != 0) ? 1 : 0;
362 __u8 windowused
= (code_min
& KP_CONN_DATA_FLAGS_WINDOWUSED
);
363 __u32 conn_id
= cor_pull_u32(skb
);
364 __u64 seqno
= cor_pull_u48(skb
);
365 __u32 datalength
= 0;
368 if (unlikely(cor_parse_conndata_length(skb
, &datalength
) != 0))
371 if (unlikely(datalength
== 0))
374 data
= cor_pull_skb(skb
, datalength
);
377 cor_conn_rcv(nb
, 0, data
, datalength
, conn_id
, seqno
,
/*
 * Dispatch one kernel-packet command byte to its handler based on the
 * major code: KP_MISC -> _cor_kernel_packet_misc (its return value is
 * propagated), KP_ACK_CONN -> cor_parse_ack_conn, KP_CONN_DATA ->
 * cor_parse_conndata. The `code` parameter and the remaining return
 * statements are on lines not visible in this extract.
 */
381 static int __cor_kernel_packet(struct cor_neighbor
*nb
, struct sk_buff
*skb
,
382 int *ping_rcvd
, __u32
*pingcookie
, __u64
*bytes_acked
,
385 __u8 code_maj
= kp_maj(code
);
386 __u8 code_min
= kp_min(code
);
388 if (code_maj
== KP_MISC
) {
389 return _cor_kernel_packet_misc(nb
, skb
, ping_rcvd
,
390 pingcookie
, code_min
);
391 } else if (code_maj
== KP_ACK_CONN
) {
392 cor_parse_ack_conn(nb
, skb
, code_min
, bytes_acked
);
394 } else if (code_maj
== KP_CONN_DATA
) {
395 cor_parse_conndata(nb
, skb
, code_min
);
/*
 * Parse one length-validated kernel packet: pull the 48-bit packet
 * seqno, handle the mandatory INIT_SESSION handshake while
 * sessionid_rcv_needed is set (matching sessionid clears the flag and
 * stops unicast announces), then loop over command bytes via
 * __cor_kernel_packet(). Afterwards: feed bytes_acked into neighbor
 * congestion-window accounting, send a slow or fast ack if ackneeded
 * requires one, and send a pong (with pkg_rcv_start for delay
 * measurement) if a ping was seen.
 *
 * NOTE(review): extract is incomplete — declarations of `code`,
 * `sessionid` and `ping_rcvd`, the loop construct, and several branch
 * bodies are on lines not visible here.
 */
403 static void _cor_kernel_packet(struct cor_neighbor
*nb
, struct sk_buff
*skb
,
404 int ackneeded
, ktime_t pkg_rcv_start
)
407 __u32 pingcookie
= 0;
408 __u64 bytes_acked
= 0;
410 __u64 seqno
= cor_parse_u48(cor_pull_skb(skb
, 6));
/* session handshake: the first command after (re)connect must be
 * KP_MISC_INIT_SESSION with the expected sessionid */
412 if (unlikely(atomic_read(&(nb
->sessionid_rcv_needed
)) != 0)) {
413 __u8
*codeptr
= cor_pull_skb(skb
, 1);
421 if (kp_maj(code
) != KP_MISC
||
422 kp_min(code
) != KP_MISC_INIT_SESSION
)
425 sessionid
= cor_pull_be32(skb
);
426 if (sessionid
== nb
->sessionid
) {
427 atomic_set(&(nb
->sessionid_rcv_needed
), 0);
/* peer proved it sees our session; stop unicast announcements */
428 cor_announce_send_stop(nb
->dev
, nb
->mac
,
429 ANNOUNCE_TYPE_UNICAST
);
/* main parse loop body (loop header not visible): one command byte,
 * then its payload, until the skb is exhausted or a handler fails */
436 __u8
*codeptr
= cor_pull_skb(skb
, 1);
444 if (__cor_kernel_packet(nb
, skb
, &ping_rcvd
,
445 &pingcookie
, &bytes_acked
, code
) != 0)
450 cor_nbcongwin_data_acked(nb
, bytes_acked
);
/* third cor_send_ack arg selects fast (1) vs slow (0) ack */
452 if (ackneeded
== ACK_NEEDED_SLOW
)
453 cor_send_ack(nb
, seqno
, 0);
454 else if (ackneeded
== ACK_NEEDED_FAST
)
455 cor_send_ack(nb
, seqno
, 1);
458 cor_send_pong(nb
, pingcookie
, pkg_rcv_start
);
/*
 * Length pre-check for a KP_MISC command: advance the skb past the
 * command's fixed-size payload and fail (nonzero — the returns are on
 * lines not visible here) if the skb is too short. Sizes mirror the
 * parsers: init_session/ping 4, pong 12, ack 6, connect 21,
 * connect_success 6, reset_conn 4, set_max_cmsg_delay 20 (5 x 4),
 * padding 0.
 */
461 static int _cor_kernel_packet_checklen_misc(struct sk_buff
*skb
, __u8 code_min
)
463 if (code_min
== KP_MISC_PADDING
) {
464 } else if (code_min
== KP_MISC_INIT_SESSION
||
465 code_min
== KP_MISC_PING
) {
466 if (cor_pull_skb(skb
, 4) == 0)
468 } else if (code_min
== KP_MISC_PONG
) {
469 if (cor_pull_skb(skb
, 12) == 0)
471 } else if (code_min
== KP_MISC_ACK
) {
472 if (cor_pull_skb(skb
, 6) == 0)
474 } else if (code_min
== KP_MISC_CONNECT
) {
475 if (cor_pull_skb(skb
, 21) == 0)
477 } else if (code_min
== KP_MISC_CONNECT_SUCCESS
) {
478 if (cor_pull_skb(skb
, 6) == 0)
480 } else if (code_min
== KP_MISC_RESET_CONN
) {
481 if (cor_pull_skb(skb
, 4) == 0)
483 } else if (code_min
== KP_MISC_SET_MAX_CMSG_DELAY
) {
484 if (cor_pull_skb(skb
, 20) == 0)
/*
 * Length pre-check for a KP_ACK_CONN command: 4 bytes of conn id plus
 * the flag-dependent payload size computed by cor_ack_conn_len(). The
 * code_min parameter and the return statements are on lines not
 * visible in this extract.
 */
493 static int _cor_kernel_packet_checklen_ackconn(struct sk_buff
*skb
,
496 if (cor_pull_skb(skb
, 4 + cor_ack_conn_len(code_min
)) == 0)
/*
 * Length pre-check for a KP_CONN_DATA command: 10 fixed bytes (4 conn
 * id + 6 seqno), then the variable-length datalength header, then the
 * payload itself. The datalength declaration and return statements are
 * on lines not visible in this extract.
 */
501 static int _cor_kernel_packet_checklen_conndata(struct sk_buff
*skb
,
506 if (cor_pull_skb(skb
, 10) == 0)
509 if (unlikely(cor_parse_conndata_length(skb
, &datalength
) != 0))
512 if (cor_pull_skb(skb
, datalength
) == 0)
/*
 * Dispatch a length pre-check for one command byte to the per-major
 * checker, mirroring the dispatch in __cor_kernel_packet(). The
 * fall-through return for an unknown major code is on a line not
 * visible in this extract.
 */
518 static int _cor_kernel_packet_checklen(struct sk_buff
*skb
, __u8 code
)
520 __u8 code_maj
= kp_maj(code
);
521 __u8 code_min
= kp_min(code
);
523 if (code_maj
== KP_MISC
)
524 return _cor_kernel_packet_checklen_misc(skb
, code_min
);
525 else if (code_maj
== KP_ACK_CONN
)
526 return _cor_kernel_packet_checklen_ackconn(skb
, code_min
);
527 else if (code_maj
== KP_CONN_DATA
)
528 return _cor_kernel_packet_checklen_conndata(skb
, code_min
);
/*
 * Validate the whole kernel packet on a throwaway skb clone/position:
 * 6-byte seqno first, then loop over command bytes (loop construct and
 * the `code` derivation from codeptr are on lines not visible here),
 * length-checking each command. Nonzero result means the packet is
 * truncated and must be discarded before any state is touched.
 */
533 static int cor_kernel_packet_checklen(struct sk_buff
*skb
)
535 if (cor_pull_skb(skb
, 6) == 0) /* seqno */
539 __u8
*codeptr
= cor_pull_skb(skb
, 1);
546 if (unlikely(_cor_kernel_packet_checklen(skb
, code
) != 0))
/*
 * Public entry point for a received kernel packet. Records the receive
 * timestamp (used for pong delay measurement), saves skb->data/len
 * (presumably to rewind the skb between the checklen pass and the real
 * parse — the rewind lines are not visible in this extract; TODO
 * confirm), runs the length validation, and only then parses for real
 * via _cor_kernel_packet(). Truncated packets are silently discarded.
 * The third parameter (ackneeded) is on a line not visible here.
 */
552 void cor_kernel_packet(struct cor_neighbor
*nb
, struct sk_buff
*skb
,
555 unsigned char *data
= skb
->data
;
556 unsigned int len
= skb
->len
;
558 ktime_t pkg_rcv_start
= ktime_get();
560 if (unlikely(cor_kernel_packet_checklen(skb
) != 0)) {
561 /* printk(KERN_ERR "kpacket discard\n"); */
568 _cor_kernel_packet(nb
, skb
, ackneeded
, pkg_rcv_start
);
/* module license tag, required for GPL-only kernel symbols */
573 MODULE_LICENSE("GPL");