/*
 * Connection oriented routing
 * Copyright (C) 2007-2021 Michael Blizek
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version 2
 * of the License, or (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
 * 02110-1301, USA.
 */
21 #include <linux/version.h>
22 #include <linux/kernel.h>
23 #include <linux/init.h>
/* Slab cache for struct cor_rcvooo_buf (out-of-order receive element
 * bookkeeping); created in cor_rcv_init(), destroyed in cor_rcv_exit2(). */
static struct kmem_cache *cor_rcvooo_buf_slab;
/*
 * cor_reset_ooo_queue - drop every element of the out-of-order reorder queue
 * of an inbound connection and reset the OOO accounting counters.
 *
 * NOTE(review): this excerpt is whitespace-mangled and several source lines
 * (braces, list_del/free calls) are missing; "[gap]" comments mark the holes.
 * Tokens are otherwise reproduced unchanged.
 */
void cor_reset_ooo_queue(struct cor_conn *src_in_lx)
/* [gap: opening brace missing in excerpt] */
	/* only valid for connections whose source is a neighbor (SOURCE_IN) */
	BUG_ON(src_in_lx->sourcetype != SOURCE_IN);

	/* pop elements until the reorder queue is empty */
	while (list_empty(&(src_in_lx->source.in.reorder_queue)) == 0) {
		struct cor_rcvooo *r = container_of(
				src_in_lx->source.in.reorder_queue.next,
				struct cor_rcvooo, lh);
		/* [gap: lines missing in excerpt — presumably the list_del
		 * of r->lh; TODO confirm against original source] */
		if (r->type == RCVOOO_BUF) {
			/* buffer-backed element: release accounted memory */
			struct cor_rcvooo_buf *rb = container_of(r,
					struct cor_rcvooo_buf, r);
			src_in_lx->source.in.reorder_memused -= (rb->len +
					sizeof(struct cor_rcvooo_buf));
			/* [gap: line missing in excerpt — presumably frees
			 * rb->data] */
			kmem_cache_free(cor_rcvooo_buf_slab, rb);
		} else if (r->type == RCVOOO_SKB) {
			/* skb-backed element */
			struct cor_skb_procstate *ps = container_of(r,
					struct cor_skb_procstate,
			/* [gap: third container_of argument missing in
			 * excerpt — presumably funcstate.rcv_ooo.r] */
			struct sk_buff *skb = cor_skb_from_pstate(ps);
			src_in_lx->source.in.reorder_memused -=
					ps->funcstate.rcv_ooo.skb_memused;
			/* [gap: lines missing in excerpt — presumably frees
			 * the skb and closes the loop] */

	/* queue is now empty: counters must be back to zero */
	src_in_lx->source.in.small_ooo_packets = 0;
	BUG_ON(src_in_lx->source.in.reorder_memused != 0);

	/* re-run buffer-space accounting now that memory usage dropped */
	cor_account_bufspace(src_in_lx);
/* [gap: closing brace missing in excerpt] */
/*
 * cor_drain_ooo_queue_buf - try to deliver one buffer-backed out-of-order
 * element whose data has become in-order.  Appears to return nonzero when
 * the element could only be partially delivered — TODO confirm; the return
 * statements fall in gaps of this excerpt.
 *
 * NOTE(review): excerpt is missing many lines ("[gap]" markers); rc is used
 * below but its declaration sits in a gap.
 */
static int cor_drain_ooo_queue_buf(struct cor_conn *src_in_l,
		struct cor_rcvooo *r, __u8 flush)
/* [gap: opening brace / declarations (incl. rc) missing in excerpt] */
	struct cor_rcvooo_buf *rb = container_of(r, struct cor_rcvooo_buf, r);
	__u32 data_offset = 0;

	/* element starts before next_seqno: its head overlaps data that was
	 * already delivered */
	if (unlikely(cor_seqno_after(src_in_l->source.in.next_seqno,
	/* [gap: rest of condition missing in excerpt] */
		/* NOTE(review): operand order looks inverted for the
		 * "next_seqno after r->seqno" case — verify against
		 * cor_seqno_clean() semantics */
		__u64 overlap = cor_seqno_clean(r->seqno -
				src_in_l->source.in.next_seqno);

		if (overlap >= rb->len)
		/* [gap: lines missing in excerpt] */

		src_in_l->source.in.reorder_memused -= overlap;
		/* [gap] */
		data_offset += overlap;
	/* [gap: lines missing in excerpt] */

	BUG_ON(cor_seqno_eq(src_in_l->source.in.next_seqno, r->seqno) == 0);
	rc = cor_receive_buf(src_in_l, rb->data + data_offset, rb->len,
			r->windowused, flush);
	/* [gap] */
	src_in_l->source.in.next_seqno += rc;

	if (unlikely(rc != rb->len)) {
		/* partial delivery: keep the undelivered tail queued */
		src_in_l->source.in.reorder_memused -= rc;
		/* [gap: lines missing in excerpt] */
		memmove(rb->data, rb->data + data_offset, rb->len);
		/* [gap] */
		/* element shrank across the "small packet" threshold */
		if (rb->len + data_offset > SMALL_OOO_PACKET_MAXSIZE &&
				rb->len <= SMALL_OOO_PACKET_MAXSIZE) {
			src_in_l->source.in.small_ooo_packets++;
			BUG_ON(src_in_l->source.in.small_ooo_packets == 0);
	/* [gap: lines missing in excerpt — presumably close the partial
	 * branch and start the fully-delivered case] */
		src_in_l->source.in.reorder_memused -= (rb->len +
				sizeof(struct cor_rcvooo_buf));
		/* [gap: lines missing in excerpt — presumably frees
		 * rb->data] */
		if (rb->len <= SMALL_OOO_PACKET_MAXSIZE) {
			BUG_ON(src_in_l->source.in.small_ooo_packets == 0);
			src_in_l->source.in.small_ooo_packets--;
		/* [gap] */
		kmem_cache_free(cor_rcvooo_buf_slab, rb);
/* [gap: function tail (returns, closing braces) missing in excerpt] */
/*
 * cor_drain_ooo_queue_skb - deliver one skb-backed out-of-order element that
 * has become in-order, either via the flat-buffer path (rcv_as_buf) or by
 * handing the whole skb to cor_receive_skb().
 *
 * NOTE(review): excerpt has gaps; rcv_as_buf and rc are used below but their
 * declarations fall in gaps.
 */
static int cor_drain_ooo_queue_skb(struct cor_conn *src_in_l,
		struct cor_rcvooo *r, __u8 flush)
/* [gap: opening brace / declarations missing in excerpt] */
	struct cor_skb_procstate *ps = container_of(r, struct cor_skb_procstate,
			funcstate.rcv_ooo.r);
	struct sk_buff *skb = cor_skb_from_pstate(ps);
	/* [gap] */

	/* head of the skb overlaps data that was already delivered */
	if (unlikely(cor_seqno_after(src_in_l->source.in.next_seqno,
	/* [gap: rest of condition missing in excerpt] */
		/* NOTE(review): operand order — see matching comment in
		 * cor_drain_ooo_queue_buf */
		__u64 overlap = cor_seqno_clean(r->seqno -
				src_in_l->source.in.next_seqno);

		if (overlap >= skb->len) {
			/* fully redundant: drop the skb and its accounting */
			src_in_l->source.in.reorder_memused -=
					ps->funcstate.rcv_ooo.skb_memused;
			/* [gap: lines missing in excerpt] */

		/* trim the redundant head in place */
		skb->data += overlap;
	/* [gap: lines missing in excerpt] */

	BUG_ON(cor_seqno_eq(src_in_l->source.in.next_seqno, r->seqno) == 0);
	BUG_ON(skb->len <= 0);

	if (unlikely(rcv_as_buf != 0)) {
		/* copy path: feed the skb payload as a flat buffer */
		__u32 rc = cor_receive_buf(src_in_l, skb->data, skb->len,
				r->windowused, flush);
		/* [gap] */
		BUG_ON(rc > skb->len);
		/* [gap] */
		src_in_l->source.in.next_seqno += rc;

		if (unlikely(rc != skb->len)) {
		/* [gap: partial-delivery handling missing in excerpt] */

		src_in_l->source.in.reorder_memused -=
				ps->funcstate.rcv_ooo.skb_memused;
	/* [gap: lines missing in excerpt — presumably start of the
	 * zero-copy else branch] */
		__u32 len = skb->len;
		/* [gap] */
		/* save memused: cor_receive_skb may consume ps with the skb */
		__u32 memused = ps->funcstate.rcv_ooo.skb_memused;
		/* [gap] */
		rc = cor_receive_skb(src_in_l, skb, 0, flush);
		/* [gap] */
		src_in_l->source.in.next_seqno += rc;

		if (unlikely(rc != len)) {
			BUG_ON(rc > skb->len);
		/* [gap: lines missing in excerpt — the orphaned argument
		 * below presumably belongs to a requeue call] */
			&(src_in_l->source.in.reorder_queue));
		/* [gap] */
		src_in_l->source.in.reorder_memused -= memused;
/* [gap: function tail (returns, closing braces) missing in excerpt] */
/*
 * cor_drain_ooo_queue - deliver queued out-of-order elements while the
 * element at the queue head has become in-order (its seqno is not after
 * next_seqno).  Stops when an element can only be partially delivered.
 *
 * NOTE(review): rc is used below but declared in a gap of this excerpt.
 */
void cor_drain_ooo_queue(struct cor_conn *src_in_l)
/* [gap: opening brace / declarations missing in excerpt] */
	BUG_ON(src_in_l->sourcetype != SOURCE_IN);

	while (list_empty(&(src_in_l->source.in.reorder_queue)) == 0) {
		struct cor_rcvooo *r = container_of(
				src_in_l->source.in.reorder_queue.next,
				struct cor_rcvooo, lh);
		__u8 flush = r->flush;
		/* [gap] */

		/* head element still starts in the future: nothing in order */
		if (cor_seqno_before(src_in_l->source.in.next_seqno, r->seqno))
		/* [gap: loop-exit statement missing in excerpt] */

		/* do not flush if there are more ooo packets in queue */
		if (src_in_l->source.in.reorder_queue.prev !=
				src_in_l->source.in.reorder_queue.next)
		/* [gap: statement missing in excerpt — presumably clears
		 * flush] */

		if (r->type == RCVOOO_BUF)
			rc = cor_drain_ooo_queue_buf(src_in_l, r, flush);
		else if (r->type == RCVOOO_SKB)
			rc = cor_drain_ooo_queue_skb(src_in_l, r, flush);
		/* [gap: lines missing in excerpt — presumably else BUG()] */

		/* nonzero rc: element only partially delivered, stop here */
		if (unlikely(rc != 0)) {
		/* [gap: lines missing in excerpt] */

	/* invariant: memory accounting is zero iff the queue is empty */
	BUG_ON(list_empty(&(src_in_l->source.in.reorder_queue)) != 0 &&
			src_in_l->source.in.reorder_memused != 0);
	BUG_ON(list_empty(&(src_in_l->source.in.reorder_queue)) == 0 &&
			src_in_l->source.in.reorder_memused == 0);
	/* [gap] */
	cor_account_bufspace(src_in_l);
/* [gap: closing brace missing in excerpt] */
/*
 * cor_rcvooo_len - payload length in bytes of an out-of-order element
 * (buffer or skb backed).  The return statements fall in gaps of this
 * excerpt (presumably rb->len / skb->len).
 */
static __u32 cor_rcvooo_len(struct cor_rcvooo *r)
/* [gap: opening brace missing in excerpt] */
	if (r->type == RCVOOO_BUF) {
		struct cor_rcvooo_buf *rb = container_of(r,
				struct cor_rcvooo_buf, r);
		/* [gap: return missing in excerpt] */
	} else if (r->type == RCVOOO_SKB) {
		struct sk_buff *skb = cor_skb_from_pstate(container_of(r,
				struct cor_skb_procstate, funcstate.rcv_ooo.r));
		/* [gap: return and function tail missing in excerpt] */
/*
 * _cor_conn_rcv_ooo_buf_checkmerge - resolve the queue link lh_rcvooo to a
 * cor_rcvooo_buf if it refers to a buffer-backed element (candidate for
 * merging with newly arrived data); the "not mergeable" early returns fall
 * in gaps of this excerpt.
 */
static struct cor_rcvooo_buf *_cor_conn_rcv_ooo_buf_checkmerge(
		struct cor_conn *src_in_l, struct list_head *lh_rcvooo)
/* [gap: opening brace missing in excerpt] */
	struct cor_rcvooo *r;
	struct cor_rcvooo_buf *rb;
	/* [gap] */

	/* the list head itself: no element there */
	if (lh_rcvooo == &(src_in_l->source.in.reorder_queue))
	/* [gap: return missing in excerpt] */

	r = container_of(lh_rcvooo, struct cor_rcvooo, lh);
	/* skb-backed elements are not merged */
	if (r->type != RCVOOO_BUF)
	/* [gap: return missing in excerpt] */

	rb = container_of(r, struct cor_rcvooo_buf, r);
/* [gap: function tail (presumably returns rb) missing in excerpt] */
/*
 * _cor_conn_rcv_ooo_accountmem - grow the reorder memory accounting,
 * rejecting on counter overflow or when the global buffer-space accounting
 * refuses the growth.  Appears to return nonzero on failure — TODO confirm;
 * the returns fall in gaps of this excerpt.
 */
static int _cor_conn_rcv_ooo_accountmem(struct cor_conn *src_in_l,
/* [gap: rest of signature (presumably a __u32 new_bytes parameter) and
 * opening brace missing in excerpt] */
	/* unsigned wraparound check */
	if (unlikely(src_in_l->source.in.reorder_memused + new_bytes <
			src_in_l->source.in.reorder_memused))
	/* [gap: failure return missing in excerpt] */

	src_in_l->source.in.reorder_memused += new_bytes;

	if (unlikely(cor_account_bufspace(src_in_l))) {
		/* rejected: roll the accounting back */
		src_in_l->source.in.reorder_memused -= new_bytes;
		cor_account_bufspace(src_in_l);
/* [gap: function tail (failure + success returns) missing in excerpt] */
/*
 * _cor_conn_rcv_ooo_merge - merge newly received out-of-order data with the
 * seqno-adjacent buffer-backed element(s) merge_prev and/or merge_next into
 * one freshly kmalloc'ed buffer, fixing up memory and small-packet
 * accounting, then send an ack if needed.
 *
 * NOTE(review): tmpbuf is used below but declared in a gap of this excerpt.
 */
static void _cor_conn_rcv_ooo_merge(struct cor_conn *src_in_l, char *data,
		__u32 len, __u64 seqno, __u8 windowused, __u8 flush,
		struct cor_rcvooo_buf *merge_prev,
		struct cor_rcvooo_buf *merge_next)
/* [gap: opening brace and tmpbuf declaration missing in excerpt] */
	__u32 tmpbuf_len = 0;
	__u32 tmpbuf_offset = 0;
	/* [gap] */
	struct cor_rcvooo_buf *rb;

	/* [gap: lines missing in excerpt — presumably guards on
	 * merge_prev/merge_next being nonzero and adds len] */
	tmpbuf_len += merge_prev->len;
	/* [gap] */
	tmpbuf_len += merge_next->len;

	tmpbuf = kmalloc(tmpbuf_len, GFP_ATOMIC);
	if (unlikely(tmpbuf == 0))
	/* [gap: failure return missing in excerpt] */

	/* merging two elements into one releases a cor_rcvooo_buf header, so
	 * the net accounting change can be negative */
	if (merge_prev != 0 && merge_next != 0 && len <
			sizeof(struct cor_rcvooo_buf)) {
		src_in_l->source.in.reorder_memused += len -
				sizeof(struct cor_rcvooo_buf);
	/* [gap: presumably the else branch] */
		__u32 new_bytes = len;
		if (merge_prev != 0 && merge_next != 0)
			new_bytes -= sizeof(struct cor_rcvooo_buf);

		if (unlikely(_cor_conn_rcv_ooo_accountmem(src_in_l,
		/* [gap: rest of call and failure handling missing] */

	/* concatenate: prev payload, then new data, then next payload */
	if (merge_prev != 0) {
		memcpy(tmpbuf + tmpbuf_offset, merge_prev->data,
		/* [gap: length argument missing in excerpt] */
		tmpbuf_offset += merge_prev->len;
		/* merged element keeps the oldest element's windowused */
		windowused = merge_prev->r.windowused;
	/* [gap] */
	memcpy(tmpbuf + tmpbuf_offset, data, len);
	tmpbuf_offset += len;
	if (merge_next != 0) {
		memcpy(tmpbuf + tmpbuf_offset, merge_next->data,
		/* [gap: length argument missing in excerpt] */
		tmpbuf_offset += merge_next->len;
	/* [gap] */
	BUG_ON(tmpbuf_offset != tmpbuf_len);

	/* release the old payload buffers and their small-packet counts */
	if (merge_prev != 0) {
		kfree(merge_prev->data);
		merge_prev->data = 0;
		if (merge_prev->len <= SMALL_OOO_PACKET_MAXSIZE) {
			BUG_ON(src_in_l->source.in.small_ooo_packets == 0);
			src_in_l->source.in.small_ooo_packets--;
	/* [gap] */
	if (merge_next != 0) {
		kfree(merge_next->data);
		merge_next->data = 0;
		if (merge_next->len <= SMALL_OOO_PACKET_MAXSIZE) {
			BUG_ON(src_in_l->source.in.small_ooo_packets == 0);
			src_in_l->source.in.small_ooo_packets--;
		/* [gap] */
		/* merged element keeps the newest element's flush flag */
		flush = merge_next->r.flush;

		/* when both neighbors merge, merge_next's element is freed */
		if (merge_prev != 0) {
			list_del(&(merge_next->r.lh));
			kmem_cache_free(cor_rcvooo_buf_slab, merge_next);
	/* [gap: lines missing in excerpt] */

	/* pick the surviving element to carry the merged buffer */
	if (merge_prev != 0) {
	/* [gap: presumably rb = merge_prev, then the else branch] */
		BUG_ON(merge_next == 0);
	/* [gap: presumably rb = merge_next plus seqno adjustment] */

	rb->len = tmpbuf_len;
	rb->r.windowused = windowused;
	/* [gap: presumably stores tmpbuf and flush into rb] */

	if (tmpbuf_len <= SMALL_OOO_PACKET_MAXSIZE) {
		src_in_l->source.in.small_ooo_packets++;
		BUG_ON(src_in_l->source.in.small_ooo_packets == 0);
	/* [gap] */
	cor_send_ack_conn_ifneeded(src_in_l, seqno, len);
/* [gap: closing brace missing in excerpt] */
/*
 * _cor_conn_rcv_ooo_nomerge - queue newly received out-of-order data as a
 * fresh cor_rcvooo_buf element before next_rcvooo, copying the payload and
 * updating memory/small-packet accounting; sends an ack if needed.
 */
static void _cor_conn_rcv_ooo_nomerge(struct cor_conn *src_in_l, char *data,
		__u32 len, __u64 seqno, __u8 windowused, __u8 flush,
		struct list_head *next_rcvooo)
/* [gap: opening brace missing in excerpt] */
	struct cor_rcvooo_buf *rb;

	/* avoid oom if a neighbor sends very small packets */
	if (len <= SMALL_OOO_PACKET_MAXSIZE &&
			src_in_l->source.in.small_ooo_packets >=
			MAX_SMALL_OOO_PACKETS_PER_CONN)
	/* [gap: drop/return missing in excerpt] */

	if (unlikely(_cor_conn_rcv_ooo_accountmem(src_in_l,
			len + sizeof(struct cor_rcvooo_buf))))
	/* [gap: failure return missing in excerpt] */

	rb = kmem_cache_alloc(cor_rcvooo_buf_slab, GFP_ATOMIC);
	if (unlikely(rb == 0)) {
		/* allocation failed: roll back the accounting done above */
		src_in_l->source.in.reorder_memused -=
				(len + sizeof(struct cor_rcvooo_buf));
		cor_account_bufspace(src_in_l);
	/* [gap: return and closing brace missing in excerpt] */

	memset(rb, 0, sizeof(struct cor_rcvooo_buf));

	rb->data = kmalloc(len, GFP_ATOMIC);
	if (unlikely(rb->data == 0)) {
		kmem_cache_free(cor_rcvooo_buf_slab, rb);
		/* [gap] */
		src_in_l->source.in.reorder_memused -=
				(len + sizeof(struct cor_rcvooo_buf));
		cor_account_bufspace(src_in_l);
	/* [gap: return and closing brace missing in excerpt] */

	memcpy(rb->data, data, len);

	rb->r.type = RCVOOO_BUF;
	/* [gap: line missing in excerpt — presumably stores seqno] */
	rb->r.windowused = windowused;
	/* [gap: lines missing in excerpt — presumably stores flush and
	 * rb->len] */
	list_add_tail(&(rb->r.lh), next_rcvooo);

	if (len <= SMALL_OOO_PACKET_MAXSIZE) {
		src_in_l->source.in.small_ooo_packets++;
		BUG_ON(src_in_l->source.in.small_ooo_packets == 0);
	/* [gap] */
	cor_send_ack_conn_ifneeded(src_in_l, seqno, len);
/* [gap: closing brace missing in excerpt] */
/*
 * _cor_conn_rcv_ooo_buf - insert out-of-order data as a buffer, merging with
 * the buffer-backed neighbors before/after the insertion point when they are
 * exactly seqno-adjacent; otherwise queue a fresh element.
 */
static void _cor_conn_rcv_ooo_buf(struct cor_conn *src_in_l, char *data,
		__u32 len, __u64 seqno, __u8 windowused, __u8 flush,
		struct list_head *next_rcvooo)
/* [gap: opening brace missing in excerpt] */
	struct cor_rcvooo_buf *merge_prev;
	struct cor_rcvooo_buf *merge_next;
	/* [gap] */

	merge_prev = _cor_conn_rcv_ooo_buf_checkmerge(src_in_l,
	/* [gap: argument missing in excerpt — presumably next_rcvooo->prev] */
	if (merge_prev != 0) {
		__u64 next_seqno = merge_prev->r.seqno + merge_prev->len;
		/* the previous element must end at or before our start */
		BUG_ON(cor_seqno_after(next_seqno, seqno));
		/* merge only when exactly adjacent */
		if (cor_seqno_eq(next_seqno, seqno) == 0)
		/* [gap: presumably clears merge_prev; brace missing] */

	merge_next = _cor_conn_rcv_ooo_buf_checkmerge(src_in_l, next_rcvooo);
	if (merge_next != 0) {
		__u64 next_seqno = seqno + len;
		/* we must end at or before the next element's start */
		BUG_ON(cor_seqno_after(next_seqno, merge_next->r.seqno));
		if (cor_seqno_eq(next_seqno, merge_next->r.seqno) == 0)
		/* [gap: presumably clears merge_next; brace missing] */

	if (merge_prev == 0 && merge_next == 0) {
		/* [gap] */
		_cor_conn_rcv_ooo_nomerge(src_in_l, data, len, seqno,
				windowused, flush, next_rcvooo);
	/* [gap: presumably the else branch] */
		_cor_conn_rcv_ooo_merge(src_in_l, data, len, seqno,
				windowused, flush, merge_prev, merge_next);
/* [gap: closing braces missing in excerpt] */
/*
 * _cor_conn_rcv_ooo_skb - queue an out-of-order skb directly (zero-copy),
 * keeping its bookkeeping in the skb's cor_skb_procstate; sends an ack if
 * needed.
 */
static void _cor_conn_rcv_ooo_skb(struct cor_conn *src_in_l,
		struct sk_buff *skb, __u64 seqno, __u8 windowused, __u8 flush,
		struct list_head *next_rcvooo)
/* [gap: opening brace missing in excerpt] */
	struct cor_rcvooo *newr;
	struct cor_skb_procstate *ps = cor_skb_pstate(skb);
	/* [gap] */

	memset(&(ps->funcstate), 0, sizeof(ps->funcstate));
	/* accounts the skb head struct plus — presumably — its data size;
	 * the rest of this expression is missing from the excerpt */
	ps->funcstate.rcv_ooo.skb_memused = sizeof(struct sk_buff) +
	/* [gap: rest of expression missing in excerpt] */

	if (unlikely(_cor_conn_rcv_ooo_accountmem(src_in_l,
			ps->funcstate.rcv_ooo.skb_memused))) {
	/* [gap: failure handling missing in excerpt — presumably frees the
	 * skb and returns] */

	newr = &(ps->funcstate.rcv_ooo.r);
	newr->type = RCVOOO_SKB;
	/* [gap: line missing in excerpt — presumably stores seqno] */
	newr->windowused = windowused;
	/* [gap: line missing in excerpt — presumably stores flush] */
	list_add_tail(&(newr->lh), next_rcvooo);

	cor_send_ack_conn_ifneeded(src_in_l, seqno, skb->len);
/* [gap: closing brace missing in excerpt] */
/*
 * __cor_conn_rcv_ooo - clip the new out-of-order data against the elements
 * before and after the chosen insertion point (dropping any overlap), then
 * queue it either as a copied buffer or as the skb itself.
 */
static void __cor_conn_rcv_ooo(struct cor_conn *src_in_l, struct sk_buff *skb,
		char *data, __u32 len, __u64 seqno, __u8 windowused, __u8 flush,
		struct list_head *prev_rcvooo_lh)
/* [gap: opening brace missing in excerpt] */
	struct list_head *reorder_queue = &(src_in_l->source.in.reorder_queue);
	struct list_head *next_rcvooo_lh = prev_rcvooo_lh->next;

	if (prev_rcvooo_lh != reorder_queue) {
		struct cor_rcvooo *prev_rcvooo = container_of(prev_rcvooo_lh,
				struct cor_rcvooo, lh);
		__u32 currlen = cor_rcvooo_len(prev_rcvooo);

		/* the previous element overlaps our head */
		if (cor_seqno_after(prev_rcvooo->seqno + currlen, seqno)) {
			__u64 overlap = cor_seqno_clean(prev_rcvooo->seqno +
			/* [gap: rest of expression and the overlap trimming
			 * missing in excerpt] */
			if (unlikely(len <= overlap))
			/* [gap: lines missing in excerpt] */

	if (next_rcvooo_lh != reorder_queue) {
		struct cor_rcvooo *next_rcvooo = container_of(next_rcvooo_lh,
				struct cor_rcvooo, lh);
		/* [gap] */

		/* insertion-point invariant: next element starts after us */
		if (unlikely(cor_seqno_before_eq(next_rcvooo->seqno, seqno)))
		/* [gap: lines missing in excerpt] */

		/* clip our tail against the next element */
		if (unlikely(cor_seqno_before(next_rcvooo->seqno, seqno + len)))
			len = cor_seqno_clean(next_rcvooo->seqno - seqno);
	/* [gap] */

	if (unlikely(len == 0)) {
	/* [gap: lines missing in excerpt] */

	/* small or clipped data goes through the copy path; large intact
	 * skbs are queued zero-copy */
	if (skb == 0 || len < 1024 ||
			skb->data != ((unsigned char *) data) ||
	/* [gap: rest of condition missing in excerpt] */
		_cor_conn_rcv_ooo_buf(src_in_l, data, len, seqno, windowused,
				flush, next_rcvooo_lh);
	/* [gap: presumably the else branch] */
		_cor_conn_rcv_ooo_skb(src_in_l, skb, seqno, windowused, flush,
/* [gap: final argument and closing braces missing in excerpt] */
/*
 * _cor_conn_rcv_ooo - walk the reorder queue backwards to find the last
 * element whose seqno is <= the new data's seqno, then insert after it via
 * __cor_conn_rcv_ooo().
 */
static void _cor_conn_rcv_ooo(struct cor_conn *src_in_l, struct sk_buff *skb,
		char *data, __u32 len, __u64 seqno, __u8 windowused, __u8 flush)
/* [gap: opening brace missing in excerpt] */
	struct list_head *reorder_queue = &(src_in_l->source.in.reorder_queue);
	struct list_head *currlh = reorder_queue->prev;

	/* when an skb is passed, data/len must describe exactly its payload */
	BUG_ON(skb != 0 && skb->data != ((unsigned char *)data));
	BUG_ON(skb != 0 && skb->len != len);

	while (currlh != reorder_queue) {
		struct cor_rcvooo *currr = container_of(currlh,
				struct cor_rcvooo, lh);

		if (cor_seqno_before_eq(currr->seqno, seqno))
		/* [gap: loop-exit statement missing in excerpt] */

		currlh = currlh->prev;
	/* [gap] */

	__cor_conn_rcv_ooo(src_in_l, skb, data, len, seqno, windowused, flush,
/* [gap: final argument (presumably currlh) and closing brace missing in
 * excerpt] */
/*
 * _cor_conn_rcv - locked receive path for one data packet of an inbound
 * connection: validates conn identity and receive window, parks future data
 * on the reorder queue, delivers in-order data (trimming overlap with bytes
 * already received), then drains the reorder queue and triggers acks.
 *
 * NOTE(review): rcvlen is used below but declared in a gap of this excerpt.
 */
static void _cor_conn_rcv(struct cor_neighbor *nb, struct cor_conn *src_in,
		__u32 conn_id, struct sk_buff *skb, char *data, __u32 len,
		__u64 seqno, __u8 windowused, __u8 flush)
/* [gap: opening brace / declarations missing in excerpt] */
	spin_lock_bh(&(src_in->rcv_lock));

	/* packet must belong to this (neighbor, conn_id) pair */
	if (cor_is_conn_in(src_in, nb, conn_id) == 0)
	/* [gap: bail-out missing in excerpt] */

	cor_set_last_act(src_in);

	/* entirely old data: everything before next_seqno was delivered */
	if (unlikely(cor_seqno_before(seqno + len,
			src_in->source.in.next_seqno)))
	/* [gap: bail-out missing in excerpt] */

	/* beyond both the announced and the remotely-known window limit */
	if (unlikely(unlikely(cor_seqno_after(seqno + len,
			src_in->source.in.window_seqnolimit)) &&
			cor_seqno_after(seqno + len,
			src_in->source.in.window_seqnolimit_remote)))
	/* [gap: bail-out missing in excerpt] */

	if (cor_seqno_after(seqno, src_in->source.in.next_seqno)) {
		/* future data: park it on the reorder queue */
		_cor_conn_rcv_ooo(src_in, skb, data, len, seqno, windowused,
	/* [gap: rest of call and presumably the else branch missing] */
		if (cor_seqno_after(src_in->source.in.next_seqno, seqno)) {
			/* drop the overlap with already-delivered data */
			__u64 overlap = cor_seqno_clean(
					src_in->source.in.next_seqno - seqno);
			/* [gap] */
			BUG_ON(overlap > len);
			/* [gap: pointer/len/seqno adjustment missing in
			 * excerpt] */
			rcvlen = cor_receive_buf(src_in, data, len, windowused,
			/* [gap: rest of call missing in excerpt] */
		} else if (skb != 0) {
			/* in-order skb: try zero-copy delivery first */
			__u32 skblen = skb->len;
			rcvlen = cor_receive_skb(src_in, skb, windowused,
			/* [gap: rest of call missing in excerpt] */
			if (unlikely(rcvlen < skblen))
			/* [gap: lines missing in excerpt] */
			rcvlen = cor_receive_buf(src_in, data, len, windowused,
			/* [gap: rest of call missing in excerpt] */

		if (likely(rcvlen > 0)) {
			src_in->source.in.next_seqno += rcvlen;
			/* [gap] */
			/* newly in-order bytes may unblock queued OOO data */
			cor_drain_ooo_queue(src_in);
			src_in->source.in.inorder_ack_needed = 1;
			cor_flush_buf(src_in);
			cor_send_ack_conn_ifneeded(src_in, 0, 0);
	/* [gap: lines missing in excerpt] */
	cor_send_ack_conn_ifneeded(src_in, 0, 0);
	/* [gap: lines missing in excerpt] */
	spin_unlock_bh(&(src_in->rcv_lock));
/* [gap: closing brace missing in excerpt] */
/*
 * cor_conn_rcv - entry point for a received connection data packet: look up
 * the conn by (neighbor, conn_id); on unknown conn_id send a reset back,
 * otherwise hand off to _cor_conn_rcv() and drop the conn reference.
 */
void cor_conn_rcv(struct cor_neighbor *nb, struct sk_buff *skb, char *data,
		__u32 len, __u32 conn_id, __u64 seqno, __u8 windowused,
/* [gap: rest of signature (presumably __u8 flush) and opening brace missing
 * in excerpt] */
	struct cor_conn *src_in;
	/* [gap: lines missing in excerpt] */

	src_in = cor_get_conn(nb, conn_id);
	/* [gap] */
	if (unlikely(src_in == 0)) {
		/* [gap: the commented-out debug printk below is truncated in
		 * this excerpt] */
		/* printk(KERN_DEBUG "unknown conn_id when receiving: %d",
		cor_send_reset_conn(nb, cor_get_connid_reverse(conn_id), 0);
	/* [gap: lines missing in excerpt — presumably skb free and return] */

	_cor_conn_rcv(nb, src_in, conn_id, skb, data, len, seqno, windowused,
	/* [gap: final argument missing in excerpt] */
	cor_conn_kref_put(src_in, "stack");
/* [gap: closing brace missing in excerpt] */
/*
 * cor_rcv_init - module init for the receive path: sanity-check that
 * cor_skb_procstate fits in 48 bytes (presumably the skb control buffer —
 * TODO confirm) and create the cor_rcvooo_buf slab cache.  The error and
 * success returns fall in gaps of this excerpt.
 */
int __init cor_rcv_init(void)
/* [gap: opening brace missing in excerpt] */
	BUG_ON(sizeof(struct cor_skb_procstate) > 48);

	cor_rcvooo_buf_slab = kmem_cache_create("cor_rcvooo_buf",
			sizeof(struct cor_rcvooo_buf), 8, 0, 0);
	if (unlikely(cor_rcvooo_buf_slab == 0))
/* [gap: function tail (returns, closing brace) missing in excerpt] */
/*
 * cor_rcv_exit2 - module teardown counterpart of cor_rcv_init(): destroy
 * the cor_rcvooo_buf slab cache and clear the pointer.
 */
void __exit cor_rcv_exit2(void)
/* [gap: opening brace missing in excerpt] */
	kmem_cache_destroy(cor_rcvooo_buf_slab);
	cor_rcvooo_buf_slab = 0;
/* [gap: closing brace missing in excerpt] */
/* module is GPL-licensed, matching the license header at the top of file */
MODULE_LICENSE("GPL");