/**
 * Connection oriented routing
 * Copyright (C) 2007-2021 Michael Blizek
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version 2
 * of the License, or (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
 * 02110-1301, USA.
 */

#include <linux/version.h>
#include <linux/kernel.h>
#include <linux/init.h>

#include "cor.h"

static struct kmem_cache *cor_rcvooo_buf_slab;
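
/* Discard the whole reorder queue of a connection: every queued
 * out-of-order entry is freed (kmalloc'd buffer or skb) and the memory
 * accounted for it in reorder_memused is returned.
 */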
void cor_reset_ooo_queue(struct cor_conn *src_in_lx)
{
	BUG_ON(src_in_lx->sourcetype != SOURCE_IN);

	while (list_empty(&(src_in_lx->source.in.reorder_queue)) == 0) {
		struct cor_rcvooo *r = container_of(
				src_in_lx->source.in.reorder_queue.next,
				struct cor_rcvooo, lh);

		list_del(&(r->lh));

		if (r->type == RCVOOO_BUF) {
			struct cor_rcvooo_buf *rb = container_of(r,
					struct cor_rcvooo_buf, r);

			src_in_lx->source.in.reorder_memused -= (rb->len +
					sizeof(struct cor_rcvooo_buf));

			kfree(rb->data);
			kmem_cache_free(cor_rcvooo_buf_slab, rb);
		} else if (r->type == RCVOOO_SKB) {
			struct cor_skb_procstate *ps = container_of(r,
					struct cor_skb_procstate,
					funcstate.rcv_ooo.r);
			struct sk_buff *skb = cor_skb_from_pstate(ps);

			src_in_lx->source.in.reorder_memused -=
					ps->funcstate.rcv_ooo.skb_memused;

			kfree_skb(skb);
		} else {
			BUG();
		}
	}

	src_in_lx->source.in.small_ooo_packets = 0;
	BUG_ON(src_in_lx->source.in.reorder_memused != 0);

	cor_account_bufspace(src_in_lx);
}
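
/* Deliver one buffered (RCVOOO_BUF) entry from the head of the reorder
 * queue to the in-order receive path. A prefix that was already
 * received (below next_seqno) is trimmed off first. Returns 0 if the
 * entry was consumed and freed, nonzero if the receive buffer accepted
 * only part of it and draining has to stop.
 */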
static int cor_drain_ooo_queue_buf(struct cor_conn *src_in_l,
		struct cor_rcvooo *r, __u8 flush)
{
	struct cor_rcvooo_buf *rb = container_of(r, struct cor_rcvooo_buf, r);

	__u32 data_offset = 0;
	__u32 rc;

	if (unlikely(cor_seqno_after(src_in_l->source.in.next_seqno,
			r->seqno))) {
		__u64 overlap = cor_seqno_clean(
				src_in_l->source.in.next_seqno - r->seqno);

		if (overlap >= rb->len)
			goto free;

		src_in_l->source.in.reorder_memused -= overlap;
		rb->len -= overlap;
		data_offset += overlap;
		r->seqno += overlap;
	}

	BUG_ON(cor_seqno_eq(src_in_l->source.in.next_seqno, r->seqno) == 0);

	rc = cor_receive_buf(src_in_l, rb->data + data_offset, rb->len, 0,
			flush);

	src_in_l->source.in.next_seqno += rc;

	if (unlikely(rc != rb->len)) {
		/* only part was accepted, keep the rest queued */
		src_in_l->source.in.reorder_memused -= rc;
		rb->len -= rc;
		data_offset += rc;
		r->seqno += rc;

		memmove(rb->data, rb->data + data_offset, rb->len);

		if (rb->len + data_offset > SMALL_OOO_PACKET_MAXSIZE &&
				rb->len <= SMALL_OOO_PACKET_MAXSIZE) {
			src_in_l->source.in.small_ooo_packets++;
			BUG_ON(src_in_l->source.in.small_ooo_packets == 0);
		}

		return 1;
	}

free:
	src_in_l->source.in.reorder_memused -= (rb->len +
			sizeof(struct cor_rcvooo_buf));

	list_del(&(r->lh));
	kfree(rb->data);

	if (rb->len <= SMALL_OOO_PACKET_MAXSIZE) {
		BUG_ON(src_in_l->source.in.small_ooo_packets == 0);
		src_in_l->source.in.small_ooo_packets--;
	}
	kmem_cache_free(cor_rcvooo_buf_slab, rb);

	return 0;
}
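
/* Deliver one queued (RCVOOO_SKB) entry from the head of the reorder
 * queue. If a stale prefix had to be trimmed off, the rest is copied
 * out like a plain buffer because the skb data pointer was moved;
 * otherwise the skb itself is handed to the receive path. Returns 0 if
 * the entry was consumed, nonzero if draining has to stop.
 */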
static int cor_drain_ooo_queue_skb(struct cor_conn *src_in_l,
		struct cor_rcvooo *r, __u8 flush)
{
	struct cor_skb_procstate *ps = container_of(r,
			struct cor_skb_procstate, funcstate.rcv_ooo.r);
	struct sk_buff *skb = cor_skb_from_pstate(ps);

	int rcv_as_buf = 0;

	if (unlikely(cor_seqno_after(src_in_l->source.in.next_seqno,
			r->seqno))) {
		__u64 overlap = cor_seqno_clean(
				src_in_l->source.in.next_seqno - r->seqno);

		if (overlap >= skb->len) {
			src_in_l->source.in.reorder_memused -=
					ps->funcstate.rcv_ooo.skb_memused;
			list_del(&(r->lh));
			kfree_skb(skb);
			return 0;
		}

		skb->data += overlap;
		skb->len -= overlap;
		r->seqno += overlap;

		rcv_as_buf = 1;
	}

	BUG_ON(cor_seqno_eq(src_in_l->source.in.next_seqno, r->seqno) == 0);
	BUG_ON(skb->len <= 0);

	if (unlikely(rcv_as_buf != 0)) {
		__u32 rc = cor_receive_buf(src_in_l, skb->data, skb->len, 0,
				flush);

		BUG_ON(rc > skb->len);

		src_in_l->source.in.next_seqno += rc;

		if (unlikely(rc != skb->len)) {
			skb->data += rc;
			skb->len -= rc;
			r->seqno += rc;

			return 1;
		}

		src_in_l->source.in.reorder_memused -=
				ps->funcstate.rcv_ooo.skb_memused;
		list_del(&(r->lh));
		kfree_skb(skb);

		return 0;
	} else {
		__u32 len = skb->len;
		__u32 memused = ps->funcstate.rcv_ooo.skb_memused;
		__u32 rc;

		list_del(&(r->lh));

		rc = cor_receive_skb(src_in_l, skb, 0, flush);

		src_in_l->source.in.next_seqno += rc;

		if (unlikely(rc != len)) {
			BUG_ON(rc > skb->len);

			skb->data += rc;
			skb->len -= rc;
			r->seqno += rc;

			list_add(&(r->lh),
					&(src_in_l->source.in.reorder_queue));

			return 1;
		}

		src_in_l->source.in.reorder_memused -= memused;

		return 0;
	}
}
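
/* Deliver every packet at the head of the reorder queue that has
 * become receivable in order, i.e. whose seqno does not lie beyond
 * next_seqno anymore.
 */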
void cor_drain_ooo_queue(struct cor_conn *src_in_l)
{
	BUG_ON(src_in_l->sourcetype != SOURCE_IN);

	while (list_empty(&(src_in_l->source.in.reorder_queue)) == 0) {
		struct cor_rcvooo *r = container_of(
				src_in_l->source.in.reorder_queue.next,
				struct cor_rcvooo, lh);
		__u8 flush = r->flush;
		int rc;

		if (cor_seqno_before(src_in_l->source.in.next_seqno,
				r->seqno))
			break;

		/* do not flush if there are more ooo packets in queue */
		if (src_in_l->source.in.reorder_queue.prev !=
				src_in_l->source.in.reorder_queue.next)
			flush = 0;

		if (r->type == RCVOOO_BUF)
			rc = cor_drain_ooo_queue_buf(src_in_l, r, flush);
		else if (r->type == RCVOOO_SKB)
			rc = cor_drain_ooo_queue_skb(src_in_l, r, flush);
		else
			BUG();

		if (unlikely(rc != 0))
			break;
	}

	BUG_ON(list_empty(&(src_in_l->source.in.reorder_queue)) != 0 &&
			src_in_l->source.in.reorder_memused != 0);
	BUG_ON(list_empty(&(src_in_l->source.in.reorder_queue)) == 0 &&
			src_in_l->source.in.reorder_memused == 0);

	cor_account_bufspace(src_in_l);
}
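
/* Payload length of a reorder queue entry, independent of whether it
 * is stored as a kmalloc'd buffer or as an skb.
 */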
static __u32 cor_rcvooo_len(struct cor_rcvooo *r)
{
	if (r->type == RCVOOO_BUF) {
		struct cor_rcvooo_buf *rb = container_of(r,
				struct cor_rcvooo_buf, r);
		return rb->len;
	} else if (r->type == RCVOOO_SKB) {
		struct sk_buff *skb = cor_skb_from_pstate(container_of(r,
				struct cor_skb_procstate, funcstate.rcv_ooo.r));
		return skb->len;
	}

	BUG();
	return 0;
}
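
/* Return the queue entry behind lh_rcvooo as a cor_rcvooo_buf if it
 * exists and is a buffer entry that could be merged with, 0 otherwise.
 */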
static struct cor_rcvooo_buf *_cor_conn_rcv_ooo_buf_checkmerge(
		struct cor_conn *src_in_l, struct list_head *lh_rcvooo)
{
	struct cor_rcvooo *r;
	struct cor_rcvooo_buf *rb;

	if (lh_rcvooo == &(src_in_l->source.in.reorder_queue))
		return 0;

	r = container_of(lh_rcvooo, struct cor_rcvooo, lh);
	if (r->type != RCVOOO_BUF)
		return 0;

	rb = container_of(r, struct cor_rcvooo_buf, r);

	return rb;
}
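
/* Account new_bytes of reorder memory for this connection. Returns
 * nonzero and leaves the accounting unchanged if the counter would
 * overflow or the buffer space limit is exceeded.
 */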
static int _cor_conn_rcv_ooo_accountmem(struct cor_conn *src_in_l,
		__u32 new_bytes)
{
	if (unlikely(src_in_l->source.in.reorder_memused + new_bytes <
			src_in_l->source.in.reorder_memused))
		return 1;

	src_in_l->source.in.reorder_memused += new_bytes;

	if (unlikely(cor_account_bufspace(src_in_l))) {
		src_in_l->source.in.reorder_memused -= new_bytes;
		cor_account_bufspace(src_in_l);
		return 1;
	}

	return 0;
}
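
/* Insert new out-of-order data that directly adjoins the buffer entry
 * before it, after it, or both. The pieces are copied into a single
 * kmalloc'd buffer and one of the existing queue entries is reused for
 * the merged data.
 */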
static void _cor_conn_rcv_ooo_merge(struct cor_conn *src_in_l, char *data,
		__u32 len, __u64 seqno, __u8 flush,
		struct cor_rcvooo_buf *merge_prev,
		struct cor_rcvooo_buf *merge_next)
{
	char *tmpbuf;
	__u32 tmpbuf_len = 0;
	__u32 tmpbuf_offset = 0;

	struct cor_rcvooo_buf *rb;

	if (merge_prev != 0)
		tmpbuf_len += merge_prev->len;
	tmpbuf_len += len;
	if (merge_next != 0)
		tmpbuf_len += merge_next->len;

	tmpbuf = kmalloc(tmpbuf_len, GFP_ATOMIC);
	if (unlikely(tmpbuf == 0))
		return;

	if (merge_prev != 0 && merge_next != 0 && len <
			sizeof(struct cor_rcvooo_buf)) {
		/* two entries collapse into one, so memusage shrinks and
		 * accounting cannot fail */
		src_in_l->source.in.reorder_memused += len -
				sizeof(struct cor_rcvooo_buf);
	} else {
		__u32 new_bytes = len;

		if (merge_prev != 0 && merge_next != 0)
			new_bytes -= sizeof(struct cor_rcvooo_buf);

		if (unlikely(_cor_conn_rcv_ooo_accountmem(src_in_l,
				new_bytes))) {
			kfree(tmpbuf);
			return;
		}
	}

	if (merge_prev != 0) {
		memcpy(tmpbuf + tmpbuf_offset, merge_prev->data,
				merge_prev->len);
		tmpbuf_offset += merge_prev->len;
	}
	memcpy(tmpbuf + tmpbuf_offset, data, len);
	tmpbuf_offset += len;
	if (merge_next != 0) {
		memcpy(tmpbuf + tmpbuf_offset, merge_next->data,
				merge_next->len);
		tmpbuf_offset += merge_next->len;
	}

	BUG_ON(tmpbuf_offset != tmpbuf_len);

	if (merge_prev != 0) {
		kfree(merge_prev->data);
		merge_prev->data = 0;
		if (merge_prev->len <= SMALL_OOO_PACKET_MAXSIZE) {
			BUG_ON(src_in_l->source.in.small_ooo_packets == 0);
			src_in_l->source.in.small_ooo_packets--;
		}
	}

	if (merge_next != 0) {
		kfree(merge_next->data);
		merge_next->data = 0;
		if (merge_next->len <= SMALL_OOO_PACKET_MAXSIZE) {
			BUG_ON(src_in_l->source.in.small_ooo_packets == 0);
			src_in_l->source.in.small_ooo_packets--;
		}

		flush = merge_next->r.flush;

		if (merge_prev != 0) {
			list_del(&(merge_next->r.lh));
			kmem_cache_free(cor_rcvooo_buf_slab, merge_next);
		}
	}

	if (merge_prev != 0) {
		rb = merge_prev;
	} else {
		BUG_ON(merge_next == 0);
		rb = merge_next;
		rb->r.seqno = seqno;
	}

	rb->data = tmpbuf;
	rb->len = tmpbuf_len;
	rb->r.flush = flush;

	if (tmpbuf_len <= SMALL_OOO_PACKET_MAXSIZE) {
		src_in_l->source.in.small_ooo_packets++;
		BUG_ON(src_in_l->source.in.small_ooo_packets == 0);
	}

	cor_send_ack_conn_ifneeded(src_in_l, seqno, len);
}
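
/* Queue new out-of-order data as a standalone buffer entry in front of
 * next_rcvooo. The number of small entries is limited per connection.
 */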
static void _cor_conn_rcv_ooo_nomerge(struct cor_conn *src_in_l, char *data,
		__u32 len, __u64 seqno, __u8 flush,
		struct list_head *next_rcvooo)
{
	struct cor_rcvooo_buf *rb;

	/* avoid oom if a neighbor sends very small packets */
	if (len <= SMALL_OOO_PACKET_MAXSIZE &&
			src_in_l->source.in.small_ooo_packets >=
			MAX_SMALL_OOO_PACKETS_PER_CONN)
		return;

	if (unlikely(_cor_conn_rcv_ooo_accountmem(src_in_l,
			len + sizeof(struct cor_rcvooo_buf))))
		return;

	rb = kmem_cache_alloc(cor_rcvooo_buf_slab, GFP_ATOMIC);
	if (unlikely(rb == 0)) {
		src_in_l->source.in.reorder_memused -=
				(len + sizeof(struct cor_rcvooo_buf));
		cor_account_bufspace(src_in_l);
		return;
	}

	memset(rb, 0, sizeof(struct cor_rcvooo_buf));

	rb->data = kmalloc(len, GFP_ATOMIC);
	if (unlikely(rb->data == 0)) {
		kmem_cache_free(cor_rcvooo_buf_slab, rb);

		src_in_l->source.in.reorder_memused -=
				(len + sizeof(struct cor_rcvooo_buf));
		cor_account_bufspace(src_in_l);
		return;
	}

	memcpy(rb->data, data, len);

	rb->r.type = RCVOOO_BUF;
	rb->r.seqno = seqno;
	rb->r.flush = flush;
	rb->len = len;

	list_add_tail(&(rb->r.lh), next_rcvooo);

	if (len <= SMALL_OOO_PACKET_MAXSIZE) {
		src_in_l->source.in.small_ooo_packets++;
		BUG_ON(src_in_l->source.in.small_ooo_packets == 0);
	}

	cor_send_ack_conn_ifneeded(src_in_l, seqno, len);
}
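
/* Queue out-of-order data as a copied buffer, merging it with the
 * directly adjacent buffer entries whenever the sequence numbers line
 * up exactly.
 */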
static void _cor_conn_rcv_ooo_buf(struct cor_conn *src_in_l, char *data,
		__u32 len, __u64 seqno, __u8 flush,
		struct list_head *next_rcvooo)
{
	struct cor_rcvooo_buf *merge_prev;
	struct cor_rcvooo_buf *merge_next;

	merge_prev = _cor_conn_rcv_ooo_buf_checkmerge(src_in_l,
			next_rcvooo->prev);
	if (merge_prev != 0) {
		__u64 next_seqno = merge_prev->r.seqno + merge_prev->len;

		BUG_ON(cor_seqno_after(next_seqno, seqno));
		if (cor_seqno_eq(next_seqno, seqno) == 0)
			merge_prev = 0;
	}

	merge_next = _cor_conn_rcv_ooo_buf_checkmerge(src_in_l, next_rcvooo);
	if (merge_next != 0) {
		__u64 next_seqno = seqno + len;

		BUG_ON(cor_seqno_after(next_seqno, merge_next->r.seqno));
		if (cor_seqno_eq(next_seqno, merge_next->r.seqno) == 0)
			merge_next = 0;
	}

	if (merge_prev == 0 && merge_next == 0) {
		_cor_conn_rcv_ooo_nomerge(src_in_l, data, len, seqno, flush,
				next_rcvooo);
	} else {
		_cor_conn_rcv_ooo_merge(src_in_l, data, len, seqno, flush,
				merge_prev, merge_next);
	}
}
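
/* Queue an out-of-order skb without copying the payload; the
 * cor_rcvooo entry is kept in the skb control buffer
 * (cor_skb_procstate).
 */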
static void _cor_conn_rcv_ooo_skb(struct cor_conn *src_in_l,
		struct sk_buff *skb, __u64 seqno, __u8 flush,
		struct list_head *next_rcvooo)
{
	struct cor_rcvooo *newr;
	struct cor_skb_procstate *ps = cor_skb_pstate(skb);

	memset(&(ps->funcstate), 0, sizeof(ps->funcstate));
	ps->funcstate.rcv_ooo.skb_memused = sizeof(struct sk_buff) +
			skb->len;

	if (unlikely(_cor_conn_rcv_ooo_accountmem(src_in_l,
			ps->funcstate.rcv_ooo.skb_memused))) {
		kfree_skb(skb);
		return;
	}

	newr = &(ps->funcstate.rcv_ooo.r);
	newr->type = RCVOOO_SKB;
	newr->seqno = seqno;
	newr->flush = flush;

	list_add_tail(&(newr->lh), next_rcvooo);

	cor_send_ack_conn_ifneeded(src_in_l, seqno, skb->len);
}
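
/* Insert out-of-order data after prev_rcvooo_lh. Overlaps with the
 * neighboring queue entries are trimmed away first; data that is fully
 * covered already is dropped. Small or trimmed packets are stored as
 * copied buffers, large untrimmed packets keep their skb.
 */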
static void __cor_conn_rcv_ooo(struct cor_conn *src_in_l, struct sk_buff *skb,
		char *data, __u32 len, __u64 seqno, __u8 flush,
		struct list_head *prev_rcvooo_lh)
{
	struct list_head *reorder_queue = &(src_in_l->source.in.reorder_queue);
	struct list_head *next_rcvooo_lh = prev_rcvooo_lh->next;

	if (prev_rcvooo_lh != reorder_queue) {
		struct cor_rcvooo *prev_rcvooo = container_of(prev_rcvooo_lh,
				struct cor_rcvooo, lh);
		__u32 currlen = cor_rcvooo_len(prev_rcvooo);

		if (cor_seqno_after(prev_rcvooo->seqno + currlen, seqno)) {
			__u64 overlap = cor_seqno_clean(prev_rcvooo->seqno +
					currlen - seqno);

			if (unlikely(len <= overlap))
				goto drop;

			data += overlap;
			len -= overlap;
			seqno += overlap;
		}
	}

	if (next_rcvooo_lh != reorder_queue) {
		struct cor_rcvooo *next_rcvooo = container_of(next_rcvooo_lh,
				struct cor_rcvooo, lh);

		if (unlikely(cor_seqno_before_eq(next_rcvooo->seqno, seqno)))
			goto drop;

		if (unlikely(cor_seqno_before(next_rcvooo->seqno, seqno + len)))
			len = cor_seqno_clean(next_rcvooo->seqno - seqno);
	}

	if (unlikely(len == 0)) {
drop:
		if (skb != 0)
			kfree_skb(skb);
		return;
	}

	if (skb == 0 || len < 1024 ||
			skb->data != ((unsigned char *) data) ||
			skb->len != len) {
		_cor_conn_rcv_ooo_buf(src_in_l, data, len, seqno, flush,
				next_rcvooo_lh);

		if (skb != 0)
			kfree_skb(skb);
	} else {
		_cor_conn_rcv_ooo_skb(src_in_l, skb, seqno, flush,
				next_rcvooo_lh);
	}
}
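
/* Walk the reorder queue backwards from the tail to find the entry
 * after which the new out-of-order data has to be inserted.
 */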
static void _cor_conn_rcv_ooo(struct cor_conn *src_in_l, struct sk_buff *skb,
		char *data, __u32 len, __u64 seqno, __u8 flush)
{
	struct list_head *reorder_queue = &(src_in_l->source.in.reorder_queue);
	struct list_head *currlh = reorder_queue->prev;

	BUG_ON(skb != 0 && skb->data != ((unsigned char *) data));
	BUG_ON(skb != 0 && skb->len != len);

	while (currlh != reorder_queue) {
		struct cor_rcvooo *currr = container_of(currlh,
				struct cor_rcvooo, lh);

		if (cor_seqno_before_eq(currr->seqno, seqno))
			break;

		currlh = currlh->prev;
	}

	__cor_conn_rcv_ooo(src_in_l, skb, data, len, seqno, flush, currlh);
}
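
/* Receive conn data from a neighbor: validate that the packet matches
 * the connection, re-ack and drop stale data, queue out-of-order
 * segments, deliver in-order data and then drain whatever the reorder
 * queue makes deliverable now.
 */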
static void _cor_conn_rcv(struct cor_neighbor *nb, struct cor_conn *src_in,
		__u32 conn_id, struct sk_buff *skb, char *data, __u32 len,
		__u64 seqno, int rcv_delayed_lowbuf, __u8 flush)
{
	spin_lock_bh(&(src_in->rcv_lock));

	if (unlikely(unlikely(src_in->isreset != 0) ||
			unlikely(src_in->sourcetype != SOURCE_IN) ||
			unlikely(src_in->source.in.conn_id != conn_id)))
		goto drop;

	if (unlikely(cor_is_from_nb(skb, src_in->source.in.nb) == 0))
		goto drop;

	if (unlikely(src_in->source.in.nb != nb))
		goto drop;

	cor_set_last_act(src_in);

	if (unlikely(cor_seqno_before(seqno + len,
			src_in->source.in.next_seqno)))
		goto drop_ack;

	if (unlikely(unlikely(cor_seqno_after(seqno + len,
			src_in->source.in.window_seqnolimit)) &&
			cor_seqno_after(seqno + len,
			src_in->source.in.window_seqnolimit_remote)))
		goto drop;

	if (cor_seqno_after(seqno, src_in->source.in.next_seqno)) {
		_cor_conn_rcv_ooo(src_in, skb, data, len, seqno, flush);
	} else {
		__u32 rcvlen;

		if (cor_seqno_after(src_in->source.in.next_seqno, seqno)) {
			__u64 overlap = cor_seqno_clean(
					src_in->source.in.next_seqno - seqno);

			BUG_ON(overlap > len);

			data += overlap;
			len -= overlap;
			seqno += overlap;

			rcvlen = cor_receive_buf(src_in, data, len,
					rcv_delayed_lowbuf, flush);
			if (skb != 0)
				kfree_skb(skb);
		} else if (skb != 0) {
			__u32 skblen = skb->len;

			rcvlen = cor_receive_skb(src_in, skb,
					rcv_delayed_lowbuf, flush);
			if (unlikely(rcvlen < skblen))
				kfree_skb(skb);
		} else {
			rcvlen = cor_receive_buf(src_in, data, len,
					rcv_delayed_lowbuf, flush);
		}

		if (likely(rcvlen > 0)) {
			src_in->source.in.next_seqno += rcvlen;

			cor_drain_ooo_queue(src_in);
			src_in->source.in.inorder_ack_needed = 1;
			cor_flush_buf(src_in);
			cor_send_ack_conn_ifneeded(src_in, 0, 0);
		}
	}

	goto out;

drop_ack:
	cor_send_ack_conn_ifneeded(src_in, 0, 0);

drop:
	if (skb != 0)
		kfree_skb(skb);

out:
	spin_unlock_bh(&(src_in->rcv_lock));
}
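
/* Entry point for received conn data: look up the connection by
 * conn_id and hand the packet to _cor_conn_rcv. Packets for unknown
 * conn_ids are answered with a reset.
 */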
void cor_conn_rcv(struct cor_neighbor *nb, struct sk_buff *skb, char *data,
		__u32 len, __u32 conn_id, __u64 seqno, int rcv_delayed_lowbuf,
		__u8 flush)
{
	struct cor_conn *src_in;

	src_in = cor_get_conn(nb, conn_id);

	if (unlikely(src_in == 0)) {
		/* printk(KERN_DEBUG "unknown conn_id when receiving: %d",
				conn_id); */

		cor_send_reset_conn(nb, conn_id ^ (conn_id & (1 << 31)), 0);

		if (skb != 0)
			kfree_skb(skb);
		return;
	}

	_cor_conn_rcv(nb, src_in, conn_id, skb, data, len, seqno,
			rcv_delayed_lowbuf, flush);

	kref_put(&(src_in->ref), cor_free_conn);
}

int __init cor_rcv_init(void)
{
	BUG_ON(sizeof(struct cor_skb_procstate) > 48);

	cor_rcvooo_buf_slab = kmem_cache_create("cor_rcvooo_buf",
			sizeof(struct cor_rcvooo_buf), 8, 0, 0);
	if (unlikely(cor_rcvooo_buf_slab == 0))
		return -ENOMEM;

	return 0;
}

void __exit cor_rcv_exit2(void)
{
	kmem_cache_destroy(cor_rcvooo_buf_slab);
	cor_rcvooo_buf_slab = 0;
}

MODULE_LICENSE("GPL");