send windowused instead of senddelayed flag
[cor.git] / net/cor/conn_src_in.c
blob aee0aa3f31e9e7943021e020e318fcded9b80599
/**
 * Connection oriented routing
 * Copyright (C) 2007-2021 Michael Blizek
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version 2
 * of the License, or (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
 * 02110-1301, USA.
 */

#include <linux/version.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/in.h>

#include "cor.h"

static struct kmem_cache *cor_rcvooo_buf_slab;

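/*
 * Throw away everything on the out-of-order reorder queue of an incoming
 * connection: free each queued buffer or skb, give back the memory accounted
 * for it and update the connection's buffer space accounting.
 */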
void cor_reset_ooo_queue(struct cor_conn *src_in_lx)
{
        BUG_ON(src_in_lx->sourcetype != SOURCE_IN);

        while (list_empty(&(src_in_lx->source.in.reorder_queue)) == 0) {
                struct cor_rcvooo *r = container_of(
                                src_in_lx->source.in.reorder_queue.next,
                                struct cor_rcvooo, lh);

                list_del(&(r->lh));

                if (r->type == RCVOOO_BUF) {
                        struct cor_rcvooo_buf *rb = container_of(r,
                                        struct cor_rcvooo_buf, r);
                        src_in_lx->source.in.reorder_memused -= (rb->len +
                                        sizeof(struct cor_rcvooo_buf));
                        kfree(rb->data);
                        kmem_cache_free(cor_rcvooo_buf_slab, rb);
                } else if (r->type == RCVOOO_SKB) {
                        struct cor_skb_procstate *ps = container_of(r,
                                        struct cor_skb_procstate,
                                        funcstate.rcv_ooo.r);
                        struct sk_buff *skb = cor_skb_from_pstate(ps);
                        src_in_lx->source.in.reorder_memused -=
                                        ps->funcstate.rcv_ooo.skb_memused;
                        kfree_skb(skb);
                } else {
                        BUG();
                }
        }

        src_in_lx->source.in.small_ooo_packets = 0;
        BUG_ON(src_in_lx->source.in.reorder_memused != 0);

        cor_account_bufspace(src_in_lx);
}

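/*
 * Deliver one queued RCVOOO_BUF entry that has become (partly) in-order.
 * Returns 0 if the entry was consumed completely and removed from the
 * reorder queue, 1 if a remainder had to stay queued.
 */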
static int cor_drain_ooo_queue_buf(struct cor_conn *src_in_l,
                struct cor_rcvooo *r, __u8 flush)
{
        struct cor_rcvooo_buf *rb = container_of(r, struct cor_rcvooo_buf, r);

        __u32 data_offset = 0;
        __u32 rc;

        if (unlikely(cor_seqno_after(src_in_l->source.in.next_seqno,
                        r->seqno))) {
                /* bytes at the start of this buffer were already received */
                __u64 overlap = cor_seqno_clean(
                                src_in_l->source.in.next_seqno - r->seqno);

                if (overlap >= rb->len)
                        goto free;

                src_in_l->source.in.reorder_memused -= overlap;
                rb->len -= overlap;
                data_offset += overlap;
                r->seqno += overlap;
        }

        BUG_ON(cor_seqno_eq(src_in_l->source.in.next_seqno, r->seqno) == 0);
        rc = cor_receive_buf(src_in_l, rb->data + data_offset, rb->len,
                        r->windowused, flush);

        BUG_ON(rc > rb->len);
        src_in_l->source.in.next_seqno += rc;

        if (unlikely(rc != rb->len)) {
                /* only part of the buffer could be delivered, keep the rest */
                src_in_l->source.in.reorder_memused -= rc;
                rb->len -= rc;
                r->seqno += rc;

                data_offset += rc;
                memmove(rb->data, rb->data + data_offset, rb->len);

                if (rb->len + data_offset > SMALL_OOO_PACKET_MAXSIZE &&
                                rb->len <= SMALL_OOO_PACKET_MAXSIZE) {
                        src_in_l->source.in.small_ooo_packets++;
                        BUG_ON(src_in_l->source.in.small_ooo_packets == 0);
                }

                return 1;
        } else {
free:
                src_in_l->source.in.reorder_memused -= (rb->len +
                                sizeof(struct cor_rcvooo_buf));
                list_del(&(r->lh));
                kfree(rb->data);
                if (rb->len <= SMALL_OOO_PACKET_MAXSIZE) {
                        BUG_ON(src_in_l->source.in.small_ooo_packets == 0);
                        src_in_l->source.in.small_ooo_packets--;
                }
                kmem_cache_free(cor_rcvooo_buf_slab, rb);

                return 0;
        }
}

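/*
 * Same as cor_drain_ooo_queue_buf(), but for RCVOOO_SKB entries.  If the skb
 * overlaps data that was already received, the remainder is delivered as a
 * plain buffer instead of passing the skb on directly.
 */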
static int cor_drain_ooo_queue_skb(struct cor_conn *src_in_l,
                struct cor_rcvooo *r, __u8 flush)
{
        struct cor_skb_procstate *ps = container_of(r, struct cor_skb_procstate,
                        funcstate.rcv_ooo.r);
        struct sk_buff *skb = cor_skb_from_pstate(ps);

        __u8 rcv_as_buf = 0;

        if (unlikely(cor_seqno_after(src_in_l->source.in.next_seqno,
                        r->seqno))) {
                /* bytes at the start of this skb were already received */
                __u64 overlap = cor_seqno_clean(
                                src_in_l->source.in.next_seqno - r->seqno);

                if (overlap >= skb->len) {
                        src_in_l->source.in.reorder_memused -=
                                        ps->funcstate.rcv_ooo.skb_memused;
                        list_del(&(r->lh));
                        kfree_skb(skb);
                        return 0;
                }

                skb->data += overlap;
                skb->len -= overlap;
                r->seqno += overlap;

                rcv_as_buf = 1;
        }

        BUG_ON(cor_seqno_eq(src_in_l->source.in.next_seqno, r->seqno) == 0);
        BUG_ON(skb->len <= 0);

        if (unlikely(rcv_as_buf != 0)) {
                __u32 rc = cor_receive_buf(src_in_l, skb->data, skb->len,
                                r->windowused, flush);

                BUG_ON(rc > skb->len);

                src_in_l->source.in.next_seqno += rc;

                if (unlikely(rc != skb->len)) {
                        skb->data += rc;
                        skb->len -= rc;
                        r->seqno += rc;

                        return 1;
                } else {
                        src_in_l->source.in.reorder_memused -=
                                        ps->funcstate.rcv_ooo.skb_memused;
                        list_del(&(r->lh));
                        kfree_skb(skb);

                        return 0;
                }
        } else {
                __u32 len = skb->len;
                __u32 rc;
                __u32 memused = ps->funcstate.rcv_ooo.skb_memused;

                list_del(&(r->lh));
                rc = cor_receive_skb(src_in_l, skb, 0, flush);

                BUG_ON(rc > len);

                src_in_l->source.in.next_seqno += rc;

                if (unlikely(rc != len)) {
                        BUG_ON(rc > skb->len);
                        skb->data += rc;
                        skb->len -= rc;
                        r->seqno += rc;
                        list_add(&(r->lh),
                                        &(src_in_l->source.in.reorder_queue));
                        return 1;
                }

                src_in_l->source.in.reorder_memused -= memused;

                return 0;
        }
}

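/*
 * Deliver queued out-of-order data that has become in-order, starting at the
 * head of the reorder queue and stopping at the first gap or at the first
 * entry that could only be delivered partially.
 */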
void cor_drain_ooo_queue(struct cor_conn *src_in_l)
{
        int drained = 0;

        BUG_ON(src_in_l->sourcetype != SOURCE_IN);

        while (list_empty(&(src_in_l->source.in.reorder_queue)) == 0) {
                struct cor_rcvooo *r = container_of(
                                src_in_l->source.in.reorder_queue.next,
                                struct cor_rcvooo, lh);
                __u8 flush = r->flush;
                int rc;

                if (cor_seqno_before(src_in_l->source.in.next_seqno, r->seqno))
                        break;

                /* do not flush if there are more ooo packets in queue */
                if (src_in_l->source.in.reorder_queue.prev !=
                                src_in_l->source.in.reorder_queue.next)
                        flush = 0;

                if (r->type == RCVOOO_BUF)
                        rc = cor_drain_ooo_queue_buf(src_in_l, r, flush);
                else if (r->type == RCVOOO_SKB)
                        rc = cor_drain_ooo_queue_skb(src_in_l, r, flush);
                else
                        BUG();

                if (unlikely(rc != 0)) {
                        break;
                }

                drained = 1;
        }

        BUG_ON(list_empty(&(src_in_l->source.in.reorder_queue)) != 0 &&
                        src_in_l->source.in.reorder_memused != 0);
        BUG_ON(list_empty(&(src_in_l->source.in.reorder_queue)) == 0 &&
                        src_in_l->source.in.reorder_memused == 0);

        if (drained)
                cor_account_bufspace(src_in_l);
}

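/* payload length of a reorder queue entry, independent of its type */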
static __u32 cor_rcvooo_len(struct cor_rcvooo *r)
{
        if (r->type == RCVOOO_BUF) {
                struct cor_rcvooo_buf *rb = container_of(r,
                                struct cor_rcvooo_buf, r);
                return rb->len;
        } else if (r->type == RCVOOO_SKB) {
                struct sk_buff *skb = cor_skb_from_pstate(container_of(r,
                                struct cor_skb_procstate, funcstate.rcv_ooo.r));
                return skb->len;
        } else {
                BUG();
        }
}

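/*
 * Return the reorder queue entry at lh_rcvooo if it is a buffer small enough
 * (at most 256 bytes) to be merged with newly received data, 0 otherwise.
 */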
static struct cor_rcvooo_buf *_cor_conn_rcv_ooo_buf_checkmerge(
                struct cor_conn *src_in_l, struct list_head *lh_rcvooo)
{
        struct cor_rcvooo *r;
        struct cor_rcvooo_buf *rb;

        if (lh_rcvooo == &(src_in_l->source.in.reorder_queue))
                return 0;

        r = container_of(lh_rcvooo, struct cor_rcvooo, lh);
        if (r->type != RCVOOO_BUF)
                return 0;

        rb = container_of(r, struct cor_rcvooo_buf, r);
        if (rb->len > 256)
                return 0;

        return rb;
}

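/*
 * Add new_bytes to the reorder memory counter of the connection.  Returns 1
 * (leaving the counter unchanged overall) if the addition would overflow or
 * if the buffer space accounting rejects the growth, 0 on success.
 */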
static int _cor_conn_rcv_ooo_accountmem(struct cor_conn *src_in_l,
                __u32 new_bytes)
{
        if (new_bytes == 0)
                return 0;

        if (unlikely(src_in_l->source.in.reorder_memused + new_bytes <
                        src_in_l->source.in.reorder_memused))
                return 1;

        src_in_l->source.in.reorder_memused += new_bytes;

        if (unlikely(cor_account_bufspace(src_in_l))) {
                src_in_l->source.in.reorder_memused -= new_bytes;
                cor_account_bufspace(src_in_l);
                return 1;
        }

        return 0;
}

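/*
 * Merge newly received out-of-order data with the small buffer entry before
 * it (merge_prev), after it (merge_next), or both.  The merged data is copied
 * into one newly allocated buffer; on allocation or accounting failure the
 * new data is dropped.
 */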
static void _cor_conn_rcv_ooo_merge(struct cor_conn *src_in_l, char *data,
                __u32 len, __u64 seqno, __u8 windowused, __u8 flush,
                struct cor_rcvooo_buf *merge_prev,
                struct cor_rcvooo_buf *merge_next)
{
        char *tmpbuf;
        __u32 tmpbuf_len = 0;
        __u32 tmpbuf_offset = 0;

        struct cor_rcvooo_buf *rb;

        if (merge_prev != 0)
                tmpbuf_len += merge_prev->len;
        tmpbuf_len += len;
        if (merge_next != 0)
                tmpbuf_len += merge_next->len;

        tmpbuf = kmalloc(tmpbuf_len, GFP_ATOMIC);
        if (unlikely(tmpbuf == 0))
                return;
        if (merge_prev != 0 && merge_next != 0 && len <
                        sizeof(struct cor_rcvooo_buf)) {
                src_in_l->source.in.reorder_memused += len -
                                sizeof(struct cor_rcvooo_buf);
        } else {
                __u32 new_bytes = len;
                if (merge_prev != 0 && merge_next != 0)
                        new_bytes -= sizeof(struct cor_rcvooo_buf);

                if (unlikely(_cor_conn_rcv_ooo_accountmem(src_in_l,
                                new_bytes))) {
                        kfree(tmpbuf);
                        return;
                }
        }

        if (merge_prev != 0) {
                memcpy(tmpbuf + tmpbuf_offset, merge_prev->data,
                                merge_prev->len);
                tmpbuf_offset += merge_prev->len;
                windowused = merge_prev->r.windowused;
        }
        memcpy(tmpbuf + tmpbuf_offset, data, len);
        tmpbuf_offset += len;
        if (merge_next != 0) {
                memcpy(tmpbuf + tmpbuf_offset, merge_next->data,
                                merge_next->len);
                tmpbuf_offset += merge_next->len;
        }

        BUG_ON(tmpbuf_offset != tmpbuf_len);

        if (merge_prev != 0) {
                kfree(merge_prev->data);
                merge_prev->data = 0;
                if (merge_prev->len <= SMALL_OOO_PACKET_MAXSIZE) {
                        BUG_ON(src_in_l->source.in.small_ooo_packets == 0);
                        src_in_l->source.in.small_ooo_packets--;
                }
        }

        if (merge_next != 0) {
                kfree(merge_next->data);
                merge_next->data = 0;
                if (merge_next->len <= SMALL_OOO_PACKET_MAXSIZE) {
                        BUG_ON(src_in_l->source.in.small_ooo_packets == 0);
                        src_in_l->source.in.small_ooo_packets--;
                }

                flush = merge_next->r.flush;

                if (merge_prev != 0) {
                        list_del(&(merge_next->r.lh));
                        kmem_cache_free(cor_rcvooo_buf_slab, merge_next);
                        merge_next = 0;
                }
        }

        if (merge_prev != 0) {
                rb = merge_prev;
        } else {
                BUG_ON(merge_next == 0);
                rb = merge_next;
                rb->r.seqno = seqno;
        }

        rb->data = tmpbuf;
        rb->len = tmpbuf_len;
        rb->r.windowused = windowused;
        rb->r.flush = flush;

        if (tmpbuf_len <= SMALL_OOO_PACKET_MAXSIZE) {
                src_in_l->source.in.small_ooo_packets++;
                BUG_ON(src_in_l->source.in.small_ooo_packets == 0);
        }

        cor_send_ack_conn_ifneeded(src_in_l, seqno, len);
}

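/*
 * Queue newly received out-of-order data as a new RCVOOO_BUF entry.  The
 * number of very small entries per connection is capped
 * (MAX_SMALL_OOO_PACKETS_PER_CONN) so that a neighbor cannot exhaust memory
 * with tiny out-of-order packets.
 */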
static void _cor_conn_rcv_ooo_nomerge(struct cor_conn *src_in_l, char *data,
                __u32 len, __u64 seqno, __u8 windowused, __u8 flush,
                struct list_head *next_rcvooo)
{
        struct cor_rcvooo_buf *rb;

        /* avoid oom if a neighbor sends very small packets */
        if (len <= SMALL_OOO_PACKET_MAXSIZE &&
                        src_in_l->source.in.small_ooo_packets >=
                        MAX_SMALL_OOO_PACKETS_PER_CONN)
                return;

        if (unlikely(_cor_conn_rcv_ooo_accountmem(src_in_l,
                        len + sizeof(struct cor_rcvooo_buf))))
                return;

        rb = kmem_cache_alloc(cor_rcvooo_buf_slab, GFP_ATOMIC);
        if (unlikely(rb == 0)) {
                src_in_l->source.in.reorder_memused -=
                                (len + sizeof(struct cor_rcvooo_buf));
                cor_account_bufspace(src_in_l);
                return;
        }
        memset(rb, 0, sizeof(struct cor_rcvooo_buf));

        rb->data = kmalloc(len, GFP_ATOMIC);
        if (unlikely(rb->data == 0)) {
                kmem_cache_free(cor_rcvooo_buf_slab, rb);

                src_in_l->source.in.reorder_memused -=
                                (len + sizeof(struct cor_rcvooo_buf));
                cor_account_bufspace(src_in_l);
                return;
        }

        memcpy(rb->data, data, len);
        rb->len = len;
        rb->r.type = RCVOOO_BUF;
        rb->r.seqno = seqno;
        rb->r.windowused = windowused;
        if (flush)
                rb->r.flush = 1;
        else
                rb->r.flush = 0;
        list_add_tail(&(rb->r.lh), next_rcvooo);

        if (len <= SMALL_OOO_PACKET_MAXSIZE) {
                src_in_l->source.in.small_ooo_packets++;
                BUG_ON(src_in_l->source.in.small_ooo_packets == 0);
        }

        cor_send_ack_conn_ifneeded(src_in_l, seqno, len);
}

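/*
 * Insert out-of-order data as a buffer, merging it with directly adjacent
 * small buffer entries where possible (only attempted for packets of at most
 * 128 bytes).
 */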
static void _cor_conn_rcv_ooo_buf(struct cor_conn *src_in_l, char *data,
                __u32 len, __u64 seqno, __u8 windowused, __u8 flush,
                struct list_head *next_rcvooo)
{
        struct cor_rcvooo_buf *merge_prev;
        struct cor_rcvooo_buf *merge_next;

        if (len > 128)
                goto nomerge;

        merge_prev = _cor_conn_rcv_ooo_buf_checkmerge(src_in_l,
                        next_rcvooo->prev);
        if (merge_prev != 0) {
                __u64 next_seqno = merge_prev->r.seqno + merge_prev->len;
                BUG_ON(cor_seqno_after(next_seqno, seqno));
                if (cor_seqno_eq(next_seqno, seqno) == 0)
                        merge_prev = 0;
        }

        merge_next = _cor_conn_rcv_ooo_buf_checkmerge(src_in_l, next_rcvooo);
        if (merge_next != 0) {
                __u64 next_seqno = seqno + len;
                BUG_ON(cor_seqno_after(next_seqno, merge_next->r.seqno));
                if (cor_seqno_eq(next_seqno, merge_next->r.seqno) == 0)
                        merge_next = 0;
        }

        if (merge_prev == 0 && merge_next == 0) {
nomerge:
                _cor_conn_rcv_ooo_nomerge(src_in_l, data, len, seqno,
                                windowused, flush, next_rcvooo);
        } else {
                _cor_conn_rcv_ooo_merge(src_in_l, data, len, seqno,
                                windowused, flush, merge_prev, merge_next);
        }
}

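/*
 * Insert an out-of-order skb into the reorder queue without copying, keeping
 * the bookkeeping in the skb's struct cor_skb_procstate.
 */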
static void _cor_conn_rcv_ooo_skb(struct cor_conn *src_in_l,
                struct sk_buff *skb, __u64 seqno, __u8 windowused, __u8 flush,
                struct list_head *next_rcvooo)
{
        struct cor_rcvooo *newr;
        struct cor_skb_procstate *ps = cor_skb_pstate(skb);

        memset(&(ps->funcstate), 0, sizeof(ps->funcstate));
        ps->funcstate.rcv_ooo.skb_memused = sizeof(struct sk_buff) +
                        skb->len;

        if (unlikely(_cor_conn_rcv_ooo_accountmem(src_in_l,
                        ps->funcstate.rcv_ooo.skb_memused))) {
                kfree_skb(skb);
                return;
        }

        newr = &(ps->funcstate.rcv_ooo.r);
        newr->type = RCVOOO_SKB;
        newr->seqno = seqno;
        newr->windowused = windowused;
        newr->flush = flush;
        list_add_tail(&(newr->lh), next_rcvooo);

        cor_send_ack_conn_ifneeded(src_in_l, seqno, skb->len);
}

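/*
 * Insert new out-of-order data between prev_rcvooo_lh and the following
 * queue entry.  Data overlapping the previous entry or reaching into the
 * next one is trimmed first; trimmed or small data is stored as a buffer,
 * larger unmodified skbs are queued directly.
 */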
static void __cor_conn_rcv_ooo(struct cor_conn *src_in_l, struct sk_buff *skb,
                char *data, __u32 len, __u64 seqno, __u8 windowused, __u8 flush,
                struct list_head *prev_rcvooo_lh)
{
        struct list_head *reorder_queue = &(src_in_l->source.in.reorder_queue);
        struct list_head *next_rcvooo_lh = prev_rcvooo_lh->next;

        if (prev_rcvooo_lh != reorder_queue) {
                struct cor_rcvooo *prev_rcvooo = container_of(prev_rcvooo_lh,
                                struct cor_rcvooo, lh);
                __u32 currlen = cor_rcvooo_len(prev_rcvooo);

                if (cor_seqno_after(prev_rcvooo->seqno + currlen, seqno)) {
                        __u64 overlap = cor_seqno_clean(prev_rcvooo->seqno +
                                        currlen - seqno);

                        if (unlikely(len <= overlap))
                                goto drop;

                        data += overlap;
                        len -= overlap;
                        seqno += overlap;
                }
        }

        if (next_rcvooo_lh != reorder_queue) {
                struct cor_rcvooo *next_rcvooo = container_of(next_rcvooo_lh,
                                struct cor_rcvooo, lh);

                if (unlikely(cor_seqno_before_eq(next_rcvooo->seqno, seqno)))
                        goto drop;

                if (unlikely(cor_seqno_before(next_rcvooo->seqno, seqno + len)))
                        len = cor_seqno_clean(next_rcvooo->seqno - seqno);
        }

        if (unlikely(len == 0)) {
drop:
                if (skb != 0)
                        kfree_skb(skb);
                return;
        }

        if (skb == 0 || len < 1024 ||
                        skb->data != ((unsigned char *) data) ||
                        skb->len != len) {
                _cor_conn_rcv_ooo_buf(src_in_l, data, len, seqno, windowused,
                                flush, next_rcvooo_lh);

                if (skb != 0)
                        kfree_skb(skb);
        } else {
                skb->data = data;
                skb->len = len;

                _cor_conn_rcv_ooo_skb(src_in_l, skb, seqno, windowused, flush,
                                next_rcvooo_lh);
        }
}

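/*
 * Find the queue position for newly received out-of-order data by searching
 * the reorder queue backwards for the last entry that starts at or before
 * seqno, then insert the data behind it.
 */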
static void _cor_conn_rcv_ooo(struct cor_conn *src_in_l, struct sk_buff *skb,
                char *data, __u32 len, __u64 seqno, __u8 windowused, __u8 flush)
{
        struct list_head *reorder_queue = &(src_in_l->source.in.reorder_queue);
        struct list_head *currlh = reorder_queue->prev;

        BUG_ON(skb != 0 && skb->data != ((unsigned char *)data));
        BUG_ON(skb != 0 && skb->len != len);

        while (currlh != reorder_queue) {
                struct cor_rcvooo *currr = container_of(currlh,
                                struct cor_rcvooo, lh);

                if (cor_seqno_before_eq(currr->seqno, seqno))
                        break;

                currlh = currlh->prev;
        }

        __cor_conn_rcv_ooo(src_in_l, skb, data, len, seqno, windowused, flush,
                        currlh);
}

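/*
 * Receive payload for a connection: in-order data is passed to the receive
 * buffer immediately (draining the reorder queue afterwards), data beyond
 * next_seqno goes to the out-of-order queue, and data outside the receive
 * window limits is dropped.
 */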
static void _cor_conn_rcv(struct cor_neighbor *nb, struct cor_conn *src_in,
                __u32 conn_id, struct sk_buff *skb, char *data, __u32 len,
                __u64 seqno, __u8 windowused, __u8 flush)
{
        BUG_ON(nb == 0);

        spin_lock_bh(&(src_in->rcv_lock));

        if (cor_is_conn_in(src_in, nb, conn_id) == 0)
                goto drop;

        cor_set_last_act(src_in);

        if (unlikely(cor_seqno_before(seqno + len,
                        src_in->source.in.next_seqno)))
                goto drop_ack;
        if (unlikely(unlikely(cor_seqno_after(seqno + len,
                        src_in->source.in.window_seqnolimit)) &&
                        cor_seqno_after(seqno + len,
                        src_in->source.in.window_seqnolimit_remote)))
                goto drop;

        if (cor_seqno_after(seqno, src_in->source.in.next_seqno)) {
                _cor_conn_rcv_ooo(src_in, skb, data, len, seqno, windowused,
                                flush);
        } else {
                __u32 rcvlen;

                if (cor_seqno_after(src_in->source.in.next_seqno, seqno)) {
                        __u64 overlap = cor_seqno_clean(
                                        src_in->source.in.next_seqno - seqno);

                        BUG_ON(overlap > len);

                        data += overlap;
                        len -= overlap;
                        seqno += overlap;

                        rcvlen = cor_receive_buf(src_in, data, len, windowused,
                                        flush);
                        if (skb != 0)
                                kfree_skb(skb);
                } else if (skb != 0) {
                        __u32 skblen = skb->len;
                        rcvlen = cor_receive_skb(src_in, skb, windowused,
                                        flush);
                        if (unlikely(rcvlen < skblen))
                                kfree_skb(skb);
                } else {
                        rcvlen = cor_receive_buf(src_in, data, len, windowused,
                                        flush);
                }

                if (likely(rcvlen > 0)) {
                        src_in->source.in.next_seqno += rcvlen;

                        cor_drain_ooo_queue(src_in);
                        src_in->source.in.inorder_ack_needed = 1;
                        cor_flush_buf(src_in);
                        cor_send_ack_conn_ifneeded(src_in, 0, 0);
                }
        }

        if (0) {
drop_ack:
                cor_send_ack_conn_ifneeded(src_in, 0, 0);
drop:
                if (skb != 0) {
                        kfree_skb(skb);
                }
        }
        spin_unlock_bh(&(src_in->rcv_lock));
}

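/*
 * Entry point for received connection data.  Looks up the connection by
 * conn_id; an unknown conn_id causes a reset to be sent back to the neighbor.
 */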
void cor_conn_rcv(struct cor_neighbor *nb, struct sk_buff *skb, char *data,
                __u32 len, __u32 conn_id, __u64 seqno, __u8 windowused,
                __u8 flush)
{
        struct cor_conn *src_in;

        BUG_ON(nb == 0);

        if (skb != 0) {
                BUG_ON(data != 0);
                BUG_ON(len != 0);

                data = skb->data;
                len = skb->len;
        }

        src_in = cor_get_conn(nb, conn_id);

        if (unlikely(src_in == 0)) {
                /* printk(KERN_DEBUG "unknown conn_id when receiving: %d",
                                conn_id); */

                if (skb != 0)
                        kfree_skb(skb);
                cor_send_reset_conn(nb, cor_get_connid_reverse(conn_id), 0);
                return;
        }

        /* for testing */
        /* len = 1;
        if (skb != 0)
                skb->len = 1; */

        _cor_conn_rcv(nb, src_in, conn_id, skb, data, len, seqno, windowused,
                        flush);
        cor_conn_kref_put(src_in, "stack");
}

int __init cor_rcv_init(void)
{
        BUG_ON(sizeof(struct cor_skb_procstate) > 48);

        cor_rcvooo_buf_slab = kmem_cache_create("cor_rcvooo_buf",
                        sizeof(struct cor_rcvooo_buf), 8, 0, 0);
        if (unlikely(cor_rcvooo_buf_slab == 0))
                return -ENOMEM;

        return 0;
}

void __exit cor_rcv_exit2(void)
{
        kmem_cache_destroy(cor_rcvooo_buf_slab);
        cor_rcvooo_buf_slab = 0;
}

MODULE_LICENSE("GPL");