net/cor/rcv.c

/**
 * Connection oriented routing
 * Copyright (C) 2007-2021 Michael Blizek
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version 2
 * of the License, or (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
 * 02110-1301, USA.
 */

#include <linux/version.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/in.h>

#include "cor.h"

static struct kmem_cache *cor_rcvooo_buf_slab;
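
/*
 * Throw away all queued out-of-order data of a connection: free every
 * RCVOOO_BUF and RCVOOO_SKB entry, give the accounted reorder memory back
 * and clear the small-packet counter.
 */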
void cor_reset_ooo_queue(struct cor_conn *src_in_lx)
{
	BUG_ON(src_in_lx->sourcetype != SOURCE_IN);

	while (list_empty(&(src_in_lx->source.in.reorder_queue)) == 0) {
		struct cor_rcvooo *r = container_of(
				src_in_lx->source.in.reorder_queue.next,
				struct cor_rcvooo, lh);

		list_del(&(r->lh));

		if (r->type == RCVOOO_BUF) {
			struct cor_rcvooo_buf *rb = container_of(r,
					struct cor_rcvooo_buf, r);
			src_in_lx->source.in.reorder_memused -= (rb->len +
					sizeof(struct cor_rcvooo_buf));
			kfree(rb->data);
			kmem_cache_free(cor_rcvooo_buf_slab, rb);
		} else if (r->type == RCVOOO_SKB) {
			struct cor_skb_procstate *ps = container_of(r,
					struct cor_skb_procstate,
					funcstate.rcv_ooo.r);
			struct sk_buff *skb = cor_skb_from_pstate(ps);
			src_in_lx->source.in.reorder_memused -=
					ps->funcstate.rcv_ooo.skb_memused;
			kfree_skb(skb);
		} else {
			BUG();
		}
	}

	src_in_lx->source.in.small_ooo_packets = 0;
	BUG_ON(src_in_lx->source.in.reorder_memused != 0);

	cor_account_bufspace(src_in_lx);
}
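
/*
 * Deliver one RCVOOO_BUF reorder entry to cor_receive_buf() once its seqno
 * has become the next expected one. Returns 0 if the entry was consumed
 * completely and removed from the queue, 1 if only part of it could be
 * delivered and the remainder stays queued.
 */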
static int cor_drain_ooo_queue_buf(struct cor_conn *src_in_l,
		struct cor_rcvooo *r, __u8 flush)
{
	struct cor_rcvooo_buf *rb = container_of(r, struct cor_rcvooo_buf, r);

	__u32 data_offset = 0;
	__u32 rc;

	if (unlikely(cor_seqno_after(src_in_l->source.in.next_seqno,
			r->seqno))) {
		/* bytes at the start of this entry that were already
		 * received in order
		 */
		__u64 overlap = cor_seqno_clean(
				src_in_l->source.in.next_seqno - r->seqno);
		if (overlap >= rb->len)
			goto free;

		src_in_l->source.in.reorder_memused -= overlap;
		rb->len -= overlap;
		data_offset += overlap;
		r->seqno += overlap;
	}

	BUG_ON(cor_seqno_eq(src_in_l->source.in.next_seqno, r->seqno) == 0);

	rc = cor_receive_buf(src_in_l, rb->data + data_offset, rb->len, 0,
			flush);

	BUG_ON(rc > rb->len);

	src_in_l->source.in.next_seqno += rc;

	if (unlikely(rc != rb->len)) {
		src_in_l->source.in.reorder_memused -= rc;
		rb->len -= rc;
		r->seqno += rc;

		data_offset += rc;
		memmove(rb->data, rb->data + data_offset, rb->len);

		if (rb->len + data_offset > SMALL_OOO_PACKET_MAXSIZE &&
				rb->len <= SMALL_OOO_PACKET_MAXSIZE) {
			src_in_l->source.in.small_ooo_packets++;
			BUG_ON(src_in_l->source.in.small_ooo_packets == 0);
		}

		return 1;
	} else {
free:
		src_in_l->source.in.reorder_memused -= (rb->len +
				sizeof(struct cor_rcvooo_buf));
		list_del(&(r->lh));
		kfree(rb->data);
		if (rb->len <= SMALL_OOO_PACKET_MAXSIZE) {
			BUG_ON(src_in_l->source.in.small_ooo_packets == 0);
			src_in_l->source.in.small_ooo_packets--;
		}
		kmem_cache_free(cor_rcvooo_buf_slab, rb);

		return 0;
	}
}
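
/*
 * Same as cor_drain_ooo_queue_buf(), but for RCVOOO_SKB entries. Entries
 * that partially overlap already delivered data are trimmed and handed to
 * cor_receive_buf(); otherwise the skb itself is passed on via
 * cor_receive_skb(). Returns 0 if fully consumed, 1 if a remainder stays
 * queued.
 */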
static int cor_drain_ooo_queue_skb(struct cor_conn *src_in_l,
		struct cor_rcvooo *r, __u8 flush)
{
	struct cor_skb_procstate *ps = container_of(r, struct cor_skb_procstate,
			funcstate.rcv_ooo.r);
	struct sk_buff *skb = cor_skb_from_pstate(ps);

	__u8 rcv_as_buf = 0;

	if (unlikely(cor_seqno_after(src_in_l->source.in.next_seqno,
			r->seqno))) {
		/* bytes at the start of this skb that were already
		 * received in order
		 */
		__u64 overlap = cor_seqno_clean(
				src_in_l->source.in.next_seqno - r->seqno);
		if (overlap >= skb->len) {
			src_in_l->source.in.reorder_memused -=
					ps->funcstate.rcv_ooo.skb_memused;
			list_del(&(r->lh));
			kfree_skb(skb);
			return 0;
		}

		skb->data += overlap;
		skb->len -= overlap;
		r->seqno += overlap;

		rcv_as_buf = 1;
	}

	BUG_ON(cor_seqno_eq(src_in_l->source.in.next_seqno, r->seqno) == 0);
	BUG_ON(skb->len <= 0);

	if (unlikely(rcv_as_buf != 0)) {
		__u32 rc = cor_receive_buf(src_in_l, skb->data, skb->len, 0,
				flush);

		BUG_ON(rc > skb->len);

		src_in_l->source.in.next_seqno += rc;

		if (unlikely(rc != skb->len)) {
			skb->data += rc;
			skb->len -= rc;
			r->seqno += rc;

			return 1;
		} else {
			src_in_l->source.in.reorder_memused -=
					ps->funcstate.rcv_ooo.skb_memused;
			list_del(&(r->lh));
			kfree_skb(skb);

			return 0;
		}
	} else {
		__u32 len = skb->len;
		__u32 rc;
		__u32 memused = ps->funcstate.rcv_ooo.skb_memused;

		list_del(&(r->lh));

		rc = cor_receive_skb(src_in_l, skb, 0, flush);

		BUG_ON(rc > len);

		src_in_l->source.in.next_seqno += rc;

		if (unlikely(rc != len)) {
			BUG_ON(rc > skb->len);
			skb->data += rc;
			skb->len -= rc;
			r->seqno += rc;
			list_add(&(r->lh),
					&(src_in_l->source.in.reorder_queue));
			return 1;
		}

		src_in_l->source.in.reorder_memused -= memused;

		return 0;
	}
}
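
/*
 * Deliver out-of-order data that has become in-order: walk the reorder
 * queue from the front and drain entries until one starts beyond
 * next_seqno or cannot be consumed completely.
 */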
void cor_drain_ooo_queue(struct cor_conn *src_in_l)
{
	int drained = 0;

	BUG_ON(src_in_l->sourcetype != SOURCE_IN);

	while (list_empty(&(src_in_l->source.in.reorder_queue)) == 0) {
		struct cor_rcvooo *r = container_of(
				src_in_l->source.in.reorder_queue.next,
				struct cor_rcvooo, lh);
		__u8 flush = r->flush;
		int rc;

		if (cor_seqno_before(src_in_l->source.in.next_seqno, r->seqno))
			break;

		/* do not flush if there are more ooo packets in queue */
		if (src_in_l->source.in.reorder_queue.prev !=
				src_in_l->source.in.reorder_queue.next)
			flush = 0;

		if (r->type == RCVOOO_BUF)
			rc = cor_drain_ooo_queue_buf(src_in_l, r, flush);
		else if (r->type == RCVOOO_SKB)
			rc = cor_drain_ooo_queue_skb(src_in_l, r, flush);
		else
			BUG();

		if (unlikely(rc != 0)) {
			break;
		}

		drained = 1;
	}

	BUG_ON(list_empty(&(src_in_l->source.in.reorder_queue)) != 0 &&
			src_in_l->source.in.reorder_memused != 0);
	BUG_ON(list_empty(&(src_in_l->source.in.reorder_queue)) == 0 &&
			src_in_l->source.in.reorder_memused == 0);

	if (drained)
		cor_account_bufspace(src_in_l);
}
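
/* payload length of a reorder entry, regardless of its type */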
static __u32 cor_rcvooo_len(struct cor_rcvooo *r)
{
	if (r->type == RCVOOO_BUF) {
		struct cor_rcvooo_buf *rb = container_of(r,
				struct cor_rcvooo_buf, r);
		return rb->len;
	} else if (r->type == RCVOOO_SKB) {
		struct sk_buff *skb = cor_skb_from_pstate(container_of(r,
				struct cor_skb_procstate, funcstate.rcv_ooo.r));
		return skb->len;
	} else {
		BUG();
	}
}
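
/*
 * Return the neighboring reorder entry if it is a buf entry small enough
 * (<= 256 bytes) to be merged with newly arriving data, 0 otherwise.
 */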
static struct cor_rcvooo_buf *_cor_conn_rcv_ooo_buf_checkmerge(
		struct cor_conn *src_in_l, struct list_head *lh_rcvooo)
{
	struct cor_rcvooo *r;
	struct cor_rcvooo_buf *rb;

	if (lh_rcvooo == &(src_in_l->source.in.reorder_queue))
		return 0;

	r = container_of(lh_rcvooo, struct cor_rcvooo, lh);
	if (r->type != RCVOOO_BUF)
		return 0;

	rb = container_of(r, struct cor_rcvooo_buf, r);
	if (rb->len > 256)
		return 0;

	return rb;
}
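
/*
 * Account new_bytes of reorder memory to the connection. Returns 0 on
 * success, 1 if the counter would overflow or cor_account_bufspace()
 * rejects the additional memory.
 */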
static int _cor_conn_rcv_ooo_accountmem(struct cor_conn *src_in_l,
		__u32 new_bytes)
{
	if (new_bytes == 0)
		return 0;

	if (unlikely(src_in_l->source.in.reorder_memused + new_bytes <
			src_in_l->source.in.reorder_memused))
		return 1;

	src_in_l->source.in.reorder_memused += new_bytes;

	if (unlikely(cor_account_bufspace(src_in_l))) {
		src_in_l->source.in.reorder_memused -= new_bytes;
		cor_account_bufspace(src_in_l);
		return 1;
	}

	return 0;
}
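
/*
 * Merge newly received out-of-order data with the adjacent small buf
 * entries (merge_prev and/or merge_next): copy everything into one newly
 * allocated buffer, adjust the memory accounting and the small-packet
 * counter, and reuse one of the existing cor_rcvooo_buf structs for the
 * combined entry.
 */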
static void _cor_conn_rcv_ooo_merge(struct cor_conn *src_in_l, char *data,
		__u32 len, __u64 seqno, __u8 flush,
		struct cor_rcvooo_buf *merge_prev,
		struct cor_rcvooo_buf *merge_next)
{
	char *tmpbuf;
	__u32 tmpbuf_len = 0;
	__u32 tmpbuf_offset = 0;

	struct cor_rcvooo_buf *rb;

	if (merge_prev != 0)
		tmpbuf_len += merge_prev->len;
	tmpbuf_len += len;
	if (merge_next != 0)
		tmpbuf_len += merge_next->len;

	tmpbuf = kmalloc(tmpbuf_len, GFP_ATOMIC);
	if (unlikely(tmpbuf == 0))
		return;
	if (merge_prev != 0 && merge_next != 0 && len <
			sizeof(struct cor_rcvooo_buf)) {
		src_in_l->source.in.reorder_memused += len -
				sizeof(struct cor_rcvooo_buf);
	} else {
		__u32 new_bytes = len;
		if (merge_prev != 0 && merge_next != 0)
			new_bytes -= sizeof(struct cor_rcvooo_buf);

		if (unlikely(_cor_conn_rcv_ooo_accountmem(src_in_l,
				new_bytes))) {
			kfree(tmpbuf);
			return;
		}
	}

	if (merge_prev != 0) {
		memcpy(tmpbuf + tmpbuf_offset, merge_prev->data,
				merge_prev->len);
		tmpbuf_offset += merge_prev->len;
	}
	memcpy(tmpbuf + tmpbuf_offset, data, len);
	tmpbuf_offset += len;
	if (merge_next != 0) {
		memcpy(tmpbuf + tmpbuf_offset, merge_next->data,
				merge_next->len);
		tmpbuf_offset += merge_next->len;
	}

	BUG_ON(tmpbuf_offset != tmpbuf_len);

	if (merge_prev != 0) {
		kfree(merge_prev->data);
		merge_prev->data = 0;
		if (merge_prev->len <= SMALL_OOO_PACKET_MAXSIZE) {
			BUG_ON(src_in_l->source.in.small_ooo_packets == 0);
			src_in_l->source.in.small_ooo_packets--;
		}
	}

	if (merge_next != 0) {
		kfree(merge_next->data);
		merge_next->data = 0;
		if (merge_next->len <= SMALL_OOO_PACKET_MAXSIZE) {
			BUG_ON(src_in_l->source.in.small_ooo_packets == 0);
			src_in_l->source.in.small_ooo_packets--;
		}

		flush = merge_next->r.flush;

		if (merge_prev != 0) {
			list_del(&(merge_next->r.lh));
			kmem_cache_free(cor_rcvooo_buf_slab, merge_next);
			merge_next = 0;
		}
	}

	if (merge_prev != 0) {
		rb = merge_prev;
	} else {
		BUG_ON(merge_next == 0);
		rb = merge_next;
		rb->r.seqno = seqno;
	}

	rb->data = tmpbuf;
	rb->len = tmpbuf_len;
	rb->r.flush = flush;

	if (tmpbuf_len <= SMALL_OOO_PACKET_MAXSIZE) {
		src_in_l->source.in.small_ooo_packets++;
		BUG_ON(src_in_l->source.in.small_ooo_packets == 0);
	}

	cor_send_ack_conn_ifneeded(src_in_l, seqno, len);
}
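
/*
 * Queue out-of-order data as a new RCVOOO_BUF entry without merging. The
 * data is copied; the entry is dropped silently if the small-packet limit
 * is reached, memory accounting fails or an allocation fails.
 */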
static void _cor_conn_rcv_ooo_nomerge(struct cor_conn *src_in_l, char *data,
		__u32 len, __u64 seqno, __u8 flush,
		struct list_head *next_rcvooo)
{
	struct cor_rcvooo_buf *rb;

	/* avoid oom if a neighbor sends very small packets */
	if (len <= SMALL_OOO_PACKET_MAXSIZE &&
			src_in_l->source.in.small_ooo_packets >=
			MAX_SMALL_OOO_PACKETS_PER_CONN)
		return;

	if (unlikely(_cor_conn_rcv_ooo_accountmem(src_in_l,
			len + sizeof(struct cor_rcvooo_buf))))
		return;

	rb = kmem_cache_alloc(cor_rcvooo_buf_slab, GFP_ATOMIC);
	if (unlikely(rb == 0)) {
		src_in_l->source.in.reorder_memused -=
				(len + sizeof(struct cor_rcvooo_buf));
		cor_account_bufspace(src_in_l);
		return;
	}
	memset(rb, 0, sizeof(struct cor_rcvooo_buf));

	rb->data = kmalloc(len, GFP_ATOMIC);
	if (unlikely(rb->data == 0)) {
		kmem_cache_free(cor_rcvooo_buf_slab, rb);

		src_in_l->source.in.reorder_memused -=
				(len + sizeof(struct cor_rcvooo_buf));
		cor_account_bufspace(src_in_l);
		return;
	}

	memcpy(rb->data, data, len);
	rb->len = len;
	rb->r.type = RCVOOO_BUF;
	rb->r.seqno = seqno;
	if (flush)
		rb->r.flush = 1;
	else
		rb->r.flush = 0;
	list_add_tail(&(rb->r.lh), next_rcvooo);

	if (len <= SMALL_OOO_PACKET_MAXSIZE) {
		src_in_l->source.in.small_ooo_packets++;
		BUG_ON(src_in_l->source.in.small_ooo_packets == 0);
	}

	cor_send_ack_conn_ifneeded(src_in_l, seqno, len);
}
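
/*
 * Insert out-of-order data as a buf entry at next_rcvooo. Small chunks
 * (<= 128 bytes) that are seqno-adjacent to a small neighboring buf entry
 * are merged with it, everything else becomes a separate entry.
 */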
static void _cor_conn_rcv_ooo_buf(struct cor_conn *src_in_l, char *data,
		__u32 len, __u64 seqno, __u8 flush,
		struct list_head *next_rcvooo)
{
	struct cor_rcvooo_buf *merge_prev;
	struct cor_rcvooo_buf *merge_next;

	if (len > 128)
		goto nomerge;

	merge_prev = _cor_conn_rcv_ooo_buf_checkmerge(src_in_l,
			next_rcvooo->prev);
	if (merge_prev != 0) {
		__u64 next_seqno = merge_prev->r.seqno + merge_prev->len;
		BUG_ON(cor_seqno_after(next_seqno, seqno));
		if (cor_seqno_eq(next_seqno, seqno) == 0)
			merge_prev = 0;
	}

	merge_next = _cor_conn_rcv_ooo_buf_checkmerge(src_in_l, next_rcvooo);
	if (merge_next != 0) {
		__u64 next_seqno = seqno + len;
		BUG_ON(cor_seqno_after(next_seqno, merge_next->r.seqno));
		if (cor_seqno_eq(next_seqno, merge_next->r.seqno) == 0)
			merge_next = 0;
	}

	if (merge_prev == 0 && merge_next == 0) {
nomerge:
		_cor_conn_rcv_ooo_nomerge(src_in_l, data, len, seqno, flush,
				next_rcvooo);
	} else {
		_cor_conn_rcv_ooo_merge(src_in_l, data, len, seqno, flush,
				merge_prev, merge_next);
	}
}
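
/*
 * Queue an out-of-order skb without copying it; the reorder bookkeeping is
 * kept in the skb's cor_skb_procstate.
 */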
static void _cor_conn_rcv_ooo_skb(struct cor_conn *src_in_l,
		struct sk_buff *skb, __u64 seqno, __u8 flush,
		struct list_head *next_rcvooo)
{
	struct cor_rcvooo *newr;
	struct cor_skb_procstate *ps = cor_skb_pstate(skb);

	memset(&(ps->funcstate), 0, sizeof(ps->funcstate));
	ps->funcstate.rcv_ooo.skb_memused = sizeof(struct sk_buff) +
			skb->len;

	if (unlikely(_cor_conn_rcv_ooo_accountmem(src_in_l,
			ps->funcstate.rcv_ooo.skb_memused))) {
		kfree_skb(skb);
		return;
	}

	newr = &(ps->funcstate.rcv_ooo.r);
	newr->type = RCVOOO_SKB;

	newr->seqno = seqno;
	newr->flush = flush;
	list_add_tail(&(newr->lh), next_rcvooo);

	cor_send_ack_conn_ifneeded(src_in_l, seqno, skb->len);
}
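
/*
 * Insert new out-of-order data behind prev_rcvooo_lh: trim the head if it
 * overlaps the previous entry and the tail if it overlaps the next entry,
 * then store it either as an skb entry (large, unmodified skbs) or as a
 * copied buf entry.
 */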
static void __cor_conn_rcv_ooo(struct cor_conn *src_in_l, struct sk_buff *skb,
		char *data, __u32 len, __u64 seqno, __u8 flush,
		struct list_head *prev_rcvooo_lh)
{
	struct list_head *reorder_queue = &(src_in_l->source.in.reorder_queue);
	struct list_head *next_rcvooo_lh = prev_rcvooo_lh->next;

	if (prev_rcvooo_lh != reorder_queue) {
		struct cor_rcvooo *prev_rcvooo = container_of(prev_rcvooo_lh,
				struct cor_rcvooo, lh);
		__u32 currlen = cor_rcvooo_len(prev_rcvooo);

		if (cor_seqno_after(prev_rcvooo->seqno + currlen, seqno)) {
			__u64 overlap = cor_seqno_clean(prev_rcvooo->seqno +
					currlen - seqno);

			if (unlikely(len <= overlap))
				goto drop;

			data += overlap;
			len -= overlap;
			seqno += overlap;
		}
	}

	if (next_rcvooo_lh != reorder_queue) {
		struct cor_rcvooo *next_rcvooo = container_of(next_rcvooo_lh,
				struct cor_rcvooo, lh);

		if (unlikely(cor_seqno_before_eq(next_rcvooo->seqno, seqno)))
			goto drop;

		if (unlikely(cor_seqno_before(next_rcvooo->seqno, seqno + len)))
			len = cor_seqno_clean(next_rcvooo->seqno - seqno);
	}

	if (unlikely(len == 0)) {
drop:
		if (skb != 0)
			kfree_skb(skb);
		return;
	}

	if (skb == 0 || len < 1024 ||
			skb->data != ((unsigned char *) data) ||
			skb->len != len) {
		_cor_conn_rcv_ooo_buf(src_in_l, data, len, seqno, flush,
				next_rcvooo_lh);

		if (skb != 0)
			kfree_skb(skb);
	} else {
		skb->data = data;
		skb->len = len;

		_cor_conn_rcv_ooo_skb(src_in_l, skb, seqno, flush,
				next_rcvooo_lh);
	}
}
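
/*
 * Find the insertion point for out-of-order data by walking the reorder
 * queue backwards until an entry with a seqno before or equal to the new
 * seqno is found, then insert behind it.
 */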
static void _cor_conn_rcv_ooo(struct cor_conn *src_in_l, struct sk_buff *skb,
		char *data, __u32 len, __u64 seqno, __u8 flush)
{
	struct list_head *reorder_queue = &(src_in_l->source.in.reorder_queue);
	struct list_head *currlh = reorder_queue->prev;

	BUG_ON(skb != 0 && skb->data != ((unsigned char *)data));
	BUG_ON(skb != 0 && skb->len != len);

	while (currlh != reorder_queue) {
		struct cor_rcvooo *currr = container_of(currlh,
				struct cor_rcvooo, lh);

		if (cor_seqno_before_eq(currr->seqno, seqno))
			break;

		currlh = currlh->prev;
	}

	__cor_conn_rcv_ooo(src_in_l, skb, data, len, seqno, flush, currlh);
}
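
/*
 * Receive one data segment for a known connection: check that it really
 * comes from the expected neighbor and conn_id, enforce the receive
 * window, deliver in-order data directly and divert out-of-order data to
 * the reorder queue, then drain the queue and trigger acks as needed.
 */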
static void _cor_conn_rcv(struct cor_neighbor *nb, struct cor_conn *src_in,
		__u32 conn_id, struct sk_buff *skb, char *data, __u32 len,
		__u64 seqno, int rcv_delayed_lowbuf, __u8 flush)
{
	spin_lock_bh(&(src_in->rcv_lock));

	if (unlikely(unlikely(src_in->isreset != 0) ||
			unlikely(src_in->sourcetype != SOURCE_IN) ||
			unlikely(src_in->source.in.conn_id != conn_id)))
		goto drop;

	if (nb == 0) {
		BUG_ON(skb == 0);
		if (unlikely(cor_is_from_nb(skb, src_in->source.in.nb) == 0))
			goto drop;
	} else {
		if (unlikely(src_in->source.in.nb != nb))
			goto drop;
	}

	cor_set_last_act(src_in);

	if (unlikely(cor_seqno_before(seqno + len,
			src_in->source.in.next_seqno)))
		goto drop_ack;
	if (unlikely(unlikely(cor_seqno_after(seqno + len,
			src_in->source.in.window_seqnolimit)) &&
			cor_seqno_after(seqno + len,
			src_in->source.in.window_seqnolimit_remote)))
		goto drop;

	if (cor_seqno_after(seqno, src_in->source.in.next_seqno)) {
		_cor_conn_rcv_ooo(src_in, skb, data, len, seqno, flush);
	} else {
		__u32 rcvlen;

		if (cor_seqno_after(src_in->source.in.next_seqno, seqno)) {
			__u64 overlap = cor_seqno_clean(
					src_in->source.in.next_seqno - seqno);

			BUG_ON(overlap > len);

			data += overlap;
			len -= overlap;
			seqno += overlap;

			rcvlen = cor_receive_buf(src_in, data, len,
					rcv_delayed_lowbuf, flush);
			if (skb != 0)
				kfree_skb(skb);
		} else if (skb != 0) {
			__u32 skblen = skb->len;
			rcvlen = cor_receive_skb(src_in, skb,
					rcv_delayed_lowbuf, flush);
			if (unlikely(rcvlen < skblen))
				kfree_skb(skb);
		} else {
			rcvlen = cor_receive_buf(src_in, data, len,
					rcv_delayed_lowbuf, flush);
		}

		if (likely(rcvlen > 0)) {
			src_in->source.in.next_seqno += rcvlen;

			cor_drain_ooo_queue(src_in);
			src_in->source.in.inorder_ack_needed = 1;
			cor_flush_buf(src_in);
			cor_send_ack_conn_ifneeded(src_in, 0, 0);
		}
	}

	if (0) {
drop_ack:
		cor_send_ack_conn_ifneeded(src_in, 0, 0);
drop:
		if (skb != 0) {
			kfree_skb(skb);
		}
	}
	spin_unlock_bh(&(src_in->rcv_lock));
}
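
/*
 * Entry point for connection data received from a neighbor: look up the
 * connection by conn_id and hand the segment to _cor_conn_rcv(); an
 * unknown conn_id is answered with a reset.
 */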
void cor_conn_rcv(struct cor_neighbor *nb, struct sk_buff *skb, char *data,
		__u32 len, __u32 conn_id, __u64 seqno, int rcv_delayed_lowbuf,
		__u8 flush)
{
	struct cor_conn *src_in;

	BUG_ON(nb == 0);

	if (skb != 0) {
		BUG_ON(data != 0);
		BUG_ON(len != 0);

		data = skb->data;
		len = skb->len;
	}

	src_in = cor_get_conn(nb, conn_id);

	if (unlikely(src_in == 0)) {
		/* printk(KERN_DEBUG "unknown conn_id when receiving: %d",
				conn_id); */

		if (skb != 0)
			kfree_skb(skb);
		cor_send_reset_conn(nb, conn_id ^ (conn_id & (1 << 31)), 0);
		return;
	}

	/* for testing */
	/* len = 1;
	if (skb != 0)
		skb->len = 1; */

	_cor_conn_rcv(nb, src_in, conn_id, skb, data, len, seqno,
			rcv_delayed_lowbuf, flush);
	kref_put(&(src_in->ref), cor_free_conn);
}
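
/* create the slab cache for copied out-of-order buffers */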
int __init cor_rcv_init(void)
{
	BUG_ON(sizeof(struct cor_skb_procstate) > 48);

	cor_rcvooo_buf_slab = kmem_cache_create("cor_rcvooo_buf",
			sizeof(struct cor_rcvooo_buf), 8, 0, 0);
	if (unlikely(cor_rcvooo_buf_slab == 0))
		return -ENOMEM;

	return 0;
}

void __exit cor_rcv_exit2(void)
{
	kmem_cache_destroy(cor_rcvooo_buf_slab);
	cor_rcvooo_buf_slab = 0;
}

MODULE_LICENSE("GPL");