/**
 * Connection oriented routing
 * Copyright (C) 2007-2012 Michael Blizek
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version 2
 * of the License, or (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
 * 02110-1301, USA.
 */

#include <linux/version.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/in.h>

#include "cor.h"

static struct kmem_cache *rcvooo_buf_slab;

__u8 pack_registered = 0;
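
/*
 * Out-of-order receive handling: data that arrives ahead of next_seqno is
 * kept per connection in source.in.reorder_queue, either as a copied buffer
 * (struct cor_rcvooo_buf, allocated from rcvooo_buf_slab) or as the original
 * sk_buff (RCVOOO_SKB), with the memory charged to reorder_memused.
 * reset_ooo_queue() below frees everything that is still queued and clears
 * that accounting.
 */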
void reset_ooo_queue(struct cor_conn *src_in_lx)
{
	BUG_ON(src_in_lx->sourcetype != SOURCE_IN);

	while (list_empty(&(src_in_lx->source.in.reorder_queue)) == 0) {
		struct cor_rcvooo *r = container_of(
				src_in_lx->source.in.reorder_queue.next,
				struct cor_rcvooo, lh);

		list_del(&(r->lh));

		if (r->type == RCVOOO_BUF) {
			struct cor_rcvooo_buf *rb = container_of(r,
					struct cor_rcvooo_buf, r);
			src_in_lx->source.in.reorder_memused -= (rb->len +
					sizeof(struct cor_rcvooo_buf));
			kfree(rb->data);
			kmem_cache_free(rcvooo_buf_slab, rb);
		} else if (r->type == RCVOOO_SKB) {
			struct cor_skb_procstate *ps = container_of(r,
					struct cor_skb_procstate,
					funcstate.rcv_ooo.r);
			struct sk_buff *skb = skb_from_pstate(ps);
			src_in_lx->source.in.reorder_memused -=
					ps->funcstate.rcv_ooo.skb_memused;
			kfree_skb(skb);
		} else {
			BUG();
		}
	}

	src_in_lx->source.in.small_ooo_packets = 0;
	BUG_ON(src_in_lx->source.in.reorder_memused != 0);

	account_bufspace(src_in_lx);
}
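
/*
 * Deliver a queued RCVOOO_BUF entry once next_seqno has caught up with it.
 * Returns 0 if the entry was fully consumed and freed, 1 if only part of it
 * could be delivered and it stays at the head of the reorder queue.
 */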
static int drain_ooo_queue_buf(struct cor_conn *src_in_l, struct cor_rcvooo *r,
		__u8 flush)
{
	struct cor_rcvooo_buf *rb = container_of(r, struct cor_rcvooo_buf, r);

	__u32 data_offset = 0;
	__u32 rc;

	if (unlikely(seqno_after(src_in_l->source.in.next_seqno, r->seqno))) {
		/* the start of this buffer was already received in order */
		__u64 overlap = seqno_clean(src_in_l->source.in.next_seqno -
				r->seqno);

		if (overlap >= rb->len)
			goto free;

		src_in_l->source.in.reorder_memused -= overlap;
		rb->len -= overlap;
		data_offset += overlap;
		r->seqno += overlap;
	}

	BUG_ON(seqno_eq(src_in_l->source.in.next_seqno, r->seqno) == 0);
	rc = receive_buf(src_in_l, rb->data + data_offset, rb->len, 0, flush);

	BUG_ON(rc > rb->len);
	src_in_l->source.in.next_seqno += rc;

	if (unlikely(rc != rb->len)) {
		src_in_l->source.in.reorder_memused -= rc;
		rb->len -= rc;
		r->seqno += rc;

		data_offset += rc;
		memmove(rb->data, rb->data + data_offset, rb->len);

		if (rb->len + data_offset > SMALL_OOO_PACKET_MAXSIZE &&
				rb->len <= SMALL_OOO_PACKET_MAXSIZE) {
			src_in_l->source.in.small_ooo_packets++;
			BUG_ON(src_in_l->source.in.small_ooo_packets == 0);
		}

		return 1;
	} else {
free:
		src_in_l->source.in.reorder_memused -= (rb->len +
				sizeof(struct cor_rcvooo_buf));
		list_del(&(r->lh));
		kfree(rb->data);
		if (rb->len <= SMALL_OOO_PACKET_MAXSIZE) {
			BUG_ON(src_in_l->source.in.small_ooo_packets == 0);
			src_in_l->source.in.small_ooo_packets--;
		}
		kmem_cache_free(rcvooo_buf_slab, rb);

		return 0;
	}
}
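
/*
 * Same as drain_ooo_queue_buf(), but for RCVOOO_SKB entries. If the skb had
 * to be trimmed because its start was already received, it is handed to
 * receive_buf() instead of receive_skb().
 */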
static int drain_ooo_queue_skb(struct cor_conn *src_in_l, struct cor_rcvooo *r,
		__u8 flush)
{
	struct cor_skb_procstate *ps = container_of(r, struct cor_skb_procstate,
			funcstate.rcv_ooo.r);
	struct sk_buff *skb = skb_from_pstate(ps);

	__u8 rcv_as_buf = 0;

	if (unlikely(seqno_after(src_in_l->source.in.next_seqno, r->seqno))) {
		/* the start of this skb was already received in order */
		__u64 overlap = seqno_clean(src_in_l->source.in.next_seqno -
				r->seqno);

		if (overlap >= skb->len) {
			src_in_l->source.in.reorder_memused -=
					ps->funcstate.rcv_ooo.skb_memused;
			list_del(&(r->lh));
			kfree_skb(skb);
			return 0;
		}

		skb->data += overlap;
		skb->len -= overlap;
		r->seqno += overlap;

		rcv_as_buf = 1;
	}

	BUG_ON(seqno_eq(src_in_l->source.in.next_seqno, r->seqno) == 0);
	BUG_ON(skb->len <= 0);

	if (unlikely(rcv_as_buf != 0)) {
		__u32 rc = receive_buf(src_in_l, skb->data, skb->len, 0, flush);

		BUG_ON(rc > skb->len);

		src_in_l->source.in.next_seqno += rc;

		if (unlikely(rc != skb->len)) {
			skb->data += rc;
			skb->len -= rc;
			r->seqno += rc;

			return 1;
		} else {
			src_in_l->source.in.reorder_memused -=
					ps->funcstate.rcv_ooo.skb_memused;
			list_del(&(r->lh));
			kfree_skb(skb);

			return 0;
		}
	} else {
		__u32 len = skb->len;
		__u32 rc;
		__u32 memused = ps->funcstate.rcv_ooo.skb_memused;

		list_del(&(r->lh));

		rc = receive_skb(src_in_l, skb, 0, flush);

		BUG_ON(rc > len);

		src_in_l->source.in.next_seqno += rc;

		if (unlikely(rc != len)) {
			BUG_ON(rc > skb->len);
			skb->data += rc;
			skb->len -= rc;
			r->seqno += rc;
			list_add(&(r->lh),
					&(src_in_l->source.in.reorder_queue));
			return 1;
		}

		src_in_l->source.in.reorder_memused -= memused;

		return 0;
	}
}
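
/*
 * Walk the reorder queue from the front and deliver every entry that is now
 * in order. Flushing is suppressed while more out-of-order packets are still
 * queued behind the current one.
 */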
void drain_ooo_queue(struct cor_conn *src_in_l)
{
	int drained = 0;

	BUG_ON(src_in_l->sourcetype != SOURCE_IN);

	while (list_empty(&(src_in_l->source.in.reorder_queue)) == 0) {
		struct cor_rcvooo *r = container_of(
				src_in_l->source.in.reorder_queue.next,
				struct cor_rcvooo, lh);
		__u8 flush = r->flush;
		int rc;

		if (seqno_before(src_in_l->source.in.next_seqno, r->seqno))
			break;

		/* do not flush if there are more ooo packets in queue */
		if (src_in_l->source.in.reorder_queue.prev !=
				src_in_l->source.in.reorder_queue.next)
			flush = 0;

		if (r->type == RCVOOO_BUF)
			rc = drain_ooo_queue_buf(src_in_l, r, flush);
		else if (r->type == RCVOOO_SKB)
			rc = drain_ooo_queue_skb(src_in_l, r, flush);
		else
			BUG();

		if (unlikely(rc != 0)) {
			break;
		}

		drained = 1;
	}

	BUG_ON(list_empty(&(src_in_l->source.in.reorder_queue)) != 0 &&
			src_in_l->source.in.reorder_memused != 0);
	BUG_ON(list_empty(&(src_in_l->source.in.reorder_queue)) == 0 &&
			src_in_l->source.in.reorder_memused == 0);

	if (drained)
		account_bufspace(src_in_l);
}

static __u32 rcvooo_len(struct cor_rcvooo *r)
{
	if (r->type == RCVOOO_BUF) {
		struct cor_rcvooo_buf *rb = container_of(r,
				struct cor_rcvooo_buf, r);
		return rb->len;
	} else if (r->type == RCVOOO_SKB) {
		struct sk_buff *skb = skb_from_pstate(container_of(r,
				struct cor_skb_procstate, funcstate.rcv_ooo.r));
		return skb->len;
	} else {
		BUG();
	}
}
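
/*
 * Return the neighboring queue entry as a merge candidate, or 0 if it is
 * the list head or not a small (<= 256 byte) RCVOOO_BUF entry.
 */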
static struct cor_rcvooo_buf *_conn_rcv_ooo_buf_checkmerge(
		struct cor_conn *src_in_l, struct list_head *lh_rcvooo)
{
	struct cor_rcvooo *r;
	struct cor_rcvooo_buf *rb;

	if (lh_rcvooo == &(src_in_l->source.in.reorder_queue))
		return 0;

	r = container_of(lh_rcvooo, struct cor_rcvooo, lh);
	if (r->type != RCVOOO_BUF)
		return 0;

	rb = container_of(r, struct cor_rcvooo_buf, r);
	if (rb->len > 256)
		return 0;

	return rb;
}
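
/*
 * Charge new_bytes against the per-connection reorder memory counter.
 * Returns 1 (and undoes the charge) if the counter would overflow or if
 * account_bufspace() rejects the additional memory.
 */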
static int _conn_rcv_ooo_accountmem(struct cor_conn *src_in_l, __u32 new_bytes)
{
	if (new_bytes == 0)
		return 0;

	if (unlikely(src_in_l->source.in.reorder_memused + new_bytes <
			src_in_l->source.in.reorder_memused))
		return 1;

	src_in_l->source.in.reorder_memused += new_bytes;

	if (unlikely(account_bufspace(src_in_l))) {
		src_in_l->source.in.reorder_memused -= new_bytes;
		account_bufspace(src_in_l);
		return 1;
	}

	return 0;
}
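
/*
 * Merge newly received out-of-order data with adjacent buffered entries
 * (merge_prev ends exactly at seqno and/or merge_next starts exactly at
 * seqno + len). The pieces are copied into one freshly allocated buffer; on
 * allocation or accounting failure the new data is simply dropped.
 */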
static void _conn_rcv_ooo_merge(struct cor_conn *src_in_l, char *data,
		__u32 len, __u64 seqno, __u8 flush,
		struct cor_rcvooo_buf *merge_prev,
		struct cor_rcvooo_buf *merge_next)
{
	char *tmpbuf;
	__u32 tmpbuf_len = 0;
	__u32 tmpbuf_offset = 0;

	struct cor_rcvooo_buf *rb;

	if (merge_prev != 0)
		tmpbuf_len += merge_prev->len;
	tmpbuf_len += len;
	if (merge_next != 0)
		tmpbuf_len += merge_next->len;

	tmpbuf = kmalloc(tmpbuf_len, GFP_ATOMIC);
	if (unlikely(tmpbuf == 0))
		return;

	if (merge_prev != 0 && merge_next != 0 && len <
			sizeof(struct cor_rcvooo_buf)) {
		src_in_l->source.in.reorder_memused += len -
				sizeof(struct cor_rcvooo_buf);
	} else {
		__u32 new_bytes = len;
		if (merge_prev != 0 && merge_next != 0)
			new_bytes -= sizeof(struct cor_rcvooo_buf);

		if (unlikely(_conn_rcv_ooo_accountmem(src_in_l, new_bytes))) {
			kfree(tmpbuf);
			return;
		}
	}

	if (merge_prev != 0) {
		memcpy(tmpbuf + tmpbuf_offset, merge_prev->data,
				merge_prev->len);
		tmpbuf_offset += merge_prev->len;
	}
	memcpy(tmpbuf + tmpbuf_offset, data, len);
	tmpbuf_offset += len;
	if (merge_next != 0) {
		memcpy(tmpbuf + tmpbuf_offset, merge_next->data,
				merge_next->len);
		tmpbuf_offset += merge_next->len;
	}

	BUG_ON(tmpbuf_offset != tmpbuf_len);

	if (merge_prev != 0) {
		kfree(merge_prev->data);
		merge_prev->data = 0;
		if (merge_prev->len <= SMALL_OOO_PACKET_MAXSIZE) {
			BUG_ON(src_in_l->source.in.small_ooo_packets == 0);
			src_in_l->source.in.small_ooo_packets--;
		}
	}

	if (merge_next != 0) {
		kfree(merge_next->data);
		merge_next->data = 0;
		if (merge_next->len <= SMALL_OOO_PACKET_MAXSIZE) {
			BUG_ON(src_in_l->source.in.small_ooo_packets == 0);
			src_in_l->source.in.small_ooo_packets--;
		}

		flush = merge_next->r.flush;

		if (merge_prev != 0) {
			list_del(&(merge_next->r.lh));
			kmem_cache_free(rcvooo_buf_slab, merge_next);
			merge_next = 0;
		}
	}

	if (merge_prev != 0) {
		rb = merge_prev;
	} else {
		BUG_ON(merge_next == 0);
		rb = merge_next;
		rb->r.seqno = seqno;
	}

	rb->data = tmpbuf;
	rb->len = tmpbuf_len;
	rb->r.flush = flush;

	if (tmpbuf_len <= SMALL_OOO_PACKET_MAXSIZE) {
		src_in_l->source.in.small_ooo_packets++;
		BUG_ON(src_in_l->source.in.small_ooo_packets == 0);
	}

	send_ack_conn_ifneeded(src_in_l, seqno, len);
}
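
/*
 * Queue newly received out-of-order data as a standalone cor_rcvooo_buf in
 * front of next_rcvooo. Small packets are limited per connection
 * (MAX_SMALL_OOO_PACKETS_PER_CONN) so a neighbor sending tiny segments
 * cannot exhaust memory.
 */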
static void _conn_rcv_ooo_nomerge(struct cor_conn *src_in_l, char *data,
		__u32 len, __u64 seqno, __u8 flush,
		struct list_head *next_rcvooo)
{
	struct cor_rcvooo_buf *rb;

	/* avoid oom if a neighbor sends very small packets */
	if (len <= SMALL_OOO_PACKET_MAXSIZE &&
			src_in_l->source.in.small_ooo_packets >=
			MAX_SMALL_OOO_PACKETS_PER_CONN)
		return;

	if (unlikely(_conn_rcv_ooo_accountmem(src_in_l,
			len + sizeof(struct cor_rcvooo_buf))))
		return;

	rb = kmem_cache_alloc(rcvooo_buf_slab, GFP_ATOMIC);
	if (unlikely(rb == 0)) {
		src_in_l->source.in.reorder_memused -=
				(len + sizeof(struct cor_rcvooo_buf));
		account_bufspace(src_in_l);
		return;
	}
	memset(rb, 0, sizeof(struct cor_rcvooo_buf));

	rb->data = kmalloc(len, GFP_ATOMIC);
	if (unlikely(rb->data == 0)) {
		kmem_cache_free(rcvooo_buf_slab, rb);

		src_in_l->source.in.reorder_memused -=
				(len + sizeof(struct cor_rcvooo_buf));
		account_bufspace(src_in_l);
		return;
	}

	memcpy(rb->data, data, len);
	rb->len = len;
	rb->r.type = RCVOOO_BUF;
	rb->r.seqno = seqno;
	if (flush)
		rb->r.flush = 1;
	else
		rb->r.flush = 0;
	list_add_tail(&(rb->r.lh), next_rcvooo);

	if (len <= SMALL_OOO_PACKET_MAXSIZE) {
		src_in_l->source.in.small_ooo_packets++;
		BUG_ON(src_in_l->source.in.small_ooo_packets == 0);
	}

	send_ack_conn_ifneeded(src_in_l, seqno, len);
}
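
/*
 * Insert out-of-order data as a copied buffer. Payloads of at most 128 bytes
 * are merged with directly adjacent buffered entries where possible;
 * everything else becomes a separate queue entry.
 */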
static void _conn_rcv_ooo_buf(struct cor_conn *src_in_l, char *data, __u32 len,
		__u64 seqno, __u8 flush, struct list_head *next_rcvooo)
{
	struct cor_rcvooo_buf *merge_prev;
	struct cor_rcvooo_buf *merge_next;

	if (len > 128)
		goto nomerge;

	merge_prev = _conn_rcv_ooo_buf_checkmerge(src_in_l, next_rcvooo->prev);
	if (merge_prev != 0) {
		__u64 next_seqno = merge_prev->r.seqno + merge_prev->len;
		BUG_ON(seqno_after(next_seqno, seqno));
		if (seqno_eq(next_seqno, seqno) == 0)
			merge_prev = 0;
	}

	merge_next = _conn_rcv_ooo_buf_checkmerge(src_in_l, next_rcvooo);
	if (merge_next != 0) {
		__u64 next_seqno = seqno + len;
		BUG_ON(seqno_after(next_seqno, merge_next->r.seqno));
		if (seqno_eq(next_seqno, merge_next->r.seqno) == 0)
			merge_next = 0;
	}

	if (merge_prev == 0 && merge_next == 0) {
nomerge:
		_conn_rcv_ooo_nomerge(src_in_l, data, len, seqno, flush,
				next_rcvooo);
	} else {
		_conn_rcv_ooo_merge(src_in_l, data, len, seqno, flush,
				merge_prev, merge_next);
	}
}
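
/*
 * Insert an out-of-order packet by queueing the sk_buff itself; the rcvooo
 * bookkeeping is kept in the skb's cor_skb_procstate.
 */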
static void _conn_rcv_ooo_skb(struct cor_conn *src_in_l, struct sk_buff *skb,
		__u64 seqno, __u8 flush, struct list_head *next_rcvooo)
{
	struct cor_rcvooo *newr;
	struct cor_skb_procstate *ps = skb_pstate(skb);

	memset(&(ps->funcstate), 0, sizeof(ps->funcstate));
	ps->funcstate.rcv_ooo.skb_memused = sizeof(struct sk_buff) +
			skb->len;

	if (unlikely(_conn_rcv_ooo_accountmem(src_in_l,
			ps->funcstate.rcv_ooo.skb_memused))) {
		kfree_skb(skb);
		return;
	}

	newr = &(ps->funcstate.rcv_ooo.r);
	newr->type = RCVOOO_SKB;
	newr->seqno = seqno;
	newr->flush = flush;
	list_add_tail(&(newr->lh), next_rcvooo);

	send_ack_conn_ifneeded(src_in_l, seqno, skb->len);
}
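
/*
 * Insert data between prev_rcvooo_lh and its successor: trim the head if it
 * overlaps the previous entry, trim the tail if it overlaps the next entry,
 * and drop it entirely if nothing is left. Packets of 1024 bytes or more
 * that still match their skb exactly are queued as RCVOOO_SKB, everything
 * else as a copied RCVOOO_BUF.
 */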
static void __conn_rcv_ooo(struct cor_conn *src_in_l, struct sk_buff *skb,
		char *data, __u32 len, __u64 seqno, __u8 flush,
		struct list_head *prev_rcvooo_lh)
{
	struct list_head *reorder_queue = &(src_in_l->source.in.reorder_queue);
	struct list_head *next_rcvooo_lh = prev_rcvooo_lh->next;

	if (prev_rcvooo_lh != reorder_queue) {
		struct cor_rcvooo *prev_rcvooo = container_of(prev_rcvooo_lh,
				struct cor_rcvooo, lh);
		__u32 currlen = rcvooo_len(prev_rcvooo);

		if (seqno_after(prev_rcvooo->seqno + currlen, seqno)) {
			__u64 overlap = seqno_clean(prev_rcvooo->seqno +
					currlen - seqno);

			if (unlikely(len <= overlap))
				goto drop;

			data += overlap;
			len -= overlap;
			seqno += overlap;
		}
	}

	if (next_rcvooo_lh != reorder_queue) {
		struct cor_rcvooo *next_rcvooo = container_of(next_rcvooo_lh,
				struct cor_rcvooo, lh);

		if (unlikely(seqno_before_eq(next_rcvooo->seqno, seqno)))
			goto drop;

		if (unlikely(seqno_before(next_rcvooo->seqno, seqno + len)))
			len = seqno_clean(next_rcvooo->seqno - seqno);
	}

	if (unlikely(len == 0)) {
drop:
		if (skb != 0)
			kfree_skb(skb);
		return;
	}

	if (skb == 0 || len < 1024 ||
			skb->data != ((unsigned char *) data) ||
			skb->len != len) {
		_conn_rcv_ooo_buf(src_in_l, data, len, seqno, flush,
				next_rcvooo_lh);

		if (skb != 0)
			kfree_skb(skb);
	} else {
		skb->data = data;
		skb->len = len;

		_conn_rcv_ooo_skb(src_in_l, skb, seqno, flush, next_rcvooo_lh);
	}
}
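
/*
 * Find the queue position for an out-of-order packet by scanning the reorder
 * queue backwards for the last entry whose seqno is not after the new one,
 * then let __conn_rcv_ooo() do the actual insertion.
 */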
static void _conn_rcv_ooo(struct cor_conn *src_in_l, struct sk_buff *skb,
		char *data, __u32 len, __u64 seqno, __u8 flush)
{
	struct list_head *reorder_queue = &(src_in_l->source.in.reorder_queue);
	struct list_head *currlh = reorder_queue->prev;

	BUG_ON(skb != 0 && skb->data != ((unsigned char *)data));
	BUG_ON(skb != 0 && skb->len != len);

	while (currlh != reorder_queue) {
		struct cor_rcvooo *currr = container_of(currlh,
				struct cor_rcvooo, lh);

		if (seqno_before_eq(currr->seqno, seqno))
			break;

		currlh = currlh->prev;
	}

	__conn_rcv_ooo(src_in_l, skb, data, len, seqno, flush, currlh);
}
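
/*
 * Receive conndata for one connection under its rcv_lock: validate the
 * sending neighbor and conn_id, enforce the receive window, deliver in-order
 * data directly (draining the reorder queue afterwards) and divert data that
 * is ahead of next_seqno to the out-of-order path.
 */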
static void _conn_rcv(struct cor_neighbor *nb, struct cor_conn *src_in,
		__u32 conn_id, struct sk_buff *skb, char *data, __u32 len,
		__u64 seqno, int rcv_delayed_lowbuf, __u8 flush)
{
	spin_lock_bh(&(src_in->rcv_lock));

	if (unlikely(unlikely(src_in->isreset != 0) ||
			unlikely(src_in->sourcetype != SOURCE_IN) ||
			unlikely(src_in->source.in.conn_id != conn_id)))
		goto drop;

	if (nb == 0) {
		BUG_ON(skb == 0);
		if (unlikely(is_from_nb(skb, src_in->source.in.nb) == 0))
			goto drop;
	} else {
		if (unlikely(src_in->source.in.nb != nb))
			goto drop;
	}

	set_last_act(src_in);

	if (unlikely(seqno_before(seqno + len, src_in->source.in.next_seqno)))
		goto drop_ack;
	if (unlikely(unlikely(seqno_after(seqno + len,
			src_in->source.in.window_seqnolimit)) &&
			seqno_after(seqno + len,
			src_in->source.in.window_seqnolimit_remote)))
		goto drop;

	if (seqno_after(seqno, src_in->source.in.next_seqno)) {
		_conn_rcv_ooo(src_in, skb, data, len, seqno, flush);
	} else {
		__u32 rcvlen;

		if (seqno_after(src_in->source.in.next_seqno, seqno)) {
			__u64 overlap = seqno_clean(
					src_in->source.in.next_seqno - seqno);

			BUG_ON(overlap > len);

			data += overlap;
			len -= overlap;
			seqno += overlap;

			rcvlen = receive_buf(src_in, data, len,
					rcv_delayed_lowbuf, flush);
			if (skb != 0)
				kfree_skb(skb);
		} else if (skb != 0) {
			__u32 skblen = skb->len;
			rcvlen = receive_skb(src_in, skb, rcv_delayed_lowbuf,
					flush);
			if (unlikely(rcvlen < skblen))
				kfree_skb(skb);
		} else {
			rcvlen = receive_buf(src_in, data, len,
					rcv_delayed_lowbuf, flush);
		}

		if (likely(rcvlen > 0)) {
			src_in->source.in.next_seqno += rcvlen;

			drain_ooo_queue(src_in);
			src_in->source.in.inorder_ack_needed = 1;
			flush_buf(src_in);
			send_ack_conn_ifneeded(src_in, 0, 0);
		}
	}

	if (0) {
drop_ack:
		send_ack_conn_ifneeded(src_in, 0, 0);
drop:
		if (skb != 0) {
			kfree_skb(skb);
		}
	}

	spin_unlock_bh(&(src_in->rcv_lock));
}
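
/*
 * Look up the connection for conn_id and pass the data on to _conn_rcv().
 * Unknown conn_ids are answered with send_reset_conn().
 */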
void conn_rcv(struct cor_neighbor *nb, struct sk_buff *skb, char *data,
		__u32 len, __u32 conn_id, __u64 seqno, int rcv_delayed_lowbuf,
		__u8 flush)
{
	struct cor_conn *src_in;

	BUG_ON(nb == 0);

	if (skb != 0) {
		BUG_ON(data != 0);
		BUG_ON(len != 0);

		data = skb->data;
		len = skb->len;
	}

	src_in = get_conn(nb, conn_id);

	if (unlikely(src_in == 0)) {
		/* printk(KERN_DEBUG "unknown conn_id when receiving: %d",
				conn_id); */

		if (skb != 0)
			kfree_skb(skb);
		send_reset_conn(nb, conn_id ^ (conn_id & (1 << 31)), 0);
		return;
	}

	/* for testing */
	/* len = 1;
	if (skb != 0)
		skb->len = 1; */

	_conn_rcv(nb, src_in, conn_id, skb, data, len, seqno,
			rcv_delayed_lowbuf, flush);
	kref_put(&(src_in->ref), free_conn);
}
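
/*
 * Parse a conndata packet from a neighbor: a 4 byte connection id followed
 * by a 6 byte sequence number, with the remaining skb payload handed to
 * conn_rcv().
 */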
static void rcv_conndata(struct sk_buff *skb, int rcv_delayed_lowbuf,
		__u8 flush)
{
	struct cor_neighbor *nb = get_neigh_by_mac(skb);

	__u32 conn_id;
	__u64 seqno;

	char *connid_p;
	char *seqno_p;

	/* __u8 rand; */

	if (unlikely(nb == 0))
		goto drop;

	connid_p = cor_pull_skb(skb, 4);
	if (unlikely(connid_p == 0))
		goto drop;

	seqno_p = cor_pull_skb(skb, 6);
	if (unlikely(seqno_p == 0))
		goto drop;

	conn_id = parse_u32(connid_p);
	seqno = parse_u48(seqno_p);

	/* get_random_bytes(&rand, 1);
	if (rand < 64)
		goto drop; */

	if (unlikely(skb->len <= 0))
		goto drop;

	conn_rcv(nb, skb, 0, 0, conn_id, seqno, rcv_delayed_lowbuf, flush);

	if (0) {
drop:
		kfree_skb(skb);
	}

	if (nb != 0) {
		kref_put(&(nb->ref), neighbor_free);
	}
}
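
/*
 * Parse a control message packet: a 6 byte sequence number followed by the
 * control payload, which is handed to kernel_packet().
 */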
static void rcv_cmsg(struct sk_buff *skb)
{
	struct cor_neighbor *nb = get_neigh_by_mac(skb);

	__u64 seqno;

	char *seqno_p;

	/* __u8 rand; */

	if (unlikely(nb == 0))
		goto drop;

	seqno_p = cor_pull_skb(skb, 6);
	if (unlikely(seqno_p == 0))
		goto drop;

	seqno = parse_u48(seqno_p);

	/* get_random_bytes(&rand, 1);
	if (rand < 64)
		goto drop; */

	kernel_packet(nb, skb, seqno);

	if (0) {
drop:
		kfree_skb(skb);
	}

	if (nb != 0) {
		kref_put(&(nb->ref), neighbor_free);
	}
}
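
/*
 * packet_type handler for ETH_P_COR frames: the first byte selects between
 * announce, control message and the conndata variants (with or without the
 * lowbufdelayed and flush flags).
 */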
static int rcv(struct sk_buff *skb, struct net_device *dev,
		struct packet_type *pt, struct net_device *orig_dev)
{
	__u8 packet_type;
	char *packet_type_p;

	if (skb->pkt_type == PACKET_OTHERHOST ||
			unlikely(skb->pkt_type == PACKET_LOOPBACK))
		goto drop;

	packet_type_p = cor_pull_skb(skb, 1);

	if (unlikely(packet_type_p == 0))
		goto drop;

	packet_type = *packet_type_p;

	if (unlikely(packet_type == PACKET_TYPE_ANNOUNCE)) {
		rcv_announce(skb);
		return NET_RX_SUCCESS;
	} else if (packet_type == PACKET_TYPE_CMSG) {
		rcv_cmsg(skb);
		return NET_RX_SUCCESS;
	} else if (packet_type == PACKET_TYPE_CONNDATA) {
		rcv_conndata(skb, 0, 0);
		return NET_RX_SUCCESS;
	} else if (packet_type == PACKET_TYPE_CONNDATA_LOWBUFDELAYED) {
		rcv_conndata(skb, 1, 0);
		return NET_RX_SUCCESS;
	} else if (packet_type == PACKET_TYPE_CONNDATA_FLUSH) {
		rcv_conndata(skb, 0, 1);
		return NET_RX_SUCCESS;
	} else if (packet_type == PACKET_TYPE_CONNDATA_LOWBUFDELAYED_FLUSH) {
		rcv_conndata(skb, 1, 1);
		return NET_RX_SUCCESS;
	} else {
		kfree_skb(skb);
		return NET_RX_SUCCESS;
	}

drop:
	kfree_skb(skb);
	return NET_RX_DROP;
}

static struct packet_type ptype_cor = {
	.type = htons(ETH_P_COR),
	.dev = 0,
	.func = rcv
};

void cor_rcv_down(void)
{
	if (pack_registered == 0)
		return;
	pack_registered = 0;
	dev_remove_pack(&ptype_cor);
}

void cor_rcv_up(void)
{
	if (pack_registered != 0)
		return;
	pack_registered = 1;
	dev_add_pack(&ptype_cor);
}

int __init cor_rcv_init(void)
{
	BUG_ON(sizeof(struct cor_skb_procstate) > 48);

	rcvooo_buf_slab = kmem_cache_create("cor_rcvooo_buf",
			sizeof(struct cor_rcvooo_buf), 8, 0, 0);
	if (unlikely(rcvooo_buf_slab == 0))
		return -ENOMEM;

	return 0;
}

void __exit cor_rcv_exit2(void)
{
	kmem_cache_destroy(rcvooo_buf_slab);
	rcvooo_buf_slab = 0;
}

MODULE_LICENSE("GPL");