/*
 * Connection oriented routing
 * Copyright (C) 2007-2012 Michael Blizek
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version 2
 * of the License, or (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
 * 02110-1301, USA.
 */

#include <linux/version.h>
#include <linux/kernel.h>
#include <linux/init.h>

#include "cor.h"

static struct kmem_cache *rcvooo_buf_slab;

__u8 pack_registered = 0;

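/*
 * Out-of-order receive bookkeeping. struct cor_rcvooo and struct
 * cor_rcvooo_buf are declared in cor.h; the fields used here are, as a
 * sketch (field order assumed):
 *
 *	struct cor_rcvooo {
 *		struct list_head lh;	queued on source.in.reorder_queue
 *		__u64 seqno;		seqno of the first byte
 *		__u8 type;		RCVOOO_BUF or RCVOOO_SKB
 *		__u8 flush;
 *	};
 *
 *	struct cor_rcvooo_buf {
 *		struct cor_rcvooo r;
 *		char *data;
 *		__u32 len;
 *	};
 */

/*
 * Free every queued out-of-order entry of a connection and release the
 * reorder memory accounted to it. Called when the connection is reset.
 */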
void reset_ooo_queue(struct cor_conn *src_in_lx)
{
	BUG_ON(src_in_lx->sourcetype != SOURCE_IN);

	while (list_empty(&(src_in_lx->source.in.reorder_queue)) == 0) {
		struct cor_rcvooo *r = container_of(
				src_in_lx->source.in.reorder_queue.next,
				struct cor_rcvooo, lh);

		list_del(&(r->lh));

		if (r->type == RCVOOO_BUF) {
			struct cor_rcvooo_buf *rb = container_of(r,
					struct cor_rcvooo_buf, r);
			src_in_lx->source.in.reorder_memused -= (rb->len +
					sizeof(struct cor_rcvooo_buf));
			kfree(rb->data);
			kmem_cache_free(rcvooo_buf_slab, rb);
		} else if (r->type == RCVOOO_SKB) {
			struct cor_skb_procstate *ps = container_of(r,
					struct cor_skb_procstate,
					funcstate.rcv_ooo.r);
			struct sk_buff *skb = skb_from_pstate(ps);
			src_in_lx->source.in.reorder_memused -=
					ps->funcstate.rcv_ooo.skb_memused;
			kfree_skb(skb);
		} else {
			BUG();
		}
	}

	src_in_lx->source.in.small_ooo_packets = 0;
	BUG_ON(src_in_lx->source.in.reorder_memused != 0);

	account_bufspace(src_in_lx);
}

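/*
 * Deliver a buffered ooo entry whose seqno has become in-order. Trims any
 * part that was already received, then hands the rest to receive_buf().
 * Returns 0 if the entry was consumed and freed, 1 if the receive buffer
 * only accepted part of it and the trimmed remainder stays queued.
 */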
static int drain_ooo_queue_buf(struct cor_conn *src_in_l, struct cor_rcvooo *r,
		__u8 flush)
{
	struct cor_rcvooo_buf *rb = container_of(r, struct cor_rcvooo_buf, r);
	__u32 rc;
	__u32 data_offset = 0;

	if (unlikely(seqno_after(src_in_l->source.in.next_seqno, r->seqno))) {
		__u64 overlap = seqno_clean(src_in_l->source.in.next_seqno -
				r->seqno);

		if (overlap >= rb->len)
			goto free;

		src_in_l->source.in.reorder_memused -= overlap;

		data_offset += overlap;
		rb->len -= overlap;
		r->seqno += overlap;
	}

	BUG_ON(seqno_eq(src_in_l->source.in.next_seqno, r->seqno) == 0);
	rc = receive_buf(src_in_l, rb->data + data_offset, rb->len, 0, flush);

	BUG_ON(rc > rb->len);
	src_in_l->source.in.next_seqno += rc;

	if (unlikely(rc != rb->len)) {
		src_in_l->source.in.reorder_memused -= rc;

		r->seqno += rc;
		data_offset += rc;
		rb->len -= rc;
		memmove(rb->data, rb->data + data_offset, rb->len);

		if (rb->len + data_offset > SMALL_OOO_PACKET_MAXSIZE &&
				rb->len <= SMALL_OOO_PACKET_MAXSIZE) {
			src_in_l->source.in.small_ooo_packets++;
			BUG_ON(src_in_l->source.in.small_ooo_packets == 0);
		}

		return 1;
	}

free:
	src_in_l->source.in.reorder_memused -= (rb->len +
			sizeof(struct cor_rcvooo_buf));
	list_del(&(r->lh));
	kfree(rb->data);

	if (rb->len <= SMALL_OOO_PACKET_MAXSIZE) {
		BUG_ON(src_in_l->source.in.small_ooo_packets == 0);
		src_in_l->source.in.small_ooo_packets--;
	}

	kmem_cache_free(rcvooo_buf_slab, rb);

	return 0;
}

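/*
 * Like drain_ooo_queue_buf(), but for entries that still live in their
 * original sk_buff. With rcv_as_buf set the payload is copied via
 * receive_buf(), otherwise ownership of the skb is passed to receive_skb().
 */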
static int drain_ooo_queue_skb(struct cor_conn *src_in_l, struct cor_rcvooo *r,
		__u8 flush)
{
	struct cor_skb_procstate *ps = container_of(r,
			struct cor_skb_procstate, funcstate.rcv_ooo.r);
	struct sk_buff *skb = skb_from_pstate(ps);
	int rcv_as_buf = 0; /* assumption: the initializer fell into a gap of
			     * the source; 0 selects the receive_skb() path */

	if (unlikely(seqno_after(src_in_l->source.in.next_seqno, r->seqno))) {
		__u64 overlap = seqno_clean(src_in_l->source.in.next_seqno -
				r->seqno);

		if (overlap >= skb->len) {
			src_in_l->source.in.reorder_memused -=
					ps->funcstate.rcv_ooo.skb_memused;
			list_del(&(r->lh));
			kfree_skb(skb);
			return 0;
		}

		skb->data += overlap;
		skb->len -= overlap;
		r->seqno += overlap;
	}

	BUG_ON(seqno_eq(src_in_l->source.in.next_seqno, r->seqno) == 0);
	BUG_ON(skb->len <= 0);

	if (unlikely(rcv_as_buf != 0)) {
		__u32 rc = receive_buf(src_in_l, skb->data, skb->len, 0,
				flush);

		BUG_ON(rc > skb->len);

		src_in_l->source.in.next_seqno += rc;

		if (unlikely(rc != skb->len)) {
			skb->data += rc;
			skb->len -= rc;
			r->seqno += rc;
			return 1;
		}

		src_in_l->source.in.reorder_memused -=
				ps->funcstate.rcv_ooo.skb_memused;
		list_del(&(r->lh));
		kfree_skb(skb);
	} else {
		__u32 rc;
		__u32 len = skb->len;

		__u32 memused = ps->funcstate.rcv_ooo.skb_memused;

		/* receive_skb() consumes the skb (and with it r) on success,
		 * so the entry is unlinked first and memused saved */
		list_del(&(r->lh));

		rc = receive_skb(src_in_l, skb, 0, flush);

		src_in_l->source.in.next_seqno += rc;

		if (unlikely(rc != len)) {
			BUG_ON(rc > skb->len);
			skb->data += rc;
			skb->len -= rc;
			r->seqno += rc;
			list_add(&(r->lh),
					&(src_in_l->source.in.reorder_queue));
			return 1;
		}

		src_in_l->source.in.reorder_memused -= memused;
	}

	return 0;
}

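/*
 * Deliver queued ooo entries from the head of the reorder queue for as long
 * as they are in-order and the receive buffer accepts them.
 */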
void drain_ooo_queue(struct cor_conn *src_in_l)
{
	BUG_ON(src_in_l->sourcetype != SOURCE_IN);

	while (list_empty(&(src_in_l->source.in.reorder_queue)) == 0) {
		struct cor_rcvooo *r = container_of(
				src_in_l->source.in.reorder_queue.next,
				struct cor_rcvooo, lh);
		__u8 flush = r->flush;
		int rc;

		if (seqno_before(src_in_l->source.in.next_seqno, r->seqno))
			break;

		/* do not flush if there are more ooo packets in queue */
		if (src_in_l->source.in.reorder_queue.prev !=
				src_in_l->source.in.reorder_queue.next)
			flush = 0;

		if (r->type == RCVOOO_BUF)
			rc = drain_ooo_queue_buf(src_in_l, r, flush);
		else if (r->type == RCVOOO_SKB)
			rc = drain_ooo_queue_skb(src_in_l, r, flush);
		else
			BUG();

		if (unlikely(rc != 0))
			break;
	}

	BUG_ON(list_empty(&(src_in_l->source.in.reorder_queue)) != 0 &&
			src_in_l->source.in.reorder_memused != 0);
	BUG_ON(list_empty(&(src_in_l->source.in.reorder_queue)) == 0 &&
			src_in_l->source.in.reorder_memused == 0);

	account_bufspace(src_in_l);
}

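/* payload length of an ooo entry, independent of its representation */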
static __u32 rcvooo_len(struct cor_rcvooo *r)
{
	if (r->type == RCVOOO_BUF) {
		struct cor_rcvooo_buf *rb = container_of(r,
				struct cor_rcvooo_buf, r);
		return rb->len;
	} else if (r->type == RCVOOO_SKB) {
		struct sk_buff *skb = skb_from_pstate(container_of(r,
				struct cor_skb_procstate, funcstate.rcv_ooo.r));
		return skb->len;
	} else {
		BUG();
		return 0;
	}
}

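/*
 * Return the entry at lh_rcvooo as a cor_rcvooo_buf if it is a merge
 * candidate, 0 if the list end or a non-buf entry was hit.
 */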
static struct cor_rcvooo_buf *_conn_rcv_ooo_buf_checkmerge(
		struct cor_conn *src_in_l, struct list_head *lh_rcvooo)
{
	struct cor_rcvooo *r;
	struct cor_rcvooo_buf *rb;

	if (lh_rcvooo == &(src_in_l->source.in.reorder_queue))
		return 0;

	r = container_of(lh_rcvooo, struct cor_rcvooo, lh);
	if (r->type != RCVOOO_BUF)
		return 0;

	rb = container_of(r, struct cor_rcvooo_buf, r);

	return rb;
}

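/*
 * Charge new_bytes of reorder memory to the connection. Fails (nonzero) on
 * counter overflow or when account_bufspace() rejects the new total.
 */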
static int _conn_rcv_ooo_accountmem(struct cor_conn *src_in_l, __u32 new_bytes)
{
	if (unlikely(src_in_l->source.in.reorder_memused + new_bytes <
			src_in_l->source.in.reorder_memused))
		return 1;

	src_in_l->source.in.reorder_memused += new_bytes;

	if (unlikely(account_bufspace(src_in_l))) {
		src_in_l->source.in.reorder_memused -= new_bytes;
		account_bufspace(src_in_l);
		return 1;
	}

	return 0;
}

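/*
 * Merge new ooo data with the adjacent buf entries merge_prev and/or
 * merge_next into one contiguous kmalloc'd buffer, fixing up memory
 * accounting, the small-packet counter and the flush flag. On allocation
 * failure the new data is dropped; the peer will retransmit it.
 */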
static void _conn_rcv_ooo_merge(struct cor_conn *src_in_l, char *data,
		__u32 len, __u64 seqno, __u8 flush,
		struct cor_rcvooo_buf *merge_prev,
		struct cor_rcvooo_buf *merge_next)
{
	char *tmpbuf;
	__u32 tmpbuf_len = 0;
	__u32 tmpbuf_offset = 0;

	struct cor_rcvooo_buf *rb;

	if (merge_prev != 0)
		tmpbuf_len += merge_prev->len;
	tmpbuf_len += len;
	if (merge_next != 0)
		tmpbuf_len += merge_next->len;

	tmpbuf = kmalloc(tmpbuf_len, GFP_ATOMIC);
	if (unlikely(tmpbuf == 0))
		return;

	if (merge_prev != 0 && merge_next != 0 && len <
			sizeof(struct cor_rcvooo_buf)) {
		/* one cor_rcvooo_buf is freed below, memused can only shrink */
		src_in_l->source.in.reorder_memused += len -
				sizeof(struct cor_rcvooo_buf);
	} else {
		__u32 new_bytes = len;
		if (merge_prev != 0 && merge_next != 0)
			new_bytes -= sizeof(struct cor_rcvooo_buf);

		if (unlikely(_conn_rcv_ooo_accountmem(src_in_l, new_bytes))) {
			kfree(tmpbuf);
			return;
		}
	}

	if (merge_prev != 0) {
		memcpy(tmpbuf + tmpbuf_offset, merge_prev->data,
				merge_prev->len);
		tmpbuf_offset += merge_prev->len;
	}
	memcpy(tmpbuf + tmpbuf_offset, data, len);
	tmpbuf_offset += len;
	if (merge_next != 0) {
		memcpy(tmpbuf + tmpbuf_offset, merge_next->data,
				merge_next->len);
		tmpbuf_offset += merge_next->len;
	}

	BUG_ON(tmpbuf_offset != tmpbuf_len);

	if (merge_prev != 0) {
		kfree(merge_prev->data);
		merge_prev->data = 0;
		if (merge_prev->len <= SMALL_OOO_PACKET_MAXSIZE) {
			BUG_ON(src_in_l->source.in.small_ooo_packets == 0);
			src_in_l->source.in.small_ooo_packets--;
		}
	}

	if (merge_next != 0) {
		kfree(merge_next->data);
		merge_next->data = 0;
		if (merge_next->len <= SMALL_OOO_PACKET_MAXSIZE) {
			BUG_ON(src_in_l->source.in.small_ooo_packets == 0);
			src_in_l->source.in.small_ooo_packets--;
		}

		flush = merge_next->r.flush;

		if (merge_prev != 0) {
			list_del(&(merge_next->r.lh));
			kmem_cache_free(rcvooo_buf_slab, merge_next);
		}
	}

	if (merge_prev != 0) {
		rb = merge_prev;
	} else {
		BUG_ON(merge_next == 0);
		rb = merge_next;
		rb->r.seqno = seqno;
	}

	rb->data = tmpbuf;
	rb->len = tmpbuf_len;
	rb->r.flush = flush;

	if (tmpbuf_len <= SMALL_OOO_PACKET_MAXSIZE) {
		src_in_l->source.in.small_ooo_packets++;
		BUG_ON(src_in_l->source.in.small_ooo_packets == 0);
	}

	send_ack_conn_ifneeded(src_in_l, seqno, len);
}

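/*
 * Queue new ooo data as a standalone buf entry at next_rcvooo. The
 * small_ooo_packets limit bounds the per-conn overhead a neighbor can
 * cause by sending many tiny out-of-order packets.
 */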
static void _conn_rcv_ooo_nomerge(struct cor_conn *src_in_l, char *data,
		__u32 len, __u64 seqno, __u8 flush,
		struct list_head *next_rcvooo)
{
	struct cor_rcvooo_buf *rb;

	/* avoid oom if a neighbor sends very small packets */
	if (len <= SMALL_OOO_PACKET_MAXSIZE &&
			src_in_l->source.in.small_ooo_packets >=
			MAX_SMALL_OOO_PACKETS_PER_CONN)
		return;

	if (unlikely(_conn_rcv_ooo_accountmem(src_in_l,
			len + sizeof(struct cor_rcvooo_buf))))
		return;

	rb = kmem_cache_alloc(rcvooo_buf_slab, GFP_ATOMIC);
	if (unlikely(rb == 0)) {
		src_in_l->source.in.reorder_memused -=
				(len + sizeof(struct cor_rcvooo_buf));
		account_bufspace(src_in_l);
		return;
	}
	memset(rb, 0, sizeof(struct cor_rcvooo_buf));

	rb->data = kmalloc(len, GFP_ATOMIC);
	if (unlikely(rb->data == 0)) {
		kmem_cache_free(rcvooo_buf_slab, rb);

		src_in_l->source.in.reorder_memused -=
				(len + sizeof(struct cor_rcvooo_buf));
		account_bufspace(src_in_l);
		return;
	}

	memcpy(rb->data, data, len);
	rb->len = len;
	rb->r.type = RCVOOO_BUF;
	rb->r.seqno = seqno;
	rb->r.flush = flush;
	list_add_tail(&(rb->r.lh), next_rcvooo);

	if (len <= SMALL_OOO_PACKET_MAXSIZE) {
		src_in_l->source.in.small_ooo_packets++;
		BUG_ON(src_in_l->source.in.small_ooo_packets == 0);
	}

	send_ack_conn_ifneeded(src_in_l, seqno, len);
}

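/*
 * Decide whether new ooo data can be merged with its neighbors (it must be
 * exactly adjacent in seqno space) and queue it accordingly.
 */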
static void _conn_rcv_ooo_buf(struct cor_conn *src_in_l, char *data, __u32 len,
		__u64 seqno, __u8 flush, struct list_head *next_rcvooo)
{
	struct cor_rcvooo_buf *merge_prev;
	struct cor_rcvooo_buf *merge_next;

	merge_prev = _conn_rcv_ooo_buf_checkmerge(src_in_l, next_rcvooo->prev);
	if (merge_prev != 0) {
		__u64 next_seqno = merge_prev->r.seqno + merge_prev->len;
		BUG_ON(seqno_after(next_seqno, seqno));
		if (seqno_eq(next_seqno, seqno) == 0)
			merge_prev = 0;
	}

	merge_next = _conn_rcv_ooo_buf_checkmerge(src_in_l, next_rcvooo);
	if (merge_next != 0) {
		__u64 next_seqno = seqno + len;
		BUG_ON(seqno_after(next_seqno, merge_next->r.seqno));
		if (seqno_eq(next_seqno, merge_next->r.seqno) == 0)
			merge_next = 0;
	}

	if (merge_prev == 0 && merge_next == 0) {
		_conn_rcv_ooo_nomerge(src_in_l, data, len, seqno, flush,
				next_rcvooo);
	} else {
		_conn_rcv_ooo_merge(src_in_l, data, len, seqno, flush,
				merge_prev, merge_next);
	}
}

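/*
 * Queue an ooo skb directly, reusing its control buffer (cor_skb_procstate)
 * as the cor_rcvooo entry, so no extra allocation is needed.
 */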
static void _conn_rcv_ooo_skb(struct cor_conn *src_in_l, struct sk_buff *skb,
		__u64 seqno, __u8 flush, struct list_head *next_rcvooo)
{
	struct cor_rcvooo *newr;
	struct cor_skb_procstate *ps = skb_pstate(skb);

	memset(&(ps->funcstate), 0, sizeof(ps->funcstate));
	ps->funcstate.rcv_ooo.skb_memused = sizeof(struct sk_buff) +
			skb->len; /* assumption: the second summand fell
				   * into a gap of the source */

	if (unlikely(_conn_rcv_ooo_accountmem(src_in_l,
			ps->funcstate.rcv_ooo.skb_memused))) {
		kfree_skb(skb);
		return;
	}

	newr = &(ps->funcstate.rcv_ooo.r);
	newr->type = RCVOOO_SKB;
	newr->seqno = seqno;
	newr->flush = flush;
	list_add_tail(&(newr->lh), next_rcvooo);

	send_ack_conn_ifneeded(src_in_l, seqno, skb->len);
}

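/*
 * Insert new ooo data between prev_rcvooo_lh and its successor: trim the
 * overlap with both neighbors, then keep the skb only if it is large and
 * unmodified, otherwise copy it into a buf entry.
 */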
static void __conn_rcv_ooo(struct cor_conn *src_in_l, struct sk_buff *skb,
		char *data, __u32 len, __u64 seqno, __u8 flush,
		struct list_head *prev_rcvooo_lh)
{
	struct list_head *reorder_queue = &(src_in_l->source.in.reorder_queue);
	struct list_head *next_rcvooo_lh = prev_rcvooo_lh->next;

	if (prev_rcvooo_lh != reorder_queue) {
		struct cor_rcvooo *prev_rcvooo = container_of(prev_rcvooo_lh,
				struct cor_rcvooo, lh);
		__u32 currlen = rcvooo_len(prev_rcvooo);

		if (seqno_after(prev_rcvooo->seqno + currlen, seqno)) {
			__u64 overlap = seqno_clean(prev_rcvooo->seqno +
					currlen - seqno);

			if (unlikely(len <= overlap))
				goto free_skb;

			data += overlap;
			len -= overlap;
			seqno += overlap;
		}
	}

	if (next_rcvooo_lh != reorder_queue) {
		struct cor_rcvooo *next_rcvooo = container_of(next_rcvooo_lh,
				struct cor_rcvooo, lh);

		if (unlikely(seqno_before_eq(next_rcvooo->seqno, seqno)))
			goto free_skb;

		if (unlikely(seqno_before(next_rcvooo->seqno, seqno + len)))
			len = seqno_clean(next_rcvooo->seqno - seqno);
	}

	if (unlikely(len == 0))
		goto free_skb;

	if (skb == 0 || len < 1024 ||
			skb->data != ((unsigned char *) data) ||
			skb->len != len) {
		_conn_rcv_ooo_buf(src_in_l, data, len, seqno, flush,
				next_rcvooo_lh);
		goto free_skb;
	}

	_conn_rcv_ooo_skb(src_in_l, skb, seqno, flush, next_rcvooo_lh);
	return;

free_skb:
	if (skb != 0)
		kfree_skb(skb);
}

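/*
 * Find the insert position for the new data by scanning the reorder queue
 * from the tail (new data usually has the highest seqno).
 */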
static void _conn_rcv_ooo(struct cor_conn *src_in_l, struct sk_buff *skb,
		char *data, __u32 len, __u64 seqno, __u8 flush)
{
	struct list_head *reorder_queue = &(src_in_l->source.in.reorder_queue);
	struct list_head *currlh = reorder_queue->prev;

	BUG_ON(skb != 0 && skb->data != ((unsigned char *)data));
	BUG_ON(skb != 0 && skb->len != len);

	while (currlh != reorder_queue) {
		struct cor_rcvooo *currr = container_of(currlh,
				struct cor_rcvooo, lh);

		if (seqno_before_eq(currr->seqno, seqno))
			break;

		currlh = currlh->prev;
	}

	__conn_rcv_ooo(src_in_l, skb, data, len, seqno, flush, currlh);
}

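/*
 * Per-connection receive path. Validates the packet against the connection
 * state and the receive window, queues future seqnos as out-of-order data,
 * delivers in-order data and drains the reorder queue afterwards.
 */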
static void _conn_rcv(struct cor_neighbor *nb, struct cor_conn *src_in,
		__u32 conn_id, struct sk_buff *skb, char *data, __u32 len,
		__u64 seqno, int rcv_delayed_lowbuf, __u8 flush)
{
	spin_lock_bh(&(src_in->rcv_lock));

	if (unlikely(unlikely(src_in->isreset != 0) ||
			unlikely(src_in->sourcetype != SOURCE_IN) ||
			unlikely(src_in->source.in.conn_id != conn_id)))
		goto drop;

	if (nb == 0) {
		if (unlikely(is_from_nb(skb, src_in->source.in.nb) == 0))
			goto drop;
	} else {
		if (unlikely(src_in->source.in.nb != nb))
			goto drop;
	}

	set_last_act(src_in);

	if (unlikely(seqno_before(seqno + len, src_in->source.in.next_seqno)))
		goto drop_ack;

	if (unlikely(unlikely(seqno_after(seqno + len,
			src_in->source.in.window_seqnolimit)) &&
			seqno_after(seqno + len,
			src_in->source.in.window_seqnolimit_remote)))
		goto drop;

	if (seqno_after(seqno, src_in->source.in.next_seqno)) {
		/* packet is out of order, queue it */
		_conn_rcv_ooo(src_in, skb, data, len, seqno, flush);
	} else {
		__u32 rcvlen;

		if (seqno_after(src_in->source.in.next_seqno, seqno)) {
			__u64 overlap = seqno_clean(
					src_in->source.in.next_seqno - seqno);

			BUG_ON(overlap > len);

			data += overlap;
			len -= overlap;
			seqno += overlap;

			rcvlen = receive_buf(src_in, data, len,
					rcv_delayed_lowbuf, flush);

			if (skb != 0)
				kfree_skb(skb);
		} else if (skb != 0) {
			__u32 skblen = skb->len;

			rcvlen = receive_skb(src_in, skb, rcv_delayed_lowbuf,
					flush);

			if (unlikely(rcvlen < skblen)) {
				/* the unreceived rest is dropped; the peer
				 * retransmits it */
			}
		} else {
			rcvlen = receive_buf(src_in, data, len,
					rcv_delayed_lowbuf, flush);
		}

		if (likely(rcvlen > 0)) {
			src_in->source.in.next_seqno += rcvlen;

			drain_ooo_queue(src_in);
			src_in->source.in.inorder_ack_needed = 1;
		}

		send_ack_conn_ifneeded(src_in, 0, 0);
	}

	goto out;

drop_ack:
	send_ack_conn_ifneeded(src_in, 0, 0);
drop:
	if (skb != 0)
		kfree_skb(skb);
out:
	spin_unlock_bh(&(src_in->rcv_lock));
}

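/*
 * Look up the connection by conn_id and hand the packet to _conn_rcv().
 * Packets for unknown connections are answered with a reset, sent with
 * bit 31 of the conn_id cleared.
 */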
void conn_rcv(struct cor_neighbor *nb, struct sk_buff *skb, char *data,
		__u32 len, __u32 conn_id, __u64 seqno, int rcv_delayed_lowbuf,
		__u8 flush)
{
	struct cor_conn *src_in;

	if (skb != 0) {
		BUG_ON(data != 0);
		BUG_ON(len != 0);

		data = (char *) skb->data;
		len = skb->len;
	}

	src_in = get_conn(nb, conn_id);

	if (unlikely(src_in == 0)) {
		/* printk(KERN_DEBUG "unknown conn_id when receiving: %d",
				conn_id); */

		if (skb != 0)
			kfree_skb(skb);

		send_reset_conn(nb, conn_id ^ (conn_id & (1 << 31)), 0);
		return;
	}

	_conn_rcv(nb, src_in, conn_id, skb, data, len, seqno,
			rcv_delayed_lowbuf, flush);
	kref_put(&(src_in->ref), free_conn);
}

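/*
 * Parse the conn data header (4 byte conn_id, 6 byte seqno) and pass the
 * remaining payload to conn_rcv().
 */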
static void rcv_conndata(struct sk_buff *skb, int rcv_delayed_lowbuf,
		__u8 flush)
{
	struct cor_neighbor *nb = get_neigh_by_mac(skb);

	__u32 conn_id;
	__u64 seqno;

	char *connid_p;
	char *seqno_p;

	if (unlikely(nb == 0))
		goto drop;

	connid_p = cor_pull_skb(skb, 4);
	if (unlikely(connid_p == 0))
		goto drop;

	seqno_p = cor_pull_skb(skb, 6);
	if (unlikely(seqno_p == 0))
		goto drop;

	conn_id = parse_u32(connid_p);
	seqno = parse_u48(seqno_p);

	/* get_random_bytes(&rand, 1);
	if (rand < 64)
		goto drop; */

	if (unlikely(skb->len <= 0))
		goto drop;

	conn_rcv(nb, skb, 0, 0, conn_id, seqno, rcv_delayed_lowbuf, flush);

	goto out;

drop:
	kfree_skb(skb);

out:
	if (nb != 0)
		kref_put(&(nb->ref), neighbor_free);
}

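/* parse the 6 byte seqno and hand the control message to kernel_packet() */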
static void rcv_cmsg(struct sk_buff *skb)
{
	struct cor_neighbor *nb = get_neigh_by_mac(skb);

	__u64 seqno;
	char *seqno_p;

	if (unlikely(nb == 0))
		goto drop;

	seqno_p = cor_pull_skb(skb, 6);
	if (unlikely(seqno_p == 0))
		goto drop;

	seqno = parse_u48(seqno_p);

	/* get_random_bytes(&rand, 1);
	if (rand < 64)
		goto drop; */

	kernel_packet(nb, skb, seqno);

	goto out;

drop:
	kfree_skb(skb);

out:
	if (nb != 0)
		kref_put(&(nb->ref), neighbor_free);
}

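/*
 * Packet handler for ETH_P_COR frames: dispatch on the leading packet type
 * byte; the lowbufdelayed/flush variants only differ in the flags passed
 * to rcv_conndata().
 */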
static int rcv(struct sk_buff *skb, struct net_device *dev,
		struct packet_type *pt, struct net_device *orig_dev)
{
	__u8 packet_type;
	char *packet_type_p;

	if (skb->pkt_type == PACKET_OTHERHOST ||
			unlikely(skb->pkt_type == PACKET_LOOPBACK))
		goto drop;

	packet_type_p = cor_pull_skb(skb, 1);

	if (unlikely(packet_type_p == 0))
		goto drop;

	packet_type = *packet_type_p;

	if (unlikely(packet_type == PACKET_TYPE_ANNOUNCE)) {
		rcv_announce(skb);
		return NET_RX_SUCCESS;
	} else if (packet_type == PACKET_TYPE_CMSG) {
		rcv_cmsg(skb);
		return NET_RX_SUCCESS;
	} else if (packet_type == PACKET_TYPE_CONNDATA) {
		rcv_conndata(skb, 0, 0);
		return NET_RX_SUCCESS;
	} else if (packet_type == PACKET_TYPE_CONNDATA_LOWBUFDELAYED) {
		rcv_conndata(skb, 1, 0);
		return NET_RX_SUCCESS;
	} else if (packet_type == PACKET_TYPE_CONNDATA_FLUSH) {
		rcv_conndata(skb, 0, 1);
		return NET_RX_SUCCESS;
	} else if (packet_type == PACKET_TYPE_CONNDATA_LOWBUFDELAYED_FLUSH) {
		rcv_conndata(skb, 1, 1);
		return NET_RX_SUCCESS;
	}

drop:
	kfree_skb(skb);
	return NET_RX_SUCCESS;
}

static struct packet_type ptype_cor = {
	.type = htons(ETH_P_COR),
	.dev = 0,
	.func = rcv
};

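/*
 * cor_rcv_up()/cor_rcv_down() register and unregister the ETH_P_COR
 * handler; pack_registered guards against double (un)registration.
 */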
void cor_rcv_down(void)
{
	if (pack_registered == 0)
		return;
	pack_registered = 0;
	dev_remove_pack(&ptype_cor);
}

void cor_rcv_up(void)
{
	if (pack_registered != 0)
		return;
	pack_registered = 1;
	dev_add_pack(&ptype_cor);
}

int __init cor_rcv_init(void)
{
	/* cor_skb_procstate must fit into skb->cb, which is 48 bytes */
	BUG_ON(sizeof(struct cor_skb_procstate) > 48);

	rcvooo_buf_slab = kmem_cache_create("cor_rcvooo_buf",
			sizeof(struct cor_rcvooo_buf), 8, 0, 0);
	if (unlikely(rcvooo_buf_slab == 0))
		return -ENOMEM;

	return 0;
}

void __exit cor_rcv_exit2(void)
{
	kmem_cache_destroy(rcvooo_buf_slab);
}

MODULE_LICENSE("GPL");