kpacket_gen: use constants for cmdlength
[cor.git] / net / cor / kpacket_gen.c
blob5dedfa0e37344de1c5ae311d707dae1f7e9d15a4
1 /**
2 * Connection oriented routing
3 * Copyright (C) 2007-2020 Michael Blizek
5 * This program is free software; you can redistribute it and/or
6 * modify it under the terms of the GNU General Public License
7 * as published by the Free Software Foundation; either version 2
8 * of the License, or (at your option) any later version.
10 * This program is distributed in the hope that it will be useful,
11 * but WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
13 * GNU General Public License for more details.
15 * You should have received a copy of the GNU General Public License
16 * along with this program; if not, write to the Free Software
17 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
18 * 02110-1301, USA.
21 #include <asm/byteorder.h>
23 #include "cor.h"
/* not sent over the network - internal meaning only */
#define MSGTYPE_PONG 1
#define MSGTYPE_ACK 2
#define MSGTYPE_ACK_CONN 3
#define MSGTYPE_CONNECT 4
#define MSGTYPE_CONNECT_SUCCESS 5
#define MSGTYPE_RESET_CONN 6
#define MSGTYPE_CONNDATA 7
#define MSGTYPE_SET_MAX_CMSG_DELAY 8

/* subtype stored in control_msg_out.msg.pong.type */
#define MSGTYPE_PONG_TIMEENQUEUED 1
#define MSGTYPE_PONG_RESPDELAY 2
/*
 * One outgoing control message, queued per neighbor until it is packed
 * into a kernel packet. The active union arm is selected by ->type
 * (one of the MSGTYPE_* values above).
 */
struct control_msg_out{
	__u8 type;	/* MSGTYPE_* discriminator for ->msg */
	__u32 length;	/* encoded on-wire length of this message */
	struct kref ref;
	struct neighbor *nb;	/* destination neighbor */

	/* either queue or control_retrans_packet */
	struct list_head lh;

	unsigned long time_added;	/* jiffies; used for queue timeouts */

	union{
		struct{
			__u32 cookie;
			__u8 type;	/* MSGTYPE_PONG_* */

			ktime_t time_enqueued;
		}pong;

		struct{
			__u64 seqno;
		}ack;

		struct{
			struct conn *src_in;	/* holds a conn reference */
			struct list_head conn_acks;
			__u32 conn_id;
			__u64 seqno;
			__u64 seqno_ooo;	/* out-of-order range start */
			__u32 length;		/* out-of-order range length */

			__u8 priority_seqno;
			__u8 priority;

			__u8 flags;	/* KP_ACK_CONN_FLAGS_* */

			__u32 ack_seqno;
		}ack_conn;

		struct{
			__u32 conn_id;
			__u64 seqno1;
			__u64 seqno2;
			struct conn *src_in;	/* holds a conn reference */
		}connect;

		struct{
			__u32 conn_id;
			struct conn *src_in;	/* holds a conn reference */
		}connect_success;

		struct{
			struct rb_node rbn;	/* in nb->pending_conn_resets_rb */
			__u8 in_pending_conn_resets;	/* rbn linked? */
			__u32 conn_id;
		}reset_conn;

		struct{
			__u32 conn_id;
			__u64 seqno;
			__u32 datalen;
			__u8 snd_delayed_lowbuf;
			__u8 flush;
			__u8 highlatency;
			char *data_orig;	/* allocation start (for kfree) */
			char *data;		/* current send position */
			struct conn_retrans *cr;
		}conn_data;

		struct{
			/* delays in microseconds */
			__u32 ack_delay;
			__u32 ackconn_delay;
			__u32 other_delay;
		}set_max_cmsg_delay;
	}msg;
};
/*
 * One sent-but-unacked kernel packet: the messages it carried, kept for
 * retransmission until the packet's seqno is acked.
 */
struct control_retrans {
	struct kref ref;

	struct neighbor *nb;
	__u64 seqno;	/* kernel packet seqno this retrans covers */

	unsigned long timeout;	/* jiffies deadline for retransmission */

	struct list_head msgs;	/* control_msg_out.lh entries */

	struct rb_node rbn;	/* in nb->kp_retransmits_rb, keyed by seqno */
	struct list_head timeout_list;	/* in nb->retrans_list */
};
/* (neighbor, conn_id) search key; NOTE(review): not referenced in this
 * part of the file - confirm it is still used before removing */
struct unknownconnid_matchparam {
	struct neighbor *nb;
	__u32 conn_id;
};
/* slab caches for control_msg_out / control_retrans objects */
static struct kmem_cache *controlmsg_slab;
static struct kmem_cache *controlretrans_slab;

/* global (all neighbors) count of queued non-pong control messages */
static atomic_t cmsg_othercnt = ATOMIC_INIT(0);

/* origin of a message handed to enqueue_control_msg() */
#define ADDCMSG_SRC_NEW 1
#define ADDCMSG_SRC_SPLITCONNDATA 2
#define ADDCMSG_SRC_READD 3
#define ADDCMSG_SRC_RETRANS 4
/* forward declarations for the queueing helpers used below */
static void enqueue_control_msg(struct control_msg_out *msg, int src);

static void try_merge_ackconns(struct conn *src_in_l,
		struct control_msg_out *cm);

static void merge_or_enqueue_ackconn(struct conn *src_in_l,
		struct control_msg_out *cm, int src);
153 static struct control_msg_out *_alloc_control_msg(struct neighbor *nb)
155 struct control_msg_out *cm;
157 BUG_ON(nb == 0);
159 cm = kmem_cache_alloc(controlmsg_slab, GFP_ATOMIC);
160 if (unlikely(cm == 0))
161 return 0;
162 memset(cm, 0, sizeof(struct control_msg_out));
163 kref_init(&(cm->ref));
164 cm->nb = nb;
165 return cm;
168 static int calc_limit(int limit, int priority)
170 if (priority == ACM_PRIORITY_LOW)
171 return (limit+1)/2;
172 else if (priority == ACM_PRIORITY_MED)
173 return (limit * 3 + 1)/4;
174 else if (priority == ACM_PRIORITY_HIGH)
175 return limit;
176 else
177 BUG();
180 struct control_msg_out *alloc_control_msg(struct neighbor *nb, int priority)
182 struct control_msg_out *cm = 0;
184 long packets1;
185 long packets2;
187 BUG_ON(nb == 0);
189 packets1 = atomic_inc_return(&(nb->cmsg_othercnt));
190 packets2 = atomic_inc_return(&(cmsg_othercnt));
192 BUG_ON(packets1 <= 0);
193 BUG_ON(packets2 <= 0);
195 if (packets1 <= calc_limit(GUARANTEED_CMSGS_PER_NEIGH, priority))
196 goto alloc;
198 if (unlikely(unlikely(packets1 > calc_limit(MAX_CMSGS_PER_NEIGH,
199 priority)) ||
200 unlikely(packets2 > calc_limit(MAX_CMSGS, priority))))
201 goto full;
203 alloc:
204 cm = _alloc_control_msg(nb);
205 if (unlikely(cm == 0)) {
206 full:
208 /* printk(KERN_ERR "alloc_control_msg failed %ld %ld", packets1, packets2); */
209 atomic_dec(&(nb->cmsg_othercnt));
210 atomic_dec(&(cmsg_othercnt));
212 return cm;
215 static void cmsg_kref_free(struct kref *ref)
217 struct control_msg_out *cm = container_of(ref, struct control_msg_out,
218 ref);
219 kmem_cache_free(controlmsg_slab, cm);
222 void free_control_msg(struct control_msg_out *cm)
224 if (likely(cm->type != MSGTYPE_PONG)) {
225 atomic_dec(&(cm->nb->cmsg_othercnt));
226 atomic_dec(&(cmsg_othercnt));
229 if (cm->type == MSGTYPE_ACK_CONN) {
230 struct conn *trgt_out = cm->msg.ack_conn.src_in->reversedir;
231 BUG_ON(cm->msg.ack_conn.src_in == 0);
232 if ((cm->msg.ack_conn.flags & KP_ACK_CONN_FLAGS_PRIORITY) != 0){
233 spin_lock_bh(&(trgt_out->rcv_lock));
234 BUG_ON(trgt_out->targettype != TARGET_OUT);
235 if (trgt_out->target.out.priority_send_allowed != 0) {
236 trgt_out->target.out.priority_send_allowed = 1;
237 spin_unlock_bh(&(trgt_out->rcv_lock));
238 refresh_conn_priority(trgt_out, 0);
239 } else {
240 spin_unlock_bh(&(trgt_out->rcv_lock));
243 kref_put(&(cm->msg.ack_conn.src_in->ref), free_conn);
244 cm->msg.ack_conn.src_in = 0;
245 } else if (cm->type == MSGTYPE_CONNECT) {
246 BUG_ON(cm->msg.connect.src_in == 0);
247 kref_put(&(cm->msg.connect.src_in->ref), free_conn);
248 cm->msg.connect.src_in = 0;
249 } else if (cm->type == MSGTYPE_CONNECT_SUCCESS) {
250 BUG_ON(cm->msg.connect_success.src_in == 0);
251 kref_put(&(cm->msg.connect_success.src_in->ref), free_conn);
252 cm->msg.connect_success.src_in = 0;
253 } else if (cm->type == MSGTYPE_RESET_CONN) {
254 spin_lock_bh(&(cm->nb->cmsg_lock));
255 if (cm->msg.reset_conn.in_pending_conn_resets != 0) {
256 rb_erase(&(cm->msg.reset_conn.rbn),
257 &(cm->nb->pending_conn_resets_rb));
258 cm->msg.reset_conn.in_pending_conn_resets = 0;
260 kref_put(&(cm->ref), kreffree_bug);
262 spin_unlock_bh(&(cm->nb->cmsg_lock));
265 kref_put(&(cm->ref), cmsg_kref_free);
268 static void free_control_retrans(struct kref *ref)
270 struct control_retrans *cr = container_of(ref, struct control_retrans,
271 ref);
273 while (list_empty(&(cr->msgs)) == 0) {
274 struct control_msg_out *cm = container_of(cr->msgs.next,
275 struct control_msg_out, lh);
277 if (cm->type == MSGTYPE_PONG)
278 atomic_dec(&(cm->nb->cmsg_pongs_retrans_cnt));
280 list_del(&(cm->lh));
281 free_control_msg(cm);
284 kmem_cache_free(controlretrans_slab, cr);
287 struct control_retrans *get_control_retrans(struct neighbor *nb_retranslocked,
288 __u64 seqno)
290 struct rb_node *n = 0;
291 struct control_retrans *ret = 0;
293 n = nb_retranslocked->kp_retransmits_rb.rb_node;
295 while (likely(n != 0) && ret == 0) {
296 struct control_retrans *cr = container_of(n,
297 struct control_retrans, rbn);
299 BUG_ON(cr->nb != nb_retranslocked);
301 if (seqno_before(seqno, cr->seqno))
302 n = n->rb_left;
303 else if (seqno_after(seqno, cr->seqno))
304 n = n->rb_right;
305 else
306 ret = cr;
309 if (ret != 0)
310 kref_get(&(ret->ref));
312 return ret;
315 /* nb->retrans_lock must be held */
316 void insert_control_retrans(struct control_retrans *ins)
318 struct neighbor *nb = ins->nb;
319 __u64 seqno = ins->seqno;
321 struct rb_root *root;
322 struct rb_node **p;
323 struct rb_node *parent = 0;
325 BUG_ON(nb == 0);
327 root = &(nb->kp_retransmits_rb);
328 p = &(root->rb_node);
330 while ((*p) != 0) {
331 struct control_retrans *cr = container_of(*p,
332 struct control_retrans, rbn);
334 BUG_ON(cr->nb != nb);
336 parent = *p;
337 if (unlikely(seqno_eq(seqno, cr->seqno))) {
338 BUG();
339 } else if (seqno_before(seqno, cr->seqno)) {
340 p = &(*p)->rb_left;
341 } else if (seqno_after(seqno, cr->seqno)) {
342 p = &(*p)->rb_right;
343 } else {
344 BUG();
348 kref_get(&(ins->ref));
349 rb_link_node(&(ins->rbn), parent, p);
350 rb_insert_color(&(ins->rbn), root);
353 static void remove_connack_oooflag_ifold(struct conn *src_in_l,
354 struct control_msg_out *cm)
356 if (ooolen(cm->msg.ack_conn.flags) != 0 && seqno_before_eq(
357 cm->msg.ack_conn.seqno_ooo +
358 cm->msg.ack_conn.length,
359 src_in_l->source.in.next_seqno)) {
360 cm->msg.ack_conn.length = 0;
361 cm->msg.ack_conn.flags = (cm->msg.ack_conn.flags &
362 (~KP_ACK_CONN_FLAGS_OOO));
366 static int ackconn_prepare_requeue(struct conn *cn_l,
367 struct control_msg_out *cm)
369 if (unlikely(unlikely(cn_l->sourcetype != SOURCE_IN) ||
370 unlikely(cn_l->source.in.nb != cm->nb) ||
371 unlikely(cn_l->reversedir->target.out.conn_id !=
372 cm->msg.ack_conn.conn_id) ||
373 unlikely(cn_l->isreset != 0)))
374 return 0;
376 remove_connack_oooflag_ifold(cn_l, cm);
378 if (!seqno_eq(cm->msg.ack_conn.ack_seqno, cn_l->source.in.ack_seqno))
379 cm->msg.ack_conn.flags = (cm->msg.ack_conn.flags &
380 (~KP_ACK_CONN_FLAGS_SEQNO) &
381 (~KP_ACK_CONN_FLAGS_WINDOW));
383 if (cm->msg.ack_conn.flags == 0)
384 return 0;
386 cm->length = 6 + ack_conn_len(cm->msg.ack_conn.flags);
388 return 1;
391 static void requeue_control_retrans(struct control_retrans *cr)
393 atomic_inc(&(cr->nb->cmsg_bulk_readds));
395 while (list_empty(&(cr->msgs)) == 0) {
396 struct control_msg_out *cm = container_of(cr->msgs.prev,
397 struct control_msg_out, lh);
398 list_del(&(cm->lh));
400 BUG_ON(cm->nb != cr->nb);
402 if (cm->type == MSGTYPE_ACK_CONN) {
403 struct conn *cn_l = cm->msg.ack_conn.src_in;
404 spin_lock_bh(&(cn_l->rcv_lock));
405 if (unlikely(ackconn_prepare_requeue(cn_l, cm) == 0)) {
406 free_control_msg(cm);
407 } else {
408 merge_or_enqueue_ackconn(cn_l, cm,
409 ADDCMSG_SRC_RETRANS);
412 spin_unlock_bh(&(cn_l->rcv_lock));
413 } else {
414 if (cm->type == MSGTYPE_PONG)
415 atomic_dec(&(cm->nb->cmsg_pongs_retrans_cnt));
416 enqueue_control_msg(cm, ADDCMSG_SRC_RETRANS);
420 atomic_dec(&(cr->nb->cmsg_bulk_readds));
422 spin_lock_bh(&(cr->nb->cmsg_lock));
423 schedule_controlmsg_timer(cr->nb);
424 spin_unlock_bh(&(cr->nb->cmsg_lock));
427 static void empty_retrans_queue(struct neighbor *nb_retranslocked)
429 while (!list_empty(&(nb_retranslocked->retrans_list))) {
430 struct control_retrans *cr = container_of(
431 nb_retranslocked->retrans_list.next,
432 struct control_retrans, timeout_list);
434 BUG_ON(cr->nb != nb_retranslocked);
436 list_del(&(cr->timeout_list));
437 rb_erase(&(cr->rbn), &(nb_retranslocked->kp_retransmits_rb));
439 kref_put(&(cr->ref), kreffree_bug); /* rb */
440 kref_put(&(cr->ref), free_control_retrans); /* list */
444 void retransmit_timerfunc(struct timer_list *retrans_timer)
446 struct neighbor *nb = container_of(retrans_timer,
447 struct neighbor, retrans_timer);
448 int nbstate = get_neigh_state(nb);
449 struct control_retrans *cr = 0;
451 spin_lock_bh(&(nb->retrans_lock));
453 if (list_empty(&(nb->retrans_list))) {
454 spin_unlock_bh(&(nb->retrans_lock));
455 kref_put(&(nb->ref), neighbor_free);
456 return;
459 if (unlikely(nbstate == NEIGHBOR_STATE_KILLED)) {
460 empty_retrans_queue(nb);
461 spin_unlock_bh(&(nb->retrans_lock));
462 kref_put(&(nb->ref), neighbor_free);
463 return;
466 cr = container_of(nb->retrans_list.next, struct control_retrans,
467 timeout_list);
469 BUG_ON(cr->nb != nb);
471 if (time_after(cr->timeout, jiffies)) {
472 int rc = mod_timer(&(nb->retrans_timer), cr->timeout);
473 spin_unlock_bh(&(nb->retrans_lock));
474 if (rc != 0)
475 kref_put(&(nb->ref), neighbor_free);
476 return;
479 spin_unlock_bh(&(nb->retrans_lock));
481 spin_lock_bh(&(nb->cmsg_lock));
482 nb->add_retrans_needed = 1;
483 schedule_controlmsg_timer(nb);
484 spin_unlock_bh(&(nb->cmsg_lock));
486 kref_put(&(nb->ref), neighbor_free);
489 static void schedule_retransmit(struct control_retrans *cr, struct neighbor *nb)
491 int first;
493 cr->timeout = calc_timeout(atomic_read(&(nb->latency_retrans_us)),
494 atomic_read(&(nb->latency_stddev_retrans_us)),
495 atomic_read(&(nb->max_remote_ack_delay_us)));
497 spin_lock_bh(&(nb->retrans_lock));
498 insert_control_retrans(cr);
499 first = list_empty(&(nb->retrans_list));
500 list_add_tail(&(cr->timeout_list), &(nb->retrans_list));
502 if (first) {
503 if (mod_timer(&(nb->retrans_timer), cr->timeout) == 0) {
504 kref_get(&(nb->ref));
508 spin_unlock_bh(&(nb->retrans_lock));
511 void kern_ack_rcvd(struct neighbor *nb, __u64 seqno)
513 struct control_retrans *cr = 0;
515 spin_lock_bh(&(nb->retrans_lock));
517 cr = get_control_retrans(nb, seqno);
519 if (cr == 0) {
520 /* char *seqno_p = (char *) &seqno;
521 seqno = cpu_to_be32(seqno);
522 printk(KERN_ERR "bogus/duplicate ack received %d %d %d %d",
523 seqno_p[0], seqno_p[1], seqno_p[2], seqno_p[3]);
525 goto out;
528 rb_erase(&(cr->rbn), &(nb->kp_retransmits_rb));
530 BUG_ON(cr->nb != nb);
532 list_del(&(cr->timeout_list));
534 out:
535 spin_unlock_bh(&(nb->retrans_lock));
537 if (cr != 0) {
538 kref_put(&(cr->ref), kreffree_bug); /* get_control_retrans */
539 kref_put(&(cr->ref), kreffree_bug); /* rb_erase */
540 kref_put(&(cr->ref), free_control_retrans); /* list */
544 static __u8 get_window(struct conn *cn, struct neighbor *expectedsender,
545 __u32 expected_connid)
547 __u8 window = 0;
549 spin_lock_bh(&(cn->rcv_lock));
551 if (unlikely(unlikely(cn->sourcetype != SOURCE_IN) ||
552 unlikely(expectedsender != 0 && (cn->source.in.nb !=
553 expectedsender || cn->reversedir->target.out.conn_id !=
554 expected_connid))))
555 goto out;
557 window = enc_log_64_7(seqno_clean(cn->source.in.window_seqnolimit -
558 cn->source.in.next_seqno));
560 cn->source.in.window_seqnolimit_remote = cn->source.in.next_seqno +
561 dec_log_64_7(window);
563 out:
564 spin_unlock_bh(&(cn->rcv_lock));
566 return window;
569 /* static void padding(struct sk_buff *skb, __u32 length)
571 char *dst;
572 if (length <= 0)
573 return;
574 dst = skb_put(skb, length);
575 BUG_ON(dst == 0);
576 memset(dst, KP_PADDING, length);
577 } */
580 static int add_init_session(struct sk_buff *skb, __be32 sessionid,
581 __u32 spaceleft)
583 char *dst;
585 BUG_ON(KP_INIT_SESSION_CMDLEN != 5);
587 if (unlikely(spaceleft < 5))
588 return 0;
590 dst = skb_put(skb, 5);
591 BUG_ON(dst == 0);
593 dst[0] = KP_INIT_SESSION;
594 put_be32(dst + 1, sessionid);
596 return 5;
599 static int add_ack(struct sk_buff *skb, struct control_retrans *cr,
600 struct control_msg_out *cm, __u32 spaceleft)
602 char *dst;
604 if (unlikely(spaceleft < 7))
605 return 0;
607 dst = skb_put(skb, 7);
608 BUG_ON(dst == 0);
610 dst[0] = KP_ACK;
611 put_u48(dst + 1, cm->msg.ack.seqno);
613 free_control_msg(cm);
615 return 7;
618 static int add_ack_conn(struct sk_buff *skb, struct control_retrans *cr,
619 struct control_msg_out *cm, __u32 spaceleft)
621 char *dst;
622 int offset = 0;
624 if (unlikely(spaceleft < cm->length))
625 return 0;
627 dst = skb_put(skb, cm->length);
628 BUG_ON(dst == 0);
630 dst[offset] = KP_ACK_CONN;
631 offset++;
632 put_u32(dst + offset, cm->msg.ack_conn.conn_id);
633 offset += 4;
634 dst[offset] = cm->msg.ack_conn.flags;
635 offset++;
637 if ((cm->msg.ack_conn.flags & KP_ACK_CONN_FLAGS_SEQNO) != 0) {
638 put_u48(dst + offset, cm->msg.ack_conn.seqno);
639 offset += 6;
641 if ((cm->msg.ack_conn.flags & KP_ACK_CONN_FLAGS_WINDOW) != 0) {
642 BUG_ON(cm->msg.ack_conn.src_in == 0);
643 dst[offset] = get_window(cm->msg.ack_conn.src_in,
644 cm->nb, cm->msg.ack_conn.conn_id);
645 offset++;
649 if (ooolen(cm->msg.ack_conn.flags) != 0) {
650 put_u48(dst + offset, cm->msg.ack_conn.seqno_ooo);
651 offset += 6;
652 if (ooolen(cm->msg.ack_conn.flags) == 1) {
653 BUG_ON(cm->msg.ack_conn.length > 255);
654 dst[offset] = cm->msg.ack_conn.length;
655 offset += 1;
656 } else if (ooolen(cm->msg.ack_conn.flags) == 2) {
657 BUG_ON(cm->msg.ack_conn.length <= 255);
658 BUG_ON(cm->msg.ack_conn.length > 65535);
659 put_u16(dst + offset, cm->msg.ack_conn.length);
660 offset += 2;
661 } else if (ooolen(cm->msg.ack_conn.flags) == 4) {
662 BUG_ON(cm->msg.ack_conn.length <= 65535);
663 put_u32(dst + offset, cm->msg.ack_conn.length);
664 offset += 4;
665 } else {
666 BUG();
670 if ((cm->msg.ack_conn.flags & KP_ACK_CONN_FLAGS_PRIORITY) != 0) {
671 dst[offset] = cm->msg.ack_conn.priority_seqno;
672 offset++;
673 dst[offset] = cm->msg.ack_conn.priority;
674 offset++;
677 list_add_tail(&(cm->lh), &(cr->msgs));
679 BUG_ON(offset != cm->length);
680 return offset;
683 static int add_ping(struct sk_buff *skb, __u32 cookie, __u32 spaceleft)
685 char *dst;
687 BUG_ON(KP_PING_CMDLEN != 5);
689 if (unlikely(spaceleft < 5))
690 return 0;
692 dst = skb_put(skb, 5);
693 BUG_ON(dst == 0);
695 dst[0] = KP_PING;
696 put_u32(dst + 1, cookie);
698 return 5;
701 static int add_pong(struct sk_buff *skb, struct control_retrans *cr,
702 struct control_msg_out *cm, __u32 spaceleft,
703 ktime_t packetgen_start)
705 __s64 respdelay;
706 char *dst;
708 if (unlikely(spaceleft < 9))
709 return 0;
711 respdelay = div_u64(ktime_to_ns(packetgen_start) -
712 ktime_to_ns(cm->msg.pong.time_enqueued) + 500, 1000);
713 if (unlikely(respdelay > U32_MAX))
714 respdelay = U32_MAX;
715 if (unlikely(respdelay < 0))
716 respdelay = 0;
718 dst = skb_put(skb, 9);
719 BUG_ON(dst == 0);
721 dst[0] = KP_PONG;
722 put_u32(dst + 1, cm->msg.pong.cookie);
723 put_u32(dst + 5, (__u32) respdelay);
725 list_add_tail(&(cm->lh), &(cr->msgs));
727 return 9;
730 static int add_connect(struct sk_buff *skb, struct control_retrans *cr,
731 struct control_msg_out *cm, __u32 spaceleft)
733 char *dst;
734 struct conn *src_in = cm->msg.connect.src_in;
736 if (unlikely(spaceleft < 20))
737 return 0;
739 dst = skb_put(skb, 20);
740 BUG_ON(dst == 0);
742 dst[0] = KP_CONNECT;
743 put_u32(dst + 1, cm->msg.connect.conn_id);
744 put_u48(dst + 5, cm->msg.connect.seqno1);
745 put_u48(dst + 11, cm->msg.connect.seqno2);
746 BUG_ON(cm->msg.connect.src_in == 0);
747 dst[17] = get_window(cm->msg.connect.src_in, cm->nb,
748 cm->msg.connect.conn_id);
750 spin_lock_bh(&(src_in->reversedir->rcv_lock));
751 BUG_ON(src_in->reversedir->targettype != TARGET_OUT);
753 dst[18] = src_in->reversedir->target.out.priority_seqno;
754 dst[19] = src_in->reversedir->target.out.priority_last;
756 spin_unlock_bh(&(src_in->reversedir->rcv_lock));
758 list_add_tail(&(cm->lh), &(cr->msgs));
760 return 20;
763 static int add_connect_success(struct sk_buff *skb, struct control_retrans *cr,
764 struct control_msg_out *cm, __u32 spaceleft)
766 char *dst;
768 if (unlikely(spaceleft < 6))
769 return 0;
771 dst = skb_put(skb, 6);
772 BUG_ON(dst == 0);
774 dst[0] = KP_CONNECT_SUCCESS;
775 put_u32(dst + 1, cm->msg.connect_success.conn_id);
776 BUG_ON(cm->msg.connect_success.src_in == 0);
777 dst[5] = get_window(cm->msg.connect_success.src_in, cm->nb,
778 cm->msg.connect_success.conn_id);
780 list_add_tail(&(cm->lh), &(cr->msgs));
782 return 6;
785 static int add_reset_conn(struct sk_buff *skb, struct control_retrans *cr,
786 struct control_msg_out *cm, __u32 spaceleft)
788 char *dst;
790 if (unlikely(spaceleft < 5))
791 return 0;
793 dst = skb_put(skb, 5);
794 BUG_ON(dst == 0);
796 dst[0] = KP_RESET_CONN;
797 put_u32(dst + 1, cm->msg.reset_conn.conn_id);
799 list_add_tail(&(cm->lh), &(cr->msgs));
801 return 5;
804 static int add_conndata(struct sk_buff *skb, struct control_retrans *cr,
805 struct control_msg_out *cm, __u32 spaceleft,
806 struct control_msg_out **split_conndata, __u32 *sc_sendlen)
808 char *dst;
810 __u32 totallen = cm->msg.conn_data.datalen + KP_CONN_DATA_CMDLEN;
811 __u32 putlen = min(totallen, spaceleft);
812 __u32 dataputlen = putlen - KP_CONN_DATA_CMDLEN;
814 BUG_ON(KP_CONN_DATA_CMDLEN != 13);
816 BUG_ON(putlen > 1024*1024*1024);
818 BUG_ON(split_conndata == 0);
819 BUG_ON(*split_conndata != 0);
820 BUG_ON(sc_sendlen == 0);
821 BUG_ON(*sc_sendlen != 0);
823 if (putlen < KP_CONN_DATA_CMDLEN + 1)
824 return 0;
826 dst = skb_put(skb, putlen);
827 BUG_ON(dst == 0);
829 if (cm->msg.conn_data.flush != 0) {
830 if (cm->msg.conn_data.snd_delayed_lowbuf == 0) {
831 dst[0] = KP_CONN_DATA_FLUSH;
832 } else {
833 dst[0] = KP_CONN_DATA_LOWBUFDELAYED_FLUSH;
835 } else {
836 if (cm->msg.conn_data.snd_delayed_lowbuf == 0) {
837 dst[0] = KP_CONN_DATA;
838 } else {
839 dst[0] = KP_CONN_DATA_LOWBUFDELAYED;
842 put_u32(dst + 1, cm->msg.conn_data.conn_id);
843 put_u48(dst + 5, cm->msg.conn_data.seqno);
844 put_u16(dst + 11, dataputlen);
846 memcpy(dst + 13, cm->msg.conn_data.data, dataputlen);
848 if (cm->msg.conn_data.datalen == dataputlen) {
849 list_add_tail(&(cm->lh), &(cr->msgs));
850 } else {
851 *split_conndata = cm;
852 *sc_sendlen = dataputlen;
855 return putlen;
858 static int add_set_max_cmsg_dly(struct sk_buff *skb, struct control_retrans *cr,
859 struct control_msg_out *cm, __u32 spaceleft)
861 char *dst;
863 BUG_ON(KP_SET_MAX_CMSG_DELAY_CMDLEN != 13);
865 if (unlikely(spaceleft < 13))
866 return 0;
868 dst = skb_put(skb, 13);
869 BUG_ON(dst == 0);
871 dst[0] = KP_SET_MAX_CMSG_DELAY;
872 put_u32(dst + 1, cm->msg.set_max_cmsg_delay.ack_delay);
873 put_u32(dst + 5, cm->msg.set_max_cmsg_delay.ackconn_delay);
874 put_u32(dst + 9, cm->msg.set_max_cmsg_delay.other_delay);
876 list_add_tail(&(cm->lh), &(cr->msgs));
878 return 13;
881 static int add_message(struct sk_buff *skb, struct control_retrans *cr,
882 struct control_msg_out *cm, __u32 spaceleft,
883 ktime_t packetgen_start,
884 struct control_msg_out **split_conndata, __u32 *sc_sendlen)
886 BUG_ON(split_conndata != 0 && *split_conndata != 0);
887 BUG_ON(sc_sendlen != 0 && *sc_sendlen != 0);
889 switch (cm->type) {
890 case MSGTYPE_ACK:
891 return add_ack(skb, cr, cm, spaceleft);
892 case MSGTYPE_ACK_CONN:
893 return add_ack_conn(skb, cr, cm, spaceleft);
894 case MSGTYPE_PONG:
895 return add_pong(skb, cr, cm, spaceleft, packetgen_start);
896 case MSGTYPE_CONNECT:
897 return add_connect(skb, cr, cm, spaceleft);
898 case MSGTYPE_CONNECT_SUCCESS:
899 return add_connect_success(skb, cr, cm, spaceleft);
900 case MSGTYPE_RESET_CONN:
901 return add_reset_conn(skb, cr, cm, spaceleft);
902 case MSGTYPE_CONNDATA:
903 return add_conndata(skb, cr, cm, spaceleft, split_conndata,
904 sc_sendlen);
905 case MSGTYPE_SET_MAX_CMSG_DELAY:
906 return add_set_max_cmsg_dly(skb, cr, cm, spaceleft);
907 default:
908 BUG();
910 BUG();
911 return 0;
914 static __u32 __send_messages(struct neighbor *nb, struct sk_buff *skb,
915 struct control_retrans *cr, struct list_head *cmsgs,
916 __u32 spaceleft, int nbstate, ktime_t packetgen_start,
917 struct control_msg_out **split_conndata, __u32 *sc_sendlen)
919 __u32 length = 0;
920 while (!list_empty(cmsgs)) {
921 int rc;
922 struct control_msg_out *cm = container_of(cmsgs->next,
923 struct control_msg_out, lh);
925 list_del(&(cm->lh));
927 rc = add_message(skb, cr, cm, spaceleft - length,
928 packetgen_start, split_conndata, sc_sendlen);
929 if (rc == 0) {
930 BUG();
931 list_add(&(cm->lh), cmsgs);
932 break;
935 length += rc;
938 return length;
941 static __u32 __send_messages_smcd(struct neighbor *nb, struct sk_buff *skb,
942 struct control_retrans *cr, __u32 spaceleft,
943 ktime_t packetgen_start)
945 struct control_msg_out *cm;
946 int rc;
948 cm = alloc_control_msg(nb, ACM_PRIORITY_MED);
950 if (unlikely(cm == 0))
951 return 0;
953 cm->type = MSGTYPE_SET_MAX_CMSG_DELAY;
954 cm->msg.set_max_cmsg_delay.ack_delay =
955 CMSG_MAXDELAY_ACK_MS * 1000;
956 cm->msg.set_max_cmsg_delay.ackconn_delay =
957 CMSG_MAXDELAY_ACKCONN_MS * 1000;
958 cm->msg.set_max_cmsg_delay.other_delay =
959 CMSG_MAXDELAY_OTHER_MS * 1000;
960 cm->length = KP_SET_MAX_CMSG_DELAY_CMDLEN;
962 rc = add_message(skb, cr, cm, spaceleft, packetgen_start, 0, 0);
964 #warning todo recover packet loss
965 nb->max_cmsg_delay_sent = 1;
967 return rc;
970 static void requeue_message(struct control_msg_out *cm)
972 if (cm->type == MSGTYPE_ACK_CONN) {
973 struct conn *cn_l = cm->msg.ack_conn.src_in;
975 spin_lock_bh(&(cn_l->rcv_lock));
976 if (unlikely(ackconn_prepare_requeue(cn_l, cm) == 0)) {
977 free_control_msg(cm);
978 } else {
979 spin_lock_bh(&(cm->nb->cmsg_lock));
981 list_add(&(cm->lh), &(cm->nb->cmsg_queue_ackconn));
982 cm->nb->cmsg_otherlength += cm->length;
984 list_add(&(cm->msg.ack_conn.conn_acks),
985 &(cn_l->source.in.acks_pending));
986 try_merge_ackconns(cn_l, cm);
988 spin_unlock_bh(&(cm->nb->cmsg_lock));
990 spin_unlock_bh(&(cn_l->rcv_lock));
991 return;
994 enqueue_control_msg(cm, ADDCMSG_SRC_READD);
997 static void requeue_messages(struct list_head *lh)
999 while (list_empty(lh) == 0) {
1000 struct control_msg_out *cm = container_of(lh->prev,
1001 struct control_msg_out, lh);
1002 list_del(&(cm->lh));
1003 requeue_message(cm);
/*
 * Fill the packet (init_session/ping first, then set_max_cmsg_delay if
 * not yet advertised, then the queued messages), transmit it, and either
 * schedule the result for retransmission or requeue/free the messages on
 * transmit failure. Returns QOS_RESUME_DONE or QOS_RESUME_CONG.
 */
static int _send_messages_send2(struct neighbor *nb, struct sk_buff *skb,
		int ping, int initsession, struct control_retrans *cr,
		struct list_head *cmsgs, __u32 spaceleft, int nbstate,
		int *sent)
{
	int rc;
	__u32 length = 0;
	__u32 pinglen = 0;
	__u32 pingcookie = 0;
	unsigned long last_ping_time;
	struct control_msg_out *split_conndata = 0;
	__u32 sc_sendlen = 0;

	ktime_t packetgen_start = ktime_get();

	if (ping != TIMETOSENDPING_NO) {
		int rc;

		if (unlikely(initsession)) {
			rc = add_init_session(skb, nb->sessionid,
					spaceleft - length);
			BUG_ON(rc <= 0);
			pinglen = rc;
			length += rc;
		}

		pingcookie = add_ping_req(nb, &last_ping_time, packetgen_start);
		rc = add_ping(skb, pingcookie, spaceleft - length);
		BUG_ON(rc <= 0);
		pinglen += rc;
		length += rc;
	}

	if (likely(nbstate == NEIGHBOR_STATE_ACTIVE) &&
			unlikely(nb->max_cmsg_delay_sent == 0))
		length += __send_messages_smcd(nb, skb, cr, spaceleft - length,
				packetgen_start);

	length += __send_messages(nb, skb, cr, cmsgs, spaceleft - length,
			nbstate, packetgen_start, &split_conndata, &sc_sendlen);

	BUG_ON(length > spaceleft);

	/* packet would carry only an unforced ping - do not send it */
	if (likely(ping != TIMETOSENDPING_FORCE) &&
			pinglen != 0 && unlikely(length == pinglen)) {
		unadd_ping_req(nb, pingcookie, last_ping_time, 0);
		goto drop;
	}

	if (unlikely(length == 0)) {
drop:
		kfree_skb(skb);

		BUG_ON(list_empty(&(cr->msgs)) == 0);
		kref_put(&(cr->ref), free_control_retrans);

		/* give the unused seqno back */
		nb->kpacket_seqno--;
		return QOS_RESUME_DONE;
	}

	//padding(skb, spaceleft - length);
	BUG_ON(spaceleft - length != 0);

	rc = cor_dev_queue_xmit(skb, nb->queue, QOS_CALLER_KPACKET);
	if (rc == NET_XMIT_SUCCESS)
		*sent = 1;

	if (rc == NET_XMIT_DROP) {
		/* transmit failed - undo the ping and requeue everything */
		if (ping != 0)
			unadd_ping_req(nb, pingcookie, last_ping_time, 1);

		atomic_inc(&(nb->cmsg_bulk_readds));
		if (split_conndata != 0)
			requeue_message(split_conndata);

		requeue_messages(&(cr->msgs));

		kref_put(&(cr->ref), free_control_retrans);

		atomic_dec(&(nb->cmsg_bulk_readds));

		spin_lock_bh(&(nb->cmsg_lock));
		schedule_controlmsg_timer(nb);
		spin_unlock_bh(&(nb->cmsg_lock));
	} else {
		struct list_head *curr = cr->msgs.next;

		if (pingcookie != 0)
			ping_sent(nb, pingcookie);

		/* prune messages that must not be retransmitted */
		while (curr != &(cr->msgs)) {
			struct control_msg_out *cm = container_of(curr,
					struct control_msg_out, lh);

			curr = curr->next;

			if (unlikely(cm->type == MSGTYPE_PONG &&
					(nbstate != NEIGHBOR_STATE_ACTIVE))) {
				list_del(&(cm->lh));
				free_control_msg(cm);
			} else if (unlikely(cm->type == MSGTYPE_PONG &&
					atomic_inc_return(
					&(nb->cmsg_pongs_retrans_cnt)) >
					MAX_PONG_CMSGS_RETRANS_PER_NEIGH)) {
				atomic_dec(&(nb->cmsg_pongs_retrans_cnt));
				list_del(&(cm->lh));
				free_control_msg(cm);
			} else if (cm->type == MSGTYPE_CONNDATA) {
				/* conndata retransmit is handled by the
				 * conn-level machinery, not by cr */
				schedule_retransmit_conn(cm->msg.conn_data.cr,
						0, 0);
				kfree(cm->msg.conn_data.data_orig);
				list_del(&(cm->lh));
				free_control_msg(cm);
			}
		}

		if (split_conndata != 0) {
			BUG_ON(sc_sendlen == 0);
			BUG_ON(sc_sendlen >=
					split_conndata->msg.conn_data.datalen);

			/* advance past the part that was sent, requeue rest */
			split_conndata->msg.conn_data.seqno += sc_sendlen;
			split_conndata->msg.conn_data.data += sc_sendlen;
			split_conndata->msg.conn_data.datalen -= sc_sendlen;
			split_conndata->length = KP_CONN_DATA_CMDLEN +
					split_conndata->msg.conn_data.datalen;
			enqueue_control_msg(split_conndata,
					ADDCMSG_SRC_SPLITCONNDATA);
		}

		if (list_empty(&(cr->msgs)))
			kref_put(&(cr->ref), free_control_retrans);
		else
			schedule_retransmit(cr, nb);
	}

	return (rc == NET_XMIT_SUCCESS) ? QOS_RESUME_DONE : QOS_RESUME_CONG;
}
1147 static int _send_messages_send(struct neighbor *nb, int ping,
1148 int initsession, struct list_head *cmsgs, int nbstate,
1149 __u32 length, __u64 seqno, int *sent)
1151 struct sk_buff *skb;
1152 struct control_retrans *cr;
1153 int rc;
1155 skb = create_packet_cmsg(nb, length, GFP_ATOMIC, seqno);
1156 if (unlikely(skb == 0)) {
1157 printk(KERN_ERR "cor: send_messages: cannot allocate skb (out of memory?)");
1159 requeue_messages(cmsgs);
1160 return QOS_RESUME_CONG;
1163 cr = kmem_cache_alloc(controlretrans_slab, GFP_ATOMIC);
1164 if (unlikely(cr == 0)) {
1165 printk(KERN_ERR "cor: send_messages: cannot allocate control_retrans (out of memory?)");
1166 kfree_skb(skb);
1168 requeue_messages(cmsgs);
1169 return QOS_RESUME_CONG;
1172 memset(cr, 0, sizeof(struct control_retrans));
1173 kref_init(&(cr->ref));
1174 cr->nb = nb;
1175 cr->seqno = seqno;
1176 INIT_LIST_HEAD(&(cr->msgs));
1178 rc = _send_messages_send2(nb, skb, ping, initsession, cr, cmsgs, length,
1179 nbstate, sent);
1181 BUG_ON(!list_empty(cmsgs));
1183 return rc;
/* per-neighbor send queue ids, see _peek_message() */
#define CMSGQUEUE_PONG 1
#define CMSGQUEUE_ACK 2
#define CMSGQUEUE_ACK_CONN 3
#define CMSGQUEUE_CONNDATA_LOWLAT 4
#define CMSGQUEUE_CONNDATA_HIGHLAT 5
#define CMSGQUEUE_OTHER 6
1193 static unsigned long get_cmsg_timeout(struct control_msg_out *cm, int queue)
1195 if (cm->type == MSGTYPE_ACK) {
1196 BUG_ON(queue != CMSGQUEUE_ACK);
1197 return cm->time_added +
1198 msecs_to_jiffies(CMSG_MAXDELAY_ACK_MS) - 1;
1199 } else if (cm->type == MSGTYPE_ACK_CONN) {
1200 BUG_ON(queue != CMSGQUEUE_ACK_CONN);
1201 return cm->time_added +
1202 msecs_to_jiffies(CMSG_MAXDELAY_ACKCONN_MS) - 1;
1203 } else if (cm->type == MSGTYPE_CONNDATA) {
1204 if (cm->msg.conn_data.highlatency != 0) {
1205 BUG_ON(queue != CMSGQUEUE_CONNDATA_HIGHLAT);
1206 return cm->time_added +
1207 msecs_to_jiffies(
1208 CMSG_MAXDELAY_CONNDATA_MS) - 1;
1209 } else {
1210 BUG_ON(queue != CMSGQUEUE_CONNDATA_LOWLAT);
1211 return cm->time_added;
1213 } else {
1214 BUG_ON(cm->type == MSGTYPE_PONG && queue != CMSGQUEUE_PONG);
1215 BUG_ON(cm->type != MSGTYPE_PONG && queue != CMSGQUEUE_OTHER);
1217 return cm->time_added +
1218 msecs_to_jiffies(CMSG_MAXDELAY_OTHER_MS) - 1;
/**
 * _peek_message - consider the head of one cmsg queue as the next candidate
 * @nb_cmsglocked: neighbor (cmsg_lock must be held by the caller)
 * @queue: CMSGQUEUE_* to inspect
 * @currcm: in/out - best candidate so far (0 if none yet)
 * @currtimeout: in/out - deadline of *currcm (valid only if *currcm != 0)
 * @currlen: out - pointer to the length accounting field for *currcm's queue
 *
 * Replaces the current candidate if this queue's head message has an earlier
 * deadline, unless the current candidate's deadline has already expired (an
 * expired candidate keeps priority).
 */
static void _peek_message(struct neighbor *nb_cmsglocked, int queue,
                struct control_msg_out **currcm, unsigned long *currtimeout,
                __u32 **currlen)
{
        struct control_msg_out *cm;
        unsigned long cmtimeout;

        struct list_head *queuelh;
        if (queue == CMSGQUEUE_PONG) {
                queuelh = &(nb_cmsglocked->cmsg_queue_pong);
        } else if (queue == CMSGQUEUE_ACK) {
                queuelh = &(nb_cmsglocked->cmsg_queue_ack);
        } else if (queue == CMSGQUEUE_ACK_CONN) {
                queuelh = &(nb_cmsglocked->cmsg_queue_ackconn);
        } else if (queue == CMSGQUEUE_CONNDATA_LOWLAT) {
                queuelh = &(nb_cmsglocked->cmsg_queue_conndata_lowlat);
        } else if (queue == CMSGQUEUE_CONNDATA_HIGHLAT) {
                queuelh = &(nb_cmsglocked->cmsg_queue_conndata_highlat);
        } else if (queue == CMSGQUEUE_OTHER) {
                queuelh = &(nb_cmsglocked->cmsg_queue_other);
        } else {
                BUG();
        }

        if (list_empty(queuelh))
                return;

        cm = container_of(queuelh->next, struct control_msg_out, lh);
        cmtimeout = get_cmsg_timeout(cm, queue);

        BUG_ON(cm->nb != nb_cmsglocked);

        /* *currtimeout is only read when *currcm != 0 (short-circuit) */
        if (*currcm == 0 || (time_before(cmtimeout, *currtimeout) &&
                        time_before(jiffies, *currtimeout))) {
                *currcm = cm;
                *currtimeout = cmtimeout;

                if (queue == CMSGQUEUE_PONG) {
                        *currlen = &(nb_cmsglocked->cmsg_pongslength);
                } else {
                        *currlen = &(nb_cmsglocked->cmsg_otherlength);
                }
        }
}
/**
 * peek_message - find the most urgent queued control message
 * @nb_cmsglocked: neighbor (cmsg_lock held)
 * @nbstate: NEIGHBOR_STATE_* - only pongs are considered unless ACTIVE
 * @cm/@cmtimeout/@len: candidate outputs, see _peek_message()
 * @for_timeout: nonzero when only computing the timer deadline; in that
 *      case conn data queues are skipped while cmsg_delay_conndata is set
 */
static void peek_message(struct neighbor *nb_cmsglocked, int nbstate,
                struct control_msg_out **cm, unsigned long *cmtimeout,
                __u32 **len, int for_timeout)
{
        _peek_message(nb_cmsglocked, CMSGQUEUE_PONG, cm, cmtimeout, len);
        if (likely(nbstate == NEIGHBOR_STATE_ACTIVE)) {
                _peek_message(nb_cmsglocked, CMSGQUEUE_ACK, cm, cmtimeout, len);
                _peek_message(nb_cmsglocked, CMSGQUEUE_ACK_CONN, cm, cmtimeout,
                                len);
                if (!for_timeout || atomic_read(
                                &(nb_cmsglocked->cmsg_delay_conndata)) == 0) {
                        _peek_message(nb_cmsglocked, CMSGQUEUE_CONNDATA_LOWLAT,
                                        cm, cmtimeout, len);
                        _peek_message(nb_cmsglocked, CMSGQUEUE_CONNDATA_HIGHLAT,
                                        cm, cmtimeout, len);
                }
                _peek_message(nb_cmsglocked, CMSGQUEUE_OTHER, cm, cmtimeout,
                                len);
        }
}
/**
 * get_cmsg_timer_timeout - next time the cmsg timer must fire
 * @nb_cmsglocked: neighbor (cmsg_lock held)
 * @nbstate: neighbor state, forwarded to peek_message()
 *
 * Returns the earlier of the next ping time and the most urgent queued
 * control message's deadline; an already-expired message deadline is
 * clamped to "now" (current jiffies).
 */
static unsigned long get_cmsg_timer_timeout(struct neighbor *nb_cmsglocked,
                int nbstate)
{
        unsigned long pingtimeout = get_next_ping_time(nb_cmsglocked);

        struct control_msg_out *cm = 0;
        unsigned long cmtimeout;
        __u32 *len;

        peek_message(nb_cmsglocked, nbstate, &cm, &cmtimeout, &len, 1);

        if (cm != 0) {
                unsigned long jiffies_tmp = jiffies;

                if (time_before(cmtimeout, jiffies_tmp))
                        return jiffies_tmp;
                if (time_before(cmtimeout, pingtimeout))
                        return cmtimeout;
        }

        return pingtimeout;
}
/**
 * _dequeue_messages - move queued cmsgs onto @cmsgs until targetmss is filled
 * @nb_cmsglocked: neighbor (cmsg_lock held)
 * @nbstate: neighbor state, forwarded to peek_message()
 * @targetmss: byte budget for the kernel packet being built
 * @length: in/out running byte count of the packet
 * @cmsgs: output list; dequeued messages are appended in send order
 *
 * Each dequeued message is removed from its per-neighbor queue and its
 * bytes are moved from the queue's length accounting into *length.
 * A message larger than the remaining space ends the packet, except
 * conn data (which can be split later at send time - NOTE(review):
 * the splitting itself is not visible in this chunk; confirm that an
 * oversized MSGTYPE_CONNDATA dequeued here is trimmed before the
 * *length > targetmss assertion in dequeue_messages() can trip).
 */
static void _dequeue_messages(struct neighbor *nb_cmsglocked, int nbstate,
                __u32 targetmss, __u32 *length, struct list_head *cmsgs)
{
        while (1) {
                __u32 spaceleft = targetmss - *length;
                struct control_msg_out *cm = 0;
                unsigned long cmtimeout;
                __u32 *len;

                peek_message(nb_cmsglocked, nbstate, &cm, &cmtimeout, &len, 0);

                if (unlikely(cm == 0))
                        break;

                BUG_ON(len == 0);

                if (cm->length > spaceleft) {
                        /* only conn data may exceed the remaining space */
                        if (cm->type != MSGTYPE_CONNDATA ||
                                        spaceleft < KP_CONN_DATA_CMDLEN + 1) {
                                WARN_ONCE(1, "cor: maximum segment size is too small");
                                BUG_ON(*length == 0);
                                break;
                        }

                        /* stop if the packet is already 3/4 full */
                        if ((*length/4)*3 > targetmss)
                                break;
                }

                list_del(&(cm->lh));
                *len -= cm->length;

                if (cm->type == MSGTYPE_ACK_CONN)
                        list_del(&(cm->msg.ack_conn.conn_acks));
                if (unlikely(cm->type == MSGTYPE_PONG)) {
                        BUG_ON(cm->nb->cmsg_pongscnt == 0);
                        cm->nb->cmsg_pongscnt--;
                }

                if (unlikely(cm->type == MSGTYPE_RESET_CONN)) {
                        BUG_ON(cm->msg.reset_conn.in_pending_conn_resets == 0);
                        rb_erase(&(cm->msg.reset_conn.rbn),
                                        &(cm->nb->pending_conn_resets_rb));
                        cm->msg.reset_conn.in_pending_conn_resets = 0;
                        kref_put(&(cm->ref), kreffree_bug);
                }

                BUG_ON(*length + cm->length < *length); /* overflow check */
                *length += cm->length;

                list_add_tail(&(cm->lh), cmsgs);
        }
}
/**
 * get_total_messages_length - bytes that would go into the next kpacket
 * @nb: neighbor (cmsg_lock held by caller)
 * @ping: TIMETOSENDPING_* decision
 * @initsession: nonzero if an init-session command must be sent
 * @nbstate: neighbor state; non-pong queues only count when ACTIVE
 * @extralength: out - bytes of implicitly generated commands (ping,
 *      init session, set-max-cmsg-delay) not present in any queue
 */
static __u32 get_total_messages_length(struct neighbor *nb, int ping,
                int initsession, int nbstate, int *extralength)
{
        __u32 length = nb->cmsg_pongslength;

        if (likely(nbstate == NEIGHBOR_STATE_ACTIVE)) {
                length += nb->cmsg_otherlength;

                /* first packet to an active neighbor also announces delays */
                if (unlikely(nb->max_cmsg_delay_sent == 0)) {
                        length += KP_SET_MAX_CMSG_DELAY_CMDLEN;
                        *extralength += KP_SET_MAX_CMSG_DELAY_CMDLEN;
                }
        }
        if (ping == TIMETOSENDPING_FORCE ||
                        (length > 0 && ping != TIMETOSENDPING_NO)) {
                length += KP_PING_CMDLEN;
                *extralength += KP_PING_CMDLEN;

                /* init session is only ever sent together with a ping */
                if (unlikely(initsession)) {
                        length += KP_INIT_SESSION_CMDLEN;
                        *extralength += KP_INIT_SESSION_CMDLEN;
                }
        }

        return length;
}
/**
 * dequeue_messages - decide whether to build a packet now and fill @cmsgs
 * @nb_cmsglocked: neighbor (cmsg_lock held)
 * @ping/@initsession/@nbstate/@targetmss: see callers
 * @length: out - total byte length of the packet to build
 * @cmsgs: out - list of dequeued control messages
 *
 * Returns 1 when nothing should be sent yet (nothing queued, or the
 * packet would be smaller than targetmss and the send timer has not
 * expired), 0 when @cmsgs/@length describe a packet to send.
 */
static int dequeue_messages(struct neighbor *nb_cmsglocked, int ping,
                int initsession, int nbstate, __u32 targetmss,
                __u32 *length, struct list_head *cmsgs)
{
        __u32 extralength = 0;
        __u32 totallength;

        int cmsgqueue_nonpong_empty = (
                        list_empty(&(nb_cmsglocked->cmsg_queue_ack)) &&
                        list_empty(&(nb_cmsglocked->cmsg_queue_ackconn)) &&
                        list_empty(&(nb_cmsglocked->cmsg_queue_conndata_lowlat)) &&
                        list_empty(&(nb_cmsglocked->cmsg_queue_conndata_highlat)) &&
                        list_empty(&(nb_cmsglocked->cmsg_queue_other)));

        /* length accounting must agree with queue emptiness */
        BUG_ON(list_empty(&(nb_cmsglocked->cmsg_queue_pong)) &&
                        nb_cmsglocked->cmsg_pongslength != 0);
        BUG_ON(!list_empty(&(nb_cmsglocked->cmsg_queue_pong)) &&
                        nb_cmsglocked->cmsg_pongslength == 0);
        BUG_ON(cmsgqueue_nonpong_empty &&
                        nb_cmsglocked->cmsg_otherlength != 0);
        BUG_ON(!cmsgqueue_nonpong_empty &&
                        nb_cmsglocked->cmsg_otherlength == 0);

        totallength = get_total_messages_length(nb_cmsglocked, ping,
                        initsession, nbstate, &extralength);

        if (totallength == 0)
                return 1;

        if (totallength < targetmss && ping != TIMETOSENDPING_FORCE &&
                        time_after(get_cmsg_timer_timeout(nb_cmsglocked,
                        nbstate), jiffies))
                return 1;

        /* implicit commands (ping etc.) are counted but not queued */
        *length = extralength;

        _dequeue_messages(nb_cmsglocked, nbstate, targetmss, length, cmsgs);

        BUG_ON(*length == 0);
        BUG_ON(*length > targetmss);

        return 0;
}
/**
 * add_timeouted_retrans - requeue all expired kpacket retransmits
 * @nb: neighbor
 *
 * Walks retrans_list (ordered by timeout) under retrans_lock; every entry
 * whose timeout has expired is removed from the list and the retransmit
 * rbtree and handed to requeue_control_retrans().  On the first
 * not-yet-expired entry the retrans timer is re-armed; the kref_get
 * matches the reference the timer callback releases (only taken when
 * mod_timer() returns 0, i.e. the timer was not already pending).
 */
static void add_timeouted_retrans(struct neighbor *nb)
{
        spin_lock_bh(&(nb->retrans_lock));

        while (!list_empty(&(nb->retrans_list))) {
                struct control_retrans *cr = container_of(nb->retrans_list.next,
                                struct control_retrans, timeout_list);

                BUG_ON(cr->nb != nb);

                if (time_after(cr->timeout, jiffies)) {
                        if (mod_timer(&(nb->retrans_timer), cr->timeout) == 0) {
                                kref_get(&(nb->ref));
                        }
                        break;
                }

                list_del(&(cr->timeout_list));
                rb_erase(&(cr->rbn), &(nb->kp_retransmits_rb));

                kref_put(&(cr->ref), kreffree_bug); /* rb */

                requeue_control_retrans(cr);
        }

        spin_unlock_bh(&(nb->retrans_lock));
}
/**
 * _delete_all_cmsgs - free every control message on @cmsgs
 *
 * Conn data is handed back to the retransmit machinery (so the data is
 * not lost) and its private buffer copy is freed before the message
 * itself is released.
 */
static void _delete_all_cmsgs(struct list_head *cmsgs)
{
        while (!list_empty(cmsgs)) {
                struct control_msg_out *cm = container_of(cmsgs->next,
                                struct control_msg_out, lh);

                list_del(&(cm->lh));

                if (cm->type == MSGTYPE_CONNDATA) {
                        schedule_retransmit_conn(cm->msg.conn_data.cr, 0, 0);
                        kfree(cm->msg.conn_data.data_orig);
                }

                free_control_msg(cm);
        }
}
/**
 * delete_all_cmsgs - drain and free all queued control messages of @nb
 *
 * Dequeues in batches (pretending an active neighbor and a huge mss so
 * everything qualifies) under cmsg_lock, then frees each batch outside
 * the lock via _delete_all_cmsgs().
 */
static void delete_all_cmsgs(struct neighbor *nb)
{
        while (1) {
                struct list_head cmsgs;
                __u32 length = 0;

                INIT_LIST_HEAD(&cmsgs);

                spin_lock_bh(&(nb->cmsg_lock));
                _dequeue_messages(nb, NEIGHBOR_STATE_ACTIVE, 65536, &length,
                                &cmsgs);
                spin_unlock_bh(&(nb->cmsg_lock));

                if (list_empty(&cmsgs))
                        break;

                _delete_all_cmsgs(&cmsgs);
        }
}
/**
 * reset_timeouted_conn_needed - should this inactive connection be reset?
 * @nb: neighbor the connection is expected to belong to
 * @src_in_l: connection (rcv_lock held by caller, hence the _l suffix)
 *
 * Returns 0 if the conn does not belong to @nb / is already reset, or if
 * it has shown activity within the update interval plus inactivity
 * timeout; 1 if it timed out and should be reset.
 */
static int reset_timeouted_conn_needed(struct neighbor *nb,
                struct conn *src_in_l)
{
        if (unlikely(src_in_l->sourcetype != SOURCE_IN ||
                        src_in_l->source.in.nb != nb ||
                        src_in_l->isreset != 0))
                return 0;
        else if (likely(time_after(src_in_l->source.in.jiffies_last_act +
                        CONN_ACTIVITY_UPDATEINTERVAL_SEC * HZ +
                        CONN_INACTIVITY_TIMEOUT_SEC * HZ, jiffies)))
                return 0;

        return 1;
}
/**
 * reset_timeouted_conn - reset one timed-out connection
 * @nb: neighbor
 * @src_in: incoming half of the connection (unlocked on entry)
 *
 * Returns nonzero if the connection was reset.  Both directions are
 * locked in a fixed order derived from is_client to avoid deadlock
 * between concurrent resetters.  The timeout condition is re-checked
 * under the locks before sending the reset; if send_reset_conn() fails
 * (e.g. allocation), nothing is marked and 0 is returned so the caller
 * stops its sweep.
 */
static int reset_timeouted_conn(struct neighbor *nb, struct conn *src_in)
{
        int resetted = 0;

        if (src_in->is_client) {
                spin_lock_bh(&(src_in->rcv_lock));
                spin_lock_bh(&(src_in->reversedir->rcv_lock));
        } else {
                spin_lock_bh(&(src_in->reversedir->rcv_lock));
                spin_lock_bh(&(src_in->rcv_lock));
        }

        resetted = reset_timeouted_conn_needed(nb, src_in);
        if (unlikely(resetted == 0))
                goto unlock;

        resetted = (send_reset_conn(nb, src_in->reversedir->target.out.conn_id,
                        1) == 0);
        if (unlikely(resetted == 0))
                goto unlock;

        BUG_ON(src_in->reversedir->isreset != 0);
        src_in->reversedir->isreset = 1;

unlock:
        if (src_in->is_client) {
                spin_unlock_bh(&(src_in->rcv_lock));
                spin_unlock_bh(&(src_in->reversedir->rcv_lock));
        } else {
                spin_unlock_bh(&(src_in->reversedir->rcv_lock));
                spin_unlock_bh(&(src_in->rcv_lock));
        }

        /* reset_conn() takes its own locks, so call it after unlocking */
        if (resetted)
                reset_conn(src_in);

        return resetted;
}
/**
 * reset_timeouted_conns - sweep @nb's receive connections for timeouts
 *
 * Rotates rcv_conn_list (head moved to tail each iteration) so the sweep
 * makes progress without holding conn_list_lock across resets; stops at
 * the first connection that is not timed out, or after a bounded number
 * of iterations (10000) to avoid monopolizing the CPU.  A cheap check
 * under rcv_lock filters out non-candidates before the full
 * reset_timeouted_conn() (which re-checks under both locks).
 */
static void reset_timeouted_conns(struct neighbor *nb)
{
        int i;
        for (i=0;i<10000;i++) {
                unsigned long iflags;
                struct list_head *lh;
                struct conn *src_in;

                int resetted = 1;

                spin_lock_irqsave(&(nb->conn_list_lock), iflags);

                if (list_empty(&(nb->rcv_conn_list))) {
                        spin_unlock_irqrestore(&(nb->conn_list_lock), iflags);
                        break;
                }

                lh = nb->rcv_conn_list.next;
                list_del(lh);
                list_add_tail(lh, &(nb->rcv_conn_list));

                src_in = container_of(lh, struct conn, source.in.nb_list);
                kref_get(&(src_in->ref));

                spin_unlock_irqrestore(&(nb->conn_list_lock), iflags);

                spin_lock_bh(&(src_in->rcv_lock));
                BUG_ON(src_in->sourcetype != SOURCE_IN);
                BUG_ON(src_in->source.in.nb != nb);
                resetted = reset_timeouted_conn_needed(nb, src_in);
                spin_unlock_bh(&(src_in->rcv_lock));
                if (likely(resetted == 0))
                        goto put;

                resetted = reset_timeouted_conn(nb, src_in);

put:
                kref_put(&(src_in->ref), free_conn);

                if (likely(resetted == 0))
                        break;
        }
}
/**
 * send_messages - build and transmit kernel packets for @nb
 * @nb: neighbor
 * @sent: out - set by _send_messages_send() when something was sent
 *
 * Must not be called by more than one thread at the same time, because
 * 1) re-adding a control_msg_out could reorder the queues
 * 2) multiple pings might be sent
 *
 * Returns a QOS_RESUME_* code; QOS_RESUME_DONE when all pending packets
 * were handed off (or the neighbor is dead and its state was cleaned up).
 */
int send_messages(struct neighbor *nb, int *sent)
{
        int rc = QOS_RESUME_DONE;
        int ping;
        int initsession;
        __u32 targetmss = mss_cmsg(nb);

        int nbstate = get_neigh_state(nb);

        if (likely(nbstate == NEIGHBOR_STATE_ACTIVE))
                reset_timeouted_conns(nb);

        if (unlikely(nbstate == NEIGHBOR_STATE_KILLED)) {
                /* dead neighbor: drop retransmits and all queued cmsgs */
                spin_lock_bh(&(nb->retrans_lock));
                empty_retrans_queue(nb);
                spin_unlock_bh(&(nb->retrans_lock));

                delete_all_cmsgs(nb);
                return QOS_RESUME_DONE;
        }

        ping = time_to_send_ping(nb);

        spin_lock_bh(&(nb->cmsg_lock));

        if (nb->add_retrans_needed != 0) {
                nb->add_retrans_needed = 0;
                /* add_timeouted_retrans() takes retrans_lock - drop ours */
                spin_unlock_bh(&(nb->cmsg_lock));
                add_timeouted_retrans(nb);
                spin_lock_bh(&(nb->cmsg_lock));
        }

        initsession = unlikely(atomic_read(&(nb->sessionid_snd_needed)) != 0);

        while (1) {
                struct list_head cmsgs;
                __u32 length = 0;
                __u64 seqno;

                INIT_LIST_HEAD(&cmsgs);

                if (dequeue_messages(nb, ping, initsession, nbstate,
                                targetmss, &length, &cmsgs) != 0) {
                        schedule_controlmsg_timer(nb);
                        spin_unlock_bh(&(nb->cmsg_lock));
                        return QOS_RESUME_DONE;
                }

                nb->kpacket_seqno++;
                seqno = nb->kpacket_seqno;

                spin_unlock_bh(&(nb->cmsg_lock));

                rc = _send_messages_send(nb, ping, initsession, &cmsgs, nbstate,
                                length, seqno, sent);

                if (rc != QOS_RESUME_DONE)
                        return rc;

                /* ping/init session only go into the first packet */
                ping = 0;
                initsession = 0;

                spin_lock_bh(&(nb->cmsg_lock));
        }
}
/**
 * controlmsg_timerfunc - cmsg timer callback; schedule kpacket sending
 *
 * Queues the neighbor for kpacket transmission and drops the reference
 * that was taken when the timer was armed (see schedule_controlmsg_timer).
 */
void controlmsg_timerfunc(struct timer_list *cmsg_timer)
{
        struct neighbor *nb = container_of(cmsg_timer,
                        struct neighbor, cmsg_timer);
        qos_enqueue(nb->queue, &(nb->rb_kp), QOS_CALLER_KPACKET);
        kref_put(&(nb->ref), neighbor_free);
}
/**
 * cmsg_full_packet - nonzero if enough cmsg bytes are queued to fill
 *      at least one full packet (>= mss) for @nb
 */
static int cmsg_full_packet(struct neighbor *nb, int nbstate)
{
        __u32 extralength = 0;
        int ping = time_to_send_ping(nb);
        int initsession = unlikely(atomic_read(&(nb->sessionid_snd_needed)) !=
                        0);
        __u32 len = get_total_messages_length(nb, ping, initsession, nbstate,
                        &extralength);

        if (len == 0)
                return 0;
        if (len < mss_cmsg(nb))
                return 0;

        return 1;
}
/**
 * schedule_controlmsg_timer - (re)arm the cmsg timer for @nb
 *
 * Caller holds cmsg_lock (nb_cmsglocked).  Sends immediately (via qos
 * enqueue) when the neighbor is dead, a full packet is queued, a
 * retransmit needs adding, or the deadline already passed; otherwise
 * arms cmsg_timer.  Does nothing while bulk re-adds are in flight.
 * The kref_get pairs with the put in controlmsg_timerfunc() and is
 * only taken when mod_timer() returns 0 (timer was not pending).
 */
void schedule_controlmsg_timer(struct neighbor *nb_cmsglocked)
{
        unsigned long timeout;
        int nbstate = get_neigh_state(nb_cmsglocked);

        if (unlikely(nbstate == NEIGHBOR_STATE_KILLED))
                goto now;

        if (atomic_read(&(nb_cmsglocked->cmsg_bulk_readds)) != 0)
                return;

        if (cmsg_full_packet(nb_cmsglocked, nbstate))
                goto now;

        if (nb_cmsglocked->add_retrans_needed != 0)
                goto now;

        timeout = get_cmsg_timer_timeout(nb_cmsglocked, nbstate);

        if (time_before_eq(timeout, jiffies)) {
now:
                qos_enqueue(nb_cmsglocked->queue, &(nb_cmsglocked->rb_kp),
                                QOS_CALLER_KPACKET);
        } else {
                if (mod_timer(&(nb_cmsglocked->cmsg_timer), timeout) == 0) {
                        kref_get(&(nb_cmsglocked->ref));
                }
        }
}
/**
 * insert_pending_conn_resets - index a reset_conn cmsg by conn_id
 * @ins: MSGTYPE_RESET_CONN message to insert
 *
 * Inserts @ins into the neighbor's pending_conn_resets rbtree so
 * duplicate resets for the same conn_id can be suppressed.  Returns 1
 * if a reset for this conn_id is already pending (nothing inserted),
 * 0 on success (takes an extra ref on @ins for the rbtree).
 * Caller holds cmsg_lock.
 */
static int insert_pending_conn_resets(struct control_msg_out *ins)
{
        struct neighbor *nb = ins->nb;
        __u32 conn_id = ins->msg.reset_conn.conn_id;

        struct rb_root *root;
        struct rb_node **p;
        struct rb_node *parent = 0;

        BUG_ON(nb == 0);
        BUG_ON(ins->msg.reset_conn.in_pending_conn_resets != 0);

        root = &(nb->pending_conn_resets_rb);
        p = &(root->rb_node);

        while ((*p) != 0) {
                struct control_msg_out *cm = container_of(*p,
                                struct control_msg_out,
                                msg.reset_conn.rbn);
                __u32 cm_connid = cm->msg.reset_conn.conn_id;

                BUG_ON(cm->nb != ins->nb);
                BUG_ON(cm->type != MSGTYPE_RESET_CONN);

                parent = *p;
                if (conn_id == cm_connid) {
                        return 1;
                } else if (conn_id < cm_connid) {
                        p = &(*p)->rb_left;
                } else if (conn_id > cm_connid) {
                        p = &(*p)->rb_right;
                } else {
                        BUG(); /* unreachable: ==, < and > are exhaustive */
                }
        }

        kref_get(&(ins->ref));
        rb_link_node(&(ins->msg.reset_conn.rbn), parent, p);
        rb_insert_color(&(ins->msg.reset_conn.rbn), root);
        ins->msg.reset_conn.in_pending_conn_resets = 1;

        return 0;
}
/**
 * free_oldest_pong - drop the oldest queued pong of @nb to make room
 *
 * Caller holds cmsg_lock and guarantees at least one pong is queued.
 * The container_of() on a possibly-empty list would yield a bogus
 * pointer, but it is not dereferenced before the list_empty BUG_ON.
 */
static void free_oldest_pong(struct neighbor *nb)
{
        struct control_msg_out *cm = container_of(nb->cmsg_queue_pong.next,
                        struct control_msg_out, lh);

        BUG_ON(list_empty(&(nb->cmsg_queue_pong)));
        BUG_ON(unlikely(cm->type != MSGTYPE_PONG));

        list_del(&(cm->lh));
        nb->cmsg_pongslength -= cm->length;
        BUG_ON(nb->cmsg_pongscnt == 0);
        cm->nb->cmsg_pongscnt--;
        free_control_msg(cm);
}
/**
 * _enqueue_control_msg - put @cm on the right per-neighbor queue
 * @cm: message to queue (cm->nb and cm->type set)
 * @src: ADDCMSG_SRC_* - NEW messages go to the tail, re-added ones
 *      (READD/RETRANS/SPLITCONNDATA) to the head so ordering is kept
 *
 * Returns 1 if the message was consumed/freed instead of queued (pong
 * overflow, duplicate reset_conn), 0 if queued.  Caller holds cmsg_lock.
 */
static int _enqueue_control_msg(struct control_msg_out *cm, int src)
{
        if (unlikely(cm->type == MSGTYPE_PONG)) {
                BUG_ON(src == ADDCMSG_SRC_SPLITCONNDATA);

                if (cm->nb->cmsg_pongscnt >= MAX_PONG_CMSGS_PER_NEIGH) {
                        if (src != ADDCMSG_SRC_NEW) {
                                /* re-added pong over the limit: drop it */
                                BUG_ON(cm->nb->cmsg_pongscnt == 0);
                                cm->nb->cmsg_pongscnt--;
                                free_control_msg(cm);
                                return 1;
                        } else {
                                /* new pong: evict the oldest instead */
                                free_oldest_pong(cm->nb);
                        }
                }

                cm->nb->cmsg_pongscnt++;
                cm->nb->cmsg_pongslength += cm->length;

                if (src != ADDCMSG_SRC_NEW) {
                        list_add(&(cm->lh), &(cm->nb->cmsg_queue_pong));
                } else {
                        list_add_tail(&(cm->lh), &(cm->nb->cmsg_queue_pong));
                }

                return 0;
        } else if (unlikely(cm->type == MSGTYPE_RESET_CONN)) {
                if (insert_pending_conn_resets(cm) != 0) {
                        /* a reset for this conn_id is already pending */
                        cm->type = 0;
                        free_control_msg(cm);
                        return 1;
                }
        }

        cm->nb->cmsg_otherlength += cm->length;
        if (src == ADDCMSG_SRC_NEW) {
                if (cm->type == MSGTYPE_ACK) {
                        list_add_tail(&(cm->lh), &(cm->nb->cmsg_queue_ack));
                } else if (cm->type == MSGTYPE_ACK_CONN) {
                        list_add_tail(&(cm->lh), &(cm->nb->cmsg_queue_ackconn));
                } else if (cm->type == MSGTYPE_CONNDATA &&
                                cm->msg.conn_data.highlatency != 0) {
                        list_add_tail(&(cm->lh),
                                        &(cm->nb->cmsg_queue_conndata_highlat));
                } else if (cm->type == MSGTYPE_CONNDATA) {
                        list_add_tail(&(cm->lh),
                                        &(cm->nb->cmsg_queue_conndata_lowlat));
                } else {
                        list_add_tail(&(cm->lh), &(cm->nb->cmsg_queue_other));
                }
        } else {
                BUG_ON(src == ADDCMSG_SRC_SPLITCONNDATA &&
                                cm->type != MSGTYPE_CONNDATA);
                BUG_ON(src == ADDCMSG_SRC_READD &&
                                cm->type == MSGTYPE_ACK_CONN);

                if (cm->type == MSGTYPE_ACK) {
                        list_add(&(cm->lh), &(cm->nb->cmsg_queue_ack));
                } else if (cm->type == MSGTYPE_ACK_CONN) {
                        list_add(&(cm->lh), &(cm->nb->cmsg_queue_ackconn));
                } else if (cm->type == MSGTYPE_CONNDATA &&
                                cm->msg.conn_data.highlatency != 0) {
                        list_add(&(cm->lh),
                                        &(cm->nb->cmsg_queue_conndata_highlat));
                } else if (cm->type == MSGTYPE_CONNDATA) {
                        list_add(&(cm->lh),
                                        &(cm->nb->cmsg_queue_conndata_lowlat));
                } else {
                        list_add(&(cm->lh), &(cm->nb->cmsg_queue_other));
                }
        }

        return 0;
}
/**
 * enqueue_control_msg - queue @cm and kick the send timer if appropriate
 *
 * Stamps time_added for fresh messages (so their batching deadline is
 * measured from now), queues under cmsg_lock, and reschedules the timer
 * except for re-adds/retransmits (their callers batch the reschedule).
 */
static void enqueue_control_msg(struct control_msg_out *cm, int src)
{
        BUG_ON(cm == 0);
        BUG_ON(cm->nb == 0);

        if (src == ADDCMSG_SRC_NEW)
                cm->time_added = jiffies;

        spin_lock_bh(&(cm->nb->cmsg_lock));

        if (_enqueue_control_msg(cm, src) != 0)
                goto out;

        if (src != ADDCMSG_SRC_READD && src != ADDCMSG_SRC_RETRANS)
                schedule_controlmsg_timer(cm->nb);

out:
        spin_unlock_bh(&(cm->nb->cmsg_lock));
}
/**
 * send_pong - queue a pong reply for @cookie to neighbor @nb
 *
 * Uses _alloc_control_msg() and silently drops the pong on allocation
 * failure (the peer will retransmit its ping).
 */
void send_pong(struct neighbor *nb, __u32 cookie)
{
        struct control_msg_out *cm = _alloc_control_msg(nb);

        if (unlikely(cm == 0))
                return;

        cm->nb = nb;
        cm->type = MSGTYPE_PONG;
        cm->msg.pong.cookie = cookie;
        cm->msg.pong.type = MSGTYPE_PONG_TIMEENQUEUED;
        cm->msg.pong.time_enqueued = ktime_get();
        /* 9 == on-wire pong command length - TODO use a named constant */
        cm->length = 9;
        enqueue_control_msg(cm, ADDCMSG_SRC_NEW);
}
/**
 * send_ack - queue a kpacket ack for @seqno to neighbor @nb
 *
 * Dropped silently on allocation failure (peer retransmits).
 */
void send_ack(struct neighbor *nb, __u64 seqno)
{
        struct control_msg_out *cm = alloc_control_msg(nb, ACM_PRIORITY_HIGH);

        if (unlikely(cm == 0))
                return;

        cm->nb = nb;
        cm->type = MSGTYPE_ACK;
        cm->msg.ack.seqno = seqno;
        /* 7 == on-wire ack command length - TODO use a named constant */
        cm->length = 7;
        enqueue_control_msg(cm, ADDCMSG_SRC_NEW);
}
1911 static void set_ooolen_flags(struct control_msg_out *cm)
1913 cm->msg.ack_conn.flags = (cm->msg.ack_conn.flags &
1914 (~KP_ACK_CONN_FLAGS_OOO));
1915 cm->msg.ack_conn.flags = (cm->msg.ack_conn.flags |
1916 ooolen_to_flags(cm->msg.ack_conn.length));
/* cmsg_lock must be held */
/**
 * remove_pending_ackconn - unqueue and free a scheduled ack_conn message
 *
 * Removes @cm from its cmsg queue and the per-conn acks_pending list,
 * fixes the length accounting, drops the conn reference it held, and
 * frees the message (type cleared first so free_control_msg() does not
 * treat it as a live ack_conn).
 */
static void remove_pending_ackconn(struct control_msg_out *cm)
{
        cm->nb->cmsg_otherlength -= cm->length;
        list_del(&(cm->lh));

        list_del(&(cm->msg.ack_conn.conn_acks));
        kref_put(&(cm->msg.ack_conn.src_in->ref), free_conn);
        cm->msg.ack_conn.src_in = 0;

        cm->type = 0;
        free_control_msg(cm);
}
/* cmsg_lock must be held */
/**
 * recalc_scheduled_ackconn_size - refresh cm->length after a flags change
 *
 * The wire size of an ack_conn depends on its flags (6 fixed bytes plus
 * flag-dependent fields); keep the neighbor's queued-bytes accounting
 * in sync with the new length.
 */
static void recalc_scheduled_ackconn_size(struct control_msg_out *cm)
{
        cm->nb->cmsg_otherlength -= cm->length;
        cm->length = 6 + ack_conn_len(cm->msg.ack_conn.flags);
        cm->nb->cmsg_otherlength += cm->length;
}
1941 /* cmsg_lock must be held */
1942 static int _try_merge_ackconn(struct conn *src_in_l,
1943 struct control_msg_out *fromcm, struct control_msg_out *tocm,
1944 int from_newack)
1946 if (ooolen(fromcm->msg.ack_conn.flags) != 0 &&
1947 ooolen(tocm->msg.ack_conn.flags) != 0) {
1948 __u64 tocmseqno = tocm->msg.ack_conn.seqno_ooo;
1949 __u64 tocmlength = tocm->msg.ack_conn.length;
1950 __u64 fromcmseqno = fromcm->msg.ack_conn.seqno_ooo;
1951 __u64 fromcmlength = fromcm->msg.ack_conn.length;
1953 if (seqno_eq(tocmseqno, fromcmseqno)) {
1954 if (fromcmlength > tocmlength)
1955 tocm->msg.ack_conn.length = fromcmlength;
1956 } else if (seqno_after(fromcmseqno, tocmseqno) &&
1957 seqno_before_eq(fromcmseqno, tocmseqno +
1958 tocmlength)) {
1959 __u64 len = seqno_clean(fromcmseqno + fromcmlength -
1960 tocmseqno);
1961 BUG_ON(len > U32_MAX);
1962 tocm->msg.ack_conn.length = (__u32) len;
1963 } else if (seqno_before(fromcmseqno, tocmseqno) &&
1964 seqno_after_eq(fromcmseqno, tocmseqno)) {
1965 __u64 len = seqno_clean(tocmseqno + tocmlength -
1966 fromcmseqno);
1967 BUG_ON(len > U32_MAX);
1968 tocm->msg.ack_conn.seqno_ooo = fromcmseqno;
1969 tocm->msg.ack_conn.length = (__u32) len;
1970 } else {
1971 return 1;
1973 set_ooolen_flags(tocm);
1976 if ((fromcm->msg.ack_conn.flags &
1977 KP_ACK_CONN_FLAGS_SEQNO) != 0) {
1978 if ((tocm->msg.ack_conn.flags & KP_ACK_CONN_FLAGS_SEQNO) == 0)
1979 goto setseqno;
1981 BUG_ON(seqno_eq(fromcm->msg.ack_conn.ack_seqno,
1982 tocm->msg.ack_conn.ack_seqno));
1983 if (seqno_after_eq(tocm->msg.ack_conn.ack_seqno,
1984 fromcm->msg.ack_conn.ack_seqno)) {
1985 BUG_ON(seqno_after(fromcm->msg.ack_conn.seqno,
1986 tocm->msg.ack_conn.seqno));
1987 goto skipseqno;
1990 BUG_ON(seqno_before(fromcm->msg.ack_conn.seqno,
1991 tocm->msg.ack_conn.seqno));
1993 setseqno:
1994 tocm->msg.ack_conn.flags = (tocm->msg.ack_conn.flags |
1995 KP_ACK_CONN_FLAGS_SEQNO);
1996 tocm->msg.ack_conn.seqno = fromcm->msg.ack_conn.seqno;
1997 tocm->msg.ack_conn.ack_seqno = fromcm->msg.ack_conn.ack_seqno;
1999 skipseqno:
2000 if ((fromcm->msg.ack_conn.flags &
2001 KP_ACK_CONN_FLAGS_WINDOW) != 0)
2002 tocm->msg.ack_conn.flags = (tocm->msg.ack_conn.flags |
2003 KP_ACK_CONN_FLAGS_WINDOW);
2007 if (ooolen(fromcm->msg.ack_conn.flags) != 0) {
2008 tocm->msg.ack_conn.seqno_ooo = fromcm->msg.ack_conn.seqno_ooo;
2009 tocm->msg.ack_conn.length = fromcm->msg.ack_conn.length;
2010 set_ooolen_flags(tocm);
2013 if ((fromcm->msg.ack_conn.flags & KP_ACK_CONN_FLAGS_PRIORITY) != 0) {
2014 BUG_ON((tocm->msg.ack_conn.flags &
2015 KP_ACK_CONN_FLAGS_PRIORITY) != 0);
2016 tocm->msg.ack_conn.priority_seqno =
2017 fromcm->msg.ack_conn.priority_seqno;
2018 tocm->msg.ack_conn.priority = fromcm->msg.ack_conn.priority;
2021 recalc_scheduled_ackconn_size(tocm);
2022 if (from_newack == 0)
2023 remove_pending_ackconn(fromcm);
2025 return 0;
/* cmsg_lock must be held */
/**
 * try_merge_ackconns - merge all other pending ack_conns of a conn into @cm
 *
 * Walks the remainder of the conn's acks_pending list (starting after
 * @cm) and tries to fold each entry into @cm; the cursor is advanced
 * before merging because a successful merge removes the entry.
 */
static void try_merge_ackconns(struct conn *src_in_l,
                struct control_msg_out *cm)
{
        struct list_head *currlh = cm->msg.ack_conn.conn_acks.next;

        while (currlh != &(src_in_l->source.in.acks_pending)) {
                struct control_msg_out *currcm = container_of(currlh,
                                struct control_msg_out,
                                msg.ack_conn.conn_acks);
                currlh = currlh->next;
                remove_connack_oooflag_ifold(src_in_l, currcm);
                _try_merge_ackconn(src_in_l, currcm, cm, 0);
        }
}
/**
 * merge_or_enqueue_ackconn - queue @cm, folding it into a pending ack_conn
 *      for the same connection if possible
 * @src_in_l: connection (rcv_lock held)
 * @cm: fresh MSGTYPE_ACK_CONN message
 * @src: ADDCMSG_SRC_* forwarded to enqueue_control_msg()
 *
 * If a merge succeeds, @cm is freed (with flags cleared first so
 * free_control_msg() does not touch priority_send_allowed - the conn
 * may already be locked by our caller) and the surviving message absorbs
 * any other pending acks.  Otherwise @cm is linked into acks_pending and
 * enqueued normally.
 */
static void merge_or_enqueue_ackconn(struct conn *src_in_l,
                struct control_msg_out *cm, int src)
{
        struct list_head *currlh;

        BUG_ON(src_in_l->sourcetype != SOURCE_IN);

        spin_lock_bh(&(cm->nb->cmsg_lock));

        currlh = src_in_l->source.in.acks_pending.next;

        while (currlh != &(src_in_l->source.in.acks_pending)) {
                struct control_msg_out *currcm = container_of(currlh,
                                struct control_msg_out,
                                msg.ack_conn.conn_acks);

                BUG_ON(currcm->nb != cm->nb);
                BUG_ON(currcm->type != MSGTYPE_ACK_CONN);
                BUG_ON(cm->msg.ack_conn.src_in != src_in_l);
                BUG_ON(currcm->msg.ack_conn.conn_id !=
                                cm->msg.ack_conn.conn_id);

                if (_try_merge_ackconn(src_in_l, cm, currcm, 1) == 0) {
                        try_merge_ackconns(src_in_l, currcm);
                        schedule_controlmsg_timer(currcm->nb);
                        spin_unlock_bh(&(currcm->nb->cmsg_lock));
                        /*
                         * flags:
                         * when calling free_control_msg here conn may already
                         * be locked and priority_send_allowed and
                         * priority_send_allowed should not be reset
                         */
                        cm->msg.ack_conn.flags = 0;
                        free_control_msg(cm);
                        return;
                }

                currlh = currlh->next;
        }

        list_add_tail(&(cm->msg.ack_conn.conn_acks),
                        &(src_in_l->source.in.acks_pending));

        spin_unlock_bh(&(cm->nb->cmsg_lock));

        enqueue_control_msg(cm, src);
}
/**
 * try_update_ackconn_seqno - piggyback the current in-order seqno/window
 *      on an already-pending ack_conn of @src_in_l
 *
 * Returns 0 if a pending ack_conn was found and updated (and other
 * pending acks merged into it), 1 if none is pending and the caller
 * must allocate a new message.  rcv_lock of @src_in_l held by caller.
 */
static int try_update_ackconn_seqno(struct conn *src_in_l)
{
        int rc = 1;

        spin_lock_bh(&(src_in_l->source.in.nb->cmsg_lock));

        if (list_empty(&(src_in_l->source.in.acks_pending)) == 0) {
                struct control_msg_out *cm = container_of(
                                src_in_l->source.in.acks_pending.next,
                                struct control_msg_out,
                                msg.ack_conn.conn_acks);
                BUG_ON(cm->nb != src_in_l->source.in.nb);
                BUG_ON(cm->type != MSGTYPE_ACK_CONN);
                BUG_ON(cm->msg.ack_conn.src_in != src_in_l);
                BUG_ON(cm->msg.ack_conn.conn_id !=
                                src_in_l->reversedir->target.out.conn_id);

                cm->msg.ack_conn.flags = (cm->msg.ack_conn.flags |
                                KP_ACK_CONN_FLAGS_SEQNO |
                                KP_ACK_CONN_FLAGS_WINDOW);
                cm->msg.ack_conn.seqno = src_in_l->source.in.next_seqno;

                src_in_l->source.in.ack_seqno++;
                cm->msg.ack_conn.ack_seqno = src_in_l->source.in.ack_seqno;

                remove_connack_oooflag_ifold(src_in_l, cm);
                recalc_scheduled_ackconn_size(cm);

                try_merge_ackconns(src_in_l, cm);

                rc = 0;
        }

        spin_unlock_bh(&(src_in_l->source.in.nb->cmsg_lock));

        return rc;
}
/**
 * send_ack_conn_ifneeded - send a conn ack if required
 * @src_in_l: connection (rcv_lock held)
 * @seqno_ooo/@ooo_length: out-of-order range to report (length 0 = none)
 *
 * An out-of-order range always triggers an ack (allocation permitting).
 * Otherwise an ack is sent only when explicitly flagged as needed or
 * when the advertised receive window fell noticeably behind what the
 * remote side last saw (less than 7/8, and past WINDOW_ENCODE_MIN).
 * Prefers updating an already-pending ack_conn over allocating.
 */
void send_ack_conn_ifneeded(struct conn *src_in_l, __u64 seqno_ooo,
                __u32 ooo_length)
{
        struct control_msg_out *cm;

        BUG_ON(src_in_l->sourcetype != SOURCE_IN);

        BUG_ON(ooo_length > 0 && seqno_before_eq(seqno_ooo,
                        src_in_l->source.in.next_seqno));

        update_windowlimit(src_in_l);

        if (ooo_length != 0) {
                cm = alloc_control_msg(src_in_l->source.in.nb,
                                ACM_PRIORITY_LOW);
                if (cm != 0)
                        goto add;
        }

        if (src_in_l->source.in.inorder_ack_needed != 0)
                goto ack_needed;

        /* window too small to encode a meaningful update */
        if (seqno_clean(src_in_l->source.in.window_seqnolimit -
                        src_in_l->source.in.next_seqno) < WINDOW_ENCODE_MIN)
                return;

        /* remote's view of the window is still close enough (>= 7/8) */
        if (seqno_clean(src_in_l->source.in.window_seqnolimit_remote -
                        src_in_l->source.in.next_seqno) >= WINDOW_ENCODE_MIN &&
                        seqno_clean(src_in_l->source.in.window_seqnolimit -
                        src_in_l->source.in.next_seqno) * 7 <
                        seqno_clean(
                        src_in_l->source.in.window_seqnolimit_remote -
                        src_in_l->source.in.next_seqno) * 8)
                return;

ack_needed:
        if (try_update_ackconn_seqno(src_in_l) == 0)
                goto out;

        cm = alloc_control_msg(src_in_l->source.in.nb, ACM_PRIORITY_MED);
        if (cm == 0) {
                printk(KERN_ERR "error allocating inorder ack");
                return;
        }

add:
        cm->type = MSGTYPE_ACK_CONN;
        src_in_l->source.in.ack_seqno++;
        cm->msg.ack_conn.ack_seqno = src_in_l->source.in.ack_seqno;
        kref_get(&(src_in_l->ref));
        cm->msg.ack_conn.src_in = src_in_l;
        cm->msg.ack_conn.conn_id = src_in_l->reversedir->target.out.conn_id;
        cm->msg.ack_conn.seqno = src_in_l->source.in.next_seqno;
        cm->msg.ack_conn.seqno_ooo = seqno_ooo;
        cm->msg.ack_conn.length = ooo_length;
        cm->msg.ack_conn.flags = KP_ACK_CONN_FLAGS_SEQNO |
                        KP_ACK_CONN_FLAGS_WINDOW;
        set_ooolen_flags(cm);
        cm->length = 6 + ack_conn_len(cm->msg.ack_conn.flags);

        merge_or_enqueue_ackconn(src_in_l, cm, ADDCMSG_SRC_NEW);

out:
        src_in_l->source.in.inorder_ack_needed = 0;
        src_in_l->source.in.window_seqnolimit_remote =
                        src_in_l->source.in.window_seqnolimit;
}
/**
 * try_add_priority - piggyback a priority update on a pending ack_conn
 * @trgt_out_l: outgoing conn whose priority changed (locked by caller)
 * @priority: new priority value
 *
 * Returns 0 if a pending ack_conn of the reverse direction was updated,
 * 1 if none is pending and the caller must send a dedicated message.
 */
static int try_add_priority(struct conn *trgt_out_l, __u8 priority)
{
        int rc = 1;
        struct conn *src_in = trgt_out_l->reversedir;

        spin_lock_bh(&(trgt_out_l->target.out.nb->cmsg_lock));

        if (list_empty(&(src_in->source.in.acks_pending)) == 0) {
                struct control_msg_out *cm = container_of(
                                src_in->source.in.acks_pending.next,
                                struct control_msg_out,
                                msg.ack_conn.conn_acks);
                BUG_ON(cm->nb != trgt_out_l->target.out.nb);
                BUG_ON(cm->type != MSGTYPE_ACK_CONN);
                BUG_ON(cm->msg.ack_conn.src_in != trgt_out_l->reversedir);
                BUG_ON(cm->msg.ack_conn.conn_id !=
                                trgt_out_l->target.out.conn_id);

                BUG_ON((cm->msg.ack_conn.flags & KP_ACK_CONN_FLAGS_PRIORITY) !=
                                0);
                cm->msg.ack_conn.flags = (cm->msg.ack_conn.flags |
                                KP_ACK_CONN_FLAGS_PRIORITY);
                cm->msg.ack_conn.priority_seqno =
                                trgt_out_l->target.out.priority_seqno;
                cm->msg.ack_conn.priority = priority;
                recalc_scheduled_ackconn_size(cm);

                rc = 0;
        }

        spin_unlock_bh(&(trgt_out_l->target.out.nb->cmsg_lock));

        return rc;
}
/**
 * send_priority - announce a new priority for @trgt_out_ll
 * @trgt_out_ll: outgoing conn (locked by caller)
 * @force: if 0, only piggyback on a pending ack_conn; never allocate
 * @priority: new priority value
 *
 * On success (or non-forced failure is silently accepted) the conn's
 * priority bookkeeping is advanced; priority_seqno is bumped only when
 * something was actually sent/queued.
 */
void send_priority(struct conn *trgt_out_ll, int force, __u8 priority)
{
        struct control_msg_out *cm;

        if (try_add_priority(trgt_out_ll, priority) == 0)
                goto out;

        if (force == 0)
                return;

        cm = alloc_control_msg(trgt_out_ll->target.out.nb, ACM_PRIORITY_LOW);

        if (cm == 0)
                return;

        cm->type = MSGTYPE_ACK_CONN;
        cm->msg.ack_conn.flags = KP_ACK_CONN_FLAGS_PRIORITY;
        kref_get(&(trgt_out_ll->reversedir->ref));
        BUG_ON(trgt_out_ll->targettype != TARGET_OUT);
        cm->msg.ack_conn.src_in = trgt_out_ll->reversedir;
        cm->msg.ack_conn.conn_id = trgt_out_ll->target.out.conn_id;
        cm->msg.ack_conn.priority_seqno =
                        trgt_out_ll->target.out.priority_seqno;
        cm->msg.ack_conn.priority = priority;

        cm->length = 6 + ack_conn_len(cm->msg.ack_conn.flags);
        merge_or_enqueue_ackconn(trgt_out_ll->reversedir, cm, ADDCMSG_SRC_NEW);

out:
        trgt_out_ll->target.out.priority_last = priority;
        trgt_out_ll->target.out.priority_seqno++;
        trgt_out_ll->target.out.priority_send_allowed = 0;
}
/**
 * free_ack_conns - drop all pending ack_conn messages of @src_in_lx
 *
 * Called e.g. when the connection goes away; reschedules the cmsg timer
 * if anything was removed so the timer deadline reflects the new queue.
 */
void free_ack_conns(struct conn *src_in_lx)
{
        int changed = 0;
        spin_lock_bh(&(src_in_lx->source.in.nb->cmsg_lock));
        while (list_empty(&(src_in_lx->source.in.acks_pending)) == 0) {
                struct list_head *currlh =
                                src_in_lx->source.in.acks_pending.next;
                struct control_msg_out *currcm = container_of(currlh,
                                struct control_msg_out,
                                msg.ack_conn.conn_acks);

                remove_pending_ackconn(currcm);
                changed = 1;
        }
        if (changed)
                schedule_controlmsg_timer(src_in_lx->source.in.nb);
        spin_unlock_bh(&(src_in_lx->source.in.nb->cmsg_lock));
}
/**
 * send_connect_success - queue a connect-success reply
 * @cm: preallocated control message (ownership passes here)
 * @conn_id: id of the accepted connection
 * @src_in: incoming conn; a reference is taken for the message
 */
void send_connect_success(struct control_msg_out *cm, __u32 conn_id,
                struct conn *src_in)
{
        cm->type = MSGTYPE_CONNECT_SUCCESS;
        cm->msg.connect_success.conn_id = conn_id;
        kref_get(&(src_in->ref));
        cm->msg.connect_success.src_in = src_in;
        /* 6 == on-wire command length - TODO use a named constant */
        cm->length = 6;
        enqueue_control_msg(cm, ADDCMSG_SRC_NEW);
}
/**
 * send_connect_nb - queue a connect request to a neighbor
 * @cm: preallocated control message (ownership passes here)
 * @conn_id: proposed connection id
 * @seqno1/@seqno2: initial sequence numbers for both directions
 * @src_in_ll: incoming conn (locked); a reference is taken
 */
void send_connect_nb(struct control_msg_out *cm, __u32 conn_id, __u64 seqno1,
                __u64 seqno2, struct conn *src_in_ll)
{
        cm->type = MSGTYPE_CONNECT;
        cm->msg.connect.conn_id = conn_id;
        cm->msg.connect.seqno1 = seqno1;
        cm->msg.connect.seqno2 = seqno2;
        kref_get(&(src_in_ll->ref));
        BUG_ON(src_in_ll->sourcetype != SOURCE_IN);
        cm->msg.connect.src_in = src_in_ll;
        /* 20 == on-wire command length - TODO use a named constant */
        cm->length = 20;
        enqueue_control_msg(cm, ADDCMSG_SRC_NEW);
}
/**
 * send_conndata - queue connection payload data for transmission
 * @cm: preallocated control message (ownership passes here)
 * @conn_id/@seqno: stream position of the data
 * @data_orig: allocation to kfree when the message is destroyed
 * @data: start of payload within @data_orig
 * @datalen: payload byte count
 * @snd_delayed_lowbuf/@flush: transmit hints
 * @highlatency: selects the high- vs low-latency conndata queue
 * @cr: retransmit record for rescheduling if the message is dropped
 */
void send_conndata(struct control_msg_out *cm, __u32 conn_id, __u64 seqno,
                char *data_orig, char *data, __u32 datalen,
                __u8 snd_delayed_lowbuf, __u8 flush, __u8 highlatency,
                struct conn_retrans *cr)
{
        cm->type = MSGTYPE_CONNDATA;
        cm->msg.conn_data.conn_id = conn_id;
        cm->msg.conn_data.seqno = seqno;
        cm->msg.conn_data.data_orig = data_orig;
        cm->msg.conn_data.data = data;
        cm->msg.conn_data.datalen = datalen;
        cm->msg.conn_data.snd_delayed_lowbuf = snd_delayed_lowbuf;
        cm->msg.conn_data.flush = flush;
        cm->msg.conn_data.highlatency = highlatency;
        cm->msg.conn_data.cr = cr;
        cm->length = KP_CONN_DATA_CMDLEN + datalen;
        enqueue_control_msg(cm, ADDCMSG_SRC_NEW);
}
/**
 * send_reset_conn - queue a reset for @conn_id to neighbor @nb
 * @lowprio: nonzero to use low allocation priority (e.g. timeout resets)
 *
 * Returns 0 on success or when the neighbor is already killed (no reset
 * needed), 1 on allocation failure.
 */
int send_reset_conn(struct neighbor *nb, __u32 conn_id, int lowprio)
{
        struct control_msg_out *cm;

        if (unlikely(get_neigh_state(nb) == NEIGHBOR_STATE_KILLED))
                return 0;

        cm = alloc_control_msg(nb, lowprio ?
                        ACM_PRIORITY_LOW : ACM_PRIORITY_MED);

        if (unlikely(cm == 0))
                return 1;

        cm->type = MSGTYPE_RESET_CONN;
        cm->msg.reset_conn.conn_id = conn_id;
        /* 5 == on-wire command length - TODO use a named constant */
        cm->length = 5;

        enqueue_control_msg(cm, ADDCMSG_SRC_NEW);

        return 0;
}
2352 int __init cor_kgen_init(void)
2354 controlmsg_slab = kmem_cache_create("cor_controlmsg",
2355 sizeof(struct control_msg_out), 8, 0, 0);
2356 if (unlikely(controlmsg_slab == 0))
2357 return -ENOMEM;
2359 controlretrans_slab = kmem_cache_create("cor_controlretransmsg",
2360 sizeof(struct control_retrans), 8, 0, 0);
2361 if (unlikely(controlretrans_slab == 0))
2362 return -ENOMEM;
2364 return 0;
/* module license tag, matching the GPLv2+ notice at the top of the file */
MODULE_LICENSE("GPL");