checkpatch fixes
[cor.git] / net / cor / neigh_snd.c
blob182e91669b81c5404e95c634422f4a12746e1123
/**
 * Connection oriented routing
 * Copyright (C) 2007-2021 Michael Blizek
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version 2
 * of the License, or (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 */
16 #include <asm/byteorder.h>
18 #include "cor.h"
20 /* not sent over the network - internal meaning only */
21 #define MSGTYPE_PONG 1
22 #define MSGTYPE_ACK 2
23 #define MSGTYPE_ACK_CONN 3
24 #define MSGTYPE_CONNECT 4
25 #define MSGTYPE_CONNECT_SUCCESS 5
26 #define MSGTYPE_RESET_CONN 6
27 #define MSGTYPE_CONNDATA 7
28 #define MSGTYPE_SET_MAX_CMSG_DELAY 8
29 #define MSGTYPE_SET_RCVMTU 9
31 #define MSGTYPE_PONG_TIMEENQUEUED 1
32 #define MSGTYPE_PONG_RESPDELAY 2
/* One outgoing control message for a neighbor. A message sits either on
 * one of the per-neighbor cmsg queues or on the msgs list of a
 * cor_control_retrans packet (see lh below); type selects which member
 * of the msg union is valid. */
struct cor_control_msg_out {
	__u8 type;		/* MSGTYPE_* (internal only, never sent on the wire) */
	__u32 length;		/* encoded size in bytes once packed into a packet */
	struct kref ref;
	struct cor_neighbor *nb;

	/* either queue or control_retrans_packet */
	struct list_head lh;

	unsigned long time_added;	/* jiffies when the message was queued */

	union {
		struct {
			__u32 cookie;
			__u8 type;	/* MSGTYPE_PONG_TIMEENQUEUED or _RESPDELAY */

			ktime_t ping_rcvtime;
			ktime_t time_enqueued;
		} pong;

		struct {
			__u64 seqno;
			__u8 fast;
		} ack;

		struct {
			struct cor_conn *src_in;	/* holds a conn kref */
			struct list_head conn_acks;
			__u32 conn_id;
			__u64 seqno;
			__u64 seqno_ooo;
			__u32 length;	/* length of the out-of-order range */

			__u8 flags;	/* KP_ACK_CONN_FLAGS_* */

			__u8 bufsize_changerate;

			__u16 priority;
			__u8 priority_seqno;

			__u8 is_highlatency;
			__u8 queue;	/* CMSGQUEUE_ACK_CONN_* it was taken from */

			__u32 ack_seqno;
		} ack_conn;

		struct {
			__u32 conn_id;
			__u64 seqno1;
			__u64 seqno2;
			struct cor_conn *src_in;	/* holds a conn kref */
		} connect;

		struct {
			__u32 conn_id;
			struct cor_conn *src_in;	/* holds a conn kref */
		} connect_success;

		struct {
			struct rb_node rbn;	/* in nb->pending_conn_resets_rb */
			__u8 in_pending_conn_resets;
			__u32 conn_id;
		} reset_conn;

		struct {
			__u32 conn_id;
			__u64 seqno;
			__u32 datalen;
			__u8 windowused;
			__u8 flush;
			__u8 highlatency;
			char *data_orig;	/* original buffer (owner) */
			char *data;		/* current send position within data_orig */
			struct cor_conn_retrans *cr;
		} conn_data;

		struct {
			/* all delays in microseconds */
			__u32 ack_fast_delay;
			__u32 ack_slow_delay;
			__u32 ackconn_lowlatency_delay;
			__u32 ackconn_highlatency_delay;
			__u32 pong_delay;
		} set_max_cmsg_delay;

		struct {
			__u32 rcvmtu;
		} set_rcvmtu;
	} msg;
};
/* One sent kernel packet awaiting acknowledgement. Holds the control
 * messages it carried (msgs) so they can be requeued on timeout; indexed
 * by seqno in nb->kp_retransmits_rb and ordered by send time on one of
 * the per-neighbor retrans timeout lists. */
struct cor_control_retrans {
	struct kref ref;

	struct cor_neighbor *nb;
	__u64 seqno;		/* kernel-packet seqno this packet was sent with */

	unsigned long timeout;	/* jiffies when a retransmit becomes due */

	struct list_head msgs;	/* cor_control_msg_out.lh entries */

	struct rb_node rbn;	/* in nb->kp_retransmits_rb */
	struct list_head timeout_list;	/* nb->retrans_fast_list or _slow_list */
};
139 static struct kmem_cache *cor_controlmsg_slab;
140 static struct kmem_cache *cor_controlretrans_slab;
142 static atomic_t cor_cmsg_othercnt = ATOMIC_INIT(0);
144 #define ADDCMSG_SRC_NEW 1
145 #define ADDCMSG_SRC_SPLITCONNDATA 2
146 #define ADDCMSG_SRC_READD 3
147 #define ADDCMSG_SRC_RETRANS 4
149 static void cor_enqueue_control_msg(struct cor_control_msg_out *msg, int src);
151 static void cor_try_merge_ackconns(struct cor_conn *src_in_l,
152 struct cor_control_msg_out *cm);
154 static void cor_merge_or_enqueue_ackconn(struct cor_conn *src_in_l,
155 struct cor_control_msg_out *cm, int src);
/* Allocate and zero one control message from the slab (atomic context ok)
 * and take the initial reference; returns 0 on allocation failure.
 * Accounting against the per-neighbor/global limits is done by the
 * caller (cor_alloc_control_msg). */
static struct cor_control_msg_out *_cor_alloc_control_msg(
		struct cor_neighbor *nb)
{
	struct cor_control_msg_out *cm;

	BUG_ON(nb == 0);

	cm = kmem_cache_alloc(cor_controlmsg_slab, GFP_ATOMIC);
	if (unlikely(cm == 0))
		return 0;
	memset(cm, 0, sizeof(struct cor_control_msg_out));
	kref_init(&(cm->ref));
	cm->nb = nb;
	return cm;
}
173 static int cor_calc_limit(int limit, int priority)
175 if (priority == ACM_PRIORITY_LOW)
176 return (limit+1)/2;
177 else if (priority == ACM_PRIORITY_MED)
178 return (limit * 3 + 1)/4;
179 else if (priority == ACM_PRIORITY_HIGH)
180 return limit;
181 else
182 BUG();
/* Allocate a control message, enforcing per-neighbor and global counts.
 * Each neighbor always gets GUARANTEED_CMSGS_PER_NEIGH messages; beyond
 * that the (priority-scaled) MAX_CMSGS_PER_NEIGH and MAX_CMSGS caps
 * apply. On failure the counters incremented here are rolled back and 0
 * is returned. Note: the "goto full" target sits inside the failure
 * branch of the allocation check so both paths share the rollback. */
struct cor_control_msg_out *cor_alloc_control_msg(struct cor_neighbor *nb,
		int priority)
{
	struct cor_control_msg_out *cm = 0;

	long packets1;
	long packets2;

	BUG_ON(nb == 0);

	packets1 = atomic_inc_return(&(nb->cmsg_othercnt));
	packets2 = atomic_inc_return(&(cor_cmsg_othercnt));

	BUG_ON(packets1 <= 0);
	BUG_ON(packets2 <= 0);

	if (packets1 <= cor_calc_limit(GUARANTEED_CMSGS_PER_NEIGH, priority))
		goto alloc;

	if (unlikely(unlikely(packets1 > cor_calc_limit(MAX_CMSGS_PER_NEIGH,
			priority)) ||
			unlikely(packets2 > cor_calc_limit(MAX_CMSGS,
			priority))))
		goto full;

alloc:
	cm = _cor_alloc_control_msg(nb);
	if (unlikely(cm == 0)) {
full:
		/* undo the accounting done above; cm stays 0 */
		/* printk(KERN_ERR "cor_alloc_control_msg failed %ld %ld\n",
		   packets1, packets2); */
		atomic_dec(&(nb->cmsg_othercnt));
		atomic_dec(&(cor_cmsg_othercnt));
	}

	return cm;
}
/* kref release callback: return the control message to its slab. */
static void cor_cmsg_kref_free(struct kref *ref)
{
	struct cor_control_msg_out *cm = container_of(ref,
			struct cor_control_msg_out, ref);

	kmem_cache_free(cor_controlmsg_slab, cm);
}
/* Drop ownership of a control message: reverse the allocation accounting
 * (pongs are counted separately and excluded here), release the per-type
 * resources (conn krefs, pending-reset rb entry), then drop the final
 * reference. */
void cor_free_control_msg(struct cor_control_msg_out *cm)
{
	if (likely(cm->type != MSGTYPE_PONG)) {
		atomic_dec(&(cm->nb->cmsg_othercnt));
		atomic_dec(&(cor_cmsg_othercnt));
	}

	if (cm->type == MSGTYPE_ACK_CONN) {
		BUG_ON(cm->msg.ack_conn.src_in == 0);
		if ((cm->msg.ack_conn.flags & KP_ACK_CONN_FLAGS_PRIORITY) != 0) {
			struct cor_conn *trgt_out = cor_get_conn_reversedir(
					cm->msg.ack_conn.src_in);

			spin_lock_bh(&(trgt_out->rcv_lock));
			BUG_ON(trgt_out->targettype != TARGET_OUT);
			/* NOTE(review): testing != 0 and then assigning 1
			 * looks odd — presumably this should re-enable
			 * priority sending (test == 0, set 1); confirm
			 * against cor_conn_refresh_priority users. */
			if (trgt_out->trgt.out.priority_send_allowed != 0) {
				trgt_out->trgt.out.priority_send_allowed = 1;
				spin_unlock_bh(&(trgt_out->rcv_lock));
				cor_conn_refresh_priority(trgt_out, 0);
			} else {
				spin_unlock_bh(&(trgt_out->rcv_lock));
			}
		}

		cor_conn_kref_put(cm->msg.ack_conn.src_in,
				"cor_control_msg_out ack_conn");
		cm->msg.ack_conn.src_in = 0;
	} else if (cm->type == MSGTYPE_CONNECT) {
		BUG_ON(cm->msg.connect.src_in == 0);
		cor_conn_kref_put(cm->msg.connect.src_in,
				"cor_control_msg_out connect");
		cm->msg.connect.src_in = 0;
	} else if (cm->type == MSGTYPE_CONNECT_SUCCESS) {
		BUG_ON(cm->msg.connect_success.src_in == 0);
		cor_conn_kref_put(cm->msg.connect_success.src_in,
				"cor_control_msg_out connect_success");
		cm->msg.connect_success.src_in = 0;
	} else if (cm->type == MSGTYPE_RESET_CONN) {
		spin_lock_bh(&(cm->nb->cmsg_lock));
		if (cm->msg.reset_conn.in_pending_conn_resets != 0) {
			rb_erase(&(cm->msg.reset_conn.rbn),
					&(cm->nb->pending_conn_resets_rb));
			cm->msg.reset_conn.in_pending_conn_resets = 0;

			/* drop the rb-tree reference */
			kref_put(&(cm->ref), cor_kreffree_bug);
		}
		spin_unlock_bh(&(cm->nb->cmsg_lock));
	}

	kref_put(&(cm->ref), cor_cmsg_kref_free);
}
/* kref release callback for a retrans packet: free every control message
 * still attached to it (adjusting the pong retrans counter) and return
 * the packet to its slab. */
static void cor_free_control_retrans(struct kref *ref)
{
	struct cor_control_retrans *cr = container_of(ref,
			struct cor_control_retrans, ref);

	while (list_empty(&(cr->msgs)) == 0) {
		struct cor_control_msg_out *cm = container_of(cr->msgs.next,
				struct cor_control_msg_out, lh);

		if (cm->type == MSGTYPE_PONG)
			atomic_dec(&(cm->nb->cmsg_pongs_retrans_cnt));

		list_del(&(cm->lh));
		cor_free_control_msg(cm);
	}

	kmem_cache_free(cor_controlretrans_slab, cr);
}
299 struct cor_control_retrans *cor_get_control_retrans(
300 struct cor_neighbor *nb_retranslocked, __u64 seqno)
302 struct rb_node *n = 0;
303 struct cor_control_retrans *ret = 0;
305 n = nb_retranslocked->kp_retransmits_rb.rb_node;
307 while (likely(n != 0) && ret == 0) {
308 struct cor_control_retrans *cr = container_of(n,
309 struct cor_control_retrans, rbn);
311 BUG_ON(cr->nb != nb_retranslocked);
313 if (cor_seqno_before(seqno, cr->seqno))
314 n = n->rb_left;
315 else if (cor_seqno_after(seqno, cr->seqno))
316 n = n->rb_right;
317 else
318 ret = cr;
321 if (ret != 0)
322 kref_get(&(ret->ref));
324 return ret;
/* nb->retrans_lock must be held */
/* Insert a retrans packet into the neighbor's seqno-ordered rb-tree and
 * take a reference for the tree. Duplicate seqnos are a bug. */
void cor_insert_control_retrans(struct cor_control_retrans *ins)
{
	struct cor_neighbor *nb = ins->nb;
	__u64 seqno = ins->seqno;

	struct rb_root *root;
	struct rb_node **p;
	struct rb_node *parent = 0;

	BUG_ON(nb == 0);

	root = &(nb->kp_retransmits_rb);
	p = &(root->rb_node);

	while ((*p) != 0) {
		struct cor_control_retrans *cr = container_of(*p,
				struct cor_control_retrans, rbn);

		BUG_ON(cr->nb != nb);

		parent = *p;
		if (unlikely(cor_seqno_eq(seqno, cr->seqno))) {
			BUG();
		} else if (cor_seqno_before(seqno, cr->seqno)) {
			p = &(*p)->rb_left;
		} else if (cor_seqno_after(seqno, cr->seqno)) {
			p = &(*p)->rb_right;
		} else {
			/* unreachable: the three comparisons above are exhaustive */
			BUG();
		}
	}

	kref_get(&(ins->ref));
	rb_link_node(&(ins->rbn), parent, p);
	rb_insert_color(&(ins->rbn), root);
}
/* If the out-of-order range announced by this ack_conn has since been
 * fully covered by the in-order receive position (next_seqno), drop the
 * OOO flag and range so stale information is not (re)transmitted.
 * Caller holds src_in_l->rcv_lock. */
static void cor_remove_connack_oooflag_ifold(struct cor_conn *src_in_l,
		struct cor_control_msg_out *cm)
{
	if (cor_ooolen(cm->msg.ack_conn.flags) != 0 && cor_seqno_before_eq(
			cm->msg.ack_conn.seqno_ooo +
			cm->msg.ack_conn.length,
			src_in_l->src.in.next_seqno)) {
		cm->msg.ack_conn.length = 0;
		cm->msg.ack_conn.flags = (cm->msg.ack_conn.flags &
				(~KP_ACK_CONN_FLAGS_OOO));
	}
}
/* Decide whether an ack_conn message is still worth requeueing after a
 * retransmit timeout. Returns 0 (drop it) if the conn no longer matches
 * this message or was reset, or if all its flags became stale; otherwise
 * strips outdated parts, recomputes cm->length and returns 1.
 * Caller holds cn_l->rcv_lock. */
static int cor_ackconn_prepare_requeue(struct cor_conn *cn_l,
		struct cor_control_msg_out *cm)
{
	if (unlikely(unlikely(cn_l->sourcetype != SOURCE_IN) ||
			unlikely(cn_l->src.in.nb != cm->nb) ||
			unlikely(
			cor_get_connid_reverse(cn_l->src.in.conn_id) !=
			cm->msg.ack_conn.conn_id) ||
			unlikely(cn_l->isreset != 0)))
		return 0;

	cor_remove_connack_oooflag_ifold(cn_l, cm);

	/* a newer ack has been generated since: seqno/window data is stale */
	if (!cor_seqno_eq(cm->msg.ack_conn.ack_seqno, cn_l->src.in.ack_seqno))
		cm->msg.ack_conn.flags = (cm->msg.ack_conn.flags &
				(~KP_ACK_CONN_FLAGS_SEQNO) &
				(~KP_ACK_CONN_FLAGS_WINDOW));

	if (cm->msg.ack_conn.flags == 0)
		return 0;

	cm->length = 5 + cor_ack_conn_len(cm->msg.ack_conn.flags);

	return 1;
}
/* Timed-out retrans packet: move its messages (in reverse order, so they
 * re-enqueue at their original positions) back onto the send queues.
 * Stale ack_conns are dropped, fresh ones re-merged; pongs leave the
 * retrans count. cmsg_bulk_readds suppresses intermediate timer
 * rescheduling until all messages are back. */
static void cor_requeue_control_retrans(struct cor_control_retrans *cr)
{
	atomic_inc(&(cr->nb->cmsg_bulk_readds));

	while (list_empty(&(cr->msgs)) == 0) {
		struct cor_control_msg_out *cm = container_of(cr->msgs.prev,
				struct cor_control_msg_out, lh);

		list_del(&(cm->lh));

		BUG_ON(cm->nb != cr->nb);

		if (cm->type == MSGTYPE_ACK_CONN) {
			struct cor_conn *cn_l = cm->msg.ack_conn.src_in;

			spin_lock_bh(&(cn_l->rcv_lock));
			if (unlikely(cor_ackconn_prepare_requeue(cn_l,
					cm) == 0)) {
				cor_free_control_msg(cm);
			} else {
				cor_merge_or_enqueue_ackconn(cn_l, cm,
						ADDCMSG_SRC_RETRANS);
			}
			spin_unlock_bh(&(cn_l->rcv_lock));
		} else {
			if (cm->type == MSGTYPE_PONG)
				atomic_dec(&(cm->nb->cmsg_pongs_retrans_cnt));
			cor_enqueue_control_msg(cm, ADDCMSG_SRC_RETRANS);
		}
	}

	atomic_dec(&(cr->nb->cmsg_bulk_readds));

	spin_lock_bh(&(cr->nb->cmsg_lock));
	cor_schedule_controlmsg_timer(cr->nb);
	spin_unlock_bh(&(cr->nb->cmsg_lock));
}
/* Drop every retrans packet on one timeout list: unlink it from the list
 * and the rb-tree and release both references. Caller holds
 * nb->retrans_lock. */
static void _cor_empty_retrans_queue(struct cor_neighbor *nb_retranslocked,
		struct list_head *retrans_list)
{
	while (!list_empty(retrans_list)) {
		struct cor_control_retrans *cr = container_of(
				retrans_list->next, struct cor_control_retrans,
				timeout_list);

		BUG_ON(cr->nb != nb_retranslocked);

		list_del(&(cr->timeout_list));
		rb_erase(&(cr->rbn), &(nb_retranslocked->kp_retransmits_rb));

		kref_put(&(cr->ref), cor_kreffree_bug); /* rb */
		kref_put(&(cr->ref), cor_free_control_retrans); /* list */
	}
}
/* Drop all pending retransmits (fast and slow lists) of a neighbor,
 * e.g. when it is killed. Caller holds nb->retrans_lock. */
static void cor_empty_retrans_queue(struct cor_neighbor *nb_retranslocked)
{
	_cor_empty_retrans_queue(nb_retranslocked,
			&(nb_retranslocked->retrans_fast_list));
	_cor_empty_retrans_queue(nb_retranslocked,
			&(nb_retranslocked->retrans_slow_list));
}
467 static unsigned long cor_get_retransmit_timeout(
468 struct cor_neighbor *nb_retranslocked)
470 struct cor_control_retrans *cr1 = 0;
471 struct cor_control_retrans *cr2 = 0;
472 struct cor_control_retrans *cr = 0;
474 if (list_empty(&(nb_retranslocked->retrans_fast_list)) == 0) {
475 cr1 = container_of(nb_retranslocked->retrans_fast_list.next,
476 struct cor_control_retrans, timeout_list);
477 BUG_ON(cr1->nb != nb_retranslocked);
480 if (list_empty(&(nb_retranslocked->retrans_slow_list)) == 0) {
481 cr2 = container_of(nb_retranslocked->retrans_slow_list.next,
482 struct cor_control_retrans, timeout_list);
483 BUG_ON(cr2->nb != nb_retranslocked);
486 if (cr1 == 0)
487 cr = cr2;
488 else if (cr2 == 0)
489 cr = cr1;
490 else
491 cr = (time_after(cr1->timeout, cr2->timeout) ? cr2 : cr1);
493 BUG_ON(cr == 0);
495 return cr->timeout;
/* Retransmit timer: if nothing is pending, drop the timer's neighbor
 * reference; if the neighbor was killed, flush the queues instead. If
 * the earliest timeout is still in the future, re-arm the timer (keeping
 * the reference only if mod_timer() actually re-armed an inactive
 * timer). Otherwise ask the controlmsg machinery to rebuild and resend
 * via add_retrans_needed. */
void cor_retransmit_timerfunc(struct timer_list *retrans_timer)
{
	struct cor_neighbor *nb = container_of(retrans_timer,
			struct cor_neighbor, retrans_timer);
	int nbstate = cor_get_neigh_state(nb);
	unsigned long timeout;

	spin_lock_bh(&(nb->retrans_lock));

	if (list_empty(&(nb->retrans_fast_list)) &&
			list_empty(&(nb->retrans_slow_list))) {
		spin_unlock_bh(&(nb->retrans_lock));
		cor_nb_kref_put(nb, "retransmit_timer");
		return;
	}

	if (unlikely(nbstate == NEIGHBOR_STATE_KILLED)) {
		cor_empty_retrans_queue(nb);
		spin_unlock_bh(&(nb->retrans_lock));
		cor_nb_kref_put(nb, "retransmit_timer");
		return;
	}

	timeout = cor_get_retransmit_timeout(nb);

	if (time_after(timeout, jiffies)) {
		int rc = mod_timer(&(nb->retrans_timer), timeout);

		spin_unlock_bh(&(nb->retrans_lock));
		if (rc != 0) {
			/* timer was already pending: it keeps its reference,
			 * so drop the one held for this invocation */
			cor_nb_kref_put(nb, "retransmit_timer");
		}
		return;
	}

	spin_unlock_bh(&(nb->retrans_lock));

	spin_lock_bh(&(nb->cmsg_lock));
	nb->add_retrans_needed = 1;
	cor_schedule_controlmsg_timer(nb);
	spin_unlock_bh(&(nb->cmsg_lock));

	cor_nb_kref_put(nb, "retransmit_timer");
}
/* Register a freshly sent packet for retransmission: compute its timeout
 * from measured latency plus the remote's maximum ack delay, insert it
 * into the rb-tree and append it to the fast or slow timeout list. When
 * it becomes the head of an empty list, (re)arm the retransmit timer,
 * taking a neighbor reference if the timer was not already pending. */
static void cor_schedule_retransmit(struct cor_control_retrans *cr,
		struct cor_neighbor *nb, int fastack)
{
	int first;

	cr->timeout = cor_calc_timeout(atomic_read(&(nb->latency_retrans_us)),
			atomic_read(&(nb->latency_stddev_retrans_us)),
			fastack ?
			atomic_read(&(nb->max_remote_ack_fast_delay_us)) :
			atomic_read(&(nb->max_remote_ack_slow_delay_us)));

	spin_lock_bh(&(nb->retrans_lock));

	cor_insert_control_retrans(cr);
	if (fastack) {
		first = list_empty(&(nb->retrans_fast_list));
		list_add_tail(&(cr->timeout_list), &(nb->retrans_fast_list));
	} else {
		first = list_empty(&(nb->retrans_slow_list));
		list_add_tail(&(cr->timeout_list), &(nb->retrans_slow_list));
	}

	if (first) {
		if (mod_timer(&(nb->retrans_timer),
				cor_get_retransmit_timeout(nb)) == 0) {
			cor_nb_kref_get(nb, "retransmit_timer");
		}
	}

	spin_unlock_bh(&(nb->retrans_lock));
}
/* A kernel-packet ack arrived: look up the matching retrans packet,
 * remove it from the rb-tree and its timeout list, and drop all three
 * references (lookup, rb-tree, list). An unknown seqno is a duplicate
 * or bogus ack and is ignored. */
void cor_kern_ack_rcvd(struct cor_neighbor *nb, __u64 seqno)
{
	struct cor_control_retrans *cr = 0;

	spin_lock_bh(&(nb->retrans_lock));

	cr = cor_get_control_retrans(nb, seqno);

	if (cr == 0) {
		/* char *seqno_p = (char *) &seqno;
		seqno = cpu_to_be32(seqno);
		printk(KERN_ERR "bogus/duplicate ack received %d %d %d %d\n",
				seqno_p[0], seqno_p[1], seqno_p[2], seqno_p[3]);
		*/
		goto out;
	}

	rb_erase(&(cr->rbn), &(nb->kp_retransmits_rb));

	BUG_ON(cr->nb != nb);

	list_del(&(cr->timeout_list));

out:
	spin_unlock_bh(&(nb->retrans_lock));

	if (cr != 0) {
		/* cor_get_control_retrans */
		kref_put(&(cr->ref), cor_kreffree_bug);

		kref_put(&(cr->ref), cor_kreffree_bug); /* rb_erase */
		kref_put(&(cr->ref), cor_free_control_retrans); /* list */
	}
}
/* Compute the encoded receive-window advertisement for a conn and record
 * the limit implied by the (lossy) encoding in window_seqnolimit_remote.
 * Returns 0 when the conn no longer belongs to the expected sender/
 * conn_id (e.g. it was reset meanwhile). */
static __u16 cor_get_window(struct cor_conn *cn,
		struct cor_neighbor *expectedsender, __u32 expected_connid)
{
	__u16 window = 0;

	BUG_ON(expectedsender == 0);

	spin_lock_bh(&(cn->rcv_lock));

	if (cor_is_conn_in(cn, expectedsender, expected_connid) == 0)
		goto out;

	window = cor_enc_window(cor_seqno_clean(
			cn->src.in.window_seqnolimit -
			cn->src.in.next_seqno));

	/* remember what the remote will actually see after decode */
	cn->src.in.window_seqnolimit_remote = cn->src.in.next_seqno +
			cor_dec_window(window);

out:
	spin_unlock_bh(&(cn->rcv_lock));

	return window;
}
633 /* static void padding(struct sk_buff *skb, __u32 length)
635 char *dst;
636 if (length <= 0)
637 return;
638 dst = skb_put(skb, length);
639 BUG_ON(dst == 0);
640 memset(dst, KP_PADDING, length);
641 } */
644 static __u32 cor_add_init_session(struct sk_buff *skb, __be32 sessionid,
645 __u32 spaceleft)
647 char *dst;
649 BUG_ON(KP_MISC_INIT_SESSION_CMDLEN != 5);
651 if (unlikely(spaceleft < 5))
652 return 0;
654 dst = skb_put(skb, 5);
655 BUG_ON(dst == 0);
657 dst[0] = get_kp_code(KP_MISC, KP_MISC_INIT_SESSION);
658 cor_put_be32(dst + 1, sessionid);
660 return 5;
663 static __u32 cor_add_ack(struct sk_buff *skb, struct cor_control_retrans *cr,
664 struct cor_control_msg_out *cm, __u32 spaceleft)
666 char *dst;
668 BUG_ON(cm->length != 7);
670 if (unlikely(spaceleft < 7))
671 return 0;
673 dst = skb_put(skb, 7);
674 BUG_ON(dst == 0);
676 dst[0] = get_kp_code(KP_MISC, KP_MISC_ACK);
677 cor_put_u48(dst + 1, cm->msg.ack.seqno);
679 list_add_tail(&(cm->lh), &(cr->msgs));
681 return 7;
684 static inline __u8 cor_add_ack_conn_get_delayremaining(
685 struct cor_control_msg_out *cm, unsigned long cmsg_send_start_j)
687 __u32 maxdelay_ms = 0;
688 unsigned long jiffies_timeout;
689 if (cm->msg.ack_conn.is_highlatency) {
690 maxdelay_ms = CMSG_MAXDELAY_ACKCONN_HIGHLATENCY_MS;
691 } else {
692 maxdelay_ms = CMSG_MAXDELAY_ACKCONN_LOWLATENCY_MS;
695 jiffies_timeout = cm->time_added + msecs_to_jiffies(maxdelay_ms);
697 if (time_before_eq(cmsg_send_start_j, cm->time_added)) {
698 return 255;
699 } else if (time_after_eq(cmsg_send_start_j, jiffies_timeout)) {
700 return 0;
701 } else {
702 __u64 delay_remaining = jiffies_timeout - cmsg_send_start_j;
704 BUG_ON(delay_remaining > U32_MAX);
705 BUG_ON(delay_remaining > msecs_to_jiffies(maxdelay_ms));
707 return (__u8) div64_u64(255 * delay_remaining +
708 msecs_to_jiffies(maxdelay_ms)/2,
709 msecs_to_jiffies(maxdelay_ms));
713 static __u32 cor_add_ack_conn(struct sk_buff *skb,
714 struct cor_control_retrans *cr, struct cor_control_msg_out *cm,
715 __u32 spaceleft, unsigned long cmsg_send_start_j,
716 int *ackneeded)
718 char *dst;
719 __u32 offset = 0;
721 if (unlikely(spaceleft < cm->length))
722 return 0;
724 dst = skb_put(skb, cm->length);
725 BUG_ON(dst == 0);
727 dst[offset] = get_kp_code(KP_ACK_CONN, cm->msg.ack_conn.flags);
728 offset++;
729 cor_put_u32(dst + offset, cm->msg.ack_conn.conn_id);
730 offset += 4;
732 if (likely((cm->msg.ack_conn.flags & KP_ACK_CONN_FLAGS_SEQNO) != 0 ||
733 cor_ooolen(cm->msg.ack_conn.flags) != 0)) {
734 dst[offset] = cor_add_ack_conn_get_delayremaining(cm,
735 cmsg_send_start_j);
736 offset++;
739 if ((cm->msg.ack_conn.flags & KP_ACK_CONN_FLAGS_SEQNO) != 0) {
740 cor_put_u48(dst + offset, cm->msg.ack_conn.seqno);
741 offset += 6;
743 if ((cm->msg.ack_conn.flags & KP_ACK_CONN_FLAGS_WINDOW) != 0) {
744 BUG_ON(cm->msg.ack_conn.src_in == 0);
745 cor_put_u16(dst + offset, cor_get_window(
746 cm->msg.ack_conn.src_in,
747 cm->nb, cor_get_connid_reverse(
748 cm->msg.ack_conn.conn_id)));
749 offset += 2;
750 dst[offset] = cm->msg.ack_conn.bufsize_changerate;
751 offset++;
755 if (cor_ooolen(cm->msg.ack_conn.flags) != 0) {
756 cor_put_u48(dst + offset, cm->msg.ack_conn.seqno_ooo);
757 offset += 6;
758 if (cor_ooolen(cm->msg.ack_conn.flags) == 1) {
759 BUG_ON(cm->msg.ack_conn.length > 255);
760 dst[offset] = cm->msg.ack_conn.length;
761 offset += 1;
762 } else if (cor_ooolen(cm->msg.ack_conn.flags) == 2) {
763 BUG_ON(cm->msg.ack_conn.length <= 255);
764 BUG_ON(cm->msg.ack_conn.length > 65535);
765 cor_put_u16(dst + offset, cm->msg.ack_conn.length);
766 offset += 2;
767 } else if (cor_ooolen(cm->msg.ack_conn.flags) == 4) {
768 BUG_ON(cm->msg.ack_conn.length <= 65535);
769 cor_put_u32(dst + offset, cm->msg.ack_conn.length);
770 offset += 4;
771 } else {
772 BUG();
776 if (unlikely((cm->msg.ack_conn.flags &
777 KP_ACK_CONN_FLAGS_PRIORITY) != 0)) {
778 __u16 priority = (cm->msg.ack_conn.priority_seqno << 12) &
779 cm->msg.ack_conn.priority;
780 BUG_ON(cm->msg.ack_conn.priority_seqno > 15);
781 BUG_ON(cm->msg.ack_conn.priority > 4095);
783 cor_put_u16(dst + offset, priority);
784 offset += 2;
787 list_add_tail(&(cm->lh), &(cr->msgs));
788 if (likely((cm->msg.ack_conn.flags & KP_ACK_CONN_FLAGS_SEQNO) != 0 ||
789 cor_ooolen(cm->msg.ack_conn.flags) != 0) &&
790 cm->msg.ack_conn.is_highlatency == 0) {
791 *ackneeded = ACK_NEEDED_FAST;
792 } else if (*ackneeded != ACK_NEEDED_FAST) {
793 *ackneeded = ACK_NEEDED_SLOW;
796 BUG_ON(offset != cm->length);
797 return offset;
800 static __u32 cor_add_ping(struct sk_buff *skb, __u32 cookie, __u32 spaceleft)
802 char *dst;
804 BUG_ON(KP_MISC_PING_CMDLEN != 5);
806 if (unlikely(spaceleft < 5))
807 return 0;
809 dst = skb_put(skb, 5);
810 BUG_ON(dst == 0);
812 dst[0] = get_kp_code(KP_MISC, KP_MISC_PING);
813 cor_put_u32(dst + 1, cookie);
815 return 5;
818 static __u32 cor_calc_respdelay(ktime_t time_pong_enqueued, ktime_t time_end)
820 if (unlikely(ktime_before(time_end, time_pong_enqueued))) {
821 return 0;
822 } else {
823 __s64 respdelay = div_u64(ktime_to_ns(time_end) -
824 ktime_to_ns(time_pong_enqueued) + 500,
825 1000);
827 if (unlikely(respdelay > U32_MAX))
828 return U32_MAX;
829 else if (unlikely(respdelay < 0))
830 return 0;
831 else
832 return (__u32) respdelay;
/* Append a pong (code byte, cookie, full response delay, network-only
 * response delay; 13 bytes) and attach the message to the retrans
 * packet. Active neighbors get at least a slow ack request. Returns
 * bytes written, or 0 when it does not fit. */
static __u32 cor_add_pong(struct sk_buff *skb, struct cor_control_retrans *cr,
		struct cor_control_msg_out *cm, __u32 spaceleft, int nbstate,
		ktime_t cmsg_send_start, int *ackneeded)
{
	__u32 respdelay_full;
	__u32 respdelay_netonly;
	char *dst;

	BUG_ON(cm->length != 13);

	if (unlikely(spaceleft < 13))
		return 0;

	/* full delay: since the pong was queued; netonly: since the ping
	 * actually arrived */
	respdelay_full = cor_calc_respdelay(cm->msg.pong.time_enqueued,
			cmsg_send_start);
	respdelay_netonly = cor_calc_respdelay(cm->msg.pong.ping_rcvtime,
			ktime_get());

	dst = skb_put(skb, 13);
	BUG_ON(dst == 0);

	dst[0] = get_kp_code(KP_MISC, KP_MISC_PONG);
	cor_put_u32(dst + 1, cm->msg.pong.cookie);
	cor_put_u32(dst + 5, (__u32) respdelay_full);
	cor_put_u32(dst + 9, (__u32) respdelay_netonly);

	list_add_tail(&(cm->lh), &(cr->msgs));
	if (likely(nbstate == NEIGHBOR_STATE_ACTIVE) &&
			*ackneeded != ACK_NEEDED_FAST)
		*ackneeded = ACK_NEEDED_SLOW;

	return 13;
}
870 static __u32 cor_add_connect(struct sk_buff *skb,
871 struct cor_control_retrans *cr, struct cor_control_msg_out *cm,
872 __u32 spaceleft, int *ackneeded)
874 char *dst;
875 struct cor_conn *src_in = cm->msg.connect.src_in;
876 struct cor_conn *trgt_out = cor_get_conn_reversedir(src_in);
877 __u16 priority;
879 BUG_ON(cm->length != 22);
881 if (unlikely(spaceleft < 22))
882 return 0;
884 dst = skb_put(skb, 22);
885 BUG_ON(dst == 0);
887 dst[0] = get_kp_code(KP_MISC, KP_MISC_CONNECT);
888 cor_put_u32(dst + 1, cm->msg.connect.conn_id);
889 cor_put_u48(dst + 5, cm->msg.connect.seqno1);
890 cor_put_u48(dst + 11, cm->msg.connect.seqno2);
891 BUG_ON(cm->msg.connect.src_in == 0);
892 cor_put_u16(dst + 17, cor_get_window(cm->msg.connect.src_in, cm->nb,
893 cor_get_connid_reverse(cm->msg.connect.conn_id)));
895 spin_lock_bh(&(trgt_out->rcv_lock));
896 BUG_ON(trgt_out->targettype != TARGET_OUT);
898 priority = (trgt_out->trgt.out.priority_seqno << 12) &
899 trgt_out->trgt.out.priority_last;
900 BUG_ON(trgt_out->trgt.out.priority_seqno > 15);
901 BUG_ON(trgt_out->trgt.out.priority_last > 4095);
902 cor_put_u16(dst + 19, priority);
904 if (src_in->is_highlatency == 0)
905 dst[21] = 0;
906 else
907 dst[21] = 1;
909 spin_unlock_bh(&(trgt_out->rcv_lock));
911 list_add_tail(&(cm->lh), &(cr->msgs));
912 if (*ackneeded != ACK_NEEDED_FAST)
913 *ackneeded = ACK_NEEDED_SLOW;
915 return 22;
/* Append a connect-success command (code, conn_id, window; 7 bytes),
 * attach cm to the retrans packet and request at least a slow ack.
 * Returns bytes written, or 0 when it does not fit. */
static __u32 cor_add_connect_success(struct sk_buff *skb,
		struct cor_control_retrans *cr, struct cor_control_msg_out *cm,
		__u32 spaceleft, int *ackneeded)
{
	char *dst;

	BUG_ON(cm->length != 7);

	if (unlikely(spaceleft < 7))
		return 0;

	dst = skb_put(skb, 7);
	BUG_ON(dst == 0);

	dst[0] = get_kp_code(KP_MISC, KP_MISC_CONNECT_SUCCESS);
	cor_put_u32(dst + 1, cm->msg.connect_success.conn_id);
	BUG_ON(cm->msg.connect_success.src_in == 0);
	cor_put_u16(dst + 5, cor_get_window(
			cm->msg.connect_success.src_in, cm->nb,
			cor_get_connid_reverse(
			cm->msg.connect_success.conn_id)));

	list_add_tail(&(cm->lh), &(cr->msgs));
	if (*ackneeded != ACK_NEEDED_FAST)
		*ackneeded = ACK_NEEDED_SLOW;

	return 7;
}
947 static __u32 cor_add_reset_conn(struct sk_buff *skb,
948 struct cor_control_retrans *cr, struct cor_control_msg_out *cm,
949 __u32 spaceleft, int *ackneeded)
951 char *dst;
953 BUG_ON(cm->length != 5);
955 if (unlikely(spaceleft < 5))
956 return 0;
958 dst = skb_put(skb, 5);
959 BUG_ON(dst == 0);
961 dst[0] = get_kp_code(KP_MISC, KP_MISC_RESET_CONN);
962 cor_put_u32(dst + 1, cm->msg.reset_conn.conn_id);
964 list_add_tail(&(cm->lh), &(cr->msgs));
965 if (*ackneeded != ACK_NEEDED_FAST)
966 *ackneeded = ACK_NEEDED_SLOW;
968 return 5;
971 static __u32 cor_add_conndata(struct sk_buff *skb,
972 struct cor_control_retrans *cr, struct cor_control_msg_out *cm,
973 __u32 spaceleft, struct cor_control_msg_out **split_conndata,
974 __u32 *sc_sendlen)
976 char *dst;
977 __u32 offset = 0;
979 __u32 totallen = get_kp_conn_data_length(cm->msg.conn_data.datalen);
980 __u32 putlen = totallen;
981 __u32 dataputlen = cm->msg.conn_data.datalen;
982 __u8 code_min = 0;
984 BUILD_BUG_ON(KP_CONN_DATA_MAXLEN != 128+32767);
985 BUG_ON(cm->msg.conn_data.datalen > KP_CONN_DATA_MAXLEN);
987 BUG_ON(cm->length != totallen);
989 BUG_ON(putlen > 1024*1024*1024);
991 BUG_ON(split_conndata == 0);
992 BUG_ON(*split_conndata != 0);
993 BUG_ON(sc_sendlen == 0);
994 BUG_ON(*sc_sendlen != 0);
996 if (putlen > spaceleft) {
997 if (spaceleft < get_kp_conn_data_length(1))
998 return 0;
1000 BUG_ON(spaceleft < 13);
1002 if (spaceleft <= 127 + 12) {
1003 dataputlen = spaceleft - 12;
1004 putlen = spaceleft;
1005 } else if (spaceleft == 127 - 12 + 1) {
1006 dataputlen = spaceleft - 12 - 1;
1007 putlen = spaceleft - 1;
1008 } else {
1009 dataputlen = spaceleft - 13;
1010 putlen = spaceleft;
1013 BUG_ON(putlen != get_kp_conn_data_length(dataputlen));
1016 dst = skb_put(skb, putlen);
1017 BUG_ON(dst == 0);
1019 BUG_ON((cm->msg.conn_data.windowused &
1020 (~KP_CONN_DATA_FLAGS_WINDOWUSED)) != 0);
1021 code_min = 0;
1022 if (cm->msg.conn_data.flush != 0)
1023 code_min |= KP_CONN_DATA_FLAGS_FLUSH;
1024 code_min |= cm->msg.conn_data.windowused;
1026 dst[0] = get_kp_code(KP_CONN_DATA, code_min);
1027 offset++;
1028 cor_put_u32(dst + offset, cm->msg.conn_data.conn_id);
1029 offset += 4;
1030 cor_put_u48(dst + offset, cm->msg.conn_data.seqno);
1031 offset += 6;
1033 if (dataputlen < 128) {
1034 dst[offset] = (__u8) dataputlen;
1035 offset++;
1036 } else {
1037 __u8 high = (__u8) (128 + ((dataputlen - 128) / 256));
1038 __u8 low = (__u8) ((dataputlen - 128) % 256);
1039 BUG_ON(((dataputlen - 128) / 256) > 127);
1040 dst[offset] = high;
1041 dst[offset+1] = low;
1042 offset += 2;
1045 BUG_ON(offset > putlen);
1046 BUG_ON(putlen - offset != dataputlen);
1047 memcpy(dst + offset, cm->msg.conn_data.data, dataputlen);
1048 offset += dataputlen;
1050 if (cm->msg.conn_data.datalen == dataputlen) {
1051 BUG_ON(cm->length != putlen);
1052 list_add_tail(&(cm->lh), &(cr->msgs));
1053 } else {
1054 *split_conndata = cm;
1055 *sc_sendlen = dataputlen;
1058 return putlen;
/* Append a set-max-cmsg-delay command (code byte + five 32-bit delays in
 * microseconds, 21 bytes), attach cm to the retrans packet and request
 * at least a slow ack. Returns bytes written, or 0 when it does not
 * fit. */
static __u32 cor_add_set_max_cmsg_dly(struct sk_buff *skb,
		struct cor_control_retrans *cr, struct cor_control_msg_out *cm,
		__u32 spaceleft, int *ackneeded)
{
	char *dst;

	BUG_ON(KP_MISC_SET_MAX_CMSG_DELAY_CMDLEN != 21);
	BUG_ON(cm->length != KP_MISC_SET_MAX_CMSG_DELAY_CMDLEN);

	if (unlikely(spaceleft < 21))
		return 0;

	dst = skb_put(skb, 21);
	BUG_ON(dst == 0);

	dst[0] = get_kp_code(KP_MISC, KP_MISC_SET_MAX_CMSG_DELAY);
	cor_put_u32(dst + 1, cm->msg.set_max_cmsg_delay.ack_fast_delay);
	cor_put_u32(dst + 5, cm->msg.set_max_cmsg_delay.ack_slow_delay);
	cor_put_u32(dst + 9,
			cm->msg.set_max_cmsg_delay.ackconn_lowlatency_delay);
	cor_put_u32(dst + 13,
			cm->msg.set_max_cmsg_delay.ackconn_highlatency_delay);
	cor_put_u32(dst + 17, cm->msg.set_max_cmsg_delay.pong_delay);

	list_add_tail(&(cm->lh), &(cr->msgs));
	if (*ackneeded != ACK_NEEDED_FAST)
		*ackneeded = ACK_NEEDED_SLOW;

	return 21;
}
/* Append a set-receive-mtu command (code byte + 32-bit mtu, 5 bytes),
 * attach cm to the retrans packet and request at least a slow ack.
 * Returns bytes written, or 0 when it does not fit. */
static __u32 cor_add_set_rcvmtu(struct sk_buff *skb,
		struct cor_control_retrans *cr, struct cor_control_msg_out *cm,
		__u32 spaceleft, int *ackneeded)
{
	char *dst;

	BUG_ON(KP_MISC_SET_RECEIVE_MTU_CMDLEN != 5);
	BUG_ON(cm->length != KP_MISC_SET_RECEIVE_MTU_CMDLEN);

	if (unlikely(spaceleft < 5))
		return 0;

	dst = skb_put(skb, 5);
	BUG_ON(dst == 0);

	dst[0] = get_kp_code(KP_MISC, KP_MISC_SET_RECEIVE_MTU);
	cor_put_u32(dst + 1, cm->msg.set_rcvmtu.rcvmtu);

	list_add_tail(&(cm->lh), &(cr->msgs));
	if (*ackneeded != ACK_NEEDED_FAST)
		*ackneeded = ACK_NEEDED_SLOW;

	return 5;
}
/* Dispatch one control message to its type-specific encoder. Returns
 * the number of bytes appended to skb (0 means "did not fit"); on
 * success the encoder attaches cm to cr (except a split conndata, which
 * is returned via split_conndata/sc_sendlen). */
static __u32 cor_add_message(struct sk_buff *skb,
		struct cor_control_retrans *cr, struct cor_control_msg_out *cm,
		__u32 spaceleft, int nbstate, unsigned long cmsg_send_start_j,
		ktime_t cmsg_send_start_kt,
		struct cor_control_msg_out **split_conndata, __u32 *sc_sendlen,
		int *ackneeded)
{
	BUG_ON(split_conndata != 0 && *split_conndata != 0);
	BUG_ON(sc_sendlen != 0 && *sc_sendlen != 0);

	switch (cm->type) {
	case MSGTYPE_ACK:
		return cor_add_ack(skb, cr, cm, spaceleft);
	case MSGTYPE_ACK_CONN:
		return cor_add_ack_conn(skb, cr, cm, spaceleft,
				cmsg_send_start_j, ackneeded);
	case MSGTYPE_PONG:
		return cor_add_pong(skb, cr, cm, spaceleft, nbstate,
				cmsg_send_start_kt, ackneeded);
	case MSGTYPE_CONNECT:
		return cor_add_connect(skb, cr, cm, spaceleft, ackneeded);
	case MSGTYPE_CONNECT_SUCCESS:
		return cor_add_connect_success(skb, cr, cm, spaceleft,
				ackneeded);
	case MSGTYPE_RESET_CONN:
		return cor_add_reset_conn(skb, cr, cm, spaceleft, ackneeded);
	case MSGTYPE_CONNDATA:
		return cor_add_conndata(skb, cr, cm, spaceleft, split_conndata,
				sc_sendlen);
	case MSGTYPE_SET_MAX_CMSG_DELAY:
		return cor_add_set_max_cmsg_dly(skb, cr, cm, spaceleft,
				ackneeded);
	case MSGTYPE_SET_RCVMTU:
		return cor_add_set_rcvmtu(skb, cr, cm, spaceleft,
				ackneeded);
	default:
		BUG();
	}

	/* not reachable: every case above returns or BUGs */
	BUG();
	return 0;
}
/* Pack messages from cmsgs into skb until the list is empty or one no
 * longer fits; returns the total bytes appended. NOTE(review): the
 * rc == 0 branch hits BUG() before the requeue-and-break recovery code,
 * so that recovery is dead — apparently the caller is expected to size
 * cmsgs so everything always fits. */
static __u32 ___cor_send_messages(struct cor_neighbor *nb, struct sk_buff *skb,
		struct cor_control_retrans *cr, struct list_head *cmsgs,
		__u32 spaceleft, int nbstate, unsigned long cmsg_send_start_j,
		ktime_t cmsg_send_start_kt,
		struct cor_control_msg_out **split_conndata, __u32 *sc_sendlen,
		int *ackneeded)
{
	__u32 length = 0;

	while (!list_empty(cmsgs)) {
		__u32 rc;
		struct cor_control_msg_out *cm = container_of(cmsgs->next,
				struct cor_control_msg_out, lh);

		list_del(&(cm->lh));

		rc = cor_add_message(skb, cr, cm, spaceleft - length, nbstate,
				cmsg_send_start_j, cmsg_send_start_kt,
				split_conndata, sc_sendlen, ackneeded);
		if (rc == 0) {
			BUG();
			list_add(&(cm->lh), cmsgs);
			break;
		}

		/* conndata may legitimately encode fewer bytes (split) */
		BUG_ON(rc != cm->length && cm->type != MSGTYPE_CONNDATA);

		length += rc;
	}

	return length;
}
/* Build and pack an ad-hoc set-max-cmsg-delay message advertising our
 * local maximum delays (converted ms -> us) to the neighbor. Marks the
 * advertisement as sent. Returns the bytes appended, or 0 when
 * allocation fails (and then max_cmsg_delay_sent is still set —
 * NOTE(review): presumably retried elsewhere; confirm). */
static __u32 ___cor_send_messages_smcd(struct cor_neighbor *nb,
		struct sk_buff *skb, struct cor_control_retrans *cr,
		__u32 spaceleft, int nbstate, unsigned long cmsg_send_start_j,
		ktime_t cmsg_send_start_kt, int *ackneeded)
{
	struct cor_control_msg_out *cm;
	__u32 rc;

	cm = cor_alloc_control_msg(nb, ACM_PRIORITY_MED);

	if (unlikely(cm == 0))
		return 0;

	cm->type = MSGTYPE_SET_MAX_CMSG_DELAY;
	cm->msg.set_max_cmsg_delay.ack_fast_delay =
			CMSG_MAXDELAY_ACK_FAST_MS * 1000;
	cm->msg.set_max_cmsg_delay.ack_slow_delay =
			CMSG_MAXDELAY_ACK_SLOW_MS * 1000;
	cm->msg.set_max_cmsg_delay.ackconn_lowlatency_delay =
			CMSG_MAXDELAY_ACKCONN_LOWLATENCY_MS * 1000;
	cm->msg.set_max_cmsg_delay.ackconn_highlatency_delay =
			CMSG_MAXDELAY_ACKCONN_HIGHLATENCY_MS * 1000;
	cm->msg.set_max_cmsg_delay.pong_delay =
			CMSG_MAXDELAY_OTHER_MS * 1000;
	cm->length = KP_MISC_SET_MAX_CMSG_DELAY_CMDLEN;

	rc = cor_add_message(skb, cr, cm, spaceleft, nbstate, cmsg_send_start_j,
			cmsg_send_start_kt, 0, 0, ackneeded);

	nb->max_cmsg_delay_sent = 1;

	return rc;
}
1225 static __u32 ___cor_send_messages_rcvmtu(struct cor_neighbor *nb,
1226 struct sk_buff *skb, struct cor_control_retrans *cr,
1227 __u32 spaceleft, int nbstate, unsigned long cmsg_send_start_j,
1228 ktime_t cmsg_send_start_kt, int *ackneeded)
1230 struct cor_control_msg_out *cm;
1231 __u32 rc;
1233 cm = cor_alloc_control_msg(nb, ACM_PRIORITY_MED);
1235 if (unlikely(cm == 0))
1236 return 0;
1238 cm->type = MSGTYPE_SET_RCVMTU;
1239 cm->msg.set_rcvmtu.rcvmtu = cor_rcv_mtu(nb);
1240 cm->length = KP_MISC_SET_RECEIVE_MTU_CMDLEN;
1242 rc = cor_add_message(skb, cr, cm, spaceleft, nbstate, cmsg_send_start_j,
1243 cmsg_send_start_kt, 0, 0, ackneeded);
1245 atomic_set(&(nb->rcvmtu_sendneeded), 0);
1247 return rc;
/* Ids of the per-neighbor control message queues.  Pongs are kept on their
 * own queue with a separate length counter (cmsg_pongslength); all other
 * queues are accounted together in cmsg_otherlength.
 */
#define CMSGQUEUE_PONG 1
#define CMSGQUEUE_ACK_FAST 2
#define CMSGQUEUE_ACK_SLOW 3
#define CMSGQUEUE_ACK_CONN_URGENT 4
#define CMSGQUEUE_ACK_CONN_LOWLAT 5
#define CMSGQUEUE_ACK_CONN_HIGHLAT 6
#define CMSGQUEUE_CONNDATA_LOWLAT 7
#define CMSGQUEUE_CONNDATA_HIGHLAT 8
#define CMSGQUEUE_OTHER 9
/* Put a message that could not be sent back onto its queue.
 * MSGTYPE_ACK_CONN needs special handling: under the conn's rcv_lock the
 * ack is re-prepared and may be merged with other pending conn acks; all
 * other types are re-enqueued at the queue head via ADDCMSG_SRC_READD.
 */
static void cor_requeue_message(struct cor_control_msg_out *cm)
{
	if (cm->type == MSGTYPE_ACK_CONN) {
		struct cor_conn *cn_l = cm->msg.ack_conn.src_in;

		spin_lock_bh(&(cn_l->rcv_lock));
		if (unlikely(cor_ackconn_prepare_requeue(cn_l, cm) == 0)) {
			/* requeue refused - drop the ack */
			cor_free_control_msg(cm);
		} else {
			spin_lock_bh(&(cm->nb->cmsg_lock));

			/* re-add at the head of the queue it came from */
			if (unlikely(cm->msg.ack_conn.queue ==
					CMSGQUEUE_ACK_CONN_URGENT)) {
				list_add(&(cm->lh), &(cm->nb->
						cmsg_queue_ackconn_urgent));
			} else if (cm->msg.ack_conn.queue ==
					CMSGQUEUE_ACK_CONN_LOWLAT) {
				list_add(&(cm->lh), &(cm->nb->
						cmsg_queue_ackconn_lowlat));
			} else if (cm->msg.ack_conn.queue ==
					CMSGQUEUE_ACK_CONN_HIGHLAT) {
				list_add(&(cm->lh), &(cm->nb->
						cmsg_queue_ackconn_highlat));
			} else {
				BUG();
			}

			cm->nb->cmsg_otherlength += cm->length;

			list_add(&(cm->msg.ack_conn.conn_acks),
					&(cn_l->src.in.acks_pending));
			cor_try_merge_ackconns(cn_l, cm);

			spin_unlock_bh(&(cm->nb->cmsg_lock));
		}
		spin_unlock_bh(&(cn_l->rcv_lock));
		return;
	}

	cor_enqueue_control_msg(cm, ADDCMSG_SRC_READD);
}
1302 static void cor_requeue_messages(struct list_head *lh)
1304 while (list_empty(lh) == 0) {
1305 struct cor_control_msg_out *cm = container_of(lh->prev,
1306 struct cor_control_msg_out, lh);
1307 list_del(&(cm->lh));
1308 cor_requeue_message(cm);
/* Fill one kernel packet (optional init_session + ping, then implicitly
 * generated commands, then the dequeued cmsgs) and transmit it.
 * On success, messages that must not be retransmitted are freed and the
 * remainder is scheduled for retransmission via cr; on NET_XMIT_DROP
 * everything is requeued.  Returns QOS_RESUME_DONE or QOS_RESUME_CONG.
 */
static int __cor_send_messages_send(struct cor_neighbor *nb,
		struct sk_buff *skb, char *packet_type, int ping,
		int initsession, struct cor_control_retrans *cr,
		struct list_head *cmsgs, __u32 spaceleft, int nbstate,
		unsigned long cmsg_send_start_j, ktime_t cmsg_send_start_kt,
		int *sent)
{
	int rc;
	int ackneeded = ACK_NEEDED_NO;
	__u32 length = 0;
	__u32 pinglen = 0;
	__u32 pingcookie = 0;
	unsigned long last_ping_time;
	struct cor_control_msg_out *split_conndata = 0;
	__u32 sc_sendlen = 0;

	if (ping != TIMETOSENDPING_NO) {
		__u32 rc;

		if (unlikely(initsession)) {
			rc = cor_add_init_session(skb, nb->sessionid,
					spaceleft - length);
			BUG_ON(rc <= 0);
			pinglen = rc;
			length += rc;
		}

		pingcookie = cor_add_ping_req(nb, &last_ping_time);
		rc = cor_add_ping(skb, pingcookie, spaceleft - length);
		BUG_ON(rc <= 0);
		pinglen += rc;
		length += rc;
	}

	/* tell an active neighbor our max cmsg delays once */
	if (likely(nbstate == NEIGHBOR_STATE_ACTIVE) &&
			unlikely(nb->max_cmsg_delay_sent == 0))
		length += ___cor_send_messages_smcd(nb, skb, cr,
				spaceleft - length, nbstate, cmsg_send_start_j,
				cmsg_send_start_kt, &ackneeded);

	if (unlikely(atomic_read(&(nb->rcvmtu_sendneeded)) != 0)) {
		length += ___cor_send_messages_rcvmtu(nb, skb, cr,
				spaceleft - length, nbstate, cmsg_send_start_j,
				cmsg_send_start_kt, &ackneeded);
	}

	length += ___cor_send_messages(nb, skb, cr, cmsgs, spaceleft - length,
			nbstate, cmsg_send_start_j, cmsg_send_start_kt,
			&split_conndata, &sc_sendlen, &ackneeded);

	BUG_ON(length > spaceleft);

	/* drop packets which would contain nothing but an unforced ping */
	if (likely(ping != TIMETOSENDPING_FORCE) &&
			pinglen != 0 && unlikely(length == pinglen)) {
		cor_unadd_ping_req(nb, pingcookie, last_ping_time, 0);
		goto drop;
	}

	if (unlikely(length == 0)) {
drop:
		kfree_skb(skb);

		BUG_ON(list_empty(&(cr->msgs)) == 0);
		kref_put(&(cr->ref), cor_free_control_retrans);

		/* take back the seqno reserved by the caller */
		nb->kpacket_seqno--;
		return QOS_RESUME_DONE;
	}

	//padding(skb, spaceleft - length);
	/* a split conndata command may leave exactly 1 unusable byte */
	BUG_ON(spaceleft - length != 0 &&
			(split_conndata == 0 || spaceleft - length != 1));

	if (ackneeded == ACK_NEEDED_NO) {
		*packet_type = PACKET_TYPE_CMSG_NOACK;
	} else if (ackneeded == ACK_NEEDED_SLOW) {
		*packet_type = PACKET_TYPE_CMSG_ACKSLOW;
	} else if (ackneeded == ACK_NEEDED_FAST) {
		*packet_type = PACKET_TYPE_CMSG_ACKFAST;
	} else {
		BUG();
	}

	rc = cor_dev_queue_xmit(skb, nb->queue, QOS_CALLER_KPACKET);
	if (rc == NET_XMIT_SUCCESS)
		*sent = 1;

	if (rc == NET_XMIT_DROP) {
		if (ping != 0)
			cor_unadd_ping_req(nb, pingcookie, last_ping_time, 1);

		/* cmsg_bulk_readds suppresses timer rescheduling while we
		 * readd; we reschedule once at the end
		 */
		atomic_inc(&(nb->cmsg_bulk_readds));
		if (split_conndata != 0)
			cor_requeue_message(split_conndata);

		cor_requeue_messages(&(cr->msgs));

		kref_put(&(cr->ref), cor_free_control_retrans);

		atomic_dec(&(nb->cmsg_bulk_readds));

		spin_lock_bh(&(nb->cmsg_lock));
		cor_schedule_controlmsg_timer(nb);
		spin_unlock_bh(&(nb->cmsg_lock));
	} else {
		struct list_head *curr = cr->msgs.next;

		if (pingcookie != 0)
			cor_ping_sent(nb, pingcookie);

		/* prune cr->msgs: acks are never retransmitted; pongs only
		 * for active neighbors and up to a per-neighbor limit;
		 * conndata retransmits are handled by the conn retrans
		 * machinery instead
		 */
		while (curr != &(cr->msgs)) {
			struct cor_control_msg_out *cm = container_of(curr,
					struct cor_control_msg_out, lh);

			curr = curr->next;

			if (cm->type == MSGTYPE_ACK || unlikely(
					cm->type == MSGTYPE_PONG &&
					(nbstate != NEIGHBOR_STATE_ACTIVE))) {
				list_del(&(cm->lh));
				cor_free_control_msg(cm);
			} else if (unlikely(cm->type == MSGTYPE_PONG &&
					atomic_inc_return(
					&(nb->cmsg_pongs_retrans_cnt)) >
					MAX_PONG_CMSGS_RETRANS_PER_NEIGH)) {
				atomic_dec(&(nb->cmsg_pongs_retrans_cnt));
				list_del(&(cm->lh));
				cor_free_control_msg(cm);
			} else if (cm->type == MSGTYPE_CONNDATA) {
				cor_schedule_retransmit_conn(
						cm->msg.conn_data.cr, 0, 0);
				kref_put(&(cm->msg.conn_data.cr->ref),
						cor_free_connretrans);
				cm->msg.conn_data.cr = 0;
				kfree(cm->msg.conn_data.data_orig);
				list_del(&(cm->lh));
				cor_free_control_msg(cm);
			}
		}

		/* requeue the unsent remainder of a split conndata cmsg */
		if (split_conndata != 0) {
			BUG_ON(sc_sendlen == 0);
			BUG_ON(sc_sendlen >=
					split_conndata->msg.conn_data.datalen);

			split_conndata->msg.conn_data.seqno += sc_sendlen;
			split_conndata->msg.conn_data.data += sc_sendlen;
			split_conndata->msg.conn_data.datalen -= sc_sendlen;
			split_conndata->length = get_kp_conn_data_length(
					split_conndata->msg.conn_data.datalen);
			cor_enqueue_control_msg(split_conndata,
					ADDCMSG_SRC_SPLITCONNDATA);
		}

		if (list_empty(&(cr->msgs))) {
			kref_put(&(cr->ref), cor_free_control_retrans);
		} else {
			int fastack = (ackneeded == ACK_NEEDED_FAST);

			BUG_ON(ackneeded != ACK_NEEDED_FAST &&
					ackneeded != ACK_NEEDED_SLOW);
			cor_schedule_retransmit(cr, nb, fastack);
		}
	}

	return (rc == NET_XMIT_SUCCESS) ? QOS_RESUME_DONE : QOS_RESUME_CONG;
}
/* Allocate the skb and retransmit state for one kernel packet, write the
 * packet header (1 type byte + 48 bit seqno) and hand off to
 * __cor_send_messages_send().  On allocation failure the messages are
 * requeued and QOS_RESUME_CONG is returned.
 */
static int _cor_send_messages_send(struct cor_neighbor *nb, int ping,
		int initsession, struct list_head *cmsgs, int nbstate,
		__u32 length, __u64 seqno, unsigned long cmsg_send_start_j,
		ktime_t cmsg_send_start_kt, int *sent)
{
	struct sk_buff *skb;
	struct cor_control_retrans *cr;
	char *dst;
	int rc;

	BUG_ON(length > cor_mss_cmsg(nb));
	skb = cor_create_packet(nb, length + 7, GFP_ATOMIC);
	if (unlikely(skb == 0)) {
		printk(KERN_ERR "cor_send_messages(): cannot allocate skb (out of memory?)\n");

		cor_requeue_messages(cmsgs);
		return QOS_RESUME_CONG;
	}

	cr = kmem_cache_alloc(cor_controlretrans_slab, GFP_ATOMIC);
	if (unlikely(cr == 0)) {
		printk(KERN_ERR "cor_send_messages(): cannot allocate cor_control_retrans (out of memory?)\n");
		kfree_skb(skb);

		cor_requeue_messages(cmsgs);
		return QOS_RESUME_CONG;
	}

	memset(cr, 0, sizeof(struct cor_control_retrans));
	kref_init(&(cr->ref));
	cr->nb = nb;
	cr->seqno = seqno;
	INIT_LIST_HEAD(&(cr->msgs));

	/* 7 byte header; the type byte is overwritten with the real packet
	 * type by __cor_send_messages_send()
	 */
	dst = skb_put(skb, 7);
	BUG_ON(dst == 0);

	dst[0] = PACKET_TYPE_NONE;
	cor_put_u48(dst + 1, seqno);

	rc = __cor_send_messages_send(nb, skb, &(dst[0]), ping, initsession, cr,
			cmsgs, length, nbstate, cmsg_send_start_j,
			cmsg_send_start_kt, sent);

	BUG_ON(!list_empty(cmsgs));

	return rc;
}
1530 static unsigned long cor_get_cmsg_timeout(struct cor_control_msg_out *cm,
1531 int queue)
1533 if (cm->type == MSGTYPE_ACK) {
1534 if (cm->msg.ack.fast != 0) {
1535 BUG_ON(queue != CMSGQUEUE_ACK_FAST);
1536 return cm->time_added + msecs_to_jiffies(
1537 CMSG_MAXDELAY_ACK_FAST_MS);
1538 } else {
1539 BUG_ON(queue != CMSGQUEUE_ACK_SLOW);
1540 return cm->time_added + msecs_to_jiffies(
1541 CMSG_MAXDELAY_ACK_SLOW_MS);
1543 } else if (cm->type == MSGTYPE_ACK_CONN) {
1544 __u32 maxdelay_ms = 0;
1545 if (unlikely(queue == CMSGQUEUE_ACK_CONN_URGENT)) {
1546 maxdelay_ms = CMSG_MAXDELAY_ACKCONN_URGENT_MS;
1547 } else if (queue == CMSGQUEUE_ACK_CONN_LOWLAT) {
1548 maxdelay_ms = CMSG_MAXDELAY_ACKCONN_LOWLATENCY_MS;
1549 } else if (queue == CMSGQUEUE_ACK_CONN_HIGHLAT) {
1550 maxdelay_ms = CMSG_MAXDELAY_ACKCONN_HIGHLATENCY_MS;
1551 } else {
1552 BUG();
1554 return cm->time_added + msecs_to_jiffies(maxdelay_ms);
1555 } else if (cm->type == MSGTYPE_CONNDATA) {
1556 if (cm->msg.conn_data.highlatency != 0) {
1557 BUG_ON(queue != CMSGQUEUE_CONNDATA_HIGHLAT);
1558 return cm->time_added +
1559 msecs_to_jiffies(
1560 CMSG_MAXDELAY_CONNDATA_MS);
1561 } else {
1562 BUG_ON(queue != CMSGQUEUE_CONNDATA_LOWLAT);
1563 return cm->time_added;
1565 } else {
1566 BUG_ON(cm->type == MSGTYPE_PONG && queue != CMSGQUEUE_PONG);
1567 BUG_ON(cm->type != MSGTYPE_PONG && queue != CMSGQUEUE_OTHER);
1569 return cm->time_added +
1570 msecs_to_jiffies(CMSG_MAXDELAY_OTHER_MS);
1574 static void _cor_peek_message(struct cor_neighbor *nb_cmsglocked, int queue,
1575 struct cor_control_msg_out **currcm, unsigned long *currtimeout,
1576 __u32 **currlen)
1578 struct cor_control_msg_out *cm;
1579 unsigned long cmtimeout;
1581 struct list_head *queuelh;
1582 if (queue == CMSGQUEUE_PONG) {
1583 queuelh = &(nb_cmsglocked->cmsg_queue_pong);
1584 } else if (queue == CMSGQUEUE_ACK_FAST) {
1585 queuelh = &(nb_cmsglocked->cmsg_queue_ack_fast);
1586 } else if (queue == CMSGQUEUE_ACK_SLOW) {
1587 queuelh = &(nb_cmsglocked->cmsg_queue_ack_slow);
1588 } else if (queue == CMSGQUEUE_ACK_CONN_URGENT) {
1589 queuelh = &(nb_cmsglocked->cmsg_queue_ackconn_urgent);
1590 } else if (queue == CMSGQUEUE_ACK_CONN_LOWLAT) {
1591 queuelh = &(nb_cmsglocked->cmsg_queue_ackconn_lowlat);
1592 } else if (queue == CMSGQUEUE_ACK_CONN_HIGHLAT) {
1593 queuelh = &(nb_cmsglocked->cmsg_queue_ackconn_highlat);
1594 } else if (queue == CMSGQUEUE_CONNDATA_LOWLAT) {
1595 queuelh = &(nb_cmsglocked->cmsg_queue_conndata_lowlat);
1596 } else if (queue == CMSGQUEUE_CONNDATA_HIGHLAT) {
1597 queuelh = &(nb_cmsglocked->cmsg_queue_conndata_highlat);
1598 } else if (queue == CMSGQUEUE_OTHER) {
1599 queuelh = &(nb_cmsglocked->cmsg_queue_other);
1600 } else {
1601 BUG();
1604 if (list_empty(queuelh))
1605 return;
1607 cm = container_of(queuelh->next, struct cor_control_msg_out, lh);
1608 cmtimeout = cor_get_cmsg_timeout(cm, queue);
1610 BUG_ON(cm->nb != nb_cmsglocked);
1612 if (*currcm == 0 || (time_before(cmtimeout, *currtimeout) &&
1613 time_before(jiffies, *currtimeout))) {
1614 *currcm = cm;
1615 *currtimeout = cmtimeout;
1617 if (queue == CMSGQUEUE_PONG) {
1618 *currlen = &(nb_cmsglocked->cmsg_pongslength);
1619 } else {
1620 *currlen = &(nb_cmsglocked->cmsg_otherlength);
/* Find the most urgent control message across all queues of the neighbor;
 * *cm stays 0 if nothing is queued.  Unless the neighbor is ACTIVE, only
 * pongs are considered.  If for_timeout is set and conndata sending is
 * currently delayed (cmsg_delay_conndata), the conndata queues are
 * skipped.  Caller must hold nb->cmsg_lock.
 */
static void cor_peek_message(struct cor_neighbor *nb_cmsglocked, int nbstate,
		struct cor_control_msg_out **cm, unsigned long *cmtimeout,
		__u32 **len, int for_timeout)
{
	_cor_peek_message(nb_cmsglocked, CMSGQUEUE_PONG, cm, cmtimeout, len);
	if (likely(nbstate == NEIGHBOR_STATE_ACTIVE)) {
		_cor_peek_message(nb_cmsglocked, CMSGQUEUE_ACK_FAST, cm,
				cmtimeout, len);
		_cor_peek_message(nb_cmsglocked, CMSGQUEUE_ACK_SLOW, cm,
				cmtimeout, len);
		_cor_peek_message(nb_cmsglocked, CMSGQUEUE_ACK_CONN_URGENT, cm,
				cmtimeout, len);
		_cor_peek_message(nb_cmsglocked, CMSGQUEUE_ACK_CONN_LOWLAT, cm,
				cmtimeout, len);
		_cor_peek_message(nb_cmsglocked, CMSGQUEUE_ACK_CONN_HIGHLAT, cm,
				cmtimeout, len);
		if (!for_timeout || atomic_read(
				&(nb_cmsglocked->cmsg_delay_conndata)) == 0) {
			_cor_peek_message(nb_cmsglocked,
					CMSGQUEUE_CONNDATA_LOWLAT,
					cm, cmtimeout, len);
			_cor_peek_message(nb_cmsglocked,
					CMSGQUEUE_CONNDATA_HIGHLAT,
					cm, cmtimeout, len);
		}
		_cor_peek_message(nb_cmsglocked, CMSGQUEUE_OTHER, cm, cmtimeout,
				len);
	}
}
1655 static unsigned long cor_get_cmsg_timer_timeout(
1656 struct cor_neighbor *nb_cmsglocked, int nbstate)
1658 unsigned long pingtimeout = cor_get_next_ping_time(nb_cmsglocked);
1660 struct cor_control_msg_out *cm = 0;
1661 unsigned long cmtimeout;
1662 __u32 *len;
1664 cor_peek_message(nb_cmsglocked, nbstate, &cm, &cmtimeout, &len, 1);
1666 if (cm != 0) {
1667 unsigned long jiffies_tmp = jiffies;
1669 if (time_before(cmtimeout, jiffies_tmp))
1670 return jiffies_tmp;
1671 if (time_before(cmtimeout, pingtimeout))
1672 return cmtimeout;
1675 return pingtimeout;
1678 static void _cor_dequeue_messages(struct cor_neighbor *nb_cmsglocked,
1679 int nbstate, __u32 targetmss, __u32 *length,
1680 struct list_head *cmsgs)
1682 while (1) {
1683 __u32 spaceleft = targetmss - *length;
1684 struct cor_control_msg_out *cm = 0;
1685 unsigned long cmtimeout;
1686 __u32 *len;
1688 cor_peek_message(nb_cmsglocked, nbstate, &cm, &cmtimeout, &len,
1691 if (unlikely(cm == 0))
1692 break;
1694 BUG_ON(len == 0);
1696 if (cm->length > spaceleft) {
1697 if (cm->type == MSGTYPE_CONNDATA) {
1698 BUG_ON(*length == 0 && spaceleft <
1699 get_kp_conn_data_length(1));
1701 if (spaceleft < get_kp_conn_data_length(1) ||
1702 *length > (targetmss/4)*3)
1703 break;
1704 } else {
1705 BUG_ON(*length == 0);
1706 break;
1710 list_del(&(cm->lh));
1711 *len -= cm->length;
1713 if (cm->type == MSGTYPE_ACK_CONN)
1714 list_del(&(cm->msg.ack_conn.conn_acks));
1715 if (unlikely(cm->type == MSGTYPE_PONG)) {
1716 BUG_ON(cm->nb->cmsg_pongscnt == 0);
1717 cm->nb->cmsg_pongscnt--;
1720 if (unlikely(cm->type == MSGTYPE_RESET_CONN)) {
1721 BUG_ON(cm->msg.reset_conn.in_pending_conn_resets == 0);
1722 rb_erase(&(cm->msg.reset_conn.rbn),
1723 &(cm->nb->pending_conn_resets_rb));
1724 cm->msg.reset_conn.in_pending_conn_resets = 0;
1725 kref_put(&(cm->ref), cor_kreffree_bug);
1728 BUG_ON(*length + cm->length < *length);
1729 if (cm->length > targetmss - *length) {
1730 BUG_ON(*length >= targetmss);
1731 BUG_ON(cm->type != MSGTYPE_CONNDATA);
1732 *length = targetmss;
1733 } else {
1734 *length += cm->length;
1737 list_add_tail(&(cm->lh), cmsgs);
/* Total length of everything that would be sent to nb right now, including
 * implicitly generated commands (set_max_cmsg_delay, set_rcvmtu, ping,
 * init_session).  The length of those implicit commands is additionally
 * accumulated into *extralength.
 */
static __u32 cor_get_total_messages_length(struct cor_neighbor *nb, int ping,
		int initsession, int nbstate, int *extralength)
{
	/* pongs are sent in all neighbor states */
	__u32 length = nb->cmsg_pongslength;

	if (likely(nbstate == NEIGHBOR_STATE_ACTIVE)) {
		length += nb->cmsg_otherlength;

		if (unlikely(nb->max_cmsg_delay_sent == 0)) {
			length += KP_MISC_SET_MAX_CMSG_DELAY_CMDLEN;
			*extralength += KP_MISC_SET_MAX_CMSG_DELAY_CMDLEN;
		}
	}

	if (unlikely(atomic_read(&(nb->rcvmtu_sendneeded)) != 0)) {
		length += KP_MISC_SET_RECEIVE_MTU_CMDLEN;
		*extralength += KP_MISC_SET_RECEIVE_MTU_CMDLEN;
	}

	if (ping == TIMETOSENDPING_FORCE ||
			(length > 0 && ping != TIMETOSENDPING_NO)) {
		length += KP_MISC_PING_CMDLEN;
		*extralength += KP_MISC_PING_CMDLEN;

		/* init_session is only sent together with a ping */
		if (unlikely(initsession)) {
			length += KP_MISC_INIT_SESSION_CMDLEN;
			*extralength += KP_MISC_INIT_SESSION_CMDLEN;
		}
	}

	return length;
}
/* Dequeue the messages for one kernel packet of at most targetmss bytes.
 * Returns 1 if nothing should be sent now (nothing queued, or the packet
 * would be only partially filled and no deadline has expired yet);
 * returns 0 with the selected messages on *cmsgs and their total length
 * in *length.  Caller must hold nb->cmsg_lock.
 */
static int cor_dequeue_messages(struct cor_neighbor *nb_cmsglocked, int ping,
		int initsession, int nbstate, __u32 targetmss,
		__u32 *length, struct list_head *cmsgs)
{
	__u32 extralength = 0;
	__u32 totallength;

	int cmsgqueue_nonpong_empty = (
			list_empty(&(nb_cmsglocked->cmsg_queue_ack_fast)) &&
			list_empty(&(nb_cmsglocked->cmsg_queue_ack_slow)) &&
			list_empty(&(nb_cmsglocked->cmsg_queue_ackconn_urgent)) &&
			list_empty(&(nb_cmsglocked->cmsg_queue_ackconn_lowlat)) &&
			list_empty(&(nb_cmsglocked->cmsg_queue_ackconn_highlat)) &&
			list_empty(&(nb_cmsglocked->cmsg_queue_conndata_lowlat)) &&
			list_empty(&(nb_cmsglocked->cmsg_queue_conndata_highlat)) &&
			list_empty(&(nb_cmsglocked->cmsg_queue_other)));

	/* length accounting must agree with the queue states */
	BUG_ON(list_empty(&(nb_cmsglocked->cmsg_queue_pong)) &&
			nb_cmsglocked->cmsg_pongslength != 0);
	BUG_ON(!list_empty(&(nb_cmsglocked->cmsg_queue_pong)) &&
			nb_cmsglocked->cmsg_pongslength == 0);
	BUG_ON(cmsgqueue_nonpong_empty &&
			nb_cmsglocked->cmsg_otherlength != 0);
	BUG_ON(!cmsgqueue_nonpong_empty &&
			nb_cmsglocked->cmsg_otherlength == 0);

	totallength = cor_get_total_messages_length(nb_cmsglocked, ping,
			initsession, nbstate, &extralength);

	if (totallength == 0)
		return 1;

	/* wait for a full packet unless a send is already due */
	if (totallength < targetmss && ping != TIMETOSENDPING_FORCE &&
			time_after(cor_get_cmsg_timer_timeout(nb_cmsglocked,
			nbstate), jiffies))
		return 1;

	*length = extralength;

	_cor_dequeue_messages(nb_cmsglocked, nbstate, targetmss, length, cmsgs);

	BUG_ON(*length == 0);
	BUG_ON(*length > targetmss);

	return 0;
}
1821 static struct cor_control_retrans *cor_get_next_timeouted_retrans(
1822 struct cor_neighbor *nb_retranslocked)
1824 if (list_empty(&(nb_retranslocked->retrans_fast_list)) == 0) {
1825 struct cor_control_retrans *cr = container_of(
1826 nb_retranslocked->retrans_fast_list.next,
1827 struct cor_control_retrans, timeout_list);
1828 BUG_ON(cr->nb != nb_retranslocked);
1830 if (time_before_eq(cr->timeout, jiffies)) {
1831 return cr;
1835 if (list_empty(&(nb_retranslocked->retrans_slow_list)) == 0) {
1836 struct cor_control_retrans *cr = container_of(
1837 nb_retranslocked->retrans_slow_list.next,
1838 struct cor_control_retrans, timeout_list);
1839 BUG_ON(cr->nb != nb_retranslocked);
1841 if (time_before_eq(cr->timeout, jiffies)) {
1842 return cr;
1846 return 0;
/* Requeue all timed-out control retransmits of nb for sending and rearm
 * the retransmit timer if retransmits remain pending.
 */
static void cor_add_timeouted_retrans(struct cor_neighbor *nb)
{
	spin_lock_bh(&(nb->retrans_lock));

	while (1) {
		struct cor_control_retrans *cr =
				cor_get_next_timeouted_retrans(nb);

		if (cr == 0)
			break;

		list_del(&(cr->timeout_list));
		rb_erase(&(cr->rbn), &(nb->kp_retransmits_rb));

		cor_requeue_control_retrans(cr);

		kref_put(&(cr->ref), cor_kreffree_bug); /* list_del */
		kref_put(&(cr->ref), cor_free_control_retrans); /* rb */
	}

	if (list_empty(&(nb->retrans_fast_list)) == 0 ||
			list_empty(&(nb->retrans_slow_list)) == 0) {
		if (mod_timer(&(nb->retrans_timer),
				cor_get_retransmit_timeout(nb)) == 0) {
			/* timer was not pending - hold a neighbor ref for it */
			cor_nb_kref_get(nb, "retransmit_timer");
		}
	}

	spin_unlock_bh(&(nb->retrans_lock));
}
1880 static void _cor_delete_all_cmsgs(struct list_head *cmsgs)
1882 while (!list_empty(cmsgs)) {
1883 struct cor_control_msg_out *cm = container_of(cmsgs->next,
1884 struct cor_control_msg_out, lh);
1886 list_del(&(cm->lh));
1888 if (cm->type == MSGTYPE_CONNDATA) {
1889 cor_schedule_retransmit_conn(cm->msg.conn_data.cr, 0,
1891 kfree(cm->msg.conn_data.data_orig);
1894 cor_free_control_msg(cm);
1898 static void cor_delete_all_cmsgs(struct cor_neighbor *nb)
1900 while (1) {
1901 struct list_head cmsgs;
1902 __u32 length = 0;
1904 INIT_LIST_HEAD(&cmsgs);
1906 spin_lock_bh(&(nb->cmsg_lock));
1907 _cor_dequeue_messages(nb, NEIGHBOR_STATE_ACTIVE, 65536, &length,
1908 &cmsgs);
1909 spin_unlock_bh(&(nb->cmsg_lock));
1911 if (list_empty(&cmsgs))
1912 break;
1914 _cor_delete_all_cmsgs(&cmsgs);
/* Reset trgt_out if it has been inactive for too long.  Busy conns time
 * out after CONN_BUSY_INACTIVITY_TIMEOUT_SEC; other conns after
 * CONN_ACTIVITY_UPDATEINTERVAL_SEC + CONN_INACTIVITY_TIMEOUT_SEC.
 * Returns 1 if the conn was reset, 0 if it is still active, already
 * reset, or the reset message could not be sent.
 */
static int cor_reset_timeouted_conn(struct cor_neighbor *nb,
		struct cor_conn *trgt_out)
{
	struct cor_conn_bidir *cnb = cor_get_conn_bidir(trgt_out);
	struct cor_conn *src_in = cor_get_conn_reversedir(trgt_out);

	int resetted = 0;

	/* lock both directions, client side first */
	spin_lock_bh(&(cnb->cli.rcv_lock));
	spin_lock_bh(&(cnb->srv.rcv_lock));

	BUG_ON(trgt_out->targettype != TARGET_OUT);
	BUG_ON(trgt_out->trgt.out.nb != nb);

	if (unlikely(trgt_out->isreset != 0))
		goto unlock;

	if (likely(trgt_out->trgt.out.in_nb_busy_list != 0)) {
		if (likely(time_before(jiffies,
				trgt_out->trgt.out.jiffies_last_act +
				CONN_BUSY_INACTIVITY_TIMEOUT_SEC * HZ))) {
			goto unlock;
		}
	} else {
		if (likely(time_before(jiffies,
				trgt_out->trgt.out.jiffies_last_act +
				CONN_ACTIVITY_UPDATEINTERVAL_SEC * HZ +
				CONN_INACTIVITY_TIMEOUT_SEC * HZ))) {
			goto unlock;
		}
	}

	/* tell the neighbor first; only reset locally if that succeeded */
	resetted = (cor_send_reset_conn(nb, cor_get_connid_reverse(
			src_in->src.in.conn_id), 1) == 0);
	if (unlikely(resetted == 0))
		goto unlock;

	BUG_ON(trgt_out->isreset != 0);
	trgt_out->isreset = 1;

	cor_reset_conn_locked(cnb);

unlock:
	spin_unlock_bh(&(cnb->srv.rcv_lock));
	spin_unlock_bh(&(cnb->cli.rcv_lock));

	return resetted;
}
/* Reset timed-out conns from the head of nb_snd_conn_list, stopping at
 * the first conn that was not reset (presumably the list is ordered by
 * last activity -- TODO confirm).  The iteration cap bounds the work done
 * in one call.
 */
static void _cor_reset_timeouted_conns(struct cor_neighbor *nb,
		struct list_head *nb_snd_conn_list)
{
	int i;

	for (i = 0; i < 10000; i++) {
		unsigned long iflags;
		struct cor_conn *trgt_out;

		int resetted;

		spin_lock_irqsave(&(nb->conn_list_lock), iflags);

		if (list_empty(nb_snd_conn_list)) {
			spin_unlock_irqrestore(&(nb->conn_list_lock), iflags);
			break;
		}

		trgt_out = container_of(nb_snd_conn_list->next, struct cor_conn,
				trgt.out.nb_list);
		/* hold a ref while working outside conn_list_lock */
		cor_conn_kref_get(trgt_out, "stack");

		spin_unlock_irqrestore(&(nb->conn_list_lock), iflags);

		resetted = cor_reset_timeouted_conn(nb, trgt_out);

		cor_conn_kref_put(trgt_out, "stack");

		if (likely(resetted == 0))
			break;
	}
}
/* Reset timed-out conns towards nb: busy conns first, then idle conns. */
static void cor_reset_timeouted_conns(struct cor_neighbor *nb)
{
	_cor_reset_timeouted_conns(nb, &(nb->snd_conn_busy_list));
	_cor_reset_timeouted_conns(nb, &(nb->snd_conn_idle_list));
}
/**
 * cor_send_messages() - send all due control messages to a neighbor
 *
 * Must not be called by more than one thread at the same time, because
 * 1) re-adding a cor_control_msg_out may reorder the queues
 * 2) multiple pings may be sent
 *
 * Returns QOS_RESUME_DONE when everything due was sent, or the rc of a
 * failed _cor_send_messages_send() (e.g. QOS_RESUME_CONG).
 */
int cor_send_messages(struct cor_neighbor *nb, unsigned long cmsg_send_start_j,
		ktime_t cmsg_send_start_kt, int *sent)
{
	int rc = QOS_RESUME_DONE;
	int ping;
	int initsession;
	__u32 targetmss = cor_mss_cmsg(nb);

	int nbstate = cor_get_neigh_state(nb);

	if (likely(nbstate == NEIGHBOR_STATE_ACTIVE))
		cor_reset_timeouted_conns(nb);

	if (unlikely(nbstate == NEIGHBOR_STATE_KILLED)) {
		/* neighbor is gone - drop all pending send state */
		spin_lock_bh(&(nb->retrans_lock));
		cor_empty_retrans_queue(nb);
		spin_unlock_bh(&(nb->retrans_lock));

		cor_delete_all_cmsgs(nb);
		return QOS_RESUME_DONE;
	}

	ping = cor_time_to_send_ping(nb);

	spin_lock_bh(&(nb->cmsg_lock));

	if (nb->add_retrans_needed != 0) {
		nb->add_retrans_needed = 0;
		/* drop cmsg_lock: cor_add_timeouted_retrans takes retrans_lock */
		spin_unlock_bh(&(nb->cmsg_lock));
		cor_add_timeouted_retrans(nb);
		spin_lock_bh(&(nb->cmsg_lock));
	}

	initsession = unlikely(atomic_read(&(nb->sessionid_snd_needed)) != 0);

	while (1) {
		struct list_head cmsgs;
		__u32 length = 0;
		__u64 seqno;

		INIT_LIST_HEAD(&cmsgs);

		if (cor_dequeue_messages(nb, ping, initsession, nbstate,
				targetmss, &length, &cmsgs) != 0) {
			/* nothing (more) to send right now */
			cor_schedule_controlmsg_timer(nb);
			spin_unlock_bh(&(nb->cmsg_lock));
			return QOS_RESUME_DONE;
		}

		nb->kpacket_seqno++;
		seqno = nb->kpacket_seqno;

		spin_unlock_bh(&(nb->cmsg_lock));

		rc = _cor_send_messages_send(nb, ping, initsession, &cmsgs,
				nbstate, length, seqno, cmsg_send_start_j,
				cmsg_send_start_kt, sent);

		if (rc != QOS_RESUME_DONE)
			return rc;

		/* only the first packet carries ping/init_session */
		ping = 0;
		initsession = 0;

		spin_lock_bh(&(nb->cmsg_lock));
	}
}
2079 static unsigned long cor_calc_cmsg_send_start_j(unsigned long cmsg_timer_timeout)
2081 unsigned long jiffies_tmp = jiffies;
2082 if (unlikely(time_after(cmsg_timer_timeout, jiffies_tmp)))
2083 return jiffies_tmp;
2084 else
2085 return cmsg_timer_timeout;
2088 static ktime_t cor_calc_cmsg_send_start_kt(unsigned long cmsg_timer_timeout)
2090 ktime_t now = ktime_get();
2091 unsigned long jiffies_tmp = jiffies;
2093 unsigned long jiffies_delayed;
2094 if (unlikely(time_after(cmsg_timer_timeout, jiffies_tmp))) {
2095 jiffies_delayed = 0;
2096 } else {
2097 jiffies_delayed = jiffies_tmp - cmsg_timer_timeout;
2098 if (unlikely(jiffies_delayed > HZ/10)) {
2099 jiffies_delayed = HZ/10;
2103 return ns_to_ktime(ktime_to_ns(now) -
2104 1000LL * jiffies_to_usecs(jiffies_delayed));
/* Timer callback: hand the neighbor's control message work to the qos
 * queue.  The send start timestamps are derived from the originally
 * scheduled timeout (see cor_calc_cmsg_send_start_*).  Drops the neighbor
 * ref taken when the timer was armed.
 */
void cor_controlmsg_timerfunc(struct timer_list *cmsg_timer)
{
	struct cor_neighbor *nb = container_of(cmsg_timer,
			struct cor_neighbor, cmsg_timer);
	unsigned long cmsg_timer_timeout = (unsigned long)
			atomic64_read(&(nb->cmsg_timer_timeout));
	unsigned long cmsg_send_start_j = cor_calc_cmsg_send_start_j(
			cmsg_timer_timeout);
	ktime_t cmsg_send_start_kt = cor_calc_cmsg_send_start_kt(
			cmsg_timer_timeout);

	cor_qos_enqueue(nb->queue, &(nb->rb_kp), cmsg_send_start_j,
			cmsg_send_start_kt, QOS_CALLER_KPACKET, 0);
	cor_nb_kref_put(nb, "controlmsg_timer");
}
2123 static int cor_cmsg_full_packet(struct cor_neighbor *nb, int nbstate)
2125 __u32 extralength = 0;
2126 int ping = cor_time_to_send_ping(nb);
2127 int initsession = unlikely(atomic_read(&(nb->sessionid_snd_needed)) !=
2129 __u32 len = cor_get_total_messages_length(nb, ping, initsession,
2130 nbstate, &extralength);
2132 if (len == 0)
2133 return 0;
2134 if (len < cor_mss_cmsg(nb))
2135 return 0;
2137 return 1;
/* (Re)arm the control message timer of nb_cmsglocked, or enqueue it for
 * sending right away if sending is already due.  Caller must hold
 * nb->cmsg_lock.
 */
void cor_schedule_controlmsg_timer(struct cor_neighbor *nb_cmsglocked)
{
	unsigned long timeout;
	int nbstate = cor_get_neigh_state(nb_cmsglocked);

	if (unlikely(nbstate == NEIGHBOR_STATE_KILLED))
		goto now;

	if (unlikely(atomic_read(&(nb_cmsglocked->rcvmtu_sendneeded)) != 0))
		goto now;

	/* a bulk readd is in progress; the readder reschedules afterwards */
	if (atomic_read(&(nb_cmsglocked->cmsg_bulk_readds)) != 0)
		return;

	if (cor_cmsg_full_packet(nb_cmsglocked, nbstate))
		goto now;

	if (nb_cmsglocked->add_retrans_needed != 0)
		goto now;

	timeout = cor_get_cmsg_timer_timeout(nb_cmsglocked, nbstate);

	/* the if (0) only exists so "goto now" skips the timeout handling */
	if (0) {
now:
		cor_qos_enqueue(nb_cmsglocked->queue, &(nb_cmsglocked->rb_kp),
				jiffies, ktime_get(), QOS_CALLER_KPACKET, 0);
	} else if (time_before_eq(timeout, jiffies)) {
		unsigned long cmsg_send_start_j = cor_calc_cmsg_send_start_j(
				timeout);
		ktime_t cmsg_send_start_kt = cor_calc_cmsg_send_start_kt(
				timeout);

		cor_qos_enqueue(nb_cmsglocked->queue, &(nb_cmsglocked->rb_kp),
				cmsg_send_start_j, cmsg_send_start_kt,
				QOS_CALLER_KPACKET, 0);
	} else {
		/* publish the timeout before arming the timer so the
		 * timerfunc reads the value belonging to this arming
		 */
		atomic64_set(&(nb_cmsglocked->cmsg_timer_timeout), timeout);
		barrier();
		if (mod_timer(&(nb_cmsglocked->cmsg_timer), timeout) == 0) {
			/* timer was not pending - take a neighbor ref for it */
			cor_nb_kref_get(nb_cmsglocked, "controlmsg_timer");
		}
	}
}
/* Insert a RESET_CONN cmsg into nb's rb-tree of pending conn resets,
 * keyed by conn_id.  Returns 1 (without inserting) if a reset for the
 * same conn_id is already pending, 0 after successful insertion.  The
 * tree holds an extra ref on ins while it is linked.
 */
static int cor_insert_pending_conn_resets(struct cor_control_msg_out *ins)
{
	struct cor_neighbor *nb = ins->nb;
	__u32 conn_id = ins->msg.reset_conn.conn_id;

	struct rb_root *root;
	struct rb_node **p;
	struct rb_node *parent = 0;

	BUG_ON(nb == 0);
	BUG_ON(ins->msg.reset_conn.in_pending_conn_resets != 0);

	root = &(nb->pending_conn_resets_rb);
	p = &(root->rb_node);

	while ((*p) != 0) {
		struct cor_control_msg_out *cm = container_of(*p,
				struct cor_control_msg_out,
				msg.reset_conn.rbn);
		__u32 cm_connid = cm->msg.reset_conn.conn_id;

		BUG_ON(cm->nb != ins->nb);
		BUG_ON(cm->type != MSGTYPE_RESET_CONN);

		parent = *p;
		if (conn_id == cm_connid) {
			/* duplicate reset - do not insert */
			return 1;
		} else if (conn_id < cm_connid) {
			p = &(*p)->rb_left;
		} else if (conn_id > cm_connid) {
			p = &(*p)->rb_right;
		} else {
			BUG();
		}
	}

	kref_get(&(ins->ref));
	rb_link_node(&(ins->msg.reset_conn.rbn), parent, p);
	rb_insert_color(&(ins->msg.reset_conn.rbn), root);
	ins->msg.reset_conn.in_pending_conn_resets = 1;

	return 0;
}
2227 static void cor_free_oldest_pong(struct cor_neighbor *nb)
2229 struct cor_control_msg_out *cm = container_of(nb->cmsg_queue_pong.next,
2230 struct cor_control_msg_out, lh);
2232 BUG_ON(list_empty(&(nb->cmsg_queue_pong)));
2233 BUG_ON(unlikely(cm->type != MSGTYPE_PONG));
2235 list_del(&(cm->lh));
2236 nb->cmsg_pongslength -= cm->length;
2237 BUG_ON(nb->cmsg_pongscnt == 0);
2238 cm->nb->cmsg_pongscnt--;
2239 cor_free_control_msg(cm);
2242 static struct list_head * _cor_enqueue_control_msg_getqueue(
2243 struct cor_control_msg_out *cm)
2245 if (cm->type == MSGTYPE_ACK) {
2246 if (cm->msg.ack.fast != 0) {
2247 return &(cm->nb->cmsg_queue_ack_fast);
2248 } else {
2249 return &(cm->nb->cmsg_queue_ack_slow);
2251 } else if (cm->type == MSGTYPE_ACK_CONN) {
2252 if (unlikely(cm->msg.ack_conn.queue == CMSGQUEUE_ACK_CONN_URGENT)) {
2253 return &(cm->nb->cmsg_queue_ackconn_urgent);
2254 } else if (cm->msg.ack_conn.queue == CMSGQUEUE_ACK_CONN_LOWLAT) {
2255 return &(cm->nb->cmsg_queue_ackconn_lowlat);
2256 } else if (cm->msg.ack_conn.queue == CMSGQUEUE_ACK_CONN_HIGHLAT) {
2257 return &(cm->nb->cmsg_queue_ackconn_highlat);
2258 } else {
2259 BUG();
2261 } else if (cm->type == MSGTYPE_CONNDATA) {
2262 if (cm->msg.conn_data.highlatency != 0) {
2263 return &(cm->nb->cmsg_queue_conndata_highlat);
2264 } else {
2265 return &(cm->nb->cmsg_queue_conndata_lowlat);
2267 } else {
2268 return &(cm->nb->cmsg_queue_other);
/* Put cm onto the matching queue of its neighbor and update the length
 * accounting.  Returns 1 if the message was freed instead of queued
 * (pong limit exceeded on readd, or duplicate reset_conn), 0 otherwise.
 * Caller must hold cm->nb->cmsg_lock.
 */
static int _cor_enqueue_control_msg(struct cor_control_msg_out *cm, int src)
{
	if (unlikely(cm->type == MSGTYPE_PONG)) {
		BUG_ON(src == ADDCMSG_SRC_SPLITCONNDATA);

		if (cm->nb->cmsg_pongscnt >= MAX_PONG_CMSGS_PER_NEIGH) {
			if (src != ADDCMSG_SRC_NEW) {
				/* readded pong over the limit - drop it */
				BUG_ON(cm->nb->cmsg_pongscnt == 0);
				cm->nb->cmsg_pongscnt--;
				cor_free_control_msg(cm);
				return 1;
			} else {
				/* make room for the new pong */
				cor_free_oldest_pong(cm->nb);
			}
		}

		cm->nb->cmsg_pongscnt++;
		cm->nb->cmsg_pongslength += cm->length;

		/* readds go to the head, new pongs to the tail */
		if (src != ADDCMSG_SRC_NEW) {
			list_add(&(cm->lh), &(cm->nb->cmsg_queue_pong));
		} else {
			list_add_tail(&(cm->lh), &(cm->nb->cmsg_queue_pong));
		}

		return 0;
	} else if (unlikely(cm->type == MSGTYPE_RESET_CONN)) {
		if (cor_insert_pending_conn_resets(cm) != 0) {
			/* a reset for this conn_id is already pending */
			cm->type = 0;
			cor_free_control_msg(cm);
			return 1;
		}
	}

	cm->nb->cmsg_otherlength += cm->length;
	if (src == ADDCMSG_SRC_NEW) {
		list_add_tail(&(cm->lh), _cor_enqueue_control_msg_getqueue(cm));
	} else {
		BUG_ON(src == ADDCMSG_SRC_SPLITCONNDATA &&
				cm->type != MSGTYPE_CONNDATA);
		BUG_ON(src == ADDCMSG_SRC_READD &&
				cm->type == MSGTYPE_ACK_CONN);

		list_add(&(cm->lh), _cor_enqueue_control_msg_getqueue(cm));
	}

	return 0;
}
2321 static void cor_enqueue_control_msg(struct cor_control_msg_out *cm, int src)
2323 struct cor_neighbor *nb;
2325 BUG_ON(cm == 0);
2326 nb = cm->nb;
2327 BUG_ON(nb == 0);
2330 if (src == ADDCMSG_SRC_NEW)
2331 cm->time_added = jiffies;
2333 spin_lock_bh(&(nb->cmsg_lock));
2335 if (_cor_enqueue_control_msg(cm, src) != 0)
2336 goto out;
2338 if (src != ADDCMSG_SRC_READD && src != ADDCMSG_SRC_RETRANS)
2339 cor_schedule_controlmsg_timer(nb);
2341 out:
2342 spin_unlock_bh(&(nb->cmsg_lock));
/* Request that a set_rcvmtu command is sent to nb and kick the cmsg timer. */
void cor_send_rcvmtu(struct cor_neighbor *nb)
{
	atomic_set(&(nb->rcvmtu_sendneeded), 1);

	spin_lock_bh(&(nb->cmsg_lock));
	cor_schedule_controlmsg_timer(nb);
	spin_unlock_bh(&(nb->cmsg_lock));
}
2354 void cor_send_pong(struct cor_neighbor *nb, __u32 cookie, ktime_t ping_rcvtime)
2356 struct cor_control_msg_out *cm = _cor_alloc_control_msg(nb);
2358 if (unlikely(cm == 0))
2359 return;
2361 cm->nb = nb;
2362 cm->type = MSGTYPE_PONG;
2363 cm->msg.pong.cookie = cookie;
2364 cm->msg.pong.type = MSGTYPE_PONG_TIMEENQUEUED;
2365 cm->msg.pong.ping_rcvtime = ping_rcvtime;
2366 cm->msg.pong.time_enqueued = ktime_get();
2367 cm->length = 13;
2368 cor_enqueue_control_msg(cm, ADDCMSG_SRC_NEW);
2371 void cor_send_ack(struct cor_neighbor *nb, __u64 seqno, __u8 fast)
2373 struct cor_control_msg_out *cm = cor_alloc_control_msg(nb,
2374 ACM_PRIORITY_HIGH);
2376 if (unlikely(cm == 0))
2377 return;
2379 cm->nb = nb;
2380 cm->type = MSGTYPE_ACK;
2381 cm->msg.ack.seqno = seqno;
2382 cm->msg.ack.fast = fast;
2383 cm->length = 7;
2384 cor_enqueue_control_msg(cm, ADDCMSG_SRC_NEW);
2387 static __u8 get_queue_for_ackconn(struct cor_conn *src_in_lx)
2389 if (src_in_lx->is_highlatency != 0) {
2390 if (unlikely(cor_ackconn_urgent(src_in_lx))) {
2391 return CMSGQUEUE_ACK_CONN_LOWLAT;
2392 } else {
2393 return CMSGQUEUE_ACK_CONN_HIGHLAT;
2395 } else {
2396 if (unlikely(cor_ackconn_urgent(src_in_lx))) {
2397 return CMSGQUEUE_ACK_CONN_URGENT;
2398 } else {
2399 return CMSGQUEUE_ACK_CONN_LOWLAT;
2404 static void cor_set_ooolen_flags(struct cor_control_msg_out *cm)
2406 cm->msg.ack_conn.flags = (cm->msg.ack_conn.flags &
2407 (~KP_ACK_CONN_FLAGS_OOO));
2408 cm->msg.ack_conn.flags = (cm->msg.ack_conn.flags |
2409 cor_ooolen_to_flags(cm->msg.ack_conn.length));
/*
 * Unlink a scheduled ack_conn from both the neighbor cmsg queue and the
 * per-conn acks_pending list, drop the conn reference it holds, and free
 * it. cmsg_lock must be held.
 */
static void cor_remove_pending_ackconn(struct cor_control_msg_out *cm)
{
	cm->nb->cmsg_otherlength -= cm->length;
	list_del(&(cm->lh));

	list_del(&(cm->msg.ack_conn.conn_acks));
	cor_conn_kref_put(cm->msg.ack_conn.src_in,
			"cor_control_msg_out ack_conn");
	cm->msg.ack_conn.src_in = 0;

	/* type 0 so cor_free_control_msg does no type-specific cleanup */
	cm->type = 0;
	cor_free_control_msg(cm);
}
2427 /* cmsg_lock must be held */
2428 static void cor_recalc_scheduled_ackconn_size(struct cor_control_msg_out *cm)
2430 cm->nb->cmsg_otherlength -= cm->length;
2431 cm->length = 5 + cor_ack_conn_len(cm->msg.ack_conn.flags);
2432 cm->nb->cmsg_otherlength += cm->length;
2435 /* cmsg_lock must be held */
2436 static int _cor_try_merge_ackconn(struct cor_conn *src_in_l,
2437 struct cor_control_msg_out *fromcm,
2438 struct cor_control_msg_out *tocm, int from_newack)
2440 if (cor_ooolen(fromcm->msg.ack_conn.flags) != 0 &&
2441 cor_ooolen(tocm->msg.ack_conn.flags) != 0) {
2442 __u64 tocmseqno = tocm->msg.ack_conn.seqno_ooo;
2443 __u64 tocmlength = tocm->msg.ack_conn.length;
2444 __u64 fromcmseqno = fromcm->msg.ack_conn.seqno_ooo;
2445 __u64 fromcmlength = fromcm->msg.ack_conn.length;
2447 if (cor_seqno_eq(tocmseqno, fromcmseqno)) {
2448 if (fromcmlength > tocmlength)
2449 tocm->msg.ack_conn.length = fromcmlength;
2450 } else if (cor_seqno_after(fromcmseqno, tocmseqno) &&
2451 cor_seqno_before_eq(fromcmseqno, tocmseqno +
2452 tocmlength)) {
2453 __u64 len = cor_seqno_clean(fromcmseqno + fromcmlength -
2454 tocmseqno);
2455 BUG_ON(len > U32_MAX);
2456 tocm->msg.ack_conn.length = (__u32) len;
2457 } else if (cor_seqno_before(fromcmseqno, tocmseqno) &&
2458 cor_seqno_after_eq(fromcmseqno, tocmseqno)) {
2459 __u64 len = cor_seqno_clean(tocmseqno + tocmlength -
2460 fromcmseqno);
2461 BUG_ON(len > U32_MAX);
2462 tocm->msg.ack_conn.seqno_ooo = fromcmseqno;
2463 tocm->msg.ack_conn.length = (__u32) len;
2464 } else {
2465 return 1;
2467 cor_set_ooolen_flags(tocm);
2470 if ((fromcm->msg.ack_conn.flags &
2471 KP_ACK_CONN_FLAGS_SEQNO) != 0) {
2472 if ((tocm->msg.ack_conn.flags & KP_ACK_CONN_FLAGS_SEQNO) == 0)
2473 goto setseqno;
2475 BUG_ON(cor_seqno_eq(fromcm->msg.ack_conn.ack_seqno,
2476 tocm->msg.ack_conn.ack_seqno));
2477 if (cor_seqno_after_eq(tocm->msg.ack_conn.ack_seqno,
2478 fromcm->msg.ack_conn.ack_seqno)) {
2479 BUG_ON(cor_seqno_after(fromcm->msg.ack_conn.seqno,
2480 tocm->msg.ack_conn.seqno));
2481 goto skipseqno;
2484 BUG_ON(cor_seqno_before(fromcm->msg.ack_conn.seqno,
2485 tocm->msg.ack_conn.seqno));
2487 setseqno:
2488 tocm->msg.ack_conn.flags = (tocm->msg.ack_conn.flags |
2489 KP_ACK_CONN_FLAGS_SEQNO);
2490 tocm->msg.ack_conn.seqno = fromcm->msg.ack_conn.seqno;
2491 tocm->msg.ack_conn.ack_seqno = fromcm->msg.ack_conn.ack_seqno;
2493 skipseqno:
2494 if ((fromcm->msg.ack_conn.flags &
2495 KP_ACK_CONN_FLAGS_WINDOW) != 0)
2496 tocm->msg.ack_conn.flags = (tocm->msg.ack_conn.flags |
2497 KP_ACK_CONN_FLAGS_WINDOW);
2501 if (cor_ooolen(fromcm->msg.ack_conn.flags) != 0) {
2502 tocm->msg.ack_conn.seqno_ooo = fromcm->msg.ack_conn.seqno_ooo;
2503 tocm->msg.ack_conn.length = fromcm->msg.ack_conn.length;
2504 cor_set_ooolen_flags(tocm);
2507 if ((fromcm->msg.ack_conn.flags & KP_ACK_CONN_FLAGS_PRIORITY) != 0) {
2508 BUG_ON((tocm->msg.ack_conn.flags &
2509 KP_ACK_CONN_FLAGS_PRIORITY) != 0);
2510 tocm->msg.ack_conn.priority_seqno =
2511 fromcm->msg.ack_conn.priority_seqno;
2512 tocm->msg.ack_conn.priority = fromcm->msg.ack_conn.priority;
2515 cor_recalc_scheduled_ackconn_size(tocm);
2516 if (from_newack == 0)
2517 cor_remove_pending_ackconn(fromcm);
2519 return 0;
2522 /* cmsg_lock must be held */
2523 static void cor_try_merge_ackconns(struct cor_conn *src_in_l,
2524 struct cor_control_msg_out *cm)
2526 struct list_head *currlh = cm->msg.ack_conn.conn_acks.next;
2528 while (currlh != &(src_in_l->src.in.acks_pending)) {
2529 struct cor_control_msg_out *currcm = container_of(currlh,
2530 struct cor_control_msg_out,
2531 msg.ack_conn.conn_acks);
2532 currlh = currlh->next;
2533 cor_remove_connack_oooflag_ifold(src_in_l, currcm);
2534 _cor_try_merge_ackconn(src_in_l, currcm, cm, 0);
/*
 * Either merge the new ack_conn @cm into an already-scheduled one for the
 * same conn, or queue it as a new control message. On a successful merge
 * @cm is freed.
 */
static void cor_merge_or_enqueue_ackconn(struct cor_conn *src_in_l,
		struct cor_control_msg_out *cm, int src)
{
	struct list_head *currlh;

	BUG_ON(src_in_l->sourcetype != SOURCE_IN);

	spin_lock_bh(&(cm->nb->cmsg_lock));

	currlh = src_in_l->src.in.acks_pending.next;
	while (currlh != &(src_in_l->src.in.acks_pending)) {
		struct cor_control_msg_out *currcm = container_of(currlh,
				struct cor_control_msg_out,
				msg.ack_conn.conn_acks);

		BUG_ON(currcm->nb != cm->nb);
		BUG_ON(currcm->type != MSGTYPE_ACK_CONN);
		BUG_ON(cm->msg.ack_conn.src_in != src_in_l);
		BUG_ON(currcm->msg.ack_conn.conn_id !=
				cm->msg.ack_conn.conn_id);

		if (_cor_try_merge_ackconn(src_in_l, cm, currcm, 1) == 0) {
			/* merged into currcm; see if more acks now combine */
			cor_try_merge_ackconns(src_in_l, currcm);
			cor_schedule_controlmsg_timer(currcm->nb);
			spin_unlock_bh(&(currcm->nb->cmsg_lock));
			/*
			 * Clear the flags before freeing: the conn may
			 * already be locked by the caller and
			 * priority_send_allowed must not be reset by
			 * cor_free_control_msg here.
			 */
			cm->msg.ack_conn.flags = 0;
			cor_free_control_msg(cm);
			return;
		}

		currlh = currlh->next;
	}

	/* no mergeable ack found: schedule cm as its own message */
	list_add_tail(&(cm->msg.ack_conn.conn_acks),
			&(src_in_l->src.in.acks_pending));

	spin_unlock_bh(&(cm->nb->cmsg_lock));

	cor_enqueue_control_msg(cm, src);
}
/*
 * If an ack_conn is already scheduled for this conn, refresh it in place
 * with the current in-order seqno and window instead of allocating a new
 * message. Returns 0 if an existing message was updated, 1 if none was
 * scheduled.
 */
static int cor_try_update_ackconn_seqno(struct cor_conn *src_in_l)
{
	int rc = 1;

	spin_lock_bh(&(src_in_l->src.in.nb->cmsg_lock));

	if (list_empty(&(src_in_l->src.in.acks_pending)) == 0) {
		/* first (and after merging, only) pending ack for this conn */
		struct cor_control_msg_out *cm = container_of(
				src_in_l->src.in.acks_pending.next,
				struct cor_control_msg_out,
				msg.ack_conn.conn_acks);
		BUG_ON(cm->nb != src_in_l->src.in.nb);
		BUG_ON(cm->type != MSGTYPE_ACK_CONN);
		BUG_ON(cm->msg.ack_conn.src_in != src_in_l);
		BUG_ON(cm->msg.ack_conn.conn_id != cor_get_connid_reverse(
				src_in_l->src.in.conn_id));

		cm->msg.ack_conn.flags = (cm->msg.ack_conn.flags |
				KP_ACK_CONN_FLAGS_SEQNO |
				KP_ACK_CONN_FLAGS_WINDOW);
		cm->msg.ack_conn.seqno = src_in_l->src.in.next_seqno;

		/* new ack_seqno so the peer can order this ack */
		src_in_l->src.in.ack_seqno++;
		cm->msg.ack_conn.ack_seqno = src_in_l->src.in.ack_seqno;

		cor_remove_connack_oooflag_ifold(src_in_l, cm);
		cor_recalc_scheduled_ackconn_size(cm);

		cor_try_merge_ackconns(src_in_l, cm);

		rc = 0;
	}

	spin_unlock_bh(&(src_in_l->src.in.nb->cmsg_lock));

	return rc;
}
/*
 * Send (or schedule) an ack_conn for this conn if one is needed: always
 * when @ooo_length is nonzero (out-of-order data to report), otherwise
 * only when an in-order ack is flagged or the advertised window has
 * drifted enough from what the peer knows.
 */
void cor_send_ack_conn_ifneeded(struct cor_conn *src_in_l, __u64 seqno_ooo,
		__u32 ooo_length)
{
	struct cor_control_msg_out *cm;

	BUG_ON(src_in_l->sourcetype != SOURCE_IN);

	/* an ooo range must lie beyond the in-order receive position */
	BUG_ON(ooo_length > 0 && cor_seqno_before_eq(seqno_ooo,
			src_in_l->src.in.next_seqno));

	cor_update_windowlimit(src_in_l);

	if (ooo_length != 0) {
		cm = cor_alloc_control_msg(src_in_l->src.in.nb,
				ACM_PRIORITY_LOW);
		/*
		 * on allocation failure we fall through to the in-order
		 * path below and may still send via merge/ACM_PRIORITY_MED
		 */
		if (cm != 0)
			goto add;
	}

	if (src_in_l->src.in.inorder_ack_needed != 0)
		goto ack_needed;

	/* window too small to be worth encoding: skip */
	if (cor_seqno_clean(src_in_l->src.in.window_seqnolimit -
			src_in_l->src.in.next_seqno) < WINDOW_ENCODE_MIN)
		return;

	/*
	 * skip if the window the peer already knows is still at least
	 * 7/8 of the current one (ratio per the multiplications below)
	 */
	if (cor_seqno_clean(src_in_l->src.in.window_seqnolimit_remote -
			src_in_l->src.in.next_seqno) >= WINDOW_ENCODE_MIN &&
			cor_seqno_clean(src_in_l->src.in.window_seqnolimit -
			src_in_l->src.in.next_seqno) * 7 <
			cor_seqno_clean(
			src_in_l->src.in.window_seqnolimit_remote -
			src_in_l->src.in.next_seqno) * 8)
		return;

ack_needed:
	/* prefer updating an already-scheduled ack_conn in place */
	if (cor_try_update_ackconn_seqno(src_in_l) == 0)
		goto out;

	cm = cor_alloc_control_msg(src_in_l->src.in.nb, ACM_PRIORITY_MED);
	if (cm == 0) {
		printk(KERN_ERR "error allocating inorder ack\n");
		return;
	}

add:
	cm->type = MSGTYPE_ACK_CONN;
	src_in_l->src.in.ack_seqno++;
	cm->msg.ack_conn.ack_seqno = src_in_l->src.in.ack_seqno;
	cor_conn_kref_get(src_in_l, "cor_control_msg_out ack_conn");
	cm->msg.ack_conn.src_in = src_in_l;
	cm->msg.ack_conn.conn_id =
			cor_get_connid_reverse(src_in_l->src.in.conn_id);
	cm->msg.ack_conn.seqno = src_in_l->src.in.next_seqno;
	cm->msg.ack_conn.seqno_ooo = seqno_ooo;
	cm->msg.ack_conn.length = ooo_length;
	cm->msg.ack_conn.bufsize_changerate =
			_cor_bufsize_update_get_changerate(src_in_l);
	cm->msg.ack_conn.flags = KP_ACK_CONN_FLAGS_SEQNO |
			KP_ACK_CONN_FLAGS_WINDOW;
	cor_set_ooolen_flags(cm);
	cm->msg.ack_conn.is_highlatency = src_in_l->is_highlatency;
	cm->msg.ack_conn.queue = get_queue_for_ackconn(src_in_l);
	cm->length = 5 + cor_ack_conn_len(cm->msg.ack_conn.flags);

	cor_merge_or_enqueue_ackconn(src_in_l, cm, ADDCMSG_SRC_NEW);

out:
	src_in_l->src.in.inorder_ack_needed = 0;
	/* the peer will now learn the current window limit */
	src_in_l->src.in.window_seqnolimit_remote =
			src_in_l->src.in.window_seqnolimit;
}
/*
 * Try to piggy-back a priority update on an already-scheduled ack_conn
 * for this conn. Returns 0 on success, 1 if no ack_conn was pending.
 */
static int cor_try_add_priority(struct cor_conn *trgt_out_ll, __u16 priority)
{
	int rc = 1;
	struct cor_conn *src_in_ll = cor_get_conn_reversedir(trgt_out_ll);

	spin_lock_bh(&(trgt_out_ll->trgt.out.nb->cmsg_lock));

	if (list_empty(&(src_in_ll->src.in.acks_pending)) == 0) {
		struct cor_control_msg_out *cm = container_of(
				src_in_ll->src.in.acks_pending.next,
				struct cor_control_msg_out,
				msg.ack_conn.conn_acks);
		BUG_ON(cm->nb != trgt_out_ll->trgt.out.nb);
		BUG_ON(cm->type != MSGTYPE_ACK_CONN);
		BUG_ON(cm->msg.ack_conn.src_in != src_in_ll);
		BUG_ON(cm->msg.ack_conn.conn_id !=
				trgt_out_ll->trgt.out.conn_id);

		/* a pending ack must not already carry a priority */
		BUG_ON((cm->msg.ack_conn.flags & KP_ACK_CONN_FLAGS_PRIORITY) !=
				0);
		cm->msg.ack_conn.flags = (cm->msg.ack_conn.flags |
				KP_ACK_CONN_FLAGS_PRIORITY);
		cm->msg.ack_conn.priority_seqno =
				trgt_out_ll->trgt.out.priority_seqno;
		cm->msg.ack_conn.priority = priority;
		cor_recalc_scheduled_ackconn_size(cm);

		rc = 0;
	}

	spin_unlock_bh(&(trgt_out_ll->trgt.out.nb->cmsg_lock));

	return rc;
}
/*
 * Announce a new conn priority to the neighbor, preferably by
 * piggy-backing on a pending ack_conn, otherwise with a fresh ack_conn
 * message carrying only the priority.
 */
void cor_send_priority(struct cor_conn *trgt_out_ll, __u16 priority)
{
	struct cor_conn *src_in_ll = cor_get_conn_reversedir(trgt_out_ll);
	struct cor_control_msg_out *cm;

	if (cor_try_add_priority(trgt_out_ll, priority) == 0)
		goto out;

	cm = cor_alloc_control_msg(trgt_out_ll->trgt.out.nb, ACM_PRIORITY_LOW);
	/*
	 * allocation failure: return without advancing priority_seqno or
	 * priority_last, so a later attempt can resend this priority
	 */
	if (cm == 0)
		return;

	cm->type = MSGTYPE_ACK_CONN;
	cm->msg.ack_conn.flags = KP_ACK_CONN_FLAGS_PRIORITY;
	cor_conn_kref_get(src_in_ll, "cor_control_msg_out ack_conn");
	BUG_ON(trgt_out_ll->targettype != TARGET_OUT);
	cm->msg.ack_conn.src_in = src_in_ll;
	cm->msg.ack_conn.conn_id = trgt_out_ll->trgt.out.conn_id;
	cm->msg.ack_conn.bufsize_changerate =
			_cor_bufsize_update_get_changerate(src_in_ll);
	cm->msg.ack_conn.priority_seqno = trgt_out_ll->trgt.out.priority_seqno;
	cm->msg.ack_conn.priority = priority;
	cm->msg.ack_conn.is_highlatency = trgt_out_ll->is_highlatency;
	cm->msg.ack_conn.queue = get_queue_for_ackconn(src_in_ll);

	cm->length = 5 + cor_ack_conn_len(cm->msg.ack_conn.flags);
	cor_merge_or_enqueue_ackconn(src_in_ll, cm, ADDCMSG_SRC_NEW);

out:
	trgt_out_ll->trgt.out.priority_last = priority;
	/* priority_seqno is a 4-bit rolling counter */
	trgt_out_ll->trgt.out.priority_seqno =
			(trgt_out_ll->trgt.out.priority_seqno + 1) & 15;
	trgt_out_ll->trgt.out.priority_send_allowed = 0;
}
2766 void cor_free_ack_conns(struct cor_conn *src_in_lx)
2768 int changed = 0;
2769 spin_lock_bh(&(src_in_lx->src.in.nb->cmsg_lock));
2770 while (list_empty(&(src_in_lx->src.in.acks_pending)) == 0) {
2771 struct list_head *currlh =
2772 src_in_lx->src.in.acks_pending.next;
2773 struct cor_control_msg_out *currcm = container_of(currlh,
2774 struct cor_control_msg_out,
2775 msg.ack_conn.conn_acks);
2777 cor_remove_pending_ackconn(currcm);
2778 changed = 1;
2780 if (changed)
2781 cor_schedule_controlmsg_timer(src_in_lx->src.in.nb);
2782 spin_unlock_bh(&(src_in_lx->src.in.nb->cmsg_lock));
2785 void cor_send_connect_success(struct cor_control_msg_out *cm, __u32 conn_id,
2786 struct cor_conn *src_in)
2788 cm->type = MSGTYPE_CONNECT_SUCCESS;
2789 cm->msg.connect_success.conn_id = conn_id;
2790 cor_conn_kref_get(src_in, "cor_control_msg_out connect_success");
2791 cm->msg.connect_success.src_in = src_in;
2792 cm->length = 7;
2793 cor_enqueue_control_msg(cm, ADDCMSG_SRC_NEW);
2796 void cor_send_connect_nb(struct cor_control_msg_out *cm, __u32 conn_id,
2797 __u64 seqno1, __u64 seqno2, struct cor_conn *src_in_ll)
2799 cm->type = MSGTYPE_CONNECT;
2800 cm->msg.connect.conn_id = conn_id;
2801 cm->msg.connect.seqno1 = seqno1;
2802 cm->msg.connect.seqno2 = seqno2;
2803 cor_conn_kref_get(src_in_ll, "cor_control_msg_out connect");
2804 BUG_ON(src_in_ll->sourcetype != SOURCE_IN);
2805 cm->msg.connect.src_in = src_in_ll;
2806 cm->length = 22;
2807 cor_enqueue_control_msg(cm, ADDCMSG_SRC_NEW);
2810 void cor_send_conndata(struct cor_control_msg_out *cm, __u32 conn_id,
2811 __u64 seqno, char *data_orig, char *data, __u32 datalen,
2812 __u8 windowused, __u8 flush, __u8 highlatency,
2813 struct cor_conn_retrans *cr)
2815 cm->type = MSGTYPE_CONNDATA;
2816 cm->msg.conn_data.conn_id = conn_id;
2817 cm->msg.conn_data.seqno = seqno;
2818 cm->msg.conn_data.data_orig = data_orig;
2819 cm->msg.conn_data.data = data;
2820 cm->msg.conn_data.datalen = datalen;
2821 cm->msg.conn_data.windowused = windowused;
2822 cm->msg.conn_data.flush = flush;
2823 cm->msg.conn_data.highlatency = highlatency;
2824 cm->msg.conn_data.cr = cr;
2825 kref_get(&(cr->ref));
2826 cm->length = get_kp_conn_data_length(datalen);
2827 cor_enqueue_control_msg(cm, ADDCMSG_SRC_NEW);
2830 int cor_send_reset_conn(struct cor_neighbor *nb, __u32 conn_id, int lowprio)
2832 struct cor_control_msg_out *cm;
2834 if (unlikely(cor_get_neigh_state(nb) == NEIGHBOR_STATE_KILLED))
2835 return 0;
2837 cm = cor_alloc_control_msg(nb, lowprio ?
2838 ACM_PRIORITY_LOW : ACM_PRIORITY_MED);
2840 if (unlikely(cm == 0))
2841 return 1;
2843 cm->type = MSGTYPE_RESET_CONN;
2844 cm->msg.reset_conn.conn_id = conn_id;
2845 cm->length = 5;
2847 cor_enqueue_control_msg(cm, ADDCMSG_SRC_NEW);
2849 return 0;
2852 int __init cor_kgen_init(void)
2854 cor_controlmsg_slab = kmem_cache_create("cor_controlmsg",
2855 sizeof(struct cor_control_msg_out), 8, 0, 0);
2856 if (unlikely(cor_controlmsg_slab == 0))
2857 return -ENOMEM;
2859 cor_controlretrans_slab = kmem_cache_create("cor_controlretransmsg",
2860 sizeof(struct cor_control_retrans), 8, 0, 0);
2861 if (unlikely(cor_controlretrans_slab == 0))
2862 return -ENOMEM;
2864 return 0;
2867 void __exit cor_kgen_exit2(void)
2869 kmem_cache_destroy(cor_controlretrans_slab);
2870 cor_controlretrans_slab = 0;
2872 kmem_cache_destroy(cor_controlmsg_slab);
2873 cor_controlmsg_slab = 0;
2876 MODULE_LICENSE("GPL");