/*
 * Connection oriented routing
 * Copyright (C) 2007-2020 Michael Blizek
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version 2
 * of the License, or (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
 * 02110-1301, USA.
 */

#include <asm/byteorder.h>
/* not sent over the network - internal meaning only */
#define MSGTYPE_PONG 1
#define MSGTYPE_ACK 2
#define MSGTYPE_ACK_CONN 3
#define MSGTYPE_CONNECT 4
#define MSGTYPE_CONNECT_SUCCESS 5
#define MSGTYPE_RESET_CONN 6
#define MSGTYPE_CONNDATA 7
#define MSGTYPE_SET_MAX_CMSG_DELAY 8

#define MSGTYPE_PONG_TIMEENQUEUED 1
#define MSGTYPE_PONG_RESPDELAY 2
struct control_msg_out {
	/* ... */

	/* either queue or control_retrans_packet */
	struct list_head lh;

	/* ... */
	unsigned long time_added;

	union {
		struct {
			/* ... */
			ktime_t time_enqueued;
		} pong;

		struct {
			/* ... */
			struct list_head conn_acks;
		} ack_conn;

		struct {
			/* ... */
			__u8 in_pending_conn_resets;
		} reset_conn;

		struct {
			/* ... */
			__u8 snd_delayed_lowbuf;
			/* ... */
			struct conn_retrans *cr;
		} conn_data;

		/* ... */
	} msg;
};
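/*
 * Lifecycle overview (as reconstructible from this file): a control_msg_out
 * is allocated via alloc_control_msg()/_alloc_control_msg(), filled in by
 * one of the send_*() helpers at the bottom of this file, queued per type
 * on its neighbor by enqueue_control_msg(), serialized into a kernel packet
 * by the add_*() helpers, and finally parked on control_retrans.msgs until
 * the packet is acked (kern_ack_rcvd()) or requeued for retransmission
 * (requeue_control_retrans()).
 */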
struct control_retrans {
	struct kref ref;

	struct neighbor *nb;
	__u64 seqno;

	struct rb_node rbn;

	unsigned long timeout;

	struct list_head msgs;

	struct list_head timeout_list;
};
struct unknownconnid_matchparam {
	/* ... */
};
static struct kmem_cache *controlmsg_slab;
static struct kmem_cache *controlretrans_slab;

static atomic_t cmsg_othercnt = ATOMIC_INIT(0);
#define ADDCMSG_SRC_NEW 1
#define ADDCMSG_SRC_SPLITCONNDATA 2
#define ADDCMSG_SRC_READD 3
#define ADDCMSG_SRC_RETRANS 4
static void enqueue_control_msg(struct control_msg_out *msg, int src);

static void try_merge_ackconns(struct conn *src_in_l,
		struct control_msg_out *cm);

static void merge_or_enqueue_ackconn(struct conn *src_in_l,
		struct control_msg_out *cm, int src);
static struct control_msg_out *_alloc_control_msg(struct neighbor *nb)
{
	struct control_msg_out *cm;

	cm = kmem_cache_alloc(controlmsg_slab, GFP_ATOMIC);
	if (unlikely(cm == 0))
		return 0;

	memset(cm, 0, sizeof(struct control_msg_out));
	kref_init(&(cm->ref));
	cm->nb = nb;

	return cm;
}
static int calc_limit(int limit, int priority)
{
	if (priority == ACM_PRIORITY_LOW)
		/* ... */
	else if (priority == ACM_PRIORITY_MED)
		return (limit * 3 + 1)/4;
	else if (priority == ACM_PRIORITY_HIGH)
		/* ... */
}
struct control_msg_out *alloc_control_msg(struct neighbor *nb, int priority)
{
	struct control_msg_out *cm = 0;

	long packets1;
	long packets2;

	packets1 = atomic_inc_return(&(nb->cmsg_othercnt));
	packets2 = atomic_inc_return(&(cmsg_othercnt));

	BUG_ON(packets1 <= 0);
	BUG_ON(packets2 <= 0);

	if (packets1 <= calc_limit(GUARANTEED_CMSGS_PER_NEIGH, priority))
		/* ... */

	if (unlikely(unlikely(packets1 > calc_limit(MAX_CMSGS_PER_NEIGH,
			priority)) ||
			unlikely(packets2 > calc_limit(MAX_CMSGS, priority))))
		/* ... */

	cm = _alloc_control_msg(nb);
	if (unlikely(cm == 0)) {
		/* printk(KERN_ERR "alloc_control_msg failed %ld %ld", packets1, packets2); */
		atomic_dec(&(nb->cmsg_othercnt));
		atomic_dec(&(cmsg_othercnt));
		return 0;
	}

	return cm;
}
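/*
 * Admission control uses two counters, a per-neighbor one
 * (nb->cmsg_othercnt) and a global one (cmsg_othercnt). Both are
 * incremented optimistically before the limit checks; a message within the
 * neighbor's guaranteed share is always admitted, and both counters are
 * rolled back when the request is rejected or the slab allocation fails.
 */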
static void cmsg_kref_free(struct kref *ref)
{
	struct control_msg_out *cm = container_of(ref, struct control_msg_out,
			ref);
	kmem_cache_free(controlmsg_slab, cm);
}
void free_control_msg(struct control_msg_out *cm)
{
	if (likely(cm->type != MSGTYPE_PONG)) {
		atomic_dec(&(cm->nb->cmsg_othercnt));
		atomic_dec(&(cmsg_othercnt));
	}

	if (cm->type == MSGTYPE_ACK_CONN) {
		struct conn *trgt_out = cm->msg.ack_conn.src_in->reversedir;
		BUG_ON(cm->msg.ack_conn.src_in == 0);
		if ((cm->msg.ack_conn.flags & KP_ACK_CONN_FLAGS_PRIORITY) != 0) {
			spin_lock_bh(&(trgt_out->rcv_lock));
			BUG_ON(trgt_out->targettype != TARGET_OUT);
			if (trgt_out->target.out.priority_send_allowed != 0) {
				trgt_out->target.out.priority_send_allowed = 1;
				spin_unlock_bh(&(trgt_out->rcv_lock));
				refresh_conn_priority(trgt_out, 0);
			} else {
				spin_unlock_bh(&(trgt_out->rcv_lock));
			}
		}
		kref_put(&(cm->msg.ack_conn.src_in->ref), free_conn);
		cm->msg.ack_conn.src_in = 0;
	} else if (cm->type == MSGTYPE_CONNECT) {
		BUG_ON(cm->msg.connect.src_in == 0);
		kref_put(&(cm->msg.connect.src_in->ref), free_conn);
		cm->msg.connect.src_in = 0;
	} else if (cm->type == MSGTYPE_CONNECT_SUCCESS) {
		BUG_ON(cm->msg.connect_success.src_in == 0);
		kref_put(&(cm->msg.connect_success.src_in->ref), free_conn);
		cm->msg.connect_success.src_in = 0;
	} else if (cm->type == MSGTYPE_RESET_CONN) {
		spin_lock_bh(&(cm->nb->cmsg_lock));
		if (cm->msg.reset_conn.in_pending_conn_resets != 0) {
			rb_erase(&(cm->msg.reset_conn.rbn),
					&(cm->nb->pending_conn_resets_rb));
			cm->msg.reset_conn.in_pending_conn_resets = 0;

			kref_put(&(cm->ref), kreffree_bug);
		}
		spin_unlock_bh(&(cm->nb->cmsg_lock));
	}

	kref_put(&(cm->ref), cmsg_kref_free);
}
static void free_control_retrans(struct kref *ref)
{
	struct control_retrans *cr = container_of(ref, struct control_retrans,
			ref);

	while (list_empty(&(cr->msgs)) == 0) {
		struct control_msg_out *cm = container_of(cr->msgs.next,
				struct control_msg_out, lh);

		if (cm->type == MSGTYPE_PONG)
			atomic_dec(&(cm->nb->cmsg_pongs_retrans_cnt));

		list_del(&(cm->lh));
		free_control_msg(cm);
	}

	kmem_cache_free(controlretrans_slab, cr);
}
struct control_retrans *get_control_retrans(struct neighbor *nb_retranslocked,
		__u64 seqno)
{
	struct rb_node *n = 0;
	struct control_retrans *ret = 0;

	n = nb_retranslocked->kp_retransmits_rb.rb_node;

	while (likely(n != 0) && ret == 0) {
		struct control_retrans *cr = container_of(n,
				struct control_retrans, rbn);

		BUG_ON(cr->nb != nb_retranslocked);

		if (seqno_before(seqno, cr->seqno))
			n = n->rb_left;
		else if (seqno_after(seqno, cr->seqno))
			n = n->rb_right;
		else
			ret = cr;
	}

	if (ret != 0)
		kref_get(&(ret->ref));

	return ret;
}
/* nb->retrans_lock must be held */
void insert_control_retrans(struct control_retrans *ins)
{
	struct neighbor *nb = ins->nb;
	__u64 seqno = ins->seqno;

	struct rb_root *root;
	struct rb_node **p;
	struct rb_node *parent = 0;

	root = &(nb->kp_retransmits_rb);
	p = &(root->rb_node);

	while ((*p) != 0) {
		struct control_retrans *cr = container_of(*p,
				struct control_retrans, rbn);

		BUG_ON(cr->nb != nb);

		parent = *p;
		if (unlikely(seqno_eq(seqno, cr->seqno))) {
			BUG();
		} else if (seqno_before(seqno, cr->seqno)) {
			p = &(*p)->rb_left;
		} else if (seqno_after(seqno, cr->seqno)) {
			p = &(*p)->rb_right;
		} else {
			BUG();
		}
	}

	kref_get(&(ins->ref));
	rb_link_node(&(ins->rbn), parent, p);
	rb_insert_color(&(ins->rbn), root);
}
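/*
 * Unacked kernel packets are tracked in nb->kp_retransmits_rb, an rbtree
 * keyed by packet seqno, so kern_ack_rcvd() can match an incoming ack to
 * its control_retrans in O(log n). The kref_get() accounts for the
 * reference held by the tree itself.
 */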
static void remove_connack_oooflag_ifold(struct conn *src_in_l,
		struct control_msg_out *cm)
{
	if (ooolen(cm->msg.ack_conn.flags) != 0 && seqno_before_eq(
			cm->msg.ack_conn.seqno_ooo +
			cm->msg.ack_conn.length,
			src_in_l->source.in.next_seqno)) {
		cm->msg.ack_conn.length = 0;
		cm->msg.ack_conn.flags = (cm->msg.ack_conn.flags &
				(~KP_ACK_CONN_FLAGS_OOO));
	}
}
static int ackconn_prepare_requeue(struct conn *cn_l,
		struct control_msg_out *cm)
{
	if (unlikely(unlikely(cn_l->sourcetype != SOURCE_IN) ||
			unlikely(cn_l->source.in.nb != cm->nb) ||
			unlikely(cn_l->reversedir->target.out.conn_id !=
			cm->msg.ack_conn.conn_id) ||
			unlikely(cn_l->isreset != 0)))
		return 0;

	remove_connack_oooflag_ifold(cn_l, cm);

	if (!seqno_eq(cm->msg.ack_conn.ack_seqno, cn_l->source.in.ack_seqno))
		cm->msg.ack_conn.flags = (cm->msg.ack_conn.flags &
				(~KP_ACK_CONN_FLAGS_SEQNO) &
				(~KP_ACK_CONN_FLAGS_WINDOW));

	if (cm->msg.ack_conn.flags == 0)
		return 0;

	cm->length = 6 + ack_conn_len(cm->msg.ack_conn.flags);

	return 1;
}
static void requeue_control_retrans(struct control_retrans *cr)
{
	atomic_inc(&(cr->nb->cmsg_bulk_readds));

	while (list_empty(&(cr->msgs)) == 0) {
		struct control_msg_out *cm = container_of(cr->msgs.prev,
				struct control_msg_out, lh);

		BUG_ON(cm->nb != cr->nb);

		list_del(&(cm->lh));

		if (cm->type == MSGTYPE_ACK_CONN) {
			struct conn *cn_l = cm->msg.ack_conn.src_in;
			spin_lock_bh(&(cn_l->rcv_lock));
			if (unlikely(ackconn_prepare_requeue(cn_l, cm) == 0)) {
				free_control_msg(cm);
			} else {
				merge_or_enqueue_ackconn(cn_l, cm,
						ADDCMSG_SRC_RETRANS);
			}
			spin_unlock_bh(&(cn_l->rcv_lock));
		} else {
			if (cm->type == MSGTYPE_PONG)
				atomic_dec(&(cm->nb->cmsg_pongs_retrans_cnt));
			enqueue_control_msg(cm, ADDCMSG_SRC_RETRANS);
		}
	}

	atomic_dec(&(cr->nb->cmsg_bulk_readds));

	spin_lock_bh(&(cr->nb->cmsg_lock));
	schedule_controlmsg_timer(cr->nb);
	spin_unlock_bh(&(cr->nb->cmsg_lock));
}
static void empty_retrans_queue(struct neighbor *nb_retranslocked)
{
	while (!list_empty(&(nb_retranslocked->retrans_list))) {
		struct control_retrans *cr = container_of(
				nb_retranslocked->retrans_list.next,
				struct control_retrans, timeout_list);

		BUG_ON(cr->nb != nb_retranslocked);

		list_del(&(cr->timeout_list));
		rb_erase(&(cr->rbn), &(nb_retranslocked->kp_retransmits_rb));

		kref_put(&(cr->ref), kreffree_bug); /* rb */
		kref_put(&(cr->ref), free_control_retrans); /* list */
	}
}
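/*
 * A queued control_retrans is referenced twice, once by kp_retransmits_rb
 * and once by retrans_list, hence the two kref_put() calls. kreffree_bug
 * is used for puts that must never drop the last reference.
 */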
void retransmit_timerfunc(struct timer_list *retrans_timer)
{
	struct neighbor *nb = container_of(retrans_timer,
			struct neighbor, retrans_timer);
	int nbstate = get_neigh_state(nb);
	struct control_retrans *cr = 0;

	spin_lock_bh(&(nb->retrans_lock));

	if (list_empty(&(nb->retrans_list))) {
		spin_unlock_bh(&(nb->retrans_lock));
		kref_put(&(nb->ref), neighbor_free);
		return;
	}

	if (unlikely(nbstate == NEIGHBOR_STATE_KILLED)) {
		empty_retrans_queue(nb);
		spin_unlock_bh(&(nb->retrans_lock));
		kref_put(&(nb->ref), neighbor_free);
		return;
	}

	cr = container_of(nb->retrans_list.next, struct control_retrans,
			timeout_list);

	BUG_ON(cr->nb != nb);

	if (time_after(cr->timeout, jiffies)) {
		int rc = mod_timer(&(nb->retrans_timer), cr->timeout);
		spin_unlock_bh(&(nb->retrans_lock));
		if (rc != 0)
			kref_put(&(nb->ref), neighbor_free);
		return;
	}

	spin_unlock_bh(&(nb->retrans_lock));

	spin_lock_bh(&(nb->cmsg_lock));
	nb->add_retrans_needed = 1;
	schedule_controlmsg_timer(nb);
	spin_unlock_bh(&(nb->cmsg_lock));

	kref_put(&(nb->ref), neighbor_free);
}
static void schedule_retransmit(struct control_retrans *cr, struct neighbor *nb)
{
	int first;

	cr->timeout = calc_timeout(atomic_read(&(nb->latency_retrans_us)),
			atomic_read(&(nb->latency_stddev_retrans_us)),
			atomic_read(&(nb->max_remote_ack_delay_us)));

	spin_lock_bh(&(nb->retrans_lock));
	insert_control_retrans(cr);
	first = list_empty(&(nb->retrans_list));
	list_add_tail(&(cr->timeout_list), &(nb->retrans_list));

	if (first) {
		if (mod_timer(&(nb->retrans_timer), cr->timeout) == 0) {
			kref_get(&(nb->ref));
		}
	}

	spin_unlock_bh(&(nb->retrans_lock));
}
void kern_ack_rcvd(struct neighbor *nb, __u64 seqno)
{
	struct control_retrans *cr = 0;

	spin_lock_bh(&(nb->retrans_lock));

	cr = get_control_retrans(nb, seqno);

	if (cr == 0) {
		/* char *seqno_p = (char *) &seqno;
		seqno = cpu_to_be32(seqno);
		printk(KERN_ERR "bogus/duplicate ack received %d %d %d %d",
				seqno_p[0], seqno_p[1], seqno_p[2], seqno_p[3]); */
		spin_unlock_bh(&(nb->retrans_lock));
		return;
	}

	rb_erase(&(cr->rbn), &(nb->kp_retransmits_rb));

	BUG_ON(cr->nb != nb);

	list_del(&(cr->timeout_list));

	spin_unlock_bh(&(nb->retrans_lock));

	kref_put(&(cr->ref), kreffree_bug); /* get_control_retrans */
	kref_put(&(cr->ref), kreffree_bug); /* rb_erase */
	kref_put(&(cr->ref), free_control_retrans); /* list */
}
static __u8 get_window(struct conn *cn, struct neighbor *expectedsender,
		__u32 expected_connid)
{
	__u8 window = 0;

	spin_lock_bh(&(cn->rcv_lock));

	if (unlikely(unlikely(cn->sourcetype != SOURCE_IN) ||
			unlikely(expectedsender != 0 && (cn->source.in.nb !=
			expectedsender || cn->reversedir->target.out.conn_id !=
			expected_connid))))
		goto out;

	window = enc_log_64_7(seqno_clean(cn->source.in.window_seqnolimit -
			cn->source.in.next_seqno));

	cn->source.in.window_seqnolimit_remote = cn->source.in.next_seqno +
			dec_log_64_7(window);

out:
	spin_unlock_bh(&(cn->rcv_lock));

	return window;
}
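/*
 * The advertised receive window (window_seqnolimit - next_seqno) is
 * compressed into a single byte by enc_log_64_7(), apparently a lossy
 * logarithmic encoding. Decoding the same byte with dec_log_64_7() and
 * storing the result in window_seqnolimit_remote keeps the local record of
 * the granted window consistent with what the peer will compute.
 */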
/* static void padding(struct sk_buff *skb, __u32 length)
{
	...
	dst = skb_put(skb, length);
	...
	memset(dst, KP_PADDING, length);
} */
static int add_init_session(struct sk_buff *skb, __be32 sessionid,
		__u32 spaceleft)
{
	char *dst;

	BUG_ON(KP_INIT_SESSION_CMDLEN != 5);

	if (unlikely(spaceleft < 5))
		return 0;

	dst = skb_put(skb, 5);
	BUG_ON(dst == 0);

	dst[0] = KP_INIT_SESSION;
	put_be32(dst + 1, sessionid);

	return 5;
}
static int add_ack(struct sk_buff *skb, struct control_retrans *cr,
		struct control_msg_out *cm, __u32 spaceleft)
{
	char *dst;

	if (unlikely(spaceleft < 7))
		return 0;

	dst = skb_put(skb, 7);
	BUG_ON(dst == 0);

	dst[0] = KP_ACK;
	put_u48(dst + 1, cm->msg.ack.seqno);

	free_control_msg(cm);

	return 7;
}
static int add_ack_conn(struct sk_buff *skb, struct control_retrans *cr,
		struct control_msg_out *cm, __u32 spaceleft)
{
	char *dst;
	int offset = 0;

	if (unlikely(spaceleft < cm->length))
		return 0;

	dst = skb_put(skb, cm->length);
	BUG_ON(dst == 0);

	dst[offset] = KP_ACK_CONN;
	offset++;
	put_u32(dst + offset, cm->msg.ack_conn.conn_id);
	offset += 4;
	dst[offset] = cm->msg.ack_conn.flags;
	offset++;

	if ((cm->msg.ack_conn.flags & KP_ACK_CONN_FLAGS_SEQNO) != 0) {
		put_u48(dst + offset, cm->msg.ack_conn.seqno);
		offset += 6;
	}

	if ((cm->msg.ack_conn.flags & KP_ACK_CONN_FLAGS_WINDOW) != 0) {
		BUG_ON(cm->msg.ack_conn.src_in == 0);
		dst[offset] = get_window(cm->msg.ack_conn.src_in,
				cm->nb, cm->msg.ack_conn.conn_id);
		offset++;
	}

	if (ooolen(cm->msg.ack_conn.flags) != 0) {
		put_u48(dst + offset, cm->msg.ack_conn.seqno_ooo);
		offset += 6;
		if (ooolen(cm->msg.ack_conn.flags) == 1) {
			BUG_ON(cm->msg.ack_conn.length > 255);
			dst[offset] = cm->msg.ack_conn.length;
			offset++;
		} else if (ooolen(cm->msg.ack_conn.flags) == 2) {
			BUG_ON(cm->msg.ack_conn.length <= 255);
			BUG_ON(cm->msg.ack_conn.length > 65535);
			put_u16(dst + offset, cm->msg.ack_conn.length);
			offset += 2;
		} else if (ooolen(cm->msg.ack_conn.flags) == 4) {
			BUG_ON(cm->msg.ack_conn.length <= 65535);
			put_u32(dst + offset, cm->msg.ack_conn.length);
			offset += 4;
		} else {
			BUG();
		}
	}

	if ((cm->msg.ack_conn.flags & KP_ACK_CONN_FLAGS_PRIORITY) != 0) {
		dst[offset] = cm->msg.ack_conn.priority_seqno;
		offset++;
		dst[offset] = cm->msg.ack_conn.priority;
		offset++;
	}

	list_add_tail(&(cm->lh), &(cr->msgs));

	BUG_ON(offset != cm->length);

	return offset;
}
static int add_ping(struct sk_buff *skb, __u32 cookie, __u32 spaceleft)
{
	char *dst;

	BUG_ON(KP_PING_CMDLEN != 5);

	if (unlikely(spaceleft < 5))
		return 0;

	dst = skb_put(skb, 5);
	BUG_ON(dst == 0);

	dst[0] = KP_PING;
	put_u32(dst + 1, cookie);

	return 5;
}
static int add_pong(struct sk_buff *skb, struct control_retrans *cr,
		struct control_msg_out *cm, __u32 spaceleft,
		ktime_t packetgen_start)
{
	char *dst;
	__s64 respdelay;

	if (unlikely(spaceleft < 9))
		return 0;

	respdelay = div_u64(ktime_to_ns(packetgen_start) -
			ktime_to_ns(cm->msg.pong.time_enqueued) + 500, 1000);
	if (unlikely(respdelay > U32_MAX))
		respdelay = U32_MAX;
	if (unlikely(respdelay < 0))
		respdelay = 0;

	dst = skb_put(skb, 9);
	BUG_ON(dst == 0);

	dst[0] = KP_PONG;
	put_u32(dst + 1, cm->msg.pong.cookie);
	put_u32(dst + 5, (__u32) respdelay);

	atomic_inc(&(cm->nb->cmsg_pongs_retrans_cnt));
	list_add_tail(&(cm->lh), &(cr->msgs));

	return 9;
}
static int add_connect(struct sk_buff *skb, struct control_retrans *cr,
		struct control_msg_out *cm, __u32 spaceleft)
{
	char *dst;
	struct conn *src_in = cm->msg.connect.src_in;

	if (unlikely(spaceleft < 20))
		return 0;

	dst = skb_put(skb, 20);
	BUG_ON(dst == 0);

	dst[0] = KP_CONNECT;
	put_u32(dst + 1, cm->msg.connect.conn_id);
	put_u48(dst + 5, cm->msg.connect.seqno1);
	put_u48(dst + 11, cm->msg.connect.seqno2);
	BUG_ON(cm->msg.connect.src_in == 0);
	dst[17] = get_window(cm->msg.connect.src_in, cm->nb,
			cm->msg.connect.conn_id);

	spin_lock_bh(&(src_in->reversedir->rcv_lock));
	BUG_ON(src_in->reversedir->targettype != TARGET_OUT);

	dst[18] = src_in->reversedir->target.out.priority_seqno;
	dst[19] = src_in->reversedir->target.out.priority_last;

	spin_unlock_bh(&(src_in->reversedir->rcv_lock));

	list_add_tail(&(cm->lh), &(cr->msgs));

	return 20;
}
static int add_connect_success(struct sk_buff *skb, struct control_retrans *cr,
		struct control_msg_out *cm, __u32 spaceleft)
{
	char *dst;

	if (unlikely(spaceleft < 6))
		return 0;

	dst = skb_put(skb, 6);
	BUG_ON(dst == 0);

	dst[0] = KP_CONNECT_SUCCESS;
	put_u32(dst + 1, cm->msg.connect_success.conn_id);
	BUG_ON(cm->msg.connect_success.src_in == 0);
	dst[5] = get_window(cm->msg.connect_success.src_in, cm->nb,
			cm->msg.connect_success.conn_id);

	list_add_tail(&(cm->lh), &(cr->msgs));

	return 6;
}
static int add_reset_conn(struct sk_buff *skb, struct control_retrans *cr,
		struct control_msg_out *cm, __u32 spaceleft)
{
	char *dst;

	if (unlikely(spaceleft < 5))
		return 0;

	dst = skb_put(skb, 5);
	BUG_ON(dst == 0);

	dst[0] = KP_RESET_CONN;
	put_u32(dst + 1, cm->msg.reset_conn.conn_id);

	list_add_tail(&(cm->lh), &(cr->msgs));

	return 5;
}
static int add_conndata(struct sk_buff *skb, struct control_retrans *cr,
		struct control_msg_out *cm, __u32 spaceleft,
		struct control_msg_out **split_conndata, __u32 *sc_sendlen)
{
	char *dst;

	__u32 totallen = cm->msg.conn_data.datalen + KP_CONN_DATA_CMDLEN;
	__u32 putlen = min(totallen, spaceleft);
	__u32 dataputlen = putlen - KP_CONN_DATA_CMDLEN;

	BUG_ON(KP_CONN_DATA_CMDLEN != 13);

	BUG_ON(putlen > 1024*1024*1024);

	BUG_ON(split_conndata == 0);
	BUG_ON(*split_conndata != 0);
	BUG_ON(sc_sendlen == 0);
	BUG_ON(*sc_sendlen != 0);

	if (putlen < KP_CONN_DATA_CMDLEN + 1)
		return 0;

	dst = skb_put(skb, putlen);
	BUG_ON(dst == 0);

	if (cm->msg.conn_data.flush != 0) {
		if (cm->msg.conn_data.snd_delayed_lowbuf == 0) {
			dst[0] = KP_CONN_DATA_FLUSH;
		} else {
			dst[0] = KP_CONN_DATA_LOWBUFDELAYED_FLUSH;
		}
	} else {
		if (cm->msg.conn_data.snd_delayed_lowbuf == 0) {
			dst[0] = KP_CONN_DATA;
		} else {
			dst[0] = KP_CONN_DATA_LOWBUFDELAYED;
		}
	}
	put_u32(dst + 1, cm->msg.conn_data.conn_id);
	put_u48(dst + 5, cm->msg.conn_data.seqno);
	put_u16(dst + 11, dataputlen);

	memcpy(dst + 13, cm->msg.conn_data.data, dataputlen);

	if (cm->msg.conn_data.datalen == dataputlen) {
		list_add_tail(&(cm->lh), &(cr->msgs));
	} else {
		*split_conndata = cm;
		*sc_sendlen = dataputlen;
	}

	return putlen;
}
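/*
 * Splitting: if the conndata payload does not fit into the remaining packet
 * space, only dataputlen bytes are emitted and the message is handed back
 * through *split_conndata / *sc_sendlen instead of being moved to cr->msgs.
 * _send_messages_send2() then advances seqno/data/datalen by sc_sendlen and
 * requeues the remainder with ADDCMSG_SRC_SPLITCONNDATA.
 */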
static int add_set_max_cmsg_dly(struct sk_buff *skb, struct control_retrans *cr,
		struct control_msg_out *cm, __u32 spaceleft)
{
	char *dst;

	BUG_ON(KP_SET_MAX_CMSG_DELAY_CMDLEN != 13);

	if (unlikely(spaceleft < 13))
		return 0;

	dst = skb_put(skb, 13);
	BUG_ON(dst == 0);

	dst[0] = KP_SET_MAX_CMSG_DELAY;
	put_u32(dst + 1, cm->msg.set_max_cmsg_delay.ack_delay);
	put_u32(dst + 5, cm->msg.set_max_cmsg_delay.ackconn_delay);
	put_u32(dst + 9, cm->msg.set_max_cmsg_delay.other_delay);

	list_add_tail(&(cm->lh), &(cr->msgs));

	return 13;
}
static int add_message(struct sk_buff *skb, struct control_retrans *cr,
		struct control_msg_out *cm, __u32 spaceleft,
		ktime_t packetgen_start,
		struct control_msg_out **split_conndata, __u32 *sc_sendlen)
{
	BUG_ON(split_conndata != 0 && *split_conndata != 0);
	BUG_ON(sc_sendlen != 0 && *sc_sendlen != 0);

	switch (cm->type) {
	case MSGTYPE_ACK:
		return add_ack(skb, cr, cm, spaceleft);
	case MSGTYPE_ACK_CONN:
		return add_ack_conn(skb, cr, cm, spaceleft);
	case MSGTYPE_PONG:
		return add_pong(skb, cr, cm, spaceleft, packetgen_start);
	case MSGTYPE_CONNECT:
		return add_connect(skb, cr, cm, spaceleft);
	case MSGTYPE_CONNECT_SUCCESS:
		return add_connect_success(skb, cr, cm, spaceleft);
	case MSGTYPE_RESET_CONN:
		return add_reset_conn(skb, cr, cm, spaceleft);
	case MSGTYPE_CONNDATA:
		return add_conndata(skb, cr, cm, spaceleft, split_conndata,
				sc_sendlen);
	case MSGTYPE_SET_MAX_CMSG_DELAY:
		return add_set_max_cmsg_dly(skb, cr, cm, spaceleft);
	default:
		BUG();
	}

	return 0;
}
static __u32 __send_messages(struct neighbor *nb, struct sk_buff *skb,
		struct control_retrans *cr, struct list_head *cmsgs,
		__u32 spaceleft, int nbstate, ktime_t packetgen_start,
		struct control_msg_out **split_conndata, __u32 *sc_sendlen)
{
	__u32 length = 0;

	while (!list_empty(cmsgs)) {
		int rc;
		struct control_msg_out *cm = container_of(cmsgs->next,
				struct control_msg_out, lh);

		list_del(&(cm->lh));

		rc = add_message(skb, cr, cm, spaceleft - length,
				packetgen_start, split_conndata, sc_sendlen);
		if (rc == 0) {
			list_add(&(cm->lh), cmsgs);
			break;
		}

		length += rc;
	}

	return length;
}
static __u32 __send_messages_smcd(struct neighbor *nb, struct sk_buff *skb,
		struct control_retrans *cr, __u32 spaceleft,
		ktime_t packetgen_start)
{
	struct control_msg_out *cm;
	int rc;

	cm = alloc_control_msg(nb, ACM_PRIORITY_MED);

	if (unlikely(cm == 0))
		return 0;

	cm->type = MSGTYPE_SET_MAX_CMSG_DELAY;
	cm->msg.set_max_cmsg_delay.ack_delay =
			CMSG_MAXDELAY_ACK_MS * 1000;
	cm->msg.set_max_cmsg_delay.ackconn_delay =
			CMSG_MAXDELAY_ACKCONN_MS * 1000;
	cm->msg.set_max_cmsg_delay.other_delay =
			CMSG_MAXDELAY_OTHER_MS * 1000;
	cm->length = KP_SET_MAX_CMSG_DELAY_CMDLEN;

	rc = add_message(skb, cr, cm, spaceleft, packetgen_start, 0, 0);

#warning todo recover packet loss
	nb->max_cmsg_delay_sent = 1;

	return rc;
}
static void requeue_message(struct control_msg_out *cm)
{
	if (cm->type == MSGTYPE_ACK_CONN) {
		struct conn *cn_l = cm->msg.ack_conn.src_in;

		spin_lock_bh(&(cn_l->rcv_lock));
		if (unlikely(ackconn_prepare_requeue(cn_l, cm) == 0)) {
			free_control_msg(cm);
		} else {
			spin_lock_bh(&(cm->nb->cmsg_lock));

			list_add(&(cm->lh), &(cm->nb->cmsg_queue_ackconn));
			cm->nb->cmsg_otherlength += cm->length;

			list_add(&(cm->msg.ack_conn.conn_acks),
					&(cn_l->source.in.acks_pending));
			try_merge_ackconns(cn_l, cm);

			spin_unlock_bh(&(cm->nb->cmsg_lock));
		}
		spin_unlock_bh(&(cn_l->rcv_lock));
		return;
	}

	enqueue_control_msg(cm, ADDCMSG_SRC_READD);
}
static void requeue_messages(struct list_head *lh)
{
	while (list_empty(lh) == 0) {
		struct control_msg_out *cm = container_of(lh->prev,
				struct control_msg_out, lh);
		list_del(&(cm->lh));
		requeue_message(cm);
	}
}
static int _send_messages_send2(struct neighbor *nb, struct sk_buff *skb,
		int ping, int initsession, struct control_retrans *cr,
		struct list_head *cmsgs, __u32 spaceleft, int nbstate,
		int *sent)
{
	int rc;
	__u32 length = 0;
	__u32 pinglen = 0;
	__u32 pingcookie = 0;
	unsigned long last_ping_time;
	struct control_msg_out *split_conndata = 0;
	__u32 sc_sendlen = 0;

	ktime_t packetgen_start = ktime_get();

	if (ping != TIMETOSENDPING_NO) {
		if (unlikely(initsession)) {
			rc = add_init_session(skb, nb->sessionid,
					spaceleft - length);
			/* ... */
			length += rc;
		}

		pingcookie = add_ping_req(nb, &last_ping_time, packetgen_start);
		rc = add_ping(skb, pingcookie, spaceleft - length);
		/* ... */
		pinglen = rc;
		length += rc;
	}

	if (likely(nbstate == NEIGHBOR_STATE_ACTIVE) &&
			unlikely(nb->max_cmsg_delay_sent == 0))
		length += __send_messages_smcd(nb, skb, cr, spaceleft - length,
				packetgen_start);

	length += __send_messages(nb, skb, cr, cmsgs, spaceleft - length,
			nbstate, packetgen_start, &split_conndata, &sc_sendlen);

	BUG_ON(length > spaceleft);

	if (likely(ping != TIMETOSENDPING_FORCE) &&
			pinglen != 0 && unlikely(length == pinglen)) {
		unadd_ping_req(nb, pingcookie, last_ping_time, 0);
		length = 0;
	}

	if (unlikely(length == 0)) {
		kfree_skb(skb);

		BUG_ON(list_empty(&(cr->msgs)) == 0);
		kref_put(&(cr->ref), free_control_retrans);

		nb->kpacket_seqno--;
		return QOS_RESUME_DONE;
	}

	//padding(skb, spaceleft - length);
	BUG_ON(spaceleft - length != 0);

	rc = cor_dev_queue_xmit(skb, nb->queue, QOS_CALLER_KPACKET);
	if (rc == NET_XMIT_SUCCESS)
		*sent = 1;

	if (rc == NET_XMIT_DROP) {
		unadd_ping_req(nb, pingcookie, last_ping_time, 1);

		atomic_inc(&(nb->cmsg_bulk_readds));
		if (split_conndata != 0)
			requeue_message(split_conndata);

		requeue_messages(&(cr->msgs));

		kref_put(&(cr->ref), free_control_retrans);

		atomic_dec(&(nb->cmsg_bulk_readds));

		spin_lock_bh(&(nb->cmsg_lock));
		schedule_controlmsg_timer(nb);
		spin_unlock_bh(&(nb->cmsg_lock));
	} else {
		struct list_head *curr = cr->msgs.next;

		if (pingcookie != 0)
			ping_sent(nb, pingcookie);

		while (curr != &(cr->msgs)) {
			struct control_msg_out *cm = container_of(curr,
					struct control_msg_out, lh);

			curr = curr->next;

			if (unlikely(cm->type == MSGTYPE_PONG &&
					(nbstate != NEIGHBOR_STATE_ACTIVE))) {
				list_del(&(cm->lh));
				free_control_msg(cm);
			} else if (unlikely(cm->type == MSGTYPE_PONG &&
					atomic_inc_return(
					&(nb->cmsg_pongs_retrans_cnt)) >
					MAX_PONG_CMSGS_RETRANS_PER_NEIGH)) {
				atomic_dec(&(nb->cmsg_pongs_retrans_cnt));
				list_del(&(cm->lh));
				free_control_msg(cm);
			} else if (cm->type == MSGTYPE_CONNDATA) {
				schedule_retransmit_conn(cm->msg.conn_data.cr,
						0, 0);
				kfree(cm->msg.conn_data.data_orig);
				list_del(&(cm->lh));
				free_control_msg(cm);
			}
		}

		if (split_conndata != 0) {
			BUG_ON(sc_sendlen == 0);
			BUG_ON(sc_sendlen >=
					split_conndata->msg.conn_data.datalen);

			split_conndata->msg.conn_data.seqno += sc_sendlen;
			split_conndata->msg.conn_data.data += sc_sendlen;
			split_conndata->msg.conn_data.datalen -= sc_sendlen;
			split_conndata->length = KP_CONN_DATA_CMDLEN +
					split_conndata->msg.conn_data.datalen;
			enqueue_control_msg(split_conndata,
					ADDCMSG_SRC_SPLITCONNDATA);
		}

		if (list_empty(&(cr->msgs)))
			kref_put(&(cr->ref), free_control_retrans);
		else
			schedule_retransmit(cr, nb);
	}

	return (rc == NET_XMIT_SUCCESS) ? QOS_RESUME_DONE : QOS_RESUME_CONG;
}
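/*
 * Packet fate handling above: every message placed into the skb was also
 * appended to cr->msgs by its add_*() helper. On NET_XMIT_DROP everything
 * is requeued for a later attempt. On success, pongs beyond the
 * per-neighbor retransmit budget and all conndata are stripped from
 * cr->msgs (conndata is retransmitted per connection instead); whatever
 * remains is scheduled for kernel-packet retransmission via
 * schedule_retransmit().
 */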
static int _send_messages_send(struct neighbor *nb, int ping,
		int initsession, struct list_head *cmsgs, int nbstate,
		__u32 length, __u64 seqno, int *sent)
{
	struct sk_buff *skb;
	struct control_retrans *cr;
	int rc;

	skb = create_packet_cmsg(nb, length, GFP_ATOMIC, seqno);
	if (unlikely(skb == 0)) {
		printk(KERN_ERR "cor: send_messages: cannot allocate skb (out of memory?)");

		requeue_messages(cmsgs);
		return QOS_RESUME_CONG;
	}

	cr = kmem_cache_alloc(controlretrans_slab, GFP_ATOMIC);
	if (unlikely(cr == 0)) {
		printk(KERN_ERR "cor: send_messages: cannot allocate control_retrans (out of memory?)");
		kfree_skb(skb);

		requeue_messages(cmsgs);
		return QOS_RESUME_CONG;
	}

	memset(cr, 0, sizeof(struct control_retrans));
	kref_init(&(cr->ref));
	cr->nb = nb;
	cr->seqno = seqno;
	INIT_LIST_HEAD(&(cr->msgs));

	rc = _send_messages_send2(nb, skb, ping, initsession, cr, cmsgs, length,
			nbstate, sent);

	BUG_ON(!list_empty(cmsgs));

	return rc;
}
#define CMSGQUEUE_PONG 1
#define CMSGQUEUE_ACK 2
#define CMSGQUEUE_ACK_CONN 3
#define CMSGQUEUE_CONNDATA_LOWLAT 4
#define CMSGQUEUE_CONNDATA_HIGHLAT 5
#define CMSGQUEUE_OTHER 6
static unsigned long get_cmsg_timeout(struct control_msg_out *cm, int queue)
{
	if (cm->type == MSGTYPE_ACK) {
		BUG_ON(queue != CMSGQUEUE_ACK);
		return cm->time_added +
				msecs_to_jiffies(CMSG_MAXDELAY_ACK_MS) - 1;
	} else if (cm->type == MSGTYPE_ACK_CONN) {
		BUG_ON(queue != CMSGQUEUE_ACK_CONN);
		return cm->time_added +
				msecs_to_jiffies(CMSG_MAXDELAY_ACKCONN_MS) - 1;
	} else if (cm->type == MSGTYPE_CONNDATA) {
		if (cm->msg.conn_data.highlatency != 0) {
			BUG_ON(queue != CMSGQUEUE_CONNDATA_HIGHLAT);
			return cm->time_added +
					msecs_to_jiffies(
					CMSG_MAXDELAY_CONNDATA_MS) - 1;
		}
		BUG_ON(queue != CMSGQUEUE_CONNDATA_LOWLAT);
		return cm->time_added;
	}

	BUG_ON(cm->type == MSGTYPE_PONG && queue != CMSGQUEUE_PONG);
	BUG_ON(cm->type != MSGTYPE_PONG && queue != CMSGQUEUE_OTHER);

	return cm->time_added +
			msecs_to_jiffies(CMSG_MAXDELAY_OTHER_MS) - 1;
}
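/*
 * Each queue has its own batching deadline measured from time_added: acks
 * may wait CMSG_MAXDELAY_ACK_MS, conn acks CMSG_MAXDELAY_ACKCONN_MS,
 * high-latency conndata CMSG_MAXDELAY_CONNDATA_MS and everything else
 * CMSG_MAXDELAY_OTHER_MS, while low-latency conndata is due immediately.
 * The "- 1" biases the deadline so it expires no later than the advertised
 * maximum delay.
 */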
static void _peek_message(struct neighbor *nb_cmsglocked, int queue,
		struct control_msg_out **currcm, unsigned long *currtimeout,
		__u32 **currlen)
{
	struct control_msg_out *cm;
	unsigned long cmtimeout;

	struct list_head *queuelh;
	if (queue == CMSGQUEUE_PONG) {
		queuelh = &(nb_cmsglocked->cmsg_queue_pong);
	} else if (queue == CMSGQUEUE_ACK) {
		queuelh = &(nb_cmsglocked->cmsg_queue_ack);
	} else if (queue == CMSGQUEUE_ACK_CONN) {
		queuelh = &(nb_cmsglocked->cmsg_queue_ackconn);
	} else if (queue == CMSGQUEUE_CONNDATA_LOWLAT) {
		queuelh = &(nb_cmsglocked->cmsg_queue_conndata_lowlat);
	} else if (queue == CMSGQUEUE_CONNDATA_HIGHLAT) {
		queuelh = &(nb_cmsglocked->cmsg_queue_conndata_highlat);
	} else if (queue == CMSGQUEUE_OTHER) {
		queuelh = &(nb_cmsglocked->cmsg_queue_other);
	} else {
		BUG();
	}

	if (list_empty(queuelh))
		return;

	cm = container_of(queuelh->next, struct control_msg_out, lh);
	cmtimeout = get_cmsg_timeout(cm, queue);

	BUG_ON(cm->nb != nb_cmsglocked);

	if (*currcm == 0 || (time_before(cmtimeout, *currtimeout) &&
			time_before(jiffies, *currtimeout))) {
		*currcm = cm;
		*currtimeout = cmtimeout;

		if (queue == CMSGQUEUE_PONG) {
			*currlen = &(nb_cmsglocked->cmsg_pongslength);
		} else {
			*currlen = &(nb_cmsglocked->cmsg_otherlength);
		}
	}
}
static void peek_message(struct neighbor *nb_cmsglocked, int nbstate,
		struct control_msg_out **cm, unsigned long *cmtimeout,
		__u32 **len, int for_timeout)
{
	_peek_message(nb_cmsglocked, CMSGQUEUE_PONG, cm, cmtimeout, len);
	if (likely(nbstate == NEIGHBOR_STATE_ACTIVE)) {
		_peek_message(nb_cmsglocked, CMSGQUEUE_ACK, cm, cmtimeout, len);
		_peek_message(nb_cmsglocked, CMSGQUEUE_ACK_CONN, cm, cmtimeout,
				len);
		if (!for_timeout || atomic_read(
				&(nb_cmsglocked->cmsg_delay_conndata)) == 0) {
			_peek_message(nb_cmsglocked, CMSGQUEUE_CONNDATA_LOWLAT,
					cm, cmtimeout, len);
			_peek_message(nb_cmsglocked, CMSGQUEUE_CONNDATA_HIGHLAT,
					cm, cmtimeout, len);
		}
		_peek_message(nb_cmsglocked, CMSGQUEUE_OTHER, cm, cmtimeout,
				len);
	}
}
static unsigned long get_cmsg_timer_timeout(struct neighbor *nb_cmsglocked,
		int nbstate)
{
	unsigned long pingtimeout = get_next_ping_time(nb_cmsglocked);

	struct control_msg_out *cm = 0;
	unsigned long cmtimeout;
	__u32 *len;

	peek_message(nb_cmsglocked, nbstate, &cm, &cmtimeout, &len, 1);

	if (cm != 0) {
		unsigned long jiffies_tmp = jiffies;

		if (time_before(cmtimeout, jiffies_tmp))
			return jiffies_tmp;
		if (time_before(cmtimeout, pingtimeout))
			return cmtimeout;
	}

	return pingtimeout;
}
static void _dequeue_messages(struct neighbor *nb_cmsglocked, int nbstate,
		__u32 targetmss, __u32 *length, struct list_head *cmsgs)
{
	while (1) {
		__u32 spaceleft = targetmss - *length;
		struct control_msg_out *cm = 0;
		unsigned long cmtimeout;
		__u32 *len;

		peek_message(nb_cmsglocked, nbstate, &cm, &cmtimeout, &len, 0);

		if (unlikely(cm == 0))
			break;

		if (cm->length > spaceleft) {
			if (cm->type != MSGTYPE_CONNDATA ||
					spaceleft < KP_CONN_DATA_CMDLEN + 1) {
				WARN_ONCE(1, "cor: maximum segment size is too small");
				BUG_ON(*length == 0);
				break;
			}

			if ((*length/4)*3 > targetmss)
				break;
		}

		list_del(&(cm->lh));
		*len -= cm->length;

		if (cm->type == MSGTYPE_ACK_CONN)
			list_del(&(cm->msg.ack_conn.conn_acks));
		if (unlikely(cm->type == MSGTYPE_PONG)) {
			BUG_ON(cm->nb->cmsg_pongscnt == 0);
			cm->nb->cmsg_pongscnt--;
		}

		if (unlikely(cm->type == MSGTYPE_RESET_CONN)) {
			BUG_ON(cm->msg.reset_conn.in_pending_conn_resets == 0);
			rb_erase(&(cm->msg.reset_conn.rbn),
					&(cm->nb->pending_conn_resets_rb));
			cm->msg.reset_conn.in_pending_conn_resets = 0;
			kref_put(&(cm->ref), kreffree_bug);
		}

		BUG_ON(*length + cm->length < *length);
		*length += cm->length;

		list_add_tail(&(cm->lh), cmsgs);
	}
}
static __u32 get_total_messages_length(struct neighbor *nb, int ping,
		int initsession, int nbstate, int *extralength)
{
	__u32 length = nb->cmsg_pongslength;

	if (likely(nbstate == NEIGHBOR_STATE_ACTIVE)) {
		length += nb->cmsg_otherlength;

		if (unlikely(nb->max_cmsg_delay_sent == 0)) {
			length += KP_SET_MAX_CMSG_DELAY_CMDLEN;
			*extralength += KP_SET_MAX_CMSG_DELAY_CMDLEN;
		}
	}
	if (ping == TIMETOSENDPING_FORCE ||
			(length > 0 && ping != TIMETOSENDPING_NO)) {
		length += KP_PING_CMDLEN;
		*extralength += KP_PING_CMDLEN;
	}
	if (unlikely(initsession)) {
		length += KP_INIT_SESSION_CMDLEN;
		*extralength += KP_INIT_SESSION_CMDLEN;
	}

	return length;
}
static int dequeue_messages(struct neighbor *nb_cmsglocked, int ping,
		int initsession, int nbstate, __u32 targetmss,
		__u32 *length, struct list_head *cmsgs)
{
	__u32 extralength = 0;
	__u32 totallength;

	int cmsgqueue_nonpong_empty = (
			list_empty(&(nb_cmsglocked->cmsg_queue_ack)) &&
			list_empty(&(nb_cmsglocked->cmsg_queue_ackconn)) &&
			list_empty(&(nb_cmsglocked->cmsg_queue_conndata_lowlat)) &&
			list_empty(&(nb_cmsglocked->cmsg_queue_conndata_highlat)) &&
			list_empty(&(nb_cmsglocked->cmsg_queue_other)));

	BUG_ON(list_empty(&(nb_cmsglocked->cmsg_queue_pong)) &&
			nb_cmsglocked->cmsg_pongslength != 0);
	BUG_ON(!list_empty(&(nb_cmsglocked->cmsg_queue_pong)) &&
			nb_cmsglocked->cmsg_pongslength == 0);
	BUG_ON(cmsgqueue_nonpong_empty &&
			nb_cmsglocked->cmsg_otherlength != 0);
	BUG_ON(!cmsgqueue_nonpong_empty &&
			nb_cmsglocked->cmsg_otherlength == 0);

	totallength = get_total_messages_length(nb_cmsglocked, ping,
			initsession, nbstate, &extralength);

	if (totallength == 0)
		return 1;

	if (totallength < targetmss && ping != TIMETOSENDPING_FORCE &&
			time_after(get_cmsg_timer_timeout(nb_cmsglocked,
			nbstate), jiffies))
		return 1;

	*length = extralength;

	_dequeue_messages(nb_cmsglocked, nbstate, targetmss, length, cmsgs);

	BUG_ON(*length == 0);
	BUG_ON(*length > targetmss);

	return 0;
}
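/*
 * A nonzero return means "send nothing yet": either the queues are empty,
 * or the packet would stay below targetmss and no message deadline has
 * expired, so waiting may still fill the packet. On a zero return, *length
 * bytes of messages have been moved onto cmsgs.
 */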
static void add_timeouted_retrans(struct neighbor *nb)
{
	spin_lock_bh(&(nb->retrans_lock));

	while (!list_empty(&(nb->retrans_list))) {
		struct control_retrans *cr = container_of(nb->retrans_list.next,
				struct control_retrans, timeout_list);

		BUG_ON(cr->nb != nb);

		if (time_after(cr->timeout, jiffies)) {
			if (mod_timer(&(nb->retrans_timer), cr->timeout) == 0) {
				kref_get(&(nb->ref));
			}
			break;
		}

		list_del(&(cr->timeout_list));
		rb_erase(&(cr->rbn), &(nb->kp_retransmits_rb));

		kref_put(&(cr->ref), kreffree_bug); /* rb */

		requeue_control_retrans(cr);
	}

	spin_unlock_bh(&(nb->retrans_lock));
}
static void _delete_all_cmsgs(struct list_head *cmsgs)
{
	while (!list_empty(cmsgs)) {
		struct control_msg_out *cm = container_of(cmsgs->next,
				struct control_msg_out, lh);

		list_del(&(cm->lh));

		if (cm->type == MSGTYPE_CONNDATA) {
			schedule_retransmit_conn(cm->msg.conn_data.cr, 0, 0);
			kfree(cm->msg.conn_data.data_orig);
		}

		free_control_msg(cm);
	}
}
static void delete_all_cmsgs(struct neighbor *nb)
{
	while (1) {
		struct list_head cmsgs;
		__u32 length = 0;

		INIT_LIST_HEAD(&cmsgs);

		spin_lock_bh(&(nb->cmsg_lock));
		_dequeue_messages(nb, NEIGHBOR_STATE_ACTIVE, 65536, &length,
				&cmsgs);
		spin_unlock_bh(&(nb->cmsg_lock));

		if (list_empty(&cmsgs))
			break;

		_delete_all_cmsgs(&cmsgs);
	}
}
static int reset_timeouted_conn_needed(struct neighbor *nb,
		struct conn *src_in_l)
{
	if (unlikely(src_in_l->sourcetype != SOURCE_IN ||
			src_in_l->source.in.nb != nb ||
			src_in_l->isreset != 0))
		return 0;
	else if (likely(time_after(src_in_l->source.in.jiffies_last_act +
			CONN_ACTIVITY_UPDATEINTERVAL_SEC * HZ +
			CONN_INACTIVITY_TIMEOUT_SEC * HZ, jiffies)))
		return 0;

	return 1;
}
static int reset_timeouted_conn(struct neighbor *nb, struct conn *src_in)
{
	int resetted = 0;

	if (src_in->is_client) {
		spin_lock_bh(&(src_in->rcv_lock));
		spin_lock_bh(&(src_in->reversedir->rcv_lock));
	} else {
		spin_lock_bh(&(src_in->reversedir->rcv_lock));
		spin_lock_bh(&(src_in->rcv_lock));
	}

	resetted = reset_timeouted_conn_needed(nb, src_in);
	if (unlikely(resetted == 0))
		goto unlock;

	resetted = (send_reset_conn(nb, src_in->reversedir->target.out.conn_id,
			1) == 0);
	if (unlikely(resetted == 0))
		goto unlock;

	BUG_ON(src_in->reversedir->isreset != 0);
	src_in->reversedir->isreset = 1;

unlock:
	if (src_in->is_client) {
		spin_unlock_bh(&(src_in->rcv_lock));
		spin_unlock_bh(&(src_in->reversedir->rcv_lock));
	} else {
		spin_unlock_bh(&(src_in->reversedir->rcv_lock));
		spin_unlock_bh(&(src_in->rcv_lock));
	}

	return resetted;
}
static void reset_timeouted_conns(struct neighbor *nb)
{
	int i;

	for (i = 0; i < 10000; i++) {
		unsigned long iflags;
		struct list_head *lh;
		struct conn *src_in;

		int resetted = 1;

		spin_lock_irqsave(&(nb->conn_list_lock), iflags);

		if (list_empty(&(nb->rcv_conn_list))) {
			spin_unlock_irqrestore(&(nb->conn_list_lock), iflags);
			break;
		}

		lh = nb->rcv_conn_list.next;
		list_del(lh);
		list_add_tail(lh, &(nb->rcv_conn_list));

		src_in = container_of(lh, struct conn, source.in.nb_list);
		kref_get(&(src_in->ref));

		spin_unlock_irqrestore(&(nb->conn_list_lock), iflags);

		spin_lock_bh(&(src_in->rcv_lock));
		BUG_ON(src_in->sourcetype != SOURCE_IN);
		BUG_ON(src_in->source.in.nb != nb);
		resetted = reset_timeouted_conn_needed(nb, src_in);
		spin_unlock_bh(&(src_in->rcv_lock));
		if (likely(resetted == 0))
			goto put;

		resetted = reset_timeouted_conn(nb, src_in);

put:
		kref_put(&(src_in->ref), free_conn);

		if (likely(resetted == 0))
			break;
	}
}
/*
 * may not be called by more than one thread at the same time, because
 * 1) readding control_msg_out may reorder them
 * 2) multiple pings may be sent
 */
int send_messages(struct neighbor *nb, int *sent)
{
	int rc = QOS_RESUME_DONE;
	int ping;
	int initsession;
	__u32 targetmss = mss_cmsg(nb);

	int nbstate = get_neigh_state(nb);

	if (likely(nbstate == NEIGHBOR_STATE_ACTIVE))
		reset_timeouted_conns(nb);

	if (unlikely(nbstate == NEIGHBOR_STATE_KILLED)) {
		spin_lock_bh(&(nb->retrans_lock));
		empty_retrans_queue(nb);
		spin_unlock_bh(&(nb->retrans_lock));

		delete_all_cmsgs(nb);
		return QOS_RESUME_DONE;
	}

	ping = time_to_send_ping(nb);

	spin_lock_bh(&(nb->cmsg_lock));

	if (nb->add_retrans_needed != 0) {
		nb->add_retrans_needed = 0;
		spin_unlock_bh(&(nb->cmsg_lock));
		add_timeouted_retrans(nb);
		spin_lock_bh(&(nb->cmsg_lock));
	}

	initsession = unlikely(atomic_read(&(nb->sessionid_snd_needed)) != 0);

	while (1) {
		struct list_head cmsgs;
		__u32 length = 0;
		__u64 seqno;

		INIT_LIST_HEAD(&cmsgs);

		if (dequeue_messages(nb, ping, initsession, nbstate,
				targetmss, &length, &cmsgs) != 0) {
			schedule_controlmsg_timer(nb);
			spin_unlock_bh(&(nb->cmsg_lock));
			return QOS_RESUME_DONE;
		}

		nb->kpacket_seqno++;
		seqno = nb->kpacket_seqno;

		spin_unlock_bh(&(nb->cmsg_lock));

		rc = _send_messages_send(nb, ping, initsession, &cmsgs,
				nbstate, length, seqno, sent);

		if (rc != QOS_RESUME_DONE)
			break;

		spin_lock_bh(&(nb->cmsg_lock));
	}

	return rc;
}
void controlmsg_timerfunc(struct timer_list *cmsg_timer)
{
	struct neighbor *nb = container_of(cmsg_timer,
			struct neighbor, cmsg_timer);
	qos_enqueue(nb->queue, &(nb->rb_kp), QOS_CALLER_KPACKET);
	kref_put(&(nb->ref), neighbor_free);
}
static int cmsg_full_packet(struct neighbor *nb, int nbstate)
{
	__u32 extralength = 0;
	int ping = time_to_send_ping(nb);
	int initsession = unlikely(atomic_read(&(nb->sessionid_snd_needed)) !=
			0);
	__u32 len = get_total_messages_length(nb, ping, initsession, nbstate,
			&extralength);

	if (len < mss_cmsg(nb))
		return 0;

	return 1;
}
void schedule_controlmsg_timer(struct neighbor *nb_cmsglocked)
{
	unsigned long timeout;
	int nbstate = get_neigh_state(nb_cmsglocked);

	if (unlikely(nbstate == NEIGHBOR_STATE_KILLED))
		return;

	if (atomic_read(&(nb_cmsglocked->cmsg_bulk_readds)) != 0)
		return;

	if (cmsg_full_packet(nb_cmsglocked, nbstate))
		goto now;

	if (nb_cmsglocked->add_retrans_needed != 0)
		goto now;

	timeout = get_cmsg_timer_timeout(nb_cmsglocked, nbstate);

	if (time_before_eq(timeout, jiffies)) {
now:
		qos_enqueue(nb_cmsglocked->queue, &(nb_cmsglocked->rb_kp),
				QOS_CALLER_KPACKET);
	} else {
		if (mod_timer(&(nb_cmsglocked->cmsg_timer), timeout) == 0) {
			kref_get(&(nb_cmsglocked->ref));
		}
	}
}
static int insert_pending_conn_resets(struct control_msg_out *ins)
{
	struct neighbor *nb = ins->nb;
	__u32 conn_id = ins->msg.reset_conn.conn_id;

	struct rb_root *root;
	struct rb_node **p;
	struct rb_node *parent = 0;

	BUG_ON(ins->msg.reset_conn.in_pending_conn_resets != 0);

	root = &(nb->pending_conn_resets_rb);
	p = &(root->rb_node);

	while ((*p) != 0) {
		struct control_msg_out *cm = container_of(*p,
				struct control_msg_out,
				msg.reset_conn.rbn);
		__u32 cm_connid = cm->msg.reset_conn.conn_id;

		BUG_ON(cm->nb != ins->nb);
		BUG_ON(cm->type != MSGTYPE_RESET_CONN);

		parent = *p;
		if (conn_id == cm_connid) {
			return 1;
		} else if (conn_id < cm_connid) {
			p = &(*p)->rb_left;
		} else if (conn_id > cm_connid) {
			p = &(*p)->rb_right;
		} else {
			BUG();
		}
	}

	kref_get(&(ins->ref));
	rb_link_node(&(ins->msg.reset_conn.rbn), parent, p);
	rb_insert_color(&(ins->msg.reset_conn.rbn), root);
	ins->msg.reset_conn.in_pending_conn_resets = 1;

	return 0;
}
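/*
 * pending_conn_resets_rb deduplicates RESET_CONN messages by conn_id: a
 * second reset for the same conn_id makes this function return 1, and
 * _enqueue_control_msg() drops the duplicate instead of queueing it.
 */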
static void free_oldest_pong(struct neighbor *nb)
{
	struct control_msg_out *cm = container_of(nb->cmsg_queue_pong.next,
			struct control_msg_out, lh);

	BUG_ON(list_empty(&(nb->cmsg_queue_pong)));
	BUG_ON(unlikely(cm->type != MSGTYPE_PONG));

	list_del(&(cm->lh));
	nb->cmsg_pongslength -= cm->length;
	BUG_ON(nb->cmsg_pongscnt == 0);
	cm->nb->cmsg_pongscnt--;
	free_control_msg(cm);
}
static int _enqueue_control_msg(struct control_msg_out *cm, int src)
{
	if (unlikely(cm->type == MSGTYPE_PONG)) {
		BUG_ON(src == ADDCMSG_SRC_SPLITCONNDATA);

		if (cm->nb->cmsg_pongscnt >= MAX_PONG_CMSGS_PER_NEIGH) {
			if (src != ADDCMSG_SRC_NEW) {
				BUG_ON(cm->nb->cmsg_pongscnt == 0);
				cm->nb->cmsg_pongscnt--;
				free_control_msg(cm);
				return 1;
			}
			free_oldest_pong(cm->nb);
		}

		cm->nb->cmsg_pongscnt++;
		cm->nb->cmsg_pongslength += cm->length;

		if (src != ADDCMSG_SRC_NEW) {
			list_add(&(cm->lh), &(cm->nb->cmsg_queue_pong));
		} else {
			list_add_tail(&(cm->lh), &(cm->nb->cmsg_queue_pong));
		}

		return 0;
	} else if (unlikely(cm->type == MSGTYPE_RESET_CONN)) {
		if (insert_pending_conn_resets(cm) != 0) {
			/* cmsg_lock is already held */
			cm->type = 0;
			free_control_msg(cm);
			return 1;
		}
	}

	cm->nb->cmsg_otherlength += cm->length;
	if (src == ADDCMSG_SRC_NEW) {
		if (cm->type == MSGTYPE_ACK) {
			list_add_tail(&(cm->lh), &(cm->nb->cmsg_queue_ack));
		} else if (cm->type == MSGTYPE_ACK_CONN) {
			list_add_tail(&(cm->lh), &(cm->nb->cmsg_queue_ackconn));
		} else if (cm->type == MSGTYPE_CONNDATA &&
				cm->msg.conn_data.highlatency != 0) {
			list_add_tail(&(cm->lh),
					&(cm->nb->cmsg_queue_conndata_highlat));
		} else if (cm->type == MSGTYPE_CONNDATA) {
			list_add_tail(&(cm->lh),
					&(cm->nb->cmsg_queue_conndata_lowlat));
		} else {
			list_add_tail(&(cm->lh), &(cm->nb->cmsg_queue_other));
		}
	} else {
		BUG_ON(src == ADDCMSG_SRC_SPLITCONNDATA &&
				cm->type != MSGTYPE_CONNDATA);
		BUG_ON(src == ADDCMSG_SRC_READD &&
				cm->type == MSGTYPE_ACK_CONN);

		if (cm->type == MSGTYPE_ACK) {
			list_add(&(cm->lh), &(cm->nb->cmsg_queue_ack));
		} else if (cm->type == MSGTYPE_ACK_CONN) {
			list_add(&(cm->lh), &(cm->nb->cmsg_queue_ackconn));
		} else if (cm->type == MSGTYPE_CONNDATA &&
				cm->msg.conn_data.highlatency != 0) {
			list_add(&(cm->lh),
					&(cm->nb->cmsg_queue_conndata_highlat));
		} else if (cm->type == MSGTYPE_CONNDATA) {
			list_add(&(cm->lh),
					&(cm->nb->cmsg_queue_conndata_lowlat));
		} else {
			list_add(&(cm->lh), &(cm->nb->cmsg_queue_other));
		}
	}

	return 0;
}
static void enqueue_control_msg(struct control_msg_out *cm, int src)
{
	BUG_ON(cm == 0);
	BUG_ON(cm->nb == 0);

	if (src == ADDCMSG_SRC_NEW)
		cm->time_added = jiffies;

	spin_lock_bh(&(cm->nb->cmsg_lock));

	if (_enqueue_control_msg(cm, src) != 0)
		goto out;

	if (src != ADDCMSG_SRC_READD && src != ADDCMSG_SRC_RETRANS)
		schedule_controlmsg_timer(cm->nb);

out:
	spin_unlock_bh(&(cm->nb->cmsg_lock));
}
void send_pong(struct neighbor *nb, __u32 cookie)
{
	struct control_msg_out *cm = _alloc_control_msg(nb);

	if (unlikely(cm == 0))
		return;

	cm->type = MSGTYPE_PONG;
	cm->msg.pong.cookie = cookie;
	cm->msg.pong.type = MSGTYPE_PONG_TIMEENQUEUED;
	cm->msg.pong.time_enqueued = ktime_get();
	cm->length = 9;
	enqueue_control_msg(cm, ADDCMSG_SRC_NEW);
}
void send_ack(struct neighbor *nb, __u64 seqno)
{
	struct control_msg_out *cm = alloc_control_msg(nb, ACM_PRIORITY_HIGH);

	if (unlikely(cm == 0))
		return;

	cm->type = MSGTYPE_ACK;
	cm->msg.ack.seqno = seqno;
	cm->length = 7;
	enqueue_control_msg(cm, ADDCMSG_SRC_NEW);
}
static void set_ooolen_flags(struct control_msg_out *cm)
{
	cm->msg.ack_conn.flags = (cm->msg.ack_conn.flags &
			(~KP_ACK_CONN_FLAGS_OOO));
	cm->msg.ack_conn.flags = (cm->msg.ack_conn.flags |
			ooolen_to_flags(cm->msg.ack_conn.length));
}
/* cmsg_lock must be held */
static void remove_pending_ackconn(struct control_msg_out *cm)
{
	cm->nb->cmsg_otherlength -= cm->length;
	list_del(&(cm->lh));

	list_del(&(cm->msg.ack_conn.conn_acks));
	kref_put(&(cm->msg.ack_conn.src_in->ref), free_conn);
	cm->msg.ack_conn.src_in = 0;

	cm->type = 0;
	free_control_msg(cm);
}
/* cmsg_lock must be held */
static void recalc_scheduled_ackconn_size(struct control_msg_out *cm)
{
	cm->nb->cmsg_otherlength -= cm->length;
	cm->length = 6 + ack_conn_len(cm->msg.ack_conn.flags);
	cm->nb->cmsg_otherlength += cm->length;
}
/* cmsg_lock must be held */
static int _try_merge_ackconn(struct conn *src_in_l,
		struct control_msg_out *fromcm, struct control_msg_out *tocm,
		int from_newack)
{
	if (ooolen(fromcm->msg.ack_conn.flags) != 0 &&
			ooolen(tocm->msg.ack_conn.flags) != 0) {
		__u64 tocmseqno = tocm->msg.ack_conn.seqno_ooo;
		__u64 tocmlength = tocm->msg.ack_conn.length;
		__u64 fromcmseqno = fromcm->msg.ack_conn.seqno_ooo;
		__u64 fromcmlength = fromcm->msg.ack_conn.length;

		if (seqno_eq(tocmseqno, fromcmseqno)) {
			if (fromcmlength > tocmlength)
				tocm->msg.ack_conn.length = fromcmlength;
		} else if (seqno_after(fromcmseqno, tocmseqno) &&
				seqno_before_eq(fromcmseqno, tocmseqno +
				tocmlength)) {
			__u64 len = seqno_clean(fromcmseqno + fromcmlength -
					tocmseqno);
			BUG_ON(len > U32_MAX);
			tocm->msg.ack_conn.length = (__u32) len;
		} else if (seqno_before(fromcmseqno, tocmseqno) &&
				seqno_after_eq(fromcmseqno + fromcmlength,
				tocmseqno)) {
			__u64 len = seqno_clean(tocmseqno + tocmlength -
					fromcmseqno);
			BUG_ON(len > U32_MAX);
			tocm->msg.ack_conn.seqno_ooo = fromcmseqno;
			tocm->msg.ack_conn.length = (__u32) len;
		} else {
			return 1;
		}
		set_ooolen_flags(tocm);
	}

	if ((fromcm->msg.ack_conn.flags &
			KP_ACK_CONN_FLAGS_SEQNO) != 0) {
		if ((tocm->msg.ack_conn.flags & KP_ACK_CONN_FLAGS_SEQNO) == 0)
			goto setseqno;

		BUG_ON(seqno_eq(fromcm->msg.ack_conn.ack_seqno,
				tocm->msg.ack_conn.ack_seqno));
		if (seqno_after_eq(tocm->msg.ack_conn.ack_seqno,
				fromcm->msg.ack_conn.ack_seqno)) {
			BUG_ON(seqno_after(fromcm->msg.ack_conn.seqno,
					tocm->msg.ack_conn.seqno));
			goto skipseqno;
		}

		BUG_ON(seqno_before(fromcm->msg.ack_conn.seqno,
				tocm->msg.ack_conn.seqno));

setseqno:
		tocm->msg.ack_conn.flags = (tocm->msg.ack_conn.flags |
				KP_ACK_CONN_FLAGS_SEQNO);
		tocm->msg.ack_conn.seqno = fromcm->msg.ack_conn.seqno;
		tocm->msg.ack_conn.ack_seqno = fromcm->msg.ack_conn.ack_seqno;

skipseqno:
		if ((fromcm->msg.ack_conn.flags &
				KP_ACK_CONN_FLAGS_WINDOW) != 0)
			tocm->msg.ack_conn.flags = (tocm->msg.ack_conn.flags |
					KP_ACK_CONN_FLAGS_WINDOW);
	}

	if (ooolen(fromcm->msg.ack_conn.flags) != 0) {
		tocm->msg.ack_conn.seqno_ooo = fromcm->msg.ack_conn.seqno_ooo;
		tocm->msg.ack_conn.length = fromcm->msg.ack_conn.length;
		set_ooolen_flags(tocm);
	}

	if ((fromcm->msg.ack_conn.flags & KP_ACK_CONN_FLAGS_PRIORITY) != 0) {
		BUG_ON((tocm->msg.ack_conn.flags &
				KP_ACK_CONN_FLAGS_PRIORITY) != 0);
		tocm->msg.ack_conn.priority_seqno =
				fromcm->msg.ack_conn.priority_seqno;
		tocm->msg.ack_conn.priority = fromcm->msg.ack_conn.priority;
	}

	recalc_scheduled_ackconn_size(tocm);
	if (from_newack == 0)
		remove_pending_ackconn(fromcm);

	return 0;
}
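/*
 * The merge combines touching or overlapping out-of-order ranges, e.g.
 * tocm [100, 150) and fromcm [140, 180) become [100, 180); disjoint ranges
 * return 1 and stay separate. For the in-order fields, the ack with the
 * higher ack_seqno wins.
 */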
/* cmsg_lock must be held */
static void try_merge_ackconns(struct conn *src_in_l,
		struct control_msg_out *cm)
{
	struct list_head *currlh = cm->msg.ack_conn.conn_acks.next;

	while (currlh != &(src_in_l->source.in.acks_pending)) {
		struct control_msg_out *currcm = container_of(currlh,
				struct control_msg_out,
				msg.ack_conn.conn_acks);
		currlh = currlh->next;
		remove_connack_oooflag_ifold(src_in_l, currcm);
		_try_merge_ackconn(src_in_l, currcm, cm, 0);
	}
}
static void merge_or_enqueue_ackconn(struct conn *src_in_l,
		struct control_msg_out *cm, int src)
{
	struct list_head *currlh;

	BUG_ON(src_in_l->sourcetype != SOURCE_IN);

	spin_lock_bh(&(cm->nb->cmsg_lock));

	currlh = src_in_l->source.in.acks_pending.next;

	while (currlh != &(src_in_l->source.in.acks_pending)) {
		struct control_msg_out *currcm = container_of(currlh,
				struct control_msg_out,
				msg.ack_conn.conn_acks);

		BUG_ON(currcm->nb != cm->nb);
		BUG_ON(currcm->type != MSGTYPE_ACK_CONN);
		BUG_ON(cm->msg.ack_conn.src_in != src_in_l);
		BUG_ON(currcm->msg.ack_conn.conn_id !=
				cm->msg.ack_conn.conn_id);

		if (_try_merge_ackconn(src_in_l, cm, currcm, 1) == 0) {
			try_merge_ackconns(src_in_l, currcm);
			schedule_controlmsg_timer(currcm->nb);
			spin_unlock_bh(&(currcm->nb->cmsg_lock));
			/*
			 * when calling free_control_msg here conn may already
			 * be locked and priority_send_allowed should not be
			 * reset
			 */
			cm->msg.ack_conn.flags = 0;
			free_control_msg(cm);
			return;
		}

		currlh = currlh->next;
	}

	list_add_tail(&(cm->msg.ack_conn.conn_acks),
			&(src_in_l->source.in.acks_pending));

	spin_unlock_bh(&(cm->nb->cmsg_lock));

	enqueue_control_msg(cm, src);
}
static int try_update_ackconn_seqno(struct conn *src_in_l)
{
	int rc = 0;

	spin_lock_bh(&(src_in_l->source.in.nb->cmsg_lock));

	if (list_empty(&(src_in_l->source.in.acks_pending)) == 0) {
		struct control_msg_out *cm = container_of(
				src_in_l->source.in.acks_pending.next,
				struct control_msg_out,
				msg.ack_conn.conn_acks);
		BUG_ON(cm->nb != src_in_l->source.in.nb);
		BUG_ON(cm->type != MSGTYPE_ACK_CONN);
		BUG_ON(cm->msg.ack_conn.src_in != src_in_l);
		BUG_ON(cm->msg.ack_conn.conn_id !=
				src_in_l->reversedir->target.out.conn_id);

		cm->msg.ack_conn.flags = (cm->msg.ack_conn.flags |
				KP_ACK_CONN_FLAGS_SEQNO |
				KP_ACK_CONN_FLAGS_WINDOW);
		cm->msg.ack_conn.seqno = src_in_l->source.in.next_seqno;

		src_in_l->source.in.ack_seqno++;
		cm->msg.ack_conn.ack_seqno = src_in_l->source.in.ack_seqno;

		remove_connack_oooflag_ifold(src_in_l, cm);
		recalc_scheduled_ackconn_size(cm);

		try_merge_ackconns(src_in_l, cm);
	} else {
		rc = 1;
	}

	spin_unlock_bh(&(src_in_l->source.in.nb->cmsg_lock));

	return rc;
}
*src_in_l
, __u64 seqno_ooo
,
2133 struct control_msg_out
*cm
;
2135 BUG_ON(src_in_l
->sourcetype
!= SOURCE_IN
);
2137 BUG_ON(ooo_length
> 0 && seqno_before_eq(seqno_ooo
,
2138 src_in_l
->source
.in
.next_seqno
));
2140 update_windowlimit(src_in_l
);
2142 if (ooo_length
!= 0) {
2143 cm
= alloc_control_msg(src_in_l
->source
.in
.nb
,
2149 if (src_in_l
->source
.in
.inorder_ack_needed
!= 0)
2152 if (seqno_clean(src_in_l
->source
.in
.window_seqnolimit
-
2153 src_in_l
->source
.in
.next_seqno
) < WINDOW_ENCODE_MIN
)
2156 if (seqno_clean(src_in_l
->source
.in
.window_seqnolimit_remote
-
2157 src_in_l
->source
.in
.next_seqno
) >= WINDOW_ENCODE_MIN
&&
2158 seqno_clean(src_in_l
->source
.in
.window_seqnolimit
-
2159 src_in_l
->source
.in
.next_seqno
) * 7 <
2161 src_in_l
->source
.in
.window_seqnolimit_remote
-
2162 src_in_l
->source
.in
.next_seqno
) * 8)
2166 if (try_update_ackconn_seqno(src_in_l
) == 0)
2169 cm
= alloc_control_msg(src_in_l
->source
.in
.nb
, ACM_PRIORITY_MED
);
2171 printk(KERN_ERR
"error allocating inorder ack");
2176 cm
->type
= MSGTYPE_ACK_CONN
;
2177 src_in_l
->source
.in
.ack_seqno
++;
2178 cm
->msg
.ack_conn
.ack_seqno
= src_in_l
->source
.in
.ack_seqno
;
2179 kref_get(&(src_in_l
->ref
));
2180 cm
->msg
.ack_conn
.src_in
= src_in_l
;
2181 cm
->msg
.ack_conn
.conn_id
= src_in_l
->reversedir
->target
.out
.conn_id
;
2182 cm
->msg
.ack_conn
.seqno
= src_in_l
->source
.in
.next_seqno
;
2183 cm
->msg
.ack_conn
.seqno_ooo
= seqno_ooo
;
2184 cm
->msg
.ack_conn
.length
= ooo_length
;
2185 cm
->msg
.ack_conn
.flags
= KP_ACK_CONN_FLAGS_SEQNO
|
2186 KP_ACK_CONN_FLAGS_WINDOW
;
2187 set_ooolen_flags(cm
);
2188 cm
->length
= 6 + ack_conn_len(cm
->msg
.ack_conn
.flags
);
2190 merge_or_enqueue_ackconn(src_in_l
, cm
, ADDCMSG_SRC_NEW
);
2193 src_in_l
->source
.in
.inorder_ack_needed
= 0;
2194 src_in_l
->source
.in
.window_seqnolimit_remote
=
2195 src_in_l
->source
.in
.window_seqnolimit
;
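/*
 * The 7/8 comparison above appears to implement window-update suppression:
 * while the peer already knows of at least WINDOW_ENCODE_MIN of window and
 * the current window is still below 8/7 of what the peer knows, no
 * standalone ack is generated; one is forced only when the usable window
 * grew by at least that factor (or an in-order/ooo ack is needed anyway).
 */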
static int try_add_priority(struct conn *trgt_out_l, __u8 priority)
{
	int rc = 0;
	struct conn *src_in = trgt_out_l->reversedir;

	spin_lock_bh(&(trgt_out_l->target.out.nb->cmsg_lock));

	if (list_empty(&(src_in->source.in.acks_pending)) == 0) {
		struct control_msg_out *cm = container_of(
				src_in->source.in.acks_pending.next,
				struct control_msg_out,
				msg.ack_conn.conn_acks);
		BUG_ON(cm->nb != trgt_out_l->target.out.nb);
		BUG_ON(cm->type != MSGTYPE_ACK_CONN);
		BUG_ON(cm->msg.ack_conn.src_in != trgt_out_l->reversedir);
		BUG_ON(cm->msg.ack_conn.conn_id !=
				trgt_out_l->target.out.conn_id);

		BUG_ON((cm->msg.ack_conn.flags & KP_ACK_CONN_FLAGS_PRIORITY) !=
				0);
		cm->msg.ack_conn.flags = (cm->msg.ack_conn.flags |
				KP_ACK_CONN_FLAGS_PRIORITY);
		cm->msg.ack_conn.priority_seqno =
				trgt_out_l->target.out.priority_seqno;
		cm->msg.ack_conn.priority = priority;
		recalc_scheduled_ackconn_size(cm);
	} else {
		rc = 1;
	}

	spin_unlock_bh(&(trgt_out_l->target.out.nb->cmsg_lock));

	return rc;
}
void send_priority(struct conn *trgt_out_ll, int force, __u8 priority)
{
	struct control_msg_out *cm;

	if (try_add_priority(trgt_out_ll, priority) == 0)
		goto out;

	/* ... */

	cm = alloc_control_msg(trgt_out_ll->target.out.nb, ACM_PRIORITY_LOW);
	if (unlikely(cm == 0))
		return;

	cm->type = MSGTYPE_ACK_CONN;
	cm->msg.ack_conn.flags = KP_ACK_CONN_FLAGS_PRIORITY;
	kref_get(&(trgt_out_ll->reversedir->ref));
	BUG_ON(trgt_out_ll->targettype != TARGET_OUT);
	cm->msg.ack_conn.src_in = trgt_out_ll->reversedir;
	cm->msg.ack_conn.conn_id = trgt_out_ll->target.out.conn_id;
	cm->msg.ack_conn.priority_seqno =
			trgt_out_ll->target.out.priority_seqno;
	cm->msg.ack_conn.priority = priority;

	cm->length = 6 + ack_conn_len(cm->msg.ack_conn.flags);
	merge_or_enqueue_ackconn(trgt_out_ll->reversedir, cm, ADDCMSG_SRC_NEW);

out:
	trgt_out_ll->target.out.priority_last = priority;
	trgt_out_ll->target.out.priority_seqno++;
	trgt_out_ll->target.out.priority_send_allowed = 0;
}
void free_ack_conns(struct conn *src_in_lx)
{
	spin_lock_bh(&(src_in_lx->source.in.nb->cmsg_lock));
	while (list_empty(&(src_in_lx->source.in.acks_pending)) == 0) {
		struct list_head *currlh =
				src_in_lx->source.in.acks_pending.next;
		struct control_msg_out *currcm = container_of(currlh,
				struct control_msg_out,
				msg.ack_conn.conn_acks);

		remove_pending_ackconn(currcm);
	}
	schedule_controlmsg_timer(src_in_lx->source.in.nb);
	spin_unlock_bh(&(src_in_lx->source.in.nb->cmsg_lock));
}
void send_connect_success(struct control_msg_out *cm, __u32 conn_id,
		struct conn *src_in)
{
	cm->type = MSGTYPE_CONNECT_SUCCESS;
	cm->msg.connect_success.conn_id = conn_id;
	kref_get(&(src_in->ref));
	cm->msg.connect_success.src_in = src_in;
	cm->length = 6;
	enqueue_control_msg(cm, ADDCMSG_SRC_NEW);
}
void send_connect_nb(struct control_msg_out *cm, __u32 conn_id, __u64 seqno1,
		__u64 seqno2, struct conn *src_in_ll)
{
	cm->type = MSGTYPE_CONNECT;
	cm->msg.connect.conn_id = conn_id;
	cm->msg.connect.seqno1 = seqno1;
	cm->msg.connect.seqno2 = seqno2;
	kref_get(&(src_in_ll->ref));
	BUG_ON(src_in_ll->sourcetype != SOURCE_IN);
	cm->msg.connect.src_in = src_in_ll;
	cm->length = 20;
	enqueue_control_msg(cm, ADDCMSG_SRC_NEW);
}
void send_conndata(struct control_msg_out *cm, __u32 conn_id, __u64 seqno,
		char *data_orig, char *data, __u32 datalen,
		__u8 snd_delayed_lowbuf, __u8 flush, __u8 highlatency,
		struct conn_retrans *cr)
{
	cm->type = MSGTYPE_CONNDATA;
	cm->msg.conn_data.conn_id = conn_id;
	cm->msg.conn_data.seqno = seqno;
	cm->msg.conn_data.data_orig = data_orig;
	cm->msg.conn_data.data = data;
	cm->msg.conn_data.datalen = datalen;
	cm->msg.conn_data.snd_delayed_lowbuf = snd_delayed_lowbuf;
	cm->msg.conn_data.flush = flush;
	cm->msg.conn_data.highlatency = highlatency;
	cm->msg.conn_data.cr = cr;
	cm->length = KP_CONN_DATA_CMDLEN + datalen;
	enqueue_control_msg(cm, ADDCMSG_SRC_NEW);
}
int send_reset_conn(struct neighbor *nb, __u32 conn_id, int lowprio)
{
	struct control_msg_out *cm;

	if (unlikely(get_neigh_state(nb) == NEIGHBOR_STATE_KILLED))
		return 1;

	cm = alloc_control_msg(nb, lowprio ?
			ACM_PRIORITY_LOW : ACM_PRIORITY_MED);

	if (unlikely(cm == 0))
		return 1;

	cm->type = MSGTYPE_RESET_CONN;
	cm->msg.reset_conn.conn_id = conn_id;
	cm->length = 5;

	enqueue_control_msg(cm, ADDCMSG_SRC_NEW);

	return 0;
}
int __init cor_kgen_init(void)
{
	controlmsg_slab = kmem_cache_create("cor_controlmsg",
			sizeof(struct control_msg_out), 8, 0, 0);
	if (unlikely(controlmsg_slab == 0))
		return -ENOMEM;

	controlretrans_slab = kmem_cache_create("cor_controlretransmsg",
			sizeof(struct control_retrans), 8, 0, 0);
	if (unlikely(controlretrans_slab == 0))
		return -ENOMEM;

	return 0;
}

MODULE_LICENSE("GPL");