/*
 * Connection oriented routing
 * Copyright (C) 2007-2010 Michael Blizek
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version 2
 * of the License, or (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
 * 02110-1301, USA.
 */
#include <linux/gfp.h>
#include <linux/jiffies.h>
#include <linux/slab.h>

#include "cor.h" /* assumed project header; provides struct conn, struct neighbor, ... */
struct kmem_cache *connretrans_slab;
/* timeout_list and conn_list share a single ref */
struct conn_retrans {
	struct list_head timeout_list;
	struct list_head conn_list;
	struct htab_entry htab_entry;
	/* the field list below is completed from its uses in this file */
	struct conn *rconn;
	__u32 seqno;
	__u32 length;
	__u8 ackrcvd;
	unsigned long timeout;
	struct kref ref;
};
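/*
 * Lifecycle (inferred from the uses below): entries are allocated in
 * flush_out()/readd_conn_retrans(), linked into the per-neighbor
 * retrans_list_conn (ordered by timeout) and the per-conn retrans_list
 * (ordered by seqno), and released through kref_put(..., free_connretrans)
 * once they are fully acked or cancelled.
 */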
static void free_connretrans(struct kref *ref)
{
	struct conn_retrans *cr = container_of(ref, struct conn_retrans, ref);

	/* cr->rconn must be dereferenced before cr goes back to the slab */
	kref_put(&(cr->rconn->ref), free_conn);
	kmem_cache_free(connretrans_slab, cr);
}
DEFINE_MUTEX(queues_lock);
LIST_HEAD(queues); /* all device queues, protected by queues_lock */

struct delayed_work qos_resume_work;
int qos_resume_scheduled;

struct qos_queue {
	struct list_head queue_list;

	struct net_device *dev;

	struct list_head kpackets_waiting;
	struct list_head conn_retrans_waiting;
	struct list_head announce_waiting;
	struct list_head conns_waiting;
};
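/*
 * One qos_queue exists per network device. While the device is congested,
 * pending senders are parked on one of the four lists above and resumed
 * from qos_resume(). Judging from may_send_conn()/may_send_conn_retrans()
 * below, the intended service order is: kernel packets first, then conn
 * retransmits, then announcements, then ordinary conn data.
 */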
/* Highest bidder "pays" the credits the second has bid */
static int _resume_conns(struct qos_queue *q)
{
	struct conn *best = 0;
	__u64 bestcredit = 0;
	__u64 secondcredit = 0;

	int rc;

	struct list_head *lh = q->conns_waiting.next;

	while (lh != &(q->conns_waiting)) {
		struct conn *rconn = container_of(lh, struct conn,
				target.out.rb.lh);
		__u64 credits;

		lh = lh->next;

		refresh_conn_credits(rconn, 0, 0);

		mutex_lock(&(rconn->rcv_lock));

		BUG_ON(rconn->targettype != TARGET_OUT);

		if (atomic_read(&(rconn->isreset)) != 0) {
			rconn->target.out.rb.in_queue = 0;
			list_del(&(rconn->target.out.rb.lh));
			mutex_unlock(&(rconn->rcv_lock));
			kref_put(&(rconn->ref), free_conn);
			continue;
		}

		BUG_ON(rconn->data_buf.read_remaining == 0);

		if (may_alloc_control_msg(rconn->target.out.nb,
				ACM_PRIORITY_MED) == 0) {
			mutex_unlock(&(rconn->rcv_lock));
			continue;
		}

		if (rconn->credits <= 0) {
			mutex_unlock(&(rconn->rcv_lock));
			continue;
		}

		credits = multiply_div(rconn->credits, 1LL << 24,
				rconn->data_buf.read_remaining);
		mutex_unlock(&(rconn->rcv_lock));

		if (best == 0 || bestcredit < credits) {
			secondcredit = bestcredit;
			best = rconn;
			bestcredit = credits;
		} else if (secondcredit < credits) {
			secondcredit = credits;
		}
	}

	if (best == 0)
		return RC_FLUSH_CONN_OUT_OK;

	mutex_lock(&(best->rcv_lock));
	rc = flush_out(best, 1, (__u32) (secondcredit >> 32));

	if (rc == RC_FLUSH_CONN_OUT_OK || rc == RC_FLUSH_CONN_OUT_OK_SENT) {
		best->target.out.rb.in_queue = 0;
		list_del(&(best->target.out.rb.lh));
	}
	mutex_unlock(&(best->rcv_lock));

	refresh_conn_credits(best, 0, 0);
	unreserve_sock_buffer(best);

	if (rc == RC_FLUSH_CONN_OUT_OK_SENT) {
		/* ... */
	}

	if (rc == RC_FLUSH_CONN_OUT_OK || rc == RC_FLUSH_CONN_OUT_OK_SENT)
		kref_put(&(best->ref), free_conn);

	return rc;
}
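/*
 * The "auction" in _resume_conns() is second-price: assume (illustrative
 * numbers) three conns whose scaled credits-per-byte bids are 9, 7 and 4.
 * The conn bidding 9 wins and is flushed, but flush_out() charges it based
 * on secondcredit, i.e. the 7 bid by the runner-up.
 */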
/* returns 0 once conns_waiting is drained, nonzero if sending had to stop */
static int resume_conns(struct qos_queue *q)
{
	while (list_empty(&(q->conns_waiting)) == 0) {
		int rc = _resume_conns(q);
		if (rc != RC_FLUSH_CONN_OUT_OK &&
				rc != RC_FLUSH_CONN_OUT_OK_SENT)
			return 1;
	}
	return 0;
}
static int send_retrans(struct neighbor *nb, int fromqos);
static int _qos_resume(struct qos_queue *q, int caller)
{
	int rc = 0;

	struct list_head *lh;

	if (caller == QOS_CALLER_KPACKET)
		lh = &(q->kpackets_waiting);
	else if (caller == QOS_CALLER_CONN_RETRANS)
		lh = &(q->conn_retrans_waiting);
	else if (caller == QOS_CALLER_ANNOUNCE)
		lh = &(q->announce_waiting);
	else
		BUG();

	while (list_empty(lh) == 0) {
		struct list_head *curr = lh->next;
		struct resume_block *rb = container_of(curr,
				struct resume_block, lh);

		rb->in_queue = 0;
		list_del(curr);

		if (caller == QOS_CALLER_KPACKET) {
			struct neighbor *nb = container_of(rb, struct neighbor,
					rb_kp);
			rc = send_messages(nb, 0, 1);
		} else if (caller == QOS_CALLER_CONN_RETRANS) {
			struct neighbor *nb = container_of(rb, struct neighbor,
					rb_cr);
#warning todo do not send if neighbor is stalled
			rc = send_retrans(nb, 1);
		} else if (caller == QOS_CALLER_ANNOUNCE) {
			struct announce_data *ann = container_of(rb,
					struct announce_data, rb);
			rc = send_announce_qos(ann);
		} else {
			BUG();
		}

		if (rc != 0 && rb->in_queue == 0) {
			/* sending failed and nobody requeued it: put it back
			 * so it is retried on the next resume (reconstructed
			 * from context) */
			rb->in_queue = 1;
			list_add(curr, lh);
		} else {
			/* drop the ref taken by qos_enqueue() */
			if (caller == QOS_CALLER_KPACKET) {
				kref_put(&(container_of(rb, struct neighbor,
						rb_kp)->ref), neighbor_free);
			} else if (caller == QOS_CALLER_CONN_RETRANS) {
				kref_put(&(container_of(rb, struct neighbor,
						rb_cr)->ref), neighbor_free);
			} else if (caller == QOS_CALLER_ANNOUNCE) {
				kref_put(&(container_of(rb,
						struct announce_data, rb)->ref),
						announce_data_free);
			}
		}

		if (rc != 0)
			break;
	}

	return rc;
}
static void qos_resume(struct work_struct *work)
{
	int congested = 0;
	struct list_head *curr;

	mutex_lock(&(queues_lock));

	curr = queues.next;
	while (curr != (&queues)) {
		struct qos_queue *q = container_of(curr,
				struct qos_queue, queue_list);
		int i;

		for (i=0;i<4;i++) {
			int rc;

			if (i == 3)
				rc = resume_conns(q);
			else
				rc = _qos_resume(q, i);

			if (rc != 0) {
				congested = 1;
				break;
			}
		}

		curr = curr->next;

		/* i == 4 means the queue was fully drained; a queue whose
		 * device is gone can be reaped now */
		if (i == 4 && unlikely(q->dev == 0)) {
			list_del(&(q->queue_list));
			kfree(q);
		}
	}

	if (congested == 0) {
		qos_resume_scheduled = 0;
	} else {
		schedule_delayed_work(&(qos_resume_work), 1);
	}

	mutex_unlock(&(queues_lock));
}
static struct qos_queue *get_queue(struct net_device *dev)
{
	struct list_head *curr = queues.next;
	while (curr != (&queues)) {
		struct qos_queue *q = container_of(curr,
				struct qos_queue, queue_list);
		if (q->dev == dev)
			return q;
		curr = curr->next;
	}
	return 0;
}
int destroy_queue(struct net_device *dev)
{
	struct qos_queue *q;

	mutex_lock(&(queues_lock));

	q = get_queue(dev);

	if (q == 0) {
		mutex_unlock(&(queues_lock));
		return 1;
	}

	/* mark the queue dead; qos_resume() frees it once it is drained */
	q->dev = 0;
	/* ... */

	mutex_unlock(&(queues_lock));

	return 0;
}
int create_queue(struct net_device *dev)
{
	struct qos_queue *q = kmalloc(sizeof(struct qos_queue), GFP_KERNEL);

	if (q == 0) {
		printk(KERN_ERR "cor: unable to allocate memory for device "
				"queue, not enabling device\n");
		return 1;
	}

	q->dev = dev;

	INIT_LIST_HEAD(&(q->kpackets_waiting));
	INIT_LIST_HEAD(&(q->conn_retrans_waiting));
	INIT_LIST_HEAD(&(q->announce_waiting));
	INIT_LIST_HEAD(&(q->conns_waiting));

	mutex_lock(&(queues_lock));
	list_add(&(q->queue_list), &queues);
	mutex_unlock(&(queues_lock));

	return 0;
}
void qos_enqueue(struct net_device *dev, struct resume_block *rb, int caller)
{
	struct qos_queue *q;

	mutex_lock(&(queues_lock));

	if (rb->in_queue)
		goto out;

	q = get_queue(dev);
	if (unlikely(q == 0))
		goto out;

	rb->in_queue = 1;

	if (caller == QOS_CALLER_KPACKET) {
		list_add(&(rb->lh), &(q->kpackets_waiting));
		kref_get(&(container_of(rb, struct neighbor, rb_kp)->ref));
	} else if (caller == QOS_CALLER_CONN_RETRANS) {
		list_add(&(rb->lh), &(q->conn_retrans_waiting));
		kref_get(&(container_of(rb, struct neighbor, rb_cr)->ref));
	} else if (caller == QOS_CALLER_ANNOUNCE) {
		list_add(&(rb->lh), &(q->announce_waiting));
		kref_get(&(container_of(rb, struct announce_data, rb)->ref));
	} else if (caller == QOS_CALLER_CONN) {
		struct conn *rconn = container_of(rb, struct conn,
				target.out.rb);
		mutex_lock(&(rconn->rcv_lock));
		BUG_ON(rconn->targettype != TARGET_OUT);
		list_add(&(rb->lh), &(q->conns_waiting));
		kref_get(&(rconn->ref));
		mutex_unlock(&(rconn->rcv_lock));
	} else {
		BUG();
	}

	if (qos_resume_scheduled == 0) {
		schedule_delayed_work(&(qos_resume_work), 1);
		qos_resume_scheduled = 1;
	}

out:
	mutex_unlock(&(queues_lock));
}
void qos_remove_conn(struct conn *rconn)
{
	int putref = 0;

	mutex_lock(&(queues_lock));
	if (rconn->targettype != TARGET_OUT)
		goto out;

	if (rconn->target.out.rb.in_queue == 0)
		goto out;

	rconn->target.out.rb.in_queue = 0;
	list_del(&(rconn->target.out.rb.lh));
	putref = 1;

out:
	mutex_unlock(&(queues_lock));

	/* drop the ref taken by qos_enqueue() only if the conn was queued */
	if (putref)
		kref_put(&(rconn->ref), free_conn);
}
static int may_send_conn_retrans(struct neighbor *nb)
{
	struct qos_queue *q;
	int rc = 0;

	mutex_lock(&(queues_lock));

	q = get_queue(nb->dev);
	if (unlikely(q == 0))
		goto out;

	rc = (list_empty(&(q->kpackets_waiting)));

out:
	mutex_unlock(&(queues_lock));

	return rc;
}
static int may_send_conn(struct conn *rconn)
{
	struct qos_queue *q;
	int rc = 0;

	mutex_lock(&(queues_lock));

	q = get_queue(rconn->target.out.nb->dev);
	if (unlikely(q == 0))
		goto out;

	rc = (list_empty(&(q->kpackets_waiting)) &&
			list_empty(&(q->conn_retrans_waiting)) &&
			list_empty(&(q->announce_waiting)) &&
			list_empty(&(q->conns_waiting)));

out:
	mutex_unlock(&(queues_lock));

	return rc;
}
struct sk_buff *create_packet(struct neighbor *nb, int size,
		gfp_t alloc_flags, __u32 conn_id, __u32 seqno)
{
	struct sk_buff *ret;
	char *dest;

	ret = alloc_skb(size + 9 + LL_ALLOCATED_SPACE(nb->dev), alloc_flags);
	if (unlikely(0 == ret))
		return 0;

	ret->protocol = htons(ETH_P_COR);
	ret->dev = nb->dev;

	skb_reserve(ret, LL_RESERVED_SPACE(nb->dev));
	if (unlikely(dev_hard_header(ret, nb->dev, ETH_P_COR, nb->mac,
			nb->dev->dev_addr, ret->len) < 0)) {
		kfree_skb(ret);
		return 0;
	}
	skb_reset_network_header(ret);

	dest = skb_put(ret, 9);
	BUG_ON(dest == 0);

	dest[0] = PACKET_TYPE_DATA;
	dest += 1;

	put_u32(dest, conn_id, 1);
	dest += 4;
	put_u32(dest, seqno, 1);
	dest += 4;

	return ret;
}
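/*
 * Wire layout produced above (9 bytes of cor header after the link layer
 * header): 1 byte PACKET_TYPE_DATA, 4 bytes conn_id, 4 bytes seqno, then
 * "size" bytes of payload appended by the caller via skb_put().
 */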
static void set_conn_retrans_timeout(struct conn_retrans *cr)
{
	struct neighbor *nb = cr->rconn->target.out.nb;
	cr->timeout = jiffies + usecs_to_jiffies(100000 +
			((__u32) atomic_read(&(nb->latency))) +
			((__u32) atomic_read(&(nb->max_remote_cmsg_delay))));
}
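/*
 * Example (illustrative numbers, assuming both atomics hold microseconds):
 * with latency == 20000 and max_remote_cmsg_delay == 50000, the timeout
 * above becomes jiffies + usecs_to_jiffies(170000), i.e. the retransmit
 * fires 170ms after the packet was (re)sent.
 */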
static struct conn_retrans *readd_conn_retrans(struct conn_retrans *cr,
		struct neighbor *nb, __u32 length, int *dontsend)
{
	unsigned long iflags;

	struct conn_retrans *ret = 0;

	spin_lock_irqsave(&(nb->retrans_lock), iflags);

	if (unlikely(cr->ackrcvd)) {
		*dontsend = 1;
		goto out;
	}

	if (unlikely(cr->length > length)) {
		/* only a prefix is being sent; split off the remainder */
		ret = kmem_cache_alloc(connretrans_slab, GFP_ATOMIC);
		if (unlikely(ret == 0)) {
			cr->timeout = jiffies + 1;
			goto out;
		}

		memset(ret, 0, sizeof(struct conn_retrans));
		ret->rconn = cr->rconn;
		kref_get(&(cr->rconn->ref));
		ret->seqno = cr->seqno + length;
		ret->length = cr->length - length;
		kref_init(&(ret->ref));

		list_add(&(ret->timeout_list), &(nb->retrans_list_conn));
		list_add(&(ret->conn_list), &(cr->conn_list));

		cr->length = length;
	} else {
		BUG_ON(cr->length != length);
	}

	list_del(&(cr->timeout_list));
	list_add_tail(&(cr->timeout_list), &(nb->retrans_list_conn));
	set_conn_retrans_timeout(cr);

out:
	spin_unlock_irqrestore(&(nb->retrans_lock), iflags);

	return ret;
}
/* rcvlock *must* be held while calling this */
void cancel_retrans(struct conn *rconn)
{
	unsigned long iflags;
	struct neighbor *nb = rconn->target.out.nb;

	spin_lock_irqsave(&(nb->retrans_lock), iflags);

	while (list_empty(&(rconn->target.out.retrans_list)) == 0) {
		struct conn_retrans *cr = container_of(
				rconn->target.out.retrans_list.next,
				struct conn_retrans, conn_list);
		BUG_ON(cr->rconn != rconn);

		list_del(&(cr->timeout_list));
		list_del(&(cr->conn_list));
		cr->ackrcvd = 1;

		kref_put(&(cr->ref), free_connretrans);
	}

	spin_unlock_irqrestore(&(nb->retrans_lock), iflags);
}
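/*
 * _send_retrans() (below) and cancel_retrans() can race: _send_retrans()
 * holds its own kref on the entry it is currently sending, so after
 * cancel_retrans() that entry stays allocated until the kref is dropped,
 * and its ackrcvd flag keeps readd_conn_retrans() from re-linking it.
 */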
static int _send_retrans(struct neighbor *nb, struct conn_retrans *cr)
{
	int targetmss = mss(nb);
	int dontsend = 0;
	int queuefull = 0;

	mutex_lock(&(cr->rconn->rcv_lock));

	BUG_ON(cr->rconn->targettype != TARGET_OUT);
	BUG_ON(cr->rconn->target.out.nb != nb);

	kref_get(&(cr->rconn->ref));

	if (unlikely(atomic_read(&(cr->rconn->isreset)) != 0)) {
		cancel_retrans(cr->rconn);
		goto out;
	}

	while (cr->length >= targetmss) {
		struct sk_buff *skb;
		char *dst;
		int rc;

		struct conn_retrans *cr2;

		if (may_send_conn_retrans(nb) == 0) {
			queuefull = 1;
			goto out;
		}

		skb = create_packet(nb, targetmss, GFP_KERNEL,
				cr->rconn->target.out.conn_id, cr->seqno);
		if (unlikely(skb == 0)) {
			cr->timeout = jiffies + 1;
			goto out;
		}

		cr2 = readd_conn_retrans(cr, nb, targetmss, &dontsend);
		if (unlikely(unlikely(dontsend) || unlikely(cr2 == 0 &&
				unlikely(cr->length > targetmss)))) {
			kfree_skb(skb);
			goto out;
		}

		dst = skb_put(skb, targetmss);

		databuf_pullold(cr->rconn, cr->seqno, dst, targetmss);
		rc = dev_queue_xmit(skb);

		if (rc != 0) {
			unsigned long iflags;

			spin_lock_irqsave(&(nb->retrans_lock), iflags);
			if (unlikely(cr->ackrcvd)) {
				/* acked while we were sending; nothing to
				 * re-add */
			} else {
				list_del(&(cr->timeout_list));
				list_add(&(cr->timeout_list),
						&(nb->retrans_list_conn));
			}
			spin_unlock_irqrestore(&(nb->retrans_lock), iflags);

			queuefull = 1;
			goto out;
		}

		/* continue with the remainder split off by
		 * readd_conn_retrans(), if any (reconstructed from context) */
		if (cr2 == 0)
			goto out;
		cr = cr2;
	}

	if (unlikely(cr->length <= 0)) {
		goto out;
	} else {
		struct control_msg_out *cm;
		char *buf = kmalloc(cr->length, GFP_KERNEL);

		if (unlikely(buf == 0)) {
			cr->timeout = jiffies + 1;
			goto out;
		}

		cm = alloc_control_msg(nb, ACM_PRIORITY_MED);
		if (unlikely(cm == 0)) {
			cr->timeout = jiffies + 1;
			kfree(buf);
			goto out;
		}

		databuf_pullold(cr->rconn, cr->seqno, buf, cr->length);

		if (unlikely(readd_conn_retrans(cr, nb, cr->length, &dontsend)
				!= 0))
			BUG();

		if (likely(dontsend == 0)) {
			send_conndata(cm, cr->rconn->target.out.conn_id,
					cr->seqno, buf, buf, cr->length);
		} else {
			free_control_msg(cm);
			kfree(buf);
		}
	}

out:
	mutex_unlock(&(cr->rconn->rcv_lock));

	kref_put(&(cr->rconn->ref), free_conn);

	return queuefull;
}
static int send_retrans(struct neighbor *nb, int fromqos)
{
	unsigned long iflags;

	struct conn_retrans *cr = 0;

	int nbstate;
	int queuefull = 0;
	int rescheduled = 0;

	spin_lock_irqsave(&(nb->state_lock), iflags);
	nbstate = nb->state;
	spin_unlock_irqrestore(&(nb->state_lock), iflags);

	while (1) {
		spin_lock_irqsave(&(nb->retrans_lock), iflags);

		if (list_empty(&(nb->retrans_list_conn))) {
			nb->retrans_timer_conn_running = 0;
			spin_unlock_irqrestore(&(nb->retrans_lock), iflags);
			break;
		}

		cr = container_of(nb->retrans_list_conn.next,
				struct conn_retrans, timeout_list);

		BUG_ON(cr->rconn->targettype != TARGET_OUT);

		if (unlikely(unlikely(nbstate == NEIGHBOR_STATE_KILLED) ||
				unlikely(atomic_read(
				&(cr->rconn->isreset)) != 0))) {
			list_del(&(cr->timeout_list));
			list_del(&(cr->conn_list));
			spin_unlock_irqrestore(&(nb->retrans_lock), iflags);

			kref_put(&(cr->ref), free_connretrans);
			continue;
		}

		BUG_ON(nb != cr->rconn->target.out.nb);

#warning todo check window limit
		if (time_after(cr->timeout, jiffies)) {
			schedule_delayed_work(&(nb->retrans_timer_conn),
					cr->timeout - jiffies);
			rescheduled = 1;
			spin_unlock_irqrestore(&(nb->retrans_lock), iflags);
			break;
		}

		kref_get(&(cr->ref));
		spin_unlock_irqrestore(&(nb->retrans_lock), iflags);
		queuefull = _send_retrans(nb, cr);
		kref_put(&(cr->ref), free_connretrans);

		if (queuefull) {
			if (fromqos == 0)
				qos_enqueue(nb->dev, &(nb->rb_cr),
						QOS_CALLER_CONN_RETRANS);
			break;
		}
	}

	if (rescheduled == 0)
		kref_put(&(nb->ref), neighbor_free);

	return queuefull;
}
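/*
 * Reference protocol of the retransmit timer (as reconstructed): the
 * neighbor ref taken when the timer was armed in schedule_retransmit_conn()
 * is dropped at the end of send_retrans() unless the timer was re-armed
 * above; re-arming keeps the ref alive for the next run.
 */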
void retransmit_conn_timerfunc(struct work_struct *work)
{
	struct neighbor *nb = container_of(to_delayed_work(work),
			struct neighbor, retrans_timer_conn);

	send_retrans(nb, 0);
}
void conn_ack_ooo_rcvd(struct conn *rconn, __u32 seqno_ooo, __u32 length)
{
	unsigned long iflags;
	struct neighbor *nb;
	struct list_head *curr;

	if (unlikely(length == 0))
		return;

	mutex_lock(&(rconn->rcv_lock));

	BUG_ON(rconn->targettype != TARGET_OUT);

	nb = rconn->target.out.nb;

	spin_lock_irqsave(&(nb->retrans_lock), iflags);

	curr = rconn->target.out.retrans_list.next;

	while (curr != &(rconn->target.out.retrans_list)) {
		struct conn_retrans *cr = container_of(curr,
				struct conn_retrans, conn_list);

		curr = curr->next;

		/* entries are sorted by seqno; skip everything that ends
		 * before the acked range starts */
		if (((__s32)(cr->seqno + cr->length - seqno_ooo)) <= 0)
			continue;

		if (((__s32)(cr->seqno + cr->length - seqno_ooo -
				length)) > 0) {
			/* cr reaches beyond the end of the acked range; trim
			 * its head if the two overlap, then stop */
			if (((__s32)(cr->seqno - seqno_ooo - length)) < 0) {
				__u32 newseqno = seqno_ooo + length;
				cr->length -= (newseqno - cr->seqno);
				cr->seqno = newseqno;
			}
			break;
		}

		if (((__s32)(cr->seqno - seqno_ooo)) < 0 &&
				((__s32)(cr->seqno + cr->length - seqno_ooo -
				length)) <= 0) {
			/* only the tail of cr is acked; keep the head */
			__u32 diff = seqno_ooo + length - cr->seqno -
					cr->length;
			cr->length -= (length - diff);
			continue;
		}

		/* cr lies entirely inside the acked range */
		list_del(&(cr->timeout_list));
		list_del(&(cr->conn_list));

		kref_put(&(cr->ref), free_connretrans);
	}

	if (unlikely(list_empty(&(rconn->target.out.retrans_list))) == 0) {
		struct conn_retrans *cr = container_of(
				rconn->target.out.retrans_list.next,
				struct conn_retrans, conn_list);
		if (unlikely(((__s32) (cr->seqno -
				rconn->target.out.seqno_acked)) > 0))
			rconn->target.out.seqno_acked = cr->seqno;
	}

	spin_unlock_irqrestore(&(nb->retrans_lock), iflags);

	mutex_unlock(&(rconn->rcv_lock));
}
void conn_ack_rcvd(struct conn *rconn, __u32 seqno, int setwindow, __u8 window)
{
	unsigned long iflags;
	struct neighbor *nb;

	mutex_lock(&(rconn->rcv_lock));

	BUG_ON(rconn->targettype != TARGET_OUT);

	if (unlikely(((__s32)(seqno - rconn->target.out.seqno_nextsend)) > 0) ||
			((__s32)(seqno - rconn->target.out.seqno_acked)) < 0)
		goto out;

	if (setwindow) {
		__u32 windowdec = dec_log_64_11(window);

		/* do not shrink the window on a duplicate ack */
		if (unlikely(seqno == rconn->target.out.seqno_acked &&
				((__s32) (seqno + windowdec -
				rconn->target.out.seqno_windowlimit)) < 0))
			goto skipwindow;

		rconn->target.out.seqno_windowlimit = seqno + windowdec;
	}

skipwindow:
	if (seqno == rconn->target.out.seqno_acked)
		goto out;

	nb = rconn->target.out.nb;

	spin_lock_irqsave(&(nb->retrans_lock), iflags);

	rconn->target.out.seqno_acked = seqno;

	while (list_empty(&(rconn->target.out.retrans_list)) == 0) {
		struct conn_retrans *cr = container_of(
				rconn->target.out.retrans_list.next,
				struct conn_retrans, conn_list);

		if (((__s32)(cr->seqno + cr->length - seqno)) > 0) {
			if (((__s32)(cr->seqno - seqno)) < 0) {
				cr->length -= (seqno - cr->seqno);
				cr->seqno = seqno;
			}
			break;
		}

		list_del(&(cr->timeout_list));
		list_del(&(cr->conn_list));

		kref_put(&(cr->ref), free_connretrans);
	}

	spin_unlock_irqrestore(&(nb->retrans_lock), iflags);
	databuf_ack(rconn, rconn->target.out.seqno_acked);

out:
	mutex_unlock(&(rconn->rcv_lock));
}
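/*
 * conn_ack_rcvd() handles cumulative acks: everything up to seqno leaves
 * the retransmit queue and the data buffer. A worked example (illustrative
 * numbers): with retrans entries [100,200) and [200,300) and an ack for
 * seqno 250, the first entry is freed and the second is trimmed to
 * [250,300); seqno_acked becomes 250.
 */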
static void schedule_retransmit_conn(struct conn_retrans *cr,
		struct conn *rconn, __u32 seqno, __u32 len)
{
	unsigned long iflags;

	struct neighbor *nb = rconn->target.out.nb;

	int first;

	BUG_ON(rconn->targettype != TARGET_OUT);

	memset(cr, 0, sizeof(struct conn_retrans));
	cr->rconn = rconn;
	kref_get(&(rconn->ref));
	cr->seqno = seqno;
	cr->length = len;
	kref_init(&(cr->ref));
	set_conn_retrans_timeout(cr);

	spin_lock_irqsave(&(nb->retrans_lock), iflags);

	first = unlikely(list_empty(&(nb->retrans_list_conn)));
	list_add_tail(&(cr->timeout_list), &(nb->retrans_list_conn));

	list_add_tail(&(cr->conn_list), &(rconn->target.out.retrans_list));

	if (unlikely(unlikely(first) &&
			unlikely(nb->retrans_timer_conn_running == 0))) {
		schedule_delayed_work(&(nb->retrans_timer_conn),
				cr->timeout - jiffies);
		nb->retrans_timer_conn_running = 1;
		kref_get(&(nb->ref));
	}

	spin_unlock_irqrestore(&(nb->retrans_lock), iflags);
}
static __u32 get_windowlimit(struct conn *rconn)
{
	__s32 windowlimit = (__s32)(rconn->target.out.seqno_windowlimit -
			rconn->target.out.seqno_nextsend);
	if (unlikely(windowlimit < 0))
		return 0;
	return windowlimit;
}
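/*
 * Sequence numbers are free-running 32 bit counters, so all comparisons in
 * this file are done on signed differences. Example: with
 * seqno_windowlimit == 0x00000010 and seqno_nextsend == 0xfffffff0, the
 * subtraction above yields (__s32) 0x20 == 32, i.e. 32 bytes may still be
 * sent even though the counter has wrapped.
 */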
#warning todo reset connections which are SOURCE_NONE and are stuck for too long
int flush_out(struct conn *rconn, int fromqos, __u32 creditsperbyte)
{
	int targetmss = mss(rconn->target.out.nb);
	__u32 seqno;

	BUG_ON(rconn->targettype != TARGET_OUT);

	if (unlikely(rconn->target.out.conn_id == 0))
		return RC_FLUSH_CONN_OUT_OK;

	if (unlikely(atomic_read(&(rconn->isreset)) != 0))
		return RC_FLUSH_CONN_OUT_OK;

	if (unlikely(rconn->sourcetype == SOURCE_SOCK &&
			rconn->source.sock.delay_flush != 0))
		return RC_FLUSH_CONN_OUT_OK;

	if (fromqos == 0 && may_send_conn(rconn) == 0)
		return RC_FLUSH_CONN_OUT_CONG;

	while (rconn->data_buf.read_remaining >= targetmss &&
			get_windowlimit(rconn) >= targetmss) {
		struct conn_retrans *cr;
		struct sk_buff *skb;
		char *dst;
		int rc;

		if (unlikely(creditsperbyte * targetmss > rconn->credits))
			return RC_FLUSH_CONN_OUT_CREDITS;

		seqno = rconn->target.out.seqno_nextsend;
		skb = create_packet(rconn->target.out.nb, targetmss, GFP_ATOMIC,
				rconn->target.out.conn_id, seqno);
		if (unlikely(skb == 0))
			return RC_FLUSH_CONN_OUT_OOM;

		cr = kmem_cache_alloc(connretrans_slab, GFP_KERNEL);
		if (unlikely(cr == 0)) {
			kfree_skb(skb);
			return RC_FLUSH_CONN_OUT_OOM;
		}

		dst = skb_put(skb, targetmss);

		databuf_pull(rconn, dst, targetmss);

		rc = dev_queue_xmit(skb);
		if (rc != 0) {
			databuf_unpull(rconn, targetmss);
			kmem_cache_free(connretrans_slab, cr);
			return RC_FLUSH_CONN_OUT_CONG;
		}

		rconn->credits -= creditsperbyte * targetmss;
		rconn->target.out.seqno_nextsend += targetmss;
		schedule_retransmit_conn(cr, rconn, seqno, targetmss);
	}

	if (rconn->data_buf.read_remaining > 0 && (rconn->tos == TOS_LATENCY ||
			rconn->target.out.seqno_nextsend ==
			rconn->target.out.seqno_acked)) {
		struct control_msg_out *cm;
		struct conn_retrans *cr;
		__u32 len = rconn->data_buf.read_remaining;
		__s32 windowlimit = get_windowlimit(rconn);
		char *buf;

		if (windowlimit == 0)
			goto out;

		if (windowlimit < len/2 && rconn->target.out.seqno_nextsend !=
				rconn->target.out.seqno_acked)
			goto out;

		if (len > windowlimit)
			len = windowlimit;

		if (unlikely(creditsperbyte * len > rconn->credits))
			return RC_FLUSH_CONN_OUT_CREDITS;

		buf = kmalloc(len, GFP_KERNEL);
		if (unlikely(buf == 0))
			return RC_FLUSH_CONN_OUT_OOM;

		cm = alloc_control_msg(rconn->target.out.nb, ACM_PRIORITY_MED);
		if (unlikely(cm == 0)) {
			kfree(buf);
			return RC_FLUSH_CONN_OUT_OOM;
		}

		cr = kmem_cache_alloc(connretrans_slab, GFP_KERNEL);
		if (unlikely(cr == 0)) {
			kfree(buf);
			free_control_msg(cm);
			return RC_FLUSH_CONN_OUT_CONG;
		}

		databuf_pull(rconn, buf, len);

		seqno = rconn->target.out.seqno_nextsend;
		rconn->credits -= creditsperbyte * len;
		rconn->target.out.seqno_nextsend += len;

		schedule_retransmit_conn(cr, rconn, seqno, len);

		send_conndata(cm, rconn->target.out.conn_id, seqno, buf, buf,
				len);

		return RC_FLUSH_CONN_OUT_OK_SENT;
	}

out:
	return RC_FLUSH_CONN_OUT_OK;
}
int __init cor_snd_init(void)
{
	connretrans_slab = kmem_cache_create("cor_connretrans",
			sizeof(struct conn_retrans), 8, 0, 0);

	if (unlikely(connretrans_slab == 0))
		return 1;

	INIT_DELAYED_WORK(&(qos_resume_work), qos_resume);
	qos_resume_scheduled = 0;

	return 0;
}

MODULE_LICENSE("GPL");