/*
 * Connection oriented routing
 * Copyright (C) 2007-2011  Michael Blizek
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version 2
 * of the License, or (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
 * 02110-1301, USA.
 */

#include <linux/gfp.h>
#include <linux/jiffies.h>
#include <linux/slab.h>

#include "cor.h" /* cor-internal declarations (struct conn, struct neighbor, ...) */

struct kmem_cache *connretrans_slab;

struct conn_retrans {
	/* timeout_list and conn_list share a single ref */
	struct kref ref;

	struct list_head timeout_list;
	struct list_head conn_list;
	struct htab_entry htab_entry;
	struct conn *trgt_out_o;
	__u32 seqno;
	__u32 length;
	__u8 ackrcvd;
	unsigned long timeout;
};

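/*
 * Release function for conn_retrans.ref: runs once the entry has been
 * removed from both the per-neighbor timeout_list and the per-conn
 * conn_list. It returns the entry to the slab cache and drops the
 * reference the entry held on its connection.
 */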
static void free_connretrans(struct kref *ref)
{
	struct conn_retrans *cr = container_of(ref, struct conn_retrans, ref);
	struct conn *trgt_out = cr->trgt_out_o;

	/* take the conn pointer before freeing cr, the old order
	 * dereferenced freed memory */
	kmem_cache_free(connretrans_slab, cr);
	kref_put(&(trgt_out->ref), free_conn);
}

DEFINE_MUTEX(queues_lock);
LIST_HEAD(queues);

struct delayed_work qos_resume_work;
int qos_resume_scheduled;

struct qos_queue {
	struct list_head queue_list;

	struct net_device *dev;

	struct list_head kpackets_waiting;
	struct list_head conn_retrans_waiting;
	struct list_head announce_waiting;
	struct list_head conns_waiting;
};

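/*
 * One qos_queue exists per network device. The four lists hold senders
 * that could not transmit because the device queue was full; qos_resume
 * retries them class by class (kernel packets, connection retransmits,
 * announcements, regular connection data).
 */
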
/* Highest bidder "pays" the credits the second highest has bid */
static int _resume_conns(struct qos_queue *q)
{
	struct conn *best = 0;
	__u64 bestcredit = 0;
	__u64 secondcredit = 0;

	int rc;

	struct list_head *lh = q->conns_waiting.next;

	while (lh != &(q->conns_waiting)) {
		struct conn *trgt_out_o = container_of(lh, struct conn,
				target.out.rb.lh);
		__u64 credits;

		lh = lh->next;

		refresh_conn_credits(trgt_out_o, 0, 0);

		mutex_lock(&(trgt_out_o->rcv_lock));

		BUG_ON(trgt_out_o->targettype != TARGET_OUT);

		if (atomic_read(&(trgt_out_o->isreset)) != 0) {
			trgt_out_o->target.out.rb.in_queue = 0;
			list_del(&(trgt_out_o->target.out.rb.lh));
			mutex_unlock(&(trgt_out_o->rcv_lock));
			kref_put(&(trgt_out_o->ref), free_conn);
			continue;
		}

		BUG_ON(trgt_out_o->data_buf.read_remaining == 0);

		if (may_alloc_control_msg(trgt_out_o->target.out.nb,
				ACM_PRIORITY_LOW) == 0) {
			mutex_unlock(&(trgt_out_o->rcv_lock));
			continue;
		}

		if (trgt_out_o->credits <= 0)
			credits = 0;
		else
			credits = multiply_div(trgt_out_o->credits, 1LL << 24,
					trgt_out_o->data_buf.read_remaining);
		mutex_unlock(&(trgt_out_o->rcv_lock));

		if (best == 0 || bestcredit < credits) {
			secondcredit = bestcredit;
			best = trgt_out_o;
			bestcredit = credits;
		} else if (secondcredit < credits) {
			secondcredit = credits;
		}
	}

	if (best == 0)
		return RC_FLUSH_CONN_OUT_OK;

	mutex_lock(&(best->rcv_lock));
	rc = flush_out(best, 1, (__u32) (secondcredit >> 32));

	if (rc == RC_FLUSH_CONN_OUT_OK || rc == RC_FLUSH_CONN_OUT_OK_SENT) {
		best->target.out.rb.in_queue = 0;
		list_del(&(best->target.out.rb.lh));
	}
	mutex_unlock(&(best->rcv_lock));

	refresh_conn_credits(best, 0, 0);
	unreserve_sock_buffer(best);

	if (rc == RC_FLUSH_CONN_OUT_OK || rc == RC_FLUSH_CONN_OUT_OK_SENT)
		kref_put(&(best->ref), free_conn);

	return rc;
}

static int resume_conns(struct qos_queue *q)
{
	while (list_empty(&(q->conns_waiting)) == 0) {
		int rc = _resume_conns(q);
		if (rc != RC_FLUSH_CONN_OUT_OK &&
				rc != RC_FLUSH_CONN_OUT_OK_SENT)
			return 1;
	}

	return 0;
}

static int send_retrans(struct neighbor *nb, int fromqos);

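/*
 * Retry all pending senders of one caller class. A sender that still
 * cannot transmit is put back on the list (in_queue != 0); in that case
 * the loop stops and the congestion is reported back to qos_resume.
 */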
static int _qos_resume(struct qos_queue *q, int caller)
{
	int rc = 0;
	struct list_head *lh;

	if (caller == QOS_CALLER_KPACKET)
		lh = &(q->kpackets_waiting);
	else if (caller == QOS_CALLER_CONN_RETRANS)
		lh = &(q->conn_retrans_waiting);
	else if (caller == QOS_CALLER_ANNOUNCE)
		lh = &(q->announce_waiting);
	else
		BUG();

	while (list_empty(lh) == 0) {
		struct list_head *curr = lh->next;
		struct resume_block *rb = container_of(curr,
				struct resume_block, lh);

		rb->in_queue = 0;
		list_del(curr);

		if (caller == QOS_CALLER_KPACKET) {
			struct neighbor *nb = container_of(rb, struct neighbor,
					rb_kp);
			rc = send_messages(nb, 0, 1);
		} else if (caller == QOS_CALLER_CONN_RETRANS) {
			struct neighbor *nb = container_of(rb, struct neighbor,
					rb_cr);
#warning todo do not send if neighbor is stalled
			rc = send_retrans(nb, 1);
		} else if (caller == QOS_CALLER_ANNOUNCE) {
			struct announce_data *ann = container_of(rb,
					struct announce_data, rb);
			rc = send_announce_qos(ann);
		} else {
			BUG();
		}

		if (rc != 0 && rb->in_queue == 0) {
			rb->in_queue = 1;
			list_add(curr, lh);
		} else {
			if (caller == QOS_CALLER_KPACKET) {
				kref_put(&(container_of(rb, struct neighbor,
						rb_kp)->ref), neighbor_free);
			} else if (caller == QOS_CALLER_CONN_RETRANS) {
				kref_put(&(container_of(rb, struct neighbor,
						rb_cr)->ref), neighbor_free);
			} else if (caller == QOS_CALLER_ANNOUNCE) {
				kref_put(&(container_of(rb,
						struct announce_data, rb)->ref),
						announce_data_free);
			} else {
				BUG();
			}
		}

		if (rc != 0)
			break;
	}

	return rc;
}

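/*
 * Worker for qos_resume_work: walks all device queues and retries the
 * waiting senders. If any queue is still congested, the work is
 * rescheduled one jiffy later instead of clearing qos_resume_scheduled.
 */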
static void qos_resume(struct work_struct *work)
{
	struct list_head *curr;
	int congested = 0;

	mutex_lock(&(queues_lock));

	curr = queues.next;
	while (curr != (&queues)) {
		struct qos_queue *q = container_of(curr,
				struct qos_queue, queue_list);
		int i;
		int rc = 0;

		for (i=0;i<4;i++) {
			if (i == 3)
				rc = resume_conns(q);
			else
				rc = _qos_resume(q, i);

			if (rc != 0) {
				congested = 1;
				break;
			}
		}

		curr = curr->next;

		if (i == 4 && unlikely(q->dev == 0)) {
			list_del(&(q->queue_list));
			kfree(q);
		}
	}

	qos_resume_scheduled = 0;

	if (congested) {
		qos_resume_scheduled = 1;
		schedule_delayed_work(&(qos_resume_work), 1);
	}

	mutex_unlock(&(queues_lock));
}

static struct qos_queue *get_queue(struct net_device *dev)
{
	struct list_head *curr = queues.next;
	while (curr != (&queues)) {
		struct qos_queue *q = container_of(curr,
				struct qos_queue, queue_list);
		if (q->dev == dev)
			return q;
		curr = curr->next;
	}
	return 0;
}

#warning todo content of the queue?
int destroy_queue(struct net_device *dev)
{
	struct qos_queue *q;

	mutex_lock(&(queues_lock));

	q = get_queue(dev);
	if (q == 0) {
		mutex_unlock(&(queues_lock));
		return 1;
	}

	/* qos_resume frees the queue once it has been drained */
	q->dev = 0;

	mutex_unlock(&(queues_lock));

	return 0;
}

int create_queue(struct net_device *dev)
{
	struct qos_queue *q = kmalloc(sizeof(struct qos_queue), GFP_KERNEL);

	if (q == 0) {
		printk(KERN_ERR "cor: unable to allocate memory for device "
				"queue, not enabling device");
		return 1;
	}

	q->dev = dev;

	INIT_LIST_HEAD(&(q->kpackets_waiting));
	INIT_LIST_HEAD(&(q->conn_retrans_waiting));
	INIT_LIST_HEAD(&(q->announce_waiting));
	INIT_LIST_HEAD(&(q->conns_waiting));

	mutex_lock(&(queues_lock));
	list_add(&(q->queue_list), &queues);
	mutex_unlock(&(queues_lock));

	return 0;
}

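/*
 * Queue a sender on the device queue of dev. The rb->in_queue flag
 * guards against double insertion, and a reference on the owning object
 * (neighbor, announce_data or conn) is held for as long as the
 * resume_block sits on a list.
 */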
void qos_enqueue(struct net_device *dev, struct resume_block *rb, int caller)
{
	struct qos_queue *q;

	mutex_lock(&(queues_lock));

	if (rb->in_queue)
		goto out;

	q = get_queue(dev);
	if (unlikely(q == 0))
		goto out;

	rb->in_queue = 1;

	if (caller == QOS_CALLER_KPACKET) {
		list_add(&(rb->lh), &(q->kpackets_waiting));
		kref_get(&(container_of(rb, struct neighbor, rb_kp)->ref));
	} else if (caller == QOS_CALLER_CONN_RETRANS) {
		list_add(&(rb->lh), &(q->conn_retrans_waiting));
		kref_get(&(container_of(rb, struct neighbor, rb_cr)->ref));
	} else if (caller == QOS_CALLER_ANNOUNCE) {
		list_add(&(rb->lh), &(q->announce_waiting));
		kref_get(&(container_of(rb, struct announce_data, rb)->ref));
	} else if (caller == QOS_CALLER_CONN) {
		struct conn *trgt_out = container_of(rb, struct conn,
				target.out.rb);
		mutex_lock(&(trgt_out->rcv_lock));
#warning todo targettype might have changed
		BUG_ON(trgt_out->targettype != TARGET_OUT);
		list_add(&(rb->lh), &(q->conns_waiting));
		kref_get(&(trgt_out->ref));
		mutex_unlock(&(trgt_out->rcv_lock));
	} else {
		BUG();
	}

	if (qos_resume_scheduled == 0) {
		schedule_delayed_work(&(qos_resume_work), 1);
		qos_resume_scheduled = 1;
	}

out:
	mutex_unlock(&(queues_lock));
}

void qos_remove_conn(struct conn *cn)
{
	int put = 0;

	mutex_lock(&(queues_lock));
	mutex_lock(&(cn->rcv_lock));

	if (cn->targettype != TARGET_OUT)
		goto out;

	if (cn->target.out.rb.in_queue == 0)
		goto out;

	cn->target.out.rb.in_queue = 0;
	list_del(&(cn->target.out.rb.lh));
	put = 1;

out:
	mutex_unlock(&(cn->rcv_lock));
	mutex_unlock(&(queues_lock));

	if (put)
		kref_put(&(cn->ref), free_conn);
}

static int may_send_conn_retrans(struct neighbor *nb)
{
	struct qos_queue *q;
	int rc = 0;

	mutex_lock(&(queues_lock));

	q = get_queue(nb->dev);
	if (unlikely(q == 0))
		goto out;

	rc = (list_empty(&(q->kpackets_waiting)));

out:
	mutex_unlock(&(queues_lock));

	return rc;
}

static int may_send_conn(struct conn *trgt_out_l)
{
	struct qos_queue *q;
	int rc = 0;

#warning todo this may deadlock, use atomic_ops instead, modify get_queue (move pointer to neighbor?)
	mutex_lock(&(queues_lock));

	q = get_queue(trgt_out_l->target.out.nb->dev);
	if (unlikely(q == 0))
		goto out;

	rc = (list_empty(&(q->kpackets_waiting)) &&
			list_empty(&(q->conn_retrans_waiting)) &&
			list_empty(&(q->announce_waiting)) &&
			list_empty(&(q->conns_waiting)));

out:
	mutex_unlock(&(queues_lock));

	return rc;
}

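/*
 * Data packets start with a 9 byte header: one byte packet type
 * (PACKET_TYPE_DATA), followed by the 4 byte connection id and the
 * 4 byte sequence number, both written via put_u32.
 */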
struct sk_buff *create_packet(struct neighbor *nb, int size,
		gfp_t alloc_flags, __u32 conn_id, __u32 seqno)
{
	struct sk_buff *ret;
	char *dest;

	ret = alloc_skb(size + 9 + LL_ALLOCATED_SPACE(nb->dev), alloc_flags);
	if (unlikely(ret == 0))
		return 0;

	ret->protocol = htons(ETH_P_COR);
	ret->dev = nb->dev;

	skb_reserve(ret, LL_RESERVED_SPACE(nb->dev));
	if (unlikely(dev_hard_header(ret, nb->dev, ETH_P_COR, nb->mac,
			nb->dev->dev_addr, ret->len) < 0)) {
		kfree_skb(ret);
		return 0;
	}
	skb_reset_network_header(ret);

	dest = skb_put(ret, 9);

	dest[0] = PACKET_TYPE_DATA;
	dest += 1;

	put_u32(dest, conn_id, 1);
	dest += 4;
	put_u32(dest, seqno, 1);
	dest += 4;

	return ret;
}

static void set_conn_retrans_timeout(struct conn_retrans *cr)
{
	struct neighbor *nb = cr->trgt_out_o->target.out.nb;
	cr->timeout = jiffies + usecs_to_jiffies(100000 +
			((__u32) atomic_read(&(nb->latency))) +
			((__u32) atomic_read(&(nb->max_remote_cmsg_delay))));
}

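/*
 * Re-arm cr for another retransmit of at most length bytes. If cr
 * covers more data than that, the tail is split off into a fresh
 * conn_retrans which is returned to the caller; *dontsend is set when
 * an ack arrived in the meantime and the send should be skipped.
 */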
static struct conn_retrans *readd_conn_retrans(struct conn_retrans *cr,
		struct neighbor *nb, __u32 length, int *dontsend)
{
	unsigned long iflags;

	struct conn_retrans *ret = 0;

	spin_lock_irqsave(&(nb->retrans_lock), iflags);

	if (unlikely(cr->ackrcvd)) {
		*dontsend = 1;
		goto out;
	}
	*dontsend = 0;

	if (unlikely(cr->length > length)) {
		ret = kmem_cache_alloc(connretrans_slab, GFP_ATOMIC);
		if (unlikely(ret == 0)) {
			cr->timeout = jiffies + 1;
			goto out;
		}

		memset(ret, 0, sizeof (struct conn_retrans));
		ret->trgt_out_o = cr->trgt_out_o;
		kref_get(&(cr->trgt_out_o->ref));
		ret->seqno = cr->seqno + length;
		ret->length = cr->length - length;
		kref_init(&(ret->ref));

		list_add(&(ret->timeout_list), &(nb->retrans_list_conn));
		list_add(&(ret->conn_list), &(cr->conn_list));

		cr->length = length;
	}

	list_del(&(cr->timeout_list));
	list_add_tail(&(cr->timeout_list), &(nb->retrans_list_conn));
	set_conn_retrans_timeout(cr);

	BUG_ON(cr->length != length);

out:
	spin_unlock_irqrestore(&(nb->retrans_lock), iflags);

	return ret;
}

void cancel_retrans(struct conn *trgt_out_l)
{
	unsigned long iflags;
	struct neighbor *nb = trgt_out_l->target.out.nb;

	spin_lock_irqsave(&(nb->retrans_lock), iflags);

	while (list_empty(&(trgt_out_l->target.out.retrans_list)) == 0) {
		struct conn_retrans *cr = container_of(
				trgt_out_l->target.out.retrans_list.next,
				struct conn_retrans, conn_list);
		BUG_ON(cr->trgt_out_o != trgt_out_l);

		list_del(&(cr->timeout_list));
		list_del(&(cr->conn_list));
		cr->ackrcvd = 1;
		kref_put(&(cr->ref), free_connretrans);
	}

	spin_unlock_irqrestore(&(nb->retrans_lock), iflags);
}

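/*
 * Retransmit the data covered by cr: full-mss chunks are sent as
 * regular data packets, a remaining short tail goes out through the
 * control-msg path (send_conndata). Returns nonzero if the device
 * queue was full and the retransmit has to be requeued.
 */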
static int _send_retrans(struct neighbor *nb, struct conn_retrans *cr)
{
	int targetmss = mss(nb);
	int dontsend = 0;
	int queuefull = 0;

	mutex_lock(&(cr->trgt_out_o->rcv_lock));

	BUG_ON(cr->trgt_out_o->targettype != TARGET_OUT);
	BUG_ON(cr->trgt_out_o->target.out.nb != nb);

	kref_get(&(cr->trgt_out_o->ref));

	if (unlikely(atomic_read(&(cr->trgt_out_o->isreset)) != 0)) {
		cancel_retrans(cr->trgt_out_o);
		goto out;
	}

	while (cr->length >= targetmss) {
		struct sk_buff *skb;
		char *dst;
		struct conn_retrans *cr2;
		int rc;

		if (may_send_conn_retrans(nb) == 0)
			goto out_queuefull;

		skb = create_packet(nb, targetmss, GFP_KERNEL,
				cr->trgt_out_o->target.out.conn_id, cr->seqno);
		if (unlikely(skb == 0)) {
			cr->timeout = jiffies + 1;
			goto out;
		}

		cr2 = readd_conn_retrans(cr, nb, targetmss, &dontsend);
		if (unlikely(unlikely(dontsend) || unlikely(cr2 == 0 &&
				unlikely(cr->length > targetmss)))) {
			kfree_skb(skb);
			goto out;
		}

		dst = skb_put(skb, targetmss);

		databuf_pullold(cr->trgt_out_o, cr->seqno, dst, targetmss);
		rc = dev_queue_xmit(skb);

		if (rc != 0) {
			unsigned long iflags;

			spin_lock_irqsave(&(nb->retrans_lock), iflags);
			if (unlikely(cr->ackrcvd)) {
				dontsend = 1;
			} else {
				list_del(&(cr->timeout_list));
				list_add(&(cr->timeout_list),
						&(nb->retrans_list_conn));
			}
			spin_unlock_irqrestore(&(nb->retrans_lock), iflags);

			if (dontsend == 0)
				goto out_queuefull;
			goto out;
		}

		if (cr2 == 0)
			goto out;

		/* continue with the split-off tail */
		cr = cr2;
	}

	if (unlikely(cr->length <= 0)) {
		goto out;
	} else {
		struct control_msg_out *cm;
		char *buf = kmalloc(cr->length, GFP_KERNEL);

		if (unlikely(buf == 0)) {
			cr->timeout = jiffies + 1;
			goto out;
		}

		cm = alloc_control_msg(nb, ACM_PRIORITY_LOW);
		if (unlikely(cm == 0)) {
			cr->timeout = jiffies + 1;
			kfree(buf);
			goto out;
		}

		databuf_pullold(cr->trgt_out_o, cr->seqno, buf, cr->length);

		if (unlikely(readd_conn_retrans(cr, nb, cr->length,
				&dontsend) != 0))
			BUG();

		if (likely(dontsend == 0)) {
			send_conndata(cm, cr->trgt_out_o->target.out.conn_id,
					cr->seqno, buf, buf, cr->length);
		} else {
			free_control_msg(cm);
			kfree(buf);
		}
	}

	goto out;

out_queuefull:
	queuefull = 1;

out:
	mutex_unlock(&(cr->trgt_out_o->rcv_lock));

	kref_put(&(cr->trgt_out_o->ref), free_conn);

	return queuefull;
}

static int send_retrans(struct neighbor *nb, int fromqos)
{
	unsigned long iflags;

	struct conn_retrans *cr = 0;

	int nbstate;
	int rescheduled = 0;
	int queuefull = 0;

	spin_lock_irqsave(&(nb->state_lock), iflags);
	nbstate = nb->state;
	spin_unlock_irqrestore(&(nb->state_lock), iflags);

	while (1) {
		spin_lock_irqsave(&(nb->retrans_lock), iflags);

		if (list_empty(&(nb->retrans_list_conn))) {
			nb->retrans_timer_conn_running = 0;
			spin_unlock_irqrestore(&(nb->retrans_lock), iflags);
			break;
		}

		cr = container_of(nb->retrans_list_conn.next,
				struct conn_retrans, timeout_list);

		BUG_ON(cr->trgt_out_o->targettype != TARGET_OUT);

		if (unlikely(unlikely(nbstate == NEIGHBOR_STATE_KILLED) ||
				unlikely(atomic_read(
				&(cr->trgt_out_o->isreset)) != 0))) {
			list_del(&(cr->timeout_list));
			list_del(&(cr->conn_list));
			spin_unlock_irqrestore(&(nb->retrans_lock), iflags);

			kref_put(&(cr->ref), free_connretrans);
			continue;
		}

		BUG_ON(nb != cr->trgt_out_o->target.out.nb);

#warning todo check window limit
		if (time_after(cr->timeout, jiffies)) {
			schedule_delayed_work(&(nb->retrans_timer_conn),
					cr->timeout - jiffies);
			rescheduled = 1;
			spin_unlock_irqrestore(&(nb->retrans_lock), iflags);
			break;
		}

		kref_get(&(cr->ref));
		spin_unlock_irqrestore(&(nb->retrans_lock), iflags);
		queuefull = _send_retrans(nb, cr);
		kref_put(&(cr->ref), free_connretrans);

		if (queuefull) {
			if (fromqos == 0)
				qos_enqueue(nb->dev, &(nb->rb_cr),
						QOS_CALLER_CONN_RETRANS);
			break;
		}
	}

	if (rescheduled == 0)
		kref_put(&(nb->ref), neighbor_free);

	return queuefull;
}

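/*
 * The running retransmit timer holds a reference on the neighbor;
 * send_retrans drops it unless it rescheduled itself for a later
 * timeout (rescheduled != 0 above).
 */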
void retransmit_conn_timerfunc(struct work_struct *work)
{
	struct neighbor *nb = container_of(to_delayed_work(work),
			struct neighbor, retrans_timer_conn);

	send_retrans(nb, 0);
}

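/*
 * Handle a selective ack for the range [seqno_ooo, seqno_ooo+length):
 * retransmit entries completely inside the range are dropped, entries
 * overlapping one end of the range are trimmed.
 */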
void conn_ack_ooo_rcvd(struct neighbor *nb, __u32 conn_id,
		struct conn *trgt_out, __u32 seqno_ooo, __u32 length)
{
	unsigned long iflags;
	struct list_head *curr;

	if (unlikely(length == 0))
		return;

	mutex_lock(&(trgt_out->rcv_lock));

	if (unlikely(trgt_out->targettype != TARGET_OUT))
		goto out;

	if (unlikely(trgt_out->target.out.nb != nb))
		goto out;

	if (unlikely(trgt_out->target.out.conn_id != conn_id))
		goto out;

	spin_lock_irqsave(&(nb->retrans_lock), iflags);

	curr = trgt_out->target.out.retrans_list.next;

	while (curr != &(trgt_out->target.out.retrans_list)) {
		struct conn_retrans *cr = container_of(curr,
				struct conn_retrans, conn_list);

		curr = curr->next;

		/* does the acked range overlap cr at all? */
		if (((__s32)(cr->seqno + cr->length - seqno_ooo)) > 0) {
			if (((__s32)(cr->seqno + cr->length - seqno_ooo -
					length)) > 0) {
				/* cr extends beyond the acked range:
				 * keep only the part after it */
				if (((__s32)(cr->seqno - seqno_ooo -
						length)) < 0) {
					__u32 newseqno = seqno_ooo + length;
					cr->length -= (newseqno - cr->seqno);
					cr->seqno = newseqno;
				}
				break;
			}

			if (((__s32)(cr->seqno - seqno_ooo)) < 0 &&
					((__s32)(cr->seqno + cr->length -
					seqno_ooo - length)) <= 0) {
				/* cr starts before the acked range:
				 * keep only the part before it */
				__u32 diff = cr->seqno + cr->length -
						seqno_ooo;
				cr->length -= diff;
			} else {
				/* cr lies completely inside the acked range */
				list_del(&(cr->timeout_list));
				list_del(&(cr->conn_list));
				cr->ackrcvd = 1;
				kref_put(&(cr->ref), free_connretrans);
			}
		}
	}

	if (list_empty(&(trgt_out->target.out.retrans_list)) == 0) {
		struct conn_retrans *cr = container_of(
				trgt_out->target.out.retrans_list.next,
				struct conn_retrans, conn_list);
		if (unlikely(((__s32) (cr->seqno -
				trgt_out->target.out.seqno_acked)) > 0))
			trgt_out->target.out.seqno_acked = cr->seqno;
	}

	spin_unlock_irqrestore(&(nb->retrans_lock), iflags);

out:
	mutex_unlock(&(trgt_out->rcv_lock));
}

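/*
 * Handle a cumulative ack up to seqno, optionally updating the send
 * window. Fully acked retransmit entries are released, a partially
 * acked head entry is trimmed, and the acked data is dropped from the
 * data buffer via databuf_ack.
 */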
void conn_ack_rcvd(struct neighbor *nb, __u32 conn_id, struct conn *trgt_out,
		__u32 seqno, int setwindow, __u8 window)
{
	unsigned long iflags;

	mutex_lock(&(trgt_out->rcv_lock));

#warning todo reset check?
	if (unlikely(trgt_out->targettype != TARGET_OUT))
		goto out;

	if (unlikely(trgt_out->target.out.nb != nb))
		goto out;

	if (unlikely(trgt_out->reversedir->source.in.conn_id != conn_id))
		goto out;

	if (unlikely(((__s32)(seqno - trgt_out->target.out.seqno_nextsend)) > 0) ||
			unlikely(
			((__s32)(seqno - trgt_out->target.out.seqno_acked)) < 0))
		goto out;

	if (setwindow) {
		__u32 windowdec = dec_log_64_11(window);
		if (unlikely(seqno == trgt_out->target.out.seqno_acked &&
				((__s32) (seqno + windowdec -
				trgt_out->target.out.seqno_windowlimit)) <= 0))
			goto skipwindow;

		trgt_out->target.out.seqno_windowlimit = seqno + windowdec;
	}

skipwindow:
	if (seqno == trgt_out->target.out.seqno_acked)
		goto out;

	spin_lock_irqsave(&(nb->retrans_lock), iflags);

	trgt_out->target.out.seqno_acked = seqno;

	while (list_empty(&(trgt_out->target.out.retrans_list)) == 0) {
		struct conn_retrans *cr = container_of(
				trgt_out->target.out.retrans_list.next,
				struct conn_retrans, conn_list);

		if (((__s32)(cr->seqno + cr->length - seqno)) > 0) {
			if (((__s32)(cr->seqno - seqno)) < 0) {
				cr->length -= (seqno - cr->seqno);
				cr->seqno = seqno;
			}
			break;
		}

		list_del(&(cr->timeout_list));
		list_del(&(cr->conn_list));
		cr->ackrcvd = 1;
		kref_put(&(cr->ref), free_connretrans);
	}

	spin_unlock_irqrestore(&(nb->retrans_lock), iflags);
	databuf_ack(trgt_out, trgt_out->target.out.seqno_acked);

out:
	mutex_unlock(&(trgt_out->rcv_lock));
}

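/*
 * Create the retransmit bookkeeping for a freshly sent chunk and start
 * the per-neighbor retransmit timer if this entry is the first on the
 * list.
 */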
static void schedule_retransmit_conn(struct conn_retrans *cr,
		struct conn *trgt_out_l, __u32 seqno, __u32 len)
{
	unsigned long iflags;

	struct neighbor *nb = trgt_out_l->target.out.nb;

	int first;

	BUG_ON(trgt_out_l->targettype != TARGET_OUT);

	memset(cr, 0, sizeof (struct conn_retrans));
	cr->trgt_out_o = trgt_out_l;
	kref_get(&(trgt_out_l->ref));
	cr->seqno = seqno;
	cr->length = len;
	kref_init(&(cr->ref));
	set_conn_retrans_timeout(cr);

	spin_lock_irqsave(&(nb->retrans_lock), iflags);

	first = unlikely(list_empty(&(nb->retrans_list_conn)));
	list_add_tail(&(cr->timeout_list), &(nb->retrans_list_conn));

	list_add_tail(&(cr->conn_list), &(trgt_out_l->target.out.retrans_list));

	if (unlikely(unlikely(first) &&
			unlikely(nb->retrans_timer_conn_running == 0))) {
		schedule_delayed_work(&(nb->retrans_timer_conn),
				cr->timeout - jiffies);
		nb->retrans_timer_conn_running = 1;
		kref_get(&(nb->ref));
	}

	spin_unlock_irqrestore(&(nb->retrans_lock), iflags);
}

static __u32 get_windowlimit(struct conn *trgt_out_l)
{
	__s32 windowlimit = (__s32)(trgt_out_l->target.out.seqno_windowlimit -
			trgt_out_l->target.out.seqno_nextsend);
	if (unlikely(windowlimit < 0))
		return 0;
	return windowlimit;
}

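/*
 * Send as much buffered data as the window, the credits and the device
 * queue allow. Returns RC_FLUSH_CONN_OUT_OK, or _OK_SENT if the short
 * tail went out through the control-msg path; _CONG, _CREDITS and _OOM
 * report congestion, exhausted credits and allocation failure.
 */
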
#warning todo reset connections which are SOURCE_NONE and are stuck for too long
int flush_out(struct conn *trgt_out_l, int fromqos, __u32 creditsperbyte)
{
	__u32 targetmss;
	__u32 seqno;

	BUG_ON(trgt_out_l->targettype != TARGET_OUT);

	targetmss = mss(trgt_out_l->target.out.nb);

	if (unlikely(trgt_out_l->target.out.conn_id == 0))
		return RC_FLUSH_CONN_OUT_OK;

	if (unlikely(atomic_read(&(trgt_out_l->isreset)) != 0))
		return RC_FLUSH_CONN_OUT_OK;

	if (unlikely(trgt_out_l->sourcetype == SOURCE_SOCK &&
			trgt_out_l->source.sock.delay_flush != 0))
		return RC_FLUSH_CONN_OUT_OK;

	if (fromqos == 0 && may_send_conn(trgt_out_l) == 0)
		return RC_FLUSH_CONN_OUT_CONG;

	while (trgt_out_l->data_buf.read_remaining >= targetmss &&
			get_windowlimit(trgt_out_l) >= targetmss) {
		struct conn_retrans *cr;
		struct sk_buff *skb;
		char *dst;
		int rc;

		if (unlikely(creditsperbyte * targetmss >
				trgt_out_l->credits))
			return RC_FLUSH_CONN_OUT_CREDITS;

		seqno = trgt_out_l->target.out.seqno_nextsend;
		skb = create_packet(trgt_out_l->target.out.nb, targetmss,
				GFP_ATOMIC, trgt_out_l->target.out.conn_id,
				seqno);
		if (unlikely(skb == 0))
			return RC_FLUSH_CONN_OUT_OOM;

		cr = kmem_cache_alloc(connretrans_slab, GFP_KERNEL);
		if (unlikely(cr == 0)) {
			kfree_skb(skb);
			return RC_FLUSH_CONN_OUT_OOM;
		}

		dst = skb_put(skb, targetmss);

		databuf_pull(trgt_out_l, dst, targetmss);

		rc = dev_queue_xmit(skb);
		if (rc != 0) {
			databuf_unpull(trgt_out_l, targetmss);
			kmem_cache_free(connretrans_slab, cr);
			return RC_FLUSH_CONN_OUT_CONG;
		}

		trgt_out_l->credits -= creditsperbyte * targetmss;
		trgt_out_l->target.out.seqno_nextsend += targetmss;
		schedule_retransmit_conn(cr, trgt_out_l, seqno, targetmss);
	}

	if (trgt_out_l->data_buf.read_remaining > 0 && (trgt_out_l->tos ==
			TOS_LATENCY || trgt_out_l->target.out.seqno_nextsend ==
			trgt_out_l->target.out.seqno_acked)) {
		struct control_msg_out *cm;
		struct conn_retrans *cr;
		__u32 len = trgt_out_l->data_buf.read_remaining;
		__s32 windowlimit = get_windowlimit(trgt_out_l);
		char *buf;

		if (windowlimit == 0)
			goto out;

		/* only send a partial window while data is in flight */
		if (windowlimit < len/2 &&
				trgt_out_l->target.out.seqno_nextsend !=
				trgt_out_l->target.out.seqno_acked)
			goto out;

		if (len > windowlimit)
			len = windowlimit;

		/* check the credits before allocating, the old order
		 * leaked buf on the credits path */
		if (unlikely(creditsperbyte * len > trgt_out_l->credits))
			return RC_FLUSH_CONN_OUT_CREDITS;

		buf = kmalloc(len, GFP_KERNEL);
		if (unlikely(buf == 0))
			return RC_FLUSH_CONN_OUT_OOM;

		cm = alloc_control_msg(trgt_out_l->target.out.nb,
				ACM_PRIORITY_LOW);
		if (unlikely(cm == 0)) {
			kfree(buf);
			return RC_FLUSH_CONN_OUT_OOM;
		}

		cr = kmem_cache_alloc(connretrans_slab, GFP_KERNEL);
		if (unlikely(cr == 0)) {
			kfree(buf);
			free_control_msg(cm);
			return RC_FLUSH_CONN_OUT_CONG;
		}

		databuf_pull(trgt_out_l, buf, len);

		seqno = trgt_out_l->target.out.seqno_nextsend;
		trgt_out_l->credits -= creditsperbyte * len;
		trgt_out_l->target.out.seqno_nextsend += len;

		schedule_retransmit_conn(cr, trgt_out_l, seqno, len);

		send_conndata(cm, trgt_out_l->target.out.conn_id, seqno, buf,
				buf, len);

		return RC_FLUSH_CONN_OUT_OK_SENT;
	}

out:
	return RC_FLUSH_CONN_OUT_OK;
}

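/*
 * Module init for the send path: creates the conn_retrans slab cache
 * and prepares the qos resume worker.
 */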
int __init cor_snd_init(void)
{
	connretrans_slab = kmem_cache_create("cor_connretrans",
			sizeof(struct conn_retrans), 8, 0, 0);

	if (unlikely(connretrans_slab == 0))
		return 1;

	INIT_DELAYED_WORK(&(qos_resume_work), qos_resume);
	qos_resume_scheduled = 0;

	return 0;
}

MODULE_LICENSE("GPL");