set credits on new connections
[cor_2_6_31.git] / net / cor / snd.c
blob 629d78d9cc57423a4a7c3c0d544c758609c09587

/*
 * Connection oriented routing
 * Copyright (C) 2007-2010 Michael Blizek
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version 2
 * of the License, or (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
 * 02110-1301, USA.
 */

#include <linux/gfp.h>
#include <linux/jiffies.h>
#include <linux/slab.h>

#include "cor.h"

struct kmem_cache *connretrans_slab;

struct conn_retrans {
	/* timeout_list and conn_list share a single ref */
	struct kref ref;
	struct list_head timeout_list;
	struct list_head conn_list;
	struct htab_entry htab_entry;
	struct conn *rconn;
	__u32 seqno;
	__u32 length;
	__u8 ackrcvd;
	unsigned long timeout;
};

static void free_connretrans(struct kref *ref)
{
	struct conn_retrans *cr = container_of(ref, struct conn_retrans, ref);

	/* drop the conn ref before freeing cr; cr must not be used afterwards */
	kref_put(&(cr->rconn->ref), free_conn);
	kmem_cache_free(connretrans_slab, cr);
}

DEFINE_MUTEX(queues_lock);
LIST_HEAD(queues);
struct delayed_work qos_resume_work;
int qos_resume_scheduled;

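/*
 * Per-device send queue. Senders that may not transmit right now park a
 * resume_block on one of the waiting lists below and are retried from
 * qos_resume().
 */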
struct qos_queue {
	struct list_head queue_list;

	struct net_device *dev;

	struct list_head kpackets_waiting;
	struct list_head conn_retrans_waiting;
	struct list_head announce_waiting;
	struct list_head conns_waiting;
};

/* Highest bidder "pays" the credits the second-highest has bid */
static int _resume_conns(struct qos_queue *q)
{
	struct conn *best = 0;
	__u64 bestcredit = 0;
	__u64 secondcredit = 0;

	int rc;

	struct list_head *lh = q->conns_waiting.next;

	while (lh != &(q->conns_waiting)) {
		struct conn *rconn = container_of(lh, struct conn,
				target.out.rb.lh);
		__u64 credits;

		lh = lh->next;

		refresh_conn_credits(rconn, 0, 0);

		mutex_lock(&(rconn->rcv_lock));

		BUG_ON(rconn->targettype != TARGET_OUT);

		if (atomic_read(&(rconn->isreset)) != 0) {
			rconn->target.out.rb.in_queue = 0;
			list_del(&(rconn->target.out.rb.lh));
			mutex_unlock(&(rconn->rcv_lock));
			kref_put(&(rconn->ref), free_conn);

			continue;
		}

		BUG_ON(rconn->data_buf.read_remaining == 0);

		if (may_alloc_control_msg(rconn->target.out.nb,
				ACM_PRIORITY_MED) == 0) {
			/* do not keep rcv_lock held while skipping this conn */
			mutex_unlock(&(rconn->rcv_lock));
			continue;
		}

		if (rconn->credits <= 0)
			credits = 0;
		else
			credits = multiply_div(rconn->credits, 1LL << 24,
					rconn->data_buf.read_remaining);
		mutex_unlock(&(rconn->rcv_lock));

		if (best == 0 || bestcredit < credits) {
			secondcredit = bestcredit;
			best = rconn;
			bestcredit = credits;
		} else if (secondcredit < credits) {
			secondcredit = credits;
		}
	}

	if (best == 0)
		return RC_FLUSH_CONN_OUT_OK;

	mutex_lock(&(best->rcv_lock));
	rc = flush_out(best, 1, (__u32) (secondcredit >> 32));

	if (rc == RC_FLUSH_CONN_OUT_OK || rc == RC_FLUSH_CONN_OUT_OK_SENT) {
		best->target.out.rb.in_queue = 0;
		list_del(&(best->target.out.rb.lh));
	}

	mutex_unlock(&(best->rcv_lock));

	refresh_conn_credits(best, 0, 0);
	unreserve_sock_buffer(best);

	if (rc == RC_FLUSH_CONN_OUT_OK_SENT)
		wake_sender(best);

	if (rc == RC_FLUSH_CONN_OUT_OK || rc == RC_FLUSH_CONN_OUT_OK_SENT)
		kref_put(&(best->ref), free_conn);

	return rc;
}

static int resume_conns(struct qos_queue *q)
{
	while (list_empty(&(q->conns_waiting)) == 0) {
		int rc = _resume_conns(q);
		if (rc != RC_FLUSH_CONN_OUT_OK &&
				rc != RC_FLUSH_CONN_OUT_OK_SENT)
			return 1;
	}
	return 0;
}

static int send_retrans(struct neighbor *nb, int fromqos);

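/*
 * Retry all entries of one waiting list of the queue. If a send still does
 * not succeed, the entry is put back on its list and the remaining entries
 * are left for the next resume run.
 */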
static int _qos_resume(struct qos_queue *q, int caller)
{
	int rc = 0;

	struct list_head *lh;

	if (caller == QOS_CALLER_KPACKET)
		lh = &(q->conn_retrans_waiting);
	else if (caller == QOS_CALLER_CONN_RETRANS)
		lh = &(q->kpackets_waiting);
	else if (caller == QOS_CALLER_ANNOUNCE)
		lh = &(q->announce_waiting);
	else
		BUG();

	while (list_empty(lh) == 0) {
		struct list_head *curr = lh->next;
		struct resume_block *rb = container_of(curr,
				struct resume_block, lh);
		rb->in_queue = 0;
		list_del(curr);

		if (caller == QOS_CALLER_KPACKET) {
			struct neighbor *nb = container_of(rb, struct neighbor,
					rb_kp);
			rc = send_messages(nb, 0, 1);
		} else if (caller == QOS_CALLER_CONN_RETRANS) {
			struct neighbor *nb = container_of(rb, struct neighbor,
					rb_cr);
#warning todo do not send if neighbor is stalled
			rc = send_retrans(nb, 1);
		} else if (caller == QOS_CALLER_ANNOUNCE) {
			struct announce_data *ann = container_of(rb,
					struct announce_data, rb);
			rc = send_announce_qos(ann);
		} else {
			BUG();
		}

		if (rc != 0 && rb->in_queue == 0) {
			rb->in_queue = 1;
			list_add(curr, lh);
		} else {
			if (caller == QOS_CALLER_KPACKET) {
				kref_put(&(container_of(rb, struct neighbor,
						rb_kp)->ref), neighbor_free);
			} else if (caller == QOS_CALLER_CONN_RETRANS) {
				kref_put(&(container_of(rb, struct neighbor,
						rb_cr)->ref), neighbor_free);
			} else if (caller == QOS_CALLER_ANNOUNCE) {
				kref_put(&(container_of(rb,
						struct announce_data, rb)->ref),
						announce_data_free);
			} else {
				BUG();
			}
		}

		if (rc != 0)
			break;
	}

	return rc;
}

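/*
 * Delayed work: walk all device queues and retry the waiting kernel packets,
 * conn retransmits, announces and conns. Queues whose device has been
 * destroyed are freed here. Reschedules itself (one jiffy later) as long as
 * a queue is still congested.
 */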
static void qos_resume(struct work_struct *work)
{
	struct list_head *curr;

	mutex_lock(&(queues_lock));

	curr = queues.next;
	while (curr != (&queues)) {
		struct qos_queue *q = container_of(curr,
				struct qos_queue, queue_list);
		int i;

		for (i=0;i<4;i++) {
			int rc;
			if (i == 3)
				rc = resume_conns(q);
			else
				rc = _qos_resume(q, i);

			if (rc != 0)
				goto congested;
		}

		curr = curr->next;

		if (i == 4 && unlikely(q->dev == 0)) {
			list_del(&(q->queue_list));
			kfree(q);
		}
	}

	qos_resume_scheduled = 0;

	if (0) {
congested:
		schedule_delayed_work(&(qos_resume_work), 1);
	}

	mutex_unlock(&(queues_lock));
}

static struct qos_queue *get_queue(struct net_device *dev)
{
	struct list_head *curr = queues.next;
	while (curr != (&queues)) {
		struct qos_queue *q = container_of(curr,
				struct qos_queue, queue_list);
		if (q->dev == dev)
			return q;
		curr = curr->next;
	}
	return 0;
}

int destroy_queue(struct net_device *dev)
{
	struct qos_queue *q;

	mutex_lock(&(queues_lock));

	q = get_queue(dev);

	if (q == 0) {
		mutex_unlock(&(queues_lock));
		return 1;
	}

	q->dev = 0;

	dev_put(dev);

	mutex_unlock(&(queues_lock));

	return 0;
}

int create_queue(struct net_device *dev)
{
	struct qos_queue *q = kmalloc(sizeof(struct qos_queue), GFP_KERNEL);

	if (q == 0) {
		printk(KERN_ERR "cor: unable to allocate memory for device "
				"queue, not enabling device");
		return 1;
	}

	q->dev = dev;
	dev_hold(dev);

	INIT_LIST_HEAD(&(q->kpackets_waiting));
	INIT_LIST_HEAD(&(q->conn_retrans_waiting));
	INIT_LIST_HEAD(&(q->announce_waiting));
	INIT_LIST_HEAD(&(q->conns_waiting));

	mutex_lock(&(queues_lock));
	list_add(&(q->queue_list), &queues);
	mutex_unlock(&(queues_lock));

	return 0;
}

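/*
 * Park a resume_block on the queue of the given device and make sure the
 * resume work is scheduled. A reference on the owning object (neighbor,
 * announce_data or conn) is held while the entry sits on the queue.
 */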
void qos_enqueue(struct net_device *dev, struct resume_block *rb, int caller)
{
	struct qos_queue *q;

	mutex_lock(&(queues_lock));

	if (rb->in_queue)
		goto out;

	q = get_queue(dev);
	if (unlikely(q == 0))
		goto out;

	rb->in_queue = 1;

	if (caller == QOS_CALLER_KPACKET) {
		list_add(&(rb->lh), &(q->conn_retrans_waiting));
		kref_get(&(container_of(rb, struct neighbor, rb_kp)->ref));
	} else if (caller == QOS_CALLER_CONN_RETRANS) {
		list_add(&(rb->lh), &(q->kpackets_waiting));
		kref_get(&(container_of(rb, struct neighbor, rb_cr)->ref));
	} else if (caller == QOS_CALLER_ANNOUNCE) {
		list_add(&(rb->lh), &(q->announce_waiting));
		kref_get(&(container_of(rb, struct announce_data, rb)->ref));
	} else if (caller == QOS_CALLER_CONN) {
		struct conn *rconn = container_of(rb, struct conn,
				target.out.rb);
		mutex_lock(&(rconn->rcv_lock));
		BUG_ON(rconn->targettype != TARGET_OUT);
		list_add(&(rb->lh), &(q->conns_waiting));
		kref_get(&(rconn->ref));
		mutex_unlock(&(rconn->rcv_lock));
	} else {
		BUG();
	}

	if (qos_resume_scheduled == 0) {
		schedule_delayed_work(&(qos_resume_work), 1);
		qos_resume_scheduled = 1;
	}

out:
	mutex_unlock(&(queues_lock));
}

void qos_remove_conn(struct conn *rconn)
{
	int kref = 0;
	mutex_lock(&(queues_lock));
	if (rconn->targettype != TARGET_OUT)
		goto out;

	if (rconn->target.out.rb.in_queue == 0)
		goto out;

	rconn->target.out.rb.in_queue = 0;
	list_del(&(rconn->target.out.rb.lh));
	kref = 1;

out:
	mutex_unlock(&(queues_lock));

	if (kref)
		kref_put(&(rconn->ref), free_conn);
}

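/*
 * Send gates: check whether anything is already waiting on the device queue
 * before allowing a conn retransmit or regular conn data to be sent directly.
 */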
static int may_send_conn_retrans(struct neighbor *nb)
{
	struct qos_queue *q;
	int rc = 0;

	mutex_lock(&(queues_lock));

	q = get_queue(nb->dev);
	if (unlikely(q == 0))
		goto out;

	rc = (list_empty(&(q->kpackets_waiting)));

out:
	mutex_unlock(&(queues_lock));

	return rc;
}

static int may_send_conn(struct conn *rconn)
{
	struct qos_queue *q;
	int rc = 0;

	mutex_lock(&(queues_lock));

	q = get_queue(rconn->target.out.nb->dev);
	if (unlikely(q == 0))
		goto out;

	rc = (list_empty(&(q->kpackets_waiting)) &&
			list_empty(&(q->conn_retrans_waiting)) &&
			list_empty(&(q->announce_waiting)) &&
			list_empty(&(q->conns_waiting)));

out:
	mutex_unlock(&(queues_lock));

	return rc;
}

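/*
 * Allocate an skb for a PACKET_TYPE_DATA frame to the given neighbor and
 * write the 9 byte header (packet type, conn_id, seqno); the caller appends
 * the payload with skb_put().
 */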
struct sk_buff *create_packet(struct neighbor *nb, int size,
		gfp_t alloc_flags, __u32 conn_id, __u32 seqno)
{
	struct sk_buff *ret;
	char *dest;

	ret = alloc_skb(size + 9 + LL_ALLOCATED_SPACE(nb->dev), alloc_flags);
	if (unlikely(0 == ret))
		return 0;

	ret->protocol = htons(ETH_P_COR);
	ret->dev = nb->dev;

	skb_reserve(ret, LL_RESERVED_SPACE(nb->dev));
	if (unlikely(dev_hard_header(ret, nb->dev, ETH_P_COR, nb->mac,
			nb->dev->dev_addr, ret->len) < 0)) {
		/* do not leak the skb if the ll header cannot be built */
		kfree_skb(ret);
		return 0;
	}
	skb_reset_network_header(ret);

	dest = skb_put(ret, 9);
	BUG_ON(0 == dest);

	dest[0] = PACKET_TYPE_DATA;
	dest += 1;

	put_u32(dest, conn_id, 1);
	dest += 4;
	put_u32(dest, seqno, 1);
	dest += 4;

	return ret;
}

static void set_conn_retrans_timeout(struct conn_retrans *cr)
{
	struct neighbor *nb = cr->rconn->target.out.nb;
	cr->timeout = jiffies + usecs_to_jiffies(100000 +
			((__u32) atomic_read(&(nb->latency))) +
			((__u32) atomic_read(&(nb->max_remote_cmsg_delay))));
}

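/*
 * Requeue a conn_retrans that is about to be (re)sent. If only a prefix of
 * the entry is sent, the entry is split and the remainder is returned as a
 * new conn_retrans; *dontsend is set if the data was acked in the meantime.
 */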
static struct conn_retrans *readd_conn_retrans(struct conn_retrans *cr,
		struct neighbor *nb, __u32 length, int *dontsend)
{
	unsigned long iflags;

	struct conn_retrans *ret = 0;

	spin_lock_irqsave(&(nb->retrans_lock), iflags);

	if (unlikely(cr->ackrcvd)) {
		*dontsend = 1;
		goto out;
	} else
		*dontsend = 0;

	if (unlikely(cr->length > length)) {
		ret = kmem_cache_alloc(connretrans_slab, GFP_ATOMIC);
		if (unlikely(ret == 0)) {
			cr->timeout = jiffies + 1;
			goto out;
		}

		memset(ret, 0, sizeof (struct conn_retrans));
		ret->rconn = cr->rconn;
		kref_get(&(cr->rconn->ref));
		ret->seqno = cr->seqno + length;
		ret->length = cr->length - length;
		kref_init(&(ret->ref));

		list_add(&(ret->timeout_list), &(nb->retrans_list_conn));
		list_add(&(ret->conn_list), &(cr->conn_list));

		cr->length = length;
	} else {
		list_del(&(cr->timeout_list));
		list_add_tail(&(cr->timeout_list), &(nb->retrans_list_conn));
		set_conn_retrans_timeout(cr);

		BUG_ON(cr->length != length);
	}

out:
	spin_unlock_irqrestore(&(nb->retrans_lock), iflags);

	return ret;
}

/* rcvlock *must* be held while calling this */
void cancel_retrans(struct conn *rconn)
{
	unsigned long iflags;
	struct neighbor *nb = rconn->target.out.nb;

	spin_lock_irqsave(&(nb->retrans_lock), iflags);

	while (list_empty(&(rconn->target.out.retrans_list)) == 0) {
		struct conn_retrans *cr = container_of(
				rconn->target.out.retrans_list.next,
				struct conn_retrans, conn_list);
		BUG_ON(cr->rconn != rconn);

		list_del(&(cr->timeout_list));
		list_del(&(cr->conn_list));
		cr->ackrcvd = 1;
		kref_put(&(cr->ref), free_connretrans);
	}

	spin_unlock_irqrestore(&(nb->retrans_lock), iflags);
}

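/*
 * Retransmit one conn_retrans entry: full-mss chunks are sent as separate
 * data packets, a remaining tail smaller than the mss is sent as conn data
 * attached to a control message. Returns 1 if the device queue was full.
 */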
static int _send_retrans(struct neighbor *nb, struct conn_retrans *cr)
{
	/* cr may be replaced by its split-off remainder (and become 0) in the
	 * loop below, so keep a pointer to the conn for unlock/kref_put */
	struct conn *rconn = cr->rconn;
	int targetmss = mss(nb);
	int dontsend;
	int queuefull = 0;

	mutex_lock(&(rconn->rcv_lock));

	BUG_ON(rconn->targettype != TARGET_OUT);
	BUG_ON(rconn->target.out.nb != nb);

	kref_get(&(rconn->ref));

	if (unlikely(atomic_read(&(rconn->isreset)) != 0)) {
		cancel_retrans(rconn);
		goto out;
	}

	while (cr->length >= targetmss) {
		struct sk_buff *skb;
		char *dst;
		struct conn_retrans *cr2;
		int rc;

		if (may_send_conn_retrans(nb) == 0)
			goto qos_enqueue;

		skb = create_packet(nb, targetmss, GFP_KERNEL,
				rconn->target.out.conn_id, cr->seqno);
		if (unlikely(skb == 0)) {
			cr->timeout = jiffies + 1;
			goto out;
		}

		cr2 = readd_conn_retrans(cr, nb, targetmss, &dontsend);

		if (unlikely(unlikely(dontsend) || unlikely(cr2 == 0 &&
				unlikely(cr->length > targetmss)))) {
			kfree_skb(skb);
			goto out;
		}

		dst = skb_put(skb, targetmss);

		databuf_pullold(rconn, cr->seqno, dst, targetmss);
		rc = dev_queue_xmit(skb);

		if (rc != 0) {
			unsigned long iflags;

			spin_lock_irqsave(&(nb->retrans_lock), iflags);
			if (unlikely(cr->ackrcvd)) {
				dontsend = 1;
			} else {
				list_del(&(cr->timeout_list));
				list_add(&(cr->timeout_list),
						&(nb->retrans_list_conn));
			}
			spin_unlock_irqrestore(&(nb->retrans_lock), iflags);
			if (dontsend == 0)
				goto qos_enqueue;
		}

		cr = cr2;

		if (likely(cr == 0))
			goto out;
	}

	if (unlikely(cr->length <= 0)) {
		BUG();
	} else {
		struct control_msg_out *cm;
		char *buf = kmalloc(cr->length, GFP_KERNEL);

		if (unlikely(buf == 0)) {
			cr->timeout = jiffies + 1;
			goto out;
		}

		cm = alloc_control_msg(nb, ACM_PRIORITY_MED);
		if (unlikely(cm == 0)) {
			cr->timeout = jiffies + 1;
			kfree(buf);
			goto out;
		}

		databuf_pullold(rconn, cr->seqno, buf, cr->length);

		if (unlikely(readd_conn_retrans(cr, nb, cr->length, &dontsend)
				!= 0))
			BUG();

		if (likely(dontsend == 0)) {
			send_conndata(cm, rconn->target.out.conn_id,
					cr->seqno, buf, buf, cr->length);
		}
	}

	if (0) {
qos_enqueue:
		queuefull = 1;
	}
out:
	mutex_unlock(&(rconn->rcv_lock));

	kref_put(&(rconn->ref), free_conn);

	return queuefull;
}

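/*
 * Work through the neighbor's conn retransmit list, sending everything whose
 * timeout has expired. Reschedules the retransmit timer for the next pending
 * entry and falls back to the qos queue if the device is congested.
 */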
static int send_retrans(struct neighbor *nb, int fromqos)
{
	unsigned long iflags;

	struct conn_retrans *cr = 0;

	int nbstate;
	int rescheduled = 0;
	int queuefull = 0;

	spin_lock_irqsave(&(nb->state_lock), iflags);
	nbstate = nb->state;
	spin_unlock_irqrestore(&(nb->state_lock), iflags);

	while (1) {
		spin_lock_irqsave(&(nb->retrans_lock), iflags);

		if (list_empty(&(nb->retrans_list_conn))) {
			nb->retrans_timer_conn_running = 0;
			spin_unlock_irqrestore(&(nb->retrans_lock), iflags);
			break;
		}

		cr = container_of(nb->retrans_list_conn.next,
				struct conn_retrans, timeout_list);

		BUG_ON(cr->rconn->targettype != TARGET_OUT);

		if (unlikely(unlikely(nbstate == NEIGHBOR_STATE_KILLED) ||
				unlikely(atomic_read(
				&(cr->rconn->isreset)) != 0))) {
			list_del(&(cr->timeout_list));
			list_del(&(cr->conn_list));
			spin_unlock_irqrestore(&(nb->retrans_lock), iflags);

			kref_put(&(cr->ref), free_connretrans);
			continue;
		}

		BUG_ON(nb != cr->rconn->target.out.nb);

#warning todo check window limit

		if (time_after(cr->timeout, jiffies)) {
			schedule_delayed_work(&(nb->retrans_timer_conn),
					cr->timeout - jiffies);
			rescheduled = 1;
			spin_unlock_irqrestore(&(nb->retrans_lock), iflags);
			break;
		}

		kref_get(&(cr->ref));
		spin_unlock_irqrestore(&(nb->retrans_lock), iflags);
		queuefull = _send_retrans(nb, cr);
		kref_put(&(cr->ref), free_connretrans);
		if (queuefull) {
			rescheduled = 1;
			if (fromqos == 0)
				qos_enqueue(nb->dev, &(nb->rb_cr),
						QOS_CALLER_CONN_RETRANS);
			break;
		}
	}

	if (rescheduled == 0)
		kref_put(&(nb->ref), neighbor_free);

	return queuefull;
}

void retransmit_conn_timerfunc(struct work_struct *work)
{
	struct neighbor *nb = container_of(to_delayed_work(work),
			struct neighbor, retrans_timer_conn);

	send_retrans(nb, 0);
}

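/*
 * An out-of-order range [seqno_ooo, seqno_ooo+length) has been acked:
 * trim or drop the conn_retrans entries covered by it.
 */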
void conn_ack_ooo_rcvd(struct conn *rconn, __u32 seqno_ooo, __u32 length)
{
	unsigned long iflags;
	struct neighbor *nb;
	struct list_head *curr;

	if (unlikely(length == 0))
		return;

	mutex_lock(&(rconn->rcv_lock));

	BUG_ON(rconn->targettype != TARGET_OUT);

	nb = rconn->target.out.nb;

	spin_lock_irqsave(&(nb->retrans_lock), iflags);

	curr = rconn->target.out.retrans_list.next;

	while (curr != &(rconn->target.out.retrans_list)) {
		struct conn_retrans *cr = container_of(curr,
				struct conn_retrans, conn_list);

		if (((__s32)(cr->seqno + cr->length - seqno_ooo)) > 0)
			goto cont;

		if (((__s32)(cr->seqno + cr->length - seqno_ooo - length)) > 0) {
			if (((__s32)(cr->seqno - seqno_ooo - length)) < 0) {
				__u32 newseqno = seqno_ooo + length;
				cr->length -= (newseqno - cr->seqno);
				cr->seqno = newseqno;
			}

			break;
		}

		if (((__s32)(cr->seqno - seqno_ooo)) < 0 &&
				((__s32)(cr->seqno + cr->length - seqno_ooo -
				length)) <= 0) {
			__u32 diff = seqno_ooo + length - cr->seqno -
					cr->length;
			cr->seqno += diff;
			cr->length -= diff;
		} else {
			list_del(&(cr->timeout_list));
			list_del(&(cr->conn_list));
			cr->ackrcvd = 1;
			kref_put(&(cr->ref), free_connretrans);
		}

cont:
		curr = curr->next;
	}

	if (unlikely(list_empty(&(rconn->target.out.retrans_list))) == 0) {
		struct conn_retrans *cr = container_of(
				rconn->target.out.retrans_list.next,
				struct conn_retrans, conn_list);
		if (unlikely(((__s32) (cr->seqno -
				rconn->target.out.seqno_acked)) > 0))
			rconn->target.out.seqno_acked = cr->seqno;
	}

	spin_unlock_irqrestore(&(nb->retrans_lock), iflags);

	mutex_unlock(&(rconn->rcv_lock));
}

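/*
 * Cumulative ack up to seqno: update the send window (if setwindow), drop all
 * fully acked conn_retrans entries and release the acked data from the buffer.
 */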
void conn_ack_rcvd(struct conn *rconn, __u32 seqno, int setwindow, __u8 window)
{
	unsigned long iflags;
	struct neighbor *nb;

	mutex_lock(&(rconn->rcv_lock));

	BUG_ON(rconn->targettype != TARGET_OUT);

	if (unlikely(((__s32)(seqno - rconn->target.out.seqno_nextsend)) > 0) ||
			((__s32)(seqno - rconn->target.out.seqno_acked)) < 0)
		goto out;

	if (setwindow) {
		__u32 windowdec = dec_log_64_11(window);
		if (unlikely(seqno == rconn->target.out.seqno_acked &&
				((__s32) (seqno + windowdec -
				rconn->target.out.seqno_windowlimit)) < 0))
			goto skipwindow;

		rconn->target.out.seqno_windowlimit = seqno + windowdec;
	}

skipwindow:
	if (seqno == rconn->target.out.seqno_acked)
		goto out;

	nb = rconn->target.out.nb;

	spin_lock_irqsave(&(nb->retrans_lock), iflags);

	rconn->target.out.seqno_acked = seqno;

	while (list_empty(&(rconn->target.out.retrans_list)) == 0) {
		struct conn_retrans *cr = container_of(
				rconn->target.out.retrans_list.next,
				struct conn_retrans, conn_list);

		if (((__s32)(cr->seqno + cr->length - seqno)) > 0) {
			if (((__s32)(cr->seqno - seqno)) < 0) {
				cr->length -= (seqno - cr->seqno);
				cr->seqno = seqno;
			}
			break;
		}

		list_del(&(cr->timeout_list));
		list_del(&(cr->conn_list));
		cr->ackrcvd = 1;
		kref_put(&(cr->ref), free_connretrans);
	}

	spin_unlock_irqrestore(&(nb->retrans_lock), iflags);
	databuf_ack(rconn, rconn->target.out.seqno_acked);

out:
	mutex_unlock(&(rconn->rcv_lock));

	flush_buf(rconn);
}

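/*
 * Initialize a conn_retrans for freshly sent data and put it on the
 * neighbor's retransmit list; starts the retransmit timer if the list was
 * empty.
 */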
static void schedule_retransmit_conn(struct conn_retrans *cr,
		struct conn *rconn, __u32 seqno, __u32 len)
{
	unsigned long iflags;

	struct neighbor *nb = rconn->target.out.nb;

	int first;

	BUG_ON(rconn->targettype != TARGET_OUT);

	memset(cr, 0, sizeof (struct conn_retrans));
	cr->rconn = rconn;
	kref_get(&(rconn->ref));
	cr->seqno = seqno;
	cr->length = len;
	kref_init(&(cr->ref));
	set_conn_retrans_timeout(cr);

	spin_lock_irqsave(&(nb->retrans_lock), iflags);

	first = unlikely(list_empty(&(nb->retrans_list_conn)));
	list_add_tail(&(cr->timeout_list), &(nb->retrans_list_conn));

	list_add_tail(&(cr->conn_list), &(rconn->target.out.retrans_list));

	if (unlikely(unlikely(first) &&
			unlikely(nb->retrans_timer_conn_running == 0))) {
		schedule_delayed_work(&(nb->retrans_timer_conn),
				cr->timeout - jiffies);
		nb->retrans_timer_conn_running = 1;
		kref_get(&(nb->ref));
	}

	spin_unlock_irqrestore(&(nb->retrans_lock), iflags);
}

static __u32 get_windowlimit(struct conn *rconn)
{
	__s32 windowlimit = (__s32)(rconn->target.out.seqno_windowlimit -
			rconn->target.out.seqno_nextsend);
	if (unlikely(windowlimit < 0))
		return 0;
	return windowlimit;
}

#warning todo reset connections which are SOURCE_NONE and are stuck for too long
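/*
 * Send as much buffered data of this connection as the window, the credits
 * (creditsperbyte is charged per sent byte) and the device queue permit.
 * Returns one of the RC_FLUSH_CONN_OUT_* codes; *_OK_SENT means at least one
 * packet was sent.
 */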
int flush_out(struct conn *rconn, int fromqos, __u32 creditsperbyte)
{
	int targetmss = mss(rconn->target.out.nb);
	__u32 seqno;
	int sent = 0;

	BUG_ON(rconn->targettype != TARGET_OUT);

	if (unlikely(rconn->target.out.conn_id == 0))
		return RC_FLUSH_CONN_OUT_OK;

	if (unlikely(atomic_read(&(rconn->isreset)) != 0))
		return RC_FLUSH_CONN_OUT_OK;

	if (unlikely(rconn->sourcetype == SOURCE_SOCK &&
			rconn->source.sock.delay_flush != 0))
		return RC_FLUSH_CONN_OUT_OK;

	if (fromqos == 0 && may_send_conn(rconn) == 0)
		return RC_FLUSH_CONN_OUT_CONG;

	while (rconn->data_buf.read_remaining >= targetmss &&
			get_windowlimit(rconn) >= targetmss) {
		struct conn_retrans *cr;
		struct sk_buff *skb;
		char *dst;
		int rc;

		if (unlikely(creditsperbyte * targetmss >
				rconn->credits))
			return RC_FLUSH_CONN_OUT_CREDITS;

		seqno = rconn->target.out.seqno_nextsend;
		skb = create_packet(rconn->target.out.nb, targetmss, GFP_ATOMIC,
				rconn->target.out.conn_id, seqno);
		if (unlikely(skb == 0))
			return RC_FLUSH_CONN_OUT_OOM;

		cr = kmem_cache_alloc(connretrans_slab, GFP_KERNEL);
		if (unlikely(cr == 0)) {
			kfree_skb(skb);
			return RC_FLUSH_CONN_OUT_OOM;
		}

		dst = skb_put(skb, targetmss);

		databuf_pull(rconn, dst, targetmss);

		rc = dev_queue_xmit(skb);
		if (rc != 0) {
			databuf_unpull(rconn, targetmss);
			kmem_cache_free(connretrans_slab, cr);
			return RC_FLUSH_CONN_OUT_CONG;
		}

		rconn->credits -= creditsperbyte * targetmss;
		rconn->target.out.seqno_nextsend += targetmss;
		schedule_retransmit_conn(cr, rconn, seqno, targetmss);
		sent = 1;
	}

	if (rconn->data_buf.read_remaining > 0 && (rconn->tos == TOS_LATENCY ||
			rconn->target.out.seqno_nextsend ==
			rconn->target.out.seqno_acked)) {
		struct control_msg_out *cm;
		struct conn_retrans *cr;
		__u32 len = rconn->data_buf.read_remaining;
		__s32 windowlimit = get_windowlimit(rconn);
		char *buf;

		if (windowlimit == 0)
			goto out;

		if (windowlimit < len/2 && rconn->target.out.seqno_nextsend !=
				rconn->target.out.seqno_acked)
			goto out;

		if (len > windowlimit)
			len = windowlimit;

		/* check the credits before allocating, so buf cannot leak */
		if (unlikely(creditsperbyte * len > rconn->credits))
			return RC_FLUSH_CONN_OUT_CREDITS;

		buf = kmalloc(len, GFP_KERNEL);
		if (unlikely(buf == 0))
			return RC_FLUSH_CONN_OUT_OOM;

		cm = alloc_control_msg(rconn->target.out.nb, ACM_PRIORITY_MED);
		if (unlikely(cm == 0)) {
			kfree(buf);
			return RC_FLUSH_CONN_OUT_OOM;
		}

		cr = kmem_cache_alloc(connretrans_slab, GFP_KERNEL);
		if (unlikely(cr == 0)) {
			kfree(buf);
			free_control_msg(cm);
			return RC_FLUSH_CONN_OUT_CONG;
		}

		databuf_pull(rconn, buf, len);

		seqno = rconn->target.out.seqno_nextsend;
		rconn->credits -= creditsperbyte * len;
		rconn->target.out.seqno_nextsend += len;

		schedule_retransmit_conn(cr, rconn, seqno, len);

		send_conndata(cm, rconn->target.out.conn_id, seqno, buf, buf,
				len);
		sent = 1;
	}

out:
	if (sent)
		return RC_FLUSH_CONN_OUT_OK_SENT;

	return RC_FLUSH_CONN_OUT_OK;
}

int __init cor_snd_init(void)
{
	connretrans_slab = kmem_cache_create("cor_connretrans",
			sizeof(struct conn_retrans), 8, 0, 0);

	if (unlikely(connretrans_slab == 0))
		return 1;

	INIT_DELAYED_WORK(&(qos_resume_work), qos_resume);
	qos_resume_scheduled = 0;

	return 0;
}

MODULE_LICENSE("GPL");