/**
 * Connection oriented routing
 * Copyright (C) 2007-2011 Michael Blizek
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version 2
 * of the License, or (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
 * 02110-1301, USA.
 */

#include <linux/gfp.h>
#include <linux/jiffies.h>
#include <linux/slab.h>

#include "cor.h"

struct kmem_cache *connretrans_slab;

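/*
 * A conn_retrans describes a range [seqno, seqno + length) of already sent
 * conn data that may still have to be retransmitted. It is linked into the
 * neighbor's retrans_list_conn (kept in timeout order) via timeout_list and
 * into the connection's target.out.retrans_list via conn_list.
 */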
struct conn_retrans {
	/* timeout_list and conn_list share a single ref */
	struct kref ref;
	struct list_head timeout_list;
	struct list_head conn_list;
	struct htab_entry htab_entry;
	struct conn *trgt_out_o;
	__u32 seqno;
	__u32 length;
	__u8 ackrcvd;
	unsigned long timeout;
};

static void free_connretrans(struct kref *ref)
{
	struct conn_retrans *cr = container_of(ref, struct conn_retrans, ref);
	kref_put(&(cr->trgt_out_o->ref), free_conn);
	kmem_cache_free(connretrans_slab, cr);
}

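/*
 * Per-device send queues: transmissions that could not be completed (for
 * example because the device queue was full) are recorded here and retried
 * later from qos_resume(), which runs as delayed work. queues_lock protects
 * the queues list as well as the waiting lists inside each qos_queue.
 */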
DEFINE_MUTEX(queues_lock);
LIST_HEAD(queues);
struct delayed_work qos_resume_work;
int qos_resume_scheduled;

struct qos_queue {
	struct list_head queue_list;

	struct net_device *dev;

	struct list_head kpackets_waiting;
	struct list_head conn_retrans_waiting;
	struct list_head announce_waiting;
	struct list_head conns_waiting;
};

/* Highest bidder "pays" the credits the second has bid */
static int _resume_conns(struct qos_queue *q)
{
	struct conn *best = 0;
	__u64 bestcredit = 0;
	__u64 secondcredit = 0;

	int rc;

	struct list_head *lh = q->conns_waiting.next;

	while (lh != &(q->conns_waiting)) {
		struct conn *trgt_out_o = container_of(lh, struct conn,
				target.out.rb.lh);
		__u64 credits;

		lh = lh->next;

		refresh_conn_credits(trgt_out_o, 0, 0);

		mutex_lock(&(trgt_out_o->rcv_lock));

		BUG_ON(trgt_out_o->targettype != TARGET_OUT);

		if (atomic_read(&(trgt_out_o->isreset)) != 0) {
			trgt_out_o->target.out.rb.in_queue = 0;
			list_del(&(trgt_out_o->target.out.rb.lh));
			mutex_unlock(&(trgt_out_o->rcv_lock));
			kref_put(&(trgt_out_o->ref), free_conn);

			continue;
		}

		BUG_ON(trgt_out_o->data_buf.read_remaining == 0);

		if (may_alloc_control_msg(trgt_out_o->target.out.nb,
				ACM_PRIORITY_LOW) == 0) {
			mutex_unlock(&(trgt_out_o->rcv_lock));
			continue;
		}

		if (trgt_out_o->credits <= 0)
			credits = 0;
		else
			credits = multiply_div(trgt_out_o->credits, 1LL << 24,
					trgt_out_o->data_buf.read_remaining);
		mutex_unlock(&(trgt_out_o->rcv_lock));

		if (best == 0 || bestcredit < credits) {
			secondcredit = bestcredit;
			best = trgt_out_o;
			bestcredit = credits;
		} else if (secondcredit < credits) {
			secondcredit = credits;
		}
	}

	if (best == 0)
		return RC_FLUSH_CONN_OUT_OK;

	mutex_lock(&(best->rcv_lock));
	rc = flush_out(best, 1, (__u32) (secondcredit >> 32));

	if (rc == RC_FLUSH_CONN_OUT_OK || rc == RC_FLUSH_CONN_OUT_OK_SENT) {
		best->target.out.rb.in_queue = 0;
		list_del(&(best->target.out.rb.lh));
	}
	mutex_unlock(&(best->rcv_lock));

	refresh_conn_credits(best, 0, 0);
	unreserve_sock_buffer(best);

	if (rc == RC_FLUSH_CONN_OUT_OK_SENT)
		wake_sender(best);

	if (rc == RC_FLUSH_CONN_OUT_OK || rc == RC_FLUSH_CONN_OUT_OK_SENT)
		kref_put(&(best->ref), free_conn);

	return rc;
}

static int resume_conns(struct qos_queue *q)
{
	while (list_empty(&(q->conns_waiting)) == 0) {
		int rc = _resume_conns(q);
		if (rc != RC_FLUSH_CONN_OUT_OK &&
				rc != RC_FLUSH_CONN_OUT_OK_SENT)
			return 1;
	}
	return 0;
}

static int send_retrans(struct neighbor *nb, int fromqos);

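/*
 * Resume one category of waiting senders (kernel packets, conn retransmits
 * or announces). Each entry is removed from its waiting list before the
 * send is retried; if the retry reports congestion, the entry is put back
 * and a nonzero value is returned.
 */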
static int _qos_resume(struct qos_queue *q, int caller)
{
	int rc = 0;

	struct list_head *lh;

	if (caller == QOS_CALLER_KPACKET)
		lh = &(q->conn_retrans_waiting);
	else if (caller == QOS_CALLER_CONN_RETRANS)
		lh = &(q->kpackets_waiting);
	else if (caller == QOS_CALLER_ANNOUNCE)
		lh = &(q->announce_waiting);
	else
		BUG();

	while (list_empty(lh) == 0) {
		struct list_head *curr = lh->next;
		struct resume_block *rb = container_of(curr,
				struct resume_block, lh);
		rb->in_queue = 0;
		list_del(curr);

		if (caller == QOS_CALLER_KPACKET) {
			struct neighbor *nb = container_of(rb, struct neighbor,
					rb_kp);
			rc = send_messages(nb, 0, 1);
		} else if (caller == QOS_CALLER_CONN_RETRANS) {
			struct neighbor *nb = container_of(rb, struct neighbor,
					rb_cr);
#warning todo do not send if neighbor is stalled
			rc = send_retrans(nb, 1);
		} else if (caller == QOS_CALLER_ANNOUNCE) {
			struct announce_data *ann = container_of(rb,
					struct announce_data, rb);
			rc = send_announce_qos(ann);
		} else {
			BUG();
		}

		if (rc != 0 && rb->in_queue == 0) {
			rb->in_queue = 1;
			list_add(curr, lh);
		} else {
			if (caller == QOS_CALLER_KPACKET) {
				kref_put(&(container_of(rb, struct neighbor,
						rb_kp)->ref), neighbor_free);
			} else if (caller == QOS_CALLER_CONN_RETRANS) {
				kref_put(&(container_of(rb, struct neighbor,
						rb_cr)->ref), neighbor_free);
			} else if (caller == QOS_CALLER_ANNOUNCE) {
				kref_put(&(container_of(rb,
						struct announce_data, rb)->ref),
						announce_data_free);
			} else {
				BUG();
			}
		}

		if (rc != 0)
			break;
	}
	return rc;
}

static void qos_resume(struct work_struct *work)
{
	struct list_head *curr;

	mutex_lock(&(queues_lock));

	curr = queues.next;
	while (curr != (&queues)) {
		struct qos_queue *q = container_of(curr,
				struct qos_queue, queue_list);
		int i;

		for (i = 0; i < 4; i++) {
			int rc;
			if (i == 3)
				rc = resume_conns(q);
			else
				rc = _qos_resume(q, i);

			if (rc != 0)
				goto congested;
		}

		curr = curr->next;

		if (i == 4 && unlikely(q->dev == 0)) {
			list_del(&(q->queue_list));
			kfree(q);
		}
	}

	qos_resume_scheduled = 0;

	if (0) {
congested:
		schedule_delayed_work(&(qos_resume_work), 1);
	}

	mutex_unlock(&(queues_lock));
}

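/* Look up the qos_queue of a device; the caller must hold queues_lock. */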
static struct qos_queue *get_queue(struct net_device *dev)
{
	struct list_head *curr = queues.next;
	while (curr != (&queues)) {
		struct qos_queue *q = container_of(curr,
				struct qos_queue, queue_list);
		if (q->dev == dev)
			return q;
		curr = curr->next;
	}
	return 0;
}

#warning todo content of the queue?
int destroy_queue(struct net_device *dev)
{
	struct qos_queue *q;

	mutex_lock(&(queues_lock));

	q = get_queue(dev);

	if (q == 0) {
		mutex_unlock(&(queues_lock));
		return 1;
	}

	q->dev = 0;

	dev_put(dev);

	mutex_unlock(&(queues_lock));

	return 0;
}

int create_queue(struct net_device *dev)
{
	struct qos_queue *q = kmalloc(sizeof(struct qos_queue), GFP_KERNEL);

	if (q == 0) {
		printk(KERN_ERR "cor: unable to allocate memory for device "
				"queue, not enabling device\n");
		return 1;
	}

	q->dev = dev;
	dev_hold(dev);

	INIT_LIST_HEAD(&(q->kpackets_waiting));
	INIT_LIST_HEAD(&(q->conn_retrans_waiting));
	INIT_LIST_HEAD(&(q->announce_waiting));
	INIT_LIST_HEAD(&(q->conns_waiting));

	mutex_lock(&(queues_lock));
	list_add(&(q->queue_list), &queues);
	mutex_unlock(&(queues_lock));

	return 0;
}

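/*
 * Queue a resume_block on the device's qos_queue and make sure qos_resume()
 * is scheduled. A reference on the owning neighbor, announce_data or conn
 * is held while the block is queued.
 */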
void qos_enqueue(struct net_device *dev, struct resume_block *rb, int caller)
{
	struct qos_queue *q;

	mutex_lock(&(queues_lock));

	if (rb->in_queue)
		goto out;

	q = get_queue(dev);
	if (unlikely(q == 0))
		goto out;

	rb->in_queue = 1;

	if (caller == QOS_CALLER_KPACKET) {
		list_add(&(rb->lh), &(q->conn_retrans_waiting));
		kref_get(&(container_of(rb, struct neighbor, rb_kp)->ref));
	} else if (caller == QOS_CALLER_CONN_RETRANS) {
		list_add(&(rb->lh), &(q->kpackets_waiting));
		kref_get(&(container_of(rb, struct neighbor, rb_cr)->ref));
	} else if (caller == QOS_CALLER_ANNOUNCE) {
		list_add(&(rb->lh), &(q->announce_waiting));
		kref_get(&(container_of(rb, struct announce_data, rb)->ref));
	} else if (caller == QOS_CALLER_CONN) {
		struct conn *trgt_out = container_of(rb, struct conn,
				target.out.rb);
		mutex_lock(&(trgt_out->rcv_lock));
#warning todo targettype might have changed
		BUG_ON(trgt_out->targettype != TARGET_OUT);
		list_add(&(rb->lh), &(q->conns_waiting));
		kref_get(&(trgt_out->ref));
		mutex_unlock(&(trgt_out->rcv_lock));
	} else {
		BUG();
	}

	if (qos_resume_scheduled == 0) {
		schedule_delayed_work(&(qos_resume_work), 1);
		qos_resume_scheduled = 1;
	}

out:
	mutex_unlock(&(queues_lock));
}

void qos_remove_conn(struct conn *cn)
{
	int kref = 0;
	mutex_lock(&(queues_lock));
	mutex_lock(&(cn->rcv_lock));
	if (cn->targettype != TARGET_OUT)
		goto out;
	if (cn->target.out.rb.in_queue == 0)
		goto out;

	cn->target.out.rb.in_queue = 0;
	list_del(&(cn->target.out.rb.lh));
	kref = 1;

out:
	mutex_unlock(&(cn->rcv_lock));
	mutex_unlock(&(queues_lock));

	if (kref)
		kref_put(&(cn->ref), free_conn);
}

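/*
 * Conn retransmits may only be sent while the kpackets_waiting list of the
 * device is empty; new conn data may only be sent while all four waiting
 * lists are empty.
 */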
static int may_send_conn_retrans(struct neighbor *nb)
{
	struct qos_queue *q;
	int rc = 0;

	mutex_lock(&(queues_lock));

	q = get_queue(nb->dev);
	if (unlikely(q == 0))
		goto out;

	rc = (list_empty(&(q->kpackets_waiting)));

out:
	mutex_unlock(&(queues_lock));

	return rc;
}

static int may_send_conn(struct conn *trgt_out_l)
{
	struct qos_queue *q;
	int rc = 0;

#warning todo this may deadlock, use atomic_ops instead, modify get_queue (move pointer to neighbor?)
	mutex_lock(&(queues_lock));

	q = get_queue(trgt_out_l->target.out.nb->dev);
	if (unlikely(q == 0))
		goto out;

	rc = (list_empty(&(q->kpackets_waiting)) &&
			list_empty(&(q->conn_retrans_waiting)) &&
			list_empty(&(q->announce_waiting)) &&
			list_empty(&(q->conns_waiting)));

out:
	mutex_unlock(&(queues_lock));

	return rc;
}

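/*
 * Allocate an skb for a data packet to the given neighbor. The 9 byte
 * header consists of the packet type (1 byte) followed by conn_id and
 * seqno (4 bytes each, written with put_u32()).
 */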
struct sk_buff *create_packet(struct neighbor *nb, int size,
		gfp_t alloc_flags, __u32 conn_id, __u32 seqno)
{
	struct sk_buff *ret;
	char *dest;

	ret = alloc_skb(size + 9 + LL_ALLOCATED_SPACE(nb->dev), alloc_flags);
	if (unlikely(0 == ret))
		return 0;

	ret->protocol = htons(ETH_P_COR);
	ret->dev = nb->dev;

	skb_reserve(ret, LL_RESERVED_SPACE(nb->dev));
	if (unlikely(dev_hard_header(ret, nb->dev, ETH_P_COR, nb->mac,
			nb->dev->dev_addr, ret->len) < 0)) {
		kfree_skb(ret);
		return 0;
	}
	skb_reset_network_header(ret);

	dest = skb_put(ret, 9);
	BUG_ON(0 == dest);

	dest[0] = PACKET_TYPE_DATA;
	dest += 1;

	put_u32(dest, conn_id, 1);
	dest += 4;
	put_u32(dest, seqno, 1);
	dest += 4;

	return ret;
}

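/*
 * Retransmit timeout: 100ms plus the neighbor's measured latency and the
 * maximum delay the remote side may add before acking, both added in
 * microseconds.
 */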
static void set_conn_retrans_timeout(struct conn_retrans *cr)
{
	struct neighbor *nb = cr->trgt_out_o->target.out.nb;
	cr->timeout = jiffies + usecs_to_jiffies(100000 +
			((__u32) atomic_read(&(nb->latency))) +
			((__u32) atomic_read(&(nb->max_remote_cmsg_delay))));
}

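/*
 * Re-arm a retransmit that is about to be resent. If only the first
 * length bytes are resent, the entry is split and a new conn_retrans
 * covering the remainder is returned. *dontsend is set if an ack arrived
 * in the meantime and nothing has to be sent anymore.
 */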
static struct conn_retrans *readd_conn_retrans(struct conn_retrans *cr,
		struct neighbor *nb, __u32 length, int *dontsend)
{
	unsigned long iflags;

	struct conn_retrans *ret = 0;

	spin_lock_irqsave(&(nb->retrans_lock), iflags);

	if (unlikely(cr->ackrcvd)) {
		*dontsend = 1;
		goto out;
	} else
		*dontsend = 0;

	if (unlikely(cr->length > length)) {
		ret = kmem_cache_alloc(connretrans_slab, GFP_ATOMIC);
		if (unlikely(ret == 0)) {
			cr->timeout = jiffies + 1;
			goto out;
		}

		memset(ret, 0, sizeof (struct conn_retrans));
		ret->trgt_out_o = cr->trgt_out_o;
		kref_get(&(cr->trgt_out_o->ref));
		ret->seqno = cr->seqno + length;
		ret->length = cr->length - length;
		kref_init(&(ret->ref));

		list_add(&(ret->timeout_list), &(nb->retrans_list_conn));
		list_add(&(ret->conn_list), &(cr->conn_list));

		cr->length = length;
	} else {
		list_del(&(cr->timeout_list));
		list_add_tail(&(cr->timeout_list), &(nb->retrans_list_conn));
		set_conn_retrans_timeout(cr);

		BUG_ON(cr->length != length);
	}

out:
	spin_unlock_irqrestore(&(nb->retrans_lock), iflags);

	return ret;
}

void cancel_retrans(struct conn *trgt_out_l)
{
	unsigned long iflags;
	struct neighbor *nb = trgt_out_l->target.out.nb;

	spin_lock_irqsave(&(nb->retrans_lock), iflags);

	while (list_empty(&(trgt_out_l->target.out.retrans_list)) == 0) {
		struct conn_retrans *cr = container_of(
				trgt_out_l->target.out.retrans_list.next,
				struct conn_retrans, conn_list);
		BUG_ON(cr->trgt_out_o != trgt_out_l);

		list_del(&(cr->timeout_list));
		list_del(&(cr->conn_list));
		cr->ackrcvd = 1;
		kref_put(&(cr->ref), free_connretrans);
	}

	spin_unlock_irqrestore(&(nb->retrans_lock), iflags);
}

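/*
 * Retransmit a single conn_retrans entry: full MSS-sized chunks are sent as
 * separate data packets, a remainder smaller than the MSS is sent as conn
 * data attached to a control message. Returns nonzero if the device queue
 * was full and the retransmit has to be retried later.
 */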
static int _send_retrans(struct neighbor *nb, struct conn_retrans *cr)
{
	struct conn *trgt_out_o = cr->trgt_out_o;
	int targetmss = mss(nb);
	int dontsend;
	int queuefull = 0;

	mutex_lock(&(trgt_out_o->rcv_lock));

	BUG_ON(trgt_out_o->targettype != TARGET_OUT);
	BUG_ON(trgt_out_o->target.out.nb != nb);

	kref_get(&(trgt_out_o->ref));

	if (unlikely(atomic_read(&(trgt_out_o->isreset)) != 0)) {
		cancel_retrans(trgt_out_o);
		goto out;
	}

	while (cr->length >= targetmss) {
		struct sk_buff *skb;
		char *dst;
		struct conn_retrans *cr2;
		int rc;

		if (may_send_conn_retrans(nb) == 0)
			goto qos_enqueue;

		skb = create_packet(nb, targetmss, GFP_KERNEL,
				trgt_out_o->target.out.conn_id, cr->seqno);
		if (unlikely(skb == 0)) {
			cr->timeout = jiffies + 1;
			goto out;
		}

		cr2 = readd_conn_retrans(cr, nb, targetmss, &dontsend);
		if (unlikely(unlikely(dontsend) || unlikely(cr2 == 0 &&
				unlikely(cr->length > targetmss)))) {
			kfree_skb(skb);
			goto out;
		}

		dst = skb_put(skb, targetmss);

		databuf_pullold(trgt_out_o, cr->seqno, dst, targetmss);
		rc = dev_queue_xmit(skb);

		if (rc != 0) {
			unsigned long iflags;

			spin_lock_irqsave(&(nb->retrans_lock), iflags);
			if (unlikely(cr->ackrcvd)) {
				dontsend = 1;
			} else {
				list_del(&(cr->timeout_list));
				list_add(&(cr->timeout_list),
						&(nb->retrans_list_conn));
			}
			spin_unlock_irqrestore(&(nb->retrans_lock), iflags);
			if (dontsend == 0)
				goto qos_enqueue;
		}

		cr = cr2;

		if (likely(cr == 0))
			goto out;
	}

	if (unlikely(cr->length <= 0)) {
		BUG();
	} else {
		struct control_msg_out *cm;
		char *buf = kmalloc(cr->length, GFP_KERNEL);

		if (unlikely(buf == 0)) {
			cr->timeout = jiffies + 1;
			goto out;
		}

		cm = alloc_control_msg(nb, ACM_PRIORITY_LOW);
		if (unlikely(cm == 0)) {
			cr->timeout = jiffies + 1;
			kfree(buf);
			goto out;
		}

		databuf_pullold(trgt_out_o, cr->seqno, buf, cr->length);

		if (unlikely(readd_conn_retrans(cr, nb, cr->length, &dontsend)
				!= 0))
			BUG();

		if (likely(dontsend == 0)) {
			send_conndata(cm, trgt_out_o->target.out.conn_id,
					cr->seqno, buf, buf, cr->length);
		}
	}

	if (0) {
qos_enqueue:
		queuefull = 1;
	}
out:
	mutex_unlock(&(trgt_out_o->rcv_lock));

	kref_put(&(trgt_out_o->ref), free_conn);

	return queuefull;
}

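/*
 * Walk the neighbor's conn retransmit list and resend every entry whose
 * timeout has expired. Reschedules itself for the next pending timeout, or
 * via the qos queue if the device is congested.
 */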
static int send_retrans(struct neighbor *nb, int fromqos)
{
	unsigned long iflags;

	struct conn_retrans *cr = 0;

	int nbstate;
	int rescheduled = 0;
	int queuefull = 0;

	spin_lock_irqsave(&(nb->state_lock), iflags);
	nbstate = nb->state;
	spin_unlock_irqrestore(&(nb->state_lock), iflags);

	while (1) {
		spin_lock_irqsave(&(nb->retrans_lock), iflags);

		if (list_empty(&(nb->retrans_list_conn))) {
			nb->retrans_timer_conn_running = 0;
			spin_unlock_irqrestore(&(nb->retrans_lock), iflags);
			break;
		}

		cr = container_of(nb->retrans_list_conn.next,
				struct conn_retrans, timeout_list);

#warning todo lock
		BUG_ON(cr->trgt_out_o->targettype != TARGET_OUT);

		if (unlikely(unlikely(nbstate == NEIGHBOR_STATE_KILLED) ||
				unlikely(atomic_read(
				&(cr->trgt_out_o->isreset)) != 0))) {
			list_del(&(cr->timeout_list));
			list_del(&(cr->conn_list));
			spin_unlock_irqrestore(&(nb->retrans_lock), iflags);

			kref_put(&(cr->ref), free_connretrans);
			continue;
		}

		BUG_ON(nb != cr->trgt_out_o->target.out.nb);

#warning todo check window limit

		if (time_after(cr->timeout, jiffies)) {
			schedule_delayed_work(&(nb->retrans_timer_conn),
					cr->timeout - jiffies);
			rescheduled = 1;
			spin_unlock_irqrestore(&(nb->retrans_lock), iflags);
			break;
		}

		kref_get(&(cr->ref));
		spin_unlock_irqrestore(&(nb->retrans_lock), iflags);
		queuefull = _send_retrans(nb, cr);
		kref_put(&(cr->ref), free_connretrans);
		if (queuefull) {
			rescheduled = 1;
			if (fromqos == 0)
				qos_enqueue(nb->dev, &(nb->rb_cr),
						QOS_CALLER_CONN_RETRANS);
			break;
		}
	}

	if (rescheduled == 0)
		kref_put(&(nb->ref), neighbor_free);

	return queuefull;
}

void retransmit_conn_timerfunc(struct work_struct *work)
{
	struct neighbor *nb = container_of(to_delayed_work(work),
			struct neighbor, retrans_timer_conn);

	send_retrans(nb, 0);
}

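/*
 * Selective ack for the range [seqno_ooo, seqno_ooo + length): retransmit
 * entries covered by the acked range are dropped or shrunk so that only
 * still unacked data remains scheduled for retransmission.
 */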
void conn_ack_ooo_rcvd(struct neighbor *nb, __u32 conn_id,
		struct conn *trgt_out, __u32 seqno_ooo, __u32 length)
{
	unsigned long iflags;
	struct list_head *curr;

	if (unlikely(length == 0))
		return;

	mutex_lock(&(trgt_out->rcv_lock));

	if (unlikely(trgt_out->targettype != TARGET_OUT))
		goto out;
	if (unlikely(trgt_out->target.out.nb != nb))
		goto out;
	if (unlikely(trgt_out->target.out.conn_id != conn_id))
		goto out;

	spin_lock_irqsave(&(nb->retrans_lock), iflags);

	curr = trgt_out->target.out.retrans_list.next;

	while (curr != &(trgt_out->target.out.retrans_list)) {
		struct conn_retrans *cr = container_of(curr,
				struct conn_retrans, conn_list);

		if (((__s32)(cr->seqno + cr->length - seqno_ooo)) > 0)
			goto cont;

		if (((__s32)(cr->seqno + cr->length - seqno_ooo - length)) > 0) {
			if (((__s32)(cr->seqno - seqno_ooo - length)) < 0) {
				__u32 newseqno = seqno_ooo + length;
				cr->length -= (newseqno - cr->seqno);
				cr->seqno = newseqno;
			}

			break;
		}

		if (((__s32)(cr->seqno - seqno_ooo)) < 0 &&
				((__s32)(cr->seqno + cr->length - seqno_ooo -
				length)) <= 0) {
			__u32 diff = seqno_ooo + length - cr->seqno -
					cr->length;
			cr->seqno += diff;
			cr->length -= diff;
		} else {
			list_del(&(cr->timeout_list));
			list_del(&(cr->conn_list));
			cr->ackrcvd = 1;
			kref_put(&(cr->ref), free_connretrans);
		}

cont:
		curr = curr->next;
	}

	if (unlikely(list_empty(&(trgt_out->target.out.retrans_list))) == 0) {
		struct conn_retrans *cr = container_of(
				trgt_out->target.out.retrans_list.next,
				struct conn_retrans, conn_list);
		if (unlikely(((__s32) (cr->seqno -
				trgt_out->target.out.seqno_acked)) > 0))
			trgt_out->target.out.seqno_acked = cr->seqno;
	}

	spin_unlock_irqrestore(&(nb->retrans_lock), iflags);

out:
	mutex_unlock(&(trgt_out->rcv_lock));
}

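/*
 * Cumulative ack up to seqno: optionally update the send window
 * (seqno_windowlimit), drop or shrink acked retransmit entries, release the
 * acked part of the data buffer and, if the window grew, flush more data.
 */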
void conn_ack_rcvd(struct neighbor *nb, __u32 conn_id, struct conn *trgt_out,
		__u32 seqno, int setwindow, __u8 window)
{
	int flush = 0;
	unsigned long iflags;

	mutex_lock(&(trgt_out->rcv_lock));

#warning todo reset check?
	if (unlikely(trgt_out->targettype != TARGET_OUT))
		goto out;
	if (unlikely(trgt_out->target.out.nb != nb))
		goto out;
	if (unlikely(trgt_out->reversedir->source.in.conn_id != conn_id))
		goto out;

	if (unlikely(((__s32)(seqno - trgt_out->target.out.seqno_nextsend)) > 0) ||
			((__s32)(seqno - trgt_out->target.out.seqno_acked)) < 0)
		goto out;

	if (setwindow) {
		__u32 windowdec = dec_log_64_11(window);
		if (unlikely(seqno == trgt_out->target.out.seqno_acked &&
				((__s32) (seqno + windowdec -
				trgt_out->target.out.seqno_windowlimit)) <= 0))
			goto skipwindow;

		trgt_out->target.out.seqno_windowlimit = seqno + windowdec;
		flush = 1;
	}

skipwindow:
	if (seqno == trgt_out->target.out.seqno_acked)
		goto out;

	spin_lock_irqsave(&(nb->retrans_lock), iflags);

	trgt_out->target.out.seqno_acked = seqno;

	while (list_empty(&(trgt_out->target.out.retrans_list)) == 0) {
		struct conn_retrans *cr = container_of(
				trgt_out->target.out.retrans_list.next,
				struct conn_retrans, conn_list);

		if (((__s32)(cr->seqno + cr->length - seqno)) > 0) {
			if (((__s32)(cr->seqno - seqno)) < 0) {
				cr->length -= (seqno - cr->seqno);
				cr->seqno = seqno;
			}
			break;
		}

		list_del(&(cr->timeout_list));
		list_del(&(cr->conn_list));
		cr->ackrcvd = 1;
		kref_put(&(cr->ref), free_connretrans);
	}

	spin_unlock_irqrestore(&(nb->retrans_lock), iflags);
	databuf_ack(trgt_out, trgt_out->target.out.seqno_acked);

out:
	mutex_unlock(&(trgt_out->rcv_lock));

	if (flush)
		flush_buf(trgt_out);
}

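/*
 * Register the just sent range [seqno, seqno + len) for retransmission and
 * start the retransmit timer if it is not running yet.
 */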
static void schedule_retransmit_conn(struct conn_retrans *cr,
		struct conn *trgt_out_l, __u32 seqno, __u32 len)
{
	unsigned long iflags;

	struct neighbor *nb = trgt_out_l->target.out.nb;

	int first;

	BUG_ON(trgt_out_l->targettype != TARGET_OUT);

	memset(cr, 0, sizeof (struct conn_retrans));
	cr->trgt_out_o = trgt_out_l;
	kref_get(&(trgt_out_l->ref));
	cr->seqno = seqno;
	cr->length = len;
	kref_init(&(cr->ref));
	set_conn_retrans_timeout(cr);

	spin_lock_irqsave(&(nb->retrans_lock), iflags);

	first = unlikely(list_empty(&(nb->retrans_list_conn)));
	list_add_tail(&(cr->timeout_list), &(nb->retrans_list_conn));

	list_add_tail(&(cr->conn_list), &(trgt_out_l->target.out.retrans_list));

	if (unlikely(unlikely(first) &&
			unlikely(nb->retrans_timer_conn_running == 0))) {
		schedule_delayed_work(&(nb->retrans_timer_conn),
				cr->timeout - jiffies);
		nb->retrans_timer_conn_running = 1;
		kref_get(&(nb->ref));
	}

	spin_unlock_irqrestore(&(nb->retrans_lock), iflags);
}

static __u32 get_windowlimit(struct conn *trgt_out_l)
{
	__s32 windowlimit = (__s32)(trgt_out_l->target.out.seqno_windowlimit -
			trgt_out_l->target.out.seqno_nextsend);
	if (unlikely(windowlimit < 0))
		return 0;
	return windowlimit;
}

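/*
 * Send as much buffered data of the connection as window, credits and the
 * device queue allow: full MSS packets are sent directly, a smaller rest is
 * flushed as conn data on a control message when latency matters or nothing
 * is in flight. Returns one of the RC_FLUSH_CONN_OUT_* codes. The caller
 * must hold the conn's rcv_lock.
 */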
#warning todo reset connections which are SOURCE_NONE and are stuck for too long
int flush_out(struct conn *trgt_out_l, int fromqos, __u32 creditsperbyte)
{
	int targetmss;
	__u32 seqno;
	int sent = 0;

	BUG_ON(trgt_out_l->targettype != TARGET_OUT);

	targetmss = mss(trgt_out_l->target.out.nb);

	if (unlikely(trgt_out_l->target.out.conn_id == 0))
		return RC_FLUSH_CONN_OUT_OK;

	if (unlikely(atomic_read(&(trgt_out_l->isreset)) != 0))
		return RC_FLUSH_CONN_OUT_OK;

	if (unlikely(trgt_out_l->sourcetype == SOURCE_SOCK &&
			trgt_out_l->source.sock.delay_flush != 0))
		return RC_FLUSH_CONN_OUT_OK;

	if (fromqos == 0 && may_send_conn(trgt_out_l) == 0)
		return RC_FLUSH_CONN_OUT_CONG;

	while (trgt_out_l->data_buf.read_remaining >= targetmss &&
			get_windowlimit(trgt_out_l) >= targetmss) {
		struct conn_retrans *cr;
		struct sk_buff *skb;
		char *dst;
		int rc;

		if (unlikely(creditsperbyte * targetmss >
				trgt_out_l->credits))
			return RC_FLUSH_CONN_OUT_CREDITS;

		seqno = trgt_out_l->target.out.seqno_nextsend;
		skb = create_packet(trgt_out_l->target.out.nb, targetmss,
				GFP_ATOMIC, trgt_out_l->target.out.conn_id,
				seqno);
		if (unlikely(skb == 0))
			return RC_FLUSH_CONN_OUT_OOM;

		cr = kmem_cache_alloc(connretrans_slab, GFP_KERNEL);
		if (unlikely(cr == 0)) {
			kfree_skb(skb);
			return RC_FLUSH_CONN_OUT_OOM;
		}

		dst = skb_put(skb, targetmss);

		databuf_pull(trgt_out_l, dst, targetmss);

		rc = dev_queue_xmit(skb);
		if (rc != 0) {
			databuf_unpull(trgt_out_l, targetmss);
			kmem_cache_free(connretrans_slab, cr);
			return RC_FLUSH_CONN_OUT_CONG;
		}

		trgt_out_l->credits -= creditsperbyte * targetmss;
		trgt_out_l->target.out.seqno_nextsend += targetmss;
		schedule_retransmit_conn(cr, trgt_out_l, seqno, targetmss);
		sent = 1;
	}

	if (trgt_out_l->data_buf.read_remaining > 0 && (trgt_out_l->tos ==
			TOS_LATENCY || trgt_out_l->target.out.seqno_nextsend ==
			trgt_out_l->target.out.seqno_acked)) {
		struct control_msg_out *cm;
		struct conn_retrans *cr;
		__u32 len = trgt_out_l->data_buf.read_remaining;
		__s32 windowlimit = get_windowlimit(trgt_out_l);
		char *buf;

		if (windowlimit == 0)
			goto out;

		if (windowlimit < len/2 &&
				trgt_out_l->target.out.seqno_nextsend !=
				trgt_out_l->target.out.seqno_acked)
			goto out;

		if (len > windowlimit)
			len = windowlimit;

		if (unlikely(creditsperbyte * len > trgt_out_l->credits))
			return RC_FLUSH_CONN_OUT_CREDITS;

		buf = kmalloc(len, GFP_KERNEL);
		if (unlikely(buf == 0))
			return RC_FLUSH_CONN_OUT_OOM;

		cm = alloc_control_msg(trgt_out_l->target.out.nb,
				ACM_PRIORITY_LOW);
		if (unlikely(cm == 0)) {
			kfree(buf);
			return RC_FLUSH_CONN_OUT_OOM;
		}

		cr = kmem_cache_alloc(connretrans_slab, GFP_KERNEL);
		if (unlikely(cr == 0)) {
			kfree(buf);
			free_control_msg(cm);
			return RC_FLUSH_CONN_OUT_CONG;
		}

		databuf_pull(trgt_out_l, buf, len);

		seqno = trgt_out_l->target.out.seqno_nextsend;
		trgt_out_l->credits -= creditsperbyte * len;
		trgt_out_l->target.out.seqno_nextsend += len;

		schedule_retransmit_conn(cr, trgt_out_l, seqno, len);

		send_conndata(cm, trgt_out_l->target.out.conn_id, seqno, buf,
				buf, len);
		sent = 1;
	}

out:
	if (sent)
		return RC_FLUSH_CONN_OUT_OK_SENT;

	return RC_FLUSH_CONN_OUT_OK;
}

int __init cor_snd_init(void)
{
	connretrans_slab = kmem_cache_create("cor_connretrans",
			sizeof(struct conn_retrans), 8, 0, 0);

	if (unlikely(connretrans_slab == 0))
		return 1;

	INIT_DELAYED_WORK(&(qos_resume_work), qos_resume);
	qos_resume_scheduled = 0;

	return 0;
}

MODULE_LICENSE("GPL");