1 /**
2 * Connection oriented routing
3 * Copyright (C) 2007-2020 Michael Blizek
5 * This program is free software; you can redistribute it and/or
6 * modify it under the terms of the GNU General Public License
7 * as published by the Free Software Foundation; either version 2
8 * of the License, or (at your option) any later version.
10 * This program is distributed in the hope that it will be useful,
11 * but WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
13 * GNU General Public License for more details.
15 * You should have received a copy of the GNU General Public License
16 * along with this program; if not, write to the Free Software
17 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
18 * 02110-1301, USA.
21 #include <linux/gfp.h>
22 #include <linux/jiffies.h>
23 #include <linux/slab.h>
25 #include "cor.h"
27 static struct kmem_cache *connretrans_slab;
29 static DEFINE_SPINLOCK(queues_lock);
30 static LIST_HEAD(queues);
32 static int _flush_out(struct conn *trgt_out_lx, __u32 maxsend, __u32 *sent,
33 int from_qos);
35 static void _qos_enqueue(struct qos_queue *q, struct resume_block *rb,
36 int caller, int from_nbcongwin_resume);
39 #ifdef DEBUG_QOS_SLOWSEND
40 static DEFINE_SPINLOCK(slowsend_lock);
41 static unsigned long last_send;
44 int _cor_dev_queue_xmit(struct sk_buff *skb, int caller)
46 int allowsend = 0;
47 unsigned long jiffies_tmp;
48 spin_lock_bh(&slowsend_lock);
49 jiffies_tmp = jiffies;
50 if (last_send != jiffies_tmp) {
51 if (last_send + 1 == jiffies_tmp) {
52 last_send = jiffies_tmp;
53 } else {
54 last_send = jiffies_tmp - 1;
56 allowsend = 1;
58 spin_unlock_bh(&slowsend_lock);
60 /* printk(KERN_ERR "cor_dev_queue_xmit %d, %d", caller, allowsend); */
61 if (allowsend) {
62 return dev_queue_xmit(skb);
63 } else {
64 kfree_skb(skb);
65 return NET_XMIT_DROP;
68 #endif
70 static void free_connretrans(struct kref *ref)
72 struct conn_retrans *cr = container_of(ref, struct conn_retrans, ref);
73 struct conn *cn = cr->trgt_out_o;
75 BUG_ON(cr->state != CONN_RETRANS_ACKED);
77 kmem_cache_free(connretrans_slab, cr);
78 kref_put(&(cn->ref), free_conn);
81 void free_qos(struct kref *ref)
83 struct qos_queue *q = container_of(ref, struct qos_queue, ref);
84 kfree(q);
88 static void qos_queue_set_congstatus(struct qos_queue *q_locked);
90 /**
91 * neighbor congestion window:
92 * increment by 4096 every round trip if more than 2/3 of cwin is used
94 * in case of packet loss decrease by 1/4:
95 * - <= 1/8 immediately and
96 * - <= 1/4 during the next round trip
98 * in case of multiple packet loss events, do not decrement more than once per
99 * round trip
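*
* Worked example for the code below (fixed-point shift left out): at a cwin
* of 64kB, a retransmit of more than 512 bytes cuts cwin to 56kB right away
* and moves cwin_shrinkto to 48kB; the acks of the following round trip then
* shrink cwin further, by a quarter of each acked amount, until it reaches
* cwin_shrinkto, so the total reduction stays at 1/4.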
102 #ifdef COR_NBCONGWIN
104 /*extern __u64 get_bufspace_used(void);
106 static void print_conn_bufstats(struct neighbor *nb)
108 / * not threadsafe, but this is only for debugging... * /
109 __u64 totalsize = 0;
110 __u64 read_remaining = 0;
111 __u32 numconns = 0;
112 struct list_head *lh;
113 unsigned long iflags;
115 spin_lock_irqsave(&(nb->conns_waiting.lock), iflags);
117 lh = nb->conns_waiting.lh.next;
118 while (lh != &(nb->conns_waiting.lh)) {
119 struct conn *cn = container_of(lh, struct conn,
120 target.out.rb.lh);
121 totalsize += cn->data_buf.datasize;
122 read_remaining += cn->data_buf.read_remaining;
123 lh = lh->next;
126 lh = nb->conns_waiting.lh_nextpass.next;
127 while (lh != &(nb->conns_waiting.lh_nextpass)) {
128 struct conn *cn = container_of(lh, struct conn,
129 target.out.rb.lh);
130 totalsize += cn->data_buf.datasize;
131 read_remaining += cn->data_buf.read_remaining;
132 lh = lh->next;
135 numconns = nb->conns_waiting.cnt;
137 spin_unlock_irqrestore(&(nb->conns_waiting.lock), iflags);
139 printk(KERN_ERR "conn %llu %llu %u", totalsize, read_remaining, numconns);
140 } */
142 static void nbcongwin_data_retransmitted(struct neighbor *nb, __u64 bytes_sent)
144 __u64 cwin;
146 unsigned long iflags;
148 spin_lock_irqsave(&(nb->nbcongwin.lock), iflags);
150 cwin = atomic64_read(&(nb->nbcongwin.cwin));
152 /* printk(KERN_ERR "retrans %llu %llu", cwin >> NBCONGWIN_SHIFT,
153 get_bufspace_used());
154 print_conn_bufstats(nb); */
156 BUG_ON(nb->nbcongwin.cwin_shrinkto > cwin);
157 BUG_ON(cwin >= U64_MAX/1024);
159 if (bytes_sent > 1024)
160 bytes_sent = 1024;
162 if (nb->nbcongwin.cwin_shrinkto == cwin) {
163 if (bytes_sent > 512) {
164 cwin -= cwin/8;
165 } else {
166 cwin -= (bytes_sent * cwin) / (1024 * 4);
168 atomic64_set(&(nb->nbcongwin.cwin), cwin);
171 nb->nbcongwin.cwin_shrinkto -=
172 (bytes_sent * nb->nbcongwin.cwin_shrinkto) / (1024 * 4);
174 nb->nbcongwin.cwin_shrinkto = max(nb->nbcongwin.cwin_shrinkto,
175 cwin - cwin/4);
177 spin_unlock_irqrestore(&(nb->nbcongwin.lock), iflags);
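/* cwin and cwin_shrinkto are kept left-shifted by NBCONGWIN_SHIFT (fixed
* point), presumably so that the per-ack increment of roughly
* bytes_acked * INCR_PER_RTT / cwin - often less than one byte - is not
* rounded away; the unlikely() branches below only pick a division order
* that avoids 64 bit overflow for very large bytes_acked values. */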
180 static __u64 nbcongwin_update_cwin(struct neighbor *nb_cwlocked,
181 __u64 data_intransit, __u64 bytes_acked)
183 __u64 CWIN_MUL = (1 << NBCONGWIN_SHIFT);
184 __u32 INCR_PER_RTT = 4096;
186 __u64 cwin = atomic64_read(&(nb_cwlocked->nbcongwin.cwin));
188 __u64 cwin_tmp;
189 __u64 incrby;
191 if (nb_cwlocked->nbcongwin.cwin_shrinkto < cwin) {
192 __u64 shrinkby = (bytes_acked << (NBCONGWIN_SHIFT-2));
193 if (unlikely(shrinkby > cwin))
194 cwin = 0;
195 else
196 cwin -= shrinkby;
198 if (cwin < nb_cwlocked->nbcongwin.cwin_shrinkto)
199 cwin = nb_cwlocked->nbcongwin.cwin_shrinkto;
203 if (cwin * 2 > data_intransit * CWIN_MUL * 3)
204 goto out;
206 cwin_tmp = max(cwin, bytes_acked << NBCONGWIN_SHIFT);
208 if (unlikely(bytes_acked >= U64_MAX/INCR_PER_RTT/CWIN_MUL))
209 incrby = div64_u64(bytes_acked * INCR_PER_RTT,
210 cwin_tmp / CWIN_MUL / CWIN_MUL);
211 else if (unlikely(bytes_acked >=
212 U64_MAX/INCR_PER_RTT/CWIN_MUL/CWIN_MUL))
213 incrby = div64_u64(bytes_acked * INCR_PER_RTT * CWIN_MUL,
214 cwin_tmp / CWIN_MUL);
215 else
216 incrby = div64_u64(bytes_acked * INCR_PER_RTT * CWIN_MUL *
217 CWIN_MUL, cwin_tmp);
219 BUG_ON(incrby > INCR_PER_RTT * CWIN_MUL);
221 if (unlikely(cwin + incrby < cwin))
222 cwin = U64_MAX;
223 else
224 cwin += incrby;
226 if (unlikely(nb_cwlocked->nbcongwin.cwin_shrinkto + incrby <
227 nb_cwlocked->nbcongwin.cwin_shrinkto))
228 nb_cwlocked->nbcongwin.cwin_shrinkto = U64_MAX;
229 else
230 nb_cwlocked->nbcongwin.cwin_shrinkto += incrby;
232 out:
233 atomic64_set(&(nb_cwlocked->nbcongwin.cwin), cwin);
235 return cwin;
238 void nbcongwin_data_acked(struct neighbor *nb, __u64 bytes_acked)
240 unsigned long iflags;
241 struct qos_queue *q = nb->queue;
242 __u64 data_intransit;
243 __u64 cwin;
245 spin_lock_irqsave(&(nb->nbcongwin.lock), iflags);
247 data_intransit = atomic64_read(&(nb->nbcongwin.data_intransit));
249 cwin = nbcongwin_update_cwin(nb, data_intransit, bytes_acked);
251 BUG_ON(bytes_acked > data_intransit);
252 atomic64_sub(bytes_acked, &(nb->nbcongwin.data_intransit));
253 data_intransit -= bytes_acked;
255 if (data_intransit >= cwin >> NBCONGWIN_SHIFT)
256 goto out_sendnok;
258 spin_lock(&(q->qlock));
259 if (nb->rb.in_queue == RB_INQUEUE_NBCONGWIN) {
260 if (nb->conns_waiting.cnt == 0) {
261 nb->rb.in_queue = RB_INQUEUE_FALSE;
262 } else {
263 _qos_enqueue(q, &(nb->rb), QOS_CALLER_NEIGHBOR, 1);
266 spin_unlock(&(q->qlock));
269 out_sendnok:
270 spin_unlock_irqrestore(&(nb->nbcongwin.lock), iflags);
273 static void nbcongwin_data_sent(struct neighbor *nb, __u32 bytes_sent)
275 atomic64_add(bytes_sent, &(nb->nbcongwin.data_intransit));
278 #warning todo do not shrink below mss
279 static int nbcongwin_send_allowed(struct neighbor *nb)
281 unsigned long iflags;
282 int ret = 1;
283 struct qos_queue *q = nb->queue;
284 int krefput_queue = 0;
286 if (atomic64_read(&(nb->nbcongwin.data_intransit)) <=
287 atomic64_read(&(nb->nbcongwin.cwin)) >> NBCONGWIN_SHIFT)
288 return 1;
290 spin_lock_irqsave(&(nb->nbcongwin.lock), iflags);
292 if (atomic64_read(&(nb->nbcongwin.data_intransit)) <=
293 atomic64_read(&(nb->nbcongwin.cwin)) >> NBCONGWIN_SHIFT)
294 goto out_ok;
296 ret = 0;
298 spin_lock(&(q->qlock));
299 if (nb->rb.in_queue == RB_INQUEUE_FALSE) {
300 nb->rb.in_queue = RB_INQUEUE_NBCONGWIN;
301 } else if (nb->rb.in_queue == RB_INQUEUE_TRUE) {
302 list_del(&(nb->rb.lh));
303 kref_put(&(nb->ref), kreffree_bug);
304 nb->rb.in_queue = RB_INQUEUE_NBCONGWIN;
305 BUG_ON(q->numconns < nb->conns_waiting.cnt);
306 q->numconns -= nb->conns_waiting.cnt;
307 q->priority_sum -= nb->conns_waiting.priority_sum;
308 krefput_queue = 1;
310 qos_queue_set_congstatus(q);
311 } else if (nb->rb.in_queue == RB_INQUEUE_NBCONGWIN) {
312 } else {
313 BUG();
315 spin_unlock(&(q->qlock));
317 if (krefput_queue != 0)
318 kref_put(&(q->ref), free_qos);
320 out_ok:
321 spin_unlock_irqrestore(&(nb->nbcongwin.lock), iflags);
323 return ret;
326 #else
328 static inline void nbcongwin_data_retransmitted(struct neighbor *nb,
329 __u64 bytes_sent)
333 static inline void nbcongwin_data_acked(struct neighbor *nb, __u64 bytes_acked)
337 static inline void nbcongwin_data_sent(struct neighbor *nb, __u32 bytes_sent)
341 static inline int nbcongwin_send_allowed(struct neighbor *nb)
343 return 1;
346 #endif
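/* Account the conn's new priority in the per-neighbor and per-queue sums and
* return its byte budget for this pass:
* 1024 * newpriority * numconns / priority_sum, i.e. 1024 bytes scaled by
* how the conn's priority compares to the average conn waiting on this
* queue. */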
348 static __u64 _resume_conns_maxsend(struct qos_queue *q, struct conn *trgt_out_l,
349 __u32 newpriority)
351 unsigned long iflags;
353 struct neighbor *nb = trgt_out_l->target.out.nb;
354 __u32 oldpriority = trgt_out_l->target.out.rb_priority;
355 __u64 priority_sum;
356 __u32 numconns;
358 spin_lock_irqsave(&(nb->conns_waiting.lock), iflags);
359 spin_lock(&(q->qlock));
361 BUG_ON(nb->conns_waiting.priority_sum < oldpriority);
362 BUG_ON(q->priority_sum < oldpriority);
363 nb->conns_waiting.priority_sum -= oldpriority;
364 q->priority_sum -= oldpriority;
366 BUG_ON(nb->conns_waiting.priority_sum + newpriority <
367 nb->conns_waiting.priority_sum);
368 BUG_ON(q->priority_sum + newpriority < q->priority_sum);
369 nb->conns_waiting.priority_sum += newpriority;
370 q->priority_sum += newpriority;
372 priority_sum = q->priority_sum;
373 numconns = q->numconns;
375 spin_unlock(&(q->qlock));
376 spin_unlock_irqrestore(&(nb->conns_waiting.lock), iflags);
378 trgt_out_l->target.out.rb_priority = newpriority;
380 return div_u64(1024LL * ((__u64) newpriority) * ((__u64) numconns),
381 priority_sum);
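/* Begin the next round-robin pass: splice everything that was moved to
* lh_nextpass during the previous pass back onto lh. Returns 1 if nothing
* is left to resume for this neighbor. */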
384 static int _resume_neighbors_nextpass(struct neighbor *nb_waitingconnslocked)
386 BUG_ON(list_empty(&(nb_waitingconnslocked->conns_waiting.lh)) == 0);
388 if (list_empty(&(nb_waitingconnslocked->conns_waiting.lh_nextpass))) {
389 BUG_ON(nb_waitingconnslocked->conns_waiting.cnt != 0);
390 return 1;
393 BUG_ON(nb_waitingconnslocked->conns_waiting.cnt == 0);
395 nb_waitingconnslocked->conns_waiting.lh.next =
396 nb_waitingconnslocked->conns_waiting.lh_nextpass.next;
397 nb_waitingconnslocked->conns_waiting.lh.prev =
398 nb_waitingconnslocked->conns_waiting.lh_nextpass.prev;
399 nb_waitingconnslocked->conns_waiting.lh.next->prev =
400 &(nb_waitingconnslocked->conns_waiting.lh);
401 nb_waitingconnslocked->conns_waiting.lh.prev->next =
402 &(nb_waitingconnslocked->conns_waiting.lh);
403 nb_waitingconnslocked->conns_waiting.lh_nextpass.next =
404 &(nb_waitingconnslocked->conns_waiting.lh_nextpass);
405 nb_waitingconnslocked->conns_waiting.lh_nextpass.prev =
406 &(nb_waitingconnslocked->conns_waiting.lh_nextpass);
408 return 0;
411 static int _resume_neighbors(struct qos_queue *q, struct neighbor *nb,
412 int *progress)
414 unsigned long iflags;
416 while (1) {
417 __u32 priority;
418 __u32 maxsend;
420 int rc2;
421 __u32 sent2 = 0;
423 struct conn *cn = 0;
424 spin_lock_irqsave(&(nb->conns_waiting.lock), iflags);
425 if (list_empty(&(nb->conns_waiting.lh)) != 0) {
426 int done = _resume_neighbors_nextpass(nb);
427 spin_unlock_irqrestore(&(nb->conns_waiting.lock),
428 iflags);
429 return done ? QOS_RESUME_DONE : QOS_RESUME_NEXTNEIGHBOR;
431 BUG_ON(nb->conns_waiting.cnt == 0);
433 cn = container_of(nb->conns_waiting.lh.next, struct conn,
434 target.out.rb.lh);
435 BUG_ON(cn->targettype != TARGET_OUT);
436 BUG_ON(cn->target.out.rb.lh.prev != &(nb->conns_waiting.lh));
437 BUG_ON((cn->target.out.rb.lh.next == &(nb->conns_waiting.lh)) &&
438 (nb->conns_waiting.lh.prev !=
439 &(cn->target.out.rb.lh)));
440 list_del(&(cn->target.out.rb.lh));
441 list_add_tail(&(cn->target.out.rb.lh),
442 &(nb->conns_waiting.lh_nextpass));
443 kref_get(&(cn->ref));
444 spin_unlock_irqrestore(&(nb->conns_waiting.lock), iflags);
447 priority = refresh_conn_priority(cn, 0);
449 spin_lock_bh(&(cn->rcv_lock));
451 if (unlikely(cn->targettype != TARGET_OUT)) {
452 spin_unlock_bh(&(cn->rcv_lock));
453 continue;
456 maxsend = _resume_conns_maxsend(q, cn, priority);
457 maxsend += cn->target.out.maxsend_extra;
458 if (unlikely(maxsend > U32_MAX))
459 maxsend = U32_MAX;
461 rc2 = _flush_out(cn, maxsend, &sent2, 1);
463 if (rc2 == RC_FLUSH_CONN_OUT_OK ||
464 rc2 == RC_FLUSH_CONN_OUT_NBNOTACTIVE) {
465 cn->target.out.maxsend_extra = 0;
466 qos_remove_conn(cn);
467 } else if (sent2 == 0 && (rc2 == RC_FLUSH_CONN_OUT_CONG ||
468 rc2 == RC_FLUSH_CONN_OUT_OOM)) {
469 spin_lock_irqsave(&(nb->conns_waiting.lock), iflags);
470 if (likely(cn->target.out.rb.in_queue !=
471 RB_INQUEUE_FALSE)) {
472 list_del(&(cn->target.out.rb.lh));
473 list_add(&(cn->target.out.rb.lh),
474 &(nb->conns_waiting.lh));
476 spin_unlock_irqrestore(&(nb->conns_waiting.lock),
477 iflags);
478 } else if (rc2 == RC_FLUSH_CONN_OUT_CONG ||
479 rc2 == RC_FLUSH_CONN_OUT_OOM) {
480 cn->target.out.maxsend_extra = 0;
481 } else if (likely(rc2 == RC_FLUSH_CONN_OUT_MAXSENT)) {
482 if (unlikely(maxsend - sent2 > 65535))
483 cn->target.out.maxsend_extra = 65535;
484 else
485 cn->target.out.maxsend_extra = maxsend - sent2;
488 spin_unlock_bh(&(cn->rcv_lock));
490 if (sent2 != 0) {
491 *progress = 1;
492 wake_sender(cn);
495 kref_put(&(cn->ref), free_conn);
497 if (rc2 == RC_FLUSH_CONN_OUT_CONG ||
498 rc2 == RC_FLUSH_CONN_OUT_OOM) {
499 return QOS_RESUME_CONG;
504 static int resume_neighbors(struct qos_queue *q, int *sent)
506 unsigned long iflags;
508 spin_lock_irqsave(&(q->qlock), iflags);
510 while (1) {
511 struct neighbor *nb;
512 int rc;
514 if (list_empty(&(q->neighbors_waiting)) != 0) {
515 BUG_ON(q->numconns != 0);
516 spin_unlock_irqrestore(&(q->qlock), iflags);
517 return QOS_RESUME_DONE;
519 BUG_ON(q->numconns == 0);
521 nb = container_of(q->neighbors_waiting.next, struct neighbor,
522 rb.lh);
524 BUG_ON(nb->rb.in_queue != RB_INQUEUE_TRUE);
525 BUG_ON(nb->rb.lh.prev != &(q->neighbors_waiting));
526 BUG_ON((nb->rb.lh.next == &(q->neighbors_waiting)) &&
527 (q->neighbors_waiting.prev != &(nb->rb.lh)));
529 kref_get(&(nb->ref));
531 spin_unlock_irqrestore(&(q->qlock), iflags);
533 atomic_set(&(nb->cmsg_delay_conndata), 1);
535 rc = _resume_neighbors(q, nb, sent);
536 if (rc == QOS_RESUME_CONG) {
537 kref_put(&(nb->ref), neighbor_free);
538 return QOS_RESUME_CONG;
541 atomic_set(&(nb->cmsg_delay_conndata), 0);
542 spin_lock_bh(&(nb->cmsg_lock));
543 schedule_controlmsg_timer(nb);
544 spin_unlock_bh(&(nb->cmsg_lock));
546 spin_lock_irqsave(&(q->qlock), iflags);
547 if (rc == QOS_RESUME_DONE) {
548 if (nb->conns_waiting.cnt == 0 &&
549 nb->rb.in_queue == RB_INQUEUE_TRUE) {
550 nb->rb.in_queue = RB_INQUEUE_FALSE;
551 list_del(&(nb->rb.lh));
552 kref_put(&(nb->ref), kreffree_bug);
554 } else if (rc == QOS_RESUME_NEXTNEIGHBOR) {
555 if (nb->rb.in_queue == RB_INQUEUE_TRUE) {
556 list_del(&(nb->rb.lh));
557 list_add_tail(&(nb->rb.lh),
558 &(q->neighbors_waiting));
560 } else {
561 BUG();
564 kref_put(&(nb->ref), neighbor_free);
566 if (rc == QOS_RESUME_NEXTNEIGHBOR) {
567 spin_unlock_irqrestore(&(q->qlock), iflags);
568 return QOS_RESUME_NEXTNEIGHBOR;
573 static int send_retrans(struct neighbor *nb, int *sent);
575 static int _qos_resume(struct qos_queue *q, int caller, int *sent)
577 unsigned long iflags;
578 int rc = QOS_RESUME_DONE;
579 struct list_head *lh;
581 spin_lock_irqsave(&(q->qlock), iflags);
583 if (caller == QOS_CALLER_KPACKET)
584 lh = &(q->kpackets_waiting);
585 else if (caller == QOS_CALLER_CONN_RETRANS)
586 lh = &(q->conn_retrans_waiting);
587 else if (caller == QOS_CALLER_ANNOUNCE)
588 lh = &(q->announce_waiting);
589 else
590 BUG();
592 while (list_empty(lh) == 0) {
593 struct resume_block *rb = container_of(lh->next,
594 struct resume_block, lh);
595 BUG_ON(rb->in_queue != RB_INQUEUE_TRUE);
596 rb->in_queue = RB_INQUEUE_FALSE;
597 list_del(&(rb->lh));
599 spin_unlock_irqrestore(&(q->qlock), iflags);
600 if (caller == QOS_CALLER_KPACKET) {
601 rc = send_messages(container_of(rb, struct neighbor,
602 rb_kp), sent);
603 } else if (caller == QOS_CALLER_CONN_RETRANS) {
604 rc = send_retrans(container_of(rb, struct neighbor,
605 rb_cr), sent);
606 } else if (caller == QOS_CALLER_ANNOUNCE) {
607 rc = _send_announce(container_of(rb,
608 struct announce_data, rb), 1, sent);
609 } else {
610 BUG();
612 spin_lock_irqsave(&(q->qlock), iflags);
614 if (rc != QOS_RESUME_DONE && rb->in_queue == RB_INQUEUE_FALSE) {
615 rb->in_queue = RB_INQUEUE_TRUE;
616 list_add(&(rb->lh), lh);
617 break;
620 if (caller == QOS_CALLER_KPACKET) {
621 kref_put(&(container_of(rb, struct neighbor,
622 rb_kp)->ref), neighbor_free);
623 } else if (caller == QOS_CALLER_CONN_RETRANS) {
624 kref_put(&(container_of(rb, struct neighbor,
625 rb_cr)->ref), neighbor_free);
626 } else if (caller == QOS_CALLER_ANNOUNCE) {
627 kref_put(&(container_of(rb,
628 struct announce_data, rb)->ref),
629 announce_data_free);
630 } else {
631 BUG();
634 kref_put(&(q->ref), kreffree_bug);
637 spin_unlock_irqrestore(&(q->qlock), iflags);
639 return rc;
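/* Tasklet body: drain the four caller classes in priority order (kernel
* packets, conn retransmits, announces, conn data). After any class that had
* work queued, i is reset to 0 so the higher-priority classes are rechecked
* first. On congestion, qos_resume_timer is re-armed with a delay that grows
* with the time since the last progress (capped at HZ/10), so that layer 2
* is not starved by overly fast retries. */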
642 void qos_resume_taskfunc(unsigned long arg)
644 struct qos_queue *q = (struct qos_queue *) arg;
646 int rc;
647 int sent = 0;
648 unsigned long iflags;
649 int i = 0;
651 #warning todo limit runtime of resume task
653 spin_lock_irqsave(&(q->qlock), iflags);
655 while (i<4) {
656 struct list_head *lh;
658 rc = QOS_RESUME_DONE;
660 if (i == QOS_CALLER_KPACKET)
661 lh = &(q->kpackets_waiting);
662 else if (i == QOS_CALLER_CONN_RETRANS)
663 lh = &(q->conn_retrans_waiting);
664 else if (i == QOS_CALLER_ANNOUNCE)
665 lh = &(q->announce_waiting);
666 else if (i == QOS_CALLER_NEIGHBOR)
667 lh = &(q->neighbors_waiting);
668 else
669 BUG();
671 if (list_empty(lh)) {
672 i++;
673 continue;
676 spin_unlock_irqrestore(&(q->qlock), iflags);
677 if (i == QOS_CALLER_NEIGHBOR) {
678 rc = resume_neighbors(q, &sent);
679 } else {
680 rc = _qos_resume(q, i, &sent);
683 spin_lock_irqsave(&(q->qlock), iflags);
685 i = 0;
687 if (rc != QOS_RESUME_DONE && rc != QOS_RESUME_NEXTNEIGHBOR)
688 break;
691 if (rc == QOS_RESUME_DONE) {
692 BUG_ON(!list_empty(&(q->kpackets_waiting)));
693 BUG_ON(!list_empty(&(q->conn_retrans_waiting)));
694 BUG_ON(!list_empty(&(q->announce_waiting)));
695 BUG_ON(!list_empty(&(q->neighbors_waiting)));
697 q->qos_resume_scheduled = 0;
698 } else {
699 unsigned long jiffies_tmp = jiffies;
700 unsigned long delay = (jiffies_tmp - q->jiffies_lastprogress +
701 3) / 4;
703 if (sent || unlikely(delay <= 0)) {
704 q->jiffies_lastprogress = jiffies_tmp;
705 delay = 1;
706 } else if (delay > HZ/10) {
707 q->jiffies_lastprogress = jiffies_tmp - (HZ*4)/10;
708 delay = HZ/10;
711 /* If we retry too fast here, we might starve layer 2 */
712 if (mod_timer(&(q->qos_resume_timer), jiffies_tmp + delay) ==
713 0) {
714 kref_get(&(q->ref));
718 qos_queue_set_congstatus(q);
720 spin_unlock_irqrestore(&(q->qlock), iflags);
723 static inline int qos_queue_is_destroyed(struct qos_queue *q_locked)
725 return q_locked->dev == 0;
728 #warning todo kref (kref_put if tasklet is scheduled)
729 void qos_resume_timerfunc(struct timer_list *qos_resume_timer)
731 unsigned long iflags;
732 struct qos_queue *q = container_of(qos_resume_timer,
733 struct qos_queue, qos_resume_timer);
734 spin_lock_irqsave(&(q->qlock), iflags);
735 if (likely(!qos_queue_is_destroyed(q)))
736 tasklet_schedule(&(q->qos_resume_task));
737 spin_unlock_irqrestore(&(q->qlock), iflags);
739 kref_put(&(q->ref), free_qos);
742 struct qos_queue *get_queue(struct net_device *dev)
744 struct qos_queue *ret = 0;
745 struct list_head *curr;
747 spin_lock_bh(&(queues_lock));
748 curr = queues.next;
749 while (curr != (&queues)) {
750 struct qos_queue *q = container_of(curr,
751 struct qos_queue, queue_list);
752 if (q->dev == dev) {
753 ret = q;
754 kref_get(&(ret->ref));
755 break;
757 curr = curr->next;
759 spin_unlock_bh(&(queues_lock));
760 return ret;
763 static void _destroy_queue(struct qos_queue *q, int caller)
765 struct list_head *lh;
767 if (caller == QOS_CALLER_KPACKET)
768 lh = &(q->kpackets_waiting);
769 else if (caller == QOS_CALLER_CONN_RETRANS)
770 lh = &(q->conn_retrans_waiting);
771 else if (caller == QOS_CALLER_ANNOUNCE)
772 lh = &(q->announce_waiting);
773 else if (caller == QOS_CALLER_NEIGHBOR)
774 lh = &(q->neighbors_waiting);
775 else
776 BUG();
778 while (list_empty(lh) == 0) {
779 struct list_head *curr = lh->next;
780 struct resume_block *rb = container_of(curr,
781 struct resume_block, lh);
782 BUG_ON(rb->in_queue != RB_INQUEUE_TRUE);
783 rb->in_queue = RB_INQUEUE_FALSE;
784 list_del(curr);
786 if (caller == QOS_CALLER_KPACKET) {
787 kref_put(&(container_of(rb, struct neighbor,
788 rb_kp)->ref), neighbor_free);
789 } else if (caller == QOS_CALLER_CONN_RETRANS) {
790 kref_put(&(container_of(rb, struct neighbor,
791 rb_cr)->ref), neighbor_free);
792 } else if (caller == QOS_CALLER_ANNOUNCE) {
793 kref_put(&(container_of(rb,
794 struct announce_data, rb)->ref),
795 announce_data_free);
796 } else if (caller == QOS_CALLER_NEIGHBOR) {
797 kref_put(&(container_of(rb,
798 struct neighbor, rb)->ref),
799 neighbor_free);
800 } else {
801 BUG();
803 kref_put(&(q->ref), kreffree_bug);
807 static struct qos_queue *unlink_queue(struct net_device *dev)
809 struct qos_queue *ret = 0;
810 struct list_head *curr;
812 spin_lock_bh(&(queues_lock));
813 curr = queues.next;
814 while (curr != (&queues)) {
815 struct qos_queue *q = container_of(curr,
816 struct qos_queue, queue_list);
817 if (dev == 0 || q->dev == dev) {
818 ret = q;
819 kref_get(&(ret->ref));
821 list_del(&(q->queue_list));
822 kref_put(&(q->ref), kreffree_bug);
823 break;
825 curr = curr->next;
827 spin_unlock_bh(&(queues_lock));
828 return ret;
831 int destroy_queue(struct net_device *dev)
833 int rc = 1;
834 unsigned long iflags;
836 while (1) {
837 struct qos_queue *q = unlink_queue(dev);
839 if (q == 0)
840 break;
842 rc = 0;
844 spin_lock_irqsave(&(q->qlock), iflags);
845 if (q->dev != 0) {
846 dev_put(q->dev);
847 q->dev = 0;
849 _destroy_queue(q, QOS_CALLER_KPACKET);
850 _destroy_queue(q, QOS_CALLER_CONN_RETRANS);
851 _destroy_queue(q, QOS_CALLER_ANNOUNCE);
852 _destroy_queue(q, QOS_CALLER_NEIGHBOR);
853 spin_unlock_irqrestore(&(q->qlock), iflags);
855 tasklet_kill(&(q->qos_resume_task));
857 kref_put(&(q->ref), free_qos);
860 return rc;
863 int create_queue(struct net_device *dev)
865 struct qos_queue *q = kmalloc(sizeof(struct qos_queue), GFP_KERNEL);
867 if (q == 0) {
868 printk(KERN_ERR "cor: unable to allocate memory for device "
869 "queue, not enabling device");
870 return 1;
873 memset(q, 0, sizeof(struct qos_queue));
875 spin_lock_init(&(q->qlock));
877 kref_init(&(q->ref));
879 q->dev = dev;
880 dev_hold(dev);
882 timer_setup(&(q->qos_resume_timer), qos_resume_timerfunc, 0);
883 tasklet_init(&(q->qos_resume_task), qos_resume_taskfunc,
884 (unsigned long) q);
886 INIT_LIST_HEAD(&(q->kpackets_waiting));
887 INIT_LIST_HEAD(&(q->conn_retrans_waiting));
888 INIT_LIST_HEAD(&(q->announce_waiting));
889 INIT_LIST_HEAD(&(q->neighbors_waiting));
891 atomic_set(&(q->cong_status), 0);
893 spin_lock_bh(&(queues_lock));
894 list_add(&(q->queue_list), &queues);
895 spin_unlock_bh(&(queues_lock));
897 return 0;
900 static void qos_queue_set_congstatus(struct qos_queue *q_locked)
902 __u32 newstatus;
904 if (time_before(q_locked->jiffies_lastdrop, jiffies - HZ/50)) {
905 newstatus = CONGSTATUS_NONE;
906 } else if (list_empty(&(q_locked->kpackets_waiting)) == 0) {
907 newstatus = CONGSTATUS_KPACKETS;
908 } else if (list_empty(&(q_locked->conn_retrans_waiting)) == 0) {
909 newstatus = CONGSTATUS_RETRANS;
910 } else if (list_empty(&(q_locked->announce_waiting)) == 0) {
911 newstatus = CONGSTATUS_ANNOUNCE;
912 } else if (list_empty(&(q_locked->neighbors_waiting)) == 0) {
913 newstatus = CONGSTATUS_CONNDATA;
914 } else {
915 newstatus = CONGSTATUS_NONE;
918 atomic_set(&(q_locked->cong_status), newstatus);
921 void qos_set_lastdrop(struct qos_queue *q)
923 unsigned long iflags;
925 spin_lock_irqsave(&(q->qlock), iflags);
926 q->jiffies_lastdrop = jiffies;
927 qos_queue_set_congstatus(q);
928 spin_unlock_irqrestore(&(q->qlock), iflags);
932 * if caller == QOS_CALLER_NEIGHBOR, nb->conns_waiting.lock must be held by
933 * caller
935 static void _qos_enqueue(struct qos_queue *q, struct resume_block *rb,
936 int caller, int from_nbcongwin_resume)
938 int queues_empty;
940 if (rb->in_queue == RB_INQUEUE_TRUE) {
941 BUG_ON(caller == QOS_CALLER_NEIGHBOR);
942 return;
943 } else if (rb->in_queue == RB_INQUEUE_NBCONGWIN &&
944 from_nbcongwin_resume == 0) {
945 return;
948 if (unlikely(qos_queue_is_destroyed(q)))
949 return;
951 queues_empty = list_empty(&(q->kpackets_waiting)) &&
952 list_empty(&(q->conn_retrans_waiting)) &&
953 list_empty(&(q->announce_waiting)) &&
954 list_empty(&(q->neighbors_waiting));
956 BUG_ON(!queues_empty && q->qos_resume_scheduled == 0);
958 rb->in_queue = RB_INQUEUE_TRUE;
960 if (caller == QOS_CALLER_KPACKET) {
961 list_add(&(rb->lh), &(q->kpackets_waiting));
962 kref_get(&(container_of(rb, struct neighbor, rb_kp)->ref));
963 } else if (caller == QOS_CALLER_CONN_RETRANS) {
964 list_add(&(rb->lh) , &(q->conn_retrans_waiting));
965 kref_get(&(container_of(rb, struct neighbor, rb_cr)->ref));
966 } else if (caller == QOS_CALLER_ANNOUNCE) {
967 list_add(&(rb->lh), &(q->announce_waiting));
968 kref_get(&(container_of(rb, struct announce_data, rb)->ref));
969 } else if (caller == QOS_CALLER_NEIGHBOR) {
970 struct neighbor *nb = container_of(rb, struct neighbor, rb);
971 list_add(&(rb->lh), &(q->neighbors_waiting));
972 kref_get(&(nb->ref));
973 BUG_ON(nb->conns_waiting.cnt == 0);
974 q->numconns += nb->conns_waiting.cnt;
975 q->priority_sum += nb->conns_waiting.priority_sum;
976 } else {
977 BUG();
979 kref_get(&(q->ref));
981 if (q->qos_resume_scheduled == 0) {
982 q->jiffies_lastprogress = jiffies;
983 q->qos_resume_scheduled = 1;
984 if (caller == QOS_CALLER_KPACKET || from_nbcongwin_resume) {
985 tasklet_schedule(&(q->qos_resume_task));
986 } else {
987 if (mod_timer(&(q->qos_resume_timer), jiffies + 1) ==
988 0) {
989 kref_get(&(q->ref));
994 qos_queue_set_congstatus(q);
997 void qos_enqueue(struct qos_queue *q, struct resume_block *rb, int caller)
999 unsigned long iflags;
1001 spin_lock_irqsave(&(q->qlock), iflags);
1002 _qos_enqueue(q, rb, caller, 0);
1003 spin_unlock_irqrestore(&(q->qlock), iflags);
1006 void qos_remove_conn(struct conn *trgt_out_lx)
1008 unsigned long iflags;
1009 struct neighbor *nb = trgt_out_lx->target.out.nb;
1010 struct qos_queue *q = nb->queue;
1011 int sched_cmsg = 0;
1012 int krefput_nb = 0;
1014 BUG_ON(trgt_out_lx->targettype != TARGET_OUT);
1015 BUG_ON(q == 0);
1017 spin_lock_irqsave(&(nb->conns_waiting.lock), iflags);
1018 if (trgt_out_lx->target.out.rb.in_queue == RB_INQUEUE_FALSE) {
1019 spin_unlock_irqrestore(&(nb->conns_waiting.lock), iflags);
1020 return;
1022 spin_lock(&(q->qlock));
1024 trgt_out_lx->target.out.rb.in_queue = RB_INQUEUE_FALSE;
1025 list_del(&(trgt_out_lx->target.out.rb.lh));
1026 BUG_ON(nb->conns_waiting.cnt == 0);
1027 nb->conns_waiting.cnt--;
1028 if (nb->rb.in_queue == RB_INQUEUE_TRUE) {
1029 BUG_ON(q->numconns == 0);
1030 q->numconns--;
1033 BUG_ON(nb->conns_waiting.priority_sum <
1034 trgt_out_lx->target.out.rb_priority);
1035 BUG_ON(q->priority_sum < trgt_out_lx->target.out.rb_priority);
1036 nb->conns_waiting.priority_sum -=
1037 trgt_out_lx->target.out.rb_priority;
1038 q->priority_sum -= trgt_out_lx->target.out.rb_priority;
1039 trgt_out_lx->target.out.rb_priority = 0;
1041 if (list_empty(&(nb->conns_waiting.lh)) &&
1042 list_empty(&(nb->conns_waiting.lh_nextpass))) {
1043 BUG_ON(nb->conns_waiting.priority_sum != 0);
1044 BUG_ON(nb->conns_waiting.cnt != 0);
1045 } else {
1046 BUG_ON(nb->conns_waiting.cnt == 0);
1049 if (list_empty(&(nb->conns_waiting.lh)) &&
1050 list_empty(&(nb->conns_waiting.lh_nextpass)) &&
1051 nb->rb.in_queue == RB_INQUEUE_TRUE) {
1052 nb->rb.in_queue = RB_INQUEUE_FALSE;
1053 list_del(&(nb->rb.lh));
1054 if (atomic_read(&(nb->cmsg_delay_conndata)) != 0) {
1055 atomic_set(&(nb->cmsg_delay_conndata), 0);
1056 sched_cmsg = 1;
1059 krefput_nb = 1;
1061 BUG_ON(list_empty(&(q->neighbors_waiting)) && q->numconns != 0);
1062 BUG_ON(list_empty(&(q->neighbors_waiting)) &&
1063 q->priority_sum != 0);
1065 qos_queue_set_congstatus(q);
1068 spin_unlock(&(q->qlock));
1069 spin_unlock_irqrestore(&(nb->conns_waiting.lock), iflags);
1071 if (sched_cmsg) {
1072 spin_lock_bh(&(nb->cmsg_lock));
1073 schedule_controlmsg_timer(nb);
1074 spin_unlock_bh(&(nb->cmsg_lock));
1077 kref_put(&(trgt_out_lx->ref), kreffree_bug);
1079 if (krefput_nb)
1080 kref_put(&(nb->ref), neighbor_free);
1083 static void qos_enqueue_conn(struct conn *trgt_out_lx)
1085 unsigned long iflags;
1086 struct neighbor *nb = trgt_out_lx->target.out.nb;
1087 struct qos_queue *q;
1089 BUG_ON(trgt_out_lx->data_buf.read_remaining == 0);
1091 spin_lock_irqsave(&(nb->conns_waiting.lock), iflags);
1093 if (trgt_out_lx->target.out.rb.in_queue != RB_INQUEUE_FALSE)
1094 goto out;
1096 trgt_out_lx->target.out.rb.in_queue = RB_INQUEUE_TRUE;
1097 list_add(&(trgt_out_lx->target.out.rb.lh), &(nb->conns_waiting.lh));
1098 kref_get(&(trgt_out_lx->ref));
1099 nb->conns_waiting.cnt++;
1101 q = trgt_out_lx->target.out.nb->queue;
1102 spin_lock(&(q->qlock));
1103 if (nb->rb.in_queue == RB_INQUEUE_TRUE) {
1104 q->numconns++;
1105 } else {
1106 _qos_enqueue(q, &(nb->rb), QOS_CALLER_NEIGHBOR, 0);
1108 spin_unlock(&(q->qlock));
1110 out:
1111 spin_unlock_irqrestore(&(nb->conns_waiting.lock), iflags);
1114 static struct sk_buff *create_packet(struct neighbor *nb, int size,
1115 gfp_t alloc_flags)
1117 struct sk_buff *ret;
1119 ret = alloc_skb(size + LL_RESERVED_SPACE(nb->dev) +
1120 nb->dev->needed_tailroom, alloc_flags);
1121 if (unlikely(ret == 0))
1122 return 0;
1124 ret->protocol = htons(ETH_P_COR);
1125 ret->dev = nb->dev;
1127 skb_reserve(ret, LL_RESERVED_SPACE(nb->dev));
1128 if (unlikely(dev_hard_header(ret, nb->dev, ETH_P_COR, nb->mac,
1129 nb->dev->dev_addr, ret->len) < 0)) {
kfree_skb(ret); /* do not leak the freshly allocated skb on header failure */
1130 return 0;
}
1131 skb_reset_network_header(ret);
1133 return ret;
1136 struct sk_buff *create_packet_cmsg(struct neighbor *nb, int size,
1137 gfp_t alloc_flags, __u64 seqno)
1139 struct sk_buff *ret;
1140 char *dest;
1142 ret = create_packet(nb, size + 7, alloc_flags);
1143 if (unlikely(ret == 0))
1144 return 0;
1146 dest = skb_put(ret, 7);
1147 BUG_ON(dest == 0);
1149 dest[0] = PACKET_TYPE_CMSG;
1150 dest += 1;
1152 put_u48(dest, seqno);
1153 dest += 6;
1155 return ret;
1158 struct sk_buff *create_packet_conndata(struct neighbor *nb, int size,
1159 gfp_t alloc_flags, __u32 conn_id, __u64 seqno,
1160 __u8 snd_delayed_lowbuf, __u8 flush)
1162 struct sk_buff *ret;
1163 char *dest;
1165 ret = create_packet(nb, size + 11, alloc_flags);
1166 if (unlikely(ret == 0))
1167 return 0;
1169 dest = skb_put(ret, 11);
1170 BUG_ON(dest == 0);
1172 if (flush != 0) {
1173 if (snd_delayed_lowbuf != 0) {
1174 dest[0] = PACKET_TYPE_CONNDATA_LOWBUFDELAYED_FLUSH;
1175 } else {
1176 dest[0] = PACKET_TYPE_CONNDATA_FLUSH;
1178 } else {
1179 if (snd_delayed_lowbuf != 0) {
1180 dest[0] = PACKET_TYPE_CONNDATA_LOWBUFDELAYED;
1181 } else {
1182 dest[0] = PACKET_TYPE_CONNDATA;
1185 dest += 1;
1187 put_u32(dest, conn_id);
1188 dest += 4;
1189 put_u48(dest, seqno);
1190 dest += 6;
1192 return ret;
1195 void reschedule_conn_retrans_timer(struct neighbor *nb_retransconnlocked)
1197 struct conn_retrans *cr = 0;
1199 if (list_empty(&(nb_retransconnlocked->retrans_conn_list)))
1200 return;
1202 cr = container_of(nb_retransconnlocked->retrans_conn_list.next,
1203 struct conn_retrans, timeout_list);
1205 if (time_before_eq(cr->timeout, jiffies)) {
1206 qos_enqueue(nb_retransconnlocked->queue,
1207 &(nb_retransconnlocked->rb_cr),
1208 QOS_CALLER_CONN_RETRANS);
1209 } else {
1210 if (mod_timer(&(nb_retransconnlocked->retrans_conn_timer),
1211 cr->timeout) == 0) {
1212 kref_get(&(nb_retransconnlocked->ref));
1218 * warning:
1219 * caller must also call kref_get/put, see reschedule_conn_retrans_timer
1221 static void cancel_conn_retrans(struct neighbor *nb_retransconnlocked,
1222 struct conn *trgt_out_lx, struct conn_retrans *cr,
1223 __u64 *bytes_acked)
1225 if (unlikely(cr->state == CONN_RETRANS_ACKED))
1226 return;
1228 if (cr->state == CONN_RETRANS_SCHEDULED) {
1229 list_del(&(cr->timeout_list));
1230 } else if (cr->state == CONN_RETRANS_LOWWINDOW) {
1231 BUG_ON(trgt_out_lx->target.out.retrans_lowwindow == 0);
1232 if (likely(trgt_out_lx->target.out.retrans_lowwindow != 65535))
1233 trgt_out_lx->target.out.retrans_lowwindow--;
1236 if (cr->state != CONN_RETRANS_INITIAL)
1237 *bytes_acked += cr->length;
1239 list_del(&(cr->conn_list));
1240 cr->state = CONN_RETRANS_ACKED;
1242 kref_put(&(cr->ref), free_connretrans);
1246 * nb->retrans_conn_lock must be held when calling this
1247 * (see schedule_retransmit_conn())
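*
* Cancels every retransmit that is fully covered by seqno_acked and trims
* the front of a partially covered one, then reschedules the retrans timer.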
1249 static void cancel_acked_conn_retrans(struct conn *trgt_out_l,
1250 __u64 *bytes_acked)
1252 __u64 seqno_acked = trgt_out_l->target.out.seqno_acked;
1254 while (list_empty(&(trgt_out_l->target.out.retrans_list)) == 0) {
1255 struct conn_retrans *cr = container_of(
1256 trgt_out_l->target.out.retrans_list.next,
1257 struct conn_retrans, conn_list);
1259 if (seqno_after(cr->seqno + cr->length, seqno_acked)) {
1260 if (seqno_before(cr->seqno, seqno_acked)) {
1261 *bytes_acked += seqno_clean(seqno_acked -
1262 cr->seqno);
1263 cr->length -= seqno_clean(seqno_acked -
1264 cr->seqno);
1265 cr->seqno = seqno_acked;
1267 break;
1270 cancel_conn_retrans(trgt_out_l->target.out.nb, trgt_out_l, cr,
1271 bytes_acked);
1274 reschedule_conn_retrans_timer(trgt_out_l->target.out.nb);
1277 void cancel_all_conn_retrans(struct conn *trgt_out_lx)
1279 struct neighbor *nb = trgt_out_lx->target.out.nb;
1280 __u64 bytes_acked = 0;
1282 spin_lock_bh(&(nb->retrans_conn_lock));
1284 while (list_empty(&(trgt_out_lx->target.out.retrans_list)) == 0) {
1285 struct conn_retrans *cr = container_of(
1286 trgt_out_lx->target.out.retrans_list.next,
1287 struct conn_retrans, conn_list);
1288 BUG_ON(cr->trgt_out_o != trgt_out_lx);
1290 cancel_conn_retrans(nb, trgt_out_lx, cr, &bytes_acked);
1293 reschedule_conn_retrans_timer(nb);
1295 spin_unlock_bh(&(nb->retrans_conn_lock));
1297 if (bytes_acked > 0)
1298 nbcongwin_data_acked(nb, bytes_acked);
1301 static void cancel_all_conn_retrans_nb(struct neighbor *nb)
1303 __u64 bytes_acked = 0;
1305 while (1) {
1306 struct conn_retrans *cr;
1308 spin_lock_bh(&(nb->retrans_conn_lock));
1310 if (list_empty(&(nb->retrans_conn_list))) {
1311 spin_unlock_bh(&(nb->retrans_conn_lock));
1312 break;
1315 cr = container_of(nb->retrans_conn_list.next,
1316 struct conn_retrans, timeout_list);
1318 kref_get(&(cr->ref));
1320 spin_unlock_bh(&(nb->retrans_conn_lock));
1323 spin_lock_bh(&(cr->trgt_out_o->rcv_lock));
1324 spin_lock_bh(&(nb->retrans_conn_lock));
1326 if (likely(cr == container_of(nb->retrans_conn_list.next,
1327 struct conn_retrans, timeout_list)))
1328 cancel_conn_retrans(nb, cr->trgt_out_o, cr,
1329 &bytes_acked);
1331 spin_unlock_bh(&(nb->retrans_conn_lock));
1332 spin_unlock_bh(&(cr->trgt_out_o->rcv_lock));
1334 kref_put(&(cr->ref), free_connretrans);
1337 if (bytes_acked > 0)
1338 nbcongwin_data_acked(nb, bytes_acked);
1341 static struct conn_retrans *prepare_conn_retrans(struct conn *trgt_out_l,
1342 __u64 seqno, __u32 len, __u8 snd_delayed_lowbuf,
1343 struct conn_retrans *cr_splitted, int retransconnlocked)
1345 struct neighbor *nb = trgt_out_l->target.out.nb;
1347 struct conn_retrans *cr = kmem_cache_alloc(connretrans_slab,
1348 GFP_ATOMIC);
1350 if (unlikely(cr == 0))
1351 return 0;
1353 BUG_ON(trgt_out_l->isreset != 0);
1355 memset(cr, 0, sizeof (struct conn_retrans));
1356 cr->trgt_out_o = trgt_out_l;
1357 kref_get(&(trgt_out_l->ref));
1358 cr->seqno = seqno;
1359 cr->length = len;
1360 cr->snd_delayed_lowbuf = snd_delayed_lowbuf;
1361 kref_init(&(cr->ref));
1363 kref_get(&(cr->ref));
1364 if (retransconnlocked == 0)
1365 spin_lock_bh(&(nb->retrans_conn_lock));
1367 if (cr_splitted != 0)
1368 list_add(&(cr->conn_list), &(cr_splitted->conn_list));
1369 else
1370 list_add_tail(&(cr->conn_list),
1371 &(cr->trgt_out_o->target.out.retrans_list));
1373 if (retransconnlocked == 0)
1374 spin_unlock_bh(&(nb->retrans_conn_lock));
1376 return cr;
1379 #define RC_SENDRETRANS_OK 0
1380 #define RC_SENDRETRANS_OOM 1
1381 #define RC_SENDRETRANS_QUEUEFULL 2
1382 #define RC_SENDRETRANS_QUEUEFULLDROPPED 3
1384 static int __send_retrans(struct neighbor *nb, struct conn *trgt_out_l,
1385 struct conn_retrans *cr, __u64 *bytes_sent)
1387 __u8 flush = 0;
1389 BUG_ON(cr->length == 0);
1391 if (trgt_out_l->flush != 0 && seqno_eq(cr->seqno + cr->length,
1392 trgt_out_l->target.out.seqno_nextsend) &&
1393 trgt_out_l->data_buf.read_remaining == 0)
1394 flush = 1;
1396 if (send_conndata_as_skb(nb, cr->length)) {
1397 struct sk_buff *skb;
1398 char *dst;
1399 int rc;
1401 skb = create_packet_conndata(nb, cr->length, GFP_ATOMIC,
1402 trgt_out_l->target.out.conn_id, cr->seqno,
1403 cr->snd_delayed_lowbuf, flush);
1404 if (unlikely(skb == 0))
1405 return RC_SENDRETRANS_OOM;
1407 dst = skb_put(skb, cr->length);
1409 databuf_pullold(trgt_out_l, cr->seqno, dst, cr->length);
1411 rc = cor_dev_queue_xmit(skb, nb->queue,
1412 QOS_CALLER_CONN_RETRANS);
1413 if (rc == NET_XMIT_DROP)
1414 return RC_SENDRETRANS_QUEUEFULLDROPPED;
1415 schedule_retransmit_conn(cr, 1, 0);
1416 if (rc != NET_XMIT_SUCCESS)
1417 return RC_SENDRETRANS_QUEUEFULL;
1419 } else {
1420 struct control_msg_out *cm;
1421 char *buf;
1423 buf = kmalloc(cr->length, GFP_ATOMIC);
1424 if (unlikely(buf == 0))
1425 return RC_SENDRETRANS_OOM;
1427 cm = alloc_control_msg(nb, ACM_PRIORITY_LOW);
1428 if (unlikely(cm == 0)) {
1429 kfree(buf);
1430 return RC_SENDRETRANS_OOM;
1433 databuf_pullold(trgt_out_l, cr->seqno, buf, cr->length);
1435 send_conndata(cm, trgt_out_l->target.out.conn_id,
1436 cr->seqno, buf, buf, cr->length,
1437 cr->snd_delayed_lowbuf, flush,
1438 trgt_out_l->is_highlatency, cr);
1441 *bytes_sent += cr->length;
1443 return RC_SENDRETRANS_OK;
1446 static int _send_retrans_splitcr_ifneeded(struct neighbor *nb_retransconnlocked,
1447 struct conn *trgt_out_l, struct conn_retrans *cr)
1449 __u32 targetmss = mss_conndata(nb_retransconnlocked);
1450 __u64 windowlimit = seqno_clean(
1451 trgt_out_l->target.out.seqno_windowlimit -
1452 cr->seqno);
1453 __u32 maxsize = targetmss;
1454 if (windowlimit < maxsize)
1455 maxsize = windowlimit;
1457 if (unlikely(cr->length > maxsize)) {
1458 struct conn_retrans *cr2 = prepare_conn_retrans(trgt_out_l,
1459 cr->seqno + maxsize, cr->length - maxsize,
1460 cr->snd_delayed_lowbuf, cr, 1);
1461 if (unlikely(cr2 == 0))
1462 return RC_SENDRETRANS_OOM;
1464 cr2->timeout = cr->timeout;
1466 list_add(&(cr2->timeout_list),
1467 &(nb_retransconnlocked->retrans_conn_list));
1468 cr2->state = CONN_RETRANS_SCHEDULED;
1470 cr->length = maxsize;
1473 return RC_SENDRETRANS_OK;
1476 static int _send_retrans(struct neighbor *nb, struct conn_retrans *cr,
1477 __u64 *bytes_sent)
1480 struct conn *trgt_out_o = cr->trgt_out_o;
1481 int rc = RC_SENDRETRANS_OK;
1483 spin_lock_bh(&(trgt_out_o->rcv_lock));
1485 BUG_ON(trgt_out_o->targettype != TARGET_OUT);
1486 BUG_ON(trgt_out_o->target.out.nb != nb);
1488 spin_lock_bh(&(nb->retrans_conn_lock));
1489 if (unlikely(cr->state == CONN_RETRANS_ACKED)) {
1490 spin_unlock_bh(&(nb->retrans_conn_lock));
1491 goto out;
1494 BUG_ON(trgt_out_o->isreset != 0);
1496 BUG_ON(seqno_before(cr->seqno, trgt_out_o->target.out.seqno_acked));
1498 if (seqno_after_eq(cr->seqno,
1499 trgt_out_o->target.out.seqno_windowlimit)) {
1500 BUG_ON(cr->state != CONN_RETRANS_SENDING);
1501 cr->state = CONN_RETRANS_LOWWINDOW;
1502 if (likely(trgt_out_o->target.out.retrans_lowwindow != 65535))
1503 trgt_out_o->target.out.retrans_lowwindow++;
1505 spin_unlock_bh(&(nb->retrans_conn_lock));
1506 goto out;
1509 rc = _send_retrans_splitcr_ifneeded(nb, trgt_out_o, cr);
1511 spin_unlock_bh(&(nb->retrans_conn_lock));
1513 kref_get(&(trgt_out_o->ref));
1515 if (rc == RC_SENDRETRANS_OK)
1516 rc = __send_retrans(nb, trgt_out_o, cr, bytes_sent);
1518 if (rc == RC_SENDRETRANS_OOM || rc == RC_SENDRETRANS_QUEUEFULLDROPPED) {
1519 spin_lock_bh(&(nb->retrans_conn_lock));
1520 if (unlikely(cr->state == CONN_RETRANS_ACKED)) {
1521 } else if (likely(cr->state == CONN_RETRANS_SENDING)) {
1522 if (rc == RC_SENDRETRANS_OOM)
1523 cr->timeout = jiffies + 1;
1524 list_add(&(cr->timeout_list), &(nb->retrans_conn_list));
1525 cr->state = CONN_RETRANS_SCHEDULED;
1526 } else {
1527 BUG();
1529 spin_unlock_bh(&(nb->retrans_conn_lock));
1532 out:
1533 spin_unlock_bh(&(trgt_out_o->rcv_lock));
1535 kref_put(&(trgt_out_o->ref), free_conn);
1537 return (rc == RC_SENDRETRANS_OOM ||
1538 rc == RC_SENDRETRANS_QUEUEFULL ||
1539 rc == RC_SENDRETRANS_QUEUEFULLDROPPED);
1542 static int send_retrans(struct neighbor *nb, int *sent)
1544 int queuefull = 0;
1545 int nbstate = get_neigh_state(nb);
1546 __u64 bytes_sent = 0;
1548 if (unlikely(nbstate == NEIGHBOR_STATE_STALLED)) {
1549 return QOS_RESUME_DONE;
1550 } else if (unlikely(nbstate == NEIGHBOR_STATE_KILLED)) {
1552 * cancel_all_conn_retrans_nb should not be needed, because
1553 * reset_all_conns calls cancel_all_conn_retrans
1555 cancel_all_conn_retrans_nb(nb);
1556 return QOS_RESUME_DONE;
1559 while (1) {
1560 struct conn_retrans *cr = 0;
1562 spin_lock_bh(&(nb->retrans_conn_lock));
1564 if (list_empty(&(nb->retrans_conn_list))) {
1565 spin_unlock_bh(&(nb->retrans_conn_lock));
1566 break;
1569 cr = container_of(nb->retrans_conn_list.next,
1570 struct conn_retrans, timeout_list);
1572 BUG_ON(cr->state != CONN_RETRANS_SCHEDULED);
1574 if (time_after(cr->timeout, jiffies)) {
1575 spin_unlock_bh(&(nb->retrans_conn_lock));
1576 break;
1579 kref_get(&(cr->ref));
1580 list_del(&(cr->timeout_list));
1581 cr->state = CONN_RETRANS_SENDING;
1583 spin_unlock_bh(&(nb->retrans_conn_lock));
1585 queuefull = _send_retrans(nb, cr, &bytes_sent);
1586 kref_put(&(cr->ref), free_connretrans);
1587 if (queuefull) {
1588 break;
1589 } else {
1590 *sent = 1;
1594 if (bytes_sent > 0)
1595 nbcongwin_data_retransmitted(nb, bytes_sent);
1597 return queuefull ? QOS_RESUME_CONG : QOS_RESUME_DONE;
1600 void retransmit_conn_timerfunc(struct timer_list *retrans_conn_timer)
1602 struct neighbor *nb = container_of(retrans_conn_timer,
1603 struct neighbor, retrans_conn_timer);
1604 qos_enqueue(nb->queue, &(nb->rb_cr), QOS_CALLER_CONN_RETRANS);
1605 kref_put(&(nb->ref), neighbor_free);
1608 static void conn_ack_ooo_rcvd_splitcr(struct conn *trgt_out_l,
1609 struct conn_retrans *cr, __u64 seqno_ooo, __u32 length,
1610 __u64 *bytes_acked)
1612 struct conn_retrans *cr2;
1613 __u64 seqno_cr2start;
1614 __u32 oldcrlength = cr->length;
1616 if (cr->state != CONN_RETRANS_SCHEDULED &&
1617 cr->state != CONN_RETRANS_LOWWINDOW)
1618 return;
1620 seqno_cr2start = seqno_ooo+length;
1621 cr2 = prepare_conn_retrans(trgt_out_l, seqno_cr2start,
1622 seqno_clean(cr->seqno + cr->length - seqno_cr2start),
1623 cr->snd_delayed_lowbuf, cr, 1);
1625 if (unlikely(cr2 == 0))
1626 return;
1628 BUG_ON(cr2->length > cr->length);
1630 cr2->timeout = cr->timeout;
1631 cr2->state = cr->state;
1633 if (cr->state != CONN_RETRANS_SCHEDULED)
1634 list_add(&(cr2->timeout_list), &(cr->timeout_list));
1636 BUG_ON(seqno_clean(seqno_ooo - cr->seqno) > cr->length);
1638 cr->length -= seqno_clean(seqno_ooo - cr->seqno);
1639 BUG_ON(cr->length + length + cr2->length != oldcrlength);
1641 *bytes_acked += length;
1644 void conn_ack_ooo_rcvd(struct neighbor *nb, __u32 conn_id,
1645 struct conn *trgt_out, __u64 seqno_ooo, __u32 length,
1646 __u64 *bytes_acked)
1648 struct list_head *curr;
1650 if (unlikely(length == 0))
1651 return;
1653 spin_lock_bh(&(trgt_out->rcv_lock));
1655 if (unlikely(trgt_out->targettype != TARGET_OUT))
1656 goto out;
1657 if (unlikely(trgt_out->target.out.nb != nb))
1658 goto out;
1659 if (unlikely(trgt_out->target.out.conn_id != conn_id))
1660 goto out;
1662 kref_get(&(nb->ref));
1663 spin_lock_bh(&(nb->retrans_conn_lock));
1665 curr = trgt_out->target.out.retrans_list.next;
1667 while (curr != &(trgt_out->target.out.retrans_list)) {
1668 struct conn_retrans *cr = container_of(curr,
1669 struct conn_retrans, conn_list);
1671 int ack_covers_start = seqno_after_eq(cr->seqno, seqno_ooo);
1672 int ack_covers_end = seqno_before_eq(cr->seqno + cr->length,
1673 seqno_ooo + length);
1675 curr = curr->next;
1677 if (seqno_before(cr->seqno + cr->length, seqno_ooo))
1678 continue;
1680 if (seqno_after(cr->seqno, seqno_ooo + length))
1681 break;
1683 if (likely(ack_covers_start && ack_covers_end)) {
1684 cancel_conn_retrans(nb, trgt_out, cr, bytes_acked);
1685 reschedule_conn_retrans_timer(nb);
1686 } else if (ack_covers_start) {
1687 __u32 diff = seqno_ooo + length - cr->seqno -
1688 cr->length;
1689 BUG_ON(diff >= cr->length);
1690 cr->seqno += diff;
1691 cr->length -= diff;
1692 *bytes_acked += diff;
1693 } else if (ack_covers_end) {
1694 __u32 diff = seqno_ooo + length - cr->seqno;
1695 BUG_ON(diff >= length);
1696 cr->length -= diff;
1697 *bytes_acked += diff;
1698 } else {
1699 conn_ack_ooo_rcvd_splitcr(trgt_out, cr, seqno_ooo,
1700 length, bytes_acked);
1701 break;
1705 if (unlikely(list_empty(&(trgt_out->target.out.retrans_list)))) {
1706 trgt_out->target.out.seqno_acked =
1707 trgt_out->target.out.seqno_nextsend;
1708 } else {
1709 struct conn_retrans *cr = container_of(
1710 trgt_out->target.out.retrans_list.next,
1711 struct conn_retrans, conn_list);
1712 if (seqno_after(cr->seqno, trgt_out->target.out.seqno_acked))
1713 trgt_out->target.out.seqno_acked = cr->seqno;
1716 spin_unlock_bh(&(nb->retrans_conn_lock));
1717 kref_put(&(nb->ref), neighbor_free);
1719 out:
1720 spin_unlock_bh(&(trgt_out->rcv_lock));
1723 static void _conn_ack_rcvd_nosendwin(struct conn *trgt_out_l)
1725 if (trgt_out_l->bufsize.state == BUFSIZE_INCR ||
1726 trgt_out_l->bufsize.state == BUFSIZE_INCR_FAST)
1727 trgt_out_l->bufsize.state = BUFSIZE_NOACTION;
1729 if (trgt_out_l->bufsize.state == BUFSIZE_NOACTION)
1730 trgt_out_l->bufsize.act.noact.bytesleft = max(
1731 trgt_out_l->bufsize.act.noact.bytesleft,
1732 (__u32) BUF_OUT_WIN_NOK_NOINCR);
1734 trgt_out_l->bufsize.ignore_rcv_lowbuf = max(
1735 trgt_out_l->bufsize.ignore_rcv_lowbuf,
1736 (__u32) BUF_OUT_WIN_NOK_NOINCR);
1740 * nb->retrans_conn_lock must be held when calling this
1741 * (see schedule_retransmit_conn())
1743 static void reschedule_lowwindow_retrans(struct conn *trgt_out_l)
1745 struct list_head *lh = trgt_out_l->target.out.retrans_list.next;
1746 int cnt = 0;
1748 while (trgt_out_l->target.out.retrans_lowwindow > 0 && cnt < 100) {
1749 struct conn_retrans *cr;
1751 if (unlikely(lh == &(trgt_out_l->target.out.retrans_list))) {
1752 BUG_ON(trgt_out_l->target.out.retrans_lowwindow !=
1753 65535);
1754 trgt_out_l->target.out.retrans_lowwindow = 0;
1755 break;
1758 cr = container_of(lh, struct conn_retrans, conn_list);
1760 if (seqno_after_eq(cr->seqno,
1761 trgt_out_l->target.out.seqno_windowlimit)) {
1762 break;
1765 if (cr->state == CONN_RETRANS_LOWWINDOW)
1766 schedule_retransmit_conn(cr, 1, 1);
1768 lh = lh->next;
1769 cnt++;
1773 void conn_ack_rcvd(struct neighbor *nb, __u32 conn_id, struct conn *trgt_out,
1774 __u64 seqno, int setwindow, __u8 window, __u64 *bytes_acked)
1776 int seqno_advanced = 0;
1777 int window_enlarged = 0;
1779 spin_lock_bh(&(trgt_out->rcv_lock));
1781 if (unlikely(trgt_out->isreset != 0))
1782 goto out;
1783 if (unlikely(trgt_out->targettype != TARGET_OUT))
1784 goto out;
1785 if (unlikely(trgt_out->target.out.nb != nb))
1786 goto out;
1787 if (unlikely(trgt_out->reversedir->source.in.conn_id != conn_id))
1788 goto out;
1790 if (unlikely(seqno_after(seqno, trgt_out->target.out.seqno_nextsend) ||
1791 seqno_before(seqno, trgt_out->target.out.seqno_acked)))
1792 goto out;
1794 if (setwindow) {
1795 __u64 windowdec = dec_log_64_7(window);
1796 if (likely(seqno_after(seqno,
1797 trgt_out->target.out.seqno_acked)) ||
1798 seqno_after(seqno + windowdec,
1799 trgt_out->target.out.seqno_windowlimit)) {
1800 trgt_out->target.out.seqno_windowlimit = seqno +
1801 windowdec;
1802 window_enlarged = 1;
1806 if (seqno_after(seqno, trgt_out->target.out.seqno_acked))
1807 seqno_advanced = 1;
1809 if (seqno_advanced == 0 && window_enlarged == 0)
1810 goto out;
1812 kref_get(&(nb->ref));
1813 spin_lock_bh(&(nb->retrans_conn_lock));
1815 if (seqno_advanced) {
1816 trgt_out->target.out.seqno_acked = seqno;
1817 cancel_acked_conn_retrans(trgt_out, bytes_acked);
1820 if (window_enlarged)
1821 reschedule_lowwindow_retrans(trgt_out);
1823 spin_unlock_bh(&(nb->retrans_conn_lock));
1824 kref_put(&(nb->ref), neighbor_free);
1826 if (seqno_advanced)
1827 databuf_ack(trgt_out, trgt_out->target.out.seqno_acked);
1829 if (seqno_eq(trgt_out->target.out.seqno_acked,
1830 trgt_out->target.out.seqno_nextsend))
1831 _conn_ack_rcvd_nosendwin(trgt_out);
1833 out:
1834 if (seqno_advanced || window_enlarged)
1835 flush_buf(trgt_out);
1837 spin_unlock_bh(&(trgt_out->rcv_lock));
1839 wake_sender(trgt_out);
1842 static void try_combine_conn_retrans_prev(struct neighbor *nb_retransconnlocked,
1843 struct conn *trgt_out_lx, struct conn_retrans *cr)
1845 struct conn_retrans *cr_prev;
1846 __u64 bytes_dummyacked = 0;
1848 BUG_ON(cr->state != CONN_RETRANS_SCHEDULED);
1850 if (cr->conn_list.prev == &(trgt_out_lx->target.out.retrans_list))
1851 return;
1853 cr_prev = container_of(cr->conn_list.prev, struct conn_retrans,
1854 conn_list);
1856 if (cr_prev->state != CONN_RETRANS_SCHEDULED)
1857 return;
1858 if (cr_prev->timeout != cr->timeout)
1859 return;
1860 if (!seqno_eq(cr_prev->seqno + cr_prev->length, cr->seqno))
1861 return;
1863 cr->seqno -= cr_prev->length;
1864 cr->length += cr_prev->length;
1866 cancel_conn_retrans(nb_retransconnlocked, trgt_out_lx, cr_prev,
1867 &bytes_dummyacked);
1870 static void try_combine_conn_retrans_next(struct neighbor *nb_retranslocked,
1871 struct conn *trgt_out_lx, struct conn_retrans *cr)
1873 struct conn_retrans *cr_next;
1874 __u64 bytes_dummyacked = 0;
1876 BUG_ON(cr->state != CONN_RETRANS_SCHEDULED);
1878 if (cr->conn_list.next == &(trgt_out_lx->target.out.retrans_list))
1879 return;
1881 cr_next = container_of(cr->conn_list.next, struct conn_retrans,
1882 conn_list);
1884 if (cr_next->state != CONN_RETRANS_SCHEDULED)
1885 return;
1886 if (cr_next->timeout != cr->timeout)
1887 return;
1888 if (!seqno_eq(cr->seqno + cr->length, cr_next->seqno))
1889 return;
1891 cr->length += cr_next->length;
1893 cancel_conn_retrans(nb_retranslocked, trgt_out_lx, cr_next,
1894 &bytes_dummyacked);
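/* Arm the retransmission of cr: compute its timeout from the measured
* latency, its standard deviation and the remote ack delay, append it to the
* neighbor's timeout-ordered retrans_conn_list and either restart the
* retrans timer (if the list was empty) or try to merge cr with adjacent
* entries of the same conn that share the same timeout. */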
1897 void schedule_retransmit_conn(struct conn_retrans *cr, int connlocked,
1898 int nbretransconn_locked)
1900 struct conn *trgt_out_o = cr->trgt_out_o;
1901 struct neighbor *nb;
1902 int first;
1904 if (connlocked == 0)
1905 spin_lock_bh(&(trgt_out_o->rcv_lock));
1907 BUG_ON(trgt_out_o->targettype != TARGET_OUT);
1908 nb = trgt_out_o->target.out.nb;
1910 cr->timeout = calc_timeout(atomic_read(&(nb->latency_retrans_us)),
1911 atomic_read(&(nb->latency_stddev_retrans_us)),
1912 atomic_read(&(nb->max_remote_ackconn_delay_us)));
1914 if (nbretransconn_locked == 0)
1915 spin_lock_bh(&(nb->retrans_conn_lock));
1917 kref_get(&(nb->ref));
1919 BUG_ON(cr->state == CONN_RETRANS_SCHEDULED);
1921 if (unlikely(cr->state == CONN_RETRANS_ACKED)) {
1922 goto out;
1923 } else if (unlikely(cr->state == CONN_RETRANS_LOWWINDOW)) {
1924 BUG_ON(trgt_out_o->target.out.retrans_lowwindow == 0);
1925 if (likely(trgt_out_o->target.out.retrans_lowwindow != 65535))
1926 trgt_out_o->target.out.retrans_lowwindow--;
1929 first = unlikely(list_empty(&(nb->retrans_conn_list)));
1930 list_add_tail(&(cr->timeout_list), &(nb->retrans_conn_list));
1931 cr->state = CONN_RETRANS_SCHEDULED;
1933 if (unlikely(first)) {
1934 reschedule_conn_retrans_timer(nb);
1935 } else {
1936 try_combine_conn_retrans_prev(nb, trgt_out_o, cr);
1937 try_combine_conn_retrans_next(nb, trgt_out_o, cr);
1940 out:
1941 if (nbretransconn_locked == 0)
1942 spin_unlock_bh(&(nb->retrans_conn_lock));
1944 kref_put(&(nb->ref), neighbor_free);
1946 if (connlocked == 0)
1947 spin_unlock_bh(&(trgt_out_o->rcv_lock));
1950 static int _flush_out_skb(struct conn *trgt_out_lx, __u32 len,
1951 __u8 snd_delayed_lowbuf)
1953 struct neighbor *nb = trgt_out_lx->target.out.nb;
1955 __u64 seqno;
1956 struct conn_retrans *cr;
1957 struct sk_buff *skb;
1958 char *dst;
1959 __u8 flush = 0;
1960 int rc;
1962 if (trgt_out_lx->flush != 0 &&
1963 trgt_out_lx->data_buf.read_remaining == len)
1964 flush = 1;
1966 seqno = trgt_out_lx->target.out.seqno_nextsend;
1967 skb = create_packet_conndata(trgt_out_lx->target.out.nb, len,
1968 GFP_ATOMIC, trgt_out_lx->target.out.conn_id, seqno,
1969 snd_delayed_lowbuf, flush);
1970 if (unlikely(skb == 0))
1971 return RC_FLUSH_CONN_OUT_OOM;
1973 cr = prepare_conn_retrans(trgt_out_lx, seqno, len, snd_delayed_lowbuf,
1974 0, 0);
1975 if (unlikely(cr == 0)) {
1976 kfree_skb(skb);
1977 return RC_FLUSH_CONN_OUT_OOM;
1980 dst = skb_put(skb, len);
1982 databuf_pull(trgt_out_lx, dst, len);
1984 rc = cor_dev_queue_xmit(skb, nb->queue, QOS_CALLER_NEIGHBOR);
1985 if (rc == NET_XMIT_DROP) {
1986 databuf_unpull(trgt_out_lx, len);
1987 spin_lock_bh(&(nb->retrans_conn_lock));
1988 cancel_conn_retrans(nb, trgt_out_lx, cr, 0);
1989 spin_unlock_bh(&(nb->retrans_conn_lock));
1990 kref_put(&(cr->ref), free_connretrans);
1991 return RC_FLUSH_CONN_OUT_CONG;
1994 trgt_out_lx->target.out.seqno_nextsend += len;
1995 nbcongwin_data_sent(nb, len);
1996 schedule_retransmit_conn(cr, 1, 0);
1997 if (trgt_out_lx->sourcetype == SOURCE_SOCK)
1998 update_src_sock_sndspeed(trgt_out_lx, len);
2000 kref_put(&(cr->ref), free_connretrans);
2002 return (rc == NET_XMIT_SUCCESS) ?
2003 RC_FLUSH_CONN_OUT_OK : RC_FLUSH_CONN_OUT_SENT_CONG;
2006 static int _flush_out_conndata(struct conn *trgt_out_lx, __u32 len,
2007 __u8 snd_delayed_lowbuf)
2009 __u64 seqno;
2010 struct control_msg_out *cm;
2011 struct conn_retrans *cr;
2012 char *buf;
2013 __u8 flush = 0;
2015 if (trgt_out_lx->flush != 0 &&
2016 trgt_out_lx->data_buf.read_remaining == len)
2017 flush = 1;
2019 buf = kmalloc(len, GFP_ATOMIC);
2021 if (unlikely(buf == 0))
2022 return RC_FLUSH_CONN_OUT_OOM;
2024 cm = alloc_control_msg(trgt_out_lx->target.out.nb, ACM_PRIORITY_LOW);
2025 if (unlikely(cm == 0)) {
2026 kfree(buf);
2027 return RC_FLUSH_CONN_OUT_OOM;
2030 seqno = trgt_out_lx->target.out.seqno_nextsend;
2032 cr = prepare_conn_retrans(trgt_out_lx, seqno, len, snd_delayed_lowbuf,
2033 0, 0);
2034 if (unlikely(cr == 0)) {
2035 kfree(buf);
2036 free_control_msg(cm);
2037 return RC_FLUSH_CONN_OUT_OOM;
2040 databuf_pull(trgt_out_lx, buf, len);
2041 trgt_out_lx->target.out.seqno_nextsend += len;
2042 nbcongwin_data_sent(trgt_out_lx->target.out.nb, len);
2043 if (trgt_out_lx->sourcetype == SOURCE_SOCK)
2044 update_src_sock_sndspeed(trgt_out_lx, len);
2046 send_conndata(cm, trgt_out_lx->target.out.conn_id, seqno, buf, buf, len,
2047 snd_delayed_lowbuf, flush, trgt_out_lx->is_highlatency,
2048 cr);
2050 return RC_FLUSH_CONN_OUT_OK;
2053 int srcin_buflimit_reached(struct conn *src_in_lx)
2055 __u64 window_left;
2057 if (unlikely(seqno_before(src_in_lx->source.in.window_seqnolimit,
2058 src_in_lx->source.in.next_seqno)))
2059 return 1;
2061 window_left = seqno_clean(src_in_lx->source.in.window_seqnolimit -
2062 src_in_lx->source.in.next_seqno);
2064 if (window_left < WINDOW_ENCODE_MIN)
2065 return 1;
2067 if (window_left/2 < src_in_lx->data_buf.read_remaining)
2068 return 1;
2070 return 0;
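/* Round the remaining send budget down to a convenient chunk size: values
* below 128 are used as-is, mid-range values are rounded down to a power of
* two, larger ones to a multiple of 4096. */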
2073 static __u32 maxsend_left_to_len(__u32 maxsend_left)
2075 __u32 i;
2076 if (maxsend_left < 128)
2077 return maxsend_left;
2079 for (i=128;i<4096;) {
2080 if (i*2 > maxsend_left)
2081 return i;
2082 i = i*2;
2085 return maxsend_left - maxsend_left%4096;
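/* Returns 1 when the send window left after this packet would be smaller
* than a quarter (high-latency conns) or an eighth (otherwise) of the bytes
* still awaiting an ack; the caller then sets windowlimit_reached and sends
* with the lowbuf-delayed flag. */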
2088 static int seqno_low_sendlimit(struct conn *trgt_out_lx, __u64 windowlimit,
2089 __u32 sndlen)
2091 __u64 bytes_ackpending;
2093 BUG_ON(seqno_before(trgt_out_lx->target.out.seqno_nextsend,
2094 trgt_out_lx->target.out.seqno_acked));
2096 bytes_ackpending = seqno_clean(trgt_out_lx->target.out.seqno_nextsend -
2097 trgt_out_lx->target.out.seqno_acked);
2099 if (windowlimit <= sndlen)
2100 return 1;
2102 if (unlikely(bytes_ackpending + sndlen < bytes_ackpending))
2103 return 0;
2105 if (trgt_out_lx->is_highlatency != 0)
2106 return (windowlimit - sndlen < (bytes_ackpending + sndlen) / 4)
2107 ? 1 : 0;
2108 else
2109 return (windowlimit - sndlen < (bytes_ackpending + sndlen) / 8)
2110 ? 1 : 0;
2113 static void _flush_out_ignore_lowbuf(struct conn *trgt_out_lx)
2115 trgt_out_lx->bufsize.ignore_rcv_lowbuf = max(
2116 trgt_out_lx->bufsize.ignore_rcv_lowbuf,
2117 trgt_out_lx->bufsize.bufsize >> BUFSIZE_SHIFT);
2120 static __u64 get_windowlimit(struct conn *trgt_out_lx)
2122 if (unlikely(seqno_before(trgt_out_lx->target.out.seqno_windowlimit,
2123 trgt_out_lx->target.out.seqno_nextsend)))
2124 return 0;
2126 return seqno_clean(trgt_out_lx->target.out.seqno_windowlimit -
2127 trgt_out_lx->target.out.seqno_nextsend);
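/* Core send path for a conn with target out: emit full-mss packets while the
* data buffer, the maxsend budget, the receive window and the neighbor
* congestion window all permit it, then possibly one final smaller packet.
* The RC_FLUSH_CONN_OUT_* return code tells the caller whether the conn is
* done or has to be requeued (congestion, OOM, budget exhausted). */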
2130 static int _flush_out(struct conn *trgt_out_lx, __u32 maxsend, __u32 *sent,
2131 int from_qos)
2133 struct neighbor *nb = trgt_out_lx->target.out.nb;
2135 __u32 targetmss;
2137 int nbstate;
2139 __u8 snd_delayed_lowbuf = trgt_out_lx->target.out.windowlimit_reached;
2141 __u32 maxsend_left = maxsend;
2143 trgt_out_lx->target.out.windowlimit_reached = 0;
2145 BUG_ON(trgt_out_lx->targettype != TARGET_OUT);
2147 if (unlikely(trgt_out_lx->target.out.established == 0))
2148 return RC_FLUSH_CONN_OUT_OK;
2150 if (unlikely(trgt_out_lx->isreset != 0))
2151 return RC_FLUSH_CONN_OUT_OK;
2153 BUG_ON(trgt_out_lx->target.out.conn_id == 0);
2155 if (unlikely(trgt_out_lx->data_buf.read_remaining == 0))
2156 return RC_FLUSH_CONN_OUT_OK;
2158 #warning todo burst queue
2159 if (from_qos == 0 && qos_fastsend_allowed_conn(trgt_out_lx) == 0)
2160 return RC_FLUSH_CONN_OUT_CONG;
2162 spin_lock_bh(&(nb->stalledconn_lock));
2163 nbstate = get_neigh_state(nb);
2164 if (unlikely(nbstate == NEIGHBOR_STATE_STALLED)) {
2165 BUG_ON(trgt_out_lx->target.out.nbstalled_lh.prev == 0 &&
2166 trgt_out_lx->target.out.nbstalled_lh.next != 0);
2167 BUG_ON(trgt_out_lx->target.out.nbstalled_lh.prev != 0 &&
2168 trgt_out_lx->target.out.nbstalled_lh.next == 0);
2170 if (trgt_out_lx->target.out.nbstalled_lh.prev == 0) {
2171 kref_get(&(trgt_out_lx->ref));
2172 list_add_tail(&(trgt_out_lx->target.out.nbstalled_lh),
2173 &(nb->stalledconn_list));
2176 spin_unlock_bh(&(nb->stalledconn_lock));
2178 if (unlikely(nbstate != NEIGHBOR_STATE_ACTIVE))
2179 return RC_FLUSH_CONN_OUT_NBNOTACTIVE;
2181 /* printk(KERN_ERR "flush %p %llu %u", trgt_out_l,
2182 get_windowlimit(trgt_out_l),
2183 trgt_out_l->data_buf.read_remaining); */
2185 targetmss = mss_conndata(nb);
2187 while (trgt_out_lx->data_buf.read_remaining >= targetmss) {
2188 __u64 windowlimit = get_windowlimit(trgt_out_lx);
2189 int rc;
2191 if (maxsend_left < targetmss)
2192 break;
2194 if (windowlimit < targetmss) {
2195 trgt_out_lx->target.out.windowlimit_reached = 1;
2196 snd_delayed_lowbuf = 1;
2197 _flush_out_ignore_lowbuf(trgt_out_lx);
2198 break;
2201 if (nbcongwin_send_allowed(nb) == 0)
2202 return RC_FLUSH_CONN_OUT_CONG;
2204 if (seqno_low_sendlimit(trgt_out_lx, windowlimit, targetmss)) {
2205 trgt_out_lx->target.out.windowlimit_reached = 1;
2206 snd_delayed_lowbuf = 1;
2209 if (likely(send_conndata_as_skb(nb, targetmss)))
2210 rc = _flush_out_skb(trgt_out_lx, targetmss,
2211 snd_delayed_lowbuf);
2212 else
2213 rc = _flush_out_conndata(trgt_out_lx, targetmss,
2214 snd_delayed_lowbuf);
2216 if (rc == RC_FLUSH_CONN_OUT_OK ||
2217 rc == RC_FLUSH_CONN_OUT_SENT_CONG) {
2218 maxsend_left -= targetmss;
2219 *sent += targetmss;
2222 if (rc == RC_FLUSH_CONN_OUT_SENT_CONG)
2223 return RC_FLUSH_CONN_OUT_CONG;
2224 if (rc != RC_FLUSH_CONN_OUT_OK)
2225 return rc;
2228 if (trgt_out_lx->data_buf.read_remaining > 0) {
2229 __u32 len = trgt_out_lx->data_buf.read_remaining;
2230 __u64 windowlimit = get_windowlimit(trgt_out_lx);
2231 int rc;
2233 if (maxsend_left < len) {
2234 if (maxsend_left == maxsend && maxsend_left >= 128 &&
2235 trgt_out_lx->is_highlatency == 0) {
2236 len = maxsend_left_to_len(maxsend_left);
2237 } else {
2238 return RC_FLUSH_CONN_OUT_MAXSENT;
2242 if (trgt_out_lx->flush == 0 &&
2243 trgt_out_lx->sourcetype == SOURCE_SOCK &&
2244 cor_sock_sndbufavailable(trgt_out_lx) != 0)
2245 goto out;
2247 if (trgt_out_lx->flush == 0 &&
2248 trgt_out_lx->sourcetype == SOURCE_IN &&
2249 srcin_buflimit_reached(trgt_out_lx)
2250 == 0 && (
2251 seqno_eq(trgt_out_lx->target.out.seqno_nextsend,
2252 trgt_out_lx->target.out.seqno_acked) == 0 ||
2253 trgt_out_lx->is_highlatency != 0))
2254 goto out;
2256 if (trgt_out_lx->flush == 0 &&
2257 trgt_out_lx->sourcetype == SOURCE_UNCONNECTED &&
2258 cpacket_write_allowed(trgt_out_lx) != 0)
2259 goto out;
2261 if (windowlimit == 0 || (windowlimit < len &&
2262 seqno_eq(trgt_out_lx->target.out.seqno_nextsend,
2263 trgt_out_lx->target.out.seqno_acked) == 0)) {
2264 trgt_out_lx->target.out.windowlimit_reached = 1;
2265 snd_delayed_lowbuf = 1;
2266 _flush_out_ignore_lowbuf(trgt_out_lx);
2267 goto out;
2270 if (nbcongwin_send_allowed(nb) == 0)
2271 return RC_FLUSH_CONN_OUT_CONG;
2273 if (seqno_low_sendlimit(trgt_out_lx, windowlimit, len)) {
2274 trgt_out_lx->target.out.windowlimit_reached = 1;
2275 snd_delayed_lowbuf = 1;
2278 if (len > windowlimit) {
2279 len = windowlimit;
2280 _flush_out_ignore_lowbuf(trgt_out_lx);
2283 if (send_conndata_as_skb(nb, len))
2284 rc = _flush_out_skb(trgt_out_lx, len,
2285 snd_delayed_lowbuf);
2286 else
2287 rc = _flush_out_conndata(trgt_out_lx, len,
2288 snd_delayed_lowbuf);
2291 if (rc == RC_FLUSH_CONN_OUT_OK ||
2292 rc == RC_FLUSH_CONN_OUT_SENT_CONG) {
2293 maxsend_left -= len;
2294 *sent += len;
2297 if (rc == RC_FLUSH_CONN_OUT_SENT_CONG)
2298 return RC_FLUSH_CONN_OUT_CONG;
2299 if (rc != RC_FLUSH_CONN_OUT_OK)
2300 return rc;
2303 out:
2304 return RC_FLUSH_CONN_OUT_OK;
2307 int flush_out(struct conn *trgt_out_lx, __u32 *sent)
2309 int rc = _flush_out(trgt_out_lx, 1 << 30, sent, 0);
2311 if (rc == RC_FLUSH_CONN_OUT_CONG || rc == RC_FLUSH_CONN_OUT_MAXSENT ||
2312 rc == RC_FLUSH_CONN_OUT_OOM)
2313 qos_enqueue_conn(trgt_out_lx);
2315 return rc;
2318 void resume_nbstalled_conns(struct work_struct *work)
2320 struct neighbor *nb = container_of(work, struct neighbor,
2321 stalledconn_work);
2322 int rc = RC_FLUSH_CONN_OUT_OK;
2324 spin_lock_bh(&(nb->stalledconn_lock));
2325 nb->stalledconn_work_scheduled = 0;
2326 while (rc != RC_FLUSH_CONN_OUT_NBNOTACTIVE &&
2327 list_empty(&(nb->stalledconn_list)) == 0) {
2328 struct list_head *lh = nb->stalledconn_list.next;
2329 struct conn *trgt_out = container_of(lh, struct conn,
2330 target.out.nbstalled_lh);
2331 __u32 sent = 0;
2332 BUG_ON(trgt_out->targettype != TARGET_OUT);
2333 list_del(lh);
2334 lh->prev = 0;
2335 lh->next = 0;
2337 spin_unlock_bh(&(nb->stalledconn_lock));
2339 spin_lock_bh(&(trgt_out->rcv_lock));
2340 if (likely(trgt_out->targettype == TARGET_OUT))
2341 rc = flush_out(trgt_out, &sent);
2342 spin_unlock_bh(&(trgt_out->rcv_lock));
2344 if (sent != 0)
2345 wake_sender(trgt_out);
2347 kref_put(&(trgt_out->ref), free_conn);
2349 spin_lock_bh(&(nb->stalledconn_lock));
2351 spin_unlock_bh(&(nb->stalledconn_lock));
2353 kref_put(&(nb->ref), neighbor_free);
2356 int __init cor_snd_init(void)
2358 connretrans_slab = kmem_cache_create("cor_connretrans",
2359 sizeof(struct conn_retrans), 8, 0, 0);
2360 if (unlikely(connretrans_slab == 0))
2361 return -ENOMEM;
2363 return 0;
2366 MODULE_LICENSE("GPL");