use list instead of heap for queueing conn_data
net/cor/snd.c
1 /**
2 * Connection oriented routing
3 * Copyright (C) 2007-2019 Michael Blizek
5 * This program is free software; you can redistribute it and/or
6 * modify it under the terms of the GNU General Public License
7 * as published by the Free Software Foundation; either version 2
8 * of the License, or (at your option) any later version.
10 * This program is distributed in the hope that it will be useful,
11 * but WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
13 * GNU General Public License for more details.
15 * You should have received a copy of the GNU General Public License
16 * along with this program; if not, write to the Free Software
17 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
18 * 02110-1301, USA.
21 #include <linux/gfp.h>
22 #include <linux/jiffies.h>
23 #include <linux/slab.h>
25 #include "cor.h"
27 static struct kmem_cache *connretrans_slab;
29 static DEFINE_SPINLOCK(queues_lock);
30 static LIST_HEAD(queues);
32 static int _flush_out(struct conn *trgt_out_l, __u32 maxsend, __u32 *sent,
33 int from_qos);
35 #warning todo packet loss should slow down sending to the affected neighbor
37 #ifdef DEBUG_QOS_SLOWSEND
38 static DEFINE_SPINLOCK(slowsend_lock);
39 static unsigned long last_send;
42 int cor_dev_queue_xmit(struct sk_buff *skb, int caller)
44 int allowsend = 0;
45 unsigned long jiffies_tmp;
46 spin_lock_bh(&slowsend_lock);
47 jiffies_tmp = jiffies;
48 if (time_after(last_send, jiffies_tmp) || time_before_eq(last_send +
49 HZ/10, jiffies_tmp)) {
50 last_send = jiffies_tmp;
51 allowsend = 1;
53 spin_unlock_bh(&slowsend_lock);
55 /* printk(KERN_ERR "cor_dev_queue_xmit %d, %d", caller, allowsend); */
56 if (allowsend) {
57 return dev_queue_xmit(skb);
58 } else {
59 kfree_skb(skb);
60 return NET_XMIT_DROP;
63 #endif
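/*
 * DEBUG_QOS_SLOWSEND above throttles the device to at most one transmit per
 * HZ/10 jiffies and drops everything else with NET_XMIT_DROP, which makes
 * the congestion handling easy to exercise.  When DEBUG_QOS_SLOWSEND is not
 * defined, cor_dev_queue_xmit() presumably reduces to a plain
 * dev_queue_xmit() call; that variant is not part of this block.
 */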
65 static void free_connretrans(struct kref *ref)
67 struct conn_retrans *cr = container_of(ref, struct conn_retrans, ref);
68 struct conn *cn = cr->trgt_out_o;
69 kmem_cache_free(connretrans_slab, cr);
70 kref_put(&(cn->ref), free_conn);
73 void free_qos(struct kref *ref)
75 struct qos_queue *q = container_of(ref, struct qos_queue, ref);
76 kfree(q);
80 static __u64 _resume_conns_maxsend(struct qos_queue *q, __u32 numconns,
81 struct conn *trgt_out_l, __u32 newpriority)
83 __u32 oldpriority = trgt_out_l->target.out.rb_priority;
84 __u64 priority_sum_old = atomic64_read(&(q->priority_sum));
86 __u64 priority_sum;
88 while (1) {
89 __u64 cmpxchg_ret;
91 priority_sum = priority_sum_old;
93 BUG_ON(priority_sum < oldpriority);
94 priority_sum -= oldpriority;
96 BUG_ON(priority_sum + newpriority < priority_sum);
97 priority_sum += newpriority;
99 cmpxchg_ret = atomic64_cmpxchg(&(q->priority_sum),
100 priority_sum_old, priority_sum);
102 if (likely(cmpxchg_ret == priority_sum_old))
103 break;
105 priority_sum_old = cmpxchg_ret;
108 trgt_out_l->target.out.rb_priority = newpriority;
110 return div_u64(2048LL * ((__u64) newpriority) * ((__u64) numconns),
111 priority_sum);
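/*
 * _resume_conns_maxsend() keeps q->priority_sum current with a lockless
 * atomic64_cmpxchg() retry loop (swap the conn's old priority for its new
 * one) and returns a send budget proportional to the conn's share of the
 * total priority: 2048 * newpriority * numconns / priority_sum.
 */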
114 static int resume_conns(struct qos_queue *q)
116 unsigned long iflags;
117 int rc1;
118 int sent1 = 0;
120 while (1) {
121 __u32 numconns;
122 __u32 priority;
123 __u64 maxsend;
125 int rc2;
126 __u32 sent2 = 0;
128 struct conn *cn = 0;
129 spin_lock_irqsave(&(q->qlock), iflags);
130 if (list_empty(&(q->conns_waiting)) == 0) {
131 cn = container_of(q->conns_waiting.next,
132 struct conn, target.out.rb.lh);
133 numconns = q->numconns;
134 BUG_ON(cn->targettype != TARGET_OUT);
135 BUG_ON(cn->target.out.rb.lh.prev !=
136 &(q->conns_waiting));
137 BUG_ON((cn->target.out.rb.lh.next ==
138 &(q->conns_waiting)) && (
139 q->conns_waiting.prev !=
140 &(cn->target.out.rb.lh)));
141 list_del(&(cn->target.out.rb.lh));
142 list_add_tail(&(cn->target.out.rb.lh),
143 &(q->conns_waiting));
144 kref_get(&(cn->ref));
146 spin_unlock_irqrestore(&(q->qlock), iflags);
149 if (cn == 0)
150 return QOS_RESUME_DONE;
152 priority = refresh_conn_priority(cn, 0);
154 spin_lock_bh(&(cn->rcv_lock));
156 if (unlikely(cn->targettype != TARGET_OUT)) {
157 spin_unlock_bh(&(cn->rcv_lock));
158 continue;
161 maxsend = _resume_conns_maxsend(q, numconns, cn, priority);
162 maxsend += cn->target.out.maxsend_extra;
163 if (unlikely(maxsend > U32_MAX))
164 maxsend = U32_MAX;
166 rc2 = _flush_out(cn, maxsend, &sent2, 1);
168 if (rc2 == RC_FLUSH_CONN_OUT_OK ||
169 rc2 == RC_FLUSH_CONN_OUT_NBNOTACTIVE) {
170 cn->target.out.maxsend_extra = 0;
171 qos_remove_conn(cn);
172 } else if (sent2 == 0 && (rc2 == RC_FLUSH_CONN_OUT_CONG ||
173 rc2 == RC_FLUSH_CONN_OUT_OOM)) {
174 spin_lock_irqsave(&(q->qlock), iflags);
175 if (likely(cn->target.out.rb.in_queue != 0)) {
176 list_del(&(cn->target.out.rb.lh));
177 list_add(&(cn->target.out.rb.lh),
178 &(q->conns_waiting));
180 spin_unlock_irqrestore(&(q->qlock), iflags);
181 } else if (rc2 == RC_FLUSH_CONN_OUT_CONG ||
182 rc2 == RC_FLUSH_CONN_OUT_OOM) {
183 cn->target.out.maxsend_extra = 0;
184 } else if (likely(rc2 == RC_FLUSH_CONN_OUT_MAXSENT)) {
185 if (unlikely(maxsend - sent2 > 65535))
186 cn->target.out.maxsend_extra = 65535;
187 else
188 cn->target.out.maxsend_extra = maxsend - sent2;
191 spin_unlock_bh(&(cn->rcv_lock));
193 if (sent2 != 0) {
194 sent1 = 1;
195 wake_sender(cn);
198 kref_put(&(cn->ref), free_conn);
200 if (rc2 == RC_FLUSH_CONN_OUT_CONG ||
201 rc2 == RC_FLUSH_CONN_OUT_OOM) {
202 if (sent1)
203 return QOS_RESUME_CONG;
204 else
205 return QOS_RESUME_CONG_NOPROGRESS;
209 return rc1;
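/*
 * resume_conns() round-robins over q->conns_waiting: the head conn is moved
 * to the tail, flushed with a budget of maxsend (plus any maxsend_extra
 * carried over from the last round), and only moved back to the head when it
 * made no progress because of congestion or OOM.  Unused budget of up to
 * 65535 bytes is remembered in maxsend_extra for the next round.
 */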
212 static int send_retrans(struct neighbor *nb, int fromqos);
214 static int _qos_resume(struct qos_queue *q, int caller)
216 unsigned long iflags;
217 int rc = 0;
218 struct list_head *lh;
220 spin_lock_irqsave(&(q->qlock), iflags);
222 if (caller == QOS_CALLER_KPACKET)
223 lh = &(q->conn_retrans_waiting);
224 else if (caller == QOS_CALLER_CONN_RETRANS)
225 lh = &(q->kpackets_waiting);
226 else if (caller == QOS_CALLER_ANNOUNCE)
227 lh = &(q->announce_waiting);
228 else
229 BUG();
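/*
 * Note the list mapping used here: the QOS_CALLER_KPACKET caller is drained
 * from conn_retrans_waiting and QOS_CALLER_CONN_RETRANS from
 * kpackets_waiting.  The same cross mapping is used in qos_enqueue(),
 * qos_resume_taskfunc() and _destroy_queue(), so enqueue and resume stay
 * consistent with each other.
 */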
231 while (list_empty(lh) == 0) {
232 struct list_head *curr = lh->next;
233 struct resume_block *rb = container_of(curr,
234 struct resume_block, lh);
235 rb->in_queue = 0;
236 list_del(curr);
238 spin_unlock_irqrestore(&(q->qlock), iflags);
239 if (caller == QOS_CALLER_KPACKET) {
240 rc = send_messages(container_of(rb, struct neighbor,
241 rb_kp), 1);
242 } else if (caller == QOS_CALLER_CONN_RETRANS) {
243 rc = send_retrans(container_of(rb, struct neighbor,
244 rb_cr), 1);
245 } else if (caller == QOS_CALLER_ANNOUNCE) {
246 rc = _send_announce(container_of(rb,
247 struct announce_data, rb), 1);
248 } else {
249 BUG();
251 spin_lock_irqsave(&(q->qlock), iflags);
253 if (rc != 0 && rb->in_queue == 0) {
254 rb->in_queue = 1;
255 list_add(curr, lh);
256 break;
259 if (caller == QOS_CALLER_KPACKET) {
260 kref_put(&(container_of(rb, struct neighbor,
261 rb_kp)->ref), neighbor_free);
262 } else if (caller == QOS_CALLER_CONN_RETRANS) {
263 kref_put(&(container_of(rb, struct neighbor,
264 rb_cr)->ref), neighbor_free);
265 } else if (caller == QOS_CALLER_ANNOUNCE) {
266 kref_put(&(container_of(rb,
267 struct announce_data, rb)->ref),
268 announce_data_free);
269 } else {
270 BUG();
273 kref_put(&(q->ref), kreffree_bug);
275 if (rc != 0)
276 break;
279 spin_unlock_irqrestore(&(q->qlock), iflags);
281 return rc;
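/*
 * _qos_resume() dequeues one resume_block at a time, clears in_queue and
 * drops qlock before calling the actual sender, then retakes qlock.  If the
 * send reports congestion (rc != 0) the block is pushed back onto its list
 * and the loop stops; otherwise the reference taken at enqueue time is
 * dropped (neighbor_free/announce_data_free) together with one queue ref.
 */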
284 void qos_resume_taskfunc(unsigned long arg)
286 struct qos_queue *q = (struct qos_queue *) arg;
288 int rc = QOS_RESUME_DONE;
289 int sent = 0;
290 unsigned long iflags;
291 int i;
293 spin_lock_irqsave(&(q->qlock), iflags);
295 for (i=0;i<4 && rc == QOS_RESUME_DONE;i++) {
296 struct list_head *lh;
298 if (i == QOS_CALLER_KPACKET)
299 lh = &(q->conn_retrans_waiting);
300 else if (i == QOS_CALLER_CONN_RETRANS)
301 lh = &(q->kpackets_waiting);
302 else if (i == QOS_CALLER_ANNOUNCE)
303 lh = &(q->announce_waiting);
304 else if (i == QOS_CALLER_CONN)
305 lh = &(q->conns_waiting);
306 else
307 BUG();
309 if (list_empty(lh))
310 continue;
312 spin_unlock_irqrestore(&(q->qlock), iflags);
313 if (i == QOS_CALLER_CONN) {
314 rc = resume_conns(q);
315 } else {
316 rc = _qos_resume(q, i);
319 sent = sent || (rc != QOS_RESUME_CONG_NOPROGRESS);
321 spin_lock_irqsave(&(q->qlock), iflags);
323 i = 0;
326 if (rc == QOS_RESUME_DONE) {
327 q->qos_resume_scheduled = 0;
328 } else {
329 unsigned long jiffies_tmp = jiffies;
330 unsigned long delay = (jiffies_tmp - q->jiffies_lastprogress +
331 3) / 4;
333 if (sent || unlikely(delay <= 0)) {
334 q->jiffies_lastprogress = jiffies_tmp;
335 delay = 1;
336 } else if (delay > HZ/10) {
337 q->jiffies_lastprogress = jiffies_tmp - (HZ*4)/10;
338 delay = HZ/10;
341 /* If we retry too fast here, we might starve layer 2 */
342 mod_timer(&(q->qos_resume_timer), jiffies_tmp + delay);
345 spin_unlock_irqrestore(&(q->qlock), iflags);
347 if (rc == QOS_RESUME_DONE)
348 kref_put(&(q->ref), free_qos);
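/*
 * Retry backoff for the resume tasklet: the delay is a quarter of the time
 * since the last progress, forced to 1 jiffy whenever something was sent and
 * capped at HZ/10 so a stuck queue never starves layer 2.  For example with
 * HZ=250, a queue that last made progress 200 jiffies ago retries after
 * min((200 + 3) / 4, 25) = 25 jiffies.  The queue reference taken when the
 * resume was scheduled is only dropped once everything completed
 * (QOS_RESUME_DONE); otherwise the rearmed timer keeps holding it.
 */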
351 void qos_resume_timerfunc(struct timer_list *qos_resume_timer)
353 struct qos_queue *q = container_of(qos_resume_timer,
354 struct qos_queue, qos_resume_timer);
355 tasklet_schedule(&(q->qos_resume_task));
358 struct qos_queue *get_queue(struct net_device *dev)
360 struct qos_queue *ret = 0;
361 struct list_head *curr;
363 spin_lock_bh(&(queues_lock));
364 curr = queues.next;
365 while (curr != (&queues)) {
366 struct qos_queue *q = container_of(curr,
367 struct qos_queue, queue_list);
368 if (q->dev == dev) {
369 ret = q;
370 kref_get(&(ret->ref));
371 break;
373 curr = curr->next;
375 spin_unlock_bh(&(queues_lock));
376 return ret;
379 static void _destroy_queue(struct qos_queue *q, int caller)
381 struct list_head *lh;
383 if (caller == QOS_CALLER_KPACKET)
384 lh = &(q->conn_retrans_waiting);
385 else if (caller == QOS_CALLER_CONN_RETRANS)
386 lh = &(q->kpackets_waiting);
387 else if (caller == QOS_CALLER_ANNOUNCE)
388 lh = &(q->announce_waiting);
389 else
390 BUG();
392 while (list_empty(lh) == 0) {
393 struct list_head *curr = lh->next;
394 struct resume_block *rb = container_of(curr,
395 struct resume_block, lh);
396 rb->in_queue = 0;
397 list_del(curr);
399 if (caller == QOS_CALLER_KPACKET) {
400 kref_put(&(container_of(rb, struct neighbor,
401 rb_kp)->ref), neighbor_free);
402 } else if (caller == QOS_CALLER_CONN_RETRANS) {
403 kref_put(&(container_of(rb, struct neighbor,
404 rb_cr)->ref), neighbor_free);
405 } else if (caller == QOS_CALLER_ANNOUNCE) {
406 kref_put(&(container_of(rb,
407 struct announce_data, rb)->ref),
408 announce_data_free);
409 } else {
410 BUG();
412 kref_put(&(q->ref), kreffree_bug);
416 static struct qos_queue *unlink_queue(struct net_device *dev)
418 struct qos_queue *ret = 0;
419 struct list_head *curr;
421 spin_lock_bh(&(queues_lock));
422 curr = queues.next;
423 while (curr != (&queues)) {
424 struct qos_queue *q = container_of(curr,
425 struct qos_queue, queue_list);
426 if (dev == 0 || q->dev == dev) {
427 ret = q;
428 kref_get(&(ret->ref));
430 list_del(&(q->queue_list));
431 kref_put(&(q->ref), kreffree_bug);
432 break;
434 curr = curr->next;
436 spin_unlock_bh(&(queues_lock));
437 return ret;
440 int destroy_queue(struct net_device *dev)
442 int rc = 1;
443 unsigned long iflags;
445 while (1) {
446 struct qos_queue *q = unlink_queue(dev);
448 if (q == 0)
449 break;
451 rc = 0;
453 spin_lock_irqsave(&(q->qlock), iflags);
454 if (q->dev != 0) {
455 dev_put(q->dev);
456 q->dev = 0;
458 _destroy_queue(q, QOS_CALLER_KPACKET);
459 _destroy_queue(q, QOS_CALLER_CONN_RETRANS);
460 _destroy_queue(q, QOS_CALLER_ANNOUNCE);
461 spin_unlock_irqrestore(&(q->qlock), iflags);
463 kref_put(&(q->ref), free_qos);
466 return rc;
469 int create_queue(struct net_device *dev)
471 struct qos_queue *q = kmalloc(sizeof(struct qos_queue), GFP_KERNEL);
473 if (q == 0) {
474 printk(KERN_ERR "cor: unable to allocate memory for device "
475 "queue, not enabling device");
476 return 1;
479 memset(q, 0, sizeof(struct qos_queue));
481 spin_lock_init(&(q->qlock));
483 kref_init(&(q->ref));
485 q->dev = dev;
486 dev_hold(dev);
488 timer_setup(&(q->qos_resume_timer), qos_resume_timerfunc, 0);
489 tasklet_init(&(q->qos_resume_task), qos_resume_taskfunc,
490 (unsigned long) q);
492 INIT_LIST_HEAD(&(q->kpackets_waiting));
493 INIT_LIST_HEAD(&(q->conn_retrans_waiting));
494 INIT_LIST_HEAD(&(q->announce_waiting));
495 INIT_LIST_HEAD(&(q->conns_waiting));
497 spin_lock_bh(&(queues_lock));
498 list_add(&(q->queue_list), &queues);
499 spin_unlock_bh(&(queues_lock));
501 atomic64_set(&(q->priority_sum), 0);
503 return 0;
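/*
 * Each qos_queue is reference counted, pins its net_device with dev_hold()
 * and sits on the global "queues" list until destroy_queue() unlinks it,
 * releases the device and flushes the per-caller waiting lists.
 */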
506 void qos_enqueue(struct qos_queue *q, struct resume_block *rb, int caller)
508 unsigned long iflags;
510 spin_lock_irqsave(&(q->qlock), iflags);
512 if (rb->in_queue)
513 goto out;
515 rb->in_queue = 1;
517 if (caller == QOS_CALLER_KPACKET) {
518 list_add(&(rb->lh) , &(q->conn_retrans_waiting));
519 kref_get(&(container_of(rb, struct neighbor, rb_kp)->ref));
520 } else if (caller == QOS_CALLER_CONN_RETRANS) {
521 list_add(&(rb->lh), &(q->kpackets_waiting));
522 kref_get(&(container_of(rb, struct neighbor, rb_cr)->ref));
523 } else if (caller == QOS_CALLER_ANNOUNCE) {
524 list_add(&(rb->lh), &(q->announce_waiting));
525 kref_get(&(container_of(rb, struct announce_data, rb)->ref));
526 } else if (caller == QOS_CALLER_CONN) {
527 list_add(&(rb->lh), &(q->conns_waiting));
528 kref_get(&(container_of(rb, struct conn, target.out.rb)->ref));
529 q->numconns++;
530 } else {
531 BUG();
533 kref_get(&(q->ref));
535 if (q->qos_resume_scheduled == 0) {
536 q->jiffies_lastprogress = jiffies;
537 mod_timer(&(q->qos_resume_timer), jiffies + 1);
538 q->qos_resume_scheduled = 1;
539 kref_get(&(q->ref));
542 out:
543 spin_unlock_irqrestore(&(q->qlock), iflags);
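/*
 * qos_enqueue() takes two kinds of references on the queue: one for the
 * resume_block that was just linked in (paired with the kref_put in
 * _qos_resume()/qos_remove_conn()/_destroy_queue()) and, if the resume
 * machinery was idle, a second one for the scheduled timer/tasklet, which
 * qos_resume_taskfunc() drops again once it returns QOS_RESUME_DONE.
 */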
546 void qos_remove_conn(struct conn *trgt_out_l)
548 unsigned long iflags;
549 struct qos_queue *q;
551 BUG_ON(trgt_out_l->targettype != TARGET_OUT);
553 q = trgt_out_l->target.out.nb->queue;
555 BUG_ON(q == 0);
557 spin_lock_irqsave(&(q->qlock), iflags);
559 if (trgt_out_l->target.out.rb.in_queue == 0) {
560 spin_unlock_irqrestore(&(q->qlock), iflags);
561 return;
564 trgt_out_l->target.out.rb.in_queue = 0;
565 list_del(&(trgt_out_l->target.out.rb.lh));
566 q->numconns--;
567 atomic64_sub(trgt_out_l->target.out.rb_priority, &(q->priority_sum));
568 trgt_out_l->target.out.rb_priority = 0;
569 spin_unlock_irqrestore(&(q->qlock), iflags);
571 kref_put(&(trgt_out_l->ref), kreffree_bug);
573 kref_put(&(q->ref), free_qos);
576 static void qos_enqueue_conn(struct conn *trgt_out_l)
578 BUG_ON(trgt_out_l->data_buf.read_remaining == 0);
579 qos_enqueue(trgt_out_l->target.out.nb->queue,
580 &(trgt_out_l->target.out.rb), QOS_CALLER_CONN);
583 static int may_send_conn_retrans(struct neighbor *nb)
585 unsigned long iflags;
586 int rc;
588 BUG_ON(nb->queue == 0);
590 spin_lock_irqsave(&(nb->queue->qlock), iflags);
591 rc = (list_empty(&(nb->queue->kpackets_waiting)));
592 spin_unlock_irqrestore(&(nb->queue->qlock), iflags);
594 return rc;
597 int may_send_announce(struct net_device *dev)
599 unsigned long iflags;
600 struct qos_queue *q = get_queue(dev);
601 int rc;
603 if (q == 0)
604 return 0;
606 spin_lock_irqsave(&(q->qlock), iflags);
607 rc = (list_empty(&(q->kpackets_waiting)) &&
608 list_empty(&(q->conn_retrans_waiting)) &&
609 list_empty(&(q->announce_waiting)));
610 spin_unlock_irqrestore(&(q->qlock), iflags);
612 kref_put(&(q->ref), free_qos);
614 return rc;
617 static int may_send_conn(struct conn *trgt_out_l)
619 unsigned long iflags;
620 struct qos_queue *q = trgt_out_l->target.out.nb->queue;
621 int rc;
623 BUG_ON(q == 0);
625 spin_lock_irqsave(&(q->qlock), iflags);
626 rc = (list_empty(&(q->kpackets_waiting)) &&
627 list_empty(&(q->conn_retrans_waiting)) &&
628 list_empty(&(q->announce_waiting)) &&
629 list_empty(&(q->conns_waiting)));
630 spin_unlock_irqrestore(&(q->qlock), iflags);
632 return rc;
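/*
 * The may_send_*() helpers encode the send priority order: kernel packets go
 * first (may_send_conn_retrans() only requires kpackets_waiting to be empty),
 * then conn retransmits and announces, and plain conn data last
 * (may_send_conn() requires all four waiting lists to be empty).
 */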
636 #warning todo activate this - needs newer upstream kernel (e.g. 3.13)
/*
638 #include <net/cfg80211.h>
640 #include "../wireless/core.h"
641 #include "../wireless/rdev-ops.h"
643 static DEFINE_SPINLOCK(sinfo_lock);
644 static struct station_info sinfo;
646 static __u32 mss_tmp(struct neighbor *nb, __u32 l3overhead)
648 struct net_device *dev = nb->dev;
649 struct wireless_dev *wdev;
650 struct cfg80211_registered_device *rdev;
651 __u32 rate_kbit = 0;
653 __u32 mtu = ((dev->mtu > 4096) ? 4096 : dev->mtu) -
654 LL_RESERVED_SPACE(dev);
656 / * see cfg80211_wext_giwrate * /
658 wdev = dev->ieee80211_ptr;
659 if (wdev == 0)
660 goto nowireless;
662 rdev = wiphy_to_dev(wdev->wiphy);
663 if (wdev == 0 || rdev->ops->get_station == 0)
664 goto unknownrate;
666 if (sizeof(nb->mac) < ETH_ALEN || MAX_ADDR_LEN < ETH_ALEN)
667 goto unknownrate;
669 / * sinfo is global because of size * /
670 spin_lock_bh(&sinfo_lock);
671 if (rdev_get_station(rdev, dev, addr, &sinfo) == 0) {
672 / * unknown neighbor * /
673 } else if ((sinfo.filled & STATION_INFO_TX_BITRATE) == 0) {
674 / * unknown rate * /
675 } else {
676 rate_kbit = 100 * cfg80211_calculate_bitrate(&(sinfo.txrate));
678 spin_unlock_bh(&sinfo_lock);
680 unknownrate:
682 if (rate_kbit > 0) {
683 / * amount of data which can be sent in 1ms * /
684 __u32 mtu_ratemax = rate_kbit/8;
686 if (mtu_ratemax < 128)
687 mtu_ratemax = 128;
689 if (mtu > mtu_ratemax)
690 mtu = mtu_ratemax;
693 if (0) {
694 nowireless:
695 if (mtu > 1500)
696 mtu = 1500;
699 if (unlikely(mtu < l3overhead))
700 return 0;
702 return mtu - l3overhead;
703 } */
705 static struct sk_buff *create_packet(struct neighbor *nb, int size,
706 gfp_t alloc_flags)
708 struct sk_buff *ret;
710 ret = alloc_skb(size + LL_RESERVED_SPACE(nb->dev) +
711 nb->dev->needed_tailroom, alloc_flags);
712 if (unlikely(ret == 0))
713 return 0;
715 ret->protocol = htons(ETH_P_COR);
716 ret->dev = nb->dev;
718 skb_reserve(ret, LL_RESERVED_SPACE(nb->dev));
719 if (unlikely(dev_hard_header(ret, nb->dev, ETH_P_COR, nb->mac,
720 nb->dev->dev_addr, ret->len) < 0)) {
kfree_skb(ret); /* do not leak the skb if the link layer header cannot be built */
return 0;
}
722 skb_reset_network_header(ret);
724 return ret;
727 struct sk_buff *create_packet_cmsg(struct neighbor *nb, int size,
728 gfp_t alloc_flags, __u64 seqno)
730 struct sk_buff *ret;
731 char *dest;
733 ret = create_packet(nb, size + 7, alloc_flags);
734 if (unlikely(ret == 0))
735 return 0;
737 dest = skb_put(ret, 7);
738 BUG_ON(dest == 0);
740 dest[0] = PACKET_TYPE_CMSG;
741 dest += 1;
743 put_u48(dest, seqno);
744 dest += 6;
746 return ret;
749 struct sk_buff *create_packet_conndata(struct neighbor *nb, int size,
750 gfp_t alloc_flags, __u32 conn_id, __u64 seqno,
751 __u8 snd_delayed_lowbuf)
753 struct sk_buff *ret;
754 char *dest;
756 ret = create_packet(nb, size + 11, alloc_flags);
757 if (unlikely(ret == 0))
758 return 0;
760 dest = skb_put(ret, 11);
761 BUG_ON(dest == 0);
763 if (snd_delayed_lowbuf != 0)
764 dest[0] = PACKET_TYPE_CONNDATA_LOWBUFDELAYED;
765 else
766 dest[0] = PACKET_TYPE_CONNDATA;
767 dest += 1;
769 put_u32(dest, conn_id);
770 dest += 4;
771 put_u48(dest, seqno);
772 dest += 6;
774 return ret;
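/*
 * Header layout produced by the two helpers above:
 *   cmsg:     [1 byte type][6 byte seqno]                   =  7 bytes
 *   conndata: [1 byte type][4 byte conn_id][6 byte seqno]   = 11 bytes
 * followed by the payload, which is why they allocate size + 7 and
 * size + 11 through create_packet().
 */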
778 * warning: all callers must do the following calls in this order:
779 * kref_get
780 * spin_lock
781 * reschedule_conn_retrans_timer
782 * spin_unlock
783 * kref_put
785 * This is because this function calls kref_put
787 void reschedule_conn_retrans_timer(struct neighbor *nb_retranslocked)
789 struct conn_retrans *cr = 0;
791 if (list_empty(&(nb_retranslocked->retrans_list_conn)))
792 return;
794 if (nb_retranslocked->retrans_conn_running != 0)
795 return;
797 cr = container_of(nb_retranslocked->retrans_list_conn.next,
798 struct conn_retrans, timeout_list);
800 if (nb_retranslocked->retrans_timer_conn_running == 0) {
801 nb_retranslocked->retrans_timer_conn_running = 1;
802 kref_get(&(nb_retranslocked->ref));
805 mod_timer(&(nb_retranslocked->retrans_timer_conn), cr->timeout);
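/*
 * A minimal sketch of the calling sequence required by the comment above
 * (names as used elsewhere in this file):
 *
 *	kref_get(&(nb->ref));
 *	spin_lock_bh(&(nb->retrans_lock));
 *	reschedule_conn_retrans_timer(nb);
 *	spin_unlock_bh(&(nb->retrans_lock));
 *	kref_put(&(nb->ref), neighbor_free);
 *
 * The caller's own reference keeps the neighbor alive across the call even
 * if the function drops a reference itself; the reference taken here for
 * retrans_timer_conn_running belongs to the armed timer.
 */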
809 * warning:
810 * caller must also call kref_get/put, see reschedule_conn_retrans_timer
812 static void cancel_conn_retrans(struct neighbor *nb_retranslocked,
813 struct conn_retrans *cr)
815 if (unlikely(cr->ackrcvd))
816 return;
818 if (cr->scheduled) {
819 list_del(&(cr->timeout_list));
820 cr->scheduled = 0;
821 kref_put(&(cr->ref), kreffree_bug);
824 list_del(&(cr->conn_list));
825 cr->ackrcvd = 1;
827 kref_put(&(cr->ref), free_connretrans);
829 reschedule_conn_retrans_timer(nb_retranslocked);
832 void cancel_conn_all_retrans(struct conn *trgt_out_l)
834 struct neighbor *nb = trgt_out_l->target.out.nb;
836 spin_lock_bh(&(nb->retrans_lock));
838 while (list_empty(&(trgt_out_l->target.out.retrans_list)) == 0) {
839 struct conn_retrans *cr = container_of(
840 trgt_out_l->target.out.retrans_list.next,
841 struct conn_retrans, conn_list);
842 BUG_ON(cr->trgt_out_o != trgt_out_l);
844 cancel_conn_retrans(nb, cr);
847 spin_unlock_bh(&(nb->retrans_lock));
850 static struct conn_retrans *prepare_conn_retrans(struct conn *trgt_out_l,
851 __u64 seqno, __u32 len)
853 struct neighbor *nb = trgt_out_l->target.out.nb;
855 struct conn_retrans *cr = kmem_cache_alloc(connretrans_slab,
856 GFP_ATOMIC);
858 if (unlikely(cr == 0))
859 return 0;
861 BUG_ON(trgt_out_l->isreset != 0);
863 memset(cr, 0, sizeof (struct conn_retrans));
864 cr->trgt_out_o = trgt_out_l;
865 kref_get(&(trgt_out_l->ref));
866 cr->seqno = seqno;
867 cr->length = len;
868 kref_init(&(cr->ref));
870 kref_get(&(cr->ref));
871 spin_lock_bh(&(nb->retrans_lock));
872 list_add_tail(&(cr->conn_list),
873 &(cr->trgt_out_o->target.out.retrans_list));
874 spin_unlock_bh(&(nb->retrans_lock));
876 return cr;
879 static int _send_retrans(struct neighbor *nb, struct conn_retrans *cr)
881 int targetmss = mss_conndata(nb);
882 int queuefull = 0;
883 int oom = 0;
884 struct conn *trgt_out_o = cr->trgt_out_o;
886 spin_lock_bh(&(trgt_out_o->rcv_lock));
888 BUG_ON(trgt_out_o->targettype != TARGET_OUT);
889 BUG_ON(trgt_out_o->target.out.nb != nb);
891 spin_lock_bh(&(nb->retrans_lock));
892 if (unlikely(cr->ackrcvd)) {
893 spin_unlock_bh(&(nb->retrans_lock));
894 goto out;
896 spin_unlock_bh(&(nb->retrans_lock));
898 kref_get(&(trgt_out_o->ref));
900 BUG_ON(trgt_out_o->isreset != 0);
901 BUG_ON(seqno_before(cr->seqno, trgt_out_o->target.out.seqno_acked));
903 if (unlikely(cr->length > targetmss)) {
904 struct conn_retrans *cr2 = prepare_conn_retrans(trgt_out_o,
905 cr->seqno + targetmss, cr->length - targetmss);
906 if (unlikely(cr2 == 0))
907 goto out_oom;
909 cr2->timeout = cr->timeout;
911 spin_lock_bh(&(nb->retrans_lock));
912 list_add(&(cr2->timeout_list), &(nb->retrans_list_conn));
913 cr2->scheduled = 1;
914 spin_unlock_bh(&(nb->retrans_lock));
916 cr->length = targetmss;
919 BUG_ON(cr->length == 0);
921 if (send_conndata_as_skb(nb, cr->length)) {
922 struct sk_buff *skb;
923 char *dst;
925 skb = create_packet_conndata(nb, cr->length, GFP_ATOMIC,
926 trgt_out_o->target.out.conn_id, cr->seqno,
927 cr->snd_delayed_lowbuf);
928 if (unlikely(skb == 0))
929 goto out_oom;
931 dst = skb_put(skb, cr->length);
933 databuf_pullold(trgt_out_o, cr->seqno, dst, cr->length);
935 if (cor_dev_queue_xmit(skb, QOS_CALLER_CONN_RETRANS) != 0)
936 goto qos_enqueue;
937 schedule_retransmit_conn(cr, 1);
938 } else {
939 struct control_msg_out *cm;
940 char *buf;
942 buf = kmalloc(cr->length, GFP_ATOMIC);
943 if (unlikely(buf == 0))
944 goto out_oom;
946 cm = alloc_control_msg(nb, ACM_PRIORITY_LOW);
947 if (unlikely(cm == 0)) {
948 kfree(buf);
949 goto out_oom;
952 databuf_pullold(trgt_out_o, cr->seqno, buf, cr->length);
954 send_conndata(cm, trgt_out_o->target.out.conn_id,
955 cr->seqno, buf, buf, cr->length,
956 cr->snd_delayed_lowbuf, cr);
959 if (0) {
960 qos_enqueue:
961 queuefull = 1;
962 if (0) {
963 out_oom:
964 oom = 1;
966 spin_lock_bh(&(nb->retrans_lock));
967 BUG_ON(cr->scheduled == 1);
968 if (unlikely(cr->ackrcvd)) {
969 kref_put(&(cr->ref), kreffree_bug);
970 } else {
971 if (oom)
972 cr->timeout = jiffies + 1;
973 list_add(&(cr->timeout_list), &(nb->retrans_list_conn));
974 cr->scheduled = 1;
976 spin_unlock_bh(&(nb->retrans_lock));
978 out:
979 spin_unlock_bh(&(trgt_out_o->rcv_lock));
981 kref_put(&(trgt_out_o->ref), free_conn);
983 return queuefull;
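/*
 * The "if (0) { label: ... }" blocks above implement the error path: on a
 * full device queue or an allocation failure the retransmit entry is put
 * back on retrans_list_conn (with its timeout pulled forward to jiffies + 1
 * in the OOM case) unless the ack arrived in the meantime, and
 * _send_retrans() reports the queue-full condition to its caller through the
 * return value.
 */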
986 static int send_retrans(struct neighbor *nb, int fromqos)
988 int sent = 0;
989 int queuefull = 0;
990 int nbstate = get_neigh_state(nb);
991 if (unlikely(nbstate == NEIGHBOR_STATE_STALLED))
992 goto out;
994 #warning todo check windowlimit
996 while (1) {
997 struct conn_retrans *cr = 0;
999 if (may_send_conn_retrans(nb) == 0)
1000 goto qos_enqueue;
1002 spin_lock_bh(&(nb->retrans_lock));
1004 killed_cont:
1005 if (list_empty(&(nb->retrans_list_conn)))
1006 break;
1008 cr = container_of(nb->retrans_list_conn.next,
1009 struct conn_retrans, timeout_list);
1011 BUG_ON(cr->scheduled == 0);
1013 if (unlikely(nbstate == NEIGHBOR_STATE_KILLED)) {
1014 cancel_conn_retrans(nb, cr);
1015 goto killed_cont;
1018 if (time_after(cr->timeout, jiffies))
1019 break;
1021 kref_get(&(cr->ref));
1022 list_del(&(cr->timeout_list));
1023 cr->scheduled = 0;
1025 spin_unlock_bh(&(nb->retrans_lock));
1026 queuefull = _send_retrans(nb, cr);
1027 kref_put(&(cr->ref), free_connretrans);
1028 if (queuefull) {
1029 qos_enqueue:
1030 if (fromqos == 0)
1031 qos_enqueue(nb->queue, &(nb->rb_cr),
1032 QOS_CALLER_CONN_RETRANS);
1033 goto out;
1034 } else {
1035 sent = 1;
1039 if (0) {
1040 out:
1041 spin_lock_bh(&(nb->retrans_lock));
1044 if (queuefull == 0) {
1045 nb->retrans_conn_running = 0;
1046 reschedule_conn_retrans_timer(nb);
1049 spin_unlock_bh(&(nb->retrans_lock));
1051 kref_put(&(nb->ref), neighbor_free);
1053 if (queuefull)
1054 return sent ? QOS_RESUME_CONG : QOS_RESUME_CONG_NOPROGRESS;
1055 return QOS_RESUME_DONE;
1058 void retransmit_conn_taskfunc(unsigned long arg)
1060 struct neighbor *nb = (struct neighbor *) arg;
1061 send_retrans(nb, 0);
1064 void retransmit_conn_timerfunc(struct timer_list *retrans_timer_conn)
1066 struct neighbor *nb = container_of(retrans_timer_conn,
1067 struct neighbor, retrans_timer_conn);
1069 spin_lock_bh(&(nb->retrans_lock));
1071 BUG_ON(nb->retrans_timer_conn_running == 0);
1072 BUG_ON(nb->retrans_conn_running == 1);
1074 nb->retrans_timer_conn_running = 0;
1075 nb->retrans_conn_running = 1;
1077 spin_unlock_bh(&(nb->retrans_lock));
1079 tasklet_schedule(&(nb->retrans_task_conn));
1082 void conn_ack_ooo_rcvd(struct neighbor *nb, __u32 conn_id,
1083 struct conn *trgt_out, __u64 seqno_ooo, __u32 length)
1085 struct list_head *curr;
1087 if (unlikely(length == 0))
1088 return;
1090 spin_lock_bh(&(trgt_out->rcv_lock));
1092 if (unlikely(trgt_out->targettype != TARGET_OUT))
1093 goto out;
1094 if (unlikely(trgt_out->target.out.nb != nb))
1095 goto out;
1096 if (unlikely(trgt_out->target.out.conn_id != conn_id))
1097 goto out;
1099 kref_get(&(nb->ref));
1100 spin_lock_bh(&(nb->retrans_lock));
1102 curr = trgt_out->target.out.retrans_list.next;
1104 while (curr != &(trgt_out->target.out.retrans_list)) {
1105 struct conn_retrans *cr = container_of(curr,
1106 struct conn_retrans, conn_list);
1108 int ack_covers_start = seqno_after_eq(cr->seqno, seqno_ooo);
1109 int ack_covers_end = seqno_before_eq(cr->seqno + cr->length,
1110 seqno_ooo + length);
1112 curr = curr->next;
1114 if (seqno_before(cr->seqno + cr->length, seqno_ooo))
1115 continue;
1117 if (seqno_after(cr->seqno, seqno_ooo + length))
1118 break;
1120 if (likely(ack_covers_start && ack_covers_end)) {
1121 cancel_conn_retrans(nb, cr);
1122 } else if (ack_covers_start) {
1123 __u32 diff = seqno_ooo + length - cr->seqno;
1125 cr->seqno += diff;
1126 cr->length -= diff;
1127 } else if (ack_covers_end) {
1128 cr->length -= cr->seqno + cr->length - seqno_ooo;
1129 } else {
1130 break;
1134 if (unlikely(list_empty(&(trgt_out->target.out.retrans_list)) == 0)) {
1135 trgt_out->target.out.seqno_acked =
1136 trgt_out->target.out.seqno_nextsend;
1137 } else {
1138 struct conn_retrans *cr = container_of(
1139 trgt_out->target.out.retrans_list.next,
1140 struct conn_retrans, conn_list);
1141 if (seqno_after(cr->seqno, trgt_out->target.out.seqno_acked))
1142 trgt_out->target.out.seqno_acked = cr->seqno;
1145 spin_unlock_bh(&(nb->retrans_lock));
1146 kref_put(&(nb->ref), neighbor_free);
1148 out:
1149 spin_unlock_bh(&(trgt_out->rcv_lock));
1152 void conn_ack_rcvd(struct neighbor *nb, __u32 conn_id, struct conn *trgt_out,
1153 __u64 seqno, int setwindow, __u8 window)
1155 int flush = 0;
1157 spin_lock_bh(&(trgt_out->rcv_lock));
1159 if (unlikely(trgt_out->isreset != 0))
1160 goto out;
1161 if (unlikely(trgt_out->targettype != TARGET_OUT))
1162 goto out;
1163 if (unlikely(trgt_out->target.out.nb != nb))
1164 goto out;
1165 if (unlikely(trgt_out->reversedir->source.in.conn_id != conn_id))
1166 goto out;
1168 if (unlikely(seqno_after(seqno, trgt_out->target.out.seqno_nextsend) ||
1169 seqno_before(seqno, trgt_out->target.out.seqno_acked)))
1170 goto out;
1172 if (setwindow) {
1173 __u64 windowdec = dec_log_64_7(window);
1174 if (unlikely(seqno_eq(seqno, trgt_out->target.out.seqno_acked)&&
1175 unlikely(seqno_before(seqno + windowdec,
1176 trgt_out->target.out.seqno_windowlimit))))
1177 goto skipwindow;
1179 trgt_out->target.out.seqno_windowlimit = seqno + windowdec;
1180 flush = 1;
1183 skipwindow:
1184 if (seqno == trgt_out->target.out.seqno_acked)
1185 goto out;
1187 kref_get(&(nb->ref));
1188 spin_lock_bh(&(nb->retrans_lock));
1190 trgt_out->target.out.seqno_acked = seqno;
1191 flush = 1;
1193 while (list_empty(&(trgt_out->target.out.retrans_list)) == 0) {
1194 struct conn_retrans *cr = container_of(
1195 trgt_out->target.out.retrans_list.next,
1196 struct conn_retrans, conn_list);
1198 if (seqno_after(cr->seqno + cr->length, seqno)) {
1199 if (seqno_before(cr->seqno, seqno)) {
1200 cr->length -= (seqno - cr->seqno);
1201 cr->seqno = seqno;
1203 break;
1206 cancel_conn_retrans(nb, cr);
1209 spin_unlock_bh(&(nb->retrans_lock));
1210 kref_put(&(nb->ref), neighbor_free);
1211 databuf_ack(trgt_out, trgt_out->target.out.seqno_acked);
1213 out:
1214 spin_unlock_bh(&(trgt_out->rcv_lock));
1216 if (flush)
1217 flush_buf(trgt_out, 0);
1218 wake_sender(trgt_out);
1221 void schedule_retransmit_conn(struct conn_retrans *cr, int connlocked)
1223 struct conn *trgt_out_o = cr->trgt_out_o;
1224 struct neighbor *nb;
1225 int first;
1227 if (connlocked == 0)
1228 spin_lock_bh(&(trgt_out_o->rcv_lock));
1230 BUG_ON(trgt_out_o->targettype != TARGET_OUT);
1231 nb = trgt_out_o->target.out.nb;
1233 cr->timeout = calc_timeout(atomic_read(&(nb->latency_retrans_us)),
1234 atomic_read(&(nb->latency_stddev_retrans_us)),
1235 atomic_read(&(nb->max_remote_ackconn_delay_us)));
1237 spin_lock_bh(&(nb->retrans_lock));
1239 kref_get(&(nb->ref));
1241 BUG_ON(cr->scheduled != 0);
1243 if (unlikely(cr->ackrcvd)) {
1244 kref_put(&(cr->ref), free_connretrans);
1245 goto out;
1248 first = unlikely(list_empty(&(nb->retrans_list_conn)));
1249 list_add_tail(&(cr->timeout_list), &(nb->retrans_list_conn));
1250 cr->scheduled = 1;
1252 if (unlikely(first))
1253 reschedule_conn_retrans_timer(nb);
1255 out:
1256 spin_unlock_bh(&(nb->retrans_lock));
1258 kref_put(&(nb->ref), neighbor_free);
1260 if (connlocked == 0)
1261 spin_unlock_bh(&(trgt_out_o->rcv_lock));
1264 static __u64 get_windowlimit(struct conn *trgt_out_l)
1266 if (unlikely(seqno_before(trgt_out_l->target.out.seqno_windowlimit,
1267 trgt_out_l->target.out.seqno_nextsend)))
1268 return 0;
1270 return seqno_clean(trgt_out_l->target.out.seqno_windowlimit -
1271 trgt_out_l->target.out.seqno_nextsend);
1274 static int seqno_low_sendlimit(struct conn *trgt_out_l, __u64 windowlimit,
1275 __u32 sndlen)
1277 __u64 bytes_ackpending;
1279 BUG_ON(seqno_before(trgt_out_l->target.out.seqno_nextsend,
1280 trgt_out_l->target.out.seqno_acked));
1282 bytes_ackpending = seqno_clean(trgt_out_l->target.out.seqno_nextsend -
1283 trgt_out_l->target.out.seqno_acked);
1285 if (windowlimit <= sndlen)
1286 return 1;
1288 if (unlikely(bytes_ackpending + sndlen < bytes_ackpending))
1289 return 0;
1291 return (windowlimit - sndlen < (bytes_ackpending + sndlen) / 8) ? 1 : 0;
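/*
 * seqno_low_sendlimit() reports that the send window is getting tight:
 * either the window cannot even take sndlen, or the window space left after
 * sending would drop below one eighth of the bytes that would then be in
 * flight (seqno_nextsend - seqno_acked plus sndlen).  _flush_out() uses this
 * to mark windowlimit_reached and switch to delayed-lowbuf sending.
 */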
1294 static int _flush_out_skb(struct conn *trgt_out_l, __u32 len,
1295 __u8 snd_delayed_lowbuf)
1297 struct neighbor *nb = trgt_out_l->target.out.nb;
1299 __u64 seqno;
1300 struct conn_retrans *cr;
1301 struct sk_buff *skb;
1302 char *dst;
1304 seqno = trgt_out_l->target.out.seqno_nextsend;
1305 skb = create_packet_conndata(trgt_out_l->target.out.nb, len,
1306 GFP_ATOMIC, trgt_out_l->target.out.conn_id, seqno,
1307 snd_delayed_lowbuf);
1308 if (unlikely(skb == 0))
1309 return RC_FLUSH_CONN_OUT_OOM;
1311 cr = prepare_conn_retrans(trgt_out_l, seqno, len);
1312 if (unlikely(cr == 0)) {
1313 kfree_skb(skb);
1314 return RC_FLUSH_CONN_OUT_OOM;
1317 dst = skb_put(skb, len);
1319 databuf_pull(trgt_out_l, dst, len);
1321 if (cor_dev_queue_xmit(skb, QOS_CALLER_CONN) != 0) {
1322 databuf_unpull(trgt_out_l, len);
1323 spin_lock_bh(&(nb->retrans_lock));
1324 cancel_conn_retrans(nb, cr);
1325 spin_unlock_bh(&(nb->retrans_lock));
1326 kref_put(&(cr->ref), free_connretrans);
1327 return RC_FLUSH_CONN_OUT_CONG;
1330 trgt_out_l->target.out.seqno_nextsend += len;
1331 schedule_retransmit_conn(cr, 1);
1333 return RC_FLUSH_CONN_OUT_OK;
1336 #warning todo check if a conn_data is in the queue and combine
1337 static int _flush_out_conndata(struct conn *trgt_out_l, __u16 len,
1338 __u8 snd_delayed_lowbuf)
1340 __u64 seqno;
1341 struct control_msg_out *cm;
1342 struct conn_retrans *cr;
1343 char *buf;
1345 buf = kmalloc(len, GFP_ATOMIC);
1347 if (unlikely(buf == 0))
1348 return RC_FLUSH_CONN_OUT_OOM;
1350 cm = alloc_control_msg(trgt_out_l->target.out.nb, ACM_PRIORITY_LOW);
1351 if (unlikely(cm == 0)) {
1352 kfree(buf);
1353 return RC_FLUSH_CONN_OUT_OOM;
1356 seqno = trgt_out_l->target.out.seqno_nextsend;
1358 cr = prepare_conn_retrans(trgt_out_l, seqno, len);
1359 if (unlikely(cr == 0)) {
1360 kfree(buf);
1361 free_control_msg(cm);
1362 return RC_FLUSH_CONN_OUT_OOM;
1365 databuf_pull(trgt_out_l, buf, len);
1366 cr->snd_delayed_lowbuf = snd_delayed_lowbuf;
1368 trgt_out_l->target.out.seqno_nextsend += len;
1370 send_conndata(cm, trgt_out_l->target.out.conn_id, seqno, buf, buf, len,
1371 snd_delayed_lowbuf, cr);
1373 return RC_FLUSH_CONN_OUT_OK;
1376 static int _flush_out(struct conn *trgt_out_l, __u32 maxsend, __u32 *sent,
1377 int from_qos)
1379 struct neighbor *nb = trgt_out_l->target.out.nb;
1381 __u32 targetmss;
1383 int nbstate;
1385 __u8 snd_delayed_lowbuf = trgt_out_l->target.out.windowlimit_reached;
1387 __u32 maxsend_left = maxsend;
1389 trgt_out_l->target.out.windowlimit_reached = 0;
1391 BUG_ON(trgt_out_l->targettype != TARGET_OUT);
1393 if (unlikely(trgt_out_l->target.out.established == 0))
1394 return RC_FLUSH_CONN_OUT_OK;
1396 if (unlikely(trgt_out_l->isreset != 0))
1397 return RC_FLUSH_CONN_OUT_OK;
1399 BUG_ON(trgt_out_l->target.out.conn_id == 0);
1401 if (unlikely(trgt_out_l->data_buf.read_remaining == 0))
1402 return RC_FLUSH_CONN_OUT_OK;
1404 #warning todo burst queue
1405 if (from_qos == 0 && may_send_conn(trgt_out_l) == 0)
1406 return RC_FLUSH_CONN_OUT_CONG;
1408 spin_lock_bh(&(nb->stalledconn_lock));
1409 nbstate = get_neigh_state(nb);
1410 if (unlikely(nbstate == NEIGHBOR_STATE_STALLED)) {
1411 BUG_ON(trgt_out_l->target.out.nbstalled_lh.prev == 0 &&
1412 trgt_out_l->target.out.nbstalled_lh.next != 0);
1413 BUG_ON(trgt_out_l->target.out.nbstalled_lh.prev != 0 &&
1414 trgt_out_l->target.out.nbstalled_lh.next == 0);
1416 if (trgt_out_l->target.out.nbstalled_lh.prev == 0) {
1417 kref_get(&(trgt_out_l->ref));
1418 list_add_tail(&(trgt_out_l->target.out.nbstalled_lh),
1419 &(nb->stalledconn_list));
1422 spin_unlock_bh(&(nb->stalledconn_lock));
1424 if (unlikely(nbstate != NEIGHBOR_STATE_ACTIVE))
1425 return RC_FLUSH_CONN_OUT_NBNOTACTIVE;
1427 /* printk(KERN_ERR "flush %p %llu %u", trgt_out_l,
1428 get_windowlimit(trgt_out_l),
1429 trgt_out_l->data_buf.read_remaining); */
1431 targetmss = mss_conndata(nb);
1433 while (trgt_out_l->data_buf.read_remaining >= targetmss) {
1434 __u64 windowlimit = get_windowlimit(trgt_out_l);
1435 int rc;
1437 if (windowlimit < targetmss) {
1438 trgt_out_l->target.out.windowlimit_reached = 1;
1439 snd_delayed_lowbuf = 1;
1440 break;
1443 if (seqno_low_sendlimit(trgt_out_l, windowlimit, targetmss)) {
1444 trgt_out_l->target.out.windowlimit_reached = 1;
1445 snd_delayed_lowbuf = 1;
1448 if (maxsend_left < targetmss)
1449 break;
1451 if (likely(send_conndata_as_skb(nb, targetmss)))
1452 rc = _flush_out_skb(trgt_out_l, targetmss,
1453 snd_delayed_lowbuf);
1454 else
1455 rc = _flush_out_conndata(trgt_out_l, targetmss,
1456 snd_delayed_lowbuf);
1458 maxsend_left -= targetmss;
1459 *sent += targetmss;
1461 if (rc != RC_FLUSH_CONN_OUT_OK)
1462 return rc;
1465 if (trgt_out_l->data_buf.read_remaining > 0) {
1466 __u16 len = trgt_out_l->data_buf.read_remaining;
1467 __u64 windowlimit = get_windowlimit(trgt_out_l);
1468 int rc;
1470 if (trgt_out_l->sourcetype == SOURCE_SOCK &&
1471 trgt_out_l->source.sock.delay_flush ==
1472 FLUSHDELAY_DELAY &&
1473 cor_sock_sndbufavailable(trgt_out_l) != 0 && (
1474 windowlimit > len ||
1475 seqno_eq(trgt_out_l->target.out.seqno_nextsend,
1476 trgt_out_l->target.out.seqno_acked) == 0))
1477 goto out;
1479 if (windowlimit == 0 || (windowlimit < len &&
1480 seqno_eq(trgt_out_l->target.out.seqno_nextsend,
1481 trgt_out_l->target.out.seqno_acked) == 0)) {
1482 trgt_out_l->target.out.windowlimit_reached = 1;
1483 snd_delayed_lowbuf = 1;
1484 goto out;
1487 if (seqno_low_sendlimit(trgt_out_l, windowlimit, len)) {
1488 trgt_out_l->target.out.windowlimit_reached = 1;
1489 snd_delayed_lowbuf = 1;
1492 if (len > windowlimit)
1493 len = windowlimit;
1495 if (maxsend_left < len) {
1496 if (maxsend == maxsend_left) {
1497 len = maxsend_left;
1498 } else {
1499 return RC_FLUSH_CONN_OUT_MAXSENT;
1503 if (send_conndata_as_skb(nb, len))
1504 rc = _flush_out_skb(trgt_out_l, len,
1505 snd_delayed_lowbuf);
1506 else
1507 rc = _flush_out_conndata(trgt_out_l, len,
1508 snd_delayed_lowbuf);
1510 maxsend_left -= len;
1511 *sent += len;
1513 if (rc != RC_FLUSH_CONN_OUT_OK)
1514 return rc;
1517 out:
1518 return RC_FLUSH_CONN_OUT_OK;
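/*
 * _flush_out() return codes as used above: OK when there is nothing (more)
 * to send or everything fit, NBNOTACTIVE when the neighbor is not in the
 * active state, CONG when may_send_conn() or the device queue refused the
 * data, OOM on allocation failure, and MAXSENT when the maxsend budget was
 * exhausted while data and window space were still available.
 */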
1521 int flush_out(struct conn *trgt_out_l, __u32 *sent)
1523 int rc = _flush_out(trgt_out_l, 1 << 30, sent, 0);
1525 if (rc == RC_FLUSH_CONN_OUT_CONG || rc == RC_FLUSH_CONN_OUT_MAXSENT ||
1526 rc == RC_FLUSH_CONN_OUT_OOM)
1527 qos_enqueue_conn(trgt_out_l);
1529 return rc;
1532 void resume_nbstalled_conns(struct work_struct *work)
1534 struct neighbor *nb = container_of(work, struct neighbor,
1535 stalledconn_work);
1536 int rc = RC_FLUSH_CONN_OUT_OK;
1538 spin_lock_bh(&(nb->stalledconn_lock));
1539 nb->stalledconn_work_scheduled = 0;
1540 while (rc != RC_FLUSH_CONN_OUT_NBNOTACTIVE &&
1541 list_empty(&(nb->stalledconn_list)) == 0) {
1542 struct list_head *lh = nb->stalledconn_list.next;
1543 struct conn *trgt_out = container_of(lh, struct conn,
1544 target.out.nbstalled_lh);
1545 __u32 sent = 0;
1546 BUG_ON(trgt_out->targettype != TARGET_OUT);
1547 list_del(lh);
1548 lh->prev = 0;
1549 lh->next = 0;
1551 spin_unlock_bh(&(nb->stalledconn_lock));
1553 spin_lock_bh(&(trgt_out->rcv_lock));
1554 if (likely(trgt_out->targettype == TARGET_OUT))
1555 rc = flush_out(trgt_out, &sent);
1556 spin_unlock_bh(&(trgt_out->rcv_lock));
1558 if (sent != 0)
1559 wake_sender(trgt_out);
1561 kref_put(&(trgt_out->ref), free_conn);
1563 spin_lock_bh(&(nb->stalledconn_lock));
1565 spin_unlock_bh(&(nb->stalledconn_lock));
1567 kref_put(&(nb->ref), neighbor_free);
1570 int __init cor_snd_init(void)
1572 connretrans_slab = kmem_cache_create("cor_connretrans",
1573 sizeof(struct conn_retrans), 8, 0, 0);
1574 if (unlikely(connretrans_slab == 0))
1575 return -ENOMEM;
1577 return 0;
1580 MODULE_LICENSE("GPL");