/**
 * Connection oriented routing
 * Copyright (C) 2007-2019  Michael Blizek
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version 2
 * of the License, or (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
 * 02110-1301, USA.
 */
#include <linux/gfp.h>
#include <linux/jiffies.h>
#include <linux/slab.h>

#include "cor.h"
static struct kmem_cache *connretrans_slab;

static DEFINE_SPINLOCK(queues_lock);
static LIST_HEAD(queues);
static int _flush_out(struct conn *trgt_out_l, __u32 maxsend, __u32 *sent,
		int from_qos);

#warning todo packet loss should slow sending of affected neighbor down
#ifdef DEBUG_QOS_SLOWSEND
static DEFINE_SPINLOCK(slowsend_lock);
static unsigned long last_send;

int cor_dev_queue_xmit(struct sk_buff *skb, int caller)
{
	int allowsend = 0;
	unsigned long jiffies_tmp;

	spin_lock_bh(&slowsend_lock);
	jiffies_tmp = jiffies;
	if (time_after(last_send, jiffies_tmp) ||
			time_before_eq(last_send + HZ/10, jiffies_tmp)) {
		last_send = jiffies_tmp;
		allowsend = 1;
	}
	spin_unlock_bh(&slowsend_lock);

	/* printk(KERN_ERR "cor_dev_queue_xmit %d, %d", caller, allowsend); */

	if (allowsend)
		return dev_queue_xmit(skb);

	kfree_skb(skb);
	return NET_XMIT_DROP;
}
#endif
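/*
 * Note (reconstruction): with DEBUG_QOS_SLOWSEND defined, at most one frame
 * per HZ/10 jiffies is handed to dev_queue_xmit(); everything else is
 * dropped, so that the congestion paths below can be exercised even on an
 * otherwise idle link. Without the define, cor_dev_queue_xmit() is assumed
 * to be a plain dev_queue_xmit() wrapper that ignores the caller argument.
 */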
static void free_connretrans(struct kref *ref)
{
	struct conn_retrans *cr = container_of(ref, struct conn_retrans, ref);
	struct conn *cn = cr->trgt_out_o;

	kmem_cache_free(connretrans_slab, cr);
	kref_put(&(cn->ref), free_conn);
}
void free_qos(struct kref *ref)
{
	struct qos_queue *q = container_of(ref, struct qos_queue, ref);

	kfree(q);
}
static __u64 _resume_conns_maxsend(struct qos_queue *q, __u32 numconns,
		struct conn *trgt_out_l, __u32 newpriority)
{
	__u32 oldpriority = trgt_out_l->target.out.rb_priority;
	__u64 priority_sum_old = atomic64_read(&(q->priority_sum));
	__u64 priority_sum;
	__u64 cmpxchg_ret;

	while (1) {
		priority_sum = priority_sum_old;

		BUG_ON(priority_sum < oldpriority);
		priority_sum -= oldpriority;

		BUG_ON(priority_sum + newpriority < priority_sum);
		priority_sum += newpriority;

		cmpxchg_ret = atomic64_cmpxchg(&(q->priority_sum),
				priority_sum_old, priority_sum);
		if (likely(cmpxchg_ret == priority_sum_old))
			break;
		priority_sum_old = cmpxchg_ret;
	}

	trgt_out_l->target.out.rb_priority = newpriority;

	return div_u64(2048LL * ((__u64) newpriority) * ((__u64) numconns),
			priority_sum);
}
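/*
 * Fair-share sketch: with priority_sum P over all conns waiting on this
 * queue, a conn with priority p out of numconns n may send roughly
 * 2048 * p * n / P bytes per resume round. For n conns of equal priority
 * this reduces to 2048 bytes each, independent of n.
 */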
static int resume_conns(struct qos_queue *q)
{
	unsigned long iflags;

	while (1) {
		__u32 priority;
		__u64 maxsend;

		int rc2;
		__u32 sent2 = 0;

		struct conn *cn = 0;
		__u32 numconns;

		spin_lock_irqsave(&(q->qlock), iflags);
		if (list_empty(&(q->conns_waiting)) == 0) {
			cn = container_of(q->conns_waiting.next,
					struct conn, target.out.rb.lh);
			numconns = q->numconns;
			BUG_ON(cn->targettype != TARGET_OUT);
			BUG_ON(cn->target.out.rb.lh.prev !=
					&(q->conns_waiting));
			BUG_ON((cn->target.out.rb.lh.next ==
					&(q->conns_waiting)) && (
					q->conns_waiting.prev !=
					&(cn->target.out.rb.lh)));
			list_del(&(cn->target.out.rb.lh));
			list_add_tail(&(cn->target.out.rb.lh),
					&(q->conns_waiting));
			kref_get(&(cn->ref));
		}
		spin_unlock_irqrestore(&(q->qlock), iflags);

		if (cn == 0)
			return QOS_RESUME_DONE;

		priority = refresh_conn_priority(cn, 0);

		spin_lock_bh(&(cn->rcv_lock));

		if (unlikely(cn->targettype != TARGET_OUT)) {
			spin_unlock_bh(&(cn->rcv_lock));
			kref_put(&(cn->ref), free_conn);
			continue;
		}

		maxsend = _resume_conns_maxsend(q, numconns, cn, priority);
		maxsend += cn->target.out.maxsend_extra;
		if (unlikely(maxsend > U32_MAX))
			maxsend = U32_MAX;

		rc2 = _flush_out(cn, maxsend, &sent2, 1);

		if (rc2 == RC_FLUSH_CONN_OUT_OK ||
				rc2 == RC_FLUSH_CONN_OUT_NBNOTACTIVE) {
			cn->target.out.maxsend_extra = 0;
			qos_remove_conn(cn);
		} else if (sent2 == 0 && (rc2 == RC_FLUSH_CONN_OUT_CONG ||
				rc2 == RC_FLUSH_CONN_OUT_OOM)) {
			spin_lock_irqsave(&(q->qlock), iflags);
			if (likely(cn->target.out.rb.in_queue != 0)) {
				list_del(&(cn->target.out.rb.lh));
				list_add(&(cn->target.out.rb.lh),
						&(q->conns_waiting));
			}
			spin_unlock_irqrestore(&(q->qlock), iflags);
		} else if (rc2 == RC_FLUSH_CONN_OUT_CONG ||
				rc2 == RC_FLUSH_CONN_OUT_OOM) {
			cn->target.out.maxsend_extra = 0;
		} else if (likely(rc2 == RC_FLUSH_CONN_OUT_MAXSENT)) {
			if (unlikely(maxsend - sent2 > 65535))
				cn->target.out.maxsend_extra = 65535;
			else
				cn->target.out.maxsend_extra =
						(__u32) (maxsend - sent2);
		}

		spin_unlock_bh(&(cn->rcv_lock));

		kref_put(&(cn->ref), free_conn);

		if (rc2 == RC_FLUSH_CONN_OUT_CONG ||
				rc2 == RC_FLUSH_CONN_OUT_OOM) {
			if (sent2 != 0)
				return QOS_RESUME_CONG;
			return QOS_RESUME_CONG_NOPROGRESS;
		}
	}
}
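/*
 * resume_conns() rotation: the first waiting conn is moved to the tail
 * before flushing, so repeated rounds cycle through all waiting conns
 * instead of starving the later ones. Unused budget (up to 65535 bytes)
 * is carried over in maxsend_extra for the next round.
 */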
static int send_retrans(struct neighbor *nb, int fromqos);
static int _qos_resume(struct qos_queue *q, int caller)
{
	unsigned long iflags;
	int rc = QOS_RESUME_DONE;
	struct list_head *lh;

	spin_lock_irqsave(&(q->qlock), iflags);

	if (caller == QOS_CALLER_KPACKET)
		lh = &(q->conn_retrans_waiting);
	else if (caller == QOS_CALLER_CONN_RETRANS)
		lh = &(q->kpackets_waiting);
	else if (caller == QOS_CALLER_ANNOUNCE)
		lh = &(q->announce_waiting);
	else
		BUG();

	while (list_empty(lh) == 0) {
		struct list_head *curr = lh->next;
		struct resume_block *rb = container_of(curr,
				struct resume_block, lh);
		rb->in_queue = 0;
		list_del(curr);

		spin_unlock_irqrestore(&(q->qlock), iflags);
		if (caller == QOS_CALLER_KPACKET) {
			rc = send_messages(container_of(rb, struct neighbor,
					rb_kp), 1);
		} else if (caller == QOS_CALLER_CONN_RETRANS) {
			rc = send_retrans(container_of(rb, struct neighbor,
					rb_cr), 1);
		} else if (caller == QOS_CALLER_ANNOUNCE) {
			rc = _send_announce(container_of(rb,
					struct announce_data, rb), 1);
		} else {
			BUG();
		}
		spin_lock_irqsave(&(q->qlock), iflags);

		if (rc != 0 && rb->in_queue == 0) {
			rb->in_queue = 1;
			list_add(curr, lh);
		} else {
			if (caller == QOS_CALLER_KPACKET) {
				kref_put(&(container_of(rb, struct neighbor,
						rb_kp)->ref), neighbor_free);
			} else if (caller == QOS_CALLER_CONN_RETRANS) {
				kref_put(&(container_of(rb, struct neighbor,
						rb_cr)->ref), neighbor_free);
			} else if (caller == QOS_CALLER_ANNOUNCE) {
				kref_put(&(container_of(rb,
						struct announce_data,
						rb)->ref),
						announce_data_free);
			}

			kref_put(&(q->ref), kreffree_bug);
		}

		if (rc != 0)
			break;
	}
	spin_unlock_irqrestore(&(q->qlock), iflags);

	return rc;
}
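/*
 * Re-queueing rule in _qos_resume(): a resume block whose send callback
 * reports congestion (rc != 0) and that was not re-enqueued concurrently
 * is put back on its list with its references kept; otherwise the list's
 * references on the neighbor/announce_data and on the queue are dropped
 * here.
 */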
void qos_resume_taskfunc(unsigned long arg)
{
	struct qos_queue *q = (struct qos_queue *) arg;

	int rc = QOS_RESUME_DONE;
	int sent = 0;
	unsigned long iflags;
	int i;

	spin_lock_irqsave(&(q->qlock), iflags);

	for (i = 0; i < 4 && rc == QOS_RESUME_DONE; i++) {
		struct list_head *lh;

		if (i == QOS_CALLER_KPACKET)
			lh = &(q->conn_retrans_waiting);
		else if (i == QOS_CALLER_CONN_RETRANS)
			lh = &(q->kpackets_waiting);
		else if (i == QOS_CALLER_ANNOUNCE)
			lh = &(q->announce_waiting);
		else if (i == QOS_CALLER_CONN)
			lh = &(q->conns_waiting);
		else
			BUG();

		if (list_empty(lh))
			continue;

		spin_unlock_irqrestore(&(q->qlock), iflags);
		if (i == QOS_CALLER_CONN) {
			rc = resume_conns(q);
		} else {
			rc = _qos_resume(q, i);
		}

		sent = sent || (rc != QOS_RESUME_CONG_NOPROGRESS);

		spin_lock_irqsave(&(q->qlock), iflags);
	}

	if (rc == QOS_RESUME_DONE) {
		q->qos_resume_scheduled = 0;
	} else {
		unsigned long jiffies_tmp = jiffies;
		unsigned long delay = (jiffies_tmp - q->jiffies_lastprogress +
				3) / 4;

		if (sent || unlikely(delay <= 0)) {
			q->jiffies_lastprogress = jiffies_tmp;
			delay = 1;
		} else if (delay > HZ/10) {
			q->jiffies_lastprogress = jiffies_tmp - (HZ*4)/10;
			delay = HZ/10;
		}

		/* If we retry too fast here, we might starve layer 2 */
		mod_timer(&(q->qos_resume_timer), jiffies_tmp + delay);
	}

	spin_unlock_irqrestore(&(q->qlock), iflags);

	if (rc == QOS_RESUME_DONE)
		kref_put(&(q->ref), free_qos);
}
void qos_resume_timerfunc(struct timer_list *qos_resume_timer)
{
	struct qos_queue *q = container_of(qos_resume_timer,
			struct qos_queue, qos_resume_timer);
	tasklet_schedule(&(q->qos_resume_task));
}
struct qos_queue *get_queue(struct net_device *dev)
{
	struct qos_queue *ret = 0;
	struct list_head *curr;

	spin_lock_bh(&(queues_lock));
	curr = queues.next;
	while (curr != (&queues)) {
		struct qos_queue *q = container_of(curr,
				struct qos_queue, queue_list);
		if (q->dev == dev) {
			ret = q;
			kref_get(&(ret->ref));
			break;
		}
		curr = curr->next;
	}
	spin_unlock_bh(&(queues_lock));

	return ret;
}
static void _destroy_queue(struct qos_queue *q, int caller)
{
	struct list_head *lh;

	if (caller == QOS_CALLER_KPACKET)
		lh = &(q->conn_retrans_waiting);
	else if (caller == QOS_CALLER_CONN_RETRANS)
		lh = &(q->kpackets_waiting);
	else if (caller == QOS_CALLER_ANNOUNCE)
		lh = &(q->announce_waiting);
	else
		BUG();

	while (list_empty(lh) == 0) {
		struct list_head *curr = lh->next;
		struct resume_block *rb = container_of(curr,
				struct resume_block, lh);
		rb->in_queue = 0;
		list_del(curr);

		if (caller == QOS_CALLER_KPACKET) {
			kref_put(&(container_of(rb, struct neighbor,
					rb_kp)->ref), neighbor_free);
		} else if (caller == QOS_CALLER_CONN_RETRANS) {
			kref_put(&(container_of(rb, struct neighbor,
					rb_cr)->ref), neighbor_free);
		} else if (caller == QOS_CALLER_ANNOUNCE) {
			kref_put(&(container_of(rb,
					struct announce_data, rb)->ref),
					announce_data_free);
		}

		kref_put(&(q->ref), kreffree_bug);
	}
}
static struct qos_queue *unlink_queue(struct net_device *dev)
{
	struct qos_queue *ret = 0;
	struct list_head *curr;

	spin_lock_bh(&(queues_lock));
	curr = queues.next;
	while (curr != (&queues)) {
		struct qos_queue *q = container_of(curr,
				struct qos_queue, queue_list);
		curr = curr->next;
		if (dev == 0 || q->dev == dev) {
			ret = q;
			kref_get(&(ret->ref));

			list_del(&(q->queue_list));
			kref_put(&(q->ref), kreffree_bug);
			break;
		}
	}
	spin_unlock_bh(&(queues_lock));

	return ret;
}
int destroy_queue(struct net_device *dev)
{
	unsigned long iflags;

	struct qos_queue *q = unlink_queue(dev);

	if (q == 0)
		return 1;

	spin_lock_irqsave(&(q->qlock), iflags);
	if (q->dev != 0) {
		dev_put(q->dev);
		q->dev = 0;
	}
	_destroy_queue(q, QOS_CALLER_KPACKET);
	_destroy_queue(q, QOS_CALLER_CONN_RETRANS);
	_destroy_queue(q, QOS_CALLER_ANNOUNCE);
	spin_unlock_irqrestore(&(q->qlock), iflags);

	kref_put(&(q->ref), free_qos);

	return 0;
}
int create_queue(struct net_device *dev)
{
	struct qos_queue *q = kmalloc(sizeof(struct qos_queue), GFP_KERNEL);

	if (unlikely(q == 0)) {
		printk(KERN_ERR "cor: unable to allocate memory for device "
				"queue, not enabling device");
		return 1;
	}

	memset(q, 0, sizeof(struct qos_queue));

	spin_lock_init(&(q->qlock));

	kref_init(&(q->ref));

	q->dev = dev;
	dev_hold(dev);

	timer_setup(&(q->qos_resume_timer), qos_resume_timerfunc, 0);
	tasklet_init(&(q->qos_resume_task), qos_resume_taskfunc,
			(unsigned long) q);

	INIT_LIST_HEAD(&(q->kpackets_waiting));
	INIT_LIST_HEAD(&(q->conn_retrans_waiting));
	INIT_LIST_HEAD(&(q->announce_waiting));
	INIT_LIST_HEAD(&(q->conns_waiting));

	spin_lock_bh(&(queues_lock));
	list_add(&(q->queue_list), &queues);
	spin_unlock_bh(&(queues_lock));

	atomic64_set(&(q->priority_sum), 0);

	return 0;
}
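/*
 * Usage sketch (assumed, not part of this file): create_queue()/
 * destroy_queue() are meant to be driven from a netdevice notifier, e.g.:
 *
 *	static int cor_netdev_event(struct notifier_block *nb,
 *			unsigned long event, void *ptr)
 *	{
 *		struct net_device *dev = netdev_notifier_info_to_dev(ptr);
 *
 *		if (event == NETDEV_UP)
 *			return create_queue(dev) == 0 ?
 *					NOTIFY_OK : NOTIFY_BAD;
 *		if (event == NETDEV_DOWN)
 *			destroy_queue(dev);
 *		return NOTIFY_DONE;
 *	}
 */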
void qos_enqueue(struct qos_queue *q, struct resume_block *rb, int caller)
{
	unsigned long iflags;

	spin_lock_irqsave(&(q->qlock), iflags);

	if (rb->in_queue)
		goto out;

	kref_get(&(q->ref));
	rb->in_queue = 1;

	if (caller == QOS_CALLER_KPACKET) {
		list_add(&(rb->lh), &(q->conn_retrans_waiting));
		kref_get(&(container_of(rb, struct neighbor, rb_kp)->ref));
	} else if (caller == QOS_CALLER_CONN_RETRANS) {
		list_add(&(rb->lh), &(q->kpackets_waiting));
		kref_get(&(container_of(rb, struct neighbor, rb_cr)->ref));
	} else if (caller == QOS_CALLER_ANNOUNCE) {
		list_add(&(rb->lh), &(q->announce_waiting));
		kref_get(&(container_of(rb, struct announce_data, rb)->ref));
	} else if (caller == QOS_CALLER_CONN) {
		list_add(&(rb->lh), &(q->conns_waiting));
		kref_get(&(container_of(rb, struct conn,
				target.out.rb)->ref));
		q->numconns++;
	} else {
		BUG();
	}

	if (q->qos_resume_scheduled == 0) {
		q->jiffies_lastprogress = jiffies;
		mod_timer(&(q->qos_resume_timer), jiffies + 1);
		q->qos_resume_scheduled = 1;
	}

out:
	spin_unlock_irqrestore(&(q->qlock), iflags);
}
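/*
 * qos_enqueue() reference discipline: each resume block on a waiting list
 * pins both its owner (neighbor, announce_data or conn) and the queue
 * itself; in_queue guards against double insertion. The dequeue paths in
 * _qos_resume()/resume_conns()/_destroy_queue() drop these references
 * again.
 */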
void qos_remove_conn(struct conn *trgt_out_l)
{
	unsigned long iflags;
	struct qos_queue *q;

	BUG_ON(trgt_out_l->targettype != TARGET_OUT);

	q = trgt_out_l->target.out.nb->queue;
	BUG_ON(q == 0);

	spin_lock_irqsave(&(q->qlock), iflags);

	if (trgt_out_l->target.out.rb.in_queue == 0) {
		spin_unlock_irqrestore(&(q->qlock), iflags);
		return;
	}

	trgt_out_l->target.out.rb.in_queue = 0;
	list_del(&(trgt_out_l->target.out.rb.lh));
	q->numconns--;
	atomic64_sub(trgt_out_l->target.out.rb_priority, &(q->priority_sum));
	trgt_out_l->target.out.rb_priority = 0;
	spin_unlock_irqrestore(&(q->qlock), iflags);

	kref_put(&(trgt_out_l->ref), kreffree_bug);

	kref_put(&(q->ref), free_qos);
}
static void qos_enqueue_conn(struct conn *trgt_out_l)
{
	BUG_ON(trgt_out_l->data_buf.read_remaining == 0);
	qos_enqueue(trgt_out_l->target.out.nb->queue,
			&(trgt_out_l->target.out.rb), QOS_CALLER_CONN);
}
static int may_send_conn_retrans(struct neighbor *nb)
{
	unsigned long iflags;
	int rc;

	BUG_ON(nb->queue == 0);

	spin_lock_irqsave(&(nb->queue->qlock), iflags);
	rc = (list_empty(&(nb->queue->kpackets_waiting)));
	spin_unlock_irqrestore(&(nb->queue->qlock), iflags);

	return rc;
}
int may_send_announce(struct net_device *dev)
{
	unsigned long iflags;
	struct qos_queue *q = get_queue(dev);
	int rc;

	if (q == 0)
		return 0;

	spin_lock_irqsave(&(q->qlock), iflags);
	rc = (list_empty(&(q->kpackets_waiting)) &&
			list_empty(&(q->conn_retrans_waiting)) &&
			list_empty(&(q->announce_waiting)));
	spin_unlock_irqrestore(&(q->qlock), iflags);

	kref_put(&(q->ref), free_qos);

	return rc;
}
static int may_send_conn(struct conn *trgt_out_l)
{
	unsigned long iflags;
	struct qos_queue *q = trgt_out_l->target.out.nb->queue;
	int rc;

	BUG_ON(q == 0);

	spin_lock_irqsave(&(q->qlock), iflags);
	rc = (list_empty(&(q->kpackets_waiting)) &&
			list_empty(&(q->conn_retrans_waiting)) &&
			list_empty(&(q->announce_waiting)) &&
			list_empty(&(q->conns_waiting)));
	spin_unlock_irqrestore(&(q->qlock), iflags);

	return rc;
}
#warning todo activate this - needs newer upstream kernel (e.g. 3.13)

#include <net/cfg80211.h>

#include "../wireless/core.h"
#include "../wireless/rdev-ops.h"

static DEFINE_SPINLOCK(sinfo_lock);
static struct station_info sinfo;

static __u32 mss_tmp(struct neighbor *nb, __u32 l3overhead)
{
	struct net_device *dev = nb->dev;
	struct wireless_dev *wdev;
	struct cfg80211_registered_device *rdev;
	__u32 rate_kbit = 0;

	__u32 mtu = ((dev->mtu > 4096) ? 4096 : dev->mtu) -
			LL_RESERVED_SPACE(dev);

	/* see cfg80211_wext_giwrate */

	wdev = dev->ieee80211_ptr;
	if (wdev == 0)
		goto out;
	rdev = wiphy_to_dev(wdev->wiphy);
	if (rdev == 0 || rdev->ops->get_station == 0)
		goto out;

	if (sizeof(nb->mac) < ETH_ALEN || MAX_ADDR_LEN < ETH_ALEN)
		goto out;

	/* sinfo is global because of size */
	spin_lock_bh(&sinfo_lock);
	if (rdev_get_station(rdev, dev, nb->mac, &sinfo) != 0) {
		/* unknown neighbor */
	} else if ((sinfo.filled & STATION_INFO_TX_BITRATE) == 0) {
		/* no bitrate information */
	} else {
		rate_kbit = 100 * cfg80211_calculate_bitrate(&(sinfo.txrate));
	}
	spin_unlock_bh(&sinfo_lock);

	if (rate_kbit != 0) {
		/* amount of data which can be sent in 1ms */
		__u32 mtu_ratemax = rate_kbit/8;

		if (mtu_ratemax < 128)
			mtu_ratemax = 128;

		if (mtu > mtu_ratemax)
			mtu = mtu_ratemax;
	}

out:
	if (unlikely(mtu < l3overhead))
		return 0;

	return mtu - l3overhead;
}
static struct sk_buff *create_packet(struct neighbor *nb, int size,
		gfp_t alloc_flags)
{
	struct sk_buff *ret;

	ret = alloc_skb(size + LL_RESERVED_SPACE(nb->dev) +
			nb->dev->needed_tailroom, alloc_flags);
	if (unlikely(ret == 0))
		return 0;

	ret->protocol = htons(ETH_P_COR);
	ret->dev = nb->dev;

	skb_reserve(ret, LL_RESERVED_SPACE(nb->dev));
	if (unlikely(dev_hard_header(ret, nb->dev, ETH_P_COR, nb->mac,
			nb->dev->dev_addr, ret->len) < 0)) {
		kfree_skb(ret);
		return 0;
	}
	skb_reset_network_header(ret);

	return ret;
}
struct sk_buff *create_packet_cmsg(struct neighbor *nb, int size,
		gfp_t alloc_flags, __u64 seqno)
{
	struct sk_buff *ret;
	char *dest;

	ret = create_packet(nb, size + 7, alloc_flags);
	if (unlikely(ret == 0))
		return 0;

	dest = skb_put(ret, 7);
	BUG_ON(dest == 0);

	dest[0] = PACKET_TYPE_CMSG;
	dest += 1;

	put_u48(dest, seqno);
	dest += 6;

	return ret;
}
struct sk_buff *create_packet_conndata(struct neighbor *nb, int size,
		gfp_t alloc_flags, __u32 conn_id, __u64 seqno,
		__u8 snd_delayed_lowbuf)
{
	struct sk_buff *ret;
	char *dest;

	ret = create_packet(nb, size + 11, alloc_flags);
	if (unlikely(ret == 0))
		return 0;

	dest = skb_put(ret, 11);
	BUG_ON(dest == 0);

	if (snd_delayed_lowbuf != 0)
		dest[0] = PACKET_TYPE_CONNDATA_LOWBUFDELAYED;
	else
		dest[0] = PACKET_TYPE_CONNDATA;
	dest += 1;

	put_u32(dest, conn_id);
	dest += 4;
	put_u48(dest, seqno);
	dest += 6;

	return ret;
}
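/*
 * Wire layout of the conndata header written above (1 + 4 + 6 bytes):
 *
 *	byte 0:     PACKET_TYPE_CONNDATA or
 *	            PACKET_TYPE_CONNDATA_LOWBUFDELAYED
 *	bytes 1-4:  conn_id (put_u32)
 *	bytes 5-10: 48 bit seqno (put_u48)
 *
 * The cmsg header above is analogous, with only the type byte and the
 * 48 bit seqno (1 + 6 bytes).
 */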
/*
 * warning: all callers must do the following calls in this order:
 * ...
 * reschedule_conn_retrans_timer
 * ...
 * This is because this function calls kref_put.
 */
void reschedule_conn_retrans_timer(struct neighbor *nb_retranslocked)
{
	struct conn_retrans *cr = 0;

	if (list_empty(&(nb_retranslocked->retrans_list_conn)))
		return;

	if (nb_retranslocked->retrans_conn_running != 0)
		return;

	cr = container_of(nb_retranslocked->retrans_list_conn.next,
			struct conn_retrans, timeout_list);

	if (nb_retranslocked->retrans_timer_conn_running == 0) {
		nb_retranslocked->retrans_timer_conn_running = 1;
		kref_get(&(nb_retranslocked->ref));
	}

	mod_timer(&(nb_retranslocked->retrans_timer_conn), cr->timeout);
}
/*
 * caller must also call kref_get/put, see reschedule_conn_retrans_timer
 */
static void cancel_conn_retrans(struct neighbor *nb_retranslocked,
		struct conn_retrans *cr)
{
	if (unlikely(cr->ackrcvd))
		return;

	if (cr->scheduled) {
		list_del(&(cr->timeout_list));
		cr->scheduled = 0;
		kref_put(&(cr->ref), kreffree_bug);
	}

	list_del(&(cr->conn_list));
	cr->ackrcvd = 1;
	kref_put(&(cr->ref), free_connretrans);

	reschedule_conn_retrans_timer(nb_retranslocked);
}
void cancel_conn_all_retrans(struct conn *trgt_out_l)
{
	struct neighbor *nb = trgt_out_l->target.out.nb;

	spin_lock_bh(&(nb->retrans_lock));

	while (list_empty(&(trgt_out_l->target.out.retrans_list)) == 0) {
		struct conn_retrans *cr = container_of(
				trgt_out_l->target.out.retrans_list.next,
				struct conn_retrans, conn_list);
		BUG_ON(cr->trgt_out_o != trgt_out_l);

		cancel_conn_retrans(nb, cr);
	}

	spin_unlock_bh(&(nb->retrans_lock));
}
static struct conn_retrans *prepare_conn_retrans(struct conn *trgt_out_l,
		__u64 seqno, __u32 len)
{
	struct neighbor *nb = trgt_out_l->target.out.nb;

	struct conn_retrans *cr = kmem_cache_alloc(connretrans_slab,
			GFP_ATOMIC);

	if (unlikely(cr == 0))
		return 0;

	BUG_ON(trgt_out_l->isreset != 0);

	memset(cr, 0, sizeof(struct conn_retrans));
	cr->trgt_out_o = trgt_out_l;
	kref_get(&(trgt_out_l->ref));
	cr->seqno = seqno;
	cr->length = len;
	kref_init(&(cr->ref));

	kref_get(&(cr->ref));
	spin_lock_bh(&(nb->retrans_lock));
	list_add_tail(&(cr->conn_list),
			&(cr->trgt_out_o->target.out.retrans_list));
	spin_unlock_bh(&(nb->retrans_lock));

	return cr;
}
static int _send_retrans(struct neighbor *nb, struct conn_retrans *cr)
{
	int targetmss = mss_conndata(nb);
	int queuefull = 0;

	struct conn *trgt_out_o = cr->trgt_out_o;

	spin_lock_bh(&(trgt_out_o->rcv_lock));

	BUG_ON(trgt_out_o->targettype != TARGET_OUT);
	BUG_ON(trgt_out_o->target.out.nb != nb);

	spin_lock_bh(&(nb->retrans_lock));
	if (unlikely(cr->ackrcvd)) {
		spin_unlock_bh(&(nb->retrans_lock));
		spin_unlock_bh(&(trgt_out_o->rcv_lock));
		return 0;
	}
	spin_unlock_bh(&(nb->retrans_lock));

	kref_get(&(trgt_out_o->ref));

	BUG_ON(trgt_out_o->isreset != 0);
	BUG_ON(seqno_before(cr->seqno, trgt_out_o->target.out.seqno_acked));

	if (unlikely(cr->length > targetmss)) {
		struct conn_retrans *cr2 = prepare_conn_retrans(trgt_out_o,
				cr->seqno + targetmss, cr->length - targetmss);
		if (unlikely(cr2 == 0))
			goto requeue;

		cr2->timeout = cr->timeout;
		cr2->snd_delayed_lowbuf = cr->snd_delayed_lowbuf;

		spin_lock_bh(&(nb->retrans_lock));
		list_add(&(cr2->timeout_list), &(nb->retrans_list_conn));
		cr2->scheduled = 1;
		spin_unlock_bh(&(nb->retrans_lock));

		cr->length = targetmss;
	}

	BUG_ON(cr->length == 0);

	if (send_conndata_as_skb(nb, cr->length)) {
		struct sk_buff *skb;
		char *dst;

		skb = create_packet_conndata(nb, cr->length, GFP_ATOMIC,
				trgt_out_o->target.out.conn_id, cr->seqno,
				cr->snd_delayed_lowbuf);
		if (unlikely(skb == 0))
			goto requeue;

		dst = skb_put(skb, cr->length);

		databuf_pullold(trgt_out_o, cr->seqno, dst, cr->length);

		if (cor_dev_queue_xmit(skb, QOS_CALLER_CONN_RETRANS) != 0)
			goto requeue;
		schedule_retransmit_conn(cr, 1);
	} else {
		struct control_msg_out *cm;
		char *buf;

		buf = kmalloc(cr->length, GFP_ATOMIC);
		if (unlikely(buf == 0))
			goto requeue;

		cm = alloc_control_msg(nb, ACM_PRIORITY_LOW);
		if (unlikely(cm == 0)) {
			kfree(buf);
			goto requeue;
		}

		databuf_pullold(trgt_out_o, cr->seqno, buf, cr->length);

		send_conndata(cm, trgt_out_o->target.out.conn_id,
				cr->seqno, buf, buf, cr->length,
				cr->snd_delayed_lowbuf, cr);
	}

	goto out;

requeue:
	kref_get(&(cr->ref));

	spin_lock_bh(&(nb->retrans_lock));
	BUG_ON(cr->scheduled == 1);
	if (unlikely(cr->ackrcvd)) {
		kref_put(&(cr->ref), kreffree_bug);
	} else {
		cr->timeout = jiffies + 1;
		list_add(&(cr->timeout_list), &(nb->retrans_list_conn));
		cr->scheduled = 1;
	}
	spin_unlock_bh(&(nb->retrans_lock));

	queuefull = 1;

out:
	spin_unlock_bh(&(trgt_out_o->rcv_lock));

	kref_put(&(trgt_out_o->ref), free_conn);

	return queuefull;
}
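/*
 * _send_retrans() splits a retransmit unit larger than the current
 * conndata mss: the first targetmss bytes are sent now and a second
 * conn_retrans covering the remainder is queued with the same timeout, so
 * a shrinking mss never blocks retransmission. The return value
 * (queuefull) tells send_retrans() whether the device queue pushed back.
 */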
static int send_retrans(struct neighbor *nb, int fromqos)
{
	int sent = 0;
	int queuefull = 0;

	int nbstate = get_neigh_state(nb);
	if (unlikely(nbstate == NEIGHBOR_STATE_STALLED))
		goto out;

#warning todo check windowlimit

	while (1) {
		struct conn_retrans *cr = 0;

		if (may_send_conn_retrans(nb) == 0)
			break;

		spin_lock_bh(&(nb->retrans_lock));

		if (list_empty(&(nb->retrans_list_conn))) {
			spin_unlock_bh(&(nb->retrans_lock));
			break;
		}

		cr = container_of(nb->retrans_list_conn.next,
				struct conn_retrans, timeout_list);

		BUG_ON(cr->scheduled == 0);

		if (unlikely(nbstate == NEIGHBOR_STATE_KILLED)) {
			cancel_conn_retrans(nb, cr);
			spin_unlock_bh(&(nb->retrans_lock));
			continue;
		}

		if (time_after(cr->timeout, jiffies)) {
			spin_unlock_bh(&(nb->retrans_lock));
			break;
		}

		kref_get(&(cr->ref));
		list_del(&(cr->timeout_list));
		cr->scheduled = 0;

		spin_unlock_bh(&(nb->retrans_lock));
		queuefull = _send_retrans(nb, cr);
		kref_put(&(cr->ref), free_connretrans);
		if (queuefull) {
			if (fromqos == 0)
				qos_enqueue(nb->queue, &(nb->rb_cr),
						QOS_CALLER_CONN_RETRANS);
			break;
		} else {
			sent = 1;
		}
	}

out:
	spin_lock_bh(&(nb->retrans_lock));

	if (queuefull == 0) {
		nb->retrans_conn_running = 0;
		reschedule_conn_retrans_timer(nb);
	}

	spin_unlock_bh(&(nb->retrans_lock));

	if (fromqos == 0)
		kref_put(&(nb->ref), neighbor_free);

	if (queuefull)
		return sent ? QOS_RESUME_CONG : QOS_RESUME_CONG_NOPROGRESS;
	return QOS_RESUME_DONE;
}
void retransmit_conn_taskfunc(unsigned long arg)
{
	struct neighbor *nb = (struct neighbor *) arg;
	send_retrans(nb, 0);
}
void retransmit_conn_timerfunc(struct timer_list *retrans_timer_conn)
{
	struct neighbor *nb = container_of(retrans_timer_conn,
			struct neighbor, retrans_timer_conn);

	spin_lock_bh(&(nb->retrans_lock));

	BUG_ON(nb->retrans_timer_conn_running == 0);
	BUG_ON(nb->retrans_conn_running == 1);

	nb->retrans_timer_conn_running = 0;
	nb->retrans_conn_running = 1;

	spin_unlock_bh(&(nb->retrans_lock));

	tasklet_schedule(&(nb->retrans_task_conn));
}
void conn_ack_ooo_rcvd(struct neighbor *nb, __u32 conn_id,
		struct conn *trgt_out, __u64 seqno_ooo, __u32 length)
{
	struct list_head *curr;

	if (unlikely(length == 0))
		return;

	spin_lock_bh(&(trgt_out->rcv_lock));

	if (unlikely(trgt_out->targettype != TARGET_OUT))
		goto out;
	if (unlikely(trgt_out->target.out.nb != nb))
		goto out;
	if (unlikely(trgt_out->target.out.conn_id != conn_id))
		goto out;

	kref_get(&(nb->ref));
	spin_lock_bh(&(nb->retrans_lock));

	curr = trgt_out->target.out.retrans_list.next;

	while (curr != &(trgt_out->target.out.retrans_list)) {
		struct conn_retrans *cr = container_of(curr,
				struct conn_retrans, conn_list);

		int ack_covers_start = seqno_after_eq(cr->seqno, seqno_ooo);
		int ack_covers_end = seqno_before_eq(cr->seqno + cr->length,
				seqno_ooo + length);

		curr = curr->next;

		if (seqno_before(cr->seqno + cr->length, seqno_ooo))
			continue;

		if (seqno_after(cr->seqno, seqno_ooo + length))
			break;

		if (likely(ack_covers_start && ack_covers_end)) {
			cancel_conn_retrans(nb, cr);
		} else if (ack_covers_start) {
			__u32 diff = seqno_ooo + length - cr->seqno;
			cr->seqno += diff;
			cr->length -= diff;
		} else if (ack_covers_end) {
			cr->length -= seqno_ooo + length - cr->seqno;
		}
	}

	if (unlikely(list_empty(&(trgt_out->target.out.retrans_list)))) {
		trgt_out->target.out.seqno_acked =
				trgt_out->target.out.seqno_nextsend;
	} else {
		struct conn_retrans *cr = container_of(
				trgt_out->target.out.retrans_list.next,
				struct conn_retrans, conn_list);
		if (seqno_after(cr->seqno, trgt_out->target.out.seqno_acked))
			trgt_out->target.out.seqno_acked = cr->seqno;
	}

	spin_unlock_bh(&(nb->retrans_lock));
	kref_put(&(nb->ref), neighbor_free);

out:
	spin_unlock_bh(&(trgt_out->rcv_lock));
}
void conn_ack_rcvd(struct neighbor *nb, __u32 conn_id, struct conn *trgt_out,
		__u64 seqno, int setwindow, __u8 window)
{
	spin_lock_bh(&(trgt_out->rcv_lock));

	if (unlikely(trgt_out->isreset != 0))
		goto out;
	if (unlikely(trgt_out->targettype != TARGET_OUT))
		goto out;
	if (unlikely(trgt_out->target.out.nb != nb))
		goto out;
	if (unlikely(trgt_out->reversedir->source.in.conn_id != conn_id))
		goto out;

	if (unlikely(seqno_after(seqno, trgt_out->target.out.seqno_nextsend) ||
			seqno_before(seqno, trgt_out->target.out.seqno_acked)))
		goto out;

	if (setwindow) {
		__u64 windowdec = dec_log_64_7(window);
		if (unlikely(seqno_eq(seqno,
				trgt_out->target.out.seqno_acked) &&
				unlikely(seqno_before(seqno + windowdec,
				trgt_out->target.out.seqno_windowlimit)))) {
			/* window update without new ack - do not shrink
			 * the window that was already granted */
		} else {
			trgt_out->target.out.seqno_windowlimit =
					seqno + windowdec;
		}
	}

	if (seqno == trgt_out->target.out.seqno_acked)
		goto out;

	kref_get(&(nb->ref));
	spin_lock_bh(&(nb->retrans_lock));

	trgt_out->target.out.seqno_acked = seqno;

	while (list_empty(&(trgt_out->target.out.retrans_list)) == 0) {
		struct conn_retrans *cr = container_of(
				trgt_out->target.out.retrans_list.next,
				struct conn_retrans, conn_list);

		if (seqno_after(cr->seqno + cr->length, seqno)) {
			if (seqno_before(cr->seqno, seqno)) {
				cr->length -= (seqno - cr->seqno);
				cr->seqno = seqno;
			}
			break;
		}

		cancel_conn_retrans(nb, cr);
	}

	spin_unlock_bh(&(nb->retrans_lock));
	kref_put(&(nb->ref), neighbor_free);
	databuf_ack(trgt_out, trgt_out->target.out.seqno_acked);

out:
	spin_unlock_bh(&(trgt_out->rcv_lock));

	flush_buf(trgt_out, 0);
	wake_sender(trgt_out);
}
void schedule_retransmit_conn(struct conn_retrans *cr, int connlocked)
{
	struct conn *trgt_out_o = cr->trgt_out_o;
	struct neighbor *nb;
	int first;

	if (connlocked == 0)
		spin_lock_bh(&(trgt_out_o->rcv_lock));

	BUG_ON(trgt_out_o->targettype != TARGET_OUT);
	nb = trgt_out_o->target.out.nb;

	cr->timeout = calc_timeout(atomic_read(&(nb->latency_retrans_us)),
			atomic_read(&(nb->latency_stddev_retrans_us)),
			atomic_read(&(nb->max_remote_ackconn_delay_us)));

	spin_lock_bh(&(nb->retrans_lock));

	kref_get(&(nb->ref));

	BUG_ON(cr->scheduled != 0);

	if (unlikely(cr->ackrcvd)) {
		kref_put(&(cr->ref), free_connretrans);
		goto out;
	}

	first = unlikely(list_empty(&(nb->retrans_list_conn)));
	list_add_tail(&(cr->timeout_list), &(nb->retrans_list_conn));
	cr->scheduled = 1;

	if (unlikely(first))
		reschedule_conn_retrans_timer(nb);

out:
	spin_unlock_bh(&(nb->retrans_lock));

	kref_put(&(nb->ref), neighbor_free);

	if (connlocked == 0)
		spin_unlock_bh(&(trgt_out_o->rcv_lock));
}
static __u64 get_windowlimit(struct conn *trgt_out_l)
{
	if (unlikely(seqno_before(trgt_out_l->target.out.seqno_windowlimit,
			trgt_out_l->target.out.seqno_nextsend)))
		return 0;

	return seqno_clean(trgt_out_l->target.out.seqno_windowlimit -
			trgt_out_l->target.out.seqno_nextsend);
}
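/*
 * get_windowlimit() returns the number of bytes that may still be sent
 * within the receiver-granted window:
 *	windowlimit = seqno_windowlimit - seqno_nextsend (0 if negative)
 */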
static int seqno_low_sendlimit(struct conn *trgt_out_l, __u64 windowlimit,
		__u32 sndlen)
{
	__u64 bytes_ackpending;

	BUG_ON(seqno_before(trgt_out_l->target.out.seqno_nextsend,
			trgt_out_l->target.out.seqno_acked));

	bytes_ackpending = seqno_clean(trgt_out_l->target.out.seqno_nextsend -
			trgt_out_l->target.out.seqno_acked);

	if (windowlimit <= sndlen)
		return 1;

	if (unlikely(bytes_ackpending + sndlen < bytes_ackpending))
		return 1;

	return (windowlimit - sndlen < (bytes_ackpending + sndlen) / 8) ? 1 : 0;
}
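/*
 * Heuristic: sending is considered "near the window limit" when the
 * window remaining after this send would drop below one eighth of the
 * data that would then be in flight, i.e.
 *	windowlimit - sndlen < (bytes_ackpending + sndlen) / 8
 * This sets snd_delayed_lowbuf and windowlimit_reached early instead of
 * running into a zero window.
 */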
static int _flush_out_skb(struct conn *trgt_out_l, __u32 len,
		__u8 snd_delayed_lowbuf)
{
	struct neighbor *nb = trgt_out_l->target.out.nb;

	__u64 seqno;
	struct conn_retrans *cr;
	struct sk_buff *skb;
	char *dst;

	seqno = trgt_out_l->target.out.seqno_nextsend;
	skb = create_packet_conndata(trgt_out_l->target.out.nb, len,
			GFP_ATOMIC, trgt_out_l->target.out.conn_id, seqno,
			snd_delayed_lowbuf);
	if (unlikely(skb == 0))
		return RC_FLUSH_CONN_OUT_OOM;

	cr = prepare_conn_retrans(trgt_out_l, seqno, len);
	if (unlikely(cr == 0)) {
		kfree_skb(skb);
		return RC_FLUSH_CONN_OUT_OOM;
	}

	dst = skb_put(skb, len);

	databuf_pull(trgt_out_l, dst, len);

	if (cor_dev_queue_xmit(skb, QOS_CALLER_CONN) != 0) {
		databuf_unpull(trgt_out_l, len);
		spin_lock_bh(&(nb->retrans_lock));
		cancel_conn_retrans(nb, cr);
		spin_unlock_bh(&(nb->retrans_lock));
		kref_put(&(cr->ref), free_connretrans);
		return RC_FLUSH_CONN_OUT_CONG;
	}

	cr->snd_delayed_lowbuf = snd_delayed_lowbuf;
	trgt_out_l->target.out.seqno_nextsend += len;
	schedule_retransmit_conn(cr, 1);

	return RC_FLUSH_CONN_OUT_OK;
}
#warning todo check if a conn_data is in the queue and combine
static int _flush_out_conndata(struct conn *trgt_out_l, __u16 len,
		__u8 snd_delayed_lowbuf)
{
	__u64 seqno;
	struct control_msg_out *cm;
	struct conn_retrans *cr;
	char *buf;

	buf = kmalloc(len, GFP_ATOMIC);

	if (unlikely(buf == 0))
		return RC_FLUSH_CONN_OUT_OOM;

	cm = alloc_control_msg(trgt_out_l->target.out.nb, ACM_PRIORITY_LOW);
	if (unlikely(cm == 0)) {
		kfree(buf);
		return RC_FLUSH_CONN_OUT_OOM;
	}

	seqno = trgt_out_l->target.out.seqno_nextsend;

	cr = prepare_conn_retrans(trgt_out_l, seqno, len);
	if (unlikely(cr == 0)) {
		kfree(buf);
		free_control_msg(cm);
		return RC_FLUSH_CONN_OUT_OOM;
	}

	databuf_pull(trgt_out_l, buf, len);
	cr->snd_delayed_lowbuf = snd_delayed_lowbuf;

	trgt_out_l->target.out.seqno_nextsend += len;

	send_conndata(cm, trgt_out_l->target.out.conn_id, seqno, buf, buf, len,
			snd_delayed_lowbuf, cr);

	return RC_FLUSH_CONN_OUT_OK;
}
static int _flush_out(struct conn *trgt_out_l, __u32 maxsend, __u32 *sent,
		int from_qos)
{
	struct neighbor *nb = trgt_out_l->target.out.nb;

	int rc;
	int nbstate;
	__u32 targetmss;
	__u8 snd_delayed_lowbuf = trgt_out_l->target.out.windowlimit_reached;

	__u32 maxsend_left = maxsend;

	trgt_out_l->target.out.windowlimit_reached = 0;

	BUG_ON(trgt_out_l->targettype != TARGET_OUT);

	if (unlikely(trgt_out_l->target.out.established == 0))
		return RC_FLUSH_CONN_OUT_OK;

	if (unlikely(trgt_out_l->isreset != 0))
		return RC_FLUSH_CONN_OUT_OK;

	BUG_ON(trgt_out_l->target.out.conn_id == 0);

	if (unlikely(trgt_out_l->data_buf.read_remaining == 0))
		return RC_FLUSH_CONN_OUT_OK;

#warning todo burst queue
	if (from_qos == 0 && may_send_conn(trgt_out_l) == 0)
		return RC_FLUSH_CONN_OUT_CONG;

	spin_lock_bh(&(nb->stalledconn_lock));
	nbstate = get_neigh_state(nb);
	if (unlikely(nbstate == NEIGHBOR_STATE_STALLED)) {
		BUG_ON(trgt_out_l->target.out.nbstalled_lh.prev == 0 &&
				trgt_out_l->target.out.nbstalled_lh.next != 0);
		BUG_ON(trgt_out_l->target.out.nbstalled_lh.prev != 0 &&
				trgt_out_l->target.out.nbstalled_lh.next == 0);

		if (trgt_out_l->target.out.nbstalled_lh.prev == 0) {
			kref_get(&(trgt_out_l->ref));
			list_add_tail(&(trgt_out_l->target.out.nbstalled_lh),
					&(nb->stalledconn_list));
		}
	}
	spin_unlock_bh(&(nb->stalledconn_lock));

	if (unlikely(nbstate != NEIGHBOR_STATE_ACTIVE))
		return RC_FLUSH_CONN_OUT_NBNOTACTIVE;

	/* printk(KERN_ERR "flush %p %llu %u", trgt_out_l,
			get_windowlimit(trgt_out_l),
			trgt_out_l->data_buf.read_remaining); */

	targetmss = mss_conndata(nb);

	while (trgt_out_l->data_buf.read_remaining >= targetmss) {
		__u64 windowlimit = get_windowlimit(trgt_out_l);

		if (windowlimit < targetmss) {
			trgt_out_l->target.out.windowlimit_reached = 1;
			snd_delayed_lowbuf = 1;
			break;
		}

		if (seqno_low_sendlimit(trgt_out_l, windowlimit, targetmss)) {
			trgt_out_l->target.out.windowlimit_reached = 1;
			snd_delayed_lowbuf = 1;
		}

		if (maxsend_left < targetmss)
			break;

		if (likely(send_conndata_as_skb(nb, targetmss)))
			rc = _flush_out_skb(trgt_out_l, targetmss,
					snd_delayed_lowbuf);
		else
			rc = _flush_out_conndata(trgt_out_l, targetmss,
					snd_delayed_lowbuf);

		maxsend_left -= targetmss;

		if (rc != RC_FLUSH_CONN_OUT_OK)
			return rc;

		*sent += targetmss;
	}

	if (trgt_out_l->data_buf.read_remaining > 0) {
		__u16 len = trgt_out_l->data_buf.read_remaining;
		__u64 windowlimit = get_windowlimit(trgt_out_l);

		if (trgt_out_l->sourcetype == SOURCE_SOCK &&
				trgt_out_l->source.sock.delay_flush != 0 &&
				cor_sock_sndbufavailable(trgt_out_l) != 0 && (
				windowlimit > len ||
				seqno_eq(trgt_out_l->target.out.seqno_nextsend,
				trgt_out_l->target.out.seqno_acked) == 0))
			goto out;

		if (windowlimit == 0 || (windowlimit < len &&
				seqno_eq(trgt_out_l->target.out.seqno_nextsend,
				trgt_out_l->target.out.seqno_acked) == 0)) {
			trgt_out_l->target.out.windowlimit_reached = 1;
			snd_delayed_lowbuf = 1;
			goto out;
		}

		if (seqno_low_sendlimit(trgt_out_l, windowlimit, len)) {
			trgt_out_l->target.out.windowlimit_reached = 1;
			snd_delayed_lowbuf = 1;
		}

		if (len > windowlimit)
			len = windowlimit;

		if (maxsend_left < len) {
			if (maxsend == maxsend_left) {
				len = maxsend_left;
			} else {
				return RC_FLUSH_CONN_OUT_MAXSENT;
			}
		}

		if (send_conndata_as_skb(nb, len))
			rc = _flush_out_skb(trgt_out_l, len,
					snd_delayed_lowbuf);
		else
			rc = _flush_out_conndata(trgt_out_l, len,
					snd_delayed_lowbuf);

		maxsend_left -= len;

		if (rc != RC_FLUSH_CONN_OUT_OK)
			return rc;

		*sent += len;
	}

out:
	return RC_FLUSH_CONN_OUT_OK;
}
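/*
 * _flush_out() return codes (as used by resume_conns() and flush_out()):
 * OK/NBNOTACTIVE mean the conn may leave the queue, CONG/OOM mean the
 * device queue or allocator pushed back and the conn should stay queued,
 * MAXSENT means the per-round budget was exhausted and the remainder is
 * carried over via maxsend_extra.
 */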
int flush_out(struct conn *trgt_out_l, __u32 *sent)
{
	int rc = _flush_out(trgt_out_l, 1 << 30, sent, 0);

	if (rc == RC_FLUSH_CONN_OUT_CONG || rc == RC_FLUSH_CONN_OUT_MAXSENT ||
			rc == RC_FLUSH_CONN_OUT_OOM)
		qos_enqueue_conn(trgt_out_l);

	return rc;
}
void resume_nbstalled_conns(struct work_struct *work)
{
	struct neighbor *nb = container_of(work, struct neighbor,
			stalledconn_work);
	int rc = RC_FLUSH_CONN_OUT_OK;

	spin_lock_bh(&(nb->stalledconn_lock));
	nb->stalledconn_work_scheduled = 0;
	while (rc != RC_FLUSH_CONN_OUT_NBNOTACTIVE &&
			list_empty(&(nb->stalledconn_list)) == 0) {
		struct list_head *lh = nb->stalledconn_list.next;
		struct conn *trgt_out = container_of(lh, struct conn,
				target.out.nbstalled_lh);
		__u32 sent = 0;
		BUG_ON(trgt_out->targettype != TARGET_OUT);
		list_del(lh);
		lh->prev = 0;
		lh->next = 0;
		spin_unlock_bh(&(nb->stalledconn_lock));

		spin_lock_bh(&(trgt_out->rcv_lock));
		if (likely(trgt_out->targettype == TARGET_OUT))
			rc = flush_out(trgt_out, &sent);
		spin_unlock_bh(&(trgt_out->rcv_lock));

		if (sent != 0)
			wake_sender(trgt_out);

		kref_put(&(trgt_out->ref), free_conn);

		spin_lock_bh(&(nb->stalledconn_lock));
	}
	spin_unlock_bh(&(nb->stalledconn_lock));

	kref_put(&(nb->ref), neighbor_free);
}
int __init cor_snd_init(void)
{
	connretrans_slab = kmem_cache_create("cor_connretrans",
			sizeof(struct conn_retrans), 8, 0, 0);
	if (unlikely(connretrans_slab == 0))
		return -ENOMEM;

	return 0;
}

MODULE_LICENSE("GPL");