From 8436451b68fe6f5c7abfb6f690a3bfc8cd8cecfb Mon Sep 17 00:00:00 2001 From: Michael Blizek Date: Sat, 11 Feb 2023 10:26:01 +0100 Subject: [PATCH] 32bit seqno, bugfixes --- net/cor/conn.c | 10 ++- net/cor/conn_databuf.c | 30 +++++--- net/cor/conn_src_in.c | 34 ++++----- net/cor/conn_src_sock.c | 12 +-- net/cor/conn_trgt_out.c | 186 +++++++++++++++++++++++++-------------------- net/cor/conn_trgt_unconn.c | 4 +- net/cor/cor.h | 140 ++++++++++++---------------------- net/cor/dev.c | 37 +++++---- net/cor/neigh.c | 2 +- net/cor/neigh_rcv.c | 27 ++++--- net/cor/neigh_snd.c | 170 +++++++++++++++++++---------------------- net/cor/settings.h | 1 + net/cor/sock_managed.c | 13 +++- net/cor/util.c | 5 +- 14 files changed, 334 insertions(+), 337 deletions(-) diff --git a/net/cor/conn.c b/net/cor/conn.c index ce2195d8c84a..7d80fd4d3e94 100644 --- a/net/cor/conn.c +++ b/net/cor/conn.c @@ -482,8 +482,14 @@ int cor_conn_init_sock_source(struct cor_conn *cn) memset(&cn->src.sock, 0, sizeof(cn->src.sock)); cn->src.sock.ed = ed; cn->src.sock.ed->priority = cor_priority_max(); - cn->src.sock.ed->snd_speed.jiffies_last_refresh = jiffies; + + cn->src.sock.ed->snd_speed.state = SNDSPEED_ACTIVE; cn->src.sock.ed->snd_speed.flushed = 1; + cn->src.sock.ed->snd_speed.jiffies_last_refresh = jiffies; + cn->src.sock.ed->snd_speed.bytes_sent = 0; + cn->src.sock.ed->snd_speed.speed = SND_SPEED_START; + cn->src.sock.ed->snd_speed.speed_limited = SND_SPEED_START; + timer_setup(&cn->src.sock.keepalive_timer, cor_keepalive_req_timerfunc, 0); @@ -933,7 +939,7 @@ static int __init cor_init(void) printk(KERN_ERR "sizeof mutex: %u\n", (__u32) sizeof(struct mutex)); printk(KERN_ERR "sizeof spinlock: %u\n", (__u32) sizeof(spinlock_t)); - printk(KERN_ERR "sizeof spinlock: %u\n", (__u32) sizeof(struct timer_list)); + printk(KERN_ERR "sizeof timer_list: %u\n", (__u32) sizeof(struct timer_list)); printk(KERN_ERR "sizeof kref: %u\n", (__u32) sizeof(struct kref)); printk(KERN_ERR "sizeof list_head: %u\n", (__u32) 
sizeof(struct list_head)); diff --git a/net/cor/conn_databuf.c b/net/cor/conn_databuf.c index 151418e51ac8..e390b49d1a45 100644 --- a/net/cor/conn_databuf.c +++ b/net/cor/conn_databuf.c @@ -133,10 +133,8 @@ void cor_update_windowlimit(struct cor_conn *src_in_lx) bufsize = min(bufsize, (__u32) WINDOW_MAX_PER_CONN_MIN); } } else { - __u32 windowleft = (__u32) min((__u64) U32_MAX, - cor_seqno_clean( - src_in_lx->trgt.out.seqno_windowlimit - - src_in_lx->trgt.out.seqno_nextsend)); + __u32 windowleft = src_in_lx->trgt.out.seqno_windowlimit - + src_in_lx->trgt.out.seqno_nextsend; bufsize = max(bufsize, min(windowleft, (__u32) WINDOW_MAX_PER_CONN_MIN_OUT_WINOK)); @@ -150,9 +148,6 @@ void cor_update_windowlimit(struct cor_conn *src_in_lx) if (bufsize > WINDOW_MAX_PER_CONN_MAX) bufsize = WINDOW_MAX_PER_CONN_MAX; - /* printk(KERN_ERR "window %p %u %u\n", src_in_lx, bufsize, - src_in_lx->data_buf.read_remaining); */ - if (unlikely(src_in_lx->data_buf.read_remaining > bufsize)) bufsize = 0; else @@ -385,6 +380,13 @@ static void cor_bufsize_update(struct cor_conn *cn_lx, __u32 rcvd, BUG_ON(cn_lx->data_buf.read_remaining < rcvd); BUG_ON(windowused > 31); + /* if (cn_lx->is_highlatency == 0) + printk(KERN_ERR "bufsize %p %u %u %u %u %u %u\n", + cn_lx, bufsize_bytes, + cn_lx->data_buf.read_remaining, rcvd, + windowused, rcv_flushrcvd, + cn_lx->bufsize.ignore_rcv_lowbuf); */ + if (cn_lx->bufsize.ignore_rcv_lowbuf > 0) { if (rcvd > cn_lx->bufsize.ignore_rcv_lowbuf) cn_lx->bufsize.ignore_rcv_lowbuf = 0; @@ -541,6 +543,10 @@ void cor_bufsize_read_to_sock(struct cor_conn *trgt_sock_lx) __u32 latency_limit = (trgt_sock_lx->is_highlatency != 0 ? HZ / 10 : HZ / 40); + /** + * High cpu usage may cause high latency of the userspace receiver. + * Increasing bufferspace to compensate may increase latency further. 
+ */ if (trgt_sock_lx->trgt.sock.waiting_for_userspace != 0 && time_before( trgt_sock_lx->trgt.sock.waiting_for_userspace_since, jiffies - latency_limit)) { @@ -598,7 +604,7 @@ void cor_databuf_ackdiscard(struct cor_conn *cn_lx) cn_lx->data_buf.read_remaining = 0; } -void cor_reset_seqno(struct cor_conn *cn_l, __u64 initseqno) +void cor_reset_seqno(struct cor_conn *cn_l, __u32 initseqno) { cn_l->data_buf.first_offset = initseqno - cn_l->data_buf.datasize + @@ -768,10 +774,10 @@ void cor_databuf_unpull(struct cor_conn *trgt_out_l, __u32 bytes) trgt_out_l->data_buf.next_read_offset -= bytes; } -void cor_databuf_pullold(struct cor_conn *trgt_out_l, __u64 startpos, char *dst, +void cor_databuf_pullold(struct cor_conn *trgt_out_l, __u32 startpos, char *dst, int len) { - __u64 pos = trgt_out_l->data_buf.first_offset; + __u32 pos = trgt_out_l->data_buf.first_offset; struct cor_data_buf_item *dbi = container_of( trgt_out_l->data_buf.items.next, struct cor_data_buf_item, buf_list); @@ -793,7 +799,7 @@ void cor_databuf_pullold(struct cor_conn *trgt_out_l, __u64 startpos, char *dst, char *srcbufcpystart = 0; int srcbufcpylen = 0; - __u64 offset = cor_seqno_clean(startpos - pos); + __u32 offset = startpos - pos; BUG_ON(&dbi->buf_list == &trgt_out_l->data_buf.items); @@ -819,7 +825,7 @@ void cor_databuf_pullold(struct cor_conn *trgt_out_l, __u64 startpos, char *dst, } /* ack up to *not* including pos */ -void cor_databuf_ack(struct cor_conn *trgt_out_l, __u64 pos) +void cor_databuf_ack(struct cor_conn *trgt_out_l, __u32 pos) { __u32 acked = 0; diff --git a/net/cor/conn_src_in.c b/net/cor/conn_src_in.c index 67f6a7156b57..ec94fe4d1250 100644 --- a/net/cor/conn_src_in.c +++ b/net/cor/conn_src_in.c @@ -71,8 +71,7 @@ static int cor_drain_ooo_queue_buf(struct cor_conn *src_in_l, if (unlikely(cor_seqno_after(src_in_l->src.in.next_seqno, r->seqno))) { - __u64 overlap = cor_seqno_clean(r->seqno - - src_in_l->src.in.next_seqno); + __u32 overlap = r->seqno - src_in_l->src.in.next_seqno; 
if (overlap >= rb->len) goto free; @@ -132,8 +131,7 @@ static int cor_drain_ooo_queue_skb(struct cor_conn *src_in_l, if (unlikely(cor_seqno_after(src_in_l->src.in.next_seqno, r->seqno))) { - __u64 overlap = cor_seqno_clean(r->seqno - - src_in_l->src.in.next_seqno); + __u32 overlap = r->seqno - src_in_l->src.in.next_seqno; if (overlap >= skb->len) { src_in_l->src.in.reorder_memused -= @@ -302,7 +300,7 @@ static int _cor_conn_rcv_ooo_accountmem(struct cor_conn *src_in_l, } static void _cor_conn_rcv_ooo_merge(struct cor_conn *src_in_l, char *data, - __u32 len, __u64 seqno, __u8 windowused, __u8 flush, + __u32 len, __u32 seqno, __u8 windowused, __u8 flush, struct cor_rcvooo_buf *merge_prev, struct cor_rcvooo_buf *merge_next) { @@ -404,7 +402,7 @@ static void _cor_conn_rcv_ooo_merge(struct cor_conn *src_in_l, char *data, } static void _cor_conn_rcv_ooo_nomerge(struct cor_conn *src_in_l, char *data, - __u32 len, __u64 seqno, __u8 windowused, __u8 flush, + __u32 len, __u32 seqno, __u8 windowused, __u8 flush, struct list_head *next_rcvooo) { struct cor_rcvooo_buf *rb; @@ -458,7 +456,7 @@ static void _cor_conn_rcv_ooo_nomerge(struct cor_conn *src_in_l, char *data, } static void _cor_conn_rcv_ooo_buf(struct cor_conn *src_in_l, char *data, - __u32 len, __u64 seqno, __u8 windowused, __u8 flush, + __u32 len, __u32 seqno, __u8 windowused, __u8 flush, struct list_head *next_rcvooo) { struct cor_rcvooo_buf *merge_prev; @@ -470,7 +468,7 @@ static void _cor_conn_rcv_ooo_buf(struct cor_conn *src_in_l, char *data, merge_prev = _cor_conn_rcv_ooo_buf_checkmerge(src_in_l, next_rcvooo->prev); if (merge_prev != 0) { - __u64 next_seqno = merge_prev->r.seqno + merge_prev->len; + __u32 next_seqno = merge_prev->r.seqno + merge_prev->len; BUG_ON(cor_seqno_after(next_seqno, seqno)); if (cor_seqno_eq(next_seqno, seqno) == 0) @@ -479,7 +477,7 @@ static void _cor_conn_rcv_ooo_buf(struct cor_conn *src_in_l, char *data, merge_next = _cor_conn_rcv_ooo_buf_checkmerge(src_in_l, next_rcvooo); if 
(merge_next != 0) { - __u64 next_seqno = seqno + len; + __u32 next_seqno = seqno + len; BUG_ON(cor_seqno_after(next_seqno, merge_next->r.seqno)); if (cor_seqno_eq(next_seqno, merge_next->r.seqno) == 0) @@ -497,7 +495,7 @@ nomerge: } static void _cor_conn_rcv_ooo_skb(struct cor_conn *src_in_l, - struct sk_buff *skb, __u64 seqno, __u8 windowused, __u8 flush, + struct sk_buff *skb, __u32 seqno, __u8 windowused, __u8 flush, struct list_head *next_rcvooo) { struct cor_rcvooo *newr; @@ -524,7 +522,7 @@ static void _cor_conn_rcv_ooo_skb(struct cor_conn *src_in_l, } static void __cor_conn_rcv_ooo(struct cor_conn *src_in_l, struct sk_buff *skb, - char *data, __u32 len, __u64 seqno, __u8 windowused, __u8 flush, + char *data, __u32 len, __u32 seqno, __u8 windowused, __u8 flush, struct list_head *prev_rcvooo_lh) { struct list_head *reorder_queue = &src_in_l->src.in.reorder_queue; @@ -536,8 +534,7 @@ static void __cor_conn_rcv_ooo(struct cor_conn *src_in_l, struct sk_buff *skb, __u32 currlen = cor_rcvooo_len(prev_rcvooo); if (cor_seqno_after(prev_rcvooo->seqno + currlen, seqno)) { - __u64 overlap = cor_seqno_clean(prev_rcvooo->seqno + - currlen - seqno); + __u32 overlap = prev_rcvooo->seqno + currlen - seqno; if (unlikely(len <= overlap)) goto drop; @@ -556,7 +553,7 @@ static void __cor_conn_rcv_ooo(struct cor_conn *src_in_l, struct sk_buff *skb, goto drop; if (unlikely(cor_seqno_before(next_rcvooo->seqno, seqno + len))) - len = cor_seqno_clean(next_rcvooo->seqno - seqno); + len = next_rcvooo->seqno - seqno; } if (unlikely(len == 0)) { @@ -584,7 +581,7 @@ drop: } static void _cor_conn_rcv_ooo(struct cor_conn *src_in_l, struct sk_buff *skb, - char *data, __u32 len, __u64 seqno, __u8 windowused, __u8 flush) + char *data, __u32 len, __u32 seqno, __u8 windowused, __u8 flush) { struct list_head *reorder_queue = &src_in_l->src.in.reorder_queue; struct list_head *currlh = reorder_queue->prev; @@ -608,7 +605,7 @@ static void _cor_conn_rcv_ooo(struct cor_conn *src_in_l, struct sk_buff 
*skb, static void _cor_conn_rcv(struct cor_neighbor *nb, struct cor_conn *src_in, __u32 conn_id, struct sk_buff *skb, char *data, __u32 len, - __u64 seqno, __u8 windowused, __u8 flush) + __u32 seqno, __u8 windowused, __u8 flush) { BUG_ON(nb == 0); @@ -632,8 +629,7 @@ static void _cor_conn_rcv(struct cor_neighbor *nb, struct cor_conn *src_in, __u32 rcvlen; if (cor_seqno_after(src_in->src.in.next_seqno, seqno)) { - __u64 overlap = cor_seqno_clean( - src_in->src.in.next_seqno - seqno); + __u32 overlap = src_in->src.in.next_seqno - seqno; BUG_ON(overlap > len); @@ -679,7 +675,7 @@ drop: } void cor_conn_rcv(struct cor_neighbor *nb, struct sk_buff *skb, char *data, - __u32 len, __u32 conn_id, __u64 seqno, __u8 windowused, + __u32 len, __u32 conn_id, __u32 seqno, __u8 windowused, __u8 flush) { struct cor_conn *src_in; diff --git a/net/cor/conn_src_sock.c b/net/cor/conn_src_sock.c index f37691ecf66e..6b9791d355d1 100644 --- a/net/cor/conn_src_sock.c +++ b/net/cor/conn_src_sock.c @@ -106,21 +106,23 @@ void cor_update_src_sock_sndspeed(struct cor_conn *src_sock_l, __u32 bytes_sent) int cor_sock_sndbufavailable(struct cor_conn *src_sock_lx, int for_wakeup) { + __u32 limit = src_sock_lx->bufsize.bufsize >> BUFSIZE_SHIFT; + /* printk(KERN_ERR "sndbuf %p %u %u\n", src_sock_lx, (src_sock_lx->bufsize.bufsize >> BUFSIZE_SHIFT), src_sock_lx->data_buf.read_remaining); */ - __u32 limit = src_sock_lx->bufsize.bufsize >> BUFSIZE_SHIFT; if (src_sock_lx->targettype == TARGET_OUT && cor_seqno_after( src_sock_lx->trgt.out.seqno_windowlimit, src_sock_lx->trgt.out.seqno_nextsend + src_sock_lx->data_buf.read_remaining)) { - __u32 windowleft = (__u32) min((__u64) U32_MAX, - cor_seqno_clean( - src_sock_lx->trgt.out.seqno_windowlimit - + __u32 windowleft = src_sock_lx->trgt.out.seqno_windowlimit - src_sock_lx->trgt.out.seqno_nextsend - - src_sock_lx->data_buf.read_remaining)); + src_sock_lx->data_buf.read_remaining; + + if (unlikely(windowleft > ((__u32) 1) << 31)) + windowleft = 0; limit = max(limit, 
min(windowleft, (__u32) WINDOW_MAX_PER_CONN_MIN_OUT_WINOK)); diff --git a/net/cor/conn_trgt_out.c b/net/cor/conn_trgt_out.c index 628e4a39e6cc..b1f13346b2ff 100644 --- a/net/cor/conn_trgt_out.c +++ b/net/cor/conn_trgt_out.c @@ -121,7 +121,7 @@ static void cor_cancel_conn_retrans(struct cor_neighbor *nb_retransconnlocked, static void cor_cancel_acked_conn_retrans(struct cor_conn *trgt_out_l, __u64 *bytes_acked) { - __u64 seqno_acked = trgt_out_l->trgt.out.seqno_acked; + __u32 seqno_acked = trgt_out_l->trgt.out.seqno_acked; while (list_empty(&trgt_out_l->trgt.out.retrans_list) == 0) { struct cor_conn_retrans *cr = container_of( @@ -130,10 +130,8 @@ static void cor_cancel_acked_conn_retrans(struct cor_conn *trgt_out_l, if (cor_seqno_after(cr->seqno + cr->length, seqno_acked)) { if (cor_seqno_before(cr->seqno, seqno_acked)) { - *bytes_acked += cor_seqno_clean(seqno_acked - - cr->seqno); - cr->length -= cor_seqno_clean(seqno_acked - - cr->seqno); + *bytes_acked += seqno_acked - cr->seqno; + cr->length -= seqno_acked - cr->seqno; cr->seqno = seqno_acked; } break; @@ -220,7 +218,7 @@ static void cor_cancel_all_conn_retrans_nb(struct cor_neighbor *nb) } static struct cor_conn_retrans *cor_prepare_conn_retrans( - struct cor_conn *trgt_out_l, __u64 seqno, __u32 len, + struct cor_conn *trgt_out_l, __u32 seqno, __u32 len, __u8 windowused, struct cor_conn_retrans *cr_splitted, int retransconnlocked) { @@ -331,9 +329,7 @@ static int _cor_send_retrans_splitcr_ifneeded( { __u32 targetmss = cor_mss_conndata(nb_retransconnlocked, trgt_out_l->is_highlatency != 0); - __u64 windowlimit = cor_seqno_clean( - trgt_out_l->trgt.out.seqno_windowlimit - - cr->seqno); + __u32 windowlimit = trgt_out_l->trgt.out.seqno_windowlimit - cr->seqno; __u32 maxsize = targetmss; if (windowlimit < maxsize) @@ -501,11 +497,11 @@ void cor_retransmit_conn_timerfunc(struct timer_list *retrans_conn_timer) } static void cor_conn_ack_ooo_rcvd_splitcr(struct cor_conn *trgt_out_l, - struct cor_conn_retrans *cr, 
__u64 seqno_ooo, __u32 length, + struct cor_conn_retrans *cr, __u32 seqno_ooo, __u32 length, __u64 *bytes_acked) { struct cor_conn_retrans *cr2; - __u64 seqno_cr2start; + __u32 seqno_cr2start; __u32 oldcrlenght = cr->length; if (cr->state != CONN_RETRANS_SCHEDULED && @@ -514,8 +510,7 @@ static void cor_conn_ack_ooo_rcvd_splitcr(struct cor_conn *trgt_out_l, seqno_cr2start = seqno_ooo + length; cr2 = cor_prepare_conn_retrans(trgt_out_l, seqno_cr2start, - cor_seqno_clean(cr->seqno + cr->length - - seqno_cr2start), + cr->seqno + cr->length - seqno_cr2start, cr->windowused, cr, 1); if (unlikely(cr2 == 0)) @@ -531,9 +526,9 @@ static void cor_conn_ack_ooo_rcvd_splitcr(struct cor_conn *trgt_out_l, kref_get(&cr2->ref); } - BUG_ON(cor_seqno_clean(seqno_ooo - cr->seqno) > cr->length); + BUG_ON(seqno_ooo - cr->seqno > cr->length); - cr->length -= cor_seqno_clean(seqno_ooo - cr->seqno); + cr->length -= seqno_ooo - cr->seqno; BUG_ON(cr->length + length + cr2->length != oldcrlenght); kref_put(&cr2->ref, cor_kreffree_bug); /* alloc */ @@ -541,7 +536,7 @@ static void cor_conn_ack_ooo_rcvd_splitcr(struct cor_conn *trgt_out_l, } void cor_conn_ack_ooo_rcvd(struct cor_neighbor *nb, __u32 conn_id, - struct cor_conn *trgt_out, __u64 seqno_ooo, __u32 length, + struct cor_conn *trgt_out, __u32 seqno_ooo, __u32 length, __u64 *bytes_acked) { struct list_head *curr; @@ -622,6 +617,10 @@ out: static void _cor_conn_ack_rcvd_nosendwin(struct cor_conn *trgt_out_l) { + if (trgt_out_l->sourcetype != SOURCE_IN || + trgt_out_l->is_highlatency != 0) + return; + if (trgt_out_l->bufsize.state == BUFSIZE_INCR || trgt_out_l->bufsize.state == BUFSIZE_INCR_FAST) trgt_out_l->bufsize.state = BUFSIZE_NOACTION; @@ -670,7 +669,7 @@ static void cor_reschedule_lowwindow_retrans(struct cor_conn *trgt_out_l) } void cor_conn_ack_rcvd(struct cor_neighbor *nb, __u32 conn_id, - struct cor_conn *trgt_out, __u64 seqno, int setwindow, + struct cor_conn *trgt_out, __u32 seqno, int setwindow, __u16 window, __u8 
bufsize_changerate, __u64 *bytes_acked) { int seqno_advanced = 0; @@ -678,6 +677,12 @@ void cor_conn_ack_rcvd(struct cor_neighbor *nb, __u32 conn_id, spin_lock_bh(&trgt_out->rcv_lock); + /* printk(KERN_ERR "ack rcvd %x %u (+%u) %u %u %u \n", conn_id, seqno, + seqno - trgt_out->trgt.out.seqno_acked, + cor_dec_window(window), + trgt_out->bufsize.bufsize >> BUFSIZE_SHIFT, + trgt_out->data_buf.read_remaining); */ + if (unlikely(trgt_out->isreset != 0)) goto out; if (unlikely(trgt_out->targettype != TARGET_OUT)) @@ -695,7 +700,7 @@ void cor_conn_ack_rcvd(struct cor_neighbor *nb, __u32 conn_id, goto out; if (setwindow) { - __u64 windowdec = cor_dec_window(window); + __u32 windowdec = cor_dec_window(window); if (likely(cor_seqno_after(seqno, trgt_out->trgt.out.seqno_acked)) || @@ -820,12 +825,10 @@ void cor_schedule_retransmit_conn(struct cor_conn_retrans *cr, int connlocked, atomic_read(&nb->latency_stddev_retrans_us), atomic_read(&nb->max_remote_ackconn_delay_us)); - if (trgt_out_o->is_highlatency) { - cr->timeout += cor_calc_timeout( - atomic_read(&nb->latency_retrans_us), - atomic_read(&nb->latency_stddev_retrans_us), - CMSG_MAXDELAY_ACK_FAST_MS * 1000); - } + if (trgt_out_o->is_highlatency) + cr->timeout += 1 + msecs_to_jiffies( + atomic_read(&nb->latency_retrans_us)/1000 + + CMSG_MAXDELAY_ACK_FAST_MS); if (nbretransconn_locked == 0) spin_lock_bh(&nb->retrans_conn_lock); @@ -877,7 +880,7 @@ static int _cor_flush_out_skb(struct cor_conn *trgt_out_lx, __u32 len) { struct cor_neighbor *nb = trgt_out_lx->trgt.out.nb; - __u64 seqno; + __u32 seqno; struct cor_conn_retrans *cr; struct sk_buff *skb; char *dst; @@ -930,7 +933,7 @@ static int _cor_flush_out_skb(struct cor_conn *trgt_out_lx, __u32 len) static int _cor_flush_out_conndata(struct cor_conn *trgt_out_lx, __u32 len) { - __u64 seqno; + __u32 seqno; struct cor_control_msg_out *cm; struct cor_conn_retrans *cr; char *buf; @@ -978,14 +981,14 @@ static int _cor_flush_out_conndata(struct cor_conn *trgt_out_lx, __u32 len) 
int cor_srcin_buflimit_reached(struct cor_conn *src_in_lx) { - __u64 window_left; + __u32 window_left; if (unlikely(cor_seqno_before(src_in_lx->src.in.window_seqnolimit, src_in_lx->src.in.next_seqno))) return 1; - window_left = cor_seqno_clean(src_in_lx->src.in.window_seqnolimit - - src_in_lx->src.in.next_seqno); + window_left = src_in_lx->src.in.window_seqnolimit - + src_in_lx->src.in.next_seqno; if (window_left < WINDOW_ENCODE_MIN) return 1; @@ -1017,20 +1020,25 @@ static void cor_set_last_windowused(struct cor_conn *trgt_out_lx) __u64 total_window; __u64 bytes_ackpending; + if (trgt_out_lx->bufsize.ignore_rcv_lowbuf > 0) { + trgt_out_lx->trgt.out.lastsend_windowused = 31; + return; + } + + BUG_ON(cor_seqno_before(trgt_out_lx->trgt.out.seqno_windowlimit, trgt_out_lx->trgt.out.seqno_acked)); BUG_ON(cor_seqno_before(trgt_out_lx->trgt.out.seqno_nextsend, trgt_out_lx->trgt.out.seqno_acked)); - total_window = cor_seqno_clean( + total_window = (__u64) ( trgt_out_lx->trgt.out.seqno_windowlimit - trgt_out_lx->trgt.out.seqno_acked); - bytes_ackpending = cor_seqno_clean( + bytes_ackpending = (__u64) ( trgt_out_lx->trgt.out.seqno_nextsend - trgt_out_lx->trgt.out.seqno_acked); BUG_ON(bytes_ackpending > total_window); - BUG_ON(bytes_ackpending > (U64_MAX / 64)); trgt_out_lx->trgt.out.lastsend_windowused = div64_u64( bytes_ackpending * 31 + total_window - 1, total_window); @@ -1038,19 +1046,56 @@ static void cor_set_last_windowused(struct cor_conn *trgt_out_lx) static void _cor_flush_out_ignore_lowbuf(struct cor_conn *trgt_out_lx) { - trgt_out_lx->bufsize.ignore_rcv_lowbuf = max( - trgt_out_lx->bufsize.ignore_rcv_lowbuf, - trgt_out_lx->bufsize.bufsize >> BUFSIZE_SHIFT); + if (trgt_out_lx->sourcetype == SOURCE_IN && + trgt_out_lx->is_highlatency == 0) + trgt_out_lx->bufsize.ignore_rcv_lowbuf = max( + trgt_out_lx->bufsize.ignore_rcv_lowbuf, + trgt_out_lx->bufsize.bufsize >> BUFSIZE_SHIFT); } -static __u64 cor_get_windowlimit(struct cor_conn *trgt_out_lx) +static __u32 
cor_get_windowlimit(struct cor_conn *trgt_out_lx) { if (unlikely(cor_seqno_before(trgt_out_lx->trgt.out.seqno_windowlimit, trgt_out_lx->trgt.out.seqno_nextsend))) return 0; - return cor_seqno_clean(trgt_out_lx->trgt.out.seqno_windowlimit - - trgt_out_lx->trgt.out.seqno_nextsend); + return trgt_out_lx->trgt.out.seqno_windowlimit - + trgt_out_lx->trgt.out.seqno_nextsend; +} + +static int cor_delay_send(struct cor_conn *trgt_out_lx, __u32 len) +{ + __u32 data_inflight = trgt_out_lx->trgt.out.seqno_nextsend - + trgt_out_lx->trgt.out.seqno_acked; + int buflimit_reached; + + if (trgt_out_lx->sourcetype == SOURCE_IN) { + buflimit_reached = cor_srcin_buflimit_reached(trgt_out_lx); + } else if (trgt_out_lx->sourcetype == SOURCE_SOCK) { + buflimit_reached = (cor_sock_sndbufavailable( + trgt_out_lx, 1) == 0); + } else if (trgt_out_lx->sourcetype == SOURCE_UNCONNECTED) { + buflimit_reached = (cor_conn_src_unconn_write_allowed( + trgt_out_lx) == 0); + } else { + WARN_ONCE(1, "cor_delay_send: invalid sourcetype"); + buflimit_reached = 1; + } + + if ((trgt_out_lx->flush != 0 || buflimit_reached) && + data_inflight == 0) + return 0; + + if (trgt_out_lx->flush == 0) + return 1; + + if (trgt_out_lx->is_highlatency != 0) + return 1; + + if (data_inflight > 0) + return 1; + + return 0; } int _cor_flush_out(struct cor_conn *trgt_out_lx, __u32 maxsend, __u32 *sent, @@ -1087,28 +1132,28 @@ int _cor_flush_out(struct cor_conn *trgt_out_lx, __u32 maxsend, __u32 *sent, if (unlikely(nbstate != NEIGHBOR_STATE_ACTIVE)) return RC_FLUSH_CONN_OUT_NBNOTACTIVE; - /* printk(KERN_ERR "flush %p %llu %u\n", trgt_out_l, - cor_get_windowlimit(trgt_out_l), - trgt_out_l->data_buf.read_remaining); */ + /* printk(KERN_ERR "flush %p %u %u\n", trgt_out_lx, + cor_get_windowlimit(trgt_out_lx), + trgt_out_lx->data_buf.read_remaining); */ targetmss = cor_mss_conndata(nb, trgt_out_lx->is_highlatency != 0); while (trgt_out_lx->data_buf.read_remaining >= targetmss) { - __u64 windowlimit = 
cor_get_windowlimit(trgt_out_lx); + __u32 windowlimit = cor_get_windowlimit(trgt_out_lx); int rc; - if (maxsend_left < targetmss) + if (targetmss > maxsend_left) break; - if (windowlimit < targetmss) { + if (cor_nbcongwin_send_allowed(nb) == 0) + return RC_FLUSH_CONN_OUT_CONG; + + if (targetmss > windowlimit) { trgt_out_lx->trgt.out.lastsend_windowused = 31; _cor_flush_out_ignore_lowbuf(trgt_out_lx); break; } - if (cor_nbcongwin_send_allowed(nb) == 0) - return RC_FLUSH_CONN_OUT_CONG; - if (likely(cor_send_conndata_as_skb(nb, targetmss))) rc = _cor_flush_out_skb(trgt_out_lx, targetmss); else @@ -1129,10 +1174,10 @@ int _cor_flush_out(struct cor_conn *trgt_out_lx, __u32 maxsend, __u32 *sent, if (trgt_out_lx->data_buf.read_remaining > 0) { __u32 len = trgt_out_lx->data_buf.read_remaining; - __u64 windowlimit = cor_get_windowlimit(trgt_out_lx); + __u32 windowlimit = cor_get_windowlimit(trgt_out_lx); int rc; - if (maxsend_left < len) { + if (len > maxsend_left) { if (maxsend_left >= 65536 || ( maxsend_left == maxsend && maxsend_left >= 128 && @@ -1144,43 +1189,22 @@ int _cor_flush_out(struct cor_conn *trgt_out_lx, __u32 maxsend, __u32 *sent, } } - if (trgt_out_lx->flush == 0 && - trgt_out_lx->sourcetype == SOURCE_SOCK && - cor_sock_sndbufavailable(trgt_out_lx, 1) != 0) - goto out; - - if (trgt_out_lx->flush == 0 && - trgt_out_lx->sourcetype == SOURCE_IN && - cor_srcin_buflimit_reached(trgt_out_lx) - == 0 && ( - cor_seqno_eq( - trgt_out_lx->trgt.out.seqno_nextsend, - trgt_out_lx->trgt.out.seqno_acked) == 0 || - trgt_out_lx->is_highlatency != 0 || - LOWLATENCY_SEND_UNFLUSHED_DATA != 0)) - goto out; - - if (trgt_out_lx->flush == 0 && - trgt_out_lx->sourcetype == SOURCE_UNCONNECTED && - cor_conn_src_unconn_write_allowed( - trgt_out_lx) != 0) - goto out; - - if (windowlimit == 0 || (windowlimit < len && - cor_seqno_eq( - trgt_out_lx->trgt.out.seqno_nextsend, - trgt_out_lx->trgt.out.seqno_acked) == 0)) { - trgt_out_lx->trgt.out.lastsend_windowused = 31; - 
_cor_flush_out_ignore_lowbuf(trgt_out_lx); - goto out; - } + if (cor_delay_send(trgt_out_lx, len)) + return RC_FLUSH_CONN_OUT_OK; if (cor_nbcongwin_send_allowed(nb) == 0) return RC_FLUSH_CONN_OUT_CONG; if (len > windowlimit) { - len = windowlimit; + trgt_out_lx->trgt.out.lastsend_windowused = 31; _cor_flush_out_ignore_lowbuf(trgt_out_lx); + + if (windowlimit == 0 || cor_seqno_eq( + trgt_out_lx->trgt.out.seqno_nextsend, + trgt_out_lx->trgt.out.seqno_acked) == 0) + return RC_FLUSH_CONN_OUT_OK; + + len = windowlimit; } if (cor_send_conndata_as_skb(nb, len)) @@ -1189,6 +1213,7 @@ int _cor_flush_out(struct cor_conn *trgt_out_lx, __u32 maxsend, __u32 *sent, rc = _cor_flush_out_conndata(trgt_out_lx, len); + #warning todo cleanup if (rc == RC_FLUSH_CONN_OUT_OK || rc == RC_FLUSH_CONN_OUT_SENT_CONG) { maxsend_left -= len; @@ -1202,7 +1227,6 @@ int _cor_flush_out(struct cor_conn *trgt_out_lx, __u32 maxsend, __u32 *sent, return rc; } -out: return RC_FLUSH_CONN_OUT_OK; } diff --git a/net/cor/conn_trgt_unconn.c b/net/cor/conn_trgt_unconn.c index ea5174d18bce..7e1c205b2b57 100644 --- a/net/cor/conn_trgt_unconn.c +++ b/net/cor/conn_trgt_unconn.c @@ -473,9 +473,7 @@ void cor_proc_cpacket(struct cor_conn *trgt_unconn_l) } else { BUG_ON(rc != RC_PC_FINISHED); - if (trgt_unconn_l->targettype == TARGET_SOCK || - trgt_unconn_l->is_highlatency == 0 || - LOWLATENCY_SEND_UNFLUSHED_DATA != 0 || ( + if (trgt_unconn_l->targettype == TARGET_SOCK || ( trgt_unconn_l->flush != 0 && trgt_unconn_l->data_buf.read_remaining == 0)) src_in->flush = 1; diff --git a/net/cor/cor.h b/net/cor/cor.h index edd658bde205..45e7cfe88212 100644 --- a/net/cor/cor.h +++ b/net/cor/cor.h @@ -143,7 +143,7 @@ struct cor_sockaddr { #define KP_MISC_PONG 3 -/* KP_ACK[1] seqno[6] */ +/* KP_ACK[1] seqno[4] */ #define KP_MISC_ACK 4 /** @@ -162,7 +162,7 @@ struct cor_sockaddr { * incoming connection * seqno1... used to ack data sent from the side which initiated the connection * seqno2... 
used to ack data sent to the side which initiated the connection - * KP_CONNECT[1] conn_id[4] seqno1[6] seqno2[6] window[2] priority_seqno[0.5] + * KP_CONNECT[1] conn_id[4] seqno1[4] seqno2[4] window[2] priority_seqno[0.5] * priority[1.5] is_highlatency[1] */ #define KP_MISC_CONNECT 5 @@ -199,8 +199,8 @@ struct cor_sockaddr { /** - * KP_ACK_CONN[1] conn_id[4] delay_remaining[1] seqno[6] window[2] - * bufsize_changerate[1] seqno_ooo[6] + * KP_ACK_CONN[1] conn_id[4] delay_remaining[1] seqno[4] window[2] + * bufsize_changerate[1] seqno_ooo[4] * length[1-4] priority_seqno[0.5] priority[1.5] is_highlatency[1] * * conn_id is the conn_id we use if we sent something through this conn and @@ -265,13 +265,13 @@ static inline int cor_ack_conn_len(__u8 flags) int len = 0; if ((flags & KP_ACK_CONN_FLAGS_SEQNO) != 0) { - len += 6; + len += 4; if ((flags & KP_ACK_CONN_FLAGS_WINDOW) != 0) len += 3; } if (cor_ooolen(flags) != 0) { - len += 6; + len += 4; len += cor_ooolen(flags); } @@ -286,7 +286,7 @@ static inline int cor_ack_conn_len(__u8 flags) return len; } -/* KP_CONN_DATA[1] conn_id[4] seqno[6] length[1-2] data[length] */ +/* KP_CONN_DATA[1] conn_id[4] seqno[4] length[1-2] data[length] */ #define KP_CONN_DATA_FLAGS_WINDOWUSED 31 #define KP_CONN_DATA_FLAGS_FLUSH 32 @@ -295,9 +295,9 @@ static inline int cor_ack_conn_len(__u8 flags) static inline __u32 get_kp_conn_data_length(__u32 datalen) { if (datalen < 128) - return 12 + datalen; + return 10 + datalen; else - return 13 + datalen; + return 11 + datalen; } static inline __u8 get_kp_code(__u8 maj, __u8 min) @@ -607,7 +607,7 @@ struct cor_neighbor { struct list_head cmsg_queue_conndata_highlat; struct list_head cmsg_queue_other; __u8 add_retrans_needed; - __u64 kpacket_seqno; /* not locked, only accessed by single tasklet */ + __u32 kpacket_seqno; /* not locked, only accessed by single tasklet */ struct rb_root pending_conn_resets_rb; @@ -918,7 +918,7 @@ struct cor_conn { struct rb_node rbn; __u32 conn_id; - __u64 next_seqno; + 
__u32 next_seqno; /* number of ack sent, not data seqno */ __u32 ack_seqno; @@ -931,8 +931,8 @@ struct cor_conn { __u8 established; - __u64 window_seqnolimit; - __u64 window_seqnolimit_remote; + __u32 window_seqnolimit; + __u32 window_seqnolimit_remote; /* protected by nb->cmsg_lock */ struct list_head acks_pending; @@ -995,9 +995,9 @@ struct cor_conn { __u32 nblist_busy_remaining; __u32 conn_id; - __u64 seqno_nextsend; - __u64 seqno_acked; - __u64 seqno_windowlimit; + __u32 seqno_nextsend; + __u32 seqno_acked; + __u32 seqno_windowlimit; /* protected by nb->retrans_conn_lock, sorted by seqno */ @@ -1049,7 +1049,7 @@ struct cor_conn { struct{ struct list_head items; struct cor_data_buf_item *nextread; - __u64 first_offset; + __u32 first_offset; __u32 datasize; __u32 overhead; @@ -1150,7 +1150,7 @@ struct cor_conn_retrans { struct list_head timeout_list; struct list_head conn_list; struct cor_conn *trgt_out_o; - __u64 seqno; + __u32 seqno; __u32 length; __u8 windowused; @@ -1162,7 +1162,7 @@ struct cor_conn_retrans { #define RCVOOO_SKB 1 struct cor_rcvooo { struct list_head lh; - __u64 seqno; + __u32 seqno; __u8 type; __u8 windowused; __u8 flush; @@ -1404,13 +1404,13 @@ void cor_qos_remove_conn(struct cor_conn *trgt_out_l); int cor_may_send_announce(struct net_device *dev); struct sk_buff *cor_create_packet_cmsg(struct cor_neighbor *nb, int size, - gfp_t alloc_flags, __u64 seqno); + gfp_t alloc_flags, __u32 seqno); struct sk_buff *cor_create_packet(struct cor_neighbor *nb, int size, gfp_t alloc_flags); struct sk_buff *cor_create_packet_conndata(struct cor_neighbor *nb, int size, - gfp_t alloc_flags, __u32 conn_id, __u64 seqno, __u8 windowused, + gfp_t alloc_flags, __u32 conn_id, __u32 seqno, __u8 windowused, __u8 flush); void cor_qos_enqueue_conn(struct cor_conn *trgt_out_lx); @@ -1455,7 +1455,7 @@ static inline __u32 cor_priority_max(void) return cor_dec_priority(4095); } -static inline __u16 cor_enc_window(__u64 value) +static inline __u16 cor_enc_window(__u32 
value) { __u16 exponent = 0; __u16 mantissa; @@ -1477,8 +1477,10 @@ static inline __u32 cor_dec_window(__u16 value) { __u64 mantissa = (__u64) (value >> 5); __u16 exponent = (value & 31); - - return (mantissa << exponent); + __u64 ret = (mantissa << exponent); + if (unlikely(ret > U32_MAX/4)) + return U32_MAX/4; + return (__u32) ret; } __u8 __attribute__((const)) cor_enc_log_64_11(__u32 value); @@ -1575,7 +1577,7 @@ void cor_free_control_msg(struct cor_control_msg_out *cm); void cor_retransmit_timerfunc(struct timer_list *retrans_timer); -void cor_kern_ack_rcvd(struct cor_neighbor *nb, __u64 seqno); +void cor_kern_ack_rcvd(struct cor_neighbor *nb, __u32 seqno); int cor_send_messages(struct cor_neighbor *nb, unsigned long cmsg_send_start_j, ktime_t cmsg_send_start_kt, int *sent); @@ -1590,9 +1592,9 @@ void cor_send_pong(struct cor_neighbor *nb, __u32 cookie, ktime_t ping_rcvtime); int cor_send_reset_conn(struct cor_neighbor *nb, __u32 conn_id, int lowprio); -void cor_send_ack(struct cor_neighbor *nb, __u64 seqno, __u8 fast); +void cor_send_ack(struct cor_neighbor *nb, __u32 seqno, __u8 fast); -void cor_send_ack_conn_ifneeded(struct cor_conn *src_in_l, __u64 seqno_ooo, +void cor_send_ack_conn_ifneeded(struct cor_conn *src_in_l, __u32 seqno_ooo, __u32 ooo_length); void cor_send_priority(struct cor_conn *trgt_out_ll, __u16 priority); @@ -1603,10 +1605,10 @@ void cor_send_connect_success(struct cor_control_msg_out *cm, __u32 conn_id, struct cor_conn *src_in); void cor_send_connect_nb(struct cor_control_msg_out *cm, __u32 conn_id, - __u64 seqno1, __u64 seqno2, struct cor_conn *src_in_ll); + __u32 seqno1, __u32 seqno2, struct cor_conn *src_in_ll); void cor_send_conndata(struct cor_control_msg_out *cm, __u32 conn_id, - __u64 seqno, char *data_orig, char *data, __u32 datalen, + __u32 seqno, char *data_orig, char *data, __u32 datalen, __u8 windowused, __u8 flush, __u8 highlatency, struct cor_conn_retrans *cr); @@ -1671,7 +1673,7 @@ void cor_reset_ooo_queue(struct cor_conn 
*src_in_lx); void cor_drain_ooo_queue(struct cor_conn *src_in_l); void cor_conn_rcv(struct cor_neighbor *nb, struct sk_buff *skb, char *data, - __u32 len, __u32 conn_id, __u64 seqno, __u8 windowused, + __u32 len, __u32 conn_id, __u32 seqno, __u8 windowused, __u8 flush); int __init cor_rcv_init(void); @@ -1722,11 +1724,11 @@ int cor_send_retrans(struct cor_neighbor *nb, int *sent); void cor_retransmit_conn_timerfunc(struct timer_list *retrans_timer_conn); void cor_conn_ack_ooo_rcvd(struct cor_neighbor *nb, __u32 conn_id, - struct cor_conn *trgt_out, __u64 seqno_ooo, __u32 length, + struct cor_conn *trgt_out, __u32 seqno_ooo, __u32 length, __u64 *bytes_acked); void cor_conn_ack_rcvd(struct cor_neighbor *nb, __u32 conn_id, - struct cor_conn *trgt_out, __u64 seqno, int setwindow, + struct cor_conn *trgt_out, __u32 seqno, int setwindow, __u16 window, __u8 bufsize_changerate, __u64 *bytes_acked); void cor_schedule_retransmit_conn(struct cor_conn_retrans *cr, int connlocked, @@ -1798,7 +1800,7 @@ void cor_bufsize_read_to_sock(struct cor_conn *trgt_sock_lx); void cor_databuf_ackdiscard(struct cor_conn *cn_lx); -void cor_reset_seqno(struct cor_conn *cn_l, __u64 initseqno); +void cor_reset_seqno(struct cor_conn *cn_l, __u32 initseqno); void cor_databuf_pull(struct cor_conn *cn_lx, char *dst, __u32 len); @@ -1818,10 +1820,10 @@ void cor_databuf_pull_dbi(struct cor_sock *cs_rl, struct cor_conn *trgt_sock_l); void cor_databuf_unpull(struct cor_conn *trgt_out_l, __u32 bytes); -void cor_databuf_pullold(struct cor_conn *trgt_out_l, __u64 startpos, char *dst, +void cor_databuf_pullold(struct cor_conn *trgt_out_l, __u32 startpos, char *dst, int len); -void cor_databuf_ack(struct cor_conn *trgt_out_l, __u64 pos); +void cor_databuf_ack(struct cor_conn *trgt_out_l, __u32 pos); void cor_databuf_ackread(struct cor_conn *cn_lx); @@ -1852,7 +1854,7 @@ static inline __u32 cor_receive_sock(struct cor_conn *src_sock_l, char *buf, src_sock_l->src.sock.last_windowused = 31; else if 
(unlikely(bufused * 31 > U32_MAX)) src_sock_l->src.sock.last_windowused = - bufused / (bufsize / 31); + bufused / ((bufsize + 30) / 31); else src_sock_l->src.sock.last_windowused = (bufused * 31) / bufsize; @@ -1931,8 +1933,10 @@ static inline void cor_set_sock_connecterror(__be64 cookie, int errorno) { struct cor_sock *cs = cor_get_sock_by_cookie(cookie); - _cor_set_sock_connecterror(cs, errorno); - kref_put(&cs->ref, cor_free_sock); + if (cs != 0) { + _cor_set_sock_connecterror(cs, errorno); + kref_put(&cs->ref, cor_free_sock); + } } void cor_mngdsocket_readfromconn_fromatomic(struct cor_sock *cs); @@ -2001,12 +2005,12 @@ static inline __u32 cor_mss(struct cor_neighbor *nb, __u32 l3overhead) static inline __u32 cor_mss_cmsg(struct cor_neighbor *nb) { - return cor_mss(nb, 7); + return cor_mss(nb, 5); } static inline __u32 cor_mss_conndata(struct cor_neighbor *nb, int highlatency) { - __u32 mss_tmp = cor_mss(nb, 11); + __u32 mss_tmp = cor_mss(nb, 9); __u32 i; if (mss_tmp < 256 || highlatency || LOWLATENCY_LOWERMTU == 0) @@ -2073,20 +2077,6 @@ static inline void cor_put_u64(char *dst, __u64 value) cor_put_be64(dst, cpu_to_be64(value)); } -static inline void cor_put_u48(char *dst, __u64 value) -{ - char *p_value = (char *) &value; - - value = cpu_to_be64(value); - - dst[0] = p_value[2]; - dst[1] = p_value[3]; - dst[2] = p_value[4]; - dst[3] = p_value[5]; - dst[4] = p_value[6]; - dst[5] = p_value[7]; -} - static inline void cor_put_be32(char *dst, __be32 value) { char *p_value = (char *) &value; @@ -2148,22 +2138,6 @@ static inline __u64 cor_parse_u64(char *buf) return be64_to_cpu(cor_parse_be64(buf)); } -static inline __u64 cor_parse_u48(char *ptr) -{ - __u64 ret = 0; - - ((char *)&ret)[0] = 0; - ((char *)&ret)[1] = 0; - ((char *)&ret)[2] = ptr[0]; - ((char *)&ret)[3] = ptr[1]; - ((char *)&ret)[4] = ptr[2]; - ((char *)&ret)[5] = ptr[3]; - ((char *)&ret)[6] = ptr[4]; - ((char *)&ret)[7] = ptr[5]; - - return be64_to_cpu(ret); -} - static inline __be32 
cor_parse_be32(char *ptr) { __be32 ret = 0; @@ -2206,11 +2180,6 @@ static inline __u8 cor_parse_u8(char *ptr) return (__u8) ptr[0]; } -static inline __u64 cor_pull_u48(struct sk_buff *skb) -{ - return cor_parse_u48(cor_pull_skb(skb, 6)); -} - static inline __be32 cor_pull_be32(struct sk_buff *skb) { return cor_parse_be32(cor_pull_skb(skb, 4)); @@ -2292,36 +2261,27 @@ static inline void cor_databuf_item_free(struct cor_data_buf_item *item) } } -static inline __u64 cor_seqno_clean(__u64 seqno) -{ - return seqno & ((1LL << 48) - 1); -} - -static inline int cor_seqno_eq(__u64 seqno1, __u64 seqno2) +static inline int cor_seqno_eq(__u32 seqno1, __u32 seqno2) { - seqno1 = seqno1 << 16; - seqno2 = seqno2 << 16; return seqno1 == seqno2; } -static inline int cor_seqno_before(__u64 seqno1, __u64 seqno2) +static inline int cor_seqno_before(__u32 seqno1, __u32 seqno2) { - seqno1 = seqno1 << 16; - seqno2 = seqno2 << 16; - return (seqno1 - seqno2) >= (1LL << 63); + return (seqno1 - seqno2) >= (1LL << 31); } -static inline int cor_seqno_before_eq(__u64 seqno1, __u64 seqno2) +static inline int cor_seqno_before_eq(__u32 seqno1, __u32 seqno2) { return cor_seqno_eq(seqno1, seqno2) || cor_seqno_before(seqno1, seqno2); } -static inline int cor_seqno_after(__u64 seqno1, __u64 seqno2) +static inline int cor_seqno_after(__u32 seqno1, __u32 seqno2) { return cor_seqno_before_eq(seqno1, seqno2) ? 0 : 1; } -static inline int cor_seqno_after_eq(__u64 seqno1, __u64 seqno2) +static inline int cor_seqno_after_eq(__u32 seqno1, __u32 seqno2) { return cor_seqno_before(seqno1, seqno2) ? 
0 : 1; } diff --git a/net/cor/dev.c b/net/cor/dev.c index 810a54d6e3cb..2166472fe752 100644 --- a/net/cor/dev.c +++ b/net/cor/dev.c @@ -446,7 +446,7 @@ static __u32 _cor_resume_conns_burstprio(struct cor_conn *trgt_out_l, return (__u32) newprio; } -static __u64 _cor_resume_conns_maxsend(struct cor_qos_queue *q, +static __u32 _cor_resume_conns_maxsend(struct cor_qos_queue *q, struct cor_conn *trgt_out_l, __u32 newpriority, int *maxsend_forcedelay) { @@ -457,6 +457,7 @@ static __u64 _cor_resume_conns_maxsend(struct cor_qos_queue *q, __u64 priority_sum; __u32 numconns; __u64 bytes_per_round; + __u64 ret; spin_lock_irqsave(&nb->conns_waiting.lock, iflags); spin_lock(&q->qlock); @@ -467,7 +468,7 @@ static __u64 _cor_resume_conns_maxsend(struct cor_qos_queue *q, spin_unlock(&q->qlock); spin_unlock_irqrestore(&nb->conns_waiting.lock, iflags); - return 1024LL; + return 1024; } BUG_ON(nb->conns_waiting.priority_sum < oldpriority); @@ -491,14 +492,17 @@ static __u64 _cor_resume_conns_maxsend(struct cor_qos_queue *q, if (numconns <= 4) { *maxsend_forcedelay = 1; - bytes_per_round = 2048LL; + bytes_per_round = 2048; } else { *maxsend_forcedelay = 0; - bytes_per_round = 1024LL; + bytes_per_round = 1024; } - return div_u64(bytes_per_round * ((__u64) newpriority) * + ret = div_u64(bytes_per_round * ((__u64) newpriority) * ((__u64) numconns), priority_sum); + if (unlikely(ret > U32_MAX)) + return U32_MAX; + return (__u32) ret; } static int _cor_resume_neighbors_nextpass( @@ -1223,8 +1227,6 @@ static void _cor_qos_enqueue(struct cor_qos_queue *q, BUG_ON(!queues_empty && atomic_read(&q->qos_resume_scheduled) == 0); - rb->in_queue = RB_INQUEUE_TRUE; - if (caller == QOS_CALLER_KPACKET) { struct cor_neighbor *nb = container_of(rb, struct cor_neighbor, rb_kp); @@ -1242,9 +1244,11 @@ static void _cor_qos_enqueue(struct cor_qos_queue *q, } else if (caller == QOS_CALLER_NEIGHBOR) { struct cor_neighbor *nb = container_of(rb, struct cor_neighbor, rb); + if (unlikely(nb->conns_waiting.cnt == 
0)) + return; + list_add_tail(&rb->lh, &q->neighbors_waiting_nextpass); cor_nb_kref_get(nb, "qos_queue_nb"); - BUG_ON(nb->conns_waiting.cnt == 0); q->numconns += nb->conns_waiting.cnt; q->priority_sum += nb->conns_waiting.priority_sum; q->jiffies_nb_lastduration = 0; @@ -1252,6 +1256,7 @@ static void _cor_qos_enqueue(struct cor_qos_queue *q, } else { BUG(); } + rb->in_queue = RB_INQUEUE_TRUE; kref_get(&q->ref); cor_schedule_qos_resume(q); @@ -1406,17 +1411,17 @@ struct sk_buff *cor_create_packet(struct cor_neighbor *nb, int size, } struct sk_buff *cor_create_packet_conndata(struct cor_neighbor *nb, int size, - gfp_t alloc_flags, __u32 conn_id, __u64 seqno, + gfp_t alloc_flags, __u32 conn_id, __u32 seqno, __u8 windowused, __u8 flush) { struct sk_buff *ret; char *dest; - ret = cor_create_packet(nb, size + 11, alloc_flags); + ret = cor_create_packet(nb, size + 9, alloc_flags); if (unlikely(ret == 0)) return 0; - dest = skb_put(ret, 11); + dest = skb_put(ret, 9); BUG_ON(dest == 0); BUG_ON((windowused & (~PACKET_TYPE_CONNDATA_FLAGS_WINDOWUSED)) != 0); @@ -1428,8 +1433,8 @@ struct sk_buff *cor_create_packet_conndata(struct cor_neighbor *nb, int size, cor_put_u32(dest, conn_id); dest += 4; - cor_put_u48(dest, seqno); - dest += 6; + cor_put_u32(dest, seqno); + dest += 4; return ret; } @@ -1440,7 +1445,7 @@ static void cor_rcv_conndata(struct sk_buff *skb, __u8 windowused, __u8 flush) struct cor_neighbor *nb = cor_get_neigh_by_mac(skb); __u32 conn_id; - __u64 seqno; + __u32 seqno; char *connid_p; char *seqno_p; @@ -1454,12 +1459,12 @@ static void cor_rcv_conndata(struct sk_buff *skb, __u8 windowused, __u8 flush) if (unlikely(connid_p == 0)) goto drop; - seqno_p = cor_pull_skb(skb, 6); + seqno_p = cor_pull_skb(skb, 4); if (unlikely(seqno_p == 0)) goto drop; conn_id = cor_parse_u32(connid_p); - seqno = cor_parse_u48(seqno_p); + seqno = cor_parse_u32(seqno_p); /* get_random_bytes(&rand, 1); if (rand < 64) diff --git a/net/cor/neigh.c b/net/cor/neigh.c index 
5a03da9e6788..c57b937dd44c 100644 --- a/net/cor/neigh.c +++ b/net/cor/neigh.c @@ -100,7 +100,7 @@ static void _cor_reset_neighbor(struct work_struct *work); static struct cor_neighbor *cor_alloc_neighbor(gfp_t allocflags) { struct cor_neighbor *nb; - __u64 seqno; + __u32 seqno; if (atomic_inc_return(&cor_num_neighs) >= MAX_NEIGHBORS) { atomic_dec(&cor_num_neighs); diff --git a/net/cor/neigh_rcv.c b/net/cor/neigh_rcv.c index 0895d07df21c..1702a30e7da1 100644 --- a/net/cor/neigh_rcv.c +++ b/net/cor/neigh_rcv.c @@ -24,8 +24,8 @@ static void cor_parse_connect(struct cor_neighbor *nb, struct sk_buff *skb) struct cor_conn *trgt_out; __u32 rcv_conn_id = cor_pull_u32(skb); __u32 snd_conn_id = cor_get_connid_reverse(rcv_conn_id); - __u64 rcv_seqno = cor_pull_u48(skb); - __u64 snd_seqno = cor_pull_u48(skb); + __u32 rcv_seqno = cor_pull_u32(skb); + __u32 snd_seqno = cor_pull_u32(skb); __u16 window = cor_pull_u16(skb); __u16 priority_raw = cor_pull_u16(skb); __u8 priority_seqno = (priority_raw >> 12); @@ -162,7 +162,6 @@ static void cor_parse_conn_success(struct cor_neighbor *nb, struct sk_buff *skb) cor_dec_window(window); } - spin_unlock_bh(&src_in->rcv_lock); cor_flush_buf(trgt_out); @@ -170,7 +169,6 @@ static void cor_parse_conn_success(struct cor_neighbor *nb, struct sk_buff *skb) spin_unlock_bh(&trgt_out->rcv_lock); - cor_wake_sender(trgt_out); if (0) { @@ -240,7 +238,7 @@ static int _cor_kernel_packet_misc(struct cor_neighbor *nb, cor_pull_u32(skb); /* respdelay_netonly */ cor_ping_resp(nb, cookie, respdelay_full); } else if (code_min == KP_MISC_ACK) { - __u64 seqno = cor_pull_u48(skb); + __u32 seqno = cor_pull_u32(skb); cor_kern_ack_rcvd(nb, seqno); } else if (code_min == KP_MISC_CONNECT) { @@ -277,7 +275,7 @@ static void cor_parse_ack_conn(struct cor_neighbor *nb, struct sk_buff *skb, delay_remaining = cor_pull_u8(skb); if ((code_min & KP_ACK_CONN_FLAGS_SEQNO) != 0) { - __u64 seqno = cor_pull_u48(skb); + __u32 seqno = cor_pull_u32(skb); int setwindow = 0; __u16 window 
= 0; __u8 bufsize_changerate = 0; @@ -296,7 +294,7 @@ static void cor_parse_ack_conn(struct cor_neighbor *nb, struct sk_buff *skb, } if (cor_ooolen(code_min) != 0) { - __u64 seqno_ooo = cor_pull_u48(skb); + __u32 seqno_ooo = cor_pull_u32(skb); __u32 ooo_len; if (cor_ooolen(code_min) == 1) { @@ -353,7 +351,7 @@ static int cor_parse_conndata_length(struct sk_buff *skb, __u32 *ret) if (lowptr == 0) return 1; *ret = 128 + - ((__u32) (highptr - 128) * 256) + + ((__u32) (high - 128)) * 256 + ((__u32) cor_parse_u8(lowptr)); } @@ -366,7 +364,7 @@ static void cor_parse_conndata(struct cor_neighbor *nb, struct sk_buff *skb, __u8 flush = ((code_min & KP_CONN_DATA_FLAGS_FLUSH) != 0) ? 1 : 0; __u8 windowused = (code_min & KP_CONN_DATA_FLAGS_WINDOWUSED); __u32 conn_id = cor_pull_u32(skb); - __u64 seqno = cor_pull_u48(skb); + __u32 seqno = cor_pull_u32(skb); __u32 datalength = 0; char *data; @@ -412,7 +410,7 @@ static void _cor_kernel_packet(struct cor_neighbor *nb, struct sk_buff *skb, __u32 pingcookie = 0; __u64 bytes_acked = 0; - __u64 seqno = cor_parse_u48(cor_pull_skb(skb, 6)); + __u32 seqno = cor_pull_u32(skb); if (unlikely(atomic_read(&nb->sessionid_rcv_needed) != 0)) { __u8 *codeptr = cor_pull_skb(skb, 1); @@ -459,6 +457,7 @@ static void _cor_kernel_packet(struct cor_neighbor *nb, struct sk_buff *skb, else if (ackneeded == ACK_NEEDED_FAST) cor_send_ack(nb, seqno, 1); + /* do this at the end to include packet processing time */ if (ping_rcvd) cor_send_pong(nb, pingcookie, pkg_rcv_start); } @@ -474,10 +473,10 @@ static int _cor_kernel_packet_checklen_misc(struct sk_buff *skb, __u8 code_min) if (cor_pull_skb(skb, 12) == 0) return 1; } else if (code_min == KP_MISC_ACK) { - if (cor_pull_skb(skb, 6) == 0) + if (cor_pull_skb(skb, 4) == 0) return 1; } else if (code_min == KP_MISC_CONNECT) { - if (cor_pull_skb(skb, 21) == 0) + if (cor_pull_skb(skb, 17) == 0) return 1; } else if (code_min == KP_MISC_CONNECT_SUCCESS) { if (cor_pull_skb(skb, 6) == 0) @@ -511,7 +510,7 @@ static int 
_cor_kernel_packet_checklen_conndata(struct sk_buff *skb, { __u32 datalength; - if (cor_pull_skb(skb, 10) == 0) + if (cor_pull_skb(skb, 8) == 0) return 1; if (unlikely(cor_parse_conndata_length(skb, &datalength) != 0)) @@ -540,7 +539,7 @@ static int _cor_kernel_packet_checklen(struct sk_buff *skb, __u8 code) static int cor_kernel_packet_checklen(struct sk_buff *skb) { - if (cor_pull_skb(skb, 6) == 0) /* seqno */ + if (cor_pull_skb(skb, 4) == 0) /* seqno */ return 1; while (1) { diff --git a/net/cor/neigh_snd.c b/net/cor/neigh_snd.c index 09d7ac7587f1..c9a42e0a0952 100644 --- a/net/cor/neigh_snd.c +++ b/net/cor/neigh_snd.c @@ -52,7 +52,7 @@ struct cor_control_msg_out { } pong; struct{ - __u64 seqno; + __u32 seqno; __u8 fast; } ack; @@ -60,8 +60,8 @@ struct cor_control_msg_out { struct cor_conn *src_in; struct list_head conn_acks; __u32 conn_id; - __u64 seqno; - __u64 seqno_ooo; + __u32 seqno; + __u32 seqno_ooo; __u32 length; __u8 flags; @@ -79,8 +79,8 @@ struct cor_control_msg_out { struct{ __u32 conn_id; - __u64 seqno1; - __u64 seqno2; + __u32 seqno1; + __u32 seqno2; struct cor_conn *src_in; } connect; @@ -97,7 +97,7 @@ struct cor_control_msg_out { struct{ __u32 conn_id; - __u64 seqno; + __u32 seqno; __u32 datalen; __u8 windowused; __u8 flush; @@ -124,7 +124,7 @@ struct cor_control_retrans { struct kref ref; struct cor_neighbor *nb; - __u64 seqno; + __u32 seqno; unsigned long timeout; @@ -296,38 +296,34 @@ static void cor_free_control_retrans(struct kref *ref) } struct cor_control_retrans *cor_get_control_retrans( - struct cor_neighbor *nb_retranslocked, __u64 seqno) + struct cor_neighbor *nb_retranslocked, __u32 seqno) { - struct rb_node *n = 0; - struct cor_control_retrans *ret = 0; + struct rb_node *n = nb_retranslocked->kp_retransmits_rb.rb_node; - n = nb_retranslocked->kp_retransmits_rb.rb_node; - - while (likely(n != 0) && ret == 0) { + while (likely(n != 0)) { struct cor_control_retrans *cr = container_of(n, struct cor_control_retrans, rbn); BUG_ON(cr->nb != 
nb_retranslocked); - if (cor_seqno_before(seqno, cr->seqno)) + if (seqno < cr->seqno) { n = n->rb_left; - else if (cor_seqno_after(seqno, cr->seqno)) + } else if (seqno > cr->seqno) { n = n->rb_right; - else - ret = cr; + } else { + kref_get(&cr->ref); + return cr; + } } - if (ret != 0) - kref_get(&ret->ref); - - return ret; + return 0; } /* nb->retrans_lock must be held */ void cor_insert_control_retrans(struct cor_control_retrans *ins) { struct cor_neighbor *nb = ins->nb; - __u64 seqno = ins->seqno; + __u32 seqno = ins->seqno; struct rb_root *root; struct rb_node **p; @@ -345,11 +341,11 @@ void cor_insert_control_retrans(struct cor_control_retrans *ins) BUG_ON(cr->nb != nb); parent = *p; - if (unlikely(cor_seqno_eq(seqno, cr->seqno))) { + if (unlikely(seqno == cr->seqno)) { BUG(); - } else if (cor_seqno_before(seqno, cr->seqno)) { + } else if (seqno < cr->seqno) { p = &(*p)->rb_left; - } else if (cor_seqno_after(seqno, cr->seqno)) { + } else if (seqno > cr->seqno) { p = &(*p)->rb_right; } } @@ -569,7 +565,7 @@ static void cor_schedule_retransmit(struct cor_control_retrans *cr, spin_unlock_bh(&nb->retrans_lock); } -void cor_kern_ack_rcvd(struct cor_neighbor *nb, __u64 seqno) +void cor_kern_ack_rcvd(struct cor_neighbor *nb, __u32 seqno) { struct cor_control_retrans *cr = 0; @@ -578,11 +574,7 @@ void cor_kern_ack_rcvd(struct cor_neighbor *nb, __u64 seqno) cr = cor_get_control_retrans(nb, seqno); if (cr == 0) { - /* char *seqno_p = (char *) &seqno; - seqno = cpu_to_be32(seqno); - printk(KERN_ERR "bogus/duplicate ack received %d %d %d %d\n", - seqno_p[0], seqno_p[1], seqno_p[2], seqno_p[3]); - */ + /* printk(KERN_ERR "bogus/duplicate ack received %x\n", seqno); */ goto out; } @@ -616,9 +608,8 @@ static __u16 cor_get_window(struct cor_conn *cn, if (cor_is_conn_in(cn, expectedsender, expected_connid) == 0) goto out; - window = cor_enc_window(cor_seqno_clean( - cn->src.in.window_seqnolimit - - cn->src.in.next_seqno)); + window = 
cor_enc_window(cn->src.in.window_seqnolimit - + cn->src.in.next_seqno); cn->src.in.window_seqnolimit_remote = cn->src.in.next_seqno + cor_dec_window(window); @@ -664,20 +655,20 @@ static __u32 cor_add_ack(struct sk_buff *skb, struct cor_control_retrans *cr, { char *dst; - BUG_ON(cm->length != 7); + BUG_ON(cm->length != 5); - if (unlikely(spaceleft < 7)) + if (unlikely(spaceleft < 5)) return 0; - dst = skb_put(skb, 7); + dst = skb_put(skb, 5); BUG_ON(dst == 0); dst[0] = get_kp_code(KP_MISC, KP_MISC_ACK); - cor_put_u48(dst + 1, cm->msg.ack.seqno); + cor_put_u32(dst + 1, cm->msg.ack.seqno); list_add_tail(&cm->lh, &cr->msgs); - return 7; + return 5; } static inline __u8 cor_add_ack_conn_get_delayremaining( @@ -731,8 +722,8 @@ static __u32 cor_add_ack_conn(struct sk_buff *skb, } if ((cm->msg.ack_conn.flags & KP_ACK_CONN_FLAGS_SEQNO) != 0) { - cor_put_u48(dst + offset, cm->msg.ack_conn.seqno); - offset += 6; + cor_put_u32(dst + offset, cm->msg.ack_conn.seqno); + offset += 4; if ((cm->msg.ack_conn.flags & KP_ACK_CONN_FLAGS_WINDOW) != 0) { BUG_ON(cm->msg.ack_conn.src_in == 0); @@ -747,8 +738,8 @@ static __u32 cor_add_ack_conn(struct sk_buff *skb, } if (cor_ooolen(cm->msg.ack_conn.flags) != 0) { - cor_put_u48(dst + offset, cm->msg.ack_conn.seqno_ooo); - offset += 6; + cor_put_u32(dst + offset, cm->msg.ack_conn.seqno_ooo); + offset += 4; if (cor_ooolen(cm->msg.ack_conn.flags) == 1) { BUG_ON(cm->msg.ack_conn.length > 255); dst[offset] = cm->msg.ack_conn.length; @@ -872,20 +863,20 @@ static __u32 cor_add_connect(struct sk_buff *skb, struct cor_conn *trgt_out = cor_get_conn_reversedir(src_in); __u16 priority; - BUG_ON(cm->length != 22); + BUG_ON(cm->length != 18); - if (unlikely(spaceleft < 22)) + if (unlikely(spaceleft < 18)) return 0; - dst = skb_put(skb, 22); + dst = skb_put(skb, 18); BUG_ON(dst == 0); dst[0] = get_kp_code(KP_MISC, KP_MISC_CONNECT); cor_put_u32(dst + 1, cm->msg.connect.conn_id); - cor_put_u48(dst + 5, cm->msg.connect.seqno1); - cor_put_u48(dst + 11, 
cm->msg.connect.seqno2); + cor_put_u32(dst + 5, cm->msg.connect.seqno1); + cor_put_u32(dst + 9, cm->msg.connect.seqno2); BUG_ON(cm->msg.connect.src_in == 0); - cor_put_u16(dst + 17, cor_get_window(cm->msg.connect.src_in, cm->nb, + cor_put_u16(dst + 13, cor_get_window(cm->msg.connect.src_in, cm->nb, cor_get_connid_reverse(cm->msg.connect.conn_id))); spin_lock_bh(&trgt_out->rcv_lock); @@ -895,12 +886,12 @@ static __u32 cor_add_connect(struct sk_buff *skb, trgt_out->trgt.out.priority_last; BUG_ON(trgt_out->trgt.out.priority_seqno > 15); BUG_ON(trgt_out->trgt.out.priority_last > 4095); - cor_put_u16(dst + 19, priority); + cor_put_u16(dst + 15, priority); if (trgt_out->is_highlatency == 0) - dst[21] = 0; + dst[17] = 0; else - dst[21] = 1; + dst[17] = 1; spin_unlock_bh(&trgt_out->rcv_lock); @@ -908,7 +899,7 @@ static __u32 cor_add_connect(struct sk_buff *skb, if (*ackneeded != ACK_NEEDED_FAST) *ackneeded = ACK_NEEDED_SLOW; - return 22; + return 18; } static __u32 cor_add_connect_success(struct sk_buff *skb, @@ -993,16 +984,16 @@ static __u32 cor_add_conndata(struct sk_buff *skb, if (spaceleft < get_kp_conn_data_length(1)) return 0; - BUG_ON(spaceleft < 13); + BUG_ON(spaceleft < 11); - if (spaceleft <= 127 + 12) { - dataputlen = spaceleft - 12; + if (spaceleft <= 127 + 10) { + dataputlen = spaceleft - 10; putlen = spaceleft; - } else if (spaceleft == 127 - 12 + 1) { - dataputlen = spaceleft - 12 - 1; + } else if (spaceleft == 127 + 10 + 1) { + dataputlen = spaceleft - 10 - 1; putlen = spaceleft - 1; } else { - dataputlen = spaceleft - 13; + dataputlen = spaceleft - 11; putlen = spaceleft; } @@ -1023,8 +1014,8 @@ static __u32 cor_add_conndata(struct sk_buff *skb, offset++; cor_put_u32(dst + offset, cm->msg.conn_data.conn_id); offset += 4; - cor_put_u48(dst + offset, cm->msg.conn_data.seqno); - offset += 6; + cor_put_u32(dst + offset, cm->msg.conn_data.seqno); + offset += 4; if (dataputlen < 128) { dst[offset] = (__u8) dataputlen; @@ -1467,7 +1458,7 @@ drop: static int 
_cor_send_messages_send(struct cor_neighbor *nb, int ping, int initsession, struct list_head *cmsgs, int nbstate, - __u32 length, __u64 seqno, unsigned long cmsg_send_start_j, + __u32 length, __u32 seqno, unsigned long cmsg_send_start_j, ktime_t cmsg_send_start_kt, int *sent) { struct sk_buff *skb; @@ -1476,7 +1467,7 @@ static int _cor_send_messages_send(struct cor_neighbor *nb, int ping, int rc; BUG_ON(length > cor_mss_cmsg(nb)); - skb = cor_create_packet(nb, length + 7, GFP_ATOMIC); + skb = cor_create_packet(nb, length + 5, GFP_ATOMIC); if (unlikely(skb == 0)) { printk(KERN_ERR "cor_send_messages(): cannot allocate skb (out of memory?)\n"); @@ -1500,11 +1491,11 @@ static int _cor_send_messages_send(struct cor_neighbor *nb, int ping, INIT_LIST_HEAD(&cr->msgs); - dst = skb_put(skb, 7); + dst = skb_put(skb, 5); BUG_ON(dst == 0); dst[0] = PACKET_TYPE_NONE; - cor_put_u48(dst + 1, seqno); + cor_put_u32(dst + 1, seqno); rc = __cor_send_messages_send(nb, skb, &dst[0], ping, initsession, cr, cmsgs, length, nbstate, cmsg_send_start_j, @@ -2031,7 +2022,7 @@ int cor_send_messages(struct cor_neighbor *nb, unsigned long cmsg_send_start_j, while (1) { struct list_head cmsgs; __u32 length = 0; - __u64 seqno; + __u32 seqno; INIT_LIST_HEAD(&cmsgs); @@ -2261,7 +2252,6 @@ static int _cor_enqueue_control_msg(struct cor_control_msg_out *cm, int src) if (cm->nb->cmsg_pongscnt >= MAX_PONG_CMSGS_PER_NEIGH) { if (src != ADDCMSG_SRC_NEW) { BUG_ON(cm->nb->cmsg_pongscnt == 0); - cm->nb->cmsg_pongscnt--; cor_free_control_msg(cm); return 1; } else { @@ -2352,7 +2342,7 @@ void cor_send_pong(struct cor_neighbor *nb, __u32 cookie, ktime_t ping_rcvtime) cor_enqueue_control_msg(cm, ADDCMSG_SRC_NEW); } -void cor_send_ack(struct cor_neighbor *nb, __u64 seqno, __u8 fast) +void cor_send_ack(struct cor_neighbor *nb, __u32 seqno, __u8 fast) { struct cor_control_msg_out *cm = cor_alloc_control_msg(nb, ACM_PRIORITY_HIGH); @@ -2364,7 +2354,7 @@ void cor_send_ack(struct cor_neighbor *nb, __u64 seqno, __u8 
fast) cm->type = MSGTYPE_ACK; cm->msg.ack.seqno = seqno; cm->msg.ack.fast = fast; - cm->length = 7; + cm->length = 5; cor_enqueue_control_msg(cm, ADDCMSG_SRC_NEW); } @@ -2415,10 +2405,10 @@ static int _cor_try_merge_ackconn(struct cor_conn *src_in_l, { if (cor_ooolen(fromcm->msg.ack_conn.flags) != 0 && cor_ooolen(tocm->msg.ack_conn.flags) != 0) { - __u64 tocmseqno = tocm->msg.ack_conn.seqno_ooo; - __u64 tocmlength = tocm->msg.ack_conn.length; - __u64 fromcmseqno = fromcm->msg.ack_conn.seqno_ooo; - __u64 fromcmlength = fromcm->msg.ack_conn.length; + __u32 tocmseqno = tocm->msg.ack_conn.seqno_ooo; + __u32 tocmlength = tocm->msg.ack_conn.length; + __u32 fromcmseqno = fromcm->msg.ack_conn.seqno_ooo; + __u32 fromcmlength = fromcm->msg.ack_conn.length; if (cor_seqno_eq(tocmseqno, fromcmseqno)) { if (fromcmlength > tocmlength) @@ -2426,17 +2416,14 @@ static int _cor_try_merge_ackconn(struct cor_conn *src_in_l, } else if (cor_seqno_after(fromcmseqno, tocmseqno) && cor_seqno_before_eq(fromcmseqno, tocmseqno + tocmlength)) { - __u64 len = cor_seqno_clean(fromcmseqno + fromcmlength - - tocmseqno); + __u32 len = fromcmseqno + fromcmlength - tocmseqno; BUG_ON(len > U32_MAX); tocm->msg.ack_conn.length = (__u32) len; } else if (cor_seqno_before(fromcmseqno, tocmseqno) && cor_seqno_after_eq(fromcmseqno, tocmseqno)) { - __u64 len = cor_seqno_clean(tocmseqno + tocmlength - - fromcmseqno); - BUG_ON(len > U32_MAX); + __u32 len = tocmseqno + tocmlength - fromcmseqno; tocm->msg.ack_conn.seqno_ooo = fromcmseqno; - tocm->msg.ack_conn.length = (__u32) len; + tocm->msg.ack_conn.length = len; } else { return 1; } @@ -2598,7 +2585,7 @@ static int cor_try_update_ackconn_seqno(struct cor_conn *src_in_l) return rc; } -void cor_send_ack_conn_ifneeded(struct cor_conn *src_in_l, __u64 seqno_ooo, +void cor_send_ack_conn_ifneeded(struct cor_conn *src_in_l, __u32 seqno_ooo, __u32 ooo_length) { struct cor_control_msg_out *cm; @@ -2620,16 +2607,15 @@ void cor_send_ack_conn_ifneeded(struct cor_conn
*src_in_l, __u64 seqno_ooo, if (src_in_l->src.in.inorder_ack_needed != 0) goto ack_needed; - if (cor_seqno_clean(src_in_l->src.in.window_seqnolimit - - src_in_l->src.in.next_seqno) < WINDOW_ENCODE_MIN) + if (src_in_l->src.in.window_seqnolimit - src_in_l->src.in.next_seqno < + WINDOW_ENCODE_MIN) return; - if (cor_seqno_clean(src_in_l->src.in.window_seqnolimit_remote - - src_in_l->src.in.next_seqno) >= WINDOW_ENCODE_MIN && - cor_seqno_clean(src_in_l->src.in.window_seqnolimit - + if (src_in_l->src.in.window_seqnolimit_remote - + src_in_l->src.in.next_seqno >= WINDOW_ENCODE_MIN && + (src_in_l->src.in.window_seqnolimit - src_in_l->src.in.next_seqno) * 7 < - cor_seqno_clean( - src_in_l->src.in.window_seqnolimit_remote - + (src_in_l->src.in.window_seqnolimit_remote - src_in_l->src.in.next_seqno) * 8) return; @@ -2774,7 +2760,7 @@ void cor_send_connect_success(struct cor_control_msg_out *cm, __u32 conn_id, } void cor_send_connect_nb(struct cor_control_msg_out *cm, __u32 conn_id, - __u64 seqno1, __u64 seqno2, struct cor_conn *src_in_ll) + __u32 seqno1, __u32 seqno2, struct cor_conn *src_in_ll) { cm->type = MSGTYPE_CONNECT; cm->msg.connect.conn_id = conn_id; @@ -2783,12 +2769,12 @@ void cor_send_connect_nb(struct cor_control_msg_out *cm, __u32 conn_id, cor_conn_kref_get(src_in_ll, "cor_control_msg_out connect"); BUG_ON(src_in_ll->sourcetype != SOURCE_IN); cm->msg.connect.src_in = src_in_ll; - cm->length = 22; + cm->length = 18; cor_enqueue_control_msg(cm, ADDCMSG_SRC_NEW); } void cor_send_conndata(struct cor_control_msg_out *cm, __u32 conn_id, - __u64 seqno, char *data_orig, char *data, __u32 datalen, + __u32 seqno, char *data_orig, char *data, __u32 datalen, __u8 windowused, __u8 flush, __u8 highlatency, struct cor_conn_retrans *cr) { diff --git a/net/cor/settings.h b/net/cor/settings.h index 6c449eb5a9fa..e099e9ab0c29 100644 --- a/net/cor/settings.h +++ 
b/net/cor/settings.h @@ -70,6 +70,7 @@ #define CD_RESP_BIN_MAXSIZE 4096 #define BUFFERLIMIT_SRC_UNCONN 64 +#define SND_SPEED_START 16384 #define CONN_ACTIVITY_UPDATEINTERVAL_SEC 60 #define CONN_INACTIVITY_TIMEOUT_SEC 3600 diff --git a/net/cor/sock_managed.c b/net/cor/sock_managed.c index 77a3adac1830..dc1c1ecdff14 100644 --- a/net/cor/sock_managed.c +++ b/net/cor/sock_managed.c @@ -906,8 +906,8 @@ static int _cor_mngdsocket_sendmsg(struct msghdr *msg, __u32 totallen, } else if (cor_sock_sndbufavailable(src_sock, 0) == 0) { rc = -EAGAIN; src_sock->flush = 0; - spin_unlock_bh(&src_sock->rcv_lock); atomic_set(&cs->ready_to_write, 0); + spin_unlock_bh(&src_sock->rcv_lock); goto out; } @@ -1212,6 +1212,8 @@ void cor_mngdsocket_readfromconn_wq(struct work_struct *work) { struct cor_sock *cs = container_of(work, struct cor_sock, readfromconn_work); + int rc; + __u8 data_ready = 0; mutex_lock(&cs->lock); @@ -1222,11 +1224,18 @@ void cor_mngdsocket_readfromconn_wq(struct work_struct *work) goto out; BUG_ON(cs->type != CS_TYPE_CONN_MANAGED); - cor_mngdsocket_readfromconn(cs); + + rc = cor_mngdsocket_readfromconn(cs); + + if (rc == RCV_BUF_STATE_OK && cs->data.conn_managed.rcv_data_len > 0) + data_ready = 1; out: mutex_unlock(&cs->lock); + if (data_ready != 0) + cor_sk_data_ready(cs); + kref_put(&cs->ref, cor_free_sock); } diff --git a/net/cor/util.c b/net/cor/util.c index 0b789b7df5eb..e9c917cc54ec 100644 --- a/net/cor/util.c +++ b/net/cor/util.c @@ -67,11 +67,12 @@ static const __u32 cor_log_64_11_table[] = {0, 473300701, 504084694, 536870912, 571789581}; +#warning todo use this for the window size __u8 __attribute__((const)) cor_enc_log_64_11(__u32 value) { int i; - BUG_ON(cor_log_64_11_table[255] != 571789581); + BUILD_BUG_ON(cor_log_64_11_table[255] != 571789581); for (i = 1; i < 256; i++) { if (cor_log_64_11_table[i] > value) break; @@ -82,7 +83,7 @@ __u8 __attribute__((const)) cor_enc_log_64_11(__u32 value) __u32 __attribute__((const)) cor_dec_log_64_11(__u8 value) { - 
BUG_ON(cor_log_64_11_table[255] != 571789581); + BUILD_BUG_ON(cor_log_64_11_table[255] != 571789581); return cor_log_64_11_table[value]; } -- 2.11.4.GIT