From 8935606d06aa8efe373caba47cf2fc18a0c85c38 Mon Sep 17 00:00:00 2001
From: Michael Blizek
Date: Mon, 2 Aug 2021 07:28:19 +0200
Subject: [PATCH] send_announce: handle queue does not exist, mngdsocket: fix
 flushing, high latency conns stalling bugfix, kpacket size exceeding packet
 size bugfix

---
 net/cor/cor.h          |  3 +--
 net/cor/forward.c      |  1 +
 net/cor/kpacket_gen.c  | 28 ++++++++++++++++++++++++----
 net/cor/neighbor.c     | 10 +++++++++-
 net/cor/snd.c          |  1 +
 net/cor/sock_managed.c | 17 ++++++++++++++---
 6 files changed, 50 insertions(+), 10 deletions(-)

diff --git a/net/cor/cor.h b/net/cor/cor.h
index b1f1fe3e0582..69d2728daed9 100644
--- a/net/cor/cor.h
+++ b/net/cor/cor.h
@@ -1328,8 +1328,7 @@ extern int create_queue(struct net_device *dev);
 
 static inline void schedule_qos_resume(struct qos_queue *q)
 {
-	if (atomic_read(&(q->qos_resume_scheduled)) == 0) {
-		atomic_set(&(q->qos_resume_scheduled), 1);
+	if (atomic_cmpxchg(&(q->qos_resume_scheduled), 0, 1) == 0) {
 		barrier();
 		wake_up(&(q->qos_resume_wq));
 	}
diff --git a/net/cor/forward.c b/net/cor/forward.c
index d475a0659b5e..706cabed66e3 100644
--- a/net/cor/forward.c
+++ b/net/cor/forward.c
@@ -908,6 +908,7 @@ void wake_sender(struct conn *cn)
 		spin_unlock_bh(&(cn->reversedir->rcv_lock));
 		break;
 	case SOURCE_SOCK:
+		#warning todo call flushtoconn
 		if (cn->source.sock.cs != 0 /*&& cor_sock_sndbufavailable(cn)*/)
 			cor_sk_write_space(cn->source.sock.cs);
 		spin_unlock_bh(&(cn->rcv_lock));
diff --git a/net/cor/kpacket_gen.c b/net/cor/kpacket_gen.c
index 3304f5172fe3..93be04cd31b5 100644
--- a/net/cor/kpacket_gen.c
+++ b/net/cor/kpacket_gen.c
@@ -602,6 +602,8 @@ static __u32 add_ack(struct sk_buff *skb, struct control_retrans *cr,
 {
 	char *dst;
 
+	BUG_ON(cm->length != 7);
+
 	if (unlikely(spaceleft < 7))
 		return 0;
 
@@ -725,6 +727,8 @@ static __u32 add_pong(struct sk_buff *skb, struct control_retrans *cr,
 	__u32 respdelay_netonly;
 	char *dst;
 
+	BUG_ON(cm->length != 13);
+
 	if (unlikely(spaceleft < 13))
 		return 0;
 
@@ -752,6 +756,8 @@ static __u32 add_connect(struct sk_buff *skb, struct control_retrans *cr,
 	char *dst;
 	struct conn *src_in = cm->msg.connect.src_in;
 
+	BUG_ON(cm->length != 21);
+
 	if (unlikely(spaceleft < 21))
 		return 0;
 
@@ -788,6 +794,8 @@ static __u32 add_connect_success(struct sk_buff *skb, struct control_retrans *cr
 {
 	char *dst;
 
+	BUG_ON(cm->length != 6);
+
 	if (unlikely(spaceleft < 6))
 		return 0;
 
@@ -810,6 +818,8 @@ static __u32 add_reset_conn(struct sk_buff *skb, struct control_retrans *cr,
 {
 	char *dst;
 
+	BUG_ON(cm->length != 5);
+
 	if (unlikely(spaceleft < 5))
 		return 0;
 
@@ -835,6 +845,7 @@ static __u32 add_conndata(struct sk_buff *skb, struct control_retrans *cr,
 	__u32 dataputlen = putlen - KP_CONN_DATA_CMDLEN;
 
 	BUG_ON(KP_CONN_DATA_CMDLEN != 13);
+	BUG_ON(cm->length != totallen);
 
 	BUG_ON(putlen > 1024*1024*1024);
 
@@ -869,6 +880,7 @@ static __u32 add_conndata(struct sk_buff *skb, struct control_retrans *cr,
 	memcpy(dst + 13, cm->msg.conn_data.data, dataputlen);
 
 	if (cm->msg.conn_data.datalen == dataputlen) {
+		BUG_ON(cm->length != putlen);
 		list_add_tail(&(cm->lh), &(cr->msgs));
 	} else {
 		*split_conndata = cm;
@@ -884,6 +896,7 @@ static __u32 add_set_max_cmsg_dly(struct sk_buff *skb, struct control_retrans *c
 	char *dst;
 
 	BUG_ON(KP_SET_MAX_CMSG_DELAY_CMDLEN != 13);
+	BUG_ON(cm->length != KP_SET_MAX_CMSG_DELAY_CMDLEN);
 
 	if (unlikely(spaceleft < 13))
 		return 0;
@@ -954,6 +967,7 @@ static __u32 __send_messages(struct neighbor *nb, struct sk_buff *skb,
 			list_add(&(cm->lh), cmsgs);
 			break;
 		}
+		BUG_ON(rc != cm->length && cm->type != MSGTYPE_CONNDATA);
 
 		length += rc;
 	}
@@ -1344,12 +1358,17 @@ static void _dequeue_messages(struct neighbor *nb_cmsglocked, int nbstate,
 		BUG_ON(len == 0);
 
 		if (cm->length > spaceleft) {
-			BUG_ON(*length == 0 && cm->type != MSGTYPE_CONNDATA);
-			BUG_ON(*length == 0 && cm->type == MSGTYPE_CONNDATA &&
-					spaceleft < KP_CONN_DATA_CMDLEN + 1);
+			if (cm->type == MSGTYPE_CONNDATA) {
+				BUG_ON(*length == 0 && spaceleft <
+						KP_CONN_DATA_CMDLEN + 1);
 
-			if ((*length/4)*3 > targetmss)
+				if (spaceleft < KP_CONN_DATA_CMDLEN + 1 ||
+						(*length/4)*3 > targetmss)
+					break;
+			} else {
+				BUG_ON(*length == 0);
 				break;
+			}
 		}
 
 		list_del(&(cm->lh));
@@ -1372,6 +1391,7 @@ static void _dequeue_messages(struct neighbor *nb_cmsglocked, int nbstate,
 		BUG_ON(*length + cm->length < *length);
 
 		if (cm->length > targetmss - *length) {
+			BUG_ON(*length >= targetmss);
 			BUG_ON(cm->type != MSGTYPE_CONNDATA);
 			*length = targetmss;
 		} else {
diff --git a/net/cor/neighbor.c b/net/cor/neighbor.c
index d58f213b176b..16e13c8aa58f 100644
--- a/net/cor/neighbor.c
+++ b/net/cor/neighbor.c
@@ -1426,6 +1426,12 @@ static int ___send_announce(struct sk_buff *skb, int *sent)
 	int rc;
 	struct qos_queue *q = get_queue(skb->dev);
 
+	if (q == 0) {
+		kfree_skb(skb);
+		*sent = 1;
+		return NET_XMIT_SUCCESS;
+	}
+
 	rc = cor_dev_queue_xmit(skb, q, QOS_CALLER_ANNOUNCE);
 	kref_put(&(q->ref), free_qos);
 	if (rc != NET_XMIT_DROP)
@@ -1543,8 +1549,10 @@ int _send_announce(struct announce_data *ann, int fromqos, int *sent)
 
 	spin_lock_bh(&(announce_snd_lock));
 
-	if (unlikely(ann->dev == 0))
+	if (unlikely(ann->dev == 0)) {
+		rc = NET_XMIT_SUCCESS;
 		goto out;
+	}
 
 	if (is_device_configurated(ann->dev) == 0)
 		rc = NET_XMIT_SUCCESS;
diff --git a/net/cor/snd.c b/net/cor/snd.c
index 47e37deb1c9f..04ae818940b7 100644
--- a/net/cor/snd.c
+++ b/net/cor/snd.c
@@ -822,6 +822,7 @@ static int _qos_resume(struct qos_queue *q, int *sent)
 		}
 
 		spin_unlock_irqrestore(&(q->qlock), iflags);
+
 		if (i == QOS_CALLER_NEIGHBOR) {
 			rc = resume_neighbors(q, sent);
 		} else {
diff --git a/net/cor/sock_managed.c b/net/cor/sock_managed.c
index 4c069b032621..d9533ac69a2f 100644
--- a/net/cor/sock_managed.c
+++ b/net/cor/sock_managed.c
@@ -665,6 +665,8 @@ static void _cor_mngdsocket_shutdown(struct cor_sock *cs_m_l, int flags)
 		}
 
 		cs_m_l->data.conn_managed.shutdownflags |= CS_SHUTDOWN_SHUTDOWN_WR;
+
+		cs_m_l->data.conn_managed.flush = 1;
 	}
 
 	if (send_eof != 0 || send_rcvend != 0)
@@ -1111,6 +1113,7 @@ static void cor_mngdsocket_flushtoconn_oomresume(struct work_struct *work)
 	}
 }
 
+#warning send data before eof
 static int cor_mngdsocket_flushtoconn_ctrl(struct cor_sock *cs_m_l,
 		__u8 send_eof, __u8 send_rcvend)
 {
@@ -1132,8 +1135,10 @@ static int cor_mngdsocket_flushtoconn_ctrl(struct cor_sock *cs_m_l,
 		goto out_err;
 	}
 
-	if (send_eof != 0)
+	if (send_eof != 0) {
 		src_sock->source.sock.send_eof_needed = 1;
+		src_sock->source.sock.flush = 1;
+	}
 
 	if (send_rcvend != 0)
 		src_sock->source.sock.send_rcvend_needed = 1;
@@ -1168,6 +1173,8 @@ static int cor_mngdsocket_flushtoconn(struct cor_sock *cs_m_l)
 		goto out_err;
 	}
 
+	src_sock->source.sock.flush = cs_m_l->data.conn_managed.flush;
+
 	if (unlikely(cs_m_l->data.conn_managed.send_in_progress != 0)) {
 		if (src_sock->source.sock.buf_data_filled == 0) {
 			cs_m_l->data.conn_managed.send_in_progress = 0;
@@ -1327,6 +1334,7 @@ static int _cor_mngdsocket_sendmsg(struct msghdr *msg, __u32 totallen,
 	if (unlikely(cs->data.conn_managed.send_in_progress != 0 ||
 			cs->data.conn_managed.snd_data_len ==
 			cs->data.conn_managed.snd_segment_size)) {
+		cs->data.conn_managed.flush = 0;
 		cor_mngdsocket_flushtoconn(cs);
 		if (cs->data.conn_managed.send_in_progress != 0 ||
 				cs->data.conn_managed.snd_data_len ==
@@ -1588,7 +1596,10 @@ out:
 	trgt_sock_lx->target.sock.rcv_buf_state = rc;
 
 	if (from_recvmsg == 0) {
-		if (trgt_sock_lx->is_highlatency != 0 &&
+		/* This does not work because on the next call
+		 * rcv_buf_state == RCV_BUF_STATE_OK and we do not get this far
+		 */
+		/*if (trgt_sock_lx->is_highlatency != 0 &&
 				trgt_sock_lx->flush == 0 &&
 				trgt_sock_lx->data_buf.read_remaining < 4096 &&
 				trgt_sock_lx->sourcetype == SOURCE_SOCK &&
@@ -1600,7 +1611,7 @@ out:
 				trgt_sock_lx->data_buf.read_remaining < 4096 &&
 				trgt_sock_lx->sourcetype == SOURCE_IN &&
 				srcin_buflimit_reached(trgt_sock_lx) == 0)
-			return;
+			return;*/
 
 		if (rc == RCV_BUF_STATE_OK &&
 				trgt_sock_lx->target.sock.rcv_data_len > 0) {
-- 
2.11.4.GIT