From 3e937801edea70791e6bb40fa1045c4b5d9e04f0 Mon Sep 17 00:00:00 2001
From: Michael Blizek
Date: Sun, 19 Sep 2021 09:41:07 +0200
Subject: [PATCH] net/cor: create dev.c

Move the code that talks to network devices out of snd.c, rcv.c and
neighbor.c into a new file dev.c: the qos queues, the netdev notifier,
the ETH_P_COR packet_type handler and the nbcongwin helpers now live
there, behind the new cor_dev_init()/cor_dev_exit1() and
cor_dev_up()/cor_dev_down() entry points.

---
 net/cor/Makefile         |    2 +-
 net/cor/common.c         |    6 +-
 net/cor/cor.h            |  202 ++++---
 net/cor/{snd.c => dev.c} | 1341 ++++++------------------------------------
 net/cor/kpacket_gen.c    |    1 -
 net/cor/neighbor.c       |   88 +--
 net/cor/rcv.c            |  151 -----
 net/cor/snd.c            | 1420 +---------------------------------------
 8 files changed, 310 insertions(+), 2901 deletions(-)
 copy net/cor/{snd.c => dev.c} (52%)

diff --git a/net/cor/Makefile b/net/cor/Makefile
index ce32129f85a9..91b71b8e183d 100644
--- a/net/cor/Makefile
+++ b/net/cor/Makefile
@@ -1 +1 @@
-obj-y := common.o util.o credits.o rcv.o snd.o sock_rdaemon.o sock_raw.o sock_managed.o sock.o kpacket_parse.o kpacket_gen.o cpacket_parse.o neighbor.o forward.o
+obj-y := dev.o common.o util.o credits.o rcv.o snd.o sock_rdaemon.o sock_raw.o sock_managed.o sock.o kpacket_parse.o kpacket_gen.o cpacket_parse.o neighbor.o forward.o

diff --git a/net/cor/common.c b/net/cor/common.c
index f80d30836961..3abd6e91022f 100644
--- a/net/cor/common.c
+++ b/net/cor/common.c
@@ -1035,6 +1035,10 @@ static int __init cor_init(void)
 	if (unlikely(rc != 0))
 		return rc;
 
+	rc = cor_dev_init();
+	if (unlikely(rc != 0))
+		return rc;
+
 	rc = cor_rcv_init();
 	if (unlikely(rc != 0))
 		return rc;
@@ -1059,7 +1063,7 @@ static void __exit cor_exit(void)
 	cor_rd_exit1();
 	cor_sock_exit1();
 	cor_sock_managed_exit1();
-	cor_snd_exit1();
+	cor_dev_exit1();
 	cor_neighbor_exit1();
 
 	cor_rcv_exit2();

diff --git a/net/cor/cor.h b/net/cor/cor.h
index a2583d4ad636..8e4d968cabb3 100644
--- a/net/cor/cor.h
+++ b/net/cor/cor.h
@@ -1080,6 +1080,114 @@ struct cor_sock {
 };
 
 
+/* dev.c */
+
+extern void cor_qos_set_lastdrop(struct cor_qos_queue *q);
+
+#ifdef DEBUG_QOS_SLOWSEND
+extern int _cor_dev_queue_xmit(struct sk_buff *skb, int caller);
+#else
+static inline int _cor_dev_queue_xmit(struct sk_buff *skb, int caller)
+{
+	return dev_queue_xmit(skb);
+}
+#endif
+
+static inline int cor_dev_queue_xmit(struct sk_buff *skb,
+		struct cor_qos_queue *q, int caller)
+{
+	int rc = _cor_dev_queue_xmit(skb, caller);
+	if (unlikely(rc != NET_XMIT_SUCCESS))
+		cor_qos_set_lastdrop(q);
+	return rc;
+}
+
+extern void cor_free_qos(struct kref *ref);
+
+#ifdef COR_NBCONGWIN
+extern void cor_nbcongwin_data_retransmitted(struct cor_neighbor *nb,
+		__u64 bytes_sent);
+
+extern void cor_nbcongwin_data_acked(struct cor_neighbor *nb,
+		__u64 bytes_acked);
+
+extern void cor_nbcongwin_data_sent(struct cor_neighbor *nb, __u32 bytes_sent);
+
+extern int cor_nbcongwin_send_allowed(struct cor_neighbor *nb);
+
+#else
+
+static inline void cor_nbcongwin_data_retransmitted(struct cor_neighbor *nb,
+		__u64 bytes_sent)
+{
+}
+
+static inline void cor_nbcongwin_data_acked(struct cor_neighbor *nb,
+		__u64 bytes_acked)
+{
+}
+
+static inline void cor_nbcongwin_data_sent(struct cor_neighbor *nb,
+		__u32 bytes_sent)
+{
+}
+
+static inline int cor_nbcongwin_send_allowed(struct cor_neighbor *nb)
+{
+	return 1;
+}
+#endif
+
+extern unsigned long cor_get_conn_idletime(struct cor_conn *trgt_out_l);
+
+extern struct cor_qos_queue *cor_get_queue(struct net_device *dev);
+
+extern int cor_destroy_queue(struct net_device *dev);
+
+extern int cor_create_queue(struct net_device *dev);
+
+#define QOS_RESUME_DONE 0
+#define QOS_RESUME_CONG 1
+#define QOS_RESUME_NEXTNEIGHBOR 2 /* cor_resume_neighbors() internal */
+#define QOS_RESUME_EXIT 3
+
+#define QOS_CALLER_KPACKET 0
+#define QOS_CALLER_CONN_RETRANS 
1 +#define QOS_CALLER_ANNOUNCE 2 +#define QOS_CALLER_NEIGHBOR 3 + +static inline void cor_schedule_qos_resume(struct cor_qos_queue *q) +{ + if (atomic_cmpxchg(&(q->qos_resume_scheduled), 0, 1) == 0) { + barrier(); + wake_up(&(q->qos_resume_wq)); + } +} + +extern void cor_qos_enqueue(struct cor_qos_queue *q, + struct cor_resume_block *rb, ktime_t cmsg_send_start, + int caller); + +extern void cor_qos_remove_conn(struct cor_conn *trgt_out_l); + +extern int cor_may_send_announce(struct net_device *dev); + +extern struct sk_buff *cor_create_packet_cmsg(struct cor_neighbor *nb, int size, + gfp_t alloc_flags, __u64 seqno); + +extern struct sk_buff *cor_create_packet_conndata(struct cor_neighbor *nb, + int size, gfp_t alloc_flags, __u32 conn_id, __u64 seqno, + __u8 snd_delayed_lowbuf, __u8 flush); + +extern void cor_qos_enqueue_conn(struct cor_conn *trgt_out_lx); + +extern void cor_dev_down(void); + +extern int cor_dev_up(void); + +extern void __exit cor_dev_exit1(void); + +extern int __init cor_dev_init(void); /* common.c */ extern atomic_t cor_num_conns; @@ -1163,6 +1271,8 @@ extern struct cor_neighbor *cor_find_neigh(char *addr, __u16 addrlen); extern __u32 cor_generate_neigh_list(char *buf, __u32 buflen); +extern void cor_reset_neighbors(struct net_device *dev); + extern int cor_get_neigh_state(struct cor_neighbor *nb); extern void cor_ping_resp(struct cor_neighbor *nb, __u32 cookie, @@ -1190,6 +1300,9 @@ extern int _cor_send_announce(struct cor_announce_data *ann, int fromqos, extern void cor_announce_data_free(struct kref *ref); +extern void cor_announce_send_start(struct net_device *dev, char *mac, + int type); + extern void cor_announce_send_stop(struct net_device *dev, char *mac, int type); extern void cor_neighbor_down(void); @@ -1214,10 +1327,6 @@ extern void cor_conn_rcv(struct cor_neighbor *nb, __u32 conn_id, __u64 seqno, int rcv_delayed_lowbuf, __u8 flush); -extern void cor_rcv_down(void); - -extern void cor_rcv_up(void); - extern int __init cor_rcv_init(void); extern void __exit cor_rcv_exit2(void); @@ -1286,78 +1395,13 @@ extern int cor_encode_len(char *buf, int buflen, __u32 len); extern void cor_proc_cpacket(struct cor_conn *trgt_unconn); /* snd.c */ -extern void cor_qos_set_lastdrop(struct cor_qos_queue *q); - -#ifdef DEBUG_QOS_SLOWSEND -extern int _cor_dev_queue_xmit(struct sk_buff *skb, int caller); -#else -static inline int _cor_dev_queue_xmit(struct sk_buff *skb, int caller) -{ - return dev_queue_xmit(skb); -} -#endif - -static inline int cor_dev_queue_xmit(struct sk_buff *skb, - struct cor_qos_queue *q, int caller) -{ - int rc = _cor_dev_queue_xmit(skb, caller); - if (unlikely(rc != NET_XMIT_SUCCESS)) - cor_qos_set_lastdrop(q); - return rc; -} - -extern void cor_free_qos(struct kref *ref); - -#ifdef COR_NBCONGWIN -extern void cor_nbcongwin_data_acked(struct cor_neighbor *nb, - __u64 bytes_acked); -#else -static inline void cor_nbcongwin_data_acked(struct cor_neighbor *nb, - __u64 bytes_acked) -{ -} -#endif - -extern struct cor_qos_queue *cor_get_queue(struct net_device *dev); - -extern int cor_destroy_queue(struct net_device *dev); - -extern int cor_create_queue(struct net_device *dev); - -#define QOS_RESUME_DONE 0 -#define QOS_RESUME_CONG 1 -#define QOS_RESUME_NEXTNEIGHBOR 2 /* cor_resume_neighbors() internal */ -#define QOS_RESUME_EXIT 3 - -#define QOS_CALLER_KPACKET 0 -#define QOS_CALLER_CONN_RETRANS 1 -#define QOS_CALLER_ANNOUNCE 2 -#define QOS_CALLER_NEIGHBOR 3 - -static inline void cor_schedule_qos_resume(struct cor_qos_queue *q) -{ - if 
(atomic_cmpxchg(&(q->qos_resume_scheduled), 0, 1) == 0) { - barrier(); - wake_up(&(q->qos_resume_wq)); - } -} - -extern void cor_qos_enqueue(struct cor_qos_queue *q, - struct cor_resume_block *rb, ktime_t cmsg_send_start, - int caller); - -extern void cor_qos_remove_conn(struct cor_conn *trgt_out_l); - -extern int cor_may_send_announce(struct net_device *dev); - -extern struct sk_buff *cor_create_packet_cmsg(struct cor_neighbor *nb, int size, - gfp_t alloc_flags, __u64 seqno); - extern void cor_reschedule_conn_retrans_timer( struct cor_neighbor *nb_retranslocked); extern void cor_cancel_all_conn_retrans(struct cor_conn *trgt_out_l); +extern int cor_send_retrans(struct cor_neighbor *nb, int *sent); + extern void cor_retransmit_conn_timerfunc(struct timer_list *retrans_timer_conn); extern void cor_conn_ack_ooo_rcvd(struct cor_neighbor *nb, __u32 conn_id, @@ -1381,14 +1425,24 @@ extern int cor_srcin_buflimit_reached(struct cor_conn *src_in_lx); #define RC_FLUSH_CONN_OUT_MAXSENT 5 #define RC_FLUSH_CONN_OUT_OOM 6 -extern int cor_flush_out(struct cor_conn *trgt_out_lx, __u32 *sent); +extern int _cor_flush_out(struct cor_conn *trgt_out_lx, __u32 maxsend, + __u32 *sent, int from_qos, int maxsend_forcedelay); + +static inline int cor_flush_out(struct cor_conn *trgt_out_lx, __u32 *sent) +{ + int rc = _cor_flush_out(trgt_out_lx, 1 << 30, sent, 0, 0); + + if (rc == RC_FLUSH_CONN_OUT_CONG || rc == RC_FLUSH_CONN_OUT_MAXSENT || + rc == RC_FLUSH_CONN_OUT_OOM) + cor_qos_enqueue_conn(trgt_out_lx); + + return rc; +} extern void cor_resume_nbstalled_conns(struct work_struct *work); extern int __init cor_snd_init(void); -extern void __exit cor_snd_exit1(void); - extern void __exit cor_snd_exit2(void); /* forward.c */ diff --git a/net/cor/snd.c b/net/cor/dev.c similarity index 52% copy from net/cor/snd.c copy to net/cor/dev.c index 645a7ed77b96..43e6fbf12aa7 100644 --- a/net/cor/snd.c +++ b/net/cor/dev.c @@ -18,14 +18,19 @@ * 02110-1301, USA. 
*/ -#include -#include -#include +#include +#include +#include +#include #include + #include "cor.h" -static struct kmem_cache *cor_connretrans_slab; +static struct notifier_block cor_netdev_notify; +__u8 cor_netdev_notify_registered = 0; + +__u8 cor_pack_registered = 0; static DEFINE_SPINLOCK(cor_queues_lock); static LIST_HEAD(cor_queues); @@ -34,8 +39,6 @@ static LIST_HEAD(cor_queues_waitexit); static void cor_qos_waitexit(struct work_struct *work); DECLARE_WORK(cor_qos_waitexit_work, cor_qos_waitexit); -static int _cor_flush_out(struct cor_conn *trgt_out_lx, __u32 maxsend, - __u32 *sent, int from_qos, int maxsend_forcedelay); static void _cor_qos_enqueue(struct cor_qos_queue *q, struct cor_resume_block *rb, ktime_t cmsg_send_start, @@ -73,18 +76,6 @@ int _cor_dev_queue_xmit(struct sk_buff *skb, int caller) } #endif -static void cor_free_connretrans(struct kref *ref) -{ - struct cor_conn_retrans *cr = container_of(ref, struct cor_conn_retrans, - ref); - struct cor_conn *cn = cr->trgt_out_o; - - BUG_ON(cr->state != CONN_RETRANS_ACKED); - - kmem_cache_free(cor_connretrans_slab, cr); - kref_put(&(cn->ref), cor_free_conn); -} - void cor_free_qos(struct kref *ref) { struct cor_qos_queue *q = container_of(ref, struct cor_qos_queue, ref); @@ -147,7 +138,7 @@ static void print_conn_bufstats(struct cor_neighbor *nb) numconns); } */ -static void cor_nbcongwin_data_retransmitted(struct cor_neighbor *nb, +void cor_nbcongwin_data_retransmitted(struct cor_neighbor *nb, __u64 bytes_sent) { __u64 min_cwin = cor_mss_conndata(nb, 0)*2 << NBCONGWIN_SHIFT; @@ -286,12 +277,12 @@ out_sendnok: spin_unlock_irqrestore(&(nb->nbcongwin.lock), iflags); } -static void cor_nbcongwin_data_sent(struct cor_neighbor *nb, __u32 bytes_sent) +void cor_nbcongwin_data_sent(struct cor_neighbor *nb, __u32 bytes_sent) { atomic64_add(bytes_sent, &(nb->nbcongwin.data_intransit)); } -static int cor_nbcongwin_send_allowed(struct cor_neighbor *nb) +int cor_nbcongwin_send_allowed(struct cor_neighbor *nb) { unsigned long iflags; int ret = 1; @@ -338,23 +329,6 @@ out_ok: return ret; } -#else - -static inline void cor_nbcongwin_data_retransmitted(struct cor_neighbor *nb, - __u64 bytes_sent) -{ -} - -static inline void cor_nbcongwin_data_sent(struct cor_neighbor *nb, - __u32 bytes_sent) -{ -} - -static inline int cor_nbcongwin_send_allowed(struct cor_neighbor *nb) -{ - return 1; -} - #endif static void _cor_resume_conns_accountbusytime(struct cor_conn *trgt_out_l, @@ -393,7 +367,7 @@ static void _cor_resume_conns_accountbusytime(struct cor_conn *trgt_out_l, jiffies_tmp << JIFFIES_LAST_IDLE_SHIFT; } -static unsigned long cor_get_conn_idletime(struct cor_conn *trgt_out_l) +unsigned long cor_get_conn_idletime(struct cor_conn *trgt_out_l) { unsigned long jiffies_shifted = jiffies << JIFFIES_LAST_IDLE_SHIFT; __u32 burst_maxidle_hz_shifted = (BURSTPRIO_MAXIDLETIME_SECS*HZ) << @@ -717,8 +691,6 @@ static int cor_resume_neighbors(struct cor_qos_queue *q, int *sent) return QOS_RESUME_NEXTNEIGHBOR; } -static int cor_send_retrans(struct cor_neighbor *nb, int *sent); - static int __cor_qos_resume(struct cor_qos_queue *q, int caller, int *sent) { unsigned long iflags; @@ -1323,7 +1295,7 @@ void cor_qos_remove_conn(struct cor_conn *trgt_out_lx) kref_put(&(nb->ref), cor_neighbor_free); } -static void cor_qos_enqueue_conn(struct cor_conn *trgt_out_lx) +void cor_qos_enqueue_conn(struct cor_conn *trgt_out_lx) { unsigned long iflags; struct cor_neighbor *nb = trgt_out_lx->target.out.nb; @@ -1437,1208 +1409,233 @@ struct sk_buff 
*cor_create_packet_conndata(struct cor_neighbor *nb, int size, return ret; } -void cor_reschedule_conn_retrans_timer( - struct cor_neighbor *nb_retransconnlocked) -{ - struct cor_conn_retrans *cr = 0; - if (list_empty(&(nb_retransconnlocked->retrans_conn_list))) - return; - - cr = container_of(nb_retransconnlocked->retrans_conn_list.next, - struct cor_conn_retrans, timeout_list); - - if (time_before_eq(cr->timeout, jiffies)) { - cor_qos_enqueue(nb_retransconnlocked->queue, - &(nb_retransconnlocked->rb_cr), ns_to_ktime(0), - QOS_CALLER_CONN_RETRANS); - } else { - if (mod_timer(&(nb_retransconnlocked->retrans_conn_timer), - cr->timeout) == 0) { - kref_get(&(nb_retransconnlocked->ref)); - } - } -} - -/** - * warning: - * caller must also call kref_get/put, see cor_reschedule_conn_retrans_timer - */ -static void cor_cancel_conn_retrans(struct cor_neighbor *nb_retransconnlocked, - struct cor_conn *trgt_out_lx, struct cor_conn_retrans *cr, - __u64 *bytes_acked) -{ - if (unlikely(cr->state == CONN_RETRANS_ACKED)) - return; - - if (cr->state == CONN_RETRANS_SCHEDULED) { - list_del(&(cr->timeout_list)); - } else if (cr->state == CONN_RETRANS_LOWWINDOW) { - BUG_ON(trgt_out_lx->target.out.retrans_lowwindow == 0); - if (likely(trgt_out_lx->target.out.retrans_lowwindow != 65535)) - trgt_out_lx->target.out.retrans_lowwindow--; - } - - if (cr->state != CONN_RETRANS_INITIAL) - *bytes_acked += cr->length; - - list_del(&(cr->conn_list)); - cr->state = CONN_RETRANS_ACKED; - - kref_put(&(cr->ref), cor_free_connretrans); -} - -/** - * nb->retrans_conn_lock must be held when calling this - * (see cor_schedule_retransmit_conn()) - */ -static void cor_cancel_acked_conn_retrans(struct cor_conn *trgt_out_l, - __u64 *bytes_acked) -{ - __u64 seqno_acked = trgt_out_l->target.out.seqno_acked; - - while (list_empty(&(trgt_out_l->target.out.retrans_list)) == 0) { - struct cor_conn_retrans *cr = container_of( - trgt_out_l->target.out.retrans_list.next, - struct cor_conn_retrans, conn_list); - - if (cor_seqno_after(cr->seqno + cr->length, seqno_acked)) { - if (cor_seqno_before(cr->seqno, seqno_acked)) { - *bytes_acked += cor_seqno_clean(seqno_acked - - cr->seqno); - cr->length -= cor_seqno_clean(seqno_acked - - cr->seqno); - cr->seqno = seqno_acked; - } - break; - } - - cor_cancel_conn_retrans(trgt_out_l->target.out.nb, trgt_out_l, - cr, bytes_acked); - } - - cor_reschedule_conn_retrans_timer(trgt_out_l->target.out.nb); -} - -void cor_cancel_all_conn_retrans(struct cor_conn *trgt_out_lx) +static void cor_rcv_conndata(struct sk_buff *skb, int rcv_delayed_lowbuf, + __u8 flush) { - struct cor_neighbor *nb = trgt_out_lx->target.out.nb; - __u64 bytes_acked = 0; - - spin_lock_bh(&(nb->retrans_conn_lock)); - - while (list_empty(&(trgt_out_lx->target.out.retrans_list)) == 0) { - struct cor_conn_retrans *cr = container_of( - trgt_out_lx->target.out.retrans_list.next, - struct cor_conn_retrans, conn_list); - BUG_ON(cr->trgt_out_o != trgt_out_lx); - - cor_cancel_conn_retrans(nb, trgt_out_lx, cr, &bytes_acked); - } + struct cor_neighbor *nb = cor_get_neigh_by_mac(skb); - cor_reschedule_conn_retrans_timer(nb); - - spin_unlock_bh(&(nb->retrans_conn_lock)); - - if (bytes_acked > 0) - cor_nbcongwin_data_acked(nb, bytes_acked); -} - -static void cor_cancel_all_conn_retrans_nb(struct cor_neighbor *nb) -{ - __u64 bytes_acked = 0; - - while (1) { - struct cor_conn_retrans *cr; - - spin_lock_bh(&(nb->retrans_conn_lock)); - - if (list_empty(&(nb->retrans_conn_list))) { - spin_unlock_bh(&(nb->retrans_conn_lock)); - break; - } - - cr = 
container_of(nb->retrans_conn_list.next, - struct cor_conn_retrans, timeout_list); - - kref_get(&(cr->ref)); - - spin_unlock_bh(&(nb->retrans_conn_lock)); - - - spin_lock_bh(&(cr->trgt_out_o->rcv_lock)); - spin_lock_bh(&(nb->retrans_conn_lock)); - - if (likely(cr == container_of(nb->retrans_conn_list.next, - struct cor_conn_retrans, timeout_list))) - cor_cancel_conn_retrans(nb, cr->trgt_out_o, cr, - &bytes_acked); - - spin_unlock_bh(&(nb->retrans_conn_lock)); - spin_unlock_bh(&(cr->trgt_out_o->rcv_lock)); - - kref_put(&(cr->ref), cor_free_connretrans); - } - - if (bytes_acked > 0) - cor_nbcongwin_data_acked(nb, bytes_acked); -} - -static struct cor_conn_retrans *cor_prepare_conn_retrans( - struct cor_conn *trgt_out_l, __u64 seqno, __u32 len, - __u8 snd_delayed_lowbuf, struct cor_conn_retrans *cr_splitted, - int retransconnlocked) -{ - struct cor_neighbor *nb = trgt_out_l->target.out.nb; - - struct cor_conn_retrans *cr = kmem_cache_alloc(cor_connretrans_slab, - GFP_ATOMIC); - - if (unlikely(cr == 0)) - return 0; - - BUG_ON(trgt_out_l->isreset != 0); - - memset(cr, 0, sizeof (struct cor_conn_retrans)); - cr->trgt_out_o = trgt_out_l; - kref_get(&(trgt_out_l->ref)); - cr->seqno = seqno; - cr->length = len; - cr->snd_delayed_lowbuf = snd_delayed_lowbuf; - kref_init(&(cr->ref)); - - kref_get(&(cr->ref)); - if (retransconnlocked == 0) - spin_lock_bh(&(nb->retrans_conn_lock)); - - if (cr_splitted != 0) - list_add(&(cr->conn_list), &(cr_splitted->conn_list)); - else - list_add_tail(&(cr->conn_list), - &(cr->trgt_out_o->target.out.retrans_list)); - - if (retransconnlocked == 0) - spin_unlock_bh(&(nb->retrans_conn_lock)); - - return cr; -} - -#define RC_SENDRETRANS_OK 0 -#define RC_SENDRETRANS_OOM 1 -#define RC_SENDRETRANS_QUEUEFULL 2 -#define RC_SENDRETRANS_QUEUEFULLDROPPED 3 - -static int __cor_send_retrans(struct cor_neighbor *nb, - struct cor_conn *trgt_out_l, struct cor_conn_retrans *cr, - __u64 *bytes_sent) -{ - __u8 flush = 0; - - BUG_ON(cr->length == 0); - - if (trgt_out_l->flush != 0 && cor_seqno_eq(cr->seqno + cr->length, - trgt_out_l->target.out.seqno_nextsend) && - trgt_out_l->data_buf.read_remaining == 0) - flush = 1; - - if (cor_send_conndata_as_skb(nb, cr->length)) { - struct sk_buff *skb; - char *dst; - int rc; - - skb = cor_create_packet_conndata(nb, cr->length, GFP_ATOMIC, - trgt_out_l->target.out.conn_id, cr->seqno, - cr->snd_delayed_lowbuf, flush); - if (unlikely(skb == 0)) - return RC_SENDRETRANS_OOM; - - dst = skb_put(skb, cr->length); - - cor_databuf_pullold(trgt_out_l, cr->seqno, dst, cr->length); - - rc = cor_dev_queue_xmit(skb, nb->queue, - QOS_CALLER_CONN_RETRANS); - if (rc == NET_XMIT_DROP) - return RC_SENDRETRANS_QUEUEFULLDROPPED; - cor_schedule_retransmit_conn(cr, 1, 0); - if (rc != NET_XMIT_SUCCESS) - return RC_SENDRETRANS_QUEUEFULL; - - } else { - struct cor_control_msg_out *cm; - char *buf; - - buf = kmalloc(cr->length, GFP_ATOMIC); - if (unlikely(buf == 0)) - return RC_SENDRETRANS_OOM; - - cm = cor_alloc_control_msg(nb, ACM_PRIORITY_LOW); - if (unlikely(cm == 0)) { - kfree(buf); - return RC_SENDRETRANS_OOM; - } - - cor_databuf_pullold(trgt_out_l, cr->seqno, buf, cr->length); - - cor_send_conndata(cm, trgt_out_l->target.out.conn_id, - cr->seqno, buf, buf, cr->length, - cr->snd_delayed_lowbuf, flush, - trgt_out_l->is_highlatency, cr); - } - - *bytes_sent += cr->length; - - return RC_SENDRETRANS_OK; -} - -static int _cor_send_retrans_splitcr_ifneeded( - struct cor_neighbor *nb_retransconnlocked, - struct cor_conn *trgt_out_l, struct cor_conn_retrans *cr) -{ - __u32 
targetmss = cor_mss_conndata(nb_retransconnlocked, - trgt_out_l->is_highlatency != 0); - __u64 windowlimit = cor_seqno_clean( - trgt_out_l->target.out.seqno_windowlimit - - cr->seqno); - __u32 maxsize = targetmss; - if (windowlimit < maxsize) - maxsize = windowlimit; - - if (unlikely(cr->length > maxsize)) { - struct cor_conn_retrans *cr2 = cor_prepare_conn_retrans( - trgt_out_l, cr->seqno + maxsize, - cr->length - maxsize, cr->snd_delayed_lowbuf, - cr, 1); - if (unlikely(cr2 == 0)) - return RC_SENDRETRANS_OOM; - - cr2->timeout = cr->timeout; - - list_add(&(cr2->timeout_list), - &(nb_retransconnlocked->retrans_conn_list)); - cr2->state = CONN_RETRANS_SCHEDULED; - - cr->length = maxsize; - } - - return RC_SENDRETRANS_OK; -} - -static int _cor_send_retrans(struct cor_neighbor *nb, - struct cor_conn_retrans *cr, __u64 *bytes_sent) -{ - - struct cor_conn *trgt_out_o = cr->trgt_out_o; - int rc = RC_SENDRETRANS_OK; - - spin_lock_bh(&(trgt_out_o->rcv_lock)); - - BUG_ON(trgt_out_o->targettype != TARGET_OUT); - BUG_ON(trgt_out_o->target.out.nb != nb); - - spin_lock_bh(&(nb->retrans_conn_lock)); - if (unlikely(cr->state == CONN_RETRANS_ACKED)) { - spin_unlock_bh(&(nb->retrans_conn_lock)); - goto out; - } - - BUG_ON(trgt_out_o->isreset != 0); - - BUG_ON(cor_seqno_before(cr->seqno, trgt_out_o->target.out.seqno_acked)); - - if (cor_seqno_after_eq(cr->seqno, - trgt_out_o->target.out.seqno_windowlimit)) { - BUG_ON(cr->state != CONN_RETRANS_SENDING); - cr->state = CONN_RETRANS_LOWWINDOW; - if (likely(trgt_out_o->target.out.retrans_lowwindow != 65535)) - trgt_out_o->target.out.retrans_lowwindow++; - - spin_unlock_bh(&(nb->retrans_conn_lock)); - goto out; - } + __u32 conn_id; + __u64 seqno; - rc = _cor_send_retrans_splitcr_ifneeded(nb, trgt_out_o, cr); + char *connid_p; + char *seqno_p; - spin_unlock_bh(&(nb->retrans_conn_lock)); + /* __u8 rand; */ - kref_get(&(trgt_out_o->ref)); + if (unlikely(nb == 0)) + goto drop; - if (rc == RC_SENDRETRANS_OK) - rc = __cor_send_retrans(nb, trgt_out_o, cr, bytes_sent); + connid_p = cor_pull_skb(skb, 4); + if (unlikely(connid_p == 0)) + goto drop; - if (rc == RC_SENDRETRANS_OOM || rc == RC_SENDRETRANS_QUEUEFULLDROPPED) { - spin_lock_bh(&(nb->retrans_conn_lock)); - if (unlikely(cr->state == CONN_RETRANS_ACKED)) { - } else if (likely(cr->state == CONN_RETRANS_SENDING)) { - if (rc == RC_SENDRETRANS_OOM) - cr->timeout = jiffies + 1; - list_add(&(cr->timeout_list), &(nb->retrans_conn_list)); - cr->state = CONN_RETRANS_SCHEDULED; - } else { - BUG(); - } - spin_unlock_bh(&(nb->retrans_conn_lock)); - } + seqno_p = cor_pull_skb(skb, 6); + if (unlikely(seqno_p == 0)) + goto drop; -out: - spin_unlock_bh(&(trgt_out_o->rcv_lock)); + conn_id = cor_parse_u32(connid_p); + seqno = cor_parse_u48(seqno_p); - kref_put(&(trgt_out_o->ref), cor_free_conn); + /* get_random_bytes(&rand, 1); + if (rand < 64) + goto drop; */ - return (rc == RC_SENDRETRANS_OOM || - rc == RC_SENDRETRANS_QUEUEFULL || - rc == RC_SENDRETRANS_QUEUEFULLDROPPED); -} + if (unlikely(skb->len <= 0)) + goto drop; -static int cor_send_retrans(struct cor_neighbor *nb, int *sent) -{ - int queuefull = 0; - int nbstate = cor_get_neigh_state(nb); - __u64 bytes_sent = 0; + cor_conn_rcv(nb, skb, 0, 0, conn_id, seqno, rcv_delayed_lowbuf, flush); - if (unlikely(nbstate == NEIGHBOR_STATE_STALLED)) { - return QOS_RESUME_DONE; - } else if (unlikely(nbstate == NEIGHBOR_STATE_KILLED)) { - /** - * cor_cancel_all_conn_retrans_nb should not be needed, because - * cor_reset_all_conns calls cor_cancel_all_conn_retrans - */ - 
cor_cancel_all_conn_retrans_nb(nb); - return QOS_RESUME_DONE; + if (0) { +drop: + kfree_skb(skb); } - while (1) { - struct cor_conn_retrans *cr = 0; - - spin_lock_bh(&(nb->retrans_conn_lock)); - - if (list_empty(&(nb->retrans_conn_list))) { - spin_unlock_bh(&(nb->retrans_conn_lock)); - break; - } - - cr = container_of(nb->retrans_conn_list.next, - struct cor_conn_retrans, timeout_list); - - BUG_ON(cr->state != CONN_RETRANS_SCHEDULED); - - if (time_after(cr->timeout, jiffies)) { - cor_reschedule_conn_retrans_timer(nb); - spin_unlock_bh(&(nb->retrans_conn_lock)); - break; - } - - kref_get(&(cr->ref)); - list_del(&(cr->timeout_list)); - cr->state = CONN_RETRANS_SENDING; - - spin_unlock_bh(&(nb->retrans_conn_lock)); - - queuefull = _cor_send_retrans(nb, cr, &bytes_sent); - kref_put(&(cr->ref), cor_free_connretrans); - if (queuefull) { - break; - } else { - *sent = 1; - } + if (nb != 0) { + kref_put(&(nb->ref), cor_neighbor_free); } - - if (bytes_sent > 0) - cor_nbcongwin_data_retransmitted(nb, bytes_sent); - - return queuefull ? QOS_RESUME_CONG : QOS_RESUME_DONE; -} - -void cor_retransmit_conn_timerfunc(struct timer_list *retrans_conn_timer) -{ - struct cor_neighbor *nb = container_of(retrans_conn_timer, - struct cor_neighbor, retrans_conn_timer); - cor_qos_enqueue(nb->queue, &(nb->rb_cr), ns_to_ktime(0), - QOS_CALLER_CONN_RETRANS); - kref_put(&(nb->ref), cor_neighbor_free); -} - -static void cor_conn_ack_ooo_rcvd_splitcr(struct cor_conn *trgt_out_l, - struct cor_conn_retrans *cr, __u64 seqno_ooo, __u32 length, - __u64 *bytes_acked) -{ - struct cor_conn_retrans *cr2; - __u64 seqno_cr2start; - __u32 oldcrlenght = cr->length; - - if (cr->state != CONN_RETRANS_SCHEDULED && - cr->state != CONN_RETRANS_LOWWINDOW) - return; - - seqno_cr2start = seqno_ooo+length; - cr2 = cor_prepare_conn_retrans(trgt_out_l, seqno_cr2start, - cor_seqno_clean(cr->seqno + cr->length - - seqno_cr2start), - cr->snd_delayed_lowbuf, cr, 1); - - if (unlikely(cr2 == 0)) - return; - - BUG_ON(cr2->length > cr->length); - - cr2->timeout = cr->timeout; - cr2->state = cr->state; - - if (cr->state != CONN_RETRANS_SCHEDULED) - list_add(&(cr2->timeout_list), &(cr->timeout_list)); - - BUG_ON(cor_seqno_clean(seqno_ooo - cr->seqno) > cr->length); - - cr->length -= cor_seqno_clean(seqno_ooo - cr->seqno); - BUG_ON(cr->length + length + cr2->length != oldcrlenght); - - *bytes_acked += length; } -void cor_conn_ack_ooo_rcvd(struct cor_neighbor *nb, __u32 conn_id, - struct cor_conn *trgt_out, __u64 seqno_ooo, __u32 length, - __u64 *bytes_acked) +static void cor_rcv_cmsg(struct sk_buff *skb) { - struct list_head *curr; - - if (unlikely(length == 0)) - return; - - spin_lock_bh(&(trgt_out->rcv_lock)); - - if (unlikely(trgt_out->targettype != TARGET_OUT)) - goto out; - if (unlikely(trgt_out->target.out.nb != nb)) - goto out; - if (unlikely(trgt_out->target.out.conn_id != conn_id)) - goto out; - - kref_get(&(nb->ref)); - spin_lock_bh(&(nb->retrans_conn_lock)); - - curr = trgt_out->target.out.retrans_list.next; - while (curr != &(trgt_out->target.out.retrans_list)) { - struct cor_conn_retrans *cr = container_of(curr, - struct cor_conn_retrans, conn_list); - - int ack_covers_start = cor_seqno_after_eq(cr->seqno, seqno_ooo); - int ack_covers_end = cor_seqno_before_eq(cr->seqno + cr->length, - seqno_ooo + length); - - curr = curr->next; - - if (cor_seqno_before(cr->seqno + cr->length, seqno_ooo)) - continue; - - if (cor_seqno_after(cr->seqno, seqno_ooo + length)) - break; - - if (likely(ack_covers_start && ack_covers_end)) { - 
cor_cancel_conn_retrans(nb, trgt_out, cr, bytes_acked);
-			cor_reschedule_conn_retrans_timer(nb);
-		} else if (ack_covers_start) {
-			__u32 diff = seqno_ooo + length - cr->seqno -
-					cr->length;
-			BUG_ON(diff >= cr->length);
-			cr->seqno += diff;
-			cr->length -= diff;
-			*bytes_acked += diff;
-		} else if (ack_covers_end) {
-			__u32 diff = seqno_ooo + length - cr->seqno;
-			BUG_ON(diff >= length);
-			cr->length -= diff;
-			*bytes_acked += diff;
-		} else {
-			cor_conn_ack_ooo_rcvd_splitcr(trgt_out, cr, seqno_ooo,
-					length, bytes_acked);
-			break;
-		}
-	}
-
-	if (unlikely(list_empty(&(trgt_out->target.out.retrans_list)))) {
-		trgt_out->target.out.seqno_acked =
-				trgt_out->target.out.seqno_nextsend;
-	} else {
-		struct cor_conn_retrans *cr = container_of(
-				trgt_out->target.out.retrans_list.next,
-				struct cor_conn_retrans, conn_list);
-		if (cor_seqno_after(cr->seqno,
-				trgt_out->target.out.seqno_acked))
-			trgt_out->target.out.seqno_acked = cr->seqno;
-	}
-
-	spin_unlock_bh(&(nb->retrans_conn_lock));
-	kref_put(&(nb->ref), cor_neighbor_free);
-
-out:
-	spin_unlock_bh(&(trgt_out->rcv_lock));
-}
 
-static void _cor_conn_ack_rcvd_nosendwin(struct cor_conn *trgt_out_l)
+static void cor_rcv_cmsg(struct sk_buff *skb)
 {
+	struct cor_neighbor *nb = cor_get_neigh_by_mac(skb);
+
+	__u64 seqno;
+
+	char *seqno_p;
+
+	/* __u8 rand; */
+
-	if (trgt_out_l->bufsize.state == BUFSIZE_INCR ||
-			trgt_out_l->bufsize.state == BUFSIZE_INCR_FAST)
-		trgt_out_l->bufsize.state = BUFSIZE_NOACTION;
-
-	if (trgt_out_l->bufsize.state == BUFSIZE_NOACTION)
-		trgt_out_l->bufsize.act.noact.bytesleft = max(
-				trgt_out_l->bufsize.act.noact.bytesleft,
-				(__u32) BUF_OUT_WIN_NOK_NOINCR);
-
-	trgt_out_l->bufsize.ignore_rcv_lowbuf = max(
-			trgt_out_l->bufsize.ignore_rcv_lowbuf,
-			(__u32) BUF_OUT_WIN_NOK_NOINCR);
-}
-
-/**
- * nb->retrans_conn_lock must be held when calling this
- * (see cor_schedule_retransmit_conn())
- */
-static void cor_reschedule_lowwindow_retrans(struct cor_conn *trgt_out_l)
-{
-	struct list_head *lh = trgt_out_l->target.out.retrans_list.next;
-	int cnt = 0;
-
-	while (trgt_out_l->target.out.retrans_lowwindow > 0 && cnt < 100) {
-		struct cor_conn_retrans *cr;
-
-		if (unlikely(lh == &(trgt_out_l->target.out.retrans_list))) {
-			BUG_ON(trgt_out_l->target.out.retrans_lowwindow !=
-					65535);
-			trgt_out_l->target.out.retrans_lowwindow = 0;
-			break;
-		}
+	if (unlikely(nb == 0))
+		goto drop;
 
-		cr = container_of(lh, struct cor_conn_retrans, conn_list);
+	seqno_p = cor_pull_skb(skb, 6);
+	if (unlikely(seqno_p == 0))
+		goto drop;
 
-		if (cor_seqno_after_eq(cr->seqno,
-				trgt_out_l->target.out.seqno_windowlimit)) {
-			break;
-		}
+	seqno = cor_parse_u48(seqno_p);
 
-		if (cr->state == CONN_RETRANS_LOWWINDOW)
-			cor_schedule_retransmit_conn(cr, 1, 1);
+	/* get_random_bytes(&rand, 1);
 
-		lh = lh->next;
-		cnt++;
-	}
-}
+	if (rand < 64)
+		goto drop; */
 
-void cor_conn_ack_rcvd(struct cor_neighbor *nb, __u32 conn_id,
-		struct cor_conn *trgt_out, __u64 seqno, int setwindow,
-		__u8 window, __u64 *bytes_acked)
-{
-	int seqno_advanced = 0;
-	int window_enlarged = 0;
-
-	spin_lock_bh(&(trgt_out->rcv_lock));
-
-	if (unlikely(trgt_out->isreset != 0))
-		goto out;
-	if (unlikely(trgt_out->targettype != TARGET_OUT))
-		goto out;
-	if (unlikely(trgt_out->target.out.nb != nb))
-		goto out;
-	if (unlikely(trgt_out->reversedir->source.in.conn_id != conn_id))
-		goto out;
-
-	if (unlikely(cor_seqno_after(seqno,
-			trgt_out->target.out.seqno_nextsend) ||
-			cor_seqno_before(seqno,
-			trgt_out->target.out.seqno_acked)))
-		goto out;
-
-	if (setwindow) {
-		__u64 windowdec = 
cor_dec_log_64_7(window); - if (likely(cor_seqno_after(seqno, - trgt_out->target.out.seqno_acked)) || - cor_seqno_after(seqno + windowdec, - trgt_out->target.out.seqno_windowlimit)) { - trgt_out->target.out.seqno_windowlimit = seqno + - windowdec; - window_enlarged = 1; - } + if (0) { +drop: + kfree_skb(skb); } - if (cor_seqno_after(seqno, trgt_out->target.out.seqno_acked)) - seqno_advanced = 1; - - if (seqno_advanced == 0 && window_enlarged == 0) - goto out; - - kref_get(&(nb->ref)); - spin_lock_bh(&(nb->retrans_conn_lock)); - - if (seqno_advanced) { - trgt_out->target.out.seqno_acked = seqno; - cor_cancel_acked_conn_retrans(trgt_out, bytes_acked); + if (nb != 0) { + kref_put(&(nb->ref), cor_neighbor_free); } - - if (window_enlarged) - cor_reschedule_lowwindow_retrans(trgt_out); - - spin_unlock_bh(&(nb->retrans_conn_lock)); - kref_put(&(nb->ref), cor_neighbor_free); - - if (seqno_advanced) - cor_databuf_ack(trgt_out, trgt_out->target.out.seqno_acked); - - if (cor_seqno_eq(trgt_out->target.out.seqno_acked, - trgt_out->target.out.seqno_nextsend)) - _cor_conn_ack_rcvd_nosendwin(trgt_out); - -out: - if (seqno_advanced || window_enlarged) - cor_flush_buf(trgt_out); - - spin_unlock_bh(&(trgt_out->rcv_lock)); - - cor_wake_sender(trgt_out); -} - -static void cor_try_combine_conn_retrans_prev( - struct cor_neighbor *nb_retransconnlocked, - struct cor_conn *trgt_out_lx, struct cor_conn_retrans *cr) -{ - struct cor_conn_retrans *cr_prev; - __u64 bytes_dummyacked = 0; - - BUG_ON(cr->state != CONN_RETRANS_SCHEDULED); - - if (cr->conn_list.prev == &(trgt_out_lx->target.out.retrans_list)) - return; - - cr_prev = container_of(cr->conn_list.prev, struct cor_conn_retrans, - conn_list); - - if (cr_prev->state != CONN_RETRANS_SCHEDULED) - return; - if (cr_prev->timeout != cr->timeout) - return; - if (!cor_seqno_eq(cr_prev->seqno + cr_prev->length, cr->seqno)) - return; - - cr->seqno -= cr_prev->length; - cr->length += cr_prev->length; - - cor_cancel_conn_retrans(nb_retransconnlocked, trgt_out_lx, cr_prev, - &bytes_dummyacked); } -static void cor_try_combine_conn_retrans_next( - struct cor_neighbor *nb_retranslocked, - struct cor_conn *trgt_out_lx, struct cor_conn_retrans *cr) +static int cor_rcv(struct sk_buff *skb, struct net_device *dev, + struct packet_type *pt, struct net_device *orig_dev) { - struct cor_conn_retrans *cr_next; - __u64 bytes_dummyacked = 0; + __u8 packet_type; + char *packet_type_p; - BUG_ON(cr->state != CONN_RETRANS_SCHEDULED); + if (skb->pkt_type == PACKET_OTHERHOST || + unlikely(skb->pkt_type == PACKET_LOOPBACK)) + goto drop; - if (cr->conn_list.next == &(trgt_out_lx->target.out.retrans_list)) - return; - - cr_next = container_of(cr->conn_list.next, struct cor_conn_retrans, - conn_list); - - if (cr_next->state != CONN_RETRANS_SCHEDULED) - return; - if (cr_next->timeout != cr->timeout) - return; - if (!cor_seqno_eq(cr->seqno + cr->length, cr_next->seqno)) - return; - - cr->length += cr_next->length; - - cor_cancel_conn_retrans(nb_retranslocked, trgt_out_lx, cr_next, - &bytes_dummyacked); -} - -void cor_schedule_retransmit_conn(struct cor_conn_retrans *cr, int connlocked, - int nbretransconn_locked) -{ - struct cor_conn *trgt_out_o = cr->trgt_out_o; - struct cor_neighbor *nb; - int first; - - if (connlocked == 0) - spin_lock_bh(&(trgt_out_o->rcv_lock)); + packet_type_p = cor_pull_skb(skb, 1); - BUG_ON(trgt_out_o->targettype != TARGET_OUT); - nb = trgt_out_o->target.out.nb; + if (unlikely(packet_type_p == 0)) + goto drop; - cr->timeout = 
cor_calc_timeout(atomic_read(&(nb->latency_retrans_us)),
-			atomic_read(&(nb->latency_stddev_retrans_us)),
-			atomic_read(&(nb->max_remote_ackconn_delay_us)));
+	packet_type = *packet_type_p;
 
-	if (nbretransconn_locked == 0)
-		spin_lock_bh(&(nb->retrans_conn_lock));
-
-	kref_get(&(nb->ref));
-
-	BUG_ON(cr->state == CONN_RETRANS_SCHEDULED);
-
-	if (unlikely(cr->state == CONN_RETRANS_ACKED)) {
-		goto out;
-	} else if (unlikely(cr->state == CONN_RETRANS_LOWWINDOW)) {
-		BUG_ON(trgt_out_o->target.out.retrans_lowwindow == 0);
-		if (likely(trgt_out_o->target.out.retrans_lowwindow != 65535))
-			trgt_out_o->target.out.retrans_lowwindow--;
-	}
-
-	first = unlikely(list_empty(&(nb->retrans_conn_list)));
-	list_add_tail(&(cr->timeout_list), &(nb->retrans_conn_list));
-	cr->state = CONN_RETRANS_SCHEDULED;
-
-	if (unlikely(first)) {
-		cor_reschedule_conn_retrans_timer(nb);
+	if (unlikely(packet_type == PACKET_TYPE_ANNOUNCE)) {
+		cor_rcv_announce(skb);
+		return NET_RX_SUCCESS;
+	} else if (packet_type == PACKET_TYPE_CMSG) {
+		cor_rcv_cmsg(skb);
+		return NET_RX_SUCCESS;
+	} else if (packet_type == PACKET_TYPE_CONNDATA) {
+		cor_rcv_conndata(skb, 0, 0);
+		return NET_RX_SUCCESS;
+	} else if (packet_type == PACKET_TYPE_CONNDATA_LOWBUFDELAYED) {
+		cor_rcv_conndata(skb, 1, 0);
+		return NET_RX_SUCCESS;
+	} else if (packet_type == PACKET_TYPE_CONNDATA_FLUSH) {
+		cor_rcv_conndata(skb, 0, 1);
+		return NET_RX_SUCCESS;
+	} else if (packet_type == PACKET_TYPE_CONNDATA_LOWBUFDELAYED_FLUSH) {
+		cor_rcv_conndata(skb, 1, 1);
+		return NET_RX_SUCCESS;
 	} else {
-		cor_try_combine_conn_retrans_prev(nb, trgt_out_o, cr);
-		cor_try_combine_conn_retrans_next(nb, trgt_out_o, cr);
-	}
-
-out:
-	if (nbretransconn_locked == 0)
-		spin_unlock_bh(&(nb->retrans_conn_lock));
-
-	kref_put(&(nb->ref), cor_neighbor_free);
-
-	if (connlocked == 0)
-		spin_unlock_bh(&(trgt_out_o->rcv_lock));
-}
+		kfree_skb(skb);
+		return NET_RX_SUCCESS;
+	}
 
-static int _cor_flush_out_skb(struct cor_conn *trgt_out_lx, __u32 len,
-		__u8 snd_delayed_lowbuf)
-{
-	struct cor_neighbor *nb = trgt_out_lx->target.out.nb;
-
-	__u64 seqno;
-	struct cor_conn_retrans *cr;
-	struct sk_buff *skb;
-	char *dst;
-	__u8 flush = 0;
-	int rc;
-
-	if (trgt_out_lx->flush != 0 &&
-			trgt_out_lx->data_buf.read_remaining == len)
-		flush = 1;
-
-	seqno = trgt_out_lx->target.out.seqno_nextsend;
-	skb = cor_create_packet_conndata(trgt_out_lx->target.out.nb, len,
-			GFP_ATOMIC, trgt_out_lx->target.out.conn_id, seqno,
-			snd_delayed_lowbuf, flush);
-	if (unlikely(skb == 0))
-		return RC_FLUSH_CONN_OUT_OOM;
-
-	cr = cor_prepare_conn_retrans(trgt_out_lx, seqno, len,
-			snd_delayed_lowbuf, 0, 0);
-	if (unlikely(cr == 0)) {
-		kfree_skb(skb);
-		return RC_FLUSH_CONN_OUT_OOM;
-	}
-
-	dst = skb_put(skb, len);
-
-	cor_databuf_pull(trgt_out_lx, dst, len);
-
-	rc = cor_dev_queue_xmit(skb, nb->queue, QOS_CALLER_NEIGHBOR);
-	if (rc == NET_XMIT_DROP) {
-		cor_databuf_unpull(trgt_out_lx, len);
-		spin_lock_bh(&(nb->retrans_conn_lock));
-		cor_cancel_conn_retrans(nb, trgt_out_lx, cr, 0);
-		spin_unlock_bh(&(nb->retrans_conn_lock));
-		kref_put(&(cr->ref), cor_free_connretrans);
-		return RC_FLUSH_CONN_OUT_CONG;
-	}
-
-	trgt_out_lx->target.out.seqno_nextsend += len;
-	cor_nbcongwin_data_sent(nb, len);
-	cor_schedule_retransmit_conn(cr, 1, 0);
-	if (trgt_out_lx->sourcetype == SOURCE_SOCK)
-		cor_update_src_sock_sndspeed(trgt_out_lx, len);
-
-	kref_put(&(cr->ref), cor_free_connretrans);
-
-	return (rc == NET_XMIT_SUCCESS) ? 
- RC_FLUSH_CONN_OUT_OK : RC_FLUSH_CONN_OUT_SENT_CONG; -} - -static int _cor_flush_out_conndata(struct cor_conn *trgt_out_lx, __u32 len, - __u8 snd_delayed_lowbuf) -{ - __u64 seqno; - struct cor_control_msg_out *cm; - struct cor_conn_retrans *cr; - char *buf; - __u8 flush = 0; - - if (trgt_out_lx->flush != 0 && - trgt_out_lx->data_buf.read_remaining == len) - flush = 1; - - buf = kmalloc(len, GFP_ATOMIC); - - if (unlikely(buf == 0)) - return RC_FLUSH_CONN_OUT_OOM; - - cm = cor_alloc_control_msg(trgt_out_lx->target.out.nb, ACM_PRIORITY_LOW); - if (unlikely(cm == 0)) { - kfree(buf); - return RC_FLUSH_CONN_OUT_OOM; - } - - seqno = trgt_out_lx->target.out.seqno_nextsend; - - cr = cor_prepare_conn_retrans(trgt_out_lx, seqno, len, - snd_delayed_lowbuf, 0, 0); - if (unlikely(cr == 0)) { - kfree(buf); - cor_free_control_msg(cm); - return RC_FLUSH_CONN_OUT_OOM; + return NET_RX_SUCCESS; } - cor_databuf_pull(trgt_out_lx, buf, len); - trgt_out_lx->target.out.seqno_nextsend += len; - cor_nbcongwin_data_sent(trgt_out_lx->target.out.nb, len); - if (trgt_out_lx->sourcetype == SOURCE_SOCK) - cor_update_src_sock_sndspeed(trgt_out_lx, len); - - cor_send_conndata(cm, trgt_out_lx->target.out.conn_id, seqno, buf, buf, - len, snd_delayed_lowbuf, flush, - trgt_out_lx->is_highlatency, cr); - - return RC_FLUSH_CONN_OUT_OK; +drop: + kfree_skb(skb); + return NET_RX_DROP; } -int cor_srcin_buflimit_reached(struct cor_conn *src_in_lx) +int cor_netdev_notify_func(struct notifier_block *not, unsigned long event, + void *ptr) { - __u64 window_left; - - if (unlikely(cor_seqno_before(src_in_lx->source.in.window_seqnolimit, - src_in_lx->source.in.next_seqno))) - return 1; - - window_left = cor_seqno_clean(src_in_lx->source.in.window_seqnolimit - - src_in_lx->source.in.next_seqno); + struct net_device *dev = netdev_notifier_info_to_dev(ptr); + int rc; - if (window_left < WINDOW_ENCODE_MIN) - return 1; + switch(event){ + case NETDEV_UP: + if (dev->flags & IFF_LOOPBACK) + break; - if (window_left/2 < src_in_lx->data_buf.read_remaining) + BUG_ON(dev == 0); + rc = cor_create_queue(dev); + if (rc == 1) + return 1; + if (cor_is_clientmode() == 0) + cor_announce_send_start(dev, dev->broadcast, + ANNOUNCE_TYPE_BROADCAST); + break; + case NETDEV_DOWN: + printk(KERN_ERR "down 1"); + udelay(100); + BUG_ON(dev == 0); + printk(KERN_ERR "down 2"); + udelay(100); + cor_announce_send_stop(dev, 0, ANNOUNCE_TYPE_BROADCAST); + printk(KERN_ERR "down 3"); + udelay(100); + cor_reset_neighbors(dev); + printk(KERN_ERR "down 4"); + udelay(100); + cor_destroy_queue(dev); + printk(KERN_ERR "down 5"); + udelay(100); + break; + case NETDEV_REBOOT: + case NETDEV_CHANGE: + case NETDEV_REGISTER: + case NETDEV_UNREGISTER: + case NETDEV_CHANGEMTU: + case NETDEV_CHANGEADDR: + case NETDEV_GOING_DOWN: + case NETDEV_CHANGENAME: + case NETDEV_FEAT_CHANGE: + case NETDEV_BONDING_FAILOVER: + break; + default: return 1; - - return 0; -} - -static __u32 cor_maxsend_left_to_len(__u32 maxsend_left) -{ - __u32 i; - if (maxsend_left < 128) - return maxsend_left; - - for (i=128;i<4096;) { - if (i*2 > maxsend_left) - return i; - i = i*2; } - return maxsend_left - maxsend_left%4096; -} - -static int cor_seqno_low_sendlimit(struct cor_conn *trgt_out_lx, - __u64 windowlimit, __u32 sndlen) -{ - __u64 bytes_ackpending; - - BUG_ON(cor_seqno_before(trgt_out_lx->target.out.seqno_nextsend, - trgt_out_lx->target.out.seqno_acked)); - - bytes_ackpending = cor_seqno_clean( - trgt_out_lx->target.out.seqno_nextsend - - trgt_out_lx->target.out.seqno_acked); - - if (windowlimit <= sndlen) - 
return 1; - - if (unlikely(bytes_ackpending + sndlen < bytes_ackpending)) - return 0; - - if (trgt_out_lx->is_highlatency != 0) - return (windowlimit - sndlen < (bytes_ackpending + sndlen) / 4) - ? 1 : 0; - else - return (windowlimit - sndlen < (bytes_ackpending + sndlen) / 8) - ? 1 : 0; + return 0; } -static void _cor_flush_out_ignore_lowbuf(struct cor_conn *trgt_out_lx) -{ - trgt_out_lx->bufsize.ignore_rcv_lowbuf = max( - trgt_out_lx->bufsize.ignore_rcv_lowbuf, - trgt_out_lx->bufsize.bufsize >> BUFSIZE_SHIFT); -} +static struct packet_type cor_ptype = { + .type = htons(ETH_P_COR), + .dev = 0, + .func = cor_rcv +}; -static __u64 cor_get_windowlimit(struct cor_conn *trgt_out_lx) +void cor_dev_down(void) { - if (unlikely(cor_seqno_before(trgt_out_lx->target.out.seqno_windowlimit, - trgt_out_lx->target.out.seqno_nextsend))) - return 0; - - return cor_seqno_clean(trgt_out_lx->target.out.seqno_windowlimit - - trgt_out_lx->target.out.seqno_nextsend); -} - -static int _cor_flush_out(struct cor_conn *trgt_out_lx, __u32 maxsend, - __u32 *sent, int from_qos, int maxsend_forcedelay) -{ - struct cor_neighbor *nb = trgt_out_lx->target.out.nb; - - __u32 targetmss; - - int nbstate; - - __u8 snd_delayed_lowbuf = trgt_out_lx->target.out.windowlimit_reached; - - __u32 maxsend_left = maxsend; - - trgt_out_lx->target.out.windowlimit_reached = 0; - - BUG_ON(trgt_out_lx->targettype != TARGET_OUT); - - if (unlikely(trgt_out_lx->target.out.established == 0)) - return RC_FLUSH_CONN_OUT_OK; - - if (unlikely(trgt_out_lx->isreset != 0)) - return RC_FLUSH_CONN_OUT_OK; - - BUG_ON(trgt_out_lx->target.out.conn_id == 0); - - if (unlikely(trgt_out_lx->data_buf.read_remaining == 0)) - return RC_FLUSH_CONN_OUT_OK; - - if (from_qos == 0 && cor_qos_fastsend_allowed_conn(trgt_out_lx) == 0) - return RC_FLUSH_CONN_OUT_CONG; - - cor_get_conn_idletime(trgt_out_lx); - - spin_lock_bh(&(nb->stalledconn_lock)); - nbstate = cor_get_neigh_state(nb); - if (unlikely(nbstate == NEIGHBOR_STATE_STALLED)) { - BUG_ON(trgt_out_lx->target.out.nbstalled_lh.prev == 0 && - trgt_out_lx->target.out.nbstalled_lh.next != 0); - BUG_ON(trgt_out_lx->target.out.nbstalled_lh.prev != 0 && - trgt_out_lx->target.out.nbstalled_lh.next == 0); - - if (trgt_out_lx->target.out.nbstalled_lh.prev == 0) { - kref_get(&(trgt_out_lx->ref)); - list_add_tail(&(trgt_out_lx->target.out.nbstalled_lh), - &(nb->stalledconn_list)); - } - } - spin_unlock_bh(&(nb->stalledconn_lock)); - - if (unlikely(nbstate != NEIGHBOR_STATE_ACTIVE)) - return RC_FLUSH_CONN_OUT_NBNOTACTIVE; - - /* printk(KERN_ERR "flush %p %llu %u", trgt_out_l, - cor_get_windowlimit(trgt_out_l), - trgt_out_l->data_buf.read_remaining); */ - - targetmss = cor_mss_conndata(nb, trgt_out_lx->is_highlatency != 0); - - while (trgt_out_lx->data_buf.read_remaining >= targetmss) { - __u64 windowlimit = cor_get_windowlimit(trgt_out_lx); - int rc; - - if (maxsend_left < targetmss) - break; - - if (windowlimit < targetmss) { - trgt_out_lx->target.out.windowlimit_reached = 1; - snd_delayed_lowbuf = 1; - _cor_flush_out_ignore_lowbuf(trgt_out_lx); - break; - } - - if (cor_nbcongwin_send_allowed(nb) == 0) - return RC_FLUSH_CONN_OUT_CONG; - - if (cor_seqno_low_sendlimit(trgt_out_lx, windowlimit, - targetmss)) { - trgt_out_lx->target.out.windowlimit_reached = 1; - snd_delayed_lowbuf = 1; - } - - if (likely(cor_send_conndata_as_skb(nb, targetmss))) - rc = _cor_flush_out_skb(trgt_out_lx, targetmss, - snd_delayed_lowbuf); - else - rc = _cor_flush_out_conndata(trgt_out_lx, targetmss, - snd_delayed_lowbuf); - - if (rc == 
RC_FLUSH_CONN_OUT_OK || - rc == RC_FLUSH_CONN_OUT_SENT_CONG) { - maxsend_left -= targetmss; - *sent += targetmss; - } - - if (rc == RC_FLUSH_CONN_OUT_SENT_CONG) - return RC_FLUSH_CONN_OUT_CONG; - if (rc != RC_FLUSH_CONN_OUT_OK) - return rc; + if (cor_pack_registered != 0) { + cor_pack_registered = 0; + dev_remove_pack(&cor_ptype); } - if (trgt_out_lx->data_buf.read_remaining > 0) { - __u32 len = trgt_out_lx->data_buf.read_remaining; - __u64 windowlimit = cor_get_windowlimit(trgt_out_lx); - int rc; - - if (maxsend_left < len) { - if (maxsend_left >= 65536 || ( - maxsend_left == maxsend && - maxsend_left >= 128 && - trgt_out_lx->is_highlatency == 0 && - !maxsend_forcedelay)) { - len = cor_maxsend_left_to_len(maxsend_left); - } else { - return RC_FLUSH_CONN_OUT_MAXSENT; - } - } - - if (trgt_out_lx->flush == 0 && - trgt_out_lx->sourcetype == SOURCE_SOCK && - cor_sock_sndbufavailable(trgt_out_lx) != 0) - goto out; - - if (trgt_out_lx->flush == 0 && - trgt_out_lx->sourcetype == SOURCE_IN && - cor_srcin_buflimit_reached(trgt_out_lx) - == 0 && ( - cor_seqno_eq( - trgt_out_lx->target.out.seqno_nextsend, - trgt_out_lx->target.out.seqno_acked) == 0 || - trgt_out_lx->is_highlatency != 0)) - goto out; - - if (trgt_out_lx->flush == 0 && - trgt_out_lx->sourcetype == SOURCE_UNCONNECTED && - cor_cpacket_write_allowed(trgt_out_lx) != 0) - goto out; - - if (windowlimit == 0 || (windowlimit < len && - cor_seqno_eq( - trgt_out_lx->target.out.seqno_nextsend, - trgt_out_lx->target.out.seqno_acked) == 0)) { - trgt_out_lx->target.out.windowlimit_reached = 1; - snd_delayed_lowbuf = 1; - _cor_flush_out_ignore_lowbuf(trgt_out_lx); - goto out; - } - - if (cor_nbcongwin_send_allowed(nb) == 0) - return RC_FLUSH_CONN_OUT_CONG; - - if (cor_seqno_low_sendlimit(trgt_out_lx, windowlimit, len)) { - trgt_out_lx->target.out.windowlimit_reached = 1; - snd_delayed_lowbuf = 1; - } - - if (len > windowlimit) { - len = windowlimit; - _cor_flush_out_ignore_lowbuf(trgt_out_lx); - } - - if (cor_send_conndata_as_skb(nb, len)) - rc = _cor_flush_out_skb(trgt_out_lx, len, - snd_delayed_lowbuf); - else - rc = _cor_flush_out_conndata(trgt_out_lx, len, - snd_delayed_lowbuf); - - - if (rc == RC_FLUSH_CONN_OUT_OK || - rc == RC_FLUSH_CONN_OUT_SENT_CONG) { - maxsend_left -= len; - *sent += len; + if (cor_netdev_notify_registered != 0) { + if (unregister_netdevice_notifier(&cor_netdev_notify) != 0) { + printk(KERN_WARNING "warning: cor_dev_down: " + "unregister_netdevice_notifier failed"); + BUG(); } - - if (rc == RC_FLUSH_CONN_OUT_SENT_CONG) - return RC_FLUSH_CONN_OUT_CONG; - if (rc != RC_FLUSH_CONN_OUT_OK) - return rc; + cor_netdev_notify_registered = 0; } - -out: - return RC_FLUSH_CONN_OUT_OK; } -int cor_flush_out(struct cor_conn *trgt_out_lx, __u32 *sent) +int cor_dev_up(void) { - int rc = _cor_flush_out(trgt_out_lx, 1 << 30, sent, 0, 0); - - if (rc == RC_FLUSH_CONN_OUT_CONG || rc == RC_FLUSH_CONN_OUT_MAXSENT || - rc == RC_FLUSH_CONN_OUT_OOM) - cor_qos_enqueue_conn(trgt_out_lx); - - return rc; -} + BUG_ON(cor_netdev_notify_registered != 0); + if (register_netdevice_notifier(&cor_netdev_notify) != 0) + return 1; + cor_netdev_notify_registered = 1; -void cor_resume_nbstalled_conns(struct work_struct *work) -{ - struct cor_neighbor *nb = container_of(work, struct cor_neighbor, - stalledconn_work); - int rc = RC_FLUSH_CONN_OUT_OK; - - spin_lock_bh(&(nb->stalledconn_lock)); - nb->stalledconn_work_scheduled = 0; - while (rc != RC_FLUSH_CONN_OUT_NBNOTACTIVE && - list_empty(&(nb->stalledconn_list)) == 0) { - struct list_head *lh = 
nb->stalledconn_list.next; - struct cor_conn *trgt_out = container_of(lh, struct cor_conn, - target.out.nbstalled_lh); - __u32 sent = 0; - BUG_ON(trgt_out->targettype != TARGET_OUT); - list_del(lh); - lh->prev = 0; - lh->next = 0; - - spin_unlock_bh(&(nb->stalledconn_lock)); - - spin_lock_bh(&(trgt_out->rcv_lock)); - if (likely(trgt_out->targettype == TARGET_OUT)) - rc = cor_flush_out(trgt_out, &sent); - spin_unlock_bh(&(trgt_out->rcv_lock)); - - if (sent != 0) - cor_wake_sender(trgt_out); - - kref_put(&(trgt_out->ref), cor_free_conn); - - spin_lock_bh(&(nb->stalledconn_lock)); - } - spin_unlock_bh(&(nb->stalledconn_lock)); + BUG_ON(cor_pack_registered != 0); + dev_add_pack(&cor_ptype); + cor_pack_registered = 1; - kref_put(&(nb->ref), cor_neighbor_free); + return 0; } -int __init cor_snd_init(void) +int __init cor_dev_init(void) { - cor_connretrans_slab = kmem_cache_create("cor_connretrans", - sizeof(struct cor_conn_retrans), 8, 0, 0); - if (unlikely(cor_connretrans_slab == 0)) - return -ENOMEM; + memset(&cor_netdev_notify, 0, sizeof(cor_netdev_notify)); + cor_netdev_notify.notifier_call = cor_netdev_notify_func; return 0; } -void __exit cor_snd_exit1(void) +void __exit cor_dev_exit1(void) { flush_work(&cor_qos_waitexit_work); } - -void __exit cor_snd_exit2(void) -{ - kmem_cache_destroy(cor_connretrans_slab); - cor_connretrans_slab = 0; -} - -MODULE_LICENSE("GPL"); diff --git a/net/cor/kpacket_gen.c b/net/cor/kpacket_gen.c index b90f43f65271..1e046c4e0a9a 100644 --- a/net/cor/kpacket_gen.c +++ b/net/cor/kpacket_gen.c @@ -2405,7 +2405,6 @@ void cor_send_connect_nb(struct cor_control_msg_out *cm, __u32 conn_id, cor_enqueue_control_msg(cm, ADDCMSG_SRC_NEW); } -#warning todo inline this void cor_send_conndata(struct cor_control_msg_out *cm, __u32 conn_id, __u64 seqno, char *data_orig, char *data, __u32 datalen, __u8 snd_delayed_lowbuf, __u8 flush, __u8 highlatency, diff --git a/net/cor/neighbor.c b/net/cor/neighbor.c index 637ddd812417..84e0ccbbb62e 100644 --- a/net/cor/neighbor.c +++ b/net/cor/neighbor.c @@ -87,9 +87,6 @@ static atomic_t cor_num_neighs; static LIST_HEAD(cor_announce_out_list); -static struct notifier_block cor_netdev_notify; -__u8 cor_netdev_notify_registered = 0; - void cor_neighbor_free(struct kref *ref) { @@ -518,7 +515,7 @@ static void cor_reset_neighbor(struct cor_neighbor *nb, int use_workqueue) } } -static void cor_reset_neighbors(struct net_device *dev) +void cor_reset_neighbors(struct net_device *dev) { struct list_head *currlh; @@ -1266,9 +1263,6 @@ static void cor_neighbor_discdata_free(struct cor_neighbor_discdata *nb_dd) cor_num_nb_dd--; } -static void cor_announce_send_start(struct net_device *dev, char *mac, - int type); - static struct cor_neighbor_discdata *cor_findoralloc_neighbor_discdata( struct net_device *dev, char *source_hw, __be32 sessionid) { @@ -1610,7 +1604,7 @@ static void cor_send_announce(struct work_struct *work) _cor_send_announce(ann, 0, &sent); } -static void cor_announce_send_start(struct net_device *dev, char *mac, int type) +void cor_announce_send_start(struct net_device *dev, char *mac, int type) { struct cor_announce_data *ann; @@ -1670,62 +1664,9 @@ void cor_announce_send_stop(struct net_device *dev, char *mac, int type) spin_unlock_bh(&(cor_announce_snd_lock)); } -int cor_netdev_notify_func(struct notifier_block *not, unsigned long event, - void *ptr) -{ - struct net_device *dev = netdev_notifier_info_to_dev(ptr); - int rc; - - switch(event){ - case NETDEV_UP: - if (dev->flags & IFF_LOOPBACK) - break; - - BUG_ON(dev == 0); - rc = 
cor_create_queue(dev); - if (rc == 1) - return 1; - if (cor_is_clientmode() == 0) - cor_announce_send_start(dev, dev->broadcast, - ANNOUNCE_TYPE_BROADCAST); - break; - case NETDEV_DOWN: - printk(KERN_ERR "down 1"); - udelay(100); - BUG_ON(dev == 0); - printk(KERN_ERR "down 2"); - udelay(100); - cor_announce_send_stop(dev, 0, ANNOUNCE_TYPE_BROADCAST); - printk(KERN_ERR "down 3"); - udelay(100); - cor_reset_neighbors(dev); - printk(KERN_ERR "down 4"); - udelay(100); - cor_destroy_queue(dev); - printk(KERN_ERR "down 5"); - udelay(100); - break; - case NETDEV_REBOOT: - case NETDEV_CHANGE: - case NETDEV_REGISTER: - case NETDEV_UNREGISTER: - case NETDEV_CHANGEMTU: - case NETDEV_CHANGEADDR: - case NETDEV_GOING_DOWN: - case NETDEV_CHANGENAME: - case NETDEV_FEAT_CHANGE: - case NETDEV_BONDING_FAILOVER: - break; - default: - return 1; - } - - return 0; -} - void _cor_neighbor_down(void) { - cor_rcv_down(); + cor_dev_down(); spin_lock_bh(&cor_local_addr_lock); if (cor_local_addr != 0) { @@ -1740,14 +1681,6 @@ void _cor_neighbor_down(void) cor_destroy_queue(0); cor_announce_send_stop(0, 0, ANNOUNCE_TYPE_BROADCAST); - - if (cor_netdev_notify_registered != 0 && unregister_netdevice_notifier( - &cor_netdev_notify) != 0) { - printk(KERN_WARNING "warning: cor_neighbor_down: " - "unregister_netdevice_notifier failed"); - BUG(); - } - cor_netdev_notify_registered = 0; } void cor_neighbor_down(void) @@ -1784,17 +1717,7 @@ int cor_neighbor_up(char *addr2, __u32 addrlen2) spin_unlock_bh(&cor_local_addr_lock); - BUG_ON(cor_netdev_notify_registered != 0); - - if (register_netdevice_notifier(&cor_netdev_notify) != 0) - goto out_err2; - - cor_netdev_notify_registered = 1; - - cor_rcv_up(); - - if (0) { -out_err2: + if (cor_dev_up() != 0) { spin_lock_bh(&cor_local_addr_lock); kfree(cor_local_addr); cor_local_addr = 0; @@ -1831,9 +1754,6 @@ int __init cor_neighbor_init(void) atomic_set(&cor_num_neighs, 0); - memset(&cor_netdev_notify, 0, sizeof(cor_netdev_notify)); - cor_netdev_notify.notifier_call = cor_netdev_notify_func; - return 0; } diff --git a/net/cor/rcv.c b/net/cor/rcv.c index 20c81fed4173..daf1dc1bcf0f 100644 --- a/net/cor/rcv.c +++ b/net/cor/rcv.c @@ -28,8 +28,6 @@ static struct kmem_cache *cor_rcvooo_buf_slab; -__u8 cor_pack_registered = 0; - void cor_reset_ooo_queue(struct cor_conn *src_in_lx) { BUG_ON(src_in_lx->sourcetype != SOURCE_IN); @@ -730,155 +728,6 @@ void cor_conn_rcv(struct cor_neighbor *nb, struct sk_buff *skb, char *data, kref_put(&(src_in->ref), cor_free_conn); } -static void cor_rcv_conndata(struct sk_buff *skb, int rcv_delayed_lowbuf, - __u8 flush) -{ - struct cor_neighbor *nb = cor_get_neigh_by_mac(skb); - - __u32 conn_id; - __u64 seqno; - - char *connid_p; - char *seqno_p; - - /* __u8 rand; */ - - if (unlikely(nb == 0)) - goto drop; - - connid_p = cor_pull_skb(skb, 4); - if (unlikely(connid_p == 0)) - goto drop; - - seqno_p = cor_pull_skb(skb, 6); - if (unlikely(seqno_p == 0)) - goto drop; - - conn_id = cor_parse_u32(connid_p); - seqno = cor_parse_u48(seqno_p); - - /* get_random_bytes(&rand, 1); - if (rand < 64) - goto drop; */ - - if (unlikely(skb->len <= 0)) - goto drop; - - cor_conn_rcv(nb, skb, 0, 0, conn_id, seqno, rcv_delayed_lowbuf, flush); - - if (0) { -drop: - kfree_skb(skb); - } - - if (nb != 0) { - kref_put(&(nb->ref), cor_neighbor_free); - } -} - -static void cor_rcv_cmsg(struct sk_buff *skb) -{ - struct cor_neighbor *nb = cor_get_neigh_by_mac(skb); - - __u64 seqno; - - char *seqno_p; - - /* __u8 rand; */ - - if (unlikely(nb == 0)) - goto drop; - - seqno_p = cor_pull_skb(skb, 
6); - if (unlikely(seqno_p == 0)) - goto drop; - - seqno = cor_parse_u48(seqno_p); - - /* get_random_bytes(&rand, 1); - - if (rand < 64) - goto drop; */ - - cor_kernel_packet(nb, skb, seqno); - - if (0) { -drop: - kfree_skb(skb); - } - - if (nb != 0) { - kref_put(&(nb->ref), cor_neighbor_free); - } -} - -static int cor_rcv(struct sk_buff *skb, struct net_device *dev, - struct packet_type *pt, struct net_device *orig_dev) -{ - __u8 packet_type; - char *packet_type_p; - - if (skb->pkt_type == PACKET_OTHERHOST || - unlikely(skb->pkt_type == PACKET_LOOPBACK)) - goto drop; - - packet_type_p = cor_pull_skb(skb, 1); - - if (unlikely(packet_type_p == 0)) - goto drop; - - packet_type = *packet_type_p; - - if (unlikely(packet_type == PACKET_TYPE_ANNOUNCE)) { - cor_rcv_announce(skb); - return NET_RX_SUCCESS; - } else if (packet_type == PACKET_TYPE_CMSG) { - cor_rcv_cmsg(skb); - return NET_RX_SUCCESS; - } else if (packet_type == PACKET_TYPE_CONNDATA) { - cor_rcv_conndata(skb, 0, 0); - return NET_RX_SUCCESS; - } else if (packet_type == PACKET_TYPE_CONNDATA_LOWBUFDELAYED) { - cor_rcv_conndata(skb, 1, 0); - return NET_RX_SUCCESS; - } else if (packet_type == PACKET_TYPE_CONNDATA_FLUSH) { - cor_rcv_conndata(skb, 0, 1); - return NET_RX_SUCCESS; - } else if (packet_type == PACKET_TYPE_CONNDATA_LOWBUFDELAYED_FLUSH) { - cor_rcv_conndata(skb, 1, 1); - return NET_RX_SUCCESS; - } else { - kfree_skb(skb); - return NET_RX_SUCCESS; - } - -drop: - kfree_skb(skb); - return NET_RX_DROP; -} - -static struct packet_type cor_ptype = { - .type = htons(ETH_P_COR), - .dev = 0, - .func = cor_rcv -}; - -void cor_rcv_down(void) -{ - if (cor_pack_registered == 0) - return; - cor_pack_registered = 0; - dev_remove_pack(&cor_ptype); -} - -void cor_rcv_up(void) -{ - if (cor_pack_registered != 0) - return; - cor_pack_registered = 1; - dev_add_pack(&cor_ptype); -} - int __init cor_rcv_init(void) { BUG_ON(sizeof(struct cor_skb_procstate) > 48); diff --git a/net/cor/snd.c b/net/cor/snd.c index 645a7ed77b96..b2380bf97e61 100644 --- a/net/cor/snd.c +++ b/net/cor/snd.c @@ -21,57 +21,11 @@ #include #include #include -#include #include "cor.h" static struct kmem_cache *cor_connretrans_slab; -static DEFINE_SPINLOCK(cor_queues_lock); -static LIST_HEAD(cor_queues); -static LIST_HEAD(cor_queues_waitexit); - -static void cor_qos_waitexit(struct work_struct *work); -DECLARE_WORK(cor_qos_waitexit_work, cor_qos_waitexit); - -static int _cor_flush_out(struct cor_conn *trgt_out_lx, __u32 maxsend, - __u32 *sent, int from_qos, int maxsend_forcedelay); - -static void _cor_qos_enqueue(struct cor_qos_queue *q, - struct cor_resume_block *rb, ktime_t cmsg_send_start, - int caller, int from_nbcongwin_resume); - - -#ifdef DEBUG_QOS_SLOWSEND -static DEFINE_SPINLOCK(slowsend_lock); -static unsigned long cor_last_send; - - -int _cor_dev_queue_xmit(struct sk_buff *skb, int caller) -{ - int allowsend = 0; - unsigned long jiffies_tmp; - spin_lock_bh(&slowsend_lock); - jiffies_tmp = jiffies; - if (cor_last_send != jiffies_tmp) { - if (cor_last_send + 1 == jiffies_tmp) { - cor_last_send = jiffies_tmp; - } else { - cor_last_send = jiffies_tmp - 1; - } - allowsend = 1; - } - spin_unlock_bh(&slowsend_lock); - - /* printk(KERN_ERR "cor_dev_queue_xmit %d, %d", caller, allowsend); */ - if (allowsend) { - return dev_queue_xmit(skb); - } else { - kfree_skb(skb); - return NET_XMIT_DROP; - } -} -#endif static void cor_free_connretrans(struct kref *ref) { @@ -85,1358 +39,6 @@ static void cor_free_connretrans(struct kref *ref) kref_put(&(cn->ref), cor_free_conn); } -void 
cor_free_qos(struct kref *ref) -{ - struct cor_qos_queue *q = container_of(ref, struct cor_qos_queue, ref); - kfree(q); -} - - -static void cor_qos_queue_set_congstatus(struct cor_qos_queue *q_locked); - -/** - * neighbor congestion window: - * increment by 4096 every round trip if more than 2/3 of cwin is used - * - * in case of packet loss decrease by 1/4: - * - <= 1/8 immediately and - * - <= 1/4 during the next round trip - * - * in case of multiple packet loss events, do not decrement more than once per - * round trip - * (a worked sketch of these rules follows after the patch trailer) - */ - -#ifdef COR_NBCONGWIN - -/*extern __u64 get_bufspace_used(void); - -static void print_conn_bufstats(struct cor_neighbor *nb) -{ - / * not threadsafe, but this is only for debugging... * / - __u64 totalsize = 0; - __u64 read_remaining = 0; - __u32 numconns = 0; - struct list_head *lh; - unsigned long iflags; - - spin_lock_irqsave(&(nb->conns_waiting.lock), iflags); - - lh = nb->conns_waiting.lh.next; - while (lh != &(nb->conns_waiting.lh)) { - struct cor_conn *cn = container_of(lh, struct cor_conn, - target.out.rb.lh); - totalsize += cn->data_buf.datasize; - read_remaining += cn->data_buf.read_remaining; - lh = lh->next; - } - - lh = nb->conns_waiting.lh_nextpass.next; - while (lh != &(nb->conns_waiting.lh_nextpass)) { - struct cor_conn *cn = container_of(lh, struct cor_conn, - target.out.rb.lh); - totalsize += cn->data_buf.datasize; - read_remaining += cn->data_buf.read_remaining; - lh = lh->next; - } - - numconns = nb->conns_waiting.cnt; - - spin_unlock_irqrestore(&(nb->conns_waiting.lock), iflags); - - printk(KERN_ERR "conn %llu %llu %u", totalsize, read_remaining, - numconns); -} */ - -static void cor_nbcongwin_data_retransmitted(struct cor_neighbor *nb, - __u64 bytes_sent) -{ - __u64 min_cwin = cor_mss_conndata(nb, 0)*2 << NBCONGWIN_SHIFT; - __u64 cwin; - - unsigned long iflags; - - spin_lock_irqsave(&(nb->nbcongwin.lock), iflags); - - cwin = atomic64_read(&(nb->nbcongwin.cwin)); - - /* printk(KERN_ERR "retrans %llu %llu", cwin >> NBCONGWIN_SHIFT, - get_bufspace_used()); - print_conn_bufstats(nb); */ - - BUG_ON(nb->nbcongwin.cwin_shrinkto > cwin); - BUG_ON(cwin >= U64_MAX/1024); - - if (bytes_sent > 1024) - bytes_sent = 1024; - - if (nb->nbcongwin.cwin_shrinkto == cwin) { - if (bytes_sent > 512) { - cwin -= cwin/16; - } else { - cwin -= (bytes_sent * cwin) / (1024 * 8); - } - if (cwin < min_cwin) - cwin = min_cwin; - atomic64_set(&(nb->nbcongwin.cwin), cwin); - } - - nb->nbcongwin.cwin_shrinkto -= - (bytes_sent * nb->nbcongwin.cwin_shrinkto) / (1024 * 8); - - nb->nbcongwin.cwin_shrinkto = max(nb->nbcongwin.cwin_shrinkto, - cwin - cwin/8); - - if (nb->nbcongwin.cwin_shrinkto < min_cwin) - nb->nbcongwin.cwin_shrinkto = min_cwin; - - spin_unlock_irqrestore(&(nb->nbcongwin.lock), iflags); -} - -static __u64 cor_nbcongwin_update_cwin(struct cor_neighbor *nb_cwlocked, - __u64 data_intransit, __u64 bytes_acked) -{ - __u64 CWIN_MUL = (1 << NBCONGWIN_SHIFT); - __u32 INCR_PER_RTT = 4096; - - __u64 cwin = atomic64_read(&(nb_cwlocked->nbcongwin.cwin)); - - __u64 cwin_tmp; - __u64 incrby; - - if (nb_cwlocked->nbcongwin.cwin_shrinkto < cwin) { - __u64 shrinkby = (bytes_acked << (NBCONGWIN_SHIFT-2)); - if (unlikely(shrinkby > cwin)) - cwin = 0; - else - cwin -= shrinkby; - - if (cwin < nb_cwlocked->nbcongwin.cwin_shrinkto) - cwin = nb_cwlocked->nbcongwin.cwin_shrinkto; - } - - - if (cwin * 2 > data_intransit * CWIN_MUL * 3) - goto out; - - cwin_tmp = max(cwin, bytes_acked << NBCONGWIN_SHIFT); - - if (unlikely(bytes_acked >= U64_MAX/INCR_PER_RTT/CWIN_MUL)) - incrby =
div64_u64(bytes_acked * INCR_PER_RTT, - cwin_tmp / CWIN_MUL / CWIN_MUL); - else if (unlikely(bytes_acked >= - U64_MAX/INCR_PER_RTT/CWIN_MUL/CWIN_MUL)) - incrby = div64_u64(bytes_acked * INCR_PER_RTT * CWIN_MUL, - cwin_tmp / CWIN_MUL); - else - incrby = div64_u64(bytes_acked * INCR_PER_RTT * CWIN_MUL * - CWIN_MUL, cwin_tmp); - - BUG_ON(incrby > INCR_PER_RTT * CWIN_MUL); - - if (unlikely(cwin + incrby < cwin)) - cwin = U64_MAX; - else - cwin += incrby; - - if (unlikely(nb_cwlocked->nbcongwin.cwin_shrinkto + incrby < - nb_cwlocked->nbcongwin.cwin_shrinkto)) - nb_cwlocked->nbcongwin.cwin_shrinkto = U64_MAX; - else - nb_cwlocked->nbcongwin.cwin_shrinkto += incrby; - -out: - atomic64_set(&(nb_cwlocked->nbcongwin.cwin), cwin); - - return cwin; -} - -void cor_nbcongwin_data_acked(struct cor_neighbor *nb, __u64 bytes_acked) -{ - unsigned long iflags; - struct cor_qos_queue *q = nb->queue; - __u64 data_intransit; - __u64 cwin; - - spin_lock_irqsave(&(nb->nbcongwin.lock), iflags); - - data_intransit = atomic64_read(&(nb->nbcongwin.data_intransit)); - - cwin = cor_nbcongwin_update_cwin(nb, data_intransit, bytes_acked); - - BUG_ON(bytes_acked > data_intransit); - atomic64_sub(bytes_acked, &(nb->nbcongwin.data_intransit)); - data_intransit -= bytes_acked; - - if (data_intransit >= cwin >> NBCONGWIN_SHIFT) - goto out_sendnok; - - spin_lock(&(q->qlock)); - if (nb->rb.in_queue == RB_INQUEUE_NBCONGWIN) { - if (nb->conns_waiting.cnt == 0) { - nb->rb.in_queue = RB_INQUEUE_FALSE; - } else { - _cor_qos_enqueue(q, &(nb->rb), ns_to_ktime(0), - QOS_CALLER_NEIGHBOR, 1); - } - } - spin_unlock(&(q->qlock)); - - -out_sendnok: - spin_unlock_irqrestore(&(nb->nbcongwin.lock), iflags); -} - -static void cor_nbcongwin_data_sent(struct cor_neighbor *nb, __u32 bytes_sent) -{ - atomic64_add(bytes_sent, &(nb->nbcongwin.data_intransit)); -} - -static int cor_nbcongwin_send_allowed(struct cor_neighbor *nb) -{ - unsigned long iflags; - int ret = 1; - struct cor_qos_queue *q = nb->queue; - int krefput_queue = 0; - - if (atomic64_read(&(nb->nbcongwin.data_intransit)) <= - atomic64_read(&(nb->nbcongwin.cwin)) >> NBCONGWIN_SHIFT) - return 1; - - spin_lock_irqsave(&(nb->nbcongwin.lock), iflags); - - if (atomic64_read(&(nb->nbcongwin.data_intransit)) <= - atomic64_read(&(nb->nbcongwin.cwin)) >> NBCONGWIN_SHIFT) - goto out_ok; - - ret = 0; - - spin_lock(&(q->qlock)); - if (nb->rb.in_queue == RB_INQUEUE_FALSE) { - nb->rb.in_queue = RB_INQUEUE_NBCONGWIN; - } else if (nb->rb.in_queue == RB_INQUEUE_TRUE) { - list_del(&(nb->rb.lh)); - kref_put(&(nb->ref), cor_kreffree_bug); - nb->rb.in_queue = RB_INQUEUE_NBCONGWIN; - BUG_ON(q->numconns < nb->conns_waiting.cnt); - q->numconns -= nb->conns_waiting.cnt; - q->priority_sum -= nb->conns_waiting.priority_sum; - krefput_queue = 1; - - cor_qos_queue_set_congstatus(q); - } else if (nb->rb.in_queue == RB_INQUEUE_NBCONGWIN) { - } else { - BUG(); - } - spin_unlock(&(q->qlock)); - - if (krefput_queue != 0) - kref_put(&(q->ref), cor_free_qos); - -out_ok: - spin_unlock_irqrestore(&(nb->nbcongwin.lock), iflags); - - return ret; -} - -#else - -static inline void cor_nbcongwin_data_retransmitted(struct cor_neighbor *nb, - __u64 bytes_sent) -{ -} - -static inline void cor_nbcongwin_data_sent(struct cor_neighbor *nb, - __u32 bytes_sent) -{ -} - -static inline int cor_nbcongwin_send_allowed(struct cor_neighbor *nb) -{ - return 1; -} - -#endif - -static void _cor_resume_conns_accountbusytime(struct cor_conn *trgt_out_l, - __u32 priority, __u32 burstprio, - unsigned long jiffies_nb_lastduration) -{ - - unsigned 
long jiffies_tmp = jiffies; - __u64 jiffies_last_idle_mul = (1LL << JIFFIES_LAST_IDLE_SHIFT); - __u64 burstfactor; - __u64 jiffies_shifted_busy; - - BUG_ON(burstprio < priority); - - burstfactor = div_u64(1024LL * (__u64) burstprio, priority); - BUG_ON(burstfactor < 1024); - burstfactor = 1024 + (burstfactor - 1024) * 2; - - jiffies_shifted_busy = (jiffies_nb_lastduration * burstfactor * - jiffies_last_idle_mul) / 1024; - - BUG_ON(BURSTPRIO_MAXIDLETIME_SECS > - (1 << 30) / (HZ*jiffies_last_idle_mul)); - - if (unlikely(jiffies_shifted_busy > HZ * BURSTPRIO_MAXIDLETIME_SECS * - jiffies_last_idle_mul)) - trgt_out_l->target.out.jiffies_idle_since = - jiffies_tmp << JIFFIES_LAST_IDLE_SHIFT; - else - trgt_out_l->target.out.jiffies_idle_since += - jiffies_shifted_busy; - - if (unlikely(time_before(jiffies_tmp << JIFFIES_LAST_IDLE_SHIFT, - trgt_out_l->target.out.jiffies_idle_since))) - trgt_out_l->target.out.jiffies_idle_since = - jiffies_tmp << JIFFIES_LAST_IDLE_SHIFT; -} - -static unsigned long cor_get_conn_idletime(struct cor_conn *trgt_out_l) -{ - unsigned long jiffies_shifted = jiffies << JIFFIES_LAST_IDLE_SHIFT; - __u32 burst_maxidle_hz_shifted = (BURSTPRIO_MAXIDLETIME_SECS*HZ) << - JIFFIES_LAST_IDLE_SHIFT; - unsigned long idletime_hz_shifted; - - if (unlikely(time_before(jiffies_shifted, - trgt_out_l->target.out.jiffies_idle_since))) { - idletime_hz_shifted = 0; - trgt_out_l->target.out.jiffies_idle_since = jiffies_shifted - - burst_maxidle_hz_shifted; - } else { - idletime_hz_shifted = jiffies_shifted - - trgt_out_l->target.out.jiffies_idle_since; - - if (unlikely(idletime_hz_shifted > burst_maxidle_hz_shifted)) { - idletime_hz_shifted = burst_maxidle_hz_shifted; - trgt_out_l->target.out.jiffies_idle_since = - jiffies_shifted - - burst_maxidle_hz_shifted; - } - } - - return idletime_hz_shifted; -} - -static __u32 _cor_resume_conns_burstprio(struct cor_conn *trgt_out_l, - __u32 priority) -{ - unsigned long idletime_hz_shifted = cor_get_conn_idletime(trgt_out_l); - __u32 idletime_msecs = jiffies_to_msecs(idletime_hz_shifted >> - JIFFIES_LAST_IDLE_SHIFT); - __u32 burstfactor; - __u64 newprio; - - BUG_ON(idletime_msecs > BURSTPRIO_MAXIDLETIME_SECS*1000); - BUG_ON(BURSTPRIO_MAXIDLETIME_SECS*1000LL > U32_MAX / 1024); - - burstfactor = (1024 * idletime_msecs) / - (BURSTPRIO_MAXIDLETIME_SECS * 1000); - - if (trgt_out_l->is_highlatency != 0) - newprio = (((__u64) priority) * (1024 + 1 * burstfactor)) / - 1024; - else - newprio = (((__u64) priority) * (1024 + 2 * burstfactor)) / - 1024; - - BUG_ON(newprio > U32_MAX); - return (__u32) newprio; -} - -static __u64 _cor_resume_conns_maxsend(struct cor_qos_queue *q, - struct cor_conn *trgt_out_l, __u32 newpriority, - int *maxsend_forcedelay) -{ - unsigned long iflags; - - struct cor_neighbor *nb = trgt_out_l->target.out.nb; - __u32 oldpriority = trgt_out_l->target.out.rb_priority; - __u64 priority_sum; - __u32 numconns; - __u64 bytes_per_round; - - spin_lock_irqsave(&(nb->conns_waiting.lock), iflags); - spin_lock(&(q->qlock)); - - BUG_ON(nb->conns_waiting.priority_sum < oldpriority); - BUG_ON(q->priority_sum < oldpriority); - nb->conns_waiting.priority_sum -= oldpriority; - q->priority_sum -= oldpriority; - - BUG_ON(nb->conns_waiting.priority_sum + newpriority < - nb->conns_waiting.priority_sum); - BUG_ON(q->priority_sum + newpriority < q->priority_sum); - nb->conns_waiting.priority_sum += newpriority; - q->priority_sum += newpriority; - - priority_sum = q->priority_sum; - numconns = q->numconns; - - spin_unlock(&(q->qlock)); - 
spin_unlock_irqrestore(&(nb->conns_waiting.lock), iflags); - - trgt_out_l->target.out.rb_priority = newpriority; - - if (numconns <= 4) { - *maxsend_forcedelay = 1; - bytes_per_round = 2048LL; - } else { - *maxsend_forcedelay = 0; - bytes_per_round = 1024LL; - } - - if (trgt_out_l->is_highlatency != 0) - bytes_per_round += bytes_per_round/8; - - return div_u64(bytes_per_round * ((__u64) newpriority) * - ((__u64) numconns), priority_sum); -} - -static int _cor_resume_neighbors_nextpass( - struct cor_neighbor *nb_waitingconnslocked) -{ - BUG_ON(list_empty(&(nb_waitingconnslocked->conns_waiting.lh)) == 0); - - if (list_empty(&(nb_waitingconnslocked->conns_waiting.lh_nextpass))) { - BUG_ON(nb_waitingconnslocked->conns_waiting.cnt != 0); - return 1; - } - - BUG_ON(nb_waitingconnslocked->conns_waiting.cnt == 0); - - cor_swap_list_items(&(nb_waitingconnslocked->conns_waiting.lh), - &(nb_waitingconnslocked->conns_waiting.lh_nextpass)); - - return 0; -} - -static int _cor_resume_neighbors(struct cor_qos_queue *q, - struct cor_neighbor *nb, unsigned long jiffies_nb_lastduration, - int *progress) -{ - unsigned long iflags; - - while (1) { - __u32 priority; - __u32 burstprio; - __u32 maxsend; - int maxsend_forcedelay; - - int rc2; - __u32 sent2 = 0; - - struct cor_conn *cn = 0; - spin_lock_irqsave(&(nb->conns_waiting.lock), iflags); - if (list_empty(&(nb->conns_waiting.lh)) != 0) { - int done = _cor_resume_neighbors_nextpass(nb); - spin_unlock_irqrestore(&(nb->conns_waiting.lock), - iflags); - return done ? QOS_RESUME_DONE : QOS_RESUME_NEXTNEIGHBOR; - } - BUG_ON(nb->conns_waiting.cnt == 0); - - cn = container_of(nb->conns_waiting.lh.next, struct cor_conn, - target.out.rb.lh); - BUG_ON(cn->targettype != TARGET_OUT); - BUG_ON(cn->target.out.rb.lh.prev != &(nb->conns_waiting.lh)); - BUG_ON((cn->target.out.rb.lh.next == &(nb->conns_waiting.lh)) && - (nb->conns_waiting.lh.prev != - &(cn->target.out.rb.lh))); - list_del(&(cn->target.out.rb.lh)); - list_add_tail(&(cn->target.out.rb.lh), - &(nb->conns_waiting.lh_nextpass)); - kref_get(&(cn->ref)); - spin_unlock_irqrestore(&(nb->conns_waiting.lock), iflags); - - - priority = cor_refresh_conn_priority(cn, 0); - - spin_lock_bh(&(cn->rcv_lock)); - - if (unlikely(cn->targettype != TARGET_OUT)) { - spin_unlock_bh(&(cn->rcv_lock)); - continue; - } - - burstprio = _cor_resume_conns_burstprio(cn, priority); - - maxsend = _cor_resume_conns_maxsend(q, cn, burstprio, - &maxsend_forcedelay); - if (cn->target.out.maxsend_extra >= maxsend) - maxsend_forcedelay = 0; - maxsend += cn->target.out.maxsend_extra; - if (unlikely(maxsend > U32_MAX)) - maxsend = U32_MAX; - if (unlikely(maxsend >= 65536)) - maxsend_forcedelay = 0; - - rc2 = _cor_flush_out(cn, maxsend, &sent2, 1, - maxsend_forcedelay); - - if (rc2 == RC_FLUSH_CONN_OUT_OK || - rc2 == RC_FLUSH_CONN_OUT_NBNOTACTIVE) { - cn->target.out.maxsend_extra = 0; - cor_qos_remove_conn(cn); - } else if (sent2 == 0 && (rc2 == RC_FLUSH_CONN_OUT_CONG || - rc2 == RC_FLUSH_CONN_OUT_OOM)) { - spin_lock_irqsave(&(nb->conns_waiting.lock), iflags); - if (likely(cn->target.out.rb.in_queue != - RB_INQUEUE_FALSE)) { - list_del(&(cn->target.out.rb.lh)); - list_add(&(cn->target.out.rb.lh), - &(nb->conns_waiting.lh)); - } - spin_unlock_irqrestore(&(nb->conns_waiting.lock), - iflags); - } else if (rc2 == RC_FLUSH_CONN_OUT_CONG || - rc2 == RC_FLUSH_CONN_OUT_OOM) { - cn->target.out.maxsend_extra = 0; - } else if (likely(rc2 == RC_FLUSH_CONN_OUT_MAXSENT)) { - if (unlikely(maxsend - sent2 > 65535)) - cn->target.out.maxsend_extra = 65535; - else - 
cn->target.out.maxsend_extra = maxsend - sent2; - } - - if (sent2 != 0) - _cor_resume_conns_accountbusytime(cn, priority, - burstprio, jiffies_nb_lastduration); - - spin_unlock_bh(&(cn->rcv_lock)); - - if (sent2 != 0) { - *progress = 1; - cor_wake_sender(cn); - } - - kref_put(&(cn->ref), cor_free_conn); - - if (rc2 == RC_FLUSH_CONN_OUT_CONG || - rc2 == RC_FLUSH_CONN_OUT_OOM) { - return QOS_RESUME_CONG; - } - } -} - -static struct cor_neighbor *cor_resume_neighbors_peeknextnb( - struct cor_qos_queue *q, unsigned long *jiffies_nb_lastduration) -{ - unsigned long iflags; - - struct cor_neighbor *nb; - - spin_lock_irqsave(&(q->qlock), iflags); - - if (list_empty(&(q->neighbors_waiting))) { - if (list_empty(&(q->neighbors_waiting_nextpass))) { - BUG_ON(q->numconns != 0); - spin_unlock_irqrestore(&(q->qlock), iflags); - - return 0; - } else { - unsigned long jiffies_tmp = jiffies; - cor_swap_list_items(&(q->neighbors_waiting), - &(q->neighbors_waiting_nextpass)); - - WARN_ONCE(time_before(jiffies_tmp, - q->jiffies_nb_pass_start), - "cor_resume_neighbors_peeknextnb: jiffies after jiffies_nb_pass_start (this is only a performance issue)"); - - q->jiffies_nb_lastduration = jiffies - - q->jiffies_nb_pass_start; - q->jiffies_nb_pass_start = jiffies_tmp; - } - } - - *jiffies_nb_lastduration = q->jiffies_nb_lastduration; - - - BUG_ON(q->numconns == 0); - BUG_ON(list_empty(&(q->neighbors_waiting))); - - nb = container_of(q->neighbors_waiting.next, struct cor_neighbor, - rb.lh); - - BUG_ON(nb->rb.in_queue != RB_INQUEUE_TRUE); - BUG_ON(nb->rb.lh.prev != &(q->neighbors_waiting)); - BUG_ON((nb->rb.lh.next == &(q->neighbors_waiting)) && - (q->neighbors_waiting.prev != &(nb->rb.lh))); - - kref_get(&(nb->ref)); - - spin_unlock_irqrestore(&(q->qlock), iflags); - - return nb; -} - -static int cor_resume_neighbors(struct cor_qos_queue *q, int *sent) -{ - unsigned long iflags; - int rc; - - unsigned long jiffies_nb_lastduration; - struct cor_neighbor *nb = cor_resume_neighbors_peeknextnb(q, - &jiffies_nb_lastduration); - - if (nb == 0) - return QOS_RESUME_DONE; - - atomic_set(&(nb->cmsg_delay_conndata), 1); - - rc = _cor_resume_neighbors(q, nb, jiffies_nb_lastduration, sent); - if (rc == QOS_RESUME_CONG) { - kref_put(&(nb->ref), cor_neighbor_free); - return QOS_RESUME_CONG; - } - BUG_ON(rc != QOS_RESUME_DONE && rc != QOS_RESUME_NEXTNEIGHBOR); - - atomic_set(&(nb->cmsg_delay_conndata), 0); - spin_lock_bh(&(nb->cmsg_lock)); - cor_schedule_controlmsg_timer(nb); - spin_unlock_bh(&(nb->cmsg_lock)); - - spin_lock_irqsave(&(q->qlock), iflags); - if (likely(nb->rb.in_queue == RB_INQUEUE_TRUE)) { - if (nb->conns_waiting.cnt == 0) { - nb->rb.in_queue = RB_INQUEUE_FALSE; - list_del(&(nb->rb.lh)); - kref_put(&(nb->ref), cor_kreffree_bug); - } else { - list_del(&(nb->rb.lh)); - list_add_tail(&(nb->rb.lh), - &(q->neighbors_waiting_nextpass)); - } - } - spin_unlock_irqrestore(&(q->qlock), iflags); - - kref_put(&(nb->ref), cor_neighbor_free); - - return QOS_RESUME_NEXTNEIGHBOR; -} - -static int cor_send_retrans(struct cor_neighbor *nb, int *sent); - -static int __cor_qos_resume(struct cor_qos_queue *q, int caller, int *sent) -{ - unsigned long iflags; - int rc = QOS_RESUME_DONE; - struct list_head *lh; - - spin_lock_irqsave(&(q->qlock), iflags); - - if (caller == QOS_CALLER_KPACKET) - lh = &(q->kpackets_waiting); - else if (caller == QOS_CALLER_CONN_RETRANS) - lh = &(q->conn_retrans_waiting); - else if (caller == QOS_CALLER_ANNOUNCE) - lh = &(q->announce_waiting); - else - BUG(); - - while (list_empty(lh) == 0) { - struct 
cor_resume_block *rb = container_of(lh->next, - struct cor_resume_block, lh); - ktime_t cmsg_send_start; - BUG_ON(rb->in_queue != RB_INQUEUE_TRUE); - rb->in_queue = RB_INQUEUE_FALSE; - list_del(&(rb->lh)); - - if (caller == QOS_CALLER_KPACKET) - cmsg_send_start = container_of(rb, struct cor_neighbor, - rb_kp)->cmsg_send_start; - - spin_unlock_irqrestore(&(q->qlock), iflags); - if (caller == QOS_CALLER_KPACKET) { - rc = cor_send_messages(container_of(rb, - struct cor_neighbor, rb_kp), - cmsg_send_start, sent); - } else if (caller == QOS_CALLER_CONN_RETRANS) { - rc = cor_send_retrans(container_of(rb, - struct cor_neighbor, rb_cr), sent); - } else if (caller == QOS_CALLER_ANNOUNCE) { - rc = _cor_send_announce(container_of(rb, - struct cor_announce_data, rb), 1, sent); - } else { - BUG(); - } - spin_lock_irqsave(&(q->qlock), iflags); - - if (rc != QOS_RESUME_DONE && caller == QOS_CALLER_KPACKET) - container_of(rb, struct cor_neighbor, rb_kp - )->cmsg_send_start = cmsg_send_start; - - if (rc != QOS_RESUME_DONE && rb->in_queue == RB_INQUEUE_FALSE) { - rb->in_queue = RB_INQUEUE_TRUE; - list_add(&(rb->lh), lh); - break; - } - - if (caller == QOS_CALLER_KPACKET) { - kref_put(&(container_of(rb, struct cor_neighbor, - rb_kp)->ref), cor_neighbor_free); - } else if (caller == QOS_CALLER_CONN_RETRANS) { - kref_put(&(container_of(rb, struct cor_neighbor, - rb_cr)->ref), cor_neighbor_free); - } else if (caller == QOS_CALLER_ANNOUNCE) { - kref_put(&(container_of(rb, - struct cor_announce_data, rb)->ref), - cor_announce_data_free); - } else { - BUG(); - } - - kref_put(&(q->ref), cor_kreffree_bug); - } - - spin_unlock_irqrestore(&(q->qlock), iflags); - - return rc; -} - -static int _cor_qos_resume(struct cor_qos_queue *q, int *sent) -{ - unsigned long iflags; - int i = QOS_CALLER_KPACKET; - int rc; - - spin_lock_irqsave(&(q->qlock), iflags); - - while (1) { - if (q->dev == 0) { - rc = QOS_RESUME_EXIT; - break; - } - - if (i == QOS_CALLER_KPACKET && - list_empty(&(q->kpackets_waiting))) { - i = QOS_CALLER_CONN_RETRANS; - continue; - } else if (i == QOS_CALLER_CONN_RETRANS && - list_empty(&(q->conn_retrans_waiting))) { - i = QOS_CALLER_ANNOUNCE; - continue; - } else if (i == QOS_CALLER_ANNOUNCE && - list_empty(&(q->announce_waiting))) { - i = QOS_CALLER_NEIGHBOR; - continue; - } else if (i == QOS_CALLER_NEIGHBOR && - list_empty(&(q->neighbors_waiting)) && - list_empty(&(q->neighbors_waiting_nextpass))) { - rc = QOS_RESUME_DONE; - break; - } - - spin_unlock_irqrestore(&(q->qlock), iflags); - - if (i == QOS_CALLER_NEIGHBOR) { - rc = cor_resume_neighbors(q, sent); - } else { - rc = __cor_qos_resume(q, i, sent); - } - - spin_lock_irqsave(&(q->qlock), iflags); - - if (rc == QOS_RESUME_CONG) - break; - - i = QOS_CALLER_KPACKET; - } - - if (rc == QOS_RESUME_DONE) { - BUG_ON(!list_empty(&(q->kpackets_waiting))); - BUG_ON(!list_empty(&(q->conn_retrans_waiting))); - BUG_ON(!list_empty(&(q->announce_waiting))); - BUG_ON(!list_empty(&(q->neighbors_waiting))); - BUG_ON(!list_empty(&(q->neighbors_waiting_nextpass))); - - atomic_set(&(q->qos_resume_scheduled), 0); - } - - cor_qos_queue_set_congstatus(q); - - if (q->dev == 0) - rc = QOS_RESUME_EXIT; - - spin_unlock_irqrestore(&(q->qlock), iflags); - - return rc; -} - -int cor_qos_resume_threadfunc(void *data) -{ - struct cor_qos_queue *q = (struct cor_qos_queue *) data; - - while (1) { - int sent = 0; - int rc; - - rc = _cor_qos_resume(q, &sent); - - if (rc == QOS_RESUME_DONE) { - wait_event(q->qos_resume_wq, - atomic_read(&(q->qos_resume_scheduled)) - != 0); - } else if (rc 
== QOS_RESUME_CONG) { - unsigned long jiffies_tmp = jiffies; - unsigned long delay_ms = 0; - - if (sent) - q->jiffies_lastprogress = jiffies_tmp; - delay_ms = (jiffies_to_msecs(jiffies_tmp - - q->jiffies_lastprogress) + 8) / 4; - if (delay_ms < 2) { - delay_ms = 2; - } else if (delay_ms > 20) { - delay_ms = 20; - } - - msleep(delay_ms); - } else if (rc == QOS_RESUME_EXIT) { - return 0; - } else { - BUG(); - } - } -} - -static inline int cor_qos_queue_is_destroyed(struct cor_qos_queue *q_locked) -{ - return q_locked->dev == 0; -} - -struct cor_qos_queue *cor_get_queue(struct net_device *dev) -{ - struct cor_qos_queue *ret = 0; - struct list_head *curr; - - spin_lock_bh(&cor_queues_lock); - curr = cor_queues.next; - while (curr != (&cor_queues)) { - struct cor_qos_queue *q = container_of(curr, - struct cor_qos_queue, queue_list); - if (q->dev == dev) { - ret = q; - kref_get(&(ret->ref)); - break; - } - curr = curr->next; - } - spin_unlock_bh(&cor_queues_lock); - return ret; -} - -static void cor_qos_waitexit(struct work_struct *work) -{ - spin_lock_bh(&cor_queues_lock); - while (!list_empty(&cor_queues_waitexit)) { - struct cor_qos_queue *q = container_of(cor_queues_waitexit.next, - struct cor_qos_queue, queue_list); - list_del(&(q->queue_list)); - - spin_unlock_bh(&cor_queues_lock); - - kthread_stop(q->qos_resume_thread); - put_task_struct(q->qos_resume_thread); - kref_put(&(q->ref), cor_free_qos); - - spin_lock_bh(&cor_queues_lock); - } - spin_unlock_bh(&cor_queues_lock); -} - -static void _cor_destroy_queue_kpackets(struct cor_qos_queue *q) -{ - while (list_empty(&(q->kpackets_waiting)) == 0) { - struct list_head *curr = q->kpackets_waiting.next; - struct cor_resume_block *rb = container_of(curr, - struct cor_resume_block, lh); - BUG_ON(rb->in_queue != RB_INQUEUE_TRUE); - rb->in_queue = RB_INQUEUE_FALSE; - list_del(curr); - - kref_put(&(container_of(rb, struct cor_neighbor, rb_kp)->ref), - cor_neighbor_free); - kref_put(&(q->ref), cor_kreffree_bug); - } -} - -static void _cor_destroy_queue_conn_retrans(struct cor_qos_queue *q) -{ - while (list_empty(&(q->conn_retrans_waiting)) == 0) { - struct list_head *curr = q->conn_retrans_waiting.next; - struct cor_resume_block *rb = container_of(curr, - struct cor_resume_block, lh); - BUG_ON(rb->in_queue != RB_INQUEUE_TRUE); - rb->in_queue = RB_INQUEUE_FALSE; - list_del(curr); - - kref_put(&(container_of(rb, struct cor_neighbor, rb_cr)->ref), - cor_neighbor_free); - kref_put(&(q->ref), cor_kreffree_bug); - } -} - -static void _cor_destroy_queue_announce(struct cor_qos_queue *q) -{ - while (list_empty(&(q->announce_waiting)) == 0) { - struct list_head *curr = q->announce_waiting.next; - struct cor_resume_block *rb = container_of(curr, - struct cor_resume_block, lh); - BUG_ON(rb->in_queue != RB_INQUEUE_TRUE); - rb->in_queue = RB_INQUEUE_FALSE; - list_del(curr); - - kref_put(&(container_of(rb, struct cor_announce_data, rb)->ref), - cor_announce_data_free); - kref_put(&(q->ref), cor_kreffree_bug); - } -} - -static void _cor_destroy_queue_neighbor(struct cor_qos_queue *q, - struct list_head *lh) -{ - while (list_empty(lh) == 0) { - struct list_head *curr = lh->next; - struct cor_resume_block *rb = container_of(curr, - struct cor_resume_block, lh); - BUG_ON(rb->in_queue != RB_INQUEUE_TRUE); - rb->in_queue = RB_INQUEUE_FALSE; - list_del(curr); - - kref_put(&(container_of(rb, struct cor_neighbor, rb)->ref), - cor_neighbor_free); - kref_put(&(q->ref), cor_kreffree_bug); - } -} - -static struct cor_qos_queue *cor_unlink_queue(struct net_device *dev) -{ - 
struct cor_qos_queue *ret = 0; - struct list_head *curr; - - spin_lock_bh(&cor_queues_lock); - curr = cor_queues.next; - while (curr != (&cor_queues)) { - struct cor_qos_queue *q = container_of(curr, - struct cor_qos_queue, queue_list); - if (dev == 0 || q->dev == dev) { - ret = q; - kref_get(&(ret->ref)); - - list_del(&(q->queue_list)); - kref_put(&(q->ref), cor_kreffree_bug); - break; - } - curr = curr->next; - } - spin_unlock_bh(&cor_queues_lock); - return ret; -} - -int cor_destroy_queue(struct net_device *dev) -{ - int rc = 1; - unsigned long iflags; - - while (1) { - struct cor_qos_queue *q = cor_unlink_queue(dev); - - if (q == 0) - break; - - rc = 0; - - spin_lock_irqsave(&(q->qlock), iflags); - if (q->dev != 0) { - dev_put(q->dev); - q->dev = 0; - } - _cor_destroy_queue_kpackets(q); - _cor_destroy_queue_conn_retrans(q); - _cor_destroy_queue_announce(q); - _cor_destroy_queue_neighbor(q, &(q->neighbors_waiting)); - _cor_destroy_queue_neighbor(q, &(q->neighbors_waiting_nextpass)); - - spin_unlock_irqrestore(&(q->qlock), iflags); - - cor_schedule_qos_resume(q); - - spin_lock_bh(&cor_queues_lock); - list_add(&(q->queue_list), &cor_queues_waitexit); - spin_unlock_bh(&cor_queues_lock); - - schedule_work(&cor_qos_waitexit_work); - } - - return rc; -} - -int cor_create_queue(struct net_device *dev) -{ - struct cor_qos_queue *q = kmalloc(sizeof(struct cor_qos_queue), - GFP_KERNEL); - - if (q == 0) { - printk(KERN_ERR "cor: unable to allocate memory for device " - "queue, not enabling device"); - return 1; - } - - memset(q, 0, sizeof(struct cor_qos_queue)); - - spin_lock_init(&(q->qlock)); - - kref_init(&(q->ref)); - - q->dev = dev; - dev_hold(dev); - - atomic_set(&(q->qos_resume_scheduled), 0); - - init_waitqueue_head(&(q->qos_resume_wq)); - - INIT_LIST_HEAD(&(q->kpackets_waiting)); - INIT_LIST_HEAD(&(q->conn_retrans_waiting)); - INIT_LIST_HEAD(&(q->announce_waiting)); - INIT_LIST_HEAD(&(q->neighbors_waiting)); - INIT_LIST_HEAD(&(q->neighbors_waiting_nextpass)); - - atomic_set(&(q->cong_status), 0); - - q->qos_resume_thread = kthread_create(cor_qos_resume_threadfunc, - q, "cor_qos_resume"); - if (q->qos_resume_thread == 0) { - printk(KERN_ERR "cor: unable to start qos_resume thread"); - - if (q->dev != 0) { - dev_put(q->dev); - q->dev = 0; - } - - kref_put(&(q->ref), cor_free_qos); - - return 1; - } - get_task_struct(q->qos_resume_thread); - wake_up_process(q->qos_resume_thread); - - spin_lock_bh(&cor_queues_lock); - list_add(&(q->queue_list), &cor_queues); - spin_unlock_bh(&cor_queues_lock); - - return 0; -} - -static void cor_qos_queue_set_congstatus(struct cor_qos_queue *q_locked) -{ - __u32 newstatus; - - if (time_before(q_locked->jiffies_lastdrop, jiffies - HZ/50)) { - newstatus = CONGSTATUS_NONE; - } else if (list_empty(&(q_locked->kpackets_waiting)) == 0) { - newstatus = CONGSTATUS_KPACKETS; - } else if (list_empty(&(q_locked->conn_retrans_waiting)) == 0) { - newstatus = CONGSTATUS_RETRANS; - } else if (list_empty(&(q_locked->announce_waiting)) == 0) { - newstatus = CONGSTATUS_ANNOUNCE; - } else if (list_empty(&(q_locked->neighbors_waiting)) == 0 || - list_empty(&(q_locked->neighbors_waiting_nextpass)) == - 0) { - newstatus = CONGSTATUS_CONNDATA; - } else { - newstatus = CONGSTATUS_NONE; - } - - atomic_set(&(q_locked->cong_status), newstatus); -} - -void cor_qos_set_lastdrop(struct cor_qos_queue *q) -{ - unsigned long iflags; - - spin_lock_irqsave(&(q->qlock), iflags); - q->jiffies_lastdrop = jiffies; - cor_qos_queue_set_congstatus(q); - spin_unlock_irqrestore(&(q->qlock), iflags); -} 
- -/** - * if caller == QOS_CALLER_NEIGHBOR, nb->conns_waiting.lock must be held by - * caller - */ -static void _cor_qos_enqueue(struct cor_qos_queue *q, - struct cor_resume_block *rb, ktime_t cmsg_send_start, - int caller, int from_nbcongwin_resume) -{ - int queues_empty; - - if (rb->in_queue == RB_INQUEUE_TRUE) { - BUG_ON(caller == QOS_CALLER_NEIGHBOR); - - if (caller == QOS_CALLER_KPACKET) { - struct cor_neighbor *nb = container_of(rb, - struct cor_neighbor, rb_kp); - if (ktime_before(cmsg_send_start, nb->cmsg_send_start)) - nb->cmsg_send_start = cmsg_send_start; - } - return; - } else if (rb->in_queue == RB_INQUEUE_NBCONGWIN && - from_nbcongwin_resume == 0) { - return; - } - - if (unlikely(cor_qos_queue_is_destroyed(q))) - return; - - queues_empty = list_empty(&(q->kpackets_waiting)) && - list_empty(&(q->conn_retrans_waiting)) && - list_empty(&(q->announce_waiting)) && - list_empty(&(q->neighbors_waiting)) && - list_empty(&(q->neighbors_waiting_nextpass)); - - BUG_ON(!queues_empty && atomic_read(&(q->qos_resume_scheduled)) == 0); - - rb->in_queue = RB_INQUEUE_TRUE; - - if (caller == QOS_CALLER_KPACKET) { - struct cor_neighbor *nb = container_of(rb, struct cor_neighbor, - rb_kp); - nb->cmsg_send_start = cmsg_send_start; - list_add_tail(&(rb->lh), &(q->kpackets_waiting)); - kref_get(&(nb->ref)); - } else if (caller == QOS_CALLER_CONN_RETRANS) { - list_add_tail(&(rb->lh) , &(q->conn_retrans_waiting)); - kref_get(&(container_of(rb, struct cor_neighbor, rb_cr)->ref)); - } else if (caller == QOS_CALLER_ANNOUNCE) { - list_add_tail(&(rb->lh), &(q->announce_waiting)); - kref_get(&(container_of(rb, struct cor_announce_data, rb)->ref)); - } else if (caller == QOS_CALLER_NEIGHBOR) { - struct cor_neighbor *nb = container_of(rb, struct cor_neighbor, - rb); - list_add_tail(&(rb->lh), &(q->neighbors_waiting_nextpass)); - kref_get(&(nb->ref)); - BUG_ON(nb->conns_waiting.cnt == 0); - q->numconns += nb->conns_waiting.cnt; - q->priority_sum += nb->conns_waiting.priority_sum; - q->jiffies_nb_lastduration = 0; - q->jiffies_nb_pass_start = jiffies; - } else { - BUG(); - } - kref_get(&(q->ref)); - - cor_schedule_qos_resume(q); - - cor_qos_queue_set_congstatus(q); -} - -void cor_qos_enqueue(struct cor_qos_queue *q, struct cor_resume_block *rb, - ktime_t cmsg_send_start, int caller) -{ - unsigned long iflags; - - spin_lock_irqsave(&(q->qlock), iflags); - _cor_qos_enqueue(q, rb, cmsg_send_start, caller, 0); - spin_unlock_irqrestore(&(q->qlock), iflags); -} - -void cor_qos_remove_conn(struct cor_conn *trgt_out_lx) -{ - unsigned long iflags; - struct cor_neighbor *nb = trgt_out_lx->target.out.nb; - struct cor_qos_queue *q = nb->queue; - int sched_cmsg = 0; - int krefput_nb = 0; - - BUG_ON(trgt_out_lx->targettype != TARGET_OUT); - BUG_ON(q == 0); - - spin_lock_irqsave(&(nb->conns_waiting.lock), iflags); - if (trgt_out_lx->target.out.rb.in_queue == RB_INQUEUE_FALSE) { - spin_unlock_irqrestore(&(nb->conns_waiting.lock), iflags); - return; - } - spin_lock(&(q->qlock)); - - trgt_out_lx->target.out.rb.in_queue = RB_INQUEUE_FALSE; - list_del(&(trgt_out_lx->target.out.rb.lh)); - BUG_ON(nb->conns_waiting.cnt == 0); - nb->conns_waiting.cnt--; - if (nb->rb.in_queue == RB_INQUEUE_TRUE) { - BUG_ON(q->numconns == 0); - q->numconns--; - } - - BUG_ON(nb->conns_waiting.priority_sum < - trgt_out_lx->target.out.rb_priority); - BUG_ON(q->priority_sum < trgt_out_lx->target.out.rb_priority); - nb->conns_waiting.priority_sum -= - trgt_out_lx->target.out.rb_priority; - q->priority_sum -= trgt_out_lx->target.out.rb_priority; - 
trgt_out_lx->target.out.rb_priority = 0; - - if (list_empty(&(nb->conns_waiting.lh)) && - list_empty(&(nb->conns_waiting.lh_nextpass))) { - BUG_ON(nb->conns_waiting.priority_sum != 0); - BUG_ON(nb->conns_waiting.cnt != 0); - } else { - BUG_ON(nb->conns_waiting.cnt == 0); - } - - if (list_empty(&(nb->conns_waiting.lh)) && - list_empty(&(nb->conns_waiting.lh_nextpass)) && - nb->rb.in_queue == RB_INQUEUE_TRUE) { - nb->rb.in_queue = RB_INQUEUE_FALSE; - list_del(&(nb->rb.lh)); - if (atomic_read(&(nb->cmsg_delay_conndata)) != 0) { - atomic_set(&(nb->cmsg_delay_conndata), 0); - sched_cmsg = 1; - - } - krefput_nb = 1; - - BUG_ON(list_empty(&(q->neighbors_waiting)) && - list_empty(&(q->neighbors_waiting_nextpass)) && - q->numconns != 0); - BUG_ON(list_empty(&(q->neighbors_waiting)) && - list_empty(&(q->neighbors_waiting_nextpass)) && - q->priority_sum != 0); - - cor_qos_queue_set_congstatus(q); - } - - spin_unlock(&(q->qlock)); - spin_unlock_irqrestore(&(nb->conns_waiting.lock), iflags); - - if (sched_cmsg) { - spin_lock_bh(&(nb->cmsg_lock)); - cor_schedule_controlmsg_timer(nb); - spin_unlock_bh(&(nb->cmsg_lock)); - } - - kref_put(&(trgt_out_lx->ref), cor_kreffree_bug); - - if (krefput_nb) - kref_put(&(nb->ref), cor_neighbor_free); -} - -static void cor_qos_enqueue_conn(struct cor_conn *trgt_out_lx) -{ - unsigned long iflags; - struct cor_neighbor *nb = trgt_out_lx->target.out.nb; - struct cor_qos_queue *q; - - BUG_ON(trgt_out_lx->data_buf.read_remaining == 0); - - spin_lock_irqsave(&(nb->conns_waiting.lock), iflags); - - if (trgt_out_lx->target.out.rb.in_queue != RB_INQUEUE_FALSE) - goto out; - - trgt_out_lx->target.out.rb.in_queue = RB_INQUEUE_TRUE; - list_add_tail(&(trgt_out_lx->target.out.rb.lh), - &(nb->conns_waiting.lh)); - kref_get(&(trgt_out_lx->ref)); - nb->conns_waiting.cnt++; - - q = trgt_out_lx->target.out.nb->queue; - spin_lock(&(q->qlock)); - if (nb->rb.in_queue == RB_INQUEUE_TRUE) { - q->numconns++; - } else { - _cor_qos_enqueue(q, &(nb->rb), ns_to_ktime(0), - QOS_CALLER_NEIGHBOR, 0); - } - spin_unlock(&(q->qlock)); - -out: - spin_unlock_irqrestore(&(nb->conns_waiting.lock), iflags); -} - -static struct sk_buff *cor_create_packet(struct cor_neighbor *nb, int size, - gfp_t alloc_flags) -{ - struct sk_buff *ret; - - ret = alloc_skb(size + LL_RESERVED_SPACE(nb->dev) + - nb->dev->needed_tailroom, alloc_flags); - if (unlikely(ret == 0)) - return 0; - - ret->protocol = htons(ETH_P_COR); - ret->dev = nb->dev; - - skb_reserve(ret, LL_RESERVED_SPACE(nb->dev)); - if(unlikely(dev_hard_header(ret, nb->dev, ETH_P_COR, nb->mac, - nb->dev->dev_addr, ret->len) < 0)) - return 0; - skb_reset_network_header(ret); - - return ret; -} - -struct sk_buff *cor_create_packet_cmsg(struct cor_neighbor *nb, int size, - gfp_t alloc_flags, __u64 seqno) -{ - struct sk_buff *ret; - char *dest; - - ret = cor_create_packet(nb, size + 7, alloc_flags); - if (unlikely(ret == 0)) - return 0; - - dest = skb_put(ret, 7); - BUG_ON(dest == 0); - - dest[0] = PACKET_TYPE_CMSG; - dest += 1; - - cor_put_u48(dest, seqno); - dest += 6; - - return ret; -} - -struct sk_buff *cor_create_packet_conndata(struct cor_neighbor *nb, int size, - gfp_t alloc_flags, __u32 conn_id, __u64 seqno, - __u8 snd_delayed_lowbuf, __u8 flush) -{ - struct sk_buff *ret; - char *dest; - - ret = cor_create_packet(nb, size + 11, alloc_flags); - if (unlikely(ret == 0)) - return 0; - - dest = skb_put(ret, 11); - BUG_ON(dest == 0); - - if (flush != 0) { - if (snd_delayed_lowbuf != 0) { - dest[0] = PACKET_TYPE_CONNDATA_LOWBUFDELAYED_FLUSH; - } else { - dest[0] = 
PACKET_TYPE_CONNDATA_FLUSH; - } - } else { - if (snd_delayed_lowbuf != 0) { - dest[0] = PACKET_TYPE_CONNDATA_LOWBUFDELAYED; - } else { - dest[0] = PACKET_TYPE_CONNDATA; - } - } - dest += 1; - - cor_put_u32(dest, conn_id); - dest += 4; - cor_put_u48(dest, seqno); - dest += 6; - - return ret; -} - void cor_reschedule_conn_retrans_timer( struct cor_neighbor *nb_retransconnlocked) { @@ -1790,7 +392,7 @@ out: rc == RC_SENDRETRANS_QUEUEFULLDROPPED); } -static int cor_send_retrans(struct cor_neighbor *nb, int *sent) +int cor_send_retrans(struct cor_neighbor *nb, int *sent) { int queuefull = 0; int nbstate = cor_get_neigh_state(nb); @@ -2387,8 +989,8 @@ static __u64 cor_get_windowlimit(struct cor_conn *trgt_out_lx) trgt_out_lx->target.out.seqno_nextsend); } -static int _cor_flush_out(struct cor_conn *trgt_out_lx, __u32 maxsend, - __u32 *sent, int from_qos, int maxsend_forcedelay) +int _cor_flush_out(struct cor_conn *trgt_out_lx, __u32 maxsend, __u32 *sent, + int from_qos, int maxsend_forcedelay) { struct cor_neighbor *nb = trgt_out_lx->target.out.nb; @@ -2571,17 +1173,6 @@ out: return RC_FLUSH_CONN_OUT_OK; } -int cor_flush_out(struct cor_conn *trgt_out_lx, __u32 *sent) -{ - int rc = _cor_flush_out(trgt_out_lx, 1 << 30, sent, 0, 0); - - if (rc == RC_FLUSH_CONN_OUT_CONG || rc == RC_FLUSH_CONN_OUT_MAXSENT || - rc == RC_FLUSH_CONN_OUT_OOM) - cor_qos_enqueue_conn(trgt_out_lx); - - return rc; -} - void cor_resume_nbstalled_conns(struct work_struct *work) { struct cor_neighbor *nb = container_of(work, struct cor_neighbor, @@ -2630,11 +1221,6 @@ int __init cor_snd_init(void) return 0; } -void __exit cor_snd_exit1(void) -{ - flush_work(&cor_qos_waitexit_work); -} - void __exit cor_snd_exit2(void) { kmem_cache_destroy(cor_connretrans_slab); -- 2.11.4.GIT
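
Notes on the code moved by this patch (illustrative sketches, not part of the patch itself):

The nbcongwin comment in the snd.c hunk above documents the window rules: grow by 4096 bytes per round trip while more than 2/3 of cwin is in use; on packet loss cut by at most 1/4 per round trip, with at most 1/8 taken immediately and the remainder during the next round trip. Below is a minimal userspace sketch of those rules only. It drops the NBCONGWIN_SHIFT fixed-point scaling, the locking and the atomics of the real code, and every name in it is invented for illustration:

	#include <stdint.h>
	#include <stdio.h>

	struct congwin {
		uint64_t cwin;		/* congestion window, bytes */
		uint64_t shrinkto;	/* pending shrink target, <= cwin */
	};

	/* loss: cut at most 1/4 per round trip, at most 1/8 of it
	 * immediately; shrinkto < cwin marks a cut already in progress,
	 * which limits the decrement to once per round trip */
	static void congwin_loss(struct congwin *cw)
	{
		if (cw->shrinkto < cw->cwin)
			return;
		cw->shrinkto = cw->cwin - cw->cwin / 4;
		cw->cwin -= cw->cwin / 8;
	}

	/* ack: walk cwin down toward a pending shrink target, then grow
	 * by ~4096 per round trip while more than 2/3 of cwin is used */
	static void congwin_acked(struct congwin *cw, uint64_t intransit,
			uint64_t acked)
	{
		if (cw->shrinkto < cw->cwin) {
			uint64_t gap = cw->cwin - cw->shrinkto;
			uint64_t step = acked / 4;  /* spread over ~1 RTT */

			cw->cwin -= step < gap ? step : gap;
		}
		if (intransit * 3 <= cw->cwin * 2)
			return;		/* <= 2/3 used: no growth */
		{
			uint64_t incr = acked * 4096 / cw->cwin;

			cw->cwin += incr;
			cw->shrinkto += incr;
		}
	}

	int main(void)
	{
		struct congwin cw = { .cwin = 65536, .shrinkto = 65536 };

		congwin_acked(&cw, 60000, 10000);  /* well used: grows */
		printf("after ack:  %llu\n", (unsigned long long) cw.cwin);
		congwin_loss(&cw);
		printf("after loss: %llu\n", (unsigned long long) cw.cwin);
		return 0;
	}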
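_cor_resume_conns_burstprio() in the hunk above boosts the priority of connections that were recently idle: the idle fraction, in 1/1024 units and saturating at BURSTPRIO_MAXIDLETIME_SECS, scales the base priority up to 3x, or only up to 2x for high-latency connections. A standalone sketch of that arithmetic follows; the BURSTPRIO_MAXIDLETIME_SECS value here is a placeholder, the real constant lives in cor.h:

	#include <stdint.h>

	#define BURSTPRIO_MAXIDLETIME_SECS 4	/* placeholder value */

	static uint32_t burstprio(uint32_t priority, uint32_t idle_msecs,
			int is_highlatency)
	{
		uint32_t max_ms = BURSTPRIO_MAXIDLETIME_SECS * 1000;
		uint32_t burstfactor;

		if (idle_msecs > max_ms)
			idle_msecs = max_ms;
		burstfactor = (1024 * idle_msecs) / max_ms;

		/* e.g. priority 1000, half-idle, low latency:
		 * 1000 * (1024 + 2*512) / 1024 = 2000 */
		if (is_highlatency)
			return ((uint64_t) priority *
					(1024 + 1 * burstfactor)) / 1024;
		return ((uint64_t) priority *
				(1024 + 2 * burstfactor)) / 1024;
	}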
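_cor_resume_conns_maxsend() then converts that boosted priority into a byte budget for the current pass: a base of 2048 bytes per round when four or fewer connections are queued, otherwise 1024, plus 1/8 extra for high-latency connections, weighted by the connection's share of the queue-wide priority sum. This is a sketch of the arithmetic only, without the locking and priority_sum bookkeeping of the original:

	#include <stdint.h>

	/* priority_sum > 0 is guaranteed by the caller in the original */
	static uint64_t maxsend_bytes(uint32_t newpriority,
			uint32_t numconns, uint64_t priority_sum,
			int is_highlatency)
	{
		uint64_t bytes_per_round = numconns <= 4 ? 2048 : 1024;

		if (is_highlatency)
			bytes_per_round += bytes_per_round / 8;

		/* e.g. 8 conns, priority_sum 8000, newpriority 2000:
		 * 1024 * 2000 * 8 / 8000 = 2048 bytes this pass */
		return bytes_per_round * newpriority * numconns /
				priority_sum;
	}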
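_cor_qos_resume() in the hunk above services the four sender classes in strict priority order and restarts from the top after every burst of work, so kernel packets always preempt conn retransmits, which preempt announcements, which preempt connection data. The control flow reduces to the sketch below, with the two callbacks standing in for the per-class list checks and send routines; both are invented for illustration:

	/* classes in the priority order used by _cor_qos_resume() */
	enum qos_class { KPACKET, CONN_RETRANS, ANNOUNCE, NEIGHBOR };

	static void qos_resume_sketch(int (*is_empty)(enum qos_class),
			int (*service)(enum qos_class))	/* != 0: congested */
	{
		enum qos_class i = KPACKET;

		while (1) {
			if (is_empty(i)) {
				if (i == NEIGHBOR)
					break;	/* all classes drained */
				i++;
				continue;
			}
			if (service(i) != 0)
				break;		/* congestion: retry later */
			i = KPACKET;		/* restart at the top */
		}
	}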
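Finally, cor_create_packet_cmsg() and cor_create_packet_conndata() above prepend fixed headers to every frame: one packet-type byte plus a 48-bit sequence number for control messages (7 bytes), with a 32-bit connection id in between for connection data (11 bytes). Below is a hypothetical encoder for the conndata header; the helpers are stand-ins for cor_put_u32()/cor_put_u48(), assuming the big-endian layout implied by cor_parse_u32()/cor_parse_u48() on the receive path:

	#include <stdint.h>
	#include <stddef.h>

	static void put_u32(uint8_t *d, uint32_t v)
	{
		d[0] = v >> 24; d[1] = v >> 16; d[2] = v >> 8; d[3] = v;
	}

	static void put_u48(uint8_t *d, uint64_t v)
	{
		d[0] = v >> 40; d[1] = v >> 32; d[2] = v >> 24;
		d[3] = v >> 16; d[4] = v >> 8;  d[5] = v;
	}

	/* | type:1 | conn_id:4 | seqno:6 |  -- 11 bytes, matching the
	 * skb_put(ret, 11) in cor_create_packet_conndata() */
	static size_t encode_conndata_hdr(uint8_t *d, uint8_t type,
			uint32_t conn_id, uint64_t seqno)
	{
		d[0] = type;
		put_u32(d + 1, conn_id);
		put_u48(d + 5, seqno);
		return 11;
	}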