2 * Connection oriented routing
3 * Copyright (C) 2007-2021 Michael Blizek
5 * This program is free software; you can redistribute it and/or
6 * modify it under the terms of the GNU General Public License
7 * as published by the Free Software Foundation; either version 2
8 * of the License, or (at your option) any later version.
10 * This program is distributed in the hope that it will be useful,
11 * but WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
13 * GNU General Public License for more details.
16 #include <linux/atomic.h>
18 #include <linux/module.h>
19 #include <linux/types.h>
20 #include <linux/interrupt.h>
21 #include <linux/sched.h>
22 #include <linux/netdevice.h>
23 #include <linux/skbuff.h>
24 #include <linux/spinlock.h>
25 #include <linux/workqueue.h>
26 #include <linux/kref.h>
27 #include <linux/ktime.h>
28 #include <linux/rbtree.h>
30 #include <linux/socket.h>
33 #include <linux/math64.h>
38 #define ETH_P_COR 0x1022
42 #define PROTO_COR_RAW 0
43 #define PROTO_COR_RDEAMON 1
52 #define COR_PASS_ON_CLOSE 1
54 #define COR_PUBLISH_SERVICE 2
57 #define COR_TOS_DEFAULT 0
58 #define COR_TOS_LOW_LATENCY 1
59 #define COR_TOS_HIGH_LATENCY 2
61 #define COR_PRIORITY 4
63 #define MAX_CONN_CMD_LEN 64
65 #define PACKET_TYPE_NONE 0
66 #define PACKET_TYPE_ANNOUNCE 1
67 #define PACKET_TYPE_CMSG_NOACK 2
68 #define PACKET_TYPE_CMSG_ACKSLOW 3
69 #define PACKET_TYPE_CMSG_ACKFAST 4
70 #define PACKET_TYPE_CONNDATA 64
72 #define PACKET_TYPE_CONNDATA_FLAGS 63
73 #define PACKET_TYPE_CONNDATA_FLAGS_FLUSH 32
74 #define PACKET_TYPE_CONNDATA_FLAGS_WINDOWUSED 31
78 * Announce data format:
80 * is 0, may be increased if the protocol changes
82 * is 0, must be increased if a future version of the protocol is incompatible
83 * to the current version
86 * Data format of the announce packet "data" field:
87 *{command [2] commandlength [2] commanddata [commandlength]}[...]
92 /* ANNCMD_VERSION: version[2] minversion[2] */
93 #define ANNCMD_VERSION 1
95 /* ANNCMD_ADDR: addr[8] */
98 /* ANNCMD_NOADDR: (no params) */
99 #define ANNCMD_NOADDR 3
103 * Kernel packet data - these commands are sent by the neighbor
104 * The end nodes may cause these commands to be sent, but they see them beyond
108 #define KP_ACK_CONN 1
109 #define KP_CONN_DATA 2
112 #define KP_MISC_PADDING 0
115 * KP_INIT_SESSION[1] sessionid[4]
117 * finishes neighbor discovery and starts a session
119 * Before this is received all other commands are ignored. The sessionid is used
120 * to prevent usage of old neighbor discovery data (e.g. addresses)
122 #define KP_MISC_INIT_SESSION 1
124 #define KP_MISC_INIT_SESSION_CMDLEN 5
127 * KP_PING[1] cookie[4]
128 * KP_PONG[1] cookie[4] respdelay_full[4] respdelay_netonly[4]
130 * This is needed to find out whether the other node is reachable. After a new
131 * neighbor is seen, ping requests are sent and the neighbor is only reachable
132 * after a few pongs are received. These requests are also used to find out
133 * whether a neighbor is gone.
136 * The receiver of a ping may delay the sending of the pong e.g. to create
137 * bigger packets. The respdelay is the time in microseconds the packet was
140 #define KP_MISC_PING 2
142 #define KP_MISC_PING_CMDLEN 5
144 #define KP_MISC_PONG 3
146 /* KP_ACK[1] seqno[4] */
147 #define KP_MISC_ACK 4
150 * NOTE on connection ids:
151 * connection ids we receive with most significant bit 0 have been generated by
153 * connection ids we receive with most significant bit 1 have been generated by
156 * ATTENTION: the priority seqno are reversed:
157 * priority seqnos we send are used when we send updates
158 * priority seqnos we received are used when we receive updates
162 * incoming connection
163 * seqno1... used to ack data sent from the side which initiated the connection
164 * seqno2... used to ack data sent to the side which initiated the connection
165 * KP_CONNECT[1] conn_id[4] seqno1[4] seqno2[4] window[1] priority_seqno[0.5]
166 * priority[1.5] is_highlatency[1]
168 #define KP_MISC_CONNECT 5
171 * incoming connection successful,
172 * KP_CONNECT_SUCCESS[1] conn_id[4] window[1]
174 #define KP_MISC_CONNECT_SUCCESS 6
177 * KP_RESET_CONN[1] conn_id[4]
178 * We send this, if there is an established connection we want to close.
180 #define KP_MISC_RESET_CONN 7
183 * KP_SET_MAX_CMSG_DELAY[1] cpacket_ack_fast_delay[4] cpacket_ack_slow_delay[4]
184 * data_ack_delay[4] cmsg_delay[4]
185 * Sent after connecting and at any change
186 * delay is specified in microsecs
188 #define KP_MISC_SET_MAX_CMSG_DELAY 8
190 #define KP_MISC_SET_MAX_CMSG_DELAY_CMDLEN 17
193 * KP_MISC_SET_RECEIVE_MTU[1] receive_mtu[4]
194 * Sent after connecting and at any change
196 #define KP_MISC_SET_RECEIVE_MTU 9
198 #define KP_MISC_SET_RECEIVE_MTU_CMDLEN 5
202 * KP_ACK_CONN[1] conn_id[4] delay_remaining[1] seqno[4] window[2]
203 * bufsize_changerate[1] seqno_ooo[4]
204 * length[1-4] priority_seqno[0.5] priority[1.5] is_highlatency[1]
206 * conn_id is the conn_id we use if we sent something through this conn and
207 * *not* the conn_id that the neighbor used to send us the data
209 * delay_remaining = time the ack_conn could have remained in the queue
210 * 255 means the ack_conn has been sent immediately
211 * 0 means it has been delayed by as much the delay set by SET_MAX_CMSG_DELAY
213 * seqno = the seqno which is expected in the next non-out-of-order packet
215 * window = amount of data which can be sent without receiving the next ack
216 * packets with lower seqno do not overwrite the last window size
217 * The window may also be reduced. However, this only indicates a wish.
218 * Packets must be accepted if they exceed the new window, but not the old
223 * 1...255 = 64*2^((value-1)/7) end result is rounded down to an integer
225 * bufsize_changerate = if the next router(s) is increasing or decreasing its
227 * 0 = for every byte we can send, the end host will receive 2 bytes
228 * 64 = for every byte we can send, the end host will receive 1 byte
229 * 128 = for every 2 byte we can send, the end host will receive 1 byte
232 * seqno_ooo, length = This packet was received out of order. Maybe a previous
233 * packet has been lost. Out of order data should not be retransmitted.
234 * Multiple ooo packets may be merged into a single ack. Ooo packets may be
235 * partially accepted, so that the length does not cover the full packet and/
236 * or the seqno starts in the middle of a packet
238 #define KP_ACK_CONN_FLAGS_SEQNO 1
239 #define KP_ACK_CONN_FLAGS_WINDOW 2
240 #define KP_ACK_CONN_FLAGS_OOO 12 /* 4+8 */
241 #define KP_ACK_CONN_FLAGS_PRIORITY 16
243 static inline __u8
cor_ooolen_to_flags(__u32 len
)
254 static inline int cor_ooolen(__u8 flags
)
256 int len
= ((flags
& KP_ACK_CONN_FLAGS_OOO
) >> 2);
258 if (unlikely(len
== 3))
263 static inline int cor_ack_conn_len(__u8 flags
)
267 if ((flags
& KP_ACK_CONN_FLAGS_SEQNO
) != 0) {
269 if ((flags
& KP_ACK_CONN_FLAGS_WINDOW
) != 0)
273 if (cor_ooolen(flags
) != 0) {
275 len
+= cor_ooolen(flags
);
278 /* delay_remaining */
279 if ((flags
& KP_ACK_CONN_FLAGS_SEQNO
) != 0 ||
280 cor_ooolen(flags
) != 0)
283 if (flags
& KP_ACK_CONN_FLAGS_PRIORITY
)
289 /* KP_CONN_DATA[1] conn_id[4] seqno[4] length[1-2] data[length] */
290 #define KP_CONN_DATA_FLAGS_WINDOWUSED 31
291 #define KP_CONN_DATA_FLAGS_FLUSH 32
293 #define KP_CONN_DATA_MAXLEN (128 + 32767)
295 static inline __u32
get_kp_conn_data_length(__u32 datalen
)
303 static inline __u8
get_kp_code(__u8 maj
, __u8 min
)
305 BUILD_BUG_ON(maj
> 3);
307 return (maj
<< 6) + min
;
310 static inline __u8
kp_maj(__u8 code
)
315 static inline __u8
kp_min(__u8 code
)
322 * Connection data which in interpreted when connection has no target yet
323 * These commands are sent by the end node.
326 * cmd[2] length[1-4] parameter[length]
327 * unrecognized commands are ignored
328 * parameters which are longer than expected are ignored as well
331 #define CD_CONTINUE_ON_ERROR_FLAG 32768
332 #define CD_NOPARAM_FLAG 16384
334 /* outgoing connection: CD_CONNECT_NB[2] length[1-4] addr[8] */
335 #define CD_CONNECT_NB 1
337 /* connection to local open part: CD_CONNECT_PORT[2] length[1-4] port[4] */
338 #define CD_CONNECT_PORT 2
341 * list connected neighbors: CD_LIST_NEIGH[2] length[1-4]
342 * responds with CDR_BINDATA if successful
347 * numfields[1-4] (field[2] fieldlen[1-4])[numfields]
348 * rows[responserows]:
349 * fieldlen[1-4], only if fieldlen in the header was "0"
350 * fielddata[fieldlen]
352 * Future versions may append data to field definition. Clients must silently
353 * discard fields they do not expect.
355 #define CD_LIST_NEIGH 3
360 #define LIST_NEIGH_FIELD_ADDR 1
363 * latency_in_microsecs[1] (64_11 encoding)
364 * Only raw network latency is measured. Delays caused by the priority queues
365 * are *not* included.
367 #define LIST_NEIGH_FIELD_LATENCY 2
370 * list services: CD_LIST_SERVICES[2] length[1-4]
371 * responds with CDR_BINDATA if successful
373 #define CD_LIST_SERVICES 4
376 * list services: CD_LIST_SERVICES[2] length[1-4]
377 * responds with CDR_BINDATA if successful
379 #define CD_LIST_L4PROTOCOLS 5
383 * Connection data response
384 * Format is the same as with connection data
393 * CDR_EXECOK_BINDATA[1] bindatalen[1-4] bindata[bindatalen]
394 * CDR_EXECOK_BINDATA_NORESP[1]
396 #define CDR_EXECOK_BINDATA 2
397 #define CDR_EXECOK_BINDATA_NORESP 3
400 * CDR_EXECFAILED[1] reasoncode[2]
402 #define CDR_EXECFAILED 4
403 #define CDR_EXECFAILED_INVALID_COMMAND 1
404 #define CDR_EXECFAILED_COMMAND_PARSE_ERROR 2
405 #define CDR_EXECFAILED_TEMPORARILY_OUT_OF_RESOURCES 3
406 #define CDR_EXECFAILED_NB_DOESNTEXIST 4
407 #define CDR_EXECFAILED_UNKNOWN_L4PROTOCOL 5
408 #define CDR_EXECFAILED_PORTCLOSED 6
410 #define L4PROTO_STREAM 42399
414 * routing daemon sock
416 * cmdcode[4] length[4] cmddata[length]
418 #define CRD_KTU_SUPPORTEDVERSIONS 1
420 * CRD_KTU_SUPPORTEDVERSIONS[4] length[4] min[4] max[4]
423 #define CRD_KTU_CONNECT 2
425 * CRD_KTU_KTOU_CONNECT[4] length[4] cookie[8] targetlen[4] target[targetlen]
428 #define CRD_UTK_VERSION 1
430 * CRD_UTK_VERSION[4] length[4] version[4]
434 #define CRD_UTK_UP_FLAGS_ADDR 1
435 #define CRD_UTK_UP_FLAGS_INTERFACES 2
437 * CRD_UTK_UP[4] length[4] flags[8]
438 * if CRD_UTK_UP_FLAGS_ADDR
440 * if CRD_UTK_UP_FLAGS_INTERFACES:
441 * num_interfaces[4] (length[4] interface[length])[num_interfaces]
445 #define CRD_UTK_CONNECTERROR 3
447 * CRD_UTK_CONNECTERROR[4] length[4] cookie[8] error[4]
450 #define CRD_UTK_CONNECTERROR_ACCES 1
451 #define CRD_UTK_CONNECTERROR_NETUNREACH 2
452 #define CRD_UTK_CONNECTERROR_TIMEDOUT 3
453 #define CRD_UTK_CONNECTERROR_REFUSED 4
455 #define CONN_MNGD_HEADERLEN 2
456 #define CONN_MNGD_MAX_CTRL_DATALEN 8
457 #define CONN_MNGD_CHECKSUMLEN 4
459 #define CONN_MNGD_HASDATA (1 << 15)
460 #define CONN_MNGD_EOF (1 << 0)
461 #define CONN_MNGD_RCVEND (1 << 1)
462 #define CONN_MNGD_KEEPALIVE_REQ (1 << 2)
463 #define CONN_MNGD_KEEPALIVE_RESP (1 << 3)
464 #define CONN_MNGD_DATALEN 4095
466 #define CONN_MNGD_MAX_SEGMENT_SIZE (CONN_MNGD_DATALEN + 1)
469 struct cor_interface_config
{
474 #define CONGSTATUS_NONE 0
475 #define CONGSTATUS_CONNDATA 1
476 #define CONGSTATUS_ANNOUNCE 2
477 #define CONGSTATUS_RETRANS 3
478 #define CONGSTATUS_KPACKETS 4
480 struct cor_qos_queue
{
485 struct list_head queue_list
;
487 struct net_device
*dev
; /* may not change while queue is in list */
489 struct task_struct
*qos_resume_thread
;
490 wait_queue_head_t qos_resume_wq
;
491 atomic_t qos_resume_scheduled
;
492 unsigned long jiffies_lastprogress
;
494 struct list_head kpackets_waiting
;
495 struct list_head conn_retrans_waiting
;
496 struct list_head announce_waiting
;
497 struct list_head neighbors_waiting
;
498 struct list_head neighbors_waiting_nextpass
;
500 unsigned long jiffies_nb_pass_start
;
501 unsigned long jiffies_nb_lastduration
;
504 unsigned long jiffies_lastdrop
;
509 atomic_t cong_status
;
514 * switch to and from RB_INQUEUE_NBCONGWIN is only done with nbcongwin.lock
517 #define RB_INQUEUE_FALSE 0
518 #define RB_INQUEUE_TRUE 1
519 #define RB_INQUEUE_NBCONGWIN 2 /* only for nb->rb */
520 #define RB_INQUEUE_NBNOTACTIVE 3 /* only for nb->rb */
522 struct cor_resume_block
{
527 #define ANNOUNCE_TYPE_BROADCAST 1
528 #define ANNOUNCE_TYPE_UNICAST 2
530 struct cor_announce_data
{
536 struct net_device
*dev
;
537 char mac
[MAX_ADDR_LEN
];
538 struct delayed_work announce_work
;
539 struct cor_resume_block rb
;
542 struct cor_neighbor_discdata
{
544 unsigned long jiffies_created
;
548 struct net_device
*dev
;
549 char mac
[MAX_ADDR_LEN
];
563 struct cor_ping_cookie
{
564 ktime_t time_created
;
566 unsigned long jiffies_sent
;
569 __u8 pongs
; /* count of pongs for pings sent after this one */
572 #define NEIGHBOR_STATE_INITIAL 0
573 #define NEIGHBOR_STATE_ACTIVE 1
574 #define NEIGHBOR_STATE_STALLED 2
575 #define NEIGHBOR_STATE_KILLED 3
577 #define NBCONGWIN_SHIFT 16
578 #define NBCONGWIN_MUL (1 << NBCONGWIN_SHIFT)
580 struct cor_neighbor
{
581 struct list_head nb_list
;
586 struct net_device
*dev
;
587 char mac
[MAX_ADDR_LEN
];
588 struct cor_qos_queue
*queue
;
592 atomic_t sessionid_rcv_needed
;
593 atomic_t sessionid_snd_needed
;
598 atomic64_t cmsg_timer_timeout
;
599 struct timer_list cmsg_timer
;
600 spinlock_t cmsg_lock
;
601 struct list_head cmsg_queue_pong
;
602 struct list_head cmsg_queue_ack_fast
;
603 struct list_head cmsg_queue_ack_slow
;
604 struct list_head cmsg_queue_ackconn_urgent
;
605 struct list_head cmsg_queue_ackconn
;
606 struct list_head cmsg_queue_conndata_lowlat
;
607 struct list_head cmsg_queue_conndata_highlat
;
608 struct list_head cmsg_queue_other
;
609 __u8 add_retrans_needed
;
610 __u32 kpacket_seqno
; /* not locked, only accessed by single tasklet */
612 struct rb_root pending_conn_resets_rb
;
614 __u32 cmsg_pongslength
;
615 __u32 cmsg_otherlength
;
617 __u32 cmsg_pongscnt
; /* size of queue only, protected by cmsg_lock */
618 atomic_t cmsg_pongs_retrans_cnt
; /* number of retransmits only */
619 atomic_t cmsg_othercnt
; /* size of queue + retransmits */
621 atomic_t cmsg_bulk_readds
;
623 atomic_t cmsg_delay_conndata
;
625 /* not locked, only accessed by single thread */
626 __u8 max_cmsg_delay_sent
;
628 atomic_t rcvmtu_sendneeded
;
631 /* protected by cor_qos_queue->qlock */
632 struct cor_resume_block rb_kp
;
633 struct cor_resume_block rb_cr
;
634 struct cor_resume_block rb
;
635 unsigned long cmsg_send_start_j
;
636 ktime_t cmsg_send_start_kt
;
641 struct list_head lh_nextpass
;
648 atomic64_t data_intransit
;
653 spinlock_t state_lock
;
654 unsigned long last_ping_time
;
655 struct cor_ping_cookie cookies
[PING_COOKIES_PER_NEIGH
];
656 __u32 ping_intransit
;
659 __u64 latency_variance_retrans_us
; /* microsecs */
660 atomic_t latency_retrans_us
; /* microsecs */
661 atomic_t latency_stddev_retrans_us
; /* microsecs */
662 atomic_t latency_advertised_us
; /* microsecs */
663 __u8 rcvmtu_delayed_send_needed
:1,
664 rcvmtu_allowed_countdown
:2;
666 atomic_t max_remote_ack_fast_delay_us
; /* microsecs */
667 atomic_t max_remote_ack_slow_delay_us
; /* microsecs */
668 atomic_t max_remote_ackconn_delay_us
; /* microsecs */
669 atomic_t max_remote_pong_delay_us
; /* microsecs */
671 atomic_t remote_rcvmtu
;
674 unsigned long initial_state_since
;/* initial state */
677 * time of the last sent packet which has been acked or
678 * otherwise responded to (e.g. pong)
680 unsigned long last_roundtrip
;/* active/stalled state */
682 ktime_t last_roundtrip_end
;
686 __u8 str_timer_pending
;
687 struct delayed_work stalltimeout_timer
;
689 spinlock_t connid_lock
;
690 struct rb_root connid_rb
;
692 spinlock_t connid_reuse_lock
;
693 struct rb_root connid_reuse_rb
;
694 struct list_head connid_reuse_list
;
695 __u16 connid_reuse_pingcnt
;
696 __u8 connid_reuse_oom_countdown
;
698 atomic64_t priority_sum
;
701 * connections which receive data from/send data to this node
702 * used when terminating all connections of a neighbor and terminating
703 * inactive connections
705 spinlock_t conn_list_lock
;
706 struct list_head snd_conn_idle_list
;
707 struct list_head snd_conn_busy_list
;
710 * the timer has to be inited when adding the neighbor
712 * add_timer(struct timer_list * timer);
714 spinlock_t retrans_lock
;
715 struct timer_list retrans_timer
;
716 struct list_head retrans_fast_list
;
717 struct list_head retrans_slow_list
;
718 struct rb_root kp_retransmits_rb
;
720 spinlock_t retrans_conn_lock
;
721 struct timer_list retrans_conn_timer
;
722 struct list_head retrans_conn_lowlatency_list
;
723 struct list_head retrans_conn_highlatency_list
;
725 struct work_struct reset_neigh_work
;
728 static inline void cor_nb_kref_get(struct cor_neighbor
*nb
, char *reason
)
730 /* printk(KERN_ERR "cor_nb_kref_get %p %s\n", nb, reason); */
734 void cor_neighbor_free(struct kref
*ref
); /* neigh.c */
736 static inline void cor_nb_kref_put(struct cor_neighbor
*nb
, char *reason
)
738 /* printk(KERN_ERR "cor_nb_kref_put %p %s\n", nb, reason); */
739 kref_put(&nb
->ref
, cor_neighbor_free
);
742 void cor_kreffree_bug(struct kref
*ref
); /* util.c */
744 static inline void cor_nb_kref_put_bug(struct cor_neighbor
*nb
, char *reason
)
746 /* printk(KERN_ERR "cor_nb_kref_put_bug %p %s\n", nb, reason); */
747 kref_put(&nb
->ref
, cor_kreffree_bug
);
751 #define DATABUF_BUF 0
752 #define DATABUF_SKB 1
754 struct cor_data_buf_item
{
755 struct list_head buf_list
;
764 struct cor_connid_reuse_item
{
774 #define SNDSPEED_INIT 0
775 #define SNDSPEED_ACTIVE 1
776 struct cor_snd_speed
{
779 unsigned long jiffies_last_refresh
;
782 /* bytes per second */
789 /* This struct helps keep struct cor_conn small. */
790 struct cor_conn_src_sock_extradata
{
795 struct cor_snd_speed snd_speed
;
797 __be32 keepalive_req_cookie
;
798 __be32 keepalive_resp_cookie
;
800 * keepalive_intransit == 0... last resp received
801 * keepalive_intransit == 1... req sent
803 unsigned long jiffies_keepalive_lastact
;
806 char snd_hdr
[CONN_MNGD_HEADERLEN
];
807 char snd_data
[CONN_MNGD_MAX_CTRL_DATALEN
];
808 char snd_chksum
[CONN_MNGD_CHECKSUMLEN
];
813 char snd_hdr
[CONN_MNGD_HEADERLEN
];
814 char snd_chksum
[CONN_MNGD_CHECKSUMLEN
];
823 * There are 2 conn objects per bi-directional connection. They refer to each
824 * other via the reversedir field.
830 * cn: conn we do not know what is inside
831 * src_in, trgt_unconn, trgt_out, ...: A conn with the specified source or
832 * targettype. In the unlocked case the types are only a guess, because they
833 * might have changed since the last access. After locking the
834 * source/destination parameters have to be checked whether they still are what
835 * we expect. This includes source/targettype, neighbor, conn_id
839 * no suffix: unlocked
841 * _l: this direction is locked
843 * _ll: both directions are locked
845 * _lx: this direction is locked, the other direction may be locked
847 * _o: unlocked, but source or target is known for sure, because an outside
848 * lock is taken; For variables on the heap this means that an outside lock must
849 * be taken before accessing the struct which points to the conn can be
853 * Most fields are protected by rcv_lock. Fields which control
854 * source and destination of the data flow require both directions
855 * to be locked and external references to be cleared before the change can
856 * happen. This includes fields like sourcetype, targettype, connid,
857 * list_heads, ???. In this case the side with is_client == 1 needs to be locked
860 * Some other fields are locked outside (e.g. at struct neighbor).
862 #define SOURCE_UNCONNECTED 0
864 #define SOURCE_SOCK 2
866 #define TARGET_UNCONNECTED 0
868 #define TARGET_SOCK 2
869 #define TARGET_DISCARD 3
871 #define BUFSIZE_NOACTION 0
872 #define BUFSIZE_DECR 1
873 #define BUFSIZE_DECR_FAST 2
874 #define BUFSIZE_INCR 3
875 #define BUFSIZE_INCR_FAST 4
877 #define JIFFIES_LAST_IDLE_SHIFT 8
878 #define BUFSIZE_SHIFT 5
880 #define SOCKTYPE_RAW 0
881 #define SOCKTYPE_MANAGED 1
883 #define RCV_BUF_STATE_OK 0
884 #define RCV_BUF_STATE_INCOMPLETE 1
885 #define RCV_BUF_STATE_RESET 2
887 #define SND_BUF_STATE_INCOMPLETE 0
888 #define SND_BUF_STATE_FILLED 1
895 __u8 is_client
; /* immutable after allocated */
899 * 0... connection active
900 * 1... connection is about to be reset, target does not need to be
902 * 2... connection is reset
908 is_highlatency_send_needed
:1;
914 struct cor_neighbor
*nb
;
916 struct list_head reorder_queue
;
917 __u32 reorder_memused
;
923 /* number of ack sent, not data seqno */
926 __u16 small_ooo_packets
;
930 __u8 inorder_ack_needed
;
934 __u32 window_seqnolimit
;
935 __u32 window_seqnolimit_remote
;
937 /* protected by nb->cmsg_lock */
938 struct list_head acks_pending
;
942 struct cor_conn_src_sock_extradata
*ed
;
945 * cl_list and in_cl_list is protected by cor_bindnodes
947 struct list_head cl_list
;
951 * keepalive_lh and in_keepalive_list is protected by
952 * cor_keepalive_req_lock
954 struct timer_list keepalive_timer
;
955 struct list_head keepalive_lh
;
957 __u8 in_keepalive_list
;
959 /* protected by flushtoconn_oom_lock */
960 struct list_head flushtoconn_oom_lh
;
961 /* protected by conn->rcv_lock */
962 __u8 in_flushtoconn_oom_list
;
965 __u8 keepalive_intransit
:1,
968 send_keepalive_req_needed
:1,
969 send_keepalive_resp_needed
:1,
971 send_rcvend_needed
:1;
973 __u8 last_windowused
;
986 char paramlen_buf
[4];
990 struct cor_neighbor
*nb
;
992 /* list of all connections to this neighbor */
993 struct list_head nb_list
;
994 unsigned long jiffies_last_act
;
995 __u32 nblist_busy_remaining
;
998 __u32 seqno_nextsend
;
1000 __u32 seqno_windowlimit
;
1002 /* protected by nb->retrans_conn_lock, sorted by seqno
1004 struct list_head retrans_list
;
1006 struct cor_resume_block rb
;
1009 unsigned long jiffies_idle_since
;
1012 __u16 maxsend_extra
;
1016 __u8 lastsend_windowused
;
1018 __u8 remote_bufsize_changerate
;
1020 __u8 priority_send_allowed
:1,
1024 __u16 priority_last
:12,
1027 /* protected by nb->retrans_conn_lock */
1028 __u16 retrans_lowwindow
;
1032 __u8 waiting_for_userspace
;
1033 unsigned long waiting_for_userspace_since
;
1035 struct cor_sock
*cs
;
1040 char rcv_hdr
[CONN_MNGD_HEADERLEN
];
1041 char rcv_chksum
[CONN_MNGD_CHECKSUMLEN
];
1044 __u16 rcv_hdr_flags
;
1050 struct list_head items
;
1051 struct cor_data_buf_item
*nextread
;
1056 __u32 read_remaining
;
1058 __u16 next_read_offset
;
1061 __u32 bufspace_accounted
;
1064 __u32 bufsize
; /* 32 ==> 1 byte, see BUFSIZE_SHIFT */
1065 __u32 ignore_rcv_lowbuf
;
1086 static inline __u32
cor_get_connid_reverse(__u32 conn_id
)
1088 return conn_id
^ (1 << 31);
1091 struct cor_conn_bidir
{
1092 struct cor_conn cli
;
1093 struct cor_conn srv
;
1098 static inline struct cor_conn_bidir
*cor_get_conn_bidir(struct cor_conn
*cn
)
1101 return container_of(cn
, struct cor_conn_bidir
, cli
);
1103 return container_of(cn
, struct cor_conn_bidir
, srv
);
1106 static inline struct cor_conn
*cor_get_conn_reversedir(struct cor_conn
*cn
)
1108 if (cn
->is_client
) {
1109 struct cor_conn_bidir
*cnb
= container_of(cn
,
1110 struct cor_conn_bidir
, cli
);
1113 struct cor_conn_bidir
*cnb
= container_of(cn
,
1114 struct cor_conn_bidir
, srv
);
1119 static inline void cor_conn_kref_get(struct cor_conn
*cn
, char *reason
)
1121 /* printk(KERN_ERR "cor_conn_kref_get %p %s\n", cn, reason); */
1122 kref_get(&cor_get_conn_bidir(cn
)->ref
);
1125 void cor_free_conn(struct kref
*ref
); /* conn.c */
1127 static inline void cor_conn_kref_put(struct cor_conn
*cn
, char *reason
)
1129 /* printk(KERN_ERR "cor_conn_kref_put %p %s\n", cn, reason); */
1130 kref_put(&cor_get_conn_bidir(cn
)->ref
, cor_free_conn
);
1133 static inline void cor_conn_kref_put_bug(struct cor_conn
*cn
, char *reason
)
1135 /* printk(KERN_ERR "cor_conn_kref_put_bug %p %s\n", cn, reason); */
1136 kref_put(&cor_get_conn_bidir(cn
)->ref
, cor_kreffree_bug
);
1141 #define CONN_RETRANS_INITIAL 0
1142 #define CONN_RETRANS_SCHEDULED 1
1143 #define CONN_RETRANS_LOWWINDOW 2
1144 #define CONN_RETRANS_SENDING 3
1145 #define CONN_RETRANS_ACKED 4
1146 struct cor_conn_retrans
{
1147 /* timeout_list and conn_list share a single ref */
1149 /* only in timeout_list if state == CONN_RETRANS_SCHEDULED */
1150 struct list_head timeout_list
;
1151 struct list_head conn_list
;
1152 struct cor_conn
*trgt_out_o
;
1158 unsigned long timeout
;
1161 #define RCVOOO_BUF 0
1162 #define RCVOOO_SKB 1
1164 struct list_head lh
;
1171 struct cor_rcvooo_buf
{
1172 struct cor_rcvooo r
;
1177 /* inside skb->cb */
1178 struct cor_skb_procstate
{
1181 struct work_struct work
;
1190 struct cor_rcvooo r
;
1194 struct cor_data_buf_item dbi
;
1199 #define CS_TYPE_UNCONNECTED 0
1200 #define CS_TYPE_LISTENER 1
1201 #define CS_TYPE_CONN_RAW 2
1202 #define CS_TYPE_CONN_MANAGED 3
1204 #define CS_CONNECTSTATE_UNCONNECTED 0
1205 #define CS_CONNECTSTATE_CONNECTING 1
1206 #define CS_CONNECTSTATE_CONNECTED 2
1207 #define CS_CONNECTSTATE_ERROR 3
1210 struct sock sk
; /* must be first */
1215 /* type may not change once it is set to != CS_TYPE_UNCONNECTED */
1219 __u8 publish_service
;
1221 __u8 is_highlatency
;
1227 /* listener is protected by cor_bindnodes */
1228 struct list_head lh
;
1230 __u8 publish_service
;
1233 struct list_head conn_queue
;
1237 struct cor_conn
*src_sock
;
1238 struct cor_conn
*trgt_sock
;
1240 struct cor_data_buf_item
*rcvitem
;
1243 struct cor_sock
*pass_on_close
;
1247 struct cor_sockaddr remoteaddr
;
1249 struct list_head rd_msgs
;
1250 struct list_head crd_lh
;
1269 struct cor_conn
*src_sock
;
1270 struct cor_conn
*trgt_sock
;
1274 __u16 snd_segment_size
;
1276 __u8 send_in_progress
;
1281 __u16 rcvbuf_consumed
;
1286 struct work_struct readfromconn_work
;
1287 atomic_t readfromconn_work_scheduled
;
1289 atomic_t ready_to_read
;
1290 atomic_t ready_to_write
;
1291 atomic_t ready_to_accept
;
1294 #define ACK_NEEDED_NO 0
1295 #define ACK_NEEDED_SLOW 1
1296 #define ACK_NEEDED_FAST 2
1299 extern spinlock_t cor_local_addr_lock
;
1300 extern __u8 cor_local_has_addr
;
1301 extern __be64 cor_local_addr
;
1302 extern __be32 cor_local_addr_sessionid
;
1304 int cor_is_device_configurated(struct net_device
*dev
);
1306 void cor_set_interface_config(struct cor_interface_config
*new_config
,
1307 __u32 new_num_interfaces
, int new_all_interfaces
);
1309 void cor_config_down(void);
1311 int cor_config_up(__u8 has_addr
, __be64 addr
);
1313 int cor_is_clientmode(void);
1316 void cor_qos_set_lastdrop(struct cor_qos_queue
*q
);
1318 #ifdef DEBUG_QOS_SLOWSEND
1319 int _cor_dev_queue_xmit(struct sk_buff
*skb
, int caller
);
/*
 * Non-debug variant: hand the skb straight to the network stack. The
 * caller id is only evaluated by the DEBUG_QOS_SLOWSEND build above.
 */
static inline int _cor_dev_queue_xmit(struct sk_buff *skb, int caller)
{
	return dev_queue_xmit(skb);
}
1327 static inline int cor_dev_queue_xmit(struct sk_buff
*skb
,
1328 struct cor_qos_queue
*q
, int caller
)
1330 int rc
= _cor_dev_queue_xmit(skb
, caller
);
1332 if (unlikely(rc
!= NET_XMIT_SUCCESS
))
1333 cor_qos_set_lastdrop(q
);
1337 void cor_free_qos(struct kref
*ref
);
1339 #ifdef COR_NBCONGWIN
1340 void cor_nbcongwin_data_retransmitted(struct cor_neighbor
*nb
,
1343 void cor_nbcongwin_data_acked(struct cor_neighbor
*nb
, __u64 bytes_acked
);
1345 void cor_nbcongwin_data_sent(struct cor_neighbor
*nb
, __u32 bytes_sent
);
1347 int cor_nbcongwin_send_allowed(struct cor_neighbor
*nb
);
1351 static inline void cor_nbcongwin_data_retransmitted(struct cor_neighbor
*nb
,
1356 static inline void cor_nbcongwin_data_acked(struct cor_neighbor
*nb
,
1361 static inline void cor_nbcongwin_data_sent(struct cor_neighbor
*nb
,
1366 static inline int cor_nbcongwin_send_allowed(struct cor_neighbor
*nb
)
1372 unsigned long cor_get_conn_idletime(struct cor_conn
*trgt_out_lx
);
1374 struct cor_qos_queue
*cor_get_queue(struct net_device
*dev
);
1376 int cor_destroy_queue(struct net_device
*dev
);
1378 int cor_create_queue(struct net_device
*dev
);
1380 #define QOS_RESUME_DONE 0
1381 #define QOS_RESUME_CONG 1
1382 #define QOS_RESUME_NEXTNEIGHBOR 2 /* cor_resume_neighbors() internal */
1383 #define QOS_RESUME_EXIT 3
1385 #define QOS_CALLER_KPACKET 0
1386 #define QOS_CALLER_CONN_RETRANS 1
1387 #define QOS_CALLER_ANNOUNCE 2
1388 #define QOS_CALLER_NEIGHBOR 3
1390 static inline void cor_schedule_qos_resume(struct cor_qos_queue
*q
)
1392 if (atomic_cmpxchg(&q
->qos_resume_scheduled
, 0, 1) == 0) {
1394 wake_up(&q
->qos_resume_wq
);
1398 void cor_qos_enqueue(struct cor_qos_queue
*q
, struct cor_resume_block
*rb
,
1399 unsigned long cmsg_send_start_j
, ktime_t cmsg_send_start_kt
,
1400 int caller
, int from_nbnotactive_resume
);
1402 void cor_qos_remove_conn(struct cor_conn
*trgt_out_l
);
1404 int cor_may_send_announce(struct net_device
*dev
);
1406 struct sk_buff
*cor_create_packet_cmsg(struct cor_neighbor
*nb
, int size
,
1407 gfp_t alloc_flags
, __u32 seqno
);
1409 struct sk_buff
*cor_create_packet(struct cor_neighbor
*nb
, int size
,
1412 struct sk_buff
*cor_create_packet_conndata(struct cor_neighbor
*nb
, int size
,
1413 gfp_t alloc_flags
, __u32 conn_id
, __u32 seqno
, __u8 windowused
,
1416 void cor_qos_enqueue_conn(struct cor_conn
*trgt_out_lx
);
1418 void cor_dev_down(void);
1420 int cor_dev_up(void);
1422 void __exit
cor_dev_exit1(void);
1424 int __init
cor_dev_init(void);
1427 static inline __u16
cor_enc_priority(__u32 value
)
1433 while ((value
>> exponent
) > 255) {
1436 BUG_ON(exponent
> 15);
1438 mantissa
= (value
>> exponent
);
1439 ret
= (mantissa
<< 4) | exponent
;
1444 static inline __u32
cor_dec_priority(__u16 priority
)
1446 __u32 mantissa
= (__u32
) (priority
>> 4);
1447 __u16 exponent
= (priority
& 15);
1449 BUG_ON(priority
> 4095);
1450 return (mantissa
<< exponent
);
1453 static inline __u32
cor_priority_max(void)
1455 return cor_dec_priority(4095);
1458 __u8
__attribute__((const)) cor_enc_log_64_11(__u32 value
);
1460 __u32
__attribute__((const)) cor_dec_log_64_11(__u8 value
);
1462 void cor_swap_list_items(struct list_head
*lh1
, struct list_head
*lh2
);
1464 int __init
cor_util_init(void);
1469 extern atomic_t cor_num_neighs
;
1471 int cor_is_from_nb(struct sk_buff
*skb
, struct cor_neighbor
*nb
);
1473 struct cor_neighbor
*_cor_get_neigh_by_mac(struct net_device
*dev
,
1476 struct cor_neighbor
*cor_get_neigh_by_mac(struct sk_buff
*skb
);
1478 struct cor_neighbor
*cor_find_neigh(__be64 addr
);
1480 void cor_resend_rcvmtu(struct net_device
*dev
);
1482 __u32
cor_generate_neigh_list(char *buf
, __u32 buflen
);
1484 void cor_reset_neighbors(struct net_device
*dev
);
1486 int cor_get_neigh_state(struct cor_neighbor
*nb
);
1488 void cor_ping_resp(struct cor_neighbor
*nb
, __u32 cookie
, __u32 respdelay
);
1490 __u32
cor_add_ping_req(struct cor_neighbor
*nb
, unsigned long *last_ping_time
);
1492 void cor_ping_sent(struct cor_neighbor
*nb
, __u32 cookie
);
1494 void cor_unadd_ping_req(struct cor_neighbor
*nb
, __u32 cookie
,
1495 unsigned long last_ping_time
, int congested
);
1497 #define TIMETOSENDPING_NO 0
1498 #define TIMETOSENDPING_YES 1
1499 #define TIMETOSENDPING_FORCE 2
1500 int cor_time_to_send_ping(struct cor_neighbor
*nb
);
1502 unsigned long cor_get_next_ping_time(struct cor_neighbor
*nb
);
1504 void cor_add_neighbor(struct cor_neighbor_discdata
*nb_dd
);
1506 struct cor_conn
*cor_get_conn(struct cor_neighbor
*nb
, __u32 conn_id
);
1508 int cor_insert_connid(struct cor_neighbor
*nb
, struct cor_conn
*src_in_ll
);
1510 void cor_insert_connid_reuse(struct cor_neighbor
*nb
, __u32 conn_id
);
1512 int cor_connid_alloc(struct cor_neighbor
*nb
, struct cor_conn
*src_in_ll
);
1514 int __init
cor_neighbor_init(void);
1516 void __exit
cor_neighbor_exit2(void);
1518 /* neigh_ann_rcv.c */
1519 int cor_rcv_announce(struct sk_buff
*skb
);
1521 int __init
cor_neigh_ann_rcv_init(void);
1523 void __exit
cor_neigh_ann_rcv_exit2(void);
1525 /* neigh_ann_snd.c */
1526 int _cor_send_announce(struct cor_announce_data
*ann
, int fromqos
, int *sent
);
1528 void cor_announce_data_free(struct kref
*ref
);
1530 void cor_announce_send_start(struct net_device
*dev
, char *mac
, int type
);
1532 void cor_announce_send_stop(struct net_device
*dev
, char *mac
, int type
);
1535 void cor_kernel_packet(struct cor_neighbor
*nb
, struct sk_buff
*skb
,
1539 struct cor_control_msg_out
;
1541 #define ACM_PRIORITY_LOW 1 /* oom recovery easy */
1542 #define ACM_PRIORITY_MED 2 /* oom may cause timeouts */
1543 #define ACM_PRIORITY_HIGH 3 /* cm acks - needed for freeing old cms */
1545 struct cor_control_msg_out
*cor_alloc_control_msg(struct cor_neighbor
*nb
,
1548 void cor_free_control_msg(struct cor_control_msg_out
*cm
);
1550 void cor_retransmit_timerfunc(struct timer_list
*retrans_timer
);
1552 void cor_kern_ack_rcvd(struct cor_neighbor
*nb
, __u32 seqno
);
1554 int cor_send_messages(struct cor_neighbor
*nb
, unsigned long cmsg_send_start_j
,
1555 ktime_t cmsg_send_start_kt
, int *sent
);
1557 void cor_controlmsg_timerfunc(struct timer_list
*cmsg_timer
);
1559 void cor_schedule_controlmsg_timer(struct cor_neighbor
*nb_cmsglocked
);
1561 void cor_send_rcvmtu(struct cor_neighbor
*nb
);
1563 void cor_send_pong(struct cor_neighbor
*nb
, __u32 cookie
, ktime_t ping_rcvtime
);
1565 int cor_send_reset_conn(struct cor_neighbor
*nb
, __u32 conn_id
, int lowprio
);
1567 void cor_send_ack(struct cor_neighbor
*nb
, __u32 seqno
, __u8 fast
);
1569 void cor_send_ack_conn_ifneeded(struct cor_conn
*src_in_l
, __u32 seqno_ooo
,
1572 void cor_send_priority(struct cor_conn
*trgt_out_ll
, __u16 priority
);
1574 void cor_free_ack_conns(struct cor_conn
*src_in_lx
);
1576 void cor_send_connect_success(struct cor_control_msg_out
*cm
, __u32 conn_id
,
1577 struct cor_conn
*src_in
);
1579 void cor_send_connect_nb(struct cor_control_msg_out
*cm
, __u32 conn_id
,
1580 __u32 seqno1
, __u32 seqno2
, struct cor_conn
*src_in_ll
);
1582 void cor_send_conndata(struct cor_control_msg_out
*cm
, __u32 conn_id
,
1583 __u32 seqno
, char *data_orig
, char *data
, __u32 datalen
,
1584 __u8 windowused
, __u8 flush
, __u8 highlatency
,
1585 struct cor_conn_retrans
*cr
);
1587 int __init
cor_kgen_init(void);
1589 void __exit
cor_kgen_exit2(void);
1592 extern struct kmem_cache
*cor_connid_reuse_slab
;
1594 extern atomic_t cor_num_conns
;
1596 extern spinlock_t cor_bindnodes
;
1598 int cor_new_incoming_conn_allowed(struct cor_neighbor
*nb
);
1600 __u32
_cor_conn_refresh_priority(struct cor_conn
*cn_lx
);
1602 __u32
cor_conn_refresh_priority(struct cor_conn
*cn
, int locked
);
1604 void cor_set_conn_is_highlatency(struct cor_conn
*cn
, __u8 is_highlatency
,
1605 int locked
, int call_refresh_priority
);
1607 void cor_set_conn_in_priority(struct cor_neighbor
*nb
, __u32 conn_id
,
1608 struct cor_conn
*src_in
, __u8 priority_seqno
, __u16 priority
,
1609 __u8 is_highlatency
);
1611 void cor_conn_set_last_act(struct cor_conn
*trgt_out_lx
);
1613 int cor_conn_init_out(struct cor_conn
*trgt_unconn_ll
, struct cor_neighbor
*nb
,
1614 __u32 rcvd_connid
, int use_rcvd_connid
);
1616 int cor_conn_init_sock_source(struct cor_conn
*cn
);
1618 void cor_conn_init_sock_target(struct cor_conn
*cn
);
1620 __u32
cor_list_services(char *buf
, __u32 buflen
);
1622 void cor_set_publish_service(struct cor_sock
*cs
, __u8 value
);
1624 void cor_close_port(struct cor_sock
*cs
);
1626 int cor_open_port(struct cor_sock
*cs_l
, __be32 port
);
1628 #define CONNECT_PORT_OK 0
1629 #define CONNECT_PORT_PORTCLOSED 1
1630 #define CONNECT_PORT_TEMPORARILY_OUT_OF_RESOURCES 2
1632 int cor_connect_port(struct cor_conn
*trgt_unconn_ll
, __be32 port
);
1634 int cor_connect_neigh(struct cor_conn
*trgt_unconn_ll
, __be64 addr
);
1636 struct cor_conn_bidir
*cor_alloc_conn(gfp_t allocflags
, __u8 is_highlatency
);
1638 void cor_reset_conn_locked(struct cor_conn_bidir
*cnb_ll
);
1640 void cor_reset_conn(struct cor_conn
*cn
);
1643 void cor_reset_ooo_queue(struct cor_conn
*src_in_lx
);
1645 void cor_drain_ooo_queue(struct cor_conn
*src_in_l
);
1647 void cor_conn_rcv(struct cor_neighbor
*nb
, struct sk_buff
*skb
, char *data
,
1648 __u32 len
, __u32 conn_id
, __u32 seqno
, __u8 windowused
,
1651 int __init
cor_rcv_init(void);
1653 void __exit
cor_rcv_exit2(void);
1655 /* conn_src_sock.c */
1656 void cor_update_src_sock_sndspeed(struct cor_conn
*src_sock_l
,
1659 int cor_sock_sndbufavailable(struct cor_conn
*src_sock_lx
, int for_wakeup
);
1662 #define RC_FTC_OOM 1
1663 #define RC_FTC_ERR 2
1664 int _cor_mngdsocket_flushtoconn(struct cor_conn
*src_sock_l
);
1666 int cor_mngdsocket_flushtoconn_ctrl(struct cor_sock
*cs_m_l
, __u8 send_eof
,
1667 __u8 send_rcvend
, __u8 send_keepalive_resp
,
1668 __be32 keepalive_resp_cookie
);
1670 int cor_mngdsocket_flushtoconn_data(struct cor_sock
*cs_m_l
);
1672 void cor_keepalive_req_timerfunc(struct timer_list
*retrans_conn_timer
);
1674 void cor_keepalive_req_sched_timer(struct cor_conn
*src_sock_lx
);
1676 void cor_keepalive_resp_rcvd(struct cor_sock
*cs_m_l
, __be32 cookie
);
1678 int __init
cor_conn_src_sock_init1(void);
1680 void __exit
cor_conn_src_sock_exit1(void);
1682 /* conn_trgt_unconn.c */
1683 int cor_encode_len(char *buf
, int buflen
, __u32 len
);
1685 void cor_proc_cpacket(struct cor_conn
*trgt_unconn
);
1687 /* conn_trgt_out.c */
1688 void cor_free_connretrans(struct kref
*ref
);
1690 void cor_reschedule_conn_retrans_timer(struct cor_neighbor
*nb_retranslocked
);
1692 void cor_cancel_all_conn_retrans(struct cor_conn
*trgt_out_l
);
1694 int cor_send_retrans(struct cor_neighbor
*nb
, int *sent
);
1696 void cor_retransmit_conn_timerfunc(struct timer_list
*retrans_timer_conn
);
1698 void cor_conn_ack_ooo_rcvd(struct cor_neighbor
*nb
, __u32 conn_id
,
1699 struct cor_conn
*trgt_out
, __u32 seqno_ooo
, __u32 length
,
1700 __u64
*bytes_acked
);
1702 void cor_conn_ack_rcvd(struct cor_neighbor
*nb
, __u32 conn_id
,
1703 struct cor_conn
*trgt_out
, __u32 seqno
, int setwindow
,
1704 __u8 window
, __u8 bufsize_changerate
, __u64
*bytes_acked
);
1706 void cor_schedule_retransmit_conn(struct cor_conn_retrans
*cr
, int connlocked
,
1707 int nbretrans_locked
);
1709 int cor_srcin_buflimit_reached(struct cor_conn
*src_in_lx
);
1711 /* RC_FLUSH_CONN_OUT_SENT | RC_FLUSH_CONN_OUT_{^SENT} */
1712 #define RC_FLUSH_CONN_OUT_OK 1
1713 #define RC_FLUSH_CONN_OUT_SENT_CONG 2 /* cor_flush_out internal only */
1714 #define RC_FLUSH_CONN_OUT_NBNOTACTIVE 3
1715 #define RC_FLUSH_CONN_OUT_CONG 4
1716 #define RC_FLUSH_CONN_OUT_MAXSENT 5
1717 #define RC_FLUSH_CONN_OUT_OOM 6
1719 int _cor_flush_out(struct cor_conn
*trgt_out_lx
, __u32 maxsend
, __u32
*sent
,
1720 int from_qos
, int maxsend_forcedelay
);
1722 static inline int cor_flush_out(struct cor_conn
*trgt_out_lx
, __u32
*sent
)
1724 int rc
= _cor_flush_out(trgt_out_lx
, 1 << 30, sent
, 0, 0);
1726 if (rc
== RC_FLUSH_CONN_OUT_CONG
|| rc
== RC_FLUSH_CONN_OUT_MAXSENT
||
1727 rc
== RC_FLUSH_CONN_OUT_OOM
||
1728 rc
== RC_FLUSH_CONN_OUT_NBNOTACTIVE
)
1729 cor_qos_enqueue_conn(trgt_out_lx
);
1734 int __init
cor_snd_init(void);
1736 void __exit
cor_snd_exit2(void);
1738 /* conn_trgt_sock.c */
1739 void cor_flush_sock_managed(struct cor_conn
*trgt_sock_lx
, int from_recvmsg
,
1740 __u8
*do_wake_sender
);
1742 void cor_flush_sock(struct cor_conn
*trgt_sock_lx
);
1744 /* conn_databuf.c */
1745 extern struct kmem_cache
*cor_data_buf_item_slab
;
1747 void cor_databuf_init(struct cor_conn
*cn_init
);
1749 void cor_bufsize_init(struct cor_conn
*cn_l
, __u32 bufsize
);
1751 int cor_account_bufspace(struct cor_conn
*cn_lx
);
1753 int cor_conn_src_unconn_write_allowed(struct cor_conn
*src_unconn_lx
);
1755 void cor_update_windowlimit(struct cor_conn
*src_in_lx
);
1757 __u8
_cor_bufsize_update_get_changerate(struct cor_conn
*cn_lx
);
1759 static inline int cor_bufsize_initial_phase(struct cor_conn
*cn_lx
)
1761 return unlikely(cn_lx
->bufsize
.bytes_rcvd
!= (1 << 24) - 1 &&
1762 cn_lx
->bufsize
.bytes_rcvd
< cn_lx
->bufsize
.bufsize
);
1765 static inline int cor_ackconn_urgent(struct cor_conn
*cn_lx
)
1767 return cor_bufsize_initial_phase(cn_lx
) ||
1768 cn_lx
->bufsize
.state
== BUFSIZE_INCR_FAST
;
1771 void cor_bufsize_read_to_sock(struct cor_conn
*trgt_sock_lx
);
1773 void cor_databuf_ackdiscard(struct cor_conn
*cn_lx
);
1775 void cor_reset_seqno(struct cor_conn
*cn_l
, __u32 initseqno
);
1777 void cor_databuf_pull(struct cor_conn
*cn_lx
, char *dst
, __u32 len
);
1779 static inline __u32
cor_databuf_trypull(struct cor_conn
*cn_l
, char *dst
,
1782 if (len
> cn_l
->data_buf
.read_remaining
)
1783 len
= cn_l
->data_buf
.read_remaining
;
1784 cor_databuf_pull(cn_l
, dst
, len
);
1788 void cor_databuf_unpull_dpi(struct cor_conn
*trgt_sock
, struct cor_sock
*cs
,
1789 struct cor_data_buf_item
*item
, __u16 next_read_offset
);
1791 void cor_databuf_pull_dbi(struct cor_sock
*cs_rl
, struct cor_conn
*trgt_sock_l
);
1793 void cor_databuf_unpull(struct cor_conn
*trgt_out_l
, __u32 bytes
);
1795 void cor_databuf_pullold(struct cor_conn
*trgt_out_l
, __u32 startpos
, char *dst
,
1798 void cor_databuf_ack(struct cor_conn
*trgt_out_l
, __u32 pos
);
1800 void cor_databuf_ackread(struct cor_conn
*cn_lx
);
1802 __u32
_cor_receive_buf(struct cor_conn
*cn_lx
, char *buf
, __u32 datalen
,
1803 int from_sock
, __u8 windowused
, __u8 flush
);
1805 static inline __u32
cor_receive_buf(struct cor_conn
*cn_lx
, char *buf
,
1806 __u32 datalen
, __u8 windowused
, __u8 flush
)
1808 return _cor_receive_buf(cn_lx
, buf
, datalen
, 0, windowused
, flush
);
1811 static inline __u32
cor_receive_sock(struct cor_conn
*src_sock_l
, char *buf
,
1812 __u32 datalen
, __u8 flush
)
1816 BUG_ON(src_sock_l
->sourcetype
!= SOURCE_SOCK
);
1818 ret
= _cor_receive_buf(src_sock_l
, buf
, datalen
, 1,
1819 src_sock_l
->src
.sock
.last_windowused
, flush
);
1821 if (likely(ret
> 0)) {
1822 __u32 bufsize
= src_sock_l
->bufsize
.bufsize
>> BUFSIZE_SHIFT
;
1823 __u32 bufused
= src_sock_l
->data_buf
.read_remaining
;
1825 if (bufused
>= bufsize
)
1826 src_sock_l
->src
.sock
.last_windowused
= 31;
1827 else if (unlikely(bufused
* 31 > U32_MAX
))
1828 src_sock_l
->src
.sock
.last_windowused
=
1829 bufused
/ ((bufsize
+ 30) / 31);
1831 src_sock_l
->src
.sock
.last_windowused
=
1832 (bufused
* 31) / bufsize
;
1838 __u32
cor_receive_skb(struct cor_conn
*src_in_l
, struct sk_buff
*skb
,
1839 __u8 windowused
, __u8 flush
);
1841 void cor_wake_sender(struct cor_conn
*cn
);
1843 int __init
cor_forward_init(void);
1845 void __exit
cor_forward_exit2(void);
1848 void cor_free_sock(struct kref
*ref
);
1850 int cor_socket_setsockopt_tos(struct socket
*sock
, char __user
*optval
,
1851 unsigned int optlen
);
1853 int cor_socket_setsockopt_priority(struct socket
*sock
, char __user
*optval
,
1854 unsigned int optlen
);
1856 int cor_socket_socketpair(struct socket
*sock1
, struct socket
*sock2
);
1858 int cor_socket_getname(struct socket
*sock
, struct sockaddr
*addr
, int peer
);
1860 int cor_socket_mmap(struct file
*file
, struct socket
*sock
,
1861 struct vm_area_struct
*vma
);
1863 int _cor_createsock(struct net
*net
, struct socket
*sock
, int protocol
,
1864 int kern
, __u8 is_client
);
1866 int __init
cor_sock_init1(void);
1868 int __init
cor_sock_init2(void);
1870 void __exit
cor_sock_exit1(void);
1872 /* sock_rdaemon.c */
1873 int cor_is_device_configurated(struct net_device
*dev
);
1875 int cor_create_rdaemon_sock(struct net
*net
, struct socket
*sock
, int protocol
,
1878 int cor_rdreq_connect(struct cor_sock
*cs
);
1880 void cor_usersock_release(struct cor_sock
*cs
);
1882 int __init
cor_rd_init1(void);
1884 int __init
cor_rd_init2(void);
1886 void __exit
cor_rd_exit1(void);
1888 void __exit
cor_rd_exit2(void);
1891 int cor_create_raw_sock(struct net
*net
, struct socket
*sock
, int protocol
,
1894 /* sock_managed.c */
1895 struct cor_sock
*cor_get_sock_by_cookie(__be64 cookie
);
1897 void __cor_set_sock_connecterror(struct cor_sock
*cs_m_l
, int errorno
);
1899 void _cor_set_sock_connecterror(struct cor_sock
*cs
, int errorno
);
1901 void cor_mngdsocket_chksum(char *hdr
, __u32 hdrlen
, char *data
, __u32 datalen
,
1902 char *chksum
, __u32 chksum_len
);
1904 static inline void cor_set_sock_connecterror(__be64 cookie
, int errorno
)
1906 struct cor_sock
*cs
= cor_get_sock_by_cookie(cookie
);
1909 _cor_set_sock_connecterror(cs
, errorno
);
1910 kref_put(&cs
->ref
, cor_free_sock
);
1914 void cor_mngdsocket_readfromconn_fromatomic(struct cor_sock
*cs
);
1916 void cor_mngdsocket_readfromconn_wq(struct work_struct
*work
);
1918 int cor_create_managed_sock(struct net
*net
, struct socket
*sock
, int protocol
,
1921 int __init
cor_sock_managed_init1(void);
1924 static inline struct cor_skb_procstate
*cor_skb_pstate(struct sk_buff
*skb
)
1926 BUILD_BUG_ON(sizeof(struct cor_skb_procstate
) > sizeof(skb
->cb
));
1927 return (struct cor_skb_procstate
*) &skb
->cb
[0];
1930 static inline struct sk_buff
*cor_skb_from_pstate(struct cor_skb_procstate
*ps
)
1932 return (struct sk_buff
*) (((char *)ps
) - offsetof(struct sk_buff
, cb
));
1935 static inline int cor_qos_fastsend_allowed_conn_retrans(struct cor_neighbor
*nb
)
1937 return atomic_read(&nb
->queue
->cong_status
) < CONGSTATUS_RETRANS
;
1940 static inline int cor_qos_fastsend_allowed_announce(struct net_device
*dev
)
1943 struct cor_qos_queue
*q
= cor_get_queue(dev
);
1948 rc
= atomic_read(&q
->cong_status
) < CONGSTATUS_ANNOUNCE
;
1950 kref_put(&q
->ref
, cor_free_qos
);
1955 static inline int cor_qos_fastsend_allowed_conn(struct cor_conn
*trgt_out_lx
)
1957 struct cor_qos_queue
*q
= trgt_out_lx
->trgt
.out
.nb
->queue
;
1959 return atomic_read(&q
->cong_status
) < CONGSTATUS_CONNDATA
;
1962 static inline __u32
cor_rcv_mtu(struct cor_neighbor
*nb
)
1964 return nb
->dev
->mtu
;
1967 static inline __u32
cor_snd_mtu(struct cor_neighbor
*nb
)
1969 return min((__u32
) nb
->dev
->mtu
,
1970 (__u32
) atomic_read(&nb
->remote_rcvmtu
));
1973 static inline __u32
cor_mss(struct cor_neighbor
*nb
, __u32 l3overhead
)
1975 return cor_snd_mtu(nb
) - LL_RESERVED_SPACE(nb
->dev
) - l3overhead
;
1978 static inline __u32
cor_mss_cmsg(struct cor_neighbor
*nb
)
1980 return cor_mss(nb
, 5);
1983 static inline __u32
cor_mss_conndata(struct cor_neighbor
*nb
, int highlatency
)
1985 __u32 mss_tmp
= cor_mss(nb
, 9);
1988 if (mss_tmp
< 256 || highlatency
|| LOWLATENCY_LOWERMTU
== 0)
1991 for (i
= 256; i
< 4096; i
*= 2) {
1992 if (i
* 2 > mss_tmp
)
1996 return mss_tmp
- mss_tmp
% 4096;
1999 static inline __u32
cor_send_conndata_as_skb(struct cor_neighbor
*nb
,
2002 return size
>= cor_mss_conndata(nb
, 0) / 2 ||
2003 size
> KP_CONN_DATA_MAXLEN
;
2006 static inline long cor_calc_timeout(__u32 latency_us
, __u32 latency_stddev_us
,
2007 __u32 max_remote_ack_delay_us
)
2009 unsigned long addto
;
2011 if (unlikely(unlikely(latency_us
> 1000000000) ||
2012 unlikely(latency_stddev_us
> 500000000) ||
2013 unlikely(max_remote_ack_delay_us
> 1000000000))) {
2014 addto
= msecs_to_jiffies(latency_us
/ 1000 + latency_us
/ 4000 +
2015 latency_stddev_us
/ 333 +
2016 max_remote_ack_delay_us
/ 1000);
2018 addto
= usecs_to_jiffies(latency_us
+ latency_us
/ 4 +
2019 latency_stddev_us
* 3 +
2020 max_remote_ack_delay_us
);
2024 * 2 is added because
2025 * 1) _to_jiffies rounds down, but should round up, so add 1 to
2027 * 2) even if latency is 0, we never want to schedule the retransmit
2028 * to run right now, so add 1 more
2030 return jiffies
+ 2 + addto
;
2033 static inline void cor_put_be64(char *dst
, __be64 value
)
2035 char *p_value
= (char *) &value
;
2037 dst
[0] = p_value
[0];
2038 dst
[1] = p_value
[1];
2039 dst
[2] = p_value
[2];
2040 dst
[3] = p_value
[3];
2041 dst
[4] = p_value
[4];
2042 dst
[5] = p_value
[5];
2043 dst
[6] = p_value
[6];
2044 dst
[7] = p_value
[7];
2047 static inline void cor_put_u64(char *dst
, __u64 value
)
2049 cor_put_be64(dst
, cpu_to_be64(value
));
2052 static inline void cor_put_be32(char *dst
, __be32 value
)
2054 char *p_value
= (char *) &value
;
2056 dst
[0] = p_value
[0];
2057 dst
[1] = p_value
[1];
2058 dst
[2] = p_value
[2];
2059 dst
[3] = p_value
[3];
2062 static inline void cor_put_u32(char *dst
, __u32 value
)
2064 cor_put_be32(dst
, cpu_to_be32(value
));
2067 static inline void cor_put_be16(char *dst
, __be16 value
)
2069 char *p_value
= (char *) &value
;
2071 dst
[0] = p_value
[0];
2072 dst
[1] = p_value
[1];
2075 static inline void cor_put_u16(char *dst
, __u16 value
)
2077 cor_put_be16(dst
, cpu_to_be16(value
));
/*
 * Pull len bytes from the front of the skb; returns a pointer to the pulled
 * data or 0 if the skb is too short.
 */
static inline char *cor_pull_skb(struct sk_buff *skb, unsigned int len)
{
	char *ptr = skb_pull(skb, len);

	if (unlikely(ptr == 0))
		return 0;

	/* NOTE(review): trailing lines lost in extraction; returning ptr is
	 * the only consistent completion — verify.
	 */
	return ptr;
}
2090 static inline __be64
cor_parse_be64(char *buf
)
2096 ((char *)&ret
)[0] = buf
[0];
2097 ((char *)&ret
)[1] = buf
[1];
2098 ((char *)&ret
)[2] = buf
[2];
2099 ((char *)&ret
)[3] = buf
[3];
2100 ((char *)&ret
)[4] = buf
[4];
2101 ((char *)&ret
)[5] = buf
[5];
2102 ((char *)&ret
)[6] = buf
[6];
2103 ((char *)&ret
)[7] = buf
[7];
2108 static inline __u64
cor_parse_u64(char *buf
)
2110 return be64_to_cpu(cor_parse_be64(buf
));
2113 static inline __be32
cor_parse_be32(char *ptr
)
2119 ((char *)&ret
)[0] = ptr
[0];
2120 ((char *)&ret
)[1] = ptr
[1];
2121 ((char *)&ret
)[2] = ptr
[2];
2122 ((char *)&ret
)[3] = ptr
[3];
2127 static inline __u32
cor_parse_u32(char *ptr
)
2129 return be32_to_cpu(cor_parse_be32(ptr
));
2132 static inline __be16
cor_parse_be16(char *ptr
)
2138 ((char *)&ret
)[0] = ptr
[0];
2139 ((char *)&ret
)[1] = ptr
[1];
2144 static inline __u16
cor_parse_u16(char *ptr
)
2146 return be16_to_cpu(cor_parse_be16(ptr
));
2149 static inline __u8
cor_parse_u8(char *ptr
)
2152 return (__u8
) ptr
[0];
2155 static inline __be32
cor_pull_be32(struct sk_buff
*skb
)
2157 return cor_parse_be32(cor_pull_skb(skb
, 4));
2160 static inline __u32
cor_pull_u32(struct sk_buff
*skb
)
2162 return cor_parse_u32(cor_pull_skb(skb
, 4));
2165 static inline __u16
cor_pull_u16(struct sk_buff
*skb
)
2167 return cor_parse_u16(cor_pull_skb(skb
, 2));
2170 static inline __u8
cor_pull_u8(struct sk_buff
*skb
)
2172 char *ptr
= cor_pull_skb(skb
, 1);
2178 static inline int cor_is_conn_in(struct cor_conn
*cn_l
, struct cor_neighbor
*nb
,
2181 if (unlikely(unlikely(cn_l
->sourcetype
!= SOURCE_IN
) ||
2182 unlikely(cn_l
->src
.in
.nb
!= nb
) ||
2183 unlikely(cn_l
->src
.in
.conn_id
!= conn_id
) ||
2184 unlikely(cn_l
->isreset
!= 0)))
2189 static inline int cor_is_src_sock(struct cor_conn
*cn_l
, struct cor_sock
*cs
)
2191 if (unlikely(unlikely(cn_l
->sourcetype
!= SOURCE_SOCK
) ||
2192 unlikely(cn_l
->src
.sock
.ed
->cs
!= cs
)))
2197 static inline int cor_is_trgt_sock(struct cor_conn
*cn_l
, struct cor_sock
*cs
)
2199 if (unlikely(unlikely(cn_l
->targettype
!= TARGET_SOCK
) ||
2200 unlikely(cn_l
->trgt
.sock
.cs
!= cs
)))
2205 #define BUFLEN_MIN 128
2206 #define BUFLEN_MAX 4096
2207 #define PAGESIZE (1 << PAGE_SHIFT)
2209 static inline __u32
cor_buf_optlen(__u32 datalen
, int from_sock
)
2211 __u32 optlen
= BUFLEN_MIN
;
2216 while (optlen
< datalen
&& optlen
< PAGESIZE
&& optlen
< BUFLEN_MAX
)
2217 optlen
= (optlen
<< 1);
2222 static inline void cor_databuf_item_free(struct cor_data_buf_item
*item
)
2224 if (item
->type
== DATABUF_BUF
) {
2226 kmem_cache_free(cor_data_buf_item_slab
, item
);
2227 } else if (item
->type
== DATABUF_SKB
) {
2228 struct sk_buff
*skb
= cor_skb_from_pstate(container_of(item
,
2229 struct cor_skb_procstate
, funcstate
.rcv
.dbi
));
2236 static inline int cor_seqno_eq(__u32 seqno1
, __u32 seqno2
)
2238 return seqno1
== seqno2
;
2241 static inline int cor_seqno_before(__u32 seqno1
, __u32 seqno2
)
2243 return (seqno1
- seqno2
) >= (1LL << 31);
2246 static inline int cor_seqno_before_eq(__u32 seqno1
, __u32 seqno2
)
2248 return cor_seqno_eq(seqno1
, seqno2
) || cor_seqno_before(seqno1
, seqno2
);
2251 static inline int cor_seqno_after(__u32 seqno1
, __u32 seqno2
)
2253 return cor_seqno_before_eq(seqno1
, seqno2
) ? 0 : 1;
2256 static inline int cor_seqno_after_eq(__u32 seqno1
, __u32 seqno2
)
2258 return cor_seqno_before(seqno1
, seqno2
) ? 0 : 1;
2261 static inline int ktime_before_eq(ktime_t time1
, ktime_t time2
)
2263 return ktime_after(time1
, time2
) ? 0 : 1;
2266 static inline int ktime_after_eq(ktime_t time1
, ktime_t time2
)
2268 return ktime_before(time1
, time2
) ? 0 : 1;
2271 static inline __u64
cor_update_atomic_sum(atomic64_t
*atomic_sum
,
2272 __u32 oldvalue
, __u32 newvalue
)
2274 __u64 sum_old
= atomic64_read(atomic_sum
);
2282 BUG_ON(sum
< oldvalue
);
2285 BUG_ON(sum
+ newvalue
< sum
);
2288 cmpxchg_ret
= atomic64_cmpxchg(atomic_sum
, sum_old
, sum
);
2290 if (likely(cmpxchg_ret
== sum_old
))
2293 sum_old
= cmpxchg_ret
;
2299 static inline void cor_sk_write_space(struct cor_sock
*cs
)
2301 atomic_set(&cs
->ready_to_write
, 1);
2303 cs
->sk
.sk_write_space(&cs
->sk
);
2306 static inline void cor_sk_data_ready(struct cor_sock
*cs
)
2308 atomic_set(&cs
->ready_to_read
, 1);
2310 cs
->sk
.sk_data_ready(&cs
->sk
);
2313 /* the other direction may be locked only if called from cor_proc_cpacket */
2314 static inline void cor_flush_buf(struct cor_conn
*cn_lx
)
2316 if (unlikely(cn_lx
->targettype
== TARGET_UNCONNECTED
)) {
2317 cor_proc_cpacket(cn_lx
);
2318 } else if (cn_lx
->targettype
== TARGET_SOCK
) {
2319 cor_flush_sock(cn_lx
);
2320 } else if (cn_lx
->targettype
== TARGET_OUT
) {
2321 __u32 bytessent
= 0;
2323 cor_flush_out(cn_lx
, &bytessent
);
2324 } else if (unlikely(cn_lx
->targettype
== TARGET_DISCARD
)) {
2325 cor_databuf_ackdiscard(cn_lx
);