2 * Connection oriented routing
3 * Copyright (C) 2007-2020 Michael Blizek
5 * This program is free software; you can redistribute it and/or
6 * modify it under the terms of the GNU General Public License
7 * as published by the Free Software Foundation; either version 2
8 * of the License, or (at your option) any later version.
10 * This program is distributed in the hope that it will be useful,
11 * but WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
13 * GNU General Public License for more details.
15 * You should have received a copy of the GNU General Public License
16 * along with this program; if not, write to the Free Software
17 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
21 #include <asm/atomic.h>
23 #include <linux/module.h>
24 #include <linux/types.h>
25 #include <linux/interrupt.h>
26 #include <linux/sched.h>
27 #include <linux/netdevice.h>
28 #include <linux/skbuff.h>
29 #include <linux/spinlock.h>
30 #include <linux/workqueue.h>
31 #include <linux/kref.h>
32 #include <linux/ktime.h>
33 #include <linux/rbtree.h>
35 #include <linux/socket.h>
38 #include <linux/math64.h>
43 #define ETH_P_COR 0x1022
47 #define PROTO_COR_RAW 0
48 #define PROTO_COR_RDEAMON 1
50 #define SOCKADDRTYPE_PORT 1
51 #define SOCKADDRTYPE_ADDRPORT 2
59 #define COR_PASS_ON_CLOSE 1
60 #define COR_PUBLISH_SERVICE 2
62 #define MAX_CONN_CMD_LEN 64
65 #define PACKET_TYPE_ANNOUNCE 1
66 #define PACKET_TYPE_CMSG 2
67 #define PACKET_TYPE_CONNDATA 4
68 #define PACKET_TYPE_CONNDATA_LOWBUFDELAYED 5
69 #define PACKET_TYPE_CONNDATA_FLUSH 6
70 #define PACKET_TYPE_CONNDATA_LOWBUFDELAYED_FLUSH 7
73 * Kernel packet data - these commands are sent by the neighbor
74 * The end nodes may cause these commands to be sent, but they see them beyond
82 * KP_INIT_SESSION[1] sessionid[4]
84 * finishes neighbor discovery and starts a session
86 * Before this is received all other commands are ignored. The sessionid is used
87 * to prevent usage of old neighbor discovery data (e.g. addresses)
89 #define KP_INIT_SESSION 1
91 #define KP_INIT_SESSION_CMDLEN 5
94 * KP_PING[1] cookie[4]
95 * KP_PONG[1] cookie[4] respdelay[4]
97 * This is needed to find out whether the other node is reachable. After a new
98 * neighbor is seen, ping requests are sent and the neighbor is only reachable
99 * after a few pongs are received. These requests are also used to find out
100 * whether a neighbor is gone.
103 * The receiver of a ping may delay the sending of the pong e.g. to create
104 * bigger packets. The respdelay is the time in microseconds the packet was
109 #define KP_PING_CMDLEN 5
113 /* KP_ACK[1] seqno[6] */
117 * KP_ACK_CONN[1] conn_id[4] flags[1] seqno[6] window[1] seqno_ooo[6]
118 * length[1-4] priority_seqno[1] priority[1]
120 * conn_id is the conn_id we use if we sent something through this conn and
121 * *not* the conn_id that the neighbor used to send us the data
123 * flags defines which of the following fields are sent
125 * seqno = the seqno which is expected in the next non-out-of-order packet
127 * window = amount of data which can be sent without receiving the next ack
128 * packets with lower seqno do not overwrite the last window size
129 * The window may also be reduced. However, this only indicates a wish.
130 * Packets must be accepted if they exceed the new window, but not the old
135 * 1...255 = 64*2^((value-1)/7) end result is rounded down to an integer
137 * seqno_ooo, length = This packet was received out of order. Maybe a previous
138 * packet has been lost. Out of order data should not be retransmitted.
139 * Multiple ooo packets may be merged into a single ack. Ooo packets may be
140 * partially accepted, so that the length does not cover the full packet and/
141 * or the seqno starts in the middle of a packet
143 #define KP_ACK_CONN 5
145 #define KP_ACK_CONN_FLAGS_SEQNO 1
146 #define KP_ACK_CONN_FLAGS_WINDOW 2
147 #define KP_ACK_CONN_FLAGS_OOO 12 /* 4+8 */
148 #define KP_ACK_CONN_FLAGS_PRIORITY 16
150 static inline __u8
ooolen_to_flags(__u32 len
)
161 static inline int ooolen(__u8 flags
)
163 int len
= ((flags
& KP_ACK_CONN_FLAGS_OOO
) >> 2);
164 if (unlikely(len
== 3))
169 static inline int ack_conn_len(__u8 flags
)
172 if ((flags
& KP_ACK_CONN_FLAGS_SEQNO
) != 0) {
174 if ((flags
& KP_ACK_CONN_FLAGS_WINDOW
) != 0)
178 if (ooolen(flags
) != 0) {
180 len
+= ooolen(flags
);
183 if (flags
& KP_ACK_CONN_FLAGS_PRIORITY
)
190 * NOTE on connection ids:
191 * connection ids we receive with most significant bit 0 have been generated by
193 * connection ids we receive with most significant bit 1 have been generated by
196 * ATTENTION: the priority seqno are reversed:
197 * priority seqnos we send are used when we send updates
198 * priority seqnos we received are used when we receive updates
202 * incoming connection
203 * seqno1... used to ack data sent from the side which initiated the connection
204 * seqno2... used to ack data sent to the side which initiated the connection
205 * KP_CONNECT[1] conn_id[4] seqno1[6] seqno2[6] window[1] priority_seqno[1]
211 * incoming connection successful,
212 * KP_CONNECT_SUCCESS[1] conn_id[4] window[1]
214 #define KP_CONNECT_SUCCESS 7
216 /* KP_CONN_DATA[1] conn_id[4] seqno[6] length[2] data[length] */
217 #define KP_CONN_DATA 8
218 #define KP_CONN_DATA_LOWBUFDELAYED 9
219 #define KP_CONN_DATA_FLUSH 10
220 #define KP_CONN_DATA_LOWBUFDELAYED_FLUSH 11
222 #define KP_CONN_DATA_CMDLEN 13
225 * KP_RESET_CONN[1] conn_id[4]
226 * We send this, if there is an established connection we want to close.
228 #define KP_RESET_CONN 12
231 * KP_SET_MAX_CMSG_DELAY[1] cpacket_ack_delay[4] data_ack_delay[4] cmsg_delay[4]
232 * Sent after connecting and at any change
233 * delay is specified in microseconds
235 #define KP_SET_MAX_CMSG_DELAY 13
237 #define KP_SET_MAX_CMSG_DELAY_CMDLEN 13
241 * Connection data which is interpreted when the connection has no target yet
242 * These commands are sent by the end node.
245 * cmd[2] length[1-4] parameter[length]
246 * unrecognized commands are ignored
247 * parameters which are longer than expected are ignored as well
250 #define CD_CONTINUE_ON_ERROR_FLAG 32768
252 /* outgoing connection: CD_CONNECT_NB[2] length[1-4]
253 * addrlen[1-4] addr[addrlen] */
254 #define CD_CONNECT_NB 1
256 /* connection to local open part: CD_CONNECT_PORT[2] length[1-4] port[2] */
257 #define CD_CONNECT_PORT 2
260 * CD_LIST_NEIGH sends CDR_BINDATA if the command was successful. The response
264 * numfields[1-4] (field[2] fieldlen[1-4])[numfields]
265 * rows[responserows]:
266 * fieldlen[1-4], only if fieldlen in the header was "0"
267 * fielddata[fieldlen]
269 * Future versions may append data to field definition. Therefore clients must
270 * silently discard data at the end they do not expect.
273 /* list connected neighbors: CD_LIST_NEIGH[2] length[1-4] */
274 #define CD_LIST_NEIGH 3
279 #define LIST_NEIGH_FIELD_ADDR 1
282 * latency_in_microsecs[1] (64_11 encoding)
283 * Only raw network latency is measured. Delays caused by the priority queues
284 * are *not* included.
286 #define LIST_NEIGH_FIELD_LATENCY 2
288 /* list services: CD_LIST_SERVICES[2] length[1-4] */
289 #define CD_LIST_SERVICES 4
293 * Connection data response
294 * Format is the same as with connection data
303 * CDR_EXECFAILED[1] reasoncode[2]
305 #define CDR_EXECFAILED 2
306 #define CDR_EXECFAILED_INVALID_COMMAND 1
307 #define CDR_EXECFAILED_TEMPORARILY_OUT_OF_RESSOURCES 2
308 #define CDR_EXECFAILED_NB_DOESNTEXIST 3
309 #define CDR_EXECFAILED_PORTCLOSED 4
312 * must be sent after CDR_EXEC{OK|FAILED}
313 * CDR_EXEOK_BINDATA[1] bindatalen[1-4] bindata[bindatalen] */
314 #define CDR_BINDATA 3
317 * routing daemon sock
319 * cmdcode[4] length[4] cmddata[length]
323 #define CRD_KTU_SUPPORTEDVERSIONS 1
325 * CRD_KTU_SUPPORTEDVERSIONS[4] length[4] min[4] max[4]
328 #define CRD_KTU_CONNECT 2
330 * CRD_KTU_KTOU_CONNECT[4] length[4] cookie[8] targetlen[4] target[targetlen]
333 #define CRD_UTK_VERSION 1
335 * CRD_UTK_VERSION[4] length[4] version[4]
341 * CRD_UTK_UP[4] length[4] flags[8] addrlen[4] addr[addrlen]
346 #define CRD_UTK_CONNECTERROR 3
348 * CRD_UTK_CONNECTERROR[4] length[4] cookie[8] error[4]
351 #define CRD_UTK_CONNECTERROR_ACCES 1
352 #define CRD_UTK_CONNECTERROR_NETUNREACH 2
353 #define CRD_UTK_CONNECTERROR_TIMEDOUT 3
354 #define CRD_UTK_CONNECTERROR_REFUSED 4
356 #define CONN_MNGD_HEADERLEN 2
357 #define CONN_MNGD_CHECKSUMLEN 4
359 #define CONN_MNGD_HASDATA (1 << 15)
360 #define CONN_MNGD_EOF (1 << 0)
361 #define CONN_MNGD_RCVEND (1 << 1)
362 #define CONN_MNGD_DATALEN 4095
364 #define CONN_MNGD_MAX_SEGMENT_SIZE (CONN_MNGD_DATALEN + 1)
366 #define PRIORITY_MAX 15384774
371 /* result codes for rcv.c/proc_packet */
373 #define RC_FINISHED 1
375 #define RC_RCV1_ANNOUNCE 2
376 #define RC_RCV1_KERNEL 3
377 #define RC_RCV1_CONN 4
380 #define CONGSTATUS_NONE 0
381 #define CONGSTATUS_CONNDATA 1
382 #define CONGSTATUS_ANNOUNCE 2
383 #define CONGSTATUS_RETRANS 3
384 #define CONGSTATUS_KPACKETS 4
391 struct list_head queue_list
;
393 struct net_device
*dev
; /* may not change while queue is in list */
395 struct timer_list qos_resume_timer
;
396 struct tasklet_struct qos_resume_task
;
397 int qos_resume_scheduled
;
398 unsigned long jiffies_lastprogress
;
400 struct list_head kpackets_waiting
;
401 struct list_head conn_retrans_waiting
;
402 struct list_head announce_waiting
;
403 struct list_head neighbors_waiting
;
405 unsigned long jiffies_lastdrop
;
410 atomic_t cong_status
;
415 * switch to and from RB_INQUEUE_NBCONGWIN is only done with nbcongwin.lock
418 #define RB_INQUEUE_FALSE 0
419 #define RB_INQUEUE_TRUE 1
420 #define RB_INQUEUE_NBCONGWIN 2 /* only for nb->rb */
427 #define ANNOUNCE_TYPE_BROADCAST 1
428 #define ANNOUNCE_TYPE_UNICAST 2
430 struct announce_data
{
436 struct net_device
*dev
;
437 char mac
[MAX_ADDR_LEN
];
438 struct delayed_work announce_work
;
439 struct resume_block rb
;
443 ktime_t time_created
;
445 unsigned long jiffies_sent
;
448 __u8 pongs
; /* count of pongs for pings sent after this one */
451 #define NEIGHBOR_STATE_INITIAL 0
452 #define NEIGHBOR_STATE_ACTIVE 1
453 #define NEIGHBOR_STATE_STALLED 2
454 #define NEIGHBOR_STATE_KILLED 3
456 #define NBCONGWIN_SHIFT 16
457 #define NBCONGWIN_MUL (1 << NBCONGWIN_SHIFT)
460 struct list_head nb_list
;
464 struct net_device
*dev
;
465 char mac
[MAX_ADDR_LEN
];
466 struct qos_queue
*queue
;
470 atomic_t sessionid_rcv_needed
;
471 atomic_t sessionid_snd_needed
;
476 struct timer_list cmsg_timer
;
477 spinlock_t cmsg_lock
;
478 struct list_head cmsg_queue_pong
;
479 struct list_head cmsg_queue_ack
;
480 struct list_head cmsg_queue_ackconn
;
481 struct list_head cmsg_queue_conndata_lowlat
;
482 struct list_head cmsg_queue_conndata_highlat
;
483 struct list_head cmsg_queue_other
;
484 __u8 add_retrans_needed
;
485 __u64 kpacket_seqno
; /* not locked, only accessed by single tasklet */
487 struct rb_root pending_conn_resets_rb
;
489 unsigned long timeout
;
491 __u32 cmsg_pongslength
;
492 __u32 cmsg_otherlength
;
494 __u32 cmsg_pongscnt
; /* size of queue only, protected by cmsg_lock */
495 atomic_t cmsg_pongs_retrans_cnt
; /* number of retransmits only */
496 atomic_t cmsg_othercnt
; /* size of queue + retransmits */
498 atomic_t cmsg_bulk_readds
;
500 atomic_t cmsg_delay_conndata
;
502 /* not locked, only accessed by single tasklet */
503 __u8 max_cmsg_delay_sent
;
505 /* protected by qos_queue->qlock */
506 struct resume_block rb_kp
;
507 struct resume_block rb_cr
;
508 struct resume_block rb
;
513 struct list_head lh_nextpass
;
520 atomic64_t data_intransit
;
525 spinlock_t state_lock
;
526 unsigned long last_ping_time
;
528 struct ping_cookie cookies
[PING_COOKIES_PER_NEIGH
];
529 __u32 ping_intransit
;
532 __u64 latency_variance_retrans_us
; /* microsecs */
533 atomic_t latency_retrans_us
; /* microsecs */
534 atomic_t latency_stddev_retrans_us
; /* microsecs */
535 atomic_t latency_advertised_us
; /* microsecs */
536 atomic_t max_remote_ack_delay_us
; /* microsecs */
537 atomic_t max_remote_ackconn_delay_us
; /* microsecs */
538 atomic_t max_remote_other_delay_us
; /* microsecs */
541 unsigned long initial_state_since
;/* initial state */
544 * time of the last sent packet which has been acked or
545 * otherwise responded to (e.g. pong)
547 unsigned long last_roundtrip
;/* active/stalled state */
549 ktime_t last_roundtrip_end
;
553 __u8 str_timer_pending
;
554 struct delayed_work stalltimeout_timer
;
556 spinlock_t connid_lock
;
557 struct rb_root connid_rb
;
559 spinlock_t connid_reuse_lock
;
560 struct rb_root connid_reuse_rb
;
561 struct list_head connid_reuse_list
;
562 __u16 connid_reuse_pingcnt
;
564 atomic64_t priority_sum
;
567 * connections which receive data from/send data to this node
568 * used when terminating all connections of a neighbor and terminating
569 * inactive connections
571 spinlock_t conn_list_lock
;
572 struct list_head rcv_conn_list
;
574 spinlock_t stalledconn_lock
;
575 struct work_struct stalledconn_work
;
576 __u8 stalledconn_work_scheduled
;
577 struct list_head stalledconn_list
;
580 * the timer has to be inited when adding the neighbor
582 * add_timer(struct timer_list * timer);
584 spinlock_t retrans_lock
;
585 struct timer_list retrans_timer
;
586 struct list_head retrans_list
;
587 struct rb_root kp_retransmits_rb
;
589 spinlock_t retrans_conn_lock
;
590 struct timer_list retrans_conn_timer
;
591 struct list_head retrans_conn_list
;
594 #define DATABUF_BUF 0
595 #define DATABUF_SKB 1
597 struct data_buf_item
{
598 struct list_head buf_list
;
607 struct connid_reuse_item
{
617 #define SNDSPEED_INIT 0
618 #define SNDSPEED_ACTIVE 1
622 unsigned long jiffies_last_refresh
;
625 /* bytes per second */
633 * There are 2 conn objects per bi-directional connection. They refer to each
634 * other via the reversedir field.
640 * cn: conn we have no clue what is inside
641 * src_in, trgt_unconn, trgt_out, ...: A conn with the specified source or
642 * targettype. In the unlocked case the types are actually just a guess,
643 * because they could have changed since the last access. After locking the
644 * source/destination parameters have to be checked whether they still are
645 * what we expect. This includes source/targettype, neighbor, conn_id
647 * Exception: they may not change after they are set to source/target sock
648 * until the socket is released.
652 * no suffix: unlocked
654 * _l: this direction is locked
656 * _ll: both directions are locked
658 * _lx: this direction is locked, the other direction may be locked
660 * _o: unlocked, but source or target is known for sure, because an outside
661 * lock is taken; For variables on the heap this means that an outside lock must
662 * be taken before accessing the struct which points to the conn can be
668 * The following fields are immutable after the conn has been allocated:
669 * is_client, reversedir
671 * Most fields are protected by rcv_lock. Fields which control the
672 * source and destination of the data flow require both directions
673 * to be locked and external references to be cleared before the change can
674 * happen. This includes fields like sourcetype, targettype, connid,
675 * list_heads, htab_entries, ???. In this case the side with is_client == 1
676 * needs to be locked first. Changes to conn_id and neighbor also require
677 * removing the conn from the htables first.
679 * Some other fields are locked outside (e.g. at struct neighbor).
681 #define SOURCE_UNCONNECTED 0
683 #define SOURCE_SOCK 2
685 #define TARGET_UNCONNECTED 0
687 #define TARGET_SOCK 2
688 #define TARGET_DISCARD 3
690 #define BUFSIZE_NOACTION 0
691 #define BUFSIZE_DECR 1
692 #define BUFSIZE_DECR_FAST 2
693 #define BUFSIZE_INCR 3
694 #define BUFSIZE_INCR_FAST 4
696 #define BUFSIZE_SHIFT 5
698 #define SOCKTYPE_RAW 0
699 #define SOCKTYPE_MANAGED 1
701 #define RCV_BUF_STATE_OK 0
702 #define RCV_BUF_STATE_INCOMPLETE 1
703 #define RCV_BUF_STATE_RESET 2
714 * 0... connection active
715 * 1... connection is about to be reset, target does not need to be
717 * 2... connection is reset
718 * 3... connection is reset + no pointers to "struct conn *reversedir"
719 * remaining except from this conn
732 /* list of all connections from this neighbor */
733 struct list_head nb_list
;
735 struct list_head reorder_queue
;
736 __u32 reorder_memused
;
739 struct connid_reuse_item
*cir
;
743 /* number of ack sent, not data seqno */
746 __u16 small_ooo_packets
;
750 __u8 inorder_ack_needed
;
754 __u64 window_seqnolimit
;
755 __u64 window_seqnolimit_remote
;
757 /* protected by nb->cmsg_lock */
758 struct list_head acks_pending
;
760 unsigned long jiffies_last_act
;
766 struct list_head cl_list
;
770 struct snd_speed snd_speed
;
781 char paramlen_buf
[4];
786 /* protected by nb->retrans_conn_lock, sorted by seqno
788 struct list_head retrans_list
;
790 /* protected by nb->stalledconn_lock */
791 struct list_head nbstalled_lh
;
794 __u64 seqno_nextsend
;
796 __u64 seqno_windowlimit
;
798 struct resume_block rb
;
807 __u8 priority_send_allowed
;
809 __u8 windowlimit_reached
;
813 /* protected by nb->retrans_conn_lock */
814 __u16 retrans_lowwindow
;
818 __u8 waiting_for_userspace
;
819 unsigned long waiting_for_userspace_since
;
826 char rcv_hdr
[CONN_MNGD_HEADERLEN
];
827 char rcv_chksum
[CONN_MNGD_CHECKSUMLEN
];
836 struct list_head items
;
837 struct data_buf_item
*nextread
;
842 __u32 read_remaining
;
844 __u16 next_read_offset
;
847 __u32 bufspace_accounted
;
850 __u32 bufsize
; /* 32 ==> 1 byte, see BUFSIZE_SHIFT */
851 __u32 ignore_rcv_lowbuf
;
870 struct conn
*reversedir
;
873 #define CONN_RETRANS_INITIAL 0
874 #define CONN_RETRANS_SCHEDULED 1
875 #define CONN_RETRANS_LOWWINDOW 2
876 #define CONN_RETRANS_SENDING 3
877 #define CONN_RETRANS_ACKED 4
878 struct conn_retrans
{
879 /* timeout_list and conn_list share a single ref */
881 /* only in timeout_list if state == CONN_RETRANS_SCHEDULED */
882 struct list_head timeout_list
;
883 struct list_head conn_list
;
884 struct conn
*trgt_out_o
;
888 __u8 snd_delayed_lowbuf
;
890 unsigned long timeout
;
909 struct skb_procstate
{
912 struct work_struct work
;
925 struct data_buf_item dbi
;
930 #define CS_TYPE_UNCONNECTED 0
931 #define CS_TYPE_LISTENER 1
932 #define CS_TYPE_CONN_RAW 2
933 #define CS_TYPE_CONN_MANAGED 3
935 #define CS_CONNECTSTATE_UNCONNECTED 0
936 #define CS_CONNECTSTATE_CONNECTING 1
937 #define CS_CONNECTSTATE_CONNECTED 2
938 #define CS_CONNECTSTATE_ERROR 3
940 #define CS_SHUTDOWN_SHUTDOWN_RD (1 << 0)
941 #define CS_SHUTDOWN_SHUTDOWN_WR (1 << 1)
942 #define CS_SHUTDOWN_SENT_EOF (1 << 2)
943 #define CS_SHUTDOWN_SENT_RCVEND (1 << 3)
944 #define CS_SHUTDOWN_RCVD_EOF (1 << 4)
945 #define CS_SHUTDOWN_RCVD_RCVEND (1 << 5)
948 struct sock sk
; /* must be first */
953 /* type may not change once it is set to != CS_TYPE_UNCONNECTED */
957 __u8 publish_service
;
961 /* listener is protected by cor_bindnodes */
964 __u8 publish_service
;
967 struct list_head conn_queue
;
971 struct conn
*src_sock
;
972 struct conn
*trgt_sock
;
974 struct data_buf_item
*rcvitem
;
977 __u8 snd_delayed_lowbuf
;
979 struct cor_sock
*pass_on_close
;
983 struct cor_sockaddr remoteaddr
;
985 struct list_head rd_msgs
;
986 struct list_head crd_lh
;
997 __u8 snd_delayed_lowbuf
;
1002 struct conn
*src_sock
;
1003 struct conn
*trgt_sock
;
1005 char snd_hdr
[CONN_MNGD_HEADERLEN
];
1006 char snd_chksum
[CONN_MNGD_CHECKSUMLEN
];
1008 __u16 snd_segment_size
;
1009 __u16 snd_hdr_flags
;
1012 /* protected by cor_sock->lock */
1013 __u8 in_flushtoconn_oom_list
;
1014 /* protected by flushtoconn_oom_lock */
1015 struct list_head flushtoconn_oom_lh
;
1018 char rcv_hdr
[CONN_MNGD_HEADERLEN
];
1019 char rcv_chksum
[CONN_MNGD_CHECKSUMLEN
];
1021 __u16 rcv_hdr_flags
;
1023 __u16 rcvbuf_consumed
;
1028 struct work_struct readfromconn_work
;
1029 atomic_t readfromconn_work_scheduled
;
1031 atomic_t ready_to_read
;
1032 atomic_t ready_to_write
;
1033 atomic_t ready_to_accept
;
1039 extern atomic_t num_conns
;
1041 extern spinlock_t cor_bindnodes
;
1043 extern struct conn
*get_conn(struct neighbor
*nb
, __u32 conn_id
);
1045 extern void delete_connid_reuse_items(struct neighbor
*nb
);
1047 extern void connid_used_pingsuccess(struct neighbor
*nb
);
1049 extern void _set_last_act(struct conn
*src_in_l
);
1051 extern void free_conn(struct kref
*ref
);
1053 extern int conn_init_out(struct conn
*trgt_unconn_ll
, struct neighbor
*nb
,
1054 __u32 rcvd_connid
, int use_rcvd_connid
);
1056 extern void conn_init_sock_source(struct conn
*cn
);
1058 extern void conn_init_sock_target(struct conn
*cn
);
1060 extern __u32
list_services(char *buf
, __u32 buflen
);
1062 extern void set_publish_service(struct cor_sock
*cs
, __u8 value
);
1064 extern void close_port(struct cor_sock
*cs
);
1066 extern int open_port(struct cor_sock
*cs_l
, __be16 port
);
1068 extern int connect_port(struct conn
*trgt_unconn_ll
, __be16 port
);
1070 extern int connect_neigh(struct conn
*trgt_unconn_ll
, char *addr
,
1073 extern struct conn
* alloc_conn(gfp_t allocflags
);
1075 extern void reset_conn_locked(struct conn
*cn_ll
);
1077 extern void reset_conn(struct conn
*cn
);
1080 extern __u8
__attribute__((const)) enc_log_256_16(__u32 value
);
1082 extern __u32
__attribute__((const)) dec_log_256_16(__u8 value
);
1084 extern __u8
__attribute__((const)) enc_log_64_11(__u32 value
);
1086 extern __u32
__attribute__((const)) dec_log_64_11(__u8 value
);
1088 extern __u8
__attribute__((const)) enc_log_64_7(__u64 value
);
1090 extern __u64
__attribute__((const)) dec_log_64_7(__u8 value
);
1092 extern void kreffree_bug(struct kref
*ref
);
1094 extern int __init
cor_util_init(void);
1097 extern int newconn_checkpriority(struct neighbor
*nb
, __u8 priority
);
1099 extern __u32
refresh_conn_priority(struct conn
*cn
, int locked
);
1101 extern void set_conn_in_priority(struct neighbor
*nb
, __u32 conn_id
,
1102 struct conn
*src_in
, __u8 priority_seqno
, __u8 priority
);
1104 extern void connreset_priority(struct conn
*cn
);
1106 extern int __init
credits_init(void);
1109 extern void neighbor_free(struct kref
*ref
);
1111 extern int is_from_nb(struct sk_buff
*skb
, struct neighbor
*nb
);
1113 extern struct neighbor
*get_neigh_by_mac(struct sk_buff
*skb
);
1115 extern struct neighbor
*find_neigh(char *addr
, __u16 addrlen
);
1117 extern __u32
generate_neigh_list(char *buf
, __u32 buflen
);
1119 extern int get_neigh_state(struct neighbor
*nb
);
1121 extern void ping_resp(struct neighbor
*nb
, __u32 cookie
, __u32 respdelay
);
1123 extern __u32
add_ping_req(struct neighbor
*nb
, unsigned long *last_ping_time
,
1126 extern void ping_sent(struct neighbor
*nb
, __u32 cookie
);
1128 extern void unadd_ping_req(struct neighbor
*nb
, __u32 cookie
,
1129 unsigned long last_ping_time
, int congested
);
1131 #define TIMETOSENDPING_NO 0
1132 #define TIMETOSENDPING_YES 1
1133 #define TIMETOSENDPING_FORCE 2
1134 extern int time_to_send_ping(struct neighbor
*nb
);
1136 extern unsigned long get_next_ping_time(struct neighbor
*nb
);
1138 extern int force_ping(struct neighbor
*nb
);
1140 extern int rcv_announce(struct sk_buff
*skb
);
1142 extern int _send_announce(struct announce_data
*ann
, int fromqos
, int *sent
);
1144 extern void announce_data_free(struct kref
*ref
);
1146 extern void announce_send_stop(struct net_device
*dev
, char *mac
, int type
);
1148 extern void cor_neighbor_down(void);
1150 extern int cor_neighbor_up(char *addr2
, __u32 addrlen2
);
1152 extern int is_clientmode(void);
1154 extern int __init
cor_neighbor_init(void);
1157 extern void reset_ooo_queue(struct conn
*src_in_lx
);
1159 extern void drain_ooo_queue(struct conn
*src_in_l
);
1161 extern void conn_rcv(struct neighbor
*nb
, struct sk_buff
*skb
, char *data
, __u32 len
,
1162 __u32 conn_id
, __u64 seqno
, int rcv_delayed_lowbuf
, __u8 flush
);
1164 extern void cor_rcv_down(void);
1166 extern void cor_rcv_up(void);
1168 extern int __init
cor_rcv_init(void);
1170 /* kpacket_parse.c */
1171 extern void kernel_packet(struct neighbor
*nb
, struct sk_buff
*skb
,
1175 struct control_msg_out
;
1177 #define ACM_PRIORITY_LOW 1 /* oom recovery easy */
1178 #define ACM_PRIORITY_MED 2 /* oom may cause timeouts */
1179 #define ACM_PRIORITY_HIGH 3 /* cm acks - needed for freeing old cms */
1181 extern struct control_msg_out
*alloc_control_msg(struct neighbor
*nb
,
1184 extern void free_control_msg(struct control_msg_out
*cm
);
1186 extern void retransmit_timerfunc(struct timer_list
*retrans_timer
);
1188 extern void kern_ack_rcvd(struct neighbor
*nb
, __u64 seqno
);
1190 extern int send_messages(struct neighbor
*nb
, int *sent
);
1192 extern void controlmsg_timerfunc(struct timer_list
*cmsg_timer
);
1194 extern void schedule_controlmsg_timer(struct neighbor
*nb_cmsglocked
);
1196 extern void send_pong(struct neighbor
*nb
, __u32 cookie
);
1198 extern int send_reset_conn(struct neighbor
*nb
, __u32 conn_id
, int lowprio
);
1200 extern void send_ack(struct neighbor
*nb
, __u64 seqno
);
1202 extern void send_ack_conn_ifneeded(struct conn
*src_in_l
, __u64 seqno_ooo
,
1205 extern void send_priority(struct conn
*trgt_out_ll
, int force
,
1208 extern void free_ack_conns(struct conn
*src_in_lx
);
1210 extern void send_connect_success(struct control_msg_out
*cm
, __u32 conn_id
,
1211 struct conn
*src_in
);
1213 extern void send_connect_nb(struct control_msg_out
*cm
, __u32 conn_id
,
1214 __u64 seqno1
, __u64 seqno2
, struct conn
*src_in_ll
);
1216 extern void send_conndata(struct control_msg_out
*cm
, __u32 conn_id
,
1217 __u64 seqno
, char *data_orig
, char *data
, __u32 datalen
,
1218 __u8 snd_delayed_lowbuf
, __u8 flush
, __u8 highlatency
,
1219 struct conn_retrans
*cr
);
1221 extern int __init
cor_kgen_init(void);
1223 /* cpacket_parse.c */
1224 extern int encode_len(char *buf
, int buflen
, __u32 len
);
1226 extern void proc_cpacket(struct conn
*trgt_unconn
);
1228 extern int __init
cor_cpacket_init(void);
1231 extern void qos_set_lastdrop(struct qos_queue
*q
);
1233 #ifdef DEBUG_QOS_SLOWSEND
1234 extern int _cor_dev_queue_xmit(struct sk_buff
*skb
, int caller
);
1236 static inline int _cor_dev_queue_xmit(struct sk_buff
*skb
, int caller
)
1238 return dev_queue_xmit(skb
);
1242 static inline int cor_dev_queue_xmit(struct sk_buff
*skb
, struct qos_queue
*q
,
1245 int rc
= _cor_dev_queue_xmit(skb
, caller
);
1246 if (unlikely(rc
!= NET_XMIT_SUCCESS
))
1247 qos_set_lastdrop(q
);
1251 extern void free_qos(struct kref
*ref
);
1253 #ifdef COR_NBCONGWIN
1254 extern void nbcongwin_data_acked(struct neighbor
*nb
, __u64 bytes_acked
);
1257 extern struct qos_queue
*get_queue(struct net_device
*dev
);
1259 extern int destroy_queue(struct net_device
*dev
);
1261 extern int create_queue(struct net_device
*dev
);
1263 #define QOS_RESUME_DONE 0
1264 #define QOS_RESUME_CONG 1
1265 #define QOS_RESUME_NEXTNEIGHBOR 2 /* resume_neighbors() internal */
1267 #define QOS_CALLER_KPACKET 0
1268 #define QOS_CALLER_CONN_RETRANS 1
1269 #define QOS_CALLER_ANNOUNCE 2
1270 #define QOS_CALLER_NEIGHBOR 3
1272 extern void qos_enqueue(struct qos_queue
*q
, struct resume_block
*rb
,
1275 extern void qos_remove_conn(struct conn
*trgt_out_l
);
1277 extern int may_send_announce(struct net_device
*dev
);
1279 extern struct sk_buff
*create_packet_cmsg(struct neighbor
*nb
, int size
,
1280 gfp_t alloc_flags
, __u64 seqno
);
1282 extern void reschedule_conn_retrans_timer(struct neighbor
*nb_retranslocked
);
1284 extern void cancel_all_conn_retrans(struct conn
*trgt_out_l
);
1286 extern void retransmit_conn_taskfunc(unsigned long nb
);
1288 extern void retransmit_conn_timerfunc(struct timer_list
*retrans_timer_conn
);
1290 extern void conn_ack_ooo_rcvd(struct neighbor
*nb
, __u32 conn_id
,
1291 struct conn
*trgt_out
, __u64 seqno_ooo
, __u32 length
,
1292 __u64
*bytes_acked
);
1294 extern void conn_ack_rcvd(struct neighbor
*nb
, __u32 conn_id
,
1295 struct conn
*trgt_out
, __u64 seqno
, int setwindow
, __u8 window
,
1296 __u64
*bytes_acked
);
1298 extern void schedule_retransmit_conn(struct conn_retrans
*cr
, int connlocked
,
1299 int nbretrans_locked
);
1301 extern int srcin_buflimit_reached(struct conn
*src_in_lx
);
1303 /* RC_FLUSH_CONN_OUT_SENT | RC_FLUSH_CONN_OUT_{^SENT} */
1304 #define RC_FLUSH_CONN_OUT_OK 1
1305 #define RC_FLUSH_CONN_OUT_SENT_CONG 2 /* flush_out internal only */
1306 #define RC_FLUSH_CONN_OUT_NBNOTACTIVE 3
1307 #define RC_FLUSH_CONN_OUT_CONG 4
1308 #define RC_FLUSH_CONN_OUT_MAXSENT 5
1309 #define RC_FLUSH_CONN_OUT_OOM 6
1311 extern int flush_out(struct conn
*trgt_out_lx
, __u32
*sent
);
1313 extern void resume_nbstalled_conns(struct work_struct
*work
);
1315 extern int __init
cor_snd_init(void);
1318 extern struct kmem_cache
*data_buf_item_slab
;
1320 extern void databuf_init(struct conn
*cn_init
);
1322 extern void bufsize_init(struct conn
*cn_l
, __u32 bufsize
);
1324 extern int account_bufspace(struct conn
*cn_lx
);
1326 extern int cpacket_write_allowed(struct conn
*src_unconn_lx
);
1328 extern void update_windowlimit(struct conn
*src_in_lx
);
1330 extern void bufsize_read_to_sock(struct conn
*trgt_sock_lx
);
1332 extern void databuf_ackdiscard(struct conn
*cn_lx
);
1334 extern void reset_seqno(struct conn
*cn_l
, __u64 initseqno
);
1336 extern void databuf_pull(struct conn
*cn_lx
, char *dst
, __u32 len
);
1338 static inline __u32
databuf_trypull(struct conn
*cn_l
, char *dst
, __u32 len
)
1340 if (len
> cn_l
->data_buf
.read_remaining
)
1341 len
= cn_l
->data_buf
.read_remaining
;
1342 databuf_pull(cn_l
, dst
, len
);
1346 extern void databuf_unpull_dpi(struct conn
*trgt_sock
, struct cor_sock
*cs
,
1347 struct data_buf_item
*item
, __u16 next_read_offset
);
1349 extern void databuf_pull_dbi(struct cor_sock
*cs_rl
, struct conn
*trgt_sock_l
);
1351 extern void databuf_unpull(struct conn
*trgt_out_l
, __u32 bytes
);
1353 extern void databuf_pullold(struct conn
*trgt_out_l
, __u64 startpos
, char *dst
,
1356 extern void databuf_ack(struct conn
*trgt_out_l
, __u64 pos
);
1358 extern void databuf_ackread(struct conn
*cn_lx
);
1360 extern __u32
receive_buf(struct conn
*cn_lx
, char *buf
, __u32 datalen
,
1361 int rcv_delayed_lowbuf
, __u8 flush
);
1363 extern __u32
receive_skb(struct conn
*src_in_l
, struct sk_buff
*skb
,
1364 int rcv_delayed_lowbuf
, __u8 flush
);
1366 extern void wake_sender(struct conn
*cn
);
1368 extern int __init
forward_init(void);
1370 /* sock_rdaemon.c */
1371 extern int cor_create_rdaemon_sock(struct net
*net
, struct socket
*sock
,
1372 int protocol
, int kern
);
1374 extern int rdreq_connect(struct cor_sock
*cs
);
1376 extern void cor_usersock_release(struct cor_sock
*cs
);
1378 extern int __init
cor_rd_init1(void);
1380 extern int __init
cor_rd_init2(void);
1383 extern int cor_create_raw_sock(struct net
*net
, struct socket
*sock
,
1384 int protocol
, int kern
);
1386 /* sock_managed.c */
1387 extern struct cor_sock
*get_corsock_by_cookie(__be64 cookie
);
1389 extern void __set_sock_connecterror(struct cor_sock
*cs_m_l
, int errorno
);
1391 extern void _set_sock_connecterror(struct cor_sock
*cs
, int errorno
);
1393 static inline void set_sock_connecterror(__be64 cookie
, int errorno
)
1395 struct cor_sock
*cs
= get_corsock_by_cookie(cookie
);
1396 _set_sock_connecterror(cs
, errorno
);
1399 extern void flush_sock_managed(struct conn
*trgt_sock_lx
, int from_recvmsg
,
1400 __u8
*do_wake_sender
);
1402 extern void cor_mngdsocket_readfromconn_fromatomic(struct cor_sock
*cs
);
1404 extern void cor_mngdsocket_readfromconn_wq(struct work_struct
*work
);
1406 extern int cor_create_managed_sock(struct net
*net
, struct socket
*sock
,
1407 int protocol
, int kern
);
1409 extern int __init
cor_sock_managed_init1(void);
1412 extern void free_sock(struct kref
*ref
);
1414 extern void flush_sock(struct conn
*trgt_sock_lx
);
1416 extern void update_src_sock_sndspeed(struct conn
*src_sock_l
, __u32 bytes_sent
);
1418 extern int cor_sock_sndbufavailable(struct conn
*src_sock_lx
);
1420 extern int cor_socket_socketpair(struct socket
*sock1
, struct socket
*sock2
);
1422 extern int cor_socket_getname(struct socket
*sock
, struct sockaddr
*addr
,
1425 extern int cor_socket_mmap(struct file
*file
, struct socket
*sock
,
1426 struct vm_area_struct
*vma
);
1428 extern int _cor_createsock(struct net
*net
, struct socket
*sock
, int protocol
,
1431 extern int __init
cor_sock_init1(void);
1433 extern int __init
cor_sock_init2(void);
1436 static inline struct skb_procstate
*skb_pstate(struct sk_buff
*skb
)
1438 return (struct skb_procstate
*) &(skb
->cb
[0]);
1441 static inline struct sk_buff
*skb_from_pstate(struct skb_procstate
*ps
)
1443 return (struct sk_buff
*) (((char *)ps
) - offsetof(struct sk_buff
,cb
));
1446 static inline int qos_fastsend_allowed_conn_retrans(struct neighbor
*nb
)
1448 return atomic_read(&(nb
->queue
->cong_status
)) < CONGSTATUS_RETRANS
;
1451 static inline int qos_fastsend_allowed_announce(struct net_device
*dev
)
1454 struct qos_queue
*q
= get_queue(dev
);
1459 rc
= atomic_read(&(q
->cong_status
)) < CONGSTATUS_ANNOUNCE
;
1461 kref_put(&(q
->ref
), free_qos
);
1466 static inline int qos_fastsend_allowed_conn(struct conn
*trgt_out_lx
)
1468 struct qos_queue
*q
= trgt_out_lx
->target
.out
.nb
->queue
;
1469 return atomic_read(&(q
->cong_status
)) < CONGSTATUS_CONNDATA
;
1472 static inline __u32
mss(struct neighbor
*nb
, __u32 l3overhead
)
1474 __u32 mtu
= (nb
->dev
->mtu
> 4096) ? 4096 : nb
->dev
->mtu
;
1475 return mtu
- LL_RESERVED_SPACE(nb
->dev
) - l3overhead
;
1478 static inline __u32
mss_cmsg(struct neighbor
*nb
)
1483 static inline __u32
mss_conndata(struct neighbor
*nb
)
1485 __u32 mss_tmp
= mss(nb
, 11);
1491 for (i
=256;i
<4096;i
*=2) {
1496 return mss_tmp
- mss_tmp
%4096;
1499 static inline __u32
send_conndata_as_skb(struct neighbor
*nb
, __u32 size
)
1501 return size
>= mss_conndata(nb
)/2;
1504 static inline long calc_timeout(__u32 latency_us
, __u32 latency_stddev_us
,
1505 __u32 max_remote_ack_delay_us
)
1507 unsigned long addto
;
1508 if (unlikely(unlikely(latency_us
> 1000000000) ||
1509 unlikely(latency_stddev_us
> 500000000) ||
1510 unlikely(max_remote_ack_delay_us
> 1000000000))) {
1511 addto
= msecs_to_jiffies(latency_us
/1000 + latency_us
/4000 +
1512 latency_stddev_us
/333 +
1513 max_remote_ack_delay_us
/1000);
1515 addto
= usecs_to_jiffies(latency_us
+ latency_us
/4 +
1516 latency_stddev_us
*3 + max_remote_ack_delay_us
);
1520 * 2 is added because
1521 * 1) _to_jiffies rounds down, but should round up, so add 1 to
1523 * 2) even if latency is 0, we never want to schedule the retransmit
1524 * to run right now, so add 1 more
1526 return jiffies
+ 2 + addto
;
1529 static inline void put_be64(char *dst
, __be64 value
)
1531 char *p_value
= (char *) &value
;
1533 dst
[0] = p_value
[0];
1534 dst
[1] = p_value
[1];
1535 dst
[2] = p_value
[2];
1536 dst
[3] = p_value
[3];
1537 dst
[4] = p_value
[4];
1538 dst
[5] = p_value
[5];
1539 dst
[6] = p_value
[6];
1540 dst
[7] = p_value
[7];
1543 static inline void put_u64(char *dst
, __u64 value
)
1545 put_be64(dst
, cpu_to_be64(value
));
1548 static inline void put_u48(char *dst
, __u64 value
)
1550 char *p_value
= (char *) &value
;
1552 value
= cpu_to_be64(value
);
1554 dst
[0] = p_value
[2];
1555 dst
[1] = p_value
[3];
1556 dst
[2] = p_value
[4];
1557 dst
[3] = p_value
[5];
1558 dst
[4] = p_value
[6];
1559 dst
[5] = p_value
[7];
1562 static inline void put_be32(char *dst
, __be32 value
)
1564 char *p_value
= (char *) &value
;
1565 dst
[0] = p_value
[0];
1566 dst
[1] = p_value
[1];
1567 dst
[2] = p_value
[2];
1568 dst
[3] = p_value
[3];
1571 static inline void put_u32(char *dst
, __u32 value
)
1573 put_be32(dst
, cpu_to_be32(value
));
1576 static inline void put_be16(char *dst
, __be16 value
)
1578 char *p_value
= (char *) &value
;
1579 dst
[0] = p_value
[0];
1580 dst
[1] = p_value
[1];
1583 static inline void put_u16(char *dst
, __u16 value
)
1585 put_be16(dst
, cpu_to_be16(value
));
/*
 * Pull len bytes off the front of the skb; returns a pointer to the
 * pulled data, or 0 if the skb is too short (skb_pull failed).
 */
static inline char *cor_pull_skb(struct sk_buff *skb, unsigned int len)
{
	char *data = skb_pull(skb, len);

	return unlikely(data == 0) ? 0 : data;
}
1598 static inline __be64
parse_be64(char *buf
)
1604 ((char *)&ret
)[0] = buf
[0];
1605 ((char *)&ret
)[1] = buf
[1];
1606 ((char *)&ret
)[2] = buf
[2];
1607 ((char *)&ret
)[3] = buf
[3];
1608 ((char *)&ret
)[4] = buf
[4];
1609 ((char *)&ret
)[5] = buf
[5];
1610 ((char *)&ret
)[6] = buf
[6];
1611 ((char *)&ret
)[7] = buf
[7];
1616 static inline __u64
parse_u64(char *buf
)
1618 return be64_to_cpu(parse_be64(buf
));
1621 static inline __u64
parse_u48(char *ptr
)
1625 ((char *)&ret
)[0] = 0;
1626 ((char *)&ret
)[1] = 0;
1627 ((char *)&ret
)[2] = ptr
[0];
1628 ((char *)&ret
)[3] = ptr
[1];
1629 ((char *)&ret
)[4] = ptr
[2];
1630 ((char *)&ret
)[5] = ptr
[3];
1631 ((char *)&ret
)[6] = ptr
[4];
1632 ((char *)&ret
)[7] = ptr
[5];
1634 return be64_to_cpu(ret
);
1637 static inline __be32
parse_be32(char *ptr
)
1643 ((char *)&ret
)[0] = ptr
[0];
1644 ((char *)&ret
)[1] = ptr
[1];
1645 ((char *)&ret
)[2] = ptr
[2];
1646 ((char *)&ret
)[3] = ptr
[3];
1651 static inline __u32
parse_u32(char *ptr
)
1653 return be32_to_cpu(parse_be32(ptr
));
1656 static inline __be16
parse_be16(char *ptr
)
1662 ((char *)&ret
)[0] = ptr
[0];
1663 ((char *)&ret
)[1] = ptr
[1];
1668 static inline __u16
parse_u16(char *ptr
)
1670 return be16_to_cpu(parse_be16(ptr
));
1673 static inline __u64
pull_u48(struct sk_buff
*skb
)
1675 return parse_u48(cor_pull_skb(skb
, 6));
1678 static inline __be32
pull_be32(struct sk_buff
*skb
)
1680 return parse_be32(cor_pull_skb(skb
, 4));
1683 static inline __u32
pull_u32(struct sk_buff
*skb
)
1685 return parse_u32(cor_pull_skb(skb
, 4));
1688 static inline __u16
pull_u16(struct sk_buff
*skb
)
1690 return parse_u16(cor_pull_skb(skb
, 2));
1693 static inline __u8
pull_u8(struct sk_buff
*skb
)
1695 char *ptr
= cor_pull_skb(skb
, 1);
1700 static inline int is_conn_in(struct conn
*cn_l
, struct neighbor
*nb
,
1703 if (unlikely(unlikely(cn_l
->sourcetype
!= SOURCE_IN
) ||
1704 unlikely(cn_l
->source
.in
.nb
!= nb
) ||
1705 unlikely(cn_l
->source
.in
.conn_id
!= conn_id
) ||
1706 unlikely(cn_l
->isreset
!= 0)))
1711 static inline int is_src_sock(struct conn
*cn_l
, struct cor_sock
*cs
)
1713 if (unlikely(unlikely(cn_l
->sourcetype
!= SOURCE_SOCK
) ||
1714 unlikely(cn_l
->source
.sock
.cs
!= cs
)))
1719 static inline int is_trgt_sock(struct conn
*cn_l
, struct cor_sock
*cs
)
1721 if (unlikely(unlikely(cn_l
->targettype
!= TARGET_SOCK
) ||
1722 unlikely(cn_l
->target
.sock
.cs
!= cs
)))
1727 static inline void set_last_act(struct conn
*src_in_l
)
1729 unsigned long jiffies_tmp
= jiffies
;
1731 BUG_ON(src_in_l
->sourcetype
!= SOURCE_IN
);
1733 if (unlikely(time_after(jiffies_tmp
,
1734 src_in_l
->source
.in
.jiffies_last_act
+
1735 HZ
* CONN_ACTIVITY_UPDATEINTERVAL_SEC
)))
1736 _set_last_act(src_in_l
);
1739 #define BUFLEN_MIN 128
1740 #define BUFLEN_MAX 4096
1741 #define PAGESIZE (1 << PAGE_SHIFT)
1743 static inline __u32
buf_optlen(__u32 datalen
)
1745 __u32 optlen
= BUFLEN_MIN
;
1746 while (optlen
< datalen
&& optlen
< PAGESIZE
&& optlen
< BUFLEN_MAX
)
1747 optlen
= (optlen
<< 1);
1752 inline static void databuf_item_free(struct data_buf_item
*item
)
1754 if (item
->type
== DATABUF_BUF
) {
1756 kmem_cache_free(data_buf_item_slab
, item
);
1757 } else if (item
->type
== DATABUF_SKB
) {
1758 struct sk_buff
*skb
= skb_from_pstate(container_of(item
,
1759 struct skb_procstate
, funcstate
.rcv
.dbi
));
1766 static inline __u64
seqno_clean(__u64 seqno
)
1768 return seqno
& ((1LL << 48) - 1);
1771 static inline int seqno_eq(__u64 seqno1
, __u64 seqno2
)
1773 seqno1
= seqno1
<< 16;
1774 seqno2
= seqno2
<< 16;
1775 return seqno1
== seqno2
;
1778 static inline int seqno_before(__u64 seqno1
, __u64 seqno2
)
1780 seqno1
= seqno1
<< 16;
1781 seqno2
= seqno2
<< 16;
1782 return (seqno1
- seqno2
) >= (1LL << 63);
1785 static inline int seqno_before_eq(__u64 seqno1
, __u64 seqno2
)
1787 return seqno_eq(seqno1
, seqno2
) || seqno_before(seqno1
, seqno2
);
1790 static inline int seqno_after(__u64 seqno1
, __u64 seqno2
)
1792 return seqno_before_eq(seqno1
, seqno2
) ? 0 : 1;
1795 static inline int seqno_after_eq(__u64 seqno1
, __u64 seqno2
)
1797 return seqno_before(seqno1
, seqno2
) ? 0 : 1;
1800 static inline int ktime_before_eq(ktime_t time1
, ktime_t time2
)
1802 return ktime_after(time1
, time2
) ? 0 : 1;
1805 static inline int ktime_after_eq(ktime_t time1
, ktime_t time2
)
1807 return ktime_before(time1
, time2
) ? 0 : 1;
1810 static inline __u64
update_atomic_sum(atomic64_t
*atomic_sum
, __u32 oldvalue
,
1813 __u64 sum_old
= atomic64_read(atomic_sum
);
1821 BUG_ON(sum
< oldvalue
);
1824 BUG_ON(sum
+ newvalue
< sum
);
1827 cmpxchg_ret
= atomic64_cmpxchg(atomic_sum
, sum_old
, sum
);
1829 if (likely(cmpxchg_ret
== sum_old
))
1832 sum_old
= cmpxchg_ret
;
1838 static inline void cor_sk_write_space(struct cor_sock
*cs
)
1840 atomic_set(&(cs
->ready_to_write
), 1);
1842 cs
->sk
.sk_write_space(&(cs
->sk
));
1845 static inline void cor_sk_data_ready(struct cor_sock
*cs
)
1847 atomic_set(&(cs
->ready_to_read
), 1);
1849 cs
->sk
.sk_data_ready(&(cs
->sk
));
1852 /* the other direction may be locked only if called from proc_cpacket */
1853 static inline void flush_buf(struct conn
*cn_lx
)
1855 if (unlikely(cn_lx
->targettype
== TARGET_UNCONNECTED
)) {
1856 proc_cpacket(cn_lx
);
1857 } else if (cn_lx
->targettype
== TARGET_SOCK
) {
1859 } else if (cn_lx
->targettype
== TARGET_OUT
) {
1860 __u32 bytessent
= 0;
1861 flush_out(cn_lx
, &bytessent
);
1862 } else if (unlikely(cn_lx
->targettype
== TARGET_DISCARD
)) {
1863 databuf_ackdiscard(cn_lx
);