[dragonfly/port-amd64.git] / sys / netinet / sctp_structs.h
blob 183dc3058161ffe99034a44450835968850c6ac4
1 /* $KAME: sctp_structs.h,v 1.12 2004/08/17 04:06:19 itojun Exp $ */
2 /* $DragonFly: src/sys/netinet/sctp_structs.h,v 1.3 2006/06/23 17:20:14 eirikn Exp $ */
4 #ifndef _NETINET_SCTP_STRUCTS_H_
5 #define _NETINET_SCTP_STRUCTS_H_
7 /*
8 * Copyright (c) 2001, 2002, 2003, 2004 Cisco Systems, Inc.
9 * All rights reserved.
11 * Redistribution and use in source and binary forms, with or without
12 * modification, are permitted provided that the following conditions
13 * are met:
14 * 1. Redistributions of source code must retain the above copyright
15 * notice, this list of conditions and the following disclaimer.
16 * 2. Redistributions in binary form must reproduce the above copyright
17 * notice, this list of conditions and the following disclaimer in the
18 * documentation and/or other materials provided with the distribution.
19 * 3. All advertising materials mentioning features or use of this software
20 * must display the following acknowledgement:
21 * This product includes software developed by Cisco Systems, Inc.
22 * 4. Neither the name of the project nor the names of its contributors
23 * may be used to endorse or promote products derived from this software
24 * without specific prior written permission.
26 * THIS SOFTWARE IS PROVIDED BY CISCO SYSTEMS AND CONTRIBUTORS ``AS IS'' AND
27 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
28 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
29 * ARE DISCLAIMED. IN NO EVENT SHALL CISCO SYSTEMS OR CONTRIBUTORS BE LIABLE
30 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
31 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
32 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
33 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
34 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
35 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
36 * SUCH DAMAGE.
37 */
39 #ifndef _SYS_TYPES_H_
40 #include <sys/types.h>
41 #endif
42 #ifndef _SYS_QUEUE_H_
43 #include <sys/queue.h>
44 #endif
45 #ifndef _NETINET_IN_H_
46 #include <netinet/in.h>
47 #endif
49 #if defined(__APPLE__)
50 #include <netinet/sctp_callout.h>
51 #elif defined(__OpenBSD__)
52 #include <sys/timeout.h>
53 #else
54 #include <sys/callout.h>
55 #endif
57 #ifdef IPSEC
58 #ifndef __OpenBSD__
59 #include <netinet6/ipsec.h>
60 #include <netproto/key/key.h>
61 #endif
62 #endif
64 #include <netinet/sctp_header.h>
65 #include <netinet/sctp_uio.h>
67 struct sctp_tcb;
68 struct sctp_inpcb;
70 struct sctp_timer {
71 #if defined(__OpenBSD__)
72 struct timeout timer;
73 #else
74 struct callout timer;
75 #endif
76 int type;
77 /*
78 * Depending on the timer type these will be setup and cast with
79 * the appropriate entity.
80 */
81 void *ep;
82 void *tcb;
83 void *net;
84 };
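/*
 * Illustrative sketch (not part of the original header): the comment above
 * says the three void pointers are set up and cast back according to the
 * timer type, roughly as below.  The handler name and the timer-type
 * constant are hypothetical, not taken from the SCTP sources.
 */
#if 0
static void
example_timer_expired(void *arg)
{
	struct sctp_timer *tmr = (struct sctp_timer *)arg;
	struct sctp_inpcb *inp = (struct sctp_inpcb *)tmr->ep;
	struct sctp_tcb *stcb = (struct sctp_tcb *)tmr->tcb;
	struct sctp_nets *net = (struct sctp_nets *)tmr->net;

	switch (tmr->type) {
	case EXAMPLE_TIMER_TYPE_SEND:	/* hypothetical constant */
		/* a retransmission handler would act on inp, stcb and net */
		break;
	default:
		break;
	}
}
#endif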
85 /*
86 * This is the information we track on each interface that we know about
87 * from the distant end.
88 */
89 TAILQ_HEAD(sctpnetlisthead, sctp_nets);
91 /*
92 * Users of the iterator need to malloc an iterator with a call to
93 * sctp_initiate_iterator(func, pcb_flags, asoc_state, void-ptr-arg, u_int32_t,
94 * u_int32-arg, end_func, inp);
95 *
96 * Use the following two defines if you don't care what pcb flags are on the
97 * EP and/or you don't care what state the association is in.
98 *
99 * Note that if you specify an INP as the last argument then ONLY each
100 * association of that single INP will be executed upon. Note that the
101 * pcb flags STILL apply, so if the inp you specify has different pcb_flags
102 * than what you put in pcb_flags, nothing will happen. Use SCTP_PCB_ANY_FLAGS
103 * to assure the inp you specify gets treated.
104 */
105 #define SCTP_PCB_ANY_FLAGS 0x00000000
106 #define SCTP_ASOC_ANY_STATE 0x00000000
108 typedef void (*asoc_func)(struct sctp_inpcb *, struct sctp_tcb *, void *ptr,
109 u_int32_t val);
110 typedef void (*end_func)(void *ptr, u_int32_t val);
112 #define SCTP_ITERATOR_DO_ALL_INP 0x00000001
113 #define SCTP_ITERATOR_DO_SINGLE_INP 0x00000002
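/*
 * Illustrative sketch (not part of the original header): wiring up the
 * asoc_func/end_func callbacks and starting an iterator over every
 * association, as the comment above describes.  The callback and wrapper
 * names are hypothetical, and the sctp_initiate_iterator() argument order
 * simply follows that comment; the C sources hold the authoritative
 * prototype.
 */
#if 0
static void
example_per_assoc(struct sctp_inpcb *inp, struct sctp_tcb *stcb, void *ptr,
    u_int32_t val)
{
	/* called once for each association matching the filters */
}

static void
example_done(void *ptr, u_int32_t val)
{
	/* called once after the final association has been visited */
}

static void
example_start_iterator(void)
{
	/* any endpoint flags, any association state, all endpoints */
	sctp_initiate_iterator(example_per_assoc, SCTP_PCB_ANY_FLAGS,
	    SCTP_ASOC_ANY_STATE, NULL, 0, example_done, NULL);
}
#endif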
115 struct sctp_iterator {
116 LIST_ENTRY(sctp_iterator) sctp_nxt_itr;
117 struct sctp_timer tmr;
118 struct sctp_inpcb *inp; /* ep */
119 struct sctp_tcb *stcb; /* assoc */
120 asoc_func function_toapply;
121 end_func function_atend;
122 void *pointer; /* pointer for apply func to use */
123 u_int32_t val; /* value for apply func to use */
124 u_int32_t pcb_flags;
125 u_int32_t asoc_state;
126 u_int32_t iterator_flags;
127 };
129 LIST_HEAD(sctpiterators, sctp_iterator);
131 struct sctp_copy_all {
132 struct sctp_inpcb *inp; /* ep */
133 struct mbuf *m;
134 struct sctp_sndrcvinfo sndrcv;
135 int sndlen;
136 int cnt_sent;
137 int cnt_failed;
138 };
140 union sctp_sockstore {
141 #ifdef AF_INET
142 struct sockaddr_in sin;
143 #endif
144 #ifdef AF_INET6
145 struct sockaddr_in6 sin6;
146 #endif
147 struct sockaddr sa;
148 };
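/*
 * Illustrative sketch (not part of the original header): union
 * sctp_sockstore lets one slot carry either address family, so callers can
 * dispatch on the shared sa.sa_family field.  The helper name is
 * hypothetical.
 */
#if 0
static int
example_sockstore_len(union sctp_sockstore *store)
{
	switch (store->sa.sa_family) {
#ifdef AF_INET
	case AF_INET:
		return (sizeof(store->sin));
#endif
#ifdef AF_INET6
	case AF_INET6:
		return (sizeof(store->sin6));
#endif
	default:
		return (0);
	}
}
#endif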
150 struct sctp_nets {
151 TAILQ_ENTRY(sctp_nets) sctp_next; /* next link */
153 /* Things on the top half may be able to be split
154 * into a common structure shared by all.
155 */
156 struct sctp_timer pmtu_timer;
158 /*
159 * The following two in combination equate to a route entry for
160 * v6 or v4.
161 */
162 struct sctp_route {
163 struct rtentry *ro_rt;
164 union sctp_sockstore _l_addr; /* remote peer addr */
165 union sctp_sockstore _s_addr; /* our selected src addr */
166 } ro;
167 /* mtu discovered so far */
168 u_int32_t mtu;
169 u_int32_t ssthresh; /* not sure about this one for split */
171 /* smoothed average things for RTT and RTO itself */
172 int lastsa;
173 int lastsv;
174 unsigned int RTO;
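/*
 * Illustrative note (not part of the original header): per RFC 2960
 * section 6.3.1 the retransmission timeout is derived from the smoothed
 * RTT and its variance, roughly
 *	RTO = SRTT + 4 * RTTVAR
 * clamped to [RTO.Min, RTO.Max]; lastsa/lastsv above presumably carry the
 * (scaled) SRTT and variance terms that feed this update.
 */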
176 /* This is used for SHUTDOWN/SHUTDOWN-ACK/SEND or INIT timers */
177 struct sctp_timer rxt_timer;
179 /* last time in seconds I sent to it */
180 struct timeval last_sent_time;
181 int ref_count;
183 /* Congestion stats per destination */
184 /*
185 * flight size variables and such, sorry Vern, I could not avoid
186 * this if I wanted performance :>
187 */
188 u_int32_t flight_size;
189 u_int32_t cwnd; /* actual cwnd */
190 u_int32_t prev_cwnd; /* cwnd before any processing */
191 u_int32_t partial_bytes_acked; /* in CA tracks when to incr a MTU */
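/*
 * Illustrative note (not part of the original header): per RFC 2960
 * section 7.2, cwnd grows by up to one MTU per SACK while in slow start
 * (cwnd <= ssthresh); in congestion avoidance partial_bytes_acked
 * accumulates newly acked bytes, and cwnd is raised by one MTU only once
 * partial_bytes_acked >= cwnd with a full window of data outstanding.
 * These fields presumably back exactly that bookkeeping.
 */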
193 /* tracking variables to avoid the alloc/free in sack processing */
194 unsigned int net_ack;
195 unsigned int net_ack2;
196 /*
197 * These are only valid if the primary dest_state holds the
198 * SCTP_ADDR_SWITCH_PRIMARY flag
199 */
200 u_int32_t next_tsn_at_change;
201 u_int32_t heartbeat_random1;
202 u_int32_t heartbeat_random2;
204 /* if this guy is ok or not ... status */
205 u_int16_t dest_state;
206 /* number of transmit failures to down this guy */
207 u_int16_t failure_threshold;
208 /* error stats on destination */
209 u_int16_t error_count;
211 /* Flags that probably can be combined into dest_state */
212 u_int8_t rto_pending; /* is segment marked for RTO update ** if we split?*/
213 u_int8_t fast_retran_ip; /* fast retransmit in progress */
214 u_int8_t hb_responded;
215 u_int8_t cacc_saw_newack; /* CACC algorithm flag */
216 u_int8_t src_addr_selected; /* if we split we move */
217 u_int8_t indx_of_eligible_next_to_use;
218 u_int8_t addr_is_local; /* it's a local address (if known) could move in split */
219 #ifdef SCTP_HIGH_SPEED
220 u_int8_t last_hs_used; /* index into the last HS table entry we used */
221 #endif
222 };
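/*
 * Illustrative sketch (not part of the original header): remote interfaces
 * hang off the sctpnetlisthead declared earlier, so walking an
 * association's destinations is a plain <sys/queue.h> traversal.  The
 * function name is hypothetical; the nets list lives in struct
 * sctp_association, defined later in this file.
 */
#if 0
static unsigned int
example_count_nets(struct sctp_association *asoc)
{
	struct sctp_nets *net;
	unsigned int cnt = 0;

	TAILQ_FOREACH(net, &asoc->nets, sctp_next)
		cnt++;
	return (cnt);
}
#endif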
225 struct sctp_data_chunkrec {
226 u_int32_t TSN_seq; /* the TSN of this transmit */
227 u_int16_t stream_seq; /* the stream sequence number of this transmit */
228 u_int16_t stream_number; /* the stream number of this guy */
229 u_int32_t payloadtype;
230 u_int32_t context; /* from send */
232 /* ECN Nonce: Nonce Value for this chunk */
233 u_int8_t ect_nonce;
235 /* part of the Highest sacked algorithm to be able to
236 * stroke counts on ones that are FR'd.
237 */
238 u_int32_t fast_retran_tsn; /* sending_seq at the time of FR */
239 struct timeval timetodrop; /* time we drop it from queue */
240 u_int8_t doing_fast_retransmit;
241 u_int8_t rcv_flags; /* flags pulled from data chunk on inbound
242 * for outbound holds sending flags.
243 */
244 u_int8_t state_flags;
245 };
247 TAILQ_HEAD(sctpchunk_listhead, sctp_tmit_chunk);
249 #define CHUNK_FLAGS_FRAGMENT_OK 0x0001
251 struct sctp_tmit_chunk {
252 union {
253 struct sctp_data_chunkrec data;
254 int chunk_id;
255 } rec;
256 int32_t sent; /* the send status */
257 int32_t snd_count; /* number of times I sent */
258 u_int32_t flags; /* flags, such as FRAGMENT_OK */
259 u_int32_t send_size;
260 u_int32_t book_size;
261 u_int32_t mbcnt;
262 struct sctp_association *asoc; /* bp to asoc this belongs to */
263 struct timeval sent_rcv_time; /* filled in if RTT being calculated */
264 struct mbuf *data; /* pointer to mbuf chain of data */
265 struct sctp_nets *whoTo;
266 TAILQ_ENTRY(sctp_tmit_chunk) sctp_next; /* next link */
267 uint8_t do_rtt;
268 };
271 /*
272 * this struct contains info that is used to track inbound stream data
273 * and help with ordering.
274 */
275 TAILQ_HEAD(sctpwheelunrel_listhead, sctp_stream_in);
276 struct sctp_stream_in {
277 struct sctpchunk_listhead inqueue;
278 TAILQ_ENTRY(sctp_stream_in) next_spoke;
279 uint16_t stream_no;
280 uint16_t last_sequence_delivered; /* used for re-order */
281 };
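/*
 * Illustrative sketch (not part of the original header): how
 * last_sequence_delivered supports re-ordering; an ordered DATA chunk whose
 * stream sequence number is exactly one past the last delivered value can
 * go straight up, anything else waits on the inqueue.  The function name is
 * hypothetical and the 16-bit comparison is simplified (no wrap handling).
 */
#if 0
static int
example_can_deliver_now(struct sctp_stream_in *strm, uint16_t ssn)
{
	/* unordered traffic would bypass this check entirely */
	return (ssn == (uint16_t)(strm->last_sequence_delivered + 1));
}
#endif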
283 /* This struct is used to track the traffic on outbound streams */
284 TAILQ_HEAD(sctpwheel_listhead, sctp_stream_out);
285 struct sctp_stream_out {
286 struct sctpchunk_listhead outqueue;
287 TAILQ_ENTRY(sctp_stream_out) next_spoke; /* next link in wheel */
288 uint16_t stream_no;
289 uint16_t next_sequence_sent; /* next one I expect to send out */
290 };
292 /* used to keep track of the addresses yet to try to add/delete */
293 TAILQ_HEAD(sctp_asconf_addrhead, sctp_asconf_addr);
294 struct sctp_asconf_addr {
295 TAILQ_ENTRY(sctp_asconf_addr) next;
296 struct sctp_asconf_addr_param ap;
297 struct ifaddr *ifa; /* save the ifa for add/del ip */
298 uint8_t sent; /* has this been sent yet? */
299 };
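/*
 * Illustrative sketch (not part of the original header): queueing an
 * already-allocated entry for a later ASCONF add/delete.  The function name
 * is hypothetical; the asconf_queue TAILQ it targets is declared in struct
 * sctp_association below.
 */
#if 0
static void
example_queue_asconf_addr(struct sctp_association *asoc,
    struct sctp_asconf_addr *aa)
{
	aa->sent = 0;		/* not transmitted to the peer yet */
	TAILQ_INSERT_TAIL(&asoc->asconf_queue, aa, next);
}
#endif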
301 struct sctp_laddr;
302 LIST_HEAD(sctpladdr, sctp_laddr);
304 /*
305 * Here we have information about each individual association that we
306 * track. In production we would probably be more dynamic, but for ease
307 * of implementation we will have a fixed array that we hunt through in a
308 * linear fashion.
309 */
310 struct sctp_association {
311 /* association state */
312 int state;
313 /* queue of pending addrs to add/delete */
314 struct sctp_asconf_addrhead asconf_queue;
315 struct timeval time_entered; /* time we entered state */
316 struct timeval time_last_rcvd;
317 struct timeval time_last_sent;
318 struct timeval time_last_sat_advance;
319 struct sctp_sndrcvinfo def_send; /* default send parameters */
321 /* timers and such */
322 struct sctp_timer hb_timer; /* hb timer */
323 struct sctp_timer dack_timer; /* Delayed ack timer */
324 struct sctp_timer asconf_timer; /* Asconf */
325 struct sctp_timer strreset_timer; /* stream reset */
326 struct sctp_timer shut_guard_timer; /* guard */
327 struct sctp_timer autoclose_timer; /* automatic close timer */
328 struct sctp_timer delayed_event_timer; /* timer for delayed events */
330 /* list of local addresses when add/del in progress */
331 struct sctpladdr sctp_local_addr_list;
332 struct sctpnetlisthead nets;
334 /* Control chunk queue */
335 struct sctpchunk_listhead control_send_queue;
337 /* Once a TSN hits the wire it is moved to the sent_queue. We
338 * maintain two counts here (don't know if any but retran_cnt
339 * is needed). The idea is that the sent_queue_retran_cnt
340 * reflects how many chunks have been marked for retransmission
341 * by either T3-rxt or FR.
342 */
343 struct sctpchunk_listhead sent_queue;
344 struct sctpchunk_listhead send_queue;
347 /* re-assembly queue for fragmented chunks on the inbound path */
348 struct sctpchunk_listhead reasmqueue;
350 /*
351 * this queue is used when we reach a condition that we can NOT
352 * put data into the socket buffer. We track the size of this
353 * queue and set our rwnd to the space in the socket minus also
354 * the size_on_delivery_queue.
355 */
356 struct sctpchunk_listhead delivery_queue;
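/*
 * Illustrative note (not part of the original header): per the comment
 * above, the advertised receive window would roughly be
 *	my_rwnd = space left in the socket receive buffer
 *	          - size_on_delivery_queue
 * so data parked on this queue still counts against what we offer the peer.
 */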
358 struct sctpwheel_listhead out_wheel;
360 /* If an iterator is looking at me, this is it */
361 struct sctp_iterator *stcb_starting_point_for_iterator;
363 /* ASCONF destination address last sent to */
364 struct sctp_nets *asconf_last_sent_to;
366 /* ASCONF save the last ASCONF-ACK so we can resend it if necessary */
367 struct mbuf *last_asconf_ack_sent;
369 /*
370 * if Source Address Selection is happening, this will rotate through
371 * the linked list.
372 */
373 struct sctp_laddr *last_used_address;
375 /* stream arrays */
376 struct sctp_stream_in *strmin;
377 struct sctp_stream_out *strmout;
378 u_int8_t *mapping_array;
379 /* primary destination to use */
380 struct sctp_nets *primary_destination;
382 /* last place I got a data chunk from */
383 struct sctp_nets *last_data_chunk_from;
384 /* last place I got a control from */
385 struct sctp_nets *last_control_chunk_from;
387 /* circular looking for output selection */
388 struct sctp_stream_out *last_out_stream;
390 /* wait to the point the cum-ack passes
391 * pending_reply->sr_resp.reset_at_tsn.
392 */
393 struct sctp_stream_reset_response *pending_reply;
394 struct sctpchunk_listhead pending_reply_queue;
396 u_int32_t cookie_preserve_req;
397 /* ASCONF next seq I am sending out, inits at init-tsn */
398 uint32_t asconf_seq_out;
399 /* ASCONF last received ASCONF from peer, starts at peer's TSN-1 */
400 uint32_t asconf_seq_in;
402 /* next seq I am sending in str reset messages */
403 uint32_t str_reset_seq_out;
405 /* next seq I am expecting in str reset messages */
406 uint32_t str_reset_seq_in;
407 u_int32_t str_reset_sending_seq;
409 /* various verification tag information */
410 u_int32_t my_vtag; /*
411 * The tag to be used. If assoc is
412 * re-inited by remote end, and
413 * I have unlocked, this will be
414 * regenerated to a new random value.
415 */
416 u_int32_t peer_vtag; /* The peers last tag */
418 u_int32_t my_vtag_nonce;
419 u_int32_t peer_vtag_nonce;
422 /* This is the SCTP fragmentation threshold */
423 u_int32_t smallest_mtu;
425 /*
426 * Special hook for Fast retransmit, allows us to track the highest
427 * TSN that is NEW in this SACK if gap ack blocks are present.
428 */
429 u_int32_t this_sack_highest_gap;
431 /*
432 * The highest consecutive TSN that has been acked by peer on my
433 * sends
434 */
435 u_int32_t last_acked_seq;
437 /* The next TSN that I will use in sending. */
438 u_int32_t sending_seq;
440 /* Original seq number I used ??questionable to keep?? */
441 u_int32_t init_seq_number;
443 /*
444 * We use this value to know if FR's are allowed, i.e. did the
445 * cum-ack pass this point or equal it so FR's are now allowed.
446 */
447 u_int32_t t3timeout_highest_marked;
449 /* The Advanced Peer Ack Point, as required by the PR-SCTP */
450 /* (A1 in Section 4.2) */
451 u_int32_t advanced_peer_ack_point;
453 /*
454 * The highest consecutive TSN at the bottom of the mapping
455 * array (for his sends).
456 */
457 u_int32_t cumulative_tsn;
458 /*
459 * Used to track the mapping array and its offset bits. This
460 * MAY be lower than cumulative_tsn.
461 */
462 u_int32_t mapping_array_base_tsn;
463 /*
464 * used to track highest TSN we have received and is listed in
465 * the mapping array.
466 */
467 u_int32_t highest_tsn_inside_map;
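/*
 * Illustrative note (not part of the original header): each TSN at or above
 * mapping_array_base_tsn presumably maps to one bit of mapping_array, e.g.
 *	gap = tsn - mapping_array_base_tsn;
 *	mapping_array[gap >> 3] |= (1 << (gap & 0x07));
 * with highest_tsn_inside_map tracking the largest TSN recorded this way.
 */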
469 u_int32_t last_echo_tsn;
470 u_int32_t last_cwr_tsn;
471 u_int32_t fast_recovery_tsn;
472 u_int32_t sat_t3_recovery_tsn;
474 u_int32_t tsn_last_delivered;
476 /*
477 * window state information and smallest MTU that I use to bound
478 * segmentation
479 */
480 u_int32_t peers_rwnd;
481 u_int32_t my_rwnd;
482 u_int32_t my_last_reported_rwnd;
483 u_int32_t my_rwnd_control_len;
485 u_int32_t total_output_queue_size;
486 u_int32_t total_output_mbuf_queue_size;
488 /* 32 bit nonce stuff */
489 u_int32_t nonce_resync_tsn;
490 u_int32_t nonce_wait_tsn;
492 int ctrl_queue_cnt; /* could be removed REM */
493 /*
494 * All outbound datagrams queue into this list from the
495 * individual stream queue. Here they get assigned a TSN
496 * and then await sending. The stream seq comes when it
497 * is first put in the individual str queue
498 */
499 unsigned int stream_queue_cnt;
500 unsigned int send_queue_cnt;
501 unsigned int sent_queue_cnt;
502 unsigned int sent_queue_cnt_removeable;
503 /*
504 * Number on sent queue that are marked for retran. Until this
505 * value is 0 we only send one packet of retran'ed data.
506 */
507 unsigned int sent_queue_retran_cnt;
509 unsigned int size_on_reasm_queue;
510 unsigned int cnt_on_reasm_queue;
511 /* amount of data (bytes) currently in flight (on all destinations) */
512 unsigned int total_flight;
513 /* Total book size in flight */
514 unsigned int total_flight_count; /* count of chunks used with book total */
515 /* count of destination nets and list of destination nets */
516 unsigned int numnets;
518 /* Total error count on this association */
519 unsigned int overall_error_count;
521 unsigned int size_on_delivery_queue;
522 unsigned int cnt_on_delivery_queue;
524 unsigned int cnt_msg_on_sb;
526 /* All stream count of chunks for delivery */
527 unsigned int size_on_all_streams;
528 unsigned int cnt_on_all_streams;
530 /* Heart Beat delay in ticks */
531 unsigned int heart_beat_delay;
533 /* autoclose */
534 unsigned int sctp_autoclose_ticks;
536 /* how many preopen streams we have */
537 unsigned int pre_open_streams;
539 /* How many streams I support coming into me */
540 unsigned int max_inbound_streams;
542 /* the cookie life I award for any cookie, in seconds */
543 unsigned int cookie_life;
545 unsigned int numduptsns;
546 int dup_tsns[SCTP_MAX_DUP_TSNS];
547 unsigned int initial_init_rto_max; /* initial RTO for INIT's */
548 unsigned int initial_rto; /* initial send RTO */
549 unsigned int minrto; /* per assoc RTO-MIN */
550 unsigned int maxrto; /* per assoc RTO-MAX */
551 /* Being that we have no bag to collect stale cookies, and
552 * that we really would not want to anyway.. we will count
553 * them in this counter. We of course feed them to the
554 * pigeons right away (I have always thought of pigeons
555 * as flying rats).
556 */
557 u_int16_t stale_cookie_count;
559 /* For the partial delivery API, if up, invoked
560 * this is what last TSN I delivered
561 */
562 u_int16_t str_of_pdapi;
563 u_int16_t ssn_of_pdapi;
566 /* counts of actual built streams. Allocation may be more however */
567 /* could re-arrange to optimize space here. */
568 u_int16_t streamincnt;
569 u_int16_t streamoutcnt;
571 /* my maximum number of retrans of INIT and SEND */
572 /* copied from SCTP but should be individually settable */
573 u_int16_t max_init_times;
574 u_int16_t max_send_times;
576 u_int16_t def_net_failure;
578 /*
579 * lock flag: 0 is ok to send, 1+ (doubles as a retran count) is
580 * awaiting ACK
581 */
582 u_int16_t asconf_sent; /* possibly removable REM */
583 u_int16_t mapping_array_size;
585 u_int16_t chunks_on_out_queue; /* total chunks floating around */
586 int16_t num_send_timers_up;
587 /*
588 * This flag indicates that we need to send the first SACK. If
589 * in place it says we have NOT yet sent a SACK and need to.
590 */
591 u_int8_t first_ack_sent;
593 /* max burst after fast retransmit completes */
594 u_int8_t max_burst;
596 u_int8_t sat_network; /* RTT is in range of sat net or greater */
597 u_int8_t sat_network_lockout;/* lockout code */
598 u_int8_t burst_limit_applied; /* Burst limit in effect at last send? */
599 /* flag goes on when we are doing a partial delivery api */
600 u_int8_t hb_random_values[4];
601 u_int8_t fragmented_delivery_inprogress;
602 u_int8_t fragment_flags;
603 u_int8_t hb_ect_randombit;
604 u_int8_t hb_random_idx;
606 /* ECN Nonce stuff */
607 u_int8_t receiver_nonce_sum; /* nonce I sum and put in my sack */
608 u_int8_t ecn_nonce_allowed; /* Tells us if ECN nonce is on */
609 u_int8_t nonce_sum_check; /* On off switch used during re-sync */
610 u_int8_t nonce_wait_for_ecne;/* flag when we expect a ECN */
611 u_int8_t peer_supports_ecn_nonce;
613 /*
614 * This value, plus all other ack'd but above cum-ack is added
615 * together to cross check against the bit that we have yet to
616 * define (probably in the SACK).
617 * When the cum-ack is updated, this sum is updated as well.
618 */
619 u_int8_t nonce_sum_expect_base;
620 /* Flag to tell if ECN is allowed */
621 u_int8_t ecn_allowed;
623 /* flag to indicate if peer can do asconf */
624 uint8_t peer_supports_asconf;
625 uint8_t peer_supports_asconf_setprim; /* possibly removable REM */
626 /* pr-sctp support flag */
627 uint8_t peer_supports_prsctp;
629 /* stream resets are supported by the peer */
630 uint8_t peer_supports_strreset;
632 /*
633 * packet drops are supported by the peer; we don't really care
634 * about this but we bookkeep it anyway.
635 */
636 uint8_t peer_supports_pktdrop;
638 /* Do we allow V6/V4? */
639 u_int8_t ipv4_addr_legal;
640 u_int8_t ipv6_addr_legal;
641 /* Address scoping flags */
642 /* scope value for IPv4 */
643 u_int8_t ipv4_local_scope;
644 /* scope values for IPv6 */
645 u_int8_t local_scope;
646 u_int8_t site_scope;
647 /* loopback scope */
648 u_int8_t loopback_scope;
649 /* flags to handle send alternate net tracking */
650 u_int8_t used_alt_onsack;
651 u_int8_t used_alt_asconfack;
652 u_int8_t fast_retran_loss_recovery;
653 u_int8_t sat_t3_loss_recovery;
654 u_int8_t dropped_special_cnt;
655 u_int8_t seen_a_sack_this_pkt;
656 u_int8_t stream_reset_outstanding;
657 u_int8_t delayed_connection;
658 u_int8_t ifp_had_enobuf;
659 u_int8_t saw_sack_with_frags;
660 /*
661 * The mapping array is used to track out of order sequences above
662 * last_acked_seq. 0 indicates packet missing, 1 indicates packet
663 * rec'd. We slide it up every time we raise last_acked_seq and 0
664 * trailing locations out. If I get a TSN above the array
665 * mappingArraySz, I discard the datagram and let retransmit happen.
666 */
667 };
669 #endif