/* $KAME: sctp_pcb.h,v 1.19 2004/08/17 06:28:02 t-momose Exp $ */
/* $DragonFly: src/sys/netinet/sctp_pcb.h,v 1.3 2006/05/20 02:42:12 dillon Exp $ */

/*
 * Copyright (c) 2001, 2002, 2003, 2004 Cisco Systems, Inc.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *      This product includes software developed by Cisco Systems, Inc.
 * 4. Neither the name of the project nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY CISCO SYSTEMS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL CISCO SYSTEMS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */
#ifndef _NETINET_SCTP_PCB_H_
#define _NETINET_SCTP_PCB_H_

#ifndef _NETINET_IN_PCB_H_
#include <netinet/in_pcb.h>
#endif

/*
 * We must have V6 so the size of the proto can be calculated. Otherwise
 * we would not allocate enough for Net/Open BSD :-<
 */
#if defined(__FreeBSD__) && __FreeBSD_version > 500000
#include <net/if_var.h>
#include <netinet/ip6.h>
#include <netinet6/ip6_var.h>
#include <netinet6/ip6protosw.h>
#include <netinet6/in6_var.h>
#if defined(__OpenBSD__)
#include <netinet/in_pcb.h>
#else
#include <netinet6/in6_pcb.h>
#endif

#if defined(__OpenBSD__) || defined(__FreeBSD__)

#include <netinet/sctp.h>
#include <netinet/sctp_constants.h>
LIST_HEAD(sctppcbhead, sctp_inpcb);
LIST_HEAD(sctpasochead, sctp_tcb);
TAILQ_HEAD(sctpsocketq, sctp_socket_q_list);
LIST_HEAD(sctpvtaghead, sctp_tagblock);
#include <netinet/sctp_structs.h>
#include <netinet/sctp_uio.h>
#define SCTP_PCB_FLAGS_UDPTYPE          0x00000001
#define SCTP_PCB_FLAGS_TCPTYPE          0x00000002
#define SCTP_PCB_FLAGS_BOUNDALL         0x00000004
#define SCTP_PCB_FLAGS_ACCEPTING        0x00000008
#define SCTP_PCB_FLAGS_UNBOUND          0x00000010
#define SCTP_PCB_FLAGS_DO_ASCONF        0x00000020
#define SCTP_PCB_FLAGS_AUTO_ASCONF      0x00000040
#define SCTP_PCB_FLAGS_NODELAY          0x00000100
#define SCTP_PCB_FLAGS_AUTOCLOSE        0x00000200
#define SCTP_PCB_FLAGS_RECVDATAIOEVNT   0x00000400
#define SCTP_PCB_FLAGS_RECVASSOCEVNT    0x00000800
#define SCTP_PCB_FLAGS_RECVPADDREVNT    0x00001000
#define SCTP_PCB_FLAGS_RECVPEERERR      0x00002000
#define SCTP_PCB_FLAGS_RECVSENDFAILEVNT 0x00004000
#define SCTP_PCB_FLAGS_RECVSHUTDOWNEVNT 0x00008000
#define SCTP_PCB_FLAGS_ADAPTIONEVNT     0x00010000
#define SCTP_PCB_FLAGS_PDAPIEVNT        0x00020000
#define SCTP_PCB_FLAGS_STREAM_RESETEVNT 0x00040000
#define SCTP_PCB_FLAGS_NO_FRAGMENT      0x00080000
/* TCP model support */
#define SCTP_PCB_FLAGS_CONNECTED        0x00100000
#define SCTP_PCB_FLAGS_IN_TCPPOOL       0x00200000
#define SCTP_PCB_FLAGS_DONT_WAKE        0x00400000
#define SCTP_PCB_FLAGS_WAKEOUTPUT       0x00800000
#define SCTP_PCB_FLAGS_WAKEINPUT        0x01000000
#define SCTP_PCB_FLAGS_BOUND_V6         0x02000000
#define SCTP_PCB_FLAGS_NEEDS_MAPPED_V4  0x04000000
#define SCTP_PCB_FLAGS_BLOCKING_IO      0x08000000
#define SCTP_PCB_FLAGS_SOCKET_GONE      0x10000000
#define SCTP_PCB_FLAGS_SOCKET_ALLGONE   0x20000000

/* flags to copy to new PCB */
#define SCTP_PCB_COPY_FLAGS             0x0707ff64

#define SCTP_PCBHASH_ALLADDR(port, mask) (port & mask)
#define SCTP_PCBHASH_ASOC(tag, mask) (tag & mask)
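
/*
 * Usage sketch (illustrative, not part of the original header): with a
 * power-of-two table the mask is (size - 1), so a local port or a
 * verification tag selects its bucket directly.  The mask names below
 * ("hashmark", "asocmark") only stand for whatever masks the tables were
 * hashinit()'d with:
 *
 *      struct sctppcbhead *ephead =
 *          &sctppcbinfo.sctp_ephash[SCTP_PCBHASH_ALLADDR(lport, hashmark)];
 *      struct sctpasochead *asohead =
 *          &sctppcbinfo.sctp_asochash[SCTP_PCBHASH_ASOC(vtag, asocmark)];
 */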

struct sctp_laddr {
        LIST_ENTRY(sctp_laddr) sctp_nxt_addr;   /* next in list */
        struct ifaddr *ifa;
};

struct sctp_timewait {
        uint32_t tv_sec_at_expire;      /* the seconds from boot to expire */
        uint32_t v_tag;                 /* the vtag that can not be reused */
};

struct sctp_tagblock {
        LIST_ENTRY(sctp_tagblock) sctp_nxt_tagblock;
        struct sctp_timewait vtag_block[SCTP_NUMBER_IN_VTAG_BLOCK];
};

struct sctp_epinfo {
        struct sctpasochead *sctp_asochash;

        struct sctppcbhead *sctp_ephash;

        /*
         * The TCP model represents a substantial overhead in that we get
         * an additional hash table to keep explicit connections in. The
         * listening TCP endpoint will exist in the usual ephash above and
         * accept only INIT's. It will be incapable of sending off an INIT.
         * When a dg arrives we must look in the normal ephash. If we find
         * a TCP endpoint that will tell us to go to the specific endpoint
         * hash and re-hash to find the right assoc/socket. If we find a
         * UDP model socket we then must complete the lookup. If this fails,
         * i.e. no association can be found then we must continue to see if
         * a sctp_peeloff()'d socket is in the tcpephash (a spun off socket
         * acts like a TCP model connected socket).
         */
        struct sctppcbhead *sctp_tcpephash;
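
        /*
         * Lookup-order sketch (illustrative, not part of the original
         * header): an arriving packet is matched in sctp_ephash first;
         * only when the endpoint found there is a TCP-model socket, or a
         * UDP-model lookup finds no association, do we re-hash into
         * sctp_tcpephash for the connected or peeled-off socket, roughly:
         *
         *      if (inp->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE)
         *              head = &sctppcbinfo.sctp_tcpephash[
         *                  SCTP_PCBHASH_ALLADDR(lport, tcpmask)];
         *
         * where "tcpmask" stands for whatever mask this table was sized
         * with.
         */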
        uint32_t hashtblsize;

        struct sctppcbhead listhead;

        struct sctpiterators iteratorhead;
#if defined(__FreeBSD__) || defined(__APPLE__) || defined(__DragonFly__)
#if __FreeBSD_version >= 500000
        struct uma_zone *ipi_zone_ep;
        struct uma_zone *ipi_zone_asoc;
        struct uma_zone *ipi_zone_laddr;
        struct uma_zone *ipi_zone_net;
        struct uma_zone *ipi_zone_chunk;
        struct uma_zone *ipi_zone_sockq;
#else
        struct vm_zone *ipi_zone_ep;
        struct vm_zone *ipi_zone_asoc;
        struct vm_zone *ipi_zone_laddr;
        struct vm_zone *ipi_zone_net;
        struct vm_zone *ipi_zone_chunk;
        struct vm_zone *ipi_zone_sockq;
#endif
#endif
#if defined(__NetBSD__) || defined(__OpenBSD__)
        struct pool ipi_zone_ep;
        struct pool ipi_zone_asoc;
        struct pool ipi_zone_laddr;
        struct pool ipi_zone_net;
        struct pool ipi_zone_chunk;
        struct pool ipi_zone_sockq;
#endif

#if defined(__FreeBSD__) && __FreeBSD_version >= 503000
        struct mtx ipi_ep_mtx;
        struct mtx it_mtx;
#endif
        /* ep zone info */
        u_int ipi_count_ep;
        u_quad_t ipi_gencnt_ep;

        /* assoc/tcb zone info */
        u_int ipi_count_asoc;
        u_quad_t ipi_gencnt_asoc;

        /* local addrlist zone info */
        u_int ipi_count_laddr;
        u_quad_t ipi_gencnt_laddr;

        /* remote addrlist zone info */
        u_int ipi_count_raddr;
        u_quad_t ipi_gencnt_raddr;

        /* chunk structure list for output */
        u_int ipi_count_chunk;
        u_quad_t ipi_gencnt_chunk;

        /* socket queue zone info */
        u_int ipi_count_sockq;
        u_quad_t ipi_gencnt_sockq;

        struct sctpvtaghead vtag_timewait[SCTP_STACK_VTAG_HASH_SIZE];

#ifdef _SCTP_NEEDS_CALLOUT_
        struct calloutlist callqueue;
#endif /* _SCTP_NEEDS_CALLOUT_ */

        /* for port allocations */
};

extern uint32_t sctp_pegs[SCTP_NUMBER_OF_PEGS];

/*
 * Here we have all the relevant information for each SCTP entity created.
 * We will need to modify this as appropriate. We also need to figure out
 * how to access /dev/random.
 */
struct sctp_pcb {
        unsigned int time_of_secret_change;     /* number of seconds from timeval.tv_sec */
        uint32_t secret_key[SCTP_HOW_MANY_SECRETS][SCTP_NUMBER_OF_SECRETS];
        unsigned int size_of_a_cookie;

        unsigned int sctp_timeoutticks[SCTP_NUM_TMRS];
        unsigned int sctp_minrto;
        unsigned int sctp_maxrto;
        unsigned int initial_rto;

        int initial_init_rto_max;

        uint32_t sctp_sws_sender;
        uint32_t sctp_sws_receiver;

        /* various thresholds */
        /* Max times I will init at a guy */
        uint16_t max_init_times;

        /* Max times I will send before we consider someone dead */
        uint16_t max_send_times;

        uint16_t def_net_failure;

        /* number of streams to pre-open on an association */
        uint16_t pre_open_stream_count;
        uint16_t max_open_streams_intome;

        /* random number generator */
        uint32_t random_counter;
        uint8_t random_numbers[SCTP_SIGNATURE_ALOC_SIZE];
        uint8_t random_store[SCTP_SIGNATURE_ALOC_SIZE];

        /*
         * This timer is kept running per endpoint. When it fires it
         * will change the secret key. The default is once an hour.
         */
        struct sctp_timer signature_change;

        uint32_t initial_sequence_debug;
        uint32_t adaption_layer_indicator;
        char current_secret_number;
        char last_secret_number;
};

#ifndef SCTP_ALIGNMENT
#define SCTP_ALIGNMENT 32
#endif

#ifndef SCTP_ALIGNM1
#define SCTP_ALIGNM1 (SCTP_ALIGNMENT-1)
#endif
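
/*
 * Worked example (illustrative): with SCTP_ALIGNMENT at 32, SCTP_ALIGNM1 is
 * 31 (0x1f), and rounding a size up to the next multiple of 32 is
 *
 *      rounded = (size + SCTP_ALIGNM1) & ~SCTP_ALIGNM1;
 *
 * e.g. a hypothetical 196 byte in6pcb pads out to (196 + 31) & ~31 = 224.
 * This is how the align[] overlay in struct sctp_inpcb below sizes the
 * embedded PCB.
 */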

#define sctp_lport ip_inp.inp.inp_lport

struct sctp_socket_q_list {
        struct sctp_tcb *tcb;
        TAILQ_ENTRY(sctp_socket_q_list) next_sq;
};

struct sctp_inpcb {
        /*
         * put an inpcb in front of it all, kind of a waste but we need
         * to for compatibility with all the other stuff.
         */
        union {
                struct inpcb inp;
                char align[(sizeof(struct in6pcb) + SCTP_ALIGNM1) &
                    ~SCTP_ALIGNM1];
        } ip_inp;
        LIST_ENTRY(sctp_inpcb) sctp_list;       /* lists all endpoints */
        /* hash of all endpoints for model */
        LIST_ENTRY(sctp_inpcb) sctp_hash;
        /* count of local addresses bound, 0 if bound all */
        int laddr_count;
        /* list of addrs in use by the EP */
        struct sctpladdr sctp_addr_list;
        /* used for source address selection rotation */
        struct sctp_laddr *next_addr_touse;
        struct ifnet *next_ifn_touse;
        /* back pointer to our socket */
        struct socket *sctp_socket;
        uint32_t sctp_flags;            /* flag set */
        struct sctp_pcb sctp_ep;        /* SCTP ep data */
        /* head of the hash of all associations */
        struct sctpasochead *sctp_tcbhash;
        u_long sctp_hashmark;
        /* head of the list of all associations */
        struct sctpasochead sctp_asoc_list;
        /* queue of TCB's waiting to stuff data up the socket */
        struct sctpsocketq sctp_queue_list;
        void *sctp_tcb_at_block;
        struct sctp_iterator *inp_starting_point_for_iterator;
        uint32_t sctp_frag_point;
        uint32_t sctp_vtag_first;
        struct mbuf *pkt, *pkt_last, *sb_last_mpkt;
        struct mbuf *control;
#if !(defined(__FreeBSD__) || defined(__APPLE__) || defined(__DragonFly__))

#if defined(__FreeBSD__) && __FreeBSD_version >= 503000
        struct mtx inp_mtx;
        struct mtx inp_create_mtx;
        uint32_t refcount;
#endif
};

struct sctp_tcb {
        struct socket *sctp_socket;             /* back pointer to socket */
        struct sctp_inpcb *sctp_ep;             /* back pointer to ep */
        LIST_ENTRY(sctp_tcb) sctp_tcbhash;      /* next link in hash table */
        LIST_ENTRY(sctp_tcb) sctp_tcblist;      /* list of all of the TCB's */
        LIST_ENTRY(sctp_tcb) sctp_asocs;
        struct sctp_association asoc;
        uint16_t rport;                         /* remote port in network format */
#if defined(__FreeBSD__) && __FreeBSD_version >= 503000
        struct mtx tcb_mtx;
#endif
};

#if defined(__FreeBSD__) && __FreeBSD_version >= 503000

/* General locking concepts:
 * The goal of our locking is to of course provide
 * consistency and yet minimize overhead. We will
 * attempt to use non-recursive locks which are supposed
 * to be quite inexpensive. Now in order to do this the goal
 * is that most functions are not aware of locking. Once we
 * have a TCB we lock it and unlock when we are through. This
 * means that the TCB lock is kind-of a "global" lock when
 * working on an association. Caution must be used when
 * asserting a TCB_LOCK since if we recurse we deadlock.
 *
 * Most other locks (INP and INFO) attempt to localize
 * the locking i.e. we try to contain the lock and
 * unlock within the function that needs to lock it. This
 * sometimes means we do extra locks and unlocks and lose
 * a bit of efficiency, but if the performance statements about
 * non-recursive locks are true this should not be a problem.
 * One issue that arises with this only-lock-when-needed
 * approach is that if an implicit association setup is done we
 * have a problem. If at the time I lookup an association
 * I have NULL in the tcb return, by the time I call to
 * create the association some other processor could
 * have created it. This is what the CREATE lock on
 * the endpoint is for. Places where we will be implicitly
 * creating the association OR just creating an association
 * (the connect call) will assert the CREATE_INP lock. This
 * will assure us that during all the lookup of INP and INFO
 * if another creator is also locking/looking up we can
 * gate the two to synchronize. So the CREATE_INP lock is
 * also another one we must use extreme caution in locking
 * to make sure we don't hit a re-entrancy issue.
 *
 * For platforms other than FreeBSD 5.x and above we provide a bunch
 * of EMPTY lock macros so we can blatantly put locks
 * everywhere and they reduce to nothing on NetBSD/OpenBSD.
 */
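
/* A minimal usage sketch (illustrative, not part of the original header),
 * assuming "inp" and "stcb" came from the lookup routines declared later
 * in this file; one possible bracketing is:
 *
 *      SCTP_INP_INFO_RLOCK();          (global lists)
 *      SCTP_INP_RLOCK(inp);            (then the endpoint)
 *      SCTP_TCB_LOCK(stcb);            (then the association)
 *      SCTP_INP_INFO_RUNLOCK();
 *      SCTP_INP_RUNLOCK(inp);
 *      ... work on the association ...
 *      SCTP_TCB_UNLOCK(stcb);
 */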

/* When working with the global SCTP lists we lock and unlock
 * the INP_INFO lock. So when we go to lookup an association
 * we will want to do a SCTP_INP_INFO_RLOCK() and then when
 * we want to add a new association to the sctppcbinfo lists
 * we will do a SCTP_INP_INFO_WLOCK().
 */

/* FIX ME, all locks right now have a
 * recursive check/panic to validate that I
 * don't have any lock recursion going on.
 */

#define SCTP_INP_INFO_LOCK_INIT() \
        mtx_init(&sctppcbinfo.ipi_ep_mtx, "sctp", "inp_info", MTX_DEF)

#define SCTP_INP_INFO_RLOCK() do { \
        if (mtx_owned(&sctppcbinfo.ipi_ep_mtx)) \
                panic("INP INFO Recursive Lock-R"); \
        mtx_lock(&sctppcbinfo.ipi_ep_mtx); \
} while (0)

#define SCTP_INP_INFO_WLOCK() do { \
        if (mtx_owned(&sctppcbinfo.ipi_ep_mtx)) \
                panic("INP INFO Recursive Lock-W"); \
        mtx_lock(&sctppcbinfo.ipi_ep_mtx); \
} while (0)

void SCTP_INP_INFO_RLOCK(void);
void SCTP_INP_INFO_WLOCK(void);

#define SCTP_INP_INFO_RUNLOCK()         mtx_unlock(&sctppcbinfo.ipi_ep_mtx)
#define SCTP_INP_INFO_WUNLOCK()         mtx_unlock(&sctppcbinfo.ipi_ep_mtx)
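
/*
 * Usage sketch (illustrative): lookups over the global lists take
 * SCTP_INP_INFO_RLOCK(), insertions into the sctppcbinfo lists take
 * SCTP_INP_INFO_WLOCK():
 *
 *      SCTP_INP_INFO_RLOCK();
 *      LIST_FOREACH(inp, &sctppcbinfo.listhead, sctp_list) {
 *              ... inspect the endpoint ...
 *      }
 *      SCTP_INP_INFO_RUNLOCK();
 *
 *      SCTP_INP_INFO_WLOCK();
 *      LIST_INSERT_HEAD(&sctppcbinfo.listhead, new_inp, sctp_list);
 *      SCTP_INP_INFO_WUNLOCK();
 */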

/* The INP locks we will use for locking an SCTP endpoint, so for
 * example if we want to change something at the endpoint level, for
 * example random_store or the cookie secrets, we lock the INP level.
 */
#define SCTP_INP_LOCK_INIT(_inp) \
        mtx_init(&(_inp)->inp_mtx, "sctp", "inp", MTX_DEF | MTX_DUPOK)

#define SCTP_ASOC_CREATE_LOCK_INIT(_inp) \
        mtx_init(&(_inp)->inp_create_mtx, "sctp", "inp_create", \
            MTX_DEF | MTX_DUPOK)

#define SCTP_INP_LOCK_DESTROY(_inp)     mtx_destroy(&(_inp)->inp_mtx)
#define SCTP_ASOC_CREATE_LOCK_DESTROY(_inp)     mtx_destroy(&(_inp)->inp_create_mtx)

#define SCTP_INP_RLOCK(_inp) do { \
        struct sctp_tcb *xx_stcb; \
        xx_stcb = LIST_FIRST(&_inp->sctp_asoc_list); \
        if (xx_stcb) \
                if (mtx_owned(&(xx_stcb)->tcb_mtx)) \
                        panic("I own TCB lock?"); \
        if (mtx_owned(&(_inp)->inp_mtx)) \
                panic("INP Recursive Lock-R"); \
        mtx_lock(&(_inp)->inp_mtx); \
} while (0)

#define SCTP_INP_WLOCK(_inp) do { \
        struct sctp_tcb *xx_stcb; \
        xx_stcb = LIST_FIRST(&_inp->sctp_asoc_list); \
        if (xx_stcb) \
                if (mtx_owned(&(xx_stcb)->tcb_mtx)) \
                        panic("I own TCB lock?"); \
        if (mtx_owned(&(_inp)->inp_mtx)) \
                panic("INP Recursive Lock-W"); \
        mtx_lock(&(_inp)->inp_mtx); \
} while (0)

void SCTP_INP_RLOCK(struct sctp_inpcb *);
void SCTP_INP_WLOCK(struct sctp_inpcb *);

#define SCTP_INP_INCR_REF(_inp) _inp->refcount++

#define SCTP_INP_DECR_REF(_inp) do { \
        if (_inp->refcount > 0) \
                _inp->refcount--; \
        else \
                panic("bad inp refcount"); \
} while (0)

#define SCTP_ASOC_CREATE_LOCK(_inp) do { \
        if (mtx_owned(&(_inp)->inp_create_mtx)) \
                panic("INP Recursive CREATE"); \
        mtx_lock(&(_inp)->inp_create_mtx); \
} while (0)

#define SCTP_INP_RUNLOCK(_inp)          mtx_unlock(&(_inp)->inp_mtx)
#define SCTP_INP_WUNLOCK(_inp)          mtx_unlock(&(_inp)->inp_mtx)
#define SCTP_ASOC_CREATE_UNLOCK(_inp)   mtx_unlock(&(_inp)->inp_create_mtx)
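
/*
 * Sketch of the implicit-setup pattern the CREATE lock exists for
 * (illustrative only; "lookup" stands in for the sctp_findassociation_*()
 * routines declared below and the int/uint32_t arguments to
 * sctp_aloc_assoc() are placeholders): if the first lookup misses, take
 * the CREATE lock and look again before allocating, so two CPUs cannot
 * both create the same association:
 *
 *      stcb = lookup(inp, addr);
 *      if (stcb == NULL) {
 *              SCTP_ASOC_CREATE_LOCK(inp);
 *              stcb = lookup(inp, addr);
 *              if (stcb == NULL)
 *                      stcb = sctp_aloc_assoc(inp, addr, 1, &error, 0);
 *              SCTP_ASOC_CREATE_UNLOCK(inp);
 *      }
 */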

/* For the majority of things (once we have found the association) we
 * will lock the actual association mutex. This will protect all
 * the association-level queues and streams and such. We will
 * need to lock the socket layer when we stuff data up into
 * the receiving sb_mb. I.e. we will need to do an extra
 * SOCKBUF_LOCK(&so->so_rcv) even though the association is
 * locked.
 */
#define SCTP_TCB_LOCK_INIT(_tcb) \
        mtx_init(&(_tcb)->tcb_mtx, "sctp", "tcb", MTX_DEF | MTX_DUPOK)
#define SCTP_TCB_LOCK_DESTROY(_tcb)     mtx_destroy(&(_tcb)->tcb_mtx)
#define SCTP_TCB_LOCK(_tcb) do { \
        if (!mtx_owned(&(_tcb->sctp_ep->inp_mtx))) \
                panic("TCB locking and no INP lock"); \
        if (mtx_owned(&(_tcb)->tcb_mtx)) \
                panic("TCB Lock-recursive"); \
        mtx_lock(&(_tcb)->tcb_mtx); \
} while (0)
#define SCTP_TCB_UNLOCK(_tcb)           mtx_unlock(&(_tcb)->tcb_mtx)
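
/*
 * Delivery sketch (illustrative): even with the TCB held, pushing data up
 * into the receive buffer still takes the socket-buffer lock, and the TCB
 * lock itself asserts that the INP lock is already owned:
 *
 *      SCTP_INP_RLOCK(stcb->sctp_ep);
 *      SCTP_TCB_LOCK(stcb);
 *      SOCKBUF_LOCK(&stcb->sctp_socket->so_rcv);
 *      ... append the mbuf chain to so_rcv ...
 *      SOCKBUF_UNLOCK(&stcb->sctp_socket->so_rcv);
 *      SCTP_TCB_UNLOCK(stcb);
 *      SCTP_INP_RUNLOCK(stcb->sctp_ep);
 */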

#define SCTP_ITERATOR_LOCK_INIT() \
        mtx_init(&sctppcbinfo.it_mtx, "sctp", "iterator", MTX_DEF)
#define SCTP_ITERATOR_LOCK() do { \
        if (mtx_owned(&sctppcbinfo.it_mtx)) \
                panic("Iterator Lock"); \
        mtx_lock(&sctppcbinfo.it_mtx); \
} while (0)

#define SCTP_ITERATOR_UNLOCK()          mtx_unlock(&sctppcbinfo.it_mtx)
#define SCTP_ITERATOR_LOCK_DESTROY()    mtx_destroy(&sctppcbinfo.it_mtx)

#else

/* Empty Lock declarations for all other
 * platforms pre-process away to nothing.
 */

/* Lock for INFO stuff */
#define SCTP_INP_INFO_LOCK_INIT()
#define SCTP_INP_INFO_RLOCK()
#define SCTP_INP_INFO_WLOCK()

#define SCTP_INP_INFO_RUNLOCK()
#define SCTP_INP_INFO_WUNLOCK()

#define SCTP_INP_LOCK_INIT(_inp)
#define SCTP_INP_LOCK_DESTROY(_inp)
#define SCTP_INP_RLOCK(_inp)
#define SCTP_INP_RUNLOCK(_inp)
#define SCTP_INP_WLOCK(_inp)
#define SCTP_INP_INCR_REF(_inp)
#define SCTP_INP_DECR_REF(_inp)
#define SCTP_INP_WUNLOCK(_inp)
#define SCTP_ASOC_CREATE_LOCK_INIT(_inp)
#define SCTP_ASOC_CREATE_LOCK_DESTROY(_inp)
#define SCTP_ASOC_CREATE_LOCK(_inp)
#define SCTP_ASOC_CREATE_UNLOCK(_inp)

#define SCTP_TCB_LOCK_INIT(_tcb)
#define SCTP_TCB_LOCK_DESTROY(_tcb)
#define SCTP_TCB_LOCK(_tcb)
#define SCTP_TCB_UNLOCK(_tcb)

/* socket locks that only exist on FreeBSD 5.3 and above */
#define SOCK_LOCK(_so)
#define SOCK_UNLOCK(_so)
#define SOCKBUF_LOCK(_so_buf)
#define SOCKBUF_UNLOCK(_so_buf)

#define SCTP_ITERATOR_LOCK_INIT()
#define SCTP_ITERATOR_LOCK()
#define SCTP_ITERATOR_UNLOCK()
#define SCTP_ITERATOR_LOCK_DESTROY()

#endif

#if defined(_KERNEL) || (defined(__APPLE__) && defined(KERNEL))

extern struct sctp_epinfo sctppcbinfo;
extern int sctp_auto_asconf;

int SCTP6_ARE_ADDR_EQUAL(struct in6_addr *a, struct in6_addr *b);

void sctp_fill_pcbinfo(struct sctp_pcbinfo *);

struct sctp_nets *sctp_findnet(struct sctp_tcb *, struct sockaddr *);

struct sctp_inpcb *sctp_pcb_findep(struct sockaddr *, int, int);

#if (defined(__FreeBSD__) && __FreeBSD_version >= 500000) || defined(__DragonFly__)
int sctp_inpcb_bind(struct socket *, struct sockaddr *, struct thread *);
#else
int sctp_inpcb_bind(struct socket *, struct sockaddr *, struct proc *);
#endif

struct sctp_tcb *sctp_findassociation_addr(struct mbuf *, int, int,
    struct sctphdr *, struct sctp_chunkhdr *, struct sctp_inpcb **,
    struct sctp_nets **);

struct sctp_tcb *sctp_findassociation_addr_sa(struct sockaddr *,
    struct sockaddr *, struct sctp_inpcb **, struct sctp_nets **, int);

void sctp_move_pcb_and_assoc(struct sctp_inpcb *, struct sctp_inpcb *,
    struct sctp_tcb *);

/*
 * For this call ep_addr, the to is the destination endpoint address of
 * the peer (relative to outbound). The from field is only used if the
 * TCP model is enabled and helps distinguish amongst the subset bound
 * (non-boundall). The TCP model MAY change the actual ep field, this
 * is why it is passed.
 */
struct sctp_tcb *sctp_findassociation_ep_addr(struct sctp_inpcb **,
    struct sockaddr *, struct sctp_nets **, struct sockaddr *, struct sctp_tcb *);

struct sctp_tcb *sctp_findassociation_ep_asocid(struct sctp_inpcb *, caddr_t);

struct sctp_tcb *sctp_findassociation_ep_asconf(struct mbuf *, int, int,
    struct sctphdr *, struct sctp_inpcb **, struct sctp_nets **);
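
/*
 * Lookup sketch (illustrative; the argument naming is an assumption, see
 * the definition in sctp_pcb.c for the authoritative order): given an
 * endpoint and the peer's address, find the association and its current
 * destination:
 *
 *      struct sctp_nets *net;
 *      struct sctp_tcb *stcb;
 *
 *      stcb = sctp_findassociation_ep_addr(&inp, to_peer, &net, from_local, NULL);
 *      if (stcb == NULL)
 *              ... no association yet ...
 *
 * inp is passed by reference because, per the comment above, the TCP model
 * may change which endpoint the association actually lives on.
 */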

int sctp_inpcb_alloc(struct socket *);

int sctp_is_address_on_local_host(struct sockaddr *addr);

void sctp_inpcb_free(struct sctp_inpcb *, int);

struct sctp_tcb *sctp_aloc_assoc(struct sctp_inpcb *, struct sockaddr *,
    int, int *, uint32_t);

void sctp_free_assoc(struct sctp_inpcb *, struct sctp_tcb *);

int sctp_add_local_addr_ep(struct sctp_inpcb *, struct ifaddr *);

int sctp_insert_laddr(struct sctpladdr *, struct ifaddr *);

void sctp_remove_laddr(struct sctp_laddr *);

int sctp_del_local_addr_ep(struct sctp_inpcb *, struct ifaddr *);

int sctp_del_local_addr_ep_sa(struct sctp_inpcb *, struct sockaddr *);

int sctp_add_remote_addr(struct sctp_tcb *, struct sockaddr *, int, int);

int sctp_del_remote_addr(struct sctp_tcb *, struct sockaddr *);

void sctp_pcb_init(void);

void sctp_free_remote_addr(struct sctp_nets *);

int sctp_add_local_addr_assoc(struct sctp_tcb *, struct ifaddr *);

int sctp_del_local_addr_assoc(struct sctp_tcb *, struct ifaddr *);

int sctp_del_local_addr_assoc_sa(struct sctp_tcb *, struct sockaddr *);

int sctp_load_addresses_from_init(struct sctp_tcb *, struct mbuf *, int, int,
    int, struct sctphdr *, struct sockaddr *);

int sctp_set_primary_addr(struct sctp_tcb *, struct sockaddr *, struct sctp_nets *);

int sctp_is_vtag_good(struct sctp_inpcb *, uint32_t, struct timeval *);

/*void sctp_drain(void);*/

int sctp_destination_is_reachable(struct sctp_tcb *, struct sockaddr *);

int sctp_add_to_socket_q(struct sctp_inpcb *, struct sctp_tcb *);

struct sctp_tcb *sctp_remove_from_socket_q(struct sctp_inpcb *);

/* A NULL inpcb in the last arg indicates a run on ALL eps. A specific
 * inp in the last arg indicates a run on ONLY the assocs of the
 * specified endpoint.
 */
sctp_initiate_iterator(asoc_func af, uint32_t, uint32_t, void *, uint32_t,
    end_func ef, struct sctp_inpcb *);
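
/*
 * Usage sketch (illustrative; my_per_assoc_fn, my_done_fn and my_arg are
 * hypothetical, and the uint32_t and void * arguments are whatever state
 * the iterator implementation expects):
 *
 *      sctp_initiate_iterator(my_per_assoc_fn, flags1, flags2, my_arg,
 *          val, my_done_fn, NULL);
 *
 * A NULL final argument walks the assocs of every endpoint; a specific inp
 * restricts the walk to that endpoint alone.
 */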

#if defined(__APPLE__)
void sctp_callout_alloc(struct sctp_timer *);
void sctp_callout_free(struct callout *);
#endif

extern void in6_sin6_2_sin(struct sockaddr_in *, struct sockaddr_in6 *sin6);

#endif /* _KERNEL */

#endif /* _NETINET_SCTP_PCB_H_ */