/* $KAME: sctp_pcb.c,v 1.37 2004/08/17 06:28:02 t-momose Exp $ */
/* $DragonFly: src/sys/netinet/sctp_pcb.c,v 1.12 2007/04/22 01:13:14 dillon Exp $ */

/*
 * Copyright (c) 2001, 2002, 2003, 2004 Cisco Systems, Inc.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *      This product includes software developed by Cisco Systems, Inc.
 * 4. Neither the name of the project nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY CISCO SYSTEMS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL CISCO SYSTEMS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */
#if !(defined(__OpenBSD__) || defined(__APPLE__))
#include "opt_ipsec.h"
#endif
#if defined(__FreeBSD__) || defined(__DragonFly__)
#include "opt_compat.h"
#include "opt_inet6.h"
#include "opt_inet.h"
#endif
#if defined(__NetBSD__)
#include "opt_inet.h"
#endif
#ifdef __APPLE__
#include <sctp.h>
#elif !defined(__OpenBSD__)
#include "opt_sctp.h"
#endif

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/malloc.h>
#include <sys/mbuf.h>
#include <sys/domain.h>
#include <sys/protosw.h>
#include <sys/socket.h>
#include <sys/socketvar.h>
#include <sys/proc.h>
#include <sys/kernel.h>
#include <sys/sysctl.h>
#include <sys/thread2.h>
#if defined(__FreeBSD__) || defined(__APPLE__) || defined(__DragonFly__)
#include <sys/random.h>
#endif
#if defined(__NetBSD__)
#include <sys/rnd.h>
#endif
#if defined(__OpenBSD__)
#include <dev/rndvar.h>
#endif

#if defined(__APPLE__)
#include <netinet/sctp_callout.h>
#elif defined(__OpenBSD__)
#include <sys/timeout.h>
#else
#include <sys/callout.h>
#endif

#if (defined(__FreeBSD__) && __FreeBSD_version >= 500000)
#include <sys/limits.h>
#else
#include <machine/limits.h>
#endif
#include <machine/cpu.h>

#include <net/if.h>
#include <net/if_types.h>
#include <net/route.h>
#include <netinet/in.h>
#include <netinet/in_systm.h>
#include <netinet/ip.h>
#include <netinet/in_pcb.h>
#include <netinet/in_var.h>
#include <netinet/ip_var.h>

#ifdef INET6
#include <netinet/ip6.h>
#include <netinet6/ip6_var.h>
#include <netinet6/scope6_var.h>
#if defined(__FreeBSD__) || defined(__NetBSD__) || defined(__DragonFly__)
#include <netinet6/in6_pcb.h>
#elif defined(__OpenBSD__)
#include <netinet/in_pcb.h>
#endif
#endif /* INET6 */

#ifdef IPSEC
#include <netinet6/ipsec.h>
#include <netproto/key/key.h>
#endif /* IPSEC */

#include <netinet/sctp_var.h>
#include <netinet/sctp_pcb.h>
#include <netinet/sctputil.h>
#include <netinet/sctp.h>
#include <netinet/sctp_header.h>
#include <netinet/sctp_asconf.h>
#include <netinet/sctp_output.h>
#include <netinet/sctp_timer.h>
128 #ifndef SCTP_PCBHASHSIZE
129 /* default number of association hash buckets in each endpoint */
130 #define SCTP_PCBHASHSIZE 256
134 u_int32_t sctp_debug_on
= 0;
135 #endif /* SCTP_DEBUG */
137 u_int32_t sctp_pegs
[SCTP_NUMBER_OF_PEGS
];
139 int sctp_pcbtblsize
= SCTP_PCBHASHSIZE
;
141 struct sctp_epinfo sctppcbinfo
;
143 /* FIX: we don't handle multiple link local scopes */
144 /* "scopeless" replacement IN6_ARE_ADDR_EQUAL */
146 SCTP6_ARE_ADDR_EQUAL(struct in6_addr
*a
, struct in6_addr
*b
)
148 struct in6_addr tmp_a
, tmp_b
;
149 /* use a copy of a and b */
152 in6_clearscope(&tmp_a
);
153 in6_clearscope(&tmp_b
);
154 return (IN6_ARE_ADDR_EQUAL(&tmp_a
, &tmp_b
));
158 extern int ipport_firstauto
;
159 extern int ipport_lastauto
;
160 extern int ipport_hifirstauto
;
161 extern int ipport_hilastauto
;
#if defined(__FreeBSD__) && __FreeBSD_version > 500000

void sctp_validate_no_locks(void);

void
SCTP_INP_RLOCK(struct sctp_inpcb *inp)
{
	struct sctp_tcb *stcb;

	LIST_FOREACH(stcb, &inp->sctp_asoc_list, sctp_tcblist) {
		if (mtx_owned(&(stcb)->tcb_mtx))
			panic("I own TCB lock?");
	}
	if (mtx_owned(&(inp)->inp_mtx))
		panic("INP Recursive Lock-R");
	mtx_lock(&(inp)->inp_mtx);
}

void
SCTP_INP_WLOCK(struct sctp_inpcb *inp)
{
	SCTP_INP_RLOCK(inp);
}

void
SCTP_INP_INFO_RLOCK(void)
{
	struct sctp_inpcb *inp;
	struct sctp_tcb *stcb;

	LIST_FOREACH(inp, &sctppcbinfo.listhead, sctp_list) {
		if (mtx_owned(&(inp)->inp_mtx))
			panic("info-lock and own inp lock?");
		LIST_FOREACH(stcb, &inp->sctp_asoc_list, sctp_tcblist) {
			if (mtx_owned(&(stcb)->tcb_mtx))
				panic("Info lock and own a tcb lock?");
		}
	}
	if (mtx_owned(&sctppcbinfo.ipi_ep_mtx))
		panic("INP INFO Recursive Lock-R");
	mtx_lock(&sctppcbinfo.ipi_ep_mtx);
}

void
SCTP_INP_INFO_WLOCK(void)
{
	SCTP_INP_INFO_RLOCK();
}

void sctp_validate_no_locks(void)
{
	struct sctp_inpcb *inp;
	struct sctp_tcb *stcb;

	if (mtx_owned(&sctppcbinfo.ipi_ep_mtx))
		panic("INP INFO lock is owned?");

	LIST_FOREACH(inp, &sctppcbinfo.listhead, sctp_list) {
		if (mtx_owned(&(inp)->inp_mtx))
			panic("You own an INP lock?");
		LIST_FOREACH(stcb, &inp->sctp_asoc_list, sctp_tcblist) {
			if (mtx_owned(&(stcb)->tcb_mtx))
				panic("You own a TCB lock?");
		}
	}
}
#endif
void
sctp_fill_pcbinfo(struct sctp_pcbinfo *spcb)
{
	/*
	 * We really don't need to lock this, but I will just
	 * because it does not hurt.
	 */
	SCTP_INP_INFO_RLOCK();
	spcb->ep_count = sctppcbinfo.ipi_count_ep;
	spcb->asoc_count = sctppcbinfo.ipi_count_asoc;
	spcb->laddr_count = sctppcbinfo.ipi_count_laddr;
	spcb->raddr_count = sctppcbinfo.ipi_count_raddr;
	spcb->chk_count = sctppcbinfo.ipi_count_chunk;
	spcb->sockq_count = sctppcbinfo.ipi_count_sockq;
	spcb->mbuf_track = sctppcbinfo.mbuf_track;
	SCTP_INP_INFO_RUNLOCK();
}
/*
 * Notes on locks for FreeBSD 5 and up. All association
 * lookups that have a definite ep, the INP structure is
 * assumed to be locked for reading. If we need to go
 * find the INP (usually when a **inp is passed) then
 * we must lock the INFO structure first and if needed
 * lock the INP too. Note that if we lock it we must
 * unlock it again before returning.
 */

/*
 * Given an endpoint, look and find in its association list any association
 * with the "to" address given. This can be a "from" address, too, for
 * inbound packets. For outbound packets it is a true "to" address.
 */
static struct sctp_tcb *
sctp_tcb_special_locate(struct sctp_inpcb **inp_p, struct sockaddr *from,
    struct sockaddr *to, struct sctp_nets **netp)
{
	/**** ASSUMES THE CALLER holds the INP_INFO_RLOCK */

	/*
	 * Note for this module care must be taken when observing what "to"
	 * is for. In most of the rest of the code the TO field represents
	 * my peer and the FROM field represents my address. For this module
	 * it is reversed of that.
	 */
	/*
	 * If we support the TCP model, then we must now dig through to
	 * see if we can find our endpoint in the list of tcp ep's.
	 */
	uint16_t lport, rport;
	struct sctppcbhead *ephead;
	struct sctp_inpcb *inp;
	struct sctp_laddr *laddr;
	struct sctp_tcb *stcb;
	struct sctp_nets *net;

	if ((to == NULL) || (from == NULL)) {
		return (NULL);
	}

	if (to->sa_family == AF_INET && from->sa_family == AF_INET) {
		lport = ((struct sockaddr_in *)to)->sin_port;
		rport = ((struct sockaddr_in *)from)->sin_port;
	} else if (to->sa_family == AF_INET6 && from->sa_family == AF_INET6) {
		lport = ((struct sockaddr_in6 *)to)->sin6_port;
		rport = ((struct sockaddr_in6 *)from)->sin6_port;
	} else {
		return (NULL);
	}
	ephead = &sctppcbinfo.sctp_tcpephash[SCTP_PCBHASH_ALLADDR(
	    (lport + rport), sctppcbinfo.hashtcpmark)];
	/*
	 * Ok now for each of the guys in this bucket we must look and see:
	 * - Does the remote port match.
	 * - Does the single association's addresses match this address (to).
	 * If so we update p_ep to point to this ep and return the tcb from
	 * it.
	 */
	LIST_FOREACH(inp, ephead, sctp_hash) {
		if (lport != inp->sctp_lport) {
			continue;
		}
		SCTP_INP_RLOCK(inp);
		/* check to see if the ep has one of the addresses */
		if ((inp->sctp_flags & SCTP_PCB_FLAGS_BOUNDALL) == 0) {
			/* We are NOT bound all, so look further */
			LIST_FOREACH(laddr, &inp->sctp_addr_list, sctp_nxt_addr) {
				if (laddr->ifa == NULL) {
#ifdef SCTP_DEBUG
					if (sctp_debug_on & SCTP_DEBUG_PCB1) {
						kprintf("An ounce of prevention is worth a pound of cure\n");
					}
#endif
					continue;
				}
				if (laddr->ifa->ifa_addr == NULL) {
#ifdef SCTP_DEBUG
					if (sctp_debug_on & SCTP_DEBUG_PCB1) {
						kprintf("ifa with a NULL address\n");
					}
#endif
					continue;
				}
				if (laddr->ifa->ifa_addr->sa_family ==
				    to->sa_family) {
					/* see if it matches */
					struct sockaddr_in *intf_addr, *sin;

					intf_addr = (struct sockaddr_in *)
					    laddr->ifa->ifa_addr;
					sin = (struct sockaddr_in *)to;
					if (from->sa_family == AF_INET) {
						if (sin->sin_addr.s_addr ==
						    intf_addr->sin_addr.s_addr) {
							/* found it */
							SCTP_INP_RUNLOCK(inp);
							break;
						}
					} else {
						struct sockaddr_in6 *intf_addr6;
						struct sockaddr_in6 *sin6;

						sin6 = (struct sockaddr_in6 *)
						    to;
						intf_addr6 = (struct sockaddr_in6 *)
						    laddr->ifa->ifa_addr;

						if (SCTP6_ARE_ADDR_EQUAL(&sin6->sin6_addr,
						    &intf_addr6->sin6_addr)) {
							/* found it */
							SCTP_INP_RUNLOCK(inp);
							break;
						}
					}
				}
			}
			if (laddr == NULL) {
				/* This endpoint does not have this address */
				SCTP_INP_RUNLOCK(inp);
				continue;
			}
		}
		/*
		 * Ok if we hit here the ep has the address, does it hold the
		 * tcb?
		 */
		stcb = LIST_FIRST(&inp->sctp_asoc_list);
		if (stcb == NULL) {
			SCTP_INP_RUNLOCK(inp);
			continue;
		}
		SCTP_TCB_LOCK(stcb);
		if (stcb->rport != rport) {
			/* remote port does not match. */
			SCTP_TCB_UNLOCK(stcb);
			SCTP_INP_RUNLOCK(inp);
			continue;
		}
		/* Does this TCB have a matching address? */
		TAILQ_FOREACH(net, &stcb->asoc.nets, sctp_next) {
			if (net->ro._l_addr.sa.sa_family != from->sa_family) {
				/* not the same family, can't be a match */
				continue;
			}
			if (from->sa_family == AF_INET) {
				struct sockaddr_in *sin, *rsin;

				sin = (struct sockaddr_in *)&net->ro._l_addr;
				rsin = (struct sockaddr_in *)from;
				if (sin->sin_addr.s_addr ==
				    rsin->sin_addr.s_addr) {
					/* found it */
					if (netp != NULL) {
						*netp = net;
					}
					/* Update the endpoint pointer */
					*inp_p = inp;
					SCTP_INP_RUNLOCK(inp);
					return (stcb);
				}
			} else {
				struct sockaddr_in6 *sin6, *rsin6;

				sin6 = (struct sockaddr_in6 *)&net->ro._l_addr;
				rsin6 = (struct sockaddr_in6 *)from;
				if (SCTP6_ARE_ADDR_EQUAL(&sin6->sin6_addr,
				    &rsin6->sin6_addr)) {
					/* found it */
					if (netp != NULL) {
						*netp = net;
					}
					/* Update the endpoint pointer */
					*inp_p = inp;
					SCTP_INP_RUNLOCK(inp);
					return (stcb);
				}
			}
		}
		SCTP_TCB_UNLOCK(stcb);
		SCTP_INP_RUNLOCK(inp);
	}
	return (NULL);
}
struct sctp_tcb *
sctp_findassociation_ep_asconf(struct mbuf *m, int iphlen, int offset,
    struct sctphdr *sh, struct sctp_inpcb **inp_p, struct sctp_nets **netp)
{
	struct sctp_tcb *stcb;
	struct sockaddr_in *sin;
	struct sockaddr_in6 *sin6;
	struct sockaddr_storage local_store, remote_store;
	struct ip *iph;
	struct sctp_paramhdr parm_buf, *phdr;
	int ptype;

	memset(&local_store, 0, sizeof(local_store));
	memset(&remote_store, 0, sizeof(remote_store));

	/* First get the destination address setup too. */
	iph = mtod(m, struct ip *);
	if (iph->ip_v == IPVERSION) {
		/* its IPv4 */
		sin = (struct sockaddr_in *)&local_store;
		sin->sin_family = AF_INET;
		sin->sin_len = sizeof(*sin);
		sin->sin_port = sh->dest_port;
		sin->sin_addr.s_addr = iph->ip_dst.s_addr;
	} else if (iph->ip_v == (IPV6_VERSION >> 4)) {
		/* its IPv6 */
		struct ip6_hdr *ip6;

		ip6 = mtod(m, struct ip6_hdr *);
		sin6 = (struct sockaddr_in6 *)&local_store;
		sin6->sin6_family = AF_INET6;
		sin6->sin6_len = sizeof(*sin6);
		sin6->sin6_port = sh->dest_port;
		sin6->sin6_addr = ip6->ip6_dst;
	} else {
		return (NULL);
	}

	phdr = sctp_get_next_param(m, offset + sizeof(struct sctp_asconf_chunk),
	    &parm_buf, sizeof(struct sctp_paramhdr));
	if (phdr == NULL) {
#ifdef SCTP_DEBUG
		if (sctp_debug_on & SCTP_DEBUG_INPUT3) {
			kprintf("sctp_process_control: failed to get asconf lookup addr\n");
		}
#endif /* SCTP_DEBUG */
		return (NULL);
	}
	ptype = (int)((u_int)ntohs(phdr->param_type));
	/* get the correlation address */
	if (ptype == SCTP_IPV6_ADDRESS) {
		/* ipv6 address param */
		struct sctp_ipv6addr_param *p6, p6_buf;

		if (ntohs(phdr->param_length) != sizeof(struct sctp_ipv6addr_param)) {
			return (NULL);
		}

		p6 = (struct sctp_ipv6addr_param *)sctp_get_next_param(m,
		    offset + sizeof(struct sctp_asconf_chunk),
		    &p6_buf.ph, sizeof(*p6));
		if (p6 == NULL) {
#ifdef SCTP_DEBUG
			if (sctp_debug_on & SCTP_DEBUG_INPUT3) {
				kprintf("sctp_process_control: failed to get asconf v6 lookup addr\n");
			}
#endif /* SCTP_DEBUG */
			return (NULL);
		}
		sin6 = (struct sockaddr_in6 *)&remote_store;
		sin6->sin6_family = AF_INET6;
		sin6->sin6_len = sizeof(*sin6);
		sin6->sin6_port = sh->src_port;
		memcpy(&sin6->sin6_addr, &p6->addr, sizeof(struct in6_addr));
	} else if (ptype == SCTP_IPV4_ADDRESS) {
		/* ipv4 address param */
		struct sctp_ipv4addr_param *p4, p4_buf;

		if (ntohs(phdr->param_length) != sizeof(struct sctp_ipv4addr_param)) {
			return (NULL);
		}

		p4 = (struct sctp_ipv4addr_param *)sctp_get_next_param(m,
		    offset + sizeof(struct sctp_asconf_chunk),
		    &p4_buf.ph, sizeof(*p4));
		if (p4 == NULL) {
#ifdef SCTP_DEBUG
			if (sctp_debug_on & SCTP_DEBUG_INPUT3) {
				kprintf("sctp_process_control: failed to get asconf v4 lookup addr\n");
			}
#endif /* SCTP_DEBUG */
			return (NULL);
		}
		sin = (struct sockaddr_in *)&remote_store;
		sin->sin_family = AF_INET;
		sin->sin_len = sizeof(*sin);
		sin->sin_port = sh->src_port;
		memcpy(&sin->sin_addr, &p4->addr, sizeof(struct in_addr));
	} else {
		/* invalid address param type */
		return (NULL);
	}

	stcb = sctp_findassociation_ep_addr(inp_p,
	    (struct sockaddr *)&remote_store, netp,
	    (struct sockaddr *)&local_store, NULL);
	return (stcb);
}
struct sctp_tcb *
sctp_findassociation_ep_addr(struct sctp_inpcb **inp_p, struct sockaddr *remote,
    struct sctp_nets **netp, struct sockaddr *local, struct sctp_tcb *locked_tcb)
{
	struct sctpasochead *head;
	struct sctp_inpcb *inp;
	struct sctp_tcb *stcb;
	struct sctp_nets *net;
	uint16_t rport;

	inp = *inp_p;
	if (remote->sa_family == AF_INET) {
		rport = (((struct sockaddr_in *)remote)->sin_port);
	} else if (remote->sa_family == AF_INET6) {
		rport = (((struct sockaddr_in6 *)remote)->sin6_port);
	} else {
		return (NULL);
	}
	if (locked_tcb) {
		/*
		 * UN-lock so we can do proper locking here;
		 * this occurs when called from load_addresses_from_init.
		 */
		SCTP_TCB_UNLOCK(locked_tcb);
	}
	SCTP_INP_INFO_RLOCK();
	if (inp->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) {
		/*
		 * Now either this guy is our listener or it's the connector.
		 * If it is the one that issued the connect, then its only
		 * chance is to be the first TCB in the list. If it is the
		 * acceptor, then do the special_lookup to hash and find the
		 * real inp.
		 */
		if (inp->sctp_flags & SCTP_PCB_FLAGS_ACCEPTING) {
			/* to is peer addr, from is my addr */
			stcb = sctp_tcb_special_locate(inp_p, remote, local,
			    netp);
			if ((stcb != NULL) && (locked_tcb == NULL)){
				/* we have a locked tcb, lower refcount */
				SCTP_INP_WLOCK(inp);
				SCTP_INP_DECR_REF(inp);
				SCTP_INP_WUNLOCK(inp);
			}
			if (locked_tcb != NULL) {
				SCTP_INP_RLOCK(locked_tcb->sctp_ep);
				SCTP_TCB_LOCK(locked_tcb);
				SCTP_INP_RUNLOCK(locked_tcb->sctp_ep);
				if (stcb != NULL)
					SCTP_TCB_UNLOCK(stcb);
			}
			SCTP_INP_INFO_RUNLOCK();
			return (stcb);
		}
		SCTP_INP_WLOCK(inp);
		stcb = LIST_FIRST(&inp->sctp_asoc_list);
		if (stcb == NULL) {
			goto null_return;
		}
		SCTP_TCB_LOCK(stcb);
		if (stcb->rport != rport) {
			/* remote port does not match. */
			SCTP_TCB_UNLOCK(stcb);
			goto null_return;
		}
		/* now look at the list of remote addresses */
		TAILQ_FOREACH(net, &stcb->asoc.nets, sctp_next) {
			if (net->ro._l_addr.sa.sa_family !=
			    remote->sa_family) {
				/* not the same family */
				continue;
			}
			if (remote->sa_family == AF_INET) {
				struct sockaddr_in *sin, *rsin;

				sin = (struct sockaddr_in *)
				    &net->ro._l_addr;
				rsin = (struct sockaddr_in *)remote;
				if (sin->sin_addr.s_addr ==
				    rsin->sin_addr.s_addr) {
					/* found it */
					if (netp != NULL) {
						*netp = net;
					}
					if (locked_tcb == NULL) {
						SCTP_INP_DECR_REF(inp);
					}
					SCTP_INP_WUNLOCK(inp);
					SCTP_INP_INFO_RUNLOCK();
					return (stcb);
				}
			} else if (remote->sa_family == AF_INET6) {
				struct sockaddr_in6 *sin6, *rsin6;

				sin6 = (struct sockaddr_in6 *)&net->ro._l_addr;
				rsin6 = (struct sockaddr_in6 *)remote;
				if (SCTP6_ARE_ADDR_EQUAL(&sin6->sin6_addr,
				    &rsin6->sin6_addr)) {
					/* found it */
					if (netp != NULL) {
						*netp = net;
					}
					if (locked_tcb == NULL) {
						SCTP_INP_DECR_REF(inp);
					}
					SCTP_INP_WUNLOCK(inp);
					SCTP_INP_INFO_RUNLOCK();
					return (stcb);
				}
			}
		}
		SCTP_TCB_UNLOCK(stcb);
		goto null_return;
	}
	SCTP_INP_WLOCK(inp);
	head = &inp->sctp_tcbhash[SCTP_PCBHASH_ALLADDR(rport,
	    inp->sctp_hashmark)];

	LIST_FOREACH(stcb, head, sctp_tcbhash) {
		if (stcb->rport != rport) {
			/* remote port does not match */
			continue;
		}
		/* now look at the list of remote addresses */
		SCTP_TCB_LOCK(stcb);
		TAILQ_FOREACH(net, &stcb->asoc.nets, sctp_next) {
			if (net->ro._l_addr.sa.sa_family !=
			    remote->sa_family) {
				/* not the same family */
				continue;
			}
			if (remote->sa_family == AF_INET) {
				struct sockaddr_in *sin, *rsin;

				sin = (struct sockaddr_in *)
				    &net->ro._l_addr;
				rsin = (struct sockaddr_in *)remote;
				if (sin->sin_addr.s_addr ==
				    rsin->sin_addr.s_addr) {
					/* found it */
					if (netp != NULL) {
						*netp = net;
					}
					if (locked_tcb == NULL) {
						SCTP_INP_DECR_REF(inp);
					}
					SCTP_INP_WUNLOCK(inp);
					SCTP_INP_INFO_RUNLOCK();
					return (stcb);
				}
			} else if (remote->sa_family == AF_INET6) {
				struct sockaddr_in6 *sin6, *rsin6;

				sin6 = (struct sockaddr_in6 *)
				    &net->ro._l_addr;
				rsin6 = (struct sockaddr_in6 *)remote;
				if (SCTP6_ARE_ADDR_EQUAL(&sin6->sin6_addr,
				    &rsin6->sin6_addr)) {
					/* found it */
					if (netp != NULL) {
						*netp = net;
					}
					if (locked_tcb == NULL) {
						SCTP_INP_DECR_REF(inp);
					}
					SCTP_INP_WUNLOCK(inp);
					SCTP_INP_INFO_RUNLOCK();
					return (stcb);
				}
			}
		}
		SCTP_TCB_UNLOCK(stcb);
	}
null_return:
	/* clean up for returning null */
	if (locked_tcb) {
		if (locked_tcb->sctp_ep != inp) {
			SCTP_INP_RLOCK(locked_tcb->sctp_ep);
			SCTP_TCB_LOCK(locked_tcb);
			SCTP_INP_RUNLOCK(locked_tcb->sctp_ep);
		} else
			SCTP_TCB_LOCK(locked_tcb);
	}
	SCTP_INP_WUNLOCK(inp);
	SCTP_INP_INFO_RUNLOCK();
	/* not found */
	return (NULL);
}
/*
 * Find an association for a specific endpoint using the association id
 * given out in the COMM_UP notification.
 */
struct sctp_tcb *
sctp_findassociation_ep_asocid(struct sctp_inpcb *inp, caddr_t asoc_id)
{
	/*
	 * Use the assoc_id to find the association on this endpoint.
	 */
	struct sctpasochead *head;
	struct sctp_tcb *stcb;
	u_int32_t vtag;

	if (asoc_id == 0 || inp == NULL) {
		return (NULL);
	}
	SCTP_INP_INFO_RLOCK();
	vtag = (u_int32_t)asoc_id;
	head = &sctppcbinfo.sctp_asochash[SCTP_PCBHASH_ASOC(vtag,
	    sctppcbinfo.hashasocmark)];
	if (head == NULL) {
		/* invalid vtag */
		SCTP_INP_INFO_RUNLOCK();
		return (NULL);
	}
	LIST_FOREACH(stcb, head, sctp_asocs) {
		SCTP_INP_RLOCK(stcb->sctp_ep);
		SCTP_TCB_LOCK(stcb);
		SCTP_INP_RUNLOCK(stcb->sctp_ep);
		if (stcb->asoc.my_vtag == vtag) {
			/* candidate */
			if (inp != stcb->sctp_ep) {
				/*
				 * some other guy has the
				 * same vtag active (vtag collision).
				 */
				sctp_pegs[SCTP_VTAG_BOGUS]++;
				SCTP_TCB_UNLOCK(stcb);
				continue;
			}
			sctp_pegs[SCTP_VTAG_EXPR]++;
			SCTP_INP_INFO_RUNLOCK();
			return (stcb);
		}
		SCTP_TCB_UNLOCK(stcb);
	}
	SCTP_INP_INFO_RUNLOCK();
	return (NULL);
}
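
/*
 * Walk a single endpoint hash bucket looking for an endpoint bound to the
 * given port and address. Bound-all endpoints match on the port alone
 * (subject to the v4/v6 binding rules); otherwise the specific bound
 * address list is searched. The caller must hold the INP_INFO lock.
 */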
static struct sctp_inpcb *
sctp_endpoint_probe(struct sockaddr *nam, struct sctppcbhead *head,
    uint16_t lport)
{
	struct sctp_inpcb *inp;
	struct sockaddr_in *sin;
	struct sockaddr_in6 *sin6;
	struct sctp_laddr *laddr;

	/*
	 * Endpoint probe expects that the INP_INFO is locked.
	 */
	if (nam->sa_family == AF_INET) {
		sin = (struct sockaddr_in *)nam;
		sin6 = NULL;
	} else if (nam->sa_family == AF_INET6) {
		sin6 = (struct sockaddr_in6 *)nam;
		sin = NULL;
	} else {
		/* unsupported family */
		return (NULL);
	}

	LIST_FOREACH(inp, head, sctp_hash) {
		SCTP_INP_RLOCK(inp);
		if ((inp->sctp_flags & SCTP_PCB_FLAGS_BOUNDALL) &&
		    (inp->sctp_lport == lport)) {
			/* got it */
			if ((nam->sa_family == AF_INET) &&
			    (inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) &&
#if defined(__FreeBSD__) || defined(__APPLE__) || defined(__DragonFly__)
			    (((struct inpcb *)inp)->inp_flags & IN6P_IPV6_V6ONLY)
#else
#if defined(__OpenBSD__)
			    (0)	/* For OpenBSD we do dual bind only */
#else
			    (((struct in6pcb *)inp)->in6p_flags & IN6P_IPV6_V6ONLY)
#endif
#endif
			    ) {
				/* IPv4 on a IPv6 socket with ONLY IPv6 set */
				SCTP_INP_RUNLOCK(inp);
				continue;
			}
			/* A V6 address and the endpoint is NOT bound V6 */
			if (nam->sa_family == AF_INET6 &&
			    (inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) == 0) {
				SCTP_INP_RUNLOCK(inp);
				continue;
			}
			SCTP_INP_RUNLOCK(inp);
			return (inp);
		}
		SCTP_INP_RUNLOCK(inp);
	}

	if ((nam->sa_family == AF_INET) &&
	    (sin->sin_addr.s_addr == INADDR_ANY)) {
		/* Can't hunt for one that has no address specified */
		return (NULL);
	} else if ((nam->sa_family == AF_INET6) &&
	    (IN6_IS_ADDR_UNSPECIFIED(&sin6->sin6_addr))) {
		/* Can't hunt for one that has no address specified */
		return (NULL);
	}
	/*
	 * ok, not bound to all so see if we can find an EP bound to this
	 * address.
	 */
#ifdef SCTP_DEBUG
	if (sctp_debug_on & SCTP_DEBUG_PCB1) {
		kprintf("Ok, there is NO bound-all available for port:%x\n", ntohs(lport));
	}
#endif
	LIST_FOREACH(inp, head, sctp_hash) {
		SCTP_INP_RLOCK(inp);
		if ((inp->sctp_flags & SCTP_PCB_FLAGS_BOUNDALL)) {
			SCTP_INP_RUNLOCK(inp);
			continue;
		}
		/*
		 * Ok this could be a likely candidate, look at all of
		 * its addresses.
		 */
		if (inp->sctp_lport != lport) {
			SCTP_INP_RUNLOCK(inp);
			continue;
		}
#ifdef SCTP_DEBUG
		if (sctp_debug_on & SCTP_DEBUG_PCB1) {
			kprintf("Ok, found matching local port\n");
		}
#endif
		LIST_FOREACH(laddr, &inp->sctp_addr_list, sctp_nxt_addr) {
			if (laddr->ifa == NULL) {
#ifdef SCTP_DEBUG
				if (sctp_debug_on & SCTP_DEBUG_PCB1) {
					kprintf("An ounce of prevention is worth a pound of cure\n");
				}
#endif
				continue;
			}
#ifdef SCTP_DEBUG
			if (sctp_debug_on & SCTP_DEBUG_PCB1) {
				kprintf("Ok laddr->ifa:%p is possible, ",
				    laddr->ifa);
			}
#endif
			if (laddr->ifa->ifa_addr == NULL) {
#ifdef SCTP_DEBUG
				if (sctp_debug_on & SCTP_DEBUG_PCB1) {
					kprintf("Huh IFA as an ifa_addr=NULL, ");
				}
#endif
				continue;
			}
#ifdef SCTP_DEBUG
			if (sctp_debug_on & SCTP_DEBUG_PCB1) {
				kprintf("Ok laddr->ifa:%p is possible, ",
				    laddr->ifa->ifa_addr);
				sctp_print_address(laddr->ifa->ifa_addr);
				kprintf("looking for ");
				sctp_print_address(nam);
			}
#endif
			if (laddr->ifa->ifa_addr->sa_family == nam->sa_family) {
				/* possible, see if it matches */
				struct sockaddr_in *intf_addr;

				intf_addr = (struct sockaddr_in *)
				    laddr->ifa->ifa_addr;
				if (nam->sa_family == AF_INET) {
					if (sin->sin_addr.s_addr ==
					    intf_addr->sin_addr.s_addr) {
#ifdef SCTP_DEBUG
						if (sctp_debug_on & SCTP_DEBUG_PCB1) {
							kprintf("YES, return ep:%p\n", inp);
						}
#endif
						SCTP_INP_RUNLOCK(inp);
						return (inp);
					}
				} else if (nam->sa_family == AF_INET6) {
					struct sockaddr_in6 *intf_addr6;

					intf_addr6 = (struct sockaddr_in6 *)
					    laddr->ifa->ifa_addr;
					if (SCTP6_ARE_ADDR_EQUAL(&sin6->sin6_addr,
					    &intf_addr6->sin6_addr)) {
#ifdef SCTP_DEBUG
						if (sctp_debug_on & SCTP_DEBUG_PCB1) {
							kprintf("YES, return ep:%p\n", inp);
						}
#endif
						SCTP_INP_RUNLOCK(inp);
						return (inp);
					}
				}
			}
		}
		SCTP_INP_RUNLOCK(inp);
	}
#ifdef SCTP_DEBUG
	if (sctp_debug_on & SCTP_DEBUG_PCB1) {
		kprintf("NO, Falls out to NULL\n");
	}
#endif
	return (NULL);
}
struct sctp_inpcb *
sctp_pcb_findep(struct sockaddr *nam, int find_tcp_pool, int have_lock)
{
	/*
	 * First we check the hash table to see if someone has this port
	 * bound with just the port.
	 */
	struct sctp_inpcb *inp;
	struct sctppcbhead *head;
	struct sockaddr_in *sin;
	struct sockaddr_in6 *sin6;
	uint16_t lport;

#ifdef SCTP_DEBUG
	if (sctp_debug_on & SCTP_DEBUG_PCB1) {
		kprintf("Looking for endpoint %d :",
		    ntohs(((struct sockaddr_in *)nam)->sin_port));
		sctp_print_address(nam);
	}
#endif
	if (nam->sa_family == AF_INET) {
		sin = (struct sockaddr_in *)nam;
		lport = ((struct sockaddr_in *)nam)->sin_port;
	} else if (nam->sa_family == AF_INET6) {
		sin6 = (struct sockaddr_in6 *)nam;
		lport = ((struct sockaddr_in6 *)nam)->sin6_port;
	} else {
		/* unsupported family */
		return (NULL);
	}
	/*
	 * I could cheat here and just cast to one of the types but we will
	 * do it right. It also provides the check against an Unsupported
	 * Family.
	 */
	/* Find the head of the ALLADDR chain */
	if (have_lock == 0)
		SCTP_INP_INFO_RLOCK();
	head = &sctppcbinfo.sctp_ephash[SCTP_PCBHASH_ALLADDR(lport,
	    sctppcbinfo.hashmark)];
#ifdef SCTP_DEBUG
	if (sctp_debug_on & SCTP_DEBUG_PCB1) {
		kprintf("Main hash to lookup at head:%p\n", head);
	}
#endif
	inp = sctp_endpoint_probe(nam, head, lport);

	/*
	 * If the TCP model exists it could be that the main listening
	 * endpoint is gone but there exists a connected socket for this
	 * guy yet. If so we can return the first one that we find. This
	 * may NOT be the correct one but the sctp_findassociation_ep_addr
	 * has further code to look at all TCP models.
	 */
	if (inp == NULL && find_tcp_pool) {
		int i;
#ifdef SCTP_DEBUG
		if (sctp_debug_on & SCTP_DEBUG_PCB1) {
			kprintf("EP was NULL and TCP model is supported\n");
		}
#endif
		for (i = 0; i < sctppcbinfo.hashtblsize; i++) {
			/*
			 * This is real gross, but we do NOT have a remote
			 * port at this point depending on who is calling. We
			 * must therefore look for ANY one that matches our
			 * local port.
			 */
			head = &sctppcbinfo.sctp_tcpephash[i];
			if (LIST_FIRST(head)) {
				inp = sctp_endpoint_probe(nam, head, lport);
				if (inp) {
					/* Found one */
					break;
				}
			}
		}
	}
#ifdef SCTP_DEBUG
	if (sctp_debug_on & SCTP_DEBUG_PCB1) {
		kprintf("EP to return is %p\n", inp);
	}
#endif
	if (have_lock == 0) {
		if (inp) {
			SCTP_INP_WLOCK(inp);
			SCTP_INP_INCR_REF(inp);
			SCTP_INP_WUNLOCK(inp);
		}
		SCTP_INP_INFO_RUNLOCK();
	} else {
		if (inp) {
			SCTP_INP_WLOCK(inp);
			SCTP_INP_INCR_REF(inp);
			SCTP_INP_WUNLOCK(inp);
		}
	}
	return (inp);
}
/*
 * Find an association for an endpoint with the pointer to whom you want
 * to send to and the endpoint pointer. The address can be IPv4 or IPv6.
 * We may need to change the *to to some other struct like an mbuf...
 */
struct sctp_tcb *
sctp_findassociation_addr_sa(struct sockaddr *to, struct sockaddr *from,
    struct sctp_inpcb **inp_p, struct sctp_nets **netp, int find_tcp_pool)
{
	struct sctp_inpcb *inp;
	struct sctp_tcb *retval;

	SCTP_INP_INFO_RLOCK();
	if (find_tcp_pool) {
		if (inp_p != NULL) {
			retval = sctp_tcb_special_locate(inp_p, from, to, netp);
		} else {
			retval = sctp_tcb_special_locate(&inp, from, to, netp);
		}
		if (retval != NULL) {
			SCTP_INP_INFO_RUNLOCK();
			return (retval);
		}
	}
	inp = sctp_pcb_findep(to, 0, 1);
	if (inp_p != NULL) {
		*inp_p = inp;
	}
	SCTP_INP_INFO_RUNLOCK();

	if (inp == NULL) {
		return (NULL);
	}

	/*
	 * ok, we have an endpoint, now lets find the assoc for it (if any)
	 * we now place the source address or from in the to of the find
	 * endpoint call. Since in reality this chain is used from the
	 * inbound packet side.
	 */
	if (inp_p != NULL) {
		return (sctp_findassociation_ep_addr(inp_p, from, netp, to, NULL));
	} else {
		return (sctp_findassociation_ep_addr(&inp, from, netp, to, NULL));
	}
}
/*
 * This routine will grub through the mbuf that is an INIT or INIT-ACK and
 * find all addresses that the sender has specified in any address list.
 * Each address will be used to lookup the TCB and see if one exists.
 */
static struct sctp_tcb *
sctp_findassociation_special_addr(struct mbuf *m, int iphlen, int offset,
    struct sctphdr *sh, struct sctp_inpcb **inp_p, struct sctp_nets **netp,
    struct sockaddr *dest)
{
	struct sockaddr_in sin4;
	struct sockaddr_in6 sin6;
	struct sctp_paramhdr *phdr, parm_buf;
	struct sctp_tcb *retval;
	u_int32_t ptype, plen;

	memset(&sin4, 0, sizeof(sin4));
	memset(&sin6, 0, sizeof(sin6));
	sin4.sin_len = sizeof(sin4);
	sin4.sin_family = AF_INET;
	sin4.sin_port = sh->src_port;
	sin6.sin6_len = sizeof(sin6);
	sin6.sin6_family = AF_INET6;
	sin6.sin6_port = sh->src_port;

	retval = NULL;
	offset += sizeof(struct sctp_init_chunk);

	phdr = sctp_get_next_param(m, offset, &parm_buf, sizeof(parm_buf));
	while (phdr != NULL) {
		/* now we must see if we want the parameter */
		ptype = ntohs(phdr->param_type);
		plen = ntohs(phdr->param_length);
		if (plen == 0) {
#ifdef SCTP_DEBUG
			if (sctp_debug_on & SCTP_DEBUG_PCB1) {
				kprintf("sctp_findassociation_special_addr: Impossible length in parameter\n");
			}
#endif /* SCTP_DEBUG */
			break;
		}
		if (ptype == SCTP_IPV4_ADDRESS &&
		    plen == sizeof(struct sctp_ipv4addr_param)) {
			/* Get the rest of the address */
			struct sctp_ipv4addr_param ip4_parm, *p4;

			phdr = sctp_get_next_param(m, offset,
			    (struct sctp_paramhdr *)&ip4_parm, plen);
			if (phdr == NULL) {
				return (NULL);
			}
			p4 = (struct sctp_ipv4addr_param *)phdr;
			memcpy(&sin4.sin_addr, &p4->addr, sizeof(p4->addr));
			/* look it up */
			retval = sctp_findassociation_ep_addr(inp_p,
			    (struct sockaddr *)&sin4, netp, dest, NULL);
			if (retval != NULL) {
				return (retval);
			}
		} else if (ptype == SCTP_IPV6_ADDRESS &&
		    plen == sizeof(struct sctp_ipv6addr_param)) {
			/* Get the rest of the address */
			struct sctp_ipv6addr_param ip6_parm, *p6;

			phdr = sctp_get_next_param(m, offset,
			    (struct sctp_paramhdr *)&ip6_parm, plen);
			if (phdr == NULL) {
				return (NULL);
			}
			p6 = (struct sctp_ipv6addr_param *)phdr;
			memcpy(&sin6.sin6_addr, &p6->addr, sizeof(p6->addr));
			/* look it up */
			retval = sctp_findassociation_ep_addr(inp_p,
			    (struct sockaddr *)&sin6, netp, dest, NULL);
			if (retval != NULL) {
				return (retval);
			}
		}
		offset += SCTP_SIZE32(plen);
		phdr = sctp_get_next_param(m, offset, &parm_buf,
		    sizeof(parm_buf));
	}
	return (NULL);
}
static struct sctp_tcb *
sctp_findassoc_by_vtag(struct sockaddr *from, uint32_t vtag,
    struct sctp_inpcb **inp_p, struct sctp_nets **netp, uint16_t rport,
    uint16_t lport)
{
	/*
	 * Use my vtag to hash. If we find it we then verify the source addr
	 * is in the assoc. If all goes well we save a bit on rec of a packet.
	 */
	struct sctpasochead *head;
	struct sctp_nets *net;
	struct sctp_tcb *stcb;

	SCTP_INP_INFO_RLOCK();
	head = &sctppcbinfo.sctp_asochash[SCTP_PCBHASH_ASOC(vtag,
	    sctppcbinfo.hashasocmark)];
	if (head == NULL) {
		/* invalid vtag */
		SCTP_INP_INFO_RUNLOCK();
		return (NULL);
	}
	LIST_FOREACH(stcb, head, sctp_asocs) {
		SCTP_INP_RLOCK(stcb->sctp_ep);
		SCTP_TCB_LOCK(stcb);
		SCTP_INP_RUNLOCK(stcb->sctp_ep);
		if (stcb->asoc.my_vtag == vtag) {
			/* candidate */
			if (stcb->rport != rport) {
				/*
				 * we could remove this if vtags are unique
				 * across the system.
				 */
				SCTP_TCB_UNLOCK(stcb);
				continue;
			}
			if (stcb->sctp_ep->sctp_lport != lport) {
				/*
				 * we could remove this if vtags are unique
				 * across the system.
				 */
				SCTP_TCB_UNLOCK(stcb);
				continue;
			}
			net = sctp_findnet(stcb, from);
			if (net) {
				/* yep its him. */
				*netp = net;
				sctp_pegs[SCTP_VTAG_EXPR]++;
				*inp_p = stcb->sctp_ep;
				SCTP_INP_INFO_RUNLOCK();
				return (stcb);
			} else {
				/*
				 * not him, this should only
				 * happen in rare cases so
				 * I peg it.
				 */
				sctp_pegs[SCTP_VTAG_BOGUS]++;
			}
		}
		SCTP_TCB_UNLOCK(stcb);
	}
	SCTP_INP_INFO_RUNLOCK();
	return (NULL);
}
/*
 * Find an association with the pointer to the inbound IP packet. This
 * can be an IPv4 or IPv6 packet.
 */
struct sctp_tcb *
sctp_findassociation_addr(struct mbuf *m, int iphlen, int offset,
    struct sctphdr *sh, struct sctp_chunkhdr *ch,
    struct sctp_inpcb **inp_p, struct sctp_nets **netp)
{
	int find_tcp_pool;
	struct ip *iph;
	struct sctp_tcb *retval;
	struct sockaddr_storage to_store, from_store;
	struct sockaddr *to = (struct sockaddr *)&to_store;
	struct sockaddr *from = (struct sockaddr *)&from_store;
	struct sctp_inpcb *inp;

	iph = mtod(m, struct ip *);
	if (iph->ip_v == IPVERSION) {
		/* its IPv4 */
		struct sockaddr_in *to4, *from4;

		to4 = (struct sockaddr_in *)&to_store;
		from4 = (struct sockaddr_in *)&from_store;
		bzero(to4, sizeof(*to4));
		bzero(from4, sizeof(*from4));
		from4->sin_family = to4->sin_family = AF_INET;
		from4->sin_len = to4->sin_len = sizeof(struct sockaddr_in);
		from4->sin_addr.s_addr = iph->ip_src.s_addr;
		to4->sin_addr.s_addr = iph->ip_dst.s_addr;
		from4->sin_port = sh->src_port;
		to4->sin_port = sh->dest_port;
	} else if (iph->ip_v == (IPV6_VERSION >> 4)) {
		/* its IPv6 */
		struct ip6_hdr *ip6;
		struct sockaddr_in6 *to6, *from6;

		ip6 = mtod(m, struct ip6_hdr *);
		to6 = (struct sockaddr_in6 *)&to_store;
		from6 = (struct sockaddr_in6 *)&from_store;
		bzero(to6, sizeof(*to6));
		bzero(from6, sizeof(*from6));
		from6->sin6_family = to6->sin6_family = AF_INET6;
		from6->sin6_len = to6->sin6_len = sizeof(struct sockaddr_in6);
		to6->sin6_addr = ip6->ip6_dst;
		from6->sin6_addr = ip6->ip6_src;
		from6->sin6_port = sh->src_port;
		to6->sin6_port = sh->dest_port;
		/* Get the scopes in properly to the sin6 addr's */
		in6_recoverscope(to6, &to6->sin6_addr, NULL);
#if defined(SCTP_BASE_FREEBSD) || defined(__APPLE__) || defined(__DragonFly__)
		in6_embedscope(&to6->sin6_addr, to6, NULL, NULL);
#else
		in6_embedscope(&to6->sin6_addr, to6);
#endif

		in6_recoverscope(from6, &from6->sin6_addr, NULL);
#if defined(SCTP_BASE_FREEBSD) || defined(__APPLE__) || defined(__DragonFly__)
		in6_embedscope(&from6->sin6_addr, from6, NULL, NULL);
#else
		in6_embedscope(&from6->sin6_addr, from6);
#endif
	} else {
		/* Currently not supported. */
		return (NULL);
	}
#ifdef SCTP_DEBUG
	if (sctp_debug_on & SCTP_DEBUG_PCB1) {
		kprintf("Looking for port %d address :",
		    ntohs(((struct sockaddr_in *)to)->sin_port));
		sctp_print_address(to);
		kprintf("From for port %d address :",
		    ntohs(((struct sockaddr_in *)from)->sin_port));
		sctp_print_address(from);
	}
#endif

	if (sh->v_tag) {
		/* we only go down this path if vtag is non-zero */
		retval = sctp_findassoc_by_vtag(from, ntohl(sh->v_tag),
		    inp_p, netp, sh->src_port, sh->dest_port);
		if (retval) {
			return (retval);
		}
	}

	find_tcp_pool = 0;
	if ((ch->chunk_type != SCTP_INITIATION) &&
	    (ch->chunk_type != SCTP_INITIATION_ACK) &&
	    (ch->chunk_type != SCTP_COOKIE_ACK) &&
	    (ch->chunk_type != SCTP_COOKIE_ECHO)) {
		/* Other chunk types go to the tcp pool. */
		find_tcp_pool = 1;
	}
	if (inp_p != NULL) {
		retval = sctp_findassociation_addr_sa(to, from, inp_p, netp,
		    find_tcp_pool);
		inp = *inp_p;
	} else {
		retval = sctp_findassociation_addr_sa(to, from, &inp, netp,
		    find_tcp_pool);
	}
#ifdef SCTP_DEBUG
	if (sctp_debug_on & SCTP_DEBUG_PCB1) {
		kprintf("retval:%p inp:%p\n", retval, inp);
	}
#endif
	if (retval == NULL && inp) {
		/* Found an EP but not this address */
#ifdef SCTP_DEBUG
		if (sctp_debug_on & SCTP_DEBUG_PCB1) {
			kprintf("Found endpoint %p but no asoc - ep state:%x\n",
			    inp, inp->sctp_flags);
		}
#endif
		if ((ch->chunk_type == SCTP_INITIATION) ||
		    (ch->chunk_type == SCTP_INITIATION_ACK)) {
			/*
			 * special hook, we do NOT return inp or an
			 * association that is linked to an existing
			 * association that is under the TCP pool (i.e. no
			 * listener exists). The endpoint finding routine
			 * will always find a listener before examining the
			 * TCP pool.
			 */
			if (inp->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL) {
#ifdef SCTP_DEBUG
				if (sctp_debug_on & SCTP_DEBUG_PCB1) {
					kprintf("Gak, its in the TCP pool... return NULL");
				}
#endif
				return (NULL);
			}
		}

#ifdef SCTP_DEBUG
		if (sctp_debug_on & SCTP_DEBUG_PCB1) {
			kprintf("Now doing SPECIAL find\n");
		}
#endif
		retval = sctp_findassociation_special_addr(m, iphlen,
		    offset, sh, inp_p, netp, to);
	}
#ifdef SCTP_DEBUG
	if (sctp_debug_on & SCTP_DEBUG_PCB1) {
		kprintf("retval is %p\n", retval);
	}
#endif
	return (retval);
}
extern int sctp_max_burst_default;

extern unsigned int sctp_delayed_sack_time_default;
extern unsigned int sctp_heartbeat_interval_default;
extern unsigned int sctp_pmtu_raise_time_default;
extern unsigned int sctp_shutdown_guard_time_default;
extern unsigned int sctp_secret_lifetime_default;

extern unsigned int sctp_rto_max_default;
extern unsigned int sctp_rto_min_default;
extern unsigned int sctp_rto_initial_default;
extern unsigned int sctp_init_rto_max_default;
extern unsigned int sctp_valid_cookie_life_default;
extern unsigned int sctp_init_rtx_max_default;
extern unsigned int sctp_assoc_rtx_max_default;
extern unsigned int sctp_path_rtx_max_default;
extern unsigned int sctp_nr_outgoing_streams_default;
/*
 * allocate a sctp_inpcb and setup a temporary binding to a port/all
 * addresses. This way if we don't get a bind we by default pick an
 * ephemeral port with all addresses bound.
 */
int
sctp_inpcb_alloc(struct socket *so)
{
	/*
	 * we get called when a new endpoint starts up. We need to allocate
	 * the sctp_inpcb structure from the zone and init it. Mark it as
	 * unbound and find a port that we can use as an ephemeral with
	 * INADDR_ANY. If the user binds later no problem we can then add
	 * in the specific addresses. And setup the default parameters for
	 * the EP.
	 */
	int i, error;
	struct sctp_inpcb *inp, *n_inp;
	struct sctp_pcb *m;
	struct timeval time;

	error = 0;

	/*
	 * This code audits the entire INP list to see if
	 * any ep's that are in the GONE state are now
	 * all free. This should not happen really since when
	 * the last association is freed we should end up deleting
	 * the inpcb. This code including the locks should
	 * be taken out ... since the last set of fixes I
	 * have not seen the "Found a GONE on list" message
	 * come out. But I am paranoid and we will leave this
	 * in at the cost of efficiency on allocation of PCB's.
	 * Probably we should move this to the invariant
	 * compile options.
	 */
	/* #ifdef INVARIANTS*/
	SCTP_INP_INFO_RLOCK();
	inp = LIST_FIRST(&sctppcbinfo.listhead);
	while (inp) {
		n_inp = LIST_NEXT(inp, sctp_list);
		if (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) {
			if (LIST_FIRST(&inp->sctp_asoc_list) == NULL) {
				/* finish the job now */
				kprintf("Found a GONE on list\n");
				SCTP_INP_INFO_RUNLOCK();
				sctp_inpcb_free(inp, 1);
				SCTP_INP_INFO_RLOCK();
			}
		}
		inp = n_inp;
	}
	SCTP_INP_INFO_RUNLOCK();
	/* #endif INVARIANTS*/

	SCTP_INP_INFO_WLOCK();
	inp = (struct sctp_inpcb *)SCTP_ZONE_GET(sctppcbinfo.ipi_zone_ep);
	if (inp == NULL) {
		kprintf("Out of SCTP-INPCB structures - no resources\n");
		SCTP_INP_INFO_WUNLOCK();
		return (ENOBUFS);
	}

	/* zap it */
	bzero(inp, sizeof(*inp));

	/* bump generations */
	inp->ip_inp.inp.inp_socket = so;

	/* setup socket pointers */
	inp->sctp_socket = so;

	/* setup inpcb socket too */
	inp->ip_inp.inp.inp_socket = so;
	inp->sctp_frag_point = SCTP_DEFAULT_MAXSEGMENT;
#ifdef IPSEC
#if !(defined(__OpenBSD__) || defined(__APPLE__))
	{
		struct inpcbpolicy *pcb_sp = NULL;

		error = ipsec_init_policy(so, &pcb_sp);
		/* Arrange to share the policy */
		inp->ip_inp.inp.inp_sp = pcb_sp;
		((struct in6pcb *)(&inp->ip_inp.inp))->in6p_sp = pcb_sp;
	}
#else
	/* not sure what to do for openbsd here */
	error = 0;
#endif
	if (error != 0) {
		SCTP_ZONE_FREE(sctppcbinfo.ipi_zone_ep, inp);
		SCTP_INP_INFO_WUNLOCK();
		return (error);
	}
#endif /* IPSEC */
	sctppcbinfo.ipi_count_ep++;
#if defined(__FreeBSD__) || defined(__APPLE__) || defined(__DragonFly__)
	inp->ip_inp.inp.inp_gencnt = ++sctppcbinfo.ipi_gencnt_ep;
	inp->ip_inp.inp.inp_ip_ttl = ip_defttl;
#else
	inp->inp_ip_ttl = ip_defttl;
	inp->inp_ip_tos = 0;
#endif

	so->so_pcb = (caddr_t)inp;

	if ((so->so_type == SOCK_DGRAM) ||
	    (so->so_type == SOCK_SEQPACKET)) {
		/* UDP style socket */
		inp->sctp_flags = (SCTP_PCB_FLAGS_UDPTYPE |
		    SCTP_PCB_FLAGS_UNBOUND);
		inp->sctp_flags |= (SCTP_PCB_FLAGS_RECVDATAIOEVNT);
	} else if (so->so_type == SOCK_STREAM) {
		/* TCP style socket */
		inp->sctp_flags = (SCTP_PCB_FLAGS_TCPTYPE |
		    SCTP_PCB_FLAGS_UNBOUND);
		inp->sctp_flags |= (SCTP_PCB_FLAGS_RECVDATAIOEVNT);
	} else {
		/*
		 * unsupported socket type (RAW, etc)- in case we missed
		 * it in protosw
		 */
		SCTP_ZONE_FREE(sctppcbinfo.ipi_zone_ep, inp);
		SCTP_INP_INFO_WUNLOCK();
		return (EOPNOTSUPP);
	}
	inp->sctp_tcbhash = hashinit(sctp_pcbtblsize,
#ifdef __NetBSD__
	    HASH_LIST,
#endif
	    M_PCB,
#if defined(__NetBSD__) || defined(__OpenBSD__)
	    M_WAITOK,
#endif
	    &inp->sctp_hashmark);
	if (inp->sctp_tcbhash == NULL) {
		kprintf("Out of SCTP-INPCB->hashinit - no resources\n");
		SCTP_ZONE_FREE(sctppcbinfo.ipi_zone_ep, inp);
		SCTP_INP_INFO_WUNLOCK();
		return (ENOBUFS);
	}

	SCTP_INP_LOCK_INIT(inp);
	SCTP_ASOC_CREATE_LOCK_INIT(inp);
	/* lock the new ep */
	SCTP_INP_WLOCK(inp);

	/* add it to the info area */
	LIST_INSERT_HEAD(&sctppcbinfo.listhead, inp, sctp_list);
	SCTP_INP_INFO_WUNLOCK();

	LIST_INIT(&inp->sctp_addr_list);
	LIST_INIT(&inp->sctp_asoc_list);
	TAILQ_INIT(&inp->sctp_queue_list);
	/* Init the timer structure for signature change */
#if defined (__FreeBSD__) && __FreeBSD_version >= 500000
	callout_init(&inp->sctp_ep.signature_change.timer, 0);
#else
	callout_init(&inp->sctp_ep.signature_change.timer);
#endif
	inp->sctp_ep.signature_change.type = SCTP_TIMER_TYPE_NEWCOOKIE;

	/* now init the actual endpoint default data */
	m = &inp->sctp_ep;

	/* setup the base timeout information */
	m->sctp_timeoutticks[SCTP_TIMER_SEND] = SEC_TO_TICKS(SCTP_SEND_SEC); /* needed ? */
	m->sctp_timeoutticks[SCTP_TIMER_INIT] = SEC_TO_TICKS(SCTP_INIT_SEC); /* needed ? */
	m->sctp_timeoutticks[SCTP_TIMER_RECV] = MSEC_TO_TICKS(sctp_delayed_sack_time_default);
	m->sctp_timeoutticks[SCTP_TIMER_HEARTBEAT] = sctp_heartbeat_interval_default; /* this is in MSEC */
	m->sctp_timeoutticks[SCTP_TIMER_PMTU] = SEC_TO_TICKS(sctp_pmtu_raise_time_default);
	m->sctp_timeoutticks[SCTP_TIMER_MAXSHUTDOWN] = SEC_TO_TICKS(sctp_shutdown_guard_time_default);
	m->sctp_timeoutticks[SCTP_TIMER_SIGNATURE] = SEC_TO_TICKS(sctp_secret_lifetime_default);
	/* all max/min max are in ms */
	m->sctp_maxrto = sctp_rto_max_default;
	m->sctp_minrto = sctp_rto_min_default;
	m->initial_rto = sctp_rto_initial_default;
	m->initial_init_rto_max = sctp_init_rto_max_default;

	m->max_open_streams_intome = MAX_SCTP_STREAMS;

	m->max_init_times = sctp_init_rtx_max_default;
	m->max_send_times = sctp_assoc_rtx_max_default;
	m->def_net_failure = sctp_path_rtx_max_default;
	m->sctp_sws_sender = SCTP_SWS_SENDER_DEF;
	m->sctp_sws_receiver = SCTP_SWS_RECEIVER_DEF;
	m->max_burst = sctp_max_burst_default;
	/* number of streams to pre-open on a association */
	m->pre_open_stream_count = sctp_nr_outgoing_streams_default;

	/* Add adaption cookie */
	m->adaption_layer_indicator = 0x504C5253;

	/* seed random number generator */
	m->random_counter = 1;
	m->store_at = SCTP_SIGNATURE_SIZE;
#if (defined(__FreeBSD__) && (__FreeBSD_version < 500000)) || defined(__DragonFly__)
	read_random_unlimited(m->random_numbers, sizeof(m->random_numbers));
#elif defined(__APPLE__) || (__FreeBSD_version > 500000)
	read_random(m->random_numbers, sizeof(m->random_numbers));
#elif defined(__OpenBSD__)
	get_random_bytes(m->random_numbers, sizeof(m->random_numbers));
#elif defined(__NetBSD__) && NRND > 0
	rnd_extract_data(m->random_numbers, sizeof(m->random_numbers),
	    RND_EXTRACT_ANY);
#else
	{
		u_int32_t *ranm, *ranp;

		ranp = (u_int32_t *)&m->random_numbers;
		ranm = ranp + (SCTP_SIGNATURE_ALOC_SIZE/sizeof(u_int32_t));
		if ((u_long)ranp % 4) {
			/* not a even boundary? */
			ranp = (u_int32_t *)SCTP_SIZE32((u_long)ranp);
		}
		while (ranp < ranm) {
			*ranp = random();
			ranp++;
		}
	}
#endif
	sctp_fill_random_store(m);

	/* Minimum cookie size */
	m->size_of_a_cookie = (sizeof(struct sctp_init_msg) * 2) +
	    sizeof(struct sctp_state_cookie);
	m->size_of_a_cookie += SCTP_SIGNATURE_SIZE;

	/* Setup the initial secret */
	SCTP_GETTIME_TIMEVAL(&time);
	m->time_of_secret_change = time.tv_sec;

	for (i = 0; i < SCTP_NUMBER_OF_SECRETS; i++) {
		m->secret_key[0][i] = sctp_select_initial_TSN(m);
	}
	sctp_timer_start(SCTP_TIMER_TYPE_NEWCOOKIE, inp, NULL, NULL);

	/* How long is a cookie good for ? */
	m->def_cookie_life = sctp_valid_cookie_life_default;
	SCTP_INP_WUNLOCK(inp);
	return (error);
}
void
sctp_move_pcb_and_assoc(struct sctp_inpcb *old_inp, struct sctp_inpcb *new_inp,
    struct sctp_tcb *stcb)
{
	uint16_t lport, rport;
	struct sctppcbhead *head;
	struct sctp_laddr *laddr, *oladdr;

	SCTP_TCB_UNLOCK(stcb);
	SCTP_INP_INFO_WLOCK();
	SCTP_INP_WLOCK(old_inp);
	SCTP_INP_WLOCK(new_inp);
	SCTP_TCB_LOCK(stcb);

	new_inp->sctp_ep.time_of_secret_change =
	    old_inp->sctp_ep.time_of_secret_change;
	memcpy(new_inp->sctp_ep.secret_key, old_inp->sctp_ep.secret_key,
	    sizeof(old_inp->sctp_ep.secret_key));
	new_inp->sctp_ep.current_secret_number =
	    old_inp->sctp_ep.current_secret_number;
	new_inp->sctp_ep.last_secret_number =
	    old_inp->sctp_ep.last_secret_number;
	new_inp->sctp_ep.size_of_a_cookie = old_inp->sctp_ep.size_of_a_cookie;

	/* Copy the port across */
	lport = new_inp->sctp_lport = old_inp->sctp_lport;
	rport = stcb->rport;
	/* Pull the tcb from the old association */
	LIST_REMOVE(stcb, sctp_tcbhash);
	LIST_REMOVE(stcb, sctp_tcblist);

	/* Now insert the new_inp into the TCP connected hash */
	head = &sctppcbinfo.sctp_tcpephash[SCTP_PCBHASH_ALLADDR((lport + rport),
	    sctppcbinfo.hashtcpmark)];

	LIST_INSERT_HEAD(head, new_inp, sctp_hash);

	/* Now move the tcb into the endpoint list */
	LIST_INSERT_HEAD(&new_inp->sctp_asoc_list, stcb, sctp_tcblist);
	/*
	 * Question, do we even need to worry about the ep-hash since
	 * we only have one connection? Probably not :> so lets
	 * get rid of it and not suck up any kernel memory in that.
	 */
	SCTP_INP_INFO_WUNLOCK();
	stcb->sctp_socket = new_inp->sctp_socket;
	stcb->sctp_ep = new_inp;
	if (new_inp->sctp_tcbhash != NULL) {
		FREE(new_inp->sctp_tcbhash, M_PCB);
		new_inp->sctp_tcbhash = NULL;
	}
	if ((new_inp->sctp_flags & SCTP_PCB_FLAGS_BOUNDALL) == 0) {
		/* Subset bound, so copy in the laddr list from the old_inp */
		LIST_FOREACH(oladdr, &old_inp->sctp_addr_list, sctp_nxt_addr) {
			laddr = (struct sctp_laddr *)SCTP_ZONE_GET(
			    sctppcbinfo.ipi_zone_laddr);
			if (laddr == NULL) {
				/*
				 * Gak, what can we do? This assoc is really
				 * HOSED. We probably should send an abort
				 * here.
				 */
#ifdef SCTP_DEBUG
				if (sctp_debug_on & SCTP_DEBUG_PCB1) {
					kprintf("Association hosed in TCP model, out of laddr memory\n");
				}
#endif /* SCTP_DEBUG */
				continue;
			}
			sctppcbinfo.ipi_count_laddr++;
			sctppcbinfo.ipi_gencnt_laddr++;
			bzero(laddr, sizeof(*laddr));
			laddr->ifa = oladdr->ifa;
			LIST_INSERT_HEAD(&new_inp->sctp_addr_list, laddr,
			    sctp_nxt_addr);
			new_inp->laddr_count++;
		}
	}
	SCTP_INP_WUNLOCK(new_inp);
	SCTP_INP_WUNLOCK(old_inp);
}
static int
sctp_isport_inuse(struct sctp_inpcb *inp, uint16_t lport)
{
	struct sctppcbhead *head;
	struct sctp_inpcb *t_inp;

	head = &sctppcbinfo.sctp_ephash[SCTP_PCBHASH_ALLADDR(lport,
	    sctppcbinfo.hashmark)];
	LIST_FOREACH(t_inp, head, sctp_hash) {
		if (t_inp->sctp_lport != lport) {
			continue;
		}
		/* This one is in use. */
		/* check the v6/v4 binding issue */
		if ((t_inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) &&
#if defined(__FreeBSD__)
		    (((struct inpcb *)t_inp)->inp_flags & IN6P_IPV6_V6ONLY)
#else
#if defined(__OpenBSD__)
		    (0)	/* For OpenBSD we do dual bind only */
#else
		    (((struct in6pcb *)t_inp)->in6p_flags & IN6P_IPV6_V6ONLY)
#endif
#endif
		    ) {
			if (inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) {
				/* collision in V6 space */
				return (1);
			} else {
				/* inp is BOUND_V4 no conflict */
				continue;
			}
		} else if (t_inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) {
			/* t_inp is bound v4 and v6, conflict always */
			return (1);
		} else {
			/* t_inp is bound only V4 */
			if ((inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) &&
#if defined(__FreeBSD__)
			    (((struct inpcb *)inp)->inp_flags & IN6P_IPV6_V6ONLY)
#else
#if defined(__OpenBSD__)
			    (0)	/* For OpenBSD we do dual bind only */
#else
			    (((struct in6pcb *)inp)->in6p_flags & IN6P_IPV6_V6ONLY)
#endif
#endif
			    ) {
				/* no conflict */
				continue;
			}
			/* else fall through to conflict */
		}
		return (1);
	}
	return (0);
}
#if !(defined(__FreeBSD__) || defined(__APPLE__) || defined(__DragonFly__))
/*
 * Don't know why, but without this there is an unknown reference when
 * compiling NetBSD... hmm
 */
extern void in6_sin6_2_sin(struct sockaddr_in *, struct sockaddr_in6 *sin6);
#endif
int
#if (defined(__FreeBSD__) && __FreeBSD_version >= 500000) || defined(__DragonFly__)
sctp_inpcb_bind(struct socket *so, struct sockaddr *addr, struct thread *p)
#else
sctp_inpcb_bind(struct socket *so, struct sockaddr *addr, struct proc *p)
#endif
{
	/* bind a ep to a socket address */
	struct sctppcbhead *head;
	struct sctp_inpcb *inp, *inp_tmp;
	struct inpcb *ip_inp;
	uint16_t lport;
	int error, bindall;

	lport = 0;
	error = 0;
	bindall = 1;
	inp = (struct sctp_inpcb *)so->so_pcb;
	ip_inp = (struct inpcb *)so->so_pcb;
#ifdef SCTP_DEBUG
	if (sctp_debug_on & SCTP_DEBUG_PCB1) {
		if (addr) {
			kprintf("Bind called port:%d\n",
			    ntohs(((struct sockaddr_in *)addr)->sin_port));
			sctp_print_address(addr);
		}
	}
#endif /* SCTP_DEBUG */
	if ((inp->sctp_flags & SCTP_PCB_FLAGS_UNBOUND) == 0) {
		/* already did a bind, subsequent binds NOT allowed ! */
		return (EINVAL);
	}

	if (addr != NULL) {
		if (addr->sa_family == AF_INET) {
			struct sockaddr_in *sin;

			/* IPV6_V6ONLY socket? */
			if (
#if defined(__FreeBSD__) || defined(__APPLE__) || defined(__DragonFly__)
			    (ip_inp->inp_flags & IN6P_IPV6_V6ONLY)
#else
#if defined(__OpenBSD__)
			    (0)	/* For OpenBSD we do dual bind only */
#else
			    (((struct in6pcb *)inp)->in6p_flags & IN6P_IPV6_V6ONLY)
#endif
#endif
			    ) {
				return (EINVAL);
			}

			if (addr->sa_len != sizeof(*sin))
				return (EINVAL);

			sin = (struct sockaddr_in *)addr;
			lport = sin->sin_port;

			if (sin->sin_addr.s_addr != INADDR_ANY) {
				bindall = 0;
			}
		} else if (addr->sa_family == AF_INET6) {
			/* Only for pure IPv6 Address. (No IPv4 Mapped!) */
			struct sockaddr_in6 *sin6;

			sin6 = (struct sockaddr_in6 *)addr;

			if (addr->sa_len != sizeof(*sin6))
				return (EINVAL);

			lport = sin6->sin6_port;
			if (!IN6_IS_ADDR_UNSPECIFIED(&sin6->sin6_addr)) {
				bindall = 0;
				/* KAME hack: embed scopeid */
#if defined(SCTP_BASE_FREEBSD) || defined(__APPLE__) || defined(__DragonFly__)
				if (in6_embedscope(&sin6->sin6_addr, sin6,
				    ip_inp, NULL) != 0)
					return (EINVAL);
#elif defined(__FreeBSD__)
				error = scope6_check_id(sin6, ip6_use_defzone);
				if (error != 0)
					return (error);
#else
				if (in6_embedscope(&sin6->sin6_addr, sin6) != 0) {
					return (EINVAL);
				}
#endif
			}
#ifndef SCOPEDROUTING
			/* this must be cleared for ifa_ifwithaddr() */
			sin6->sin6_scope_id = 0;
#endif /* SCOPEDROUTING */
		} else {
			return (EAFNOSUPPORT);
		}
	}
	SCTP_INP_INFO_WLOCK();
	SCTP_INP_WLOCK(inp);
	/* increase our count due to the unlock we do */
	SCTP_INP_INCR_REF(inp);
	if (lport) {
		/*
		 * Did the caller specify a port? if so we must see if an
		 * ep already has this one bound.
		 */
		/* got to be root to get at low ports */
		if (ntohs(lport) < IPPORT_RESERVED) {
			if (p && (error =
#ifdef __FreeBSD__
#if __FreeBSD_version >= 500000
			    suser_cred(p->td_ucred, 0)
#else
			    suser(p)
#endif
#elif defined(__NetBSD__) || defined(__APPLE__)
			    suser(p->p_ucred, &p->p_acflag)
#elif defined(__DragonFly__)
			    suser(p)
#else
			    suser(p, 0)
#endif
			    )) {
				SCTP_INP_DECR_REF(inp);
				SCTP_INP_WUNLOCK(inp);
				SCTP_INP_INFO_WUNLOCK();
				return (error);
			}
		}
		if (p == NULL) {
			SCTP_INP_DECR_REF(inp);
			SCTP_INP_WUNLOCK(inp);
			SCTP_INP_INFO_WUNLOCK();
			return (error);
		}
		SCTP_INP_WUNLOCK(inp);
		inp_tmp = sctp_pcb_findep(addr, 0, 1);
		if (inp_tmp != NULL) {
			/*
			 * lock guy returned and lower count.
			 * note that we are not bound so inp_tmp
			 * should NEVER be inp. And it is this
			 * inp (inp_tmp) that gets the reference
			 * bump, so we must lower it.
			 */
			SCTP_INP_WLOCK(inp_tmp);
			SCTP_INP_DECR_REF(inp_tmp);
			SCTP_INP_WUNLOCK(inp_tmp);

			/* unlock info */
			SCTP_INP_INFO_WUNLOCK();
			return (EADDRNOTAVAIL);
		}
		SCTP_INP_WLOCK(inp);
		if (bindall) {
			/* verify the lport is not used by a singleton */
			if (sctp_isport_inuse(inp, lport)) {
				/* Sorry someone already has this one bound */
				SCTP_INP_DECR_REF(inp);
				SCTP_INP_WUNLOCK(inp);
				SCTP_INP_INFO_WUNLOCK();
				return (EADDRNOTAVAIL);
			}
		}
	} else {
		/*
		 * get any port but lets make sure no one has any address
		 * with this port bound.
		 */
		/*
		 * setup the inp to the top (I could use the union but this
		 * is just as easy).
		 */
		uint32_t port_guess;
		uint16_t port_attempt;
		int not_done = 1;

		while (not_done) {
			port_guess = sctp_select_initial_TSN(&inp->sctp_ep);
			port_attempt = (port_guess & 0x0000ffff);
			if (port_attempt == 0) {
				goto next_half;
			}
			if (port_attempt < IPPORT_RESERVED) {
				port_attempt += IPPORT_RESERVED;
			}

			if (sctp_isport_inuse(inp, htons(port_attempt)) == 0) {
				/* got a port we can use */
				not_done = 0;
				continue;
			}
			/* try upper half */
		next_half:
			port_attempt = ((port_guess >> 16) & 0x0000ffff);
			if (port_attempt == 0) {
				goto last_try;
			}
			if (port_attempt < IPPORT_RESERVED) {
				port_attempt += IPPORT_RESERVED;
			}
			if (sctp_isport_inuse(inp, htons(port_attempt)) == 0) {
				/* got a port we can use */
				not_done = 0;
				continue;
			}
			/* try two half's added together */
		last_try:
			port_attempt = (((port_guess >> 16) & 0x0000ffff) + (port_guess & 0x0000ffff));
			if (port_attempt == 0) {
				/* get a new random number */
				continue;
			}
			if (port_attempt < IPPORT_RESERVED) {
				port_attempt += IPPORT_RESERVED;
			}
			if (sctp_isport_inuse(inp, htons(port_attempt)) == 0) {
				/* got a port we can use */
				not_done = 0;
				continue;
			}
		}
		/* we don't get out of the loop until we have a port */
		lport = htons(port_attempt);
	}
	SCTP_INP_DECR_REF(inp);
	if (inp->sctp_flags & (SCTP_PCB_FLAGS_SOCKET_GONE|SCTP_PCB_FLAGS_SOCKET_ALLGONE)) {
		/*
		 * this really should not happen. The guy
		 * did a non-blocking bind and then did a close
		 * at the same time.
		 */
		SCTP_INP_WUNLOCK(inp);
		SCTP_INP_INFO_WUNLOCK();
		return (EINVAL);
	}
	/* ok we look clear to give out this port, so lets setup the binding */
	if (bindall) {
		/* binding to all addresses, so just set in the proper flags */
		inp->sctp_flags |= (SCTP_PCB_FLAGS_BOUNDALL |
		    SCTP_PCB_FLAGS_DO_ASCONF);
		/* set the automatic addr changes from kernel flag */
		if (sctp_auto_asconf == 0) {
			inp->sctp_flags &= ~SCTP_PCB_FLAGS_AUTO_ASCONF;
		} else {
			inp->sctp_flags |= SCTP_PCB_FLAGS_AUTO_ASCONF;
		}
	} else {
		/*
		 * bind specific, make sure flags is off and add a new address
		 * structure to the sctp_addr_list inside the ep structure.
		 *
		 * We will need to allocate one and insert it at the head.
		 * The socketopt call can just insert new addresses in there
		 * as well. It will also have to do the embed scope kame hack
		 * too (before adding).
		 */
		struct ifaddr *ifa;
		struct sockaddr_storage store_sa;

		memset(&store_sa, 0, sizeof(store_sa));
		if (addr->sa_family == AF_INET) {
			struct sockaddr_in *sin;

			sin = (struct sockaddr_in *)&store_sa;
			memcpy(sin, addr, sizeof(struct sockaddr_in));
			sin->sin_port = 0;
		} else if (addr->sa_family == AF_INET6) {
			struct sockaddr_in6 *sin6;

			sin6 = (struct sockaddr_in6 *)&store_sa;
			memcpy(sin6, addr, sizeof(struct sockaddr_in6));
			sin6->sin6_port = 0;
		}
		/*
		 * first find the interface with the bound address
		 * need to zero out the port to find the address! yuck!
		 * can't do this earlier since need port for sctp_pcb_findep()
		 */
		ifa = sctp_find_ifa_by_addr((struct sockaddr *)&store_sa);
		if (ifa == NULL) {
			/* Can't find an interface with that address */
			SCTP_INP_WUNLOCK(inp);
			SCTP_INP_INFO_WUNLOCK();
			return (EADDRNOTAVAIL);
		}
		if (addr->sa_family == AF_INET6) {
			struct in6_ifaddr *ifa6;

			ifa6 = (struct in6_ifaddr *)ifa;
			/*
			 * allow binding of deprecated addresses as per
			 * RFC 2462 and ipng discussion
			 */
			if (ifa6->ia6_flags & (IN6_IFF_DETACHED |
			    IN6_IFF_ANYCAST |
			    IN6_IFF_NOTREADY)) {
				/* Can't bind a non-existent addr. */
				SCTP_INP_WUNLOCK(inp);
				SCTP_INP_INFO_WUNLOCK();
				return (EINVAL);
			}
		}
		/* we're not bound all */
		inp->sctp_flags &= ~SCTP_PCB_FLAGS_BOUNDALL;
#if 0 /* use sysctl now */
		/* don't allow automatic addr changes from kernel */
		inp->sctp_flags &= ~SCTP_PCB_FLAGS_AUTO_ASCONF;
#endif
		/* set the automatic addr changes from kernel flag */
		if (sctp_auto_asconf == 0) {
			inp->sctp_flags &= ~SCTP_PCB_FLAGS_AUTO_ASCONF;
		} else {
			inp->sctp_flags |= SCTP_PCB_FLAGS_AUTO_ASCONF;
		}
		/* allow bindx() to send ASCONF's for binding changes */
		inp->sctp_flags |= SCTP_PCB_FLAGS_DO_ASCONF;
		/* add this address to the endpoint list */
		error = sctp_insert_laddr(&inp->sctp_addr_list, ifa);
		if (error != 0) {
			SCTP_INP_WUNLOCK(inp);
			SCTP_INP_INFO_WUNLOCK();
			return (error);
		}
	}
	/* find the bucket */
	head = &sctppcbinfo.sctp_ephash[SCTP_PCBHASH_ALLADDR(lport,
	    sctppcbinfo.hashmark)];
	/* put it in the bucket */
	LIST_INSERT_HEAD(head, inp, sctp_hash);
#ifdef SCTP_DEBUG
	if (sctp_debug_on & SCTP_DEBUG_PCB1) {
		kprintf("Main hash to bind at head:%p, bound port:%d\n", head, ntohs(lport));
	}
#endif
	/* set in the port */
	inp->sctp_lport = lport;

	/* turn off just the unbound flag */
	inp->sctp_flags &= ~SCTP_PCB_FLAGS_UNBOUND;
	SCTP_INP_WUNLOCK(inp);
	SCTP_INP_INFO_WUNLOCK();
	return (0);
}
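
/*
 * An endpoint is about to be freed: walk the iterator list and adjust any
 * iterator that still references this inp so it does not touch the endpoint
 * after it is gone.
 */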
static void
sctp_iterator_inp_being_freed(struct sctp_inpcb *inp, struct sctp_inpcb *inp_next)
{
	struct sctp_iterator *it;

	/*
	 * We enter with only the ITERATOR_LOCK in place and
	 * a write lock on the inp_info stuff.
	 */

	/*
	 * Go through all iterators, we must do this since
	 * it is possible that some iterator does NOT have
	 * the lock, but is waiting for it. And the one that
	 * had the lock has either moved in the last iteration
	 * or we just cleared it above. We need to find all
	 * of those guys. The list of iterators should never
	 * be very big though.
	 */
	LIST_FOREACH(it, &sctppcbinfo.iteratorhead, sctp_nxt_itr) {
		if (it == inp->inp_starting_point_for_iterator)
			/* skip this guy, he's special */
			continue;
		if (it->inp == inp) {
			/*
			 * This is tricky and we DON'T lock the iterator.
			 * Reason is he's running but waiting for me since
			 * inp->inp_starting_point_for_iterator has the lock
			 * on me (the guy above we skipped). This tells us
			 * it is not running but waiting for inp->inp_starting_point_for_iterator
			 * to be released by the guy that does have our INP in a lock.
			 */
			if (it->iterator_flags & SCTP_ITERATOR_DO_SINGLE_INP) {
				it->inp = NULL;
				it->stcb = NULL;
			} else {
				/* set him up to do the next guy not me */
				it->inp = inp_next;
				it->stcb = NULL;
			}
		}
	}
	it = inp->inp_starting_point_for_iterator;
	if (it == NULL) {
		return;
	}
	if (it->iterator_flags & SCTP_ITERATOR_DO_SINGLE_INP) {
		it->inp = NULL;
	} else {
		it->inp = inp_next;
	}
	it->stcb = NULL;
}
/* release sctp_inpcb unbind the port */
void
sctp_inpcb_free(struct sctp_inpcb *inp, int immediate)
{
	/*
	 * Here we free an endpoint. We must find it (if it is in the Hash
	 * table) and remove it from there. Then we must also find it in
	 * the overall list and remove it from there. After all removals are
	 * complete then any timer has to be stopped. Then start the actual
	 * freeing:
	 * a) Any local lists.
	 * b) Any associations.
	 * c) The hash of all associations.
	 * d) finally the ep itself.
	 */
	struct sctp_inpcb *inp_save;
	struct sctp_tcb *asoc, *nasoc;
	struct sctp_laddr *laddr, *nladdr;
	struct inpcb *ip_pcb;
	struct socket *so;
	struct sctp_socket_q_list *sq;
#if !defined(__FreeBSD__) || __FreeBSD_version < 500000
	struct rtentry *rt;
#endif

	SCTP_ASOC_CREATE_LOCK(inp);
	SCTP_INP_WLOCK(inp);
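	/*
	 * Note: when 'immediate' is zero the loop below first gives each
	 * association a graceful exit (SHUTDOWN, or an ABORT if unread data
	 * is left) and the endpoint teardown itself can be deferred to a
	 * later call; a non-zero 'immediate' skips that pass.
	 */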
2243 if (inp
->sctp_flags
& SCTP_PCB_FLAGS_SOCKET_ALLGONE
) {
2244 /* been here before */
2246 kprintf("Endpoint was all gone (dup free)?\n");
2247 SCTP_INP_WUNLOCK(inp
);
2248 SCTP_ASOC_CREATE_UNLOCK(inp
);
2251 sctp_timer_stop(SCTP_TIMER_TYPE_NEWCOOKIE
, inp
, NULL
, NULL
);
2254 sctp_m_freem(inp
->control
);
2255 inp
->control
= NULL
;
2258 sctp_m_freem(inp
->pkt
);
2261 so
= inp
->sctp_socket
;
2263 ip_pcb
= &inp
->ip_inp
.inp
; /* we could just cast the main
2264 * pointer here but I will
2265 * be nice :> (i.e. ip_pcb = ep;)
2268 if (immediate
== 0) {
2271 for ((asoc
= LIST_FIRST(&inp
->sctp_asoc_list
)); asoc
!= NULL
;
2273 nasoc
= LIST_NEXT(asoc
, sctp_tcblist
);
2274 if ((SCTP_GET_STATE(&asoc
->asoc
) == SCTP_STATE_COOKIE_WAIT
) ||
2275 (SCTP_GET_STATE(&asoc
->asoc
) == SCTP_STATE_COOKIE_ECHOED
)) {
2276 /* Just abandon things in the front states */
2277 SCTP_TCB_LOCK(asoc
);
2278 SCTP_INP_WUNLOCK(inp
);
2279 sctp_free_assoc(inp
, asoc
);
2280 SCTP_INP_WLOCK(inp
);
2283 asoc
->asoc
.state
|= SCTP_STATE_CLOSED_SOCKET
;
2285 if ((asoc
->asoc
.size_on_delivery_queue
> 0) ||
2286 (asoc
->asoc
.size_on_reasm_queue
> 0) ||
2287 (asoc
->asoc
.size_on_all_streams
> 0) ||
2288 (so
&& (so
->so_rcv
.ssb_cc
> 0))
2290 /* Left with Data unread */
2291 struct mbuf
*op_err
;
2292 MGET(op_err
, MB_DONTWAIT
, MT_DATA
);
2294 /* Fill in the user initiated abort */
2295 struct sctp_paramhdr
*ph
;
2297 sizeof(struct sctp_paramhdr
);
2299 struct sctp_paramhdr
*);
2300 ph
->param_type
= htons(
2301 SCTP_CAUSE_USER_INITIATED_ABT
);
2302 ph
->param_length
= htons(op_err
->m_len
);
2304 SCTP_TCB_LOCK(asoc
);
2305 sctp_send_abort_tcb(asoc
, op_err
);
2307 SCTP_INP_WUNLOCK(inp
);
2308 sctp_free_assoc(inp
, asoc
);
2309 SCTP_INP_WLOCK(inp
);
2311 } else if (TAILQ_EMPTY(&asoc
->asoc
.send_queue
) &&
2312 TAILQ_EMPTY(&asoc
->asoc
.sent_queue
)) {
2313 if ((SCTP_GET_STATE(&asoc
->asoc
) != SCTP_STATE_SHUTDOWN_SENT
) &&
2314 (SCTP_GET_STATE(&asoc
->asoc
) != SCTP_STATE_SHUTDOWN_ACK_SENT
)) {
2315 /* there is nothing queued to send, so I send shutdown */
2316 SCTP_TCB_LOCK(asoc
);
2317 sctp_send_shutdown(asoc
, asoc
->asoc
.primary_destination
);
2318 asoc
->asoc
.state
= SCTP_STATE_SHUTDOWN_SENT
;
2319 sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWN
, asoc
->sctp_ep
, asoc
,
2320 asoc
->asoc
.primary_destination
);
2321 sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNGUARD
, asoc
->sctp_ep
, asoc
,
2322 asoc
->asoc
.primary_destination
);
2323 sctp_chunk_output(inp
, asoc
, 1);
2324 SCTP_TCB_UNLOCK(asoc
);
2327 /* mark into shutdown pending */
2328 asoc
->asoc
.state
|= SCTP_STATE_SHUTDOWN_PENDING
;
2332 /* now is there some left in our SHUTDOWN state? */
2334 inp
->sctp_flags
|= SCTP_PCB_FLAGS_SOCKET_GONE
;
2336 SCTP_INP_WUNLOCK(inp
);
2337 SCTP_ASOC_CREATE_UNLOCK(inp
);
2341 #if defined(__FreeBSD__) && __FreeBSD_version >= 503000
2342 if (inp
->refcount
) {
2343 sctp_timer_start(SCTP_TIMER_TYPE_INPKILL
, inp
, NULL
, NULL
);
2344 SCTP_INP_WUNLOCK(inp
);
2345 SCTP_ASOC_CREATE_UNLOCK(inp
);
2349 inp
->sctp_flags
|= SCTP_PCB_FLAGS_SOCKET_ALLGONE
;
2350 #if !defined(__FreeBSD__) || __FreeBSD_version < 500000
2351 rt
= ip_pcb
->inp_route
.ro_rt
;
2354 callout_stop(&inp
->sctp_ep
.signature_change
.timer
);
2357 /* First take care of socket level things */
2360 /* XXX IPsec cleanup here */
2362 if (ip_pcb
->inp_tdb_in
)
2363 TAILQ_REMOVE(&ip_pcb
->inp_tdb_in
->tdb_inp_in
,
2364 ip_pcb
, inp_tdb_in_next
);
2365 if (ip_pcb
->inp_tdb_out
)
2366 TAILQ_REMOVE(&ip_pcb
->inp_tdb_out
->tdb_inp_out
, ip_pcb
,
2368 if (ip_pcb
->inp_ipsec_localid
)
2369 ipsp_reffree(ip_pcb
->inp_ipsec_localid
);
2370 if (ip_pcb
->inp_ipsec_remoteid
)
2371 ipsp_reffree(ip_pcb
->inp_ipsec_remoteid
);
2372 if (ip_pcb
->inp_ipsec_localcred
)
2373 ipsp_reffree(ip_pcb
->inp_ipsec_localcred
);
2374 if (ip_pcb
->inp_ipsec_remotecred
)
2375 ipsp_reffree(ip_pcb
->inp_ipsec_remotecred
);
2376 if (ip_pcb
->inp_ipsec_localauth
)
2377 ipsp_reffree(ip_pcb
->inp_ipsec_localauth
);
2378 if (ip_pcb
->inp_ipsec_remoteauth
)
2379 ipsp_reffree(ip_pcb
->inp_ipsec_remoteauth
);
2382 ipsec4_delete_pcbpolicy(ip_pcb
);
2385 #if defined(__FreeBSD__) && __FreeBSD_version > 500000
2390 #if defined(__FreeBSD__) && __FreeBSD_version > 500000
	if (ip_pcb->inp_options) {
		m_free(ip_pcb->inp_options);
		ip_pcb->inp_options = 0;
	}
#if !defined(__FreeBSD__) || __FreeBSD_version < 500000
	if (rt) {
		RTFREE(rt);
		ip_pcb->inp_route.ro_rt = 0;
	}
#endif
	if (ip_pcb->inp_moptions) {
		ip_freemoptions(ip_pcb->inp_moptions);
		ip_pcb->inp_moptions = 0;
	}
#if !(defined(__FreeBSD__) || defined(__APPLE__) || defined(__DragonFly__))
	inp->inp_vflag = 0;
#else
	ip_pcb->inp_vflag = 0;
#endif
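	/*
	 * Note: the inp_options/route/inp_moptions releases above roughly
	 * mirror what the generic in_pcbdetach() path does for other
	 * protocols; SCTP does it by hand because the rest of the teardown
	 * below is SCTP-specific.
	 */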
2417 /* Now the sctp_pcb things */
2420 * free each asoc if it is not already closed/free. we can't use
2421 * the macro here since le_next will get freed as part of the
2422 * sctp_free_assoc() call.
2425 for ((asoc
= LIST_FIRST(&inp
->sctp_asoc_list
)); asoc
!= NULL
;
2427 nasoc
= LIST_NEXT(asoc
, sctp_tcblist
);
2428 SCTP_TCB_LOCK(asoc
);
2429 if (SCTP_GET_STATE(&asoc
->asoc
) != SCTP_STATE_COOKIE_WAIT
) {
2430 struct mbuf
*op_err
;
2431 MGET(op_err
, MB_DONTWAIT
, MT_DATA
);
2433 /* Fill in the user initiated abort */
2434 struct sctp_paramhdr
*ph
;
2435 op_err
->m_len
= sizeof(struct sctp_paramhdr
);
2436 ph
= mtod(op_err
, struct sctp_paramhdr
*);
2437 ph
->param_type
= htons(
2438 SCTP_CAUSE_USER_INITIATED_ABT
);
2439 ph
->param_length
= htons(op_err
->m_len
);
2441 sctp_send_abort_tcb(asoc
, op_err
);
2445 * sctp_free_assoc() will call sctp_inpcb_free(),
2446 * if SCTP_PCB_FLAGS_SOCKET_GONE set.
2447 * So, we clear it before sctp_free_assoc() making sure
2448 * no double sctp_inpcb_free().
2450 inp
->sctp_flags
&= ~SCTP_PCB_FLAGS_SOCKET_GONE
;
2451 SCTP_INP_WUNLOCK(inp
);
2452 sctp_free_assoc(inp
, asoc
);
2453 SCTP_INP_WLOCK(inp
);
	while ((sq = TAILQ_FIRST(&inp->sctp_queue_list)) != NULL) {
		TAILQ_REMOVE(&inp->sctp_queue_list, sq, next_sq);
		SCTP_ZONE_FREE(sctppcbinfo.ipi_zone_sockq, sq);
		sctppcbinfo.ipi_count_sockq--;
		sctppcbinfo.ipi_gencnt_sockq++;
	}
	inp->sctp_socket = 0;
2462 /* Now first we remove ourselves from the overall list of all EP's */
2464 /* Unlock inp first, need correct order */
2465 SCTP_INP_WUNLOCK(inp
);
2466 /* now iterator lock */
2467 SCTP_ITERATOR_LOCK();
2469 SCTP_INP_INFO_WLOCK();
2470 /* now reget the inp lock */
2471 SCTP_INP_WLOCK(inp
);
2473 inp_save
= LIST_NEXT(inp
, sctp_list
);
2474 LIST_REMOVE(inp
, sctp_list
);
2476 * Now the question comes as to if this EP was ever bound at all.
2477 * If it was, then we must pull it out of the EP hash list.
2479 if ((inp
->sctp_flags
& SCTP_PCB_FLAGS_UNBOUND
) !=
2480 SCTP_PCB_FLAGS_UNBOUND
) {
2482 * ok, this guy has been bound. It's port is somewhere
2483 * in the sctppcbinfo hash table. Remove it!
2485 LIST_REMOVE(inp
, sctp_hash
);
2487 /* fix any iterators only after out of the list */
2488 sctp_iterator_inp_being_freed(inp
, inp_save
);
2489 SCTP_ITERATOR_UNLOCK();
2491 * if we have an address list the following will free the list of
2492 * ifaddr's that are set into this ep. Again macro limitations here,
2493 * since the LIST_FOREACH could be a bad idea.
2495 for ((laddr
= LIST_FIRST(&inp
->sctp_addr_list
)); laddr
!= NULL
;
2497 nladdr
= LIST_NEXT(laddr
, sctp_nxt_addr
);
2498 LIST_REMOVE(laddr
, sctp_nxt_addr
);
2499 SCTP_ZONE_FREE(sctppcbinfo
.ipi_zone_laddr
, laddr
);
2500 sctppcbinfo
.ipi_gencnt_laddr
++;
2501 sctppcbinfo
.ipi_count_laddr
--;
2503 /* Now lets see about freeing the EP hash table. */
2504 if (inp
->sctp_tcbhash
!= NULL
) {
2505 FREE(inp
->sctp_tcbhash
, M_PCB
);
2506 inp
->sctp_tcbhash
= 0;
2508 SCTP_INP_WUNLOCK(inp
);
2509 SCTP_ASOC_CREATE_UNLOCK(inp
);
2510 SCTP_INP_LOCK_DESTROY(inp
);
2511 SCTP_ASOC_CREATE_LOCK_DESTROY(inp
);
2513 /* Now we must put the ep memory back into the zone pool */
2514 SCTP_ZONE_FREE(sctppcbinfo
.ipi_zone_ep
, inp
);
2515 sctppcbinfo
.ipi_count_ep
--;
2517 SCTP_INP_INFO_WUNLOCK();
struct sctp_nets *
sctp_findnet(struct sctp_tcb *stcb, struct sockaddr *addr)
{
	struct sctp_nets *net;
	struct sockaddr_in *sin;
	struct sockaddr_in6 *sin6;

	/* use the peer's/remote port for lookup if unspecified */
	sin = (struct sockaddr_in *)addr;
	sin6 = (struct sockaddr_in6 *)addr;
#if 0 /* why do we need to check the port for a nets list on an assoc? */
	if (stcb->rport != sin->sin_port) {
		/* we cheat and just use a sin for this test */
		return (NULL);
	}
#endif
	/* locate the address */
	TAILQ_FOREACH(net, &stcb->asoc.nets, sctp_next) {
		if (sctp_cmpaddr(addr, (struct sockaddr *)&net->ro._l_addr))
			return (net);
	}
	return (NULL);
}
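/*
 * Note: sctp_add_remote_addr() below does a sctp_findnet() lookup first
 * (the 'netfirst' check) so that an address repeated in an INIT/INIT-ACK
 * or ASCONF does not get a second sctp_nets entry allocated for it.
 */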
/*
 * adds a remote endpoint address, done with the INIT/INIT-ACK
 * as well as when an ASCONF arrives that adds it. It will also
 * initialize all the cwnd stats of stuff.
 */
static int
sctp_is_address_on_local_host(struct sockaddr *addr)
{
	struct ifnet *ifn;
	struct ifaddr *ifa;

	TAILQ_FOREACH(ifn, &ifnet, if_list) {
		TAILQ_FOREACH(ifa, &ifn->if_addrlist, ifa_list) {
			if (addr->sa_family == ifa->ifa_addr->sa_family) {
				if (addr->sa_family == AF_INET) {
					struct sockaddr_in *sin, *sin_c;

					sin = (struct sockaddr_in *)addr;
					sin_c = (struct sockaddr_in *)
					    ifa->ifa_addr;
					if (sin->sin_addr.s_addr ==
					    sin_c->sin_addr.s_addr) {
						/* we are on the same machine */
						return (1);
					}
				} else if (addr->sa_family == AF_INET6) {
					struct sockaddr_in6 *sin6, *sin_c6;

					sin6 = (struct sockaddr_in6 *)addr;
					sin_c6 = (struct sockaddr_in6 *)
					    ifa->ifa_addr;
					if (SCTP6_ARE_ADDR_EQUAL(&sin6->sin6_addr,
					    &sin_c6->sin6_addr)) {
						/* we are on the same machine */
						return (1);
					}
				}
			}
		}
	}
	return (0);
}
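/*
 * Note: this scans every address on every interface, so its cost grows
 * with the number of configured addresses; it is used on the
 * address-adding paths below to decide whether loopback/local scopes
 * apply to the peer.
 */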
2588 sctp_add_remote_addr(struct sctp_tcb
*stcb
, struct sockaddr
*newaddr
,
2589 int set_scope
, int from
)
2592 * The following is redundant to the same lines in the
2593 * sctp_aloc_assoc() but is needed since other's call the add
2596 struct sctp_nets
*net
, *netfirst
;
2600 if (sctp_debug_on
& SCTP_DEBUG_PCB1
) {
2601 kprintf("Adding an address (from:%d) to the peer: ", from
);
2602 sctp_print_address(newaddr
);
2605 netfirst
= sctp_findnet(stcb
, newaddr
);
2608 * Lie and return ok, we don't want to make the association
2609 * go away for this behavior. It will happen in the TCP model
2610 * in a connected socket. It does not reach the hash table
2611 * until after the association is built so it can't be found.
2612 * Mark as reachable, since the initial creation will have
2613 * been cleared and the NOT_IN_ASSOC flag will have been
2614 * added... and we don't want to end up removing it back out.
2616 if (netfirst
->dest_state
& SCTP_ADDR_UNCONFIRMED
) {
2617 netfirst
->dest_state
= (SCTP_ADDR_REACHABLE
|
2618 SCTP_ADDR_UNCONFIRMED
);
2620 netfirst
->dest_state
= SCTP_ADDR_REACHABLE
;
2626 if (newaddr
->sa_family
== AF_INET
) {
2627 struct sockaddr_in
*sin
;
2628 sin
= (struct sockaddr_in
*)newaddr
;
2629 if (sin
->sin_addr
.s_addr
== 0) {
2630 /* Invalid address */
2633 /* zero out the bzero area */
2634 memset(&sin
->sin_zero
, 0, sizeof(sin
->sin_zero
));
2636 /* assure len is set */
2637 sin
->sin_len
= sizeof(struct sockaddr_in
);
2639 #ifdef SCTP_DONT_DO_PRIVADDR_SCOPE
2640 stcb
->ipv4_local_scope
= 1;
2642 if (IN4_ISPRIVATE_ADDRESS(&sin
->sin_addr
)) {
2643 stcb
->asoc
.ipv4_local_scope
= 1;
2645 #endif /* SCTP_DONT_DO_PRIVADDR_SCOPE */
2647 if (sctp_is_address_on_local_host(newaddr
)) {
2648 stcb
->asoc
.loopback_scope
= 1;
2649 stcb
->asoc
.ipv4_local_scope
= 1;
2650 stcb
->asoc
.local_scope
= 1;
2651 stcb
->asoc
.site_scope
= 1;
2656 if (sctp_is_address_on_local_host(newaddr
)) {
2657 stcb
->asoc
.loopback_scope
= 1;
2658 stcb
->asoc
.ipv4_local_scope
= 1;
2659 stcb
->asoc
.local_scope
= 1;
2660 stcb
->asoc
.site_scope
= 1;
2663 /* Validate the address is in scope */
2664 if ((IN4_ISPRIVATE_ADDRESS(&sin
->sin_addr
)) &&
2665 (stcb
->asoc
.ipv4_local_scope
== 0)) {
2669 } else if (newaddr
->sa_family
== AF_INET6
) {
2670 struct sockaddr_in6
*sin6
;
2671 sin6
= (struct sockaddr_in6
*)newaddr
;
2672 if (IN6_IS_ADDR_UNSPECIFIED(&sin6
->sin6_addr
)) {
2673 /* Invalid address */
2676 /* assure len is set */
2677 sin6
->sin6_len
= sizeof(struct sockaddr_in6
);
2679 if (sctp_is_address_on_local_host(newaddr
)) {
2680 stcb
->asoc
.loopback_scope
= 1;
2681 stcb
->asoc
.local_scope
= 1;
2682 stcb
->asoc
.ipv4_local_scope
= 1;
2683 stcb
->asoc
.site_scope
= 1;
2684 } else if (IN6_IS_ADDR_LINKLOCAL(&sin6
->sin6_addr
)) {
2686 * If the new destination is a LINK_LOCAL
2687 * we must have common site scope. Don't set
2688 * the local scope since we may not share all
2689 * links, only loopback can do this.
2690 * Links on the local network would also
2691 * be on our private network for v4 too.
2693 stcb
->asoc
.ipv4_local_scope
= 1;
2694 stcb
->asoc
.site_scope
= 1;
2695 } else if (IN6_IS_ADDR_SITELOCAL(&sin6
->sin6_addr
)) {
2697 * If the new destination is SITE_LOCAL
2698 * then we must have site scope in common.
2700 stcb
->asoc
.site_scope
= 1;
2705 if (sctp_is_address_on_local_host(newaddr
)) {
2706 stcb
->asoc
.loopback_scope
= 1;
2707 stcb
->asoc
.ipv4_local_scope
= 1;
2708 stcb
->asoc
.local_scope
= 1;
2709 stcb
->asoc
.site_scope
= 1;
2712 /* Validate the address is in scope */
2713 if (IN6_IS_ADDR_LOOPBACK(&sin6
->sin6_addr
) &&
2714 (stcb
->asoc
.loopback_scope
== 0)) {
2716 } else if (IN6_IS_ADDR_LINKLOCAL(&sin6
->sin6_addr
) &&
2717 (stcb
->asoc
.local_scope
== 0)) {
2719 } else if (IN6_IS_ADDR_SITELOCAL(&sin6
->sin6_addr
) &&
2720 (stcb
->asoc
.site_scope
== 0)) {
2725 /* not supported family type */
2728 net
= (struct sctp_nets
*)SCTP_ZONE_GET(sctppcbinfo
.ipi_zone_net
);
2732 sctppcbinfo
.ipi_count_raddr
++;
2733 sctppcbinfo
.ipi_gencnt_raddr
++;
2734 bzero(net
, sizeof(*net
));
2735 memcpy(&net
->ro
._l_addr
, newaddr
, newaddr
->sa_len
);
2736 if (newaddr
->sa_family
== AF_INET
) {
2737 ((struct sockaddr_in
*)&net
->ro
._l_addr
)->sin_port
= stcb
->rport
;
2738 } else if (newaddr
->sa_family
== AF_INET6
) {
2739 ((struct sockaddr_in6
*)&net
->ro
._l_addr
)->sin6_port
= stcb
->rport
;
2741 net
->addr_is_local
= sctp_is_address_on_local_host(newaddr
);
2742 net
->failure_threshold
= stcb
->asoc
.def_net_failure
;
2743 if (addr_inscope
== 0) {
2745 if (sctp_debug_on
& SCTP_DEBUG_PCB1
) {
2746 kprintf("Adding an address which is OUT OF SCOPE\n");
2748 #endif /* SCTP_DEBUG */
2749 net
->dest_state
= (SCTP_ADDR_REACHABLE
|
2750 SCTP_ADDR_OUT_OF_SCOPE
);
2753 /* 8 is passed by connect_x */
2754 net
->dest_state
= SCTP_ADDR_REACHABLE
;
2756 net
->dest_state
= SCTP_ADDR_REACHABLE
|
2757 SCTP_ADDR_UNCONFIRMED
;
2759 net
->RTO
= stcb
->asoc
.initial_rto
;
2760 stcb
->asoc
.numnets
++;
2763 /* Init the timer structure */
2764 #if defined(__FreeBSD__) && __FreeBSD_version >= 500000
2765 callout_init(&net
->rxt_timer
.timer
, 0);
2766 callout_init(&net
->pmtu_timer
.timer
, 0);
2768 callout_init(&net
->rxt_timer
.timer
);
2769 callout_init(&net
->pmtu_timer
.timer
);
2772 /* Now generate a route for this guy */
2773 /* KAME hack: embed scopeid */
2774 if (newaddr
->sa_family
== AF_INET6
) {
2775 struct sockaddr_in6
*sin6
;
2776 sin6
= (struct sockaddr_in6
*)&net
->ro
._l_addr
;
2777 #if defined(SCTP_BASE_FREEBSD) || defined(__APPLE__) || defined(__DragonFly__)
2778 in6_embedscope(&sin6
->sin6_addr
, sin6
,
2779 &stcb
->sctp_ep
->ip_inp
.inp
, NULL
);
2781 in6_embedscope(&sin6
->sin6_addr
, sin6
);
2783 #ifndef SCOPEDROUTING
2784 sin6
->sin6_scope_id
= 0;
2787 #if defined(__FreeBSD__) || defined(__APPLE__) || defined(__DragonFly__)
2788 rtalloc_ign((struct route
*)&net
->ro
, 0UL);
2790 rtalloc((struct route
*)&net
->ro
);
2792 if (newaddr
->sa_family
== AF_INET6
) {
2793 struct sockaddr_in6
*sin6
;
2794 sin6
= (struct sockaddr_in6
*)&net
->ro
._l_addr
;
2795 in6_recoverscope(sin6
, &sin6
->sin6_addr
, NULL
);
2797 if ((net
->ro
.ro_rt
) &&
2798 (net
->ro
.ro_rt
->rt_ifp
)) {
2799 net
->mtu
= net
->ro
.ro_rt
->rt_ifp
->if_mtu
;
2801 stcb
->asoc
.smallest_mtu
= net
->mtu
;
2803 /* start things off to match mtu of interface please. */
2804 net
->ro
.ro_rt
->rt_rmx
.rmx_mtu
= net
->ro
.ro_rt
->rt_ifp
->if_mtu
;
2806 net
->mtu
= stcb
->asoc
.smallest_mtu
;
2808 if (stcb
->asoc
.smallest_mtu
> net
->mtu
) {
2809 stcb
->asoc
.smallest_mtu
= net
->mtu
;
	/*
	 * We take the max of the burst limit times a MTU or the
	 * INITIAL_CWND. We then limit this to 4 MTU's of sending.
	 */
	net->cwnd = min((net->mtu * 4), max((stcb->asoc.max_burst * net->mtu),
	    SCTP_INITIAL_CWND));
	/* we always get at LEAST 2 MTU's */
	if (net->cwnd < (2 * net->mtu)) {
		net->cwnd = 2 * net->mtu;
	}
	net->ssthresh = stcb->asoc.peers_rwnd;
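	/*
	 * Illustration (hypothetical numbers): with an interface MTU of
	 * 1500, max_burst of 4 and an SCTP_INITIAL_CWND of 4380 bytes, the
	 * cwnd assignment above works out to min(4 * 1500, max(4 * 1500,
	 * 4380)) = 6000, comfortably over the 2-MTU floor of 3000 that the
	 * check above enforces.
	 */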
2823 net
->src_addr_selected
= 0;
2824 netfirst
= TAILQ_FIRST(&stcb
->asoc
.nets
);
2825 if (net
->ro
.ro_rt
== NULL
) {
2826 /* Since we have no route put it at the back */
2827 TAILQ_INSERT_TAIL(&stcb
->asoc
.nets
, net
, sctp_next
);
2828 } else if (netfirst
== NULL
) {
2829 /* We are the first one in the pool. */
2830 TAILQ_INSERT_HEAD(&stcb
->asoc
.nets
, net
, sctp_next
);
2831 } else if (netfirst
->ro
.ro_rt
== NULL
) {
2833 * First one has NO route. Place this one ahead of the
2836 TAILQ_INSERT_HEAD(&stcb
->asoc
.nets
, net
, sctp_next
);
2837 } else if (net
->ro
.ro_rt
->rt_ifp
!= netfirst
->ro
.ro_rt
->rt_ifp
) {
2839 * This one has a different interface than the one at the
2840 * top of the list. Place it ahead.
2842 TAILQ_INSERT_HEAD(&stcb
->asoc
.nets
, net
, sctp_next
);
2845 * Ok we have the same interface as the first one. Move
2846 * forward until we find either
2847 * a) one with a NULL route... insert ahead of that
2848 * b) one with a different ifp.. insert after that.
2849 * c) end of the list.. insert at the tail.
2851 struct sctp_nets
*netlook
;
2853 netlook
= TAILQ_NEXT(netfirst
, sctp_next
);
2854 if (netlook
== NULL
) {
2855 /* End of the list */
2856 TAILQ_INSERT_TAIL(&stcb
->asoc
.nets
, net
,
2859 } else if (netlook
->ro
.ro_rt
== NULL
) {
2860 /* next one has NO route */
2861 TAILQ_INSERT_BEFORE(netfirst
, net
, sctp_next
);
2863 } else if (netlook
->ro
.ro_rt
->rt_ifp
!=
2864 net
->ro
.ro_rt
->rt_ifp
) {
2865 TAILQ_INSERT_AFTER(&stcb
->asoc
.nets
, netlook
,
2871 } while (netlook
!= NULL
);
2873 /* got to have a primary set */
2874 if (stcb
->asoc
.primary_destination
== 0) {
2875 stcb
->asoc
.primary_destination
= net
;
2876 } else if ((stcb
->asoc
.primary_destination
->ro
.ro_rt
== NULL
) &&
2878 /* No route to current primary adopt new primary */
2879 stcb
->asoc
.primary_destination
= net
;
2881 sctp_timer_start(SCTP_TIMER_TYPE_PATHMTURAISE
, stcb
->sctp_ep
, stcb
,
2889 * allocate an association and add it to the endpoint. The caller must
2890 * be careful to add all additional addresses once they are know right
2891 * away or else the assoc will be may experience a blackout scenario.
2894 sctp_aloc_assoc(struct sctp_inpcb
*inp
, struct sockaddr
*firstaddr
,
2895 int for_a_init
, int *error
, uint32_t override_tag
)
2897 struct sctp_tcb
*stcb
;
2898 struct sctp_association
*asoc
;
2899 struct sctpasochead
*head
;
2904 * Assumption made here:
2905 * Caller has done a sctp_findassociation_ep_addr(ep, addr's);
2906 * to make sure the address does not exist already.
2908 if (sctppcbinfo
.ipi_count_asoc
>= SCTP_MAX_NUM_OF_ASOC
) {
2909 /* Hit max assoc, sorry no more */
2913 SCTP_INP_RLOCK(inp
);
2914 if (inp
->sctp_flags
& SCTP_PCB_FLAGS_IN_TCPPOOL
) {
2916 * If its in the TCP pool, its NOT allowed to create an
2917 * association. The parent listener needs to call
2918 * sctp_aloc_assoc.. or the one-2-many socket. If a
2919 * peeled off, or connected one does this.. its an error.
2921 SCTP_INP_RUNLOCK(inp
);
2927 if (sctp_debug_on
& SCTP_DEBUG_PCB3
) {
2928 kprintf("Allocate an association for peer:");
2930 sctp_print_address(firstaddr
);
2933 kprintf("Port:%d\n",
2934 ntohs(((struct sockaddr_in
*)firstaddr
)->sin_port
));
2936 #endif /* SCTP_DEBUG */
2937 if (firstaddr
->sa_family
== AF_INET
) {
2938 struct sockaddr_in
*sin
;
2939 sin
= (struct sockaddr_in
*)firstaddr
;
2940 if ((sin
->sin_port
== 0) || (sin
->sin_addr
.s_addr
== 0)) {
2941 /* Invalid address */
2943 if (sctp_debug_on
& SCTP_DEBUG_PCB3
) {
2944 kprintf("peer address invalid\n");
2947 SCTP_INP_RUNLOCK(inp
);
2951 rport
= sin
->sin_port
;
2952 } else if (firstaddr
->sa_family
== AF_INET6
) {
2953 struct sockaddr_in6
*sin6
;
2954 sin6
= (struct sockaddr_in6
*)firstaddr
;
2955 if ((sin6
->sin6_port
== 0) ||
2956 (IN6_IS_ADDR_UNSPECIFIED(&sin6
->sin6_addr
))) {
2957 /* Invalid address */
2959 if (sctp_debug_on
& SCTP_DEBUG_PCB3
) {
2960 kprintf("peer address invalid\n");
2963 SCTP_INP_RUNLOCK(inp
);
2967 rport
= sin6
->sin6_port
;
2969 /* not supported family type */
2971 if (sctp_debug_on
& SCTP_DEBUG_PCB3
) {
2972 kprintf("BAD family %d\n", firstaddr
->sa_family
);
2975 SCTP_INP_RUNLOCK(inp
);
2979 SCTP_INP_RUNLOCK(inp
);
2980 if (inp
->sctp_flags
& SCTP_PCB_FLAGS_UNBOUND
) {
2982 * If you have not performed a bind, then we need to do
2983 * the ephemerial bind for you.
2986 if (sctp_debug_on
& SCTP_DEBUG_PCB3
) {
2987 kprintf("Doing implicit BIND\n");
2991 if ((err
= sctp_inpcb_bind(inp
->sctp_socket
,
2992 (struct sockaddr
*)NULL
,
2993 #if (defined(__FreeBSD__) && __FreeBSD_version >= 500000) || defined(__DragonFly__)
2994 (struct thread
*)NULL
2999 /* bind error, probably perm */
3001 if (sctp_debug_on
& SCTP_DEBUG_PCB3
) {
3002 kprintf("BIND FAILS ret:%d\n", err
);
3010 stcb
= (struct sctp_tcb
*)SCTP_ZONE_GET(sctppcbinfo
.ipi_zone_asoc
);
3012 /* out of memory? */
3014 if (sctp_debug_on
& SCTP_DEBUG_PCB3
) {
3015 kprintf("aloc_assoc: no assoc mem left, stcb=NULL\n");
3021 sctppcbinfo
.ipi_count_asoc
++;
3022 sctppcbinfo
.ipi_gencnt_asoc
++;
3024 bzero(stcb
, sizeof(*stcb
));
3026 SCTP_TCB_LOCK_INIT(stcb
);
3027 /* setup back pointer's */
3028 stcb
->sctp_ep
= inp
;
3029 stcb
->sctp_socket
= inp
->sctp_socket
;
3030 if ((err
= sctp_init_asoc(inp
, asoc
, for_a_init
, override_tag
))) {
3032 SCTP_TCB_LOCK_DESTROY (stcb
);
3033 SCTP_ZONE_FREE(sctppcbinfo
.ipi_zone_asoc
, stcb
);
3034 sctppcbinfo
.ipi_count_asoc
--;
3036 if (sctp_debug_on
& SCTP_DEBUG_PCB3
) {
3037 kprintf("aloc_assoc: couldn't init asoc, out of mem?!\n");
3044 stcb
->rport
= rport
;
3045 SCTP_INP_INFO_WLOCK();
3046 SCTP_INP_WLOCK(inp
);
3047 if (inp
->sctp_flags
& (SCTP_PCB_FLAGS_SOCKET_GONE
|SCTP_PCB_FLAGS_SOCKET_ALLGONE
)) {
3048 /* inpcb freed while alloc going on */
3049 SCTP_TCB_LOCK_DESTROY (stcb
);
3050 SCTP_ZONE_FREE(sctppcbinfo
.ipi_zone_asoc
, stcb
);
3051 SCTP_INP_WUNLOCK(inp
);
3052 SCTP_INP_INFO_WUNLOCK();
3053 sctppcbinfo
.ipi_count_asoc
--;
3055 if (sctp_debug_on
& SCTP_DEBUG_PCB3
) {
3056 kprintf("aloc_assoc: couldn't init asoc, out of mem?!\n");
3062 SCTP_TCB_LOCK(stcb
);
3064 /* now that my_vtag is set, add it to the hash */
3065 head
= &sctppcbinfo
.sctp_asochash
[SCTP_PCBHASH_ASOC(stcb
->asoc
.my_vtag
,
3066 sctppcbinfo
.hashasocmark
)];
3067 /* put it in the bucket in the vtag hash of assoc's for the system */
3068 LIST_INSERT_HEAD(head
, stcb
, sctp_asocs
);
3069 SCTP_INP_INFO_WUNLOCK();
3072 if ((err
= sctp_add_remote_addr(stcb
, firstaddr
, 1, 1))) {
3073 /* failure.. memory error? */
3075 FREE(asoc
->strmout
, M_PCB
);
3076 if (asoc
->mapping_array
)
3077 FREE(asoc
->mapping_array
, M_PCB
);
3079 SCTP_ZONE_FREE(sctppcbinfo
.ipi_zone_asoc
, stcb
);
3080 sctppcbinfo
.ipi_count_asoc
--;
3082 if (sctp_debug_on
& SCTP_DEBUG_PCB3
) {
3083 kprintf("aloc_assoc: couldn't add remote addr!\n");
3086 SCTP_TCB_LOCK_DESTROY (stcb
);
3090 /* Init all the timers */
3091 #if defined(__FreeBSD__) && __FreeBSD_version >= 500000
3092 callout_init(&asoc
->hb_timer
.timer
, 0);
3093 callout_init(&asoc
->dack_timer
.timer
, 0);
3094 callout_init(&asoc
->asconf_timer
.timer
, 0);
3095 callout_init(&asoc
->shut_guard_timer
.timer
, 0);
3096 callout_init(&asoc
->autoclose_timer
.timer
, 0);
3097 callout_init(&asoc
->delayed_event_timer
.timer
, 0);
3099 callout_init(&asoc
->hb_timer
.timer
);
3100 callout_init(&asoc
->dack_timer
.timer
);
3101 callout_init(&asoc
->asconf_timer
.timer
);
3102 callout_init(&asoc
->shut_guard_timer
.timer
);
3103 callout_init(&asoc
->autoclose_timer
.timer
);
3104 callout_init(&asoc
->delayed_event_timer
.timer
);
3106 LIST_INSERT_HEAD(&inp
->sctp_asoc_list
, stcb
, sctp_tcblist
);
3107 /* now file the port under the hash as well */
3108 if (inp
->sctp_tcbhash
!= NULL
) {
3109 head
= &inp
->sctp_tcbhash
[SCTP_PCBHASH_ALLADDR(stcb
->rport
,
3110 inp
->sctp_hashmark
)];
3111 LIST_INSERT_HEAD(head
, stcb
, sctp_tcbhash
);
3113 SCTP_INP_WUNLOCK(inp
);
3115 if (sctp_debug_on
& SCTP_DEBUG_PCB1
) {
3116 kprintf("Association %p now allocated\n", stcb
);
void
sctp_free_remote_addr(struct sctp_nets *net)
{
	if (net == NULL)
		return;
	net->ref_count--;
	if (net->ref_count <= 0) {
		/* stop timer if running */
		callout_stop(&net->rxt_timer.timer);
		callout_stop(&net->pmtu_timer.timer);
		net->dest_state = SCTP_ADDR_NOT_REACHABLE;
		SCTP_ZONE_FREE(sctppcbinfo.ipi_zone_net, net);
		sctppcbinfo.ipi_count_raddr--;
	}
}
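/*
 * Note: sctp_nets entries are reference counted; the zone free above only
 * happens when the last reference goes away, so a caller that still holds
 * a pointer (a running timer, the asoc's nets list) keeps the entry alive.
 */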
/*
 * remove a remote endpoint address from an association, it
 * will fail if the address does not exist.
 */
int
sctp_del_remote_addr(struct sctp_tcb *stcb, struct sockaddr *remaddr)
{
	/*
	 * Here we need to remove a remote address. This is quite simple, we
	 * first find it in the list of addresses for the association
	 * (tasoc->asoc.nets) and then if it is there, we do a LIST_REMOVE
	 * on that item.
	 * Note we do not allow it to be removed if there are no other
	 * addresses.
	 */
	struct sctp_association *asoc;
	struct sctp_nets *net, *net_tmp;

	asoc = &stcb->asoc;
	if (asoc->numnets < 2) {
		/* Must have at LEAST two remote addresses */
		return (-1);
	}
	/* locate the address */
	for (net = TAILQ_FIRST(&asoc->nets); net != NULL; net = net_tmp) {
		net_tmp = TAILQ_NEXT(net, sctp_next);
		if (net->ro._l_addr.sa.sa_family != remaddr->sa_family) {
			continue;
		}
		if (sctp_cmpaddr((struct sockaddr *)&net->ro._l_addr,
		    remaddr)) {
			/* we found the guy */
			asoc->numnets--;
			TAILQ_REMOVE(&asoc->nets, net, sctp_next);
			sctp_free_remote_addr(net);
			if (net == asoc->primary_destination) {
				struct sctp_nets *lnet;

				lnet = TAILQ_FIRST(&asoc->nets);
				/* Try to find a confirmed primary */
				asoc->primary_destination =
				    sctp_find_alternate_net(stcb, lnet);
			}
			if (net == asoc->last_data_chunk_from) {
				asoc->last_data_chunk_from =
				    TAILQ_FIRST(&asoc->nets);
			}
			if (net == asoc->last_control_chunk_from) {
				asoc->last_control_chunk_from =
				    TAILQ_FIRST(&asoc->nets);
			}
			if (net == asoc->asconf_last_sent_to) {
				asoc->asconf_last_sent_to =
				    TAILQ_FIRST(&asoc->nets);
			}
			return (0);
		}
	}
	/* address not found! */
	return (-2);
}
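/*
 * Note: besides unlinking the sctp_nets entry, the routine above re-aims
 * primary_destination, last_data_chunk_from, last_control_chunk_from and
 * asconf_last_sent_to so the association is never left pointing at the
 * freed net.
 */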
3204 sctp_add_vtag_to_timewait(struct sctp_inpcb
*inp
, u_int32_t tag
)
3206 struct sctpvtaghead
*chain
;
3207 struct sctp_tagblock
*twait_block
;
3210 SCTP_GETTIME_TIMEVAL(&now
);
3211 chain
= &sctppcbinfo
.vtag_timewait
[(tag
% SCTP_STACK_VTAG_HASH_SIZE
)];
3213 if (!LIST_EMPTY(chain
)) {
3214 /* Block(s) present, lets find space, and expire on the fly */
3215 LIST_FOREACH(twait_block
, chain
, sctp_nxt_tagblock
) {
3216 for (i
= 0; i
< SCTP_NUMBER_IN_VTAG_BLOCK
; i
++) {
3217 if ((twait_block
->vtag_block
[i
].v_tag
== 0) &&
3219 twait_block
->vtag_block
[0].tv_sec_at_expire
=
3220 now
.tv_sec
+ SCTP_TIME_WAIT
;
3221 twait_block
->vtag_block
[0].v_tag
= tag
;
3223 } else if ((twait_block
->vtag_block
[i
].v_tag
) &&
3224 ((long)twait_block
->vtag_block
[i
].tv_sec_at_expire
>
3226 /* Audit expires this guy */
3227 twait_block
->vtag_block
[i
].tv_sec_at_expire
= 0;
3228 twait_block
->vtag_block
[i
].v_tag
= 0;
3230 /* Reuse it for my new tag */
3231 twait_block
->vtag_block
[0].tv_sec_at_expire
= now
.tv_sec
+ SCTP_TIME_WAIT
;
3232 twait_block
->vtag_block
[0].v_tag
= tag
;
3239 * We only do up to the block where we can
3240 * place our tag for audits
3246 /* Need to add a new block to chain */
3248 MALLOC(twait_block
, struct sctp_tagblock
*,
3249 sizeof(struct sctp_tagblock
), M_PCB
, M_NOWAIT
);
3250 if (twait_block
== NULL
) {
3253 memset(twait_block
, 0, sizeof(struct sctp_timewait
));
3254 LIST_INSERT_HEAD(chain
, twait_block
, sctp_nxt_tagblock
);
3255 twait_block
->vtag_block
[0].tv_sec_at_expire
= now
.tv_sec
+
3257 twait_block
->vtag_block
[0].v_tag
= tag
;
3263 sctp_iterator_asoc_being_freed(struct sctp_inpcb
*inp
, struct sctp_tcb
*stcb
)
3265 struct sctp_iterator
*it
;
3269 /* Unlock the tcb lock we do this so
3270 * we avoid a dead lock scenario where
3271 * the iterator is waiting on the TCB lock
3272 * and the TCB lock is waiting on the iterator
3275 SCTP_ITERATOR_LOCK();
3276 SCTP_INP_INFO_WLOCK();
3277 SCTP_INP_WLOCK(inp
);
3278 SCTP_TCB_LOCK(stcb
);
3280 it
= stcb
->asoc
.stcb_starting_point_for_iterator
;
3284 if (it
->inp
!= stcb
->sctp_ep
) {
3285 /* hm, focused on the wrong one? */
3288 if (it
->stcb
!= stcb
) {
3291 it
->stcb
= LIST_NEXT(stcb
, sctp_tcblist
);
3292 if (it
->stcb
== NULL
) {
3293 /* done with all asoc's in this assoc */
3294 if (it
->iterator_flags
& SCTP_ITERATOR_DO_SINGLE_INP
) {
3298 it
->inp
= LIST_NEXT(inp
, sctp_list
);
3304 * Free the association after un-hashing the remote port.
3307 sctp_free_assoc(struct sctp_inpcb
*inp
, struct sctp_tcb
*stcb
)
3309 struct sctp_association
*asoc
;
3310 struct sctp_nets
*net
, *prev
;
3311 struct sctp_laddr
*laddr
;
3312 struct sctp_tmit_chunk
*chk
;
3313 struct sctp_asconf_addr
*aparam
;
3314 struct sctp_socket_q_list
*sq
;
3316 /* first, lets purge the entry from the hash table. */
3318 if (stcb
->asoc
.state
== 0) {
3319 kprintf("Freeing already free association:%p - huh??\n",
3326 /* now clean up any other timers */
3327 callout_stop(&asoc
->hb_timer
.timer
);
3328 callout_stop(&asoc
->dack_timer
.timer
);
3329 callout_stop(&asoc
->asconf_timer
.timer
);
3330 callout_stop(&asoc
->shut_guard_timer
.timer
);
3331 callout_stop(&asoc
->autoclose_timer
.timer
);
3332 callout_stop(&asoc
->delayed_event_timer
.timer
);
3333 TAILQ_FOREACH(net
, &asoc
->nets
, sctp_next
) {
3334 callout_stop(&net
->rxt_timer
.timer
);
3335 callout_stop(&net
->pmtu_timer
.timer
);
3338 /* Iterator asoc being freed we send an
3339 * unlocked TCB. It returns with INP_INFO
3340 * and INP write locked and the TCB locked
3341 * too and of course the iterator lock
3342 * in place as well..
3344 SCTP_TCB_UNLOCK(stcb
);
3345 sctp_iterator_asoc_being_freed(inp
, stcb
);
3347 /* Null all of my entry's on the socket q */
3348 TAILQ_FOREACH(sq
, &inp
->sctp_queue_list
, next_sq
) {
3349 if (sq
->tcb
== stcb
) {
3354 if (inp
->sctp_tcb_at_block
== (void *)stcb
) {
3355 inp
->error_on_block
= ECONNRESET
;
3358 if (inp
->sctp_tcbhash
) {
3359 LIST_REMOVE(stcb
, sctp_tcbhash
);
3361 /* Now lets remove it from the list of ALL associations in the EP */
3362 LIST_REMOVE(stcb
, sctp_tcblist
);
3363 SCTP_INP_WUNLOCK(inp
);
3364 SCTP_ITERATOR_UNLOCK();
3367 /* pull from vtag hash */
3368 LIST_REMOVE(stcb
, sctp_asocs
);
3371 * Now before we can free the assoc, we must remove all of the
3372 * networks and any other allocated space.. i.e. add removes here
3373 * before the SCTP_ZONE_FREE() of the tasoc entry.
3376 sctp_add_vtag_to_timewait(inp
, asoc
->my_vtag
);
3377 SCTP_INP_INFO_WUNLOCK();
3379 while (!TAILQ_EMPTY(&asoc
->nets
)) {
3380 net
= TAILQ_FIRST(&asoc
->nets
);
3381 /* pull from list */
3382 if ((sctppcbinfo
.ipi_count_raddr
== 0) || (prev
== net
)) {
3386 TAILQ_REMOVE(&asoc
->nets
, net
, sctp_next
);
3389 SCTP_ZONE_FREE(sctppcbinfo
.ipi_zone_net
, net
);
3390 sctppcbinfo
.ipi_count_raddr
--;
3393 * The chunk lists and such SHOULD be empty but we check them
3396 /* anything on the wheel needs to be removed */
3397 while (!TAILQ_EMPTY(&asoc
->out_wheel
)) {
3398 struct sctp_stream_out
*outs
;
3399 outs
= TAILQ_FIRST(&asoc
->out_wheel
);
3400 TAILQ_REMOVE(&asoc
->out_wheel
, outs
, next_spoke
);
3401 /* now clean up any chunks here */
3402 chk
= TAILQ_FIRST(&outs
->outqueue
);
3404 TAILQ_REMOVE(&outs
->outqueue
, chk
, sctp_next
);
3406 sctp_m_freem(chk
->data
);
3411 /* Free the chunk */
3412 SCTP_ZONE_FREE(sctppcbinfo
.ipi_zone_chunk
, chk
);
3413 sctppcbinfo
.ipi_count_chunk
--;
3414 sctppcbinfo
.ipi_gencnt_chunk
++;
3415 if ((int)sctppcbinfo
.ipi_count_chunk
< 0) {
3416 panic("Chunk count is negative");
3418 chk
= TAILQ_FIRST(&outs
->outqueue
);
3420 outs
= TAILQ_FIRST(&asoc
->out_wheel
);
3423 if (asoc
->pending_reply
) {
3424 FREE(asoc
->pending_reply
, M_PCB
);
3425 asoc
->pending_reply
= NULL
;
3427 chk
= TAILQ_FIRST(&asoc
->pending_reply_queue
);
3429 TAILQ_REMOVE(&asoc
->pending_reply_queue
, chk
, sctp_next
);
3431 sctp_m_freem(chk
->data
);
3436 /* Free the chunk */
3437 SCTP_ZONE_FREE(sctppcbinfo
.ipi_zone_chunk
, chk
);
3438 sctppcbinfo
.ipi_count_chunk
--;
3439 sctppcbinfo
.ipi_gencnt_chunk
++;
3440 if ((int)sctppcbinfo
.ipi_count_chunk
< 0) {
3441 panic("Chunk count is negative");
3443 chk
= TAILQ_FIRST(&asoc
->pending_reply_queue
);
3445 /* pending send queue SHOULD be empty */
3446 if (!TAILQ_EMPTY(&asoc
->send_queue
)) {
3447 chk
= TAILQ_FIRST(&asoc
->send_queue
);
3449 TAILQ_REMOVE(&asoc
->send_queue
, chk
, sctp_next
);
3451 sctp_m_freem(chk
->data
);
3454 SCTP_ZONE_FREE(sctppcbinfo
.ipi_zone_chunk
, chk
);
3455 sctppcbinfo
.ipi_count_chunk
--;
3456 if ((int)sctppcbinfo
.ipi_count_chunk
< 0) {
3457 panic("Chunk count is negative");
3459 sctppcbinfo
.ipi_gencnt_chunk
++;
3460 chk
= TAILQ_FIRST(&asoc
->send_queue
);
3463 /* sent queue SHOULD be empty */
3464 if (!TAILQ_EMPTY(&asoc
->sent_queue
)) {
3465 chk
= TAILQ_FIRST(&asoc
->sent_queue
);
3467 TAILQ_REMOVE(&asoc
->sent_queue
, chk
, sctp_next
);
3469 sctp_m_freem(chk
->data
);
3472 SCTP_ZONE_FREE(sctppcbinfo
.ipi_zone_chunk
, chk
);
3473 sctppcbinfo
.ipi_count_chunk
--;
3474 if ((int)sctppcbinfo
.ipi_count_chunk
< 0) {
3475 panic("Chunk count is negative");
3477 sctppcbinfo
.ipi_gencnt_chunk
++;
3478 chk
= TAILQ_FIRST(&asoc
->sent_queue
);
3481 /* control queue MAY not be empty */
3482 if (!TAILQ_EMPTY(&asoc
->control_send_queue
)) {
3483 chk
= TAILQ_FIRST(&asoc
->control_send_queue
);
3485 TAILQ_REMOVE(&asoc
->control_send_queue
, chk
, sctp_next
);
3487 sctp_m_freem(chk
->data
);
3490 SCTP_ZONE_FREE(sctppcbinfo
.ipi_zone_chunk
, chk
);
3491 sctppcbinfo
.ipi_count_chunk
--;
3492 if ((int)sctppcbinfo
.ipi_count_chunk
< 0) {
3493 panic("Chunk count is negative");
3495 sctppcbinfo
.ipi_gencnt_chunk
++;
3496 chk
= TAILQ_FIRST(&asoc
->control_send_queue
);
3499 if (!TAILQ_EMPTY(&asoc
->reasmqueue
)) {
3500 chk
= TAILQ_FIRST(&asoc
->reasmqueue
);
3502 TAILQ_REMOVE(&asoc
->reasmqueue
, chk
, sctp_next
);
3504 sctp_m_freem(chk
->data
);
3507 SCTP_ZONE_FREE(sctppcbinfo
.ipi_zone_chunk
, chk
);
3508 sctppcbinfo
.ipi_count_chunk
--;
3509 if ((int)sctppcbinfo
.ipi_count_chunk
< 0) {
3510 panic("Chunk count is negative");
3512 sctppcbinfo
.ipi_gencnt_chunk
++;
3513 chk
= TAILQ_FIRST(&asoc
->reasmqueue
);
3516 if (!TAILQ_EMPTY(&asoc
->delivery_queue
)) {
3517 chk
= TAILQ_FIRST(&asoc
->delivery_queue
);
3519 TAILQ_REMOVE(&asoc
->delivery_queue
, chk
, sctp_next
);
3521 sctp_m_freem(chk
->data
);
3524 SCTP_ZONE_FREE(sctppcbinfo
.ipi_zone_chunk
, chk
);
3525 sctppcbinfo
.ipi_count_chunk
--;
3526 if ((int)sctppcbinfo
.ipi_count_chunk
< 0) {
3527 panic("Chunk count is negative");
3529 sctppcbinfo
.ipi_gencnt_chunk
++;
3530 chk
= TAILQ_FIRST(&asoc
->delivery_queue
);
3533 if (asoc
->mapping_array
) {
3534 FREE(asoc
->mapping_array
, M_PCB
);
3535 asoc
->mapping_array
= NULL
;
3538 /* the stream outs */
3539 if (asoc
->strmout
) {
3540 FREE(asoc
->strmout
, M_PCB
);
3541 asoc
->strmout
= NULL
;
3543 asoc
->streamoutcnt
= 0;
3546 for (i
= 0; i
< asoc
->streamincnt
; i
++) {
3547 if (!TAILQ_EMPTY(&asoc
->strmin
[i
].inqueue
)) {
3548 /* We have somethings on the streamin queue */
3549 chk
= TAILQ_FIRST(&asoc
->strmin
[i
].inqueue
);
3551 TAILQ_REMOVE(&asoc
->strmin
[i
].inqueue
,
3554 sctp_m_freem(chk
->data
);
3557 SCTP_ZONE_FREE(sctppcbinfo
.ipi_zone_chunk
,
3559 sctppcbinfo
.ipi_count_chunk
--;
3560 if ((int)sctppcbinfo
.ipi_count_chunk
< 0) {
3561 panic("Chunk count is negative");
3563 sctppcbinfo
.ipi_gencnt_chunk
++;
3564 chk
= TAILQ_FIRST(&asoc
->strmin
[i
].inqueue
);
3568 FREE(asoc
->strmin
, M_PCB
);
3569 asoc
->strmin
= NULL
;
3571 asoc
->streamincnt
= 0;
3572 /* local addresses, if any */
3573 while (!LIST_EMPTY(&asoc
->sctp_local_addr_list
)) {
3574 laddr
= LIST_FIRST(&asoc
->sctp_local_addr_list
);
3575 LIST_REMOVE(laddr
, sctp_nxt_addr
);
3576 SCTP_ZONE_FREE(sctppcbinfo
.ipi_zone_laddr
, laddr
);
3577 sctppcbinfo
.ipi_count_laddr
--;
3579 /* pending asconf (address) parameters */
3580 while (!TAILQ_EMPTY(&asoc
->asconf_queue
)) {
3581 aparam
= TAILQ_FIRST(&asoc
->asconf_queue
);
3582 TAILQ_REMOVE(&asoc
->asconf_queue
, aparam
, next
);
3583 FREE(aparam
, M_PCB
);
3585 if (asoc
->last_asconf_ack_sent
!= NULL
) {
3586 sctp_m_freem(asoc
->last_asconf_ack_sent
);
3587 asoc
->last_asconf_ack_sent
= NULL
;
3589 /* Insert new items here :> */
3591 /* Get rid of LOCK */
3592 SCTP_TCB_LOCK_DESTROY(stcb
);
3594 /* now clean up the tasoc itself */
3595 SCTP_ZONE_FREE(sctppcbinfo
.ipi_zone_asoc
, stcb
);
3596 sctppcbinfo
.ipi_count_asoc
--;
3597 if ((inp
->sctp_socket
->so_snd
.ssb_cc
) ||
3598 (inp
->sctp_socket
->so_snd
.ssb_mbcnt
)) {
3599 /* This will happen when a abort is done */
3600 inp
->sctp_socket
->so_snd
.ssb_cc
= 0;
3601 inp
->sctp_socket
->so_snd
.ssb_mbcnt
= 0;
3603 if (inp
->sctp_flags
& SCTP_PCB_FLAGS_TCPTYPE
) {
3604 if ((inp
->sctp_flags
& SCTP_PCB_FLAGS_IN_TCPPOOL
) == 0) {
3605 if (inp
->sctp_flags
& SCTP_PCB_FLAGS_CONNECTED
) {
3607 * For the base fd, that is NOT in TCP pool we
3608 * turn off the connected flag. This allows
3609 * non-listening endpoints to connect/shutdown/
3612 inp
->sctp_flags
&= ~SCTP_PCB_FLAGS_CONNECTED
;
3613 soisdisconnected(inp
->sctp_socket
);
3616 * For those that are in the TCP pool we just leave
3617 * so it cannot be used. When they close the fd we
3622 if (inp
->sctp_flags
& SCTP_PCB_FLAGS_SOCKET_GONE
) {
3623 sctp_inpcb_free(inp
, 0);
/*
 * determine if a destination is "reachable" based upon the addresses
 * bound to the current endpoint (e.g. only v4 or v6 currently bound)
 *
 * FIX: if we allow assoc-level bindx(), then this needs to be fixed
 * to use assoc level v4/v6 flags, as the assoc *may* not have the
 * same address types bound as its endpoint
 */
int
sctp_destination_is_reachable(struct sctp_tcb *stcb, struct sockaddr *destaddr)
{
	struct sctp_inpcb *inp;
	int answer;

	/*
	 * No locks here, the TCB, in all cases is already
	 * locked and an assoc is up. There is either an
	 * INP lock by the caller applied (in the asconf case when
	 * deleting an address) or NOT in the HB case, however
	 * if HB then the INP increment is up and the INP
	 * will not be removed (on top of the fact that
	 * we have a TCB lock). So we only want to
	 * read the sctp_flags, which is either bound-all
	 * or not.. no protection needed since once an
	 * assoc is up you can't be changing your binding.
	 */
	inp = stcb->sctp_ep;
	if (inp->sctp_flags & SCTP_PCB_FLAGS_BOUNDALL) {
		/* if bound all, destination is not restricted */
		/*
		 * RRS: Question during lock work: Is this
		 * correct? If you are bound-all you still
		 * might need to obey the V4--V6 flags???
		 * IMO this bound-all stuff needs to be removed!
		 */
		return (1);
	}
	/* NOTE: all "scope" checks are done when local addresses are added */
	if (destaddr->sa_family == AF_INET6) {
#if !(defined(__FreeBSD__) || defined(__APPLE__) || defined(__DragonFly__))
		answer = inp->inp_vflag & INP_IPV6;
#else
		answer = inp->ip_inp.inp.inp_vflag & INP_IPV6;
#endif
	} else if (destaddr->sa_family == AF_INET) {
#if !(defined(__FreeBSD__) || defined(__APPLE__) || defined(__DragonFly__))
		answer = inp->inp_vflag & INP_IPV4;
#else
		answer = inp->ip_inp.inp.inp_vflag & INP_IPV4;
#endif
	} else {
		/* invalid family, so it's unreachable */
		answer = 0;
	}
	return (answer);
}
/*
 * update the inp_vflags on an endpoint
 */
static void
sctp_update_ep_vflag(struct sctp_inpcb *inp) {
	struct sctp_laddr *laddr;

	/* first clear the flag */
#if !(defined(__FreeBSD__) || defined(__APPLE__) || defined(__DragonFly__))
	inp->inp_vflag = 0;
#else
	inp->ip_inp.inp.inp_vflag = 0;
#endif
	/* set the flag based on addresses on the ep list */
	LIST_FOREACH(laddr, &inp->sctp_addr_list, sctp_nxt_addr) {
		if (laddr->ifa == NULL) {
#ifdef SCTP_DEBUG
			if (sctp_debug_on & SCTP_DEBUG_PCB1) {
				kprintf("An ounce of prevention is worth a pound of cure\n");
			}
#endif /* SCTP_DEBUG */
			continue;
		}
		if (laddr->ifa->ifa_addr) {
			if (laddr->ifa->ifa_addr->sa_family == AF_INET6) {
#if !(defined(__FreeBSD__) || defined(__APPLE__) || defined(__DragonFly__))
				inp->inp_vflag |= INP_IPV6;
#else
				inp->ip_inp.inp.inp_vflag |= INP_IPV6;
#endif
			} else if (laddr->ifa->ifa_addr->sa_family == AF_INET) {
#if !(defined(__FreeBSD__) || defined(__APPLE__) || defined(__DragonFly__))
				inp->inp_vflag |= INP_IPV4;
#else
				inp->ip_inp.inp.inp_vflag |= INP_IPV4;
#endif
			}
		}
	}
}
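/*
 * Note: this is run after the endpoint's local address list changes (see
 * sctp_del_local_addr_ep() below) so the INP_IPV4/INP_IPV6 flags keep
 * tracking what is actually bound.
 */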
/*
 * Add the address to the endpoint local address list
 * There is nothing to be done if we are bound to all addresses
 */
int
sctp_add_local_addr_ep(struct sctp_inpcb *inp, struct ifaddr *ifa)
{
	struct sctp_laddr *laddr;
	int fnd, error;

	fnd = 0;
	if (inp->sctp_flags & SCTP_PCB_FLAGS_BOUNDALL) {
		/* You are already bound to all. You have it already */
		return (0);
	}
	if (ifa->ifa_addr->sa_family == AF_INET6) {
		struct in6_ifaddr *ifa6;

		ifa6 = (struct in6_ifaddr *)ifa;
		if (ifa6->ia6_flags & (IN6_IFF_DETACHED |
		    IN6_IFF_DEPRECATED | IN6_IFF_ANYCAST | IN6_IFF_NOTREADY))
			/* Can't bind a non-existent addr. */
			return (-1);
	}
	/* first, is it already present? */
	LIST_FOREACH(laddr, &inp->sctp_addr_list, sctp_nxt_addr) {
		if (laddr->ifa == ifa) {
			fnd = 1;
			break;
		}
	}
	if (((inp->sctp_flags & SCTP_PCB_FLAGS_BOUNDALL) == 0) && (fnd == 0)) {
		/* Not bound to all */
		error = sctp_insert_laddr(&inp->sctp_addr_list, ifa);
		if (error != 0)
			return (error);
		inp->laddr_count++;
		/* update inp_vflag flags */
		if (ifa->ifa_addr->sa_family == AF_INET6) {
#if !(defined(__FreeBSD__) || defined(__APPLE__) || defined(__DragonFly__))
			inp->inp_vflag |= INP_IPV6;
#else
			inp->ip_inp.inp.inp_vflag |= INP_IPV6;
#endif
		} else if (ifa->ifa_addr->sa_family == AF_INET) {
#if !(defined(__FreeBSD__) || defined(__APPLE__) || defined(__DragonFly__))
			inp->inp_vflag |= INP_IPV4;
#else
			inp->ip_inp.inp.inp_vflag |= INP_IPV4;
#endif
		}
	}
	return (0);
}
/*
 * select a new (hopefully reachable) destination net
 * (should only be used when we deleted an ep addr that is the
 * only usable source address to reach the destination net)
 */
static void
sctp_select_primary_destination(struct sctp_tcb *stcb)
{
	struct sctp_nets *net;

	TAILQ_FOREACH(net, &stcb->asoc.nets, sctp_next) {
		/* for now, we'll just pick the first reachable one we find */
		if (net->dest_state & SCTP_ADDR_UNCONFIRMED)
			continue;
		if (sctp_destination_is_reachable(stcb,
		    (struct sockaddr *)&net->ro._l_addr)) {
			/* found a reachable destination */
			stcb->asoc.primary_destination = net;
		}
	}
	/* I can't get there from here! ...we're gonna die shortly... */
}
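/*
 * Note: "reachable" here is the endpoint-level v4/v6 check in
 * sctp_destination_is_reachable() above, not a routing lookup;
 * sctp_del_local_addr_ep() below relies on this to move the primary away
 * from a destination the endpoint can no longer source traffic for.
 */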
3808 * Delete the address from the endpoint local address list
3809 * There is nothing to be done if we are bound to all addresses
3812 sctp_del_local_addr_ep(struct sctp_inpcb
*inp
, struct ifaddr
*ifa
)
3814 struct sctp_laddr
*laddr
;
3817 if (inp
->sctp_flags
& SCTP_PCB_FLAGS_BOUNDALL
) {
3818 /* You are already bound to all. You have it already */
3822 LIST_FOREACH(laddr
, &inp
->sctp_addr_list
, sctp_nxt_addr
) {
3823 if (laddr
->ifa
== ifa
) {
3828 if (fnd
&& (inp
->laddr_count
< 2)) {
3829 /* can't delete unless there are at LEAST 2 addresses */
3832 if (((inp
->sctp_flags
& SCTP_PCB_FLAGS_BOUNDALL
) == 0) && (fnd
)) {
3834 * clean up any use of this address
3835 * go through our associations and clear any
3836 * last_used_address that match this one
3837 * for each assoc, see if a new primary_destination is needed
3839 struct sctp_tcb
*stcb
;
3841 /* clean up "next_addr_touse" */
3842 if (inp
->next_addr_touse
== laddr
)
3843 /* delete this address */
3844 inp
->next_addr_touse
= NULL
;
3846 /* clean up "last_used_address" */
3847 LIST_FOREACH(stcb
, &inp
->sctp_asoc_list
, sctp_tcblist
) {
3848 if (stcb
->asoc
.last_used_address
== laddr
)
3849 /* delete this address */
3850 stcb
->asoc
.last_used_address
= NULL
;
3851 } /* for each tcb */
3853 /* remove it from the ep list */
3854 sctp_remove_laddr(laddr
);
3856 /* update inp_vflag flags */
3857 sctp_update_ep_vflag(inp
);
3858 /* select a new primary destination if needed */
3859 LIST_FOREACH(stcb
, &inp
->sctp_asoc_list
, sctp_tcblist
) {
3860 /* presume caller (sctp_asconf.c) already owns INP lock */
3861 SCTP_TCB_LOCK(stcb
);
3862 if (sctp_destination_is_reachable(stcb
,
3863 (struct sockaddr
*)&stcb
->asoc
.primary_destination
->ro
._l_addr
) == 0) {
3864 sctp_select_primary_destination(stcb
);
3866 SCTP_TCB_UNLOCK(stcb
);
3867 } /* for each tcb */
3873 * Add the addr to the TCB local address list
3874 * For the BOUNDALL or dynamic case, this is a "pending" address list
3875 * (eg. addresses waiting for an ASCONF-ACK response)
3876 * For the subset binding, static case, this is a "valid" address list
3879 sctp_add_local_addr_assoc(struct sctp_tcb
*stcb
, struct ifaddr
*ifa
)
3881 struct sctp_inpcb
*inp
;
3882 struct sctp_laddr
*laddr
;
3885 /* Assumes TCP is locked.. and possiblye
3886 * the INP. May need to confirm/fix that if
3887 * we need it and is not the case.
3889 inp
= stcb
->sctp_ep
;
3890 if (ifa
->ifa_addr
->sa_family
== AF_INET6
) {
3891 struct in6_ifaddr
*ifa6
;
3892 ifa6
= (struct in6_ifaddr
*)ifa
;
3893 if (ifa6
->ia6_flags
& (IN6_IFF_DETACHED
|
3894 /* IN6_IFF_DEPRECATED | */
3897 /* Can't bind a non-existent addr. */
3900 /* does the address already exist? */
3901 LIST_FOREACH(laddr
, &stcb
->asoc
.sctp_local_addr_list
, sctp_nxt_addr
) {
3902 if (laddr
->ifa
== ifa
) {
3907 /* add to the list */
3908 error
= sctp_insert_laddr(&stcb
->asoc
.sctp_local_addr_list
, ifa
);
/*
 * insert an laddr entry with the given ifa for the desired list
 */
int
sctp_insert_laddr(struct sctpladdr *list, struct ifaddr *ifa) {
	struct sctp_laddr *laddr;

	laddr = (struct sctp_laddr *)SCTP_ZONE_GET(sctppcbinfo.ipi_zone_laddr);
	if (laddr == NULL) {
		/* out of memory? */
		return (EINVAL);
	}
	sctppcbinfo.ipi_count_laddr++;
	sctppcbinfo.ipi_gencnt_laddr++;
	bzero(laddr, sizeof(*laddr));
	laddr->ifa = ifa;
	LIST_INSERT_HEAD(list, laddr, sctp_nxt_addr);
	return (0);
}

/*
 * Remove an laddr entry from the local address list (on an assoc)
 */
void
sctp_remove_laddr(struct sctp_laddr *laddr)
{
	/* remove from the list */
	LIST_REMOVE(laddr, sctp_nxt_addr);
	SCTP_ZONE_FREE(sctppcbinfo.ipi_zone_laddr, laddr);
	sctppcbinfo.ipi_count_laddr--;
	sctppcbinfo.ipi_gencnt_laddr++;
}
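/*
 * Note: every laddr allocated or freed above goes through the
 * ipi_zone_laddr zone and keeps ipi_count_laddr/ipi_gencnt_laddr in step,
 * matching the accounting used for the other PCB zones set up at init
 * time.
 */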
3955 * Remove an address from the TCB local address list
3958 sctp_del_local_addr_assoc(struct sctp_tcb
*stcb
, struct ifaddr
*ifa
)
3960 struct sctp_inpcb
*inp
;
3961 struct sctp_laddr
*laddr
;
3963 /* This is called by asconf work. It is assumed that
3964 * a) The TCB is locked
3966 * b) The INP is locked.
3967 * This is true in as much as I can trace through
3968 * the entry asconf code where I did these locks.
3969 * Again, the ASCONF code is a bit different in
3970 * that it does lock the INP during its work often
3971 * times. This must be since we don't want other
3972 * proc's looking up things while what they are
3973 * looking up is changing :-D
3976 inp
= stcb
->sctp_ep
;
3977 /* if subset bound and don't allow ASCONF's, can't delete last */
3978 if (((inp
->sctp_flags
& SCTP_PCB_FLAGS_BOUNDALL
) == 0) &&
3979 ((inp
->sctp_flags
& SCTP_PCB_FLAGS_DO_ASCONF
) == 0)) {
3980 if (stcb
->asoc
.numnets
< 2) {
3981 /* can't delete last address */
3986 LIST_FOREACH(laddr
, &stcb
->asoc
.sctp_local_addr_list
, sctp_nxt_addr
) {
3987 /* remove the address if it exists */
3988 if (laddr
->ifa
== NULL
)
3990 if (laddr
->ifa
== ifa
) {
3991 sctp_remove_laddr(laddr
);
3996 /* address not found! */
4001 * Remove an address from the TCB local address list
4002 * lookup using a sockaddr addr
4005 sctp_del_local_addr_assoc_sa(struct sctp_tcb
*stcb
, struct sockaddr
*sa
)
4007 struct sctp_inpcb
*inp
;
4008 struct sctp_laddr
*laddr
;
4009 struct sockaddr
*l_sa
;
4012 * This function I find does not seem to have a caller.
4013 * As such we NEED TO DELETE this code. If we do
4014 * find a caller, the caller MUST have locked the TCB
4015 * at the least and probably the INP as well.
4017 inp
= stcb
->sctp_ep
;
4018 /* if subset bound and don't allow ASCONF's, can't delete last */
4019 if (((inp
->sctp_flags
& SCTP_PCB_FLAGS_BOUNDALL
) == 0) &&
4020 ((inp
->sctp_flags
& SCTP_PCB_FLAGS_DO_ASCONF
) == 0)) {
4021 if (stcb
->asoc
.numnets
< 2) {
4022 /* can't delete last address */
4027 LIST_FOREACH(laddr
, &stcb
->asoc
.sctp_local_addr_list
, sctp_nxt_addr
) {
4028 /* make sure the address exists */
4029 if (laddr
->ifa
== NULL
)
4031 if (laddr
->ifa
->ifa_addr
== NULL
)
4034 l_sa
= laddr
->ifa
->ifa_addr
;
4035 if (l_sa
->sa_family
== AF_INET6
) {
4037 struct sockaddr_in6
*sin1
, *sin2
;
4038 sin1
= (struct sockaddr_in6
*)l_sa
;
4039 sin2
= (struct sockaddr_in6
*)sa
;
4040 if (memcmp(&sin1
->sin6_addr
, &sin2
->sin6_addr
,
4041 sizeof(struct in6_addr
)) == 0) {
4043 sctp_remove_laddr(laddr
);
4046 } else if (l_sa
->sa_family
== AF_INET
) {
4048 struct sockaddr_in
*sin1
, *sin2
;
4049 sin1
= (struct sockaddr_in
*)l_sa
;
4050 sin2
= (struct sockaddr_in
*)sa
;
4051 if (sin1
->sin_addr
.s_addr
== sin2
->sin_addr
.s_addr
) {
4053 sctp_remove_laddr(laddr
);
4057 /* invalid family */
4061 /* address not found! */
4065 static char sctp_pcb_initialized
= 0;
4067 #if defined(__FreeBSD__) || defined(__APPLE__) || defined(__DragonFly__)
4069 static int sctp_max_number_of_assoc
= SCTP_MAX_NUM_OF_ASOC
;
4070 static int sctp_scale_up_for_address
= SCTP_SCALE_FOR_ADDR
;
4072 #endif /* FreeBSD || APPLE || DragonFly */
4074 #ifndef SCTP_TCBHASHSIZE
4075 #define SCTP_TCBHASHSIZE 1024
4078 #ifndef SCTP_CHUNKQUEUE_SCALE
4079 #define SCTP_CHUNKQUEUE_SCALE 10
4086 * SCTP initialization for the PCB structures
4087 * should be called by the sctp_init() funciton.
4090 int hashtblsize
= SCTP_TCBHASHSIZE
;
4092 #if defined(__FreeBSD__) || defined(__APPLE__) || defined(__DragonFly__)
4093 int sctp_chunkscale
= SCTP_CHUNKQUEUE_SCALE
;
4096 if (sctp_pcb_initialized
!= 0) {
4097 /* error I was called twice */
4100 sctp_pcb_initialized
= 1;
4102 /* Init all peg counts */
4103 for (i
= 0; i
< SCTP_NUMBER_OF_PEGS
; i
++) {
4107 /* init the empty list of (All) Endpoints */
4108 LIST_INIT(&sctppcbinfo
.listhead
);
4110 /* init the iterator head */
4111 LIST_INIT(&sctppcbinfo
.iteratorhead
);
4113 /* init the hash table of endpoints */
4114 #if defined(__FreeBSD__)
4115 #if defined(__FreeBSD_cc_version) && __FreeBSD_cc_version >= 440000
4116 TUNABLE_INT_FETCH("net.inet.sctp.tcbhashsize", &hashtblsize
);
4117 TUNABLE_INT_FETCH("net.inet.sctp.pcbhashsize", &sctp_pcbtblsize
);
4118 TUNABLE_INT_FETCH("net.inet.sctp.chunkscale", &sctp_chunkscale
);
4120 TUNABLE_INT_FETCH("net.inet.sctp.tcbhashsize", SCTP_TCBHASHSIZE
,
4122 TUNABLE_INT_FETCH("net.inet.sctp.pcbhashsize", SCTP_PCBHASHSIZE
,
4124 TUNABLE_INT_FETCH("net.inet.sctp.chunkscale", SCTP_CHUNKQUEUE_SCALE
,
4129 sctppcbinfo
.sctp_asochash
= hashinit((hashtblsize
* 31),
4134 #if defined(__NetBSD__) || defined(__OpenBSD__)
4137 &sctppcbinfo
.hashasocmark
);
4139 sctppcbinfo
.sctp_ephash
= hashinit(hashtblsize
,
4144 #if defined(__NetBSD__) || defined(__OpenBSD__)
4147 &sctppcbinfo
.hashmark
);
4149 sctppcbinfo
.sctp_tcpephash
= hashinit(hashtblsize
,
4154 #if defined(__NetBSD__) || defined(__OpenBSD__)
4157 &sctppcbinfo
.hashtcpmark
);
4159 sctppcbinfo
.hashtblsize
= hashtblsize
;
4161 /* init the zones */
4163 * FIX ME: Should check for NULL returns, but if it does fail we
4164 * are doomed to panic anyways... add later maybe.
4166 SCTP_ZONE_INIT(sctppcbinfo
.ipi_zone_ep
, "sctp_ep",
4167 sizeof(struct sctp_inpcb
), maxsockets
);
4169 SCTP_ZONE_INIT(sctppcbinfo
.ipi_zone_asoc
, "sctp_asoc",
4170 sizeof(struct sctp_tcb
), sctp_max_number_of_assoc
);
4172 SCTP_ZONE_INIT(sctppcbinfo
.ipi_zone_laddr
, "sctp_laddr",
4173 sizeof(struct sctp_laddr
),
4174 (sctp_max_number_of_assoc
* sctp_scale_up_for_address
));
4176 SCTP_ZONE_INIT(sctppcbinfo
.ipi_zone_net
, "sctp_raddr",
4177 sizeof(struct sctp_nets
),
4178 (sctp_max_number_of_assoc
* sctp_scale_up_for_address
));
4180 SCTP_ZONE_INIT(sctppcbinfo
.ipi_zone_chunk
, "sctp_chunk",
4181 sizeof(struct sctp_tmit_chunk
),
4182 (sctp_max_number_of_assoc
* sctp_scale_up_for_address
*
4185 SCTP_ZONE_INIT(sctppcbinfo
.ipi_zone_sockq
, "sctp_sockq",
4186 sizeof(struct sctp_socket_q_list
),
4187 (sctp_max_number_of_assoc
* sctp_scale_up_for_address
*
4190 /* Master Lock INIT for info structure */
4191 SCTP_INP_INFO_LOCK_INIT();
4192 SCTP_ITERATOR_LOCK_INIT();
4193 /* not sure if we need all the counts */
4194 sctppcbinfo
.ipi_count_ep
= 0;
4195 sctppcbinfo
.ipi_gencnt_ep
= 0;
4196 /* assoc/tcb zone info */
4197 sctppcbinfo
.ipi_count_asoc
= 0;
4198 sctppcbinfo
.ipi_gencnt_asoc
= 0;
4199 /* local addrlist zone info */
4200 sctppcbinfo
.ipi_count_laddr
= 0;
4201 sctppcbinfo
.ipi_gencnt_laddr
= 0;
4202 /* remote addrlist zone info */
4203 sctppcbinfo
.ipi_count_raddr
= 0;
4204 sctppcbinfo
.ipi_gencnt_raddr
= 0;
4206 sctppcbinfo
.ipi_count_chunk
= 0;
4207 sctppcbinfo
.ipi_gencnt_chunk
= 0;
4209 /* socket queue zone info */
4210 sctppcbinfo
.ipi_count_sockq
= 0;
4211 sctppcbinfo
.ipi_gencnt_sockq
= 0;
4214 sctppcbinfo
.mbuf_track
= 0;
4216 #if defined(__FreeBSD__) || defined(__OpenBSD__) || defined(__APPLE__) \
4217 || defined(__DragonFly__)
4218 sctppcbinfo
.lastlow
= ipport_firstauto
;
4220 sctppcbinfo
.lastlow
= anonportmin
;
	/* Init the TIMEWAIT list */
	for (i = 0; i < SCTP_STACK_VTAG_HASH_SIZE; i++) {
		LIST_INIT(&sctppcbinfo.vtag_timewait[i]);
	}
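	/*
	 * Illustration (hypothetical numbers): a vtag placed in TIME-WAIT is
	 * filed under vtag_timewait[tag % SCTP_STACK_VTAG_HASH_SIZE], so
	 * with a hash size of 32 a tag of 0x12345678 would land in bucket
	 * 0x12345678 % 32 = 24.
	 */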
4227 #if defined(_SCTP_NEEDS_CALLOUT_) && !defined(__APPLE__)
4228 TAILQ_INIT(&sctppcbinfo
.callqueue
);
int
sctp_load_addresses_from_init(struct sctp_tcb *stcb, struct mbuf *m,
    int iphlen, int offset, int limit, struct sctphdr *sh,
    struct sockaddr *altsa)
{
	/*
	 * grub through the INIT pulling addresses and
	 * loading them to the nets structure in the asoc.
	 * The from address in the mbuf should also be loaded
	 * (if it is not already). This routine can be called
	 * with either INIT or INIT-ACK's as long as the
	 * m points to the IP packet and the offset points
	 * to the beginning of the parameters.
	 */
	struct sctp_inpcb *inp, *l_inp;
	struct sctp_nets *net, *net_tmp;
	struct ip *iph;
	struct sctp_paramhdr *phdr, parm_buf;
	struct sctp_tcb *stcb_tmp;
	u_int16_t ptype, plen;
	struct sockaddr *sa;
	struct sockaddr_storage dest_store;
	struct sockaddr *local_sa = (struct sockaddr *)&dest_store;
	struct sockaddr_in sin;
	struct sockaddr_in6 sin6;

	/* First get the destination address setup too. */
	memset(&sin, 0, sizeof(sin));
	memset(&sin6, 0, sizeof(sin6));

	sin.sin_family = AF_INET;
	sin.sin_len = sizeof(sin);
	sin.sin_port = stcb->rport;

	sin6.sin6_family = AF_INET6;
	sin6.sin6_len = sizeof(struct sockaddr_in6);
	sin6.sin6_port = stcb->rport;
	if (altsa == NULL) {
		iph = mtod(m, struct ip *);
		if (iph->ip_v == IPVERSION) {
			/* its IPv4 */
			struct sockaddr_in *sin_2;
			sin_2 = (struct sockaddr_in *)(local_sa);
			memset(sin_2, 0, sizeof(sin));
			sin_2->sin_family = AF_INET;
			sin_2->sin_len = sizeof(sin);
			sin_2->sin_port = sh->dest_port;
			sin_2->sin_addr.s_addr = iph->ip_dst.s_addr;
			sin.sin_addr = iph->ip_src;
			sa = (struct sockaddr *)&sin;
		} else if (iph->ip_v == (IPV6_VERSION >> 4)) {
			/* its IPv6 */
			struct ip6_hdr *ip6;
			struct sockaddr_in6 *sin6_2;

			ip6 = mtod(m, struct ip6_hdr *);
			sin6_2 = (struct sockaddr_in6 *)(local_sa);
			memset(sin6_2, 0, sizeof(sin6));
			sin6_2->sin6_family = AF_INET6;
			sin6_2->sin6_len = sizeof(struct sockaddr_in6);
			sin6_2->sin6_port = sh->dest_port;
			sin6.sin6_addr = ip6->ip6_src;
			sa = (struct sockaddr *)&sin6;
		} else {
			/* unknown IP version */
			return (-1);
		}
	} else {
		/*
		 * For cookies we use the src address NOT from the packet
		 * but from the original INIT
		 */
		sa = altsa;
	}
	/* Turn off ECN until we get through all params */
	stcb->asoc.ecn_allowed = 0;

	TAILQ_FOREACH(net, &stcb->asoc.nets, sctp_next) {
		/* mark all addresses that we have currently on the list */
		net->dest_state |= SCTP_ADDR_NOT_IN_ASSOC;
	}
	/* does the source address already exist? if so skip it */
	l_inp = inp = stcb->sctp_ep;
	stcb_tmp = sctp_findassociation_ep_addr(&inp, sa, &net_tmp, local_sa, stcb);
	if ((stcb_tmp == NULL && inp == stcb->sctp_ep) || inp == NULL) {
		/* we must add the source address */
		/* no scope set here since we have a tcb already. */
		if ((sa->sa_family == AF_INET) &&
		    (stcb->asoc.ipv4_addr_legal)) {
			if (sctp_add_remote_addr(stcb, sa, 0, 2)) {
				return (-1);
			}
		} else if ((sa->sa_family == AF_INET6) &&
		    (stcb->asoc.ipv6_addr_legal)) {
			if (sctp_add_remote_addr(stcb, sa, 0, 3)) {
				return (-1);
			}
		}
	} else {
		if (net_tmp != NULL && stcb_tmp == stcb) {
			net_tmp->dest_state &= ~SCTP_ADDR_NOT_IN_ASSOC;
		} else if (stcb_tmp != stcb) {
			/* It belongs to another association? */
			return (-1);
		}
	}
	/* since a unlock occured we must check the
	 * TCB's state and the pcb's gone flags.
	 */
	if (l_inp->sctp_flags & (SCTP_PCB_FLAGS_SOCKET_GONE|SCTP_PCB_FLAGS_SOCKET_ALLGONE)) {
		/* the user freed the ep */
		return (-1);
	}
	if (stcb->asoc.state == 0) {
		/* the assoc was freed? */
		return (-1);
	}
	/* now we must go through each of the params. */
	phdr = sctp_get_next_param(m, offset, &parm_buf, sizeof(parm_buf));
	while (phdr) {
		ptype = ntohs(phdr->param_type);
		plen = ntohs(phdr->param_length);
		/*kprintf("ptype => %d, plen => %d\n", ptype, plen);*/
		if (offset + plen > limit) {
			break;
		}
		if (plen == 0) {
			break;
		}
		if ((ptype == SCTP_IPV4_ADDRESS) &&
		    (stcb->asoc.ipv4_addr_legal)) {
			struct sctp_ipv4addr_param *p4, p4_buf;
			/* ok get the v4 address and check/add */
			phdr = sctp_get_next_param(m, offset,
			    (struct sctp_paramhdr *)&p4_buf, sizeof(p4_buf));
			if (plen != sizeof(struct sctp_ipv4addr_param) ||
			    phdr == NULL) {
				return (-1);
			}
			p4 = (struct sctp_ipv4addr_param *)phdr;
			sin.sin_addr.s_addr = p4->addr;
			sa = (struct sockaddr *)&sin;
			inp = stcb->sctp_ep;
			stcb_tmp = sctp_findassociation_ep_addr(&inp, sa, &net,
			    local_sa, stcb);
			if ((stcb_tmp == NULL && inp == stcb->sctp_ep) ||
			    inp == NULL) {
				/* we must add the source address */
				/* no scope set since we have a tcb already */

				/* we must validate the state again here */
				if (l_inp->sctp_flags & (SCTP_PCB_FLAGS_SOCKET_GONE|SCTP_PCB_FLAGS_SOCKET_ALLGONE)) {
					/* the user freed the ep */
					return (-1);
				}
				if (stcb->asoc.state == 0) {
					/* the assoc was freed? */
					return (-1);
				}
				if (sctp_add_remote_addr(stcb, sa, 0, 4)) {
					return (-1);
				}
			} else if (stcb_tmp == stcb) {
				if (l_inp->sctp_flags & (SCTP_PCB_FLAGS_SOCKET_GONE|SCTP_PCB_FLAGS_SOCKET_ALLGONE)) {
					/* the user freed the ep */
					return (-1);
				}
				if (stcb->asoc.state == 0) {
					/* the assoc was freed? */
					return (-1);
				}
				if (net != NULL) {
					/* clear flag */
					net->dest_state &=
					    ~SCTP_ADDR_NOT_IN_ASSOC;
				}
			} else {
				/* strange, address is in another assoc?
				 * straighten out locks.
				 */
				SCTP_TCB_UNLOCK(stcb_tmp);
				SCTP_INP_RLOCK(inp);
				if (l_inp->sctp_flags & (SCTP_PCB_FLAGS_SOCKET_GONE|SCTP_PCB_FLAGS_SOCKET_ALLGONE)) {
					/* the user freed the ep */
					SCTP_INP_RUNLOCK(l_inp);
					return (-1);
				}
				if (stcb->asoc.state == 0) {
					/* the assoc was freed? */
					SCTP_INP_RUNLOCK(l_inp);
					return (-1);
				}
				SCTP_TCB_LOCK(stcb);
				SCTP_INP_RUNLOCK(stcb->sctp_ep);
				return (-1);
			}
		} else if ((ptype == SCTP_IPV6_ADDRESS) &&
		    (stcb->asoc.ipv6_addr_legal)) {
			/* ok get the v6 address and check/add */
			struct sctp_ipv6addr_param *p6, p6_buf;
			phdr = sctp_get_next_param(m, offset,
			    (struct sctp_paramhdr *)&p6_buf, sizeof(p6_buf));
			if (plen != sizeof(struct sctp_ipv6addr_param) ||
			    phdr == NULL) {
				return (-1);
			}
			p6 = (struct sctp_ipv6addr_param *)phdr;
			memcpy((caddr_t)&sin6.sin6_addr, p6->addr,
			    sizeof(p6->addr));
			sa = (struct sockaddr *)&sin6;
			inp = stcb->sctp_ep;
			stcb_tmp = sctp_findassociation_ep_addr(&inp, sa, &net,
			    local_sa, stcb);
			if (stcb_tmp == NULL && (inp == stcb->sctp_ep ||
			    inp == NULL)) {
				/* we must validate the state again here */
				if (l_inp->sctp_flags & (SCTP_PCB_FLAGS_SOCKET_GONE|SCTP_PCB_FLAGS_SOCKET_ALLGONE)) {
					/* the user freed the ep */
					return (-1);
				}
				if (stcb->asoc.state == 0) {
					/* the assoc was freed? */
					return (-1);
				}
				/* we must add the address, no scope set */
				if (sctp_add_remote_addr(stcb, sa, 0, 5)) {
					return (-1);
				}
			} else if (stcb_tmp == stcb) {
				/* we must validate the state again here */
				if (l_inp->sctp_flags & (SCTP_PCB_FLAGS_SOCKET_GONE|SCTP_PCB_FLAGS_SOCKET_ALLGONE)) {
					/* the user freed the ep */
					return (-1);
				}
				if (stcb->asoc.state == 0) {
					/* the assoc was freed? */
					return (-1);
				}
				if (net != NULL) {
					/* clear flag */
					net->dest_state &=
					    ~SCTP_ADDR_NOT_IN_ASSOC;
				}
			} else {
				/* strange, address is in another assoc?
				 * straighten out locks.
				 */
				SCTP_TCB_UNLOCK(stcb_tmp);
				SCTP_INP_RLOCK(l_inp);
				/* we must validate the state again here */
				if (l_inp->sctp_flags & (SCTP_PCB_FLAGS_SOCKET_GONE|SCTP_PCB_FLAGS_SOCKET_ALLGONE)) {
					/* the user freed the ep */
					SCTP_INP_RUNLOCK(l_inp);
					return (-1);
				}
				if (stcb->asoc.state == 0) {
					/* the assoc was freed? */
					SCTP_INP_RUNLOCK(l_inp);
					return (-1);
				}
				SCTP_TCB_LOCK(stcb);
				SCTP_INP_RUNLOCK(l_inp);
				return (-1);
			}
		} else if (ptype == SCTP_ECN_CAPABLE) {
			stcb->asoc.ecn_allowed = 1;
		} else if (ptype == SCTP_ULP_ADAPTION) {
			if (stcb->asoc.state != SCTP_STATE_OPEN) {
				struct sctp_adaption_layer_indication ai, *aip;

				phdr = sctp_get_next_param(m, offset,
				    (struct sctp_paramhdr *)&ai, sizeof(ai));
				aip = (struct sctp_adaption_layer_indication *)phdr;
				sctp_ulp_notify(SCTP_NOTIFY_ADAPTION_INDICATION,
				    stcb, ntohl(aip->indication), NULL);
			}
		} else if (ptype == SCTP_SET_PRIM_ADDR) {
			struct sctp_asconf_addr_param lstore, *fee;
			struct sctp_asconf_addrv4_param *fii;
			u_int16_t lptype;
			struct sockaddr *lsa = NULL;

			stcb->asoc.peer_supports_asconf = 1;
			stcb->asoc.peer_supports_asconf_setprim = 1;
			if (plen > sizeof(lstore)) {
				return (-1);
			}
			phdr = sctp_get_next_param(m, offset,
			    (struct sctp_paramhdr *)&lstore, plen);
			if (phdr == NULL) {
				return (-1);
			}
			fee = (struct sctp_asconf_addr_param *)phdr;
			lptype = ntohs(fee->addrp.ph.param_type);
			if (lptype == SCTP_IPV4_ADDRESS) {
				if (plen !=
				    sizeof(struct sctp_asconf_addrv4_param)) {
					kprintf("Sizeof setprim in init/init ack not %d but %d - ignored\n",
					    (int)sizeof(struct sctp_asconf_addrv4_param),
					    plen);
				} else {
					fii = (struct sctp_asconf_addrv4_param *)fee;
					sin.sin_addr.s_addr = fii->addrp.addr;
					lsa = (struct sockaddr *)&sin;
				}
			} else if (lptype == SCTP_IPV6_ADDRESS) {
				if (plen !=
				    sizeof(struct sctp_asconf_addr_param)) {
					kprintf("Sizeof setprim (v6) in init/init ack not %d but %d - ignored\n",
					    (int)sizeof(struct sctp_asconf_addr_param),
					    plen);
				} else {
					memcpy(sin6.sin6_addr.s6_addr,
					    fee->addrp.addr,
					    sizeof(fee->addrp.addr));
					lsa = (struct sockaddr *)&sin6;
				}
			}
			if (lsa) {
				sctp_set_primary_addr(stcb, sa, NULL);
			}
		} else if (ptype == SCTP_PRSCTP_SUPPORTED) {
			/* Peer supports pr-sctp */
			stcb->asoc.peer_supports_prsctp = 1;
		} else if (ptype == SCTP_SUPPORTED_CHUNK_EXT) {
			/* A supported extension chunk */
			struct sctp_supported_chunk_types_param *pr_supported;
			uint8_t local_store[128];
			int num_ent, i;

			phdr = sctp_get_next_param(m, offset,
			    (struct sctp_paramhdr *)&local_store, plen);
			if (phdr == NULL) {
				return (-1);
			}
			stcb->asoc.peer_supports_asconf = 0;
			stcb->asoc.peer_supports_asconf_setprim = 0;
			stcb->asoc.peer_supports_prsctp = 0;
			stcb->asoc.peer_supports_pktdrop = 0;
			stcb->asoc.peer_supports_strreset = 0;
			pr_supported = (struct sctp_supported_chunk_types_param *)phdr;
			num_ent = plen - sizeof(struct sctp_paramhdr);
			for (i = 0; i < num_ent; i++) {
				switch (pr_supported->chunk_types[i]) {
				case SCTP_ASCONF:
					stcb->asoc.peer_supports_asconf = 1;
					stcb->asoc.peer_supports_asconf_setprim = 1;
					break;
				case SCTP_ASCONF_ACK:
					stcb->asoc.peer_supports_asconf = 1;
					stcb->asoc.peer_supports_asconf_setprim = 1;
					break;
				case SCTP_FORWARD_CUM_TSN:
					stcb->asoc.peer_supports_prsctp = 1;
					break;
				case SCTP_PACKET_DROPPED:
					stcb->asoc.peer_supports_pktdrop = 1;
					break;
				case SCTP_STREAM_RESET:
					stcb->asoc.peer_supports_strreset = 1;
					break;
				default:
					/* one I have not learned yet */
					break;
				}
			}
		} else if (ptype == SCTP_ECN_NONCE_SUPPORTED) {
			/* Peer supports ECN-nonce */
			stcb->asoc.peer_supports_ecn_nonce = 1;
			stcb->asoc.ecn_nonce_allowed = 1;
		} else if ((ptype == SCTP_HEARTBEAT_INFO) ||
		    (ptype == SCTP_STATE_COOKIE) ||
		    (ptype == SCTP_UNRECOG_PARAM) ||
		    (ptype == SCTP_COOKIE_PRESERVE) ||
		    (ptype == SCTP_SUPPORTED_ADDRTYPE) ||
		    (ptype == SCTP_ADD_IP_ADDRESS) ||
		    (ptype == SCTP_DEL_IP_ADDRESS) ||
		    (ptype == SCTP_ERROR_CAUSE_IND) ||
		    (ptype == SCTP_SUCCESS_REPORT)) {
			/* don't care */;
		} else {
			if ((ptype & 0x8000) == 0x0000) {
				/* must stop processing the rest of
				 * the param's. Any report bits were
				 * handled with the call to sctp_arethere_unrecognized_parameters()
				 * when the INIT or INIT-ACK was first seen.
				 */
				break;
			}
		}
		offset += SCTP_SIZE32(plen);
		if (offset >= limit) {
			break;
		}
		phdr = sctp_get_next_param(m, offset, &parm_buf,
		    sizeof(parm_buf));
	}
	/* Now check to see if we need to purge any addresses */
	for (net = TAILQ_FIRST(&stcb->asoc.nets); net != NULL; net = net_tmp) {
		net_tmp = TAILQ_NEXT(net, sctp_next);
		if ((net->dest_state & SCTP_ADDR_NOT_IN_ASSOC) ==
		    SCTP_ADDR_NOT_IN_ASSOC) {
			/* This address has been removed from the asoc */
			/* remove and free it */
			stcb->asoc.numnets--;
			TAILQ_REMOVE(&stcb->asoc.nets, net, sctp_next);
			sctp_free_remote_addr(net);
			if (net == stcb->asoc.primary_destination) {
				stcb->asoc.primary_destination = NULL;
				sctp_select_primary_destination(stcb);
			}
		}
	}
	return (0);
}
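
/*
 * Minimal sketch of the parameter walk performed above, assuming only
 * that sctp_get_next_param() returns NULL once it runs out of data:
 *
 *	phdr = sctp_get_next_param(m, offset, &parm_buf, sizeof(parm_buf));
 *	while (phdr) {
 *		ptype = ntohs(phdr->param_type);
 *		plen = ntohs(phdr->param_length);
 *		if (plen == 0 || offset + plen > limit)
 *			break;
 *		... handle one parameter based on ptype ...
 *		offset += SCTP_SIZE32(plen);
 *		phdr = sctp_get_next_param(m, offset, &parm_buf,
 *		    sizeof(parm_buf));
 *	}
 */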
int
sctp_set_primary_addr(struct sctp_tcb *stcb, struct sockaddr *sa,
    struct sctp_nets *net)
{
	/* make sure the requested primary address exists in the assoc */
	if (net == NULL && sa)
		net = sctp_findnet(stcb, sa);

	if (net == NULL) {
		/* didn't find the requested primary address! */
		return (-1);
	} else {
		/* set the primary address */
		if (net->dest_state & SCTP_ADDR_UNCONFIRMED) {
			/* Must be confirmed */
			return (-1);
		}
		stcb->asoc.primary_destination = net;
		net->dest_state &= ~SCTP_ADDR_WAS_PRIMARY;
		return (0);
	}
}
int
sctp_is_vtag_good(struct sctp_inpcb *inp, u_int32_t tag, struct timeval *now)
{
	/*
	 * This function serves two purposes. It will see if a TAG can be
	 * re-used and return 1 for yes it is ok and 0 for don't use that
	 * tag.
	 * A secondary function it will do is purge out old tags that can
	 * be removed.
	 */
	struct sctpasochead *head;
	struct sctpvtaghead *chain;
	struct sctp_tagblock *twait_block;
	struct sctp_tcb *stcb;
	int i;

	SCTP_INP_INFO_WLOCK();
	chain = &sctppcbinfo.vtag_timewait[(tag % SCTP_STACK_VTAG_HASH_SIZE)];
	/* First is the vtag in use ? */

	head = &sctppcbinfo.sctp_asochash[SCTP_PCBHASH_ASOC(tag,
	    sctppcbinfo.hashasocmark)];
	if (head == NULL) {
		SCTP_INP_INFO_WUNLOCK();
		return (0);
	}
	LIST_FOREACH(stcb, head, sctp_asocs) {
		if (stcb->asoc.my_vtag == tag) {
			/* We should remove this if and
			 * return 0 always if we want vtags
			 * unique across all endpoints. For
			 * now within a endpoint is ok.
			 */
			if (inp == stcb->sctp_ep) {
				/* bad tag, in use */
				SCTP_INP_INFO_WUNLOCK();
				return (0);
			}
		}
	}
	if (!LIST_EMPTY(chain)) {
		/*
		 * Block(s) are present, lets see if we have this tag in
		 * the list
		 */
		LIST_FOREACH(twait_block, chain, sctp_nxt_tagblock) {
			for (i = 0; i < SCTP_NUMBER_IN_VTAG_BLOCK; i++) {
				if (twait_block->vtag_block[i].v_tag == 0) {
					/* not in use */
					continue;
				} else if ((long)twait_block->vtag_block[i].tv_sec_at_expire >
				    now->tv_sec) {
					/* Audit expires this guy */
					twait_block->vtag_block[i].tv_sec_at_expire = 0;
					twait_block->vtag_block[i].v_tag = 0;
				} else if (twait_block->vtag_block[i].v_tag ==
				    tag) {
					/* Bad tag, sorry :< */
					SCTP_INP_INFO_WUNLOCK();
					return (0);
				}
			}
		}
	}
	/* Not found, ok to use the tag */
	SCTP_INP_INFO_WUNLOCK();
	return (1);
}
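
/*
 * Sketch of the two lookups sctp_is_vtag_good() performs above, assuming
 * SCTP_STACK_VTAG_HASH_SIZE and SCTP_PCBHASH_ASOC() are the hash helpers
 * from sctp_pcb.h:
 *
 *	chain = &sctppcbinfo.vtag_timewait[tag % SCTP_STACK_VTAG_HASH_SIZE];
 *	head = &sctppcbinfo.sctp_asochash[SCTP_PCBHASH_ASOC(tag,
 *	    sctppcbinfo.hashasocmark)];
 *
 * The first chain holds vtags parked in TIME-WAIT blocks, the second
 * holds live associations hashed by vtag; the tag is usable only if it
 * appears in neither.
 */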
/*
 * Delete the address from the endpoint local address list
 * Lookup using a sockaddr address (ie. not an ifaddr)
 */
int
sctp_del_local_addr_ep_sa(struct sctp_inpcb *inp, struct sockaddr *sa)
{
	struct sctp_laddr *laddr;
	struct sockaddr *l_sa;
	int found = 0;

	/* Here is another function I cannot find a
	 * caller for. As such we SHOULD delete it
	 * if we have no users. If we find a user that
	 * user MUST have the INP locked.
	 */

	if (inp->sctp_flags & SCTP_PCB_FLAGS_BOUNDALL) {
		/* You are already bound to all. You have it already */
		return (EINVAL);
	}

	LIST_FOREACH(laddr, &inp->sctp_addr_list, sctp_nxt_addr) {
		/* make sure the address exists */
		if (laddr->ifa == NULL)
			continue;
		if (laddr->ifa->ifa_addr == NULL)
			continue;

		l_sa = laddr->ifa->ifa_addr;
		if (l_sa->sa_family == AF_INET6) {
			/* IPv6 address */
			struct sockaddr_in6 *sin1, *sin2;
			sin1 = (struct sockaddr_in6 *)l_sa;
			sin2 = (struct sockaddr_in6 *)sa;
			if (memcmp(&sin1->sin6_addr, &sin2->sin6_addr,
			    sizeof(struct in6_addr)) == 0) {
				/* matched */
				found = 1;
				break;
			}
		} else if (l_sa->sa_family == AF_INET) {
			/* IPv4 address */
			struct sockaddr_in *sin1, *sin2;
			sin1 = (struct sockaddr_in *)l_sa;
			sin2 = (struct sockaddr_in *)sa;
			if (sin1->sin_addr.s_addr == sin2->sin_addr.s_addr) {
				/* matched */
				found = 1;
				break;
			}
		} else {
			/* invalid family */
			return (EINVAL);
		}
	}

	if (found && inp->laddr_count < 2) {
		/* can't delete unless there are at LEAST 2 addresses */
		return (EINVAL);
	}

	if (found && (inp->sctp_flags & SCTP_PCB_FLAGS_BOUNDALL) == 0) {
		/*
		 * remove it from the ep list, this should NOT be
		 * done until its really gone from the interface list and
		 * we won't be receiving more of these. Probably right
		 * away. If we do allow a removal of an address from
		 * an association (sub-set bind) than this should NOT
		 * be called until the all ASCONF come back from this
		 * association.
		 */
		sctp_remove_laddr(laddr);
	}
	return (0);
}
static void
sctp_drain_mbufs(struct sctp_inpcb *inp, struct sctp_tcb *stcb)
{
	/*
	 * We must hunt this association for MBUF's past the cumack
	 * (i.e. out of order data that we can renege on).
	 */
	struct sctp_association *asoc;
	struct sctp_tmit_chunk *chk, *nchk;
	u_int32_t cumulative_tsn_p1, tsn;
	int cnt, strmat, gap;

	/* We look for anything larger than the cum-ack + 1 */
	asoc = &stcb->asoc;
	cumulative_tsn_p1 = asoc->cumulative_tsn + 1;
	cnt = 0;
	/* First look in the re-assembly queue */
	chk = TAILQ_FIRST(&asoc->reasmqueue);
	while (chk) {
		/* Get the next one */
		nchk = TAILQ_NEXT(chk, sctp_next);
		if (compare_with_wrap(chk->rec.data.TSN_seq,
		    cumulative_tsn_p1, MAX_TSN)) {
			/* Yep it is above cum-ack */
			cnt++;
			tsn = chk->rec.data.TSN_seq;
			if (tsn >= asoc->mapping_array_base_tsn) {
				gap = tsn - asoc->mapping_array_base_tsn;
			} else {
				gap = (MAX_TSN - asoc->mapping_array_base_tsn) +
				    tsn + 1;
			}
			asoc->size_on_reasm_queue -= chk->send_size;
			asoc->cnt_on_reasm_queue--;
			SCTP_UNSET_TSN_PRESENT(asoc->mapping_array, gap);
			TAILQ_REMOVE(&asoc->reasmqueue, chk, sctp_next);
			if (chk->data) {
				sctp_m_freem(chk->data);
				chk->data = NULL;
			}
			SCTP_ZONE_FREE(sctppcbinfo.ipi_zone_chunk, chk);
			sctppcbinfo.ipi_count_chunk--;
			if ((int)sctppcbinfo.ipi_count_chunk < 0) {
				panic("Chunk count is negative");
			}
			sctppcbinfo.ipi_gencnt_chunk++;
		}
		chk = nchk;
	}
	/* Ok that was fun, now we will drain all the inbound streams? */
	for (strmat = 0; strmat < asoc->streamincnt; strmat++) {
		chk = TAILQ_FIRST(&asoc->strmin[strmat].inqueue);
		while (chk) {
			nchk = TAILQ_NEXT(chk, sctp_next);
			if (compare_with_wrap(chk->rec.data.TSN_seq,
			    cumulative_tsn_p1, MAX_TSN)) {
				/* Yep it is above cum-ack */
				cnt++;
				tsn = chk->rec.data.TSN_seq;
				if (tsn >= asoc->mapping_array_base_tsn) {
					gap = tsn -
					    asoc->mapping_array_base_tsn;
				} else {
					gap = (MAX_TSN -
					    asoc->mapping_array_base_tsn) +
					    tsn + 1;
				}
				asoc->size_on_all_streams -= chk->send_size;
				asoc->cnt_on_all_streams--;

				SCTP_UNSET_TSN_PRESENT(asoc->mapping_array,
				    gap);
				TAILQ_REMOVE(&asoc->strmin[strmat].inqueue,
				    chk, sctp_next);
				if (chk->data) {
					sctp_m_freem(chk->data);
					chk->data = NULL;
				}
				SCTP_ZONE_FREE(sctppcbinfo.ipi_zone_chunk, chk);
				sctppcbinfo.ipi_count_chunk--;
				if ((int)sctppcbinfo.ipi_count_chunk < 0) {
					panic("Chunk count is negative");
				}
				sctppcbinfo.ipi_gencnt_chunk++;
			}
			chk = nchk;
		}
	}
	/*
	 * Question, should we go through the delivery queue?
	 * The only reason things are on here is the app not reading OR a
	 * p-d-api up. An attacker COULD send enough in to initiate the
	 * PD-API and then send a bunch of stuff to other streams... these
	 * would wind up on the delivery queue.. and then we would not get
	 * to them. But in order to do this I then have to back-track and
	 * un-deliver sequence numbers in streams.. el-yucko. I think for
	 * now we will NOT look at the delivery queue and leave it to be
	 * something to consider later. An alternative would be to abort
	 * the P-D-API with a notification and then deliver the data....
	 * Or another method might be to keep track of how many times the
	 * situation occurs and if we see a possible attack underway just
	 * abort the association.
	 */
#ifdef SCTP_DEBUG
	if (sctp_debug_on & SCTP_DEBUG_PCB1) {
		if (cnt) {
			kprintf("Freed %d chunks from reneg harvest\n", cnt);
		}
	}
#endif /* SCTP_DEBUG */
	/*
	 * Another issue, in un-setting the TSN's in the mapping array we
	 * DID NOT adjust the highest_tsn marker. This will cause one of
	 * two things to occur. It may cause us to do extra work in checking
	 * for our mapping array movement. More importantly it may cause us
	 * to SACK every datagram. This may not be a bad thing though since
	 * we will recover once we get our cum-ack above and all this stuff
	 * we dumped recovered.
	 */
}
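
/*
 * Worked example of the gap computation used in sctp_drain_mbufs() above,
 * assuming MAX_TSN is the 32-bit wrap point (0xffffffff): with
 * mapping_array_base_tsn = 100 and TSN_seq = 107 the gap is 107 - 100 = 7;
 * if the base is 0xfffffffe and the TSN has wrapped around to 3, the gap
 * is (MAX_TSN - 0xfffffffe) + 3 + 1 = 5.
 */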
void
sctp_drain(void)
{
	/*
	 * We must walk the PCB lists for ALL associations here. The system
	 * is LOW on MBUF's and needs help. This is where reneging will
	 * occur. We really hope this does NOT happen!
	 */
	struct sctp_inpcb *inp;
	struct sctp_tcb *stcb;

	SCTP_INP_INFO_RLOCK();
	LIST_FOREACH(inp, &sctppcbinfo.listhead, sctp_list) {
		/* For each endpoint */
		SCTP_INP_RLOCK(inp);
		LIST_FOREACH(stcb, &inp->sctp_asoc_list, sctp_tcblist) {
			/* For each association */
			SCTP_TCB_LOCK(stcb);
			sctp_drain_mbufs(inp, stcb);
			SCTP_TCB_UNLOCK(stcb);
		}
		SCTP_INP_RUNLOCK(inp);
	}
	SCTP_INP_INFO_RUNLOCK();
}
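
/*
 * Note on the lock ordering used by sctp_drain() above: the global
 * INFO read lock is taken first, then each endpoint's INP read lock,
 * then each association's TCB lock, and they are released in the
 * reverse order.
 */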
int
sctp_add_to_socket_q(struct sctp_inpcb *inp, struct sctp_tcb *stcb)
{
	struct sctp_socket_q_list *sq;

	/* write lock on INP assumed */
	if ((inp == NULL) || (stcb == NULL)) {
		return (0);
	}
	sq = (struct sctp_socket_q_list *)SCTP_ZONE_GET(
	    sctppcbinfo.ipi_zone_sockq);
	if (sq == NULL) {
		/* out of sq structs */
		return (0);
	}
	sctppcbinfo.ipi_count_sockq++;
	sctppcbinfo.ipi_gencnt_sockq++;
	sq->tcb = stcb;
	stcb->asoc.cnt_msg_on_sb++;

	TAILQ_INSERT_TAIL(&inp->sctp_queue_list, sq, next_sq);
	return (1);
}
struct sctp_tcb *
sctp_remove_from_socket_q(struct sctp_inpcb *inp)
{
	struct sctp_tcb *stcb = NULL;
	struct sctp_socket_q_list *sq;

	/* W-Lock on INP assumed held */
	sq = TAILQ_FIRST(&inp->sctp_queue_list);
	if (sq == NULL)
		return (NULL);

	stcb = sq->tcb;
	TAILQ_REMOVE(&inp->sctp_queue_list, sq, next_sq);
	SCTP_ZONE_FREE(sctppcbinfo.ipi_zone_sockq, sq);
	sctppcbinfo.ipi_count_sockq--;
	sctppcbinfo.ipi_gencnt_sockq++;
	if (stcb) {
		stcb->asoc.cnt_msg_on_sb--;
	}
	return (stcb);
}
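
/*
 * Sketch of how the two socket-queue helpers above pair up; the caller
 * is assumed to hold the INP write lock, as the comments in both
 * functions require:
 *
 *	sctp_add_to_socket_q(inp, stcb);	queue one notification
 *	...
 *	stcb = sctp_remove_from_socket_q(inp);	dequeue in FIFO order
 *	if (stcb)
 *		... cnt_msg_on_sb has already been decremented ...
 */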
int
sctp_initiate_iterator(asoc_func af, uint32_t pcb_state, uint32_t asoc_state,
    void *argp, uint32_t argi, end_func ef,
    struct sctp_inpcb *s_inp)
{
	struct sctp_iterator *it = NULL;

	if (af == NULL) {
		return (-1);
	}
	MALLOC(it, struct sctp_iterator *, sizeof(struct sctp_iterator), M_PCB,
	    M_NOWAIT);
	if (it == NULL) {
		return (ENOMEM);
	}
	memset(it, 0, sizeof(*it));
	it->function_toapply = af;
	it->function_atend = ef;
	it->pointer = argp;
	it->val = argi;
	it->pcb_flags = pcb_state;
	it->asoc_state = asoc_state;
	if (s_inp) {
		it->inp = s_inp;
		it->iterator_flags = SCTP_ITERATOR_DO_SINGLE_INP;
	} else {
		SCTP_INP_INFO_RLOCK();
		it->inp = LIST_FIRST(&sctppcbinfo.listhead);
		SCTP_INP_INFO_RUNLOCK();
		it->iterator_flags = SCTP_ITERATOR_DO_ALL_INP;
	}
	/* Init the timer */
#if defined(__FreeBSD__) && __FreeBSD_version >= 500000
	callout_init(&it->tmr.timer, 0);
#else
	callout_init(&it->tmr.timer);
#endif
	/* add to the list of all iterators */
	SCTP_INP_INFO_WLOCK();
	LIST_INSERT_HEAD(&sctppcbinfo.iteratorhead, it, sctp_nxt_itr);
	SCTP_INP_INFO_WUNLOCK();
	sctp_iterator_timer(it);
	return (0);
}
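
/*
 * Hypothetical usage sketch for sctp_initiate_iterator(); the callback
 * names below are illustrative only and the asoc_func/end_func
 * signatures are assumed from how the fields are used here:
 *
 *	static void my_asoc_func(struct sctp_inpcb *, struct sctp_tcb *,
 *	    void *, uint32_t);
 *	static void my_end_func(void *, uint32_t);
 *
 *	(void)sctp_initiate_iterator(my_asoc_func, 0, 0, NULL, 0,
 *	    my_end_func, NULL);
 *
 * Passing a NULL s_inp selects SCTP_ITERATOR_DO_ALL_INP, so the work is
 * spread over every endpoint via sctp_iterator_timer().
 */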
/*
 * Callout/Timer routines for OS that doesn't have them
 */
#ifdef _SCTP_NEEDS_CALLOUT_
#ifndef __APPLE__
extern int ticks;
#endif

void
callout_init(struct callout *c)
{
	bzero(c, sizeof(*c));
}

void
callout_reset(struct callout *c, int to_ticks, void (*ftn)(void *), void *arg)
{
	if (c->c_flags & CALLOUT_PENDING)
		callout_stop(c);

	/*
	 * We could spl down here and back up at the TAILQ_INSERT_TAIL,
	 * but there's no point since doing this setup doesn't take much
	 * time.
	 */
	if (to_ticks <= 0)
		to_ticks = 1;

	c->c_arg = arg;
	c->c_func = ftn;
	c->c_flags = (CALLOUT_ACTIVE | CALLOUT_PENDING);
#if defined(__APPLE__)
	c->c_time = to_ticks;	/* just store the requested timeout */
	timeout(ftn, arg, to_ticks);
#else
	c->c_time = ticks + to_ticks;
	TAILQ_INSERT_TAIL(&sctppcbinfo.callqueue, c, tqe);
#endif
}

int
callout_stop(struct callout *c)
{
	/*
	 * Don't attempt to delete a callout that's not on the queue.
	 */
	if (!(c->c_flags & CALLOUT_PENDING)) {
		c->c_flags &= ~CALLOUT_ACTIVE;
		return (0);
	}
	c->c_flags &= ~(CALLOUT_ACTIVE | CALLOUT_PENDING | CALLOUT_FIRED);
#if defined(__APPLE__)
	/* thread_call_cancel(c->c_call); */
	untimeout(c->c_func, c->c_arg);
#else
	TAILQ_REMOVE(&sctppcbinfo.callqueue, c, tqe);
#endif
	return (1);
}

#if !defined(__APPLE__)
void
sctp_fasttim(void)
{
	struct callout *c, *n;
	struct calloutlist locallist;
	int inited = 0;

	crit_enter();
	/* run through and subtract and mark all callouts */
	c = TAILQ_FIRST(&sctppcbinfo.callqueue);
	while (c) {
		n = TAILQ_NEXT(c, tqe);
		if (c->c_time <= ticks) {
			c->c_flags |= CALLOUT_FIRED;
			c->c_time = 0;
			TAILQ_REMOVE(&sctppcbinfo.callqueue, c, tqe);
			if (inited == 0) {
				TAILQ_INIT(&locallist);
				inited = 1;
			}
			/* move off of main list */
			TAILQ_INSERT_TAIL(&locallist, c, tqe);
		}
		c = n;
	}
	/* Now all the ones on the locallist must be called */
	if (inited) {
		c = TAILQ_FIRST(&locallist);
		while (c) {
			/* remove it */
			TAILQ_REMOVE(&locallist, c, tqe);
			/* now validate that it did not get canceled */
			if (c->c_flags & CALLOUT_FIRED) {
				c->c_flags &= ~CALLOUT_PENDING;
				crit_exit();
				(*c->c_func)(c->c_arg);
				crit_enter();
			}
			c = TAILQ_FIRST(&locallist);
		}
	}
	crit_exit();
}
#endif
#endif /* _SCTP_NEEDS_CALLOUT_ */
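
/*
 * Minimal sketch of the emulated callout API above, meaningful only when
 * _SCTP_NEEDS_CALLOUT_ is defined; my_timer_func and arg are
 * illustrative placeholders:
 *
 *	struct callout c;
 *
 *	callout_init(&c);
 *	callout_reset(&c, hz, my_timer_func, arg);	arm for ~1 second
 *	...
 *	callout_stop(&c);				cancel if still pending
 */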