/*	$KAME: sctp_pcb.c,v 1.37 2004/08/17 06:28:02 t-momose Exp $	*/
/*	$DragonFly: src/sys/netinet/sctp_pcb.c,v 1.14 2008/03/07 11:34:20 sephe Exp $	*/

/*
 * Copyright (c) 2001, 2002, 2003, 2004 Cisco Systems, Inc.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *      This product includes software developed by Cisco Systems, Inc.
 * 4. Neither the name of the project nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY CISCO SYSTEMS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL CISCO SYSTEMS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */
#if !(defined(__OpenBSD__) || defined(__APPLE__))
#include "opt_ipsec.h"
#endif
#if defined(__FreeBSD__) || defined(__DragonFly__)
#include "opt_compat.h"
#include "opt_inet6.h"
#endif
#if defined(__NetBSD__)
#elif !defined(__OpenBSD__)
#endif

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/malloc.h>
#include <sys/domain.h>
#include <sys/protosw.h>
#include <sys/socket.h>
#include <sys/socketvar.h>
#include <sys/kernel.h>
#include <sys/sysctl.h>
#include <sys/thread2.h>
#if defined(__FreeBSD__) || defined(__APPLE__) || defined(__DragonFly__)
#include <sys/random.h>
#endif
#if defined(__NetBSD__)
#endif
#if defined(__OpenBSD__)
#include <dev/rndvar.h>
#endif

#if defined(__APPLE__)
#include <netinet/sctp_callout.h>
#elif defined(__OpenBSD__)
#include <sys/timeout.h>
#else
#include <sys/callout.h>
#endif

#if (defined(__FreeBSD__) && __FreeBSD_version >= 500000)
#include <sys/limits.h>
#else
#include <machine/limits.h>
#endif
#include <machine/cpu.h>

#include <net/if_types.h>
#include <net/route.h>
#include <netinet/in.h>
#include <netinet/in_systm.h>
#include <netinet/ip.h>
#include <netinet/in_pcb.h>
#include <netinet/in_var.h>
#include <netinet/ip_var.h>

#include <netinet/ip6.h>
#include <netinet6/ip6_var.h>
#include <netinet6/scope6_var.h>
#if defined(__FreeBSD__) || (__NetBSD__) || defined(__DragonFly__)
#include <netinet6/in6_pcb.h>
#elif defined(__OpenBSD__)
#include <netinet/in_pcb.h>
#endif

#include <netinet6/ipsec.h>
#include <netproto/key/key.h>

#include <netinet/sctp_var.h>
#include <netinet/sctp_pcb.h>
#include <netinet/sctputil.h>
#include <netinet/sctp.h>
#include <netinet/sctp_header.h>
#include <netinet/sctp_asconf.h>
#include <netinet/sctp_output.h>
#include <netinet/sctp_timer.h>
#ifndef SCTP_PCBHASHSIZE
/* default number of association hash buckets in each endpoint */
#define SCTP_PCBHASHSIZE 256
#endif

#ifdef SCTP_DEBUG
u_int32_t sctp_debug_on = 0;
#endif /* SCTP_DEBUG */

u_int32_t sctp_pegs[SCTP_NUMBER_OF_PEGS];

int sctp_pcbtblsize = SCTP_PCBHASHSIZE;

struct sctp_epinfo sctppcbinfo;
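/*
 * sctppcbinfo (above) is the single global SCTP registry: it holds the list
 * of all endpoints, the endpoint/association hash tables and their hash
 * marks, the resource counters (ep, asoc, laddr, raddr, chunk, sockq) and
 * the INP-INFO lock that serializes access to all of the above.
 */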
/* FIX: we don't handle multiple link local scopes */
/* "scopeless" replacement of IN6_ARE_ADDR_EQUAL */
int
SCTP6_ARE_ADDR_EQUAL(struct in6_addr *a, struct in6_addr *b)
{
	struct in6_addr tmp_a, tmp_b;

	/* use a copy of a and b */
	tmp_a = *a;
	tmp_b = *b;
	in6_clearscope(&tmp_a);
	in6_clearscope(&tmp_b);
	return (IN6_ARE_ADDR_EQUAL(&tmp_a, &tmp_b));
}
extern int ipport_firstauto;
extern int ipport_lastauto;
extern int ipport_hifirstauto;
extern int ipport_hilastauto;
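/*
 * On FreeBSD 5 and later the SCTP_INP_*LOCK operations below are defined as
 * checking functions: before taking the real mutex they walk the endpoint
 * and association lists and panic if a lock that should not already be held
 * is owned, which catches lock-ordering and recursion bugs at run time.
 */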
#if defined(__FreeBSD__) && __FreeBSD_version > 500000

void sctp_validate_no_locks(void);

void
SCTP_INP_RLOCK(struct sctp_inpcb *inp)
{
	struct sctp_tcb *stcb;

	LIST_FOREACH(stcb, &inp->sctp_asoc_list, sctp_tcblist) {
		if (mtx_owned(&(stcb)->tcb_mtx))
			panic("I own TCB lock?");
	}
	if (mtx_owned(&(inp)->inp_mtx))
		panic("INP Recursive Lock-R");
	mtx_lock(&(inp)->inp_mtx);
}

void
SCTP_INP_WLOCK(struct sctp_inpcb *inp)
{
	SCTP_INP_RLOCK(inp);
}

void
SCTP_INP_INFO_RLOCK(void)
{
	struct sctp_inpcb *inp;
	struct sctp_tcb *stcb;

	LIST_FOREACH(inp, &sctppcbinfo.listhead, sctp_list) {
		if (mtx_owned(&(inp)->inp_mtx))
			panic("info-lock and own inp lock?");
		LIST_FOREACH(stcb, &inp->sctp_asoc_list, sctp_tcblist) {
			if (mtx_owned(&(stcb)->tcb_mtx))
				panic("Info lock and own a tcb lock?");
		}
	}
	if (mtx_owned(&sctppcbinfo.ipi_ep_mtx))
		panic("INP INFO Recursive Lock-R");
	mtx_lock(&sctppcbinfo.ipi_ep_mtx);
}

void
SCTP_INP_INFO_WLOCK(void)
{
	SCTP_INP_INFO_RLOCK();
}

void
sctp_validate_no_locks(void)
{
	struct sctp_inpcb *inp;
	struct sctp_tcb *stcb;

	if (mtx_owned(&sctppcbinfo.ipi_ep_mtx))
		panic("INP INFO lock is owned?");

	LIST_FOREACH(inp, &sctppcbinfo.listhead, sctp_list) {
		if (mtx_owned(&(inp)->inp_mtx))
			panic("You own an INP lock?");
		LIST_FOREACH(stcb, &inp->sctp_asoc_list, sctp_tcblist) {
			if (mtx_owned(&(stcb)->tcb_mtx))
				panic("You own a TCB lock?");
		}
	}
}

#endif
void
sctp_fill_pcbinfo(struct sctp_pcbinfo *spcb)
{
	/*
	 * We really don't need to lock this, but I will
	 * just because it does not hurt.
	 */
	SCTP_INP_INFO_RLOCK();
	spcb->ep_count = sctppcbinfo.ipi_count_ep;
	spcb->asoc_count = sctppcbinfo.ipi_count_asoc;
	spcb->laddr_count = sctppcbinfo.ipi_count_laddr;
	spcb->raddr_count = sctppcbinfo.ipi_count_raddr;
	spcb->chk_count = sctppcbinfo.ipi_count_chunk;
	spcb->sockq_count = sctppcbinfo.ipi_count_sockq;
	spcb->mbuf_track = sctppcbinfo.mbuf_track;
	SCTP_INP_INFO_RUNLOCK();
}
/*
 * Notes on locks for FreeBSD 5 and up. All association
 * lookups that have a definite ep, the INP structure is
 * assumed to be locked for reading. If we need to go
 * find the INP (usually when a **inp is passed) then
 * we must lock the INFO structure first and if needed
 * lock the INP too. Note that if we lock it we must
 * unlock it again before returning.
 */
/*
 * Given an endpoint, look and find in its association list any association
 * with the "to" address given. This can be a "from" address, too, for
 * inbound packets. For outbound packets it is a true "to" address.
 */
static struct sctp_tcb *
sctp_tcb_special_locate(struct sctp_inpcb **inp_p, struct sockaddr *from,
    struct sockaddr *to, struct sctp_nets **netp)
{
	/**** ASSUMES THE CALLER holds the INP_INFO_RLOCK */

	/*
	 * Note for this module care must be taken when observing what "to"
	 * is for. In most of the rest of the code the TO field represents
	 * my peer and the FROM field represents my address. For this module
	 * it is reversed of that.
	 */
	/*
	 * If we support the TCP model, then we must now dig through to
	 * see if we can find our endpoint in the list of tcp ep's.
	 */
	uint16_t lport, rport;
	struct sctppcbhead *ephead;
	struct sctp_inpcb *inp;
	struct sctp_laddr *laddr;
	struct sctp_tcb *stcb;
	struct sctp_nets *net;
	if ((to == NULL) || (from == NULL)) {
		return (NULL);
	}

	if (to->sa_family == AF_INET && from->sa_family == AF_INET) {
		lport = ((struct sockaddr_in *)to)->sin_port;
		rport = ((struct sockaddr_in *)from)->sin_port;
	} else if (to->sa_family == AF_INET6 && from->sa_family == AF_INET6) {
		lport = ((struct sockaddr_in6 *)to)->sin6_port;
		rport = ((struct sockaddr_in6 *)from)->sin6_port;
	} else {
		return (NULL);
	}

	ephead = &sctppcbinfo.sctp_tcpephash[SCTP_PCBHASH_ALLADDR(
	    (lport + rport), sctppcbinfo.hashtcpmark)];
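	/*
	 * Note: one-to-one (TCP model) endpoints are hashed on the sum of
	 * the local and remote port, masked with hashtcpmark, since each
	 * such endpoint represents exactly one connection.
	 */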
	/*
	 * Ok now for each of the guys in this bucket we must look and see:
	 * - Does the remote port match.
	 * - Does the single association's addresses match this address.
	 * If so we update *inp_p to point to this ep and return the
	 * tcb from it.
	 */
316 LIST_FOREACH(inp
, ephead
, sctp_hash
) {
317 if (lport
!= inp
->sctp_lport
) {
321 /* check to see if the ep has one of the addresses */
322 if ((inp
->sctp_flags
& SCTP_PCB_FLAGS_BOUNDALL
) == 0) {
323 /* We are NOT bound all, so look further */
326 LIST_FOREACH(laddr
, &inp
->sctp_addr_list
, sctp_nxt_addr
) {
327 if (laddr
->ifa
== NULL
) {
329 if (sctp_debug_on
& SCTP_DEBUG_PCB1
) {
330 kprintf("An ounce of prevention is worth a pound of cure\n");
335 if (laddr
->ifa
->ifa_addr
== NULL
) {
337 if (sctp_debug_on
& SCTP_DEBUG_PCB1
) {
338 kprintf("ifa with a NULL address\n");
343 if (laddr
->ifa
->ifa_addr
->sa_family
==
345 /* see if it matches */
346 struct sockaddr_in
*intf_addr
, *sin
;
347 intf_addr
= (struct sockaddr_in
*)
348 laddr
->ifa
->ifa_addr
;
349 sin
= (struct sockaddr_in
*)to
;
350 if (from
->sa_family
== AF_INET
) {
351 if (sin
->sin_addr
.s_addr
==
352 intf_addr
->sin_addr
.s_addr
) {
354 SCTP_INP_RUNLOCK(inp
);
358 struct sockaddr_in6
*intf_addr6
;
359 struct sockaddr_in6
*sin6
;
360 sin6
= (struct sockaddr_in6
*)
362 intf_addr6
= (struct sockaddr_in6
*)
363 laddr
->ifa
->ifa_addr
;
365 if (SCTP6_ARE_ADDR_EQUAL(&sin6
->sin6_addr
,
366 &intf_addr6
->sin6_addr
)) {
368 SCTP_INP_RUNLOCK(inp
);
375 /* This endpoint does not have this address */
376 SCTP_INP_RUNLOCK(inp
);
381 * Ok if we hit here the ep has the address, does it hold the
385 stcb
= LIST_FIRST(&inp
->sctp_asoc_list
);
387 SCTP_INP_RUNLOCK(inp
);
391 if (stcb
->rport
!= rport
) {
392 /* remote port does not match. */
393 SCTP_TCB_UNLOCK(stcb
);
394 SCTP_INP_RUNLOCK(inp
);
397 /* Does this TCB have a matching address? */
398 TAILQ_FOREACH(net
, &stcb
->asoc
.nets
, sctp_next
) {
399 if (net
->ro
._l_addr
.sa
.sa_family
!= from
->sa_family
) {
400 /* not the same family, can't be a match */
403 if (from
->sa_family
== AF_INET
) {
404 struct sockaddr_in
*sin
, *rsin
;
405 sin
= (struct sockaddr_in
*)&net
->ro
._l_addr
;
406 rsin
= (struct sockaddr_in
*)from
;
407 if (sin
->sin_addr
.s_addr
==
408 rsin
->sin_addr
.s_addr
) {
413 /* Update the endpoint pointer */
415 SCTP_INP_RUNLOCK(inp
);
419 struct sockaddr_in6
*sin6
, *rsin6
;
420 sin6
= (struct sockaddr_in6
*)&net
->ro
._l_addr
;
421 rsin6
= (struct sockaddr_in6
*)from
;
422 if (SCTP6_ARE_ADDR_EQUAL(&sin6
->sin6_addr
,
423 &rsin6
->sin6_addr
)) {
428 /* Update the endpoint pointer */
430 SCTP_INP_RUNLOCK(inp
);
435 SCTP_TCB_UNLOCK(stcb
);
437 SCTP_INP_RUNLOCK(inp
);
443 sctp_findassociation_ep_asconf(struct mbuf
*m
, int iphlen
, int offset
,
444 struct sctphdr
*sh
, struct sctp_inpcb
**inp_p
, struct sctp_nets
**netp
)
446 struct sctp_tcb
*stcb
;
447 struct sockaddr_in
*sin
;
448 struct sockaddr_in6
*sin6
;
449 struct sockaddr_storage local_store
, remote_store
;
451 struct sctp_paramhdr parm_buf
, *phdr
;
454 memset(&local_store
, 0, sizeof(local_store
));
455 memset(&remote_store
, 0, sizeof(remote_store
));
457 /* First get the destination address setup too. */
458 iph
= mtod(m
, struct ip
*);
459 if (iph
->ip_v
== IPVERSION
) {
461 sin
= (struct sockaddr_in
*)&local_store
;
462 sin
->sin_family
= AF_INET
;
463 sin
->sin_len
= sizeof(*sin
);
464 sin
->sin_port
= sh
->dest_port
;
465 sin
->sin_addr
.s_addr
= iph
->ip_dst
.s_addr
;
466 } else if (iph
->ip_v
== (IPV6_VERSION
>> 4)) {
469 ip6
= mtod(m
, struct ip6_hdr
*);
470 sin6
= (struct sockaddr_in6
*)&local_store
;
471 sin6
->sin6_family
= AF_INET6
;
472 sin6
->sin6_len
= sizeof(*sin6
);
473 sin6
->sin6_port
= sh
->dest_port
;
474 sin6
->sin6_addr
= ip6
->ip6_dst
;
479 phdr
= sctp_get_next_param(m
, offset
+ sizeof(struct sctp_asconf_chunk
),
480 &parm_buf
, sizeof(struct sctp_paramhdr
));
483 if (sctp_debug_on
& SCTP_DEBUG_INPUT3
) {
484 kprintf("sctp_process_control: failed to get asconf lookup addr\n");
486 #endif /* SCTP_DEBUG */
489 ptype
= (int)((u_int
)ntohs(phdr
->param_type
));
490 /* get the correlation address */
491 if (ptype
== SCTP_IPV6_ADDRESS
) {
492 /* ipv6 address param */
493 struct sctp_ipv6addr_param
*p6
, p6_buf
;
494 if (ntohs(phdr
->param_length
) != sizeof(struct sctp_ipv6addr_param
)) {
498 p6
= (struct sctp_ipv6addr_param
*)sctp_get_next_param(m
,
499 offset
+ sizeof(struct sctp_asconf_chunk
),
500 &p6_buf
.ph
, sizeof(*p6
));
503 if (sctp_debug_on
& SCTP_DEBUG_INPUT3
) {
504 kprintf("sctp_process_control: failed to get asconf v6 lookup addr\n");
506 #endif /* SCTP_DEBUG */
509 sin6
= (struct sockaddr_in6
*)&remote_store
;
510 sin6
->sin6_family
= AF_INET6
;
511 sin6
->sin6_len
= sizeof(*sin6
);
512 sin6
->sin6_port
= sh
->src_port
;
513 memcpy(&sin6
->sin6_addr
, &p6
->addr
, sizeof(struct in6_addr
));
514 } else if (ptype
== SCTP_IPV4_ADDRESS
) {
515 /* ipv4 address param */
516 struct sctp_ipv4addr_param
*p4
, p4_buf
;
517 if (ntohs(phdr
->param_length
) != sizeof(struct sctp_ipv4addr_param
)) {
521 p4
= (struct sctp_ipv4addr_param
*)sctp_get_next_param(m
,
522 offset
+ sizeof(struct sctp_asconf_chunk
),
523 &p4_buf
.ph
, sizeof(*p4
));
526 if (sctp_debug_on
& SCTP_DEBUG_INPUT3
) {
527 kprintf("sctp_process_control: failed to get asconf v4 lookup addr\n");
529 #endif /* SCTP_DEBUG */
532 sin
= (struct sockaddr_in
*)&remote_store
;
533 sin
->sin_family
= AF_INET
;
534 sin
->sin_len
= sizeof(*sin
);
535 sin
->sin_port
= sh
->src_port
;
536 memcpy(&sin
->sin_addr
, &p4
->addr
, sizeof(struct in_addr
));
538 /* invalid address param type */
542 stcb
= sctp_findassociation_ep_addr(inp_p
,
543 (struct sockaddr
*)&remote_store
, netp
,
544 (struct sockaddr
*)&local_store
, NULL
);
549 sctp_findassociation_ep_addr(struct sctp_inpcb
**inp_p
, struct sockaddr
*remote
,
550 struct sctp_nets
**netp
, struct sockaddr
*local
, struct sctp_tcb
*locked_tcb
)
552 struct sctpasochead
*head
;
553 struct sctp_inpcb
*inp
;
554 struct sctp_tcb
*stcb
;
555 struct sctp_nets
*net
;
559 if (remote
->sa_family
== AF_INET
) {
560 rport
= (((struct sockaddr_in
*)remote
)->sin_port
);
561 } else if (remote
->sa_family
== AF_INET6
) {
562 rport
= (((struct sockaddr_in6
*)remote
)->sin6_port
);
567 /* UN-lock so we can do proper locking here
568 * this occurs when called from load_addresses_from_init.
570 SCTP_TCB_UNLOCK(locked_tcb
);
572 SCTP_INP_INFO_RLOCK();
573 if (inp
->sctp_flags
& SCTP_PCB_FLAGS_TCPTYPE
) {
	 * Now either this guy is our listener or it's the connector.
	 * If it is the one that issued the connect, then its only
	 * chance is to be the first TCB in the list. If it is the
	 * acceptor, then do the special_lookup to hash and find the
581 if (inp
->sctp_flags
& SCTP_PCB_FLAGS_ACCEPTING
) {
582 /* to is peer addr, from is my addr */
583 stcb
= sctp_tcb_special_locate(inp_p
, remote
, local
,
585 if ((stcb
!= NULL
) && (locked_tcb
== NULL
)){
586 /* we have a locked tcb, lower refcount */
588 SCTP_INP_DECR_REF(inp
);
589 SCTP_INP_WUNLOCK(inp
);
591 if (locked_tcb
!= NULL
) {
592 SCTP_INP_RLOCK(locked_tcb
->sctp_ep
);
593 SCTP_TCB_LOCK(locked_tcb
);
594 SCTP_INP_RUNLOCK(locked_tcb
->sctp_ep
);
596 SCTP_TCB_UNLOCK(stcb
);
598 SCTP_INP_INFO_RUNLOCK();
602 stcb
= LIST_FIRST(&inp
->sctp_asoc_list
);
607 if (stcb
->rport
!= rport
) {
608 /* remote port does not match. */
609 SCTP_TCB_UNLOCK(stcb
);
612 /* now look at the list of remote addresses */
613 TAILQ_FOREACH(net
, &stcb
->asoc
.nets
, sctp_next
) {
614 if (net
->ro
._l_addr
.sa
.sa_family
!=
616 /* not the same family */
619 if (remote
->sa_family
== AF_INET
) {
620 struct sockaddr_in
*sin
, *rsin
;
621 sin
= (struct sockaddr_in
*)
623 rsin
= (struct sockaddr_in
*)remote
;
624 if (sin
->sin_addr
.s_addr
==
625 rsin
->sin_addr
.s_addr
) {
630 if (locked_tcb
== NULL
) {
631 SCTP_INP_DECR_REF(inp
);
633 SCTP_INP_WUNLOCK(inp
);
634 SCTP_INP_INFO_RUNLOCK();
637 } else if (remote
->sa_family
== AF_INET6
) {
638 struct sockaddr_in6
*sin6
, *rsin6
;
639 sin6
= (struct sockaddr_in6
*)&net
->ro
._l_addr
;
640 rsin6
= (struct sockaddr_in6
*)remote
;
641 if (SCTP6_ARE_ADDR_EQUAL(&sin6
->sin6_addr
,
642 &rsin6
->sin6_addr
)) {
647 if (locked_tcb
== NULL
) {
648 SCTP_INP_DECR_REF(inp
);
650 SCTP_INP_WUNLOCK(inp
);
651 SCTP_INP_INFO_RUNLOCK();
656 SCTP_TCB_UNLOCK(stcb
);
660 head
= &inp
->sctp_tcbhash
[SCTP_PCBHASH_ALLADDR(rport
,
661 inp
->sctp_hashmark
)];
665 LIST_FOREACH(stcb
, head
, sctp_tcbhash
) {
666 if (stcb
->rport
!= rport
) {
667 /* remote port does not match */
670 /* now look at the list of remote addresses */
672 TAILQ_FOREACH(net
, &stcb
->asoc
.nets
, sctp_next
) {
673 if (net
->ro
._l_addr
.sa
.sa_family
!=
675 /* not the same family */
678 if (remote
->sa_family
== AF_INET
) {
679 struct sockaddr_in
*sin
, *rsin
;
680 sin
= (struct sockaddr_in
*)
682 rsin
= (struct sockaddr_in
*)remote
;
683 if (sin
->sin_addr
.s_addr
==
684 rsin
->sin_addr
.s_addr
) {
689 if (locked_tcb
== NULL
) {
690 SCTP_INP_DECR_REF(inp
);
692 SCTP_INP_WUNLOCK(inp
);
693 SCTP_INP_INFO_RUNLOCK();
696 } else if (remote
->sa_family
== AF_INET6
) {
697 struct sockaddr_in6
*sin6
, *rsin6
;
698 sin6
= (struct sockaddr_in6
*)
700 rsin6
= (struct sockaddr_in6
*)remote
;
701 if (SCTP6_ARE_ADDR_EQUAL(&sin6
->sin6_addr
,
702 &rsin6
->sin6_addr
)) {
707 if (locked_tcb
== NULL
) {
708 SCTP_INP_DECR_REF(inp
);
710 SCTP_INP_WUNLOCK(inp
);
711 SCTP_INP_INFO_RUNLOCK();
716 SCTP_TCB_UNLOCK(stcb
);
720 /* clean up for returning null */
722 if (locked_tcb
->sctp_ep
!= inp
) {
723 SCTP_INP_RLOCK(locked_tcb
->sctp_ep
);
724 SCTP_TCB_LOCK(locked_tcb
);
725 SCTP_INP_RUNLOCK(locked_tcb
->sctp_ep
);
727 SCTP_TCB_LOCK(locked_tcb
);
729 SCTP_INP_WUNLOCK(inp
);
730 SCTP_INP_INFO_RUNLOCK();
736 * Find an association for a specific endpoint using the association id
737 * given out in the COMM_UP notification
740 sctp_findassociation_ep_asocid(struct sctp_inpcb
*inp
, caddr_t asoc_id
)
	 * Use the assoc_id to find an endpoint
745 struct sctpasochead
*head
;
746 struct sctp_tcb
*stcb
;
749 if (asoc_id
== 0 || inp
== NULL
) {
752 SCTP_INP_INFO_RLOCK();
753 vtag
= (u_int32_t
)asoc_id
;
754 head
= &sctppcbinfo
.sctp_asochash
[SCTP_PCBHASH_ASOC(vtag
,
755 sctppcbinfo
.hashasocmark
)];
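	/*
	 * The association id handed out in COMM_UP is the local verification
	 * tag, so it can be used directly as the key into the asoc hash; each
	 * candidate is then checked against my_vtag and the owning endpoint.
	 */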
758 SCTP_INP_INFO_RUNLOCK();
761 LIST_FOREACH(stcb
, head
, sctp_asocs
) {
762 SCTP_INP_RLOCK(stcb
->sctp_ep
);
764 SCTP_INP_RUNLOCK(stcb
->sctp_ep
);
765 if (stcb
->asoc
.my_vtag
== vtag
) {
767 if (inp
!= stcb
->sctp_ep
) {
768 /* some other guy has the
769 * same vtag active (vtag collision).
771 sctp_pegs
[SCTP_VTAG_BOGUS
]++;
772 SCTP_TCB_UNLOCK(stcb
);
775 sctp_pegs
[SCTP_VTAG_EXPR
]++;
776 SCTP_INP_INFO_RUNLOCK();
779 SCTP_TCB_UNLOCK(stcb
);
781 SCTP_INP_INFO_RUNLOCK();
785 static struct sctp_inpcb
*
786 sctp_endpoint_probe(struct sockaddr
*nam
, struct sctppcbhead
*head
,
789 struct sctp_inpcb
*inp
;
790 struct sockaddr_in
*sin
;
791 struct sockaddr_in6
*sin6
;
792 struct sctp_laddr
*laddr
;
	/* Endpoint probe expects that the INP_INFO is locked. */
797 if (nam
->sa_family
== AF_INET
) {
798 sin
= (struct sockaddr_in
*)nam
;
800 } else if (nam
->sa_family
== AF_INET6
) {
801 sin6
= (struct sockaddr_in6
*)nam
;
804 /* unsupported family */
810 LIST_FOREACH(inp
, head
, sctp_hash
) {
813 if ((inp
->sctp_flags
& SCTP_PCB_FLAGS_BOUNDALL
) &&
814 (inp
->sctp_lport
== lport
)) {
816 if ((nam
->sa_family
== AF_INET
) &&
817 (inp
->sctp_flags
& SCTP_PCB_FLAGS_BOUND_V6
) &&
818 #if defined(__FreeBSD__) || defined(__APPLE__) || defined(__DragonFly__)
819 (((struct inpcb
*)inp
)->inp_flags
& IN6P_IPV6_V6ONLY
)
821 #if defined(__OpenBSD__)
822 (0) /* For open bsd we do dual bind only */
824 (((struct in6pcb
*)inp
)->in6p_flags
& IN6P_IPV6_V6ONLY
)
828 /* IPv4 on a IPv6 socket with ONLY IPv6 set */
829 SCTP_INP_RUNLOCK(inp
);
832 /* A V6 address and the endpoint is NOT bound V6 */
833 if (nam
->sa_family
== AF_INET6
&&
834 (inp
->sctp_flags
& SCTP_PCB_FLAGS_BOUND_V6
) == 0) {
835 SCTP_INP_RUNLOCK(inp
);
838 SCTP_INP_RUNLOCK(inp
);
841 SCTP_INP_RUNLOCK(inp
);
844 if ((nam
->sa_family
== AF_INET
) &&
845 (sin
->sin_addr
.s_addr
== INADDR_ANY
)) {
846 /* Can't hunt for one that has no address specified */
848 } else if ((nam
->sa_family
== AF_INET6
) &&
849 (IN6_IS_ADDR_UNSPECIFIED(&sin6
->sin6_addr
))) {
850 /* Can't hunt for one that has no address specified */
	 * ok, not bound to all so see if we can find an EP bound to this
858 if (sctp_debug_on
& SCTP_DEBUG_PCB1
) {
859 kprintf("Ok, there is NO bound-all available for port:%x\n", ntohs(lport
));
862 LIST_FOREACH(inp
, head
, sctp_hash
) {
864 if ((inp
->sctp_flags
& SCTP_PCB_FLAGS_BOUNDALL
)) {
865 SCTP_INP_RUNLOCK(inp
);
869 * Ok this could be a likely candidate, look at all of
872 if (inp
->sctp_lport
!= lport
) {
873 SCTP_INP_RUNLOCK(inp
);
877 if (sctp_debug_on
& SCTP_DEBUG_PCB1
) {
			kprintf("Ok, found matching local port\n");
881 LIST_FOREACH(laddr
, &inp
->sctp_addr_list
, sctp_nxt_addr
) {
882 if (laddr
->ifa
== NULL
) {
884 if (sctp_debug_on
& SCTP_DEBUG_PCB1
) {
885 kprintf("An ounce of prevention is worth a pound of cure\n");
891 if (sctp_debug_on
& SCTP_DEBUG_PCB1
) {
892 kprintf("Ok laddr->ifa:%p is possible, ",
896 if (laddr
->ifa
->ifa_addr
== NULL
) {
898 if (sctp_debug_on
& SCTP_DEBUG_PCB1
) {
899 kprintf("Huh IFA as an ifa_addr=NULL, ");
905 if (sctp_debug_on
& SCTP_DEBUG_PCB1
) {
906 kprintf("Ok laddr->ifa:%p is possible, ",
907 laddr
->ifa
->ifa_addr
);
908 sctp_print_address(laddr
->ifa
->ifa_addr
);
909 kprintf("looking for ");
910 sctp_print_address(nam
);
913 if (laddr
->ifa
->ifa_addr
->sa_family
== nam
->sa_family
) {
914 /* possible, see if it matches */
915 struct sockaddr_in
*intf_addr
;
916 intf_addr
= (struct sockaddr_in
*)
917 laddr
->ifa
->ifa_addr
;
918 if (nam
->sa_family
== AF_INET
) {
919 if (sin
->sin_addr
.s_addr
==
920 intf_addr
->sin_addr
.s_addr
) {
922 if (sctp_debug_on
& SCTP_DEBUG_PCB1
) {
923 kprintf("YES, return ep:%p\n", inp
);
926 SCTP_INP_RUNLOCK(inp
);
929 } else if (nam
->sa_family
== AF_INET6
) {
930 struct sockaddr_in6
*intf_addr6
;
931 intf_addr6
= (struct sockaddr_in6
*)
932 laddr
->ifa
->ifa_addr
;
933 if (SCTP6_ARE_ADDR_EQUAL(&sin6
->sin6_addr
,
934 &intf_addr6
->sin6_addr
)) {
936 if (sctp_debug_on
& SCTP_DEBUG_PCB1
) {
937 kprintf("YES, return ep:%p\n", inp
);
940 SCTP_INP_RUNLOCK(inp
);
945 SCTP_INP_RUNLOCK(inp
);
949 if (sctp_debug_on
& SCTP_DEBUG_PCB1
) {
950 kprintf("NO, Falls out to NULL\n");
958 sctp_pcb_findep(struct sockaddr
*nam
, int find_tcp_pool
, int have_lock
)
961 * First we check the hash table to see if someone has this port
962 * bound with just the port.
964 struct sctp_inpcb
*inp
;
965 struct sctppcbhead
*head
;
966 struct sockaddr_in
*sin
;
967 struct sockaddr_in6
*sin6
;
970 if (sctp_debug_on
& SCTP_DEBUG_PCB1
) {
971 kprintf("Looking for endpoint %d :",
972 ntohs(((struct sockaddr_in
*)nam
)->sin_port
));
973 sctp_print_address(nam
);
976 if (nam
->sa_family
== AF_INET
) {
977 sin
= (struct sockaddr_in
*)nam
;
978 lport
= ((struct sockaddr_in
*)nam
)->sin_port
;
979 } else if (nam
->sa_family
== AF_INET6
) {
980 sin6
= (struct sockaddr_in6
*)nam
;
981 lport
= ((struct sockaddr_in6
*)nam
)->sin6_port
;
983 /* unsupported family */
987 * I could cheat here and just cast to one of the types but we will
988 * do it right. It also provides the check against an Unsupported
991 /* Find the head of the ALLADDR chain */
993 SCTP_INP_INFO_RLOCK();
994 head
= &sctppcbinfo
.sctp_ephash
[SCTP_PCBHASH_ALLADDR(lport
,
995 sctppcbinfo
.hashmark
)];
997 if (sctp_debug_on
& SCTP_DEBUG_PCB1
) {
998 kprintf("Main hash to lookup at head:%p\n", head
);
1001 inp
= sctp_endpoint_probe(nam
, head
, lport
);
1004 * If the TCP model exists it could be that the main listening
1005 * endpoint is gone but there exists a connected socket for this
1006 * guy yet. If so we can return the first one that we find. This
1007 * may NOT be the correct one but the sctp_findassociation_ep_addr
1008 * has further code to look at all TCP models.
1010 if (inp
== NULL
&& find_tcp_pool
) {
1013 if (sctp_debug_on
& SCTP_DEBUG_PCB1
) {
1014 kprintf("EP was NULL and TCP model is supported\n");
1017 for (i
= 0; i
< sctppcbinfo
.hashtblsize
; i
++) {
1019 * This is real gross, but we do NOT have a remote
1020 * port at this point depending on who is calling. We
1021 * must therefore look for ANY one that matches our
1024 head
= &sctppcbinfo
.sctp_tcpephash
[i
];
1025 if (LIST_FIRST(head
)) {
1026 inp
= sctp_endpoint_probe(nam
, head
, lport
);
1035 if (sctp_debug_on
& SCTP_DEBUG_PCB1
) {
1036 kprintf("EP to return is %p\n", inp
);
1039 if (have_lock
== 0) {
1041 SCTP_INP_WLOCK(inp
);
1042 SCTP_INP_INCR_REF(inp
);
1043 SCTP_INP_WUNLOCK(inp
);
1045 SCTP_INP_INFO_RUNLOCK();
1048 SCTP_INP_WLOCK(inp
);
1049 SCTP_INP_INCR_REF(inp
);
1050 SCTP_INP_WUNLOCK(inp
);
1057 * Find an association for an endpoint with the pointer to whom you want
1058 * to send to and the endpoint pointer. The address can be IPv4 or IPv6.
1059 * We may need to change the *to to some other struct like a mbuf...
1062 sctp_findassociation_addr_sa(struct sockaddr
*to
, struct sockaddr
*from
,
1063 struct sctp_inpcb
**inp_p
, struct sctp_nets
**netp
, int find_tcp_pool
)
1065 struct sctp_inpcb
*inp
;
1066 struct sctp_tcb
*retval
;
1068 SCTP_INP_INFO_RLOCK();
1069 if (find_tcp_pool
) {
1070 if (inp_p
!= NULL
) {
1071 retval
= sctp_tcb_special_locate(inp_p
, from
, to
, netp
);
1073 retval
= sctp_tcb_special_locate(&inp
, from
, to
, netp
);
1075 if (retval
!= NULL
) {
1076 SCTP_INP_INFO_RUNLOCK();
1080 inp
= sctp_pcb_findep(to
, 0, 1);
1081 if (inp_p
!= NULL
) {
1084 SCTP_INP_INFO_RUNLOCK();
1091 * ok, we have an endpoint, now lets find the assoc for it (if any)
1092 * we now place the source address or from in the to of the find
1093 * endpoint call. Since in reality this chain is used from the
1094 * inbound packet side.
1096 if (inp_p
!= NULL
) {
1097 return (sctp_findassociation_ep_addr(inp_p
, from
, netp
, to
, NULL
));
1099 return (sctp_findassociation_ep_addr(&inp
, from
, netp
, to
, NULL
));
 * This routine will grub through the mbuf that is an INIT or INIT-ACK and
 * find all addresses that the sender has specified in any address list.
 * Each address will be used to look up the TCB and see if one exists.
1109 static struct sctp_tcb
*
1110 sctp_findassociation_special_addr(struct mbuf
*m
, int iphlen
, int offset
,
1111 struct sctphdr
*sh
, struct sctp_inpcb
**inp_p
, struct sctp_nets
**netp
,
1112 struct sockaddr
*dest
)
1114 struct sockaddr_in sin4
;
1115 struct sockaddr_in6 sin6
;
1116 struct sctp_paramhdr
*phdr
, parm_buf
;
1117 struct sctp_tcb
*retval
;
1118 u_int32_t ptype
, plen
;
1120 memset(&sin4
, 0, sizeof(sin4
));
1121 memset(&sin6
, 0, sizeof(sin6
));
1122 sin4
.sin_len
= sizeof(sin4
);
1123 sin4
.sin_family
= AF_INET
;
1124 sin4
.sin_port
= sh
->src_port
;
1125 sin6
.sin6_len
= sizeof(sin6
);
1126 sin6
.sin6_family
= AF_INET6
;
1127 sin6
.sin6_port
= sh
->src_port
;
1130 offset
+= sizeof(struct sctp_init_chunk
);
1132 phdr
= sctp_get_next_param(m
, offset
, &parm_buf
, sizeof(parm_buf
));
1133 while (phdr
!= NULL
) {
1134 /* now we must see if we want the parameter */
1135 ptype
= ntohs(phdr
->param_type
);
1136 plen
= ntohs(phdr
->param_length
);
1139 if (sctp_debug_on
& SCTP_DEBUG_PCB1
) {
1140 kprintf("sctp_findassociation_special_addr: Impossible length in parameter\n");
1142 #endif /* SCTP_DEBUG */
1145 if (ptype
== SCTP_IPV4_ADDRESS
&&
1146 plen
== sizeof(struct sctp_ipv4addr_param
)) {
1147 /* Get the rest of the address */
1148 struct sctp_ipv4addr_param ip4_parm
, *p4
;
1150 phdr
= sctp_get_next_param(m
, offset
,
1151 (struct sctp_paramhdr
*)&ip4_parm
, plen
);
1155 p4
= (struct sctp_ipv4addr_param
*)phdr
;
1156 memcpy(&sin4
.sin_addr
, &p4
->addr
, sizeof(p4
->addr
));
1158 retval
= sctp_findassociation_ep_addr(inp_p
,
1159 (struct sockaddr
*)&sin4
, netp
, dest
, NULL
);
1160 if (retval
!= NULL
) {
1163 } else if (ptype
== SCTP_IPV6_ADDRESS
&&
1164 plen
== sizeof(struct sctp_ipv6addr_param
)) {
1165 /* Get the rest of the address */
1166 struct sctp_ipv6addr_param ip6_parm
, *p6
;
1168 phdr
= sctp_get_next_param(m
, offset
,
1169 (struct sctp_paramhdr
*)&ip6_parm
, plen
);
1173 p6
= (struct sctp_ipv6addr_param
*)phdr
;
1174 memcpy(&sin6
.sin6_addr
, &p6
->addr
, sizeof(p6
->addr
));
1176 retval
= sctp_findassociation_ep_addr(inp_p
,
1177 (struct sockaddr
*)&sin6
, netp
, dest
, NULL
);
1178 if (retval
!= NULL
) {
1182 offset
+= SCTP_SIZE32(plen
);
1183 phdr
= sctp_get_next_param(m
, offset
, &parm_buf
,
1189 static struct sctp_tcb
*
1190 sctp_findassoc_by_vtag(struct sockaddr
*from
, uint32_t vtag
,
1191 struct sctp_inpcb
**inp_p
, struct sctp_nets
**netp
, uint16_t rport
,
1195 * Use my vtag to hash. If we find it we then verify the source addr
1196 * is in the assoc. If all goes well we save a bit on rec of a packet.
1198 struct sctpasochead
*head
;
1199 struct sctp_nets
*net
;
1200 struct sctp_tcb
*stcb
;
1202 SCTP_INP_INFO_RLOCK();
1203 head
= &sctppcbinfo
.sctp_asochash
[SCTP_PCBHASH_ASOC(vtag
,
1204 sctppcbinfo
.hashasocmark
)];
1207 SCTP_INP_INFO_RUNLOCK();
1210 LIST_FOREACH(stcb
, head
, sctp_asocs
) {
1211 SCTP_INP_RLOCK(stcb
->sctp_ep
);
1212 SCTP_TCB_LOCK(stcb
);
1213 SCTP_INP_RUNLOCK(stcb
->sctp_ep
);
1214 if (stcb
->asoc
.my_vtag
== vtag
) {
1216 if (stcb
->rport
!= rport
) {
1218 * we could remove this if vtags are unique
1219 * across the system.
1221 SCTP_TCB_UNLOCK(stcb
);
1224 if (stcb
->sctp_ep
->sctp_lport
!= lport
) {
1226 * we could remove this if vtags are unique
1227 * across the system.
1229 SCTP_TCB_UNLOCK(stcb
);
1232 net
= sctp_findnet(stcb
, from
);
1236 sctp_pegs
[SCTP_VTAG_EXPR
]++;
1237 *inp_p
= stcb
->sctp_ep
;
1238 SCTP_INP_INFO_RUNLOCK();
1241 /* not him, this should only
1242 * happen in rare cases so
1245 sctp_pegs
[SCTP_VTAG_BOGUS
]++;
1248 SCTP_TCB_UNLOCK(stcb
);
1250 SCTP_INP_INFO_RUNLOCK();
1255 * Find an association with the pointer to the inbound IP packet. This
1256 * can be a IPv4 or IPv6 packet.
1259 sctp_findassociation_addr(struct mbuf
*m
, int iphlen
, int offset
,
1260 struct sctphdr
*sh
, struct sctp_chunkhdr
*ch
,
1261 struct sctp_inpcb
**inp_p
, struct sctp_nets
**netp
)
1265 struct sctp_tcb
*retval
;
1266 struct sockaddr_storage to_store
, from_store
;
1267 struct sockaddr
*to
= (struct sockaddr
*)&to_store
;
1268 struct sockaddr
*from
= (struct sockaddr
*)&from_store
;
1269 struct sctp_inpcb
*inp
;
1272 iph
= mtod(m
, struct ip
*);
1273 if (iph
->ip_v
== IPVERSION
) {
1275 struct sockaddr_in
*to4
, *from4
;
1277 to4
= (struct sockaddr_in
*)&to_store
;
1278 from4
= (struct sockaddr_in
*)&from_store
;
1279 bzero(to4
, sizeof(*to4
));
1280 bzero(from4
, sizeof(*from4
));
1281 from4
->sin_family
= to4
->sin_family
= AF_INET
;
1282 from4
->sin_len
= to4
->sin_len
= sizeof(struct sockaddr_in
);
1283 from4
->sin_addr
.s_addr
= iph
->ip_src
.s_addr
;
1284 to4
->sin_addr
.s_addr
= iph
->ip_dst
.s_addr
;
1285 from4
->sin_port
= sh
->src_port
;
1286 to4
->sin_port
= sh
->dest_port
;
1287 } else if (iph
->ip_v
== (IPV6_VERSION
>> 4)) {
1289 struct ip6_hdr
*ip6
;
1290 struct sockaddr_in6
*to6
, *from6
;
1292 ip6
= mtod(m
, struct ip6_hdr
*);
1293 to6
= (struct sockaddr_in6
*)&to_store
;
1294 from6
= (struct sockaddr_in6
*)&from_store
;
1295 bzero(to6
, sizeof(*to6
));
1296 bzero(from6
, sizeof(*from6
));
1297 from6
->sin6_family
= to6
->sin6_family
= AF_INET6
;
1298 from6
->sin6_len
= to6
->sin6_len
= sizeof(struct sockaddr_in6
);
1299 to6
->sin6_addr
= ip6
->ip6_dst
;
1300 from6
->sin6_addr
= ip6
->ip6_src
;
1301 from6
->sin6_port
= sh
->src_port
;
1302 to6
->sin6_port
= sh
->dest_port
;
1303 /* Get the scopes in properly to the sin6 addr's */
1304 in6_recoverscope(to6
, &to6
->sin6_addr
, NULL
);
1305 #if defined(SCTP_BASE_FREEBSD) || defined(__APPLE__) || defined(__DragonFly__)
1306 in6_embedscope(&to6
->sin6_addr
, to6
, NULL
, NULL
);
1308 in6_embedscope(&to6
->sin6_addr
, to6
);
1311 in6_recoverscope(from6
, &from6
->sin6_addr
, NULL
);
1312 #if defined(SCTP_BASE_FREEBSD) || defined(__APPLE__) || defined(__DragonFly__)
1313 in6_embedscope(&from6
->sin6_addr
, from6
, NULL
, NULL
);
1315 in6_embedscope(&from6
->sin6_addr
, from6
);
1318 /* Currently not supported. */
1322 if (sctp_debug_on
& SCTP_DEBUG_PCB1
) {
1323 kprintf("Looking for port %d address :",
1324 ntohs(((struct sockaddr_in
*)to
)->sin_port
));
1325 sctp_print_address(to
);
1326 kprintf("From for port %d address :",
1327 ntohs(((struct sockaddr_in
*)from
)->sin_port
));
1328 sctp_print_address(from
);
1333 /* we only go down this path if vtag is non-zero */
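	/*
	 * Fast path: a non-zero v_tag lets us look the association up by
	 * verification tag alone; only if that fails do we fall back to the
	 * address-based searches below.
	 */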
1334 retval
= sctp_findassoc_by_vtag(from
, ntohl(sh
->v_tag
),
1335 inp_p
, netp
, sh
->src_port
, sh
->dest_port
);
1341 if ((ch
->chunk_type
!= SCTP_INITIATION
) &&
1342 (ch
->chunk_type
!= SCTP_INITIATION_ACK
) &&
1343 (ch
->chunk_type
!= SCTP_COOKIE_ACK
) &&
1344 (ch
->chunk_type
!= SCTP_COOKIE_ECHO
)) {
1345 /* Other chunk types go to the tcp pool. */
1349 retval
= sctp_findassociation_addr_sa(to
, from
, inp_p
, netp
,
1353 retval
= sctp_findassociation_addr_sa(to
, from
, &inp
, netp
,
1357 if (sctp_debug_on
& SCTP_DEBUG_PCB1
) {
1358 kprintf("retval:%p inp:%p\n", retval
, inp
);
1361 if (retval
== NULL
&& inp
) {
1362 /* Found a EP but not this address */
1364 if (sctp_debug_on
& SCTP_DEBUG_PCB1
) {
1365 kprintf("Found endpoint %p but no asoc - ep state:%x\n",
1366 inp
, inp
->sctp_flags
);
1369 if ((ch
->chunk_type
== SCTP_INITIATION
) ||
1370 (ch
->chunk_type
== SCTP_INITIATION_ACK
)) {
1372 * special hook, we do NOT return linp or an
1373 * association that is linked to an existing
1374 * association that is under the TCP pool (i.e. no
1375 * listener exists). The endpoint finding routine
			 * will always find a listener before examining the
1379 if (inp
->sctp_flags
& SCTP_PCB_FLAGS_IN_TCPPOOL
) {
1381 if (sctp_debug_on
& SCTP_DEBUG_PCB1
) {
1382 kprintf("Gak, its in the TCP pool... return NULL");
1391 if (sctp_debug_on
& SCTP_DEBUG_PCB1
) {
1392 kprintf("Now doing SPECIAL find\n");
1395 retval
= sctp_findassociation_special_addr(m
, iphlen
,
1396 offset
, sh
, inp_p
, netp
, to
);
1400 if (sctp_debug_on
& SCTP_DEBUG_PCB1
) {
1401 kprintf("retval is %p\n", retval
);
1407 extern int sctp_max_burst_default
;
1409 extern unsigned int sctp_delayed_sack_time_default
;
1410 extern unsigned int sctp_heartbeat_interval_default
;
1411 extern unsigned int sctp_pmtu_raise_time_default
;
1412 extern unsigned int sctp_shutdown_guard_time_default
;
1413 extern unsigned int sctp_secret_lifetime_default
;
1415 extern unsigned int sctp_rto_max_default
;
1416 extern unsigned int sctp_rto_min_default
;
1417 extern unsigned int sctp_rto_initial_default
;
1418 extern unsigned int sctp_init_rto_max_default
;
1419 extern unsigned int sctp_valid_cookie_life_default
;
1420 extern unsigned int sctp_init_rtx_max_default
;
1421 extern unsigned int sctp_assoc_rtx_max_default
;
1422 extern unsigned int sctp_path_rtx_max_default
;
1423 extern unsigned int sctp_nr_outgoing_streams_default
;
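/*
 * The *_default globals above supply the initial per-endpoint settings;
 * sctp_inpcb_alloc() copies them into each newly created endpoint, so
 * changing the globals only affects endpoints created afterwards.
 */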
 * allocate a sctp_inpcb and setup a temporary binding to a port/all
 * addresses. This way if we don't get a bind we by default pick an ephemeral
 * port with all addresses bound.
1431 sctp_inpcb_alloc(struct socket
*so
)
1434 * we get called when a new endpoint starts up. We need to allocate
1435 * the sctp_inpcb structure from the zone and init it. Mark it as
1436 * unbound and find a port that we can use as an ephemeral with
1437 * INADDR_ANY. If the user binds later no problem we can then add
1438 * in the specific addresses. And setup the default parameters for
1442 struct sctp_inpcb
*inp
, *n_inp
;
1444 struct timeval time
;
	 * This code audits the entire INP list to see if
	 * any ep's that are in the GONE state are now
	 * all free. This should not happen really, since when
	 * the last association is freed we should end up deleting
	 * the inpcb. This code, including the locks, should
	 * be taken out ... since the last set of fixes I
	 * have not seen the "Found a GONE on list" message
	 * come out. But I am paranoid, so we will leave this
	 * in at the cost of efficiency on allocation of PCBs.
	 * Probably we should move this to the invariant
1462 /* #ifdef INVARIANTS*/
1463 SCTP_INP_INFO_RLOCK();
1464 inp
= LIST_FIRST(&sctppcbinfo
.listhead
);
1466 n_inp
= LIST_NEXT(inp
, sctp_list
);
1467 if (inp
->sctp_flags
& SCTP_PCB_FLAGS_SOCKET_GONE
) {
1468 if (LIST_FIRST(&inp
->sctp_asoc_list
) == NULL
) {
1469 /* finish the job now */
1470 kprintf("Found a GONE on list\n");
1471 SCTP_INP_INFO_RUNLOCK();
1472 sctp_inpcb_free(inp
, 1);
1473 SCTP_INP_INFO_RLOCK();
1478 SCTP_INP_INFO_RUNLOCK();
1479 /* #endif INVARIANTS*/
1481 SCTP_INP_INFO_WLOCK();
1482 inp
= (struct sctp_inpcb
*)SCTP_ZONE_GET(sctppcbinfo
.ipi_zone_ep
);
1484 kprintf("Out of SCTP-INPCB structures - no resources\n");
1485 SCTP_INP_INFO_WUNLOCK();
1490 bzero(inp
, sizeof(*inp
));
1492 /* bump generations */
1493 inp
->ip_inp
.inp
.inp_socket
= so
;
1495 /* setup socket pointers */
1496 inp
->sctp_socket
= so
;
1498 /* setup inpcb socket too */
1499 inp
->ip_inp
.inp
.inp_socket
= so
;
1500 inp
->sctp_frag_point
= SCTP_DEFAULT_MAXSEGMENT
;
1502 #if !(defined(__OpenBSD__) || defined(__APPLE__))
1504 struct inpcbpolicy
*pcb_sp
= NULL
;
1505 error
= ipsec_init_policy(so
, &pcb_sp
);
1506 /* Arrange to share the policy */
1507 inp
->ip_inp
.inp
.inp_sp
= pcb_sp
;
1508 ((struct in6pcb
*)(&inp
->ip_inp
.inp
))->in6p_sp
= pcb_sp
;
1511 /* not sure what to do for openbsd here */
1515 SCTP_ZONE_FREE(sctppcbinfo
.ipi_zone_ep
, inp
);
1516 SCTP_INP_INFO_WUNLOCK();
1520 sctppcbinfo
.ipi_count_ep
++;
1521 #if defined(__FreeBSD__) || defined(__APPLE__) || defined(__DragonFly__)
1522 inp
->ip_inp
.inp
.inp_gencnt
= ++sctppcbinfo
.ipi_gencnt_ep
;
1523 inp
->ip_inp
.inp
.inp_ip_ttl
= ip_defttl
;
1525 inp
->inp_ip_ttl
= ip_defttl
;
1526 inp
->inp_ip_tos
= 0;
1529 so
->so_pcb
= (caddr_t
)inp
;
1531 if ((so
->so_type
== SOCK_DGRAM
) ||
1532 (so
->so_type
== SOCK_SEQPACKET
)) {
1533 /* UDP style socket */
1534 inp
->sctp_flags
= (SCTP_PCB_FLAGS_UDPTYPE
|
1535 SCTP_PCB_FLAGS_UNBOUND
);
1536 inp
->sctp_flags
|= (SCTP_PCB_FLAGS_RECVDATAIOEVNT
);
1537 } else if (so
->so_type
== SOCK_STREAM
) {
1538 /* TCP style socket */
1539 inp
->sctp_flags
= (SCTP_PCB_FLAGS_TCPTYPE
|
1540 SCTP_PCB_FLAGS_UNBOUND
);
1541 inp
->sctp_flags
|= (SCTP_PCB_FLAGS_RECVDATAIOEVNT
);
1544 * unsupported socket type (RAW, etc)- in case we missed
1547 SCTP_ZONE_FREE(sctppcbinfo
.ipi_zone_ep
, inp
);
1548 SCTP_INP_INFO_WUNLOCK();
1549 return (EOPNOTSUPP
);
1551 inp
->sctp_tcbhash
= hashinit(sctp_pcbtblsize
,
1556 #if defined(__NetBSD__) || defined(__OpenBSD__)
1559 &inp
->sctp_hashmark
);
1560 if (inp
->sctp_tcbhash
== NULL
) {
1561 kprintf("Out of SCTP-INPCB->hashinit - no resources\n");
1562 SCTP_ZONE_FREE(sctppcbinfo
.ipi_zone_ep
, inp
);
1563 SCTP_INP_INFO_WUNLOCK();
1567 SCTP_INP_LOCK_INIT(inp
);
1568 SCTP_ASOC_CREATE_LOCK_INIT(inp
);
1569 /* lock the new ep */
1570 SCTP_INP_WLOCK(inp
);
1572 /* add it to the info area */
1573 LIST_INSERT_HEAD(&sctppcbinfo
.listhead
, inp
, sctp_list
);
1574 SCTP_INP_INFO_WUNLOCK();
1576 LIST_INIT(&inp
->sctp_addr_list
);
1577 LIST_INIT(&inp
->sctp_asoc_list
);
1578 TAILQ_INIT(&inp
->sctp_queue_list
);
1579 /* Init the timer structure for signature change */
1580 #if defined (__FreeBSD__) && __FreeBSD_version >= 500000
1581 callout_init(&inp
->sctp_ep
.signature_change
.timer
, 0);
1583 callout_init(&inp
->sctp_ep
.signature_change
.timer
);
1585 inp
->sctp_ep
.signature_change
.type
= SCTP_TIMER_TYPE_NEWCOOKIE
;
1587 /* now init the actual endpoint default data */
1590 /* setup the base timeout information */
1591 m
->sctp_timeoutticks
[SCTP_TIMER_SEND
] = SEC_TO_TICKS(SCTP_SEND_SEC
); /* needed ? */
1592 m
->sctp_timeoutticks
[SCTP_TIMER_INIT
] = SEC_TO_TICKS(SCTP_INIT_SEC
); /* needed ? */
1593 m
->sctp_timeoutticks
[SCTP_TIMER_RECV
] = MSEC_TO_TICKS(sctp_delayed_sack_time_default
);
1594 m
->sctp_timeoutticks
[SCTP_TIMER_HEARTBEAT
] = sctp_heartbeat_interval_default
; /* this is in MSEC */
1595 m
->sctp_timeoutticks
[SCTP_TIMER_PMTU
] = SEC_TO_TICKS(sctp_pmtu_raise_time_default
);
1596 m
->sctp_timeoutticks
[SCTP_TIMER_MAXSHUTDOWN
] = SEC_TO_TICKS(sctp_shutdown_guard_time_default
);
1597 m
->sctp_timeoutticks
[SCTP_TIMER_SIGNATURE
] = SEC_TO_TICKS(sctp_secret_lifetime_default
);
1598 /* all max/min max are in ms */
1599 m
->sctp_maxrto
= sctp_rto_max_default
;
1600 m
->sctp_minrto
= sctp_rto_min_default
;
1601 m
->initial_rto
= sctp_rto_initial_default
;
1602 m
->initial_init_rto_max
= sctp_init_rto_max_default
;
1604 m
->max_open_streams_intome
= MAX_SCTP_STREAMS
;
1606 m
->max_init_times
= sctp_init_rtx_max_default
;
1607 m
->max_send_times
= sctp_assoc_rtx_max_default
;
1608 m
->def_net_failure
= sctp_path_rtx_max_default
;
1609 m
->sctp_sws_sender
= SCTP_SWS_SENDER_DEF
;
1610 m
->sctp_sws_receiver
= SCTP_SWS_RECEIVER_DEF
;
1611 m
->max_burst
= sctp_max_burst_default
;
1612 /* number of streams to pre-open on a association */
1613 m
->pre_open_stream_count
= sctp_nr_outgoing_streams_default
;
1615 /* Add adaption cookie */
1616 m
->adaption_layer_indicator
= 0x504C5253;
1618 /* seed random number generator */
1619 m
->random_counter
= 1;
1620 m
->store_at
= SCTP_SIGNATURE_SIZE
;
1621 #if (defined(__FreeBSD__) && (__FreeBSD_version < 500000)) || defined(__DragonFly__)
1622 read_random_unlimited(m
->random_numbers
, sizeof(m
->random_numbers
));
1623 #elif defined(__APPLE__) || (__FreeBSD_version > 500000)
1624 read_random(m
->random_numbers
, sizeof(m
->random_numbers
));
1625 #elif defined(__OpenBSD__)
1626 get_random_bytes(m
->random_numbers
, sizeof(m
->random_numbers
));
1627 #elif defined(__NetBSD__) && NRND > 0
1628 rnd_extract_data(m
->random_numbers
, sizeof(m
->random_numbers
),
1632 u_int32_t
*ranm
, *ranp
;
1633 ranp
= (u_int32_t
*)&m
->random_numbers
;
1634 ranm
= ranp
+ (SCTP_SIGNATURE_ALOC_SIZE
/sizeof(u_int32_t
));
1635 if ((u_long
)ranp
% 4) {
1636 /* not a even boundary? */
1637 ranp
= (u_int32_t
*)SCTP_SIZE32((u_long
)ranp
);
1639 while (ranp
< ranm
) {
1645 sctp_fill_random_store(m
);
1647 /* Minimum cookie size */
1648 m
->size_of_a_cookie
= (sizeof(struct sctp_init_msg
) * 2) +
1649 sizeof(struct sctp_state_cookie
);
1650 m
->size_of_a_cookie
+= SCTP_SIGNATURE_SIZE
;
1652 /* Setup the initial secret */
1653 SCTP_GETTIME_TIMEVAL(&time
);
1654 m
->time_of_secret_change
= time
.tv_sec
;
1656 for (i
= 0; i
< SCTP_NUMBER_OF_SECRETS
; i
++) {
1657 m
->secret_key
[0][i
] = sctp_select_initial_TSN(m
);
1659 sctp_timer_start(SCTP_TIMER_TYPE_NEWCOOKIE
, inp
, NULL
, NULL
);
1661 /* How long is a cookie good for ? */
1662 m
->def_cookie_life
= sctp_valid_cookie_life_default
;
1663 SCTP_INP_WUNLOCK(inp
);
1669 sctp_move_pcb_and_assoc(struct sctp_inpcb
*old_inp
, struct sctp_inpcb
*new_inp
,
1670 struct sctp_tcb
*stcb
)
1672 uint16_t lport
, rport
;
1673 struct sctppcbhead
*head
;
1674 struct sctp_laddr
*laddr
, *oladdr
;
1676 SCTP_TCB_UNLOCK(stcb
);
1677 SCTP_INP_INFO_WLOCK();
1678 SCTP_INP_WLOCK(old_inp
);
1679 SCTP_INP_WLOCK(new_inp
);
1680 SCTP_TCB_LOCK(stcb
);
1682 new_inp
->sctp_ep
.time_of_secret_change
=
1683 old_inp
->sctp_ep
.time_of_secret_change
;
1684 memcpy(new_inp
->sctp_ep
.secret_key
, old_inp
->sctp_ep
.secret_key
,
1685 sizeof(old_inp
->sctp_ep
.secret_key
));
1686 new_inp
->sctp_ep
.current_secret_number
=
1687 old_inp
->sctp_ep
.current_secret_number
;
1688 new_inp
->sctp_ep
.last_secret_number
=
1689 old_inp
->sctp_ep
.last_secret_number
;
1690 new_inp
->sctp_ep
.size_of_a_cookie
= old_inp
->sctp_ep
.size_of_a_cookie
;
1692 /* Copy the port across */
1693 lport
= new_inp
->sctp_lport
= old_inp
->sctp_lport
;
1694 rport
= stcb
->rport
;
1695 /* Pull the tcb from the old association */
1696 LIST_REMOVE(stcb
, sctp_tcbhash
);
1697 LIST_REMOVE(stcb
, sctp_tcblist
);
1699 /* Now insert the new_inp into the TCP connected hash */
1700 head
= &sctppcbinfo
.sctp_tcpephash
[SCTP_PCBHASH_ALLADDR((lport
+ rport
),
1701 sctppcbinfo
.hashtcpmark
)];
1703 LIST_INSERT_HEAD(head
, new_inp
, sctp_hash
);
1705 /* Now move the tcb into the endpoint list */
1706 LIST_INSERT_HEAD(&new_inp
->sctp_asoc_list
, stcb
, sctp_tcblist
);
1708 * Question, do we even need to worry about the ep-hash since
1709 * we only have one connection? Probably not :> so lets
1710 * get rid of it and not suck up any kernel memory in that.
1712 SCTP_INP_INFO_WUNLOCK();
1713 stcb
->sctp_socket
= new_inp
->sctp_socket
;
1714 stcb
->sctp_ep
= new_inp
;
1715 if (new_inp
->sctp_tcbhash
!= NULL
) {
1716 FREE(new_inp
->sctp_tcbhash
, M_PCB
);
1717 new_inp
->sctp_tcbhash
= NULL
;
1719 if ((new_inp
->sctp_flags
& SCTP_PCB_FLAGS_BOUNDALL
) == 0) {
1720 /* Subset bound, so copy in the laddr list from the old_inp */
1721 LIST_FOREACH(oladdr
, &old_inp
->sctp_addr_list
, sctp_nxt_addr
) {
1722 laddr
= (struct sctp_laddr
*)SCTP_ZONE_GET(
1723 sctppcbinfo
.ipi_zone_laddr
);
1724 if (laddr
== NULL
) {
1726 * Gak, what can we do? This assoc is really
1727 * HOSED. We probably should send an abort
1731 if (sctp_debug_on
& SCTP_DEBUG_PCB1
) {
1732 kprintf("Association hosed in TCP model, out of laddr memory\n");
1734 #endif /* SCTP_DEBUG */
1737 sctppcbinfo
.ipi_count_laddr
++;
1738 sctppcbinfo
.ipi_gencnt_laddr
++;
1739 bzero(laddr
, sizeof(*laddr
));
1740 laddr
->ifa
= oladdr
->ifa
;
1741 LIST_INSERT_HEAD(&new_inp
->sctp_addr_list
, laddr
,
1743 new_inp
->laddr_count
++;
1746 SCTP_INP_WUNLOCK(new_inp
);
1747 SCTP_INP_WUNLOCK(old_inp
);
1751 sctp_isport_inuse(struct sctp_inpcb
*inp
, uint16_t lport
)
1753 struct sctppcbhead
*head
;
1754 struct sctp_inpcb
*t_inp
;
1756 head
= &sctppcbinfo
.sctp_ephash
[SCTP_PCBHASH_ALLADDR(lport
,
1757 sctppcbinfo
.hashmark
)];
1758 LIST_FOREACH(t_inp
, head
, sctp_hash
) {
1759 if (t_inp
->sctp_lport
!= lport
) {
1762 /* This one is in use. */
1763 /* check the v6/v4 binding issue */
1764 if ((t_inp
->sctp_flags
& SCTP_PCB_FLAGS_BOUND_V6
) &&
1765 #if defined(__FreeBSD__)
1766 (((struct inpcb
*)t_inp
)->inp_flags
& IN6P_IPV6_V6ONLY
)
1768 #if defined(__OpenBSD__)
1769 (0) /* For open bsd we do dual bind only */
1771 (((struct in6pcb
*)t_inp
)->in6p_flags
& IN6P_IPV6_V6ONLY
)
1775 if (inp
->sctp_flags
& SCTP_PCB_FLAGS_BOUND_V6
) {
1776 /* collision in V6 space */
1779 /* inp is BOUND_V4 no conflict */
1782 } else if (t_inp
->sctp_flags
& SCTP_PCB_FLAGS_BOUND_V6
) {
1783 /* t_inp is bound v4 and v6, conflict always */
1786 /* t_inp is bound only V4 */
1787 if ((inp
->sctp_flags
& SCTP_PCB_FLAGS_BOUND_V6
) &&
1788 #if defined(__FreeBSD__)
1789 (((struct inpcb
*)inp
)->inp_flags
& IN6P_IPV6_V6ONLY
)
1791 #if defined(__OpenBSD__)
1792 (0) /* For open bsd we do dual bind only */
1794 (((struct in6pcb
*)inp
)->in6p_flags
& IN6P_IPV6_V6ONLY
)
1801 /* else fall through to conflict */
1808 #if !(defined(__FreeBSD__) || defined(__APPLE__) || defined(__DragonFly__))
1810 * Don't know why, but without this there is an unknown reference when
1811 * compiling NetBSD... hmm
1813 extern void in6_sin6_2_sin (struct sockaddr_in
*, struct sockaddr_in6
*sin6
);
1818 #if (defined(__FreeBSD__) && __FreeBSD_version >= 500000) || defined(__DragonFly__)
1819 sctp_inpcb_bind(struct socket
*so
, struct sockaddr
*addr
, struct thread
*p
)
1821 sctp_inpcb_bind(struct socket
*so
, struct sockaddr
*addr
, struct proc
*p
)
1824 /* bind a ep to a socket address */
1825 struct sctppcbhead
*head
;
1826 struct sctp_inpcb
*inp
, *inp_tmp
;
1827 struct inpcb
*ip_inp
;
1835 inp
= (struct sctp_inpcb
*)so
->so_pcb
;
1836 ip_inp
= (struct inpcb
*)so
->so_pcb
;
1838 if (sctp_debug_on
& SCTP_DEBUG_PCB1
) {
1840 kprintf("Bind called port:%d\n",
1841 ntohs(((struct sockaddr_in
*)addr
)->sin_port
));
1843 sctp_print_address(addr
);
1846 #endif /* SCTP_DEBUG */
1847 if ((inp
->sctp_flags
& SCTP_PCB_FLAGS_UNBOUND
) == 0) {
1848 /* already did a bind, subsequent binds NOT allowed ! */
1853 if (addr
->sa_family
== AF_INET
) {
1854 struct sockaddr_in
*sin
;
1856 /* IPV6_V6ONLY socket? */
1858 #if defined(__FreeBSD__) || defined(__APPLE__) || defined(__DragonFly__)
1859 (ip_inp
->inp_flags
& IN6P_IPV6_V6ONLY
)
1861 #if defined(__OpenBSD__)
1862 (0) /* For openbsd we do dual bind only */
1864 (((struct in6pcb
*)inp
)->in6p_flags
& IN6P_IPV6_V6ONLY
)
1871 if (addr
->sa_len
!= sizeof(*sin
))
1874 sin
= (struct sockaddr_in
*)addr
;
1875 lport
= sin
->sin_port
;
1877 if (sin
->sin_addr
.s_addr
!= INADDR_ANY
) {
1880 } else if (addr
->sa_family
== AF_INET6
) {
1881 /* Only for pure IPv6 Address. (No IPv4 Mapped!) */
1882 struct sockaddr_in6
*sin6
;
1884 sin6
= (struct sockaddr_in6
*)addr
;
1886 if (addr
->sa_len
!= sizeof(*sin6
))
1889 lport
= sin6
->sin6_port
;
1890 if (!IN6_IS_ADDR_UNSPECIFIED(&sin6
->sin6_addr
)) {
1892 /* KAME hack: embed scopeid */
1893 #if defined(SCTP_BASE_FREEBSD) || defined(__APPLE__) || defined(__DragonFly__)
1894 if (in6_embedscope(&sin6
->sin6_addr
, sin6
,
1897 #elif defined(__FreeBSD__)
1898 error
= scope6_check_id(sin6
, ip6_use_defzone
);
1902 if (in6_embedscope(&sin6
->sin6_addr
, sin6
) != 0) {
1907 #ifndef SCOPEDROUTING
1908 /* this must be cleared for ifa_ifwithaddr() */
1909 sin6
->sin6_scope_id
= 0;
1910 #endif /* SCOPEDROUTING */
1912 return (EAFNOSUPPORT
);
1915 SCTP_INP_INFO_WLOCK();
1916 SCTP_INP_WLOCK(inp
);
1917 /* increase our count due to the unlock we do */
1918 SCTP_INP_INCR_REF(inp
);
1921 * Did the caller specify a port? if so we must see if a
1922 * ep already has this one bound.
1924 /* got to be root to get at low ports */
1925 if (ntohs(lport
) < IPPORT_RESERVED
) {
1928 #if __FreeBSD_version >= 500000
1929 suser_cred(p
->td_ucred
, 0)
1933 #elif defined(__NetBSD__) || defined(__APPLE__)
1934 suser(p
->p_ucred
, &p
->p_acflag
)
1935 #elif defined(__DragonFly__)
1941 SCTP_INP_DECR_REF(inp
);
1942 SCTP_INP_WUNLOCK(inp
);
1943 SCTP_INP_INFO_WUNLOCK();
1948 SCTP_INP_DECR_REF(inp
);
1949 SCTP_INP_WUNLOCK(inp
);
1950 SCTP_INP_INFO_WUNLOCK();
1953 SCTP_INP_WUNLOCK(inp
);
1954 inp_tmp
= sctp_pcb_findep(addr
, 0, 1);
1955 if (inp_tmp
!= NULL
) {
1956 /* lock guy returned and lower count
1957 * note that we are not bound so inp_tmp
1958 * should NEVER be inp. And it is this
1959 * inp (inp_tmp) that gets the reference
1960 * bump, so we must lower it.
1962 SCTP_INP_WLOCK(inp_tmp
);
1963 SCTP_INP_DECR_REF(inp_tmp
);
1964 SCTP_INP_WUNLOCK(inp_tmp
);
1967 SCTP_INP_INFO_WUNLOCK();
1968 return (EADDRNOTAVAIL
);
1970 SCTP_INP_WLOCK(inp
);
	/* verify that the lport is not in use by a singleton */
1973 if (sctp_isport_inuse(inp
, lport
)) {
1974 /* Sorry someone already has this one bound */
1975 SCTP_INP_DECR_REF(inp
);
1976 SCTP_INP_WUNLOCK(inp
);
1977 SCTP_INP_INFO_WUNLOCK();
1978 return (EADDRNOTAVAIL
);
1983 * get any port but lets make sure no one has any address
1984 * with this port bound
1988 * setup the inp to the top (I could use the union but this
1991 uint32_t port_guess
;
1992 uint16_t port_attempt
;
1996 port_guess
= sctp_select_initial_TSN(&inp
->sctp_ep
);
1997 port_attempt
= (port_guess
& 0x0000ffff);
1998 if (port_attempt
== 0) {
2001 if (port_attempt
< IPPORT_RESERVED
) {
2002 port_attempt
+= IPPORT_RESERVED
;
2005 if (sctp_isport_inuse(inp
, htons(port_attempt
)) == 0) {
2006 /* got a port we can use */
2010 /* try upper half */
2012 port_attempt
= ((port_guess
>> 16) & 0x0000ffff);
2013 if (port_attempt
== 0) {
2016 if (port_attempt
< IPPORT_RESERVED
) {
2017 port_attempt
+= IPPORT_RESERVED
;
2019 if (sctp_isport_inuse(inp
, htons(port_attempt
)) == 0) {
2020 /* got a port we can use */
2024 /* try two half's added together */
2026 port_attempt
= (((port_guess
>> 16) & 0x0000ffff) + (port_guess
& 0x0000ffff));
2027 if (port_attempt
== 0) {
2028 /* get a new random number */
2031 if (port_attempt
< IPPORT_RESERVED
) {
2032 port_attempt
+= IPPORT_RESERVED
;
2034 if (sctp_isport_inuse(inp
, htons(port_attempt
)) == 0) {
2035 /* got a port we can use */
2040 /* we don't get out of the loop until we have a port */
2041 lport
= htons(port_attempt
);
2043 SCTP_INP_DECR_REF(inp
);
2044 if (inp
->sctp_flags
& (SCTP_PCB_FLAGS_SOCKET_GONE
|SCTP_PCB_FLAGS_SOCKET_ALLGONE
)) {
2045 /* this really should not happen. The guy
2046 * did a non-blocking bind and then did a close
2049 SCTP_INP_WUNLOCK(inp
);
2050 SCTP_INP_INFO_WUNLOCK();
2053 /* ok we look clear to give out this port, so lets setup the binding */
2055 /* binding to all addresses, so just set in the proper flags */
2056 inp
->sctp_flags
|= (SCTP_PCB_FLAGS_BOUNDALL
|
2057 SCTP_PCB_FLAGS_DO_ASCONF
);
2058 /* set the automatic addr changes from kernel flag */
2059 if (sctp_auto_asconf
== 0) {
2060 inp
->sctp_flags
&= ~SCTP_PCB_FLAGS_AUTO_ASCONF
;
2062 inp
->sctp_flags
|= SCTP_PCB_FLAGS_AUTO_ASCONF
;
2066 * bind specific, make sure flags is off and add a new address
2067 * structure to the sctp_addr_list inside the ep structure.
2069 * We will need to allocate one and insert it at the head.
2070 * The socketopt call can just insert new addresses in there
2071 * as well. It will also have to do the embed scope kame hack
2072 * too (before adding).
2075 struct sockaddr_storage store_sa
;
2077 memset(&store_sa
, 0, sizeof(store_sa
));
2078 if (addr
->sa_family
== AF_INET
) {
2079 struct sockaddr_in
*sin
;
2081 sin
= (struct sockaddr_in
*)&store_sa
;
2082 memcpy(sin
, addr
, sizeof(struct sockaddr_in
));
2084 } else if (addr
->sa_family
== AF_INET6
) {
2085 struct sockaddr_in6
*sin6
;
2087 sin6
= (struct sockaddr_in6
*)&store_sa
;
2088 memcpy(sin6
, addr
, sizeof(struct sockaddr_in6
));
2089 sin6
->sin6_port
= 0;
2092 * first find the interface with the bound address
2093 * need to zero out the port to find the address! yuck!
2094 * can't do this earlier since need port for sctp_pcb_findep()
2096 ifa
= sctp_find_ifa_by_addr((struct sockaddr
*)&store_sa
);
2098 /* Can't find an interface with that address */
2099 SCTP_INP_WUNLOCK(inp
);
2100 SCTP_INP_INFO_WUNLOCK();
2101 return (EADDRNOTAVAIL
);
2103 if (addr
->sa_family
== AF_INET6
) {
2104 struct in6_ifaddr
*ifa6
;
2105 ifa6
= (struct in6_ifaddr
*)ifa
;
2107 * allow binding of deprecated addresses as per
2108 * RFC 2462 and ipng discussion
2110 if (ifa6
->ia6_flags
& (IN6_IFF_DETACHED
|
2112 IN6_IFF_NOTREADY
)) {
2113 /* Can't bind a non-existent addr. */
2114 SCTP_INP_WUNLOCK(inp
);
2115 SCTP_INP_INFO_WUNLOCK();
2119 /* we're not bound all */
2120 inp
->sctp_flags
&= ~SCTP_PCB_FLAGS_BOUNDALL
;
2121 #if 0 /* use sysctl now */
2122 /* don't allow automatic addr changes from kernel */
2123 inp
->sctp_flags
&= ~SCTP_PCB_FLAGS_AUTO_ASCONF
;
2125 /* set the automatic addr changes from kernel flag */
2126 if (sctp_auto_asconf
== 0) {
2127 inp
->sctp_flags
&= ~SCTP_PCB_FLAGS_AUTO_ASCONF
;
2129 inp
->sctp_flags
|= SCTP_PCB_FLAGS_AUTO_ASCONF
;
2131 /* allow bindx() to send ASCONF's for binding changes */
2132 inp
->sctp_flags
|= SCTP_PCB_FLAGS_DO_ASCONF
;
2133 /* add this address to the endpoint list */
2134 error
= sctp_insert_laddr(&inp
->sctp_addr_list
, ifa
);
2136 SCTP_INP_WUNLOCK(inp
);
2137 SCTP_INP_INFO_WUNLOCK();
2142 /* find the bucket */
2143 head
= &sctppcbinfo
.sctp_ephash
[SCTP_PCBHASH_ALLADDR(lport
,
2144 sctppcbinfo
.hashmark
)];
2145 /* put it in the bucket */
2146 LIST_INSERT_HEAD(head
, inp
, sctp_hash
);
2148 if (sctp_debug_on
& SCTP_DEBUG_PCB1
) {
2149 kprintf("Main hash to bind at head:%p, bound port:%d\n", head
, ntohs(lport
));
2152 /* set in the port */
2153 inp
->sctp_lport
= lport
;
2155 /* turn off just the unbound flag */
2156 inp
->sctp_flags
&= ~SCTP_PCB_FLAGS_UNBOUND
;
2157 SCTP_INP_WUNLOCK(inp
);
2158 SCTP_INP_INFO_WUNLOCK();
2164 sctp_iterator_inp_being_freed(struct sctp_inpcb
*inp
, struct sctp_inpcb
*inp_next
)
2166 struct sctp_iterator
*it
;
	/* We enter with only the ITERATOR_LOCK in place and
	 * a write lock on the inp_info stuff.
2171 /* Go through all iterators, we must do this since
2172 * it is possible that some iterator does NOT have
2173 * the lock, but is waiting for it. And the one that
2174 * had the lock has either moved in the last iteration
2175 * or we just cleared it above. We need to find all
2176 * of those guys. The list of iterators should never
2177 * be very big though.
2179 LIST_FOREACH(it
, &sctppcbinfo
.iteratorhead
, sctp_nxt_itr
) {
2180 if (it
== inp
->inp_starting_point_for_iterator
)
2181 /* skip this guy, he's special */
2183 if (it
->inp
== inp
) {
2184 /* This is tricky and we DON'T lock the iterator.
2185 * Reason is he's running but waiting for me since
2186 * inp->inp_starting_point_for_iterator has the lock
2187 * on me (the guy above we skipped). This tells us
2188 * its is not running but waiting for inp->inp_starting_point_for_iterator
2189 * to be released by the guy that does have our INP in a lock.
2191 if (it
->iterator_flags
& SCTP_ITERATOR_DO_SINGLE_INP
) {
2195 /* set him up to do the next guy not me */
2201 it
= inp
->inp_starting_point_for_iterator
;
2203 if (it
->iterator_flags
& SCTP_ITERATOR_DO_SINGLE_INP
) {
2212 /* release sctp_inpcb unbind the port */
2214 sctp_inpcb_free(struct sctp_inpcb *inp, int immediate)
2217 * Here we free an endpoint. We must find it (if it is in the Hash
2218 * table) and remove it from there. Then we must also find it in
2219 * the overall list and remove it from there. After all removals are
2220 * complete then any timer has to be stopped. Then start the actual
2222 * a) Any local lists.
2223 * b) Any associations.
2224 * c) The hash of all associations.
2225 * d) finally the ep itself.
2228 struct sctp_inpcb *inp_save;
2229 struct sctp_tcb *asoc, *nasoc;
2230 struct sctp_laddr *laddr, *nladdr;
2231 struct inpcb *ip_pcb;
2233 struct sctp_socket_q_list *sq;
2234 #if !defined(__FreeBSD__) || __FreeBSD_version < 500000
2240 SCTP_ASOC_CREATE_LOCK(inp);
2241 SCTP_INP_WLOCK(inp);
2243 if (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE) {
2244 /* been here before */
2246 kprintf("Endpoint was all gone (dup free)?\n");
2247 SCTP_INP_WUNLOCK(inp);
2248 SCTP_ASOC_CREATE_UNLOCK(inp);
2251 sctp_timer_stop(SCTP_TIMER_TYPE_NEWCOOKIE, inp, NULL, NULL);
2254 sctp_m_freem(inp->control);
2255 inp->control = NULL;
2258 sctp_m_freem(inp->pkt);
2261 so = inp->sctp_socket;
2263 ip_pcb = &inp->ip_inp.inp; /* we could just cast the main
2264 * pointer here but I will
2265 * be nice :> (i.e. ip_pcb = ep;)
2268 if (immediate
== 0) {
2271 for ((asoc
= LIST_FIRST(&inp
->sctp_asoc_list
)); asoc
!= NULL
;
2273 nasoc
= LIST_NEXT(asoc
, sctp_tcblist
);
2274 if ((SCTP_GET_STATE(&asoc
->asoc
) == SCTP_STATE_COOKIE_WAIT
) ||
2275 (SCTP_GET_STATE(&asoc
->asoc
) == SCTP_STATE_COOKIE_ECHOED
)) {
2276 /* Just abandon things in the front states */
2277 SCTP_TCB_LOCK(asoc
);
2278 SCTP_INP_WUNLOCK(inp
);
2279 sctp_free_assoc(inp
, asoc
);
2280 SCTP_INP_WLOCK(inp
);
2283 asoc
->asoc
.state
|= SCTP_STATE_CLOSED_SOCKET
;
2285 if ((asoc
->asoc
.size_on_delivery_queue
> 0) ||
2286 (asoc
->asoc
.size_on_reasm_queue
> 0) ||
2287 (asoc
->asoc
.size_on_all_streams
> 0) ||
2288 (so
&& (so
->so_rcv
.ssb_cc
> 0))
2290 /* Left with Data unread */
2291 struct mbuf
*op_err
;
2292 MGET(op_err
, MB_DONTWAIT
, MT_DATA
);
2294 /* Fill in the user initiated abort */
2295 struct sctp_paramhdr
*ph
;
2297 sizeof(struct sctp_paramhdr
);
2299 struct sctp_paramhdr
*);
2300 ph
->param_type
= htons(
2301 SCTP_CAUSE_USER_INITIATED_ABT
);
2302 ph
->param_length
= htons(op_err
->m_len
);
2304 SCTP_TCB_LOCK(asoc
);
2305 sctp_send_abort_tcb(asoc
, op_err
);
2307 SCTP_INP_WUNLOCK(inp
);
2308 sctp_free_assoc(inp
, asoc
);
2309 SCTP_INP_WLOCK(inp
);
2311 } else if (TAILQ_EMPTY(&asoc
->asoc
.send_queue
) &&
2312 TAILQ_EMPTY(&asoc
->asoc
.sent_queue
)) {
2313 if ((SCTP_GET_STATE(&asoc
->asoc
) != SCTP_STATE_SHUTDOWN_SENT
) &&
2314 (SCTP_GET_STATE(&asoc
->asoc
) != SCTP_STATE_SHUTDOWN_ACK_SENT
)) {
2315 /* there is nothing queued to send, so I send shutdown */
2316 SCTP_TCB_LOCK(asoc
);
2317 sctp_send_shutdown(asoc
, asoc
->asoc
.primary_destination
);
2318 asoc
->asoc
.state
= SCTP_STATE_SHUTDOWN_SENT
;
2319 sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWN
, asoc
->sctp_ep
, asoc
,
2320 asoc
->asoc
.primary_destination
);
2321 sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNGUARD
, asoc
->sctp_ep
, asoc
,
2322 asoc
->asoc
.primary_destination
);
2323 sctp_chunk_output(inp
, asoc
, 1);
2324 SCTP_TCB_UNLOCK(asoc
);
2327 /* mark into shutdown pending */
2328 asoc
->asoc
.state
|= SCTP_STATE_SHUTDOWN_PENDING
;
2332 /* now is there some left in our SHUTDOWN state? */
2334 inp
->sctp_flags
|= SCTP_PCB_FLAGS_SOCKET_GONE
;
2336 SCTP_INP_WUNLOCK(inp
);
2337 SCTP_ASOC_CREATE_UNLOCK(inp
);
2341 #if defined(__FreeBSD__) && __FreeBSD_version >= 503000
2342 if (inp
->refcount
) {
2343 sctp_timer_start(SCTP_TIMER_TYPE_INPKILL
, inp
, NULL
, NULL
);
2344 SCTP_INP_WUNLOCK(inp
);
2345 SCTP_ASOC_CREATE_UNLOCK(inp
);
2349 inp
->sctp_flags
|= SCTP_PCB_FLAGS_SOCKET_ALLGONE
;
2350 #if !defined(__FreeBSD__) || __FreeBSD_version < 500000
2351 rt
= ip_pcb
->inp_route
.ro_rt
;
2354 callout_stop(&inp
->sctp_ep
.signature_change
.timer
);
2357 /* First take care of socket level things */
2360 /* XXX IPsec cleanup here */
2362 if (ip_pcb
->inp_tdb_in
)
2363 TAILQ_REMOVE(&ip_pcb
->inp_tdb_in
->tdb_inp_in
,
2364 ip_pcb
, inp_tdb_in_next
);
2365 if (ip_pcb
->inp_tdb_out
)
2366 TAILQ_REMOVE(&ip_pcb
->inp_tdb_out
->tdb_inp_out
, ip_pcb
,
2368 if (ip_pcb
->inp_ipsec_localid
)
2369 ipsp_reffree(ip_pcb
->inp_ipsec_localid
);
2370 if (ip_pcb
->inp_ipsec_remoteid
)
2371 ipsp_reffree(ip_pcb
->inp_ipsec_remoteid
);
2372 if (ip_pcb
->inp_ipsec_localcred
)
2373 ipsp_reffree(ip_pcb
->inp_ipsec_localcred
);
2374 if (ip_pcb
->inp_ipsec_remotecred
)
2375 ipsp_reffree(ip_pcb
->inp_ipsec_remotecred
);
2376 if (ip_pcb
->inp_ipsec_localauth
)
2377 ipsp_reffree(ip_pcb
->inp_ipsec_localauth
);
2378 if (ip_pcb
->inp_ipsec_remoteauth
)
2379 ipsp_reffree(ip_pcb
->inp_ipsec_remoteauth
);
2382 ipsec4_delete_pcbpolicy(ip_pcb
);
2385 #if defined(__FreeBSD__) && __FreeBSD_version > 500000
2390 #if defined(__FreeBSD__) && __FreeBSD_version > 500000
2397 if (ip_pcb
->inp_options
) {
2398 m_free(ip_pcb
->inp_options
);
2399 ip_pcb
->inp_options
= 0;
2401 #if !defined(__FreeBSD__) || __FreeBSD_version < 500000
2404 ip_pcb
->inp_route
.ro_rt
= 0;
2407 if (ip_pcb
->inp_moptions
) {
2408 ip_freemoptions(ip_pcb
->inp_moptions
);
2409 ip_pcb
->inp_moptions
= 0;
2411 #if !(defined(__FreeBSD__) || defined(__APPLE__) || defined(__DragonFly__))
2414 ip_pcb
->inp_vflag
= 0;
2417 /* Now the sctp_pcb things */
2420 * free each asoc if it is not already closed/free. we can't use
2421 * the macro here since le_next will get freed as part of the
2422 * sctp_free_assoc() call.
2425 for ((asoc
= LIST_FIRST(&inp
->sctp_asoc_list
)); asoc
!= NULL
;
2427 nasoc
= LIST_NEXT(asoc
, sctp_tcblist
);
2428 SCTP_TCB_LOCK(asoc
);
2429 if (SCTP_GET_STATE(&asoc
->asoc
) != SCTP_STATE_COOKIE_WAIT
) {
2430 struct mbuf
*op_err
;
2431 MGET(op_err
, MB_DONTWAIT
, MT_DATA
);
2433 /* Fill in the user initiated abort */
2434 struct sctp_paramhdr
*ph
;
2435 op_err
->m_len
= sizeof(struct sctp_paramhdr
);
2436 ph
= mtod(op_err
, struct sctp_paramhdr
*);
2437 ph
->param_type
= htons(
2438 SCTP_CAUSE_USER_INITIATED_ABT
);
2439 ph
->param_length
= htons(op_err
->m_len
);
2441 sctp_send_abort_tcb(asoc
, op_err
);
2445 * sctp_free_assoc() will call sctp_inpcb_free(),
2446 * if SCTP_PCB_FLAGS_SOCKET_GONE set.
2447 * So, we clear it before sctp_free_assoc() making sure
2448 * no double sctp_inpcb_free().
2450 inp
->sctp_flags
&= ~SCTP_PCB_FLAGS_SOCKET_GONE
;
2451 SCTP_INP_WUNLOCK(inp
);
2452 sctp_free_assoc(inp
, asoc
);
2453 SCTP_INP_WLOCK(inp
);
2455 while ((sq
= TAILQ_FIRST(&inp
->sctp_queue_list
)) != NULL
) {
2456 TAILQ_REMOVE(&inp
->sctp_queue_list
, sq
, next_sq
);
2457 SCTP_ZONE_FREE(sctppcbinfo
.ipi_zone_sockq
, sq
);
2458 sctppcbinfo
.ipi_count_sockq
--;
2459 sctppcbinfo
.ipi_gencnt_sockq
++;
2461 inp
->sctp_socket
= 0;
2462 /* Now first we remove ourselves from the overall list of all EP's */
2464 /* Unlock inp first, need correct order */
2465 SCTP_INP_WUNLOCK(inp
);
2466 /* now iterator lock */
2467 SCTP_ITERATOR_LOCK();
2469 SCTP_INP_INFO_WLOCK();
2470 /* now reget the inp lock */
2471 SCTP_INP_WLOCK(inp
);
2473 inp_save
= LIST_NEXT(inp
, sctp_list
);
2474 LIST_REMOVE(inp
, sctp_list
);
2476 * Now the question comes as to if this EP was ever bound at all.
2477 * If it was, then we must pull it out of the EP hash list.
2479 if ((inp
->sctp_flags
& SCTP_PCB_FLAGS_UNBOUND
) !=
2480 SCTP_PCB_FLAGS_UNBOUND
) {
2482 * ok, this guy has been bound. It's port is somewhere
2483 * in the sctppcbinfo hash table. Remove it!
2485 LIST_REMOVE(inp
, sctp_hash
);
2487 /* fix any iterators only after out of the list */
2488 sctp_iterator_inp_being_freed(inp
, inp_save
);
2489 SCTP_ITERATOR_UNLOCK();
2491 * if we have an address list the following will free the list of
2492 * ifaddr's that are set into this ep. Again macro limitations here,
2493 * since the LIST_FOREACH could be a bad idea.
2495 for ((laddr
= LIST_FIRST(&inp
->sctp_addr_list
)); laddr
!= NULL
;
2497 nladdr
= LIST_NEXT(laddr
, sctp_nxt_addr
);
2498 LIST_REMOVE(laddr
, sctp_nxt_addr
);
2499 SCTP_ZONE_FREE(sctppcbinfo
.ipi_zone_laddr
, laddr
);
2500 sctppcbinfo
.ipi_gencnt_laddr
++;
2501 sctppcbinfo
.ipi_count_laddr
--;
2503 /* Now lets see about freeing the EP hash table. */
2504 if (inp
->sctp_tcbhash
!= NULL
) {
2505 FREE(inp
->sctp_tcbhash
, M_PCB
);
2506 inp
->sctp_tcbhash
= 0;
2508 SCTP_INP_WUNLOCK(inp
);
2509 SCTP_ASOC_CREATE_UNLOCK(inp
);
2510 SCTP_INP_LOCK_DESTROY(inp
);
2511 SCTP_ASOC_CREATE_LOCK_DESTROY(inp
);
2513 /* Now we must put the ep memory back into the zone pool */
2514 SCTP_ZONE_FREE(sctppcbinfo
.ipi_zone_ep
, inp
);
2515 sctppcbinfo
.ipi_count_ep
--;
2517 SCTP_INP_INFO_WUNLOCK();
2523 sctp_findnet(struct sctp_tcb *stcb, struct sockaddr *addr)
2525 struct sctp_nets *net;
2526 struct sockaddr_in *sin;
2527 struct sockaddr_in6 *sin6;
2528 /* use the peer's/remote port for lookup if unspecified */
2529 sin = (struct sockaddr_in *)addr;
2530 sin6 = (struct sockaddr_in6 *)addr;
2531 #if 0 /* why do we need to check the port for a nets list on an assoc? */
2532 if (stcb->rport != sin->sin_port) {
2533 /* we cheat and just a sin for this test */
2537 /* locate the address */
2538 TAILQ_FOREACH(net, &stcb->asoc.nets, sctp_next) {
2539 if (sctp_cmpaddr(addr, (struct sockaddr *)&net->ro._l_addr))
2547 * add's a remote endpoint address, done with the INIT/INIT-ACK
2548 * as well as when a ASCONF arrives that adds it. It will also
2549 * initialize all the cwnd stats of stuff.
2552 sctp_is_address_on_local_host(struct sockaddr *addr)
2556 TAILQ_FOREACH(ifn, &ifnet, if_list) {
2557 struct ifaddr_container *ifac;
2559 TAILQ_FOREACH(ifac, &ifn->if_addrheads[mycpuid], ifa_link) {
2560 struct ifaddr *ifa = ifac->ifa;
2562 if (addr->sa_family == ifa->ifa_addr->sa_family) {
2564 if (addr->sa_family == AF_INET) {
2565 struct sockaddr_in *sin, *sin_c;
2566 sin = (struct sockaddr_in *)addr;
2567 sin_c = (struct sockaddr_in *)
2569 if (sin->sin_addr.s_addr ==
2570 sin_c->sin_addr.s_addr) {
2571 /* we are on the same machine */
2574 } else if (addr->sa_family == AF_INET6) {
2575 struct sockaddr_in6 *sin6, *sin_c6;
2576 sin6 = (struct sockaddr_in6 *)addr;
2577 sin_c6 = (struct sockaddr_in6 *)
2579 if (SCTP6_ARE_ADDR_EQUAL(&sin6->sin6_addr,
2580 &sin_c6->sin6_addr)) {
2581 /* we are on the same machine */
2592 sctp_add_remote_addr(struct sctp_tcb
*stcb
, struct sockaddr
*newaddr
,
2593 int set_scope
, int from
)
2596 * The following is redundant to the same lines in the
2597 * sctp_aloc_assoc() but is needed since other's call the add
2600 struct sctp_nets
*net
, *netfirst
;
2604 if (sctp_debug_on
& SCTP_DEBUG_PCB1
) {
2605 kprintf("Adding an address (from:%d) to the peer: ", from
);
2606 sctp_print_address(newaddr
);
2609 netfirst
= sctp_findnet(stcb
, newaddr
);
2612 * Lie and return ok, we don't want to make the association
2613 * go away for this behavior. It will happen in the TCP model
2614 * in a connected socket. It does not reach the hash table
2615 * until after the association is built so it can't be found.
2616 * Mark as reachable, since the initial creation will have
2617 * been cleared and the NOT_IN_ASSOC flag will have been
2618 * added... and we don't want to end up removing it back out.
2620 if (netfirst
->dest_state
& SCTP_ADDR_UNCONFIRMED
) {
2621 netfirst
->dest_state
= (SCTP_ADDR_REACHABLE
|
2622 SCTP_ADDR_UNCONFIRMED
);
2624 netfirst
->dest_state
= SCTP_ADDR_REACHABLE
;
2630 if (newaddr
->sa_family
== AF_INET
) {
2631 struct sockaddr_in
*sin
;
2632 sin
= (struct sockaddr_in
*)newaddr
;
2633 if (sin
->sin_addr
.s_addr
== 0) {
2634 /* Invalid address */
2637 /* zero out the bzero area */
2638 memset(&sin
->sin_zero
, 0, sizeof(sin
->sin_zero
));
2640 /* assure len is set */
2641 sin
->sin_len
= sizeof(struct sockaddr_in
);
2643 #ifdef SCTP_DONT_DO_PRIVADDR_SCOPE
2644 stcb
->ipv4_local_scope
= 1;
2646 if (IN4_ISPRIVATE_ADDRESS(&sin
->sin_addr
)) {
2647 stcb
->asoc
.ipv4_local_scope
= 1;
2649 #endif /* SCTP_DONT_DO_PRIVADDR_SCOPE */
2651 if (sctp_is_address_on_local_host(newaddr
)) {
2652 stcb
->asoc
.loopback_scope
= 1;
2653 stcb
->asoc
.ipv4_local_scope
= 1;
2654 stcb
->asoc
.local_scope
= 1;
2655 stcb
->asoc
.site_scope
= 1;
2660 if (sctp_is_address_on_local_host(newaddr
)) {
2661 stcb
->asoc
.loopback_scope
= 1;
2662 stcb
->asoc
.ipv4_local_scope
= 1;
2663 stcb
->asoc
.local_scope
= 1;
2664 stcb
->asoc
.site_scope
= 1;
2667 /* Validate the address is in scope */
2668 if ((IN4_ISPRIVATE_ADDRESS(&sin
->sin_addr
)) &&
2669 (stcb
->asoc
.ipv4_local_scope
== 0)) {
2673 } else if (newaddr
->sa_family
== AF_INET6
) {
2674 struct sockaddr_in6
*sin6
;
2675 sin6
= (struct sockaddr_in6
*)newaddr
;
2676 if (IN6_IS_ADDR_UNSPECIFIED(&sin6
->sin6_addr
)) {
2677 /* Invalid address */
2680 /* assure len is set */
2681 sin6
->sin6_len
= sizeof(struct sockaddr_in6
);
2683 if (sctp_is_address_on_local_host(newaddr
)) {
2684 stcb
->asoc
.loopback_scope
= 1;
2685 stcb
->asoc
.local_scope
= 1;
2686 stcb
->asoc
.ipv4_local_scope
= 1;
2687 stcb
->asoc
.site_scope
= 1;
2688 } else if (IN6_IS_ADDR_LINKLOCAL(&sin6
->sin6_addr
)) {
2690 * If the new destination is a LINK_LOCAL
2691 * we must have common site scope. Don't set
2692 * the local scope since we may not share all
2693 * links, only loopback can do this.
2694 * Links on the local network would also
2695 * be on our private network for v4 too.
2697 stcb
->asoc
.ipv4_local_scope
= 1;
2698 stcb
->asoc
.site_scope
= 1;
2699 } else if (IN6_IS_ADDR_SITELOCAL(&sin6
->sin6_addr
)) {
2701 * If the new destination is SITE_LOCAL
2702 * then we must have site scope in common.
2704 stcb
->asoc
.site_scope
= 1;
2709 if (sctp_is_address_on_local_host(newaddr
)) {
2710 stcb
->asoc
.loopback_scope
= 1;
2711 stcb
->asoc
.ipv4_local_scope
= 1;
2712 stcb
->asoc
.local_scope
= 1;
2713 stcb
->asoc
.site_scope
= 1;
2716 /* Validate the address is in scope */
2717 if (IN6_IS_ADDR_LOOPBACK(&sin6
->sin6_addr
) &&
2718 (stcb
->asoc
.loopback_scope
== 0)) {
2720 } else if (IN6_IS_ADDR_LINKLOCAL(&sin6
->sin6_addr
) &&
2721 (stcb
->asoc
.local_scope
== 0)) {
2723 } else if (IN6_IS_ADDR_SITELOCAL(&sin6
->sin6_addr
) &&
2724 (stcb
->asoc
.site_scope
== 0)) {
2729 /* not supported family type */
2732 net
= (struct sctp_nets
*)SCTP_ZONE_GET(sctppcbinfo
.ipi_zone_net
);
2736 sctppcbinfo
.ipi_count_raddr
++;
2737 sctppcbinfo
.ipi_gencnt_raddr
++;
2738 bzero(net
, sizeof(*net
));
2739 memcpy(&net
->ro
._l_addr
, newaddr
, newaddr
->sa_len
);
2740 if (newaddr
->sa_family
== AF_INET
) {
2741 ((struct sockaddr_in
*)&net
->ro
._l_addr
)->sin_port
= stcb
->rport
;
2742 } else if (newaddr
->sa_family
== AF_INET6
) {
2743 ((struct sockaddr_in6
*)&net
->ro
._l_addr
)->sin6_port
= stcb
->rport
;
2745 net
->addr_is_local
= sctp_is_address_on_local_host(newaddr
);
2746 net
->failure_threshold
= stcb
->asoc
.def_net_failure
;
2747 if (addr_inscope
== 0) {
2749 if (sctp_debug_on
& SCTP_DEBUG_PCB1
) {
2750 kprintf("Adding an address which is OUT OF SCOPE\n");
2752 #endif /* SCTP_DEBUG */
2753 net
->dest_state
= (SCTP_ADDR_REACHABLE
|
2754 SCTP_ADDR_OUT_OF_SCOPE
);
2757 /* 8 is passed by connect_x */
2758 net
->dest_state
= SCTP_ADDR_REACHABLE
;
2760 net
->dest_state
= SCTP_ADDR_REACHABLE
|
2761 SCTP_ADDR_UNCONFIRMED
;
2763 net
->RTO
= stcb
->asoc
.initial_rto
;
2764 stcb
->asoc
.numnets
++;
2767 /* Init the timer structure */
2768 #if defined(__FreeBSD__) && __FreeBSD_version >= 500000
2769 callout_init(&net
->rxt_timer
.timer
, 0);
2770 callout_init(&net
->pmtu_timer
.timer
, 0);
2772 callout_init(&net
->rxt_timer
.timer
);
2773 callout_init(&net
->pmtu_timer
.timer
);
2776 /* Now generate a route for this guy */
2777 /* KAME hack: embed scopeid */
2778 if (newaddr
->sa_family
== AF_INET6
) {
2779 struct sockaddr_in6
*sin6
;
2780 sin6
= (struct sockaddr_in6
*)&net
->ro
._l_addr
;
2781 #if defined(SCTP_BASE_FREEBSD) || defined(__APPLE__) || defined(__DragonFly__)
2782 in6_embedscope(&sin6
->sin6_addr
, sin6
,
2783 &stcb
->sctp_ep
->ip_inp
.inp
, NULL
);
2785 in6_embedscope(&sin6
->sin6_addr
, sin6
);
2787 #ifndef SCOPEDROUTING
2788 sin6
->sin6_scope_id
= 0;
2791 #if defined(__FreeBSD__) || defined(__APPLE__) || defined(__DragonFly__)
2792 rtalloc_ign((struct route
*)&net
->ro
, 0UL);
2794 rtalloc((struct route
*)&net
->ro
);
2796 if (newaddr
->sa_family
== AF_INET6
) {
2797 struct sockaddr_in6
*sin6
;
2798 sin6
= (struct sockaddr_in6
*)&net
->ro
._l_addr
;
2799 in6_recoverscope(sin6
, &sin6
->sin6_addr
, NULL
);
2801 if ((net
->ro
.ro_rt
) &&
2802 (net
->ro
.ro_rt
->rt_ifp
)) {
2803 net
->mtu
= net
->ro
.ro_rt
->rt_ifp
->if_mtu
;
2805 stcb
->asoc
.smallest_mtu
= net
->mtu
;
2807 /* start things off to match mtu of interface please. */
2808 net
->ro
.ro_rt
->rt_rmx
.rmx_mtu
= net
->ro
.ro_rt
->rt_ifp
->if_mtu
;
2810 net
->mtu
= stcb
->asoc
.smallest_mtu
;
2812 if (stcb
->asoc
.smallest_mtu
> net
->mtu
) {
2813 stcb
->asoc
.smallest_mtu
= net
->mtu
;
2815 /* We take the max of the burst limit times a MTU or the INITIAL_CWND.
2816 * We then limit this to 4 MTU's of sending.
2818 net->cwnd = min((net->mtu * 4), max((stcb->asoc.max_burst * net->mtu), SCTP_INITIAL_CWND));
2820 /* we always get at LEAST 2 MTU's */
2821 if (net->cwnd < (2 * net->mtu)) {
2822 net->cwnd = 2 * net->mtu;
2825 net->ssthresh = stcb->asoc.peers_rwnd;
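/*
 * Worked example (assuming a 1500-byte path MTU and a max_burst of 4):
 * cwnd = min(4 * 1500, max(4 * 1500, SCTP_INITIAL_CWND)) = 6000 bytes,
 * no matter how large SCTP_INITIAL_CWND happens to be; the 2-MTU floor
 * above only matters when max_burst and SCTP_INITIAL_CWND are both small.
 * ssthresh simply starts at the peer's advertised receive window.
 */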
2827 net
->src_addr_selected
= 0;
2828 netfirst
= TAILQ_FIRST(&stcb
->asoc
.nets
);
2829 if (net
->ro
.ro_rt
== NULL
) {
2830 /* Since we have no route put it at the back */
2831 TAILQ_INSERT_TAIL(&stcb
->asoc
.nets
, net
, sctp_next
);
2832 } else if (netfirst
== NULL
) {
2833 /* We are the first one in the pool. */
2834 TAILQ_INSERT_HEAD(&stcb
->asoc
.nets
, net
, sctp_next
);
2835 } else if (netfirst
->ro
.ro_rt
== NULL
) {
2837 * First one has NO route. Place this one ahead of the
2840 TAILQ_INSERT_HEAD(&stcb
->asoc
.nets
, net
, sctp_next
);
2841 } else if (net
->ro
.ro_rt
->rt_ifp
!= netfirst
->ro
.ro_rt
->rt_ifp
) {
2843 * This one has a different interface than the one at the
2844 * top of the list. Place it ahead.
2846 TAILQ_INSERT_HEAD(&stcb
->asoc
.nets
, net
, sctp_next
);
2849 * Ok we have the same interface as the first one. Move
2850 * forward until we find either
2851 * a) one with a NULL route... insert ahead of that
2852 * b) one with a different ifp.. insert after that.
2853 * c) end of the list.. insert at the tail.
2855 struct sctp_nets
*netlook
;
2857 netlook
= TAILQ_NEXT(netfirst
, sctp_next
);
2858 if (netlook
== NULL
) {
2859 /* End of the list */
2860 TAILQ_INSERT_TAIL(&stcb
->asoc
.nets
, net
,
2863 } else if (netlook
->ro
.ro_rt
== NULL
) {
2864 /* next one has NO route */
2865 TAILQ_INSERT_BEFORE(netfirst
, net
, sctp_next
);
2867 } else if (netlook
->ro
.ro_rt
->rt_ifp
!=
2868 net
->ro
.ro_rt
->rt_ifp
) {
2869 TAILQ_INSERT_AFTER(&stcb
->asoc
.nets
, netlook
,
2875 } while (netlook
!= NULL
);
2877 /* got to have a primary set */
2878 if (stcb
->asoc
.primary_destination
== 0) {
2879 stcb
->asoc
.primary_destination
= net
;
2880 } else if ((stcb
->asoc
.primary_destination
->ro
.ro_rt
== NULL
) &&
2882 /* No route to current primary adopt new primary */
2883 stcb
->asoc
.primary_destination
= net
;
2885 sctp_timer_start(SCTP_TIMER_TYPE_PATHMTURAISE
, stcb
->sctp_ep
, stcb
,
2893 * allocate an association and add it to the endpoint. The caller must
2894 * be careful to add all additional addresses once they are know right
2895 * away or else the assoc will be may experience a blackout scenario.
2898 sctp_aloc_assoc(struct sctp_inpcb
*inp
, struct sockaddr
*firstaddr
,
2899 int for_a_init
, int *error
, uint32_t override_tag
)
2901 struct sctp_tcb
*stcb
;
2902 struct sctp_association
*asoc
;
2903 struct sctpasochead
*head
;
2908 * Assumption made here:
2909 * Caller has done a sctp_findassociation_ep_addr(ep, addr's);
2910 * to make sure the address does not exist already.
2912 if (sctppcbinfo
.ipi_count_asoc
>= SCTP_MAX_NUM_OF_ASOC
) {
2913 /* Hit max assoc, sorry no more */
2917 SCTP_INP_RLOCK(inp
);
2918 if (inp
->sctp_flags
& SCTP_PCB_FLAGS_IN_TCPPOOL
) {
2920 * If its in the TCP pool, its NOT allowed to create an
2921 * association. The parent listener needs to call
2922 * sctp_aloc_assoc.. or the one-2-many socket. If a
2923 * peeled off, or connected one does this.. its an error.
2925 SCTP_INP_RUNLOCK(inp
);
2931 if (sctp_debug_on
& SCTP_DEBUG_PCB3
) {
2932 kprintf("Allocate an association for peer:");
2934 sctp_print_address(firstaddr
);
2937 kprintf("Port:%d\n",
2938 ntohs(((struct sockaddr_in
*)firstaddr
)->sin_port
));
2940 #endif /* SCTP_DEBUG */
2941 if (firstaddr
->sa_family
== AF_INET
) {
2942 struct sockaddr_in
*sin
;
2943 sin
= (struct sockaddr_in
*)firstaddr
;
2944 if ((sin
->sin_port
== 0) || (sin
->sin_addr
.s_addr
== 0)) {
2945 /* Invalid address */
2947 if (sctp_debug_on
& SCTP_DEBUG_PCB3
) {
2948 kprintf("peer address invalid\n");
2951 SCTP_INP_RUNLOCK(inp
);
2955 rport
= sin
->sin_port
;
2956 } else if (firstaddr
->sa_family
== AF_INET6
) {
2957 struct sockaddr_in6
*sin6
;
2958 sin6
= (struct sockaddr_in6
*)firstaddr
;
2959 if ((sin6
->sin6_port
== 0) ||
2960 (IN6_IS_ADDR_UNSPECIFIED(&sin6
->sin6_addr
))) {
2961 /* Invalid address */
2963 if (sctp_debug_on
& SCTP_DEBUG_PCB3
) {
2964 kprintf("peer address invalid\n");
2967 SCTP_INP_RUNLOCK(inp
);
2971 rport
= sin6
->sin6_port
;
2973 /* not supported family type */
2975 if (sctp_debug_on
& SCTP_DEBUG_PCB3
) {
2976 kprintf("BAD family %d\n", firstaddr
->sa_family
);
2979 SCTP_INP_RUNLOCK(inp
);
2983 SCTP_INP_RUNLOCK(inp
);
2984 if (inp
->sctp_flags
& SCTP_PCB_FLAGS_UNBOUND
) {
2986 * If you have not performed a bind, then we need to do
2987 * the ephemerial bind for you.
2990 if (sctp_debug_on
& SCTP_DEBUG_PCB3
) {
2991 kprintf("Doing implicit BIND\n");
2995 if ((err
= sctp_inpcb_bind(inp
->sctp_socket
,
2996 (struct sockaddr
*)NULL
,
2997 #if (defined(__FreeBSD__) && __FreeBSD_version >= 500000) || defined(__DragonFly__)
2998 (struct thread
*)NULL
3003 /* bind error, probably perm */
3005 if (sctp_debug_on
& SCTP_DEBUG_PCB3
) {
3006 kprintf("BIND FAILS ret:%d\n", err
);
3014 stcb
= (struct sctp_tcb
*)SCTP_ZONE_GET(sctppcbinfo
.ipi_zone_asoc
);
3016 /* out of memory? */
3018 if (sctp_debug_on
& SCTP_DEBUG_PCB3
) {
3019 kprintf("aloc_assoc: no assoc mem left, stcb=NULL\n");
3025 sctppcbinfo
.ipi_count_asoc
++;
3026 sctppcbinfo
.ipi_gencnt_asoc
++;
3028 bzero(stcb
, sizeof(*stcb
));
3030 SCTP_TCB_LOCK_INIT(stcb
);
3031 /* setup back pointer's */
3032 stcb
->sctp_ep
= inp
;
3033 stcb
->sctp_socket
= inp
->sctp_socket
;
3034 if ((err
= sctp_init_asoc(inp
, asoc
, for_a_init
, override_tag
))) {
3036 SCTP_TCB_LOCK_DESTROY (stcb
);
3037 SCTP_ZONE_FREE(sctppcbinfo
.ipi_zone_asoc
, stcb
);
3038 sctppcbinfo
.ipi_count_asoc
--;
3040 if (sctp_debug_on
& SCTP_DEBUG_PCB3
) {
3041 kprintf("aloc_assoc: couldn't init asoc, out of mem?!\n");
3048 stcb
->rport
= rport
;
3049 SCTP_INP_INFO_WLOCK();
3050 SCTP_INP_WLOCK(inp
);
3051 if (inp
->sctp_flags
& (SCTP_PCB_FLAGS_SOCKET_GONE
|SCTP_PCB_FLAGS_SOCKET_ALLGONE
)) {
3052 /* inpcb freed while alloc going on */
3053 SCTP_TCB_LOCK_DESTROY (stcb
);
3054 SCTP_ZONE_FREE(sctppcbinfo
.ipi_zone_asoc
, stcb
);
3055 SCTP_INP_WUNLOCK(inp
);
3056 SCTP_INP_INFO_WUNLOCK();
3057 sctppcbinfo
.ipi_count_asoc
--;
3059 if (sctp_debug_on
& SCTP_DEBUG_PCB3
) {
3060 kprintf("aloc_assoc: couldn't init asoc, out of mem?!\n");
3066 SCTP_TCB_LOCK(stcb
);
3068 /* now that my_vtag is set, add it to the hash */
3069 head
= &sctppcbinfo
.sctp_asochash
[SCTP_PCBHASH_ASOC(stcb
->asoc
.my_vtag
,
3070 sctppcbinfo
.hashasocmark
)];
3071 /* put it in the bucket in the vtag hash of assoc's for the system */
3072 LIST_INSERT_HEAD(head
, stcb
, sctp_asocs
);
3073 SCTP_INP_INFO_WUNLOCK();
3076 if ((err
= sctp_add_remote_addr(stcb
, firstaddr
, 1, 1))) {
3077 /* failure.. memory error? */
3079 FREE(asoc
->strmout
, M_PCB
);
3080 if (asoc
->mapping_array
)
3081 FREE(asoc
->mapping_array
, M_PCB
);
3083 SCTP_ZONE_FREE(sctppcbinfo
.ipi_zone_asoc
, stcb
);
3084 sctppcbinfo
.ipi_count_asoc
--;
3086 if (sctp_debug_on
& SCTP_DEBUG_PCB3
) {
3087 kprintf("aloc_assoc: couldn't add remote addr!\n");
3090 SCTP_TCB_LOCK_DESTROY (stcb
);
3094 /* Init all the timers */
3095 #if defined(__FreeBSD__) && __FreeBSD_version >= 500000
3096 callout_init(&asoc
->hb_timer
.timer
, 0);
3097 callout_init(&asoc
->dack_timer
.timer
, 0);
3098 callout_init(&asoc
->asconf_timer
.timer
, 0);
3099 callout_init(&asoc
->shut_guard_timer
.timer
, 0);
3100 callout_init(&asoc
->autoclose_timer
.timer
, 0);
3101 callout_init(&asoc
->delayed_event_timer
.timer
, 0);
3103 callout_init(&asoc
->hb_timer
.timer
);
3104 callout_init(&asoc
->dack_timer
.timer
);
3105 callout_init(&asoc
->asconf_timer
.timer
);
3106 callout_init(&asoc
->shut_guard_timer
.timer
);
3107 callout_init(&asoc
->autoclose_timer
.timer
);
3108 callout_init(&asoc
->delayed_event_timer
.timer
);
3110 LIST_INSERT_HEAD(&inp
->sctp_asoc_list
, stcb
, sctp_tcblist
);
3111 /* now file the port under the hash as well */
3112 if (inp
->sctp_tcbhash
!= NULL
) {
3113 head
= &inp
->sctp_tcbhash
[SCTP_PCBHASH_ALLADDR(stcb
->rport
,
3114 inp
->sctp_hashmark
)];
3115 LIST_INSERT_HEAD(head
, stcb
, sctp_tcbhash
);
3117 SCTP_INP_WUNLOCK(inp
);
3119 if (sctp_debug_on
& SCTP_DEBUG_PCB1
) {
3120 kprintf("Association %p now allocated\n", stcb
);
3127 sctp_free_remote_addr(struct sctp_nets *net)
3132 if (net->ref_count <= 0) {
3133 /* stop timer if running */
3134 callout_stop(&net->rxt_timer.timer);
3135 callout_stop(&net->pmtu_timer.timer);
3136 net->dest_state = SCTP_ADDR_NOT_REACHABLE;
3137 SCTP_ZONE_FREE(sctppcbinfo.ipi_zone_net, net);
3138 sctppcbinfo.ipi_count_raddr--;
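/*
 * The sctp_nets entry is reference counted: teardown only happens once
 * ref_count has dropped to zero (checked above).  The retransmission and
 * path-MTU callouts are stopped first so no timer is left pointing at
 * freed memory, and only then is the entry returned to the net zone and
 * the global remote-address count decremented.
 */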
3143 * remove a remote endpoint address from an association, it
3144 * will fail if the address does not exist.
3147 sctp_del_remote_addr(struct sctp_tcb *stcb, struct sockaddr *remaddr)
3150 * Here we need to remove a remote address. This is quite simple, we
3151 * first find it in the list of addresses for the association
3152 * (tasoc->asoc.nets) and then if it is there, we do a LIST_REMOVE on
3154 * Note we do not allow it to be removed if there are no other
3157 struct sctp_association *asoc;
3158 struct sctp_nets *net, *net_tmp;
3160 if (asoc->numnets < 2) {
3161 /* Must have at LEAST two remote addresses */
3164 /* locate the address */
3165 for (net = TAILQ_FIRST(&asoc->nets); net != NULL; net = net_tmp) {
3166 net_tmp = TAILQ_NEXT(net, sctp_next);
3167 if (net->ro._l_addr.sa.sa_family != remaddr->sa_family) {
3170 if (sctp_cmpaddr((struct sockaddr *)&net->ro._l_addr,
3172 /* we found the guy */
3174 TAILQ_REMOVE(&asoc->nets, net, sctp_next);
3175 sctp_free_remote_addr(net);
3176 if (net == asoc->primary_destination) {
3178 struct sctp_nets *lnet;
3179 lnet = TAILQ_FIRST(&asoc->nets);
3180 /* Try to find a confirmed primary */
3181 asoc->primary_destination =
3182 sctp_find_alternate_net(stcb, lnet);
3184 if (net == asoc->last_data_chunk_from) {
3186 asoc->last_data_chunk_from =
3187 TAILQ_FIRST(&asoc->nets);
3189 if (net == asoc->last_control_chunk_from) {
3191 asoc->last_control_chunk_from =
3192 TAILQ_FIRST(&asoc->nets);
3194 if (net == asoc->asconf_last_sent_to) {
3196 asoc->asconf_last_sent_to =
3197 TAILQ_FIRST(&asoc->nets);
3208 sctp_add_vtag_to_timewait(struct sctp_inpcb *inp, u_int32_t tag)
3210 struct sctpvtaghead *chain;
3211 struct sctp_tagblock *twait_block;
3214 SCTP_GETTIME_TIMEVAL(&now);
3215 chain = &sctppcbinfo.vtag_timewait[(tag % SCTP_STACK_VTAG_HASH_SIZE)];
3217 if (!LIST_EMPTY(chain)) {
3218 /* Block(s) present, lets find space, and expire on the fly */
3219 LIST_FOREACH(twait_block, chain, sctp_nxt_tagblock) {
3220 for (i = 0; i < SCTP_NUMBER_IN_VTAG_BLOCK; i++) {
3221 if ((twait_block->vtag_block[i].v_tag == 0) &&
3223 twait_block->vtag_block[0].tv_sec_at_expire =
3224 now.tv_sec + SCTP_TIME_WAIT;
3225 twait_block->vtag_block[0].v_tag = tag;
3227 } else if ((twait_block->vtag_block[i].v_tag) &&
3228 ((long)twait_block->vtag_block[i].tv_sec_at_expire >
3230 /* Audit expires this guy */
3231 twait_block->vtag_block[i].tv_sec_at_expire = 0;
3232 twait_block->vtag_block[i].v_tag = 0;
3234 /* Reuse it for my new tag */
3235 twait_block->vtag_block[0].tv_sec_at_expire = now.tv_sec + SCTP_TIME_WAIT;
3236 twait_block->vtag_block[0].v_tag = tag;
3243 * We only do up to the block where we can
3244 * place our tag for audits
3250 /* Need to add a new block to chain */
3252 MALLOC(twait_block, struct sctp_tagblock *,
3253 sizeof(struct sctp_tagblock), M_PCB, M_NOWAIT);
3254 if (twait_block == NULL) {
3257 memset(twait_block, 0, sizeof(struct sctp_timewait));
3258 LIST_INSERT_HEAD(chain, twait_block, sctp_nxt_tagblock);
3259 twait_block->vtag_block[0].tv_sec_at_expire = now.tv_sec +
3261 twait_block->vtag_block[0].v_tag = tag;
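/*
 * Net effect: the verification tag of a dying association is parked in
 * one of the vtag_timewait chains (hashed by tag value) for
 * SCTP_TIME_WAIT seconds.  Free or expired slots in existing tag blocks
 * are recycled first and a fresh block is only allocated when no slot is
 * available, so a just-freed tag is not handed out again until its wait
 * period has elapsed.
 */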
3267 sctp_iterator_asoc_being_freed(struct sctp_inpcb
*inp
, struct sctp_tcb
*stcb
)
3269 struct sctp_iterator
*it
;
3273 /* Unlock the tcb lock we do this so
3274 * we avoid a dead lock scenario where
3275 * the iterator is waiting on the TCB lock
3276 * and the TCB lock is waiting on the iterator
3279 SCTP_ITERATOR_LOCK();
3280 SCTP_INP_INFO_WLOCK();
3281 SCTP_INP_WLOCK(inp
);
3282 SCTP_TCB_LOCK(stcb
);
3284 it
= stcb
->asoc
.stcb_starting_point_for_iterator
;
3288 if (it
->inp
!= stcb
->sctp_ep
) {
3289 /* hm, focused on the wrong one? */
3292 if (it
->stcb
!= stcb
) {
3295 it
->stcb
= LIST_NEXT(stcb
, sctp_tcblist
);
3296 if (it
->stcb
== NULL
) {
3297 /* done with all asoc's in this assoc */
3298 if (it
->iterator_flags
& SCTP_ITERATOR_DO_SINGLE_INP
) {
3302 it
->inp
= LIST_NEXT(inp
, sctp_list
);
3308 * Free the association after un-hashing the remote port.
3311 sctp_free_assoc(struct sctp_inpcb
*inp
, struct sctp_tcb
*stcb
)
3313 struct sctp_association
*asoc
;
3314 struct sctp_nets
*net
, *prev
;
3315 struct sctp_laddr
*laddr
;
3316 struct sctp_tmit_chunk
*chk
;
3317 struct sctp_asconf_addr
*aparam
;
3318 struct sctp_socket_q_list
*sq
;
3320 /* first, lets purge the entry from the hash table. */
3322 if (stcb
->asoc
.state
== 0) {
3323 kprintf("Freeing already free association:%p - huh??\n",
3330 /* now clean up any other timers */
3331 callout_stop(&asoc
->hb_timer
.timer
);
3332 callout_stop(&asoc
->dack_timer
.timer
);
3333 callout_stop(&asoc
->asconf_timer
.timer
);
3334 callout_stop(&asoc
->shut_guard_timer
.timer
);
3335 callout_stop(&asoc
->autoclose_timer
.timer
);
3336 callout_stop(&asoc
->delayed_event_timer
.timer
);
3337 TAILQ_FOREACH(net
, &asoc
->nets
, sctp_next
) {
3338 callout_stop(&net
->rxt_timer
.timer
);
3339 callout_stop(&net
->pmtu_timer
.timer
);
3342 /* Iterator asoc being freed we send an
3343 * unlocked TCB. It returns with INP_INFO
3344 * and INP write locked and the TCB locked
3345 * too and of course the iterator lock
3346 * in place as well..
3348 SCTP_TCB_UNLOCK(stcb
);
3349 sctp_iterator_asoc_being_freed(inp
, stcb
);
3351 /* Null all of my entry's on the socket q */
3352 TAILQ_FOREACH(sq
, &inp
->sctp_queue_list
, next_sq
) {
3353 if (sq
->tcb
== stcb
) {
3358 if (inp
->sctp_tcb_at_block
== (void *)stcb
) {
3359 inp
->error_on_block
= ECONNRESET
;
3362 if (inp
->sctp_tcbhash
) {
3363 LIST_REMOVE(stcb
, sctp_tcbhash
);
3365 /* Now lets remove it from the list of ALL associations in the EP */
3366 LIST_REMOVE(stcb
, sctp_tcblist
);
3367 SCTP_INP_WUNLOCK(inp
);
3368 SCTP_ITERATOR_UNLOCK();
3371 /* pull from vtag hash */
3372 LIST_REMOVE(stcb
, sctp_asocs
);
3375 * Now before we can free the assoc, we must remove all of the
3376 * networks and any other allocated space.. i.e. add removes here
3377 * before the SCTP_ZONE_FREE() of the tasoc entry.
3380 sctp_add_vtag_to_timewait(inp
, asoc
->my_vtag
);
3381 SCTP_INP_INFO_WUNLOCK();
3383 while (!TAILQ_EMPTY(&asoc
->nets
)) {
3384 net
= TAILQ_FIRST(&asoc
->nets
);
3385 /* pull from list */
3386 if ((sctppcbinfo
.ipi_count_raddr
== 0) || (prev
== net
)) {
3390 TAILQ_REMOVE(&asoc
->nets
, net
, sctp_next
);
3393 SCTP_ZONE_FREE(sctppcbinfo
.ipi_zone_net
, net
);
3394 sctppcbinfo
.ipi_count_raddr
--;
3397 * The chunk lists and such SHOULD be empty but we check them
3400 /* anything on the wheel needs to be removed */
3401 while (!TAILQ_EMPTY(&asoc
->out_wheel
)) {
3402 struct sctp_stream_out
*outs
;
3403 outs
= TAILQ_FIRST(&asoc
->out_wheel
);
3404 TAILQ_REMOVE(&asoc
->out_wheel
, outs
, next_spoke
);
3405 /* now clean up any chunks here */
3406 chk
= TAILQ_FIRST(&outs
->outqueue
);
3408 TAILQ_REMOVE(&outs
->outqueue
, chk
, sctp_next
);
3410 sctp_m_freem(chk
->data
);
3415 /* Free the chunk */
3416 SCTP_ZONE_FREE(sctppcbinfo
.ipi_zone_chunk
, chk
);
3417 sctppcbinfo
.ipi_count_chunk
--;
3418 sctppcbinfo
.ipi_gencnt_chunk
++;
3419 if ((int)sctppcbinfo
.ipi_count_chunk
< 0) {
3420 panic("Chunk count is negative");
3422 chk
= TAILQ_FIRST(&outs
->outqueue
);
3424 outs
= TAILQ_FIRST(&asoc
->out_wheel
);
3427 if (asoc
->pending_reply
) {
3428 FREE(asoc
->pending_reply
, M_PCB
);
3429 asoc
->pending_reply
= NULL
;
3431 chk
= TAILQ_FIRST(&asoc
->pending_reply_queue
);
3433 TAILQ_REMOVE(&asoc
->pending_reply_queue
, chk
, sctp_next
);
3435 sctp_m_freem(chk
->data
);
3440 /* Free the chunk */
3441 SCTP_ZONE_FREE(sctppcbinfo
.ipi_zone_chunk
, chk
);
3442 sctppcbinfo
.ipi_count_chunk
--;
3443 sctppcbinfo
.ipi_gencnt_chunk
++;
3444 if ((int)sctppcbinfo
.ipi_count_chunk
< 0) {
3445 panic("Chunk count is negative");
3447 chk
= TAILQ_FIRST(&asoc
->pending_reply_queue
);
3449 /* pending send queue SHOULD be empty */
3450 if (!TAILQ_EMPTY(&asoc
->send_queue
)) {
3451 chk
= TAILQ_FIRST(&asoc
->send_queue
);
3453 TAILQ_REMOVE(&asoc
->send_queue
, chk
, sctp_next
);
3455 sctp_m_freem(chk
->data
);
3458 SCTP_ZONE_FREE(sctppcbinfo
.ipi_zone_chunk
, chk
);
3459 sctppcbinfo
.ipi_count_chunk
--;
3460 if ((int)sctppcbinfo
.ipi_count_chunk
< 0) {
3461 panic("Chunk count is negative");
3463 sctppcbinfo
.ipi_gencnt_chunk
++;
3464 chk
= TAILQ_FIRST(&asoc
->send_queue
);
3467 /* sent queue SHOULD be empty */
3468 if (!TAILQ_EMPTY(&asoc
->sent_queue
)) {
3469 chk
= TAILQ_FIRST(&asoc
->sent_queue
);
3471 TAILQ_REMOVE(&asoc
->sent_queue
, chk
, sctp_next
);
3473 sctp_m_freem(chk
->data
);
3476 SCTP_ZONE_FREE(sctppcbinfo
.ipi_zone_chunk
, chk
);
3477 sctppcbinfo
.ipi_count_chunk
--;
3478 if ((int)sctppcbinfo
.ipi_count_chunk
< 0) {
3479 panic("Chunk count is negative");
3481 sctppcbinfo
.ipi_gencnt_chunk
++;
3482 chk
= TAILQ_FIRST(&asoc
->sent_queue
);
3485 /* control queue MAY not be empty */
3486 if (!TAILQ_EMPTY(&asoc
->control_send_queue
)) {
3487 chk
= TAILQ_FIRST(&asoc
->control_send_queue
);
3489 TAILQ_REMOVE(&asoc
->control_send_queue
, chk
, sctp_next
);
3491 sctp_m_freem(chk
->data
);
3494 SCTP_ZONE_FREE(sctppcbinfo
.ipi_zone_chunk
, chk
);
3495 sctppcbinfo
.ipi_count_chunk
--;
3496 if ((int)sctppcbinfo
.ipi_count_chunk
< 0) {
3497 panic("Chunk count is negative");
3499 sctppcbinfo
.ipi_gencnt_chunk
++;
3500 chk
= TAILQ_FIRST(&asoc
->control_send_queue
);
3503 if (!TAILQ_EMPTY(&asoc
->reasmqueue
)) {
3504 chk
= TAILQ_FIRST(&asoc
->reasmqueue
);
3506 TAILQ_REMOVE(&asoc
->reasmqueue
, chk
, sctp_next
);
3508 sctp_m_freem(chk
->data
);
3511 SCTP_ZONE_FREE(sctppcbinfo
.ipi_zone_chunk
, chk
);
3512 sctppcbinfo
.ipi_count_chunk
--;
3513 if ((int)sctppcbinfo
.ipi_count_chunk
< 0) {
3514 panic("Chunk count is negative");
3516 sctppcbinfo
.ipi_gencnt_chunk
++;
3517 chk
= TAILQ_FIRST(&asoc
->reasmqueue
);
3520 if (!TAILQ_EMPTY(&asoc
->delivery_queue
)) {
3521 chk
= TAILQ_FIRST(&asoc
->delivery_queue
);
3523 TAILQ_REMOVE(&asoc
->delivery_queue
, chk
, sctp_next
);
3525 sctp_m_freem(chk
->data
);
3528 SCTP_ZONE_FREE(sctppcbinfo
.ipi_zone_chunk
, chk
);
3529 sctppcbinfo
.ipi_count_chunk
--;
3530 if ((int)sctppcbinfo
.ipi_count_chunk
< 0) {
3531 panic("Chunk count is negative");
3533 sctppcbinfo
.ipi_gencnt_chunk
++;
3534 chk
= TAILQ_FIRST(&asoc
->delivery_queue
);
3537 if (asoc
->mapping_array
) {
3538 FREE(asoc
->mapping_array
, M_PCB
);
3539 asoc
->mapping_array
= NULL
;
3542 /* the stream outs */
3543 if (asoc
->strmout
) {
3544 FREE(asoc
->strmout
, M_PCB
);
3545 asoc
->strmout
= NULL
;
3547 asoc
->streamoutcnt
= 0;
3550 for (i
= 0; i
< asoc
->streamincnt
; i
++) {
3551 if (!TAILQ_EMPTY(&asoc
->strmin
[i
].inqueue
)) {
3552 /* We have somethings on the streamin queue */
3553 chk
= TAILQ_FIRST(&asoc
->strmin
[i
].inqueue
);
3555 TAILQ_REMOVE(&asoc
->strmin
[i
].inqueue
,
3558 sctp_m_freem(chk
->data
);
3561 SCTP_ZONE_FREE(sctppcbinfo
.ipi_zone_chunk
,
3563 sctppcbinfo
.ipi_count_chunk
--;
3564 if ((int)sctppcbinfo
.ipi_count_chunk
< 0) {
3565 panic("Chunk count is negative");
3567 sctppcbinfo
.ipi_gencnt_chunk
++;
3568 chk
= TAILQ_FIRST(&asoc
->strmin
[i
].inqueue
);
3572 FREE(asoc
->strmin
, M_PCB
);
3573 asoc
->strmin
= NULL
;
3575 asoc
->streamincnt
= 0;
3576 /* local addresses, if any */
3577 while (!LIST_EMPTY(&asoc
->sctp_local_addr_list
)) {
3578 laddr
= LIST_FIRST(&asoc
->sctp_local_addr_list
);
3579 LIST_REMOVE(laddr
, sctp_nxt_addr
);
3580 SCTP_ZONE_FREE(sctppcbinfo
.ipi_zone_laddr
, laddr
);
3581 sctppcbinfo
.ipi_count_laddr
--;
3583 /* pending asconf (address) parameters */
3584 while (!TAILQ_EMPTY(&asoc
->asconf_queue
)) {
3585 aparam
= TAILQ_FIRST(&asoc
->asconf_queue
);
3586 TAILQ_REMOVE(&asoc
->asconf_queue
, aparam
, next
);
3587 FREE(aparam
, M_PCB
);
3589 if (asoc
->last_asconf_ack_sent
!= NULL
) {
3590 sctp_m_freem(asoc
->last_asconf_ack_sent
);
3591 asoc
->last_asconf_ack_sent
= NULL
;
3593 /* Insert new items here :> */
3595 /* Get rid of LOCK */
3596 SCTP_TCB_LOCK_DESTROY(stcb
);
3598 /* now clean up the tasoc itself */
3599 SCTP_ZONE_FREE(sctppcbinfo
.ipi_zone_asoc
, stcb
);
3600 sctppcbinfo
.ipi_count_asoc
--;
3601 if ((inp->sctp_socket->so_snd.ssb_cc) ||
3602 (inp->sctp_socket->so_snd.ssb_mbcnt)) {
3603 /* This will happen when an abort is done */
3604 inp->sctp_socket->so_snd.ssb_cc = 0;
3605 inp->sctp_socket->so_snd.ssb_mbcnt = 0;
3607 if (inp->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) {
3608 if ((inp->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL) == 0) {
3609 if (inp->sctp_flags & SCTP_PCB_FLAGS_CONNECTED) {
3611 * For the base fd, that is NOT in TCP pool we
3612 * turn off the connected flag. This allows
3613 * non-listening endpoints to connect/shutdown/
3616 inp->sctp_flags &= ~SCTP_PCB_FLAGS_CONNECTED;
3617 soisdisconnected(inp->sctp_socket);
3620 * For those that are in the TCP pool we just leave
3621 * so it cannot be used. When they close the fd we
3626 if (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) {
3627 sctp_inpcb_free(inp, 0);
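/*
 * This is the deferred-teardown path: when sctp_inpcb_free() earlier
 * found live associations it only marked the endpoint
 * SCTP_PCB_FLAGS_SOCKET_GONE and returned, so freeing the last
 * association is what finally calls back into sctp_inpcb_free() to
 * release the endpoint itself (see the SOCKET_GONE handling in
 * sctp_inpcb_free() above).
 */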
3634 * determine if a destination is "reachable" based upon the addresses
3635 * bound to the current endpoint (e.g. only v4 or v6 currently bound)
3638 * FIX: if we allow assoc-level bindx(), then this needs to be fixed
3639 * to use assoc level v4/v6 flags, as the assoc *may* not have the
3640 * same address types bound as its endpoint
3643 sctp_destination_is_reachable(struct sctp_tcb *stcb, struct sockaddr *destaddr)
3645 struct sctp_inpcb *inp;
3648 /* No locks here, the TCB, in all cases is already
3649 * locked and an assoc is up. There is either a
3650 * INP lock by the caller applied (in asconf case when
3651 * deleting an address) or NOT in the HB case, however
3652 * if HB then the INP increment is up and the INP
3653 * will not be removed (on top of the fact that
3654 * we have a TCB lock). So we only want to
3655 * read the sctp_flags, which is either bound-all
3656 * or not.. no protection needed since once an
3657 * assoc is up you can't be changing your binding.
3659 inp = stcb->sctp_ep;
3660 if (inp->sctp_flags & SCTP_PCB_FLAGS_BOUNDALL) {
3661 /* if bound all, destination is not restricted */
3662 /* RRS: Question during lock work: Is this
3663 * correct? If you are bound-all you still
3664 * might need to obey the V4--V6 flags???
3665 * IMO this bound-all stuff needs to be removed!
3669 /* NOTE: all "scope" checks are done when local addresses are added */
3670 if (destaddr->sa_family == AF_INET6) {
3671 #if !(defined(__FreeBSD__) || defined(__APPLE__) || defined(__DragonFly__))
3672 answer = inp->inp_vflag & INP_IPV6;
3674 answer = inp->ip_inp.inp.inp_vflag & INP_IPV6;
3676 } else if (destaddr->sa_family == AF_INET) {
3677 #if !(defined(__FreeBSD__) || defined(__APPLE__) || defined(__DragonFly__))
3678 answer = inp->inp_vflag & INP_IPV4;
3680 answer = inp->ip_inp.inp.inp_vflag & INP_IPV4;
3683 /* invalid family, so it's unreachable */
3690 * update the inp_vflags on an endpoint
3693 sctp_update_ep_vflag(struct sctp_inpcb *inp) {
3694 struct sctp_laddr *laddr;
3696 /* first clear the flag */
3697 #if !(defined(__FreeBSD__) || defined(__APPLE__) || defined(__DragonFly__))
3700 inp->ip_inp.inp.inp_vflag = 0;
3702 /* set the flag based on addresses on the ep list */
3703 LIST_FOREACH(laddr, &inp->sctp_addr_list, sctp_nxt_addr) {
3704 if (laddr->ifa == NULL) {
3706 if (sctp_debug_on & SCTP_DEBUG_PCB1) {
3707 kprintf("An ounce of prevention is worth a pound of cure\n");
3709 #endif /* SCTP_DEBUG */
3712 if (laddr->ifa->ifa_addr) {
3715 if (laddr->ifa->ifa_addr->sa_family == AF_INET6) {
3716 #if !(defined(__FreeBSD__) || defined(__APPLE__) || defined(__DragonFly__))
3717 inp->inp_vflag |= INP_IPV6;
3719 inp->ip_inp.inp.inp_vflag |= INP_IPV6;
3721 } else if (laddr->ifa->ifa_addr->sa_family == AF_INET) {
3722 #if !(defined(__FreeBSD__) || defined(__APPLE__) || defined(__DragonFly__))
3723 inp->inp_vflag |= INP_IPV4;
3725 inp->ip_inp.inp.inp_vflag |= INP_IPV4;
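/*
 * inp_vflag (INP_IPV4/INP_IPV6) records which address families the
 * endpoint currently has local addresses for.  Recomputing it from the
 * laddr list whenever an address is added or removed keeps
 * sctp_destination_is_reachable() above consistent with what is actually
 * bound.
 */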
3732 * Add the address to the endpoint local address list
3733 * There is nothing to be done if we are bound to all addresses
3736 sctp_add_local_addr_ep(struct sctp_inpcb *inp, struct ifaddr *ifa)
3738 struct sctp_laddr *laddr;
3742 if (inp->sctp_flags & SCTP_PCB_FLAGS_BOUNDALL) {
3743 /* You are already bound to all. You have it already */
3746 if (ifa->ifa_addr->sa_family == AF_INET6) {
3747 struct in6_ifaddr *ifa6;
3748 ifa6 = (struct in6_ifaddr *)ifa;
3749 if (ifa6->ia6_flags & (IN6_IFF_DETACHED |
3750 IN6_IFF_DEPRECATED | IN6_IFF_ANYCAST | IN6_IFF_NOTREADY))
3751 /* Can't bind a non-existent addr. */
3754 /* first, is it already present? */
3755 LIST_FOREACH(laddr, &inp->sctp_addr_list, sctp_nxt_addr) {
3756 if (laddr->ifa == ifa) {
3762 if (((inp->sctp_flags & SCTP_PCB_FLAGS_BOUNDALL) == 0) && (fnd == 0)) {
3763 /* Not bound to all */
3764 error = sctp_insert_laddr(&inp->sctp_addr_list, ifa);
3768 /* update inp_vflag flags */
3769 if (ifa->ifa_addr->sa_family == AF_INET6) {
3770 #if !(defined(__FreeBSD__) || defined(__APPLE__) || defined(__DragonFly__))
3771 inp->inp_vflag |= INP_IPV6;
3773 inp->ip_inp.inp.inp_vflag |= INP_IPV6;
3775 } else if (ifa->ifa_addr->sa_family == AF_INET) {
3776 #if !(defined(__FreeBSD__) || defined(__APPLE__) || defined(__DragonFly__))
3777 inp->inp_vflag |= INP_IPV4;
3779 inp->ip_inp.inp.inp_vflag |= INP_IPV4;
3788 * select a new (hopefully reachable) destination net
3789 * (should only be used when we deleted an ep addr that is the
3790 * only usable source address to reach the destination net)
3793 sctp_select_primary_destination(struct sctp_tcb *stcb)
3795 struct sctp_nets *net;
3797 TAILQ_FOREACH(net, &stcb->asoc.nets, sctp_next) {
3798 /* for now, we'll just pick the first reachable one we find */
3799 if (net->dest_state & SCTP_ADDR_UNCONFIRMED)
3801 if (sctp_destination_is_reachable(stcb,
3802 (struct sockaddr *)&net->ro._l_addr)) {
3803 /* found a reachable destination */
3804 stcb->asoc.primary_destination = net;
3807 /* I can't get there from here! ...we're gonna die shortly... */
3812 * Delete the address from the endpoint local address list
3813 * There is nothing to be done if we are bound to all addresses
3816 sctp_del_local_addr_ep(struct sctp_inpcb *inp, struct ifaddr *ifa)
3818 struct sctp_laddr *laddr;
3821 if (inp->sctp_flags & SCTP_PCB_FLAGS_BOUNDALL) {
3822 /* You are already bound to all. You have it already */
3826 LIST_FOREACH(laddr, &inp->sctp_addr_list, sctp_nxt_addr) {
3827 if (laddr->ifa == ifa) {
3832 if (fnd && (inp->laddr_count < 2)) {
3833 /* can't delete unless there are at LEAST 2 addresses */
3836 if (((inp->sctp_flags & SCTP_PCB_FLAGS_BOUNDALL) == 0) && (fnd)) {
3838 * clean up any use of this address
3839 * go through our associations and clear any
3840 * last_used_address that match this one
3841 * for each assoc, see if a new primary_destination is needed
3843 struct sctp_tcb *stcb;
3845 /* clean up "next_addr_touse" */
3846 if (inp->next_addr_touse == laddr)
3847 /* delete this address */
3848 inp->next_addr_touse = NULL;
3850 /* clean up "last_used_address" */
3851 LIST_FOREACH(stcb, &inp->sctp_asoc_list, sctp_tcblist) {
3852 if (stcb->asoc.last_used_address == laddr)
3853 /* delete this address */
3854 stcb->asoc.last_used_address = NULL;
3855 } /* for each tcb */
3857 /* remove it from the ep list */
3858 sctp_remove_laddr(laddr);
3860 /* update inp_vflag flags */
3861 sctp_update_ep_vflag(inp);
3862 /* select a new primary destination if needed */
3863 LIST_FOREACH(stcb, &inp->sctp_asoc_list, sctp_tcblist) {
3864 /* presume caller (sctp_asconf.c) already owns INP lock */
3865 SCTP_TCB_LOCK(stcb);
3866 if (sctp_destination_is_reachable(stcb,
3867 (struct sockaddr *)&stcb->asoc.primary_destination->ro._l_addr) == 0) {
3868 sctp_select_primary_destination(stcb);
3870 SCTP_TCB_UNLOCK(stcb);
3871 } /* for each tcb */
3877 * Add the addr to the TCB local address list
3878 * For the BOUNDALL or dynamic case, this is a "pending" address list
3879 * (eg. addresses waiting for an ASCONF-ACK response)
3880 * For the subset binding, static case, this is a "valid" address list
3883 sctp_add_local_addr_assoc(struct sctp_tcb *stcb, struct ifaddr *ifa)
3885 struct sctp_inpcb *inp;
3886 struct sctp_laddr *laddr;
3889 /* Assumes TCP is locked.. and possibly
3890 * the INP. May need to confirm/fix that if
3891 * we need it and is not the case.
3893 inp = stcb->sctp_ep;
3894 if (ifa->ifa_addr->sa_family == AF_INET6) {
3895 struct in6_ifaddr *ifa6;
3896 ifa6 = (struct in6_ifaddr *)ifa;
3897 if (ifa6->ia6_flags & (IN6_IFF_DETACHED |
3898 /* IN6_IFF_DEPRECATED | */
3901 /* Can't bind a non-existent addr. */
3904 /* does the address already exist? */
3905 LIST_FOREACH(laddr, &stcb->asoc.sctp_local_addr_list, sctp_nxt_addr) {
3906 if (laddr->ifa == ifa) {
3911 /* add to the list */
3912 error = sctp_insert_laddr(&stcb->asoc.sctp_local_addr_list, ifa);
3919 * insert an laddr entry with the given ifa for the desired list
3922 sctp_insert_laddr(struct sctpladdr *list, struct ifaddr *ifa) {
3923 struct sctp_laddr *laddr;
3926 laddr = (struct sctp_laddr *)SCTP_ZONE_GET(sctppcbinfo.ipi_zone_laddr);
3927 if (laddr == NULL) {
3928 /* out of memory? */
3932 sctppcbinfo.ipi_count_laddr++;
3933 sctppcbinfo.ipi_gencnt_laddr++;
3934 bzero(laddr, sizeof(*laddr));
3937 LIST_INSERT_HEAD(list, laddr, sctp_nxt_addr);
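/*
 * This helper is the single entry point for putting an laddr on a list:
 * the endpoint bind path, sctp_add_local_addr_ep() and
 * sctp_add_local_addr_assoc() all go through it, so the
 * ipi_count_laddr/ipi_gencnt_laddr bookkeeping above stays consistent no
 * matter which list the entry lands on.
 */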
3944 * Remove an laddr entry from the local address list (on an assoc)
3947 sctp_remove_laddr(struct sctp_laddr *laddr)
3950 /* remove from the list */
3951 LIST_REMOVE(laddr, sctp_nxt_addr);
3952 SCTP_ZONE_FREE(sctppcbinfo.ipi_zone_laddr, laddr);
3953 sctppcbinfo.ipi_count_laddr--;
3954 sctppcbinfo.ipi_gencnt_laddr++;
3959 * Remove an address from the TCB local address list
3962 sctp_del_local_addr_assoc(struct sctp_tcb *stcb, struct ifaddr *ifa)
3964 struct sctp_inpcb *inp;
3965 struct sctp_laddr *laddr;
3967 /* This is called by asconf work. It is assumed that
3968 * a) The TCB is locked
3970 * b) The INP is locked.
3971 * This is true in as much as I can trace through
3972 * the entry asconf code where I did these locks.
3973 * Again, the ASCONF code is a bit different in
3974 * that it does lock the INP during its work often
3975 * times. This must be since we don't want other
3976 * proc's looking up things while what they are
3977 * looking up is changing :-D
3980 inp = stcb->sctp_ep;
3981 /* if subset bound and don't allow ASCONF's, can't delete last */
3982 if (((inp->sctp_flags & SCTP_PCB_FLAGS_BOUNDALL) == 0) &&
3983 ((inp->sctp_flags & SCTP_PCB_FLAGS_DO_ASCONF) == 0)) {
3984 if (stcb->asoc.numnets < 2) {
3985 /* can't delete last address */
3990 LIST_FOREACH(laddr, &stcb->asoc.sctp_local_addr_list, sctp_nxt_addr) {
3991 /* remove the address if it exists */
3992 if (laddr->ifa == NULL)
3994 if (laddr->ifa == ifa) {
3995 sctp_remove_laddr(laddr);
4000 /* address not found! */
4005 * Remove an address from the TCB local address list
4006 * lookup using a sockaddr addr
4009 sctp_del_local_addr_assoc_sa(struct sctp_tcb *stcb, struct sockaddr *sa)
4011 struct sctp_inpcb *inp;
4012 struct sctp_laddr *laddr;
4013 struct sockaddr *l_sa;
4016 * This function I find does not seem to have a caller.
4017 * As such we NEED TO DELETE this code. If we do
4018 * find a caller, the caller MUST have locked the TCB
4019 * at the least and probably the INP as well.
4021 inp = stcb->sctp_ep;
4022 /* if subset bound and don't allow ASCONF's, can't delete last */
4023 if (((inp->sctp_flags & SCTP_PCB_FLAGS_BOUNDALL) == 0) &&
4024 ((inp->sctp_flags & SCTP_PCB_FLAGS_DO_ASCONF) == 0)) {
4025 if (stcb->asoc.numnets < 2) {
4026 /* can't delete last address */
4031 LIST_FOREACH(laddr, &stcb->asoc.sctp_local_addr_list, sctp_nxt_addr) {
4032 /* make sure the address exists */
4033 if (laddr->ifa == NULL)
4035 if (laddr->ifa->ifa_addr == NULL)
4038 l_sa = laddr->ifa->ifa_addr;
4039 if (l_sa->sa_family == AF_INET6) {
4041 struct sockaddr_in6 *sin1, *sin2;
4042 sin1 = (struct sockaddr_in6 *)l_sa;
4043 sin2 = (struct sockaddr_in6 *)sa;
4044 if (memcmp(&sin1->sin6_addr, &sin2->sin6_addr,
4045 sizeof(struct in6_addr)) == 0) {
4047 sctp_remove_laddr(laddr);
4050 } else if (l_sa->sa_family == AF_INET) {
4052 struct sockaddr_in *sin1, *sin2;
4053 sin1 = (struct sockaddr_in *)l_sa;
4054 sin2 = (struct sockaddr_in *)sa;
4055 if (sin1->sin_addr.s_addr == sin2->sin_addr.s_addr) {
4057 sctp_remove_laddr(laddr);
4061 /* invalid family */
4065 /* address not found! */
4069 static char sctp_pcb_initialized
= 0;
4071 #if defined(__FreeBSD__) || defined(__APPLE__) || defined(__DragonFly__)
4073 static int sctp_max_number_of_assoc
= SCTP_MAX_NUM_OF_ASOC
;
4074 static int sctp_scale_up_for_address
= SCTP_SCALE_FOR_ADDR
;
4076 #endif /* FreeBSD || APPLE || DragonFly */
4078 #ifndef SCTP_TCBHASHSIZE
4079 #define SCTP_TCBHASHSIZE 1024
4082 #ifndef SCTP_CHUNKQUEUE_SCALE
4083 #define SCTP_CHUNKQUEUE_SCALE 10
4090 * SCTP initialization for the PCB structures
4091 * should be called by the sctp_init() funciton.
4094 int hashtblsize
= SCTP_TCBHASHSIZE
;
4096 #if defined(__FreeBSD__) || defined(__APPLE__) || defined(__DragonFly__)
4097 int sctp_chunkscale
= SCTP_CHUNKQUEUE_SCALE
;
4100 if (sctp_pcb_initialized
!= 0) {
4101 /* error I was called twice */
4104 sctp_pcb_initialized
= 1;
4106 /* Init all peg counts */
4107 for (i
= 0; i
< SCTP_NUMBER_OF_PEGS
; i
++) {
4111 /* init the empty list of (All) Endpoints */
4112 LIST_INIT(&sctppcbinfo
.listhead
);
4114 /* init the iterator head */
4115 LIST_INIT(&sctppcbinfo
.iteratorhead
);
4117 /* init the hash table of endpoints */
4118 #if defined(__FreeBSD__)
4119 #if defined(__FreeBSD_cc_version) && __FreeBSD_cc_version >= 440000
4120 TUNABLE_INT_FETCH("net.inet.sctp.tcbhashsize", &hashtblsize
);
4121 TUNABLE_INT_FETCH("net.inet.sctp.pcbhashsize", &sctp_pcbtblsize
);
4122 TUNABLE_INT_FETCH("net.inet.sctp.chunkscale", &sctp_chunkscale
);
4124 TUNABLE_INT_FETCH("net.inet.sctp.tcbhashsize", SCTP_TCBHASHSIZE
,
4126 TUNABLE_INT_FETCH("net.inet.sctp.pcbhashsize", SCTP_PCBHASHSIZE
,
4128 TUNABLE_INT_FETCH("net.inet.sctp.chunkscale", SCTP_CHUNKQUEUE_SCALE
,
4133 sctppcbinfo
.sctp_asochash
= hashinit((hashtblsize
* 31),
4138 #if defined(__NetBSD__) || defined(__OpenBSD__)
4141 &sctppcbinfo
.hashasocmark
);
4143 sctppcbinfo
.sctp_ephash
= hashinit(hashtblsize
,
4148 #if defined(__NetBSD__) || defined(__OpenBSD__)
4151 &sctppcbinfo
.hashmark
);
4153 sctppcbinfo
.sctp_tcpephash
= hashinit(hashtblsize
,
4158 #if defined(__NetBSD__) || defined(__OpenBSD__)
4161 &sctppcbinfo
.hashtcpmark
);
4163 sctppcbinfo
.hashtblsize
= hashtblsize
;
4165 /* init the zones */
4167 * FIX ME: Should check for NULL returns, but if it does fail we
4168 * are doomed to panic anyways... add later maybe.
4170 SCTP_ZONE_INIT(sctppcbinfo
.ipi_zone_ep
, "sctp_ep",
4171 sizeof(struct sctp_inpcb
), maxsockets
);
4173 SCTP_ZONE_INIT(sctppcbinfo
.ipi_zone_asoc
, "sctp_asoc",
4174 sizeof(struct sctp_tcb
), sctp_max_number_of_assoc
);
4176 SCTP_ZONE_INIT(sctppcbinfo
.ipi_zone_laddr
, "sctp_laddr",
4177 sizeof(struct sctp_laddr
),
4178 (sctp_max_number_of_assoc
* sctp_scale_up_for_address
));
4180 SCTP_ZONE_INIT(sctppcbinfo
.ipi_zone_net
, "sctp_raddr",
4181 sizeof(struct sctp_nets
),
4182 (sctp_max_number_of_assoc
* sctp_scale_up_for_address
));
4184 SCTP_ZONE_INIT(sctppcbinfo
.ipi_zone_chunk
, "sctp_chunk",
4185 sizeof(struct sctp_tmit_chunk
),
4186 (sctp_max_number_of_assoc
* sctp_scale_up_for_address
*
4189 SCTP_ZONE_INIT(sctppcbinfo
.ipi_zone_sockq
, "sctp_sockq",
4190 sizeof(struct sctp_socket_q_list
),
4191 (sctp_max_number_of_assoc
* sctp_scale_up_for_address
*
4194 /* Master Lock INIT for info structure */
4195 SCTP_INP_INFO_LOCK_INIT();
4196 SCTP_ITERATOR_LOCK_INIT();
4197 /* not sure if we need all the counts */
4198 sctppcbinfo
.ipi_count_ep
= 0;
4199 sctppcbinfo
.ipi_gencnt_ep
= 0;
4200 /* assoc/tcb zone info */
4201 sctppcbinfo
.ipi_count_asoc
= 0;
4202 sctppcbinfo
.ipi_gencnt_asoc
= 0;
4203 /* local addrlist zone info */
4204 sctppcbinfo
.ipi_count_laddr
= 0;
4205 sctppcbinfo
.ipi_gencnt_laddr
= 0;
4206 /* remote addrlist zone info */
4207 sctppcbinfo
.ipi_count_raddr
= 0;
4208 sctppcbinfo
.ipi_gencnt_raddr
= 0;
4210 sctppcbinfo
.ipi_count_chunk
= 0;
4211 sctppcbinfo
.ipi_gencnt_chunk
= 0;
4213 /* socket queue zone info */
4214 sctppcbinfo
.ipi_count_sockq
= 0;
4215 sctppcbinfo
.ipi_gencnt_sockq
= 0;
4218 sctppcbinfo
.mbuf_track
= 0;
4220 #if defined(__FreeBSD__) || defined(__OpenBSD__) || defined(__APPLE__) \
4221 || defined(__DragonFly__)
4222 sctppcbinfo
.lastlow
= ipport_firstauto
;
4224 sctppcbinfo
.lastlow
= anonportmin
;
4226 /* Init the TIMEWAIT list */
4227 for (i
= 0; i
< SCTP_STACK_VTAG_HASH_SIZE
; i
++) {
4228 LIST_INIT(&sctppcbinfo
.vtag_timewait
[i
]);
4231 #if defined(_SCTP_NEEDS_CALLOUT_) && !defined(__APPLE__)
4232 TAILQ_INIT(&sctppcbinfo
.callqueue
);
4238 sctp_load_addresses_from_init(struct sctp_tcb
*stcb
, struct mbuf
*m
,
4239 int iphlen
, int offset
, int limit
, struct sctphdr
*sh
,
4240 struct sockaddr
*altsa
)
4243 * grub through the INIT pulling addresses and
4244 * loading them to the nets structure in the asoc.
4245 * The from address in the mbuf should also be loaded
4246 * (if it is not already). This routine can be called
4247 * with either INIT or INIT-ACK's as long as the
4248 * m points to the IP packet and the offset points
4249 * to the beginning of the parameters.
4251 struct sctp_inpcb
*inp
, *l_inp
;
4252 struct sctp_nets
*net
, *net_tmp
;
4254 struct sctp_paramhdr
*phdr
, parm_buf
;
4255 struct sctp_tcb
*stcb_tmp
;
4256 u_int16_t ptype
, plen
;
4257 struct sockaddr
*sa
;
4258 struct sockaddr_storage dest_store
;
4259 struct sockaddr
*local_sa
= (struct sockaddr
*)&dest_store
;
4260 struct sockaddr_in sin
;
4261 struct sockaddr_in6 sin6
;
4263 /* First get the destination address setup too. */
4264 memset(&sin
, 0, sizeof(sin
));
4265 memset(&sin6
, 0, sizeof(sin6
));
4267 sin
.sin_family
= AF_INET
;
4268 sin
.sin_len
= sizeof(sin
);
4269 sin
.sin_port
= stcb
->rport
;
4271 sin6
.sin6_family
= AF_INET6
;
4272 sin6
.sin6_len
= sizeof(struct sockaddr_in6
);
4273 sin6
.sin6_port
= stcb
->rport
;
	if (altsa == NULL) {
		iph = mtod(m, struct ip *);
		if (iph->ip_v == IPVERSION) {
			/* source address is IPv4 */
			struct sockaddr_in *sin_2;
			sin_2 = (struct sockaddr_in *)(local_sa);
			memset(sin_2, 0, sizeof(sin));
			sin_2->sin_family = AF_INET;
			sin_2->sin_len = sizeof(sin);
			sin_2->sin_port = sh->dest_port;
			sin_2->sin_addr.s_addr = iph->ip_dst.s_addr;
			sin.sin_addr = iph->ip_src;
			sa = (struct sockaddr *)&sin;
		} else if (iph->ip_v == (IPV6_VERSION >> 4)) {
			/* source address is IPv6 */
			struct ip6_hdr *ip6;
			struct sockaddr_in6 *sin6_2;

			ip6 = mtod(m, struct ip6_hdr *);
			sin6_2 = (struct sockaddr_in6 *)(local_sa);
			memset(sin6_2, 0, sizeof(sin6));
			sin6_2->sin6_family = AF_INET6;
			sin6_2->sin6_len = sizeof(struct sockaddr_in6);
			sin6_2->sin6_port = sh->dest_port;
			sin6.sin6_addr = ip6->ip6_src;
			sa = (struct sockaddr *)&sin6;
		} else {
			sa = NULL;
		}
	} else {
		/*
		 * For cookies we use the src address NOT from the packet
		 * but from the original INIT.
		 */
		sa = altsa;
	}
	/* Turn off ECN until we get through all params */
	stcb->asoc.ecn_allowed = 0;

	TAILQ_FOREACH(net, &stcb->asoc.nets, sctp_next) {
		/* mark all addresses that we have currently on the list */
		net->dest_state |= SCTP_ADDR_NOT_IN_ASSOC;
	}
	/* does the source address already exist? if so skip it */
	l_inp = inp = stcb->sctp_ep;
	stcb_tmp = sctp_findassociation_ep_addr(&inp, sa, &net_tmp, local_sa, stcb);
	if ((stcb_tmp == NULL && inp == stcb->sctp_ep) || inp == NULL) {
		/* we must add the source address */
		/* no scope set here since we have a tcb already. */
		if ((sa->sa_family == AF_INET) &&
		    (stcb->asoc.ipv4_addr_legal)) {
			if (sctp_add_remote_addr(stcb, sa, 0, 2)) {
				return (-1);
			}
		} else if ((sa->sa_family == AF_INET6) &&
		    (stcb->asoc.ipv6_addr_legal)) {
			if (sctp_add_remote_addr(stcb, sa, 0, 3)) {
				return (-2);
			}
		}
	} else {
		if (net_tmp != NULL && stcb_tmp == stcb) {
			net_tmp->dest_state &= ~SCTP_ADDR_NOT_IN_ASSOC;
		} else if (stcb_tmp != stcb) {
			/* It belongs to another association? */
			SCTP_TCB_UNLOCK(stcb_tmp);
			return (-3);
		}
	}
	/* since an unlock occurred we must check the
	 * TCB's state and the pcb's gone flags.
	 */
	if (l_inp->sctp_flags & (SCTP_PCB_FLAGS_SOCKET_GONE|SCTP_PCB_FLAGS_SOCKET_ALLGONE)) {
		/* the user freed the ep */
		return (-4);
	}
	if (stcb->asoc.state == 0) {
		/* the assoc was freed? */
		return (-5);
	}
	/* now we must go through each of the params. */
	phdr = sctp_get_next_param(m, offset, &parm_buf, sizeof(parm_buf));
	while (phdr) {
		ptype = ntohs(phdr->param_type);
		plen = ntohs(phdr->param_length);
		/*kprintf("ptype => %d, plen => %d\n", ptype, plen);*/
		if (offset + plen > limit) {
			break;
		}
		if (plen == 0) {
			break;
		}
		if ((ptype == SCTP_IPV4_ADDRESS) &&
		    (stcb->asoc.ipv4_addr_legal)) {
			struct sctp_ipv4addr_param *p4, p4_buf;
			/* ok get the v4 address and check/add */
			phdr = sctp_get_next_param(m, offset,
			    (struct sctp_paramhdr *)&p4_buf, sizeof(p4_buf));
			if (plen != sizeof(struct sctp_ipv4addr_param) ||
			    phdr == NULL) {
				return (-6);
			}
			p4 = (struct sctp_ipv4addr_param *)phdr;
			sin.sin_addr.s_addr = p4->addr;
			sa = (struct sockaddr *)&sin;
			inp = stcb->sctp_ep;
			stcb_tmp = sctp_findassociation_ep_addr(&inp, sa, &net,
			    local_sa, stcb);

			if ((stcb_tmp == NULL && inp == stcb->sctp_ep) ||
			    inp == NULL) {
				/* we must add the source address */
				/* no scope set since we have a tcb already */

				/* we must validate the state again here */
				if (l_inp->sctp_flags & (SCTP_PCB_FLAGS_SOCKET_GONE|SCTP_PCB_FLAGS_SOCKET_ALLGONE)) {
					/* the user freed the ep */
					return (-7);
				}
				if (stcb->asoc.state == 0) {
					/* the assoc was freed? */
					return (-8);
				}
				if (sctp_add_remote_addr(stcb, sa, 0, 4)) {
					return (-9);
				}
			} else if (stcb_tmp == stcb) {
				if (l_inp->sctp_flags & (SCTP_PCB_FLAGS_SOCKET_GONE|SCTP_PCB_FLAGS_SOCKET_ALLGONE)) {
					/* the user freed the ep */
					return (-10);
				}
				if (stcb->asoc.state == 0) {
					/* the assoc was freed? */
					return (-11);
				}
				if (net != NULL) {
					/* clear flag */
					net->dest_state &=
					    ~SCTP_ADDR_NOT_IN_ASSOC;
				}
			} else {
				/* strange, address is in another assoc?
				 * straighten out locks.
				 */
				SCTP_TCB_UNLOCK(stcb_tmp);
				SCTP_INP_RLOCK(inp);
				if (l_inp->sctp_flags & (SCTP_PCB_FLAGS_SOCKET_GONE|SCTP_PCB_FLAGS_SOCKET_ALLGONE)) {
					/* the user freed the ep */
					SCTP_INP_RUNLOCK(l_inp);
					return (-12);
				}
				if (stcb->asoc.state == 0) {
					/* the assoc was freed? */
					SCTP_INP_RUNLOCK(l_inp);
					return (-13);
				}
				SCTP_TCB_LOCK(stcb);
				SCTP_INP_RUNLOCK(stcb->sctp_ep);
				return (-14);
			}
		} else if ((ptype == SCTP_IPV6_ADDRESS) &&
		    (stcb->asoc.ipv6_addr_legal)) {
			/* ok get the v6 address and check/add */
			struct sctp_ipv6addr_param *p6, p6_buf;
			phdr = sctp_get_next_param(m, offset,
			    (struct sctp_paramhdr *)&p6_buf, sizeof(p6_buf));
			if (plen != sizeof(struct sctp_ipv6addr_param) ||
			    phdr == NULL) {
				return (-15);
			}
			p6 = (struct sctp_ipv6addr_param *)phdr;
			memcpy((caddr_t)&sin6.sin6_addr, p6->addr,
			    sizeof(p6->addr));
			sa = (struct sockaddr *)&sin6;
			inp = stcb->sctp_ep;
			stcb_tmp = sctp_findassociation_ep_addr(&inp, sa, &net,
			    local_sa, stcb);
			if (stcb_tmp == NULL && (inp == stcb->sctp_ep ||
			    inp == NULL)) {
				/* we must validate the state again here */
				if (l_inp->sctp_flags & (SCTP_PCB_FLAGS_SOCKET_GONE|SCTP_PCB_FLAGS_SOCKET_ALLGONE)) {
					/* the user freed the ep */
					return (-16);
				}
				if (stcb->asoc.state == 0) {
					/* the assoc was freed? */
					return (-17);
				}
				/* we must add the address, no scope set */
				if (sctp_add_remote_addr(stcb, sa, 0, 5)) {
					return (-18);
				}
			} else if (stcb_tmp == stcb) {
				/* we must validate the state again here */
				if (l_inp->sctp_flags & (SCTP_PCB_FLAGS_SOCKET_GONE|SCTP_PCB_FLAGS_SOCKET_ALLGONE)) {
					/* the user freed the ep */
					return (-19);
				}
				if (stcb->asoc.state == 0) {
					/* the assoc was freed? */
					return (-20);
				}
				if (net != NULL) {
					/* clear flag */
					net->dest_state &=
					    ~SCTP_ADDR_NOT_IN_ASSOC;
				}
			} else {
				/* strange, address is in another assoc?
				 * straighten out locks.
				 */
				SCTP_TCB_UNLOCK(stcb_tmp);
				SCTP_INP_RLOCK(l_inp);
				/* we must validate the state again here */
				if (l_inp->sctp_flags & (SCTP_PCB_FLAGS_SOCKET_GONE|SCTP_PCB_FLAGS_SOCKET_ALLGONE)) {
					/* the user freed the ep */
					SCTP_INP_RUNLOCK(l_inp);
					return (-21);
				}
				if (stcb->asoc.state == 0) {
					/* the assoc was freed? */
					SCTP_INP_RUNLOCK(l_inp);
					return (-22);
				}
				SCTP_TCB_LOCK(stcb);
				SCTP_INP_RUNLOCK(l_inp);
				return (-23);
			}
		} else if (ptype == SCTP_ECN_CAPABLE) {
			stcb->asoc.ecn_allowed = 1;
		} else if (ptype == SCTP_ULP_ADAPTION) {
			if (stcb->asoc.state != SCTP_STATE_OPEN) {
				struct sctp_adaption_layer_indication ai, *aip;

				phdr = sctp_get_next_param(m, offset,
				    (struct sctp_paramhdr *)&ai, sizeof(ai));
				aip = (struct sctp_adaption_layer_indication *)phdr;
				sctp_ulp_notify(SCTP_NOTIFY_ADAPTION_INDICATION,
				    stcb, ntohl(aip->indication), NULL);
			}
		} else if (ptype == SCTP_SET_PRIM_ADDR) {
			struct sctp_asconf_addr_param lstore, *fee;
			struct sctp_asconf_addrv4_param *fii;
			int lptype;
			struct sockaddr *lsa = NULL;

			stcb->asoc.peer_supports_asconf = 1;
			stcb->asoc.peer_supports_asconf_setprim = 1;
			if (plen > sizeof(lstore)) {
				return (-24);
			}
			phdr = sctp_get_next_param(m, offset,
			    (struct sctp_paramhdr *)&lstore, plen);
			if (phdr == NULL) {
				return (-25);
			}
			fee = (struct sctp_asconf_addr_param *)phdr;
			lptype = ntohs(fee->addrp.ph.param_type);
			if (lptype == SCTP_IPV4_ADDRESS) {
				if (plen !=
				    sizeof(struct sctp_asconf_addrv4_param)) {
					kprintf("Sizeof setprim in init/init ack not %d but %d - ignored\n",
					    (int)sizeof(struct sctp_asconf_addrv4_param),
					    plen);
				} else {
					fii = (struct sctp_asconf_addrv4_param *)fee;
					sin.sin_addr.s_addr = fii->addrp.addr;
					lsa = (struct sockaddr *)&sin;
				}
			} else if (lptype == SCTP_IPV6_ADDRESS) {
				if (plen !=
				    sizeof(struct sctp_asconf_addr_param)) {
					kprintf("Sizeof setprim (v6) in init/init ack not %d but %d - ignored\n",
					    (int)sizeof(struct sctp_asconf_addr_param),
					    plen);
				} else {
					memcpy(sin6.sin6_addr.s6_addr,
					    fee->addrp.addr,
					    sizeof(fee->addrp.addr));
					lsa = (struct sockaddr *)&sin6;
				}
			}
			if (lsa) {
				sctp_set_primary_addr(stcb, sa, NULL);
			}
		} else if (ptype == SCTP_PRSCTP_SUPPORTED) {
			/* Peer supports pr-sctp */
			stcb->asoc.peer_supports_prsctp = 1;
		} else if (ptype == SCTP_SUPPORTED_CHUNK_EXT) {
			/* A supported extension chunk */
			struct sctp_supported_chunk_types_param *pr_supported;
			uint8_t local_store[128];
			int num_ent, i;

			phdr = sctp_get_next_param(m, offset,
			    (struct sctp_paramhdr *)&local_store, plen);
			if (phdr == NULL) {
				return (-26);
			}
			stcb->asoc.peer_supports_asconf = 0;
			stcb->asoc.peer_supports_asconf_setprim = 0;
			stcb->asoc.peer_supports_prsctp = 0;
			stcb->asoc.peer_supports_pktdrop = 0;
			stcb->asoc.peer_supports_strreset = 0;
			pr_supported = (struct sctp_supported_chunk_types_param *)phdr;
			num_ent = plen - sizeof(struct sctp_paramhdr);
			for (i = 0; i < num_ent; i++) {
				switch (pr_supported->chunk_types[i]) {
				case SCTP_ASCONF:
					stcb->asoc.peer_supports_asconf = 1;
					stcb->asoc.peer_supports_asconf_setprim = 1;
					break;
				case SCTP_ASCONF_ACK:
					stcb->asoc.peer_supports_asconf = 1;
					stcb->asoc.peer_supports_asconf_setprim = 1;
					break;
				case SCTP_FORWARD_CUM_TSN:
					stcb->asoc.peer_supports_prsctp = 1;
					break;
				case SCTP_PACKET_DROPPED:
					stcb->asoc.peer_supports_pktdrop = 1;
					break;
				case SCTP_STREAM_RESET:
					stcb->asoc.peer_supports_strreset = 1;
					break;
				default:
					/* one I have not learned yet */
					break;
				}
			}
		} else if (ptype == SCTP_ECN_NONCE_SUPPORTED) {
			/* Peer supports ECN-nonce */
			stcb->asoc.peer_supports_ecn_nonce = 1;
			stcb->asoc.ecn_nonce_allowed = 1;
		} else if ((ptype == SCTP_HEARTBEAT_INFO) ||
		    (ptype == SCTP_STATE_COOKIE) ||
		    (ptype == SCTP_UNRECOG_PARAM) ||
		    (ptype == SCTP_COOKIE_PRESERVE) ||
		    (ptype == SCTP_SUPPORTED_ADDRTYPE) ||
		    (ptype == SCTP_ADD_IP_ADDRESS) ||
		    (ptype == SCTP_DEL_IP_ADDRESS) ||
		    (ptype == SCTP_ERROR_CAUSE_IND) ||
		    (ptype == SCTP_SUCCESS_REPORT)) {
			/* don't care */
			;
		} else {
			if ((ptype & 0x8000) == 0x0000) {
				/* must stop processing the rest of
				 * the param's. Any report bits were
				 * handled with the call to sctp_arethere_unrecognized_parameters()
				 * when the INIT or INIT-ACK was first seen.
				 */
				break;
			}
		}
		offset += SCTP_SIZE32(plen);
		if (offset >= limit) {
			break;
		}
		phdr = sctp_get_next_param(m, offset, &parm_buf,
		    sizeof(parm_buf));
	}
	/* Now check to see if we need to purge any addresses */
	for (net = TAILQ_FIRST(&stcb->asoc.nets); net != NULL; net = net_tmp) {
		net_tmp = TAILQ_NEXT(net, sctp_next);
		if ((net->dest_state & SCTP_ADDR_NOT_IN_ASSOC) ==
		    SCTP_ADDR_NOT_IN_ASSOC) {
			/* This address has been removed from the asoc */
			/* remove and free it */
			stcb->asoc.numnets--;
			TAILQ_REMOVE(&stcb->asoc.nets, net, sctp_next);
			sctp_free_remote_addr(net);
			if (net == stcb->asoc.primary_destination) {
				stcb->asoc.primary_destination = NULL;
				sctp_select_primary_destination(stcb);
			}
		}
	}
	return (0);
}
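
/*
 * Illustrative sketch (not compiled): the generic TLV walk used by
 * sctp_load_addresses_from_init() above.  Every parameter is a
 * (type, length) header followed by a value, and the cursor always
 * advances by the 32-bit padded length.  Variable names here are
 * hypothetical; sctp_get_next_param() and SCTP_SIZE32() are the helpers
 * used in the function above.
 */
#if 0
	struct sctp_paramhdr buf, *ph;
	u_int16_t type, len;

	ph = sctp_get_next_param(m, offset, &buf, sizeof(buf));
	while (ph != NULL && offset < limit) {
		type = ntohs(ph->param_type);
		len = ntohs(ph->param_length);
		if (len == 0 || offset + len > limit)
			break;		/* malformed or runs off the chunk */
		/* ... dispatch on "type" here ... */
		offset += SCTP_SIZE32(len);	/* value plus 32-bit padding */
		ph = sctp_get_next_param(m, offset, &buf, sizeof(buf));
	}
#endif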

int
sctp_set_primary_addr(struct sctp_tcb *stcb, struct sockaddr *sa,
    struct sctp_nets *net)
{
	/* make sure the requested primary address exists in the assoc */
	if (net == NULL && sa)
		net = sctp_findnet(stcb, sa);

	if (net == NULL) {
		/* didn't find the requested primary address! */
		return (-1);
	} else {
		/* set the primary address */
		if (net->dest_state & SCTP_ADDR_UNCONFIRMED) {
			/* Must be confirmed */
			return (-1);
		}
		stcb->asoc.primary_destination = net;
		net->dest_state &= ~SCTP_ADDR_WAS_PRIMARY;
		return (0);
	}
}

int
sctp_is_vtag_good(struct sctp_inpcb *inp, u_int32_t tag, struct timeval *now)
{
	/*
	 * This function serves two purposes. It will see if a TAG can be
	 * re-used and return 1 for yes it is ok and 0 for don't use that
	 * tag.
	 * A secondary function it will do is purge out old tags that can
	 * be removed.
	 */
	struct sctpasochead *head;
	struct sctpvtaghead *chain;
	struct sctp_tagblock *twait_block;
	struct sctp_tcb *stcb;
	int i;

	SCTP_INP_INFO_WLOCK();
	chain = &sctppcbinfo.vtag_timewait[(tag % SCTP_STACK_VTAG_HASH_SIZE)];
	/* First is the vtag in use ? */

	head = &sctppcbinfo.sctp_asochash[SCTP_PCBHASH_ASOC(tag,
	    sctppcbinfo.hashasocmark)];
	if (head == NULL) {
		SCTP_INP_INFO_WUNLOCK();
		return (0);
	}
	LIST_FOREACH(stcb, head, sctp_asocs) {
		if (stcb->asoc.my_vtag == tag) {
			/* We should remove this if and
			 * return 0 always if we want vtags
			 * unique across all endpoints. For
			 * now within an endpoint is ok.
			 */
			if (inp == stcb->sctp_ep) {
				/* bad tag, in use */
				SCTP_INP_INFO_WUNLOCK();
				return (0);
			}
		}
	}
	if (!LIST_EMPTY(chain)) {
		/*
		 * Block(s) are present, let's see if we have this tag in
		 * the list
		 */
		LIST_FOREACH(twait_block, chain, sctp_nxt_tagblock) {
			for (i = 0; i < SCTP_NUMBER_IN_VTAG_BLOCK; i++) {
				if (twait_block->vtag_block[i].v_tag == 0) {
					/* not used */
					continue;
				} else if ((long)twait_block->vtag_block[i].tv_sec_at_expire >
				    now->tv_sec) {
					/* Audit expires this guy */
					twait_block->vtag_block[i].tv_sec_at_expire = 0;
					twait_block->vtag_block[i].v_tag = 0;
				} else if (twait_block->vtag_block[i].v_tag ==
				    tag) {
					/* Bad tag, sorry :< */
					SCTP_INP_INFO_WUNLOCK();
					return (0);
				}
			}
		}
	}
	/* Not found, ok to use the tag */
	SCTP_INP_INFO_WUNLOCK();
	return (1);
}
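
/*
 * Illustrative sketch (not compiled): how a caller might use
 * sctp_is_vtag_good() when choosing a new verification tag.  The
 * random-draw helper is hypothetical, and SCTP_GETTIME_TIMEVAL() is
 * assumed to be the stack's usual "current time" macro; the real code
 * draws candidate tags from its own random tag selection.
 */
#if 0
	u_int32_t tag;
	struct timeval now;

	SCTP_GETTIME_TIMEVAL(&now);
	do {
		tag = pick_a_random_tag();	/* hypothetical helper */
	} while (sctp_is_vtag_good(inp, tag, &now) == 0);
	/* "tag" is now neither in use on this endpoint nor in TIME-WAIT */
#endif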

/*
 * Delete the address from the endpoint local address list
 * Lookup using a sockaddr address (i.e. not an ifaddr)
 */
int
sctp_del_local_addr_ep_sa(struct sctp_inpcb *inp, struct sockaddr *sa)
{
	struct sctp_laddr *laddr;
	struct sockaddr *l_sa;
	int found = 0;
	/* Here is another function I cannot find a
	 * caller for. As such we SHOULD delete it
	 * if we have no users. If we find a user that
	 * user MUST have the INP locked.
	 */

	if (inp->sctp_flags & SCTP_PCB_FLAGS_BOUNDALL) {
		/* You are already bound to all. You have it already */
		return (EINVAL);
	}

	LIST_FOREACH(laddr, &inp->sctp_addr_list, sctp_nxt_addr) {
		/* make sure the address exists */
		if (laddr->ifa == NULL)
			continue;
		if (laddr->ifa->ifa_addr == NULL)
			continue;

		l_sa = laddr->ifa->ifa_addr;
		if (l_sa->sa_family == AF_INET6) {
			/* IPv6 address */
			struct sockaddr_in6 *sin1, *sin2;
			sin1 = (struct sockaddr_in6 *)l_sa;
			sin2 = (struct sockaddr_in6 *)sa;
			if (memcmp(&sin1->sin6_addr, &sin2->sin6_addr,
			    sizeof(struct in6_addr)) == 0) {
				/* matched */
				found = 1;
				break;
			}
		} else if (l_sa->sa_family == AF_INET) {
			/* IPv4 address */
			struct sockaddr_in *sin1, *sin2;
			sin1 = (struct sockaddr_in *)l_sa;
			sin2 = (struct sockaddr_in *)sa;
			if (sin1->sin_addr.s_addr == sin2->sin_addr.s_addr) {
				/* matched */
				found = 1;
				break;
			}
		} else {
			/* invalid family */
			return (-1);
		}
	}

	if (found && inp->laddr_count < 2) {
		/* can't delete unless there are at LEAST 2 addresses */
		return (-1);
	}
	if (found && (inp->sctp_flags & SCTP_PCB_FLAGS_BOUNDALL) == 0) {
		/*
		 * remove it from the ep list, this should NOT be
		 * done until its really gone from the interface list and
		 * we won't be receiving more of these. Probably right
		 * away. If we do allow a removal of an address from
		 * an association (sub-set bind) then this should NOT
		 * be called until all the ASCONF's come back from this
		 * association.
		 */
		sctp_remove_laddr(laddr);
	}
	return (0);
}

static void
sctp_drain_mbufs(struct sctp_inpcb *inp, struct sctp_tcb *stcb)
{
	/*
	 * We must hunt this association for MBUF's past the cumack
	 * (i.e. out of order data that we can renege on).
	 */
	struct sctp_association *asoc;
	struct sctp_tmit_chunk *chk, *nchk;
	u_int32_t cumulative_tsn_p1, tsn;
	int cnt, strmat, gap;
	/* We look for anything larger than the cum-ack + 1 */

	asoc = &stcb->asoc;
	cumulative_tsn_p1 = asoc->cumulative_tsn + 1;
	cnt = 0;
	/* First look in the re-assembly queue */
	chk = TAILQ_FIRST(&asoc->reasmqueue);
	while (chk) {
		/* Get the next one */
		nchk = TAILQ_NEXT(chk, sctp_next);
		if (compare_with_wrap(chk->rec.data.TSN_seq,
		    cumulative_tsn_p1, MAX_TSN)) {
			/* Yep it is above cum-ack */
			cnt++;
			tsn = chk->rec.data.TSN_seq;
			if (tsn >= asoc->mapping_array_base_tsn) {
				gap = tsn - asoc->mapping_array_base_tsn;
			} else {
				gap = (MAX_TSN - asoc->mapping_array_base_tsn) +
				    tsn + 1;
			}
			asoc->size_on_reasm_queue -= chk->send_size;
			asoc->cnt_on_reasm_queue--;
			SCTP_UNSET_TSN_PRESENT(asoc->mapping_array, gap);
			TAILQ_REMOVE(&asoc->reasmqueue, chk, sctp_next);
			if (chk->data) {
				sctp_m_freem(chk->data);
				chk->data = NULL;
			}
			SCTP_ZONE_FREE(sctppcbinfo.ipi_zone_chunk, chk);
			sctppcbinfo.ipi_count_chunk--;
			if ((int)sctppcbinfo.ipi_count_chunk < 0) {
				panic("Chunk count is negative");
			}
			sctppcbinfo.ipi_gencnt_chunk++;
		}
		chk = nchk;
	}
	/* Ok that was fun, now we will drain all the inbound streams? */
	for (strmat = 0; strmat < asoc->streamincnt; strmat++) {
		chk = TAILQ_FIRST(&asoc->strmin[strmat].inqueue);
		while (chk) {
			nchk = TAILQ_NEXT(chk, sctp_next);
			if (compare_with_wrap(chk->rec.data.TSN_seq,
			    cumulative_tsn_p1, MAX_TSN)) {
				/* Yep it is above cum-ack */
				cnt++;
				tsn = chk->rec.data.TSN_seq;
				if (tsn >= asoc->mapping_array_base_tsn) {
					gap = tsn -
					    asoc->mapping_array_base_tsn;
				} else {
					gap = (MAX_TSN -
					    asoc->mapping_array_base_tsn) +
					    tsn + 1;
				}
				asoc->size_on_all_streams -= chk->send_size;
				asoc->cnt_on_all_streams--;

				SCTP_UNSET_TSN_PRESENT(asoc->mapping_array,
				    gap);
				TAILQ_REMOVE(&asoc->strmin[strmat].inqueue,
				    chk, sctp_next);
				if (chk->data) {
					sctp_m_freem(chk->data);
					chk->data = NULL;
				}
				SCTP_ZONE_FREE(sctppcbinfo.ipi_zone_chunk, chk);
				sctppcbinfo.ipi_count_chunk--;
				if ((int)sctppcbinfo.ipi_count_chunk < 0) {
					panic("Chunk count is negative");
				}
				sctppcbinfo.ipi_gencnt_chunk++;
			}
			chk = nchk;
		}
	}
	/*
	 * Question, should we go through the delivery queue?
	 * The only reason things are on here is the app not reading OR a
	 * p-d-api up. An attacker COULD send enough in to initiate the
	 * PD-API and then send a bunch of stuff to other streams... these
	 * would wind up on the delivery queue.. and then we would not get
	 * to them. But in order to do this I then have to back-track and
	 * un-deliver sequence numbers in streams.. el-yucko. I think for
	 * now we will NOT look at the delivery queue and leave it to be
	 * something to consider later. An alternative would be to abort
	 * the P-D-API with a notification and then deliver the data....
	 * Or another method might be to keep track of how many times the
	 * situation occurs and if we see a possible attack underway just
	 * abort the association.
	 */
#ifdef SCTP_DEBUG
	if (sctp_debug_on & SCTP_DEBUG_PCB1) {
		if (cnt) {
			kprintf("Freed %d chunks from reneg harvest\n", cnt);
		}
	}
#endif /* SCTP_DEBUG */
	/*
	 * Another issue, in un-setting the TSN's in the mapping array we
	 * DID NOT adjust the highest_tsn marker. This will cause one of
	 * two things to occur. It may cause us to do extra work in checking
	 * for our mapping array movement. More importantly it may cause us
	 * to SACK every datagram. This may not be a bad thing though since
	 * we will recover once we get our cum-ack above and all this stuff
	 * we dumped recovered.
	 */
}
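
/*
 * Illustrative sketch (not compiled): the wrap-aware "gap" computed in
 * sctp_drain_mbufs() above is simply the distance from the mapping array
 * base TSN to the chunk's TSN, taken modulo the 32-bit TSN space.  The
 * sample numbers below are made up for the example.
 */
#if 0
	u_int32_t base = 0xfffffff0;	/* mapping_array_base_tsn near wrap */
	u_int32_t tsn = 0x00000005;	/* TSN that wrapped past zero */
	int gap;

	if (tsn >= base)
		gap = tsn - base;
	else
		gap = (MAX_TSN - base) + tsn + 1;	/* here: 15 + 5 + 1 = 21 */
#endif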

void
sctp_drain(void)
{
	/*
	 * We must walk the PCB lists for ALL associations here. The system
	 * is LOW on MBUF's and needs help. This is where reneging will
	 * occur. We really hope this does NOT happen!
	 */
	struct sctp_inpcb *inp;
	struct sctp_tcb *stcb;

	SCTP_INP_INFO_RLOCK();
	LIST_FOREACH(inp, &sctppcbinfo.listhead, sctp_list) {
		/* For each endpoint */
		SCTP_INP_RLOCK(inp);
		LIST_FOREACH(stcb, &inp->sctp_asoc_list, sctp_tcblist) {
			/* For each association */
			SCTP_TCB_LOCK(stcb);
			sctp_drain_mbufs(inp, stcb);
			SCTP_TCB_UNLOCK(stcb);
		}
		SCTP_INP_RUNLOCK(inp);
	}
	SCTP_INP_INFO_RUNLOCK();
}

int
sctp_add_to_socket_q(struct sctp_inpcb *inp, struct sctp_tcb *stcb)
{
	struct sctp_socket_q_list *sq;

	/* write lock on INP assumed */
	if ((inp == NULL) || (stcb == NULL)) {
		return (0);
	}
	sq = (struct sctp_socket_q_list *)SCTP_ZONE_GET(
	    sctppcbinfo.ipi_zone_sockq);
	if (sq == NULL) {
		/* out of sq structs */
		return (0);
	}
	sctppcbinfo.ipi_count_sockq++;
	sctppcbinfo.ipi_gencnt_sockq++;
	sq->tcb = stcb;
	stcb->asoc.cnt_msg_on_sb++;
	TAILQ_INSERT_TAIL(&inp->sctp_queue_list, sq, next_sq);
	return (1);
}

struct sctp_tcb *
sctp_remove_from_socket_q(struct sctp_inpcb *inp)
{
	struct sctp_tcb *stcb = NULL;
	struct sctp_socket_q_list *sq;

	/* W-Lock on INP assumed held */
	sq = TAILQ_FIRST(&inp->sctp_queue_list);
	if (sq == NULL)
		return (NULL);

	stcb = sq->tcb;
	TAILQ_REMOVE(&inp->sctp_queue_list, sq, next_sq);
	SCTP_ZONE_FREE(sctppcbinfo.ipi_zone_sockq, sq);
	sctppcbinfo.ipi_count_sockq--;
	sctppcbinfo.ipi_gencnt_sockq++;
	if (stcb) {
		stcb->asoc.cnt_msg_on_sb--;
	}
	return (stcb);
}
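
/*
 * Illustrative sketch (not compiled): the two socket-queue helpers above
 * are meant to be used as a pair while the INP write lock is held, per
 * the comments in the functions.  The lock macro names shown are an
 * assumption based on those comments; the exact names live in sctp_pcb.h.
 */
#if 0
	SCTP_INP_WLOCK(inp);
	if (sctp_add_to_socket_q(inp, stcb)) {
		/* one wakeup's worth of state is now queued on the inp */
	}
	/* ... later, when draining ... */
	stcb = sctp_remove_from_socket_q(inp);	/* NULL when the list is empty */
	SCTP_INP_WUNLOCK(inp);
#endif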

int
sctp_initiate_iterator(asoc_func af, uint32_t pcb_state, uint32_t asoc_state,
    void *argp, uint32_t argi, end_func ef,
    struct sctp_inpcb *s_inp)
{
	struct sctp_iterator *it = NULL;

	if (af == NULL) {
		return (-1);
	}
	MALLOC(it, struct sctp_iterator *, sizeof(struct sctp_iterator), M_PCB,
	    M_WAITOK);
	memset(it, 0, sizeof(*it));
	it->function_toapply = af;
	it->function_atend = ef;
	it->pointer = argp;
	it->val = argi;
	it->pcb_flags = pcb_state;
	it->asoc_state = asoc_state;
	if (s_inp) {
		it->inp = s_inp;
		it->iterator_flags = SCTP_ITERATOR_DO_SINGLE_INP;
	} else {
		SCTP_INP_INFO_RLOCK();
		it->inp = LIST_FIRST(&sctppcbinfo.listhead);
		SCTP_INP_INFO_RUNLOCK();
		it->iterator_flags = SCTP_ITERATOR_DO_ALL_INP;
	}
	/* Init the timer */
#if defined(__FreeBSD__) && __FreeBSD_version >= 500000
	callout_init(&it->tmr.timer, 0);
#else
	callout_init(&it->tmr.timer);
#endif
	/* add to the list of all iterators */
	SCTP_INP_INFO_WLOCK();
	LIST_INSERT_HEAD(&sctppcbinfo.iteratorhead, it, sctp_nxt_itr);
	SCTP_INP_INFO_WUNLOCK();
	crit_enter();
	sctp_iterator_timer(it);
	crit_exit();
	return (0);
}
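
/*
 * Illustrative sketch (not compiled): a hypothetical per-association
 * callback handed to sctp_initiate_iterator() above.  The parameter lists
 * assume the asoc_func/end_func typedefs declared in sctp_pcb.h
 * (endpoint, association, opaque pointer, opaque value); check the header
 * before relying on the exact signatures.
 */
#if 0
static void
example_asoc_walker(struct sctp_inpcb *inp, struct sctp_tcb *stcb,
    void *ptr, uint32_t val)
{
	/* inspect or update one association here */
}

static void
example_walk_done(void *ptr, uint32_t val)
{
	/* called once after the walk finishes */
}

	/* kick off a walk over every endpoint and every association */
	sctp_initiate_iterator(example_asoc_walker, 0, 0, NULL, 0,
	    example_walk_done, NULL);
#endif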

/*
 * Callout/Timer routines for OS that doesn't have them
 */
#ifdef _SCTP_NEEDS_CALLOUT_
#ifndef __APPLE__
extern int ticks;
#endif

void
callout_init(struct callout *c)
{
	bzero(c, sizeof(*c));
}

void
callout_reset(struct callout *c, int to_ticks, void (*ftn)(void *), void *arg)
{
	crit_enter();
	if (c->c_flags & CALLOUT_PENDING)
		callout_stop(c);

	/*
	 * We could spl down here and back up at the TAILQ_INSERT_TAIL,
	 * but there's no point since doing this setup doesn't take much
	 * time.
	 */
	if (to_ticks <= 0)
		to_ticks = 1;

	c->c_arg = arg;
	c->c_flags = (CALLOUT_ACTIVE | CALLOUT_PENDING);
	c->c_func = ftn;
#ifdef __APPLE__
	c->c_time = to_ticks;	/* just store the requested timeout */
	timeout(ftn, arg, to_ticks);
#else
	c->c_time = ticks + to_ticks;
	TAILQ_INSERT_TAIL(&sctppcbinfo.callqueue, c, tqe);
#endif
	crit_exit();
}

int
callout_stop(struct callout *c)
{
	crit_enter();
	/*
	 * Don't attempt to delete a callout that's not on the queue.
	 */
	if (!(c->c_flags & CALLOUT_PENDING)) {
		c->c_flags &= ~CALLOUT_ACTIVE;
		crit_exit();
		return (0);
	}
	c->c_flags &= ~(CALLOUT_ACTIVE | CALLOUT_PENDING | CALLOUT_FIRED);
#ifdef __APPLE__
	/* thread_call_cancel(c->c_call); */
	untimeout(c->c_func, c->c_arg);
#else
	TAILQ_REMOVE(&sctppcbinfo.callqueue, c, tqe);
#endif
	crit_exit();
	return (1);
}
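
/*
 * Illustrative sketch (not compiled): typical use of the fallback callout
 * API defined above.  A timer is armed for roughly one second (hz ticks),
 * fires example_timeout_handler(), and can be cancelled early with
 * callout_stop().  The handler name and its argument are hypothetical.
 */
#if 0
static struct callout example_callout;

static void
example_timeout_handler(void *arg)
{
	/* runs once, roughly hz ticks after callout_reset() */
}

	callout_init(&example_callout);
	callout_reset(&example_callout, hz, example_timeout_handler, NULL);
	/* ... */
	if (callout_stop(&example_callout) == 0) {
		/* not pending: it already fired or was never armed */
	}
#endif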
#if !defined(__APPLE__)
void
sctp_fasttim(void)
{
	struct callout *c, *n;
	struct calloutlist locallist;
	int inited = 0;

	crit_enter();
	/* run through and subtract and mark all callouts */
	c = TAILQ_FIRST(&sctppcbinfo.callqueue);
	while (c) {
		n = TAILQ_NEXT(c, tqe);
		if (c->c_time <= ticks) {
			c->c_flags |= CALLOUT_FIRED;
			c->c_time = 0;
			TAILQ_REMOVE(&sctppcbinfo.callqueue, c, tqe);
			if (inited == 0) {
				TAILQ_INIT(&locallist);
				inited = 1;
			}
			/* move off of main list */
			TAILQ_INSERT_TAIL(&locallist, c, tqe);
		}
		c = n;
	}
	/* Now all the ones on the locallist must be called */
	if (inited) {
		c = TAILQ_FIRST(&locallist);
		while (c) {
			/* remove it */
			TAILQ_REMOVE(&locallist, c, tqe);
			/* now validate that it did not get canceled */
			if (c->c_flags & CALLOUT_FIRED) {
				c->c_flags &= ~CALLOUT_PENDING;
				crit_exit();
				(*c->c_func)(c->c_arg);
				crit_enter();
			}
			c = TAILQ_FIRST(&locallist);
		}
	}
	crit_exit();
}
#endif
#endif /* _SCTP_NEEDS_CALLOUT_ */