[dragonfly.git] / sys / netinet / sctp_pcb.c
blob 6ca91617e44878feb9927edc1a53e04e1f3f4aa7
1 /* $KAME: sctp_pcb.c,v 1.37 2004/08/17 06:28:02 t-momose Exp $ */
2 /* $DragonFly: src/sys/netinet/sctp_pcb.c,v 1.14 2008/03/07 11:34:20 sephe Exp $ */
4 /*
5 * Copyright (c) 2001, 2002, 2003, 2004 Cisco Systems, Inc.
6 * All rights reserved.
8 * Redistribution and use in source and binary forms, with or without
9 * modification, are permitted provided that the following conditions
10 * are met:
11 * 1. Redistributions of source code must retain the above copyright
12 * notice, this list of conditions and the following disclaimer.
13 * 2. Redistributions in binary form must reproduce the above copyright
14 * notice, this list of conditions and the following disclaimer in the
15 * documentation and/or other materials provided with the distribution.
16 * 3. All advertising materials mentioning features or use of this software
17 * must display the following acknowledgement:
18 * This product includes software developed by Cisco Systems, Inc.
19 * 4. Neither the name of the project nor the names of its contributors
20 * may be used to endorse or promote products derived from this software
21 * without specific prior written permission.
23 * THIS SOFTWARE IS PROVIDED BY CISCO SYSTEMS AND CONTRIBUTORS ``AS IS'' AND
24 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
25 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
26 * ARE DISCLAIMED. IN NO EVENT SHALL CISCO SYSTEMS OR CONTRIBUTORS BE LIABLE
27 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
28 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
29 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
30 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
31 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
32 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
33 * SUCH DAMAGE.
35 #if !(defined(__OpenBSD__) || defined(__APPLE__))
36 #include "opt_ipsec.h"
37 #endif
38 #if defined(__FreeBSD__) || defined(__DragonFly__)
39 #include "opt_compat.h"
40 #include "opt_inet6.h"
41 #include "opt_inet.h"
42 #endif
43 #if defined(__NetBSD__)
44 #include "opt_inet.h"
45 #endif
46 #ifdef __APPLE__
47 #include <sctp.h>
48 #elif !defined(__OpenBSD__)
49 #include "opt_sctp.h"
50 #endif
52 #include <sys/param.h>
53 #include <sys/systm.h>
54 #include <sys/malloc.h>
55 #include <sys/mbuf.h>
56 #include <sys/domain.h>
57 #include <sys/protosw.h>
58 #include <sys/socket.h>
59 #include <sys/socketvar.h>
60 #include <sys/proc.h>
61 #include <sys/kernel.h>
62 #include <sys/sysctl.h>
63 #include <sys/thread2.h>
64 #if defined(__FreeBSD__) || defined(__APPLE__) || defined(__DragonFly__)
65 #include <sys/random.h>
66 #endif
67 #if defined(__NetBSD__)
68 #include <sys/rnd.h>
69 #endif
70 #if defined(__OpenBSD__)
71 #include <dev/rndvar.h>
72 #endif
74 #if defined(__APPLE__)
75 #include <netinet/sctp_callout.h>
76 #elif defined(__OpenBSD__)
77 #include <sys/timeout.h>
78 #else
79 #include <sys/callout.h>
80 #endif
82 #if (defined(__FreeBSD__) && __FreeBSD_version >= 500000)
83 #include <sys/limits.h>
84 #else
85 #include <machine/limits.h>
86 #endif
87 #include <machine/cpu.h>
89 #include <net/if.h>
90 #include <net/if_types.h>
91 #include <net/route.h>
92 #include <netinet/in.h>
93 #include <netinet/in_systm.h>
94 #include <netinet/ip.h>
95 #include <netinet/in_pcb.h>
96 #include <netinet/in_var.h>
97 #include <netinet/ip_var.h>
99 #ifdef INET6
100 #include <netinet/ip6.h>
101 #include <netinet6/ip6_var.h>
102 #include <netinet6/scope6_var.h>
103 #if defined(__FreeBSD__) || defined(__NetBSD__) || defined(__DragonFly__)
104 #include <netinet6/in6_pcb.h>
105 #elif defined(__OpenBSD__)
106 #include <netinet/in_pcb.h>
107 #endif
108 #endif /* INET6 */
110 #ifdef IPSEC
111 #ifndef __OpenBSD__
112 #include <netinet6/ipsec.h>
113 #include <netproto/key/key.h>
114 #else
115 #undef IPSEC
116 #endif
117 #endif /* IPSEC */
119 #include <netinet/sctp_var.h>
120 #include <netinet/sctp_pcb.h>
121 #include <netinet/sctputil.h>
122 #include <netinet/sctp.h>
123 #include <netinet/sctp_header.h>
124 #include <netinet/sctp_asconf.h>
125 #include <netinet/sctp_output.h>
126 #include <netinet/sctp_timer.h>
128 #ifndef SCTP_PCBHASHSIZE
129 /* default number of association hash buckets in each endpoint */
130 #define SCTP_PCBHASHSIZE 256
131 #endif
133 #ifdef SCTP_DEBUG
134 u_int32_t sctp_debug_on = 0;
135 #endif /* SCTP_DEBUG */
137 u_int32_t sctp_pegs[SCTP_NUMBER_OF_PEGS];
139 int sctp_pcbtblsize = SCTP_PCBHASHSIZE;
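/*
 * sctppcbinfo below is the global PCB bookkeeping structure used
 * throughout this file: it carries the list of all endpoints, the
 * endpoint/association hash tables and their marks, the ipi_count_*
 * statistics, and the zones the PCB-related structures come from.
 */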
141 struct sctp_epinfo sctppcbinfo;
143 /* FIX: we don't handle multiple link local scopes */
144 /* "scopeless" replacement IN6_ARE_ADDR_EQUAL */
146 SCTP6_ARE_ADDR_EQUAL(struct in6_addr *a, struct in6_addr *b)
148 struct in6_addr tmp_a, tmp_b;
149 /* use a copy of a and b */
150 tmp_a = *a;
151 tmp_b = *b;
152 in6_clearscope(&tmp_a);
153 in6_clearscope(&tmp_b);
154 return (IN6_ARE_ADDR_EQUAL(&tmp_a, &tmp_b));
157 #ifdef __OpenBSD__
158 extern int ipport_firstauto;
159 extern int ipport_lastauto;
160 extern int ipport_hifirstauto;
161 extern int ipport_hilastauto;
162 #endif
164 #if defined(__FreeBSD__) && __FreeBSD_version > 500000
166 #ifndef xyzzy
167 void sctp_validate_no_locks(void);
169 void
170 SCTP_INP_RLOCK(struct sctp_inpcb *inp)
172 struct sctp_tcb *stcb;
173 LIST_FOREACH(stcb, &inp->sctp_asoc_list, sctp_tcblist) {
174 if (mtx_owned(&(stcb)->tcb_mtx))
175 panic("I own TCB lock?");
177 if (mtx_owned(&(inp)->inp_mtx))
178 panic("INP Recursive Lock-R");
179 mtx_lock(&(inp)->inp_mtx);
182 void
183 SCTP_INP_WLOCK(struct sctp_inpcb *inp)
185 SCTP_INP_RLOCK(inp);
188 void
189 SCTP_INP_INFO_RLOCK(void)
191 struct sctp_inpcb *inp;
192 struct sctp_tcb *stcb;
193 LIST_FOREACH(inp, &sctppcbinfo.listhead, sctp_list) {
194 if (mtx_owned(&(inp)->inp_mtx))
195 panic("info-lock and own inp lock?");
196 LIST_FOREACH(stcb, &inp->sctp_asoc_list, sctp_tcblist) {
197 if (mtx_owned(&(stcb)->tcb_mtx))
198 panic("Info lock and own a tcb lock?");
201 if (mtx_owned(&sctppcbinfo.ipi_ep_mtx))
202 panic("INP INFO Recursive Lock-R");
203 mtx_lock(&sctppcbinfo.ipi_ep_mtx);
206 void
207 SCTP_INP_INFO_WLOCK(void)
209 SCTP_INP_INFO_RLOCK();
213 void sctp_validate_no_locks(void)
215 struct sctp_inpcb *inp;
216 struct sctp_tcb *stcb;
218 if (mtx_owned(&sctppcbinfo.ipi_ep_mtx))
219 panic("INP INFO lock is owned?");
221 LIST_FOREACH(inp, &sctppcbinfo.listhead, sctp_list) {
222 if (mtx_owned(&(inp)->inp_mtx))
223 panic("You own an INP lock?");
224 LIST_FOREACH(stcb, &inp->sctp_asoc_list, sctp_tcblist) {
225 if (mtx_owned(&(stcb)->tcb_mtx))
226 panic("You own a TCB lock?");
231 #endif
232 #endif
234 void
235 sctp_fill_pcbinfo(struct sctp_pcbinfo *spcb)
237 /* We really don't need
238 * to lock this, but I will
239 * just because it does not hurt.
241 SCTP_INP_INFO_RLOCK();
242 spcb->ep_count = sctppcbinfo.ipi_count_ep;
243 spcb->asoc_count = sctppcbinfo.ipi_count_asoc;
244 spcb->laddr_count = sctppcbinfo.ipi_count_laddr;
245 spcb->raddr_count = sctppcbinfo.ipi_count_raddr;
246 spcb->chk_count = sctppcbinfo.ipi_count_chunk;
247 spcb->sockq_count = sctppcbinfo.ipi_count_sockq;
248 spcb->mbuf_track = sctppcbinfo.mbuf_track;
249 SCTP_INP_INFO_RUNLOCK();
254 * Notes on locks for FreeBSD 5 and up. All association
255 * lookups that have a definite ep, the INP structure is
256 * assumed to be locked for reading. If we need to go
257 * find the INP (usually when a **inp is passed) then
258 * we must lock the INFO structure first and if needed
259 * lock the INP too. Note that if we lock it we must
265 * Given an endpoint, look and find in its association list any association
266 * with the "to" address given. This can be a "from" address, too, for
267 * inbound packets. For outbound packets it is a true "to" address.
269 static struct sctp_tcb *
270 sctp_tcb_special_locate(struct sctp_inpcb **inp_p, struct sockaddr *from,
271 struct sockaddr *to, struct sctp_nets **netp)
273 /**** ASSUMES THE CALLER holds the INP_INFO_RLOCK */
276 * Note for this module care must be taken when observing what "to" is
277 * used for. In most of the rest of the code the TO field represents my
278 * peer and the FROM field represents my address. For this module it
279 * is the reverse of that.
282 * If we support the TCP model, then we must now dig through to
283 * see if we can find our endpoint in the list of tcp ep's.
285 uint16_t lport, rport;
286 struct sctppcbhead *ephead;
287 struct sctp_inpcb *inp;
288 struct sctp_laddr *laddr;
289 struct sctp_tcb *stcb;
290 struct sctp_nets *net;
292 if ((to == NULL) || (from == NULL)) {
293 return (NULL);
296 if (to->sa_family == AF_INET && from->sa_family == AF_INET) {
297 lport = ((struct sockaddr_in *)to)->sin_port;
298 rport = ((struct sockaddr_in *)from)->sin_port;
299 } else if (to->sa_family == AF_INET6 && from->sa_family == AF_INET6) {
300 lport = ((struct sockaddr_in6 *)to)->sin6_port;
301 rport = ((struct sockaddr_in6 *)from)->sin6_port;
302 } else {
303 return NULL;
305 ephead = &sctppcbinfo.sctp_tcpephash[SCTP_PCBHASH_ALLADDR(
306 (lport + rport), sctppcbinfo.hashtcpmark)];
308 * Ok now for each of the guys in this bucket we must look
309 * and see:
310 * - Does the remote port match?
311 * - Do the addresses of its single association match this
312 * address (to)?
313 * If so we update p_ep to point to this ep and return the
314 * tcb from it.
316 LIST_FOREACH(inp, ephead, sctp_hash) {
317 if (lport != inp->sctp_lport) {
318 continue;
320 SCTP_INP_RLOCK(inp);
321 /* check to see if the ep has one of the addresses */
322 if ((inp->sctp_flags & SCTP_PCB_FLAGS_BOUNDALL) == 0) {
323 /* We are NOT bound all, so look further */
324 int match = 0;
326 LIST_FOREACH(laddr, &inp->sctp_addr_list, sctp_nxt_addr) {
327 if (laddr->ifa == NULL) {
328 #ifdef SCTP_DEBUG
329 if (sctp_debug_on & SCTP_DEBUG_PCB1) {
330 kprintf("An ounce of prevention is worth a pound of cure\n");
332 #endif
333 continue;
335 if (laddr->ifa->ifa_addr == NULL) {
336 #ifdef SCTP_DEBUG
337 if (sctp_debug_on & SCTP_DEBUG_PCB1) {
338 kprintf("ifa with a NULL address\n");
340 #endif
341 continue;
343 if (laddr->ifa->ifa_addr->sa_family ==
344 to->sa_family) {
345 /* see if it matches */
346 struct sockaddr_in *intf_addr, *sin;
347 intf_addr = (struct sockaddr_in *)
348 laddr->ifa->ifa_addr;
349 sin = (struct sockaddr_in *)to;
350 if (from->sa_family == AF_INET) {
351 if (sin->sin_addr.s_addr ==
352 intf_addr->sin_addr.s_addr) {
353 match = 1;
354 SCTP_INP_RUNLOCK(inp);
355 break;
357 } else {
358 struct sockaddr_in6 *intf_addr6;
359 struct sockaddr_in6 *sin6;
360 sin6 = (struct sockaddr_in6 *)
362 intf_addr6 = (struct sockaddr_in6 *)
363 laddr->ifa->ifa_addr;
365 if (SCTP6_ARE_ADDR_EQUAL(&sin6->sin6_addr,
366 &intf_addr6->sin6_addr)) {
367 match = 1;
368 SCTP_INP_RUNLOCK(inp);
369 break;
374 if (match == 0) {
375 /* This endpoint does not have this address */
376 SCTP_INP_RUNLOCK(inp);
377 continue;
381 * Ok if we hit here the ep has the address, does it hold the
382 * tcb?
385 stcb = LIST_FIRST(&inp->sctp_asoc_list);
386 if (stcb == NULL) {
387 SCTP_INP_RUNLOCK(inp);
388 continue;
390 SCTP_TCB_LOCK(stcb);
391 if (stcb->rport != rport) {
392 /* remote port does not match. */
393 SCTP_TCB_UNLOCK(stcb);
394 SCTP_INP_RUNLOCK(inp);
395 continue;
397 /* Does this TCB have a matching address? */
398 TAILQ_FOREACH(net, &stcb->asoc.nets, sctp_next) {
399 if (net->ro._l_addr.sa.sa_family != from->sa_family) {
400 /* not the same family, can't be a match */
401 continue;
403 if (from->sa_family == AF_INET) {
404 struct sockaddr_in *sin, *rsin;
405 sin = (struct sockaddr_in *)&net->ro._l_addr;
406 rsin = (struct sockaddr_in *)from;
407 if (sin->sin_addr.s_addr ==
408 rsin->sin_addr.s_addr) {
409 /* found it */
410 if (netp != NULL) {
411 *netp = net;
413 /* Update the endpoint pointer */
414 *inp_p = inp;
415 SCTP_INP_RUNLOCK(inp);
416 return (stcb);
418 } else {
419 struct sockaddr_in6 *sin6, *rsin6;
420 sin6 = (struct sockaddr_in6 *)&net->ro._l_addr;
421 rsin6 = (struct sockaddr_in6 *)from;
422 if (SCTP6_ARE_ADDR_EQUAL(&sin6->sin6_addr,
423 &rsin6->sin6_addr)) {
424 /* found it */
425 if (netp != NULL) {
426 *netp = net;
428 /* Update the endpoint pointer */
429 *inp_p = inp;
430 SCTP_INP_RUNLOCK(inp);
431 return (stcb);
435 SCTP_TCB_UNLOCK(stcb);
437 SCTP_INP_RUNLOCK(inp);
439 return (NULL);
442 struct sctp_tcb *
443 sctp_findassociation_ep_asconf(struct mbuf *m, int iphlen, int offset,
444 struct sctphdr *sh, struct sctp_inpcb **inp_p, struct sctp_nets **netp)
446 struct sctp_tcb *stcb;
447 struct sockaddr_in *sin;
448 struct sockaddr_in6 *sin6;
449 struct sockaddr_storage local_store, remote_store;
450 struct ip *iph;
451 struct sctp_paramhdr parm_buf, *phdr;
452 int ptype;
454 memset(&local_store, 0, sizeof(local_store));
455 memset(&remote_store, 0, sizeof(remote_store));
457 /* First get the destination address setup too. */
458 iph = mtod(m, struct ip *);
459 if (iph->ip_v == IPVERSION) {
460 /* its IPv4 */
461 sin = (struct sockaddr_in *)&local_store;
462 sin->sin_family = AF_INET;
463 sin->sin_len = sizeof(*sin);
464 sin->sin_port = sh->dest_port;
465 sin->sin_addr.s_addr = iph->ip_dst.s_addr ;
466 } else if (iph->ip_v == (IPV6_VERSION >> 4)) {
467 /* its IPv6 */
468 struct ip6_hdr *ip6;
469 ip6 = mtod(m, struct ip6_hdr *);
470 sin6 = (struct sockaddr_in6 *)&local_store;
471 sin6->sin6_family = AF_INET6;
472 sin6->sin6_len = sizeof(*sin6);
473 sin6->sin6_port = sh->dest_port;
474 sin6->sin6_addr = ip6->ip6_dst;
475 } else {
476 return NULL;
479 phdr = sctp_get_next_param(m, offset + sizeof(struct sctp_asconf_chunk),
480 &parm_buf, sizeof(struct sctp_paramhdr));
481 if (phdr == NULL) {
482 #ifdef SCTP_DEBUG
483 if (sctp_debug_on & SCTP_DEBUG_INPUT3) {
484 kprintf("sctp_process_control: failed to get asconf lookup addr\n");
486 #endif /* SCTP_DEBUG */
487 return NULL;
489 ptype = (int)((u_int)ntohs(phdr->param_type));
490 /* get the correlation address */
491 if (ptype == SCTP_IPV6_ADDRESS) {
492 /* ipv6 address param */
493 struct sctp_ipv6addr_param *p6, p6_buf;
494 if (ntohs(phdr->param_length) != sizeof(struct sctp_ipv6addr_param)) {
495 return NULL;
498 p6 = (struct sctp_ipv6addr_param *)sctp_get_next_param(m,
499 offset + sizeof(struct sctp_asconf_chunk),
500 &p6_buf.ph, sizeof(*p6));
501 if (p6 == NULL) {
502 #ifdef SCTP_DEBUG
503 if (sctp_debug_on & SCTP_DEBUG_INPUT3) {
504 kprintf("sctp_process_control: failed to get asconf v6 lookup addr\n");
506 #endif /* SCTP_DEBUG */
507 return (NULL);
509 sin6 = (struct sockaddr_in6 *)&remote_store;
510 sin6->sin6_family = AF_INET6;
511 sin6->sin6_len = sizeof(*sin6);
512 sin6->sin6_port = sh->src_port;
513 memcpy(&sin6->sin6_addr, &p6->addr, sizeof(struct in6_addr));
514 } else if (ptype == SCTP_IPV4_ADDRESS) {
515 /* ipv4 address param */
516 struct sctp_ipv4addr_param *p4, p4_buf;
517 if (ntohs(phdr->param_length) != sizeof(struct sctp_ipv4addr_param)) {
518 return NULL;
521 p4 = (struct sctp_ipv4addr_param *)sctp_get_next_param(m,
522 offset + sizeof(struct sctp_asconf_chunk),
523 &p4_buf.ph, sizeof(*p4));
524 if (p4 == NULL) {
525 #ifdef SCTP_DEBUG
526 if (sctp_debug_on & SCTP_DEBUG_INPUT3) {
527 kprintf("sctp_process_control: failed to get asconf v4 lookup addr\n");
529 #endif /* SCTP_DEBUG */
530 return (NULL);
532 sin = (struct sockaddr_in *)&remote_store;
533 sin->sin_family = AF_INET;
534 sin->sin_len = sizeof(*sin);
535 sin->sin_port = sh->src_port;
536 memcpy(&sin->sin_addr, &p4->addr, sizeof(struct in_addr));
537 } else {
538 /* invalid address param type */
539 return NULL;
542 stcb = sctp_findassociation_ep_addr(inp_p,
543 (struct sockaddr *)&remote_store, netp,
544 (struct sockaddr *)&local_store, NULL);
545 return (stcb);
548 struct sctp_tcb *
549 sctp_findassociation_ep_addr(struct sctp_inpcb **inp_p, struct sockaddr *remote,
550 struct sctp_nets **netp, struct sockaddr *local, struct sctp_tcb *locked_tcb)
552 struct sctpasochead *head;
553 struct sctp_inpcb *inp;
554 struct sctp_tcb *stcb;
555 struct sctp_nets *net;
556 uint16_t rport;
558 inp = *inp_p;
559 if (remote->sa_family == AF_INET) {
560 rport = (((struct sockaddr_in *)remote)->sin_port);
561 } else if (remote->sa_family == AF_INET6) {
562 rport = (((struct sockaddr_in6 *)remote)->sin6_port);
563 } else {
564 return (NULL);
566 if (locked_tcb) {
567 /* UN-lock so we can do proper locking here
568 * this occurs when called from load_addresses_from_init.
570 SCTP_TCB_UNLOCK(locked_tcb);
572 SCTP_INP_INFO_RLOCK();
573 if (inp->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) {
575 * Now either this guy is our listener or it's the connector.
576 * If it is the one that issued the connect, then its only
577 * chance is to be the first TCB in the list. If it is the
578 * acceptor, then do the special_lookup to hash and find the
579 * real inp.
581 if (inp->sctp_flags & SCTP_PCB_FLAGS_ACCEPTING) {
582 /* to is peer addr, from is my addr */
583 stcb = sctp_tcb_special_locate(inp_p, remote, local,
584 netp);
585 if ((stcb != NULL) && (locked_tcb == NULL)){
586 /* we have a locked tcb, lower refcount */
587 SCTP_INP_WLOCK(inp);
588 SCTP_INP_DECR_REF(inp);
589 SCTP_INP_WUNLOCK(inp);
591 if (locked_tcb != NULL) {
592 SCTP_INP_RLOCK(locked_tcb->sctp_ep);
593 SCTP_TCB_LOCK(locked_tcb);
594 SCTP_INP_RUNLOCK(locked_tcb->sctp_ep);
595 if (stcb != NULL)
596 SCTP_TCB_UNLOCK(stcb);
598 SCTP_INP_INFO_RUNLOCK();
599 return (stcb);
600 } else {
601 SCTP_INP_WLOCK(inp);
602 stcb = LIST_FIRST(&inp->sctp_asoc_list);
603 if (stcb == NULL) {
604 goto null_return;
606 SCTP_TCB_LOCK(stcb);
607 if (stcb->rport != rport) {
608 /* remote port does not match. */
609 SCTP_TCB_UNLOCK(stcb);
610 goto null_return;
612 /* now look at the list of remote addresses */
613 TAILQ_FOREACH(net, &stcb->asoc.nets, sctp_next) {
614 if (net->ro._l_addr.sa.sa_family !=
615 remote->sa_family) {
616 /* not the same family */
617 continue;
619 if (remote->sa_family == AF_INET) {
620 struct sockaddr_in *sin, *rsin;
621 sin = (struct sockaddr_in *)
622 &net->ro._l_addr;
623 rsin = (struct sockaddr_in *)remote;
624 if (sin->sin_addr.s_addr ==
625 rsin->sin_addr.s_addr) {
626 /* found it */
627 if (netp != NULL) {
628 *netp = net;
630 if (locked_tcb == NULL) {
631 SCTP_INP_DECR_REF(inp);
633 SCTP_INP_WUNLOCK(inp);
634 SCTP_INP_INFO_RUNLOCK();
635 return (stcb);
637 } else if (remote->sa_family == AF_INET6) {
638 struct sockaddr_in6 *sin6, *rsin6;
639 sin6 = (struct sockaddr_in6 *)&net->ro._l_addr;
640 rsin6 = (struct sockaddr_in6 *)remote;
641 if (SCTP6_ARE_ADDR_EQUAL(&sin6->sin6_addr,
642 &rsin6->sin6_addr)) {
643 /* found it */
644 if (netp != NULL) {
645 *netp = net;
647 if (locked_tcb == NULL) {
648 SCTP_INP_DECR_REF(inp);
650 SCTP_INP_WUNLOCK(inp);
651 SCTP_INP_INFO_RUNLOCK();
652 return (stcb);
656 SCTP_TCB_UNLOCK(stcb);
658 } else {
659 SCTP_INP_WLOCK(inp);
660 head = &inp->sctp_tcbhash[SCTP_PCBHASH_ALLADDR(rport,
661 inp->sctp_hashmark)];
662 if (head == NULL) {
663 goto null_return;
665 LIST_FOREACH(stcb, head, sctp_tcbhash) {
666 if (stcb->rport != rport) {
667 /* remote port does not match */
668 continue;
670 /* now look at the list of remote addresses */
671 SCTP_TCB_LOCK(stcb);
672 TAILQ_FOREACH(net, &stcb->asoc.nets, sctp_next) {
673 if (net->ro._l_addr.sa.sa_family !=
674 remote->sa_family) {
675 /* not the same family */
676 continue;
678 if (remote->sa_family == AF_INET) {
679 struct sockaddr_in *sin, *rsin;
680 sin = (struct sockaddr_in *)
681 &net->ro._l_addr;
682 rsin = (struct sockaddr_in *)remote;
683 if (sin->sin_addr.s_addr ==
684 rsin->sin_addr.s_addr) {
685 /* found it */
686 if (netp != NULL) {
687 *netp = net;
689 if (locked_tcb == NULL) {
690 SCTP_INP_DECR_REF(inp);
692 SCTP_INP_WUNLOCK(inp);
693 SCTP_INP_INFO_RUNLOCK();
694 return (stcb);
696 } else if (remote->sa_family == AF_INET6) {
697 struct sockaddr_in6 *sin6, *rsin6;
698 sin6 = (struct sockaddr_in6 *)
699 &net->ro._l_addr;
700 rsin6 = (struct sockaddr_in6 *)remote;
701 if (SCTP6_ARE_ADDR_EQUAL(&sin6->sin6_addr,
702 &rsin6->sin6_addr)) {
703 /* found it */
704 if (netp != NULL) {
705 *netp = net;
707 if (locked_tcb == NULL) {
708 SCTP_INP_DECR_REF(inp);
710 SCTP_INP_WUNLOCK(inp);
711 SCTP_INP_INFO_RUNLOCK();
712 return (stcb);
716 SCTP_TCB_UNLOCK(stcb);
719 null_return:
720 /* clean up for returning null */
721 if (locked_tcb){
722 if (locked_tcb->sctp_ep != inp) {
723 SCTP_INP_RLOCK(locked_tcb->sctp_ep);
724 SCTP_TCB_LOCK(locked_tcb);
725 SCTP_INP_RUNLOCK(locked_tcb->sctp_ep);
726 } else
727 SCTP_TCB_LOCK(locked_tcb);
729 SCTP_INP_WUNLOCK(inp);
730 SCTP_INP_INFO_RUNLOCK();
731 /* not found */
732 return (NULL);
736 * Find an association for a specific endpoint using the association id
737 * given out in the COMM_UP notification
739 struct sctp_tcb *
740 sctp_findassociation_ep_asocid(struct sctp_inpcb *inp, caddr_t asoc_id)
743 * Use the assoc_id to find an endpoint
745 struct sctpasochead *head;
746 struct sctp_tcb *stcb;
747 u_int32_t vtag;
749 if (asoc_id == 0 || inp == NULL) {
750 return (NULL);
752 SCTP_INP_INFO_RLOCK();
753 vtag = (u_int32_t)asoc_id;
754 head = &sctppcbinfo.sctp_asochash[SCTP_PCBHASH_ASOC(vtag,
755 sctppcbinfo.hashasocmark)];
756 if (head == NULL) {
757 /* invalid vtag */
758 SCTP_INP_INFO_RUNLOCK();
759 return (NULL);
761 LIST_FOREACH(stcb, head, sctp_asocs) {
762 SCTP_INP_RLOCK(stcb->sctp_ep);
763 SCTP_TCB_LOCK(stcb);
764 SCTP_INP_RUNLOCK(stcb->sctp_ep);
765 if (stcb->asoc.my_vtag == vtag) {
766 /* candidate */
767 if (inp != stcb->sctp_ep) {
768 /* some other guy has the
769 * same vtag active (vtag collision).
771 sctp_pegs[SCTP_VTAG_BOGUS]++;
772 SCTP_TCB_UNLOCK(stcb);
773 continue;
775 sctp_pegs[SCTP_VTAG_EXPR]++;
776 SCTP_INP_INFO_RUNLOCK();
777 return (stcb);
779 SCTP_TCB_UNLOCK(stcb);
781 SCTP_INP_INFO_RUNLOCK();
782 return (NULL);
785 static struct sctp_inpcb *
786 sctp_endpoint_probe(struct sockaddr *nam, struct sctppcbhead *head,
787 uint16_t lport)
789 struct sctp_inpcb *inp;
790 struct sockaddr_in *sin;
791 struct sockaddr_in6 *sin6;
792 struct sctp_laddr *laddr;
794 /* Endpoint probe expects
795 * that the INP_INFO is locked.
797 if (nam->sa_family == AF_INET) {
798 sin = (struct sockaddr_in *)nam;
799 sin6 = NULL;
800 } else if (nam->sa_family == AF_INET6) {
801 sin6 = (struct sockaddr_in6 *)nam;
802 sin = NULL;
803 } else {
804 /* unsupported family */
805 return (NULL);
807 if (head == NULL)
808 return (NULL);
810 LIST_FOREACH(inp, head, sctp_hash) {
811 SCTP_INP_RLOCK(inp);
813 if ((inp->sctp_flags & SCTP_PCB_FLAGS_BOUNDALL) &&
814 (inp->sctp_lport == lport)) {
815 /* got it */
816 if ((nam->sa_family == AF_INET) &&
817 (inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) &&
818 #if defined(__FreeBSD__) || defined(__APPLE__) || defined(__DragonFly__)
819 (((struct inpcb *)inp)->inp_flags & IN6P_IPV6_V6ONLY)
820 #else
821 #if defined(__OpenBSD__)
822 (0) /* For open bsd we do dual bind only */
823 #else
824 (((struct in6pcb *)inp)->in6p_flags & IN6P_IPV6_V6ONLY)
825 #endif
826 #endif
828 /* IPv4 on a IPv6 socket with ONLY IPv6 set */
829 SCTP_INP_RUNLOCK(inp);
830 continue;
832 /* A V6 address and the endpoint is NOT bound V6 */
833 if (nam->sa_family == AF_INET6 &&
834 (inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) == 0) {
835 SCTP_INP_RUNLOCK(inp);
836 continue;
838 SCTP_INP_RUNLOCK(inp);
839 return (inp);
841 SCTP_INP_RUNLOCK(inp);
844 if ((nam->sa_family == AF_INET) &&
845 (sin->sin_addr.s_addr == INADDR_ANY)) {
846 /* Can't hunt for one that has no address specified */
847 return (NULL);
848 } else if ((nam->sa_family == AF_INET6) &&
849 (IN6_IS_ADDR_UNSPECIFIED(&sin6->sin6_addr))) {
850 /* Can't hunt for one that has no address specified */
851 return (NULL);
854 * ok, not bound to all so see if we can find a EP bound to this
855 * address.
857 #ifdef SCTP_DEBUG
858 if (sctp_debug_on & SCTP_DEBUG_PCB1) {
859 kprintf("Ok, there is NO bound-all available for port:%x\n", ntohs(lport));
861 #endif
862 LIST_FOREACH(inp, head, sctp_hash) {
863 SCTP_INP_RLOCK(inp);
864 if ((inp->sctp_flags & SCTP_PCB_FLAGS_BOUNDALL)) {
865 SCTP_INP_RUNLOCK(inp);
866 continue;
869 * Ok this could be a likely candidate, look at all of
870 * its addresses
872 if (inp->sctp_lport != lport) {
873 SCTP_INP_RUNLOCK(inp);
874 continue;
876 #ifdef SCTP_DEBUG
877 if (sctp_debug_on & SCTP_DEBUG_PCB1) {
878 kprintf("Ok, found maching local port\n");
880 #endif
881 LIST_FOREACH(laddr, &inp->sctp_addr_list, sctp_nxt_addr) {
882 if (laddr->ifa == NULL) {
883 #ifdef SCTP_DEBUG
884 if (sctp_debug_on & SCTP_DEBUG_PCB1) {
885 kprintf("An ounce of prevention is worth a pound of cure\n");
887 #endif
888 continue;
890 #ifdef SCTP_DEBUG
891 if (sctp_debug_on & SCTP_DEBUG_PCB1) {
892 kprintf("Ok laddr->ifa:%p is possible, ",
893 laddr->ifa);
895 #endif
896 if (laddr->ifa->ifa_addr == NULL) {
897 #ifdef SCTP_DEBUG
898 if (sctp_debug_on & SCTP_DEBUG_PCB1) {
899 kprintf("Huh IFA as an ifa_addr=NULL, ");
901 #endif
902 continue;
904 #ifdef SCTP_DEBUG
905 if (sctp_debug_on & SCTP_DEBUG_PCB1) {
906 kprintf("Ok laddr->ifa:%p is possible, ",
907 laddr->ifa->ifa_addr);
908 sctp_print_address(laddr->ifa->ifa_addr);
909 kprintf("looking for ");
910 sctp_print_address(nam);
912 #endif
913 if (laddr->ifa->ifa_addr->sa_family == nam->sa_family) {
914 /* possible, see if it matches */
915 struct sockaddr_in *intf_addr;
916 intf_addr = (struct sockaddr_in *)
917 laddr->ifa->ifa_addr;
918 if (nam->sa_family == AF_INET) {
919 if (sin->sin_addr.s_addr ==
920 intf_addr->sin_addr.s_addr) {
921 #ifdef SCTP_DEBUG
922 if (sctp_debug_on & SCTP_DEBUG_PCB1) {
923 kprintf("YES, return ep:%p\n", inp);
925 #endif
926 SCTP_INP_RUNLOCK(inp);
927 return (inp);
929 } else if (nam->sa_family == AF_INET6) {
930 struct sockaddr_in6 *intf_addr6;
931 intf_addr6 = (struct sockaddr_in6 *)
932 laddr->ifa->ifa_addr;
933 if (SCTP6_ARE_ADDR_EQUAL(&sin6->sin6_addr,
934 &intf_addr6->sin6_addr)) {
935 #ifdef SCTP_DEBUG
936 if (sctp_debug_on & SCTP_DEBUG_PCB1) {
937 kprintf("YES, return ep:%p\n", inp);
939 #endif
940 SCTP_INP_RUNLOCK(inp);
941 return (inp);
945 SCTP_INP_RUNLOCK(inp);
948 #ifdef SCTP_DEBUG
949 if (sctp_debug_on & SCTP_DEBUG_PCB1) {
950 kprintf("NO, Falls out to NULL\n");
952 #endif
953 return (NULL);
957 struct sctp_inpcb *
958 sctp_pcb_findep(struct sockaddr *nam, int find_tcp_pool, int have_lock)
961 * First we check the hash table to see if someone has this port
962 * bound with just the port.
964 struct sctp_inpcb *inp;
965 struct sctppcbhead *head;
966 struct sockaddr_in *sin;
967 struct sockaddr_in6 *sin6;
968 int lport;
969 #ifdef SCTP_DEBUG
970 if (sctp_debug_on & SCTP_DEBUG_PCB1) {
971 kprintf("Looking for endpoint %d :",
972 ntohs(((struct sockaddr_in *)nam)->sin_port));
973 sctp_print_address(nam);
975 #endif
976 if (nam->sa_family == AF_INET) {
977 sin = (struct sockaddr_in *)nam;
978 lport = ((struct sockaddr_in *)nam)->sin_port;
979 } else if (nam->sa_family == AF_INET6) {
980 sin6 = (struct sockaddr_in6 *)nam;
981 lport = ((struct sockaddr_in6 *)nam)->sin6_port;
982 } else {
983 /* unsupported family */
984 return (NULL);
987 * I could cheat here and just cast to one of the types but we will
988 * do it right. It also provides the check against an Unsupported
989 * type too.
991 /* Find the head of the ALLADDR chain */
992 if (have_lock == 0)
993 SCTP_INP_INFO_RLOCK();
994 head = &sctppcbinfo.sctp_ephash[SCTP_PCBHASH_ALLADDR(lport,
995 sctppcbinfo.hashmark)];
996 #ifdef SCTP_DEBUG
997 if (sctp_debug_on & SCTP_DEBUG_PCB1) {
998 kprintf("Main hash to lookup at head:%p\n", head);
1000 #endif
1001 inp = sctp_endpoint_probe(nam, head, lport);
1004 * If the TCP model exists it could be that the main listening
1005 * endpoint is gone but there exists a connected socket for this
1006 * guy yet. If so we can return the first one that we find. This
1007 * may NOT be the correct one but the sctp_findassociation_ep_addr
1008 * has further code to look at all TCP models.
1010 if (inp == NULL && find_tcp_pool) {
1011 unsigned int i;
1012 #ifdef SCTP_DEBUG
1013 if (sctp_debug_on & SCTP_DEBUG_PCB1) {
1014 kprintf("EP was NULL and TCP model is supported\n");
1016 #endif
1017 for (i = 0; i < sctppcbinfo.hashtblsize; i++) {
1019 * This is real gross, but we do NOT have a remote
1020 * port at this point depending on who is calling. We
1021 * must therefore look for ANY one that matches our
1022 * local port :/
1024 head = &sctppcbinfo.sctp_tcpephash[i];
1025 if (LIST_FIRST(head)) {
1026 inp = sctp_endpoint_probe(nam, head, lport);
1027 if (inp) {
1028 /* Found one */
1029 break;
1034 #ifdef SCTP_DEBUG
1035 if (sctp_debug_on & SCTP_DEBUG_PCB1) {
1036 kprintf("EP to return is %p\n", inp);
1038 #endif
1039 if (have_lock == 0) {
1040 if (inp) {
1041 SCTP_INP_WLOCK(inp);
1042 SCTP_INP_INCR_REF(inp);
1043 SCTP_INP_WUNLOCK(inp);
1045 SCTP_INP_INFO_RUNLOCK();
1046 } else {
1047 if (inp) {
1048 SCTP_INP_WLOCK(inp);
1049 SCTP_INP_INCR_REF(inp);
1050 SCTP_INP_WUNLOCK(inp);
1053 return (inp);
1057 * Find an association for an endpoint with the pointer to whom you want
1058 * to send to and the endpoint pointer. The address can be IPv4 or IPv6.
1059 * We may need to change the *to to some other struct like a mbuf...
1061 struct sctp_tcb *
1062 sctp_findassociation_addr_sa(struct sockaddr *to, struct sockaddr *from,
1063 struct sctp_inpcb **inp_p, struct sctp_nets **netp, int find_tcp_pool)
1065 struct sctp_inpcb *inp;
1066 struct sctp_tcb *retval;
1068 SCTP_INP_INFO_RLOCK();
1069 if (find_tcp_pool) {
1070 if (inp_p != NULL) {
1071 retval = sctp_tcb_special_locate(inp_p, from, to, netp);
1072 } else {
1073 retval = sctp_tcb_special_locate(&inp, from, to, netp);
1075 if (retval != NULL) {
1076 SCTP_INP_INFO_RUNLOCK();
1077 return (retval);
1080 inp = sctp_pcb_findep(to, 0, 1);
1081 if (inp_p != NULL) {
1082 *inp_p = inp;
1084 SCTP_INP_INFO_RUNLOCK();
1086 if (inp == NULL) {
1087 return (NULL);
1091 * ok, we have an endpoint, now lets find the assoc for it (if any)
1092 * we now place the source address or from in the to of the find
1093 * endpoint call. Since in reality this chain is used from the
1094 * inbound packet side.
1096 if (inp_p != NULL) {
1097 return (sctp_findassociation_ep_addr(inp_p, from, netp, to, NULL));
1098 } else {
1099 return (sctp_findassociation_ep_addr(&inp, from, netp, to, NULL));
1105 * This routine will grub through the mbuf that is a INIT or INIT-ACK and
1106 * find all addresses that the sender has specified in any address list.
1107 * Each address will be used to look up the TCB and see if one exists.
1109 static struct sctp_tcb *
1110 sctp_findassociation_special_addr(struct mbuf *m, int iphlen, int offset,
1111 struct sctphdr *sh, struct sctp_inpcb **inp_p, struct sctp_nets **netp,
1112 struct sockaddr *dest)
1114 struct sockaddr_in sin4;
1115 struct sockaddr_in6 sin6;
1116 struct sctp_paramhdr *phdr, parm_buf;
1117 struct sctp_tcb *retval;
1118 u_int32_t ptype, plen;
1120 memset(&sin4, 0, sizeof(sin4));
1121 memset(&sin6, 0, sizeof(sin6));
1122 sin4.sin_len = sizeof(sin4);
1123 sin4.sin_family = AF_INET;
1124 sin4.sin_port = sh->src_port;
1125 sin6.sin6_len = sizeof(sin6);
1126 sin6.sin6_family = AF_INET6;
1127 sin6.sin6_port = sh->src_port;
1129 retval = NULL;
1130 offset += sizeof(struct sctp_init_chunk);
1132 phdr = sctp_get_next_param(m, offset, &parm_buf, sizeof(parm_buf));
1133 while (phdr != NULL) {
1134 /* now we must see if we want the parameter */
1135 ptype = ntohs(phdr->param_type);
1136 plen = ntohs(phdr->param_length);
1137 if (plen == 0) {
1138 #ifdef SCTP_DEBUG
1139 if (sctp_debug_on & SCTP_DEBUG_PCB1) {
1140 kprintf("sctp_findassociation_special_addr: Impossible length in parameter\n");
1142 #endif /* SCTP_DEBUG */
1143 break;
1145 if (ptype == SCTP_IPV4_ADDRESS &&
1146 plen == sizeof(struct sctp_ipv4addr_param)) {
1147 /* Get the rest of the address */
1148 struct sctp_ipv4addr_param ip4_parm, *p4;
1150 phdr = sctp_get_next_param(m, offset,
1151 (struct sctp_paramhdr *)&ip4_parm, plen);
1152 if (phdr == NULL) {
1153 return (NULL);
1155 p4 = (struct sctp_ipv4addr_param *)phdr;
1156 memcpy(&sin4.sin_addr, &p4->addr, sizeof(p4->addr));
1157 /* look it up */
1158 retval = sctp_findassociation_ep_addr(inp_p,
1159 (struct sockaddr *)&sin4, netp, dest, NULL);
1160 if (retval != NULL) {
1161 return (retval);
1163 } else if (ptype == SCTP_IPV6_ADDRESS &&
1164 plen == sizeof(struct sctp_ipv6addr_param)) {
1165 /* Get the rest of the address */
1166 struct sctp_ipv6addr_param ip6_parm, *p6;
1168 phdr = sctp_get_next_param(m, offset,
1169 (struct sctp_paramhdr *)&ip6_parm, plen);
1170 if (phdr == NULL) {
1171 return (NULL);
1173 p6 = (struct sctp_ipv6addr_param *)phdr;
1174 memcpy(&sin6.sin6_addr, &p6->addr, sizeof(p6->addr));
1175 /* look it up */
1176 retval = sctp_findassociation_ep_addr(inp_p,
1177 (struct sockaddr *)&sin6, netp, dest, NULL);
1178 if (retval != NULL) {
1179 return (retval);
1182 offset += SCTP_SIZE32(plen);
1183 phdr = sctp_get_next_param(m, offset, &parm_buf,
1184 sizeof(parm_buf));
1186 return (NULL);
1189 static struct sctp_tcb *
1190 sctp_findassoc_by_vtag(struct sockaddr *from, uint32_t vtag,
1191 struct sctp_inpcb **inp_p, struct sctp_nets **netp, uint16_t rport,
1192 uint16_t lport)
1195 * Use my vtag to hash. If we find it we then verify the source addr
1196 * is in the assoc. If all goes well we save a bit on rec of a packet.
1198 struct sctpasochead *head;
1199 struct sctp_nets *net;
1200 struct sctp_tcb *stcb;
1202 SCTP_INP_INFO_RLOCK();
1203 head = &sctppcbinfo.sctp_asochash[SCTP_PCBHASH_ASOC(vtag,
1204 sctppcbinfo.hashasocmark)];
1205 if (head == NULL) {
1206 /* invalid vtag */
1207 SCTP_INP_INFO_RUNLOCK();
1208 return (NULL);
1210 LIST_FOREACH(stcb, head, sctp_asocs) {
1211 SCTP_INP_RLOCK(stcb->sctp_ep);
1212 SCTP_TCB_LOCK(stcb);
1213 SCTP_INP_RUNLOCK(stcb->sctp_ep);
1214 if (stcb->asoc.my_vtag == vtag) {
1215 /* candidate */
1216 if (stcb->rport != rport) {
1218 * we could remove this if vtags are unique
1219 * across the system.
1221 SCTP_TCB_UNLOCK(stcb);
1222 continue;
1224 if (stcb->sctp_ep->sctp_lport != lport) {
1226 * we could remove this if vtags are unique
1227 * across the system.
1229 SCTP_TCB_UNLOCK(stcb);
1230 continue;
1232 net = sctp_findnet(stcb, from);
1233 if (net) {
1234 /* yep its him. */
1235 *netp = net;
1236 sctp_pegs[SCTP_VTAG_EXPR]++;
1237 *inp_p = stcb->sctp_ep;
1238 SCTP_INP_INFO_RUNLOCK();
1239 return (stcb);
1240 } else {
1241 /* not him, this should only
1242 * happen in rare cases so
1243 * I peg it.
1245 sctp_pegs[SCTP_VTAG_BOGUS]++;
1248 SCTP_TCB_UNLOCK(stcb);
1250 SCTP_INP_INFO_RUNLOCK();
1251 return (NULL);
1255 * Find an association with the pointer to the inbound IP packet. This
1256 * can be a IPv4 or IPv6 packet.
1258 struct sctp_tcb *
1259 sctp_findassociation_addr(struct mbuf *m, int iphlen, int offset,
1260 struct sctphdr *sh, struct sctp_chunkhdr *ch,
1261 struct sctp_inpcb **inp_p, struct sctp_nets **netp)
1263 int find_tcp_pool;
1264 struct ip *iph;
1265 struct sctp_tcb *retval;
1266 struct sockaddr_storage to_store, from_store;
1267 struct sockaddr *to = (struct sockaddr *)&to_store;
1268 struct sockaddr *from = (struct sockaddr *)&from_store;
1269 struct sctp_inpcb *inp;
1272 iph = mtod(m, struct ip *);
1273 if (iph->ip_v == IPVERSION) {
1274 /* its IPv4 */
1275 struct sockaddr_in *to4, *from4;
1277 to4 = (struct sockaddr_in *)&to_store;
1278 from4 = (struct sockaddr_in *)&from_store;
1279 bzero(to4, sizeof(*to4));
1280 bzero(from4, sizeof(*from4));
1281 from4->sin_family = to4->sin_family = AF_INET;
1282 from4->sin_len = to4->sin_len = sizeof(struct sockaddr_in);
1283 from4->sin_addr.s_addr = iph->ip_src.s_addr;
1284 to4->sin_addr.s_addr = iph->ip_dst.s_addr ;
1285 from4->sin_port = sh->src_port;
1286 to4->sin_port = sh->dest_port;
1287 } else if (iph->ip_v == (IPV6_VERSION >> 4)) {
1288 /* its IPv6 */
1289 struct ip6_hdr *ip6;
1290 struct sockaddr_in6 *to6, *from6;
1292 ip6 = mtod(m, struct ip6_hdr *);
1293 to6 = (struct sockaddr_in6 *)&to_store;
1294 from6 = (struct sockaddr_in6 *)&from_store;
1295 bzero(to6, sizeof(*to6));
1296 bzero(from6, sizeof(*from6));
1297 from6->sin6_family = to6->sin6_family = AF_INET6;
1298 from6->sin6_len = to6->sin6_len = sizeof(struct sockaddr_in6);
1299 to6->sin6_addr = ip6->ip6_dst;
1300 from6->sin6_addr = ip6->ip6_src;
1301 from6->sin6_port = sh->src_port;
1302 to6->sin6_port = sh->dest_port;
1303 /* Get the scopes in properly to the sin6 addr's */
1304 in6_recoverscope(to6, &to6->sin6_addr, NULL);
1305 #if defined(SCTP_BASE_FREEBSD) || defined(__APPLE__) || defined(__DragonFly__)
1306 in6_embedscope(&to6->sin6_addr, to6, NULL, NULL);
1307 #else
1308 in6_embedscope(&to6->sin6_addr, to6);
1309 #endif
1311 in6_recoverscope(from6, &from6->sin6_addr, NULL);
1312 #if defined(SCTP_BASE_FREEBSD) || defined(__APPLE__) || defined(__DragonFly__)
1313 in6_embedscope(&from6->sin6_addr, from6, NULL, NULL);
1314 #else
1315 in6_embedscope(&from6->sin6_addr, from6);
1316 #endif
1317 } else {
1318 /* Currently not supported. */
1319 return (NULL);
1321 #ifdef SCTP_DEBUG
1322 if (sctp_debug_on & SCTP_DEBUG_PCB1) {
1323 kprintf("Looking for port %d address :",
1324 ntohs(((struct sockaddr_in *)to)->sin_port));
1325 sctp_print_address(to);
1326 kprintf("From for port %d address :",
1327 ntohs(((struct sockaddr_in *)from)->sin_port));
1328 sctp_print_address(from);
1330 #endif
1332 if (sh->v_tag) {
1333 /* we only go down this path if vtag is non-zero */
1334 retval = sctp_findassoc_by_vtag(from, ntohl(sh->v_tag),
1335 inp_p, netp, sh->src_port, sh->dest_port);
1336 if (retval) {
1337 return (retval);
1340 find_tcp_pool = 0;
1341 if ((ch->chunk_type != SCTP_INITIATION) &&
1342 (ch->chunk_type != SCTP_INITIATION_ACK) &&
1343 (ch->chunk_type != SCTP_COOKIE_ACK) &&
1344 (ch->chunk_type != SCTP_COOKIE_ECHO)) {
1345 /* Other chunk types go to the tcp pool. */
1346 find_tcp_pool = 1;
1348 if (inp_p) {
1349 retval = sctp_findassociation_addr_sa(to, from, inp_p, netp,
1350 find_tcp_pool);
1351 inp = *inp_p;
1352 } else {
1353 retval = sctp_findassociation_addr_sa(to, from, &inp, netp,
1354 find_tcp_pool);
1356 #ifdef SCTP_DEBUG
1357 if (sctp_debug_on & SCTP_DEBUG_PCB1) {
1358 kprintf("retval:%p inp:%p\n", retval, inp);
1360 #endif
1361 if (retval == NULL && inp) {
1362 /* Found a EP but not this address */
1363 #ifdef SCTP_DEBUG
1364 if (sctp_debug_on & SCTP_DEBUG_PCB1) {
1365 kprintf("Found endpoint %p but no asoc - ep state:%x\n",
1366 inp, inp->sctp_flags);
1368 #endif
1369 if ((ch->chunk_type == SCTP_INITIATION) ||
1370 (ch->chunk_type == SCTP_INITIATION_ACK)) {
1372 * special hook, we do NOT return linp or an
1373 * association that is linked to an existing
1374 * association that is under the TCP pool (i.e. no
1375 * listener exists). The endpoint finding routine
1376 * will always find a listener before examining the
1377 * TCP pool.
1379 if (inp->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL) {
1380 #ifdef SCTP_DEBUG
1381 if (sctp_debug_on & SCTP_DEBUG_PCB1) {
1382 kprintf("Gak, its in the TCP pool... return NULL");
1384 #endif
1385 if (inp_p) {
1386 *inp_p = NULL;
1388 return (NULL);
1390 #ifdef SCTP_DEBUG
1391 if (sctp_debug_on & SCTP_DEBUG_PCB1) {
1392 kprintf("Now doing SPECIAL find\n");
1394 #endif
1395 retval = sctp_findassociation_special_addr(m, iphlen,
1396 offset, sh, inp_p, netp, to);
1399 #ifdef SCTP_DEBUG
1400 if (sctp_debug_on & SCTP_DEBUG_PCB1) {
1401 kprintf("retval is %p\n", retval);
1403 #endif
1404 return (retval);
1407 extern int sctp_max_burst_default;
1409 extern unsigned int sctp_delayed_sack_time_default;
1410 extern unsigned int sctp_heartbeat_interval_default;
1411 extern unsigned int sctp_pmtu_raise_time_default;
1412 extern unsigned int sctp_shutdown_guard_time_default;
1413 extern unsigned int sctp_secret_lifetime_default;
1415 extern unsigned int sctp_rto_max_default;
1416 extern unsigned int sctp_rto_min_default;
1417 extern unsigned int sctp_rto_initial_default;
1418 extern unsigned int sctp_init_rto_max_default;
1419 extern unsigned int sctp_valid_cookie_life_default;
1420 extern unsigned int sctp_init_rtx_max_default;
1421 extern unsigned int sctp_assoc_rtx_max_default;
1422 extern unsigned int sctp_path_rtx_max_default;
1423 extern unsigned int sctp_nr_outgoing_streams_default;
1426 * allocate a sctp_inpcb and setup a temporary binding to a port/all
1427 * addresses. This way if we don't get a bind we by default pick an ephemeral
1428 * port with all addresses bound.
1431 sctp_inpcb_alloc(struct socket *so)
1434 * we get called when a new endpoint starts up. We need to allocate
1435 * the sctp_inpcb structure from the zone and init it. Mark it as
1436 * unbound and find a port that we can use as an ephemeral with
1437 * INADDR_ANY. If the user binds later no problem we can then add
1438 * in the specific addresses. And setup the default parameters for
1439 * the EP.
1441 int i, error;
1442 struct sctp_inpcb *inp, *n_inp;
1443 struct sctp_pcb *m;
1444 struct timeval time;
1446 error = 0;
1448 /* Hack alert:
1450 * This code audits the entire INP list to see if
1451 * any ep's that are in the GONE state are now
1452 * all free. This should not happen really since when
1453 * the last association is freed we should end up deleting
1454 * the inpcb. This code, including the locks, should
1455 * be taken out ... since the last set of fixes I
1456 * have not seen the "Found a GONE on list" message
1457 * come out. But I am paranoid and we will leave this
1458 * in at the cost of efficiency on allocation of PCB's.
1459 * Probably we should move this to the invariant
1460 * compile options
1462 /* #ifdef INVARIANTS*/
1463 SCTP_INP_INFO_RLOCK();
1464 inp = LIST_FIRST(&sctppcbinfo.listhead);
1465 while (inp) {
1466 n_inp = LIST_NEXT(inp, sctp_list);
1467 if (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) {
1468 if (LIST_FIRST(&inp->sctp_asoc_list) == NULL) {
1469 /* finish the job now */
1470 kprintf("Found a GONE on list\n");
1471 SCTP_INP_INFO_RUNLOCK();
1472 sctp_inpcb_free(inp, 1);
1473 SCTP_INP_INFO_RLOCK();
1476 inp = n_inp;
1478 SCTP_INP_INFO_RUNLOCK();
1479 /* #endif INVARIANTS*/
1481 SCTP_INP_INFO_WLOCK();
1482 inp = (struct sctp_inpcb *)SCTP_ZONE_GET(sctppcbinfo.ipi_zone_ep);
1483 if (inp == NULL) {
1484 kprintf("Out of SCTP-INPCB structures - no resources\n");
1485 SCTP_INP_INFO_WUNLOCK();
1486 return (ENOBUFS);
1489 /* zap it */
1490 bzero(inp, sizeof(*inp));
1492 /* bump generations */
1493 inp->ip_inp.inp.inp_socket = so;
1495 /* setup socket pointers */
1496 inp->sctp_socket = so;
1498 /* setup inpcb socket too */
1499 inp->ip_inp.inp.inp_socket = so;
1500 inp->sctp_frag_point = SCTP_DEFAULT_MAXSEGMENT;
1501 #ifdef IPSEC
1502 #if !(defined(__OpenBSD__) || defined(__APPLE__))
1504 struct inpcbpolicy *pcb_sp = NULL;
1505 error = ipsec_init_policy(so, &pcb_sp);
1506 /* Arrange to share the policy */
1507 inp->ip_inp.inp.inp_sp = pcb_sp;
1508 ((struct in6pcb *)(&inp->ip_inp.inp))->in6p_sp = pcb_sp;
1510 #else
1511 /* not sure what to do for openbsd here */
1512 error = 0;
1513 #endif
1514 if (error != 0) {
1515 SCTP_ZONE_FREE(sctppcbinfo.ipi_zone_ep, inp);
1516 SCTP_INP_INFO_WUNLOCK();
1517 return error;
1519 #endif /* IPSEC */
1520 sctppcbinfo.ipi_count_ep++;
1521 #if defined(__FreeBSD__) || defined(__APPLE__) || defined(__DragonFly__)
1522 inp->ip_inp.inp.inp_gencnt = ++sctppcbinfo.ipi_gencnt_ep;
1523 inp->ip_inp.inp.inp_ip_ttl = ip_defttl;
1524 #else
1525 inp->inp_ip_ttl = ip_defttl;
1526 inp->inp_ip_tos = 0;
1527 #endif
1529 so->so_pcb = (caddr_t)inp;
1531 if ((so->so_type == SOCK_DGRAM) ||
1532 (so->so_type == SOCK_SEQPACKET)) {
1533 /* UDP style socket */
1534 inp->sctp_flags = (SCTP_PCB_FLAGS_UDPTYPE |
1535 SCTP_PCB_FLAGS_UNBOUND);
1536 inp->sctp_flags |= (SCTP_PCB_FLAGS_RECVDATAIOEVNT);
1537 } else if (so->so_type == SOCK_STREAM) {
1538 /* TCP style socket */
1539 inp->sctp_flags = (SCTP_PCB_FLAGS_TCPTYPE |
1540 SCTP_PCB_FLAGS_UNBOUND);
1541 inp->sctp_flags |= (SCTP_PCB_FLAGS_RECVDATAIOEVNT);
1542 } else {
1544 * unsupported socket type (RAW, etc)- in case we missed
1545 * it in protosw
1547 SCTP_ZONE_FREE(sctppcbinfo.ipi_zone_ep, inp);
1548 SCTP_INP_INFO_WUNLOCK();
1549 return (EOPNOTSUPP);
1551 inp->sctp_tcbhash = hashinit(sctp_pcbtblsize,
1552 #ifdef __NetBSD__
1553 HASH_LIST,
1554 #endif
1555 M_PCB,
1556 #if defined(__NetBSD__) || defined(__OpenBSD__)
1557 M_WAITOK,
1558 #endif
1559 &inp->sctp_hashmark);
1560 if (inp->sctp_tcbhash == NULL) {
1561 kprintf("Out of SCTP-INPCB->hashinit - no resources\n");
1562 SCTP_ZONE_FREE(sctppcbinfo.ipi_zone_ep, inp);
1563 SCTP_INP_INFO_WUNLOCK();
1564 return (ENOBUFS);
1566 /* LOCK init's */
1567 SCTP_INP_LOCK_INIT(inp);
1568 SCTP_ASOC_CREATE_LOCK_INIT(inp);
1569 /* lock the new ep */
1570 SCTP_INP_WLOCK(inp);
1572 /* add it to the info area */
1573 LIST_INSERT_HEAD(&sctppcbinfo.listhead, inp, sctp_list);
1574 SCTP_INP_INFO_WUNLOCK();
1576 LIST_INIT(&inp->sctp_addr_list);
1577 LIST_INIT(&inp->sctp_asoc_list);
1578 TAILQ_INIT(&inp->sctp_queue_list);
1579 /* Init the timer structure for signature change */
1580 #if defined (__FreeBSD__) && __FreeBSD_version >= 500000
1581 callout_init(&inp->sctp_ep.signature_change.timer, 0);
1582 #else
1583 callout_init(&inp->sctp_ep.signature_change.timer);
1584 #endif
1585 inp->sctp_ep.signature_change.type = SCTP_TIMER_TYPE_NEWCOOKIE;
1587 /* now init the actual endpoint default data */
1588 m = &inp->sctp_ep;
1590 /* setup the base timeout information */
1591 m->sctp_timeoutticks[SCTP_TIMER_SEND] = SEC_TO_TICKS(SCTP_SEND_SEC); /* needed ? */
1592 m->sctp_timeoutticks[SCTP_TIMER_INIT] = SEC_TO_TICKS(SCTP_INIT_SEC); /* needed ? */
1593 m->sctp_timeoutticks[SCTP_TIMER_RECV] = MSEC_TO_TICKS(sctp_delayed_sack_time_default);
1594 m->sctp_timeoutticks[SCTP_TIMER_HEARTBEAT] = sctp_heartbeat_interval_default; /* this is in MSEC */
1595 m->sctp_timeoutticks[SCTP_TIMER_PMTU] = SEC_TO_TICKS(sctp_pmtu_raise_time_default);
1596 m->sctp_timeoutticks[SCTP_TIMER_MAXSHUTDOWN] = SEC_TO_TICKS(sctp_shutdown_guard_time_default);
1597 m->sctp_timeoutticks[SCTP_TIMER_SIGNATURE] = SEC_TO_TICKS(sctp_secret_lifetime_default);
1598 /* all max/min max are in ms */
1599 m->sctp_maxrto = sctp_rto_max_default;
1600 m->sctp_minrto = sctp_rto_min_default;
1601 m->initial_rto = sctp_rto_initial_default;
1602 m->initial_init_rto_max = sctp_init_rto_max_default;
1604 m->max_open_streams_intome = MAX_SCTP_STREAMS;
1606 m->max_init_times = sctp_init_rtx_max_default;
1607 m->max_send_times = sctp_assoc_rtx_max_default;
1608 m->def_net_failure = sctp_path_rtx_max_default;
1609 m->sctp_sws_sender = SCTP_SWS_SENDER_DEF;
1610 m->sctp_sws_receiver = SCTP_SWS_RECEIVER_DEF;
1611 m->max_burst = sctp_max_burst_default;
1612 /* number of streams to pre-open on a association */
1613 m->pre_open_stream_count = sctp_nr_outgoing_streams_default;
1615 /* Add adaption cookie */
1616 m->adaption_layer_indicator = 0x504C5253;
1618 /* seed random number generator */
1619 m->random_counter = 1;
1620 m->store_at = SCTP_SIGNATURE_SIZE;
1621 #if (defined(__FreeBSD__) && (__FreeBSD_version < 500000)) || defined(__DragonFly__)
1622 read_random_unlimited(m->random_numbers, sizeof(m->random_numbers));
1623 #elif defined(__APPLE__) || (__FreeBSD_version > 500000)
1624 read_random(m->random_numbers, sizeof(m->random_numbers));
1625 #elif defined(__OpenBSD__)
1626 get_random_bytes(m->random_numbers, sizeof(m->random_numbers));
1627 #elif defined(__NetBSD__) && NRND > 0
1628 rnd_extract_data(m->random_numbers, sizeof(m->random_numbers),
1629 RND_EXTRACT_ANY);
1630 #else
1632 u_int32_t *ranm, *ranp;
1633 ranp = (u_int32_t *)&m->random_numbers;
1634 ranm = ranp + (SCTP_SIGNATURE_ALOC_SIZE/sizeof(u_int32_t));
1635 if ((u_long)ranp % 4) {
1636 /* not an even boundary? */
1637 ranp = (u_int32_t *)SCTP_SIZE32((u_long)ranp);
1639 while (ranp < ranm) {
1640 *ranp = random();
1641 ranp++;
1644 #endif
1645 sctp_fill_random_store(m);
1647 /* Minimum cookie size */
1648 m->size_of_a_cookie = (sizeof(struct sctp_init_msg) * 2) +
1649 sizeof(struct sctp_state_cookie);
1650 m->size_of_a_cookie += SCTP_SIGNATURE_SIZE;
1652 /* Setup the initial secret */
1653 SCTP_GETTIME_TIMEVAL(&time);
1654 m->time_of_secret_change = time.tv_sec;
1656 for (i = 0; i < SCTP_NUMBER_OF_SECRETS; i++) {
1657 m->secret_key[0][i] = sctp_select_initial_TSN(m);
1659 sctp_timer_start(SCTP_TIMER_TYPE_NEWCOOKIE, inp, NULL, NULL);
1661 /* How long is a cookie good for ? */
1662 m->def_cookie_life = sctp_valid_cookie_life_default;
1663 SCTP_INP_WUNLOCK(inp);
1664 return (error);
1668 void
1669 sctp_move_pcb_and_assoc(struct sctp_inpcb *old_inp, struct sctp_inpcb *new_inp,
1670 struct sctp_tcb *stcb)
1672 uint16_t lport, rport;
1673 struct sctppcbhead *head;
1674 struct sctp_laddr *laddr, *oladdr;
1676 SCTP_TCB_UNLOCK(stcb);
1677 SCTP_INP_INFO_WLOCK();
1678 SCTP_INP_WLOCK(old_inp);
1679 SCTP_INP_WLOCK(new_inp);
1680 SCTP_TCB_LOCK(stcb);
1682 new_inp->sctp_ep.time_of_secret_change =
1683 old_inp->sctp_ep.time_of_secret_change;
1684 memcpy(new_inp->sctp_ep.secret_key, old_inp->sctp_ep.secret_key,
1685 sizeof(old_inp->sctp_ep.secret_key));
1686 new_inp->sctp_ep.current_secret_number =
1687 old_inp->sctp_ep.current_secret_number;
1688 new_inp->sctp_ep.last_secret_number =
1689 old_inp->sctp_ep.last_secret_number;
1690 new_inp->sctp_ep.size_of_a_cookie = old_inp->sctp_ep.size_of_a_cookie;
1692 /* Copy the port across */
1693 lport = new_inp->sctp_lport = old_inp->sctp_lport;
1694 rport = stcb->rport;
1695 /* Pull the tcb from the old association */
1696 LIST_REMOVE(stcb, sctp_tcbhash);
1697 LIST_REMOVE(stcb, sctp_tcblist);
1699 /* Now insert the new_inp into the TCP connected hash */
1700 head = &sctppcbinfo.sctp_tcpephash[SCTP_PCBHASH_ALLADDR((lport + rport),
1701 sctppcbinfo.hashtcpmark)];
1703 LIST_INSERT_HEAD(head, new_inp, sctp_hash);
1705 /* Now move the tcb into the endpoint list */
1706 LIST_INSERT_HEAD(&new_inp->sctp_asoc_list, stcb, sctp_tcblist);
1708 * Question, do we even need to worry about the ep-hash since
1709 * we only have one connection? Probably not :> so lets
1710 * get rid of it and not suck up any kernel memory in that.
1712 SCTP_INP_INFO_WUNLOCK();
1713 stcb->sctp_socket = new_inp->sctp_socket;
1714 stcb->sctp_ep = new_inp;
1715 if (new_inp->sctp_tcbhash != NULL) {
1716 FREE(new_inp->sctp_tcbhash, M_PCB);
1717 new_inp->sctp_tcbhash = NULL;
1719 if ((new_inp->sctp_flags & SCTP_PCB_FLAGS_BOUNDALL) == 0) {
1720 /* Subset bound, so copy in the laddr list from the old_inp */
1721 LIST_FOREACH(oladdr, &old_inp->sctp_addr_list, sctp_nxt_addr) {
1722 laddr = (struct sctp_laddr *)SCTP_ZONE_GET(
1723 sctppcbinfo.ipi_zone_laddr);
1724 if (laddr == NULL) {
1726 * Gak, what can we do? This assoc is really
1727 * HOSED. We probably should send an abort
1728 * here.
1730 #ifdef SCTP_DEBUG
1731 if (sctp_debug_on & SCTP_DEBUG_PCB1) {
1732 kprintf("Association hosed in TCP model, out of laddr memory\n");
1734 #endif /* SCTP_DEBUG */
1735 continue;
1737 sctppcbinfo.ipi_count_laddr++;
1738 sctppcbinfo.ipi_gencnt_laddr++;
1739 bzero(laddr, sizeof(*laddr));
1740 laddr->ifa = oladdr->ifa;
1741 LIST_INSERT_HEAD(&new_inp->sctp_addr_list, laddr,
1742 sctp_nxt_addr);
1743 new_inp->laddr_count++;
1746 SCTP_INP_WUNLOCK(new_inp);
1747 SCTP_INP_WUNLOCK(old_inp);
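/*
 * sctp_isport_inuse() walks the main endpoint hash bucket for lport and
 * reports whether some other endpoint already holds that port, taking
 * the v4/v6 and IPV6_V6ONLY binding combinations below into account.
 * It returns 1 on a conflict and 0 if the port looks free for this inp.
 */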
1750 static int
1751 sctp_isport_inuse(struct sctp_inpcb *inp, uint16_t lport)
1753 struct sctppcbhead *head;
1754 struct sctp_inpcb *t_inp;
1756 head = &sctppcbinfo.sctp_ephash[SCTP_PCBHASH_ALLADDR(lport,
1757 sctppcbinfo.hashmark)];
1758 LIST_FOREACH(t_inp, head, sctp_hash) {
1759 if (t_inp->sctp_lport != lport) {
1760 continue;
1762 /* This one is in use. */
1763 /* check the v6/v4 binding issue */
1764 if ((t_inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) &&
1765 #if defined(__FreeBSD__)
1766 (((struct inpcb *)t_inp)->inp_flags & IN6P_IPV6_V6ONLY)
1767 #else
1768 #if defined(__OpenBSD__)
1769 (0) /* For open bsd we do dual bind only */
1770 #else
1771 (((struct in6pcb *)t_inp)->in6p_flags & IN6P_IPV6_V6ONLY)
1772 #endif
1773 #endif
1775 if (inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) {
1776 /* collision in V6 space */
1777 return (1);
1778 } else {
1779 /* inp is BOUND_V4 no conflict */
1780 continue;
1782 } else if (t_inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) {
1783 /* t_inp is bound v4 and v6, conflict always */
1784 return (1);
1785 } else {
1786 /* t_inp is bound only V4 */
1787 if ((inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) &&
1788 #if defined(__FreeBSD__)
1789 (((struct inpcb *)inp)->inp_flags & IN6P_IPV6_V6ONLY)
1790 #else
1791 #if defined(__OpenBSD__)
1792 (0) /* For open bsd we do dual bind only */
1793 #else
1794 (((struct in6pcb *)inp)->in6p_flags & IN6P_IPV6_V6ONLY)
1795 #endif
1796 #endif
1798 /* no conflict */
1799 continue;
1801 /* else fall through to conflict */
1803 return (1);
1805 return (0);
1808 #if !(defined(__FreeBSD__) || defined(__APPLE__) || defined(__DragonFly__))
1810 * Don't know why, but without this there is an unknown reference when
1811 * compiling NetBSD... hmm
1813 extern void in6_sin6_2_sin (struct sockaddr_in *, struct sockaddr_in6 *sin6);
1814 #endif
1818 #if (defined(__FreeBSD__) && __FreeBSD_version >= 500000) || defined(__DragonFly__)
1819 sctp_inpcb_bind(struct socket *so, struct sockaddr *addr, struct thread *p)
1820 #else
1821 sctp_inpcb_bind(struct socket *so, struct sockaddr *addr, struct proc *p)
1822 #endif
1824 /* bind an ep to a socket address */
1825 struct sctppcbhead *head;
1826 struct sctp_inpcb *inp, *inp_tmp;
1827 struct inpcb *ip_inp;
1828 int bindall;
1829 uint16_t lport;
1830 int error;
1832 lport = 0;
1833 error = 0;
1834 bindall = 1;
1835 inp = (struct sctp_inpcb *)so->so_pcb;
1836 ip_inp = (struct inpcb *)so->so_pcb;
1837 #ifdef SCTP_DEBUG
1838 if (sctp_debug_on & SCTP_DEBUG_PCB1) {
1839 if (addr) {
1840 kprintf("Bind called port:%d\n",
1841 ntohs(((struct sockaddr_in *)addr)->sin_port));
1842 kprintf("Addr :");
1843 sctp_print_address(addr);
1846 #endif /* SCTP_DEBUG */
1847 if ((inp->sctp_flags & SCTP_PCB_FLAGS_UNBOUND) == 0) {
1848 /* already did a bind, subsequent binds NOT allowed ! */
1849 return (EINVAL);
1852 if (addr != NULL) {
1853 if (addr->sa_family == AF_INET) {
1854 struct sockaddr_in *sin;
1856 /* IPV6_V6ONLY socket? */
1857 if (
1858 #if defined(__FreeBSD__) || defined(__APPLE__) || defined(__DragonFly__)
1859 (ip_inp->inp_flags & IN6P_IPV6_V6ONLY)
1860 #else
1861 #if defined(__OpenBSD__)
1862 (0) /* For openbsd we do dual bind only */
1863 #else
1864 (((struct in6pcb *)inp)->in6p_flags & IN6P_IPV6_V6ONLY)
1865 #endif
1866 #endif
1868 return (EINVAL);
1871 if (addr->sa_len != sizeof(*sin))
1872 return (EINVAL);
1874 sin = (struct sockaddr_in *)addr;
1875 lport = sin->sin_port;
1877 if (sin->sin_addr.s_addr != INADDR_ANY) {
1878 bindall = 0;
1880 } else if (addr->sa_family == AF_INET6) {
1881 /* Only for pure IPv6 Address. (No IPv4 Mapped!) */
1882 struct sockaddr_in6 *sin6;
1884 sin6 = (struct sockaddr_in6 *)addr;
1886 if (addr->sa_len != sizeof(*sin6))
1887 return (EINVAL);
1889 lport = sin6->sin6_port;
1890 if (!IN6_IS_ADDR_UNSPECIFIED(&sin6->sin6_addr)) {
1891 bindall = 0;
1892 /* KAME hack: embed scopeid */
1893 #if defined(SCTP_BASE_FREEBSD) || defined(__APPLE__) || defined(__DragonFly__)
1894 if (in6_embedscope(&sin6->sin6_addr, sin6,
1895 ip_inp, NULL) != 0)
1896 return (EINVAL);
1897 #elif defined(__FreeBSD__)
1898 error = scope6_check_id(sin6, ip6_use_defzone);
1899 if (error != 0)
1900 return (error);
1901 #else
1902 if (in6_embedscope(&sin6->sin6_addr, sin6) != 0) {
1903 return (EINVAL);
1905 #endif
1907 #ifndef SCOPEDROUTING
1908 /* this must be cleared for ifa_ifwithaddr() */
1909 sin6->sin6_scope_id = 0;
1910 #endif /* SCOPEDROUTING */
1911 } else {
1912 return (EAFNOSUPPORT);
1915 SCTP_INP_INFO_WLOCK();
1916 SCTP_INP_WLOCK(inp);
1917 /* increase our count due to the unlock we do */
1918 SCTP_INP_INCR_REF(inp);
1919 if (lport) {
1921 * Did the caller specify a port? if so we must see if a
1922 * ep already has this one bound.
1924 /* got to be root to get at low ports */
1925 if (ntohs(lport) < IPPORT_RESERVED) {
1926 if (p && (error =
1927 #ifdef __FreeBSD__
1928 #if __FreeBSD_version >= 500000
1929 suser_cred(p->td_ucred, 0)
1930 #else
1931 suser(p)
1932 #endif
1933 #elif defined(__NetBSD__) || defined(__APPLE__)
1934 suser(p->p_ucred, &p->p_acflag)
1935 #elif defined(__DragonFly__)
1936 suser(p)
1937 #else
1938 suser(p, 0)
1939 #endif
1940 )) {
1941 SCTP_INP_DECR_REF(inp);
1942 SCTP_INP_WUNLOCK(inp);
1943 SCTP_INP_INFO_WUNLOCK();
1944 return (error);
1947 if (p == NULL) {
1948 SCTP_INP_DECR_REF(inp);
1949 SCTP_INP_WUNLOCK(inp);
1950 SCTP_INP_INFO_WUNLOCK();
1951 return (error);
1953 SCTP_INP_WUNLOCK(inp);
1954 inp_tmp = sctp_pcb_findep(addr, 0, 1);
1955 if (inp_tmp != NULL) {
1956 /* lock guy returned and lower count
1957 * note that we are not bound so inp_tmp
1958 * should NEVER be inp. And it is this
1959 * inp (inp_tmp) that gets the reference
1960 * bump, so we must lower it.
1962 SCTP_INP_WLOCK(inp_tmp);
1963 SCTP_INP_DECR_REF(inp_tmp);
1964 SCTP_INP_WUNLOCK(inp_tmp);
1966 /* unlock info */
1967 SCTP_INP_INFO_WUNLOCK();
1968 return (EADDRNOTAVAIL);
1970 SCTP_INP_WLOCK(inp);
1971 if (bindall) {
1972 /* verify the lport is not already in use by a singleton (bound-specific) ep */
1973 if (sctp_isport_inuse(inp, lport)) {
1974 /* Sorry someone already has this one bound */
1975 SCTP_INP_DECR_REF(inp);
1976 SCTP_INP_WUNLOCK(inp);
1977 SCTP_INP_INFO_WUNLOCK();
1978 return (EADDRNOTAVAIL);
1981 } else {
1983 * get any port, but let's make sure no one has any address
1984 * with this port bound
1988 * setup the inp to the top (I could use the union but this
1989 * is just as easy)
1991 uint32_t port_guess;
1992 uint16_t port_attempt;
1993 int not_done=1;
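/*
 * A rough summary of the loop below: sctp_select_initial_TSN() serves
 * as a cheap 32-bit random source.  Its low 16 bits, its high 16 bits,
 * and the 16-bit sum of the two are tried in turn as candidate ports;
 * any candidate below IPPORT_RESERVED is bumped up by IPPORT_RESERVED,
 * and each candidate is probed with sctp_isport_inuse().  If all three
 * collide, a fresh random value is drawn and the loop repeats.
 *
 * For illustration only (the value is made up): with port_guess ==
 * 0x1234abcd the candidates are 0xabcd (43981), 0x1234 (4660) and
 * 0xabcd + 0x1234 == 0xbe01 (48641), all already above IPPORT_RESERVED.
 */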
1995 while (not_done) {
1996 port_guess = sctp_select_initial_TSN(&inp->sctp_ep);
1997 port_attempt = (port_guess & 0x0000ffff);
1998 if (port_attempt == 0) {
1999 goto next_half;
2001 if (port_attempt < IPPORT_RESERVED) {
2002 port_attempt += IPPORT_RESERVED;
2005 if (sctp_isport_inuse(inp, htons(port_attempt)) == 0) {
2006 /* got a port we can use */
2007 not_done = 0;
2008 continue;
2010 /* try upper half */
2011 next_half:
2012 port_attempt = ((port_guess >> 16) & 0x0000ffff);
2013 if (port_attempt == 0) {
2014 goto last_try;
2016 if (port_attempt < IPPORT_RESERVED) {
2017 port_attempt += IPPORT_RESERVED;
2019 if (sctp_isport_inuse(inp, htons(port_attempt)) == 0) {
2020 /* got a port we can use */
2021 not_done = 0;
2022 continue;
2024 /* try two half's added together */
2025 last_try:
2026 port_attempt = (((port_guess >> 16) & 0x0000ffff) + (port_guess & 0x0000ffff));
2027 if (port_attempt == 0) {
2028 /* get a new random number */
2029 continue;
2031 if (port_attempt < IPPORT_RESERVED) {
2032 port_attempt += IPPORT_RESERVED;
2034 if (sctp_isport_inuse(inp, htons(port_attempt)) == 0) {
2035 /* got a port we can use */
2036 not_done = 0;
2037 continue;
2040 /* we don't get out of the loop until we have a port */
2041 lport = htons(port_attempt);
2043 SCTP_INP_DECR_REF(inp);
2044 if (inp->sctp_flags & (SCTP_PCB_FLAGS_SOCKET_GONE|SCTP_PCB_FLAGS_SOCKET_ALLGONE)) {
2045 /* this really should not happen. The guy
2046 * did a non-blocking bind and then did a close
2047 * at the same time.
2049 SCTP_INP_WUNLOCK(inp);
2050 SCTP_INP_INFO_WUNLOCK();
2051 return (EINVAL);
2053 /* ok, we look clear to give out this port, so let's set up the binding */
2054 if (bindall) {
2055 /* binding to all addresses, so just set in the proper flags */
2056 inp->sctp_flags |= (SCTP_PCB_FLAGS_BOUNDALL |
2057 SCTP_PCB_FLAGS_DO_ASCONF);
2058 /* set the automatic addr changes from kernel flag */
2059 if (sctp_auto_asconf == 0) {
2060 inp->sctp_flags &= ~SCTP_PCB_FLAGS_AUTO_ASCONF;
2061 } else {
2062 inp->sctp_flags |= SCTP_PCB_FLAGS_AUTO_ASCONF;
2064 } else {
2066 * bind specific: make sure the BOUNDALL flag is off and add a new
2067 * address structure to the sctp_addr_list inside the ep structure.
2069 * We will need to allocate one and insert it at the head.
2070 * The socket-option call can insert new addresses in there
2071 * as well; it will also have to do the KAME embed-scope hack
2072 * (before adding).
2074 struct ifaddr *ifa;
2075 struct sockaddr_storage store_sa;
2077 memset(&store_sa, 0, sizeof(store_sa));
2078 if (addr->sa_family == AF_INET) {
2079 struct sockaddr_in *sin;
2081 sin = (struct sockaddr_in *)&store_sa;
2082 memcpy(sin, addr, sizeof(struct sockaddr_in));
2083 sin->sin_port = 0;
2084 } else if (addr->sa_family == AF_INET6) {
2085 struct sockaddr_in6 *sin6;
2087 sin6 = (struct sockaddr_in6 *)&store_sa;
2088 memcpy(sin6, addr, sizeof(struct sockaddr_in6));
2089 sin6->sin6_port = 0;
2092 * first find the interface with the bound address; the port must
2093 * be zeroed out to find the address (yuck!) and we could not do
2094 * that earlier since sctp_pcb_findep() needs the port.
2096 ifa = sctp_find_ifa_by_addr((struct sockaddr *)&store_sa);
2097 if (ifa == NULL) {
2098 /* Can't find an interface with that address */
2099 SCTP_INP_WUNLOCK(inp);
2100 SCTP_INP_INFO_WUNLOCK();
2101 return (EADDRNOTAVAIL);
2103 if (addr->sa_family == AF_INET6) {
2104 struct in6_ifaddr *ifa6;
2105 ifa6 = (struct in6_ifaddr *)ifa;
2107 * allow binding of deprecated addresses as per
2108 * RFC 2462 and ipng discussion
2110 if (ifa6->ia6_flags & (IN6_IFF_DETACHED |
2111 IN6_IFF_ANYCAST |
2112 IN6_IFF_NOTREADY)) {
2113 /* Can't bind a non-existent addr. */
2114 SCTP_INP_WUNLOCK(inp);
2115 SCTP_INP_INFO_WUNLOCK();
2116 return (EINVAL);
2119 /* we're not bound all */
2120 inp->sctp_flags &= ~SCTP_PCB_FLAGS_BOUNDALL;
2121 #if 0 /* use sysctl now */
2122 /* don't allow automatic addr changes from kernel */
2123 inp->sctp_flags &= ~SCTP_PCB_FLAGS_AUTO_ASCONF;
2124 #endif
2125 /* set the automatic addr changes from kernel flag */
2126 if (sctp_auto_asconf == 0) {
2127 inp->sctp_flags &= ~SCTP_PCB_FLAGS_AUTO_ASCONF;
2128 } else {
2129 inp->sctp_flags |= SCTP_PCB_FLAGS_AUTO_ASCONF;
2131 /* allow bindx() to send ASCONF's for binding changes */
2132 inp->sctp_flags |= SCTP_PCB_FLAGS_DO_ASCONF;
2133 /* add this address to the endpoint list */
2134 error = sctp_insert_laddr(&inp->sctp_addr_list, ifa);
2135 if (error != 0) {
2136 SCTP_INP_WUNLOCK(inp);
2137 SCTP_INP_INFO_WUNLOCK();
2138 return (error);
2140 inp->laddr_count++;
2142 /* find the bucket */
2143 head = &sctppcbinfo.sctp_ephash[SCTP_PCBHASH_ALLADDR(lport,
2144 sctppcbinfo.hashmark)];
2145 /* put it in the bucket */
2146 LIST_INSERT_HEAD(head, inp, sctp_hash);
2147 #ifdef SCTP_DEBUG
2148 if (sctp_debug_on & SCTP_DEBUG_PCB1) {
2149 kprintf("Main hash to bind at head:%p, bound port:%d\n", head, ntohs(lport));
2151 #endif
2152 /* set in the port */
2153 inp->sctp_lport = lport;
2155 /* turn off just the unbound flag */
2156 inp->sctp_flags &= ~SCTP_PCB_FLAGS_UNBOUND;
2157 SCTP_INP_WUNLOCK(inp);
2158 SCTP_INP_INFO_WUNLOCK();
2159 return (0);
2163 static void
2164 sctp_iterator_inp_being_freed(struct sctp_inpcb *inp, struct sctp_inpcb *inp_next)
2166 struct sctp_iterator *it;
2167 /* We enter with only the ITERATOR_LOCK in place and
2168 * a write lock on the inp_info structures.
2171 /* Go through all iterators; we must do this since
2172 * it is possible that some iterator does NOT have
2173 * the lock, but is waiting for it. And the one that
2174 * had the lock has either moved in the last iteration
2175 * or we just cleared it above. We need to find all
2176 * of those guys. The list of iterators should never
2177 * be very big though.
2179 LIST_FOREACH(it, &sctppcbinfo.iteratorhead, sctp_nxt_itr) {
2180 if (it == inp->inp_starting_point_for_iterator)
2181 /* skip this guy, he's special */
2182 continue;
2183 if (it->inp == inp) {
2184 /* This is tricky and we DON'T lock the iterator.
2185 * The reason is that this iterator is not actually running:
2186 * it is waiting, because inp->inp_starting_point_for_iterator
2187 * (the one we skipped above) holds the lock on this inp.
2188 * Since it cannot advance until that lock is released, it is
2189 * safe to repoint it here without taking its lock.
2191 if (it->iterator_flags & SCTP_ITERATOR_DO_SINGLE_INP) {
2192 it->inp = NULL;
2193 it->stcb = NULL;
2194 } else {
2195 /* set him up to do the next guy not me */
2196 it->inp = inp_next;
2197 it->stcb = NULL;
2201 it = inp->inp_starting_point_for_iterator;
2202 if (it) {
2203 if (it->iterator_flags & SCTP_ITERATOR_DO_SINGLE_INP) {
2204 it->inp = NULL;
2205 } else {
2206 it->inp = inp_next;
2208 it->stcb = NULL;
2212 /* release sctp_inpcb unbind the port */
2213 void
2214 sctp_inpcb_free(struct sctp_inpcb *inp, int immediate)
2217 * Here we free an endpoint. We must find it (if it is in the hash
2218 * table) and remove it from there. Then we must also find it in
2219 * the overall list and remove it from there. After all removals are
2220 * complete then any timer has to be stopped. Then start the actual
2221 * freeing.
2222 * a) Any local lists.
2223 * b) Any associations.
2224 * c) The hash of all associations.
2225 * d) finally the ep itself.
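 *
 * In rough terms: when immediate == 0, associations still in the
 * COOKIE_WAIT/COOKIE_ECHOED states are torn down right away, anything
 * left with unread data is aborted, and anything mid-shutdown is only
 * counted; if any such association remains, the inp is flagged
 * SCTP_PCB_FLAGS_SOCKET_GONE and the rest of the free is deferred.
 * With immediate != 0 everything still present is aborted and freed
 * here and now.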
2227 struct sctp_pcb *m;
2228 struct sctp_inpcb *inp_save;
2229 struct sctp_tcb *asoc, *nasoc;
2230 struct sctp_laddr *laddr, *nladdr;
2231 struct inpcb *ip_pcb;
2232 struct socket *so;
2233 struct sctp_socket_q_list *sq;
2234 #if !defined(__FreeBSD__) || __FreeBSD_version < 500000
2235 struct rtentry *rt;
2236 #endif
2237 int cnt;
2239 crit_enter();
2240 SCTP_ASOC_CREATE_LOCK(inp);
2241 SCTP_INP_WLOCK(inp);
2243 if (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE) {
2244 /* been here before */
2245 crit_exit();
2246 kprintf("Endpoint was all gone (dup free)?\n");
2247 SCTP_INP_WUNLOCK(inp);
2248 SCTP_ASOC_CREATE_UNLOCK(inp);
2249 return;
2251 sctp_timer_stop(SCTP_TIMER_TYPE_NEWCOOKIE, inp, NULL, NULL);
2253 if (inp->control) {
2254 sctp_m_freem(inp->control);
2255 inp->control = NULL;
2257 if (inp->pkt) {
2258 sctp_m_freem(inp->pkt);
2259 inp->pkt = NULL;
2261 so = inp->sctp_socket;
2262 m = &inp->sctp_ep;
2263 ip_pcb = &inp->ip_inp.inp; /* we could just cast the main
2264 * pointer here but I will
2265 * be nice :> (i.e. ip_pcb = ep;)
2268 if (immediate == 0) {
2269 int cnt_in_sd;
2270 cnt_in_sd = 0;
2271 for ((asoc = LIST_FIRST(&inp->sctp_asoc_list)); asoc != NULL;
2272 asoc = nasoc) {
2273 nasoc = LIST_NEXT(asoc, sctp_tcblist);
2274 if ((SCTP_GET_STATE(&asoc->asoc) == SCTP_STATE_COOKIE_WAIT) ||
2275 (SCTP_GET_STATE(&asoc->asoc) == SCTP_STATE_COOKIE_ECHOED)) {
2276 /* Just abandon things in the front states */
2277 SCTP_TCB_LOCK(asoc);
2278 SCTP_INP_WUNLOCK(inp);
2279 sctp_free_assoc(inp, asoc);
2280 SCTP_INP_WLOCK(inp);
2281 continue;
2282 } else {
2283 asoc->asoc.state |= SCTP_STATE_CLOSED_SOCKET;
2285 if ((asoc->asoc.size_on_delivery_queue > 0) ||
2286 (asoc->asoc.size_on_reasm_queue > 0) ||
2287 (asoc->asoc.size_on_all_streams > 0) ||
2288 (so && (so->so_rcv.ssb_cc > 0))
2290 /* Left with Data unread */
2291 struct mbuf *op_err;
2292 MGET(op_err, MB_DONTWAIT, MT_DATA);
2293 if (op_err) {
2294 /* Fill in the user initiated abort */
2295 struct sctp_paramhdr *ph;
2296 op_err->m_len =
2297 sizeof(struct sctp_paramhdr);
2298 ph = mtod(op_err,
2299 struct sctp_paramhdr *);
2300 ph->param_type = htons(
2301 SCTP_CAUSE_USER_INITIATED_ABT);
2302 ph->param_length = htons(op_err->m_len);
2304 SCTP_TCB_LOCK(asoc);
2305 sctp_send_abort_tcb(asoc, op_err);
2307 SCTP_INP_WUNLOCK(inp);
2308 sctp_free_assoc(inp, asoc);
2309 SCTP_INP_WLOCK(inp);
2310 continue;
2311 } else if (TAILQ_EMPTY(&asoc->asoc.send_queue) &&
2312 TAILQ_EMPTY(&asoc->asoc.sent_queue)) {
2313 if ((SCTP_GET_STATE(&asoc->asoc) != SCTP_STATE_SHUTDOWN_SENT) &&
2314 (SCTP_GET_STATE(&asoc->asoc) != SCTP_STATE_SHUTDOWN_ACK_SENT)) {
2315 /* there is nothing queued to send, so I send shutdown */
2316 SCTP_TCB_LOCK(asoc);
2317 sctp_send_shutdown(asoc, asoc->asoc.primary_destination);
2318 asoc->asoc.state = SCTP_STATE_SHUTDOWN_SENT;
2319 sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWN, asoc->sctp_ep, asoc,
2320 asoc->asoc.primary_destination);
2321 sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNGUARD, asoc->sctp_ep, asoc,
2322 asoc->asoc.primary_destination);
2323 sctp_chunk_output(inp, asoc, 1);
2324 SCTP_TCB_UNLOCK(asoc);
2326 } else {
2327 /* mark into shutdown pending */
2328 asoc->asoc.state |= SCTP_STATE_SHUTDOWN_PENDING;
2330 cnt_in_sd++;
2332 /* now is there some left in our SHUTDOWN state? */
2333 if (cnt_in_sd) {
2334 inp->sctp_flags |= SCTP_PCB_FLAGS_SOCKET_GONE;
2335 crit_exit();
2336 SCTP_INP_WUNLOCK(inp);
2337 SCTP_ASOC_CREATE_UNLOCK(inp);
2338 return;
2341 #if defined(__FreeBSD__) && __FreeBSD_version >= 503000
2342 if (inp->refcount) {
2343 sctp_timer_start(SCTP_TIMER_TYPE_INPKILL, inp, NULL, NULL);
2344 SCTP_INP_WUNLOCK(inp);
2345 SCTP_ASOC_CREATE_UNLOCK(inp);
2346 return;
2348 #endif
2349 inp->sctp_flags |= SCTP_PCB_FLAGS_SOCKET_ALLGONE;
2350 #if !defined(__FreeBSD__) || __FreeBSD_version < 500000
2351 rt = ip_pcb->inp_route.ro_rt;
2352 #endif
2354 callout_stop(&inp->sctp_ep.signature_change.timer);
2356 if (so) {
2357 /* First take care of socket level things */
2358 #ifdef IPSEC
2359 #ifdef __OpenBSD__
2360 /* XXX IPsec cleanup here */
2361 crit_enter();
2362 if (ip_pcb->inp_tdb_in)
2363 TAILQ_REMOVE(&ip_pcb->inp_tdb_in->tdb_inp_in,
2364 ip_pcb, inp_tdb_in_next);
2365 if (ip_pcb->inp_tdb_out)
2366 TAILQ_REMOVE(&ip_pcb->inp_tdb_out->tdb_inp_out, ip_pcb,
2367 inp_tdb_out_next);
2368 if (ip_pcb->inp_ipsec_localid)
2369 ipsp_reffree(ip_pcb->inp_ipsec_localid);
2370 if (ip_pcb->inp_ipsec_remoteid)
2371 ipsp_reffree(ip_pcb->inp_ipsec_remoteid);
2372 if (ip_pcb->inp_ipsec_localcred)
2373 ipsp_reffree(ip_pcb->inp_ipsec_localcred);
2374 if (ip_pcb->inp_ipsec_remotecred)
2375 ipsp_reffree(ip_pcb->inp_ipsec_remotecred);
2376 if (ip_pcb->inp_ipsec_localauth)
2377 ipsp_reffree(ip_pcb->inp_ipsec_localauth);
2378 if (ip_pcb->inp_ipsec_remoteauth)
2379 ipsp_reffree(ip_pcb->inp_ipsec_remoteauth);
2380 crit_exit();
2381 #else
2382 ipsec4_delete_pcbpolicy(ip_pcb);
2383 #endif
2384 #endif /*IPSEC*/
2385 #if defined(__FreeBSD__) && __FreeBSD_version > 500000
2386 ACCEPT_LOCK();
2387 SOCK_LOCK(so);
2388 #endif
2389 so->so_pcb = 0;
2390 #if defined(__FreeBSD__) && __FreeBSD_version > 500000
2391 sotryfree(so);
2392 #else
2393 sofree(so);
2394 #endif
2397 if (ip_pcb->inp_options) {
2398 m_free(ip_pcb->inp_options);
2399 ip_pcb->inp_options = 0;
2401 #if !defined(__FreeBSD__) || __FreeBSD_version < 500000
2402 if (rt) {
2403 RTFREE(rt);
2404 ip_pcb->inp_route.ro_rt = 0;
2406 #endif
2407 if (ip_pcb->inp_moptions) {
2408 ip_freemoptions(ip_pcb->inp_moptions);
2409 ip_pcb->inp_moptions = 0;
2411 #if !(defined(__FreeBSD__) || defined(__APPLE__) || defined(__DragonFly__))
2412 inp->inp_vflag = 0;
2413 #else
2414 ip_pcb->inp_vflag = 0;
2415 #endif
2417 /* Now the sctp_pcb things */
2420 * free each asoc if it is not already closed/free. we can't use
2421 * the macro here since le_next will get freed as part of the
2422 * sctp_free_assoc() call.
2424 cnt = 0;
2425 for ((asoc = LIST_FIRST(&inp->sctp_asoc_list)); asoc != NULL;
2426 asoc = nasoc) {
2427 nasoc = LIST_NEXT(asoc, sctp_tcblist);
2428 SCTP_TCB_LOCK(asoc);
2429 if (SCTP_GET_STATE(&asoc->asoc) != SCTP_STATE_COOKIE_WAIT) {
2430 struct mbuf *op_err;
2431 MGET(op_err, MB_DONTWAIT, MT_DATA);
2432 if (op_err) {
2433 /* Fill in the user initiated abort */
2434 struct sctp_paramhdr *ph;
2435 op_err->m_len = sizeof(struct sctp_paramhdr);
2436 ph = mtod(op_err, struct sctp_paramhdr *);
2437 ph->param_type = htons(
2438 SCTP_CAUSE_USER_INITIATED_ABT);
2439 ph->param_length = htons(op_err->m_len);
2441 sctp_send_abort_tcb(asoc, op_err);
2443 cnt++;
2445 * sctp_free_assoc() will call sctp_inpcb_free() if
2446 * SCTP_PCB_FLAGS_SOCKET_GONE is set, so we clear the flag
2447 * before calling sctp_free_assoc() to make sure
2448 * sctp_inpcb_free() is not entered twice.
2450 inp->sctp_flags &= ~SCTP_PCB_FLAGS_SOCKET_GONE;
2451 SCTP_INP_WUNLOCK(inp);
2452 sctp_free_assoc(inp, asoc);
2453 SCTP_INP_WLOCK(inp);
2455 while ((sq = TAILQ_FIRST(&inp->sctp_queue_list)) != NULL) {
2456 TAILQ_REMOVE(&inp->sctp_queue_list, sq, next_sq);
2457 SCTP_ZONE_FREE(sctppcbinfo.ipi_zone_sockq, sq);
2458 sctppcbinfo.ipi_count_sockq--;
2459 sctppcbinfo.ipi_gencnt_sockq++;
2461 inp->sctp_socket = 0;
2462 /* Now first we remove ourselves from the overall list of all EP's */
2464 /* Unlock inp first, need correct order */
2465 SCTP_INP_WUNLOCK(inp);
2466 /* now iterator lock */
2467 SCTP_ITERATOR_LOCK();
2468 /* now info lock */
2469 SCTP_INP_INFO_WLOCK();
2470 /* now reget the inp lock */
2471 SCTP_INP_WLOCK(inp);
2473 inp_save = LIST_NEXT(inp, sctp_list);
2474 LIST_REMOVE(inp, sctp_list);
2476 * Now the question is whether this EP was ever bound at all.
2477 * If it was, then we must pull it out of the EP hash list.
2479 if ((inp->sctp_flags & SCTP_PCB_FLAGS_UNBOUND) !=
2480 SCTP_PCB_FLAGS_UNBOUND) {
2482 * ok, this guy has been bound. Its port is somewhere
2483 * in the sctppcbinfo hash table. Remove it!
2485 LIST_REMOVE(inp, sctp_hash);
2487 /* fix any iterators only after out of the list */
2488 sctp_iterator_inp_being_freed(inp, inp_save);
2489 SCTP_ITERATOR_UNLOCK();
2491 * if we have an address list, the following will free the list of
2492 * ifaddr's that are set on this ep. Again, macro limitations here:
2493 * LIST_FOREACH would be a bad idea, since entries are removed as we walk.
2495 for ((laddr = LIST_FIRST(&inp->sctp_addr_list)); laddr != NULL;
2496 laddr = nladdr) {
2497 nladdr = LIST_NEXT(laddr, sctp_nxt_addr);
2498 LIST_REMOVE(laddr, sctp_nxt_addr);
2499 SCTP_ZONE_FREE(sctppcbinfo.ipi_zone_laddr, laddr);
2500 sctppcbinfo.ipi_gencnt_laddr++;
2501 sctppcbinfo.ipi_count_laddr--;
2503 /* Now let's see about freeing the EP hash table. */
2504 if (inp->sctp_tcbhash != NULL) {
2505 FREE(inp->sctp_tcbhash, M_PCB);
2506 inp->sctp_tcbhash = 0;
2508 SCTP_INP_WUNLOCK(inp);
2509 SCTP_ASOC_CREATE_UNLOCK(inp);
2510 SCTP_INP_LOCK_DESTROY(inp);
2511 SCTP_ASOC_CREATE_LOCK_DESTROY(inp);
2513 /* Now we must put the ep memory back into the zone pool */
2514 SCTP_ZONE_FREE(sctppcbinfo.ipi_zone_ep, inp);
2515 sctppcbinfo.ipi_count_ep--;
2517 SCTP_INP_INFO_WUNLOCK();
2518 crit_exit();
2522 struct sctp_nets *
2523 sctp_findnet(struct sctp_tcb *stcb, struct sockaddr *addr)
2525 struct sctp_nets *net;
2526 struct sockaddr_in *sin;
2527 struct sockaddr_in6 *sin6;
2528 /* use the peer's/remote port for lookup if unspecified */
2529 sin = (struct sockaddr_in *)addr;
2530 sin6 = (struct sockaddr_in6 *)addr;
2531 #if 0 /* why do we need to check the port for a nets list on an assoc? */
2532 if (stcb->rport != sin->sin_port) {
2533 /* we cheat and just use a sin for this test */
2534 return (NULL);
2536 #endif
2537 /* locate the address */
2538 TAILQ_FOREACH(net, &stcb->asoc.nets, sctp_next) {
2539 if (sctp_cmpaddr(addr, (struct sockaddr *)&net->ro._l_addr))
2540 return (net);
2542 return (NULL);
2547 * adds a remote endpoint address, done with the INIT/INIT-ACK
2548 * as well as when an ASCONF arrives that adds it. It also
2549 * initializes the cwnd and related stats for the destination.
2552 sctp_is_address_on_local_host(struct sockaddr *addr)
2554 struct ifnet *ifn;
2556 TAILQ_FOREACH(ifn, &ifnet, if_list) {
2557 struct ifaddr_container *ifac;
2559 TAILQ_FOREACH(ifac, &ifn->if_addrheads[mycpuid], ifa_link) {
2560 struct ifaddr *ifa = ifac->ifa;
2562 if (addr->sa_family == ifa->ifa_addr->sa_family) {
2563 /* same family */
2564 if (addr->sa_family == AF_INET) {
2565 struct sockaddr_in *sin, *sin_c;
2566 sin = (struct sockaddr_in *)addr;
2567 sin_c = (struct sockaddr_in *)
2568 ifa->ifa_addr;
2569 if (sin->sin_addr.s_addr ==
2570 sin_c->sin_addr.s_addr) {
2571 /* we are on the same machine */
2572 return (1);
2574 } else if (addr->sa_family == AF_INET6) {
2575 struct sockaddr_in6 *sin6, *sin_c6;
2576 sin6 = (struct sockaddr_in6 *)addr;
2577 sin_c6 = (struct sockaddr_in6 *)
2578 ifa->ifa_addr;
2579 if (SCTP6_ARE_ADDR_EQUAL(&sin6->sin6_addr,
2580 &sin_c6->sin6_addr)) {
2581 /* we are on the same machine */
2582 return (1);
2588 return (0);
2592 sctp_add_remote_addr(struct sctp_tcb *stcb, struct sockaddr *newaddr,
2593 int set_scope, int from)
2596 * The following is redundant to the same lines in
2597 * sctp_aloc_assoc() but is needed since others call the add
2598 * address function directly.
2600 struct sctp_nets *net, *netfirst;
2601 int addr_inscope;
2603 #ifdef SCTP_DEBUG
2604 if (sctp_debug_on & SCTP_DEBUG_PCB1) {
2605 kprintf("Adding an address (from:%d) to the peer: ", from);
2606 sctp_print_address(newaddr);
2608 #endif
2609 netfirst = sctp_findnet(stcb, newaddr);
2610 if (netfirst) {
2612 * Lie and return ok; we don't want to make the association
2613 * go away for this behavior. It will happen in the TCP model
2614 * on a connected socket: the association does not reach the
2615 * hash table until after it is built, so it can't be found.
2616 * Mark the net as reachable, since the initial creation will
2617 * have been cleared and the NOT_IN_ASSOC flag will have been
2618 * added... and we don't want to end up removing it back out.
2620 if (netfirst->dest_state & SCTP_ADDR_UNCONFIRMED) {
2621 netfirst->dest_state = (SCTP_ADDR_REACHABLE|
2622 SCTP_ADDR_UNCONFIRMED);
2623 } else {
2624 netfirst->dest_state = SCTP_ADDR_REACHABLE;
2627 return (0);
2629 addr_inscope = 1;
2630 if (newaddr->sa_family == AF_INET) {
2631 struct sockaddr_in *sin;
2632 sin = (struct sockaddr_in *)newaddr;
2633 if (sin->sin_addr.s_addr == 0) {
2634 /* Invalid address */
2635 return (-1);
2637 /* zero out the sin_zero pad area */
2638 memset(&sin->sin_zero, 0, sizeof(sin->sin_zero));
2640 /* assure len is set */
2641 sin->sin_len = sizeof(struct sockaddr_in);
2642 if (set_scope) {
2643 #ifdef SCTP_DONT_DO_PRIVADDR_SCOPE
2644 stcb->asoc.ipv4_local_scope = 1;
2645 #else
2646 if (IN4_ISPRIVATE_ADDRESS(&sin->sin_addr)) {
2647 stcb->asoc.ipv4_local_scope = 1;
2649 #endif /* SCTP_DONT_DO_PRIVADDR_SCOPE */
2651 if (sctp_is_address_on_local_host(newaddr)) {
2652 stcb->asoc.loopback_scope = 1;
2653 stcb->asoc.ipv4_local_scope = 1;
2654 stcb->asoc.local_scope = 1;
2655 stcb->asoc.site_scope = 1;
2657 } else {
2658 if (from == 8) {
2659 /* From connectx */
2660 if (sctp_is_address_on_local_host(newaddr)) {
2661 stcb->asoc.loopback_scope = 1;
2662 stcb->asoc.ipv4_local_scope = 1;
2663 stcb->asoc.local_scope = 1;
2664 stcb->asoc.site_scope = 1;
2667 /* Validate the address is in scope */
2668 if ((IN4_ISPRIVATE_ADDRESS(&sin->sin_addr)) &&
2669 (stcb->asoc.ipv4_local_scope == 0)) {
2670 addr_inscope = 0;
2673 } else if (newaddr->sa_family == AF_INET6) {
2674 struct sockaddr_in6 *sin6;
2675 sin6 = (struct sockaddr_in6 *)newaddr;
2676 if (IN6_IS_ADDR_UNSPECIFIED(&sin6->sin6_addr)) {
2677 /* Invalid address */
2678 return (-1);
2680 /* assure len is set */
2681 sin6->sin6_len = sizeof(struct sockaddr_in6);
2682 if (set_scope) {
2683 if (sctp_is_address_on_local_host(newaddr)) {
2684 stcb->asoc.loopback_scope = 1;
2685 stcb->asoc.local_scope = 1;
2686 stcb->asoc.ipv4_local_scope = 1;
2687 stcb->asoc.site_scope = 1;
2688 } else if (IN6_IS_ADDR_LINKLOCAL(&sin6->sin6_addr)) {
2690 * If the new destination is a LINK_LOCAL
2691 * we must have common site scope. Don't set
2692 * the local scope since we may not share all
2693 * links, only loopback can do this.
2694 * Links on the local network would also
2695 * be on our private network for v4 too.
2697 stcb->asoc.ipv4_local_scope = 1;
2698 stcb->asoc.site_scope = 1;
2699 } else if (IN6_IS_ADDR_SITELOCAL(&sin6->sin6_addr)) {
2701 * If the new destination is SITE_LOCAL
2702 * then we must have site scope in common.
2704 stcb->asoc.site_scope = 1;
2706 } else {
2707 if (from == 8) {
2708 /* From connectx */
2709 if (sctp_is_address_on_local_host(newaddr)) {
2710 stcb->asoc.loopback_scope = 1;
2711 stcb->asoc.ipv4_local_scope = 1;
2712 stcb->asoc.local_scope = 1;
2713 stcb->asoc.site_scope = 1;
2716 /* Validate the address is in scope */
2717 if (IN6_IS_ADDR_LOOPBACK(&sin6->sin6_addr) &&
2718 (stcb->asoc.loopback_scope == 0)) {
2719 addr_inscope = 0;
2720 } else if (IN6_IS_ADDR_LINKLOCAL(&sin6->sin6_addr) &&
2721 (stcb->asoc.local_scope == 0)) {
2722 addr_inscope = 0;
2723 } else if (IN6_IS_ADDR_SITELOCAL(&sin6->sin6_addr) &&
2724 (stcb->asoc.site_scope == 0)) {
2725 addr_inscope = 0;
2728 } else {
2729 /* not supported family type */
2730 return (-1);
2732 net = (struct sctp_nets *)SCTP_ZONE_GET(sctppcbinfo.ipi_zone_net);
2733 if (net == NULL) {
2734 return (-1);
2736 sctppcbinfo.ipi_count_raddr++;
2737 sctppcbinfo.ipi_gencnt_raddr++;
2738 bzero(net, sizeof(*net));
2739 memcpy(&net->ro._l_addr, newaddr, newaddr->sa_len);
2740 if (newaddr->sa_family == AF_INET) {
2741 ((struct sockaddr_in *)&net->ro._l_addr)->sin_port = stcb->rport;
2742 } else if (newaddr->sa_family == AF_INET6) {
2743 ((struct sockaddr_in6 *)&net->ro._l_addr)->sin6_port = stcb->rport;
2745 net->addr_is_local = sctp_is_address_on_local_host(newaddr);
2746 net->failure_threshold = stcb->asoc.def_net_failure;
2747 if (addr_inscope == 0) {
2748 #ifdef SCTP_DEBUG
2749 if (sctp_debug_on & SCTP_DEBUG_PCB1) {
2750 kprintf("Adding an address which is OUT OF SCOPE\n");
2752 #endif /* SCTP_DEBUG */
2753 net->dest_state = (SCTP_ADDR_REACHABLE |
2754 SCTP_ADDR_OUT_OF_SCOPE);
2755 } else {
2756 if (from == 8)
2757 /* 8 is passed by connect_x */
2758 net->dest_state = SCTP_ADDR_REACHABLE;
2759 else
2760 net->dest_state = SCTP_ADDR_REACHABLE |
2761 SCTP_ADDR_UNCONFIRMED;
2763 net->RTO = stcb->asoc.initial_rto;
2764 stcb->asoc.numnets++;
2765 net->ref_count = 1;
2767 /* Init the timer structure */
2768 #if defined(__FreeBSD__) && __FreeBSD_version >= 500000
2769 callout_init(&net->rxt_timer.timer, 0);
2770 callout_init(&net->pmtu_timer.timer, 0);
2771 #else
2772 callout_init(&net->rxt_timer.timer);
2773 callout_init(&net->pmtu_timer.timer);
2774 #endif
2776 /* Now generate a route for this guy */
2777 /* KAME hack: embed scopeid */
2778 if (newaddr->sa_family == AF_INET6) {
2779 struct sockaddr_in6 *sin6;
2780 sin6 = (struct sockaddr_in6 *)&net->ro._l_addr;
2781 #if defined(SCTP_BASE_FREEBSD) || defined(__APPLE__) || defined(__DragonFly__)
2782 in6_embedscope(&sin6->sin6_addr, sin6,
2783 &stcb->sctp_ep->ip_inp.inp, NULL);
2784 #else
2785 in6_embedscope(&sin6->sin6_addr, sin6);
2786 #endif
2787 #ifndef SCOPEDROUTING
2788 sin6->sin6_scope_id = 0;
2789 #endif
2791 #if defined(__FreeBSD__) || defined(__APPLE__) || defined(__DragonFly__)
2792 rtalloc_ign((struct route *)&net->ro, 0UL);
2793 #else
2794 rtalloc((struct route *)&net->ro);
2795 #endif
2796 if (newaddr->sa_family == AF_INET6) {
2797 struct sockaddr_in6 *sin6;
2798 sin6 = (struct sockaddr_in6 *)&net->ro._l_addr;
2799 in6_recoverscope(sin6, &sin6->sin6_addr, NULL);
2801 if ((net->ro.ro_rt) &&
2802 (net->ro.ro_rt->rt_ifp)) {
2803 net->mtu = net->ro.ro_rt->rt_ifp->if_mtu;
2804 if (from == 1) {
2805 stcb->asoc.smallest_mtu = net->mtu;
2807 /* start things off to match mtu of interface please. */
2808 net->ro.ro_rt->rt_rmx.rmx_mtu = net->ro.ro_rt->rt_ifp->if_mtu;
2809 } else {
2810 net->mtu = stcb->asoc.smallest_mtu;
2812 if (stcb->asoc.smallest_mtu > net->mtu) {
2813 stcb->asoc.smallest_mtu = net->mtu;
2815 /* We take the max of the burst limit times the MTU and SCTP_INITIAL_CWND,
2816 * then cap the result at 4 MTUs of sending.
2818 net->cwnd = min((net->mtu * 4), max((stcb->asoc.max_burst * net->mtu), SCTP_INITIAL_CWND));
2820 /* we always get at LEAST 2 MTU's */
2821 if (net->cwnd < (2 * net->mtu)) {
2822 net->cwnd = 2 * net->mtu;
2825 net->ssthresh = stcb->asoc.peers_rwnd;
2827 net->src_addr_selected = 0;
2828 netfirst = TAILQ_FIRST(&stcb->asoc.nets);
2829 if (net->ro.ro_rt == NULL) {
2830 /* Since we have no route put it at the back */
2831 TAILQ_INSERT_TAIL(&stcb->asoc.nets, net, sctp_next);
2832 } else if (netfirst == NULL) {
2833 /* We are the first one in the pool. */
2834 TAILQ_INSERT_HEAD(&stcb->asoc.nets, net, sctp_next);
2835 } else if (netfirst->ro.ro_rt == NULL) {
2837 * First one has NO route. Place this one ahead of the
2838 * first one.
2840 TAILQ_INSERT_HEAD(&stcb->asoc.nets, net, sctp_next);
2841 } else if (net->ro.ro_rt->rt_ifp != netfirst->ro.ro_rt->rt_ifp) {
2843 * This one has a different interface than the one at the
2844 * top of the list. Place it ahead.
2846 TAILQ_INSERT_HEAD(&stcb->asoc.nets, net, sctp_next);
2847 } else {
2849 * Ok we have the same interface as the first one. Move
2850 * forward until we find either
2851 * a) one with a NULL route... insert ahead of that
2852 * b) one with a different ifp.. insert after that.
2853 * c) end of the list.. insert at the tail.
2855 struct sctp_nets *netlook;
2856 do {
2857 netlook = TAILQ_NEXT(netfirst, sctp_next);
2858 if (netlook == NULL) {
2859 /* End of the list */
2860 TAILQ_INSERT_TAIL(&stcb->asoc.nets, net,
2861 sctp_next);
2862 break;
2863 } else if (netlook->ro.ro_rt == NULL) {
2864 /* next one has NO route */
2865 TAILQ_INSERT_BEFORE(netfirst, net, sctp_next);
2866 break;
2867 } else if (netlook->ro.ro_rt->rt_ifp !=
2868 net->ro.ro_rt->rt_ifp) {
2869 TAILQ_INSERT_AFTER(&stcb->asoc.nets, netlook,
2870 net, sctp_next);
2871 break;
2873 /* Shift forward */
2874 netfirst = netlook;
2875 } while (netlook != NULL);
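/*
 * The net effect of the insertion rules above appears to be that
 * destinations sharing the head's interface stay grouped together,
 * destinations on a different interface (or replacing a route-less
 * head) are promoted toward the front, and destinations with no route
 * drift to the tail, so primary/alternate selection tends to try
 * routable, interface-diverse peers first.
 */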
2877 /* got to have a primary set */
2878 if (stcb->asoc.primary_destination == 0) {
2879 stcb->asoc.primary_destination = net;
2880 } else if ((stcb->asoc.primary_destination->ro.ro_rt == NULL) &&
2881 (net->ro.ro_rt)) {
2882 /* No route to current primary adopt new primary */
2883 stcb->asoc.primary_destination = net;
2885 sctp_timer_start(SCTP_TIMER_TYPE_PATHMTURAISE, stcb->sctp_ep, stcb,
2886 net);
2888 return (0);
2893 * allocate an association and add it to the endpoint. The caller must
2894 * be careful to add all additional addresses as soon as they are known,
2895 * or else the assoc may experience a blackout scenario.
2897 struct sctp_tcb *
2898 sctp_aloc_assoc(struct sctp_inpcb *inp, struct sockaddr *firstaddr,
2899 int for_a_init, int *error, uint32_t override_tag)
2901 struct sctp_tcb *stcb;
2902 struct sctp_association *asoc;
2903 struct sctpasochead *head;
2904 uint16_t rport;
2905 int err;
2908 * Assumption made here:
2909 * Caller has done a sctp_findassociation_ep_addr(ep, addr's);
2910 * to make sure the address does not exist already.
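 *
 * An illustrative caller pattern (a sketch only; the lookup's argument
 * list is abbreviated, not copied from any particular caller):
 *
 *	stcb = sctp_findassociation_ep_addr(...);
 *	if (stcb == NULL)
 *		stcb = sctp_aloc_assoc(inp, firstaddr, 1, &error, 0);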
2912 if (sctppcbinfo.ipi_count_asoc >= SCTP_MAX_NUM_OF_ASOC) {
2913 /* Hit max assoc, sorry no more */
2914 *error = ENOBUFS;
2915 return (NULL);
2917 SCTP_INP_RLOCK(inp);
2918 if (inp->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL) {
2920 * If it's in the TCP pool, it is NOT allowed to create an
2921 * association; only the parent listener or the one-to-many
2922 * socket may call sctp_aloc_assoc(). If a peeled-off or
2923 * connected one does this, it's an error.
2925 SCTP_INP_RUNLOCK(inp);
2926 *error = EINVAL;
2927 return (NULL);
2930 #ifdef SCTP_DEBUG
2931 if (sctp_debug_on & SCTP_DEBUG_PCB3) {
2932 kprintf("Allocate an association for peer:");
2933 if (firstaddr)
2934 sctp_print_address(firstaddr);
2935 else
2936 kprintf("None\n");
2937 kprintf("Port:%d\n",
2938 ntohs(((struct sockaddr_in *)firstaddr)->sin_port));
2940 #endif /* SCTP_DEBUG */
2941 if (firstaddr->sa_family == AF_INET) {
2942 struct sockaddr_in *sin;
2943 sin = (struct sockaddr_in *)firstaddr;
2944 if ((sin->sin_port == 0) || (sin->sin_addr.s_addr == 0)) {
2945 /* Invalid address */
2946 #ifdef SCTP_DEBUG
2947 if (sctp_debug_on & SCTP_DEBUG_PCB3) {
2948 kprintf("peer address invalid\n");
2950 #endif
2951 SCTP_INP_RUNLOCK(inp);
2952 *error = EINVAL;
2953 return (NULL);
2955 rport = sin->sin_port;
2956 } else if (firstaddr->sa_family == AF_INET6) {
2957 struct sockaddr_in6 *sin6;
2958 sin6 = (struct sockaddr_in6 *)firstaddr;
2959 if ((sin6->sin6_port == 0) ||
2960 (IN6_IS_ADDR_UNSPECIFIED(&sin6->sin6_addr))) {
2961 /* Invalid address */
2962 #ifdef SCTP_DEBUG
2963 if (sctp_debug_on & SCTP_DEBUG_PCB3) {
2964 kprintf("peer address invalid\n");
2966 #endif
2967 SCTP_INP_RUNLOCK(inp);
2968 *error = EINVAL;
2969 return (NULL);
2971 rport = sin6->sin6_port;
2972 } else {
2973 /* not supported family type */
2974 #ifdef SCTP_DEBUG
2975 if (sctp_debug_on & SCTP_DEBUG_PCB3) {
2976 kprintf("BAD family %d\n", firstaddr->sa_family);
2978 #endif
2979 SCTP_INP_RUNLOCK(inp);
2980 *error = EINVAL;
2981 return (NULL);
2983 SCTP_INP_RUNLOCK(inp);
2984 if (inp->sctp_flags & SCTP_PCB_FLAGS_UNBOUND) {
2986 * If you have not performed a bind, then we need to do
2987 * the ephemeral bind for you.
2989 #ifdef SCTP_DEBUG
2990 if (sctp_debug_on & SCTP_DEBUG_PCB3) {
2991 kprintf("Doing implicit BIND\n");
2993 #endif
2995 if ((err = sctp_inpcb_bind(inp->sctp_socket,
2996 (struct sockaddr *)NULL,
2997 #if (defined(__FreeBSD__) && __FreeBSD_version >= 500000) || defined(__DragonFly__)
2998 (struct thread *)NULL
2999 #else
3000 (struct proc *)NULL
3001 #endif
3002 ))){
3003 /* bind error, probably perm */
3004 #ifdef SCTP_DEBUG
3005 if (sctp_debug_on & SCTP_DEBUG_PCB3) {
3006 kprintf("BIND FAILS ret:%d\n", err);
3008 #endif
3010 *error = err;
3011 return (NULL);
3014 stcb = (struct sctp_tcb *)SCTP_ZONE_GET(sctppcbinfo.ipi_zone_asoc);
3015 if (stcb == NULL) {
3016 /* out of memory? */
3017 #ifdef SCTP_DEBUG
3018 if (sctp_debug_on & SCTP_DEBUG_PCB3) {
3019 kprintf("aloc_assoc: no assoc mem left, stcb=NULL\n");
3021 #endif
3022 *error = ENOMEM;
3023 return (NULL);
3025 sctppcbinfo.ipi_count_asoc++;
3026 sctppcbinfo.ipi_gencnt_asoc++;
3028 bzero(stcb, sizeof(*stcb));
3029 asoc = &stcb->asoc;
3030 SCTP_TCB_LOCK_INIT(stcb);
3031 /* setup back pointer's */
3032 stcb->sctp_ep = inp;
3033 stcb->sctp_socket = inp->sctp_socket;
3034 if ((err = sctp_init_asoc(inp, asoc, for_a_init, override_tag))) {
3035 /* failed */
3036 SCTP_TCB_LOCK_DESTROY (stcb);
3037 SCTP_ZONE_FREE(sctppcbinfo.ipi_zone_asoc, stcb);
3038 sctppcbinfo.ipi_count_asoc--;
3039 #ifdef SCTP_DEBUG
3040 if (sctp_debug_on & SCTP_DEBUG_PCB3) {
3041 kprintf("aloc_assoc: couldn't init asoc, out of mem?!\n");
3043 #endif
3044 *error = err;
3045 return (NULL);
3047 /* and the port */
3048 stcb->rport = rport;
3049 SCTP_INP_INFO_WLOCK();
3050 SCTP_INP_WLOCK(inp);
3051 if (inp->sctp_flags & (SCTP_PCB_FLAGS_SOCKET_GONE|SCTP_PCB_FLAGS_SOCKET_ALLGONE)) {
3052 /* inpcb freed while alloc going on */
3053 SCTP_TCB_LOCK_DESTROY (stcb);
3054 SCTP_ZONE_FREE(sctppcbinfo.ipi_zone_asoc, stcb);
3055 SCTP_INP_WUNLOCK(inp);
3056 SCTP_INP_INFO_WUNLOCK();
3057 sctppcbinfo.ipi_count_asoc--;
3058 #ifdef SCTP_DEBUG
3059 if (sctp_debug_on & SCTP_DEBUG_PCB3) {
3060 kprintf("aloc_assoc: inp freed while assoc alloc in progress\n");
3062 #endif
3063 *error = EINVAL;
3064 return (NULL);
3066 SCTP_TCB_LOCK(stcb);
3068 /* now that my_vtag is set, add it to the hash */
3069 head = &sctppcbinfo.sctp_asochash[SCTP_PCBHASH_ASOC(stcb->asoc.my_vtag,
3070 sctppcbinfo.hashasocmark)];
3071 /* put it in the bucket in the vtag hash of assoc's for the system */
3072 LIST_INSERT_HEAD(head, stcb, sctp_asocs);
3073 SCTP_INP_INFO_WUNLOCK();
3076 if ((err = sctp_add_remote_addr(stcb, firstaddr, 1, 1))) {
3077 /* failure.. memory error? */
3078 if (asoc->strmout)
3079 FREE(asoc->strmout, M_PCB);
3080 if (asoc->mapping_array)
3081 FREE(asoc->mapping_array, M_PCB);
3083 SCTP_ZONE_FREE(sctppcbinfo.ipi_zone_asoc, stcb);
3084 sctppcbinfo.ipi_count_asoc--;
3085 #ifdef SCTP_DEBUG
3086 if (sctp_debug_on & SCTP_DEBUG_PCB3) {
3087 kprintf("aloc_assoc: couldn't add remote addr!\n");
3089 #endif
3090 SCTP_TCB_LOCK_DESTROY (stcb);
3091 *error = ENOBUFS;
3092 return (NULL);
3094 /* Init all the timers */
3095 #if defined(__FreeBSD__) && __FreeBSD_version >= 500000
3096 callout_init(&asoc->hb_timer.timer, 0);
3097 callout_init(&asoc->dack_timer.timer, 0);
3098 callout_init(&asoc->asconf_timer.timer, 0);
3099 callout_init(&asoc->shut_guard_timer.timer, 0);
3100 callout_init(&asoc->autoclose_timer.timer, 0);
3101 callout_init(&asoc->delayed_event_timer.timer, 0);
3102 #else
3103 callout_init(&asoc->hb_timer.timer);
3104 callout_init(&asoc->dack_timer.timer);
3105 callout_init(&asoc->asconf_timer.timer);
3106 callout_init(&asoc->shut_guard_timer.timer);
3107 callout_init(&asoc->autoclose_timer.timer);
3108 callout_init(&asoc->delayed_event_timer.timer);
3109 #endif
3110 LIST_INSERT_HEAD(&inp->sctp_asoc_list, stcb, sctp_tcblist);
3111 /* now file the port under the hash as well */
3112 if (inp->sctp_tcbhash != NULL) {
3113 head = &inp->sctp_tcbhash[SCTP_PCBHASH_ALLADDR(stcb->rport,
3114 inp->sctp_hashmark)];
3115 LIST_INSERT_HEAD(head, stcb, sctp_tcbhash);
3117 SCTP_INP_WUNLOCK(inp);
3118 #ifdef SCTP_DEBUG
3119 if (sctp_debug_on & SCTP_DEBUG_PCB1) {
3120 kprintf("Association %p now allocated\n", stcb);
3122 #endif
3123 return (stcb);
3126 void
3127 sctp_free_remote_addr(struct sctp_nets *net)
3129 if (net == NULL)
3130 return;
3131 net->ref_count--;
3132 if (net->ref_count <= 0) {
3133 /* stop timer if running */
3134 callout_stop(&net->rxt_timer.timer);
3135 callout_stop(&net->pmtu_timer.timer);
3136 net->dest_state = SCTP_ADDR_NOT_REACHABLE;
3137 SCTP_ZONE_FREE(sctppcbinfo.ipi_zone_net, net);
3138 sctppcbinfo.ipi_count_raddr--;
3143 * remove a remote endpoint address from an association; it
3144 * will fail if the address does not exist.
3147 sctp_del_remote_addr(struct sctp_tcb *stcb, struct sockaddr *remaddr)
3150 * Here we need to remove a remote address. This is quite simple: we
3151 * first find it in the list of addresses for the association
3152 * (stcb->asoc.nets) and then, if it is there, remove it from
3153 * that list.
3154 * Note we do not allow it to be removed if that would leave no other
3155 * addresses.
3157 struct sctp_association *asoc;
3158 struct sctp_nets *net, *net_tmp;
3159 asoc = &stcb->asoc;
3160 if (asoc->numnets < 2) {
3161 /* Must have at LEAST two remote addresses */
3162 return (-1);
3164 /* locate the address */
3165 for (net = TAILQ_FIRST(&asoc->nets); net != NULL; net = net_tmp) {
3166 net_tmp = TAILQ_NEXT(net, sctp_next);
3167 if (net->ro._l_addr.sa.sa_family != remaddr->sa_family) {
3168 continue;
3170 if (sctp_cmpaddr((struct sockaddr *)&net->ro._l_addr,
3171 remaddr)) {
3172 /* we found the guy */
3173 asoc->numnets--;
3174 TAILQ_REMOVE(&asoc->nets, net, sctp_next);
3175 sctp_free_remote_addr(net);
3176 if (net == asoc->primary_destination) {
3177 /* Reset primary */
3178 struct sctp_nets *lnet;
3179 lnet = TAILQ_FIRST(&asoc->nets);
3180 /* Try to find a confirmed primary */
3181 asoc->primary_destination =
3182 sctp_find_alternate_net(stcb, lnet);
3184 if (net == asoc->last_data_chunk_from) {
3185 /* Reset primary */
3186 asoc->last_data_chunk_from =
3187 TAILQ_FIRST(&asoc->nets);
3189 if (net == asoc->last_control_chunk_from) {
3190 /* Reset primary */
3191 asoc->last_control_chunk_from =
3192 TAILQ_FIRST(&asoc->nets);
3194 if (net == asoc->asconf_last_sent_to) {
3195 /* Reset primary */
3196 asoc->asconf_last_sent_to =
3197 TAILQ_FIRST(&asoc->nets);
3199 return (0);
3202 /* not found. */
3203 return (-2);
3207 static void
3208 sctp_add_vtag_to_timewait(struct sctp_inpcb *inp, u_int32_t tag)
3210 struct sctpvtaghead *chain;
3211 struct sctp_tagblock *twait_block;
3212 struct timeval now;
3213 int set, i;
3214 SCTP_GETTIME_TIMEVAL(&now);
3215 chain = &sctppcbinfo.vtag_timewait[(tag % SCTP_STACK_VTAG_HASH_SIZE)];
3216 set = 0;
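/*
 * Sketch of the structure searched below (as used here, not a formal
 * spec): vtag_timewait[] is a small hash of chains of sctp_tagblock
 * entries, each carrying SCTP_NUMBER_IN_VTAG_BLOCK (v_tag,
 * tv_sec_at_expire) slots.  The scan reuses the first free slot,
 * expires stale slots as it walks, and MALLOCs a fresh block only if
 * no usable slot is found.
 */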
3217 if (!LIST_EMPTY(chain)) {
3218 /* Block(s) present, lets find space, and expire on the fly */
3219 LIST_FOREACH(twait_block, chain, sctp_nxt_tagblock) {
3220 for (i = 0; i < SCTP_NUMBER_IN_VTAG_BLOCK; i++) {
3221 if ((twait_block->vtag_block[i].v_tag == 0) &&
3222 !set) {
3223 twait_block->vtag_block[i].tv_sec_at_expire =
3224 now.tv_sec + SCTP_TIME_WAIT;
3225 twait_block->vtag_block[i].v_tag = tag;
3226 set = 1;
3227 } else if ((twait_block->vtag_block[i].v_tag) &&
3228 ((long)twait_block->vtag_block[i].tv_sec_at_expire >
3229 now.tv_sec)) {
3230 /* Audit expires this guy */
3231 twait_block->vtag_block[i].tv_sec_at_expire = 0;
3232 twait_block->vtag_block[i].v_tag = 0;
3233 if (set == 0) {
3234 /* Reuse it for my new tag */
3235 twait_block->vtag_block[i].tv_sec_at_expire = now.tv_sec + SCTP_TIME_WAIT;
3236 twait_block->vtag_block[i].v_tag = tag;
3237 set = 1;
3241 if (set) {
3243 * We stop at the block where we managed to place our
3244 * tag; only the blocks scanned so far get audited.
3246 break;
3250 /* Need to add a new block to chain */
3251 if (!set) {
3252 MALLOC(twait_block, struct sctp_tagblock *,
3253 sizeof(struct sctp_tagblock), M_PCB, M_NOWAIT);
3254 if (twait_block == NULL) {
3255 return;
3257 memset(twait_block, 0, sizeof(struct sctp_tagblock));
3258 LIST_INSERT_HEAD(chain, twait_block, sctp_nxt_tagblock);
3259 twait_block->vtag_block[0].tv_sec_at_expire = now.tv_sec +
3260 SCTP_TIME_WAIT;
3261 twait_block->vtag_block[0].v_tag = tag;
3266 static void
3267 sctp_iterator_asoc_being_freed(struct sctp_inpcb *inp, struct sctp_tcb *stcb)
3269 struct sctp_iterator *it;
3273 /* The TCB is unlocked before we are called; this
3274 * avoids a deadlock scenario where the iterator is
3275 * waiting on the TCB lock while the TCB holder is
3276 * waiting on the iterator
3277 * lock.
3279 SCTP_ITERATOR_LOCK();
3280 SCTP_INP_INFO_WLOCK();
3281 SCTP_INP_WLOCK(inp);
3282 SCTP_TCB_LOCK(stcb);
3284 it = stcb->asoc.stcb_starting_point_for_iterator;
3285 if (it == NULL) {
3286 return;
3288 if (it->inp != stcb->sctp_ep) {
3289 /* hm, focused on the wrong one? */
3290 return;
3292 if (it->stcb != stcb) {
3293 return;
3295 it->stcb = LIST_NEXT(stcb, sctp_tcblist);
3296 if (it->stcb == NULL) {
3297 /* done with all asoc's in this inp */
3298 if (it->iterator_flags & SCTP_ITERATOR_DO_SINGLE_INP) {
3299 it->inp = NULL;
3300 } else {
3302 it->inp = LIST_NEXT(inp, sctp_list);
3308 * Free the association after un-hashing the remote port.
3310 void
3311 sctp_free_assoc(struct sctp_inpcb *inp, struct sctp_tcb *stcb)
3313 struct sctp_association *asoc;
3314 struct sctp_nets *net, *prev;
3315 struct sctp_laddr *laddr;
3316 struct sctp_tmit_chunk *chk;
3317 struct sctp_asconf_addr *aparam;
3318 struct sctp_socket_q_list *sq;
3320 /* first, lets purge the entry from the hash table. */
3321 crit_enter();
3322 if (stcb->asoc.state == 0) {
3323 kprintf("Freeing already free association:%p - huh??\n",
3324 stcb);
3325 crit_exit();
3326 return;
3328 asoc = &stcb->asoc;
3329 asoc->state = 0;
3330 /* now clean up any other timers */
3331 callout_stop(&asoc->hb_timer.timer);
3332 callout_stop(&asoc->dack_timer.timer);
3333 callout_stop(&asoc->asconf_timer.timer);
3334 callout_stop(&asoc->shut_guard_timer.timer);
3335 callout_stop(&asoc->autoclose_timer.timer);
3336 callout_stop(&asoc->delayed_event_timer.timer);
3337 TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
3338 callout_stop(&net->rxt_timer.timer);
3339 callout_stop(&net->pmtu_timer.timer);
3342 /* Tell the iterators this asoc is being freed; we pass an
3343 * unlocked TCB. The call returns with INP_INFO
3344 * and INP write-locked, the TCB locked
3345 * again, and of course the iterator lock
3346 * held as well.
3348 SCTP_TCB_UNLOCK(stcb);
3349 sctp_iterator_asoc_being_freed(inp, stcb);
3351 /* Null all of my entries on the socket q */
3352 TAILQ_FOREACH(sq, &inp->sctp_queue_list, next_sq) {
3353 if (sq->tcb == stcb) {
3354 sq->tcb = NULL;
3358 if (inp->sctp_tcb_at_block == (void *)stcb) {
3359 inp->error_on_block = ECONNRESET;
3362 if (inp->sctp_tcbhash) {
3363 LIST_REMOVE(stcb, sctp_tcbhash);
3365 /* Now lets remove it from the list of ALL associations in the EP */
3366 LIST_REMOVE(stcb, sctp_tcblist);
3367 SCTP_INP_WUNLOCK(inp);
3368 SCTP_ITERATOR_UNLOCK();
3371 /* pull from vtag hash */
3372 LIST_REMOVE(stcb, sctp_asocs);
3375 * Now before we can free the assoc, we must release all of the
3376 * networks and any other allocated space; i.e. add any new cleanup
3377 * here, before the SCTP_ZONE_FREE() of the asoc entry.
3380 sctp_add_vtag_to_timewait(inp, asoc->my_vtag);
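/*
 * Parking my_vtag in the time-wait table means the tag-selection code
 * will, for roughly SCTP_TIME_WAIT seconds, refuse to hand this tag to
 * a new association, so stray packets for this dead assoc cannot be
 * mistaken for traffic belonging to a fresh one.
 */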
3381 SCTP_INP_INFO_WUNLOCK();
3382 prev = NULL;
3383 while (!TAILQ_EMPTY(&asoc->nets)) {
3384 net = TAILQ_FIRST(&asoc->nets);
3385 /* pull from list */
3386 if ((sctppcbinfo.ipi_count_raddr == 0) || (prev == net)) {
3387 break;
3389 prev = net;
3390 TAILQ_REMOVE(&asoc->nets, net, sctp_next);
3391 /* free it */
3392 net->ref_count = 0;
3393 SCTP_ZONE_FREE(sctppcbinfo.ipi_zone_net, net);
3394 sctppcbinfo.ipi_count_raddr--;
3397 * The chunk lists and such SHOULD be empty but we check them
3398 * just in case.
3400 /* anything on the wheel needs to be removed */
3401 while (!TAILQ_EMPTY(&asoc->out_wheel)) {
3402 struct sctp_stream_out *outs;
3403 outs = TAILQ_FIRST(&asoc->out_wheel);
3404 TAILQ_REMOVE(&asoc->out_wheel, outs, next_spoke);
3405 /* now clean up any chunks here */
3406 chk = TAILQ_FIRST(&outs->outqueue);
3407 while (chk) {
3408 TAILQ_REMOVE(&outs->outqueue, chk, sctp_next);
3409 if (chk->data) {
3410 sctp_m_freem(chk->data);
3411 chk->data = NULL;
3413 chk->whoTo = NULL;
3414 chk->asoc = NULL;
3415 /* Free the chunk */
3416 SCTP_ZONE_FREE(sctppcbinfo.ipi_zone_chunk, chk);
3417 sctppcbinfo.ipi_count_chunk--;
3418 sctppcbinfo.ipi_gencnt_chunk++;
3419 if ((int)sctppcbinfo.ipi_count_chunk < 0) {
3420 panic("Chunk count is negative");
3422 chk = TAILQ_FIRST(&outs->outqueue);
3424 outs = TAILQ_FIRST(&asoc->out_wheel);
3427 if (asoc->pending_reply) {
3428 FREE(asoc->pending_reply, M_PCB);
3429 asoc->pending_reply = NULL;
3431 chk = TAILQ_FIRST(&asoc->pending_reply_queue);
3432 while (chk) {
3433 TAILQ_REMOVE(&asoc->pending_reply_queue, chk, sctp_next);
3434 if (chk->data) {
3435 sctp_m_freem(chk->data);
3436 chk->data = NULL;
3438 chk->whoTo = NULL;
3439 chk->asoc = NULL;
3440 /* Free the chunk */
3441 SCTP_ZONE_FREE(sctppcbinfo.ipi_zone_chunk, chk);
3442 sctppcbinfo.ipi_count_chunk--;
3443 sctppcbinfo.ipi_gencnt_chunk++;
3444 if ((int)sctppcbinfo.ipi_count_chunk < 0) {
3445 panic("Chunk count is negative");
3447 chk = TAILQ_FIRST(&asoc->pending_reply_queue);
3449 /* pending send queue SHOULD be empty */
3450 if (!TAILQ_EMPTY(&asoc->send_queue)) {
3451 chk = TAILQ_FIRST(&asoc->send_queue);
3452 while (chk) {
3453 TAILQ_REMOVE(&asoc->send_queue, chk, sctp_next);
3454 if (chk->data) {
3455 sctp_m_freem(chk->data);
3456 chk->data = NULL;
3458 SCTP_ZONE_FREE(sctppcbinfo.ipi_zone_chunk, chk);
3459 sctppcbinfo.ipi_count_chunk--;
3460 if ((int)sctppcbinfo.ipi_count_chunk < 0) {
3461 panic("Chunk count is negative");
3463 sctppcbinfo.ipi_gencnt_chunk++;
3464 chk = TAILQ_FIRST(&asoc->send_queue);
3467 /* sent queue SHOULD be empty */
3468 if (!TAILQ_EMPTY(&asoc->sent_queue)) {
3469 chk = TAILQ_FIRST(&asoc->sent_queue);
3470 while (chk) {
3471 TAILQ_REMOVE(&asoc->sent_queue, chk, sctp_next);
3472 if (chk->data) {
3473 sctp_m_freem(chk->data);
3474 chk->data = NULL;
3476 SCTP_ZONE_FREE(sctppcbinfo.ipi_zone_chunk, chk);
3477 sctppcbinfo.ipi_count_chunk--;
3478 if ((int)sctppcbinfo.ipi_count_chunk < 0) {
3479 panic("Chunk count is negative");
3481 sctppcbinfo.ipi_gencnt_chunk++;
3482 chk = TAILQ_FIRST(&asoc->sent_queue);
3485 /* control queue MAY not be empty */
3486 if (!TAILQ_EMPTY(&asoc->control_send_queue)) {
3487 chk = TAILQ_FIRST(&asoc->control_send_queue);
3488 while (chk) {
3489 TAILQ_REMOVE(&asoc->control_send_queue, chk, sctp_next);
3490 if (chk->data) {
3491 sctp_m_freem(chk->data);
3492 chk->data = NULL;
3494 SCTP_ZONE_FREE(sctppcbinfo.ipi_zone_chunk, chk);
3495 sctppcbinfo.ipi_count_chunk--;
3496 if ((int)sctppcbinfo.ipi_count_chunk < 0) {
3497 panic("Chunk count is negative");
3499 sctppcbinfo.ipi_gencnt_chunk++;
3500 chk = TAILQ_FIRST(&asoc->control_send_queue);
3503 if (!TAILQ_EMPTY(&asoc->reasmqueue)) {
3504 chk = TAILQ_FIRST(&asoc->reasmqueue);
3505 while (chk) {
3506 TAILQ_REMOVE(&asoc->reasmqueue, chk, sctp_next);
3507 if (chk->data) {
3508 sctp_m_freem(chk->data);
3509 chk->data = NULL;
3511 SCTP_ZONE_FREE(sctppcbinfo.ipi_zone_chunk, chk);
3512 sctppcbinfo.ipi_count_chunk--;
3513 if ((int)sctppcbinfo.ipi_count_chunk < 0) {
3514 panic("Chunk count is negative");
3516 sctppcbinfo.ipi_gencnt_chunk++;
3517 chk = TAILQ_FIRST(&asoc->reasmqueue);
3520 if (!TAILQ_EMPTY(&asoc->delivery_queue)) {
3521 chk = TAILQ_FIRST(&asoc->delivery_queue);
3522 while (chk) {
3523 TAILQ_REMOVE(&asoc->delivery_queue, chk, sctp_next);
3524 if (chk->data) {
3525 sctp_m_freem(chk->data);
3526 chk->data = NULL;
3528 SCTP_ZONE_FREE(sctppcbinfo.ipi_zone_chunk, chk);
3529 sctppcbinfo.ipi_count_chunk--;
3530 if ((int)sctppcbinfo.ipi_count_chunk < 0) {
3531 panic("Chunk count is negative");
3533 sctppcbinfo.ipi_gencnt_chunk++;
3534 chk = TAILQ_FIRST(&asoc->delivery_queue);
3537 if (asoc->mapping_array) {
3538 FREE(asoc->mapping_array, M_PCB);
3539 asoc->mapping_array = NULL;
3542 /* the stream outs */
3543 if (asoc->strmout) {
3544 FREE(asoc->strmout, M_PCB);
3545 asoc->strmout = NULL;
3547 asoc->streamoutcnt = 0;
3548 if (asoc->strmin) {
3549 int i;
3550 for (i = 0; i < asoc->streamincnt; i++) {
3551 if (!TAILQ_EMPTY(&asoc->strmin[i].inqueue)) {
3552 /* We have something on the stream-in queue */
3553 chk = TAILQ_FIRST(&asoc->strmin[i].inqueue);
3554 while (chk) {
3555 TAILQ_REMOVE(&asoc->strmin[i].inqueue,
3556 chk, sctp_next);
3557 if (chk->data) {
3558 sctp_m_freem(chk->data);
3559 chk->data = NULL;
3561 SCTP_ZONE_FREE(sctppcbinfo.ipi_zone_chunk,
3562 chk);
3563 sctppcbinfo.ipi_count_chunk--;
3564 if ((int)sctppcbinfo.ipi_count_chunk < 0) {
3565 panic("Chunk count is negative");
3567 sctppcbinfo.ipi_gencnt_chunk++;
3568 chk = TAILQ_FIRST(&asoc->strmin[i].inqueue);
3572 FREE(asoc->strmin, M_PCB);
3573 asoc->strmin = NULL;
3575 asoc->streamincnt = 0;
3576 /* local addresses, if any */
3577 while (!LIST_EMPTY(&asoc->sctp_local_addr_list)) {
3578 laddr = LIST_FIRST(&asoc->sctp_local_addr_list);
3579 LIST_REMOVE(laddr, sctp_nxt_addr);
3580 SCTP_ZONE_FREE(sctppcbinfo.ipi_zone_laddr, laddr);
3581 sctppcbinfo.ipi_count_laddr--;
3583 /* pending asconf (address) parameters */
3584 while (!TAILQ_EMPTY(&asoc->asconf_queue)) {
3585 aparam = TAILQ_FIRST(&asoc->asconf_queue);
3586 TAILQ_REMOVE(&asoc->asconf_queue, aparam, next);
3587 FREE(aparam, M_PCB);
3589 if (asoc->last_asconf_ack_sent != NULL) {
3590 sctp_m_freem(asoc->last_asconf_ack_sent);
3591 asoc->last_asconf_ack_sent = NULL;
3593 /* Insert new items here :> */
3595 /* Get rid of LOCK */
3596 SCTP_TCB_LOCK_DESTROY(stcb);
3598 /* now clean up the tasoc itself */
3599 SCTP_ZONE_FREE(sctppcbinfo.ipi_zone_asoc, stcb);
3600 sctppcbinfo.ipi_count_asoc--;
3601 if ((inp->sctp_socket->so_snd.ssb_cc) ||
3602 (inp->sctp_socket->so_snd.ssb_mbcnt)) {
3603 /* This will happen when an abort is done */
3604 inp->sctp_socket->so_snd.ssb_cc = 0;
3605 inp->sctp_socket->so_snd.ssb_mbcnt = 0;
3607 if (inp->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) {
3608 if ((inp->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL) == 0) {
3609 if (inp->sctp_flags & SCTP_PCB_FLAGS_CONNECTED) {
3611 * For the base fd, that is NOT in TCP pool we
3612 * turn off the connected flag. This allows
3613 * non-listening endpoints to connect/shutdown/
3614 * connect.
3616 inp->sctp_flags &= ~SCTP_PCB_FLAGS_CONNECTED;
3617 soisdisconnected(inp->sctp_socket);
3620 * For those that are in the TCP pool we just leave the
3621 * flag set so the socket cannot be reused. When they close
3622 * the fd we will free it all.
3626 if (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) {
3627 sctp_inpcb_free(inp, 0);
3629 crit_exit();
3634 * determine if a destination is "reachable" based upon the addresses
3635 * bound to the current endpoint (e.g. only v4 or v6 currently bound)
3638 * FIX: if we allow assoc-level bindx(), then this needs to be fixed
3639 * to use assoc level v4/v6 flags, as the assoc *may* not have the
3640 * same address types bound as its endpoint
3643 sctp_destination_is_reachable(struct sctp_tcb *stcb, struct sockaddr *destaddr)
3645 struct sctp_inpcb *inp;
3646 int answer;
3648 /* No locks are taken here: the TCB is, in all cases, already
3649 * locked and an assoc is up. Either the caller has applied an
3650 * INP lock (in the asconf case, when
3651 * deleting an address), or it has not (the HB case); but in the
3652 * HB case the INP refcount has been bumped, so the INP
3653 * will not be removed (on top of the fact that
3654 * we have a TCB lock). So we only want to
3655 * read sctp_flags, which says either bound-all
3656 * or not; no protection is needed, since once an
3657 * assoc is up you can't be changing your binding.
3659 inp = stcb->sctp_ep;
3660 if (inp->sctp_flags & SCTP_PCB_FLAGS_BOUNDALL) {
3661 /* if bound all, destination is not restricted */
3662 /* RRS: Question during lock work: Is this
3663 * correct? If you are bound-all you still
3664 * might need to obey the V4--V6 flags???
3665 * IMO this bound-all stuff needs to be removed!
3667 return (1);
3669 /* NOTE: all "scope" checks are done when local addresses are added */
3670 if (destaddr->sa_family == AF_INET6) {
3671 #if !(defined(__FreeBSD__) || defined(__APPLE__) || defined(__DragonFly__))
3672 answer = inp->inp_vflag & INP_IPV6;
3673 #else
3674 answer = inp->ip_inp.inp.inp_vflag & INP_IPV6;
3675 #endif
3676 } else if (destaddr->sa_family == AF_INET) {
3677 #if !(defined(__FreeBSD__) || defined(__APPLE__) || defined(__DragonFly__))
3678 answer = inp->inp_vflag & INP_IPV4;
3679 #else
3680 answer = inp->ip_inp.inp.inp_vflag & INP_IPV4;
3681 #endif
3682 } else {
3683 /* invalid family, so it's unreachable */
3684 answer = 0;
3686 return (answer);
3690 * update the inp_vflags on an endpoint
3692 static void
3693 sctp_update_ep_vflag(struct sctp_inpcb *inp) {
3694 struct sctp_laddr *laddr;
3696 /* first clear the flag */
3697 #if !(defined(__FreeBSD__) || defined(__APPLE__) || defined(__DragonFly__))
3698 inp->inp_vflag = 0;
3699 #else
3700 inp->ip_inp.inp.inp_vflag = 0;
3701 #endif
3702 /* set the flag based on addresses on the ep list */
3703 LIST_FOREACH(laddr, &inp->sctp_addr_list, sctp_nxt_addr) {
3704 if (laddr->ifa == NULL) {
3705 #ifdef SCTP_DEBUG
3706 if (sctp_debug_on & SCTP_DEBUG_PCB1) {
3707 kprintf("An ounce of prevention is worth a pound of cure\n");
3709 #endif /* SCTP_DEBUG */
3710 continue;
3712 if (laddr->ifa->ifa_addr == NULL) {
3713 continue;
3715 if (laddr->ifa->ifa_addr->sa_family == AF_INET6) {
3716 #if !(defined(__FreeBSD__) || defined(__APPLE__) || defined(__DragonFly__))
3717 inp->inp_vflag |= INP_IPV6;
3718 #else
3719 inp->ip_inp.inp.inp_vflag |= INP_IPV6;
3720 #endif
3721 } else if (laddr->ifa->ifa_addr->sa_family == AF_INET) {
3722 #if !(defined(__FreeBSD__) || defined(__APPLE__) || defined(__DragonFly__))
3723 inp->inp_vflag |= INP_IPV4;
3724 #else
3725 inp->ip_inp.inp.inp_vflag |= INP_IPV4;
3726 #endif
3732 * Add the address to the endpoint local address list
3733 * There is nothing to be done if we are bound to all addresses
3736 sctp_add_local_addr_ep(struct sctp_inpcb *inp, struct ifaddr *ifa)
3738 struct sctp_laddr *laddr;
3739 int fnd, error;
3740 fnd = 0;
3742 if (inp->sctp_flags & SCTP_PCB_FLAGS_BOUNDALL) {
3743 /* You are already bound to all. You have it already */
3744 return (0);
3746 if (ifa->ifa_addr->sa_family == AF_INET6) {
3747 struct in6_ifaddr *ifa6;
3748 ifa6 = (struct in6_ifaddr *)ifa;
3749 if (ifa6->ia6_flags & (IN6_IFF_DETACHED |
3750 IN6_IFF_DEPRECATED | IN6_IFF_ANYCAST | IN6_IFF_NOTREADY))
3751 /* Can't bind a non-existent addr. */
3752 return (-1);
3754 /* first, is it already present? */
3755 LIST_FOREACH(laddr, &inp->sctp_addr_list, sctp_nxt_addr) {
3756 if (laddr->ifa == ifa) {
3757 fnd = 1;
3758 break;
3762 if (((inp->sctp_flags & SCTP_PCB_FLAGS_BOUNDALL) == 0) && (fnd == 0)) {
3763 /* Not bound to all */
3764 error = sctp_insert_laddr(&inp->sctp_addr_list, ifa);
3765 if (error != 0)
3766 return (error);
3767 inp->laddr_count++;
3768 /* update inp_vflag flags */
3769 if (ifa->ifa_addr->sa_family == AF_INET6) {
3770 #if !(defined(__FreeBSD__) || defined(__APPLE__) || defined(__DragonFly__))
3771 inp->inp_vflag |= INP_IPV6;
3772 #else
3773 inp->ip_inp.inp.inp_vflag |= INP_IPV6;
3774 #endif
3775 } else if (ifa->ifa_addr->sa_family == AF_INET) {
3776 #if !(defined(__FreeBSD__) || defined(__APPLE__) || defined(__DragonFly__))
3777 inp->inp_vflag |= INP_IPV4;
3778 #else
3779 inp->ip_inp.inp.inp_vflag |= INP_IPV4;
3780 #endif
3783 return (0);
3788 * select a new (hopefully reachable) destination net
3789 * (should only be used when we deleted an ep addr that is the
3790 * only usable source address to reach the destination net)
3792 static void
3793 sctp_select_primary_destination(struct sctp_tcb *stcb)
3795 struct sctp_nets *net;
3797 TAILQ_FOREACH(net, &stcb->asoc.nets, sctp_next) {
3798 /* for now, we'll just pick the first reachable one we find */
3799 if (net->dest_state & SCTP_ADDR_UNCONFIRMED)
3800 continue;
3801 if (sctp_destination_is_reachable(stcb,
3802 (struct sockaddr *)&net->ro._l_addr)) {
3803 /* found a reachable destination */
3804 stcb->asoc.primary_destination = net;
3807 /* I can't get there from here! ...we're gonna die shortly... */
3812 * Delete the address from the endpoint local address list
3813 * There is nothing to be done if we are bound to all addresses
3816 sctp_del_local_addr_ep(struct sctp_inpcb *inp, struct ifaddr *ifa)
3818 struct sctp_laddr *laddr;
3819 int fnd;
3820 fnd = 0;
3821 if (inp->sctp_flags & SCTP_PCB_FLAGS_BOUNDALL) {
3822 /* You are already bound to all. You have it already */
3823 return (EINVAL);
3826 LIST_FOREACH(laddr, &inp->sctp_addr_list, sctp_nxt_addr) {
3827 if (laddr->ifa == ifa) {
3828 fnd = 1;
3829 break;
3832 if (fnd && (inp->laddr_count < 2)) {
3833 /* can't delete unless there are at LEAST 2 addresses */
3834 return (-1);
3836 if (((inp->sctp_flags & SCTP_PCB_FLAGS_BOUNDALL) == 0) && (fnd)) {
3838 * clean up any use of this address
3839 * go through our associations and clear any
3840 * last_used_address that match this one
3841 * for each assoc, see if a new primary_destination is needed
3843 struct sctp_tcb *stcb;
3845 /* clean up "next_addr_touse" */
3846 if (inp->next_addr_touse == laddr)
3847 /* delete this address */
3848 inp->next_addr_touse = NULL;
3850 /* clean up "last_used_address" */
3851 LIST_FOREACH(stcb, &inp->sctp_asoc_list, sctp_tcblist) {
3852 if (stcb->asoc.last_used_address == laddr)
3853 /* delete this address */
3854 stcb->asoc.last_used_address = NULL;
3855 } /* for each tcb */
3857 /* remove it from the ep list */
3858 sctp_remove_laddr(laddr);
3859 inp->laddr_count--;
3860 /* update inp_vflag flags */
3861 sctp_update_ep_vflag(inp);
3862 /* select a new primary destination if needed */
3863 LIST_FOREACH(stcb, &inp->sctp_asoc_list, sctp_tcblist) {
3864 /* presume caller (sctp_asconf.c) already owns INP lock */
3865 SCTP_TCB_LOCK(stcb);
3866 if (sctp_destination_is_reachable(stcb,
3867 (struct sockaddr *)&stcb->asoc.primary_destination->ro._l_addr) == 0) {
3868 sctp_select_primary_destination(stcb);
3870 SCTP_TCB_UNLOCK(stcb);
3871 } /* for each tcb */
3873 return (0);
3877 * Add the addr to the TCB local address list
3878 * For the BOUNDALL or dynamic case, this is a "pending" address list
3879 * (e.g. addresses waiting for an ASCONF-ACK response)
3880 * For the subset binding, static case, this is a "valid" address list
3883 sctp_add_local_addr_assoc(struct sctp_tcb *stcb, struct ifaddr *ifa)
3885 struct sctp_inpcb *inp;
3886 struct sctp_laddr *laddr;
3887 int error;
3889 /* Assumes the TCB is locked... and possibly
3890 * the INP. May need to confirm/fix that if
3891 * we need it and that is not the case.
3893 inp = stcb->sctp_ep;
3894 if (ifa->ifa_addr->sa_family == AF_INET6) {
3895 struct in6_ifaddr *ifa6;
3896 ifa6 = (struct in6_ifaddr *)ifa;
3897 if (ifa6->ia6_flags & (IN6_IFF_DETACHED |
3898 /* IN6_IFF_DEPRECATED | */
3899 IN6_IFF_ANYCAST |
3900 IN6_IFF_NOTREADY))
3901 /* Can't bind a non-existent addr. */
3902 return (-1);
3904 /* does the address already exist? */
3905 LIST_FOREACH(laddr, &stcb->asoc.sctp_local_addr_list, sctp_nxt_addr) {
3906 if (laddr->ifa == ifa) {
3907 return (-1);
3911 /* add to the list */
3912 error = sctp_insert_laddr(&stcb->asoc.sctp_local_addr_list, ifa);
3913 if (error != 0)
3914 return (error);
3915 return (0);
3919 * insert an laddr entry with the given ifa for the desired list
3922 sctp_insert_laddr(struct sctpladdr *list, struct ifaddr *ifa) {
3923 struct sctp_laddr *laddr;
3925 crit_enter();
3926 laddr = (struct sctp_laddr *)SCTP_ZONE_GET(sctppcbinfo.ipi_zone_laddr);
3927 if (laddr == NULL) {
3928 /* out of memory? */
3929 crit_exit();
3930 return (EINVAL);
3932 sctppcbinfo.ipi_count_laddr++;
3933 sctppcbinfo.ipi_gencnt_laddr++;
3934 bzero(laddr, sizeof(*laddr));
3935 laddr->ifa = ifa;
3936 /* insert it */
3937 LIST_INSERT_HEAD(list, laddr, sctp_nxt_addr);
3939 crit_exit();
3940 return (0);
3944 * Remove an laddr entry from the local address list (on an assoc)
3946 void
3947 sctp_remove_laddr(struct sctp_laddr *laddr)
3949 crit_enter();
3950 /* remove from the list */
3951 LIST_REMOVE(laddr, sctp_nxt_addr);
3952 SCTP_ZONE_FREE(sctppcbinfo.ipi_zone_laddr, laddr);
3953 sctppcbinfo.ipi_count_laddr--;
3954 sctppcbinfo.ipi_gencnt_laddr++;
3955 crit_exit();
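#if 0	/* Illustrative sketch -- not part of the build. */
/*
 * sctp_insert_laddr()/sctp_remove_laddr() above pair a <sys/queue.h>
 * LIST with zone allocation, the ipi_count/ipi_gencnt bookkeeping and
 * crit_enter()/crit_exit().  A minimal userland model of just the list
 * handling, assuming malloc()/free() in place of SCTP_ZONE_GET/
 * SCTP_ZONE_FREE; the ex_* names are invented for the example.
 */
#include <sys/queue.h>
#include <errno.h>
#include <stdlib.h>

struct ex_laddr {
	LIST_ENTRY(ex_laddr) nxt;
	const void *ifa;		/* stand-in for struct ifaddr * */
};
LIST_HEAD(ex_laddr_head, ex_laddr);

static int
ex_insert_laddr(struct ex_laddr_head *list, const void *ifa)
{
	struct ex_laddr *l;

	l = calloc(1, sizeof(*l));
	if (l == NULL)
		return (ENOMEM);	/* the code above returns EINVAL */
	l->ifa = ifa;
	LIST_INSERT_HEAD(list, l, nxt);
	return (0);
}

static void
ex_remove_laddr(struct ex_laddr *l)
{
	LIST_REMOVE(l, nxt);
	free(l);
}
#endif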
3959 * Remove an address from the TCB local address list
3962 sctp_del_local_addr_assoc(struct sctp_tcb *stcb, struct ifaddr *ifa)
3964 struct sctp_inpcb *inp;
3965 struct sctp_laddr *laddr;
3967 /* This is called by asconf work. It is assumed that
3968 * a) The TCB is locked
3969 * and
3970 * b) The INP is locked.
3971 * This is true insofar as I can trace through
3972 * the entry asconf code where I did these locks.
3973 * Again, the ASCONF code is a bit different in
3974 * that it oftentimes does lock the INP during its
3975 * work. This must be because we don't want other
3976 * procs looking up things while what they are
3977 * looking up is changing :-D
3980 inp = stcb->sctp_ep;
3981 /* if subset bound and don't allow ASCONF's, can't delete last */
3982 if (((inp->sctp_flags & SCTP_PCB_FLAGS_BOUNDALL) == 0) &&
3983 ((inp->sctp_flags & SCTP_PCB_FLAGS_DO_ASCONF) == 0)) {
3984 if (stcb->asoc.numnets < 2) {
3985 /* can't delete last address */
3986 return (-1);
3990 LIST_FOREACH(laddr, &stcb->asoc.sctp_local_addr_list, sctp_nxt_addr) {
3991 /* remove the address if it exists */
3992 if (laddr->ifa == NULL)
3993 continue;
3994 if (laddr->ifa == ifa) {
3995 sctp_remove_laddr(laddr);
3996 return (0);
4000 /* address not found! */
4001 return (-1);
4005 * Remove an address from the TCB local address list
4006 * lookup using a sockaddr addr
4009 sctp_del_local_addr_assoc_sa(struct sctp_tcb *stcb, struct sockaddr *sa)
4011 struct sctp_inpcb *inp;
4012 struct sctp_laddr *laddr;
4013 struct sockaddr *l_sa;
4016 * As far as I can tell, this function has no caller.
4017 * As such we NEED TO DELETE this code. If we do
4018 * find a caller, the caller MUST have locked the TCB
4019 * at the least and probably the INP as well.
4021 inp = stcb->sctp_ep;
4022 /* if subset bound and don't allow ASCONF's, can't delete last */
4023 if (((inp->sctp_flags & SCTP_PCB_FLAGS_BOUNDALL) == 0) &&
4024 ((inp->sctp_flags & SCTP_PCB_FLAGS_DO_ASCONF) == 0)) {
4025 if (stcb->asoc.numnets < 2) {
4026 /* can't delete last address */
4027 return (-1);
4031 LIST_FOREACH(laddr, &stcb->asoc.sctp_local_addr_list, sctp_nxt_addr) {
4032 /* make sure the address exists */
4033 if (laddr->ifa == NULL)
4034 continue;
4035 if (laddr->ifa->ifa_addr == NULL)
4036 continue;
4038 l_sa = laddr->ifa->ifa_addr;
4039 if (l_sa->sa_family == AF_INET6) {
4040 /* IPv6 address */
4041 struct sockaddr_in6 *sin1, *sin2;
4042 sin1 = (struct sockaddr_in6 *)l_sa;
4043 sin2 = (struct sockaddr_in6 *)sa;
4044 if (memcmp(&sin1->sin6_addr, &sin2->sin6_addr,
4045 sizeof(struct in6_addr)) == 0) {
4046 /* matched */
4047 sctp_remove_laddr(laddr);
4048 return (0);
4050 } else if (l_sa->sa_family == AF_INET) {
4051 /* IPv4 address */
4052 struct sockaddr_in *sin1, *sin2;
4053 sin1 = (struct sockaddr_in *)l_sa;
4054 sin2 = (struct sockaddr_in *)sa;
4055 if (sin1->sin_addr.s_addr == sin2->sin_addr.s_addr) {
4056 /* matched */
4057 sctp_remove_laddr(laddr);
4058 return (0);
4060 } else {
4061 /* invalid family */
4062 return (-1);
4064 } /* end foreach */
4065 /* address not found! */
4066 return (-1);
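#if 0	/* Illustrative sketch -- not part of the build. */
/*
 * The lookup above matches on the address bytes only (the in6_addr for
 * IPv6, s_addr for IPv4) and never on the port.  A standalone version
 * of that comparison; ex_sa_match is an invented name.
 */
#include <sys/socket.h>
#include <netinet/in.h>
#include <string.h>

static int
ex_sa_match(const struct sockaddr *a, const struct sockaddr *b)
{
	if (a->sa_family != b->sa_family)
		return (0);
	if (a->sa_family == AF_INET6) {
		const struct sockaddr_in6 *a6 = (const struct sockaddr_in6 *)a;
		const struct sockaddr_in6 *b6 = (const struct sockaddr_in6 *)b;

		return (memcmp(&a6->sin6_addr, &b6->sin6_addr,
		    sizeof(struct in6_addr)) == 0);
	}
	if (a->sa_family == AF_INET) {
		const struct sockaddr_in *a4 = (const struct sockaddr_in *)a;
		const struct sockaddr_in *b4 = (const struct sockaddr_in *)b;

		return (a4->sin_addr.s_addr == b4->sin_addr.s_addr);
	}
	return (0);		/* unknown family: no match */
}
#endif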
4069 static char sctp_pcb_initialized = 0;
4071 #if defined(__FreeBSD__) || defined(__APPLE__) || defined(__DragonFly__)
4072 /* sysctl */
4073 static int sctp_max_number_of_assoc = SCTP_MAX_NUM_OF_ASOC;
4074 static int sctp_scale_up_for_address = SCTP_SCALE_FOR_ADDR;
4076 #endif /* FreeBSD || APPLE || DragonFly */
4078 #ifndef SCTP_TCBHASHSIZE
4079 #define SCTP_TCBHASHSIZE 1024
4080 #endif
4082 #ifndef SCTP_CHUNKQUEUE_SCALE
4083 #define SCTP_CHUNKQUEUE_SCALE 10
4084 #endif
4086 void
4087 sctp_pcb_init(void)
4090 * SCTP initialization for the PCB structures
4091 * should be called by the sctp_init() function.
4093 int i;
4094 int hashtblsize = SCTP_TCBHASHSIZE;
4096 #if defined(__FreeBSD__) || defined(__APPLE__) || defined(__DragonFly__)
4097 int sctp_chunkscale = SCTP_CHUNKQUEUE_SCALE;
4098 #endif
4100 if (sctp_pcb_initialized != 0) {
4101 /* error I was called twice */
4102 return;
4104 sctp_pcb_initialized = 1;
4106 /* Init all peg counts */
4107 for (i = 0; i < SCTP_NUMBER_OF_PEGS; i++) {
4108 sctp_pegs[i] = 0;
4111 /* init the empty list of (All) Endpoints */
4112 LIST_INIT(&sctppcbinfo.listhead);
4114 /* init the iterator head */
4115 LIST_INIT(&sctppcbinfo.iteratorhead);
4117 /* init the hash table of endpoints */
4118 #if defined(__FreeBSD__)
4119 #if defined(__FreeBSD_cc_version) && __FreeBSD_cc_version >= 440000
4120 TUNABLE_INT_FETCH("net.inet.sctp.tcbhashsize", &hashtblsize);
4121 TUNABLE_INT_FETCH("net.inet.sctp.pcbhashsize", &sctp_pcbtblsize);
4122 TUNABLE_INT_FETCH("net.inet.sctp.chunkscale", &sctp_chunkscale);
4123 #else
4124 TUNABLE_INT_FETCH("net.inet.sctp.tcbhashsize", SCTP_TCBHASHSIZE,
4125 hashtblsize);
4126 TUNABLE_INT_FETCH("net.inet.sctp.pcbhashsize", SCTP_PCBHASHSIZE,
4127 sctp_pcbtblsize);
4128 TUNABLE_INT_FETCH("net.inet.sctp.chunkscale", SCTP_CHUNKQUEUE_SCALE,
4129 sctp_chunkscale);
4130 #endif
4131 #endif
4133 sctppcbinfo.sctp_asochash = hashinit((hashtblsize * 31),
4134 #ifdef __NetBSD__
4135 HASH_LIST,
4136 #endif
4137 M_PCB,
4138 #if defined(__NetBSD__) || defined(__OpenBSD__)
4139 M_WAITOK,
4140 #endif
4141 &sctppcbinfo.hashasocmark);
4143 sctppcbinfo.sctp_ephash = hashinit(hashtblsize,
4144 #ifdef __NetBSD__
4145 HASH_LIST,
4146 #endif
4147 M_PCB,
4148 #if defined(__NetBSD__) || defined(__OpenBSD__)
4149 M_WAITOK,
4150 #endif
4151 &sctppcbinfo.hashmark);
4153 sctppcbinfo.sctp_tcpephash = hashinit(hashtblsize,
4154 #ifdef __NetBSD__
4155 HASH_LIST,
4156 #endif
4157 M_PCB,
4158 #if defined(__NetBSD__) || defined(__OpenBSD__)
4159 M_WAITOK,
4160 #endif
4161 &sctppcbinfo.hashtcpmark);
4163 sctppcbinfo.hashtblsize = hashtblsize;
4165 /* init the zones */
4167 * FIX ME: Should check for NULL returns, but if it does fail we
4168 * are doomed to panic anyway... add later maybe.
4170 SCTP_ZONE_INIT(sctppcbinfo.ipi_zone_ep, "sctp_ep",
4171 sizeof(struct sctp_inpcb), maxsockets);
4173 SCTP_ZONE_INIT(sctppcbinfo.ipi_zone_asoc, "sctp_asoc",
4174 sizeof(struct sctp_tcb), sctp_max_number_of_assoc);
4176 SCTP_ZONE_INIT(sctppcbinfo.ipi_zone_laddr, "sctp_laddr",
4177 sizeof(struct sctp_laddr),
4178 (sctp_max_number_of_assoc * sctp_scale_up_for_address));
4180 SCTP_ZONE_INIT(sctppcbinfo.ipi_zone_net, "sctp_raddr",
4181 sizeof(struct sctp_nets),
4182 (sctp_max_number_of_assoc * sctp_scale_up_for_address));
4184 SCTP_ZONE_INIT(sctppcbinfo.ipi_zone_chunk, "sctp_chunk",
4185 sizeof(struct sctp_tmit_chunk),
4186 (sctp_max_number_of_assoc * sctp_scale_up_for_address *
4187 sctp_chunkscale));
4189 SCTP_ZONE_INIT(sctppcbinfo.ipi_zone_sockq, "sctp_sockq",
4190 sizeof(struct sctp_socket_q_list),
4191 (sctp_max_number_of_assoc * sctp_scale_up_for_address *
4192 sctp_chunkscale));
4194 /* Master Lock INIT for info structure */
4195 SCTP_INP_INFO_LOCK_INIT();
4196 SCTP_ITERATOR_LOCK_INIT();
4197 /* not sure if we need all the counts */
4198 sctppcbinfo.ipi_count_ep = 0;
4199 sctppcbinfo.ipi_gencnt_ep = 0;
4200 /* assoc/tcb zone info */
4201 sctppcbinfo.ipi_count_asoc = 0;
4202 sctppcbinfo.ipi_gencnt_asoc = 0;
4203 /* local addrlist zone info */
4204 sctppcbinfo.ipi_count_laddr = 0;
4205 sctppcbinfo.ipi_gencnt_laddr = 0;
4206 /* remote addrlist zone info */
4207 sctppcbinfo.ipi_count_raddr = 0;
4208 sctppcbinfo.ipi_gencnt_raddr = 0;
4209 /* chunk info */
4210 sctppcbinfo.ipi_count_chunk = 0;
4211 sctppcbinfo.ipi_gencnt_chunk = 0;
4213 /* socket queue zone info */
4214 sctppcbinfo.ipi_count_sockq = 0;
4215 sctppcbinfo.ipi_gencnt_sockq = 0;
4217 /* mbuf tracker */
4218 sctppcbinfo.mbuf_track = 0;
4219 /* port stuff */
4220 #if defined(__FreeBSD__) || defined(__OpenBSD__) || defined(__APPLE__) \
4221 || defined(__DragonFly__)
4222 sctppcbinfo.lastlow = ipport_firstauto;
4223 #else
4224 sctppcbinfo.lastlow = anonportmin;
4225 #endif
4226 /* Init the TIMEWAIT list */
4227 for (i = 0; i < SCTP_STACK_VTAG_HASH_SIZE; i++) {
4228 LIST_INIT(&sctppcbinfo.vtag_timewait[i]);
4231 #if defined(_SCTP_NEEDS_CALLOUT_) && !defined(__APPLE__)
4232 TAILQ_INIT(&sctppcbinfo.callqueue);
4233 #endif
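#if 0	/* Illustrative sketch -- not part of the build. */
/*
 * sctp_pcb_init() above builds its lookup tables with hashinit(),
 * which returns an array of list heads and a power-of-two mask;
 * lookups then index with (key & mask), which is essentially what a
 * macro such as SCTP_PCBHASH_ASOC boils down to.  A hedged userland
 * model of that pattern; the ex_* names are invented.
 */
#include <sys/queue.h>
#include <stdlib.h>

struct ex_ent {
	LIST_ENTRY(ex_ent) link;
	unsigned long key;
};
LIST_HEAD(ex_bucket_head, ex_ent);

static struct ex_bucket_head *
ex_hashinit(unsigned nelem, unsigned long *maskp)
{
	struct ex_bucket_head *tbl;
	unsigned n, i;

	for (n = 1; n < nelem; n <<= 1)
		;			/* round up to a power of two */
	tbl = malloc(n * sizeof(*tbl));
	if (tbl == NULL)
		return (NULL);
	for (i = 0; i < n; i++)
		LIST_INIT(&tbl[i]);
	*maskp = n - 1;			/* mask, as hashinit() hands back */
	return (tbl);
}

static struct ex_bucket_head *
ex_bucket(struct ex_bucket_head *tbl, unsigned long mask, unsigned long key)
{
	return (&tbl[key & mask]);	/* the SCTP_PCBHASH_*-style step */
}
#endif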
4238 sctp_load_addresses_from_init(struct sctp_tcb *stcb, struct mbuf *m,
4239 int iphlen, int offset, int limit, struct sctphdr *sh,
4240 struct sockaddr *altsa)
4243 * grub through the INIT pulling addresses and
4244 * loading them into the nets structure in the asoc.
4245 * The from address in the mbuf should also be loaded
4246 * (if it is not already). This routine can be called
4247 * with either INITs or INIT-ACKs as long as the
4248 * mbuf points to the IP packet and the offset points
4249 * to the beginning of the parameters.
4251 struct sctp_inpcb *inp, *l_inp;
4252 struct sctp_nets *net, *net_tmp;
4253 struct ip *iph;
4254 struct sctp_paramhdr *phdr, parm_buf;
4255 struct sctp_tcb *stcb_tmp;
4256 u_int16_t ptype, plen;
4257 struct sockaddr *sa;
4258 struct sockaddr_storage dest_store;
4259 struct sockaddr *local_sa = (struct sockaddr *)&dest_store;
4260 struct sockaddr_in sin;
4261 struct sockaddr_in6 sin6;
4263 /* First get the destination address setup too. */
4264 memset(&sin, 0, sizeof(sin));
4265 memset(&sin6, 0, sizeof(sin6));
4267 sin.sin_family = AF_INET;
4268 sin.sin_len = sizeof(sin);
4269 sin.sin_port = stcb->rport;
4271 sin6.sin6_family = AF_INET6;
4272 sin6.sin6_len = sizeof(struct sockaddr_in6);
4273 sin6.sin6_port = stcb->rport;
4274 if (altsa == NULL) {
4275 iph = mtod(m, struct ip *);
4276 if (iph->ip_v == IPVERSION) {
4277 /* its IPv4 */
4278 struct sockaddr_in *sin_2;
4279 sin_2 = (struct sockaddr_in *)(local_sa);
4280 memset(sin_2, 0, sizeof(sin));
4281 sin_2->sin_family = AF_INET;
4282 sin_2->sin_len = sizeof(sin);
4283 sin_2->sin_port = sh->dest_port;
4284 sin_2->sin_addr.s_addr = iph->ip_dst.s_addr;
4285 sin.sin_addr = iph->ip_src;
4286 sa = (struct sockaddr *)&sin;
4287 } else if (iph->ip_v == (IPV6_VERSION >> 4)) {
4288 /* its IPv6 */
4289 struct ip6_hdr *ip6;
4290 struct sockaddr_in6 *sin6_2;
4292 ip6 = mtod(m, struct ip6_hdr *);
4293 sin6_2 = (struct sockaddr_in6 *)(local_sa);
4294 memset(sin6_2, 0, sizeof(sin6));
4295 sin6_2->sin6_family = AF_INET6;
4296 sin6_2->sin6_len = sizeof(struct sockaddr_in6);
4297 sin6_2->sin6_port = sh->dest_port;
4298 sin6.sin6_addr = ip6->ip6_src;
4299 sa = (struct sockaddr *)&sin6;
4300 } else {
4301 sa = NULL;
4303 } else {
4305 * For cookies we use the src address NOT from the packet
4306 * but from the original INIT
4308 sa = altsa;
4310 /* Turn off ECN until we get through all params */
4311 stcb->asoc.ecn_allowed = 0;
4313 TAILQ_FOREACH(net, &stcb->asoc.nets, sctp_next) {
4314 /* mark all addresses that we have currently on the list */
4315 net->dest_state |= SCTP_ADDR_NOT_IN_ASSOC;
4317 /* does the source address already exist? if so skip it */
4318 l_inp = inp = stcb->sctp_ep;
4319 stcb_tmp = sctp_findassociation_ep_addr(&inp, sa, &net_tmp, local_sa, stcb);
4320 if ((stcb_tmp == NULL && inp == stcb->sctp_ep) || inp == NULL) {
4321 /* we must add the source address */
4322 /* no scope set here since we have a tcb already. */
4323 if ((sa->sa_family == AF_INET) &&
4324 (stcb->asoc.ipv4_addr_legal)) {
4325 if (sctp_add_remote_addr(stcb, sa, 0, 2)) {
4326 return (-1);
4328 } else if ((sa->sa_family == AF_INET6) &&
4329 (stcb->asoc.ipv6_addr_legal)) {
4330 if (sctp_add_remote_addr(stcb, sa, 0, 3)) {
4331 return (-1);
4334 } else {
4335 if (net_tmp != NULL && stcb_tmp == stcb) {
4336 net_tmp->dest_state &= ~SCTP_ADDR_NOT_IN_ASSOC;
4337 } else if (stcb_tmp != stcb) {
4338 /* It belongs to another association? */
4339 return (-1);
4342 /* since an unlock occurred we must check the
4343 * TCB's state and the pcb's gone flags.
4345 if (l_inp->sctp_flags & (SCTP_PCB_FLAGS_SOCKET_GONE|SCTP_PCB_FLAGS_SOCKET_ALLGONE)) {
4346 /* the user freed the ep */
4347 return (-1);
4349 if (stcb->asoc.state == 0) {
4350 /* the assoc was freed? */
4351 return (-1);
4354 /* now we must go through each of the params. */
4355 phdr = sctp_get_next_param(m, offset, &parm_buf, sizeof(parm_buf));
4356 while (phdr) {
4357 ptype = ntohs(phdr->param_type);
4358 plen = ntohs(phdr->param_length);
4359 /*kprintf("ptype => %d, plen => %d\n", ptype, plen);*/
4360 if (offset + plen > limit) {
4361 break;
4363 if (plen == 0) {
4364 break;
4366 if ((ptype == SCTP_IPV4_ADDRESS) &&
4367 (stcb->asoc.ipv4_addr_legal)) {
4368 struct sctp_ipv4addr_param *p4, p4_buf;
4369 /* ok get the v4 address and check/add */
4370 phdr = sctp_get_next_param(m, offset,
4371 (struct sctp_paramhdr *)&p4_buf, sizeof(p4_buf));
4372 if (plen != sizeof(struct sctp_ipv4addr_param) ||
4373 phdr == NULL) {
4374 return (-1);
4376 p4 = (struct sctp_ipv4addr_param *)phdr;
4377 sin.sin_addr.s_addr = p4->addr;
4378 sa = (struct sockaddr *)&sin;
4379 inp = stcb->sctp_ep;
4380 stcb_tmp = sctp_findassociation_ep_addr(&inp, sa, &net,
4381 local_sa, stcb);
4383 if ((stcb_tmp== NULL && inp == stcb->sctp_ep) ||
4384 inp == NULL) {
4385 /* we must add the source address */
4386 /* no scope set since we have a tcb already */
4388 /* we must validate the state again here */
4389 if (l_inp->sctp_flags & (SCTP_PCB_FLAGS_SOCKET_GONE|SCTP_PCB_FLAGS_SOCKET_ALLGONE)) {
4390 /* the user freed the ep */
4391 return (-1);
4393 if (stcb->asoc.state == 0) {
4394 /* the assoc was freed? */
4395 return (-1);
4397 if (sctp_add_remote_addr(stcb, sa, 0, 4)) {
4398 return (-1);
4400 } else if (stcb_tmp == stcb) {
4401 if (l_inp->sctp_flags & (SCTP_PCB_FLAGS_SOCKET_GONE|SCTP_PCB_FLAGS_SOCKET_ALLGONE)) {
4402 /* the user freed the ep */
4403 return (-1);
4405 if (stcb->asoc.state == 0) {
4406 /* the assoc was freed? */
4407 return (-1);
4409 if (net != NULL) {
4410 /* clear flag */
4411 net->dest_state &=
4412 ~SCTP_ADDR_NOT_IN_ASSOC;
4414 } else {
4415 /* strange, address is in another assoc?
4416 * straighten out locks.
4418 SCTP_TCB_UNLOCK(stcb_tmp);
4419 SCTP_INP_RLOCK(inp);
4420 if (l_inp->sctp_flags & (SCTP_PCB_FLAGS_SOCKET_GONE|SCTP_PCB_FLAGS_SOCKET_ALLGONE)) {
4421 /* the user freed the ep */
4422 SCTP_INP_RUNLOCK(l_inp);
4423 return (-1);
4425 if (stcb->asoc.state == 0) {
4426 /* the assoc was freed? */
4427 SCTP_INP_RUNLOCK(l_inp);
4428 return (-1);
4430 SCTP_TCB_LOCK(stcb);
4431 SCTP_INP_RUNLOCK(stcb->sctp_ep);
4432 return (-1);
4434 } else if ((ptype == SCTP_IPV6_ADDRESS) &&
4435 (stcb->asoc.ipv6_addr_legal)) {
4436 /* ok get the v6 address and check/add */
4437 struct sctp_ipv6addr_param *p6, p6_buf;
4438 phdr = sctp_get_next_param(m, offset,
4439 (struct sctp_paramhdr *)&p6_buf, sizeof(p6_buf));
4440 if (plen != sizeof(struct sctp_ipv6addr_param) ||
4441 phdr == NULL) {
4442 return (-1);
4444 p6 = (struct sctp_ipv6addr_param *)phdr;
4445 memcpy((caddr_t)&sin6.sin6_addr, p6->addr,
4446 sizeof(p6->addr));
4447 sa = (struct sockaddr *)&sin6;
4448 inp = stcb->sctp_ep;
4449 stcb_tmp= sctp_findassociation_ep_addr(&inp, sa, &net,
4450 local_sa, stcb);
4451 if (stcb_tmp == NULL && (inp == stcb->sctp_ep ||
4452 inp == NULL)) {
4453 /* we must validate the state again here */
4454 if (l_inp->sctp_flags & (SCTP_PCB_FLAGS_SOCKET_GONE|SCTP_PCB_FLAGS_SOCKET_ALLGONE)) {
4455 /* the user freed the ep */
4456 return (-1);
4458 if (stcb->asoc.state == 0) {
4459 /* the assoc was freed? */
4460 return (-1);
4462 /* we must add the address, no scope set */
4463 if (sctp_add_remote_addr(stcb, sa, 0, 5)) {
4464 return (-1);
4466 } else if (stcb_tmp == stcb) {
4467 /* we must validate the state again here */
4468 if (l_inp->sctp_flags & (SCTP_PCB_FLAGS_SOCKET_GONE|SCTP_PCB_FLAGS_SOCKET_ALLGONE)) {
4469 /* the user freed the ep */
4470 return (-1);
4472 if (stcb->asoc.state == 0) {
4473 /* the assoc was freed? */
4474 return (-1);
4476 if (net != NULL) {
4477 /* clear flag */
4478 net->dest_state &=
4479 ~SCTP_ADDR_NOT_IN_ASSOC;
4481 } else {
4482 /* strange, address is in another assoc?
4483 * straighten out locks.
4485 SCTP_TCB_UNLOCK(stcb_tmp);
4486 SCTP_INP_RLOCK(l_inp);
4487 /* we must validate the state again here */
4488 if (l_inp->sctp_flags & (SCTP_PCB_FLAGS_SOCKET_GONE|SCTP_PCB_FLAGS_SOCKET_ALLGONE)) {
4489 /* the user freed the ep */
4490 SCTP_INP_RUNLOCK(l_inp);
4491 return (-1);
4493 if (stcb->asoc.state == 0) {
4494 /* the assoc was freed? */
4495 SCTP_INP_RUNLOCK(l_inp);
4496 return (-1);
4498 SCTP_TCB_LOCK(stcb);
4499 SCTP_INP_RUNLOCK(l_inp);
4500 return (-1);
4502 } else if (ptype == SCTP_ECN_CAPABLE) {
4503 stcb->asoc.ecn_allowed = 1;
4504 } else if (ptype == SCTP_ULP_ADAPTION) {
4505 if (stcb->asoc.state != SCTP_STATE_OPEN) {
4506 struct sctp_adaption_layer_indication ai, *aip;
4508 phdr = sctp_get_next_param(m, offset,
4509 (struct sctp_paramhdr *)&ai, sizeof(ai));
4510 aip = (struct sctp_adaption_layer_indication *)phdr;
4511 sctp_ulp_notify(SCTP_NOTIFY_ADAPTION_INDICATION,
4512 stcb, ntohl(aip->indication), NULL);
4514 } else if (ptype == SCTP_SET_PRIM_ADDR) {
4515 struct sctp_asconf_addr_param lstore, *fee;
4516 struct sctp_asconf_addrv4_param *fii;
4517 int lptype;
4518 struct sockaddr *lsa = NULL;
4520 stcb->asoc.peer_supports_asconf = 1;
4521 stcb->asoc.peer_supports_asconf_setprim = 1;
4522 if (plen > sizeof(lstore)) {
4523 return (-1);
4525 phdr = sctp_get_next_param(m, offset,
4526 (struct sctp_paramhdr *)&lstore, plen);
4527 if (phdr == NULL) {
4528 return (-1);
4531 fee = (struct sctp_asconf_addr_param *)phdr;
4532 lptype = ntohs(fee->addrp.ph.param_type);
4533 if (lptype == SCTP_IPV4_ADDRESS) {
4534 if (plen !=
4535 sizeof(struct sctp_asconf_addrv4_param)) {
4536 kprintf("Sizeof setprim in init/init ack not %d but %d - ignored\n",
4537 (int)sizeof(struct sctp_asconf_addrv4_param),
4538 plen);
4539 } else {
4540 fii = (struct sctp_asconf_addrv4_param *)fee;
4541 sin.sin_addr.s_addr = fii->addrp.addr;
4542 lsa = (struct sockaddr *)&sin;
4544 } else if (lptype == SCTP_IPV6_ADDRESS) {
4545 if (plen !=
4546 sizeof(struct sctp_asconf_addr_param)) {
4547 kprintf("Sizeof setprim (v6) in init/init ack not %d but %d - ignored\n",
4548 (int)sizeof(struct sctp_asconf_addr_param),
4549 plen);
4550 } else {
4551 memcpy(sin6.sin6_addr.s6_addr,
4552 fee->addrp.addr,
4553 sizeof(fee->addrp.addr));
4554 lsa = (struct sockaddr *)&sin6;
4557 if (lsa) {
4558 sctp_set_primary_addr(stcb, lsa, NULL);
4561 } else if (ptype == SCTP_PRSCTP_SUPPORTED) {
4562 /* Peer supports pr-sctp */
4563 stcb->asoc.peer_supports_prsctp = 1;
4564 } else if (ptype == SCTP_SUPPORTED_CHUNK_EXT) {
4565 /* A supported extension chunk */
4566 struct sctp_supported_chunk_types_param *pr_supported;
4567 uint8_t local_store[128];
4568 int num_ent, i;
4570 phdr = sctp_get_next_param(m, offset,
4571 (struct sctp_paramhdr *)&local_store, plen);
4572 if (phdr == NULL) {
4573 return (-1);
4575 stcb->asoc.peer_supports_asconf = 0;
4576 stcb->asoc.peer_supports_asconf_setprim = 0;
4577 stcb->asoc.peer_supports_prsctp = 0;
4578 stcb->asoc.peer_supports_pktdrop = 0;
4579 stcb->asoc.peer_supports_strreset = 0;
4580 pr_supported = (struct sctp_supported_chunk_types_param *)phdr;
4581 num_ent = plen - sizeof(struct sctp_paramhdr);
4582 for (i=0; i<num_ent; i++) {
4583 switch (pr_supported->chunk_types[i]) {
4584 case SCTP_ASCONF:
4585 stcb->asoc.peer_supports_asconf = 1;
4586 stcb->asoc.peer_supports_asconf_setprim = 1;
4587 break;
4588 case SCTP_ASCONF_ACK:
4589 stcb->asoc.peer_supports_asconf = 1;
4590 stcb->asoc.peer_supports_asconf_setprim = 1;
4591 break;
4592 case SCTP_FORWARD_CUM_TSN:
4593 stcb->asoc.peer_supports_prsctp = 1;
4594 break;
4595 case SCTP_PACKET_DROPPED:
4596 stcb->asoc.peer_supports_pktdrop = 1;
4597 break;
4598 case SCTP_STREAM_RESET:
4599 stcb->asoc.peer_supports_strreset = 1;
4600 break;
4601 default:
4602 /* one I have not learned yet */
4603 break;
4607 } else if (ptype == SCTP_ECN_NONCE_SUPPORTED) {
4608 /* Peer supports ECN-nonce */
4609 stcb->asoc.peer_supports_ecn_nonce = 1;
4610 stcb->asoc.ecn_nonce_allowed = 1;
4611 } else if ((ptype == SCTP_HEARTBEAT_INFO) ||
4612 (ptype == SCTP_STATE_COOKIE) ||
4613 (ptype == SCTP_UNRECOG_PARAM) ||
4614 (ptype == SCTP_COOKIE_PRESERVE) ||
4615 (ptype == SCTP_SUPPORTED_ADDRTYPE) ||
4616 (ptype == SCTP_ADD_IP_ADDRESS) ||
4617 (ptype == SCTP_DEL_IP_ADDRESS) ||
4618 (ptype == SCTP_ERROR_CAUSE_IND) ||
4619 (ptype == SCTP_SUCCESS_REPORT)) {
4620 /* don't care */;
4621 } else {
4622 if ((ptype & 0x8000) == 0x0000) {
4623 /* must stop processing the rest of
4624 * the params. Any report bits were
4625 * handled with the call to sctp_arethere_unrecognized_parameters()
4626 * when the INIT or INIT-ACK was first seen.
4628 break;
4631 offset += SCTP_SIZE32(plen);
4632 if (offset >= limit) {
4633 break;
4635 phdr = sctp_get_next_param(m, offset, &parm_buf,
4636 sizeof(parm_buf));
4638 /* Now check to see if we need to purge any addresses */
4639 for (net = TAILQ_FIRST(&stcb->asoc.nets); net != NULL; net = net_tmp) {
4640 net_tmp = TAILQ_NEXT(net, sctp_next);
4641 if ((net->dest_state & SCTP_ADDR_NOT_IN_ASSOC) ==
4642 SCTP_ADDR_NOT_IN_ASSOC) {
4643 /* This address has been removed from the asoc */
4644 /* remove and free it */
4645 stcb->asoc.numnets--;
4646 TAILQ_REMOVE(&stcb->asoc.nets, net, sctp_next);
4647 sctp_free_remote_addr(net);
4648 if (net == stcb->asoc.primary_destination) {
4649 stcb->asoc.primary_destination = NULL;
4650 sctp_select_primary_destination(stcb);
4654 return (0);
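#if 0	/* Illustrative sketch -- not part of the build. */
/*
 * The parameter walk above is the usual SCTP TLV pattern: read a
 * 16-bit type and length in network order, stop on a zero or
 * out-of-bounds length, and advance by the length rounded up to a
 * 4-byte boundary (what SCTP_SIZE32() does).  A hedged flat-buffer
 * version without the mbuf handling; ex_walk_params is invented.
 */
#include <stdint.h>
#include <stddef.h>
#include <string.h>
#include <arpa/inet.h>

struct ex_paramhdr {
	uint16_t param_type;
	uint16_t param_length;
};

static void
ex_walk_params(const uint8_t *buf, size_t limit,
    void (*cb)(uint16_t type, const uint8_t *val, uint16_t len))
{
	size_t offset = 0;

	while (offset + sizeof(struct ex_paramhdr) <= limit) {
		struct ex_paramhdr ph;
		uint16_t type, len;

		memcpy(&ph, buf + offset, sizeof(ph));
		type = ntohs(ph.param_type);
		len = ntohs(ph.param_length);
		if (len < sizeof(ph) || offset + len > limit)
			break;		/* malformed or truncated param */
		cb(type, buf + offset + sizeof(ph),
		    (uint16_t)(len - sizeof(ph)));
		offset += (len + 3) & ~(size_t)3;	/* SCTP_SIZE32-style */
	}
}
#endif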
4658 sctp_set_primary_addr(struct sctp_tcb *stcb, struct sockaddr *sa,
4659 struct sctp_nets *net)
4661 /* make sure the requested primary address exists in the assoc */
4662 if (net == NULL && sa)
4663 net = sctp_findnet(stcb, sa);
4665 if (net == NULL) {
4666 /* didn't find the requested primary address! */
4667 return (-1);
4668 } else {
4669 /* set the primary address */
4670 if (net->dest_state & SCTP_ADDR_UNCONFIRMED) {
4671 /* Must be confirmed */
4672 return (-1);
4674 stcb->asoc.primary_destination = net;
4675 net->dest_state &= ~SCTP_ADDR_WAS_PRIMARY;
4676 return (0);
4682 sctp_is_vtag_good(struct sctp_inpcb *inp, u_int32_t tag, struct timeval *now)
4685 * This function serves two purposes. It will see if a TAG can be
4686 * re-used and return 1 for "yes, it is ok" and 0 for "don't use
4687 * that tag".
4688 * As a secondary function it will purge out old tags that can
4689 * be removed.
4691 struct sctpasochead *head;
4692 struct sctpvtaghead *chain;
4693 struct sctp_tagblock *twait_block;
4694 struct sctp_tcb *stcb;
4696 int i;
4697 SCTP_INP_INFO_WLOCK();
4698 chain = &sctppcbinfo.vtag_timewait[(tag % SCTP_STACK_VTAG_HASH_SIZE)];
4699 /* First is the vtag in use ? */
4701 head = &sctppcbinfo.sctp_asochash[SCTP_PCBHASH_ASOC(tag,
4702 sctppcbinfo.hashasocmark)];
4703 if (head == NULL) {
4704 SCTP_INP_INFO_WUNLOCK();
4705 return (0);
4707 LIST_FOREACH(stcb, head, sctp_asocs) {
4708 if (stcb->asoc.my_vtag == tag) {
4709 /* We should remove this if and
4710 * always return 0 if we want vtags
4711 * unique across all endpoints. For
4712 * now, unique within an endpoint is ok.
4714 if (inp == stcb->sctp_ep) {
4715 /* bad tag, in use */
4716 SCTP_INP_INFO_WUNLOCK();
4717 return (0);
4721 if (!LIST_EMPTY(chain)) {
4723 * Block(s) are present, lets see if we have this tag in
4724 * the list
4726 LIST_FOREACH(twait_block, chain, sctp_nxt_tagblock) {
4727 for (i = 0; i < SCTP_NUMBER_IN_VTAG_BLOCK; i++) {
4728 if (twait_block->vtag_block[i].v_tag == 0) {
4729 /* not used */
4730 continue;
4731 } else if ((long)twait_block->vtag_block[i].tv_sec_at_expire >
4732 now->tv_sec) {
4733 /* Audit expires this guy */
4734 twait_block->vtag_block[i].tv_sec_at_expire = 0;
4735 twait_block->vtag_block[i].v_tag = 0;
4736 } else if (twait_block->vtag_block[i].v_tag ==
4737 tag) {
4738 /* Bad tag, sorry :< */
4739 SCTP_INP_INFO_WUNLOCK();
4740 return (0);
4745 /* Not found, ok to use the tag */
4746 SCTP_INP_INFO_WUNLOCK();
4747 return (1);
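#if 0	/* Illustrative sketch -- not part of the build. */
/*
 * sctp_is_vtag_good() above hashes the tag into a time-wait chain and
 * scans fixed-size blocks of (v_tag, expire-time) slots, recycling
 * slots whose time has run out and rejecting a tag that is still
 * parked there.  A hedged, much-simplified single-array model; the
 * ex_* names are invented and the expiry test is written so that a
 * slot whose expire time has passed is freed.
 */
#include <stdint.h>
#include <time.h>

#define EX_TW_SLOTS	64

struct ex_twslot {
	uint32_t v_tag;			/* 0 means the slot is free */
	time_t expire;
};

static struct ex_twslot ex_twtable[EX_TW_SLOTS];

static int
ex_vtag_good(uint32_t tag, time_t now)
{
	int i;

	for (i = 0; i < EX_TW_SLOTS; i++) {
		if (ex_twtable[i].v_tag == 0)
			continue;		/* unused slot */
		if (ex_twtable[i].expire <= now) {
			ex_twtable[i].v_tag = 0;	/* time-wait over */
			ex_twtable[i].expire = 0;
			continue;
		}
		if (ex_twtable[i].v_tag == tag)
			return (0);		/* still in time-wait: bad */
	}
	return (1);				/* not found: ok to use */
}
#endif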
4752 * Delete the address from the endpoint local address list
4753 * Lookup using a sockaddr address (ie. not an ifaddr)
4756 sctp_del_local_addr_ep_sa(struct sctp_inpcb *inp, struct sockaddr *sa)
4758 struct sctp_laddr *laddr;
4759 struct sockaddr *l_sa;
4760 int found = 0;
4761 /* Here is another function I cannot find a
4762 * caller for. As such we SHOULD delete it
4763 * if we have no users. If we find a user, that
4764 * user MUST have the INP locked.
4768 if (inp->sctp_flags & SCTP_PCB_FLAGS_BOUNDALL) {
4769 /* You are already bound to all. You have it already */
4770 return (EINVAL);
4773 LIST_FOREACH(laddr, &inp->sctp_addr_list, sctp_nxt_addr) {
4774 /* make sure the address exists */
4775 if (laddr->ifa == NULL)
4776 continue;
4777 if (laddr->ifa->ifa_addr == NULL)
4778 continue;
4780 l_sa = laddr->ifa->ifa_addr;
4781 if (l_sa->sa_family == AF_INET6) {
4782 /* IPv6 address */
4783 struct sockaddr_in6 *sin1, *sin2;
4784 sin1 = (struct sockaddr_in6 *)l_sa;
4785 sin2 = (struct sockaddr_in6 *)sa;
4786 if (memcmp(&sin1->sin6_addr, &sin2->sin6_addr,
4787 sizeof(struct in6_addr)) == 0) {
4788 /* matched */
4789 found = 1;
4790 break;
4792 } else if (l_sa->sa_family == AF_INET) {
4793 /* IPv4 address */
4794 struct sockaddr_in *sin1, *sin2;
4795 sin1 = (struct sockaddr_in *)l_sa;
4796 sin2 = (struct sockaddr_in *)sa;
4797 if (sin1->sin_addr.s_addr == sin2->sin_addr.s_addr) {
4798 /* matched */
4799 found = 1;
4800 break;
4802 } else {
4803 /* invalid family */
4804 return (-1);
4808 if (found && inp->laddr_count < 2) {
4809 /* can't delete unless there are at LEAST 2 addresses */
4810 return (-1);
4813 if (found && (inp->sctp_flags & SCTP_PCB_FLAGS_BOUNDALL) == 0) {
4815 * remove it from the ep list; this should NOT be
4816 * done until it's really gone from the interface list and
4817 * we won't be receiving more of these. Probably right
4818 * away. If we do allow a removal of an address from
4819 * an association (sub-set bind) then this should NOT
4820 * be called until all the ASCONFs come back from this
4821 * association.
4823 sctp_remove_laddr(laddr);
4824 return (0);
4825 } else {
4826 return (-1);
4830 static void
4831 sctp_drain_mbufs(struct sctp_inpcb *inp, struct sctp_tcb *stcb)
4834 * We must hunt this association for MBUF's past the cumack
4835 * (i.e. out of order data that we can renege on).
4837 struct sctp_association *asoc;
4838 struct sctp_tmit_chunk *chk, *nchk;
4839 u_int32_t cumulative_tsn_p1, tsn;
4840 int cnt, strmat, gap;
4841 /* We look for anything larger than the cum-ack + 1 */
4843 asoc = &stcb->asoc;
4844 cumulative_tsn_p1 = asoc->cumulative_tsn + 1;
4845 cnt = 0;
4846 /* First look in the re-assembly queue */
4847 chk = TAILQ_FIRST(&asoc->reasmqueue);
4848 while (chk) {
4849 /* Get the next one */
4850 nchk = TAILQ_NEXT(chk, sctp_next);
4851 if (compare_with_wrap(chk->rec.data.TSN_seq,
4852 cumulative_tsn_p1, MAX_TSN)) {
4853 /* Yep it is above cum-ack */
4854 cnt++;
4855 tsn = chk->rec.data.TSN_seq;
4856 if (tsn >= asoc->mapping_array_base_tsn) {
4857 gap = tsn - asoc->mapping_array_base_tsn;
4858 } else {
4859 gap = (MAX_TSN - asoc->mapping_array_base_tsn) +
4860 tsn + 1;
4862 asoc->size_on_reasm_queue -= chk->send_size;
4863 asoc->cnt_on_reasm_queue--;
4864 SCTP_UNSET_TSN_PRESENT(asoc->mapping_array, gap);
4865 TAILQ_REMOVE(&asoc->reasmqueue, chk, sctp_next);
4866 if (chk->data) {
4867 sctp_m_freem(chk->data);
4868 chk->data = NULL;
4870 SCTP_ZONE_FREE(sctppcbinfo.ipi_zone_chunk, chk);
4871 sctppcbinfo.ipi_count_chunk--;
4872 if ((int)sctppcbinfo.ipi_count_chunk < 0) {
4873 panic("Chunk count is negative");
4875 sctppcbinfo.ipi_gencnt_chunk++;
4877 chk = nchk;
4879 /* Ok that was fun, now we will drain all the inbound streams? */
4880 for (strmat = 0; strmat < asoc->streamincnt; strmat++) {
4881 chk = TAILQ_FIRST(&asoc->strmin[strmat].inqueue);
4882 while (chk) {
4883 nchk = TAILQ_NEXT(chk, sctp_next);
4884 if (compare_with_wrap(chk->rec.data.TSN_seq,
4885 cumulative_tsn_p1, MAX_TSN)) {
4886 /* Yep it is above cum-ack */
4887 cnt++;
4888 tsn = chk->rec.data.TSN_seq;
4889 if (tsn >= asoc->mapping_array_base_tsn) {
4890 gap = tsn -
4891 asoc->mapping_array_base_tsn;
4892 } else {
4893 gap = (MAX_TSN -
4894 asoc->mapping_array_base_tsn) +
4895 tsn + 1;
4897 asoc->size_on_all_streams -= chk->send_size;
4898 asoc->cnt_on_all_streams--;
4900 SCTP_UNSET_TSN_PRESENT(asoc->mapping_array,
4901 gap);
4902 TAILQ_REMOVE(&asoc->strmin[strmat].inqueue,
4903 chk, sctp_next);
4904 if (chk->data) {
4905 sctp_m_freem(chk->data);
4906 chk->data = NULL;
4908 SCTP_ZONE_FREE(sctppcbinfo.ipi_zone_chunk, chk);
4909 sctppcbinfo.ipi_count_chunk--;
4910 if ((int)sctppcbinfo.ipi_count_chunk < 0) {
4911 panic("Chunk count is negative");
4913 sctppcbinfo.ipi_gencnt_chunk++;
4915 chk = nchk;
4919 * Question, should we go through the delivery queue?
4920 * The only reason things are on here is the app not reading OR a
4921 * PD-API being in progress. An attacker COULD send enough in to initiate the
4922 * PD-API and then send a bunch of stuff to other streams... these
4923 * would wind up on the delivery queue.. and then we would not get
4924 * to them. But in order to do this I then have to back-track and
4925 * un-deliver sequence numbers in streams.. el-yucko. I think for
4926 * now we will NOT look at the delivery queue and leave it to be
4927 * something to consider later. An alternative would be to abort
4928 * the P-D-API with a notification and then deliver the data....
4929 * Or another method might be to keep track of how many times the
4930 * situation occurs and if we see a possible attack underway just
4931 * abort the association.
4933 #ifdef SCTP_DEBUG
4934 if (sctp_debug_on & SCTP_DEBUG_PCB1) {
4935 if (cnt) {
4936 kprintf("Freed %d chunks from reneg harvest\n", cnt);
4939 #endif /* SCTP_DEBUG */
4942 * Another issue: in un-setting the TSNs in the mapping array we
4943 * DID NOT adjust the highest_tsn marker. This will cause one of
4944 * two things to occur. It may cause us to do extra work in checking
4945 * for our mapping array movement. More importantly it may cause us
4946 * to SACK every datagram. This may not be a bad thing though, since
4947 * we will recover once our cum-ack advances above all the stuff
4948 * we dumped and it has been recovered.
4952 void
4953 sctp_drain(void)
4956 * We must walk the PCB lists for ALL associations here. The system
4957 * is LOW on MBUF's and needs help. This is where reneging will
4958 * occur. We really hope this does NOT happen!
4960 struct sctp_inpcb *inp;
4961 struct sctp_tcb *stcb;
4963 SCTP_INP_INFO_RLOCK();
4964 LIST_FOREACH(inp, &sctppcbinfo.listhead, sctp_list) {
4965 /* For each endpoint */
4966 SCTP_INP_RLOCK(inp);
4967 LIST_FOREACH(stcb, &inp->sctp_asoc_list, sctp_tcblist) {
4968 /* For each association */
4969 SCTP_TCB_LOCK(stcb);
4970 sctp_drain_mbufs(inp, stcb);
4971 SCTP_TCB_UNLOCK(stcb);
4973 SCTP_INP_RUNLOCK(inp);
4975 SCTP_INP_INFO_RUNLOCK();
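#if 0	/* Illustrative sketch -- not part of the build. */
/*
 * The drain path above depends on serial-number style TSN comparison
 * (compare_with_wrap()) and on computing the mapping-array gap with
 * explicit 32-bit wraparound.  compare_with_wrap() itself is defined
 * elsewhere; this is one common way to write the two helpers, with
 * invented ex_* names.
 */
#include <stdint.h>

#define EX_MAX_TSN	0xffffffffU

/* Nonzero if a is "newer" than b, allowing for 32-bit wrap. */
static int
ex_tsn_after(uint32_t a, uint32_t b)
{
	return ((a > b && (a - b) < (1U << 31)) ||
	    (a < b && (b - a) > (1U << 31)));
}

/* Slot of tsn relative to the mapping array base, with wrap. */
static uint32_t
ex_tsn_gap(uint32_t tsn, uint32_t base)
{
	if (tsn >= base)
		return (tsn - base);
	return ((EX_MAX_TSN - base) + tsn + 1);
}
#endif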
4979 sctp_add_to_socket_q(struct sctp_inpcb *inp, struct sctp_tcb *stcb)
4981 struct sctp_socket_q_list *sq;
4983 /* write lock on INP assumed */
4984 if ((inp == NULL) || (stcb == NULL)) {
4985 /* I am paranoid */
4986 return (0);
4988 sq = (struct sctp_socket_q_list *)SCTP_ZONE_GET(
4989 sctppcbinfo.ipi_zone_sockq);
4990 if (sq == NULL) {
4991 /* out of sq structs */
4992 return (0);
4994 sctppcbinfo.ipi_count_sockq++;
4995 sctppcbinfo.ipi_gencnt_sockq++;
4996 if (stcb)
4997 stcb->asoc.cnt_msg_on_sb++;
4998 sq->tcb = stcb;
4999 TAILQ_INSERT_TAIL(&inp->sctp_queue_list, sq, next_sq);
5000 return (1);
5004 struct sctp_tcb *
5005 sctp_remove_from_socket_q(struct sctp_inpcb *inp)
5007 struct sctp_tcb *stcb = NULL;
5008 struct sctp_socket_q_list *sq;
5010 /* W-Lock on INP assumed held */
5011 sq = TAILQ_FIRST(&inp->sctp_queue_list);
5012 if (sq == NULL)
5013 return (NULL);
5015 stcb = sq->tcb;
5016 TAILQ_REMOVE(&inp->sctp_queue_list, sq, next_sq);
5017 SCTP_ZONE_FREE(sctppcbinfo.ipi_zone_sockq, sq);
5018 sctppcbinfo.ipi_count_sockq--;
5019 sctppcbinfo.ipi_gencnt_sockq++;
5020 if (stcb) {
5021 stcb->asoc.cnt_msg_on_sb--;
5023 return (stcb);
5027 sctp_initiate_iterator(asoc_func af, uint32_t pcb_state, uint32_t asoc_state,
5028 void *argp, uint32_t argi, end_func ef,
5029 struct sctp_inpcb *s_inp)
5031 struct sctp_iterator *it=NULL;
5033 if (af == NULL) {
5034 return (-1);
5036 MALLOC(it, struct sctp_iterator *, sizeof(struct sctp_iterator), M_PCB,
5037 M_WAITOK);
5038 memset(it, 0, sizeof(*it));
5039 it->function_toapply = af;
5040 it->function_atend = ef;
5041 it->pointer = argp;
5042 it->val = argi;
5043 it->pcb_flags = pcb_state;
5044 it->asoc_state = asoc_state;
5045 if (s_inp) {
5046 it->inp = s_inp;
5047 it->iterator_flags = SCTP_ITERATOR_DO_SINGLE_INP;
5048 } else {
5049 SCTP_INP_INFO_RLOCK();
5050 it->inp = LIST_FIRST(&sctppcbinfo.listhead);
5051 SCTP_INP_INFO_RUNLOCK();
5052 it->iterator_flags = SCTP_ITERATOR_DO_ALL_INP;
5055 /* Init the timer */
5056 #if defined(__FreeBSD__) && __FreeBSD_version >= 500000
5057 callout_init(&it->tmr.timer, 0);
5058 #else
5059 callout_init(&it->tmr.timer);
5060 #endif
5061 /* add to the list of all iterators */
5062 SCTP_INP_INFO_WLOCK();
5063 LIST_INSERT_HEAD(&sctppcbinfo.iteratorhead, it, sctp_nxt_itr);
5064 SCTP_INP_INFO_WUNLOCK();
5065 crit_enter();
5066 sctp_iterator_timer(it);
5067 crit_exit();
5068 return (0);
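#if 0	/* Illustrative sketch -- not part of the build. */
/*
 * sctp_initiate_iterator() above queues a work item that walks either
 * a single endpoint or every endpoint, applying a callback to each
 * association, and drives the walk from a timer.  A hedged,
 * synchronous userland model of just the walk; the real asoc_func/
 * end_func typedefs, state filtering and locking are not reproduced,
 * and the ex_* names are invented.
 */
#include <stddef.h>

struct ex_assoc { struct ex_assoc *next; };
struct ex_ep    { struct ex_ep *next; struct ex_assoc *assocs; };

static void
ex_iterate(struct ex_ep *all_eps, struct ex_ep *only_ep,
    void (*fn)(struct ex_ep *, struct ex_assoc *, void *), void *arg)
{
	struct ex_ep *ep = (only_ep != NULL) ? only_ep : all_eps;

	for (; ep != NULL; ep = ep->next) {
		struct ex_assoc *a;

		for (a = ep->assocs; a != NULL; a = a->next)
			fn(ep, a, arg);
		if (only_ep != NULL)
			break;	/* the SCTP_ITERATOR_DO_SINGLE_INP case */
	}
}
#endif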
5073 * Callout/Timer routines for OSes that don't have them
5075 #ifdef _SCTP_NEEDS_CALLOUT_
5076 #ifndef __APPLE__
5077 extern int ticks;
5078 #endif
5080 void
5081 callout_init(struct callout *c)
5083 bzero(c, sizeof(*c));
5086 void
5087 callout_reset(struct callout *c, int to_ticks, void (*ftn)(void *), void *arg)
5089 crit_enter();
5090 if (c->c_flags & CALLOUT_PENDING)
5091 callout_stop(c);
5094 * We could spl down here and back up at the TAILQ_INSERT_TAIL,
5095 * but there's no point since doing this setup doesn't take much
5096 * time.
5098 if (to_ticks <= 0)
5099 to_ticks = 1;
5101 c->c_arg = arg;
5102 c->c_flags = (CALLOUT_ACTIVE | CALLOUT_PENDING);
5103 c->c_func = ftn;
5104 #ifdef __APPLE__
5105 c->c_time = to_ticks; /* just store the requested timeout */
5106 timeout(ftn, arg, to_ticks);
5107 #else
5108 c->c_time = ticks + to_ticks;
5109 TAILQ_INSERT_TAIL(&sctppcbinfo.callqueue, c, tqe);
5110 #endif
5111 crit_exit();
5115 callout_stop(struct callout *c)
5117 crit_enter();
5119 * Don't attempt to delete a callout that's not on the queue.
5121 if (!(c->c_flags & CALLOUT_PENDING)) {
5122 c->c_flags &= ~CALLOUT_ACTIVE;
5123 crit_exit();
5124 return (0);
5126 c->c_flags &= ~(CALLOUT_ACTIVE | CALLOUT_PENDING| CALLOUT_FIRED);
5127 #ifdef __APPLE__
5128 /* thread_call_cancel(c->c_call); */
5129 untimeout(c->c_func, c->c_arg);
5130 #else
5131 TAILQ_REMOVE(&sctppcbinfo.callqueue, c, tqe);
5132 c->c_func = NULL;
5133 #endif
5134 crit_exit();
5135 return (1);
5138 #if !defined(__APPLE__)
5139 void
5140 sctp_fasttim(void)
5142 struct callout *c, *n;
5143 struct calloutlist locallist;
5144 int inited = 0;
5146 crit_enter();
5147 /* run through and subtract and mark all callouts */
5148 c = TAILQ_FIRST(&sctppcbinfo.callqueue);
5149 while (c) {
5150 n = TAILQ_NEXT(c, tqe);
5151 if (c->c_time <= ticks) {
5152 c->c_flags |= CALLOUT_FIRED;
5153 c->c_time = 0;
5154 TAILQ_REMOVE(&sctppcbinfo.callqueue, c, tqe);
5155 if (inited == 0) {
5156 TAILQ_INIT(&locallist);
5157 inited = 1;
5159 /* move off of main list */
5160 TAILQ_INSERT_TAIL(&locallist, c, tqe);
5162 c = n;
5164 /* Now all the ones on the locallist must be called */
5165 if (inited) {
5166 c = TAILQ_FIRST(&locallist);
5167 while (c) {
5168 /* remove it */
5169 TAILQ_REMOVE(&locallist, c, tqe);
5170 /* now validate that it did not get canceled */
5171 if (c->c_flags & CALLOUT_FIRED) {
5172 c->c_flags &= ~CALLOUT_PENDING;
5173 crit_exit();
5174 (*c->c_func)(c->c_arg);
5175 crit_enter();
5177 c = TAILQ_FIRST(&locallist);
5180 crit_exit();
5182 #endif
5183 #endif /* _SCTP_NEEDS_CALLOUT_ */
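#if 0	/* Illustrative sketch -- not part of the build. */
/*
 * The emulation above keeps all pending callouts on one TAILQ and each
 * sctp_fasttim() pass moves the expired ones onto a private list
 * before firing them, so the handlers run after the queue walk and can
 * re-arm or cancel callouts safely.  A hedged single-threaded model of
 * that two-pass pattern; the ex_* names are invented.
 */
#include <sys/queue.h>
#include <stddef.h>

struct ex_callout {
	TAILQ_ENTRY(ex_callout) tqe;
	int c_time;			/* absolute tick deadline */
	void (*c_func)(void *);
	void *c_arg;
};
TAILQ_HEAD(ex_callq, ex_callout);

static void
ex_run_expired(struct ex_callq *pending, int now_ticks)
{
	struct ex_callq fired;
	struct ex_callout *c, *n;

	TAILQ_INIT(&fired);
	/* Pass 1: pull expired entries off the shared queue. */
	c = TAILQ_FIRST(pending);
	while (c != NULL) {
		n = TAILQ_NEXT(c, tqe);
		if (c->c_time <= now_ticks) {
			TAILQ_REMOVE(pending, c, tqe);
			TAILQ_INSERT_TAIL(&fired, c, tqe);
		}
		c = n;
	}
	/* Pass 2: fire them once the shared queue is consistent. */
	while ((c = TAILQ_FIRST(&fired)) != NULL) {
		TAILQ_REMOVE(&fired, c, tqe);
		(*c->c_func)(c->c_arg);
	}
}
#endif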