1 /* $KAME: sctp_pcb.c,v 1.37 2004/08/17 06:28:02 t-momose Exp $ */
2 /* $DragonFly: src/sys/netinet/sctp_pcb.c,v 1.14 2008/03/07 11:34:20 sephe Exp $ */
4 /*
5 * Copyright (c) 2001, 2002, 2003, 2004 Cisco Systems, Inc.
6 * All rights reserved.
8 * Redistribution and use in source and binary forms, with or without
9 * modification, are permitted provided that the following conditions
10 * are met:
11 * 1. Redistributions of source code must retain the above copyright
12 * notice, this list of conditions and the following disclaimer.
13 * 2. Redistributions in binary form must reproduce the above copyright
14 * notice, this list of conditions and the following disclaimer in the
15 * documentation and/or other materials provided with the distribution.
16 * 3. All advertising materials mentioning features or use of this software
17 * must display the following acknowledgement:
18 * This product includes software developed by Cisco Systems, Inc.
19 * 4. Neither the name of the project nor the names of its contributors
20 * may be used to endorse or promote products derived from this software
21 * without specific prior written permission.
23 * THIS SOFTWARE IS PROVIDED BY CISCO SYSTEMS AND CONTRIBUTORS ``AS IS'' AND
24 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
25 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
26 * ARE DISCLAIMED. IN NO EVENT SHALL CISCO SYSTEMS OR CONTRIBUTORS BE LIABLE
27 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
28 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
29 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
30 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
31 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
32 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
33 * SUCH DAMAGE.
35 #if !(defined(__OpenBSD__) || defined(__APPLE__))
36 #include "opt_ipsec.h"
37 #endif
38 #if defined(__FreeBSD__) || defined(__DragonFly__)
39 #include "opt_compat.h"
40 #include "opt_inet6.h"
41 #include "opt_inet.h"
42 #endif
43 #if defined(__NetBSD__)
44 #include "opt_inet.h"
45 #endif
46 #ifdef __APPLE__
47 #include <sctp.h>
48 #elif !defined(__OpenBSD__)
49 #include "opt_sctp.h"
50 #endif
52 #include <sys/param.h>
53 #include <sys/systm.h>
54 #include <sys/malloc.h>
55 #include <sys/mbuf.h>
56 #include <sys/domain.h>
57 #include <sys/protosw.h>
58 #include <sys/socket.h>
59 #include <sys/socketvar.h>
60 #include <sys/proc.h>
61 #include <sys/priv.h>
62 #include <sys/kernel.h>
63 #include <sys/sysctl.h>
64 #include <sys/thread2.h>
65 #if defined(__FreeBSD__) || defined(__APPLE__) || defined(__DragonFly__)
66 #include <sys/random.h>
67 #endif
68 #if defined(__NetBSD__)
69 #include <sys/rnd.h>
70 #endif
71 #if defined(__OpenBSD__)
72 #include <dev/rndvar.h>
73 #endif
75 #if defined(__APPLE__)
76 #include <netinet/sctp_callout.h>
77 #elif defined(__OpenBSD__)
78 #include <sys/timeout.h>
79 #else
80 #include <sys/callout.h>
81 #endif
83 #if (defined(__FreeBSD__) && __FreeBSD_version >= 500000)
84 #include <sys/limits.h>
85 #else
86 #include <machine/limits.h>
87 #endif
88 #include <machine/cpu.h>
90 #include <net/if.h>
91 #include <net/if_types.h>
92 #include <net/route.h>
93 #include <netinet/in.h>
94 #include <netinet/in_systm.h>
95 #include <netinet/ip.h>
96 #include <netinet/in_pcb.h>
97 #include <netinet/in_var.h>
98 #include <netinet/ip_var.h>
100 #ifdef INET6
101 #include <netinet/ip6.h>
102 #include <netinet6/ip6_var.h>
103 #include <netinet6/scope6_var.h>
104 #if defined(__FreeBSD__) || defined(__NetBSD__) || defined(__DragonFly__)
105 #include <netinet6/in6_pcb.h>
106 #elif defined(__OpenBSD__)
107 #include <netinet/in_pcb.h>
108 #endif
109 #endif /* INET6 */
111 #ifdef IPSEC
112 #ifndef __OpenBSD__
113 #include <netinet6/ipsec.h>
114 #include <netproto/key/key.h>
115 #else
116 #undef IPSEC
117 #endif
118 #endif /* IPSEC */
120 #include <netinet/sctp_var.h>
121 #include <netinet/sctp_pcb.h>
122 #include <netinet/sctputil.h>
123 #include <netinet/sctp.h>
124 #include <netinet/sctp_header.h>
125 #include <netinet/sctp_asconf.h>
126 #include <netinet/sctp_output.h>
127 #include <netinet/sctp_timer.h>
129 #ifndef SCTP_PCBHASHSIZE
130 /* default number of association hash buckets in each endpoint */
131 #define SCTP_PCBHASHSIZE 256
132 #endif
134 #ifdef SCTP_DEBUG
135 u_int32_t sctp_debug_on = 0;
136 #endif /* SCTP_DEBUG */
138 u_int32_t sctp_pegs[SCTP_NUMBER_OF_PEGS];
140 int sctp_pcbtblsize = SCTP_PCBHASHSIZE;
142 struct sctp_epinfo sctppcbinfo;
144 /* FIX: we don't handle multiple link local scopes */
145 /* "scopeless" replacement IN6_ARE_ADDR_EQUAL */
147 SCTP6_ARE_ADDR_EQUAL(struct in6_addr *a, struct in6_addr *b)
149 struct in6_addr tmp_a, tmp_b;
150 /* use a copy of a and b */
151 tmp_a = *a;
152 tmp_b = *b;
153 in6_clearscope(&tmp_a);
154 in6_clearscope(&tmp_b);
155 return (IN6_ARE_ADDR_EQUAL(&tmp_a, &tmp_b));
158 #ifdef __OpenBSD__
159 extern int ipport_firstauto;
160 extern int ipport_lastauto;
161 extern int ipport_hifirstauto;
162 extern int ipport_hilastauto;
163 #endif
165 #if defined(__FreeBSD__) && __FreeBSD_version > 500000
167 #ifndef xyzzy
168 void sctp_validate_no_locks(void);
170 void
171 SCTP_INP_RLOCK(struct sctp_inpcb *inp)
173 struct sctp_tcb *stcb;
174 LIST_FOREACH(stcb, &inp->sctp_asoc_list, sctp_tcblist) {
175 if (mtx_owned(&(stcb)->tcb_mtx))
176 panic("I own TCB lock?");
178 if (mtx_owned(&(inp)->inp_mtx))
179 panic("INP Recursive Lock-R");
180 mtx_lock(&(inp)->inp_mtx);
183 void
184 SCTP_INP_WLOCK(struct sctp_inpcb *inp)
186 SCTP_INP_RLOCK(inp);
189 void
190 SCTP_INP_INFO_RLOCK(void)
192 struct sctp_inpcb *inp;
193 struct sctp_tcb *stcb;
194 LIST_FOREACH(inp, &sctppcbinfo.listhead, sctp_list) {
195 if (mtx_owned(&(inp)->inp_mtx))
196 panic("info-lock and own inp lock?");
197 LIST_FOREACH(stcb, &inp->sctp_asoc_list, sctp_tcblist) {
198 if (mtx_owned(&(stcb)->tcb_mtx))
199 panic("Info lock and own a tcb lock?");
202 if (mtx_owned(&sctppcbinfo.ipi_ep_mtx))
203 panic("INP INFO Recursive Lock-R");
204 mtx_lock(&sctppcbinfo.ipi_ep_mtx);
207 void
208 SCTP_INP_INFO_WLOCK(void)
210 SCTP_INP_INFO_RLOCK();
214 void sctp_validate_no_locks(void)
216 struct sctp_inpcb *inp;
217 struct sctp_tcb *stcb;
219 if (mtx_owned(&sctppcbinfo.ipi_ep_mtx))
220 panic("INP INFO lock is owned?");
222 LIST_FOREACH(inp, &sctppcbinfo.listhead, sctp_list) {
223 if (mtx_owned(&(inp)->inp_mtx))
224 panic("You own an INP lock?");
225 LIST_FOREACH(stcb, &inp->sctp_asoc_list, sctp_tcblist) {
226 if (mtx_owned(&(stcb)->tcb_mtx))
227 panic("You own a TCB lock?");
232 #endif
233 #endif
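/*
 * The wrappers above, compiled only in the FreeBSD 5+ "#ifndef xyzzy" debug
 * block, turn the lock macros into lock-order assertions: taking an INP or
 * INP-INFO lock while already holding a TCB lock (or recursively holding the
 * same lock) panics, and sctp_validate_no_locks() checks that no SCTP lock
 * is held at all.
 */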
235 void
236 sctp_fill_pcbinfo(struct sctp_pcbinfo *spcb)
238 /* We really don't need
239 * to lock this, but I will
240 * just because it does not hurt.
242 SCTP_INP_INFO_RLOCK();
243 spcb->ep_count = sctppcbinfo.ipi_count_ep;
244 spcb->asoc_count = sctppcbinfo.ipi_count_asoc;
245 spcb->laddr_count = sctppcbinfo.ipi_count_laddr;
246 spcb->raddr_count = sctppcbinfo.ipi_count_raddr;
247 spcb->chk_count = sctppcbinfo.ipi_count_chunk;
248 spcb->sockq_count = sctppcbinfo.ipi_count_sockq;
249 spcb->mbuf_track = sctppcbinfo.mbuf_track;
250 SCTP_INP_INFO_RUNLOCK();
255 * Notes on locks for FreeBSD 5 and up. All association
256 * lookups that have a definite ep, the INP structure is
257 * assumed to be locked for reading. If we need to go
258 * find the INP (usually when a **inp is passed) then
259 * we must lock the INFO structure first and if needed
260 * lock the INP too. Note that if we lock it we must
266 * Given a endpoint, look and find in its association list any association
267 * with the "to" address given. This can be a "from" address, too, for
268 * inbound packets. For outbound packets it is a true "to" address.
270 static struct sctp_tcb *
271 sctp_tcb_special_locate(struct sctp_inpcb **inp_p, struct sockaddr *from,
272 struct sockaddr *to, struct sctp_nets **netp)
274 /**** ASSUMES THE CALLER holds the INP_INFO_RLOCK */
277 * Note for this module care must be taken when observing what to is
278 * for. In most of the rest of the code the TO field represents my
279 * peer and the FROM field represents my address. For this module it
280 * is reversed of that.
283 * If we support the TCP model, then we must now dig through to
284 * see if we can find our endpoint in the list of tcp ep's.
286 uint16_t lport, rport;
287 struct sctppcbhead *ephead;
288 struct sctp_inpcb *inp;
289 struct sctp_laddr *laddr;
290 struct sctp_tcb *stcb;
291 struct sctp_nets *net;
293 if ((to == NULL) || (from == NULL)) {
294 return (NULL);
297 if (to->sa_family == AF_INET && from->sa_family == AF_INET) {
298 lport = ((struct sockaddr_in *)to)->sin_port;
299 rport = ((struct sockaddr_in *)from)->sin_port;
300 } else if (to->sa_family == AF_INET6 && from->sa_family == AF_INET6) {
301 lport = ((struct sockaddr_in6 *)to)->sin6_port;
302 rport = ((struct sockaddr_in6 *)from)->sin6_port;
303 } else {
304 return NULL;
306 ephead = &sctppcbinfo.sctp_tcpephash[SCTP_PCBHASH_ALLADDR(
307 (lport + rport), sctppcbinfo.hashtcpmark)];
309 * Ok now for each of the guys in this bucket we must look
310 * and see:
311 * - Does the remote port match.
312 * - Does the single association's addresses match this
313 * address (to).
314 * If so we update p_ep to point to this ep and return the
315 * tcb from it.
317 LIST_FOREACH(inp, ephead, sctp_hash) {
318 if (lport != inp->sctp_lport) {
319 continue;
321 SCTP_INP_RLOCK(inp);
322 /* check to see if the ep has one of the addresses */
323 if ((inp->sctp_flags & SCTP_PCB_FLAGS_BOUNDALL) == 0) {
324 /* We are NOT bound all, so look further */
325 int match = 0;
327 LIST_FOREACH(laddr, &inp->sctp_addr_list, sctp_nxt_addr) {
328 if (laddr->ifa == NULL) {
329 #ifdef SCTP_DEBUG
330 if (sctp_debug_on & SCTP_DEBUG_PCB1) {
331 kprintf("An ounce of prevention is worth a pound of cure\n");
333 #endif
334 continue;
336 if (laddr->ifa->ifa_addr == NULL) {
337 #ifdef SCTP_DEBUG
338 if (sctp_debug_on & SCTP_DEBUG_PCB1) {
339 kprintf("ifa with a NULL address\n");
341 #endif
342 continue;
344 if (laddr->ifa->ifa_addr->sa_family ==
345 to->sa_family) {
346 /* see if it matches */
347 struct sockaddr_in *intf_addr, *sin;
348 intf_addr = (struct sockaddr_in *)
349 laddr->ifa->ifa_addr;
350 sin = (struct sockaddr_in *)to;
351 if (from->sa_family == AF_INET) {
352 if (sin->sin_addr.s_addr ==
353 intf_addr->sin_addr.s_addr) {
354 match = 1;
355 SCTP_INP_RUNLOCK(inp);
356 break;
358 } else {
359 struct sockaddr_in6 *intf_addr6;
360 struct sockaddr_in6 *sin6;
361 sin6 = (struct sockaddr_in6 *)
362     to;
363 intf_addr6 = (struct sockaddr_in6 *)
364 laddr->ifa->ifa_addr;
366 if (SCTP6_ARE_ADDR_EQUAL(&sin6->sin6_addr,
367 &intf_addr6->sin6_addr)) {
368 match = 1;
369 SCTP_INP_RUNLOCK(inp);
370 break;
375 if (match == 0) {
376 /* This endpoint does not have this address */
377 SCTP_INP_RUNLOCK(inp);
378 continue;
382 * Ok if we hit here the ep has the address, does it hold the
383 * tcb?
386 stcb = LIST_FIRST(&inp->sctp_asoc_list);
387 if (stcb == NULL) {
388 SCTP_INP_RUNLOCK(inp);
389 continue;
391 SCTP_TCB_LOCK(stcb);
392 if (stcb->rport != rport) {
393 /* remote port does not match. */
394 SCTP_TCB_UNLOCK(stcb);
395 SCTP_INP_RUNLOCK(inp);
396 continue;
398 /* Does this TCB have a matching address? */
399 TAILQ_FOREACH(net, &stcb->asoc.nets, sctp_next) {
400 if (net->ro._l_addr.sa.sa_family != from->sa_family) {
401 /* not the same family, can't be a match */
402 continue;
404 if (from->sa_family == AF_INET) {
405 struct sockaddr_in *sin, *rsin;
406 sin = (struct sockaddr_in *)&net->ro._l_addr;
407 rsin = (struct sockaddr_in *)from;
408 if (sin->sin_addr.s_addr ==
409 rsin->sin_addr.s_addr) {
410 /* found it */
411 if (netp != NULL) {
412 *netp = net;
414 /* Update the endpoint pointer */
415 *inp_p = inp;
416 SCTP_INP_RUNLOCK(inp);
417 return (stcb);
419 } else {
420 struct sockaddr_in6 *sin6, *rsin6;
421 sin6 = (struct sockaddr_in6 *)&net->ro._l_addr;
422 rsin6 = (struct sockaddr_in6 *)from;
423 if (SCTP6_ARE_ADDR_EQUAL(&sin6->sin6_addr,
424 &rsin6->sin6_addr)) {
425 /* found it */
426 if (netp != NULL) {
427 *netp = net;
429 /* Update the endpoint pointer */
430 *inp_p = inp;
431 SCTP_INP_RUNLOCK(inp);
432 return (stcb);
436 SCTP_TCB_UNLOCK(stcb);
438 SCTP_INP_RUNLOCK(inp);
440 return (NULL);
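/*
 * sctp_findassociation_ep_asconf() below pulls the lookup address out of an
 * ASCONF chunk: the IP header's destination supplies the local address, the
 * chunk's address parameter (v4 or v6) supplies the remote address, and the
 * pair is then handed to sctp_findassociation_ep_addr().
 */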
443 struct sctp_tcb *
444 sctp_findassociation_ep_asconf(struct mbuf *m, int iphlen, int offset,
445 struct sctphdr *sh, struct sctp_inpcb **inp_p, struct sctp_nets **netp)
447 struct sctp_tcb *stcb;
448 struct sockaddr_in *sin;
449 struct sockaddr_in6 *sin6;
450 struct sockaddr_storage local_store, remote_store;
451 struct ip *iph;
452 struct sctp_paramhdr parm_buf, *phdr;
453 int ptype;
455 memset(&local_store, 0, sizeof(local_store));
456 memset(&remote_store, 0, sizeof(remote_store));
458 /* First get the destination address setup too. */
459 iph = mtod(m, struct ip *);
460 if (iph->ip_v == IPVERSION) {
461 /* its IPv4 */
462 sin = (struct sockaddr_in *)&local_store;
463 sin->sin_family = AF_INET;
464 sin->sin_len = sizeof(*sin);
465 sin->sin_port = sh->dest_port;
466 sin->sin_addr.s_addr = iph->ip_dst.s_addr ;
467 } else if (iph->ip_v == (IPV6_VERSION >> 4)) {
468 /* its IPv6 */
469 struct ip6_hdr *ip6;
470 ip6 = mtod(m, struct ip6_hdr *);
471 sin6 = (struct sockaddr_in6 *)&local_store;
472 sin6->sin6_family = AF_INET6;
473 sin6->sin6_len = sizeof(*sin6);
474 sin6->sin6_port = sh->dest_port;
475 sin6->sin6_addr = ip6->ip6_dst;
476 } else {
477 return NULL;
480 phdr = sctp_get_next_param(m, offset + sizeof(struct sctp_asconf_chunk),
481 &parm_buf, sizeof(struct sctp_paramhdr));
482 if (phdr == NULL) {
483 #ifdef SCTP_DEBUG
484 if (sctp_debug_on & SCTP_DEBUG_INPUT3) {
485 kprintf("sctp_process_control: failed to get asconf lookup addr\n");
487 #endif /* SCTP_DEBUG */
488 return NULL;
490 ptype = (int)((u_int)ntohs(phdr->param_type));
491 /* get the correlation address */
492 if (ptype == SCTP_IPV6_ADDRESS) {
493 /* ipv6 address param */
494 struct sctp_ipv6addr_param *p6, p6_buf;
495 if (ntohs(phdr->param_length) != sizeof(struct sctp_ipv6addr_param)) {
496 return NULL;
499 p6 = (struct sctp_ipv6addr_param *)sctp_get_next_param(m,
500 offset + sizeof(struct sctp_asconf_chunk),
501 &p6_buf.ph, sizeof(*p6));
502 if (p6 == NULL) {
503 #ifdef SCTP_DEBUG
504 if (sctp_debug_on & SCTP_DEBUG_INPUT3) {
505 kprintf("sctp_process_control: failed to get asconf v6 lookup addr\n");
507 #endif /* SCTP_DEBUG */
508 return (NULL);
510 sin6 = (struct sockaddr_in6 *)&remote_store;
511 sin6->sin6_family = AF_INET6;
512 sin6->sin6_len = sizeof(*sin6);
513 sin6->sin6_port = sh->src_port;
514 memcpy(&sin6->sin6_addr, &p6->addr, sizeof(struct in6_addr));
515 } else if (ptype == SCTP_IPV4_ADDRESS) {
516 /* ipv4 address param */
517 struct sctp_ipv4addr_param *p4, p4_buf;
518 if (ntohs(phdr->param_length) != sizeof(struct sctp_ipv4addr_param)) {
519 return NULL;
522 p4 = (struct sctp_ipv4addr_param *)sctp_get_next_param(m,
523 offset + sizeof(struct sctp_asconf_chunk),
524 &p4_buf.ph, sizeof(*p4));
525 if (p4 == NULL) {
526 #ifdef SCTP_DEBUG
527 if (sctp_debug_on & SCTP_DEBUG_INPUT3) {
528 kprintf("sctp_process_control: failed to get asconf v4 lookup addr\n");
530 #endif /* SCTP_DEBUG */
531 return (NULL);
533 sin = (struct sockaddr_in *)&remote_store;
534 sin->sin_family = AF_INET;
535 sin->sin_len = sizeof(*sin);
536 sin->sin_port = sh->src_port;
537 memcpy(&sin->sin_addr, &p4->addr, sizeof(struct in_addr));
538 } else {
539 /* invalid address param type */
540 return NULL;
543 stcb = sctp_findassociation_ep_addr(inp_p,
544 (struct sockaddr *)&remote_store, netp,
545 (struct sockaddr *)&local_store, NULL);
546 return (stcb);
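/*
 * sctp_findassociation_ep_addr(): given an endpoint (via *inp_p) and a remote
 * address, find the association whose remote address list contains 'remote'.
 * In the TCP model the special TCP-pool lookup may replace *inp_p with the
 * connected endpoint; a caller-supplied 'locked_tcb' has its lock dropped and
 * re-taken around the search to keep the lock ordering sane.
 */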
549 struct sctp_tcb *
550 sctp_findassociation_ep_addr(struct sctp_inpcb **inp_p, struct sockaddr *remote,
551 struct sctp_nets **netp, struct sockaddr *local, struct sctp_tcb *locked_tcb)
553 struct sctpasochead *head;
554 struct sctp_inpcb *inp;
555 struct sctp_tcb *stcb;
556 struct sctp_nets *net;
557 uint16_t rport;
559 inp = *inp_p;
560 if (remote->sa_family == AF_INET) {
561 rport = (((struct sockaddr_in *)remote)->sin_port);
562 } else if (remote->sa_family == AF_INET6) {
563 rport = (((struct sockaddr_in6 *)remote)->sin6_port);
564 } else {
565 return (NULL);
567 if (locked_tcb) {
568 /* UN-lock so we can do proper locking here
569 * this occurs when called from load_addresses_from_init.
571 SCTP_TCB_UNLOCK(locked_tcb);
573 SCTP_INP_INFO_RLOCK();
574 if (inp->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) {
576 * Now either this guy is our listener or it's the connector.
577 * If it is the one that issued the connect, then it's only
578 * chance is to be the first TCB in the list. If it is the
579 * acceptor, then do the special_lookup to hash and find the
580 * real inp.
582 if (inp->sctp_flags & SCTP_PCB_FLAGS_ACCEPTING) {
583 /* to is peer addr, from is my addr */
584 stcb = sctp_tcb_special_locate(inp_p, remote, local,
585 netp);
586 if ((stcb != NULL) && (locked_tcb == NULL)){
587 /* we have a locked tcb, lower refcount */
588 SCTP_INP_WLOCK(inp);
589 SCTP_INP_DECR_REF(inp);
590 SCTP_INP_WUNLOCK(inp);
592 if (locked_tcb != NULL) {
593 SCTP_INP_RLOCK(locked_tcb->sctp_ep);
594 SCTP_TCB_LOCK(locked_tcb);
595 SCTP_INP_RUNLOCK(locked_tcb->sctp_ep);
596 if (stcb != NULL)
597 SCTP_TCB_UNLOCK(stcb);
599 SCTP_INP_INFO_RUNLOCK();
600 return (stcb);
601 } else {
602 SCTP_INP_WLOCK(inp);
603 stcb = LIST_FIRST(&inp->sctp_asoc_list);
604 if (stcb == NULL) {
605 goto null_return;
607 SCTP_TCB_LOCK(stcb);
608 if (stcb->rport != rport) {
609 /* remote port does not match. */
610 SCTP_TCB_UNLOCK(stcb);
611 goto null_return;
613 /* now look at the list of remote addresses */
614 TAILQ_FOREACH(net, &stcb->asoc.nets, sctp_next) {
615 if (net->ro._l_addr.sa.sa_family !=
616 remote->sa_family) {
617 /* not the same family */
618 continue;
620 if (remote->sa_family == AF_INET) {
621 struct sockaddr_in *sin, *rsin;
622 sin = (struct sockaddr_in *)
623 &net->ro._l_addr;
624 rsin = (struct sockaddr_in *)remote;
625 if (sin->sin_addr.s_addr ==
626 rsin->sin_addr.s_addr) {
627 /* found it */
628 if (netp != NULL) {
629 *netp = net;
631 if (locked_tcb == NULL) {
632 SCTP_INP_DECR_REF(inp);
634 SCTP_INP_WUNLOCK(inp);
635 SCTP_INP_INFO_RUNLOCK();
636 return (stcb);
638 } else if (remote->sa_family == AF_INET6) {
639 struct sockaddr_in6 *sin6, *rsin6;
640 sin6 = (struct sockaddr_in6 *)&net->ro._l_addr;
641 rsin6 = (struct sockaddr_in6 *)remote;
642 if (SCTP6_ARE_ADDR_EQUAL(&sin6->sin6_addr,
643 &rsin6->sin6_addr)) {
644 /* found it */
645 if (netp != NULL) {
646 *netp = net;
648 if (locked_tcb == NULL) {
649 SCTP_INP_DECR_REF(inp);
651 SCTP_INP_WUNLOCK(inp);
652 SCTP_INP_INFO_RUNLOCK();
653 return (stcb);
657 SCTP_TCB_UNLOCK(stcb);
659 } else {
660 SCTP_INP_WLOCK(inp);
661 head = &inp->sctp_tcbhash[SCTP_PCBHASH_ALLADDR(rport,
662 inp->sctp_hashmark)];
663 if (head == NULL) {
664 goto null_return;
666 LIST_FOREACH(stcb, head, sctp_tcbhash) {
667 if (stcb->rport != rport) {
668 /* remote port does not match */
669 continue;
671 /* now look at the list of remote addresses */
672 SCTP_TCB_LOCK(stcb);
673 TAILQ_FOREACH(net, &stcb->asoc.nets, sctp_next) {
674 if (net->ro._l_addr.sa.sa_family !=
675 remote->sa_family) {
676 /* not the same family */
677 continue;
679 if (remote->sa_family == AF_INET) {
680 struct sockaddr_in *sin, *rsin;
681 sin = (struct sockaddr_in *)
682 &net->ro._l_addr;
683 rsin = (struct sockaddr_in *)remote;
684 if (sin->sin_addr.s_addr ==
685 rsin->sin_addr.s_addr) {
686 /* found it */
687 if (netp != NULL) {
688 *netp = net;
690 if (locked_tcb == NULL) {
691 SCTP_INP_DECR_REF(inp);
693 SCTP_INP_WUNLOCK(inp);
694 SCTP_INP_INFO_RUNLOCK();
695 return (stcb);
697 } else if (remote->sa_family == AF_INET6) {
698 struct sockaddr_in6 *sin6, *rsin6;
699 sin6 = (struct sockaddr_in6 *)
700 &net->ro._l_addr;
701 rsin6 = (struct sockaddr_in6 *)remote;
702 if (SCTP6_ARE_ADDR_EQUAL(&sin6->sin6_addr,
703 &rsin6->sin6_addr)) {
704 /* found it */
705 if (netp != NULL) {
706 *netp = net;
708 if (locked_tcb == NULL) {
709 SCTP_INP_DECR_REF(inp);
711 SCTP_INP_WUNLOCK(inp);
712 SCTP_INP_INFO_RUNLOCK();
713 return (stcb);
717 SCTP_TCB_UNLOCK(stcb);
720 null_return:
721 /* clean up for returning null */
722 if (locked_tcb){
723 if (locked_tcb->sctp_ep != inp) {
724 SCTP_INP_RLOCK(locked_tcb->sctp_ep);
725 SCTP_TCB_LOCK(locked_tcb);
726 SCTP_INP_RUNLOCK(locked_tcb->sctp_ep);
727 } else
728 SCTP_TCB_LOCK(locked_tcb);
730 SCTP_INP_WUNLOCK(inp);
731 SCTP_INP_INFO_RUNLOCK();
732 /* not found */
733 return (NULL);
737 * Find an association for a specific endpoint using the association id
738 * given out in the COMM_UP notification
740 struct sctp_tcb *
741 sctp_findassociation_ep_asocid(struct sctp_inpcb *inp, caddr_t asoc_id)
744 * Use the assoc_id to find an endpoint
746 struct sctpasochead *head;
747 struct sctp_tcb *stcb;
748 u_int32_t vtag;
750 if (asoc_id == 0 || inp == NULL) {
751 return (NULL);
753 SCTP_INP_INFO_RLOCK();
754 vtag = (u_int32_t)asoc_id;
755 head = &sctppcbinfo.sctp_asochash[SCTP_PCBHASH_ASOC(vtag,
756 sctppcbinfo.hashasocmark)];
757 if (head == NULL) {
758 /* invalid vtag */
759 SCTP_INP_INFO_RUNLOCK();
760 return (NULL);
762 LIST_FOREACH(stcb, head, sctp_asocs) {
763 SCTP_INP_RLOCK(stcb->sctp_ep);
764 SCTP_TCB_LOCK(stcb);
765 SCTP_INP_RUNLOCK(stcb->sctp_ep);
766 if (stcb->asoc.my_vtag == vtag) {
767 /* candidate */
768 if (inp != stcb->sctp_ep) {
769 /* some other guy has the
770 * same vtag active (vtag collision).
772 sctp_pegs[SCTP_VTAG_BOGUS]++;
773 SCTP_TCB_UNLOCK(stcb);
774 continue;
776 sctp_pegs[SCTP_VTAG_EXPR]++;
777 SCTP_INP_INFO_RUNLOCK();
778 return (stcb);
780 SCTP_TCB_UNLOCK(stcb);
782 SCTP_INP_INFO_RUNLOCK();
783 return (NULL);
786 static struct sctp_inpcb *
787 sctp_endpoint_probe(struct sockaddr *nam, struct sctppcbhead *head,
788 uint16_t lport)
790 struct sctp_inpcb *inp;
791 struct sockaddr_in *sin;
792 struct sockaddr_in6 *sin6;
793 struct sctp_laddr *laddr;
795 /* Endpoint probe expects
796 * that the INP_INFO is locked.
798 if (nam->sa_family == AF_INET) {
799 sin = (struct sockaddr_in *)nam;
800 sin6 = NULL;
801 } else if (nam->sa_family == AF_INET6) {
802 sin6 = (struct sockaddr_in6 *)nam;
803 sin = NULL;
804 } else {
805 /* unsupported family */
806 return (NULL);
808 if (head == NULL)
809 return (NULL);
811 LIST_FOREACH(inp, head, sctp_hash) {
812 SCTP_INP_RLOCK(inp);
814 if ((inp->sctp_flags & SCTP_PCB_FLAGS_BOUNDALL) &&
815 (inp->sctp_lport == lport)) {
816 /* got it */
817 if ((nam->sa_family == AF_INET) &&
818 (inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) &&
819 #if defined(__FreeBSD__) || defined(__APPLE__) || defined(__DragonFly__)
820 (((struct inpcb *)inp)->inp_flags & IN6P_IPV6_V6ONLY)
821 #else
822 #if defined(__OpenBSD__)
823 (0) /* For open bsd we do dual bind only */
824 #else
825 (((struct in6pcb *)inp)->in6p_flags & IN6P_IPV6_V6ONLY)
826 #endif
827 #endif
829 /* IPv4 on a IPv6 socket with ONLY IPv6 set */
830 SCTP_INP_RUNLOCK(inp);
831 continue;
833 /* A V6 address and the endpoint is NOT bound V6 */
834 if (nam->sa_family == AF_INET6 &&
835 (inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) == 0) {
836 SCTP_INP_RUNLOCK(inp);
837 continue;
839 SCTP_INP_RUNLOCK(inp);
840 return (inp);
842 SCTP_INP_RUNLOCK(inp);
845 if ((nam->sa_family == AF_INET) &&
846 (sin->sin_addr.s_addr == INADDR_ANY)) {
847 /* Can't hunt for one that has no address specified */
848 return (NULL);
849 } else if ((nam->sa_family == AF_INET6) &&
850 (IN6_IS_ADDR_UNSPECIFIED(&sin6->sin6_addr))) {
851 /* Can't hunt for one that has no address specified */
852 return (NULL);
855 * ok, not bound to all so see if we can find a EP bound to this
856 * address.
858 #ifdef SCTP_DEBUG
859 if (sctp_debug_on & SCTP_DEBUG_PCB1) {
860 kprintf("Ok, there is NO bound-all available for port:%x\n", ntohs(lport));
862 #endif
863 LIST_FOREACH(inp, head, sctp_hash) {
864 SCTP_INP_RLOCK(inp);
865 if ((inp->sctp_flags & SCTP_PCB_FLAGS_BOUNDALL)) {
866 SCTP_INP_RUNLOCK(inp);
867 continue;
870 * Ok this could be a likely candidate, look at all of
871 * its addresses
873 if (inp->sctp_lport != lport) {
874 SCTP_INP_RUNLOCK(inp);
875 continue;
877 #ifdef SCTP_DEBUG
878 if (sctp_debug_on & SCTP_DEBUG_PCB1) {
879 kprintf("Ok, found matching local port\n");
881 #endif
882 LIST_FOREACH(laddr, &inp->sctp_addr_list, sctp_nxt_addr) {
883 if (laddr->ifa == NULL) {
884 #ifdef SCTP_DEBUG
885 if (sctp_debug_on & SCTP_DEBUG_PCB1) {
886 kprintf("An ounce of prevention is worth a pound of cure\n");
888 #endif
889 continue;
891 #ifdef SCTP_DEBUG
892 if (sctp_debug_on & SCTP_DEBUG_PCB1) {
893 kprintf("Ok laddr->ifa:%p is possible, ",
894 laddr->ifa);
896 #endif
897 if (laddr->ifa->ifa_addr == NULL) {
898 #ifdef SCTP_DEBUG
899 if (sctp_debug_on & SCTP_DEBUG_PCB1) {
900 kprintf("Huh IFA as an ifa_addr=NULL, ");
902 #endif
903 continue;
905 #ifdef SCTP_DEBUG
906 if (sctp_debug_on & SCTP_DEBUG_PCB1) {
907 kprintf("Ok laddr->ifa:%p is possible, ",
908 laddr->ifa->ifa_addr);
909 sctp_print_address(laddr->ifa->ifa_addr);
910 kprintf("looking for ");
911 sctp_print_address(nam);
913 #endif
914 if (laddr->ifa->ifa_addr->sa_family == nam->sa_family) {
915 /* possible, see if it matches */
916 struct sockaddr_in *intf_addr;
917 intf_addr = (struct sockaddr_in *)
918 laddr->ifa->ifa_addr;
919 if (nam->sa_family == AF_INET) {
920 if (sin->sin_addr.s_addr ==
921 intf_addr->sin_addr.s_addr) {
922 #ifdef SCTP_DEBUG
923 if (sctp_debug_on & SCTP_DEBUG_PCB1) {
924 kprintf("YES, return ep:%p\n", inp);
926 #endif
927 SCTP_INP_RUNLOCK(inp);
928 return (inp);
930 } else if (nam->sa_family == AF_INET6) {
931 struct sockaddr_in6 *intf_addr6;
932 intf_addr6 = (struct sockaddr_in6 *)
933 laddr->ifa->ifa_addr;
934 if (SCTP6_ARE_ADDR_EQUAL(&sin6->sin6_addr,
935 &intf_addr6->sin6_addr)) {
936 #ifdef SCTP_DEBUG
937 if (sctp_debug_on & SCTP_DEBUG_PCB1) {
938 kprintf("YES, return ep:%p\n", inp);
940 #endif
941 SCTP_INP_RUNLOCK(inp);
942 return (inp);
946 SCTP_INP_RUNLOCK(inp);
949 #ifdef SCTP_DEBUG
950 if (sctp_debug_on & SCTP_DEBUG_PCB1) {
951 kprintf("NO, Falls out to NULL\n");
953 #endif
954 return (NULL);
958 struct sctp_inpcb *
959 sctp_pcb_findep(struct sockaddr *nam, int find_tcp_pool, int have_lock)
962 * First we check the hash table to see if someone has this port
963 * bound with just the port.
965 struct sctp_inpcb *inp;
966 struct sctppcbhead *head;
967 struct sockaddr_in *sin;
968 struct sockaddr_in6 *sin6;
969 int lport;
970 #ifdef SCTP_DEBUG
971 if (sctp_debug_on & SCTP_DEBUG_PCB1) {
972 kprintf("Looking for endpoint %d :",
973 ntohs(((struct sockaddr_in *)nam)->sin_port));
974 sctp_print_address(nam);
976 #endif
977 if (nam->sa_family == AF_INET) {
978 sin = (struct sockaddr_in *)nam;
979 lport = ((struct sockaddr_in *)nam)->sin_port;
980 } else if (nam->sa_family == AF_INET6) {
981 sin6 = (struct sockaddr_in6 *)nam;
982 lport = ((struct sockaddr_in6 *)nam)->sin6_port;
983 } else {
984 /* unsupported family */
985 return (NULL);
988 * I could cheat here and just cast to one of the types but we will
989 * do it right. It also provides the check against an Unsupported
990 * type too.
992 /* Find the head of the ALLADDR chain */
993 if (have_lock == 0)
994 SCTP_INP_INFO_RLOCK();
995 head = &sctppcbinfo.sctp_ephash[SCTP_PCBHASH_ALLADDR(lport,
996 sctppcbinfo.hashmark)];
997 #ifdef SCTP_DEBUG
998 if (sctp_debug_on & SCTP_DEBUG_PCB1) {
999 kprintf("Main hash to lookup at head:%p\n", head);
1001 #endif
1002 inp = sctp_endpoint_probe(nam, head, lport);
1005 * If the TCP model exists it could be that the main listening
1006 * endpoint is gone but there exists a connected socket for this
1007 * guy yet. If so we can return the first one that we find. This
1008 * may NOT be the correct one but the sctp_findassociation_ep_addr
1009 * has further code to look at all TCP models.
1011 if (inp == NULL && find_tcp_pool) {
1012 unsigned int i;
1013 #ifdef SCTP_DEBUG
1014 if (sctp_debug_on & SCTP_DEBUG_PCB1) {
1015 kprintf("EP was NULL and TCP model is supported\n");
1017 #endif
1018 for (i = 0; i < sctppcbinfo.hashtblsize; i++) {
1020 * This is real gross, but we do NOT have a remote
1021 * port at this point depending on who is calling. We
1022 * must therefore look for ANY one that matches our
1023 * local port :/
1025 head = &sctppcbinfo.sctp_tcpephash[i];
1026 if (LIST_FIRST(head)) {
1027 inp = sctp_endpoint_probe(nam, head, lport);
1028 if (inp) {
1029 /* Found one */
1030 break;
1035 #ifdef SCTP_DEBUG
1036 if (sctp_debug_on & SCTP_DEBUG_PCB1) {
1037 kprintf("EP to return is %p\n", inp);
1039 #endif
1040 if (have_lock == 0) {
1041 if (inp) {
1042 SCTP_INP_WLOCK(inp);
1043 SCTP_INP_INCR_REF(inp);
1044 SCTP_INP_WUNLOCK(inp);
1046 SCTP_INP_INFO_RUNLOCK();
1047 } else {
1048 if (inp) {
1049 SCTP_INP_WLOCK(inp);
1050 SCTP_INP_INCR_REF(inp);
1051 SCTP_INP_WUNLOCK(inp);
1054 return (inp);
1058 * Find an association for an endpoint with the pointer to whom you want
1059 * to send to and the endpoint pointer. The address can be IPv4 or IPv6.
1060 * We may need to change the *to to some other struct like a mbuf...
1062 struct sctp_tcb *
1063 sctp_findassociation_addr_sa(struct sockaddr *to, struct sockaddr *from,
1064 struct sctp_inpcb **inp_p, struct sctp_nets **netp, int find_tcp_pool)
1066 struct sctp_inpcb *inp;
1067 struct sctp_tcb *retval;
1069 SCTP_INP_INFO_RLOCK();
1070 if (find_tcp_pool) {
1071 if (inp_p != NULL) {
1072 retval = sctp_tcb_special_locate(inp_p, from, to, netp);
1073 } else {
1074 retval = sctp_tcb_special_locate(&inp, from, to, netp);
1076 if (retval != NULL) {
1077 SCTP_INP_INFO_RUNLOCK();
1078 return (retval);
1081 inp = sctp_pcb_findep(to, 0, 1);
1082 if (inp_p != NULL) {
1083 *inp_p = inp;
1085 SCTP_INP_INFO_RUNLOCK();
1087 if (inp == NULL) {
1088 return (NULL);
1092 * ok, we have an endpoint, now lets find the assoc for it (if any)
1093 * we now place the source address or from in the to of the find
1094 * endpoint call. Since in reality this chain is used from the
1095 * inbound packet side.
1097 if (inp_p != NULL) {
1098 return (sctp_findassociation_ep_addr(inp_p, from, netp, to, NULL));
1099 } else {
1100 return (sctp_findassociation_ep_addr(&inp, from, netp, to, NULL));
1106 * This routine will grub through the mbuf that is a INIT or INIT-ACK and
1107 * find all addresses that the sender has specified in any address list.
1108 * Each address will be used to look up the TCB and see if one exists.
1110 static struct sctp_tcb *
1111 sctp_findassociation_special_addr(struct mbuf *m, int iphlen, int offset,
1112 struct sctphdr *sh, struct sctp_inpcb **inp_p, struct sctp_nets **netp,
1113 struct sockaddr *dest)
1115 struct sockaddr_in sin4;
1116 struct sockaddr_in6 sin6;
1117 struct sctp_paramhdr *phdr, parm_buf;
1118 struct sctp_tcb *retval;
1119 u_int32_t ptype, plen;
1121 memset(&sin4, 0, sizeof(sin4));
1122 memset(&sin6, 0, sizeof(sin6));
1123 sin4.sin_len = sizeof(sin4);
1124 sin4.sin_family = AF_INET;
1125 sin4.sin_port = sh->src_port;
1126 sin6.sin6_len = sizeof(sin6);
1127 sin6.sin6_family = AF_INET6;
1128 sin6.sin6_port = sh->src_port;
1130 retval = NULL;
1131 offset += sizeof(struct sctp_init_chunk);
1133 phdr = sctp_get_next_param(m, offset, &parm_buf, sizeof(parm_buf));
1134 while (phdr != NULL) {
1135 /* now we must see if we want the parameter */
1136 ptype = ntohs(phdr->param_type);
1137 plen = ntohs(phdr->param_length);
1138 if (plen == 0) {
1139 #ifdef SCTP_DEBUG
1140 if (sctp_debug_on & SCTP_DEBUG_PCB1) {
1141 kprintf("sctp_findassociation_special_addr: Impossible length in parameter\n");
1143 #endif /* SCTP_DEBUG */
1144 break;
1146 if (ptype == SCTP_IPV4_ADDRESS &&
1147 plen == sizeof(struct sctp_ipv4addr_param)) {
1148 /* Get the rest of the address */
1149 struct sctp_ipv4addr_param ip4_parm, *p4;
1151 phdr = sctp_get_next_param(m, offset,
1152 (struct sctp_paramhdr *)&ip4_parm, plen);
1153 if (phdr == NULL) {
1154 return (NULL);
1156 p4 = (struct sctp_ipv4addr_param *)phdr;
1157 memcpy(&sin4.sin_addr, &p4->addr, sizeof(p4->addr));
1158 /* look it up */
1159 retval = sctp_findassociation_ep_addr(inp_p,
1160 (struct sockaddr *)&sin4, netp, dest, NULL);
1161 if (retval != NULL) {
1162 return (retval);
1164 } else if (ptype == SCTP_IPV6_ADDRESS &&
1165 plen == sizeof(struct sctp_ipv6addr_param)) {
1166 /* Get the rest of the address */
1167 struct sctp_ipv6addr_param ip6_parm, *p6;
1169 phdr = sctp_get_next_param(m, offset,
1170 (struct sctp_paramhdr *)&ip6_parm, plen);
1171 if (phdr == NULL) {
1172 return (NULL);
1174 p6 = (struct sctp_ipv6addr_param *)phdr;
1175 memcpy(&sin6.sin6_addr, &p6->addr, sizeof(p6->addr));
1176 /* look it up */
1177 retval = sctp_findassociation_ep_addr(inp_p,
1178 (struct sockaddr *)&sin6, netp, dest, NULL);
1179 if (retval != NULL) {
1180 return (retval);
1183 offset += SCTP_SIZE32(plen);
1184 phdr = sctp_get_next_param(m, offset, &parm_buf,
1185 sizeof(parm_buf));
1187 return (NULL);
1190 static struct sctp_tcb *
1191 sctp_findassoc_by_vtag(struct sockaddr *from, uint32_t vtag,
1192 struct sctp_inpcb **inp_p, struct sctp_nets **netp, uint16_t rport,
1193 uint16_t lport)
1196 * Use my vtag to hash. If we find it we then verify the source addr
1197 * is in the assoc. If all goes well we save a bit on receipt of a packet.
1199 struct sctpasochead *head;
1200 struct sctp_nets *net;
1201 struct sctp_tcb *stcb;
1203 SCTP_INP_INFO_RLOCK();
1204 head = &sctppcbinfo.sctp_asochash[SCTP_PCBHASH_ASOC(vtag,
1205 sctppcbinfo.hashasocmark)];
1206 if (head == NULL) {
1207 /* invalid vtag */
1208 SCTP_INP_INFO_RUNLOCK();
1209 return (NULL);
1211 LIST_FOREACH(stcb, head, sctp_asocs) {
1212 SCTP_INP_RLOCK(stcb->sctp_ep);
1213 SCTP_TCB_LOCK(stcb);
1214 SCTP_INP_RUNLOCK(stcb->sctp_ep);
1215 if (stcb->asoc.my_vtag == vtag) {
1216 /* candidate */
1217 if (stcb->rport != rport) {
1219 * we could remove this if vtags are unique
1220 * across the system.
1222 SCTP_TCB_UNLOCK(stcb);
1223 continue;
1225 if (stcb->sctp_ep->sctp_lport != lport) {
1227 * we could remove this if vtags are unique
1228 * across the system.
1230 SCTP_TCB_UNLOCK(stcb);
1231 continue;
1233 net = sctp_findnet(stcb, from);
1234 if (net) {
1235 /* yep its him. */
1236 *netp = net;
1237 sctp_pegs[SCTP_VTAG_EXPR]++;
1238 *inp_p = stcb->sctp_ep;
1239 SCTP_INP_INFO_RUNLOCK();
1240 return (stcb);
1241 } else {
1242 /* not him, this should only
1243 * happen in rare cases so
1244 * I peg it.
1246 sctp_pegs[SCTP_VTAG_BOGUS]++;
1249 SCTP_TCB_UNLOCK(stcb);
1251 SCTP_INP_INFO_RUNLOCK();
1252 return (NULL);
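/*
 * sctp_findassociation_addr() below tries the cheapest lookups first: the
 * vtag hash when the packet carries a non-zero verification tag, then the
 * address/port based endpoint lookup, and finally (for INIT/INIT-ACK with an
 * endpoint but no association) a scan of every address listed in the chunk
 * via sctp_findassociation_special_addr().
 */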
1256 * Find an association with the pointer to the inbound IP packet. This
1257 * can be a IPv4 or IPv6 packet.
1259 struct sctp_tcb *
1260 sctp_findassociation_addr(struct mbuf *m, int iphlen, int offset,
1261 struct sctphdr *sh, struct sctp_chunkhdr *ch,
1262 struct sctp_inpcb **inp_p, struct sctp_nets **netp)
1264 int find_tcp_pool;
1265 struct ip *iph;
1266 struct sctp_tcb *retval;
1267 struct sockaddr_storage to_store, from_store;
1268 struct sockaddr *to = (struct sockaddr *)&to_store;
1269 struct sockaddr *from = (struct sockaddr *)&from_store;
1270 struct sctp_inpcb *inp;
1273 iph = mtod(m, struct ip *);
1274 if (iph->ip_v == IPVERSION) {
1275 /* its IPv4 */
1276 struct sockaddr_in *to4, *from4;
1278 to4 = (struct sockaddr_in *)&to_store;
1279 from4 = (struct sockaddr_in *)&from_store;
1280 bzero(to4, sizeof(*to4));
1281 bzero(from4, sizeof(*from4));
1282 from4->sin_family = to4->sin_family = AF_INET;
1283 from4->sin_len = to4->sin_len = sizeof(struct sockaddr_in);
1284 from4->sin_addr.s_addr = iph->ip_src.s_addr;
1285 to4->sin_addr.s_addr = iph->ip_dst.s_addr ;
1286 from4->sin_port = sh->src_port;
1287 to4->sin_port = sh->dest_port;
1288 } else if (iph->ip_v == (IPV6_VERSION >> 4)) {
1289 /* its IPv6 */
1290 struct ip6_hdr *ip6;
1291 struct sockaddr_in6 *to6, *from6;
1293 ip6 = mtod(m, struct ip6_hdr *);
1294 to6 = (struct sockaddr_in6 *)&to_store;
1295 from6 = (struct sockaddr_in6 *)&from_store;
1296 bzero(to6, sizeof(*to6));
1297 bzero(from6, sizeof(*from6));
1298 from6->sin6_family = to6->sin6_family = AF_INET6;
1299 from6->sin6_len = to6->sin6_len = sizeof(struct sockaddr_in6);
1300 to6->sin6_addr = ip6->ip6_dst;
1301 from6->sin6_addr = ip6->ip6_src;
1302 from6->sin6_port = sh->src_port;
1303 to6->sin6_port = sh->dest_port;
1304 /* Get the scopes in properly to the sin6 addr's */
1305 in6_recoverscope(to6, &to6->sin6_addr, NULL);
1306 #if defined(SCTP_BASE_FREEBSD) || defined(__APPLE__) || defined(__DragonFly__)
1307 in6_embedscope(&to6->sin6_addr, to6, NULL, NULL);
1308 #else
1309 in6_embedscope(&to6->sin6_addr, to6);
1310 #endif
1312 in6_recoverscope(from6, &from6->sin6_addr, NULL);
1313 #if defined(SCTP_BASE_FREEBSD) || defined(__APPLE__) || defined(__DragonFly__)
1314 in6_embedscope(&from6->sin6_addr, from6, NULL, NULL);
1315 #else
1316 in6_embedscope(&from6->sin6_addr, from6);
1317 #endif
1318 } else {
1319 /* Currently not supported. */
1320 return (NULL);
1322 #ifdef SCTP_DEBUG
1323 if (sctp_debug_on & SCTP_DEBUG_PCB1) {
1324 kprintf("Looking for port %d address :",
1325 ntohs(((struct sockaddr_in *)to)->sin_port));
1326 sctp_print_address(to);
1327 kprintf("From for port %d address :",
1328 ntohs(((struct sockaddr_in *)from)->sin_port));
1329 sctp_print_address(from);
1331 #endif
1333 if (sh->v_tag) {
1334 /* we only go down this path if vtag is non-zero */
1335 retval = sctp_findassoc_by_vtag(from, ntohl(sh->v_tag),
1336 inp_p, netp, sh->src_port, sh->dest_port);
1337 if (retval) {
1338 return (retval);
1341 find_tcp_pool = 0;
1342 if ((ch->chunk_type != SCTP_INITIATION) &&
1343 (ch->chunk_type != SCTP_INITIATION_ACK) &&
1344 (ch->chunk_type != SCTP_COOKIE_ACK) &&
1345 (ch->chunk_type != SCTP_COOKIE_ECHO)) {
1346 /* Other chunk types go to the tcp pool. */
1347 find_tcp_pool = 1;
1349 if (inp_p) {
1350 retval = sctp_findassociation_addr_sa(to, from, inp_p, netp,
1351 find_tcp_pool);
1352 inp = *inp_p;
1353 } else {
1354 retval = sctp_findassociation_addr_sa(to, from, &inp, netp,
1355 find_tcp_pool);
1357 #ifdef SCTP_DEBUG
1358 if (sctp_debug_on & SCTP_DEBUG_PCB1) {
1359 kprintf("retval:%p inp:%p\n", retval, inp);
1361 #endif
1362 if (retval == NULL && inp) {
1363 /* Found a EP but not this address */
1364 #ifdef SCTP_DEBUG
1365 if (sctp_debug_on & SCTP_DEBUG_PCB1) {
1366 kprintf("Found endpoint %p but no asoc - ep state:%x\n",
1367 inp, inp->sctp_flags);
1369 #endif
1370 if ((ch->chunk_type == SCTP_INITIATION) ||
1371 (ch->chunk_type == SCTP_INITIATION_ACK)) {
1373 * special hook, we do NOT return linp or an
1374 * association that is linked to an existing
1375 * association that is under the TCP pool (i.e. no
1376 * listener exists). The endpoint finding routine
1377 * will always find a listener before examining the
1378 * TCP pool.
1380 if (inp->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL) {
1381 #ifdef SCTP_DEBUG
1382 if (sctp_debug_on & SCTP_DEBUG_PCB1) {
1383 kprintf("Gak, its in the TCP pool... return NULL");
1385 #endif
1386 if (inp_p) {
1387 *inp_p = NULL;
1389 return (NULL);
1391 #ifdef SCTP_DEBUG
1392 if (sctp_debug_on & SCTP_DEBUG_PCB1) {
1393 kprintf("Now doing SPECIAL find\n");
1395 #endif
1396 retval = sctp_findassociation_special_addr(m, iphlen,
1397 offset, sh, inp_p, netp, to);
1400 #ifdef SCTP_DEBUG
1401 if (sctp_debug_on & SCTP_DEBUG_PCB1) {
1402 kprintf("retval is %p\n", retval);
1404 #endif
1405 return (retval);
1408 extern int sctp_max_burst_default;
1410 extern unsigned int sctp_delayed_sack_time_default;
1411 extern unsigned int sctp_heartbeat_interval_default;
1412 extern unsigned int sctp_pmtu_raise_time_default;
1413 extern unsigned int sctp_shutdown_guard_time_default;
1414 extern unsigned int sctp_secret_lifetime_default;
1416 extern unsigned int sctp_rto_max_default;
1417 extern unsigned int sctp_rto_min_default;
1418 extern unsigned int sctp_rto_initial_default;
1419 extern unsigned int sctp_init_rto_max_default;
1420 extern unsigned int sctp_valid_cookie_life_default;
1421 extern unsigned int sctp_init_rtx_max_default;
1422 extern unsigned int sctp_assoc_rtx_max_default;
1423 extern unsigned int sctp_path_rtx_max_default;
1424 extern unsigned int sctp_nr_outgoing_streams_default;
1427 * allocate a sctp_inpcb and setup a temporary binding to a port/all
1428 * addresses. This way if we don't get a bind we by default pick an ephemeral
1429 * port with all addresses bound.
1432 sctp_inpcb_alloc(struct socket *so)
1435 * we get called when a new endpoint starts up. We need to allocate
1436 * the sctp_inpcb structure from the zone and init it. Mark it as
1437 * unbound and find a port that we can use as an ephemeral with
1438 * INADDR_ANY. If the user binds later no problem we can then add
1439 * in the specific addresses. And setup the default parameters for
1440 * the EP.
1442 int i, error;
1443 struct sctp_inpcb *inp, *n_inp;
1444 struct sctp_pcb *m;
1445 struct timeval time;
1447 error = 0;
1449 /* Hack alert:
1451 * This code audits the entire INP list to see if
1452 * any ep's that are in the GONE state are now
1453 * all free. This should not happen really since when
1454 * the last association is freed we should end up deleting
1455 * the inpcb. This code, including the locks, should
1456 * be taken out ... since the last set of fixes I
1457 * have not seen the "Found a GONE on list" message
1458 * come out. But I am paranoid and we will leave this
1459 * in at the cost of efficiency on allocation of PCBs.
1460 * Probably we should move this to the invariant
1461 * compile options
1463 /* #ifdef INVARIANTS*/
1464 SCTP_INP_INFO_RLOCK();
1465 inp = LIST_FIRST(&sctppcbinfo.listhead);
1466 while (inp) {
1467 n_inp = LIST_NEXT(inp, sctp_list);
1468 if (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) {
1469 if (LIST_FIRST(&inp->sctp_asoc_list) == NULL) {
1470 /* finish the job now */
1471 kprintf("Found a GONE on list\n");
1472 SCTP_INP_INFO_RUNLOCK();
1473 sctp_inpcb_free(inp, 1);
1474 SCTP_INP_INFO_RLOCK();
1477 inp = n_inp;
1479 SCTP_INP_INFO_RUNLOCK();
1480 /* #endif INVARIANTS*/
1482 SCTP_INP_INFO_WLOCK();
1483 inp = (struct sctp_inpcb *)SCTP_ZONE_GET(sctppcbinfo.ipi_zone_ep);
1484 if (inp == NULL) {
1485 kprintf("Out of SCTP-INPCB structures - no resources\n");
1486 SCTP_INP_INFO_WUNLOCK();
1487 return (ENOBUFS);
1490 /* zap it */
1491 bzero(inp, sizeof(*inp));
1493 /* bump generations */
1494 inp->ip_inp.inp.inp_socket = so;
1496 /* setup socket pointers */
1497 inp->sctp_socket = so;
1499 /* setup inpcb socket too */
1500 inp->ip_inp.inp.inp_socket = so;
1501 inp->sctp_frag_point = SCTP_DEFAULT_MAXSEGMENT;
1502 #ifdef IPSEC
1503 #if !(defined(__OpenBSD__) || defined(__APPLE__))
1505 struct inpcbpolicy *pcb_sp = NULL;
1506 error = ipsec_init_policy(so, &pcb_sp);
1507 /* Arrange to share the policy */
1508 inp->ip_inp.inp.inp_sp = pcb_sp;
1509 ((struct in6pcb *)(&inp->ip_inp.inp))->in6p_sp = pcb_sp;
1511 #else
1512 /* not sure what to do for openbsd here */
1513 error = 0;
1514 #endif
1515 if (error != 0) {
1516 SCTP_ZONE_FREE(sctppcbinfo.ipi_zone_ep, inp);
1517 SCTP_INP_INFO_WUNLOCK();
1518 return error;
1520 #endif /* IPSEC */
1521 sctppcbinfo.ipi_count_ep++;
1522 #if defined(__FreeBSD__) || defined(__APPLE__) || defined(__DragonFly__)
1523 inp->ip_inp.inp.inp_gencnt = ++sctppcbinfo.ipi_gencnt_ep;
1524 inp->ip_inp.inp.inp_ip_ttl = ip_defttl;
1525 #else
1526 inp->inp_ip_ttl = ip_defttl;
1527 inp->inp_ip_tos = 0;
1528 #endif
1530 so->so_pcb = (caddr_t)inp;
1532 if ((so->so_type == SOCK_DGRAM) ||
1533 (so->so_type == SOCK_SEQPACKET)) {
1534 /* UDP style socket */
1535 inp->sctp_flags = (SCTP_PCB_FLAGS_UDPTYPE |
1536 SCTP_PCB_FLAGS_UNBOUND);
1537 inp->sctp_flags |= (SCTP_PCB_FLAGS_RECVDATAIOEVNT);
1538 } else if (so->so_type == SOCK_STREAM) {
1539 /* TCP style socket */
1540 inp->sctp_flags = (SCTP_PCB_FLAGS_TCPTYPE |
1541 SCTP_PCB_FLAGS_UNBOUND);
1542 inp->sctp_flags |= (SCTP_PCB_FLAGS_RECVDATAIOEVNT);
1543 } else {
1545 * unsupported socket type (RAW, etc)- in case we missed
1546 * it in protosw
1548 SCTP_ZONE_FREE(sctppcbinfo.ipi_zone_ep, inp);
1549 SCTP_INP_INFO_WUNLOCK();
1550 return (EOPNOTSUPP);
1552 inp->sctp_tcbhash = hashinit(sctp_pcbtblsize,
1553 #ifdef __NetBSD__
1554 HASH_LIST,
1555 #endif
1556 M_PCB,
1557 #if defined(__NetBSD__) || defined(__OpenBSD__)
1558 M_WAITOK,
1559 #endif
1560 &inp->sctp_hashmark);
1561 if (inp->sctp_tcbhash == NULL) {
1562 kprintf("Out of SCTP-INPCB->hashinit - no resources\n");
1563 SCTP_ZONE_FREE(sctppcbinfo.ipi_zone_ep, inp);
1564 SCTP_INP_INFO_WUNLOCK();
1565 return (ENOBUFS);
1567 /* LOCK init's */
1568 SCTP_INP_LOCK_INIT(inp);
1569 SCTP_ASOC_CREATE_LOCK_INIT(inp);
1570 /* lock the new ep */
1571 SCTP_INP_WLOCK(inp);
1573 /* add it to the info area */
1574 LIST_INSERT_HEAD(&sctppcbinfo.listhead, inp, sctp_list);
1575 SCTP_INP_INFO_WUNLOCK();
1577 LIST_INIT(&inp->sctp_addr_list);
1578 LIST_INIT(&inp->sctp_asoc_list);
1579 TAILQ_INIT(&inp->sctp_queue_list);
1580 /* Init the timer structure for signature change */
1581 #if defined (__FreeBSD__) && __FreeBSD_version >= 500000
1582 callout_init(&inp->sctp_ep.signature_change.timer, 0);
1583 #else
1584 callout_init(&inp->sctp_ep.signature_change.timer);
1585 #endif
1586 inp->sctp_ep.signature_change.type = SCTP_TIMER_TYPE_NEWCOOKIE;
1588 /* now init the actual endpoint default data */
1589 m = &inp->sctp_ep;
1591 /* setup the base timeout information */
1592 m->sctp_timeoutticks[SCTP_TIMER_SEND] = SEC_TO_TICKS(SCTP_SEND_SEC); /* needed ? */
1593 m->sctp_timeoutticks[SCTP_TIMER_INIT] = SEC_TO_TICKS(SCTP_INIT_SEC); /* needed ? */
1594 m->sctp_timeoutticks[SCTP_TIMER_RECV] = MSEC_TO_TICKS(sctp_delayed_sack_time_default);
1595 m->sctp_timeoutticks[SCTP_TIMER_HEARTBEAT] = sctp_heartbeat_interval_default; /* this is in MSEC */
1596 m->sctp_timeoutticks[SCTP_TIMER_PMTU] = SEC_TO_TICKS(sctp_pmtu_raise_time_default);
1597 m->sctp_timeoutticks[SCTP_TIMER_MAXSHUTDOWN] = SEC_TO_TICKS(sctp_shutdown_guard_time_default);
1598 m->sctp_timeoutticks[SCTP_TIMER_SIGNATURE] = SEC_TO_TICKS(sctp_secret_lifetime_default);
1599 /* all max/min max are in ms */
1600 m->sctp_maxrto = sctp_rto_max_default;
1601 m->sctp_minrto = sctp_rto_min_default;
1602 m->initial_rto = sctp_rto_initial_default;
1603 m->initial_init_rto_max = sctp_init_rto_max_default;
1605 m->max_open_streams_intome = MAX_SCTP_STREAMS;
1607 m->max_init_times = sctp_init_rtx_max_default;
1608 m->max_send_times = sctp_assoc_rtx_max_default;
1609 m->def_net_failure = sctp_path_rtx_max_default;
1610 m->sctp_sws_sender = SCTP_SWS_SENDER_DEF;
1611 m->sctp_sws_receiver = SCTP_SWS_RECEIVER_DEF;
1612 m->max_burst = sctp_max_burst_default;
1613 /* number of streams to pre-open on a association */
1614 m->pre_open_stream_count = sctp_nr_outgoing_streams_default;
1616 /* Add adaption cookie */
1617 m->adaption_layer_indicator = 0x504C5253;
1619 /* seed random number generator */
1620 m->random_counter = 1;
1621 m->store_at = SCTP_SIGNATURE_SIZE;
1622 #if (defined(__FreeBSD__) && (__FreeBSD_version < 500000)) || defined(__DragonFly__)
1623 read_random_unlimited(m->random_numbers, sizeof(m->random_numbers));
1624 #elif defined(__APPLE__) || (__FreeBSD_version > 500000)
1625 read_random(m->random_numbers, sizeof(m->random_numbers));
1626 #elif defined(__OpenBSD__)
1627 get_random_bytes(m->random_numbers, sizeof(m->random_numbers));
1628 #elif defined(__NetBSD__) && NRND > 0
1629 rnd_extract_data(m->random_numbers, sizeof(m->random_numbers),
1630 RND_EXTRACT_ANY);
1631 #else
1633 u_int32_t *ranm, *ranp;
1634 ranp = (u_int32_t *)&m->random_numbers;
1635 ranm = ranp + (SCTP_SIGNATURE_ALOC_SIZE/sizeof(u_int32_t));
1636 if ((u_long)ranp % 4) {
1637 /* not an even boundary? */
1638 ranp = (u_int32_t *)SCTP_SIZE32((u_long)ranp);
1640 while (ranp < ranm) {
1641 *ranp = random();
1642 ranp++;
1645 #endif
1646 sctp_fill_random_store(m);
1648 /* Minimum cookie size */
1649 m->size_of_a_cookie = (sizeof(struct sctp_init_msg) * 2) +
1650 sizeof(struct sctp_state_cookie);
1651 m->size_of_a_cookie += SCTP_SIGNATURE_SIZE;
1653 /* Setup the initial secret */
1654 SCTP_GETTIME_TIMEVAL(&time);
1655 m->time_of_secret_change = time.tv_sec;
1657 for (i = 0; i < SCTP_NUMBER_OF_SECRETS; i++) {
1658 m->secret_key[0][i] = sctp_select_initial_TSN(m);
1660 sctp_timer_start(SCTP_TIMER_TYPE_NEWCOOKIE, inp, NULL, NULL);
1662 /* How long is a cookie good for ? */
1663 m->def_cookie_life = sctp_valid_cookie_life_default;
1664 SCTP_INP_WUNLOCK(inp);
1665 return (error);
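/*
 * sctp_move_pcb_and_assoc() migrates a TCP-model association from the
 * listening endpoint (old_inp) to the endpoint of the accepted socket
 * (new_inp): the cookie secret state is copied over, new_inp is inserted
 * into the TCP-connected hash on (lport + rport), and the stcb is moved
 * onto new_inp's association list, copying old_inp's laddr list when the
 * new endpoint is subset bound.
 */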
1669 void
1670 sctp_move_pcb_and_assoc(struct sctp_inpcb *old_inp, struct sctp_inpcb *new_inp,
1671 struct sctp_tcb *stcb)
1673 uint16_t lport, rport;
1674 struct sctppcbhead *head;
1675 struct sctp_laddr *laddr, *oladdr;
1677 SCTP_TCB_UNLOCK(stcb);
1678 SCTP_INP_INFO_WLOCK();
1679 SCTP_INP_WLOCK(old_inp);
1680 SCTP_INP_WLOCK(new_inp);
1681 SCTP_TCB_LOCK(stcb);
1683 new_inp->sctp_ep.time_of_secret_change =
1684 old_inp->sctp_ep.time_of_secret_change;
1685 memcpy(new_inp->sctp_ep.secret_key, old_inp->sctp_ep.secret_key,
1686 sizeof(old_inp->sctp_ep.secret_key));
1687 new_inp->sctp_ep.current_secret_number =
1688 old_inp->sctp_ep.current_secret_number;
1689 new_inp->sctp_ep.last_secret_number =
1690 old_inp->sctp_ep.last_secret_number;
1691 new_inp->sctp_ep.size_of_a_cookie = old_inp->sctp_ep.size_of_a_cookie;
1693 /* Copy the port across */
1694 lport = new_inp->sctp_lport = old_inp->sctp_lport;
1695 rport = stcb->rport;
1696 /* Pull the tcb from the old association */
1697 LIST_REMOVE(stcb, sctp_tcbhash);
1698 LIST_REMOVE(stcb, sctp_tcblist);
1700 /* Now insert the new_inp into the TCP connected hash */
1701 head = &sctppcbinfo.sctp_tcpephash[SCTP_PCBHASH_ALLADDR((lport + rport),
1702 sctppcbinfo.hashtcpmark)];
1704 LIST_INSERT_HEAD(head, new_inp, sctp_hash);
1706 /* Now move the tcb into the endpoint list */
1707 LIST_INSERT_HEAD(&new_inp->sctp_asoc_list, stcb, sctp_tcblist);
1709 * Question, do we even need to worry about the ep-hash since
1710 * we only have one connection? Probably not :> so lets
1711 * get rid of it and not suck up any kernel memory in that.
1713 SCTP_INP_INFO_WUNLOCK();
1714 stcb->sctp_socket = new_inp->sctp_socket;
1715 stcb->sctp_ep = new_inp;
1716 if (new_inp->sctp_tcbhash != NULL) {
1717 FREE(new_inp->sctp_tcbhash, M_PCB);
1718 new_inp->sctp_tcbhash = NULL;
1720 if ((new_inp->sctp_flags & SCTP_PCB_FLAGS_BOUNDALL) == 0) {
1721 /* Subset bound, so copy in the laddr list from the old_inp */
1722 LIST_FOREACH(oladdr, &old_inp->sctp_addr_list, sctp_nxt_addr) {
1723 laddr = (struct sctp_laddr *)SCTP_ZONE_GET(
1724 sctppcbinfo.ipi_zone_laddr);
1725 if (laddr == NULL) {
1727 * Gak, what can we do? This assoc is really
1728 * HOSED. We probably should send an abort
1729 * here.
1731 #ifdef SCTP_DEBUG
1732 if (sctp_debug_on & SCTP_DEBUG_PCB1) {
1733 kprintf("Association hosed in TCP model, out of laddr memory\n");
1735 #endif /* SCTP_DEBUG */
1736 continue;
1738 sctppcbinfo.ipi_count_laddr++;
1739 sctppcbinfo.ipi_gencnt_laddr++;
1740 bzero(laddr, sizeof(*laddr));
1741 laddr->ifa = oladdr->ifa;
1742 LIST_INSERT_HEAD(&new_inp->sctp_addr_list, laddr,
1743 sctp_nxt_addr);
1744 new_inp->laddr_count++;
1747 SCTP_INP_WUNLOCK(new_inp);
1748 SCTP_INP_WUNLOCK(old_inp);
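/*
 * sctp_isport_inuse() scans the main endpoint hash bucket for lport and
 * reports a conflict (1) or no conflict (0), honoring the v4/v6 binding
 * rules: an IPV6_V6ONLY endpoint only collides with another v6 binding,
 * while a dual-bound v6 endpoint collides with everything on the port.
 */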
1751 static int
1752 sctp_isport_inuse(struct sctp_inpcb *inp, uint16_t lport)
1754 struct sctppcbhead *head;
1755 struct sctp_inpcb *t_inp;
1757 head = &sctppcbinfo.sctp_ephash[SCTP_PCBHASH_ALLADDR(lport,
1758 sctppcbinfo.hashmark)];
1759 LIST_FOREACH(t_inp, head, sctp_hash) {
1760 if (t_inp->sctp_lport != lport) {
1761 continue;
1763 /* This one is in use. */
1764 /* check the v6/v4 binding issue */
1765 if ((t_inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) &&
1766 #if defined(__FreeBSD__)
1767 (((struct inpcb *)t_inp)->inp_flags & IN6P_IPV6_V6ONLY)
1768 #else
1769 #if defined(__OpenBSD__)
1770 (0) /* For open bsd we do dual bind only */
1771 #else
1772 (((struct in6pcb *)t_inp)->in6p_flags & IN6P_IPV6_V6ONLY)
1773 #endif
1774 #endif
1776 if (inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) {
1777 /* collision in V6 space */
1778 return (1);
1779 } else {
1780 /* inp is BOUND_V4 no conflict */
1781 continue;
1783 } else if (t_inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) {
1784 /* t_inp is bound v4 and v6, conflict always */
1785 return (1);
1786 } else {
1787 /* t_inp is bound only V4 */
1788 if ((inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) &&
1789 #if defined(__FreeBSD__)
1790 (((struct inpcb *)inp)->inp_flags & IN6P_IPV6_V6ONLY)
1791 #else
1792 #if defined(__OpenBSD__)
1793 (0) /* For open bsd we do dual bind only */
1794 #else
1795 (((struct in6pcb *)inp)->in6p_flags & IN6P_IPV6_V6ONLY)
1796 #endif
1797 #endif
1799 /* no conflict */
1800 continue;
1802 /* else fall through to conflict */
1804 return (1);
1806 return (0);
1809 #if !(defined(__FreeBSD__) || defined(__APPLE__) || defined(__DragonFly__))
1811 * Don't know why, but without this there is an unknown reference when
1812 * compiling NetBSD... hmm
1814 extern void in6_sin6_2_sin (struct sockaddr_in *, struct sockaddr_in6 *sin6);
1815 #endif
1819 #if (defined(__FreeBSD__) && __FreeBSD_version >= 500000) || defined(__DragonFly__)
1820 sctp_inpcb_bind(struct socket *so, struct sockaddr *addr, struct thread *p)
1821 #else
1822 sctp_inpcb_bind(struct socket *so, struct sockaddr *addr, struct proc *p)
1823 #endif
1825 /* bind a ep to a socket address */
1826 struct sctppcbhead *head;
1827 struct sctp_inpcb *inp, *inp_tmp;
1828 struct inpcb *ip_inp;
1829 int bindall;
1830 uint16_t lport;
1831 int error;
1833 lport = 0;
1834 error = 0;
1835 bindall = 1;
1836 inp = (struct sctp_inpcb *)so->so_pcb;
1837 ip_inp = (struct inpcb *)so->so_pcb;
1838 #ifdef SCTP_DEBUG
1839 if (sctp_debug_on & SCTP_DEBUG_PCB1) {
1840 if (addr) {
1841 kprintf("Bind called port:%d\n",
1842 ntohs(((struct sockaddr_in *)addr)->sin_port));
1843 kprintf("Addr :");
1844 sctp_print_address(addr);
1847 #endif /* SCTP_DEBUG */
1848 if ((inp->sctp_flags & SCTP_PCB_FLAGS_UNBOUND) == 0) {
1849 /* already did a bind, subsequent binds NOT allowed ! */
1850 return (EINVAL);
1853 if (addr != NULL) {
1854 if (addr->sa_family == AF_INET) {
1855 struct sockaddr_in *sin;
1857 /* IPV6_V6ONLY socket? */
1858 if (
1859 #if defined(__FreeBSD__) || defined(__APPLE__) || defined(__DragonFly__)
1860 (ip_inp->inp_flags & IN6P_IPV6_V6ONLY)
1861 #else
1862 #if defined(__OpenBSD__)
1863 (0) /* For openbsd we do dual bind only */
1864 #else
1865 (((struct in6pcb *)inp)->in6p_flags & IN6P_IPV6_V6ONLY)
1866 #endif
1867 #endif
1869 return (EINVAL);
1872 if (addr->sa_len != sizeof(*sin))
1873 return (EINVAL);
1875 sin = (struct sockaddr_in *)addr;
1876 lport = sin->sin_port;
1878 if (sin->sin_addr.s_addr != INADDR_ANY) {
1879 bindall = 0;
1881 } else if (addr->sa_family == AF_INET6) {
1882 /* Only for pure IPv6 Address. (No IPv4 Mapped!) */
1883 struct sockaddr_in6 *sin6;
1885 sin6 = (struct sockaddr_in6 *)addr;
1887 if (addr->sa_len != sizeof(*sin6))
1888 return (EINVAL);
1890 lport = sin6->sin6_port;
1891 if (!IN6_IS_ADDR_UNSPECIFIED(&sin6->sin6_addr)) {
1892 bindall = 0;
1893 /* KAME hack: embed scopeid */
1894 #if defined(SCTP_BASE_FREEBSD) || defined(__APPLE__) || defined(__DragonFly__)
1895 if (in6_embedscope(&sin6->sin6_addr, sin6,
1896 ip_inp, NULL) != 0)
1897 return (EINVAL);
1898 #elif defined(__FreeBSD__)
1899 error = scope6_check_id(sin6, ip6_use_defzone);
1900 if (error != 0)
1901 return (error);
1902 #else
1903 if (in6_embedscope(&sin6->sin6_addr, sin6) != 0) {
1904 return (EINVAL);
1906 #endif
1908 #ifndef SCOPEDROUTING
1909 /* this must be cleared for ifa_ifwithaddr() */
1910 sin6->sin6_scope_id = 0;
1911 #endif /* SCOPEDROUTING */
1912 } else {
1913 return (EAFNOSUPPORT);
1916 SCTP_INP_INFO_WLOCK();
1917 SCTP_INP_WLOCK(inp);
1918 /* increase our count due to the unlock we do */
1919 SCTP_INP_INCR_REF(inp);
1920 if (lport) {
1922 * Did the caller specify a port? If so we must see if an
1923 * ep already has this one bound.
1925 /* got to be root to get at low ports */
1926 if (ntohs(lport) < IPPORT_RESERVED) {
1927 if (p && (error =
1928 #ifdef __FreeBSD__
1929 #if __FreeBSD_version >= 500000
1930 suser_cred(p->td_ucred, 0)
1931 #else
1932 suser(p)
1933 #endif
1934 #elif defined(__NetBSD__) || defined(__APPLE__)
1935 suser(p->p_ucred, &p->p_acflag)
1936 #elif defined(__DragonFly__)
1937 priv_check(p, PRIV_ROOT)
1938 #else
1939 suser(p, 0)
1940 #endif
1941 )) {
1942 SCTP_INP_DECR_REF(inp);
1943 SCTP_INP_WUNLOCK(inp);
1944 SCTP_INP_INFO_WUNLOCK();
1945 return (error);
1948 if (p == NULL) {
1949 SCTP_INP_DECR_REF(inp);
1950 SCTP_INP_WUNLOCK(inp);
1951 SCTP_INP_INFO_WUNLOCK();
1952 return (error);
1954 SCTP_INP_WUNLOCK(inp);
1955 inp_tmp = sctp_pcb_findep(addr, 0, 1);
1956 if (inp_tmp != NULL) {
1957 /* The lock guy returned; lower the count.
1958 * Note that we are not bound, so inp_tmp
1959 * should NEVER be inp. And it is this
1960 * inp (inp_tmp) that gets the reference
1961 * bump, so we must lower it.
1962 */
1963 SCTP_INP_WLOCK(inp_tmp);
1964 SCTP_INP_DECR_REF(inp_tmp);
1965 SCTP_INP_WUNLOCK(inp_tmp);
1967 /* unlock info */
1968 SCTP_INP_INFO_WUNLOCK();
1969 return (EADDRNOTAVAIL);
1971 SCTP_INP_WLOCK(inp);
1972 if (bindall) {
1973 /* verify that the lport is not already in use by a singleton */
1974 if (sctp_isport_inuse(inp, lport)) {
1975 /* Sorry someone already has this one bound */
1976 SCTP_INP_DECR_REF(inp);
1977 SCTP_INP_WUNLOCK(inp);
1978 SCTP_INP_INFO_WUNLOCK();
1979 return (EADDRNOTAVAIL);
1982 } else {
1983 /*
1984 * get any port, but let's make sure no one has any address
1985 * with this port bound
1986 */
1988 /*
1989 * setup the inp to the top (I could use the union, but this
1990 * is just as easy)
1991 */
1992 uint32_t port_guess;
1993 uint16_t port_attempt;
1994 int not_done=1;
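/*
 * Ephemeral port selection: draw a 32-bit random value, then try its low
 * 16 bits, its high 16 bits, and finally the sum of both halves, bumping
 * any candidate below IPPORT_RESERVED into the unprivileged range before
 * testing it with sctp_isport_inuse(); repeat with a fresh random value
 * until an unused port is found.
 */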
1996 while (not_done) {
1997 port_guess = sctp_select_initial_TSN(&inp->sctp_ep);
1998 port_attempt = (port_guess & 0x0000ffff);
1999 if (port_attempt == 0) {
2000 goto next_half;
2002 if (port_attempt < IPPORT_RESERVED) {
2003 port_attempt += IPPORT_RESERVED;
2006 if (sctp_isport_inuse(inp, htons(port_attempt)) == 0) {
2007 /* got a port we can use */
2008 not_done = 0;
2009 continue;
2011 /* try upper half */
2012 next_half:
2013 port_attempt = ((port_guess >> 16) & 0x0000ffff);
2014 if (port_attempt == 0) {
2015 goto last_try;
2017 if (port_attempt < IPPORT_RESERVED) {
2018 port_attempt += IPPORT_RESERVED;
2020 if (sctp_isport_inuse(inp, htons(port_attempt)) == 0) {
2021 /* got a port we can use */
2022 not_done = 0;
2023 continue;
2025 /* try two half's added together */
2026 last_try:
2027 port_attempt = (((port_guess >> 16) & 0x0000ffff) + (port_guess & 0x0000ffff));
2028 if (port_attempt == 0) {
2029 /* get a new random number */
2030 continue;
2032 if (port_attempt < IPPORT_RESERVED) {
2033 port_attempt += IPPORT_RESERVED;
2035 if (sctp_isport_inuse(inp, htons(port_attempt)) == 0) {
2036 /* got a port we can use */
2037 not_done = 0;
2038 continue;
2041 /* we don't get out of the loop until we have a port */
2042 lport = htons(port_attempt);
2044 SCTP_INP_DECR_REF(inp);
2045 if (inp->sctp_flags & (SCTP_PCB_FLAGS_SOCKET_GONE|SCTP_PCB_FLAGS_SOCKET_ALLGONE)) {
2046 /* this really should not happen. The guy
2047 * did a non-blocking bind and then did a close
2048 * at the same time.
2049 */
2050 SCTP_INP_WUNLOCK(inp);
2051 SCTP_INP_INFO_WUNLOCK();
2052 return (EINVAL);
2054 /* ok we look clear to give out this port, so lets setup the binding */
2055 if (bindall) {
2056 /* binding to all addresses, so just set in the proper flags */
2057 inp->sctp_flags |= (SCTP_PCB_FLAGS_BOUNDALL |
2058 SCTP_PCB_FLAGS_DO_ASCONF);
2059 /* set the automatic addr changes from kernel flag */
2060 if (sctp_auto_asconf == 0) {
2061 inp->sctp_flags &= ~SCTP_PCB_FLAGS_AUTO_ASCONF;
2062 } else {
2063 inp->sctp_flags |= SCTP_PCB_FLAGS_AUTO_ASCONF;
2065 } else {
2066 /*
2067 * bind specific: make sure the bound-all flag is off and add a new
2068 * address structure to the sctp_addr_list inside the ep structure.
2069 *
2070 * We will need to allocate one and insert it at the head.
2071 * The socketopt call can just insert new addresses in there
2072 * as well. It will also have to do the embed-scope kame hack
2073 * (before adding).
2074 */
2075 struct ifaddr *ifa;
2076 struct sockaddr_storage store_sa;
2078 memset(&store_sa, 0, sizeof(store_sa));
2079 if (addr->sa_family == AF_INET) {
2080 struct sockaddr_in *sin;
2082 sin = (struct sockaddr_in *)&store_sa;
2083 memcpy(sin, addr, sizeof(struct sockaddr_in));
2084 sin->sin_port = 0;
2085 } else if (addr->sa_family == AF_INET6) {
2086 struct sockaddr_in6 *sin6;
2088 sin6 = (struct sockaddr_in6 *)&store_sa;
2089 memcpy(sin6, addr, sizeof(struct sockaddr_in6));
2090 sin6->sin6_port = 0;
2092 /*
2093 * first find the interface with the bound address;
2094 * we need to zero out the port to find the address! yuck!
2095 * can't do this earlier since we need the port for sctp_pcb_findep()
2096 */
2097 ifa = sctp_find_ifa_by_addr((struct sockaddr *)&store_sa);
2098 if (ifa == NULL) {
2099 /* Can't find an interface with that address */
2100 SCTP_INP_WUNLOCK(inp);
2101 SCTP_INP_INFO_WUNLOCK();
2102 return (EADDRNOTAVAIL);
2104 if (addr->sa_family == AF_INET6) {
2105 struct in6_ifaddr *ifa6;
2106 ifa6 = (struct in6_ifaddr *)ifa;
2107 /*
2108 * allow binding of deprecated addresses as per
2109 * RFC 2462 and ipng discussion
2110 */
2111 if (ifa6->ia6_flags & (IN6_IFF_DETACHED |
2112 IN6_IFF_ANYCAST |
2113 IN6_IFF_NOTREADY)) {
2114 /* Can't bind a non-existent addr. */
2115 SCTP_INP_WUNLOCK(inp);
2116 SCTP_INP_INFO_WUNLOCK();
2117 return (EINVAL);
2120 /* we're not bound all */
2121 inp->sctp_flags &= ~SCTP_PCB_FLAGS_BOUNDALL;
2122 #if 0 /* use sysctl now */
2123 /* don't allow automatic addr changes from kernel */
2124 inp->sctp_flags &= ~SCTP_PCB_FLAGS_AUTO_ASCONF;
2125 #endif
2126 /* set the automatic addr changes from kernel flag */
2127 if (sctp_auto_asconf == 0) {
2128 inp->sctp_flags &= ~SCTP_PCB_FLAGS_AUTO_ASCONF;
2129 } else {
2130 inp->sctp_flags |= SCTP_PCB_FLAGS_AUTO_ASCONF;
2132 /* allow bindx() to send ASCONF's for binding changes */
2133 inp->sctp_flags |= SCTP_PCB_FLAGS_DO_ASCONF;
2134 /* add this address to the endpoint list */
2135 error = sctp_insert_laddr(&inp->sctp_addr_list, ifa);
2136 if (error != 0) {
2137 SCTP_INP_WUNLOCK(inp);
2138 SCTP_INP_INFO_WUNLOCK();
2139 return (error);
2141 inp->laddr_count++;
2143 /* find the bucket */
2144 head = &sctppcbinfo.sctp_ephash[SCTP_PCBHASH_ALLADDR(lport,
2145 sctppcbinfo.hashmark)];
2146 /* put it in the bucket */
2147 LIST_INSERT_HEAD(head, inp, sctp_hash);
2148 #ifdef SCTP_DEBUG
2149 if (sctp_debug_on & SCTP_DEBUG_PCB1) {
2150 kprintf("Main hash to bind at head:%p, bound port:%d\n", head, ntohs(lport));
2152 #endif
2153 /* set in the port */
2154 inp->sctp_lport = lport;
2156 /* turn off just the unbound flag */
2157 inp->sctp_flags &= ~SCTP_PCB_FLAGS_UNBOUND;
2158 SCTP_INP_WUNLOCK(inp);
2159 SCTP_INP_INFO_WUNLOCK();
2160 return (0);
2164 static void
2165 sctp_iterator_inp_being_freed(struct sctp_inpcb *inp, struct sctp_inpcb *inp_next)
2167 struct sctp_iterator *it;
2168 /* We enter with only the ITERATOR_LOCK in place and
2169 * a write lock on the inp_info stuff.
2170 */
2172 /* Go through all iterators; we must do this since
2173 * it is possible that some iterator does NOT have
2174 * the lock but is waiting for it. And the one that
2175 * had the lock has either moved in the last iteration
2176 * or we just cleared it above. We need to find all
2177 * of those guys. The list of iterators should never
2178 * be very big, though.
2179 */
2180 LIST_FOREACH(it, &sctppcbinfo.iteratorhead, sctp_nxt_itr) {
2181 if (it == inp->inp_starting_point_for_iterator)
2182 /* skip this guy, he's special */
2183 continue;
2184 if (it->inp == inp) {
2185 /* This is tricky, and we DON'T lock the iterator.
2186 * The reason is that he's running but waiting for me, since
2187 * inp->inp_starting_point_for_iterator has the lock
2188 * on me (the guy above we skipped). This tells us
2189 * it is not running but waiting for inp->inp_starting_point_for_iterator
2190 * to be released by the guy that does have our INP in a lock.
2191 */
2192 if (it->iterator_flags & SCTP_ITERATOR_DO_SINGLE_INP) {
2193 it->inp = NULL;
2194 it->stcb = NULL;
2195 } else {
2196 /* set him up to do the next guy not me */
2197 it->inp = inp_next;
2198 it->stcb = NULL;
2202 it = inp->inp_starting_point_for_iterator;
2203 if (it) {
2204 if (it->iterator_flags & SCTP_ITERATOR_DO_SINGLE_INP) {
2205 it->inp = NULL;
2206 } else {
2207 it->inp = inp_next;
2209 it->stcb = NULL;
2213 /* release sctp_inpcb unbind the port */
2214 void
2215 sctp_inpcb_free(struct sctp_inpcb *inp, int immediate)
2217 /*
2218 * Here we free an endpoint. We must find it (if it is in the hash
2219 * table) and remove it from there. Then we must also find it in
2220 * the overall list and remove it from there. After all removals are
2221 * complete, any timer has to be stopped. Then start the actual
2222 * freeing:
2223 * a) any local lists,
2224 * b) any associations,
2225 * c) the hash of all associations,
2226 * d) finally the ep itself.
2227 */
2228 struct sctp_pcb *m;
2229 struct sctp_inpcb *inp_save;
2230 struct sctp_tcb *asoc, *nasoc;
2231 struct sctp_laddr *laddr, *nladdr;
2232 struct inpcb *ip_pcb;
2233 struct socket *so;
2234 struct sctp_socket_q_list *sq;
2235 #if !defined(__FreeBSD__) || __FreeBSD_version < 500000
2236 struct rtentry *rt;
2237 #endif
2238 int cnt;
2240 crit_enter();
2241 SCTP_ASOC_CREATE_LOCK(inp);
2242 SCTP_INP_WLOCK(inp);
2244 if (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE) {
2245 /* been here before */
2246 crit_exit();
2247 kprintf("Endpoint was all gone (dup free)?\n");
2248 SCTP_INP_WUNLOCK(inp);
2249 SCTP_ASOC_CREATE_UNLOCK(inp);
2250 return;
2252 sctp_timer_stop(SCTP_TIMER_TYPE_NEWCOOKIE, inp, NULL, NULL);
2254 if (inp->control) {
2255 sctp_m_freem(inp->control);
2256 inp->control = NULL;
2258 if (inp->pkt) {
2259 sctp_m_freem(inp->pkt);
2260 inp->pkt = NULL;
2262 so = inp->sctp_socket;
2263 m = &inp->sctp_ep;
2264 ip_pcb = &inp->ip_inp.inp; /* we could just cast the main
2265 * pointer here but I will
2266 * be nice :> (i.e. ip_pcb = ep;)
2269 if (immediate == 0) {
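/*
 * Deferred teardown: walk every association still on this endpoint.
 * Handshaking assocs are freed outright, assocs with unread data are
 * aborted, idle ones get a SHUTDOWN, and anything with data still
 * queued is marked SHUTDOWN_PENDING.  If any assoc is left shutting
 * down, only mark the PCB SOCKET_GONE and return for now.
 */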
2270 int cnt_in_sd;
2271 cnt_in_sd = 0;
2272 for ((asoc = LIST_FIRST(&inp->sctp_asoc_list)); asoc != NULL;
2273 asoc = nasoc) {
2274 nasoc = LIST_NEXT(asoc, sctp_tcblist);
2275 if ((SCTP_GET_STATE(&asoc->asoc) == SCTP_STATE_COOKIE_WAIT) ||
2276 (SCTP_GET_STATE(&asoc->asoc) == SCTP_STATE_COOKIE_ECHOED)) {
2277 /* Just abandon things in the front states */
2278 SCTP_TCB_LOCK(asoc);
2279 SCTP_INP_WUNLOCK(inp);
2280 sctp_free_assoc(inp, asoc);
2281 SCTP_INP_WLOCK(inp);
2282 continue;
2283 } else {
2284 asoc->asoc.state |= SCTP_STATE_CLOSED_SOCKET;
2286 if ((asoc->asoc.size_on_delivery_queue > 0) ||
2287 (asoc->asoc.size_on_reasm_queue > 0) ||
2288 (asoc->asoc.size_on_all_streams > 0) ||
2289 (so && (so->so_rcv.ssb_cc > 0))
2291 /* Left with Data unread */
2292 struct mbuf *op_err;
2293 MGET(op_err, MB_DONTWAIT, MT_DATA);
2294 if (op_err) {
2295 /* Fill in the user initiated abort */
2296 struct sctp_paramhdr *ph;
2297 op_err->m_len =
2298 sizeof(struct sctp_paramhdr);
2299 ph = mtod(op_err,
2300 struct sctp_paramhdr *);
2301 ph->param_type = htons(
2302 SCTP_CAUSE_USER_INITIATED_ABT);
2303 ph->param_length = htons(op_err->m_len);
2305 SCTP_TCB_LOCK(asoc);
2306 sctp_send_abort_tcb(asoc, op_err);
2308 SCTP_INP_WUNLOCK(inp);
2309 sctp_free_assoc(inp, asoc);
2310 SCTP_INP_WLOCK(inp);
2311 continue;
2312 } else if (TAILQ_EMPTY(&asoc->asoc.send_queue) &&
2313 TAILQ_EMPTY(&asoc->asoc.sent_queue)) {
2314 if ((SCTP_GET_STATE(&asoc->asoc) != SCTP_STATE_SHUTDOWN_SENT) &&
2315 (SCTP_GET_STATE(&asoc->asoc) != SCTP_STATE_SHUTDOWN_ACK_SENT)) {
2316 /* there is nothing queued to send, so I send shutdown */
2317 SCTP_TCB_LOCK(asoc);
2318 sctp_send_shutdown(asoc, asoc->asoc.primary_destination);
2319 asoc->asoc.state = SCTP_STATE_SHUTDOWN_SENT;
2320 sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWN, asoc->sctp_ep, asoc,
2321 asoc->asoc.primary_destination);
2322 sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNGUARD, asoc->sctp_ep, asoc,
2323 asoc->asoc.primary_destination);
2324 sctp_chunk_output(inp, asoc, 1);
2325 SCTP_TCB_UNLOCK(asoc);
2327 } else {
2328 /* mark into shutdown pending */
2329 asoc->asoc.state |= SCTP_STATE_SHUTDOWN_PENDING;
2331 cnt_in_sd++;
2333 /* now, are there any left in our SHUTDOWN state? */
2334 if (cnt_in_sd) {
2335 inp->sctp_flags |= SCTP_PCB_FLAGS_SOCKET_GONE;
2336 crit_exit();
2337 SCTP_INP_WUNLOCK(inp);
2338 SCTP_ASOC_CREATE_UNLOCK(inp);
2339 return;
2342 #if defined(__FreeBSD__) && __FreeBSD_version >= 503000
2343 if (inp->refcount) {
2344 sctp_timer_start(SCTP_TIMER_TYPE_INPKILL, inp, NULL, NULL);
2345 SCTP_INP_WUNLOCK(inp);
2346 SCTP_ASOC_CREATE_UNLOCK(inp);
2347 return;
2349 #endif
2350 inp->sctp_flags |= SCTP_PCB_FLAGS_SOCKET_ALLGONE;
2351 #if !defined(__FreeBSD__) || __FreeBSD_version < 500000
2352 rt = ip_pcb->inp_route.ro_rt;
2353 #endif
2355 callout_stop(&inp->sctp_ep.signature_change.timer);
2357 if (so) {
2358 /* First take care of socket level things */
2359 #ifdef IPSEC
2360 #ifdef __OpenBSD__
2361 /* XXX IPsec cleanup here */
2362 crit_enter();
2363 if (ip_pcb->inp_tdb_in)
2364 TAILQ_REMOVE(&ip_pcb->inp_tdb_in->tdb_inp_in,
2365 ip_pcb, inp_tdb_in_next);
2366 if (ip_pcb->inp_tdb_out)
2367 TAILQ_REMOVE(&ip_pcb->inp_tdb_out->tdb_inp_out, ip_pcb,
2368 inp_tdb_out_next);
2369 if (ip_pcb->inp_ipsec_localid)
2370 ipsp_reffree(ip_pcb->inp_ipsec_localid);
2371 if (ip_pcb->inp_ipsec_remoteid)
2372 ipsp_reffree(ip_pcb->inp_ipsec_remoteid);
2373 if (ip_pcb->inp_ipsec_localcred)
2374 ipsp_reffree(ip_pcb->inp_ipsec_localcred);
2375 if (ip_pcb->inp_ipsec_remotecred)
2376 ipsp_reffree(ip_pcb->inp_ipsec_remotecred);
2377 if (ip_pcb->inp_ipsec_localauth)
2378 ipsp_reffree(ip_pcb->inp_ipsec_localauth);
2379 if (ip_pcb->inp_ipsec_remoteauth)
2380 ipsp_reffree(ip_pcb->inp_ipsec_remoteauth);
2381 crit_exit();
2382 #else
2383 ipsec4_delete_pcbpolicy(ip_pcb);
2384 #endif
2385 #endif /*IPSEC*/
2386 #if defined(__FreeBSD__) && __FreeBSD_version > 500000
2387 ACCEPT_LOCK();
2388 SOCK_LOCK(so);
2389 #endif
2390 so->so_pcb = 0;
2391 #if defined(__FreeBSD__) && __FreeBSD_version > 500000
2392 sotryfree(so);
2393 #else
2394 sofree(so);
2395 #endif
2398 if (ip_pcb->inp_options) {
2399 m_free(ip_pcb->inp_options);
2400 ip_pcb->inp_options = 0;
2402 #if !defined(__FreeBSD__) || __FreeBSD_version < 500000
2403 if (rt) {
2404 RTFREE(rt);
2405 ip_pcb->inp_route.ro_rt = 0;
2407 #endif
2408 if (ip_pcb->inp_moptions) {
2409 ip_freemoptions(ip_pcb->inp_moptions);
2410 ip_pcb->inp_moptions = 0;
2412 #if !(defined(__FreeBSD__) || defined(__APPLE__) || defined(__DragonFly__))
2413 inp->inp_vflag = 0;
2414 #else
2415 ip_pcb->inp_vflag = 0;
2416 #endif
2418 /* Now the sctp_pcb things */
2420 /*
2421 * free each asoc if it is not already closed/freed. We can't use
2422 * the macro here since le_next will get freed as part of the
2423 * sctp_free_assoc() call.
2424 */
2425 cnt = 0;
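/*
 * Forced teardown: abort (except those still in COOKIE_WAIT) and free
 * every remaining association before releasing the endpoint itself.
 */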
2426 for ((asoc = LIST_FIRST(&inp->sctp_asoc_list)); asoc != NULL;
2427 asoc = nasoc) {
2428 nasoc = LIST_NEXT(asoc, sctp_tcblist);
2429 SCTP_TCB_LOCK(asoc);
2430 if (SCTP_GET_STATE(&asoc->asoc) != SCTP_STATE_COOKIE_WAIT) {
2431 struct mbuf *op_err;
2432 MGET(op_err, MB_DONTWAIT, MT_DATA);
2433 if (op_err) {
2434 /* Fill in the user initiated abort */
2435 struct sctp_paramhdr *ph;
2436 op_err->m_len = sizeof(struct sctp_paramhdr);
2437 ph = mtod(op_err, struct sctp_paramhdr *);
2438 ph->param_type = htons(
2439 SCTP_CAUSE_USER_INITIATED_ABT);
2440 ph->param_length = htons(op_err->m_len);
2442 sctp_send_abort_tcb(asoc, op_err);
2444 cnt++;
2445 /*
2446 * sctp_free_assoc() will call sctp_inpcb_free()
2447 * if SCTP_PCB_FLAGS_SOCKET_GONE is set.
2448 * So we clear it before sctp_free_assoc(), making sure
2449 * there is no double sctp_inpcb_free().
2450 */
2451 inp->sctp_flags &= ~SCTP_PCB_FLAGS_SOCKET_GONE;
2452 SCTP_INP_WUNLOCK(inp);
2453 sctp_free_assoc(inp, asoc);
2454 SCTP_INP_WLOCK(inp);
2456 while ((sq = TAILQ_FIRST(&inp->sctp_queue_list)) != NULL) {
2457 TAILQ_REMOVE(&inp->sctp_queue_list, sq, next_sq);
2458 SCTP_ZONE_FREE(sctppcbinfo.ipi_zone_sockq, sq);
2459 sctppcbinfo.ipi_count_sockq--;
2460 sctppcbinfo.ipi_gencnt_sockq++;
2462 inp->sctp_socket = 0;
2463 /* Now first we remove ourselves from the overall list of all EP's */
2465 /* Unlock inp first, need correct order */
2466 SCTP_INP_WUNLOCK(inp);
2467 /* now iterator lock */
2468 SCTP_ITERATOR_LOCK();
2469 /* now info lock */
2470 SCTP_INP_INFO_WLOCK();
2471 /* now reget the inp lock */
2472 SCTP_INP_WLOCK(inp);
2474 inp_save = LIST_NEXT(inp, sctp_list);
2475 LIST_REMOVE(inp, sctp_list);
2476 /*
2477 * Now the question becomes whether this EP was ever bound at all.
2478 * If it was, then we must pull it out of the EP hash list.
2479 */
2480 if ((inp->sctp_flags & SCTP_PCB_FLAGS_UNBOUND) !=
2481 SCTP_PCB_FLAGS_UNBOUND) {
2482 /*
2483 * ok, this guy has been bound. Its port is somewhere
2484 * in the sctppcbinfo hash table. Remove it!
2485 */
2486 LIST_REMOVE(inp, sctp_hash);
2488 /* fix any iterators only after out of the list */
2489 sctp_iterator_inp_being_freed(inp, inp_save);
2490 SCTP_ITERATOR_UNLOCK();
2491 /*
2492 * if we have an address list, the following will free the list of
2493 * ifaddr's that are set into this ep. Again, macro limitations here,
2494 * since LIST_FOREACH could be a bad idea.
2495 */
2496 for ((laddr = LIST_FIRST(&inp->sctp_addr_list)); laddr != NULL;
2497 laddr = nladdr) {
2498 nladdr = LIST_NEXT(laddr, sctp_nxt_addr);
2499 LIST_REMOVE(laddr, sctp_nxt_addr);
2500 SCTP_ZONE_FREE(sctppcbinfo.ipi_zone_laddr, laddr);
2501 sctppcbinfo.ipi_gencnt_laddr++;
2502 sctppcbinfo.ipi_count_laddr--;
2504 /* Now lets see about freeing the EP hash table. */
2505 if (inp->sctp_tcbhash != NULL) {
2506 FREE(inp->sctp_tcbhash, M_PCB);
2507 inp->sctp_tcbhash = 0;
2509 SCTP_INP_WUNLOCK(inp);
2510 SCTP_ASOC_CREATE_UNLOCK(inp);
2511 SCTP_INP_LOCK_DESTROY(inp);
2512 SCTP_ASOC_CREATE_LOCK_DESTROY(inp);
2514 /* Now we must put the ep memory back into the zone pool */
2515 SCTP_ZONE_FREE(sctppcbinfo.ipi_zone_ep, inp);
2516 sctppcbinfo.ipi_count_ep--;
2518 SCTP_INP_INFO_WUNLOCK();
2519 crit_exit();
2523 struct sctp_nets *
2524 sctp_findnet(struct sctp_tcb *stcb, struct sockaddr *addr)
2526 struct sctp_nets *net;
2527 struct sockaddr_in *sin;
2528 struct sockaddr_in6 *sin6;
2529 /* use the peer's/remote port for lookup if unspecified */
2530 sin = (struct sockaddr_in *)addr;
2531 sin6 = (struct sockaddr_in6 *)addr;
2532 #if 0 /* why do we need to check the port for a nets list on an assoc? */
2533 if (stcb->rport != sin->sin_port) {
2534 /* we cheat and just a sin for this test */
2535 return (NULL);
2537 #endif
2538 /* locate the address */
2539 TAILQ_FOREACH(net, &stcb->asoc.nets, sctp_next) {
2540 if (sctp_cmpaddr(addr, (struct sockaddr *)&net->ro._l_addr))
2541 return (net);
2543 return (NULL);
2547 /*
2548 * adds a remote endpoint address, done with the INIT/INIT-ACK
2549 * as well as when an ASCONF arrives that adds it. It will also
2550 * initialize all the cwnd stats.
2551 */
2553 sctp_is_address_on_local_host(struct sockaddr *addr)
2555 struct ifnet *ifn;
2557 TAILQ_FOREACH(ifn, &ifnet, if_list) {
2558 struct ifaddr_container *ifac;
2560 TAILQ_FOREACH(ifac, &ifn->if_addrheads[mycpuid], ifa_link) {
2561 struct ifaddr *ifa = ifac->ifa;
2563 if (addr->sa_family == ifa->ifa_addr->sa_family) {
2564 /* same family */
2565 if (addr->sa_family == AF_INET) {
2566 struct sockaddr_in *sin, *sin_c;
2567 sin = (struct sockaddr_in *)addr;
2568 sin_c = (struct sockaddr_in *)
2569 ifa->ifa_addr;
2570 if (sin->sin_addr.s_addr ==
2571 sin_c->sin_addr.s_addr) {
2572 /* we are on the same machine */
2573 return (1);
2575 } else if (addr->sa_family == AF_INET6) {
2576 struct sockaddr_in6 *sin6, *sin_c6;
2577 sin6 = (struct sockaddr_in6 *)addr;
2578 sin_c6 = (struct sockaddr_in6 *)
2579 ifa->ifa_addr;
2580 if (SCTP6_ARE_ADDR_EQUAL(&sin6->sin6_addr,
2581 &sin_c6->sin6_addr)) {
2582 /* we are on the same machine */
2583 return (1);
2589 return (0);
2593 sctp_add_remote_addr(struct sctp_tcb *stcb, struct sockaddr *newaddr,
2594 int set_scope, int from)
2596 /*
2597 * The following is redundant to the same lines in
2598 * sctp_aloc_assoc() but is needed since others call the add
2599 * address function.
2600 */
2601 struct sctp_nets *net, *netfirst;
2602 int addr_inscope;
2604 #ifdef SCTP_DEBUG
2605 if (sctp_debug_on & SCTP_DEBUG_PCB1) {
2606 kprintf("Adding an address (from:%d) to the peer: ", from);
2607 sctp_print_address(newaddr);
2609 #endif
2610 netfirst = sctp_findnet(stcb, newaddr);
2611 if (netfirst) {
2612 /*
2613 * Lie and return ok; we don't want to make the association
2614 * go away for this behavior. It will happen in the TCP model
2615 * with a connected socket. It does not reach the hash table
2616 * until after the association is built, so it can't be found.
2617 * Mark it as reachable, since the initial creation will have
2618 * been cleared and the NOT_IN_ASSOC flag will have been
2619 * added... and we don't want to end up removing it back out.
2620 */
2621 if (netfirst->dest_state & SCTP_ADDR_UNCONFIRMED) {
2622 netfirst->dest_state = (SCTP_ADDR_REACHABLE|
2623 SCTP_ADDR_UNCONFIRMED);
2624 } else {
2625 netfirst->dest_state = SCTP_ADDR_REACHABLE;
2628 return (0);
2630 addr_inscope = 1;
2631 if (newaddr->sa_family == AF_INET) {
2632 struct sockaddr_in *sin;
2633 sin = (struct sockaddr_in *)newaddr;
2634 if (sin->sin_addr.s_addr == 0) {
2635 /* Invalid address */
2636 return (-1);
2638 /* zero out the bzero area */
2639 memset(&sin->sin_zero, 0, sizeof(sin->sin_zero));
2641 /* assure len is set */
2642 sin->sin_len = sizeof(struct sockaddr_in);
2643 if (set_scope) {
2644 #ifdef SCTP_DONT_DO_PRIVADDR_SCOPE
2645 stcb->asoc.ipv4_local_scope = 1;
2646 #else
2647 if (IN4_ISPRIVATE_ADDRESS(&sin->sin_addr)) {
2648 stcb->asoc.ipv4_local_scope = 1;
2650 #endif /* SCTP_DONT_DO_PRIVADDR_SCOPE */
2652 if (sctp_is_address_on_local_host(newaddr)) {
2653 stcb->asoc.loopback_scope = 1;
2654 stcb->asoc.ipv4_local_scope = 1;
2655 stcb->asoc.local_scope = 1;
2656 stcb->asoc.site_scope = 1;
2658 } else {
2659 if (from == 8) {
2660 /* From connectx */
2661 if (sctp_is_address_on_local_host(newaddr)) {
2662 stcb->asoc.loopback_scope = 1;
2663 stcb->asoc.ipv4_local_scope = 1;
2664 stcb->asoc.local_scope = 1;
2665 stcb->asoc.site_scope = 1;
2668 /* Validate the address is in scope */
2669 if ((IN4_ISPRIVATE_ADDRESS(&sin->sin_addr)) &&
2670 (stcb->asoc.ipv4_local_scope == 0)) {
2671 addr_inscope = 0;
2674 } else if (newaddr->sa_family == AF_INET6) {
2675 struct sockaddr_in6 *sin6;
2676 sin6 = (struct sockaddr_in6 *)newaddr;
2677 if (IN6_IS_ADDR_UNSPECIFIED(&sin6->sin6_addr)) {
2678 /* Invalid address */
2679 return (-1);
2681 /* assure len is set */
2682 sin6->sin6_len = sizeof(struct sockaddr_in6);
2683 if (set_scope) {
2684 if (sctp_is_address_on_local_host(newaddr)) {
2685 stcb->asoc.loopback_scope = 1;
2686 stcb->asoc.local_scope = 1;
2687 stcb->asoc.ipv4_local_scope = 1;
2688 stcb->asoc.site_scope = 1;
2689 } else if (IN6_IS_ADDR_LINKLOCAL(&sin6->sin6_addr)) {
2690 /*
2691 * If the new destination is LINK_LOCAL,
2692 * we must have common site scope. Don't set
2693 * the local scope, since we may not share all
2694 * links; only loopback can do this.
2695 * Links on the local network would also
2696 * be on our private network for v4 too.
2697 */
2698 stcb->asoc.ipv4_local_scope = 1;
2699 stcb->asoc.site_scope = 1;
2700 } else if (IN6_IS_ADDR_SITELOCAL(&sin6->sin6_addr)) {
2701 /*
2702 * If the new destination is SITE_LOCAL
2703 * then we must have site scope in common.
2704 */
2705 stcb->asoc.site_scope = 1;
2707 } else {
2708 if (from == 8) {
2709 /* From connectx */
2710 if (sctp_is_address_on_local_host(newaddr)) {
2711 stcb->asoc.loopback_scope = 1;
2712 stcb->asoc.ipv4_local_scope = 1;
2713 stcb->asoc.local_scope = 1;
2714 stcb->asoc.site_scope = 1;
2717 /* Validate the address is in scope */
2718 if (IN6_IS_ADDR_LOOPBACK(&sin6->sin6_addr) &&
2719 (stcb->asoc.loopback_scope == 0)) {
2720 addr_inscope = 0;
2721 } else if (IN6_IS_ADDR_LINKLOCAL(&sin6->sin6_addr) &&
2722 (stcb->asoc.local_scope == 0)) {
2723 addr_inscope = 0;
2724 } else if (IN6_IS_ADDR_SITELOCAL(&sin6->sin6_addr) &&
2725 (stcb->asoc.site_scope == 0)) {
2726 addr_inscope = 0;
2729 } else {
2730 /* not supported family type */
2731 return (-1);
2733 net = (struct sctp_nets *)SCTP_ZONE_GET(sctppcbinfo.ipi_zone_net);
2734 if (net == NULL) {
2735 return (-1);
2737 sctppcbinfo.ipi_count_raddr++;
2738 sctppcbinfo.ipi_gencnt_raddr++;
2739 bzero(net, sizeof(*net));
2740 memcpy(&net->ro._l_addr, newaddr, newaddr->sa_len);
2741 if (newaddr->sa_family == AF_INET) {
2742 ((struct sockaddr_in *)&net->ro._l_addr)->sin_port = stcb->rport;
2743 } else if (newaddr->sa_family == AF_INET6) {
2744 ((struct sockaddr_in6 *)&net->ro._l_addr)->sin6_port = stcb->rport;
2746 net->addr_is_local = sctp_is_address_on_local_host(newaddr);
2747 net->failure_threshold = stcb->asoc.def_net_failure;
2748 if (addr_inscope == 0) {
2749 #ifdef SCTP_DEBUG
2750 if (sctp_debug_on & SCTP_DEBUG_PCB1) {
2751 kprintf("Adding an address which is OUT OF SCOPE\n");
2753 #endif /* SCTP_DEBUG */
2754 net->dest_state = (SCTP_ADDR_REACHABLE |
2755 SCTP_ADDR_OUT_OF_SCOPE);
2756 } else {
2757 if (from == 8)
2758 /* 8 is passed by connect_x */
2759 net->dest_state = SCTP_ADDR_REACHABLE;
2760 else
2761 net->dest_state = SCTP_ADDR_REACHABLE |
2762 SCTP_ADDR_UNCONFIRMED;
2764 net->RTO = stcb->asoc.initial_rto;
2765 stcb->asoc.numnets++;
2766 net->ref_count = 1;
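/* start with a single reference, held for the assoc's nets list */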
2768 /* Init the timer structure */
2769 #if defined(__FreeBSD__) && __FreeBSD_version >= 500000
2770 callout_init(&net->rxt_timer.timer, 0);
2771 callout_init(&net->pmtu_timer.timer, 0);
2772 #else
2773 callout_init(&net->rxt_timer.timer);
2774 callout_init(&net->pmtu_timer.timer);
2775 #endif
2777 /* Now generate a route for this guy */
2778 /* KAME hack: embed scopeid */
2779 if (newaddr->sa_family == AF_INET6) {
2780 struct sockaddr_in6 *sin6;
2781 sin6 = (struct sockaddr_in6 *)&net->ro._l_addr;
2782 #if defined(SCTP_BASE_FREEBSD) || defined(__APPLE__) || defined(__DragonFly__)
2783 in6_embedscope(&sin6->sin6_addr, sin6,
2784 &stcb->sctp_ep->ip_inp.inp, NULL);
2785 #else
2786 in6_embedscope(&sin6->sin6_addr, sin6);
2787 #endif
2788 #ifndef SCOPEDROUTING
2789 sin6->sin6_scope_id = 0;
2790 #endif
2792 #if defined(__FreeBSD__) || defined(__APPLE__) || defined(__DragonFly__)
2793 rtalloc_ign((struct route *)&net->ro, 0UL);
2794 #else
2795 rtalloc((struct route *)&net->ro);
2796 #endif
2797 if (newaddr->sa_family == AF_INET6) {
2798 struct sockaddr_in6 *sin6;
2799 sin6 = (struct sockaddr_in6 *)&net->ro._l_addr;
2800 in6_recoverscope(sin6, &sin6->sin6_addr, NULL);
2802 if ((net->ro.ro_rt) &&
2803 (net->ro.ro_rt->rt_ifp)) {
2804 net->mtu = net->ro.ro_rt->rt_ifp->if_mtu;
2805 if (from == 1) {
2806 stcb->asoc.smallest_mtu = net->mtu;
2808 /* start things off to match mtu of interface please. */
2809 net->ro.ro_rt->rt_rmx.rmx_mtu = net->ro.ro_rt->rt_ifp->if_mtu;
2810 } else {
2811 net->mtu = stcb->asoc.smallest_mtu;
2813 if (stcb->asoc.smallest_mtu > net->mtu) {
2814 stcb->asoc.smallest_mtu = net->mtu;
2816 /* We take the max of the burst limit times the MTU, or the INITIAL_CWND.
2817 * We then limit this to 4 MTUs' worth of sending.
2818 */
2819 net->cwnd = min((net->mtu * 4), max((stcb->asoc.max_burst * net->mtu), SCTP_INITIAL_CWND));
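/*
 * Example: with a 1500-byte MTU and max_burst of 4 this works out to
 * min(6000, max(6000, SCTP_INITIAL_CWND)) = 6000 bytes, and the 2*MTU
 * floor below then leaves it untouched.
 */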
2821 /* we always get at LEAST 2 MTU's */
2822 if (net->cwnd < (2 * net->mtu)) {
2823 net->cwnd = 2 * net->mtu;
2826 net->ssthresh = stcb->asoc.peers_rwnd;
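/*
 * The initial slow-start threshold is simply the peer's advertised rwnd;
 * RFC 2960 allows an arbitrarily high initial ssthresh and suggests the
 * receiver's advertised window as one choice.
 */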
2828 net->src_addr_selected = 0;
2829 netfirst = TAILQ_FIRST(&stcb->asoc.nets);
2830 if (net->ro.ro_rt == NULL) {
2831 /* Since we have no route put it at the back */
2832 TAILQ_INSERT_TAIL(&stcb->asoc.nets, net, sctp_next);
2833 } else if (netfirst == NULL) {
2834 /* We are the first one in the pool. */
2835 TAILQ_INSERT_HEAD(&stcb->asoc.nets, net, sctp_next);
2836 } else if (netfirst->ro.ro_rt == NULL) {
2837 /*
2838 * First one has NO route. Place this one ahead of the
2839 * first one.
2840 */
2841 TAILQ_INSERT_HEAD(&stcb->asoc.nets, net, sctp_next);
2842 } else if (net->ro.ro_rt->rt_ifp != netfirst->ro.ro_rt->rt_ifp) {
2843 /*
2844 * This one has a different interface than the one at the
2845 * top of the list. Place it ahead.
2846 */
2847 TAILQ_INSERT_HEAD(&stcb->asoc.nets, net, sctp_next);
2848 } else {
2849 /*
2850 * Ok we have the same interface as the first one. Move
2851 * forward until we find either
2852 * a) one with a NULL route... insert ahead of that
2853 * b) one with a different ifp.. insert after that.
2854 * c) end of the list.. insert at the tail.
2855 */
2856 struct sctp_nets *netlook;
2857 do {
2858 netlook = TAILQ_NEXT(netfirst, sctp_next);
2859 if (netlook == NULL) {
2860 /* End of the list */
2861 TAILQ_INSERT_TAIL(&stcb->asoc.nets, net,
2862 sctp_next);
2863 break;
2864 } else if (netlook->ro.ro_rt == NULL) {
2865 /* next one has NO route */
2866 TAILQ_INSERT_BEFORE(netfirst, net, sctp_next);
2867 break;
2868 } else if (netlook->ro.ro_rt->rt_ifp !=
2869 net->ro.ro_rt->rt_ifp) {
2870 TAILQ_INSERT_AFTER(&stcb->asoc.nets, netlook,
2871 net, sctp_next);
2872 break;
2874 /* Shift forward */
2875 netfirst = netlook;
2876 } while (netlook != NULL);
2878 /* got to have a primary set */
2879 if (stcb->asoc.primary_destination == 0) {
2880 stcb->asoc.primary_destination = net;
2881 } else if ((stcb->asoc.primary_destination->ro.ro_rt == NULL) &&
2882 (net->ro.ro_rt)) {
2883 /* No route to current primary adopt new primary */
2884 stcb->asoc.primary_destination = net;
2886 sctp_timer_start(SCTP_TIMER_TYPE_PATHMTURAISE, stcb->sctp_ep, stcb,
2887 net);
2889 return (0);
2893 /*
2894 * allocate an association and add it to the endpoint. The caller must
2895 * be careful to add all additional addresses right away once they are
2896 * known, or else the assoc may experience a blackout scenario.
2897 */
2898 struct sctp_tcb *
2899 sctp_aloc_assoc(struct sctp_inpcb *inp, struct sockaddr *firstaddr,
2900 int for_a_init, int *error, uint32_t override_tag)
2902 struct sctp_tcb *stcb;
2903 struct sctp_association *asoc;
2904 struct sctpasochead *head;
2905 uint16_t rport;
2906 int err;
2908 /*
2909 * Assumption made here:
2910 * the caller has done a sctp_findassociation_ep_addr(ep, addr's)
2911 * to make sure the address does not already exist.
2912 */
2913 if (sctppcbinfo.ipi_count_asoc >= SCTP_MAX_NUM_OF_ASOC) {
2914 /* Hit max assoc, sorry no more */
2915 *error = ENOBUFS;
2916 return (NULL);
2918 SCTP_INP_RLOCK(inp);
2919 if (inp->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL) {
2920 /*
2921 * If it's in the TCP pool, it's NOT allowed to create an
2922 * association. The parent listener needs to call
2923 * sctp_aloc_assoc(), or the one-to-many socket does. If a
2924 * peeled-off or connected one does this, it's an error.
2925 */
2926 SCTP_INP_RUNLOCK(inp);
2927 *error = EINVAL;
2928 return (NULL);
2931 #ifdef SCTP_DEBUG
2932 if (sctp_debug_on & SCTP_DEBUG_PCB3) {
2933 kprintf("Allocate an association for peer:");
2934 if (firstaddr)
2935 sctp_print_address(firstaddr);
2936 else
2937 kprintf("None\n");
2938 kprintf("Port:%d\n",
2939 ntohs(((struct sockaddr_in *)firstaddr)->sin_port));
2941 #endif /* SCTP_DEBUG */
2942 if (firstaddr->sa_family == AF_INET) {
2943 struct sockaddr_in *sin;
2944 sin = (struct sockaddr_in *)firstaddr;
2945 if ((sin->sin_port == 0) || (sin->sin_addr.s_addr == 0)) {
2946 /* Invalid address */
2947 #ifdef SCTP_DEBUG
2948 if (sctp_debug_on & SCTP_DEBUG_PCB3) {
2949 kprintf("peer address invalid\n");
2951 #endif
2952 SCTP_INP_RUNLOCK(inp);
2953 *error = EINVAL;
2954 return (NULL);
2956 rport = sin->sin_port;
2957 } else if (firstaddr->sa_family == AF_INET6) {
2958 struct sockaddr_in6 *sin6;
2959 sin6 = (struct sockaddr_in6 *)firstaddr;
2960 if ((sin6->sin6_port == 0) ||
2961 (IN6_IS_ADDR_UNSPECIFIED(&sin6->sin6_addr))) {
2962 /* Invalid address */
2963 #ifdef SCTP_DEBUG
2964 if (sctp_debug_on & SCTP_DEBUG_PCB3) {
2965 kprintf("peer address invalid\n");
2967 #endif
2968 SCTP_INP_RUNLOCK(inp);
2969 *error = EINVAL;
2970 return (NULL);
2972 rport = sin6->sin6_port;
2973 } else {
2974 /* not supported family type */
2975 #ifdef SCTP_DEBUG
2976 if (sctp_debug_on & SCTP_DEBUG_PCB3) {
2977 kprintf("BAD family %d\n", firstaddr->sa_family);
2979 #endif
2980 SCTP_INP_RUNLOCK(inp);
2981 *error = EINVAL;
2982 return (NULL);
2984 SCTP_INP_RUNLOCK(inp);
2985 if (inp->sctp_flags & SCTP_PCB_FLAGS_UNBOUND) {
2986 /*
2987 * If you have not performed a bind, then we need to do
2988 * the ephemeral bind for you.
2989 */
2990 #ifdef SCTP_DEBUG
2991 if (sctp_debug_on & SCTP_DEBUG_PCB3) {
2992 kprintf("Doing implicit BIND\n");
2994 #endif
2996 if ((err = sctp_inpcb_bind(inp->sctp_socket,
2997 NULL,
2998 #if (defined(__FreeBSD__) && __FreeBSD_version >= 500000) || defined(__DragonFly__)
2999 NULL
3000 #else
3001 NULL
3002 #endif
3003 ))){
3004 /* bind error, probably perm */
3005 #ifdef SCTP_DEBUG
3006 if (sctp_debug_on & SCTP_DEBUG_PCB3) {
3007 kprintf("BIND FAILS ret:%d\n", err);
3009 #endif
3011 *error = err;
3012 return (NULL);
3015 stcb = (struct sctp_tcb *)SCTP_ZONE_GET(sctppcbinfo.ipi_zone_asoc);
3016 if (stcb == NULL) {
3017 /* out of memory? */
3018 #ifdef SCTP_DEBUG
3019 if (sctp_debug_on & SCTP_DEBUG_PCB3) {
3020 kprintf("aloc_assoc: no assoc mem left, stcb=NULL\n");
3022 #endif
3023 *error = ENOMEM;
3024 return (NULL);
3026 sctppcbinfo.ipi_count_asoc++;
3027 sctppcbinfo.ipi_gencnt_asoc++;
3029 bzero(stcb, sizeof(*stcb));
3030 asoc = &stcb->asoc;
3031 SCTP_TCB_LOCK_INIT(stcb);
3032 /* setup back pointer's */
3033 stcb->sctp_ep = inp;
3034 stcb->sctp_socket = inp->sctp_socket;
3035 if ((err = sctp_init_asoc(inp, asoc, for_a_init, override_tag))) {
3036 /* failed */
3037 SCTP_TCB_LOCK_DESTROY (stcb);
3038 SCTP_ZONE_FREE(sctppcbinfo.ipi_zone_asoc, stcb);
3039 sctppcbinfo.ipi_count_asoc--;
3040 #ifdef SCTP_DEBUG
3041 if (sctp_debug_on & SCTP_DEBUG_PCB3) {
3042 kprintf("aloc_assoc: couldn't init asoc, out of mem?!\n");
3044 #endif
3045 *error = err;
3046 return (NULL);
3048 /* and the port */
3049 stcb->rport = rport;
3050 SCTP_INP_INFO_WLOCK();
3051 SCTP_INP_WLOCK(inp);
3052 if (inp->sctp_flags & (SCTP_PCB_FLAGS_SOCKET_GONE|SCTP_PCB_FLAGS_SOCKET_ALLGONE)) {
3053 /* inpcb freed while alloc going on */
3054 SCTP_TCB_LOCK_DESTROY (stcb);
3055 SCTP_ZONE_FREE(sctppcbinfo.ipi_zone_asoc, stcb);
3056 SCTP_INP_WUNLOCK(inp);
3057 SCTP_INP_INFO_WUNLOCK();
3058 sctppcbinfo.ipi_count_asoc--;
3059 #ifdef SCTP_DEBUG
3060 if (sctp_debug_on & SCTP_DEBUG_PCB3) {
3061 kprintf("aloc_assoc: couldn't init asoc, out of mem?!\n");
3063 #endif
3064 *error = EINVAL;
3065 return (NULL);
3067 SCTP_TCB_LOCK(stcb);
3069 /* now that my_vtag is set, add it to the hash */
3070 head = &sctppcbinfo.sctp_asochash[SCTP_PCBHASH_ASOC(stcb->asoc.my_vtag,
3071 sctppcbinfo.hashasocmark)];
3072 /* put it in the bucket in the vtag hash of assoc's for the system */
3073 LIST_INSERT_HEAD(head, stcb, sctp_asocs);
3074 SCTP_INP_INFO_WUNLOCK();
3077 if ((err = sctp_add_remote_addr(stcb, firstaddr, 1, 1))) {
3078 /* failure.. memory error? */
3079 if (asoc->strmout)
3080 FREE(asoc->strmout, M_PCB);
3081 if (asoc->mapping_array)
3082 FREE(asoc->mapping_array, M_PCB);
3084 SCTP_ZONE_FREE(sctppcbinfo.ipi_zone_asoc, stcb);
3085 sctppcbinfo.ipi_count_asoc--;
3086 #ifdef SCTP_DEBUG
3087 if (sctp_debug_on & SCTP_DEBUG_PCB3) {
3088 kprintf("aloc_assoc: couldn't add remote addr!\n");
3090 #endif
3091 SCTP_TCB_LOCK_DESTROY (stcb);
3092 *error = ENOBUFS;
3093 return (NULL);
3095 /* Init all the timers */
3096 #if defined(__FreeBSD__) && __FreeBSD_version >= 500000
3097 callout_init(&asoc->hb_timer.timer, 0);
3098 callout_init(&asoc->dack_timer.timer, 0);
3099 callout_init(&asoc->asconf_timer.timer, 0);
3100 callout_init(&asoc->shut_guard_timer.timer, 0);
3101 callout_init(&asoc->autoclose_timer.timer, 0);
3102 callout_init(&asoc->delayed_event_timer.timer, 0);
3103 #else
3104 callout_init(&asoc->hb_timer.timer);
3105 callout_init(&asoc->dack_timer.timer);
3106 callout_init(&asoc->asconf_timer.timer);
3107 callout_init(&asoc->shut_guard_timer.timer);
3108 callout_init(&asoc->autoclose_timer.timer);
3109 callout_init(&asoc->delayed_event_timer.timer);
3110 #endif
3111 LIST_INSERT_HEAD(&inp->sctp_asoc_list, stcb, sctp_tcblist);
3112 /* now file the port under the hash as well */
3113 if (inp->sctp_tcbhash != NULL) {
3114 head = &inp->sctp_tcbhash[SCTP_PCBHASH_ALLADDR(stcb->rport,
3115 inp->sctp_hashmark)];
3116 LIST_INSERT_HEAD(head, stcb, sctp_tcbhash);
3118 SCTP_INP_WUNLOCK(inp);
3119 #ifdef SCTP_DEBUG
3120 if (sctp_debug_on & SCTP_DEBUG_PCB1) {
3121 kprintf("Association %p now allocated\n", stcb);
3123 #endif
3124 return (stcb);
3127 void
3128 sctp_free_remote_addr(struct sctp_nets *net)
3130 if (net == NULL)
3131 return;
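/* drop one reference; the net is only destroyed once the count reaches zero */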
3132 net->ref_count--;
3133 if (net->ref_count <= 0) {
3134 /* stop timer if running */
3135 callout_stop(&net->rxt_timer.timer);
3136 callout_stop(&net->pmtu_timer.timer);
3137 net->dest_state = SCTP_ADDR_NOT_REACHABLE;
3138 SCTP_ZONE_FREE(sctppcbinfo.ipi_zone_net, net);
3139 sctppcbinfo.ipi_count_raddr--;
3144 * remove a remote endpoint address from an association, it
3145 * will fail if the address does not exist.
3148 sctp_del_remote_addr(struct sctp_tcb *stcb, struct sockaddr *remaddr)
3150 /*
3151 * Here we need to remove a remote address. This is quite simple: we
3152 * first find it in the list of addresses for the association
3153 * (tasoc->asoc.nets) and then, if it is there, we do a TAILQ_REMOVE on
3154 * that item.
3155 * Note we do not allow it to be removed if there are no other
3156 * addresses.
3157 */
3158 struct sctp_association *asoc;
3159 struct sctp_nets *net, *net_tmp;
3160 asoc = &stcb->asoc;
3161 if (asoc->numnets < 2) {
3162 /* Must have at LEAST two remote addresses */
3163 return (-1);
3165 /* locate the address */
3166 for (net = TAILQ_FIRST(&asoc->nets); net != NULL; net = net_tmp) {
3167 net_tmp = TAILQ_NEXT(net, sctp_next);
3168 if (net->ro._l_addr.sa.sa_family != remaddr->sa_family) {
3169 continue;
3171 if (sctp_cmpaddr((struct sockaddr *)&net->ro._l_addr,
3172 remaddr)) {
3173 /* we found the guy */
3174 asoc->numnets--;
3175 TAILQ_REMOVE(&asoc->nets, net, sctp_next);
3176 sctp_free_remote_addr(net);
3177 if (net == asoc->primary_destination) {
3178 /* Reset primary */
3179 struct sctp_nets *lnet;
3180 lnet = TAILQ_FIRST(&asoc->nets);
3181 /* Try to find a confirmed primary */
3182 asoc->primary_destination =
3183 sctp_find_alternate_net(stcb, lnet);
3185 if (net == asoc->last_data_chunk_from) {
3186 /* Reset last_data_chunk_from */
3187 asoc->last_data_chunk_from =
3188 TAILQ_FIRST(&asoc->nets);
3190 if (net == asoc->last_control_chunk_from) {
3191 /* Reset last_control_chunk_from */
3192 asoc->last_control_chunk_from =
3193 TAILQ_FIRST(&asoc->nets);
3195 if (net == asoc->asconf_last_sent_to) {
3196 /* Reset asconf_last_sent_to */
3197 asoc->asconf_last_sent_to =
3198 TAILQ_FIRST(&asoc->nets);
3200 return (0);
3203 /* not found. */
3204 return (-2);
3208 static void
3209 sctp_add_vtag_to_timewait(struct sctp_inpcb *inp, u_int32_t tag)
3211 struct sctpvtaghead *chain;
3212 struct sctp_tagblock *twait_block;
3213 struct timeval now;
3214 int set, i;
3215 SCTP_GETTIME_TIMEVAL(&now);
3216 chain = &sctppcbinfo.vtag_timewait[(tag % SCTP_STACK_VTAG_HASH_SIZE)];
3217 set = 0;
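/*
 * Look for a usable slot in the tag blocks already hanging off this
 * hash chain; a new block is only allocated when no slot is found.
 */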
3218 if (!LIST_EMPTY(chain)) {
3219 /* Block(s) present, lets find space, and expire on the fly */
3220 LIST_FOREACH(twait_block, chain, sctp_nxt_tagblock) {
3221 for (i = 0; i < SCTP_NUMBER_IN_VTAG_BLOCK; i++) {
3222 if ((twait_block->vtag_block[i].v_tag == 0) &&
3223 !set) {
3224 twait_block->vtag_block[0].tv_sec_at_expire =
3225 now.tv_sec + SCTP_TIME_WAIT;
3226 twait_block->vtag_block[0].v_tag = tag;
3227 set = 1;
3228 } else if ((twait_block->vtag_block[i].v_tag) &&
3229 ((long)twait_block->vtag_block[i].tv_sec_at_expire >
3230 now.tv_sec)) {
3231 /* Audit expires this guy */
3232 twait_block->vtag_block[i].tv_sec_at_expire = 0;
3233 twait_block->vtag_block[i].v_tag = 0;
3234 if (set == 0) {
3235 /* Reuse it for my new tag */
3236 twait_block->vtag_block[0].tv_sec_at_expire = now.tv_sec + SCTP_TIME_WAIT;
3237 twait_block->vtag_block[0].v_tag = tag;
3238 set = 1;
3242 if (set) {
3243 /*
3244 * We only audit up to the block where we
3245 * could place our tag.
3246 */
3247 break;
3251 /* Need to add a new block to chain */
3252 if (!set) {
3253 MALLOC(twait_block, struct sctp_tagblock *,
3254 sizeof(struct sctp_tagblock), M_PCB, M_NOWAIT);
3255 if (twait_block == NULL) {
3256 return;
3258 memset(twait_block, 0, sizeof(struct sctp_timewait));
3259 LIST_INSERT_HEAD(chain, twait_block, sctp_nxt_tagblock);
3260 twait_block->vtag_block[0].tv_sec_at_expire = now.tv_sec +
3261 SCTP_TIME_WAIT;
3262 twait_block->vtag_block[0].v_tag = tag;
3267 static void
3268 sctp_iterator_asoc_being_freed(struct sctp_inpcb *inp, struct sctp_tcb *stcb)
3270 struct sctp_iterator *it;
3274 /* Unlock the tcb lock; we do this so
3275 * we avoid a deadlock scenario where
3276 * the iterator is waiting on the TCB lock
3277 * and the TCB lock is waiting on the iterator
3278 * lock.
3279 */
3280 SCTP_ITERATOR_LOCK();
3281 SCTP_INP_INFO_WLOCK();
3282 SCTP_INP_WLOCK(inp);
3283 SCTP_TCB_LOCK(stcb);
3285 it = stcb->asoc.stcb_starting_point_for_iterator;
3286 if (it == NULL) {
3287 return;
3289 if (it->inp != stcb->sctp_ep) {
3290 /* hm, focused on the wrong one? */
3291 return;
3293 if (it->stcb != stcb) {
3294 return;
3296 it->stcb = LIST_NEXT(stcb, sctp_tcblist);
3297 if (it->stcb == NULL) {
3298 /* done with all asoc's in this endpoint */
3299 if (it->iterator_flags & SCTP_ITERATOR_DO_SINGLE_INP) {
3300 it->inp = NULL;
3301 } else {
3303 it->inp = LIST_NEXT(inp, sctp_list);
3308 /*
3309 * Free the association after un-hashing the remote port.
3310 */
3311 void
3312 sctp_free_assoc(struct sctp_inpcb *inp, struct sctp_tcb *stcb)
3314 struct sctp_association *asoc;
3315 struct sctp_nets *net, *prev;
3316 struct sctp_laddr *laddr;
3317 struct sctp_tmit_chunk *chk;
3318 struct sctp_asconf_addr *aparam;
3319 struct sctp_socket_q_list *sq;
3321 /* first, lets purge the entry from the hash table. */
3322 crit_enter();
3323 if (stcb->asoc.state == 0) {
3324 kprintf("Freeing already free association:%p - huh??\n",
3325 stcb);
3326 crit_exit();
3327 return;
3329 asoc = &stcb->asoc;
3330 asoc->state = 0;
3331 /* now clean up any other timers */
3332 callout_stop(&asoc->hb_timer.timer);
3333 callout_stop(&asoc->dack_timer.timer);
3334 callout_stop(&asoc->asconf_timer.timer);
3335 callout_stop(&asoc->shut_guard_timer.timer);
3336 callout_stop(&asoc->autoclose_timer.timer);
3337 callout_stop(&asoc->delayed_event_timer.timer);
3338 TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
3339 callout_stop(&net->rxt_timer.timer);
3340 callout_stop(&net->pmtu_timer.timer);
3343 /* Iterator asoc being freed: we send an
3344 * unlocked TCB. It returns with INP_INFO
3345 * and INP write locked and the TCB locked,
3346 * too, and of course the iterator lock
3347 * in place as well.
3348 */
3349 SCTP_TCB_UNLOCK(stcb);
3350 sctp_iterator_asoc_being_freed(inp, stcb);
3352 /* Null all of my entry's on the socket q */
3353 TAILQ_FOREACH(sq, &inp->sctp_queue_list, next_sq) {
3354 if (sq->tcb == stcb) {
3355 sq->tcb = NULL;
3359 if (inp->sctp_tcb_at_block == (void *)stcb) {
3360 inp->error_on_block = ECONNRESET;
3363 if (inp->sctp_tcbhash) {
3364 LIST_REMOVE(stcb, sctp_tcbhash);
3366 /* Now lets remove it from the list of ALL associations in the EP */
3367 LIST_REMOVE(stcb, sctp_tcblist);
3368 SCTP_INP_WUNLOCK(inp);
3369 SCTP_ITERATOR_UNLOCK();
3372 /* pull from vtag hash */
3373 LIST_REMOVE(stcb, sctp_asocs);
3375 /*
3376 * Now, before we can free the assoc, we must remove all of the
3377 * networks and any other allocated space, i.e. do the removes here
3378 * before the SCTP_ZONE_FREE() of the tasoc entry.
3379 */
3381 sctp_add_vtag_to_timewait(inp, asoc->my_vtag);
3382 SCTP_INP_INFO_WUNLOCK();
3383 prev = NULL;
3384 while (!TAILQ_EMPTY(&asoc->nets)) {
3385 net = TAILQ_FIRST(&asoc->nets);
3386 /* pull from list */
3387 if ((sctppcbinfo.ipi_count_raddr == 0) || (prev == net)) {
3388 break;
3390 prev = net;
3391 TAILQ_REMOVE(&asoc->nets, net, sctp_next);
3392 /* free it */
3393 net->ref_count = 0;
3394 SCTP_ZONE_FREE(sctppcbinfo.ipi_zone_net, net);
3395 sctppcbinfo.ipi_count_raddr--;
3397 /*
3398 * The chunk lists and such SHOULD be empty, but we check them
3399 * just in case.
3400 */
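/*
 * Every cleanup loop below follows the same pattern: pop a chunk, free
 * any attached mbuf data, return the chunk to ipi_zone_chunk and adjust
 * the global chunk counters.
 */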
3401 /* anything on the wheel needs to be removed */
3402 while (!TAILQ_EMPTY(&asoc->out_wheel)) {
3403 struct sctp_stream_out *outs;
3404 outs = TAILQ_FIRST(&asoc->out_wheel);
3405 TAILQ_REMOVE(&asoc->out_wheel, outs, next_spoke);
3406 /* now clean up any chunks here */
3407 chk = TAILQ_FIRST(&outs->outqueue);
3408 while (chk) {
3409 TAILQ_REMOVE(&outs->outqueue, chk, sctp_next);
3410 if (chk->data) {
3411 sctp_m_freem(chk->data);
3412 chk->data = NULL;
3414 chk->whoTo = NULL;
3415 chk->asoc = NULL;
3416 /* Free the chunk */
3417 SCTP_ZONE_FREE(sctppcbinfo.ipi_zone_chunk, chk);
3418 sctppcbinfo.ipi_count_chunk--;
3419 sctppcbinfo.ipi_gencnt_chunk++;
3420 if ((int)sctppcbinfo.ipi_count_chunk < 0) {
3421 panic("Chunk count is negative");
3423 chk = TAILQ_FIRST(&outs->outqueue);
3425 outs = TAILQ_FIRST(&asoc->out_wheel);
3428 if (asoc->pending_reply) {
3429 FREE(asoc->pending_reply, M_PCB);
3430 asoc->pending_reply = NULL;
3432 chk = TAILQ_FIRST(&asoc->pending_reply_queue);
3433 while (chk) {
3434 TAILQ_REMOVE(&asoc->pending_reply_queue, chk, sctp_next);
3435 if (chk->data) {
3436 sctp_m_freem(chk->data);
3437 chk->data = NULL;
3439 chk->whoTo = NULL;
3440 chk->asoc = NULL;
3441 /* Free the chunk */
3442 SCTP_ZONE_FREE(sctppcbinfo.ipi_zone_chunk, chk);
3443 sctppcbinfo.ipi_count_chunk--;
3444 sctppcbinfo.ipi_gencnt_chunk++;
3445 if ((int)sctppcbinfo.ipi_count_chunk < 0) {
3446 panic("Chunk count is negative");
3448 chk = TAILQ_FIRST(&asoc->pending_reply_queue);
3450 /* pending send queue SHOULD be empty */
3451 if (!TAILQ_EMPTY(&asoc->send_queue)) {
3452 chk = TAILQ_FIRST(&asoc->send_queue);
3453 while (chk) {
3454 TAILQ_REMOVE(&asoc->send_queue, chk, sctp_next);
3455 if (chk->data) {
3456 sctp_m_freem(chk->data);
3457 chk->data = NULL;
3459 SCTP_ZONE_FREE(sctppcbinfo.ipi_zone_chunk, chk);
3460 sctppcbinfo.ipi_count_chunk--;
3461 if ((int)sctppcbinfo.ipi_count_chunk < 0) {
3462 panic("Chunk count is negative");
3464 sctppcbinfo.ipi_gencnt_chunk++;
3465 chk = TAILQ_FIRST(&asoc->send_queue);
3468 /* sent queue SHOULD be empty */
3469 if (!TAILQ_EMPTY(&asoc->sent_queue)) {
3470 chk = TAILQ_FIRST(&asoc->sent_queue);
3471 while (chk) {
3472 TAILQ_REMOVE(&asoc->sent_queue, chk, sctp_next);
3473 if (chk->data) {
3474 sctp_m_freem(chk->data);
3475 chk->data = NULL;
3477 SCTP_ZONE_FREE(sctppcbinfo.ipi_zone_chunk, chk);
3478 sctppcbinfo.ipi_count_chunk--;
3479 if ((int)sctppcbinfo.ipi_count_chunk < 0) {
3480 panic("Chunk count is negative");
3482 sctppcbinfo.ipi_gencnt_chunk++;
3483 chk = TAILQ_FIRST(&asoc->sent_queue);
3486 /* control queue MAY not be empty */
3487 if (!TAILQ_EMPTY(&asoc->control_send_queue)) {
3488 chk = TAILQ_FIRST(&asoc->control_send_queue);
3489 while (chk) {
3490 TAILQ_REMOVE(&asoc->control_send_queue, chk, sctp_next);
3491 if (chk->data) {
3492 sctp_m_freem(chk->data);
3493 chk->data = NULL;
3495 SCTP_ZONE_FREE(sctppcbinfo.ipi_zone_chunk, chk);
3496 sctppcbinfo.ipi_count_chunk--;
3497 if ((int)sctppcbinfo.ipi_count_chunk < 0) {
3498 panic("Chunk count is negative");
3500 sctppcbinfo.ipi_gencnt_chunk++;
3501 chk = TAILQ_FIRST(&asoc->control_send_queue);
3504 if (!TAILQ_EMPTY(&asoc->reasmqueue)) {
3505 chk = TAILQ_FIRST(&asoc->reasmqueue);
3506 while (chk) {
3507 TAILQ_REMOVE(&asoc->reasmqueue, chk, sctp_next);
3508 if (chk->data) {
3509 sctp_m_freem(chk->data);
3510 chk->data = NULL;
3512 SCTP_ZONE_FREE(sctppcbinfo.ipi_zone_chunk, chk);
3513 sctppcbinfo.ipi_count_chunk--;
3514 if ((int)sctppcbinfo.ipi_count_chunk < 0) {
3515 panic("Chunk count is negative");
3517 sctppcbinfo.ipi_gencnt_chunk++;
3518 chk = TAILQ_FIRST(&asoc->reasmqueue);
3521 if (!TAILQ_EMPTY(&asoc->delivery_queue)) {
3522 chk = TAILQ_FIRST(&asoc->delivery_queue);
3523 while (chk) {
3524 TAILQ_REMOVE(&asoc->delivery_queue, chk, sctp_next);
3525 if (chk->data) {
3526 sctp_m_freem(chk->data);
3527 chk->data = NULL;
3529 SCTP_ZONE_FREE(sctppcbinfo.ipi_zone_chunk, chk);
3530 sctppcbinfo.ipi_count_chunk--;
3531 if ((int)sctppcbinfo.ipi_count_chunk < 0) {
3532 panic("Chunk count is negative");
3534 sctppcbinfo.ipi_gencnt_chunk++;
3535 chk = TAILQ_FIRST(&asoc->delivery_queue);
3538 if (asoc->mapping_array) {
3539 FREE(asoc->mapping_array, M_PCB);
3540 asoc->mapping_array = NULL;
3543 /* the stream outs */
3544 if (asoc->strmout) {
3545 FREE(asoc->strmout, M_PCB);
3546 asoc->strmout = NULL;
3548 asoc->streamoutcnt = 0;
3549 if (asoc->strmin) {
3550 int i;
3551 for (i = 0; i < asoc->streamincnt; i++) {
3552 if (!TAILQ_EMPTY(&asoc->strmin[i].inqueue)) {
3553 /* We have something on the stream-in queue */
3554 chk = TAILQ_FIRST(&asoc->strmin[i].inqueue);
3555 while (chk) {
3556 TAILQ_REMOVE(&asoc->strmin[i].inqueue,
3557 chk, sctp_next);
3558 if (chk->data) {
3559 sctp_m_freem(chk->data);
3560 chk->data = NULL;
3562 SCTP_ZONE_FREE(sctppcbinfo.ipi_zone_chunk,
3563 chk);
3564 sctppcbinfo.ipi_count_chunk--;
3565 if ((int)sctppcbinfo.ipi_count_chunk < 0) {
3566 panic("Chunk count is negative");
3568 sctppcbinfo.ipi_gencnt_chunk++;
3569 chk = TAILQ_FIRST(&asoc->strmin[i].inqueue);
3573 FREE(asoc->strmin, M_PCB);
3574 asoc->strmin = NULL;
3576 asoc->streamincnt = 0;
3577 /* local addresses, if any */
3578 while (!LIST_EMPTY(&asoc->sctp_local_addr_list)) {
3579 laddr = LIST_FIRST(&asoc->sctp_local_addr_list);
3580 LIST_REMOVE(laddr, sctp_nxt_addr);
3581 SCTP_ZONE_FREE(sctppcbinfo.ipi_zone_laddr, laddr);
3582 sctppcbinfo.ipi_count_laddr--;
3584 /* pending asconf (address) parameters */
3585 while (!TAILQ_EMPTY(&asoc->asconf_queue)) {
3586 aparam = TAILQ_FIRST(&asoc->asconf_queue);
3587 TAILQ_REMOVE(&asoc->asconf_queue, aparam, next);
3588 FREE(aparam, M_PCB);
3590 if (asoc->last_asconf_ack_sent != NULL) {
3591 sctp_m_freem(asoc->last_asconf_ack_sent);
3592 asoc->last_asconf_ack_sent = NULL;
3594 /* Insert new items here :> */
3596 /* Get rid of LOCK */
3597 SCTP_TCB_LOCK_DESTROY(stcb);
3599 /* now clean up the tasoc itself */
3600 SCTP_ZONE_FREE(sctppcbinfo.ipi_zone_asoc, stcb);
3601 sctppcbinfo.ipi_count_asoc--;
3602 if ((inp->sctp_socket->so_snd.ssb_cc) ||
3603 (inp->sctp_socket->so_snd.ssb_mbcnt)) {
3604 /* This will happen when an abort is done */
3605 inp->sctp_socket->so_snd.ssb_cc = 0;
3606 inp->sctp_socket->so_snd.ssb_mbcnt = 0;
3608 if (inp->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) {
3609 if ((inp->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL) == 0) {
3610 if (inp->sctp_flags & SCTP_PCB_FLAGS_CONNECTED) {
3611 /*
3612 * For the base fd that is NOT in the TCP pool, we
3613 * turn off the connected flag. This allows
3614 * non-listening endpoints to connect/shutdown/
3615 * connect.
3616 */
3617 inp->sctp_flags &= ~SCTP_PCB_FLAGS_CONNECTED;
3618 soisdisconnected(inp->sctp_socket);
3621 * For those that are in the TCP pool we just leave
3622 * so it cannot be used. When they close the fd we
3623 * will free it all.
3627 if (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) {
3628 sctp_inpcb_free(inp, 0);
3630 crit_exit();
3635 * determine if a destination is "reachable" based upon the addresses
3636 * bound to the current endpoint (e.g. only v4 or v6 currently bound)
3639 * FIX: if we allow assoc-level bindx(), then this needs to be fixed
3640 * to use assoc level v4/v6 flags, as the assoc *may* not have the
3641 * same address types bound as its endpoint
3644 sctp_destination_is_reachable(struct sctp_tcb *stcb, struct sockaddr *destaddr)
3646 struct sctp_inpcb *inp;
3647 int answer;
3649 /* No locks here; the TCB, in all cases, is already
3650 * locked and an assoc is up. There is either an
3651 * INP lock applied by the caller (in the asconf case, when
3652 * deleting an address) or NOT in the HB case; however,
3653 * if HB, then the INP refcount increment is in place and the INP
3654 * will not be removed (on top of the fact that
3655 * we have a TCB lock). So we only want to
3656 * read the sctp_flags, which is either bound-all
3657 * or not... no protection needed, since once an
3658 * assoc is up you can't be changing your binding.
3659 */
3660 inp = stcb->sctp_ep;
3661 if (inp->sctp_flags & SCTP_PCB_FLAGS_BOUNDALL) {
3662 /* if bound all, destination is not restricted */
3663 /* RRS: Question during lock work: Is this
3664 * correct? If you are bound-all you still
3665 * might need to obey the V4--V6 flags???
3666 * IMO this bound-all stuff needs to be removed!
3668 return (1);
3670 /* NOTE: all "scope" checks are done when local addresses are added */
3671 if (destaddr->sa_family == AF_INET6) {
3672 #if !(defined(__FreeBSD__) || defined(__APPLE__) || defined(__DragonFly__))
3673 answer = inp->inp_vflag & INP_IPV6;
3674 #else
3675 answer = inp->ip_inp.inp.inp_vflag & INP_IPV6;
3676 #endif
3677 } else if (destaddr->sa_family == AF_INET) {
3678 #if !(defined(__FreeBSD__) || defined(__APPLE__) || defined(__DragonFly__))
3679 answer = inp->inp_vflag & INP_IPV4;
3680 #else
3681 answer = inp->ip_inp.inp.inp_vflag & INP_IPV4;
3682 #endif
3683 } else {
3684 /* invalid family, so it's unreachable */
3685 answer = 0;
3687 return (answer);
3691 * update the inp_vflags on an endpoint
3693 static void
3694 sctp_update_ep_vflag(struct sctp_inpcb *inp) {
3695 struct sctp_laddr *laddr;
3697 /* first clear the flag */
3698 #if !(defined(__FreeBSD__) || defined(__APPLE__) || defined(__DragonFly__))
3699 inp->inp_vflag = 0;
3700 #else
3701 inp->ip_inp.inp.inp_vflag = 0;
3702 #endif
3703 /* set the flag based on addresses on the ep list */
3704 LIST_FOREACH(laddr, &inp->sctp_addr_list, sctp_nxt_addr) {
3705 if (laddr->ifa == NULL) {
3706 #ifdef SCTP_DEBUG
3707 if (sctp_debug_on & SCTP_DEBUG_PCB1) {
3708 kprintf("An ounce of prevention is worth a pound of cure\n");
3710 #endif /* SCTP_DEBUG */
3711 continue;
3713 if (laddr->ifa->ifa_addr == NULL) {
3714 continue;
3716 if (laddr->ifa->ifa_addr->sa_family == AF_INET6) {
3717 #if !(defined(__FreeBSD__) || defined(__APPLE__) || defined(__DragonFly__))
3718 inp->inp_vflag |= INP_IPV6;
3719 #else
3720 inp->ip_inp.inp.inp_vflag |= INP_IPV6;
3721 #endif
3722 } else if (laddr->ifa->ifa_addr->sa_family == AF_INET) {
3723 #if !(defined(__FreeBSD__) || defined(__APPLE__) || defined(__DragonFly__))
3724 inp->inp_vflag |= INP_IPV4;
3725 #else
3726 inp->ip_inp.inp.inp_vflag |= INP_IPV4;
3727 #endif
3733 * Add the address to the endpoint local address list
3734 * There is nothing to be done if we are bound to all addresses
3737 sctp_add_local_addr_ep(struct sctp_inpcb *inp, struct ifaddr *ifa)
3739 struct sctp_laddr *laddr;
3740 int fnd, error;
3741 fnd = 0;
3743 if (inp->sctp_flags & SCTP_PCB_FLAGS_BOUNDALL) {
3744 /* You are already bound to all. You have it already */
3745 return (0);
3747 if (ifa->ifa_addr->sa_family == AF_INET6) {
3748 struct in6_ifaddr *ifa6;
3749 ifa6 = (struct in6_ifaddr *)ifa;
3750 if (ifa6->ia6_flags & (IN6_IFF_DETACHED |
3751 IN6_IFF_DEPRECATED | IN6_IFF_ANYCAST | IN6_IFF_NOTREADY))
3752 /* Can't bind a non-existent addr. */
3753 return (-1);
3755 /* first, is it already present? */
3756 LIST_FOREACH(laddr, &inp->sctp_addr_list, sctp_nxt_addr) {
3757 if (laddr->ifa == ifa) {
3758 fnd = 1;
3759 break;
3763 if (((inp->sctp_flags & SCTP_PCB_FLAGS_BOUNDALL) == 0) && (fnd == 0)) {
3764 /* Not bound to all */
3765 error = sctp_insert_laddr(&inp->sctp_addr_list, ifa);
3766 if (error != 0)
3767 return (error);
3768 inp->laddr_count++;
3769 /* update inp_vflag flags */
3770 if (ifa->ifa_addr->sa_family == AF_INET6) {
3771 #if !(defined(__FreeBSD__) || defined(__APPLE__) || defined(__DragonFly__))
3772 inp->inp_vflag |= INP_IPV6;
3773 #else
3774 inp->ip_inp.inp.inp_vflag |= INP_IPV6;
3775 #endif
3776 } else if (ifa->ifa_addr->sa_family == AF_INET) {
3777 #if !(defined(__FreeBSD__) || defined(__APPLE__) || defined(__DragonFly__))
3778 inp->inp_vflag |= INP_IPV4;
3779 #else
3780 inp->ip_inp.inp.inp_vflag |= INP_IPV4;
3781 #endif
3784 return (0);
3789 * select a new (hopefully reachable) destination net
3790 * (should only be used when we deleted an ep addr that is the
3791 * only usable source address to reach the destination net)
3793 static void
3794 sctp_select_primary_destination(struct sctp_tcb *stcb)
3796 struct sctp_nets *net;
3798 TAILQ_FOREACH(net, &stcb->asoc.nets, sctp_next) {
3799 /* for now, we'll just pick the first reachable one we find */
3800 if (net->dest_state & SCTP_ADDR_UNCONFIRMED)
3801 continue;
3802 if (sctp_destination_is_reachable(stcb,
3803 (struct sockaddr *)&net->ro._l_addr)) {
3804 /* found a reachable destination */
3805 stcb->asoc.primary_destination = net;
3808 /* I can't get there from here! ...we're gonna die shortly... */
3813 * Delete the address from the endpoint local address list
3814 * There is nothing to be done if we are bound to all addresses
3817 sctp_del_local_addr_ep(struct sctp_inpcb *inp, struct ifaddr *ifa)
3819 struct sctp_laddr *laddr;
3820 int fnd;
3821 fnd = 0;
3822 if (inp->sctp_flags & SCTP_PCB_FLAGS_BOUNDALL) {
3823 /* You are already bound to all. You have it already */
3824 return (EINVAL);
3827 LIST_FOREACH(laddr, &inp->sctp_addr_list, sctp_nxt_addr) {
3828 if (laddr->ifa == ifa) {
3829 fnd = 1;
3830 break;
3833 if (fnd && (inp->laddr_count < 2)) {
3834 /* can't delete unless there are at LEAST 2 addresses */
3835 return (-1);
3837 if (((inp->sctp_flags & SCTP_PCB_FLAGS_BOUNDALL) == 0) && (fnd)) {
3838 /*
3839 * clean up any use of this address:
3840 * go through our associations and clear any
3841 * last_used_address that matches this one;
3842 * for each assoc, see if a new primary_destination is needed
3843 */
3844 struct sctp_tcb *stcb;
3846 /* clean up "next_addr_touse" */
3847 if (inp->next_addr_touse == laddr)
3848 /* delete this address */
3849 inp->next_addr_touse = NULL;
3851 /* clean up "last_used_address" */
3852 LIST_FOREACH(stcb, &inp->sctp_asoc_list, sctp_tcblist) {
3853 if (stcb->asoc.last_used_address == laddr)
3854 /* delete this address */
3855 stcb->asoc.last_used_address = NULL;
3856 } /* for each tcb */
3858 /* remove it from the ep list */
3859 sctp_remove_laddr(laddr);
3860 inp->laddr_count--;
3861 /* update inp_vflag flags */
3862 sctp_update_ep_vflag(inp);
3863 /* select a new primary destination if needed */
3864 LIST_FOREACH(stcb, &inp->sctp_asoc_list, sctp_tcblist) {
3865 /* presume caller (sctp_asconf.c) already owns INP lock */
3866 SCTP_TCB_LOCK(stcb);
3867 if (sctp_destination_is_reachable(stcb,
3868 (struct sockaddr *)&stcb->asoc.primary_destination->ro._l_addr) == 0) {
3869 sctp_select_primary_destination(stcb);
3871 SCTP_TCB_UNLOCK(stcb);
3872 } /* for each tcb */
3874 return (0);
3878 * Add the addr to the TCB local address list
3879 * For the BOUNDALL or dynamic case, this is a "pending" address list
3880 * (eg. addresses waiting for an ASCONF-ACK response)
3881 * For the subset binding, static case, this is a "valid" address list
3884 sctp_add_local_addr_assoc(struct sctp_tcb *stcb, struct ifaddr *ifa)
3886 struct sctp_inpcb *inp;
3887 struct sctp_laddr *laddr;
3888 int error;
3890 /* Assumes the TCB is locked.. and possibly
3891 * the INP. May need to confirm/fix that if
3892 * we need it and that is not the case.
3894 inp = stcb->sctp_ep;
3895 if (ifa->ifa_addr->sa_family == AF_INET6) {
3896 struct in6_ifaddr *ifa6;
3897 ifa6 = (struct in6_ifaddr *)ifa;
3898 if (ifa6->ia6_flags & (IN6_IFF_DETACHED |
3899 /* IN6_IFF_DEPRECATED | */
3900 IN6_IFF_ANYCAST |
3901 IN6_IFF_NOTREADY))
3902 /* Can't bind a non-existent addr. */
3903 return (-1);
3905 /* does the address already exist? */
3906 LIST_FOREACH(laddr, &stcb->asoc.sctp_local_addr_list, sctp_nxt_addr) {
3907 if (laddr->ifa == ifa) {
3908 return (-1);
3912 /* add to the list */
3913 error = sctp_insert_laddr(&stcb->asoc.sctp_local_addr_list, ifa);
3914 if (error != 0)
3915 return (error);
3916 return (0);
3920 * insert an laddr entry with the given ifa for the desired list
3923 sctp_insert_laddr(struct sctpladdr *list, struct ifaddr *ifa) {
3924 struct sctp_laddr *laddr;
3926 crit_enter();
3927 laddr = (struct sctp_laddr *)SCTP_ZONE_GET(sctppcbinfo.ipi_zone_laddr);
3928 if (laddr == NULL) {
3929 /* out of memory? */
3930 crit_exit();
3931 return (EINVAL);
3933 sctppcbinfo.ipi_count_laddr++;
3934 sctppcbinfo.ipi_gencnt_laddr++;
3935 bzero(laddr, sizeof(*laddr));
3936 laddr->ifa = ifa;
3937 /* insert it */
3938 LIST_INSERT_HEAD(list, laddr, sctp_nxt_addr);
3940 crit_exit();
3941 return (0);
3945 * Remove an laddr entry from the local address list (on an assoc)
3947 void
3948 sctp_remove_laddr(struct sctp_laddr *laddr)
3950 crit_enter();
3951 /* remove from the list */
3952 LIST_REMOVE(laddr, sctp_nxt_addr);
3953 SCTP_ZONE_FREE(sctppcbinfo.ipi_zone_laddr, laddr);
3954 sctppcbinfo.ipi_count_laddr--;
3955 sctppcbinfo.ipi_gencnt_laddr++;
3956 crit_exit();
3960 * Remove an address from the TCB local address list
3963 sctp_del_local_addr_assoc(struct sctp_tcb *stcb, struct ifaddr *ifa)
3965 struct sctp_inpcb *inp;
3966 struct sctp_laddr *laddr;
3968 /* This is called by asconf work. It is assumed that
3969 * a) The TCB is locked
3970 * and
3971 * b) The INP is locked.
3972 * This is true as far as I can trace through
3973 * the asconf entry code where these locks are taken.
3974 * Again, the ASCONF code is a bit different in
3975 * that it often does lock the INP during its
3976 * work. This must be because we don't want other
3977 * procs looking up things while what they are
3978 * looking up is changing :-D
3981 inp = stcb->sctp_ep;
3982 /* if subset bound and don't allow ASCONF's, can't delete last */
3983 if (((inp->sctp_flags & SCTP_PCB_FLAGS_BOUNDALL) == 0) &&
3984 ((inp->sctp_flags & SCTP_PCB_FLAGS_DO_ASCONF) == 0)) {
3985 if (stcb->asoc.numnets < 2) {
3986 /* can't delete last address */
3987 return (-1);
3991 LIST_FOREACH(laddr, &stcb->asoc.sctp_local_addr_list, sctp_nxt_addr) {
3992 /* remove the address if it exists */
3993 if (laddr->ifa == NULL)
3994 continue;
3995 if (laddr->ifa == ifa) {
3996 sctp_remove_laddr(laddr);
3997 return (0);
4001 /* address not found! */
4002 return (-1);
4006 * Remove an address from the TCB local address list
4007 * lookup using a sockaddr addr
4010 sctp_del_local_addr_assoc_sa(struct sctp_tcb *stcb, struct sockaddr *sa)
4012 struct sctp_inpcb *inp;
4013 struct sctp_laddr *laddr;
4014 struct sockaddr *l_sa;
4017 * As far as I can tell, this function does not have a caller.
4018 * As such we NEED TO DELETE this code. If we do
4019 * find a caller, the caller MUST have locked the TCB
4020 * at the least and probably the INP as well.
4022 inp = stcb->sctp_ep;
4023 /* if subset bound and don't allow ASCONF's, can't delete last */
4024 if (((inp->sctp_flags & SCTP_PCB_FLAGS_BOUNDALL) == 0) &&
4025 ((inp->sctp_flags & SCTP_PCB_FLAGS_DO_ASCONF) == 0)) {
4026 if (stcb->asoc.numnets < 2) {
4027 /* can't delete last address */
4028 return (-1);
4032 LIST_FOREACH(laddr, &stcb->asoc.sctp_local_addr_list, sctp_nxt_addr) {
4033 /* make sure the address exists */
4034 if (laddr->ifa == NULL)
4035 continue;
4036 if (laddr->ifa->ifa_addr == NULL)
4037 continue;
4039 l_sa = laddr->ifa->ifa_addr;
4040 if (l_sa->sa_family == AF_INET6) {
4041 /* IPv6 address */
4042 struct sockaddr_in6 *sin1, *sin2;
4043 sin1 = (struct sockaddr_in6 *)l_sa;
4044 sin2 = (struct sockaddr_in6 *)sa;
4045 if (memcmp(&sin1->sin6_addr, &sin2->sin6_addr,
4046 sizeof(struct in6_addr)) == 0) {
4047 /* matched */
4048 sctp_remove_laddr(laddr);
4049 return (0);
4051 } else if (l_sa->sa_family == AF_INET) {
4052 /* IPv4 address */
4053 struct sockaddr_in *sin1, *sin2;
4054 sin1 = (struct sockaddr_in *)l_sa;
4055 sin2 = (struct sockaddr_in *)sa;
4056 if (sin1->sin_addr.s_addr == sin2->sin_addr.s_addr) {
4057 /* matched */
4058 sctp_remove_laddr(laddr);
4059 return (0);
4061 } else {
4062 /* invalid family */
4063 return (-1);
4065 } /* end foreach */
4066 /* address not found! */
4067 return (-1);
4070 static char sctp_pcb_initialized = 0;
4072 #if defined(__FreeBSD__) || defined(__APPLE__) || defined(__DragonFly__)
4073 /* sysctl */
4074 static int sctp_max_number_of_assoc = SCTP_MAX_NUM_OF_ASOC;
4075 static int sctp_scale_up_for_address = SCTP_SCALE_FOR_ADDR;
4077 #endif /* FreeBSD || APPLE || DragonFly */
4079 #ifndef SCTP_TCBHASHSIZE
4080 #define SCTP_TCBHASHSIZE 1024
4081 #endif
4083 #ifndef SCTP_CHUNKQUEUE_SCALE
4084 #define SCTP_CHUNKQUEUE_SCALE 10
4085 #endif
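/*
 * SCTP_TCBHASHSIZE and SCTP_CHUNKQUEUE_SCALE above are compile-time
 * defaults. On FreeBSD builds sctp_pcb_init() below can also pick up
 * boot-time overrides via TUNABLE_INT_FETCH for
 * net.inet.sctp.tcbhashsize, net.inet.sctp.pcbhashsize and
 * net.inet.sctp.chunkscale; e.g. an illustrative /boot/loader.conf
 * entry would be: net.inet.sctp.tcbhashsize="2048".
 */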
4087 void
4088 sctp_pcb_init(void)
4091 * SCTP initialization for the PCB structures
4092 * should be called by the sctp_init() function.
4094 int i;
4095 int hashtblsize = SCTP_TCBHASHSIZE;
4097 #if defined(__FreeBSD__) || defined(__APPLE__) || defined(__DragonFly__)
4098 int sctp_chunkscale = SCTP_CHUNKQUEUE_SCALE;
4099 #endif
4101 if (sctp_pcb_initialized != 0) {
4102 /* error I was called twice */
4103 return;
4105 sctp_pcb_initialized = 1;
4107 /* Init all peg counts */
4108 for (i = 0; i < SCTP_NUMBER_OF_PEGS; i++) {
4109 sctp_pegs[i] = 0;
4112 /* init the empty list of (All) Endpoints */
4113 LIST_INIT(&sctppcbinfo.listhead);
4115 /* init the iterator head */
4116 LIST_INIT(&sctppcbinfo.iteratorhead);
4118 /* init the hash table of endpoints */
4119 #if defined(__FreeBSD__)
4120 #if defined(__FreeBSD_cc_version) && __FreeBSD_cc_version >= 440000
4121 TUNABLE_INT_FETCH("net.inet.sctp.tcbhashsize", &hashtblsize);
4122 TUNABLE_INT_FETCH("net.inet.sctp.pcbhashsize", &sctp_pcbtblsize);
4123 TUNABLE_INT_FETCH("net.inet.sctp.chunkscale", &sctp_chunkscale);
4124 #else
4125 TUNABLE_INT_FETCH("net.inet.sctp.tcbhashsize", SCTP_TCBHASHSIZE,
4126 hashtblsize);
4127 TUNABLE_INT_FETCH("net.inet.sctp.pcbhashsize", SCTP_PCBHASHSIZE,
4128 sctp_pcbtblsize);
4129 TUNABLE_INT_FETCH("net.inet.sctp.chunkscale", SCTP_CHUNKQUEUE_SCALE,
4130 sctp_chunkscale);
4131 #endif
4132 #endif
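/*
 * Three hash tables are sized off hashtblsize: sctp_asochash gets
 * 31 * hashtblsize buckets and is indexed by verification tag (see the
 * SCTP_PCBHASH_ASOC() use in sctp_is_vtag_good() below), while
 * sctp_ephash and sctp_tcpephash each get hashtblsize buckets for
 * endpoint lookups.
 */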
4134 sctppcbinfo.sctp_asochash = hashinit((hashtblsize * 31),
4135 #ifdef __NetBSD__
4136 HASH_LIST,
4137 #endif
4138 M_PCB,
4139 #if defined(__NetBSD__) || defined(__OpenBSD__)
4140 M_WAITOK,
4141 #endif
4142 &sctppcbinfo.hashasocmark);
4144 sctppcbinfo.sctp_ephash = hashinit(hashtblsize,
4145 #ifdef __NetBSD__
4146 HASH_LIST,
4147 #endif
4148 M_PCB,
4149 #if defined(__NetBSD__) || defined(__OpenBSD__)
4150 M_WAITOK,
4151 #endif
4152 &sctppcbinfo.hashmark);
4154 sctppcbinfo.sctp_tcpephash = hashinit(hashtblsize,
4155 #ifdef __NetBSD__
4156 HASH_LIST,
4157 #endif
4158 M_PCB,
4159 #if defined(__NetBSD__) || defined(__OpenBSD__)
4160 M_WAITOK,
4161 #endif
4162 &sctppcbinfo.hashtcpmark);
4164 sctppcbinfo.hashtblsize = hashtblsize;
4166 /* init the zones */
4168 * FIX ME: Should check for NULL returns, but if it does fail we
4169 * are doomed to panic anyways... add later maybe.
4171 SCTP_ZONE_INIT(sctppcbinfo.ipi_zone_ep, "sctp_ep",
4172 sizeof(struct sctp_inpcb), maxsockets);
4174 SCTP_ZONE_INIT(sctppcbinfo.ipi_zone_asoc, "sctp_asoc",
4175 sizeof(struct sctp_tcb), sctp_max_number_of_assoc);
4177 SCTP_ZONE_INIT(sctppcbinfo.ipi_zone_laddr, "sctp_laddr",
4178 sizeof(struct sctp_laddr),
4179 (sctp_max_number_of_assoc * sctp_scale_up_for_address));
4181 SCTP_ZONE_INIT(sctppcbinfo.ipi_zone_net, "sctp_raddr",
4182 sizeof(struct sctp_nets),
4183 (sctp_max_number_of_assoc * sctp_scale_up_for_address));
4185 SCTP_ZONE_INIT(sctppcbinfo.ipi_zone_chunk, "sctp_chunk",
4186 sizeof(struct sctp_tmit_chunk),
4187 (sctp_max_number_of_assoc * sctp_scale_up_for_address *
4188 sctp_chunkscale));
4190 SCTP_ZONE_INIT(sctppcbinfo.ipi_zone_sockq, "sctp_sockq",
4191 sizeof(struct sctp_socket_q_list),
4192 (sctp_max_number_of_assoc * sctp_scale_up_for_address *
4193 sctp_chunkscale));
4195 /* Master Lock INIT for info structure */
4196 SCTP_INP_INFO_LOCK_INIT();
4197 SCTP_ITERATOR_LOCK_INIT();
4198 /* not sure if we need all the counts */
4199 sctppcbinfo.ipi_count_ep = 0;
4200 sctppcbinfo.ipi_gencnt_ep = 0;
4201 /* assoc/tcb zone info */
4202 sctppcbinfo.ipi_count_asoc = 0;
4203 sctppcbinfo.ipi_gencnt_asoc = 0;
4204 /* local addrlist zone info */
4205 sctppcbinfo.ipi_count_laddr = 0;
4206 sctppcbinfo.ipi_gencnt_laddr = 0;
4207 /* remote addrlist zone info */
4208 sctppcbinfo.ipi_count_raddr = 0;
4209 sctppcbinfo.ipi_gencnt_raddr = 0;
4210 /* chunk info */
4211 sctppcbinfo.ipi_count_chunk = 0;
4212 sctppcbinfo.ipi_gencnt_chunk = 0;
4214 /* socket queue zone info */
4215 sctppcbinfo.ipi_count_sockq = 0;
4216 sctppcbinfo.ipi_gencnt_sockq = 0;
4218 /* mbuf tracker */
4219 sctppcbinfo.mbuf_track = 0;
4220 /* port stuff */
4221 #if defined(__FreeBSD__) || defined(__OpenBSD__) || defined(__APPLE__) \
4222 || defined(__DragonFly__)
4223 sctppcbinfo.lastlow = ipport_firstauto;
4224 #else
4225 sctppcbinfo.lastlow = anonportmin;
4226 #endif
4227 /* Init the TIMEWAIT list */
4228 for (i = 0; i < SCTP_STACK_VTAG_HASH_SIZE; i++) {
4229 LIST_INIT(&sctppcbinfo.vtag_timewait[i]);
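/*
 * vtag_timewait is a small hash of tag blocks indexed by
 * (tag % SCTP_STACK_VTAG_HASH_SIZE); sctp_is_vtag_good() below walks
 * the matching chain so a verification tag still held in time-wait is
 * not handed out again.
 */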
4232 #if defined(_SCTP_NEEDS_CALLOUT_) && !defined(__APPLE__)
4233 TAILQ_INIT(&sctppcbinfo.callqueue);
4234 #endif
4239 sctp_load_addresses_from_init(struct sctp_tcb *stcb, struct mbuf *m,
4240 int iphlen, int offset, int limit, struct sctphdr *sh,
4241 struct sockaddr *altsa)
4244 * grub through the INIT pulling addresses and
4245 * loading them to the nets structure in the asoc.
4246 * The from address in the mbuf should also be loaded
4247 * (if it is not already). This routine can be called
4248 * with either an INIT or an INIT-ACK as long as
4249 * m points to the IP packet and the offset points
4250 * to the beginning of the parameters.
4252 struct sctp_inpcb *inp, *l_inp;
4253 struct sctp_nets *net, *net_tmp;
4254 struct ip *iph;
4255 struct sctp_paramhdr *phdr, parm_buf;
4256 struct sctp_tcb *stcb_tmp;
4257 u_int16_t ptype, plen;
4258 struct sockaddr *sa;
4259 struct sockaddr_storage dest_store;
4260 struct sockaddr *local_sa = (struct sockaddr *)&dest_store;
4261 struct sockaddr_in sin;
4262 struct sockaddr_in6 sin6;
4264 /* First get the destination address setup too. */
4265 memset(&sin, 0, sizeof(sin));
4266 memset(&sin6, 0, sizeof(sin6));
4268 sin.sin_family = AF_INET;
4269 sin.sin_len = sizeof(sin);
4270 sin.sin_port = stcb->rport;
4272 sin6.sin6_family = AF_INET6;
4273 sin6.sin6_len = sizeof(struct sockaddr_in6);
4274 sin6.sin6_port = stcb->rport;
4275 if (altsa == NULL) {
4276 iph = mtod(m, struct ip *);
4277 if (iph->ip_v == IPVERSION) {
4278 /* its IPv4 */
4279 struct sockaddr_in *sin_2;
4280 sin_2 = (struct sockaddr_in *)(local_sa);
4281 memset(sin_2, 0, sizeof(sin));
4282 sin_2->sin_family = AF_INET;
4283 sin_2->sin_len = sizeof(sin);
4284 sin_2->sin_port = sh->dest_port;
4285 sin_2->sin_addr.s_addr = iph->ip_dst.s_addr ;
4286 sin.sin_addr = iph->ip_src;
4287 sa = (struct sockaddr *)&sin;
4288 } else if (iph->ip_v == (IPV6_VERSION >> 4)) {
4289 /* its IPv6 */
4290 struct ip6_hdr *ip6;
4291 struct sockaddr_in6 *sin6_2;
4293 ip6 = mtod(m, struct ip6_hdr *);
4294 sin6_2 = (struct sockaddr_in6 *)(local_sa);
4295 memset(sin6_2, 0, sizeof(sin6));
4296 sin6_2->sin6_family = AF_INET6;
4297 sin6_2->sin6_len = sizeof(struct sockaddr_in6);
4298 sin6_2->sin6_port = sh->dest_port;
4299 sin6.sin6_addr = ip6->ip6_src;
4300 sa = (struct sockaddr *)&sin6;
4301 } else {
4302 sa = NULL;
4304 } else {
4306 * For cookies we use the src address NOT from the packet
4307 * but from the original INIT
4309 sa = altsa;
4311 /* Turn off ECN until we get through all params */
4312 stcb->asoc.ecn_allowed = 0;
4314 TAILQ_FOREACH(net, &stcb->asoc.nets, sctp_next) {
4315 /* mark all addresses that we have currently on the list */
4316 net->dest_state |= SCTP_ADDR_NOT_IN_ASSOC;
4318 /* does the source address already exist? if so skip it */
4319 l_inp = inp = stcb->sctp_ep;
4320 stcb_tmp = sctp_findassociation_ep_addr(&inp, sa, &net_tmp, local_sa, stcb);
4321 if ((stcb_tmp == NULL && inp == stcb->sctp_ep) || inp == NULL) {
4322 /* we must add the source address */
4323 /* no scope set here since we have a tcb already. */
4324 if ((sa->sa_family == AF_INET) &&
4325 (stcb->asoc.ipv4_addr_legal)) {
4326 if (sctp_add_remote_addr(stcb, sa, 0, 2)) {
4327 return (-1);
4329 } else if ((sa->sa_family == AF_INET6) &&
4330 (stcb->asoc.ipv6_addr_legal)) {
4331 if (sctp_add_remote_addr(stcb, sa, 0, 3)) {
4332 return (-1);
4335 } else {
4336 if (net_tmp != NULL && stcb_tmp == stcb) {
4337 net_tmp->dest_state &= ~SCTP_ADDR_NOT_IN_ASSOC;
4338 } else if (stcb_tmp != stcb) {
4339 /* It belongs to another association? */
4340 return (-1);
4343 /* since an unlock occurred we must check the
4344 * TCB's state and the pcb's gone flags.
4346 if (l_inp->sctp_flags & (SCTP_PCB_FLAGS_SOCKET_GONE|SCTP_PCB_FLAGS_SOCKET_ALLGONE)) {
4347 /* the user freed the ep */
4348 return (-1);
4350 if (stcb->asoc.state == 0) {
4351 /* the assoc was freed? */
4352 return (-1);
4355 /* now we must go through each of the params. */
4356 phdr = sctp_get_next_param(m, offset, &parm_buf, sizeof(parm_buf));
4357 while (phdr) {
4358 ptype = ntohs(phdr->param_type);
4359 plen = ntohs(phdr->param_length);
4360 /*kprintf("ptype => %d, plen => %d\n", ptype, plen);*/
4361 if (offset + plen > limit) {
4362 break;
4364 if (plen == 0) {
4365 break;
4367 if ((ptype == SCTP_IPV4_ADDRESS) &&
4368 (stcb->asoc.ipv4_addr_legal)) {
4369 struct sctp_ipv4addr_param *p4, p4_buf;
4370 /* ok get the v4 address and check/add */
4371 phdr = sctp_get_next_param(m, offset,
4372 (struct sctp_paramhdr *)&p4_buf, sizeof(p4_buf));
4373 if (plen != sizeof(struct sctp_ipv4addr_param) ||
4374 phdr == NULL) {
4375 return (-1);
4377 p4 = (struct sctp_ipv4addr_param *)phdr;
4378 sin.sin_addr.s_addr = p4->addr;
4379 sa = (struct sockaddr *)&sin;
4380 inp = stcb->sctp_ep;
4381 stcb_tmp = sctp_findassociation_ep_addr(&inp, sa, &net,
4382 local_sa, stcb);
4384 if ((stcb_tmp== NULL && inp == stcb->sctp_ep) ||
4385 inp == NULL) {
4386 /* we must add the source address */
4387 /* no scope set since we have a tcb already */
4389 /* we must validate the state again here */
4390 if (l_inp->sctp_flags & (SCTP_PCB_FLAGS_SOCKET_GONE|SCTP_PCB_FLAGS_SOCKET_ALLGONE)) {
4391 /* the user freed the ep */
4392 return (-1);
4394 if (stcb->asoc.state == 0) {
4395 /* the assoc was freed? */
4396 return (-1);
4398 if (sctp_add_remote_addr(stcb, sa, 0, 4)) {
4399 return (-1);
4401 } else if (stcb_tmp == stcb) {
4402 if (l_inp->sctp_flags & (SCTP_PCB_FLAGS_SOCKET_GONE|SCTP_PCB_FLAGS_SOCKET_ALLGONE)) {
4403 /* the user freed the ep */
4404 return (-1);
4406 if (stcb->asoc.state == 0) {
4407 /* the assoc was freed? */
4408 return (-1);
4410 if (net != NULL) {
4411 /* clear flag */
4412 net->dest_state &=
4413 ~SCTP_ADDR_NOT_IN_ASSOC;
4415 } else {
4416 /* strange, address is in another assoc?
4417 * straighten out locks.
4419 SCTP_TCB_UNLOCK(stcb_tmp);
4420 SCTP_INP_RLOCK(inp);
4421 if (l_inp->sctp_flags & (SCTP_PCB_FLAGS_SOCKET_GONE|SCTP_PCB_FLAGS_SOCKET_ALLGONE)) {
4422 /* the user freed the ep */
4423 SCTP_INP_RUNLOCK(l_inp);
4424 return (-1);
4426 if (stcb->asoc.state == 0) {
4427 /* the assoc was freed? */
4428 SCTP_INP_RUNLOCK(l_inp);
4429 return (-1);
4431 SCTP_TCB_LOCK(stcb);
4432 SCTP_INP_RUNLOCK(stcb->sctp_ep);
4433 return (-1);
4435 } else if ((ptype == SCTP_IPV6_ADDRESS) &&
4436 (stcb->asoc.ipv6_addr_legal)) {
4437 /* ok get the v6 address and check/add */
4438 struct sctp_ipv6addr_param *p6, p6_buf;
4439 phdr = sctp_get_next_param(m, offset,
4440 (struct sctp_paramhdr *)&p6_buf, sizeof(p6_buf));
4441 if (plen != sizeof(struct sctp_ipv6addr_param) ||
4442 phdr == NULL) {
4443 return (-1);
4445 p6 = (struct sctp_ipv6addr_param *)phdr;
4446 memcpy((caddr_t)&sin6.sin6_addr, p6->addr,
4447 sizeof(p6->addr));
4448 sa = (struct sockaddr *)&sin6;
4449 inp = stcb->sctp_ep;
4450 stcb_tmp= sctp_findassociation_ep_addr(&inp, sa, &net,
4451 local_sa, stcb);
4452 if (stcb_tmp == NULL && (inp == stcb->sctp_ep ||
4453 inp == NULL)) {
4454 /* we must validate the state again here */
4455 if (l_inp->sctp_flags & (SCTP_PCB_FLAGS_SOCKET_GONE|SCTP_PCB_FLAGS_SOCKET_ALLGONE)) {
4456 /* the user freed the ep */
4457 return (-1);
4459 if (stcb->asoc.state == 0) {
4460 /* the assoc was freed? */
4461 return (-1);
4463 /* we must add the address, no scope set */
4464 if (sctp_add_remote_addr(stcb, sa, 0, 5)) {
4465 return (-1);
4467 } else if (stcb_tmp == stcb) {
4468 /* we must validate the state again here */
4469 if (l_inp->sctp_flags & (SCTP_PCB_FLAGS_SOCKET_GONE|SCTP_PCB_FLAGS_SOCKET_ALLGONE)) {
4470 /* the user freed the ep */
4471 return (-1);
4473 if (stcb->asoc.state == 0) {
4474 /* the assoc was freed? */
4475 return (-1);
4477 if (net != NULL) {
4478 /* clear flag */
4479 net->dest_state &=
4480 ~SCTP_ADDR_NOT_IN_ASSOC;
4482 } else {
4483 /* strange, address is in another assoc?
4484 * straighten out locks.
4486 SCTP_TCB_UNLOCK(stcb_tmp);
4487 SCTP_INP_RLOCK(l_inp);
4488 /* we must validate the state again here */
4489 if (l_inp->sctp_flags & (SCTP_PCB_FLAGS_SOCKET_GONE|SCTP_PCB_FLAGS_SOCKET_ALLGONE)) {
4490 /* the user freed the ep */
4491 SCTP_INP_RUNLOCK(l_inp);
4492 return (-1);
4494 if (stcb->asoc.state == 0) {
4495 /* the assoc was freed? */
4496 SCTP_INP_RUNLOCK(l_inp);
4497 return (-1);
4499 SCTP_TCB_LOCK(stcb);
4500 SCTP_INP_RUNLOCK(l_inp);
4501 return (-1);
4503 } else if (ptype == SCTP_ECN_CAPABLE) {
4504 stcb->asoc.ecn_allowed = 1;
4505 } else if (ptype == SCTP_ULP_ADAPTION) {
4506 if (stcb->asoc.state != SCTP_STATE_OPEN) {
4507 struct sctp_adaption_layer_indication ai, *aip;
4509 phdr = sctp_get_next_param(m, offset,
4510 (struct sctp_paramhdr *)&ai, sizeof(ai));
4511 aip = (struct sctp_adaption_layer_indication *)phdr;
4512 sctp_ulp_notify(SCTP_NOTIFY_ADAPTION_INDICATION,
4513 stcb, ntohl(aip->indication), NULL);
4515 } else if (ptype == SCTP_SET_PRIM_ADDR) {
4516 struct sctp_asconf_addr_param lstore, *fee;
4517 struct sctp_asconf_addrv4_param *fii;
4518 int lptype;
4519 struct sockaddr *lsa = NULL;
4521 stcb->asoc.peer_supports_asconf = 1;
4522 stcb->asoc.peer_supports_asconf_setprim = 1;
4523 if (plen > sizeof(lstore)) {
4524 return (-1);
4526 phdr = sctp_get_next_param(m, offset,
4527 (struct sctp_paramhdr *)&lstore, plen);
4528 if (phdr == NULL) {
4529 return (-1);
4532 fee = (struct sctp_asconf_addr_param *)phdr;
4533 lptype = ntohs(fee->addrp.ph.param_type);
4534 if (lptype == SCTP_IPV4_ADDRESS) {
4535 if (plen !=
4536 sizeof(struct sctp_asconf_addrv4_param)) {
4537 kprintf("Sizeof setprim in init/init ack not %d but %d - ignored\n",
4538 (int)sizeof(struct sctp_asconf_addrv4_param),
4539 plen);
4540 } else {
4541 fii = (struct sctp_asconf_addrv4_param *)fee;
4542 sin.sin_addr.s_addr = fii->addrp.addr;
4543 lsa = (struct sockaddr *)&sin;
4545 } else if (lptype == SCTP_IPV6_ADDRESS) {
4546 if (plen !=
4547 sizeof(struct sctp_asconf_addr_param)) {
4548 kprintf("Sizeof setprim (v6) in init/init ack not %d but %d - ignored\n",
4549 (int)sizeof(struct sctp_asconf_addr_param),
4550 plen);
4551 } else {
4552 memcpy(sin6.sin6_addr.s6_addr,
4553 fee->addrp.addr,
4554 sizeof(fee->addrp.addr));
4555 lsa = (struct sockaddr *)&sin6;
4558 if (lsa) {
4559 sctp_set_primary_addr(stcb, lsa, NULL);
4562 } else if (ptype == SCTP_PRSCTP_SUPPORTED) {
4563 /* Peer supports pr-sctp */
4564 stcb->asoc.peer_supports_prsctp = 1;
4565 } else if (ptype == SCTP_SUPPORTED_CHUNK_EXT) {
4566 /* A supported extension chunk */
4567 struct sctp_supported_chunk_types_param *pr_supported;
4568 uint8_t local_store[128];
4569 int num_ent, i;
4571 phdr = sctp_get_next_param(m, offset,
4572 (struct sctp_paramhdr *)&local_store, plen);
4573 if (phdr == NULL) {
4574 return (-1);
4576 stcb->asoc.peer_supports_asconf = 0;
4577 stcb->asoc.peer_supports_asconf_setprim = 0;
4578 stcb->asoc.peer_supports_prsctp = 0;
4579 stcb->asoc.peer_supports_pktdrop = 0;
4580 stcb->asoc.peer_supports_strreset = 0;
4581 pr_supported = (struct sctp_supported_chunk_types_param *)phdr;
4582 num_ent = plen - sizeof(struct sctp_paramhdr);
4583 for (i=0; i<num_ent; i++) {
4584 switch (pr_supported->chunk_types[i]) {
4585 case SCTP_ASCONF:
4586 stcb->asoc.peer_supports_asconf = 1;
4587 stcb->asoc.peer_supports_asconf_setprim = 1;
4588 break;
4589 case SCTP_ASCONF_ACK:
4590 stcb->asoc.peer_supports_asconf = 1;
4591 stcb->asoc.peer_supports_asconf_setprim = 1;
4592 break;
4593 case SCTP_FORWARD_CUM_TSN:
4594 stcb->asoc.peer_supports_prsctp = 1;
4595 break;
4596 case SCTP_PACKET_DROPPED:
4597 stcb->asoc.peer_supports_pktdrop = 1;
4598 break;
4599 case SCTP_STREAM_RESET:
4600 stcb->asoc.peer_supports_strreset = 1;
4601 break;
4602 default:
4603 /* one I have not learned yet */
4604 break;
4608 } else if (ptype == SCTP_ECN_NONCE_SUPPORTED) {
4609 /* Peer supports ECN-nonce */
4610 stcb->asoc.peer_supports_ecn_nonce = 1;
4611 stcb->asoc.ecn_nonce_allowed = 1;
4612 } else if ((ptype == SCTP_HEARTBEAT_INFO) ||
4613 (ptype == SCTP_STATE_COOKIE) ||
4614 (ptype == SCTP_UNRECOG_PARAM) ||
4615 (ptype == SCTP_COOKIE_PRESERVE) ||
4616 (ptype == SCTP_SUPPORTED_ADDRTYPE) ||
4617 (ptype == SCTP_ADD_IP_ADDRESS) ||
4618 (ptype == SCTP_DEL_IP_ADDRESS) ||
4619 (ptype == SCTP_ERROR_CAUSE_IND) ||
4620 (ptype == SCTP_SUCCESS_REPORT)) {
4621 /* don't care */;
4622 } else {
4623 if ((ptype & 0x8000) == 0x0000) {
4624 /* must stop processing the rest of
4625 * the param's. Any report bits were
4626 * handled with the call to sctp_arethere_unrecognized_parameters()
4627 * when the INIT or INIT-ACK was first seen.
4629 break;
4632 offset += SCTP_SIZE32(plen);
4633 if (offset >= limit) {
4634 break;
4636 phdr = sctp_get_next_param(m, offset, &parm_buf,
4637 sizeof(parm_buf));
4639 /* Now check to see if we need to purge any addresses */
4640 for (net = TAILQ_FIRST(&stcb->asoc.nets); net != NULL; net = net_tmp) {
4641 net_tmp = TAILQ_NEXT(net, sctp_next);
4642 if ((net->dest_state & SCTP_ADDR_NOT_IN_ASSOC) ==
4643 SCTP_ADDR_NOT_IN_ASSOC) {
4644 /* This address has been removed from the asoc */
4645 /* remove and free it */
4646 stcb->asoc.numnets--;
4647 TAILQ_REMOVE(&stcb->asoc.nets, net, sctp_next);
4648 sctp_free_remote_addr(net);
4649 if (net == stcb->asoc.primary_destination) {
4650 stcb->asoc.primary_destination = NULL;
4651 sctp_select_primary_destination(stcb);
4655 return (0);
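/*
 * Set the requested address (or net) as the association's primary
 * destination. Fails if the address is not part of the association or
 * is still unconfirmed.
 */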
4659 sctp_set_primary_addr(struct sctp_tcb *stcb, struct sockaddr *sa,
4660 struct sctp_nets *net)
4662 /* make sure the requested primary address exists in the assoc */
4663 if (net == NULL && sa)
4664 net = sctp_findnet(stcb, sa);
4666 if (net == NULL) {
4667 /* didn't find the requested primary address! */
4668 return (-1);
4669 } else {
4670 /* set the primary address */
4671 if (net->dest_state & SCTP_ADDR_UNCONFIRMED) {
4672 /* Must be confirmed */
4673 return (-1);
4675 stcb->asoc.primary_destination = net;
4676 net->dest_state &= ~SCTP_ADDR_WAS_PRIMARY;
4677 return (0);
4683 sctp_is_vtag_good(struct sctp_inpcb *inp, u_int32_t tag, struct timeval *now)
4686 * This function serves two purposes. It will see if a TAG can be
4687 * re-used, returning 1 if it is ok and 0 if that tag should not be
4688 * used.
4689 * As a secondary function it purges out old tags that can
4690 * be removed.
4692 struct sctpasochead *head;
4693 struct sctpvtaghead *chain;
4694 struct sctp_tagblock *twait_block;
4695 struct sctp_tcb *stcb;
4697 int i;
4698 SCTP_INP_INFO_WLOCK();
4699 chain = &sctppcbinfo.vtag_timewait[(tag % SCTP_STACK_VTAG_HASH_SIZE)];
4700 /* First is the vtag in use ? */
4702 head = &sctppcbinfo.sctp_asochash[SCTP_PCBHASH_ASOC(tag,
4703 sctppcbinfo.hashasocmark)];
4704 if (head == NULL) {
4705 SCTP_INP_INFO_WUNLOCK();
4706 return (0);
4708 LIST_FOREACH(stcb, head, sctp_asocs) {
4709 if (stcb->asoc.my_vtag == tag) {
4710 /* We should remove this if and
4711 * return 0 always if we want vtags
4712 * unique across all endpoints. For
4713 * now, unique within an endpoint is ok.
4715 if (inp == stcb->sctp_ep) {
4716 /* bad tag, in use */
4717 SCTP_INP_INFO_WUNLOCK();
4718 return (0);
4722 if (!LIST_EMPTY(chain)) {
4724 * Block(s) are present, lets see if we have this tag in
4725 * the list
4727 LIST_FOREACH(twait_block, chain, sctp_nxt_tagblock) {
4728 for (i = 0; i < SCTP_NUMBER_IN_VTAG_BLOCK; i++) {
4729 if (twait_block->vtag_block[i].v_tag == 0) {
4730 /* not used */
4731 continue;
4732 } else if ((long)twait_block->vtag_block[i].tv_sec_at_expire >
4733 now->tv_sec) {
4734 /* Audit expires this guy */
4735 twait_block->vtag_block[i].tv_sec_at_expire = 0;
4736 twait_block->vtag_block[i].v_tag = 0;
4737 } else if (twait_block->vtag_block[i].v_tag ==
4738 tag) {
4739 /* Bad tag, sorry :< */
4740 SCTP_INP_INFO_WUNLOCK();
4741 return (0);
4746 /* Not found, ok to use the tag */
4747 SCTP_INP_INFO_WUNLOCK();
4748 return (1);
4753 * Delete the address from the endpoint local address list
4754 * Lookup using a sockaddr address (ie. not an ifaddr)
4757 sctp_del_local_addr_ep_sa(struct sctp_inpcb *inp, struct sockaddr *sa)
4759 struct sctp_laddr *laddr;
4760 struct sockaddr *l_sa;
4761 int found = 0;
4762 /* Here is another function I cannot find a
4763 * caller for. As such we SHOULD delete it
4764 * if we have no users. If we find a user, that
4765 * user MUST have the INP locked.
4769 if (inp->sctp_flags & SCTP_PCB_FLAGS_BOUNDALL) {
4770 /* You are already bound to all. You have it already */
4771 return (EINVAL);
4774 LIST_FOREACH(laddr, &inp->sctp_addr_list, sctp_nxt_addr) {
4775 /* make sure the address exists */
4776 if (laddr->ifa == NULL)
4777 continue;
4778 if (laddr->ifa->ifa_addr == NULL)
4779 continue;
4781 l_sa = laddr->ifa->ifa_addr;
4782 if (l_sa->sa_family == AF_INET6) {
4783 /* IPv6 address */
4784 struct sockaddr_in6 *sin1, *sin2;
4785 sin1 = (struct sockaddr_in6 *)l_sa;
4786 sin2 = (struct sockaddr_in6 *)sa;
4787 if (memcmp(&sin1->sin6_addr, &sin2->sin6_addr,
4788 sizeof(struct in6_addr)) == 0) {
4789 /* matched */
4790 found = 1;
4791 break;
4793 } else if (l_sa->sa_family == AF_INET) {
4794 /* IPv4 address */
4795 struct sockaddr_in *sin1, *sin2;
4796 sin1 = (struct sockaddr_in *)l_sa;
4797 sin2 = (struct sockaddr_in *)sa;
4798 if (sin1->sin_addr.s_addr == sin2->sin_addr.s_addr) {
4799 /* matched */
4800 found = 1;
4801 break;
4803 } else {
4804 /* invalid family */
4805 return (-1);
4809 if (found && inp->laddr_count < 2) {
4810 /* can't delete unless there are at LEAST 2 addresses */
4811 return (-1);
4814 if (found && (inp->sctp_flags & SCTP_PCB_FLAGS_BOUNDALL) == 0) {
4816 * remove it from the ep list; this should NOT be
4817 * done until it's really gone from the interface list and
4818 * we won't be receiving any more of these. Probably right
4819 * away. If we do allow removal of an address from
4820 * an association (sub-set bind) then this should NOT
4821 * be called until all the ASCONFs come back from this
4822 * association.
4824 sctp_remove_laddr(laddr);
4825 return (0);
4826 } else {
4827 return (-1);
4831 static void
4832 sctp_drain_mbufs(struct sctp_inpcb *inp, struct sctp_tcb *stcb)
4835 * We must hunt this association for MBUF's past the cumack
4836 * (i.e. out of order data that we can renege on).
4838 struct sctp_association *asoc;
4839 struct sctp_tmit_chunk *chk, *nchk;
4840 u_int32_t cumulative_tsn_p1, tsn;
4841 int cnt, strmat, gap;
4842 /* We look for anything larger than the cum-ack + 1 */
4844 asoc = &stcb->asoc;
4845 cumulative_tsn_p1 = asoc->cumulative_tsn + 1;
4846 cnt = 0;
4847 /* First look in the re-assembly queue */
4848 chk = TAILQ_FIRST(&asoc->reasmqueue);
4849 while (chk) {
4850 /* Get the next one */
4851 nchk = TAILQ_NEXT(chk, sctp_next);
4852 if (compare_with_wrap(chk->rec.data.TSN_seq,
4853 cumulative_tsn_p1, MAX_TSN)) {
4854 /* Yep it is above cum-ack */
4855 cnt++;
4856 tsn = chk->rec.data.TSN_seq;
4857 if (tsn >= asoc->mapping_array_base_tsn) {
4858 gap = tsn - asoc->mapping_array_base_tsn;
4859 } else {
4860 gap = (MAX_TSN - asoc->mapping_array_base_tsn) +
4861 tsn + 1;
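/*
 * Illustrative wrap case: with mapping_array_base_tsn = 0xfffffff0
 * and tsn = 5, gap = (0xffffffff - 0xfffffff0) + 5 + 1 = 21, i.e. the
 * chunk sits 21 slots past the base once the 32-bit TSN space wraps.
 */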
4863 asoc->size_on_reasm_queue -= chk->send_size;
4864 asoc->cnt_on_reasm_queue--;
4865 SCTP_UNSET_TSN_PRESENT(asoc->mapping_array, gap);
4866 TAILQ_REMOVE(&asoc->reasmqueue, chk, sctp_next);
4867 if (chk->data) {
4868 sctp_m_freem(chk->data);
4869 chk->data = NULL;
4871 SCTP_ZONE_FREE(sctppcbinfo.ipi_zone_chunk, chk);
4872 sctppcbinfo.ipi_count_chunk--;
4873 if ((int)sctppcbinfo.ipi_count_chunk < 0) {
4874 panic("Chunk count is negative");
4876 sctppcbinfo.ipi_gencnt_chunk++;
4878 chk = nchk;
4880 /* Ok that was fun, now we will drain all the inbound streams? */
4881 for (strmat = 0; strmat < asoc->streamincnt; strmat++) {
4882 chk = TAILQ_FIRST(&asoc->strmin[strmat].inqueue);
4883 while (chk) {
4884 nchk = TAILQ_NEXT(chk, sctp_next);
4885 if (compare_with_wrap(chk->rec.data.TSN_seq,
4886 cumulative_tsn_p1, MAX_TSN)) {
4887 /* Yep it is above cum-ack */
4888 cnt++;
4889 tsn = chk->rec.data.TSN_seq;
4890 if (tsn >= asoc->mapping_array_base_tsn) {
4891 gap = tsn -
4892 asoc->mapping_array_base_tsn;
4893 } else {
4894 gap = (MAX_TSN -
4895 asoc->mapping_array_base_tsn) +
4896 tsn + 1;
4898 asoc->size_on_all_streams -= chk->send_size;
4899 asoc->cnt_on_all_streams--;
4901 SCTP_UNSET_TSN_PRESENT(asoc->mapping_array,
4902 gap);
4903 TAILQ_REMOVE(&asoc->strmin[strmat].inqueue,
4904 chk, sctp_next);
4905 if (chk->data) {
4906 sctp_m_freem(chk->data);
4907 chk->data = NULL;
4909 SCTP_ZONE_FREE(sctppcbinfo.ipi_zone_chunk, chk);
4910 sctppcbinfo.ipi_count_chunk--;
4911 if ((int)sctppcbinfo.ipi_count_chunk < 0) {
4912 panic("Chunk count is negative");
4914 sctppcbinfo.ipi_gencnt_chunk++;
4916 chk = nchk;
4920 * Question, should we go through the delivery queue?
4921 * The only reason things are on it is that the app is not reading OR a
4922 * PD-API is up. An attacker COULD send enough in to initiate the
4923 * PD-API and then send a bunch of stuff to other streams... these
4924 * would wind up on the delivery queue.. and then we would not get
4925 * to them. But in order to do this I then have to back-track and
4926 * un-deliver sequence numbers in streams.. el-yucko. I think for
4927 * now we will NOT look at the delivery queue and leave it to be
4928 * something to consider later. An alternative would be to abort
4929 * the P-D-API with a notification and then deliver the data....
4930 * Or another method might be to keep track of how many times the
4931 * situation occurs and if we see a possible attack underway just
4932 * abort the association.
4934 #ifdef SCTP_DEBUG
4935 if (sctp_debug_on & SCTP_DEBUG_PCB1) {
4936 if (cnt) {
4937 kprintf("Freed %d chunks from reneg harvest\n", cnt);
4940 #endif /* SCTP_DEBUG */
4943 * Another issue: in un-setting the TSNs in the mapping array we
4944 * DID NOT adjust the highest_tsn marker. This will cause one of
4945 * two things to occur. It may cause us to do extra work in checking
4946 * for our mapping array movement. More importantly, it may cause us
4947 * to SACK every datagram. This may not be a bad thing though, since
4948 * we will recover once our cum-ack advances past all the stuff
4949 * we dumped here and it has been recovered.
4953 void
4954 sctp_drain(void)
4957 * We must walk the PCB lists for ALL associations here. The system
4958 * is LOW on MBUF's and needs help. This is where reneging will
4959 * occur. We really hope this does NOT happen!
4961 struct sctp_inpcb *inp;
4962 struct sctp_tcb *stcb;
4964 SCTP_INP_INFO_RLOCK();
4965 LIST_FOREACH(inp, &sctppcbinfo.listhead, sctp_list) {
4966 /* For each endpoint */
4967 SCTP_INP_RLOCK(inp);
4968 LIST_FOREACH(stcb, &inp->sctp_asoc_list, sctp_tcblist) {
4969 /* For each association */
4970 SCTP_TCB_LOCK(stcb);
4971 sctp_drain_mbufs(inp, stcb);
4972 SCTP_TCB_UNLOCK(stcb);
4974 SCTP_INP_RUNLOCK(inp);
4976 SCTP_INP_INFO_RUNLOCK();
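/*
 * Queue an association on the endpoint's socket queue. An entry is
 * taken from the sockq zone and appended to inp->sctp_queue_list;
 * returns 1 on success, 0 if the arguments are NULL or the zone is
 * exhausted. The INP write lock is assumed held.
 * sctp_remove_from_socket_q() below is the matching dequeue.
 */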
4980 sctp_add_to_socket_q(struct sctp_inpcb *inp, struct sctp_tcb *stcb)
4982 struct sctp_socket_q_list *sq;
4984 /* write lock on INP assumed */
4985 if ((inp == NULL) || (stcb == NULL)) {
4986 /* I am paranoid */
4987 return (0);
4989 sq = (struct sctp_socket_q_list *)SCTP_ZONE_GET(
4990 sctppcbinfo.ipi_zone_sockq);
4991 if (sq == NULL) {
4992 /* out of sq structs */
4993 return (0);
4995 sctppcbinfo.ipi_count_sockq++;
4996 sctppcbinfo.ipi_gencnt_sockq++;
4997 if (stcb)
4998 stcb->asoc.cnt_msg_on_sb++;
4999 sq->tcb = stcb;
5000 TAILQ_INSERT_TAIL(&inp->sctp_queue_list, sq, next_sq);
5001 return (1);
5005 struct sctp_tcb *
5006 sctp_remove_from_socket_q(struct sctp_inpcb *inp)
5008 struct sctp_tcb *stcb = NULL;
5009 struct sctp_socket_q_list *sq;
5011 /* W-Lock on INP assumed held */
5012 sq = TAILQ_FIRST(&inp->sctp_queue_list);
5013 if (sq == NULL)
5014 return (NULL);
5016 stcb = sq->tcb;
5017 TAILQ_REMOVE(&inp->sctp_queue_list, sq, next_sq);
5018 SCTP_ZONE_FREE(sctppcbinfo.ipi_zone_sockq, sq);
5019 sctppcbinfo.ipi_count_sockq--;
5020 sctppcbinfo.ipi_gencnt_sockq++;
5021 if (stcb) {
5022 stcb->asoc.cnt_msg_on_sb--;
5024 return (stcb);
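/*
 * Start an iterator: af is recorded as the function to apply to each
 * matching association and ef as the completion callback. With s_inp
 * set only that endpoint is walked (SCTP_ITERATOR_DO_SINGLE_INP),
 * otherwise every endpoint on the global list is visited
 * (SCTP_ITERATOR_DO_ALL_INP). The iterator is linked onto
 * sctppcbinfo.iteratorhead and driven by sctp_iterator_timer().
 * Returns -1 if no apply function is given, 0 otherwise.
 */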
5028 sctp_initiate_iterator(asoc_func af, uint32_t pcb_state, uint32_t asoc_state,
5029 void *argp, uint32_t argi, end_func ef,
5030 struct sctp_inpcb *s_inp)
5032 struct sctp_iterator *it=NULL;
5034 if (af == NULL) {
5035 return (-1);
5037 MALLOC(it, struct sctp_iterator *, sizeof(struct sctp_iterator), M_PCB,
5038 M_WAITOK);
5039 memset(it, 0, sizeof(*it));
5040 it->function_toapply = af;
5041 it->function_atend = ef;
5042 it->pointer = argp;
5043 it->val = argi;
5044 it->pcb_flags = pcb_state;
5045 it->asoc_state = asoc_state;
5046 if (s_inp) {
5047 it->inp = s_inp;
5048 it->iterator_flags = SCTP_ITERATOR_DO_SINGLE_INP;
5049 } else {
5050 SCTP_INP_INFO_RLOCK();
5051 it->inp = LIST_FIRST(&sctppcbinfo.listhead);
5052 SCTP_INP_INFO_RUNLOCK();
5053 it->iterator_flags = SCTP_ITERATOR_DO_ALL_INP;
5056 /* Init the timer */
5057 #if defined(__FreeBSD__) && __FreeBSD_version >= 500000
5058 callout_init(&it->tmr.timer, 0);
5059 #else
5060 callout_init(&it->tmr.timer);
5061 #endif
5062 /* add to the list of all iterators */
5063 SCTP_INP_INFO_WLOCK();
5064 LIST_INSERT_HEAD(&sctppcbinfo.iteratorhead, it, sctp_nxt_itr);
5065 SCTP_INP_INFO_WUNLOCK();
5066 crit_enter();
5067 sctp_iterator_timer(it);
5068 crit_exit();
5069 return (0);
5074 * Callout/Timer routines for OS that doesn't have them
5076 #ifdef _SCTP_NEEDS_CALLOUT_
5077 #ifndef __APPLE__
5078 extern int ticks;
5079 #endif
5081 void
5082 callout_init(struct callout *c)
5084 bzero(c, sizeof(*c));
5087 void
5088 callout_reset(struct callout *c, int to_ticks, void (*ftn)(void *), void *arg)
5090 crit_enter();
5091 if (c->c_flags & CALLOUT_PENDING)
5092 callout_stop(c);
5095 * We could spl down here and back up at the TAILQ_INSERT_TAIL,
5096 * but there's no point since doing this setup doesn't take much
5097 * time.
5099 if (to_ticks <= 0)
5100 to_ticks = 1;
5102 c->c_arg = arg;
5103 c->c_flags = (CALLOUT_ACTIVE | CALLOUT_PENDING);
5104 c->c_func = ftn;
5105 #ifdef __APPLE__
5106 c->c_time = to_ticks; /* just store the requested timeout */
5107 timeout(ftn, arg, to_ticks);
5108 #else
5109 c->c_time = ticks + to_ticks;
5110 TAILQ_INSERT_TAIL(&sctppcbinfo.callqueue, c, tqe);
5111 #endif
5112 crit_exit();
5116 callout_stop(struct callout *c)
5118 crit_enter();
5120 * Don't attempt to delete a callout that's not on the queue.
5122 if (!(c->c_flags & CALLOUT_PENDING)) {
5123 c->c_flags &= ~CALLOUT_ACTIVE;
5124 crit_exit();
5125 return (0);
5127 c->c_flags &= ~(CALLOUT_ACTIVE | CALLOUT_PENDING| CALLOUT_FIRED);
5128 #ifdef __APPLE__
5129 /* thread_call_cancel(c->c_call); */
5130 untimeout(c->c_func, c->c_arg);
5131 #else
5132 TAILQ_REMOVE(&sctppcbinfo.callqueue, c, tqe);
5133 c->c_func = NULL;
5134 #endif
5135 crit_exit();
5136 return (1);
5139 #if !defined(__APPLE__)
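/*
 * Poor-man's timer tick for stacks without native callouts: pull every
 * expired entry off the global callout queue onto a local list, then
 * invoke each handler with the critical section dropped around the
 * call. Entries cancelled in the meantime (CALLOUT_FIRED cleared by
 * callout_stop()) are skipped.
 */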
5140 void
5141 sctp_fasttim(void)
5143 struct callout *c, *n;
5144 struct calloutlist locallist;
5145 int inited = 0;
5147 crit_enter();
5148 /* run through and subtract and mark all callouts */
5149 c = TAILQ_FIRST(&sctppcbinfo.callqueue);
5150 while (c) {
5151 n = TAILQ_NEXT(c, tqe);
5152 if (c->c_time <= ticks) {
5153 c->c_flags |= CALLOUT_FIRED;
5154 c->c_time = 0;
5155 TAILQ_REMOVE(&sctppcbinfo.callqueue, c, tqe);
5156 if (inited == 0) {
5157 TAILQ_INIT(&locallist);
5158 inited = 1;
5160 /* move off of main list */
5161 TAILQ_INSERT_TAIL(&locallist, c, tqe);
5163 c = n;
5165 /* Now all the ones on the locallist must be called */
5166 if (inited) {
5167 c = TAILQ_FIRST(&locallist);
5168 while (c) {
5169 /* remove it */
5170 TAILQ_REMOVE(&locallist, c, tqe);
5171 /* now validate that it did not get canceled */
5172 if (c->c_flags & CALLOUT_FIRED) {
5173 c->c_flags &= ~CALLOUT_PENDING;
5174 crit_exit();
5175 (*c->c_func)(c->c_arg);
5176 crit_enter();
5178 c = TAILQ_FIRST(&locallist);
5181 crit_exit();
5183 #endif
5184 #endif /* _SCTP_NEEDS_CALLOUT_ */