libiconv: Uncomment iconv_vfs_refcount.
[dragonfly.git] / sys / netinet / sctp_output.c
blobd09eb4b25da3c2674867f448a0646ef4ca2294f9
1 /* $KAME: sctp_output.c,v 1.46 2005/03/06 16:04:17 itojun Exp $ */
2 /* $DragonFly: src/sys/netinet/sctp_output.c,v 1.14 2008/04/20 13:44:25 swildner Exp $ */
4 /*
5 * Copyright (C) 2002, 2003, 2004 Cisco Systems Inc,
6 * All rights reserved.
8 * Redistribution and use in source and binary forms, with or without
9 * modification, are permitted provided that the following conditions
10 * are met:
11 * 1. Redistributions of source code must retain the above copyright
12 * notice, this list of conditions and the following disclaimer.
13 * 2. Redistributions in binary form must reproduce the above copyright
14 * notice, this list of conditions and the following disclaimer in the
15 * documentation and/or other materials provided with the distribution.
16 * 3. Neither the name of the project nor the names of its contributors
17 * may be used to endorse or promote products derived from this software
18 * without specific prior written permission.
20 * THIS SOFTWARE IS PROVIDED BY THE PROJECT AND CONTRIBUTORS ``AS IS'' AND
21 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
22 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
23 * ARE DISCLAIMED. IN NO EVENT SHALL THE PROJECT OR CONTRIBUTORS BE LIABLE
24 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
25 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
26 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
27 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
28 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
29 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
30 * SUCH DAMAGE.
33 #if !(defined(__OpenBSD__) || defined (__APPLE__))
34 #include "opt_ipsec.h"
35 #endif
36 #if defined(__FreeBSD__) || defined(__DragonFly__)
37 #include "opt_compat.h"
38 #include "opt_inet6.h"
39 #include "opt_inet.h"
40 #endif
41 #if defined(__NetBSD__)
42 #include "opt_inet.h"
43 #endif
44 #ifdef __APPLE__
45 #include <sctp.h>
46 #elif !defined(__OpenBSD__)
47 #include "opt_sctp.h"
48 #endif
49 #include <sys/param.h>
50 #include <sys/systm.h>
51 #include <sys/malloc.h>
52 #include <sys/mbuf.h>
53 #ifndef __OpenBSD__
54 #include <sys/domain.h>
55 #endif
56 #include <sys/protosw.h>
57 #include <sys/socket.h>
58 #include <sys/socketvar.h>
59 #include <sys/proc.h>
60 #include <sys/kernel.h>
61 #include <sys/sysctl.h>
62 #include <sys/resourcevar.h>
63 #include <sys/uio.h>
64 #ifdef INET6
65 #include <sys/domain.h>
66 #endif
67 #include <sys/thread2.h>
68 #include <sys/socketvar2.h>
70 #if (defined(__FreeBSD__) && __FreeBSD_version >= 500000)
71 #include <sys/limits.h>
72 #else
73 #include <machine/limits.h>
74 #endif
75 #include <machine/cpu.h>
77 #include <net/if.h>
78 #include <net/if_types.h>
80 #if defined(__FreeBSD__) || defined(__DragonFly__)
81 #include <net/if_var.h>
82 #endif
84 #include <net/route.h>
86 #include <netinet/in.h>
87 #include <netinet/in_systm.h>
88 #include <netinet/ip.h>
89 #include <netinet/in_pcb.h>
90 #include <netinet/in_var.h>
91 #include <netinet/ip_var.h>
93 #ifdef INET6
94 #include <netinet/ip6.h>
95 #include <netinet6/ip6_var.h>
96 #include <netinet6/scope6_var.h>
97 #include <netinet6/nd6.h>
99 #if defined(__FreeBSD__) || defined(__NetBSD__) || defined(__DragonFly__)
100 #include <netinet6/in6_pcb.h>
101 #elif defined(__OpenBSD__)
102 #include <netinet/in_pcb.h>
103 #endif
105 #include <netinet/icmp6.h>
107 #endif /* INET6 */
109 #include <net/net_osdep.h>
111 #if defined(HAVE_NRL_INPCB) || defined(__FreeBSD__) || defined(__DragonFly__)
112 #ifndef in6pcb
113 #define in6pcb inpcb
114 #endif
115 #endif
117 #include <netinet/sctp_pcb.h>
119 #ifdef IPSEC
120 #ifndef __OpenBSD__
121 #include <netinet6/ipsec.h>
122 #include <netproto/key/key.h>
123 #else
124 #undef IPSEC
125 #endif
126 #endif /* IPSEC */
128 #include <netinet/sctp_var.h>
129 #include <netinet/sctp_header.h>
130 #include <netinet/sctputil.h>
131 #include <netinet/sctp_pcb.h>
132 #include <netinet/sctp_output.h>
133 #include <netinet/sctp_uio.h>
134 #include <netinet/sctputil.h>
135 #include <netinet/sctp_hashdriver.h>
136 #include <netinet/sctp_timer.h>
137 #include <netinet/sctp_asconf.h>
138 #include <netinet/sctp_indata.h>
140 #ifdef SCTP_DEBUG
141 extern uint32_t sctp_debug_on;
142 #endif
144 extern int sctp_peer_chunk_oh;
146 static int
147 sctp_find_cmsg(int c_type, void *data, struct mbuf *control, int cpsize)
149 struct cmsghdr cmh;
150 int tlen, at;
152 tlen = control->m_len;
153 at = 0;
155 * Independent of how many mbufs, find the c_type inside the control
156 * structure and copy out the data.
158 while (at < tlen) {
159 if ((tlen-at) < (int)CMSG_ALIGN(sizeof(cmh))) {
160 /* not enough room for one more we are done. */
161 return (0);
163 m_copydata(control, at, sizeof(cmh), (caddr_t)&cmh);
164 if ((cmh.cmsg_len + at) > tlen) {
166 * this is real messed up since there is not enough
167 * data here to cover the cmsg header. We are done.
169 return (0);
171 if ((cmh.cmsg_level == IPPROTO_SCTP) &&
172 (c_type == cmh.cmsg_type)) {
173 /* found the one we want, copy it out */
174 at += CMSG_ALIGN(sizeof(struct cmsghdr));
175 if ((int)(cmh.cmsg_len - CMSG_ALIGN(sizeof(struct cmsghdr))) < cpsize) {
177 * space of cmsg_len after header not
178 * big enough
180 return (0);
182 m_copydata(control, at, cpsize, data);
183 return (1);
184 } else {
185 at += CMSG_ALIGN(cmh.cmsg_len);
186 if (cmh.cmsg_len == 0) {
187 break;
191 /* not found */
192 return (0);
/*
 * Append an SCTP address parameter TLV describing 'ifa' to the end of
 * mbuf chain 'm' (used when building the local address list for
 * INIT/INIT-ACK chunks).  Returns the mbuf the parameter landed in: 'm'
 * itself when it had trailing space (or on failure / unknown address
 * family, in which case nothing is added), otherwise a freshly
 * allocated mbuf appended to the chain.
 */
static struct mbuf *
sctp_add_addr_to_mbuf(struct mbuf *m, struct ifaddr *ifa)
{
	struct sctp_paramhdr *parmh;
	struct mbuf *mret;
	int len;

	/* Only IPv4 and IPv6 addresses can be encoded. */
	if (ifa->ifa_addr->sa_family == AF_INET) {
		len = sizeof(struct sctp_ipv4addr_param);
	} else if (ifa->ifa_addr->sa_family == AF_INET6) {
		len = sizeof(struct sctp_ipv6addr_param);
	} else {
		/* unknown type */
		return (m);
	}
	if (M_TRAILINGSPACE(m) >= len) {
		/* easy side we just drop it on the end */
		parmh = (struct sctp_paramhdr *)(m->m_data + m->m_len);
		mret = m;
	} else {
		/* Need more space: walk to the tail and append a new mbuf. */
		mret = m;
		while (mret->m_next != NULL) {
			mret = mret->m_next;
		}
		MGET(mret->m_next, MB_DONTWAIT, MT_DATA);
		if (mret->m_next == NULL) {
			/* We are hosed, can't add more addresses */
			return (m);
		}
		mret = mret->m_next;
		/*
		 * NOTE(review): the 'mret->m_len += len' below relies on a
		 * freshly MGET'd mbuf starting with m_len == 0 — confirm
		 * against the DragonFly mbuf allocator.
		 */
		parmh = mtod(mret, struct sctp_paramhdr *);
	}
	/* now add the parameter (type/length header plus raw address) */
	if (ifa->ifa_addr->sa_family == AF_INET) {
		struct sctp_ipv4addr_param *ipv4p;
		struct sockaddr_in *sin;

		sin = (struct sockaddr_in *)ifa->ifa_addr;
		ipv4p = (struct sctp_ipv4addr_param *)parmh;
		parmh->param_type = htons(SCTP_IPV4_ADDRESS);
		parmh->param_length = htons(len);
		ipv4p->addr = sin->sin_addr.s_addr;
		mret->m_len += len;
	} else if (ifa->ifa_addr->sa_family == AF_INET6) {
		struct sctp_ipv6addr_param *ipv6p;
		struct sockaddr_in6 *sin6;

		sin6 = (struct sockaddr_in6 *)ifa->ifa_addr;
		ipv6p = (struct sctp_ipv6addr_param *)parmh;
		parmh->param_type = htons(SCTP_IPV6_ADDRESS);
		parmh->param_length = htons(len);
		memcpy(ipv6p->addr, &sin6->sin6_addr,
		    sizeof(ipv6p->addr));
		/* clear embedded scope in the address */
		in6_clearscope((struct in6_addr *)ipv6p->addr);
		mret->m_len += len;
	} else {
		/* unreachable given the check above, kept for safety */
		return (m);
	}
	return (mret);
}
258 static struct mbuf *
259 sctp_add_cookie(struct sctp_inpcb *inp, struct mbuf *init, int init_offset,
260 struct mbuf *initack, int initack_offset, struct sctp_state_cookie *stc_in)
262 struct mbuf *copy_init, *copy_initack, *m_at, *sig, *mret;
263 struct sctp_state_cookie *stc;
264 struct sctp_paramhdr *ph;
265 uint8_t *signature;
266 int sig_offset;
267 uint16_t cookie_sz;
269 mret = NULL;
271 MGET(mret, MB_DONTWAIT, MT_DATA);
272 if (mret == NULL) {
273 return (NULL);
275 copy_init = sctp_m_copym(init, init_offset, M_COPYALL, MB_DONTWAIT);
276 if (copy_init == NULL) {
277 sctp_m_freem(mret);
278 return (NULL);
280 copy_initack = sctp_m_copym(initack, initack_offset, M_COPYALL,
281 MB_DONTWAIT);
282 if (copy_initack == NULL) {
283 sctp_m_freem(mret);
284 sctp_m_freem(copy_init);
285 return (NULL);
287 /* easy side we just drop it on the end */
288 ph = mtod(mret, struct sctp_paramhdr *);
289 mret->m_len = sizeof(struct sctp_state_cookie) +
290 sizeof(struct sctp_paramhdr);
291 stc = (struct sctp_state_cookie *)((caddr_t)ph +
292 sizeof(struct sctp_paramhdr));
293 ph->param_type = htons(SCTP_STATE_COOKIE);
294 ph->param_length = 0; /* fill in at the end */
295 /* Fill in the stc cookie data */
296 *stc = *stc_in;
298 /* tack the INIT and then the INIT-ACK onto the chain */
299 cookie_sz = 0;
300 m_at = mret;
301 for (m_at = mret; m_at; m_at = m_at->m_next) {
302 cookie_sz += m_at->m_len;
303 if (m_at->m_next == NULL) {
304 m_at->m_next = copy_init;
305 break;
309 for (m_at = copy_init; m_at; m_at = m_at->m_next) {
310 cookie_sz += m_at->m_len;
311 if (m_at->m_next == NULL) {
312 m_at->m_next = copy_initack;
313 break;
317 for (m_at = copy_initack; m_at; m_at = m_at->m_next) {
318 cookie_sz += m_at->m_len;
319 if (m_at->m_next == NULL) {
320 break;
323 MGET(sig, MB_DONTWAIT, MT_DATA);
324 if (sig == NULL) {
325 /* no space */
326 sctp_m_freem(mret);
327 sctp_m_freem(copy_init);
328 sctp_m_freem(copy_initack);
329 return (NULL);
331 sig->m_len = 0;
332 m_at->m_next = sig;
333 sig_offset = 0;
334 signature = (uint8_t *)(mtod(sig, caddr_t) + sig_offset);
335 /* Time to sign the cookie */
336 sctp_hash_digest_m((char *)inp->sctp_ep.secret_key[
337 (int)(inp->sctp_ep.current_secret_number)],
338 SCTP_SECRET_SIZE, mret, sizeof(struct sctp_paramhdr),
339 (uint8_t *)signature);
340 sig->m_len += SCTP_SIGNATURE_SIZE;
341 cookie_sz += SCTP_SIGNATURE_SIZE;
343 ph->param_length = htons(cookie_sz);
344 return (mret);
348 static struct sockaddr_in *
349 sctp_is_v4_ifa_addr_prefered (struct ifaddr *ifa, uint8_t loopscope, uint8_t ipv4_scope, uint8_t *sin_loop, uint8_t *sin_local)
351 struct sockaddr_in *sin;
353 * Here we determine if its a prefered address. A
354 * prefered address means it is the same scope or
355 * higher scope then the destination.
356 * L = loopback, P = private, G = global
357 * -----------------------------------------
358 * src | dest | result
359 *-----------------------------------------
360 * L | L | yes
361 *-----------------------------------------
362 * P | L | yes
363 *-----------------------------------------
364 * G | L | yes
365 *-----------------------------------------
366 * L | P | no
367 *-----------------------------------------
368 * P | P | yes
369 *-----------------------------------------
370 * G | P | no
371 *-----------------------------------------
372 * L | G | no
373 *-----------------------------------------
374 * P | G | no
375 *-----------------------------------------
376 * G | G | yes
377 *-----------------------------------------
380 if (ifa->ifa_addr->sa_family != AF_INET) {
381 /* forget non-v4 */
382 return (NULL);
384 /* Ok the address may be ok */
385 sin = (struct sockaddr_in *)ifa->ifa_addr;
386 if (sin->sin_addr.s_addr == 0) {
387 return (NULL);
389 *sin_local = *sin_loop = 0;
390 if ((ifa->ifa_ifp->if_type == IFT_LOOP) ||
391 (IN4_ISLOOPBACK_ADDRESS(&sin->sin_addr))) {
392 *sin_loop = 1;
393 *sin_local = 1;
395 if ((IN4_ISPRIVATE_ADDRESS(&sin->sin_addr))) {
396 *sin_local = 1;
398 if (!loopscope && *sin_loop) {
399 /* Its a loopback address and we don't have loop scope */
400 return (NULL);
402 if (!ipv4_scope && *sin_local) {
403 /* Its a private address, and we don't have private address scope */
404 return (NULL);
406 if (((ipv4_scope == 0) && (loopscope == 0)) && (*sin_local)) {
407 /* its a global src and a private dest */
408 return (NULL);
410 /* its a prefered address */
411 return (sin);
414 static struct sockaddr_in *
415 sctp_is_v4_ifa_addr_acceptable (struct ifaddr *ifa, uint8_t loopscope, uint8_t ipv4_scope, uint8_t *sin_loop, uint8_t *sin_local)
417 struct sockaddr_in *sin;
419 * Here we determine if its a acceptable address. A
420 * acceptable address means it is the same scope or
421 * higher scope but we can allow for NAT which means
422 * its ok to have a global dest and a private src.
424 * L = loopback, P = private, G = global
425 * -----------------------------------------
426 * src | dest | result
427 *-----------------------------------------
428 * L | L | yes
429 *-----------------------------------------
430 * P | L | yes
431 *-----------------------------------------
432 * G | L | yes
433 *-----------------------------------------
434 * L | P | no
435 *-----------------------------------------
436 * P | P | yes
437 *-----------------------------------------
438 * G | P | yes - probably this won't work.
439 *-----------------------------------------
440 * L | G | no
441 *-----------------------------------------
442 * P | G | yes
443 *-----------------------------------------
444 * G | G | yes
445 *-----------------------------------------
448 if (ifa->ifa_addr->sa_family != AF_INET) {
449 /* forget non-v4 */
450 return (NULL);
452 /* Ok the address may be ok */
453 sin = (struct sockaddr_in *)ifa->ifa_addr;
454 if (sin->sin_addr.s_addr == 0) {
455 return (NULL);
457 *sin_local = *sin_loop = 0;
458 if ((ifa->ifa_ifp->if_type == IFT_LOOP) ||
459 (IN4_ISLOOPBACK_ADDRESS(&sin->sin_addr))) {
460 *sin_loop = 1;
461 *sin_local = 1;
463 if ((IN4_ISPRIVATE_ADDRESS(&sin->sin_addr))) {
464 *sin_local = 1;
466 if (!loopscope && *sin_loop) {
467 /* Its a loopback address and we don't have loop scope */
468 return (NULL);
470 /* its an acceptable address */
471 return (sin);
/*
 * This treats the address list on the association as a restricted list
 * (negative list).  If the passed address is listed, then the address
 * is NOT allowed on the association.
 *
 * Returns 1 when 'addr' matches an entry on stcb's local address list,
 * 0 otherwise (including when there is no TCB at all, i.e. no
 * restrictions apply).
 */
int
sctp_is_addr_restricted(struct sctp_tcb *stcb, struct sockaddr *addr)
{
	struct sctp_laddr *laddr;
#ifdef SCTP_DEBUG
	int cnt=0;
#endif
	if (stcb == NULL) {
		/* There are no restrictions, no TCB :-) */
		return (0);
	}
#ifdef SCTP_DEBUG
	/* Debug only: report how long the restricted list is. */
	LIST_FOREACH(laddr, &stcb->asoc.sctp_local_addr_list, sctp_nxt_addr) {
		cnt++;
	}
	if (sctp_debug_on & SCTP_DEBUG_OUTPUT4) {
		kprintf("There are %d addresses on the restricted list\n", cnt);
	}
	cnt = 0;
#endif
	LIST_FOREACH(laddr, &stcb->asoc.sctp_local_addr_list, sctp_nxt_addr) {
		if (laddr->ifa == NULL) {
			/* Stale entry whose ifaddr was torn down. */
#ifdef SCTP_DEBUG
			if (sctp_debug_on & SCTP_DEBUG_OUTPUT1) {
				kprintf("Help I have fallen and I can't get up!\n");
			}
#endif
			continue;
		}
#ifdef SCTP_DEBUG
		if (sctp_debug_on & SCTP_DEBUG_OUTPUT4) {
			cnt++;
			kprintf("Restricted address[%d]:", cnt);
			sctp_print_address(laddr->ifa->ifa_addr);
		}
#endif
		if (sctp_cmpaddr(addr, laddr->ifa->ifa_addr) == 1) {
			/* Yes it is on the list */
			return (1);
		}
	}
	return (0);
}
523 static int
524 sctp_is_addr_in_ep(struct sctp_inpcb *inp, struct ifaddr *ifa)
526 struct sctp_laddr *laddr;
528 if (ifa == NULL)
529 return (0);
530 LIST_FOREACH(laddr, &inp->sctp_addr_list, sctp_nxt_addr) {
531 if (laddr->ifa == NULL) {
532 #ifdef SCTP_DEBUG
533 if (sctp_debug_on & SCTP_DEBUG_OUTPUT1) {
534 kprintf("Help I have fallen and I can't get up!\n");
536 #endif
537 continue;
539 if (laddr->ifa->ifa_addr == NULL)
540 continue;
541 if (laddr->ifa == ifa)
542 /* same pointer */
543 return (1);
544 if (laddr->ifa->ifa_addr->sa_family != ifa->ifa_addr->sa_family) {
545 /* skip non compatible address comparison */
546 continue;
548 if (sctp_cmpaddr(ifa->ifa_addr, laddr->ifa->ifa_addr) == 1) {
549 /* Yes it is restricted */
550 return (1);
553 return (0);
/*
 * Source address selection for a bound-specific endpoint with no TCB.
 * Preference order:
 *   1. a prefered address on the outgoing interface that is bound to
 *      the endpoint,
 *   2. an acceptable address on the outgoing interface bound to the ep,
 *   3. any prefered address bound to the endpoint,
 *   4. any acceptable address bound to the endpoint.
 * Returns INADDR_ANY (all zeros) if nothing usable is found — the
 * caller will then likely generate a packet that draws an ABORT.
 */
static struct in_addr
sctp_choose_v4_boundspecific_inp(struct sctp_inpcb *inp,
    struct rtentry *rt,
    uint8_t ipv4_scope,
    uint8_t loopscope)
{
	struct in_addr ans;
	struct sctp_laddr *laddr;
	struct sockaddr_in *sin;
	struct ifnet *ifn;
	uint8_t sin_loop, sin_local;

	/* first question, is the ifn we will emit on
	 * in our list, if so, we want that one.
	 */
	ifn = rt->rt_ifp;
	if (ifn) {
		struct ifaddr_container *ifac;

		/* is a prefered one on the interface we route out? */
		TAILQ_FOREACH(ifac, &ifn->if_addrheads[mycpuid], ifa_link) {
			struct ifaddr *ifa = ifac->ifa;

			sin = sctp_is_v4_ifa_addr_prefered (ifa, loopscope, ipv4_scope, &sin_loop, &sin_local);
			if (sin == NULL)
				continue;
			if (sctp_is_addr_in_ep(inp, ifa)) {
				return (sin->sin_addr);
			}
		}
		/* is an acceptable one on the interface we route out? */
		TAILQ_FOREACH(ifac, &ifn->if_addrheads[mycpuid], ifa_link) {
			struct ifaddr *ifa = ifac->ifa;

			sin = sctp_is_v4_ifa_addr_acceptable (ifa, loopscope, ipv4_scope, &sin_loop, &sin_local);
			if (sin == NULL)
				continue;
			if (sctp_is_addr_in_ep(inp, ifa)) {
				return (sin->sin_addr);
			}
		}
	}
	/*
	 * ok, what about a prefered address in the inp?  The scan stops
	 * at next_addr_touse, which rotates usage across bound addresses.
	 */
	for (laddr = LIST_FIRST(&inp->sctp_addr_list);
	     laddr && (laddr != inp->next_addr_touse);
	     laddr = LIST_NEXT(laddr, sctp_nxt_addr)) {
		if (laddr->ifa == NULL) {
			/* address has been removed */
			continue;
		}
		sin = sctp_is_v4_ifa_addr_prefered (laddr->ifa, loopscope, ipv4_scope, &sin_loop, &sin_local);
		if (sin == NULL)
			continue;
		return (sin->sin_addr);
	}
	/* ok, what about an acceptable address in the inp */
	for (laddr = LIST_FIRST(&inp->sctp_addr_list);
	     laddr && (laddr != inp->next_addr_touse);
	     laddr = LIST_NEXT(laddr, sctp_nxt_addr)) {
		if (laddr->ifa == NULL) {
			/* address has been removed */
			continue;
		}
		sin = sctp_is_v4_ifa_addr_acceptable (laddr->ifa, loopscope, ipv4_scope, &sin_loop, &sin_local);
		if (sin == NULL)
			continue;
		return (sin->sin_addr);
	}

	/* no address bound can be a source for the destination we are in trouble */
#ifdef SCTP_DEBUG
	if (sctp_debug_on & SCTP_DEBUG_OUTPUT1) {
		kprintf("Src address selection for EP, no acceptable src address found for address\n");
	}
#endif
	memset(&ans, 0, sizeof(ans));
	return (ans);
}
/*
 * Source address selection for a bound-specific endpoint WITH a TCB.
 * Two regimes:
 *
 *  - ASCONF allowed (SCTP_PCB_FLAGS_DO_ASCONF): the endpoint's bound
 *    address list is authoritative and the association's list is a
 *    negative ("restricted") list — entries there may not be used
 *    unless non_asoc_addr_ok is set.  The scan rotates through the
 *    bound list starting at asoc.last_used_address (prefered first,
 *    then acceptable), wrapping to the list head once.
 *
 *  - ASCONF not allowed: the association's address list is a positive
 *    list; only those addresses may be sourced.  Preference: on the
 *    emit interface (prefered, then acceptable), then any prefered,
 *    then any acceptable.
 *
 * Returns INADDR_ANY (all zeros) when nothing usable is found.
 * Note this routine updates asoc.last_used_address as a side effect of
 * the rotation.
 */
static struct in_addr
sctp_choose_v4_boundspecific_stcb(struct sctp_inpcb *inp,
    struct sctp_tcb *stcb,
    struct sctp_nets *net,
    struct rtentry *rt,
    uint8_t ipv4_scope,
    uint8_t loopscope,
    int non_asoc_addr_ok)
{
	/*
	 * Here we have two cases, bound all asconf
	 * allowed. bound all asconf not allowed.
	 */
	struct sctp_laddr *laddr, *starting_point;
	struct in_addr ans;
	struct ifnet *ifn;
	uint8_t sin_loop, sin_local, start_at_beginning=0;
	struct sockaddr_in *sin;

	/* first question, is the ifn we will emit on
	 * in our list, if so, we want that one.
	 */
	ifn = rt->rt_ifp;

	if (inp->sctp_flags & SCTP_PCB_FLAGS_DO_ASCONF) {
		/*
		 * Here we use the list of addresses on the endpoint. Then
		 * the addresses listed on the "restricted" list is just that,
		 * address that have not been added and can't be used (unless
		 * the non_asoc_addr_ok is set).
		 */
#ifdef SCTP_DEBUG
		if (sctp_debug_on & SCTP_DEBUG_OUTPUT1) {
			kprintf("Have a STCB - asconf allowed, not bound all have a netgative list\n");
		}
#endif
		/* first question, is the ifn we will emit on
		 * in our list, if so, we want that one.
		 */
		if (ifn) {
			struct ifaddr_container *ifac;

			/* first try for an prefered address on the ep */
			TAILQ_FOREACH(ifac, &ifn->if_addrheads[mycpuid], ifa_link) {
				struct ifaddr *ifa = ifac->ifa;

				if (sctp_is_addr_in_ep(inp, ifa)) {
					sin = sctp_is_v4_ifa_addr_prefered (ifa, loopscope, ipv4_scope, &sin_loop, &sin_local);
					if (sin == NULL)
						continue;
					if ((non_asoc_addr_ok == 0) &&
					    (sctp_is_addr_restricted(stcb, (struct sockaddr *)sin))) {
						/* on the no-no list */
						continue;
					}
					return (sin->sin_addr);
				}
			}
			/* next try for an acceptable address on the ep */
			TAILQ_FOREACH(ifac, &ifn->if_addrheads[mycpuid], ifa_link) {
				struct ifaddr *ifa = ifac->ifa;

				if (sctp_is_addr_in_ep(inp, ifa)) {
					sin = sctp_is_v4_ifa_addr_acceptable (ifa, loopscope, ipv4_scope, &sin_loop, &sin_local);
					if (sin == NULL)
						continue;
					if ((non_asoc_addr_ok == 0) &&
					    (sctp_is_addr_restricted(stcb, (struct sockaddr *)sin))) {
						/* on the no-no list */
						continue;
					}
					return (sin->sin_addr);
				}
			}
		}
		/* if we can't find one like that then we must
		 * look at all addresses bound to pick one at
		 * first prefereable then secondly acceptable.
		 */
		starting_point = stcb->asoc.last_used_address;
	sctpv4_from_the_top:
		if (stcb->asoc.last_used_address == NULL) {
			start_at_beginning=1;
			stcb->asoc.last_used_address = LIST_FIRST(&inp->sctp_addr_list);
		}
		/* search beginning with the last used address */
		for (laddr = stcb->asoc.last_used_address; laddr;
		     laddr = LIST_NEXT(laddr, sctp_nxt_addr)) {
			if (laddr->ifa == NULL) {
				/* address has been removed */
				continue;
			}
			sin = sctp_is_v4_ifa_addr_prefered (laddr->ifa, loopscope, ipv4_scope, &sin_loop, &sin_local);
			if (sin == NULL)
				continue;
			if ((non_asoc_addr_ok == 0) &&
			    (sctp_is_addr_restricted(stcb, (struct sockaddr *)sin))) {
				/* on the no-no list */
				continue;
			}
			return (sin->sin_addr);
		}
		if (start_at_beginning == 0) {
			/* wrap once: restart the scan from the list head */
			stcb->asoc.last_used_address = NULL;
			goto sctpv4_from_the_top;
		}
		/* now try for any higher scope than the destination */
		stcb->asoc.last_used_address = starting_point;
		start_at_beginning = 0;
	sctpv4_from_the_top2:
		if (stcb->asoc.last_used_address == NULL) {
			start_at_beginning=1;
			stcb->asoc.last_used_address = LIST_FIRST(&inp->sctp_addr_list);
		}
		/* search beginning with the last used address */
		for (laddr = stcb->asoc.last_used_address; laddr;
		     laddr = LIST_NEXT(laddr, sctp_nxt_addr)) {
			if (laddr->ifa == NULL) {
				/* address has been removed */
				continue;
			}
			sin = sctp_is_v4_ifa_addr_acceptable (laddr->ifa, loopscope, ipv4_scope, &sin_loop, &sin_local);
			if (sin == NULL)
				continue;
			if ((non_asoc_addr_ok == 0) &&
			    (sctp_is_addr_restricted(stcb, (struct sockaddr *)sin))) {
				/* on the no-no list */
				continue;
			}
			return (sin->sin_addr);
		}
		if (start_at_beginning == 0) {
			/* wrap once: restart the scan from the list head */
			stcb->asoc.last_used_address = NULL;
			goto sctpv4_from_the_top2;
		}
	} else {
		/*
		 * Here we have an address list on the association, thats the
		 * only valid source addresses that we can use.
		 */
#ifdef SCTP_DEBUG
		if (sctp_debug_on & SCTP_DEBUG_OUTPUT1) {
			kprintf("Have a STCB - no asconf allowed, not bound all have a positive list\n");
		}
#endif
		/* First look at all addresses for one that is on
		 * the interface we route out
		 */
		LIST_FOREACH(laddr, &stcb->asoc.sctp_local_addr_list,
		    sctp_nxt_addr) {
			if (laddr->ifa == NULL) {
				/* address has been removed */
				continue;
			}
			sin = sctp_is_v4_ifa_addr_prefered (laddr->ifa, loopscope, ipv4_scope, &sin_loop, &sin_local);
			if (sin == NULL)
				continue;
			/* first question, is laddr->ifa an address associated with the emit interface */
			if (ifn) {
				struct ifaddr_container *ifac;

				TAILQ_FOREACH(ifac, &ifn->if_addrheads[mycpuid], ifa_link) {
					struct ifaddr *ifa = ifac->ifa;

					if (laddr->ifa == ifa) {
						sin = (struct sockaddr_in *)laddr->ifa->ifa_addr;
						return (sin->sin_addr);
					}
					if (sctp_cmpaddr(ifa->ifa_addr, laddr->ifa->ifa_addr) == 1) {
						sin = (struct sockaddr_in *)laddr->ifa->ifa_addr;
						return (sin->sin_addr);
					}
				}
			}
		}
		/* what about an acceptable one on the interface? */
		LIST_FOREACH(laddr, &stcb->asoc.sctp_local_addr_list,
		    sctp_nxt_addr) {
			if (laddr->ifa == NULL) {
				/* address has been removed */
				continue;
			}
			sin = sctp_is_v4_ifa_addr_acceptable (laddr->ifa, loopscope, ipv4_scope, &sin_loop, &sin_local);
			if (sin == NULL)
				continue;
			/* first question, is laddr->ifa an address associated with the emit interface */
			if (ifn) {
				struct ifaddr_container *ifac;

				TAILQ_FOREACH(ifac, &ifn->if_addrheads[mycpuid], ifa_link) {
					struct ifaddr *ifa = ifac->ifa;

					if (laddr->ifa == ifa) {
						sin = (struct sockaddr_in *)laddr->ifa->ifa_addr;
						return (sin->sin_addr);
					}
					if (sctp_cmpaddr(ifa->ifa_addr, laddr->ifa->ifa_addr) == 1) {
						sin = (struct sockaddr_in *)laddr->ifa->ifa_addr;
						return (sin->sin_addr);
					}
				}
			}
		}
		/* ok, next one that is preferable in general */
		LIST_FOREACH(laddr, &stcb->asoc.sctp_local_addr_list,
		    sctp_nxt_addr) {
			if (laddr->ifa == NULL) {
				/* address has been removed */
				continue;
			}
			sin = sctp_is_v4_ifa_addr_prefered (laddr->ifa, loopscope, ipv4_scope, &sin_loop, &sin_local);
			if (sin == NULL)
				continue;
			return (sin->sin_addr);
		}
		/* last, what about one that is acceptable */
		LIST_FOREACH(laddr, &stcb->asoc.sctp_local_addr_list,
		    sctp_nxt_addr) {
			if (laddr->ifa == NULL) {
				/* address has been removed */
				continue;
			}
			sin = sctp_is_v4_ifa_addr_acceptable (laddr->ifa, loopscope, ipv4_scope, &sin_loop, &sin_local);
			if (sin == NULL)
				continue;
			return (sin->sin_addr);
		}
	}
	/* nothing usable — return INADDR_ANY */
	memset(&ans, 0, sizeof(ans));
	return (ans);
}
877 static struct sockaddr_in *
878 sctp_select_v4_nth_prefered_addr_from_ifn_boundall (struct ifnet *ifn, struct sctp_tcb *stcb, int non_asoc_addr_ok,
879 uint8_t loopscope, uint8_t ipv4_scope, int cur_addr_num)
881 struct ifaddr_container *ifac;
882 struct sockaddr_in *sin;
883 uint8_t sin_loop, sin_local;
884 int num_eligible_addr = 0;
886 TAILQ_FOREACH(ifac, &ifn->if_addrheads[mycpuid], ifa_link) {
887 struct ifaddr *ifa = ifac->ifa;
889 sin = sctp_is_v4_ifa_addr_prefered (ifa, loopscope, ipv4_scope, &sin_loop, &sin_local);
890 if (sin == NULL)
891 continue;
892 if (stcb) {
893 if ((non_asoc_addr_ok == 0) && sctp_is_addr_restricted(stcb, (struct sockaddr *)sin)) {
894 /* It is restricted for some reason.. probably
895 * not yet added.
897 continue;
900 if (cur_addr_num == num_eligible_addr) {
901 return (sin);
904 return (NULL);
908 static int
909 sctp_count_v4_num_prefered_boundall (struct ifnet *ifn, struct sctp_tcb *stcb, int non_asoc_addr_ok,
910 uint8_t loopscope, uint8_t ipv4_scope, uint8_t *sin_loop, uint8_t *sin_local)
912 struct ifaddr_container *ifac;
913 struct sockaddr_in *sin;
914 int num_eligible_addr = 0;
916 TAILQ_FOREACH(ifac, &ifn->if_addrheads[mycpuid], ifa_link) {
917 struct ifaddr *ifa = ifac->ifa;
919 sin = sctp_is_v4_ifa_addr_prefered (ifa, loopscope, ipv4_scope, sin_loop, sin_local);
920 if (sin == NULL)
921 continue;
922 if (stcb) {
923 if ((non_asoc_addr_ok == 0) && sctp_is_addr_restricted(stcb, (struct sockaddr *)sin)) {
924 /* It is restricted for some reason.. probably
925 * not yet added.
927 continue;
930 num_eligible_addr++;
932 return (num_eligible_addr);
936 static struct in_addr
937 sctp_choose_v4_boundall(struct sctp_inpcb *inp,
938 struct sctp_tcb *stcb,
939 struct sctp_nets *net,
940 struct rtentry *rt,
941 uint8_t ipv4_scope,
942 uint8_t loopscope,
943 int non_asoc_addr_ok)
945 int cur_addr_num=0, num_prefered=0;
946 uint8_t sin_loop, sin_local;
947 struct ifnet *ifn;
948 struct sockaddr_in *sin;
949 struct in_addr ans;
950 struct ifaddr_container *ifac;
952 * For v4 we can use (in boundall) any address in the association. If
953 * non_asoc_addr_ok is set we can use any address (at least in theory).
954 * So we look for prefered addresses first. If we find one, we use it.
955 * Otherwise we next try to get an address on the interface, which we
956 * should be able to do (unless non_asoc_addr_ok is false and we are
957 * routed out that way). In these cases where we can't use the address
958 * of the interface we go through all the ifn's looking for an address
959 * we can use and fill that in. Punting means we send back address
960 * 0, which will probably cause problems actually since then IP will
961 * fill in the address of the route ifn, which means we probably already
962 * rejected it.. i.e. here comes an abort :-<.
964 ifn = rt->rt_ifp;
965 if (net) {
966 cur_addr_num = net->indx_of_eligible_next_to_use;
968 if (ifn == NULL) {
969 goto bound_all_v4_plan_c;
971 num_prefered = sctp_count_v4_num_prefered_boundall (ifn, stcb, non_asoc_addr_ok, loopscope, ipv4_scope, &sin_loop, &sin_local);
972 #ifdef SCTP_DEBUG
973 if (sctp_debug_on & SCTP_DEBUG_OUTPUT1) {
974 kprintf("Found %d preferred source addresses\n", num_prefered);
976 #endif
977 if (num_prefered == 0) {
978 /* no eligible addresses, we must use some other
979 * interface address if we can find one.
981 goto bound_all_v4_plan_b;
983 /* Ok we have num_eligible_addr set with how many we can use,
984 * this may vary from call to call due to addresses being deprecated etc..
986 if (cur_addr_num >= num_prefered) {
987 cur_addr_num = 0;
989 /* select the nth address from the list (where cur_addr_num is the nth) and
990 * 0 is the first one, 1 is the second one etc...
992 #ifdef SCTP_DEBUG
993 if (sctp_debug_on & SCTP_DEBUG_OUTPUT1) {
994 kprintf("cur_addr_num:%d\n", cur_addr_num);
996 #endif
997 sin = sctp_select_v4_nth_prefered_addr_from_ifn_boundall (ifn, stcb, non_asoc_addr_ok, loopscope,
998 ipv4_scope, cur_addr_num);
1000 /* if sin is NULL something changed??, plan_a now */
1001 if (sin) {
1002 return (sin->sin_addr);
1006 * plan_b: Look at the interface that we emit on
1007 * and see if we can find an acceptable address.
1009 bound_all_v4_plan_b:
1010 TAILQ_FOREACH(ifac, &ifn->if_addrheads[mycpuid], ifa_link) {
1011 struct ifaddr *ifa = ifac->ifa;
1013 sin = sctp_is_v4_ifa_addr_acceptable (ifa, loopscope, ipv4_scope, &sin_loop, &sin_local);
1014 if (sin == NULL)
1015 continue;
1016 if (stcb) {
1017 if ((non_asoc_addr_ok == 0) && sctp_is_addr_restricted(stcb, (struct sockaddr *)sin)) {
1018 /* It is restricted for some reason.. probably
1019 * not yet added.
1021 continue;
1024 return (sin->sin_addr);
1027 * plan_c: Look at all interfaces and find a prefered
1028 * address. If we reache here we are in trouble I think.
1030 bound_all_v4_plan_c:
1031 for (ifn = TAILQ_FIRST(&ifnet);
1032 ifn && (ifn != inp->next_ifn_touse);
1033 ifn=TAILQ_NEXT(ifn, if_list)) {
1034 if (loopscope == 0 && ifn->if_type == IFT_LOOP) {
1035 /* wrong base scope */
1036 continue;
1038 if (ifn == rt->rt_ifp)
1039 /* already looked at this guy */
1040 continue;
1041 num_prefered = sctp_count_v4_num_prefered_boundall (ifn, stcb, non_asoc_addr_ok,
1042 loopscope, ipv4_scope, &sin_loop, &sin_local);
1043 #ifdef SCTP_DEBUG
1044 if (sctp_debug_on & SCTP_DEBUG_OUTPUT1) {
1045 kprintf("Found ifn:%x %d preferred source addresses\n", (u_int)ifn, num_prefered);
1047 #endif
1048 if (num_prefered == 0) {
1050 * None on this interface.
1052 continue;
1054 /* Ok we have num_eligible_addr set with how many we can use,
1055 * this may vary from call to call due to addresses being deprecated etc..
1057 if (cur_addr_num >= num_prefered) {
1058 cur_addr_num = 0;
1060 sin = sctp_select_v4_nth_prefered_addr_from_ifn_boundall (ifn, stcb, non_asoc_addr_ok, loopscope,
1061 ipv4_scope, cur_addr_num);
1062 if (sin == NULL)
1063 continue;
1064 return (sin->sin_addr);
1069 * plan_d: We are in deep trouble. No prefered address on
1070 * any interface. And the emit interface does not
1071 * even have an acceptable address. Take anything
1072 * we can get! If this does not work we are
1073 * probably going to emit a packet that will
1074 * illicit an ABORT, falling through.
1077 for (ifn = TAILQ_FIRST(&ifnet);
1078 ifn && (ifn != inp->next_ifn_touse);
1079 ifn=TAILQ_NEXT(ifn, if_list)) {
1080 if (loopscope == 0 && ifn->if_type == IFT_LOOP) {
1081 /* wrong base scope */
1082 continue;
1084 if (ifn == rt->rt_ifp)
1085 /* already looked at this guy */
1086 continue;
1088 TAILQ_FOREACH(ifac, &ifn->if_addrheads[mycpuid], ifa_link) {
1089 struct ifaddr *ifa = ifac->ifa;
1091 sin = sctp_is_v4_ifa_addr_acceptable (ifa, loopscope, ipv4_scope, &sin_loop, &sin_local);
1092 if (sin == NULL)
1093 continue;
1094 if (stcb) {
1095 if ((non_asoc_addr_ok == 0) && sctp_is_addr_restricted(stcb, (struct sockaddr *)sin)) {
1096 /* It is restricted for some reason.. probably
1097 * not yet added.
1099 continue;
1102 return (sin->sin_addr);
1106 * Ok we can find NO address to source from that is
1107 * not on our negative list. It is either the special
1108 * ASCONF case where we are sourceing from a intf that
1109 * has been ifconfig'd to a different address (i.e.
1110 * it holds a ADD/DEL/SET-PRIM and the proper lookup
1111 * address. OR we are hosed, and this baby is going
1112 * to abort the association.
1114 if (non_asoc_addr_ok) {
1115 return (((struct sockaddr_in *)(rt->rt_ifa->ifa_addr))->sin_addr);
1116 } else {
1117 memset(&ans, 0, sizeof(ans));
1118 return (ans);
1124 /* tcb may be NULL */
/*
 * Select the IPv4 source address for the destination cached in ro->ro_dst.
 * Ensures a route exists (allocating one via rtalloc if needed), derives
 * the private/loopback scope flags either from the stcb or from the
 * destination address itself, then dispatches to the bound-all or
 * bound-specific chooser.  Returns 0.0.0.0 when there is no route or no
 * usable address (ip_output can then pick, per the notes below).
 * stcb and net may be NULL.
 */
1125 struct in_addr
1126 sctp_ipv4_source_address_selection(struct sctp_inpcb *inp,
1127 struct sctp_tcb *stcb, struct route *ro, struct sctp_nets *net,
1128 int non_asoc_addr_ok)
1130 struct in_addr ans;
1131 struct sockaddr_in *to = (struct sockaddr_in *)&ro->ro_dst;
1132 uint8_t ipv4_scope, loopscope;
1134 * Rules:
1135 * - Find the route if needed, cache if I can.
1136 * - Look at interface address in route, Is it
1137 * in the bound list. If so we have the best source.
1138 * - If not we must rotate amongst the addresses.
1140 * Cavets and issues
1142 * Do we need to pay attention to scope. We can have
1143 * a private address or a global address we are sourcing
1144 * or sending to. So if we draw it out
1145 * source * dest * result
1146 * ------------------------------------------
1147 * a Private * Global * NAT?
1148 * ------------------------------------------
1149 * b Private * Private * No problem
1150 * ------------------------------------------
1151 * c Global * Private * Huh, How will this work?
1152 * ------------------------------------------
1153 * d Global * Global * No Problem
1154 * ------------------------------------------
1156 * And then we add to that what happens if there are multiple
1157 * addresses assigned to an interface. Remember the ifa on a
1158 * ifn is a linked list of addresses. So one interface can
1159 * have more than one IPv4 address. What happens if we
1160 * have both a private and a global address? Do we then
1161 * use context of destination to sort out which one is
1162 * best? And what about NAT's sending P->G may get you
1163 * a NAT translation, or should you select the G thats
1164 * on the interface in preference.
1166 * Decisions:
1168 * - count the number of addresses on the interface.
1169 * - if its one, no problem except case <c>. For <a>
1170 * we will assume a NAT out there.
1171 * - if there are more than one, then we need to worry
1172 * about scope P or G. We should prefer G -> G and
1173 * P -> P if possible. Then as a secondary fall back
1174 * to mixed types G->P being a last ditch one.
1175 * - The above all works for bound all, but bound
1176 * specific we need to use the same concept but instead
1177 * only consider the bound addresses. If the bound set
1178 * is NOT assigned to the interface then we must use
1179 * rotation amongst them.
1181 * Notes: For v4, we can always punt and let ip_output
1182 * decide by sending back a source of 0.0.0.0
1185 if (ro->ro_rt == NULL) {
1187 * Need a route to cache.
1190 #if defined(__FreeBSD__) || defined(__APPLE__) || defined(__DragonFly__)
1191 rtalloc_ign(ro, 0UL);
1192 #else
1193 rtalloc(ro);
1194 #endif
1196 if (ro->ro_rt == NULL) {
1197 /* No route to host .. punt */
1198 memset(&ans, 0, sizeof(ans));
1199 return (ans);
1201 /* Setup our scopes */
1202 if (stcb) {
1203 ipv4_scope = stcb->asoc.ipv4_local_scope;
1204 loopscope = stcb->asoc.loopback_scope;
1205 } else {
1206 /* Scope based on outbound address */
1207 if ((IN4_ISPRIVATE_ADDRESS(&to->sin_addr))) {
1208 ipv4_scope = 1;
1209 loopscope = 0;
1210 } else if (IN4_ISLOOPBACK_ADDRESS(&to->sin_addr)) {
/* loopback destination implies both private and loopback scope */
1211 ipv4_scope = 1;
1212 loopscope = 1;
1213 } else {
1214 ipv4_scope = 0;
1215 loopscope = 0;
1218 #ifdef SCTP_DEBUG
1219 if (sctp_debug_on & SCTP_DEBUG_OUTPUT1) {
1220 kprintf("Scope setup loop:%d ipv4_scope:%d\n",
1221 loopscope, ipv4_scope);
1223 #endif
1224 if (inp->sctp_flags & SCTP_PCB_FLAGS_BOUNDALL) {
1226 * When bound to all if the address list is set
1227 * it is a negative list. Addresses being added
1228 * by asconf.
1230 return (sctp_choose_v4_boundall(inp, stcb, net, ro->ro_rt,
1231 ipv4_scope, loopscope, non_asoc_addr_ok));
1234 * Three possiblities here:
1236 * a) stcb is NULL, which means we operate only from
1237 * the list of addresses (ifa's) bound to the assoc and
1238 * we care not about the list.
1239 * b) stcb is NOT-NULL, which means we have an assoc structure and
1240 * auto-asconf is on. This means that the list of addresses is
1241 * a NOT list. We use the list from the inp, but any listed address
1242 * in our list is NOT yet added. However if the non_asoc_addr_ok is
1243 * set we CAN use an address NOT available (i.e. being added). Its
1244 * a negative list.
1245 * c) stcb is NOT-NULL, which means we have an assoc structure and
1246 * auto-asconf is off. This means that the list of addresses is
1247 * the ONLY addresses I can use.. its positive.
1249 * Note we collapse b & c into the same function just like in
1250 * the v6 address selection.
1252 if (stcb) {
1253 return (sctp_choose_v4_boundspecific_stcb(inp, stcb, net,
1254 ro->ro_rt, ipv4_scope, loopscope, non_asoc_addr_ok));
1255 } else {
1256 return (sctp_choose_v4_boundspecific_inp(inp, ro->ro_rt,
1257 ipv4_scope, loopscope));
1259 /* this should not be reached */
1260 memset(&ans, 0, sizeof(ans));
1261 return (ans);
/*
 * Decide whether ifa is usable as an IPv6 source address under the given
 * loopback (loopscope) and link-local (loc_scope) constraints.  Returns
 * the address as a sockaddr_in6 pointer when acceptable, NULL otherwise.
 * *sin_loop and *sin_local are cleared and then set to 1 when the address
 * is loopback- or link-local-scoped respectively, so callers can do
 * scope matching against the destination.
 */
1266 static struct sockaddr_in6 *
1267 sctp_is_v6_ifa_addr_acceptable (struct ifaddr *ifa, int loopscope, int loc_scope, int *sin_loop, int *sin_local)
1269 struct in6_ifaddr *ifa6;
1270 struct sockaddr_in6 *sin6;
1272 if (ifa->ifa_addr->sa_family != AF_INET6) {
1273 /* forget non-v6 */
1274 return (NULL);
1276 ifa6 = (struct in6_ifaddr *)ifa;
1277 /* ok to use deprecated addresses? */
1278 if (!ip6_use_deprecated) {
1279 if (IFA6_IS_DEPRECATED(ifa6)) {
1280 /* can't use this type */
1281 return (NULL);
1284 /* are we ok, with the current state of this address? */
1285 if (ifa6->ia6_flags &
1286 (IN6_IFF_DETACHED | IN6_IFF_NOTREADY | IN6_IFF_ANYCAST)) {
1287 /* Can't use these types */
1288 return (NULL);
1290 /* Ok the address may be ok */
1291 sin6 = (struct sockaddr_in6 *)ifa->ifa_addr;
1292 *sin_local = *sin_loop = 0;
1293 if ((ifa->ifa_ifp->if_type == IFT_LOOP) ||
1294 (IN6_IS_ADDR_LOOPBACK(&sin6->sin6_addr))) {
1295 *sin_loop = 1;
1297 if (!loopscope && *sin_loop) {
1298 /* Its a loopback address and we don't have loop scope */
1299 return (NULL);
1301 if (IN6_IS_ADDR_UNSPECIFIED(&sin6->sin6_addr)) {
1302 /* we skip unspecifed addresses */
1303 return (NULL);
1306 if (IN6_IS_ADDR_LINKLOCAL(&sin6->sin6_addr)) {
1307 *sin_local = 1;
1309 if (!loc_scope && *sin_local) {
1310 /* Its a link local address, and we don't have link local scope */
1311 return (NULL);
1313 return (sin6);
/*
 * Bound-specific IPv6 source selection when an association (stcb) exists.
 * See the original comment below for the negative-list (asconf allowed)
 * vs. positive-list (no asconf) semantics.  Rotation through the
 * endpoint's list resumes from stcb->asoc.last_used_address.  Returns
 * NULL when no acceptable bound address is found.
 */
1317 static struct sockaddr_in6 *
1318 sctp_choose_v6_boundspecific_stcb(struct sctp_inpcb *inp,
1319 struct sctp_tcb *stcb,
1320 struct sctp_nets *net,
1321 struct rtentry *rt,
1322 uint8_t loc_scope,
1323 uint8_t loopscope,
1324 int non_asoc_addr_ok)
1327 * Each endpoint has a list of local addresses associated
1328 * with it. The address list is either a "negative list" i.e.
1329 * those addresses that are NOT allowed to be used as a source OR
1330 * a "postive list" i.e. those addresses that CAN be used.
1332 * Its a negative list if asconf is allowed. What we do
1333 * in this case is use the ep address list BUT we have
1334 * to cross check it against the negative list.
1336 * In the case where NO asconf is allowed, we have just
1337 * a straight association level list that we must use to
1338 * find a source address.
1340 struct sctp_laddr *laddr, *starting_point;
1341 struct sockaddr_in6 *sin6;
1342 int sin_loop, sin_local;
1343 int start_at_beginning=0;
1344 struct ifnet *ifn;
1346 ifn = rt->rt_ifp;
1347 if (inp->sctp_flags & SCTP_PCB_FLAGS_DO_ASCONF) {
1348 #ifdef SCTP_DEBUG
1349 if (sctp_debug_on & SCTP_DEBUG_OUTPUT1) {
1350 kprintf("Have a STCB - asconf allowed, not bound all have a netgative list\n");
1352 #endif
1353 /* first question, is the ifn we will emit on
1354 * in our list, if so, we want that one.
1356 if (ifn) {
1357 struct ifaddr_container *ifac;
1359 TAILQ_FOREACH(ifac, &ifn->if_addrheads[mycpuid], ifa_link) {
1360 struct ifaddr *ifa = ifac->ifa;
1362 if (sctp_is_addr_in_ep(inp, ifa)) {
1363 sin6 = sctp_is_v6_ifa_addr_acceptable (ifa, loopscope, loc_scope, &sin_loop, &sin_local);
1364 if (sin6 == NULL)
1365 continue;
1366 if ((non_asoc_addr_ok == 0) &&
1367 (sctp_is_addr_restricted(stcb, (struct sockaddr *)sin6))) {
1368 /* on the no-no list */
1369 continue;
1371 return (sin6);
/* Emit interface gave nothing; rotate through the ep list instead. */
1375 starting_point = stcb->asoc.last_used_address;
1376 /* First try for matching scope */
1377 sctp_from_the_top:
1378 if (stcb->asoc.last_used_address == NULL) {
1379 start_at_beginning=1;
1380 stcb->asoc.last_used_address = LIST_FIRST(&inp->sctp_addr_list);
1382 /* search beginning with the last used address */
1383 for (laddr = stcb->asoc.last_used_address; laddr;
1384 laddr = LIST_NEXT(laddr, sctp_nxt_addr)) {
1385 if (laddr->ifa == NULL) {
1386 /* address has been removed */
1387 continue;
1389 sin6 = sctp_is_v6_ifa_addr_acceptable (laddr->ifa, loopscope, loc_scope, &sin_loop, &sin_local);
1390 if (sin6 == NULL)
1391 continue;
1392 if ((non_asoc_addr_ok == 0) && (sctp_is_addr_restricted(stcb, (struct sockaddr *)sin6))) {
1393 /* on the no-no list */
1394 continue;
1396 /* is it of matching scope ? */
1397 if ((loopscope == 0) &&
1398 (loc_scope == 0) &&
1399 (sin_loop == 0) &&
1400 (sin_local == 0)) {
1401 /* all of global scope we are ok with it */
1402 return (sin6);
1404 if (loopscope && sin_loop)
1405 /* both on the loopback, thats ok */
1406 return (sin6);
1407 if (loc_scope && sin_local)
1408 /* both local scope */
1409 return (sin6);
1412 if (start_at_beginning == 0) {
1413 stcb->asoc.last_used_address = NULL;
1414 goto sctp_from_the_top;
1416 /* now try for any higher scope than the destination */
1417 stcb->asoc.last_used_address = starting_point;
1418 start_at_beginning = 0;
1419 sctp_from_the_top2:
1420 if (stcb->asoc.last_used_address == NULL) {
1421 start_at_beginning=1;
1422 stcb->asoc.last_used_address = LIST_FIRST(&inp->sctp_addr_list);
1424 /* search beginning with the last used address */
1425 for (laddr = stcb->asoc.last_used_address; laddr;
1426 laddr = LIST_NEXT(laddr, sctp_nxt_addr)) {
1427 if (laddr->ifa == NULL) {
1428 /* address has been removed */
1429 continue;
1431 sin6 = sctp_is_v6_ifa_addr_acceptable (laddr->ifa, loopscope, loc_scope, &sin_loop, &sin_local);
1432 if (sin6 == NULL)
1433 continue;
1434 if ((non_asoc_addr_ok == 0) && (sctp_is_addr_restricted(stcb, (struct sockaddr *)sin6))) {
1435 /* on the no-no list */
1436 continue;
1438 return (sin6);
1440 if (start_at_beginning == 0) {
1441 stcb->asoc.last_used_address = NULL;
1442 goto sctp_from_the_top2;
1444 } else {
1445 #ifdef SCTP_DEBUG
1446 if (sctp_debug_on & SCTP_DEBUG_OUTPUT1) {
1447 kprintf("Have a STCB - no asconf allowed, not bound all have a positive list\n");
1449 #endif
1450 /* First try for interface output match */
1451 LIST_FOREACH(laddr, &stcb->asoc.sctp_local_addr_list,
1452 sctp_nxt_addr) {
1453 if (laddr->ifa == NULL) {
1454 /* address has been removed */
1455 continue;
1457 sin6 = sctp_is_v6_ifa_addr_acceptable (laddr->ifa, loopscope, loc_scope, &sin_loop, &sin_local);
1458 if (sin6 == NULL)
1459 continue;
1460 /* first question, is laddr->ifa an address associated with the emit interface */
1461 if (ifn) {
1462 struct ifaddr_container *ifac;
1464 TAILQ_FOREACH(ifac, &ifn->if_addrheads[mycpuid], ifa_link) {
1465 struct ifaddr *ifa = ifac->ifa;
1467 if (laddr->ifa == ifa) {
1468 sin6 = (struct sockaddr_in6 *)laddr->ifa->ifa_addr;
1469 return (sin6);
1471 if (sctp_cmpaddr(ifa->ifa_addr, laddr->ifa->ifa_addr) == 1) {
1472 sin6 = (struct sockaddr_in6 *)laddr->ifa->ifa_addr;
1473 return (sin6);
1478 /* Next try for matching scope */
1479 LIST_FOREACH(laddr, &stcb->asoc.sctp_local_addr_list,
1480 sctp_nxt_addr) {
1481 if (laddr->ifa == NULL) {
1482 /* address has been removed */
1483 continue;
1485 sin6 = sctp_is_v6_ifa_addr_acceptable (laddr->ifa, loopscope, loc_scope, &sin_loop, &sin_local);
1486 if (sin6 == NULL)
1487 continue;
1489 if ((loopscope == 0) &&
1490 (loc_scope == 0) &&
1491 (sin_loop == 0) &&
1492 (sin_local == 0)) {
1493 /* all of global scope we are ok with it */
1494 return (sin6);
1496 if (loopscope && sin_loop)
1497 /* both on the loopback, thats ok */
1498 return (sin6);
1499 if (loc_scope && sin_local)
1500 /* both local scope */
1501 return (sin6);
1503 /* ok, now try for a higher scope in the source address */
1504 /* First try for matching scope */
1505 LIST_FOREACH(laddr, &stcb->asoc.sctp_local_addr_list,
1506 sctp_nxt_addr) {
1507 if (laddr->ifa == NULL) {
1508 /* address has been removed */
1509 continue;
1511 sin6 = sctp_is_v6_ifa_addr_acceptable (laddr->ifa, loopscope, loc_scope, &sin_loop, &sin_local);
1512 if (sin6 == NULL)
1513 continue;
1514 return (sin6);
1517 return (NULL);
/*
 * Bound-specific IPv6 source selection with only an inp (no stcb): the
 * endpoint's bound-address list is a positive list.  Prefer an address
 * on the emit interface, then any bound address whose scope matches the
 * destination, finally any bound address at all.  Returns NULL when
 * nothing bound can source to the destination.
 */
1520 static struct sockaddr_in6 *
1521 sctp_choose_v6_boundspecific_inp(struct sctp_inpcb *inp,
1522 struct rtentry *rt,
1523 uint8_t loc_scope,
1524 uint8_t loopscope)
1527 * Here we are bound specific and have only
1528 * an inp. We must find an address that is bound
1529 * that we can give out as a src address. We
1530 * prefer two addresses of same scope if we can
1531 * find them that way.
1533 struct sctp_laddr *laddr;
1534 struct sockaddr_in6 *sin6;
1535 struct ifnet *ifn;
1536 int sin_loop, sin_local;
1538 /* first question, is the ifn we will emit on
1539 * in our list, if so, we want that one.
1542 ifn = rt->rt_ifp;
1543 if (ifn) {
1544 struct ifaddr_container *ifac;
1546 TAILQ_FOREACH(ifac, &ifn->if_addrheads[mycpuid], ifa_link) {
1547 struct ifaddr *ifa = ifac->ifa;
1549 sin6 = sctp_is_v6_ifa_addr_acceptable (ifa, loopscope, loc_scope, &sin_loop, &sin_local);
1550 if (sin6 == NULL)
1551 continue;
1552 if (sctp_is_addr_in_ep(inp, ifa)) {
1553 return (sin6);
/* rotation stops at next_addr_touse; NOTE(review): full-list coverage relies on that cursor — confirm against callers */
1557 for (laddr = LIST_FIRST(&inp->sctp_addr_list);
1558 laddr && (laddr != inp->next_addr_touse);
1559 laddr = LIST_NEXT(laddr, sctp_nxt_addr)) {
1560 if (laddr->ifa == NULL) {
1561 /* address has been removed */
1562 continue;
1564 sin6 = sctp_is_v6_ifa_addr_acceptable (laddr->ifa, loopscope, loc_scope, &sin_loop, &sin_local);
1565 if (sin6 == NULL)
1566 continue;
1568 if ((loopscope == 0) &&
1569 (loc_scope == 0) &&
1570 (sin_loop == 0) &&
1571 (sin_local == 0)) {
1572 /* all of global scope we are ok with it */
1573 return (sin6);
1575 if (loopscope && sin_loop)
1576 /* both on the loopback, thats ok */
1577 return (sin6);
1578 if (loc_scope && sin_local)
1579 /* both local scope */
1580 return (sin6);
1583 /* if we reach here, we could not find two addresses
1584 * of the same scope to give out. Lets look for any higher level
1585 * scope for a source address.
1587 for (laddr = LIST_FIRST(&inp->sctp_addr_list);
1588 laddr && (laddr != inp->next_addr_touse);
1589 laddr = LIST_NEXT(laddr, sctp_nxt_addr)) {
1590 if (laddr->ifa == NULL) {
1591 /* address has been removed */
1592 continue;
1594 sin6 = sctp_is_v6_ifa_addr_acceptable (laddr->ifa, loopscope, loc_scope, &sin_loop, &sin_local);
1595 if (sin6 == NULL)
1596 continue;
1597 return (sin6);
1599 /* no address bound can be a source for the destination */
1600 #ifdef SCTP_DEBUG
1601 if (sctp_debug_on & SCTP_DEBUG_OUTPUT1) {
1602 kprintf("Src address selection for EP, no acceptable src address found for address\n");
1604 #endif
1605 return (NULL);
/*
 * Walk the addresses on ifn and return the cur_addr_num'th eligible IPv6
 * source address (0-based).  When match_scope is set, instead return the
 * first address whose scope matches the destination's scope exactly
 * (loopback-to-loopback, local-to-local, or global-to-global), ignoring
 * cur_addr_num.  Addresses restricted for the stcb are skipped unless
 * non_asoc_addr_ok.  Returns NULL when no suitable address exists.
 */
1609 static struct sockaddr_in6 *
1610 sctp_select_v6_nth_addr_from_ifn_boundall (struct ifnet *ifn, struct sctp_tcb *stcb, int non_asoc_addr_ok, uint8_t loopscope,
1611 uint8_t loc_scope, int cur_addr_num, int match_scope)
1613 struct ifaddr_container *ifac;
1614 struct sockaddr_in6 *sin6;
1615 int sin_loop, sin_local;
1616 int num_eligible_addr = 0;
1618 TAILQ_FOREACH(ifac, &ifn->if_addrheads[mycpuid], ifa_link) {
1619 struct ifaddr *ifa = ifac->ifa;
1621 sin6 = sctp_is_v6_ifa_addr_acceptable (ifa, loopscope, loc_scope, &sin_loop, &sin_local);
1622 if (sin6 == NULL)
1623 continue;
1624 if (stcb) {
1625 if ((non_asoc_addr_ok == 0) && sctp_is_addr_restricted(stcb, (struct sockaddr *)sin6)) {
1626 /* It is restricted for some reason.. probably
1627 * not yet added.
1629 continue;
1632 if (match_scope) {
1633 /* Here we are asked to match scope if possible */
1634 if (loopscope && sin_loop)
1635 /* src and destination are loopback scope */
1636 return (sin6);
1637 if (loc_scope && sin_local)
1638 /* src and destination are local scope */
1639 return (sin6);
1640 if ((loopscope == 0) &&
1641 (loc_scope == 0) &&
1642 (sin_loop == 0) &&
1643 (sin_local == 0)) {
1644 /* src and destination are global scope */
1645 return (sin6);
1647 continue;
1649 if (num_eligible_addr == cur_addr_num) {
1650 /* this is it */
1651 return (sin6);
1653 num_eligible_addr++;
1655 return (NULL);
/*
 * Count how many addresses on ifn qualify as IPv6 source addresses for
 * the given scopes (skipping stcb-restricted addresses unless
 * non_asoc_addr_ok).  Companion to
 * sctp_select_v6_nth_addr_from_ifn_boundall(); the count bounds the
 * rotation index used by the bound-all chooser.
 */
1659 static int
1660 sctp_count_v6_num_eligible_boundall (struct ifnet *ifn, struct sctp_tcb *stcb,
1661 int non_asoc_addr_ok, uint8_t loopscope, uint8_t loc_scope)
1663 struct ifaddr_container *ifac;
1664 struct sockaddr_in6 *sin6;
1665 int num_eligible_addr = 0;
1666 int sin_loop, sin_local;
1668 TAILQ_FOREACH(ifac, &ifn->if_addrheads[mycpuid], ifa_link) {
1669 struct ifaddr *ifa = ifac->ifa;
1671 sin6 = sctp_is_v6_ifa_addr_acceptable (ifa, loopscope, loc_scope, &sin_loop, &sin_local);
1672 if (sin6 == NULL)
1673 continue;
1674 if (stcb) {
1675 if ((non_asoc_addr_ok == 0) && sctp_is_addr_restricted(stcb, (struct sockaddr *)sin6)) {
1676 /* It is restricted for some reason.. probably
1677 * not yet added.
1679 continue;
1682 num_eligible_addr++;
1684 return (num_eligible_addr);
/*
 * Bound-all IPv6 source selection: any local address is usable unless it
 * is on the association's negative list.  Plan A uses the interface of
 * the route, rotating through its eligible addresses (rotation index is
 * cached in net->indx_of_eligible_next_to_use when net is non-NULL, with
 * a first-pass preference for matching scope).  Plan B rotates across
 * the other interfaces via inp->next_ifn_touse, again preferring a scope
 * match.  Returns NULL when nothing acceptable is found anywhere.
 */
1688 static struct sockaddr_in6 *
1689 sctp_choose_v6_boundall(struct sctp_inpcb *inp,
1690 struct sctp_tcb *stcb,
1691 struct sctp_nets *net,
1692 struct rtentry *rt,
1693 uint8_t loc_scope,
1694 uint8_t loopscope,
1695 int non_asoc_addr_ok)
1697 /* Ok, we are bound all SO any address
1698 * is ok to use as long as it is NOT in the negative
1699 * list.
1701 int num_eligible_addr;
1702 int cur_addr_num=0;
1703 int started_at_beginning=0;
1704 int match_scope_prefered;
1705 /* first question is, how many eligible addresses are
1706 * there for the destination ifn that we are using that
1707 * are within the proper scope?
1709 struct ifnet *ifn;
1710 struct sockaddr_in6 *sin6;
1712 ifn = rt->rt_ifp;
1713 if (net) {
1714 cur_addr_num = net->indx_of_eligible_next_to_use;
1716 if (cur_addr_num == 0) {
1717 match_scope_prefered = 1;
1718 } else {
1719 match_scope_prefered = 0;
1721 num_eligible_addr = sctp_count_v6_num_eligible_boundall (ifn, stcb, non_asoc_addr_ok, loopscope, loc_scope);
1722 #ifdef SCTP_DEBUG
1723 if (sctp_debug_on & SCTP_DEBUG_OUTPUT1) {
1724 kprintf("Found %d eligible source addresses\n", num_eligible_addr);
1726 #endif
1727 if (num_eligible_addr == 0) {
1728 /* no eligible addresses, we must use some other
1729 * interface address if we can find one.
1731 goto bound_all_v6_plan_b;
1733 /* Ok we have num_eligible_addr set with how many we can use,
1734 * this may vary from call to call due to addresses being deprecated etc..
1736 if (cur_addr_num >= num_eligible_addr) {
1737 cur_addr_num = 0;
1739 /* select the nth address from the list (where cur_addr_num is the nth) and
1740 * 0 is the first one, 1 is the second one etc...
1742 #ifdef SCTP_DEBUG
1743 if (sctp_debug_on & SCTP_DEBUG_OUTPUT1) {
1744 kprintf("cur_addr_num:%d match_scope_prefered:%d select it\n",
1745 cur_addr_num, match_scope_prefered);
1747 #endif
1748 sin6 = sctp_select_v6_nth_addr_from_ifn_boundall (ifn, stcb, non_asoc_addr_ok, loopscope,
1749 loc_scope, cur_addr_num, match_scope_prefered);
1750 if (match_scope_prefered && (sin6 == NULL)) {
1751 /* retry without the preference for matching scope */
1752 #ifdef SCTP_DEBUG
1753 if (sctp_debug_on & SCTP_DEBUG_OUTPUT1) {
1754 kprintf("retry with no match_scope_prefered\n");
1756 #endif
1757 sin6 = sctp_select_v6_nth_addr_from_ifn_boundall (ifn, stcb, non_asoc_addr_ok, loopscope,
1758 loc_scope, cur_addr_num, 0);
1760 if (sin6) {
1761 #ifdef SCTP_DEBUG
1762 if (sctp_debug_on & SCTP_DEBUG_OUTPUT1) {
1763 kprintf("Selected address %d ifn:%x for the route\n", cur_addr_num, (u_int)ifn);
1765 #endif
1766 if (net) {
1767 /* store so we get the next one */
1768 if (cur_addr_num < 255)
1769 net->indx_of_eligible_next_to_use = cur_addr_num + 1;
1770 else
1771 net->indx_of_eligible_next_to_use = 0;
1773 return (sin6);
1775 num_eligible_addr = 0;
1776 bound_all_v6_plan_b:
1777 /* ok, if we reach here we either fell through
1778 * due to something changing during an interupt (unlikely)
1779 * or we have NO eligible source addresses for the ifn
1780 * of the route (most likely). We must look at all the other
1781 * interfaces EXCEPT rt->rt_ifp and do the same game.
1783 #ifdef SCTP_DEBUG
1784 if (sctp_debug_on & SCTP_DEBUG_OUTPUT1) {
1785 kprintf("bound-all Plan B\n");
1787 #endif
1788 if (inp->next_ifn_touse == NULL) {
1789 started_at_beginning=1;
1790 inp->next_ifn_touse = TAILQ_FIRST(&ifnet);
1791 #ifdef SCTP_DEBUG
1792 if (sctp_debug_on & SCTP_DEBUG_OUTPUT1) {
1793 kprintf("Start at first IFN:%x\n", (u_int)inp->next_ifn_touse);
1795 #endif
1796 } else {
1797 inp->next_ifn_touse = TAILQ_NEXT(inp->next_ifn_touse, if_list);
1798 #ifdef SCTP_DEBUG
1799 if (sctp_debug_on & SCTP_DEBUG_OUTPUT1) {
1800 kprintf("Resume at IFN:%x\n", (u_int)inp->next_ifn_touse);
1802 #endif
1803 if (inp->next_ifn_touse == NULL) {
1804 #ifdef SCTP_DEBUG
1805 if (sctp_debug_on & SCTP_DEBUG_OUTPUT1) {
1806 kprintf("IFN Resets\n");
1808 #endif
1809 started_at_beginning=1;
1810 inp->next_ifn_touse = TAILQ_FIRST(&ifnet);
1813 for (ifn = inp->next_ifn_touse; ifn;
1814 ifn = TAILQ_NEXT(ifn, if_list)) {
1815 if (loopscope == 0 && ifn->if_type == IFT_LOOP) {
1816 /* wrong base scope */
1817 continue;
1819 if (loc_scope && (ifn->if_index != loc_scope)) {
1820 /* by definition the scope (from to->sin6_scopeid)
1821 * must match that of the interface. If not then
1822 * we could pick a wrong scope for the address.
1823 * Ususally we don't hit plan-b since the route
1824 * handles this. However we can hit plan-b when
1825 * we send to local-host so the route is the
1826 * loopback interface, but the destination is a
1827 * link local.
1829 continue;
1831 if (ifn == rt->rt_ifp) {
1832 /* already looked at this guy */
1833 continue;
1835 /* Address rotation will only work when we are not
1836 * rotating sourced interfaces and are using the interface
1837 * of the route. We would need to have a per interface index
1838 * in order to do proper rotation.
1840 num_eligible_addr = sctp_count_v6_num_eligible_boundall (ifn, stcb, non_asoc_addr_ok, loopscope, loc_scope);
1841 #ifdef SCTP_DEBUG
1842 if (sctp_debug_on & SCTP_DEBUG_OUTPUT1) {
1843 kprintf("IFN:%x has %d eligible\n", (u_int)ifn, num_eligible_addr);
1845 #endif
1846 if (num_eligible_addr == 0) {
1847 /* none we can use */
1848 continue;
1850 /* Ok we have num_eligible_addr set with how many we can use,
1851 * this may vary from call to call due to addresses being deprecated etc..
1853 inp->next_ifn_touse = ifn;
1855 /* select the first one we can find with perference for matching scope.
1857 sin6 = sctp_select_v6_nth_addr_from_ifn_boundall (ifn, stcb, non_asoc_addr_ok, loopscope, loc_scope, 0, 1);
1858 if (sin6 == NULL) {
1859 /* can't find one with matching scope how about a source with higher
1860 * scope
1862 sin6 = sctp_select_v6_nth_addr_from_ifn_boundall (ifn, stcb, non_asoc_addr_ok, loopscope, loc_scope, 0, 0);
1863 if (sin6 == NULL)
1864 /* Hmm, can't find one in the interface now */
1865 continue;
1867 #ifdef SCTP_DEBUG
1868 if (sctp_debug_on & SCTP_DEBUG_OUTPUT1) {
1869 kprintf("Selected the %d'th address of ifn:%x\n",
1870 cur_addr_num,
1871 (u_int)ifn);
1873 #endif
1874 return (sin6);
1876 if (started_at_beginning == 0) {
1877 /* we have not been through all of them yet, force
1878 * us to go through them all.
1880 #ifdef SCTP_DEBUG
1881 if (sctp_debug_on & SCTP_DEBUG_OUTPUT1) {
1882 kprintf("Force a recycle\n");
1884 #endif
1885 inp->next_ifn_touse = NULL;
1886 goto bound_all_v6_plan_b;
1888 return (NULL);
1892 /* stcb and net may be NULL */
/*
 * Top-level IPv6 source address selection for the destination in
 * ro->ro_dst.  Ensures a route exists (temporarily zeroing
 * sin6_scope_id for the lookup when !SCOPEDROUTING), classifies the
 * destination as loopback / link-local / global, then dispatches to the
 * bound-all or bound-specific choosers.  Returns the unspecified
 * address (all zeros) when no route or no usable source exists.  Marks
 * net->addr_is_local for loopback destinations when net is non-NULL.
 */
1893 struct in6_addr
1894 sctp_ipv6_source_address_selection(struct sctp_inpcb *inp,
1895 struct sctp_tcb *stcb, struct route *ro, struct sctp_nets *net,
1896 int non_asoc_addr_ok)
1898 struct in6_addr ans;
1899 struct sockaddr_in6 *rt_addr;
1900 uint8_t loc_scope, loopscope;
1901 struct sockaddr_in6 *to = (struct sockaddr_in6 *)&ro->ro_dst;
1904 * This routine is tricky standard v6 src address
1905 * selection cannot take into account what we have
1906 * bound etc, so we can't use it.
1908 * Instead here is what we must do:
1909 * 1) Make sure we have a route, if we
1910 * don't have a route we can never reach the peer.
1911 * 2) Once we have a route, determine the scope of the
1912 * route. Link local, loopback or global.
1913 * 3) Next we divide into three types. Either we
1914 * are bound all.. which means we want to use
1915 * one of the addresses of the interface we are
1916 * going out. <or>
1917 * 4a) We have not stcb, which means we are using the
1918 * specific addresses bound on an inp, in this
1919 * case we are similar to the stcb case (4b below)
1920 * accept the list is always a positive list.<or>
1921 * 4b) We are bound specific with a stcb, which means we have a
1922 * list of bound addresses and we must see if the
1923 * ifn of the route is actually one of the bound addresses.
1924 * If not, then we must rotate addresses amongst properly
1925 * scoped bound addresses, if so we use the address
1926 * of the interface.
1927 * 5) Always, no matter which path we take through the above
1928 * we must be sure the source address we use is allowed to
1929 * be used. I.e. IN6_IFF_DETACHED, IN6_IFF_NOTREADY, and IN6_IFF_ANYCAST
1930 * addresses cannot be used.
1931 * 6) Addresses that are deprecated MAY be used
1932 * if (!ip6_use_deprecated) {
1933 * if (IFA6_IS_DEPRECATED(ifa6)) {
1934 * skip the address
1939 /*** 1> determine route, if not already done */
1940 if (ro->ro_rt == NULL) {
1942 * Need a route to cache.
1944 #ifndef SCOPEDROUTING
1945 int scope_save;
1946 scope_save = to->sin6_scope_id;
1947 to->sin6_scope_id = 0;
1948 #endif
1950 #if defined(__FreeBSD__) || defined(__APPLE__) || defined(__DragonFly__)
1951 rtalloc_ign(ro, 0UL);
1952 #else
1953 rtalloc(ro);
1954 #endif
1955 #ifndef SCOPEDROUTING
1956 to->sin6_scope_id = scope_save;
1957 #endif
1959 if (ro->ro_rt == NULL) {
1961 * no route to host. this packet is going no-where.
1962 * We probably should make sure we arrange to send back
1963 * an error.
1965 #ifdef SCTP_DEBUG
1966 if (sctp_debug_on & SCTP_DEBUG_OUTPUT1) {
1967 kprintf("No route to host, this packet cannot be sent!\n");
1969 #endif
1970 memset(&ans, 0, sizeof(ans));
1971 return (ans);
1974 /*** 2a> determine scope for outbound address/route */
1975 loc_scope = loopscope = 0;
1977 * We base our scope on the outbound packet scope and route,
1978 * NOT the TCB (if there is one). This way in local scope we will only
1979 * use a local scope src address when we send to a local address.
1982 if (IN6_IS_ADDR_LOOPBACK(&to->sin6_addr)) {
1983 /* If the route goes to the loopback address OR
1984 * the address is a loopback address, we are loopback
1985 * scope.
1987 #ifdef SCTP_DEBUG
1988 if (sctp_debug_on & SCTP_DEBUG_OUTPUT1) {
1989 kprintf("Loopback scope is set\n");
1991 #endif
1992 loc_scope = 0;
1993 loopscope = 1;
1994 if (net != NULL) {
1995 /* mark it as local */
1996 net->addr_is_local = 1;
1999 } else if (IN6_IS_ADDR_LINKLOCAL(&to->sin6_addr)) {
2000 #ifdef SCTP_DEBUG
2001 if (sctp_debug_on & SCTP_DEBUG_OUTPUT1) {
2002 kprintf("Link local scope is set, id:%d\n", to->sin6_scope_id);
2004 #endif
/* loc_scope doubles as the link-local zone id (interface index) */
2005 if (to->sin6_scope_id)
2006 loc_scope = to->sin6_scope_id;
2007 else {
2008 loc_scope = 1;
2010 loopscope = 0;
2011 } else {
2012 #ifdef SCTP_DEBUG
2013 if (sctp_debug_on & SCTP_DEBUG_OUTPUT1) {
2014 kprintf("Global scope is set\n");
2016 #endif
2019 /* now, depending on which way we are bound we call the appropriate
2020 * routine to do steps 3-6
2022 #ifdef SCTP_DEBUG
2023 if (sctp_debug_on & SCTP_DEBUG_OUTPUT1) {
2024 kprintf("Destination address:");
2025 sctp_print_address((struct sockaddr *)to);
2027 #endif
2029 if (inp->sctp_flags & SCTP_PCB_FLAGS_BOUNDALL) {
2030 #ifdef SCTP_DEBUG
2031 if (sctp_debug_on & SCTP_DEBUG_OUTPUT1) {
2032 kprintf("Calling bound-all src addr selection for v6\n");
2034 #endif
2035 rt_addr = sctp_choose_v6_boundall(inp, stcb, net, ro->ro_rt, loc_scope, loopscope, non_asoc_addr_ok);
2036 } else {
2037 #ifdef SCTP_DEBUG
2038 if (sctp_debug_on & SCTP_DEBUG_OUTPUT1) {
2039 kprintf("Calling bound-specific src addr selection for v6\n");
2041 #endif
2042 if (stcb)
2043 rt_addr = sctp_choose_v6_boundspecific_stcb(inp, stcb, net, ro->ro_rt, loc_scope, loopscope, non_asoc_addr_ok);
2044 else
2045 /* we can't have a non-asoc address since we have no association */
2046 rt_addr = sctp_choose_v6_boundspecific_inp(inp, ro->ro_rt, loc_scope, loopscope);
2048 if (rt_addr == NULL) {
2049 /* no suitable address? */
2050 struct in6_addr in6;
2051 #ifdef SCTP_DEBUG
2052 if (sctp_debug_on & SCTP_DEBUG_OUTPUT1) {
2053 kprintf("V6 packet will reach dead-end no suitable src address\n");
2055 #endif
2056 memset(&in6, 0, sizeof(in6));
2057 return (in6);
2059 #ifdef SCTP_DEBUG
2060 if (sctp_debug_on & SCTP_DEBUG_OUTPUT1) {
2061 kprintf("Source address selected is:");
2062 sctp_print_address((struct sockaddr *)rt_addr);
2064 #endif
2065 return (rt_addr->sin6_addr);
/*
 * Choose the ECT codepoint for an outgoing DATA chunk.  Returns 0 when
 * ECN is disabled (sctp_ecn sysctl); ECT0 when the ECN nonce is
 * unsupported (by config, by the peer, or when chk is NULL); otherwise
 * draws one bit from the cached random values (stcb->asoc.hb_random_*)
 * to pick ECT0 or ECT1, recording the nonce bit in the chunk and
 * advancing the random-bit cursor.  The cache is refilled from
 * sctp_select_initial_TSN() when the cursor runs off the end.
 */
2068 static uint8_t
2069 sctp_get_ect(struct sctp_tcb *stcb,
2070 struct sctp_tmit_chunk *chk)
2072 uint8_t this_random;
2074 /* Huh? */
2075 if (sctp_ecn == 0)
2076 return (0);
2078 if (sctp_ecn_nonce == 0)
2079 /* no nonce, always return ECT0 */
2080 return (SCTP_ECT0_BIT);
2082 if (stcb->asoc.peer_supports_ecn_nonce == 0) {
2083 /* Peer does NOT support it, so we send a ECT0 only */
2084 return (SCTP_ECT0_BIT);
2087 if (chk == NULL)
2088 return (SCTP_ECT0_BIT);
/* Refill the random cache when all bytes/bits are consumed. */
2090 if (((stcb->asoc.hb_random_idx == 3) &&
2091 (stcb->asoc.hb_ect_randombit > 7)) ||
2092 (stcb->asoc.hb_random_idx > 3)) {
2093 uint32_t rndval;
2094 rndval = sctp_select_initial_TSN(&stcb->sctp_ep->sctp_ep);
2095 memcpy(stcb->asoc.hb_random_values, &rndval,
2096 sizeof(stcb->asoc.hb_random_values));
2097 this_random = stcb->asoc.hb_random_values[0];
2098 stcb->asoc.hb_random_idx = 0;
2099 stcb->asoc.hb_ect_randombit = 0;
2100 } else {
2101 if (stcb->asoc.hb_ect_randombit > 7) {
2102 stcb->asoc.hb_ect_randombit = 0;
2103 stcb->asoc.hb_random_idx++;
2105 this_random = stcb->asoc.hb_random_values[stcb->asoc.hb_random_idx];
2107 if ((this_random >> stcb->asoc.hb_ect_randombit) & 0x01) {
2108 if (chk != NULL)
2109 /* ECN Nonce stuff */
2110 chk->rec.data.ect_nonce = SCTP_ECT1_BIT;
2111 stcb->asoc.hb_ect_randombit++;
2112 return (SCTP_ECT1_BIT);
2113 } else {
2114 stcb->asoc.hb_ect_randombit++;
2115 return (SCTP_ECT0_BIT);
2119 extern int sctp_no_csum_on_loopback;
2121 static int
2122 sctp_lowlevel_chunk_output(struct sctp_inpcb *inp,
2123 struct sctp_tcb *stcb, /* may be NULL */
2124 struct sctp_nets *net,
2125 struct sockaddr *to,
2126 struct mbuf *m,
2127 int nofragment_flag,
2128 int ecn_ok,
2129 struct sctp_tmit_chunk *chk,
2130 int out_of_asoc_ok)
2131 /* nofragment_flag to tell if IP_DF should be set (IPv4 only) */
2134 * Given a mbuf chain (via m_next) that holds a packet header
2135 * WITH a SCTPHDR but no IP header, endpoint inp and sa structure.
2136 * - calculate SCTP checksum and fill in
2137 * - prepend a IP address header
2138 * - if boundall use INADDR_ANY
2139 * - if boundspecific do source address selection
2140 * - set fragmentation option for ipV4
2141 * - On return from IP output, check/adjust mtu size
2142 * - of output interface and smallest_mtu size as well.
2144 struct sctphdr *sctphdr;
2145 int o_flgs;
2146 uint32_t csum;
2147 int ret;
2148 unsigned int have_mtu;
2149 struct route *ro;
2151 if ((net) && (net->dest_state & SCTP_ADDR_OUT_OF_SCOPE)) {
2152 sctp_m_freem(m);
2153 return (EFAULT);
2155 if ((m->m_flags & M_PKTHDR) == 0) {
2156 #ifdef SCTP_DEBUG
2157 if (sctp_debug_on & SCTP_DEBUG_OUTPUT1) {
2158 kprintf("Software error: sctp_lowlevel_chunk_output() called with non pkthdr!\n");
2160 #endif
2161 sctp_m_freem(m);
2162 return (EFAULT);
2164 /* Calculate the csum and fill in the length of the packet */
2165 sctphdr = mtod(m, struct sctphdr *);
2166 have_mtu = 0;
2167 if (sctp_no_csum_on_loopback &&
2168 (stcb) &&
2169 (stcb->asoc.loopback_scope)) {
2170 sctphdr->checksum = 0;
2171 m->m_pkthdr.len = sctp_calculate_len(m);
2172 } else {
2173 sctphdr->checksum = 0;
2174 csum = sctp_calculate_sum(m, &m->m_pkthdr.len, 0);
2175 sctphdr->checksum = csum;
2177 if (to->sa_family == AF_INET) {
2178 struct ip *ip;
2179 struct route iproute;
2180 M_PREPEND(m, sizeof(struct ip), MB_DONTWAIT);
2181 if (m == NULL) {
2182 /* failed to prepend data, give up */
2183 return (ENOMEM);
2185 ip = mtod(m, struct ip *);
2186 ip->ip_v = IPVERSION;
2187 ip->ip_hl = (sizeof(struct ip) >> 2);
2188 if (nofragment_flag) {
2189 #if defined(WITH_CONVERT_IP_OFF) || defined(__FreeBSD__) || defined(__DragonFly__)
2190 #if defined( __OpenBSD__) || defined(__NetBSD__)
2191 /* OpenBSD has WITH_CONVERT_IP_OFF defined?? */
2192 ip->ip_off = htons(IP_DF);
2193 #else
2194 ip->ip_off = IP_DF;
2195 #endif
2196 #else
2197 ip->ip_off = htons(IP_DF);
2198 #endif
2199 } else
2200 ip->ip_off = 0;
2202 /* FreeBSD and Apple have RANDOM_IP_ID switch */
2203 #if defined(RANDOM_IP_ID) || defined(__NetBSD__) || defined(__OpenBSD__)
2204 ip->ip_id = htons(ip_randomid());
2205 #else
2206 ip->ip_id = htons(ip_id++);
2207 #endif
2209 #if defined(__FreeBSD__) || defined(__APPLE__) || defined(__DragonFly__)
2210 ip->ip_ttl = inp->ip_inp.inp.inp_ip_ttl;
2211 #else
2212 ip->ip_ttl = inp->inp_ip_ttl;
2213 #endif
2214 #if defined(__OpenBSD__) || defined(__NetBSD__)
2215 ip->ip_len = htons(m->m_pkthdr.len);
2216 #else
2217 ip->ip_len = m->m_pkthdr.len;
2218 #endif
2219 if (stcb) {
2220 if ((stcb->asoc.ecn_allowed) && ecn_ok) {
2221 /* Enable ECN */
2222 #if defined(__FreeBSD__) || defined (__APPLE__) || defined(__DragonFly__)
2223 ip->ip_tos = (u_char)((inp->ip_inp.inp.inp_ip_tos & 0x000000fc) |
2224 sctp_get_ect(stcb, chk));
2225 #elif defined(__NetBSD__)
2226 ip->ip_tos = (u_char)((inp->ip_inp.inp.inp_ip.ip_tos & 0x000000fc) |
2227 sctp_get_ect(stcb, chk));
2228 #else
2229 ip->ip_tos = (u_char)((inp->inp_ip_tos & 0x000000fc) |
2230 sctp_get_ect(stcb, chk));
2231 #endif
2232 } else {
2233 /* No ECN */
2234 #if defined(__FreeBSD__) || defined(__APPLE__) || defined(__DragonFly__)
2235 ip->ip_tos = inp->ip_inp.inp.inp_ip_tos;
2236 #elif defined(__NetBSD__)
2237 ip->ip_tos = inp->ip_inp.inp.inp_ip.ip_tos;
2238 #else
2239 ip->ip_tos = inp->inp_ip_tos;
2240 #endif
2242 } else {
2243 /* no association at all */
2244 #if defined(__FreeBSD__) || defined(__APPLE__) || defined(__DragonFly__)
2245 ip->ip_tos = inp->ip_inp.inp.inp_ip_tos;
2246 #else
2247 ip->ip_tos = inp->inp_ip_tos;
2248 #endif
2250 ip->ip_p = IPPROTO_SCTP;
2251 ip->ip_sum = 0;
2252 if (net == NULL) {
2253 ro = &iproute;
2254 memset(&iproute, 0, sizeof(iproute));
2255 memcpy(&ro->ro_dst, to, to->sa_len);
2256 } else {
2257 ro = (struct route *)&net->ro;
2259 /* Now the address selection part */
2260 ip->ip_dst.s_addr = ((struct sockaddr_in *)to)->sin_addr.s_addr;
2262 /* call the routine to select the src address */
2263 if (net) {
2264 if (net->src_addr_selected == 0) {
2265 /* Cache the source address */
2266 ((struct sockaddr_in *)&net->ro._s_addr)->sin_addr = sctp_ipv4_source_address_selection(inp,
2267 stcb,
2268 ro, net, out_of_asoc_ok);
2269 if (ro->ro_rt)
2270 net->src_addr_selected = 1;
2272 ip->ip_src = ((struct sockaddr_in *)&net->ro._s_addr)->sin_addr;
2273 } else {
2274 ip->ip_src = sctp_ipv4_source_address_selection(inp,
2275 stcb, ro, net, out_of_asoc_ok);
2278 * If source address selection fails and we find no route then
2279 * the ip_ouput should fail as well with a NO_ROUTE_TO_HOST
2280 * type error. We probably should catch that somewhere and
2281 * abort the association right away (assuming this is an INIT
2282 * being sent).
2284 if ((ro->ro_rt == NULL)) {
2286 * src addr selection failed to find a route (or valid
2287 * source addr), so we can't get there from here!
2289 #ifdef SCTP_DEBUG
2290 if (sctp_debug_on & SCTP_DEBUG_OUTPUT1) {
2291 kprintf("low_level_output: dropped v4 packet- no valid source addr\n");
2292 kprintf("Destination was %x\n", (u_int)(ntohl(ip->ip_dst.s_addr)));
2294 #endif /* SCTP_DEBUG */
2295 if (net) {
2296 if ((net->dest_state & SCTP_ADDR_REACHABLE) && stcb)
2297 sctp_ulp_notify(SCTP_NOTIFY_INTERFACE_DOWN,
2298 stcb,
2299 SCTP_FAILED_THRESHOLD,
2300 (void *)net);
2301 net->dest_state &= ~SCTP_ADDR_REACHABLE;
2302 net->dest_state |= SCTP_ADDR_NOT_REACHABLE;
2303 if (stcb) {
2304 if (net == stcb->asoc.primary_destination) {
2305 /* need a new primary */
2306 struct sctp_nets *alt;
2307 alt = sctp_find_alternate_net(stcb, net);
2308 if (alt != net) {
2309 if (sctp_set_primary_addr(stcb,
2310 NULL,
2311 alt) == 0) {
2312 net->dest_state |= SCTP_ADDR_WAS_PRIMARY;
2313 net->src_addr_selected = 0;
2319 sctp_m_freem(m);
2320 return (EHOSTUNREACH);
2321 } else {
2322 have_mtu = ro->ro_rt->rt_ifp->if_mtu;
2325 o_flgs = (IP_RAWOUTPUT | (inp->sctp_socket->so_options & (SO_DONTROUTE | SO_BROADCAST)));
2326 #ifdef SCTP_DEBUG
2327 if (sctp_debug_on & SCTP_DEBUG_OUTPUT3) {
2328 kprintf("Calling ipv4 output routine from low level src addr:%x\n",
2329 (u_int)(ntohl(ip->ip_src.s_addr)));
2330 kprintf("Destination is %x\n", (u_int)(ntohl(ip->ip_dst.s_addr)));
2331 kprintf("RTP route is %p through\n", ro->ro_rt);
2333 #endif
2334 if ((have_mtu) && (net) && (have_mtu > net->mtu)) {
2335 ro->ro_rt->rt_ifp->if_mtu = net->mtu;
2337 ret = ip_output(m, inp->ip_inp.inp.inp_options,
2338 ro, o_flgs, inp->ip_inp.inp.inp_moptions
2339 #if defined(__OpenBSD__) || (defined(__FreeBSD__) && __FreeBSD_version >= 480000) \
2340 || defined(__DragonFly__)
2341 , NULL
2342 #endif
2343 #if defined(__NetBSD__)
2344 ,(struct socket *)inp->sctp_socket
2345 #endif
2348 if ((ro->ro_rt) && (have_mtu) && (net) && (have_mtu > net->mtu)) {
2349 ro->ro_rt->rt_ifp->if_mtu = have_mtu;
2351 sctp_pegs[SCTP_DATAGRAMS_SENT]++;
2352 #ifdef SCTP_DEBUG
2353 if (sctp_debug_on & SCTP_DEBUG_OUTPUT3) {
2354 kprintf("Ip output returns %d\n", ret);
2356 #endif
2357 if (net == NULL) {
2358 /* free tempy routes */
2359 if (ro->ro_rt)
2360 RTFREE(ro->ro_rt);
2361 } else {
2362 /* PMTU check versus smallest asoc MTU goes here */
2363 if (ro->ro_rt != NULL) {
2364 if (ro->ro_rt->rt_rmx.rmx_mtu &&
2365 (stcb->asoc.smallest_mtu > ro->ro_rt->rt_rmx.rmx_mtu)) {
2366 sctp_mtu_size_reset(inp, &stcb->asoc,
2367 ro->ro_rt->rt_rmx.rmx_mtu);
2369 } else {
2370 /* route was freed */
2371 net->src_addr_selected = 0;
2374 return (ret);
2376 #ifdef INET6
2377 else if (to->sa_family == AF_INET6) {
2378 struct ip6_hdr *ip6h;
2379 #ifdef NEW_STRUCT_ROUTE
2380 struct route ip6route;
2381 #else
2382 struct route_in6 ip6route;
2383 #endif
2384 struct ifnet *ifp;
2385 u_char flowTop;
2386 uint16_t flowBottom;
2387 u_char tosBottom, tosTop;
2388 struct sockaddr_in6 *sin6, tmp, *lsa6, lsa6_tmp;
2389 struct sockaddr_in6 lsa6_storage;
2390 int prev_scope=0;
2391 int error;
2392 u_short prev_port=0;
2394 M_PREPEND(m, sizeof(struct ip6_hdr), MB_DONTWAIT);
2395 if (m == NULL) {
2396 /* failed to prepend data, give up */
2397 return (ENOMEM);
2399 ip6h = mtod(m, struct ip6_hdr *);
2402 * We assume here that inp_flow is in host byte order within
2403 * the TCB!
2405 flowBottom = ((struct in6pcb *)inp)->in6p_flowinfo & 0x0000ffff;
2406 flowTop = ((((struct in6pcb *)inp)->in6p_flowinfo & 0x000f0000) >> 16);
2408 tosTop = (((((struct in6pcb *)inp)->in6p_flowinfo & 0xf0) >> 4) | IPV6_VERSION);
2410 /* protect *sin6 from overwrite */
2411 sin6 = (struct sockaddr_in6 *)to;
2412 tmp = *sin6;
2413 sin6 = &tmp;
2415 /* KAME hack: embed scopeid */
2416 #if defined(SCTP_BASE_FREEBSD) || defined(__APPLE__) || defined(__DragonFly__)
2417 if (in6_embedscope(&sin6->sin6_addr, sin6, NULL, NULL) != 0)
2418 #else
2419 if (in6_embedscope(&sin6->sin6_addr, sin6) != 0)
2420 #endif
2421 return (EINVAL);
2422 if (net == NULL) {
2423 memset(&ip6route, 0, sizeof(ip6route));
2424 ro = (struct route *)&ip6route;
2425 memcpy(&ro->ro_dst, sin6, sin6->sin6_len);
2426 } else {
2427 ro = (struct route *)&net->ro;
2429 if (stcb != NULL) {
2430 if ((stcb->asoc.ecn_allowed) && ecn_ok) {
2431 /* Enable ECN */
2432 tosBottom = (((((struct in6pcb *)inp)->in6p_flowinfo & 0x0c) | sctp_get_ect(stcb, chk)) << 4);
2433 } else {
2434 /* No ECN */
2435 tosBottom = ((((struct in6pcb *)inp)->in6p_flowinfo & 0x0c) << 4);
2437 } else {
2438 /* we could get no asoc if it is a O-O-T-B packet */
2439 tosBottom = ((((struct in6pcb *)inp)->in6p_flowinfo & 0x0c) << 4);
2441 ip6h->ip6_flow = htonl(((tosTop << 24) | ((tosBottom|flowTop) << 16) | flowBottom));
2442 ip6h->ip6_nxt = IPPROTO_SCTP;
2443 ip6h->ip6_plen = m->m_pkthdr.len;
2444 ip6h->ip6_dst = sin6->sin6_addr;
2447 * Add SRC address selection here:
2448 * we can only reuse to a limited degree the kame src-addr-sel,
2449 * since we can try their selection but it may not be bound.
2451 bzero(&lsa6_tmp, sizeof(lsa6_tmp));
2452 lsa6_tmp.sin6_family = AF_INET6;
2453 lsa6_tmp.sin6_len = sizeof(lsa6_tmp);
2454 lsa6 = &lsa6_tmp;
2455 if (net) {
2456 if (net->src_addr_selected == 0) {
2457 /* Cache the source address */
2458 ((struct sockaddr_in6 *)&net->ro._s_addr)->sin6_addr = sctp_ipv6_source_address_selection(inp,
2459 stcb, ro, net, out_of_asoc_ok);
2461 if (ro->ro_rt)
2462 net->src_addr_selected = 1;
2464 lsa6->sin6_addr = ((struct sockaddr_in6 *)&net->ro._s_addr)->sin6_addr;
2465 } else {
2466 lsa6->sin6_addr = sctp_ipv6_source_address_selection(
2467 inp, stcb, ro, net, out_of_asoc_ok);
2469 lsa6->sin6_port = inp->sctp_lport;
2471 if ((ro->ro_rt == NULL)) {
2473 * src addr selection failed to find a route (or valid
2474 * source addr), so we can't get there from here!
2476 #ifdef SCTP_DEBUG
2477 if (sctp_debug_on & SCTP_DEBUG_OUTPUT1) {
2478 kprintf("low_level_output: dropped v6 pkt- no valid source addr\n");
2480 #endif
2481 sctp_m_freem(m);
2482 if (net) {
2483 if ((net->dest_state & SCTP_ADDR_REACHABLE) && stcb)
2484 sctp_ulp_notify(SCTP_NOTIFY_INTERFACE_DOWN,
2485 stcb,
2486 SCTP_FAILED_THRESHOLD,
2487 (void *)net);
2488 net->dest_state &= ~SCTP_ADDR_REACHABLE;
2489 net->dest_state |= SCTP_ADDR_NOT_REACHABLE;
2490 if (stcb) {
2491 if (net == stcb->asoc.primary_destination) {
2492 /* need a new primary */
2493 struct sctp_nets *alt;
2494 alt = sctp_find_alternate_net(stcb, net);
2495 if (alt != net) {
2496 if (sctp_set_primary_addr(stcb,
2497 NULL,
2498 alt) == 0) {
2499 net->dest_state |= SCTP_ADDR_WAS_PRIMARY;
2500 net->src_addr_selected = 0;
2506 return (EHOSTUNREACH);
2509 #ifndef SCOPEDROUTING
2511 * XXX: sa6 may not have a valid sin6_scope_id in
2512 * the non-SCOPEDROUTING case.
2514 bzero(&lsa6_storage, sizeof(lsa6_storage));
2515 lsa6_storage.sin6_family = AF_INET6;
2516 lsa6_storage.sin6_len = sizeof(lsa6_storage);
2517 if ((error = in6_recoverscope(&lsa6_storage, &lsa6->sin6_addr,
2518 NULL)) != 0) {
2519 sctp_m_freem(m);
2520 return (error);
2522 /* XXX */
2523 lsa6_storage.sin6_addr = lsa6->sin6_addr;
2524 lsa6_storage.sin6_port = inp->sctp_lport;
2525 lsa6 = &lsa6_storage;
2526 #endif /* SCOPEDROUTING */
2527 ip6h->ip6_src = lsa6->sin6_addr;
2530 * We set the hop limit now since there is a good chance that
2531 * our ro pointer is now filled
2533 ip6h->ip6_hlim = in6_selecthlim((struct in6pcb *)&inp->ip_inp.inp,
2534 (ro ?
2535 (ro->ro_rt ? (ro->ro_rt->rt_ifp) : (NULL)) :
2536 (NULL)));
2537 o_flgs = 0;
2538 ifp = ro->ro_rt->rt_ifp;
2539 #ifdef SCTP_DEBUG
2540 if (sctp_debug_on & SCTP_DEBUG_OUTPUT3) {
2541 /* Copy to be sure something bad is not happening */
2542 sin6->sin6_addr = ip6h->ip6_dst;
2543 lsa6->sin6_addr = ip6h->ip6_src;
2545 kprintf("Calling ipv6 output routine from low level\n");
2546 kprintf("src: ");
2547 sctp_print_address((struct sockaddr *)lsa6);
2548 kprintf("dst: ");
2549 sctp_print_address((struct sockaddr *)sin6);
2551 #endif /* SCTP_DEBUG */
2552 if (net) {
2553 sin6 = (struct sockaddr_in6 *)&net->ro._l_addr;
2554 /* preserve the port and scope for link local send */
2555 prev_scope = sin6->sin6_scope_id;
2556 prev_port = sin6->sin6_port;
2558 ret = ip6_output(m, ((struct in6pcb *)inp)->in6p_outputopts,
2559 #ifdef NEW_STRUCT_ROUTE
2561 #else
2562 (struct route_in6 *)ro,
2563 #endif
2564 o_flgs,
2565 ((struct in6pcb *)inp)->in6p_moptions,
2566 #if defined(__NetBSD__)
2567 (struct socket *)inp->sctp_socket,
2568 #endif
2569 &ifp
2570 #if (defined(__FreeBSD__) && __FreeBSD_version >= 480000) || defined(__DragonFly__)
2571 , NULL
2572 #endif
2574 if (net) {
2575 /* for link local this must be done */
2576 sin6->sin6_scope_id = prev_scope;
2577 sin6->sin6_port = prev_port;
2579 #ifdef SCTP_DEBUG
2580 if (sctp_debug_on & SCTP_DEBUG_OUTPUT3) {
2581 kprintf("return from send is %d\n", ret);
2583 #endif /* SCTP_DEBUG_OUTPUT */
2584 sctp_pegs[SCTP_DATAGRAMS_SENT]++;
2585 if (net == NULL) {
2586 /* Now if we had a temp route free it */
2587 if (ro->ro_rt) {
2588 RTFREE(ro->ro_rt);
2590 } else {
2591 /* PMTU check versus smallest asoc MTU goes here */
2592 if (ro->ro_rt == NULL) {
2593 /* Route was freed */
2594 net->src_addr_selected = 0;
2596 if (ro->ro_rt != NULL) {
2597 if (ro->ro_rt->rt_rmx.rmx_mtu &&
2598 (stcb->asoc.smallest_mtu > ro->ro_rt->rt_rmx.rmx_mtu)) {
2599 sctp_mtu_size_reset(inp,
2600 &stcb->asoc,
2601 ro->ro_rt->rt_rmx.rmx_mtu);
2603 } else if (ifp) {
2604 #if (defined(SCTP_BASE_FREEBSD) && __FreeBSD_version < 500000) || defined(__APPLE__)
2605 #define ND_IFINFO(ifp) (&nd_ifinfo[ifp->if_index])
2606 #endif /* SCTP_BASE_FREEBSD */
2607 if (ND_IFINFO(ifp)->linkmtu &&
2608 (stcb->asoc.smallest_mtu > ND_IFINFO(ifp)->linkmtu)) {
2609 sctp_mtu_size_reset(inp,
2610 &stcb->asoc,
2611 ND_IFINFO(ifp)->linkmtu);
2615 return (ret);
2617 #endif
2618 else {
2619 #ifdef SCTP_DEBUG
2620 if (sctp_debug_on & SCTP_DEBUG_OUTPUT1) {
2621 kprintf("Unknown protocol (TSNH) type %d\n", ((struct sockaddr *)to)->sa_family);
2623 #endif
2624 sctp_m_freem(m);
2625 return (EFAULT);
/*
 * Decide whether the interface address 'ifa' may be advertised/used
 * under the given scoping flags.  Returns 1 when the address is in
 * scope, 0 otherwise.  Loopback interfaces are rejected unless
 * loopback_scope is set; IPv4 unspecified and (when ipv4_local_scope
 * is clear) RFC1918 private addresses are rejected; IPv6 deprecated
 * (unless ip6_use_deprecated), detached/anycast/not-ready, unspecified,
 * link-local, and (when site_scope is clear) site-local addresses are
 * rejected.  Any other address family is rejected.
 */
2629 static int
2630 sctp_is_address_in_scope(struct ifaddr *ifa,
2631 int ipv4_addr_legal,
2632 int ipv6_addr_legal,
2633 int loopback_scope,
2634 int ipv4_local_scope,
2635 int local_scope,
2636 int site_scope)
2638 if ((loopback_scope == 0) &&
2639 (ifa->ifa_ifp) &&
2640 (ifa->ifa_ifp->if_type == IFT_LOOP)) {
2641 /* skip loopback if not in scope *
 */
2643 return (0);
2645 if ((ifa->ifa_addr->sa_family == AF_INET) && ipv4_addr_legal) {
2646 struct sockaddr_in *sin;
2647 sin = (struct sockaddr_in *)ifa->ifa_addr;
2648 if (sin->sin_addr.s_addr == 0) {
2649 /* not in scope , unspecified */
2650 return (0);
2652 if ((ipv4_local_scope == 0) &&
2653 (IN4_ISPRIVATE_ADDRESS(&sin->sin_addr))) {
2654 /* private address not in scope */
2655 return (0);
2657 } else if ((ifa->ifa_addr->sa_family == AF_INET6) && ipv6_addr_legal) {
2658 struct sockaddr_in6 *sin6;
2659 struct in6_ifaddr *ifa6;
2661 ifa6 = (struct in6_ifaddr *)ifa;
2662 /* ok to use deprecated addresses? */
2663 if (!ip6_use_deprecated) {
2664 if (ifa6->ia6_flags &
2665 IN6_IFF_DEPRECATED) {
2666 return (0);
2669 if (ifa6->ia6_flags &
2670 (IN6_IFF_DETACHED |
2671 IN6_IFF_ANYCAST |
2672 IN6_IFF_NOTREADY)) {
2673 return (0);
2675 sin6 = (struct sockaddr_in6 *)ifa->ifa_addr;
2676 if (IN6_IS_ADDR_UNSPECIFIED(&sin6->sin6_addr)) {
2677 /* skip unspecifed addresses */
2678 return (0);
/*
 * Link-local addresses are always excluded: the local_scope test is
 * deliberately commented out, so the local_scope parameter currently
 * has no effect here.
 */
2680 if (/*(local_scope == 0) && */
2681 (IN6_IS_ADDR_LINKLOCAL(&sin6->sin6_addr))) {
2682 return (0);
2684 if ((site_scope == 0) &&
2685 (IN6_IS_ADDR_SITELOCAL(&sin6->sin6_addr))) {
2686 return (0);
2688 } else {
2689 return (0);
2691 return (1);
/*
 * Build and transmit an INIT chunk for association 'stcb' to its
 * primary (or first listed) destination.  Lays out, in one cluster
 * mbuf: SCTP common header, INIT chunk, supported-address-types
 * parameter, optional adaptation-layer indication, optional
 * cookie-preserve request, optional ECN-capable, PR-SCTP,
 * supported-extensions and ECN-nonce parameters, then the local
 * address list (addresses are only listed when more than one would
 * be advertised, to cooperate with NAT).  Starts the INIT
 * retransmission timer before sending; the send result is ignored
 * because the timer drives retransmission.
 */
2695 void
2696 sctp_send_initiate(struct sctp_inpcb *inp, struct sctp_tcb *stcb)
2698 struct mbuf *m, *m_at, *m_last;
2699 struct sctp_nets *net;
2700 struct sctp_init_msg *initm;
2701 struct sctp_supported_addr_param *sup_addr;
2702 struct sctp_ecn_supported_param *ecn;
2703 struct sctp_prsctp_supported_param *prsctp;
2704 struct sctp_ecn_nonce_supported_param *ecn_nonce;
2705 struct sctp_supported_chunk_types_param *pr_supported;
2706 int cnt_inits_to=0;
2707 int padval, ret;
2709 /* INIT's always go to the primary (and usually ONLY address) */
2710 m_last = NULL;
2711 net = stcb->asoc.primary_destination;
2712 if (net == NULL) {
2713 net = TAILQ_FIRST(&stcb->asoc.nets);
2714 if (net == NULL) {
2715 /* TSNH */
2716 return;
2718 /* we confirm any address we send an INIT to */
2719 net->dest_state &= ~SCTP_ADDR_UNCONFIRMED;
2720 sctp_set_primary_addr(stcb, NULL, net);
2721 } else {
2722 /* we confirm any address we send an INIT to */
2723 net->dest_state &= ~SCTP_ADDR_UNCONFIRMED;
2725 #ifdef SCTP_DEBUG
2726 if (sctp_debug_on & SCTP_DEBUG_OUTPUT4) {
2727 kprintf("Sending INIT to ");
2728 sctp_print_address ((struct sockaddr *)&net->ro._l_addr);
2730 #endif
2731 if (((struct sockaddr *)&(net->ro._l_addr))->sa_family == AF_INET6) {
2732 /* special hook, if we are sending to link local
2733 * it will not show up in our private address count.
 */
2735 struct sockaddr_in6 *sin6l;
2736 sin6l = &net->ro._l_addr.sin6;
2737 if (IN6_IS_ADDR_LINKLOCAL(&sin6l->sin6_addr))
2738 cnt_inits_to = 1;
2740 if (callout_pending(&net->rxt_timer.timer)) {
2741 /* This case should not happen */
2742 return;
2744 /* start the INIT timer */
2745 if (sctp_timer_start(SCTP_TIMER_TYPE_INIT, inp, stcb, net)) {
2746 /* we are hosed since I can't start the INIT timer? */
2747 return;
2749 MGETHDR(m, MB_DONTWAIT, MT_HEADER);
2750 if (m == NULL) {
2751 /* No memory, INIT timer will re-attempt. */
2752 return;
2754 /* make it into a M_EXT */
2755 MCLGET(m, MB_DONTWAIT);
2756 if ((m->m_flags & M_EXT) != M_EXT) {
2757 /* Failed to get cluster buffer */
2758 sctp_m_freem(m);
2759 return;
2761 m->m_data += SCTP_MIN_OVERHEAD;
2762 m->m_len = sizeof(struct sctp_init_msg);
2763 /* Now lets put the SCTP header in place */
2764 initm = mtod(m, struct sctp_init_msg *);
2765 initm->sh.src_port = inp->sctp_lport;
2766 initm->sh.dest_port = stcb->rport;
2767 initm->sh.v_tag = 0;
2768 initm->sh.checksum = 0; /* calculate later */
2769 /* now the chunk header */
2770 initm->msg.ch.chunk_type = SCTP_INITIATION;
2771 initm->msg.ch.chunk_flags = 0;
2772 /* fill in later from mbuf we build */
2773 initm->msg.ch.chunk_length = 0;
2774 /* place in my tag */
2775 initm->msg.init.initiate_tag = htonl(stcb->asoc.my_vtag);
2776 /* set up some of the credits. */
2777 initm->msg.init.a_rwnd = htonl(max(inp->sctp_socket->so_rcv.ssb_hiwat,
2778 SCTP_MINIMAL_RWND));
2780 initm->msg.init.num_outbound_streams = htons(stcb->asoc.pre_open_streams);
2781 initm->msg.init.num_inbound_streams = htons(stcb->asoc.max_inbound_streams);
2782 initm->msg.init.initial_tsn = htonl(stcb->asoc.init_seq_number);
2783 /* now the address restriction */
2784 sup_addr = (struct sctp_supported_addr_param *)((caddr_t)initm +
2785 sizeof(*initm));
2786 sup_addr->ph.param_type = htons(SCTP_SUPPORTED_ADDRTYPE);
2787 /* we support 2 types IPv6/IPv4 */
2788 sup_addr->ph.param_length = htons(sizeof(*sup_addr) +
2789 sizeof(uint16_t));
2790 sup_addr->addr_type[0] = htons(SCTP_IPV4_ADDRESS);
2791 sup_addr->addr_type[1] = htons(SCTP_IPV6_ADDRESS);
2792 m->m_len += sizeof(*sup_addr) + sizeof(uint16_t);
2794 /* if (inp->sctp_flags & SCTP_PCB_FLAGS_ADAPTIONEVNT) {*/
2795 if (inp->sctp_ep.adaption_layer_indicator) {
2796 struct sctp_adaption_layer_indication *ali;
2797 ali = (struct sctp_adaption_layer_indication *)(
2798 (caddr_t)sup_addr + sizeof(*sup_addr) + sizeof(uint16_t));
2799 ali->ph.param_type = htons(SCTP_ULP_ADAPTION);
2800 ali->ph.param_length = htons(sizeof(*ali));
/*
 * NOTE(review): ntohl() on an outbound field where htonl() is the
 * conventional spelling; both perform the same byte swap, so the
 * on-wire result is identical -- stylistic only.
 */
2801 ali->indication = ntohl(inp->sctp_ep.adaption_layer_indicator);
2802 m->m_len += sizeof(*ali);
2803 ecn = (struct sctp_ecn_supported_param *)((caddr_t)ali +
2804 sizeof(*ali));
2805 } else {
2806 ecn = (struct sctp_ecn_supported_param *)((caddr_t)sup_addr +
2807 sizeof(*sup_addr) + sizeof(uint16_t));
2810 /* now any cookie time extensions */
2811 if (stcb->asoc.cookie_preserve_req) {
2812 struct sctp_cookie_perserve_param *cookie_preserve;
2813 cookie_preserve = (struct sctp_cookie_perserve_param *)(ecn);
2814 cookie_preserve->ph.param_type = htons(SCTP_COOKIE_PRESERVE);
2815 cookie_preserve->ph.param_length = htons(
2816 sizeof(*cookie_preserve));
2817 cookie_preserve->time = htonl(stcb->asoc.cookie_preserve_req);
2818 m->m_len += sizeof(*cookie_preserve);
2819 ecn = (struct sctp_ecn_supported_param *)(
2820 (caddr_t)cookie_preserve + sizeof(*cookie_preserve));
2821 stcb->asoc.cookie_preserve_req = 0;
2824 /* ECN parameter */
2825 if (sctp_ecn == 1) {
2826 ecn->ph.param_type = htons(SCTP_ECN_CAPABLE);
2827 ecn->ph.param_length = htons(sizeof(*ecn));
2828 m->m_len += sizeof(*ecn);
2829 prsctp = (struct sctp_prsctp_supported_param *)((caddr_t)ecn +
2830 sizeof(*ecn));
2831 } else {
2832 prsctp = (struct sctp_prsctp_supported_param *)((caddr_t)ecn);
2834 /* And now tell the peer we do pr-sctp */
2835 prsctp->ph.param_type = htons(SCTP_PRSCTP_SUPPORTED);
2836 prsctp->ph.param_length = htons(sizeof(*prsctp));
2837 m->m_len += sizeof(*prsctp);
2840 /* And now tell the peer we do all the extensions */
2841 pr_supported = (struct sctp_supported_chunk_types_param *)((caddr_t)prsctp +
2842 sizeof(*prsctp));
2844 pr_supported->ph.param_type = htons(SCTP_SUPPORTED_CHUNK_EXT);
2845 pr_supported->ph.param_length = htons(sizeof(*pr_supported) + SCTP_EXT_COUNT);
2846 pr_supported->chunk_types[0] = SCTP_ASCONF;
2847 pr_supported->chunk_types[1] = SCTP_ASCONF_ACK;
2848 pr_supported->chunk_types[2] = SCTP_FORWARD_CUM_TSN;
2849 pr_supported->chunk_types[3] = SCTP_PACKET_DROPPED;
2850 pr_supported->chunk_types[4] = SCTP_STREAM_RESET;
2851 pr_supported->chunk_types[5] = 0; /* pad */
2852 pr_supported->chunk_types[6] = 0; /* pad */
2853 pr_supported->chunk_types[7] = 0; /* pad */
2855 m->m_len += (sizeof(*pr_supported) + SCTP_EXT_COUNT + SCTP_PAD_EXT_COUNT);
2856 /* ECN nonce: And now tell the peer we support ECN nonce */
2858 if (sctp_ecn_nonce) {
2859 ecn_nonce = (struct sctp_ecn_nonce_supported_param *)((caddr_t)pr_supported +
2860 sizeof(*pr_supported) + SCTP_EXT_COUNT + SCTP_PAD_EXT_COUNT);
2861 ecn_nonce->ph.param_type = htons(SCTP_ECN_NONCE_SUPPORTED);
2862 ecn_nonce->ph.param_length = htons(sizeof(*ecn_nonce));
2863 m->m_len += sizeof(*ecn_nonce);
2866 m_at = m;
2867 /* now the addresses */
2868 if (inp->sctp_flags & SCTP_PCB_FLAGS_BOUNDALL) {
2869 struct ifnet *ifn;
2870 int cnt;
2872 cnt = cnt_inits_to;
/* first pass: count the in-scope addresses on all interfaces */
2873 TAILQ_FOREACH(ifn, &ifnet, if_list) {
2874 struct ifaddr_container *ifac;
2876 if ((stcb->asoc.loopback_scope == 0) &&
2877 (ifn->if_type == IFT_LOOP)) {
/*
2879 * Skip loopback devices if loopback_scope
2880 * not set
 */
2882 continue;
2884 TAILQ_FOREACH(ifac, &ifn->if_addrheads[mycpuid], ifa_link) {
2885 struct ifaddr *ifa = ifac->ifa;
2887 if (sctp_is_address_in_scope(ifa,
2888 stcb->asoc.ipv4_addr_legal,
2889 stcb->asoc.ipv6_addr_legal,
2890 stcb->asoc.loopback_scope,
2891 stcb->asoc.ipv4_local_scope,
2892 stcb->asoc.local_scope,
2893 stcb->asoc.site_scope) == 0) {
2894 continue;
2896 cnt++;
/* second pass: only advertise addresses when there is more than one */
2899 if (cnt > 1) {
2900 TAILQ_FOREACH(ifn, &ifnet, if_list) {
2901 struct ifaddr_container *ifac;
2903 if ((stcb->asoc.loopback_scope == 0) &&
2904 (ifn->if_type == IFT_LOOP)) {
/*
2906 * Skip loopback devices if loopback_scope
2907 * not set
 */
2909 continue;
2911 TAILQ_FOREACH(ifac, &ifn->if_addrheads[mycpuid], ifa_link) {
2912 struct ifaddr *ifa = ifac->ifa;
2914 if (sctp_is_address_in_scope(ifa,
2915 stcb->asoc.ipv4_addr_legal,
2916 stcb->asoc.ipv6_addr_legal,
2917 stcb->asoc.loopback_scope,
2918 stcb->asoc.ipv4_local_scope,
2919 stcb->asoc.local_scope,
2920 stcb->asoc.site_scope) == 0) {
2921 continue;
2923 m_at = sctp_add_addr_to_mbuf(m_at, ifa);
2927 } else {
2928 struct sctp_laddr *laddr;
2929 int cnt;
2930 cnt = cnt_inits_to;
2931 /* First, how many ? */
2932 LIST_FOREACH(laddr, &inp->sctp_addr_list, sctp_nxt_addr) {
2933 if (laddr->ifa == NULL) {
2934 continue;
2936 if (laddr->ifa->ifa_addr == NULL)
2937 continue;
2938 if (sctp_is_address_in_scope(laddr->ifa,
2939 stcb->asoc.ipv4_addr_legal,
2940 stcb->asoc.ipv6_addr_legal,
2941 stcb->asoc.loopback_scope,
2942 stcb->asoc.ipv4_local_scope,
2943 stcb->asoc.local_scope,
2944 stcb->asoc.site_scope) == 0) {
2945 continue;
2947 cnt++;
2949 /* To get through a NAT we only list addresses if
2950 * we have more than one. That way if you just
2951 * bind a single address we let the source of the init
2952 * dictate our address.
 */
2954 if (cnt > 1) {
2955 LIST_FOREACH(laddr, &inp->sctp_addr_list, sctp_nxt_addr) {
2956 if (laddr->ifa == NULL) {
2957 continue;
2959 if (laddr->ifa->ifa_addr == NULL) {
2960 continue;
2963 if (sctp_is_address_in_scope(laddr->ifa,
2964 stcb->asoc.ipv4_addr_legal,
2965 stcb->asoc.ipv6_addr_legal,
2966 stcb->asoc.loopback_scope,
2967 stcb->asoc.ipv4_local_scope,
2968 stcb->asoc.local_scope,
2969 stcb->asoc.site_scope) == 0) {
2970 continue;
2972 m_at = sctp_add_addr_to_mbuf(m_at, laddr->ifa);
2976 /* calulate the size and update pkt header and chunk header */
2977 m->m_pkthdr.len = 0;
2978 for (m_at = m; m_at; m_at = m_at->m_next) {
2979 if (m_at->m_next == NULL)
2980 m_last = m_at;
2981 m->m_pkthdr.len += m_at->m_len;
2983 initm->msg.ch.chunk_length = htons((m->m_pkthdr.len -
2984 sizeof(struct sctphdr)));
2985 /* We pass 0 here to NOT set IP_DF if its IPv4, we
2986 * ignore the return here since the timer will drive
2987 * a retranmission.
 */
2990 /* I don't expect this to execute but we will be safe here */
2991 padval = m->m_pkthdr.len % 4;
2992 if ((padval) && (m_last)) {
2993 /* The compiler worries that m_last may not be
2994 * set even though I think it is impossible :->
2995 * however we add m_last here just in case.
 */
2997 int ret;
2998 ret = sctp_add_pad_tombuf(m_last, (4-padval));
2999 if (ret) {
3000 /* Houston we have a problem, no space */
3001 sctp_m_freem(m);
3002 return;
/*
 * NOTE(review): (4 - padval) bytes of padding were appended above,
 * yet m_pkthdr.len is advanced by padval -- looks like the complement
 * was intended (later SCTP stacks use 4 - padval here); confirm.
 */
3004 m->m_pkthdr.len += padval;
3006 #ifdef SCTP_DEBUG
3007 if (sctp_debug_on & SCTP_DEBUG_OUTPUT4) {
3008 kprintf("Calling lowlevel output stcb:%x net:%x\n",
3009 (u_int)stcb, (u_int)net);
3011 #endif
3012 ret = sctp_lowlevel_chunk_output(inp, stcb, net,
3013 (struct sockaddr *)&net->ro._l_addr, m, 0, 0, NULL, 0);
3014 #ifdef SCTP_DEBUG
3015 if (sctp_debug_on & SCTP_DEBUG_OUTPUT4) {
3016 kprintf("Low level output returns %d\n", ret);
3018 #endif
3019 sctp_timer_start(SCTP_TIMER_TYPE_INIT, inp, stcb, net);
3020 SCTP_GETTIME_TIMEVAL(&net->last_sent_time);
/*
 * Walk the parameter list of an INIT/INIT-ACK (parameters begin at
 * 'param_offset' within 'in_initpkt', bounded by cp->chunk_length) and
 * build an operational-error mbuf reporting parameters we do not
 * recognize.  Known parameter types are skipped.  A hostname-address
 * parameter, an impossible parameter length, or an unknown parameter
 * without the "continue" bit sets *abort_processing = 1.  Unknown
 * parameters with the report bit (0x4000) are copied into the error
 * mbuf as UNRECOGNIZED_PARAMETER causes.  Returns the error mbuf, or
 * NULL when there is nothing to report (or on allocation failure).
 */
3023 struct mbuf *
3024 sctp_arethere_unrecognized_parameters(struct mbuf *in_initpkt,
3025 int param_offset, int *abort_processing, struct sctp_chunkhdr *cp)
3027 /* Given a mbuf containing an INIT or INIT-ACK
3028 * with the param_offset being equal to the
3029 * beginning of the params i.e. (iphlen + sizeof(struct sctp_init_msg)
3030 * parse through the parameters to the end of the mbuf verifying
3031 * that all parameters are known.
3033 * For unknown parameters build and return a mbuf with
3034 * UNRECOGNIZED_PARAMETER errors. If the flags indicate
3035 * to stop processing this chunk stop, and set *abort_processing
3036 * to 1.
3038 * By having param_offset be pre-set to where parameters begin
3039 * it is hoped that this routine may be reused in the future
3040 * by new features.
 */
3042 struct sctp_paramhdr *phdr, params;
3044 struct mbuf *mat, *op_err;
3045 char tempbuf[2048];
3046 int at, limit, pad_needed;
3047 uint16_t ptype, plen;
3048 int err_at;
3050 *abort_processing = 0;
3051 mat = in_initpkt;
3052 err_at = 0;
3053 limit = ntohs(cp->chunk_length) - sizeof(struct sctp_init_chunk);
3054 #ifdef SCTP_DEBUG
3055 if (sctp_debug_on & SCTP_DEBUG_OUTPUT4) {
3056 kprintf("Limit is %d bytes\n", limit);
3058 #endif
3059 at = param_offset;
3060 op_err = NULL;
3062 phdr = sctp_get_next_param(mat, at, &params, sizeof(params));
3063 while ((phdr != NULL) && ((size_t)limit >= sizeof(struct sctp_paramhdr))) {
3064 ptype = ntohs(phdr->param_type);
3065 plen = ntohs(phdr->param_length);
3066 limit -= SCTP_SIZE32(plen);
3067 if (plen < sizeof(struct sctp_paramhdr)) {
3068 #ifdef SCTP_DEBUG
3069 if (sctp_debug_on & SCTP_DEBUG_OUTPUT4) {
3070 kprintf("sctp_output.c:Impossible length in parameter < %d\n", plen);
3072 #endif
3073 *abort_processing = 1;
3074 break;
3076 /* All parameters for all chunks that we
3077 * know/understand are listed here. We process
3078 * them other places and make appropriate
3079 * stop actions per the upper bits. However
3080 * this is the generic routine processor's can
3081 * call to get back an operr.. to either incorporate (init-ack)
3082 * or send.
 */
/* NOTE(review): SCTP_ULP_ADAPTION is tested twice in this list -- harmless duplicate */
3084 if ((ptype == SCTP_HEARTBEAT_INFO) ||
3085 (ptype == SCTP_IPV4_ADDRESS) ||
3086 (ptype == SCTP_IPV6_ADDRESS) ||
3087 (ptype == SCTP_STATE_COOKIE) ||
3088 (ptype == SCTP_UNRECOG_PARAM) ||
3089 (ptype == SCTP_COOKIE_PRESERVE) ||
3090 (ptype == SCTP_SUPPORTED_ADDRTYPE) ||
3091 (ptype == SCTP_PRSCTP_SUPPORTED) ||
3092 (ptype == SCTP_ADD_IP_ADDRESS) ||
3093 (ptype == SCTP_DEL_IP_ADDRESS) ||
3094 (ptype == SCTP_ECN_CAPABLE) ||
3095 (ptype == SCTP_ULP_ADAPTION) ||
3096 (ptype == SCTP_ERROR_CAUSE_IND) ||
3097 (ptype == SCTP_SET_PRIM_ADDR) ||
3098 (ptype == SCTP_SUCCESS_REPORT) ||
3099 (ptype == SCTP_ULP_ADAPTION) ||
3100 (ptype == SCTP_SUPPORTED_CHUNK_EXT) ||
3101 (ptype == SCTP_ECN_NONCE_SUPPORTED)
3103 /* no skip it */
3104 at += SCTP_SIZE32(plen);
3105 } else if (ptype == SCTP_HOSTNAME_ADDRESS) {
3106 /* We can NOT handle HOST NAME addresses!! */
3107 #ifdef SCTP_DEBUG
3108 if (sctp_debug_on & SCTP_DEBUG_OUTPUT4) {
3109 kprintf("Can't handle hostname addresses.. abort processing\n");
3111 #endif
3112 *abort_processing = 1;
3113 if (op_err == NULL) {
3114 /* Ok need to try to get a mbuf */
3115 MGETHDR(op_err, MB_DONTWAIT, MT_DATA);
3116 if (op_err) {
3117 op_err->m_len = 0;
3118 op_err->m_pkthdr.len = 0;
3119 /* pre-reserve space for ip and sctp header and chunk hdr*/
3120 op_err->m_data += sizeof(struct ip6_hdr);
3121 op_err->m_data += sizeof(struct sctphdr);
3122 op_err->m_data += sizeof(struct sctp_chunkhdr);
3125 if (op_err) {
3126 /* If we have space */
3127 struct sctp_paramhdr s;
3128 if (err_at % 4) {
3129 u_int32_t cpthis=0;
3130 pad_needed = 4 - (err_at % 4);
3131 m_copyback(op_err, err_at, pad_needed, (caddr_t)&cpthis);
3132 err_at += pad_needed;
3134 s.param_type = htons(SCTP_CAUSE_UNRESOLV_ADDR);
3135 s.param_length = htons(sizeof(s) + plen);
3136 m_copyback(op_err, err_at, sizeof(s), (caddr_t)&s);
3137 err_at += sizeof(s);
/*
 * NOTE(review): unlike the unknown-parameter path below, plen is NOT
 * clamped to sizeof(tempbuf) here; a hostname parameter longer than
 * 2048 bytes would overrun tempbuf -- confirm sctp_get_next_param's
 * bounds behavior.
 */
3138 phdr = sctp_get_next_param(mat, at, (struct sctp_paramhdr *)tempbuf, plen);
3139 if (phdr == NULL) {
3140 sctp_m_freem(op_err);
3141 /* we are out of memory but we
3142 * still need to have a look at what to
3143 * do (the system is in trouble though).
 */
3145 return (NULL);
3147 m_copyback(op_err, err_at, plen, (caddr_t)phdr);
3148 err_at += plen;
3150 return (op_err);
3151 } else {
3152 /* we do not recognize the parameter
3153 * figure out what we do.
 */
3155 #ifdef SCTP_DEBUG
3156 if (sctp_debug_on & SCTP_DEBUG_OUTPUT4) {
3157 kprintf("Got parameter type %x - unknown\n",
3158 (u_int)ptype);
3160 #endif
3161 if ((ptype & 0x4000) == 0x4000) {
3162 /* Report bit is set?? */
3163 #ifdef SCTP_DEBUG
3164 if (sctp_debug_on & SCTP_DEBUG_OUTPUT4) {
3165 kprintf("Report bit is set\n");
3167 #endif
3168 if (op_err == NULL) {
3169 /* Ok need to try to get an mbuf */
3170 MGETHDR(op_err, MB_DONTWAIT, MT_DATA);
3171 if (op_err) {
3172 op_err->m_len = 0;
3173 op_err->m_pkthdr.len = 0;
3174 op_err->m_data += sizeof(struct ip6_hdr);
3175 op_err->m_data += sizeof(struct sctphdr);
3176 op_err->m_data += sizeof(struct sctp_chunkhdr);
3179 if (op_err) {
3180 /* If we have space */
3181 struct sctp_paramhdr s;
3182 if (err_at % 4) {
3183 u_int32_t cpthis=0;
3184 pad_needed = 4 - (err_at % 4);
3185 m_copyback(op_err, err_at, pad_needed, (caddr_t)&cpthis);
3186 err_at += pad_needed;
3188 s.param_type = htons(SCTP_UNRECOG_PARAM);
3189 s.param_length = htons(sizeof(s) + plen);
3190 m_copyback(op_err, err_at, sizeof(s), (caddr_t)&s);
3191 err_at += sizeof(s);
3192 if (plen > sizeof(tempbuf)) {
3193 plen = sizeof(tempbuf);
3195 phdr = sctp_get_next_param(mat, at, (struct sctp_paramhdr *)tempbuf, plen);
3196 if (phdr == NULL) {
3197 sctp_m_freem(op_err);
3198 /* we are out of memory but we
3199 * still need to have a look at what to
3200 * do (the system is in trouble though).
 */
/*
 * NOTE(review): op_err is freed just above but not reset to NULL;
 * if the abort bit is clear we loop on and may later return or free
 * the stale pointer -- confirm (later stacks set op_err = NULL here).
 */
3202 goto more_processing;
3204 m_copyback(op_err, err_at, plen, (caddr_t)phdr);
3205 err_at += plen;
3208 more_processing:
3209 if ((ptype & 0x8000) == 0x0000) {
3210 #ifdef SCTP_DEBUG
3211 if (sctp_debug_on & SCTP_DEBUG_OUTPUT4) {
3212 kprintf("Abort bit is now setting1\n");
3214 #endif
3215 return (op_err);
3216 } else {
3217 /* skip this chunk and continue processing */
3218 at += SCTP_SIZE32(plen);
3222 phdr = sctp_get_next_param(mat, at, &params, sizeof(params));
3224 return (op_err);
3227 static int
3228 sctp_are_there_new_addresses(struct sctp_association *asoc,
3229 struct mbuf *in_initpkt, int iphlen, int offset)
3232 * Given a INIT packet, look through the packet to verify that
3233 * there are NO new addresses. As we go through the parameters
3234 * add reports of any un-understood parameters that require an
3235 * error. Also we must return (1) to drop the packet if we see
3236 * a un-understood parameter that tells us to drop the chunk.
3238 struct sockaddr_in sin4, *sa4;
3239 struct sockaddr_in6 sin6, *sa6;
3240 struct sockaddr *sa_touse;
3241 struct sockaddr *sa;
3242 struct sctp_paramhdr *phdr, params;
3243 struct ip *iph;
3244 struct mbuf *mat;
3245 uint16_t ptype, plen;
3246 int err_at;
3247 uint8_t fnd;
3248 struct sctp_nets *net;
3250 memset(&sin4, 0, sizeof(sin4));
3251 memset(&sin6, 0, sizeof(sin6));
3252 sin4.sin_family = AF_INET;
3253 sin4.sin_len = sizeof(sin4);
3254 sin6.sin6_family = AF_INET6;
3255 sin6.sin6_len = sizeof(sin6);
3257 sa_touse = NULL;
3258 /* First what about the src address of the pkt ? */
3259 iph = mtod(in_initpkt, struct ip *);
3260 if (iph->ip_v == IPVERSION) {
3261 /* source addr is IPv4 */
3262 sin4.sin_addr = iph->ip_src;
3263 sa_touse = (struct sockaddr *)&sin4;
3264 } else if (iph->ip_v == (IPV6_VERSION >> 4)) {
3265 /* source addr is IPv6 */
3266 struct ip6_hdr *ip6h;
3267 ip6h = mtod(in_initpkt, struct ip6_hdr *);
3268 sin6.sin6_addr = ip6h->ip6_src;
3269 sa_touse = (struct sockaddr *)&sin6;
3270 } else {
3271 return (1);
3274 fnd = 0;
3275 TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
3276 sa = (struct sockaddr *)&net->ro._l_addr;
3277 if (sa->sa_family == sa_touse->sa_family) {
3278 if (sa->sa_family == AF_INET) {
3279 sa4 = (struct sockaddr_in *)sa;
3280 if (sa4->sin_addr.s_addr ==
3281 sin4.sin_addr.s_addr) {
3282 fnd = 1;
3283 break;
3285 } else if (sa->sa_family == AF_INET6) {
3286 sa6 = (struct sockaddr_in6 *)sa;
3287 if (SCTP6_ARE_ADDR_EQUAL(&sa6->sin6_addr,
3288 &sin6.sin6_addr)) {
3289 fnd = 1;
3290 break;
3295 if (fnd == 0) {
3296 /* New address added! no need to look futher. */
3297 return (1);
3299 /* Ok so far lets munge through the rest of the packet */
3300 mat = in_initpkt;
3301 err_at = 0;
3302 sa_touse = NULL;
3303 offset += sizeof(struct sctp_init_chunk);
3304 phdr = sctp_get_next_param(mat, offset, &params, sizeof(params));
3305 while (phdr) {
3306 ptype = ntohs(phdr->param_type);
3307 plen = ntohs(phdr->param_length);
3308 if (ptype == SCTP_IPV4_ADDRESS) {
3309 struct sctp_ipv4addr_param *p4, p4_buf;
3311 phdr = sctp_get_next_param(mat, offset,
3312 (struct sctp_paramhdr *)&p4_buf, sizeof(p4_buf));
3313 if (plen != sizeof(struct sctp_ipv4addr_param) ||
3314 phdr == NULL) {
3315 return (1);
3317 p4 = (struct sctp_ipv4addr_param *)phdr;
3318 sin4.sin_addr.s_addr = p4->addr;
3319 sa_touse = (struct sockaddr *)&sin4;
3320 } else if (ptype == SCTP_IPV6_ADDRESS) {
3321 struct sctp_ipv6addr_param *p6, p6_buf;
3323 phdr = sctp_get_next_param(mat, offset,
3324 (struct sctp_paramhdr *)&p6_buf, sizeof(p6_buf));
3325 if (plen != sizeof(struct sctp_ipv6addr_param) ||
3326 phdr == NULL) {
3327 return (1);
3329 p6 = (struct sctp_ipv6addr_param *)phdr;
3330 memcpy((caddr_t)&sin6.sin6_addr, p6->addr,
3331 sizeof(p6->addr));
3332 sa_touse = (struct sockaddr *)&sin4;
3335 if (sa_touse) {
3336 /* ok, sa_touse points to one to check */
3337 fnd = 0;
3338 TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
3339 sa = (struct sockaddr *)&net->ro._l_addr;
3340 if (sa->sa_family != sa_touse->sa_family) {
3341 continue;
3343 if (sa->sa_family == AF_INET) {
3344 sa4 = (struct sockaddr_in *)sa;
3345 if (sa4->sin_addr.s_addr ==
3346 sin4.sin_addr.s_addr) {
3347 fnd = 1;
3348 break;
3350 } else if (sa->sa_family == AF_INET6) {
3351 sa6 = (struct sockaddr_in6 *)sa;
3352 if (SCTP6_ARE_ADDR_EQUAL(
3353 &sa6->sin6_addr, &sin6.sin6_addr)) {
3354 fnd = 1;
3355 break;
3359 if (!fnd) {
3360 /* New addr added! no need to look further */
3361 return (1);
3364 offset += SCTP_SIZE32(plen);
3365 phdr = sctp_get_next_param(mat, offset, &params, sizeof(params));
3367 return (0);
3371  * Given a MBUF chain that was sent into us containing an
3372  * INIT. Build a INIT-ACK with COOKIE and send back.
3373  * We assume that the in_initpkt has done a pullup to
3374  * include IPv6/4header, SCTP header and initial part of
3375  * INIT message (i.e. the struct sctp_init_msg).
3377 void
3378 sctp_send_initiate_ack(struct sctp_inpcb *inp, struct sctp_tcb *stcb,
3379     struct mbuf *init_pkt, int iphlen, int offset, struct sctphdr *sh,
3380     struct sctp_init_chunk *init_chk)
3382 	struct sctp_association *asoc;
3383 	struct mbuf *m, *m_at, *m_tmp, *m_cookie, *op_err, *m_last;
3384 	struct sctp_init_msg *initackm_out;
3385 	struct sctp_ecn_supported_param *ecn;
3386 	struct sctp_prsctp_supported_param *prsctp;
3387 	struct sctp_ecn_nonce_supported_param *ecn_nonce;
3388 	struct sctp_supported_chunk_types_param *pr_supported;
3389 	struct sockaddr_storage store;
3390 	struct sockaddr_in *sin;
3391 	struct sockaddr_in6 *sin6;
3392 	struct route *ro;
3393 	struct ip *iph;
3394 	struct ip6_hdr *ip6;
3395 	struct sockaddr *to;
3396 	struct sctp_state_cookie stc;
3397 	struct sctp_nets *net=NULL;
3398 	int cnt_inits_to=0;
3399 	uint16_t his_limit, i_want;
3400 	int abort_flag, padval, sz_of;
/* stcb may be NULL (no existing TCB): then asoc stays NULL and the
 * cookie is built from the endpoint (inp) defaults below. */
3402 	if (stcb) {
3403 		asoc = &stcb->asoc;
3404 	} else {
3405 		asoc = NULL;
3407 	m_last = NULL;
3408 	if ((asoc != NULL) &&
3409 	    (SCTP_GET_STATE(asoc) != SCTP_STATE_COOKIE_WAIT) &&
3410 	    (sctp_are_there_new_addresses(asoc, init_pkt, iphlen, offset))) {
3411 		/* new addresses, out of here in non-cookie-wait states */
3413 		 * Send a ABORT, we don't add the new address error clause though
3414 		 * we even set the T bit and copy in the 0 tag.. this looks no
3415 		 * different than if no listner was present.
3417 		sctp_send_abort(init_pkt, iphlen, sh, 0, NULL);
3418 		return;
3420 	abort_flag = 0;
/* Build an (optional) operational-error chain from any unrecognized
 * parameters in the INIT; abort_flag set means we must abort instead. */
3421 	op_err = sctp_arethere_unrecognized_parameters(init_pkt,
3422 	    (offset+sizeof(struct sctp_init_chunk)),
3423 	    &abort_flag, (struct sctp_chunkhdr *)init_chk);
3424 	if (abort_flag) {
3425 		sctp_send_abort(init_pkt, iphlen, sh, init_chk->init.initiate_tag, op_err);
3426 		return;
3428 	MGETHDR(m, MB_DONTWAIT, MT_HEADER);
3429 	if (m == NULL) {
3430 		/* No memory, INIT timer will re-attempt. */
3431 		if (op_err)
3432 			sctp_m_freem(op_err);
3433 		return;
3435 	MCLGET(m, MB_DONTWAIT);
3436 	if ((m->m_flags & M_EXT) != M_EXT) {
3437 		/* Failed to get cluster buffer */
3438 		if (op_err)
3439 			sctp_m_freem(op_err);
3440 		sctp_m_freem(m);
3441 		return;
3443 	m->m_data += SCTP_MIN_OVERHEAD;
3444 	m->m_pkthdr.rcvif = 0;
3445 	m->m_len = sizeof(struct sctp_init_msg);
3447 	/* the time I built cookie */
3448 	SCTP_GETTIME_TIMEVAL(&stc.time_entered);
3450 	/* populate any tie tags */
3451 	if (asoc != NULL) {
3452 		/* unlock before tag selections */
3453 		SCTP_TCB_UNLOCK(stcb);
3454 		if (asoc->my_vtag_nonce == 0)
3455 			asoc->my_vtag_nonce = sctp_select_a_tag(inp);
3456 		stc.tie_tag_my_vtag = asoc->my_vtag_nonce;
3458 		if (asoc->peer_vtag_nonce == 0)
3459 			asoc->peer_vtag_nonce = sctp_select_a_tag(inp);
3460 		stc.tie_tag_peer_vtag = asoc->peer_vtag_nonce;
3462 		stc.cookie_life = asoc->cookie_life;
3463 		net = asoc->primary_destination;
3464 		/* now we must relock */
3465 		SCTP_INP_RLOCK(inp);
3466 		/* we may be in trouble here if the inp got freed
3467 		 * most likely this set of tests will protect
3468 		 * us but there is a chance not.
3470 		if (inp->sctp_flags & (SCTP_PCB_FLAGS_SOCKET_GONE|SCTP_PCB_FLAGS_SOCKET_ALLGONE)) {
3471 			if (op_err)
3472 				sctp_m_freem(op_err);
3473 			sctp_m_freem(m);
3474 			sctp_send_abort(init_pkt, iphlen, sh, 0, NULL);
3475 			return;
3477 		SCTP_TCB_LOCK(stcb);
3478 		SCTP_INP_RUNLOCK(stcb->sctp_ep);
3479 	} else {
3480 		stc.tie_tag_my_vtag = 0;
3481 		stc.tie_tag_peer_vtag = 0;
3482 		/* life I will award this cookie */
3483 		stc.cookie_life = inp->sctp_ep.def_cookie_life;
3486 	/* copy in the ports for later check */
3487 	stc.myport = sh->dest_port;
3488 	stc.peerport = sh->src_port;
3491 	 * If we wanted to honor cookie life extentions, we would add
3492 	 * to stc.cookie_life. For now we should NOT honor any extension
3494 	stc.site_scope = stc.local_scope = stc.loopback_scope = 0;
3495 	if (inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) {
3496 		struct inpcb *in_inp;
3497 		/* Its a V6 socket */
3498 		in_inp = (struct inpcb *)inp;
3499 		stc.ipv6_addr_legal = 1;
3500 		/* Now look at the binding flag to see if V4 will be legal */
3501 	if (
3502 #if defined(__FreeBSD__) || defined(__APPLE__) || defined(__DragonFly__)
3503 	    (in_inp->inp_flags & IN6P_IPV6_V6ONLY)
3504 #elif defined(__OpenBSD__)
3505 	    (0)	/* For openbsd we do dual bind only */
3506 #else
3507 	    (((struct in6pcb *)in_inp)->in6p_flags & IN6P_IPV6_V6ONLY)
3508 #endif
3509 	    == 0) {
3510 			stc.ipv4_addr_legal = 1;
3511 		} else {
3512 			/* V4 addresses are NOT legal on the association */
3513 			stc.ipv4_addr_legal = 0;
3515 	} else {
3516 		/* Its a V4 socket, no - V6 */
3517 		stc.ipv4_addr_legal = 1;
3518 		stc.ipv6_addr_legal = 0;
3521 #ifdef SCTP_DONT_DO_PRIVADDR_SCOPE
3522 	stc.ipv4_scope = 1;
3523 #else
3524 	stc.ipv4_scope = 0;
3525 #endif
3526 	/* now for scope setup */
3527 	memset((caddr_t)&store, 0, sizeof(store));
3528 	sin = (struct sockaddr_in *)&store;
3529 	sin6 = (struct sockaddr_in6 *)&store;
/* net == NULL means no existing TCB: derive destination and scopes from
 * the incoming packet's source address via a throwaway route lookup. */
3530 	if (net == NULL) {
3531 		to = (struct sockaddr *)&store;
3532 		iph = mtod(init_pkt, struct ip *);
3533 		if (iph->ip_v == IPVERSION) {
3534 			struct in_addr addr;
3535 			struct route iproute;
3537 			sin->sin_family = AF_INET;
3538 			sin->sin_len = sizeof(struct sockaddr_in);
3539 			sin->sin_port = sh->src_port;
3540 			sin->sin_addr = iph->ip_src;
3541 			/* lookup address */
3542 			stc.address[0] = sin->sin_addr.s_addr;
3543 			stc.address[1] = 0;
3544 			stc.address[2] = 0;
3545 			stc.address[3] = 0;
3546 			stc.addr_type = SCTP_IPV4_ADDRESS;
3547 			/* local from address */
3548 			memset(&iproute, 0, sizeof(iproute));
3549 			ro = &iproute;
3550 			memcpy(&ro->ro_dst, sin, sizeof(*sin));
3551 			addr = sctp_ipv4_source_address_selection(inp, NULL,
3552 			    ro, NULL, 0);
3553 			if (ro->ro_rt) {
3554 				RTFREE(ro->ro_rt);
3556 			stc.laddress[0] = addr.s_addr;
3557 			stc.laddress[1] = 0;
3558 			stc.laddress[2] = 0;
3559 			stc.laddress[3] = 0;
3560 			stc.laddr_type = SCTP_IPV4_ADDRESS;
3561 			/* scope_id is only for v6 */
3562 			stc.scope_id = 0;
3563 #ifndef SCTP_DONT_DO_PRIVADDR_SCOPE
3564 			if (IN4_ISPRIVATE_ADDRESS(&sin->sin_addr)) {
3565 				stc.ipv4_scope = 1;
3567 #else
3568 			stc.ipv4_scope = 1;
3569 #endif /* SCTP_DONT_DO_PRIVADDR_SCOPE */
3570 			/* Must use the address in this case */
3571 			if (sctp_is_address_on_local_host((struct sockaddr *)sin)) {
3572 				stc.loopback_scope = 1;
3573 				stc.ipv4_scope = 1;
3574 				stc.site_scope = 1;
3575 				stc.local_scope = 1;
3577 		} else if (iph->ip_v == (IPV6_VERSION >> 4)) {
3578 			struct in6_addr addr;
3579 #ifdef NEW_STRUCT_ROUTE
3580 			struct route iproute6;
3581 #else
3582 			struct route_in6 iproute6;
3583 #endif
3584 			ip6 = mtod(init_pkt, struct ip6_hdr *);
3585 			sin6->sin6_family = AF_INET6;
3586 			sin6->sin6_len = sizeof(struct sockaddr_in6);
3587 			sin6->sin6_port = sh->src_port;
3588 			sin6->sin6_addr = ip6->ip6_src;
3589 			/* lookup address */
3590 			memcpy(&stc.address, &sin6->sin6_addr,
3591 			    sizeof(struct in6_addr));
3592 			sin6->sin6_scope_id = 0;
3593 			stc.addr_type = SCTP_IPV6_ADDRESS;
3594 			stc.scope_id = 0;
3595 			if (sctp_is_address_on_local_host((struct sockaddr *)sin6)) {
3596 				stc.loopback_scope = 1;
3597 				stc.local_scope = 1;
3598 				stc.site_scope = 1;
3599 				stc.ipv4_scope = 1;
3600 		} else if (IN6_IS_ADDR_LINKLOCAL(&sin6->sin6_addr)) {
3602 			 * If the new destination is a LINK_LOCAL
3603 			 * we must have common both site and local
3604 			 * scope. Don't set local scope though since
3605 			 * we must depend on the source to be added
3606 			 * implicitly. We cannot assure just because
3607 			 * we share one link that all links are common.
3609 				stc.local_scope = 0;
3610 				stc.site_scope = 1;
3611 				stc.ipv4_scope = 1;
3612 				/* we start counting for the private
3613 				 * address stuff at 1. since the link
3614 				 * local we source from won't show
3615 				 * up in our scoped cou8nt.
3617 				cnt_inits_to=1;
3618 				/* pull out the scope_id from incoming pkt */
3619 				in6_recoverscope(sin6, &ip6->ip6_src,
3620 				    init_pkt->m_pkthdr.rcvif);
3621 #if defined(SCTP_BASE_FREEBSD) || defined(__APPLE__) || defined(__DragonFly__)
3622 				in6_embedscope(&sin6->sin6_addr, sin6, NULL,
3623 				    NULL);
3624 #else
3625 				in6_embedscope(&sin6->sin6_addr, sin6);
3626 #endif
3627 				stc.scope_id = sin6->sin6_scope_id;
3628 			} else if (IN6_IS_ADDR_SITELOCAL(&sin6->sin6_addr)) {
3630 				 * If the new destination is SITE_LOCAL
3631 				 * then we must have site scope in common.
3633 				stc.site_scope = 1;
3635 			/* local from address */
3636 			memset(&iproute6, 0, sizeof(iproute6));
3637 			ro = (struct route *)&iproute6;
3638 			memcpy(&ro->ro_dst, sin6, sizeof(*sin6));
3639 			addr = sctp_ipv6_source_address_selection(inp, NULL,
3640 			    ro, NULL, 0);
3641 			if (ro->ro_rt) {
3642 				RTFREE(ro->ro_rt);
3644 			memcpy(&stc.laddress, &addr, sizeof(struct in6_addr));
3645 			stc.laddr_type = SCTP_IPV6_ADDRESS;
3647 	} else {
3648 		/* set the scope per the existing tcb */
3649 		struct sctp_nets *lnet;
3651 		stc.loopback_scope = asoc->loopback_scope;
3652 		stc.ipv4_scope = asoc->ipv4_local_scope;
3653 		stc.site_scope = asoc->site_scope;
3654 		stc.local_scope = asoc->local_scope;
3655 		TAILQ_FOREACH(lnet, &asoc->nets, sctp_next) {
3656 			if (lnet->ro._l_addr.sin6.sin6_family == AF_INET6) {
3657 				if (IN6_IS_ADDR_LINKLOCAL(&lnet->ro._l_addr.sin6.sin6_addr)) {
3658 					/* if we have a LL address, start counting
3659 					 * at 1.
3661 					cnt_inits_to = 1;
3666 		/* use the net pointer */
3667 		to = (struct sockaddr *)&net->ro._l_addr;
3668 		if (to->sa_family == AF_INET) {
3669 			sin = (struct sockaddr_in *)to;
3670 			stc.address[0] = sin->sin_addr.s_addr;
3671 			stc.address[1] = 0;
3672 			stc.address[2] = 0;
3673 			stc.address[3] = 0;
3674 			stc.addr_type = SCTP_IPV4_ADDRESS;
3675 			if (net->src_addr_selected == 0) {
3676 				/* strange case here, the INIT
3677 				 * should have did the selection.
3679 				net->ro._s_addr.sin.sin_addr =
3680 				    sctp_ipv4_source_address_selection(inp,
3681 				    stcb, (struct route *)&net->ro, net, 0);
3682 				net->src_addr_selected = 1;
3686 			stc.laddress[0] = net->ro._s_addr.sin.sin_addr.s_addr;
3687 			stc.laddress[1] = 0;
3688 			stc.laddress[2] = 0;
3689 			stc.laddress[3] = 0;
3690 			stc.laddr_type = SCTP_IPV4_ADDRESS;
3691 		} else if (to->sa_family == AF_INET6) {
3692 			sin6 = (struct sockaddr_in6 *)to;
3693 			memcpy(&stc.address, &sin6->sin6_addr,
3694 			    sizeof(struct in6_addr));
3695 			stc.addr_type = SCTP_IPV6_ADDRESS;
3696 			if (net->src_addr_selected == 0) {
3697 				/* strange case here, the INIT
3698 				 * should have did the selection.
3700 				net->ro._s_addr.sin6.sin6_addr =
3701 				    sctp_ipv6_source_address_selection(inp,
3702 				    stcb, (struct route *)&net->ro, net, 0);
3703 				net->src_addr_selected = 1;
3705 			memcpy(&stc.laddress, &net->ro._l_addr.sin6.sin6_addr,
3706 			    sizeof(struct in6_addr));
3707 			stc.laddr_type = SCTP_IPV6_ADDRESS;
3710 	/* Now lets put the SCTP header in place */
3711 	initackm_out = mtod(m, struct sctp_init_msg *);
3712 	initackm_out->sh.src_port = inp->sctp_lport;
3713 	initackm_out->sh.dest_port = sh->src_port;
3714 	initackm_out->sh.v_tag = init_chk->init.initiate_tag;
3715 	/* Save it off for quick ref */
3716 	stc.peers_vtag = init_chk->init.initiate_tag;
3717 	initackm_out->sh.checksum = 0;	/* calculate later */
3718 	/* who are we */
3719 	strncpy(stc.identification, SCTP_VERSION_STRING,
3720 	    min(strlen(SCTP_VERSION_STRING), sizeof(stc.identification)));
3721 	/* now the chunk header */
3722 	initackm_out->msg.ch.chunk_type = SCTP_INITIATION_ACK;
3723 	initackm_out->msg.ch.chunk_flags = 0;
3724 	/* fill in later from mbuf we build */
3725 	initackm_out->msg.ch.chunk_length = 0;
3726 	/* place in my tag */
3727 	if ((asoc != NULL) &&
3728 	    ((SCTP_GET_STATE(asoc) == SCTP_STATE_COOKIE_WAIT) ||
3729 	     (SCTP_GET_STATE(asoc) == SCTP_STATE_COOKIE_ECHOED))) {
3730 		/* re-use the v-tags and init-seq here */
3731 		initackm_out->msg.init.initiate_tag = htonl(asoc->my_vtag);
3732 		initackm_out->msg.init.initial_tsn = htonl(asoc->init_seq_number);
3733 	} else {
3734 		initackm_out->msg.init.initiate_tag = htonl(sctp_select_a_tag(inp));
3735 		/* get a TSN to use too */
3736 		initackm_out->msg.init.initial_tsn = htonl(sctp_select_initial_TSN(&inp->sctp_ep));
3738 	/* save away my tag to */
3739 	stc.my_vtag = initackm_out->msg.init.initiate_tag;
3741 	/* set up some of the credits. */
3742 	initackm_out->msg.init.a_rwnd = htonl(max(inp->sctp_socket->so_rcv.ssb_hiwat, SCTP_MINIMAL_RWND));
3743 	/* set what I want */
3744 	his_limit = ntohs(init_chk->init.num_inbound_streams);
3745 	/* choose what I want */
3746 	if (asoc != NULL) {
3747 		if (asoc->streamoutcnt > inp->sctp_ep.pre_open_stream_count) {
3748 			i_want = asoc->streamoutcnt;
3749 		} else {
3750 			i_want = inp->sctp_ep.pre_open_stream_count;
3752 	} else {
3753 		i_want = inp->sctp_ep.pre_open_stream_count;
3755 	if (his_limit < i_want) {
3756 		/* I Want more :< */
3757 		initackm_out->msg.init.num_outbound_streams = init_chk->init.num_inbound_streams;
3758 	} else {
3759 		/* I can have what I want :> */
3760 		initackm_out->msg.init.num_outbound_streams = htons(i_want);
3762 	/* tell him his limt. */
3763 	initackm_out->msg.init.num_inbound_streams =
3764 	    htons(inp->sctp_ep.max_open_streams_intome);
3765 	/* setup the ECN pointer */
3767 /*	if (inp->sctp_flags & SCTP_PCB_FLAGS_ADAPTIONEVNT) {*/
/* Optional parameters are packed back-to-back after the INIT-ACK header;
 * each pointer below is computed from the previous parameter's end. */
3768 	if (inp->sctp_ep.adaption_layer_indicator) {
3769 		struct sctp_adaption_layer_indication *ali;
3770 		ali = (struct sctp_adaption_layer_indication *)(
3771 		    (caddr_t)initackm_out + sizeof(*initackm_out));
3772 		ali->ph.param_type = htons(SCTP_ULP_ADAPTION);
3773 		ali->ph.param_length = htons(sizeof(*ali));
3774 		ali->indication = ntohl(inp->sctp_ep.adaption_layer_indicator);
3775 		m->m_len += sizeof(*ali);
3776 		ecn = (struct sctp_ecn_supported_param *)((caddr_t)ali +
3777 		    sizeof(*ali));
3778 	} else {
3779 		ecn = (struct sctp_ecn_supported_param*)(
3780 		    (caddr_t)initackm_out + sizeof(*initackm_out));
3783 	/* ECN parameter */
3784 	if (sctp_ecn == 1) {
3785 		ecn->ph.param_type = htons(SCTP_ECN_CAPABLE);
3786 		ecn->ph.param_length = htons(sizeof(*ecn));
3787 		m->m_len += sizeof(*ecn);
3789 		prsctp = (struct sctp_prsctp_supported_param *)((caddr_t)ecn +
3790 		    sizeof(*ecn));
3791 	} else {
3792 		prsctp = (struct sctp_prsctp_supported_param *)((caddr_t)ecn);
3794 	/* And now tell the peer we do pr-sctp */
3795 	prsctp->ph.param_type = htons(SCTP_PRSCTP_SUPPORTED);
3796 	prsctp->ph.param_length = htons(sizeof(*prsctp));
3797 	m->m_len += sizeof(*prsctp);
3800 	/* And now tell the peer we do all the extensions */
3801 	pr_supported = (struct sctp_supported_chunk_types_param *)((caddr_t)prsctp +
3802 	   sizeof(*prsctp));
3804 	pr_supported->ph.param_type = htons(SCTP_SUPPORTED_CHUNK_EXT);
3805 	pr_supported->ph.param_length = htons(sizeof(*pr_supported) + SCTP_EXT_COUNT);
3806 	pr_supported->chunk_types[0] = SCTP_ASCONF;
3807 	pr_supported->chunk_types[1] = SCTP_ASCONF_ACK;
3808 	pr_supported->chunk_types[2] = SCTP_FORWARD_CUM_TSN;
3809 	pr_supported->chunk_types[3] = SCTP_PACKET_DROPPED;
3810 	pr_supported->chunk_types[4] = SCTP_STREAM_RESET;
3811 	pr_supported->chunk_types[5] = 0; /* pad */
3812 	pr_supported->chunk_types[6] = 0; /* pad */
3813 	pr_supported->chunk_types[7] = 0; /* pad */
3815 	m->m_len += (sizeof(*pr_supported) + SCTP_EXT_COUNT + SCTP_PAD_EXT_COUNT);
3816 	if (sctp_ecn_nonce) {
3817 		/* ECN nonce: And now tell the peer we support ECN nonce */
3818 		ecn_nonce = (struct sctp_ecn_nonce_supported_param *)((caddr_t)pr_supported +
3819 		    sizeof(*pr_supported) + SCTP_EXT_COUNT + SCTP_PAD_EXT_COUNT);
3820 		ecn_nonce->ph.param_type = htons(SCTP_ECN_NONCE_SUPPORTED);
3821 		ecn_nonce->ph.param_length = htons(sizeof(*ecn_nonce));
3822 		m->m_len += sizeof(*ecn_nonce);
3825 	m_at = m;
3826 	/* now the addresses */
/* Bound-all endpoint: walk every interface address twice - first to
 * count in-scope addresses, then (if more than one) to append them. */
3827 	if (inp->sctp_flags & SCTP_PCB_FLAGS_BOUNDALL) {
3828 		struct ifnet *ifn;
3829 		int cnt = cnt_inits_to;
3831 		TAILQ_FOREACH(ifn, &ifnet, if_list) {
3832 			struct ifaddr_container *ifac;
3834 			if ((stc.loopback_scope == 0) &&
3835 			    (ifn->if_type == IFT_LOOP)) {
3837 				 * Skip loopback devices if loopback_scope
3838 				 * not set
3840 				continue;
3842 			TAILQ_FOREACH(ifac, &ifn->if_addrheads[mycpuid], ifa_link) {
3843 				struct ifaddr *ifa = ifac->ifa;
3845 				if (sctp_is_address_in_scope(ifa,
3846 				    stc.ipv4_addr_legal, stc.ipv6_addr_legal,
3847 				    stc.loopback_scope, stc.ipv4_scope,
3848 				    stc.local_scope, stc.site_scope) == 0) {
3849 					continue;
3851 				cnt++;
3854 		if (cnt > 1) {
3855 			TAILQ_FOREACH(ifn, &ifnet, if_list) {
3856 				struct ifaddr_container *ifac;
3858 				if ((stc.loopback_scope == 0) &&
3859 				    (ifn->if_type == IFT_LOOP)) {
3861 					 * Skip loopback devices if
3862 					 * loopback_scope not set
3864 					continue;
3866 				TAILQ_FOREACH(ifac, &ifn->if_addrheads[mycpuid], ifa_link) {
3867 					struct ifaddr *ifa = ifac->ifa;
3869 					if (sctp_is_address_in_scope(ifa,
3870 					    stc.ipv4_addr_legal,
3871 					    stc.ipv6_addr_legal,
3872 					    stc.loopback_scope, stc.ipv4_scope,
3873 					    stc.local_scope, stc.site_scope) == 0) {
3874 						continue;
3876 					m_at = sctp_add_addr_to_mbuf(m_at, ifa);
3880 	} else {
3881 		struct sctp_laddr *laddr;
3882 		int cnt;
3883 		cnt = cnt_inits_to;
3884 		/* First, how many ? */
3885 		LIST_FOREACH(laddr, &inp->sctp_addr_list, sctp_nxt_addr) {
3886 			if (laddr->ifa == NULL) {
3887 				continue;
3889 			if (laddr->ifa->ifa_addr == NULL)
3890 				continue;
3891 			if (sctp_is_address_in_scope(laddr->ifa,
3892 			    stc.ipv4_addr_legal, stc.ipv6_addr_legal,
3893 			    stc.loopback_scope, stc.ipv4_scope,
3894 			    stc.local_scope, stc.site_scope) == 0) {
3895 				continue;
3897 			cnt++;
3899 		/* If we bind a single address only we won't list
3900 		 * any. This way you can get through a NAT
3902 		if (cnt > 1) {
3903 			LIST_FOREACH(laddr, &inp->sctp_addr_list, sctp_nxt_addr) {
3904 				if (laddr->ifa == NULL) {
3905 #ifdef SCTP_DEBUG
3906 					if (sctp_debug_on & SCTP_DEBUG_OUTPUT1) {
3907 						kprintf("Help I have fallen and I can't get up!\n");
3909 #endif
3910 					continue;
3912 				if (laddr->ifa->ifa_addr == NULL)
3913 					continue;
3914 				if (sctp_is_address_in_scope(laddr->ifa,
3915 				    stc.ipv4_addr_legal, stc.ipv6_addr_legal,
3916 				    stc.loopback_scope, stc.ipv4_scope,
3917 				    stc.local_scope, stc.site_scope) == 0) {
3918 					continue;
3920 				m_at = sctp_add_addr_to_mbuf(m_at, laddr->ifa);
3925 	/* tack on the operational error if present */
3926 	if (op_err) {
3927 		if (op_err->m_pkthdr.len % 4) {
3928 			/* must add a pad to the param */
3929 			u_int32_t cpthis=0;
3930 			int padlen;
3931 			padlen = 4 - (op_err->m_pkthdr.len % 4);
3932 			m_copyback(op_err, op_err->m_pkthdr.len, padlen, (caddr_t)&cpthis);
3934 		while (m_at->m_next != NULL) {
3935 			m_at = m_at->m_next;
3937 		m_at->m_next = op_err;
3938 		while (m_at->m_next != NULL) {
3939 			m_at = m_at->m_next;
3942 	/* Get total size of init packet */
3943 	sz_of = SCTP_SIZE32(ntohs(init_chk->ch.chunk_length));
3944 	/* pre-calulate the size and update pkt header and chunk header */
3945 	m->m_pkthdr.len = 0;
3946 	for (m_tmp = m; m_tmp; m_tmp = m_tmp->m_next) {
3947 		m->m_pkthdr.len += m_tmp->m_len;
3948 		if (m_tmp->m_next == NULL) {
3949 			/* m_tmp should now point to last one */
3950 			break;
3954 	 * Figure now the size of the cookie. We know the size of the
3955 	 * INIT-ACK. The Cookie is going to be the size of INIT, INIT-ACK,
3956 	 * COOKIE-STRUCTURE and SIGNATURE.
3960 	 * take our earlier INIT calc and add in the sz we just calculated
3961 	 * minus the size of the sctphdr (its not included in chunk size
3964 	/* add once for the INIT-ACK */
3965 	sz_of += (m->m_pkthdr.len - sizeof(struct sctphdr));
3967 	/* add a second time for the INIT-ACK in the cookie */
3968 	sz_of += (m->m_pkthdr.len - sizeof(struct sctphdr));
3970 	/* Now add the cookie header and cookie message struct */
3971 	sz_of += sizeof(struct sctp_state_cookie_param);
3972 	/* ...and add the size of our signature */
3973 	sz_of += SCTP_SIGNATURE_SIZE;
3974 	initackm_out->msg.ch.chunk_length = htons(sz_of);
3976 	/* Now we must build a cookie */
3977 	m_cookie = sctp_add_cookie(inp, init_pkt, offset, m,
3978 	    sizeof(struct sctphdr), &stc);
3979 	if (m_cookie == NULL) {
3980 		/* memory problem */
3981 		sctp_m_freem(m);
3982 		return;
3984 	/* Now append the cookie to the end and update the space/size */
3985 	m_tmp->m_next = m_cookie;
3988 	 * We pass 0 here to NOT set IP_DF if its IPv4, we ignore the
3989 	 * return here since the timer will drive a retranmission.
3991 	padval = m->m_pkthdr.len % 4;
3992 	if ((padval) && (m_last)) {
3993 		/* see my previous comments on m_last */
3994 		int ret;
3995 		ret = sctp_add_pad_tombuf(m_last, (4-padval));
3996 		if (ret) {
3997 			/* Houston we have a problem, no space */
3998 			sctp_m_freem(m);
3999 			return;
4001 		m->m_pkthdr.len += padval;
4003 	sctp_lowlevel_chunk_output(inp, NULL, NULL, to, m, 0, 0, NULL, 0);
4007 static void
4008 sctp_insert_on_wheel(struct sctp_association *asoc,
4009 struct sctp_stream_out *strq)
4011 struct sctp_stream_out *stre, *strn;
4012 stre = TAILQ_FIRST(&asoc->out_wheel);
4013 if (stre == NULL) {
4014 /* only one on wheel */
4015 TAILQ_INSERT_HEAD(&asoc->out_wheel, strq, next_spoke);
4016 return;
4018 for (; stre; stre = strn) {
4019 strn = TAILQ_NEXT(stre, next_spoke);
4020 if (stre->stream_no > strq->stream_no) {
4021 TAILQ_INSERT_BEFORE(stre, strq, next_spoke);
4022 return;
4023 } else if (stre->stream_no == strq->stream_no) {
4024 /* huh, should not happen */
4025 return;
4026 } else if (strn == NULL) {
4027 /* next one is null */
4028 TAILQ_INSERT_AFTER(&asoc->out_wheel, stre, strq,
4029 next_spoke);
4034 static void
4035 sctp_remove_from_wheel(struct sctp_association *asoc,
4036 struct sctp_stream_out *strq)
4038 /* take off and then setup so we know it is not on the wheel */
4039 TAILQ_REMOVE(&asoc->out_wheel, strq, next_spoke);
4040 strq->next_spoke.tqe_next = NULL;
4041 strq->next_spoke.tqe_prev = NULL;
4045 static void
4046 sctp_prune_prsctp(struct sctp_tcb *stcb,
4047 struct sctp_association *asoc,
4048 struct sctp_sndrcvinfo *srcv,
4049 int dataout
4052 int freed_spc=0;
4053 struct sctp_tmit_chunk *chk, *nchk;
4054 if ((asoc->peer_supports_prsctp) && (asoc->sent_queue_cnt_removeable > 0)) {
4055 TAILQ_FOREACH(chk, &asoc->sent_queue, sctp_next) {
4057 * Look for chunks marked with the PR_SCTP
4058 * flag AND the buffer space flag. If the one
4059 * being sent is equal or greater priority then
4060 * purge the old one and free some space.
4062 if ((chk->flags & (SCTP_PR_SCTP_ENABLED |
4063 SCTP_PR_SCTP_BUFFER)) ==
4064 (SCTP_PR_SCTP_ENABLED|SCTP_PR_SCTP_BUFFER)) {
4066 * This one is PR-SCTP AND buffer space
4067 * limited type
4069 if (chk->rec.data.timetodrop.tv_sec >= (long)srcv->sinfo_timetolive) {
4070 /* Lower numbers equates to
4071 * higher priority so if the
4072 * one we are looking at has a
4073 * larger or equal priority we
4074 * want to drop the data and
4075 * NOT retransmit it.
4077 if (chk->data) {
4078 /* We release the
4079 * book_size if the
4080 * mbuf is here
4082 int ret_spc;
4083 int cause;
4084 if (chk->sent > SCTP_DATAGRAM_UNSENT)
4085 cause = SCTP_RESPONSE_TO_USER_REQ|SCTP_NOTIFY_DATAGRAM_SENT;
4086 else
4087 cause = SCTP_RESPONSE_TO_USER_REQ|SCTP_NOTIFY_DATAGRAM_UNSENT;
4088 ret_spc = sctp_release_pr_sctp_chunk(stcb, chk,
4089 cause,
4090 &asoc->sent_queue);
4091 freed_spc += ret_spc;
4092 if (freed_spc >= dataout) {
4093 return;
4095 } /* if chunk was present */
4096 } /* if of sufficent priority */
4097 } /* if chunk has enabled */
4098 } /* tailqforeach */
4100 chk = TAILQ_FIRST(&asoc->send_queue);
4101 while (chk) {
4102 nchk = TAILQ_NEXT(chk, sctp_next);
4103 /* Here we must move to the sent queue and mark */
4104 if ((chk->flags & (SCTP_PR_SCTP_ENABLED |
4105 SCTP_PR_SCTP_BUFFER)) ==
4106 (SCTP_PR_SCTP_ENABLED|SCTP_PR_SCTP_BUFFER)) {
4107 if (chk->rec.data.timetodrop.tv_sec >= (long)srcv->sinfo_timetolive) {
4108 if (chk->data) {
4109 /* We release the
4110 * book_size if the
4111 * mbuf is here
4113 int ret_spc;
4114 ret_spc = sctp_release_pr_sctp_chunk(stcb, chk,
4115 SCTP_RESPONSE_TO_USER_REQ|SCTP_NOTIFY_DATAGRAM_UNSENT,
4116 &asoc->send_queue);
4118 freed_spc += ret_spc;
4119 if (freed_spc >= dataout) {
4120 return;
4122 } /* end if chk->data */
4123 } /* end if right class */
4124 } /* end if chk pr-sctp */
4125 chk = nchk;
4126 } /* end while (chk) */
4127 } /* if enabled in asoc */
/* Initialize a transmit-chunk 'template' from the send's sndrcvinfo:
 * zeroes the structure, then fills in PR-SCTP drop time/priority,
 * stream sequencing, per-send metadata, destination net and flags.
 * Left byte-identical; only comments added. */
4130 static void
4131 sctp_prepare_chunk(struct sctp_tmit_chunk *template,
4132 		   struct sctp_tcb *stcb,
4133 		   struct sctp_sndrcvinfo *srcv,
4134 		   struct sctp_stream_out *strq,
4135 		   struct sctp_nets *net)
4137 	bzero(template, sizeof(struct sctp_tmit_chunk));
4138 	template->sent = SCTP_DATAGRAM_UNSENT;
4139 	if ((stcb->asoc.peer_supports_prsctp) &&
4140 	    (srcv->sinfo_flags & (MSG_PR_SCTP_TTL|MSG_PR_SCTP_BUF)) &&
4141 	    (srcv->sinfo_timetolive > 0)
4143 		/* If:
4144 		 * Peer supports PR-SCTP
4145 		 * The flags is set against this send for PR-SCTP
4146 		 * And timetolive is a postive value, zero is reserved
4147 		 * to mean a reliable send for both buffer/time
4148 		 * related one.
4150 		if (srcv->sinfo_flags & MSG_PR_SCTP_BUF) {
4152 			 * Time to live is a priority stored in tv_sec
4153 			 * when doing the buffer drop thing.
4155 			template->rec.data.timetodrop.tv_sec = srcv->sinfo_timetolive;
4156 		} else {
4157 			struct timeval tv;
/* TTL mode: absolute drop deadline = now + timetolive (milliseconds). */
4159 			SCTP_GETTIME_TIMEVAL(&template->rec.data.timetodrop);
4160 			tv.tv_sec = srcv->sinfo_timetolive / 1000;
4161 			tv.tv_usec = (srcv->sinfo_timetolive * 1000) % 1000000;
4162 #ifndef __FreeBSD__
4163 			timeradd(&template->rec.data.timetodrop, &tv,
4164 			    &template->rec.data.timetodrop);
4165 #else
4166 			timevaladd(&template->rec.data.timetodrop, &tv);
4167 #endif
/* Ordered sends take the stream's next sequence number; unordered use 0. */
4170 	if ((srcv->sinfo_flags & MSG_UNORDERED) == 0) {
4171 		template->rec.data.stream_seq = strq->next_sequence_sent;
4172 	} else {
4173 		template->rec.data.stream_seq = 0;
4175 	template->rec.data.TSN_seq = 0;	/* not yet assigned */
4177 	template->rec.data.stream_number = srcv->sinfo_stream;
4178 	template->rec.data.payloadtype = srcv->sinfo_ppid;
4179 	template->rec.data.context = srcv->sinfo_context;
4180 	template->rec.data.doing_fast_retransmit = 0;
4181 	template->rec.data.ect_nonce = 0;   /* ECN Nonce */
/* Destination: honor MSG_ADDR_OVER's explicit net, else the primary. */
4183 	if (srcv->sinfo_flags & MSG_ADDR_OVER) {
4184 		template->whoTo = net;
4185 	} else {
4186 		if (stcb->asoc.primary_destination)
4187 			template->whoTo = stcb->asoc.primary_destination;
4188 		else {
4189 			/* TSNH */
4190 			template->whoTo = net;
4193 	/* the actual chunk flags */
4194 	if (srcv->sinfo_flags & MSG_UNORDERED) {
4195 		template->rec.data.rcv_flags = SCTP_DATA_UNORDERED;
4196 	} else {
4197 		template->rec.data.rcv_flags = 0;
4199 	/* no flags yet, FRAGMENT_OK goes here */
4200 	template->flags = 0;
4201 	/* PR sctp flags */
4202 	if (stcb->asoc.peer_supports_prsctp) {
4203 		if (srcv->sinfo_timetolive > 0) {
4205 			 * We only set the flag if timetolive (or
4206 			 * priority) was set to a positive number.
4207 			 * Zero is reserved specifically to be
4208 			 * EXCLUDED and sent reliable.
4210 			if (srcv->sinfo_flags & MSG_PR_SCTP_TTL) {
4211 				template->flags |= SCTP_PR_SCTP_ENABLED;
4213 			if (srcv->sinfo_flags & MSG_PR_SCTP_BUF) {
4214 				template->flags |= SCTP_PR_SCTP_BUFFER;
4218 	template->asoc = &stcb->asoc;
/*
 * NOTE(review): this file was extracted from a web blob view; line numbers
 * are fused into the text and short lines (braces, blank lines, comment
 * delimiters) are missing.  Code bytes are preserved as extracted; only
 * comments are added.
 */
/*
 * Compute the fragmentation point (largest data payload per chunk) for this
 * association: the smaller of the endpoint's configured frag point and the
 * association's smallest path MTU, minus the medium IP/SCTP overhead
 * (IPv6-capable endpoints reserve the larger v6 header), rounded down to a
 * 4-byte boundary.
 */
4223 sctp_get_frag_point(struct sctp_tcb *stcb,
4224 struct sctp_association *asoc)
4226 int siz, ovh;
4228 /* For endpoints that have both 6 and 4 addresses
4229 * we must reserve room for the 6 ip header, for
4230 * those that are only dealing with V4 we use
4231 * a larger frag point.
4233 if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) {
4234 ovh = SCTP_MED_OVERHEAD;
4235 } else {
4236 ovh = SCTP_MED_V4_OVERHEAD;
/* Use the smaller of the user-configured frag point and the path MTU. */
4239 if (stcb->sctp_ep->sctp_frag_point > asoc->smallest_mtu)
4240 siz = asoc->smallest_mtu - ovh;
4241 else
4242 siz = (stcb->sctp_ep->sctp_frag_point - ovh);
/* NOTE(review): the next three lines are commented-out remnants of a
 * "data chunk must fit in a cluster" clamp; the opening markers were
 * lost in extraction. */
4244 if (siz > (MCLBYTES-sizeof(struct sctp_data_chunk))) { */
4245 /* A data chunk MUST fit in a cluster */
4246 /* siz = (MCLBYTES - sizeof(struct sctp_data_chunk));*/
4247 /* }*/
4249 if (siz % 4) {
4250 /* make it an even word boundary please */
4251 siz -= (siz % 4);
4253 return (siz);
/* Global cap on the number of chunks queued across all send queues
 * (defined/tuned elsewhere). */
4255 extern unsigned int sctp_max_chunks_on_queue;
/* Map MSG_DONTWAIT to the non-blocking flag for the socket-buffer lock. */
4257 #define SBLOCKWAIT(f) (((f)&MSG_DONTWAIT) ? M_NOWAIT : M_WAITOK)
/*
 * Append a user message (mbuf chain m) to stream srcv->sinfo_stream of the
 * association, honoring the sinfo control flags:
 *   MSG_ABORT  -- wrap the data in a user-initiated-abort error cause and
 *                 abort the association;
 *   MSG_EOF    -- (UDP-style socket) request graceful shutdown once the
 *                 queues drain;
 *   MSG_UNORDERED / MSG_PR_SCTP_* -- recorded in the chunk template.
 * Messages larger than the frag point are split with m_split() into a
 * FIRST/.../LAST fragment train.  Consumes or frees m on all paths and
 * returns 0 or an errno.  Blocking behaviour is controlled by flags
 * (MSG_FNONBLOCKING|MSG_DONTWAIT); may sleep in ssb_wait() otherwise.
 */
4259 static int
4260 sctp_msg_append(struct sctp_tcb *stcb,
4261 struct sctp_nets *net,
4262 struct mbuf *m,
4263 struct sctp_sndrcvinfo *srcv,
4264 int flags)
4266 struct socket *so;
4267 struct sctp_association *asoc;
4268 struct sctp_stream_out *strq;
4269 struct sctp_tmit_chunk *chk;
4270 struct sctpchunk_listhead tmp;
4271 struct sctp_tmit_chunk template;
4272 struct mbuf *n, *mnext;
4273 struct mbuf *mm;
4274 unsigned int dataout, siz;
4275 int mbcnt = 0;
4276 int mbcnt_e = 0;
4277 int error = 0;
/* Defensive NULL checks: all four arguments are required. */
4279 if ((stcb == NULL) || (net == NULL) || (m == NULL) || (srcv == NULL)) {
4280 /* Software fault, you blew it on the call */
4281 #ifdef SCTP_DEBUG
4282 if (sctp_debug_on & SCTP_DEBUG_OUTPUT1) {
4283 kprintf("software error in sctp_msg_append:1\n");
4284 kprintf("stcb:%p net:%p m:%p srcv:%p\n",
4285 stcb, net, m, srcv);
4287 #endif
4288 if (m)
4289 sctp_m_freem(m);
4290 return (EFAULT);
4292 so = stcb->sctp_socket;
4293 asoc = &stcb->asoc;
/* User-initiated abort: prepend an error-cause header and tear the
 * association down; the mbuf chain becomes the abort's error cause. */
4294 if (srcv->sinfo_flags & MSG_ABORT) {
4295 if ((SCTP_GET_STATE(asoc) != SCTP_STATE_COOKIE_WAIT) &&
4296 (SCTP_GET_STATE(asoc) != SCTP_STATE_COOKIE_ECHOED)) {
4297 /* It has to be up before we abort */
4298 /* how big is the user initiated abort? */
4299 if ((m->m_flags & M_PKTHDR) && (m->m_pkthdr.len)) {
4300 dataout = m->m_pkthdr.len;
4301 } else {
4302 /* we must count */
4303 dataout = 0;
4304 for (n = m; n; n = n->m_next) {
4305 dataout += n->m_len;
4308 M_PREPEND(m, sizeof(struct sctp_paramhdr), MB_DONTWAIT);
4309 if (m) {
4310 struct sctp_paramhdr *ph;
4311 m->m_len = sizeof(struct sctp_paramhdr) + dataout;
4312 ph = mtod(m, struct sctp_paramhdr *);
4313 ph->param_type = htons(SCTP_CAUSE_USER_INITIATED_ABT);
4314 ph->param_length = htons(m->m_len);
/* sctp_abort_an_association() takes ownership of m. */
4316 sctp_abort_an_association(stcb->sctp_ep, stcb, SCTP_RESPONSE_TO_USER_REQ, m);
4317 m = NULL;
4318 } else {
4319 /* Only free if we don't send an abort */
4322 goto out;
/* No new data may be queued once a shutdown sequence has begun. */
4324 if ((SCTP_GET_STATE(asoc) == SCTP_STATE_SHUTDOWN_SENT) ||
4325 (SCTP_GET_STATE(asoc) == SCTP_STATE_SHUTDOWN_ACK_SENT) ||
4326 (SCTP_GET_STATE(asoc) == SCTP_STATE_SHUTDOWN_RECEIVED) ||
4327 (asoc->state & SCTP_STATE_SHUTDOWN_PENDING)) {
4328 /* got data while shutting down */
4329 error = ECONNRESET;
4330 goto out;
4333 if (srcv->sinfo_stream >= asoc->streamoutcnt) {
4334 /* Invalid stream number */
4335 error = EINVAL;
4336 goto out;
4338 if (asoc->strmout == NULL) {
4339 /* huh? software error */
4340 #ifdef SCTP_DEBUG
4341 if (sctp_debug_on & SCTP_DEBUG_OUTPUT1) {
4342 kprintf("software error in sctp_msg_append:2\n");
4344 #endif
4345 error = EFAULT;
4346 goto out;
4348 strq = &asoc->strmout[srcv->sinfo_stream];
4349 /* how big is it ? */
4350 if ((m->m_flags & M_PKTHDR) && (m->m_pkthdr.len)) {
4351 dataout = m->m_pkthdr.len;
4352 } else {
4353 /* we must count */
4354 dataout = 0;
4355 for (n = m; n; n = n->m_next) {
4356 dataout += n->m_len;
4359 #ifdef SCTP_DEBUG
4360 if (sctp_debug_on & SCTP_DEBUG_OUTPUT1) {
4361 kprintf("Attempt to send out %d bytes\n",
4362 dataout);
4364 #endif
4366 /* lock the socket buf */
4367 SOCKBUF_LOCK(&so->so_snd);
4368 error = ssb_lock(&so->so_snd, SBLOCKWAIT(flags));
4369 if (error)
4370 goto out_locked;
/* A message larger than the high-water mark can never be queued. */
4372 if (dataout > so->so_snd.ssb_hiwat) {
4373 /* It will NEVER fit */
4374 error = EMSGSIZE;
4375 goto release;
/* Zero-length MSG_EOF on a one-to-many socket is a pure shutdown request. */
4377 if ((srcv->sinfo_flags & MSG_EOF) &&
4378 (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_UDPTYPE) &&
4379 (dataout == 0)
4381 goto zap_by_it_all;
/* Flow control: block (or fail with EWOULDBLOCK) until byte, chunk and
 * mbuf budgets all have room for this message. */
4383 if ((so->so_snd.ssb_hiwat <
4384 (dataout + asoc->total_output_queue_size)) ||
4385 (asoc->chunks_on_out_queue > sctp_max_chunks_on_queue) ||
4386 (asoc->total_output_mbuf_queue_size >
4387 so->so_snd.ssb_mbmax)
4389 /* XXX Buffer space hunt for data to skip */
4390 if (asoc->peer_supports_prsctp) {
4391 sctp_prune_prsctp(stcb, asoc, srcv, dataout);
4393 while ((so->so_snd.ssb_hiwat <
4394 (dataout + asoc->total_output_queue_size)) ||
4395 (asoc->chunks_on_out_queue > sctp_max_chunks_on_queue) ||
4396 (asoc->total_output_mbuf_queue_size >
4397 so->so_snd.ssb_mbmax)) {
4398 struct sctp_inpcb *inp;
4399 /* Now did we free up enough room? */
4400 if (flags & (MSG_FNONBLOCKING|MSG_DONTWAIT)) {
4401 /* Non-blocking io in place */
4402 error = EWOULDBLOCK;
4403 goto release;
4406 * We store off a pointer to the endpoint.
4407 * Since on return from this we must check to
4408 * see if an so_error is set. If so we may have
4409 * been reset and our stcb destroyed. Returning
4410 * an error will cause the correct error return
4411 * through and fix this all.
4413 inp = stcb->sctp_ep;
4415 * Not sure how else to do this since
4416 * the level we suspended at is not
4417 * known deep down where we are. I will
4418 * drop to spl0() so that others can
4419 * get in.
4422 inp->sctp_tcb_at_block = (void *)stcb;
4423 inp->error_on_block = 0;
4424 ssb_unlock(&so->so_snd);
4425 error = ssb_wait(&so->so_snd);
4427 * XXX: This is ugly but I have
4428 * recreated most of what goes on to
4429 * block in the sb. UGHH
4430 * May want to add the bit about being
4431 * no longer connected.. but this then
4432 * further dooms the UDP model NOT to
4433 * allow this.
4435 inp->sctp_tcb_at_block = 0;
4436 if (inp->error_on_block)
4437 error = inp->error_on_block;
4438 if (so->so_error)
4439 error = so->so_error;
4440 if (error) {
4441 goto out_locked;
4443 error = ssb_lock(&so->so_snd, M_WAITOK);
4444 if (error)
4445 goto out_locked;
4446 /* Otherwise we cycle back and recheck
4447 * the space
4449 #if defined(__FreeBSD__) && __FreeBSD_version >= 502115
4450 if (so->so_rcv.sb_state & SBS_CANTSENDMORE) {
4451 #else
4452 if (so->so_state & SS_CANTSENDMORE) {
4453 #endif
4454 error = EPIPE;
4455 goto release;
4457 if (so->so_error) {
4458 error = so->so_error;
4459 goto release;
4463 /* If we have a packet header fix it if it was broke */
4464 if (m->m_flags & M_PKTHDR) {
4465 m->m_pkthdr.len = dataout;
4467 /* use the smallest one, user set value or
4468 * smallest mtu of the asoc
4470 siz = sctp_get_frag_point(stcb, asoc);
4471 SOCKBUF_UNLOCK(&so->so_snd);
/* Fast path: the whole message fits in one unfragmented DATA chunk. */
4472 if ((dataout) && (dataout <= siz)) {
4473 /* Fast path */
4474 chk = (struct sctp_tmit_chunk *)SCTP_ZONE_GET(sctppcbinfo.ipi_zone_chunk);
4475 if (chk == NULL) {
4476 error = ENOMEM;
4477 SOCKBUF_LOCK(&so->so_snd);
4478 goto release;
4480 sctp_prepare_chunk(chk, stcb, srcv, strq, net);
4481 chk->whoTo->ref_count++;
4482 chk->rec.data.rcv_flags |= SCTP_DATA_NOT_FRAG;
4484 /* no flags yet, FRAGMENT_OK goes here */
4485 sctppcbinfo.ipi_count_chunk++;
4486 sctppcbinfo.ipi_gencnt_chunk++;
4487 asoc->chunks_on_out_queue++;
4488 chk->data = m;
4489 m = NULL;
4490 /* Total in the MSIZE */
4491 for (mm = chk->data; mm; mm = mm->m_next) {
4492 mbcnt += MSIZE;
4493 if (mm->m_flags & M_EXT) {
4494 mbcnt += chk->data->m_ext.ext_size;
4497 /* fix up the send_size if it is not present */
4498 chk->send_size = dataout;
4499 chk->book_size = chk->send_size;
4500 chk->mbcnt = mbcnt;
4501 /* ok, we are committed */
4502 if ((srcv->sinfo_flags & MSG_UNORDERED) == 0) {
4503 /* bump the ssn only if the message is ordered */
4504 strq->next_sequence_sent++;
4506 chk->data->m_nextpkt = 0;
4507 asoc->stream_queue_cnt++;
4508 TAILQ_INSERT_TAIL(&strq->outqueue, chk, sctp_next);
4509 /* now check if this stream is on the wheel */
4510 if ((strq->next_spoke.tqe_next == NULL) &&
4511 (strq->next_spoke.tqe_prev == NULL)) {
4512 /* Insert it on the wheel since it is not
4513 * on it currently
4515 sctp_insert_on_wheel(asoc, strq);
/* Slow path: split into frag-point sized pieces chained via m_nextpkt,
 * then wrap each piece in its own tmit chunk. */
4517 } else if ((dataout) && (dataout > siz)) {
4518 /* Slow path */
4519 if ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_NO_FRAGMENT) &&
4520 (dataout > siz)) {
4521 error = EMSGSIZE;
4522 SOCKBUF_LOCK(&so->so_snd);
4523 goto release;
4525 /* setup the template */
4526 sctp_prepare_chunk(&template, stcb, srcv, strq, net);
4528 n = m;
4529 while (dataout > siz) {
4531 * We can wait since this is called from the user
4532 * send side
4534 n->m_nextpkt = m_split(n, siz, MB_WAIT);
4535 if (n->m_nextpkt == NULL) {
4536 error = EFAULT;
4537 SOCKBUF_LOCK(&so->so_snd);
4538 goto release;
4540 dataout -= siz;
4541 n = n->m_nextpkt;
4544 * ok, now we have a chain on m where m->m_nextpkt points to
4545 * the next chunk and m/m->m_next chain is the piece to send.
4546 * We must go through the chains and thread them on to
4547 * sctp_tmit_chunk chains and place them all on the stream
4548 * queue, breaking the m->m_nextpkt pointers as we go.
4550 n = m;
4551 TAILQ_INIT(&tmp);
4552 while (n) {
4554 * first go through and allocate a sctp_tmit chunk
4555 * for each chunk piece
4557 chk = (struct sctp_tmit_chunk *)SCTP_ZONE_GET(sctppcbinfo.ipi_zone_chunk);
4558 if (chk == NULL) {
4560 * ok we must spin through and dump anything
4561 * we have allocated and then jump to the
4562 * no_membad
4564 chk = TAILQ_FIRST(&tmp);
4565 while (chk) {
4566 TAILQ_REMOVE(&tmp, chk, sctp_next);
4567 SCTP_ZONE_FREE(sctppcbinfo.ipi_zone_chunk, chk);
4568 sctppcbinfo.ipi_count_chunk--;
4569 asoc->chunks_on_out_queue--;
4570 if ((int)sctppcbinfo.ipi_count_chunk < 0) {
4571 panic("Chunk count is negative");
4573 sctppcbinfo.ipi_gencnt_chunk++;
4574 chk = TAILQ_FIRST(&tmp);
4576 error = ENOMEM;
4577 SOCKBUF_LOCK(&so->so_snd);
4578 goto release;
4580 sctppcbinfo.ipi_count_chunk++;
4581 asoc->chunks_on_out_queue++;
4583 sctppcbinfo.ipi_gencnt_chunk++;
/* Struct copy of the prepared template into the new chunk. */
4584 *chk = template;
4585 chk->whoTo->ref_count++;
4586 chk->data = n;
4587 /* Total in the MSIZE */
4588 mbcnt_e = 0;
4589 for (mm = chk->data; mm; mm = mm->m_next) {
4590 mbcnt_e += MSIZE;
4591 if (mm->m_flags & M_EXT) {
4592 mbcnt_e += chk->data->m_ext.ext_size;
4595 /* now fix the chk->send_size */
4596 if (chk->data->m_flags & M_PKTHDR) {
4597 chk->send_size = chk->data->m_pkthdr.len;
4598 } else {
4599 struct mbuf *nn;
4600 chk->send_size = 0;
4601 for (nn = chk->data; nn; nn = nn->m_next) {
4602 chk->send_size += nn->m_len;
4605 chk->book_size = chk->send_size;
4606 chk->mbcnt = mbcnt_e;
4607 mbcnt += mbcnt_e;
4608 if (chk->flags & SCTP_PR_SCTP_BUFFER) {
4609 asoc->sent_queue_cnt_removeable++;
4611 n = n->m_nextpkt;
4612 TAILQ_INSERT_TAIL(&tmp, chk, sctp_next);
4614 m = NULL;
4615 /* now that we have enough space for all de-couple the
4616 * chain of mbufs by going through our temp array
4617 * and breaking the pointers.
4619 /* ok, we are committed */
4620 if ((srcv->sinfo_flags & MSG_UNORDERED) == 0) {
4621 /* bump the ssn only if the message is ordered */
4622 strq->next_sequence_sent++;
4624 /* Mark the first/last flags. This will
4625 * result int a 3 for a single item on the list
4627 chk = TAILQ_FIRST(&tmp);
4628 chk->rec.data.rcv_flags |= SCTP_DATA_FIRST_FRAG;
4629 chk = TAILQ_LAST(&tmp, sctpchunk_listhead);
4630 chk->rec.data.rcv_flags |= SCTP_DATA_LAST_FRAG;
4631 /* now break any chains on the queue and
4632 * move it to the streams actual queue.
4634 chk = TAILQ_FIRST(&tmp);
4635 while (chk) {
4636 chk->data->m_nextpkt = 0;
4637 TAILQ_REMOVE(&tmp, chk, sctp_next);
4638 asoc->stream_queue_cnt++;
4639 TAILQ_INSERT_TAIL(&strq->outqueue, chk, sctp_next);
4640 chk = TAILQ_FIRST(&tmp);
4642 /* now check if this stream is on the wheel */
4643 if ((strq->next_spoke.tqe_next == NULL) &&
4644 (strq->next_spoke.tqe_prev == NULL)) {
4645 /* Insert it on the wheel since it is not
4646 * on it currently
4648 sctp_insert_on_wheel(asoc, strq);
4651 SOCKBUF_LOCK(&so->so_snd);
4652 /* has a SHUTDOWN been (also) requested by the user on this asoc? */
4653 zap_by_it_all:
4655 if ((srcv->sinfo_flags & MSG_EOF) &&
4656 (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_UDPTYPE)) {
4658 int some_on_streamwheel = 0;
4660 if (!TAILQ_EMPTY(&asoc->out_wheel)) {
4661 /* Check to see if some data queued */
4662 struct sctp_stream_out *outs;
4663 TAILQ_FOREACH(outs, &asoc->out_wheel, next_spoke) {
4664 if (!TAILQ_EMPTY(&outs->outqueue)) {
4665 some_on_streamwheel = 1;
4666 break;
4671 if (TAILQ_EMPTY(&asoc->send_queue) &&
4672 TAILQ_EMPTY(&asoc->sent_queue) &&
4673 (some_on_streamwheel == 0)) {
4674 /* there is nothing queued to send, so I'm done... */
4675 if ((SCTP_GET_STATE(asoc) != SCTP_STATE_SHUTDOWN_SENT) &&
4676 (SCTP_GET_STATE(asoc) != SCTP_STATE_SHUTDOWN_ACK_SENT)) {
4677 /* only send SHUTDOWN the first time through */
4678 #ifdef SCTP_DEBUG
4679 if (sctp_debug_on & SCTP_DEBUG_OUTPUT4) {
4680 kprintf("%s:%d sends a shutdown\n",
4681 __FILE__,
4682 __LINE__
4685 #endif
4686 sctp_send_shutdown(stcb, stcb->asoc.primary_destination);
4687 asoc->state = SCTP_STATE_SHUTDOWN_SENT;
4688 sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWN, stcb->sctp_ep, stcb,
4689 asoc->primary_destination);
4690 sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNGUARD, stcb->sctp_ep, stcb,
4691 asoc->primary_destination);
4693 } else {
4695 * we still got (or just got) data to send, so set
4696 * SHUTDOWN_PENDING
4699 * XXX sockets draft says that MSG_EOF should be sent
4700 * with no data. currently, we will allow user data
4701 * to be sent first and move to SHUTDOWN-PENDING
4703 asoc->state |= SCTP_STATE_SHUTDOWN_PENDING;
4706 #ifdef SCTP_MBCNT_LOGGING
4707 sctp_log_mbcnt(SCTP_LOG_MBCNT_INCREASE,
4708 asoc->total_output_queue_size,
4709 dataout,
4710 asoc->total_output_mbuf_queue_size,
4711 mbcnt);
4712 #endif
/* Charge the queued bytes/mbufs to the association and, for TCP-model
 * sockets, to the socket send buffer accounting as well. */
4713 asoc->total_output_queue_size += dataout;
4714 asoc->total_output_mbuf_queue_size += mbcnt;
4715 if ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) ||
4716 (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL)) {
4717 so->so_snd.ssb_cc += dataout;
4718 so->so_snd.ssb_mbcnt += mbcnt;
4721 #ifdef SCTP_DEBUG
4722 if (sctp_debug_on & SCTP_DEBUG_OUTPUT2) {
4723 kprintf("++total out:%d total_mbuf_out:%d\n",
4724 (int)asoc->total_output_queue_size,
4725 (int)asoc->total_output_mbuf_queue_size);
4727 #endif
4729 release:
4730 ssb_unlock(&so->so_snd);
4731 out_locked:
4732 SOCKBUF_UNLOCK(&so->so_snd);
4733 out:
/* On error paths m may still hold a (possibly m_split) chain: free every
 * packet on the m_nextpkt list, or the single chain. */
4734 if (m && m->m_nextpkt) {
4735 n = m;
4736 while (n) {
4737 mnext = n->m_nextpkt;
4738 n->m_nextpkt = NULL;
4739 sctp_m_freem(n);
4740 n = mnext;
4742 } else if (m)
4743 sctp_m_freem(m);
4745 return (error);
/*
 * Copy clonechain and append the copy to the tail of outchain.
 * Uses m_copypacket() as a cheap copy when the source has a packet header
 * (FreeBSD/NetBSD), otherwise m_copy()/sctp_m_copym().  If outchain has a
 * packet header its pkthdr.len is grown by the appended length.
 * Returns the combined chain, the bare copy when outchain is NULL, or NULL
 * on allocation failure -- in which case outchain is freed too.
 */
4748 static struct mbuf *
4749 sctp_copy_mbufchain(struct mbuf *clonechain,
4750 struct mbuf *outchain)
4752 struct mbuf *appendchain;
4753 #if defined(__FreeBSD__) || defined(__NetBSD__)
4754 /* Supposedly m_copypacket is an optimization, use it if we can */
4755 if (clonechain->m_flags & M_PKTHDR) {
4756 appendchain = m_copypacket(clonechain, MB_DONTWAIT);
4757 sctp_pegs[SCTP_CACHED_SRC]++;
4758 } else
4759 appendchain = m_copy(clonechain, 0, M_COPYALL);
4760 #elif defined(__APPLE__)
4761 appendchain = sctp_m_copym(clonechain, 0, M_COPYALL, MB_DONTWAIT);
4762 #else
4763 appendchain = m_copy(clonechain, 0, M_COPYALL);
4764 #endif
4766 if (appendchain == NULL) {
4767 /* error */
4768 if (outchain)
4769 sctp_m_freem(outchain);
4770 return (NULL);
4772 if (outchain) {
4773 /* tack on to the end */
4774 struct mbuf *m;
4775 m = outchain;
4776 while (m) {
4777 if (m->m_next == NULL) {
4778 m->m_next = appendchain;
4779 break;
4781 m = m->m_next;
4783 if (outchain->m_flags & M_PKTHDR) {
4784 int append_tot;
4785 struct mbuf *t;
4786 t = appendchain;
4787 append_tot = 0;
/* Sum the appended bytes so the packet header stays accurate. */
4788 while (t) {
4789 append_tot += t->m_len;
4790 t = t->m_next;
4792 outchain->m_pkthdr.len += append_tot;
4794 return (outchain);
4795 } else {
4796 return (appendchain);
/*
 * Per-association callback for the "send to all" iterator: copy the cached
 * message in the sctp_copy_all context and queue it non-blocking on this
 * association's primary destination.  Failures are only counted in the
 * context (cnt_failed/cnt_sent); no error propagates to the caller.
 */
4800 static void
4801 sctp_sendall_iterator(struct sctp_inpcb *inp, struct sctp_tcb *stcb, void *ptr, u_int32_t val)
4803 struct sctp_copy_all *ca;
4804 struct mbuf *m;
4805 int ret;
4807 ca = (struct sctp_copy_all *)ptr;
4808 if (ca->m == NULL) {
4809 return;
4811 if (ca->inp != inp) {
4812 /* TSNH */
4813 return;
/* Each association gets its own copy; ca->m stays owned by the context. */
4815 m = sctp_copy_mbufchain(ca->m, NULL);
4816 if (m == NULL) {
4817 /* can't copy so we are done */
4818 ca->cnt_failed++;
4819 return;
4821 ret = sctp_msg_append(stcb, stcb->asoc.primary_destination, m,
4822 &ca->sndrcv, MSG_FNONBLOCKING);
4823 if (ret) {
4824 ca->cnt_failed++;
4825 } else {
4826 ca->cnt_sent++;
/*
 * Completion callback for the "send to all" iterator: releases the cached
 * message and the sctp_copy_all context allocated by sctp_sendall().
 */
4830 static void
4831 sctp_sendall_completes(void *ptr, u_int32_t val)
4833 struct sctp_copy_all *ca;
4834 ca = (struct sctp_copy_all *)ptr;
4835 /* Do a notify here?
4836 * Kacheong suggests that the notify
4837 * be done at the send time.. so you would
4838 * push up a notification if any send failed.
4839 * Don't know if this is feasible since the
4840 * only failures we have is "memory" related and
4841 * if you cannot get an mbuf to send the data
4842 * you surely can't get an mbuf to send up
4843 * to notify the user you can't send the data :->
4846 /* now free everything */
4847 m_freem(ca->m);
4848 FREE(ca, M_PCB);
/* Advance m_data so that len bytes end at the cluster's end, keeping the
 * start long-aligned (classic BSD MC_ALIGN idiom). */
4852 #define MC_ALIGN(m, len) do { \
4853 (m)->m_data += (MCLBYTES - (len)) & ~(sizeof(long) - 1); \
4854 } while (0)
/*
 * Copy len bytes of user data from uio into a fresh chain of cluster
 * mbufs (first one a packet header with pkthdr.len = len).  Returns the
 * chain, or NULL on allocation/uiomove failure (partial chain is freed).
 * May sleep (MB_WAIT allocations).
 */
4858 static struct mbuf *
4859 sctp_copy_out_all(struct uio *uio, int len)
4861 struct mbuf *ret, *at;
4862 int left, willcpy, cancpy, error;
4864 MGETHDR(ret, MB_WAIT, MT_HEADER);
4865 if (ret == NULL) {
4866 /* TSNH */
4867 return (NULL);
4869 left = len;
4870 ret->m_len = 0;
4871 ret->m_pkthdr.len = len;
4872 MCLGET(ret, MB_WAIT);
/* NOTE(review): MCLGET does not NULL the mbuf on failure, so this check
 * looks dead; the (M_EXT == 0) test below is the real cluster guard --
 * verify against mbuf(9). */
4873 if (ret == NULL) {
4874 return (NULL);
4876 if ((ret->m_flags & M_EXT) == 0) {
4877 m_freem (ret);
4878 return (NULL);
4880 cancpy = M_TRAILINGSPACE(ret);
4881 willcpy = min(cancpy, left);
4882 at = ret;
4883 while (left > 0) {
4884 /* Align data to the end */
4885 MC_ALIGN(at, willcpy);
4886 error = uiomove(mtod(at, caddr_t), willcpy, uio);
4887 if (error) {
/* Free the whole partially-built chain on any failure below, too. */
4888 err_out_now:
4889 m_freem(ret);
4890 return (NULL);
4892 at->m_len = willcpy;
4893 at->m_nextpkt = at->m_next = 0;
4894 left -= willcpy;
4895 if (left > 0) {
4896 MGET(at->m_next, MB_WAIT, MT_DATA);
4897 if (at->m_next == NULL) {
4898 goto err_out_now;
4900 at = at->m_next;
4901 at->m_len = 0;
4902 MCLGET(at, MB_WAIT);
4903 if (at == NULL) {
4904 goto err_out_now;
4906 if ((at->m_flags & M_EXT) == 0) {
4907 goto err_out_now;
4909 cancpy = M_TRAILINGSPACE(at);
4910 willcpy = min(cancpy, left);
4913 return (ret);
/*
 * Implement MSG_SENDALL: capture one copy of the user message (from uio or
 * from an mbuf chain m) in a sctp_copy_all context, then start an
 * association iterator that sends the copy to every association on the
 * endpoint (sctp_sendall_iterator / sctp_sendall_completes above).
 * Returns 0, ENOMEM, or EFAULT if the iterator cannot be started.
 */
4916 static int
4917 sctp_sendall (struct sctp_inpcb *inp, struct uio *uio, struct mbuf *m, struct sctp_sndrcvinfo *srcv)
4919 int ret;
4920 struct sctp_copy_all *ca;
4921 MALLOC(ca, struct sctp_copy_all *,
4922 sizeof(struct sctp_copy_all), M_PCB, MB_WAIT);
4923 if (ca == NULL) {
4924 m_freem(m);
4925 return (ENOMEM);
4927 memset (ca, 0, sizeof(struct sctp_copy_all));
4929 ca->inp = inp;
4930 ca->sndrcv = *srcv;
4931 /* take off the sendall flag, it would
4932 * be bad if we failed to do this :-0
4934 ca->sndrcv.sinfo_flags &= ~MSG_SENDALL;
4936 /* get length and mbuf chain */
4937 if (uio) {
4938 ca->sndlen = uio->uio_resid;
4939 ca->m = sctp_copy_out_all(uio, ca->sndlen);
4940 if (ca->m == NULL) {
4941 FREE(ca, M_PCB);
4942 return (ENOMEM);
4944 } else {
4945 if ((m->m_flags & M_PKTHDR) == 0) {
4946 struct mbuf *mat;
4947 mat = m;
4948 ca->sndlen = 0;
4949 while(m) {
4950 ca->sndlen += m->m_len;
4951 m = m->m_next;
4953 } else {
4954 ca->sndlen = m->m_pkthdr.len;
/* NOTE(review): the length loop above advances m itself, so for chains
 * without M_PKTHDR this assignment appears to store NULL; mat looks like
 * the intended loop cursor.  Extraction may have dropped lines here --
 * verify against upstream before changing. */
4956 ca->m = m;
4959 ret = sctp_initiate_iterator(sctp_sendall_iterator, SCTP_PCB_ANY_FLAGS, SCTP_ASOC_ANY_STATE,
4960 (void *)ca, 0, sctp_sendall_completes, inp);
4961 if (ret) {
4962 #ifdef SCTP_DEBUG
4963 kprintf("Failed to initate iterator to takeover associations\n");
4964 #endif
4965 FREE(ca, M_PCB);
4966 return (EFAULT);
4969 return (0);
/*
 * Remove and free every COOKIE-ECHO chunk still sitting on the control
 * send queue (stale cookies), releasing its data, its net refcount and
 * its zone allocation, and adjusting the queue/zone counters.
 */
4973 void
4974 sctp_toss_old_cookies(struct sctp_association *asoc)
4976 struct sctp_tmit_chunk *chk, *nchk;
4977 chk = TAILQ_FIRST(&asoc->control_send_queue);
4978 while (chk) {
/* Grab the successor first: chk may be unlinked and freed below. */
4979 nchk = TAILQ_NEXT(chk, sctp_next);
4980 if (chk->rec.chunk_id == SCTP_COOKIE_ECHO) {
4981 TAILQ_REMOVE(&asoc->control_send_queue, chk, sctp_next);
4982 if (chk->data) {
4983 sctp_m_freem(chk->data);
4984 chk->data = NULL;
4986 asoc->ctrl_queue_cnt--;
4987 if (chk->whoTo)
4988 sctp_free_remote_addr(chk->whoTo);
4989 SCTP_ZONE_FREE(sctppcbinfo.ipi_zone_chunk, chk);
4990 sctppcbinfo.ipi_count_chunk--;
4991 if ((int)sctppcbinfo.ipi_count_chunk < 0) {
4992 panic("Chunk count is negative");
4994 sctppcbinfo.ipi_gencnt_chunk++;
4996 chk = nchk;
/*
 * Remove and free any queued ASCONF chunk from the control send queue
 * (only one is ever queued at a time), with the same bookkeeping as
 * sctp_toss_old_cookies().
 */
5000 void
5001 sctp_toss_old_asconf(struct sctp_tcb *stcb)
5003 struct sctp_association *asoc;
5004 struct sctp_tmit_chunk *chk, *chk_tmp;
5006 asoc = &stcb->asoc;
5007 for (chk = TAILQ_FIRST(&asoc->control_send_queue); chk != NULL;
5008 chk = chk_tmp) {
5009 /* get next chk */
5010 chk_tmp = TAILQ_NEXT(chk, sctp_next);
5011 /* find SCTP_ASCONF chunk in queue (only one ever in queue) */
5012 if (chk->rec.chunk_id == SCTP_ASCONF) {
5013 TAILQ_REMOVE(&asoc->control_send_queue, chk, sctp_next);
5014 if (chk->data) {
5015 sctp_m_freem(chk->data);
5016 chk->data = NULL;
5018 asoc->ctrl_queue_cnt--;
5019 if (chk->whoTo)
5020 sctp_free_remote_addr(chk->whoTo);
5021 SCTP_ZONE_FREE(sctppcbinfo.ipi_zone_chunk, chk);
5022 sctppcbinfo.ipi_count_chunk--;
5023 if ((int)sctppcbinfo.ipi_count_chunk < 0) {
5024 panic("Chunk count is negative");
5026 sctppcbinfo.ipi_gencnt_chunk++;
/*
 * After a burst of DATA chunks has been handed to the wire, move the
 * bundle_at chunks in data_list from the send queue to the sent queue,
 * stamp send times, mark window probes, update flight size and shrink the
 * peer's advertised rwnd (engaging SWS avoidance at the sender threshold).
 */
5032 static void
5033 sctp_clean_up_datalist(struct sctp_tcb *stcb,
5034 struct sctp_association *asoc,
5035 struct sctp_tmit_chunk **data_list,
5036 int bundle_at,
5037 struct sctp_nets *net)
5039 int i;
5040 for (i = 0; i < bundle_at; i++) {
5041 /* off of the send queue */
5042 if (i) {
5043 /* Any chunk NOT 0 you zap the time
5044 * chunk 0 gets zapped or set based on
5045 * if a RTO measurement is needed.
5047 data_list[i]->do_rtt = 0;
5049 /* record time */
5050 data_list[i]->sent_rcv_time = net->last_sent_time;
5051 TAILQ_REMOVE(&asoc->send_queue,
5052 data_list[i],
5053 sctp_next);
5054 /* on to the sent queue */
5055 TAILQ_INSERT_TAIL(&asoc->sent_queue,
5056 data_list[i],
5057 sctp_next);
5058 /* This does not lower until the cum-ack passes it */
5059 asoc->sent_queue_cnt++;
5060 asoc->send_queue_cnt--;
/* A lone chunk sent against a closed window with nothing in flight is
 * by definition a window probe. */
5061 if ((asoc->peers_rwnd <= 0) &&
5062 (asoc->total_flight == 0) &&
5063 (bundle_at == 1)) {
5064 /* Mark the chunk as being a window probe */
5065 #ifdef SCTP_DEBUG
5066 if (sctp_debug_on & SCTP_DEBUG_OUTPUT1) {
5067 kprintf("WINDOW PROBE SET\n");
5069 #endif
5070 sctp_pegs[SCTP_WINDOW_PROBES]++;
5071 data_list[i]->rec.data.state_flags |= SCTP_WINDOW_PROBE;
5072 } else {
5073 data_list[i]->rec.data.state_flags &= ~SCTP_WINDOW_PROBE;
5075 #ifdef SCTP_AUDITING_ENABLED
5076 sctp_audit_log(0xC2, 3);
5077 #endif
5078 data_list[i]->sent = SCTP_DATAGRAM_SENT;
5079 data_list[i]->snd_count = 1;
5080 net->flight_size += data_list[i]->book_size;
5081 asoc->total_flight += data_list[i]->book_size;
5082 asoc->total_flight_count++;
5083 #ifdef SCTP_LOG_RWND
5084 sctp_log_rwnd(SCTP_DECREASE_PEER_RWND,
5085 asoc->peers_rwnd , data_list[i]->send_size, sctp_peer_chunk_oh);
5086 #endif
5087 asoc->peers_rwnd = sctp_sbspace_sub(asoc->peers_rwnd,
5088 (u_int32_t)(data_list[i]->send_size + sctp_peer_chunk_oh));
5089 if (asoc->peers_rwnd < stcb->sctp_ep->sctp_ep.sctp_sws_sender) {
5090 /* SWS sender side engages */
5091 asoc->peers_rwnd = 0;
/*
 * Purge stray one-shot control chunks (SACK, HB, SHUTDOWN family, errors,
 * CWR, ASCONF-ACK, ...) from the control send queue after an output pass.
 * STREAM-RESET chunks get special handling: only reset RESPONSES are
 * discarded; pending reset requests stay queued for retransmission.
 */
5096 static void
5097 sctp_clean_up_ctl(struct sctp_association *asoc)
5099 struct sctp_tmit_chunk *chk, *nchk;
5100 for (chk = TAILQ_FIRST(&asoc->control_send_queue);
5101 chk; chk = nchk) {
5102 nchk = TAILQ_NEXT(chk, sctp_next);
5103 if ((chk->rec.chunk_id == SCTP_SELECTIVE_ACK) ||
5104 (chk->rec.chunk_id == SCTP_HEARTBEAT_REQUEST) ||
5105 (chk->rec.chunk_id == SCTP_HEARTBEAT_ACK) ||
5106 (chk->rec.chunk_id == SCTP_SHUTDOWN) ||
5107 (chk->rec.chunk_id == SCTP_SHUTDOWN_ACK) ||
5108 (chk->rec.chunk_id == SCTP_OPERATION_ERROR) ||
5109 (chk->rec.chunk_id == SCTP_PACKET_DROPPED) ||
5110 (chk->rec.chunk_id == SCTP_COOKIE_ACK) ||
5111 (chk->rec.chunk_id == SCTP_ECN_CWR) ||
5112 (chk->rec.chunk_id == SCTP_ASCONF_ACK)) {
5113 /* Stray chunks must be cleaned up */
5114 clean_up_anyway:
5115 TAILQ_REMOVE(&asoc->control_send_queue, chk, sctp_next);
5116 if (chk->data) {
5117 sctp_m_freem(chk->data);
5118 chk->data = NULL;
5120 asoc->ctrl_queue_cnt--;
5121 sctp_free_remote_addr(chk->whoTo);
5122 SCTP_ZONE_FREE(sctppcbinfo.ipi_zone_chunk, chk);
5123 sctppcbinfo.ipi_count_chunk--;
5124 if ((int)sctppcbinfo.ipi_count_chunk < 0) {
5125 panic("Chunk count is negative");
5127 sctppcbinfo.ipi_gencnt_chunk++;
5128 } else if (chk->rec.chunk_id == SCTP_STREAM_RESET) {
5129 struct sctp_stream_reset_req *strreq;
5130 /* special handling, we must look into the param */
5131 strreq = mtod(chk->data, struct sctp_stream_reset_req *);
5132 if (strreq->sr_req.ph.param_type == ntohs(SCTP_STR_RESET_RESPONSE)) {
5133 goto clean_up_anyway;
/*
 * Move one complete message (FIRST..LAST fragment train) from a stream's
 * outqueue to the association's send queue: prepend and fill in the DATA
 * chunk header, pad to a 4-byte boundary, assign TSNs, and account for any
 * mbuf added by M_PREPEND.  Returns the total bytes moved, or 0 when an
 * allocation failure forced the message to be dropped (the user is
 * notified via SCTP_NOTIFY_DG_FAIL).
 */
5139 static int
5140 sctp_move_to_outqueue(struct sctp_tcb *stcb,
5141 struct sctp_stream_out *strq)
5143 /* Move from the stream to the send_queue keeping track of the total */
5144 struct sctp_association *asoc;
5145 int tot_moved = 0;
5146 int failed = 0;
5147 int padval;
5148 struct sctp_tmit_chunk *chk, *nchk;
5149 struct sctp_data_chunk *dchkh;
5150 struct sctpchunk_listhead tmp;
5151 struct mbuf *orig;
5153 asoc = &stcb->asoc;
5154 TAILQ_INIT(&tmp);
5155 chk = TAILQ_FIRST(&strq->outqueue);
/* Pass 1: prepend headers, pad, and collect the message on a temp list. */
5156 while (chk) {
5157 nchk = TAILQ_NEXT(chk, sctp_next);
5158 /* now put in the chunk header */
5159 orig = chk->data;
5160 M_PREPEND(chk->data, sizeof(struct sctp_data_chunk), MB_DONTWAIT);
5161 if (chk->data == NULL) {
5162 /* HELP */
5163 failed++;
5164 break;
5166 if (orig != chk->data) {
5167 /* A new mbuf was added, account for it */
5168 if ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) ||
5169 (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL)) {
5170 stcb->sctp_socket->so_snd.ssb_mbcnt += MSIZE;
5172 #ifdef SCTP_MBCNT_LOGGING
5173 sctp_log_mbcnt(SCTP_LOG_MBCNT_INCREASE,
5174 asoc->total_output_queue_size,
5176 asoc->total_output_mbuf_queue_size,
5177 MSIZE);
5178 #endif
5179 stcb->asoc.total_output_mbuf_queue_size += MSIZE;
5180 chk->mbcnt += MSIZE;
5182 chk->send_size += sizeof(struct sctp_data_chunk);
5183 /* This should NOT have to do anything, but
5184 * I would rather be cautious
5186 if (!failed && ((size_t)chk->data->m_len < sizeof(struct sctp_data_chunk))) {
/* NOTE(review): m_pullup's return value is discarded here; on failure
 * m_pullup frees the chain and returns NULL, so chk->data would be left
 * dangling rather than NULL as the next test assumes.  Extraction may
 * have dropped an assignment line -- verify against upstream. */
5187 m_pullup(chk->data, sizeof(struct sctp_data_chunk));
5188 if (chk->data == NULL) {
5189 failed++;
5190 break;
5193 dchkh = mtod(chk->data, struct sctp_data_chunk *);
5194 dchkh->ch.chunk_length = htons(chk->send_size);
5195 /* Chunks must be padded to even word boundary */
5196 padval = chk->send_size % 4;
5197 if (padval) {
5198 /* For fragmented messages this should not
5199 * run except possibly on the last chunk
5201 if (sctp_pad_lastmbuf(chk->data, (4 - padval))) {
5202 /* we are in big big trouble no mbufs :< */
5203 failed++;
5204 break;
5206 chk->send_size += (4 - padval);
5208 /* pull from stream queue */
5209 TAILQ_REMOVE(&strq->outqueue, chk, sctp_next);
5210 asoc->stream_queue_cnt--;
5211 TAILQ_INSERT_TAIL(&tmp, chk, sctp_next);
5212 /* add it in to the size of moved chunks */
5213 if (chk->rec.data.rcv_flags & SCTP_DATA_LAST_FRAG) {
5214 /* we pull only one message */
5215 break;
5217 chk = nchk;
/* Failure: drop the whole partially-moved message and notify the ULP. */
5219 if (failed) {
5220 /* Gak, we just lost the user message */
5221 chk = TAILQ_FIRST(&tmp);
5222 while (chk) {
5223 nchk = TAILQ_NEXT(chk, sctp_next);
5224 TAILQ_REMOVE(&tmp, chk, sctp_next);
5226 sctp_ulp_notify(SCTP_NOTIFY_DG_FAIL, stcb,
5227 (SCTP_NOTIFY_DATAGRAM_UNSENT|SCTP_INTERNAL_ERROR),
5228 chk);
5230 if (chk->data) {
5231 sctp_m_freem(chk->data);
5232 chk->data = NULL;
5234 if (chk->whoTo) {
5235 sctp_free_remote_addr(chk->whoTo);
5236 chk->whoTo = NULL;
5238 SCTP_ZONE_FREE(sctppcbinfo.ipi_zone_chunk, chk);
5239 sctppcbinfo.ipi_count_chunk--;
5240 if ((int)sctppcbinfo.ipi_count_chunk < 0) {
5241 panic("Chunk count is negative");
5243 sctppcbinfo.ipi_gencnt_chunk++;
5244 chk = nchk;
5246 return (0);
5248 /* now pull them off of temp wheel */
/* Pass 2: assign TSNs and finish the DATA headers, then splice the
 * message onto the association send queue. */
5249 chk = TAILQ_FIRST(&tmp);
5250 while (chk) {
5251 nchk = TAILQ_NEXT(chk, sctp_next);
5252 /* insert on send_queue */
5253 TAILQ_REMOVE(&tmp, chk, sctp_next);
5254 TAILQ_INSERT_TAIL(&asoc->send_queue, chk, sctp_next);
5255 asoc->send_queue_cnt++;
5256 /* assign TSN */
5257 chk->rec.data.TSN_seq = asoc->sending_seq++;
5259 dchkh = mtod(chk->data, struct sctp_data_chunk *);
5260 /* Put the rest of the things in place now. Size
5261 * was done earlier in previous loop prior to
5262 * padding.
5264 dchkh->ch.chunk_type = SCTP_DATA;
5265 dchkh->ch.chunk_flags = chk->rec.data.rcv_flags;
5266 dchkh->dp.tsn = htonl(chk->rec.data.TSN_seq);
5267 dchkh->dp.stream_id = htons(strq->stream_no);
5268 dchkh->dp.stream_sequence = htons(chk->rec.data.stream_seq);
5269 dchkh->dp.protocol_id = chk->rec.data.payloadtype;
5270 /* total count moved */
5271 tot_moved += chk->send_size;
5272 chk = nchk;
5274 return (tot_moved);
/*
 * Service the stream-scheduling wheel for one destination: starting after
 * the last stream served (round-robin fairness), move whole messages whose
 * first chunk targets 'net' from stream queues to the send queue until at
 * least goal_mtu bytes (cwnd headroom, floored at one MTU) have been moved
 * or the wheel is exhausted.  Empty streams are pruned from the wheel.
 */
5277 static void
5278 sctp_fill_outqueue(struct sctp_tcb *stcb,
5279 struct sctp_nets *net)
5281 struct sctp_association *asoc;
5282 struct sctp_tmit_chunk *chk;
5283 struct sctp_stream_out *strq, *strqn;
5284 int mtu_fromwheel, goal_mtu;
5285 unsigned int moved, seenend, cnt_mvd=0;
5287 asoc = &stcb->asoc;
5288 /* Attempt to move at least 1 MTU's worth
5289 * onto the wheel for each destination address
5291 goal_mtu = net->cwnd - net->flight_size;
5292 if ((unsigned int)goal_mtu < net->mtu) {
5293 goal_mtu = net->mtu;
5295 if (sctp_pegs[SCTP_MOVED_MTU] < (unsigned int)goal_mtu) {
5296 sctp_pegs[SCTP_MOVED_MTU] = goal_mtu;
5298 seenend = moved = mtu_fromwheel = 0;
/* Resume the round-robin after the last stream we served, if any. */
5299 if (asoc->last_out_stream == NULL) {
5300 strq = asoc->last_out_stream = TAILQ_FIRST(&asoc->out_wheel);
5301 if (asoc->last_out_stream == NULL) {
5302 /* huh nothing on the wheel, TSNH */
5303 return;
5305 goto done_it;
5307 strq = TAILQ_NEXT(asoc->last_out_stream, next_spoke);
5308 done_it:
5309 if (strq == NULL) {
5310 asoc->last_out_stream = TAILQ_FIRST(&asoc->out_wheel);
5312 while (mtu_fromwheel < goal_mtu) {
/* Wrap at the end of the wheel; stop after a full pass that moved
 * nothing (seenend && !moved), otherwise keep rotating. */
5313 if (strq == NULL) {
5314 if (seenend == 0) {
5315 seenend = 1;
5316 strq = TAILQ_FIRST(&asoc->out_wheel);
5317 } else if ((moved == 0) && (seenend)) {
5318 /* none left on the wheel */
5319 sctp_pegs[SCTP_MOVED_NLEF]++;
5320 return;
5321 } else if (moved) {
5323 * clear the flags and rotate back through
5324 * again
5326 moved = 0;
5327 seenend = 0;
5328 strq = TAILQ_FIRST(&asoc->out_wheel);
5330 if (strq == NULL)
5331 break;
5332 continue;
5334 strqn = TAILQ_NEXT(strq, next_spoke);
5335 if ((chk = TAILQ_FIRST(&strq->outqueue)) == NULL) {
5336 /* none left on this queue, prune a spoke? */
5337 sctp_remove_from_wheel(asoc, strq);
5338 if (strq == asoc->last_out_stream) {
5339 /* the last one we used went off the wheel */
5340 asoc->last_out_stream = NULL;
5342 strq = strqn;
5343 continue;
5345 if (chk->whoTo != net) {
5346 /* Skip this stream, first one on stream
5347 * does not head to our current destination.
5349 strq = strqn;
5350 continue;
5352 mtu_fromwheel += sctp_move_to_outqueue(stcb, strq);
5353 cnt_mvd++;
5354 moved++;
5355 asoc->last_out_stream = strq;
5356 strq = strqn;
5358 sctp_pegs[SCTP_MOVED_MAX]++;
5359 #ifdef SCTP_DEBUG
5360 if (sctp_debug_on & SCTP_DEBUG_OUTPUT3) {
5361 kprintf("Ok we moved %d chunks to send queue\n",
5362 moved);
5364 #endif
5365 if (sctp_pegs[SCTP_MOVED_QMAX] < cnt_mvd) {
5366 sctp_pegs[SCTP_MOVED_QMAX] = cnt_mvd;
/*
 * Re-arm any queued ECN-ECHO chunks for (re)transmission by marking them
 * unsent on the control send queue.
 */
5370 void
5371 sctp_fix_ecn_echo(struct sctp_association *asoc)
5373 struct sctp_tmit_chunk *chk;
5374 TAILQ_FOREACH(chk, &asoc->control_send_queue, sctp_next) {
5375 if (chk->rec.chunk_id == SCTP_ECN_ECHO) {
5376 chk->sent = SCTP_DATAGRAM_UNSENT;
5381 static void
5382 sctp_move_to_an_alt(struct sctp_tcb *stcb,
5383 struct sctp_association *asoc,
5384 struct sctp_nets *net)
5386 struct sctp_tmit_chunk *chk;
5387 struct sctp_nets *a_net;
5388 a_net = sctp_find_alternate_net(stcb, net);
5389 if ((a_net != net) &&
5390 ((a_net->dest_state & SCTP_ADDR_REACHABLE) == SCTP_ADDR_REACHABLE)) {
5392 * We only proceed if a valid alternate is found that is
5393 * not this one and is reachable. Here we must move all
5394 * chunks queued in the send queue off of the destination
5395 * address to our alternate.
5397 TAILQ_FOREACH(chk, &asoc->send_queue, sctp_next) {
5398 if (chk->whoTo == net) {
5399 /* Move the chunk to our alternate */
5400 sctp_free_remote_addr(chk->whoTo);
5401 chk->whoTo = a_net;
5402 a_net->ref_count++;
/*
 * NOTE(review): file-scope flag; presumably records whether the current
 * output pass was initiated from a user send — confirm against callers
 * outside this chunk before relying on it.
 */
static int sctp_from_user_send = 0;
5410 static int
5411 sctp_med_chunk_output(struct sctp_inpcb *inp,
5412 struct sctp_tcb *stcb,
5413 struct sctp_association *asoc,
5414 int *num_out,
5415 int *reason_code,
5416 int control_only, int *cwnd_full, int from_where,
5417 struct timeval *now, int *now_filled)
5420 * Ok this is the generic chunk service queue.
5421 * we must do the following:
5422 * - Service the stream queue that is next, moving any message
5423 * (note I must get a complete message i.e. FIRST/MIDDLE and
5424 * LAST to the out queue in one pass) and assigning TSN's
5425 * - Check to see if the cwnd/rwnd allows any output, if so we
5426 * go ahead and fomulate and send the low level chunks. Making
5427 * sure to combine any control in the control chunk queue also.
5429 struct sctp_nets *net;
5430 struct mbuf *outchain;
5431 struct sctp_tmit_chunk *chk, *nchk;
5432 struct sctphdr *shdr;
5433 /* temp arrays for unlinking */
5434 struct sctp_tmit_chunk *data_list[SCTP_MAX_DATA_BUNDLING];
5435 int no_fragmentflg, error;
5436 int one_chunk, hbflag;
5437 int asconf, cookie, no_out_cnt;
5438 int bundle_at, ctl_cnt, no_data_chunks, cwnd_full_ind;
5439 unsigned int mtu, r_mtu, omtu;
5440 *num_out = 0;
5441 cwnd_full_ind = 0;
5442 ctl_cnt = no_out_cnt = asconf = cookie = 0;
5444 * First lets prime the pump. For each destination, if there
5445 * is room in the flight size, attempt to pull an MTU's worth
5446 * out of the stream queues into the general send_queue
5448 #ifdef SCTP_AUDITING_ENABLED
5449 sctp_audit_log(0xC2, 2);
5450 #endif
5451 #ifdef SCTP_DEBUG
5452 if (sctp_debug_on & SCTP_DEBUG_OUTPUT3) {
5453 kprintf("***********************\n");
5455 #endif
5456 hbflag = 0;
5457 if (control_only)
5458 no_data_chunks = 1;
5459 else
5460 no_data_chunks = 0;
5462 /* Nothing to possible to send? */
5463 if (TAILQ_EMPTY(&asoc->control_send_queue) &&
5464 TAILQ_EMPTY(&asoc->send_queue) &&
5465 TAILQ_EMPTY(&asoc->out_wheel)) {
5466 #ifdef SCTP_DEBUG
5467 if (sctp_debug_on & SCTP_DEBUG_OUTPUT3) {
5468 kprintf("All wheels empty\n");
5470 #endif
5471 return (0);
5473 if (asoc->peers_rwnd <= 0) {
5474 /* No room in peers rwnd */
5475 *cwnd_full = 1;
5476 *reason_code = 1;
5477 if (asoc->total_flight > 0) {
5478 /* we are allowed one chunk in flight */
5479 no_data_chunks = 1;
5480 sctp_pegs[SCTP_RWND_BLOCKED]++;
5483 #ifdef SCTP_DEBUG
5484 if (sctp_debug_on & SCTP_DEBUG_OUTPUT3) {
5485 kprintf("Ok we have done the fillup no_data_chunk=%d tf=%d prw:%d\n",
5486 (int)no_data_chunks,
5487 (int)asoc->total_flight, (int)asoc->peers_rwnd);
5489 #endif
5490 TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
5491 #ifdef SCTP_DEBUG
5492 if (sctp_debug_on & SCTP_DEBUG_OUTPUT3) {
5493 kprintf("net:%p fs:%d cwnd:%d\n",
5494 net, net->flight_size, net->cwnd);
5496 #endif
5497 if (net->flight_size >= net->cwnd) {
5498 /* skip this network, no room */
5499 cwnd_full_ind++;
5500 #ifdef SCTP_DEBUG
5501 if (sctp_debug_on & SCTP_DEBUG_OUTPUT3) {
5502 kprintf("Ok skip fillup->fs:%d > cwnd:%d\n",
5503 net->flight_size,
5504 net->cwnd);
5506 #endif
5507 sctp_pegs[SCTP_CWND_NOFILL]++;
5508 continue;
5511 * spin through the stream queues moving one message and
5512 * assign TSN's as appropriate.
5514 sctp_fill_outqueue(stcb, net);
5516 *cwnd_full = cwnd_full_ind;
5517 /* now service each destination and send out what we can for it */
5518 #ifdef SCTP_DEBUG
5519 if (sctp_debug_on & SCTP_DEBUG_OUTPUT3) {
5520 int chk_cnt = 0;
5521 TAILQ_FOREACH(chk, &asoc->send_queue, sctp_next) {
5522 chk_cnt++;
5524 kprintf("We have %d chunks on the send_queue\n", chk_cnt);
5525 chk_cnt = 0;
5526 TAILQ_FOREACH(chk, &asoc->sent_queue, sctp_next) {
5527 chk_cnt++;
5529 kprintf("We have %d chunks on the sent_queue\n", chk_cnt);
5530 TAILQ_FOREACH(chk, &asoc->control_send_queue, sctp_next) {
5531 chk_cnt++;
5533 kprintf("We have %d chunks on the control_queue\n", chk_cnt);
5535 #endif
5536 /* If we have data to send, and DSACK is running, stop it
5537 * and build a SACK to dump on to bundle with output. This
5538 * actually MAY make it so the bundling does not occur if
5539 * the SACK is big but I think this is ok because basic SACK
5540 * space is pre-reserved in our fragmentation size choice.
5542 if ((TAILQ_FIRST(&asoc->send_queue) != NULL) &&
5543 (no_data_chunks == 0)) {
5544 /* We will be sending something */
5545 if (callout_pending(&stcb->asoc.dack_timer.timer)) {
5546 /* Yep a callout is pending */
5547 sctp_timer_stop(SCTP_TIMER_TYPE_RECV,
5548 stcb->sctp_ep,
5549 stcb, NULL);
5550 sctp_send_sack(stcb);
5553 /* Nothing to send? */
5554 if ((TAILQ_FIRST(&asoc->control_send_queue) == NULL) &&
5555 (TAILQ_FIRST(&asoc->send_queue) == NULL)) {
5556 return (0);
5558 TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
5559 /* how much can we send? */
5560 if (net->ref_count < 2) {
5561 /* Ref-count of 1 so we cannot have data or control
5562 * queued to this address. Skip it.
5564 continue;
5566 ctl_cnt = bundle_at = 0;
5567 outchain = NULL;
5568 no_fragmentflg = 1;
5569 one_chunk = 0;
5571 if ((net->ro.ro_rt) && (net->ro.ro_rt->rt_ifp)) {
5572 /* if we have a route and an ifp
5573 * check to see if we have room to
5574 * send to this guy
5576 struct ifnet *ifp;
5577 ifp = net->ro.ro_rt->rt_ifp;
5578 if ((ifp->if_snd.ifq_len + 2) >= ifp->if_snd.ifq_maxlen) {
5579 sctp_pegs[SCTP_IFP_QUEUE_FULL]++;
5580 #ifdef SCTP_LOG_MAXBURST
5581 sctp_log_maxburst(net, ifp->if_snd.ifq_len, ifp->if_snd.ifq_maxlen, SCTP_MAX_IFP_APPLIED);
5582 #endif
5583 continue;
5586 if (((struct sockaddr *)&net->ro._l_addr)->sa_family == AF_INET) {
5587 mtu = net->mtu - (sizeof(struct ip) + sizeof(struct sctphdr));
5588 } else {
5589 mtu = net->mtu - (sizeof(struct ip6_hdr) + sizeof(struct sctphdr));
5591 if (mtu > asoc->peers_rwnd) {
5592 if (asoc->total_flight > 0) {
5593 /* We have a packet in flight somewhere */
5594 r_mtu = asoc->peers_rwnd;
5595 } else {
5596 /* We are always allowed to send one MTU out */
5597 one_chunk = 1;
5598 r_mtu = mtu;
5600 } else {
5601 r_mtu = mtu;
5603 #ifdef SCTP_DEBUG
5604 if (sctp_debug_on & SCTP_DEBUG_OUTPUT3) {
5605 kprintf("Ok r_mtu is %d mtu is %d for this net:%p one_chunk:%d\n",
5606 r_mtu, mtu, net, one_chunk);
5608 #endif
5609 /************************/
5610 /* Control transmission */
5611 /************************/
5612 /* Now first lets go through the control queue */
5613 for (chk = TAILQ_FIRST(&asoc->control_send_queue);
5614 chk; chk = nchk) {
5615 nchk = TAILQ_NEXT(chk, sctp_next);
5616 if (chk->whoTo != net) {
5618 * No, not sent to the network we are
5619 * looking at
5621 continue;
5623 if (chk->data == NULL) {
5624 continue;
5626 if ((chk->data->m_flags & M_PKTHDR) == 0) {
5628 * NOTE: the chk queue MUST have the PKTHDR
5629 * flag set on it with a total in the
5630 * m_pkthdr.len field!! else the chunk will
5631 * ALWAYS be skipped
5633 continue;
5635 if (chk->sent != SCTP_DATAGRAM_UNSENT) {
5637 * It must be unsent. Cookies and ASCONF's
5638 * hang around but there timers will force
5639 * when marked for resend.
5641 continue;
5643 /* Here we do NOT factor the r_mtu */
5644 if ((chk->data->m_pkthdr.len < (int)mtu) ||
5645 (chk->flags & CHUNK_FLAGS_FRAGMENT_OK)) {
5647 * We probably should glom the mbuf chain from
5648 * the chk->data for control but the problem
5649 * is it becomes yet one more level of
5650 * tracking to do if for some reason output
5651 * fails. Then I have got to reconstruct the
5652 * merged control chain.. el yucko.. for now
5653 * we take the easy way and do the copy
5655 outchain = sctp_copy_mbufchain(chk->data,
5656 outchain);
5657 if (outchain == NULL) {
5658 return (ENOMEM);
5660 /* update our MTU size */
5661 mtu -= chk->data->m_pkthdr.len;
5662 if (mtu < 0) {
5663 mtu = 0;
5665 /* Do clear IP_DF ? */
5666 if (chk->flags & CHUNK_FLAGS_FRAGMENT_OK) {
5667 no_fragmentflg = 0;
5669 /* Mark things to be removed, if needed */
5670 if ((chk->rec.chunk_id == SCTP_SELECTIVE_ACK) ||
5671 (chk->rec.chunk_id == SCTP_HEARTBEAT_REQUEST) ||
5672 (chk->rec.chunk_id == SCTP_HEARTBEAT_ACK) ||
5673 (chk->rec.chunk_id == SCTP_SHUTDOWN) ||
5674 (chk->rec.chunk_id == SCTP_SHUTDOWN_ACK) ||
5675 (chk->rec.chunk_id == SCTP_OPERATION_ERROR) ||
5676 (chk->rec.chunk_id == SCTP_COOKIE_ACK) ||
5677 (chk->rec.chunk_id == SCTP_ECN_CWR) ||
5678 (chk->rec.chunk_id == SCTP_PACKET_DROPPED) ||
5679 (chk->rec.chunk_id == SCTP_ASCONF_ACK)) {
5681 if (chk->rec.chunk_id == SCTP_HEARTBEAT_REQUEST)
5682 hbflag = 1;
5683 /* remove these chunks at the end */
5684 if (chk->rec.chunk_id == SCTP_SELECTIVE_ACK) {
5685 /* turn off the timer */
5686 if (callout_pending(&stcb->asoc.dack_timer.timer)) {
5687 sctp_timer_stop(SCTP_TIMER_TYPE_RECV,
5688 inp, stcb, net);
5691 ctl_cnt++;
5692 } else {
5694 * Other chunks, since they have
5695 * timers running (i.e. COOKIE or
5696 * ASCONF) we just "trust" that it
5697 * gets sent or retransmitted.
5699 ctl_cnt++;
5700 if (chk->rec.chunk_id == SCTP_COOKIE_ECHO) {
5701 cookie = 1;
5702 no_out_cnt = 1;
5703 } else if (chk->rec.chunk_id == SCTP_ASCONF) {
5705 * set hb flag since we can use
5706 * these for RTO
5708 hbflag = 1;
5709 asconf = 1;
5711 chk->sent = SCTP_DATAGRAM_SENT;
5712 chk->snd_count++;
5714 if (mtu == 0) {
5716 * Ok we are out of room but we can
5717 * output without effecting the flight
5718 * size since this little guy is a
5719 * control only packet.
5721 if (asconf) {
5722 sctp_timer_start(SCTP_TIMER_TYPE_ASCONF, inp, stcb, net);
5723 asconf = 0;
5725 if (cookie) {
5726 sctp_timer_start(SCTP_TIMER_TYPE_COOKIE, inp, stcb, net);
5727 cookie = 0;
5729 if (outchain->m_len == 0) {
5731 * Special case for when you
5732 * get a 0 len mbuf at the
5733 * head due to the lack of a
5734 * MHDR at the beginning.
5736 outchain->m_len = sizeof(struct sctphdr);
5737 } else {
5738 M_PREPEND(outchain, sizeof(struct sctphdr), MB_DONTWAIT);
5739 if (outchain == NULL) {
5740 /* no memory */
5741 error = ENOBUFS;
5742 goto error_out_again;
5745 shdr = mtod(outchain, struct sctphdr *);
5746 shdr->src_port = inp->sctp_lport;
5747 shdr->dest_port = stcb->rport;
5748 shdr->v_tag = htonl(stcb->asoc.peer_vtag);
5749 shdr->checksum = 0;
5751 if ((error = sctp_lowlevel_chunk_output(inp, stcb, net,
5752 (struct sockaddr *)&net->ro._l_addr,
5753 outchain,
5754 no_fragmentflg, 0, NULL, asconf))) {
5755 if (error == ENOBUFS) {
5756 asoc->ifp_had_enobuf = 1;
5758 sctp_pegs[SCTP_DATA_OUT_ERR]++;
5759 if (from_where == 0) {
5760 sctp_pegs[SCTP_ERROUT_FRM_USR]++;
5762 error_out_again:
5763 #ifdef SCTP_DEBUG
5764 if (sctp_debug_on & SCTP_DEBUG_OUTPUT2) {
5765 kprintf("Gak got ctrl error %d\n", error);
5767 #endif
5768 /* error, could not output */
5769 if (hbflag) {
5770 #ifdef SCTP_DEBUG
5771 if (sctp_debug_on & SCTP_DEBUG_OUTPUT3) {
5772 kprintf("Update HB anyway\n");
5774 #endif
5775 if (*now_filled == 0) {
5776 SCTP_GETTIME_TIMEVAL(&net->last_sent_time);
5777 *now_filled = 1;
5778 *now = net->last_sent_time;
5779 } else {
5780 net->last_sent_time = *now;
5782 hbflag = 0;
5784 if (error == EHOSTUNREACH) {
5786 * Destination went
5787 * unreachable during
5788 * this send
5790 #ifdef SCTP_DEBUG
5791 if (sctp_debug_on & SCTP_DEBUG_OUTPUT3) {
5792 kprintf("Moving data to an alterante\n");
5794 #endif
5795 sctp_move_to_an_alt(stcb, asoc, net);
5797 sctp_clean_up_ctl (asoc);
5798 return (error);
5799 } else
5800 asoc->ifp_had_enobuf = 0;
5801 /* Only HB or ASCONF advances time */
5802 if (hbflag) {
5803 if (*now_filled == 0) {
5804 SCTP_GETTIME_TIMEVAL(&net->last_sent_time);
5805 *now_filled = 1;
5806 *now = net->last_sent_time;
5807 } else {
5808 net->last_sent_time = *now;
5810 hbflag = 0;
5813 * increase the number we sent, if a
5814 * cookie is sent we don't tell them
5815 * any was sent out.
5817 if (!no_out_cnt)
5818 *num_out += ctl_cnt;
5819 /* recalc a clean slate and setup */
5820 if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) {
5821 mtu = (net->mtu - SCTP_MIN_OVERHEAD);
5822 } else {
5823 mtu = (net->mtu - SCTP_MIN_V4_OVERHEAD);
5825 no_fragmentflg = 1;
5829 /*********************/
5830 /* Data transmission */
5831 /*********************/
5832 /* now lets add any data within the MTU constraints */
5833 if (((struct sockaddr *)&net->ro._l_addr)->sa_family == AF_INET) {
5834 omtu = net->mtu - (sizeof(struct ip) + sizeof(struct sctphdr));
5835 } else {
5836 omtu = net->mtu - (sizeof(struct ip6_hdr) + sizeof(struct sctphdr));
5839 #ifdef SCTP_DEBUG
5840 if (sctp_debug_on & SCTP_DEBUG_OUTPUT3) {
5841 kprintf("Now to data transmission\n");
5843 #endif
5845 if (((asoc->state & SCTP_STATE_OPEN) == SCTP_STATE_OPEN) ||
5846 (cookie)) {
5847 for (chk = TAILQ_FIRST(&asoc->send_queue); chk; chk = nchk) {
5848 if (no_data_chunks) {
5849 /* let only control go out */
5850 #ifdef SCTP_DEBUG
5851 if (sctp_debug_on & SCTP_DEBUG_OUTPUT3) {
5852 kprintf("Either nothing to send or we are full\n");
5854 #endif
5855 break;
5857 if (net->flight_size >= net->cwnd) {
5858 /* skip this net, no room for data */
5859 #ifdef SCTP_DEBUG
5860 if (sctp_debug_on & SCTP_DEBUG_OUTPUT3) {
5861 kprintf("fs:%d > cwnd:%d\n",
5862 net->flight_size, net->cwnd);
5864 #endif
5865 sctp_pegs[SCTP_CWND_BLOCKED]++;
5866 *reason_code = 2;
5867 break;
5869 nchk = TAILQ_NEXT(chk, sctp_next);
5870 if (chk->whoTo != net) {
5871 /* No, not sent to this net */
5872 #ifdef SCTP_DEBUG
5873 if (sctp_debug_on & SCTP_DEBUG_OUTPUT3) {
5874 kprintf("chk->whoTo:%p not %p\n",
5875 chk->whoTo, net);
5878 #endif
5879 continue;
5881 #ifdef SCTP_DEBUG
5882 if (sctp_debug_on & SCTP_DEBUG_OUTPUT3) {
5883 kprintf("Can we pick up a chunk?\n");
5885 #endif
5886 if ((chk->send_size > omtu) && ((chk->flags & CHUNK_FLAGS_FRAGMENT_OK) == 0)) {
5887 /* strange, we have a chunk that is to bit
5888 * for its destination and yet no fragment ok flag.
5889 * Something went wrong when the PMTU changed...we did
5890 * not mark this chunk for some reason?? I will
5891 * fix it here by letting IP fragment it for now and
5892 * printing a warning. This really should not happen ...
5894 /*#ifdef SCTP_DEBUG*/
5895 kprintf("Warning chunk of %d bytes > mtu:%d and yet PMTU disc missed\n",
5896 chk->send_size, mtu);
5897 /*#endif*/
5898 chk->flags |= CHUNK_FLAGS_FRAGMENT_OK;
5901 if (((chk->send_size <= mtu) && (chk->send_size <= r_mtu)) ||
5902 ((chk->flags & CHUNK_FLAGS_FRAGMENT_OK) && (chk->send_size <= asoc->peers_rwnd))) {
5903 /* ok we will add this one */
5904 #ifdef SCTP_DEBUG
5905 if (sctp_debug_on & SCTP_DEBUG_OUTPUT3) {
5906 kprintf("Picking up the chunk\n");
5908 #endif
5909 outchain = sctp_copy_mbufchain(chk->data, outchain);
5910 if (outchain == NULL) {
5911 #ifdef SCTP_DEBUG
5912 if (sctp_debug_on & SCTP_DEBUG_OUTPUT3) {
5913 kprintf("Gakk no memory\n");
5915 #endif
5916 if (!callout_pending(&net->rxt_timer.timer)) {
5917 sctp_timer_start(SCTP_TIMER_TYPE_SEND, inp, stcb, net);
5919 return (ENOMEM);
5921 /* upate our MTU size */
5922 /* Do clear IP_DF ? */
5923 if (chk->flags & CHUNK_FLAGS_FRAGMENT_OK) {
5924 no_fragmentflg = 0;
5926 mtu -= chk->send_size;
5927 r_mtu -= chk->send_size;
5928 data_list[bundle_at++] = chk;
5929 if (bundle_at >= SCTP_MAX_DATA_BUNDLING) {
5930 mtu = 0;
5931 break;
5933 if (mtu <= 0) {
5934 mtu = 0;
5935 break;
5937 if ((r_mtu <= 0) || one_chunk) {
5938 r_mtu = 0;
5939 break;
5941 } else {
5943 * Must be sent in order of the TSN's
5944 * (on a network)
5946 #ifdef SCTP_DEBUG
5947 if (sctp_debug_on & SCTP_DEBUG_OUTPUT3) {
5948 kprintf("ok no more chk:%d > mtu:%d || < r_mtu:%d\n",
5949 chk->send_size, mtu, r_mtu);
5951 #endif
5953 break;
5955 }/* for () */
5956 } /* if asoc.state OPEN */
5957 /* Is there something to send for this destination? */
5958 #ifdef SCTP_DEBUG
5959 if (sctp_debug_on & SCTP_DEBUG_OUTPUT3) {
5960 kprintf("ok now is chain assembled? %p\n",
5961 outchain);
5963 #endif
5965 if (outchain) {
5966 /* We may need to start a control timer or two */
5967 if (asconf) {
5968 sctp_timer_start(SCTP_TIMER_TYPE_ASCONF, inp, stcb, net);
5969 asconf = 0;
5971 if (cookie) {
5972 sctp_timer_start(SCTP_TIMER_TYPE_COOKIE, inp, stcb, net);
5973 cookie = 0;
5975 /* must start a send timer if data is being sent */
5976 if (bundle_at && (!callout_pending(&net->rxt_timer.timer))) {
5977 /* no timer running on this destination
5978 * restart it.
5980 #ifdef SCTP_DEBUG
5981 if (sctp_debug_on & SCTP_DEBUG_OUTPUT3) {
5982 kprintf("ok lets start a send timer .. we will transmit %p\n",
5983 outchain);
5985 #endif
5986 sctp_timer_start(SCTP_TIMER_TYPE_SEND, inp, stcb, net);
5988 /* Now send it, if there is anything to send :> */
5989 if ((outchain->m_flags & M_PKTHDR) == 0) {
5990 struct mbuf *t;
5992 MGETHDR(t, MB_DONTWAIT, MT_HEADER);
5993 if (t == NULL) {
5994 sctp_m_freem(outchain);
5995 return (ENOMEM);
5997 t->m_next = outchain;
5998 t->m_pkthdr.len = 0;
5999 t->m_pkthdr.rcvif = 0;
6000 t->m_len = 0;
6002 outchain = t;
6003 while (t) {
6004 outchain->m_pkthdr.len += t->m_len;
6005 t = t->m_next;
6008 if (outchain->m_len == 0) {
6009 /* Special case for when you get a 0 len
6010 * mbuf at the head due to the lack
6011 * of a MHDR at the beginning.
6013 MH_ALIGN(outchain, sizeof(struct sctphdr));
6014 outchain->m_len = sizeof(struct sctphdr);
6015 } else {
6016 M_PREPEND(outchain, sizeof(struct sctphdr), MB_DONTWAIT);
6017 if (outchain == NULL) {
6018 /* out of mbufs */
6019 error = ENOBUFS;
6020 goto errored_send;
6023 shdr = mtod(outchain, struct sctphdr *);
6024 shdr->src_port = inp->sctp_lport;
6025 shdr->dest_port = stcb->rport;
6026 shdr->v_tag = htonl(stcb->asoc.peer_vtag);
6027 shdr->checksum = 0;
6028 if ((error = sctp_lowlevel_chunk_output(inp, stcb, net,
6029 (struct sockaddr *)&net->ro._l_addr,
6030 outchain,
6031 no_fragmentflg, bundle_at, data_list[0], asconf))) {
6032 /* error, we could not output */
6033 if (error == ENOBUFS) {
6034 asoc->ifp_had_enobuf = 1;
6036 sctp_pegs[SCTP_DATA_OUT_ERR]++;
6037 if (from_where == 0) {
6038 sctp_pegs[SCTP_ERROUT_FRM_USR]++;
6041 errored_send:
6042 #ifdef SCTP_DEBUG
6043 if (sctp_debug_on & SCTP_DEBUG_OUTPUT3) {
6044 kprintf("Gak send error %d\n", error);
6046 #endif
6047 if (hbflag) {
6048 #ifdef SCTP_DEBUG
6049 if (sctp_debug_on & SCTP_DEBUG_OUTPUT3) {
6050 kprintf("Update HB time anyway\n");
6052 #endif
6053 if (*now_filled == 0) {
6054 SCTP_GETTIME_TIMEVAL(&net->last_sent_time);
6055 *now_filled = 1;
6056 *now = net->last_sent_time;
6057 } else {
6058 net->last_sent_time = *now;
6060 hbflag = 0;
6062 if (error == EHOSTUNREACH) {
6064 * Destination went unreachable during
6065 * this send
6067 #ifdef SCTP_DEBUG
6068 if (sctp_debug_on & SCTP_DEBUG_OUTPUT3) {
6069 kprintf("Calling the movement routine\n");
6071 #endif
6072 sctp_move_to_an_alt(stcb, asoc, net);
6074 sctp_clean_up_ctl (asoc);
6075 return (error);
6076 } else {
6077 asoc->ifp_had_enobuf = 0;
6079 if (bundle_at || hbflag) {
6080 /* For data/asconf and hb set time */
6081 if (*now_filled == 0) {
6082 SCTP_GETTIME_TIMEVAL(&net->last_sent_time);
6083 *now_filled = 1;
6084 *now = net->last_sent_time;
6085 } else {
6086 net->last_sent_time = *now;
6090 if (!no_out_cnt) {
6091 *num_out += (ctl_cnt + bundle_at);
6093 if (bundle_at) {
6094 if (!net->rto_pending) {
6095 /* setup for a RTO measurement */
6096 net->rto_pending = 1;
6097 data_list[0]->do_rtt = 1;
6098 } else {
6099 data_list[0]->do_rtt = 0;
6101 sctp_pegs[SCTP_PEG_TSNS_SENT] += bundle_at;
6102 sctp_clean_up_datalist(stcb, asoc, data_list, bundle_at, net);
6104 if (one_chunk) {
6105 break;
6109 /* At the end there should be no NON timed
6110 * chunks hanging on this queue.
6112 if ((*num_out == 0) && (*reason_code == 0)) {
6113 *reason_code = 3;
6115 sctp_clean_up_ctl (asoc);
6116 return (0);
6119 void
6120 sctp_queue_op_err(struct sctp_tcb *stcb, struct mbuf *op_err)
6122 /* Prepend a OPERATIONAL_ERROR chunk header
6123 * and put on the end of the control chunk queue.
6125 /* Sender had better have gotten a MGETHDR or else
6126 * the control chunk will be forever skipped
6128 struct sctp_chunkhdr *hdr;
6129 struct sctp_tmit_chunk *chk;
6130 struct mbuf *mat;
6132 chk = (struct sctp_tmit_chunk *)SCTP_ZONE_GET(sctppcbinfo.ipi_zone_chunk);
6133 if (chk == NULL) {
6134 /* no memory */
6135 sctp_m_freem(op_err);
6136 return;
6138 sctppcbinfo.ipi_count_chunk++;
6139 sctppcbinfo.ipi_gencnt_chunk++;
6140 M_PREPEND(op_err, sizeof(struct sctp_chunkhdr), MB_DONTWAIT);
6141 if (op_err == NULL) {
6142 SCTP_ZONE_FREE(sctppcbinfo.ipi_zone_chunk, chk);
6143 sctppcbinfo.ipi_count_chunk--;
6144 if ((int)sctppcbinfo.ipi_count_chunk < 0) {
6145 panic("Chunk count is negative");
6147 sctppcbinfo.ipi_gencnt_chunk++;
6148 return;
6150 chk->send_size = 0;
6151 mat = op_err;
6152 while (mat != NULL) {
6153 chk->send_size += mat->m_len;
6154 mat = mat->m_next;
6156 chk->rec.chunk_id = SCTP_OPERATION_ERROR;
6157 chk->sent = SCTP_DATAGRAM_UNSENT;
6158 chk->snd_count = 0;
6159 chk->flags = 0;
6160 chk->asoc = &stcb->asoc;
6161 chk->data = op_err;
6162 chk->whoTo = chk->asoc->primary_destination;
6163 chk->whoTo->ref_count++;
6164 hdr = mtod(op_err, struct sctp_chunkhdr *);
6165 hdr->chunk_type = SCTP_OPERATION_ERROR;
6166 hdr->chunk_flags = 0;
6167 hdr->chunk_length = htons(chk->send_size);
6168 TAILQ_INSERT_TAIL(&chk->asoc->control_send_queue,
6169 chk,
6170 sctp_next);
6171 chk->asoc->ctrl_queue_cnt++;
6175 sctp_send_cookie_echo(struct mbuf *m,
6176 int offset,
6177 struct sctp_tcb *stcb,
6178 struct sctp_nets *net)
6181 * pull out the cookie and put it at the front of the control
6182 * chunk queue.
6184 int at;
6185 struct mbuf *cookie, *mat;
6186 struct sctp_paramhdr parm, *phdr;
6187 struct sctp_chunkhdr *hdr;
6188 struct sctp_tmit_chunk *chk;
6189 uint16_t ptype, plen;
6190 /* First find the cookie in the param area */
6191 cookie = NULL;
6192 at = offset + sizeof(struct sctp_init_chunk);
6194 do {
6195 phdr = sctp_get_next_param(m, at, &parm, sizeof(parm));
6196 if (phdr == NULL) {
6197 return (-3);
6199 ptype = ntohs(phdr->param_type);
6200 plen = ntohs(phdr->param_length);
6201 if (ptype == SCTP_STATE_COOKIE) {
6202 int pad;
6203 /* found the cookie */
6204 if ((pad = (plen % 4))) {
6205 plen += 4 - pad;
6207 cookie = sctp_m_copym(m, at, plen, MB_DONTWAIT);
6208 if (cookie == NULL) {
6209 /* No memory */
6210 return (-2);
6212 break;
6214 at += SCTP_SIZE32(plen);
6215 } while (phdr);
6216 if (cookie == NULL) {
6217 /* Did not find the cookie */
6218 return (-3);
6220 /* ok, we got the cookie lets change it into a cookie echo chunk */
6222 /* first the change from param to cookie */
6223 hdr = mtod(cookie, struct sctp_chunkhdr *);
6224 hdr->chunk_type = SCTP_COOKIE_ECHO;
6225 hdr->chunk_flags = 0;
6226 /* now we MUST have a PKTHDR on it */
6227 if ((cookie->m_flags & M_PKTHDR) != M_PKTHDR) {
6228 /* we hope this happens rarely */
6229 MGETHDR(mat, MB_DONTWAIT, MT_HEADER);
6230 if (mat == NULL) {
6231 sctp_m_freem(cookie);
6232 return (-4);
6234 mat->m_len = 0;
6235 mat->m_pkthdr.rcvif = 0;
6236 mat->m_next = cookie;
6237 cookie = mat;
6239 cookie->m_pkthdr.len = plen;
6240 /* get the chunk stuff now and place it in the FRONT of the queue */
6241 chk = (struct sctp_tmit_chunk *)SCTP_ZONE_GET(sctppcbinfo.ipi_zone_chunk);
6242 if (chk == NULL) {
6243 /* no memory */
6244 sctp_m_freem(cookie);
6245 return (-5);
6247 sctppcbinfo.ipi_count_chunk++;
6248 sctppcbinfo.ipi_gencnt_chunk++;
6249 chk->send_size = cookie->m_pkthdr.len;
6250 chk->rec.chunk_id = SCTP_COOKIE_ECHO;
6251 chk->sent = SCTP_DATAGRAM_UNSENT;
6252 chk->snd_count = 0;
6253 chk->flags = 0;
6254 chk->asoc = &stcb->asoc;
6255 chk->data = cookie;
6256 chk->whoTo = chk->asoc->primary_destination;
6257 chk->whoTo->ref_count++;
6258 TAILQ_INSERT_HEAD(&chk->asoc->control_send_queue, chk, sctp_next);
6259 chk->asoc->ctrl_queue_cnt++;
6260 return (0);
6263 void
6264 sctp_send_heartbeat_ack(struct sctp_tcb *stcb,
6265 struct mbuf *m,
6266 int offset,
6267 int chk_length,
6268 struct sctp_nets *net)
6270 /* take a HB request and make it into a
6271 * HB ack and send it.
6273 struct mbuf *outchain;
6274 struct sctp_chunkhdr *chdr;
6275 struct sctp_tmit_chunk *chk;
6278 if (net == NULL)
6279 /* must have a net pointer */
6280 return;
6282 outchain = sctp_m_copym(m, offset, chk_length, MB_DONTWAIT);
6283 if (outchain == NULL) {
6284 /* gak out of memory */
6285 return;
6287 chdr = mtod(outchain, struct sctp_chunkhdr *);
6288 chdr->chunk_type = SCTP_HEARTBEAT_ACK;
6289 chdr->chunk_flags = 0;
6290 if ((outchain->m_flags & M_PKTHDR) != M_PKTHDR) {
6291 /* should not happen but we are cautious. */
6292 struct mbuf *tmp;
6293 MGETHDR(tmp, MB_DONTWAIT, MT_HEADER);
6294 if (tmp == NULL) {
6295 return;
6297 tmp->m_len = 0;
6298 tmp->m_pkthdr.rcvif = 0;
6299 tmp->m_next = outchain;
6300 outchain = tmp;
6302 outchain->m_pkthdr.len = chk_length;
6303 if (chk_length % 4) {
6304 /* need pad */
6305 u_int32_t cpthis=0;
6306 int padlen;
6307 padlen = 4 - (outchain->m_pkthdr.len % 4);
6308 m_copyback(outchain, outchain->m_pkthdr.len, padlen, (caddr_t)&cpthis);
6310 chk = (struct sctp_tmit_chunk *)SCTP_ZONE_GET(sctppcbinfo.ipi_zone_chunk);
6311 if (chk == NULL) {
6312 /* no memory */
6313 sctp_m_freem(outchain);
6314 return ;
6316 sctppcbinfo.ipi_count_chunk++;
6317 sctppcbinfo.ipi_gencnt_chunk++;
6319 chk->send_size = chk_length;
6320 chk->rec.chunk_id = SCTP_HEARTBEAT_ACK;
6321 chk->sent = SCTP_DATAGRAM_UNSENT;
6322 chk->snd_count = 0;
6323 chk->flags = 0;
6324 chk->asoc = &stcb->asoc;
6325 chk->data = outchain;
6326 chk->whoTo = net;
6327 chk->whoTo->ref_count++;
6328 TAILQ_INSERT_TAIL(&chk->asoc->control_send_queue, chk, sctp_next);
6329 chk->asoc->ctrl_queue_cnt++;
6333 sctp_send_cookie_ack(struct sctp_tcb *stcb) {
6334 /* formulate and queue a cookie-ack back to sender */
6335 struct mbuf *cookie_ack;
6336 struct sctp_chunkhdr *hdr;
6337 struct sctp_tmit_chunk *chk;
6339 cookie_ack = NULL;
6340 MGETHDR(cookie_ack, MB_DONTWAIT, MT_HEADER);
6341 if (cookie_ack == NULL) {
6342 /* no mbuf's */
6343 return (-1);
6345 cookie_ack->m_data += SCTP_MIN_OVERHEAD;
6346 chk = (struct sctp_tmit_chunk *)SCTP_ZONE_GET(sctppcbinfo.ipi_zone_chunk);
6347 if (chk == NULL) {
6348 /* no memory */
6349 sctp_m_freem(cookie_ack);
6350 return (-1);
6352 sctppcbinfo.ipi_count_chunk++;
6353 sctppcbinfo.ipi_gencnt_chunk++;
6355 chk->send_size = sizeof(struct sctp_chunkhdr);
6356 chk->rec.chunk_id = SCTP_COOKIE_ACK;
6357 chk->sent = SCTP_DATAGRAM_UNSENT;
6358 chk->snd_count = 0;
6359 chk->flags = 0;
6360 chk->asoc = &stcb->asoc;
6361 chk->data = cookie_ack;
6362 if (chk->asoc->last_control_chunk_from != NULL) {
6363 chk->whoTo = chk->asoc->last_control_chunk_from;
6364 } else {
6365 chk->whoTo = chk->asoc->primary_destination;
6367 chk->whoTo->ref_count++;
6368 hdr = mtod(cookie_ack, struct sctp_chunkhdr *);
6369 hdr->chunk_type = SCTP_COOKIE_ACK;
6370 hdr->chunk_flags = 0;
6371 hdr->chunk_length = htons(chk->send_size);
6372 cookie_ack->m_pkthdr.len = cookie_ack->m_len = chk->send_size;
6373 cookie_ack->m_pkthdr.rcvif = 0;
6374 TAILQ_INSERT_TAIL(&chk->asoc->control_send_queue, chk, sctp_next);
6375 chk->asoc->ctrl_queue_cnt++;
6376 return (0);
6381 sctp_send_shutdown_ack(struct sctp_tcb *stcb, struct sctp_nets *net)
6383 /* formulate and queue a SHUTDOWN-ACK back to the sender */
6384 struct mbuf *m_shutdown_ack;
6385 struct sctp_shutdown_ack_chunk *ack_cp;
6386 struct sctp_tmit_chunk *chk;
6388 m_shutdown_ack = NULL;
6389 MGETHDR(m_shutdown_ack, MB_DONTWAIT, MT_HEADER);
6390 if (m_shutdown_ack == NULL) {
6391 /* no mbuf's */
6392 return (-1);
6394 m_shutdown_ack->m_data += SCTP_MIN_OVERHEAD;
6395 chk = (struct sctp_tmit_chunk *)SCTP_ZONE_GET(sctppcbinfo.ipi_zone_chunk);
6396 if (chk == NULL) {
6397 /* no memory */
6398 sctp_m_freem(m_shutdown_ack);
6399 return (-1);
6401 sctppcbinfo.ipi_count_chunk++;
6402 sctppcbinfo.ipi_gencnt_chunk++;
6404 chk->send_size = sizeof(struct sctp_chunkhdr);
6405 chk->rec.chunk_id = SCTP_SHUTDOWN_ACK;
6406 chk->sent = SCTP_DATAGRAM_UNSENT;
6407 chk->snd_count = 0;
6408 chk->flags = 0;
6409 chk->asoc = &stcb->asoc;
6410 chk->data = m_shutdown_ack;
6411 chk->whoTo = net;
6412 net->ref_count++;
6414 ack_cp = mtod(m_shutdown_ack, struct sctp_shutdown_ack_chunk *);
6415 ack_cp->ch.chunk_type = SCTP_SHUTDOWN_ACK;
6416 ack_cp->ch.chunk_flags = 0;
6417 ack_cp->ch.chunk_length = htons(chk->send_size);
6418 m_shutdown_ack->m_pkthdr.len = m_shutdown_ack->m_len = chk->send_size;
6419 m_shutdown_ack->m_pkthdr.rcvif = 0;
6420 TAILQ_INSERT_TAIL(&chk->asoc->control_send_queue, chk, sctp_next);
6421 chk->asoc->ctrl_queue_cnt++;
6422 return (0);
6426 sctp_send_shutdown(struct sctp_tcb *stcb, struct sctp_nets *net)
6428 /* formulate and queue a SHUTDOWN to the sender */
6429 struct mbuf *m_shutdown;
6430 struct sctp_shutdown_chunk *shutdown_cp;
6431 struct sctp_tmit_chunk *chk;
6433 m_shutdown = NULL;
6434 MGETHDR(m_shutdown, MB_DONTWAIT, MT_HEADER);
6435 if (m_shutdown == NULL) {
6436 /* no mbuf's */
6437 return (-1);
6439 m_shutdown->m_data += SCTP_MIN_OVERHEAD;
6440 chk = (struct sctp_tmit_chunk *)SCTP_ZONE_GET(sctppcbinfo.ipi_zone_chunk);
6441 if (chk == NULL) {
6442 /* no memory */
6443 sctp_m_freem(m_shutdown);
6444 return (-1);
6446 sctppcbinfo.ipi_count_chunk++;
6447 sctppcbinfo.ipi_gencnt_chunk++;
6449 chk->send_size = sizeof(struct sctp_shutdown_chunk);
6450 chk->rec.chunk_id = SCTP_SHUTDOWN;
6451 chk->sent = SCTP_DATAGRAM_UNSENT;
6452 chk->snd_count = 0;
6453 chk->flags = 0;
6454 chk->asoc = &stcb->asoc;
6455 chk->data = m_shutdown;
6456 chk->whoTo = net;
6457 net->ref_count++;
6459 shutdown_cp = mtod(m_shutdown, struct sctp_shutdown_chunk *);
6460 shutdown_cp->ch.chunk_type = SCTP_SHUTDOWN;
6461 shutdown_cp->ch.chunk_flags = 0;
6462 shutdown_cp->ch.chunk_length = htons(chk->send_size);
6463 shutdown_cp->cumulative_tsn_ack = htonl(stcb->asoc.cumulative_tsn);
6464 m_shutdown->m_pkthdr.len = m_shutdown->m_len = chk->send_size;
6465 m_shutdown->m_pkthdr.rcvif = 0;
6466 TAILQ_INSERT_TAIL(&chk->asoc->control_send_queue, chk, sctp_next);
6467 chk->asoc->ctrl_queue_cnt++;
6469 if ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) ||
6470 (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL)) {
6471 stcb->sctp_ep->sctp_socket->so_snd.ssb_cc = 0;
6472 soisdisconnecting(stcb->sctp_ep->sctp_socket);
6474 return (0);
6478 sctp_send_asconf(struct sctp_tcb *stcb, struct sctp_nets *net)
6481 * formulate and queue an ASCONF to the peer
6482 * ASCONF parameters should be queued on the assoc queue
6484 struct sctp_tmit_chunk *chk;
6485 struct mbuf *m_asconf;
6486 struct sctp_asconf_chunk *acp;
6489 /* compose an ASCONF chunk, maximum length is PMTU */
6490 m_asconf = sctp_compose_asconf(stcb);
6491 if (m_asconf == NULL) {
6492 return (-1);
6494 acp = mtod(m_asconf, struct sctp_asconf_chunk *);
6495 chk = (struct sctp_tmit_chunk *)SCTP_ZONE_GET(sctppcbinfo.ipi_zone_chunk);
6496 if (chk == NULL) {
6497 /* no memory */
6498 sctp_m_freem(m_asconf);
6499 return (-1);
6501 sctppcbinfo.ipi_count_chunk++;
6502 sctppcbinfo.ipi_gencnt_chunk++;
6504 chk->data = m_asconf;
6505 chk->send_size = m_asconf->m_pkthdr.len;
6506 chk->rec.chunk_id = SCTP_ASCONF;
6507 chk->sent = SCTP_DATAGRAM_UNSENT;
6508 chk->snd_count = 0;
6509 chk->flags = 0;
6510 chk->asoc = &stcb->asoc;
6511 chk->whoTo = chk->asoc->primary_destination;
6512 chk->whoTo->ref_count++;
6513 TAILQ_INSERT_TAIL(&chk->asoc->control_send_queue, chk, sctp_next);
6514 chk->asoc->ctrl_queue_cnt++;
6515 return (0);
6519 sctp_send_asconf_ack(struct sctp_tcb *stcb, uint32_t retrans)
6522 * formulate and queue a asconf-ack back to sender
6523 * the asconf-ack must be stored in the tcb
6525 struct sctp_tmit_chunk *chk;
6526 struct mbuf *m_ack;
6528 /* is there a asconf-ack mbuf chain to send? */
6529 if (stcb->asoc.last_asconf_ack_sent == NULL) {
6530 return (-1);
6533 /* copy the asconf_ack */
6534 #if defined(__FreeBSD__) || defined(__NetBSD__)
6535 /* Supposedly the m_copypacket is a optimzation,
6536 * use it if we can.
6538 if (stcb->asoc.last_asconf_ack_sent->m_flags & M_PKTHDR) {
6539 m_ack = m_copypacket(stcb->asoc.last_asconf_ack_sent, MB_DONTWAIT);
6540 sctp_pegs[SCTP_CACHED_SRC]++;
6541 } else
6542 m_ack = m_copy(stcb->asoc.last_asconf_ack_sent, 0, M_COPYALL);
6543 #else
6544 m_ack = m_copy(stcb->asoc.last_asconf_ack_sent, 0, M_COPYALL);
6545 #endif
6546 if (m_ack == NULL) {
6547 /* couldn't copy it */
6549 return (-1);
6551 chk = (struct sctp_tmit_chunk *)SCTP_ZONE_GET(sctppcbinfo.ipi_zone_chunk);
6552 if (chk == NULL) {
6553 /* no memory */
6554 if (m_ack)
6555 sctp_m_freem(m_ack);
6556 return (-1);
6558 sctppcbinfo.ipi_count_chunk++;
6559 sctppcbinfo.ipi_gencnt_chunk++;
6561 /* figure out where it goes to */
6562 if (retrans) {
6563 /* we're doing a retransmission */
6564 if (stcb->asoc.used_alt_asconfack > 2) {
6565 /* tried alternate nets already, go back */
6566 chk->whoTo = NULL;
6567 } else {
6568 /* need to try and alternate net */
6569 chk->whoTo = sctp_find_alternate_net(stcb, stcb->asoc.last_control_chunk_from);
6570 stcb->asoc.used_alt_asconfack++;
6572 if (chk->whoTo == NULL) {
6573 /* no alternate */
6574 if (stcb->asoc.last_control_chunk_from == NULL)
6575 chk->whoTo = stcb->asoc.primary_destination;
6576 else
6577 chk->whoTo = stcb->asoc.last_control_chunk_from;
6578 stcb->asoc.used_alt_asconfack = 0;
6580 } else {
6581 /* normal case */
6582 if (stcb->asoc.last_control_chunk_from == NULL)
6583 chk->whoTo = stcb->asoc.primary_destination;
6584 else
6585 chk->whoTo = stcb->asoc.last_control_chunk_from;
6586 stcb->asoc.used_alt_asconfack = 0;
6588 chk->data = m_ack;
6589 chk->send_size = m_ack->m_pkthdr.len;
6590 chk->rec.chunk_id = SCTP_ASCONF_ACK;
6591 chk->sent = SCTP_DATAGRAM_UNSENT;
6592 chk->snd_count = 0;
6593 chk->flags = 0;
6594 chk->asoc = &stcb->asoc;
6595 chk->whoTo->ref_count++;
6596 TAILQ_INSERT_TAIL(&chk->asoc->control_send_queue, chk, sctp_next);
6597 chk->asoc->ctrl_queue_cnt++;
6598 return (0);
6602 static int
6603 sctp_chunk_retransmission(struct sctp_inpcb *inp,
6604 struct sctp_tcb *stcb,
6605 struct sctp_association *asoc,
6606 int *cnt_out, struct timeval *now, int *now_filled)
6609 * send out one MTU of retransmission.
6610 * If fast_retransmit is happening we ignore the cwnd.
6611 * Otherwise we obey the cwnd and rwnd.
6612 * For a Cookie or Asconf in the control chunk queue we retransmit
6613 * them by themselves.
6615 * For data chunks we will pick out the lowest TSN's in the
6616 * sent_queue marked for resend and bundle them all together
6617 * (up to a MTU of destination). The address to send to should
6618 * have been selected/changed where the retransmission was
6619 * marked (i.e. in FR or t3-timeout routines).
6621 struct sctp_tmit_chunk *data_list[SCTP_MAX_DATA_BUNDLING];
6622 struct sctp_tmit_chunk *chk, *fwd;
6623 struct mbuf *m;
6624 struct sctphdr *shdr;
6625 int asconf;
6626 struct sctp_nets *net;
6627 int no_fragmentflg, bundle_at, cnt_thru;
6628 unsigned int mtu;
6629 int error, i, one_chunk, fwd_tsn, ctl_cnt, tmr_started;
6631 tmr_started = ctl_cnt = bundle_at = error = 0;
6632 no_fragmentflg = 1;
6633 asconf = 0;
6634 fwd_tsn = 0;
6635 *cnt_out = 0;
6636 fwd = NULL;
6637 m = NULL;
6638 #ifdef SCTP_AUDITING_ENABLED
6639 sctp_audit_log(0xC3, 1);
6640 #endif
6641 if (TAILQ_EMPTY(&asoc->sent_queue)) {
6642 #ifdef SCTP_DEBUG
6643 if (sctp_debug_on & SCTP_DEBUG_OUTPUT1) {
6644 kprintf("SCTP hits empty queue with cnt set to %d?\n",
6645 asoc->sent_queue_retran_cnt);
6647 #endif
6648 asoc->sent_queue_cnt = 0;
6649 asoc->sent_queue_cnt_removeable = 0;
6651 TAILQ_FOREACH(chk, &asoc->control_send_queue, sctp_next) {
6652 if (chk->sent != SCTP_DATAGRAM_RESEND) {
6653 /* we only worry about things marked for resend */
6654 continue;
6656 if ((chk->rec.chunk_id == SCTP_COOKIE_ECHO) ||
6657 (chk->rec.chunk_id == SCTP_ASCONF) ||
6658 (chk->rec.chunk_id == SCTP_STREAM_RESET) ||
6659 (chk->rec.chunk_id == SCTP_FORWARD_CUM_TSN)) {
6660 if (chk->rec.chunk_id == SCTP_STREAM_RESET) {
6661 /* For stream reset we only retran the request
6662 * not the response.
6664 struct sctp_stream_reset_req *strreq;
6665 strreq = mtod(chk->data, struct sctp_stream_reset_req *);
6666 if (strreq->sr_req.ph.param_type != ntohs(SCTP_STR_RESET_REQUEST)) {
6667 continue;
6670 ctl_cnt++;
6671 if (chk->rec.chunk_id == SCTP_ASCONF) {
6672 no_fragmentflg = 1;
6673 asconf = 1;
6675 if (chk->rec.chunk_id == SCTP_FORWARD_CUM_TSN) {
6676 fwd_tsn = 1;
6677 fwd = chk;
6679 m = sctp_copy_mbufchain(chk->data, m);
6680 break;
6683 one_chunk = 0;
6684 cnt_thru = 0;
6685 /* do we have control chunks to retransmit? */
6686 if (m != NULL) {
6687 /* Start a timer no matter if we suceed or fail */
6688 if (chk->rec.chunk_id == SCTP_COOKIE_ECHO) {
6689 sctp_timer_start(SCTP_TIMER_TYPE_COOKIE, inp, stcb, chk->whoTo);
6690 } else if (chk->rec.chunk_id == SCTP_ASCONF)
6691 sctp_timer_start(SCTP_TIMER_TYPE_ASCONF, inp, stcb, chk->whoTo);
6693 if (m->m_len == 0) {
6694 /* Special case for when you get a 0 len
6695 * mbuf at the head due to the lack
6696 * of a MHDR at the beginning.
6698 m->m_len = sizeof(struct sctphdr);
6699 } else {
6700 M_PREPEND(m, sizeof(struct sctphdr), MB_DONTWAIT);
6701 if (m == NULL) {
6702 return (ENOBUFS);
6705 shdr = mtod(m, struct sctphdr *);
6706 shdr->src_port = inp->sctp_lport;
6707 shdr->dest_port = stcb->rport;
6708 shdr->v_tag = htonl(stcb->asoc.peer_vtag);
6709 shdr->checksum = 0;
6710 chk->snd_count++; /* update our count */
6712 if ((error = sctp_lowlevel_chunk_output(inp, stcb, chk->whoTo,
6713 (struct sockaddr *)&chk->whoTo->ro._l_addr, m,
6714 no_fragmentflg, 0, NULL, asconf))) {
6715 sctp_pegs[SCTP_DATA_OUT_ERR]++;
6716 return (error);
6719 *We don't want to mark the net->sent time here since this
6720 * we use this for HB and retrans cannot measure RTT
6722 /* SCTP_GETTIME_TIMEVAL(&chk->whoTo->last_sent_time);*/
6723 *cnt_out += 1;
6724 chk->sent = SCTP_DATAGRAM_SENT;
6725 asoc->sent_queue_retran_cnt--;
6726 if (asoc->sent_queue_retran_cnt < 0) {
6727 asoc->sent_queue_retran_cnt = 0;
6729 if (fwd_tsn == 0) {
6730 return (0);
6731 } else {
6732 /* Clean up the fwd-tsn list */
6733 sctp_clean_up_ctl (asoc);
6734 return (0);
6737 /* Ok, it is just data retransmission we need to do or
6738 * that and a fwd-tsn with it all.
6740 if (TAILQ_EMPTY(&asoc->sent_queue)) {
6741 return (-1);
6743 #ifdef SCTP_DEBUG
6744 if (sctp_debug_on & SCTP_DEBUG_OUTPUT1) {
6745 kprintf("Normal chunk retransmission cnt:%d\n",
6746 asoc->sent_queue_retran_cnt);
6748 #endif
6749 if ((SCTP_GET_STATE(asoc) == SCTP_STATE_COOKIE_ECHOED) ||
6750 (SCTP_GET_STATE(asoc) == SCTP_STATE_COOKIE_WAIT)) {
6751 /* not yet open, resend the cookie and that is it */
6752 return (1);
6756 #ifdef SCTP_AUDITING_ENABLED
6757 sctp_auditing(20, inp, stcb, NULL);
6758 #endif
6759 TAILQ_FOREACH(chk, &asoc->sent_queue, sctp_next) {
6760 if (chk->sent != SCTP_DATAGRAM_RESEND) {
6761 /* No, not sent to this net or not ready for rtx */
6762 continue;
6765 /* pick up the net */
6766 net = chk->whoTo;
6767 if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) {
6768 mtu = (net->mtu - SCTP_MIN_OVERHEAD);
6769 } else {
6770 mtu = net->mtu- SCTP_MIN_V4_OVERHEAD;
6773 if ((asoc->peers_rwnd < mtu) && (asoc->total_flight > 0)) {
6774 /* No room in peers rwnd */
6775 uint32_t tsn;
6776 tsn = asoc->last_acked_seq + 1;
6777 if (tsn == chk->rec.data.TSN_seq) {
6778 /* we make a special exception for this case.
6779 * The peer has no rwnd but is missing the
6780 * lowest chunk.. which is probably what is
6781 * holding up the rwnd.
6783 goto one_chunk_around;
6785 #ifdef SCTP_DEBUG
6786 if (sctp_debug_on & SCTP_DEBUG_OUTPUT1) {
6787 kprintf("blocked-peers_rwnd:%d tf:%d\n",
6788 (int)asoc->peers_rwnd,
6789 (int)asoc->total_flight);
6791 #endif
6792 sctp_pegs[SCTP_RWND_BLOCKED]++;
6793 return (1);
6795 one_chunk_around:
6796 if (asoc->peers_rwnd < mtu) {
6797 one_chunk = 1;
6799 #ifdef SCTP_AUDITING_ENABLED
6800 sctp_audit_log(0xC3, 2);
6801 #endif
6802 bundle_at = 0;
6803 m = NULL;
6804 net->fast_retran_ip = 0;
6805 if (chk->rec.data.doing_fast_retransmit == 0) {
6806 /* if no FR in progress skip destination that
6807 * have flight_size > cwnd.
6809 if (net->flight_size >= net->cwnd) {
6810 sctp_pegs[SCTP_CWND_BLOCKED]++;
6811 continue;
6813 } else {
6814 /* Mark the destination net to have FR recovery
6815 * limits put on it.
6817 net->fast_retran_ip = 1;
6820 if ((chk->send_size <= mtu) || (chk->flags & CHUNK_FLAGS_FRAGMENT_OK)) {
6821 /* ok we will add this one */
6822 m = sctp_copy_mbufchain(chk->data, m);
6823 if (m == NULL) {
6824 return (ENOMEM);
6826 /* upate our MTU size */
6827 /* Do clear IP_DF ? */
6828 if (chk->flags & CHUNK_FLAGS_FRAGMENT_OK) {
6829 no_fragmentflg = 0;
6831 mtu -= chk->send_size;
6832 data_list[bundle_at++] = chk;
6833 if (one_chunk && (asoc->total_flight <= 0)) {
6834 sctp_pegs[SCTP_WINDOW_PROBES]++;
6835 chk->rec.data.state_flags |= SCTP_WINDOW_PROBE;
6838 if (one_chunk == 0) {
6839 /* now are there anymore forward from chk to pick up?*/
6840 fwd = TAILQ_NEXT(chk, sctp_next);
6841 while (fwd) {
6842 if (fwd->sent != SCTP_DATAGRAM_RESEND) {
6843 /* Nope, not for retran */
6844 fwd = TAILQ_NEXT(fwd, sctp_next);
6845 continue;
6847 if (fwd->whoTo != net) {
6848 /* Nope, not the net in question */
6849 fwd = TAILQ_NEXT(fwd, sctp_next);
6850 continue;
6852 if (fwd->send_size <= mtu) {
6853 m = sctp_copy_mbufchain(fwd->data, m);
6854 if (m == NULL) {
6855 return (ENOMEM);
6857 /* upate our MTU size */
6858 /* Do clear IP_DF ? */
6859 if (fwd->flags & CHUNK_FLAGS_FRAGMENT_OK) {
6860 no_fragmentflg = 0;
6862 mtu -= fwd->send_size;
6863 data_list[bundle_at++] = fwd;
6864 if (bundle_at >= SCTP_MAX_DATA_BUNDLING) {
6865 break;
6867 fwd = TAILQ_NEXT(fwd, sctp_next);
6868 } else {
6869 /* can't fit so we are done */
6870 break;
6874 /* Is there something to send for this destination? */
6875 if (m) {
6876 /* No matter if we fail/or suceed we should
6877 * start a timer. A failure is like a lost
6878 * IP packet :-)
6880 if (!callout_pending(&net->rxt_timer.timer)) {
6881 /* no timer running on this destination
6882 * restart it.
6884 sctp_timer_start(SCTP_TIMER_TYPE_SEND, inp, stcb, net);
6885 tmr_started = 1;
6887 if (m->m_len == 0) {
6888 /* Special case for when you get a 0 len
6889 * mbuf at the head due to the lack
6890 * of a MHDR at the beginning.
6892 m->m_len = sizeof(struct sctphdr);
6893 } else {
6894 M_PREPEND(m, sizeof(struct sctphdr), MB_DONTWAIT);
6895 if (m == NULL) {
6896 return (ENOBUFS);
6899 shdr = mtod(m, struct sctphdr *);
6900 shdr->src_port = inp->sctp_lport;
6901 shdr->dest_port = stcb->rport;
6902 shdr->v_tag = htonl(stcb->asoc.peer_vtag);
6903 shdr->checksum = 0;
6905 /* Now lets send it, if there is anything to send :> */
6906 if ((error = sctp_lowlevel_chunk_output(inp, stcb, net,
6907 (struct sockaddr *)&net->ro._l_addr,
6909 no_fragmentflg, 0, NULL, asconf))) {
6910 /* error, we could not output */
6911 sctp_pegs[SCTP_DATA_OUT_ERR]++;
6912 return (error);
6914 /* For HB's */
6916 * We don't want to mark the net->sent time here since
6917 * this we use this for HB and retrans cannot measure
6918 * RTT
6920 /* SCTP_GETTIME_TIMEVAL(&net->last_sent_time);*/
6922 /* For auto-close */
6923 cnt_thru++;
6924 if (*now_filled == 0) {
6925 SCTP_GETTIME_TIMEVAL(&asoc->time_last_sent);
6926 *now = asoc->time_last_sent;
6927 *now_filled = 1;
6928 } else {
6929 asoc->time_last_sent = *now;
6931 *cnt_out += bundle_at;
6932 #ifdef SCTP_AUDITING_ENABLED
6933 sctp_audit_log(0xC4, bundle_at);
6934 #endif
6935 for (i = 0; i < bundle_at; i++) {
6936 sctp_pegs[SCTP_RETRANTSN_SENT]++;
6937 data_list[i]->sent = SCTP_DATAGRAM_SENT;
6938 data_list[i]->snd_count++;
6939 asoc->sent_queue_retran_cnt--;
6940 /* record the time */
6941 data_list[i]->sent_rcv_time = asoc->time_last_sent;
6942 if (asoc->sent_queue_retran_cnt < 0) {
6943 asoc->sent_queue_retran_cnt = 0;
6945 net->flight_size += data_list[i]->book_size;
6946 asoc->total_flight += data_list[i]->book_size;
6947 asoc->total_flight_count++;
6949 #ifdef SCTP_LOG_RWND
6950 sctp_log_rwnd(SCTP_DECREASE_PEER_RWND,
6951 asoc->peers_rwnd , data_list[i]->send_size, sctp_peer_chunk_oh);
6952 #endif
6953 asoc->peers_rwnd = sctp_sbspace_sub(asoc->peers_rwnd,
6954 (u_int32_t)(data_list[i]->send_size + sctp_peer_chunk_oh));
6955 if (asoc->peers_rwnd < stcb->sctp_ep->sctp_ep.sctp_sws_sender) {
6956 /* SWS sender side engages */
6957 asoc->peers_rwnd = 0;
6960 if ((i == 0) &&
6961 (data_list[i]->rec.data.doing_fast_retransmit)) {
6962 sctp_pegs[SCTP_FAST_RETRAN]++;
6963 if ((data_list[i] == TAILQ_FIRST(&asoc->sent_queue)) &&
6964 (tmr_started == 0)) {
6966 * ok we just fast-retrans'd
6967 * the lowest TSN, i.e the
6968 * first on the list. In this
6969 * case we want to give some
6970 * more time to get a SACK
6971 * back without a t3-expiring.
6973 sctp_timer_stop(SCTP_TIMER_TYPE_SEND, inp, stcb, net);
6974 sctp_timer_start(SCTP_TIMER_TYPE_SEND, inp, stcb, net);
6978 #ifdef SCTP_AUDITING_ENABLED
6979 sctp_auditing(21, inp, stcb, NULL);
6980 #endif
6981 } else {
6982 /* None will fit */
6983 return (1);
6985 if (asoc->sent_queue_retran_cnt <= 0) {
6986 /* all done we have no more to retran */
6987 asoc->sent_queue_retran_cnt = 0;
6988 break;
6990 if (one_chunk) {
6991 /* No more room in rwnd */
6992 return (1);
6994 /* stop the for loop here. we sent out a packet */
6995 break;
6997 return (0);
7001 static int
7002 sctp_timer_validation(struct sctp_inpcb *inp,
7003 struct sctp_tcb *stcb,
7004 struct sctp_association *asoc,
7005 int ret)
7007 struct sctp_nets *net;
7008 /* Validate that a timer is running somewhere */
7009 TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
7010 if (callout_pending(&net->rxt_timer.timer)) {
7011 /* Here is a timer */
7012 return (ret);
7015 /* Gak, we did not have a timer somewhere */
7016 #ifdef SCTP_DEBUG
7017 if (sctp_debug_on & SCTP_DEBUG_OUTPUT3) {
7018 kprintf("Deadlock avoided starting timer on a dest at retran\n");
7020 #endif
7021 sctp_timer_start(SCTP_TIMER_TYPE_SEND, inp, stcb, asoc->primary_destination);
7022 return (ret);
7026 sctp_chunk_output(struct sctp_inpcb *inp,
7027 struct sctp_tcb *stcb,
7028 int from_where)
7030 /* Ok this is the generic chunk service queue.
7031 * we must do the following:
7032 * - See if there are retransmits pending, if so we
7033 * must do these first and return.
7034 * - Service the stream queue that is next,
7035 * moving any message (note I must get a complete
7036 * message i.e. FIRST/MIDDLE and LAST to the out
7037 * queue in one pass) and assigning TSN's
7038 * - Check to see if the cwnd/rwnd allows any output, if
7039 * so we go ahead and fomulate and send the low level
7040 * chunks. Making sure to combine any control in the
7041 * control chunk queue also.
7043 struct sctp_association *asoc;
7044 struct sctp_nets *net;
7045 int error, num_out, tot_out, ret, reason_code, burst_cnt, burst_limit;
7046 struct timeval now;
7047 int now_filled=0;
7048 int cwnd_full=0;
7049 asoc = &stcb->asoc;
7050 tot_out = 0;
7051 num_out = 0;
7052 reason_code = 0;
7053 sctp_pegs[SCTP_CALLS_TO_CO]++;
7054 #ifdef SCTP_DEBUG
7055 if (sctp_debug_on & SCTP_DEBUG_OUTPUT3) {
7056 kprintf("in co - retran count:%d\n", asoc->sent_queue_retran_cnt);
7058 #endif
7059 while (asoc->sent_queue_retran_cnt) {
7060 /* Ok, it is retransmission time only, we send out only ONE
7061 * packet with a single call off to the retran code.
7063 ret = sctp_chunk_retransmission(inp, stcb, asoc, &num_out, &now, &now_filled);
7064 if (ret > 0) {
7065 /* Can't send anymore */
7066 #ifdef SCTP_DEBUG
7067 if (sctp_debug_on & SCTP_DEBUG_OUTPUT1) {
7068 kprintf("retransmission ret:%d -- full\n", ret);
7070 #endif
7072 * now lets push out control by calling med-level
7073 * output once. this assures that we WILL send HB's
7074 * if queued too.
7076 sctp_med_chunk_output(inp, stcb, asoc, &num_out, &reason_code, 1,
7077 &cwnd_full, from_where,
7078 &now, &now_filled);
7079 #ifdef SCTP_DEBUG
7080 if (sctp_debug_on & SCTP_DEBUG_OUTPUT1) {
7081 kprintf("Control send outputs:%d@full\n", num_out);
7083 #endif
7084 #ifdef SCTP_AUDITING_ENABLED
7085 sctp_auditing(8, inp, stcb, NULL);
7086 #endif
7087 return (sctp_timer_validation(inp, stcb, asoc, ret));
7089 if (ret < 0) {
7091 * The count was off.. retran is not happening so do
7092 * the normal retransmission.
7094 #ifdef SCTP_DEBUG
7095 if (sctp_debug_on & SCTP_DEBUG_OUTPUT1) {
7096 kprintf("Done with retrans, none left fill up window\n");
7098 #endif
7099 #ifdef SCTP_AUDITING_ENABLED
7100 sctp_auditing(9, inp, stcb, NULL);
7101 #endif
7102 break;
7104 if (from_where == 1) {
7105 /* Only one transmission allowed out of a timeout */
7106 #ifdef SCTP_DEBUG
7107 if (sctp_debug_on & SCTP_DEBUG_OUTPUT1) {
7108 kprintf("Only one packet allowed out\n");
7110 #endif
7111 #ifdef SCTP_AUDITING_ENABLED
7112 sctp_auditing(10, inp, stcb, NULL);
7113 #endif
7114 /* Push out any control */
7115 sctp_med_chunk_output(inp, stcb, asoc, &num_out, &reason_code, 1, &cwnd_full, from_where,
7116 &now, &now_filled);
7117 return (ret);
7119 if ((num_out == 0) && (ret == 0)) {
7120 /* No more retrans to send */
7121 break;
7124 #ifdef SCTP_AUDITING_ENABLED
7125 sctp_auditing(12, inp, stcb, NULL);
7126 #endif
7127 /* Check for bad destinations, if they exist move chunks around. */
7128 burst_limit = asoc->max_burst;
7129 TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
7130 if ((net->dest_state & SCTP_ADDR_NOT_REACHABLE) ==
7131 SCTP_ADDR_NOT_REACHABLE) {
7133 * if possible move things off of this address
7134 * we still may send below due to the dormant state
7135 * but we try to find an alternate address to send
7136 * to and if we have one we move all queued data on
7137 * the out wheel to this alternate address.
7139 sctp_move_to_an_alt(stcb, asoc, net);
7140 } else {
7142 if ((asoc->sat_network) || (net->addr_is_local)) {
7143 burst_limit = asoc->max_burst * SCTP_SAT_NETWORK_BURST_INCR;
7146 #ifdef SCTP_DEBUG
7147 if (sctp_debug_on & SCTP_DEBUG_OUTPUT3) {
7148 kprintf("examined net:%p burst limit:%d\n", net, asoc->max_burst);
7150 #endif
7152 #ifdef SCTP_USE_ALLMAN_BURST
7153 if ((net->flight_size+(burst_limit*net->mtu)) < net->cwnd) {
7154 if (net->ssthresh < net->cwnd)
7155 net->ssthresh = net->cwnd;
7156 net->cwnd = (net->flight_size+(burst_limit*net->mtu));
7157 #ifdef SCTP_LOG_MAXBURST
7158 sctp_log_maxburst(net, 0, burst_limit, SCTP_MAX_BURST_APPLIED);
7159 #endif
7160 sctp_pegs[SCTP_MAX_BURST_APL]++;
7162 net->fast_retran_ip = 0;
7163 #endif
7167 /* Fill up what we can to the destination */
7168 burst_cnt = 0;
7169 cwnd_full = 0;
7170 do {
7171 #ifdef SCTP_DEBUG
7172 if (sctp_debug_on & SCTP_DEBUG_OUTPUT3) {
7173 kprintf("Burst count:%d - call m-c-o\n", burst_cnt);
7175 #endif
7176 error = sctp_med_chunk_output(inp, stcb, asoc, &num_out,
7177 &reason_code, 0, &cwnd_full, from_where,
7178 &now, &now_filled);
7179 if (error) {
7180 #ifdef SCTP_DEBUG
7181 if (sctp_debug_on & SCTP_DEBUG_OUTPUT1) {
7182 kprintf("Error %d was returned from med-c-op\n", error);
7184 #endif
7185 #ifdef SCTP_LOG_MAXBURST
7186 sctp_log_maxburst(asoc->primary_destination, error , burst_cnt, SCTP_MAX_BURST_ERROR_STOP);
7187 #endif
7188 break;
7190 #ifdef SCTP_DEBUG
7191 if (sctp_debug_on & SCTP_DEBUG_OUTPUT3) {
7192 kprintf("m-c-o put out %d\n", num_out);
7194 #endif
7195 tot_out += num_out;
7196 burst_cnt++;
7197 } while (num_out
7198 #ifndef SCTP_USE_ALLMAN_BURST
7199 && (burst_cnt < burst_limit)
7200 #endif
7202 #ifndef SCTP_USE_ALLMAN_BURST
7203 if (burst_cnt >= burst_limit) {
7204 sctp_pegs[SCTP_MAX_BURST_APL]++;
7205 asoc->burst_limit_applied = 1;
7206 #ifdef SCTP_LOG_MAXBURST
7207 sctp_log_maxburst(asoc->primary_destination, 0 , burst_cnt, SCTP_MAX_BURST_APPLIED);
7208 #endif
7209 } else {
7210 asoc->burst_limit_applied = 0;
7212 #endif
7214 #ifdef SCTP_DEBUG
7215 if (sctp_debug_on & SCTP_DEBUG_OUTPUT1) {
7216 kprintf("Ok, we have put out %d chunks\n", tot_out);
7218 #endif
7219 if (tot_out == 0) {
7220 sctp_pegs[SCTP_CO_NODATASNT]++;
7221 if (asoc->stream_queue_cnt > 0) {
7222 sctp_pegs[SCTP_SOS_NOSNT]++;
7223 } else {
7224 sctp_pegs[SCTP_NOS_NOSNT]++;
7226 if (asoc->send_queue_cnt > 0) {
7227 sctp_pegs[SCTP_SOSE_NOSNT]++;
7228 } else {
7229 sctp_pegs[SCTP_NOSE_NOSNT]++;
7232 /* Now we need to clean up the control chunk chain if
7233 * a ECNE is on it. It must be marked as UNSENT again
7234 * so next call will continue to send it until
7235 * such time that we get a CWR, to remove it.
7237 sctp_fix_ecn_echo(asoc);
7238 return (error);
7243 sctp_output(struct sctp_inpcb *inp, struct mbuf *m, struct sockaddr *addr,
7244 struct mbuf *control, struct thread *p, int flags)
7246 struct inpcb *ip_inp;
7247 struct sctp_inpcb *t_inp;
7248 struct sctp_tcb *stcb;
7249 struct sctp_nets *net;
7250 struct sctp_association *asoc;
7251 int create_lock_applied = 0;
7252 int queue_only, error = 0;
7253 struct sctp_sndrcvinfo srcv;
7254 int un_sent = 0;
7255 int use_rcvinfo = 0;
7256 t_inp = inp;
7257 /* struct route ro;*/
7259 crit_enter();
7260 queue_only = 0;
7261 ip_inp = (struct inpcb *)inp;
7262 stcb = NULL;
7263 asoc = NULL;
7264 net = NULL;
7266 #ifdef SCTP_DEBUG
7267 if (sctp_debug_on & SCTP_DEBUG_OUTPUT1) {
7268 kprintf("USR Send BEGINS\n");
7270 #endif
7272 if ((inp->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) &&
7273 (inp->sctp_flags & SCTP_PCB_FLAGS_ACCEPTING)) {
7274 /* The listner can NOT send */
7275 if (control) {
7276 sctppcbinfo.mbuf_track--;
7277 sctp_m_freem(control);
7278 control = NULL;
7280 sctp_m_freem(m);
7281 crit_exit();
7282 return (EFAULT);
7284 /* Can't allow a V6 address on a non-v6 socket */
7285 if (addr) {
7286 SCTP_ASOC_CREATE_LOCK(inp);
7287 if ((inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) ||
7288 (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE)) {
7289 /* Should I really unlock ? */
7290 SCTP_ASOC_CREATE_UNLOCK(inp);
7291 if (control) {
7292 sctppcbinfo.mbuf_track--;
7293 sctp_m_freem(control);
7294 control = NULL;
7296 sctp_m_freem(m);
7297 crit_exit();
7298 return (EFAULT);
7300 create_lock_applied = 1;
7301 if (((inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) == 0) &&
7302 (addr->sa_family == AF_INET6)) {
7303 SCTP_ASOC_CREATE_UNLOCK(inp);
7304 if (control) {
7305 sctppcbinfo.mbuf_track--;
7306 sctp_m_freem(control);
7307 control = NULL;
7309 sctp_m_freem(m);
7310 crit_exit();
7311 return (EINVAL);
7314 if (control) {
7315 sctppcbinfo.mbuf_track++;
7316 if (sctp_find_cmsg(SCTP_SNDRCV, (void *)&srcv, control,
7317 sizeof(srcv))) {
7318 if (srcv.sinfo_flags & MSG_SENDALL) {
7319 /* its a sendall */
7320 sctppcbinfo.mbuf_track--;
7321 sctp_m_freem(control);
7322 crit_exit();
7323 if (create_lock_applied) {
7324 SCTP_ASOC_CREATE_UNLOCK(inp);
7325 create_lock_applied = 0;
7327 return (sctp_sendall(inp, NULL, m, &srcv));
7329 if (srcv.sinfo_assoc_id) {
7330 if (inp->sctp_flags & SCTP_PCB_FLAGS_CONNECTED) {
7331 SCTP_INP_RLOCK(inp);
7332 stcb = LIST_FIRST(&inp->sctp_asoc_list);
7333 if (stcb)
7334 SCTP_TCB_LOCK(stcb);
7335 SCTP_INP_RUNLOCK(inp);
7337 if (stcb == NULL) {
7338 if (create_lock_applied) {
7339 SCTP_ASOC_CREATE_UNLOCK(inp);
7340 create_lock_applied = 0;
7342 sctppcbinfo.mbuf_track--;
7343 sctp_m_freem(control);
7344 sctp_m_freem(m);
7345 crit_exit();
7346 return (ENOTCONN);
7348 net = stcb->asoc.primary_destination;
7349 } else {
7350 stcb = sctp_findassociation_ep_asocid(inp, srcv.sinfo_assoc_id);
7353 * Question: Should I error here if the
7355 * assoc_id is no longer valid?
7356 * i.e. I can't find it?
7358 if ((stcb) &&
7359 (addr != NULL)) {
7360 /* Must locate the net structure */
7361 if (addr)
7362 net = sctp_findnet(stcb, addr);
7364 if (net == NULL)
7365 net = stcb->asoc.primary_destination;
7367 use_rcvinfo = 1;
7370 if (stcb == NULL) {
7371 if (inp->sctp_flags & SCTP_PCB_FLAGS_CONNECTED) {
7372 SCTP_INP_RLOCK(inp);
7373 stcb = LIST_FIRST(&inp->sctp_asoc_list);
7374 if (stcb)
7375 SCTP_TCB_LOCK(stcb);
7376 SCTP_INP_RUNLOCK(inp);
7377 if (stcb == NULL) {
7378 crit_exit();
7379 if (create_lock_applied) {
7380 SCTP_ASOC_CREATE_UNLOCK(inp);
7381 create_lock_applied = 0;
7383 if (control) {
7384 sctppcbinfo.mbuf_track--;
7385 sctp_m_freem(control);
7386 control = NULL;
7388 sctp_m_freem(m);
7389 return (ENOTCONN);
7391 if (addr == NULL) {
7392 net = stcb->asoc.primary_destination;
7393 } else {
7394 net = sctp_findnet(stcb, addr);
7395 if (net == NULL) {
7396 net = stcb->asoc.primary_destination;
7399 } else {
7400 if (addr != NULL) {
7401 SCTP_INP_WLOCK(inp);
7402 SCTP_INP_INCR_REF(inp);
7403 SCTP_INP_WUNLOCK(inp);
7404 stcb = sctp_findassociation_ep_addr(&t_inp, addr, &net, NULL, NULL);
7405 if (stcb == NULL) {
7406 SCTP_INP_WLOCK(inp);
7407 SCTP_INP_DECR_REF(inp);
7408 SCTP_INP_WUNLOCK(inp);
7413 if ((stcb == NULL) &&
7414 (inp->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE)) {
7415 if (control) {
7416 sctppcbinfo.mbuf_track--;
7417 sctp_m_freem(control);
7418 control = NULL;
7420 if (create_lock_applied) {
7421 SCTP_ASOC_CREATE_UNLOCK(inp);
7422 create_lock_applied = 0;
7424 sctp_m_freem(m);
7425 crit_exit();
7426 return (ENOTCONN);
7427 } else if ((stcb == NULL) &&
7428 (addr == NULL)) {
7429 if (control) {
7430 sctppcbinfo.mbuf_track--;
7431 sctp_m_freem(control);
7432 control = NULL;
7434 if (create_lock_applied) {
7435 SCTP_ASOC_CREATE_UNLOCK(inp);
7436 create_lock_applied = 0;
7438 sctp_m_freem(m);
7439 crit_exit();
7440 return (ENOENT);
7441 } else if (stcb == NULL) {
7442 /* UDP mode, we must go ahead and start the INIT process */
7443 if ((use_rcvinfo) && (srcv.sinfo_flags & MSG_ABORT)) {
7444 /* Strange user to do this */
7445 if (control) {
7446 sctppcbinfo.mbuf_track--;
7447 sctp_m_freem(control);
7448 control = NULL;
7450 if (create_lock_applied) {
7451 SCTP_ASOC_CREATE_UNLOCK(inp);
7452 create_lock_applied = 0;
7454 sctp_m_freem(m);
7455 crit_exit();
7456 return (ENOENT);
7458 stcb = sctp_aloc_assoc(inp, addr, 1, &error, 0);
7459 if (stcb == NULL) {
7460 if (control) {
7461 sctppcbinfo.mbuf_track--;
7462 sctp_m_freem(control);
7463 control = NULL;
7465 if (create_lock_applied) {
7466 SCTP_ASOC_CREATE_UNLOCK(inp);
7467 create_lock_applied = 0;
7469 sctp_m_freem(m);
7470 crit_exit();
7471 return (error);
7473 if (create_lock_applied) {
7474 SCTP_ASOC_CREATE_UNLOCK(inp);
7475 create_lock_applied = 0;
7476 } else {
7477 kprintf("Huh-1, create lock should have been applied!\n");
7479 queue_only = 1;
7480 asoc = &stcb->asoc;
7481 asoc->state = SCTP_STATE_COOKIE_WAIT;
7482 SCTP_GETTIME_TIMEVAL(&asoc->time_entered);
7483 if (control) {
7484 /* see if a init structure exists in cmsg headers */
7485 struct sctp_initmsg initm;
7486 int i;
7487 if (sctp_find_cmsg(SCTP_INIT, (void *)&initm, control,
7488 sizeof(initm))) {
7489 /* we have an INIT override of the default */
7490 if (initm.sinit_max_attempts)
7491 asoc->max_init_times = initm.sinit_max_attempts;
7492 if (initm.sinit_num_ostreams)
7493 asoc->pre_open_streams = initm.sinit_num_ostreams;
7494 if (initm.sinit_max_instreams)
7495 asoc->max_inbound_streams = initm.sinit_max_instreams;
7496 if (initm.sinit_max_init_timeo)
7497 asoc->initial_init_rto_max = initm.sinit_max_init_timeo;
7499 if (asoc->streamoutcnt < asoc->pre_open_streams) {
7500 /* Default is NOT correct */
7501 #ifdef SCTP_DEBUG
7502 if (sctp_debug_on & SCTP_DEBUG_OUTPUT1) {
7503 kprintf("Ok, defout:%d pre_open:%d\n",
7504 asoc->streamoutcnt, asoc->pre_open_streams);
7506 #endif
7507 FREE(asoc->strmout, M_PCB);
7508 asoc->strmout = NULL;
7509 asoc->streamoutcnt = asoc->pre_open_streams;
7510 MALLOC(asoc->strmout, struct sctp_stream_out *,
7511 asoc->streamoutcnt *
7512 sizeof(struct sctp_stream_out), M_PCB,
7513 MB_WAIT);
7514 for (i = 0; i < asoc->streamoutcnt; i++) {
7516 * inbound side must be set to 0xffff,
7517 * also NOTE when we get the INIT-ACK
7518 * back (for INIT sender) we MUST
7519 * reduce the count (streamoutcnt) but
7520 * first check if we sent to any of the
7521 * upper streams that were dropped (if
7522 * some were). Those that were dropped
7523 * must be notified to the upper layer
7524 * as failed to send.
7526 asoc->strmout[i].next_sequence_sent = 0x0;
7527 TAILQ_INIT(&asoc->strmout[i].outqueue);
7528 asoc->strmout[i].stream_no = i;
7529 asoc->strmout[i].next_spoke.tqe_next = 0;
7530 asoc->strmout[i].next_spoke.tqe_prev = 0;
7534 sctp_send_initiate(inp, stcb);
7536 * we may want to dig in after this call and adjust the MTU
7537 * value. It defaulted to 1500 (constant) but the ro structure
7538 * may now have an update and thus we may need to change it
7539 * BEFORE we append the message.
7541 net = stcb->asoc.primary_destination;
7542 } else {
7543 if (create_lock_applied) {
7544 SCTP_ASOC_CREATE_UNLOCK(inp);
7545 create_lock_applied = 0;
7547 asoc = &stcb->asoc;
7548 if ((SCTP_GET_STATE(asoc) == SCTP_STATE_COOKIE_WAIT) ||
7549 (SCTP_GET_STATE(asoc) == SCTP_STATE_COOKIE_ECHOED)) {
7550 queue_only = 1;
7552 if ((SCTP_GET_STATE(asoc) == SCTP_STATE_SHUTDOWN_SENT) ||
7553 (SCTP_GET_STATE(asoc) == SCTP_STATE_SHUTDOWN_RECEIVED) ||
7554 (SCTP_GET_STATE(asoc) == SCTP_STATE_SHUTDOWN_ACK_SENT) ||
7555 (asoc->state & SCTP_STATE_SHUTDOWN_PENDING)) {
7556 if (control) {
7557 sctppcbinfo.mbuf_track--;
7558 sctp_m_freem(control);
7559 control = NULL;
7561 if ((use_rcvinfo) &&
7562 (srcv.sinfo_flags & MSG_ABORT)) {
7563 sctp_msg_append(stcb, net, m, &srcv, flags);
7564 error = 0;
7565 } else {
7566 if (m)
7567 sctp_m_freem(m);
7568 error = ECONNRESET;
7570 crit_exit();
7571 SCTP_TCB_UNLOCK(stcb);
7572 return (error);
7575 if (create_lock_applied) {
7576 /* we should never hit here with the create lock applied
7579 SCTP_ASOC_CREATE_UNLOCK(inp);
7580 create_lock_applied = 0;
7584 if (use_rcvinfo == 0) {
7585 srcv = stcb->asoc.def_send;
7587 #ifdef SCTP_DEBUG
7588 else {
7589 if (sctp_debug_on & SCTP_DEBUG_OUTPUT5) {
7590 kprintf("stream:%d\n", srcv.sinfo_stream);
7591 kprintf("flags:%x\n", (u_int)srcv.sinfo_flags);
7592 kprintf("ppid:%d\n", srcv.sinfo_ppid);
7593 kprintf("context:%d\n", srcv.sinfo_context);
7596 #endif
7597 if (control) {
7598 sctppcbinfo.mbuf_track--;
7599 sctp_m_freem(control);
7600 control = NULL;
7602 if (net && ((srcv.sinfo_flags & MSG_ADDR_OVER))) {
7603 /* we take the override or the unconfirmed */
7605 } else {
7606 net = stcb->asoc.primary_destination;
7608 if ((error = sctp_msg_append(stcb, net, m, &srcv, flags))) {
7609 SCTP_TCB_UNLOCK(stcb);
7610 crit_exit();
7611 return (error);
7613 if (net->flight_size > net->cwnd) {
7614 sctp_pegs[SCTP_SENDTO_FULL_CWND]++;
7615 queue_only = 1;
7616 } else if (asoc->ifp_had_enobuf) {
7617 sctp_pegs[SCTP_QUEONLY_BURSTLMT]++;
7618 queue_only = 1;
7619 } else {
7620 un_sent = ((stcb->asoc.total_output_queue_size - stcb->asoc.total_flight) +
7621 ((stcb->asoc.chunks_on_out_queue - stcb->asoc.total_flight_count) * sizeof(struct sctp_data_chunk)) +
7622 SCTP_MED_OVERHEAD);
7624 if (((inp->sctp_flags & SCTP_PCB_FLAGS_NODELAY) == 0) &&
7625 (stcb->asoc.total_flight > 0) &&
7626 (un_sent < (int)stcb->asoc.smallest_mtu)
7629 /* Ok, Nagle is set on and we have
7630 * data outstanding. Don't send anything
7631 * and let the SACK drive out the data.
7633 sctp_pegs[SCTP_NAGLE_NOQ]++;
7634 queue_only = 1;
7635 } else {
7636 sctp_pegs[SCTP_NAGLE_OFF]++;
7639 if ((queue_only == 0) && stcb->asoc.peers_rwnd) {
7640 /* we can attempt to send too.*/
7641 #ifdef SCTP_DEBUG
7642 if (sctp_debug_on & SCTP_DEBUG_OUTPUT1) {
7643 kprintf("USR Send calls sctp_chunk_output\n");
7645 #endif
7646 #ifdef SCTP_AUDITING_ENABLED
7647 sctp_audit_log(0xC0, 1);
7648 sctp_auditing(6, inp, stcb, net);
7649 #endif
7650 sctp_pegs[SCTP_OUTPUT_FRM_SND]++;
7651 sctp_chunk_output(inp, stcb, 0);
7652 #ifdef SCTP_AUDITING_ENABLED
7653 sctp_audit_log(0xC0, 2);
7654 sctp_auditing(7, inp, stcb, net);
7655 #endif
7658 #ifdef SCTP_DEBUG
7659 if (sctp_debug_on & SCTP_DEBUG_OUTPUT1) {
7660 kprintf("USR Send complete qo:%d prw:%d\n", queue_only, stcb->asoc.peers_rwnd);
7662 #endif
7663 SCTP_TCB_UNLOCK(stcb);
7664 crit_exit();
7665 return (0);
/*
 * send_forward_tsn() - queue (or refresh) a FORWARD-TSN control chunk
 * (PR-SCTP, RFC 3758) telling the peer to advance its cumulative TSN past
 * datagrams we have abandoned.  If a FORWARD-TSN is already sitting on the
 * control queue it is recycled in place; otherwise a new chunk is allocated
 * and appended.  Either way the chunk body is (re)filled with the list of
 * stream/sequence pairs being skipped.
 */
7668 void
7669 send_forward_tsn(struct sctp_tcb *stcb,
7670 struct sctp_association *asoc)
7672 struct sctp_tmit_chunk *chk;
7673 struct sctp_forward_tsn_chunk *fwdtsn;
/* First look for an existing FORWARD-TSN on the control queue to reuse. */
7675 TAILQ_FOREACH(chk, &asoc->control_send_queue, sctp_next) {
7676 if (chk->rec.chunk_id == SCTP_FORWARD_CUM_TSN) {
7677 /* mark it to unsent */
7678 chk->sent = SCTP_DATAGRAM_UNSENT;
7679 chk->snd_count = 0;
7680 /* Do we correct its output location? */
7681 if (chk->whoTo != asoc->primary_destination) {
/* retarget the chunk at the (possibly new) primary path */
7682 sctp_free_remote_addr(chk->whoTo);
7683 chk->whoTo = asoc->primary_destination;
7684 chk->whoTo->ref_count++;
7686 goto sctp_fill_in_rest;
7689 /* Ok if we reach here we must build one */
7690 chk = (struct sctp_tmit_chunk *)SCTP_ZONE_GET(sctppcbinfo.ipi_zone_chunk);
7691 if (chk == NULL) {
/* out of chunk descriptors - silently give up, caller will retry later */
7692 return;
7694 sctppcbinfo.ipi_count_chunk++;
7695 sctppcbinfo.ipi_gencnt_chunk++;
7696 chk->rec.chunk_id = SCTP_FORWARD_CUM_TSN;
7697 chk->asoc = asoc;
7698 MGETHDR(chk->data, MB_DONTWAIT, MT_DATA);
7699 if (chk->data == NULL) {
/*
 * NOTE(review): chk->whoTo has not been assigned yet on this build path
 * (it is set further below); decrementing ref_count through an
 * uninitialized pointer here looks like a use of garbage - verify
 * against the upstream fix.
 */
7700 chk->whoTo->ref_count--;
7701 SCTP_ZONE_FREE(sctppcbinfo.ipi_zone_chunk, chk);
7702 sctppcbinfo.ipi_count_chunk--;
7703 if ((int)sctppcbinfo.ipi_count_chunk < 0) {
7704 panic("Chunk count is negative");
7706 sctppcbinfo.ipi_gencnt_chunk++;
7707 return;
/* reserve room so lower layers can prepend IP/SCTP headers in place */
7709 chk->data->m_data += SCTP_MIN_OVERHEAD;
7710 chk->sent = SCTP_DATAGRAM_UNSENT;
7711 chk->snd_count = 0;
7712 chk->whoTo = asoc->primary_destination;
7713 chk->whoTo->ref_count++;
7714 TAILQ_INSERT_TAIL(&asoc->control_send_queue, chk, sctp_next);
7715 asoc->ctrl_queue_cnt++;
7716 sctp_fill_in_rest:
7717 /* Here we go through and fill out the part that
7718 * deals with stream/seq of the ones we skip.
7720 chk->data->m_pkthdr.len = chk->data->m_len = 0;
7722 struct sctp_tmit_chunk *at, *tp1, *last;
7723 struct sctp_strseq *strseq;
7724 unsigned int cnt_of_space, i, ovh;
7725 unsigned int space_needed;
7726 unsigned int cnt_of_skipped = 0;
/* count how many sent-queue entries are marked for skipping (ordered only) */
7727 TAILQ_FOREACH(at, &asoc->sent_queue, sctp_next) {
7728 if (at->sent != SCTP_FORWARD_TSN_SKIP) {
7729 /* no more to look at */
7730 break;
7732 if (at->rec.data.rcv_flags & SCTP_DATA_UNORDERED) {
7733 /* We don't report these */
7734 continue;
7736 cnt_of_skipped++;
7738 space_needed = (sizeof(struct sctp_forward_tsn_chunk) +
7739 (cnt_of_skipped * sizeof(struct sctp_strseq)));
7740 if ((M_TRAILINGSPACE(chk->data) < (int)space_needed) &&
7741 ((chk->data->m_flags & M_EXT) == 0)) {
7742 /* Need a M_EXT, get one and move
7743 * fwdtsn to data area.
7745 MCLGET(chk->data, MB_DONTWAIT);
7747 cnt_of_space = M_TRAILINGSPACE(chk->data);
/* overhead depends on whether this endpoint may emit IPv6 packets */
7749 if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) {
7750 ovh = SCTP_MIN_OVERHEAD;
7751 } else {
7752 ovh = SCTP_MIN_V4_OVERHEAD;
7754 if (cnt_of_space > (asoc->smallest_mtu-ovh)) {
7755 /* trim to a mtu size */
7756 cnt_of_space = asoc->smallest_mtu - ovh;
7758 if (cnt_of_space < space_needed) {
7759 /* ok we must trim down the chunk by lowering
7760 * the advance peer ack point.
/*
 * NOTE(review): the intended arithmetic is presumably
 * (cnt_of_space - sizeof(chunk hdr)) / sizeof(struct sctp_strseq);
 * as parenthesized, only the header size is divided, so
 * cnt_of_skipped comes out far too large - confirm against the
 * later upstream sctp_output.c.
 */
7762 cnt_of_skipped = (cnt_of_space-
7763 ((sizeof(struct sctp_forward_tsn_chunk))/
7764 sizeof(struct sctp_strseq)));
7765 /* Go through and find the TSN that
7766 * will be the one we report.
7768 at = TAILQ_FIRST(&asoc->sent_queue);
7769 for (i = 0; i < cnt_of_skipped; i++) {
7770 tp1 = TAILQ_NEXT(at, sctp_next);
7771 at = tp1;
7773 last = at;
7774 /* last now points to last one I can report, update peer ack point */
7775 asoc->advanced_peer_ack_point = last->rec.data.TSN_seq;
7776 space_needed -= (cnt_of_skipped * sizeof(struct sctp_strseq));
7778 chk->send_size = space_needed;
7779 /* Setup the chunk */
7780 fwdtsn = mtod(chk->data, struct sctp_forward_tsn_chunk *);
7781 fwdtsn->ch.chunk_length = htons(chk->send_size);
7782 fwdtsn->ch.chunk_flags = 0;
7783 fwdtsn->ch.chunk_type = SCTP_FORWARD_CUM_TSN;
7784 fwdtsn->new_cumulative_tsn = htonl(asoc->advanced_peer_ack_point);
7785 chk->send_size = (sizeof(struct sctp_forward_tsn_chunk) +
7786 (cnt_of_skipped * sizeof(struct sctp_strseq)));
7787 chk->data->m_pkthdr.len = chk->data->m_len = chk->send_size;
7788 fwdtsn++;
7789 /* Move pointer to after the fwdtsn and transfer to
7790 * the strseq pointer.
7792 strseq = (struct sctp_strseq *)fwdtsn;
7794 * Now populate the strseq list. This is done blindly
7795 * without pulling out duplicate stream info. This is
7796 * inefficent but won't harm the process since the peer
7797 * will look at these in sequence and will thus release
7798 * anything. It could mean we exceed the PMTU and chop
7799 * off some that we could have included.. but this is
7800 * unlikely (aka 1432/4 would mean 300+ stream seq's would
7801 * have to be reported in one FWD-TSN. With a bit of work
7802 * we can later FIX this to optimize and pull out duplcates..
7803 * but it does add more overhead. So for now... not!
7805 at = TAILQ_FIRST(&asoc->sent_queue);
7806 for (i = 0; i < cnt_of_skipped; i++) {
7807 tp1 = TAILQ_NEXT(at, sctp_next);
7808 if (at->rec.data.rcv_flags & SCTP_DATA_UNORDERED) {
7809 /* We don't report these */
7810 i--;
7811 at = tp1;
7812 continue;
/*
 * NOTE(review): ntohs() where htons() is semantically meant
 * (host -> wire).  The two are the same operation on common
 * platforms, so behavior is unaffected, but the direction is
 * documented wrong.
 */
7814 strseq->stream = ntohs(at->rec.data.stream_number);
7815 strseq->sequence = ntohs(at->rec.data.stream_seq);
7816 strseq++;
7817 at = tp1;
7820 return;
/*
 * sctp_send_sack() - build a SACK chunk reflecting the current receive map
 * (cumulative TSN, gap-ack blocks, duplicate TSNs) and queue it on the
 * association's control queue.  An already-queued SACK is recycled; its
 * destination may be switched to an alternate net when duplicates were seen
 * or the last data source is unreachable.
 */
7824 void
7825 sctp_send_sack(struct sctp_tcb *stcb)
7828 * Queue up a SACK in the control queue. We must first check to
7829 * see if a SACK is somehow on the control queue. If so, we will
7830 * take and and remove the old one.
7832 struct sctp_association *asoc;
7833 struct sctp_tmit_chunk *chk, *a_chk;
7834 struct sctp_sack_chunk *sack;
7835 struct sctp_gap_ack_block *gap_descriptor;
7836 uint32_t *dup;
7837 int start;
7838 unsigned int i, maxi, seeing_ones, m_size;
7839 unsigned int num_gap_blocks, space;
7841 start = maxi = 0;
7842 seeing_ones = 1;
7843 a_chk = NULL;
7844 asoc = &stcb->asoc;
7845 if (asoc->last_data_chunk_from == NULL) {
7846 /* Hmm we never received anything */
7847 return;
/* refresh our advertised receive window before reporting it */
7849 sctp_set_rwnd(stcb, asoc);
7850 TAILQ_FOREACH(chk, &asoc->control_send_queue, sctp_next) {
7851 if (chk->rec.chunk_id == SCTP_SELECTIVE_ACK) {
7852 /* Hmm, found a sack already on queue, remove it */
7853 TAILQ_REMOVE(&asoc->control_send_queue, chk, sctp_next);
/*
 * NOTE(review): incrementing after a TAILQ_REMOVE looks wrong -
 * this should presumably be ctrl_queue_cnt-- (the chunk is
 * re-inserted below with another ++), otherwise the counter
 * drifts upward.  Verify against the upstream fix.
 */
7854 asoc->ctrl_queue_cnt++;
7855 a_chk = chk;
7856 if (a_chk->data)
7857 sctp_m_freem(a_chk->data);
7858 a_chk->data = NULL;
7859 sctp_free_remote_addr(a_chk->whoTo);
7860 a_chk->whoTo = NULL;
7861 break;
7864 if (a_chk == NULL) {
7865 a_chk = (struct sctp_tmit_chunk *)SCTP_ZONE_GET(sctppcbinfo.ipi_zone_chunk);
7866 if (a_chk == NULL) {
7867 /* No memory so we drop the idea, and set a timer */
7868 sctp_timer_stop(SCTP_TIMER_TYPE_RECV,
7869 stcb->sctp_ep, stcb, NULL);
7870 sctp_timer_start(SCTP_TIMER_TYPE_RECV,
7871 stcb->sctp_ep, stcb, NULL);
7872 return;
7874 sctppcbinfo.ipi_count_chunk++;
7875 sctppcbinfo.ipi_gencnt_chunk++;
7876 a_chk->rec.chunk_id = SCTP_SELECTIVE_ACK;
7878 a_chk->asoc = asoc;
7879 a_chk->snd_count = 0;
7880 a_chk->send_size = 0; /* fill in later */
7881 a_chk->sent = SCTP_DATAGRAM_UNSENT;
/* number of TSN slots in the mapping array (bytes * 8) */
7882 m_size = (asoc->mapping_array_size << 3);
7884 if ((asoc->numduptsns) ||
7885 (asoc->last_data_chunk_from->dest_state & SCTP_ADDR_NOT_REACHABLE)
7887 /* Ok, we have some duplicates or the destination for the
7888 * sack is unreachable, lets see if we can select an alternate
7889 * than asoc->last_data_chunk_from
7891 if ((!(asoc->last_data_chunk_from->dest_state &
7892 SCTP_ADDR_NOT_REACHABLE)) &&
7893 (asoc->used_alt_onsack > 2)) {
7894 /* We used an alt last time, don't this time */
7895 a_chk->whoTo = NULL;
7896 } else {
7897 asoc->used_alt_onsack++;
7898 a_chk->whoTo = sctp_find_alternate_net(stcb, asoc->last_data_chunk_from);
7900 if (a_chk->whoTo == NULL) {
7901 /* Nope, no alternate */
7902 a_chk->whoTo = asoc->last_data_chunk_from;
7903 asoc->used_alt_onsack = 0;
7905 } else {
7906 /* No duplicates so we use the last
7907 * place we received data from.
7909 #ifdef SCTP_DEBUG
7910 if (asoc->last_data_chunk_from == NULL) {
7911 kprintf("Huh, last_data_chunk_from is null when we want to sack??\n");
7913 #endif
7914 asoc->used_alt_onsack = 0;
7915 a_chk->whoTo = asoc->last_data_chunk_from;
7917 if (a_chk->whoTo)
7918 a_chk->whoTo->ref_count++;
7920 /* Ok now lets formulate a MBUF with our sack */
7921 MGETHDR(a_chk->data, MB_DONTWAIT, MT_DATA);
7922 if ((a_chk->data == NULL) ||
7923 (a_chk->whoTo == NULL)) {
7924 /* rats, no mbuf memory */
7925 if (a_chk->data) {
7926 /* was a problem with the destination */
7927 sctp_m_freem(a_chk->data);
7928 a_chk->data = NULL;
7930 a_chk->whoTo->ref_count--;
7931 SCTP_ZONE_FREE(sctppcbinfo.ipi_zone_chunk, a_chk);
7932 sctppcbinfo.ipi_count_chunk--;
7933 if ((int)sctppcbinfo.ipi_count_chunk < 0) {
7934 panic("Chunk count is negative");
7936 sctppcbinfo.ipi_gencnt_chunk++;
/* restart the delayed-SACK timer so we retry later */
7937 sctp_timer_stop(SCTP_TIMER_TYPE_RECV,
7938 stcb->sctp_ep, stcb, NULL);
7939 sctp_timer_start(SCTP_TIMER_TYPE_RECV,
7940 stcb->sctp_ep, stcb, NULL);
7941 return;
7943 /* First count the number of gap ack blocks we need */
7944 if (asoc->highest_tsn_inside_map == asoc->cumulative_tsn) {
7945 /* We know if there are none above the cum-ack we
7946 * have everything with NO gaps
7948 num_gap_blocks = 0;
7949 } else {
7950 /* Ok we must count how many gaps we
7951 * have.
7953 num_gap_blocks = 0;
/* maxi = highest mapping-array index in use (TSN space may wrap) */
7954 if (asoc->highest_tsn_inside_map >= asoc->mapping_array_base_tsn) {
7955 maxi = (asoc->highest_tsn_inside_map - asoc->mapping_array_base_tsn);
7956 } else {
7957 maxi = (asoc->highest_tsn_inside_map + (MAX_TSN - asoc->mapping_array_base_tsn) + 1);
7959 if (maxi > m_size) {
7960 /* impossible but who knows, someone is playing with us :> */
7961 #ifdef SCTP_DEBUG
7962 kprintf("GAK maxi:%d > m_size:%d came out higher than allowed htsn:%u base:%u cumack:%u\n",
7963 maxi,
7964 m_size,
7965 asoc->highest_tsn_inside_map,
7966 asoc->mapping_array_base_tsn,
7967 asoc->cumulative_tsn
7969 #endif
7970 num_gap_blocks = 0;
7971 goto no_gaps_now;
7973 if (asoc->cumulative_tsn >= asoc->mapping_array_base_tsn) {
7974 start = (asoc->cumulative_tsn - asoc->mapping_array_base_tsn);
7975 } else {
7976 /* Set it so we start at 0 */
7977 start = -1;
7979 /* Ok move start up one to look at the NEXT past the cum-ack */
7980 start++;
/* scan the bit map, counting 0->1 transitions as gap-block starts */
7981 for (i = start; i <= maxi; i++) {
7982 if (seeing_ones) {
7983 /* while seeing ones I must
7984 * transition back to 0 before
7985 * finding the next gap and
7986 * counting the segment.
7988 if (SCTP_IS_TSN_PRESENT(asoc->mapping_array, i) == 0) {
7989 seeing_ones = 0;
7991 } else {
7992 if (SCTP_IS_TSN_PRESENT(asoc->mapping_array, i)) {
7993 seeing_ones = 1;
7994 num_gap_blocks++;
7998 no_gaps_now:
7999 if (num_gap_blocks == 0) {
8001 * Traveled all of the bits and NO one,
8002 * must have reneged
8004 if (compare_with_wrap(asoc->cumulative_tsn, asoc->highest_tsn_inside_map, MAX_TSN)) {
8005 asoc->highest_tsn_inside_map = asoc->cumulative_tsn;
8006 #ifdef SCTP_MAP_LOGGING
8007 sctp_log_map(0, 4, asoc->highest_tsn_inside_map, SCTP_MAP_SLIDE_RESULT);
8008 #endif
8013 /* Now calculate the space needed */
8014 space = (sizeof(struct sctp_sack_chunk) +
8015 (num_gap_blocks * sizeof(struct sctp_gap_ack_block)) +
8016 (asoc->numduptsns * sizeof(int32_t))
8018 if (space > (asoc->smallest_mtu-SCTP_MAX_OVERHEAD)) {
8019 /* Reduce the size of the sack to fit */
8020 int calc, fit;
8021 calc = (asoc->smallest_mtu - SCTP_MAX_OVERHEAD);
8022 calc -= sizeof(struct sctp_gap_ack_block);
8023 fit = calc/sizeof(struct sctp_gap_ack_block);
8024 if (fit > (int)num_gap_blocks) {
8025 /* discard some dups */
8026 asoc->numduptsns = (fit - num_gap_blocks);
8027 } else {
8028 /* discard all dups and some gaps */
8029 num_gap_blocks = fit;
8030 asoc->numduptsns = 0;
8032 /* recalc space */
8033 space = (sizeof(struct sctp_sack_chunk) +
8034 (num_gap_blocks * sizeof(struct sctp_gap_ack_block)) +
8035 (asoc->numduptsns * sizeof(int32_t))
8040 if ((space+SCTP_MIN_OVERHEAD) > MHLEN) {
8041 /* We need a cluster */
8042 MCLGET(a_chk->data, MB_DONTWAIT);
8043 if ((a_chk->data->m_flags & M_EXT) != M_EXT) {
8044 /* can't get a cluster
8045 * give up and try later.
8047 if (a_chk->data)
8048 sctp_m_freem(a_chk->data);
8049 a_chk->data = NULL;
8050 a_chk->whoTo->ref_count--;
8051 SCTP_ZONE_FREE(sctppcbinfo.ipi_zone_chunk, a_chk);
8052 sctppcbinfo.ipi_count_chunk--;
8053 if ((int)sctppcbinfo.ipi_count_chunk < 0) {
8054 panic("Chunk count is negative");
8056 sctppcbinfo.ipi_gencnt_chunk++;
8057 sctp_timer_stop(SCTP_TIMER_TYPE_RECV,
8058 stcb->sctp_ep, stcb, NULL);
8059 sctp_timer_start(SCTP_TIMER_TYPE_RECV,
8060 stcb->sctp_ep, stcb, NULL);
8061 return;
8065 /* ok, lets go through and fill it in */
8066 a_chk->data->m_data += SCTP_MIN_OVERHEAD;
8067 sack = mtod(a_chk->data, struct sctp_sack_chunk *);
8068 sack->ch.chunk_type = SCTP_SELECTIVE_ACK;
8069 sack->ch.chunk_flags = asoc->receiver_nonce_sum & SCTP_SACK_NONCE_SUM;
8070 sack->sack.cum_tsn_ack = htonl(asoc->cumulative_tsn);
8071 sack->sack.a_rwnd = htonl(asoc->my_rwnd);
8072 asoc->my_last_reported_rwnd = asoc->my_rwnd;
8073 sack->sack.num_gap_ack_blks = htons(num_gap_blocks);
8074 sack->sack.num_dup_tsns = htons(asoc->numduptsns);
8076 a_chk->send_size = (sizeof(struct sctp_sack_chunk) +
8077 (num_gap_blocks * sizeof(struct sctp_gap_ack_block)) +
8078 (asoc->numduptsns * sizeof(int32_t)));
8079 a_chk->data->m_pkthdr.len = a_chk->data->m_len = a_chk->send_size;
8080 sack->ch.chunk_length = htons(a_chk->send_size);
/* second pass over the map: emit start/end offsets for each gap block */
8082 gap_descriptor = (struct sctp_gap_ack_block *)((caddr_t)sack + sizeof(struct sctp_sack_chunk));
8083 seeing_ones = 0;
8084 for (i = start; i <= maxi; i++) {
8085 if (num_gap_blocks == 0) {
8086 break;
8088 if (seeing_ones) {
8089 /* while seeing Ones I must
8090 * transition back to 0 before
8091 * finding the next gap
8093 if (SCTP_IS_TSN_PRESENT(asoc->mapping_array, i) == 0) {
8094 gap_descriptor->end = htons(((uint16_t)(i-start)));
8095 gap_descriptor++;
8096 seeing_ones = 0;
8097 num_gap_blocks--;
8099 } else {
8100 if (SCTP_IS_TSN_PRESENT(asoc->mapping_array, i)) {
8101 gap_descriptor->start = htons(((uint16_t)(i+1-start)));
8102 /* advance struct to next pointer */
8103 seeing_ones = 1;
8107 if (num_gap_blocks) {
8108 /* special case where the array is all 1's
8109 * to the end of the array.
8111 gap_descriptor->end = htons(((uint16_t)((i-start))));
8112 gap_descriptor++;
8114 /* now we must add any dups we are going to report. */
8115 if (asoc->numduptsns) {
8116 dup = (uint32_t *)gap_descriptor;
8117 for (i = 0; i < asoc->numduptsns; i++) {
8118 *dup = htonl(asoc->dup_tsns[i]);
8119 dup++;
8121 asoc->numduptsns = 0;
8123 /* now that the chunk is prepared queue it to the control
8124 * chunk queue.
8126 TAILQ_INSERT_TAIL(&asoc->control_send_queue, a_chk, sctp_next);
8127 asoc->ctrl_queue_cnt++;
8128 sctp_pegs[SCTP_PEG_SACKS_SENT]++;
8129 return;
/*
 * sctp_send_abort_tcb() - build an ABORT message for an existing association
 * and transmit it immediately (bypassing the normal control queue) to the
 * primary destination.  An optional operational-error mbuf chain ('operr')
 * is appended as the ABORT's error cause(s); this routine takes ownership
 * of that chain by linking it into the outgoing packet.
 */
8132 void
8133 sctp_send_abort_tcb(struct sctp_tcb *stcb, struct mbuf *operr)
8135 struct mbuf *m_abort;
8136 struct sctp_abort_msg *abort_m;
8137 int sz;
8138 abort_m = NULL;
8139 MGETHDR(m_abort, MB_DONTWAIT, MT_HEADER);
8140 if (m_abort == NULL) {
8141 /* no mbuf's */
/* NOTE(review): 'operr' appears to leak on this path (never freed). */
8142 return;
/* leave headroom for the IP + SCTP common headers */
8144 m_abort->m_data += SCTP_MIN_OVERHEAD;
8145 abort_m = mtod(m_abort, struct sctp_abort_msg *);
8146 m_abort->m_len = sizeof(struct sctp_abort_msg);
8147 m_abort->m_next = operr;
8148 sz = 0;
8149 if (operr) {
/* total length of the appended error-cause chain */
8150 struct mbuf *n;
8151 n = operr;
8152 while (n) {
8153 sz += n->m_len;
8154 n = n->m_next;
8157 abort_m->msg.ch.chunk_type = SCTP_ABORT_ASSOCIATION;
8158 abort_m->msg.ch.chunk_flags = 0;
8159 abort_m->msg.ch.chunk_length = htons(sizeof(struct sctp_abort_chunk) +
8160 sz);
8161 abort_m->sh.src_port = stcb->sctp_ep->sctp_lport;
8162 abort_m->sh.dest_port = stcb->rport;
8163 abort_m->sh.v_tag = htonl(stcb->asoc.peer_vtag);
8164 abort_m->sh.checksum = 0;
8165 m_abort->m_pkthdr.len = m_abort->m_len + sz;
8166 m_abort->m_pkthdr.rcvif = 0;
8167 sctp_lowlevel_chunk_output(stcb->sctp_ep, stcb,
8168 stcb->asoc.primary_destination,
8169 (struct sockaddr *)&stcb->asoc.primary_destination->ro._l_addr,
8170 m_abort, 1, 0, NULL, 0);
/*
 * sctp_send_shutdown_complete() - build and immediately transmit a
 * SHUTDOWN-COMPLETE for a known association to 'net', then (for TCP-model
 * sockets) mark the socket disconnected.  Returns 0 on success, -1 if no
 * mbuf could be allocated.
 */
8174 sctp_send_shutdown_complete(struct sctp_tcb *stcb,
8175 struct sctp_nets *net)
8178 /* formulate and SEND a SHUTDOWN-COMPLETE */
8179 struct mbuf *m_shutdown_comp;
8180 struct sctp_shutdown_complete_msg *comp_cp;
8182 m_shutdown_comp = NULL;
8183 MGETHDR(m_shutdown_comp, MB_DONTWAIT, MT_HEADER);
8184 if (m_shutdown_comp == NULL) {
8185 /* no mbuf's */
8186 return (-1);
/* reserve the larger (IPv6) header's worth of headroom up front */
8188 m_shutdown_comp->m_data += sizeof(struct ip6_hdr);
8189 comp_cp = mtod(m_shutdown_comp, struct sctp_shutdown_complete_msg *);
8190 comp_cp->shut_cmp.ch.chunk_type = SCTP_SHUTDOWN_COMPLETE;
8191 comp_cp->shut_cmp.ch.chunk_flags = 0;
8192 comp_cp->shut_cmp.ch.chunk_length = htons(sizeof(struct sctp_shutdown_complete_chunk));
8193 comp_cp->sh.src_port = stcb->sctp_ep->sctp_lport;
8194 comp_cp->sh.dest_port = stcb->rport;
8195 comp_cp->sh.v_tag = htonl(stcb->asoc.peer_vtag);
8196 comp_cp->sh.checksum = 0;
8198 m_shutdown_comp->m_pkthdr.len = m_shutdown_comp->m_len = sizeof(struct sctp_shutdown_complete_msg);
8199 m_shutdown_comp->m_pkthdr.rcvif = 0;
8200 sctp_lowlevel_chunk_output(stcb->sctp_ep, stcb, net,
8201 (struct sockaddr *)&net->ro._l_addr, m_shutdown_comp,
8202 1, 0, NULL, 0);
/* TCP-model sockets transition to the disconnected state here */
8203 if ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) ||
8204 (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL)) {
8205 stcb->sctp_ep->sctp_flags &= ~SCTP_PCB_FLAGS_CONNECTED;
8206 stcb->sctp_ep->sctp_socket->so_snd.ssb_cc = 0;
8207 soisdisconnected(stcb->sctp_ep->sctp_socket);
8209 return (0);
/*
 * sctp_send_shutdown_complete2() - "out of the blue" SHUTDOWN-COMPLETE:
 * reply to a received SHUTDOWN-ACK for which we have no TCB.  The source
 * packet 'm' (IP header at offset 0, SCTP common header at 'sh') supplies
 * the addresses/ports/v-tag, which are reflected back at the sender.
 * Returns 0 on success, -1 on allocation failure or unsupported IP version.
 */
8213 sctp_send_shutdown_complete2(struct mbuf *m, int iphlen, struct sctphdr *sh)
8215 /* formulate and SEND a SHUTDOWN-COMPLETE */
8216 struct mbuf *mout;
8217 struct ip *iph, *iph_out;
8218 struct ip6_hdr *ip6, *ip6_out;
8219 int offset_out;
8220 struct sctp_shutdown_complete_msg *comp_cp;
8222 MGETHDR(mout, MB_DONTWAIT, MT_HEADER);
8223 if (mout == NULL) {
8224 /* no mbuf's */
8225 return (-1);
8227 iph = mtod(m, struct ip *);
8228 iph_out = NULL;
8229 ip6_out = NULL;
8230 offset_out = 0;
8231 if (iph->ip_v == IPVERSION) {
/* IPv4: craft a reply header with source/destination swapped */
8232 mout->m_len = sizeof(struct ip) +
8233 sizeof(struct sctp_shutdown_complete_msg);
8234 mout->m_next = NULL;
8235 iph_out = mtod(mout, struct ip *);
8237 /* Fill in the IP header for the ABORT */
8238 iph_out->ip_v = IPVERSION;
8239 iph_out->ip_hl = (sizeof(struct ip)/4);
8240 iph_out->ip_tos = (u_char)0;
8241 iph_out->ip_id = 0;
8242 iph_out->ip_off = 0;
8243 iph_out->ip_ttl = MAXTTL;
8244 iph_out->ip_p = IPPROTO_SCTP;
8245 iph_out->ip_src.s_addr = iph->ip_dst.s_addr;
8246 iph_out->ip_dst.s_addr = iph->ip_src.s_addr;
8248 /* let IP layer calculate this */
8249 iph_out->ip_sum = 0;
8250 offset_out += sizeof(*iph_out);
8251 comp_cp = (struct sctp_shutdown_complete_msg *)(
8252 (caddr_t)iph_out + offset_out);
8253 } else if (iph->ip_v == (IPV6_VERSION >> 4)) {
/* IPv6: same idea with the v6 header */
8254 ip6 = (struct ip6_hdr *)iph;
8255 mout->m_len = sizeof(struct ip6_hdr) +
8256 sizeof(struct sctp_shutdown_complete_msg);
8257 mout->m_next = NULL;
8258 ip6_out = mtod(mout, struct ip6_hdr *);
8260 /* Fill in the IPv6 header for the ABORT */
8261 ip6_out->ip6_flow = ip6->ip6_flow;
8262 ip6_out->ip6_hlim = ip6_defhlim;
8263 ip6_out->ip6_nxt = IPPROTO_SCTP;
8264 ip6_out->ip6_src = ip6->ip6_dst;
8265 ip6_out->ip6_dst = ip6->ip6_src;
8266 ip6_out->ip6_plen = mout->m_len;
8267 offset_out += sizeof(*ip6_out);
8268 comp_cp = (struct sctp_shutdown_complete_msg *)(
8269 (caddr_t)ip6_out + offset_out);
8270 } else {
8271 /* Currently not supported. */
/* NOTE(review): 'mout' appears to leak here (no m_freem before return). */
8272 return (-1);
8275 /* Now copy in and fill in the ABORT tags etc. */
8276 comp_cp->sh.src_port = sh->dest_port;
8277 comp_cp->sh.dest_port = sh->src_port;
8278 comp_cp->sh.checksum = 0;
8279 comp_cp->sh.v_tag = sh->v_tag;
8280 comp_cp->shut_cmp.ch.chunk_flags = SCTP_HAD_NO_TCB;
8281 comp_cp->shut_cmp.ch.chunk_type = SCTP_SHUTDOWN_COMPLETE;
8282 comp_cp->shut_cmp.ch.chunk_length = htons(sizeof(struct sctp_shutdown_complete_chunk));
8284 mout->m_pkthdr.len = mout->m_len;
8285 /* add checksum */
/* loopback traffic may skip the SCTP checksum when so configured */
8286 if ((sctp_no_csum_on_loopback) &&
8287 (m->m_pkthdr.rcvif) &&
8288 (m->m_pkthdr.rcvif->if_type == IFT_LOOP)) {
8289 comp_cp->sh.checksum = 0;
8290 } else {
8291 comp_cp->sh.checksum = sctp_calculate_sum(mout, NULL, offset_out);
8294 /* zap the rcvif, it should be null */
8295 mout->m_pkthdr.rcvif = 0;
8296 /* zap the stack pointer to the route */
8297 if (iph_out != NULL) {
8298 struct route ro;
8300 bzero(&ro, sizeof ro);
8301 #ifdef SCTP_DEBUG
8302 if (sctp_debug_on & SCTP_DEBUG_OUTPUT2) {
8303 kprintf("sctp_shutdown_complete2 calling ip_output:\n");
8304 sctp_print_address_pkt(iph_out, &comp_cp->sh);
8306 #endif
8307 /* set IPv4 length */
8308 #if defined(__FreeBSD__)
8309 iph_out->ip_len = mout->m_pkthdr.len;
8310 #else
8311 iph_out->ip_len = htons(mout->m_pkthdr.len);
8312 #endif
8313 /* out it goes */
8314 ip_output(mout, 0, &ro, IP_RAWOUTPUT, NULL
8315 #if defined(__OpenBSD__) || (defined(__FreeBSD__) && __FreeBSD_version >= 480000) \
8316 || defined(__NetBSD__) || defined(__DragonFly__)
8317 , NULL
8318 #endif
8320 /* Free the route if we got one back */
8321 if (ro.ro_rt)
8322 RTFREE(ro.ro_rt);
8323 } else if (ip6_out != NULL) {
8324 #ifdef NEW_STRUCT_ROUTE
8325 struct route ro;
8326 #else
8327 struct route_in6 ro;
8328 #endif
8330 bzero(&ro, sizeof(ro));
8331 #ifdef SCTP_DEBUG
8332 if (sctp_debug_on & SCTP_DEBUG_OUTPUT2) {
8333 kprintf("sctp_shutdown_complete2 calling ip6_output:\n");
8334 sctp_print_address_pkt((struct ip *)ip6_out,
8335 &comp_cp->sh);
8337 #endif
8338 ip6_output(mout, NULL, &ro, 0, NULL, NULL
8339 #if defined(__NetBSD__)
8340 , NULL
8341 #endif
8342 #if (defined(__FreeBSD__) && __FreeBSD_version >= 480000) || defined(__DragonFly__)
8343 , NULL
8344 #endif
8346 /* Free the route if we got one back */
8347 if (ro.ro_rt)
8348 RTFREE(ro.ro_rt);
8350 sctp_pegs[SCTP_DATAGRAMS_SENT]++;
8351 return (0);
/*
 * sctp_select_hb_destination() - pick the peer address most in need of a
 * heartbeat: among reachable nets that have HB enabled (or are still
 * unconfirmed), choose the one idle the longest, provided it has been idle
 * at least one RTO (unconfirmed addresses override the RTO gate so they are
 * probed more aggressively).  Updates *now and the winner's last_sent_time.
 * Returns the chosen net, or NULL if nothing needs a heartbeat.
 */
8354 static struct sctp_nets *
8355 sctp_select_hb_destination(struct sctp_tcb *stcb, struct timeval *now)
8357 struct sctp_nets *net, *hnet;
8358 int ms_goneby, highest_ms, state_overide=0;
8360 SCTP_GETTIME_TIMEVAL(now);
8361 highest_ms = 0;
8362 hnet = NULL;
8363 TAILQ_FOREACH(net, &stcb->asoc.nets, sctp_next) {
8364 if (
8365 ((net->dest_state & SCTP_ADDR_NOHB) && ((net->dest_state & SCTP_ADDR_UNCONFIRMED) == 0)) ||
8366 (net->dest_state & SCTP_ADDR_OUT_OF_SCOPE)
8368 /* Skip this guy from consideration if HB is off AND its confirmed*/
8369 #ifdef SCTP_DEBUG
8370 if (sctp_debug_on & SCTP_DEBUG_OUTPUT4) {
8371 kprintf("Skipping net:%p state:%d nohb/out-of-scope\n",
8372 net, net->dest_state);
8374 #endif
8375 continue;
8377 if (sctp_destination_is_reachable(stcb, (struct sockaddr *)&net->ro._l_addr) == 0) {
8378 /* skip this dest net from consideration */
8379 #ifdef SCTP_DEBUG
8380 if (sctp_debug_on & SCTP_DEBUG_OUTPUT4) {
8381 kprintf("Skipping net:%p reachable NOT\n",
8382 net);
8384 #endif
8385 continue;
8387 if (net->last_sent_time.tv_sec) {
8388 /* Sent to so we subtract */
8389 ms_goneby = (now->tv_sec - net->last_sent_time.tv_sec) * 1000;
8390 } else
8391 /* Never been sent to */
8392 ms_goneby = 0x7fffffff;
8393 #ifdef SCTP_DEBUG
8394 if (sctp_debug_on & SCTP_DEBUG_OUTPUT4) {
8395 kprintf("net:%p ms_goneby:%d\n",
8396 net, ms_goneby);
8398 #endif
8399 /* When the address state is unconfirmed but still considered reachable, we
8400 * HB at a higher rate. Once it goes confirmed OR reaches the "unreachable"
8401 * state, thenw we cut it back to HB at a more normal pace.
8403 if ((net->dest_state & (SCTP_ADDR_UNCONFIRMED|SCTP_ADDR_NOT_REACHABLE)) == SCTP_ADDR_UNCONFIRMED) {
8404 state_overide = 1;
8405 } else {
8406 state_overide = 0;
/* new leader if idle >= RTO (or unconfirmed) and idle the longest */
8409 if ((((unsigned int)ms_goneby >= net->RTO) || (state_overide)) &&
8410 (ms_goneby > highest_ms)) {
8411 highest_ms = ms_goneby;
8412 hnet = net;
8413 #ifdef SCTP_DEBUG
8414 if (sctp_debug_on & SCTP_DEBUG_OUTPUT4) {
8415 kprintf("net:%p is the new high\n",
8416 net);
8418 #endif
8421 if (hnet &&
8422 ((hnet->dest_state & (SCTP_ADDR_UNCONFIRMED|SCTP_ADDR_NOT_REACHABLE)) == SCTP_ADDR_UNCONFIRMED)) {
8423 state_overide = 1;
8424 } else {
8425 state_overide = 0;
8428 if (highest_ms && (((unsigned int)highest_ms >= hnet->RTO) || state_overide)) {
8429 /* Found the one with longest delay bounds
8430 * OR it is unconfirmed and still not marked
8431 * unreachable.
8433 #ifdef SCTP_DEBUG
8434 if (sctp_debug_on & SCTP_DEBUG_OUTPUT4) {
8435 kprintf("net:%p is the hb winner -",
8436 hnet);
8437 if (hnet)
8438 sctp_print_address((struct sockaddr *)&hnet->ro._l_addr);
8439 else
8440 kprintf(" none\n");
8442 #endif
8443 /* update the timer now */
8444 hnet->last_sent_time = *now;
8445 return (hnet);
8447 /* Nothing to HB */
8448 return (NULL);
/*
 * sctp_send_hb() - queue a HEARTBEAT chunk.  With user_req == 0 the
 * destination is auto-selected via sctp_select_hb_destination() and the HB
 * timer is restarted; with user_req != 0 the caller supplies 'u_net'
 * explicitly.  Returns 1 when a heartbeat was queued, 0 when nothing was
 * sent, and -1 when threshold management tore the association down.
 */
8452 sctp_send_hb(struct sctp_tcb *stcb, int user_req, struct sctp_nets *u_net)
8454 struct sctp_tmit_chunk *chk;
8455 struct sctp_nets *net;
8456 struct sctp_heartbeat_chunk *hb;
8457 struct timeval now;
8458 struct sockaddr_in *sin;
8459 struct sockaddr_in6 *sin6;
8461 if (user_req == 0) {
8462 net = sctp_select_hb_destination(stcb, &now);
8463 if (net == NULL) {
8464 /* All our busy none to send to, just
8465 * start the timer again.
8467 if (stcb->asoc.state == 0) {
8468 return (0);
8470 sctp_timer_start(SCTP_TIMER_TYPE_HEARTBEAT,
8471 stcb->sctp_ep,
8472 stcb,
8473 net);
8474 return (0);
8476 #ifndef SCTP_USE_ALLMAN_BURST
8477 else {
8478 /* found one idle.. decay cwnd on this one
8479 * by 1/2 if none outstanding.
/* floor the decayed cwnd at 4 MTU (local) or 2 MTU (remote) */
8482 if (net->flight_size == 0) {
8483 net->cwnd /= 2;
8484 if (net->addr_is_local) {
8485 if (net->cwnd < (net->mtu *4)) {
8486 net->cwnd = net->mtu * 4;
8488 } else {
8489 if (net->cwnd < (net->mtu * 2)) {
8490 net->cwnd = net->mtu * 2;
8497 #endif
8498 } else {
8499 net = u_net;
8500 if (net == NULL) {
8501 return (0);
8503 SCTP_GETTIME_TIMEVAL(&now);
/*
 * NOTE(review): the v4 sockaddr view is used to test both families -
 * this relies on sin_family and sin6_family occupying the same offset;
 * confirm that assumption holds for all supported platforms.
 */
8505 sin = (struct sockaddr_in *)&net->ro._l_addr;
8506 if (sin->sin_family != AF_INET) {
8507 if (sin->sin_family != AF_INET6) {
8508 /* huh */
8509 return (0);
8512 chk = (struct sctp_tmit_chunk *)SCTP_ZONE_GET(sctppcbinfo.ipi_zone_chunk);
8513 if (chk == NULL) {
8514 #ifdef SCTP_DEBUG
8515 if (sctp_debug_on & SCTP_DEBUG_OUTPUT4) {
8516 kprintf("Gak, can't get a chunk for hb\n");
8518 #endif
8519 return (0);
8521 sctppcbinfo.ipi_gencnt_chunk++;
8522 sctppcbinfo.ipi_count_chunk++;
8523 chk->rec.chunk_id = SCTP_HEARTBEAT_REQUEST;
8524 chk->asoc = &stcb->asoc;
8525 chk->send_size = sizeof(struct sctp_heartbeat_chunk);
8526 MGETHDR(chk->data, MB_DONTWAIT, MT_DATA);
8527 if (chk->data == NULL) {
8528 SCTP_ZONE_FREE(sctppcbinfo.ipi_zone_chunk, chk);
8529 sctppcbinfo.ipi_count_chunk--;
8530 if ((int)sctppcbinfo.ipi_count_chunk < 0) {
8531 panic("Chunk count is negative");
8533 sctppcbinfo.ipi_gencnt_chunk++;
8534 return (0);
8536 chk->data->m_data += SCTP_MIN_OVERHEAD;
8537 chk->data->m_pkthdr.len = chk->data->m_len = chk->send_size;
8538 chk->sent = SCTP_DATAGRAM_UNSENT;
8539 chk->snd_count = 0;
8540 chk->whoTo = net;
8541 chk->whoTo->ref_count++;
8542 /* Now we have a mbuf that we can fill in with the details */
8543 hb = mtod(chk->data, struct sctp_heartbeat_chunk *);
8545 /* fill out chunk header */
8546 hb->ch.chunk_type = SCTP_HEARTBEAT_REQUEST;
8547 hb->ch.chunk_flags = 0;
8548 hb->ch.chunk_length = htons(chk->send_size);
8549 /* Fill out hb parameter */
8550 hb->heartbeat.hb_info.ph.param_type = htons(SCTP_HEARTBEAT_INFO);
8551 hb->heartbeat.hb_info.ph.param_length = htons(sizeof(struct sctp_heartbeat_info_param));
/* the echo'd timestamp lets us compute an RTT when the HB-ACK returns */
8552 hb->heartbeat.hb_info.time_value_1 = now.tv_sec;
8553 hb->heartbeat.hb_info.time_value_2 = now.tv_usec;
8554 /* Did our user request this one, put it in */
8555 hb->heartbeat.hb_info.user_req = user_req;
8556 hb->heartbeat.hb_info.addr_family = sin->sin_family;
8557 hb->heartbeat.hb_info.addr_len = sin->sin_len;
8558 if (net->dest_state & SCTP_ADDR_UNCONFIRMED) {
8559 /* we only take from the entropy pool if the address is
8560 * not confirmed.
8562 net->heartbeat_random1 = hb->heartbeat.hb_info.random_value1 = sctp_select_initial_TSN(&stcb->sctp_ep->sctp_ep);
8563 net->heartbeat_random2 = hb->heartbeat.hb_info.random_value2 = sctp_select_initial_TSN(&stcb->sctp_ep->sctp_ep);
8564 } else {
8565 net->heartbeat_random1 = hb->heartbeat.hb_info.random_value1 = 0;
8566 net->heartbeat_random2 = hb->heartbeat.hb_info.random_value2 = 0;
8568 if (sin->sin_family == AF_INET) {
8569 memcpy(hb->heartbeat.hb_info.address, &sin->sin_addr, sizeof(sin->sin_addr));
8570 } else if (sin->sin_family == AF_INET6) {
8571 /* We leave the scope the way it is in our lookup table. */
8572 sin6 = (struct sockaddr_in6 *)&net->ro._l_addr;
8573 memcpy(hb->heartbeat.hb_info.address, &sin6->sin6_addr, sizeof(sin6->sin6_addr));
8574 } else {
8575 /* huh compiler bug */
8576 #ifdef SCTP_DEBUG
8577 if (sctp_debug_on & SCTP_DEBUG_OUTPUT1) {
8578 kprintf("Compiler bug bleeds a mbuf and a chunk\n");
8580 #endif
8581 return (0);
8583 /* ok we have a destination that needs a beat */
8584 /* lets do the theshold management Qiaobing style */
8585 if (user_req == 0) {
8586 if (sctp_threshold_management(stcb->sctp_ep, stcb, net,
8587 stcb->asoc.max_send_times)) {
8588 /* we have lost the association, in a way this
8589 * is quite bad since we really are one less time
8590 * since we really did not send yet. This is the
8591 * down side to the Q's style as defined in the RFC
8592 * and not my alternate style defined in the RFC.
8594 if (chk->data != NULL) {
8595 sctp_m_freem(chk->data);
8596 chk->data = NULL;
8598 SCTP_ZONE_FREE(sctppcbinfo.ipi_zone_chunk, chk);
8599 sctppcbinfo.ipi_count_chunk--;
8600 if ((int)sctppcbinfo.ipi_count_chunk < 0) {
8601 panic("Chunk count is negative");
8603 sctppcbinfo.ipi_gencnt_chunk++;
8604 return (-1);
8607 net->hb_responded = 0;
8608 #ifdef SCTP_DEBUG
8609 if (sctp_debug_on & SCTP_DEBUG_OUTPUT4) {
8610 kprintf("Inserting chunk for HB\n");
8612 #endif
8613 TAILQ_INSERT_TAIL(&stcb->asoc.control_send_queue, chk, sctp_next);
8614 stcb->asoc.ctrl_queue_cnt++;
8615 sctp_pegs[SCTP_HB_SENT]++;
8617 * Call directly med level routine to put out the chunk. It will
8618 * always tumble out control chunks aka HB but it may even tumble
8619 * out data too.
8621 if (user_req == 0) {
8622 /* Ok now lets start the HB timer if it is NOT a user req */
8623 sctp_timer_start(SCTP_TIMER_TYPE_HEARTBEAT, stcb->sctp_ep,
8624 stcb, net);
8626 return (1);
/*
 * sctp_send_ecn_echo() - queue an ECN-Echo (ECNE) chunk reporting
 * congestion up to 'high_tsn'.  If an ECNE is already pending on the
 * control queue, only its TSN is refreshed; otherwise a new chunk is
 * allocated, filled in and appended.
 */
8629 void
8630 sctp_send_ecn_echo(struct sctp_tcb *stcb, struct sctp_nets *net,
8631 uint32_t high_tsn)
8633 struct sctp_association *asoc;
8634 struct sctp_ecne_chunk *ecne;
8635 struct sctp_tmit_chunk *chk;
8636 asoc = &stcb->asoc;
8637 TAILQ_FOREACH(chk, &asoc->control_send_queue, sctp_next) {
8638 if (chk->rec.chunk_id == SCTP_ECN_ECHO) {
8639 /* found a previous ECN_ECHO update it if needed */
8640 ecne = mtod(chk->data, struct sctp_ecne_chunk *);
8641 ecne->tsn = htonl(high_tsn);
8642 return;
8645 /* nope could not find one to update so we must build one */
8646 chk = (struct sctp_tmit_chunk *)SCTP_ZONE_GET(sctppcbinfo.ipi_zone_chunk);
8647 if (chk == NULL) {
8648 return;
/*
 * NOTE(review): the "ECNE sent" peg is bumped before the mbuf
 * allocation below can still fail, so the counter may overcount
 * on memory pressure.
 */
8650 sctp_pegs[SCTP_ECNE_SENT]++;
8651 sctppcbinfo.ipi_count_chunk++;
8652 sctppcbinfo.ipi_gencnt_chunk++;
8653 chk->rec.chunk_id = SCTP_ECN_ECHO;
8654 chk->asoc = &stcb->asoc;
8655 chk->send_size = sizeof(struct sctp_ecne_chunk);
8656 MGETHDR(chk->data, MB_DONTWAIT, MT_DATA);
8657 if (chk->data == NULL) {
8658 SCTP_ZONE_FREE(sctppcbinfo.ipi_zone_chunk, chk);
8659 sctppcbinfo.ipi_count_chunk--;
8660 if ((int)sctppcbinfo.ipi_count_chunk < 0) {
8661 panic("Chunk count is negative");
8663 sctppcbinfo.ipi_gencnt_chunk++;
8664 return;
8666 chk->data->m_data += SCTP_MIN_OVERHEAD;
8667 chk->data->m_pkthdr.len = chk->data->m_len = chk->send_size;
8668 chk->sent = SCTP_DATAGRAM_UNSENT;
8669 chk->snd_count = 0;
8670 chk->whoTo = net;
8671 chk->whoTo->ref_count++;
8672 ecne = mtod(chk->data, struct sctp_ecne_chunk *);
8673 ecne->ch.chunk_type = SCTP_ECN_ECHO;
8674 ecne->ch.chunk_flags = 0;
8675 ecne->ch.chunk_length = htons(sizeof(struct sctp_ecne_chunk));
8676 ecne->tsn = htonl(high_tsn);
8677 TAILQ_INSERT_TAIL(&stcb->asoc.control_send_queue, chk, sctp_next);
8678 asoc->ctrl_queue_cnt++;
8681 void
8682 sctp_send_packet_dropped(struct sctp_tcb *stcb, struct sctp_nets *net,
8683 struct mbuf *m, int iphlen, int bad_crc)
8685 struct sctp_association *asoc;
8686 struct sctp_pktdrop_chunk *drp;
8687 struct sctp_tmit_chunk *chk;
8688 uint8_t *datap;
8689 int len;
8690 unsigned int small_one;
8691 struct ip *iph;
8693 long spc;
8694 asoc = &stcb->asoc;
8695 if (asoc->peer_supports_pktdrop == 0) {
8696 /* peer must declare support before I
8697 * send one.
8699 return;
8701 chk = (struct sctp_tmit_chunk *)SCTP_ZONE_GET(sctppcbinfo.ipi_zone_chunk);
8702 if (chk == NULL) {
8703 return;
8705 sctppcbinfo.ipi_count_chunk++;
8706 sctppcbinfo.ipi_gencnt_chunk++;
8708 iph = mtod(m, struct ip *);
8709 if (iph == NULL) {
8710 return;
8712 if (iph->ip_v == IPVERSION) {
8713 /* IPv4 */
8714 #if defined(__FreeBSD__)
8715 len = chk->send_size = iph->ip_len;
8716 #else
8717 len = chk->send_size = (iph->ip_len - iphlen);
8718 #endif
8719 } else {
8720 struct ip6_hdr *ip6h;
8721 /* IPv6 */
8722 ip6h = mtod(m, struct ip6_hdr *);
8723 len = chk->send_size = htons(ip6h->ip6_plen);
8725 if ((len+iphlen) > m->m_pkthdr.len) {
8726 /* huh */
8727 chk->send_size = len = m->m_pkthdr.len - iphlen;
8729 chk->asoc = &stcb->asoc;
8730 MGETHDR(chk->data, MB_DONTWAIT, MT_DATA);
8731 if (chk->data == NULL) {
8732 jump_out:
8733 SCTP_ZONE_FREE(sctppcbinfo.ipi_zone_chunk, chk);
8734 sctppcbinfo.ipi_count_chunk--;
8735 if ((int)sctppcbinfo.ipi_count_chunk < 0) {
8736 panic("Chunk count is negative");
8738 sctppcbinfo.ipi_gencnt_chunk++;
8739 return;
8741 if ((chk->send_size+sizeof(struct sctp_pktdrop_chunk)+SCTP_MIN_OVERHEAD) > MHLEN) {
8742 MCLGET(chk->data, MB_DONTWAIT);
8743 if ((chk->data->m_flags & M_EXT) == 0) {
8744 /* Give up */
8745 sctp_m_freem(chk->data);
8746 chk->data = NULL;
8747 goto jump_out;
8750 chk->data->m_data += SCTP_MIN_OVERHEAD;
8751 drp = mtod(chk->data, struct sctp_pktdrop_chunk *);
8752 if (drp == NULL) {
8753 sctp_m_freem(chk->data);
8754 chk->data = NULL;
8755 goto jump_out;
8757 small_one = asoc->smallest_mtu;
8758 if (small_one > MCLBYTES) {
8759 /* Only one cluster worth of data MAX */
8760 small_one = MCLBYTES;
8762 chk->book_size = (chk->send_size + sizeof(struct sctp_pktdrop_chunk) +
8763 sizeof(struct sctphdr) + SCTP_MED_OVERHEAD);
8764 if (chk->book_size > small_one) {
8765 drp->ch.chunk_flags = SCTP_PACKET_TRUNCATED;
8766 drp->trunc_len = htons(chk->send_size);
8767 chk->send_size = small_one - (SCTP_MED_OVERHEAD +
8768 sizeof(struct sctp_pktdrop_chunk) +
8769 sizeof(struct sctphdr));
8770 len = chk->send_size;
8771 } else {
8772 /* no truncation needed */
8773 drp->ch.chunk_flags = 0;
8774 drp->trunc_len = htons(0);
8776 if (bad_crc) {
8777 drp->ch.chunk_flags |= SCTP_BADCRC;
8779 chk->send_size += sizeof(struct sctp_pktdrop_chunk);
8780 chk->data->m_pkthdr.len = chk->data->m_len = chk->send_size;
8781 chk->sent = SCTP_DATAGRAM_UNSENT;
8782 chk->snd_count = 0;
8783 if (net) {
8784 /* we should hit here */
8785 chk->whoTo = net;
8786 } else {
8787 chk->whoTo = asoc->primary_destination;
8789 chk->whoTo->ref_count++;
8790 chk->rec.chunk_id = SCTP_PACKET_DROPPED;
8791 drp->ch.chunk_type = SCTP_PACKET_DROPPED;
8792 drp->ch.chunk_length = htons(chk->send_size);
8793 spc = stcb->sctp_socket->so_rcv.ssb_hiwat;
8794 if (spc < 0) {
8795 spc = 0;
8797 drp->bottle_bw = htonl(spc);
8798 drp->current_onq = htonl(asoc->size_on_delivery_queue +
8799 asoc->size_on_reasm_queue +
8800 asoc->size_on_all_streams +
8801 asoc->my_rwnd_control_len +
8802 stcb->sctp_socket->so_rcv.ssb_cc);
8803 drp->reserved = 0;
8804 datap = drp->data;
8805 m_copydata(m, iphlen, len, datap);
8806 TAILQ_INSERT_TAIL(&stcb->asoc.control_send_queue, chk, sctp_next);
8807 asoc->ctrl_queue_cnt++;
8810 void
8811 sctp_send_cwr(struct sctp_tcb *stcb, struct sctp_nets *net, uint32_t high_tsn)
8813 struct sctp_association *asoc;
8814 struct sctp_cwr_chunk *cwr;
8815 struct sctp_tmit_chunk *chk;
8817 asoc = &stcb->asoc;
8818 TAILQ_FOREACH(chk, &asoc->control_send_queue, sctp_next) {
8819 if (chk->rec.chunk_id == SCTP_ECN_CWR) {
8820 /* found a previous ECN_CWR update it if needed */
8821 cwr = mtod(chk->data, struct sctp_cwr_chunk *);
8822 if (compare_with_wrap(high_tsn, ntohl(cwr->tsn),
8823 MAX_TSN)) {
8824 cwr->tsn = htonl(high_tsn);
8826 return;
8829 /* nope could not find one to update so we must build one */
8830 chk = (struct sctp_tmit_chunk *)SCTP_ZONE_GET(sctppcbinfo.ipi_zone_chunk);
8831 if (chk == NULL) {
8832 return;
8834 sctppcbinfo.ipi_count_chunk++;
8835 sctppcbinfo.ipi_gencnt_chunk++;
8836 chk->rec.chunk_id = SCTP_ECN_CWR;
8837 chk->asoc = &stcb->asoc;
8838 chk->send_size = sizeof(struct sctp_cwr_chunk);
8839 MGETHDR(chk->data, MB_DONTWAIT, MT_DATA);
8840 if (chk->data == NULL) {
8841 SCTP_ZONE_FREE(sctppcbinfo.ipi_zone_chunk, chk);
8842 sctppcbinfo.ipi_count_chunk--;
8843 if ((int)sctppcbinfo.ipi_count_chunk < 0) {
8844 panic("Chunk count is negative");
8846 sctppcbinfo.ipi_gencnt_chunk++;
8847 return;
8849 chk->data->m_data += SCTP_MIN_OVERHEAD;
8850 chk->data->m_pkthdr.len = chk->data->m_len = chk->send_size;
8851 chk->sent = SCTP_DATAGRAM_UNSENT;
8852 chk->snd_count = 0;
8853 chk->whoTo = net;
8854 chk->whoTo->ref_count++;
8855 cwr = mtod(chk->data, struct sctp_cwr_chunk *);
8856 cwr->ch.chunk_type = SCTP_ECN_CWR;
8857 cwr->ch.chunk_flags = 0;
8858 cwr->ch.chunk_length = htons(sizeof(struct sctp_cwr_chunk));
8859 cwr->tsn = htonl(high_tsn);
8860 TAILQ_INSERT_TAIL(&stcb->asoc.control_send_queue, chk, sctp_next);
8861 asoc->ctrl_queue_cnt++;
8863 static void
8864 sctp_reset_the_streams(struct sctp_tcb *stcb,
8865 struct sctp_stream_reset_request *req, int number_entries, uint16_t *list)
8867 int i;
8869 if (req->reset_flags & SCTP_RESET_ALL) {
8870 for (i=0; i<stcb->asoc.streamoutcnt; i++) {
8871 stcb->asoc.strmout[i].next_sequence_sent = 0;
8873 } else if (number_entries) {
8874 for (i=0; i<number_entries; i++) {
8875 if (list[i] >= stcb->asoc.streamoutcnt) {
8876 /* no such stream */
8877 continue;
8879 stcb->asoc.strmout[(list[i])].next_sequence_sent = 0;
8882 sctp_ulp_notify(SCTP_NOTIFY_STR_RESET_SEND, stcb, number_entries, (void *)list);
/*
 * sctp_send_str_reset_ack(): build and queue a STREAM RESET response
 * chunk answering the peer's reset request `req`. If the request carries
 * the next-expected sequence number we perform the reset (and optionally
 * send our own reciprocal request); if it is a retransmission we only
 * re-ack with the previously recorded TSN. The chunk is appended to the
 * association's control send queue; no wire I/O happens here.
 */
8885 void
8886 sctp_send_str_reset_ack(struct sctp_tcb *stcb,
8887 struct sctp_stream_reset_request *req)
8889 struct sctp_association *asoc;
8890 struct sctp_stream_reset_resp *strack;
8891 struct sctp_tmit_chunk *chk;
8892 uint32_t seq;
8893 int number_entries, i;
8894 uint8_t two_way=0, not_peer=0;
8895 uint16_t *list=NULL;
8897 asoc = &stcb->asoc;
/* RESET_ALL carries no explicit stream list; otherwise derive the
 * count from the parameter length in the request. */
8898 if (req->reset_flags & SCTP_RESET_ALL)
8899 number_entries = 0;
8900 else
8901 number_entries = (ntohs(req->ph.param_length) - sizeof(struct sctp_stream_reset_request)) / sizeof(uint16_t);
8903 chk = (struct sctp_tmit_chunk *)SCTP_ZONE_GET(sctppcbinfo.ipi_zone_chunk);
8904 if (chk == NULL) {
8905 return;
8907 sctppcbinfo.ipi_count_chunk++;
8908 sctppcbinfo.ipi_gencnt_chunk++;
8909 chk->rec.chunk_id = SCTP_STREAM_RESET;
8910 chk->asoc = &stcb->asoc;
8911 chk->send_size = sizeof(struct sctp_stream_reset_resp) + (number_entries * sizeof(uint16_t));
8912 MGETHDR(chk->data, MB_DONTWAIT, MT_DATA);
/* shared error exit: unwind the zone allocation and its accounting */
8913 if (chk->data == NULL) {
8914 strresp_jump_out:
8915 SCTP_ZONE_FREE(sctppcbinfo.ipi_zone_chunk, chk);
8916 sctppcbinfo.ipi_count_chunk--;
8917 if ((int)sctppcbinfo.ipi_count_chunk < 0) {
8918 panic("Chunk count is negative");
8920 sctppcbinfo.ipi_gencnt_chunk++;
8921 return;
/* reserve header room, then upgrade to a cluster if the 32-bit-padded
 * response does not fit in a plain mbuf */
8923 chk->data->m_data += SCTP_MIN_OVERHEAD;
8924 chk->data->m_pkthdr.len = chk->data->m_len = SCTP_SIZE32(chk->send_size);
8925 if (M_TRAILINGSPACE(chk->data) < (int)SCTP_SIZE32(chk->send_size)) {
8926 MCLGET(chk->data, MB_DONTWAIT);
8927 if ((chk->data->m_flags & M_EXT) == 0) {
8928 /* Give up */
8929 sctp_m_freem(chk->data);
8930 chk->data = NULL;
8931 goto strresp_jump_out;
8933 chk->data->m_data += SCTP_MIN_OVERHEAD;
8935 if (M_TRAILINGSPACE(chk->data) < (int)SCTP_SIZE32(chk->send_size)) {
8936 /* can't do it, no room */
8937 /* Give up */
8938 sctp_m_freem(chk->data);
8939 chk->data = NULL;
8940 goto strresp_jump_out;
8943 chk->sent = SCTP_DATAGRAM_UNSENT;
8944 chk->snd_count = 0;
8945 chk->whoTo = asoc->primary_destination;
8946 chk->whoTo->ref_count++;
8947 strack = mtod(chk->data, struct sctp_stream_reset_resp *);
8949 strack->ch.chunk_type = SCTP_STREAM_RESET;
8950 strack->ch.chunk_flags = 0;
8951 strack->ch.chunk_length = htons(chk->send_size);
8953 memset(strack->sr_resp.reset_pad, 0, sizeof(strack->sr_resp.reset_pad));
/* NOTE(review): ntohs() here looks like it should be htons(); the two
 * are the same byte swap on all supported platforms, so the wire value
 * is correct either way — confirm before "fixing". */
8955 strack->sr_resp.ph.param_type = ntohs(SCTP_STR_RESET_RESPONSE);
8956 strack->sr_resp.ph.param_length = htons((chk->send_size - sizeof(struct sctp_chunkhdr)));
/* NOTE(review): pad = send_size % 4 (not 4 - send_size % 4) only pads
 * to a 4-byte boundary because send_size is always even here (the list
 * entries are uint16_t), so the remainder is 0 or 2 — verify if the
 * struct layout ever changes. */
8960 if (chk->send_size % 4) {
8961 /* need a padding for the end */
8962 int pad;
8963 uint8_t *end;
8964 end = (uint8_t *)((caddr_t)strack + chk->send_size);
8965 pad = chk->send_size % 4;
8966 for (i = 0; i < pad; i++) {
8967 end[i] = 0;
8969 chk->send_size += pad;
8972 /* actual response */
8973 if (req->reset_flags & SCTP_RESET_YOUR) {
8974 strack->sr_resp.reset_flags = SCTP_RESET_PERFORMED;
8975 } else {
8976 strack->sr_resp.reset_flags = 0;
8979 /* copied from reset request */
8980 strack->sr_resp.reset_req_seq_resp = req->reset_req_seq;
8981 seq = ntohl(req->reset_req_seq);
8983 list = req->list_of_streams;
8984 /* copy the un-converted network byte order streams */
8985 for (i=0; i<number_entries; i++) {
8986 strack->sr_resp.list_of_streams[i] = list[i];
8988 if (asoc->str_reset_seq_in == seq) {
8989 /* is it the next expected? */
8990 asoc->str_reset_seq_in++;
8991 strack->sr_resp.reset_at_tsn = htonl(asoc->sending_seq);
8992 asoc->str_reset_sending_seq = asoc->sending_seq;
8993 if (number_entries) {
8994 int i;
8995 uint16_t temp;
/* convert them to host byte order (mutates the request in place) */
8996 /* convert them to host byte order */
8997 for (i=0 ; i<number_entries; i++) {
8998 temp = ntohs(list[i]);
8999 list[i] = temp;
9002 if (req->reset_flags & SCTP_RESET_YOUR) {
9003 /* reset my outbound streams */
9004 sctp_reset_the_streams(stcb, req , number_entries, list);
9006 if (req->reset_flags & SCTP_RECIPRICAL) {
9007 /* reset peer too */
9008 sctp_send_str_reset_req(stcb, number_entries, list, two_way, not_peer);
9011 } else {
9012 /* no its a retran so I must just ack and do nothing */
9013 strack->sr_resp.reset_at_tsn = htonl(asoc->str_reset_sending_seq);
9015 strack->sr_resp.cumulative_tsn = htonl(asoc->cumulative_tsn);
9016 TAILQ_INSERT_TAIL(&asoc->control_send_queue,
9017 chk,
9018 sctp_next);
9019 asoc->ctrl_queue_cnt++;
/*
 * sctp_send_str_reset_req(): build and queue a STREAM RESET request
 * chunk. Only one request may be outstanding at a time (gated by
 * asoc->stream_reset_outstanding); the chunk is queued on the control
 * send queue and the STRRESET retransmission timer is started.
 */
9023 void
9024 sctp_send_str_reset_req(struct sctp_tcb *stcb,
9025 int number_entrys, uint16_t *list, uint8_t two_way, uint8_t not_peer)
9027 /* Send a stream reset request. The number_entrys may be 0 and list NULL
9028 * if the request is to reset all streams. If two_way is true then we
9029 * not only request a RESET of the received streams but we also
9030 * request the peer to send a reset req to us too.
9031 * Flag combinations in table:
9033 * two_way | not_peer | = | Flags
9034 * ------------------------------
9035 * 0 | 0 | = | SCTP_RESET_YOUR (just the peer)
9036 * 1 | 0 | = | SCTP_RESET_YOUR | SCTP_RECIPRICAL (both sides)
9037 * 0 | 1 | = | Not a Valid Request (not anyone)
9038 * 1 | 1 | = | SCTP_RESET_RECIPRICAL (Just local host)
9040 struct sctp_association *asoc;
9041 struct sctp_stream_reset_req *strreq;
9042 struct sctp_tmit_chunk *chk;
9045 asoc = &stcb->asoc;
9046 if (asoc->stream_reset_outstanding) {
9047 /* Already one pending, must get ACK back
9048 * to clear the flag.
9050 return;
9053 if ((two_way == 0) && (not_peer == 1)) {
9054 /* not a valid request */
9055 return;
9058 chk = (struct sctp_tmit_chunk *)SCTP_ZONE_GET(sctppcbinfo.ipi_zone_chunk);
9059 if (chk == NULL) {
9060 return;
9062 sctppcbinfo.ipi_count_chunk++;
9063 sctppcbinfo.ipi_gencnt_chunk++;
9064 chk->rec.chunk_id = SCTP_STREAM_RESET;
9065 chk->asoc = &stcb->asoc;
9066 chk->send_size = sizeof(struct sctp_stream_reset_req) + (number_entrys * sizeof(uint16_t));
9067 MGETHDR(chk->data, MB_DONTWAIT, MT_DATA);
/* shared error exit: unwind the zone allocation and its accounting */
9068 if (chk->data == NULL) {
9069 strreq_jump_out:
9070 SCTP_ZONE_FREE(sctppcbinfo.ipi_zone_chunk, chk);
9071 sctppcbinfo.ipi_count_chunk--;
9072 if ((int)sctppcbinfo.ipi_count_chunk < 0) {
9073 panic("Chunk count is negative");
9075 sctppcbinfo.ipi_gencnt_chunk++;
9076 return;
/* reserve header room, then upgrade to a cluster if the 32-bit-padded
 * request does not fit in a plain mbuf */
9078 chk->data->m_data += SCTP_MIN_OVERHEAD;
9079 chk->data->m_pkthdr.len = chk->data->m_len = SCTP_SIZE32(chk->send_size);
9080 if (M_TRAILINGSPACE(chk->data) < (int)SCTP_SIZE32(chk->send_size)) {
9081 MCLGET(chk->data, MB_DONTWAIT);
9082 if ((chk->data->m_flags & M_EXT) == 0) {
9083 /* Give up */
9084 sctp_m_freem(chk->data);
9085 chk->data = NULL;
9086 goto strreq_jump_out;
9088 chk->data->m_data += SCTP_MIN_OVERHEAD;
9090 if (M_TRAILINGSPACE(chk->data) < (int)SCTP_SIZE32(chk->send_size)) {
9091 /* can't do it, no room */
9092 /* Give up */
9093 sctp_m_freem(chk->data);
9094 chk->data = NULL;
9095 goto strreq_jump_out;
9097 chk->sent = SCTP_DATAGRAM_UNSENT;
9098 chk->snd_count = 0;
9099 chk->whoTo = asoc->primary_destination;
9100 chk->whoTo->ref_count++;
9102 strreq = mtod(chk->data, struct sctp_stream_reset_req *);
9103 strreq->ch.chunk_type = SCTP_STREAM_RESET;
9104 strreq->ch.chunk_flags = 0;
9105 strreq->ch.chunk_length = htons(chk->send_size);
/* NOTE(review): ntohs() here looks like it should be htons(); the two
 * are the same byte swap on all supported platforms, so the wire value
 * is correct either way — confirm before "fixing". */
9107 strreq->sr_req.ph.param_type = ntohs(SCTP_STR_RESET_REQUEST);
9108 strreq->sr_req.ph.param_length = htons((chk->send_size - sizeof(struct sctp_chunkhdr)));
/* NOTE(review): pad = send_size % 4 only aligns because send_size is
 * always even (uint16_t list entries), so the remainder is 0 or 2. */
9110 if (chk->send_size % 4) {
9111 /* need a padding for the end */
9112 int pad, i;
9113 uint8_t *end;
9114 end = (uint8_t *)((caddr_t)strreq + chk->send_size);
9115 pad = chk->send_size % 4;
9116 for (i=0; i<pad; i++) {
9117 end[i] = 0;
9119 chk->send_size += pad;
/* encode the flag combination documented in the table above */
9122 strreq->sr_req.reset_flags = 0;
9123 if (number_entrys == 0) {
9124 strreq->sr_req.reset_flags |= SCTP_RESET_ALL;
9126 if (two_way == 0) {
9127 strreq->sr_req.reset_flags |= SCTP_RESET_YOUR;
9128 } else {
9129 if (not_peer == 0) {
9130 strreq->sr_req.reset_flags |= SCTP_RECIPRICAL | SCTP_RESET_YOUR;
9131 } else {
9132 strreq->sr_req.reset_flags |= SCTP_RECIPRICAL;
9135 memset(strreq->sr_req.reset_pad, 0, sizeof(strreq->sr_req.reset_pad));
9136 strreq->sr_req.reset_req_seq = htonl(asoc->str_reset_seq_out);
9137 if (number_entrys) {
9138 /* populate the specific entry's */
9139 int i;
9140 for (i=0; i < number_entrys; i++) {
9141 strreq->sr_req.list_of_streams[i] = htons(list[i]);
9144 TAILQ_INSERT_TAIL(&asoc->control_send_queue,
9145 chk,
9146 sctp_next);
9147 asoc->ctrl_queue_cnt++;
/* arm the reset retransmission timer and block further requests
 * until the peer's ACK clears the flag */
9148 sctp_timer_start(SCTP_TIMER_TYPE_STRRESET, stcb->sctp_ep, stcb, chk->whoTo);
9149 asoc->stream_reset_outstanding = 1;
9152 void
9153 sctp_send_abort(struct mbuf *m, int iphlen, struct sctphdr *sh, uint32_t vtag,
9154 struct mbuf *err_cause)
9157 * Formulate the abort message, and send it back down.
9159 struct mbuf *mout;
9160 struct sctp_abort_msg *abm;
9161 struct ip *iph, *iph_out;
9162 struct ip6_hdr *ip6, *ip6_out;
9163 int iphlen_out;
9165 /* don't respond to ABORT with ABORT */
9166 if (sctp_is_there_an_abort_here(m, iphlen, &vtag)) {
9167 if (err_cause)
9168 sctp_m_freem(err_cause);
9169 return;
9171 MGETHDR(mout, MB_DONTWAIT, MT_HEADER);
9172 if (mout == NULL) {
9173 if (err_cause)
9174 sctp_m_freem(err_cause);
9175 return;
9177 iph = mtod(m, struct ip *);
9178 iph_out = NULL;
9179 ip6_out = NULL;
9180 if (iph->ip_v == IPVERSION) {
9181 iph_out = mtod(mout, struct ip *);
9182 mout->m_len = sizeof(*iph_out) + sizeof(*abm);
9183 mout->m_next = err_cause;
9185 /* Fill in the IP header for the ABORT */
9186 iph_out->ip_v = IPVERSION;
9187 iph_out->ip_hl = (sizeof(struct ip) / 4);
9188 iph_out->ip_tos = (u_char)0;
9189 iph_out->ip_id = 0;
9190 iph_out->ip_off = 0;
9191 iph_out->ip_ttl = MAXTTL;
9192 iph_out->ip_p = IPPROTO_SCTP;
9193 iph_out->ip_src.s_addr = iph->ip_dst.s_addr;
9194 iph_out->ip_dst.s_addr = iph->ip_src.s_addr;
9195 /* let IP layer calculate this */
9196 iph_out->ip_sum = 0;
9198 iphlen_out = sizeof(*iph_out);
9199 abm = (struct sctp_abort_msg *)((caddr_t)iph_out + iphlen_out);
9200 } else if (iph->ip_v == (IPV6_VERSION >> 4)) {
9201 ip6 = (struct ip6_hdr *)iph;
9202 ip6_out = mtod(mout, struct ip6_hdr *);
9203 mout->m_len = sizeof(*ip6_out) + sizeof(*abm);
9204 mout->m_next = err_cause;
9206 /* Fill in the IP6 header for the ABORT */
9207 ip6_out->ip6_flow = ip6->ip6_flow;
9208 ip6_out->ip6_hlim = ip6_defhlim;
9209 ip6_out->ip6_nxt = IPPROTO_SCTP;
9210 ip6_out->ip6_src = ip6->ip6_dst;
9211 ip6_out->ip6_dst = ip6->ip6_src;
9213 iphlen_out = sizeof(*ip6_out);
9214 abm = (struct sctp_abort_msg *)((caddr_t)ip6_out + iphlen_out);
9215 } else {
9216 /* Currently not supported */
9217 return;
9220 abm->sh.src_port = sh->dest_port;
9221 abm->sh.dest_port = sh->src_port;
9222 abm->sh.checksum = 0;
9223 if (vtag == 0) {
9224 abm->sh.v_tag = sh->v_tag;
9225 abm->msg.ch.chunk_flags = SCTP_HAD_NO_TCB;
9226 } else {
9227 abm->sh.v_tag = htonl(vtag);
9228 abm->msg.ch.chunk_flags = 0;
9230 abm->msg.ch.chunk_type = SCTP_ABORT_ASSOCIATION;
9232 if (err_cause) {
9233 struct mbuf *m_tmp = err_cause;
9234 int err_len = 0;
9235 /* get length of the err_cause chain */
9236 while (m_tmp != NULL) {
9237 err_len += m_tmp->m_len;
9238 m_tmp = m_tmp->m_next;
9240 mout->m_pkthdr.len = mout->m_len + err_len;
9241 if (err_len % 4) {
9242 /* need pad at end of chunk */
9243 u_int32_t cpthis=0;
9244 int padlen;
9245 padlen = 4 - (mout->m_pkthdr.len % 4);
9246 m_copyback(mout, mout->m_pkthdr.len, padlen, (caddr_t)&cpthis);
9248 abm->msg.ch.chunk_length = htons(sizeof(abm->msg.ch) + err_len);
9249 } else {
9250 mout->m_pkthdr.len = mout->m_len;
9251 abm->msg.ch.chunk_length = htons(sizeof(abm->msg.ch));
9254 /* add checksum */
9255 if ((sctp_no_csum_on_loopback) &&
9256 (m->m_pkthdr.rcvif) &&
9257 (m->m_pkthdr.rcvif->if_type == IFT_LOOP)) {
9258 abm->sh.checksum = 0;
9259 } else {
9260 abm->sh.checksum = sctp_calculate_sum(mout, NULL, iphlen_out);
9263 /* zap the rcvif, it should be null */
9264 mout->m_pkthdr.rcvif = 0;
9265 if (iph_out != NULL) {
9266 struct route ro;
9268 /* zap the stack pointer to the route */
9269 bzero(&ro, sizeof ro);
9270 #ifdef SCTP_DEBUG
9271 if (sctp_debug_on & SCTP_DEBUG_OUTPUT2) {
9272 kprintf("sctp_send_abort calling ip_output:\n");
9273 sctp_print_address_pkt(iph_out, &abm->sh);
9275 #endif
9276 /* set IPv4 length */
9277 #if defined(__FreeBSD__)
9278 iph_out->ip_len = mout->m_pkthdr.len;
9279 #else
9280 iph_out->ip_len = htons(mout->m_pkthdr.len);
9281 #endif
9282 /* out it goes */
9283 ip_output(mout, 0, &ro, IP_RAWOUTPUT, NULL
9284 #if defined(__OpenBSD__) || (defined(__FreeBSD__) && __FreeBSD_version >= 480000) \
9285 || defined(__NetBSD__) || defined(__DragonFly__)
9286 , NULL
9287 #endif
9289 /* Free the route if we got one back */
9290 if (ro.ro_rt)
9291 RTFREE(ro.ro_rt);
9292 } else if (ip6_out != NULL) {
9293 #ifdef NEW_STRUCT_ROUTE
9294 struct route ro;
9295 #else
9296 struct route_in6 ro;
9297 #endif
9299 /* zap the stack pointer to the route */
9300 bzero(&ro, sizeof(ro));
9301 #ifdef SCTP_DEBUG
9302 if (sctp_debug_on & SCTP_DEBUG_OUTPUT2) {
9303 kprintf("sctp_send_abort calling ip6_output:\n");
9304 sctp_print_address_pkt((struct ip *)ip6_out, &abm->sh);
9306 #endif
9307 ip6_output(mout, NULL, &ro, 0, NULL, NULL
9308 #if defined(__NetBSD__)
9309 , NULL
9310 #endif
9311 #if (defined(__FreeBSD__) && __FreeBSD_version >= 480000) || defined(__DragonFly__)
9312 , NULL
9313 #endif
9315 /* Free the route if we got one back */
9316 if (ro.ro_rt)
9317 RTFREE(ro.ro_rt);
9319 sctp_pegs[SCTP_DATAGRAMS_SENT]++;
/*
 * sctp_send_operr_to(): wrap the error-cause chain `scm` in an
 * OPERATION-ERROR chunk plus SCTP and IP headers (built by reflecting
 * the received packet `m`) and send it straight down to ip_output /
 * ip6_output. Ownership of `scm` transfers here; it is consumed or
 * freed on every path. `vtag` is used verbatim (already wire order).
 */
9322 void
9323 sctp_send_operr_to(struct mbuf *m, int iphlen,
9324 struct mbuf *scm,
9325 uint32_t vtag)
9327 struct sctphdr *ihdr;
9328 int retcode;
9329 struct sctphdr *ohdr;
9330 struct sctp_chunkhdr *ophdr;
9332 struct ip *iph;
9333 #ifdef SCTP_DEBUG
9334 struct sockaddr_in6 lsa6, fsa6;
9335 #endif
9336 uint32_t val;
9337 iph = mtod(m, struct ip *);
9338 ihdr = (struct sctphdr *)((caddr_t)iph + iphlen);
9339 if (!(scm->m_flags & M_PKTHDR)) {
9340 /* must be a pkthdr */
9341 kprintf("Huh, not a packet header in send_operr\n");
9342 m_freem(scm);
9343 return;
/* prepend room for the SCTP common header + chunk header;
 * M_PREPEND frees the chain and NULLs scm on failure */
9345 M_PREPEND(scm, (sizeof(struct sctphdr) + sizeof(struct sctp_chunkhdr)), MB_DONTWAIT);
9346 if (scm == NULL) {
9347 /* can't send because we can't add a mbuf */
9348 return;
/* reflect ports from the offending packet */
9350 ohdr = mtod(scm, struct sctphdr *);
9351 ohdr->src_port = ihdr->dest_port;
9352 ohdr->dest_port = ihdr->src_port;
9353 ohdr->v_tag = vtag;
9354 ohdr->checksum = 0;
9355 ophdr = (struct sctp_chunkhdr *)(ohdr + 1);
9356 ophdr->chunk_type = SCTP_OPERATION_ERROR;
9357 ophdr->chunk_flags = 0;
9358 ophdr->chunk_length = htons(scm->m_pkthdr.len - sizeof(struct sctphdr));
/* pad the packet out to a 4-byte boundary (pad is not counted in
 * chunk_length, per the chunk padding rules) */
9359 if (scm->m_pkthdr.len % 4) {
9360 /* need padding */
9361 u_int32_t cpthis=0;
9362 int padlen;
9363 padlen = 4 - (scm->m_pkthdr.len % 4);
9364 m_copyback(scm, scm->m_pkthdr.len, padlen, (caddr_t)&cpthis);
/* checksum may be skipped on loopback if the sysctl says so */
9366 if ((sctp_no_csum_on_loopback) &&
9367 (m->m_pkthdr.rcvif) &&
9368 (m->m_pkthdr.rcvif->if_type == IFT_LOOP)) {
9369 val = 0;
9370 } else {
9371 val = sctp_calculate_sum(scm, NULL, 0);
9373 ohdr->checksum = val;
9374 if (iph->ip_v == IPVERSION) {
9375 /* V4 */
9376 struct ip *out;
9377 struct route ro;
9378 M_PREPEND(scm, sizeof(struct ip), MB_DONTWAIT);
9379 if (scm == NULL)
9380 return;
9381 bzero(&ro, sizeof ro);
/* build the IPv4 header by swapping src/dst of the input packet */
9382 out = mtod(scm, struct ip *);
9383 out->ip_v = iph->ip_v;
9384 out->ip_hl = (sizeof(struct ip)/4);
9385 out->ip_tos = iph->ip_tos;
9386 out->ip_id = iph->ip_id;
9387 out->ip_off = 0;
9388 out->ip_ttl = MAXTTL;
9389 out->ip_p = IPPROTO_SCTP;
9390 out->ip_sum = 0;
9391 out->ip_src = iph->ip_dst;
9392 out->ip_dst = iph->ip_src;
/* FreeBSD's ip_output expects host byte order ip_len */
9393 #if defined(__FreeBSD__)
9394 out->ip_len = scm->m_pkthdr.len;
9395 #else
9396 out->ip_len = htons(scm->m_pkthdr.len);
9397 #endif
9398 retcode = ip_output(scm, 0, &ro, IP_RAWOUTPUT, NULL
9399 #if defined(__OpenBSD__) || (defined(__FreeBSD__) && __FreeBSD_version >= 480000) \
9400 || defined(__NetBSD__) || defined(__DragonFly__)
9401 , NULL
9402 #endif
9404 sctp_pegs[SCTP_DATAGRAMS_SENT]++;
9405 /* Free the route if we got one back */
9406 if (ro.ro_rt)
9407 RTFREE(ro.ro_rt);
9408 } else {
9409 /* V6 */
9410 #ifdef NEW_STRUCT_ROUTE
9411 struct route ro;
9412 #else
9413 struct route_in6 ro;
9414 #endif
9415 struct ip6_hdr *out6, *in6;
9417 M_PREPEND(scm, sizeof(struct ip6_hdr), MB_DONTWAIT);
9418 if (scm == NULL)
9419 return;
9420 bzero(&ro, sizeof ro);
/* build the IPv6 header by swapping src/dst of the input packet */
9421 in6 = mtod(m, struct ip6_hdr *);
9422 out6 = mtod(scm, struct ip6_hdr *);
9423 out6->ip6_flow = in6->ip6_flow;
9424 out6->ip6_hlim = ip6_defhlim;
9425 out6->ip6_nxt = IPPROTO_SCTP;
9426 out6->ip6_src = in6->ip6_dst;
9427 out6->ip6_dst = in6->ip6_src;
9429 #ifdef SCTP_DEBUG
/* debug-only: pretty-print the chosen source/destination */
9430 bzero(&lsa6, sizeof(lsa6));
9431 lsa6.sin6_len = sizeof(lsa6);
9432 lsa6.sin6_family = AF_INET6;
9433 lsa6.sin6_addr = out6->ip6_src;
9434 bzero(&fsa6, sizeof(fsa6));
9435 fsa6.sin6_len = sizeof(fsa6);
9436 fsa6.sin6_family = AF_INET6;
9437 fsa6.sin6_addr = out6->ip6_dst;
9438 if (sctp_debug_on & SCTP_DEBUG_OUTPUT2) {
9439 kprintf("sctp_operr_to calling ipv6 output:\n");
9440 kprintf("src: ");
9441 sctp_print_address((struct sockaddr *)&lsa6);
9442 kprintf("dst ");
9443 sctp_print_address((struct sockaddr *)&fsa6);
9445 #endif /* SCTP_DEBUG */
9446 ip6_output(scm, NULL, &ro, 0, NULL, NULL
9447 #if defined(__NetBSD__)
9448 , NULL
9449 #endif
9450 #if (defined(__FreeBSD__) && __FreeBSD_version >= 480000) || defined(__DragonFly__)
9451 , NULL
9452 #endif
9454 sctp_pegs[SCTP_DATAGRAMS_SENT]++;
9455 /* Free the route if we got one back */
9456 if (ro.ro_rt)
9457 RTFREE(ro.ro_rt);
/*
 * sctp_copy_one(): copy `cpsz` bytes from the user's uio into the mbuf
 * chain headed by `m` (which must already be allocated; its m_len is
 * reset here). `resv_upfront` bytes of headroom are reserved in the
 * first mbuf only. *mbcnt accumulates the mbuf/cluster storage charged
 * to the caller's socket buffer accounting; it is zeroed on ENOMEM.
 * Returns 0, ENOMEM, or the uiomove() error. On error the partially
 * built chain is left attached to `m` — the caller owns and frees it.
 */
9461 static int
9462 sctp_copy_one(struct mbuf *m, struct uio *uio, int cpsz, int resv_upfront, int *mbcnt)
9464 int left, cancpy, willcpy, error;
9465 left = cpsz;
9467 if (m == NULL) {
9468 /* TSNH */
9469 *mbcnt = 0;
9470 return (ENOMEM);
9472 m->m_len = 0;
/* first mbuf: add a cluster when the payload + headroom won't fit */
9473 if ((left+resv_upfront) > (int)MHLEN) {
9474 MCLGET(m, MB_WAIT);
/* NOTE(review): MCLGET does not NULL out `m`; this check looks dead
 * (the M_EXT test below is the real failure check) — confirm against
 * the DragonFly mbuf macros before removing. */
9475 if (m == NULL) {
9476 *mbcnt = 0;
9477 return (ENOMEM);
9479 if ((m->m_flags & M_EXT) == 0) {
9480 *mbcnt = 0;
9481 return (ENOMEM);
9483 *mbcnt += m->m_ext.ext_size;
9485 *mbcnt += MSIZE;
9486 cancpy = M_TRAILINGSPACE(m);
9487 willcpy = min(cancpy, left);
/* only the first mbuf keeps resv_upfront bytes of headroom */
9488 if ((willcpy + resv_upfront) > cancpy) {
9489 willcpy -= resv_upfront;
9491 while (left > 0) {
9492 /* Align data to the end */
9493 if ((m->m_flags & M_EXT) == 0) {
9494 if (m->m_flags & M_PKTHDR) {
9495 MH_ALIGN(m, willcpy);
9496 } else {
9497 M_ALIGN(m, willcpy);
9499 } else {
9500 MC_ALIGN(m, willcpy);
/* copy this mbuf's share from userland; may sleep */
9502 error = uiomove(mtod(m, caddr_t), willcpy, uio);
9503 if (error) {
9504 return (error);
9506 m->m_len = willcpy;
9507 m->m_nextpkt = 0;
9508 left -= willcpy;
/* extend the chain (cluster-backed if needed) for the remainder */
9509 if (left > 0) {
9510 MGET(m->m_next, MB_WAIT, MT_DATA);
9511 if (m->m_next == NULL) {
9512 *mbcnt = 0;
9513 return (ENOMEM);
9515 m = m->m_next;
9516 m->m_len = 0;
9517 *mbcnt += MSIZE;
9518 if (left > (int)MHLEN) {
9519 MCLGET(m, MB_WAIT);
/* NOTE(review): same apparently-dead NULL check as above */
9520 if (m == NULL) {
9521 *mbcnt = 0;
9522 return (ENOMEM);
9524 if ((m->m_flags & M_EXT) == 0) {
9525 *mbcnt = 0;
9526 return (ENOMEM);
9528 *mbcnt += m->m_ext.ext_size;
9530 cancpy = M_TRAILINGSPACE(m);
9531 willcpy = min(cancpy, left);
9534 return (0);
9537 static int
9538 sctp_copy_it_in(struct sctp_inpcb *inp,
9539 struct sctp_tcb *stcb,
9540 struct sctp_association *asoc,
9541 struct sctp_nets *net,
9542 struct sctp_sndrcvinfo *srcv,
9543 struct uio *uio,
9544 int flags)
9546 /* This routine must be very careful in
9547 * its work. Protocol processing is
9548 * up and running so care must be taken to
9549 * spl...() when you need to do something
9550 * that may effect the stcb/asoc. The sb is
9551 * locked however. When data is copied the
9552 * protocol processing should be enabled since
9553 * this is a slower operation...
9555 struct socket *so;
9556 int error = 0;
9557 int frag_size, mbcnt = 0, mbcnt_e = 0;
9558 unsigned int sndlen;
9559 unsigned int tot_demand;
9560 int tot_out, dataout;
9561 struct sctp_tmit_chunk *chk;
9562 struct mbuf *mm;
9563 struct sctp_stream_out *strq;
9564 uint32_t my_vtag;
9565 int resv_in_first;
9567 crit_enter();
9568 so = stcb->sctp_socket;
9569 chk = NULL;
9570 mm = NULL;
9572 sndlen = uio->uio_resid;
9573 /* lock the socket buf */
9574 SOCKBUF_LOCK(&so->so_snd);
9575 error = ssb_lock(&so->so_snd, SBLOCKWAIT(flags));
9576 if (error)
9577 goto out_locked;
9579 /* will it ever fit ? */
9580 if (sndlen > so->so_snd.ssb_hiwat) {
9581 /* It will NEVER fit */
9582 error = EMSGSIZE;
9583 crit_exit();
9584 goto release;
9586 /* Do I need to block? */
9587 if ((so->so_snd.ssb_hiwat <
9588 (sndlen + asoc->total_output_queue_size)) ||
9589 (asoc->chunks_on_out_queue > sctp_max_chunks_on_queue) ||
9590 (asoc->total_output_mbuf_queue_size >
9591 so->so_snd.ssb_mbmax)
9593 /* prune any prsctp bufs out */
9594 if (asoc->peer_supports_prsctp) {
9595 sctp_prune_prsctp(stcb, asoc, srcv, sndlen);
9598 * We store off a pointer to the endpoint.
9599 * Since on return from this we must check to
9600 * see if an so_error is set. If so we may have
9601 * been reset and our stcb destroyed. Returning
9602 * an error will flow back to the user...
9604 while ((so->so_snd.ssb_hiwat <
9605 (sndlen + asoc->total_output_queue_size)) ||
9606 (asoc->chunks_on_out_queue >
9607 sctp_max_chunks_on_queue) ||
9608 (asoc->total_output_mbuf_queue_size >
9609 so->so_snd.ssb_mbmax)
9611 if (flags & (MSG_FNONBLOCKING|MSG_DONTWAIT)) {
9612 /* Non-blocking io in place */
9613 error = EWOULDBLOCK;
9614 goto release;
9616 inp->sctp_tcb_at_block = (void *)stcb;
9617 inp->error_on_block = 0;
9618 #ifdef SCTP_BLK_LOGGING
9619 sctp_log_block(SCTP_BLOCK_LOG_INTO_BLK,
9620 so, asoc);
9621 #endif
9622 ssb_unlock(&so->so_snd);
9623 SCTP_TCB_UNLOCK(stcb);
9624 error = ssb_wait(&so->so_snd);
9625 SCTP_INP_RLOCK(inp);
9626 if ((inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) ||
9627 (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE)) {
9628 /* Should I really unlock ? */
9629 SCTP_INP_RUNLOCK(inp);
9630 error = EFAULT;
9631 goto out_locked;
9633 SCTP_TCB_LOCK(stcb);
9634 SCTP_INP_RUNLOCK(inp);
9636 inp->sctp_tcb_at_block = 0;
9637 #ifdef SCTP_BLK_LOGGING
9638 sctp_log_block(SCTP_BLOCK_LOG_OUTOF_BLK,
9639 so, asoc);
9640 #endif
9641 if (inp->error_on_block) {
9643 * if our asoc was killed, the free code
9644 * (in sctp_pcb.c) will save a error in
9645 * here for us
9647 error = inp->error_on_block;
9648 crit_exit();
9649 goto out_locked;
9651 if (error) {
9652 crit_exit();
9653 goto out_locked;
9655 /* did we encounter a socket error? */
9656 if (so->so_error) {
9657 error = so->so_error;
9658 crit_exit();
9659 goto out_locked;
9661 error = ssb_lock(&so->so_snd, M_WAITOK);
9662 if (error) {
9663 /* Can't acquire the lock */
9664 crit_exit();
9665 goto out_locked;
9667 #if defined(__FreeBSD__) && __FreeBSD_version >= 502115
9668 if (so->so_rcv.sb_state & SBS_CANTSENDMORE) {
9669 #else
9670 if (so->so_state & SS_CANTSENDMORE) {
9671 #endif
9672 /* The socket is now set not to sendmore.. its gone */
9673 error = EPIPE;
9674 crit_exit();
9675 goto release;
9677 if (so->so_error) {
9678 error = so->so_error;
9679 crit_exit();
9680 goto release;
9682 if (asoc->peer_supports_prsctp) {
9683 sctp_prune_prsctp(stcb, asoc, srcv, sndlen);
9687 dataout = tot_out = uio->uio_resid;
9688 if (inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) {
9689 resv_in_first = SCTP_MED_OVERHEAD;
9690 } else {
9691 resv_in_first = SCTP_MED_V4_OVERHEAD;
9694 /* Are we aborting? */
9695 if (srcv->sinfo_flags & MSG_ABORT) {
9696 if ((SCTP_GET_STATE(asoc) != SCTP_STATE_COOKIE_WAIT) &&
9697 (SCTP_GET_STATE(asoc) != SCTP_STATE_COOKIE_ECHOED)) {
9698 /* It has to be up before we abort */
9699 /* how big is the user initiated abort? */
9701 /* I wonder about doing a MGET without a splnet set.
9702 * it is done that way in the sosend code so I guess
9703 * it is ok :-0
9705 MGETHDR(mm, MB_WAIT, MT_DATA);
9706 if (mm) {
9707 struct sctp_paramhdr *ph;
9709 tot_demand = (tot_out + sizeof(struct sctp_paramhdr));
9710 if (tot_demand > MHLEN) {
9711 if (tot_demand > MCLBYTES) {
9712 /* truncate user data */
9713 tot_demand = MCLBYTES;
9714 tot_out = tot_demand - sizeof(struct sctp_paramhdr);
9716 MCLGET(mm, MB_WAIT);
9717 if ((mm->m_flags & M_EXT) == 0) {
9718 /* truncate further */
9719 tot_demand = MHLEN;
9720 tot_out = tot_demand - sizeof(struct sctp_paramhdr);
9723 /* now move forward the data pointer */
9724 ph = mtod(mm, struct sctp_paramhdr *);
9725 ph->param_type = htons(SCTP_CAUSE_USER_INITIATED_ABT);
9726 ph->param_length = htons((sizeof(struct sctp_paramhdr) + tot_out));
9727 ph++;
9728 mm->m_pkthdr.len = tot_out + sizeof(struct sctp_paramhdr);
9729 mm->m_len = mm->m_pkthdr.len;
9730 error = uiomove((caddr_t)ph, (int)tot_out, uio);
9731 if (error) {
9733 * Here if we can't get his data we
9734 * still abort we just don't get to
9735 * send the users note :-0
9737 sctp_m_freem(mm);
9738 mm = NULL;
9741 ssb_unlock(&so->so_snd);
9742 SOCKBUF_UNLOCK(&so->so_snd);
9743 sctp_abort_an_association(stcb->sctp_ep, stcb,
9744 SCTP_RESPONSE_TO_USER_REQ,
9745 mm);
9746 mm = NULL;
9747 crit_exit();
9748 goto out_notlocked;
9750 crit_exit();
9751 goto release;
9754 /* Now can we send this? */
9755 if ((SCTP_GET_STATE(asoc) == SCTP_STATE_SHUTDOWN_SENT) ||
9756 (SCTP_GET_STATE(asoc) == SCTP_STATE_SHUTDOWN_ACK_SENT) ||
9757 (SCTP_GET_STATE(asoc) == SCTP_STATE_SHUTDOWN_RECEIVED) ||
9758 (asoc->state & SCTP_STATE_SHUTDOWN_PENDING)) {
9759 /* got data while shutting down */
9760 error = ECONNRESET;
9761 crit_exit();
9762 goto release;
9764 /* Is the stream no. valid? */
9765 if (srcv->sinfo_stream >= asoc->streamoutcnt) {
9766 /* Invalid stream number */
9767 error = EINVAL;
9768 crit_exit();
9769 goto release;
9771 if (asoc->strmout == NULL) {
9772 /* huh? software error */
9773 #ifdef SCTP_DEBUG
9774 if (sctp_debug_on & SCTP_DEBUG_OUTPUT1) {
9775 kprintf("software error in sctp_copy_it_in\n");
9777 #endif
9778 error = EFAULT;
9779 crit_exit();
9780 goto release;
9782 if ((srcv->sinfo_flags & MSG_EOF) &&
9783 (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_UDPTYPE) &&
9784 (tot_out == 0)) {
9785 crit_exit();
9786 goto zap_by_it_now;
9788 if (tot_out == 0) {
9789 /* not allowed */
9790 error = EMSGSIZE;
9791 crit_exit();
9792 goto release;
9794 /* save off the tag */
9795 my_vtag = asoc->my_vtag;
9796 strq = &asoc->strmout[srcv->sinfo_stream];
9797 /* First lets figure out the "chunking" point */
9798 frag_size = sctp_get_frag_point(stcb, asoc);
9800 /* two choices here, it all fits in one chunk or
9801 * we need multiple chunks.
9803 crit_exit();
9804 SOCKBUF_UNLOCK(&so->so_snd);
9805 if (tot_out <= frag_size) {
9806 /* no need to setup a template */
9807 chk = (struct sctp_tmit_chunk *)SCTP_ZONE_GET(sctppcbinfo.ipi_zone_chunk);
9808 if (chk == NULL) {
9809 error = ENOMEM;
9810 SOCKBUF_LOCK(&so->so_snd);
9811 goto release;
9813 sctppcbinfo.ipi_count_chunk++;
9814 sctppcbinfo.ipi_gencnt_chunk++;
9815 asoc->chunks_on_out_queue++;
9816 MGETHDR(mm, MB_WAIT, MT_DATA);
9817 if (mm == NULL) {
9818 error = ENOMEM;
9819 goto clean_up;
9821 error = sctp_copy_one(mm, uio, tot_out, resv_in_first, &mbcnt_e);
9822 if (error)
9823 goto clean_up;
9824 sctp_prepare_chunk(chk, stcb, srcv, strq, net);
9825 chk->mbcnt = mbcnt_e;
9826 mbcnt += mbcnt_e;
9827 mbcnt_e = 0;
9828 mm->m_pkthdr.len = tot_out;
9829 chk->data = mm;
9830 mm = NULL;
9832 /* the actual chunk flags */
9833 chk->rec.data.rcv_flags |= SCTP_DATA_NOT_FRAG;
9834 chk->whoTo->ref_count++;
9836 /* fix up the send_size if it is not present */
9837 chk->send_size = tot_out;
9838 chk->book_size = chk->send_size;
9839 /* ok, we are commited */
9840 if ((srcv->sinfo_flags & MSG_UNORDERED) == 0) {
9841 /* bump the ssn if we are unordered. */
9842 strq->next_sequence_sent++;
9844 if (chk->flags & SCTP_PR_SCTP_BUFFER) {
9845 asoc->sent_queue_cnt_removeable++;
9847 crit_enter();
9848 if ((asoc->state == 0) ||
9849 (my_vtag != asoc->my_vtag) ||
9850 (so != inp->sctp_socket) ||
9851 (inp->sctp_socket == 0)) {
9852 /* connection was aborted */
9853 crit_exit();
9854 error = ECONNRESET;
9855 goto clean_up;
9857 asoc->stream_queue_cnt++;
9858 TAILQ_INSERT_TAIL(&strq->outqueue, chk, sctp_next);
9859 /* now check if this stream is on the wheel */
9860 if ((strq->next_spoke.tqe_next == NULL) &&
9861 (strq->next_spoke.tqe_prev == NULL)) {
9862 /* Insert it on the wheel since it is not
9863 * on it currently
9865 sctp_insert_on_wheel(asoc, strq);
9867 crit_exit();
9868 clean_up:
9869 if (error) {
9870 SCTP_ZONE_FREE(sctppcbinfo.ipi_zone_chunk, chk);
9871 sctppcbinfo.ipi_count_chunk--;
9872 if ((int)sctppcbinfo.ipi_count_chunk < 0) {
9873 panic("Chunk count is negative");
9875 SOCKBUF_LOCK(&so->so_snd);
9876 goto release;
9878 } else {
9879 /* we need to setup a template */
9880 struct sctp_tmit_chunk template;
9881 struct sctpchunk_listhead tmp;
9883 /* setup the template */
9884 sctp_prepare_chunk(&template, stcb, srcv, strq, net);
9886 /* Prepare the temp list */
9887 TAILQ_INIT(&tmp);
9889 /* Template is complete, now time for the work */
9890 while (tot_out > 0) {
9891 /* Get a chunk */
9892 chk = (struct sctp_tmit_chunk *)SCTP_ZONE_GET(sctppcbinfo.ipi_zone_chunk);
9893 if (chk == NULL) {
9895 * ok we must spin through and dump anything
9896 * we have allocated and then jump to the
9897 * no_membad
9899 error = ENOMEM;
9901 sctppcbinfo.ipi_count_chunk++;
9902 asoc->chunks_on_out_queue++;
9904 sctppcbinfo.ipi_gencnt_chunk++;
9905 *chk = template;
9906 chk->whoTo->ref_count++;
9907 MGETHDR(chk->data, MB_WAIT, MT_DATA);
9908 if (chk->data == NULL) {
9909 error = ENOMEM;
9910 goto temp_clean_up;
9912 tot_demand = min(tot_out, frag_size);
9913 error = sctp_copy_one(chk->data, uio, tot_demand , resv_in_first, &mbcnt_e);
9914 if (error)
9915 goto temp_clean_up;
9916 /* now fix the chk->send_size */
9917 chk->mbcnt = mbcnt_e;
9918 mbcnt += mbcnt_e;
9919 mbcnt_e = 0;
9920 chk->send_size = tot_demand;
9921 chk->data->m_pkthdr.len = tot_demand;
9922 chk->book_size = chk->send_size;
9923 if (chk->flags & SCTP_PR_SCTP_BUFFER) {
9924 asoc->sent_queue_cnt_removeable++;
9926 TAILQ_INSERT_TAIL(&tmp, chk, sctp_next);
9927 tot_out -= tot_demand;
9929 /* Now the tmp list holds all chunks and data */
9930 if ((srcv->sinfo_flags & MSG_UNORDERED) == 0) {
9931 /* bump the ssn if we are unordered. */
9932 strq->next_sequence_sent++;
9934 /* Mark the first/last flags. This will
9935 * result int a 3 for a single item on the list
9937 chk = TAILQ_FIRST(&tmp);
9938 chk->rec.data.rcv_flags |= SCTP_DATA_FIRST_FRAG;
9939 chk = TAILQ_LAST(&tmp, sctpchunk_listhead);
9940 chk->rec.data.rcv_flags |= SCTP_DATA_LAST_FRAG;
9942 /* now move it to the streams actual queue */
9943 /* first stop protocol processing */
9944 crit_enter();
9945 if ((asoc->state == 0) ||
9946 (my_vtag != asoc->my_vtag) ||
9947 (so != inp->sctp_socket) ||
9948 (inp->sctp_socket == 0)) {
9949 /* connection was aborted */
9950 crit_exit();
9951 error = ECONNRESET;
9952 goto temp_clean_up;
9954 chk = TAILQ_FIRST(&tmp);
9955 while (chk) {
9956 chk->data->m_nextpkt = 0;
9957 TAILQ_REMOVE(&tmp, chk, sctp_next);
9958 asoc->stream_queue_cnt++;
9959 TAILQ_INSERT_TAIL(&strq->outqueue, chk, sctp_next);
9960 chk = TAILQ_FIRST(&tmp);
9962 /* now check if this stream is on the wheel */
9963 if ((strq->next_spoke.tqe_next == NULL) &&
9964 (strq->next_spoke.tqe_prev == NULL)) {
9965 /* Insert it on the wheel since it is not
9966 * on it currently
9968 sctp_insert_on_wheel(asoc, strq);
9970 /* Ok now we can allow pping */
9971 crit_exit();
9972 temp_clean_up:
9973 if (error) {
9974 SOCKBUF_LOCK(&so->so_snd);
9975 chk = TAILQ_FIRST(&tmp);
9976 while (chk) {
9977 if (chk->data) {
9978 sctp_m_freem(chk->data);
9979 chk->data = NULL;
9981 TAILQ_REMOVE(&tmp, chk, sctp_next);
9982 SCTP_ZONE_FREE(sctppcbinfo.ipi_zone_chunk, chk);
9983 sctppcbinfo.ipi_count_chunk--;
9984 asoc->chunks_on_out_queue--;
9985 if ((int)sctppcbinfo.ipi_count_chunk < 0) {
9986 panic("Chunk count is negative");
9988 sctppcbinfo.ipi_gencnt_chunk++;
9989 chk = TAILQ_FIRST(&tmp);
9991 goto release;
9994 zap_by_it_now:
9995 #ifdef SCTP_MBCNT_LOGGING
9996 sctp_log_mbcnt(SCTP_LOG_MBCNT_INCREASE,
9997 asoc->total_output_queue_size,
9998 dataout,
9999 asoc->total_output_mbuf_queue_size,
10000 mbcnt);
10001 #endif
10002 crit_enter();
10003 SOCKBUF_LOCK(&so->so_snd);
10004 asoc->total_output_queue_size += dataout;
10005 asoc->total_output_mbuf_queue_size += mbcnt;
10006 if ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) ||
10007 (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL)) {
10008 so->so_snd.ssb_cc += dataout;
10009 so->so_snd.ssb_mbcnt += mbcnt;
10011 if ((srcv->sinfo_flags & MSG_EOF) &&
10012 (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_UDPTYPE)
10014 int some_on_streamwheel = 0;
10015 error = 0;
10016 if (!TAILQ_EMPTY(&asoc->out_wheel)) {
10017 /* Check to see if some data queued */
10018 struct sctp_stream_out *outs;
10019 TAILQ_FOREACH(outs, &asoc->out_wheel, next_spoke) {
10020 if (!TAILQ_EMPTY(&outs->outqueue)) {
10021 some_on_streamwheel = 1;
10022 break;
10026 if (TAILQ_EMPTY(&asoc->send_queue) &&
10027 TAILQ_EMPTY(&asoc->sent_queue) &&
10028 (some_on_streamwheel == 0)) {
10029 /* there is nothing queued to send, so I'm done... */
10030 if ((SCTP_GET_STATE(asoc) != SCTP_STATE_SHUTDOWN_SENT) &&
10031 (SCTP_GET_STATE(asoc) != SCTP_STATE_SHUTDOWN_ACK_SENT)) {
10032 /* only send SHUTDOWN the first time through */
10033 #ifdef SCTP_DEBUG
10034 if (sctp_debug_on & SCTP_DEBUG_OUTPUT4) {
10035 kprintf("%s:%d sends a shutdown\n",
10036 __FILE__,
10037 __LINE__
10040 #endif
10041 sctp_send_shutdown(stcb, stcb->asoc.primary_destination);
10042 asoc->state = SCTP_STATE_SHUTDOWN_SENT;
10043 sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWN, stcb->sctp_ep, stcb,
10044 asoc->primary_destination);
10045 sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNGUARD, stcb->sctp_ep, stcb,
10046 asoc->primary_destination);
10048 } else {
10050 * we still got (or just got) data to send, so set
10051 * SHUTDOWN_PENDING
10054 * XXX sockets draft says that MSG_EOF should be sent
10055 * with no data. currently, we will allow user data
10056 * to be sent first and move to SHUTDOWN-PENDING
10058 asoc->state |= SCTP_STATE_SHUTDOWN_PENDING;
10061 crit_exit();
10062 #ifdef SCTP_DEBUG
10063 if (sctp_debug_on & SCTP_DEBUG_OUTPUT2) {
10064 kprintf("++total out:%d total_mbuf_out:%d\n",
10065 (int)asoc->total_output_queue_size,
10066 (int)asoc->total_output_mbuf_queue_size);
10068 #endif
10070 release:
10071 ssb_unlock(&so->so_snd);
10072 out_locked:
10073 SOCKBUF_UNLOCK(&so->so_snd);
10074 out_notlocked:
10075 if (mm)
10076 sctp_m_freem(mm);
10077 return (error);
10082 sctp_sosend(struct socket *so,
10083 #ifdef __NetBSD__
10084 struct mbuf *addr_mbuf,
10085 #else
10086 struct sockaddr *addr,
10087 #endif
10088 struct uio *uio,
10089 struct mbuf *top,
10090 struct mbuf *control,
10091 #if defined(__NetBSD__) || defined(__APPLE__)
10092 int flags
10093 #else
10094 int flags,
10095 #if (defined(__FreeBSD__) && __FreeBSD_version >= 500000) || defined(__DragonFly__)
10096 struct thread *p
10097 #else
10098 struct proc *p
10099 #endif
10100 #endif
10103 unsigned int sndlen;
10104 int error, use_rcvinfo;
10105 int queue_only = 0, queue_only_for_init=0;
10106 int un_sent = 0;
10107 int now_filled=0;
10108 struct sctp_inpcb *inp;
10109 struct sctp_tcb *stcb=NULL;
10110 struct sctp_sndrcvinfo srcv;
10111 struct timeval now;
10112 struct sctp_nets *net;
10113 struct sctp_association *asoc;
10114 struct sctp_inpcb *t_inp;
10115 int create_lock_applied = 0;
10116 #if defined(__APPLE__)
10117 struct proc *p = current_proc();
10118 #elif defined(__NetBSD__)
10119 struct proc *p = curproc; /* XXX */
10120 struct sockaddr *addr = NULL;
10121 if (addr_mbuf)
10122 addr = mtod(addr_mbuf, struct sockaddr *);
10123 #endif
10125 error = use_rcvinfo = 0;
10126 net = NULL;
10127 stcb = NULL;
10128 asoc = NULL;
10129 t_inp = inp = (struct sctp_inpcb *)so->so_pcb;
10130 if (uio)
10131 sndlen = uio->uio_resid;
10132 else
10133 sndlen = top->m_pkthdr.len;
10136 crit_enter();
10138 if ((inp->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) &&
10139 (inp->sctp_flags & SCTP_PCB_FLAGS_ACCEPTING)) {
10140 /* The listner can NOT send */
10141 error = EFAULT;
10142 crit_exit();
10143 goto out;
10145 if (addr) {
10146 SCTP_ASOC_CREATE_LOCK(inp);
10147 if ((inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) ||
10148 (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE)) {
10149 /* Should I really unlock ? */
10150 error = EFAULT;
10151 crit_exit();
10152 goto out;
10155 create_lock_applied = 1;
10156 if (((inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) == 0) &&
10157 (addr->sa_family == AF_INET6)) {
10158 error = EINVAL;
10159 crit_exit();
10160 goto out;
10163 /* now we must find the assoc */
10164 if (inp->sctp_flags & SCTP_PCB_FLAGS_CONNECTED) {
10165 SCTP_INP_RLOCK(inp);
10166 stcb = LIST_FIRST(&inp->sctp_asoc_list);
10167 if (stcb == NULL) {
10168 SCTP_INP_RUNLOCK(inp);
10169 error = ENOTCONN;
10170 crit_exit();
10171 goto out;
10173 SCTP_TCB_LOCK(stcb);
10174 SCTP_INP_RUNLOCK(inp);
10175 net = stcb->asoc.primary_destination;
10177 /* get control */
10178 if (control) {
10179 /* process cmsg snd/rcv info (maybe a assoc-id) */
10180 if (sctp_find_cmsg(SCTP_SNDRCV, (void *)&srcv, control,
10181 sizeof(srcv))) {
10182 /* got one */
10183 if (srcv.sinfo_flags & MSG_SENDALL) {
10184 /* its a sendall */
10185 sctppcbinfo.mbuf_track--;
10186 sctp_m_freem(control);
10188 if (create_lock_applied) {
10189 SCTP_ASOC_CREATE_UNLOCK(inp);
10190 create_lock_applied = 0;
10192 return (sctp_sendall(inp, uio, top, &srcv));
10194 use_rcvinfo = 1;
10197 if (stcb == NULL) {
10198 /* Need to do a lookup */
10199 if (use_rcvinfo && srcv.sinfo_assoc_id) {
10200 stcb = sctp_findassociation_ep_asocid(inp, srcv.sinfo_assoc_id);
10202 * Question: Should I error here if the assoc_id is
10203 * no longer valid? i.e. I can't find it?
10205 if ((stcb) &&
10206 (addr != NULL)) {
10207 /* Must locate the net structure */
10208 net = sctp_findnet(stcb, addr);
10211 if (stcb == NULL) {
10212 if (addr != NULL) {
10213 /* Since we did not use findep we must
10214 * increment it, and if we don't find a
10215 * tcb decrement it.
10217 SCTP_INP_WLOCK(inp);
10218 SCTP_INP_INCR_REF(inp);
10219 SCTP_INP_WUNLOCK(inp);
10220 stcb = sctp_findassociation_ep_addr(&t_inp, addr, &net, NULL, NULL);
10221 if (stcb == NULL) {
10222 SCTP_INP_WLOCK(inp);
10223 SCTP_INP_DECR_REF(inp);
10224 SCTP_INP_WUNLOCK(inp);
10229 if ((stcb == NULL) &&
10230 (inp->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE)) {
10231 error = ENOTCONN;
10232 crit_exit();
10233 goto out;
10234 } else if ((stcb == NULL) && (addr == NULL)) {
10235 error = ENOENT;
10236 crit_exit();
10237 goto out;
10238 } else if (stcb == NULL) {
10239 /* UDP style, we must go ahead and start the INIT process */
10240 if ((use_rcvinfo) &&
10241 (srcv.sinfo_flags & MSG_ABORT)) {
10242 /* User asks to abort a non-existant asoc */
10243 error = ENOENT;
10244 crit_exit();
10245 goto out;
10247 /* get an asoc/stcb struct */
10248 stcb = sctp_aloc_assoc(inp, addr, 1, &error, 0);
10249 if (stcb == NULL) {
10250 /* Error is setup for us in the call */
10251 crit_exit();
10252 goto out;
10254 if (create_lock_applied) {
10255 SCTP_ASOC_CREATE_UNLOCK(inp);
10256 create_lock_applied = 0;
10257 } else {
10258 kprintf("Huh-3? create lock should have been on??\n");
10260 /* Turn on queue only flag to prevent data from being sent */
10261 queue_only = 1;
10262 asoc = &stcb->asoc;
10263 asoc->state = SCTP_STATE_COOKIE_WAIT;
10264 SCTP_GETTIME_TIMEVAL(&asoc->time_entered);
10265 if (control) {
10266 /* see if a init structure exists in cmsg headers */
10267 struct sctp_initmsg initm;
10268 int i;
10269 if (sctp_find_cmsg(SCTP_INIT, (void *)&initm, control, sizeof(initm))) {
10270 /* we have an INIT override of the default */
10271 if (initm.sinit_max_attempts)
10272 asoc->max_init_times = initm.sinit_max_attempts;
10273 if (initm.sinit_num_ostreams)
10274 asoc->pre_open_streams = initm.sinit_num_ostreams;
10275 if (initm.sinit_max_instreams)
10276 asoc->max_inbound_streams = initm.sinit_max_instreams;
10277 if (initm.sinit_max_init_timeo)
10278 asoc->initial_init_rto_max = initm.sinit_max_init_timeo;
10279 if (asoc->streamoutcnt < asoc->pre_open_streams) {
10280 /* Default is NOT correct */
10281 #ifdef SCTP_DEBUG
10282 if (sctp_debug_on & SCTP_DEBUG_OUTPUT1) {
10283 kprintf("Ok, defout:%d pre_open:%d\n",
10284 asoc->streamoutcnt, asoc->pre_open_streams);
10286 #endif
10287 FREE(asoc->strmout, M_PCB);
10288 asoc->strmout = NULL;
10289 asoc->streamoutcnt = asoc->pre_open_streams;
10291 /* What happesn if this fails? .. we panic ...*/
10292 MALLOC(asoc->strmout,
10293 struct sctp_stream_out *,
10294 asoc->streamoutcnt *
10295 sizeof(struct sctp_stream_out),
10296 M_PCB, MB_WAIT);
10297 for (i = 0; i < asoc->streamoutcnt; i++) {
10299 * inbound side must be set to 0xffff,
10300 * also NOTE when we get the INIT-ACK
10301 * back (for INIT sender) we MUST
10302 * reduce the count (streamoutcnt) but
10303 * first check if we sent to any of the
10304 * upper streams that were dropped (if
10305 * some were). Those that were dropped
10306 * must be notified to the upper layer
10307 * as failed to send.
10309 asoc->strmout[i].next_sequence_sent = 0x0;
10310 TAILQ_INIT(&asoc->strmout[i].outqueue);
10311 asoc->strmout[i].stream_no = i;
10312 asoc->strmout[i].next_spoke.tqe_next = 0;
10313 asoc->strmout[i].next_spoke.tqe_prev = 0;
10319 /* out with the INIT */
10320 queue_only_for_init = 1;
10321 sctp_send_initiate(inp, stcb);
10323 * we may want to dig in after this call and adjust the MTU
10324 * value. It defaulted to 1500 (constant) but the ro structure
10325 * may now have an update and thus we may need to change it
10326 * BEFORE we append the message.
10328 net = stcb->asoc.primary_destination;
10329 asoc = &stcb->asoc;
10330 } else {
10331 asoc = &stcb->asoc;
10333 if (create_lock_applied) {
10334 SCTP_ASOC_CREATE_UNLOCK(inp);
10335 create_lock_applied = 0;
10337 if ((SCTP_GET_STATE(asoc) == SCTP_STATE_COOKIE_WAIT) ||
10338 (SCTP_GET_STATE(asoc) == SCTP_STATE_COOKIE_ECHOED)) {
10339 queue_only = 1;
10341 if (use_rcvinfo == 0) {
10342 /* Grab the default stuff from the asoc */
10343 srcv = stcb->asoc.def_send;
10345 /* we are now done with all control */
10346 if (control) {
10347 sctp_m_freem(control);
10348 control = NULL;
10351 if ((SCTP_GET_STATE(asoc) == SCTP_STATE_SHUTDOWN_SENT) ||
10352 (SCTP_GET_STATE(asoc) == SCTP_STATE_SHUTDOWN_RECEIVED) ||
10353 (SCTP_GET_STATE(asoc) == SCTP_STATE_SHUTDOWN_ACK_SENT) ||
10354 (asoc->state & SCTP_STATE_SHUTDOWN_PENDING)) {
10355 if ((use_rcvinfo) &&
10356 (srcv.sinfo_flags & MSG_ABORT)) {
10358 } else {
10359 error = ECONNRESET;
10360 crit_exit();
10361 goto out;
10364 /* Ok, we will attempt a msgsnd :> */
10365 if (p)
10366 #if (defined(__FreeBSD__) && __FreeBSD_version >= 500000) || defined(__DragonFly__)
10367 p->td_lwp->lwp_ru.ru_msgsnd++;
10368 #else
10369 p->p_stats->p_ru.ru_msgsnd++;
10370 #endif
10372 if (stcb) {
10373 if (net && ((srcv.sinfo_flags & MSG_ADDR_OVER))) {
10374 /* we take the override or the unconfirmed */
10376 } else {
10377 net = stcb->asoc.primary_destination;
10381 if (top == NULL) {
10382 /* Must copy it all in from user land. The
10383 * socket buf is locked but we don't suspend
10384 * protocol processing until we are ready to
10385 * send/queue it.
10387 crit_exit();
10388 error = sctp_copy_it_in(inp, stcb, asoc, net, &srcv, uio, flags);
10389 if (error)
10390 goto out;
10391 } else {
10392 /* Here we must either pull in the user data to chunk
10393 * buffers, or use top to do a msg_append.
10395 error = sctp_msg_append(stcb, net, top, &srcv, flags);
10396 crit_exit();
10397 if (error)
10398 goto out;
10399 /* zap the top since it is now being used */
10400 top = 0;
10403 if (net->flight_size > net->cwnd) {
10404 sctp_pegs[SCTP_SENDTO_FULL_CWND]++;
10405 queue_only = 1;
10407 } else if (asoc->ifp_had_enobuf) {
10408 sctp_pegs[SCTP_QUEONLY_BURSTLMT]++;
10409 queue_only = 1;
10410 } else {
10411 un_sent = ((stcb->asoc.total_output_queue_size - stcb->asoc.total_flight) +
10412 ((stcb->asoc.chunks_on_out_queue - stcb->asoc.total_flight_count) * sizeof(struct sctp_data_chunk)) +
10413 SCTP_MED_OVERHEAD);
10415 if (((inp->sctp_flags & SCTP_PCB_FLAGS_NODELAY) == 0) &&
10416 (stcb->asoc.total_flight > 0) &&
10417 (un_sent < (int)stcb->asoc.smallest_mtu)) {
10419 /* Ok, Nagle is set on and we have data outstanding. Don't
10420 * send anything and let SACKs drive out the data unless we
10421 * have a "full" segment to send.
10423 sctp_pegs[SCTP_NAGLE_NOQ]++;
10424 queue_only = 1;
10425 } else {
10426 sctp_pegs[SCTP_NAGLE_OFF]++;
10429 if (queue_only_for_init) {
10430 /* It is possible to have a turn around of the
10431 * INIT/INIT-ACK/COOKIE before I have a chance to
10432 * copy in the data. In such a case I DO want to
10433 * send it out by reversing the queue only flag.
10435 if ((SCTP_GET_STATE(asoc) != SCTP_STATE_COOKIE_WAIT) ||
10436 (SCTP_GET_STATE(asoc) != SCTP_STATE_COOKIE_ECHOED)) {
10437 /* yep, reverse it */
10438 queue_only = 0;
10442 if ((queue_only == 0) && (stcb->asoc.peers_rwnd && un_sent)) {
10443 /* we can attempt to send too.*/
10444 #ifdef SCTP_DEBUG
10445 if (sctp_debug_on & SCTP_DEBUG_OUTPUT1) {
10446 kprintf("USR Send calls sctp_chunk_output\n");
10448 #endif
10449 crit_enter();
10450 sctp_pegs[SCTP_OUTPUT_FRM_SND]++;
10451 sctp_chunk_output(inp, stcb, 0);
10452 crit_exit();
10453 } else if ((queue_only == 0) &&
10454 (stcb->asoc.peers_rwnd == 0) &&
10455 (stcb->asoc.total_flight == 0)) {
10456 /* We get to have a probe outstanding */
10457 crit_enter();
10458 sctp_from_user_send = 1;
10459 sctp_chunk_output(inp, stcb, 0);
10460 sctp_from_user_send = 0;
10461 crit_exit();
10463 } else if (!TAILQ_EMPTY(&stcb->asoc.control_send_queue)) {
10464 int num_out, reason, cwnd_full;
10465 /* Here we do control only */
10466 crit_enter();
10467 sctp_med_chunk_output(inp, stcb, &stcb->asoc, &num_out,
10468 &reason, 1, &cwnd_full, 1, &now, &now_filled);
10469 crit_exit();
10471 #ifdef SCTP_DEBUG
10472 if (sctp_debug_on & SCTP_DEBUG_OUTPUT1) {
10473 kprintf("USR Send complete qo:%d prw:%d unsent:%d tf:%d cooq:%d toqs:%d \n",
10474 queue_only, stcb->asoc.peers_rwnd, un_sent,
10475 stcb->asoc.total_flight, stcb->asoc.chunks_on_out_queue,
10476 stcb->asoc.total_output_queue_size);
10478 #endif
10479 out:
10480 if (create_lock_applied) {
10481 SCTP_ASOC_CREATE_UNLOCK(inp);
10482 create_lock_applied = 0;
10484 if (stcb)
10485 SCTP_TCB_UNLOCK(stcb);
10486 if (top)
10487 sctp_m_freem(top);
10488 if (control)
10489 sctp_m_freem(control);
10490 return (error);