1 /* $KAME: sctp_output.c,v 1.46 2005/03/06 16:04:17 itojun Exp $ */
2 /* $DragonFly: src/sys/netinet/sctp_output.c,v 1.12 2007/11/10 17:55:25 swildner Exp $ */
5 * Copyright (C) 2002, 2003, 2004 Cisco Systems Inc,
8 * Redistribution and use in source and binary forms, with or without
9 * modification, are permitted provided that the following conditions
11 * 1. Redistributions of source code must retain the above copyright
12 * notice, this list of conditions and the following disclaimer.
13 * 2. Redistributions in binary form must reproduce the above copyright
14 * notice, this list of conditions and the following disclaimer in the
15 * documentation and/or other materials provided with the distribution.
16 * 3. Neither the name of the project nor the names of its contributors
17 * may be used to endorse or promote products derived from this software
18 * without specific prior written permission.
20 * THIS SOFTWARE IS PROVIDED BY THE PROJECT AND CONTRIBUTORS ``AS IS'' AND
21 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
22 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
23 * ARE DISCLAIMED. IN NO EVENT SHALL THE PROJECT OR CONTRIBUTORS BE LIABLE
24 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
25 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
26 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
27 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
28 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
29 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
33 #if !(defined(__OpenBSD__) || defined (__APPLE__))
34 #include "opt_ipsec.h"
36 #if defined(__FreeBSD__) || defined(__DragonFly__)
37 #include "opt_compat.h"
38 #include "opt_inet6.h"
41 #if defined(__NetBSD__)
46 #elif !defined(__OpenBSD__)
49 #include <sys/param.h>
50 #include <sys/systm.h>
51 #include <sys/malloc.h>
54 #include <sys/domain.h>
56 #include <sys/protosw.h>
57 #include <sys/socket.h>
58 #include <sys/socketvar.h>
60 #include <sys/kernel.h>
61 #include <sys/sysctl.h>
62 #include <sys/resourcevar.h>
65 #include <sys/domain.h>
67 #include <sys/thread2.h>
68 #include <sys/socketvar2.h>
70 #if (defined(__FreeBSD__) && __FreeBSD_version >= 500000)
71 #include <sys/limits.h>
73 #include <machine/limits.h>
75 #include <machine/cpu.h>
78 #include <net/if_types.h>
80 #if defined(__FreeBSD__) || defined(__DragonFly__)
81 #include <net/if_var.h>
84 #include <net/route.h>
86 #include <netinet/in.h>
87 #include <netinet/in_systm.h>
88 #include <netinet/ip.h>
89 #include <netinet/in_pcb.h>
90 #include <netinet/in_var.h>
91 #include <netinet/ip_var.h>
94 #include <netinet/ip6.h>
95 #include <netinet6/ip6_var.h>
96 #include <netinet6/scope6_var.h>
97 #include <netinet6/nd6.h>
99 #if defined(__FreeBSD__) || defined(__NetBSD__) || defined(__DragonFly__)
100 #include <netinet6/in6_pcb.h>
101 #elif defined(__OpenBSD__)
102 #include <netinet/in_pcb.h>
105 #include <netinet/icmp6.h>
109 #include <net/net_osdep.h>
111 #if defined(HAVE_NRL_INPCB) || defined(__FreeBSD__) || defined(__DragonFly__)
117 #include <netinet/sctp_pcb.h>
121 #include <netinet6/ipsec.h>
122 #include <netproto/key/key.h>
128 #include <netinet/sctp_var.h>
129 #include <netinet/sctp_header.h>
130 #include <netinet/sctputil.h>
131 #include <netinet/sctp_pcb.h>
132 #include <netinet/sctp_output.h>
133 #include <netinet/sctp_uio.h>
134 #include <netinet/sctputil.h>
135 #include <netinet/sctp_hashdriver.h>
136 #include <netinet/sctp_timer.h>
137 #include <netinet/sctp_asconf.h>
138 #include <netinet/sctp_indata.h>
141 extern uint32_t sctp_debug_on
;
144 extern int sctp_peer_chunk_oh
;
147 sctp_find_cmsg(int c_type
, void *data
, struct mbuf
*control
, int cpsize
)
152 tlen
= control
->m_len
;
155 * Independent of how many mbufs, find the c_type inside the control
156 * structure and copy out the data.
159 if ((tlen
-at
) < (int)CMSG_ALIGN(sizeof(cmh
))) {
160 /* not enough room for one more we are done. */
163 m_copydata(control
, at
, sizeof(cmh
), (caddr_t
)&cmh
);
164 if ((cmh
.cmsg_len
+ at
) > tlen
) {
166 * this is real messed up since there is not enough
167 * data here to cover the cmsg header. We are done.
171 if ((cmh
.cmsg_level
== IPPROTO_SCTP
) &&
172 (c_type
== cmh
.cmsg_type
)) {
173 /* found the one we want, copy it out */
174 at
+= CMSG_ALIGN(sizeof(struct cmsghdr
));
175 if ((int)(cmh
.cmsg_len
- CMSG_ALIGN(sizeof(struct cmsghdr
))) < cpsize
) {
177 * space of cmsg_len after header not
182 m_copydata(control
, at
, cpsize
, data
);
185 at
+= CMSG_ALIGN(cmh
.cmsg_len
);
186 if (cmh
.cmsg_len
== 0) {
196 sctp_add_addr_to_mbuf(struct mbuf
*m
, struct ifaddr
*ifa
)
198 struct sctp_paramhdr
*parmh
;
201 if (ifa
->ifa_addr
->sa_family
== AF_INET
) {
202 len
= sizeof(struct sctp_ipv4addr_param
);
203 } else if (ifa
->ifa_addr
->sa_family
== AF_INET6
) {
204 len
= sizeof(struct sctp_ipv6addr_param
);
210 if (M_TRAILINGSPACE(m
) >= len
) {
211 /* easy side we just drop it on the end */
212 parmh
= (struct sctp_paramhdr
*)(m
->m_data
+ m
->m_len
);
215 /* Need more space */
217 while (mret
->m_next
!= NULL
) {
220 MGET(mret
->m_next
, MB_DONTWAIT
, MT_DATA
);
221 if (mret
->m_next
== NULL
) {
222 /* We are hosed, can't add more addresses */
226 parmh
= mtod(mret
, struct sctp_paramhdr
*);
228 /* now add the parameter */
229 if (ifa
->ifa_addr
->sa_family
== AF_INET
) {
230 struct sctp_ipv4addr_param
*ipv4p
;
231 struct sockaddr_in
*sin
;
232 sin
= (struct sockaddr_in
*)ifa
->ifa_addr
;
233 ipv4p
= (struct sctp_ipv4addr_param
*)parmh
;
234 parmh
->param_type
= htons(SCTP_IPV4_ADDRESS
);
235 parmh
->param_length
= htons(len
);
236 ipv4p
->addr
= sin
->sin_addr
.s_addr
;
238 } else if (ifa
->ifa_addr
->sa_family
== AF_INET6
) {
239 struct sctp_ipv6addr_param
*ipv6p
;
240 struct sockaddr_in6
*sin6
;
241 sin6
= (struct sockaddr_in6
*)ifa
->ifa_addr
;
242 ipv6p
= (struct sctp_ipv6addr_param
*)parmh
;
243 parmh
->param_type
= htons(SCTP_IPV6_ADDRESS
);
244 parmh
->param_length
= htons(len
);
245 memcpy(ipv6p
->addr
, &sin6
->sin6_addr
,
246 sizeof(ipv6p
->addr
));
247 /* clear embedded scope in the address */
248 in6_clearscope((struct in6_addr
*)ipv6p
->addr
);
259 sctp_add_cookie(struct sctp_inpcb
*inp
, struct mbuf
*init
, int init_offset
,
260 struct mbuf
*initack
, int initack_offset
, struct sctp_state_cookie
*stc_in
)
262 struct mbuf
*copy_init
, *copy_initack
, *m_at
, *sig
, *mret
;
263 struct sctp_state_cookie
*stc
;
264 struct sctp_paramhdr
*ph
;
271 MGET(mret
, MB_DONTWAIT
, MT_DATA
);
275 copy_init
= sctp_m_copym(init
, init_offset
, M_COPYALL
, MB_DONTWAIT
);
276 if (copy_init
== NULL
) {
280 copy_initack
= sctp_m_copym(initack
, initack_offset
, M_COPYALL
,
282 if (copy_initack
== NULL
) {
284 sctp_m_freem(copy_init
);
287 /* easy side we just drop it on the end */
288 ph
= mtod(mret
, struct sctp_paramhdr
*);
289 mret
->m_len
= sizeof(struct sctp_state_cookie
) +
290 sizeof(struct sctp_paramhdr
);
291 stc
= (struct sctp_state_cookie
*)((caddr_t
)ph
+
292 sizeof(struct sctp_paramhdr
));
293 ph
->param_type
= htons(SCTP_STATE_COOKIE
);
294 ph
->param_length
= 0; /* fill in at the end */
295 /* Fill in the stc cookie data */
298 /* tack the INIT and then the INIT-ACK onto the chain */
301 for (m_at
= mret
; m_at
; m_at
= m_at
->m_next
) {
302 cookie_sz
+= m_at
->m_len
;
303 if (m_at
->m_next
== NULL
) {
304 m_at
->m_next
= copy_init
;
309 for (m_at
= copy_init
; m_at
; m_at
= m_at
->m_next
) {
310 cookie_sz
+= m_at
->m_len
;
311 if (m_at
->m_next
== NULL
) {
312 m_at
->m_next
= copy_initack
;
317 for (m_at
= copy_initack
; m_at
; m_at
= m_at
->m_next
) {
318 cookie_sz
+= m_at
->m_len
;
319 if (m_at
->m_next
== NULL
) {
323 MGET(sig
, MB_DONTWAIT
, MT_DATA
);
327 sctp_m_freem(copy_init
);
328 sctp_m_freem(copy_initack
);
334 signature
= (uint8_t *)(mtod(sig
, caddr_t
) + sig_offset
);
335 /* Time to sign the cookie */
336 sctp_hash_digest_m((char *)inp
->sctp_ep
.secret_key
[
337 (int)(inp
->sctp_ep
.current_secret_number
)],
338 SCTP_SECRET_SIZE
, mret
, sizeof(struct sctp_paramhdr
),
339 (uint8_t *)signature
);
340 sig
->m_len
+= SCTP_SIGNATURE_SIZE
;
341 cookie_sz
+= SCTP_SIGNATURE_SIZE
;
343 ph
->param_length
= htons(cookie_sz
);
348 static struct sockaddr_in
*
349 sctp_is_v4_ifa_addr_prefered (struct ifaddr
*ifa
, uint8_t loopscope
, uint8_t ipv4_scope
, uint8_t *sin_loop
, uint8_t *sin_local
)
351 struct sockaddr_in
*sin
;
353 * Here we determine if its a prefered address. A
354 * prefered address means it is the same scope or
355 * higher scope then the destination.
356 * L = loopback, P = private, G = global
357 * -----------------------------------------
358 * src | dest | result
359 *-----------------------------------------
361 *-----------------------------------------
363 *-----------------------------------------
365 *-----------------------------------------
367 *-----------------------------------------
369 *-----------------------------------------
371 *-----------------------------------------
373 *-----------------------------------------
375 *-----------------------------------------
377 *-----------------------------------------
380 if (ifa
->ifa_addr
->sa_family
!= AF_INET
) {
384 /* Ok the address may be ok */
385 sin
= (struct sockaddr_in
*)ifa
->ifa_addr
;
386 if (sin
->sin_addr
.s_addr
== 0) {
389 *sin_local
= *sin_loop
= 0;
390 if ((ifa
->ifa_ifp
->if_type
== IFT_LOOP
) ||
391 (IN4_ISLOOPBACK_ADDRESS(&sin
->sin_addr
))) {
395 if ((IN4_ISPRIVATE_ADDRESS(&sin
->sin_addr
))) {
398 if (!loopscope
&& *sin_loop
) {
399 /* Its a loopback address and we don't have loop scope */
402 if (!ipv4_scope
&& *sin_local
) {
403 /* Its a private address, and we don't have private address scope */
406 if (((ipv4_scope
== 0) && (loopscope
== 0)) && (*sin_local
)) {
407 /* its a global src and a private dest */
410 /* its a prefered address */
414 static struct sockaddr_in
*
415 sctp_is_v4_ifa_addr_acceptable (struct ifaddr
*ifa
, uint8_t loopscope
, uint8_t ipv4_scope
, uint8_t *sin_loop
, uint8_t *sin_local
)
417 struct sockaddr_in
*sin
;
419 * Here we determine if its a acceptable address. A
420 * acceptable address means it is the same scope or
421 * higher scope but we can allow for NAT which means
422 * its ok to have a global dest and a private src.
424 * L = loopback, P = private, G = global
425 * -----------------------------------------
426 * src | dest | result
427 *-----------------------------------------
429 *-----------------------------------------
431 *-----------------------------------------
433 *-----------------------------------------
435 *-----------------------------------------
437 *-----------------------------------------
438 * G | P | yes - probably this won't work.
439 *-----------------------------------------
441 *-----------------------------------------
443 *-----------------------------------------
445 *-----------------------------------------
448 if (ifa
->ifa_addr
->sa_family
!= AF_INET
) {
452 /* Ok the address may be ok */
453 sin
= (struct sockaddr_in
*)ifa
->ifa_addr
;
454 if (sin
->sin_addr
.s_addr
== 0) {
457 *sin_local
= *sin_loop
= 0;
458 if ((ifa
->ifa_ifp
->if_type
== IFT_LOOP
) ||
459 (IN4_ISLOOPBACK_ADDRESS(&sin
->sin_addr
))) {
463 if ((IN4_ISPRIVATE_ADDRESS(&sin
->sin_addr
))) {
466 if (!loopscope
&& *sin_loop
) {
467 /* Its a loopback address and we don't have loop scope */
470 /* its an acceptable address */
475 * This treats the address list on the ep as a restricted list
476 * (negative list). If a the passed address is listed, then
477 * the address is NOT allowed on the association.
480 sctp_is_addr_restricted(struct sctp_tcb
*stcb
, struct sockaddr
*addr
)
482 struct sctp_laddr
*laddr
;
487 /* There are no restrictions, no TCB :-) */
491 LIST_FOREACH(laddr
, &stcb
->asoc
.sctp_local_addr_list
, sctp_nxt_addr
) {
494 if (sctp_debug_on
& SCTP_DEBUG_OUTPUT4
) {
495 kprintf("There are %d addresses on the restricted list\n", cnt
);
499 LIST_FOREACH(laddr
, &stcb
->asoc
.sctp_local_addr_list
, sctp_nxt_addr
) {
500 if (laddr
->ifa
== NULL
) {
502 if (sctp_debug_on
& SCTP_DEBUG_OUTPUT1
) {
503 kprintf("Help I have fallen and I can't get up!\n");
509 if (sctp_debug_on
& SCTP_DEBUG_OUTPUT4
) {
511 kprintf("Restricted address[%d]:", cnt
);
512 sctp_print_address(laddr
->ifa
->ifa_addr
);
515 if (sctp_cmpaddr(addr
, laddr
->ifa
->ifa_addr
) == 1) {
516 /* Yes it is on the list */
524 sctp_is_addr_in_ep(struct sctp_inpcb
*inp
, struct ifaddr
*ifa
)
526 struct sctp_laddr
*laddr
;
530 LIST_FOREACH(laddr
, &inp
->sctp_addr_list
, sctp_nxt_addr
) {
531 if (laddr
->ifa
== NULL
) {
533 if (sctp_debug_on
& SCTP_DEBUG_OUTPUT1
) {
534 kprintf("Help I have fallen and I can't get up!\n");
539 if (laddr
->ifa
->ifa_addr
== NULL
)
541 if (laddr
->ifa
== ifa
)
544 if (laddr
->ifa
->ifa_addr
->sa_family
!= ifa
->ifa_addr
->sa_family
) {
545 /* skip non compatible address comparison */
548 if (sctp_cmpaddr(ifa
->ifa_addr
, laddr
->ifa
->ifa_addr
) == 1) {
549 /* Yes it is restricted */
558 static struct in_addr
559 sctp_choose_v4_boundspecific_inp(struct sctp_inpcb
*inp
,
565 struct sctp_laddr
*laddr
;
566 struct sockaddr_in
*sin
;
569 uint8_t sin_loop
, sin_local
;
571 /* first question, is the ifn we will emit on
572 * in our list, if so, we want that one.
576 /* is a prefered one on the interface we route out? */
577 TAILQ_FOREACH(ifa
, &ifn
->if_addrlist
, ifa_list
) {
578 sin
= sctp_is_v4_ifa_addr_prefered (ifa
, loopscope
, ipv4_scope
, &sin_loop
, &sin_local
);
581 if (sctp_is_addr_in_ep(inp
, ifa
)) {
582 return (sin
->sin_addr
);
585 /* is an acceptable one on the interface we route out? */
586 TAILQ_FOREACH(ifa
, &ifn
->if_addrlist
, ifa_list
) {
587 sin
= sctp_is_v4_ifa_addr_acceptable (ifa
, loopscope
, ipv4_scope
, &sin_loop
, &sin_local
);
590 if (sctp_is_addr_in_ep(inp
, ifa
)) {
591 return (sin
->sin_addr
);
595 /* ok, what about a prefered address in the inp */
596 for (laddr
= LIST_FIRST(&inp
->sctp_addr_list
);
597 laddr
&& (laddr
!= inp
->next_addr_touse
);
598 laddr
= LIST_NEXT(laddr
, sctp_nxt_addr
)) {
599 if (laddr
->ifa
== NULL
) {
600 /* address has been removed */
603 sin
= sctp_is_v4_ifa_addr_prefered (laddr
->ifa
, loopscope
, ipv4_scope
, &sin_loop
, &sin_local
);
606 return (sin
->sin_addr
);
609 /* ok, what about an acceptable address in the inp */
610 for (laddr
= LIST_FIRST(&inp
->sctp_addr_list
);
611 laddr
&& (laddr
!= inp
->next_addr_touse
);
612 laddr
= LIST_NEXT(laddr
, sctp_nxt_addr
)) {
613 if (laddr
->ifa
== NULL
) {
614 /* address has been removed */
617 sin
= sctp_is_v4_ifa_addr_acceptable (laddr
->ifa
, loopscope
, ipv4_scope
, &sin_loop
, &sin_local
);
620 return (sin
->sin_addr
);
624 /* no address bound can be a source for the destination we are in trouble */
626 if (sctp_debug_on
& SCTP_DEBUG_OUTPUT1
) {
627 kprintf("Src address selection for EP, no acceptable src address found for address\n");
630 memset(&ans
, 0, sizeof(ans
));
636 static struct in_addr
637 sctp_choose_v4_boundspecific_stcb(struct sctp_inpcb
*inp
,
638 struct sctp_tcb
*stcb
,
639 struct sctp_nets
*net
,
643 int non_asoc_addr_ok
)
646 * Here we have two cases, bound all asconf
647 * allowed. bound all asconf not allowed.
650 struct sctp_laddr
*laddr
, *starting_point
;
654 uint8_t sin_loop
, sin_local
, start_at_beginning
=0;
655 struct sockaddr_in
*sin
;
657 /* first question, is the ifn we will emit on
658 * in our list, if so, we want that one.
662 if (inp
->sctp_flags
& SCTP_PCB_FLAGS_DO_ASCONF
) {
664 * Here we use the list of addresses on the endpoint. Then
665 * the addresses listed on the "restricted" list is just that,
666 * address that have not been added and can't be used (unless
667 * the non_asoc_addr_ok is set).
670 if (sctp_debug_on
& SCTP_DEBUG_OUTPUT1
) {
671 kprintf("Have a STCB - asconf allowed, not bound all have a netgative list\n");
674 /* first question, is the ifn we will emit on
675 * in our list, if so, we want that one.
678 /* first try for an prefered address on the ep */
679 TAILQ_FOREACH(ifa
, &ifn
->if_addrlist
, ifa_list
) {
680 if (sctp_is_addr_in_ep(inp
, ifa
)) {
681 sin
= sctp_is_v4_ifa_addr_prefered (ifa
, loopscope
, ipv4_scope
, &sin_loop
, &sin_local
);
684 if ((non_asoc_addr_ok
== 0) &&
685 (sctp_is_addr_restricted(stcb
, (struct sockaddr
*)sin
))) {
686 /* on the no-no list */
689 return (sin
->sin_addr
);
692 /* next try for an acceptable address on the ep */
693 TAILQ_FOREACH(ifa
, &ifn
->if_addrlist
, ifa_list
) {
694 if (sctp_is_addr_in_ep(inp
, ifa
)) {
695 sin
= sctp_is_v4_ifa_addr_acceptable (ifa
, loopscope
, ipv4_scope
, &sin_loop
, &sin_local
);
698 if ((non_asoc_addr_ok
== 0) &&
699 (sctp_is_addr_restricted(stcb
, (struct sockaddr
*)sin
))) {
700 /* on the no-no list */
703 return (sin
->sin_addr
);
708 /* if we can't find one like that then we must
709 * look at all addresses bound to pick one at
710 * first prefereable then secondly acceptable.
712 starting_point
= stcb
->asoc
.last_used_address
;
714 if (stcb
->asoc
.last_used_address
== NULL
) {
715 start_at_beginning
=1;
716 stcb
->asoc
.last_used_address
= LIST_FIRST(&inp
->sctp_addr_list
);
718 /* search beginning with the last used address */
719 for (laddr
= stcb
->asoc
.last_used_address
; laddr
;
720 laddr
= LIST_NEXT(laddr
, sctp_nxt_addr
)) {
721 if (laddr
->ifa
== NULL
) {
722 /* address has been removed */
725 sin
= sctp_is_v4_ifa_addr_prefered (laddr
->ifa
, loopscope
, ipv4_scope
, &sin_loop
, &sin_local
);
728 if ((non_asoc_addr_ok
== 0) &&
729 (sctp_is_addr_restricted(stcb
, (struct sockaddr
*)sin
))) {
730 /* on the no-no list */
733 return (sin
->sin_addr
);
736 if (start_at_beginning
== 0) {
737 stcb
->asoc
.last_used_address
= NULL
;
738 goto sctpv4_from_the_top
;
740 /* now try for any higher scope than the destination */
741 stcb
->asoc
.last_used_address
= starting_point
;
742 start_at_beginning
= 0;
743 sctpv4_from_the_top2
:
744 if (stcb
->asoc
.last_used_address
== NULL
) {
745 start_at_beginning
=1;
746 stcb
->asoc
.last_used_address
= LIST_FIRST(&inp
->sctp_addr_list
);
748 /* search beginning with the last used address */
749 for (laddr
= stcb
->asoc
.last_used_address
; laddr
;
750 laddr
= LIST_NEXT(laddr
, sctp_nxt_addr
)) {
751 if (laddr
->ifa
== NULL
) {
752 /* address has been removed */
755 sin
= sctp_is_v4_ifa_addr_acceptable (laddr
->ifa
, loopscope
, ipv4_scope
, &sin_loop
, &sin_local
);
758 if ((non_asoc_addr_ok
== 0) &&
759 (sctp_is_addr_restricted(stcb
, (struct sockaddr
*)sin
))) {
760 /* on the no-no list */
763 return (sin
->sin_addr
);
765 if (start_at_beginning
== 0) {
766 stcb
->asoc
.last_used_address
= NULL
;
767 goto sctpv4_from_the_top2
;
771 * Here we have an address list on the association, thats the
772 * only valid source addresses that we can use.
775 if (sctp_debug_on
& SCTP_DEBUG_OUTPUT1
) {
776 kprintf("Have a STCB - no asconf allowed, not bound all have a postive list\n");
779 /* First look at all addresses for one that is on
780 * the interface we route out
782 LIST_FOREACH(laddr
, &stcb
->asoc
.sctp_local_addr_list
,
784 if (laddr
->ifa
== NULL
) {
785 /* address has been removed */
788 sin
= sctp_is_v4_ifa_addr_prefered (laddr
->ifa
, loopscope
, ipv4_scope
, &sin_loop
, &sin_local
);
791 /* first question, is laddr->ifa an address associated with the emit interface */
793 TAILQ_FOREACH(ifa
, &ifn
->if_addrlist
, ifa_list
) {
794 if (laddr
->ifa
== ifa
) {
795 sin
= (struct sockaddr_in
*)laddr
->ifa
->ifa_addr
;
796 return (sin
->sin_addr
);
798 if (sctp_cmpaddr(ifa
->ifa_addr
, laddr
->ifa
->ifa_addr
) == 1) {
799 sin
= (struct sockaddr_in
*)laddr
->ifa
->ifa_addr
;
800 return (sin
->sin_addr
);
805 /* what about an acceptable one on the interface? */
806 LIST_FOREACH(laddr
, &stcb
->asoc
.sctp_local_addr_list
,
808 if (laddr
->ifa
== NULL
) {
809 /* address has been removed */
812 sin
= sctp_is_v4_ifa_addr_acceptable (laddr
->ifa
, loopscope
, ipv4_scope
, &sin_loop
, &sin_local
);
815 /* first question, is laddr->ifa an address associated with the emit interface */
817 TAILQ_FOREACH(ifa
, &ifn
->if_addrlist
, ifa_list
) {
818 if (laddr
->ifa
== ifa
) {
819 sin
= (struct sockaddr_in
*)laddr
->ifa
->ifa_addr
;
820 return (sin
->sin_addr
);
822 if (sctp_cmpaddr(ifa
->ifa_addr
, laddr
->ifa
->ifa_addr
) == 1) {
823 sin
= (struct sockaddr_in
*)laddr
->ifa
->ifa_addr
;
824 return (sin
->sin_addr
);
829 /* ok, next one that is preferable in general */
830 LIST_FOREACH(laddr
, &stcb
->asoc
.sctp_local_addr_list
,
832 if (laddr
->ifa
== NULL
) {
833 /* address has been removed */
836 sin
= sctp_is_v4_ifa_addr_prefered (laddr
->ifa
, loopscope
, ipv4_scope
, &sin_loop
, &sin_local
);
839 return (sin
->sin_addr
);
842 /* last, what about one that is acceptable */
843 LIST_FOREACH(laddr
, &stcb
->asoc
.sctp_local_addr_list
,
845 if (laddr
->ifa
== NULL
) {
846 /* address has been removed */
849 sin
= sctp_is_v4_ifa_addr_acceptable (laddr
->ifa
, loopscope
, ipv4_scope
, &sin_loop
, &sin_local
);
852 return (sin
->sin_addr
);
855 memset(&ans
, 0, sizeof(ans
));
859 static struct sockaddr_in
*
860 sctp_select_v4_nth_prefered_addr_from_ifn_boundall (struct ifnet
*ifn
, struct sctp_tcb
*stcb
, int non_asoc_addr_ok
,
861 uint8_t loopscope
, uint8_t ipv4_scope
, int cur_addr_num
)
864 struct sockaddr_in
*sin
;
865 uint8_t sin_loop
, sin_local
;
866 int num_eligible_addr
= 0;
867 TAILQ_FOREACH(ifa
, &ifn
->if_addrlist
, ifa_list
) {
868 sin
= sctp_is_v4_ifa_addr_prefered (ifa
, loopscope
, ipv4_scope
, &sin_loop
, &sin_local
);
872 if ((non_asoc_addr_ok
== 0) && sctp_is_addr_restricted(stcb
, (struct sockaddr
*)sin
)) {
873 /* It is restricted for some reason.. probably
879 if (cur_addr_num
== num_eligible_addr
) {
888 sctp_count_v4_num_prefered_boundall (struct ifnet
*ifn
, struct sctp_tcb
*stcb
, int non_asoc_addr_ok
,
889 uint8_t loopscope
, uint8_t ipv4_scope
, uint8_t *sin_loop
, uint8_t *sin_local
)
892 struct sockaddr_in
*sin
;
893 int num_eligible_addr
= 0;
895 TAILQ_FOREACH(ifa
, &ifn
->if_addrlist
, ifa_list
) {
896 sin
= sctp_is_v4_ifa_addr_prefered (ifa
, loopscope
, ipv4_scope
, sin_loop
, sin_local
);
900 if ((non_asoc_addr_ok
== 0) && sctp_is_addr_restricted(stcb
, (struct sockaddr
*)sin
)) {
901 /* It is restricted for some reason.. probably
909 return (num_eligible_addr
);
913 static struct in_addr
914 sctp_choose_v4_boundall(struct sctp_inpcb
*inp
,
915 struct sctp_tcb
*stcb
,
916 struct sctp_nets
*net
,
920 int non_asoc_addr_ok
)
922 int cur_addr_num
=0, num_prefered
=0;
923 uint8_t sin_loop
, sin_local
;
925 struct sockaddr_in
*sin
;
929 * For v4 we can use (in boundall) any address in the association. If
930 * non_asoc_addr_ok is set we can use any address (at least in theory).
931 * So we look for prefered addresses first. If we find one, we use it.
932 * Otherwise we next try to get an address on the interface, which we
933 * should be able to do (unless non_asoc_addr_ok is false and we are
934 * routed out that way). In these cases where we can't use the address
935 * of the interface we go through all the ifn's looking for an address
936 * we can use and fill that in. Punting means we send back address
937 * 0, which will probably cause problems actually since then IP will
938 * fill in the address of the route ifn, which means we probably already
939 * rejected it.. i.e. here comes an abort :-<.
943 cur_addr_num
= net
->indx_of_eligible_next_to_use
;
946 goto bound_all_v4_plan_c
;
948 num_prefered
= sctp_count_v4_num_prefered_boundall (ifn
, stcb
, non_asoc_addr_ok
, loopscope
, ipv4_scope
, &sin_loop
, &sin_local
);
950 if (sctp_debug_on
& SCTP_DEBUG_OUTPUT1
) {
951 kprintf("Found %d prefered source addresses\n", num_prefered
);
954 if (num_prefered
== 0) {
955 /* no eligible addresses, we must use some other
956 * interface address if we can find one.
958 goto bound_all_v4_plan_b
;
960 /* Ok we have num_eligible_addr set with how many we can use,
961 * this may vary from call to call due to addresses being deprecated etc..
963 if (cur_addr_num
>= num_prefered
) {
966 /* select the nth address from the list (where cur_addr_num is the nth) and
967 * 0 is the first one, 1 is the second one etc...
970 if (sctp_debug_on
& SCTP_DEBUG_OUTPUT1
) {
971 kprintf("cur_addr_num:%d\n", cur_addr_num
);
974 sin
= sctp_select_v4_nth_prefered_addr_from_ifn_boundall (ifn
, stcb
, non_asoc_addr_ok
, loopscope
,
975 ipv4_scope
, cur_addr_num
);
977 /* if sin is NULL something changed??, plan_a now */
979 return (sin
->sin_addr
);
983 * plan_b: Look at the interface that we emit on
984 * and see if we can find an acceptable address.
987 TAILQ_FOREACH(ifa
, &ifn
->if_addrlist
, ifa_list
) {
988 sin
= sctp_is_v4_ifa_addr_acceptable (ifa
, loopscope
, ipv4_scope
, &sin_loop
, &sin_local
);
992 if ((non_asoc_addr_ok
== 0) && sctp_is_addr_restricted(stcb
, (struct sockaddr
*)sin
)) {
993 /* It is restricted for some reason.. probably
999 return (sin
->sin_addr
);
1002 * plan_c: Look at all interfaces and find a prefered
1003 * address. If we reache here we are in trouble I think.
1005 bound_all_v4_plan_c
:
1006 for (ifn
= TAILQ_FIRST(&ifnet
);
1007 ifn
&& (ifn
!= inp
->next_ifn_touse
);
1008 ifn
=TAILQ_NEXT(ifn
, if_list
)) {
1009 if (loopscope
== 0 && ifn
->if_type
== IFT_LOOP
) {
1010 /* wrong base scope */
1013 if (ifn
== rt
->rt_ifp
)
1014 /* already looked at this guy */
1016 num_prefered
= sctp_count_v4_num_prefered_boundall (ifn
, stcb
, non_asoc_addr_ok
,
1017 loopscope
, ipv4_scope
, &sin_loop
, &sin_local
);
1019 if (sctp_debug_on
& SCTP_DEBUG_OUTPUT1
) {
1020 kprintf("Found ifn:%x %d prefered source addresses\n", (u_int
)ifn
, num_prefered
);
1023 if (num_prefered
== 0) {
1025 * None on this interface.
1029 /* Ok we have num_eligible_addr set with how many we can use,
1030 * this may vary from call to call due to addresses being deprecated etc..
1032 if (cur_addr_num
>= num_prefered
) {
1035 sin
= sctp_select_v4_nth_prefered_addr_from_ifn_boundall (ifn
, stcb
, non_asoc_addr_ok
, loopscope
,
1036 ipv4_scope
, cur_addr_num
);
1039 return (sin
->sin_addr
);
1044 * plan_d: We are in deep trouble. No prefered address on
1045 * any interface. And the emit interface does not
1046 * even have an acceptable address. Take anything
1047 * we can get! If this does not work we are
1048 * probably going to emit a packet that will
1049 * illicit an ABORT, falling through.
1052 for (ifn
= TAILQ_FIRST(&ifnet
);
1053 ifn
&& (ifn
!= inp
->next_ifn_touse
);
1054 ifn
=TAILQ_NEXT(ifn
, if_list
)) {
1055 if (loopscope
== 0 && ifn
->if_type
== IFT_LOOP
) {
1056 /* wrong base scope */
1059 if (ifn
== rt
->rt_ifp
)
1060 /* already looked at this guy */
1063 TAILQ_FOREACH(ifa
, &ifn
->if_addrlist
, ifa_list
) {
1064 sin
= sctp_is_v4_ifa_addr_acceptable (ifa
, loopscope
, ipv4_scope
, &sin_loop
, &sin_local
);
1068 if ((non_asoc_addr_ok
== 0) && sctp_is_addr_restricted(stcb
, (struct sockaddr
*)sin
)) {
1069 /* It is restricted for some reason.. probably
1075 return (sin
->sin_addr
);
1079 * Ok we can find NO address to source from that is
1080 * not on our negative list. It is either the special
1081 * ASCONF case where we are sourceing from a intf that
1082 * has been ifconfig'd to a different address (i.e.
1083 * it holds a ADD/DEL/SET-PRIM and the proper lookup
1084 * address. OR we are hosed, and this baby is going
1085 * to abort the association.
1087 if (non_asoc_addr_ok
) {
1088 return (((struct sockaddr_in
*)(rt
->rt_ifa
->ifa_addr
))->sin_addr
);
1090 memset(&ans
, 0, sizeof(ans
));
1097 /* tcb may be NULL */
1099 sctp_ipv4_source_address_selection(struct sctp_inpcb
*inp
,
1100 struct sctp_tcb
*stcb
, struct route
*ro
, struct sctp_nets
*net
,
1101 int non_asoc_addr_ok
)
1104 struct sockaddr_in
*to
= (struct sockaddr_in
*)&ro
->ro_dst
;
1105 uint8_t ipv4_scope
, loopscope
;
1108 * - Find the route if needed, cache if I can.
1109 * - Look at interface address in route, Is it
1110 * in the bound list. If so we have the best source.
1111 * - If not we must rotate amongst the addresses.
1115 * Do we need to pay attention to scope. We can have
1116 * a private address or a global address we are sourcing
1117 * or sending to. So if we draw it out
1118 * source * dest * result
1119 * ------------------------------------------
1120 * a Private * Global * NAT?
1121 * ------------------------------------------
1122 * b Private * Private * No problem
1123 * ------------------------------------------
1124 * c Global * Private * Huh, How will this work?
1125 * ------------------------------------------
1126 * d Global * Global * No Problem
1127 * ------------------------------------------
1129 * And then we add to that what happens if there are multiple
1130 * addresses assigned to an interface. Remember the ifa on a
1131 * ifn is a linked list of addresses. So one interface can
1132 * have more than one IPv4 address. What happens if we
1133 * have both a private and a global address? Do we then
1134 * use context of destination to sort out which one is
1135 * best? And what about NAT's sending P->G may get you
1136 * a NAT translation, or should you select the G thats
1137 * on the interface in preference.
1141 * - count the number of addresses on the interface.
1142 * - if its one, no problem except case <c>. For <a>
1143 * we will assume a NAT out there.
1144 * - if there are more than one, then we need to worry
1145 * about scope P or G. We should prefer G -> G and
1146 * P -> P if possible. Then as a secondary fall back
1147 * to mixed types G->P being a last ditch one.
1148 * - The above all works for bound all, but bound
1149 * specific we need to use the same concept but instead
1150 * only consider the bound addresses. If the bound set
1151 * is NOT assigned to the interface then we must use
1152 * rotation amongst them.
1154 * Notes: For v4, we can always punt and let ip_output
1155 * decide by sending back a source of 0.0.0.0
1158 if (ro
->ro_rt
== NULL
) {
1160 * Need a route to cache.
1163 #if defined(__FreeBSD__) || defined(__APPLE__) || defined(__DragonFly__)
1164 rtalloc_ign(ro
, 0UL);
1169 if (ro
->ro_rt
== NULL
) {
1170 /* No route to host .. punt */
1171 memset(&ans
, 0, sizeof(ans
));
1174 /* Setup our scopes */
1176 ipv4_scope
= stcb
->asoc
.ipv4_local_scope
;
1177 loopscope
= stcb
->asoc
.loopback_scope
;
1179 /* Scope based on outbound address */
1180 if ((IN4_ISPRIVATE_ADDRESS(&to
->sin_addr
))) {
1183 } else if (IN4_ISLOOPBACK_ADDRESS(&to
->sin_addr
)) {
1192 if (sctp_debug_on
& SCTP_DEBUG_OUTPUT1
) {
1193 kprintf("Scope setup loop:%d ipv4_scope:%d\n",
1194 loopscope
, ipv4_scope
);
1197 if (inp
->sctp_flags
& SCTP_PCB_FLAGS_BOUNDALL
) {
1199 * When bound to all if the address list is set
1200 * it is a negative list. Addresses being added
1203 return (sctp_choose_v4_boundall(inp
, stcb
, net
, ro
->ro_rt
,
1204 ipv4_scope
, loopscope
, non_asoc_addr_ok
));
1207 * Three possiblities here:
1209 * a) stcb is NULL, which means we operate only from
1210 * the list of addresses (ifa's) bound to the assoc and
1211 * we care not about the list.
1212 * b) stcb is NOT-NULL, which means we have an assoc structure and
1213 * auto-asconf is on. This means that the list of addresses is
1214 * a NOT list. We use the list from the inp, but any listed address
1215 * in our list is NOT yet added. However if the non_asoc_addr_ok is
1216 * set we CAN use an address NOT available (i.e. being added). Its
1218 * c) stcb is NOT-NULL, which means we have an assoc structure and
1219 * auto-asconf is off. This means that the list of addresses is
1220 * the ONLY addresses I can use.. its positive.
1222 * Note we collapse b & c into the same function just like in
1223 * the v6 address selection.
1226 return (sctp_choose_v4_boundspecific_stcb(inp
, stcb
, net
,
1227 ro
->ro_rt
, ipv4_scope
, loopscope
, non_asoc_addr_ok
));
1229 return (sctp_choose_v4_boundspecific_inp(inp
, ro
->ro_rt
,
1230 ipv4_scope
, loopscope
));
1232 /* this should not be reached */
1233 memset(&ans
, 0, sizeof(ans
));
1239 static struct sockaddr_in6
*
1240 sctp_is_v6_ifa_addr_acceptable (struct ifaddr
*ifa
, int loopscope
, int loc_scope
, int *sin_loop
, int *sin_local
)
1242 struct in6_ifaddr
*ifa6
;
1243 struct sockaddr_in6
*sin6
;
1245 if (ifa
->ifa_addr
->sa_family
!= AF_INET6
) {
1249 ifa6
= (struct in6_ifaddr
*)ifa
;
1250 /* ok to use deprecated addresses? */
1251 if (!ip6_use_deprecated
) {
1252 if (IFA6_IS_DEPRECATED(ifa6
)) {
1253 /* can't use this type */
1257 /* are we ok, with the current state of this address? */
1258 if (ifa6
->ia6_flags
&
1259 (IN6_IFF_DETACHED
| IN6_IFF_NOTREADY
| IN6_IFF_ANYCAST
)) {
1260 /* Can't use these types */
1263 /* Ok the address may be ok */
1264 sin6
= (struct sockaddr_in6
*)ifa
->ifa_addr
;
1265 *sin_local
= *sin_loop
= 0;
1266 if ((ifa
->ifa_ifp
->if_type
== IFT_LOOP
) ||
1267 (IN6_IS_ADDR_LOOPBACK(&sin6
->sin6_addr
))) {
1270 if (!loopscope
&& *sin_loop
) {
1271 /* Its a loopback address and we don't have loop scope */
1274 if (IN6_IS_ADDR_UNSPECIFIED(&sin6
->sin6_addr
)) {
1275 /* we skip unspecifed addresses */
1279 if (IN6_IS_ADDR_LINKLOCAL(&sin6
->sin6_addr
)) {
1282 if (!loc_scope
&& *sin_local
) {
1283 /* Its a link local address, and we don't have link local scope */
1290 static struct sockaddr_in6
*
1291 sctp_choose_v6_boundspecific_stcb(struct sctp_inpcb
*inp
,
1292 struct sctp_tcb
*stcb
,
1293 struct sctp_nets
*net
,
1297 int non_asoc_addr_ok
)
1300 * Each endpoint has a list of local addresses associated
1301 * with it. The address list is either a "negative list" i.e.
1302 * those addresses that are NOT allowed to be used as a source OR
1303 * a "postive list" i.e. those addresses that CAN be used.
1305 * Its a negative list if asconf is allowed. What we do
1306 * in this case is use the ep address list BUT we have
1307 * to cross check it against the negative list.
1309 * In the case where NO asconf is allowed, we have just
1310 * a straight association level list that we must use to
1311 * find a source address.
1313 struct sctp_laddr
*laddr
, *starting_point
;
1314 struct sockaddr_in6
*sin6
;
1315 int sin_loop
, sin_local
;
1316 int start_at_beginning
=0;
1321 if (inp
->sctp_flags
& SCTP_PCB_FLAGS_DO_ASCONF
) {
1323 if (sctp_debug_on
& SCTP_DEBUG_OUTPUT1
) {
1324 kprintf("Have a STCB - asconf allowed, not bound all have a netgative list\n");
1327 /* first question, is the ifn we will emit on
1328 * in our list, if so, we want that one.
1331 TAILQ_FOREACH(ifa
, &ifn
->if_addrlist
, ifa_list
) {
1332 if (sctp_is_addr_in_ep(inp
, ifa
)) {
1333 sin6
= sctp_is_v6_ifa_addr_acceptable (ifa
, loopscope
, loc_scope
, &sin_loop
, &sin_local
);
1336 if ((non_asoc_addr_ok
== 0) &&
1337 (sctp_is_addr_restricted(stcb
, (struct sockaddr
*)sin6
))) {
1338 /* on the no-no list */
1345 starting_point
= stcb
->asoc
.last_used_address
;
1346 /* First try for matching scope */
1348 if (stcb
->asoc
.last_used_address
== NULL
) {
1349 start_at_beginning
=1;
1350 stcb
->asoc
.last_used_address
= LIST_FIRST(&inp
->sctp_addr_list
);
1352 /* search beginning with the last used address */
1353 for (laddr
= stcb
->asoc
.last_used_address
; laddr
;
1354 laddr
= LIST_NEXT(laddr
, sctp_nxt_addr
)) {
1355 if (laddr
->ifa
== NULL
) {
1356 /* address has been removed */
1359 sin6
= sctp_is_v6_ifa_addr_acceptable (laddr
->ifa
, loopscope
, loc_scope
, &sin_loop
, &sin_local
);
1362 if ((non_asoc_addr_ok
== 0) && (sctp_is_addr_restricted(stcb
, (struct sockaddr
*)sin6
))) {
1363 /* on the no-no list */
1366 /* is it of matching scope ? */
1367 if ((loopscope
== 0) &&
1371 /* all of global scope we are ok with it */
1374 if (loopscope
&& sin_loop
)
1375 /* both on the loopback, thats ok */
1377 if (loc_scope
&& sin_local
)
1378 /* both local scope */
1382 if (start_at_beginning
== 0) {
1383 stcb
->asoc
.last_used_address
= NULL
;
1384 goto sctp_from_the_top
;
1386 /* now try for any higher scope than the destination */
1387 stcb
->asoc
.last_used_address
= starting_point
;
1388 start_at_beginning
= 0;
1390 if (stcb
->asoc
.last_used_address
== NULL
) {
1391 start_at_beginning
=1;
1392 stcb
->asoc
.last_used_address
= LIST_FIRST(&inp
->sctp_addr_list
);
1394 /* search beginning with the last used address */
1395 for (laddr
= stcb
->asoc
.last_used_address
; laddr
;
1396 laddr
= LIST_NEXT(laddr
, sctp_nxt_addr
)) {
1397 if (laddr
->ifa
== NULL
) {
1398 /* address has been removed */
1401 sin6
= sctp_is_v6_ifa_addr_acceptable (laddr
->ifa
, loopscope
, loc_scope
, &sin_loop
, &sin_local
);
1404 if ((non_asoc_addr_ok
== 0) && (sctp_is_addr_restricted(stcb
, (struct sockaddr
*)sin6
))) {
1405 /* on the no-no list */
1410 if (start_at_beginning
== 0) {
1411 stcb
->asoc
.last_used_address
= NULL
;
1412 goto sctp_from_the_top2
;
1416 if (sctp_debug_on
& SCTP_DEBUG_OUTPUT1
) {
1417 kprintf("Have a STCB - no asconf allowed, not bound all have a postive list\n");
1420 /* First try for interface output match */
1421 LIST_FOREACH(laddr
, &stcb
->asoc
.sctp_local_addr_list
,
1423 if (laddr
->ifa
== NULL
) {
1424 /* address has been removed */
1427 sin6
= sctp_is_v6_ifa_addr_acceptable (laddr
->ifa
, loopscope
, loc_scope
, &sin_loop
, &sin_local
);
1430 /* first question, is laddr->ifa an address associated with the emit interface */
1432 TAILQ_FOREACH(ifa
, &ifn
->if_addrlist
, ifa_list
) {
1433 if (laddr
->ifa
== ifa
) {
1434 sin6
= (struct sockaddr_in6
*)laddr
->ifa
->ifa_addr
;
1437 if (sctp_cmpaddr(ifa
->ifa_addr
, laddr
->ifa
->ifa_addr
) == 1) {
1438 sin6
= (struct sockaddr_in6
*)laddr
->ifa
->ifa_addr
;
1444 /* Next try for matching scope */
1445 LIST_FOREACH(laddr
, &stcb
->asoc
.sctp_local_addr_list
,
1447 if (laddr
->ifa
== NULL
) {
1448 /* address has been removed */
1451 sin6
= sctp_is_v6_ifa_addr_acceptable (laddr
->ifa
, loopscope
, loc_scope
, &sin_loop
, &sin_local
);
1455 if ((loopscope
== 0) &&
1459 /* all of global scope we are ok with it */
1462 if (loopscope
&& sin_loop
)
1463 /* both on the loopback, thats ok */
1465 if (loc_scope
&& sin_local
)
1466 /* both local scope */
1469 /* ok, now try for a higher scope in the source address */
1470 /* First try for matching scope */
1471 LIST_FOREACH(laddr
, &stcb
->asoc
.sctp_local_addr_list
,
1473 if (laddr
->ifa
== NULL
) {
1474 /* address has been removed */
1477 sin6
= sctp_is_v6_ifa_addr_acceptable (laddr
->ifa
, loopscope
, loc_scope
, &sin_loop
, &sin_local
);
1486 static struct sockaddr_in6
*
1487 sctp_choose_v6_boundspecific_inp(struct sctp_inpcb
*inp
,
1493 * Here we are bound specific and have only
1494 * an inp. We must find an address that is bound
1495 * that we can give out as a src address. We
1496 * prefer two addresses of same scope if we can
1497 * find them that way.
1499 struct sctp_laddr
*laddr
;
1500 struct sockaddr_in6
*sin6
;
1503 int sin_loop
, sin_local
;
1505 /* first question, is the ifn we will emit on
1506 * in our list, if so, we want that one.
1511 TAILQ_FOREACH(ifa
, &ifn
->if_addrlist
, ifa_list
) {
1512 sin6
= sctp_is_v6_ifa_addr_acceptable (ifa
, loopscope
, loc_scope
, &sin_loop
, &sin_local
);
1515 if (sctp_is_addr_in_ep(inp
, ifa
)) {
1520 for (laddr
= LIST_FIRST(&inp
->sctp_addr_list
);
1521 laddr
&& (laddr
!= inp
->next_addr_touse
);
1522 laddr
= LIST_NEXT(laddr
, sctp_nxt_addr
)) {
1523 if (laddr
->ifa
== NULL
) {
1524 /* address has been removed */
1527 sin6
= sctp_is_v6_ifa_addr_acceptable (laddr
->ifa
, loopscope
, loc_scope
, &sin_loop
, &sin_local
);
1531 if ((loopscope
== 0) &&
1535 /* all of global scope we are ok with it */
1538 if (loopscope
&& sin_loop
)
1539 /* both on the loopback, thats ok */
1541 if (loc_scope
&& sin_local
)
1542 /* both local scope */
1546 /* if we reach here, we could not find two addresses
1547 * of the same scope to give out. Lets look for any higher level
1548 * scope for a source address.
1550 for (laddr
= LIST_FIRST(&inp
->sctp_addr_list
);
1551 laddr
&& (laddr
!= inp
->next_addr_touse
);
1552 laddr
= LIST_NEXT(laddr
, sctp_nxt_addr
)) {
1553 if (laddr
->ifa
== NULL
) {
1554 /* address has been removed */
1557 sin6
= sctp_is_v6_ifa_addr_acceptable (laddr
->ifa
, loopscope
, loc_scope
, &sin_loop
, &sin_local
);
1562 /* no address bound can be a source for the destination */
1564 if (sctp_debug_on
& SCTP_DEBUG_OUTPUT1
) {
1565 kprintf("Src address selection for EP, no acceptable src address found for address\n");
1572 static struct sockaddr_in6
*
1573 sctp_select_v6_nth_addr_from_ifn_boundall (struct ifnet
*ifn
, struct sctp_tcb
*stcb
, int non_asoc_addr_ok
, uint8_t loopscope
,
1574 uint8_t loc_scope
, int cur_addr_num
, int match_scope
)
1577 struct sockaddr_in6
*sin6
;
1578 int sin_loop
, sin_local
;
1579 int num_eligible_addr
= 0;
1581 TAILQ_FOREACH(ifa
, &ifn
->if_addrlist
, ifa_list
) {
1582 sin6
= sctp_is_v6_ifa_addr_acceptable (ifa
, loopscope
, loc_scope
, &sin_loop
, &sin_local
);
1586 if ((non_asoc_addr_ok
== 0) && sctp_is_addr_restricted(stcb
, (struct sockaddr
*)sin6
)) {
1587 /* It is restricted for some reason.. probably
1594 /* Here we are asked to match scope if possible */
1595 if (loopscope
&& sin_loop
)
1596 /* src and destination are loopback scope */
1598 if (loc_scope
&& sin_local
)
1599 /* src and destination are local scope */
1601 if ((loopscope
== 0) &&
1605 /* src and destination are global scope */
1610 if (num_eligible_addr
== cur_addr_num
) {
1614 num_eligible_addr
++;
1621 sctp_count_v6_num_eligible_boundall (struct ifnet
*ifn
, struct sctp_tcb
*stcb
,
1622 int non_asoc_addr_ok
, uint8_t loopscope
, uint8_t loc_scope
)
1625 struct sockaddr_in6
*sin6
;
1626 int num_eligible_addr
= 0;
1627 int sin_loop
, sin_local
;
1629 TAILQ_FOREACH(ifa
, &ifn
->if_addrlist
, ifa_list
) {
1630 sin6
= sctp_is_v6_ifa_addr_acceptable (ifa
, loopscope
, loc_scope
, &sin_loop
, &sin_local
);
1634 if ((non_asoc_addr_ok
== 0) && sctp_is_addr_restricted(stcb
, (struct sockaddr
*)sin6
)) {
1635 /* It is restricted for some reason.. probably
1641 num_eligible_addr
++;
1643 return (num_eligible_addr
);
1647 static struct sockaddr_in6
*
1648 sctp_choose_v6_boundall(struct sctp_inpcb
*inp
,
1649 struct sctp_tcb
*stcb
,
1650 struct sctp_nets
*net
,
1654 int non_asoc_addr_ok
)
1656 /* Ok, we are bound all SO any address
1657 * is ok to use as long as it is NOT in the negative
1660 int num_eligible_addr
;
1662 int started_at_beginning
=0;
1663 int match_scope_prefered
;
1664 /* first question is, how many eligible addresses are
1665 * there for the destination ifn that we are using that
1666 * are within the proper scope?
1669 struct sockaddr_in6
*sin6
;
1673 cur_addr_num
= net
->indx_of_eligible_next_to_use
;
1675 if (cur_addr_num
== 0) {
1676 match_scope_prefered
= 1;
1678 match_scope_prefered
= 0;
1680 num_eligible_addr
= sctp_count_v6_num_eligible_boundall (ifn
, stcb
, non_asoc_addr_ok
, loopscope
, loc_scope
);
1682 if (sctp_debug_on
& SCTP_DEBUG_OUTPUT1
) {
1683 kprintf("Found %d eligible source addresses\n", num_eligible_addr
);
1686 if (num_eligible_addr
== 0) {
1687 /* no eligible addresses, we must use some other
1688 * interface address if we can find one.
1690 goto bound_all_v6_plan_b
;
1692 /* Ok we have num_eligible_addr set with how many we can use,
1693 * this may vary from call to call due to addresses being deprecated etc..
1695 if (cur_addr_num
>= num_eligible_addr
) {
1698 /* select the nth address from the list (where cur_addr_num is the nth) and
1699 * 0 is the first one, 1 is the second one etc...
1702 if (sctp_debug_on
& SCTP_DEBUG_OUTPUT1
) {
1703 kprintf("cur_addr_num:%d match_scope_prefered:%d select it\n",
1704 cur_addr_num
, match_scope_prefered
);
1707 sin6
= sctp_select_v6_nth_addr_from_ifn_boundall (ifn
, stcb
, non_asoc_addr_ok
, loopscope
,
1708 loc_scope
, cur_addr_num
, match_scope_prefered
);
1709 if (match_scope_prefered
&& (sin6
== NULL
)) {
1710 /* retry without the preference for matching scope */
1712 if (sctp_debug_on
& SCTP_DEBUG_OUTPUT1
) {
1713 kprintf("retry with no match_scope_prefered\n");
1716 sin6
= sctp_select_v6_nth_addr_from_ifn_boundall (ifn
, stcb
, non_asoc_addr_ok
, loopscope
,
1717 loc_scope
, cur_addr_num
, 0);
1721 if (sctp_debug_on
& SCTP_DEBUG_OUTPUT1
) {
1722 kprintf("Selected address %d ifn:%x for the route\n", cur_addr_num
, (u_int
)ifn
);
1726 /* store so we get the next one */
1727 if (cur_addr_num
< 255)
1728 net
->indx_of_eligible_next_to_use
= cur_addr_num
+ 1;
1730 net
->indx_of_eligible_next_to_use
= 0;
1734 num_eligible_addr
= 0;
1735 bound_all_v6_plan_b
:
1736 /* ok, if we reach here we either fell through
1737 * due to something changing during an interupt (unlikely)
1738 * or we have NO eligible source addresses for the ifn
1739 * of the route (most likely). We must look at all the other
1740 * interfaces EXCEPT rt->rt_ifp and do the same game.
1743 if (sctp_debug_on
& SCTP_DEBUG_OUTPUT1
) {
1744 kprintf("bound-all Plan B\n");
1747 if (inp
->next_ifn_touse
== NULL
) {
1748 started_at_beginning
=1;
1749 inp
->next_ifn_touse
= TAILQ_FIRST(&ifnet
);
1751 if (sctp_debug_on
& SCTP_DEBUG_OUTPUT1
) {
1752 kprintf("Start at first IFN:%x\n", (u_int
)inp
->next_ifn_touse
);
1756 inp
->next_ifn_touse
= TAILQ_NEXT(inp
->next_ifn_touse
, if_list
);
1758 if (sctp_debug_on
& SCTP_DEBUG_OUTPUT1
) {
1759 kprintf("Resume at IFN:%x\n", (u_int
)inp
->next_ifn_touse
);
1762 if (inp
->next_ifn_touse
== NULL
) {
1764 if (sctp_debug_on
& SCTP_DEBUG_OUTPUT1
) {
1765 kprintf("IFN Resets\n");
1768 started_at_beginning
=1;
1769 inp
->next_ifn_touse
= TAILQ_FIRST(&ifnet
);
1772 for (ifn
= inp
->next_ifn_touse
; ifn
;
1773 ifn
= TAILQ_NEXT(ifn
, if_list
)) {
1774 if (loopscope
== 0 && ifn
->if_type
== IFT_LOOP
) {
1775 /* wrong base scope */
1778 if (loc_scope
&& (ifn
->if_index
!= loc_scope
)) {
1779 /* by definition the scope (from to->sin6_scopeid)
1780 * must match that of the interface. If not then
1781 * we could pick a wrong scope for the address.
1782 * Ususally we don't hit plan-b since the route
1783 * handles this. However we can hit plan-b when
1784 * we send to local-host so the route is the
1785 * loopback interface, but the destination is a
1790 if (ifn
== rt
->rt_ifp
) {
1791 /* already looked at this guy */
1794 /* Address rotation will only work when we are not
1795 * rotating sourced interfaces and are using the interface
1796 * of the route. We would need to have a per interface index
1797 * in order to do proper rotation.
1799 num_eligible_addr
= sctp_count_v6_num_eligible_boundall (ifn
, stcb
, non_asoc_addr_ok
, loopscope
, loc_scope
);
1801 if (sctp_debug_on
& SCTP_DEBUG_OUTPUT1
) {
1802 kprintf("IFN:%x has %d eligible\n", (u_int
)ifn
, num_eligible_addr
);
1805 if (num_eligible_addr
== 0) {
1806 /* none we can use */
1809 /* Ok we have num_eligible_addr set with how many we can use,
1810 * this may vary from call to call due to addresses being deprecated etc..
1812 inp
->next_ifn_touse
= ifn
;
1814 /* select the first one we can find with perference for matching scope.
1816 sin6
= sctp_select_v6_nth_addr_from_ifn_boundall (ifn
, stcb
, non_asoc_addr_ok
, loopscope
, loc_scope
, 0, 1);
1818 /* can't find one with matching scope how about a source with higher
1821 sin6
= sctp_select_v6_nth_addr_from_ifn_boundall (ifn
, stcb
, non_asoc_addr_ok
, loopscope
, loc_scope
, 0, 0);
1823 /* Hmm, can't find one in the interface now */
1827 if (sctp_debug_on
& SCTP_DEBUG_OUTPUT1
) {
1828 kprintf("Selected the %d'th address of ifn:%x\n",
1835 if (started_at_beginning
== 0) {
1836 /* we have not been through all of them yet, force
1837 * us to go through them all.
1840 if (sctp_debug_on
& SCTP_DEBUG_OUTPUT1
) {
1841 kprintf("Force a recycle\n");
1844 inp
->next_ifn_touse
= NULL
;
1845 goto bound_all_v6_plan_b
;
1851 /* stcb and net may be NULL */
1853 sctp_ipv6_source_address_selection(struct sctp_inpcb
*inp
,
1854 struct sctp_tcb
*stcb
, struct route
*ro
, struct sctp_nets
*net
,
1855 int non_asoc_addr_ok
)
1857 struct in6_addr ans
;
1858 struct sockaddr_in6
*rt_addr
;
1859 uint8_t loc_scope
, loopscope
;
1860 struct sockaddr_in6
*to
= (struct sockaddr_in6
*)&ro
->ro_dst
;
1863 * This routine is tricky standard v6 src address
1864 * selection cannot take into account what we have
1865 * bound etc, so we can't use it.
1867 * Instead here is what we must do:
1868 * 1) Make sure we have a route, if we
1869 * don't have a route we can never reach the peer.
1870 * 2) Once we have a route, determine the scope of the
1871 * route. Link local, loopback or global.
1872 * 3) Next we divide into three types. Either we
1873 * are bound all.. which means we want to use
1874 * one of the addresses of the interface we are
1876 * 4a) We have not stcb, which means we are using the
1877 * specific addresses bound on an inp, in this
1878 * case we are similar to the stcb case (4b below)
1879 * accept the list is always a positive list.<or>
1880 * 4b) We are bound specific with a stcb, which means we have a
1881 * list of bound addresses and we must see if the
1882 * ifn of the route is actually one of the bound addresses.
1883 * If not, then we must rotate addresses amongst properly
1884 * scoped bound addresses, if so we use the address
1886 * 5) Always, no matter which path we take through the above
1887 * we must be sure the source address we use is allowed to
1888 * be used. I.e. IN6_IFF_DETACHED, IN6_IFF_NOTREADY, and IN6_IFF_ANYCAST
1889 * addresses cannot be used.
1890 * 6) Addresses that are deprecated MAY be used
1891 * if (!ip6_use_deprecated) {
1892 * if (IFA6_IS_DEPRECATED(ifa6)) {
1898 /*** 1> determine route, if not already done */
1899 if (ro
->ro_rt
== NULL
) {
1901 * Need a route to cache.
1903 #ifndef SCOPEDROUTING
1905 scope_save
= to
->sin6_scope_id
;
1906 to
->sin6_scope_id
= 0;
1909 #if defined(__FreeBSD__) || defined(__APPLE__) || defined(__DragonFly__)
1910 rtalloc_ign(ro
, 0UL);
1914 #ifndef SCOPEDROUTING
1915 to
->sin6_scope_id
= scope_save
;
1918 if (ro
->ro_rt
== NULL
) {
1920 * no route to host. this packet is going no-where.
1921 * We probably should make sure we arrange to send back
1925 if (sctp_debug_on
& SCTP_DEBUG_OUTPUT1
) {
1926 kprintf("No route to host, this packet cannot be sent!\n");
1929 memset(&ans
, 0, sizeof(ans
));
1933 /*** 2a> determine scope for outbound address/route */
1934 loc_scope
= loopscope
= 0;
1936 * We base our scope on the outbound packet scope and route,
1937 * NOT the TCB (if there is one). This way in local scope we will only
1938 * use a local scope src address when we send to a local address.
1941 if (IN6_IS_ADDR_LOOPBACK(&to
->sin6_addr
)) {
1942 /* If the route goes to the loopback address OR
1943 * the address is a loopback address, we are loopback
1947 if (sctp_debug_on
& SCTP_DEBUG_OUTPUT1
) {
1948 kprintf("Loopback scope is set\n");
1954 /* mark it as local */
1955 net
->addr_is_local
= 1;
1958 } else if (IN6_IS_ADDR_LINKLOCAL(&to
->sin6_addr
)) {
1960 if (sctp_debug_on
& SCTP_DEBUG_OUTPUT1
) {
1961 kprintf("Link local scope is set, id:%d\n", to
->sin6_scope_id
);
1964 if (to
->sin6_scope_id
)
1965 loc_scope
= to
->sin6_scope_id
;
1972 if (sctp_debug_on
& SCTP_DEBUG_OUTPUT1
) {
1973 kprintf("Global scope is set\n");
1978 /* now, depending on which way we are bound we call the appropriate
1979 * routine to do steps 3-6
1982 if (sctp_debug_on
& SCTP_DEBUG_OUTPUT1
) {
1983 kprintf("Destination address:");
1984 sctp_print_address((struct sockaddr
*)to
);
1988 if (inp
->sctp_flags
& SCTP_PCB_FLAGS_BOUNDALL
) {
1990 if (sctp_debug_on
& SCTP_DEBUG_OUTPUT1
) {
1991 kprintf("Calling bound-all src addr selection for v6\n");
1994 rt_addr
= sctp_choose_v6_boundall(inp
, stcb
, net
, ro
->ro_rt
, loc_scope
, loopscope
, non_asoc_addr_ok
);
1997 if (sctp_debug_on
& SCTP_DEBUG_OUTPUT1
) {
1998 kprintf("Calling bound-specific src addr selection for v6\n");
2002 rt_addr
= sctp_choose_v6_boundspecific_stcb(inp
, stcb
, net
, ro
->ro_rt
, loc_scope
, loopscope
, non_asoc_addr_ok
);
2004 /* we can't have a non-asoc address since we have no association */
2005 rt_addr
= sctp_choose_v6_boundspecific_inp(inp
, ro
->ro_rt
, loc_scope
, loopscope
);
2007 if (rt_addr
== NULL
) {
2008 /* no suitable address? */
2009 struct in6_addr in6
;
2011 if (sctp_debug_on
& SCTP_DEBUG_OUTPUT1
) {
2012 kprintf("V6 packet will reach dead-end no suitable src address\n");
2015 memset(&in6
, 0, sizeof(in6
));
2019 if (sctp_debug_on
& SCTP_DEBUG_OUTPUT1
) {
2020 kprintf("Source address selected is:");
2021 sctp_print_address((struct sockaddr
*)rt_addr
);
2024 return (rt_addr
->sin6_addr
);
2028 sctp_get_ect(struct sctp_tcb
*stcb
,
2029 struct sctp_tmit_chunk
*chk
)
2031 uint8_t this_random
;
2037 if (sctp_ecn_nonce
== 0)
2038 /* no nonce, always return ECT0 */
2039 return (SCTP_ECT0_BIT
);
2041 if (stcb
->asoc
.peer_supports_ecn_nonce
== 0) {
2042 /* Peer does NOT support it, so we send a ECT0 only */
2043 return (SCTP_ECT0_BIT
);
2047 return (SCTP_ECT0_BIT
);
2049 if (((stcb
->asoc
.hb_random_idx
== 3) &&
2050 (stcb
->asoc
.hb_ect_randombit
> 7)) ||
2051 (stcb
->asoc
.hb_random_idx
> 3)) {
2053 rndval
= sctp_select_initial_TSN(&stcb
->sctp_ep
->sctp_ep
);
2054 memcpy(stcb
->asoc
.hb_random_values
, &rndval
,
2055 sizeof(stcb
->asoc
.hb_random_values
));
2056 this_random
= stcb
->asoc
.hb_random_values
[0];
2057 stcb
->asoc
.hb_random_idx
= 0;
2058 stcb
->asoc
.hb_ect_randombit
= 0;
2060 if (stcb
->asoc
.hb_ect_randombit
> 7) {
2061 stcb
->asoc
.hb_ect_randombit
= 0;
2062 stcb
->asoc
.hb_random_idx
++;
2064 this_random
= stcb
->asoc
.hb_random_values
[stcb
->asoc
.hb_random_idx
];
2066 if ((this_random
>> stcb
->asoc
.hb_ect_randombit
) & 0x01) {
2068 /* ECN Nonce stuff */
2069 chk
->rec
.data
.ect_nonce
= SCTP_ECT1_BIT
;
2070 stcb
->asoc
.hb_ect_randombit
++;
2071 return (SCTP_ECT1_BIT
);
2073 stcb
->asoc
.hb_ect_randombit
++;
2074 return (SCTP_ECT0_BIT
);
2078 extern int sctp_no_csum_on_loopback
;
2081 sctp_lowlevel_chunk_output(struct sctp_inpcb
*inp
,
2082 struct sctp_tcb
*stcb
, /* may be NULL */
2083 struct sctp_nets
*net
,
2084 struct sockaddr
*to
,
2086 int nofragment_flag
,
2088 struct sctp_tmit_chunk
*chk
,
2090 /* nofragment_flag to tell if IP_DF should be set (IPv4 only) */
2093 * Given a mbuf chain (via m_next) that holds a packet header
2094 * WITH a SCTPHDR but no IP header, endpoint inp and sa structure.
2095 * - calculate SCTP checksum and fill in
2096 * - prepend a IP address header
2097 * - if boundall use INADDR_ANY
2098 * - if boundspecific do source address selection
2099 * - set fragmentation option for ipV4
2100 * - On return from IP output, check/adjust mtu size
2101 * - of output interface and smallest_mtu size as well.
2103 struct sctphdr
*sctphdr
;
2107 unsigned int have_mtu
;
2110 if ((net
) && (net
->dest_state
& SCTP_ADDR_OUT_OF_SCOPE
)) {
2114 if ((m
->m_flags
& M_PKTHDR
) == 0) {
2116 if (sctp_debug_on
& SCTP_DEBUG_OUTPUT1
) {
2117 kprintf("Software error: sctp_lowlevel_chunk_output() called with non pkthdr!\n");
2123 /* Calculate the csum and fill in the length of the packet */
2124 sctphdr
= mtod(m
, struct sctphdr
*);
2126 if (sctp_no_csum_on_loopback
&&
2128 (stcb
->asoc
.loopback_scope
)) {
2129 sctphdr
->checksum
= 0;
2130 m
->m_pkthdr
.len
= sctp_calculate_len(m
);
2132 sctphdr
->checksum
= 0;
2133 csum
= sctp_calculate_sum(m
, &m
->m_pkthdr
.len
, 0);
2134 sctphdr
->checksum
= csum
;
2136 if (to
->sa_family
== AF_INET
) {
2138 struct route iproute
;
2139 M_PREPEND(m
, sizeof(struct ip
), MB_DONTWAIT
);
2141 /* failed to prepend data, give up */
2144 ip
= mtod(m
, struct ip
*);
2145 ip
->ip_v
= IPVERSION
;
2146 ip
->ip_hl
= (sizeof(struct ip
) >> 2);
2147 if (nofragment_flag
) {
2148 #if defined(WITH_CONVERT_IP_OFF) || defined(__FreeBSD__) || defined(__DragonFly__)
2149 #if defined( __OpenBSD__) || defined(__NetBSD__)
2150 /* OpenBSD has WITH_CONVERT_IP_OFF defined?? */
2151 ip
->ip_off
= htons(IP_DF
);
2156 ip
->ip_off
= htons(IP_DF
);
2161 /* FreeBSD and Apple have RANDOM_IP_ID switch */
2162 #if defined(RANDOM_IP_ID) || defined(__NetBSD__) || defined(__OpenBSD__)
2163 ip
->ip_id
= htons(ip_randomid());
2165 ip
->ip_id
= htons(ip_id
++);
2168 #if defined(__FreeBSD__) || defined(__APPLE__) || defined(__DragonFly__)
2169 ip
->ip_ttl
= inp
->ip_inp
.inp
.inp_ip_ttl
;
2171 ip
->ip_ttl
= inp
->inp_ip_ttl
;
2173 #if defined(__OpenBSD__) || defined(__NetBSD__)
2174 ip
->ip_len
= htons(m
->m_pkthdr
.len
);
2176 ip
->ip_len
= m
->m_pkthdr
.len
;
2179 if ((stcb
->asoc
.ecn_allowed
) && ecn_ok
) {
2181 #if defined(__FreeBSD__) || defined (__APPLE__) || defined(__DragonFly__)
2182 ip
->ip_tos
= (u_char
)((inp
->ip_inp
.inp
.inp_ip_tos
& 0x000000fc) |
2183 sctp_get_ect(stcb
, chk
));
2184 #elif defined(__NetBSD__)
2185 ip
->ip_tos
= (u_char
)((inp
->ip_inp
.inp
.inp_ip
.ip_tos
& 0x000000fc) |
2186 sctp_get_ect(stcb
, chk
));
2188 ip
->ip_tos
= (u_char
)((inp
->inp_ip_tos
& 0x000000fc) |
2189 sctp_get_ect(stcb
, chk
));
2193 #if defined(__FreeBSD__) || defined(__APPLE__) || defined(__DragonFly__)
2194 ip
->ip_tos
= inp
->ip_inp
.inp
.inp_ip_tos
;
2195 #elif defined(__NetBSD__)
2196 ip
->ip_tos
= inp
->ip_inp
.inp
.inp_ip
.ip_tos
;
2198 ip
->ip_tos
= inp
->inp_ip_tos
;
2202 /* no association at all */
2203 #if defined(__FreeBSD__) || defined(__APPLE__) || defined(__DragonFly__)
2204 ip
->ip_tos
= inp
->ip_inp
.inp
.inp_ip_tos
;
2206 ip
->ip_tos
= inp
->inp_ip_tos
;
2209 ip
->ip_p
= IPPROTO_SCTP
;
2213 memset(&iproute
, 0, sizeof(iproute
));
2214 memcpy(&ro
->ro_dst
, to
, to
->sa_len
);
2216 ro
= (struct route
*)&net
->ro
;
2218 /* Now the address selection part */
2219 ip
->ip_dst
.s_addr
= ((struct sockaddr_in
*)to
)->sin_addr
.s_addr
;
2221 /* call the routine to select the src address */
2223 if (net
->src_addr_selected
== 0) {
2224 /* Cache the source address */
2225 ((struct sockaddr_in
*)&net
->ro
._s_addr
)->sin_addr
= sctp_ipv4_source_address_selection(inp
,
2227 ro
, net
, out_of_asoc_ok
);
2229 net
->src_addr_selected
= 1;
2231 ip
->ip_src
= ((struct sockaddr_in
*)&net
->ro
._s_addr
)->sin_addr
;
2233 ip
->ip_src
= sctp_ipv4_source_address_selection(inp
,
2234 stcb
, ro
, net
, out_of_asoc_ok
);
2237 * If source address selection fails and we find no route then
2238 * the ip_ouput should fail as well with a NO_ROUTE_TO_HOST
2239 * type error. We probably should catch that somewhere and
2240 * abort the association right away (assuming this is an INIT
2243 if ((ro
->ro_rt
== NULL
)) {
2245 * src addr selection failed to find a route (or valid
2246 * source addr), so we can't get there from here!
2249 if (sctp_debug_on
& SCTP_DEBUG_OUTPUT1
) {
2250 kprintf("low_level_output: dropped v4 packet- no valid source addr\n");
2251 kprintf("Destination was %x\n", (u_int
)(ntohl(ip
->ip_dst
.s_addr
)));
2253 #endif /* SCTP_DEBUG */
2255 if ((net
->dest_state
& SCTP_ADDR_REACHABLE
) && stcb
)
2256 sctp_ulp_notify(SCTP_NOTIFY_INTERFACE_DOWN
,
2258 SCTP_FAILED_THRESHOLD
,
2260 net
->dest_state
&= ~SCTP_ADDR_REACHABLE
;
2261 net
->dest_state
|= SCTP_ADDR_NOT_REACHABLE
;
2263 if (net
== stcb
->asoc
.primary_destination
) {
2264 /* need a new primary */
2265 struct sctp_nets
*alt
;
2266 alt
= sctp_find_alternate_net(stcb
, net
);
2268 if (sctp_set_primary_addr(stcb
,
2269 (struct sockaddr
*)NULL
,
2271 net
->dest_state
|= SCTP_ADDR_WAS_PRIMARY
;
2272 net
->src_addr_selected
= 0;
2279 return (EHOSTUNREACH
);
2281 have_mtu
= ro
->ro_rt
->rt_ifp
->if_mtu
;
2284 o_flgs
= (IP_RAWOUTPUT
| (inp
->sctp_socket
->so_options
& (SO_DONTROUTE
| SO_BROADCAST
)));
2286 if (sctp_debug_on
& SCTP_DEBUG_OUTPUT3
) {
2287 kprintf("Calling ipv4 output routine from low level src addr:%x\n",
2288 (u_int
)(ntohl(ip
->ip_src
.s_addr
)));
2289 kprintf("Destination is %x\n", (u_int
)(ntohl(ip
->ip_dst
.s_addr
)));
2290 kprintf("RTP route is %p through\n", ro
->ro_rt
);
2293 if ((have_mtu
) && (net
) && (have_mtu
> net
->mtu
)) {
2294 ro
->ro_rt
->rt_ifp
->if_mtu
= net
->mtu
;
2296 ret
= ip_output(m
, inp
->ip_inp
.inp
.inp_options
,
2297 ro
, o_flgs
, inp
->ip_inp
.inp
.inp_moptions
2298 #if defined(__OpenBSD__) || (defined(__FreeBSD__) && __FreeBSD_version >= 480000) \
2299 || defined(__DragonFly__)
2300 , (struct inpcb
*)NULL
2302 #if defined(__NetBSD__)
2303 ,(struct socket
*)inp
->sctp_socket
2307 if ((ro
->ro_rt
) && (have_mtu
) && (net
) && (have_mtu
> net
->mtu
)) {
2308 ro
->ro_rt
->rt_ifp
->if_mtu
= have_mtu
;
2310 sctp_pegs
[SCTP_DATAGRAMS_SENT
]++;
2312 if (sctp_debug_on
& SCTP_DEBUG_OUTPUT3
) {
2313 kprintf("Ip output returns %d\n", ret
);
2317 /* free tempy routes */
2321 /* PMTU check versus smallest asoc MTU goes here */
2322 if (ro
->ro_rt
!= NULL
) {
2323 if (ro
->ro_rt
->rt_rmx
.rmx_mtu
&&
2324 (stcb
->asoc
.smallest_mtu
> ro
->ro_rt
->rt_rmx
.rmx_mtu
)) {
2325 sctp_mtu_size_reset(inp
, &stcb
->asoc
,
2326 ro
->ro_rt
->rt_rmx
.rmx_mtu
);
2329 /* route was freed */
2330 net
->src_addr_selected
= 0;
2336 else if (to
->sa_family
== AF_INET6
) {
2337 struct ip6_hdr
*ip6h
;
2338 #ifdef NEW_STRUCT_ROUTE
2339 struct route ip6route
;
2341 struct route_in6 ip6route
;
2345 uint16_t flowBottom
;
2346 u_char tosBottom
, tosTop
;
2347 struct sockaddr_in6
*sin6
, tmp
, *lsa6
, lsa6_tmp
;
2348 struct sockaddr_in6 lsa6_storage
;
2351 u_short prev_port
=0;
2353 M_PREPEND(m
, sizeof(struct ip6_hdr
), MB_DONTWAIT
);
2355 /* failed to prepend data, give up */
2358 ip6h
= mtod(m
, struct ip6_hdr
*);
2361 * We assume here that inp_flow is in host byte order within
2364 flowBottom
= ((struct in6pcb
*)inp
)->in6p_flowinfo
& 0x0000ffff;
2365 flowTop
= ((((struct in6pcb
*)inp
)->in6p_flowinfo
& 0x000f0000) >> 16);
2367 tosTop
= (((((struct in6pcb
*)inp
)->in6p_flowinfo
& 0xf0) >> 4) | IPV6_VERSION
);
2369 /* protect *sin6 from overwrite */
2370 sin6
= (struct sockaddr_in6
*)to
;
2374 /* KAME hack: embed scopeid */
2375 #if defined(SCTP_BASE_FREEBSD) || defined(__APPLE__) || defined(__DragonFly__)
2376 if (in6_embedscope(&sin6
->sin6_addr
, sin6
, NULL
, NULL
) != 0)
2378 if (in6_embedscope(&sin6
->sin6_addr
, sin6
) != 0)
2382 memset(&ip6route
, 0, sizeof(ip6route
));
2383 ro
= (struct route
*)&ip6route
;
2384 memcpy(&ro
->ro_dst
, sin6
, sin6
->sin6_len
);
2386 ro
= (struct route
*)&net
->ro
;
2389 if ((stcb
->asoc
.ecn_allowed
) && ecn_ok
) {
2391 tosBottom
= (((((struct in6pcb
*)inp
)->in6p_flowinfo
& 0x0c) | sctp_get_ect(stcb
, chk
)) << 4);
2394 tosBottom
= ((((struct in6pcb
*)inp
)->in6p_flowinfo
& 0x0c) << 4);
2397 /* we could get no asoc if it is a O-O-T-B packet */
2398 tosBottom
= ((((struct in6pcb
*)inp
)->in6p_flowinfo
& 0x0c) << 4);
2400 ip6h
->ip6_flow
= htonl(((tosTop
<< 24) | ((tosBottom
|flowTop
) << 16) | flowBottom
));
2401 ip6h
->ip6_nxt
= IPPROTO_SCTP
;
2402 ip6h
->ip6_plen
= m
->m_pkthdr
.len
;
2403 ip6h
->ip6_dst
= sin6
->sin6_addr
;
2406 * Add SRC address selection here:
2407 * we can only reuse to a limited degree the kame src-addr-sel,
2408 * since we can try their selection but it may not be bound.
2410 bzero(&lsa6_tmp
, sizeof(lsa6_tmp
));
2411 lsa6_tmp
.sin6_family
= AF_INET6
;
2412 lsa6_tmp
.sin6_len
= sizeof(lsa6_tmp
);
2415 if (net
->src_addr_selected
== 0) {
2416 /* Cache the source address */
2417 ((struct sockaddr_in6
*)&net
->ro
._s_addr
)->sin6_addr
= sctp_ipv6_source_address_selection(inp
,
2418 stcb
, ro
, net
, out_of_asoc_ok
);
2421 net
->src_addr_selected
= 1;
2423 lsa6
->sin6_addr
= ((struct sockaddr_in6
*)&net
->ro
._s_addr
)->sin6_addr
;
2425 lsa6
->sin6_addr
= sctp_ipv6_source_address_selection(
2426 inp
, stcb
, ro
, net
, out_of_asoc_ok
);
2428 lsa6
->sin6_port
= inp
->sctp_lport
;
2430 if ((ro
->ro_rt
== NULL
)) {
2432 * src addr selection failed to find a route (or valid
2433 * source addr), so we can't get there from here!
2436 if (sctp_debug_on
& SCTP_DEBUG_OUTPUT1
) {
2437 kprintf("low_level_output: dropped v6 pkt- no valid source addr\n");
2442 if ((net
->dest_state
& SCTP_ADDR_REACHABLE
) && stcb
)
2443 sctp_ulp_notify(SCTP_NOTIFY_INTERFACE_DOWN
,
2445 SCTP_FAILED_THRESHOLD
,
2447 net
->dest_state
&= ~SCTP_ADDR_REACHABLE
;
2448 net
->dest_state
|= SCTP_ADDR_NOT_REACHABLE
;
2450 if (net
== stcb
->asoc
.primary_destination
) {
2451 /* need a new primary */
2452 struct sctp_nets
*alt
;
2453 alt
= sctp_find_alternate_net(stcb
, net
);
2455 if (sctp_set_primary_addr(stcb
,
2456 (struct sockaddr
*)NULL
,
2458 net
->dest_state
|= SCTP_ADDR_WAS_PRIMARY
;
2459 net
->src_addr_selected
= 0;
2465 return (EHOSTUNREACH
);
2468 #ifndef SCOPEDROUTING
2470 * XXX: sa6 may not have a valid sin6_scope_id in
2471 * the non-SCOPEDROUTING case.
2473 bzero(&lsa6_storage
, sizeof(lsa6_storage
));
2474 lsa6_storage
.sin6_family
= AF_INET6
;
2475 lsa6_storage
.sin6_len
= sizeof(lsa6_storage
);
2476 if ((error
= in6_recoverscope(&lsa6_storage
, &lsa6
->sin6_addr
,
2482 lsa6_storage
.sin6_addr
= lsa6
->sin6_addr
;
2483 lsa6_storage
.sin6_port
= inp
->sctp_lport
;
2484 lsa6
= &lsa6_storage
;
2485 #endif /* SCOPEDROUTING */
2486 ip6h
->ip6_src
= lsa6
->sin6_addr
;
2489 * We set the hop limit now since there is a good chance that
2490 * our ro pointer is now filled
2492 ip6h
->ip6_hlim
= in6_selecthlim((struct in6pcb
*)&inp
->ip_inp
.inp
,
2494 (ro
->ro_rt
? (ro
->ro_rt
->rt_ifp
) : (NULL
)) :
2497 ifp
= ro
->ro_rt
->rt_ifp
;
2499 if (sctp_debug_on
& SCTP_DEBUG_OUTPUT3
) {
2500 /* Copy to be sure something bad is not happening */
2501 sin6
->sin6_addr
= ip6h
->ip6_dst
;
2502 lsa6
->sin6_addr
= ip6h
->ip6_src
;
2504 kprintf("Calling ipv6 output routine from low level\n");
2506 sctp_print_address((struct sockaddr
*)lsa6
);
2508 sctp_print_address((struct sockaddr
*)sin6
);
2510 #endif /* SCTP_DEBUG */
2512 sin6
= (struct sockaddr_in6
*)&net
->ro
._l_addr
;
2513 /* preserve the port and scope for link local send */
2514 prev_scope
= sin6
->sin6_scope_id
;
2515 prev_port
= sin6
->sin6_port
;
2517 ret
= ip6_output(m
, ((struct in6pcb
*)inp
)->in6p_outputopts
,
2518 #ifdef NEW_STRUCT_ROUTE
2521 (struct route_in6
*)ro
,
2524 ((struct in6pcb
*)inp
)->in6p_moptions
,
2525 #if defined(__NetBSD__)
2526 (struct socket
*)inp
->sctp_socket
,
2529 #if (defined(__FreeBSD__) && __FreeBSD_version >= 480000) || defined(__DragonFly__)
2534 /* for link local this must be done */
2535 sin6
->sin6_scope_id
= prev_scope
;
2536 sin6
->sin6_port
= prev_port
;
2539 if (sctp_debug_on
& SCTP_DEBUG_OUTPUT3
) {
2540 kprintf("return from send is %d\n", ret
);
2542 #endif /* SCTP_DEBUG_OUTPUT */
2543 sctp_pegs
[SCTP_DATAGRAMS_SENT
]++;
2545 /* Now if we had a temp route free it */
2550 /* PMTU check versus smallest asoc MTU goes here */
2551 if (ro
->ro_rt
== NULL
) {
2552 /* Route was freed */
2553 net
->src_addr_selected
= 0;
2555 if (ro
->ro_rt
!= NULL
) {
2556 if (ro
->ro_rt
->rt_rmx
.rmx_mtu
&&
2557 (stcb
->asoc
.smallest_mtu
> ro
->ro_rt
->rt_rmx
.rmx_mtu
)) {
2558 sctp_mtu_size_reset(inp
,
2560 ro
->ro_rt
->rt_rmx
.rmx_mtu
);
2563 #if (defined(SCTP_BASE_FREEBSD) && __FreeBSD_version < 500000) || defined(__APPLE__)
2564 #define ND_IFINFO(ifp) (&nd_ifinfo[ifp->if_index])
2565 #endif /* SCTP_BASE_FREEBSD */
2566 if (ND_IFINFO(ifp
)->linkmtu
&&
2567 (stcb
->asoc
.smallest_mtu
> ND_IFINFO(ifp
)->linkmtu
)) {
2568 sctp_mtu_size_reset(inp
,
2570 ND_IFINFO(ifp
)->linkmtu
);
2579 if (sctp_debug_on
& SCTP_DEBUG_OUTPUT1
) {
2580 kprintf("Unknown protocol (TSNH) type %d\n", ((struct sockaddr
*)to
)->sa_family
);
2589 sctp_is_address_in_scope(struct ifaddr
*ifa
,
2590 int ipv4_addr_legal
,
2591 int ipv6_addr_legal
,
2593 int ipv4_local_scope
,
2597 if ((loopback_scope
== 0) &&
2599 (ifa
->ifa_ifp
->if_type
== IFT_LOOP
)) {
2600 /* skip loopback if not in scope *
2604 if ((ifa
->ifa_addr
->sa_family
== AF_INET
) && ipv4_addr_legal
) {
2605 struct sockaddr_in
*sin
;
2606 sin
= (struct sockaddr_in
*)ifa
->ifa_addr
;
2607 if (sin
->sin_addr
.s_addr
== 0) {
2608 /* not in scope , unspecified */
2611 if ((ipv4_local_scope
== 0) &&
2612 (IN4_ISPRIVATE_ADDRESS(&sin
->sin_addr
))) {
2613 /* private address not in scope */
2616 } else if ((ifa
->ifa_addr
->sa_family
== AF_INET6
) && ipv6_addr_legal
) {
2617 struct sockaddr_in6
*sin6
;
2618 struct in6_ifaddr
*ifa6
;
2620 ifa6
= (struct in6_ifaddr
*)ifa
;
2621 /* ok to use deprecated addresses? */
2622 if (!ip6_use_deprecated
) {
2623 if (ifa6
->ia6_flags
&
2624 IN6_IFF_DEPRECATED
) {
2628 if (ifa6
->ia6_flags
&
2631 IN6_IFF_NOTREADY
)) {
2634 sin6
= (struct sockaddr_in6
*)ifa
->ifa_addr
;
2635 if (IN6_IS_ADDR_UNSPECIFIED(&sin6
->sin6_addr
)) {
2636 /* skip unspecifed addresses */
2639 if (/*(local_scope == 0) && */
2640 (IN6_IS_ADDR_LINKLOCAL(&sin6
->sin6_addr
))) {
2643 if ((site_scope
== 0) &&
2644 (IN6_IS_ADDR_SITELOCAL(&sin6
->sin6_addr
))) {
2655 sctp_send_initiate(struct sctp_inpcb
*inp
, struct sctp_tcb
*stcb
)
2657 struct mbuf
*m
, *m_at
, *m_last
;
2658 struct sctp_nets
*net
;
2659 struct sctp_init_msg
*initm
;
2660 struct sctp_supported_addr_param
*sup_addr
;
2661 struct sctp_ecn_supported_param
*ecn
;
2662 struct sctp_prsctp_supported_param
*prsctp
;
2663 struct sctp_ecn_nonce_supported_param
*ecn_nonce
;
2664 struct sctp_supported_chunk_types_param
*pr_supported
;
2668 /* INIT's always go to the primary (and usually ONLY address) */
2670 net
= stcb
->asoc
.primary_destination
;
2672 net
= TAILQ_FIRST(&stcb
->asoc
.nets
);
2677 /* we confirm any address we send an INIT to */
2678 net
->dest_state
&= ~SCTP_ADDR_UNCONFIRMED
;
2679 sctp_set_primary_addr(stcb
, NULL
, net
);
2681 /* we confirm any address we send an INIT to */
2682 net
->dest_state
&= ~SCTP_ADDR_UNCONFIRMED
;
2685 if (sctp_debug_on
& SCTP_DEBUG_OUTPUT4
) {
2686 kprintf("Sending INIT to ");
2687 sctp_print_address ((struct sockaddr
*)&net
->ro
._l_addr
);
2690 if (((struct sockaddr
*)&(net
->ro
._l_addr
))->sa_family
== AF_INET6
) {
2691 /* special hook, if we are sending to link local
2692 * it will not show up in our private address count.
2694 struct sockaddr_in6
*sin6l
;
2695 sin6l
= &net
->ro
._l_addr
.sin6
;
2696 if (IN6_IS_ADDR_LINKLOCAL(&sin6l
->sin6_addr
))
2699 if (callout_pending(&net
->rxt_timer
.timer
)) {
2700 /* This case should not happen */
2703 /* start the INIT timer */
2704 if (sctp_timer_start(SCTP_TIMER_TYPE_INIT
, inp
, stcb
, net
)) {
2705 /* we are hosed since I can't start the INIT timer? */
2708 MGETHDR(m
, MB_DONTWAIT
, MT_HEADER
);
2710 /* No memory, INIT timer will re-attempt. */
2713 /* make it into a M_EXT */
2714 MCLGET(m
, MB_DONTWAIT
);
2715 if ((m
->m_flags
& M_EXT
) != M_EXT
) {
2716 /* Failed to get cluster buffer */
2720 m
->m_data
+= SCTP_MIN_OVERHEAD
;
2721 m
->m_len
= sizeof(struct sctp_init_msg
);
2722 /* Now lets put the SCTP header in place */
2723 initm
= mtod(m
, struct sctp_init_msg
*);
2724 initm
->sh
.src_port
= inp
->sctp_lport
;
2725 initm
->sh
.dest_port
= stcb
->rport
;
2726 initm
->sh
.v_tag
= 0;
2727 initm
->sh
.checksum
= 0; /* calculate later */
2728 /* now the chunk header */
2729 initm
->msg
.ch
.chunk_type
= SCTP_INITIATION
;
2730 initm
->msg
.ch
.chunk_flags
= 0;
2731 /* fill in later from mbuf we build */
2732 initm
->msg
.ch
.chunk_length
= 0;
2733 /* place in my tag */
2734 initm
->msg
.init
.initiate_tag
= htonl(stcb
->asoc
.my_vtag
);
2735 /* set up some of the credits. */
2736 initm
->msg
.init
.a_rwnd
= htonl(max(inp
->sctp_socket
->so_rcv
.ssb_hiwat
,
2737 SCTP_MINIMAL_RWND
));
2739 initm
->msg
.init
.num_outbound_streams
= htons(stcb
->asoc
.pre_open_streams
);
2740 initm
->msg
.init
.num_inbound_streams
= htons(stcb
->asoc
.max_inbound_streams
);
2741 initm
->msg
.init
.initial_tsn
= htonl(stcb
->asoc
.init_seq_number
);
2742 /* now the address restriction */
2743 sup_addr
= (struct sctp_supported_addr_param
*)((caddr_t
)initm
+
2745 sup_addr
->ph
.param_type
= htons(SCTP_SUPPORTED_ADDRTYPE
);
2746 /* we support 2 types IPv6/IPv4 */
2747 sup_addr
->ph
.param_length
= htons(sizeof(*sup_addr
) +
2749 sup_addr
->addr_type
[0] = htons(SCTP_IPV4_ADDRESS
);
2750 sup_addr
->addr_type
[1] = htons(SCTP_IPV6_ADDRESS
);
2751 m
->m_len
+= sizeof(*sup_addr
) + sizeof(uint16_t);
2753 /* if (inp->sctp_flags & SCTP_PCB_FLAGS_ADAPTIONEVNT) {*/
2754 if (inp
->sctp_ep
.adaption_layer_indicator
) {
2755 struct sctp_adaption_layer_indication
*ali
;
2756 ali
= (struct sctp_adaption_layer_indication
*)(
2757 (caddr_t
)sup_addr
+ sizeof(*sup_addr
) + sizeof(uint16_t));
2758 ali
->ph
.param_type
= htons(SCTP_ULP_ADAPTION
);
2759 ali
->ph
.param_length
= htons(sizeof(*ali
));
2760 ali
->indication
= ntohl(inp
->sctp_ep
.adaption_layer_indicator
);
2761 m
->m_len
+= sizeof(*ali
);
2762 ecn
= (struct sctp_ecn_supported_param
*)((caddr_t
)ali
+
2765 ecn
= (struct sctp_ecn_supported_param
*)((caddr_t
)sup_addr
+
2766 sizeof(*sup_addr
) + sizeof(uint16_t));
2769 /* now any cookie time extensions */
2770 if (stcb
->asoc
.cookie_preserve_req
) {
2771 struct sctp_cookie_perserve_param
*cookie_preserve
;
2772 cookie_preserve
= (struct sctp_cookie_perserve_param
*)(ecn
);
2773 cookie_preserve
->ph
.param_type
= htons(SCTP_COOKIE_PRESERVE
);
2774 cookie_preserve
->ph
.param_length
= htons(
2775 sizeof(*cookie_preserve
));
2776 cookie_preserve
->time
= htonl(stcb
->asoc
.cookie_preserve_req
);
2777 m
->m_len
+= sizeof(*cookie_preserve
);
2778 ecn
= (struct sctp_ecn_supported_param
*)(
2779 (caddr_t
)cookie_preserve
+ sizeof(*cookie_preserve
));
2780 stcb
->asoc
.cookie_preserve_req
= 0;
2784 if (sctp_ecn
== 1) {
2785 ecn
->ph
.param_type
= htons(SCTP_ECN_CAPABLE
);
2786 ecn
->ph
.param_length
= htons(sizeof(*ecn
));
2787 m
->m_len
+= sizeof(*ecn
);
2788 prsctp
= (struct sctp_prsctp_supported_param
*)((caddr_t
)ecn
+
2791 prsctp
= (struct sctp_prsctp_supported_param
*)((caddr_t
)ecn
);
2793 /* And now tell the peer we do pr-sctp */
2794 prsctp
->ph
.param_type
= htons(SCTP_PRSCTP_SUPPORTED
);
2795 prsctp
->ph
.param_length
= htons(sizeof(*prsctp
));
2796 m
->m_len
+= sizeof(*prsctp
);
2799 /* And now tell the peer we do all the extensions */
2800 pr_supported
= (struct sctp_supported_chunk_types_param
*)((caddr_t
)prsctp
+
2803 pr_supported
->ph
.param_type
= htons(SCTP_SUPPORTED_CHUNK_EXT
);
2804 pr_supported
->ph
.param_length
= htons(sizeof(*pr_supported
) + SCTP_EXT_COUNT
);
2805 pr_supported
->chunk_types
[0] = SCTP_ASCONF
;
2806 pr_supported
->chunk_types
[1] = SCTP_ASCONF_ACK
;
2807 pr_supported
->chunk_types
[2] = SCTP_FORWARD_CUM_TSN
;
2808 pr_supported
->chunk_types
[3] = SCTP_PACKET_DROPPED
;
2809 pr_supported
->chunk_types
[4] = SCTP_STREAM_RESET
;
2810 pr_supported
->chunk_types
[5] = 0; /* pad */
2811 pr_supported
->chunk_types
[6] = 0; /* pad */
2812 pr_supported
->chunk_types
[7] = 0; /* pad */
2814 m
->m_len
+= (sizeof(*pr_supported
) + SCTP_EXT_COUNT
+ SCTP_PAD_EXT_COUNT
);
2815 /* ECN nonce: And now tell the peer we support ECN nonce */
2817 if (sctp_ecn_nonce
) {
2818 ecn_nonce
= (struct sctp_ecn_nonce_supported_param
*)((caddr_t
)pr_supported
+
2819 sizeof(*pr_supported
) + SCTP_EXT_COUNT
+ SCTP_PAD_EXT_COUNT
);
2820 ecn_nonce
->ph
.param_type
= htons(SCTP_ECN_NONCE_SUPPORTED
);
2821 ecn_nonce
->ph
.param_length
= htons(sizeof(*ecn_nonce
));
2822 m
->m_len
+= sizeof(*ecn_nonce
);
2826 /* now the addresses */
2827 if (inp
->sctp_flags
& SCTP_PCB_FLAGS_BOUNDALL
) {
2833 TAILQ_FOREACH(ifn
, &ifnet
, if_list
) {
2834 if ((stcb
->asoc
.loopback_scope
== 0) &&
2835 (ifn
->if_type
== IFT_LOOP
)) {
2837 * Skip loopback devices if loopback_scope
2842 TAILQ_FOREACH(ifa
, &ifn
->if_addrlist
, ifa_list
) {
2843 if (sctp_is_address_in_scope(ifa
,
2844 stcb
->asoc
.ipv4_addr_legal
,
2845 stcb
->asoc
.ipv6_addr_legal
,
2846 stcb
->asoc
.loopback_scope
,
2847 stcb
->asoc
.ipv4_local_scope
,
2848 stcb
->asoc
.local_scope
,
2849 stcb
->asoc
.site_scope
) == 0) {
2856 TAILQ_FOREACH(ifn
, &ifnet
, if_list
) {
2857 if ((stcb
->asoc
.loopback_scope
== 0) &&
2858 (ifn
->if_type
== IFT_LOOP
)) {
2860 * Skip loopback devices if loopback_scope
2865 TAILQ_FOREACH(ifa
, &ifn
->if_addrlist
, ifa_list
) {
2866 if (sctp_is_address_in_scope(ifa
,
2867 stcb
->asoc
.ipv4_addr_legal
,
2868 stcb
->asoc
.ipv6_addr_legal
,
2869 stcb
->asoc
.loopback_scope
,
2870 stcb
->asoc
.ipv4_local_scope
,
2871 stcb
->asoc
.local_scope
,
2872 stcb
->asoc
.site_scope
) == 0) {
2875 m_at
= sctp_add_addr_to_mbuf(m_at
, ifa
);
2880 struct sctp_laddr
*laddr
;
2883 /* First, how many ? */
2884 LIST_FOREACH(laddr
, &inp
->sctp_addr_list
, sctp_nxt_addr
) {
2885 if (laddr
->ifa
== NULL
) {
2888 if (laddr
->ifa
->ifa_addr
== NULL
)
2890 if (sctp_is_address_in_scope(laddr
->ifa
,
2891 stcb
->asoc
.ipv4_addr_legal
,
2892 stcb
->asoc
.ipv6_addr_legal
,
2893 stcb
->asoc
.loopback_scope
,
2894 stcb
->asoc
.ipv4_local_scope
,
2895 stcb
->asoc
.local_scope
,
2896 stcb
->asoc
.site_scope
) == 0) {
2901 /* To get through a NAT we only list addresses if
2902 * we have more than one. That way if you just
2903 * bind a single address we let the source of the init
2904 * dictate our address.
2907 LIST_FOREACH(laddr
, &inp
->sctp_addr_list
, sctp_nxt_addr
) {
2908 if (laddr
->ifa
== NULL
) {
2911 if (laddr
->ifa
->ifa_addr
== NULL
) {
2915 if (sctp_is_address_in_scope(laddr
->ifa
,
2916 stcb
->asoc
.ipv4_addr_legal
,
2917 stcb
->asoc
.ipv6_addr_legal
,
2918 stcb
->asoc
.loopback_scope
,
2919 stcb
->asoc
.ipv4_local_scope
,
2920 stcb
->asoc
.local_scope
,
2921 stcb
->asoc
.site_scope
) == 0) {
2924 m_at
= sctp_add_addr_to_mbuf(m_at
, laddr
->ifa
);
2928 /* calulate the size and update pkt header and chunk header */
2929 m
->m_pkthdr
.len
= 0;
2930 for (m_at
= m
; m_at
; m_at
= m_at
->m_next
) {
2931 if (m_at
->m_next
== NULL
)
2933 m
->m_pkthdr
.len
+= m_at
->m_len
;
2935 initm
->msg
.ch
.chunk_length
= htons((m
->m_pkthdr
.len
-
2936 sizeof(struct sctphdr
)));
2937 /* We pass 0 here to NOT set IP_DF if its IPv4, we
2938 * ignore the return here since the timer will drive
2942 /* I don't expect this to execute but we will be safe here */
2943 padval
= m
->m_pkthdr
.len
% 4;
2944 if ((padval
) && (m_last
)) {
2945 /* The compiler worries that m_last may not be
2946 * set even though I think it is impossible :->
2947 * however we add m_last here just in case.
2950 ret
= sctp_add_pad_tombuf(m_last
, (4-padval
));
2952 /* Houston we have a problem, no space */
2956 m
->m_pkthdr
.len
+= padval
;
2959 if (sctp_debug_on
& SCTP_DEBUG_OUTPUT4
) {
2960 kprintf("Calling lowlevel output stcb:%x net:%x\n",
2961 (u_int
)stcb
, (u_int
)net
);
2964 ret
= sctp_lowlevel_chunk_output(inp
, stcb
, net
,
2965 (struct sockaddr
*)&net
->ro
._l_addr
, m
, 0, 0, NULL
, 0);
2967 if (sctp_debug_on
& SCTP_DEBUG_OUTPUT4
) {
2968 kprintf("Low level output returns %d\n", ret
);
2971 sctp_timer_start(SCTP_TIMER_TYPE_INIT
, inp
, stcb
, net
);
2972 SCTP_GETTIME_TIMEVAL(&net
->last_sent_time
);
2976 sctp_arethere_unrecognized_parameters(struct mbuf
*in_initpkt
,
2977 int param_offset
, int *abort_processing
, struct sctp_chunkhdr
*cp
)
2979 /* Given a mbuf containing an INIT or INIT-ACK
2980 * with the param_offset being equal to the
2981 * beginning of the params i.e. (iphlen + sizeof(struct sctp_init_msg)
2982 * parse through the parameters to the end of the mbuf verifying
2983 * that all parameters are known.
2985 * For unknown parameters build and return a mbuf with
2986 * UNRECOGNIZED_PARAMETER errors. If the flags indicate
2987 * to stop processing this chunk stop, and set *abort_processing
2990 * By having param_offset be pre-set to where parameters begin
2991 * it is hoped that this routine may be reused in the future
2994 struct sctp_paramhdr
*phdr
, params
;
2996 struct mbuf
*mat
, *op_err
;
2998 int at
, limit
, pad_needed
;
2999 uint16_t ptype
, plen
;
3002 *abort_processing
= 0;
3005 limit
= ntohs(cp
->chunk_length
) - sizeof(struct sctp_init_chunk
);
3007 if (sctp_debug_on
& SCTP_DEBUG_OUTPUT4
) {
3008 kprintf("Limit is %d bytes\n", limit
);
3014 phdr
= sctp_get_next_param(mat
, at
, ¶ms
, sizeof(params
));
3015 while ((phdr
!= NULL
) && ((size_t)limit
>= sizeof(struct sctp_paramhdr
))) {
3016 ptype
= ntohs(phdr
->param_type
);
3017 plen
= ntohs(phdr
->param_length
);
3018 limit
-= SCTP_SIZE32(plen
);
3019 if (plen
< sizeof(struct sctp_paramhdr
)) {
3021 if (sctp_debug_on
& SCTP_DEBUG_OUTPUT4
) {
3022 kprintf("sctp_output.c:Impossible length in parameter < %d\n", plen
);
3025 *abort_processing
= 1;
3028 /* All parameters for all chunks that we
3029 * know/understand are listed here. We process
3030 * them other places and make appropriate
3031 * stop actions per the upper bits. However
3032 * this is the generic routine processor's can
3033 * call to get back an operr.. to either incorporate (init-ack)
3036 if ((ptype
== SCTP_HEARTBEAT_INFO
) ||
3037 (ptype
== SCTP_IPV4_ADDRESS
) ||
3038 (ptype
== SCTP_IPV6_ADDRESS
) ||
3039 (ptype
== SCTP_STATE_COOKIE
) ||
3040 (ptype
== SCTP_UNRECOG_PARAM
) ||
3041 (ptype
== SCTP_COOKIE_PRESERVE
) ||
3042 (ptype
== SCTP_SUPPORTED_ADDRTYPE
) ||
3043 (ptype
== SCTP_PRSCTP_SUPPORTED
) ||
3044 (ptype
== SCTP_ADD_IP_ADDRESS
) ||
3045 (ptype
== SCTP_DEL_IP_ADDRESS
) ||
3046 (ptype
== SCTP_ECN_CAPABLE
) ||
3047 (ptype
== SCTP_ULP_ADAPTION
) ||
3048 (ptype
== SCTP_ERROR_CAUSE_IND
) ||
3049 (ptype
== SCTP_SET_PRIM_ADDR
) ||
3050 (ptype
== SCTP_SUCCESS_REPORT
) ||
3051 (ptype
== SCTP_ULP_ADAPTION
) ||
3052 (ptype
== SCTP_SUPPORTED_CHUNK_EXT
) ||
3053 (ptype
== SCTP_ECN_NONCE_SUPPORTED
)
3056 at
+= SCTP_SIZE32(plen
);
3057 } else if (ptype
== SCTP_HOSTNAME_ADDRESS
) {
3058 /* We can NOT handle HOST NAME addresses!! */
3060 if (sctp_debug_on
& SCTP_DEBUG_OUTPUT4
) {
3061 kprintf("Can't handle hostname addresses.. abort processing\n");
3064 *abort_processing
= 1;
3065 if (op_err
== NULL
) {
3066 /* Ok need to try to get a mbuf */
3067 MGETHDR(op_err
, MB_DONTWAIT
, MT_DATA
);
3070 op_err
->m_pkthdr
.len
= 0;
3071 /* pre-reserve space for ip and sctp header and chunk hdr*/
3072 op_err
->m_data
+= sizeof(struct ip6_hdr
);
3073 op_err
->m_data
+= sizeof(struct sctphdr
);
3074 op_err
->m_data
+= sizeof(struct sctp_chunkhdr
);
3078 /* If we have space */
3079 struct sctp_paramhdr s
;
3082 pad_needed
= 4 - (err_at
% 4);
3083 m_copyback(op_err
, err_at
, pad_needed
, (caddr_t
)&cpthis
);
3084 err_at
+= pad_needed
;
3086 s
.param_type
= htons(SCTP_CAUSE_UNRESOLV_ADDR
);
3087 s
.param_length
= htons(sizeof(s
) + plen
);
3088 m_copyback(op_err
, err_at
, sizeof(s
), (caddr_t
)&s
);
3089 err_at
+= sizeof(s
);
3090 phdr
= sctp_get_next_param(mat
, at
, (struct sctp_paramhdr
*)tempbuf
, plen
);
3092 sctp_m_freem(op_err
);
3093 /* we are out of memory but we
3094 * still need to have a look at what to
3095 * do (the system is in trouble though).
3099 m_copyback(op_err
, err_at
, plen
, (caddr_t
)phdr
);
3104 /* we do not recognize the parameter
3105 * figure out what we do.
3108 if (sctp_debug_on
& SCTP_DEBUG_OUTPUT4
) {
3109 kprintf("Got parameter type %x - unknown\n",
3113 if ((ptype
& 0x4000) == 0x4000) {
3114 /* Report bit is set?? */
3116 if (sctp_debug_on
& SCTP_DEBUG_OUTPUT4
) {
3117 kprintf("Report bit is set\n");
3120 if (op_err
== NULL
) {
3121 /* Ok need to try to get an mbuf */
3122 MGETHDR(op_err
, MB_DONTWAIT
, MT_DATA
);
3125 op_err
->m_pkthdr
.len
= 0;
3126 op_err
->m_data
+= sizeof(struct ip6_hdr
);
3127 op_err
->m_data
+= sizeof(struct sctphdr
);
3128 op_err
->m_data
+= sizeof(struct sctp_chunkhdr
);
3132 /* If we have space */
3133 struct sctp_paramhdr s
;
3136 pad_needed
= 4 - (err_at
% 4);
3137 m_copyback(op_err
, err_at
, pad_needed
, (caddr_t
)&cpthis
);
3138 err_at
+= pad_needed
;
3140 s
.param_type
= htons(SCTP_UNRECOG_PARAM
);
3141 s
.param_length
= htons(sizeof(s
) + plen
);
3142 m_copyback(op_err
, err_at
, sizeof(s
), (caddr_t
)&s
);
3143 err_at
+= sizeof(s
);
3144 if (plen
> sizeof(tempbuf
)) {
3145 plen
= sizeof(tempbuf
);
3147 phdr
= sctp_get_next_param(mat
, at
, (struct sctp_paramhdr
*)tempbuf
, plen
);
3149 sctp_m_freem(op_err
);
3150 /* we are out of memory but we
3151 * still need to have a look at what to
3152 * do (the system is in trouble though).
3154 goto more_processing
;
3156 m_copyback(op_err
, err_at
, plen
, (caddr_t
)phdr
);
3161 if ((ptype
& 0x8000) == 0x0000) {
3163 if (sctp_debug_on
& SCTP_DEBUG_OUTPUT4
) {
3164 kprintf("Abort bit is now setting1\n");
3169 /* skip this chunk and continue processing */
3170 at
+= SCTP_SIZE32(plen
);
3174 phdr
= sctp_get_next_param(mat
, at
, ¶ms
, sizeof(params
));
3180 sctp_are_there_new_addresses(struct sctp_association
*asoc
,
3181 struct mbuf
*in_initpkt
, int iphlen
, int offset
)
3184 * Given a INIT packet, look through the packet to verify that
3185 * there are NO new addresses. As we go through the parameters
3186 * add reports of any un-understood parameters that require an
3187 * error. Also we must return (1) to drop the packet if we see
3188 * a un-understood parameter that tells us to drop the chunk.
3190 struct sockaddr_in sin4
, *sa4
;
3191 struct sockaddr_in6 sin6
, *sa6
;
3192 struct sockaddr
*sa_touse
;
3193 struct sockaddr
*sa
;
3194 struct sctp_paramhdr
*phdr
, params
;
3197 uint16_t ptype
, plen
;
3200 struct sctp_nets
*net
;
3202 memset(&sin4
, 0, sizeof(sin4
));
3203 memset(&sin6
, 0, sizeof(sin6
));
3204 sin4
.sin_family
= AF_INET
;
3205 sin4
.sin_len
= sizeof(sin4
);
3206 sin6
.sin6_family
= AF_INET6
;
3207 sin6
.sin6_len
= sizeof(sin6
);
3210 /* First what about the src address of the pkt ? */
3211 iph
= mtod(in_initpkt
, struct ip
*);
3212 if (iph
->ip_v
== IPVERSION
) {
3213 /* source addr is IPv4 */
3214 sin4
.sin_addr
= iph
->ip_src
;
3215 sa_touse
= (struct sockaddr
*)&sin4
;
3216 } else if (iph
->ip_v
== (IPV6_VERSION
>> 4)) {
3217 /* source addr is IPv6 */
3218 struct ip6_hdr
*ip6h
;
3219 ip6h
= mtod(in_initpkt
, struct ip6_hdr
*);
3220 sin6
.sin6_addr
= ip6h
->ip6_src
;
3221 sa_touse
= (struct sockaddr
*)&sin6
;
3227 TAILQ_FOREACH(net
, &asoc
->nets
, sctp_next
) {
3228 sa
= (struct sockaddr
*)&net
->ro
._l_addr
;
3229 if (sa
->sa_family
== sa_touse
->sa_family
) {
3230 if (sa
->sa_family
== AF_INET
) {
3231 sa4
= (struct sockaddr_in
*)sa
;
3232 if (sa4
->sin_addr
.s_addr
==
3233 sin4
.sin_addr
.s_addr
) {
3237 } else if (sa
->sa_family
== AF_INET6
) {
3238 sa6
= (struct sockaddr_in6
*)sa
;
3239 if (SCTP6_ARE_ADDR_EQUAL(&sa6
->sin6_addr
,
3248 /* New address added! no need to look futher. */
3251 /* Ok so far lets munge through the rest of the packet */
3255 offset
+= sizeof(struct sctp_init_chunk
);
3256 phdr
= sctp_get_next_param(mat
, offset
, ¶ms
, sizeof(params
));
3258 ptype
= ntohs(phdr
->param_type
);
3259 plen
= ntohs(phdr
->param_length
);
3260 if (ptype
== SCTP_IPV4_ADDRESS
) {
3261 struct sctp_ipv4addr_param
*p4
, p4_buf
;
3263 phdr
= sctp_get_next_param(mat
, offset
,
3264 (struct sctp_paramhdr
*)&p4_buf
, sizeof(p4_buf
));
3265 if (plen
!= sizeof(struct sctp_ipv4addr_param
) ||
3269 p4
= (struct sctp_ipv4addr_param
*)phdr
;
3270 sin4
.sin_addr
.s_addr
= p4
->addr
;
3271 sa_touse
= (struct sockaddr
*)&sin4
;
3272 } else if (ptype
== SCTP_IPV6_ADDRESS
) {
3273 struct sctp_ipv6addr_param
*p6
, p6_buf
;
3275 phdr
= sctp_get_next_param(mat
, offset
,
3276 (struct sctp_paramhdr
*)&p6_buf
, sizeof(p6_buf
));
3277 if (plen
!= sizeof(struct sctp_ipv6addr_param
) ||
3281 p6
= (struct sctp_ipv6addr_param
*)phdr
;
3282 memcpy((caddr_t
)&sin6
.sin6_addr
, p6
->addr
,
3284 sa_touse
= (struct sockaddr
*)&sin4
;
3288 /* ok, sa_touse points to one to check */
3290 TAILQ_FOREACH(net
, &asoc
->nets
, sctp_next
) {
3291 sa
= (struct sockaddr
*)&net
->ro
._l_addr
;
3292 if (sa
->sa_family
!= sa_touse
->sa_family
) {
3295 if (sa
->sa_family
== AF_INET
) {
3296 sa4
= (struct sockaddr_in
*)sa
;
3297 if (sa4
->sin_addr
.s_addr
==
3298 sin4
.sin_addr
.s_addr
) {
3302 } else if (sa
->sa_family
== AF_INET6
) {
3303 sa6
= (struct sockaddr_in6
*)sa
;
3304 if (SCTP6_ARE_ADDR_EQUAL(
3305 &sa6
->sin6_addr
, &sin6
.sin6_addr
)) {
3312 /* New addr added! no need to look further */
3316 offset
+= SCTP_SIZE32(plen
);
3317 phdr
= sctp_get_next_param(mat
, offset
, ¶ms
, sizeof(params
));
3323 * Given a MBUF chain that was sent into us containing an
3324 * INIT. Build a INIT-ACK with COOKIE and send back.
3325 * We assume that the in_initpkt has done a pullup to
3326 * include IPv6/4header, SCTP header and initial part of
3327 * INIT message (i.e. the struct sctp_init_msg).
3330 sctp_send_initiate_ack(struct sctp_inpcb
*inp
, struct sctp_tcb
*stcb
,
3331 struct mbuf
*init_pkt
, int iphlen
, int offset
, struct sctphdr
*sh
,
3332 struct sctp_init_chunk
*init_chk
)
3334 struct sctp_association
*asoc
;
3335 struct mbuf
*m
, *m_at
, *m_tmp
, *m_cookie
, *op_err
, *m_last
;
3336 struct sctp_init_msg
*initackm_out
;
3337 struct sctp_ecn_supported_param
*ecn
;
3338 struct sctp_prsctp_supported_param
*prsctp
;
3339 struct sctp_ecn_nonce_supported_param
*ecn_nonce
;
3340 struct sctp_supported_chunk_types_param
*pr_supported
;
3341 struct sockaddr_storage store
;
3342 struct sockaddr_in
*sin
;
3343 struct sockaddr_in6
*sin6
;
3346 struct ip6_hdr
*ip6
;
3347 struct sockaddr
*to
;
3348 struct sctp_state_cookie stc
;
3349 struct sctp_nets
*net
=NULL
;
3351 uint16_t his_limit
, i_want
;
3352 int abort_flag
, padval
, sz_of
;
3360 if ((asoc
!= NULL
) &&
3361 (SCTP_GET_STATE(asoc
) != SCTP_STATE_COOKIE_WAIT
) &&
3362 (sctp_are_there_new_addresses(asoc
, init_pkt
, iphlen
, offset
))) {
3363 /* new addresses, out of here in non-cookie-wait states */
3365 * Send a ABORT, we don't add the new address error clause though
3366 * we even set the T bit and copy in the 0 tag.. this looks no
3367 * different than if no listner was present.
3369 sctp_send_abort(init_pkt
, iphlen
, sh
, 0, NULL
);
3373 op_err
= sctp_arethere_unrecognized_parameters(init_pkt
,
3374 (offset
+sizeof(struct sctp_init_chunk
)),
3375 &abort_flag
, (struct sctp_chunkhdr
*)init_chk
);
3377 sctp_send_abort(init_pkt
, iphlen
, sh
, init_chk
->init
.initiate_tag
, op_err
);
3380 MGETHDR(m
, MB_DONTWAIT
, MT_HEADER
);
3382 /* No memory, INIT timer will re-attempt. */
3384 sctp_m_freem(op_err
);
3387 MCLGET(m
, MB_DONTWAIT
);
3388 if ((m
->m_flags
& M_EXT
) != M_EXT
) {
3389 /* Failed to get cluster buffer */
3391 sctp_m_freem(op_err
);
3395 m
->m_data
+= SCTP_MIN_OVERHEAD
;
3396 m
->m_pkthdr
.rcvif
= 0;
3397 m
->m_len
= sizeof(struct sctp_init_msg
);
3399 /* the time I built cookie */
3400 SCTP_GETTIME_TIMEVAL(&stc
.time_entered
);
3402 /* populate any tie tags */
3404 /* unlock before tag selections */
3405 SCTP_TCB_UNLOCK(stcb
);
3406 if (asoc
->my_vtag_nonce
== 0)
3407 asoc
->my_vtag_nonce
= sctp_select_a_tag(inp
);
3408 stc
.tie_tag_my_vtag
= asoc
->my_vtag_nonce
;
3410 if (asoc
->peer_vtag_nonce
== 0)
3411 asoc
->peer_vtag_nonce
= sctp_select_a_tag(inp
);
3412 stc
.tie_tag_peer_vtag
= asoc
->peer_vtag_nonce
;
3414 stc
.cookie_life
= asoc
->cookie_life
;
3415 net
= asoc
->primary_destination
;
3416 /* now we must relock */
3417 SCTP_INP_RLOCK(inp
);
3418 /* we may be in trouble here if the inp got freed
3419 * most likely this set of tests will protect
3420 * us but there is a chance not.
3422 if (inp
->sctp_flags
& (SCTP_PCB_FLAGS_SOCKET_GONE
|SCTP_PCB_FLAGS_SOCKET_ALLGONE
)) {
3424 sctp_m_freem(op_err
);
3426 sctp_send_abort(init_pkt
, iphlen
, sh
, 0, NULL
);
3429 SCTP_TCB_LOCK(stcb
);
3430 SCTP_INP_RUNLOCK(stcb
->sctp_ep
);
3432 stc
.tie_tag_my_vtag
= 0;
3433 stc
.tie_tag_peer_vtag
= 0;
3434 /* life I will award this cookie */
3435 stc
.cookie_life
= inp
->sctp_ep
.def_cookie_life
;
3438 /* copy in the ports for later check */
3439 stc
.myport
= sh
->dest_port
;
3440 stc
.peerport
= sh
->src_port
;
3443 * If we wanted to honor cookie life extentions, we would add
3444 * to stc.cookie_life. For now we should NOT honor any extension
3446 stc
.site_scope
= stc
.local_scope
= stc
.loopback_scope
= 0;
3447 if (inp
->sctp_flags
& SCTP_PCB_FLAGS_BOUND_V6
) {
3448 struct inpcb
*in_inp
;
3449 /* Its a V6 socket */
3450 in_inp
= (struct inpcb
*)inp
;
3451 stc
.ipv6_addr_legal
= 1;
3452 /* Now look at the binding flag to see if V4 will be legal */
3454 #if defined(__FreeBSD__) || defined(__APPLE__) || defined(__DragonFly__)
3455 (in_inp
->inp_flags
& IN6P_IPV6_V6ONLY
)
3456 #elif defined(__OpenBSD__)
3457 (0) /* For openbsd we do dual bind only */
3459 (((struct in6pcb
*)in_inp
)->in6p_flags
& IN6P_IPV6_V6ONLY
)
3462 stc
.ipv4_addr_legal
= 1;
3464 /* V4 addresses are NOT legal on the association */
3465 stc
.ipv4_addr_legal
= 0;
3468 /* Its a V4 socket, no - V6 */
3469 stc
.ipv4_addr_legal
= 1;
3470 stc
.ipv6_addr_legal
= 0;
3473 #ifdef SCTP_DONT_DO_PRIVADDR_SCOPE
3478 /* now for scope setup */
3479 memset((caddr_t
)&store
, 0, sizeof(store
));
3480 sin
= (struct sockaddr_in
*)&store
;
3481 sin6
= (struct sockaddr_in6
*)&store
;
3483 to
= (struct sockaddr
*)&store
;
3484 iph
= mtod(init_pkt
, struct ip
*);
3485 if (iph
->ip_v
== IPVERSION
) {
3486 struct in_addr addr
;
3487 struct route iproute
;
3489 sin
->sin_family
= AF_INET
;
3490 sin
->sin_len
= sizeof(struct sockaddr_in
);
3491 sin
->sin_port
= sh
->src_port
;
3492 sin
->sin_addr
= iph
->ip_src
;
3493 /* lookup address */
3494 stc
.address
[0] = sin
->sin_addr
.s_addr
;
3498 stc
.addr_type
= SCTP_IPV4_ADDRESS
;
3499 /* local from address */
3500 memset(&iproute
, 0, sizeof(iproute
));
3502 memcpy(&ro
->ro_dst
, sin
, sizeof(*sin
));
3503 addr
= sctp_ipv4_source_address_selection(inp
, NULL
,
3508 stc
.laddress
[0] = addr
.s_addr
;
3509 stc
.laddress
[1] = 0;
3510 stc
.laddress
[2] = 0;
3511 stc
.laddress
[3] = 0;
3512 stc
.laddr_type
= SCTP_IPV4_ADDRESS
;
3513 /* scope_id is only for v6 */
3515 #ifndef SCTP_DONT_DO_PRIVADDR_SCOPE
3516 if (IN4_ISPRIVATE_ADDRESS(&sin
->sin_addr
)) {
3521 #endif /* SCTP_DONT_DO_PRIVADDR_SCOPE */
3522 /* Must use the address in this case */
3523 if (sctp_is_address_on_local_host((struct sockaddr
*)sin
)) {
3524 stc
.loopback_scope
= 1;
3527 stc
.local_scope
= 1;
3529 } else if (iph
->ip_v
== (IPV6_VERSION
>> 4)) {
3530 struct in6_addr addr
;
3531 #ifdef NEW_STRUCT_ROUTE
3532 struct route iproute6
;
3534 struct route_in6 iproute6
;
3536 ip6
= mtod(init_pkt
, struct ip6_hdr
*);
3537 sin6
->sin6_family
= AF_INET6
;
3538 sin6
->sin6_len
= sizeof(struct sockaddr_in6
);
3539 sin6
->sin6_port
= sh
->src_port
;
3540 sin6
->sin6_addr
= ip6
->ip6_src
;
3541 /* lookup address */
3542 memcpy(&stc
.address
, &sin6
->sin6_addr
,
3543 sizeof(struct in6_addr
));
3544 sin6
->sin6_scope_id
= 0;
3545 stc
.addr_type
= SCTP_IPV6_ADDRESS
;
3547 if (sctp_is_address_on_local_host((struct sockaddr
*)sin6
)) {
3548 stc
.loopback_scope
= 1;
3549 stc
.local_scope
= 1;
3552 } else if (IN6_IS_ADDR_LINKLOCAL(&sin6
->sin6_addr
)) {
3554 * If the new destination is a LINK_LOCAL
3555 * we must have common both site and local
3556 * scope. Don't set local scope though since
3557 * we must depend on the source to be added
3558 * implicitly. We cannot assure just because
3559 * we share one link that all links are common.
3561 stc
.local_scope
= 0;
3564 /* we start counting for the private
3565 * address stuff at 1. since the link
3566 * local we source from won't show
3567 * up in our scoped cou8nt.
3570 /* pull out the scope_id from incoming pkt */
3571 in6_recoverscope(sin6
, &ip6
->ip6_src
,
3572 init_pkt
->m_pkthdr
.rcvif
);
3573 #if defined(SCTP_BASE_FREEBSD) || defined(__APPLE__) || defined(__DragonFly__)
3574 in6_embedscope(&sin6
->sin6_addr
, sin6
, NULL
,
3577 in6_embedscope(&sin6
->sin6_addr
, sin6
);
3579 stc
.scope_id
= sin6
->sin6_scope_id
;
3580 } else if (IN6_IS_ADDR_SITELOCAL(&sin6
->sin6_addr
)) {
3582 * If the new destination is SITE_LOCAL
3583 * then we must have site scope in common.
3587 /* local from address */
3588 memset(&iproute6
, 0, sizeof(iproute6
));
3589 ro
= (struct route
*)&iproute6
;
3590 memcpy(&ro
->ro_dst
, sin6
, sizeof(*sin6
));
3591 addr
= sctp_ipv6_source_address_selection(inp
, NULL
,
3596 memcpy(&stc
.laddress
, &addr
, sizeof(struct in6_addr
));
3597 stc
.laddr_type
= SCTP_IPV6_ADDRESS
;
3600 /* set the scope per the existing tcb */
3601 struct sctp_nets
*lnet
;
3603 stc
.loopback_scope
= asoc
->loopback_scope
;
3604 stc
.ipv4_scope
= asoc
->ipv4_local_scope
;
3605 stc
.site_scope
= asoc
->site_scope
;
3606 stc
.local_scope
= asoc
->local_scope
;
3607 TAILQ_FOREACH(lnet
, &asoc
->nets
, sctp_next
) {
3608 if (lnet
->ro
._l_addr
.sin6
.sin6_family
== AF_INET6
) {
3609 if (IN6_IS_ADDR_LINKLOCAL(&lnet
->ro
._l_addr
.sin6
.sin6_addr
)) {
3610 /* if we have a LL address, start counting
3618 /* use the net pointer */
3619 to
= (struct sockaddr
*)&net
->ro
._l_addr
;
3620 if (to
->sa_family
== AF_INET
) {
3621 sin
= (struct sockaddr_in
*)to
;
3622 stc
.address
[0] = sin
->sin_addr
.s_addr
;
3626 stc
.addr_type
= SCTP_IPV4_ADDRESS
;
3627 if (net
->src_addr_selected
== 0) {
3628 /* strange case here, the INIT
3629 * should have did the selection.
3631 net
->ro
._s_addr
.sin
.sin_addr
=
3632 sctp_ipv4_source_address_selection(inp
,
3633 stcb
, (struct route
*)&net
->ro
, net
, 0);
3634 net
->src_addr_selected
= 1;
3638 stc
.laddress
[0] = net
->ro
._s_addr
.sin
.sin_addr
.s_addr
;
3639 stc
.laddress
[1] = 0;
3640 stc
.laddress
[2] = 0;
3641 stc
.laddress
[3] = 0;
3642 stc
.laddr_type
= SCTP_IPV4_ADDRESS
;
3643 } else if (to
->sa_family
== AF_INET6
) {
3644 sin6
= (struct sockaddr_in6
*)to
;
3645 memcpy(&stc
.address
, &sin6
->sin6_addr
,
3646 sizeof(struct in6_addr
));
3647 stc
.addr_type
= SCTP_IPV6_ADDRESS
;
3648 if (net
->src_addr_selected
== 0) {
3649 /* strange case here, the INIT
3650 * should have did the selection.
3652 net
->ro
._s_addr
.sin6
.sin6_addr
=
3653 sctp_ipv6_source_address_selection(inp
,
3654 stcb
, (struct route
*)&net
->ro
, net
, 0);
3655 net
->src_addr_selected
= 1;
3657 memcpy(&stc
.laddress
, &net
->ro
._l_addr
.sin6
.sin6_addr
,
3658 sizeof(struct in6_addr
));
3659 stc
.laddr_type
= SCTP_IPV6_ADDRESS
;
3662 /* Now lets put the SCTP header in place */
3663 initackm_out
= mtod(m
, struct sctp_init_msg
*);
3664 initackm_out
->sh
.src_port
= inp
->sctp_lport
;
3665 initackm_out
->sh
.dest_port
= sh
->src_port
;
3666 initackm_out
->sh
.v_tag
= init_chk
->init
.initiate_tag
;
3667 /* Save it off for quick ref */
3668 stc
.peers_vtag
= init_chk
->init
.initiate_tag
;
3669 initackm_out
->sh
.checksum
= 0; /* calculate later */
3671 strncpy(stc
.identification
, SCTP_VERSION_STRING
,
3672 min(strlen(SCTP_VERSION_STRING
), sizeof(stc
.identification
)));
3673 /* now the chunk header */
3674 initackm_out
->msg
.ch
.chunk_type
= SCTP_INITIATION_ACK
;
3675 initackm_out
->msg
.ch
.chunk_flags
= 0;
3676 /* fill in later from mbuf we build */
3677 initackm_out
->msg
.ch
.chunk_length
= 0;
3678 /* place in my tag */
3679 if ((asoc
!= NULL
) &&
3680 ((SCTP_GET_STATE(asoc
) == SCTP_STATE_COOKIE_WAIT
) ||
3681 (SCTP_GET_STATE(asoc
) == SCTP_STATE_COOKIE_ECHOED
))) {
3682 /* re-use the v-tags and init-seq here */
3683 initackm_out
->msg
.init
.initiate_tag
= htonl(asoc
->my_vtag
);
3684 initackm_out
->msg
.init
.initial_tsn
= htonl(asoc
->init_seq_number
);
3686 initackm_out
->msg
.init
.initiate_tag
= htonl(sctp_select_a_tag(inp
));
3687 /* get a TSN to use too */
3688 initackm_out
->msg
.init
.initial_tsn
= htonl(sctp_select_initial_TSN(&inp
->sctp_ep
));
3690 /* save away my tag to */
3691 stc
.my_vtag
= initackm_out
->msg
.init
.initiate_tag
;
3693 /* set up some of the credits. */
3694 initackm_out
->msg
.init
.a_rwnd
= htonl(max(inp
->sctp_socket
->so_rcv
.ssb_hiwat
, SCTP_MINIMAL_RWND
));
3695 /* set what I want */
3696 his_limit
= ntohs(init_chk
->init
.num_inbound_streams
);
3697 /* choose what I want */
3699 if (asoc
->streamoutcnt
> inp
->sctp_ep
.pre_open_stream_count
) {
3700 i_want
= asoc
->streamoutcnt
;
3702 i_want
= inp
->sctp_ep
.pre_open_stream_count
;
3705 i_want
= inp
->sctp_ep
.pre_open_stream_count
;
3707 if (his_limit
< i_want
) {
3708 /* I Want more :< */
3709 initackm_out
->msg
.init
.num_outbound_streams
= init_chk
->init
.num_inbound_streams
;
3711 /* I can have what I want :> */
3712 initackm_out
->msg
.init
.num_outbound_streams
= htons(i_want
);
3714 /* tell him his limt. */
3715 initackm_out
->msg
.init
.num_inbound_streams
=
3716 htons(inp
->sctp_ep
.max_open_streams_intome
);
3717 /* setup the ECN pointer */
3719 /* if (inp->sctp_flags & SCTP_PCB_FLAGS_ADAPTIONEVNT) {*/
3720 if (inp
->sctp_ep
.adaption_layer_indicator
) {
3721 struct sctp_adaption_layer_indication
*ali
;
3722 ali
= (struct sctp_adaption_layer_indication
*)(
3723 (caddr_t
)initackm_out
+ sizeof(*initackm_out
));
3724 ali
->ph
.param_type
= htons(SCTP_ULP_ADAPTION
);
3725 ali
->ph
.param_length
= htons(sizeof(*ali
));
3726 ali
->indication
= ntohl(inp
->sctp_ep
.adaption_layer_indicator
);
3727 m
->m_len
+= sizeof(*ali
);
3728 ecn
= (struct sctp_ecn_supported_param
*)((caddr_t
)ali
+
3731 ecn
= (struct sctp_ecn_supported_param
*)(
3732 (caddr_t
)initackm_out
+ sizeof(*initackm_out
));
3736 if (sctp_ecn
== 1) {
3737 ecn
->ph
.param_type
= htons(SCTP_ECN_CAPABLE
);
3738 ecn
->ph
.param_length
= htons(sizeof(*ecn
));
3739 m
->m_len
+= sizeof(*ecn
);
3741 prsctp
= (struct sctp_prsctp_supported_param
*)((caddr_t
)ecn
+
3744 prsctp
= (struct sctp_prsctp_supported_param
*)((caddr_t
)ecn
);
3746 /* And now tell the peer we do pr-sctp */
3747 prsctp
->ph
.param_type
= htons(SCTP_PRSCTP_SUPPORTED
);
3748 prsctp
->ph
.param_length
= htons(sizeof(*prsctp
));
3749 m
->m_len
+= sizeof(*prsctp
);
3752 /* And now tell the peer we do all the extensions */
3753 pr_supported
= (struct sctp_supported_chunk_types_param
*)((caddr_t
)prsctp
+
3756 pr_supported
->ph
.param_type
= htons(SCTP_SUPPORTED_CHUNK_EXT
);
3757 pr_supported
->ph
.param_length
= htons(sizeof(*pr_supported
) + SCTP_EXT_COUNT
);
3758 pr_supported
->chunk_types
[0] = SCTP_ASCONF
;
3759 pr_supported
->chunk_types
[1] = SCTP_ASCONF_ACK
;
3760 pr_supported
->chunk_types
[2] = SCTP_FORWARD_CUM_TSN
;
3761 pr_supported
->chunk_types
[3] = SCTP_PACKET_DROPPED
;
3762 pr_supported
->chunk_types
[4] = SCTP_STREAM_RESET
;
3763 pr_supported
->chunk_types
[5] = 0; /* pad */
3764 pr_supported
->chunk_types
[6] = 0; /* pad */
3765 pr_supported
->chunk_types
[7] = 0; /* pad */
3767 m
->m_len
+= (sizeof(*pr_supported
) + SCTP_EXT_COUNT
+ SCTP_PAD_EXT_COUNT
);
3768 if (sctp_ecn_nonce
) {
3769 /* ECN nonce: And now tell the peer we support ECN nonce */
3770 ecn_nonce
= (struct sctp_ecn_nonce_supported_param
*)((caddr_t
)pr_supported
+
3771 sizeof(*pr_supported
) + SCTP_EXT_COUNT
+ SCTP_PAD_EXT_COUNT
);
3772 ecn_nonce
->ph
.param_type
= htons(SCTP_ECN_NONCE_SUPPORTED
);
3773 ecn_nonce
->ph
.param_length
= htons(sizeof(*ecn_nonce
));
3774 m
->m_len
+= sizeof(*ecn_nonce
);
3778 /* now the addresses */
3779 if (inp
->sctp_flags
& SCTP_PCB_FLAGS_BOUNDALL
) {
3782 int cnt
= cnt_inits_to
;
3784 TAILQ_FOREACH(ifn
, &ifnet
, if_list
) {
3785 if ((stc
.loopback_scope
== 0) &&
3786 (ifn
->if_type
== IFT_LOOP
)) {
3788 * Skip loopback devices if loopback_scope
3793 TAILQ_FOREACH(ifa
, &ifn
->if_addrlist
, ifa_list
) {
3794 if (sctp_is_address_in_scope(ifa
,
3795 stc
.ipv4_addr_legal
, stc
.ipv6_addr_legal
,
3796 stc
.loopback_scope
, stc
.ipv4_scope
,
3797 stc
.local_scope
, stc
.site_scope
) == 0) {
3804 TAILQ_FOREACH(ifn
, &ifnet
, if_list
) {
3805 if ((stc
.loopback_scope
== 0) &&
3806 (ifn
->if_type
== IFT_LOOP
)) {
3808 * Skip loopback devices if
3809 * loopback_scope not set
3813 TAILQ_FOREACH(ifa
, &ifn
->if_addrlist
, ifa_list
) {
3814 if (sctp_is_address_in_scope(ifa
,
3815 stc
.ipv4_addr_legal
,
3816 stc
.ipv6_addr_legal
,
3817 stc
.loopback_scope
, stc
.ipv4_scope
,
3818 stc
.local_scope
, stc
.site_scope
) == 0) {
3821 m_at
= sctp_add_addr_to_mbuf(m_at
, ifa
);
3826 struct sctp_laddr
*laddr
;
3829 /* First, how many ? */
3830 LIST_FOREACH(laddr
, &inp
->sctp_addr_list
, sctp_nxt_addr
) {
3831 if (laddr
->ifa
== NULL
) {
3834 if (laddr
->ifa
->ifa_addr
== NULL
)
3836 if (sctp_is_address_in_scope(laddr
->ifa
,
3837 stc
.ipv4_addr_legal
, stc
.ipv6_addr_legal
,
3838 stc
.loopback_scope
, stc
.ipv4_scope
,
3839 stc
.local_scope
, stc
.site_scope
) == 0) {
3844 /* If we bind a single address only we won't list
3845 * any. This way you can get through a NAT
3848 LIST_FOREACH(laddr
, &inp
->sctp_addr_list
, sctp_nxt_addr
) {
3849 if (laddr
->ifa
== NULL
) {
3851 if (sctp_debug_on
& SCTP_DEBUG_OUTPUT1
) {
3852 kprintf("Help I have fallen and I can't get up!\n");
3857 if (laddr
->ifa
->ifa_addr
== NULL
)
3859 if (sctp_is_address_in_scope(laddr
->ifa
,
3860 stc
.ipv4_addr_legal
, stc
.ipv6_addr_legal
,
3861 stc
.loopback_scope
, stc
.ipv4_scope
,
3862 stc
.local_scope
, stc
.site_scope
) == 0) {
3865 m_at
= sctp_add_addr_to_mbuf(m_at
, laddr
->ifa
);
3870 /* tack on the operational error if present */
3872 if (op_err
->m_pkthdr
.len
% 4) {
3873 /* must add a pad to the param */
3876 padlen
= 4 - (op_err
->m_pkthdr
.len
% 4);
3877 m_copyback(op_err
, op_err
->m_pkthdr
.len
, padlen
, (caddr_t
)&cpthis
);
3879 while (m_at
->m_next
!= NULL
) {
3880 m_at
= m_at
->m_next
;
3882 m_at
->m_next
= op_err
;
3883 while (m_at
->m_next
!= NULL
) {
3884 m_at
= m_at
->m_next
;
3887 /* Get total size of init packet */
3888 sz_of
= SCTP_SIZE32(ntohs(init_chk
->ch
.chunk_length
));
3889 /* pre-calulate the size and update pkt header and chunk header */
3890 m
->m_pkthdr
.len
= 0;
3891 for (m_tmp
= m
; m_tmp
; m_tmp
= m_tmp
->m_next
) {
3892 m
->m_pkthdr
.len
+= m_tmp
->m_len
;
3893 if (m_tmp
->m_next
== NULL
) {
3894 /* m_tmp should now point to last one */
3899 * Figure now the size of the cookie. We know the size of the
3900 * INIT-ACK. The Cookie is going to be the size of INIT, INIT-ACK,
3901 * COOKIE-STRUCTURE and SIGNATURE.
3905 * take our earlier INIT calc and add in the sz we just calculated
3906 * minus the size of the sctphdr (its not included in chunk size
3909 /* add once for the INIT-ACK */
3910 sz_of
+= (m
->m_pkthdr
.len
- sizeof(struct sctphdr
));
3912 /* add a second time for the INIT-ACK in the cookie */
3913 sz_of
+= (m
->m_pkthdr
.len
- sizeof(struct sctphdr
));
3915 /* Now add the cookie header and cookie message struct */
3916 sz_of
+= sizeof(struct sctp_state_cookie_param
);
3917 /* ...and add the size of our signature */
3918 sz_of
+= SCTP_SIGNATURE_SIZE
;
3919 initackm_out
->msg
.ch
.chunk_length
= htons(sz_of
);
3921 /* Now we must build a cookie */
3922 m_cookie
= sctp_add_cookie(inp
, init_pkt
, offset
, m
,
3923 sizeof(struct sctphdr
), &stc
);
3924 if (m_cookie
== NULL
) {
3925 /* memory problem */
3929 /* Now append the cookie to the end and update the space/size */
3930 m_tmp
->m_next
= m_cookie
;
3933 * We pass 0 here to NOT set IP_DF if its IPv4, we ignore the
3934 * return here since the timer will drive a retranmission.
3936 padval
= m
->m_pkthdr
.len
% 4;
3937 if ((padval
) && (m_last
)) {
3938 /* see my previous comments on m_last */
3940 ret
= sctp_add_pad_tombuf(m_last
, (4-padval
));
3942 /* Houston we have a problem, no space */
3946 m
->m_pkthdr
.len
+= padval
;
3948 sctp_lowlevel_chunk_output(inp
, NULL
, NULL
, to
, m
, 0, 0, NULL
, 0);
3953 sctp_insert_on_wheel(struct sctp_association
*asoc
,
3954 struct sctp_stream_out
*strq
)
3956 struct sctp_stream_out
*stre
, *strn
;
3957 stre
= TAILQ_FIRST(&asoc
->out_wheel
);
3959 /* only one on wheel */
3960 TAILQ_INSERT_HEAD(&asoc
->out_wheel
, strq
, next_spoke
);
3963 for (; stre
; stre
= strn
) {
3964 strn
= TAILQ_NEXT(stre
, next_spoke
);
3965 if (stre
->stream_no
> strq
->stream_no
) {
3966 TAILQ_INSERT_BEFORE(stre
, strq
, next_spoke
);
3968 } else if (stre
->stream_no
== strq
->stream_no
) {
3969 /* huh, should not happen */
3971 } else if (strn
== NULL
) {
3972 /* next one is null */
3973 TAILQ_INSERT_AFTER(&asoc
->out_wheel
, stre
, strq
,
3980 sctp_remove_from_wheel(struct sctp_association
*asoc
,
3981 struct sctp_stream_out
*strq
)
3983 /* take off and then setup so we know it is not on the wheel */
3984 TAILQ_REMOVE(&asoc
->out_wheel
, strq
, next_spoke
);
3985 strq
->next_spoke
.tqe_next
= NULL
;
3986 strq
->next_spoke
.tqe_prev
= NULL
;
3991 sctp_prune_prsctp(struct sctp_tcb
*stcb
,
3992 struct sctp_association
*asoc
,
3993 struct sctp_sndrcvinfo
*srcv
,
3998 struct sctp_tmit_chunk
*chk
, *nchk
;
3999 if ((asoc
->peer_supports_prsctp
) && (asoc
->sent_queue_cnt_removeable
> 0)) {
4000 TAILQ_FOREACH(chk
, &asoc
->sent_queue
, sctp_next
) {
4002 * Look for chunks marked with the PR_SCTP
4003 * flag AND the buffer space flag. If the one
4004 * being sent is equal or greater priority then
4005 * purge the old one and free some space.
4007 if ((chk
->flags
& (SCTP_PR_SCTP_ENABLED
|
4008 SCTP_PR_SCTP_BUFFER
)) ==
4009 (SCTP_PR_SCTP_ENABLED
|SCTP_PR_SCTP_BUFFER
)) {
4011 * This one is PR-SCTP AND buffer space
4014 if (chk
->rec
.data
.timetodrop
.tv_sec
>= (long)srcv
->sinfo_timetolive
) {
4015 /* Lower numbers equates to
4016 * higher priority so if the
4017 * one we are looking at has a
4018 * larger or equal priority we
4019 * want to drop the data and
4020 * NOT retransmit it.
4029 if (chk
->sent
> SCTP_DATAGRAM_UNSENT
)
4030 cause
= SCTP_RESPONSE_TO_USER_REQ
|SCTP_NOTIFY_DATAGRAM_SENT
;
4032 cause
= SCTP_RESPONSE_TO_USER_REQ
|SCTP_NOTIFY_DATAGRAM_UNSENT
;
4033 ret_spc
= sctp_release_pr_sctp_chunk(stcb
, chk
,
4036 freed_spc
+= ret_spc
;
4037 if (freed_spc
>= dataout
) {
4040 } /* if chunk was present */
4041 } /* if of sufficent priority */
4042 } /* if chunk has enabled */
4043 } /* tailqforeach */
4045 chk
= TAILQ_FIRST(&asoc
->send_queue
);
4047 nchk
= TAILQ_NEXT(chk
, sctp_next
);
4048 /* Here we must move to the sent queue and mark */
4049 if ((chk
->flags
& (SCTP_PR_SCTP_ENABLED
|
4050 SCTP_PR_SCTP_BUFFER
)) ==
4051 (SCTP_PR_SCTP_ENABLED
|SCTP_PR_SCTP_BUFFER
)) {
4052 if (chk
->rec
.data
.timetodrop
.tv_sec
>= (long)srcv
->sinfo_timetolive
) {
4059 ret_spc
= sctp_release_pr_sctp_chunk(stcb
, chk
,
4060 SCTP_RESPONSE_TO_USER_REQ
|SCTP_NOTIFY_DATAGRAM_UNSENT
,
4063 freed_spc
+= ret_spc
;
4064 if (freed_spc
>= dataout
) {
4067 } /* end if chk->data */
4068 } /* end if right class */
4069 } /* end if chk pr-sctp */
4071 } /* end while (chk) */
4072 } /* if enabled in asoc */
4076 sctp_prepare_chunk(struct sctp_tmit_chunk
*template,
4077 struct sctp_tcb
*stcb
,
4078 struct sctp_sndrcvinfo
*srcv
,
4079 struct sctp_stream_out
*strq
,
4080 struct sctp_nets
*net
)
4082 bzero(template, sizeof(struct sctp_tmit_chunk
));
4083 template->sent
= SCTP_DATAGRAM_UNSENT
;
4084 if ((stcb
->asoc
.peer_supports_prsctp
) &&
4085 (srcv
->sinfo_flags
& (MSG_PR_SCTP_TTL
|MSG_PR_SCTP_BUF
)) &&
4086 (srcv
->sinfo_timetolive
> 0)
4089 * Peer supports PR-SCTP
4090 * The flags is set against this send for PR-SCTP
4091 * And timetolive is a postive value, zero is reserved
4092 * to mean a reliable send for both buffer/time
4095 if (srcv
->sinfo_flags
& MSG_PR_SCTP_BUF
) {
4097 * Time to live is a priority stored in tv_sec
4098 * when doing the buffer drop thing.
4100 template->rec
.data
.timetodrop
.tv_sec
= srcv
->sinfo_timetolive
;
4104 SCTP_GETTIME_TIMEVAL(&template->rec
.data
.timetodrop
);
4105 tv
.tv_sec
= srcv
->sinfo_timetolive
/ 1000;
4106 tv
.tv_usec
= (srcv
->sinfo_timetolive
* 1000) % 1000000;
4108 timeradd(&template->rec
.data
.timetodrop
, &tv
,
4109 &template->rec
.data
.timetodrop
);
4111 timevaladd(&template->rec
.data
.timetodrop
, &tv
);
4115 if ((srcv
->sinfo_flags
& MSG_UNORDERED
) == 0) {
4116 template->rec
.data
.stream_seq
= strq
->next_sequence_sent
;
4118 template->rec
.data
.stream_seq
= 0;
4120 template->rec
.data
.TSN_seq
= 0; /* not yet assigned */
4122 template->rec
.data
.stream_number
= srcv
->sinfo_stream
;
4123 template->rec
.data
.payloadtype
= srcv
->sinfo_ppid
;
4124 template->rec
.data
.context
= srcv
->sinfo_context
;
4125 template->rec
.data
.doing_fast_retransmit
= 0;
4126 template->rec
.data
.ect_nonce
= 0; /* ECN Nonce */
4128 if (srcv
->sinfo_flags
& MSG_ADDR_OVER
) {
4129 template->whoTo
= net
;
4131 if (stcb
->asoc
.primary_destination
)
4132 template->whoTo
= stcb
->asoc
.primary_destination
;
4135 template->whoTo
= net
;
4138 /* the actual chunk flags */
4139 if (srcv
->sinfo_flags
& MSG_UNORDERED
) {
4140 template->rec
.data
.rcv_flags
= SCTP_DATA_UNORDERED
;
4142 template->rec
.data
.rcv_flags
= 0;
4144 /* no flags yet, FRAGMENT_OK goes here */
4145 template->flags
= 0;
4147 if (stcb
->asoc
.peer_supports_prsctp
) {
4148 if (srcv
->sinfo_timetolive
> 0) {
4150 * We only set the flag if timetolive (or
4151 * priority) was set to a positive number.
4152 * Zero is reserved specifically to be
4153 * EXCLUDED and sent reliable.
4155 if (srcv
->sinfo_flags
& MSG_PR_SCTP_TTL
) {
4156 template->flags
|= SCTP_PR_SCTP_ENABLED
;
4158 if (srcv
->sinfo_flags
& MSG_PR_SCTP_BUF
) {
4159 template->flags
|= SCTP_PR_SCTP_BUFFER
;
4163 template->asoc
= &stcb
->asoc
;
4168 sctp_get_frag_point(struct sctp_tcb
*stcb
,
4169 struct sctp_association
*asoc
)
4173 /* For endpoints that have both 6 and 4 addresses
4174 * we must reserver room for the 6 ip header, for
4175 * those that are only dealing with V4 we use
4176 * a larger frag point.
4178 if (stcb
->sctp_ep
->sctp_flags
& SCTP_PCB_FLAGS_BOUND_V6
) {
4179 ovh
= SCTP_MED_OVERHEAD
;
4181 ovh
= SCTP_MED_V4_OVERHEAD
;
4184 if (stcb
->sctp_ep
->sctp_frag_point
> asoc
->smallest_mtu
)
4185 siz
= asoc
->smallest_mtu
- ovh
;
4187 siz
= (stcb
->sctp_ep
->sctp_frag_point
- ovh
);
4189 if (siz > (MCLBYTES-sizeof(struct sctp_data_chunk))) { */
4190 /* A data chunk MUST fit in a cluster */
4191 /* siz = (MCLBYTES - sizeof(struct sctp_data_chunk));*/
4195 /* make it an even word boundary please */
4200 extern unsigned int sctp_max_chunks_on_queue
;
4202 #define SBLOCKWAIT(f) (((f)&MSG_DONTWAIT) ? M_NOWAIT : M_WAITOK)
4205 sctp_msg_append(struct sctp_tcb
*stcb
,
4206 struct sctp_nets
*net
,
4208 struct sctp_sndrcvinfo
*srcv
,
4212 struct sctp_association
*asoc
;
4213 struct sctp_stream_out
*strq
;
4214 struct sctp_tmit_chunk
*chk
;
4215 struct sctpchunk_listhead tmp
;
4216 struct sctp_tmit_chunk
template;
4217 struct mbuf
*n
, *mnext
;
4219 unsigned int dataout
, siz
;
4224 if ((stcb
== NULL
) || (net
== NULL
) || (m
== NULL
) || (srcv
== NULL
)) {
4225 /* Software fault, you blew it on the call */
4227 if (sctp_debug_on
& SCTP_DEBUG_OUTPUT1
) {
4228 kprintf("software error in sctp_msg_append:1\n");
4229 kprintf("stcb:%p net:%p m:%p srcv:%p\n",
4230 stcb
, net
, m
, srcv
);
4237 so
= stcb
->sctp_socket
;
4239 if (srcv
->sinfo_flags
& MSG_ABORT
) {
4240 if ((SCTP_GET_STATE(asoc
) != SCTP_STATE_COOKIE_WAIT
) &&
4241 (SCTP_GET_STATE(asoc
) != SCTP_STATE_COOKIE_ECHOED
)) {
4242 /* It has to be up before we abort */
4243 /* how big is the user initiated abort? */
4244 if ((m
->m_flags
& M_PKTHDR
) && (m
->m_pkthdr
.len
)) {
4245 dataout
= m
->m_pkthdr
.len
;
4249 for (n
= m
; n
; n
= n
->m_next
) {
4250 dataout
+= n
->m_len
;
4253 M_PREPEND(m
, sizeof(struct sctp_paramhdr
), MB_DONTWAIT
);
4255 struct sctp_paramhdr
*ph
;
4256 m
->m_len
= sizeof(struct sctp_paramhdr
) + dataout
;
4257 ph
= mtod(m
, struct sctp_paramhdr
*);
4258 ph
->param_type
= htons(SCTP_CAUSE_USER_INITIATED_ABT
);
4259 ph
->param_length
= htons(m
->m_len
);
4261 sctp_abort_an_association(stcb
->sctp_ep
, stcb
, SCTP_RESPONSE_TO_USER_REQ
, m
);
4264 /* Only free if we don't send an abort */
4269 if ((SCTP_GET_STATE(asoc
) == SCTP_STATE_SHUTDOWN_SENT
) ||
4270 (SCTP_GET_STATE(asoc
) == SCTP_STATE_SHUTDOWN_ACK_SENT
) ||
4271 (SCTP_GET_STATE(asoc
) == SCTP_STATE_SHUTDOWN_RECEIVED
) ||
4272 (asoc
->state
& SCTP_STATE_SHUTDOWN_PENDING
)) {
4273 /* got data while shutting down */
4278 if (srcv
->sinfo_stream
>= asoc
->streamoutcnt
) {
4279 /* Invalid stream number */
4283 if (asoc
->strmout
== NULL
) {
4284 /* huh? software error */
4286 if (sctp_debug_on
& SCTP_DEBUG_OUTPUT1
) {
4287 kprintf("software error in sctp_msg_append:2\n");
4293 strq
= &asoc
->strmout
[srcv
->sinfo_stream
];
4294 /* how big is it ? */
4295 if ((m
->m_flags
& M_PKTHDR
) && (m
->m_pkthdr
.len
)) {
4296 dataout
= m
->m_pkthdr
.len
;
4300 for (n
= m
; n
; n
= n
->m_next
) {
4301 dataout
+= n
->m_len
;
4305 if (sctp_debug_on
& SCTP_DEBUG_OUTPUT1
) {
4306 kprintf("Attempt to send out %d bytes\n",
4311 /* lock the socket buf */
4312 SOCKBUF_LOCK(&so
->so_snd
);
4313 error
= ssb_lock(&so
->so_snd
, SBLOCKWAIT(flags
));
4317 if (dataout
> so
->so_snd
.ssb_hiwat
) {
4318 /* It will NEVER fit */
4322 if ((srcv
->sinfo_flags
& MSG_EOF
) &&
4323 (stcb
->sctp_ep
->sctp_flags
& SCTP_PCB_FLAGS_UDPTYPE
) &&
4328 if ((so
->so_snd
.ssb_hiwat
<
4329 (dataout
+ asoc
->total_output_queue_size
)) ||
4330 (asoc
->chunks_on_out_queue
> sctp_max_chunks_on_queue
) ||
4331 (asoc
->total_output_mbuf_queue_size
>
4332 so
->so_snd
.ssb_mbmax
)
4334 /* XXX Buffer space hunt for data to skip */
4335 if (asoc
->peer_supports_prsctp
) {
4336 sctp_prune_prsctp(stcb
, asoc
, srcv
, dataout
);
4338 while ((so
->so_snd
.ssb_hiwat
<
4339 (dataout
+ asoc
->total_output_queue_size
)) ||
4340 (asoc
->chunks_on_out_queue
> sctp_max_chunks_on_queue
) ||
4341 (asoc
->total_output_mbuf_queue_size
>
4342 so
->so_snd
.ssb_mbmax
)) {
4343 struct sctp_inpcb
*inp
;
4344 /* Now did we free up enough room? */
4345 if (flags
& (MSG_FNONBLOCKING
|MSG_DONTWAIT
)) {
4346 /* Non-blocking io in place */
4347 error
= EWOULDBLOCK
;
4351 * We store off a pointer to the endpoint.
4352 * Since on return from this we must check to
4353 * see if an so_error is set. If so we may have
4354 * been reset and our stcb destroyed. Returning
4355 * an error will cause the correct error return
4356 * through and fix this all.
4358 inp
= stcb
->sctp_ep
;
4360 * Not sure how else to do this since
4361 * the level we suspended at is not
4362 * known deep down where we are. I will
4363 * drop to spl0() so that others can
4367 inp
->sctp_tcb_at_block
= (void *)stcb
;
4368 inp
->error_on_block
= 0;
4369 ssb_unlock(&so
->so_snd
);
4370 error
= ssb_wait(&so
->so_snd
);
4372 * XXX: This is ugly but I have
4373 * recreated most of what goes on to
4374 * block in the sb. UGHH
4375 * May want to add the bit about being
4376 * no longer connected.. but this then
4377 * further dooms the UDP model NOT to
4380 inp
->sctp_tcb_at_block
= 0;
4381 if (inp
->error_on_block
)
4382 error
= inp
->error_on_block
;
4384 error
= so
->so_error
;
4388 error
= ssb_lock(&so
->so_snd
, M_WAITOK
);
4391 /* Otherwise we cycle back and recheck
4394 #if defined(__FreeBSD__) && __FreeBSD_version >= 502115
4395 if (so
->so_rcv
.sb_state
& SBS_CANTSENDMORE
) {
4397 if (so
->so_state
& SS_CANTSENDMORE
) {
4403 error
= so
->so_error
;
4408 /* If we have a packet header fix it if it was broke */
4409 if (m
->m_flags
& M_PKTHDR
) {
4410 m
->m_pkthdr
.len
= dataout
;
4412 /* use the smallest one, user set value or
4413 * smallest mtu of the asoc
4415 siz
= sctp_get_frag_point(stcb
, asoc
);
4416 SOCKBUF_UNLOCK(&so
->so_snd
);
4417 if ((dataout
) && (dataout
<= siz
)) {
4419 chk
= (struct sctp_tmit_chunk
*)SCTP_ZONE_GET(sctppcbinfo
.ipi_zone_chunk
);
4422 SOCKBUF_LOCK(&so
->so_snd
);
4425 sctp_prepare_chunk(chk
, stcb
, srcv
, strq
, net
);
4426 chk
->whoTo
->ref_count
++;
4427 chk
->rec
.data
.rcv_flags
|= SCTP_DATA_NOT_FRAG
;
4429 /* no flags yet, FRAGMENT_OK goes here */
4430 sctppcbinfo
.ipi_count_chunk
++;
4431 sctppcbinfo
.ipi_gencnt_chunk
++;
4432 asoc
->chunks_on_out_queue
++;
4435 /* Total in the MSIZE */
4436 for (mm
= chk
->data
; mm
; mm
= mm
->m_next
) {
4438 if (mm
->m_flags
& M_EXT
) {
4439 mbcnt
+= chk
->data
->m_ext
.ext_size
;
4442 /* fix up the send_size if it is not present */
4443 chk
->send_size
= dataout
;
4444 chk
->book_size
= chk
->send_size
;
4446 /* ok, we are commited */
4447 if ((srcv
->sinfo_flags
& MSG_UNORDERED
) == 0) {
4448 /* bump the ssn if we are unordered. */
4449 strq
->next_sequence_sent
++;
4451 chk
->data
->m_nextpkt
= 0;
4452 asoc
->stream_queue_cnt
++;
4453 TAILQ_INSERT_TAIL(&strq
->outqueue
, chk
, sctp_next
);
4454 /* now check if this stream is on the wheel */
4455 if ((strq
->next_spoke
.tqe_next
== NULL
) &&
4456 (strq
->next_spoke
.tqe_prev
== NULL
)) {
4457 /* Insert it on the wheel since it is not
4460 sctp_insert_on_wheel(asoc
, strq
);
4462 } else if ((dataout
) && (dataout
> siz
)) {
4464 if ((stcb
->sctp_ep
->sctp_flags
& SCTP_PCB_FLAGS_NO_FRAGMENT
) &&
4467 SOCKBUF_LOCK(&so
->so_snd
);
4470 /* setup the template */
4471 sctp_prepare_chunk(&template, stcb
, srcv
, strq
, net
);
4474 while (dataout
> siz
) {
4476 * We can wait since this is called from the user
4479 n
->m_nextpkt
= m_split(n
, siz
, MB_WAIT
);
4480 if (n
->m_nextpkt
== NULL
) {
4482 SOCKBUF_LOCK(&so
->so_snd
);
4489 * ok, now we have a chain on m where m->m_nextpkt points to
4490 * the next chunk and m/m->m_next chain is the piece to send.
4491 * We must go through the chains and thread them on to
4492 * sctp_tmit_chunk chains and place them all on the stream
4493 * queue, breaking the m->m_nextpkt pointers as we go.
4499 * first go through and allocate a sctp_tmit chunk
4500 * for each chunk piece
4502 chk
= (struct sctp_tmit_chunk
*)SCTP_ZONE_GET(sctppcbinfo
.ipi_zone_chunk
);
4505 * ok we must spin through and dump anything
4506 * we have allocated and then jump to the
4509 chk
= TAILQ_FIRST(&tmp
);
4511 TAILQ_REMOVE(&tmp
, chk
, sctp_next
);
4512 SCTP_ZONE_FREE(sctppcbinfo
.ipi_zone_chunk
, chk
);
4513 sctppcbinfo
.ipi_count_chunk
--;
4514 asoc
->chunks_on_out_queue
--;
4515 if ((int)sctppcbinfo
.ipi_count_chunk
< 0) {
4516 panic("Chunk count is negative");
4518 sctppcbinfo
.ipi_gencnt_chunk
++;
4519 chk
= TAILQ_FIRST(&tmp
);
4522 SOCKBUF_LOCK(&so
->so_snd
);
4525 sctppcbinfo
.ipi_count_chunk
++;
4526 asoc
->chunks_on_out_queue
++;
4528 sctppcbinfo
.ipi_gencnt_chunk
++;
4530 chk
->whoTo
->ref_count
++;
4532 /* Total in the MSIZE */
4534 for (mm
= chk
->data
; mm
; mm
= mm
->m_next
) {
4536 if (mm
->m_flags
& M_EXT
) {
4537 mbcnt_e
+= chk
->data
->m_ext
.ext_size
;
4540 /* now fix the chk->send_size */
4541 if (chk
->data
->m_flags
& M_PKTHDR
) {
4542 chk
->send_size
= chk
->data
->m_pkthdr
.len
;
4546 for (nn
= chk
->data
; nn
; nn
= nn
->m_next
) {
4547 chk
->send_size
+= nn
->m_len
;
4550 chk
->book_size
= chk
->send_size
;
4551 chk
->mbcnt
= mbcnt_e
;
4553 if (chk
->flags
& SCTP_PR_SCTP_BUFFER
) {
4554 asoc
->sent_queue_cnt_removeable
++;
4557 TAILQ_INSERT_TAIL(&tmp
, chk
, sctp_next
);
4560 /* now that we have enough space for all de-couple the
4561 * chain of mbufs by going through our temp array
4562 * and breaking the pointers.
4564 /* ok, we are commited */
4565 if ((srcv
->sinfo_flags
& MSG_UNORDERED
) == 0) {
4566 /* bump the ssn if we are unordered. */
4567 strq
->next_sequence_sent
++;
4569 /* Mark the first/last flags. This will
4570 * result int a 3 for a single item on the list
4572 chk
= TAILQ_FIRST(&tmp
);
4573 chk
->rec
.data
.rcv_flags
|= SCTP_DATA_FIRST_FRAG
;
4574 chk
= TAILQ_LAST(&tmp
, sctpchunk_listhead
);
4575 chk
->rec
.data
.rcv_flags
|= SCTP_DATA_LAST_FRAG
;
4576 /* now break any chains on the queue and
4577 * move it to the streams actual queue.
4579 chk
= TAILQ_FIRST(&tmp
);
4581 chk
->data
->m_nextpkt
= 0;
4582 TAILQ_REMOVE(&tmp
, chk
, sctp_next
);
4583 asoc
->stream_queue_cnt
++;
4584 TAILQ_INSERT_TAIL(&strq
->outqueue
, chk
, sctp_next
);
4585 chk
= TAILQ_FIRST(&tmp
);
4587 /* now check if this stream is on the wheel */
4588 if ((strq
->next_spoke
.tqe_next
== NULL
) &&
4589 (strq
->next_spoke
.tqe_prev
== NULL
)) {
4590 /* Insert it on the wheel since it is not
4593 sctp_insert_on_wheel(asoc
, strq
);
4596 SOCKBUF_LOCK(&so
->so_snd
);
4597 /* has a SHUTDOWN been (also) requested by the user on this asoc? */
4600 if ((srcv
->sinfo_flags
& MSG_EOF
) &&
4601 (stcb
->sctp_ep
->sctp_flags
& SCTP_PCB_FLAGS_UDPTYPE
)) {
4603 int some_on_streamwheel
= 0;
4605 if (!TAILQ_EMPTY(&asoc
->out_wheel
)) {
4606 /* Check to see if some data queued */
4607 struct sctp_stream_out
*outs
;
4608 TAILQ_FOREACH(outs
, &asoc
->out_wheel
, next_spoke
) {
4609 if (!TAILQ_EMPTY(&outs
->outqueue
)) {
4610 some_on_streamwheel
= 1;
4616 if (TAILQ_EMPTY(&asoc
->send_queue
) &&
4617 TAILQ_EMPTY(&asoc
->sent_queue
) &&
4618 (some_on_streamwheel
== 0)) {
4619 /* there is nothing queued to send, so I'm done... */
4620 if ((SCTP_GET_STATE(asoc
) != SCTP_STATE_SHUTDOWN_SENT
) &&
4621 (SCTP_GET_STATE(asoc
) != SCTP_STATE_SHUTDOWN_ACK_SENT
)) {
4622 /* only send SHUTDOWN the first time through */
4624 if (sctp_debug_on
& SCTP_DEBUG_OUTPUT4
) {
4625 kprintf("%s:%d sends a shutdown\n",
4631 sctp_send_shutdown(stcb
, stcb
->asoc
.primary_destination
);
4632 asoc
->state
= SCTP_STATE_SHUTDOWN_SENT
;
4633 sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWN
, stcb
->sctp_ep
, stcb
,
4634 asoc
->primary_destination
);
4635 sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNGUARD
, stcb
->sctp_ep
, stcb
,
4636 asoc
->primary_destination
);
4640 * we still got (or just got) data to send, so set
4644 * XXX sockets draft says that MSG_EOF should be sent
4645 * with no data. currently, we will allow user data
4646 * to be sent first and move to SHUTDOWN-PENDING
4648 asoc
->state
|= SCTP_STATE_SHUTDOWN_PENDING
;
4651 #ifdef SCTP_MBCNT_LOGGING
4652 sctp_log_mbcnt(SCTP_LOG_MBCNT_INCREASE
,
4653 asoc
->total_output_queue_size
,
4655 asoc
->total_output_mbuf_queue_size
,
4658 asoc
->total_output_queue_size
+= dataout
;
4659 asoc
->total_output_mbuf_queue_size
+= mbcnt
;
4660 if ((stcb
->sctp_ep
->sctp_flags
& SCTP_PCB_FLAGS_TCPTYPE
) ||
4661 (stcb
->sctp_ep
->sctp_flags
& SCTP_PCB_FLAGS_IN_TCPPOOL
)) {
4662 so
->so_snd
.ssb_cc
+= dataout
;
4663 so
->so_snd
.ssb_mbcnt
+= mbcnt
;
4667 if (sctp_debug_on
& SCTP_DEBUG_OUTPUT2
) {
4668 kprintf("++total out:%d total_mbuf_out:%d\n",
4669 (int)asoc
->total_output_queue_size
,
4670 (int)asoc
->total_output_mbuf_queue_size
);
4675 ssb_unlock(&so
->so_snd
);
4677 SOCKBUF_UNLOCK(&so
->so_snd
);
4679 if (m
&& m
->m_nextpkt
) {
4682 mnext
= n
->m_nextpkt
;
4683 n
->m_nextpkt
= NULL
;
4693 static struct mbuf
*
4694 sctp_copy_mbufchain(struct mbuf
*clonechain
,
4695 struct mbuf
*outchain
)
4697 struct mbuf
*appendchain
;
4698 #if defined(__FreeBSD__) || defined(__NetBSD__)
4699 /* Supposedly m_copypacket is an optimization, use it if we can */
4700 if (clonechain
->m_flags
& M_PKTHDR
) {
4701 appendchain
= m_copypacket(clonechain
, MB_DONTWAIT
);
4702 sctp_pegs
[SCTP_CACHED_SRC
]++;
4704 appendchain
= m_copy(clonechain
, 0, M_COPYALL
);
4705 #elif defined(__APPLE__)
4706 appendchain
= sctp_m_copym(clonechain
, 0, M_COPYALL
, MB_DONTWAIT
);
4708 appendchain
= m_copy(clonechain
, 0, M_COPYALL
);
4711 if (appendchain
== NULL
) {
4714 sctp_m_freem(outchain
);
4718 /* tack on to the end */
4722 if (m
->m_next
== NULL
) {
4723 m
->m_next
= appendchain
;
4728 if (outchain
->m_flags
& M_PKTHDR
) {
4734 append_tot
+= t
->m_len
;
4737 outchain
->m_pkthdr
.len
+= append_tot
;
4741 return (appendchain
);
4746 sctp_sendall_iterator(struct sctp_inpcb
*inp
, struct sctp_tcb
*stcb
, void *ptr
, u_int32_t val
)
4748 struct sctp_copy_all
*ca
;
4752 ca
= (struct sctp_copy_all
*)ptr
;
4753 if (ca
->m
== NULL
) {
4756 if (ca
->inp
!= inp
) {
4760 m
= sctp_copy_mbufchain(ca
->m
, NULL
);
4762 /* can't copy so we are done */
4766 ret
= sctp_msg_append(stcb
, stcb
->asoc
.primary_destination
, m
,
4767 &ca
->sndrcv
, MSG_FNONBLOCKING
);
4776 sctp_sendall_completes(void *ptr
, u_int32_t val
)
4778 struct sctp_copy_all
*ca
;
4779 ca
= (struct sctp_copy_all
*)ptr
;
4780 /* Do a notify here?
4781 * Kacheong suggests that the notify
4782 * be done at the send time.. so you would
4783 * push up a notification if any send failed.
4784 * Don't know if this is feasable since the
4785 * only failures we have is "memory" related and
4786 * if you cannot get an mbuf to send the data
4787 * you surely can't get an mbuf to send up
4788 * to notify the user you can't send the data :->
4791 /* now free everything */
4797 #define MC_ALIGN(m, len) do { \
4798 (m)->m_data += (MCLBYTES - (len)) & ~(sizeof(long) - 1); \
4803 static struct mbuf
*
4804 sctp_copy_out_all(struct uio
*uio
, int len
)
4806 struct mbuf
*ret
, *at
;
4807 int left
, willcpy
, cancpy
, error
;
4809 MGETHDR(ret
, MB_WAIT
, MT_HEADER
);
4816 ret
->m_pkthdr
.len
= len
;
4817 MCLGET(ret
, MB_WAIT
);
4821 if ((ret
->m_flags
& M_EXT
) == 0) {
4825 cancpy
= M_TRAILINGSPACE(ret
);
4826 willcpy
= min(cancpy
, left
);
4829 /* Align data to the end */
4830 MC_ALIGN(at
, willcpy
);
4831 error
= uiomove(mtod(at
, caddr_t
), willcpy
, uio
);
4837 at
->m_len
= willcpy
;
4838 at
->m_nextpkt
= at
->m_next
= 0;
4841 MGET(at
->m_next
, MB_WAIT
, MT_DATA
);
4842 if (at
->m_next
== NULL
) {
4847 MCLGET(at
, MB_WAIT
);
4851 if ((at
->m_flags
& M_EXT
) == 0) {
4854 cancpy
= M_TRAILINGSPACE(at
);
4855 willcpy
= min(cancpy
, left
);
4862 sctp_sendall (struct sctp_inpcb
*inp
, struct uio
*uio
, struct mbuf
*m
, struct sctp_sndrcvinfo
*srcv
)
4865 struct sctp_copy_all
*ca
;
4866 MALLOC(ca
, struct sctp_copy_all
*,
4867 sizeof(struct sctp_copy_all
), M_PCB
, MB_WAIT
);
4872 memset (ca
, 0, sizeof(struct sctp_copy_all
));
4876 /* take off the sendall flag, it would
4877 * be bad if we failed to do this :-0
4879 ca
->sndrcv
.sinfo_flags
&= ~MSG_SENDALL
;
4881 /* get length and mbuf chain */
4883 ca
->sndlen
= uio
->uio_resid
;
4884 ca
->m
= sctp_copy_out_all(uio
, ca
->sndlen
);
4885 if (ca
->m
== NULL
) {
4890 if ((m
->m_flags
& M_PKTHDR
) == 0) {
4895 ca
->sndlen
+= m
->m_len
;
4899 ca
->sndlen
= m
->m_pkthdr
.len
;
4904 ret
= sctp_initiate_iterator(sctp_sendall_iterator
, SCTP_PCB_ANY_FLAGS
, SCTP_ASOC_ANY_STATE
,
4905 (void *)ca
, 0, sctp_sendall_completes
, inp
);
4908 kprintf("Failed to initate iterator to takeover associations\n");
4919 sctp_toss_old_cookies(struct sctp_association
*asoc
)
4921 struct sctp_tmit_chunk
*chk
, *nchk
;
4922 chk
= TAILQ_FIRST(&asoc
->control_send_queue
);
4924 nchk
= TAILQ_NEXT(chk
, sctp_next
);
4925 if (chk
->rec
.chunk_id
== SCTP_COOKIE_ECHO
) {
4926 TAILQ_REMOVE(&asoc
->control_send_queue
, chk
, sctp_next
);
4928 sctp_m_freem(chk
->data
);
4931 asoc
->ctrl_queue_cnt
--;
4933 sctp_free_remote_addr(chk
->whoTo
);
4934 SCTP_ZONE_FREE(sctppcbinfo
.ipi_zone_chunk
, chk
);
4935 sctppcbinfo
.ipi_count_chunk
--;
4936 if ((int)sctppcbinfo
.ipi_count_chunk
< 0) {
4937 panic("Chunk count is negative");
4939 sctppcbinfo
.ipi_gencnt_chunk
++;
4946 sctp_toss_old_asconf(struct sctp_tcb
*stcb
)
4948 struct sctp_association
*asoc
;
4949 struct sctp_tmit_chunk
*chk
, *chk_tmp
;
4952 for (chk
= TAILQ_FIRST(&asoc
->control_send_queue
); chk
!= NULL
;
4955 chk_tmp
= TAILQ_NEXT(chk
, sctp_next
);
4956 /* find SCTP_ASCONF chunk in queue (only one ever in queue) */
4957 if (chk
->rec
.chunk_id
== SCTP_ASCONF
) {
4958 TAILQ_REMOVE(&asoc
->control_send_queue
, chk
, sctp_next
);
4960 sctp_m_freem(chk
->data
);
4963 asoc
->ctrl_queue_cnt
--;
4965 sctp_free_remote_addr(chk
->whoTo
);
4966 SCTP_ZONE_FREE(sctppcbinfo
.ipi_zone_chunk
, chk
);
4967 sctppcbinfo
.ipi_count_chunk
--;
4968 if ((int)sctppcbinfo
.ipi_count_chunk
< 0) {
4969 panic("Chunk count is negative");
4971 sctppcbinfo
.ipi_gencnt_chunk
++;
4978 sctp_clean_up_datalist(struct sctp_tcb
*stcb
,
4979 struct sctp_association
*asoc
,
4980 struct sctp_tmit_chunk
**data_list
,
4982 struct sctp_nets
*net
)
4985 for (i
= 0; i
< bundle_at
; i
++) {
4986 /* off of the send queue */
4988 /* Any chunk NOT 0 you zap the time
4989 * chunk 0 gets zapped or set based on
4990 * if a RTO measurment is needed.
4992 data_list
[i
]->do_rtt
= 0;
4995 data_list
[i
]->sent_rcv_time
= net
->last_sent_time
;
4996 TAILQ_REMOVE(&asoc
->send_queue
,
4999 /* on to the sent queue */
5000 TAILQ_INSERT_TAIL(&asoc
->sent_queue
,
5003 /* This does not lower until the cum-ack passes it */
5004 asoc
->sent_queue_cnt
++;
5005 asoc
->send_queue_cnt
--;
5006 if ((asoc
->peers_rwnd
<= 0) &&
5007 (asoc
->total_flight
== 0) &&
5009 /* Mark the chunk as being a window probe */
5011 if (sctp_debug_on
& SCTP_DEBUG_OUTPUT1
) {
5012 kprintf("WINDOW PROBE SET\n");
5015 sctp_pegs
[SCTP_WINDOW_PROBES
]++;
5016 data_list
[i
]->rec
.data
.state_flags
|= SCTP_WINDOW_PROBE
;
5018 data_list
[i
]->rec
.data
.state_flags
&= ~SCTP_WINDOW_PROBE
;
5020 #ifdef SCTP_AUDITING_ENABLED
5021 sctp_audit_log(0xC2, 3);
5023 data_list
[i
]->sent
= SCTP_DATAGRAM_SENT
;
5024 data_list
[i
]->snd_count
= 1;
5025 net
->flight_size
+= data_list
[i
]->book_size
;
5026 asoc
->total_flight
+= data_list
[i
]->book_size
;
5027 asoc
->total_flight_count
++;
5028 #ifdef SCTP_LOG_RWND
5029 sctp_log_rwnd(SCTP_DECREASE_PEER_RWND
,
5030 asoc
->peers_rwnd
, data_list
[i
]->send_size
, sctp_peer_chunk_oh
);
5032 asoc
->peers_rwnd
= sctp_sbspace_sub(asoc
->peers_rwnd
,
5033 (u_int32_t
)(data_list
[i
]->send_size
+ sctp_peer_chunk_oh
));
5034 if (asoc
->peers_rwnd
< stcb
->sctp_ep
->sctp_ep
.sctp_sws_sender
) {
5035 /* SWS sender side engages */
5036 asoc
->peers_rwnd
= 0;
5042 sctp_clean_up_ctl(struct sctp_association
*asoc
)
5044 struct sctp_tmit_chunk
*chk
, *nchk
;
5045 for (chk
= TAILQ_FIRST(&asoc
->control_send_queue
);
5047 nchk
= TAILQ_NEXT(chk
, sctp_next
);
5048 if ((chk
->rec
.chunk_id
== SCTP_SELECTIVE_ACK
) ||
5049 (chk
->rec
.chunk_id
== SCTP_HEARTBEAT_REQUEST
) ||
5050 (chk
->rec
.chunk_id
== SCTP_HEARTBEAT_ACK
) ||
5051 (chk
->rec
.chunk_id
== SCTP_SHUTDOWN
) ||
5052 (chk
->rec
.chunk_id
== SCTP_SHUTDOWN_ACK
) ||
5053 (chk
->rec
.chunk_id
== SCTP_OPERATION_ERROR
) ||
5054 (chk
->rec
.chunk_id
== SCTP_PACKET_DROPPED
) ||
5055 (chk
->rec
.chunk_id
== SCTP_COOKIE_ACK
) ||
5056 (chk
->rec
.chunk_id
== SCTP_ECN_CWR
) ||
5057 (chk
->rec
.chunk_id
== SCTP_ASCONF_ACK
)) {
5058 /* Stray chunks must be cleaned up */
5060 TAILQ_REMOVE(&asoc
->control_send_queue
, chk
, sctp_next
);
5062 sctp_m_freem(chk
->data
);
5065 asoc
->ctrl_queue_cnt
--;
5066 sctp_free_remote_addr(chk
->whoTo
);
5067 SCTP_ZONE_FREE(sctppcbinfo
.ipi_zone_chunk
, chk
);
5068 sctppcbinfo
.ipi_count_chunk
--;
5069 if ((int)sctppcbinfo
.ipi_count_chunk
< 0) {
5070 panic("Chunk count is negative");
5072 sctppcbinfo
.ipi_gencnt_chunk
++;
5073 } else if (chk
->rec
.chunk_id
== SCTP_STREAM_RESET
) {
5074 struct sctp_stream_reset_req
*strreq
;
5075 /* special handling, we must look into the param */
5076 strreq
= mtod(chk
->data
, struct sctp_stream_reset_req
*);
5077 if (strreq
->sr_req
.ph
.param_type
== ntohs(SCTP_STR_RESET_RESPONSE
)) {
5078 goto clean_up_anyway
;
5085 sctp_move_to_outqueue(struct sctp_tcb
*stcb
,
5086 struct sctp_stream_out
*strq
)
5088 /* Move from the stream to the send_queue keeping track of the total */
5089 struct sctp_association
*asoc
;
5093 struct sctp_tmit_chunk
*chk
, *nchk
;
5094 struct sctp_data_chunk
*dchkh
;
5095 struct sctpchunk_listhead tmp
;
5100 chk
= TAILQ_FIRST(&strq
->outqueue
);
5102 nchk
= TAILQ_NEXT(chk
, sctp_next
);
5103 /* now put in the chunk header */
5105 M_PREPEND(chk
->data
, sizeof(struct sctp_data_chunk
), MB_DONTWAIT
);
5106 if (chk
->data
== NULL
) {
5111 if (orig
!= chk
->data
) {
5112 /* A new mbuf was added, account for it */
5113 if ((stcb
->sctp_ep
->sctp_flags
& SCTP_PCB_FLAGS_TCPTYPE
) ||
5114 (stcb
->sctp_ep
->sctp_flags
& SCTP_PCB_FLAGS_IN_TCPPOOL
)) {
5115 stcb
->sctp_socket
->so_snd
.ssb_mbcnt
+= MSIZE
;
5117 #ifdef SCTP_MBCNT_LOGGING
5118 sctp_log_mbcnt(SCTP_LOG_MBCNT_INCREASE
,
5119 asoc
->total_output_queue_size
,
5121 asoc
->total_output_mbuf_queue_size
,
5124 stcb
->asoc
.total_output_mbuf_queue_size
+= MSIZE
;
5125 chk
->mbcnt
+= MSIZE
;
5127 chk
->send_size
+= sizeof(struct sctp_data_chunk
);
5128 /* This should NOT have to do anything, but
5129 * I would rather be cautious
5131 if (!failed
&& ((size_t)chk
->data
->m_len
< sizeof(struct sctp_data_chunk
))) {
5132 m_pullup(chk
->data
, sizeof(struct sctp_data_chunk
));
5133 if (chk
->data
== NULL
) {
5138 dchkh
= mtod(chk
->data
, struct sctp_data_chunk
*);
5139 dchkh
->ch
.chunk_length
= htons(chk
->send_size
);
5140 /* Chunks must be padded to even word boundary */
5141 padval
= chk
->send_size
% 4;
5143 /* For fragmented messages this should not
5144 * run except possibly on the last chunk
5146 if (sctp_pad_lastmbuf(chk
->data
, (4 - padval
))) {
5147 /* we are in big big trouble no mbufs :< */
5151 chk
->send_size
+= (4 - padval
);
5153 /* pull from stream queue */
5154 TAILQ_REMOVE(&strq
->outqueue
, chk
, sctp_next
);
5155 asoc
->stream_queue_cnt
--;
5156 TAILQ_INSERT_TAIL(&tmp
, chk
, sctp_next
);
5157 /* add it in to the size of moved chunks */
5158 if (chk
->rec
.data
.rcv_flags
& SCTP_DATA_LAST_FRAG
) {
5159 /* we pull only one message */
5165 /* Gak, we just lost the user message */
5166 chk
= TAILQ_FIRST(&tmp
);
5168 nchk
= TAILQ_NEXT(chk
, sctp_next
);
5169 TAILQ_REMOVE(&tmp
, chk
, sctp_next
);
5171 sctp_ulp_notify(SCTP_NOTIFY_DG_FAIL
, stcb
,
5172 (SCTP_NOTIFY_DATAGRAM_UNSENT
|SCTP_INTERNAL_ERROR
),
5176 sctp_m_freem(chk
->data
);
5180 sctp_free_remote_addr(chk
->whoTo
);
5183 SCTP_ZONE_FREE(sctppcbinfo
.ipi_zone_chunk
, chk
);
5184 sctppcbinfo
.ipi_count_chunk
--;
5185 if ((int)sctppcbinfo
.ipi_count_chunk
< 0) {
5186 panic("Chunk count is negative");
5188 sctppcbinfo
.ipi_gencnt_chunk
++;
5193 /* now pull them off of temp wheel */
5194 chk
= TAILQ_FIRST(&tmp
);
5196 nchk
= TAILQ_NEXT(chk
, sctp_next
);
5197 /* insert on send_queue */
5198 TAILQ_REMOVE(&tmp
, chk
, sctp_next
);
5199 TAILQ_INSERT_TAIL(&asoc
->send_queue
, chk
, sctp_next
);
5200 asoc
->send_queue_cnt
++;
5202 chk
->rec
.data
.TSN_seq
= asoc
->sending_seq
++;
5204 dchkh
= mtod(chk
->data
, struct sctp_data_chunk
*);
5205 /* Put the rest of the things in place now. Size
5206 * was done earlier in previous loop prior to
5209 dchkh
->ch
.chunk_type
= SCTP_DATA
;
5210 dchkh
->ch
.chunk_flags
= chk
->rec
.data
.rcv_flags
;
5211 dchkh
->dp
.tsn
= htonl(chk
->rec
.data
.TSN_seq
);
5212 dchkh
->dp
.stream_id
= htons(strq
->stream_no
);
5213 dchkh
->dp
.stream_sequence
= htons(chk
->rec
.data
.stream_seq
);
5214 dchkh
->dp
.protocol_id
= chk
->rec
.data
.payloadtype
;
5215 /* total count moved */
5216 tot_moved
+= chk
->send_size
;
5223 sctp_fill_outqueue(struct sctp_tcb
*stcb
,
5224 struct sctp_nets
*net
)
5226 struct sctp_association
*asoc
;
5227 struct sctp_tmit_chunk
*chk
;
5228 struct sctp_stream_out
*strq
, *strqn
;
5229 int mtu_fromwheel
, goal_mtu
;
5230 unsigned int moved
, seenend
, cnt_mvd
=0;
5233 /* Attempt to move at least 1 MTU's worth
5234 * onto the wheel for each destination address
5236 goal_mtu
= net
->cwnd
- net
->flight_size
;
5237 if ((unsigned int)goal_mtu
< net
->mtu
) {
5238 goal_mtu
= net
->mtu
;
5240 if (sctp_pegs
[SCTP_MOVED_MTU
] < (unsigned int)goal_mtu
) {
5241 sctp_pegs
[SCTP_MOVED_MTU
] = goal_mtu
;
5243 seenend
= moved
= mtu_fromwheel
= 0;
5244 if (asoc
->last_out_stream
== NULL
) {
5245 strq
= asoc
->last_out_stream
= TAILQ_FIRST(&asoc
->out_wheel
);
5246 if (asoc
->last_out_stream
== NULL
) {
5247 /* huh nothing on the wheel, TSNH */
5252 strq
= TAILQ_NEXT(asoc
->last_out_stream
, next_spoke
);
5255 asoc
->last_out_stream
= TAILQ_FIRST(&asoc
->out_wheel
);
5257 while (mtu_fromwheel
< goal_mtu
) {
5261 strq
= TAILQ_FIRST(&asoc
->out_wheel
);
5262 } else if ((moved
== 0) && (seenend
)) {
5263 /* none left on the wheel */
5264 sctp_pegs
[SCTP_MOVED_NLEF
]++;
5268 * clear the flags and rotate back through
5273 strq
= TAILQ_FIRST(&asoc
->out_wheel
);
5279 strqn
= TAILQ_NEXT(strq
, next_spoke
);
5280 if ((chk
= TAILQ_FIRST(&strq
->outqueue
)) == NULL
) {
5281 /* none left on this queue, prune a spoke? */
5282 sctp_remove_from_wheel(asoc
, strq
);
5283 if (strq
== asoc
->last_out_stream
) {
5284 /* the last one we used went off the wheel */
5285 asoc
->last_out_stream
= NULL
;
5290 if (chk
->whoTo
!= net
) {
5291 /* Skip this stream, first one on stream
5292 * does not head to our current destination.
5297 mtu_fromwheel
+= sctp_move_to_outqueue(stcb
, strq
);
5300 asoc
->last_out_stream
= strq
;
5303 sctp_pegs
[SCTP_MOVED_MAX
]++;
5305 if (sctp_debug_on
& SCTP_DEBUG_OUTPUT3
) {
5306 kprintf("Ok we moved %d chunks to send queue\n",
5310 if (sctp_pegs
[SCTP_MOVED_QMAX
] < cnt_mvd
) {
5311 sctp_pegs
[SCTP_MOVED_QMAX
] = cnt_mvd
;
5316 sctp_fix_ecn_echo(struct sctp_association
*asoc
)
5318 struct sctp_tmit_chunk
*chk
;
5319 TAILQ_FOREACH(chk
, &asoc
->control_send_queue
, sctp_next
) {
5320 if (chk
->rec
.chunk_id
== SCTP_ECN_ECHO
) {
5321 chk
->sent
= SCTP_DATAGRAM_UNSENT
;
5327 sctp_move_to_an_alt(struct sctp_tcb
*stcb
,
5328 struct sctp_association
*asoc
,
5329 struct sctp_nets
*net
)
5331 struct sctp_tmit_chunk
*chk
;
5332 struct sctp_nets
*a_net
;
5333 a_net
= sctp_find_alternate_net(stcb
, net
);
5334 if ((a_net
!= net
) &&
5335 ((a_net
->dest_state
& SCTP_ADDR_REACHABLE
) == SCTP_ADDR_REACHABLE
)) {
5337 * We only proceed if a valid alternate is found that is
5338 * not this one and is reachable. Here we must move all
5339 * chunks queued in the send queue off of the destination
5340 * address to our alternate.
5342 TAILQ_FOREACH(chk
, &asoc
->send_queue
, sctp_next
) {
5343 if (chk
->whoTo
== net
) {
5344 /* Move the chunk to our alternate */
5345 sctp_free_remote_addr(chk
->whoTo
);
5353 static int sctp_from_user_send
=0;
5356 sctp_med_chunk_output(struct sctp_inpcb
*inp
,
5357 struct sctp_tcb
*stcb
,
5358 struct sctp_association
*asoc
,
5361 int control_only
, int *cwnd_full
, int from_where
,
5362 struct timeval
*now
, int *now_filled
)
5365 * Ok this is the generic chunk service queue.
5366 * we must do the following:
5367 * - Service the stream queue that is next, moving any message
5368 * (note I must get a complete message i.e. FIRST/MIDDLE and
5369 * LAST to the out queue in one pass) and assigning TSN's
5370 * - Check to see if the cwnd/rwnd allows any output, if so we
5371 * go ahead and fomulate and send the low level chunks. Making
5372 * sure to combine any control in the control chunk queue also.
5374 struct sctp_nets
*net
;
5375 struct mbuf
*outchain
;
5376 struct sctp_tmit_chunk
*chk
, *nchk
;
5377 struct sctphdr
*shdr
;
5378 /* temp arrays for unlinking */
5379 struct sctp_tmit_chunk
*data_list
[SCTP_MAX_DATA_BUNDLING
];
5380 int no_fragmentflg
, error
;
5381 int one_chunk
, hbflag
;
5382 int asconf
, cookie
, no_out_cnt
;
5383 int bundle_at
, ctl_cnt
, no_data_chunks
, cwnd_full_ind
;
5384 unsigned int mtu
, r_mtu
, omtu
;
5387 ctl_cnt
= no_out_cnt
= asconf
= cookie
= 0;
5389 * First lets prime the pump. For each destination, if there
5390 * is room in the flight size, attempt to pull an MTU's worth
5391 * out of the stream queues into the general send_queue
5393 #ifdef SCTP_AUDITING_ENABLED
5394 sctp_audit_log(0xC2, 2);
5397 if (sctp_debug_on
& SCTP_DEBUG_OUTPUT3
) {
5398 kprintf("***********************\n");
5407 /* Nothing to possible to send? */
5408 if (TAILQ_EMPTY(&asoc
->control_send_queue
) &&
5409 TAILQ_EMPTY(&asoc
->send_queue
) &&
5410 TAILQ_EMPTY(&asoc
->out_wheel
)) {
5412 if (sctp_debug_on
& SCTP_DEBUG_OUTPUT3
) {
5413 kprintf("All wheels empty\n");
5418 if (asoc
->peers_rwnd
<= 0) {
5419 /* No room in peers rwnd */
5422 if (asoc
->total_flight
> 0) {
5423 /* we are allowed one chunk in flight */
5425 sctp_pegs
[SCTP_RWND_BLOCKED
]++;
5429 if (sctp_debug_on
& SCTP_DEBUG_OUTPUT3
) {
5430 kprintf("Ok we have done the fillup no_data_chunk=%d tf=%d prw:%d\n",
5431 (int)no_data_chunks
,
5432 (int)asoc
->total_flight
, (int)asoc
->peers_rwnd
);
5435 TAILQ_FOREACH(net
, &asoc
->nets
, sctp_next
) {
5437 if (sctp_debug_on
& SCTP_DEBUG_OUTPUT3
) {
5438 kprintf("net:%p fs:%d cwnd:%d\n",
5439 net
, net
->flight_size
, net
->cwnd
);
5442 if (net
->flight_size
>= net
->cwnd
) {
5443 /* skip this network, no room */
5446 if (sctp_debug_on
& SCTP_DEBUG_OUTPUT3
) {
5447 kprintf("Ok skip fillup->fs:%d > cwnd:%d\n",
5452 sctp_pegs
[SCTP_CWND_NOFILL
]++;
5456 * spin through the stream queues moving one message and
5457 * assign TSN's as appropriate.
5459 sctp_fill_outqueue(stcb
, net
);
5461 *cwnd_full
= cwnd_full_ind
;
5462 /* now service each destination and send out what we can for it */
5464 if (sctp_debug_on
& SCTP_DEBUG_OUTPUT3
) {
5466 TAILQ_FOREACH(chk
, &asoc
->send_queue
, sctp_next
) {
5469 kprintf("We have %d chunks on the send_queue\n", chk_cnt
);
5471 TAILQ_FOREACH(chk
, &asoc
->sent_queue
, sctp_next
) {
5474 kprintf("We have %d chunks on the sent_queue\n", chk_cnt
);
5475 TAILQ_FOREACH(chk
, &asoc
->control_send_queue
, sctp_next
) {
5478 kprintf("We have %d chunks on the control_queue\n", chk_cnt
);
5481 /* If we have data to send, and DSACK is running, stop it
5482 * and build a SACK to dump on to bundle with output. This
5483 * actually MAY make it so the bundling does not occur if
5484 * the SACK is big but I think this is ok because basic SACK
5485 * space is pre-reserved in our fragmentation size choice.
5487 if ((TAILQ_FIRST(&asoc
->send_queue
) != NULL
) &&
5488 (no_data_chunks
== 0)) {
5489 /* We will be sending something */
5490 if (callout_pending(&stcb
->asoc
.dack_timer
.timer
)) {
5491 /* Yep a callout is pending */
5492 sctp_timer_stop(SCTP_TIMER_TYPE_RECV
,
5495 sctp_send_sack(stcb
);
5498 /* Nothing to send? */
5499 if ((TAILQ_FIRST(&asoc
->control_send_queue
) == NULL
) &&
5500 (TAILQ_FIRST(&asoc
->send_queue
) == NULL
)) {
5503 TAILQ_FOREACH(net
, &asoc
->nets
, sctp_next
) {
5504 /* how much can we send? */
5505 if (net
->ref_count
< 2) {
5506 /* Ref-count of 1 so we cannot have data or control
5507 * queued to this address. Skip it.
5511 ctl_cnt
= bundle_at
= 0;
5516 if ((net
->ro
.ro_rt
) && (net
->ro
.ro_rt
->rt_ifp
)) {
5517 /* if we have a route and an ifp
5518 * check to see if we have room to
5522 ifp
= net
->ro
.ro_rt
->rt_ifp
;
5523 if ((ifp
->if_snd
.ifq_len
+ 2) >= ifp
->if_snd
.ifq_maxlen
) {
5524 sctp_pegs
[SCTP_IFP_QUEUE_FULL
]++;
5525 #ifdef SCTP_LOG_MAXBURST
5526 sctp_log_maxburst(net
, ifp
->if_snd
.ifq_len
, ifp
->if_snd
.ifq_maxlen
, SCTP_MAX_IFP_APPLIED
);
5531 if (((struct sockaddr
*)&net
->ro
._l_addr
)->sa_family
== AF_INET
) {
5532 mtu
= net
->mtu
- (sizeof(struct ip
) + sizeof(struct sctphdr
));
5534 mtu
= net
->mtu
- (sizeof(struct ip6_hdr
) + sizeof(struct sctphdr
));
5536 if (mtu
> asoc
->peers_rwnd
) {
5537 if (asoc
->total_flight
> 0) {
5538 /* We have a packet in flight somewhere */
5539 r_mtu
= asoc
->peers_rwnd
;
5541 /* We are always allowed to send one MTU out */
5549 if (sctp_debug_on
& SCTP_DEBUG_OUTPUT3
) {
5550 kprintf("Ok r_mtu is %d mtu is %d for this net:%p one_chunk:%d\n",
5551 r_mtu
, mtu
, net
, one_chunk
);
5554 /************************/
5555 /* Control transmission */
5556 /************************/
5557 /* Now first lets go through the control queue */
5558 for (chk
= TAILQ_FIRST(&asoc
->control_send_queue
);
5560 nchk
= TAILQ_NEXT(chk
, sctp_next
);
5561 if (chk
->whoTo
!= net
) {
5563 * No, not sent to the network we are
5568 if (chk
->data
== NULL
) {
5571 if ((chk
->data
->m_flags
& M_PKTHDR
) == 0) {
5573 * NOTE: the chk queue MUST have the PKTHDR
5574 * flag set on it with a total in the
5575 * m_pkthdr.len field!! else the chunk will
5580 if (chk
->sent
!= SCTP_DATAGRAM_UNSENT
) {
5582 * It must be unsent. Cookies and ASCONF's
5583 * hang around but there timers will force
5584 * when marked for resend.
5588 /* Here we do NOT factor the r_mtu */
5589 if ((chk
->data
->m_pkthdr
.len
< (int)mtu
) ||
5590 (chk
->flags
& CHUNK_FLAGS_FRAGMENT_OK
)) {
5592 * We probably should glom the mbuf chain from
5593 * the chk->data for control but the problem
5594 * is it becomes yet one more level of
5595 * tracking to do if for some reason output
5596 * fails. Then I have got to reconstruct the
5597 * merged control chain.. el yucko.. for now
5598 * we take the easy way and do the copy
5600 outchain
= sctp_copy_mbufchain(chk
->data
,
5602 if (outchain
== NULL
) {
5605 /* update our MTU size */
5606 mtu
-= chk
->data
->m_pkthdr
.len
;
5610 /* Do clear IP_DF ? */
5611 if (chk
->flags
& CHUNK_FLAGS_FRAGMENT_OK
) {
5614 /* Mark things to be removed, if needed */
5615 if ((chk
->rec
.chunk_id
== SCTP_SELECTIVE_ACK
) ||
5616 (chk
->rec
.chunk_id
== SCTP_HEARTBEAT_REQUEST
) ||
5617 (chk
->rec
.chunk_id
== SCTP_HEARTBEAT_ACK
) ||
5618 (chk
->rec
.chunk_id
== SCTP_SHUTDOWN
) ||
5619 (chk
->rec
.chunk_id
== SCTP_SHUTDOWN_ACK
) ||
5620 (chk
->rec
.chunk_id
== SCTP_OPERATION_ERROR
) ||
5621 (chk
->rec
.chunk_id
== SCTP_COOKIE_ACK
) ||
5622 (chk
->rec
.chunk_id
== SCTP_ECN_CWR
) ||
5623 (chk
->rec
.chunk_id
== SCTP_PACKET_DROPPED
) ||
5624 (chk
->rec
.chunk_id
== SCTP_ASCONF_ACK
)) {
5626 if (chk
->rec
.chunk_id
== SCTP_HEARTBEAT_REQUEST
)
5628 /* remove these chunks at the end */
5629 if (chk
->rec
.chunk_id
== SCTP_SELECTIVE_ACK
) {
5630 /* turn off the timer */
5631 if (callout_pending(&stcb
->asoc
.dack_timer
.timer
)) {
5632 sctp_timer_stop(SCTP_TIMER_TYPE_RECV
,
5639 * Other chunks, since they have
5640 * timers running (i.e. COOKIE or
5641 * ASCONF) we just "trust" that it
5642 * gets sent or retransmitted.
5645 if (chk
->rec
.chunk_id
== SCTP_COOKIE_ECHO
) {
5648 } else if (chk
->rec
.chunk_id
== SCTP_ASCONF
) {
5650 * set hb flag since we can use
5656 chk
->sent
= SCTP_DATAGRAM_SENT
;
5661 * Ok we are out of room but we can
5662 * output without effecting the flight
5663 * size since this little guy is a
5664 * control only packet.
5667 sctp_timer_start(SCTP_TIMER_TYPE_ASCONF
, inp
, stcb
, net
);
5671 sctp_timer_start(SCTP_TIMER_TYPE_COOKIE
, inp
, stcb
, net
);
5674 if (outchain
->m_len
== 0) {
5676 * Special case for when you
5677 * get a 0 len mbuf at the
5678 * head due to the lack of a
5679 * MHDR at the beginning.
5681 outchain
->m_len
= sizeof(struct sctphdr
);
5683 M_PREPEND(outchain
, sizeof(struct sctphdr
), MB_DONTWAIT
);
5684 if (outchain
== NULL
) {
5687 goto error_out_again
;
5690 shdr
= mtod(outchain
, struct sctphdr
*);
5691 shdr
->src_port
= inp
->sctp_lport
;
5692 shdr
->dest_port
= stcb
->rport
;
5693 shdr
->v_tag
= htonl(stcb
->asoc
.peer_vtag
);
5696 if ((error
= sctp_lowlevel_chunk_output(inp
, stcb
, net
,
5697 (struct sockaddr
*)&net
->ro
._l_addr
,
5699 no_fragmentflg
, 0, NULL
, asconf
))) {
5700 if (error
== ENOBUFS
) {
5701 asoc
->ifp_had_enobuf
= 1;
5703 sctp_pegs
[SCTP_DATA_OUT_ERR
]++;
5704 if (from_where
== 0) {
5705 sctp_pegs
[SCTP_ERROUT_FRM_USR
]++;
5709 if (sctp_debug_on
& SCTP_DEBUG_OUTPUT2
) {
5710 kprintf("Gak got ctrl error %d\n", error
);
5713 /* error, could not output */
5716 if (sctp_debug_on
& SCTP_DEBUG_OUTPUT3
) {
5717 kprintf("Update HB anyway\n");
5720 if (*now_filled
== 0) {
5721 SCTP_GETTIME_TIMEVAL(&net
->last_sent_time
);
5723 *now
= net
->last_sent_time
;
5725 net
->last_sent_time
= *now
;
5729 if (error
== EHOSTUNREACH
) {
5732 * unreachable during
5736 if (sctp_debug_on
& SCTP_DEBUG_OUTPUT3
) {
5737 kprintf("Moving data to an alterante\n");
5740 sctp_move_to_an_alt(stcb
, asoc
, net
);
5742 sctp_clean_up_ctl (asoc
);
5745 asoc
->ifp_had_enobuf
= 0;
5746 /* Only HB or ASCONF advances time */
5748 if (*now_filled
== 0) {
5749 SCTP_GETTIME_TIMEVAL(&net
->last_sent_time
);
5751 *now
= net
->last_sent_time
;
5753 net
->last_sent_time
= *now
;
5758 * increase the number we sent, if a
5759 * cookie is sent we don't tell them
5763 *num_out
+= ctl_cnt
;
5764 /* recalc a clean slate and setup */
5765 if (stcb
->sctp_ep
->sctp_flags
& SCTP_PCB_FLAGS_BOUND_V6
) {
5766 mtu
= (net
->mtu
- SCTP_MIN_OVERHEAD
);
5768 mtu
= (net
->mtu
- SCTP_MIN_V4_OVERHEAD
);
5774 /*********************/
5775 /* Data transmission */
5776 /*********************/
5777 /* now lets add any data within the MTU constraints */
5778 if (((struct sockaddr
*)&net
->ro
._l_addr
)->sa_family
== AF_INET
) {
5779 omtu
= net
->mtu
- (sizeof(struct ip
) + sizeof(struct sctphdr
));
5781 omtu
= net
->mtu
- (sizeof(struct ip6_hdr
) + sizeof(struct sctphdr
));
5785 if (sctp_debug_on
& SCTP_DEBUG_OUTPUT3
) {
5786 kprintf("Now to data transmission\n");
5790 if (((asoc
->state
& SCTP_STATE_OPEN
) == SCTP_STATE_OPEN
) ||
5792 for (chk
= TAILQ_FIRST(&asoc
->send_queue
); chk
; chk
= nchk
) {
5793 if (no_data_chunks
) {
5794 /* let only control go out */
5796 if (sctp_debug_on
& SCTP_DEBUG_OUTPUT3
) {
5797 kprintf("Either nothing to send or we are full\n");
5802 if (net
->flight_size
>= net
->cwnd
) {
5803 /* skip this net, no room for data */
5805 if (sctp_debug_on
& SCTP_DEBUG_OUTPUT3
) {
5806 kprintf("fs:%d > cwnd:%d\n",
5807 net
->flight_size
, net
->cwnd
);
5810 sctp_pegs
[SCTP_CWND_BLOCKED
]++;
5814 nchk
= TAILQ_NEXT(chk
, sctp_next
);
5815 if (chk
->whoTo
!= net
) {
5816 /* No, not sent to this net */
5818 if (sctp_debug_on
& SCTP_DEBUG_OUTPUT3
) {
5819 kprintf("chk->whoTo:%p not %p\n",
5827 if (sctp_debug_on
& SCTP_DEBUG_OUTPUT3
) {
5828 kprintf("Can we pick up a chunk?\n");
5831 if ((chk
->send_size
> omtu
) && ((chk
->flags
& CHUNK_FLAGS_FRAGMENT_OK
) == 0)) {
5832 /* strange, we have a chunk that is to bit
5833 * for its destination and yet no fragment ok flag.
5834 * Something went wrong when the PMTU changed...we did
5835 * not mark this chunk for some reason?? I will
5836 * fix it here by letting IP fragment it for now and
5837 * printing a warning. This really should not happen ...
5839 /*#ifdef SCTP_DEBUG*/
5840 kprintf("Warning chunk of %d bytes > mtu:%d and yet PMTU disc missed\n",
5841 chk
->send_size
, mtu
);
5843 chk
->flags
|= CHUNK_FLAGS_FRAGMENT_OK
;
5846 if (((chk
->send_size
<= mtu
) && (chk
->send_size
<= r_mtu
)) ||
5847 ((chk
->flags
& CHUNK_FLAGS_FRAGMENT_OK
) && (chk
->send_size
<= asoc
->peers_rwnd
))) {
5848 /* ok we will add this one */
5850 if (sctp_debug_on
& SCTP_DEBUG_OUTPUT3
) {
5851 kprintf("Picking up the chunk\n");
5854 outchain
= sctp_copy_mbufchain(chk
->data
, outchain
);
5855 if (outchain
== NULL
) {
5857 if (sctp_debug_on
& SCTP_DEBUG_OUTPUT3
) {
5858 kprintf("Gakk no memory\n");
5861 if (!callout_pending(&net
->rxt_timer
.timer
)) {
5862 sctp_timer_start(SCTP_TIMER_TYPE_SEND
, inp
, stcb
, net
);
5866 /* upate our MTU size */
5867 /* Do clear IP_DF ? */
5868 if (chk
->flags
& CHUNK_FLAGS_FRAGMENT_OK
) {
5871 mtu
-= chk
->send_size
;
5872 r_mtu
-= chk
->send_size
;
5873 data_list
[bundle_at
++] = chk
;
5874 if (bundle_at
>= SCTP_MAX_DATA_BUNDLING
) {
5882 if ((r_mtu
<= 0) || one_chunk
) {
5888 * Must be sent in order of the TSN's
5892 if (sctp_debug_on
& SCTP_DEBUG_OUTPUT3
) {
5893 kprintf("ok no more chk:%d > mtu:%d || < r_mtu:%d\n",
5894 chk
->send_size
, mtu
, r_mtu
);
5901 } /* if asoc.state OPEN */
5902 /* Is there something to send for this destination? */
5904 if (sctp_debug_on
& SCTP_DEBUG_OUTPUT3
) {
5905 kprintf("ok now is chain assembled? %p\n",
5911 /* We may need to start a control timer or two */
5913 sctp_timer_start(SCTP_TIMER_TYPE_ASCONF
, inp
, stcb
, net
);
5917 sctp_timer_start(SCTP_TIMER_TYPE_COOKIE
, inp
, stcb
, net
);
5920 /* must start a send timer if data is being sent */
5921 if (bundle_at
&& (!callout_pending(&net
->rxt_timer
.timer
))) {
5922 /* no timer running on this destination
5926 if (sctp_debug_on
& SCTP_DEBUG_OUTPUT3
) {
5927 kprintf("ok lets start a send timer .. we will transmit %p\n",
5931 sctp_timer_start(SCTP_TIMER_TYPE_SEND
, inp
, stcb
, net
);
5933 /* Now send it, if there is anything to send :> */
5934 if ((outchain
->m_flags
& M_PKTHDR
) == 0) {
5937 MGETHDR(t
, MB_DONTWAIT
, MT_HEADER
);
5939 sctp_m_freem(outchain
);
5942 t
->m_next
= outchain
;
5943 t
->m_pkthdr
.len
= 0;
5944 t
->m_pkthdr
.rcvif
= 0;
5949 outchain
->m_pkthdr
.len
+= t
->m_len
;
5953 if (outchain
->m_len
== 0) {
5954 /* Special case for when you get a 0 len
5955 * mbuf at the head due to the lack
5956 * of a MHDR at the beginning.
5958 MH_ALIGN(outchain
, sizeof(struct sctphdr
));
5959 outchain
->m_len
= sizeof(struct sctphdr
);
5961 M_PREPEND(outchain
, sizeof(struct sctphdr
), MB_DONTWAIT
);
5962 if (outchain
== NULL
) {
5968 shdr
= mtod(outchain
, struct sctphdr
*);
5969 shdr
->src_port
= inp
->sctp_lport
;
5970 shdr
->dest_port
= stcb
->rport
;
5971 shdr
->v_tag
= htonl(stcb
->asoc
.peer_vtag
);
5973 if ((error
= sctp_lowlevel_chunk_output(inp
, stcb
, net
,
5974 (struct sockaddr
*)&net
->ro
._l_addr
,
5976 no_fragmentflg
, bundle_at
, data_list
[0], asconf
))) {
5977 /* error, we could not output */
5978 if (error
== ENOBUFS
) {
5979 asoc
->ifp_had_enobuf
= 1;
5981 sctp_pegs
[SCTP_DATA_OUT_ERR
]++;
5982 if (from_where
== 0) {
5983 sctp_pegs
[SCTP_ERROUT_FRM_USR
]++;
5988 if (sctp_debug_on
& SCTP_DEBUG_OUTPUT3
) {
5989 kprintf("Gak send error %d\n", error
);
5994 if (sctp_debug_on
& SCTP_DEBUG_OUTPUT3
) {
5995 kprintf("Update HB time anyway\n");
5998 if (*now_filled
== 0) {
5999 SCTP_GETTIME_TIMEVAL(&net
->last_sent_time
);
6001 *now
= net
->last_sent_time
;
6003 net
->last_sent_time
= *now
;
6007 if (error
== EHOSTUNREACH
) {
6009 * Destination went unreachable during
6013 if (sctp_debug_on
& SCTP_DEBUG_OUTPUT3
) {
6014 kprintf("Calling the movement routine\n");
6017 sctp_move_to_an_alt(stcb
, asoc
, net
);
6019 sctp_clean_up_ctl (asoc
);
6022 asoc
->ifp_had_enobuf
= 0;
6024 if (bundle_at
|| hbflag
) {
6025 /* For data/asconf and hb set time */
6026 if (*now_filled
== 0) {
6027 SCTP_GETTIME_TIMEVAL(&net
->last_sent_time
);
6029 *now
= net
->last_sent_time
;
6031 net
->last_sent_time
= *now
;
6036 *num_out
+= (ctl_cnt
+ bundle_at
);
6039 if (!net
->rto_pending
) {
6040 /* setup for a RTO measurement */
6041 net
->rto_pending
= 1;
6042 data_list
[0]->do_rtt
= 1;
6044 data_list
[0]->do_rtt
= 0;
6046 sctp_pegs
[SCTP_PEG_TSNS_SENT
] += bundle_at
;
6047 sctp_clean_up_datalist(stcb
, asoc
, data_list
, bundle_at
, net
);
6054 /* At the end there should be no NON timed
6055 * chunks hanging on this queue.
6057 if ((*num_out
== 0) && (*reason_code
== 0)) {
6060 sctp_clean_up_ctl (asoc
);
6065 sctp_queue_op_err(struct sctp_tcb
*stcb
, struct mbuf
*op_err
)
6067 /* Prepend a OPERATIONAL_ERROR chunk header
6068 * and put on the end of the control chunk queue.
6070 /* Sender had better have gotten a MGETHDR or else
6071 * the control chunk will be forever skipped
6073 struct sctp_chunkhdr
*hdr
;
6074 struct sctp_tmit_chunk
*chk
;
6077 chk
= (struct sctp_tmit_chunk
*)SCTP_ZONE_GET(sctppcbinfo
.ipi_zone_chunk
);
6080 sctp_m_freem(op_err
);
6083 sctppcbinfo
.ipi_count_chunk
++;
6084 sctppcbinfo
.ipi_gencnt_chunk
++;
6085 M_PREPEND(op_err
, sizeof(struct sctp_chunkhdr
), MB_DONTWAIT
);
6086 if (op_err
== NULL
) {
6087 SCTP_ZONE_FREE(sctppcbinfo
.ipi_zone_chunk
, chk
);
6088 sctppcbinfo
.ipi_count_chunk
--;
6089 if ((int)sctppcbinfo
.ipi_count_chunk
< 0) {
6090 panic("Chunk count is negative");
6092 sctppcbinfo
.ipi_gencnt_chunk
++;
6097 while (mat
!= NULL
) {
6098 chk
->send_size
+= mat
->m_len
;
6101 chk
->rec
.chunk_id
= SCTP_OPERATION_ERROR
;
6102 chk
->sent
= SCTP_DATAGRAM_UNSENT
;
6105 chk
->asoc
= &stcb
->asoc
;
6107 chk
->whoTo
= chk
->asoc
->primary_destination
;
6108 chk
->whoTo
->ref_count
++;
6109 hdr
= mtod(op_err
, struct sctp_chunkhdr
*);
6110 hdr
->chunk_type
= SCTP_OPERATION_ERROR
;
6111 hdr
->chunk_flags
= 0;
6112 hdr
->chunk_length
= htons(chk
->send_size
);
6113 TAILQ_INSERT_TAIL(&chk
->asoc
->control_send_queue
,
6116 chk
->asoc
->ctrl_queue_cnt
++;
6120 sctp_send_cookie_echo(struct mbuf
*m
,
6122 struct sctp_tcb
*stcb
,
6123 struct sctp_nets
*net
)
6126 * pull out the cookie and put it at the front of the control
6130 struct mbuf
*cookie
, *mat
;
6131 struct sctp_paramhdr parm
, *phdr
;
6132 struct sctp_chunkhdr
*hdr
;
6133 struct sctp_tmit_chunk
*chk
;
6134 uint16_t ptype
, plen
;
6135 /* First find the cookie in the param area */
6137 at
= offset
+ sizeof(struct sctp_init_chunk
);
6140 phdr
= sctp_get_next_param(m
, at
, &parm
, sizeof(parm
));
6144 ptype
= ntohs(phdr
->param_type
);
6145 plen
= ntohs(phdr
->param_length
);
6146 if (ptype
== SCTP_STATE_COOKIE
) {
6148 /* found the cookie */
6149 if ((pad
= (plen
% 4))) {
6152 cookie
= sctp_m_copym(m
, at
, plen
, MB_DONTWAIT
);
6153 if (cookie
== NULL
) {
6159 at
+= SCTP_SIZE32(plen
);
6161 if (cookie
== NULL
) {
6162 /* Did not find the cookie */
6165 /* ok, we got the cookie lets change it into a cookie echo chunk */
6167 /* first the change from param to cookie */
6168 hdr
= mtod(cookie
, struct sctp_chunkhdr
*);
6169 hdr
->chunk_type
= SCTP_COOKIE_ECHO
;
6170 hdr
->chunk_flags
= 0;
6171 /* now we MUST have a PKTHDR on it */
6172 if ((cookie
->m_flags
& M_PKTHDR
) != M_PKTHDR
) {
6173 /* we hope this happens rarely */
6174 MGETHDR(mat
, MB_DONTWAIT
, MT_HEADER
);
6176 sctp_m_freem(cookie
);
6180 mat
->m_pkthdr
.rcvif
= 0;
6181 mat
->m_next
= cookie
;
6184 cookie
->m_pkthdr
.len
= plen
;
6185 /* get the chunk stuff now and place it in the FRONT of the queue */
6186 chk
= (struct sctp_tmit_chunk
*)SCTP_ZONE_GET(sctppcbinfo
.ipi_zone_chunk
);
6189 sctp_m_freem(cookie
);
6192 sctppcbinfo
.ipi_count_chunk
++;
6193 sctppcbinfo
.ipi_gencnt_chunk
++;
6194 chk
->send_size
= cookie
->m_pkthdr
.len
;
6195 chk
->rec
.chunk_id
= SCTP_COOKIE_ECHO
;
6196 chk
->sent
= SCTP_DATAGRAM_UNSENT
;
6199 chk
->asoc
= &stcb
->asoc
;
6201 chk
->whoTo
= chk
->asoc
->primary_destination
;
6202 chk
->whoTo
->ref_count
++;
6203 TAILQ_INSERT_HEAD(&chk
->asoc
->control_send_queue
, chk
, sctp_next
);
6204 chk
->asoc
->ctrl_queue_cnt
++;
6209 sctp_send_heartbeat_ack(struct sctp_tcb
*stcb
,
6213 struct sctp_nets
*net
)
6215 /* take a HB request and make it into a
6216 * HB ack and send it.
6218 struct mbuf
*outchain
;
6219 struct sctp_chunkhdr
*chdr
;
6220 struct sctp_tmit_chunk
*chk
;
6224 /* must have a net pointer */
6227 outchain
= sctp_m_copym(m
, offset
, chk_length
, MB_DONTWAIT
);
6228 if (outchain
== NULL
) {
6229 /* gak out of memory */
6232 chdr
= mtod(outchain
, struct sctp_chunkhdr
*);
6233 chdr
->chunk_type
= SCTP_HEARTBEAT_ACK
;
6234 chdr
->chunk_flags
= 0;
6235 if ((outchain
->m_flags
& M_PKTHDR
) != M_PKTHDR
) {
6236 /* should not happen but we are cautious. */
6238 MGETHDR(tmp
, MB_DONTWAIT
, MT_HEADER
);
6243 tmp
->m_pkthdr
.rcvif
= 0;
6244 tmp
->m_next
= outchain
;
6247 outchain
->m_pkthdr
.len
= chk_length
;
6248 if (chk_length
% 4) {
6252 padlen
= 4 - (outchain
->m_pkthdr
.len
% 4);
6253 m_copyback(outchain
, outchain
->m_pkthdr
.len
, padlen
, (caddr_t
)&cpthis
);
6255 chk
= (struct sctp_tmit_chunk
*)SCTP_ZONE_GET(sctppcbinfo
.ipi_zone_chunk
);
6258 sctp_m_freem(outchain
);
6261 sctppcbinfo
.ipi_count_chunk
++;
6262 sctppcbinfo
.ipi_gencnt_chunk
++;
6264 chk
->send_size
= chk_length
;
6265 chk
->rec
.chunk_id
= SCTP_HEARTBEAT_ACK
;
6266 chk
->sent
= SCTP_DATAGRAM_UNSENT
;
6269 chk
->asoc
= &stcb
->asoc
;
6270 chk
->data
= outchain
;
6272 chk
->whoTo
->ref_count
++;
6273 TAILQ_INSERT_TAIL(&chk
->asoc
->control_send_queue
, chk
, sctp_next
);
6274 chk
->asoc
->ctrl_queue_cnt
++;
6278 sctp_send_cookie_ack(struct sctp_tcb
*stcb
) {
6279 /* formulate and queue a cookie-ack back to sender */
6280 struct mbuf
*cookie_ack
;
6281 struct sctp_chunkhdr
*hdr
;
6282 struct sctp_tmit_chunk
*chk
;
6285 MGETHDR(cookie_ack
, MB_DONTWAIT
, MT_HEADER
);
6286 if (cookie_ack
== NULL
) {
6290 cookie_ack
->m_data
+= SCTP_MIN_OVERHEAD
;
6291 chk
= (struct sctp_tmit_chunk
*)SCTP_ZONE_GET(sctppcbinfo
.ipi_zone_chunk
);
6294 sctp_m_freem(cookie_ack
);
6297 sctppcbinfo
.ipi_count_chunk
++;
6298 sctppcbinfo
.ipi_gencnt_chunk
++;
6300 chk
->send_size
= sizeof(struct sctp_chunkhdr
);
6301 chk
->rec
.chunk_id
= SCTP_COOKIE_ACK
;
6302 chk
->sent
= SCTP_DATAGRAM_UNSENT
;
6305 chk
->asoc
= &stcb
->asoc
;
6306 chk
->data
= cookie_ack
;
6307 if (chk
->asoc
->last_control_chunk_from
!= NULL
) {
6308 chk
->whoTo
= chk
->asoc
->last_control_chunk_from
;
6310 chk
->whoTo
= chk
->asoc
->primary_destination
;
6312 chk
->whoTo
->ref_count
++;
6313 hdr
= mtod(cookie_ack
, struct sctp_chunkhdr
*);
6314 hdr
->chunk_type
= SCTP_COOKIE_ACK
;
6315 hdr
->chunk_flags
= 0;
6316 hdr
->chunk_length
= htons(chk
->send_size
);
6317 cookie_ack
->m_pkthdr
.len
= cookie_ack
->m_len
= chk
->send_size
;
6318 cookie_ack
->m_pkthdr
.rcvif
= 0;
6319 TAILQ_INSERT_TAIL(&chk
->asoc
->control_send_queue
, chk
, sctp_next
);
6320 chk
->asoc
->ctrl_queue_cnt
++;
6326 sctp_send_shutdown_ack(struct sctp_tcb
*stcb
, struct sctp_nets
*net
)
6328 /* formulate and queue a SHUTDOWN-ACK back to the sender */
6329 struct mbuf
*m_shutdown_ack
;
6330 struct sctp_shutdown_ack_chunk
*ack_cp
;
6331 struct sctp_tmit_chunk
*chk
;
6333 m_shutdown_ack
= NULL
;
6334 MGETHDR(m_shutdown_ack
, MB_DONTWAIT
, MT_HEADER
);
6335 if (m_shutdown_ack
== NULL
) {
6339 m_shutdown_ack
->m_data
+= SCTP_MIN_OVERHEAD
;
6340 chk
= (struct sctp_tmit_chunk
*)SCTP_ZONE_GET(sctppcbinfo
.ipi_zone_chunk
);
6343 sctp_m_freem(m_shutdown_ack
);
6346 sctppcbinfo
.ipi_count_chunk
++;
6347 sctppcbinfo
.ipi_gencnt_chunk
++;
6349 chk
->send_size
= sizeof(struct sctp_chunkhdr
);
6350 chk
->rec
.chunk_id
= SCTP_SHUTDOWN_ACK
;
6351 chk
->sent
= SCTP_DATAGRAM_UNSENT
;
6354 chk
->asoc
= &stcb
->asoc
;
6355 chk
->data
= m_shutdown_ack
;
6359 ack_cp
= mtod(m_shutdown_ack
, struct sctp_shutdown_ack_chunk
*);
6360 ack_cp
->ch
.chunk_type
= SCTP_SHUTDOWN_ACK
;
6361 ack_cp
->ch
.chunk_flags
= 0;
6362 ack_cp
->ch
.chunk_length
= htons(chk
->send_size
);
6363 m_shutdown_ack
->m_pkthdr
.len
= m_shutdown_ack
->m_len
= chk
->send_size
;
6364 m_shutdown_ack
->m_pkthdr
.rcvif
= 0;
6365 TAILQ_INSERT_TAIL(&chk
->asoc
->control_send_queue
, chk
, sctp_next
);
6366 chk
->asoc
->ctrl_queue_cnt
++;
6371 sctp_send_shutdown(struct sctp_tcb
*stcb
, struct sctp_nets
*net
)
6373 /* formulate and queue a SHUTDOWN to the sender */
6374 struct mbuf
*m_shutdown
;
6375 struct sctp_shutdown_chunk
*shutdown_cp
;
6376 struct sctp_tmit_chunk
*chk
;
6379 MGETHDR(m_shutdown
, MB_DONTWAIT
, MT_HEADER
);
6380 if (m_shutdown
== NULL
) {
6384 m_shutdown
->m_data
+= SCTP_MIN_OVERHEAD
;
6385 chk
= (struct sctp_tmit_chunk
*)SCTP_ZONE_GET(sctppcbinfo
.ipi_zone_chunk
);
6388 sctp_m_freem(m_shutdown
);
6391 sctppcbinfo
.ipi_count_chunk
++;
6392 sctppcbinfo
.ipi_gencnt_chunk
++;
6394 chk
->send_size
= sizeof(struct sctp_shutdown_chunk
);
6395 chk
->rec
.chunk_id
= SCTP_SHUTDOWN
;
6396 chk
->sent
= SCTP_DATAGRAM_UNSENT
;
6399 chk
->asoc
= &stcb
->asoc
;
6400 chk
->data
= m_shutdown
;
6404 shutdown_cp
= mtod(m_shutdown
, struct sctp_shutdown_chunk
*);
6405 shutdown_cp
->ch
.chunk_type
= SCTP_SHUTDOWN
;
6406 shutdown_cp
->ch
.chunk_flags
= 0;
6407 shutdown_cp
->ch
.chunk_length
= htons(chk
->send_size
);
6408 shutdown_cp
->cumulative_tsn_ack
= htonl(stcb
->asoc
.cumulative_tsn
);
6409 m_shutdown
->m_pkthdr
.len
= m_shutdown
->m_len
= chk
->send_size
;
6410 m_shutdown
->m_pkthdr
.rcvif
= 0;
6411 TAILQ_INSERT_TAIL(&chk
->asoc
->control_send_queue
, chk
, sctp_next
);
6412 chk
->asoc
->ctrl_queue_cnt
++;
6414 if ((stcb
->sctp_ep
->sctp_flags
& SCTP_PCB_FLAGS_TCPTYPE
) ||
6415 (stcb
->sctp_ep
->sctp_flags
& SCTP_PCB_FLAGS_IN_TCPPOOL
)) {
6416 stcb
->sctp_ep
->sctp_socket
->so_snd
.ssb_cc
= 0;
6417 soisdisconnecting(stcb
->sctp_ep
->sctp_socket
);
6423 sctp_send_asconf(struct sctp_tcb
*stcb
, struct sctp_nets
*net
)
6426 * formulate and queue an ASCONF to the peer
6427 * ASCONF parameters should be queued on the assoc queue
6429 struct sctp_tmit_chunk
*chk
;
6430 struct mbuf
*m_asconf
;
6431 struct sctp_asconf_chunk
*acp
;
6434 /* compose an ASCONF chunk, maximum length is PMTU */
6435 m_asconf
= sctp_compose_asconf(stcb
);
6436 if (m_asconf
== NULL
) {
6439 acp
= mtod(m_asconf
, struct sctp_asconf_chunk
*);
6440 chk
= (struct sctp_tmit_chunk
*)SCTP_ZONE_GET(sctppcbinfo
.ipi_zone_chunk
);
6443 sctp_m_freem(m_asconf
);
6446 sctppcbinfo
.ipi_count_chunk
++;
6447 sctppcbinfo
.ipi_gencnt_chunk
++;
6449 chk
->data
= m_asconf
;
6450 chk
->send_size
= m_asconf
->m_pkthdr
.len
;
6451 chk
->rec
.chunk_id
= SCTP_ASCONF
;
6452 chk
->sent
= SCTP_DATAGRAM_UNSENT
;
6455 chk
->asoc
= &stcb
->asoc
;
6456 chk
->whoTo
= chk
->asoc
->primary_destination
;
6457 chk
->whoTo
->ref_count
++;
6458 TAILQ_INSERT_TAIL(&chk
->asoc
->control_send_queue
, chk
, sctp_next
);
6459 chk
->asoc
->ctrl_queue_cnt
++;
6464 sctp_send_asconf_ack(struct sctp_tcb
*stcb
, uint32_t retrans
)
6467 * formulate and queue a asconf-ack back to sender
6468 * the asconf-ack must be stored in the tcb
6470 struct sctp_tmit_chunk
*chk
;
6473 /* is there a asconf-ack mbuf chain to send? */
6474 if (stcb
->asoc
.last_asconf_ack_sent
== NULL
) {
6478 /* copy the asconf_ack */
6479 #if defined(__FreeBSD__) || defined(__NetBSD__)
6480 /* Supposedly the m_copypacket is a optimzation,
6483 if (stcb
->asoc
.last_asconf_ack_sent
->m_flags
& M_PKTHDR
) {
6484 m_ack
= m_copypacket(stcb
->asoc
.last_asconf_ack_sent
, MB_DONTWAIT
);
6485 sctp_pegs
[SCTP_CACHED_SRC
]++;
6487 m_ack
= m_copy(stcb
->asoc
.last_asconf_ack_sent
, 0, M_COPYALL
);
6489 m_ack
= m_copy(stcb
->asoc
.last_asconf_ack_sent
, 0, M_COPYALL
);
6491 if (m_ack
== NULL
) {
6492 /* couldn't copy it */
6496 chk
= (struct sctp_tmit_chunk
*)SCTP_ZONE_GET(sctppcbinfo
.ipi_zone_chunk
);
6500 sctp_m_freem(m_ack
);
6503 sctppcbinfo
.ipi_count_chunk
++;
6504 sctppcbinfo
.ipi_gencnt_chunk
++;
6506 /* figure out where it goes to */
6508 /* we're doing a retransmission */
6509 if (stcb
->asoc
.used_alt_asconfack
> 2) {
6510 /* tried alternate nets already, go back */
6513 /* need to try and alternate net */
6514 chk
->whoTo
= sctp_find_alternate_net(stcb
, stcb
->asoc
.last_control_chunk_from
);
6515 stcb
->asoc
.used_alt_asconfack
++;
6517 if (chk
->whoTo
== NULL
) {
6519 if (stcb
->asoc
.last_control_chunk_from
== NULL
)
6520 chk
->whoTo
= stcb
->asoc
.primary_destination
;
6522 chk
->whoTo
= stcb
->asoc
.last_control_chunk_from
;
6523 stcb
->asoc
.used_alt_asconfack
= 0;
6527 if (stcb
->asoc
.last_control_chunk_from
== NULL
)
6528 chk
->whoTo
= stcb
->asoc
.primary_destination
;
6530 chk
->whoTo
= stcb
->asoc
.last_control_chunk_from
;
6531 stcb
->asoc
.used_alt_asconfack
= 0;
6534 chk
->send_size
= m_ack
->m_pkthdr
.len
;
6535 chk
->rec
.chunk_id
= SCTP_ASCONF_ACK
;
6536 chk
->sent
= SCTP_DATAGRAM_UNSENT
;
6539 chk
->asoc
= &stcb
->asoc
;
6540 chk
->whoTo
->ref_count
++;
6541 TAILQ_INSERT_TAIL(&chk
->asoc
->control_send_queue
, chk
, sctp_next
);
6542 chk
->asoc
->ctrl_queue_cnt
++;
6548 sctp_chunk_retransmission(struct sctp_inpcb
*inp
,
6549 struct sctp_tcb
*stcb
,
6550 struct sctp_association
*asoc
,
6551 int *cnt_out
, struct timeval
*now
, int *now_filled
)
6554 * send out one MTU of retransmission.
6555 * If fast_retransmit is happening we ignore the cwnd.
6556 * Otherwise we obey the cwnd and rwnd.
6557 * For a Cookie or Asconf in the control chunk queue we retransmit
6558 * them by themselves.
6560 * For data chunks we will pick out the lowest TSN's in the
6561 * sent_queue marked for resend and bundle them all together
6562 * (up to a MTU of destination). The address to send to should
6563 * have been selected/changed where the retransmission was
6564 * marked (i.e. in FR or t3-timeout routines).
6566 struct sctp_tmit_chunk
*data_list
[SCTP_MAX_DATA_BUNDLING
];
6567 struct sctp_tmit_chunk
*chk
, *fwd
;
6569 struct sctphdr
*shdr
;
6571 struct sctp_nets
*net
;
6572 int no_fragmentflg
, bundle_at
, cnt_thru
;
6574 int error
, i
, one_chunk
, fwd_tsn
, ctl_cnt
, tmr_started
;
6576 tmr_started
= ctl_cnt
= bundle_at
= error
= 0;
6583 #ifdef SCTP_AUDITING_ENABLED
6584 sctp_audit_log(0xC3, 1);
6586 if (TAILQ_EMPTY(&asoc
->sent_queue
)) {
6588 if (sctp_debug_on
& SCTP_DEBUG_OUTPUT1
) {
6589 kprintf("SCTP hits empty queue with cnt set to %d?\n",
6590 asoc
->sent_queue_retran_cnt
);
6593 asoc
->sent_queue_cnt
= 0;
6594 asoc
->sent_queue_cnt_removeable
= 0;
6596 TAILQ_FOREACH(chk
, &asoc
->control_send_queue
, sctp_next
) {
6597 if (chk
->sent
!= SCTP_DATAGRAM_RESEND
) {
6598 /* we only worry about things marked for resend */
6601 if ((chk
->rec
.chunk_id
== SCTP_COOKIE_ECHO
) ||
6602 (chk
->rec
.chunk_id
== SCTP_ASCONF
) ||
6603 (chk
->rec
.chunk_id
== SCTP_STREAM_RESET
) ||
6604 (chk
->rec
.chunk_id
== SCTP_FORWARD_CUM_TSN
)) {
6605 if (chk
->rec
.chunk_id
== SCTP_STREAM_RESET
) {
6606 /* For stream reset we only retran the request
6609 struct sctp_stream_reset_req
*strreq
;
6610 strreq
= mtod(chk
->data
, struct sctp_stream_reset_req
*);
6611 if (strreq
->sr_req
.ph
.param_type
!= ntohs(SCTP_STR_RESET_REQUEST
)) {
6616 if (chk
->rec
.chunk_id
== SCTP_ASCONF
) {
6620 if (chk
->rec
.chunk_id
== SCTP_FORWARD_CUM_TSN
) {
6624 m
= sctp_copy_mbufchain(chk
->data
, m
);
6630 /* do we have control chunks to retransmit? */
6632 /* Start a timer no matter if we suceed or fail */
6633 if (chk
->rec
.chunk_id
== SCTP_COOKIE_ECHO
) {
6634 sctp_timer_start(SCTP_TIMER_TYPE_COOKIE
, inp
, stcb
, chk
->whoTo
);
6635 } else if (chk
->rec
.chunk_id
== SCTP_ASCONF
)
6636 sctp_timer_start(SCTP_TIMER_TYPE_ASCONF
, inp
, stcb
, chk
->whoTo
);
6638 if (m
->m_len
== 0) {
6639 /* Special case for when you get a 0 len
6640 * mbuf at the head due to the lack
6641 * of a MHDR at the beginning.
6643 m
->m_len
= sizeof(struct sctphdr
);
6645 M_PREPEND(m
, sizeof(struct sctphdr
), MB_DONTWAIT
);
6650 shdr
= mtod(m
, struct sctphdr
*);
6651 shdr
->src_port
= inp
->sctp_lport
;
6652 shdr
->dest_port
= stcb
->rport
;
6653 shdr
->v_tag
= htonl(stcb
->asoc
.peer_vtag
);
6655 chk
->snd_count
++; /* update our count */
6657 if ((error
= sctp_lowlevel_chunk_output(inp
, stcb
, chk
->whoTo
,
6658 (struct sockaddr
*)&chk
->whoTo
->ro
._l_addr
, m
,
6659 no_fragmentflg
, 0, NULL
, asconf
))) {
6660 sctp_pegs
[SCTP_DATA_OUT_ERR
]++;
6664 *We don't want to mark the net->sent time here since this
6665 * we use this for HB and retrans cannot measure RTT
6667 /* SCTP_GETTIME_TIMEVAL(&chk->whoTo->last_sent_time);*/
6669 chk
->sent
= SCTP_DATAGRAM_SENT
;
6670 asoc
->sent_queue_retran_cnt
--;
6671 if (asoc
->sent_queue_retran_cnt
< 0) {
6672 asoc
->sent_queue_retran_cnt
= 0;
6677 /* Clean up the fwd-tsn list */
6678 sctp_clean_up_ctl (asoc
);
6682 /* Ok, it is just data retransmission we need to do or
6683 * that and a fwd-tsn with it all.
6685 if (TAILQ_EMPTY(&asoc
->sent_queue
)) {
6689 if (sctp_debug_on
& SCTP_DEBUG_OUTPUT1
) {
6690 kprintf("Normal chunk retransmission cnt:%d\n",
6691 asoc
->sent_queue_retran_cnt
);
6694 if ((SCTP_GET_STATE(asoc
) == SCTP_STATE_COOKIE_ECHOED
) ||
6695 (SCTP_GET_STATE(asoc
) == SCTP_STATE_COOKIE_WAIT
)) {
6696 /* not yet open, resend the cookie and that is it */
6701 #ifdef SCTP_AUDITING_ENABLED
6702 sctp_auditing(20, inp
, stcb
, NULL
);
6704 TAILQ_FOREACH(chk
, &asoc
->sent_queue
, sctp_next
) {
6705 if (chk
->sent
!= SCTP_DATAGRAM_RESEND
) {
6706 /* No, not sent to this net or not ready for rtx */
6710 /* pick up the net */
6712 if (stcb
->sctp_ep
->sctp_flags
& SCTP_PCB_FLAGS_BOUND_V6
) {
6713 mtu
= (net
->mtu
- SCTP_MIN_OVERHEAD
);
6715 mtu
= net
->mtu
- SCTP_MIN_V4_OVERHEAD
;
6718 if ((asoc
->peers_rwnd
< mtu
) && (asoc
->total_flight
> 0)) {
6719 /* No room in peers rwnd */
6721 tsn
= asoc
->last_acked_seq
+ 1;
6722 if (tsn
== chk
->rec
.data
.TSN_seq
) {
6723 /* we make a special exception for this case.
6724 * The peer has no rwnd but is missing the
6725 * lowest chunk.. which is probably what is
6726 * holding up the rwnd.
6728 goto one_chunk_around
;
6731 if (sctp_debug_on
& SCTP_DEBUG_OUTPUT1
) {
6732 kprintf("blocked-peers_rwnd:%d tf:%d\n",
6733 (int)asoc
->peers_rwnd
,
6734 (int)asoc
->total_flight
);
6737 sctp_pegs
[SCTP_RWND_BLOCKED
]++;
6741 if (asoc
->peers_rwnd
< mtu
) {
6744 #ifdef SCTP_AUDITING_ENABLED
6745 sctp_audit_log(0xC3, 2);
6749 net
->fast_retran_ip
= 0;
6750 if (chk
->rec
.data
.doing_fast_retransmit
== 0) {
6751 /* if no FR in progress skip destination that
6752 * have flight_size > cwnd.
6754 if (net
->flight_size
>= net
->cwnd
) {
6755 sctp_pegs
[SCTP_CWND_BLOCKED
]++;
6759 /* Mark the destination net to have FR recovery
6762 net
->fast_retran_ip
= 1;
6765 if ((chk
->send_size
<= mtu
) || (chk
->flags
& CHUNK_FLAGS_FRAGMENT_OK
)) {
6766 /* ok we will add this one */
6767 m
= sctp_copy_mbufchain(chk
->data
, m
);
6771 /* upate our MTU size */
6772 /* Do clear IP_DF ? */
6773 if (chk
->flags
& CHUNK_FLAGS_FRAGMENT_OK
) {
6776 mtu
-= chk
->send_size
;
6777 data_list
[bundle_at
++] = chk
;
6778 if (one_chunk
&& (asoc
->total_flight
<= 0)) {
6779 sctp_pegs
[SCTP_WINDOW_PROBES
]++;
6780 chk
->rec
.data
.state_flags
|= SCTP_WINDOW_PROBE
;
6783 if (one_chunk
== 0) {
6784 /* now are there anymore forward from chk to pick up?*/
6785 fwd
= TAILQ_NEXT(chk
, sctp_next
);
6787 if (fwd
->sent
!= SCTP_DATAGRAM_RESEND
) {
6788 /* Nope, not for retran */
6789 fwd
= TAILQ_NEXT(fwd
, sctp_next
);
6792 if (fwd
->whoTo
!= net
) {
6793 /* Nope, not the net in question */
6794 fwd
= TAILQ_NEXT(fwd
, sctp_next
);
6797 if (fwd
->send_size
<= mtu
) {
6798 m
= sctp_copy_mbufchain(fwd
->data
, m
);
6802 /* upate our MTU size */
6803 /* Do clear IP_DF ? */
6804 if (fwd
->flags
& CHUNK_FLAGS_FRAGMENT_OK
) {
6807 mtu
-= fwd
->send_size
;
6808 data_list
[bundle_at
++] = fwd
;
6809 if (bundle_at
>= SCTP_MAX_DATA_BUNDLING
) {
6812 fwd
= TAILQ_NEXT(fwd
, sctp_next
);
6814 /* can't fit so we are done */
6819 /* Is there something to send for this destination? */
6821 /* No matter if we fail/or suceed we should
6822 * start a timer. A failure is like a lost
6825 if (!callout_pending(&net
->rxt_timer
.timer
)) {
6826 /* no timer running on this destination
6829 sctp_timer_start(SCTP_TIMER_TYPE_SEND
, inp
, stcb
, net
);
6832 if (m
->m_len
== 0) {
6833 /* Special case for when you get a 0 len
6834 * mbuf at the head due to the lack
6835 * of a MHDR at the beginning.
6837 m
->m_len
= sizeof(struct sctphdr
);
6839 M_PREPEND(m
, sizeof(struct sctphdr
), MB_DONTWAIT
);
6844 shdr
= mtod(m
, struct sctphdr
*);
6845 shdr
->src_port
= inp
->sctp_lport
;
6846 shdr
->dest_port
= stcb
->rport
;
6847 shdr
->v_tag
= htonl(stcb
->asoc
.peer_vtag
);
6850 /* Now lets send it, if there is anything to send :> */
6851 if ((error
= sctp_lowlevel_chunk_output(inp
, stcb
, net
,
6852 (struct sockaddr
*)&net
->ro
._l_addr
,
6854 no_fragmentflg
, 0, NULL
, asconf
))) {
6855 /* error, we could not output */
6856 sctp_pegs
[SCTP_DATA_OUT_ERR
]++;
6861 * We don't want to mark the net->sent time here since
6862 * this we use this for HB and retrans cannot measure
6865 /* SCTP_GETTIME_TIMEVAL(&net->last_sent_time);*/
6867 /* For auto-close */
6869 if (*now_filled
== 0) {
6870 SCTP_GETTIME_TIMEVAL(&asoc
->time_last_sent
);
6871 *now
= asoc
->time_last_sent
;
6874 asoc
->time_last_sent
= *now
;
6876 *cnt_out
+= bundle_at
;
6877 #ifdef SCTP_AUDITING_ENABLED
6878 sctp_audit_log(0xC4, bundle_at
);
6880 for (i
= 0; i
< bundle_at
; i
++) {
6881 sctp_pegs
[SCTP_RETRANTSN_SENT
]++;
6882 data_list
[i
]->sent
= SCTP_DATAGRAM_SENT
;
6883 data_list
[i
]->snd_count
++;
6884 asoc
->sent_queue_retran_cnt
--;
6885 /* record the time */
6886 data_list
[i
]->sent_rcv_time
= asoc
->time_last_sent
;
6887 if (asoc
->sent_queue_retran_cnt
< 0) {
6888 asoc
->sent_queue_retran_cnt
= 0;
6890 net
->flight_size
+= data_list
[i
]->book_size
;
6891 asoc
->total_flight
+= data_list
[i
]->book_size
;
6892 asoc
->total_flight_count
++;
6894 #ifdef SCTP_LOG_RWND
6895 sctp_log_rwnd(SCTP_DECREASE_PEER_RWND
,
6896 asoc
->peers_rwnd
, data_list
[i
]->send_size
, sctp_peer_chunk_oh
);
6898 asoc
->peers_rwnd
= sctp_sbspace_sub(asoc
->peers_rwnd
,
6899 (u_int32_t
)(data_list
[i
]->send_size
+ sctp_peer_chunk_oh
));
6900 if (asoc
->peers_rwnd
< stcb
->sctp_ep
->sctp_ep
.sctp_sws_sender
) {
6901 /* SWS sender side engages */
6902 asoc
->peers_rwnd
= 0;
6906 (data_list
[i
]->rec
.data
.doing_fast_retransmit
)) {
6907 sctp_pegs
[SCTP_FAST_RETRAN
]++;
6908 if ((data_list
[i
] == TAILQ_FIRST(&asoc
->sent_queue
)) &&
6909 (tmr_started
== 0)) {
6911 * ok we just fast-retrans'd
6912 * the lowest TSN, i.e the
6913 * first on the list. In this
6914 * case we want to give some
6915 * more time to get a SACK
6916 * back without a t3-expiring.
6918 sctp_timer_stop(SCTP_TIMER_TYPE_SEND
, inp
, stcb
, net
);
6919 sctp_timer_start(SCTP_TIMER_TYPE_SEND
, inp
, stcb
, net
);
6923 #ifdef SCTP_AUDITING_ENABLED
6924 sctp_auditing(21, inp
, stcb
, NULL
);
6930 if (asoc
->sent_queue_retran_cnt
<= 0) {
6931 /* all done we have no more to retran */
6932 asoc
->sent_queue_retran_cnt
= 0;
6936 /* No more room in rwnd */
6939 /* stop the for loop here. we sent out a packet */
6947 sctp_timer_validation(struct sctp_inpcb
*inp
,
6948 struct sctp_tcb
*stcb
,
6949 struct sctp_association
*asoc
,
6952 struct sctp_nets
*net
;
6953 /* Validate that a timer is running somewhere */
6954 TAILQ_FOREACH(net
, &asoc
->nets
, sctp_next
) {
6955 if (callout_pending(&net
->rxt_timer
.timer
)) {
6956 /* Here is a timer */
6960 /* Gak, we did not have a timer somewhere */
6962 if (sctp_debug_on
& SCTP_DEBUG_OUTPUT3
) {
6963 kprintf("Deadlock avoided starting timer on a dest at retran\n");
6966 sctp_timer_start(SCTP_TIMER_TYPE_SEND
, inp
, stcb
, asoc
->primary_destination
);
6971 sctp_chunk_output(struct sctp_inpcb
*inp
,
6972 struct sctp_tcb
*stcb
,
6975 /* Ok this is the generic chunk service queue.
6976 * we must do the following:
6977 * - See if there are retransmits pending, if so we
6978 * must do these first and return.
6979 * - Service the stream queue that is next,
6980 * moving any message (note I must get a complete
6981 * message i.e. FIRST/MIDDLE and LAST to the out
6982 * queue in one pass) and assigning TSN's
6983 * - Check to see if the cwnd/rwnd allows any output, if
6984 * so we go ahead and fomulate and send the low level
6985 * chunks. Making sure to combine any control in the
6986 * control chunk queue also.
6988 struct sctp_association
*asoc
;
6989 struct sctp_nets
*net
;
6990 int error
, num_out
, tot_out
, ret
, reason_code
, burst_cnt
, burst_limit
;
6998 sctp_pegs
[SCTP_CALLS_TO_CO
]++;
7000 if (sctp_debug_on
& SCTP_DEBUG_OUTPUT3
) {
7001 kprintf("in co - retran count:%d\n", asoc
->sent_queue_retran_cnt
);
7004 while (asoc
->sent_queue_retran_cnt
) {
7005 /* Ok, it is retransmission time only, we send out only ONE
7006 * packet with a single call off to the retran code.
7008 ret
= sctp_chunk_retransmission(inp
, stcb
, asoc
, &num_out
, &now
, &now_filled
);
7010 /* Can't send anymore */
7012 if (sctp_debug_on
& SCTP_DEBUG_OUTPUT1
) {
7013 kprintf("retransmission ret:%d -- full\n", ret
);
7017 * now lets push out control by calling med-level
7018 * output once. this assures that we WILL send HB's
7021 sctp_med_chunk_output(inp
, stcb
, asoc
, &num_out
, &reason_code
, 1,
7022 &cwnd_full
, from_where
,
7025 if (sctp_debug_on
& SCTP_DEBUG_OUTPUT1
) {
7026 kprintf("Control send outputs:%d@full\n", num_out
);
7029 #ifdef SCTP_AUDITING_ENABLED
7030 sctp_auditing(8, inp
, stcb
, NULL
);
7032 return (sctp_timer_validation(inp
, stcb
, asoc
, ret
));
7036 * The count was off.. retran is not happening so do
7037 * the normal retransmission.
7040 if (sctp_debug_on
& SCTP_DEBUG_OUTPUT1
) {
7041 kprintf("Done with retrans, none left fill up window\n");
7044 #ifdef SCTP_AUDITING_ENABLED
7045 sctp_auditing(9, inp
, stcb
, NULL
);
7049 if (from_where
== 1) {
7050 /* Only one transmission allowed out of a timeout */
7052 if (sctp_debug_on
& SCTP_DEBUG_OUTPUT1
) {
7053 kprintf("Only one packet allowed out\n");
7056 #ifdef SCTP_AUDITING_ENABLED
7057 sctp_auditing(10, inp
, stcb
, NULL
);
7059 /* Push out any control */
7060 sctp_med_chunk_output(inp
, stcb
, asoc
, &num_out
, &reason_code
, 1, &cwnd_full
, from_where
,
7064 if ((num_out
== 0) && (ret
== 0)) {
7065 /* No more retrans to send */
7069 #ifdef SCTP_AUDITING_ENABLED
7070 sctp_auditing(12, inp
, stcb
, NULL
);
7072 /* Check for bad destinations, if they exist move chunks around. */
7073 burst_limit
= asoc
->max_burst
;
7074 TAILQ_FOREACH(net
, &asoc
->nets
, sctp_next
) {
7075 if ((net
->dest_state
& SCTP_ADDR_NOT_REACHABLE
) ==
7076 SCTP_ADDR_NOT_REACHABLE
) {
7078 * if possible move things off of this address
7079 * we still may send below due to the dormant state
7080 * but we try to find an alternate address to send
7081 * to and if we have one we move all queued data on
7082 * the out wheel to this alternate address.
7084 sctp_move_to_an_alt(stcb
, asoc
, net
);
7087 if ((asoc->sat_network) || (net->addr_is_local)) {
7088 burst_limit = asoc->max_burst * SCTP_SAT_NETWORK_BURST_INCR;
7092 if (sctp_debug_on
& SCTP_DEBUG_OUTPUT3
) {
7093 kprintf("examined net:%p burst limit:%d\n", net
, asoc
->max_burst
);
7097 #ifdef SCTP_USE_ALLMAN_BURST
7098 if ((net
->flight_size
+(burst_limit
*net
->mtu
)) < net
->cwnd
) {
7099 if (net
->ssthresh
< net
->cwnd
)
7100 net
->ssthresh
= net
->cwnd
;
7101 net
->cwnd
= (net
->flight_size
+(burst_limit
*net
->mtu
));
7102 #ifdef SCTP_LOG_MAXBURST
7103 sctp_log_maxburst(net
, 0, burst_limit
, SCTP_MAX_BURST_APPLIED
);
7105 sctp_pegs
[SCTP_MAX_BURST_APL
]++;
7107 net
->fast_retran_ip
= 0;
7112 /* Fill up what we can to the destination */
7117 if (sctp_debug_on
& SCTP_DEBUG_OUTPUT3
) {
7118 kprintf("Burst count:%d - call m-c-o\n", burst_cnt
);
7121 error
= sctp_med_chunk_output(inp
, stcb
, asoc
, &num_out
,
7122 &reason_code
, 0, &cwnd_full
, from_where
,
7126 if (sctp_debug_on
& SCTP_DEBUG_OUTPUT1
) {
7127 kprintf("Error %d was returned from med-c-op\n", error
);
7130 #ifdef SCTP_LOG_MAXBURST
7131 sctp_log_maxburst(asoc
->primary_destination
, error
, burst_cnt
, SCTP_MAX_BURST_ERROR_STOP
);
7136 if (sctp_debug_on
& SCTP_DEBUG_OUTPUT3
) {
7137 kprintf("m-c-o put out %d\n", num_out
);
7143 #ifndef SCTP_USE_ALLMAN_BURST
7144 && (burst_cnt
< burst_limit
)
7147 #ifndef SCTP_USE_ALLMAN_BURST
7148 if (burst_cnt
>= burst_limit
) {
7149 sctp_pegs
[SCTP_MAX_BURST_APL
]++;
7150 asoc
->burst_limit_applied
= 1;
7151 #ifdef SCTP_LOG_MAXBURST
7152 sctp_log_maxburst(asoc
->primary_destination
, 0 , burst_cnt
, SCTP_MAX_BURST_APPLIED
);
7155 asoc
->burst_limit_applied
= 0;
7160 if (sctp_debug_on
& SCTP_DEBUG_OUTPUT1
) {
7161 kprintf("Ok, we have put out %d chunks\n", tot_out
);
7165 sctp_pegs
[SCTP_CO_NODATASNT
]++;
7166 if (asoc
->stream_queue_cnt
> 0) {
7167 sctp_pegs
[SCTP_SOS_NOSNT
]++;
7169 sctp_pegs
[SCTP_NOS_NOSNT
]++;
7171 if (asoc
->send_queue_cnt
> 0) {
7172 sctp_pegs
[SCTP_SOSE_NOSNT
]++;
7174 sctp_pegs
[SCTP_NOSE_NOSNT
]++;
7177 /* Now we need to clean up the control chunk chain if
7178 * a ECNE is on it. It must be marked as UNSENT again
7179 * so next call will continue to send it until
7180 * such time that we get a CWR, to remove it.
7182 sctp_fix_ecn_echo(asoc
);
7188 sctp_output(struct sctp_inpcb
*inp
, struct mbuf
*m
, struct sockaddr
*addr
,
7189 struct mbuf
*control
, struct thread
*p
, int flags
)
7191 struct inpcb
*ip_inp
;
7192 struct sctp_inpcb
*t_inp
;
7193 struct sctp_tcb
*stcb
;
7194 struct sctp_nets
*net
;
7195 struct sctp_association
*asoc
;
7196 int create_lock_applied
= 0;
7197 int queue_only
, error
= 0;
7198 struct sctp_sndrcvinfo srcv
;
7200 int use_rcvinfo
= 0;
7202 /* struct route ro;*/
7206 ip_inp
= (struct inpcb
*)inp
;
7212 if (sctp_debug_on
& SCTP_DEBUG_OUTPUT1
) {
7213 kprintf("USR Send BEGINS\n");
7217 if ((inp
->sctp_flags
& SCTP_PCB_FLAGS_TCPTYPE
) &&
7218 (inp
->sctp_flags
& SCTP_PCB_FLAGS_ACCEPTING
)) {
7219 /* The listner can NOT send */
7221 sctppcbinfo
.mbuf_track
--;
7222 sctp_m_freem(control
);
7229 /* Can't allow a V6 address on a non-v6 socket */
7231 SCTP_ASOC_CREATE_LOCK(inp
);
7232 if ((inp
->sctp_flags
& SCTP_PCB_FLAGS_SOCKET_GONE
) ||
7233 (inp
->sctp_flags
& SCTP_PCB_FLAGS_SOCKET_GONE
)) {
7234 /* Should I really unlock ? */
7235 SCTP_ASOC_CREATE_UNLOCK(inp
);
7237 sctppcbinfo
.mbuf_track
--;
7238 sctp_m_freem(control
);
7245 create_lock_applied
= 1;
7246 if (((inp
->sctp_flags
& SCTP_PCB_FLAGS_BOUND_V6
) == 0) &&
7247 (addr
->sa_family
== AF_INET6
)) {
7248 SCTP_ASOC_CREATE_UNLOCK(inp
);
7250 sctppcbinfo
.mbuf_track
--;
7251 sctp_m_freem(control
);
7260 sctppcbinfo
.mbuf_track
++;
7261 if (sctp_find_cmsg(SCTP_SNDRCV
, (void *)&srcv
, control
,
7263 if (srcv
.sinfo_flags
& MSG_SENDALL
) {
7265 sctppcbinfo
.mbuf_track
--;
7266 sctp_m_freem(control
);
7268 if (create_lock_applied
) {
7269 SCTP_ASOC_CREATE_UNLOCK(inp
);
7270 create_lock_applied
= 0;
7272 return (sctp_sendall(inp
, NULL
, m
, &srcv
));
7274 if (srcv
.sinfo_assoc_id
) {
7275 if (inp
->sctp_flags
& SCTP_PCB_FLAGS_CONNECTED
) {
7276 SCTP_INP_RLOCK(inp
);
7277 stcb
= LIST_FIRST(&inp
->sctp_asoc_list
);
7279 SCTP_TCB_LOCK(stcb
);
7280 SCTP_INP_RUNLOCK(inp
);
7283 if (create_lock_applied
) {
7284 SCTP_ASOC_CREATE_UNLOCK(inp
);
7285 create_lock_applied
= 0;
7287 sctppcbinfo
.mbuf_track
--;
7288 sctp_m_freem(control
);
7293 net
= stcb
->asoc
.primary_destination
;
7295 stcb
= sctp_findassociation_ep_asocid(inp
, srcv
.sinfo_assoc_id
);
7298 * Question: Should I error here if the
7300 * assoc_id is no longer valid?
7301 * i.e. I can't find it?
7305 /* Must locate the net structure */
7307 net
= sctp_findnet(stcb
, addr
);
7310 net
= stcb
->asoc
.primary_destination
;
7316 if (inp
->sctp_flags
& SCTP_PCB_FLAGS_CONNECTED
) {
7317 SCTP_INP_RLOCK(inp
);
7318 stcb
= LIST_FIRST(&inp
->sctp_asoc_list
);
7320 SCTP_TCB_LOCK(stcb
);
7321 SCTP_INP_RUNLOCK(inp
);
7324 if (create_lock_applied
) {
7325 SCTP_ASOC_CREATE_UNLOCK(inp
);
7326 create_lock_applied
= 0;
7329 sctppcbinfo
.mbuf_track
--;
7330 sctp_m_freem(control
);
7337 net
= stcb
->asoc
.primary_destination
;
7339 net
= sctp_findnet(stcb
, addr
);
7341 net
= stcb
->asoc
.primary_destination
;
7346 SCTP_INP_WLOCK(inp
);
7347 SCTP_INP_INCR_REF(inp
);
7348 SCTP_INP_WUNLOCK(inp
);
7349 stcb
= sctp_findassociation_ep_addr(&t_inp
, addr
, &net
, NULL
, NULL
);
7351 SCTP_INP_WLOCK(inp
);
7352 SCTP_INP_DECR_REF(inp
);
7353 SCTP_INP_WUNLOCK(inp
);
7358 if ((stcb
== NULL
) &&
7359 (inp
->sctp_flags
& SCTP_PCB_FLAGS_TCPTYPE
)) {
7361 sctppcbinfo
.mbuf_track
--;
7362 sctp_m_freem(control
);
7365 if (create_lock_applied
) {
7366 SCTP_ASOC_CREATE_UNLOCK(inp
);
7367 create_lock_applied
= 0;
7372 } else if ((stcb
== NULL
) &&
7375 sctppcbinfo
.mbuf_track
--;
7376 sctp_m_freem(control
);
7379 if (create_lock_applied
) {
7380 SCTP_ASOC_CREATE_UNLOCK(inp
);
7381 create_lock_applied
= 0;
7386 } else if (stcb
== NULL
) {
7387 /* UDP mode, we must go ahead and start the INIT process */
7388 if ((use_rcvinfo
) && (srcv
.sinfo_flags
& MSG_ABORT
)) {
7389 /* Strange user to do this */
7391 sctppcbinfo
.mbuf_track
--;
7392 sctp_m_freem(control
);
7395 if (create_lock_applied
) {
7396 SCTP_ASOC_CREATE_UNLOCK(inp
);
7397 create_lock_applied
= 0;
7403 stcb
= sctp_aloc_assoc(inp
, addr
, 1, &error
, 0);
7406 sctppcbinfo
.mbuf_track
--;
7407 sctp_m_freem(control
);
7410 if (create_lock_applied
) {
7411 SCTP_ASOC_CREATE_UNLOCK(inp
);
7412 create_lock_applied
= 0;
7418 if (create_lock_applied
) {
7419 SCTP_ASOC_CREATE_UNLOCK(inp
);
7420 create_lock_applied
= 0;
7422 kprintf("Huh-1, create lock should have been applied!\n");
7426 asoc
->state
= SCTP_STATE_COOKIE_WAIT
;
7427 SCTP_GETTIME_TIMEVAL(&asoc
->time_entered
);
7429 /* see if a init structure exists in cmsg headers */
7430 struct sctp_initmsg initm
;
7432 if (sctp_find_cmsg(SCTP_INIT
, (void *)&initm
, control
,
7434 /* we have an INIT override of the default */
7435 if (initm
.sinit_max_attempts
)
7436 asoc
->max_init_times
= initm
.sinit_max_attempts
;
7437 if (initm
.sinit_num_ostreams
)
7438 asoc
->pre_open_streams
= initm
.sinit_num_ostreams
;
7439 if (initm
.sinit_max_instreams
)
7440 asoc
->max_inbound_streams
= initm
.sinit_max_instreams
;
7441 if (initm
.sinit_max_init_timeo
)
7442 asoc
->initial_init_rto_max
= initm
.sinit_max_init_timeo
;
7444 if (asoc
->streamoutcnt
< asoc
->pre_open_streams
) {
7445 /* Default is NOT correct */
7447 if (sctp_debug_on
& SCTP_DEBUG_OUTPUT1
) {
7448 kprintf("Ok, defout:%d pre_open:%d\n",
7449 asoc
->streamoutcnt
, asoc
->pre_open_streams
);
7452 FREE(asoc
->strmout
, M_PCB
);
7453 asoc
->strmout
= NULL
;
7454 asoc
->streamoutcnt
= asoc
->pre_open_streams
;
7455 MALLOC(asoc
->strmout
, struct sctp_stream_out
*,
7456 asoc
->streamoutcnt
*
7457 sizeof(struct sctp_stream_out
), M_PCB
,
7459 for (i
= 0; i
< asoc
->streamoutcnt
; i
++) {
7461 * inbound side must be set to 0xffff,
7462 * also NOTE when we get the INIT-ACK
7463 * back (for INIT sender) we MUST
7464 * reduce the count (streamoutcnt) but
7465 * first check if we sent to any of the
7466 * upper streams that were dropped (if
7467 * some were). Those that were dropped
7468 * must be notified to the upper layer
7469 * as failed to send.
7471 asoc
->strmout
[i
].next_sequence_sent
= 0x0;
7472 TAILQ_INIT(&asoc
->strmout
[i
].outqueue
);
7473 asoc
->strmout
[i
].stream_no
= i
;
7474 asoc
->strmout
[i
].next_spoke
.tqe_next
= 0;
7475 asoc
->strmout
[i
].next_spoke
.tqe_prev
= 0;
7479 sctp_send_initiate(inp
, stcb
);
7481 * we may want to dig in after this call and adjust the MTU
7482 * value. It defaulted to 1500 (constant) but the ro structure
7483 * may now have an update and thus we may need to change it
7484 * BEFORE we append the message.
7486 net
= stcb
->asoc
.primary_destination
;
7488 if (create_lock_applied
) {
7489 SCTP_ASOC_CREATE_UNLOCK(inp
);
7490 create_lock_applied
= 0;
7493 if ((SCTP_GET_STATE(asoc
) == SCTP_STATE_COOKIE_WAIT
) ||
7494 (SCTP_GET_STATE(asoc
) == SCTP_STATE_COOKIE_ECHOED
)) {
7497 if ((SCTP_GET_STATE(asoc
) == SCTP_STATE_SHUTDOWN_SENT
) ||
7498 (SCTP_GET_STATE(asoc
) == SCTP_STATE_SHUTDOWN_RECEIVED
) ||
7499 (SCTP_GET_STATE(asoc
) == SCTP_STATE_SHUTDOWN_ACK_SENT
) ||
7500 (asoc
->state
& SCTP_STATE_SHUTDOWN_PENDING
)) {
7502 sctppcbinfo
.mbuf_track
--;
7503 sctp_m_freem(control
);
7506 if ((use_rcvinfo
) &&
7507 (srcv
.sinfo_flags
& MSG_ABORT
)) {
7508 sctp_msg_append(stcb
, net
, m
, &srcv
, flags
);
7516 SCTP_TCB_UNLOCK(stcb
);
7520 if (create_lock_applied
) {
7521 /* we should never hit here with the create lock applied
7524 SCTP_ASOC_CREATE_UNLOCK(inp
);
7525 create_lock_applied
= 0;
7529 if (use_rcvinfo
== 0) {
7530 srcv
= stcb
->asoc
.def_send
;
7534 if (sctp_debug_on
& SCTP_DEBUG_OUTPUT5
) {
7535 kprintf("stream:%d\n", srcv
.sinfo_stream
);
7536 kprintf("flags:%x\n", (u_int
)srcv
.sinfo_flags
);
7537 kprintf("ppid:%d\n", srcv
.sinfo_ppid
);
7538 kprintf("context:%d\n", srcv
.sinfo_context
);
7543 sctppcbinfo
.mbuf_track
--;
7544 sctp_m_freem(control
);
7547 if (net
&& ((srcv
.sinfo_flags
& MSG_ADDR_OVER
))) {
7548 /* we take the override or the unconfirmed */
7551 net
= stcb
->asoc
.primary_destination
;
7553 if ((error
= sctp_msg_append(stcb
, net
, m
, &srcv
, flags
))) {
7554 SCTP_TCB_UNLOCK(stcb
);
7558 if (net
->flight_size
> net
->cwnd
) {
7559 sctp_pegs
[SCTP_SENDTO_FULL_CWND
]++;
7561 } else if (asoc
->ifp_had_enobuf
) {
7562 sctp_pegs
[SCTP_QUEONLY_BURSTLMT
]++;
7565 un_sent
= ((stcb
->asoc
.total_output_queue_size
- stcb
->asoc
.total_flight
) +
7566 ((stcb
->asoc
.chunks_on_out_queue
- stcb
->asoc
.total_flight_count
) * sizeof(struct sctp_data_chunk
)) +
7569 if (((inp
->sctp_flags
& SCTP_PCB_FLAGS_NODELAY
) == 0) &&
7570 (stcb
->asoc
.total_flight
> 0) &&
7571 (un_sent
< (int)stcb
->asoc
.smallest_mtu
)
7574 /* Ok, Nagle is set on and we have
7575 * data outstanding. Don't send anything
7576 * and let the SACK drive out the data.
7578 sctp_pegs
[SCTP_NAGLE_NOQ
]++;
7581 sctp_pegs
[SCTP_NAGLE_OFF
]++;
7584 if ((queue_only
== 0) && stcb
->asoc
.peers_rwnd
) {
7585 /* we can attempt to send too.*/
7587 if (sctp_debug_on
& SCTP_DEBUG_OUTPUT1
) {
7588 kprintf("USR Send calls sctp_chunk_output\n");
7591 #ifdef SCTP_AUDITING_ENABLED
7592 sctp_audit_log(0xC0, 1);
7593 sctp_auditing(6, inp
, stcb
, net
);
7595 sctp_pegs
[SCTP_OUTPUT_FRM_SND
]++;
7596 sctp_chunk_output(inp
, stcb
, 0);
7597 #ifdef SCTP_AUDITING_ENABLED
7598 sctp_audit_log(0xC0, 2);
7599 sctp_auditing(7, inp
, stcb
, net
);
7604 if (sctp_debug_on
& SCTP_DEBUG_OUTPUT1
) {
7605 kprintf("USR Send complete qo:%d prw:%d\n", queue_only
, stcb
->asoc
.peers_rwnd
);
7608 SCTP_TCB_UNLOCK(stcb
);
/*
 * send_forward_tsn(): queue (or refresh) a FORWARD-TSN control chunk
 * (RFC 3758, PR-SCTP) telling the peer to move its cumulative TSN past
 * abandoned DATA chunks. Reuses an existing FORWARD-TSN already on the
 * control queue when possible; otherwise allocates a new chunk + mbuf.
 *
 * NOTE(review): this extract is damaged — the embedded original line
 * numbers skip (e.g. 7624, 7632-7633, 7716-7718, 7755-7758), so closing
 * braces and some statements are missing below. Comments describe only
 * what the visible code shows.
 */
7614 send_forward_tsn(struct sctp_tcb
*stcb
,
7615 struct sctp_association
*asoc
)
7617 struct sctp_tmit_chunk
*chk
;
7618 struct sctp_forward_tsn_chunk
*fwdtsn
;
/* First see if a FORWARD-TSN is already queued; if so just re-mark it. */
7620 TAILQ_FOREACH(chk
, &asoc
->control_send_queue
, sctp_next
) {
7621 if (chk
->rec
.chunk_id
== SCTP_FORWARD_CUM_TSN
) {
7622 /* mark it to unsent */
7623 chk
->sent
= SCTP_DATAGRAM_UNSENT
;
7625 /* Do we correct its output location? */
7626 if (chk
->whoTo
!= asoc
->primary_destination
) {
/* Retarget the queued chunk at the current primary path. */
7627 sctp_free_remote_addr(chk
->whoTo
);
7628 chk
->whoTo
= asoc
->primary_destination
;
7629 chk
->whoTo
->ref_count
++;
7631 goto sctp_fill_in_rest
;
7634 /* Ok if we reach here we must build one */
7635 chk
= (struct sctp_tmit_chunk
*)SCTP_ZONE_GET(sctppcbinfo
.ipi_zone_chunk
);
7639 sctppcbinfo
.ipi_count_chunk
++;
7640 sctppcbinfo
.ipi_gencnt_chunk
++;
7641 chk
->rec
.chunk_id
= SCTP_FORWARD_CUM_TSN
;
7643 MGETHDR(chk
->data
, MB_DONTWAIT
, MT_DATA
);
7644 if (chk
->data
== NULL
) {
/* mbuf allocation failed: undo the refcount and zone allocation. */
7645 chk
->whoTo
->ref_count
--;
7646 SCTP_ZONE_FREE(sctppcbinfo
.ipi_zone_chunk
, chk
);
7647 sctppcbinfo
.ipi_count_chunk
--;
7648 if ((int)sctppcbinfo
.ipi_count_chunk
< 0) {
7649 panic("Chunk count is negative");
7651 sctppcbinfo
.ipi_gencnt_chunk
++;
/* Reserve header space in front of the payload. */
7654 chk
->data
->m_data
+= SCTP_MIN_OVERHEAD
;
7655 chk
->sent
= SCTP_DATAGRAM_UNSENT
;
7657 chk
->whoTo
= asoc
->primary_destination
;
7658 chk
->whoTo
->ref_count
++;
7659 TAILQ_INSERT_TAIL(&asoc
->control_send_queue
, chk
, sctp_next
);
7660 asoc
->ctrl_queue_cnt
++;
7662 /* Here we go through and fill out the part that
7663 * deals with stream/seq of the ones we skip.
7665 chk
->data
->m_pkthdr
.len
= chk
->data
->m_len
= 0;
7667 struct sctp_tmit_chunk
*at
, *tp1
, *last
;
7668 struct sctp_strseq
*strseq
;
7669 unsigned int cnt_of_space
, i
, ovh
;
7670 unsigned int space_needed
;
7671 unsigned int cnt_of_skipped
= 0;
/* Count sent-queue entries marked FORWARD_TSN_SKIP (presumably
 * incrementing cnt_of_skipped in dropped lines — TODO confirm). */
7672 TAILQ_FOREACH(at
, &asoc
->sent_queue
, sctp_next
) {
7673 if (at
->sent
!= SCTP_FORWARD_TSN_SKIP
) {
7674 /* no more to look at */
7677 if (at
->rec
.data
.rcv_flags
& SCTP_DATA_UNORDERED
) {
7678 /* We don't report these */
7683 space_needed
= (sizeof(struct sctp_forward_tsn_chunk
) +
7684 (cnt_of_skipped
* sizeof(struct sctp_strseq
)));
7685 if ((M_TRAILINGSPACE(chk
->data
) < (int)space_needed
) &&
7686 ((chk
->data
->m_flags
& M_EXT
) == 0)) {
7687 /* Need a M_EXT, get one and move
7688 * fwdtsn to data area.
7690 MCLGET(chk
->data
, MB_DONTWAIT
);
7692 cnt_of_space
= M_TRAILINGSPACE(chk
->data
);
/* Overhead depends on whether the endpoint may use IPv6. */
7694 if (stcb
->sctp_ep
->sctp_flags
& SCTP_PCB_FLAGS_BOUND_V6
) {
7695 ovh
= SCTP_MIN_OVERHEAD
;
7697 ovh
= SCTP_MIN_V4_OVERHEAD
;
7699 if (cnt_of_space
> (asoc
->smallest_mtu
-ovh
)) {
7700 /* trim to a mtu size */
7701 cnt_of_space
= asoc
->smallest_mtu
- ovh
;
7703 if (cnt_of_space
< space_needed
) {
7704 /* ok we must trim down the chunk by lowering
7705 * the advance peer ack point.
7707 cnt_of_skipped
= (cnt_of_space
-
7708 ((sizeof(struct sctp_forward_tsn_chunk
))/
7709 sizeof(struct sctp_strseq
)));
7710 /* Go through and find the TSN that
7711 * will be the one we report.
7713 at
= TAILQ_FIRST(&asoc
->sent_queue
);
7714 for (i
= 0; i
< cnt_of_skipped
; i
++) {
7715 tp1
= TAILQ_NEXT(at
, sctp_next
);
7719 /* last now points to last one I can report, update peer ack point */
7720 asoc
->advanced_peer_ack_point
= last
->rec
.data
.TSN_seq
;
7721 space_needed
-= (cnt_of_skipped
* sizeof(struct sctp_strseq
));
7723 chk
->send_size
= space_needed
;
7724 /* Setup the chunk */
7725 fwdtsn
= mtod(chk
->data
, struct sctp_forward_tsn_chunk
*);
7726 fwdtsn
->ch
.chunk_length
= htons(chk
->send_size
);
7727 fwdtsn
->ch
.chunk_flags
= 0;
7728 fwdtsn
->ch
.chunk_type
= SCTP_FORWARD_CUM_TSN
;
7729 fwdtsn
->new_cumulative_tsn
= htonl(asoc
->advanced_peer_ack_point
);
7730 chk
->send_size
= (sizeof(struct sctp_forward_tsn_chunk
) +
7731 (cnt_of_skipped
* sizeof(struct sctp_strseq
)));
7732 chk
->data
->m_pkthdr
.len
= chk
->data
->m_len
= chk
->send_size
;
7734 /* Move pointer to after the fwdtsn and transfer to
7735 * the strseq pointer.
7737 strseq
= (struct sctp_strseq
*)fwdtsn
;
7739 * Now populate the strseq list. This is done blindly
7740 * without pulling out duplicate stream info. This is
7741 * inefficent but won't harm the process since the peer
7742 * will look at these in sequence and will thus release
7743 * anything. It could mean we exceed the PMTU and chop
7744 * off some that we could have included.. but this is
7745 * unlikely (aka 1432/4 would mean 300+ stream seq's would
7746 * have to be reported in one FWD-TSN. With a bit of work
7747 * we can later FIX this to optimize and pull out duplcates..
7748 * but it does add more overhead. So for now... not!
7750 at
= TAILQ_FIRST(&asoc
->sent_queue
);
7751 for (i
= 0; i
< cnt_of_skipped
; i
++) {
7752 tp1
= TAILQ_NEXT(at
, sctp_next
);
7753 if (at
->rec
.data
.rcv_flags
& SCTP_DATA_UNORDERED
) {
7754 /* We don't report these */
/* NOTE(review): stream/seq fields are byte-swapped with ntohs() here;
 * on-wire fields should use htons() — on big-endian they are identical,
 * but verify against the upstream KAME source. */
7759 strseq
->stream
= ntohs(at
->rec
.data
.stream_number
);
7760 strseq
->sequence
= ntohs(at
->rec
.data
.stream_seq
);
/*
 * sctp_send_sack(): build a SACK chunk (RFC 4960 §3.3.4) reflecting the
 * current receive mapping array — cumulative TSN ack, advertised rwnd,
 * gap-ack blocks and duplicate-TSN reports — and queue it on the
 * association's control send queue. Any SACK already queued is removed
 * and reused first.
 *
 * NOTE(review): extract is damaged — embedded line numbers skip, so
 * braces and some statements are missing. Comments only restate what is
 * visible.
 */
7770 sctp_send_sack(struct sctp_tcb
*stcb
)
7773 * Queue up a SACK in the control queue. We must first check to
7774 * see if a SACK is somehow on the control queue. If so, we will
7775 * take and and remove the old one.
7777 struct sctp_association
*asoc
;
7778 struct sctp_tmit_chunk
*chk
, *a_chk
;
7779 struct sctp_sack_chunk
*sack
;
7780 struct sctp_gap_ack_block
*gap_descriptor
;
7783 unsigned int i
, maxi
, seeing_ones
, m_size
;
7784 unsigned int num_gap_blocks
, space
;
7790 if (asoc
->last_data_chunk_from
== NULL
) {
7791 /* Hmm we never received anything */
7794 sctp_set_rwnd(stcb
, asoc
);
/* Reuse any SACK already on the control queue. */
7795 TAILQ_FOREACH(chk
, &asoc
->control_send_queue
, sctp_next
) {
7796 if (chk
->rec
.chunk_id
== SCTP_SELECTIVE_ACK
) {
7797 /* Hmm, found a sack already on queue, remove it */
7798 TAILQ_REMOVE(&asoc
->control_send_queue
, chk
, sctp_next
);
/* NOTE(review): counter is incremented after a TAILQ_REMOVE — a
 * decrement would be expected; confirm against upstream source. */
7799 asoc
->ctrl_queue_cnt
++;
7802 sctp_m_freem(a_chk
->data
);
7804 sctp_free_remote_addr(a_chk
->whoTo
);
7805 a_chk
->whoTo
= NULL
;
7809 if (a_chk
== NULL
) {
7810 a_chk
= (struct sctp_tmit_chunk
*)SCTP_ZONE_GET(sctppcbinfo
.ipi_zone_chunk
);
7811 if (a_chk
== NULL
) {
7812 /* No memory so we drop the idea, and set a timer */
7813 sctp_timer_stop(SCTP_TIMER_TYPE_RECV
,
7814 stcb
->sctp_ep
, stcb
, NULL
);
7815 sctp_timer_start(SCTP_TIMER_TYPE_RECV
,
7816 stcb
->sctp_ep
, stcb
, NULL
);
7819 sctppcbinfo
.ipi_count_chunk
++;
7820 sctppcbinfo
.ipi_gencnt_chunk
++;
7821 a_chk
->rec
.chunk_id
= SCTP_SELECTIVE_ACK
;
7824 a_chk
->snd_count
= 0;
7825 a_chk
->send_size
= 0; /* fill in later */
7826 a_chk
->sent
= SCTP_DATAGRAM_UNSENT
;
/* m_size = number of bits in the mapping array (bytes << 3). */
7827 m_size
= (asoc
->mapping_array_size
<< 3);
7829 if ((asoc
->numduptsns
) ||
7830 (asoc
->last_data_chunk_from
->dest_state
& SCTP_ADDR_NOT_REACHABLE
)
7832 /* Ok, we have some duplicates or the destination for the
7833 * sack is unreachable, lets see if we can select an alternate
7834 * than asoc->last_data_chunk_from
7836 if ((!(asoc
->last_data_chunk_from
->dest_state
&
7837 SCTP_ADDR_NOT_REACHABLE
)) &&
7838 (asoc
->used_alt_onsack
> 2)) {
7839 /* We used an alt last time, don't this time */
7840 a_chk
->whoTo
= NULL
;
7842 asoc
->used_alt_onsack
++;
7843 a_chk
->whoTo
= sctp_find_alternate_net(stcb
, asoc
->last_data_chunk_from
);
7845 if (a_chk
->whoTo
== NULL
) {
7846 /* Nope, no alternate */
7847 a_chk
->whoTo
= asoc
->last_data_chunk_from
;
7848 asoc
->used_alt_onsack
= 0;
7851 /* No duplicates so we use the last
7852 * place we received data from.
7855 if (asoc
->last_data_chunk_from
== NULL
) {
7856 kprintf("Huh, last_data_chunk_from is null when we want to sack??\n");
7859 asoc
->used_alt_onsack
= 0;
7860 a_chk
->whoTo
= asoc
->last_data_chunk_from
;
7863 a_chk
->whoTo
->ref_count
++;
7865 /* Ok now lets formulate a MBUF with our sack */
7866 MGETHDR(a_chk
->data
, MB_DONTWAIT
, MT_DATA
);
7867 if ((a_chk
->data
== NULL
) ||
7868 (a_chk
->whoTo
== NULL
)) {
7869 /* rats, no mbuf memory */
7871 /* was a problem with the destination */
7872 sctp_m_freem(a_chk
->data
);
7875 a_chk
->whoTo
->ref_count
--;
7876 SCTP_ZONE_FREE(sctppcbinfo
.ipi_zone_chunk
, a_chk
);
7877 sctppcbinfo
.ipi_count_chunk
--;
7878 if ((int)sctppcbinfo
.ipi_count_chunk
< 0) {
7879 panic("Chunk count is negative");
7881 sctppcbinfo
.ipi_gencnt_chunk
++;
/* Fall back to the RECV timer and try again later. */
7882 sctp_timer_stop(SCTP_TIMER_TYPE_RECV
,
7883 stcb
->sctp_ep
, stcb
, NULL
);
7884 sctp_timer_start(SCTP_TIMER_TYPE_RECV
,
7885 stcb
->sctp_ep
, stcb
, NULL
);
7888 /* First count the number of gap ack blocks we need */
7889 if (asoc
->highest_tsn_inside_map
== asoc
->cumulative_tsn
) {
7890 /* We know if there are none above the cum-ack we
7891 * have everything with NO gaps
7895 /* Ok we must count how many gaps we
/* maxi = bit index of highest received TSN, handling TSN wraparound. */
7899 if (asoc
->highest_tsn_inside_map
>= asoc
->mapping_array_base_tsn
) {
7900 maxi
= (asoc
->highest_tsn_inside_map
- asoc
->mapping_array_base_tsn
);
7902 maxi
= (asoc
->highest_tsn_inside_map
+ (MAX_TSN
- asoc
->mapping_array_base_tsn
) + 1);
7904 if (maxi
> m_size
) {
7905 /* impossible but who knows, someone is playing with us :> */
7907 kprintf("GAK maxi:%d > m_size:%d came out higher than allowed htsn:%u base:%u cumack:%u\n",
7910 asoc
->highest_tsn_inside_map
,
7911 asoc
->mapping_array_base_tsn
,
7912 asoc
->cumulative_tsn
7918 if (asoc
->cumulative_tsn
>= asoc
->mapping_array_base_tsn
) {
7919 start
= (asoc
->cumulative_tsn
- asoc
->mapping_array_base_tsn
);
7921 /* Set it so we start at 0 */
7924 /* Ok move start up one to look at the NEXT past the cum-ack */
/* First pass: count the gap-ack blocks (state machine over the bitmap). */
7926 for (i
= start
; i
<= maxi
; i
++) {
7928 /* while seeing ones I must
7929 * transition back to 0 before
7930 * finding the next gap and
7931 * counting the segment.
7933 if (SCTP_IS_TSN_PRESENT(asoc
->mapping_array
, i
) == 0) {
7937 if (SCTP_IS_TSN_PRESENT(asoc
->mapping_array
, i
)) {
7944 if (num_gap_blocks
== 0) {
7946 * Traveled all of the bits and NO one,
7949 if (compare_with_wrap(asoc
->cumulative_tsn
, asoc
->highest_tsn_inside_map
, MAX_TSN
)) {
7950 asoc
->highest_tsn_inside_map
= asoc
->cumulative_tsn
;
7951 #ifdef SCTP_MAP_LOGGING
7952 sctp_log_map(0, 4, asoc
->highest_tsn_inside_map
, SCTP_MAP_SLIDE_RESULT
);
7958 /* Now calculate the space needed */
7959 space
= (sizeof(struct sctp_sack_chunk
) +
7960 (num_gap_blocks
* sizeof(struct sctp_gap_ack_block
)) +
7961 (asoc
->numduptsns
* sizeof(int32_t))
7963 if (space
> (asoc
->smallest_mtu
-SCTP_MAX_OVERHEAD
)) {
7964 /* Reduce the size of the sack to fit */
7966 calc
= (asoc
->smallest_mtu
- SCTP_MAX_OVERHEAD
);
7967 calc
-= sizeof(struct sctp_gap_ack_block
);
7968 fit
= calc
/sizeof(struct sctp_gap_ack_block
);
7969 if (fit
> (int)num_gap_blocks
) {
7970 /* discard some dups */
7971 asoc
->numduptsns
= (fit
- num_gap_blocks
);
7973 /* discard all dups and some gaps */
7974 num_gap_blocks
= fit
;
7975 asoc
->numduptsns
= 0;
7978 space
= (sizeof(struct sctp_sack_chunk
) +
7979 (num_gap_blocks
* sizeof(struct sctp_gap_ack_block
)) +
7980 (asoc
->numduptsns
* sizeof(int32_t))
7985 if ((space
+SCTP_MIN_OVERHEAD
) > MHLEN
) {
7986 /* We need a cluster */
7987 MCLGET(a_chk
->data
, MB_DONTWAIT
);
7988 if ((a_chk
->data
->m_flags
& M_EXT
) != M_EXT
) {
7989 /* can't get a cluster
7990 * give up and try later.
7993 sctp_m_freem(a_chk
->data
);
7995 a_chk
->whoTo
->ref_count
--;
7996 SCTP_ZONE_FREE(sctppcbinfo
.ipi_zone_chunk
, a_chk
);
7997 sctppcbinfo
.ipi_count_chunk
--;
7998 if ((int)sctppcbinfo
.ipi_count_chunk
< 0) {
7999 panic("Chunk count is negative");
8001 sctppcbinfo
.ipi_gencnt_chunk
++;
8002 sctp_timer_stop(SCTP_TIMER_TYPE_RECV
,
8003 stcb
->sctp_ep
, stcb
, NULL
);
8004 sctp_timer_start(SCTP_TIMER_TYPE_RECV
,
8005 stcb
->sctp_ep
, stcb
, NULL
);
8010 /* ok, lets go through and fill it in */
8011 a_chk
->data
->m_data
+= SCTP_MIN_OVERHEAD
;
8012 sack
= mtod(a_chk
->data
, struct sctp_sack_chunk
*);
8013 sack
->ch
.chunk_type
= SCTP_SELECTIVE_ACK
;
8014 sack
->ch
.chunk_flags
= asoc
->receiver_nonce_sum
& SCTP_SACK_NONCE_SUM
;
8015 sack
->sack
.cum_tsn_ack
= htonl(asoc
->cumulative_tsn
);
8016 sack
->sack
.a_rwnd
= htonl(asoc
->my_rwnd
);
8017 asoc
->my_last_reported_rwnd
= asoc
->my_rwnd
;
8018 sack
->sack
.num_gap_ack_blks
= htons(num_gap_blocks
);
8019 sack
->sack
.num_dup_tsns
= htons(asoc
->numduptsns
);
8021 a_chk
->send_size
= (sizeof(struct sctp_sack_chunk
) +
8022 (num_gap_blocks
* sizeof(struct sctp_gap_ack_block
)) +
8023 (asoc
->numduptsns
* sizeof(int32_t)));
8024 a_chk
->data
->m_pkthdr
.len
= a_chk
->data
->m_len
= a_chk
->send_size
;
8025 sack
->ch
.chunk_length
= htons(a_chk
->send_size
);
/* Gap descriptors start immediately after the fixed SACK header. */
8027 gap_descriptor
= (struct sctp_gap_ack_block
*)((caddr_t
)sack
+ sizeof(struct sctp_sack_chunk
));
/* Second pass: emit start/end offsets (relative to cum-ack) per block. */
8029 for (i
= start
; i
<= maxi
; i
++) {
8030 if (num_gap_blocks
== 0) {
8034 /* while seeing Ones I must
8035 * transition back to 0 before
8036 * finding the next gap
8038 if (SCTP_IS_TSN_PRESENT(asoc
->mapping_array
, i
) == 0) {
8039 gap_descriptor
->end
= htons(((uint16_t)(i
-start
)));
8045 if (SCTP_IS_TSN_PRESENT(asoc
->mapping_array
, i
)) {
8046 gap_descriptor
->start
= htons(((uint16_t)(i
+1-start
)));
8047 /* advance struct to next pointer */
8052 if (num_gap_blocks
) {
8053 /* special case where the array is all 1's
8054 * to the end of the array.
8056 gap_descriptor
->end
= htons(((uint16_t)((i
-start
))));
8059 /* now we must add any dups we are going to report. */
8060 if (asoc
->numduptsns
) {
8061 dup
= (uint32_t *)gap_descriptor
;
8062 for (i
= 0; i
< asoc
->numduptsns
; i
++) {
8063 *dup
= htonl(asoc
->dup_tsns
[i
]);
8066 asoc
->numduptsns
= 0;
8068 /* now that the chunk is prepared queue it to the control
8071 TAILQ_INSERT_TAIL(&asoc
->control_send_queue
, a_chk
, sctp_next
);
8072 asoc
->ctrl_queue_cnt
++;
8073 sctp_pegs
[SCTP_PEG_SACKS_SENT
]++;
/*
 * sctp_send_abort_tcb(): build an ABORT message for an existing
 * association and hand it straight to the low-level output path
 * (bypassing the control queue). 'operr' is an optional mbuf chain of
 * error-cause TLVs appended after the ABORT chunk header.
 *
 * NOTE(review): extract is damaged — lines 8086-8088, 8093-8101, 8105
 * (e.g. the 'sz' computation and chunk_length continuation) are missing.
 */
8078 sctp_send_abort_tcb(struct sctp_tcb
*stcb
, struct mbuf
*operr
)
8080 struct mbuf
*m_abort
;
8081 struct sctp_abort_msg
*abort_m
;
8084 MGETHDR(m_abort
, MB_DONTWAIT
, MT_HEADER
);
8085 if (m_abort
== NULL
) {
/* Leave room for IP/SCTP headers in front of the message. */
8089 m_abort
->m_data
+= SCTP_MIN_OVERHEAD
;
8090 abort_m
= mtod(m_abort
, struct sctp_abort_msg
*);
8091 m_abort
->m_len
= sizeof(struct sctp_abort_msg
);
/* Chain the caller-supplied error causes behind the header mbuf. */
8092 m_abort
->m_next
= operr
;
8102 abort_m
->msg
.ch
.chunk_type
= SCTP_ABORT_ASSOCIATION
;
8103 abort_m
->msg
.ch
.chunk_flags
= 0;
8104 abort_m
->msg
.ch
.chunk_length
= htons(sizeof(struct sctp_abort_chunk
) +
/* Common SCTP header: our port, peer port, peer's verification tag. */
8106 abort_m
->sh
.src_port
= stcb
->sctp_ep
->sctp_lport
;
8107 abort_m
->sh
.dest_port
= stcb
->rport
;
8108 abort_m
->sh
.v_tag
= htonl(stcb
->asoc
.peer_vtag
);
/* Checksum is computed later by the output path. */
8109 abort_m
->sh
.checksum
= 0;
8110 m_abort
->m_pkthdr
.len
= m_abort
->m_len
+ sz
;
8111 m_abort
->m_pkthdr
.rcvif
= 0;
/* Send on the primary destination immediately. */
8112 sctp_lowlevel_chunk_output(stcb
->sctp_ep
, stcb
,
8113 stcb
->asoc
.primary_destination
,
8114 (struct sockaddr
*)&stcb
->asoc
.primary_destination
->ro
._l_addr
,
8115 m_abort
, 1, 0, NULL
, 0);
/*
 * sctp_send_shutdown_complete(): build a SHUTDOWN-COMPLETE message for
 * an association that finished the shutdown handshake and send it
 * directly to 'net'. For TCP-model sockets, also mark the endpoint
 * disconnected and drain the send buffer.
 *
 * NOTE(review): extract is damaged — some lines (e.g. 8130-8132, 8142,
 * 8147, 8153+) are missing from this view.
 */
8119 sctp_send_shutdown_complete(struct sctp_tcb
*stcb
,
8120 struct sctp_nets
*net
)
8123 /* formulate and SEND a SHUTDOWN-COMPLETE */
8124 struct mbuf
*m_shutdown_comp
;
8125 struct sctp_shutdown_complete_msg
*comp_cp
;
8127 m_shutdown_comp
= NULL
;
8128 MGETHDR(m_shutdown_comp
, MB_DONTWAIT
, MT_HEADER
);
8129 if (m_shutdown_comp
== NULL
) {
/* Reserve space for the largest (IPv6) IP header in front. */
8133 m_shutdown_comp
->m_data
+= sizeof(struct ip6_hdr
);
8134 comp_cp
= mtod(m_shutdown_comp
, struct sctp_shutdown_complete_msg
*);
8135 comp_cp
->shut_cmp
.ch
.chunk_type
= SCTP_SHUTDOWN_COMPLETE
;
8136 comp_cp
->shut_cmp
.ch
.chunk_flags
= 0;
8137 comp_cp
->shut_cmp
.ch
.chunk_length
= htons(sizeof(struct sctp_shutdown_complete_chunk
));
8138 comp_cp
->sh
.src_port
= stcb
->sctp_ep
->sctp_lport
;
8139 comp_cp
->sh
.dest_port
= stcb
->rport
;
8140 comp_cp
->sh
.v_tag
= htonl(stcb
->asoc
.peer_vtag
);
/* Checksum is filled in by the output path. */
8141 comp_cp
->sh
.checksum
= 0;
8143 m_shutdown_comp
->m_pkthdr
.len
= m_shutdown_comp
->m_len
= sizeof(struct sctp_shutdown_complete_msg
);
8144 m_shutdown_comp
->m_pkthdr
.rcvif
= 0;
8145 sctp_lowlevel_chunk_output(stcb
->sctp_ep
, stcb
, net
,
8146 (struct sockaddr
*)&net
->ro
._l_addr
, m_shutdown_comp
,
/* TCP-model sockets: reflect the shutdown up to the socket layer. */
8148 if ((stcb
->sctp_ep
->sctp_flags
& SCTP_PCB_FLAGS_TCPTYPE
) ||
8149 (stcb
->sctp_ep
->sctp_flags
& SCTP_PCB_FLAGS_IN_TCPPOOL
)) {
8150 stcb
->sctp_ep
->sctp_flags
&= ~SCTP_PCB_FLAGS_CONNECTED
;
8151 stcb
->sctp_ep
->sctp_socket
->so_snd
.ssb_cc
= 0;
8152 soisdisconnected(stcb
->sctp_ep
->sctp_socket
);
/*
 * sctp_send_shutdown_complete2(): reply with SHUTDOWN-COMPLETE (with the
 * T-bit / SCTP_HAD_NO_TCB flag) when a SHUTDOWN-ACK arrives for which we
 * have no TCB. Builds a fresh IPv4 or IPv6 packet by mirroring the
 * addresses/ports of the received packet 'm' and sends it raw via
 * ip_output()/ip6_output().
 *
 * NOTE(review): extract is damaged — declarations of 'mout', 'ro' and
 * 'offset_out', several #else/#endif lines and closing braces are
 * missing from this view.
 */
8158 sctp_send_shutdown_complete2(struct mbuf
*m
, int iphlen
, struct sctphdr
*sh
)
8160 /* formulate and SEND a SHUTDOWN-COMPLETE */
8162 struct ip
*iph
, *iph_out
;
8163 struct ip6_hdr
*ip6
, *ip6_out
;
8165 struct sctp_shutdown_complete_msg
*comp_cp
;
8167 MGETHDR(mout
, MB_DONTWAIT
, MT_HEADER
);
8172 iph
= mtod(m
, struct ip
*);
/* Branch on the IP version of the *received* packet. */
8176 if (iph
->ip_v
== IPVERSION
) {
8177 mout
->m_len
= sizeof(struct ip
) +
8178 sizeof(struct sctp_shutdown_complete_msg
);
8179 mout
->m_next
= NULL
;
8180 iph_out
= mtod(mout
, struct ip
*);
8182 /* Fill in the IP header for the ABORT */
8183 iph_out
->ip_v
= IPVERSION
;
8184 iph_out
->ip_hl
= (sizeof(struct ip
)/4);
8185 iph_out
->ip_tos
= (u_char
)0;
8187 iph_out
->ip_off
= 0;
8188 iph_out
->ip_ttl
= MAXTTL
;
8189 iph_out
->ip_p
= IPPROTO_SCTP
;
/* Swap src/dst so the reply goes back to the sender. */
8190 iph_out
->ip_src
.s_addr
= iph
->ip_dst
.s_addr
;
8191 iph_out
->ip_dst
.s_addr
= iph
->ip_src
.s_addr
;
8193 /* let IP layer calculate this */
8194 iph_out
->ip_sum
= 0;
8195 offset_out
+= sizeof(*iph_out
);
8196 comp_cp
= (struct sctp_shutdown_complete_msg
*)(
8197 (caddr_t
)iph_out
+ offset_out
);
8198 } else if (iph
->ip_v
== (IPV6_VERSION
>> 4)) {
8199 ip6
= (struct ip6_hdr
*)iph
;
8200 mout
->m_len
= sizeof(struct ip6_hdr
) +
8201 sizeof(struct sctp_shutdown_complete_msg
);
8202 mout
->m_next
= NULL
;
8203 ip6_out
= mtod(mout
, struct ip6_hdr
*);
8205 /* Fill in the IPv6 header for the ABORT */
8206 ip6_out
->ip6_flow
= ip6
->ip6_flow
;
8207 ip6_out
->ip6_hlim
= ip6_defhlim
;
8208 ip6_out
->ip6_nxt
= IPPROTO_SCTP
;
8209 ip6_out
->ip6_src
= ip6
->ip6_dst
;
8210 ip6_out
->ip6_dst
= ip6
->ip6_src
;
8211 ip6_out
->ip6_plen
= mout
->m_len
;
8212 offset_out
+= sizeof(*ip6_out
);
8213 comp_cp
= (struct sctp_shutdown_complete_msg
*)(
8214 (caddr_t
)ip6_out
+ offset_out
);
8216 /* Currently not supported. */
8220 /* Now copy in and fill in the ABORT tags etc. */
/* Mirror ports; echo the peer's v_tag (T-bit reply per RFC 4960 §8.4). */
8221 comp_cp
->sh
.src_port
= sh
->dest_port
;
8222 comp_cp
->sh
.dest_port
= sh
->src_port
;
8223 comp_cp
->sh
.checksum
= 0;
8224 comp_cp
->sh
.v_tag
= sh
->v_tag
;
8225 comp_cp
->shut_cmp
.ch
.chunk_flags
= SCTP_HAD_NO_TCB
;
8226 comp_cp
->shut_cmp
.ch
.chunk_type
= SCTP_SHUTDOWN_COMPLETE
;
8227 comp_cp
->shut_cmp
.ch
.chunk_length
= htons(sizeof(struct sctp_shutdown_complete_chunk
));
8229 mout
->m_pkthdr
.len
= mout
->m_len
;
/* Skip the checksum for loopback traffic when so configured. */
8231 if ((sctp_no_csum_on_loopback
) &&
8232 (m
->m_pkthdr
.rcvif
) &&
8233 (m
->m_pkthdr
.rcvif
->if_type
== IFT_LOOP
)) {
8234 comp_cp
->sh
.checksum
= 0;
8236 comp_cp
->sh
.checksum
= sctp_calculate_sum(mout
, NULL
, offset_out
);
8239 /* zap the rcvif, it should be null */
8240 mout
->m_pkthdr
.rcvif
= 0;
8241 /* zap the stack pointer to the route */
8242 if (iph_out
!= NULL
) {
8245 bzero(&ro
, sizeof ro
);
8247 if (sctp_debug_on
& SCTP_DEBUG_OUTPUT2
) {
8248 kprintf("sctp_shutdown_complete2 calling ip_output:\n");
8249 sctp_print_address_pkt(iph_out
, &comp_cp
->sh
);
8252 /* set IPv4 length */
8253 #if defined(__FreeBSD__)
8254 iph_out
->ip_len
= mout
->m_pkthdr
.len
;
8256 iph_out
->ip_len
= htons(mout
->m_pkthdr
.len
);
8259 ip_output(mout
, 0, &ro
, IP_RAWOUTPUT
, NULL
8260 #if defined(__OpenBSD__) || (defined(__FreeBSD__) && __FreeBSD_version >= 480000) \
8261 || defined(__NetBSD__) || defined(__DragonFly__)
8265 /* Free the route if we got one back */
8268 } else if (ip6_out
!= NULL
) {
8269 #ifdef NEW_STRUCT_ROUTE
8272 struct route_in6 ro
;
8275 bzero(&ro
, sizeof(ro
));
8277 if (sctp_debug_on
& SCTP_DEBUG_OUTPUT2
) {
8278 kprintf("sctp_shutdown_complete2 calling ip6_output:\n");
8279 sctp_print_address_pkt((struct ip
*)ip6_out
,
8283 ip6_output(mout
, NULL
, &ro
, 0, NULL
, NULL
8284 #if defined(__NetBSD__)
8287 #if (defined(__FreeBSD__) && __FreeBSD_version >= 480000) || defined(__DragonFly__)
8291 /* Free the route if we got one back */
8295 sctp_pegs
[SCTP_DATAGRAMS_SENT
]++;
/*
 * sctp_select_hb_destination(): pick the destination address most in
 * need of a heartbeat — the reachable, HB-enabled net with the longest
 * idle time (>= its RTO, or forced by the unconfirmed-state override).
 * Also stores the current time into *now and stamps the winner's
 * last_sent_time. Returns the chosen net (NULL selection paths are in
 * lines missing from this extract).
 *
 * NOTE(review): extract is damaged — initializations of hnet/highest_ms,
 * several closing braces and the final return are missing from view.
 */
8299 static struct sctp_nets
*
8300 sctp_select_hb_destination(struct sctp_tcb
*stcb
, struct timeval
*now
)
8302 struct sctp_nets
*net
, *hnet
;
8303 int ms_goneby
, highest_ms
, state_overide
=0;
/* Caller receives the sampled wall-clock time through 'now'. */
8305 SCTP_GETTIME_TIMEVAL(now
);
8308 TAILQ_FOREACH(net
, &stcb
->asoc
.nets
, sctp_next
) {
8310 ((net
->dest_state
& SCTP_ADDR_NOHB
) && ((net
->dest_state
& SCTP_ADDR_UNCONFIRMED
) == 0)) ||
8311 (net
->dest_state
& SCTP_ADDR_OUT_OF_SCOPE
)
8313 /* Skip this guy from consideration if HB is off AND its confirmed*/
8315 if (sctp_debug_on
& SCTP_DEBUG_OUTPUT4
) {
8316 kprintf("Skipping net:%p state:%d nohb/out-of-scope\n",
8317 net
, net
->dest_state
);
8322 if (sctp_destination_is_reachable(stcb
, (struct sockaddr
*)&net
->ro
._l_addr
) == 0) {
8323 /* skip this dest net from consideration */
8325 if (sctp_debug_on
& SCTP_DEBUG_OUTPUT4
) {
8326 kprintf("Skipping net:%p reachable NOT\n",
8332 if (net
->last_sent_time
.tv_sec
) {
8333 /* Sent to so we subtract */
8334 ms_goneby
= (now
->tv_sec
- net
->last_sent_time
.tv_sec
) * 1000;
8336 /* Never been sent to */
/* Treat a never-used path as maximally idle. */
8337 ms_goneby
= 0x7fffffff;
8339 if (sctp_debug_on
& SCTP_DEBUG_OUTPUT4
) {
8340 kprintf("net:%p ms_goneby:%d\n",
8344 /* When the address state is unconfirmed but still considered reachable, we
8345 * HB at a higher rate. Once it goes confirmed OR reaches the "unreachable"
8346 * state, thenw we cut it back to HB at a more normal pace.
8348 if ((net
->dest_state
& (SCTP_ADDR_UNCONFIRMED
|SCTP_ADDR_NOT_REACHABLE
)) == SCTP_ADDR_UNCONFIRMED
) {
/* Keep the net that has been idle longest past its RTO. */
8354 if ((((unsigned int)ms_goneby
>= net
->RTO
) || (state_overide
)) &&
8355 (ms_goneby
> highest_ms
)) {
8356 highest_ms
= ms_goneby
;
8359 if (sctp_debug_on
& SCTP_DEBUG_OUTPUT4
) {
8360 kprintf("net:%p is the new high\n",
8367 ((hnet
->dest_state
& (SCTP_ADDR_UNCONFIRMED
|SCTP_ADDR_NOT_REACHABLE
)) == SCTP_ADDR_UNCONFIRMED
)) {
8373 if (highest_ms
&& (((unsigned int)highest_ms
>= hnet
->RTO
) || state_overide
)) {
8374 /* Found the one with longest delay bounds
8375 * OR it is unconfirmed and still not marked
8379 if (sctp_debug_on
& SCTP_DEBUG_OUTPUT4
) {
8380 kprintf("net:%p is the hb winner -",
8383 sctp_print_address((struct sockaddr
*)&hnet
->ro
._l_addr
);
8388 /* update the timer now */
8389 hnet
->last_sent_time
= *now
;
/*
 * sctp_send_hb(): build a HEARTBEAT chunk and queue it on the control
 * send queue. If user_req == 0 the destination is chosen by
 * sctp_select_hb_destination() and the HB timer is restarted; otherwise
 * the caller-supplied 'u_net' drives the send (selection code for u_net
 * falls in lines missing from this extract).
 *
 * NOTE(review): extract is damaged — embedded line numbers skip (e.g.
 * 8408, 8436-8447, 8453-8456, 8484-8485), so braces and some statements
 * are absent below.
 */
8397 sctp_send_hb(struct sctp_tcb
*stcb
, int user_req
, struct sctp_nets
*u_net
)
8399 struct sctp_tmit_chunk
*chk
;
8400 struct sctp_nets
*net
;
8401 struct sctp_heartbeat_chunk
*hb
;
8403 struct sockaddr_in
*sin
;
8404 struct sockaddr_in6
*sin6
;
8406 if (user_req
== 0) {
8407 net
= sctp_select_hb_destination(stcb
, &now
);
8409 /* All our busy none to send to, just
8410 * start the timer again.
8412 if (stcb
->asoc
.state
== 0) {
8415 sctp_timer_start(SCTP_TIMER_TYPE_HEARTBEAT
,
8421 #ifndef SCTP_USE_ALLMAN_BURST
8423 /* found one idle.. decay cwnd on this one
8424 * by 1/2 if none outstanding.
/* Clamp the decayed cwnd: 4*MTU floor for local, 2*MTU otherwise. */
8427 if (net
->flight_size
== 0) {
8429 if (net
->addr_is_local
) {
8430 if (net
->cwnd
< (net
->mtu
*4)) {
8431 net
->cwnd
= net
->mtu
* 4;
8434 if (net
->cwnd
< (net
->mtu
* 2)) {
8435 net
->cwnd
= net
->mtu
* 2;
8448 SCTP_GETTIME_TIMEVAL(&now
);
8450 sin
= (struct sockaddr_in
*)&net
->ro
._l_addr
;
/* Only AF_INET and AF_INET6 destinations are supported. */
8451 if (sin
->sin_family
!= AF_INET
) {
8452 if (sin
->sin_family
!= AF_INET6
) {
8457 chk
= (struct sctp_tmit_chunk
*)SCTP_ZONE_GET(sctppcbinfo
.ipi_zone_chunk
);
8460 if (sctp_debug_on
& SCTP_DEBUG_OUTPUT4
) {
8461 kprintf("Gak, can't get a chunk for hb\n");
8466 sctppcbinfo
.ipi_gencnt_chunk
++;
8467 sctppcbinfo
.ipi_count_chunk
++;
8468 chk
->rec
.chunk_id
= SCTP_HEARTBEAT_REQUEST
;
8469 chk
->asoc
= &stcb
->asoc
;
8470 chk
->send_size
= sizeof(struct sctp_heartbeat_chunk
);
8471 MGETHDR(chk
->data
, MB_DONTWAIT
, MT_DATA
);
8472 if (chk
->data
== NULL
) {
/* mbuf allocation failed: release the chunk back to the zone. */
8473 SCTP_ZONE_FREE(sctppcbinfo
.ipi_zone_chunk
, chk
);
8474 sctppcbinfo
.ipi_count_chunk
--;
8475 if ((int)sctppcbinfo
.ipi_count_chunk
< 0) {
8476 panic("Chunk count is negative");
8478 sctppcbinfo
.ipi_gencnt_chunk
++;
8481 chk
->data
->m_data
+= SCTP_MIN_OVERHEAD
;
8482 chk
->data
->m_pkthdr
.len
= chk
->data
->m_len
= chk
->send_size
;
8483 chk
->sent
= SCTP_DATAGRAM_UNSENT
;
8486 chk
->whoTo
->ref_count
++;
8487 /* Now we have a mbuf that we can fill in with the details */
8488 hb
= mtod(chk
->data
, struct sctp_heartbeat_chunk
*);
8490 /* fill out chunk header */
8491 hb
->ch
.chunk_type
= SCTP_HEARTBEAT_REQUEST
;
8492 hb
->ch
.chunk_flags
= 0;
8493 hb
->ch
.chunk_length
= htons(chk
->send_size
);
8494 /* Fill out hb parameter */
8495 hb
->heartbeat
.hb_info
.ph
.param_type
= htons(SCTP_HEARTBEAT_INFO
);
8496 hb
->heartbeat
.hb_info
.ph
.param_length
= htons(sizeof(struct sctp_heartbeat_info_param
));
/* Timestamp lets the HB-ACK handler compute an RTT sample. */
8497 hb
->heartbeat
.hb_info
.time_value_1
= now
.tv_sec
;
8498 hb
->heartbeat
.hb_info
.time_value_2
= now
.tv_usec
;
8499 /* Did our user request this one, put it in */
8500 hb
->heartbeat
.hb_info
.user_req
= user_req
;
8501 hb
->heartbeat
.hb_info
.addr_family
= sin
->sin_family
;
8502 hb
->heartbeat
.hb_info
.addr_len
= sin
->sin_len
;
8503 if (net
->dest_state
& SCTP_ADDR_UNCONFIRMED
) {
8504 /* we only take from the entropy pool if the address is
/* Random nonces protect address confirmation from spoofed HB-ACKs. */
8507 net
->heartbeat_random1
= hb
->heartbeat
.hb_info
.random_value1
= sctp_select_initial_TSN(&stcb
->sctp_ep
->sctp_ep
);
8508 net
->heartbeat_random2
= hb
->heartbeat
.hb_info
.random_value2
= sctp_select_initial_TSN(&stcb
->sctp_ep
->sctp_ep
);
8510 net
->heartbeat_random1
= hb
->heartbeat
.hb_info
.random_value1
= 0;
8511 net
->heartbeat_random2
= hb
->heartbeat
.hb_info
.random_value2
= 0;
8513 if (sin
->sin_family
== AF_INET
) {
8514 memcpy(hb
->heartbeat
.hb_info
.address
, &sin
->sin_addr
, sizeof(sin
->sin_addr
));
8515 } else if (sin
->sin_family
== AF_INET6
) {
8516 /* We leave the scope the way it is in our lookup table. */
8517 sin6
= (struct sockaddr_in6
*)&net
->ro
._l_addr
;
8518 memcpy(hb
->heartbeat
.hb_info
.address
, &sin6
->sin6_addr
, sizeof(sin6
->sin6_addr
));
8520 /* huh compiler bug */
8522 if (sctp_debug_on
& SCTP_DEBUG_OUTPUT1
) {
8523 kprintf("Compiler bug bleeds a mbuf and a chunk\n");
8528 /* ok we have a destination that needs a beat */
8529 /* lets do the theshold management Qiaobing style */
8530 if (user_req
== 0) {
8531 if (sctp_threshold_management(stcb
->sctp_ep
, stcb
, net
,
8532 stcb
->asoc
.max_send_times
)) {
8533 /* we have lost the association, in a way this
8534 * is quite bad since we really are one less time
8535 * since we really did not send yet. This is the
8536 * down side to the Q's style as defined in the RFC
8537 * and not my alternate style defined in the RFC.
8539 if (chk
->data
!= NULL
) {
8540 sctp_m_freem(chk
->data
);
8543 SCTP_ZONE_FREE(sctppcbinfo
.ipi_zone_chunk
, chk
);
8544 sctppcbinfo
.ipi_count_chunk
--;
8545 if ((int)sctppcbinfo
.ipi_count_chunk
< 0) {
8546 panic("Chunk count is negative");
8548 sctppcbinfo
.ipi_gencnt_chunk
++;
/* Expect a HEARTBEAT-ACK before counting this path as responded. */
8552 net
->hb_responded
= 0;
8554 if (sctp_debug_on
& SCTP_DEBUG_OUTPUT4
) {
8555 kprintf("Inserting chunk for HB\n");
8558 TAILQ_INSERT_TAIL(&stcb
->asoc
.control_send_queue
, chk
, sctp_next
);
8559 stcb
->asoc
.ctrl_queue_cnt
++;
8560 sctp_pegs
[SCTP_HB_SENT
]++;
8562 * Call directly med level routine to put out the chunk. It will
8563 * always tumble out control chunks aka HB but it may even tumble
8566 if (user_req
== 0) {
8567 /* Ok now lets start the HB timer if it is NOT a user req */
8568 sctp_timer_start(SCTP_TIMER_TYPE_HEARTBEAT
, stcb
->sctp_ep
,
/*
 * sctp_send_ecn_echo(): queue an ECN-ECHO (ECNE) control chunk carrying
 * 'high_tsn' (a parameter declared on a line missing from this extract).
 * If an ECNE is already on the control queue its TSN is simply updated
 * in place; otherwise a new chunk + mbuf is built and queued.
 *
 * NOTE(review): extract is damaged — the asoc assignment, several braces
 * and some lines (e.g. 8587-8589, 8592-8594, 8614-8615) are missing.
 */
8575 sctp_send_ecn_echo(struct sctp_tcb
*stcb
, struct sctp_nets
*net
,
8578 struct sctp_association
*asoc
;
8579 struct sctp_ecne_chunk
*ecne
;
8580 struct sctp_tmit_chunk
*chk
;
/* Reuse an existing queued ECNE if one is found. */
8582 TAILQ_FOREACH(chk
, &asoc
->control_send_queue
, sctp_next
) {
8583 if (chk
->rec
.chunk_id
== SCTP_ECN_ECHO
) {
8584 /* found a previous ECN_ECHO update it if needed */
8585 ecne
= mtod(chk
->data
, struct sctp_ecne_chunk
*);
8586 ecne
->tsn
= htonl(high_tsn
);
8590 /* nope could not find one to update so we must build one */
8591 chk
= (struct sctp_tmit_chunk
*)SCTP_ZONE_GET(sctppcbinfo
.ipi_zone_chunk
);
8595 sctp_pegs
[SCTP_ECNE_SENT
]++;
8596 sctppcbinfo
.ipi_count_chunk
++;
8597 sctppcbinfo
.ipi_gencnt_chunk
++;
8598 chk
->rec
.chunk_id
= SCTP_ECN_ECHO
;
8599 chk
->asoc
= &stcb
->asoc
;
8600 chk
->send_size
= sizeof(struct sctp_ecne_chunk
);
8601 MGETHDR(chk
->data
, MB_DONTWAIT
, MT_DATA
);
8602 if (chk
->data
== NULL
) {
/* mbuf allocation failed: release the chunk back to the zone. */
8603 SCTP_ZONE_FREE(sctppcbinfo
.ipi_zone_chunk
, chk
);
8604 sctppcbinfo
.ipi_count_chunk
--;
8605 if ((int)sctppcbinfo
.ipi_count_chunk
< 0) {
8606 panic("Chunk count is negative");
8608 sctppcbinfo
.ipi_gencnt_chunk
++;
8611 chk
->data
->m_data
+= SCTP_MIN_OVERHEAD
;
8612 chk
->data
->m_pkthdr
.len
= chk
->data
->m_len
= chk
->send_size
;
8613 chk
->sent
= SCTP_DATAGRAM_UNSENT
;
8616 chk
->whoTo
->ref_count
++;
/* Fill in the ECNE header and the echoed TSN. */
8617 ecne
= mtod(chk
->data
, struct sctp_ecne_chunk
*);
8618 ecne
->ch
.chunk_type
= SCTP_ECN_ECHO
;
8619 ecne
->ch
.chunk_flags
= 0;
8620 ecne
->ch
.chunk_length
= htons(sizeof(struct sctp_ecne_chunk
));
8621 ecne
->tsn
= htonl(high_tsn
);
8622 TAILQ_INSERT_TAIL(&stcb
->asoc
.control_send_queue
, chk
, sctp_next
);
8623 asoc
->ctrl_queue_cnt
++;
8627 sctp_send_packet_dropped(struct sctp_tcb
*stcb
, struct sctp_nets
*net
,
8628 struct mbuf
*m
, int iphlen
, int bad_crc
)
8630 struct sctp_association
*asoc
;
8631 struct sctp_pktdrop_chunk
*drp
;
8632 struct sctp_tmit_chunk
*chk
;
8635 unsigned int small_one
;
8640 if (asoc
->peer_supports_pktdrop
== 0) {
8641 /* peer must declare support before I
8646 chk
= (struct sctp_tmit_chunk
*)SCTP_ZONE_GET(sctppcbinfo
.ipi_zone_chunk
);
8650 sctppcbinfo
.ipi_count_chunk
++;
8651 sctppcbinfo
.ipi_gencnt_chunk
++;
8653 iph
= mtod(m
, struct ip
*);
8657 if (iph
->ip_v
== IPVERSION
) {
8659 #if defined(__FreeBSD__)
8660 len
= chk
->send_size
= iph
->ip_len
;
8662 len
= chk
->send_size
= (iph
->ip_len
- iphlen
);
8665 struct ip6_hdr
*ip6h
;
8667 ip6h
= mtod(m
, struct ip6_hdr
*);
8668 len
= chk
->send_size
= htons(ip6h
->ip6_plen
);
8670 if ((len
+iphlen
) > m
->m_pkthdr
.len
) {
8672 chk
->send_size
= len
= m
->m_pkthdr
.len
- iphlen
;
8674 chk
->asoc
= &stcb
->asoc
;
8675 MGETHDR(chk
->data
, MB_DONTWAIT
, MT_DATA
);
8676 if (chk
->data
== NULL
) {
8678 SCTP_ZONE_FREE(sctppcbinfo
.ipi_zone_chunk
, chk
);
8679 sctppcbinfo
.ipi_count_chunk
--;
8680 if ((int)sctppcbinfo
.ipi_count_chunk
< 0) {
8681 panic("Chunk count is negative");
8683 sctppcbinfo
.ipi_gencnt_chunk
++;
8686 if ((chk
->send_size
+sizeof(struct sctp_pktdrop_chunk
)+SCTP_MIN_OVERHEAD
) > MHLEN
) {
8687 MCLGET(chk
->data
, MB_DONTWAIT
);
8688 if ((chk
->data
->m_flags
& M_EXT
) == 0) {
8690 sctp_m_freem(chk
->data
);
8695 chk
->data
->m_data
+= SCTP_MIN_OVERHEAD
;
8696 drp
= mtod(chk
->data
, struct sctp_pktdrop_chunk
*);
8698 sctp_m_freem(chk
->data
);
8702 small_one
= asoc
->smallest_mtu
;
8703 if (small_one
> MCLBYTES
) {
8704 /* Only one cluster worth of data MAX */
8705 small_one
= MCLBYTES
;
8707 chk
->book_size
= (chk
->send_size
+ sizeof(struct sctp_pktdrop_chunk
) +
8708 sizeof(struct sctphdr
) + SCTP_MED_OVERHEAD
);
8709 if (chk
->book_size
> small_one
) {
8710 drp
->ch
.chunk_flags
= SCTP_PACKET_TRUNCATED
;
8711 drp
->trunc_len
= htons(chk
->send_size
);
8712 chk
->send_size
= small_one
- (SCTP_MED_OVERHEAD
+
8713 sizeof(struct sctp_pktdrop_chunk
) +
8714 sizeof(struct sctphdr
));
8715 len
= chk
->send_size
;
8717 /* no truncation needed */
8718 drp
->ch
.chunk_flags
= 0;
8719 drp
->trunc_len
= htons(0);
8722 drp
->ch
.chunk_flags
|= SCTP_BADCRC
;
8724 chk
->send_size
+= sizeof(struct sctp_pktdrop_chunk
);
8725 chk
->data
->m_pkthdr
.len
= chk
->data
->m_len
= chk
->send_size
;
8726 chk
->sent
= SCTP_DATAGRAM_UNSENT
;
8729 /* we should hit here */
8732 chk
->whoTo
= asoc
->primary_destination
;
8734 chk
->whoTo
->ref_count
++;
8735 chk
->rec
.chunk_id
= SCTP_PACKET_DROPPED
;
8736 drp
->ch
.chunk_type
= SCTP_PACKET_DROPPED
;
8737 drp
->ch
.chunk_length
= htons(chk
->send_size
);
8738 spc
= stcb
->sctp_socket
->so_rcv
.ssb_hiwat
;
8742 drp
->bottle_bw
= htonl(spc
);
8743 drp
->current_onq
= htonl(asoc
->size_on_delivery_queue
+
8744 asoc
->size_on_reasm_queue
+
8745 asoc
->size_on_all_streams
+
8746 asoc
->my_rwnd_control_len
+
8747 stcb
->sctp_socket
->so_rcv
.ssb_cc
);
8750 m_copydata(m
, iphlen
, len
, datap
);
8751 TAILQ_INSERT_TAIL(&stcb
->asoc
.control_send_queue
, chk
, sctp_next
);
8752 asoc
->ctrl_queue_cnt
++;
8756 sctp_send_cwr(struct sctp_tcb
*stcb
, struct sctp_nets
*net
, uint32_t high_tsn
)
8758 struct sctp_association
*asoc
;
8759 struct sctp_cwr_chunk
*cwr
;
8760 struct sctp_tmit_chunk
*chk
;
8763 TAILQ_FOREACH(chk
, &asoc
->control_send_queue
, sctp_next
) {
8764 if (chk
->rec
.chunk_id
== SCTP_ECN_CWR
) {
8765 /* found a previous ECN_CWR update it if needed */
8766 cwr
= mtod(chk
->data
, struct sctp_cwr_chunk
*);
8767 if (compare_with_wrap(high_tsn
, ntohl(cwr
->tsn
),
8769 cwr
->tsn
= htonl(high_tsn
);
8774 /* nope could not find one to update so we must build one */
8775 chk
= (struct sctp_tmit_chunk
*)SCTP_ZONE_GET(sctppcbinfo
.ipi_zone_chunk
);
8779 sctppcbinfo
.ipi_count_chunk
++;
8780 sctppcbinfo
.ipi_gencnt_chunk
++;
8781 chk
->rec
.chunk_id
= SCTP_ECN_CWR
;
8782 chk
->asoc
= &stcb
->asoc
;
8783 chk
->send_size
= sizeof(struct sctp_cwr_chunk
);
8784 MGETHDR(chk
->data
, MB_DONTWAIT
, MT_DATA
);
8785 if (chk
->data
== NULL
) {
8786 SCTP_ZONE_FREE(sctppcbinfo
.ipi_zone_chunk
, chk
);
8787 sctppcbinfo
.ipi_count_chunk
--;
8788 if ((int)sctppcbinfo
.ipi_count_chunk
< 0) {
8789 panic("Chunk count is negative");
8791 sctppcbinfo
.ipi_gencnt_chunk
++;
8794 chk
->data
->m_data
+= SCTP_MIN_OVERHEAD
;
8795 chk
->data
->m_pkthdr
.len
= chk
->data
->m_len
= chk
->send_size
;
8796 chk
->sent
= SCTP_DATAGRAM_UNSENT
;
8799 chk
->whoTo
->ref_count
++;
8800 cwr
= mtod(chk
->data
, struct sctp_cwr_chunk
*);
8801 cwr
->ch
.chunk_type
= SCTP_ECN_CWR
;
8802 cwr
->ch
.chunk_flags
= 0;
8803 cwr
->ch
.chunk_length
= htons(sizeof(struct sctp_cwr_chunk
));
8804 cwr
->tsn
= htonl(high_tsn
);
8805 TAILQ_INSERT_TAIL(&stcb
->asoc
.control_send_queue
, chk
, sctp_next
);
8806 asoc
->ctrl_queue_cnt
++;
8809 sctp_reset_the_streams(struct sctp_tcb
*stcb
,
8810 struct sctp_stream_reset_request
*req
, int number_entries
, uint16_t *list
)
8814 if (req
->reset_flags
& SCTP_RESET_ALL
) {
8815 for (i
=0; i
<stcb
->asoc
.streamoutcnt
; i
++) {
8816 stcb
->asoc
.strmout
[i
].next_sequence_sent
= 0;
8818 } else if (number_entries
) {
8819 for (i
=0; i
<number_entries
; i
++) {
8820 if (list
[i
] >= stcb
->asoc
.streamoutcnt
) {
8821 /* no such stream */
8824 stcb
->asoc
.strmout
[(list
[i
])].next_sequence_sent
= 0;
8827 sctp_ulp_notify(SCTP_NOTIFY_STR_RESET_SEND
, stcb
, number_entries
, (void *)list
);
8831 sctp_send_str_reset_ack(struct sctp_tcb
*stcb
,
8832 struct sctp_stream_reset_request
*req
)
8834 struct sctp_association
*asoc
;
8835 struct sctp_stream_reset_resp
*strack
;
8836 struct sctp_tmit_chunk
*chk
;
8838 int number_entries
, i
;
8839 uint8_t two_way
=0, not_peer
=0;
8840 uint16_t *list
=NULL
;
8843 if (req
->reset_flags
& SCTP_RESET_ALL
)
8846 number_entries
= (ntohs(req
->ph
.param_length
) - sizeof(struct sctp_stream_reset_request
)) / sizeof(uint16_t);
8848 chk
= (struct sctp_tmit_chunk
*)SCTP_ZONE_GET(sctppcbinfo
.ipi_zone_chunk
);
8852 sctppcbinfo
.ipi_count_chunk
++;
8853 sctppcbinfo
.ipi_gencnt_chunk
++;
8854 chk
->rec
.chunk_id
= SCTP_STREAM_RESET
;
8855 chk
->asoc
= &stcb
->asoc
;
8856 chk
->send_size
= sizeof(struct sctp_stream_reset_resp
) + (number_entries
* sizeof(uint16_t));
8857 MGETHDR(chk
->data
, MB_DONTWAIT
, MT_DATA
);
8858 if (chk
->data
== NULL
) {
8860 SCTP_ZONE_FREE(sctppcbinfo
.ipi_zone_chunk
, chk
);
8861 sctppcbinfo
.ipi_count_chunk
--;
8862 if ((int)sctppcbinfo
.ipi_count_chunk
< 0) {
8863 panic("Chunk count is negative");
8865 sctppcbinfo
.ipi_gencnt_chunk
++;
8868 chk
->data
->m_data
+= SCTP_MIN_OVERHEAD
;
8869 chk
->data
->m_pkthdr
.len
= chk
->data
->m_len
= SCTP_SIZE32(chk
->send_size
);
8870 if (M_TRAILINGSPACE(chk
->data
) < (int)SCTP_SIZE32(chk
->send_size
)) {
8871 MCLGET(chk
->data
, MB_DONTWAIT
);
8872 if ((chk
->data
->m_flags
& M_EXT
) == 0) {
8874 sctp_m_freem(chk
->data
);
8876 goto strresp_jump_out
;
8878 chk
->data
->m_data
+= SCTP_MIN_OVERHEAD
;
8880 if (M_TRAILINGSPACE(chk
->data
) < (int)SCTP_SIZE32(chk
->send_size
)) {
8881 /* can't do it, no room */
8883 sctp_m_freem(chk
->data
);
8885 goto strresp_jump_out
;
8888 chk
->sent
= SCTP_DATAGRAM_UNSENT
;
8890 chk
->whoTo
= asoc
->primary_destination
;
8891 chk
->whoTo
->ref_count
++;
8892 strack
= mtod(chk
->data
, struct sctp_stream_reset_resp
*);
8894 strack
->ch
.chunk_type
= SCTP_STREAM_RESET
;
8895 strack
->ch
.chunk_flags
= 0;
8896 strack
->ch
.chunk_length
= htons(chk
->send_size
);
8898 memset(strack
->sr_resp
.reset_pad
, 0, sizeof(strack
->sr_resp
.reset_pad
));
8900 strack
->sr_resp
.ph
.param_type
= ntohs(SCTP_STR_RESET_RESPONSE
);
8901 strack
->sr_resp
.ph
.param_length
= htons((chk
->send_size
- sizeof(struct sctp_chunkhdr
)));
8905 if (chk
->send_size
% 4) {
8906 /* need a padding for the end */
8909 end
= (uint8_t *)((caddr_t
)strack
+ chk
->send_size
);
8910 pad
= chk
->send_size
% 4;
8911 for (i
= 0; i
< pad
; i
++) {
8914 chk
->send_size
+= pad
;
8917 /* actual response */
8918 if (req
->reset_flags
& SCTP_RESET_YOUR
) {
8919 strack
->sr_resp
.reset_flags
= SCTP_RESET_PERFORMED
;
8921 strack
->sr_resp
.reset_flags
= 0;
8924 /* copied from reset request */
8925 strack
->sr_resp
.reset_req_seq_resp
= req
->reset_req_seq
;
8926 seq
= ntohl(req
->reset_req_seq
);
8928 list
= req
->list_of_streams
;
8929 /* copy the un-converted network byte order streams */
8930 for (i
=0; i
<number_entries
; i
++) {
8931 strack
->sr_resp
.list_of_streams
[i
] = list
[i
];
8933 if (asoc
->str_reset_seq_in
== seq
) {
8934 /* is it the next expected? */
8935 asoc
->str_reset_seq_in
++;
8936 strack
->sr_resp
.reset_at_tsn
= htonl(asoc
->sending_seq
);
8937 asoc
->str_reset_sending_seq
= asoc
->sending_seq
;
8938 if (number_entries
) {
8941 /* convert them to host byte order */
8942 for (i
=0 ; i
<number_entries
; i
++) {
8943 temp
= ntohs(list
[i
]);
8947 if (req
->reset_flags
& SCTP_RESET_YOUR
) {
8948 /* reset my outbound streams */
8949 sctp_reset_the_streams(stcb
, req
, number_entries
, list
);
8951 if (req
->reset_flags
& SCTP_RECIPRICAL
) {
8952 /* reset peer too */
8953 sctp_send_str_reset_req(stcb
, number_entries
, list
, two_way
, not_peer
);
8957 /* no its a retran so I must just ack and do nothing */
8958 strack
->sr_resp
.reset_at_tsn
= htonl(asoc
->str_reset_sending_seq
);
8960 strack
->sr_resp
.cumulative_tsn
= htonl(asoc
->cumulative_tsn
);
8961 TAILQ_INSERT_TAIL(&asoc
->control_send_queue
,
8964 asoc
->ctrl_queue_cnt
++;
8969 sctp_send_str_reset_req(struct sctp_tcb
*stcb
,
8970 int number_entrys
, uint16_t *list
, uint8_t two_way
, uint8_t not_peer
)
8972 /* Send a stream reset request. The number_entrys may be 0 and list NULL
8973 * if the request is to reset all streams. If two_way is true then we
8974 * not only request a RESET of the received streams but we also
8975 * request the peer to send a reset req to us too.
8976 * Flag combinations in table:
8978 * two_way | not_peer | = | Flags
8979 * ------------------------------
8980 * 0 | 0 | = | SCTP_RESET_YOUR (just the peer)
8981 * 1 | 0 | = | SCTP_RESET_YOUR | SCTP_RECIPRICAL (both sides)
8982 * 0 | 1 | = | Not a Valid Request (not anyone)
8983 * 1 | 1 | = | SCTP_RESET_RECIPRICAL (Just local host)
8985 struct sctp_association
*asoc
;
8986 struct sctp_stream_reset_req
*strreq
;
8987 struct sctp_tmit_chunk
*chk
;
8991 if (asoc
->stream_reset_outstanding
) {
8992 /* Already one pending, must get ACK back
8993 * to clear the flag.
8998 if ((two_way
== 0) && (not_peer
== 1)) {
8999 /* not a valid request */
9003 chk
= (struct sctp_tmit_chunk
*)SCTP_ZONE_GET(sctppcbinfo
.ipi_zone_chunk
);
9007 sctppcbinfo
.ipi_count_chunk
++;
9008 sctppcbinfo
.ipi_gencnt_chunk
++;
9009 chk
->rec
.chunk_id
= SCTP_STREAM_RESET
;
9010 chk
->asoc
= &stcb
->asoc
;
9011 chk
->send_size
= sizeof(struct sctp_stream_reset_req
) + (number_entrys
* sizeof(uint16_t));
9012 MGETHDR(chk
->data
, MB_DONTWAIT
, MT_DATA
);
9013 if (chk
->data
== NULL
) {
9015 SCTP_ZONE_FREE(sctppcbinfo
.ipi_zone_chunk
, chk
);
9016 sctppcbinfo
.ipi_count_chunk
--;
9017 if ((int)sctppcbinfo
.ipi_count_chunk
< 0) {
9018 panic("Chunk count is negative");
9020 sctppcbinfo
.ipi_gencnt_chunk
++;
9023 chk
->data
->m_data
+= SCTP_MIN_OVERHEAD
;
9024 chk
->data
->m_pkthdr
.len
= chk
->data
->m_len
= SCTP_SIZE32(chk
->send_size
);
9025 if (M_TRAILINGSPACE(chk
->data
) < (int)SCTP_SIZE32(chk
->send_size
)) {
9026 MCLGET(chk
->data
, MB_DONTWAIT
);
9027 if ((chk
->data
->m_flags
& M_EXT
) == 0) {
9029 sctp_m_freem(chk
->data
);
9031 goto strreq_jump_out
;
9033 chk
->data
->m_data
+= SCTP_MIN_OVERHEAD
;
9035 if (M_TRAILINGSPACE(chk
->data
) < (int)SCTP_SIZE32(chk
->send_size
)) {
9036 /* can't do it, no room */
9038 sctp_m_freem(chk
->data
);
9040 goto strreq_jump_out
;
9042 chk
->sent
= SCTP_DATAGRAM_UNSENT
;
9044 chk
->whoTo
= asoc
->primary_destination
;
9045 chk
->whoTo
->ref_count
++;
9047 strreq
= mtod(chk
->data
, struct sctp_stream_reset_req
*);
9048 strreq
->ch
.chunk_type
= SCTP_STREAM_RESET
;
9049 strreq
->ch
.chunk_flags
= 0;
9050 strreq
->ch
.chunk_length
= htons(chk
->send_size
);
9052 strreq
->sr_req
.ph
.param_type
= ntohs(SCTP_STR_RESET_REQUEST
);
9053 strreq
->sr_req
.ph
.param_length
= htons((chk
->send_size
- sizeof(struct sctp_chunkhdr
)));
9055 if (chk
->send_size
% 4) {
9056 /* need a padding for the end */
9059 end
= (uint8_t *)((caddr_t
)strreq
+ chk
->send_size
);
9060 pad
= chk
->send_size
% 4;
9061 for (i
=0; i
<pad
; i
++) {
9064 chk
->send_size
+= pad
;
9067 strreq
->sr_req
.reset_flags
= 0;
9068 if (number_entrys
== 0) {
9069 strreq
->sr_req
.reset_flags
|= SCTP_RESET_ALL
;
9072 strreq
->sr_req
.reset_flags
|= SCTP_RESET_YOUR
;
9074 if (not_peer
== 0) {
9075 strreq
->sr_req
.reset_flags
|= SCTP_RECIPRICAL
| SCTP_RESET_YOUR
;
9077 strreq
->sr_req
.reset_flags
|= SCTP_RECIPRICAL
;
9080 memset(strreq
->sr_req
.reset_pad
, 0, sizeof(strreq
->sr_req
.reset_pad
));
9081 strreq
->sr_req
.reset_req_seq
= htonl(asoc
->str_reset_seq_out
);
9082 if (number_entrys
) {
9083 /* populate the specific entry's */
9085 for (i
=0; i
< number_entrys
; i
++) {
9086 strreq
->sr_req
.list_of_streams
[i
] = htons(list
[i
]);
9089 TAILQ_INSERT_TAIL(&asoc
->control_send_queue
,
9092 asoc
->ctrl_queue_cnt
++;
9093 sctp_timer_start(SCTP_TIMER_TYPE_STRRESET
, stcb
->sctp_ep
, stcb
, chk
->whoTo
);
9094 asoc
->stream_reset_outstanding
= 1;
9098 sctp_send_abort(struct mbuf
*m
, int iphlen
, struct sctphdr
*sh
, uint32_t vtag
,
9099 struct mbuf
*err_cause
)
9102 * Formulate the abort message, and send it back down.
9105 struct sctp_abort_msg
*abm
;
9106 struct ip
*iph
, *iph_out
;
9107 struct ip6_hdr
*ip6
, *ip6_out
;
9110 /* don't respond to ABORT with ABORT */
9111 if (sctp_is_there_an_abort_here(m
, iphlen
, &vtag
)) {
9113 sctp_m_freem(err_cause
);
9116 MGETHDR(mout
, MB_DONTWAIT
, MT_HEADER
);
9119 sctp_m_freem(err_cause
);
9122 iph
= mtod(m
, struct ip
*);
9125 if (iph
->ip_v
== IPVERSION
) {
9126 iph_out
= mtod(mout
, struct ip
*);
9127 mout
->m_len
= sizeof(*iph_out
) + sizeof(*abm
);
9128 mout
->m_next
= err_cause
;
9130 /* Fill in the IP header for the ABORT */
9131 iph_out
->ip_v
= IPVERSION
;
9132 iph_out
->ip_hl
= (sizeof(struct ip
) / 4);
9133 iph_out
->ip_tos
= (u_char
)0;
9135 iph_out
->ip_off
= 0;
9136 iph_out
->ip_ttl
= MAXTTL
;
9137 iph_out
->ip_p
= IPPROTO_SCTP
;
9138 iph_out
->ip_src
.s_addr
= iph
->ip_dst
.s_addr
;
9139 iph_out
->ip_dst
.s_addr
= iph
->ip_src
.s_addr
;
9140 /* let IP layer calculate this */
9141 iph_out
->ip_sum
= 0;
9143 iphlen_out
= sizeof(*iph_out
);
9144 abm
= (struct sctp_abort_msg
*)((caddr_t
)iph_out
+ iphlen_out
);
9145 } else if (iph
->ip_v
== (IPV6_VERSION
>> 4)) {
9146 ip6
= (struct ip6_hdr
*)iph
;
9147 ip6_out
= mtod(mout
, struct ip6_hdr
*);
9148 mout
->m_len
= sizeof(*ip6_out
) + sizeof(*abm
);
9149 mout
->m_next
= err_cause
;
9151 /* Fill in the IP6 header for the ABORT */
9152 ip6_out
->ip6_flow
= ip6
->ip6_flow
;
9153 ip6_out
->ip6_hlim
= ip6_defhlim
;
9154 ip6_out
->ip6_nxt
= IPPROTO_SCTP
;
9155 ip6_out
->ip6_src
= ip6
->ip6_dst
;
9156 ip6_out
->ip6_dst
= ip6
->ip6_src
;
9158 iphlen_out
= sizeof(*ip6_out
);
9159 abm
= (struct sctp_abort_msg
*)((caddr_t
)ip6_out
+ iphlen_out
);
9161 /* Currently not supported */
9165 abm
->sh
.src_port
= sh
->dest_port
;
9166 abm
->sh
.dest_port
= sh
->src_port
;
9167 abm
->sh
.checksum
= 0;
9169 abm
->sh
.v_tag
= sh
->v_tag
;
9170 abm
->msg
.ch
.chunk_flags
= SCTP_HAD_NO_TCB
;
9172 abm
->sh
.v_tag
= htonl(vtag
);
9173 abm
->msg
.ch
.chunk_flags
= 0;
9175 abm
->msg
.ch
.chunk_type
= SCTP_ABORT_ASSOCIATION
;
9178 struct mbuf
*m_tmp
= err_cause
;
9180 /* get length of the err_cause chain */
9181 while (m_tmp
!= NULL
) {
9182 err_len
+= m_tmp
->m_len
;
9183 m_tmp
= m_tmp
->m_next
;
9185 mout
->m_pkthdr
.len
= mout
->m_len
+ err_len
;
9187 /* need pad at end of chunk */
9190 padlen
= 4 - (mout
->m_pkthdr
.len
% 4);
9191 m_copyback(mout
, mout
->m_pkthdr
.len
, padlen
, (caddr_t
)&cpthis
);
9193 abm
->msg
.ch
.chunk_length
= htons(sizeof(abm
->msg
.ch
) + err_len
);
9195 mout
->m_pkthdr
.len
= mout
->m_len
;
9196 abm
->msg
.ch
.chunk_length
= htons(sizeof(abm
->msg
.ch
));
9200 if ((sctp_no_csum_on_loopback
) &&
9201 (m
->m_pkthdr
.rcvif
) &&
9202 (m
->m_pkthdr
.rcvif
->if_type
== IFT_LOOP
)) {
9203 abm
->sh
.checksum
= 0;
9205 abm
->sh
.checksum
= sctp_calculate_sum(mout
, NULL
, iphlen_out
);
9208 /* zap the rcvif, it should be null */
9209 mout
->m_pkthdr
.rcvif
= 0;
9210 if (iph_out
!= NULL
) {
9213 /* zap the stack pointer to the route */
9214 bzero(&ro
, sizeof ro
);
9216 if (sctp_debug_on
& SCTP_DEBUG_OUTPUT2
) {
9217 kprintf("sctp_send_abort calling ip_output:\n");
9218 sctp_print_address_pkt(iph_out
, &abm
->sh
);
9221 /* set IPv4 length */
9222 #if defined(__FreeBSD__)
9223 iph_out
->ip_len
= mout
->m_pkthdr
.len
;
9225 iph_out
->ip_len
= htons(mout
->m_pkthdr
.len
);
9228 ip_output(mout
, 0, &ro
, IP_RAWOUTPUT
, NULL
9229 #if defined(__OpenBSD__) || (defined(__FreeBSD__) && __FreeBSD_version >= 480000) \
9230 || defined(__NetBSD__) || defined(__DragonFly__)
9234 /* Free the route if we got one back */
9237 } else if (ip6_out
!= NULL
) {
9238 #ifdef NEW_STRUCT_ROUTE
9241 struct route_in6 ro
;
9244 /* zap the stack pointer to the route */
9245 bzero(&ro
, sizeof(ro
));
9247 if (sctp_debug_on
& SCTP_DEBUG_OUTPUT2
) {
9248 kprintf("sctp_send_abort calling ip6_output:\n");
9249 sctp_print_address_pkt((struct ip
*)ip6_out
, &abm
->sh
);
9252 ip6_output(mout
, NULL
, &ro
, 0, NULL
, NULL
9253 #if defined(__NetBSD__)
9256 #if (defined(__FreeBSD__) && __FreeBSD_version >= 480000) || defined(__DragonFly__)
9260 /* Free the route if we got one back */
9264 sctp_pegs
[SCTP_DATAGRAMS_SENT
]++;
9268 sctp_send_operr_to(struct mbuf
*m
, int iphlen
,
9272 struct sctphdr
*ihdr
;
9274 struct sctphdr
*ohdr
;
9275 struct sctp_chunkhdr
*ophdr
;
9279 struct sockaddr_in6 lsa6
, fsa6
;
9282 iph
= mtod(m
, struct ip
*);
9283 ihdr
= (struct sctphdr
*)((caddr_t
)iph
+ iphlen
);
9284 if (!(scm
->m_flags
& M_PKTHDR
)) {
9285 /* must be a pkthdr */
9286 kprintf("Huh, not a packet header in send_operr\n");
9290 M_PREPEND(scm
, (sizeof(struct sctphdr
) + sizeof(struct sctp_chunkhdr
)), MB_DONTWAIT
);
9292 /* can't send because we can't add a mbuf */
9295 ohdr
= mtod(scm
, struct sctphdr
*);
9296 ohdr
->src_port
= ihdr
->dest_port
;
9297 ohdr
->dest_port
= ihdr
->src_port
;
9300 ophdr
= (struct sctp_chunkhdr
*)(ohdr
+ 1);
9301 ophdr
->chunk_type
= SCTP_OPERATION_ERROR
;
9302 ophdr
->chunk_flags
= 0;
9303 ophdr
->chunk_length
= htons(scm
->m_pkthdr
.len
- sizeof(struct sctphdr
));
9304 if (scm
->m_pkthdr
.len
% 4) {
9308 padlen
= 4 - (scm
->m_pkthdr
.len
% 4);
9309 m_copyback(scm
, scm
->m_pkthdr
.len
, padlen
, (caddr_t
)&cpthis
);
9311 if ((sctp_no_csum_on_loopback
) &&
9312 (m
->m_pkthdr
.rcvif
) &&
9313 (m
->m_pkthdr
.rcvif
->if_type
== IFT_LOOP
)) {
9316 val
= sctp_calculate_sum(scm
, NULL
, 0);
9318 ohdr
->checksum
= val
;
9319 if (iph
->ip_v
== IPVERSION
) {
9323 M_PREPEND(scm
, sizeof(struct ip
), MB_DONTWAIT
);
9326 bzero(&ro
, sizeof ro
);
9327 out
= mtod(scm
, struct ip
*);
9328 out
->ip_v
= iph
->ip_v
;
9329 out
->ip_hl
= (sizeof(struct ip
)/4);
9330 out
->ip_tos
= iph
->ip_tos
;
9331 out
->ip_id
= iph
->ip_id
;
9333 out
->ip_ttl
= MAXTTL
;
9334 out
->ip_p
= IPPROTO_SCTP
;
9336 out
->ip_src
= iph
->ip_dst
;
9337 out
->ip_dst
= iph
->ip_src
;
9338 #if defined(__FreeBSD__)
9339 out
->ip_len
= scm
->m_pkthdr
.len
;
9341 out
->ip_len
= htons(scm
->m_pkthdr
.len
);
9343 retcode
= ip_output(scm
, 0, &ro
, IP_RAWOUTPUT
, NULL
9344 #if defined(__OpenBSD__) || (defined(__FreeBSD__) && __FreeBSD_version >= 480000) \
9345 || defined(__NetBSD__) || defined(__DragonFly__)
9349 sctp_pegs
[SCTP_DATAGRAMS_SENT
]++;
9350 /* Free the route if we got one back */
9355 #ifdef NEW_STRUCT_ROUTE
9358 struct route_in6 ro
;
9360 struct ip6_hdr
*out6
, *in6
;
9362 M_PREPEND(scm
, sizeof(struct ip6_hdr
), MB_DONTWAIT
);
9365 bzero(&ro
, sizeof ro
);
9366 in6
= mtod(m
, struct ip6_hdr
*);
9367 out6
= mtod(scm
, struct ip6_hdr
*);
9368 out6
->ip6_flow
= in6
->ip6_flow
;
9369 out6
->ip6_hlim
= ip6_defhlim
;
9370 out6
->ip6_nxt
= IPPROTO_SCTP
;
9371 out6
->ip6_src
= in6
->ip6_dst
;
9372 out6
->ip6_dst
= in6
->ip6_src
;
9375 bzero(&lsa6
, sizeof(lsa6
));
9376 lsa6
.sin6_len
= sizeof(lsa6
);
9377 lsa6
.sin6_family
= AF_INET6
;
9378 lsa6
.sin6_addr
= out6
->ip6_src
;
9379 bzero(&fsa6
, sizeof(fsa6
));
9380 fsa6
.sin6_len
= sizeof(fsa6
);
9381 fsa6
.sin6_family
= AF_INET6
;
9382 fsa6
.sin6_addr
= out6
->ip6_dst
;
9383 if (sctp_debug_on
& SCTP_DEBUG_OUTPUT2
) {
9384 kprintf("sctp_operr_to calling ipv6 output:\n");
9386 sctp_print_address((struct sockaddr
*)&lsa6
);
9388 sctp_print_address((struct sockaddr
*)&fsa6
);
9390 #endif /* SCTP_DEBUG */
9391 ip6_output(scm
, NULL
, &ro
, 0, NULL
, NULL
9392 #if defined(__NetBSD__)
9395 #if (defined(__FreeBSD__) && __FreeBSD_version >= 480000) || defined(__DragonFly__)
9399 sctp_pegs
[SCTP_DATAGRAMS_SENT
]++;
9400 /* Free the route if we got one back */
9407 sctp_copy_one(struct mbuf
*m
, struct uio
*uio
, int cpsz
, int resv_upfront
, int *mbcnt
)
9409 int left
, cancpy
, willcpy
, error
;
9418 if ((left
+resv_upfront
) > (int)MHLEN
) {
9424 if ((m
->m_flags
& M_EXT
) == 0) {
9428 *mbcnt
+= m
->m_ext
.ext_size
;
9431 cancpy
= M_TRAILINGSPACE(m
);
9432 willcpy
= min(cancpy
, left
);
9433 if ((willcpy
+ resv_upfront
) > cancpy
) {
9434 willcpy
-= resv_upfront
;
9437 /* Align data to the end */
9438 if ((m
->m_flags
& M_EXT
) == 0) {
9439 if (m
->m_flags
& M_PKTHDR
) {
9440 MH_ALIGN(m
, willcpy
);
9442 M_ALIGN(m
, willcpy
);
9445 MC_ALIGN(m
, willcpy
);
9447 error
= uiomove(mtod(m
, caddr_t
), willcpy
, uio
);
9455 MGET(m
->m_next
, MB_WAIT
, MT_DATA
);
9456 if (m
->m_next
== NULL
) {
9463 if (left
> (int)MHLEN
) {
9469 if ((m
->m_flags
& M_EXT
) == 0) {
9473 *mbcnt
+= m
->m_ext
.ext_size
;
9475 cancpy
= M_TRAILINGSPACE(m
);
9476 willcpy
= min(cancpy
, left
);
9483 sctp_copy_it_in(struct sctp_inpcb
*inp
,
9484 struct sctp_tcb
*stcb
,
9485 struct sctp_association
*asoc
,
9486 struct sctp_nets
*net
,
9487 struct sctp_sndrcvinfo
*srcv
,
9491 /* This routine must be very careful in
9492 * its work. Protocol processing is
9493 * up and running so care must be taken to
9494 * spl...() when you need to do something
9495 * that may effect the stcb/asoc. The sb is
9496 * locked however. When data is copied the
9497 * protocol processing should be enabled since
9498 * this is a slower operation...
9502 int frag_size
, mbcnt
= 0, mbcnt_e
= 0;
9503 unsigned int sndlen
;
9504 unsigned int tot_demand
;
9505 int tot_out
, dataout
;
9506 struct sctp_tmit_chunk
*chk
;
9508 struct sctp_stream_out
*strq
;
9513 so
= stcb
->sctp_socket
;
9517 sndlen
= uio
->uio_resid
;
9518 /* lock the socket buf */
9519 SOCKBUF_LOCK(&so
->so_snd
);
9520 error
= ssb_lock(&so
->so_snd
, SBLOCKWAIT(flags
));
9524 /* will it ever fit ? */
9525 if (sndlen
> so
->so_snd
.ssb_hiwat
) {
9526 /* It will NEVER fit */
9531 /* Do I need to block? */
9532 if ((so
->so_snd
.ssb_hiwat
<
9533 (sndlen
+ asoc
->total_output_queue_size
)) ||
9534 (asoc
->chunks_on_out_queue
> sctp_max_chunks_on_queue
) ||
9535 (asoc
->total_output_mbuf_queue_size
>
9536 so
->so_snd
.ssb_mbmax
)
9538 /* prune any prsctp bufs out */
9539 if (asoc
->peer_supports_prsctp
) {
9540 sctp_prune_prsctp(stcb
, asoc
, srcv
, sndlen
);
9543 * We store off a pointer to the endpoint.
9544 * Since on return from this we must check to
9545 * see if an so_error is set. If so we may have
9546 * been reset and our stcb destroyed. Returning
9547 * an error will flow back to the user...
9549 while ((so
->so_snd
.ssb_hiwat
<
9550 (sndlen
+ asoc
->total_output_queue_size
)) ||
9551 (asoc
->chunks_on_out_queue
>
9552 sctp_max_chunks_on_queue
) ||
9553 (asoc
->total_output_mbuf_queue_size
>
9554 so
->so_snd
.ssb_mbmax
)
9556 if (flags
& (MSG_FNONBLOCKING
|MSG_DONTWAIT
)) {
9557 /* Non-blocking io in place */
9558 error
= EWOULDBLOCK
;
9561 inp
->sctp_tcb_at_block
= (void *)stcb
;
9562 inp
->error_on_block
= 0;
9563 #ifdef SCTP_BLK_LOGGING
9564 sctp_log_block(SCTP_BLOCK_LOG_INTO_BLK
,
9567 ssb_unlock(&so
->so_snd
);
9568 SCTP_TCB_UNLOCK(stcb
);
9569 error
= ssb_wait(&so
->so_snd
);
9570 SCTP_INP_RLOCK(inp
);
9571 if ((inp
->sctp_flags
& SCTP_PCB_FLAGS_SOCKET_GONE
) ||
9572 (inp
->sctp_flags
& SCTP_PCB_FLAGS_SOCKET_GONE
)) {
9573 /* Should I really unlock ? */
9574 SCTP_INP_RUNLOCK(inp
);
9578 SCTP_TCB_LOCK(stcb
);
9579 SCTP_INP_RUNLOCK(inp
);
9581 inp
->sctp_tcb_at_block
= 0;
9582 #ifdef SCTP_BLK_LOGGING
9583 sctp_log_block(SCTP_BLOCK_LOG_OUTOF_BLK
,
9586 if (inp
->error_on_block
) {
9588 * if our asoc was killed, the free code
9589 * (in sctp_pcb.c) will save a error in
9592 error
= inp
->error_on_block
;
9600 /* did we encounter a socket error? */
9602 error
= so
->so_error
;
9606 error
= ssb_lock(&so
->so_snd
, M_WAITOK
);
9608 /* Can't aquire the lock */
9612 #if defined(__FreeBSD__) && __FreeBSD_version >= 502115
9613 if (so
->so_rcv
.sb_state
& SBS_CANTSENDMORE
) {
9615 if (so
->so_state
& SS_CANTSENDMORE
) {
9617 /* The socket is now set not to sendmore.. its gone */
9623 error
= so
->so_error
;
9627 if (asoc
->peer_supports_prsctp
) {
9628 sctp_prune_prsctp(stcb
, asoc
, srcv
, sndlen
);
9632 dataout
= tot_out
= uio
->uio_resid
;
9633 if (inp
->sctp_flags
& SCTP_PCB_FLAGS_BOUND_V6
) {
9634 resv_in_first
= SCTP_MED_OVERHEAD
;
9636 resv_in_first
= SCTP_MED_V4_OVERHEAD
;
9639 /* Are we aborting? */
9640 if (srcv
->sinfo_flags
& MSG_ABORT
) {
9641 if ((SCTP_GET_STATE(asoc
) != SCTP_STATE_COOKIE_WAIT
) &&
9642 (SCTP_GET_STATE(asoc
) != SCTP_STATE_COOKIE_ECHOED
)) {
9643 /* It has to be up before we abort */
9644 /* how big is the user initiated abort? */
9646 /* I wonder about doing a MGET without a splnet set.
9647 * it is done that way in the sosend code so I guess
9650 MGETHDR(mm
, MB_WAIT
, MT_DATA
);
9652 struct sctp_paramhdr
*ph
;
9654 tot_demand
= (tot_out
+ sizeof(struct sctp_paramhdr
));
9655 if (tot_demand
> MHLEN
) {
9656 if (tot_demand
> MCLBYTES
) {
9657 /* truncate user data */
9658 tot_demand
= MCLBYTES
;
9659 tot_out
= tot_demand
- sizeof(struct sctp_paramhdr
);
9661 MCLGET(mm
, MB_WAIT
);
9662 if ((mm
->m_flags
& M_EXT
) == 0) {
9663 /* truncate further */
9665 tot_out
= tot_demand
- sizeof(struct sctp_paramhdr
);
9668 /* now move forward the data pointer */
9669 ph
= mtod(mm
, struct sctp_paramhdr
*);
9670 ph
->param_type
= htons(SCTP_CAUSE_USER_INITIATED_ABT
);
9671 ph
->param_length
= htons((sizeof(struct sctp_paramhdr
) + tot_out
));
9673 mm
->m_pkthdr
.len
= tot_out
+ sizeof(struct sctp_paramhdr
);
9674 mm
->m_len
= mm
->m_pkthdr
.len
;
9675 error
= uiomove((caddr_t
)ph
, (int)tot_out
, uio
);
9678 * Here if we can't get his data we
9679 * still abort we just don't get to
9680 * send the users note :-0
9686 ssb_unlock(&so
->so_snd
);
9687 SOCKBUF_UNLOCK(&so
->so_snd
);
9688 sctp_abort_an_association(stcb
->sctp_ep
, stcb
,
9689 SCTP_RESPONSE_TO_USER_REQ
,
9699 /* Now can we send this? */
9700 if ((SCTP_GET_STATE(asoc
) == SCTP_STATE_SHUTDOWN_SENT
) ||
9701 (SCTP_GET_STATE(asoc
) == SCTP_STATE_SHUTDOWN_ACK_SENT
) ||
9702 (SCTP_GET_STATE(asoc
) == SCTP_STATE_SHUTDOWN_RECEIVED
) ||
9703 (asoc
->state
& SCTP_STATE_SHUTDOWN_PENDING
)) {
9704 /* got data while shutting down */
9709 /* Is the stream no. valid? */
9710 if (srcv
->sinfo_stream
>= asoc
->streamoutcnt
) {
9711 /* Invalid stream number */
9716 if (asoc
->strmout
== NULL
) {
9717 /* huh? software error */
9719 if (sctp_debug_on
& SCTP_DEBUG_OUTPUT1
) {
9720 kprintf("software error in sctp_copy_it_in\n");
9727 if ((srcv
->sinfo_flags
& MSG_EOF
) &&
9728 (stcb
->sctp_ep
->sctp_flags
& SCTP_PCB_FLAGS_UDPTYPE
) &&
9739 /* save off the tag */
9740 my_vtag
= asoc
->my_vtag
;
9741 strq
= &asoc
->strmout
[srcv
->sinfo_stream
];
9742 /* First lets figure out the "chunking" point */
9743 frag_size
= sctp_get_frag_point(stcb
, asoc
);
9745 /* two choices here, it all fits in one chunk or
9746 * we need multiple chunks.
9749 SOCKBUF_UNLOCK(&so
->so_snd
);
9750 if (tot_out
<= frag_size
) {
9751 /* no need to setup a template */
9752 chk
= (struct sctp_tmit_chunk
*)SCTP_ZONE_GET(sctppcbinfo
.ipi_zone_chunk
);
9755 SOCKBUF_LOCK(&so
->so_snd
);
9758 sctppcbinfo
.ipi_count_chunk
++;
9759 sctppcbinfo
.ipi_gencnt_chunk
++;
9760 asoc
->chunks_on_out_queue
++;
9761 MGETHDR(mm
, MB_WAIT
, MT_DATA
);
9766 error
= sctp_copy_one(mm
, uio
, tot_out
, resv_in_first
, &mbcnt_e
);
9769 sctp_prepare_chunk(chk
, stcb
, srcv
, strq
, net
);
9770 chk
->mbcnt
= mbcnt_e
;
9773 mm
->m_pkthdr
.len
= tot_out
;
9777 /* the actual chunk flags */
9778 chk
->rec
.data
.rcv_flags
|= SCTP_DATA_NOT_FRAG
;
9779 chk
->whoTo
->ref_count
++;
9781 /* fix up the send_size if it is not present */
9782 chk
->send_size
= tot_out
;
9783 chk
->book_size
= chk
->send_size
;
9784 /* ok, we are commited */
9785 if ((srcv
->sinfo_flags
& MSG_UNORDERED
) == 0) {
9786 /* bump the ssn if we are unordered. */
9787 strq
->next_sequence_sent
++;
9789 if (chk
->flags
& SCTP_PR_SCTP_BUFFER
) {
9790 asoc
->sent_queue_cnt_removeable
++;
9793 if ((asoc
->state
== 0) ||
9794 (my_vtag
!= asoc
->my_vtag
) ||
9795 (so
!= inp
->sctp_socket
) ||
9796 (inp
->sctp_socket
== 0)) {
9797 /* connection was aborted */
9802 asoc
->stream_queue_cnt
++;
9803 TAILQ_INSERT_TAIL(&strq
->outqueue
, chk
, sctp_next
);
9804 /* now check if this stream is on the wheel */
9805 if ((strq
->next_spoke
.tqe_next
== NULL
) &&
9806 (strq
->next_spoke
.tqe_prev
== NULL
)) {
9807 /* Insert it on the wheel since it is not
9810 sctp_insert_on_wheel(asoc
, strq
);
9815 SCTP_ZONE_FREE(sctppcbinfo
.ipi_zone_chunk
, chk
);
9816 sctppcbinfo
.ipi_count_chunk
--;
9817 if ((int)sctppcbinfo
.ipi_count_chunk
< 0) {
9818 panic("Chunk count is negative");
9820 SOCKBUF_LOCK(&so
->so_snd
);
9824 /* we need to setup a template */
9825 struct sctp_tmit_chunk
template;
9826 struct sctpchunk_listhead tmp
;
9828 /* setup the template */
9829 sctp_prepare_chunk(&template, stcb
, srcv
, strq
, net
);
9831 /* Prepare the temp list */
9834 /* Template is complete, now time for the work */
9835 while (tot_out
> 0) {
9837 chk
= (struct sctp_tmit_chunk
*)SCTP_ZONE_GET(sctppcbinfo
.ipi_zone_chunk
);
9840 * ok we must spin through and dump anything
9841 * we have allocated and then jump to the
9846 sctppcbinfo
.ipi_count_chunk
++;
9847 asoc
->chunks_on_out_queue
++;
9849 sctppcbinfo
.ipi_gencnt_chunk
++;
9851 chk
->whoTo
->ref_count
++;
9852 MGETHDR(chk
->data
, MB_WAIT
, MT_DATA
);
9853 if (chk
->data
== NULL
) {
9857 tot_demand
= min(tot_out
, frag_size
);
9858 error
= sctp_copy_one(chk
->data
, uio
, tot_demand
, resv_in_first
, &mbcnt_e
);
9861 /* now fix the chk->send_size */
9862 chk
->mbcnt
= mbcnt_e
;
9865 chk
->send_size
= tot_demand
;
9866 chk
->data
->m_pkthdr
.len
= tot_demand
;
9867 chk
->book_size
= chk
->send_size
;
9868 if (chk
->flags
& SCTP_PR_SCTP_BUFFER
) {
9869 asoc
->sent_queue_cnt_removeable
++;
9871 TAILQ_INSERT_TAIL(&tmp
, chk
, sctp_next
);
9872 tot_out
-= tot_demand
;
9874 /* Now the tmp list holds all chunks and data */
9875 if ((srcv
->sinfo_flags
& MSG_UNORDERED
) == 0) {
9876 /* bump the ssn if we are unordered. */
9877 strq
->next_sequence_sent
++;
9879 /* Mark the first/last flags. This will
9880 * result int a 3 for a single item on the list
9882 chk
= TAILQ_FIRST(&tmp
);
9883 chk
->rec
.data
.rcv_flags
|= SCTP_DATA_FIRST_FRAG
;
9884 chk
= TAILQ_LAST(&tmp
, sctpchunk_listhead
);
9885 chk
->rec
.data
.rcv_flags
|= SCTP_DATA_LAST_FRAG
;
9887 /* now move it to the streams actual queue */
9888 /* first stop protocol processing */
9890 if ((asoc
->state
== 0) ||
9891 (my_vtag
!= asoc
->my_vtag
) ||
9892 (so
!= inp
->sctp_socket
) ||
9893 (inp
->sctp_socket
== 0)) {
9894 /* connection was aborted */
9899 chk
= TAILQ_FIRST(&tmp
);
9901 chk
->data
->m_nextpkt
= 0;
9902 TAILQ_REMOVE(&tmp
, chk
, sctp_next
);
9903 asoc
->stream_queue_cnt
++;
9904 TAILQ_INSERT_TAIL(&strq
->outqueue
, chk
, sctp_next
);
9905 chk
= TAILQ_FIRST(&tmp
);
9907 /* now check if this stream is on the wheel */
9908 if ((strq
->next_spoke
.tqe_next
== NULL
) &&
9909 (strq
->next_spoke
.tqe_prev
== NULL
)) {
9910 /* Insert it on the wheel since it is not
9913 sctp_insert_on_wheel(asoc
, strq
);
9915 /* Ok now we can allow pping */
9919 SOCKBUF_LOCK(&so
->so_snd
);
9920 chk
= TAILQ_FIRST(&tmp
);
9923 sctp_m_freem(chk
->data
);
9926 TAILQ_REMOVE(&tmp
, chk
, sctp_next
);
9927 SCTP_ZONE_FREE(sctppcbinfo
.ipi_zone_chunk
, chk
);
9928 sctppcbinfo
.ipi_count_chunk
--;
9929 asoc
->chunks_on_out_queue
--;
9930 if ((int)sctppcbinfo
.ipi_count_chunk
< 0) {
9931 panic("Chunk count is negative");
9933 sctppcbinfo
.ipi_gencnt_chunk
++;
9934 chk
= TAILQ_FIRST(&tmp
);
9940 #ifdef SCTP_MBCNT_LOGGING
9941 sctp_log_mbcnt(SCTP_LOG_MBCNT_INCREASE
,
9942 asoc
->total_output_queue_size
,
9944 asoc
->total_output_mbuf_queue_size
,
9948 SOCKBUF_LOCK(&so
->so_snd
);
9949 asoc
->total_output_queue_size
+= dataout
;
9950 asoc
->total_output_mbuf_queue_size
+= mbcnt
;
9951 if ((stcb
->sctp_ep
->sctp_flags
& SCTP_PCB_FLAGS_TCPTYPE
) ||
9952 (stcb
->sctp_ep
->sctp_flags
& SCTP_PCB_FLAGS_IN_TCPPOOL
)) {
9953 so
->so_snd
.ssb_cc
+= dataout
;
9954 so
->so_snd
.ssb_mbcnt
+= mbcnt
;
9956 if ((srcv
->sinfo_flags
& MSG_EOF
) &&
9957 (stcb
->sctp_ep
->sctp_flags
& SCTP_PCB_FLAGS_UDPTYPE
)
9959 int some_on_streamwheel
= 0;
9961 if (!TAILQ_EMPTY(&asoc
->out_wheel
)) {
9962 /* Check to see if some data queued */
9963 struct sctp_stream_out
*outs
;
9964 TAILQ_FOREACH(outs
, &asoc
->out_wheel
, next_spoke
) {
9965 if (!TAILQ_EMPTY(&outs
->outqueue
)) {
9966 some_on_streamwheel
= 1;
9971 if (TAILQ_EMPTY(&asoc
->send_queue
) &&
9972 TAILQ_EMPTY(&asoc
->sent_queue
) &&
9973 (some_on_streamwheel
== 0)) {
9974 /* there is nothing queued to send, so I'm done... */
9975 if ((SCTP_GET_STATE(asoc
) != SCTP_STATE_SHUTDOWN_SENT
) &&
9976 (SCTP_GET_STATE(asoc
) != SCTP_STATE_SHUTDOWN_ACK_SENT
)) {
9977 /* only send SHUTDOWN the first time through */
9979 if (sctp_debug_on
& SCTP_DEBUG_OUTPUT4
) {
9980 kprintf("%s:%d sends a shutdown\n",
9986 sctp_send_shutdown(stcb
, stcb
->asoc
.primary_destination
);
9987 asoc
->state
= SCTP_STATE_SHUTDOWN_SENT
;
9988 sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWN
, stcb
->sctp_ep
, stcb
,
9989 asoc
->primary_destination
);
9990 sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNGUARD
, stcb
->sctp_ep
, stcb
,
9991 asoc
->primary_destination
);
9995 * we still got (or just got) data to send, so set
9999 * XXX sockets draft says that MSG_EOF should be sent
10000 * with no data. currently, we will allow user data
10001 * to be sent first and move to SHUTDOWN-PENDING
10003 asoc
->state
|= SCTP_STATE_SHUTDOWN_PENDING
;
10008 if (sctp_debug_on
& SCTP_DEBUG_OUTPUT2
) {
10009 kprintf("++total out:%d total_mbuf_out:%d\n",
10010 (int)asoc
->total_output_queue_size
,
10011 (int)asoc
->total_output_mbuf_queue_size
);
10016 ssb_unlock(&so
->so_snd
);
10018 SOCKBUF_UNLOCK(&so
->so_snd
);
10027 sctp_sosend(struct socket
*so
,
10029 struct mbuf
*addr_mbuf
,
10031 struct sockaddr
*addr
,
10035 struct mbuf
*control
,
10036 #if defined(__NetBSD__) || defined(__APPLE__)
10040 #if (defined(__FreeBSD__) && __FreeBSD_version >= 500000) || defined(__DragonFly__)
10048 unsigned int sndlen
;
10049 int error
, use_rcvinfo
;
10050 int queue_only
= 0, queue_only_for_init
=0;
10053 struct sctp_inpcb
*inp
;
10054 struct sctp_tcb
*stcb
=NULL
;
10055 struct sctp_sndrcvinfo srcv
;
10056 struct timeval now
;
10057 struct sctp_nets
*net
;
10058 struct sctp_association
*asoc
;
10059 struct sctp_inpcb
*t_inp
;
10060 int create_lock_applied
= 0;
10061 #if defined(__APPLE__)
10062 struct proc
*p
= current_proc();
10063 #elif defined(__NetBSD__)
10064 struct proc
*p
= curproc
; /* XXX */
10065 struct sockaddr
*addr
= NULL
;
10067 addr
= mtod(addr_mbuf
, struct sockaddr
*);
10070 error
= use_rcvinfo
= 0;
10074 t_inp
= inp
= (struct sctp_inpcb
*)so
->so_pcb
;
10076 sndlen
= uio
->uio_resid
;
10078 sndlen
= top
->m_pkthdr
.len
;
10083 if ((inp
->sctp_flags
& SCTP_PCB_FLAGS_TCPTYPE
) &&
10084 (inp
->sctp_flags
& SCTP_PCB_FLAGS_ACCEPTING
)) {
10085 /* The listner can NOT send */
10091 SCTP_ASOC_CREATE_LOCK(inp
);
10092 if ((inp
->sctp_flags
& SCTP_PCB_FLAGS_SOCKET_GONE
) ||
10093 (inp
->sctp_flags
& SCTP_PCB_FLAGS_SOCKET_GONE
)) {
10094 /* Should I really unlock ? */
10100 create_lock_applied
= 1;
10101 if (((inp
->sctp_flags
& SCTP_PCB_FLAGS_BOUND_V6
) == 0) &&
10102 (addr
->sa_family
== AF_INET6
)) {
10108 /* now we must find the assoc */
10109 if (inp
->sctp_flags
& SCTP_PCB_FLAGS_CONNECTED
) {
10110 SCTP_INP_RLOCK(inp
);
10111 stcb
= LIST_FIRST(&inp
->sctp_asoc_list
);
10112 if (stcb
== NULL
) {
10113 SCTP_INP_RUNLOCK(inp
);
10118 SCTP_TCB_LOCK(stcb
);
10119 SCTP_INP_RUNLOCK(inp
);
10120 net
= stcb
->asoc
.primary_destination
;
10124 /* process cmsg snd/rcv info (maybe a assoc-id) */
10125 if (sctp_find_cmsg(SCTP_SNDRCV
, (void *)&srcv
, control
,
10128 if (srcv
.sinfo_flags
& MSG_SENDALL
) {
10129 /* its a sendall */
10130 sctppcbinfo
.mbuf_track
--;
10131 sctp_m_freem(control
);
10133 if (create_lock_applied
) {
10134 SCTP_ASOC_CREATE_UNLOCK(inp
);
10135 create_lock_applied
= 0;
10137 return (sctp_sendall(inp
, uio
, top
, &srcv
));
10142 if (stcb
== NULL
) {
10143 /* Need to do a lookup */
10144 if (use_rcvinfo
&& srcv
.sinfo_assoc_id
) {
10145 stcb
= sctp_findassociation_ep_asocid(inp
, srcv
.sinfo_assoc_id
);
10147 * Question: Should I error here if the assoc_id is
10148 * no longer valid? i.e. I can't find it?
10152 /* Must locate the net structure */
10153 net
= sctp_findnet(stcb
, addr
);
10156 if (stcb
== NULL
) {
10157 if (addr
!= NULL
) {
10158 /* Since we did not use findep we must
10159 * increment it, and if we don't find a
10160 * tcb decrement it.
10162 SCTP_INP_WLOCK(inp
);
10163 SCTP_INP_INCR_REF(inp
);
10164 SCTP_INP_WUNLOCK(inp
);
10165 stcb
= sctp_findassociation_ep_addr(&t_inp
, addr
, &net
, NULL
, NULL
);
10166 if (stcb
== NULL
) {
10167 SCTP_INP_WLOCK(inp
);
10168 SCTP_INP_DECR_REF(inp
);
10169 SCTP_INP_WUNLOCK(inp
);
10174 if ((stcb
== NULL
) &&
10175 (inp
->sctp_flags
& SCTP_PCB_FLAGS_TCPTYPE
)) {
10179 } else if ((stcb
== NULL
) && (addr
== NULL
)) {
10183 } else if (stcb
== NULL
) {
10184 /* UDP style, we must go ahead and start the INIT process */
10185 if ((use_rcvinfo
) &&
10186 (srcv
.sinfo_flags
& MSG_ABORT
)) {
10187 /* User asks to abort a non-existant asoc */
10192 /* get an asoc/stcb struct */
10193 stcb
= sctp_aloc_assoc(inp
, addr
, 1, &error
, 0);
10194 if (stcb
== NULL
) {
10195 /* Error is setup for us in the call */
10199 if (create_lock_applied
) {
10200 SCTP_ASOC_CREATE_UNLOCK(inp
);
10201 create_lock_applied
= 0;
10203 kprintf("Huh-3? create lock should have been on??\n");
10205 /* Turn on queue only flag to prevent data from being sent */
10207 asoc
= &stcb
->asoc
;
10208 asoc
->state
= SCTP_STATE_COOKIE_WAIT
;
10209 SCTP_GETTIME_TIMEVAL(&asoc
->time_entered
);
10211 /* see if a init structure exists in cmsg headers */
10212 struct sctp_initmsg initm
;
10214 if (sctp_find_cmsg(SCTP_INIT
, (void *)&initm
, control
, sizeof(initm
))) {
10215 /* we have an INIT override of the default */
10216 if (initm
.sinit_max_attempts
)
10217 asoc
->max_init_times
= initm
.sinit_max_attempts
;
10218 if (initm
.sinit_num_ostreams
)
10219 asoc
->pre_open_streams
= initm
.sinit_num_ostreams
;
10220 if (initm
.sinit_max_instreams
)
10221 asoc
->max_inbound_streams
= initm
.sinit_max_instreams
;
10222 if (initm
.sinit_max_init_timeo
)
10223 asoc
->initial_init_rto_max
= initm
.sinit_max_init_timeo
;
10224 if (asoc
->streamoutcnt
< asoc
->pre_open_streams
) {
10225 /* Default is NOT correct */
10227 if (sctp_debug_on
& SCTP_DEBUG_OUTPUT1
) {
10228 kprintf("Ok, defout:%d pre_open:%d\n",
10229 asoc
->streamoutcnt
, asoc
->pre_open_streams
);
10232 FREE(asoc
->strmout
, M_PCB
);
10233 asoc
->strmout
= NULL
;
10234 asoc
->streamoutcnt
= asoc
->pre_open_streams
;
10236 /* What happesn if this fails? .. we panic ...*/
10237 MALLOC(asoc
->strmout
,
10238 struct sctp_stream_out
*,
10239 asoc
->streamoutcnt
*
10240 sizeof(struct sctp_stream_out
),
10242 for (i
= 0; i
< asoc
->streamoutcnt
; i
++) {
10244 * inbound side must be set to 0xffff,
10245 * also NOTE when we get the INIT-ACK
10246 * back (for INIT sender) we MUST
10247 * reduce the count (streamoutcnt) but
10248 * first check if we sent to any of the
10249 * upper streams that were dropped (if
10250 * some were). Those that were dropped
10251 * must be notified to the upper layer
10252 * as failed to send.
10254 asoc
->strmout
[i
].next_sequence_sent
= 0x0;
10255 TAILQ_INIT(&asoc
->strmout
[i
].outqueue
);
10256 asoc
->strmout
[i
].stream_no
= i
;
10257 asoc
->strmout
[i
].next_spoke
.tqe_next
= 0;
10258 asoc
->strmout
[i
].next_spoke
.tqe_prev
= 0;
10264 /* out with the INIT */
10265 queue_only_for_init
= 1;
10266 sctp_send_initiate(inp
, stcb
);
10268 * we may want to dig in after this call and adjust the MTU
10269 * value. It defaulted to 1500 (constant) but the ro structure
10270 * may now have an update and thus we may need to change it
10271 * BEFORE we append the message.
10273 net
= stcb
->asoc
.primary_destination
;
10274 asoc
= &stcb
->asoc
;
10276 asoc
= &stcb
->asoc
;
10278 if (create_lock_applied
) {
10279 SCTP_ASOC_CREATE_UNLOCK(inp
);
10280 create_lock_applied
= 0;
10282 if ((SCTP_GET_STATE(asoc
) == SCTP_STATE_COOKIE_WAIT
) ||
10283 (SCTP_GET_STATE(asoc
) == SCTP_STATE_COOKIE_ECHOED
)) {
10286 if (use_rcvinfo
== 0) {
10287 /* Grab the default stuff from the asoc */
10288 srcv
= stcb
->asoc
.def_send
;
10290 /* we are now done with all control */
10292 sctp_m_freem(control
);
10296 if ((SCTP_GET_STATE(asoc
) == SCTP_STATE_SHUTDOWN_SENT
) ||
10297 (SCTP_GET_STATE(asoc
) == SCTP_STATE_SHUTDOWN_RECEIVED
) ||
10298 (SCTP_GET_STATE(asoc
) == SCTP_STATE_SHUTDOWN_ACK_SENT
) ||
10299 (asoc
->state
& SCTP_STATE_SHUTDOWN_PENDING
)) {
10300 if ((use_rcvinfo
) &&
10301 (srcv
.sinfo_flags
& MSG_ABORT
)) {
10304 error
= ECONNRESET
;
10309 /* Ok, we will attempt a msgsnd :> */
10311 #if (defined(__FreeBSD__) && __FreeBSD_version >= 500000) || defined(__DragonFly__)
10312 p
->td_lwp
->lwp_ru
.ru_msgsnd
++;
10314 p
->p_stats
->p_ru
.ru_msgsnd
++;
10318 if (net
&& ((srcv
.sinfo_flags
& MSG_ADDR_OVER
))) {
10319 /* we take the override or the unconfirmed */
10322 net
= stcb
->asoc
.primary_destination
;
10327 /* Must copy it all in from user land. The
10328 * socket buf is locked but we don't suspend
10329 * protocol processing until we are ready to
10333 error
= sctp_copy_it_in(inp
, stcb
, asoc
, net
, &srcv
, uio
, flags
);
10337 /* Here we must either pull in the user data to chunk
10338 * buffers, or use top to do a msg_append.
10340 error
= sctp_msg_append(stcb
, net
, top
, &srcv
, flags
);
10344 /* zap the top since it is now being used */
10348 if (net
->flight_size
> net
->cwnd
) {
10349 sctp_pegs
[SCTP_SENDTO_FULL_CWND
]++;
10352 } else if (asoc
->ifp_had_enobuf
) {
10353 sctp_pegs
[SCTP_QUEONLY_BURSTLMT
]++;
10356 un_sent
= ((stcb
->asoc
.total_output_queue_size
- stcb
->asoc
.total_flight
) +
10357 ((stcb
->asoc
.chunks_on_out_queue
- stcb
->asoc
.total_flight_count
) * sizeof(struct sctp_data_chunk
)) +
10358 SCTP_MED_OVERHEAD
);
10360 if (((inp
->sctp_flags
& SCTP_PCB_FLAGS_NODELAY
) == 0) &&
10361 (stcb
->asoc
.total_flight
> 0) &&
10362 (un_sent
< (int)stcb
->asoc
.smallest_mtu
)) {
10364 /* Ok, Nagle is set on and we have data outstanding. Don't
10365 * send anything and let SACKs drive out the data unless we
10366 * have a "full" segment to send.
10368 sctp_pegs
[SCTP_NAGLE_NOQ
]++;
10371 sctp_pegs
[SCTP_NAGLE_OFF
]++;
10374 if (queue_only_for_init
) {
10375 /* It is possible to have a turn around of the
10376 * INIT/INIT-ACK/COOKIE before I have a chance to
10377 * copy in the data. In such a case I DO want to
10378 * send it out by reversing the queue only flag.
10380 if ((SCTP_GET_STATE(asoc
) != SCTP_STATE_COOKIE_WAIT
) ||
10381 (SCTP_GET_STATE(asoc
) != SCTP_STATE_COOKIE_ECHOED
)) {
10382 /* yep, reverse it */
10387 if ((queue_only
== 0) && (stcb
->asoc
.peers_rwnd
&& un_sent
)) {
10388 /* we can attempt to send too.*/
10390 if (sctp_debug_on
& SCTP_DEBUG_OUTPUT1
) {
10391 kprintf("USR Send calls sctp_chunk_output\n");
10395 sctp_pegs
[SCTP_OUTPUT_FRM_SND
]++;
10396 sctp_chunk_output(inp
, stcb
, 0);
10398 } else if ((queue_only
== 0) &&
10399 (stcb
->asoc
.peers_rwnd
== 0) &&
10400 (stcb
->asoc
.total_flight
== 0)) {
10401 /* We get to have a probe outstanding */
10403 sctp_from_user_send
= 1;
10404 sctp_chunk_output(inp
, stcb
, 0);
10405 sctp_from_user_send
= 0;
10408 } else if (!TAILQ_EMPTY(&stcb
->asoc
.control_send_queue
)) {
10409 int num_out
, reason
, cwnd_full
;
10410 /* Here we do control only */
10412 sctp_med_chunk_output(inp
, stcb
, &stcb
->asoc
, &num_out
,
10413 &reason
, 1, &cwnd_full
, 1, &now
, &now_filled
);
10417 if (sctp_debug_on
& SCTP_DEBUG_OUTPUT1
) {
10418 kprintf("USR Send complete qo:%d prw:%d unsent:%d tf:%d cooq:%d toqs:%d \n",
10419 queue_only
, stcb
->asoc
.peers_rwnd
, un_sent
,
10420 stcb
->asoc
.total_flight
, stcb
->asoc
.chunks_on_out_queue
,
10421 stcb
->asoc
.total_output_queue_size
);
10425 if (create_lock_applied
) {
10426 SCTP_ASOC_CREATE_UNLOCK(inp
);
10427 create_lock_applied
= 0;
10430 SCTP_TCB_UNLOCK(stcb
);
10434 sctp_m_freem(control
);