1 /* $KAME: sctp_output.c,v 1.46 2005/03/06 16:04:17 itojun Exp $ */
2 /* $DragonFly: src/sys/netinet/sctp_output.c,v 1.11 2007/04/22 01:13:14 dillon Exp $ */
5 * Copyright (C) 2002, 2003, 2004 Cisco Systems Inc,
8 * Redistribution and use in source and binary forms, with or without
9 * modification, are permitted provided that the following conditions
11 * 1. Redistributions of source code must retain the above copyright
12 * notice, this list of conditions and the following disclaimer.
13 * 2. Redistributions in binary form must reproduce the above copyright
14 * notice, this list of conditions and the following disclaimer in the
15 * documentation and/or other materials provided with the distribution.
16 * 3. Neither the name of the project nor the names of its contributors
17 * may be used to endorse or promote products derived from this software
18 * without specific prior written permission.
20 * THIS SOFTWARE IS PROVIDED BY THE PROJECT AND CONTRIBUTORS ``AS IS'' AND
21 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
22 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
23 * ARE DISCLAIMED. IN NO EVENT SHALL THE PROJECT OR CONTRIBUTORS BE LIABLE
24 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
25 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
26 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
27 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
28 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
29 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
33 #if !(defined(__OpenBSD__) || defined (__APPLE__))
34 #include "opt_ipsec.h"
36 #if defined(__FreeBSD__) || defined(__DragonFly__)
37 #include "opt_compat.h"
38 #include "opt_inet6.h"
41 #if defined(__NetBSD__)
46 #elif !defined(__OpenBSD__)
49 #include <sys/param.h>
50 #include <sys/systm.h>
51 #include <sys/malloc.h>
54 #include <sys/domain.h>
56 #include <sys/protosw.h>
57 #include <sys/socket.h>
58 #include <sys/socketvar.h>
60 #include <sys/kernel.h>
61 #include <sys/sysctl.h>
62 #include <sys/resourcevar.h>
65 #include <sys/domain.h>
67 #include <sys/thread2.h>
69 #if (defined(__FreeBSD__) && __FreeBSD_version >= 500000)
70 #include <sys/limits.h>
72 #include <machine/limits.h>
74 #include <machine/cpu.h>
77 #include <net/if_types.h>
79 #if defined(__FreeBSD__) || defined(__DragonFly__)
80 #include <net/if_var.h>
83 #include <net/route.h>
85 #include <netinet/in.h>
86 #include <netinet/in_systm.h>
87 #include <netinet/ip.h>
88 #include <netinet/in_pcb.h>
89 #include <netinet/in_var.h>
90 #include <netinet/ip_var.h>
93 #include <netinet/ip6.h>
94 #include <netinet6/ip6_var.h>
95 #include <netinet6/scope6_var.h>
96 #include <netinet6/nd6.h>
98 #if defined(__FreeBSD__) || defined(__NetBSD__) || defined(__DragonFly__)
99 #include <netinet6/in6_pcb.h>
100 #elif defined(__OpenBSD__)
101 #include <netinet/in_pcb.h>
104 #include <netinet/icmp6.h>
108 #include <net/net_osdep.h>
110 #if defined(HAVE_NRL_INPCB) || defined(__FreeBSD__) || defined(__DragonFly__)
116 #include <netinet/sctp_pcb.h>
120 #include <netinet6/ipsec.h>
121 #include <netproto/key/key.h>
127 #include <netinet/sctp_var.h>
128 #include <netinet/sctp_header.h>
129 #include <netinet/sctputil.h>
130 #include <netinet/sctp_pcb.h>
131 #include <netinet/sctp_output.h>
132 #include <netinet/sctp_uio.h>
133 #include <netinet/sctputil.h>
134 #include <netinet/sctp_hashdriver.h>
135 #include <netinet/sctp_timer.h>
136 #include <netinet/sctp_asconf.h>
137 #include <netinet/sctp_indata.h>
140 extern uint32_t sctp_debug_on
;
143 extern int sctp_peer_chunk_oh
;
/*
 * sctp_find_cmsg(): walk the cmsghdr records held in the `control` mbuf
 * looking for one with cmsg_level == IPPROTO_SCTP and cmsg_type == c_type;
 * when found, m_copydata() copies `cpsize` bytes of its payload into `data`.
 * Bounds checks (tlen vs. at, cmsg_len vs. tlen) guard against truncated or
 * malformed cmsg records before anything is copied out.
 * NOTE(review): this chunk is a garbled extraction — statements are split
 * across lines and the embedded original line numbers skip (146→151,
 * 155→158, ...), so the function's opening brace, declarations, loop
 * structure and returns are missing here. Only comments were added.
 */
146 sctp_find_cmsg(int c_type
, void *data
, struct mbuf
*control
, int cpsize
)
151 tlen
= control
->m_len
;
154 * Independent of how many mbufs, find the c_type inside the control
155 * structure and copy out the data.
/* Stop when the remaining control data cannot hold another cmsghdr. */
158 if ((tlen
-at
) < (int)CMSG_ALIGN(sizeof(cmh
))) {
159 /* not enough room for one more we are done. */
/* Copy the header out of the (possibly chained) mbuf into local cmh. */
162 m_copydata(control
, at
, sizeof(cmh
), (caddr_t
)&cmh
);
163 if ((cmh
.cmsg_len
+ at
) > tlen
) {
165 * this is real messed up since there is not enough
166 * data here to cover the cmsg header. We are done.
170 if ((cmh
.cmsg_level
== IPPROTO_SCTP
) &&
171 (c_type
== cmh
.cmsg_type
)) {
172 /* found the one we want, copy it out */
173 at
+= CMSG_ALIGN(sizeof(struct cmsghdr
));
/* Reject records whose declared payload is smaller than cpsize. */
174 if ((int)(cmh
.cmsg_len
- CMSG_ALIGN(sizeof(struct cmsghdr
))) < cpsize
) {
176 * space of cmsg_len after header not
181 m_copydata(control
, at
, cpsize
, data
);
/* Not a match: advance past this record (aligned) and keep scanning. */
184 at
+= CMSG_ALIGN(cmh
.cmsg_len
);
/* cmsg_len == 0 would never advance — bail to avoid an infinite loop. */
185 if (cmh
.cmsg_len
== 0) {
/*
 * sctp_add_addr_to_mbuf(): append an SCTP address parameter (TLV) for the
 * interface address `ifa` to the mbuf chain `m`.  AF_INET addresses become a
 * struct sctp_ipv4addr_param (SCTP_IPV4_ADDRESS); AF_INET6 addresses become a
 * struct sctp_ipv6addr_param (SCTP_IPV6_ADDRESS, with any embedded scope id
 * cleared via in6_clearscope()).  If the tail mbuf lacks M_TRAILINGSPACE for
 * the parameter, a new MT_DATA mbuf is allocated with MGET(..., MB_DONTWAIT)
 * and linked onto the chain; allocation failure abandons the add.
 * NOTE(review): garbled extraction — original lines are missing between the
 * embedded numbers (195→197, 203→209, ...), so brace structure, the m_len
 * updates and the return are not visible here.  Only comments were added.
 */
195 sctp_add_addr_to_mbuf(struct mbuf
*m
, struct ifaddr
*ifa
)
197 struct sctp_paramhdr
*parmh
;
/* Parameter length depends on the address family being encoded. */
200 if (ifa
->ifa_addr
->sa_family
== AF_INET
) {
201 len
= sizeof(struct sctp_ipv4addr_param
);
202 } else if (ifa
->ifa_addr
->sa_family
== AF_INET6
) {
203 len
= sizeof(struct sctp_ipv6addr_param
);
209 if (M_TRAILINGSPACE(m
) >= len
) {
210 /* easy side we just drop it on the end */
211 parmh
= (struct sctp_paramhdr
*)(m
->m_data
+ m
->m_len
);
214 /* Need more space */
/* Walk to the last mbuf in the chain before appending a new one. */
216 while (mret
->m_next
!= NULL
) {
219 MGET(mret
->m_next
, MB_DONTWAIT
, MT_DATA
);
220 if (mret
->m_next
== NULL
) {
221 /* We are hosed, can't add more addresses */
225 parmh
= mtod(mret
, struct sctp_paramhdr
*);
227 /* now add the parameter */
228 if (ifa
->ifa_addr
->sa_family
== AF_INET
) {
229 struct sctp_ipv4addr_param
*ipv4p
;
230 struct sockaddr_in
*sin
;
231 sin
= (struct sockaddr_in
*)ifa
->ifa_addr
;
232 ipv4p
= (struct sctp_ipv4addr_param
*)parmh
;
/* TLV header: type/length are wire-format (network byte order). */
233 parmh
->param_type
= htons(SCTP_IPV4_ADDRESS
);
234 parmh
->param_length
= htons(len
);
235 ipv4p
->addr
= sin
->sin_addr
.s_addr
;
237 } else if (ifa
->ifa_addr
->sa_family
== AF_INET6
) {
238 struct sctp_ipv6addr_param
*ipv6p
;
239 struct sockaddr_in6
*sin6
;
240 sin6
= (struct sockaddr_in6
*)ifa
->ifa_addr
;
241 ipv6p
= (struct sctp_ipv6addr_param
*)parmh
;
242 parmh
->param_type
= htons(SCTP_IPV6_ADDRESS
);
243 parmh
->param_length
= htons(len
);
244 memcpy(ipv6p
->addr
, &sin6
->sin6_addr
,
245 sizeof(ipv6p
->addr
));
246 /* clear embedded scope in the address */
247 in6_clearscope((struct in6_addr
*)ipv6p
->addr
);
/*
 * sctp_add_cookie(): build a STATE-COOKIE parameter for the INIT-ACK.
 * Copies of the received INIT (from init_offset) and the local INIT-ACK
 * (from initack_offset) are made with sctp_m_copym() and chained after an
 * mbuf holding the sctp_paramhdr + sctp_state_cookie, the total size being
 * accumulated in cookie_sz.  The cookie is then signed: sctp_hash_digest_m()
 * computes a digest over the chain (skipping the paramhdr) keyed with the
 * endpoint's current secret (inp->sctp_ep.secret_key[current_secret_number]),
 * the SCTP_SIGNATURE_SIZE signature mbuf `sig` is appended, and
 * ph->param_length is finally set to htons(cookie_sz).
 * On copy failure the partial copies are released with sctp_m_freem().
 * NOTE(review): garbled extraction — the embedded line numbers skip
 * (263→270, 294→297, 318→322, ...), so error-return paths, the stc_in copy
 * into *stc, padding logic and the final return are missing from this view.
 * Only comments were added.
 */
258 sctp_add_cookie(struct sctp_inpcb
*inp
, struct mbuf
*init
, int init_offset
,
259 struct mbuf
*initack
, int initack_offset
, struct sctp_state_cookie
*stc_in
)
261 struct mbuf
*copy_init
, *copy_initack
, *m_at
, *sig
, *mret
;
262 struct sctp_state_cookie
*stc
;
263 struct sctp_paramhdr
*ph
;
/* Head mbuf that will carry the paramhdr + state cookie. */
270 MGET(mret
, MB_DONTWAIT
, MT_DATA
);
274 copy_init
= sctp_m_copym(init
, init_offset
, M_COPYALL
, MB_DONTWAIT
);
275 if (copy_init
== NULL
) {
279 copy_initack
= sctp_m_copym(initack
, initack_offset
, M_COPYALL
,
281 if (copy_initack
== NULL
) {
283 sctp_m_freem(copy_init
);
286 /* easy side we just drop it on the end */
287 ph
= mtod(mret
, struct sctp_paramhdr
*);
288 mret
->m_len
= sizeof(struct sctp_state_cookie
) +
289 sizeof(struct sctp_paramhdr
);
290 stc
= (struct sctp_state_cookie
*)((caddr_t
)ph
+
291 sizeof(struct sctp_paramhdr
));
292 ph
->param_type
= htons(SCTP_STATE_COOKIE
);
293 ph
->param_length
= 0; /* fill in at the end */
294 /* Fill in the stc cookie data */
297 /* tack the INIT and then the INIT-ACK onto the chain */
/* First pass: size the cookie mbuf(s) and link the INIT copy at the tail. */
300 for (m_at
= mret
; m_at
; m_at
= m_at
->m_next
) {
301 cookie_sz
+= m_at
->m_len
;
302 if (m_at
->m_next
== NULL
) {
303 m_at
->m_next
= copy_init
;
/* Second pass: size the INIT copy and link the INIT-ACK copy behind it. */
308 for (m_at
= copy_init
; m_at
; m_at
= m_at
->m_next
) {
309 cookie_sz
+= m_at
->m_len
;
310 if (m_at
->m_next
== NULL
) {
311 m_at
->m_next
= copy_initack
;
/* Third pass: size the INIT-ACK copy; its tail gets the signature mbuf. */
316 for (m_at
= copy_initack
; m_at
; m_at
= m_at
->m_next
) {
317 cookie_sz
+= m_at
->m_len
;
318 if (m_at
->m_next
== NULL
) {
322 MGET(sig
, MB_DONTWAIT
, MT_DATA
);
/* Allocation failed: release both chain copies. */
326 sctp_m_freem(copy_init
);
327 sctp_m_freem(copy_initack
);
333 signature
= (uint8_t *)(mtod(sig
, caddr_t
) + sig_offset
);
334 /* Time to sign the cookie */
335 sctp_hash_digest_m((char *)inp
->sctp_ep
.secret_key
[
336 (int)(inp
->sctp_ep
.current_secret_number
)],
337 SCTP_SECRET_SIZE
, mret
, sizeof(struct sctp_paramhdr
),
338 (uint8_t *)signature
);
339 sig
->m_len
+= SCTP_SIGNATURE_SIZE
;
340 cookie_sz
+= SCTP_SIGNATURE_SIZE
;
342 ph
->param_length
= htons(cookie_sz
);
/*
 * sctp_is_v4_ifa_addr_prefered(): classify the AF_INET address of `ifa`
 * against the destination's scope flags.  Outputs *sin_loop / *sin_local
 * (address is loopback / RFC1918-private) and — per the truth table kept in
 * the original comment below — yields the sockaddr_in when the source's
 * scope is the same or higher than the destination's, rejecting:
 * non-AF_INET addresses, INADDR_ANY, loopback without loopscope, private
 * without ipv4_scope, and private sources for a global destination.
 * NOTE(review): garbled extraction — the return statements and closing
 * braces are among the original lines dropped between the embedded numbers
 * (348→350, 376→379, 406→409).  Only comments were added.
 */
347 static struct sockaddr_in
*
348 sctp_is_v4_ifa_addr_prefered (struct ifaddr
*ifa
, uint8_t loopscope
, uint8_t ipv4_scope
, uint8_t *sin_loop
, uint8_t *sin_local
)
350 struct sockaddr_in
*sin
;
352 * Here we determine if its a prefered address. A
353 * prefered address means it is the same scope or
354 * higher scope then the destination.
355 * L = loopback, P = private, G = global
356 * -----------------------------------------
357 * src | dest | result
358 *-----------------------------------------
360 *-----------------------------------------
362 *-----------------------------------------
364 *-----------------------------------------
366 *-----------------------------------------
368 *-----------------------------------------
370 *-----------------------------------------
372 *-----------------------------------------
374 *-----------------------------------------
376 *-----------------------------------------
379 if (ifa
->ifa_addr
->sa_family
!= AF_INET
) {
383 /* Ok the address may be ok */
384 sin
= (struct sockaddr_in
*)ifa
->ifa_addr
;
/* INADDR_ANY can never be a usable source. */
385 if (sin
->sin_addr
.s_addr
== 0) {
388 *sin_local
= *sin_loop
= 0;
/* Loopback if either the interface is IFT_LOOP or the address is 127/8. */
389 if ((ifa
->ifa_ifp
->if_type
== IFT_LOOP
) ||
390 (IN4_ISLOOPBACK_ADDRESS(&sin
->sin_addr
))) {
394 if ((IN4_ISPRIVATE_ADDRESS(&sin
->sin_addr
))) {
397 if (!loopscope
&& *sin_loop
) {
398 /* Its a loopback address and we don't have loop scope */
401 if (!ipv4_scope
&& *sin_local
) {
402 /* Its a private address, and we don't have private address scope */
405 if (((ipv4_scope
== 0) && (loopscope
== 0)) && (*sin_local
)) {
406 /* its a global src and a private dest */
409 /* its a prefered address */
/*
 * sctp_is_v4_ifa_addr_acceptable(): looser companion to
 * sctp_is_v4_ifa_addr_prefered().  Same classification of `ifa`'s AF_INET
 * address into *sin_loop / *sin_local, but (per the table below) it
 * tolerates NAT cases — a private source toward a global destination is
 * acceptable here.  Still rejects non-AF_INET, INADDR_ANY, and loopback
 * addresses when loopscope is not set.
 * NOTE(review): garbled extraction — return statements/braces dropped
 * between the embedded line numbers (414→416, 444→447, 466→469).  Only
 * comments were added.
 */
413 static struct sockaddr_in
*
414 sctp_is_v4_ifa_addr_acceptable (struct ifaddr
*ifa
, uint8_t loopscope
, uint8_t ipv4_scope
, uint8_t *sin_loop
, uint8_t *sin_local
)
416 struct sockaddr_in
*sin
;
418 * Here we determine if its a acceptable address. A
419 * acceptable address means it is the same scope or
420 * higher scope but we can allow for NAT which means
421 * its ok to have a global dest and a private src.
423 * L = loopback, P = private, G = global
424 * -----------------------------------------
425 * src | dest | result
426 *-----------------------------------------
428 *-----------------------------------------
430 *-----------------------------------------
432 *-----------------------------------------
434 *-----------------------------------------
436 *-----------------------------------------
437 * G | P | yes - probably this won't work.
438 *-----------------------------------------
440 *-----------------------------------------
442 *-----------------------------------------
444 *-----------------------------------------
447 if (ifa
->ifa_addr
->sa_family
!= AF_INET
) {
451 /* Ok the address may be ok */
452 sin
= (struct sockaddr_in
*)ifa
->ifa_addr
;
/* INADDR_ANY can never be a usable source. */
453 if (sin
->sin_addr
.s_addr
== 0) {
456 *sin_local
= *sin_loop
= 0;
457 if ((ifa
->ifa_ifp
->if_type
== IFT_LOOP
) ||
458 (IN4_ISLOOPBACK_ADDRESS(&sin
->sin_addr
))) {
462 if ((IN4_ISPRIVATE_ADDRESS(&sin
->sin_addr
))) {
465 if (!loopscope
&& *sin_loop
) {
466 /* Its a loopback address and we don't have loop scope */
469 /* its an acceptable address */
/*
 * sctp_is_addr_restricted(): membership test of `addr` against the
 * association's negative ("restricted") address list,
 * stcb->asoc.sctp_local_addr_list, compared via sctp_cmpaddr().  The first
 * LIST_FOREACH pass (under SCTP_DEBUG_OUTPUT4) only counts/prints entries;
 * the second pass does the comparison, skipping entries whose ifa pointer
 * was cleared ("address has been removed").
 * NOTE(review): garbled extraction — the NULL-stcb early return, counters
 * and the hit/miss returns fall in the dropped original lines (476→479,
 * 486→490, 515→...).  Only comments were added.
 */
474 * This treats the address list on the ep as a restricted list
475 * (negative list). If a the passed address is listed, then
476 * the address is NOT allowed on the association.
479 sctp_is_addr_restricted(struct sctp_tcb
*stcb
, struct sockaddr
*addr
)
481 struct sctp_laddr
*laddr
;
486 /* There are no restrictions, no TCB :-) */
490 LIST_FOREACH(laddr
, &stcb
->asoc
.sctp_local_addr_list
, sctp_nxt_addr
) {
493 if (sctp_debug_on
& SCTP_DEBUG_OUTPUT4
) {
494 kprintf("There are %d addresses on the restricted list\n", cnt
);
498 LIST_FOREACH(laddr
, &stcb
->asoc
.sctp_local_addr_list
, sctp_nxt_addr
) {
/* Stale entry: the ifa backing this laddr has gone away. */
499 if (laddr
->ifa
== NULL
) {
501 if (sctp_debug_on
& SCTP_DEBUG_OUTPUT1
) {
502 kprintf("Help I have fallen and I can't get up!\n");
508 if (sctp_debug_on
& SCTP_DEBUG_OUTPUT4
) {
510 kprintf("Restricted address[%d]:", cnt
);
511 sctp_print_address(laddr
->ifa
->ifa_addr
);
514 if (sctp_cmpaddr(addr
, laddr
->ifa
->ifa_addr
) == 1) {
515 /* Yes it is on the list */
/*
 * sctp_is_addr_in_ep(): does the endpoint's bound-address list
 * (inp->sctp_addr_list) contain interface address `ifa`?  A hit is either
 * pointer identity (laddr->ifa == ifa) or value equality via sctp_cmpaddr();
 * entries with a NULL ifa or ifa_addr are skipped, as are entries of a
 * different address family.
 * NOTE(review): garbled extraction — the return statements live in original
 * lines dropped between the embedded numbers (548→...).  Only comments were
 * added.
 */
523 sctp_is_addr_in_ep(struct sctp_inpcb
*inp
, struct ifaddr
*ifa
)
525 struct sctp_laddr
*laddr
;
529 LIST_FOREACH(laddr
, &inp
->sctp_addr_list
, sctp_nxt_addr
) {
530 if (laddr
->ifa
== NULL
) {
532 if (sctp_debug_on
& SCTP_DEBUG_OUTPUT1
) {
533 kprintf("Help I have fallen and I can't get up!\n");
538 if (laddr
->ifa
->ifa_addr
== NULL
)
/* Fast path: same ifaddr object. */
540 if (laddr
->ifa
== ifa
)
543 if (laddr
->ifa
->ifa_addr
->sa_family
!= ifa
->ifa_addr
->sa_family
) {
544 /* skip non compatible address comparison */
547 if (sctp_cmpaddr(ifa
->ifa_addr
, laddr
->ifa
->ifa_addr
) == 1) {
548 /* Yes it is restricted */
/*
 * sctp_choose_v4_boundspecific_inp(): pick an IPv4 source address for an
 * endpoint bound to specific addresses when no association (stcb) is
 * involved.  Preference order visible here:
 *   1. a "prefered" address on the emit interface that is in the ep list;
 *   2. an "acceptable" address on the emit interface that is in the ep list;
 *   3. a prefered address anywhere in inp->sctp_addr_list (rotating up to
 *      inp->next_addr_touse);
 *   4. an acceptable address from the same list.
 * Failing all, a zeroed in_addr (`ans`) is the punt value.
 * NOTE(review): garbled extraction — parameter list is truncated
 * (original lines 559-563 missing: the ifn/rt/scope parameters referenced
 * below), and NULL-checks/continues fall in dropped lines.  Only comments
 * were added.
 */
557 static struct in_addr
558 sctp_choose_v4_boundspecific_inp(struct sctp_inpcb
*inp
,
564 struct sctp_laddr
*laddr
;
565 struct sockaddr_in
*sin
;
568 uint8_t sin_loop
, sin_local
;
570 /* first question, is the ifn we will emit on
571 * in our list, if so, we want that one.
575 /* is a prefered one on the interface we route out? */
576 TAILQ_FOREACH(ifa
, &ifn
->if_addrlist
, ifa_list
) {
577 sin
= sctp_is_v4_ifa_addr_prefered (ifa
, loopscope
, ipv4_scope
, &sin_loop
, &sin_local
);
580 if (sctp_is_addr_in_ep(inp
, ifa
)) {
581 return (sin
->sin_addr
);
584 /* is an acceptable one on the interface we route out? */
585 TAILQ_FOREACH(ifa
, &ifn
->if_addrlist
, ifa_list
) {
586 sin
= sctp_is_v4_ifa_addr_acceptable (ifa
, loopscope
, ipv4_scope
, &sin_loop
, &sin_local
);
589 if (sctp_is_addr_in_ep(inp
, ifa
)) {
590 return (sin
->sin_addr
);
594 /* ok, what about a prefered address in the inp */
/* Rotation: scan from list head up to (not including) next_addr_touse. */
595 for (laddr
= LIST_FIRST(&inp
->sctp_addr_list
);
596 laddr
&& (laddr
!= inp
->next_addr_touse
);
597 laddr
= LIST_NEXT(laddr
, sctp_nxt_addr
)) {
598 if (laddr
->ifa
== NULL
) {
599 /* address has been removed */
602 sin
= sctp_is_v4_ifa_addr_prefered (laddr
->ifa
, loopscope
, ipv4_scope
, &sin_loop
, &sin_local
);
605 return (sin
->sin_addr
);
608 /* ok, what about an acceptable address in the inp */
609 for (laddr
= LIST_FIRST(&inp
->sctp_addr_list
);
610 laddr
&& (laddr
!= inp
->next_addr_touse
);
611 laddr
= LIST_NEXT(laddr
, sctp_nxt_addr
)) {
612 if (laddr
->ifa
== NULL
) {
613 /* address has been removed */
616 sin
= sctp_is_v4_ifa_addr_acceptable (laddr
->ifa
, loopscope
, ipv4_scope
, &sin_loop
, &sin_local
);
619 return (sin
->sin_addr
);
623 /* no address bound can be a source for the destination we are in trouble */
625 if (sctp_debug_on
& SCTP_DEBUG_OUTPUT1
) {
626 kprintf("Src address selection for EP, no acceptable src address found for address\n");
/* Punt: 0.0.0.0 lets ip_output pick (see selection notes further down). */
629 memset(&ans
, 0, sizeof(ans
));
/*
 * sctp_choose_v4_boundspecific_stcb(): IPv4 source selection for a
 * bound-specific endpoint WITH an association.  Two regimes:
 *
 *  - SCTP_PCB_FLAGS_DO_ASCONF set: the ep list is authoritative and the
 *    association list is a NEGATIVE list — candidates found via the ep/ifn
 *    are rejected when sctp_is_addr_restricted() says so (unless
 *    non_asoc_addr_ok).  Search order: prefered on emit ifn, acceptable on
 *    emit ifn, then rotate through inp->sctp_addr_list starting at
 *    asoc.last_used_address (wrapping once via the from-the-top labels),
 *    first for prefered then for acceptable addresses.
 *
 *  - ASCONF off: asoc.sctp_local_addr_list is a POSITIVE list — only those
 *    addresses may be used.  Search order: a listed address that sits on
 *    (or equals an address of) the emit interface, prefered then
 *    acceptable; then any listed prefered address; then any listed
 *    acceptable one.
 *
 * Falls back to a zeroed in_addr (`ans`) when nothing qualifies.
 * NOTE(review): garbled extraction — the parameter lines for rt/scopes
 * (original 639-641), the `sctpv4_from_the_top` label, continues and
 * closing braces are among the dropped original lines.  Only comments were
 * added.
 */
635 static struct in_addr
636 sctp_choose_v4_boundspecific_stcb(struct sctp_inpcb
*inp
,
637 struct sctp_tcb
*stcb
,
638 struct sctp_nets
*net
,
642 int non_asoc_addr_ok
)
645 * Here we have two cases, bound all asconf
646 * allowed. bound all asconf not allowed.
649 struct sctp_laddr
*laddr
, *starting_point
;
653 uint8_t sin_loop
, sin_local
, start_at_beginning
=0;
654 struct sockaddr_in
*sin
;
656 /* first question, is the ifn we will emit on
657 * in our list, if so, we want that one.
661 if (inp
->sctp_flags
& SCTP_PCB_FLAGS_DO_ASCONF
) {
663 * Here we use the list of addresses on the endpoint. Then
664 * the addresses listed on the "restricted" list is just that,
665 * address that have not been added and can't be used (unless
666 * the non_asoc_addr_ok is set).
669 if (sctp_debug_on
& SCTP_DEBUG_OUTPUT1
) {
670 kprintf("Have a STCB - asconf allowed, not bound all have a netgative list\n");
673 /* first question, is the ifn we will emit on
674 * in our list, if so, we want that one.
677 /* first try for an prefered address on the ep */
678 TAILQ_FOREACH(ifa
, &ifn
->if_addrlist
, ifa_list
) {
679 if (sctp_is_addr_in_ep(inp
, ifa
)) {
680 sin
= sctp_is_v4_ifa_addr_prefered (ifa
, loopscope
, ipv4_scope
, &sin_loop
, &sin_local
);
683 if ((non_asoc_addr_ok
== 0) &&
684 (sctp_is_addr_restricted(stcb
, (struct sockaddr
*)sin
))) {
685 /* on the no-no list */
688 return (sin
->sin_addr
);
691 /* next try for an acceptable address on the ep */
692 TAILQ_FOREACH(ifa
, &ifn
->if_addrlist
, ifa_list
) {
693 if (sctp_is_addr_in_ep(inp
, ifa
)) {
694 sin
= sctp_is_v4_ifa_addr_acceptable (ifa
, loopscope
, ipv4_scope
, &sin_loop
, &sin_local
);
697 if ((non_asoc_addr_ok
== 0) &&
698 (sctp_is_addr_restricted(stcb
, (struct sockaddr
*)sin
))) {
699 /* on the no-no list */
702 return (sin
->sin_addr
);
707 /* if we can't find one like that then we must
708 * look at all addresses bound to pick one at
709 * first prefereable then secondly acceptable.
/* Remember the rotation point so the second (acceptable) pass can reuse it. */
711 starting_point
= stcb
->asoc
.last_used_address
;
713 if (stcb
->asoc
.last_used_address
== NULL
) {
714 start_at_beginning
=1;
715 stcb
->asoc
.last_used_address
= LIST_FIRST(&inp
->sctp_addr_list
);
717 /* search beginning with the last used address */
718 for (laddr
= stcb
->asoc
.last_used_address
; laddr
;
719 laddr
= LIST_NEXT(laddr
, sctp_nxt_addr
)) {
720 if (laddr
->ifa
== NULL
) {
721 /* address has been removed */
724 sin
= sctp_is_v4_ifa_addr_prefered (laddr
->ifa
, loopscope
, ipv4_scope
, &sin_loop
, &sin_local
);
727 if ((non_asoc_addr_ok
== 0) &&
728 (sctp_is_addr_restricted(stcb
, (struct sockaddr
*)sin
))) {
729 /* on the no-no list */
732 return (sin
->sin_addr
);
/* Wrap to the list head once if the scan started mid-list. */
735 if (start_at_beginning
== 0) {
736 stcb
->asoc
.last_used_address
= NULL
;
737 goto sctpv4_from_the_top
;
739 /* now try for any higher scope than the destination */
740 stcb
->asoc
.last_used_address
= starting_point
;
741 start_at_beginning
= 0;
742 sctpv4_from_the_top2
:
743 if (stcb
->asoc
.last_used_address
== NULL
) {
744 start_at_beginning
=1;
745 stcb
->asoc
.last_used_address
= LIST_FIRST(&inp
->sctp_addr_list
);
747 /* search beginning with the last used address */
748 for (laddr
= stcb
->asoc
.last_used_address
; laddr
;
749 laddr
= LIST_NEXT(laddr
, sctp_nxt_addr
)) {
750 if (laddr
->ifa
== NULL
) {
751 /* address has been removed */
754 sin
= sctp_is_v4_ifa_addr_acceptable (laddr
->ifa
, loopscope
, ipv4_scope
, &sin_loop
, &sin_local
);
757 if ((non_asoc_addr_ok
== 0) &&
758 (sctp_is_addr_restricted(stcb
, (struct sockaddr
*)sin
))) {
759 /* on the no-no list */
762 return (sin
->sin_addr
);
764 if (start_at_beginning
== 0) {
765 stcb
->asoc
.last_used_address
= NULL
;
766 goto sctpv4_from_the_top2
;
770 * Here we have an address list on the association, thats the
771 * only valid source addresses that we can use.
774 if (sctp_debug_on
& SCTP_DEBUG_OUTPUT1
) {
775 kprintf("Have a STCB - no asconf allowed, not bound all have a postive list\n");
778 /* First look at all addresses for one that is on
779 * the interface we route out
781 LIST_FOREACH(laddr
, &stcb
->asoc
.sctp_local_addr_list
,
783 if (laddr
->ifa
== NULL
) {
784 /* address has been removed */
787 sin
= sctp_is_v4_ifa_addr_prefered (laddr
->ifa
, loopscope
, ipv4_scope
, &sin_loop
, &sin_local
);
790 /* first question, is laddr->ifa an address associated with the emit interface */
792 TAILQ_FOREACH(ifa
, &ifn
->if_addrlist
, ifa_list
) {
793 if (laddr
->ifa
== ifa
) {
794 sin
= (struct sockaddr_in
*)laddr
->ifa
->ifa_addr
;
795 return (sin
->sin_addr
);
797 if (sctp_cmpaddr(ifa
->ifa_addr
, laddr
->ifa
->ifa_addr
) == 1) {
798 sin
= (struct sockaddr_in
*)laddr
->ifa
->ifa_addr
;
799 return (sin
->sin_addr
);
804 /* what about an acceptable one on the interface? */
805 LIST_FOREACH(laddr
, &stcb
->asoc
.sctp_local_addr_list
,
807 if (laddr
->ifa
== NULL
) {
808 /* address has been removed */
811 sin
= sctp_is_v4_ifa_addr_acceptable (laddr
->ifa
, loopscope
, ipv4_scope
, &sin_loop
, &sin_local
);
814 /* first question, is laddr->ifa an address associated with the emit interface */
816 TAILQ_FOREACH(ifa
, &ifn
->if_addrlist
, ifa_list
) {
817 if (laddr
->ifa
== ifa
) {
818 sin
= (struct sockaddr_in
*)laddr
->ifa
->ifa_addr
;
819 return (sin
->sin_addr
);
821 if (sctp_cmpaddr(ifa
->ifa_addr
, laddr
->ifa
->ifa_addr
) == 1) {
822 sin
= (struct sockaddr_in
*)laddr
->ifa
->ifa_addr
;
823 return (sin
->sin_addr
);
828 /* ok, next one that is preferable in general */
829 LIST_FOREACH(laddr
, &stcb
->asoc
.sctp_local_addr_list
,
831 if (laddr
->ifa
== NULL
) {
832 /* address has been removed */
835 sin
= sctp_is_v4_ifa_addr_prefered (laddr
->ifa
, loopscope
, ipv4_scope
, &sin_loop
, &sin_local
);
838 return (sin
->sin_addr
);
841 /* last, what about one that is acceptable */
842 LIST_FOREACH(laddr
, &stcb
->asoc
.sctp_local_addr_list
,
844 if (laddr
->ifa
== NULL
) {
845 /* address has been removed */
848 sin
= sctp_is_v4_ifa_addr_acceptable (laddr
->ifa
, loopscope
, ipv4_scope
, &sin_loop
, &sin_local
);
851 return (sin
->sin_addr
);
/* Punt: zeroed source address. */
854 memset(&ans
, 0, sizeof(ans
));
/*
 * sctp_select_v4_nth_prefered_addr_from_ifn_boundall(): walk `ifn`'s
 * address list counting "prefered" IPv4 addresses (as judged by
 * sctp_is_v4_ifa_addr_prefered(), and — unless non_asoc_addr_ok — not on
 * the association's restricted list) and yield the one whose eligible
 * index equals cur_addr_num.  Used by the bound-all chooser to rotate
 * between several equally good source addresses.
 * NOTE(review): garbled extraction — the return and the increment of
 * num_eligible_addr fall in dropped original lines (878→...).  Only
 * comments were added.
 */
858 static struct sockaddr_in
*
859 sctp_select_v4_nth_prefered_addr_from_ifn_boundall (struct ifnet
*ifn
, struct sctp_tcb
*stcb
, int non_asoc_addr_ok
,
860 uint8_t loopscope
, uint8_t ipv4_scope
, int cur_addr_num
)
863 struct sockaddr_in
*sin
;
864 uint8_t sin_loop
, sin_local
;
865 int num_eligible_addr
= 0;
866 TAILQ_FOREACH(ifa
, &ifn
->if_addrlist
, ifa_list
) {
867 sin
= sctp_is_v4_ifa_addr_prefered (ifa
, loopscope
, ipv4_scope
, &sin_loop
, &sin_local
);
871 if ((non_asoc_addr_ok
== 0) && sctp_is_addr_restricted(stcb
, (struct sockaddr
*)sin
)) {
872 /* It is restricted for some reason.. probably
878 if (cur_addr_num
== num_eligible_addr
) {
/*
 * sctp_count_v4_num_prefered_boundall(): count how many addresses on `ifn`
 * qualify as "prefered" IPv4 sources for the given scopes, excluding (when
 * non_asoc_addr_ok == 0) any on the association's restricted list.
 * The last classified address's loop/local flags are reported through the
 * sin_loop / sin_local out-parameters.  Returns the count — the companion
 * of sctp_select_v4_nth_prefered_addr_from_ifn_boundall() above.
 * NOTE(review): garbled extraction — the function's return type line and
 * the counter increment are among the dropped original lines (884-886,
 * 901-907).  Only comments were added.
 */
887 sctp_count_v4_num_prefered_boundall (struct ifnet
*ifn
, struct sctp_tcb
*stcb
, int non_asoc_addr_ok
,
888 uint8_t loopscope
, uint8_t ipv4_scope
, uint8_t *sin_loop
, uint8_t *sin_local
)
891 struct sockaddr_in
*sin
;
892 int num_eligible_addr
= 0;
894 TAILQ_FOREACH(ifa
, &ifn
->if_addrlist
, ifa_list
) {
895 sin
= sctp_is_v4_ifa_addr_prefered (ifa
, loopscope
, ipv4_scope
, sin_loop
, sin_local
);
899 if ((non_asoc_addr_ok
== 0) && sctp_is_addr_restricted(stcb
, (struct sockaddr
*)sin
)) {
900 /* It is restricted for some reason.. probably
908 return (num_eligible_addr
);
/*
 * sctp_choose_v4_boundall(): IPv4 source selection when the endpoint is
 * bound to all addresses.  Plans, in order (labels visible below):
 *   plan A — rotate (cur_addr_num from net->indx_of_eligible_next_to_use)
 *            through the prefered addresses of the route's emit interface,
 *            counted by sctp_count_v4_num_prefered_boundall();
 *   plan B — any "acceptable", non-restricted address on the emit ifn;
 *   plan C — scan all interfaces (up to inp->next_ifn_touse, skipping
 *            loopback when out of scope and the already-tried emit ifn)
 *            for a prefered address;
 *   plan D — same scan, but settle for any acceptable address;
 *   last resort — if non_asoc_addr_ok, use the route's own ifa address
 *            (the ASCONF special case described below); else punt with a
 *            zeroed in_addr.
 * NOTE(review): garbled extraction — parameter lines for rt/scopes
 * (original 916-918), the plan_a/plan_b labels, several braces/continues
 * and the final return are among the dropped original lines.  Only
 * comments were added.
 */
912 static struct in_addr
913 sctp_choose_v4_boundall(struct sctp_inpcb
*inp
,
914 struct sctp_tcb
*stcb
,
915 struct sctp_nets
*net
,
919 int non_asoc_addr_ok
)
921 int cur_addr_num
=0, num_prefered
=0;
922 uint8_t sin_loop
, sin_local
;
924 struct sockaddr_in
*sin
;
928 * For v4 we can use (in boundall) any address in the association. If
929 * non_asoc_addr_ok is set we can use any address (at least in theory).
930 * So we look for prefered addresses first. If we find one, we use it.
931 * Otherwise we next try to get an address on the interface, which we
932 * should be able to do (unless non_asoc_addr_ok is false and we are
933 * routed out that way). In these cases where we can't use the address
934 * of the interface we go through all the ifn's looking for an address
935 * we can use and fill that in. Punting means we send back address
936 * 0, which will probably cause problems actually since then IP will
937 * fill in the address of the route ifn, which means we probably already
938 * rejected it.. i.e. here comes an abort :-<.
/* Per-destination rotation index lives on the sctp_nets entry. */
942 cur_addr_num
= net
->indx_of_eligible_next_to_use
;
945 goto bound_all_v4_plan_c
;
947 num_prefered
= sctp_count_v4_num_prefered_boundall (ifn
, stcb
, non_asoc_addr_ok
, loopscope
, ipv4_scope
, &sin_loop
, &sin_local
);
949 if (sctp_debug_on
& SCTP_DEBUG_OUTPUT1
) {
950 kprintf("Found %d prefered source addresses\n", num_prefered
);
953 if (num_prefered
== 0) {
954 /* no eligible addresses, we must use some other
955 * interface address if we can find one.
957 goto bound_all_v4_plan_b
;
959 /* Ok we have num_eligible_addr set with how many we can use,
960 * this may vary from call to call due to addresses being deprecated etc..
961 Keep the rotation index in range of the current count. */
962 if (cur_addr_num
>= num_prefered
) {
965 /* select the nth address from the list (where cur_addr_num is the nth) and
966 * 0 is the first one, 1 is the second one etc...
969 if (sctp_debug_on
& SCTP_DEBUG_OUTPUT1
) {
970 kprintf("cur_addr_num:%d\n", cur_addr_num
);
973 sin
= sctp_select_v4_nth_prefered_addr_from_ifn_boundall (ifn
, stcb
, non_asoc_addr_ok
, loopscope
,
974 ipv4_scope
, cur_addr_num
);
976 /* if sin is NULL something changed??, plan_a now */
978 return (sin
->sin_addr
);
982 * plan_b: Look at the interface that we emit on
983 * and see if we can find an acceptable address.
986 TAILQ_FOREACH(ifa
, &ifn
->if_addrlist
, ifa_list
) {
987 sin
= sctp_is_v4_ifa_addr_acceptable (ifa
, loopscope
, ipv4_scope
, &sin_loop
, &sin_local
);
991 if ((non_asoc_addr_ok
== 0) && sctp_is_addr_restricted(stcb
, (struct sockaddr
*)sin
)) {
992 /* It is restricted for some reason.. probably
998 return (sin
->sin_addr
);
1001 * plan_c: Look at all interfaces and find a prefered
1002 * address. If we reache here we are in trouble I think.
1004 bound_all_v4_plan_c
:
1005 for (ifn
= TAILQ_FIRST(&ifnet
);
1006 ifn
&& (ifn
!= inp
->next_ifn_touse
);
1007 ifn
=TAILQ_NEXT(ifn
, if_list
)) {
1008 if (loopscope
== 0 && ifn
->if_type
== IFT_LOOP
) {
1009 /* wrong base scope */
1012 if (ifn
== rt
->rt_ifp
)
1013 /* already looked at this guy */
1015 num_prefered
= sctp_count_v4_num_prefered_boundall (ifn
, stcb
, non_asoc_addr_ok
,
1016 loopscope
, ipv4_scope
, &sin_loop
, &sin_local
);
1018 if (sctp_debug_on
& SCTP_DEBUG_OUTPUT1
) {
1019 kprintf("Found ifn:%x %d prefered source addresses\n", (u_int
)ifn
, num_prefered
);
1022 if (num_prefered
== 0) {
1024 * None on this interface.
1028 /* Ok we have num_eligible_addr set with how many we can use,
1029 * this may vary from call to call due to addresses being deprecated etc..
1031 if (cur_addr_num
>= num_prefered
) {
1034 sin
= sctp_select_v4_nth_prefered_addr_from_ifn_boundall (ifn
, stcb
, non_asoc_addr_ok
, loopscope
,
1035 ipv4_scope
, cur_addr_num
);
1038 return (sin
->sin_addr
);
1043 * plan_d: We are in deep trouble. No prefered address on
1044 * any interface. And the emit interface does not
1045 * even have an acceptable address. Take anything
1046 * we can get! If this does not work we are
1047 * probably going to emit a packet that will
1048 * illicit an ABORT, falling through.
1051 for (ifn
= TAILQ_FIRST(&ifnet
);
1052 ifn
&& (ifn
!= inp
->next_ifn_touse
);
1053 ifn
=TAILQ_NEXT(ifn
, if_list
)) {
1054 if (loopscope
== 0 && ifn
->if_type
== IFT_LOOP
) {
1055 /* wrong base scope */
1058 if (ifn
== rt
->rt_ifp
)
1059 /* already looked at this guy */
1062 TAILQ_FOREACH(ifa
, &ifn
->if_addrlist
, ifa_list
) {
1063 sin
= sctp_is_v4_ifa_addr_acceptable (ifa
, loopscope
, ipv4_scope
, &sin_loop
, &sin_local
);
1067 if ((non_asoc_addr_ok
== 0) && sctp_is_addr_restricted(stcb
, (struct sockaddr
*)sin
)) {
1068 /* It is restricted for some reason.. probably
1074 return (sin
->sin_addr
);
1078 * Ok we can find NO address to source from that is
1079 * not on our negative list. It is either the special
1080 * ASCONF case where we are sourceing from a intf that
1081 * has been ifconfig'd to a different address (i.e.
1082 * it holds a ADD/DEL/SET-PRIM and the proper lookup
1083 * address. OR we are hosed, and this baby is going
1084 * to abort the association.
1086 if (non_asoc_addr_ok
) {
1087 return (((struct sockaddr_in
*)(rt
->rt_ifa
->ifa_addr
))->sin_addr
);
1089 memset(&ans
, 0, sizeof(ans
));
/*
 * sctp_ipv4_source_address_selection(): top-level IPv4 source picker.
 * Ensures a cached route exists (rtalloc_ign() on FreeBSD/APPLE/DragonFly;
 * the other-platform branch is in dropped lines), punting with a zeroed
 * in_addr when no route can be found.  Derives ipv4_scope/loopscope — from
 * stcb->asoc when an association exists, otherwise from the destination
 * address classification visible below — then dispatches:
 *   - SCTP_PCB_FLAGS_BOUNDALL        -> sctp_choose_v4_boundall()
 *   - bound-specific, stcb != NULL   -> sctp_choose_v4_boundspecific_stcb()
 *   - bound-specific, no stcb        -> sctp_choose_v4_boundspecific_inp()
 * `stcb` may be NULL (see the original note).  The long original comments
 * below document the scoping rationale and are kept verbatim.
 * NOTE(review): garbled extraction — the return type, opening brace,
 * non-BSD rtalloc branch, scope assignments for the no-stcb case and the
 * final return are among the dropped original lines.  Only comments were
 * added.
 */
1096 /* tcb may be NULL */
1098 sctp_ipv4_source_address_selection(struct sctp_inpcb
*inp
,
1099 struct sctp_tcb
*stcb
, struct route
*ro
, struct sctp_nets
*net
,
1100 int non_asoc_addr_ok
)
1103 struct sockaddr_in
*to
= (struct sockaddr_in
*)&ro
->ro_dst
;
1104 uint8_t ipv4_scope
, loopscope
;
1107 * - Find the route if needed, cache if I can.
1108 * - Look at interface address in route, Is it
1109 * in the bound list. If so we have the best source.
1110 * - If not we must rotate amongst the addresses.
1114 * Do we need to pay attention to scope. We can have
1115 * a private address or a global address we are sourcing
1116 * or sending to. So if we draw it out
1117 * source * dest * result
1118 * ------------------------------------------
1119 * a Private * Global * NAT?
1120 * ------------------------------------------
1121 * b Private * Private * No problem
1122 * ------------------------------------------
1123 * c Global * Private * Huh, How will this work?
1124 * ------------------------------------------
1125 * d Global * Global * No Problem
1126 * ------------------------------------------
1128 * And then we add to that what happens if there are multiple
1129 * addresses assigned to an interface. Remember the ifa on a
1130 * ifn is a linked list of addresses. So one interface can
1131 * have more than one IPv4 address. What happens if we
1132 * have both a private and a global address? Do we then
1133 * use context of destination to sort out which one is
1134 * best? And what about NAT's sending P->G may get you
1135 * a NAT translation, or should you select the G thats
1136 * on the interface in preference.
1140 * - count the number of addresses on the interface.
1141 * - if its one, no problem except case <c>. For <a>
1142 * we will assume a NAT out there.
1143 * - if there are more than one, then we need to worry
1144 * about scope P or G. We should prefer G -> G and
1145 * P -> P if possible. Then as a secondary fall back
1146 * to mixed types G->P being a last ditch one.
1147 * - The above all works for bound all, but bound
1148 * specific we need to use the same concept but instead
1149 * only consider the bound addresses. If the bound set
1150 * is NOT assigned to the interface then we must use
1151 * rotation amongst them.
1153 * Notes: For v4, we can always punt and let ip_output
1154 * decide by sending back a source of 0.0.0.0
1157 if (ro
->ro_rt
== NULL
) {
1159 * Need a route to cache.
1162 #if defined(__FreeBSD__) || defined(__APPLE__) || defined(__DragonFly__)
1163 rtalloc_ign(ro
, 0UL);
1168 if (ro
->ro_rt
== NULL
) {
1169 /* No route to host .. punt */
1170 memset(&ans
, 0, sizeof(ans
));
1173 /* Setup our scopes */
/* With an association, use its negotiated scopes... */
1175 ipv4_scope
= stcb
->asoc
.ipv4_local_scope
;
1176 loopscope
= stcb
->asoc
.loopback_scope
;
1178 /* Scope based on outbound address */
/* ...otherwise classify the destination itself. */
1179 if ((IN4_ISPRIVATE_ADDRESS(&to
->sin_addr
))) {
1182 } else if (IN4_ISLOOPBACK_ADDRESS(&to
->sin_addr
)) {
1191 if (sctp_debug_on
& SCTP_DEBUG_OUTPUT1
) {
1192 kprintf("Scope setup loop:%d ipv4_scope:%d\n",
1193 loopscope
, ipv4_scope
);
1196 if (inp
->sctp_flags
& SCTP_PCB_FLAGS_BOUNDALL
) {
1198 * When bound to all if the address list is set
1199 * it is a negative list. Addresses being added
1202 return (sctp_choose_v4_boundall(inp
, stcb
, net
, ro
->ro_rt
,
1203 ipv4_scope
, loopscope
, non_asoc_addr_ok
));
1206 * Three possiblities here:
1208 * a) stcb is NULL, which means we operate only from
1209 * the list of addresses (ifa's) bound to the assoc and
1210 * we care not about the list.
1211 * b) stcb is NOT-NULL, which means we have an assoc structure and
1212 * auto-asconf is on. This means that the list of addresses is
1213 * a NOT list. We use the list from the inp, but any listed address
1214 * in our list is NOT yet added. However if the non_asoc_addr_ok is
1215 * set we CAN use an address NOT available (i.e. being added). Its
1217 * c) stcb is NOT-NULL, which means we have an assoc structure and
1218 * auto-asconf is off. This means that the list of addresses is
1219 * the ONLY addresses I can use.. its positive.
1221 * Note we collapse b & c into the same function just like in
1222 * the v6 address selection.
1225 return (sctp_choose_v4_boundspecific_stcb(inp
, stcb
, net
,
1226 ro
->ro_rt
, ipv4_scope
, loopscope
, non_asoc_addr_ok
));
1228 return (sctp_choose_v4_boundspecific_inp(inp
, ro
->ro_rt
,
1229 ipv4_scope
, loopscope
));
1231 /* this should not be reached */
1232 memset(&ans
, 0, sizeof(ans
));
/*
 * sctp_is_v6_ifa_addr_acceptable(): v6 analogue of the v4 classifiers.
 * Rejects: non-AF_INET6 addresses; deprecated addresses (when the
 * ip6_use_deprecated sysctl is off); addresses flagged DETACHED, NOTREADY
 * or ANYCAST; the unspecified address; loopback without loopscope; and
 * link-local without loc_scope.  Sets *sin_loop / *sin_local (loopback /
 * link-local) for the caller.
 * NOTE(review): garbled extraction — the return statements and the final
 * "acceptable" tail are among the dropped original lines (1244→1248,
 * 1282→...).  Only comments were added.
 */
1238 static struct sockaddr_in6
*
1239 sctp_is_v6_ifa_addr_acceptable (struct ifaddr
*ifa
, int loopscope
, int loc_scope
, int *sin_loop
, int *sin_local
)
1241 struct in6_ifaddr
*ifa6
;
1242 struct sockaddr_in6
*sin6
;
1244 if (ifa
->ifa_addr
->sa_family
!= AF_INET6
) {
/* The ifaddr embeds the in6_ifaddr, giving access to ia6_flags. */
1248 ifa6
= (struct in6_ifaddr
*)ifa
;
1249 /* ok to use deprecated addresses? */
1250 if (!ip6_use_deprecated
) {
1251 if (IFA6_IS_DEPRECATED(ifa6
)) {
1252 /* can't use this type */
1256 /* are we ok, with the current state of this address? */
1257 if (ifa6
->ia6_flags
&
1258 (IN6_IFF_DETACHED
| IN6_IFF_NOTREADY
| IN6_IFF_ANYCAST
)) {
1259 /* Can't use these types */
1262 /* Ok the address may be ok */
1263 sin6
= (struct sockaddr_in6
*)ifa
->ifa_addr
;
1264 *sin_local
= *sin_loop
= 0;
1265 if ((ifa
->ifa_ifp
->if_type
== IFT_LOOP
) ||
1266 (IN6_IS_ADDR_LOOPBACK(&sin6
->sin6_addr
))) {
1269 if (!loopscope
&& *sin_loop
) {
1270 /* Its a loopback address and we don't have loop scope */
1273 if (IN6_IS_ADDR_UNSPECIFIED(&sin6
->sin6_addr
)) {
1274 /* we skip unspecifed addresses */
1278 if (IN6_IS_ADDR_LINKLOCAL(&sin6
->sin6_addr
)) {
1281 if (!loc_scope
&& *sin_local
) {
1282 /* Its a link local address, and we don't have link local scope */
1289 static struct sockaddr_in6
*
1290 sctp_choose_v6_boundspecific_stcb(struct sctp_inpcb
*inp
,
1291 struct sctp_tcb
*stcb
,
1292 struct sctp_nets
*net
,
1296 int non_asoc_addr_ok
)
1299 * Each endpoint has a list of local addresses associated
1300 * with it. The address list is either a "negative list" i.e.
1301 * those addresses that are NOT allowed to be used as a source OR
1302 * a "postive list" i.e. those addresses that CAN be used.
1304 * Its a negative list if asconf is allowed. What we do
1305 * in this case is use the ep address list BUT we have
1306 * to cross check it against the negative list.
1308 * In the case where NO asconf is allowed, we have just
1309 * a straight association level list that we must use to
1310 * find a source address.
1312 struct sctp_laddr
*laddr
, *starting_point
;
1313 struct sockaddr_in6
*sin6
;
1314 int sin_loop
, sin_local
;
1315 int start_at_beginning
=0;
1320 if (inp
->sctp_flags
& SCTP_PCB_FLAGS_DO_ASCONF
) {
1322 if (sctp_debug_on
& SCTP_DEBUG_OUTPUT1
) {
1323 kprintf("Have a STCB - asconf allowed, not bound all have a netgative list\n");
1326 /* first question, is the ifn we will emit on
1327 * in our list, if so, we want that one.
1330 TAILQ_FOREACH(ifa
, &ifn
->if_addrlist
, ifa_list
) {
1331 if (sctp_is_addr_in_ep(inp
, ifa
)) {
1332 sin6
= sctp_is_v6_ifa_addr_acceptable (ifa
, loopscope
, loc_scope
, &sin_loop
, &sin_local
);
1335 if ((non_asoc_addr_ok
== 0) &&
1336 (sctp_is_addr_restricted(stcb
, (struct sockaddr
*)sin6
))) {
1337 /* on the no-no list */
1344 starting_point
= stcb
->asoc
.last_used_address
;
1345 /* First try for matching scope */
1347 if (stcb
->asoc
.last_used_address
== NULL
) {
1348 start_at_beginning
=1;
1349 stcb
->asoc
.last_used_address
= LIST_FIRST(&inp
->sctp_addr_list
);
1351 /* search beginning with the last used address */
1352 for (laddr
= stcb
->asoc
.last_used_address
; laddr
;
1353 laddr
= LIST_NEXT(laddr
, sctp_nxt_addr
)) {
1354 if (laddr
->ifa
== NULL
) {
1355 /* address has been removed */
1358 sin6
= sctp_is_v6_ifa_addr_acceptable (laddr
->ifa
, loopscope
, loc_scope
, &sin_loop
, &sin_local
);
1361 if ((non_asoc_addr_ok
== 0) && (sctp_is_addr_restricted(stcb
, (struct sockaddr
*)sin6
))) {
1362 /* on the no-no list */
1365 /* is it of matching scope ? */
1366 if ((loopscope
== 0) &&
1370 /* all of global scope we are ok with it */
1373 if (loopscope
&& sin_loop
)
1374 /* both on the loopback, thats ok */
1376 if (loc_scope
&& sin_local
)
1377 /* both local scope */
1381 if (start_at_beginning
== 0) {
1382 stcb
->asoc
.last_used_address
= NULL
;
1383 goto sctp_from_the_top
;
1385 /* now try for any higher scope than the destination */
1386 stcb
->asoc
.last_used_address
= starting_point
;
1387 start_at_beginning
= 0;
1389 if (stcb
->asoc
.last_used_address
== NULL
) {
1390 start_at_beginning
=1;
1391 stcb
->asoc
.last_used_address
= LIST_FIRST(&inp
->sctp_addr_list
);
1393 /* search beginning with the last used address */
1394 for (laddr
= stcb
->asoc
.last_used_address
; laddr
;
1395 laddr
= LIST_NEXT(laddr
, sctp_nxt_addr
)) {
1396 if (laddr
->ifa
== NULL
) {
1397 /* address has been removed */
1400 sin6
= sctp_is_v6_ifa_addr_acceptable (laddr
->ifa
, loopscope
, loc_scope
, &sin_loop
, &sin_local
);
1403 if ((non_asoc_addr_ok
== 0) && (sctp_is_addr_restricted(stcb
, (struct sockaddr
*)sin6
))) {
1404 /* on the no-no list */
1409 if (start_at_beginning
== 0) {
1410 stcb
->asoc
.last_used_address
= NULL
;
1411 goto sctp_from_the_top2
;
1415 if (sctp_debug_on
& SCTP_DEBUG_OUTPUT1
) {
1416 kprintf("Have a STCB - no asconf allowed, not bound all have a postive list\n");
1419 /* First try for interface output match */
1420 LIST_FOREACH(laddr
, &stcb
->asoc
.sctp_local_addr_list
,
1422 if (laddr
->ifa
== NULL
) {
1423 /* address has been removed */
1426 sin6
= sctp_is_v6_ifa_addr_acceptable (laddr
->ifa
, loopscope
, loc_scope
, &sin_loop
, &sin_local
);
1429 /* first question, is laddr->ifa an address associated with the emit interface */
1431 TAILQ_FOREACH(ifa
, &ifn
->if_addrlist
, ifa_list
) {
1432 if (laddr
->ifa
== ifa
) {
1433 sin6
= (struct sockaddr_in6
*)laddr
->ifa
->ifa_addr
;
1436 if (sctp_cmpaddr(ifa
->ifa_addr
, laddr
->ifa
->ifa_addr
) == 1) {
1437 sin6
= (struct sockaddr_in6
*)laddr
->ifa
->ifa_addr
;
1443 /* Next try for matching scope */
1444 LIST_FOREACH(laddr
, &stcb
->asoc
.sctp_local_addr_list
,
1446 if (laddr
->ifa
== NULL
) {
1447 /* address has been removed */
1450 sin6
= sctp_is_v6_ifa_addr_acceptable (laddr
->ifa
, loopscope
, loc_scope
, &sin_loop
, &sin_local
);
1454 if ((loopscope
== 0) &&
1458 /* all of global scope we are ok with it */
1461 if (loopscope
&& sin_loop
)
1462 /* both on the loopback, thats ok */
1464 if (loc_scope
&& sin_local
)
1465 /* both local scope */
1468 /* ok, now try for a higher scope in the source address */
1469 /* First try for matching scope */
1470 LIST_FOREACH(laddr
, &stcb
->asoc
.sctp_local_addr_list
,
1472 if (laddr
->ifa
== NULL
) {
1473 /* address has been removed */
1476 sin6
= sctp_is_v6_ifa_addr_acceptable (laddr
->ifa
, loopscope
, loc_scope
, &sin_loop
, &sin_local
);
1485 static struct sockaddr_in6
*
1486 sctp_choose_v6_boundspecific_inp(struct sctp_inpcb
*inp
,
1492 * Here we are bound specific and have only
1493 * an inp. We must find an address that is bound
1494 * that we can give out as a src address. We
1495 * prefer two addresses of same scope if we can
1496 * find them that way.
1498 struct sctp_laddr
*laddr
;
1499 struct sockaddr_in6
*sin6
;
1502 int sin_loop
, sin_local
;
1504 /* first question, is the ifn we will emit on
1505 * in our list, if so, we want that one.
1510 TAILQ_FOREACH(ifa
, &ifn
->if_addrlist
, ifa_list
) {
1511 sin6
= sctp_is_v6_ifa_addr_acceptable (ifa
, loopscope
, loc_scope
, &sin_loop
, &sin_local
);
1514 if (sctp_is_addr_in_ep(inp
, ifa
)) {
1519 for (laddr
= LIST_FIRST(&inp
->sctp_addr_list
);
1520 laddr
&& (laddr
!= inp
->next_addr_touse
);
1521 laddr
= LIST_NEXT(laddr
, sctp_nxt_addr
)) {
1522 if (laddr
->ifa
== NULL
) {
1523 /* address has been removed */
1526 sin6
= sctp_is_v6_ifa_addr_acceptable (laddr
->ifa
, loopscope
, loc_scope
, &sin_loop
, &sin_local
);
1530 if ((loopscope
== 0) &&
1534 /* all of global scope we are ok with it */
1537 if (loopscope
&& sin_loop
)
1538 /* both on the loopback, thats ok */
1540 if (loc_scope
&& sin_local
)
1541 /* both local scope */
1545 /* if we reach here, we could not find two addresses
1546 * of the same scope to give out. Lets look for any higher level
1547 * scope for a source address.
1549 for (laddr
= LIST_FIRST(&inp
->sctp_addr_list
);
1550 laddr
&& (laddr
!= inp
->next_addr_touse
);
1551 laddr
= LIST_NEXT(laddr
, sctp_nxt_addr
)) {
1552 if (laddr
->ifa
== NULL
) {
1553 /* address has been removed */
1556 sin6
= sctp_is_v6_ifa_addr_acceptable (laddr
->ifa
, loopscope
, loc_scope
, &sin_loop
, &sin_local
);
1561 /* no address bound can be a source for the destination */
1563 if (sctp_debug_on
& SCTP_DEBUG_OUTPUT1
) {
1564 kprintf("Src address selection for EP, no acceptable src address found for address\n");
1571 static struct sockaddr_in6
*
1572 sctp_select_v6_nth_addr_from_ifn_boundall (struct ifnet
*ifn
, struct sctp_tcb
*stcb
, int non_asoc_addr_ok
, uint8_t loopscope
,
1573 uint8_t loc_scope
, int cur_addr_num
, int match_scope
)
1576 struct sockaddr_in6
*sin6
;
1577 int sin_loop
, sin_local
;
1578 int num_eligible_addr
= 0;
1580 TAILQ_FOREACH(ifa
, &ifn
->if_addrlist
, ifa_list
) {
1581 sin6
= sctp_is_v6_ifa_addr_acceptable (ifa
, loopscope
, loc_scope
, &sin_loop
, &sin_local
);
1585 if ((non_asoc_addr_ok
== 0) && sctp_is_addr_restricted(stcb
, (struct sockaddr
*)sin6
)) {
1586 /* It is restricted for some reason.. probably
1593 /* Here we are asked to match scope if possible */
1594 if (loopscope
&& sin_loop
)
1595 /* src and destination are loopback scope */
1597 if (loc_scope
&& sin_local
)
1598 /* src and destination are local scope */
1600 if ((loopscope
== 0) &&
1604 /* src and destination are global scope */
1609 if (num_eligible_addr
== cur_addr_num
) {
1613 num_eligible_addr
++;
1620 sctp_count_v6_num_eligible_boundall (struct ifnet
*ifn
, struct sctp_tcb
*stcb
,
1621 int non_asoc_addr_ok
, uint8_t loopscope
, uint8_t loc_scope
)
1624 struct sockaddr_in6
*sin6
;
1625 int num_eligible_addr
= 0;
1626 int sin_loop
, sin_local
;
1628 TAILQ_FOREACH(ifa
, &ifn
->if_addrlist
, ifa_list
) {
1629 sin6
= sctp_is_v6_ifa_addr_acceptable (ifa
, loopscope
, loc_scope
, &sin_loop
, &sin_local
);
1633 if ((non_asoc_addr_ok
== 0) && sctp_is_addr_restricted(stcb
, (struct sockaddr
*)sin6
)) {
1634 /* It is restricted for some reason.. probably
1640 num_eligible_addr
++;
1642 return (num_eligible_addr
);
1646 static struct sockaddr_in6
*
1647 sctp_choose_v6_boundall(struct sctp_inpcb
*inp
,
1648 struct sctp_tcb
*stcb
,
1649 struct sctp_nets
*net
,
1653 int non_asoc_addr_ok
)
1655 /* Ok, we are bound all SO any address
1656 * is ok to use as long as it is NOT in the negative
1659 int num_eligible_addr
;
1661 int started_at_beginning
=0;
1662 int match_scope_prefered
;
1663 /* first question is, how many eligible addresses are
1664 * there for the destination ifn that we are using that
1665 * are within the proper scope?
1668 struct sockaddr_in6
*sin6
;
1672 cur_addr_num
= net
->indx_of_eligible_next_to_use
;
1674 if (cur_addr_num
== 0) {
1675 match_scope_prefered
= 1;
1677 match_scope_prefered
= 0;
1679 num_eligible_addr
= sctp_count_v6_num_eligible_boundall (ifn
, stcb
, non_asoc_addr_ok
, loopscope
, loc_scope
);
1681 if (sctp_debug_on
& SCTP_DEBUG_OUTPUT1
) {
1682 kprintf("Found %d eligible source addresses\n", num_eligible_addr
);
1685 if (num_eligible_addr
== 0) {
1686 /* no eligible addresses, we must use some other
1687 * interface address if we can find one.
1689 goto bound_all_v6_plan_b
;
1691 /* Ok we have num_eligible_addr set with how many we can use,
1692 * this may vary from call to call due to addresses being deprecated etc..
1694 if (cur_addr_num
>= num_eligible_addr
) {
1697 /* select the nth address from the list (where cur_addr_num is the nth) and
1698 * 0 is the first one, 1 is the second one etc...
1701 if (sctp_debug_on
& SCTP_DEBUG_OUTPUT1
) {
1702 kprintf("cur_addr_num:%d match_scope_prefered:%d select it\n",
1703 cur_addr_num
, match_scope_prefered
);
1706 sin6
= sctp_select_v6_nth_addr_from_ifn_boundall (ifn
, stcb
, non_asoc_addr_ok
, loopscope
,
1707 loc_scope
, cur_addr_num
, match_scope_prefered
);
1708 if (match_scope_prefered
&& (sin6
== NULL
)) {
1709 /* retry without the preference for matching scope */
1711 if (sctp_debug_on
& SCTP_DEBUG_OUTPUT1
) {
1712 kprintf("retry with no match_scope_prefered\n");
1715 sin6
= sctp_select_v6_nth_addr_from_ifn_boundall (ifn
, stcb
, non_asoc_addr_ok
, loopscope
,
1716 loc_scope
, cur_addr_num
, 0);
1720 if (sctp_debug_on
& SCTP_DEBUG_OUTPUT1
) {
1721 kprintf("Selected address %d ifn:%x for the route\n", cur_addr_num
, (u_int
)ifn
);
1725 /* store so we get the next one */
1726 if (cur_addr_num
< 255)
1727 net
->indx_of_eligible_next_to_use
= cur_addr_num
+ 1;
1729 net
->indx_of_eligible_next_to_use
= 0;
1733 num_eligible_addr
= 0;
1734 bound_all_v6_plan_b
:
1735 /* ok, if we reach here we either fell through
1736 * due to something changing during an interupt (unlikely)
1737 * or we have NO eligible source addresses for the ifn
1738 * of the route (most likely). We must look at all the other
1739 * interfaces EXCEPT rt->rt_ifp and do the same game.
1742 if (sctp_debug_on
& SCTP_DEBUG_OUTPUT1
) {
1743 kprintf("bound-all Plan B\n");
1746 if (inp
->next_ifn_touse
== NULL
) {
1747 started_at_beginning
=1;
1748 inp
->next_ifn_touse
= TAILQ_FIRST(&ifnet
);
1750 if (sctp_debug_on
& SCTP_DEBUG_OUTPUT1
) {
1751 kprintf("Start at first IFN:%x\n", (u_int
)inp
->next_ifn_touse
);
1755 inp
->next_ifn_touse
= TAILQ_NEXT(inp
->next_ifn_touse
, if_list
);
1757 if (sctp_debug_on
& SCTP_DEBUG_OUTPUT1
) {
1758 kprintf("Resume at IFN:%x\n", (u_int
)inp
->next_ifn_touse
);
1761 if (inp
->next_ifn_touse
== NULL
) {
1763 if (sctp_debug_on
& SCTP_DEBUG_OUTPUT1
) {
1764 kprintf("IFN Resets\n");
1767 started_at_beginning
=1;
1768 inp
->next_ifn_touse
= TAILQ_FIRST(&ifnet
);
1771 for (ifn
= inp
->next_ifn_touse
; ifn
;
1772 ifn
= TAILQ_NEXT(ifn
, if_list
)) {
1773 if (loopscope
== 0 && ifn
->if_type
== IFT_LOOP
) {
1774 /* wrong base scope */
1777 if (loc_scope
&& (ifn
->if_index
!= loc_scope
)) {
1778 /* by definition the scope (from to->sin6_scopeid)
1779 * must match that of the interface. If not then
1780 * we could pick a wrong scope for the address.
1781 * Ususally we don't hit plan-b since the route
1782 * handles this. However we can hit plan-b when
1783 * we send to local-host so the route is the
1784 * loopback interface, but the destination is a
1789 if (ifn
== rt
->rt_ifp
) {
1790 /* already looked at this guy */
1793 /* Address rotation will only work when we are not
1794 * rotating sourced interfaces and are using the interface
1795 * of the route. We would need to have a per interface index
1796 * in order to do proper rotation.
1798 num_eligible_addr
= sctp_count_v6_num_eligible_boundall (ifn
, stcb
, non_asoc_addr_ok
, loopscope
, loc_scope
);
1800 if (sctp_debug_on
& SCTP_DEBUG_OUTPUT1
) {
1801 kprintf("IFN:%x has %d eligible\n", (u_int
)ifn
, num_eligible_addr
);
1804 if (num_eligible_addr
== 0) {
1805 /* none we can use */
1808 /* Ok we have num_eligible_addr set with how many we can use,
1809 * this may vary from call to call due to addresses being deprecated etc..
1811 inp
->next_ifn_touse
= ifn
;
1813 /* select the first one we can find with perference for matching scope.
1815 sin6
= sctp_select_v6_nth_addr_from_ifn_boundall (ifn
, stcb
, non_asoc_addr_ok
, loopscope
, loc_scope
, 0, 1);
1817 /* can't find one with matching scope how about a source with higher
1820 sin6
= sctp_select_v6_nth_addr_from_ifn_boundall (ifn
, stcb
, non_asoc_addr_ok
, loopscope
, loc_scope
, 0, 0);
1822 /* Hmm, can't find one in the interface now */
1826 if (sctp_debug_on
& SCTP_DEBUG_OUTPUT1
) {
1827 kprintf("Selected the %d'th address of ifn:%x\n",
1834 if (started_at_beginning
== 0) {
1835 /* we have not been through all of them yet, force
1836 * us to go through them all.
1839 if (sctp_debug_on
& SCTP_DEBUG_OUTPUT1
) {
1840 kprintf("Force a recycle\n");
1843 inp
->next_ifn_touse
= NULL
;
1844 goto bound_all_v6_plan_b
;
1850 /* stcb and net may be NULL */
1852 sctp_ipv6_source_address_selection(struct sctp_inpcb
*inp
,
1853 struct sctp_tcb
*stcb
, struct route
*ro
, struct sctp_nets
*net
,
1854 int non_asoc_addr_ok
)
1856 struct in6_addr ans
;
1857 struct sockaddr_in6
*rt_addr
;
1858 uint8_t loc_scope
, loopscope
;
1859 struct sockaddr_in6
*to
= (struct sockaddr_in6
*)&ro
->ro_dst
;
1862 * This routine is tricky standard v6 src address
1863 * selection cannot take into account what we have
1864 * bound etc, so we can't use it.
1866 * Instead here is what we must do:
1867 * 1) Make sure we have a route, if we
1868 * don't have a route we can never reach the peer.
1869 * 2) Once we have a route, determine the scope of the
1870 * route. Link local, loopback or global.
1871 * 3) Next we divide into three types. Either we
1872 * are bound all.. which means we want to use
1873 * one of the addresses of the interface we are
1875 * 4a) We have not stcb, which means we are using the
1876 * specific addresses bound on an inp, in this
1877 * case we are similar to the stcb case (4b below)
1878 * accept the list is always a positive list.<or>
1879 * 4b) We are bound specific with a stcb, which means we have a
1880 * list of bound addresses and we must see if the
1881 * ifn of the route is actually one of the bound addresses.
1882 * If not, then we must rotate addresses amongst properly
1883 * scoped bound addresses, if so we use the address
1885 * 5) Always, no matter which path we take through the above
1886 * we must be sure the source address we use is allowed to
1887 * be used. I.e. IN6_IFF_DETACHED, IN6_IFF_NOTREADY, and IN6_IFF_ANYCAST
1888 * addresses cannot be used.
1889 * 6) Addresses that are deprecated MAY be used
1890 * if (!ip6_use_deprecated) {
1891 * if (IFA6_IS_DEPRECATED(ifa6)) {
1897 /*** 1> determine route, if not already done */
1898 if (ro
->ro_rt
== NULL
) {
1900 * Need a route to cache.
1902 #ifndef SCOPEDROUTING
1904 scope_save
= to
->sin6_scope_id
;
1905 to
->sin6_scope_id
= 0;
1908 #if defined(__FreeBSD__) || defined(__APPLE__) || defined(__DragonFly__)
1909 rtalloc_ign(ro
, 0UL);
1913 #ifndef SCOPEDROUTING
1914 to
->sin6_scope_id
= scope_save
;
1917 if (ro
->ro_rt
== NULL
) {
1919 * no route to host. this packet is going no-where.
1920 * We probably should make sure we arrange to send back
1924 if (sctp_debug_on
& SCTP_DEBUG_OUTPUT1
) {
1925 kprintf("No route to host, this packet cannot be sent!\n");
1928 memset(&ans
, 0, sizeof(ans
));
1932 /*** 2a> determine scope for outbound address/route */
1933 loc_scope
= loopscope
= 0;
1935 * We base our scope on the outbound packet scope and route,
1936 * NOT the TCB (if there is one). This way in local scope we will only
1937 * use a local scope src address when we send to a local address.
1940 if (IN6_IS_ADDR_LOOPBACK(&to
->sin6_addr
)) {
1941 /* If the route goes to the loopback address OR
1942 * the address is a loopback address, we are loopback
1946 if (sctp_debug_on
& SCTP_DEBUG_OUTPUT1
) {
1947 kprintf("Loopback scope is set\n");
1953 /* mark it as local */
1954 net
->addr_is_local
= 1;
1957 } else if (IN6_IS_ADDR_LINKLOCAL(&to
->sin6_addr
)) {
1959 if (sctp_debug_on
& SCTP_DEBUG_OUTPUT1
) {
1960 kprintf("Link local scope is set, id:%d\n", to
->sin6_scope_id
);
1963 if (to
->sin6_scope_id
)
1964 loc_scope
= to
->sin6_scope_id
;
1971 if (sctp_debug_on
& SCTP_DEBUG_OUTPUT1
) {
1972 kprintf("Global scope is set\n");
1977 /* now, depending on which way we are bound we call the appropriate
1978 * routine to do steps 3-6
1981 if (sctp_debug_on
& SCTP_DEBUG_OUTPUT1
) {
1982 kprintf("Destination address:");
1983 sctp_print_address((struct sockaddr
*)to
);
1987 if (inp
->sctp_flags
& SCTP_PCB_FLAGS_BOUNDALL
) {
1989 if (sctp_debug_on
& SCTP_DEBUG_OUTPUT1
) {
1990 kprintf("Calling bound-all src addr selection for v6\n");
1993 rt_addr
= sctp_choose_v6_boundall(inp
, stcb
, net
, ro
->ro_rt
, loc_scope
, loopscope
, non_asoc_addr_ok
);
1996 if (sctp_debug_on
& SCTP_DEBUG_OUTPUT1
) {
1997 kprintf("Calling bound-specific src addr selection for v6\n");
2001 rt_addr
= sctp_choose_v6_boundspecific_stcb(inp
, stcb
, net
, ro
->ro_rt
, loc_scope
, loopscope
, non_asoc_addr_ok
);
2003 /* we can't have a non-asoc address since we have no association */
2004 rt_addr
= sctp_choose_v6_boundspecific_inp(inp
, ro
->ro_rt
, loc_scope
, loopscope
);
2006 if (rt_addr
== NULL
) {
2007 /* no suitable address? */
2008 struct in6_addr in6
;
2010 if (sctp_debug_on
& SCTP_DEBUG_OUTPUT1
) {
2011 kprintf("V6 packet will reach dead-end no suitable src address\n");
2014 memset(&in6
, 0, sizeof(in6
));
2018 if (sctp_debug_on
& SCTP_DEBUG_OUTPUT1
) {
2019 kprintf("Source address selected is:");
2020 sctp_print_address((struct sockaddr
*)rt_addr
);
2023 return (rt_addr
->sin6_addr
);
2027 sctp_get_ect(struct sctp_tcb
*stcb
,
2028 struct sctp_tmit_chunk
*chk
)
2030 uint8_t this_random
;
2036 if (sctp_ecn_nonce
== 0)
2037 /* no nonce, always return ECT0 */
2038 return (SCTP_ECT0_BIT
);
2040 if (stcb
->asoc
.peer_supports_ecn_nonce
== 0) {
2041 /* Peer does NOT support it, so we send a ECT0 only */
2042 return (SCTP_ECT0_BIT
);
2046 return (SCTP_ECT0_BIT
);
2048 if (((stcb
->asoc
.hb_random_idx
== 3) &&
2049 (stcb
->asoc
.hb_ect_randombit
> 7)) ||
2050 (stcb
->asoc
.hb_random_idx
> 3)) {
2052 rndval
= sctp_select_initial_TSN(&stcb
->sctp_ep
->sctp_ep
);
2053 memcpy(stcb
->asoc
.hb_random_values
, &rndval
,
2054 sizeof(stcb
->asoc
.hb_random_values
));
2055 this_random
= stcb
->asoc
.hb_random_values
[0];
2056 stcb
->asoc
.hb_random_idx
= 0;
2057 stcb
->asoc
.hb_ect_randombit
= 0;
2059 if (stcb
->asoc
.hb_ect_randombit
> 7) {
2060 stcb
->asoc
.hb_ect_randombit
= 0;
2061 stcb
->asoc
.hb_random_idx
++;
2063 this_random
= stcb
->asoc
.hb_random_values
[stcb
->asoc
.hb_random_idx
];
2065 if ((this_random
>> stcb
->asoc
.hb_ect_randombit
) & 0x01) {
2067 /* ECN Nonce stuff */
2068 chk
->rec
.data
.ect_nonce
= SCTP_ECT1_BIT
;
2069 stcb
->asoc
.hb_ect_randombit
++;
2070 return (SCTP_ECT1_BIT
);
2072 stcb
->asoc
.hb_ect_randombit
++;
2073 return (SCTP_ECT0_BIT
);
2077 extern int sctp_no_csum_on_loopback
;
2080 sctp_lowlevel_chunk_output(struct sctp_inpcb
*inp
,
2081 struct sctp_tcb
*stcb
, /* may be NULL */
2082 struct sctp_nets
*net
,
2083 struct sockaddr
*to
,
2085 int nofragment_flag
,
2087 struct sctp_tmit_chunk
*chk
,
2089 /* nofragment_flag to tell if IP_DF should be set (IPv4 only) */
2092 * Given a mbuf chain (via m_next) that holds a packet header
2093 * WITH a SCTPHDR but no IP header, endpoint inp and sa structure.
2094 * - calculate SCTP checksum and fill in
2095 * - prepend a IP address header
2096 * - if boundall use INADDR_ANY
2097 * - if boundspecific do source address selection
2098 * - set fragmentation option for ipV4
2099 * - On return from IP output, check/adjust mtu size
2100 * - of output interface and smallest_mtu size as well.
2102 struct sctphdr
*sctphdr
;
2106 unsigned int have_mtu
;
2109 if ((net
) && (net
->dest_state
& SCTP_ADDR_OUT_OF_SCOPE
)) {
2113 if ((m
->m_flags
& M_PKTHDR
) == 0) {
2115 if (sctp_debug_on
& SCTP_DEBUG_OUTPUT1
) {
2116 kprintf("Software error: sctp_lowlevel_chunk_output() called with non pkthdr!\n");
2122 /* Calculate the csum and fill in the length of the packet */
2123 sctphdr
= mtod(m
, struct sctphdr
*);
2125 if (sctp_no_csum_on_loopback
&&
2127 (stcb
->asoc
.loopback_scope
)) {
2128 sctphdr
->checksum
= 0;
2129 m
->m_pkthdr
.len
= sctp_calculate_len(m
);
2131 sctphdr
->checksum
= 0;
2132 csum
= sctp_calculate_sum(m
, &m
->m_pkthdr
.len
, 0);
2133 sctphdr
->checksum
= csum
;
2135 if (to
->sa_family
== AF_INET
) {
2137 struct route iproute
;
2138 M_PREPEND(m
, sizeof(struct ip
), MB_DONTWAIT
);
2140 /* failed to prepend data, give up */
2143 ip
= mtod(m
, struct ip
*);
2144 ip
->ip_v
= IPVERSION
;
2145 ip
->ip_hl
= (sizeof(struct ip
) >> 2);
2146 if (nofragment_flag
) {
2147 #if defined(WITH_CONVERT_IP_OFF) || defined(__FreeBSD__) || defined(__DragonFly__)
2148 #if defined( __OpenBSD__) || defined(__NetBSD__)
2149 /* OpenBSD has WITH_CONVERT_IP_OFF defined?? */
2150 ip
->ip_off
= htons(IP_DF
);
2155 ip
->ip_off
= htons(IP_DF
);
2160 /* FreeBSD and Apple have RANDOM_IP_ID switch */
2161 #if defined(RANDOM_IP_ID) || defined(__NetBSD__) || defined(__OpenBSD__)
2162 ip
->ip_id
= htons(ip_randomid());
2164 ip
->ip_id
= htons(ip_id
++);
2167 #if defined(__FreeBSD__) || defined(__APPLE__) || defined(__DragonFly__)
2168 ip
->ip_ttl
= inp
->ip_inp
.inp
.inp_ip_ttl
;
2170 ip
->ip_ttl
= inp
->inp_ip_ttl
;
2172 #if defined(__OpenBSD__) || defined(__NetBSD__)
2173 ip
->ip_len
= htons(m
->m_pkthdr
.len
);
2175 ip
->ip_len
= m
->m_pkthdr
.len
;
2178 if ((stcb
->asoc
.ecn_allowed
) && ecn_ok
) {
2180 #if defined(__FreeBSD__) || defined (__APPLE__) || defined(__DragonFly__)
2181 ip
->ip_tos
= (u_char
)((inp
->ip_inp
.inp
.inp_ip_tos
& 0x000000fc) |
2182 sctp_get_ect(stcb
, chk
));
2183 #elif defined(__NetBSD__)
2184 ip
->ip_tos
= (u_char
)((inp
->ip_inp
.inp
.inp_ip
.ip_tos
& 0x000000fc) |
2185 sctp_get_ect(stcb
, chk
));
2187 ip
->ip_tos
= (u_char
)((inp
->inp_ip_tos
& 0x000000fc) |
2188 sctp_get_ect(stcb
, chk
));
2192 #if defined(__FreeBSD__) || defined(__APPLE__) || defined(__DragonFly__)
2193 ip
->ip_tos
= inp
->ip_inp
.inp
.inp_ip_tos
;
2194 #elif defined(__NetBSD__)
2195 ip
->ip_tos
= inp
->ip_inp
.inp
.inp_ip
.ip_tos
;
2197 ip
->ip_tos
= inp
->inp_ip_tos
;
2201 /* no association at all */
2202 #if defined(__FreeBSD__) || defined(__APPLE__) || defined(__DragonFly__)
2203 ip
->ip_tos
= inp
->ip_inp
.inp
.inp_ip_tos
;
2205 ip
->ip_tos
= inp
->inp_ip_tos
;
2208 ip
->ip_p
= IPPROTO_SCTP
;
2212 memset(&iproute
, 0, sizeof(iproute
));
2213 memcpy(&ro
->ro_dst
, to
, to
->sa_len
);
2215 ro
= (struct route
*)&net
->ro
;
2217 /* Now the address selection part */
2218 ip
->ip_dst
.s_addr
= ((struct sockaddr_in
*)to
)->sin_addr
.s_addr
;
2220 /* call the routine to select the src address */
2222 if (net
->src_addr_selected
== 0) {
2223 /* Cache the source address */
2224 ((struct sockaddr_in
*)&net
->ro
._s_addr
)->sin_addr
= sctp_ipv4_source_address_selection(inp
,
2226 ro
, net
, out_of_asoc_ok
);
2228 net
->src_addr_selected
= 1;
2230 ip
->ip_src
= ((struct sockaddr_in
*)&net
->ro
._s_addr
)->sin_addr
;
2232 ip
->ip_src
= sctp_ipv4_source_address_selection(inp
,
2233 stcb
, ro
, net
, out_of_asoc_ok
);
2236 * If source address selection fails and we find no route then
2237 * the ip_ouput should fail as well with a NO_ROUTE_TO_HOST
2238 * type error. We probably should catch that somewhere and
2239 * abort the association right away (assuming this is an INIT
2242 if ((ro
->ro_rt
== NULL
)) {
2244 * src addr selection failed to find a route (or valid
2245 * source addr), so we can't get there from here!
2248 if (sctp_debug_on
& SCTP_DEBUG_OUTPUT1
) {
2249 kprintf("low_level_output: dropped v4 packet- no valid source addr\n");
2250 kprintf("Destination was %x\n", (u_int
)(ntohl(ip
->ip_dst
.s_addr
)));
2252 #endif /* SCTP_DEBUG */
2254 if ((net
->dest_state
& SCTP_ADDR_REACHABLE
) && stcb
)
2255 sctp_ulp_notify(SCTP_NOTIFY_INTERFACE_DOWN
,
2257 SCTP_FAILED_THRESHOLD
,
2259 net
->dest_state
&= ~SCTP_ADDR_REACHABLE
;
2260 net
->dest_state
|= SCTP_ADDR_NOT_REACHABLE
;
2262 if (net
== stcb
->asoc
.primary_destination
) {
2263 /* need a new primary */
2264 struct sctp_nets
*alt
;
2265 alt
= sctp_find_alternate_net(stcb
, net
);
2267 if (sctp_set_primary_addr(stcb
,
2268 (struct sockaddr
*)NULL
,
2270 net
->dest_state
|= SCTP_ADDR_WAS_PRIMARY
;
2271 net
->src_addr_selected
= 0;
2278 return (EHOSTUNREACH
);
2280 have_mtu
= ro
->ro_rt
->rt_ifp
->if_mtu
;
2283 o_flgs
= (IP_RAWOUTPUT
| (inp
->sctp_socket
->so_options
& (SO_DONTROUTE
| SO_BROADCAST
)));
2285 if (sctp_debug_on
& SCTP_DEBUG_OUTPUT3
) {
2286 kprintf("Calling ipv4 output routine from low level src addr:%x\n",
2287 (u_int
)(ntohl(ip
->ip_src
.s_addr
)));
2288 kprintf("Destination is %x\n", (u_int
)(ntohl(ip
->ip_dst
.s_addr
)));
2289 kprintf("RTP route is %p through\n", ro
->ro_rt
);
2292 if ((have_mtu
) && (net
) && (have_mtu
> net
->mtu
)) {
2293 ro
->ro_rt
->rt_ifp
->if_mtu
= net
->mtu
;
2295 ret
= ip_output(m
, inp
->ip_inp
.inp
.inp_options
,
2296 ro
, o_flgs
, inp
->ip_inp
.inp
.inp_moptions
2297 #if defined(__OpenBSD__) || (defined(__FreeBSD__) && __FreeBSD_version >= 480000) \
2298 || defined(__DragonFly__)
2299 , (struct inpcb
*)NULL
2301 #if defined(__NetBSD__)
2302 ,(struct socket
*)inp
->sctp_socket
2306 if ((ro
->ro_rt
) && (have_mtu
) && (net
) && (have_mtu
> net
->mtu
)) {
2307 ro
->ro_rt
->rt_ifp
->if_mtu
= have_mtu
;
2309 sctp_pegs
[SCTP_DATAGRAMS_SENT
]++;
2311 if (sctp_debug_on
& SCTP_DEBUG_OUTPUT3
) {
2312 kprintf("Ip output returns %d\n", ret
);
2316 /* free tempy routes */
2320 /* PMTU check versus smallest asoc MTU goes here */
2321 if (ro
->ro_rt
!= NULL
) {
2322 if (ro
->ro_rt
->rt_rmx
.rmx_mtu
&&
2323 (stcb
->asoc
.smallest_mtu
> ro
->ro_rt
->rt_rmx
.rmx_mtu
)) {
2324 sctp_mtu_size_reset(inp
, &stcb
->asoc
,
2325 ro
->ro_rt
->rt_rmx
.rmx_mtu
);
2328 /* route was freed */
2329 net
->src_addr_selected
= 0;
2335 else if (to
->sa_family
== AF_INET6
) {
2336 struct ip6_hdr
*ip6h
;
2337 #ifdef NEW_STRUCT_ROUTE
2338 struct route ip6route
;
2340 struct route_in6 ip6route
;
2344 uint16_t flowBottom
;
2345 u_char tosBottom
, tosTop
;
2346 struct sockaddr_in6
*sin6
, tmp
, *lsa6
, lsa6_tmp
;
2347 struct sockaddr_in6 lsa6_storage
;
2350 u_short prev_port
=0;
2352 M_PREPEND(m
, sizeof(struct ip6_hdr
), MB_DONTWAIT
);
2354 /* failed to prepend data, give up */
2357 ip6h
= mtod(m
, struct ip6_hdr
*);
2360 * We assume here that inp_flow is in host byte order within
2363 flowBottom
= ((struct in6pcb
*)inp
)->in6p_flowinfo
& 0x0000ffff;
2364 flowTop
= ((((struct in6pcb
*)inp
)->in6p_flowinfo
& 0x000f0000) >> 16);
2366 tosTop
= (((((struct in6pcb
*)inp
)->in6p_flowinfo
& 0xf0) >> 4) | IPV6_VERSION
);
2368 /* protect *sin6 from overwrite */
2369 sin6
= (struct sockaddr_in6
*)to
;
2373 /* KAME hack: embed scopeid */
2374 #if defined(SCTP_BASE_FREEBSD) || defined(__APPLE__) || defined(__DragonFly__)
2375 if (in6_embedscope(&sin6
->sin6_addr
, sin6
, NULL
, NULL
) != 0)
2377 if (in6_embedscope(&sin6
->sin6_addr
, sin6
) != 0)
2381 memset(&ip6route
, 0, sizeof(ip6route
));
2382 ro
= (struct route
*)&ip6route
;
2383 memcpy(&ro
->ro_dst
, sin6
, sin6
->sin6_len
);
2385 ro
= (struct route
*)&net
->ro
;
2388 if ((stcb
->asoc
.ecn_allowed
) && ecn_ok
) {
2390 tosBottom
= (((((struct in6pcb
*)inp
)->in6p_flowinfo
& 0x0c) | sctp_get_ect(stcb
, chk
)) << 4);
2393 tosBottom
= ((((struct in6pcb
*)inp
)->in6p_flowinfo
& 0x0c) << 4);
2396 /* we could get no asoc if it is a O-O-T-B packet */
2397 tosBottom
= ((((struct in6pcb
*)inp
)->in6p_flowinfo
& 0x0c) << 4);
2399 ip6h
->ip6_flow
= htonl(((tosTop
<< 24) | ((tosBottom
|flowTop
) << 16) | flowBottom
));
2400 ip6h
->ip6_nxt
= IPPROTO_SCTP
;
2401 ip6h
->ip6_plen
= m
->m_pkthdr
.len
;
2402 ip6h
->ip6_dst
= sin6
->sin6_addr
;
2405 * Add SRC address selection here:
2406 * we can only reuse to a limited degree the kame src-addr-sel,
2407 * since we can try their selection but it may not be bound.
2409 bzero(&lsa6_tmp
, sizeof(lsa6_tmp
));
2410 lsa6_tmp
.sin6_family
= AF_INET6
;
2411 lsa6_tmp
.sin6_len
= sizeof(lsa6_tmp
);
2414 if (net
->src_addr_selected
== 0) {
2415 /* Cache the source address */
2416 ((struct sockaddr_in6
*)&net
->ro
._s_addr
)->sin6_addr
= sctp_ipv6_source_address_selection(inp
,
2417 stcb
, ro
, net
, out_of_asoc_ok
);
2420 net
->src_addr_selected
= 1;
2422 lsa6
->sin6_addr
= ((struct sockaddr_in6
*)&net
->ro
._s_addr
)->sin6_addr
;
2424 lsa6
->sin6_addr
= sctp_ipv6_source_address_selection(
2425 inp
, stcb
, ro
, net
, out_of_asoc_ok
);
2427 lsa6
->sin6_port
= inp
->sctp_lport
;
2429 if ((ro
->ro_rt
== NULL
)) {
2431 * src addr selection failed to find a route (or valid
2432 * source addr), so we can't get there from here!
2435 if (sctp_debug_on
& SCTP_DEBUG_OUTPUT1
) {
2436 kprintf("low_level_output: dropped v6 pkt- no valid source addr\n");
2441 if ((net
->dest_state
& SCTP_ADDR_REACHABLE
) && stcb
)
2442 sctp_ulp_notify(SCTP_NOTIFY_INTERFACE_DOWN
,
2444 SCTP_FAILED_THRESHOLD
,
2446 net
->dest_state
&= ~SCTP_ADDR_REACHABLE
;
2447 net
->dest_state
|= SCTP_ADDR_NOT_REACHABLE
;
2449 if (net
== stcb
->asoc
.primary_destination
) {
2450 /* need a new primary */
2451 struct sctp_nets
*alt
;
2452 alt
= sctp_find_alternate_net(stcb
, net
);
2454 if (sctp_set_primary_addr(stcb
,
2455 (struct sockaddr
*)NULL
,
2457 net
->dest_state
|= SCTP_ADDR_WAS_PRIMARY
;
2458 net
->src_addr_selected
= 0;
2464 return (EHOSTUNREACH
);
2467 #ifndef SCOPEDROUTING
2469 * XXX: sa6 may not have a valid sin6_scope_id in
2470 * the non-SCOPEDROUTING case.
2472 bzero(&lsa6_storage
, sizeof(lsa6_storage
));
2473 lsa6_storage
.sin6_family
= AF_INET6
;
2474 lsa6_storage
.sin6_len
= sizeof(lsa6_storage
);
2475 if ((error
= in6_recoverscope(&lsa6_storage
, &lsa6
->sin6_addr
,
2481 lsa6_storage
.sin6_addr
= lsa6
->sin6_addr
;
2482 lsa6_storage
.sin6_port
= inp
->sctp_lport
;
2483 lsa6
= &lsa6_storage
;
2484 #endif /* SCOPEDROUTING */
2485 ip6h
->ip6_src
= lsa6
->sin6_addr
;
2488 * We set the hop limit now since there is a good chance that
2489 * our ro pointer is now filled
2491 ip6h
->ip6_hlim
= in6_selecthlim((struct in6pcb
*)&inp
->ip_inp
.inp
,
2493 (ro
->ro_rt
? (ro
->ro_rt
->rt_ifp
) : (NULL
)) :
2496 ifp
= ro
->ro_rt
->rt_ifp
;
2498 if (sctp_debug_on
& SCTP_DEBUG_OUTPUT3
) {
2499 /* Copy to be sure something bad is not happening */
2500 sin6
->sin6_addr
= ip6h
->ip6_dst
;
2501 lsa6
->sin6_addr
= ip6h
->ip6_src
;
2503 kprintf("Calling ipv6 output routine from low level\n");
2505 sctp_print_address((struct sockaddr
*)lsa6
);
2507 sctp_print_address((struct sockaddr
*)sin6
);
2509 #endif /* SCTP_DEBUG */
2511 sin6
= (struct sockaddr_in6
*)&net
->ro
._l_addr
;
2512 /* preserve the port and scope for link local send */
2513 prev_scope
= sin6
->sin6_scope_id
;
2514 prev_port
= sin6
->sin6_port
;
2516 ret
= ip6_output(m
, ((struct in6pcb
*)inp
)->in6p_outputopts
,
2517 #ifdef NEW_STRUCT_ROUTE
2520 (struct route_in6
*)ro
,
2523 ((struct in6pcb
*)inp
)->in6p_moptions
,
2524 #if defined(__NetBSD__)
2525 (struct socket
*)inp
->sctp_socket
,
2528 #if (defined(__FreeBSD__) && __FreeBSD_version >= 480000) || defined(__DragonFly__)
2533 /* for link local this must be done */
2534 sin6
->sin6_scope_id
= prev_scope
;
2535 sin6
->sin6_port
= prev_port
;
2538 if (sctp_debug_on
& SCTP_DEBUG_OUTPUT3
) {
2539 kprintf("return from send is %d\n", ret
);
2541 #endif /* SCTP_DEBUG_OUTPUT */
2542 sctp_pegs
[SCTP_DATAGRAMS_SENT
]++;
2544 /* Now if we had a temp route free it */
2549 /* PMTU check versus smallest asoc MTU goes here */
2550 if (ro
->ro_rt
== NULL
) {
2551 /* Route was freed */
2552 net
->src_addr_selected
= 0;
2554 if (ro
->ro_rt
!= NULL
) {
2555 if (ro
->ro_rt
->rt_rmx
.rmx_mtu
&&
2556 (stcb
->asoc
.smallest_mtu
> ro
->ro_rt
->rt_rmx
.rmx_mtu
)) {
2557 sctp_mtu_size_reset(inp
,
2559 ro
->ro_rt
->rt_rmx
.rmx_mtu
);
2562 #if (defined(SCTP_BASE_FREEBSD) && __FreeBSD_version < 500000) || defined(__APPLE__)
2563 #define ND_IFINFO(ifp) (&nd_ifinfo[ifp->if_index])
2564 #endif /* SCTP_BASE_FREEBSD */
2565 if (ND_IFINFO(ifp
)->linkmtu
&&
2566 (stcb
->asoc
.smallest_mtu
> ND_IFINFO(ifp
)->linkmtu
)) {
2567 sctp_mtu_size_reset(inp
,
2569 ND_IFINFO(ifp
)->linkmtu
);
2578 if (sctp_debug_on
& SCTP_DEBUG_OUTPUT1
) {
2579 kprintf("Unknown protocol (TSNH) type %d\n", ((struct sockaddr
*)to
)->sa_family
);
2588 sctp_is_address_in_scope(struct ifaddr
*ifa
,
2589 int ipv4_addr_legal
,
2590 int ipv6_addr_legal
,
2592 int ipv4_local_scope
,
2596 if ((loopback_scope
== 0) &&
2598 (ifa
->ifa_ifp
->if_type
== IFT_LOOP
)) {
2599 /* skip loopback if not in scope *
2603 if ((ifa
->ifa_addr
->sa_family
== AF_INET
) && ipv4_addr_legal
) {
2604 struct sockaddr_in
*sin
;
2605 sin
= (struct sockaddr_in
*)ifa
->ifa_addr
;
2606 if (sin
->sin_addr
.s_addr
== 0) {
2607 /* not in scope , unspecified */
2610 if ((ipv4_local_scope
== 0) &&
2611 (IN4_ISPRIVATE_ADDRESS(&sin
->sin_addr
))) {
2612 /* private address not in scope */
2615 } else if ((ifa
->ifa_addr
->sa_family
== AF_INET6
) && ipv6_addr_legal
) {
2616 struct sockaddr_in6
*sin6
;
2617 struct in6_ifaddr
*ifa6
;
2619 ifa6
= (struct in6_ifaddr
*)ifa
;
2620 /* ok to use deprecated addresses? */
2621 if (!ip6_use_deprecated
) {
2622 if (ifa6
->ia6_flags
&
2623 IN6_IFF_DEPRECATED
) {
2627 if (ifa6
->ia6_flags
&
2630 IN6_IFF_NOTREADY
)) {
2633 sin6
= (struct sockaddr_in6
*)ifa
->ifa_addr
;
2634 if (IN6_IS_ADDR_UNSPECIFIED(&sin6
->sin6_addr
)) {
2635 /* skip unspecifed addresses */
2638 if (/*(local_scope == 0) && */
2639 (IN6_IS_ADDR_LINKLOCAL(&sin6
->sin6_addr
))) {
2642 if ((site_scope
== 0) &&
2643 (IN6_IS_ADDR_SITELOCAL(&sin6
->sin6_addr
))) {
2654 sctp_send_initiate(struct sctp_inpcb
*inp
, struct sctp_tcb
*stcb
)
2656 struct mbuf
*m
, *m_at
, *m_last
;
2657 struct sctp_nets
*net
;
2658 struct sctp_init_msg
*initm
;
2659 struct sctp_supported_addr_param
*sup_addr
;
2660 struct sctp_ecn_supported_param
*ecn
;
2661 struct sctp_prsctp_supported_param
*prsctp
;
2662 struct sctp_ecn_nonce_supported_param
*ecn_nonce
;
2663 struct sctp_supported_chunk_types_param
*pr_supported
;
2667 /* INIT's always go to the primary (and usually ONLY address) */
2669 net
= stcb
->asoc
.primary_destination
;
2671 net
= TAILQ_FIRST(&stcb
->asoc
.nets
);
2676 /* we confirm any address we send an INIT to */
2677 net
->dest_state
&= ~SCTP_ADDR_UNCONFIRMED
;
2678 sctp_set_primary_addr(stcb
, NULL
, net
);
2680 /* we confirm any address we send an INIT to */
2681 net
->dest_state
&= ~SCTP_ADDR_UNCONFIRMED
;
2684 if (sctp_debug_on
& SCTP_DEBUG_OUTPUT4
) {
2685 kprintf("Sending INIT to ");
2686 sctp_print_address ((struct sockaddr
*)&net
->ro
._l_addr
);
2689 if (((struct sockaddr
*)&(net
->ro
._l_addr
))->sa_family
== AF_INET6
) {
2690 /* special hook, if we are sending to link local
2691 * it will not show up in our private address count.
2693 struct sockaddr_in6
*sin6l
;
2694 sin6l
= &net
->ro
._l_addr
.sin6
;
2695 if (IN6_IS_ADDR_LINKLOCAL(&sin6l
->sin6_addr
))
2698 if (callout_pending(&net
->rxt_timer
.timer
)) {
2699 /* This case should not happen */
2702 /* start the INIT timer */
2703 if (sctp_timer_start(SCTP_TIMER_TYPE_INIT
, inp
, stcb
, net
)) {
2704 /* we are hosed since I can't start the INIT timer? */
2707 MGETHDR(m
, MB_DONTWAIT
, MT_HEADER
);
2709 /* No memory, INIT timer will re-attempt. */
2712 /* make it into a M_EXT */
2713 MCLGET(m
, MB_DONTWAIT
);
2714 if ((m
->m_flags
& M_EXT
) != M_EXT
) {
2715 /* Failed to get cluster buffer */
2719 m
->m_data
+= SCTP_MIN_OVERHEAD
;
2720 m
->m_len
= sizeof(struct sctp_init_msg
);
2721 /* Now lets put the SCTP header in place */
2722 initm
= mtod(m
, struct sctp_init_msg
*);
2723 initm
->sh
.src_port
= inp
->sctp_lport
;
2724 initm
->sh
.dest_port
= stcb
->rport
;
2725 initm
->sh
.v_tag
= 0;
2726 initm
->sh
.checksum
= 0; /* calculate later */
2727 /* now the chunk header */
2728 initm
->msg
.ch
.chunk_type
= SCTP_INITIATION
;
2729 initm
->msg
.ch
.chunk_flags
= 0;
2730 /* fill in later from mbuf we build */
2731 initm
->msg
.ch
.chunk_length
= 0;
2732 /* place in my tag */
2733 initm
->msg
.init
.initiate_tag
= htonl(stcb
->asoc
.my_vtag
);
2734 /* set up some of the credits. */
2735 initm
->msg
.init
.a_rwnd
= htonl(max(inp
->sctp_socket
->so_rcv
.ssb_hiwat
,
2736 SCTP_MINIMAL_RWND
));
2738 initm
->msg
.init
.num_outbound_streams
= htons(stcb
->asoc
.pre_open_streams
);
2739 initm
->msg
.init
.num_inbound_streams
= htons(stcb
->asoc
.max_inbound_streams
);
2740 initm
->msg
.init
.initial_tsn
= htonl(stcb
->asoc
.init_seq_number
);
2741 /* now the address restriction */
2742 sup_addr
= (struct sctp_supported_addr_param
*)((caddr_t
)initm
+
2744 sup_addr
->ph
.param_type
= htons(SCTP_SUPPORTED_ADDRTYPE
);
2745 /* we support 2 types IPv6/IPv4 */
2746 sup_addr
->ph
.param_length
= htons(sizeof(*sup_addr
) +
2748 sup_addr
->addr_type
[0] = htons(SCTP_IPV4_ADDRESS
);
2749 sup_addr
->addr_type
[1] = htons(SCTP_IPV6_ADDRESS
);
2750 m
->m_len
+= sizeof(*sup_addr
) + sizeof(uint16_t);
2752 /* if (inp->sctp_flags & SCTP_PCB_FLAGS_ADAPTIONEVNT) {*/
2753 if (inp
->sctp_ep
.adaption_layer_indicator
) {
2754 struct sctp_adaption_layer_indication
*ali
;
2755 ali
= (struct sctp_adaption_layer_indication
*)(
2756 (caddr_t
)sup_addr
+ sizeof(*sup_addr
) + sizeof(uint16_t));
2757 ali
->ph
.param_type
= htons(SCTP_ULP_ADAPTION
);
2758 ali
->ph
.param_length
= htons(sizeof(*ali
));
2759 ali
->indication
= ntohl(inp
->sctp_ep
.adaption_layer_indicator
);
2760 m
->m_len
+= sizeof(*ali
);
2761 ecn
= (struct sctp_ecn_supported_param
*)((caddr_t
)ali
+
2764 ecn
= (struct sctp_ecn_supported_param
*)((caddr_t
)sup_addr
+
2765 sizeof(*sup_addr
) + sizeof(uint16_t));
2768 /* now any cookie time extensions */
2769 if (stcb
->asoc
.cookie_preserve_req
) {
2770 struct sctp_cookie_perserve_param
*cookie_preserve
;
2771 cookie_preserve
= (struct sctp_cookie_perserve_param
*)(ecn
);
2772 cookie_preserve
->ph
.param_type
= htons(SCTP_COOKIE_PRESERVE
);
2773 cookie_preserve
->ph
.param_length
= htons(
2774 sizeof(*cookie_preserve
));
2775 cookie_preserve
->time
= htonl(stcb
->asoc
.cookie_preserve_req
);
2776 m
->m_len
+= sizeof(*cookie_preserve
);
2777 ecn
= (struct sctp_ecn_supported_param
*)(
2778 (caddr_t
)cookie_preserve
+ sizeof(*cookie_preserve
));
2779 stcb
->asoc
.cookie_preserve_req
= 0;
2783 if (sctp_ecn
== 1) {
2784 ecn
->ph
.param_type
= htons(SCTP_ECN_CAPABLE
);
2785 ecn
->ph
.param_length
= htons(sizeof(*ecn
));
2786 m
->m_len
+= sizeof(*ecn
);
2787 prsctp
= (struct sctp_prsctp_supported_param
*)((caddr_t
)ecn
+
2790 prsctp
= (struct sctp_prsctp_supported_param
*)((caddr_t
)ecn
);
2792 /* And now tell the peer we do pr-sctp */
2793 prsctp
->ph
.param_type
= htons(SCTP_PRSCTP_SUPPORTED
);
2794 prsctp
->ph
.param_length
= htons(sizeof(*prsctp
));
2795 m
->m_len
+= sizeof(*prsctp
);
2798 /* And now tell the peer we do all the extensions */
2799 pr_supported
= (struct sctp_supported_chunk_types_param
*)((caddr_t
)prsctp
+
2802 pr_supported
->ph
.param_type
= htons(SCTP_SUPPORTED_CHUNK_EXT
);
2803 pr_supported
->ph
.param_length
= htons(sizeof(*pr_supported
) + SCTP_EXT_COUNT
);
2804 pr_supported
->chunk_types
[0] = SCTP_ASCONF
;
2805 pr_supported
->chunk_types
[1] = SCTP_ASCONF_ACK
;
2806 pr_supported
->chunk_types
[2] = SCTP_FORWARD_CUM_TSN
;
2807 pr_supported
->chunk_types
[3] = SCTP_PACKET_DROPPED
;
2808 pr_supported
->chunk_types
[4] = SCTP_STREAM_RESET
;
2809 pr_supported
->chunk_types
[5] = 0; /* pad */
2810 pr_supported
->chunk_types
[6] = 0; /* pad */
2811 pr_supported
->chunk_types
[7] = 0; /* pad */
2813 m
->m_len
+= (sizeof(*pr_supported
) + SCTP_EXT_COUNT
+ SCTP_PAD_EXT_COUNT
);
2814 /* ECN nonce: And now tell the peer we support ECN nonce */
2816 if (sctp_ecn_nonce
) {
2817 ecn_nonce
= (struct sctp_ecn_nonce_supported_param
*)((caddr_t
)pr_supported
+
2818 sizeof(*pr_supported
) + SCTP_EXT_COUNT
+ SCTP_PAD_EXT_COUNT
);
2819 ecn_nonce
->ph
.param_type
= htons(SCTP_ECN_NONCE_SUPPORTED
);
2820 ecn_nonce
->ph
.param_length
= htons(sizeof(*ecn_nonce
));
2821 m
->m_len
+= sizeof(*ecn_nonce
);
2825 /* now the addresses */
2826 if (inp
->sctp_flags
& SCTP_PCB_FLAGS_BOUNDALL
) {
2832 TAILQ_FOREACH(ifn
, &ifnet
, if_list
) {
2833 if ((stcb
->asoc
.loopback_scope
== 0) &&
2834 (ifn
->if_type
== IFT_LOOP
)) {
2836 * Skip loopback devices if loopback_scope
2841 TAILQ_FOREACH(ifa
, &ifn
->if_addrlist
, ifa_list
) {
2842 if (sctp_is_address_in_scope(ifa
,
2843 stcb
->asoc
.ipv4_addr_legal
,
2844 stcb
->asoc
.ipv6_addr_legal
,
2845 stcb
->asoc
.loopback_scope
,
2846 stcb
->asoc
.ipv4_local_scope
,
2847 stcb
->asoc
.local_scope
,
2848 stcb
->asoc
.site_scope
) == 0) {
2855 TAILQ_FOREACH(ifn
, &ifnet
, if_list
) {
2856 if ((stcb
->asoc
.loopback_scope
== 0) &&
2857 (ifn
->if_type
== IFT_LOOP
)) {
2859 * Skip loopback devices if loopback_scope
2864 TAILQ_FOREACH(ifa
, &ifn
->if_addrlist
, ifa_list
) {
2865 if (sctp_is_address_in_scope(ifa
,
2866 stcb
->asoc
.ipv4_addr_legal
,
2867 stcb
->asoc
.ipv6_addr_legal
,
2868 stcb
->asoc
.loopback_scope
,
2869 stcb
->asoc
.ipv4_local_scope
,
2870 stcb
->asoc
.local_scope
,
2871 stcb
->asoc
.site_scope
) == 0) {
2874 m_at
= sctp_add_addr_to_mbuf(m_at
, ifa
);
2879 struct sctp_laddr
*laddr
;
2882 /* First, how many ? */
2883 LIST_FOREACH(laddr
, &inp
->sctp_addr_list
, sctp_nxt_addr
) {
2884 if (laddr
->ifa
== NULL
) {
2887 if (laddr
->ifa
->ifa_addr
== NULL
)
2889 if (sctp_is_address_in_scope(laddr
->ifa
,
2890 stcb
->asoc
.ipv4_addr_legal
,
2891 stcb
->asoc
.ipv6_addr_legal
,
2892 stcb
->asoc
.loopback_scope
,
2893 stcb
->asoc
.ipv4_local_scope
,
2894 stcb
->asoc
.local_scope
,
2895 stcb
->asoc
.site_scope
) == 0) {
2900 /* To get through a NAT we only list addresses if
2901 * we have more than one. That way if you just
2902 * bind a single address we let the source of the init
2903 * dictate our address.
2906 LIST_FOREACH(laddr
, &inp
->sctp_addr_list
, sctp_nxt_addr
) {
2907 if (laddr
->ifa
== NULL
) {
2910 if (laddr
->ifa
->ifa_addr
== NULL
) {
2914 if (sctp_is_address_in_scope(laddr
->ifa
,
2915 stcb
->asoc
.ipv4_addr_legal
,
2916 stcb
->asoc
.ipv6_addr_legal
,
2917 stcb
->asoc
.loopback_scope
,
2918 stcb
->asoc
.ipv4_local_scope
,
2919 stcb
->asoc
.local_scope
,
2920 stcb
->asoc
.site_scope
) == 0) {
2923 m_at
= sctp_add_addr_to_mbuf(m_at
, laddr
->ifa
);
2927 /* calulate the size and update pkt header and chunk header */
2928 m
->m_pkthdr
.len
= 0;
2929 for (m_at
= m
; m_at
; m_at
= m_at
->m_next
) {
2930 if (m_at
->m_next
== NULL
)
2932 m
->m_pkthdr
.len
+= m_at
->m_len
;
2934 initm
->msg
.ch
.chunk_length
= htons((m
->m_pkthdr
.len
-
2935 sizeof(struct sctphdr
)));
2936 /* We pass 0 here to NOT set IP_DF if its IPv4, we
2937 * ignore the return here since the timer will drive
2941 /* I don't expect this to execute but we will be safe here */
2942 padval
= m
->m_pkthdr
.len
% 4;
2943 if ((padval
) && (m_last
)) {
2944 /* The compiler worries that m_last may not be
2945 * set even though I think it is impossible :->
2946 * however we add m_last here just in case.
2949 ret
= sctp_add_pad_tombuf(m_last
, (4-padval
));
2951 /* Houston we have a problem, no space */
2955 m
->m_pkthdr
.len
+= padval
;
2958 if (sctp_debug_on
& SCTP_DEBUG_OUTPUT4
) {
2959 kprintf("Calling lowlevel output stcb:%x net:%x\n",
2960 (u_int
)stcb
, (u_int
)net
);
2963 ret
= sctp_lowlevel_chunk_output(inp
, stcb
, net
,
2964 (struct sockaddr
*)&net
->ro
._l_addr
, m
, 0, 0, NULL
, 0);
2966 if (sctp_debug_on
& SCTP_DEBUG_OUTPUT4
) {
2967 kprintf("Low level output returns %d\n", ret
);
2970 sctp_timer_start(SCTP_TIMER_TYPE_INIT
, inp
, stcb
, net
);
2971 SCTP_GETTIME_TIMEVAL(&net
->last_sent_time
);
2975 sctp_arethere_unrecognized_parameters(struct mbuf
*in_initpkt
,
2976 int param_offset
, int *abort_processing
, struct sctp_chunkhdr
*cp
)
2978 /* Given a mbuf containing an INIT or INIT-ACK
2979 * with the param_offset being equal to the
2980 * beginning of the params i.e. (iphlen + sizeof(struct sctp_init_msg)
2981 * parse through the parameters to the end of the mbuf verifying
2982 * that all parameters are known.
2984 * For unknown parameters build and return a mbuf with
2985 * UNRECOGNIZED_PARAMETER errors. If the flags indicate
2986 * to stop processing this chunk stop, and set *abort_processing
2989 * By having param_offset be pre-set to where parameters begin
2990 * it is hoped that this routine may be reused in the future
2993 struct sctp_paramhdr
*phdr
, params
;
2995 struct mbuf
*mat
, *op_err
;
2997 int at
, limit
, pad_needed
;
2998 uint16_t ptype
, plen
;
3001 *abort_processing
= 0;
3004 limit
= ntohs(cp
->chunk_length
) - sizeof(struct sctp_init_chunk
);
3006 if (sctp_debug_on
& SCTP_DEBUG_OUTPUT4
) {
3007 kprintf("Limit is %d bytes\n", limit
);
3013 phdr
= sctp_get_next_param(mat
, at
, ¶ms
, sizeof(params
));
3014 while ((phdr
!= NULL
) && ((size_t)limit
>= sizeof(struct sctp_paramhdr
))) {
3015 ptype
= ntohs(phdr
->param_type
);
3016 plen
= ntohs(phdr
->param_length
);
3017 limit
-= SCTP_SIZE32(plen
);
3018 if (plen
< sizeof(struct sctp_paramhdr
)) {
3020 if (sctp_debug_on
& SCTP_DEBUG_OUTPUT4
) {
3021 kprintf("sctp_output.c:Impossible length in parameter < %d\n", plen
);
3024 *abort_processing
= 1;
3027 /* All parameters for all chunks that we
3028 * know/understand are listed here. We process
3029 * them other places and make appropriate
3030 * stop actions per the upper bits. However
3031 * this is the generic routine processor's can
3032 * call to get back an operr.. to either incorporate (init-ack)
3035 if ((ptype
== SCTP_HEARTBEAT_INFO
) ||
3036 (ptype
== SCTP_IPV4_ADDRESS
) ||
3037 (ptype
== SCTP_IPV6_ADDRESS
) ||
3038 (ptype
== SCTP_STATE_COOKIE
) ||
3039 (ptype
== SCTP_UNRECOG_PARAM
) ||
3040 (ptype
== SCTP_COOKIE_PRESERVE
) ||
3041 (ptype
== SCTP_SUPPORTED_ADDRTYPE
) ||
3042 (ptype
== SCTP_PRSCTP_SUPPORTED
) ||
3043 (ptype
== SCTP_ADD_IP_ADDRESS
) ||
3044 (ptype
== SCTP_DEL_IP_ADDRESS
) ||
3045 (ptype
== SCTP_ECN_CAPABLE
) ||
3046 (ptype
== SCTP_ULP_ADAPTION
) ||
3047 (ptype
== SCTP_ERROR_CAUSE_IND
) ||
3048 (ptype
== SCTP_SET_PRIM_ADDR
) ||
3049 (ptype
== SCTP_SUCCESS_REPORT
) ||
3050 (ptype
== SCTP_ULP_ADAPTION
) ||
3051 (ptype
== SCTP_SUPPORTED_CHUNK_EXT
) ||
3052 (ptype
== SCTP_ECN_NONCE_SUPPORTED
)
3055 at
+= SCTP_SIZE32(plen
);
3056 } else if (ptype
== SCTP_HOSTNAME_ADDRESS
) {
3057 /* We can NOT handle HOST NAME addresses!! */
3059 if (sctp_debug_on
& SCTP_DEBUG_OUTPUT4
) {
3060 kprintf("Can't handle hostname addresses.. abort processing\n");
3063 *abort_processing
= 1;
3064 if (op_err
== NULL
) {
3065 /* Ok need to try to get a mbuf */
3066 MGETHDR(op_err
, MB_DONTWAIT
, MT_DATA
);
3069 op_err
->m_pkthdr
.len
= 0;
3070 /* pre-reserve space for ip and sctp header and chunk hdr*/
3071 op_err
->m_data
+= sizeof(struct ip6_hdr
);
3072 op_err
->m_data
+= sizeof(struct sctphdr
);
3073 op_err
->m_data
+= sizeof(struct sctp_chunkhdr
);
3077 /* If we have space */
3078 struct sctp_paramhdr s
;
3081 pad_needed
= 4 - (err_at
% 4);
3082 m_copyback(op_err
, err_at
, pad_needed
, (caddr_t
)&cpthis
);
3083 err_at
+= pad_needed
;
3085 s
.param_type
= htons(SCTP_CAUSE_UNRESOLV_ADDR
);
3086 s
.param_length
= htons(sizeof(s
) + plen
);
3087 m_copyback(op_err
, err_at
, sizeof(s
), (caddr_t
)&s
);
3088 err_at
+= sizeof(s
);
3089 phdr
= sctp_get_next_param(mat
, at
, (struct sctp_paramhdr
*)tempbuf
, plen
);
3091 sctp_m_freem(op_err
);
3092 /* we are out of memory but we
3093 * still need to have a look at what to
3094 * do (the system is in trouble though).
3098 m_copyback(op_err
, err_at
, plen
, (caddr_t
)phdr
);
3103 /* we do not recognize the parameter
3104 * figure out what we do.
3107 if (sctp_debug_on
& SCTP_DEBUG_OUTPUT4
) {
3108 kprintf("Got parameter type %x - unknown\n",
3112 if ((ptype
& 0x4000) == 0x4000) {
3113 /* Report bit is set?? */
3115 if (sctp_debug_on
& SCTP_DEBUG_OUTPUT4
) {
3116 kprintf("Report bit is set\n");
3119 if (op_err
== NULL
) {
3120 /* Ok need to try to get an mbuf */
3121 MGETHDR(op_err
, MB_DONTWAIT
, MT_DATA
);
3124 op_err
->m_pkthdr
.len
= 0;
3125 op_err
->m_data
+= sizeof(struct ip6_hdr
);
3126 op_err
->m_data
+= sizeof(struct sctphdr
);
3127 op_err
->m_data
+= sizeof(struct sctp_chunkhdr
);
3131 /* If we have space */
3132 struct sctp_paramhdr s
;
3135 pad_needed
= 4 - (err_at
% 4);
3136 m_copyback(op_err
, err_at
, pad_needed
, (caddr_t
)&cpthis
);
3137 err_at
+= pad_needed
;
3139 s
.param_type
= htons(SCTP_UNRECOG_PARAM
);
3140 s
.param_length
= htons(sizeof(s
) + plen
);
3141 m_copyback(op_err
, err_at
, sizeof(s
), (caddr_t
)&s
);
3142 err_at
+= sizeof(s
);
3143 if (plen
> sizeof(tempbuf
)) {
3144 plen
= sizeof(tempbuf
);
3146 phdr
= sctp_get_next_param(mat
, at
, (struct sctp_paramhdr
*)tempbuf
, plen
);
3148 sctp_m_freem(op_err
);
3149 /* we are out of memory but we
3150 * still need to have a look at what to
3151 * do (the system is in trouble though).
3153 goto more_processing
;
3155 m_copyback(op_err
, err_at
, plen
, (caddr_t
)phdr
);
3160 if ((ptype
& 0x8000) == 0x0000) {
3162 if (sctp_debug_on
& SCTP_DEBUG_OUTPUT4
) {
3163 kprintf("Abort bit is now setting1\n");
3168 /* skip this chunk and continue processing */
3169 at
+= SCTP_SIZE32(plen
);
3173 phdr
= sctp_get_next_param(mat
, at
, ¶ms
, sizeof(params
));
3179 sctp_are_there_new_addresses(struct sctp_association
*asoc
,
3180 struct mbuf
*in_initpkt
, int iphlen
, int offset
)
3183 * Given a INIT packet, look through the packet to verify that
3184 * there are NO new addresses. As we go through the parameters
3185 * add reports of any un-understood parameters that require an
3186 * error. Also we must return (1) to drop the packet if we see
3187 * a un-understood parameter that tells us to drop the chunk.
3189 struct sockaddr_in sin4
, *sa4
;
3190 struct sockaddr_in6 sin6
, *sa6
;
3191 struct sockaddr
*sa_touse
;
3192 struct sockaddr
*sa
;
3193 struct sctp_paramhdr
*phdr
, params
;
3196 uint16_t ptype
, plen
;
3199 struct sctp_nets
*net
;
3201 memset(&sin4
, 0, sizeof(sin4
));
3202 memset(&sin6
, 0, sizeof(sin6
));
3203 sin4
.sin_family
= AF_INET
;
3204 sin4
.sin_len
= sizeof(sin4
);
3205 sin6
.sin6_family
= AF_INET6
;
3206 sin6
.sin6_len
= sizeof(sin6
);
3209 /* First what about the src address of the pkt ? */
3210 iph
= mtod(in_initpkt
, struct ip
*);
3211 if (iph
->ip_v
== IPVERSION
) {
3212 /* source addr is IPv4 */
3213 sin4
.sin_addr
= iph
->ip_src
;
3214 sa_touse
= (struct sockaddr
*)&sin4
;
3215 } else if (iph
->ip_v
== (IPV6_VERSION
>> 4)) {
3216 /* source addr is IPv6 */
3217 struct ip6_hdr
*ip6h
;
3218 ip6h
= mtod(in_initpkt
, struct ip6_hdr
*);
3219 sin6
.sin6_addr
= ip6h
->ip6_src
;
3220 sa_touse
= (struct sockaddr
*)&sin6
;
3226 TAILQ_FOREACH(net
, &asoc
->nets
, sctp_next
) {
3227 sa
= (struct sockaddr
*)&net
->ro
._l_addr
;
3228 if (sa
->sa_family
== sa_touse
->sa_family
) {
3229 if (sa
->sa_family
== AF_INET
) {
3230 sa4
= (struct sockaddr_in
*)sa
;
3231 if (sa4
->sin_addr
.s_addr
==
3232 sin4
.sin_addr
.s_addr
) {
3236 } else if (sa
->sa_family
== AF_INET6
) {
3237 sa6
= (struct sockaddr_in6
*)sa
;
3238 if (SCTP6_ARE_ADDR_EQUAL(&sa6
->sin6_addr
,
3247 /* New address added! no need to look futher. */
3250 /* Ok so far lets munge through the rest of the packet */
3254 offset
+= sizeof(struct sctp_init_chunk
);
3255 phdr
= sctp_get_next_param(mat
, offset
, ¶ms
, sizeof(params
));
3257 ptype
= ntohs(phdr
->param_type
);
3258 plen
= ntohs(phdr
->param_length
);
3259 if (ptype
== SCTP_IPV4_ADDRESS
) {
3260 struct sctp_ipv4addr_param
*p4
, p4_buf
;
3262 phdr
= sctp_get_next_param(mat
, offset
,
3263 (struct sctp_paramhdr
*)&p4_buf
, sizeof(p4_buf
));
3264 if (plen
!= sizeof(struct sctp_ipv4addr_param
) ||
3268 p4
= (struct sctp_ipv4addr_param
*)phdr
;
3269 sin4
.sin_addr
.s_addr
= p4
->addr
;
3270 sa_touse
= (struct sockaddr
*)&sin4
;
3271 } else if (ptype
== SCTP_IPV6_ADDRESS
) {
3272 struct sctp_ipv6addr_param
*p6
, p6_buf
;
3274 phdr
= sctp_get_next_param(mat
, offset
,
3275 (struct sctp_paramhdr
*)&p6_buf
, sizeof(p6_buf
));
3276 if (plen
!= sizeof(struct sctp_ipv6addr_param
) ||
3280 p6
= (struct sctp_ipv6addr_param
*)phdr
;
3281 memcpy((caddr_t
)&sin6
.sin6_addr
, p6
->addr
,
3283 sa_touse
= (struct sockaddr
*)&sin4
;
3287 /* ok, sa_touse points to one to check */
3289 TAILQ_FOREACH(net
, &asoc
->nets
, sctp_next
) {
3290 sa
= (struct sockaddr
*)&net
->ro
._l_addr
;
3291 if (sa
->sa_family
!= sa_touse
->sa_family
) {
3294 if (sa
->sa_family
== AF_INET
) {
3295 sa4
= (struct sockaddr_in
*)sa
;
3296 if (sa4
->sin_addr
.s_addr
==
3297 sin4
.sin_addr
.s_addr
) {
3301 } else if (sa
->sa_family
== AF_INET6
) {
3302 sa6
= (struct sockaddr_in6
*)sa
;
3303 if (SCTP6_ARE_ADDR_EQUAL(
3304 &sa6
->sin6_addr
, &sin6
.sin6_addr
)) {
3311 /* New addr added! no need to look further */
3315 offset
+= SCTP_SIZE32(plen
);
3316 phdr
= sctp_get_next_param(mat
, offset
, ¶ms
, sizeof(params
));
3322 * Given a MBUF chain that was sent into us containing an
3323 * INIT. Build a INIT-ACK with COOKIE and send back.
3324 * We assume that the in_initpkt has done a pullup to
3325 * include IPv6/4header, SCTP header and initial part of
3326 * INIT message (i.e. the struct sctp_init_msg).
3329 sctp_send_initiate_ack(struct sctp_inpcb
*inp
, struct sctp_tcb
*stcb
,
3330 struct mbuf
*init_pkt
, int iphlen
, int offset
, struct sctphdr
*sh
,
3331 struct sctp_init_chunk
*init_chk
)
3333 struct sctp_association
*asoc
;
3334 struct mbuf
*m
, *m_at
, *m_tmp
, *m_cookie
, *op_err
, *m_last
;
3335 struct sctp_init_msg
*initackm_out
;
3336 struct sctp_ecn_supported_param
*ecn
;
3337 struct sctp_prsctp_supported_param
*prsctp
;
3338 struct sctp_ecn_nonce_supported_param
*ecn_nonce
;
3339 struct sctp_supported_chunk_types_param
*pr_supported
;
3340 struct sockaddr_storage store
;
3341 struct sockaddr_in
*sin
;
3342 struct sockaddr_in6
*sin6
;
3345 struct ip6_hdr
*ip6
;
3346 struct sockaddr
*to
;
3347 struct sctp_state_cookie stc
;
3348 struct sctp_nets
*net
=NULL
;
3350 uint16_t his_limit
, i_want
;
3351 int abort_flag
, padval
, sz_of
;
3359 if ((asoc
!= NULL
) &&
3360 (SCTP_GET_STATE(asoc
) != SCTP_STATE_COOKIE_WAIT
) &&
3361 (sctp_are_there_new_addresses(asoc
, init_pkt
, iphlen
, offset
))) {
3362 /* new addresses, out of here in non-cookie-wait states */
3364 * Send a ABORT, we don't add the new address error clause though
3365 * we even set the T bit and copy in the 0 tag.. this looks no
3366 * different than if no listner was present.
3368 sctp_send_abort(init_pkt
, iphlen
, sh
, 0, NULL
);
3372 op_err
= sctp_arethere_unrecognized_parameters(init_pkt
,
3373 (offset
+sizeof(struct sctp_init_chunk
)),
3374 &abort_flag
, (struct sctp_chunkhdr
*)init_chk
);
3376 sctp_send_abort(init_pkt
, iphlen
, sh
, init_chk
->init
.initiate_tag
, op_err
);
3379 MGETHDR(m
, MB_DONTWAIT
, MT_HEADER
);
3381 /* No memory, INIT timer will re-attempt. */
3383 sctp_m_freem(op_err
);
3386 MCLGET(m
, MB_DONTWAIT
);
3387 if ((m
->m_flags
& M_EXT
) != M_EXT
) {
3388 /* Failed to get cluster buffer */
3390 sctp_m_freem(op_err
);
3394 m
->m_data
+= SCTP_MIN_OVERHEAD
;
3395 m
->m_pkthdr
.rcvif
= 0;
3396 m
->m_len
= sizeof(struct sctp_init_msg
);
3398 /* the time I built cookie */
3399 SCTP_GETTIME_TIMEVAL(&stc
.time_entered
);
3401 /* populate any tie tags */
3403 /* unlock before tag selections */
3404 SCTP_TCB_UNLOCK(stcb
);
3405 if (asoc
->my_vtag_nonce
== 0)
3406 asoc
->my_vtag_nonce
= sctp_select_a_tag(inp
);
3407 stc
.tie_tag_my_vtag
= asoc
->my_vtag_nonce
;
3409 if (asoc
->peer_vtag_nonce
== 0)
3410 asoc
->peer_vtag_nonce
= sctp_select_a_tag(inp
);
3411 stc
.tie_tag_peer_vtag
= asoc
->peer_vtag_nonce
;
3413 stc
.cookie_life
= asoc
->cookie_life
;
3414 net
= asoc
->primary_destination
;
3415 /* now we must relock */
3416 SCTP_INP_RLOCK(inp
);
3417 /* we may be in trouble here if the inp got freed
3418 * most likely this set of tests will protect
3419 * us but there is a chance not.
3421 if (inp
->sctp_flags
& (SCTP_PCB_FLAGS_SOCKET_GONE
|SCTP_PCB_FLAGS_SOCKET_ALLGONE
)) {
3423 sctp_m_freem(op_err
);
3425 sctp_send_abort(init_pkt
, iphlen
, sh
, 0, NULL
);
3428 SCTP_TCB_LOCK(stcb
);
3429 SCTP_INP_RUNLOCK(stcb
->sctp_ep
);
3431 stc
.tie_tag_my_vtag
= 0;
3432 stc
.tie_tag_peer_vtag
= 0;
3433 /* life I will award this cookie */
3434 stc
.cookie_life
= inp
->sctp_ep
.def_cookie_life
;
3437 /* copy in the ports for later check */
3438 stc
.myport
= sh
->dest_port
;
3439 stc
.peerport
= sh
->src_port
;
3442 * If we wanted to honor cookie life extentions, we would add
3443 * to stc.cookie_life. For now we should NOT honor any extension
3445 stc
.site_scope
= stc
.local_scope
= stc
.loopback_scope
= 0;
3446 if (inp
->sctp_flags
& SCTP_PCB_FLAGS_BOUND_V6
) {
3447 struct inpcb
*in_inp
;
3448 /* Its a V6 socket */
3449 in_inp
= (struct inpcb
*)inp
;
3450 stc
.ipv6_addr_legal
= 1;
3451 /* Now look at the binding flag to see if V4 will be legal */
3453 #if defined(__FreeBSD__) || defined(__APPLE__) || defined(__DragonFly__)
3454 (in_inp
->inp_flags
& IN6P_IPV6_V6ONLY
)
3455 #elif defined(__OpenBSD__)
3456 (0) /* For openbsd we do dual bind only */
3458 (((struct in6pcb
*)in_inp
)->in6p_flags
& IN6P_IPV6_V6ONLY
)
3461 stc
.ipv4_addr_legal
= 1;
3463 /* V4 addresses are NOT legal on the association */
3464 stc
.ipv4_addr_legal
= 0;
3467 /* Its a V4 socket, no - V6 */
3468 stc
.ipv4_addr_legal
= 1;
3469 stc
.ipv6_addr_legal
= 0;
3472 #ifdef SCTP_DONT_DO_PRIVADDR_SCOPE
3477 /* now for scope setup */
3478 memset((caddr_t
)&store
, 0, sizeof(store
));
3479 sin
= (struct sockaddr_in
*)&store
;
3480 sin6
= (struct sockaddr_in6
*)&store
;
3482 to
= (struct sockaddr
*)&store
;
3483 iph
= mtod(init_pkt
, struct ip
*);
3484 if (iph
->ip_v
== IPVERSION
) {
3485 struct in_addr addr
;
3486 struct route iproute
;
3488 sin
->sin_family
= AF_INET
;
3489 sin
->sin_len
= sizeof(struct sockaddr_in
);
3490 sin
->sin_port
= sh
->src_port
;
3491 sin
->sin_addr
= iph
->ip_src
;
3492 /* lookup address */
3493 stc
.address
[0] = sin
->sin_addr
.s_addr
;
3497 stc
.addr_type
= SCTP_IPV4_ADDRESS
;
3498 /* local from address */
3499 memset(&iproute
, 0, sizeof(iproute
));
3501 memcpy(&ro
->ro_dst
, sin
, sizeof(*sin
));
3502 addr
= sctp_ipv4_source_address_selection(inp
, NULL
,
3507 stc
.laddress
[0] = addr
.s_addr
;
3508 stc
.laddress
[1] = 0;
3509 stc
.laddress
[2] = 0;
3510 stc
.laddress
[3] = 0;
3511 stc
.laddr_type
= SCTP_IPV4_ADDRESS
;
3512 /* scope_id is only for v6 */
3514 #ifndef SCTP_DONT_DO_PRIVADDR_SCOPE
3515 if (IN4_ISPRIVATE_ADDRESS(&sin
->sin_addr
)) {
3520 #endif /* SCTP_DONT_DO_PRIVADDR_SCOPE */
3521 /* Must use the address in this case */
3522 if (sctp_is_address_on_local_host((struct sockaddr
*)sin
)) {
3523 stc
.loopback_scope
= 1;
3526 stc
.local_scope
= 1;
3528 } else if (iph
->ip_v
== (IPV6_VERSION
>> 4)) {
3529 struct in6_addr addr
;
3530 #ifdef NEW_STRUCT_ROUTE
3531 struct route iproute6
;
3533 struct route_in6 iproute6
;
3535 ip6
= mtod(init_pkt
, struct ip6_hdr
*);
3536 sin6
->sin6_family
= AF_INET6
;
3537 sin6
->sin6_len
= sizeof(struct sockaddr_in6
);
3538 sin6
->sin6_port
= sh
->src_port
;
3539 sin6
->sin6_addr
= ip6
->ip6_src
;
3540 /* lookup address */
3541 memcpy(&stc
.address
, &sin6
->sin6_addr
,
3542 sizeof(struct in6_addr
));
3543 sin6
->sin6_scope_id
= 0;
3544 stc
.addr_type
= SCTP_IPV6_ADDRESS
;
3546 if (sctp_is_address_on_local_host((struct sockaddr
*)sin6
)) {
3547 stc
.loopback_scope
= 1;
3548 stc
.local_scope
= 1;
3551 } else if (IN6_IS_ADDR_LINKLOCAL(&sin6
->sin6_addr
)) {
3553 * If the new destination is a LINK_LOCAL
3554 * we must have common both site and local
3555 * scope. Don't set local scope though since
3556 * we must depend on the source to be added
3557 * implicitly. We cannot assure just because
3558 * we share one link that all links are common.
3560 stc
.local_scope
= 0;
3563 /* we start counting for the private
3564 * address stuff at 1. since the link
3565 * local we source from won't show
3566 * up in our scoped cou8nt.
3569 /* pull out the scope_id from incoming pkt */
3570 in6_recoverscope(sin6
, &ip6
->ip6_src
,
3571 init_pkt
->m_pkthdr
.rcvif
);
3572 #if defined(SCTP_BASE_FREEBSD) || defined(__APPLE__) || defined(__DragonFly__)
3573 in6_embedscope(&sin6
->sin6_addr
, sin6
, NULL
,
3576 in6_embedscope(&sin6
->sin6_addr
, sin6
);
3578 stc
.scope_id
= sin6
->sin6_scope_id
;
3579 } else if (IN6_IS_ADDR_SITELOCAL(&sin6
->sin6_addr
)) {
3581 * If the new destination is SITE_LOCAL
3582 * then we must have site scope in common.
3586 /* local from address */
3587 memset(&iproute6
, 0, sizeof(iproute6
));
3588 ro
= (struct route
*)&iproute6
;
3589 memcpy(&ro
->ro_dst
, sin6
, sizeof(*sin6
));
3590 addr
= sctp_ipv6_source_address_selection(inp
, NULL
,
3595 memcpy(&stc
.laddress
, &addr
, sizeof(struct in6_addr
));
3596 stc
.laddr_type
= SCTP_IPV6_ADDRESS
;
3599 /* set the scope per the existing tcb */
3600 struct sctp_nets
*lnet
;
3602 stc
.loopback_scope
= asoc
->loopback_scope
;
3603 stc
.ipv4_scope
= asoc
->ipv4_local_scope
;
3604 stc
.site_scope
= asoc
->site_scope
;
3605 stc
.local_scope
= asoc
->local_scope
;
3606 TAILQ_FOREACH(lnet
, &asoc
->nets
, sctp_next
) {
3607 if (lnet
->ro
._l_addr
.sin6
.sin6_family
== AF_INET6
) {
3608 if (IN6_IS_ADDR_LINKLOCAL(&lnet
->ro
._l_addr
.sin6
.sin6_addr
)) {
3609 /* if we have a LL address, start counting
3617 /* use the net pointer */
3618 to
= (struct sockaddr
*)&net
->ro
._l_addr
;
3619 if (to
->sa_family
== AF_INET
) {
3620 sin
= (struct sockaddr_in
*)to
;
3621 stc
.address
[0] = sin
->sin_addr
.s_addr
;
3625 stc
.addr_type
= SCTP_IPV4_ADDRESS
;
3626 if (net
->src_addr_selected
== 0) {
3627 /* strange case here, the INIT
3628 * should have did the selection.
3630 net
->ro
._s_addr
.sin
.sin_addr
=
3631 sctp_ipv4_source_address_selection(inp
,
3632 stcb
, (struct route
*)&net
->ro
, net
, 0);
3633 net
->src_addr_selected
= 1;
3637 stc
.laddress
[0] = net
->ro
._s_addr
.sin
.sin_addr
.s_addr
;
3638 stc
.laddress
[1] = 0;
3639 stc
.laddress
[2] = 0;
3640 stc
.laddress
[3] = 0;
3641 stc
.laddr_type
= SCTP_IPV4_ADDRESS
;
3642 } else if (to
->sa_family
== AF_INET6
) {
3643 sin6
= (struct sockaddr_in6
*)to
;
3644 memcpy(&stc
.address
, &sin6
->sin6_addr
,
3645 sizeof(struct in6_addr
));
3646 stc
.addr_type
= SCTP_IPV6_ADDRESS
;
3647 if (net
->src_addr_selected
== 0) {
3648 /* strange case here, the INIT
3649 * should have did the selection.
3651 net
->ro
._s_addr
.sin6
.sin6_addr
=
3652 sctp_ipv6_source_address_selection(inp
,
3653 stcb
, (struct route
*)&net
->ro
, net
, 0);
3654 net
->src_addr_selected
= 1;
3656 memcpy(&stc
.laddress
, &net
->ro
._l_addr
.sin6
.sin6_addr
,
3657 sizeof(struct in6_addr
));
3658 stc
.laddr_type
= SCTP_IPV6_ADDRESS
;
3661 /* Now lets put the SCTP header in place */
3662 initackm_out
= mtod(m
, struct sctp_init_msg
*);
3663 initackm_out
->sh
.src_port
= inp
->sctp_lport
;
3664 initackm_out
->sh
.dest_port
= sh
->src_port
;
3665 initackm_out
->sh
.v_tag
= init_chk
->init
.initiate_tag
;
3666 /* Save it off for quick ref */
3667 stc
.peers_vtag
= init_chk
->init
.initiate_tag
;
3668 initackm_out
->sh
.checksum
= 0; /* calculate later */
3670 strncpy(stc
.identification
, SCTP_VERSION_STRING
,
3671 min(strlen(SCTP_VERSION_STRING
), sizeof(stc
.identification
)));
3672 /* now the chunk header */
3673 initackm_out
->msg
.ch
.chunk_type
= SCTP_INITIATION_ACK
;
3674 initackm_out
->msg
.ch
.chunk_flags
= 0;
3675 /* fill in later from mbuf we build */
3676 initackm_out
->msg
.ch
.chunk_length
= 0;
3677 /* place in my tag */
3678 if ((asoc
!= NULL
) &&
3679 ((SCTP_GET_STATE(asoc
) == SCTP_STATE_COOKIE_WAIT
) ||
3680 (SCTP_GET_STATE(asoc
) == SCTP_STATE_COOKIE_ECHOED
))) {
3681 /* re-use the v-tags and init-seq here */
3682 initackm_out
->msg
.init
.initiate_tag
= htonl(asoc
->my_vtag
);
3683 initackm_out
->msg
.init
.initial_tsn
= htonl(asoc
->init_seq_number
);
3685 initackm_out
->msg
.init
.initiate_tag
= htonl(sctp_select_a_tag(inp
));
3686 /* get a TSN to use too */
3687 initackm_out
->msg
.init
.initial_tsn
= htonl(sctp_select_initial_TSN(&inp
->sctp_ep
));
3689 /* save away my tag to */
3690 stc
.my_vtag
= initackm_out
->msg
.init
.initiate_tag
;
3692 /* set up some of the credits. */
3693 initackm_out
->msg
.init
.a_rwnd
= htonl(max(inp
->sctp_socket
->so_rcv
.ssb_hiwat
, SCTP_MINIMAL_RWND
));
3694 /* set what I want */
3695 his_limit
= ntohs(init_chk
->init
.num_inbound_streams
);
3696 /* choose what I want */
3698 if (asoc
->streamoutcnt
> inp
->sctp_ep
.pre_open_stream_count
) {
3699 i_want
= asoc
->streamoutcnt
;
3701 i_want
= inp
->sctp_ep
.pre_open_stream_count
;
3704 i_want
= inp
->sctp_ep
.pre_open_stream_count
;
3706 if (his_limit
< i_want
) {
3707 /* I Want more :< */
3708 initackm_out
->msg
.init
.num_outbound_streams
= init_chk
->init
.num_inbound_streams
;
3710 /* I can have what I want :> */
3711 initackm_out
->msg
.init
.num_outbound_streams
= htons(i_want
);
3713 /* tell him his limt. */
3714 initackm_out
->msg
.init
.num_inbound_streams
=
3715 htons(inp
->sctp_ep
.max_open_streams_intome
);
3716 /* setup the ECN pointer */
3718 /* if (inp->sctp_flags & SCTP_PCB_FLAGS_ADAPTIONEVNT) {*/
3719 if (inp
->sctp_ep
.adaption_layer_indicator
) {
3720 struct sctp_adaption_layer_indication
*ali
;
3721 ali
= (struct sctp_adaption_layer_indication
*)(
3722 (caddr_t
)initackm_out
+ sizeof(*initackm_out
));
3723 ali
->ph
.param_type
= htons(SCTP_ULP_ADAPTION
);
3724 ali
->ph
.param_length
= htons(sizeof(*ali
));
3725 ali
->indication
= ntohl(inp
->sctp_ep
.adaption_layer_indicator
);
3726 m
->m_len
+= sizeof(*ali
);
3727 ecn
= (struct sctp_ecn_supported_param
*)((caddr_t
)ali
+
3730 ecn
= (struct sctp_ecn_supported_param
*)(
3731 (caddr_t
)initackm_out
+ sizeof(*initackm_out
));
3735 if (sctp_ecn
== 1) {
3736 ecn
->ph
.param_type
= htons(SCTP_ECN_CAPABLE
);
3737 ecn
->ph
.param_length
= htons(sizeof(*ecn
));
3738 m
->m_len
+= sizeof(*ecn
);
3740 prsctp
= (struct sctp_prsctp_supported_param
*)((caddr_t
)ecn
+
3743 prsctp
= (struct sctp_prsctp_supported_param
*)((caddr_t
)ecn
);
3745 /* And now tell the peer we do pr-sctp */
3746 prsctp
->ph
.param_type
= htons(SCTP_PRSCTP_SUPPORTED
);
3747 prsctp
->ph
.param_length
= htons(sizeof(*prsctp
));
3748 m
->m_len
+= sizeof(*prsctp
);
3751 /* And now tell the peer we do all the extensions */
3752 pr_supported
= (struct sctp_supported_chunk_types_param
*)((caddr_t
)prsctp
+
3755 pr_supported
->ph
.param_type
= htons(SCTP_SUPPORTED_CHUNK_EXT
);
3756 pr_supported
->ph
.param_length
= htons(sizeof(*pr_supported
) + SCTP_EXT_COUNT
);
3757 pr_supported
->chunk_types
[0] = SCTP_ASCONF
;
3758 pr_supported
->chunk_types
[1] = SCTP_ASCONF_ACK
;
3759 pr_supported
->chunk_types
[2] = SCTP_FORWARD_CUM_TSN
;
3760 pr_supported
->chunk_types
[3] = SCTP_PACKET_DROPPED
;
3761 pr_supported
->chunk_types
[4] = SCTP_STREAM_RESET
;
3762 pr_supported
->chunk_types
[5] = 0; /* pad */
3763 pr_supported
->chunk_types
[6] = 0; /* pad */
3764 pr_supported
->chunk_types
[7] = 0; /* pad */
3766 m
->m_len
+= (sizeof(*pr_supported
) + SCTP_EXT_COUNT
+ SCTP_PAD_EXT_COUNT
);
3767 if (sctp_ecn_nonce
) {
3768 /* ECN nonce: And now tell the peer we support ECN nonce */
3769 ecn_nonce
= (struct sctp_ecn_nonce_supported_param
*)((caddr_t
)pr_supported
+
3770 sizeof(*pr_supported
) + SCTP_EXT_COUNT
+ SCTP_PAD_EXT_COUNT
);
3771 ecn_nonce
->ph
.param_type
= htons(SCTP_ECN_NONCE_SUPPORTED
);
3772 ecn_nonce
->ph
.param_length
= htons(sizeof(*ecn_nonce
));
3773 m
->m_len
+= sizeof(*ecn_nonce
);
3777 /* now the addresses */
3778 if (inp
->sctp_flags
& SCTP_PCB_FLAGS_BOUNDALL
) {
3781 int cnt
= cnt_inits_to
;
3783 TAILQ_FOREACH(ifn
, &ifnet
, if_list
) {
3784 if ((stc
.loopback_scope
== 0) &&
3785 (ifn
->if_type
== IFT_LOOP
)) {
3787 * Skip loopback devices if loopback_scope
3792 TAILQ_FOREACH(ifa
, &ifn
->if_addrlist
, ifa_list
) {
3793 if (sctp_is_address_in_scope(ifa
,
3794 stc
.ipv4_addr_legal
, stc
.ipv6_addr_legal
,
3795 stc
.loopback_scope
, stc
.ipv4_scope
,
3796 stc
.local_scope
, stc
.site_scope
) == 0) {
3803 TAILQ_FOREACH(ifn
, &ifnet
, if_list
) {
3804 if ((stc
.loopback_scope
== 0) &&
3805 (ifn
->if_type
== IFT_LOOP
)) {
3807 * Skip loopback devices if
3808 * loopback_scope not set
3812 TAILQ_FOREACH(ifa
, &ifn
->if_addrlist
, ifa_list
) {
3813 if (sctp_is_address_in_scope(ifa
,
3814 stc
.ipv4_addr_legal
,
3815 stc
.ipv6_addr_legal
,
3816 stc
.loopback_scope
, stc
.ipv4_scope
,
3817 stc
.local_scope
, stc
.site_scope
) == 0) {
3820 m_at
= sctp_add_addr_to_mbuf(m_at
, ifa
);
3825 struct sctp_laddr
*laddr
;
3828 /* First, how many ? */
3829 LIST_FOREACH(laddr
, &inp
->sctp_addr_list
, sctp_nxt_addr
) {
3830 if (laddr
->ifa
== NULL
) {
3833 if (laddr
->ifa
->ifa_addr
== NULL
)
3835 if (sctp_is_address_in_scope(laddr
->ifa
,
3836 stc
.ipv4_addr_legal
, stc
.ipv6_addr_legal
,
3837 stc
.loopback_scope
, stc
.ipv4_scope
,
3838 stc
.local_scope
, stc
.site_scope
) == 0) {
3843 /* If we bind a single address only we won't list
3844 * any. This way you can get through a NAT
3847 LIST_FOREACH(laddr
, &inp
->sctp_addr_list
, sctp_nxt_addr
) {
3848 if (laddr
->ifa
== NULL
) {
3850 if (sctp_debug_on
& SCTP_DEBUG_OUTPUT1
) {
3851 kprintf("Help I have fallen and I can't get up!\n");
3856 if (laddr
->ifa
->ifa_addr
== NULL
)
3858 if (sctp_is_address_in_scope(laddr
->ifa
,
3859 stc
.ipv4_addr_legal
, stc
.ipv6_addr_legal
,
3860 stc
.loopback_scope
, stc
.ipv4_scope
,
3861 stc
.local_scope
, stc
.site_scope
) == 0) {
3864 m_at
= sctp_add_addr_to_mbuf(m_at
, laddr
->ifa
);
3869 /* tack on the operational error if present */
3871 if (op_err
->m_pkthdr
.len
% 4) {
3872 /* must add a pad to the param */
3875 padlen
= 4 - (op_err
->m_pkthdr
.len
% 4);
3876 m_copyback(op_err
, op_err
->m_pkthdr
.len
, padlen
, (caddr_t
)&cpthis
);
3878 while (m_at
->m_next
!= NULL
) {
3879 m_at
= m_at
->m_next
;
3881 m_at
->m_next
= op_err
;
3882 while (m_at
->m_next
!= NULL
) {
3883 m_at
= m_at
->m_next
;
3886 /* Get total size of init packet */
3887 sz_of
= SCTP_SIZE32(ntohs(init_chk
->ch
.chunk_length
));
3888 /* pre-calulate the size and update pkt header and chunk header */
3889 m
->m_pkthdr
.len
= 0;
3890 for (m_tmp
= m
; m_tmp
; m_tmp
= m_tmp
->m_next
) {
3891 m
->m_pkthdr
.len
+= m_tmp
->m_len
;
3892 if (m_tmp
->m_next
== NULL
) {
3893 /* m_tmp should now point to last one */
3898 * Figure now the size of the cookie. We know the size of the
3899 * INIT-ACK. The Cookie is going to be the size of INIT, INIT-ACK,
3900 * COOKIE-STRUCTURE and SIGNATURE.
3904 * take our earlier INIT calc and add in the sz we just calculated
3905 * minus the size of the sctphdr (its not included in chunk size
3908 /* add once for the INIT-ACK */
3909 sz_of
+= (m
->m_pkthdr
.len
- sizeof(struct sctphdr
));
3911 /* add a second time for the INIT-ACK in the cookie */
3912 sz_of
+= (m
->m_pkthdr
.len
- sizeof(struct sctphdr
));
3914 /* Now add the cookie header and cookie message struct */
3915 sz_of
+= sizeof(struct sctp_state_cookie_param
);
3916 /* ...and add the size of our signature */
3917 sz_of
+= SCTP_SIGNATURE_SIZE
;
3918 initackm_out
->msg
.ch
.chunk_length
= htons(sz_of
);
3920 /* Now we must build a cookie */
3921 m_cookie
= sctp_add_cookie(inp
, init_pkt
, offset
, m
,
3922 sizeof(struct sctphdr
), &stc
);
3923 if (m_cookie
== NULL
) {
3924 /* memory problem */
3928 /* Now append the cookie to the end and update the space/size */
3929 m_tmp
->m_next
= m_cookie
;
3932 * We pass 0 here to NOT set IP_DF if its IPv4, we ignore the
3933 * return here since the timer will drive a retranmission.
3935 padval
= m
->m_pkthdr
.len
% 4;
3936 if ((padval
) && (m_last
)) {
3937 /* see my previous comments on m_last */
3939 ret
= sctp_add_pad_tombuf(m_last
, (4-padval
));
3941 /* Houston we have a problem, no space */
3945 m
->m_pkthdr
.len
+= padval
;
3947 sctp_lowlevel_chunk_output(inp
, NULL
, NULL
, to
, m
, 0, 0, NULL
, 0);
3952 sctp_insert_on_wheel(struct sctp_association
*asoc
,
3953 struct sctp_stream_out
*strq
)
3955 struct sctp_stream_out
*stre
, *strn
;
3956 stre
= TAILQ_FIRST(&asoc
->out_wheel
);
3958 /* only one on wheel */
3959 TAILQ_INSERT_HEAD(&asoc
->out_wheel
, strq
, next_spoke
);
3962 for (; stre
; stre
= strn
) {
3963 strn
= TAILQ_NEXT(stre
, next_spoke
);
3964 if (stre
->stream_no
> strq
->stream_no
) {
3965 TAILQ_INSERT_BEFORE(stre
, strq
, next_spoke
);
3967 } else if (stre
->stream_no
== strq
->stream_no
) {
3968 /* huh, should not happen */
3970 } else if (strn
== NULL
) {
3971 /* next one is null */
3972 TAILQ_INSERT_AFTER(&asoc
->out_wheel
, stre
, strq
,
3979 sctp_remove_from_wheel(struct sctp_association
*asoc
,
3980 struct sctp_stream_out
*strq
)
3982 /* take off and then setup so we know it is not on the wheel */
3983 TAILQ_REMOVE(&asoc
->out_wheel
, strq
, next_spoke
);
3984 strq
->next_spoke
.tqe_next
= NULL
;
3985 strq
->next_spoke
.tqe_prev
= NULL
;
3990 sctp_prune_prsctp(struct sctp_tcb
*stcb
,
3991 struct sctp_association
*asoc
,
3992 struct sctp_sndrcvinfo
*srcv
,
3997 struct sctp_tmit_chunk
*chk
, *nchk
;
3998 if ((asoc
->peer_supports_prsctp
) && (asoc
->sent_queue_cnt_removeable
> 0)) {
3999 TAILQ_FOREACH(chk
, &asoc
->sent_queue
, sctp_next
) {
4001 * Look for chunks marked with the PR_SCTP
4002 * flag AND the buffer space flag. If the one
4003 * being sent is equal or greater priority then
4004 * purge the old one and free some space.
4006 if ((chk
->flags
& (SCTP_PR_SCTP_ENABLED
|
4007 SCTP_PR_SCTP_BUFFER
)) ==
4008 (SCTP_PR_SCTP_ENABLED
|SCTP_PR_SCTP_BUFFER
)) {
4010 * This one is PR-SCTP AND buffer space
4013 if (chk
->rec
.data
.timetodrop
.tv_sec
>= (long)srcv
->sinfo_timetolive
) {
4014 /* Lower numbers equates to
4015 * higher priority so if the
4016 * one we are looking at has a
4017 * larger or equal priority we
4018 * want to drop the data and
4019 * NOT retransmit it.
4028 if (chk
->sent
> SCTP_DATAGRAM_UNSENT
)
4029 cause
= SCTP_RESPONSE_TO_USER_REQ
|SCTP_NOTIFY_DATAGRAM_SENT
;
4031 cause
= SCTP_RESPONSE_TO_USER_REQ
|SCTP_NOTIFY_DATAGRAM_UNSENT
;
4032 ret_spc
= sctp_release_pr_sctp_chunk(stcb
, chk
,
4035 freed_spc
+= ret_spc
;
4036 if (freed_spc
>= dataout
) {
4039 } /* if chunk was present */
4040 } /* if of sufficent priority */
4041 } /* if chunk has enabled */
4042 } /* tailqforeach */
4044 chk
= TAILQ_FIRST(&asoc
->send_queue
);
4046 nchk
= TAILQ_NEXT(chk
, sctp_next
);
4047 /* Here we must move to the sent queue and mark */
4048 if ((chk
->flags
& (SCTP_PR_SCTP_ENABLED
|
4049 SCTP_PR_SCTP_BUFFER
)) ==
4050 (SCTP_PR_SCTP_ENABLED
|SCTP_PR_SCTP_BUFFER
)) {
4051 if (chk
->rec
.data
.timetodrop
.tv_sec
>= (long)srcv
->sinfo_timetolive
) {
4058 ret_spc
= sctp_release_pr_sctp_chunk(stcb
, chk
,
4059 SCTP_RESPONSE_TO_USER_REQ
|SCTP_NOTIFY_DATAGRAM_UNSENT
,
4062 freed_spc
+= ret_spc
;
4063 if (freed_spc
>= dataout
) {
4066 } /* end if chk->data */
4067 } /* end if right class */
4068 } /* end if chk pr-sctp */
4070 } /* end while (chk) */
4071 } /* if enabled in asoc */
4075 sctp_prepare_chunk(struct sctp_tmit_chunk
*template,
4076 struct sctp_tcb
*stcb
,
4077 struct sctp_sndrcvinfo
*srcv
,
4078 struct sctp_stream_out
*strq
,
4079 struct sctp_nets
*net
)
4081 bzero(template, sizeof(struct sctp_tmit_chunk
));
4082 template->sent
= SCTP_DATAGRAM_UNSENT
;
4083 if ((stcb
->asoc
.peer_supports_prsctp
) &&
4084 (srcv
->sinfo_flags
& (MSG_PR_SCTP_TTL
|MSG_PR_SCTP_BUF
)) &&
4085 (srcv
->sinfo_timetolive
> 0)
4088 * Peer supports PR-SCTP
4089 * The flags is set against this send for PR-SCTP
4090 * And timetolive is a postive value, zero is reserved
4091 * to mean a reliable send for both buffer/time
4094 if (srcv
->sinfo_flags
& MSG_PR_SCTP_BUF
) {
4096 * Time to live is a priority stored in tv_sec
4097 * when doing the buffer drop thing.
4099 template->rec
.data
.timetodrop
.tv_sec
= srcv
->sinfo_timetolive
;
4103 SCTP_GETTIME_TIMEVAL(&template->rec
.data
.timetodrop
);
4104 tv
.tv_sec
= srcv
->sinfo_timetolive
/ 1000;
4105 tv
.tv_usec
= (srcv
->sinfo_timetolive
* 1000) % 1000000;
4107 timeradd(&template->rec
.data
.timetodrop
, &tv
,
4108 &template->rec
.data
.timetodrop
);
4110 timevaladd(&template->rec
.data
.timetodrop
, &tv
);
4114 if ((srcv
->sinfo_flags
& MSG_UNORDERED
) == 0) {
4115 template->rec
.data
.stream_seq
= strq
->next_sequence_sent
;
4117 template->rec
.data
.stream_seq
= 0;
4119 template->rec
.data
.TSN_seq
= 0; /* not yet assigned */
4121 template->rec
.data
.stream_number
= srcv
->sinfo_stream
;
4122 template->rec
.data
.payloadtype
= srcv
->sinfo_ppid
;
4123 template->rec
.data
.context
= srcv
->sinfo_context
;
4124 template->rec
.data
.doing_fast_retransmit
= 0;
4125 template->rec
.data
.ect_nonce
= 0; /* ECN Nonce */
4127 if (srcv
->sinfo_flags
& MSG_ADDR_OVER
) {
4128 template->whoTo
= net
;
4130 if (stcb
->asoc
.primary_destination
)
4131 template->whoTo
= stcb
->asoc
.primary_destination
;
4134 template->whoTo
= net
;
4137 /* the actual chunk flags */
4138 if (srcv
->sinfo_flags
& MSG_UNORDERED
) {
4139 template->rec
.data
.rcv_flags
= SCTP_DATA_UNORDERED
;
4141 template->rec
.data
.rcv_flags
= 0;
4143 /* no flags yet, FRAGMENT_OK goes here */
4144 template->flags
= 0;
4146 if (stcb
->asoc
.peer_supports_prsctp
) {
4147 if (srcv
->sinfo_timetolive
> 0) {
4149 * We only set the flag if timetolive (or
4150 * priority) was set to a positive number.
4151 * Zero is reserved specifically to be
4152 * EXCLUDED and sent reliable.
4154 if (srcv
->sinfo_flags
& MSG_PR_SCTP_TTL
) {
4155 template->flags
|= SCTP_PR_SCTP_ENABLED
;
4157 if (srcv
->sinfo_flags
& MSG_PR_SCTP_BUF
) {
4158 template->flags
|= SCTP_PR_SCTP_BUFFER
;
4162 template->asoc
= &stcb
->asoc
;
4167 sctp_get_frag_point(struct sctp_tcb
*stcb
,
4168 struct sctp_association
*asoc
)
4172 /* For endpoints that have both 6 and 4 addresses
4173 * we must reserver room for the 6 ip header, for
4174 * those that are only dealing with V4 we use
4175 * a larger frag point.
4177 if (stcb
->sctp_ep
->sctp_flags
& SCTP_PCB_FLAGS_BOUND_V6
) {
4178 ovh
= SCTP_MED_OVERHEAD
;
4180 ovh
= SCTP_MED_V4_OVERHEAD
;
4183 if (stcb
->sctp_ep
->sctp_frag_point
> asoc
->smallest_mtu
)
4184 siz
= asoc
->smallest_mtu
- ovh
;
4186 siz
= (stcb
->sctp_ep
->sctp_frag_point
- ovh
);
4188 if (siz > (MCLBYTES-sizeof(struct sctp_data_chunk))) { */
4189 /* A data chunk MUST fit in a cluster */
4190 /* siz = (MCLBYTES - sizeof(struct sctp_data_chunk));*/
4194 /* make it an even word boundary please */
4199 extern unsigned int sctp_max_chunks_on_queue
;
4201 #define SBLOCKWAIT(f) (((f)&MSG_DONTWAIT) ? M_NOWAIT : M_WAITOK)
4204 sctp_msg_append(struct sctp_tcb
*stcb
,
4205 struct sctp_nets
*net
,
4207 struct sctp_sndrcvinfo
*srcv
,
4211 struct sctp_association
*asoc
;
4212 struct sctp_stream_out
*strq
;
4213 struct sctp_tmit_chunk
*chk
;
4214 struct sctpchunk_listhead tmp
;
4215 struct sctp_tmit_chunk
template;
4216 struct mbuf
*n
, *mnext
;
4218 unsigned int dataout
, siz
;
4223 if ((stcb
== NULL
) || (net
== NULL
) || (m
== NULL
) || (srcv
== NULL
)) {
4224 /* Software fault, you blew it on the call */
4226 if (sctp_debug_on
& SCTP_DEBUG_OUTPUT1
) {
4227 kprintf("software error in sctp_msg_append:1\n");
4228 kprintf("stcb:%p net:%p m:%p srcv:%p\n",
4229 stcb
, net
, m
, srcv
);
4236 so
= stcb
->sctp_socket
;
4238 if (srcv
->sinfo_flags
& MSG_ABORT
) {
4239 if ((SCTP_GET_STATE(asoc
) != SCTP_STATE_COOKIE_WAIT
) &&
4240 (SCTP_GET_STATE(asoc
) != SCTP_STATE_COOKIE_ECHOED
)) {
4241 /* It has to be up before we abort */
4242 /* how big is the user initiated abort? */
4243 if ((m
->m_flags
& M_PKTHDR
) && (m
->m_pkthdr
.len
)) {
4244 dataout
= m
->m_pkthdr
.len
;
4248 for (n
= m
; n
; n
= n
->m_next
) {
4249 dataout
+= n
->m_len
;
4252 M_PREPEND(m
, sizeof(struct sctp_paramhdr
), MB_DONTWAIT
);
4254 struct sctp_paramhdr
*ph
;
4255 m
->m_len
= sizeof(struct sctp_paramhdr
) + dataout
;
4256 ph
= mtod(m
, struct sctp_paramhdr
*);
4257 ph
->param_type
= htons(SCTP_CAUSE_USER_INITIATED_ABT
);
4258 ph
->param_length
= htons(m
->m_len
);
4260 sctp_abort_an_association(stcb
->sctp_ep
, stcb
, SCTP_RESPONSE_TO_USER_REQ
, m
);
4263 /* Only free if we don't send an abort */
4268 if ((SCTP_GET_STATE(asoc
) == SCTP_STATE_SHUTDOWN_SENT
) ||
4269 (SCTP_GET_STATE(asoc
) == SCTP_STATE_SHUTDOWN_ACK_SENT
) ||
4270 (SCTP_GET_STATE(asoc
) == SCTP_STATE_SHUTDOWN_RECEIVED
) ||
4271 (asoc
->state
& SCTP_STATE_SHUTDOWN_PENDING
)) {
4272 /* got data while shutting down */
4277 if (srcv
->sinfo_stream
>= asoc
->streamoutcnt
) {
4278 /* Invalid stream number */
4282 if (asoc
->strmout
== NULL
) {
4283 /* huh? software error */
4285 if (sctp_debug_on
& SCTP_DEBUG_OUTPUT1
) {
4286 kprintf("software error in sctp_msg_append:2\n");
4292 strq
= &asoc
->strmout
[srcv
->sinfo_stream
];
4293 /* how big is it ? */
4294 if ((m
->m_flags
& M_PKTHDR
) && (m
->m_pkthdr
.len
)) {
4295 dataout
= m
->m_pkthdr
.len
;
4299 for (n
= m
; n
; n
= n
->m_next
) {
4300 dataout
+= n
->m_len
;
4304 if (sctp_debug_on
& SCTP_DEBUG_OUTPUT1
) {
4305 kprintf("Attempt to send out %d bytes\n",
4310 /* lock the socket buf */
4311 SOCKBUF_LOCK(&so
->so_snd
);
4312 error
= ssb_lock(&so
->so_snd
, SBLOCKWAIT(flags
));
4316 if (dataout
> so
->so_snd
.ssb_hiwat
) {
4317 /* It will NEVER fit */
4321 if ((srcv
->sinfo_flags
& MSG_EOF
) &&
4322 (stcb
->sctp_ep
->sctp_flags
& SCTP_PCB_FLAGS_UDPTYPE
) &&
4327 if ((so
->so_snd
.ssb_hiwat
<
4328 (dataout
+ asoc
->total_output_queue_size
)) ||
4329 (asoc
->chunks_on_out_queue
> sctp_max_chunks_on_queue
) ||
4330 (asoc
->total_output_mbuf_queue_size
>
4331 so
->so_snd
.ssb_mbmax
)
4333 /* XXX Buffer space hunt for data to skip */
4334 if (asoc
->peer_supports_prsctp
) {
4335 sctp_prune_prsctp(stcb
, asoc
, srcv
, dataout
);
4337 while ((so
->so_snd
.ssb_hiwat
<
4338 (dataout
+ asoc
->total_output_queue_size
)) ||
4339 (asoc
->chunks_on_out_queue
> sctp_max_chunks_on_queue
) ||
4340 (asoc
->total_output_mbuf_queue_size
>
4341 so
->so_snd
.ssb_mbmax
)) {
4342 struct sctp_inpcb
*inp
;
4343 /* Now did we free up enough room? */
4344 if (flags
& (MSG_FNONBLOCKING
|MSG_DONTWAIT
)) {
4345 /* Non-blocking io in place */
4346 error
= EWOULDBLOCK
;
4350 * We store off a pointer to the endpoint.
4351 * Since on return from this we must check to
4352 * see if an so_error is set. If so we may have
4353 * been reset and our stcb destroyed. Returning
4354 * an error will cause the correct error return
4355 * through and fix this all.
4357 inp
= stcb
->sctp_ep
;
4359 * Not sure how else to do this since
4360 * the level we suspended at is not
4361 * known deep down where we are. I will
4362 * drop to spl0() so that others can
4366 inp
->sctp_tcb_at_block
= (void *)stcb
;
4367 inp
->error_on_block
= 0;
4368 ssb_unlock(&so
->so_snd
);
4369 error
= ssb_wait(&so
->so_snd
);
4371 * XXX: This is ugly but I have
4372 * recreated most of what goes on to
4373 * block in the sb. UGHH
4374 * May want to add the bit about being
4375 * no longer connected.. but this then
4376 * further dooms the UDP model NOT to
4379 inp
->sctp_tcb_at_block
= 0;
4380 if (inp
->error_on_block
)
4381 error
= inp
->error_on_block
;
4383 error
= so
->so_error
;
4387 error
= ssb_lock(&so
->so_snd
, M_WAITOK
);
4390 /* Otherwise we cycle back and recheck
4393 #if defined(__FreeBSD__) && __FreeBSD_version >= 502115
4394 if (so
->so_rcv
.sb_state
& SBS_CANTSENDMORE
) {
4396 if (so
->so_state
& SS_CANTSENDMORE
) {
4402 error
= so
->so_error
;
4407 /* If we have a packet header fix it if it was broke */
4408 if (m
->m_flags
& M_PKTHDR
) {
4409 m
->m_pkthdr
.len
= dataout
;
4411 /* use the smallest one, user set value or
4412 * smallest mtu of the asoc
4414 siz
= sctp_get_frag_point(stcb
, asoc
);
4415 SOCKBUF_UNLOCK(&so
->so_snd
);
4416 if ((dataout
) && (dataout
<= siz
)) {
4418 chk
= (struct sctp_tmit_chunk
*)SCTP_ZONE_GET(sctppcbinfo
.ipi_zone_chunk
);
4421 SOCKBUF_LOCK(&so
->so_snd
);
4424 sctp_prepare_chunk(chk
, stcb
, srcv
, strq
, net
);
4425 chk
->whoTo
->ref_count
++;
4426 chk
->rec
.data
.rcv_flags
|= SCTP_DATA_NOT_FRAG
;
4428 /* no flags yet, FRAGMENT_OK goes here */
4429 sctppcbinfo
.ipi_count_chunk
++;
4430 sctppcbinfo
.ipi_gencnt_chunk
++;
4431 asoc
->chunks_on_out_queue
++;
4434 /* Total in the MSIZE */
4435 for (mm
= chk
->data
; mm
; mm
= mm
->m_next
) {
4437 if (mm
->m_flags
& M_EXT
) {
4438 mbcnt
+= chk
->data
->m_ext
.ext_size
;
4441 /* fix up the send_size if it is not present */
4442 chk
->send_size
= dataout
;
4443 chk
->book_size
= chk
->send_size
;
4445 /* ok, we are commited */
4446 if ((srcv
->sinfo_flags
& MSG_UNORDERED
) == 0) {
4447 /* bump the ssn if we are unordered. */
4448 strq
->next_sequence_sent
++;
4450 chk
->data
->m_nextpkt
= 0;
4451 asoc
->stream_queue_cnt
++;
4452 TAILQ_INSERT_TAIL(&strq
->outqueue
, chk
, sctp_next
);
4453 /* now check if this stream is on the wheel */
4454 if ((strq
->next_spoke
.tqe_next
== NULL
) &&
4455 (strq
->next_spoke
.tqe_prev
== NULL
)) {
4456 /* Insert it on the wheel since it is not
4459 sctp_insert_on_wheel(asoc
, strq
);
4461 } else if ((dataout
) && (dataout
> siz
)) {
4463 if ((stcb
->sctp_ep
->sctp_flags
& SCTP_PCB_FLAGS_NO_FRAGMENT
) &&
4466 SOCKBUF_LOCK(&so
->so_snd
);
4469 /* setup the template */
4470 sctp_prepare_chunk(&template, stcb
, srcv
, strq
, net
);
4473 while (dataout
> siz
) {
4475 * We can wait since this is called from the user
4478 n
->m_nextpkt
= m_split(n
, siz
, MB_WAIT
);
4479 if (n
->m_nextpkt
== NULL
) {
4481 SOCKBUF_LOCK(&so
->so_snd
);
4488 * ok, now we have a chain on m where m->m_nextpkt points to
4489 * the next chunk and m/m->m_next chain is the piece to send.
4490 * We must go through the chains and thread them on to
4491 * sctp_tmit_chunk chains and place them all on the stream
4492 * queue, breaking the m->m_nextpkt pointers as we go.
4498 * first go through and allocate a sctp_tmit chunk
4499 * for each chunk piece
4501 chk
= (struct sctp_tmit_chunk
*)SCTP_ZONE_GET(sctppcbinfo
.ipi_zone_chunk
);
4504 * ok we must spin through and dump anything
4505 * we have allocated and then jump to the
4508 chk
= TAILQ_FIRST(&tmp
);
4510 TAILQ_REMOVE(&tmp
, chk
, sctp_next
);
4511 SCTP_ZONE_FREE(sctppcbinfo
.ipi_zone_chunk
, chk
);
4512 sctppcbinfo
.ipi_count_chunk
--;
4513 asoc
->chunks_on_out_queue
--;
4514 if ((int)sctppcbinfo
.ipi_count_chunk
< 0) {
4515 panic("Chunk count is negative");
4517 sctppcbinfo
.ipi_gencnt_chunk
++;
4518 chk
= TAILQ_FIRST(&tmp
);
4521 SOCKBUF_LOCK(&so
->so_snd
);
4524 sctppcbinfo
.ipi_count_chunk
++;
4525 asoc
->chunks_on_out_queue
++;
4527 sctppcbinfo
.ipi_gencnt_chunk
++;
4529 chk
->whoTo
->ref_count
++;
4531 /* Total in the MSIZE */
4533 for (mm
= chk
->data
; mm
; mm
= mm
->m_next
) {
4535 if (mm
->m_flags
& M_EXT
) {
4536 mbcnt_e
+= chk
->data
->m_ext
.ext_size
;
4539 /* now fix the chk->send_size */
4540 if (chk
->data
->m_flags
& M_PKTHDR
) {
4541 chk
->send_size
= chk
->data
->m_pkthdr
.len
;
4545 for (nn
= chk
->data
; nn
; nn
= nn
->m_next
) {
4546 chk
->send_size
+= nn
->m_len
;
4549 chk
->book_size
= chk
->send_size
;
4550 chk
->mbcnt
= mbcnt_e
;
4552 if (chk
->flags
& SCTP_PR_SCTP_BUFFER
) {
4553 asoc
->sent_queue_cnt_removeable
++;
4556 TAILQ_INSERT_TAIL(&tmp
, chk
, sctp_next
);
4559 /* now that we have enough space for all de-couple the
4560 * chain of mbufs by going through our temp array
4561 * and breaking the pointers.
4563 /* ok, we are commited */
4564 if ((srcv
->sinfo_flags
& MSG_UNORDERED
) == 0) {
4565 /* bump the ssn if we are unordered. */
4566 strq
->next_sequence_sent
++;
4568 /* Mark the first/last flags. This will
4569 * result int a 3 for a single item on the list
4571 chk
= TAILQ_FIRST(&tmp
);
4572 chk
->rec
.data
.rcv_flags
|= SCTP_DATA_FIRST_FRAG
;
4573 chk
= TAILQ_LAST(&tmp
, sctpchunk_listhead
);
4574 chk
->rec
.data
.rcv_flags
|= SCTP_DATA_LAST_FRAG
;
4575 /* now break any chains on the queue and
4576 * move it to the streams actual queue.
4578 chk
= TAILQ_FIRST(&tmp
);
4580 chk
->data
->m_nextpkt
= 0;
4581 TAILQ_REMOVE(&tmp
, chk
, sctp_next
);
4582 asoc
->stream_queue_cnt
++;
4583 TAILQ_INSERT_TAIL(&strq
->outqueue
, chk
, sctp_next
);
4584 chk
= TAILQ_FIRST(&tmp
);
4586 /* now check if this stream is on the wheel */
4587 if ((strq
->next_spoke
.tqe_next
== NULL
) &&
4588 (strq
->next_spoke
.tqe_prev
== NULL
)) {
4589 /* Insert it on the wheel since it is not
4592 sctp_insert_on_wheel(asoc
, strq
);
4595 SOCKBUF_LOCK(&so
->so_snd
);
4596 /* has a SHUTDOWN been (also) requested by the user on this asoc? */
4599 if ((srcv
->sinfo_flags
& MSG_EOF
) &&
4600 (stcb
->sctp_ep
->sctp_flags
& SCTP_PCB_FLAGS_UDPTYPE
)) {
4602 int some_on_streamwheel
= 0;
4604 if (!TAILQ_EMPTY(&asoc
->out_wheel
)) {
4605 /* Check to see if some data queued */
4606 struct sctp_stream_out
*outs
;
4607 TAILQ_FOREACH(outs
, &asoc
->out_wheel
, next_spoke
) {
4608 if (!TAILQ_EMPTY(&outs
->outqueue
)) {
4609 some_on_streamwheel
= 1;
4615 if (TAILQ_EMPTY(&asoc
->send_queue
) &&
4616 TAILQ_EMPTY(&asoc
->sent_queue
) &&
4617 (some_on_streamwheel
== 0)) {
4618 /* there is nothing queued to send, so I'm done... */
4619 if ((SCTP_GET_STATE(asoc
) != SCTP_STATE_SHUTDOWN_SENT
) &&
4620 (SCTP_GET_STATE(asoc
) != SCTP_STATE_SHUTDOWN_ACK_SENT
)) {
4621 /* only send SHUTDOWN the first time through */
4623 if (sctp_debug_on
& SCTP_DEBUG_OUTPUT4
) {
4624 kprintf("%s:%d sends a shutdown\n",
4630 sctp_send_shutdown(stcb
, stcb
->asoc
.primary_destination
);
4631 asoc
->state
= SCTP_STATE_SHUTDOWN_SENT
;
4632 sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWN
, stcb
->sctp_ep
, stcb
,
4633 asoc
->primary_destination
);
4634 sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNGUARD
, stcb
->sctp_ep
, stcb
,
4635 asoc
->primary_destination
);
4639 * we still got (or just got) data to send, so set
4643 * XXX sockets draft says that MSG_EOF should be sent
4644 * with no data. currently, we will allow user data
4645 * to be sent first and move to SHUTDOWN-PENDING
4647 asoc
->state
|= SCTP_STATE_SHUTDOWN_PENDING
;
4650 #ifdef SCTP_MBCNT_LOGGING
4651 sctp_log_mbcnt(SCTP_LOG_MBCNT_INCREASE
,
4652 asoc
->total_output_queue_size
,
4654 asoc
->total_output_mbuf_queue_size
,
4657 asoc
->total_output_queue_size
+= dataout
;
4658 asoc
->total_output_mbuf_queue_size
+= mbcnt
;
4659 if ((stcb
->sctp_ep
->sctp_flags
& SCTP_PCB_FLAGS_TCPTYPE
) ||
4660 (stcb
->sctp_ep
->sctp_flags
& SCTP_PCB_FLAGS_IN_TCPPOOL
)) {
4661 so
->so_snd
.ssb_cc
+= dataout
;
4662 so
->so_snd
.ssb_mbcnt
+= mbcnt
;
4666 if (sctp_debug_on
& SCTP_DEBUG_OUTPUT2
) {
4667 kprintf("++total out:%d total_mbuf_out:%d\n",
4668 (int)asoc
->total_output_queue_size
,
4669 (int)asoc
->total_output_mbuf_queue_size
);
4674 ssb_unlock(&so
->so_snd
);
4676 SOCKBUF_UNLOCK(&so
->so_snd
);
4678 if (m
&& m
->m_nextpkt
) {
4681 mnext
= n
->m_nextpkt
;
4682 n
->m_nextpkt
= NULL
;
4692 static struct mbuf
*
4693 sctp_copy_mbufchain(struct mbuf
*clonechain
,
4694 struct mbuf
*outchain
)
4696 struct mbuf
*appendchain
;
4697 #if defined(__FreeBSD__) || defined(__NetBSD__)
4698 /* Supposedly m_copypacket is an optimization, use it if we can */
4699 if (clonechain
->m_flags
& M_PKTHDR
) {
4700 appendchain
= m_copypacket(clonechain
, MB_DONTWAIT
);
4701 sctp_pegs
[SCTP_CACHED_SRC
]++;
4703 appendchain
= m_copy(clonechain
, 0, M_COPYALL
);
4704 #elif defined(__APPLE__)
4705 appendchain
= sctp_m_copym(clonechain
, 0, M_COPYALL
, MB_DONTWAIT
);
4707 appendchain
= m_copy(clonechain
, 0, M_COPYALL
);
4710 if (appendchain
== NULL
) {
4713 sctp_m_freem(outchain
);
4717 /* tack on to the end */
4721 if (m
->m_next
== NULL
) {
4722 m
->m_next
= appendchain
;
4727 if (outchain
->m_flags
& M_PKTHDR
) {
4733 append_tot
+= t
->m_len
;
4736 outchain
->m_pkthdr
.len
+= append_tot
;
4740 return (appendchain
);
4745 sctp_sendall_iterator(struct sctp_inpcb
*inp
, struct sctp_tcb
*stcb
, void *ptr
, u_int32_t val
)
4747 struct sctp_copy_all
*ca
;
4751 ca
= (struct sctp_copy_all
*)ptr
;
4752 if (ca
->m
== NULL
) {
4755 if (ca
->inp
!= inp
) {
4759 m
= sctp_copy_mbufchain(ca
->m
, NULL
);
4761 /* can't copy so we are done */
4765 ret
= sctp_msg_append(stcb
, stcb
->asoc
.primary_destination
, m
,
4766 &ca
->sndrcv
, MSG_FNONBLOCKING
);
4775 sctp_sendall_completes(void *ptr
, u_int32_t val
)
4777 struct sctp_copy_all
*ca
;
4778 ca
= (struct sctp_copy_all
*)ptr
;
4779 /* Do a notify here?
4780 * Kacheong suggests that the notify
4781 * be done at the send time.. so you would
4782 * push up a notification if any send failed.
4783 * Don't know if this is feasable since the
4784 * only failures we have is "memory" related and
4785 * if you cannot get an mbuf to send the data
4786 * you surely can't get an mbuf to send up
4787 * to notify the user you can't send the data :->
4790 /* now free everything */
4796 #define MC_ALIGN(m, len) do { \
4797 (m)->m_data += (MCLBYTES - (len)) & ~(sizeof(long) - 1); \
4802 static struct mbuf
*
4803 sctp_copy_out_all(struct uio
*uio
, int len
)
4805 struct mbuf
*ret
, *at
;
4806 int left
, willcpy
, cancpy
, error
;
4808 MGETHDR(ret
, MB_WAIT
, MT_HEADER
);
4815 ret
->m_pkthdr
.len
= len
;
4816 MCLGET(ret
, MB_WAIT
);
4820 if ((ret
->m_flags
& M_EXT
) == 0) {
4824 cancpy
= M_TRAILINGSPACE(ret
);
4825 willcpy
= min(cancpy
, left
);
4828 /* Align data to the end */
4829 MC_ALIGN(at
, willcpy
);
4830 error
= uiomove(mtod(at
, caddr_t
), willcpy
, uio
);
4836 at
->m_len
= willcpy
;
4837 at
->m_nextpkt
= at
->m_next
= 0;
4840 MGET(at
->m_next
, MB_WAIT
, MT_DATA
);
4841 if (at
->m_next
== NULL
) {
4846 MCLGET(at
, MB_WAIT
);
4850 if ((at
->m_flags
& M_EXT
) == 0) {
4853 cancpy
= M_TRAILINGSPACE(at
);
4854 willcpy
= min(cancpy
, left
);
4861 sctp_sendall (struct sctp_inpcb
*inp
, struct uio
*uio
, struct mbuf
*m
, struct sctp_sndrcvinfo
*srcv
)
4864 struct sctp_copy_all
*ca
;
4865 MALLOC(ca
, struct sctp_copy_all
*,
4866 sizeof(struct sctp_copy_all
), M_PCB
, MB_WAIT
);
4871 memset (ca
, 0, sizeof(struct sctp_copy_all
));
4875 /* take off the sendall flag, it would
4876 * be bad if we failed to do this :-0
4878 ca
->sndrcv
.sinfo_flags
&= ~MSG_SENDALL
;
4880 /* get length and mbuf chain */
4882 ca
->sndlen
= uio
->uio_resid
;
4883 ca
->m
= sctp_copy_out_all(uio
, ca
->sndlen
);
4884 if (ca
->m
== NULL
) {
4889 if ((m
->m_flags
& M_PKTHDR
) == 0) {
4894 ca
->sndlen
+= m
->m_len
;
4898 ca
->sndlen
= m
->m_pkthdr
.len
;
4903 ret
= sctp_initiate_iterator(sctp_sendall_iterator
, SCTP_PCB_ANY_FLAGS
, SCTP_ASOC_ANY_STATE
,
4904 (void *)ca
, 0, sctp_sendall_completes
, inp
);
4907 kprintf("Failed to initate iterator to takeover associations\n");
4918 sctp_toss_old_cookies(struct sctp_association
*asoc
)
4920 struct sctp_tmit_chunk
*chk
, *nchk
;
4921 chk
= TAILQ_FIRST(&asoc
->control_send_queue
);
4923 nchk
= TAILQ_NEXT(chk
, sctp_next
);
4924 if (chk
->rec
.chunk_id
== SCTP_COOKIE_ECHO
) {
4925 TAILQ_REMOVE(&asoc
->control_send_queue
, chk
, sctp_next
);
4927 sctp_m_freem(chk
->data
);
4930 asoc
->ctrl_queue_cnt
--;
4932 sctp_free_remote_addr(chk
->whoTo
);
4933 SCTP_ZONE_FREE(sctppcbinfo
.ipi_zone_chunk
, chk
);
4934 sctppcbinfo
.ipi_count_chunk
--;
4935 if ((int)sctppcbinfo
.ipi_count_chunk
< 0) {
4936 panic("Chunk count is negative");
4938 sctppcbinfo
.ipi_gencnt_chunk
++;
4945 sctp_toss_old_asconf(struct sctp_tcb
*stcb
)
4947 struct sctp_association
*asoc
;
4948 struct sctp_tmit_chunk
*chk
, *chk_tmp
;
4951 for (chk
= TAILQ_FIRST(&asoc
->control_send_queue
); chk
!= NULL
;
4954 chk_tmp
= TAILQ_NEXT(chk
, sctp_next
);
4955 /* find SCTP_ASCONF chunk in queue (only one ever in queue) */
4956 if (chk
->rec
.chunk_id
== SCTP_ASCONF
) {
4957 TAILQ_REMOVE(&asoc
->control_send_queue
, chk
, sctp_next
);
4959 sctp_m_freem(chk
->data
);
4962 asoc
->ctrl_queue_cnt
--;
4964 sctp_free_remote_addr(chk
->whoTo
);
4965 SCTP_ZONE_FREE(sctppcbinfo
.ipi_zone_chunk
, chk
);
4966 sctppcbinfo
.ipi_count_chunk
--;
4967 if ((int)sctppcbinfo
.ipi_count_chunk
< 0) {
4968 panic("Chunk count is negative");
4970 sctppcbinfo
.ipi_gencnt_chunk
++;
4977 sctp_clean_up_datalist(struct sctp_tcb
*stcb
,
4978 struct sctp_association
*asoc
,
4979 struct sctp_tmit_chunk
**data_list
,
4981 struct sctp_nets
*net
)
4984 for (i
= 0; i
< bundle_at
; i
++) {
4985 /* off of the send queue */
4987 /* Any chunk NOT 0 you zap the time
4988 * chunk 0 gets zapped or set based on
4989 * if a RTO measurment is needed.
4991 data_list
[i
]->do_rtt
= 0;
4994 data_list
[i
]->sent_rcv_time
= net
->last_sent_time
;
4995 TAILQ_REMOVE(&asoc
->send_queue
,
4998 /* on to the sent queue */
4999 TAILQ_INSERT_TAIL(&asoc
->sent_queue
,
5002 /* This does not lower until the cum-ack passes it */
5003 asoc
->sent_queue_cnt
++;
5004 asoc
->send_queue_cnt
--;
5005 if ((asoc
->peers_rwnd
<= 0) &&
5006 (asoc
->total_flight
== 0) &&
5008 /* Mark the chunk as being a window probe */
5010 if (sctp_debug_on
& SCTP_DEBUG_OUTPUT1
) {
5011 kprintf("WINDOW PROBE SET\n");
5014 sctp_pegs
[SCTP_WINDOW_PROBES
]++;
5015 data_list
[i
]->rec
.data
.state_flags
|= SCTP_WINDOW_PROBE
;
5017 data_list
[i
]->rec
.data
.state_flags
&= ~SCTP_WINDOW_PROBE
;
5019 #ifdef SCTP_AUDITING_ENABLED
5020 sctp_audit_log(0xC2, 3);
5022 data_list
[i
]->sent
= SCTP_DATAGRAM_SENT
;
5023 data_list
[i
]->snd_count
= 1;
5024 net
->flight_size
+= data_list
[i
]->book_size
;
5025 asoc
->total_flight
+= data_list
[i
]->book_size
;
5026 asoc
->total_flight_count
++;
5027 #ifdef SCTP_LOG_RWND
5028 sctp_log_rwnd(SCTP_DECREASE_PEER_RWND
,
5029 asoc
->peers_rwnd
, data_list
[i
]->send_size
, sctp_peer_chunk_oh
);
5031 asoc
->peers_rwnd
= sctp_sbspace_sub(asoc
->peers_rwnd
,
5032 (u_int32_t
)(data_list
[i
]->send_size
+ sctp_peer_chunk_oh
));
5033 if (asoc
->peers_rwnd
< stcb
->sctp_ep
->sctp_ep
.sctp_sws_sender
) {
5034 /* SWS sender side engages */
5035 asoc
->peers_rwnd
= 0;
5041 sctp_clean_up_ctl(struct sctp_association
*asoc
)
5043 struct sctp_tmit_chunk
*chk
, *nchk
;
5044 for (chk
= TAILQ_FIRST(&asoc
->control_send_queue
);
5046 nchk
= TAILQ_NEXT(chk
, sctp_next
);
5047 if ((chk
->rec
.chunk_id
== SCTP_SELECTIVE_ACK
) ||
5048 (chk
->rec
.chunk_id
== SCTP_HEARTBEAT_REQUEST
) ||
5049 (chk
->rec
.chunk_id
== SCTP_HEARTBEAT_ACK
) ||
5050 (chk
->rec
.chunk_id
== SCTP_SHUTDOWN
) ||
5051 (chk
->rec
.chunk_id
== SCTP_SHUTDOWN_ACK
) ||
5052 (chk
->rec
.chunk_id
== SCTP_OPERATION_ERROR
) ||
5053 (chk
->rec
.chunk_id
== SCTP_PACKET_DROPPED
) ||
5054 (chk
->rec
.chunk_id
== SCTP_COOKIE_ACK
) ||
5055 (chk
->rec
.chunk_id
== SCTP_ECN_CWR
) ||
5056 (chk
->rec
.chunk_id
== SCTP_ASCONF_ACK
)) {
5057 /* Stray chunks must be cleaned up */
5059 TAILQ_REMOVE(&asoc
->control_send_queue
, chk
, sctp_next
);
5061 sctp_m_freem(chk
->data
);
5064 asoc
->ctrl_queue_cnt
--;
5065 sctp_free_remote_addr(chk
->whoTo
);
5066 SCTP_ZONE_FREE(sctppcbinfo
.ipi_zone_chunk
, chk
);
5067 sctppcbinfo
.ipi_count_chunk
--;
5068 if ((int)sctppcbinfo
.ipi_count_chunk
< 0) {
5069 panic("Chunk count is negative");
5071 sctppcbinfo
.ipi_gencnt_chunk
++;
5072 } else if (chk
->rec
.chunk_id
== SCTP_STREAM_RESET
) {
5073 struct sctp_stream_reset_req
*strreq
;
5074 /* special handling, we must look into the param */
5075 strreq
= mtod(chk
->data
, struct sctp_stream_reset_req
*);
5076 if (strreq
->sr_req
.ph
.param_type
== ntohs(SCTP_STR_RESET_RESPONSE
)) {
5077 goto clean_up_anyway
;
5084 sctp_move_to_outqueue(struct sctp_tcb
*stcb
,
5085 struct sctp_stream_out
*strq
)
5087 /* Move from the stream to the send_queue keeping track of the total */
5088 struct sctp_association
*asoc
;
5092 struct sctp_tmit_chunk
*chk
, *nchk
;
5093 struct sctp_data_chunk
*dchkh
;
5094 struct sctpchunk_listhead tmp
;
5099 chk
= TAILQ_FIRST(&strq
->outqueue
);
5101 nchk
= TAILQ_NEXT(chk
, sctp_next
);
5102 /* now put in the chunk header */
5104 M_PREPEND(chk
->data
, sizeof(struct sctp_data_chunk
), MB_DONTWAIT
);
5105 if (chk
->data
== NULL
) {
5110 if (orig
!= chk
->data
) {
5111 /* A new mbuf was added, account for it */
5112 if ((stcb
->sctp_ep
->sctp_flags
& SCTP_PCB_FLAGS_TCPTYPE
) ||
5113 (stcb
->sctp_ep
->sctp_flags
& SCTP_PCB_FLAGS_IN_TCPPOOL
)) {
5114 stcb
->sctp_socket
->so_snd
.ssb_mbcnt
+= MSIZE
;
5116 #ifdef SCTP_MBCNT_LOGGING
5117 sctp_log_mbcnt(SCTP_LOG_MBCNT_INCREASE
,
5118 asoc
->total_output_queue_size
,
5120 asoc
->total_output_mbuf_queue_size
,
5123 stcb
->asoc
.total_output_mbuf_queue_size
+= MSIZE
;
5124 chk
->mbcnt
+= MSIZE
;
5126 chk
->send_size
+= sizeof(struct sctp_data_chunk
);
5127 /* This should NOT have to do anything, but
5128 * I would rather be cautious
5130 if (!failed
&& ((size_t)chk
->data
->m_len
< sizeof(struct sctp_data_chunk
))) {
5131 m_pullup(chk
->data
, sizeof(struct sctp_data_chunk
));
5132 if (chk
->data
== NULL
) {
5137 dchkh
= mtod(chk
->data
, struct sctp_data_chunk
*);
5138 dchkh
->ch
.chunk_length
= htons(chk
->send_size
);
5139 /* Chunks must be padded to even word boundary */
5140 padval
= chk
->send_size
% 4;
5142 /* For fragmented messages this should not
5143 * run except possibly on the last chunk
5145 if (sctp_pad_lastmbuf(chk
->data
, (4 - padval
))) {
5146 /* we are in big big trouble no mbufs :< */
5150 chk
->send_size
+= (4 - padval
);
5152 /* pull from stream queue */
5153 TAILQ_REMOVE(&strq
->outqueue
, chk
, sctp_next
);
5154 asoc
->stream_queue_cnt
--;
5155 TAILQ_INSERT_TAIL(&tmp
, chk
, sctp_next
);
5156 /* add it in to the size of moved chunks */
5157 if (chk
->rec
.data
.rcv_flags
& SCTP_DATA_LAST_FRAG
) {
5158 /* we pull only one message */
5164 /* Gak, we just lost the user message */
5165 chk
= TAILQ_FIRST(&tmp
);
5167 nchk
= TAILQ_NEXT(chk
, sctp_next
);
5168 TAILQ_REMOVE(&tmp
, chk
, sctp_next
);
5170 sctp_ulp_notify(SCTP_NOTIFY_DG_FAIL
, stcb
,
5171 (SCTP_NOTIFY_DATAGRAM_UNSENT
|SCTP_INTERNAL_ERROR
),
5175 sctp_m_freem(chk
->data
);
5179 sctp_free_remote_addr(chk
->whoTo
);
5182 SCTP_ZONE_FREE(sctppcbinfo
.ipi_zone_chunk
, chk
);
5183 sctppcbinfo
.ipi_count_chunk
--;
5184 if ((int)sctppcbinfo
.ipi_count_chunk
< 0) {
5185 panic("Chunk count is negative");
5187 sctppcbinfo
.ipi_gencnt_chunk
++;
5192 /* now pull them off of temp wheel */
5193 chk
= TAILQ_FIRST(&tmp
);
5195 nchk
= TAILQ_NEXT(chk
, sctp_next
);
5196 /* insert on send_queue */
5197 TAILQ_REMOVE(&tmp
, chk
, sctp_next
);
5198 TAILQ_INSERT_TAIL(&asoc
->send_queue
, chk
, sctp_next
);
5199 asoc
->send_queue_cnt
++;
5201 chk
->rec
.data
.TSN_seq
= asoc
->sending_seq
++;
5203 dchkh
= mtod(chk
->data
, struct sctp_data_chunk
*);
5204 /* Put the rest of the things in place now. Size
5205 * was done earlier in previous loop prior to
5208 dchkh
->ch
.chunk_type
= SCTP_DATA
;
5209 dchkh
->ch
.chunk_flags
= chk
->rec
.data
.rcv_flags
;
5210 dchkh
->dp
.tsn
= htonl(chk
->rec
.data
.TSN_seq
);
5211 dchkh
->dp
.stream_id
= htons(strq
->stream_no
);
5212 dchkh
->dp
.stream_sequence
= htons(chk
->rec
.data
.stream_seq
);
5213 dchkh
->dp
.protocol_id
= chk
->rec
.data
.payloadtype
;
5214 /* total count moved */
5215 tot_moved
+= chk
->send_size
;
5222 sctp_fill_outqueue(struct sctp_tcb
*stcb
,
5223 struct sctp_nets
*net
)
5225 struct sctp_association
*asoc
;
5226 struct sctp_tmit_chunk
*chk
;
5227 struct sctp_stream_out
*strq
, *strqn
;
5228 int mtu_fromwheel
, goal_mtu
;
5229 unsigned int moved
, seenend
, cnt_mvd
=0;
5232 /* Attempt to move at least 1 MTU's worth
5233 * onto the wheel for each destination address
5235 goal_mtu
= net
->cwnd
- net
->flight_size
;
5236 if ((unsigned int)goal_mtu
< net
->mtu
) {
5237 goal_mtu
= net
->mtu
;
5239 if (sctp_pegs
[SCTP_MOVED_MTU
] < (unsigned int)goal_mtu
) {
5240 sctp_pegs
[SCTP_MOVED_MTU
] = goal_mtu
;
5242 seenend
= moved
= mtu_fromwheel
= 0;
5243 if (asoc
->last_out_stream
== NULL
) {
5244 strq
= asoc
->last_out_stream
= TAILQ_FIRST(&asoc
->out_wheel
);
5245 if (asoc
->last_out_stream
== NULL
) {
5246 /* huh nothing on the wheel, TSNH */
5251 strq
= TAILQ_NEXT(asoc
->last_out_stream
, next_spoke
);
5254 asoc
->last_out_stream
= TAILQ_FIRST(&asoc
->out_wheel
);
5256 while (mtu_fromwheel
< goal_mtu
) {
5260 strq
= TAILQ_FIRST(&asoc
->out_wheel
);
5261 } else if ((moved
== 0) && (seenend
)) {
5262 /* none left on the wheel */
5263 sctp_pegs
[SCTP_MOVED_NLEF
]++;
5267 * clear the flags and rotate back through
5272 strq
= TAILQ_FIRST(&asoc
->out_wheel
);
5278 strqn
= TAILQ_NEXT(strq
, next_spoke
);
5279 if ((chk
= TAILQ_FIRST(&strq
->outqueue
)) == NULL
) {
5280 /* none left on this queue, prune a spoke? */
5281 sctp_remove_from_wheel(asoc
, strq
);
5282 if (strq
== asoc
->last_out_stream
) {
5283 /* the last one we used went off the wheel */
5284 asoc
->last_out_stream
= NULL
;
5289 if (chk
->whoTo
!= net
) {
5290 /* Skip this stream, first one on stream
5291 * does not head to our current destination.
5296 mtu_fromwheel
+= sctp_move_to_outqueue(stcb
, strq
);
5299 asoc
->last_out_stream
= strq
;
5302 sctp_pegs
[SCTP_MOVED_MAX
]++;
5304 if (sctp_debug_on
& SCTP_DEBUG_OUTPUT3
) {
5305 kprintf("Ok we moved %d chunks to send queue\n",
5309 if (sctp_pegs
[SCTP_MOVED_QMAX
] < cnt_mvd
) {
5310 sctp_pegs
[SCTP_MOVED_QMAX
] = cnt_mvd
;
5315 sctp_fix_ecn_echo(struct sctp_association
*asoc
)
5317 struct sctp_tmit_chunk
*chk
;
5318 TAILQ_FOREACH(chk
, &asoc
->control_send_queue
, sctp_next
) {
5319 if (chk
->rec
.chunk_id
== SCTP_ECN_ECHO
) {
5320 chk
->sent
= SCTP_DATAGRAM_UNSENT
;
5326 sctp_move_to_an_alt(struct sctp_tcb
*stcb
,
5327 struct sctp_association
*asoc
,
5328 struct sctp_nets
*net
)
5330 struct sctp_tmit_chunk
*chk
;
5331 struct sctp_nets
*a_net
;
5332 a_net
= sctp_find_alternate_net(stcb
, net
);
5333 if ((a_net
!= net
) &&
5334 ((a_net
->dest_state
& SCTP_ADDR_REACHABLE
) == SCTP_ADDR_REACHABLE
)) {
5336 * We only proceed if a valid alternate is found that is
5337 * not this one and is reachable. Here we must move all
5338 * chunks queued in the send queue off of the destination
5339 * address to our alternate.
5341 TAILQ_FOREACH(chk
, &asoc
->send_queue
, sctp_next
) {
5342 if (chk
->whoTo
== net
) {
5343 /* Move the chunk to our alternate */
5344 sctp_free_remote_addr(chk
->whoTo
);
5352 static int sctp_from_user_send
=0;
5355 sctp_med_chunk_output(struct sctp_inpcb
*inp
,
5356 struct sctp_tcb
*stcb
,
5357 struct sctp_association
*asoc
,
5360 int control_only
, int *cwnd_full
, int from_where
,
5361 struct timeval
*now
, int *now_filled
)
5364 * Ok this is the generic chunk service queue.
5365 * we must do the following:
5366 * - Service the stream queue that is next, moving any message
5367 * (note I must get a complete message i.e. FIRST/MIDDLE and
5368 * LAST to the out queue in one pass) and assigning TSN's
5369 * - Check to see if the cwnd/rwnd allows any output, if so we
5370 * go ahead and fomulate and send the low level chunks. Making
5371 * sure to combine any control in the control chunk queue also.
5373 struct sctp_nets
*net
;
5374 struct mbuf
*outchain
;
5375 struct sctp_tmit_chunk
*chk
, *nchk
;
5376 struct sctphdr
*shdr
;
5377 /* temp arrays for unlinking */
5378 struct sctp_tmit_chunk
*data_list
[SCTP_MAX_DATA_BUNDLING
];
5379 int no_fragmentflg
, error
;
5380 int one_chunk
, hbflag
;
5381 int asconf
, cookie
, no_out_cnt
;
5382 int bundle_at
, ctl_cnt
, no_data_chunks
, cwnd_full_ind
;
5383 unsigned int mtu
, r_mtu
, omtu
;
5386 ctl_cnt
= no_out_cnt
= asconf
= cookie
= 0;
5388 * First lets prime the pump. For each destination, if there
5389 * is room in the flight size, attempt to pull an MTU's worth
5390 * out of the stream queues into the general send_queue
5392 #ifdef SCTP_AUDITING_ENABLED
5393 sctp_audit_log(0xC2, 2);
5396 if (sctp_debug_on
& SCTP_DEBUG_OUTPUT3
) {
5397 kprintf("***********************\n");
5406 /* Nothing to possible to send? */
5407 if (TAILQ_EMPTY(&asoc
->control_send_queue
) &&
5408 TAILQ_EMPTY(&asoc
->send_queue
) &&
5409 TAILQ_EMPTY(&asoc
->out_wheel
)) {
5411 if (sctp_debug_on
& SCTP_DEBUG_OUTPUT3
) {
5412 kprintf("All wheels empty\n");
5417 if (asoc
->peers_rwnd
<= 0) {
5418 /* No room in peers rwnd */
5421 if (asoc
->total_flight
> 0) {
5422 /* we are allowed one chunk in flight */
5424 sctp_pegs
[SCTP_RWND_BLOCKED
]++;
5428 if (sctp_debug_on
& SCTP_DEBUG_OUTPUT3
) {
5429 kprintf("Ok we have done the fillup no_data_chunk=%d tf=%d prw:%d\n",
5430 (int)no_data_chunks
,
5431 (int)asoc
->total_flight
, (int)asoc
->peers_rwnd
);
5434 TAILQ_FOREACH(net
, &asoc
->nets
, sctp_next
) {
5436 if (sctp_debug_on
& SCTP_DEBUG_OUTPUT3
) {
5437 kprintf("net:%p fs:%d cwnd:%d\n",
5438 net
, net
->flight_size
, net
->cwnd
);
5441 if (net
->flight_size
>= net
->cwnd
) {
5442 /* skip this network, no room */
5445 if (sctp_debug_on
& SCTP_DEBUG_OUTPUT3
) {
5446 kprintf("Ok skip fillup->fs:%d > cwnd:%d\n",
5451 sctp_pegs
[SCTP_CWND_NOFILL
]++;
5455 * spin through the stream queues moving one message and
5456 * assign TSN's as appropriate.
5458 sctp_fill_outqueue(stcb
, net
);
5460 *cwnd_full
= cwnd_full_ind
;
5461 /* now service each destination and send out what we can for it */
5463 if (sctp_debug_on
& SCTP_DEBUG_OUTPUT3
) {
5465 TAILQ_FOREACH(chk
, &asoc
->send_queue
, sctp_next
) {
5468 kprintf("We have %d chunks on the send_queue\n", chk_cnt
);
5470 TAILQ_FOREACH(chk
, &asoc
->sent_queue
, sctp_next
) {
5473 kprintf("We have %d chunks on the sent_queue\n", chk_cnt
);
5474 TAILQ_FOREACH(chk
, &asoc
->control_send_queue
, sctp_next
) {
5477 kprintf("We have %d chunks on the control_queue\n", chk_cnt
);
5480 /* If we have data to send, and DSACK is running, stop it
5481 * and build a SACK to dump on to bundle with output. This
5482 * actually MAY make it so the bundling does not occur if
5483 * the SACK is big but I think this is ok because basic SACK
5484 * space is pre-reserved in our fragmentation size choice.
5486 if ((TAILQ_FIRST(&asoc
->send_queue
) != NULL
) &&
5487 (no_data_chunks
== 0)) {
5488 /* We will be sending something */
5489 if (callout_pending(&stcb
->asoc
.dack_timer
.timer
)) {
5490 /* Yep a callout is pending */
5491 sctp_timer_stop(SCTP_TIMER_TYPE_RECV
,
5494 sctp_send_sack(stcb
);
5497 /* Nothing to send? */
5498 if ((TAILQ_FIRST(&asoc
->control_send_queue
) == NULL
) &&
5499 (TAILQ_FIRST(&asoc
->send_queue
) == NULL
)) {
5502 TAILQ_FOREACH(net
, &asoc
->nets
, sctp_next
) {
5503 /* how much can we send? */
5504 if (net
->ref_count
< 2) {
5505 /* Ref-count of 1 so we cannot have data or control
5506 * queued to this address. Skip it.
5510 ctl_cnt
= bundle_at
= 0;
5515 if ((net
->ro
.ro_rt
) && (net
->ro
.ro_rt
->rt_ifp
)) {
5516 /* if we have a route and an ifp
5517 * check to see if we have room to
5521 ifp
= net
->ro
.ro_rt
->rt_ifp
;
5522 if ((ifp
->if_snd
.ifq_len
+ 2) >= ifp
->if_snd
.ifq_maxlen
) {
5523 sctp_pegs
[SCTP_IFP_QUEUE_FULL
]++;
5524 #ifdef SCTP_LOG_MAXBURST
5525 sctp_log_maxburst(net
, ifp
->if_snd
.ifq_len
, ifp
->if_snd
.ifq_maxlen
, SCTP_MAX_IFP_APPLIED
);
5530 if (((struct sockaddr
*)&net
->ro
._l_addr
)->sa_family
== AF_INET
) {
5531 mtu
= net
->mtu
- (sizeof(struct ip
) + sizeof(struct sctphdr
));
5533 mtu
= net
->mtu
- (sizeof(struct ip6_hdr
) + sizeof(struct sctphdr
));
5535 if (mtu
> asoc
->peers_rwnd
) {
5536 if (asoc
->total_flight
> 0) {
5537 /* We have a packet in flight somewhere */
5538 r_mtu
= asoc
->peers_rwnd
;
5540 /* We are always allowed to send one MTU out */
5548 if (sctp_debug_on
& SCTP_DEBUG_OUTPUT3
) {
5549 kprintf("Ok r_mtu is %d mtu is %d for this net:%p one_chunk:%d\n",
5550 r_mtu
, mtu
, net
, one_chunk
);
5553 /************************/
5554 /* Control transmission */
5555 /************************/
5556 /* Now first lets go through the control queue */
5557 for (chk
= TAILQ_FIRST(&asoc
->control_send_queue
);
5559 nchk
= TAILQ_NEXT(chk
, sctp_next
);
5560 if (chk
->whoTo
!= net
) {
5562 * No, not sent to the network we are
5567 if (chk
->data
== NULL
) {
5570 if ((chk
->data
->m_flags
& M_PKTHDR
) == 0) {
5572 * NOTE: the chk queue MUST have the PKTHDR
5573 * flag set on it with a total in the
5574 * m_pkthdr.len field!! else the chunk will
5579 if (chk
->sent
!= SCTP_DATAGRAM_UNSENT
) {
5581 * It must be unsent. Cookies and ASCONF's
5582 * hang around but there timers will force
5583 * when marked for resend.
5587 /* Here we do NOT factor the r_mtu */
5588 if ((chk
->data
->m_pkthdr
.len
< (int)mtu
) ||
5589 (chk
->flags
& CHUNK_FLAGS_FRAGMENT_OK
)) {
5591 * We probably should glom the mbuf chain from
5592 * the chk->data for control but the problem
5593 * is it becomes yet one more level of
5594 * tracking to do if for some reason output
5595 * fails. Then I have got to reconstruct the
5596 * merged control chain.. el yucko.. for now
5597 * we take the easy way and do the copy
5599 outchain
= sctp_copy_mbufchain(chk
->data
,
5601 if (outchain
== NULL
) {
5604 /* update our MTU size */
5605 mtu
-= chk
->data
->m_pkthdr
.len
;
5609 /* Do clear IP_DF ? */
5610 if (chk
->flags
& CHUNK_FLAGS_FRAGMENT_OK
) {
5613 /* Mark things to be removed, if needed */
5614 if ((chk
->rec
.chunk_id
== SCTP_SELECTIVE_ACK
) ||
5615 (chk
->rec
.chunk_id
== SCTP_HEARTBEAT_REQUEST
) ||
5616 (chk
->rec
.chunk_id
== SCTP_HEARTBEAT_ACK
) ||
5617 (chk
->rec
.chunk_id
== SCTP_SHUTDOWN
) ||
5618 (chk
->rec
.chunk_id
== SCTP_SHUTDOWN_ACK
) ||
5619 (chk
->rec
.chunk_id
== SCTP_OPERATION_ERROR
) ||
5620 (chk
->rec
.chunk_id
== SCTP_COOKIE_ACK
) ||
5621 (chk
->rec
.chunk_id
== SCTP_ECN_CWR
) ||
5622 (chk
->rec
.chunk_id
== SCTP_PACKET_DROPPED
) ||
5623 (chk
->rec
.chunk_id
== SCTP_ASCONF_ACK
)) {
5625 if (chk
->rec
.chunk_id
== SCTP_HEARTBEAT_REQUEST
)
5627 /* remove these chunks at the end */
5628 if (chk
->rec
.chunk_id
== SCTP_SELECTIVE_ACK
) {
5629 /* turn off the timer */
5630 if (callout_pending(&stcb
->asoc
.dack_timer
.timer
)) {
5631 sctp_timer_stop(SCTP_TIMER_TYPE_RECV
,
5638 * Other chunks, since they have
5639 * timers running (i.e. COOKIE or
5640 * ASCONF) we just "trust" that it
5641 * gets sent or retransmitted.
5644 if (chk
->rec
.chunk_id
== SCTP_COOKIE_ECHO
) {
5647 } else if (chk
->rec
.chunk_id
== SCTP_ASCONF
) {
5649 * set hb flag since we can use
5655 chk
->sent
= SCTP_DATAGRAM_SENT
;
5660 * Ok we are out of room but we can
5661 * output without effecting the flight
5662 * size since this little guy is a
5663 * control only packet.
5666 sctp_timer_start(SCTP_TIMER_TYPE_ASCONF
, inp
, stcb
, net
);
5670 sctp_timer_start(SCTP_TIMER_TYPE_COOKIE
, inp
, stcb
, net
);
5673 if (outchain
->m_len
== 0) {
5675 * Special case for when you
5676 * get a 0 len mbuf at the
5677 * head due to the lack of a
5678 * MHDR at the beginning.
5680 outchain
->m_len
= sizeof(struct sctphdr
);
5682 M_PREPEND(outchain
, sizeof(struct sctphdr
), MB_DONTWAIT
);
5683 if (outchain
== NULL
) {
5686 goto error_out_again
;
5689 shdr
= mtod(outchain
, struct sctphdr
*);
5690 shdr
->src_port
= inp
->sctp_lport
;
5691 shdr
->dest_port
= stcb
->rport
;
5692 shdr
->v_tag
= htonl(stcb
->asoc
.peer_vtag
);
5695 if ((error
= sctp_lowlevel_chunk_output(inp
, stcb
, net
,
5696 (struct sockaddr
*)&net
->ro
._l_addr
,
5698 no_fragmentflg
, 0, NULL
, asconf
))) {
5699 if (error
== ENOBUFS
) {
5700 asoc
->ifp_had_enobuf
= 1;
5702 sctp_pegs
[SCTP_DATA_OUT_ERR
]++;
5703 if (from_where
== 0) {
5704 sctp_pegs
[SCTP_ERROUT_FRM_USR
]++;
5708 if (sctp_debug_on
& SCTP_DEBUG_OUTPUT2
) {
5709 kprintf("Gak got ctrl error %d\n", error
);
5712 /* error, could not output */
5715 if (sctp_debug_on
& SCTP_DEBUG_OUTPUT3
) {
5716 kprintf("Update HB anyway\n");
5719 if (*now_filled
== 0) {
5720 SCTP_GETTIME_TIMEVAL(&net
->last_sent_time
);
5722 *now
= net
->last_sent_time
;
5724 net
->last_sent_time
= *now
;
5728 if (error
== EHOSTUNREACH
) {
5731 * unreachable during
5735 if (sctp_debug_on
& SCTP_DEBUG_OUTPUT3
) {
5736 kprintf("Moving data to an alterante\n");
5739 sctp_move_to_an_alt(stcb
, asoc
, net
);
5741 sctp_clean_up_ctl (asoc
);
5744 asoc
->ifp_had_enobuf
= 0;
5745 /* Only HB or ASCONF advances time */
5747 if (*now_filled
== 0) {
5748 SCTP_GETTIME_TIMEVAL(&net
->last_sent_time
);
5750 *now
= net
->last_sent_time
;
5752 net
->last_sent_time
= *now
;
5757 * increase the number we sent, if a
5758 * cookie is sent we don't tell them
5762 *num_out
+= ctl_cnt
;
5763 /* recalc a clean slate and setup */
5764 if (stcb
->sctp_ep
->sctp_flags
& SCTP_PCB_FLAGS_BOUND_V6
) {
5765 mtu
= (net
->mtu
- SCTP_MIN_OVERHEAD
);
5767 mtu
= (net
->mtu
- SCTP_MIN_V4_OVERHEAD
);
5773 /*********************/
5774 /* Data transmission */
5775 /*********************/
5776 /* now lets add any data within the MTU constraints */
5777 if (((struct sockaddr
*)&net
->ro
._l_addr
)->sa_family
== AF_INET
) {
5778 omtu
= net
->mtu
- (sizeof(struct ip
) + sizeof(struct sctphdr
));
5780 omtu
= net
->mtu
- (sizeof(struct ip6_hdr
) + sizeof(struct sctphdr
));
5784 if (sctp_debug_on
& SCTP_DEBUG_OUTPUT3
) {
5785 kprintf("Now to data transmission\n");
5789 if (((asoc
->state
& SCTP_STATE_OPEN
) == SCTP_STATE_OPEN
) ||
5791 for (chk
= TAILQ_FIRST(&asoc
->send_queue
); chk
; chk
= nchk
) {
5792 if (no_data_chunks
) {
5793 /* let only control go out */
5795 if (sctp_debug_on
& SCTP_DEBUG_OUTPUT3
) {
5796 kprintf("Either nothing to send or we are full\n");
5801 if (net
->flight_size
>= net
->cwnd
) {
5802 /* skip this net, no room for data */
5804 if (sctp_debug_on
& SCTP_DEBUG_OUTPUT3
) {
5805 kprintf("fs:%d > cwnd:%d\n",
5806 net
->flight_size
, net
->cwnd
);
5809 sctp_pegs
[SCTP_CWND_BLOCKED
]++;
5813 nchk
= TAILQ_NEXT(chk
, sctp_next
);
5814 if (chk
->whoTo
!= net
) {
5815 /* No, not sent to this net */
5817 if (sctp_debug_on
& SCTP_DEBUG_OUTPUT3
) {
5818 kprintf("chk->whoTo:%p not %p\n",
5826 if (sctp_debug_on
& SCTP_DEBUG_OUTPUT3
) {
5827 kprintf("Can we pick up a chunk?\n");
5830 if ((chk
->send_size
> omtu
) && ((chk
->flags
& CHUNK_FLAGS_FRAGMENT_OK
) == 0)) {
5831 /* strange, we have a chunk that is to bit
5832 * for its destination and yet no fragment ok flag.
5833 * Something went wrong when the PMTU changed...we did
5834 * not mark this chunk for some reason?? I will
5835 * fix it here by letting IP fragment it for now and
5836 * printing a warning. This really should not happen ...
5838 /*#ifdef SCTP_DEBUG*/
5839 kprintf("Warning chunk of %d bytes > mtu:%d and yet PMTU disc missed\n",
5840 chk
->send_size
, mtu
);
5842 chk
->flags
|= CHUNK_FLAGS_FRAGMENT_OK
;
5845 if (((chk
->send_size
<= mtu
) && (chk
->send_size
<= r_mtu
)) ||
5846 ((chk
->flags
& CHUNK_FLAGS_FRAGMENT_OK
) && (chk
->send_size
<= asoc
->peers_rwnd
))) {
5847 /* ok we will add this one */
5849 if (sctp_debug_on
& SCTP_DEBUG_OUTPUT3
) {
5850 kprintf("Picking up the chunk\n");
5853 outchain
= sctp_copy_mbufchain(chk
->data
, outchain
);
5854 if (outchain
== NULL
) {
5856 if (sctp_debug_on
& SCTP_DEBUG_OUTPUT3
) {
5857 kprintf("Gakk no memory\n");
5860 if (!callout_pending(&net
->rxt_timer
.timer
)) {
5861 sctp_timer_start(SCTP_TIMER_TYPE_SEND
, inp
, stcb
, net
);
5865 /* upate our MTU size */
5866 /* Do clear IP_DF ? */
5867 if (chk
->flags
& CHUNK_FLAGS_FRAGMENT_OK
) {
5870 mtu
-= chk
->send_size
;
5871 r_mtu
-= chk
->send_size
;
5872 data_list
[bundle_at
++] = chk
;
5873 if (bundle_at
>= SCTP_MAX_DATA_BUNDLING
) {
5881 if ((r_mtu
<= 0) || one_chunk
) {
5887 * Must be sent in order of the TSN's
5891 if (sctp_debug_on
& SCTP_DEBUG_OUTPUT3
) {
5892 kprintf("ok no more chk:%d > mtu:%d || < r_mtu:%d\n",
5893 chk
->send_size
, mtu
, r_mtu
);
5900 } /* if asoc.state OPEN */
5901 /* Is there something to send for this destination? */
5903 if (sctp_debug_on
& SCTP_DEBUG_OUTPUT3
) {
5904 kprintf("ok now is chain assembled? %p\n",
5910 /* We may need to start a control timer or two */
5912 sctp_timer_start(SCTP_TIMER_TYPE_ASCONF
, inp
, stcb
, net
);
5916 sctp_timer_start(SCTP_TIMER_TYPE_COOKIE
, inp
, stcb
, net
);
5919 /* must start a send timer if data is being sent */
5920 if (bundle_at
&& (!callout_pending(&net
->rxt_timer
.timer
))) {
5921 /* no timer running on this destination
5925 if (sctp_debug_on
& SCTP_DEBUG_OUTPUT3
) {
5926 kprintf("ok lets start a send timer .. we will transmit %p\n",
5930 sctp_timer_start(SCTP_TIMER_TYPE_SEND
, inp
, stcb
, net
);
5932 /* Now send it, if there is anything to send :> */
5933 if ((outchain
->m_flags
& M_PKTHDR
) == 0) {
5936 MGETHDR(t
, MB_DONTWAIT
, MT_HEADER
);
5938 sctp_m_freem(outchain
);
5941 t
->m_next
= outchain
;
5942 t
->m_pkthdr
.len
= 0;
5943 t
->m_pkthdr
.rcvif
= 0;
5948 outchain
->m_pkthdr
.len
+= t
->m_len
;
5952 if (outchain
->m_len
== 0) {
5953 /* Special case for when you get a 0 len
5954 * mbuf at the head due to the lack
5955 * of a MHDR at the beginning.
5957 MH_ALIGN(outchain
, sizeof(struct sctphdr
));
5958 outchain
->m_len
= sizeof(struct sctphdr
);
5960 M_PREPEND(outchain
, sizeof(struct sctphdr
), MB_DONTWAIT
);
5961 if (outchain
== NULL
) {
5967 shdr
= mtod(outchain
, struct sctphdr
*);
5968 shdr
->src_port
= inp
->sctp_lport
;
5969 shdr
->dest_port
= stcb
->rport
;
5970 shdr
->v_tag
= htonl(stcb
->asoc
.peer_vtag
);
5972 if ((error
= sctp_lowlevel_chunk_output(inp
, stcb
, net
,
5973 (struct sockaddr
*)&net
->ro
._l_addr
,
5975 no_fragmentflg
, bundle_at
, data_list
[0], asconf
))) {
5976 /* error, we could not output */
5977 if (error
== ENOBUFS
) {
5978 asoc
->ifp_had_enobuf
= 1;
5980 sctp_pegs
[SCTP_DATA_OUT_ERR
]++;
5981 if (from_where
== 0) {
5982 sctp_pegs
[SCTP_ERROUT_FRM_USR
]++;
5987 if (sctp_debug_on
& SCTP_DEBUG_OUTPUT3
) {
5988 kprintf("Gak send error %d\n", error
);
5993 if (sctp_debug_on
& SCTP_DEBUG_OUTPUT3
) {
5994 kprintf("Update HB time anyway\n");
5997 if (*now_filled
== 0) {
5998 SCTP_GETTIME_TIMEVAL(&net
->last_sent_time
);
6000 *now
= net
->last_sent_time
;
6002 net
->last_sent_time
= *now
;
6006 if (error
== EHOSTUNREACH
) {
6008 * Destination went unreachable during
6012 if (sctp_debug_on
& SCTP_DEBUG_OUTPUT3
) {
6013 kprintf("Calling the movement routine\n");
6016 sctp_move_to_an_alt(stcb
, asoc
, net
);
6018 sctp_clean_up_ctl (asoc
);
6021 asoc
->ifp_had_enobuf
= 0;
6023 if (bundle_at
|| hbflag
) {
6024 /* For data/asconf and hb set time */
6025 if (*now_filled
== 0) {
6026 SCTP_GETTIME_TIMEVAL(&net
->last_sent_time
);
6028 *now
= net
->last_sent_time
;
6030 net
->last_sent_time
= *now
;
6035 *num_out
+= (ctl_cnt
+ bundle_at
);
6038 if (!net
->rto_pending
) {
6039 /* setup for a RTO measurement */
6040 net
->rto_pending
= 1;
6041 data_list
[0]->do_rtt
= 1;
6043 data_list
[0]->do_rtt
= 0;
6045 sctp_pegs
[SCTP_PEG_TSNS_SENT
] += bundle_at
;
6046 sctp_clean_up_datalist(stcb
, asoc
, data_list
, bundle_at
, net
);
6053 /* At the end there should be no NON timed
6054 * chunks hanging on this queue.
6056 if ((*num_out
== 0) && (*reason_code
== 0)) {
6059 sctp_clean_up_ctl (asoc
);
6064 sctp_queue_op_err(struct sctp_tcb
*stcb
, struct mbuf
*op_err
)
6066 /* Prepend a OPERATIONAL_ERROR chunk header
6067 * and put on the end of the control chunk queue.
6069 /* Sender had better have gotten a MGETHDR or else
6070 * the control chunk will be forever skipped
6072 struct sctp_chunkhdr
*hdr
;
6073 struct sctp_tmit_chunk
*chk
;
6076 chk
= (struct sctp_tmit_chunk
*)SCTP_ZONE_GET(sctppcbinfo
.ipi_zone_chunk
);
6079 sctp_m_freem(op_err
);
6082 sctppcbinfo
.ipi_count_chunk
++;
6083 sctppcbinfo
.ipi_gencnt_chunk
++;
6084 M_PREPEND(op_err
, sizeof(struct sctp_chunkhdr
), MB_DONTWAIT
);
6085 if (op_err
== NULL
) {
6086 SCTP_ZONE_FREE(sctppcbinfo
.ipi_zone_chunk
, chk
);
6087 sctppcbinfo
.ipi_count_chunk
--;
6088 if ((int)sctppcbinfo
.ipi_count_chunk
< 0) {
6089 panic("Chunk count is negative");
6091 sctppcbinfo
.ipi_gencnt_chunk
++;
6096 while (mat
!= NULL
) {
6097 chk
->send_size
+= mat
->m_len
;
6100 chk
->rec
.chunk_id
= SCTP_OPERATION_ERROR
;
6101 chk
->sent
= SCTP_DATAGRAM_UNSENT
;
6104 chk
->asoc
= &stcb
->asoc
;
6106 chk
->whoTo
= chk
->asoc
->primary_destination
;
6107 chk
->whoTo
->ref_count
++;
6108 hdr
= mtod(op_err
, struct sctp_chunkhdr
*);
6109 hdr
->chunk_type
= SCTP_OPERATION_ERROR
;
6110 hdr
->chunk_flags
= 0;
6111 hdr
->chunk_length
= htons(chk
->send_size
);
6112 TAILQ_INSERT_TAIL(&chk
->asoc
->control_send_queue
,
6115 chk
->asoc
->ctrl_queue_cnt
++;
6119 sctp_send_cookie_echo(struct mbuf
*m
,
6121 struct sctp_tcb
*stcb
,
6122 struct sctp_nets
*net
)
6125 * pull out the cookie and put it at the front of the control
6129 struct mbuf
*cookie
, *mat
;
6130 struct sctp_paramhdr parm
, *phdr
;
6131 struct sctp_chunkhdr
*hdr
;
6132 struct sctp_tmit_chunk
*chk
;
6133 uint16_t ptype
, plen
;
6134 /* First find the cookie in the param area */
6136 at
= offset
+ sizeof(struct sctp_init_chunk
);
6139 phdr
= sctp_get_next_param(m
, at
, &parm
, sizeof(parm
));
6143 ptype
= ntohs(phdr
->param_type
);
6144 plen
= ntohs(phdr
->param_length
);
6145 if (ptype
== SCTP_STATE_COOKIE
) {
6147 /* found the cookie */
6148 if ((pad
= (plen
% 4))) {
6151 cookie
= sctp_m_copym(m
, at
, plen
, MB_DONTWAIT
);
6152 if (cookie
== NULL
) {
6158 at
+= SCTP_SIZE32(plen
);
6160 if (cookie
== NULL
) {
6161 /* Did not find the cookie */
6164 /* ok, we got the cookie lets change it into a cookie echo chunk */
6166 /* first the change from param to cookie */
6167 hdr
= mtod(cookie
, struct sctp_chunkhdr
*);
6168 hdr
->chunk_type
= SCTP_COOKIE_ECHO
;
6169 hdr
->chunk_flags
= 0;
6170 /* now we MUST have a PKTHDR on it */
6171 if ((cookie
->m_flags
& M_PKTHDR
) != M_PKTHDR
) {
6172 /* we hope this happens rarely */
6173 MGETHDR(mat
, MB_DONTWAIT
, MT_HEADER
);
6175 sctp_m_freem(cookie
);
6179 mat
->m_pkthdr
.rcvif
= 0;
6180 mat
->m_next
= cookie
;
6183 cookie
->m_pkthdr
.len
= plen
;
6184 /* get the chunk stuff now and place it in the FRONT of the queue */
6185 chk
= (struct sctp_tmit_chunk
*)SCTP_ZONE_GET(sctppcbinfo
.ipi_zone_chunk
);
6188 sctp_m_freem(cookie
);
6191 sctppcbinfo
.ipi_count_chunk
++;
6192 sctppcbinfo
.ipi_gencnt_chunk
++;
6193 chk
->send_size
= cookie
->m_pkthdr
.len
;
6194 chk
->rec
.chunk_id
= SCTP_COOKIE_ECHO
;
6195 chk
->sent
= SCTP_DATAGRAM_UNSENT
;
6198 chk
->asoc
= &stcb
->asoc
;
6200 chk
->whoTo
= chk
->asoc
->primary_destination
;
6201 chk
->whoTo
->ref_count
++;
6202 TAILQ_INSERT_HEAD(&chk
->asoc
->control_send_queue
, chk
, sctp_next
);
6203 chk
->asoc
->ctrl_queue_cnt
++;
6208 sctp_send_heartbeat_ack(struct sctp_tcb
*stcb
,
6212 struct sctp_nets
*net
)
6214 /* take a HB request and make it into a
6215 * HB ack and send it.
6217 struct mbuf
*outchain
;
6218 struct sctp_chunkhdr
*chdr
;
6219 struct sctp_tmit_chunk
*chk
;
6223 /* must have a net pointer */
6226 outchain
= sctp_m_copym(m
, offset
, chk_length
, MB_DONTWAIT
);
6227 if (outchain
== NULL
) {
6228 /* gak out of memory */
6231 chdr
= mtod(outchain
, struct sctp_chunkhdr
*);
6232 chdr
->chunk_type
= SCTP_HEARTBEAT_ACK
;
6233 chdr
->chunk_flags
= 0;
6234 if ((outchain
->m_flags
& M_PKTHDR
) != M_PKTHDR
) {
6235 /* should not happen but we are cautious. */
6237 MGETHDR(tmp
, MB_DONTWAIT
, MT_HEADER
);
6242 tmp
->m_pkthdr
.rcvif
= 0;
6243 tmp
->m_next
= outchain
;
6246 outchain
->m_pkthdr
.len
= chk_length
;
6247 if (chk_length
% 4) {
6251 padlen
= 4 - (outchain
->m_pkthdr
.len
% 4);
6252 m_copyback(outchain
, outchain
->m_pkthdr
.len
, padlen
, (caddr_t
)&cpthis
);
6254 chk
= (struct sctp_tmit_chunk
*)SCTP_ZONE_GET(sctppcbinfo
.ipi_zone_chunk
);
6257 sctp_m_freem(outchain
);
6260 sctppcbinfo
.ipi_count_chunk
++;
6261 sctppcbinfo
.ipi_gencnt_chunk
++;
6263 chk
->send_size
= chk_length
;
6264 chk
->rec
.chunk_id
= SCTP_HEARTBEAT_ACK
;
6265 chk
->sent
= SCTP_DATAGRAM_UNSENT
;
6268 chk
->asoc
= &stcb
->asoc
;
6269 chk
->data
= outchain
;
6271 chk
->whoTo
->ref_count
++;
6272 TAILQ_INSERT_TAIL(&chk
->asoc
->control_send_queue
, chk
, sctp_next
);
6273 chk
->asoc
->ctrl_queue_cnt
++;
6277 sctp_send_cookie_ack(struct sctp_tcb
*stcb
) {
6278 /* formulate and queue a cookie-ack back to sender */
6279 struct mbuf
*cookie_ack
;
6280 struct sctp_chunkhdr
*hdr
;
6281 struct sctp_tmit_chunk
*chk
;
6284 MGETHDR(cookie_ack
, MB_DONTWAIT
, MT_HEADER
);
6285 if (cookie_ack
== NULL
) {
6289 cookie_ack
->m_data
+= SCTP_MIN_OVERHEAD
;
6290 chk
= (struct sctp_tmit_chunk
*)SCTP_ZONE_GET(sctppcbinfo
.ipi_zone_chunk
);
6293 sctp_m_freem(cookie_ack
);
6296 sctppcbinfo
.ipi_count_chunk
++;
6297 sctppcbinfo
.ipi_gencnt_chunk
++;
6299 chk
->send_size
= sizeof(struct sctp_chunkhdr
);
6300 chk
->rec
.chunk_id
= SCTP_COOKIE_ACK
;
6301 chk
->sent
= SCTP_DATAGRAM_UNSENT
;
6304 chk
->asoc
= &stcb
->asoc
;
6305 chk
->data
= cookie_ack
;
6306 if (chk
->asoc
->last_control_chunk_from
!= NULL
) {
6307 chk
->whoTo
= chk
->asoc
->last_control_chunk_from
;
6309 chk
->whoTo
= chk
->asoc
->primary_destination
;
6311 chk
->whoTo
->ref_count
++;
6312 hdr
= mtod(cookie_ack
, struct sctp_chunkhdr
*);
6313 hdr
->chunk_type
= SCTP_COOKIE_ACK
;
6314 hdr
->chunk_flags
= 0;
6315 hdr
->chunk_length
= htons(chk
->send_size
);
6316 cookie_ack
->m_pkthdr
.len
= cookie_ack
->m_len
= chk
->send_size
;
6317 cookie_ack
->m_pkthdr
.rcvif
= 0;
6318 TAILQ_INSERT_TAIL(&chk
->asoc
->control_send_queue
, chk
, sctp_next
);
6319 chk
->asoc
->ctrl_queue_cnt
++;
6325 sctp_send_shutdown_ack(struct sctp_tcb
*stcb
, struct sctp_nets
*net
)
6327 /* formulate and queue a SHUTDOWN-ACK back to the sender */
6328 struct mbuf
*m_shutdown_ack
;
6329 struct sctp_shutdown_ack_chunk
*ack_cp
;
6330 struct sctp_tmit_chunk
*chk
;
6332 m_shutdown_ack
= NULL
;
6333 MGETHDR(m_shutdown_ack
, MB_DONTWAIT
, MT_HEADER
);
6334 if (m_shutdown_ack
== NULL
) {
6338 m_shutdown_ack
->m_data
+= SCTP_MIN_OVERHEAD
;
6339 chk
= (struct sctp_tmit_chunk
*)SCTP_ZONE_GET(sctppcbinfo
.ipi_zone_chunk
);
6342 sctp_m_freem(m_shutdown_ack
);
6345 sctppcbinfo
.ipi_count_chunk
++;
6346 sctppcbinfo
.ipi_gencnt_chunk
++;
6348 chk
->send_size
= sizeof(struct sctp_chunkhdr
);
6349 chk
->rec
.chunk_id
= SCTP_SHUTDOWN_ACK
;
6350 chk
->sent
= SCTP_DATAGRAM_UNSENT
;
6353 chk
->asoc
= &stcb
->asoc
;
6354 chk
->data
= m_shutdown_ack
;
6358 ack_cp
= mtod(m_shutdown_ack
, struct sctp_shutdown_ack_chunk
*);
6359 ack_cp
->ch
.chunk_type
= SCTP_SHUTDOWN_ACK
;
6360 ack_cp
->ch
.chunk_flags
= 0;
6361 ack_cp
->ch
.chunk_length
= htons(chk
->send_size
);
6362 m_shutdown_ack
->m_pkthdr
.len
= m_shutdown_ack
->m_len
= chk
->send_size
;
6363 m_shutdown_ack
->m_pkthdr
.rcvif
= 0;
6364 TAILQ_INSERT_TAIL(&chk
->asoc
->control_send_queue
, chk
, sctp_next
);
6365 chk
->asoc
->ctrl_queue_cnt
++;
6370 sctp_send_shutdown(struct sctp_tcb
*stcb
, struct sctp_nets
*net
)
6372 /* formulate and queue a SHUTDOWN to the sender */
6373 struct mbuf
*m_shutdown
;
6374 struct sctp_shutdown_chunk
*shutdown_cp
;
6375 struct sctp_tmit_chunk
*chk
;
6378 MGETHDR(m_shutdown
, MB_DONTWAIT
, MT_HEADER
);
6379 if (m_shutdown
== NULL
) {
6383 m_shutdown
->m_data
+= SCTP_MIN_OVERHEAD
;
6384 chk
= (struct sctp_tmit_chunk
*)SCTP_ZONE_GET(sctppcbinfo
.ipi_zone_chunk
);
6387 sctp_m_freem(m_shutdown
);
6390 sctppcbinfo
.ipi_count_chunk
++;
6391 sctppcbinfo
.ipi_gencnt_chunk
++;
6393 chk
->send_size
= sizeof(struct sctp_shutdown_chunk
);
6394 chk
->rec
.chunk_id
= SCTP_SHUTDOWN
;
6395 chk
->sent
= SCTP_DATAGRAM_UNSENT
;
6398 chk
->asoc
= &stcb
->asoc
;
6399 chk
->data
= m_shutdown
;
6403 shutdown_cp
= mtod(m_shutdown
, struct sctp_shutdown_chunk
*);
6404 shutdown_cp
->ch
.chunk_type
= SCTP_SHUTDOWN
;
6405 shutdown_cp
->ch
.chunk_flags
= 0;
6406 shutdown_cp
->ch
.chunk_length
= htons(chk
->send_size
);
6407 shutdown_cp
->cumulative_tsn_ack
= htonl(stcb
->asoc
.cumulative_tsn
);
6408 m_shutdown
->m_pkthdr
.len
= m_shutdown
->m_len
= chk
->send_size
;
6409 m_shutdown
->m_pkthdr
.rcvif
= 0;
6410 TAILQ_INSERT_TAIL(&chk
->asoc
->control_send_queue
, chk
, sctp_next
);
6411 chk
->asoc
->ctrl_queue_cnt
++;
6413 if ((stcb
->sctp_ep
->sctp_flags
& SCTP_PCB_FLAGS_TCPTYPE
) ||
6414 (stcb
->sctp_ep
->sctp_flags
& SCTP_PCB_FLAGS_IN_TCPPOOL
)) {
6415 stcb
->sctp_ep
->sctp_socket
->so_snd
.ssb_cc
= 0;
6416 soisdisconnecting(stcb
->sctp_ep
->sctp_socket
);
6422 sctp_send_asconf(struct sctp_tcb
*stcb
, struct sctp_nets
*net
)
6425 * formulate and queue an ASCONF to the peer
6426 * ASCONF parameters should be queued on the assoc queue
6428 struct sctp_tmit_chunk
*chk
;
6429 struct mbuf
*m_asconf
;
6430 struct sctp_asconf_chunk
*acp
;
6433 /* compose an ASCONF chunk, maximum length is PMTU */
6434 m_asconf
= sctp_compose_asconf(stcb
);
6435 if (m_asconf
== NULL
) {
6438 acp
= mtod(m_asconf
, struct sctp_asconf_chunk
*);
6439 chk
= (struct sctp_tmit_chunk
*)SCTP_ZONE_GET(sctppcbinfo
.ipi_zone_chunk
);
6442 sctp_m_freem(m_asconf
);
6445 sctppcbinfo
.ipi_count_chunk
++;
6446 sctppcbinfo
.ipi_gencnt_chunk
++;
6448 chk
->data
= m_asconf
;
6449 chk
->send_size
= m_asconf
->m_pkthdr
.len
;
6450 chk
->rec
.chunk_id
= SCTP_ASCONF
;
6451 chk
->sent
= SCTP_DATAGRAM_UNSENT
;
6454 chk
->asoc
= &stcb
->asoc
;
6455 chk
->whoTo
= chk
->asoc
->primary_destination
;
6456 chk
->whoTo
->ref_count
++;
6457 TAILQ_INSERT_TAIL(&chk
->asoc
->control_send_queue
, chk
, sctp_next
);
6458 chk
->asoc
->ctrl_queue_cnt
++;
6463 sctp_send_asconf_ack(struct sctp_tcb
*stcb
, uint32_t retrans
)
6466 * formulate and queue a asconf-ack back to sender
6467 * the asconf-ack must be stored in the tcb
6469 struct sctp_tmit_chunk
*chk
;
6472 /* is there a asconf-ack mbuf chain to send? */
6473 if (stcb
->asoc
.last_asconf_ack_sent
== NULL
) {
6477 /* copy the asconf_ack */
6478 #if defined(__FreeBSD__) || defined(__NetBSD__)
6479 /* Supposedly the m_copypacket is a optimzation,
6482 if (stcb
->asoc
.last_asconf_ack_sent
->m_flags
& M_PKTHDR
) {
6483 m_ack
= m_copypacket(stcb
->asoc
.last_asconf_ack_sent
, MB_DONTWAIT
);
6484 sctp_pegs
[SCTP_CACHED_SRC
]++;
6486 m_ack
= m_copy(stcb
->asoc
.last_asconf_ack_sent
, 0, M_COPYALL
);
6488 m_ack
= m_copy(stcb
->asoc
.last_asconf_ack_sent
, 0, M_COPYALL
);
6490 if (m_ack
== NULL
) {
6491 /* couldn't copy it */
6495 chk
= (struct sctp_tmit_chunk
*)SCTP_ZONE_GET(sctppcbinfo
.ipi_zone_chunk
);
6499 sctp_m_freem(m_ack
);
6502 sctppcbinfo
.ipi_count_chunk
++;
6503 sctppcbinfo
.ipi_gencnt_chunk
++;
6505 /* figure out where it goes to */
6507 /* we're doing a retransmission */
6508 if (stcb
->asoc
.used_alt_asconfack
> 2) {
6509 /* tried alternate nets already, go back */
6512 /* need to try and alternate net */
6513 chk
->whoTo
= sctp_find_alternate_net(stcb
, stcb
->asoc
.last_control_chunk_from
);
6514 stcb
->asoc
.used_alt_asconfack
++;
6516 if (chk
->whoTo
== NULL
) {
6518 if (stcb
->asoc
.last_control_chunk_from
== NULL
)
6519 chk
->whoTo
= stcb
->asoc
.primary_destination
;
6521 chk
->whoTo
= stcb
->asoc
.last_control_chunk_from
;
6522 stcb
->asoc
.used_alt_asconfack
= 0;
6526 if (stcb
->asoc
.last_control_chunk_from
== NULL
)
6527 chk
->whoTo
= stcb
->asoc
.primary_destination
;
6529 chk
->whoTo
= stcb
->asoc
.last_control_chunk_from
;
6530 stcb
->asoc
.used_alt_asconfack
= 0;
6533 chk
->send_size
= m_ack
->m_pkthdr
.len
;
6534 chk
->rec
.chunk_id
= SCTP_ASCONF_ACK
;
6535 chk
->sent
= SCTP_DATAGRAM_UNSENT
;
6538 chk
->asoc
= &stcb
->asoc
;
6539 chk
->whoTo
->ref_count
++;
6540 TAILQ_INSERT_TAIL(&chk
->asoc
->control_send_queue
, chk
, sctp_next
);
6541 chk
->asoc
->ctrl_queue_cnt
++;
6547 sctp_chunk_retransmission(struct sctp_inpcb
*inp
,
6548 struct sctp_tcb
*stcb
,
6549 struct sctp_association
*asoc
,
6550 int *cnt_out
, struct timeval
*now
, int *now_filled
)
6553 * send out one MTU of retransmission.
6554 * If fast_retransmit is happening we ignore the cwnd.
6555 * Otherwise we obey the cwnd and rwnd.
6556 * For a Cookie or Asconf in the control chunk queue we retransmit
6557 * them by themselves.
6559 * For data chunks we will pick out the lowest TSN's in the
6560 * sent_queue marked for resend and bundle them all together
6561 * (up to a MTU of destination). The address to send to should
6562 * have been selected/changed where the retransmission was
6563 * marked (i.e. in FR or t3-timeout routines).
6565 struct sctp_tmit_chunk
*data_list
[SCTP_MAX_DATA_BUNDLING
];
6566 struct sctp_tmit_chunk
*chk
, *fwd
;
6568 struct sctphdr
*shdr
;
6570 struct sctp_nets
*net
;
6571 int no_fragmentflg
, bundle_at
, cnt_thru
;
6573 int error
, i
, one_chunk
, fwd_tsn
, ctl_cnt
, tmr_started
;
6575 tmr_started
= ctl_cnt
= bundle_at
= error
= 0;
6582 #ifdef SCTP_AUDITING_ENABLED
6583 sctp_audit_log(0xC3, 1);
6585 if (TAILQ_EMPTY(&asoc
->sent_queue
)) {
6587 if (sctp_debug_on
& SCTP_DEBUG_OUTPUT1
) {
6588 kprintf("SCTP hits empty queue with cnt set to %d?\n",
6589 asoc
->sent_queue_retran_cnt
);
6592 asoc
->sent_queue_cnt
= 0;
6593 asoc
->sent_queue_cnt_removeable
= 0;
6595 TAILQ_FOREACH(chk
, &asoc
->control_send_queue
, sctp_next
) {
6596 if (chk
->sent
!= SCTP_DATAGRAM_RESEND
) {
6597 /* we only worry about things marked for resend */
6600 if ((chk
->rec
.chunk_id
== SCTP_COOKIE_ECHO
) ||
6601 (chk
->rec
.chunk_id
== SCTP_ASCONF
) ||
6602 (chk
->rec
.chunk_id
== SCTP_STREAM_RESET
) ||
6603 (chk
->rec
.chunk_id
== SCTP_FORWARD_CUM_TSN
)) {
6604 if (chk
->rec
.chunk_id
== SCTP_STREAM_RESET
) {
6605 /* For stream reset we only retran the request
6608 struct sctp_stream_reset_req
*strreq
;
6609 strreq
= mtod(chk
->data
, struct sctp_stream_reset_req
*);
6610 if (strreq
->sr_req
.ph
.param_type
!= ntohs(SCTP_STR_RESET_REQUEST
)) {
6615 if (chk
->rec
.chunk_id
== SCTP_ASCONF
) {
6619 if (chk
->rec
.chunk_id
== SCTP_FORWARD_CUM_TSN
) {
6623 m
= sctp_copy_mbufchain(chk
->data
, m
);
6629 /* do we have control chunks to retransmit? */
6631 /* Start a timer no matter if we suceed or fail */
6632 if (chk
->rec
.chunk_id
== SCTP_COOKIE_ECHO
) {
6633 sctp_timer_start(SCTP_TIMER_TYPE_COOKIE
, inp
, stcb
, chk
->whoTo
);
6634 } else if (chk
->rec
.chunk_id
== SCTP_ASCONF
)
6635 sctp_timer_start(SCTP_TIMER_TYPE_ASCONF
, inp
, stcb
, chk
->whoTo
);
6637 if (m
->m_len
== 0) {
6638 /* Special case for when you get a 0 len
6639 * mbuf at the head due to the lack
6640 * of a MHDR at the beginning.
6642 m
->m_len
= sizeof(struct sctphdr
);
6644 M_PREPEND(m
, sizeof(struct sctphdr
), MB_DONTWAIT
);
6649 shdr
= mtod(m
, struct sctphdr
*);
6650 shdr
->src_port
= inp
->sctp_lport
;
6651 shdr
->dest_port
= stcb
->rport
;
6652 shdr
->v_tag
= htonl(stcb
->asoc
.peer_vtag
);
6654 chk
->snd_count
++; /* update our count */
6656 if ((error
= sctp_lowlevel_chunk_output(inp
, stcb
, chk
->whoTo
,
6657 (struct sockaddr
*)&chk
->whoTo
->ro
._l_addr
, m
,
6658 no_fragmentflg
, 0, NULL
, asconf
))) {
6659 sctp_pegs
[SCTP_DATA_OUT_ERR
]++;
6663 *We don't want to mark the net->sent time here since this
6664 * we use this for HB and retrans cannot measure RTT
6666 /* SCTP_GETTIME_TIMEVAL(&chk->whoTo->last_sent_time);*/
6668 chk
->sent
= SCTP_DATAGRAM_SENT
;
6669 asoc
->sent_queue_retran_cnt
--;
6670 if (asoc
->sent_queue_retran_cnt
< 0) {
6671 asoc
->sent_queue_retran_cnt
= 0;
6676 /* Clean up the fwd-tsn list */
6677 sctp_clean_up_ctl (asoc
);
6681 /* Ok, it is just data retransmission we need to do or
6682 * that and a fwd-tsn with it all.
6684 if (TAILQ_EMPTY(&asoc
->sent_queue
)) {
6688 if (sctp_debug_on
& SCTP_DEBUG_OUTPUT1
) {
6689 kprintf("Normal chunk retransmission cnt:%d\n",
6690 asoc
->sent_queue_retran_cnt
);
6693 if ((SCTP_GET_STATE(asoc
) == SCTP_STATE_COOKIE_ECHOED
) ||
6694 (SCTP_GET_STATE(asoc
) == SCTP_STATE_COOKIE_WAIT
)) {
6695 /* not yet open, resend the cookie and that is it */
6700 #ifdef SCTP_AUDITING_ENABLED
6701 sctp_auditing(20, inp
, stcb
, NULL
);
6703 TAILQ_FOREACH(chk
, &asoc
->sent_queue
, sctp_next
) {
6704 if (chk
->sent
!= SCTP_DATAGRAM_RESEND
) {
6705 /* No, not sent to this net or not ready for rtx */
6709 /* pick up the net */
6711 if (stcb
->sctp_ep
->sctp_flags
& SCTP_PCB_FLAGS_BOUND_V6
) {
6712 mtu
= (net
->mtu
- SCTP_MIN_OVERHEAD
);
6714 mtu
= net
->mtu
- SCTP_MIN_V4_OVERHEAD
;
6717 if ((asoc
->peers_rwnd
< mtu
) && (asoc
->total_flight
> 0)) {
6718 /* No room in peers rwnd */
6720 tsn
= asoc
->last_acked_seq
+ 1;
6721 if (tsn
== chk
->rec
.data
.TSN_seq
) {
6722 /* we make a special exception for this case.
6723 * The peer has no rwnd but is missing the
6724 * lowest chunk.. which is probably what is
6725 * holding up the rwnd.
6727 goto one_chunk_around
;
6730 if (sctp_debug_on
& SCTP_DEBUG_OUTPUT1
) {
6731 kprintf("blocked-peers_rwnd:%d tf:%d\n",
6732 (int)asoc
->peers_rwnd
,
6733 (int)asoc
->total_flight
);
6736 sctp_pegs
[SCTP_RWND_BLOCKED
]++;
6740 if (asoc
->peers_rwnd
< mtu
) {
6743 #ifdef SCTP_AUDITING_ENABLED
6744 sctp_audit_log(0xC3, 2);
6748 net
->fast_retran_ip
= 0;
6749 if (chk
->rec
.data
.doing_fast_retransmit
== 0) {
6750 /* if no FR in progress skip destination that
6751 * have flight_size > cwnd.
6753 if (net
->flight_size
>= net
->cwnd
) {
6754 sctp_pegs
[SCTP_CWND_BLOCKED
]++;
6758 /* Mark the destination net to have FR recovery
6761 net
->fast_retran_ip
= 1;
6764 if ((chk
->send_size
<= mtu
) || (chk
->flags
& CHUNK_FLAGS_FRAGMENT_OK
)) {
6765 /* ok we will add this one */
6766 m
= sctp_copy_mbufchain(chk
->data
, m
);
6770 /* upate our MTU size */
6771 /* Do clear IP_DF ? */
6772 if (chk
->flags
& CHUNK_FLAGS_FRAGMENT_OK
) {
6775 mtu
-= chk
->send_size
;
6776 data_list
[bundle_at
++] = chk
;
6777 if (one_chunk
&& (asoc
->total_flight
<= 0)) {
6778 sctp_pegs
[SCTP_WINDOW_PROBES
]++;
6779 chk
->rec
.data
.state_flags
|= SCTP_WINDOW_PROBE
;
6782 if (one_chunk
== 0) {
6783 /* now are there anymore forward from chk to pick up?*/
6784 fwd
= TAILQ_NEXT(chk
, sctp_next
);
6786 if (fwd
->sent
!= SCTP_DATAGRAM_RESEND
) {
6787 /* Nope, not for retran */
6788 fwd
= TAILQ_NEXT(fwd
, sctp_next
);
6791 if (fwd
->whoTo
!= net
) {
6792 /* Nope, not the net in question */
6793 fwd
= TAILQ_NEXT(fwd
, sctp_next
);
6796 if (fwd
->send_size
<= mtu
) {
6797 m
= sctp_copy_mbufchain(fwd
->data
, m
);
6801 /* upate our MTU size */
6802 /* Do clear IP_DF ? */
6803 if (fwd
->flags
& CHUNK_FLAGS_FRAGMENT_OK
) {
6806 mtu
-= fwd
->send_size
;
6807 data_list
[bundle_at
++] = fwd
;
6808 if (bundle_at
>= SCTP_MAX_DATA_BUNDLING
) {
6811 fwd
= TAILQ_NEXT(fwd
, sctp_next
);
6813 /* can't fit so we are done */
6818 /* Is there something to send for this destination? */
6820 /* No matter if we fail/or suceed we should
6821 * start a timer. A failure is like a lost
6824 if (!callout_pending(&net
->rxt_timer
.timer
)) {
6825 /* no timer running on this destination
6828 sctp_timer_start(SCTP_TIMER_TYPE_SEND
, inp
, stcb
, net
);
6831 if (m
->m_len
== 0) {
6832 /* Special case for when you get a 0 len
6833 * mbuf at the head due to the lack
6834 * of a MHDR at the beginning.
6836 m
->m_len
= sizeof(struct sctphdr
);
6838 M_PREPEND(m
, sizeof(struct sctphdr
), MB_DONTWAIT
);
6843 shdr
= mtod(m
, struct sctphdr
*);
6844 shdr
->src_port
= inp
->sctp_lport
;
6845 shdr
->dest_port
= stcb
->rport
;
6846 shdr
->v_tag
= htonl(stcb
->asoc
.peer_vtag
);
6849 /* Now lets send it, if there is anything to send :> */
6850 if ((error
= sctp_lowlevel_chunk_output(inp
, stcb
, net
,
6851 (struct sockaddr
*)&net
->ro
._l_addr
,
6853 no_fragmentflg
, 0, NULL
, asconf
))) {
6854 /* error, we could not output */
6855 sctp_pegs
[SCTP_DATA_OUT_ERR
]++;
6860 * We don't want to mark the net->sent time here since
6861 * this we use this for HB and retrans cannot measure
6864 /* SCTP_GETTIME_TIMEVAL(&net->last_sent_time);*/
6866 /* For auto-close */
6868 if (*now_filled
== 0) {
6869 SCTP_GETTIME_TIMEVAL(&asoc
->time_last_sent
);
6870 *now
= asoc
->time_last_sent
;
6873 asoc
->time_last_sent
= *now
;
6875 *cnt_out
+= bundle_at
;
6876 #ifdef SCTP_AUDITING_ENABLED
6877 sctp_audit_log(0xC4, bundle_at
);
6879 for (i
= 0; i
< bundle_at
; i
++) {
6880 sctp_pegs
[SCTP_RETRANTSN_SENT
]++;
6881 data_list
[i
]->sent
= SCTP_DATAGRAM_SENT
;
6882 data_list
[i
]->snd_count
++;
6883 asoc
->sent_queue_retran_cnt
--;
6884 /* record the time */
6885 data_list
[i
]->sent_rcv_time
= asoc
->time_last_sent
;
6886 if (asoc
->sent_queue_retran_cnt
< 0) {
6887 asoc
->sent_queue_retran_cnt
= 0;
6889 net
->flight_size
+= data_list
[i
]->book_size
;
6890 asoc
->total_flight
+= data_list
[i
]->book_size
;
6891 asoc
->total_flight_count
++;
6893 #ifdef SCTP_LOG_RWND
6894 sctp_log_rwnd(SCTP_DECREASE_PEER_RWND
,
6895 asoc
->peers_rwnd
, data_list
[i
]->send_size
, sctp_peer_chunk_oh
);
6897 asoc
->peers_rwnd
= sctp_sbspace_sub(asoc
->peers_rwnd
,
6898 (u_int32_t
)(data_list
[i
]->send_size
+ sctp_peer_chunk_oh
));
6899 if (asoc
->peers_rwnd
< stcb
->sctp_ep
->sctp_ep
.sctp_sws_sender
) {
6900 /* SWS sender side engages */
6901 asoc
->peers_rwnd
= 0;
6905 (data_list
[i
]->rec
.data
.doing_fast_retransmit
)) {
6906 sctp_pegs
[SCTP_FAST_RETRAN
]++;
6907 if ((data_list
[i
] == TAILQ_FIRST(&asoc
->sent_queue
)) &&
6908 (tmr_started
== 0)) {
6910 * ok we just fast-retrans'd
6911 * the lowest TSN, i.e the
6912 * first on the list. In this
6913 * case we want to give some
6914 * more time to get a SACK
6915 * back without a t3-expiring.
6917 sctp_timer_stop(SCTP_TIMER_TYPE_SEND
, inp
, stcb
, net
);
6918 sctp_timer_start(SCTP_TIMER_TYPE_SEND
, inp
, stcb
, net
);
6922 #ifdef SCTP_AUDITING_ENABLED
6923 sctp_auditing(21, inp
, stcb
, NULL
);
6929 if (asoc
->sent_queue_retran_cnt
<= 0) {
6930 /* all done we have no more to retran */
6931 asoc
->sent_queue_retran_cnt
= 0;
6935 /* No more room in rwnd */
6938 /* stop the for loop here. we sent out a packet */
6946 sctp_timer_validation(struct sctp_inpcb
*inp
,
6947 struct sctp_tcb
*stcb
,
6948 struct sctp_association
*asoc
,
6951 struct sctp_nets
*net
;
6952 /* Validate that a timer is running somewhere */
6953 TAILQ_FOREACH(net
, &asoc
->nets
, sctp_next
) {
6954 if (callout_pending(&net
->rxt_timer
.timer
)) {
6955 /* Here is a timer */
6959 /* Gak, we did not have a timer somewhere */
6961 if (sctp_debug_on
& SCTP_DEBUG_OUTPUT3
) {
6962 kprintf("Deadlock avoided starting timer on a dest at retran\n");
6965 sctp_timer_start(SCTP_TIMER_TYPE_SEND
, inp
, stcb
, asoc
->primary_destination
);
6970 sctp_chunk_output(struct sctp_inpcb
*inp
,
6971 struct sctp_tcb
*stcb
,
6974 /* Ok this is the generic chunk service queue.
6975 * we must do the following:
6976 * - See if there are retransmits pending, if so we
6977 * must do these first and return.
6978 * - Service the stream queue that is next,
6979 * moving any message (note I must get a complete
6980 * message i.e. FIRST/MIDDLE and LAST to the out
6981 * queue in one pass) and assigning TSN's
6982 * - Check to see if the cwnd/rwnd allows any output, if
6983 * so we go ahead and fomulate and send the low level
6984 * chunks. Making sure to combine any control in the
6985 * control chunk queue also.
6987 struct sctp_association
*asoc
;
6988 struct sctp_nets
*net
;
6989 int error
, num_out
, tot_out
, ret
, reason_code
, burst_cnt
, burst_limit
;
6997 sctp_pegs
[SCTP_CALLS_TO_CO
]++;
6999 if (sctp_debug_on
& SCTP_DEBUG_OUTPUT3
) {
7000 kprintf("in co - retran count:%d\n", asoc
->sent_queue_retran_cnt
);
7003 while (asoc
->sent_queue_retran_cnt
) {
7004 /* Ok, it is retransmission time only, we send out only ONE
7005 * packet with a single call off to the retran code.
7007 ret
= sctp_chunk_retransmission(inp
, stcb
, asoc
, &num_out
, &now
, &now_filled
);
7009 /* Can't send anymore */
7011 if (sctp_debug_on
& SCTP_DEBUG_OUTPUT1
) {
7012 kprintf("retransmission ret:%d -- full\n", ret
);
7016 * now lets push out control by calling med-level
7017 * output once. this assures that we WILL send HB's
7020 sctp_med_chunk_output(inp
, stcb
, asoc
, &num_out
, &reason_code
, 1,
7021 &cwnd_full
, from_where
,
7024 if (sctp_debug_on
& SCTP_DEBUG_OUTPUT1
) {
7025 kprintf("Control send outputs:%d@full\n", num_out
);
7028 #ifdef SCTP_AUDITING_ENABLED
7029 sctp_auditing(8, inp
, stcb
, NULL
);
7031 return (sctp_timer_validation(inp
, stcb
, asoc
, ret
));
7035 * The count was off.. retran is not happening so do
7036 * the normal retransmission.
7039 if (sctp_debug_on
& SCTP_DEBUG_OUTPUT1
) {
7040 kprintf("Done with retrans, none left fill up window\n");
7043 #ifdef SCTP_AUDITING_ENABLED
7044 sctp_auditing(9, inp
, stcb
, NULL
);
7048 if (from_where
== 1) {
7049 /* Only one transmission allowed out of a timeout */
7051 if (sctp_debug_on
& SCTP_DEBUG_OUTPUT1
) {
7052 kprintf("Only one packet allowed out\n");
7055 #ifdef SCTP_AUDITING_ENABLED
7056 sctp_auditing(10, inp
, stcb
, NULL
);
7058 /* Push out any control */
7059 sctp_med_chunk_output(inp
, stcb
, asoc
, &num_out
, &reason_code
, 1, &cwnd_full
, from_where
,
7063 if ((num_out
== 0) && (ret
== 0)) {
7064 /* No more retrans to send */
7068 #ifdef SCTP_AUDITING_ENABLED
7069 sctp_auditing(12, inp
, stcb
, NULL
);
7071 /* Check for bad destinations, if they exist move chunks around. */
7072 burst_limit
= asoc
->max_burst
;
7073 TAILQ_FOREACH(net
, &asoc
->nets
, sctp_next
) {
7074 if ((net
->dest_state
& SCTP_ADDR_NOT_REACHABLE
) ==
7075 SCTP_ADDR_NOT_REACHABLE
) {
7077 * if possible move things off of this address
7078 * we still may send below due to the dormant state
7079 * but we try to find an alternate address to send
7080 * to and if we have one we move all queued data on
7081 * the out wheel to this alternate address.
7083 sctp_move_to_an_alt(stcb
, asoc
, net
);
7086 if ((asoc->sat_network) || (net->addr_is_local)) {
7087 burst_limit = asoc->max_burst * SCTP_SAT_NETWORK_BURST_INCR;
7091 if (sctp_debug_on
& SCTP_DEBUG_OUTPUT3
) {
7092 kprintf("examined net:%p burst limit:%d\n", net
, asoc
->max_burst
);
7096 #ifdef SCTP_USE_ALLMAN_BURST
7097 if ((net
->flight_size
+(burst_limit
*net
->mtu
)) < net
->cwnd
) {
7098 if (net
->ssthresh
< net
->cwnd
)
7099 net
->ssthresh
= net
->cwnd
;
7100 net
->cwnd
= (net
->flight_size
+(burst_limit
*net
->mtu
));
7101 #ifdef SCTP_LOG_MAXBURST
7102 sctp_log_maxburst(net
, 0, burst_limit
, SCTP_MAX_BURST_APPLIED
);
7104 sctp_pegs
[SCTP_MAX_BURST_APL
]++;
7106 net
->fast_retran_ip
= 0;
7111 /* Fill up what we can to the destination */
7116 if (sctp_debug_on
& SCTP_DEBUG_OUTPUT3
) {
7117 kprintf("Burst count:%d - call m-c-o\n", burst_cnt
);
7120 error
= sctp_med_chunk_output(inp
, stcb
, asoc
, &num_out
,
7121 &reason_code
, 0, &cwnd_full
, from_where
,
7125 if (sctp_debug_on
& SCTP_DEBUG_OUTPUT1
) {
7126 kprintf("Error %d was returned from med-c-op\n", error
);
7129 #ifdef SCTP_LOG_MAXBURST
7130 sctp_log_maxburst(asoc
->primary_destination
, error
, burst_cnt
, SCTP_MAX_BURST_ERROR_STOP
);
7135 if (sctp_debug_on
& SCTP_DEBUG_OUTPUT3
) {
7136 kprintf("m-c-o put out %d\n", num_out
);
7142 #ifndef SCTP_USE_ALLMAN_BURST
7143 && (burst_cnt
< burst_limit
)
7146 #ifndef SCTP_USE_ALLMAN_BURST
7147 if (burst_cnt
>= burst_limit
) {
7148 sctp_pegs
[SCTP_MAX_BURST_APL
]++;
7149 asoc
->burst_limit_applied
= 1;
7150 #ifdef SCTP_LOG_MAXBURST
7151 sctp_log_maxburst(asoc
->primary_destination
, 0 , burst_cnt
, SCTP_MAX_BURST_APPLIED
);
7154 asoc
->burst_limit_applied
= 0;
7159 if (sctp_debug_on
& SCTP_DEBUG_OUTPUT1
) {
7160 kprintf("Ok, we have put out %d chunks\n", tot_out
);
7164 sctp_pegs
[SCTP_CO_NODATASNT
]++;
7165 if (asoc
->stream_queue_cnt
> 0) {
7166 sctp_pegs
[SCTP_SOS_NOSNT
]++;
7168 sctp_pegs
[SCTP_NOS_NOSNT
]++;
7170 if (asoc
->send_queue_cnt
> 0) {
7171 sctp_pegs
[SCTP_SOSE_NOSNT
]++;
7173 sctp_pegs
[SCTP_NOSE_NOSNT
]++;
7176 /* Now we need to clean up the control chunk chain if
7177 * a ECNE is on it. It must be marked as UNSENT again
7178 * so next call will continue to send it until
7179 * such time that we get a CWR, to remove it.
7181 sctp_fix_ecn_echo(asoc
);
7187 sctp_output(struct sctp_inpcb
*inp
, struct mbuf
*m
, struct sockaddr
*addr
,
7188 struct mbuf
*control
, struct thread
*p
, int flags
)
7190 struct inpcb
*ip_inp
;
7191 struct sctp_inpcb
*t_inp
;
7192 struct sctp_tcb
*stcb
;
7193 struct sctp_nets
*net
;
7194 struct sctp_association
*asoc
;
7195 int create_lock_applied
= 0;
7196 int queue_only
, error
= 0;
7197 struct sctp_sndrcvinfo srcv
;
7199 int use_rcvinfo
= 0;
7201 /* struct route ro;*/
7205 ip_inp
= (struct inpcb
*)inp
;
7211 if (sctp_debug_on
& SCTP_DEBUG_OUTPUT1
) {
7212 kprintf("USR Send BEGINS\n");
7216 if ((inp
->sctp_flags
& SCTP_PCB_FLAGS_TCPTYPE
) &&
7217 (inp
->sctp_flags
& SCTP_PCB_FLAGS_ACCEPTING
)) {
7218 /* The listner can NOT send */
7220 sctppcbinfo
.mbuf_track
--;
7221 sctp_m_freem(control
);
7228 /* Can't allow a V6 address on a non-v6 socket */
7230 SCTP_ASOC_CREATE_LOCK(inp
);
7231 if ((inp
->sctp_flags
& SCTP_PCB_FLAGS_SOCKET_GONE
) ||
7232 (inp
->sctp_flags
& SCTP_PCB_FLAGS_SOCKET_GONE
)) {
7233 /* Should I really unlock ? */
7234 SCTP_ASOC_CREATE_UNLOCK(inp
);
7236 sctppcbinfo
.mbuf_track
--;
7237 sctp_m_freem(control
);
7244 create_lock_applied
= 1;
7245 if (((inp
->sctp_flags
& SCTP_PCB_FLAGS_BOUND_V6
) == 0) &&
7246 (addr
->sa_family
== AF_INET6
)) {
7247 SCTP_ASOC_CREATE_UNLOCK(inp
);
7249 sctppcbinfo
.mbuf_track
--;
7250 sctp_m_freem(control
);
7259 sctppcbinfo
.mbuf_track
++;
7260 if (sctp_find_cmsg(SCTP_SNDRCV
, (void *)&srcv
, control
,
7262 if (srcv
.sinfo_flags
& MSG_SENDALL
) {
7264 sctppcbinfo
.mbuf_track
--;
7265 sctp_m_freem(control
);
7267 if (create_lock_applied
) {
7268 SCTP_ASOC_CREATE_UNLOCK(inp
);
7269 create_lock_applied
= 0;
7271 return (sctp_sendall(inp
, NULL
, m
, &srcv
));
7273 if (srcv
.sinfo_assoc_id
) {
7274 if (inp
->sctp_flags
& SCTP_PCB_FLAGS_CONNECTED
) {
7275 SCTP_INP_RLOCK(inp
);
7276 stcb
= LIST_FIRST(&inp
->sctp_asoc_list
);
7278 SCTP_TCB_LOCK(stcb
);
7279 SCTP_INP_RUNLOCK(inp
);
7282 if (create_lock_applied
) {
7283 SCTP_ASOC_CREATE_UNLOCK(inp
);
7284 create_lock_applied
= 0;
7286 sctppcbinfo
.mbuf_track
--;
7287 sctp_m_freem(control
);
7292 net
= stcb
->asoc
.primary_destination
;
7294 stcb
= sctp_findassociation_ep_asocid(inp
, srcv
.sinfo_assoc_id
);
7297 * Question: Should I error here if the
7299 * assoc_id is no longer valid?
7300 * i.e. I can't find it?
7304 /* Must locate the net structure */
7306 net
= sctp_findnet(stcb
, addr
);
7309 net
= stcb
->asoc
.primary_destination
;
7315 if (inp
->sctp_flags
& SCTP_PCB_FLAGS_CONNECTED
) {
7316 SCTP_INP_RLOCK(inp
);
7317 stcb
= LIST_FIRST(&inp
->sctp_asoc_list
);
7319 SCTP_TCB_LOCK(stcb
);
7320 SCTP_INP_RUNLOCK(inp
);
7323 if (create_lock_applied
) {
7324 SCTP_ASOC_CREATE_UNLOCK(inp
);
7325 create_lock_applied
= 0;
7328 sctppcbinfo
.mbuf_track
--;
7329 sctp_m_freem(control
);
7336 net
= stcb
->asoc
.primary_destination
;
7338 net
= sctp_findnet(stcb
, addr
);
7340 net
= stcb
->asoc
.primary_destination
;
7345 SCTP_INP_WLOCK(inp
);
7346 SCTP_INP_INCR_REF(inp
);
7347 SCTP_INP_WUNLOCK(inp
);
7348 stcb
= sctp_findassociation_ep_addr(&t_inp
, addr
, &net
, NULL
, NULL
);
7350 SCTP_INP_WLOCK(inp
);
7351 SCTP_INP_DECR_REF(inp
);
7352 SCTP_INP_WUNLOCK(inp
);
7357 if ((stcb
== NULL
) &&
7358 (inp
->sctp_flags
& SCTP_PCB_FLAGS_TCPTYPE
)) {
7360 sctppcbinfo
.mbuf_track
--;
7361 sctp_m_freem(control
);
7364 if (create_lock_applied
) {
7365 SCTP_ASOC_CREATE_UNLOCK(inp
);
7366 create_lock_applied
= 0;
7371 } else if ((stcb
== NULL
) &&
7374 sctppcbinfo
.mbuf_track
--;
7375 sctp_m_freem(control
);
7378 if (create_lock_applied
) {
7379 SCTP_ASOC_CREATE_UNLOCK(inp
);
7380 create_lock_applied
= 0;
7385 } else if (stcb
== NULL
) {
7386 /* UDP mode, we must go ahead and start the INIT process */
7387 if ((use_rcvinfo
) && (srcv
.sinfo_flags
& MSG_ABORT
)) {
7388 /* Strange user to do this */
7390 sctppcbinfo
.mbuf_track
--;
7391 sctp_m_freem(control
);
7394 if (create_lock_applied
) {
7395 SCTP_ASOC_CREATE_UNLOCK(inp
);
7396 create_lock_applied
= 0;
7402 stcb
= sctp_aloc_assoc(inp
, addr
, 1, &error
, 0);
7405 sctppcbinfo
.mbuf_track
--;
7406 sctp_m_freem(control
);
7409 if (create_lock_applied
) {
7410 SCTP_ASOC_CREATE_UNLOCK(inp
);
7411 create_lock_applied
= 0;
7417 if (create_lock_applied
) {
7418 SCTP_ASOC_CREATE_UNLOCK(inp
);
7419 create_lock_applied
= 0;
7421 kprintf("Huh-1, create lock should have been applied!\n");
7425 asoc
->state
= SCTP_STATE_COOKIE_WAIT
;
7426 SCTP_GETTIME_TIMEVAL(&asoc
->time_entered
);
7428 /* see if a init structure exists in cmsg headers */
7429 struct sctp_initmsg initm
;
7431 if (sctp_find_cmsg(SCTP_INIT
, (void *)&initm
, control
,
7433 /* we have an INIT override of the default */
7434 if (initm
.sinit_max_attempts
)
7435 asoc
->max_init_times
= initm
.sinit_max_attempts
;
7436 if (initm
.sinit_num_ostreams
)
7437 asoc
->pre_open_streams
= initm
.sinit_num_ostreams
;
7438 if (initm
.sinit_max_instreams
)
7439 asoc
->max_inbound_streams
= initm
.sinit_max_instreams
;
7440 if (initm
.sinit_max_init_timeo
)
7441 asoc
->initial_init_rto_max
= initm
.sinit_max_init_timeo
;
7443 if (asoc
->streamoutcnt
< asoc
->pre_open_streams
) {
7444 /* Default is NOT correct */
7446 if (sctp_debug_on
& SCTP_DEBUG_OUTPUT1
) {
7447 kprintf("Ok, defout:%d pre_open:%d\n",
7448 asoc
->streamoutcnt
, asoc
->pre_open_streams
);
7451 FREE(asoc
->strmout
, M_PCB
);
7452 asoc
->strmout
= NULL
;
7453 asoc
->streamoutcnt
= asoc
->pre_open_streams
;
7454 MALLOC(asoc
->strmout
, struct sctp_stream_out
*,
7455 asoc
->streamoutcnt
*
7456 sizeof(struct sctp_stream_out
), M_PCB
,
7458 for (i
= 0; i
< asoc
->streamoutcnt
; i
++) {
7460 * inbound side must be set to 0xffff,
7461 * also NOTE when we get the INIT-ACK
7462 * back (for INIT sender) we MUST
7463 * reduce the count (streamoutcnt) but
7464 * first check if we sent to any of the
7465 * upper streams that were dropped (if
7466 * some were). Those that were dropped
7467 * must be notified to the upper layer
7468 * as failed to send.
7470 asoc
->strmout
[i
].next_sequence_sent
= 0x0;
7471 TAILQ_INIT(&asoc
->strmout
[i
].outqueue
);
7472 asoc
->strmout
[i
].stream_no
= i
;
7473 asoc
->strmout
[i
].next_spoke
.tqe_next
= 0;
7474 asoc
->strmout
[i
].next_spoke
.tqe_prev
= 0;
7478 sctp_send_initiate(inp
, stcb
);
7480 * we may want to dig in after this call and adjust the MTU
7481 * value. It defaulted to 1500 (constant) but the ro structure
7482 * may now have an update and thus we may need to change it
7483 * BEFORE we append the message.
7485 net
= stcb
->asoc
.primary_destination
;
7487 if (create_lock_applied
) {
7488 SCTP_ASOC_CREATE_UNLOCK(inp
);
7489 create_lock_applied
= 0;
7492 if ((SCTP_GET_STATE(asoc
) == SCTP_STATE_COOKIE_WAIT
) ||
7493 (SCTP_GET_STATE(asoc
) == SCTP_STATE_COOKIE_ECHOED
)) {
7496 if ((SCTP_GET_STATE(asoc
) == SCTP_STATE_SHUTDOWN_SENT
) ||
7497 (SCTP_GET_STATE(asoc
) == SCTP_STATE_SHUTDOWN_RECEIVED
) ||
7498 (SCTP_GET_STATE(asoc
) == SCTP_STATE_SHUTDOWN_ACK_SENT
) ||
7499 (asoc
->state
& SCTP_STATE_SHUTDOWN_PENDING
)) {
7501 sctppcbinfo
.mbuf_track
--;
7502 sctp_m_freem(control
);
7505 if ((use_rcvinfo
) &&
7506 (srcv
.sinfo_flags
& MSG_ABORT
)) {
7507 sctp_msg_append(stcb
, net
, m
, &srcv
, flags
);
7515 SCTP_TCB_UNLOCK(stcb
);
7519 if (create_lock_applied
) {
7520 /* we should never hit here with the create lock applied
7523 SCTP_ASOC_CREATE_UNLOCK(inp
);
7524 create_lock_applied
= 0;
7528 if (use_rcvinfo
== 0) {
7529 srcv
= stcb
->asoc
.def_send
;
7533 if (sctp_debug_on
& SCTP_DEBUG_OUTPUT5
) {
7534 kprintf("stream:%d\n", srcv
.sinfo_stream
);
7535 kprintf("flags:%x\n", (u_int
)srcv
.sinfo_flags
);
7536 kprintf("ppid:%d\n", srcv
.sinfo_ppid
);
7537 kprintf("context:%d\n", srcv
.sinfo_context
);
7542 sctppcbinfo
.mbuf_track
--;
7543 sctp_m_freem(control
);
7546 if (net
&& ((srcv
.sinfo_flags
& MSG_ADDR_OVER
))) {
7547 /* we take the override or the unconfirmed */
7550 net
= stcb
->asoc
.primary_destination
;
7552 if ((error
= sctp_msg_append(stcb
, net
, m
, &srcv
, flags
))) {
7553 SCTP_TCB_UNLOCK(stcb
);
7557 if (net
->flight_size
> net
->cwnd
) {
7558 sctp_pegs
[SCTP_SENDTO_FULL_CWND
]++;
7560 } else if (asoc
->ifp_had_enobuf
) {
7561 sctp_pegs
[SCTP_QUEONLY_BURSTLMT
]++;
7564 un_sent
= ((stcb
->asoc
.total_output_queue_size
- stcb
->asoc
.total_flight
) +
7565 ((stcb
->asoc
.chunks_on_out_queue
- stcb
->asoc
.total_flight_count
) * sizeof(struct sctp_data_chunk
)) +
7568 if (((inp
->sctp_flags
& SCTP_PCB_FLAGS_NODELAY
) == 0) &&
7569 (stcb
->asoc
.total_flight
> 0) &&
7570 (un_sent
< (int)stcb
->asoc
.smallest_mtu
)
7573 /* Ok, Nagle is set on and we have
7574 * data outstanding. Don't send anything
7575 * and let the SACK drive out the data.
7577 sctp_pegs
[SCTP_NAGLE_NOQ
]++;
7580 sctp_pegs
[SCTP_NAGLE_OFF
]++;
7583 if ((queue_only
== 0) && stcb
->asoc
.peers_rwnd
) {
7584 /* we can attempt to send too.*/
7586 if (sctp_debug_on
& SCTP_DEBUG_OUTPUT1
) {
7587 kprintf("USR Send calls sctp_chunk_output\n");
7590 #ifdef SCTP_AUDITING_ENABLED
7591 sctp_audit_log(0xC0, 1);
7592 sctp_auditing(6, inp
, stcb
, net
);
7594 sctp_pegs
[SCTP_OUTPUT_FRM_SND
]++;
7595 sctp_chunk_output(inp
, stcb
, 0);
7596 #ifdef SCTP_AUDITING_ENABLED
7597 sctp_audit_log(0xC0, 2);
7598 sctp_auditing(7, inp
, stcb
, net
);
7603 if (sctp_debug_on
& SCTP_DEBUG_OUTPUT1
) {
7604 kprintf("USR Send complete qo:%d prw:%d\n", queue_only
, stcb
->asoc
.peers_rwnd
);
7607 SCTP_TCB_UNLOCK(stcb
);
7613 send_forward_tsn(struct sctp_tcb
*stcb
,
7614 struct sctp_association
*asoc
)
7616 struct sctp_tmit_chunk
*chk
;
7617 struct sctp_forward_tsn_chunk
*fwdtsn
;
7619 TAILQ_FOREACH(chk
, &asoc
->control_send_queue
, sctp_next
) {
7620 if (chk
->rec
.chunk_id
== SCTP_FORWARD_CUM_TSN
) {
7621 /* mark it to unsent */
7622 chk
->sent
= SCTP_DATAGRAM_UNSENT
;
7624 /* Do we correct its output location? */
7625 if (chk
->whoTo
!= asoc
->primary_destination
) {
7626 sctp_free_remote_addr(chk
->whoTo
);
7627 chk
->whoTo
= asoc
->primary_destination
;
7628 chk
->whoTo
->ref_count
++;
7630 goto sctp_fill_in_rest
;
7633 /* Ok if we reach here we must build one */
7634 chk
= (struct sctp_tmit_chunk
*)SCTP_ZONE_GET(sctppcbinfo
.ipi_zone_chunk
);
7638 sctppcbinfo
.ipi_count_chunk
++;
7639 sctppcbinfo
.ipi_gencnt_chunk
++;
7640 chk
->rec
.chunk_id
= SCTP_FORWARD_CUM_TSN
;
7642 MGETHDR(chk
->data
, MB_DONTWAIT
, MT_DATA
);
7643 if (chk
->data
== NULL
) {
7644 chk
->whoTo
->ref_count
--;
7645 SCTP_ZONE_FREE(sctppcbinfo
.ipi_zone_chunk
, chk
);
7646 sctppcbinfo
.ipi_count_chunk
--;
7647 if ((int)sctppcbinfo
.ipi_count_chunk
< 0) {
7648 panic("Chunk count is negative");
7650 sctppcbinfo
.ipi_gencnt_chunk
++;
7653 chk
->data
->m_data
+= SCTP_MIN_OVERHEAD
;
7654 chk
->sent
= SCTP_DATAGRAM_UNSENT
;
7656 chk
->whoTo
= asoc
->primary_destination
;
7657 chk
->whoTo
->ref_count
++;
7658 TAILQ_INSERT_TAIL(&asoc
->control_send_queue
, chk
, sctp_next
);
7659 asoc
->ctrl_queue_cnt
++;
7661 /* Here we go through and fill out the part that
7662 * deals with stream/seq of the ones we skip.
7664 chk
->data
->m_pkthdr
.len
= chk
->data
->m_len
= 0;
7666 struct sctp_tmit_chunk
*at
, *tp1
, *last
;
7667 struct sctp_strseq
*strseq
;
7668 unsigned int cnt_of_space
, i
, ovh
;
7669 unsigned int space_needed
;
7670 unsigned int cnt_of_skipped
= 0;
7671 TAILQ_FOREACH(at
, &asoc
->sent_queue
, sctp_next
) {
7672 if (at
->sent
!= SCTP_FORWARD_TSN_SKIP
) {
7673 /* no more to look at */
7676 if (at
->rec
.data
.rcv_flags
& SCTP_DATA_UNORDERED
) {
7677 /* We don't report these */
7682 space_needed
= (sizeof(struct sctp_forward_tsn_chunk
) +
7683 (cnt_of_skipped
* sizeof(struct sctp_strseq
)));
7684 if ((M_TRAILINGSPACE(chk
->data
) < (int)space_needed
) &&
7685 ((chk
->data
->m_flags
& M_EXT
) == 0)) {
7686 /* Need a M_EXT, get one and move
7687 * fwdtsn to data area.
7689 MCLGET(chk
->data
, MB_DONTWAIT
);
7691 cnt_of_space
= M_TRAILINGSPACE(chk
->data
);
7693 if (stcb
->sctp_ep
->sctp_flags
& SCTP_PCB_FLAGS_BOUND_V6
) {
7694 ovh
= SCTP_MIN_OVERHEAD
;
7696 ovh
= SCTP_MIN_V4_OVERHEAD
;
7698 if (cnt_of_space
> (asoc
->smallest_mtu
-ovh
)) {
7699 /* trim to a mtu size */
7700 cnt_of_space
= asoc
->smallest_mtu
- ovh
;
7702 if (cnt_of_space
< space_needed
) {
7703 /* ok we must trim down the chunk by lowering
7704 * the advance peer ack point.
7706 cnt_of_skipped
= (cnt_of_space
-
7707 ((sizeof(struct sctp_forward_tsn_chunk
))/
7708 sizeof(struct sctp_strseq
)));
7709 /* Go through and find the TSN that
7710 * will be the one we report.
7712 at
= TAILQ_FIRST(&asoc
->sent_queue
);
7713 for (i
= 0; i
< cnt_of_skipped
; i
++) {
7714 tp1
= TAILQ_NEXT(at
, sctp_next
);
7718 /* last now points to last one I can report, update peer ack point */
7719 asoc
->advanced_peer_ack_point
= last
->rec
.data
.TSN_seq
;
7720 space_needed
-= (cnt_of_skipped
* sizeof(struct sctp_strseq
));
7722 chk
->send_size
= space_needed
;
7723 /* Setup the chunk */
7724 fwdtsn
= mtod(chk
->data
, struct sctp_forward_tsn_chunk
*);
7725 fwdtsn
->ch
.chunk_length
= htons(chk
->send_size
);
7726 fwdtsn
->ch
.chunk_flags
= 0;
7727 fwdtsn
->ch
.chunk_type
= SCTP_FORWARD_CUM_TSN
;
7728 fwdtsn
->new_cumulative_tsn
= htonl(asoc
->advanced_peer_ack_point
);
7729 chk
->send_size
= (sizeof(struct sctp_forward_tsn_chunk
) +
7730 (cnt_of_skipped
* sizeof(struct sctp_strseq
)));
7731 chk
->data
->m_pkthdr
.len
= chk
->data
->m_len
= chk
->send_size
;
7733 /* Move pointer to after the fwdtsn and transfer to
7734 * the strseq pointer.
7736 strseq
= (struct sctp_strseq
*)fwdtsn
;
7738 * Now populate the strseq list. This is done blindly
7739 * without pulling out duplicate stream info. This is
7740 * inefficent but won't harm the process since the peer
7741 * will look at these in sequence and will thus release
7742 * anything. It could mean we exceed the PMTU and chop
7743 * off some that we could have included.. but this is
7744 * unlikely (aka 1432/4 would mean 300+ stream seq's would
7745 * have to be reported in one FWD-TSN. With a bit of work
7746 * we can later FIX this to optimize and pull out duplcates..
7747 * but it does add more overhead. So for now... not!
7749 at
= TAILQ_FIRST(&asoc
->sent_queue
);
7750 for (i
= 0; i
< cnt_of_skipped
; i
++) {
7751 tp1
= TAILQ_NEXT(at
, sctp_next
);
7752 if (at
->rec
.data
.rcv_flags
& SCTP_DATA_UNORDERED
) {
7753 /* We don't report these */
7758 strseq
->stream
= ntohs(at
->rec
.data
.stream_number
);
7759 strseq
->sequence
= ntohs(at
->rec
.data
.stream_seq
);
7769 sctp_send_sack(struct sctp_tcb
*stcb
)
7772 * Queue up a SACK in the control queue. We must first check to
7773 * see if a SACK is somehow on the control queue. If so, we will
7774 * take and and remove the old one.
7776 struct sctp_association
*asoc
;
7777 struct sctp_tmit_chunk
*chk
, *a_chk
;
7778 struct sctp_sack_chunk
*sack
;
7779 struct sctp_gap_ack_block
*gap_descriptor
;
7782 unsigned int i
, maxi
, seeing_ones
, m_size
;
7783 unsigned int num_gap_blocks
, space
;
7789 if (asoc
->last_data_chunk_from
== NULL
) {
7790 /* Hmm we never received anything */
7793 sctp_set_rwnd(stcb
, asoc
);
7794 TAILQ_FOREACH(chk
, &asoc
->control_send_queue
, sctp_next
) {
7795 if (chk
->rec
.chunk_id
== SCTP_SELECTIVE_ACK
) {
7796 /* Hmm, found a sack already on queue, remove it */
7797 TAILQ_REMOVE(&asoc
->control_send_queue
, chk
, sctp_next
);
7798 asoc
->ctrl_queue_cnt
++;
7801 sctp_m_freem(a_chk
->data
);
7803 sctp_free_remote_addr(a_chk
->whoTo
);
7804 a_chk
->whoTo
= NULL
;
7808 if (a_chk
== NULL
) {
7809 a_chk
= (struct sctp_tmit_chunk
*)SCTP_ZONE_GET(sctppcbinfo
.ipi_zone_chunk
);
7810 if (a_chk
== NULL
) {
7811 /* No memory so we drop the idea, and set a timer */
7812 sctp_timer_stop(SCTP_TIMER_TYPE_RECV
,
7813 stcb
->sctp_ep
, stcb
, NULL
);
7814 sctp_timer_start(SCTP_TIMER_TYPE_RECV
,
7815 stcb
->sctp_ep
, stcb
, NULL
);
7818 sctppcbinfo
.ipi_count_chunk
++;
7819 sctppcbinfo
.ipi_gencnt_chunk
++;
7820 a_chk
->rec
.chunk_id
= SCTP_SELECTIVE_ACK
;
7823 a_chk
->snd_count
= 0;
7824 a_chk
->send_size
= 0; /* fill in later */
7825 a_chk
->sent
= SCTP_DATAGRAM_UNSENT
;
7826 m_size
= (asoc
->mapping_array_size
<< 3);
7828 if ((asoc
->numduptsns
) ||
7829 (asoc
->last_data_chunk_from
->dest_state
& SCTP_ADDR_NOT_REACHABLE
)
7831 /* Ok, we have some duplicates or the destination for the
7832 * sack is unreachable, lets see if we can select an alternate
7833 * than asoc->last_data_chunk_from
7835 if ((!(asoc
->last_data_chunk_from
->dest_state
&
7836 SCTP_ADDR_NOT_REACHABLE
)) &&
7837 (asoc
->used_alt_onsack
> 2)) {
7838 /* We used an alt last time, don't this time */
7839 a_chk
->whoTo
= NULL
;
7841 asoc
->used_alt_onsack
++;
7842 a_chk
->whoTo
= sctp_find_alternate_net(stcb
, asoc
->last_data_chunk_from
);
7844 if (a_chk
->whoTo
== NULL
) {
7845 /* Nope, no alternate */
7846 a_chk
->whoTo
= asoc
->last_data_chunk_from
;
7847 asoc
->used_alt_onsack
= 0;
7850 /* No duplicates so we use the last
7851 * place we received data from.
7854 if (asoc
->last_data_chunk_from
== NULL
) {
7855 kprintf("Huh, last_data_chunk_from is null when we want to sack??\n");
7858 asoc
->used_alt_onsack
= 0;
7859 a_chk
->whoTo
= asoc
->last_data_chunk_from
;
7862 a_chk
->whoTo
->ref_count
++;
7864 /* Ok now lets formulate a MBUF with our sack */
7865 MGETHDR(a_chk
->data
, MB_DONTWAIT
, MT_DATA
);
7866 if ((a_chk
->data
== NULL
) ||
7867 (a_chk
->whoTo
== NULL
)) {
7868 /* rats, no mbuf memory */
7870 /* was a problem with the destination */
7871 sctp_m_freem(a_chk
->data
);
7874 a_chk
->whoTo
->ref_count
--;
7875 SCTP_ZONE_FREE(sctppcbinfo
.ipi_zone_chunk
, a_chk
);
7876 sctppcbinfo
.ipi_count_chunk
--;
7877 if ((int)sctppcbinfo
.ipi_count_chunk
< 0) {
7878 panic("Chunk count is negative");
7880 sctppcbinfo
.ipi_gencnt_chunk
++;
7881 sctp_timer_stop(SCTP_TIMER_TYPE_RECV
,
7882 stcb
->sctp_ep
, stcb
, NULL
);
7883 sctp_timer_start(SCTP_TIMER_TYPE_RECV
,
7884 stcb
->sctp_ep
, stcb
, NULL
);
7887 /* First count the number of gap ack blocks we need */
7888 if (asoc
->highest_tsn_inside_map
== asoc
->cumulative_tsn
) {
7889 /* We know if there are none above the cum-ack we
7890 * have everything with NO gaps
7894 /* Ok we must count how many gaps we
7898 if (asoc
->highest_tsn_inside_map
>= asoc
->mapping_array_base_tsn
) {
7899 maxi
= (asoc
->highest_tsn_inside_map
- asoc
->mapping_array_base_tsn
);
7901 maxi
= (asoc
->highest_tsn_inside_map
+ (MAX_TSN
- asoc
->mapping_array_base_tsn
) + 1);
7903 if (maxi
> m_size
) {
7904 /* impossible but who knows, someone is playing with us :> */
7906 kprintf("GAK maxi:%d > m_size:%d came out higher than allowed htsn:%u base:%u cumack:%u\n",
7909 asoc
->highest_tsn_inside_map
,
7910 asoc
->mapping_array_base_tsn
,
7911 asoc
->cumulative_tsn
7917 if (asoc
->cumulative_tsn
>= asoc
->mapping_array_base_tsn
) {
7918 start
= (asoc
->cumulative_tsn
- asoc
->mapping_array_base_tsn
);
7920 /* Set it so we start at 0 */
7923 /* Ok move start up one to look at the NEXT past the cum-ack */
7925 for (i
= start
; i
<= maxi
; i
++) {
7927 /* while seeing ones I must
7928 * transition back to 0 before
7929 * finding the next gap and
7930 * counting the segment.
7932 if (SCTP_IS_TSN_PRESENT(asoc
->mapping_array
, i
) == 0) {
7936 if (SCTP_IS_TSN_PRESENT(asoc
->mapping_array
, i
)) {
7943 if (num_gap_blocks
== 0) {
7945 * Traveled all of the bits and NO one,
7948 if (compare_with_wrap(asoc
->cumulative_tsn
, asoc
->highest_tsn_inside_map
, MAX_TSN
)) {
7949 asoc
->highest_tsn_inside_map
= asoc
->cumulative_tsn
;
7950 #ifdef SCTP_MAP_LOGGING
7951 sctp_log_map(0, 4, asoc
->highest_tsn_inside_map
, SCTP_MAP_SLIDE_RESULT
);
7957 /* Now calculate the space needed */
7958 space
= (sizeof(struct sctp_sack_chunk
) +
7959 (num_gap_blocks
* sizeof(struct sctp_gap_ack_block
)) +
7960 (asoc
->numduptsns
* sizeof(int32_t))
7962 if (space
> (asoc
->smallest_mtu
-SCTP_MAX_OVERHEAD
)) {
7963 /* Reduce the size of the sack to fit */
7965 calc
= (asoc
->smallest_mtu
- SCTP_MAX_OVERHEAD
);
7966 calc
-= sizeof(struct sctp_gap_ack_block
);
7967 fit
= calc
/sizeof(struct sctp_gap_ack_block
);
7968 if (fit
> (int)num_gap_blocks
) {
7969 /* discard some dups */
7970 asoc
->numduptsns
= (fit
- num_gap_blocks
);
7972 /* discard all dups and some gaps */
7973 num_gap_blocks
= fit
;
7974 asoc
->numduptsns
= 0;
7977 space
= (sizeof(struct sctp_sack_chunk
) +
7978 (num_gap_blocks
* sizeof(struct sctp_gap_ack_block
)) +
7979 (asoc
->numduptsns
* sizeof(int32_t))
7984 if ((space
+SCTP_MIN_OVERHEAD
) > MHLEN
) {
7985 /* We need a cluster */
7986 MCLGET(a_chk
->data
, MB_DONTWAIT
);
7987 if ((a_chk
->data
->m_flags
& M_EXT
) != M_EXT
) {
7988 /* can't get a cluster
7989 * give up and try later.
7992 sctp_m_freem(a_chk
->data
);
7994 a_chk
->whoTo
->ref_count
--;
7995 SCTP_ZONE_FREE(sctppcbinfo
.ipi_zone_chunk
, a_chk
);
7996 sctppcbinfo
.ipi_count_chunk
--;
7997 if ((int)sctppcbinfo
.ipi_count_chunk
< 0) {
7998 panic("Chunk count is negative");
8000 sctppcbinfo
.ipi_gencnt_chunk
++;
8001 sctp_timer_stop(SCTP_TIMER_TYPE_RECV
,
8002 stcb
->sctp_ep
, stcb
, NULL
);
8003 sctp_timer_start(SCTP_TIMER_TYPE_RECV
,
8004 stcb
->sctp_ep
, stcb
, NULL
);
8009 /* ok, lets go through and fill it in */
8010 a_chk
->data
->m_data
+= SCTP_MIN_OVERHEAD
;
8011 sack
= mtod(a_chk
->data
, struct sctp_sack_chunk
*);
8012 sack
->ch
.chunk_type
= SCTP_SELECTIVE_ACK
;
8013 sack
->ch
.chunk_flags
= asoc
->receiver_nonce_sum
& SCTP_SACK_NONCE_SUM
;
8014 sack
->sack
.cum_tsn_ack
= htonl(asoc
->cumulative_tsn
);
8015 sack
->sack
.a_rwnd
= htonl(asoc
->my_rwnd
);
8016 asoc
->my_last_reported_rwnd
= asoc
->my_rwnd
;
8017 sack
->sack
.num_gap_ack_blks
= htons(num_gap_blocks
);
8018 sack
->sack
.num_dup_tsns
= htons(asoc
->numduptsns
);
8020 a_chk
->send_size
= (sizeof(struct sctp_sack_chunk
) +
8021 (num_gap_blocks
* sizeof(struct sctp_gap_ack_block
)) +
8022 (asoc
->numduptsns
* sizeof(int32_t)));
8023 a_chk
->data
->m_pkthdr
.len
= a_chk
->data
->m_len
= a_chk
->send_size
;
8024 sack
->ch
.chunk_length
= htons(a_chk
->send_size
);
8026 gap_descriptor
= (struct sctp_gap_ack_block
*)((caddr_t
)sack
+ sizeof(struct sctp_sack_chunk
));
8028 for (i
= start
; i
<= maxi
; i
++) {
8029 if (num_gap_blocks
== 0) {
8033 /* while seeing Ones I must
8034 * transition back to 0 before
8035 * finding the next gap
8037 if (SCTP_IS_TSN_PRESENT(asoc
->mapping_array
, i
) == 0) {
8038 gap_descriptor
->end
= htons(((uint16_t)(i
-start
)));
8044 if (SCTP_IS_TSN_PRESENT(asoc
->mapping_array
, i
)) {
8045 gap_descriptor
->start
= htons(((uint16_t)(i
+1-start
)));
8046 /* advance struct to next pointer */
8051 if (num_gap_blocks
) {
8052 /* special case where the array is all 1's
8053 * to the end of the array.
8055 gap_descriptor
->end
= htons(((uint16_t)((i
-start
))));
8058 /* now we must add any dups we are going to report. */
8059 if (asoc
->numduptsns
) {
8060 dup
= (uint32_t *)gap_descriptor
;
8061 for (i
= 0; i
< asoc
->numduptsns
; i
++) {
8062 *dup
= htonl(asoc
->dup_tsns
[i
]);
8065 asoc
->numduptsns
= 0;
8067 /* now that the chunk is prepared queue it to the control
8070 TAILQ_INSERT_TAIL(&asoc
->control_send_queue
, a_chk
, sctp_next
);
8071 asoc
->ctrl_queue_cnt
++;
8072 sctp_pegs
[SCTP_PEG_SACKS_SENT
]++;
8077 sctp_send_abort_tcb(struct sctp_tcb
*stcb
, struct mbuf
*operr
)
8079 struct mbuf
*m_abort
;
8080 struct sctp_abort_msg
*abort_m
;
8083 MGETHDR(m_abort
, MB_DONTWAIT
, MT_HEADER
);
8084 if (m_abort
== NULL
) {
8088 m_abort
->m_data
+= SCTP_MIN_OVERHEAD
;
8089 abort_m
= mtod(m_abort
, struct sctp_abort_msg
*);
8090 m_abort
->m_len
= sizeof(struct sctp_abort_msg
);
8091 m_abort
->m_next
= operr
;
8101 abort_m
->msg
.ch
.chunk_type
= SCTP_ABORT_ASSOCIATION
;
8102 abort_m
->msg
.ch
.chunk_flags
= 0;
8103 abort_m
->msg
.ch
.chunk_length
= htons(sizeof(struct sctp_abort_chunk
) +
8105 abort_m
->sh
.src_port
= stcb
->sctp_ep
->sctp_lport
;
8106 abort_m
->sh
.dest_port
= stcb
->rport
;
8107 abort_m
->sh
.v_tag
= htonl(stcb
->asoc
.peer_vtag
);
8108 abort_m
->sh
.checksum
= 0;
8109 m_abort
->m_pkthdr
.len
= m_abort
->m_len
+ sz
;
8110 m_abort
->m_pkthdr
.rcvif
= 0;
8111 sctp_lowlevel_chunk_output(stcb
->sctp_ep
, stcb
,
8112 stcb
->asoc
.primary_destination
,
8113 (struct sockaddr
*)&stcb
->asoc
.primary_destination
->ro
._l_addr
,
8114 m_abort
, 1, 0, NULL
, 0);
8118 sctp_send_shutdown_complete(struct sctp_tcb
*stcb
,
8119 struct sctp_nets
*net
)
8122 /* formulate and SEND a SHUTDOWN-COMPLETE */
8123 struct mbuf
*m_shutdown_comp
;
8124 struct sctp_shutdown_complete_msg
*comp_cp
;
8126 m_shutdown_comp
= NULL
;
8127 MGETHDR(m_shutdown_comp
, MB_DONTWAIT
, MT_HEADER
);
8128 if (m_shutdown_comp
== NULL
) {
8132 m_shutdown_comp
->m_data
+= sizeof(struct ip6_hdr
);
8133 comp_cp
= mtod(m_shutdown_comp
, struct sctp_shutdown_complete_msg
*);
8134 comp_cp
->shut_cmp
.ch
.chunk_type
= SCTP_SHUTDOWN_COMPLETE
;
8135 comp_cp
->shut_cmp
.ch
.chunk_flags
= 0;
8136 comp_cp
->shut_cmp
.ch
.chunk_length
= htons(sizeof(struct sctp_shutdown_complete_chunk
));
8137 comp_cp
->sh
.src_port
= stcb
->sctp_ep
->sctp_lport
;
8138 comp_cp
->sh
.dest_port
= stcb
->rport
;
8139 comp_cp
->sh
.v_tag
= htonl(stcb
->asoc
.peer_vtag
);
8140 comp_cp
->sh
.checksum
= 0;
8142 m_shutdown_comp
->m_pkthdr
.len
= m_shutdown_comp
->m_len
= sizeof(struct sctp_shutdown_complete_msg
);
8143 m_shutdown_comp
->m_pkthdr
.rcvif
= 0;
8144 sctp_lowlevel_chunk_output(stcb
->sctp_ep
, stcb
, net
,
8145 (struct sockaddr
*)&net
->ro
._l_addr
, m_shutdown_comp
,
8147 if ((stcb
->sctp_ep
->sctp_flags
& SCTP_PCB_FLAGS_TCPTYPE
) ||
8148 (stcb
->sctp_ep
->sctp_flags
& SCTP_PCB_FLAGS_IN_TCPPOOL
)) {
8149 stcb
->sctp_ep
->sctp_flags
&= ~SCTP_PCB_FLAGS_CONNECTED
;
8150 stcb
->sctp_ep
->sctp_socket
->so_snd
.ssb_cc
= 0;
8151 soisdisconnected(stcb
->sctp_ep
->sctp_socket
);
8157 sctp_send_shutdown_complete2(struct mbuf
*m
, int iphlen
, struct sctphdr
*sh
)
8159 /* formulate and SEND a SHUTDOWN-COMPLETE */
8161 struct ip
*iph
, *iph_out
;
8162 struct ip6_hdr
*ip6
, *ip6_out
;
8164 struct sctp_shutdown_complete_msg
*comp_cp
;
8166 MGETHDR(mout
, MB_DONTWAIT
, MT_HEADER
);
8171 iph
= mtod(m
, struct ip
*);
8175 if (iph
->ip_v
== IPVERSION
) {
8176 mout
->m_len
= sizeof(struct ip
) +
8177 sizeof(struct sctp_shutdown_complete_msg
);
8178 mout
->m_next
= NULL
;
8179 iph_out
= mtod(mout
, struct ip
*);
8181 /* Fill in the IP header for the ABORT */
8182 iph_out
->ip_v
= IPVERSION
;
8183 iph_out
->ip_hl
= (sizeof(struct ip
)/4);
8184 iph_out
->ip_tos
= (u_char
)0;
8186 iph_out
->ip_off
= 0;
8187 iph_out
->ip_ttl
= MAXTTL
;
8188 iph_out
->ip_p
= IPPROTO_SCTP
;
8189 iph_out
->ip_src
.s_addr
= iph
->ip_dst
.s_addr
;
8190 iph_out
->ip_dst
.s_addr
= iph
->ip_src
.s_addr
;
8192 /* let IP layer calculate this */
8193 iph_out
->ip_sum
= 0;
8194 offset_out
+= sizeof(*iph_out
);
8195 comp_cp
= (struct sctp_shutdown_complete_msg
*)(
8196 (caddr_t
)iph_out
+ offset_out
);
8197 } else if (iph
->ip_v
== (IPV6_VERSION
>> 4)) {
8198 ip6
= (struct ip6_hdr
*)iph
;
8199 mout
->m_len
= sizeof(struct ip6_hdr
) +
8200 sizeof(struct sctp_shutdown_complete_msg
);
8201 mout
->m_next
= NULL
;
8202 ip6_out
= mtod(mout
, struct ip6_hdr
*);
8204 /* Fill in the IPv6 header for the ABORT */
8205 ip6_out
->ip6_flow
= ip6
->ip6_flow
;
8206 ip6_out
->ip6_hlim
= ip6_defhlim
;
8207 ip6_out
->ip6_nxt
= IPPROTO_SCTP
;
8208 ip6_out
->ip6_src
= ip6
->ip6_dst
;
8209 ip6_out
->ip6_dst
= ip6
->ip6_src
;
8210 ip6_out
->ip6_plen
= mout
->m_len
;
8211 offset_out
+= sizeof(*ip6_out
);
8212 comp_cp
= (struct sctp_shutdown_complete_msg
*)(
8213 (caddr_t
)ip6_out
+ offset_out
);
8215 /* Currently not supported. */
8219 /* Now copy in and fill in the ABORT tags etc. */
8220 comp_cp
->sh
.src_port
= sh
->dest_port
;
8221 comp_cp
->sh
.dest_port
= sh
->src_port
;
8222 comp_cp
->sh
.checksum
= 0;
8223 comp_cp
->sh
.v_tag
= sh
->v_tag
;
8224 comp_cp
->shut_cmp
.ch
.chunk_flags
= SCTP_HAD_NO_TCB
;
8225 comp_cp
->shut_cmp
.ch
.chunk_type
= SCTP_SHUTDOWN_COMPLETE
;
8226 comp_cp
->shut_cmp
.ch
.chunk_length
= htons(sizeof(struct sctp_shutdown_complete_chunk
));
8228 mout
->m_pkthdr
.len
= mout
->m_len
;
8230 if ((sctp_no_csum_on_loopback
) &&
8231 (m
->m_pkthdr
.rcvif
) &&
8232 (m
->m_pkthdr
.rcvif
->if_type
== IFT_LOOP
)) {
8233 comp_cp
->sh
.checksum
= 0;
8235 comp_cp
->sh
.checksum
= sctp_calculate_sum(mout
, NULL
, offset_out
);
8238 /* zap the rcvif, it should be null */
8239 mout
->m_pkthdr
.rcvif
= 0;
8240 /* zap the stack pointer to the route */
8241 if (iph_out
!= NULL
) {
8244 bzero(&ro
, sizeof ro
);
8246 if (sctp_debug_on
& SCTP_DEBUG_OUTPUT2
) {
8247 kprintf("sctp_shutdown_complete2 calling ip_output:\n");
8248 sctp_print_address_pkt(iph_out
, &comp_cp
->sh
);
8251 /* set IPv4 length */
8252 #if defined(__FreeBSD__)
8253 iph_out
->ip_len
= mout
->m_pkthdr
.len
;
8255 iph_out
->ip_len
= htons(mout
->m_pkthdr
.len
);
8258 ip_output(mout
, 0, &ro
, IP_RAWOUTPUT
, NULL
8259 #if defined(__OpenBSD__) || (defined(__FreeBSD__) && __FreeBSD_version >= 480000) \
8260 || defined(__NetBSD__) || defined(__DragonFly__)
8264 /* Free the route if we got one back */
8267 } else if (ip6_out
!= NULL
) {
8268 #ifdef NEW_STRUCT_ROUTE
8271 struct route_in6 ro
;
8274 bzero(&ro
, sizeof(ro
));
8276 if (sctp_debug_on
& SCTP_DEBUG_OUTPUT2
) {
8277 kprintf("sctp_shutdown_complete2 calling ip6_output:\n");
8278 sctp_print_address_pkt((struct ip
*)ip6_out
,
8282 ip6_output(mout
, NULL
, &ro
, 0, NULL
, NULL
8283 #if defined(__NetBSD__)
8286 #if (defined(__FreeBSD__) && __FreeBSD_version >= 480000) || defined(__DragonFly__)
8290 /* Free the route if we got one back */
8294 sctp_pegs
[SCTP_DATAGRAMS_SENT
]++;
8298 static struct sctp_nets
*
8299 sctp_select_hb_destination(struct sctp_tcb
*stcb
, struct timeval
*now
)
8301 struct sctp_nets
*net
, *hnet
;
8302 int ms_goneby
, highest_ms
, state_overide
=0;
8304 SCTP_GETTIME_TIMEVAL(now
);
8307 TAILQ_FOREACH(net
, &stcb
->asoc
.nets
, sctp_next
) {
8309 ((net
->dest_state
& SCTP_ADDR_NOHB
) && ((net
->dest_state
& SCTP_ADDR_UNCONFIRMED
) == 0)) ||
8310 (net
->dest_state
& SCTP_ADDR_OUT_OF_SCOPE
)
8312 /* Skip this guy from consideration if HB is off AND its confirmed*/
8314 if (sctp_debug_on
& SCTP_DEBUG_OUTPUT4
) {
8315 kprintf("Skipping net:%p state:%d nohb/out-of-scope\n",
8316 net
, net
->dest_state
);
8321 if (sctp_destination_is_reachable(stcb
, (struct sockaddr
*)&net
->ro
._l_addr
) == 0) {
8322 /* skip this dest net from consideration */
8324 if (sctp_debug_on
& SCTP_DEBUG_OUTPUT4
) {
8325 kprintf("Skipping net:%p reachable NOT\n",
8331 if (net
->last_sent_time
.tv_sec
) {
8332 /* Sent to so we subtract */
8333 ms_goneby
= (now
->tv_sec
- net
->last_sent_time
.tv_sec
) * 1000;
8335 /* Never been sent to */
8336 ms_goneby
= 0x7fffffff;
8338 if (sctp_debug_on
& SCTP_DEBUG_OUTPUT4
) {
8339 kprintf("net:%p ms_goneby:%d\n",
8343 /* When the address state is unconfirmed but still considered reachable, we
8344 * HB at a higher rate. Once it goes confirmed OR reaches the "unreachable"
8345 * state, thenw we cut it back to HB at a more normal pace.
8347 if ((net
->dest_state
& (SCTP_ADDR_UNCONFIRMED
|SCTP_ADDR_NOT_REACHABLE
)) == SCTP_ADDR_UNCONFIRMED
) {
8353 if ((((unsigned int)ms_goneby
>= net
->RTO
) || (state_overide
)) &&
8354 (ms_goneby
> highest_ms
)) {
8355 highest_ms
= ms_goneby
;
8358 if (sctp_debug_on
& SCTP_DEBUG_OUTPUT4
) {
8359 kprintf("net:%p is the new high\n",
8366 ((hnet
->dest_state
& (SCTP_ADDR_UNCONFIRMED
|SCTP_ADDR_NOT_REACHABLE
)) == SCTP_ADDR_UNCONFIRMED
)) {
8372 if (highest_ms
&& (((unsigned int)highest_ms
>= hnet
->RTO
) || state_overide
)) {
8373 /* Found the one with longest delay bounds
8374 * OR it is unconfirmed and still not marked
8378 if (sctp_debug_on
& SCTP_DEBUG_OUTPUT4
) {
8379 kprintf("net:%p is the hb winner -",
8382 sctp_print_address((struct sockaddr
*)&hnet
->ro
._l_addr
);
8387 /* update the timer now */
8388 hnet
->last_sent_time
= *now
;
8396 sctp_send_hb(struct sctp_tcb
*stcb
, int user_req
, struct sctp_nets
*u_net
)
8398 struct sctp_tmit_chunk
*chk
;
8399 struct sctp_nets
*net
;
8400 struct sctp_heartbeat_chunk
*hb
;
8402 struct sockaddr_in
*sin
;
8403 struct sockaddr_in6
*sin6
;
8405 if (user_req
== 0) {
8406 net
= sctp_select_hb_destination(stcb
, &now
);
8408 /* All our busy none to send to, just
8409 * start the timer again.
8411 if (stcb
->asoc
.state
== 0) {
8414 sctp_timer_start(SCTP_TIMER_TYPE_HEARTBEAT
,
8420 #ifndef SCTP_USE_ALLMAN_BURST
8422 /* found one idle.. decay cwnd on this one
8423 * by 1/2 if none outstanding.
8426 if (net
->flight_size
== 0) {
8428 if (net
->addr_is_local
) {
8429 if (net
->cwnd
< (net
->mtu
*4)) {
8430 net
->cwnd
= net
->mtu
* 4;
8433 if (net
->cwnd
< (net
->mtu
* 2)) {
8434 net
->cwnd
= net
->mtu
* 2;
8447 SCTP_GETTIME_TIMEVAL(&now
);
8449 sin
= (struct sockaddr_in
*)&net
->ro
._l_addr
;
8450 if (sin
->sin_family
!= AF_INET
) {
8451 if (sin
->sin_family
!= AF_INET6
) {
8456 chk
= (struct sctp_tmit_chunk
*)SCTP_ZONE_GET(sctppcbinfo
.ipi_zone_chunk
);
8459 if (sctp_debug_on
& SCTP_DEBUG_OUTPUT4
) {
8460 kprintf("Gak, can't get a chunk for hb\n");
8465 sctppcbinfo
.ipi_gencnt_chunk
++;
8466 sctppcbinfo
.ipi_count_chunk
++;
8467 chk
->rec
.chunk_id
= SCTP_HEARTBEAT_REQUEST
;
8468 chk
->asoc
= &stcb
->asoc
;
8469 chk
->send_size
= sizeof(struct sctp_heartbeat_chunk
);
8470 MGETHDR(chk
->data
, MB_DONTWAIT
, MT_DATA
);
8471 if (chk
->data
== NULL
) {
8472 SCTP_ZONE_FREE(sctppcbinfo
.ipi_zone_chunk
, chk
);
8473 sctppcbinfo
.ipi_count_chunk
--;
8474 if ((int)sctppcbinfo
.ipi_count_chunk
< 0) {
8475 panic("Chunk count is negative");
8477 sctppcbinfo
.ipi_gencnt_chunk
++;
8480 chk
->data
->m_data
+= SCTP_MIN_OVERHEAD
;
8481 chk
->data
->m_pkthdr
.len
= chk
->data
->m_len
= chk
->send_size
;
8482 chk
->sent
= SCTP_DATAGRAM_UNSENT
;
8485 chk
->whoTo
->ref_count
++;
8486 /* Now we have a mbuf that we can fill in with the details */
8487 hb
= mtod(chk
->data
, struct sctp_heartbeat_chunk
*);
8489 /* fill out chunk header */
8490 hb
->ch
.chunk_type
= SCTP_HEARTBEAT_REQUEST
;
8491 hb
->ch
.chunk_flags
= 0;
8492 hb
->ch
.chunk_length
= htons(chk
->send_size
);
8493 /* Fill out hb parameter */
8494 hb
->heartbeat
.hb_info
.ph
.param_type
= htons(SCTP_HEARTBEAT_INFO
);
8495 hb
->heartbeat
.hb_info
.ph
.param_length
= htons(sizeof(struct sctp_heartbeat_info_param
));
8496 hb
->heartbeat
.hb_info
.time_value_1
= now
.tv_sec
;
8497 hb
->heartbeat
.hb_info
.time_value_2
= now
.tv_usec
;
8498 /* Did our user request this one, put it in */
8499 hb
->heartbeat
.hb_info
.user_req
= user_req
;
8500 hb
->heartbeat
.hb_info
.addr_family
= sin
->sin_family
;
8501 hb
->heartbeat
.hb_info
.addr_len
= sin
->sin_len
;
8502 if (net
->dest_state
& SCTP_ADDR_UNCONFIRMED
) {
8503 /* we only take from the entropy pool if the address is
8506 net
->heartbeat_random1
= hb
->heartbeat
.hb_info
.random_value1
= sctp_select_initial_TSN(&stcb
->sctp_ep
->sctp_ep
);
8507 net
->heartbeat_random2
= hb
->heartbeat
.hb_info
.random_value2
= sctp_select_initial_TSN(&stcb
->sctp_ep
->sctp_ep
);
8509 net
->heartbeat_random1
= hb
->heartbeat
.hb_info
.random_value1
= 0;
8510 net
->heartbeat_random2
= hb
->heartbeat
.hb_info
.random_value2
= 0;
8512 if (sin
->sin_family
== AF_INET
) {
8513 memcpy(hb
->heartbeat
.hb_info
.address
, &sin
->sin_addr
, sizeof(sin
->sin_addr
));
8514 } else if (sin
->sin_family
== AF_INET6
) {
8515 /* We leave the scope the way it is in our lookup table. */
8516 sin6
= (struct sockaddr_in6
*)&net
->ro
._l_addr
;
8517 memcpy(hb
->heartbeat
.hb_info
.address
, &sin6
->sin6_addr
, sizeof(sin6
->sin6_addr
));
8519 /* huh compiler bug */
8521 if (sctp_debug_on
& SCTP_DEBUG_OUTPUT1
) {
8522 kprintf("Compiler bug bleeds a mbuf and a chunk\n");
8527 /* ok we have a destination that needs a beat */
8528 /* lets do the theshold management Qiaobing style */
8529 if (user_req
== 0) {
8530 if (sctp_threshold_management(stcb
->sctp_ep
, stcb
, net
,
8531 stcb
->asoc
.max_send_times
)) {
8532 /* we have lost the association, in a way this
8533 * is quite bad since we really are one less time
8534 * since we really did not send yet. This is the
8535 * down side to the Q's style as defined in the RFC
8536 * and not my alternate style defined in the RFC.
8538 if (chk
->data
!= NULL
) {
8539 sctp_m_freem(chk
->data
);
8542 SCTP_ZONE_FREE(sctppcbinfo
.ipi_zone_chunk
, chk
);
8543 sctppcbinfo
.ipi_count_chunk
--;
8544 if ((int)sctppcbinfo
.ipi_count_chunk
< 0) {
8545 panic("Chunk count is negative");
8547 sctppcbinfo
.ipi_gencnt_chunk
++;
8551 net
->hb_responded
= 0;
8553 if (sctp_debug_on
& SCTP_DEBUG_OUTPUT4
) {
8554 kprintf("Inserting chunk for HB\n");
8557 TAILQ_INSERT_TAIL(&stcb
->asoc
.control_send_queue
, chk
, sctp_next
);
8558 stcb
->asoc
.ctrl_queue_cnt
++;
8559 sctp_pegs
[SCTP_HB_SENT
]++;
8561 * Call directly med level routine to put out the chunk. It will
8562 * always tumble out control chunks aka HB but it may even tumble
8565 if (user_req
== 0) {
8566 /* Ok now lets start the HB timer if it is NOT a user req */
8567 sctp_timer_start(SCTP_TIMER_TYPE_HEARTBEAT
, stcb
->sctp_ep
,
8574 sctp_send_ecn_echo(struct sctp_tcb
*stcb
, struct sctp_nets
*net
,
8577 struct sctp_association
*asoc
;
8578 struct sctp_ecne_chunk
*ecne
;
8579 struct sctp_tmit_chunk
*chk
;
8581 TAILQ_FOREACH(chk
, &asoc
->control_send_queue
, sctp_next
) {
8582 if (chk
->rec
.chunk_id
== SCTP_ECN_ECHO
) {
8583 /* found a previous ECN_ECHO update it if needed */
8584 ecne
= mtod(chk
->data
, struct sctp_ecne_chunk
*);
8585 ecne
->tsn
= htonl(high_tsn
);
8589 /* nope could not find one to update so we must build one */
8590 chk
= (struct sctp_tmit_chunk
*)SCTP_ZONE_GET(sctppcbinfo
.ipi_zone_chunk
);
8594 sctp_pegs
[SCTP_ECNE_SENT
]++;
8595 sctppcbinfo
.ipi_count_chunk
++;
8596 sctppcbinfo
.ipi_gencnt_chunk
++;
8597 chk
->rec
.chunk_id
= SCTP_ECN_ECHO
;
8598 chk
->asoc
= &stcb
->asoc
;
8599 chk
->send_size
= sizeof(struct sctp_ecne_chunk
);
8600 MGETHDR(chk
->data
, MB_DONTWAIT
, MT_DATA
);
8601 if (chk
->data
== NULL
) {
8602 SCTP_ZONE_FREE(sctppcbinfo
.ipi_zone_chunk
, chk
);
8603 sctppcbinfo
.ipi_count_chunk
--;
8604 if ((int)sctppcbinfo
.ipi_count_chunk
< 0) {
8605 panic("Chunk count is negative");
8607 sctppcbinfo
.ipi_gencnt_chunk
++;
8610 chk
->data
->m_data
+= SCTP_MIN_OVERHEAD
;
8611 chk
->data
->m_pkthdr
.len
= chk
->data
->m_len
= chk
->send_size
;
8612 chk
->sent
= SCTP_DATAGRAM_UNSENT
;
8615 chk
->whoTo
->ref_count
++;
8616 ecne
= mtod(chk
->data
, struct sctp_ecne_chunk
*);
8617 ecne
->ch
.chunk_type
= SCTP_ECN_ECHO
;
8618 ecne
->ch
.chunk_flags
= 0;
8619 ecne
->ch
.chunk_length
= htons(sizeof(struct sctp_ecne_chunk
));
8620 ecne
->tsn
= htonl(high_tsn
);
8621 TAILQ_INSERT_TAIL(&stcb
->asoc
.control_send_queue
, chk
, sctp_next
);
8622 asoc
->ctrl_queue_cnt
++;
/*
 * sctp_send_packet_dropped(): build an SCTP PACKET-DROPPED report chunk
 * describing the received packet `m` (which failed processing) and queue it
 * on the association's control_send_queue toward the primary destination.
 * NOTE(review): this extract is badly garbled — statements are split across
 * lines and many original source lines are missing entirely (the embedded
 * line numbers jump, e.g. 8628, 8641-8644, 8653-8655, 8662-8663, 8726-8730,
 * 8738-8740, 8747-8748), so braces, `else` arms and returns are absent here.
 * Consult the pristine file before editing.
 */
8626 sctp_send_packet_dropped(struct sctp_tcb
*stcb
, struct sctp_nets
*net
,
8627 struct mbuf
*m
, int iphlen
, int bad_crc
)
8629 struct sctp_association
*asoc
;
8630 struct sctp_pktdrop_chunk
*drp
;
8631 struct sctp_tmit_chunk
*chk
;
8634 unsigned int small_one
;
/* Bail out unless the peer advertised packet-drop report support. */
8639 if (asoc
->peer_supports_pktdrop
== 0) {
8640 /* peer must declare support before I
/* Allocate a transmit-chunk descriptor from the global chunk zone. */
8645 chk
= (struct sctp_tmit_chunk
*)SCTP_ZONE_GET(sctppcbinfo
.ipi_zone_chunk
);
8649 sctppcbinfo
.ipi_count_chunk
++;
8650 sctppcbinfo
.ipi_gencnt_chunk
++;
/* Determine the dropped packet's payload length from its IPv4/IPv6 header. */
8652 iph
= mtod(m
, struct ip
*);
8656 if (iph
->ip_v
== IPVERSION
) {
8658 #if defined(__FreeBSD__)
8659 len
= chk
->send_size
= iph
->ip_len
;
8661 len
= chk
->send_size
= (iph
->ip_len
- iphlen
);
8664 struct ip6_hdr
*ip6h
;
8666 ip6h
= mtod(m
, struct ip6_hdr
*);
8667 len
= chk
->send_size
= htons(ip6h
->ip6_plen
);
/* Clamp to what is actually present in the mbuf chain. */
8669 if ((len
+iphlen
) > m
->m_pkthdr
.len
) {
8671 chk
->send_size
= len
= m
->m_pkthdr
.len
- iphlen
;
8673 chk
->asoc
= &stcb
->asoc
;
/* Get an mbuf for the report; on failure, release the chunk descriptor. */
8674 MGETHDR(chk
->data
, MB_DONTWAIT
, MT_DATA
);
8675 if (chk
->data
== NULL
) {
8677 SCTP_ZONE_FREE(sctppcbinfo
.ipi_zone_chunk
, chk
);
8678 sctppcbinfo
.ipi_count_chunk
--;
8679 if ((int)sctppcbinfo
.ipi_count_chunk
< 0) {
8680 panic("Chunk count is negative");
8682 sctppcbinfo
.ipi_gencnt_chunk
++;
/* Upgrade to a cluster when header+payload exceeds a plain mbuf (MHLEN). */
8685 if ((chk
->send_size
+sizeof(struct sctp_pktdrop_chunk
)+SCTP_MIN_OVERHEAD
) > MHLEN
) {
8686 MCLGET(chk
->data
, MB_DONTWAIT
);
8687 if ((chk
->data
->m_flags
& M_EXT
) == 0) {
8689 sctp_m_freem(chk
->data
);
/* Reserve link/IP/SCTP header space up front. */
8694 chk
->data
->m_data
+= SCTP_MIN_OVERHEAD
;
8695 drp
= mtod(chk
->data
, struct sctp_pktdrop_chunk
*);
8697 sctp_m_freem(chk
->data
);
/* Cap the report at the smallest path MTU (and at one cluster). */
8701 small_one
= asoc
->smallest_mtu
;
8702 if (small_one
> MCLBYTES
) {
8703 /* Only one cluster worth of data MAX */
8704 small_one
= MCLBYTES
;
8706 chk
->book_size
= (chk
->send_size
+ sizeof(struct sctp_pktdrop_chunk
) +
8707 sizeof(struct sctphdr
) + SCTP_MED_OVERHEAD
);
/* Truncate the echoed packet if it will not fit; record original length. */
8708 if (chk
->book_size
> small_one
) {
8709 drp
->ch
.chunk_flags
= SCTP_PACKET_TRUNCATED
;
8710 drp
->trunc_len
= htons(chk
->send_size
);
8711 chk
->send_size
= small_one
- (SCTP_MED_OVERHEAD
+
8712 sizeof(struct sctp_pktdrop_chunk
) +
8713 sizeof(struct sctphdr
));
8714 len
= chk
->send_size
;
8716 /* no truncation needed */
8717 drp
->ch
.chunk_flags
= 0;
8718 drp
->trunc_len
= htons(0);
/* Flag a checksum failure on the dropped packet when requested by caller. */
8721 drp
->ch
.chunk_flags
|= SCTP_BADCRC
;
8723 chk
->send_size
+= sizeof(struct sctp_pktdrop_chunk
);
8724 chk
->data
->m_pkthdr
.len
= chk
->data
->m_len
= chk
->send_size
;
8725 chk
->sent
= SCTP_DATAGRAM_UNSENT
;
8728 /* we should hit here */
8731 chk
->whoTo
= asoc
->primary_destination
;
8733 chk
->whoTo
->ref_count
++;
8734 chk
->rec
.chunk_id
= SCTP_PACKET_DROPPED
;
8735 drp
->ch
.chunk_type
= SCTP_PACKET_DROPPED
;
8736 drp
->ch
.chunk_length
= htons(chk
->send_size
);
/* Advertise receive-window capacity and current on-queue bytes to the peer. */
8737 spc
= stcb
->sctp_socket
->so_rcv
.ssb_hiwat
;
8741 drp
->bottle_bw
= htonl(spc
);
8742 drp
->current_onq
= htonl(asoc
->size_on_delivery_queue
+
8743 asoc
->size_on_reasm_queue
+
8744 asoc
->size_on_all_streams
+
8745 asoc
->my_rwnd_control_len
+
8746 stcb
->sctp_socket
->so_rcv
.ssb_cc
);
/* Copy the (possibly truncated) dropped packet payload and enqueue. */
8749 m_copydata(m
, iphlen
, len
, datap
);
8750 TAILQ_INSERT_TAIL(&stcb
->asoc
.control_send_queue
, chk
, sctp_next
);
8751 asoc
->ctrl_queue_cnt
++;
/*
 * sctp_send_cwr(): queue an ECN CWR (Congestion Window Reduced) chunk
 * acknowledging ECN-ECHO up to `high_tsn`. Reuses a pending CWR chunk on
 * the control_send_queue if one exists, otherwise allocates a new one.
 * NOTE(review): extraction-mangled — original lines 8760-8761, 8767,
 * 8769-8772, 8775-8777, 8789, 8791-8792, 8796-8797 are missing (returns,
 * braces, whoTo assignment), so this body is incomplete as shown.
 */
8755 sctp_send_cwr(struct sctp_tcb
*stcb
, struct sctp_nets
*net
, uint32_t high_tsn
)
8757 struct sctp_association
*asoc
;
8758 struct sctp_cwr_chunk
*cwr
;
8759 struct sctp_tmit_chunk
*chk
;
/* First look for an un-sent CWR already queued; just raise its TSN. */
8762 TAILQ_FOREACH(chk
, &asoc
->control_send_queue
, sctp_next
) {
8763 if (chk
->rec
.chunk_id
== SCTP_ECN_CWR
) {
8764 /* found a previous ECN_CWR update it if needed */
8765 cwr
= mtod(chk
->data
, struct sctp_cwr_chunk
*);
/* Only advance the TSN if high_tsn is newer (serial-number arithmetic). */
8766 if (compare_with_wrap(high_tsn
, ntohl(cwr
->tsn
),
8768 cwr
->tsn
= htonl(high_tsn
);
8773 /* nope could not find one to update so we must build one */
8774 chk
= (struct sctp_tmit_chunk
*)SCTP_ZONE_GET(sctppcbinfo
.ipi_zone_chunk
);
8778 sctppcbinfo
.ipi_count_chunk
++;
8779 sctppcbinfo
.ipi_gencnt_chunk
++;
8780 chk
->rec
.chunk_id
= SCTP_ECN_CWR
;
8781 chk
->asoc
= &stcb
->asoc
;
8782 chk
->send_size
= sizeof(struct sctp_cwr_chunk
);
/* Grab an mbuf; on failure unwind the zone allocation and counters. */
8783 MGETHDR(chk
->data
, MB_DONTWAIT
, MT_DATA
);
8784 if (chk
->data
== NULL
) {
8785 SCTP_ZONE_FREE(sctppcbinfo
.ipi_zone_chunk
, chk
);
8786 sctppcbinfo
.ipi_count_chunk
--;
8787 if ((int)sctppcbinfo
.ipi_count_chunk
< 0) {
8788 panic("Chunk count is negative");
8790 sctppcbinfo
.ipi_gencnt_chunk
++;
/* Leave room for link/IP/SCTP headers, then size the mbuf to the chunk. */
8793 chk
->data
->m_data
+= SCTP_MIN_OVERHEAD
;
8794 chk
->data
->m_pkthdr
.len
= chk
->data
->m_len
= chk
->send_size
;
8795 chk
->sent
= SCTP_DATAGRAM_UNSENT
;
8798 chk
->whoTo
->ref_count
++;
/* Fill in the CWR chunk header and TSN, then enqueue it for sending. */
8799 cwr
= mtod(chk
->data
, struct sctp_cwr_chunk
*);
8800 cwr
->ch
.chunk_type
= SCTP_ECN_CWR
;
8801 cwr
->ch
.chunk_flags
= 0;
8802 cwr
->ch
.chunk_length
= htons(sizeof(struct sctp_cwr_chunk
));
8803 cwr
->tsn
= htonl(high_tsn
);
8804 TAILQ_INSERT_TAIL(&stcb
->asoc
.control_send_queue
, chk
, sctp_next
);
8805 asoc
->ctrl_queue_cnt
++;
/*
 * sctp_reset_the_streams(): zero next_sequence_sent (the outbound SSN) for
 * the requested outbound streams — all of them when SCTP_RESET_ALL is set,
 * otherwise only the streams named in `list` (skipping out-of-range ids) —
 * then notify the ULP via SCTP_NOTIFY_STR_RESET_SEND.
 * NOTE(review): extract is missing original lines 8810-8812, 8816,
 * 8821-8822, 8824-8825 (declaration of `i`, braces, continue), so the loop
 * bodies are incomplete as shown.
 */
8808 sctp_reset_the_streams(struct sctp_tcb
*stcb
,
8809 struct sctp_stream_reset_request
*req
, int number_entries
, uint16_t *list
)
8813 if (req
->reset_flags
& SCTP_RESET_ALL
) {
8814 for (i
=0; i
<stcb
->asoc
.streamoutcnt
; i
++) {
8815 stcb
->asoc
.strmout
[i
].next_sequence_sent
= 0;
8817 } else if (number_entries
) {
8818 for (i
=0; i
<number_entries
; i
++) {
/* Guard: ignore stream ids beyond streamoutcnt rather than indexing OOB. */
8819 if (list
[i
] >= stcb
->asoc
.streamoutcnt
) {
8820 /* no such stream */
8823 stcb
->asoc
.strmout
[(list
[i
])].next_sequence_sent
= 0;
8826 sctp_ulp_notify(SCTP_NOTIFY_STR_RESET_SEND
, stcb
, number_entries
, (void *)list
)
;
/*
 * sctp_send_str_reset_ack(): build and queue a STREAM-RESET response chunk
 * acknowledging the peer's stream reset request `req`. If the request's
 * sequence number is the next expected, the reset is actually performed
 * (our outbound streams and/or a reciprocal request); otherwise the request
 * is treated as a retransmission and simply re-acked.
 * NOTE(review): extraction-mangled — many original lines are missing
 * (e.g. 8840-8841, 8843-8844, 8846, 8848-8850, 8858, 8863, 8865-8866,
 * 8876, 8878, 8881-8886, 8888, 8892, 8896, 8898, 8901-8907, 8911-8912,
 * 8914-8915, 8919, 8921-8922, 8926, 8931, 8938-8940, 8943-8945, 8949,
 * 8953-8955, 8958, 8961-8962), so allocation-failure handling, braces and
 * the pad-byte loop body are absent here. Consult the full file.
 */
8830 sctp_send_str_reset_ack(struct sctp_tcb
*stcb
,
8831 struct sctp_stream_reset_request
*req
)
8833 struct sctp_association
*asoc
;
8834 struct sctp_stream_reset_resp
*strack
;
8835 struct sctp_tmit_chunk
*chk
;
8837 int number_entries
, i
;
8838 uint8_t two_way
=0, not_peer
=0;
8839 uint16_t *list
=NULL
;
8842 if (req
->reset_flags
& SCTP_RESET_ALL
)
/* Stream count = (parameter length - fixed request header) / entry size. */
8845 number_entries
= (ntohs(req
->ph
.param_length
) - sizeof(struct sctp_stream_reset_request
)) / sizeof(uint16_t);
8847 chk
= (struct sctp_tmit_chunk
*)SCTP_ZONE_GET(sctppcbinfo
.ipi_zone_chunk
);
8851 sctppcbinfo
.ipi_count_chunk
++;
8852 sctppcbinfo
.ipi_gencnt_chunk
++;
8853 chk
->rec
.chunk_id
= SCTP_STREAM_RESET
;
8854 chk
->asoc
= &stcb
->asoc
;
8855 chk
->send_size
= sizeof(struct sctp_stream_reset_resp
) + (number_entries
* sizeof(uint16_t));
/* Get an mbuf; on failure release the chunk descriptor and counters. */
8856 MGETHDR(chk
->data
, MB_DONTWAIT
, MT_DATA
);
8857 if (chk
->data
== NULL
) {
8859 SCTP_ZONE_FREE(sctppcbinfo
.ipi_zone_chunk
, chk
);
8860 sctppcbinfo
.ipi_count_chunk
--;
8861 if ((int)sctppcbinfo
.ipi_count_chunk
< 0) {
8862 panic("Chunk count is negative");
8864 sctppcbinfo
.ipi_gencnt_chunk
++;
/* Reserve header space; mbuf length is the 32-bit-padded chunk size. */
8867 chk
->data
->m_data
+= SCTP_MIN_OVERHEAD
;
8868 chk
->data
->m_pkthdr
.len
= chk
->data
->m_len
= SCTP_SIZE32(chk
->send_size
);
/* Not enough room in a plain mbuf: try a cluster, re-reserve, re-check. */
8869 if (M_TRAILINGSPACE(chk
->data
) < (int)SCTP_SIZE32(chk
->send_size
)) {
8870 MCLGET(chk
->data
, MB_DONTWAIT
);
8871 if ((chk
->data
->m_flags
& M_EXT
) == 0) {
8873 sctp_m_freem(chk
->data
);
8875 goto strresp_jump_out
;
8877 chk
->data
->m_data
+= SCTP_MIN_OVERHEAD
;
8879 if (M_TRAILINGSPACE(chk
->data
) < (int)SCTP_SIZE32(chk
->send_size
)) {
8880 /* can't do it, no room */
8882 sctp_m_freem(chk
->data
);
8884 goto strresp_jump_out
;
8887 chk
->sent
= SCTP_DATAGRAM_UNSENT
;
8889 chk
->whoTo
= asoc
->primary_destination
;
8890 chk
->whoTo
->ref_count
++;
/* Fill in the response chunk header and response parameter header. */
8891 strack
= mtod(chk
->data
, struct sctp_stream_reset_resp
*);
8893 strack
->ch
.chunk_type
= SCTP_STREAM_RESET
;
8894 strack
->ch
.chunk_flags
= 0;
8895 strack
->ch
.chunk_length
= htons(chk
->send_size
);
8897 memset(strack
->sr_resp
.reset_pad
, 0, sizeof(strack
->sr_resp
.reset_pad
));
8899 strack
->sr_resp
.ph
.param_type
= ntohs(SCTP_STR_RESET_RESPONSE
);
8900 strack
->sr_resp
.ph
.param_length
= htons((chk
->send_size
- sizeof(struct sctp_chunkhdr
)));
/* Zero-pad the chunk out to a 4-byte boundary. */
8904 if (chk
->send_size
% 4) {
8905 /* need a padding for the end */
8908 end
= (uint8_t *)((caddr_t
)strack
+ chk
->send_size
);
8909 pad
= chk
->send_size
% 4;
8910 for (i
= 0; i
< pad
; i
++) {
8913 chk
->send_size
+= pad
;
8916 /* actual response */
8917 if (req
->reset_flags
& SCTP_RESET_YOUR
) {
8918 strack
->sr_resp
.reset_flags
= SCTP_RESET_PERFORMED
;
8920 strack
->sr_resp
.reset_flags
= 0;
8923 /* copied from reset request */
8924 strack
->sr_resp
.reset_req_seq_resp
= req
->reset_req_seq
;
8925 seq
= ntohl(req
->reset_req_seq
);
8927 list
= req
->list_of_streams
;
8928 /* copy the un-converted network byte order streams */
8929 for (i
=0; i
<number_entries
; i
++) {
8930 strack
->sr_resp
.list_of_streams
[i
] = list
[i
];
/* New (next-expected) request: perform the reset and record the TSN edge. */
8932 if (asoc
->str_reset_seq_in
== seq
) {
8933 /* is it the next expected? */
8934 asoc
->str_reset_seq_in
++;
8935 strack
->sr_resp
.reset_at_tsn
= htonl(asoc
->sending_seq
);
8936 asoc
->str_reset_sending_seq
= asoc
->sending_seq
;
8937 if (number_entries
) {
8940 /* convert them to host byte order */
8941 for (i
=0 ; i
<number_entries
; i
++) {
8942 temp
= ntohs(list
[i
]);
8946 if (req
->reset_flags
& SCTP_RESET_YOUR
) {
8947 /* reset my outbound streams */
8948 sctp_reset_the_streams(stcb
, req
, number_entries
, list
);
8950 if (req
->reset_flags
& SCTP_RECIPRICAL
) {
8951 /* reset peer too */
8952 sctp_send_str_reset_req(stcb
, number_entries
, list
, two_way
, not_peer
);
8956 /* no its a retran so I must just ack and do nothing */
8957 strack
->sr_resp
.reset_at_tsn
= htonl(asoc
->str_reset_sending_seq
);
8959 strack
->sr_resp
.cumulative_tsn
= htonl(asoc
->cumulative_tsn
);
8960 TAILQ_INSERT_TAIL(&asoc
->control_send_queue
,
8963 asoc
->ctrl_queue_cnt
++;
/*
 * sctp_send_str_reset_req(): build and queue an outgoing STREAM-RESET
 * request chunk, set the reset flags from (two_way, not_peer) per the table
 * in the original comment below, start the STRRESET timer, and mark one
 * reset as outstanding (only one may be pending at a time).
 * NOTE(review): extraction-mangled — original lines 8970, 8976, 8983,
 * 8987-8989, 8993-8996, 8999-9001, 9003-9005, 9013, 9018, 9020-9021,
 * 9027, 9029, 9031, 9033, 9036, 9038, 9040, 9042, 9045, 9050, 9053,
 * 9056-9057, 9061-9062, 9064-9065, 9069-9070, 9072, 9075, 9077-9078,
 * 9083, 9086-9087, 9089-9090 are missing (returns, braces, failure
 * handling, pad-loop body, TAILQ arguments). The multi-line comment at
 * 8971 is also left unterminated by the extraction. Consult the full file.
 */
8968 sctp_send_str_reset_req(struct sctp_tcb
*stcb
,
8969 int number_entrys
, uint16_t *list
, uint8_t two_way
, uint8_t not_peer
)
8971 /* Send a stream reset request. The number_entrys may be 0 and list NULL
8972 * if the request is to reset all streams. If two_way is true then we
8973 * not only request a RESET of the received streams but we also
8974 * request the peer to send a reset req to us too.
8975 * Flag combinations in table:
8977 * two_way | not_peer | = | Flags
8978 * ------------------------------
8979 * 0 | 0 | = | SCTP_RESET_YOUR (just the peer)
8980 * 1 | 0 | = | SCTP_RESET_YOUR | SCTP_RECIPRICAL (both sides)
8981 * 0 | 1 | = | Not a Valid Request (not anyone)
8982 * 1 | 1 | = | SCTP_RESET_RECIPRICAL (Just local host)
8984 struct sctp_association
*asoc
;
8985 struct sctp_stream_reset_req
*strreq
;
8986 struct sctp_tmit_chunk
*chk
;
/* Only one stream-reset request may be outstanding per association. */
8990 if (asoc
->stream_reset_outstanding
) {
8991 /* Already one pending, must get ACK back
8992 * to clear the flag.
8997 if ((two_way
== 0) && (not_peer
== 1)) {
8998 /* not a valid request */
9002 chk
= (struct sctp_tmit_chunk
*)SCTP_ZONE_GET(sctppcbinfo
.ipi_zone_chunk
);
9006 sctppcbinfo
.ipi_count_chunk
++;
9007 sctppcbinfo
.ipi_gencnt_chunk
++;
9008 chk
->rec
.chunk_id
= SCTP_STREAM_RESET
;
9009 chk
->asoc
= &stcb
->asoc
;
9010 chk
->send_size
= sizeof(struct sctp_stream_reset_req
) + (number_entrys
* sizeof(uint16_t));
/* Get an mbuf; on failure unwind the zone allocation and counters. */
9011 MGETHDR(chk
->data
, MB_DONTWAIT
, MT_DATA
);
9012 if (chk
->data
== NULL
) {
9014 SCTP_ZONE_FREE(sctppcbinfo
.ipi_zone_chunk
, chk
);
9015 sctppcbinfo
.ipi_count_chunk
--;
9016 if ((int)sctppcbinfo
.ipi_count_chunk
< 0) {
9017 panic("Chunk count is negative");
9019 sctppcbinfo
.ipi_gencnt_chunk
++;
/* Reserve header space; mbuf length is the 32-bit-padded chunk size. */
9022 chk
->data
->m_data
+= SCTP_MIN_OVERHEAD
;
9023 chk
->data
->m_pkthdr
.len
= chk
->data
->m_len
= SCTP_SIZE32(chk
->send_size
);
/* Not enough room in a plain mbuf: try a cluster, re-reserve, re-check. */
9024 if (M_TRAILINGSPACE(chk
->data
) < (int)SCTP_SIZE32(chk
->send_size
)) {
9025 MCLGET(chk
->data
, MB_DONTWAIT
);
9026 if ((chk
->data
->m_flags
& M_EXT
) == 0) {
9028 sctp_m_freem(chk
->data
);
9030 goto strreq_jump_out
;
9032 chk
->data
->m_data
+= SCTP_MIN_OVERHEAD
;
9034 if (M_TRAILINGSPACE(chk
->data
) < (int)SCTP_SIZE32(chk
->send_size
)) {
9035 /* can't do it, no room */
9037 sctp_m_freem(chk
->data
);
9039 goto strreq_jump_out
;
9041 chk
->sent
= SCTP_DATAGRAM_UNSENT
;
9043 chk
->whoTo
= asoc
->primary_destination
;
9044 chk
->whoTo
->ref_count
++;
/* Fill in the request chunk header and request parameter header. */
9046 strreq
= mtod(chk
->data
, struct sctp_stream_reset_req
*);
9047 strreq
->ch
.chunk_type
= SCTP_STREAM_RESET
;
9048 strreq
->ch
.chunk_flags
= 0;
9049 strreq
->ch
.chunk_length
= htons(chk
->send_size
);
9051 strreq
->sr_req
.ph
.param_type
= ntohs(SCTP_STR_RESET_REQUEST
);
9052 strreq
->sr_req
.ph
.param_length
= htons((chk
->send_size
- sizeof(struct sctp_chunkhdr
)));
/* Zero-pad the chunk out to a 4-byte boundary. */
9054 if (chk
->send_size
% 4) {
9055 /* need a padding for the end */
9058 end
= (uint8_t *)((caddr_t
)strreq
+ chk
->send_size
);
9059 pad
= chk
->send_size
% 4;
9060 for (i
=0; i
<pad
; i
++) {
9063 chk
->send_size
+= pad
;
/* Encode the (two_way, not_peer) combination — see table above. */
9066 strreq
->sr_req
.reset_flags
= 0;
9067 if (number_entrys
== 0) {
9068 strreq
->sr_req
.reset_flags
|= SCTP_RESET_ALL
;
9071 strreq
->sr_req
.reset_flags
|= SCTP_RESET_YOUR
;
9073 if (not_peer
== 0) {
9074 strreq
->sr_req
.reset_flags
|= SCTP_RECIPRICAL
| SCTP_RESET_YOUR
;
9076 strreq
->sr_req
.reset_flags
|= SCTP_RECIPRICAL
;
9079 memset(strreq
->sr_req
.reset_pad
, 0, sizeof(strreq
->sr_req
.reset_pad
));
9080 strreq
->sr_req
.reset_req_seq
= htonl(asoc
->str_reset_seq_out
);
9081 if (number_entrys
) {
9082 /* populate the specific entry's */
9084 for (i
=0; i
< number_entrys
; i
++) {
9085 strreq
->sr_req
.list_of_streams
[i
] = htons(list
[i
]);
/* Enqueue, arm the stream-reset retransmission timer, mark outstanding. */
9088 TAILQ_INSERT_TAIL(&asoc
->control_send_queue
,
9091 asoc
->ctrl_queue_cnt
++;
9092 sctp_timer_start(SCTP_TIMER_TYPE_STRRESET
, stcb
->sctp_ep
, stcb
, chk
->whoTo
);
9093 asoc
->stream_reset_outstanding
= 1;
/*
 * sctp_send_abort(): formulate an out-of-the-blue ABORT in response to the
 * received packet `m` (for which no TCB exists) and transmit it directly
 * via ip_output()/ip6_output(), swapping the source/destination addresses
 * and ports from the offending packet. `err_cause` (may be NULL) is an
 * mbuf chain of error-cause parameters appended to the ABORT.
 * NOTE(review): extraction-mangled — many original lines are missing
 * (e.g. 9099-9100, 9102-9103, 9107-9108, 9111, 9113-9114, 9116-9117,
 * 9119-9120, 9122-9123, 9128, 9133, 9141, 9149, 9156, 9159, 9161-9163,
 * 9167, 9170, 9173, 9175-9176, 9178, 9183, 9185, 9187-9188, 9191, 9193,
 * 9196-9198, 9203, 9205-9206, 9210-9211, 9214, 9218-9219, 9223, 9225-9226,
 * 9230-9232, 9234-9235, 9238-9239, 9241-9242, 9245, 9249-9250, 9253-9254,
 * 9256-9258, 9260-9262), so returns, braces, route declarations and the
 * trailing argument lists of ip_output()/ip6_output() are absent here.
 */
9097 sctp_send_abort(struct mbuf
*m
, int iphlen
, struct sctphdr
*sh
, uint32_t vtag
,
9098 struct mbuf
*err_cause
)
9101 * Formulate the abort message, and send it back down.
9104 struct sctp_abort_msg
*abm
;
9105 struct ip
*iph
, *iph_out
;
9106 struct ip6_hdr
*ip6
, *ip6_out
;
9109 /* don't respond to ABORT with ABORT */
9110 if (sctp_is_there_an_abort_here(m
, iphlen
, &vtag
)) {
9112 sctp_m_freem(err_cause
);
/* Allocate the outgoing packet header mbuf; free err_cause on failure. */
9115 MGETHDR(mout
, MB_DONTWAIT
, MT_HEADER
);
9118 sctp_m_freem(err_cause
);
9121 iph
= mtod(m
, struct ip
*);
/* IPv4 reply: build the IP header with src/dst swapped from the input. */
9124 if (iph
->ip_v
== IPVERSION
) {
9125 iph_out
= mtod(mout
, struct ip
*);
9126 mout
->m_len
= sizeof(*iph_out
) + sizeof(*abm
);
9127 mout
->m_next
= err_cause
;
9129 /* Fill in the IP header for the ABORT */
9130 iph_out
->ip_v
= IPVERSION
;
9131 iph_out
->ip_hl
= (sizeof(struct ip
) / 4);
9132 iph_out
->ip_tos
= (u_char
)0;
9134 iph_out
->ip_off
= 0;
9135 iph_out
->ip_ttl
= MAXTTL
;
9136 iph_out
->ip_p
= IPPROTO_SCTP
;
9137 iph_out
->ip_src
.s_addr
= iph
->ip_dst
.s_addr
;
9138 iph_out
->ip_dst
.s_addr
= iph
->ip_src
.s_addr
;
9139 /* let IP layer calculate this */
9140 iph_out
->ip_sum
= 0;
9142 iphlen_out
= sizeof(*iph_out
);
9143 abm
= (struct sctp_abort_msg
*)((caddr_t
)iph_out
+ iphlen_out
);
/* IPv6 reply: same idea with an ip6 header, addresses swapped. */
9144 } else if (iph
->ip_v
== (IPV6_VERSION
>> 4)) {
9145 ip6
= (struct ip6_hdr
*)iph
;
9146 ip6_out
= mtod(mout
, struct ip6_hdr
*);
9147 mout
->m_len
= sizeof(*ip6_out
) + sizeof(*abm
);
9148 mout
->m_next
= err_cause
;
9150 /* Fill in the IP6 header for the ABORT */
9151 ip6_out
->ip6_flow
= ip6
->ip6_flow
;
9152 ip6_out
->ip6_hlim
= ip6_defhlim
;
9153 ip6_out
->ip6_nxt
= IPPROTO_SCTP
;
9154 ip6_out
->ip6_src
= ip6
->ip6_dst
;
9155 ip6_out
->ip6_dst
= ip6
->ip6_src
;
9157 iphlen_out
= sizeof(*ip6_out
);
9158 abm
= (struct sctp_abort_msg
*)((caddr_t
)ip6_out
+ iphlen_out
);
9160 /* Currently not supported */
/* SCTP common header: ports swapped; echo the peer's v_tag if known,
 * else reflect vtag with the T-bit (SCTP_HAD_NO_TCB) set. */
9164 abm
->sh
.src_port
= sh
->dest_port
;
9165 abm
->sh
.dest_port
= sh
->src_port
;
9166 abm
->sh
.checksum
= 0;
9168 abm
->sh
.v_tag
= sh
->v_tag
;
9169 abm
->msg
.ch
.chunk_flags
= SCTP_HAD_NO_TCB
;
9171 abm
->sh
.v_tag
= htonl(vtag
);
9172 abm
->msg
.ch
.chunk_flags
= 0;
9174 abm
->msg
.ch
.chunk_type
= SCTP_ABORT_ASSOCIATION
;
9177 struct mbuf
*m_tmp
= err_cause
;
9179 /* get length of the err_cause chain */
9180 while (m_tmp
!= NULL
) {
9181 err_len
+= m_tmp
->m_len
;
9182 m_tmp
= m_tmp
->m_next
;
9184 mout
->m_pkthdr
.len
= mout
->m_len
+ err_len
;
9186 /* need pad at end of chunk */
9189 padlen
= 4 - (mout
->m_pkthdr
.len
% 4);
9190 m_copyback(mout
, mout
->m_pkthdr
.len
, padlen
, (caddr_t
)&cpthis
);
9192 abm
->msg
.ch
.chunk_length
= htons(sizeof(abm
->msg
.ch
) + err_len
);
9194 mout
->m_pkthdr
.len
= mout
->m_len
;
9195 abm
->msg
.ch
.chunk_length
= htons(sizeof(abm
->msg
.ch
));
/* Skip the SCTP checksum on loopback when configured to do so. */
9199 if ((sctp_no_csum_on_loopback
) &&
9200 (m
->m_pkthdr
.rcvif
) &&
9201 (m
->m_pkthdr
.rcvif
->if_type
== IFT_LOOP
)) {
9202 abm
->sh
.checksum
= 0;
9204 abm
->sh
.checksum
= sctp_calculate_sum(mout
, NULL
, iphlen_out
);
9207 /* zap the rcvif, it should be null */
9208 mout
->m_pkthdr
.rcvif
= 0;
/* Transmit via the family-appropriate output routine with a fresh route. */
9209 if (iph_out
!= NULL
) {
9212 /* zap the stack pointer to the route */
9213 bzero(&ro
, sizeof ro
);
9215 if (sctp_debug_on
& SCTP_DEBUG_OUTPUT2
) {
9216 kprintf("sctp_send_abort calling ip_output:\n");
9217 sctp_print_address_pkt(iph_out
, &abm
->sh
);
9220 /* set IPv4 length */
9221 #if defined(__FreeBSD__)
9222 iph_out
->ip_len
= mout
->m_pkthdr
.len
;
9224 iph_out
->ip_len
= htons(mout
->m_pkthdr
.len
);
9227 ip_output(mout
, 0, &ro
, IP_RAWOUTPUT
, NULL
9228 #if defined(__OpenBSD__) || (defined(__FreeBSD__) && __FreeBSD_version >= 480000) \
9229 || defined(__NetBSD__) || defined(__DragonFly__)
9233 /* Free the route if we got one back */
9236 } else if (ip6_out
!= NULL
) {
9237 #ifdef NEW_STRUCT_ROUTE
9240 struct route_in6 ro
;
9243 /* zap the stack pointer to the route */
9244 bzero(&ro
, sizeof(ro
));
9246 if (sctp_debug_on
& SCTP_DEBUG_OUTPUT2
) {
9247 kprintf("sctp_send_abort calling ip6_output:\n");
9248 sctp_print_address_pkt((struct ip
*)ip6_out
, &abm
->sh
);
9251 ip6_output(mout
, NULL
, &ro
, 0, NULL
, NULL
9252 #if defined(__NetBSD__)
9255 #if (defined(__FreeBSD__) && __FreeBSD_version >= 480000) || defined(__DragonFly__)
9259 /* Free the route if we got one back */
9263 sctp_pegs
[SCTP_DATAGRAMS_SENT
]++;
/*
 * sctp_send_operr_to(): wrap the supplied error-cause chain (`scm`, an
 * argument truncated out of this extract) in an OPERATION-ERROR chunk plus
 * SCTP common header, prepend a fresh IPv4 or IPv6 header with the
 * addresses/ports of the offending packet `m` swapped, and transmit it
 * directly via ip_output()/ip6_output().
 * NOTE(review): extraction-mangled — the remainder of the parameter list
 * (original lines 9268-9270) and many body lines are missing (e.g. 9272,
 * 9275-9277, 9279-9280, 9286-9288, 9290, 9292-9293, 9297-9298, 9304-9306,
 * 9309, 9313-9314, 9316, 9319-9321, 9323-9324, 9331, 9334, 9339, 9341,
 * 9345-9347, 9350-9353, 9355-9356, 9358, 9360, 9362-9363, 9372-9373,
 * 9384, 9386, 9388, 9392-9397, 9400-...), so braces, returns, route
 * declarations and output-call tails are absent here.
 */
9267 sctp_send_operr_to(struct mbuf
*m
, int iphlen
,
9271 struct sctphdr
*ihdr
;
9273 struct sctphdr
*ohdr
;
9274 struct sctp_chunkhdr
*ophdr
;
9278 struct sockaddr_in6 lsa6
, fsa6
;
9281 iph
= mtod(m
, struct ip
*);
9282 ihdr
= (struct sctphdr
*)((caddr_t
)iph
+ iphlen
);
/* The cause chain must be a packet-header mbuf or we cannot prepend. */
9283 if (!(scm
->m_flags
& M_PKTHDR
)) {
9284 /* must be a pkthdr */
9285 kprintf("Huh, not a packet header in send_operr\n");
/* Make room for the SCTP common header + OPERATION-ERROR chunk header. */
9289 M_PREPEND(scm
, (sizeof(struct sctphdr
) + sizeof(struct sctp_chunkhdr
)), MB_DONTWAIT
);
9291 /* can't send because we can't add a mbuf */
9294 ohdr
= mtod(scm
, struct sctphdr
*);
9295 ohdr
->src_port
= ihdr
->dest_port
;
9296 ohdr
->dest_port
= ihdr
->src_port
;
9299 ophdr
= (struct sctp_chunkhdr
*)(ohdr
+ 1);
9300 ophdr
->chunk_type
= SCTP_OPERATION_ERROR
;
9301 ophdr
->chunk_flags
= 0;
9302 ophdr
->chunk_length
= htons(scm
->m_pkthdr
.len
- sizeof(struct sctphdr
));
/* Zero-pad the packet out to a 4-byte boundary. */
9303 if (scm
->m_pkthdr
.len
% 4) {
9307 padlen
= 4 - (scm
->m_pkthdr
.len
% 4);
9308 m_copyback(scm
, scm
->m_pkthdr
.len
, padlen
, (caddr_t
)&cpthis
);
/* Compute the SCTP checksum (skipped on loopback when configured). */
9310 if ((sctp_no_csum_on_loopback
) &&
9311 (m
->m_pkthdr
.rcvif
) &&
9312 (m
->m_pkthdr
.rcvif
->if_type
== IFT_LOOP
)) {
9315 val
= sctp_calculate_sum(scm
, NULL
, 0);
9317 ohdr
->checksum
= val
;
/* IPv4 path: prepend and fill an IP header, addresses swapped, and send. */
9318 if (iph
->ip_v
== IPVERSION
) {
9322 M_PREPEND(scm
, sizeof(struct ip
), MB_DONTWAIT
);
9325 bzero(&ro
, sizeof ro
);
9326 out
= mtod(scm
, struct ip
*);
9327 out
->ip_v
= iph
->ip_v
;
9328 out
->ip_hl
= (sizeof(struct ip
)/4);
9329 out
->ip_tos
= iph
->ip_tos
;
9330 out
->ip_id
= iph
->ip_id
;
9332 out
->ip_ttl
= MAXTTL
;
9333 out
->ip_p
= IPPROTO_SCTP
;
9335 out
->ip_src
= iph
->ip_dst
;
9336 out
->ip_dst
= iph
->ip_src
;
9337 #if defined(__FreeBSD__)
9338 out
->ip_len
= scm
->m_pkthdr
.len
;
9340 out
->ip_len
= htons(scm
->m_pkthdr
.len
);
9342 retcode
= ip_output(scm
, 0, &ro
, IP_RAWOUTPUT
, NULL
9343 #if defined(__OpenBSD__) || (defined(__FreeBSD__) && __FreeBSD_version >= 480000) \
9344 || defined(__NetBSD__) || defined(__DragonFly__)
9348 sctp_pegs
[SCTP_DATAGRAMS_SENT
]++;
9349 /* Free the route if we got one back */
/* IPv6 path: prepend an ip6 header, build debug sockaddrs, and send. */
9354 #ifdef NEW_STRUCT_ROUTE
9357 struct route_in6 ro
;
9359 struct ip6_hdr
*out6
, *in6
;
9361 M_PREPEND(scm
, sizeof(struct ip6_hdr
), MB_DONTWAIT
);
9364 bzero(&ro
, sizeof ro
);
9365 in6
= mtod(m
, struct ip6_hdr
*);
9366 out6
= mtod(scm
, struct ip6_hdr
*);
9367 out6
->ip6_flow
= in6
->ip6_flow
;
9368 out6
->ip6_hlim
= ip6_defhlim
;
9369 out6
->ip6_nxt
= IPPROTO_SCTP
;
9370 out6
->ip6_src
= in6
->ip6_dst
;
9371 out6
->ip6_dst
= in6
->ip6_src
;
/* Local/foreign sockaddr_in6 copies used only for debug printing below. */
9374 bzero(&lsa6
, sizeof(lsa6
));
9375 lsa6
.sin6_len
= sizeof(lsa6
);
9376 lsa6
.sin6_family
= AF_INET6
;
9377 lsa6
.sin6_addr
= out6
->ip6_src
;
9378 bzero(&fsa6
, sizeof(fsa6
));
9379 fsa6
.sin6_len
= sizeof(fsa6
);
9380 fsa6
.sin6_family
= AF_INET6
;
9381 fsa6
.sin6_addr
= out6
->ip6_dst
;
9382 if (sctp_debug_on
& SCTP_DEBUG_OUTPUT2
) {
9383 kprintf("sctp_operr_to calling ipv6 output:\n");
9385 sctp_print_address((struct sockaddr
*)&lsa6
);
9387 sctp_print_address((struct sockaddr
*)&fsa6
);
9389 #endif /* SCTP_DEBUG */
9390 ip6_output(scm
, NULL
, &ro
, 0, NULL
, NULL
9391 #if defined(__NetBSD__)
9394 #if (defined(__FreeBSD__) && __FreeBSD_version >= 480000) || defined(__DragonFly__)
9398 sctp_pegs
[SCTP_DATAGRAMS_SENT
]++;
9399 /* Free the route if we got one back */
/*
 * sctp_copy_one(): copy `cpsz` bytes of user data from `uio` into the mbuf
 * chain starting at `m`, reserving `resv_upfront` bytes of leading space in
 * the first mbuf for headers, growing the chain with clusters/mbufs as
 * needed and accumulating the allocated external-storage bytes in *mbcnt.
 * NOTE(review): extraction-mangled — original lines 9407, 9409-9416,
 * 9418-9422, 9424-9426, 9428-9429, 9434-9435, 9440, 9442-9443, 9445,
 * 9447-9453, 9456-9461, 9463-9467, 9469-9471, 9473, and everything past
 * 9475 are missing (initialization of `left`, the copy loop structure,
 * error returns and the function tail), so this body is incomplete.
 */
9406 sctp_copy_one(struct mbuf
*m
, struct uio
*uio
, int cpsz
, int resv_upfront
, int *mbcnt
)
9408 int left
, cancpy
, willcpy
, error
;
/* First mbuf: if data + reserved headroom exceeds MHLEN, use a cluster. */
9417 if ((left
+resv_upfront
) > (int)MHLEN
) {
9423 if ((m
->m_flags
& M_EXT
) == 0) {
9427 *mbcnt
+= m
->m_ext
.ext_size
;
/* Copy at most the trailing space, less the up-front reservation. */
9430 cancpy
= M_TRAILINGSPACE(m
);
9431 willcpy
= min(cancpy
, left
);
9432 if ((willcpy
+ resv_upfront
) > cancpy
) {
9433 willcpy
-= resv_upfront
;
9436 /* Align data to the end */
9437 if ((m
->m_flags
& M_EXT
) == 0) {
9438 if (m
->m_flags
& M_PKTHDR
) {
9439 MH_ALIGN(m
, willcpy
);
9441 M_ALIGN(m
, willcpy
);
9444 MC_ALIGN(m
, willcpy
);
/* Move user bytes into this mbuf; uiomove() may sleep. */
9446 error
= uiomove(mtod(m
, caddr_t
), willcpy
, uio
);
/* Extend the chain for the remainder (MB_WAIT: may block for an mbuf). */
9454 MGET(m
->m_next
, MB_WAIT
, MT_DATA
);
9455 if (m
->m_next
== NULL
) {
9462 if (left
> (int)MHLEN
) {
9468 if ((m
->m_flags
& M_EXT
) == 0) {
9472 *mbcnt
+= m
->m_ext
.ext_size
;
9474 cancpy
= M_TRAILINGSPACE(m
);
9475 willcpy
= min(cancpy
, left
);
9482 sctp_copy_it_in(struct sctp_inpcb
*inp
,
9483 struct sctp_tcb
*stcb
,
9484 struct sctp_association
*asoc
,
9485 struct sctp_nets
*net
,
9486 struct sctp_sndrcvinfo
*srcv
,
9490 /* This routine must be very careful in
9491 * its work. Protocol processing is
9492 * up and running so care must be taken to
9493 * spl...() when you need to do something
9494 * that may effect the stcb/asoc. The sb is
9495 * locked however. When data is copied the
9496 * protocol processing should be enabled since
9497 * this is a slower operation...
9501 int frag_size
, mbcnt
= 0, mbcnt_e
= 0;
9502 unsigned int sndlen
;
9503 unsigned int tot_demand
;
9504 int tot_out
, dataout
;
9505 struct sctp_tmit_chunk
*chk
;
9507 struct sctp_stream_out
*strq
;
9512 so
= stcb
->sctp_socket
;
9516 sndlen
= uio
->uio_resid
;
9517 /* lock the socket buf */
9518 SOCKBUF_LOCK(&so
->so_snd
);
9519 error
= ssb_lock(&so
->so_snd
, SBLOCKWAIT(flags
));
9523 /* will it ever fit ? */
9524 if (sndlen
> so
->so_snd
.ssb_hiwat
) {
9525 /* It will NEVER fit */
9530 /* Do I need to block? */
9531 if ((so
->so_snd
.ssb_hiwat
<
9532 (sndlen
+ asoc
->total_output_queue_size
)) ||
9533 (asoc
->chunks_on_out_queue
> sctp_max_chunks_on_queue
) ||
9534 (asoc
->total_output_mbuf_queue_size
>
9535 so
->so_snd
.ssb_mbmax
)
9537 /* prune any prsctp bufs out */
9538 if (asoc
->peer_supports_prsctp
) {
9539 sctp_prune_prsctp(stcb
, asoc
, srcv
, sndlen
);
9542 * We store off a pointer to the endpoint.
9543 * Since on return from this we must check to
9544 * see if an so_error is set. If so we may have
9545 * been reset and our stcb destroyed. Returning
9546 * an error will flow back to the user...
9548 while ((so
->so_snd
.ssb_hiwat
<
9549 (sndlen
+ asoc
->total_output_queue_size
)) ||
9550 (asoc
->chunks_on_out_queue
>
9551 sctp_max_chunks_on_queue
) ||
9552 (asoc
->total_output_mbuf_queue_size
>
9553 so
->so_snd
.ssb_mbmax
)
9555 if (flags
& (MSG_FNONBLOCKING
|MSG_DONTWAIT
)) {
9556 /* Non-blocking io in place */
9557 error
= EWOULDBLOCK
;
9560 inp
->sctp_tcb_at_block
= (void *)stcb
;
9561 inp
->error_on_block
= 0;
9562 #ifdef SCTP_BLK_LOGGING
9563 sctp_log_block(SCTP_BLOCK_LOG_INTO_BLK
,
9566 ssb_unlock(&so
->so_snd
);
9567 SCTP_TCB_UNLOCK(stcb
);
9568 error
= ssb_wait(&so
->so_snd
);
9569 SCTP_INP_RLOCK(inp
);
9570 if ((inp
->sctp_flags
& SCTP_PCB_FLAGS_SOCKET_GONE
) ||
9571 (inp
->sctp_flags
& SCTP_PCB_FLAGS_SOCKET_GONE
)) {
9572 /* Should I really unlock ? */
9573 SCTP_INP_RUNLOCK(inp
);
9577 SCTP_TCB_LOCK(stcb
);
9578 SCTP_INP_RUNLOCK(inp
);
9580 inp
->sctp_tcb_at_block
= 0;
9581 #ifdef SCTP_BLK_LOGGING
9582 sctp_log_block(SCTP_BLOCK_LOG_OUTOF_BLK
,
9585 if (inp
->error_on_block
) {
9587 * if our asoc was killed, the free code
9588 * (in sctp_pcb.c) will save a error in
9591 error
= inp
->error_on_block
;
9599 /* did we encounter a socket error? */
9601 error
= so
->so_error
;
9605 error
= ssb_lock(&so
->so_snd
, M_WAITOK
);
9607 /* Can't aquire the lock */
9611 #if defined(__FreeBSD__) && __FreeBSD_version >= 502115
9612 if (so
->so_rcv
.sb_state
& SBS_CANTSENDMORE
) {
9614 if (so
->so_state
& SS_CANTSENDMORE
) {
9616 /* The socket is now set not to sendmore.. its gone */
9622 error
= so
->so_error
;
9626 if (asoc
->peer_supports_prsctp
) {
9627 sctp_prune_prsctp(stcb
, asoc
, srcv
, sndlen
);
9631 dataout
= tot_out
= uio
->uio_resid
;
9632 if (inp
->sctp_flags
& SCTP_PCB_FLAGS_BOUND_V6
) {
9633 resv_in_first
= SCTP_MED_OVERHEAD
;
9635 resv_in_first
= SCTP_MED_V4_OVERHEAD
;
9638 /* Are we aborting? */
9639 if (srcv
->sinfo_flags
& MSG_ABORT
) {
9640 if ((SCTP_GET_STATE(asoc
) != SCTP_STATE_COOKIE_WAIT
) &&
9641 (SCTP_GET_STATE(asoc
) != SCTP_STATE_COOKIE_ECHOED
)) {
9642 /* It has to be up before we abort */
9643 /* how big is the user initiated abort? */
9645 /* I wonder about doing a MGET without a splnet set.
9646 * it is done that way in the sosend code so I guess
9649 MGETHDR(mm
, MB_WAIT
, MT_DATA
);
9651 struct sctp_paramhdr
*ph
;
9653 tot_demand
= (tot_out
+ sizeof(struct sctp_paramhdr
));
9654 if (tot_demand
> MHLEN
) {
9655 if (tot_demand
> MCLBYTES
) {
9656 /* truncate user data */
9657 tot_demand
= MCLBYTES
;
9658 tot_out
= tot_demand
- sizeof(struct sctp_paramhdr
);
9660 MCLGET(mm
, MB_WAIT
);
9661 if ((mm
->m_flags
& M_EXT
) == 0) {
9662 /* truncate further */
9664 tot_out
= tot_demand
- sizeof(struct sctp_paramhdr
);
9667 /* now move forward the data pointer */
9668 ph
= mtod(mm
, struct sctp_paramhdr
*);
9669 ph
->param_type
= htons(SCTP_CAUSE_USER_INITIATED_ABT
);
9670 ph
->param_length
= htons((sizeof(struct sctp_paramhdr
) + tot_out
));
9672 mm
->m_pkthdr
.len
= tot_out
+ sizeof(struct sctp_paramhdr
);
9673 mm
->m_len
= mm
->m_pkthdr
.len
;
9674 error
= uiomove((caddr_t
)ph
, (int)tot_out
, uio
);
9677 * Here if we can't get his data we
9678 * still abort we just don't get to
9679 * send the users note :-0
9685 ssb_unlock(&so
->so_snd
);
9686 SOCKBUF_UNLOCK(&so
->so_snd
);
9687 sctp_abort_an_association(stcb
->sctp_ep
, stcb
,
9688 SCTP_RESPONSE_TO_USER_REQ
,
9698 /* Now can we send this? */
9699 if ((SCTP_GET_STATE(asoc
) == SCTP_STATE_SHUTDOWN_SENT
) ||
9700 (SCTP_GET_STATE(asoc
) == SCTP_STATE_SHUTDOWN_ACK_SENT
) ||
9701 (SCTP_GET_STATE(asoc
) == SCTP_STATE_SHUTDOWN_RECEIVED
) ||
9702 (asoc
->state
& SCTP_STATE_SHUTDOWN_PENDING
)) {
9703 /* got data while shutting down */
9708 /* Is the stream no. valid? */
9709 if (srcv
->sinfo_stream
>= asoc
->streamoutcnt
) {
9710 /* Invalid stream number */
9715 if (asoc
->strmout
== NULL
) {
9716 /* huh? software error */
9718 if (sctp_debug_on
& SCTP_DEBUG_OUTPUT1
) {
9719 kprintf("software error in sctp_copy_it_in\n");
9726 if ((srcv
->sinfo_flags
& MSG_EOF
) &&
9727 (stcb
->sctp_ep
->sctp_flags
& SCTP_PCB_FLAGS_UDPTYPE
) &&
9738 /* save off the tag */
9739 my_vtag
= asoc
->my_vtag
;
9740 strq
= &asoc
->strmout
[srcv
->sinfo_stream
];
9741 /* First lets figure out the "chunking" point */
9742 frag_size
= sctp_get_frag_point(stcb
, asoc
);
9744 /* two choices here, it all fits in one chunk or
9745 * we need multiple chunks.
9748 SOCKBUF_UNLOCK(&so
->so_snd
);
9749 if (tot_out
<= frag_size
) {
9750 /* no need to setup a template */
9751 chk
= (struct sctp_tmit_chunk
*)SCTP_ZONE_GET(sctppcbinfo
.ipi_zone_chunk
);
9754 SOCKBUF_LOCK(&so
->so_snd
);
9757 sctppcbinfo
.ipi_count_chunk
++;
9758 sctppcbinfo
.ipi_gencnt_chunk
++;
9759 asoc
->chunks_on_out_queue
++;
9760 MGETHDR(mm
, MB_WAIT
, MT_DATA
);
9765 error
= sctp_copy_one(mm
, uio
, tot_out
, resv_in_first
, &mbcnt_e
);
9768 sctp_prepare_chunk(chk
, stcb
, srcv
, strq
, net
);
9769 chk
->mbcnt
= mbcnt_e
;
9772 mm
->m_pkthdr
.len
= tot_out
;
9776 /* the actual chunk flags */
9777 chk
->rec
.data
.rcv_flags
|= SCTP_DATA_NOT_FRAG
;
9778 chk
->whoTo
->ref_count
++;
9780 /* fix up the send_size if it is not present */
9781 chk
->send_size
= tot_out
;
9782 chk
->book_size
= chk
->send_size
;
9783 /* ok, we are commited */
9784 if ((srcv
->sinfo_flags
& MSG_UNORDERED
) == 0) {
9785 /* bump the ssn if we are unordered. */
9786 strq
->next_sequence_sent
++;
9788 if (chk
->flags
& SCTP_PR_SCTP_BUFFER
) {
9789 asoc
->sent_queue_cnt_removeable
++;
9792 if ((asoc
->state
== 0) ||
9793 (my_vtag
!= asoc
->my_vtag
) ||
9794 (so
!= inp
->sctp_socket
) ||
9795 (inp
->sctp_socket
== 0)) {
9796 /* connection was aborted */
9801 asoc
->stream_queue_cnt
++;
9802 TAILQ_INSERT_TAIL(&strq
->outqueue
, chk
, sctp_next
);
9803 /* now check if this stream is on the wheel */
9804 if ((strq
->next_spoke
.tqe_next
== NULL
) &&
9805 (strq
->next_spoke
.tqe_prev
== NULL
)) {
9806 /* Insert it on the wheel since it is not
9809 sctp_insert_on_wheel(asoc
, strq
);
9814 SCTP_ZONE_FREE(sctppcbinfo
.ipi_zone_chunk
, chk
);
9815 sctppcbinfo
.ipi_count_chunk
--;
9816 if ((int)sctppcbinfo
.ipi_count_chunk
< 0) {
9817 panic("Chunk count is negative");
9819 SOCKBUF_LOCK(&so
->so_snd
);
9823 /* we need to setup a template */
9824 struct sctp_tmit_chunk
template;
9825 struct sctpchunk_listhead tmp
;
9827 /* setup the template */
9828 sctp_prepare_chunk(&template, stcb
, srcv
, strq
, net
);
9830 /* Prepare the temp list */
9833 /* Template is complete, now time for the work */
9834 while (tot_out
> 0) {
9836 chk
= (struct sctp_tmit_chunk
*)SCTP_ZONE_GET(sctppcbinfo
.ipi_zone_chunk
);
9839 * ok we must spin through and dump anything
9840 * we have allocated and then jump to the
9845 sctppcbinfo
.ipi_count_chunk
++;
9846 asoc
->chunks_on_out_queue
++;
9848 sctppcbinfo
.ipi_gencnt_chunk
++;
9850 chk
->whoTo
->ref_count
++;
9851 MGETHDR(chk
->data
, MB_WAIT
, MT_DATA
);
9852 if (chk
->data
== NULL
) {
9856 tot_demand
= min(tot_out
, frag_size
);
9857 error
= sctp_copy_one(chk
->data
, uio
, tot_demand
, resv_in_first
, &mbcnt_e
);
9860 /* now fix the chk->send_size */
9861 chk
->mbcnt
= mbcnt_e
;
9864 chk
->send_size
= tot_demand
;
9865 chk
->data
->m_pkthdr
.len
= tot_demand
;
9866 chk
->book_size
= chk
->send_size
;
9867 if (chk
->flags
& SCTP_PR_SCTP_BUFFER
) {
9868 asoc
->sent_queue_cnt_removeable
++;
9870 TAILQ_INSERT_TAIL(&tmp
, chk
, sctp_next
);
9871 tot_out
-= tot_demand
;
9873 /* Now the tmp list holds all chunks and data */
9874 if ((srcv
->sinfo_flags
& MSG_UNORDERED
) == 0) {
9875 /* bump the ssn if we are unordered. */
9876 strq
->next_sequence_sent
++;
9878 /* Mark the first/last flags. This will
9879 * result int a 3 for a single item on the list
9881 chk
= TAILQ_FIRST(&tmp
);
9882 chk
->rec
.data
.rcv_flags
|= SCTP_DATA_FIRST_FRAG
;
9883 chk
= TAILQ_LAST(&tmp
, sctpchunk_listhead
);
9884 chk
->rec
.data
.rcv_flags
|= SCTP_DATA_LAST_FRAG
;
9886 /* now move it to the streams actual queue */
9887 /* first stop protocol processing */
9889 if ((asoc
->state
== 0) ||
9890 (my_vtag
!= asoc
->my_vtag
) ||
9891 (so
!= inp
->sctp_socket
) ||
9892 (inp
->sctp_socket
== 0)) {
9893 /* connection was aborted */
9898 chk
= TAILQ_FIRST(&tmp
);
9900 chk
->data
->m_nextpkt
= 0;
9901 TAILQ_REMOVE(&tmp
, chk
, sctp_next
);
9902 asoc
->stream_queue_cnt
++;
9903 TAILQ_INSERT_TAIL(&strq
->outqueue
, chk
, sctp_next
);
9904 chk
= TAILQ_FIRST(&tmp
);
9906 /* now check if this stream is on the wheel */
9907 if ((strq
->next_spoke
.tqe_next
== NULL
) &&
9908 (strq
->next_spoke
.tqe_prev
== NULL
)) {
9909 /* Insert it on the wheel since it is not
9912 sctp_insert_on_wheel(asoc
, strq
);
9914 /* Ok now we can allow pping */
9918 SOCKBUF_LOCK(&so
->so_snd
);
9919 chk
= TAILQ_FIRST(&tmp
);
9922 sctp_m_freem(chk
->data
);
9925 TAILQ_REMOVE(&tmp
, chk
, sctp_next
);
9926 SCTP_ZONE_FREE(sctppcbinfo
.ipi_zone_chunk
, chk
);
9927 sctppcbinfo
.ipi_count_chunk
--;
9928 asoc
->chunks_on_out_queue
--;
9929 if ((int)sctppcbinfo
.ipi_count_chunk
< 0) {
9930 panic("Chunk count is negative");
9932 sctppcbinfo
.ipi_gencnt_chunk
++;
9933 chk
= TAILQ_FIRST(&tmp
);
9939 #ifdef SCTP_MBCNT_LOGGING
9940 sctp_log_mbcnt(SCTP_LOG_MBCNT_INCREASE
,
9941 asoc
->total_output_queue_size
,
9943 asoc
->total_output_mbuf_queue_size
,
9947 SOCKBUF_LOCK(&so
->so_snd
);
9948 asoc
->total_output_queue_size
+= dataout
;
9949 asoc
->total_output_mbuf_queue_size
+= mbcnt
;
9950 if ((stcb
->sctp_ep
->sctp_flags
& SCTP_PCB_FLAGS_TCPTYPE
) ||
9951 (stcb
->sctp_ep
->sctp_flags
& SCTP_PCB_FLAGS_IN_TCPPOOL
)) {
9952 so
->so_snd
.ssb_cc
+= dataout
;
9953 so
->so_snd
.ssb_mbcnt
+= mbcnt
;
9955 if ((srcv
->sinfo_flags
& MSG_EOF
) &&
9956 (stcb
->sctp_ep
->sctp_flags
& SCTP_PCB_FLAGS_UDPTYPE
)
9958 int some_on_streamwheel
= 0;
9960 if (!TAILQ_EMPTY(&asoc
->out_wheel
)) {
9961 /* Check to see if some data queued */
9962 struct sctp_stream_out
*outs
;
9963 TAILQ_FOREACH(outs
, &asoc
->out_wheel
, next_spoke
) {
9964 if (!TAILQ_EMPTY(&outs
->outqueue
)) {
9965 some_on_streamwheel
= 1;
9970 if (TAILQ_EMPTY(&asoc
->send_queue
) &&
9971 TAILQ_EMPTY(&asoc
->sent_queue
) &&
9972 (some_on_streamwheel
== 0)) {
9973 /* there is nothing queued to send, so I'm done... */
9974 if ((SCTP_GET_STATE(asoc
) != SCTP_STATE_SHUTDOWN_SENT
) &&
9975 (SCTP_GET_STATE(asoc
) != SCTP_STATE_SHUTDOWN_ACK_SENT
)) {
9976 /* only send SHUTDOWN the first time through */
9978 if (sctp_debug_on
& SCTP_DEBUG_OUTPUT4
) {
9979 kprintf("%s:%d sends a shutdown\n",
9985 sctp_send_shutdown(stcb
, stcb
->asoc
.primary_destination
);
9986 asoc
->state
= SCTP_STATE_SHUTDOWN_SENT
;
9987 sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWN
, stcb
->sctp_ep
, stcb
,
9988 asoc
->primary_destination
);
9989 sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNGUARD
, stcb
->sctp_ep
, stcb
,
9990 asoc
->primary_destination
);
9994 * we still got (or just got) data to send, so set
9998 * XXX sockets draft says that MSG_EOF should be sent
9999 * with no data. currently, we will allow user data
10000 * to be sent first and move to SHUTDOWN-PENDING
10002 asoc
->state
|= SCTP_STATE_SHUTDOWN_PENDING
;
10007 if (sctp_debug_on
& SCTP_DEBUG_OUTPUT2
) {
10008 kprintf("++total out:%d total_mbuf_out:%d\n",
10009 (int)asoc
->total_output_queue_size
,
10010 (int)asoc
->total_output_mbuf_queue_size
);
10015 ssb_unlock(&so
->so_snd
);
10017 SOCKBUF_UNLOCK(&so
->so_snd
);
10026 sctp_sosend(struct socket
*so
,
10028 struct mbuf
*addr_mbuf
,
10030 struct sockaddr
*addr
,
10034 struct mbuf
*control
,
10035 #if defined(__NetBSD__) || defined(__APPLE__)
10039 #if (defined(__FreeBSD__) && __FreeBSD_version >= 500000) || defined(__DragonFly__)
10047 unsigned int sndlen
;
10048 int error
, use_rcvinfo
;
10049 int queue_only
= 0, queue_only_for_init
=0;
10052 struct sctp_inpcb
*inp
;
10053 struct sctp_tcb
*stcb
=NULL
;
10054 struct sctp_sndrcvinfo srcv
;
10055 struct timeval now
;
10056 struct sctp_nets
*net
;
10057 struct sctp_association
*asoc
;
10058 struct sctp_inpcb
*t_inp
;
10059 int create_lock_applied
= 0;
10060 #if defined(__APPLE__)
10061 struct proc
*p
= current_proc();
10062 #elif defined(__NetBSD__)
10063 struct proc
*p
= curproc
; /* XXX */
10064 struct sockaddr
*addr
= NULL
;
10066 addr
= mtod(addr_mbuf
, struct sockaddr
*);
10069 error
= use_rcvinfo
= 0;
10073 t_inp
= inp
= (struct sctp_inpcb
*)so
->so_pcb
;
10075 sndlen
= uio
->uio_resid
;
10077 sndlen
= top
->m_pkthdr
.len
;
10082 if ((inp
->sctp_flags
& SCTP_PCB_FLAGS_TCPTYPE
) &&
10083 (inp
->sctp_flags
& SCTP_PCB_FLAGS_ACCEPTING
)) {
10084 /* The listner can NOT send */
10090 SCTP_ASOC_CREATE_LOCK(inp
);
10091 if ((inp
->sctp_flags
& SCTP_PCB_FLAGS_SOCKET_GONE
) ||
10092 (inp
->sctp_flags
& SCTP_PCB_FLAGS_SOCKET_GONE
)) {
10093 /* Should I really unlock ? */
10099 create_lock_applied
= 1;
10100 if (((inp
->sctp_flags
& SCTP_PCB_FLAGS_BOUND_V6
) == 0) &&
10101 (addr
->sa_family
== AF_INET6
)) {
10107 /* now we must find the assoc */
10108 if (inp
->sctp_flags
& SCTP_PCB_FLAGS_CONNECTED
) {
10109 SCTP_INP_RLOCK(inp
);
10110 stcb
= LIST_FIRST(&inp
->sctp_asoc_list
);
10111 if (stcb
== NULL
) {
10112 SCTP_INP_RUNLOCK(inp
);
10117 SCTP_TCB_LOCK(stcb
);
10118 SCTP_INP_RUNLOCK(inp
);
10119 net
= stcb
->asoc
.primary_destination
;
10123 /* process cmsg snd/rcv info (maybe a assoc-id) */
10124 if (sctp_find_cmsg(SCTP_SNDRCV
, (void *)&srcv
, control
,
10127 if (srcv
.sinfo_flags
& MSG_SENDALL
) {
10128 /* its a sendall */
10129 sctppcbinfo
.mbuf_track
--;
10130 sctp_m_freem(control
);
10132 if (create_lock_applied
) {
10133 SCTP_ASOC_CREATE_UNLOCK(inp
);
10134 create_lock_applied
= 0;
10136 return (sctp_sendall(inp
, uio
, top
, &srcv
));
10141 if (stcb
== NULL
) {
10142 /* Need to do a lookup */
10143 if (use_rcvinfo
&& srcv
.sinfo_assoc_id
) {
10144 stcb
= sctp_findassociation_ep_asocid(inp
, srcv
.sinfo_assoc_id
);
10146 * Question: Should I error here if the assoc_id is
10147 * no longer valid? i.e. I can't find it?
10151 /* Must locate the net structure */
10152 net
= sctp_findnet(stcb
, addr
);
10155 if (stcb
== NULL
) {
10156 if (addr
!= NULL
) {
10157 /* Since we did not use findep we must
10158 * increment it, and if we don't find a
10159 * tcb decrement it.
10161 SCTP_INP_WLOCK(inp
);
10162 SCTP_INP_INCR_REF(inp
);
10163 SCTP_INP_WUNLOCK(inp
);
10164 stcb
= sctp_findassociation_ep_addr(&t_inp
, addr
, &net
, NULL
, NULL
);
10165 if (stcb
== NULL
) {
10166 SCTP_INP_WLOCK(inp
);
10167 SCTP_INP_DECR_REF(inp
);
10168 SCTP_INP_WUNLOCK(inp
);
10173 if ((stcb
== NULL
) &&
10174 (inp
->sctp_flags
& SCTP_PCB_FLAGS_TCPTYPE
)) {
10178 } else if ((stcb
== NULL
) && (addr
== NULL
)) {
10182 } else if (stcb
== NULL
) {
10183 /* UDP style, we must go ahead and start the INIT process */
10184 if ((use_rcvinfo
) &&
10185 (srcv
.sinfo_flags
& MSG_ABORT
)) {
10186 /* User asks to abort a non-existant asoc */
10191 /* get an asoc/stcb struct */
10192 stcb
= sctp_aloc_assoc(inp
, addr
, 1, &error
, 0);
10193 if (stcb
== NULL
) {
10194 /* Error is setup for us in the call */
10198 if (create_lock_applied
) {
10199 SCTP_ASOC_CREATE_UNLOCK(inp
);
10200 create_lock_applied
= 0;
10202 kprintf("Huh-3? create lock should have been on??\n");
10204 /* Turn on queue only flag to prevent data from being sent */
10206 asoc
= &stcb
->asoc
;
10207 asoc
->state
= SCTP_STATE_COOKIE_WAIT
;
10208 SCTP_GETTIME_TIMEVAL(&asoc
->time_entered
);
10210 /* see if a init structure exists in cmsg headers */
10211 struct sctp_initmsg initm
;
10213 if (sctp_find_cmsg(SCTP_INIT
, (void *)&initm
, control
, sizeof(initm
))) {
10214 /* we have an INIT override of the default */
10215 if (initm
.sinit_max_attempts
)
10216 asoc
->max_init_times
= initm
.sinit_max_attempts
;
10217 if (initm
.sinit_num_ostreams
)
10218 asoc
->pre_open_streams
= initm
.sinit_num_ostreams
;
10219 if (initm
.sinit_max_instreams
)
10220 asoc
->max_inbound_streams
= initm
.sinit_max_instreams
;
10221 if (initm
.sinit_max_init_timeo
)
10222 asoc
->initial_init_rto_max
= initm
.sinit_max_init_timeo
;
10223 if (asoc
->streamoutcnt
< asoc
->pre_open_streams
) {
10224 /* Default is NOT correct */
10226 if (sctp_debug_on
& SCTP_DEBUG_OUTPUT1
) {
10227 kprintf("Ok, defout:%d pre_open:%d\n",
10228 asoc
->streamoutcnt
, asoc
->pre_open_streams
);
10231 FREE(asoc
->strmout
, M_PCB
);
10232 asoc
->strmout
= NULL
;
10233 asoc
->streamoutcnt
= asoc
->pre_open_streams
;
10235 /* What happesn if this fails? .. we panic ...*/
10236 MALLOC(asoc
->strmout
,
10237 struct sctp_stream_out
*,
10238 asoc
->streamoutcnt
*
10239 sizeof(struct sctp_stream_out
),
10241 for (i
= 0; i
< asoc
->streamoutcnt
; i
++) {
10243 * inbound side must be set to 0xffff,
10244 * also NOTE when we get the INIT-ACK
10245 * back (for INIT sender) we MUST
10246 * reduce the count (streamoutcnt) but
10247 * first check if we sent to any of the
10248 * upper streams that were dropped (if
10249 * some were). Those that were dropped
10250 * must be notified to the upper layer
10251 * as failed to send.
10253 asoc
->strmout
[i
].next_sequence_sent
= 0x0;
10254 TAILQ_INIT(&asoc
->strmout
[i
].outqueue
);
10255 asoc
->strmout
[i
].stream_no
= i
;
10256 asoc
->strmout
[i
].next_spoke
.tqe_next
= 0;
10257 asoc
->strmout
[i
].next_spoke
.tqe_prev
= 0;
10263 /* out with the INIT */
10264 queue_only_for_init
= 1;
10265 sctp_send_initiate(inp
, stcb
);
10267 * we may want to dig in after this call and adjust the MTU
10268 * value. It defaulted to 1500 (constant) but the ro structure
10269 * may now have an update and thus we may need to change it
10270 * BEFORE we append the message.
10272 net
= stcb
->asoc
.primary_destination
;
10273 asoc
= &stcb
->asoc
;
10275 asoc
= &stcb
->asoc
;
10277 if (create_lock_applied
) {
10278 SCTP_ASOC_CREATE_UNLOCK(inp
);
10279 create_lock_applied
= 0;
10281 if ((SCTP_GET_STATE(asoc
) == SCTP_STATE_COOKIE_WAIT
) ||
10282 (SCTP_GET_STATE(asoc
) == SCTP_STATE_COOKIE_ECHOED
)) {
10285 if (use_rcvinfo
== 0) {
10286 /* Grab the default stuff from the asoc */
10287 srcv
= stcb
->asoc
.def_send
;
10289 /* we are now done with all control */
10291 sctp_m_freem(control
);
10295 if ((SCTP_GET_STATE(asoc
) == SCTP_STATE_SHUTDOWN_SENT
) ||
10296 (SCTP_GET_STATE(asoc
) == SCTP_STATE_SHUTDOWN_RECEIVED
) ||
10297 (SCTP_GET_STATE(asoc
) == SCTP_STATE_SHUTDOWN_ACK_SENT
) ||
10298 (asoc
->state
& SCTP_STATE_SHUTDOWN_PENDING
)) {
10299 if ((use_rcvinfo
) &&
10300 (srcv
.sinfo_flags
& MSG_ABORT
)) {
10303 error
= ECONNRESET
;
10308 /* Ok, we will attempt a msgsnd :> */
10310 #if (defined(__FreeBSD__) && __FreeBSD_version >= 500000) || defined(__DragonFly__)
10311 p
->td_lwp
->lwp_ru
.ru_msgsnd
++;
10313 p
->p_stats
->p_ru
.ru_msgsnd
++;
10317 if (net
&& ((srcv
.sinfo_flags
& MSG_ADDR_OVER
))) {
10318 /* we take the override or the unconfirmed */
10321 net
= stcb
->asoc
.primary_destination
;
10326 /* Must copy it all in from user land. The
10327 * socket buf is locked but we don't suspend
10328 * protocol processing until we are ready to
10332 error
= sctp_copy_it_in(inp
, stcb
, asoc
, net
, &srcv
, uio
, flags
);
10336 /* Here we must either pull in the user data to chunk
10337 * buffers, or use top to do a msg_append.
10339 error
= sctp_msg_append(stcb
, net
, top
, &srcv
, flags
);
10343 /* zap the top since it is now being used */
10347 if (net
->flight_size
> net
->cwnd
) {
10348 sctp_pegs
[SCTP_SENDTO_FULL_CWND
]++;
10351 } else if (asoc
->ifp_had_enobuf
) {
10352 sctp_pegs
[SCTP_QUEONLY_BURSTLMT
]++;
10355 un_sent
= ((stcb
->asoc
.total_output_queue_size
- stcb
->asoc
.total_flight
) +
10356 ((stcb
->asoc
.chunks_on_out_queue
- stcb
->asoc
.total_flight_count
) * sizeof(struct sctp_data_chunk
)) +
10357 SCTP_MED_OVERHEAD
);
10359 if (((inp
->sctp_flags
& SCTP_PCB_FLAGS_NODELAY
) == 0) &&
10360 (stcb
->asoc
.total_flight
> 0) &&
10361 (un_sent
< (int)stcb
->asoc
.smallest_mtu
)) {
10363 /* Ok, Nagle is set on and we have data outstanding. Don't
10364 * send anything and let SACKs drive out the data unless we
10365 * have a "full" segment to send.
10367 sctp_pegs
[SCTP_NAGLE_NOQ
]++;
10370 sctp_pegs
[SCTP_NAGLE_OFF
]++;
10373 if (queue_only_for_init
) {
10374 /* It is possible to have a turn around of the
10375 * INIT/INIT-ACK/COOKIE before I have a chance to
10376 * copy in the data. In such a case I DO want to
10377 * send it out by reversing the queue only flag.
10379 if ((SCTP_GET_STATE(asoc
) != SCTP_STATE_COOKIE_WAIT
) ||
10380 (SCTP_GET_STATE(asoc
) != SCTP_STATE_COOKIE_ECHOED
)) {
10381 /* yep, reverse it */
10386 if ((queue_only
== 0) && (stcb
->asoc
.peers_rwnd
&& un_sent
)) {
10387 /* we can attempt to send too.*/
10389 if (sctp_debug_on
& SCTP_DEBUG_OUTPUT1
) {
10390 kprintf("USR Send calls sctp_chunk_output\n");
10394 sctp_pegs
[SCTP_OUTPUT_FRM_SND
]++;
10395 sctp_chunk_output(inp
, stcb
, 0);
10397 } else if ((queue_only
== 0) &&
10398 (stcb
->asoc
.peers_rwnd
== 0) &&
10399 (stcb
->asoc
.total_flight
== 0)) {
10400 /* We get to have a probe outstanding */
10402 sctp_from_user_send
= 1;
10403 sctp_chunk_output(inp
, stcb
, 0);
10404 sctp_from_user_send
= 0;
10407 } else if (!TAILQ_EMPTY(&stcb
->asoc
.control_send_queue
)) {
10408 int num_out
, reason
, cwnd_full
;
10409 /* Here we do control only */
10411 sctp_med_chunk_output(inp
, stcb
, &stcb
->asoc
, &num_out
,
10412 &reason
, 1, &cwnd_full
, 1, &now
, &now_filled
);
10416 if (sctp_debug_on
& SCTP_DEBUG_OUTPUT1
) {
10417 kprintf("USR Send complete qo:%d prw:%d unsent:%d tf:%d cooq:%d toqs:%d \n",
10418 queue_only
, stcb
->asoc
.peers_rwnd
, un_sent
,
10419 stcb
->asoc
.total_flight
, stcb
->asoc
.chunks_on_out_queue
,
10420 stcb
->asoc
.total_output_queue_size
);
10424 if (create_lock_applied
) {
10425 SCTP_ASOC_CREATE_UNLOCK(inp
);
10426 create_lock_applied
= 0;
10429 SCTP_TCB_UNLOCK(stcb
);
10433 sctp_m_freem(control
);