1 /* $KAME: sctp_output.c,v 1.46 2005/03/06 16:04:17 itojun Exp $ */
2 /* $DragonFly: src/sys/netinet/sctp_output.c,v 1.14 2008/04/20 13:44:25 swildner Exp $ */
5 * Copyright (C) 2002, 2003, 2004 Cisco Systems Inc,
8 * Redistribution and use in source and binary forms, with or without
9 * modification, are permitted provided that the following conditions
11 * 1. Redistributions of source code must retain the above copyright
12 * notice, this list of conditions and the following disclaimer.
13 * 2. Redistributions in binary form must reproduce the above copyright
14 * notice, this list of conditions and the following disclaimer in the
15 * documentation and/or other materials provided with the distribution.
16 * 3. Neither the name of the project nor the names of its contributors
17 * may be used to endorse or promote products derived from this software
18 * without specific prior written permission.
20 * THIS SOFTWARE IS PROVIDED BY THE PROJECT AND CONTRIBUTORS ``AS IS'' AND
21 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
22 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
23 * ARE DISCLAIMED. IN NO EVENT SHALL THE PROJECT OR CONTRIBUTORS BE LIABLE
24 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
25 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
26 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
27 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
28 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
29 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
33 #if !(defined(__OpenBSD__) || defined (__APPLE__))
34 #include "opt_ipsec.h"
36 #if defined(__FreeBSD__) || defined(__DragonFly__)
37 #include "opt_compat.h"
38 #include "opt_inet6.h"
41 #if defined(__NetBSD__)
46 #elif !defined(__OpenBSD__)
49 #include <sys/param.h>
50 #include <sys/systm.h>
51 #include <sys/malloc.h>
54 #include <sys/domain.h>
56 #include <sys/protosw.h>
57 #include <sys/socket.h>
58 #include <sys/socketvar.h>
60 #include <sys/kernel.h>
61 #include <sys/sysctl.h>
62 #include <sys/resourcevar.h>
65 #include <sys/domain.h>
67 #include <sys/thread2.h>
68 #include <sys/socketvar2.h>
70 #if (defined(__FreeBSD__) && __FreeBSD_version >= 500000)
71 #include <sys/limits.h>
73 #include <machine/limits.h>
75 #include <machine/cpu.h>
78 #include <net/if_types.h>
80 #if defined(__FreeBSD__) || defined(__DragonFly__)
81 #include <net/if_var.h>
84 #include <net/route.h>
86 #include <netinet/in.h>
87 #include <netinet/in_systm.h>
88 #include <netinet/ip.h>
89 #include <netinet/in_pcb.h>
90 #include <netinet/in_var.h>
91 #include <netinet/ip_var.h>
94 #include <netinet/ip6.h>
95 #include <netinet6/ip6_var.h>
96 #include <netinet6/scope6_var.h>
97 #include <netinet6/nd6.h>
99 #if defined(__FreeBSD__) || defined(__NetBSD__) || defined(__DragonFly__)
100 #include <netinet6/in6_pcb.h>
101 #elif defined(__OpenBSD__)
102 #include <netinet/in_pcb.h>
105 #include <netinet/icmp6.h>
109 #include <net/net_osdep.h>
111 #if defined(HAVE_NRL_INPCB) || defined(__FreeBSD__) || defined(__DragonFly__)
117 #include <netinet/sctp_pcb.h>
121 #include <netinet6/ipsec.h>
122 #include <netproto/key/key.h>
128 #include <netinet/sctp_var.h>
129 #include <netinet/sctp_header.h>
130 #include <netinet/sctputil.h>
131 #include <netinet/sctp_pcb.h>
132 #include <netinet/sctp_output.h>
133 #include <netinet/sctp_uio.h>
134 #include <netinet/sctputil.h>
135 #include <netinet/sctp_hashdriver.h>
136 #include <netinet/sctp_timer.h>
137 #include <netinet/sctp_asconf.h>
138 #include <netinet/sctp_indata.h>
141 extern uint32_t sctp_debug_on
;
144 extern int sctp_peer_chunk_oh
;
147 sctp_find_cmsg(int c_type
, void *data
, struct mbuf
*control
, int cpsize
)
152 tlen
= control
->m_len
;
155 * Independent of how many mbufs, find the c_type inside the control
156 * structure and copy out the data.
159 if ((tlen
-at
) < (int)CMSG_ALIGN(sizeof(cmh
))) {
160 /* not enough room for one more we are done. */
163 m_copydata(control
, at
, sizeof(cmh
), (caddr_t
)&cmh
);
164 if ((cmh
.cmsg_len
+ at
) > tlen
) {
166 * this is real messed up since there is not enough
167 * data here to cover the cmsg header. We are done.
171 if ((cmh
.cmsg_level
== IPPROTO_SCTP
) &&
172 (c_type
== cmh
.cmsg_type
)) {
173 /* found the one we want, copy it out */
174 at
+= CMSG_ALIGN(sizeof(struct cmsghdr
));
175 if ((int)(cmh
.cmsg_len
- CMSG_ALIGN(sizeof(struct cmsghdr
))) < cpsize
) {
177 * space of cmsg_len after header not
182 m_copydata(control
, at
, cpsize
, data
);
185 at
+= CMSG_ALIGN(cmh
.cmsg_len
);
186 if (cmh
.cmsg_len
== 0) {
196 sctp_add_addr_to_mbuf(struct mbuf
*m
, struct ifaddr
*ifa
)
198 struct sctp_paramhdr
*parmh
;
201 if (ifa
->ifa_addr
->sa_family
== AF_INET
) {
202 len
= sizeof(struct sctp_ipv4addr_param
);
203 } else if (ifa
->ifa_addr
->sa_family
== AF_INET6
) {
204 len
= sizeof(struct sctp_ipv6addr_param
);
210 if (M_TRAILINGSPACE(m
) >= len
) {
211 /* easy side we just drop it on the end */
212 parmh
= (struct sctp_paramhdr
*)(m
->m_data
+ m
->m_len
);
215 /* Need more space */
217 while (mret
->m_next
!= NULL
) {
220 MGET(mret
->m_next
, MB_DONTWAIT
, MT_DATA
);
221 if (mret
->m_next
== NULL
) {
222 /* We are hosed, can't add more addresses */
226 parmh
= mtod(mret
, struct sctp_paramhdr
*);
228 /* now add the parameter */
229 if (ifa
->ifa_addr
->sa_family
== AF_INET
) {
230 struct sctp_ipv4addr_param
*ipv4p
;
231 struct sockaddr_in
*sin
;
232 sin
= (struct sockaddr_in
*)ifa
->ifa_addr
;
233 ipv4p
= (struct sctp_ipv4addr_param
*)parmh
;
234 parmh
->param_type
= htons(SCTP_IPV4_ADDRESS
);
235 parmh
->param_length
= htons(len
);
236 ipv4p
->addr
= sin
->sin_addr
.s_addr
;
238 } else if (ifa
->ifa_addr
->sa_family
== AF_INET6
) {
239 struct sctp_ipv6addr_param
*ipv6p
;
240 struct sockaddr_in6
*sin6
;
241 sin6
= (struct sockaddr_in6
*)ifa
->ifa_addr
;
242 ipv6p
= (struct sctp_ipv6addr_param
*)parmh
;
243 parmh
->param_type
= htons(SCTP_IPV6_ADDRESS
);
244 parmh
->param_length
= htons(len
);
245 memcpy(ipv6p
->addr
, &sin6
->sin6_addr
,
246 sizeof(ipv6p
->addr
));
247 /* clear embedded scope in the address */
248 in6_clearscope((struct in6_addr
*)ipv6p
->addr
);
259 sctp_add_cookie(struct sctp_inpcb
*inp
, struct mbuf
*init
, int init_offset
,
260 struct mbuf
*initack
, int initack_offset
, struct sctp_state_cookie
*stc_in
)
262 struct mbuf
*copy_init
, *copy_initack
, *m_at
, *sig
, *mret
;
263 struct sctp_state_cookie
*stc
;
264 struct sctp_paramhdr
*ph
;
271 MGET(mret
, MB_DONTWAIT
, MT_DATA
);
275 copy_init
= sctp_m_copym(init
, init_offset
, M_COPYALL
, MB_DONTWAIT
);
276 if (copy_init
== NULL
) {
280 copy_initack
= sctp_m_copym(initack
, initack_offset
, M_COPYALL
,
282 if (copy_initack
== NULL
) {
284 sctp_m_freem(copy_init
);
287 /* easy side we just drop it on the end */
288 ph
= mtod(mret
, struct sctp_paramhdr
*);
289 mret
->m_len
= sizeof(struct sctp_state_cookie
) +
290 sizeof(struct sctp_paramhdr
);
291 stc
= (struct sctp_state_cookie
*)((caddr_t
)ph
+
292 sizeof(struct sctp_paramhdr
));
293 ph
->param_type
= htons(SCTP_STATE_COOKIE
);
294 ph
->param_length
= 0; /* fill in at the end */
295 /* Fill in the stc cookie data */
298 /* tack the INIT and then the INIT-ACK onto the chain */
301 for (m_at
= mret
; m_at
; m_at
= m_at
->m_next
) {
302 cookie_sz
+= m_at
->m_len
;
303 if (m_at
->m_next
== NULL
) {
304 m_at
->m_next
= copy_init
;
309 for (m_at
= copy_init
; m_at
; m_at
= m_at
->m_next
) {
310 cookie_sz
+= m_at
->m_len
;
311 if (m_at
->m_next
== NULL
) {
312 m_at
->m_next
= copy_initack
;
317 for (m_at
= copy_initack
; m_at
; m_at
= m_at
->m_next
) {
318 cookie_sz
+= m_at
->m_len
;
319 if (m_at
->m_next
== NULL
) {
323 MGET(sig
, MB_DONTWAIT
, MT_DATA
);
327 sctp_m_freem(copy_init
);
328 sctp_m_freem(copy_initack
);
334 signature
= (uint8_t *)(mtod(sig
, caddr_t
) + sig_offset
);
335 /* Time to sign the cookie */
336 sctp_hash_digest_m((char *)inp
->sctp_ep
.secret_key
[
337 (int)(inp
->sctp_ep
.current_secret_number
)],
338 SCTP_SECRET_SIZE
, mret
, sizeof(struct sctp_paramhdr
),
339 (uint8_t *)signature
);
340 sig
->m_len
+= SCTP_SIGNATURE_SIZE
;
341 cookie_sz
+= SCTP_SIGNATURE_SIZE
;
343 ph
->param_length
= htons(cookie_sz
);
348 static struct sockaddr_in
*
349 sctp_is_v4_ifa_addr_prefered (struct ifaddr
*ifa
, uint8_t loopscope
, uint8_t ipv4_scope
, uint8_t *sin_loop
, uint8_t *sin_local
)
351 struct sockaddr_in
*sin
;
353 * Here we determine if its a prefered address. A
354 * prefered address means it is the same scope or
355 * higher scope then the destination.
356 * L = loopback, P = private, G = global
357 * -----------------------------------------
358 * src | dest | result
359 *-----------------------------------------
361 *-----------------------------------------
363 *-----------------------------------------
365 *-----------------------------------------
367 *-----------------------------------------
369 *-----------------------------------------
371 *-----------------------------------------
373 *-----------------------------------------
375 *-----------------------------------------
377 *-----------------------------------------
380 if (ifa
->ifa_addr
->sa_family
!= AF_INET
) {
384 /* Ok the address may be ok */
385 sin
= (struct sockaddr_in
*)ifa
->ifa_addr
;
386 if (sin
->sin_addr
.s_addr
== 0) {
389 *sin_local
= *sin_loop
= 0;
390 if ((ifa
->ifa_ifp
->if_type
== IFT_LOOP
) ||
391 (IN4_ISLOOPBACK_ADDRESS(&sin
->sin_addr
))) {
395 if ((IN4_ISPRIVATE_ADDRESS(&sin
->sin_addr
))) {
398 if (!loopscope
&& *sin_loop
) {
399 /* Its a loopback address and we don't have loop scope */
402 if (!ipv4_scope
&& *sin_local
) {
403 /* Its a private address, and we don't have private address scope */
406 if (((ipv4_scope
== 0) && (loopscope
== 0)) && (*sin_local
)) {
407 /* its a global src and a private dest */
410 /* its a prefered address */
414 static struct sockaddr_in
*
415 sctp_is_v4_ifa_addr_acceptable (struct ifaddr
*ifa
, uint8_t loopscope
, uint8_t ipv4_scope
, uint8_t *sin_loop
, uint8_t *sin_local
)
417 struct sockaddr_in
*sin
;
419 * Here we determine if its a acceptable address. A
420 * acceptable address means it is the same scope or
421 * higher scope but we can allow for NAT which means
422 * its ok to have a global dest and a private src.
424 * L = loopback, P = private, G = global
425 * -----------------------------------------
426 * src | dest | result
427 *-----------------------------------------
429 *-----------------------------------------
431 *-----------------------------------------
433 *-----------------------------------------
435 *-----------------------------------------
437 *-----------------------------------------
438 * G | P | yes - probably this won't work.
439 *-----------------------------------------
441 *-----------------------------------------
443 *-----------------------------------------
445 *-----------------------------------------
448 if (ifa
->ifa_addr
->sa_family
!= AF_INET
) {
452 /* Ok the address may be ok */
453 sin
= (struct sockaddr_in
*)ifa
->ifa_addr
;
454 if (sin
->sin_addr
.s_addr
== 0) {
457 *sin_local
= *sin_loop
= 0;
458 if ((ifa
->ifa_ifp
->if_type
== IFT_LOOP
) ||
459 (IN4_ISLOOPBACK_ADDRESS(&sin
->sin_addr
))) {
463 if ((IN4_ISPRIVATE_ADDRESS(&sin
->sin_addr
))) {
466 if (!loopscope
&& *sin_loop
) {
467 /* Its a loopback address and we don't have loop scope */
470 /* its an acceptable address */
475 * This treats the address list on the ep as a restricted list
476 * (negative list). If a the passed address is listed, then
477 * the address is NOT allowed on the association.
480 sctp_is_addr_restricted(struct sctp_tcb
*stcb
, struct sockaddr
*addr
)
482 struct sctp_laddr
*laddr
;
487 /* There are no restrictions, no TCB :-) */
491 LIST_FOREACH(laddr
, &stcb
->asoc
.sctp_local_addr_list
, sctp_nxt_addr
) {
494 if (sctp_debug_on
& SCTP_DEBUG_OUTPUT4
) {
495 kprintf("There are %d addresses on the restricted list\n", cnt
);
499 LIST_FOREACH(laddr
, &stcb
->asoc
.sctp_local_addr_list
, sctp_nxt_addr
) {
500 if (laddr
->ifa
== NULL
) {
502 if (sctp_debug_on
& SCTP_DEBUG_OUTPUT1
) {
503 kprintf("Help I have fallen and I can't get up!\n");
509 if (sctp_debug_on
& SCTP_DEBUG_OUTPUT4
) {
511 kprintf("Restricted address[%d]:", cnt
);
512 sctp_print_address(laddr
->ifa
->ifa_addr
);
515 if (sctp_cmpaddr(addr
, laddr
->ifa
->ifa_addr
) == 1) {
516 /* Yes it is on the list */
524 sctp_is_addr_in_ep(struct sctp_inpcb
*inp
, struct ifaddr
*ifa
)
526 struct sctp_laddr
*laddr
;
530 LIST_FOREACH(laddr
, &inp
->sctp_addr_list
, sctp_nxt_addr
) {
531 if (laddr
->ifa
== NULL
) {
533 if (sctp_debug_on
& SCTP_DEBUG_OUTPUT1
) {
534 kprintf("Help I have fallen and I can't get up!\n");
539 if (laddr
->ifa
->ifa_addr
== NULL
)
541 if (laddr
->ifa
== ifa
)
544 if (laddr
->ifa
->ifa_addr
->sa_family
!= ifa
->ifa_addr
->sa_family
) {
545 /* skip non compatible address comparison */
548 if (sctp_cmpaddr(ifa
->ifa_addr
, laddr
->ifa
->ifa_addr
) == 1) {
549 /* Yes it is restricted */
558 static struct in_addr
559 sctp_choose_v4_boundspecific_inp(struct sctp_inpcb
*inp
,
565 struct sctp_laddr
*laddr
;
566 struct sockaddr_in
*sin
;
568 uint8_t sin_loop
, sin_local
;
570 /* first question, is the ifn we will emit on
571 * in our list, if so, we want that one.
575 struct ifaddr_container
*ifac
;
577 /* is a prefered one on the interface we route out? */
578 TAILQ_FOREACH(ifac
, &ifn
->if_addrheads
[mycpuid
], ifa_link
) {
579 struct ifaddr
*ifa
= ifac
->ifa
;
581 sin
= sctp_is_v4_ifa_addr_prefered (ifa
, loopscope
, ipv4_scope
, &sin_loop
, &sin_local
);
584 if (sctp_is_addr_in_ep(inp
, ifa
)) {
585 return (sin
->sin_addr
);
588 /* is an acceptable one on the interface we route out? */
589 TAILQ_FOREACH(ifac
, &ifn
->if_addrheads
[mycpuid
], ifa_link
) {
590 struct ifaddr
*ifa
= ifac
->ifa
;
592 sin
= sctp_is_v4_ifa_addr_acceptable (ifa
, loopscope
, ipv4_scope
, &sin_loop
, &sin_local
);
595 if (sctp_is_addr_in_ep(inp
, ifa
)) {
596 return (sin
->sin_addr
);
600 /* ok, what about a prefered address in the inp */
601 for (laddr
= LIST_FIRST(&inp
->sctp_addr_list
);
602 laddr
&& (laddr
!= inp
->next_addr_touse
);
603 laddr
= LIST_NEXT(laddr
, sctp_nxt_addr
)) {
604 if (laddr
->ifa
== NULL
) {
605 /* address has been removed */
608 sin
= sctp_is_v4_ifa_addr_prefered (laddr
->ifa
, loopscope
, ipv4_scope
, &sin_loop
, &sin_local
);
611 return (sin
->sin_addr
);
614 /* ok, what about an acceptable address in the inp */
615 for (laddr
= LIST_FIRST(&inp
->sctp_addr_list
);
616 laddr
&& (laddr
!= inp
->next_addr_touse
);
617 laddr
= LIST_NEXT(laddr
, sctp_nxt_addr
)) {
618 if (laddr
->ifa
== NULL
) {
619 /* address has been removed */
622 sin
= sctp_is_v4_ifa_addr_acceptable (laddr
->ifa
, loopscope
, ipv4_scope
, &sin_loop
, &sin_local
);
625 return (sin
->sin_addr
);
629 /* no address bound can be a source for the destination we are in trouble */
631 if (sctp_debug_on
& SCTP_DEBUG_OUTPUT1
) {
632 kprintf("Src address selection for EP, no acceptable src address found for address\n");
635 memset(&ans
, 0, sizeof(ans
));
641 static struct in_addr
642 sctp_choose_v4_boundspecific_stcb(struct sctp_inpcb
*inp
,
643 struct sctp_tcb
*stcb
,
644 struct sctp_nets
*net
,
648 int non_asoc_addr_ok
)
651 * Here we have two cases, bound all asconf
652 * allowed. bound all asconf not allowed.
655 struct sctp_laddr
*laddr
, *starting_point
;
658 uint8_t sin_loop
, sin_local
, start_at_beginning
=0;
659 struct sockaddr_in
*sin
;
661 /* first question, is the ifn we will emit on
662 * in our list, if so, we want that one.
666 if (inp
->sctp_flags
& SCTP_PCB_FLAGS_DO_ASCONF
) {
668 * Here we use the list of addresses on the endpoint. Then
669 * the addresses listed on the "restricted" list is just that,
670 * address that have not been added and can't be used (unless
671 * the non_asoc_addr_ok is set).
674 if (sctp_debug_on
& SCTP_DEBUG_OUTPUT1
) {
675 kprintf("Have a STCB - asconf allowed, not bound all have a netgative list\n");
678 /* first question, is the ifn we will emit on
679 * in our list, if so, we want that one.
682 struct ifaddr_container
*ifac
;
684 /* first try for an prefered address on the ep */
685 TAILQ_FOREACH(ifac
, &ifn
->if_addrheads
[mycpuid
], ifa_link
) {
686 struct ifaddr
*ifa
= ifac
->ifa
;
688 if (sctp_is_addr_in_ep(inp
, ifa
)) {
689 sin
= sctp_is_v4_ifa_addr_prefered (ifa
, loopscope
, ipv4_scope
, &sin_loop
, &sin_local
);
692 if ((non_asoc_addr_ok
== 0) &&
693 (sctp_is_addr_restricted(stcb
, (struct sockaddr
*)sin
))) {
694 /* on the no-no list */
697 return (sin
->sin_addr
);
700 /* next try for an acceptable address on the ep */
701 TAILQ_FOREACH(ifac
, &ifn
->if_addrheads
[mycpuid
], ifa_link
) {
702 struct ifaddr
*ifa
= ifac
->ifa
;
704 if (sctp_is_addr_in_ep(inp
, ifa
)) {
705 sin
= sctp_is_v4_ifa_addr_acceptable (ifa
, loopscope
, ipv4_scope
, &sin_loop
, &sin_local
);
708 if ((non_asoc_addr_ok
== 0) &&
709 (sctp_is_addr_restricted(stcb
, (struct sockaddr
*)sin
))) {
710 /* on the no-no list */
713 return (sin
->sin_addr
);
718 /* if we can't find one like that then we must
719 * look at all addresses bound to pick one at
720 * first prefereable then secondly acceptable.
722 starting_point
= stcb
->asoc
.last_used_address
;
724 if (stcb
->asoc
.last_used_address
== NULL
) {
725 start_at_beginning
=1;
726 stcb
->asoc
.last_used_address
= LIST_FIRST(&inp
->sctp_addr_list
);
728 /* search beginning with the last used address */
729 for (laddr
= stcb
->asoc
.last_used_address
; laddr
;
730 laddr
= LIST_NEXT(laddr
, sctp_nxt_addr
)) {
731 if (laddr
->ifa
== NULL
) {
732 /* address has been removed */
735 sin
= sctp_is_v4_ifa_addr_prefered (laddr
->ifa
, loopscope
, ipv4_scope
, &sin_loop
, &sin_local
);
738 if ((non_asoc_addr_ok
== 0) &&
739 (sctp_is_addr_restricted(stcb
, (struct sockaddr
*)sin
))) {
740 /* on the no-no list */
743 return (sin
->sin_addr
);
746 if (start_at_beginning
== 0) {
747 stcb
->asoc
.last_used_address
= NULL
;
748 goto sctpv4_from_the_top
;
750 /* now try for any higher scope than the destination */
751 stcb
->asoc
.last_used_address
= starting_point
;
752 start_at_beginning
= 0;
753 sctpv4_from_the_top2
:
754 if (stcb
->asoc
.last_used_address
== NULL
) {
755 start_at_beginning
=1;
756 stcb
->asoc
.last_used_address
= LIST_FIRST(&inp
->sctp_addr_list
);
758 /* search beginning with the last used address */
759 for (laddr
= stcb
->asoc
.last_used_address
; laddr
;
760 laddr
= LIST_NEXT(laddr
, sctp_nxt_addr
)) {
761 if (laddr
->ifa
== NULL
) {
762 /* address has been removed */
765 sin
= sctp_is_v4_ifa_addr_acceptable (laddr
->ifa
, loopscope
, ipv4_scope
, &sin_loop
, &sin_local
);
768 if ((non_asoc_addr_ok
== 0) &&
769 (sctp_is_addr_restricted(stcb
, (struct sockaddr
*)sin
))) {
770 /* on the no-no list */
773 return (sin
->sin_addr
);
775 if (start_at_beginning
== 0) {
776 stcb
->asoc
.last_used_address
= NULL
;
777 goto sctpv4_from_the_top2
;
781 * Here we have an address list on the association, thats the
782 * only valid source addresses that we can use.
785 if (sctp_debug_on
& SCTP_DEBUG_OUTPUT1
) {
786 kprintf("Have a STCB - no asconf allowed, not bound all have a positive list\n");
789 /* First look at all addresses for one that is on
790 * the interface we route out
792 LIST_FOREACH(laddr
, &stcb
->asoc
.sctp_local_addr_list
,
794 if (laddr
->ifa
== NULL
) {
795 /* address has been removed */
798 sin
= sctp_is_v4_ifa_addr_prefered (laddr
->ifa
, loopscope
, ipv4_scope
, &sin_loop
, &sin_local
);
801 /* first question, is laddr->ifa an address associated with the emit interface */
803 struct ifaddr_container
*ifac
;
805 TAILQ_FOREACH(ifac
, &ifn
->if_addrheads
[mycpuid
], ifa_link
) {
806 struct ifaddr
*ifa
= ifac
->ifa
;
808 if (laddr
->ifa
== ifa
) {
809 sin
= (struct sockaddr_in
*)laddr
->ifa
->ifa_addr
;
810 return (sin
->sin_addr
);
812 if (sctp_cmpaddr(ifa
->ifa_addr
, laddr
->ifa
->ifa_addr
) == 1) {
813 sin
= (struct sockaddr_in
*)laddr
->ifa
->ifa_addr
;
814 return (sin
->sin_addr
);
819 /* what about an acceptable one on the interface? */
820 LIST_FOREACH(laddr
, &stcb
->asoc
.sctp_local_addr_list
,
822 if (laddr
->ifa
== NULL
) {
823 /* address has been removed */
826 sin
= sctp_is_v4_ifa_addr_acceptable (laddr
->ifa
, loopscope
, ipv4_scope
, &sin_loop
, &sin_local
);
829 /* first question, is laddr->ifa an address associated with the emit interface */
831 struct ifaddr_container
*ifac
;
833 TAILQ_FOREACH(ifac
, &ifn
->if_addrheads
[mycpuid
], ifa_link
) {
834 struct ifaddr
*ifa
= ifac
->ifa
;
836 if (laddr
->ifa
== ifa
) {
837 sin
= (struct sockaddr_in
*)laddr
->ifa
->ifa_addr
;
838 return (sin
->sin_addr
);
840 if (sctp_cmpaddr(ifa
->ifa_addr
, laddr
->ifa
->ifa_addr
) == 1) {
841 sin
= (struct sockaddr_in
*)laddr
->ifa
->ifa_addr
;
842 return (sin
->sin_addr
);
847 /* ok, next one that is preferable in general */
848 LIST_FOREACH(laddr
, &stcb
->asoc
.sctp_local_addr_list
,
850 if (laddr
->ifa
== NULL
) {
851 /* address has been removed */
854 sin
= sctp_is_v4_ifa_addr_prefered (laddr
->ifa
, loopscope
, ipv4_scope
, &sin_loop
, &sin_local
);
857 return (sin
->sin_addr
);
860 /* last, what about one that is acceptable */
861 LIST_FOREACH(laddr
, &stcb
->asoc
.sctp_local_addr_list
,
863 if (laddr
->ifa
== NULL
) {
864 /* address has been removed */
867 sin
= sctp_is_v4_ifa_addr_acceptable (laddr
->ifa
, loopscope
, ipv4_scope
, &sin_loop
, &sin_local
);
870 return (sin
->sin_addr
);
873 memset(&ans
, 0, sizeof(ans
));
877 static struct sockaddr_in
*
878 sctp_select_v4_nth_prefered_addr_from_ifn_boundall (struct ifnet
*ifn
, struct sctp_tcb
*stcb
, int non_asoc_addr_ok
,
879 uint8_t loopscope
, uint8_t ipv4_scope
, int cur_addr_num
)
881 struct ifaddr_container
*ifac
;
882 struct sockaddr_in
*sin
;
883 uint8_t sin_loop
, sin_local
;
884 int num_eligible_addr
= 0;
886 TAILQ_FOREACH(ifac
, &ifn
->if_addrheads
[mycpuid
], ifa_link
) {
887 struct ifaddr
*ifa
= ifac
->ifa
;
889 sin
= sctp_is_v4_ifa_addr_prefered (ifa
, loopscope
, ipv4_scope
, &sin_loop
, &sin_local
);
893 if ((non_asoc_addr_ok
== 0) && sctp_is_addr_restricted(stcb
, (struct sockaddr
*)sin
)) {
894 /* It is restricted for some reason.. probably
900 if (cur_addr_num
== num_eligible_addr
) {
909 sctp_count_v4_num_prefered_boundall (struct ifnet
*ifn
, struct sctp_tcb
*stcb
, int non_asoc_addr_ok
,
910 uint8_t loopscope
, uint8_t ipv4_scope
, uint8_t *sin_loop
, uint8_t *sin_local
)
912 struct ifaddr_container
*ifac
;
913 struct sockaddr_in
*sin
;
914 int num_eligible_addr
= 0;
916 TAILQ_FOREACH(ifac
, &ifn
->if_addrheads
[mycpuid
], ifa_link
) {
917 struct ifaddr
*ifa
= ifac
->ifa
;
919 sin
= sctp_is_v4_ifa_addr_prefered (ifa
, loopscope
, ipv4_scope
, sin_loop
, sin_local
);
923 if ((non_asoc_addr_ok
== 0) && sctp_is_addr_restricted(stcb
, (struct sockaddr
*)sin
)) {
924 /* It is restricted for some reason.. probably
932 return (num_eligible_addr
);
936 static struct in_addr
937 sctp_choose_v4_boundall(struct sctp_inpcb
*inp
,
938 struct sctp_tcb
*stcb
,
939 struct sctp_nets
*net
,
943 int non_asoc_addr_ok
)
945 int cur_addr_num
=0, num_prefered
=0;
946 uint8_t sin_loop
, sin_local
;
948 struct sockaddr_in
*sin
;
950 struct ifaddr_container
*ifac
;
952 * For v4 we can use (in boundall) any address in the association. If
953 * non_asoc_addr_ok is set we can use any address (at least in theory).
954 * So we look for prefered addresses first. If we find one, we use it.
955 * Otherwise we next try to get an address on the interface, which we
956 * should be able to do (unless non_asoc_addr_ok is false and we are
957 * routed out that way). In these cases where we can't use the address
958 * of the interface we go through all the ifn's looking for an address
959 * we can use and fill that in. Punting means we send back address
960 * 0, which will probably cause problems actually since then IP will
961 * fill in the address of the route ifn, which means we probably already
962 * rejected it.. i.e. here comes an abort :-<.
966 cur_addr_num
= net
->indx_of_eligible_next_to_use
;
969 goto bound_all_v4_plan_c
;
971 num_prefered
= sctp_count_v4_num_prefered_boundall (ifn
, stcb
, non_asoc_addr_ok
, loopscope
, ipv4_scope
, &sin_loop
, &sin_local
);
973 if (sctp_debug_on
& SCTP_DEBUG_OUTPUT1
) {
974 kprintf("Found %d preferred source addresses\n", num_prefered
);
977 if (num_prefered
== 0) {
978 /* no eligible addresses, we must use some other
979 * interface address if we can find one.
981 goto bound_all_v4_plan_b
;
983 /* Ok we have num_eligible_addr set with how many we can use,
984 * this may vary from call to call due to addresses being deprecated etc..
986 if (cur_addr_num
>= num_prefered
) {
989 /* select the nth address from the list (where cur_addr_num is the nth) and
990 * 0 is the first one, 1 is the second one etc...
993 if (sctp_debug_on
& SCTP_DEBUG_OUTPUT1
) {
994 kprintf("cur_addr_num:%d\n", cur_addr_num
);
997 sin
= sctp_select_v4_nth_prefered_addr_from_ifn_boundall (ifn
, stcb
, non_asoc_addr_ok
, loopscope
,
998 ipv4_scope
, cur_addr_num
);
1000 /* if sin is NULL something changed??, plan_a now */
1002 return (sin
->sin_addr
);
1006 * plan_b: Look at the interface that we emit on
1007 * and see if we can find an acceptable address.
1009 bound_all_v4_plan_b
:
1010 TAILQ_FOREACH(ifac
, &ifn
->if_addrheads
[mycpuid
], ifa_link
) {
1011 struct ifaddr
*ifa
= ifac
->ifa
;
1013 sin
= sctp_is_v4_ifa_addr_acceptable (ifa
, loopscope
, ipv4_scope
, &sin_loop
, &sin_local
);
1017 if ((non_asoc_addr_ok
== 0) && sctp_is_addr_restricted(stcb
, (struct sockaddr
*)sin
)) {
1018 /* It is restricted for some reason.. probably
1024 return (sin
->sin_addr
);
1027 * plan_c: Look at all interfaces and find a prefered
1028 * address. If we reache here we are in trouble I think.
1030 bound_all_v4_plan_c
:
1031 for (ifn
= TAILQ_FIRST(&ifnet
);
1032 ifn
&& (ifn
!= inp
->next_ifn_touse
);
1033 ifn
=TAILQ_NEXT(ifn
, if_list
)) {
1034 if (loopscope
== 0 && ifn
->if_type
== IFT_LOOP
) {
1035 /* wrong base scope */
1038 if (ifn
== rt
->rt_ifp
)
1039 /* already looked at this guy */
1041 num_prefered
= sctp_count_v4_num_prefered_boundall (ifn
, stcb
, non_asoc_addr_ok
,
1042 loopscope
, ipv4_scope
, &sin_loop
, &sin_local
);
1044 if (sctp_debug_on
& SCTP_DEBUG_OUTPUT1
) {
1045 kprintf("Found ifn:%x %d preferred source addresses\n", (u_int
)ifn
, num_prefered
);
1048 if (num_prefered
== 0) {
1050 * None on this interface.
1054 /* Ok we have num_eligible_addr set with how many we can use,
1055 * this may vary from call to call due to addresses being deprecated etc..
1057 if (cur_addr_num
>= num_prefered
) {
1060 sin
= sctp_select_v4_nth_prefered_addr_from_ifn_boundall (ifn
, stcb
, non_asoc_addr_ok
, loopscope
,
1061 ipv4_scope
, cur_addr_num
);
1064 return (sin
->sin_addr
);
1069 * plan_d: We are in deep trouble. No prefered address on
1070 * any interface. And the emit interface does not
1071 * even have an acceptable address. Take anything
1072 * we can get! If this does not work we are
1073 * probably going to emit a packet that will
1074 * illicit an ABORT, falling through.
1077 for (ifn
= TAILQ_FIRST(&ifnet
);
1078 ifn
&& (ifn
!= inp
->next_ifn_touse
);
1079 ifn
=TAILQ_NEXT(ifn
, if_list
)) {
1080 if (loopscope
== 0 && ifn
->if_type
== IFT_LOOP
) {
1081 /* wrong base scope */
1084 if (ifn
== rt
->rt_ifp
)
1085 /* already looked at this guy */
1088 TAILQ_FOREACH(ifac
, &ifn
->if_addrheads
[mycpuid
], ifa_link
) {
1089 struct ifaddr
*ifa
= ifac
->ifa
;
1091 sin
= sctp_is_v4_ifa_addr_acceptable (ifa
, loopscope
, ipv4_scope
, &sin_loop
, &sin_local
);
1095 if ((non_asoc_addr_ok
== 0) && sctp_is_addr_restricted(stcb
, (struct sockaddr
*)sin
)) {
1096 /* It is restricted for some reason.. probably
1102 return (sin
->sin_addr
);
1106 * Ok we can find NO address to source from that is
1107 * not on our negative list. It is either the special
1108 * ASCONF case where we are sourceing from a intf that
1109 * has been ifconfig'd to a different address (i.e.
1110 * it holds a ADD/DEL/SET-PRIM and the proper lookup
1111 * address. OR we are hosed, and this baby is going
1112 * to abort the association.
1114 if (non_asoc_addr_ok
) {
1115 return (((struct sockaddr_in
*)(rt
->rt_ifa
->ifa_addr
))->sin_addr
);
1117 memset(&ans
, 0, sizeof(ans
));
1124 /* tcb may be NULL */
1126 sctp_ipv4_source_address_selection(struct sctp_inpcb
*inp
,
1127 struct sctp_tcb
*stcb
, struct route
*ro
, struct sctp_nets
*net
,
1128 int non_asoc_addr_ok
)
1131 struct sockaddr_in
*to
= (struct sockaddr_in
*)&ro
->ro_dst
;
1132 uint8_t ipv4_scope
, loopscope
;
1135 * - Find the route if needed, cache if I can.
1136 * - Look at interface address in route, Is it
1137 * in the bound list. If so we have the best source.
1138 * - If not we must rotate amongst the addresses.
1142 * Do we need to pay attention to scope. We can have
1143 * a private address or a global address we are sourcing
1144 * or sending to. So if we draw it out
1145 * source * dest * result
1146 * ------------------------------------------
1147 * a Private * Global * NAT?
1148 * ------------------------------------------
1149 * b Private * Private * No problem
1150 * ------------------------------------------
1151 * c Global * Private * Huh, How will this work?
1152 * ------------------------------------------
1153 * d Global * Global * No Problem
1154 * ------------------------------------------
1156 * And then we add to that what happens if there are multiple
1157 * addresses assigned to an interface. Remember the ifa on a
1158 * ifn is a linked list of addresses. So one interface can
1159 * have more than one IPv4 address. What happens if we
1160 * have both a private and a global address? Do we then
1161 * use context of destination to sort out which one is
1162 * best? And what about NAT's sending P->G may get you
1163 * a NAT translation, or should you select the G thats
1164 * on the interface in preference.
1168 * - count the number of addresses on the interface.
1169 * - if its one, no problem except case <c>. For <a>
1170 * we will assume a NAT out there.
1171 * - if there are more than one, then we need to worry
1172 * about scope P or G. We should prefer G -> G and
1173 * P -> P if possible. Then as a secondary fall back
1174 * to mixed types G->P being a last ditch one.
1175 * - The above all works for bound all, but bound
1176 * specific we need to use the same concept but instead
1177 * only consider the bound addresses. If the bound set
1178 * is NOT assigned to the interface then we must use
1179 * rotation amongst them.
1181 * Notes: For v4, we can always punt and let ip_output
1182 * decide by sending back a source of 0.0.0.0
1185 if (ro
->ro_rt
== NULL
) {
1187 * Need a route to cache.
1190 #if defined(__FreeBSD__) || defined(__APPLE__) || defined(__DragonFly__)
1191 rtalloc_ign(ro
, 0UL);
1196 if (ro
->ro_rt
== NULL
) {
1197 /* No route to host .. punt */
1198 memset(&ans
, 0, sizeof(ans
));
1201 /* Setup our scopes */
1203 ipv4_scope
= stcb
->asoc
.ipv4_local_scope
;
1204 loopscope
= stcb
->asoc
.loopback_scope
;
1206 /* Scope based on outbound address */
1207 if ((IN4_ISPRIVATE_ADDRESS(&to
->sin_addr
))) {
1210 } else if (IN4_ISLOOPBACK_ADDRESS(&to
->sin_addr
)) {
1219 if (sctp_debug_on
& SCTP_DEBUG_OUTPUT1
) {
1220 kprintf("Scope setup loop:%d ipv4_scope:%d\n",
1221 loopscope
, ipv4_scope
);
1224 if (inp
->sctp_flags
& SCTP_PCB_FLAGS_BOUNDALL
) {
1226 * When bound to all if the address list is set
1227 * it is a negative list. Addresses being added
1230 return (sctp_choose_v4_boundall(inp
, stcb
, net
, ro
->ro_rt
,
1231 ipv4_scope
, loopscope
, non_asoc_addr_ok
));
1234 * Three possiblities here:
1236 * a) stcb is NULL, which means we operate only from
1237 * the list of addresses (ifa's) bound to the assoc and
1238 * we care not about the list.
1239 * b) stcb is NOT-NULL, which means we have an assoc structure and
1240 * auto-asconf is on. This means that the list of addresses is
1241 * a NOT list. We use the list from the inp, but any listed address
1242 * in our list is NOT yet added. However if the non_asoc_addr_ok is
1243 * set we CAN use an address NOT available (i.e. being added). Its
1245 * c) stcb is NOT-NULL, which means we have an assoc structure and
1246 * auto-asconf is off. This means that the list of addresses is
1247 * the ONLY addresses I can use.. its positive.
1249 * Note we collapse b & c into the same function just like in
1250 * the v6 address selection.
1253 return (sctp_choose_v4_boundspecific_stcb(inp
, stcb
, net
,
1254 ro
->ro_rt
, ipv4_scope
, loopscope
, non_asoc_addr_ok
));
1256 return (sctp_choose_v4_boundspecific_inp(inp
, ro
->ro_rt
,
1257 ipv4_scope
, loopscope
));
1259 /* this should not be reached */
1260 memset(&ans
, 0, sizeof(ans
));
1266 static struct sockaddr_in6
*
1267 sctp_is_v6_ifa_addr_acceptable (struct ifaddr
*ifa
, int loopscope
, int loc_scope
, int *sin_loop
, int *sin_local
)
1269 struct in6_ifaddr
*ifa6
;
1270 struct sockaddr_in6
*sin6
;
1272 if (ifa
->ifa_addr
->sa_family
!= AF_INET6
) {
1276 ifa6
= (struct in6_ifaddr
*)ifa
;
1277 /* ok to use deprecated addresses? */
1278 if (!ip6_use_deprecated
) {
1279 if (IFA6_IS_DEPRECATED(ifa6
)) {
1280 /* can't use this type */
1284 /* are we ok, with the current state of this address? */
1285 if (ifa6
->ia6_flags
&
1286 (IN6_IFF_DETACHED
| IN6_IFF_NOTREADY
| IN6_IFF_ANYCAST
)) {
1287 /* Can't use these types */
1290 /* Ok the address may be ok */
1291 sin6
= (struct sockaddr_in6
*)ifa
->ifa_addr
;
1292 *sin_local
= *sin_loop
= 0;
1293 if ((ifa
->ifa_ifp
->if_type
== IFT_LOOP
) ||
1294 (IN6_IS_ADDR_LOOPBACK(&sin6
->sin6_addr
))) {
1297 if (!loopscope
&& *sin_loop
) {
1298 /* Its a loopback address and we don't have loop scope */
1301 if (IN6_IS_ADDR_UNSPECIFIED(&sin6
->sin6_addr
)) {
1302 /* we skip unspecifed addresses */
1306 if (IN6_IS_ADDR_LINKLOCAL(&sin6
->sin6_addr
)) {
1309 if (!loc_scope
&& *sin_local
) {
1310 /* Its a link local address, and we don't have link local scope */
1317 static struct sockaddr_in6
*
1318 sctp_choose_v6_boundspecific_stcb(struct sctp_inpcb
*inp
,
1319 struct sctp_tcb
*stcb
,
1320 struct sctp_nets
*net
,
1324 int non_asoc_addr_ok
)
1327 * Each endpoint has a list of local addresses associated
1328 * with it. The address list is either a "negative list" i.e.
1329 * those addresses that are NOT allowed to be used as a source OR
1330 * a "postive list" i.e. those addresses that CAN be used.
1332 * Its a negative list if asconf is allowed. What we do
1333 * in this case is use the ep address list BUT we have
1334 * to cross check it against the negative list.
1336 * In the case where NO asconf is allowed, we have just
1337 * a straight association level list that we must use to
1338 * find a source address.
1340 struct sctp_laddr
*laddr
, *starting_point
;
1341 struct sockaddr_in6
*sin6
;
1342 int sin_loop
, sin_local
;
1343 int start_at_beginning
=0;
1347 if (inp
->sctp_flags
& SCTP_PCB_FLAGS_DO_ASCONF
) {
1349 if (sctp_debug_on
& SCTP_DEBUG_OUTPUT1
) {
1350 kprintf("Have a STCB - asconf allowed, not bound all have a netgative list\n");
1353 /* first question, is the ifn we will emit on
1354 * in our list, if so, we want that one.
1357 struct ifaddr_container
*ifac
;
1359 TAILQ_FOREACH(ifac
, &ifn
->if_addrheads
[mycpuid
], ifa_link
) {
1360 struct ifaddr
*ifa
= ifac
->ifa
;
1362 if (sctp_is_addr_in_ep(inp
, ifa
)) {
1363 sin6
= sctp_is_v6_ifa_addr_acceptable (ifa
, loopscope
, loc_scope
, &sin_loop
, &sin_local
);
1366 if ((non_asoc_addr_ok
== 0) &&
1367 (sctp_is_addr_restricted(stcb
, (struct sockaddr
*)sin6
))) {
1368 /* on the no-no list */
1375 starting_point
= stcb
->asoc
.last_used_address
;
1376 /* First try for matching scope */
1378 if (stcb
->asoc
.last_used_address
== NULL
) {
1379 start_at_beginning
=1;
1380 stcb
->asoc
.last_used_address
= LIST_FIRST(&inp
->sctp_addr_list
);
1382 /* search beginning with the last used address */
1383 for (laddr
= stcb
->asoc
.last_used_address
; laddr
;
1384 laddr
= LIST_NEXT(laddr
, sctp_nxt_addr
)) {
1385 if (laddr
->ifa
== NULL
) {
1386 /* address has been removed */
1389 sin6
= sctp_is_v6_ifa_addr_acceptable (laddr
->ifa
, loopscope
, loc_scope
, &sin_loop
, &sin_local
);
1392 if ((non_asoc_addr_ok
== 0) && (sctp_is_addr_restricted(stcb
, (struct sockaddr
*)sin6
))) {
1393 /* on the no-no list */
1396 /* is it of matching scope ? */
1397 if ((loopscope
== 0) &&
1401 /* all of global scope we are ok with it */
1404 if (loopscope
&& sin_loop
)
1405 /* both on the loopback, thats ok */
1407 if (loc_scope
&& sin_local
)
1408 /* both local scope */
1412 if (start_at_beginning
== 0) {
1413 stcb
->asoc
.last_used_address
= NULL
;
1414 goto sctp_from_the_top
;
1416 /* now try for any higher scope than the destination */
1417 stcb
->asoc
.last_used_address
= starting_point
;
1418 start_at_beginning
= 0;
1420 if (stcb
->asoc
.last_used_address
== NULL
) {
1421 start_at_beginning
=1;
1422 stcb
->asoc
.last_used_address
= LIST_FIRST(&inp
->sctp_addr_list
);
1424 /* search beginning with the last used address */
1425 for (laddr
= stcb
->asoc
.last_used_address
; laddr
;
1426 laddr
= LIST_NEXT(laddr
, sctp_nxt_addr
)) {
1427 if (laddr
->ifa
== NULL
) {
1428 /* address has been removed */
1431 sin6
= sctp_is_v6_ifa_addr_acceptable (laddr
->ifa
, loopscope
, loc_scope
, &sin_loop
, &sin_local
);
1434 if ((non_asoc_addr_ok
== 0) && (sctp_is_addr_restricted(stcb
, (struct sockaddr
*)sin6
))) {
1435 /* on the no-no list */
1440 if (start_at_beginning
== 0) {
1441 stcb
->asoc
.last_used_address
= NULL
;
1442 goto sctp_from_the_top2
;
1446 if (sctp_debug_on
& SCTP_DEBUG_OUTPUT1
) {
1447 kprintf("Have a STCB - no asconf allowed, not bound all have a positive list\n");
1450 /* First try for interface output match */
1451 LIST_FOREACH(laddr
, &stcb
->asoc
.sctp_local_addr_list
,
1453 if (laddr
->ifa
== NULL
) {
1454 /* address has been removed */
1457 sin6
= sctp_is_v6_ifa_addr_acceptable (laddr
->ifa
, loopscope
, loc_scope
, &sin_loop
, &sin_local
);
1460 /* first question, is laddr->ifa an address associated with the emit interface */
1462 struct ifaddr_container
*ifac
;
1464 TAILQ_FOREACH(ifac
, &ifn
->if_addrheads
[mycpuid
], ifa_link
) {
1465 struct ifaddr
*ifa
= ifac
->ifa
;
1467 if (laddr
->ifa
== ifa
) {
1468 sin6
= (struct sockaddr_in6
*)laddr
->ifa
->ifa_addr
;
1471 if (sctp_cmpaddr(ifa
->ifa_addr
, laddr
->ifa
->ifa_addr
) == 1) {
1472 sin6
= (struct sockaddr_in6
*)laddr
->ifa
->ifa_addr
;
1478 /* Next try for matching scope */
1479 LIST_FOREACH(laddr
, &stcb
->asoc
.sctp_local_addr_list
,
1481 if (laddr
->ifa
== NULL
) {
1482 /* address has been removed */
1485 sin6
= sctp_is_v6_ifa_addr_acceptable (laddr
->ifa
, loopscope
, loc_scope
, &sin_loop
, &sin_local
);
1489 if ((loopscope
== 0) &&
1493 /* all of global scope we are ok with it */
1496 if (loopscope
&& sin_loop
)
1497 /* both on the loopback, thats ok */
1499 if (loc_scope
&& sin_local
)
1500 /* both local scope */
1503 /* ok, now try for a higher scope in the source address */
1504 /* First try for matching scope */
1505 LIST_FOREACH(laddr
, &stcb
->asoc
.sctp_local_addr_list
,
1507 if (laddr
->ifa
== NULL
) {
1508 /* address has been removed */
1511 sin6
= sctp_is_v6_ifa_addr_acceptable (laddr
->ifa
, loopscope
, loc_scope
, &sin_loop
, &sin_local
);
1520 static struct sockaddr_in6
*
1521 sctp_choose_v6_boundspecific_inp(struct sctp_inpcb
*inp
,
1527 * Here we are bound specific and have only
1528 * an inp. We must find an address that is bound
1529 * that we can give out as a src address. We
1530 * prefer two addresses of same scope if we can
1531 * find them that way.
1533 struct sctp_laddr
*laddr
;
1534 struct sockaddr_in6
*sin6
;
1536 int sin_loop
, sin_local
;
1538 /* first question, is the ifn we will emit on
1539 * in our list, if so, we want that one.
1544 struct ifaddr_container
*ifac
;
1546 TAILQ_FOREACH(ifac
, &ifn
->if_addrheads
[mycpuid
], ifa_link
) {
1547 struct ifaddr
*ifa
= ifac
->ifa
;
1549 sin6
= sctp_is_v6_ifa_addr_acceptable (ifa
, loopscope
, loc_scope
, &sin_loop
, &sin_local
);
1552 if (sctp_is_addr_in_ep(inp
, ifa
)) {
1557 for (laddr
= LIST_FIRST(&inp
->sctp_addr_list
);
1558 laddr
&& (laddr
!= inp
->next_addr_touse
);
1559 laddr
= LIST_NEXT(laddr
, sctp_nxt_addr
)) {
1560 if (laddr
->ifa
== NULL
) {
1561 /* address has been removed */
1564 sin6
= sctp_is_v6_ifa_addr_acceptable (laddr
->ifa
, loopscope
, loc_scope
, &sin_loop
, &sin_local
);
1568 if ((loopscope
== 0) &&
1572 /* all of global scope we are ok with it */
1575 if (loopscope
&& sin_loop
)
1576 /* both on the loopback, thats ok */
1578 if (loc_scope
&& sin_local
)
1579 /* both local scope */
1583 /* if we reach here, we could not find two addresses
1584 * of the same scope to give out. Lets look for any higher level
1585 * scope for a source address.
1587 for (laddr
= LIST_FIRST(&inp
->sctp_addr_list
);
1588 laddr
&& (laddr
!= inp
->next_addr_touse
);
1589 laddr
= LIST_NEXT(laddr
, sctp_nxt_addr
)) {
1590 if (laddr
->ifa
== NULL
) {
1591 /* address has been removed */
1594 sin6
= sctp_is_v6_ifa_addr_acceptable (laddr
->ifa
, loopscope
, loc_scope
, &sin_loop
, &sin_local
);
1599 /* no address bound can be a source for the destination */
1601 if (sctp_debug_on
& SCTP_DEBUG_OUTPUT1
) {
1602 kprintf("Src address selection for EP, no acceptable src address found for address\n");
1609 static struct sockaddr_in6
*
1610 sctp_select_v6_nth_addr_from_ifn_boundall (struct ifnet
*ifn
, struct sctp_tcb
*stcb
, int non_asoc_addr_ok
, uint8_t loopscope
,
1611 uint8_t loc_scope
, int cur_addr_num
, int match_scope
)
1613 struct ifaddr_container
*ifac
;
1614 struct sockaddr_in6
*sin6
;
1615 int sin_loop
, sin_local
;
1616 int num_eligible_addr
= 0;
1618 TAILQ_FOREACH(ifac
, &ifn
->if_addrheads
[mycpuid
], ifa_link
) {
1619 struct ifaddr
*ifa
= ifac
->ifa
;
1621 sin6
= sctp_is_v6_ifa_addr_acceptable (ifa
, loopscope
, loc_scope
, &sin_loop
, &sin_local
);
1625 if ((non_asoc_addr_ok
== 0) && sctp_is_addr_restricted(stcb
, (struct sockaddr
*)sin6
)) {
1626 /* It is restricted for some reason.. probably
1633 /* Here we are asked to match scope if possible */
1634 if (loopscope
&& sin_loop
)
1635 /* src and destination are loopback scope */
1637 if (loc_scope
&& sin_local
)
1638 /* src and destination are local scope */
1640 if ((loopscope
== 0) &&
1644 /* src and destination are global scope */
1649 if (num_eligible_addr
== cur_addr_num
) {
1653 num_eligible_addr
++;
1660 sctp_count_v6_num_eligible_boundall (struct ifnet
*ifn
, struct sctp_tcb
*stcb
,
1661 int non_asoc_addr_ok
, uint8_t loopscope
, uint8_t loc_scope
)
1663 struct ifaddr_container
*ifac
;
1664 struct sockaddr_in6
*sin6
;
1665 int num_eligible_addr
= 0;
1666 int sin_loop
, sin_local
;
1668 TAILQ_FOREACH(ifac
, &ifn
->if_addrheads
[mycpuid
], ifa_link
) {
1669 struct ifaddr
*ifa
= ifac
->ifa
;
1671 sin6
= sctp_is_v6_ifa_addr_acceptable (ifa
, loopscope
, loc_scope
, &sin_loop
, &sin_local
);
1675 if ((non_asoc_addr_ok
== 0) && sctp_is_addr_restricted(stcb
, (struct sockaddr
*)sin6
)) {
1676 /* It is restricted for some reason.. probably
1682 num_eligible_addr
++;
1684 return (num_eligible_addr
);
1688 static struct sockaddr_in6
*
1689 sctp_choose_v6_boundall(struct sctp_inpcb
*inp
,
1690 struct sctp_tcb
*stcb
,
1691 struct sctp_nets
*net
,
1695 int non_asoc_addr_ok
)
1697 /* Ok, we are bound all SO any address
1698 * is ok to use as long as it is NOT in the negative
1701 int num_eligible_addr
;
1703 int started_at_beginning
=0;
1704 int match_scope_prefered
;
1705 /* first question is, how many eligible addresses are
1706 * there for the destination ifn that we are using that
1707 * are within the proper scope?
1710 struct sockaddr_in6
*sin6
;
1714 cur_addr_num
= net
->indx_of_eligible_next_to_use
;
1716 if (cur_addr_num
== 0) {
1717 match_scope_prefered
= 1;
1719 match_scope_prefered
= 0;
1721 num_eligible_addr
= sctp_count_v6_num_eligible_boundall (ifn
, stcb
, non_asoc_addr_ok
, loopscope
, loc_scope
);
1723 if (sctp_debug_on
& SCTP_DEBUG_OUTPUT1
) {
1724 kprintf("Found %d eligible source addresses\n", num_eligible_addr
);
1727 if (num_eligible_addr
== 0) {
1728 /* no eligible addresses, we must use some other
1729 * interface address if we can find one.
1731 goto bound_all_v6_plan_b
;
1733 /* Ok we have num_eligible_addr set with how many we can use,
1734 * this may vary from call to call due to addresses being deprecated etc..
1736 if (cur_addr_num
>= num_eligible_addr
) {
1739 /* select the nth address from the list (where cur_addr_num is the nth) and
1740 * 0 is the first one, 1 is the second one etc...
1743 if (sctp_debug_on
& SCTP_DEBUG_OUTPUT1
) {
1744 kprintf("cur_addr_num:%d match_scope_prefered:%d select it\n",
1745 cur_addr_num
, match_scope_prefered
);
1748 sin6
= sctp_select_v6_nth_addr_from_ifn_boundall (ifn
, stcb
, non_asoc_addr_ok
, loopscope
,
1749 loc_scope
, cur_addr_num
, match_scope_prefered
);
1750 if (match_scope_prefered
&& (sin6
== NULL
)) {
1751 /* retry without the preference for matching scope */
1753 if (sctp_debug_on
& SCTP_DEBUG_OUTPUT1
) {
1754 kprintf("retry with no match_scope_prefered\n");
1757 sin6
= sctp_select_v6_nth_addr_from_ifn_boundall (ifn
, stcb
, non_asoc_addr_ok
, loopscope
,
1758 loc_scope
, cur_addr_num
, 0);
1762 if (sctp_debug_on
& SCTP_DEBUG_OUTPUT1
) {
1763 kprintf("Selected address %d ifn:%x for the route\n", cur_addr_num
, (u_int
)ifn
);
1767 /* store so we get the next one */
1768 if (cur_addr_num
< 255)
1769 net
->indx_of_eligible_next_to_use
= cur_addr_num
+ 1;
1771 net
->indx_of_eligible_next_to_use
= 0;
1775 num_eligible_addr
= 0;
1776 bound_all_v6_plan_b
:
1777 /* ok, if we reach here we either fell through
1778 * due to something changing during an interupt (unlikely)
1779 * or we have NO eligible source addresses for the ifn
1780 * of the route (most likely). We must look at all the other
1781 * interfaces EXCEPT rt->rt_ifp and do the same game.
1784 if (sctp_debug_on
& SCTP_DEBUG_OUTPUT1
) {
1785 kprintf("bound-all Plan B\n");
1788 if (inp
->next_ifn_touse
== NULL
) {
1789 started_at_beginning
=1;
1790 inp
->next_ifn_touse
= TAILQ_FIRST(&ifnet
);
1792 if (sctp_debug_on
& SCTP_DEBUG_OUTPUT1
) {
1793 kprintf("Start at first IFN:%x\n", (u_int
)inp
->next_ifn_touse
);
1797 inp
->next_ifn_touse
= TAILQ_NEXT(inp
->next_ifn_touse
, if_list
);
1799 if (sctp_debug_on
& SCTP_DEBUG_OUTPUT1
) {
1800 kprintf("Resume at IFN:%x\n", (u_int
)inp
->next_ifn_touse
);
1803 if (inp
->next_ifn_touse
== NULL
) {
1805 if (sctp_debug_on
& SCTP_DEBUG_OUTPUT1
) {
1806 kprintf("IFN Resets\n");
1809 started_at_beginning
=1;
1810 inp
->next_ifn_touse
= TAILQ_FIRST(&ifnet
);
1813 for (ifn
= inp
->next_ifn_touse
; ifn
;
1814 ifn
= TAILQ_NEXT(ifn
, if_list
)) {
1815 if (loopscope
== 0 && ifn
->if_type
== IFT_LOOP
) {
1816 /* wrong base scope */
1819 if (loc_scope
&& (ifn
->if_index
!= loc_scope
)) {
1820 /* by definition the scope (from to->sin6_scopeid)
1821 * must match that of the interface. If not then
1822 * we could pick a wrong scope for the address.
1823 * Ususally we don't hit plan-b since the route
1824 * handles this. However we can hit plan-b when
1825 * we send to local-host so the route is the
1826 * loopback interface, but the destination is a
1831 if (ifn
== rt
->rt_ifp
) {
1832 /* already looked at this guy */
1835 /* Address rotation will only work when we are not
1836 * rotating sourced interfaces and are using the interface
1837 * of the route. We would need to have a per interface index
1838 * in order to do proper rotation.
1840 num_eligible_addr
= sctp_count_v6_num_eligible_boundall (ifn
, stcb
, non_asoc_addr_ok
, loopscope
, loc_scope
);
1842 if (sctp_debug_on
& SCTP_DEBUG_OUTPUT1
) {
1843 kprintf("IFN:%x has %d eligible\n", (u_int
)ifn
, num_eligible_addr
);
1846 if (num_eligible_addr
== 0) {
1847 /* none we can use */
1850 /* Ok we have num_eligible_addr set with how many we can use,
1851 * this may vary from call to call due to addresses being deprecated etc..
1853 inp
->next_ifn_touse
= ifn
;
1855 /* select the first one we can find with perference for matching scope.
1857 sin6
= sctp_select_v6_nth_addr_from_ifn_boundall (ifn
, stcb
, non_asoc_addr_ok
, loopscope
, loc_scope
, 0, 1);
1859 /* can't find one with matching scope how about a source with higher
1862 sin6
= sctp_select_v6_nth_addr_from_ifn_boundall (ifn
, stcb
, non_asoc_addr_ok
, loopscope
, loc_scope
, 0, 0);
1864 /* Hmm, can't find one in the interface now */
1868 if (sctp_debug_on
& SCTP_DEBUG_OUTPUT1
) {
1869 kprintf("Selected the %d'th address of ifn:%x\n",
1876 if (started_at_beginning
== 0) {
1877 /* we have not been through all of them yet, force
1878 * us to go through them all.
1881 if (sctp_debug_on
& SCTP_DEBUG_OUTPUT1
) {
1882 kprintf("Force a recycle\n");
1885 inp
->next_ifn_touse
= NULL
;
1886 goto bound_all_v6_plan_b
;
1892 /* stcb and net may be NULL */
1894 sctp_ipv6_source_address_selection(struct sctp_inpcb
*inp
,
1895 struct sctp_tcb
*stcb
, struct route
*ro
, struct sctp_nets
*net
,
1896 int non_asoc_addr_ok
)
1898 struct in6_addr ans
;
1899 struct sockaddr_in6
*rt_addr
;
1900 uint8_t loc_scope
, loopscope
;
1901 struct sockaddr_in6
*to
= (struct sockaddr_in6
*)&ro
->ro_dst
;
1904 * This routine is tricky standard v6 src address
1905 * selection cannot take into account what we have
1906 * bound etc, so we can't use it.
1908 * Instead here is what we must do:
1909 * 1) Make sure we have a route, if we
1910 * don't have a route we can never reach the peer.
1911 * 2) Once we have a route, determine the scope of the
1912 * route. Link local, loopback or global.
1913 * 3) Next we divide into three types. Either we
1914 * are bound all.. which means we want to use
1915 * one of the addresses of the interface we are
1917 * 4a) We have not stcb, which means we are using the
1918 * specific addresses bound on an inp, in this
1919 * case we are similar to the stcb case (4b below)
1920 * accept the list is always a positive list.<or>
1921 * 4b) We are bound specific with a stcb, which means we have a
1922 * list of bound addresses and we must see if the
1923 * ifn of the route is actually one of the bound addresses.
1924 * If not, then we must rotate addresses amongst properly
1925 * scoped bound addresses, if so we use the address
1927 * 5) Always, no matter which path we take through the above
1928 * we must be sure the source address we use is allowed to
1929 * be used. I.e. IN6_IFF_DETACHED, IN6_IFF_NOTREADY, and IN6_IFF_ANYCAST
1930 * addresses cannot be used.
1931 * 6) Addresses that are deprecated MAY be used
1932 * if (!ip6_use_deprecated) {
1933 * if (IFA6_IS_DEPRECATED(ifa6)) {
1939 /*** 1> determine route, if not already done */
1940 if (ro
->ro_rt
== NULL
) {
1942 * Need a route to cache.
1944 #ifndef SCOPEDROUTING
1946 scope_save
= to
->sin6_scope_id
;
1947 to
->sin6_scope_id
= 0;
1950 #if defined(__FreeBSD__) || defined(__APPLE__) || defined(__DragonFly__)
1951 rtalloc_ign(ro
, 0UL);
1955 #ifndef SCOPEDROUTING
1956 to
->sin6_scope_id
= scope_save
;
1959 if (ro
->ro_rt
== NULL
) {
1961 * no route to host. this packet is going no-where.
1962 * We probably should make sure we arrange to send back
1966 if (sctp_debug_on
& SCTP_DEBUG_OUTPUT1
) {
1967 kprintf("No route to host, this packet cannot be sent!\n");
1970 memset(&ans
, 0, sizeof(ans
));
1974 /*** 2a> determine scope for outbound address/route */
1975 loc_scope
= loopscope
= 0;
1977 * We base our scope on the outbound packet scope and route,
1978 * NOT the TCB (if there is one). This way in local scope we will only
1979 * use a local scope src address when we send to a local address.
1982 if (IN6_IS_ADDR_LOOPBACK(&to
->sin6_addr
)) {
1983 /* If the route goes to the loopback address OR
1984 * the address is a loopback address, we are loopback
1988 if (sctp_debug_on
& SCTP_DEBUG_OUTPUT1
) {
1989 kprintf("Loopback scope is set\n");
1995 /* mark it as local */
1996 net
->addr_is_local
= 1;
1999 } else if (IN6_IS_ADDR_LINKLOCAL(&to
->sin6_addr
)) {
2001 if (sctp_debug_on
& SCTP_DEBUG_OUTPUT1
) {
2002 kprintf("Link local scope is set, id:%d\n", to
->sin6_scope_id
);
2005 if (to
->sin6_scope_id
)
2006 loc_scope
= to
->sin6_scope_id
;
2013 if (sctp_debug_on
& SCTP_DEBUG_OUTPUT1
) {
2014 kprintf("Global scope is set\n");
2019 /* now, depending on which way we are bound we call the appropriate
2020 * routine to do steps 3-6
2023 if (sctp_debug_on
& SCTP_DEBUG_OUTPUT1
) {
2024 kprintf("Destination address:");
2025 sctp_print_address((struct sockaddr
*)to
);
2029 if (inp
->sctp_flags
& SCTP_PCB_FLAGS_BOUNDALL
) {
2031 if (sctp_debug_on
& SCTP_DEBUG_OUTPUT1
) {
2032 kprintf("Calling bound-all src addr selection for v6\n");
2035 rt_addr
= sctp_choose_v6_boundall(inp
, stcb
, net
, ro
->ro_rt
, loc_scope
, loopscope
, non_asoc_addr_ok
);
2038 if (sctp_debug_on
& SCTP_DEBUG_OUTPUT1
) {
2039 kprintf("Calling bound-specific src addr selection for v6\n");
2043 rt_addr
= sctp_choose_v6_boundspecific_stcb(inp
, stcb
, net
, ro
->ro_rt
, loc_scope
, loopscope
, non_asoc_addr_ok
);
2045 /* we can't have a non-asoc address since we have no association */
2046 rt_addr
= sctp_choose_v6_boundspecific_inp(inp
, ro
->ro_rt
, loc_scope
, loopscope
);
2048 if (rt_addr
== NULL
) {
2049 /* no suitable address? */
2050 struct in6_addr in6
;
2052 if (sctp_debug_on
& SCTP_DEBUG_OUTPUT1
) {
2053 kprintf("V6 packet will reach dead-end no suitable src address\n");
2056 memset(&in6
, 0, sizeof(in6
));
2060 if (sctp_debug_on
& SCTP_DEBUG_OUTPUT1
) {
2061 kprintf("Source address selected is:");
2062 sctp_print_address((struct sockaddr
*)rt_addr
);
2065 return (rt_addr
->sin6_addr
);
2069 sctp_get_ect(struct sctp_tcb
*stcb
,
2070 struct sctp_tmit_chunk
*chk
)
2072 uint8_t this_random
;
2078 if (sctp_ecn_nonce
== 0)
2079 /* no nonce, always return ECT0 */
2080 return (SCTP_ECT0_BIT
);
2082 if (stcb
->asoc
.peer_supports_ecn_nonce
== 0) {
2083 /* Peer does NOT support it, so we send a ECT0 only */
2084 return (SCTP_ECT0_BIT
);
2088 return (SCTP_ECT0_BIT
);
2090 if (((stcb
->asoc
.hb_random_idx
== 3) &&
2091 (stcb
->asoc
.hb_ect_randombit
> 7)) ||
2092 (stcb
->asoc
.hb_random_idx
> 3)) {
2094 rndval
= sctp_select_initial_TSN(&stcb
->sctp_ep
->sctp_ep
);
2095 memcpy(stcb
->asoc
.hb_random_values
, &rndval
,
2096 sizeof(stcb
->asoc
.hb_random_values
));
2097 this_random
= stcb
->asoc
.hb_random_values
[0];
2098 stcb
->asoc
.hb_random_idx
= 0;
2099 stcb
->asoc
.hb_ect_randombit
= 0;
2101 if (stcb
->asoc
.hb_ect_randombit
> 7) {
2102 stcb
->asoc
.hb_ect_randombit
= 0;
2103 stcb
->asoc
.hb_random_idx
++;
2105 this_random
= stcb
->asoc
.hb_random_values
[stcb
->asoc
.hb_random_idx
];
2107 if ((this_random
>> stcb
->asoc
.hb_ect_randombit
) & 0x01) {
2109 /* ECN Nonce stuff */
2110 chk
->rec
.data
.ect_nonce
= SCTP_ECT1_BIT
;
2111 stcb
->asoc
.hb_ect_randombit
++;
2112 return (SCTP_ECT1_BIT
);
2114 stcb
->asoc
.hb_ect_randombit
++;
2115 return (SCTP_ECT0_BIT
);
2119 extern int sctp_no_csum_on_loopback
;
2122 sctp_lowlevel_chunk_output(struct sctp_inpcb
*inp
,
2123 struct sctp_tcb
*stcb
, /* may be NULL */
2124 struct sctp_nets
*net
,
2125 struct sockaddr
*to
,
2127 int nofragment_flag
,
2129 struct sctp_tmit_chunk
*chk
,
2131 /* nofragment_flag to tell if IP_DF should be set (IPv4 only) */
2134 * Given a mbuf chain (via m_next) that holds a packet header
2135 * WITH a SCTPHDR but no IP header, endpoint inp and sa structure.
2136 * - calculate SCTP checksum and fill in
2137 * - prepend a IP address header
2138 * - if boundall use INADDR_ANY
2139 * - if boundspecific do source address selection
2140 * - set fragmentation option for ipV4
2141 * - On return from IP output, check/adjust mtu size
2142 * - of output interface and smallest_mtu size as well.
2144 struct sctphdr
*sctphdr
;
2148 unsigned int have_mtu
;
2151 if ((net
) && (net
->dest_state
& SCTP_ADDR_OUT_OF_SCOPE
)) {
2155 if ((m
->m_flags
& M_PKTHDR
) == 0) {
2157 if (sctp_debug_on
& SCTP_DEBUG_OUTPUT1
) {
2158 kprintf("Software error: sctp_lowlevel_chunk_output() called with non pkthdr!\n");
2164 /* Calculate the csum and fill in the length of the packet */
2165 sctphdr
= mtod(m
, struct sctphdr
*);
2167 if (sctp_no_csum_on_loopback
&&
2169 (stcb
->asoc
.loopback_scope
)) {
2170 sctphdr
->checksum
= 0;
2171 m
->m_pkthdr
.len
= sctp_calculate_len(m
);
2173 sctphdr
->checksum
= 0;
2174 csum
= sctp_calculate_sum(m
, &m
->m_pkthdr
.len
, 0);
2175 sctphdr
->checksum
= csum
;
2177 if (to
->sa_family
== AF_INET
) {
2179 struct route iproute
;
2180 M_PREPEND(m
, sizeof(struct ip
), MB_DONTWAIT
);
2182 /* failed to prepend data, give up */
2185 ip
= mtod(m
, struct ip
*);
2186 ip
->ip_v
= IPVERSION
;
2187 ip
->ip_hl
= (sizeof(struct ip
) >> 2);
2188 if (nofragment_flag
) {
2189 #if defined(WITH_CONVERT_IP_OFF) || defined(__FreeBSD__) || defined(__DragonFly__)
2190 #if defined( __OpenBSD__) || defined(__NetBSD__)
2191 /* OpenBSD has WITH_CONVERT_IP_OFF defined?? */
2192 ip
->ip_off
= htons(IP_DF
);
2197 ip
->ip_off
= htons(IP_DF
);
2202 /* FreeBSD and Apple have RANDOM_IP_ID switch */
2203 #if defined(RANDOM_IP_ID) || defined(__NetBSD__) || defined(__OpenBSD__)
2204 ip
->ip_id
= htons(ip_randomid());
2206 ip
->ip_id
= htons(ip_id
++);
2209 #if defined(__FreeBSD__) || defined(__APPLE__) || defined(__DragonFly__)
2210 ip
->ip_ttl
= inp
->ip_inp
.inp
.inp_ip_ttl
;
2212 ip
->ip_ttl
= inp
->inp_ip_ttl
;
2214 #if defined(__OpenBSD__) || defined(__NetBSD__)
2215 ip
->ip_len
= htons(m
->m_pkthdr
.len
);
2217 ip
->ip_len
= m
->m_pkthdr
.len
;
2220 if ((stcb
->asoc
.ecn_allowed
) && ecn_ok
) {
2222 #if defined(__FreeBSD__) || defined (__APPLE__) || defined(__DragonFly__)
2223 ip
->ip_tos
= (u_char
)((inp
->ip_inp
.inp
.inp_ip_tos
& 0x000000fc) |
2224 sctp_get_ect(stcb
, chk
));
2225 #elif defined(__NetBSD__)
2226 ip
->ip_tos
= (u_char
)((inp
->ip_inp
.inp
.inp_ip
.ip_tos
& 0x000000fc) |
2227 sctp_get_ect(stcb
, chk
));
2229 ip
->ip_tos
= (u_char
)((inp
->inp_ip_tos
& 0x000000fc) |
2230 sctp_get_ect(stcb
, chk
));
2234 #if defined(__FreeBSD__) || defined(__APPLE__) || defined(__DragonFly__)
2235 ip
->ip_tos
= inp
->ip_inp
.inp
.inp_ip_tos
;
2236 #elif defined(__NetBSD__)
2237 ip
->ip_tos
= inp
->ip_inp
.inp
.inp_ip
.ip_tos
;
2239 ip
->ip_tos
= inp
->inp_ip_tos
;
2243 /* no association at all */
2244 #if defined(__FreeBSD__) || defined(__APPLE__) || defined(__DragonFly__)
2245 ip
->ip_tos
= inp
->ip_inp
.inp
.inp_ip_tos
;
2247 ip
->ip_tos
= inp
->inp_ip_tos
;
2250 ip
->ip_p
= IPPROTO_SCTP
;
2254 memset(&iproute
, 0, sizeof(iproute
));
2255 memcpy(&ro
->ro_dst
, to
, to
->sa_len
);
2257 ro
= (struct route
*)&net
->ro
;
2259 /* Now the address selection part */
2260 ip
->ip_dst
.s_addr
= ((struct sockaddr_in
*)to
)->sin_addr
.s_addr
;
2262 /* call the routine to select the src address */
2264 if (net
->src_addr_selected
== 0) {
2265 /* Cache the source address */
2266 ((struct sockaddr_in
*)&net
->ro
._s_addr
)->sin_addr
= sctp_ipv4_source_address_selection(inp
,
2268 ro
, net
, out_of_asoc_ok
);
2270 net
->src_addr_selected
= 1;
2272 ip
->ip_src
= ((struct sockaddr_in
*)&net
->ro
._s_addr
)->sin_addr
;
2274 ip
->ip_src
= sctp_ipv4_source_address_selection(inp
,
2275 stcb
, ro
, net
, out_of_asoc_ok
);
2278 * If source address selection fails and we find no route then
2279 * the ip_ouput should fail as well with a NO_ROUTE_TO_HOST
2280 * type error. We probably should catch that somewhere and
2281 * abort the association right away (assuming this is an INIT
2284 if ((ro
->ro_rt
== NULL
)) {
2286 * src addr selection failed to find a route (or valid
2287 * source addr), so we can't get there from here!
2290 if (sctp_debug_on
& SCTP_DEBUG_OUTPUT1
) {
2291 kprintf("low_level_output: dropped v4 packet- no valid source addr\n");
2292 kprintf("Destination was %x\n", (u_int
)(ntohl(ip
->ip_dst
.s_addr
)));
2294 #endif /* SCTP_DEBUG */
2296 if ((net
->dest_state
& SCTP_ADDR_REACHABLE
) && stcb
)
2297 sctp_ulp_notify(SCTP_NOTIFY_INTERFACE_DOWN
,
2299 SCTP_FAILED_THRESHOLD
,
2301 net
->dest_state
&= ~SCTP_ADDR_REACHABLE
;
2302 net
->dest_state
|= SCTP_ADDR_NOT_REACHABLE
;
2304 if (net
== stcb
->asoc
.primary_destination
) {
2305 /* need a new primary */
2306 struct sctp_nets
*alt
;
2307 alt
= sctp_find_alternate_net(stcb
, net
);
2309 if (sctp_set_primary_addr(stcb
,
2312 net
->dest_state
|= SCTP_ADDR_WAS_PRIMARY
;
2313 net
->src_addr_selected
= 0;
2320 return (EHOSTUNREACH
);
2322 have_mtu
= ro
->ro_rt
->rt_ifp
->if_mtu
;
2325 o_flgs
= (IP_RAWOUTPUT
| (inp
->sctp_socket
->so_options
& (SO_DONTROUTE
| SO_BROADCAST
)));
2327 if (sctp_debug_on
& SCTP_DEBUG_OUTPUT3
) {
2328 kprintf("Calling ipv4 output routine from low level src addr:%x\n",
2329 (u_int
)(ntohl(ip
->ip_src
.s_addr
)));
2330 kprintf("Destination is %x\n", (u_int
)(ntohl(ip
->ip_dst
.s_addr
)));
2331 kprintf("RTP route is %p through\n", ro
->ro_rt
);
2334 if ((have_mtu
) && (net
) && (have_mtu
> net
->mtu
)) {
2335 ro
->ro_rt
->rt_ifp
->if_mtu
= net
->mtu
;
2337 ret
= ip_output(m
, inp
->ip_inp
.inp
.inp_options
,
2338 ro
, o_flgs
, inp
->ip_inp
.inp
.inp_moptions
2339 #if defined(__OpenBSD__) || (defined(__FreeBSD__) && __FreeBSD_version >= 480000) \
2340 || defined(__DragonFly__)
2343 #if defined(__NetBSD__)
2344 ,(struct socket
*)inp
->sctp_socket
2348 if ((ro
->ro_rt
) && (have_mtu
) && (net
) && (have_mtu
> net
->mtu
)) {
2349 ro
->ro_rt
->rt_ifp
->if_mtu
= have_mtu
;
2351 sctp_pegs
[SCTP_DATAGRAMS_SENT
]++;
2353 if (sctp_debug_on
& SCTP_DEBUG_OUTPUT3
) {
2354 kprintf("Ip output returns %d\n", ret
);
2358 /* free tempy routes */
2362 /* PMTU check versus smallest asoc MTU goes here */
2363 if (ro
->ro_rt
!= NULL
) {
2364 if (ro
->ro_rt
->rt_rmx
.rmx_mtu
&&
2365 (stcb
->asoc
.smallest_mtu
> ro
->ro_rt
->rt_rmx
.rmx_mtu
)) {
2366 sctp_mtu_size_reset(inp
, &stcb
->asoc
,
2367 ro
->ro_rt
->rt_rmx
.rmx_mtu
);
2370 /* route was freed */
2371 net
->src_addr_selected
= 0;
2377 else if (to
->sa_family
== AF_INET6
) {
2378 struct ip6_hdr
*ip6h
;
2379 #ifdef NEW_STRUCT_ROUTE
2380 struct route ip6route
;
2382 struct route_in6 ip6route
;
2386 uint16_t flowBottom
;
2387 u_char tosBottom
, tosTop
;
2388 struct sockaddr_in6
*sin6
, tmp
, *lsa6
, lsa6_tmp
;
2389 struct sockaddr_in6 lsa6_storage
;
2392 u_short prev_port
=0;
2394 M_PREPEND(m
, sizeof(struct ip6_hdr
), MB_DONTWAIT
);
2396 /* failed to prepend data, give up */
2399 ip6h
= mtod(m
, struct ip6_hdr
*);
2402 * We assume here that inp_flow is in host byte order within
2405 flowBottom
= ((struct in6pcb
*)inp
)->in6p_flowinfo
& 0x0000ffff;
2406 flowTop
= ((((struct in6pcb
*)inp
)->in6p_flowinfo
& 0x000f0000) >> 16);
2408 tosTop
= (((((struct in6pcb
*)inp
)->in6p_flowinfo
& 0xf0) >> 4) | IPV6_VERSION
);
2410 /* protect *sin6 from overwrite */
2411 sin6
= (struct sockaddr_in6
*)to
;
2415 /* KAME hack: embed scopeid */
2416 #if defined(SCTP_BASE_FREEBSD) || defined(__APPLE__) || defined(__DragonFly__)
2417 if (in6_embedscope(&sin6
->sin6_addr
, sin6
, NULL
, NULL
) != 0)
2419 if (in6_embedscope(&sin6
->sin6_addr
, sin6
) != 0)
2423 memset(&ip6route
, 0, sizeof(ip6route
));
2424 ro
= (struct route
*)&ip6route
;
2425 memcpy(&ro
->ro_dst
, sin6
, sin6
->sin6_len
);
2427 ro
= (struct route
*)&net
->ro
;
2430 if ((stcb
->asoc
.ecn_allowed
) && ecn_ok
) {
2432 tosBottom
= (((((struct in6pcb
*)inp
)->in6p_flowinfo
& 0x0c) | sctp_get_ect(stcb
, chk
)) << 4);
2435 tosBottom
= ((((struct in6pcb
*)inp
)->in6p_flowinfo
& 0x0c) << 4);
2438 /* we could get no asoc if it is a O-O-T-B packet */
2439 tosBottom
= ((((struct in6pcb
*)inp
)->in6p_flowinfo
& 0x0c) << 4);
2441 ip6h
->ip6_flow
= htonl(((tosTop
<< 24) | ((tosBottom
|flowTop
) << 16) | flowBottom
));
2442 ip6h
->ip6_nxt
= IPPROTO_SCTP
;
2443 ip6h
->ip6_plen
= m
->m_pkthdr
.len
;
2444 ip6h
->ip6_dst
= sin6
->sin6_addr
;
2447 * Add SRC address selection here:
2448 * we can only reuse to a limited degree the kame src-addr-sel,
2449 * since we can try their selection but it may not be bound.
2451 bzero(&lsa6_tmp
, sizeof(lsa6_tmp
));
2452 lsa6_tmp
.sin6_family
= AF_INET6
;
2453 lsa6_tmp
.sin6_len
= sizeof(lsa6_tmp
);
2456 if (net
->src_addr_selected
== 0) {
2457 /* Cache the source address */
2458 ((struct sockaddr_in6
*)&net
->ro
._s_addr
)->sin6_addr
= sctp_ipv6_source_address_selection(inp
,
2459 stcb
, ro
, net
, out_of_asoc_ok
);
2462 net
->src_addr_selected
= 1;
2464 lsa6
->sin6_addr
= ((struct sockaddr_in6
*)&net
->ro
._s_addr
)->sin6_addr
;
2466 lsa6
->sin6_addr
= sctp_ipv6_source_address_selection(
2467 inp
, stcb
, ro
, net
, out_of_asoc_ok
);
2469 lsa6
->sin6_port
= inp
->sctp_lport
;
2471 if ((ro
->ro_rt
== NULL
)) {
2473 * src addr selection failed to find a route (or valid
2474 * source addr), so we can't get there from here!
2477 if (sctp_debug_on
& SCTP_DEBUG_OUTPUT1
) {
2478 kprintf("low_level_output: dropped v6 pkt- no valid source addr\n");
2483 if ((net
->dest_state
& SCTP_ADDR_REACHABLE
) && stcb
)
2484 sctp_ulp_notify(SCTP_NOTIFY_INTERFACE_DOWN
,
2486 SCTP_FAILED_THRESHOLD
,
2488 net
->dest_state
&= ~SCTP_ADDR_REACHABLE
;
2489 net
->dest_state
|= SCTP_ADDR_NOT_REACHABLE
;
2491 if (net
== stcb
->asoc
.primary_destination
) {
2492 /* need a new primary */
2493 struct sctp_nets
*alt
;
2494 alt
= sctp_find_alternate_net(stcb
, net
);
2496 if (sctp_set_primary_addr(stcb
,
2499 net
->dest_state
|= SCTP_ADDR_WAS_PRIMARY
;
2500 net
->src_addr_selected
= 0;
2506 return (EHOSTUNREACH
);
2509 #ifndef SCOPEDROUTING
2511 * XXX: sa6 may not have a valid sin6_scope_id in
2512 * the non-SCOPEDROUTING case.
2514 bzero(&lsa6_storage
, sizeof(lsa6_storage
));
2515 lsa6_storage
.sin6_family
= AF_INET6
;
2516 lsa6_storage
.sin6_len
= sizeof(lsa6_storage
);
2517 if ((error
= in6_recoverscope(&lsa6_storage
, &lsa6
->sin6_addr
,
2523 lsa6_storage
.sin6_addr
= lsa6
->sin6_addr
;
2524 lsa6_storage
.sin6_port
= inp
->sctp_lport
;
2525 lsa6
= &lsa6_storage
;
2526 #endif /* SCOPEDROUTING */
2527 ip6h
->ip6_src
= lsa6
->sin6_addr
;
2530 * We set the hop limit now since there is a good chance that
2531 * our ro pointer is now filled
2533 ip6h
->ip6_hlim
= in6_selecthlim((struct in6pcb
*)&inp
->ip_inp
.inp
,
2535 (ro
->ro_rt
? (ro
->ro_rt
->rt_ifp
) : (NULL
)) :
2538 ifp
= ro
->ro_rt
->rt_ifp
;
2540 if (sctp_debug_on
& SCTP_DEBUG_OUTPUT3
) {
2541 /* Copy to be sure something bad is not happening */
2542 sin6
->sin6_addr
= ip6h
->ip6_dst
;
2543 lsa6
->sin6_addr
= ip6h
->ip6_src
;
2545 kprintf("Calling ipv6 output routine from low level\n");
2547 sctp_print_address((struct sockaddr
*)lsa6
);
2549 sctp_print_address((struct sockaddr
*)sin6
);
2551 #endif /* SCTP_DEBUG */
2553 sin6
= (struct sockaddr_in6
*)&net
->ro
._l_addr
;
2554 /* preserve the port and scope for link local send */
2555 prev_scope
= sin6
->sin6_scope_id
;
2556 prev_port
= sin6
->sin6_port
;
2558 ret
= ip6_output(m
, ((struct in6pcb
*)inp
)->in6p_outputopts
,
2559 #ifdef NEW_STRUCT_ROUTE
2562 (struct route_in6
*)ro
,
2565 ((struct in6pcb
*)inp
)->in6p_moptions
,
2566 #if defined(__NetBSD__)
2567 (struct socket
*)inp
->sctp_socket
,
2570 #if (defined(__FreeBSD__) && __FreeBSD_version >= 480000) || defined(__DragonFly__)
2575 /* for link local this must be done */
2576 sin6
->sin6_scope_id
= prev_scope
;
2577 sin6
->sin6_port
= prev_port
;
2580 if (sctp_debug_on
& SCTP_DEBUG_OUTPUT3
) {
2581 kprintf("return from send is %d\n", ret
);
2583 #endif /* SCTP_DEBUG_OUTPUT */
2584 sctp_pegs
[SCTP_DATAGRAMS_SENT
]++;
2586 /* Now if we had a temp route free it */
2591 /* PMTU check versus smallest asoc MTU goes here */
2592 if (ro
->ro_rt
== NULL
) {
2593 /* Route was freed */
2594 net
->src_addr_selected
= 0;
2596 if (ro
->ro_rt
!= NULL
) {
2597 if (ro
->ro_rt
->rt_rmx
.rmx_mtu
&&
2598 (stcb
->asoc
.smallest_mtu
> ro
->ro_rt
->rt_rmx
.rmx_mtu
)) {
2599 sctp_mtu_size_reset(inp
,
2601 ro
->ro_rt
->rt_rmx
.rmx_mtu
);
2604 #if (defined(SCTP_BASE_FREEBSD) && __FreeBSD_version < 500000) || defined(__APPLE__)
2605 #define ND_IFINFO(ifp) (&nd_ifinfo[ifp->if_index])
2606 #endif /* SCTP_BASE_FREEBSD */
2607 if (ND_IFINFO(ifp
)->linkmtu
&&
2608 (stcb
->asoc
.smallest_mtu
> ND_IFINFO(ifp
)->linkmtu
)) {
2609 sctp_mtu_size_reset(inp
,
2611 ND_IFINFO(ifp
)->linkmtu
);
2620 if (sctp_debug_on
& SCTP_DEBUG_OUTPUT1
) {
2621 kprintf("Unknown protocol (TSNH) type %d\n", ((struct sockaddr
*)to
)->sa_family
);
2630 sctp_is_address_in_scope(struct ifaddr
*ifa
,
2631 int ipv4_addr_legal
,
2632 int ipv6_addr_legal
,
2634 int ipv4_local_scope
,
2638 if ((loopback_scope
== 0) &&
2640 (ifa
->ifa_ifp
->if_type
== IFT_LOOP
)) {
2641 /* skip loopback if not in scope *
2645 if ((ifa
->ifa_addr
->sa_family
== AF_INET
) && ipv4_addr_legal
) {
2646 struct sockaddr_in
*sin
;
2647 sin
= (struct sockaddr_in
*)ifa
->ifa_addr
;
2648 if (sin
->sin_addr
.s_addr
== 0) {
2649 /* not in scope , unspecified */
2652 if ((ipv4_local_scope
== 0) &&
2653 (IN4_ISPRIVATE_ADDRESS(&sin
->sin_addr
))) {
2654 /* private address not in scope */
2657 } else if ((ifa
->ifa_addr
->sa_family
== AF_INET6
) && ipv6_addr_legal
) {
2658 struct sockaddr_in6
*sin6
;
2659 struct in6_ifaddr
*ifa6
;
2661 ifa6
= (struct in6_ifaddr
*)ifa
;
2662 /* ok to use deprecated addresses? */
2663 if (!ip6_use_deprecated
) {
2664 if (ifa6
->ia6_flags
&
2665 IN6_IFF_DEPRECATED
) {
2669 if (ifa6
->ia6_flags
&
2672 IN6_IFF_NOTREADY
)) {
2675 sin6
= (struct sockaddr_in6
*)ifa
->ifa_addr
;
2676 if (IN6_IS_ADDR_UNSPECIFIED(&sin6
->sin6_addr
)) {
2677 /* skip unspecifed addresses */
2680 if (/*(local_scope == 0) && */
2681 (IN6_IS_ADDR_LINKLOCAL(&sin6
->sin6_addr
))) {
2684 if ((site_scope
== 0) &&
2685 (IN6_IS_ADDR_SITELOCAL(&sin6
->sin6_addr
))) {
2696 sctp_send_initiate(struct sctp_inpcb
*inp
, struct sctp_tcb
*stcb
)
2698 struct mbuf
*m
, *m_at
, *m_last
;
2699 struct sctp_nets
*net
;
2700 struct sctp_init_msg
*initm
;
2701 struct sctp_supported_addr_param
*sup_addr
;
2702 struct sctp_ecn_supported_param
*ecn
;
2703 struct sctp_prsctp_supported_param
*prsctp
;
2704 struct sctp_ecn_nonce_supported_param
*ecn_nonce
;
2705 struct sctp_supported_chunk_types_param
*pr_supported
;
2709 /* INIT's always go to the primary (and usually ONLY address) */
2711 net
= stcb
->asoc
.primary_destination
;
2713 net
= TAILQ_FIRST(&stcb
->asoc
.nets
);
2718 /* we confirm any address we send an INIT to */
2719 net
->dest_state
&= ~SCTP_ADDR_UNCONFIRMED
;
2720 sctp_set_primary_addr(stcb
, NULL
, net
);
2722 /* we confirm any address we send an INIT to */
2723 net
->dest_state
&= ~SCTP_ADDR_UNCONFIRMED
;
2726 if (sctp_debug_on
& SCTP_DEBUG_OUTPUT4
) {
2727 kprintf("Sending INIT to ");
2728 sctp_print_address ((struct sockaddr
*)&net
->ro
._l_addr
);
2731 if (((struct sockaddr
*)&(net
->ro
._l_addr
))->sa_family
== AF_INET6
) {
2732 /* special hook, if we are sending to link local
2733 * it will not show up in our private address count.
2735 struct sockaddr_in6
*sin6l
;
2736 sin6l
= &net
->ro
._l_addr
.sin6
;
2737 if (IN6_IS_ADDR_LINKLOCAL(&sin6l
->sin6_addr
))
2740 if (callout_pending(&net
->rxt_timer
.timer
)) {
2741 /* This case should not happen */
2744 /* start the INIT timer */
2745 if (sctp_timer_start(SCTP_TIMER_TYPE_INIT
, inp
, stcb
, net
)) {
2746 /* we are hosed since I can't start the INIT timer? */
2749 MGETHDR(m
, MB_DONTWAIT
, MT_HEADER
);
2751 /* No memory, INIT timer will re-attempt. */
2754 /* make it into a M_EXT */
2755 MCLGET(m
, MB_DONTWAIT
);
2756 if ((m
->m_flags
& M_EXT
) != M_EXT
) {
2757 /* Failed to get cluster buffer */
2761 m
->m_data
+= SCTP_MIN_OVERHEAD
;
2762 m
->m_len
= sizeof(struct sctp_init_msg
);
2763 /* Now lets put the SCTP header in place */
2764 initm
= mtod(m
, struct sctp_init_msg
*);
2765 initm
->sh
.src_port
= inp
->sctp_lport
;
2766 initm
->sh
.dest_port
= stcb
->rport
;
2767 initm
->sh
.v_tag
= 0;
2768 initm
->sh
.checksum
= 0; /* calculate later */
2769 /* now the chunk header */
2770 initm
->msg
.ch
.chunk_type
= SCTP_INITIATION
;
2771 initm
->msg
.ch
.chunk_flags
= 0;
2772 /* fill in later from mbuf we build */
2773 initm
->msg
.ch
.chunk_length
= 0;
2774 /* place in my tag */
2775 initm
->msg
.init
.initiate_tag
= htonl(stcb
->asoc
.my_vtag
);
2776 /* set up some of the credits. */
2777 initm
->msg
.init
.a_rwnd
= htonl(max(inp
->sctp_socket
->so_rcv
.ssb_hiwat
,
2778 SCTP_MINIMAL_RWND
));
2780 initm
->msg
.init
.num_outbound_streams
= htons(stcb
->asoc
.pre_open_streams
);
2781 initm
->msg
.init
.num_inbound_streams
= htons(stcb
->asoc
.max_inbound_streams
);
2782 initm
->msg
.init
.initial_tsn
= htonl(stcb
->asoc
.init_seq_number
);
2783 /* now the address restriction */
2784 sup_addr
= (struct sctp_supported_addr_param
*)((caddr_t
)initm
+
2786 sup_addr
->ph
.param_type
= htons(SCTP_SUPPORTED_ADDRTYPE
);
2787 /* we support 2 types IPv6/IPv4 */
2788 sup_addr
->ph
.param_length
= htons(sizeof(*sup_addr
) +
2790 sup_addr
->addr_type
[0] = htons(SCTP_IPV4_ADDRESS
);
2791 sup_addr
->addr_type
[1] = htons(SCTP_IPV6_ADDRESS
);
2792 m
->m_len
+= sizeof(*sup_addr
) + sizeof(uint16_t);
2794 /* if (inp->sctp_flags & SCTP_PCB_FLAGS_ADAPTIONEVNT) {*/
2795 if (inp
->sctp_ep
.adaption_layer_indicator
) {
2796 struct sctp_adaption_layer_indication
*ali
;
2797 ali
= (struct sctp_adaption_layer_indication
*)(
2798 (caddr_t
)sup_addr
+ sizeof(*sup_addr
) + sizeof(uint16_t));
2799 ali
->ph
.param_type
= htons(SCTP_ULP_ADAPTION
);
2800 ali
->ph
.param_length
= htons(sizeof(*ali
));
2801 ali
->indication
= ntohl(inp
->sctp_ep
.adaption_layer_indicator
);
2802 m
->m_len
+= sizeof(*ali
);
2803 ecn
= (struct sctp_ecn_supported_param
*)((caddr_t
)ali
+
2806 ecn
= (struct sctp_ecn_supported_param
*)((caddr_t
)sup_addr
+
2807 sizeof(*sup_addr
) + sizeof(uint16_t));
2810 /* now any cookie time extensions */
2811 if (stcb
->asoc
.cookie_preserve_req
) {
2812 struct sctp_cookie_perserve_param
*cookie_preserve
;
2813 cookie_preserve
= (struct sctp_cookie_perserve_param
*)(ecn
);
2814 cookie_preserve
->ph
.param_type
= htons(SCTP_COOKIE_PRESERVE
);
2815 cookie_preserve
->ph
.param_length
= htons(
2816 sizeof(*cookie_preserve
));
2817 cookie_preserve
->time
= htonl(stcb
->asoc
.cookie_preserve_req
);
2818 m
->m_len
+= sizeof(*cookie_preserve
);
2819 ecn
= (struct sctp_ecn_supported_param
*)(
2820 (caddr_t
)cookie_preserve
+ sizeof(*cookie_preserve
));
2821 stcb
->asoc
.cookie_preserve_req
= 0;
2825 if (sctp_ecn
== 1) {
2826 ecn
->ph
.param_type
= htons(SCTP_ECN_CAPABLE
);
2827 ecn
->ph
.param_length
= htons(sizeof(*ecn
));
2828 m
->m_len
+= sizeof(*ecn
);
2829 prsctp
= (struct sctp_prsctp_supported_param
*)((caddr_t
)ecn
+
2832 prsctp
= (struct sctp_prsctp_supported_param
*)((caddr_t
)ecn
);
2834 /* And now tell the peer we do pr-sctp */
2835 prsctp
->ph
.param_type
= htons(SCTP_PRSCTP_SUPPORTED
);
2836 prsctp
->ph
.param_length
= htons(sizeof(*prsctp
));
2837 m
->m_len
+= sizeof(*prsctp
);
2840 /* And now tell the peer we do all the extensions */
2841 pr_supported
= (struct sctp_supported_chunk_types_param
*)((caddr_t
)prsctp
+
2844 pr_supported
->ph
.param_type
= htons(SCTP_SUPPORTED_CHUNK_EXT
);
2845 pr_supported
->ph
.param_length
= htons(sizeof(*pr_supported
) + SCTP_EXT_COUNT
);
2846 pr_supported
->chunk_types
[0] = SCTP_ASCONF
;
2847 pr_supported
->chunk_types
[1] = SCTP_ASCONF_ACK
;
2848 pr_supported
->chunk_types
[2] = SCTP_FORWARD_CUM_TSN
;
2849 pr_supported
->chunk_types
[3] = SCTP_PACKET_DROPPED
;
2850 pr_supported
->chunk_types
[4] = SCTP_STREAM_RESET
;
2851 pr_supported
->chunk_types
[5] = 0; /* pad */
2852 pr_supported
->chunk_types
[6] = 0; /* pad */
2853 pr_supported
->chunk_types
[7] = 0; /* pad */
2855 m
->m_len
+= (sizeof(*pr_supported
) + SCTP_EXT_COUNT
+ SCTP_PAD_EXT_COUNT
);
2856 /* ECN nonce: And now tell the peer we support ECN nonce */
2858 if (sctp_ecn_nonce
) {
2859 ecn_nonce
= (struct sctp_ecn_nonce_supported_param
*)((caddr_t
)pr_supported
+
2860 sizeof(*pr_supported
) + SCTP_EXT_COUNT
+ SCTP_PAD_EXT_COUNT
);
2861 ecn_nonce
->ph
.param_type
= htons(SCTP_ECN_NONCE_SUPPORTED
);
2862 ecn_nonce
->ph
.param_length
= htons(sizeof(*ecn_nonce
));
2863 m
->m_len
+= sizeof(*ecn_nonce
);
2867 /* now the addresses */
2868 if (inp
->sctp_flags
& SCTP_PCB_FLAGS_BOUNDALL
) {
2873 TAILQ_FOREACH(ifn
, &ifnet
, if_list
) {
2874 struct ifaddr_container
*ifac
;
2876 if ((stcb
->asoc
.loopback_scope
== 0) &&
2877 (ifn
->if_type
== IFT_LOOP
)) {
2879 * Skip loopback devices if loopback_scope
2884 TAILQ_FOREACH(ifac
, &ifn
->if_addrheads
[mycpuid
], ifa_link
) {
2885 struct ifaddr
*ifa
= ifac
->ifa
;
2887 if (sctp_is_address_in_scope(ifa
,
2888 stcb
->asoc
.ipv4_addr_legal
,
2889 stcb
->asoc
.ipv6_addr_legal
,
2890 stcb
->asoc
.loopback_scope
,
2891 stcb
->asoc
.ipv4_local_scope
,
2892 stcb
->asoc
.local_scope
,
2893 stcb
->asoc
.site_scope
) == 0) {
2900 TAILQ_FOREACH(ifn
, &ifnet
, if_list
) {
2901 struct ifaddr_container
*ifac
;
2903 if ((stcb
->asoc
.loopback_scope
== 0) &&
2904 (ifn
->if_type
== IFT_LOOP
)) {
2906 * Skip loopback devices if loopback_scope
2911 TAILQ_FOREACH(ifac
, &ifn
->if_addrheads
[mycpuid
], ifa_link
) {
2912 struct ifaddr
*ifa
= ifac
->ifa
;
2914 if (sctp_is_address_in_scope(ifa
,
2915 stcb
->asoc
.ipv4_addr_legal
,
2916 stcb
->asoc
.ipv6_addr_legal
,
2917 stcb
->asoc
.loopback_scope
,
2918 stcb
->asoc
.ipv4_local_scope
,
2919 stcb
->asoc
.local_scope
,
2920 stcb
->asoc
.site_scope
) == 0) {
2923 m_at
= sctp_add_addr_to_mbuf(m_at
, ifa
);
2928 struct sctp_laddr
*laddr
;
2931 /* First, how many ? */
2932 LIST_FOREACH(laddr
, &inp
->sctp_addr_list
, sctp_nxt_addr
) {
2933 if (laddr
->ifa
== NULL
) {
2936 if (laddr
->ifa
->ifa_addr
== NULL
)
2938 if (sctp_is_address_in_scope(laddr
->ifa
,
2939 stcb
->asoc
.ipv4_addr_legal
,
2940 stcb
->asoc
.ipv6_addr_legal
,
2941 stcb
->asoc
.loopback_scope
,
2942 stcb
->asoc
.ipv4_local_scope
,
2943 stcb
->asoc
.local_scope
,
2944 stcb
->asoc
.site_scope
) == 0) {
2949 /* To get through a NAT we only list addresses if
2950 * we have more than one. That way if you just
2951 * bind a single address we let the source of the init
2952 * dictate our address.
2955 LIST_FOREACH(laddr
, &inp
->sctp_addr_list
, sctp_nxt_addr
) {
2956 if (laddr
->ifa
== NULL
) {
2959 if (laddr
->ifa
->ifa_addr
== NULL
) {
2963 if (sctp_is_address_in_scope(laddr
->ifa
,
2964 stcb
->asoc
.ipv4_addr_legal
,
2965 stcb
->asoc
.ipv6_addr_legal
,
2966 stcb
->asoc
.loopback_scope
,
2967 stcb
->asoc
.ipv4_local_scope
,
2968 stcb
->asoc
.local_scope
,
2969 stcb
->asoc
.site_scope
) == 0) {
2972 m_at
= sctp_add_addr_to_mbuf(m_at
, laddr
->ifa
);
2976 /* calulate the size and update pkt header and chunk header */
2977 m
->m_pkthdr
.len
= 0;
2978 for (m_at
= m
; m_at
; m_at
= m_at
->m_next
) {
2979 if (m_at
->m_next
== NULL
)
2981 m
->m_pkthdr
.len
+= m_at
->m_len
;
2983 initm
->msg
.ch
.chunk_length
= htons((m
->m_pkthdr
.len
-
2984 sizeof(struct sctphdr
)));
2985 /* We pass 0 here to NOT set IP_DF if its IPv4, we
2986 * ignore the return here since the timer will drive
2990 /* I don't expect this to execute but we will be safe here */
2991 padval
= m
->m_pkthdr
.len
% 4;
2992 if ((padval
) && (m_last
)) {
2993 /* The compiler worries that m_last may not be
2994 * set even though I think it is impossible :->
2995 * however we add m_last here just in case.
2998 ret
= sctp_add_pad_tombuf(m_last
, (4-padval
));
3000 /* Houston we have a problem, no space */
3004 m
->m_pkthdr
.len
+= padval
;
3007 if (sctp_debug_on
& SCTP_DEBUG_OUTPUT4
) {
3008 kprintf("Calling lowlevel output stcb:%x net:%x\n",
3009 (u_int
)stcb
, (u_int
)net
);
3012 ret
= sctp_lowlevel_chunk_output(inp
, stcb
, net
,
3013 (struct sockaddr
*)&net
->ro
._l_addr
, m
, 0, 0, NULL
, 0);
3015 if (sctp_debug_on
& SCTP_DEBUG_OUTPUT4
) {
3016 kprintf("Low level output returns %d\n", ret
);
3019 sctp_timer_start(SCTP_TIMER_TYPE_INIT
, inp
, stcb
, net
);
3020 SCTP_GETTIME_TIMEVAL(&net
->last_sent_time
);
3024 sctp_arethere_unrecognized_parameters(struct mbuf
*in_initpkt
,
3025 int param_offset
, int *abort_processing
, struct sctp_chunkhdr
*cp
)
3027 /* Given a mbuf containing an INIT or INIT-ACK
3028 * with the param_offset being equal to the
3029 * beginning of the params i.e. (iphlen + sizeof(struct sctp_init_msg)
3030 * parse through the parameters to the end of the mbuf verifying
3031 * that all parameters are known.
3033 * For unknown parameters build and return a mbuf with
3034 * UNRECOGNIZED_PARAMETER errors. If the flags indicate
3035 * to stop processing this chunk stop, and set *abort_processing
3038 * By having param_offset be pre-set to where parameters begin
3039 * it is hoped that this routine may be reused in the future
3042 struct sctp_paramhdr
*phdr
, params
;
3044 struct mbuf
*mat
, *op_err
;
3046 int at
, limit
, pad_needed
;
3047 uint16_t ptype
, plen
;
3050 *abort_processing
= 0;
3053 limit
= ntohs(cp
->chunk_length
) - sizeof(struct sctp_init_chunk
);
3055 if (sctp_debug_on
& SCTP_DEBUG_OUTPUT4
) {
3056 kprintf("Limit is %d bytes\n", limit
);
3062 phdr
= sctp_get_next_param(mat
, at
, ¶ms
, sizeof(params
));
3063 while ((phdr
!= NULL
) && ((size_t)limit
>= sizeof(struct sctp_paramhdr
))) {
3064 ptype
= ntohs(phdr
->param_type
);
3065 plen
= ntohs(phdr
->param_length
);
3066 limit
-= SCTP_SIZE32(plen
);
3067 if (plen
< sizeof(struct sctp_paramhdr
)) {
3069 if (sctp_debug_on
& SCTP_DEBUG_OUTPUT4
) {
3070 kprintf("sctp_output.c:Impossible length in parameter < %d\n", plen
);
3073 *abort_processing
= 1;
3076 /* All parameters for all chunks that we
3077 * know/understand are listed here. We process
3078 * them other places and make appropriate
3079 * stop actions per the upper bits. However
3080 * this is the generic routine processor's can
3081 * call to get back an operr.. to either incorporate (init-ack)
3084 if ((ptype
== SCTP_HEARTBEAT_INFO
) ||
3085 (ptype
== SCTP_IPV4_ADDRESS
) ||
3086 (ptype
== SCTP_IPV6_ADDRESS
) ||
3087 (ptype
== SCTP_STATE_COOKIE
) ||
3088 (ptype
== SCTP_UNRECOG_PARAM
) ||
3089 (ptype
== SCTP_COOKIE_PRESERVE
) ||
3090 (ptype
== SCTP_SUPPORTED_ADDRTYPE
) ||
3091 (ptype
== SCTP_PRSCTP_SUPPORTED
) ||
3092 (ptype
== SCTP_ADD_IP_ADDRESS
) ||
3093 (ptype
== SCTP_DEL_IP_ADDRESS
) ||
3094 (ptype
== SCTP_ECN_CAPABLE
) ||
3095 (ptype
== SCTP_ULP_ADAPTION
) ||
3096 (ptype
== SCTP_ERROR_CAUSE_IND
) ||
3097 (ptype
== SCTP_SET_PRIM_ADDR
) ||
3098 (ptype
== SCTP_SUCCESS_REPORT
) ||
3099 (ptype
== SCTP_ULP_ADAPTION
) ||
3100 (ptype
== SCTP_SUPPORTED_CHUNK_EXT
) ||
3101 (ptype
== SCTP_ECN_NONCE_SUPPORTED
)
3104 at
+= SCTP_SIZE32(plen
);
3105 } else if (ptype
== SCTP_HOSTNAME_ADDRESS
) {
3106 /* We can NOT handle HOST NAME addresses!! */
3108 if (sctp_debug_on
& SCTP_DEBUG_OUTPUT4
) {
3109 kprintf("Can't handle hostname addresses.. abort processing\n");
3112 *abort_processing
= 1;
3113 if (op_err
== NULL
) {
3114 /* Ok need to try to get a mbuf */
3115 MGETHDR(op_err
, MB_DONTWAIT
, MT_DATA
);
3118 op_err
->m_pkthdr
.len
= 0;
3119 /* pre-reserve space for ip and sctp header and chunk hdr*/
3120 op_err
->m_data
+= sizeof(struct ip6_hdr
);
3121 op_err
->m_data
+= sizeof(struct sctphdr
);
3122 op_err
->m_data
+= sizeof(struct sctp_chunkhdr
);
3126 /* If we have space */
3127 struct sctp_paramhdr s
;
3130 pad_needed
= 4 - (err_at
% 4);
3131 m_copyback(op_err
, err_at
, pad_needed
, (caddr_t
)&cpthis
);
3132 err_at
+= pad_needed
;
3134 s
.param_type
= htons(SCTP_CAUSE_UNRESOLV_ADDR
);
3135 s
.param_length
= htons(sizeof(s
) + plen
);
3136 m_copyback(op_err
, err_at
, sizeof(s
), (caddr_t
)&s
);
3137 err_at
+= sizeof(s
);
3138 phdr
= sctp_get_next_param(mat
, at
, (struct sctp_paramhdr
*)tempbuf
, plen
);
3140 sctp_m_freem(op_err
);
3141 /* we are out of memory but we
3142 * still need to have a look at what to
3143 * do (the system is in trouble though).
3147 m_copyback(op_err
, err_at
, plen
, (caddr_t
)phdr
);
3152 /* we do not recognize the parameter
3153 * figure out what we do.
3156 if (sctp_debug_on
& SCTP_DEBUG_OUTPUT4
) {
3157 kprintf("Got parameter type %x - unknown\n",
3161 if ((ptype
& 0x4000) == 0x4000) {
3162 /* Report bit is set?? */
3164 if (sctp_debug_on
& SCTP_DEBUG_OUTPUT4
) {
3165 kprintf("Report bit is set\n");
3168 if (op_err
== NULL
) {
3169 /* Ok need to try to get an mbuf */
3170 MGETHDR(op_err
, MB_DONTWAIT
, MT_DATA
);
3173 op_err
->m_pkthdr
.len
= 0;
3174 op_err
->m_data
+= sizeof(struct ip6_hdr
);
3175 op_err
->m_data
+= sizeof(struct sctphdr
);
3176 op_err
->m_data
+= sizeof(struct sctp_chunkhdr
);
3180 /* If we have space */
3181 struct sctp_paramhdr s
;
3184 pad_needed
= 4 - (err_at
% 4);
3185 m_copyback(op_err
, err_at
, pad_needed
, (caddr_t
)&cpthis
);
3186 err_at
+= pad_needed
;
3188 s
.param_type
= htons(SCTP_UNRECOG_PARAM
);
3189 s
.param_length
= htons(sizeof(s
) + plen
);
3190 m_copyback(op_err
, err_at
, sizeof(s
), (caddr_t
)&s
);
3191 err_at
+= sizeof(s
);
3192 if (plen
> sizeof(tempbuf
)) {
3193 plen
= sizeof(tempbuf
);
3195 phdr
= sctp_get_next_param(mat
, at
, (struct sctp_paramhdr
*)tempbuf
, plen
);
3197 sctp_m_freem(op_err
);
3198 /* we are out of memory but we
3199 * still need to have a look at what to
3200 * do (the system is in trouble though).
3202 goto more_processing
;
3204 m_copyback(op_err
, err_at
, plen
, (caddr_t
)phdr
);
3209 if ((ptype
& 0x8000) == 0x0000) {
3211 if (sctp_debug_on
& SCTP_DEBUG_OUTPUT4
) {
3212 kprintf("Abort bit is now setting1\n");
3217 /* skip this chunk and continue processing */
3218 at
+= SCTP_SIZE32(plen
);
3222 phdr
= sctp_get_next_param(mat
, at
, ¶ms
, sizeof(params
));
3228 sctp_are_there_new_addresses(struct sctp_association
*asoc
,
3229 struct mbuf
*in_initpkt
, int iphlen
, int offset
)
3232 * Given a INIT packet, look through the packet to verify that
3233 * there are NO new addresses. As we go through the parameters
3234 * add reports of any un-understood parameters that require an
3235 * error. Also we must return (1) to drop the packet if we see
3236 * a un-understood parameter that tells us to drop the chunk.
3238 struct sockaddr_in sin4
, *sa4
;
3239 struct sockaddr_in6 sin6
, *sa6
;
3240 struct sockaddr
*sa_touse
;
3241 struct sockaddr
*sa
;
3242 struct sctp_paramhdr
*phdr
, params
;
3245 uint16_t ptype
, plen
;
3248 struct sctp_nets
*net
;
3250 memset(&sin4
, 0, sizeof(sin4
));
3251 memset(&sin6
, 0, sizeof(sin6
));
3252 sin4
.sin_family
= AF_INET
;
3253 sin4
.sin_len
= sizeof(sin4
);
3254 sin6
.sin6_family
= AF_INET6
;
3255 sin6
.sin6_len
= sizeof(sin6
);
3258 /* First what about the src address of the pkt ? */
3259 iph
= mtod(in_initpkt
, struct ip
*);
3260 if (iph
->ip_v
== IPVERSION
) {
3261 /* source addr is IPv4 */
3262 sin4
.sin_addr
= iph
->ip_src
;
3263 sa_touse
= (struct sockaddr
*)&sin4
;
3264 } else if (iph
->ip_v
== (IPV6_VERSION
>> 4)) {
3265 /* source addr is IPv6 */
3266 struct ip6_hdr
*ip6h
;
3267 ip6h
= mtod(in_initpkt
, struct ip6_hdr
*);
3268 sin6
.sin6_addr
= ip6h
->ip6_src
;
3269 sa_touse
= (struct sockaddr
*)&sin6
;
3275 TAILQ_FOREACH(net
, &asoc
->nets
, sctp_next
) {
3276 sa
= (struct sockaddr
*)&net
->ro
._l_addr
;
3277 if (sa
->sa_family
== sa_touse
->sa_family
) {
3278 if (sa
->sa_family
== AF_INET
) {
3279 sa4
= (struct sockaddr_in
*)sa
;
3280 if (sa4
->sin_addr
.s_addr
==
3281 sin4
.sin_addr
.s_addr
) {
3285 } else if (sa
->sa_family
== AF_INET6
) {
3286 sa6
= (struct sockaddr_in6
*)sa
;
3287 if (SCTP6_ARE_ADDR_EQUAL(&sa6
->sin6_addr
,
3296 /* New address added! no need to look futher. */
3299 /* Ok so far lets munge through the rest of the packet */
3303 offset
+= sizeof(struct sctp_init_chunk
);
3304 phdr
= sctp_get_next_param(mat
, offset
, ¶ms
, sizeof(params
));
3306 ptype
= ntohs(phdr
->param_type
);
3307 plen
= ntohs(phdr
->param_length
);
3308 if (ptype
== SCTP_IPV4_ADDRESS
) {
3309 struct sctp_ipv4addr_param
*p4
, p4_buf
;
3311 phdr
= sctp_get_next_param(mat
, offset
,
3312 (struct sctp_paramhdr
*)&p4_buf
, sizeof(p4_buf
));
3313 if (plen
!= sizeof(struct sctp_ipv4addr_param
) ||
3317 p4
= (struct sctp_ipv4addr_param
*)phdr
;
3318 sin4
.sin_addr
.s_addr
= p4
->addr
;
3319 sa_touse
= (struct sockaddr
*)&sin4
;
3320 } else if (ptype
== SCTP_IPV6_ADDRESS
) {
3321 struct sctp_ipv6addr_param
*p6
, p6_buf
;
3323 phdr
= sctp_get_next_param(mat
, offset
,
3324 (struct sctp_paramhdr
*)&p6_buf
, sizeof(p6_buf
));
3325 if (plen
!= sizeof(struct sctp_ipv6addr_param
) ||
3329 p6
= (struct sctp_ipv6addr_param
*)phdr
;
3330 memcpy((caddr_t
)&sin6
.sin6_addr
, p6
->addr
,
3332 sa_touse
= (struct sockaddr
*)&sin4
;
3336 /* ok, sa_touse points to one to check */
3338 TAILQ_FOREACH(net
, &asoc
->nets
, sctp_next
) {
3339 sa
= (struct sockaddr
*)&net
->ro
._l_addr
;
3340 if (sa
->sa_family
!= sa_touse
->sa_family
) {
3343 if (sa
->sa_family
== AF_INET
) {
3344 sa4
= (struct sockaddr_in
*)sa
;
3345 if (sa4
->sin_addr
.s_addr
==
3346 sin4
.sin_addr
.s_addr
) {
3350 } else if (sa
->sa_family
== AF_INET6
) {
3351 sa6
= (struct sockaddr_in6
*)sa
;
3352 if (SCTP6_ARE_ADDR_EQUAL(
3353 &sa6
->sin6_addr
, &sin6
.sin6_addr
)) {
3360 /* New addr added! no need to look further */
3364 offset
+= SCTP_SIZE32(plen
);
3365 phdr
= sctp_get_next_param(mat
, offset
, ¶ms
, sizeof(params
));
3371 * Given a MBUF chain that was sent into us containing an
3372 * INIT. Build a INIT-ACK with COOKIE and send back.
3373 * We assume that the in_initpkt has done a pullup to
3374 * include IPv6/4header, SCTP header and initial part of
3375 * INIT message (i.e. the struct sctp_init_msg).
3378 sctp_send_initiate_ack(struct sctp_inpcb
*inp
, struct sctp_tcb
*stcb
,
3379 struct mbuf
*init_pkt
, int iphlen
, int offset
, struct sctphdr
*sh
,
3380 struct sctp_init_chunk
*init_chk
)
3382 struct sctp_association
*asoc
;
3383 struct mbuf
*m
, *m_at
, *m_tmp
, *m_cookie
, *op_err
, *m_last
;
3384 struct sctp_init_msg
*initackm_out
;
3385 struct sctp_ecn_supported_param
*ecn
;
3386 struct sctp_prsctp_supported_param
*prsctp
;
3387 struct sctp_ecn_nonce_supported_param
*ecn_nonce
;
3388 struct sctp_supported_chunk_types_param
*pr_supported
;
3389 struct sockaddr_storage store
;
3390 struct sockaddr_in
*sin
;
3391 struct sockaddr_in6
*sin6
;
3394 struct ip6_hdr
*ip6
;
3395 struct sockaddr
*to
;
3396 struct sctp_state_cookie stc
;
3397 struct sctp_nets
*net
=NULL
;
3399 uint16_t his_limit
, i_want
;
3400 int abort_flag
, padval
, sz_of
;
3408 if ((asoc
!= NULL
) &&
3409 (SCTP_GET_STATE(asoc
) != SCTP_STATE_COOKIE_WAIT
) &&
3410 (sctp_are_there_new_addresses(asoc
, init_pkt
, iphlen
, offset
))) {
3411 /* new addresses, out of here in non-cookie-wait states */
3413 * Send a ABORT, we don't add the new address error clause though
3414 * we even set the T bit and copy in the 0 tag.. this looks no
3415 * different than if no listner was present.
3417 sctp_send_abort(init_pkt
, iphlen
, sh
, 0, NULL
);
3421 op_err
= sctp_arethere_unrecognized_parameters(init_pkt
,
3422 (offset
+sizeof(struct sctp_init_chunk
)),
3423 &abort_flag
, (struct sctp_chunkhdr
*)init_chk
);
3425 sctp_send_abort(init_pkt
, iphlen
, sh
, init_chk
->init
.initiate_tag
, op_err
);
3428 MGETHDR(m
, MB_DONTWAIT
, MT_HEADER
);
3430 /* No memory, INIT timer will re-attempt. */
3432 sctp_m_freem(op_err
);
3435 MCLGET(m
, MB_DONTWAIT
);
3436 if ((m
->m_flags
& M_EXT
) != M_EXT
) {
3437 /* Failed to get cluster buffer */
3439 sctp_m_freem(op_err
);
3443 m
->m_data
+= SCTP_MIN_OVERHEAD
;
3444 m
->m_pkthdr
.rcvif
= 0;
3445 m
->m_len
= sizeof(struct sctp_init_msg
);
3447 /* the time I built cookie */
3448 SCTP_GETTIME_TIMEVAL(&stc
.time_entered
);
3450 /* populate any tie tags */
3452 /* unlock before tag selections */
3453 SCTP_TCB_UNLOCK(stcb
);
3454 if (asoc
->my_vtag_nonce
== 0)
3455 asoc
->my_vtag_nonce
= sctp_select_a_tag(inp
);
3456 stc
.tie_tag_my_vtag
= asoc
->my_vtag_nonce
;
3458 if (asoc
->peer_vtag_nonce
== 0)
3459 asoc
->peer_vtag_nonce
= sctp_select_a_tag(inp
);
3460 stc
.tie_tag_peer_vtag
= asoc
->peer_vtag_nonce
;
3462 stc
.cookie_life
= asoc
->cookie_life
;
3463 net
= asoc
->primary_destination
;
3464 /* now we must relock */
3465 SCTP_INP_RLOCK(inp
);
3466 /* we may be in trouble here if the inp got freed
3467 * most likely this set of tests will protect
3468 * us but there is a chance not.
3470 if (inp
->sctp_flags
& (SCTP_PCB_FLAGS_SOCKET_GONE
|SCTP_PCB_FLAGS_SOCKET_ALLGONE
)) {
3472 sctp_m_freem(op_err
);
3474 sctp_send_abort(init_pkt
, iphlen
, sh
, 0, NULL
);
3477 SCTP_TCB_LOCK(stcb
);
3478 SCTP_INP_RUNLOCK(stcb
->sctp_ep
);
3480 stc
.tie_tag_my_vtag
= 0;
3481 stc
.tie_tag_peer_vtag
= 0;
3482 /* life I will award this cookie */
3483 stc
.cookie_life
= inp
->sctp_ep
.def_cookie_life
;
3486 /* copy in the ports for later check */
3487 stc
.myport
= sh
->dest_port
;
3488 stc
.peerport
= sh
->src_port
;
3491 * If we wanted to honor cookie life extentions, we would add
3492 * to stc.cookie_life. For now we should NOT honor any extension
3494 stc
.site_scope
= stc
.local_scope
= stc
.loopback_scope
= 0;
3495 if (inp
->sctp_flags
& SCTP_PCB_FLAGS_BOUND_V6
) {
3496 struct inpcb
*in_inp
;
3497 /* Its a V6 socket */
3498 in_inp
= (struct inpcb
*)inp
;
3499 stc
.ipv6_addr_legal
= 1;
3500 /* Now look at the binding flag to see if V4 will be legal */
3502 #if defined(__FreeBSD__) || defined(__APPLE__) || defined(__DragonFly__)
3503 (in_inp
->inp_flags
& IN6P_IPV6_V6ONLY
)
3504 #elif defined(__OpenBSD__)
3505 (0) /* For openbsd we do dual bind only */
3507 (((struct in6pcb
*)in_inp
)->in6p_flags
& IN6P_IPV6_V6ONLY
)
3510 stc
.ipv4_addr_legal
= 1;
3512 /* V4 addresses are NOT legal on the association */
3513 stc
.ipv4_addr_legal
= 0;
3516 /* Its a V4 socket, no - V6 */
3517 stc
.ipv4_addr_legal
= 1;
3518 stc
.ipv6_addr_legal
= 0;
3521 #ifdef SCTP_DONT_DO_PRIVADDR_SCOPE
3526 /* now for scope setup */
3527 memset((caddr_t
)&store
, 0, sizeof(store
));
3528 sin
= (struct sockaddr_in
*)&store
;
3529 sin6
= (struct sockaddr_in6
*)&store
;
3531 to
= (struct sockaddr
*)&store
;
3532 iph
= mtod(init_pkt
, struct ip
*);
3533 if (iph
->ip_v
== IPVERSION
) {
3534 struct in_addr addr
;
3535 struct route iproute
;
3537 sin
->sin_family
= AF_INET
;
3538 sin
->sin_len
= sizeof(struct sockaddr_in
);
3539 sin
->sin_port
= sh
->src_port
;
3540 sin
->sin_addr
= iph
->ip_src
;
3541 /* lookup address */
3542 stc
.address
[0] = sin
->sin_addr
.s_addr
;
3546 stc
.addr_type
= SCTP_IPV4_ADDRESS
;
3547 /* local from address */
3548 memset(&iproute
, 0, sizeof(iproute
));
3550 memcpy(&ro
->ro_dst
, sin
, sizeof(*sin
));
3551 addr
= sctp_ipv4_source_address_selection(inp
, NULL
,
3556 stc
.laddress
[0] = addr
.s_addr
;
3557 stc
.laddress
[1] = 0;
3558 stc
.laddress
[2] = 0;
3559 stc
.laddress
[3] = 0;
3560 stc
.laddr_type
= SCTP_IPV4_ADDRESS
;
3561 /* scope_id is only for v6 */
3563 #ifndef SCTP_DONT_DO_PRIVADDR_SCOPE
3564 if (IN4_ISPRIVATE_ADDRESS(&sin
->sin_addr
)) {
3569 #endif /* SCTP_DONT_DO_PRIVADDR_SCOPE */
3570 /* Must use the address in this case */
3571 if (sctp_is_address_on_local_host((struct sockaddr
*)sin
)) {
3572 stc
.loopback_scope
= 1;
3575 stc
.local_scope
= 1;
3577 } else if (iph
->ip_v
== (IPV6_VERSION
>> 4)) {
3578 struct in6_addr addr
;
3579 #ifdef NEW_STRUCT_ROUTE
3580 struct route iproute6
;
3582 struct route_in6 iproute6
;
3584 ip6
= mtod(init_pkt
, struct ip6_hdr
*);
3585 sin6
->sin6_family
= AF_INET6
;
3586 sin6
->sin6_len
= sizeof(struct sockaddr_in6
);
3587 sin6
->sin6_port
= sh
->src_port
;
3588 sin6
->sin6_addr
= ip6
->ip6_src
;
3589 /* lookup address */
3590 memcpy(&stc
.address
, &sin6
->sin6_addr
,
3591 sizeof(struct in6_addr
));
3592 sin6
->sin6_scope_id
= 0;
3593 stc
.addr_type
= SCTP_IPV6_ADDRESS
;
3595 if (sctp_is_address_on_local_host((struct sockaddr
*)sin6
)) {
3596 stc
.loopback_scope
= 1;
3597 stc
.local_scope
= 1;
3600 } else if (IN6_IS_ADDR_LINKLOCAL(&sin6
->sin6_addr
)) {
3602 * If the new destination is a LINK_LOCAL
3603 * we must have common both site and local
3604 * scope. Don't set local scope though since
3605 * we must depend on the source to be added
3606 * implicitly. We cannot assure just because
3607 * we share one link that all links are common.
3609 stc
.local_scope
= 0;
3612 /* we start counting for the private
3613 * address stuff at 1. since the link
3614 * local we source from won't show
3615 * up in our scoped cou8nt.
3618 /* pull out the scope_id from incoming pkt */
3619 in6_recoverscope(sin6
, &ip6
->ip6_src
,
3620 init_pkt
->m_pkthdr
.rcvif
);
3621 #if defined(SCTP_BASE_FREEBSD) || defined(__APPLE__) || defined(__DragonFly__)
3622 in6_embedscope(&sin6
->sin6_addr
, sin6
, NULL
,
3625 in6_embedscope(&sin6
->sin6_addr
, sin6
);
3627 stc
.scope_id
= sin6
->sin6_scope_id
;
3628 } else if (IN6_IS_ADDR_SITELOCAL(&sin6
->sin6_addr
)) {
3630 * If the new destination is SITE_LOCAL
3631 * then we must have site scope in common.
3635 /* local from address */
3636 memset(&iproute6
, 0, sizeof(iproute6
));
3637 ro
= (struct route
*)&iproute6
;
3638 memcpy(&ro
->ro_dst
, sin6
, sizeof(*sin6
));
3639 addr
= sctp_ipv6_source_address_selection(inp
, NULL
,
3644 memcpy(&stc
.laddress
, &addr
, sizeof(struct in6_addr
));
3645 stc
.laddr_type
= SCTP_IPV6_ADDRESS
;
3648 /* set the scope per the existing tcb */
3649 struct sctp_nets
*lnet
;
3651 stc
.loopback_scope
= asoc
->loopback_scope
;
3652 stc
.ipv4_scope
= asoc
->ipv4_local_scope
;
3653 stc
.site_scope
= asoc
->site_scope
;
3654 stc
.local_scope
= asoc
->local_scope
;
3655 TAILQ_FOREACH(lnet
, &asoc
->nets
, sctp_next
) {
3656 if (lnet
->ro
._l_addr
.sin6
.sin6_family
== AF_INET6
) {
3657 if (IN6_IS_ADDR_LINKLOCAL(&lnet
->ro
._l_addr
.sin6
.sin6_addr
)) {
3658 /* if we have a LL address, start counting
3666 /* use the net pointer */
3667 to
= (struct sockaddr
*)&net
->ro
._l_addr
;
3668 if (to
->sa_family
== AF_INET
) {
3669 sin
= (struct sockaddr_in
*)to
;
3670 stc
.address
[0] = sin
->sin_addr
.s_addr
;
3674 stc
.addr_type
= SCTP_IPV4_ADDRESS
;
3675 if (net
->src_addr_selected
== 0) {
3676 /* strange case here, the INIT
3677 * should have did the selection.
3679 net
->ro
._s_addr
.sin
.sin_addr
=
3680 sctp_ipv4_source_address_selection(inp
,
3681 stcb
, (struct route
*)&net
->ro
, net
, 0);
3682 net
->src_addr_selected
= 1;
3686 stc
.laddress
[0] = net
->ro
._s_addr
.sin
.sin_addr
.s_addr
;
3687 stc
.laddress
[1] = 0;
3688 stc
.laddress
[2] = 0;
3689 stc
.laddress
[3] = 0;
3690 stc
.laddr_type
= SCTP_IPV4_ADDRESS
;
3691 } else if (to
->sa_family
== AF_INET6
) {
3692 sin6
= (struct sockaddr_in6
*)to
;
3693 memcpy(&stc
.address
, &sin6
->sin6_addr
,
3694 sizeof(struct in6_addr
));
3695 stc
.addr_type
= SCTP_IPV6_ADDRESS
;
3696 if (net
->src_addr_selected
== 0) {
3697 /* strange case here, the INIT
3698 * should have did the selection.
3700 net
->ro
._s_addr
.sin6
.sin6_addr
=
3701 sctp_ipv6_source_address_selection(inp
,
3702 stcb
, (struct route
*)&net
->ro
, net
, 0);
3703 net
->src_addr_selected
= 1;
3705 memcpy(&stc
.laddress
, &net
->ro
._l_addr
.sin6
.sin6_addr
,
3706 sizeof(struct in6_addr
));
3707 stc
.laddr_type
= SCTP_IPV6_ADDRESS
;
3710 /* Now lets put the SCTP header in place */
3711 initackm_out
= mtod(m
, struct sctp_init_msg
*);
3712 initackm_out
->sh
.src_port
= inp
->sctp_lport
;
3713 initackm_out
->sh
.dest_port
= sh
->src_port
;
3714 initackm_out
->sh
.v_tag
= init_chk
->init
.initiate_tag
;
3715 /* Save it off for quick ref */
3716 stc
.peers_vtag
= init_chk
->init
.initiate_tag
;
3717 initackm_out
->sh
.checksum
= 0; /* calculate later */
3719 strncpy(stc
.identification
, SCTP_VERSION_STRING
,
3720 min(strlen(SCTP_VERSION_STRING
), sizeof(stc
.identification
)));
3721 /* now the chunk header */
3722 initackm_out
->msg
.ch
.chunk_type
= SCTP_INITIATION_ACK
;
3723 initackm_out
->msg
.ch
.chunk_flags
= 0;
3724 /* fill in later from mbuf we build */
3725 initackm_out
->msg
.ch
.chunk_length
= 0;
3726 /* place in my tag */
3727 if ((asoc
!= NULL
) &&
3728 ((SCTP_GET_STATE(asoc
) == SCTP_STATE_COOKIE_WAIT
) ||
3729 (SCTP_GET_STATE(asoc
) == SCTP_STATE_COOKIE_ECHOED
))) {
3730 /* re-use the v-tags and init-seq here */
3731 initackm_out
->msg
.init
.initiate_tag
= htonl(asoc
->my_vtag
);
3732 initackm_out
->msg
.init
.initial_tsn
= htonl(asoc
->init_seq_number
);
3734 initackm_out
->msg
.init
.initiate_tag
= htonl(sctp_select_a_tag(inp
));
3735 /* get a TSN to use too */
3736 initackm_out
->msg
.init
.initial_tsn
= htonl(sctp_select_initial_TSN(&inp
->sctp_ep
));
3738 /* save away my tag to */
3739 stc
.my_vtag
= initackm_out
->msg
.init
.initiate_tag
;
3741 /* set up some of the credits. */
3742 initackm_out
->msg
.init
.a_rwnd
= htonl(max(inp
->sctp_socket
->so_rcv
.ssb_hiwat
, SCTP_MINIMAL_RWND
));
3743 /* set what I want */
3744 his_limit
= ntohs(init_chk
->init
.num_inbound_streams
);
3745 /* choose what I want */
3747 if (asoc
->streamoutcnt
> inp
->sctp_ep
.pre_open_stream_count
) {
3748 i_want
= asoc
->streamoutcnt
;
3750 i_want
= inp
->sctp_ep
.pre_open_stream_count
;
3753 i_want
= inp
->sctp_ep
.pre_open_stream_count
;
3755 if (his_limit
< i_want
) {
3756 /* I Want more :< */
3757 initackm_out
->msg
.init
.num_outbound_streams
= init_chk
->init
.num_inbound_streams
;
3759 /* I can have what I want :> */
3760 initackm_out
->msg
.init
.num_outbound_streams
= htons(i_want
);
3762 /* tell him his limt. */
3763 initackm_out
->msg
.init
.num_inbound_streams
=
3764 htons(inp
->sctp_ep
.max_open_streams_intome
);
3765 /* setup the ECN pointer */
3767 /* if (inp->sctp_flags & SCTP_PCB_FLAGS_ADAPTIONEVNT) {*/
3768 if (inp
->sctp_ep
.adaption_layer_indicator
) {
3769 struct sctp_adaption_layer_indication
*ali
;
3770 ali
= (struct sctp_adaption_layer_indication
*)(
3771 (caddr_t
)initackm_out
+ sizeof(*initackm_out
));
3772 ali
->ph
.param_type
= htons(SCTP_ULP_ADAPTION
);
3773 ali
->ph
.param_length
= htons(sizeof(*ali
));
3774 ali
->indication
= ntohl(inp
->sctp_ep
.adaption_layer_indicator
);
3775 m
->m_len
+= sizeof(*ali
);
3776 ecn
= (struct sctp_ecn_supported_param
*)((caddr_t
)ali
+
3779 ecn
= (struct sctp_ecn_supported_param
*)(
3780 (caddr_t
)initackm_out
+ sizeof(*initackm_out
));
3784 if (sctp_ecn
== 1) {
3785 ecn
->ph
.param_type
= htons(SCTP_ECN_CAPABLE
);
3786 ecn
->ph
.param_length
= htons(sizeof(*ecn
));
3787 m
->m_len
+= sizeof(*ecn
);
3789 prsctp
= (struct sctp_prsctp_supported_param
*)((caddr_t
)ecn
+
3792 prsctp
= (struct sctp_prsctp_supported_param
*)((caddr_t
)ecn
);
3794 /* And now tell the peer we do pr-sctp */
3795 prsctp
->ph
.param_type
= htons(SCTP_PRSCTP_SUPPORTED
);
3796 prsctp
->ph
.param_length
= htons(sizeof(*prsctp
));
3797 m
->m_len
+= sizeof(*prsctp
);
3800 /* And now tell the peer we do all the extensions */
3801 pr_supported
= (struct sctp_supported_chunk_types_param
*)((caddr_t
)prsctp
+
3804 pr_supported
->ph
.param_type
= htons(SCTP_SUPPORTED_CHUNK_EXT
);
3805 pr_supported
->ph
.param_length
= htons(sizeof(*pr_supported
) + SCTP_EXT_COUNT
);
3806 pr_supported
->chunk_types
[0] = SCTP_ASCONF
;
3807 pr_supported
->chunk_types
[1] = SCTP_ASCONF_ACK
;
3808 pr_supported
->chunk_types
[2] = SCTP_FORWARD_CUM_TSN
;
3809 pr_supported
->chunk_types
[3] = SCTP_PACKET_DROPPED
;
3810 pr_supported
->chunk_types
[4] = SCTP_STREAM_RESET
;
3811 pr_supported
->chunk_types
[5] = 0; /* pad */
3812 pr_supported
->chunk_types
[6] = 0; /* pad */
3813 pr_supported
->chunk_types
[7] = 0; /* pad */
3815 m
->m_len
+= (sizeof(*pr_supported
) + SCTP_EXT_COUNT
+ SCTP_PAD_EXT_COUNT
);
3816 if (sctp_ecn_nonce
) {
3817 /* ECN nonce: And now tell the peer we support ECN nonce */
3818 ecn_nonce
= (struct sctp_ecn_nonce_supported_param
*)((caddr_t
)pr_supported
+
3819 sizeof(*pr_supported
) + SCTP_EXT_COUNT
+ SCTP_PAD_EXT_COUNT
);
3820 ecn_nonce
->ph
.param_type
= htons(SCTP_ECN_NONCE_SUPPORTED
);
3821 ecn_nonce
->ph
.param_length
= htons(sizeof(*ecn_nonce
));
3822 m
->m_len
+= sizeof(*ecn_nonce
);
3826 /* now the addresses */
3827 if (inp
->sctp_flags
& SCTP_PCB_FLAGS_BOUNDALL
) {
3829 int cnt
= cnt_inits_to
;
3831 TAILQ_FOREACH(ifn
, &ifnet
, if_list
) {
3832 struct ifaddr_container
*ifac
;
3834 if ((stc
.loopback_scope
== 0) &&
3835 (ifn
->if_type
== IFT_LOOP
)) {
3837 * Skip loopback devices if loopback_scope
3842 TAILQ_FOREACH(ifac
, &ifn
->if_addrheads
[mycpuid
], ifa_link
) {
3843 struct ifaddr
*ifa
= ifac
->ifa
;
3845 if (sctp_is_address_in_scope(ifa
,
3846 stc
.ipv4_addr_legal
, stc
.ipv6_addr_legal
,
3847 stc
.loopback_scope
, stc
.ipv4_scope
,
3848 stc
.local_scope
, stc
.site_scope
) == 0) {
3855 TAILQ_FOREACH(ifn
, &ifnet
, if_list
) {
3856 struct ifaddr_container
*ifac
;
3858 if ((stc
.loopback_scope
== 0) &&
3859 (ifn
->if_type
== IFT_LOOP
)) {
3861 * Skip loopback devices if
3862 * loopback_scope not set
3866 TAILQ_FOREACH(ifac
, &ifn
->if_addrheads
[mycpuid
], ifa_link
) {
3867 struct ifaddr
*ifa
= ifac
->ifa
;
3869 if (sctp_is_address_in_scope(ifa
,
3870 stc
.ipv4_addr_legal
,
3871 stc
.ipv6_addr_legal
,
3872 stc
.loopback_scope
, stc
.ipv4_scope
,
3873 stc
.local_scope
, stc
.site_scope
) == 0) {
3876 m_at
= sctp_add_addr_to_mbuf(m_at
, ifa
);
3881 struct sctp_laddr
*laddr
;
3884 /* First, how many ? */
3885 LIST_FOREACH(laddr
, &inp
->sctp_addr_list
, sctp_nxt_addr
) {
3886 if (laddr
->ifa
== NULL
) {
3889 if (laddr
->ifa
->ifa_addr
== NULL
)
3891 if (sctp_is_address_in_scope(laddr
->ifa
,
3892 stc
.ipv4_addr_legal
, stc
.ipv6_addr_legal
,
3893 stc
.loopback_scope
, stc
.ipv4_scope
,
3894 stc
.local_scope
, stc
.site_scope
) == 0) {
3899 /* If we bind a single address only we won't list
3900 * any. This way you can get through a NAT
3903 LIST_FOREACH(laddr
, &inp
->sctp_addr_list
, sctp_nxt_addr
) {
3904 if (laddr
->ifa
== NULL
) {
3906 if (sctp_debug_on
& SCTP_DEBUG_OUTPUT1
) {
3907 kprintf("Help I have fallen and I can't get up!\n");
3912 if (laddr
->ifa
->ifa_addr
== NULL
)
3914 if (sctp_is_address_in_scope(laddr
->ifa
,
3915 stc
.ipv4_addr_legal
, stc
.ipv6_addr_legal
,
3916 stc
.loopback_scope
, stc
.ipv4_scope
,
3917 stc
.local_scope
, stc
.site_scope
) == 0) {
3920 m_at
= sctp_add_addr_to_mbuf(m_at
, laddr
->ifa
);
3925 /* tack on the operational error if present */
3927 if (op_err
->m_pkthdr
.len
% 4) {
3928 /* must add a pad to the param */
3931 padlen
= 4 - (op_err
->m_pkthdr
.len
% 4);
3932 m_copyback(op_err
, op_err
->m_pkthdr
.len
, padlen
, (caddr_t
)&cpthis
);
3934 while (m_at
->m_next
!= NULL
) {
3935 m_at
= m_at
->m_next
;
3937 m_at
->m_next
= op_err
;
3938 while (m_at
->m_next
!= NULL
) {
3939 m_at
= m_at
->m_next
;
3942 /* Get total size of init packet */
3943 sz_of
= SCTP_SIZE32(ntohs(init_chk
->ch
.chunk_length
));
3944 /* pre-calculate the size and update pkt header and chunk header */
3945 m
->m_pkthdr
.len
= 0;
3946 for (m_tmp
= m
; m_tmp
; m_tmp
= m_tmp
->m_next
) {
3947 m
->m_pkthdr
.len
+= m_tmp
->m_len
;
3948 if (m_tmp
->m_next
== NULL
) {
3949 /* m_tmp should now point to last one */
3954 * Figure now the size of the cookie. We know the size of the
3955 * INIT-ACK. The Cookie is going to be the size of INIT, INIT-ACK,
3956 * COOKIE-STRUCTURE and SIGNATURE.
3960 * take our earlier INIT calc and add in the sz we just calculated
3961 * minus the size of the sctphdr (its not included in chunk size
3964 /* add once for the INIT-ACK */
3965 sz_of
+= (m
->m_pkthdr
.len
- sizeof(struct sctphdr
));
3967 /* add a second time for the INIT-ACK in the cookie */
3968 sz_of
+= (m
->m_pkthdr
.len
- sizeof(struct sctphdr
));
3970 /* Now add the cookie header and cookie message struct */
3971 sz_of
+= sizeof(struct sctp_state_cookie_param
);
3972 /* ...and add the size of our signature */
3973 sz_of
+= SCTP_SIGNATURE_SIZE
;
3974 initackm_out
->msg
.ch
.chunk_length
= htons(sz_of
);
3976 /* Now we must build a cookie */
3977 m_cookie
= sctp_add_cookie(inp
, init_pkt
, offset
, m
,
3978 sizeof(struct sctphdr
), &stc
);
3979 if (m_cookie
== NULL
) {
3980 /* memory problem */
3984 /* Now append the cookie to the end and update the space/size */
3985 m_tmp
->m_next
= m_cookie
;
3988 * We pass 0 here to NOT set IP_DF if its IPv4, we ignore the
3989 * return here since the timer will drive a retransmission.
3991 padval
= m
->m_pkthdr
.len
% 4;
3992 if ((padval
) && (m_last
)) {
3993 /* see my previous comments on m_last */
3995 ret
= sctp_add_pad_tombuf(m_last
, (4-padval
));
3997 /* Houston we have a problem, no space */
4001 m
->m_pkthdr
.len
+= padval
;
4003 sctp_lowlevel_chunk_output(inp
, NULL
, NULL
, to
, m
, 0, 0, NULL
, 0);
4008 sctp_insert_on_wheel(struct sctp_association
*asoc
,
4009 struct sctp_stream_out
*strq
)
4011 struct sctp_stream_out
*stre
, *strn
;
4012 stre
= TAILQ_FIRST(&asoc
->out_wheel
);
4014 /* only one on wheel */
4015 TAILQ_INSERT_HEAD(&asoc
->out_wheel
, strq
, next_spoke
);
4018 for (; stre
; stre
= strn
) {
4019 strn
= TAILQ_NEXT(stre
, next_spoke
);
4020 if (stre
->stream_no
> strq
->stream_no
) {
4021 TAILQ_INSERT_BEFORE(stre
, strq
, next_spoke
);
4023 } else if (stre
->stream_no
== strq
->stream_no
) {
4024 /* huh, should not happen */
4026 } else if (strn
== NULL
) {
4027 /* next one is null */
4028 TAILQ_INSERT_AFTER(&asoc
->out_wheel
, stre
, strq
,
4035 sctp_remove_from_wheel(struct sctp_association
*asoc
,
4036 struct sctp_stream_out
*strq
)
4038 /* take off and then setup so we know it is not on the wheel */
4039 TAILQ_REMOVE(&asoc
->out_wheel
, strq
, next_spoke
);
4040 strq
->next_spoke
.tqe_next
= NULL
;
4041 strq
->next_spoke
.tqe_prev
= NULL
;
4046 sctp_prune_prsctp(struct sctp_tcb
*stcb
,
4047 struct sctp_association
*asoc
,
4048 struct sctp_sndrcvinfo
*srcv
,
4053 struct sctp_tmit_chunk
*chk
, *nchk
;
4054 if ((asoc
->peer_supports_prsctp
) && (asoc
->sent_queue_cnt_removeable
> 0)) {
4055 TAILQ_FOREACH(chk
, &asoc
->sent_queue
, sctp_next
) {
4057 * Look for chunks marked with the PR_SCTP
4058 * flag AND the buffer space flag. If the one
4059 * being sent is equal or greater priority then
4060 * purge the old one and free some space.
4062 if ((chk
->flags
& (SCTP_PR_SCTP_ENABLED
|
4063 SCTP_PR_SCTP_BUFFER
)) ==
4064 (SCTP_PR_SCTP_ENABLED
|SCTP_PR_SCTP_BUFFER
)) {
4066 * This one is PR-SCTP AND buffer space
4069 if (chk
->rec
.data
.timetodrop
.tv_sec
>= (long)srcv
->sinfo_timetolive
) {
4070 /* Lower numbers equates to
4071 * higher priority so if the
4072 * one we are looking at has a
4073 * larger or equal priority we
4074 * want to drop the data and
4075 * NOT retransmit it.
4084 if (chk
->sent
> SCTP_DATAGRAM_UNSENT
)
4085 cause
= SCTP_RESPONSE_TO_USER_REQ
|SCTP_NOTIFY_DATAGRAM_SENT
;
4087 cause
= SCTP_RESPONSE_TO_USER_REQ
|SCTP_NOTIFY_DATAGRAM_UNSENT
;
4088 ret_spc
= sctp_release_pr_sctp_chunk(stcb
, chk
,
4091 freed_spc
+= ret_spc
;
4092 if (freed_spc
>= dataout
) {
4095 } /* if chunk was present */
4096 } /* if of sufficent priority */
4097 } /* if chunk has enabled */
4098 } /* tailqforeach */
4100 chk
= TAILQ_FIRST(&asoc
->send_queue
);
4102 nchk
= TAILQ_NEXT(chk
, sctp_next
);
4103 /* Here we must move to the sent queue and mark */
4104 if ((chk
->flags
& (SCTP_PR_SCTP_ENABLED
|
4105 SCTP_PR_SCTP_BUFFER
)) ==
4106 (SCTP_PR_SCTP_ENABLED
|SCTP_PR_SCTP_BUFFER
)) {
4107 if (chk
->rec
.data
.timetodrop
.tv_sec
>= (long)srcv
->sinfo_timetolive
) {
4114 ret_spc
= sctp_release_pr_sctp_chunk(stcb
, chk
,
4115 SCTP_RESPONSE_TO_USER_REQ
|SCTP_NOTIFY_DATAGRAM_UNSENT
,
4118 freed_spc
+= ret_spc
;
4119 if (freed_spc
>= dataout
) {
4122 } /* end if chk->data */
4123 } /* end if right class */
4124 } /* end if chk pr-sctp */
4126 } /* end while (chk) */
4127 } /* if enabled in asoc */
4131 sctp_prepare_chunk(struct sctp_tmit_chunk
*template,
4132 struct sctp_tcb
*stcb
,
4133 struct sctp_sndrcvinfo
*srcv
,
4134 struct sctp_stream_out
*strq
,
4135 struct sctp_nets
*net
)
4137 bzero(template, sizeof(struct sctp_tmit_chunk
));
4138 template->sent
= SCTP_DATAGRAM_UNSENT
;
4139 if ((stcb
->asoc
.peer_supports_prsctp
) &&
4140 (srcv
->sinfo_flags
& (MSG_PR_SCTP_TTL
|MSG_PR_SCTP_BUF
)) &&
4141 (srcv
->sinfo_timetolive
> 0)
4144 * Peer supports PR-SCTP
4145 * The flags is set against this send for PR-SCTP
4146 * And timetolive is a postive value, zero is reserved
4147 * to mean a reliable send for both buffer/time
4150 if (srcv
->sinfo_flags
& MSG_PR_SCTP_BUF
) {
4152 * Time to live is a priority stored in tv_sec
4153 * when doing the buffer drop thing.
4155 template->rec
.data
.timetodrop
.tv_sec
= srcv
->sinfo_timetolive
;
4159 SCTP_GETTIME_TIMEVAL(&template->rec
.data
.timetodrop
);
4160 tv
.tv_sec
= srcv
->sinfo_timetolive
/ 1000;
4161 tv
.tv_usec
= (srcv
->sinfo_timetolive
* 1000) % 1000000;
4163 timeradd(&template->rec
.data
.timetodrop
, &tv
,
4164 &template->rec
.data
.timetodrop
);
4166 timevaladd(&template->rec
.data
.timetodrop
, &tv
);
4170 if ((srcv
->sinfo_flags
& MSG_UNORDERED
) == 0) {
4171 template->rec
.data
.stream_seq
= strq
->next_sequence_sent
;
4173 template->rec
.data
.stream_seq
= 0;
4175 template->rec
.data
.TSN_seq
= 0; /* not yet assigned */
4177 template->rec
.data
.stream_number
= srcv
->sinfo_stream
;
4178 template->rec
.data
.payloadtype
= srcv
->sinfo_ppid
;
4179 template->rec
.data
.context
= srcv
->sinfo_context
;
4180 template->rec
.data
.doing_fast_retransmit
= 0;
4181 template->rec
.data
.ect_nonce
= 0; /* ECN Nonce */
4183 if (srcv
->sinfo_flags
& MSG_ADDR_OVER
) {
4184 template->whoTo
= net
;
4186 if (stcb
->asoc
.primary_destination
)
4187 template->whoTo
= stcb
->asoc
.primary_destination
;
4190 template->whoTo
= net
;
4193 /* the actual chunk flags */
4194 if (srcv
->sinfo_flags
& MSG_UNORDERED
) {
4195 template->rec
.data
.rcv_flags
= SCTP_DATA_UNORDERED
;
4197 template->rec
.data
.rcv_flags
= 0;
4199 /* no flags yet, FRAGMENT_OK goes here */
4200 template->flags
= 0;
4202 if (stcb
->asoc
.peer_supports_prsctp
) {
4203 if (srcv
->sinfo_timetolive
> 0) {
4205 * We only set the flag if timetolive (or
4206 * priority) was set to a positive number.
4207 * Zero is reserved specifically to be
4208 * EXCLUDED and sent reliable.
4210 if (srcv
->sinfo_flags
& MSG_PR_SCTP_TTL
) {
4211 template->flags
|= SCTP_PR_SCTP_ENABLED
;
4213 if (srcv
->sinfo_flags
& MSG_PR_SCTP_BUF
) {
4214 template->flags
|= SCTP_PR_SCTP_BUFFER
;
4218 template->asoc
= &stcb
->asoc
;
4223 sctp_get_frag_point(struct sctp_tcb
*stcb
,
4224 struct sctp_association
*asoc
)
4228 /* For endpoints that have both 6 and 4 addresses
4229 * we must reserver room for the 6 ip header, for
4230 * those that are only dealing with V4 we use
4231 * a larger frag point.
4233 if (stcb
->sctp_ep
->sctp_flags
& SCTP_PCB_FLAGS_BOUND_V6
) {
4234 ovh
= SCTP_MED_OVERHEAD
;
4236 ovh
= SCTP_MED_V4_OVERHEAD
;
4239 if (stcb
->sctp_ep
->sctp_frag_point
> asoc
->smallest_mtu
)
4240 siz
= asoc
->smallest_mtu
- ovh
;
4242 siz
= (stcb
->sctp_ep
->sctp_frag_point
- ovh
);
4244 if (siz > (MCLBYTES-sizeof(struct sctp_data_chunk))) { */
4245 /* A data chunk MUST fit in a cluster */
4246 /* siz = (MCLBYTES - sizeof(struct sctp_data_chunk));*/
4250 /* make it an even word boundary please */
4255 extern unsigned int sctp_max_chunks_on_queue
;
4257 #define SBLOCKWAIT(f) (((f)&MSG_DONTWAIT) ? M_NOWAIT : M_WAITOK)
4260 sctp_msg_append(struct sctp_tcb
*stcb
,
4261 struct sctp_nets
*net
,
4263 struct sctp_sndrcvinfo
*srcv
,
4267 struct sctp_association
*asoc
;
4268 struct sctp_stream_out
*strq
;
4269 struct sctp_tmit_chunk
*chk
;
4270 struct sctpchunk_listhead tmp
;
4271 struct sctp_tmit_chunk
template;
4272 struct mbuf
*n
, *mnext
;
4274 unsigned int dataout
, siz
;
4279 if ((stcb
== NULL
) || (net
== NULL
) || (m
== NULL
) || (srcv
== NULL
)) {
4280 /* Software fault, you blew it on the call */
4282 if (sctp_debug_on
& SCTP_DEBUG_OUTPUT1
) {
4283 kprintf("software error in sctp_msg_append:1\n");
4284 kprintf("stcb:%p net:%p m:%p srcv:%p\n",
4285 stcb
, net
, m
, srcv
);
4292 so
= stcb
->sctp_socket
;
4294 if (srcv
->sinfo_flags
& MSG_ABORT
) {
4295 if ((SCTP_GET_STATE(asoc
) != SCTP_STATE_COOKIE_WAIT
) &&
4296 (SCTP_GET_STATE(asoc
) != SCTP_STATE_COOKIE_ECHOED
)) {
4297 /* It has to be up before we abort */
4298 /* how big is the user initiated abort? */
4299 if ((m
->m_flags
& M_PKTHDR
) && (m
->m_pkthdr
.len
)) {
4300 dataout
= m
->m_pkthdr
.len
;
4304 for (n
= m
; n
; n
= n
->m_next
) {
4305 dataout
+= n
->m_len
;
4308 M_PREPEND(m
, sizeof(struct sctp_paramhdr
), MB_DONTWAIT
);
4310 struct sctp_paramhdr
*ph
;
4311 m
->m_len
= sizeof(struct sctp_paramhdr
) + dataout
;
4312 ph
= mtod(m
, struct sctp_paramhdr
*);
4313 ph
->param_type
= htons(SCTP_CAUSE_USER_INITIATED_ABT
);
4314 ph
->param_length
= htons(m
->m_len
);
4316 sctp_abort_an_association(stcb
->sctp_ep
, stcb
, SCTP_RESPONSE_TO_USER_REQ
, m
);
4319 /* Only free if we don't send an abort */
4324 if ((SCTP_GET_STATE(asoc
) == SCTP_STATE_SHUTDOWN_SENT
) ||
4325 (SCTP_GET_STATE(asoc
) == SCTP_STATE_SHUTDOWN_ACK_SENT
) ||
4326 (SCTP_GET_STATE(asoc
) == SCTP_STATE_SHUTDOWN_RECEIVED
) ||
4327 (asoc
->state
& SCTP_STATE_SHUTDOWN_PENDING
)) {
4328 /* got data while shutting down */
4333 if (srcv
->sinfo_stream
>= asoc
->streamoutcnt
) {
4334 /* Invalid stream number */
4338 if (asoc
->strmout
== NULL
) {
4339 /* huh? software error */
4341 if (sctp_debug_on
& SCTP_DEBUG_OUTPUT1
) {
4342 kprintf("software error in sctp_msg_append:2\n");
4348 strq
= &asoc
->strmout
[srcv
->sinfo_stream
];
4349 /* how big is it ? */
4350 if ((m
->m_flags
& M_PKTHDR
) && (m
->m_pkthdr
.len
)) {
4351 dataout
= m
->m_pkthdr
.len
;
4355 for (n
= m
; n
; n
= n
->m_next
) {
4356 dataout
+= n
->m_len
;
4360 if (sctp_debug_on
& SCTP_DEBUG_OUTPUT1
) {
4361 kprintf("Attempt to send out %d bytes\n",
4366 /* lock the socket buf */
4367 SOCKBUF_LOCK(&so
->so_snd
);
4368 error
= ssb_lock(&so
->so_snd
, SBLOCKWAIT(flags
));
4372 if (dataout
> so
->so_snd
.ssb_hiwat
) {
4373 /* It will NEVER fit */
4377 if ((srcv
->sinfo_flags
& MSG_EOF
) &&
4378 (stcb
->sctp_ep
->sctp_flags
& SCTP_PCB_FLAGS_UDPTYPE
) &&
4383 if ((so
->so_snd
.ssb_hiwat
<
4384 (dataout
+ asoc
->total_output_queue_size
)) ||
4385 (asoc
->chunks_on_out_queue
> sctp_max_chunks_on_queue
) ||
4386 (asoc
->total_output_mbuf_queue_size
>
4387 so
->so_snd
.ssb_mbmax
)
4389 /* XXX Buffer space hunt for data to skip */
4390 if (asoc
->peer_supports_prsctp
) {
4391 sctp_prune_prsctp(stcb
, asoc
, srcv
, dataout
);
4393 while ((so
->so_snd
.ssb_hiwat
<
4394 (dataout
+ asoc
->total_output_queue_size
)) ||
4395 (asoc
->chunks_on_out_queue
> sctp_max_chunks_on_queue
) ||
4396 (asoc
->total_output_mbuf_queue_size
>
4397 so
->so_snd
.ssb_mbmax
)) {
4398 struct sctp_inpcb
*inp
;
4399 /* Now did we free up enough room? */
4400 if (flags
& (MSG_FNONBLOCKING
|MSG_DONTWAIT
)) {
4401 /* Non-blocking io in place */
4402 error
= EWOULDBLOCK
;
4406 * We store off a pointer to the endpoint.
4407 * Since on return from this we must check to
4408 * see if an so_error is set. If so we may have
4409 * been reset and our stcb destroyed. Returning
4410 * an error will cause the correct error return
4411 * through and fix this all.
4413 inp
= stcb
->sctp_ep
;
4415 * Not sure how else to do this since
4416 * the level we suspended at is not
4417 * known deep down where we are. I will
4418 * drop to spl0() so that others can
4422 inp
->sctp_tcb_at_block
= (void *)stcb
;
4423 inp
->error_on_block
= 0;
4424 ssb_unlock(&so
->so_snd
);
4425 error
= ssb_wait(&so
->so_snd
);
4427 * XXX: This is ugly but I have
4428 * recreated most of what goes on to
4429 * block in the sb. UGHH
4430 * May want to add the bit about being
4431 * no longer connected.. but this then
4432 * further dooms the UDP model NOT to
4435 inp
->sctp_tcb_at_block
= 0;
4436 if (inp
->error_on_block
)
4437 error
= inp
->error_on_block
;
4439 error
= so
->so_error
;
4443 error
= ssb_lock(&so
->so_snd
, M_WAITOK
);
4446 /* Otherwise we cycle back and recheck
4449 #if defined(__FreeBSD__) && __FreeBSD_version >= 502115
4450 if (so
->so_rcv
.sb_state
& SBS_CANTSENDMORE
) {
4452 if (so
->so_state
& SS_CANTSENDMORE
) {
4458 error
= so
->so_error
;
4463 /* If we have a packet header fix it if it was broke */
4464 if (m
->m_flags
& M_PKTHDR
) {
4465 m
->m_pkthdr
.len
= dataout
;
4467 /* use the smallest one, user set value or
4468 * smallest mtu of the asoc
4470 siz
= sctp_get_frag_point(stcb
, asoc
);
4471 SOCKBUF_UNLOCK(&so
->so_snd
);
4472 if ((dataout
) && (dataout
<= siz
)) {
4474 chk
= (struct sctp_tmit_chunk
*)SCTP_ZONE_GET(sctppcbinfo
.ipi_zone_chunk
);
4477 SOCKBUF_LOCK(&so
->so_snd
);
4480 sctp_prepare_chunk(chk
, stcb
, srcv
, strq
, net
);
4481 chk
->whoTo
->ref_count
++;
4482 chk
->rec
.data
.rcv_flags
|= SCTP_DATA_NOT_FRAG
;
4484 /* no flags yet, FRAGMENT_OK goes here */
4485 sctppcbinfo
.ipi_count_chunk
++;
4486 sctppcbinfo
.ipi_gencnt_chunk
++;
4487 asoc
->chunks_on_out_queue
++;
4490 /* Total in the MSIZE */
4491 for (mm
= chk
->data
; mm
; mm
= mm
->m_next
) {
4493 if (mm
->m_flags
& M_EXT
) {
4494 mbcnt
+= chk
->data
->m_ext
.ext_size
;
4497 /* fix up the send_size if it is not present */
4498 chk
->send_size
= dataout
;
4499 chk
->book_size
= chk
->send_size
;
4501 /* ok, we are committed */
4502 if ((srcv
->sinfo_flags
& MSG_UNORDERED
) == 0) {
4503 /* bump the ssn if we are unordered. */
4504 strq
->next_sequence_sent
++;
4506 chk
->data
->m_nextpkt
= 0;
4507 asoc
->stream_queue_cnt
++;
4508 TAILQ_INSERT_TAIL(&strq
->outqueue
, chk
, sctp_next
);
4509 /* now check if this stream is on the wheel */
4510 if ((strq
->next_spoke
.tqe_next
== NULL
) &&
4511 (strq
->next_spoke
.tqe_prev
== NULL
)) {
4512 /* Insert it on the wheel since it is not
4515 sctp_insert_on_wheel(asoc
, strq
);
4517 } else if ((dataout
) && (dataout
> siz
)) {
4519 if ((stcb
->sctp_ep
->sctp_flags
& SCTP_PCB_FLAGS_NO_FRAGMENT
) &&
4522 SOCKBUF_LOCK(&so
->so_snd
);
4525 /* setup the template */
4526 sctp_prepare_chunk(&template, stcb
, srcv
, strq
, net
);
4529 while (dataout
> siz
) {
4531 * We can wait since this is called from the user
4534 n
->m_nextpkt
= m_split(n
, siz
, MB_WAIT
);
4535 if (n
->m_nextpkt
== NULL
) {
4537 SOCKBUF_LOCK(&so
->so_snd
);
4544 * ok, now we have a chain on m where m->m_nextpkt points to
4545 * the next chunk and m/m->m_next chain is the piece to send.
4546 * We must go through the chains and thread them on to
4547 * sctp_tmit_chunk chains and place them all on the stream
4548 * queue, breaking the m->m_nextpkt pointers as we go.
4554 * first go through and allocate a sctp_tmit chunk
4555 * for each chunk piece
4557 chk
= (struct sctp_tmit_chunk
*)SCTP_ZONE_GET(sctppcbinfo
.ipi_zone_chunk
);
4560 * ok we must spin through and dump anything
4561 * we have allocated and then jump to the
4564 chk
= TAILQ_FIRST(&tmp
);
4566 TAILQ_REMOVE(&tmp
, chk
, sctp_next
);
4567 SCTP_ZONE_FREE(sctppcbinfo
.ipi_zone_chunk
, chk
);
4568 sctppcbinfo
.ipi_count_chunk
--;
4569 asoc
->chunks_on_out_queue
--;
4570 if ((int)sctppcbinfo
.ipi_count_chunk
< 0) {
4571 panic("Chunk count is negative");
4573 sctppcbinfo
.ipi_gencnt_chunk
++;
4574 chk
= TAILQ_FIRST(&tmp
);
4577 SOCKBUF_LOCK(&so
->so_snd
);
4580 sctppcbinfo
.ipi_count_chunk
++;
4581 asoc
->chunks_on_out_queue
++;
4583 sctppcbinfo
.ipi_gencnt_chunk
++;
4585 chk
->whoTo
->ref_count
++;
4587 /* Total in the MSIZE */
4589 for (mm
= chk
->data
; mm
; mm
= mm
->m_next
) {
4591 if (mm
->m_flags
& M_EXT
) {
4592 mbcnt_e
+= chk
->data
->m_ext
.ext_size
;
4595 /* now fix the chk->send_size */
4596 if (chk
->data
->m_flags
& M_PKTHDR
) {
4597 chk
->send_size
= chk
->data
->m_pkthdr
.len
;
4601 for (nn
= chk
->data
; nn
; nn
= nn
->m_next
) {
4602 chk
->send_size
+= nn
->m_len
;
4605 chk
->book_size
= chk
->send_size
;
4606 chk
->mbcnt
= mbcnt_e
;
4608 if (chk
->flags
& SCTP_PR_SCTP_BUFFER
) {
4609 asoc
->sent_queue_cnt_removeable
++;
4612 TAILQ_INSERT_TAIL(&tmp
, chk
, sctp_next
);
4615 /* now that we have enough space for all de-couple the
4616 * chain of mbufs by going through our temp array
4617 * and breaking the pointers.
4619 /* ok, we are commited */
4620 if ((srcv
->sinfo_flags
& MSG_UNORDERED
) == 0) {
4621 /* bump the ssn if we are unordered. */
4622 strq
->next_sequence_sent
++;
4624 /* Mark the first/last flags. This will
4625 * result int a 3 for a single item on the list
4627 chk
= TAILQ_FIRST(&tmp
);
4628 chk
->rec
.data
.rcv_flags
|= SCTP_DATA_FIRST_FRAG
;
4629 chk
= TAILQ_LAST(&tmp
, sctpchunk_listhead
);
4630 chk
->rec
.data
.rcv_flags
|= SCTP_DATA_LAST_FRAG
;
4631 /* now break any chains on the queue and
4632 * move it to the streams actual queue.
4634 chk
= TAILQ_FIRST(&tmp
);
4636 chk
->data
->m_nextpkt
= 0;
4637 TAILQ_REMOVE(&tmp
, chk
, sctp_next
);
4638 asoc
->stream_queue_cnt
++;
4639 TAILQ_INSERT_TAIL(&strq
->outqueue
, chk
, sctp_next
);
4640 chk
= TAILQ_FIRST(&tmp
);
4642 /* now check if this stream is on the wheel */
4643 if ((strq
->next_spoke
.tqe_next
== NULL
) &&
4644 (strq
->next_spoke
.tqe_prev
== NULL
)) {
4645 /* Insert it on the wheel since it is not
4648 sctp_insert_on_wheel(asoc
, strq
);
4651 SOCKBUF_LOCK(&so
->so_snd
);
4652 /* has a SHUTDOWN been (also) requested by the user on this asoc? */
4655 if ((srcv
->sinfo_flags
& MSG_EOF
) &&
4656 (stcb
->sctp_ep
->sctp_flags
& SCTP_PCB_FLAGS_UDPTYPE
)) {
4658 int some_on_streamwheel
= 0;
4660 if (!TAILQ_EMPTY(&asoc
->out_wheel
)) {
4661 /* Check to see if some data queued */
4662 struct sctp_stream_out
*outs
;
4663 TAILQ_FOREACH(outs
, &asoc
->out_wheel
, next_spoke
) {
4664 if (!TAILQ_EMPTY(&outs
->outqueue
)) {
4665 some_on_streamwheel
= 1;
4671 if (TAILQ_EMPTY(&asoc
->send_queue
) &&
4672 TAILQ_EMPTY(&asoc
->sent_queue
) &&
4673 (some_on_streamwheel
== 0)) {
4674 /* there is nothing queued to send, so I'm done... */
4675 if ((SCTP_GET_STATE(asoc
) != SCTP_STATE_SHUTDOWN_SENT
) &&
4676 (SCTP_GET_STATE(asoc
) != SCTP_STATE_SHUTDOWN_ACK_SENT
)) {
4677 /* only send SHUTDOWN the first time through */
4679 if (sctp_debug_on
& SCTP_DEBUG_OUTPUT4
) {
4680 kprintf("%s:%d sends a shutdown\n",
4686 sctp_send_shutdown(stcb
, stcb
->asoc
.primary_destination
);
4687 asoc
->state
= SCTP_STATE_SHUTDOWN_SENT
;
4688 sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWN
, stcb
->sctp_ep
, stcb
,
4689 asoc
->primary_destination
);
4690 sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNGUARD
, stcb
->sctp_ep
, stcb
,
4691 asoc
->primary_destination
);
4695 * we still got (or just got) data to send, so set
4699 * XXX sockets draft says that MSG_EOF should be sent
4700 * with no data. currently, we will allow user data
4701 * to be sent first and move to SHUTDOWN-PENDING
4703 asoc
->state
|= SCTP_STATE_SHUTDOWN_PENDING
;
4706 #ifdef SCTP_MBCNT_LOGGING
4707 sctp_log_mbcnt(SCTP_LOG_MBCNT_INCREASE
,
4708 asoc
->total_output_queue_size
,
4710 asoc
->total_output_mbuf_queue_size
,
4713 asoc
->total_output_queue_size
+= dataout
;
4714 asoc
->total_output_mbuf_queue_size
+= mbcnt
;
4715 if ((stcb
->sctp_ep
->sctp_flags
& SCTP_PCB_FLAGS_TCPTYPE
) ||
4716 (stcb
->sctp_ep
->sctp_flags
& SCTP_PCB_FLAGS_IN_TCPPOOL
)) {
4717 so
->so_snd
.ssb_cc
+= dataout
;
4718 so
->so_snd
.ssb_mbcnt
+= mbcnt
;
4722 if (sctp_debug_on
& SCTP_DEBUG_OUTPUT2
) {
4723 kprintf("++total out:%d total_mbuf_out:%d\n",
4724 (int)asoc
->total_output_queue_size
,
4725 (int)asoc
->total_output_mbuf_queue_size
);
4730 ssb_unlock(&so
->so_snd
);
4732 SOCKBUF_UNLOCK(&so
->so_snd
);
4734 if (m
&& m
->m_nextpkt
) {
4737 mnext
= n
->m_nextpkt
;
4738 n
->m_nextpkt
= NULL
;
4748 static struct mbuf
*
4749 sctp_copy_mbufchain(struct mbuf
*clonechain
,
4750 struct mbuf
*outchain
)
4752 struct mbuf
*appendchain
;
4753 #if defined(__FreeBSD__) || defined(__NetBSD__)
4754 /* Supposedly m_copypacket is an optimization, use it if we can */
4755 if (clonechain
->m_flags
& M_PKTHDR
) {
4756 appendchain
= m_copypacket(clonechain
, MB_DONTWAIT
);
4757 sctp_pegs
[SCTP_CACHED_SRC
]++;
4759 appendchain
= m_copy(clonechain
, 0, M_COPYALL
);
4760 #elif defined(__APPLE__)
4761 appendchain
= sctp_m_copym(clonechain
, 0, M_COPYALL
, MB_DONTWAIT
);
4763 appendchain
= m_copy(clonechain
, 0, M_COPYALL
);
4766 if (appendchain
== NULL
) {
4769 sctp_m_freem(outchain
);
4773 /* tack on to the end */
4777 if (m
->m_next
== NULL
) {
4778 m
->m_next
= appendchain
;
4783 if (outchain
->m_flags
& M_PKTHDR
) {
4789 append_tot
+= t
->m_len
;
4792 outchain
->m_pkthdr
.len
+= append_tot
;
4796 return (appendchain
);
/*
 * Per-association callback for the "send to all associations" iterator:
 * clone the user's mbuf chain and append it to this association's
 * primary destination, non-blocking.
 *
 * NOTE(review): lines after the sctp_msg_append() call were dropped by
 * the extraction; the result bookkeeping below is reconstructed from the
 * upstream KAME code — confirm against the repository copy.
 */
static void
sctp_sendall_iterator(struct sctp_inpcb *inp, struct sctp_tcb *stcb, void *ptr,
		      u_int32_t val)
{
	struct sctp_copy_all *ca;
	struct mbuf *m;
	int ret;

	ca = (struct sctp_copy_all *)ptr;
	if (ca->m == NULL) {
		/* nothing to send */
		return;
	}
	if (ca->inp != inp) {
		/* different endpoint than the one this job was queued for */
		return;
	}
	m = sctp_copy_mbufchain(ca->m, NULL);
	if (m == NULL) {
		/* can't copy so we are done */
		ca->cnt_failed++;
		return;
	}
	ret = sctp_msg_append(stcb, stcb->asoc.primary_destination, m,
			      &ca->sndrcv, MSG_FNONBLOCKING);
	if (ret) {
		ca->cnt_failed++;
	} else {
		ca->cnt_sent++;
	}
}
/*
 * Completion callback for the send-all iterator: releases the cached
 * mbuf chain and the sctp_copy_all work structure.
 *
 * NOTE(review): the freeing tail of this function was dropped by the
 * extraction; reconstructed from upstream — verify against the
 * repository copy.
 */
static void
sctp_sendall_completes(void *ptr, u_int32_t val)
{
	struct sctp_copy_all *ca;

	ca = (struct sctp_copy_all *)ptr;
	/* Do a notify here?
	 * Kacheong suggests that the notify
	 * be done at the send time.. so you would
	 * push up a notification if any send failed.
	 * Don't know if this is feasable since the
	 * only failures we have is "memory" related and
	 * if you cannot get an mbuf to send the data
	 * you surely can't get an mbuf to send up
	 * to notify the user you can't send the data :->
	 */
	/* now free everything */
	m_freem(ca->m);
	FREE(ca, M_PCB);
}
/*
 * Push the data pointer of an mbuf cluster toward the end of the cluster
 * so that 'len' bytes land flush against the cluster's tail, keeping the
 * start long-aligned.  (Closing "} while (0)" restored — it was dropped
 * by the extraction.)
 */
#define MC_ALIGN(m, len) do {						\
	(m)->m_data += (MCLBYTES - (len)) & ~(sizeof(long) - 1);	\
} while (0)
/*
 * Copy 'len' bytes from the user I/O vector into a freshly allocated
 * cluster-backed mbuf chain.  Returns the chain, or NULL on allocation
 * or uiomove failure (any partial chain is freed).
 *
 * NOTE(review): interior error-path lines were dropped by the
 * extraction; the goto-based cleanup below is reconstructed from
 * fragments — verify against the repository copy.
 */
static struct mbuf *
sctp_copy_out_all(struct uio *uio, int len)
{
	struct mbuf *ret, *at;
	int left, willcpy, cancpy, error;

	ret = NULL;
	left = len;
	MGETHDR(ret, MB_WAIT, MT_HEADER);
	if (ret == NULL) {
		/* TSNH */
		return (NULL);
	}
	ret->m_len = 0;
	ret->m_pkthdr.len = len;
	MCLGET(ret, MB_WAIT);
	if (ret == NULL) {
		return (NULL);
	}
	if ((ret->m_flags & M_EXT) == 0) {
		/* no cluster attached; cannot proceed */
		m_freem(ret);
		return (NULL);
	}
	cancpy = M_TRAILINGSPACE(ret);
	willcpy = min(cancpy, left);
	at = ret;
	while (left > 0) {
		/* Align data to the end */
		MC_ALIGN(at, willcpy);
		error = uiomove(mtod(at, caddr_t), willcpy, uio);
		if (error) {
		err_out_now:
			/* release everything built so far */
			m_freem(ret);
			return (NULL);
		}
		at->m_len = willcpy;
		at->m_nextpkt = at->m_next = 0;
		left -= willcpy;
		if (left > 0) {
			/* need another cluster for the remainder */
			MGET(at->m_next, MB_WAIT, MT_DATA);
			if (at->m_next == NULL) {
				goto err_out_now;
			}
			at = at->m_next;
			at->m_len = 0;
			MCLGET(at, MB_WAIT);
			if (at == NULL) {
				goto err_out_now;
			}
			if ((at->m_flags & M_EXT) == 0) {
				goto err_out_now;
			}
			cancpy = M_TRAILINGSPACE(at);
			willcpy = min(cancpy, left);
		}
	}
	return (ret);
}
/*
 * Queue a "send to every association on this endpoint" job: capture the
 * user data (from uio or from an mbuf chain), strip MSG_SENDALL, and
 * kick off the PCB iterator that replays the send per association.
 * Returns 0 on success or an errno.
 *
 * NOTE(review): several interior lines were dropped by the extraction;
 * control-flow glue reconstructed from fragments — verify against the
 * repository copy.
 */
static int
sctp_sendall (struct sctp_inpcb *inp, struct uio *uio, struct mbuf *m, struct sctp_sndrcvinfo *srcv)
{
	int ret;
	struct sctp_copy_all *ca;

	MALLOC(ca, struct sctp_copy_all *,
	       sizeof(struct sctp_copy_all), M_PCB, MB_WAIT);
	if (ca == NULL) {
		m_freem(m);
		return (ENOMEM);
	}
	memset (ca, 0, sizeof(struct sctp_copy_all));

	ca->inp = inp;
	ca->sndrcv = *srcv;
	/* take off the sendall flag, it would
	 * be bad if we failed to do this  :-0
	 */
	ca->sndrcv.sinfo_flags &= ~MSG_SENDALL;

	/* get length and mbuf chain */
	if (uio) {
		ca->sndlen = uio->uio_resid;
		ca->m = sctp_copy_out_all(uio, ca->sndlen);
		if (ca->m == NULL) {
			FREE(ca, M_PCB);
			return (ENOMEM);
		}
	} else {
		if ((m->m_flags & M_PKTHDR) == 0) {
			/*
			 * No packet header: sum the chain by hand.
			 * NOTE(review): the surviving fragment advances
			 * 'm' itself while summing (ca->sndlen += m->m_len),
			 * which leaves 'm' NULL for the ca->m assignment
			 * below — looks like a latent upstream bug; confirm
			 * against the repository before changing.
			 */
			ca->sndlen = 0;
			while (m) {
				ca->sndlen += m->m_len;
				m = m->m_next;
			}
		} else {
			ca->sndlen = m->m_pkthdr.len;
		}
		ca->m = m;
	}

	ret = sctp_initiate_iterator(sctp_sendall_iterator, SCTP_PCB_ANY_FLAGS, SCTP_ASOC_ANY_STATE,
				     (void *)ca, 0, sctp_sendall_completes, inp);
	if (ret) {
#ifdef SCTP_DEBUG
		kprintf("Failed to initate iterator to takeover associations\n");
#endif
		FREE(ca, M_PCB);
		return (EFAULT);
	}
	return (0);
}
/*
 * Drop any queued-but-unsent COOKIE_ECHO chunks from the control send
 * queue, releasing their data, destination reference, and zone memory.
 * Used when a newer cookie supersedes the queued one.
 *
 * NOTE(review): loop/brace glue restored around surviving fragments —
 * verify against the repository copy.
 */
static void
sctp_toss_old_cookies(struct sctp_association *asoc)
{
	struct sctp_tmit_chunk *chk, *nchk;

	chk = TAILQ_FIRST(&asoc->control_send_queue);
	while (chk) {
		/* grab successor first: we may unlink chk below */
		nchk = TAILQ_NEXT(chk, sctp_next);
		if (chk->rec.chunk_id == SCTP_COOKIE_ECHO) {
			TAILQ_REMOVE(&asoc->control_send_queue, chk, sctp_next);
			if (chk->data) {
				sctp_m_freem(chk->data);
				chk->data = NULL;
			}
			asoc->ctrl_queue_cnt--;
			if (chk->whoTo)
				sctp_free_remote_addr(chk->whoTo);
			SCTP_ZONE_FREE(sctppcbinfo.ipi_zone_chunk, chk);
			sctppcbinfo.ipi_count_chunk--;
			if ((int)sctppcbinfo.ipi_count_chunk < 0) {
				panic("Chunk count is negative");
			}
			sctppcbinfo.ipi_gencnt_chunk++;
		}
		chk = nchk;
	}
}
/*
 * Remove a stale ASCONF chunk (at most one is ever queued) from the
 * association's control send queue and release its resources.
 *
 * NOTE(review): loop/brace glue restored around surviving fragments —
 * verify against the repository copy.
 */
static void
sctp_toss_old_asconf(struct sctp_tcb *stcb)
{
	struct sctp_association *asoc;
	struct sctp_tmit_chunk *chk, *chk_tmp;

	asoc = &stcb->asoc;
	for (chk = TAILQ_FIRST(&asoc->control_send_queue); chk != NULL;
	     chk = chk_tmp) {
		/* grab successor first: we may unlink chk below */
		chk_tmp = TAILQ_NEXT(chk, sctp_next);
		/* find SCTP_ASCONF chunk in queue (only one ever in queue) */
		if (chk->rec.chunk_id == SCTP_ASCONF) {
			TAILQ_REMOVE(&asoc->control_send_queue, chk, sctp_next);
			if (chk->data) {
				sctp_m_freem(chk->data);
				chk->data = NULL;
			}
			asoc->ctrl_queue_cnt--;
			if (chk->whoTo)
				sctp_free_remote_addr(chk->whoTo);
			SCTP_ZONE_FREE(sctppcbinfo.ipi_zone_chunk, chk);
			sctppcbinfo.ipi_count_chunk--;
			if ((int)sctppcbinfo.ipi_count_chunk < 0) {
				panic("Chunk count is negative");
			}
			sctppcbinfo.ipi_gencnt_chunk++;
		}
	}
}
/*
 * After a successful transmit, migrate the just-sent chunks from the
 * send queue to the sent queue and update flight-size / peer-rwnd
 * accounting.  data_list[0..bundle_at-1] are the chunks that went out
 * in this packet to 'net'.
 *
 * NOTE(review): brace and #ifdef glue restored around surviving
 * fragments; the window-probe condition's third clause (bundle_at == 1)
 * was dropped by the extraction and is reconstructed — verify against
 * the repository copy.
 */
static void
sctp_clean_up_datalist(struct sctp_tcb *stcb,
		       struct sctp_association *asoc,
		       struct sctp_tmit_chunk **data_list,
		       int bundle_at,
		       struct sctp_nets *net)
{
	int i;

	for (i = 0; i < bundle_at; i++) {
		/* off of the send queue */
		if (i) {
			/* Any chunk NOT 0 you zap the time
			 * chunk 0 gets zapped or set based on
			 * if a RTO measurment is needed.
			 */
			data_list[i]->do_rtt = 0;
		}
		/* record the send time on the chunk */
		data_list[i]->sent_rcv_time = net->last_sent_time;
		TAILQ_REMOVE(&asoc->send_queue,
			     data_list[i],
			     sctp_next);
		/* on to the sent queue */
		TAILQ_INSERT_TAIL(&asoc->sent_queue,
				  data_list[i],
				  sctp_next);
		/* This does not lower until the cum-ack passes it */
		asoc->sent_queue_cnt++;
		asoc->send_queue_cnt--;
		if ((asoc->peers_rwnd <= 0) &&
		    (asoc->total_flight == 0) &&
		    (bundle_at == 1)) {
			/* Mark the chunk as being a window probe */
#ifdef SCTP_DEBUG
			if (sctp_debug_on & SCTP_DEBUG_OUTPUT1) {
				kprintf("WINDOW PROBE SET\n");
			}
#endif
			sctp_pegs[SCTP_WINDOW_PROBES]++;
			data_list[i]->rec.data.state_flags |= SCTP_WINDOW_PROBE;
		} else {
			data_list[i]->rec.data.state_flags &= ~SCTP_WINDOW_PROBE;
		}
#ifdef SCTP_AUDITING_ENABLED
		sctp_audit_log(0xC2, 3);
#endif
		data_list[i]->sent = SCTP_DATAGRAM_SENT;
		data_list[i]->snd_count = 1;
		/* account the chunk into the in-flight totals */
		net->flight_size += data_list[i]->book_size;
		asoc->total_flight += data_list[i]->book_size;
		asoc->total_flight_count++;
#ifdef SCTP_LOG_RWND
		sctp_log_rwnd(SCTP_DECREASE_PEER_RWND,
			      asoc->peers_rwnd, data_list[i]->send_size, sctp_peer_chunk_oh);
#endif
		/* shrink our view of the peer's receive window */
		asoc->peers_rwnd = sctp_sbspace_sub(asoc->peers_rwnd,
						    (u_int32_t)(data_list[i]->send_size + sctp_peer_chunk_oh));
		if (asoc->peers_rwnd < stcb->sctp_ep->sctp_ep.sctp_sws_sender) {
			/* SWS sender side engages */
			asoc->peers_rwnd = 0;
		}
	}
}
/*
 * Purge "stray" control chunks that have no retransmission timer of
 * their own (SACKs, heartbeats, shutdown pieces, error reports, etc.)
 * from the control send queue after an output pass.  Stream-reset
 * responses are also purged; stream-reset requests are kept (they are
 * timer-driven).
 *
 * NOTE(review): loop/brace glue and the clean_up_anyway label restored
 * around surviving fragments — verify against the repository copy.
 */
static void
sctp_clean_up_ctl(struct sctp_association *asoc)
{
	struct sctp_tmit_chunk *chk, *nchk;

	for (chk = TAILQ_FIRST(&asoc->control_send_queue);
	     chk; chk = nchk) {
		/* grab successor first: we may unlink chk below */
		nchk = TAILQ_NEXT(chk, sctp_next);
		if ((chk->rec.chunk_id == SCTP_SELECTIVE_ACK) ||
		    (chk->rec.chunk_id == SCTP_HEARTBEAT_REQUEST) ||
		    (chk->rec.chunk_id == SCTP_HEARTBEAT_ACK) ||
		    (chk->rec.chunk_id == SCTP_SHUTDOWN) ||
		    (chk->rec.chunk_id == SCTP_SHUTDOWN_ACK) ||
		    (chk->rec.chunk_id == SCTP_OPERATION_ERROR) ||
		    (chk->rec.chunk_id == SCTP_PACKET_DROPPED) ||
		    (chk->rec.chunk_id == SCTP_COOKIE_ACK) ||
		    (chk->rec.chunk_id == SCTP_ECN_CWR) ||
		    (chk->rec.chunk_id == SCTP_ASCONF_ACK)) {
			/* Stray chunks must be cleaned up */
		clean_up_anyway:
			TAILQ_REMOVE(&asoc->control_send_queue, chk, sctp_next);
			if (chk->data) {
				sctp_m_freem(chk->data);
				chk->data = NULL;
			}
			asoc->ctrl_queue_cnt--;
			sctp_free_remote_addr(chk->whoTo);
			SCTP_ZONE_FREE(sctppcbinfo.ipi_zone_chunk, chk);
			sctppcbinfo.ipi_count_chunk--;
			if ((int)sctppcbinfo.ipi_count_chunk < 0) {
				panic("Chunk count is negative");
			}
			sctppcbinfo.ipi_gencnt_chunk++;
		} else if (chk->rec.chunk_id == SCTP_STREAM_RESET) {
			struct sctp_stream_reset_req *strreq;
			/* special handling, we must look into the param */
			strreq = mtod(chk->data, struct sctp_stream_reset_req *);
			if (strreq->sr_req.ph.param_type == ntohs(SCTP_STR_RESET_RESPONSE)) {
				/* responses are stray too */
				goto clean_up_anyway;
			}
		}
	}
}
5140 sctp_move_to_outqueue(struct sctp_tcb
*stcb
,
5141 struct sctp_stream_out
*strq
)
5143 /* Move from the stream to the send_queue keeping track of the total */
5144 struct sctp_association
*asoc
;
5148 struct sctp_tmit_chunk
*chk
, *nchk
;
5149 struct sctp_data_chunk
*dchkh
;
5150 struct sctpchunk_listhead tmp
;
5155 chk
= TAILQ_FIRST(&strq
->outqueue
);
5157 nchk
= TAILQ_NEXT(chk
, sctp_next
);
5158 /* now put in the chunk header */
5160 M_PREPEND(chk
->data
, sizeof(struct sctp_data_chunk
), MB_DONTWAIT
);
5161 if (chk
->data
== NULL
) {
5166 if (orig
!= chk
->data
) {
5167 /* A new mbuf was added, account for it */
5168 if ((stcb
->sctp_ep
->sctp_flags
& SCTP_PCB_FLAGS_TCPTYPE
) ||
5169 (stcb
->sctp_ep
->sctp_flags
& SCTP_PCB_FLAGS_IN_TCPPOOL
)) {
5170 stcb
->sctp_socket
->so_snd
.ssb_mbcnt
+= MSIZE
;
5172 #ifdef SCTP_MBCNT_LOGGING
5173 sctp_log_mbcnt(SCTP_LOG_MBCNT_INCREASE
,
5174 asoc
->total_output_queue_size
,
5176 asoc
->total_output_mbuf_queue_size
,
5179 stcb
->asoc
.total_output_mbuf_queue_size
+= MSIZE
;
5180 chk
->mbcnt
+= MSIZE
;
5182 chk
->send_size
+= sizeof(struct sctp_data_chunk
);
5183 /* This should NOT have to do anything, but
5184 * I would rather be cautious
5186 if (!failed
&& ((size_t)chk
->data
->m_len
< sizeof(struct sctp_data_chunk
))) {
5187 m_pullup(chk
->data
, sizeof(struct sctp_data_chunk
));
5188 if (chk
->data
== NULL
) {
5193 dchkh
= mtod(chk
->data
, struct sctp_data_chunk
*);
5194 dchkh
->ch
.chunk_length
= htons(chk
->send_size
);
5195 /* Chunks must be padded to even word boundary */
5196 padval
= chk
->send_size
% 4;
5198 /* For fragmented messages this should not
5199 * run except possibly on the last chunk
5201 if (sctp_pad_lastmbuf(chk
->data
, (4 - padval
))) {
5202 /* we are in big big trouble no mbufs :< */
5206 chk
->send_size
+= (4 - padval
);
5208 /* pull from stream queue */
5209 TAILQ_REMOVE(&strq
->outqueue
, chk
, sctp_next
);
5210 asoc
->stream_queue_cnt
--;
5211 TAILQ_INSERT_TAIL(&tmp
, chk
, sctp_next
);
5212 /* add it in to the size of moved chunks */
5213 if (chk
->rec
.data
.rcv_flags
& SCTP_DATA_LAST_FRAG
) {
5214 /* we pull only one message */
5220 /* Gak, we just lost the user message */
5221 chk
= TAILQ_FIRST(&tmp
);
5223 nchk
= TAILQ_NEXT(chk
, sctp_next
);
5224 TAILQ_REMOVE(&tmp
, chk
, sctp_next
);
5226 sctp_ulp_notify(SCTP_NOTIFY_DG_FAIL
, stcb
,
5227 (SCTP_NOTIFY_DATAGRAM_UNSENT
|SCTP_INTERNAL_ERROR
),
5231 sctp_m_freem(chk
->data
);
5235 sctp_free_remote_addr(chk
->whoTo
);
5238 SCTP_ZONE_FREE(sctppcbinfo
.ipi_zone_chunk
, chk
);
5239 sctppcbinfo
.ipi_count_chunk
--;
5240 if ((int)sctppcbinfo
.ipi_count_chunk
< 0) {
5241 panic("Chunk count is negative");
5243 sctppcbinfo
.ipi_gencnt_chunk
++;
5248 /* now pull them off of temp wheel */
5249 chk
= TAILQ_FIRST(&tmp
);
5251 nchk
= TAILQ_NEXT(chk
, sctp_next
);
5252 /* insert on send_queue */
5253 TAILQ_REMOVE(&tmp
, chk
, sctp_next
);
5254 TAILQ_INSERT_TAIL(&asoc
->send_queue
, chk
, sctp_next
);
5255 asoc
->send_queue_cnt
++;
5257 chk
->rec
.data
.TSN_seq
= asoc
->sending_seq
++;
5259 dchkh
= mtod(chk
->data
, struct sctp_data_chunk
*);
5260 /* Put the rest of the things in place now. Size
5261 * was done earlier in previous loop prior to
5264 dchkh
->ch
.chunk_type
= SCTP_DATA
;
5265 dchkh
->ch
.chunk_flags
= chk
->rec
.data
.rcv_flags
;
5266 dchkh
->dp
.tsn
= htonl(chk
->rec
.data
.TSN_seq
);
5267 dchkh
->dp
.stream_id
= htons(strq
->stream_no
);
5268 dchkh
->dp
.stream_sequence
= htons(chk
->rec
.data
.stream_seq
);
5269 dchkh
->dp
.protocol_id
= chk
->rec
.data
.payloadtype
;
5270 /* total count moved */
5271 tot_moved
+= chk
->send_size
;
/*
 * Round-robin over the stream "wheel", moving roughly one congestion
 * window's worth (at least one MTU) of queued user messages destined
 * for 'net' from the per-stream queues onto the association's general
 * send_queue.  Tracks the last serviced stream for fairness.
 *
 * NOTE(review): the extraction dropped a number of interior lines
 * (branch closings, the wheel-rotation glue); the control flow below is
 * reconstructed from the surviving fragments — verify against the
 * repository copy before relying on this text.
 */
static void
sctp_fill_outqueue(struct sctp_tcb *stcb,
		   struct sctp_nets *net)
{
	struct sctp_association *asoc;
	struct sctp_tmit_chunk *chk;
	struct sctp_stream_out *strq, *strqn;
	int mtu_fromwheel, goal_mtu;
	unsigned int moved, seenend, cnt_mvd=0;

	asoc = &stcb->asoc;
	/* Attempt to move at least 1 MTU's worth
	 * onto the wheel for each destination address
	 */
	goal_mtu = net->cwnd - net->flight_size;
	if ((unsigned int)goal_mtu < net->mtu) {
		goal_mtu = net->mtu;
	}
	if (sctp_pegs[SCTP_MOVED_MTU] < (unsigned int)goal_mtu) {
		sctp_pegs[SCTP_MOVED_MTU] = goal_mtu;
	}
	seenend = moved = mtu_fromwheel = 0;
	if (asoc->last_out_stream == NULL) {
		strq = asoc->last_out_stream = TAILQ_FIRST(&asoc->out_wheel);
		if (asoc->last_out_stream == NULL) {
			/* huh nothing on the wheel, TSNH */
			return;
		}
	} else {
		/* resume after the stream we serviced last time */
		strq = TAILQ_NEXT(asoc->last_out_stream, next_spoke);
	}
	if (strq == NULL) {
		asoc->last_out_stream = TAILQ_FIRST(&asoc->out_wheel);
	}
	while (mtu_fromwheel < goal_mtu) {
		if (strq == NULL) {
			/* wrapped off the end of the wheel */
			if (seenend == 0) {
				seenend = 1;
				strq = TAILQ_FIRST(&asoc->out_wheel);
			} else if ((moved == 0) && (seenend)) {
				/* none left on the wheel */
				sctp_pegs[SCTP_MOVED_NLEF]++;
				return;
			} else if (moved) {
				/*
				 * clear the flags and rotate back through
				 * again
				 */
				moved = 0;
				seenend = 0;
				strq = TAILQ_FIRST(&asoc->out_wheel);
			}
			if (strq == NULL)
				break;
			continue;
		}
		strqn = TAILQ_NEXT(strq, next_spoke);
		if ((chk = TAILQ_FIRST(&strq->outqueue)) == NULL) {
			/* none left on this queue, prune a spoke? */
			sctp_remove_from_wheel(asoc, strq);
			if (strq == asoc->last_out_stream) {
				/* the last one we used went off the wheel */
				asoc->last_out_stream = NULL;
			}
			strq = strqn;
			continue;
		}
		if (chk->whoTo != net) {
			/* Skip this stream, first one on stream
			 * does not head to our current destination.
			 */
			strq = strqn;
			continue;
		}
		mtu_fromwheel += sctp_move_to_outqueue(stcb, strq);
		cnt_mvd++;
		moved++;
		asoc->last_out_stream = strq;
		strq = strqn;
	}
	sctp_pegs[SCTP_MOVED_MAX]++;
#ifdef SCTP_DEBUG
	if (sctp_debug_on & SCTP_DEBUG_OUTPUT3) {
		kprintf("Ok we moved %d chunks to send queue\n",
		       cnt_mvd);
	}
#endif
	if (sctp_pegs[SCTP_MOVED_QMAX] < cnt_mvd) {
		sctp_pegs[SCTP_MOVED_QMAX] = cnt_mvd;
	}
}
/*
 * Re-arm any queued ECN_ECHO control chunks so the next output pass
 * sends them again (marks them back to UNSENT).
 */
static void
sctp_fix_ecn_echo(struct sctp_association *asoc)
{
	struct sctp_tmit_chunk *chk;

	TAILQ_FOREACH(chk, &asoc->control_send_queue, sctp_next) {
		if (chk->rec.chunk_id == SCTP_ECN_ECHO) {
			chk->sent = SCTP_DATAGRAM_UNSENT;
		}
	}
}
/*
 * When destination 'net' is failing, re-home every send-queue chunk
 * that targets it onto a reachable alternate destination (if one
 * exists), fixing up the per-destination reference counts.
 *
 * NOTE(review): the chunk re-homing lines inside the loop were partly
 * dropped by the extraction and are reconstructed — verify against the
 * repository copy.
 */
static void
sctp_move_to_an_alt(struct sctp_tcb *stcb,
		    struct sctp_association *asoc,
		    struct sctp_nets *net)
{
	struct sctp_tmit_chunk *chk;
	struct sctp_nets *a_net;

	a_net = sctp_find_alternate_net(stcb, net);
	if ((a_net != net) &&
	    ((a_net->dest_state & SCTP_ADDR_REACHABLE) == SCTP_ADDR_REACHABLE)) {
		/*
		 * We only proceed if a valid alternate is found that is
		 * not this one and is reachable. Here we must move all
		 * chunks queued in the send queue off of the destination
		 * address to our alternate.
		 */
		TAILQ_FOREACH(chk, &asoc->send_queue, sctp_next) {
			if (chk->whoTo == net) {
				/* Move the chunk to our alternate */
				sctp_free_remote_addr(chk->whoTo);
				chk->whoTo = a_net;
				a_net->ref_count++;
			}
		}
	}
}
/*
 * Debug/statistics flag: non-zero while output is being driven by a
 * user-initiated send (read by the chunk-output path below).
 */
static int sctp_from_user_send=0;
5411 sctp_med_chunk_output(struct sctp_inpcb
*inp
,
5412 struct sctp_tcb
*stcb
,
5413 struct sctp_association
*asoc
,
5416 int control_only
, int *cwnd_full
, int from_where
,
5417 struct timeval
*now
, int *now_filled
)
5420 * Ok this is the generic chunk service queue.
5421 * we must do the following:
5422 * - Service the stream queue that is next, moving any message
5423 * (note I must get a complete message i.e. FIRST/MIDDLE and
5424 * LAST to the out queue in one pass) and assigning TSN's
5425 * - Check to see if the cwnd/rwnd allows any output, if so we
5426 * go ahead and fomulate and send the low level chunks. Making
5427 * sure to combine any control in the control chunk queue also.
5429 struct sctp_nets
*net
;
5430 struct mbuf
*outchain
;
5431 struct sctp_tmit_chunk
*chk
, *nchk
;
5432 struct sctphdr
*shdr
;
5433 /* temp arrays for unlinking */
5434 struct sctp_tmit_chunk
*data_list
[SCTP_MAX_DATA_BUNDLING
];
5435 int no_fragmentflg
, error
;
5436 int one_chunk
, hbflag
;
5437 int asconf
, cookie
, no_out_cnt
;
5438 int bundle_at
, ctl_cnt
, no_data_chunks
, cwnd_full_ind
;
5439 unsigned int mtu
, r_mtu
, omtu
;
5442 ctl_cnt
= no_out_cnt
= asconf
= cookie
= 0;
5444 * First lets prime the pump. For each destination, if there
5445 * is room in the flight size, attempt to pull an MTU's worth
5446 * out of the stream queues into the general send_queue
5448 #ifdef SCTP_AUDITING_ENABLED
5449 sctp_audit_log(0xC2, 2);
5452 if (sctp_debug_on
& SCTP_DEBUG_OUTPUT3
) {
5453 kprintf("***********************\n");
5462 /* Nothing to possible to send? */
5463 if (TAILQ_EMPTY(&asoc
->control_send_queue
) &&
5464 TAILQ_EMPTY(&asoc
->send_queue
) &&
5465 TAILQ_EMPTY(&asoc
->out_wheel
)) {
5467 if (sctp_debug_on
& SCTP_DEBUG_OUTPUT3
) {
5468 kprintf("All wheels empty\n");
5473 if (asoc
->peers_rwnd
<= 0) {
5474 /* No room in peers rwnd */
5477 if (asoc
->total_flight
> 0) {
5478 /* we are allowed one chunk in flight */
5480 sctp_pegs
[SCTP_RWND_BLOCKED
]++;
5484 if (sctp_debug_on
& SCTP_DEBUG_OUTPUT3
) {
5485 kprintf("Ok we have done the fillup no_data_chunk=%d tf=%d prw:%d\n",
5486 (int)no_data_chunks
,
5487 (int)asoc
->total_flight
, (int)asoc
->peers_rwnd
);
5490 TAILQ_FOREACH(net
, &asoc
->nets
, sctp_next
) {
5492 if (sctp_debug_on
& SCTP_DEBUG_OUTPUT3
) {
5493 kprintf("net:%p fs:%d cwnd:%d\n",
5494 net
, net
->flight_size
, net
->cwnd
);
5497 if (net
->flight_size
>= net
->cwnd
) {
5498 /* skip this network, no room */
5501 if (sctp_debug_on
& SCTP_DEBUG_OUTPUT3
) {
5502 kprintf("Ok skip fillup->fs:%d > cwnd:%d\n",
5507 sctp_pegs
[SCTP_CWND_NOFILL
]++;
5511 * spin through the stream queues moving one message and
5512 * assign TSN's as appropriate.
5514 sctp_fill_outqueue(stcb
, net
);
5516 *cwnd_full
= cwnd_full_ind
;
5517 /* now service each destination and send out what we can for it */
5519 if (sctp_debug_on
& SCTP_DEBUG_OUTPUT3
) {
5521 TAILQ_FOREACH(chk
, &asoc
->send_queue
, sctp_next
) {
5524 kprintf("We have %d chunks on the send_queue\n", chk_cnt
);
5526 TAILQ_FOREACH(chk
, &asoc
->sent_queue
, sctp_next
) {
5529 kprintf("We have %d chunks on the sent_queue\n", chk_cnt
);
5530 TAILQ_FOREACH(chk
, &asoc
->control_send_queue
, sctp_next
) {
5533 kprintf("We have %d chunks on the control_queue\n", chk_cnt
);
5536 /* If we have data to send, and DSACK is running, stop it
5537 * and build a SACK to dump on to bundle with output. This
5538 * actually MAY make it so the bundling does not occur if
5539 * the SACK is big but I think this is ok because basic SACK
5540 * space is pre-reserved in our fragmentation size choice.
5542 if ((TAILQ_FIRST(&asoc
->send_queue
) != NULL
) &&
5543 (no_data_chunks
== 0)) {
5544 /* We will be sending something */
5545 if (callout_pending(&stcb
->asoc
.dack_timer
.timer
)) {
5546 /* Yep a callout is pending */
5547 sctp_timer_stop(SCTP_TIMER_TYPE_RECV
,
5550 sctp_send_sack(stcb
);
5553 /* Nothing to send? */
5554 if ((TAILQ_FIRST(&asoc
->control_send_queue
) == NULL
) &&
5555 (TAILQ_FIRST(&asoc
->send_queue
) == NULL
)) {
5558 TAILQ_FOREACH(net
, &asoc
->nets
, sctp_next
) {
5559 /* how much can we send? */
5560 if (net
->ref_count
< 2) {
5561 /* Ref-count of 1 so we cannot have data or control
5562 * queued to this address. Skip it.
5566 ctl_cnt
= bundle_at
= 0;
5571 if ((net
->ro
.ro_rt
) && (net
->ro
.ro_rt
->rt_ifp
)) {
5572 /* if we have a route and an ifp
5573 * check to see if we have room to
5577 ifp
= net
->ro
.ro_rt
->rt_ifp
;
5578 if ((ifp
->if_snd
.ifq_len
+ 2) >= ifp
->if_snd
.ifq_maxlen
) {
5579 sctp_pegs
[SCTP_IFP_QUEUE_FULL
]++;
5580 #ifdef SCTP_LOG_MAXBURST
5581 sctp_log_maxburst(net
, ifp
->if_snd
.ifq_len
, ifp
->if_snd
.ifq_maxlen
, SCTP_MAX_IFP_APPLIED
);
5586 if (((struct sockaddr
*)&net
->ro
._l_addr
)->sa_family
== AF_INET
) {
5587 mtu
= net
->mtu
- (sizeof(struct ip
) + sizeof(struct sctphdr
));
5589 mtu
= net
->mtu
- (sizeof(struct ip6_hdr
) + sizeof(struct sctphdr
));
5591 if (mtu
> asoc
->peers_rwnd
) {
5592 if (asoc
->total_flight
> 0) {
5593 /* We have a packet in flight somewhere */
5594 r_mtu
= asoc
->peers_rwnd
;
5596 /* We are always allowed to send one MTU out */
5604 if (sctp_debug_on
& SCTP_DEBUG_OUTPUT3
) {
5605 kprintf("Ok r_mtu is %d mtu is %d for this net:%p one_chunk:%d\n",
5606 r_mtu
, mtu
, net
, one_chunk
);
5609 /************************/
5610 /* Control transmission */
5611 /************************/
5612 /* Now first lets go through the control queue */
5613 for (chk
= TAILQ_FIRST(&asoc
->control_send_queue
);
5615 nchk
= TAILQ_NEXT(chk
, sctp_next
);
5616 if (chk
->whoTo
!= net
) {
5618 * No, not sent to the network we are
5623 if (chk
->data
== NULL
) {
5626 if ((chk
->data
->m_flags
& M_PKTHDR
) == 0) {
5628 * NOTE: the chk queue MUST have the PKTHDR
5629 * flag set on it with a total in the
5630 * m_pkthdr.len field!! else the chunk will
5635 if (chk
->sent
!= SCTP_DATAGRAM_UNSENT
) {
5637 * It must be unsent. Cookies and ASCONF's
5638 * hang around but there timers will force
5639 * when marked for resend.
5643 /* Here we do NOT factor the r_mtu */
5644 if ((chk
->data
->m_pkthdr
.len
< (int)mtu
) ||
5645 (chk
->flags
& CHUNK_FLAGS_FRAGMENT_OK
)) {
5647 * We probably should glom the mbuf chain from
5648 * the chk->data for control but the problem
5649 * is it becomes yet one more level of
5650 * tracking to do if for some reason output
5651 * fails. Then I have got to reconstruct the
5652 * merged control chain.. el yucko.. for now
5653 * we take the easy way and do the copy
5655 outchain
= sctp_copy_mbufchain(chk
->data
,
5657 if (outchain
== NULL
) {
5660 /* update our MTU size */
5661 mtu
-= chk
->data
->m_pkthdr
.len
;
5665 /* Do clear IP_DF ? */
5666 if (chk
->flags
& CHUNK_FLAGS_FRAGMENT_OK
) {
5669 /* Mark things to be removed, if needed */
5670 if ((chk
->rec
.chunk_id
== SCTP_SELECTIVE_ACK
) ||
5671 (chk
->rec
.chunk_id
== SCTP_HEARTBEAT_REQUEST
) ||
5672 (chk
->rec
.chunk_id
== SCTP_HEARTBEAT_ACK
) ||
5673 (chk
->rec
.chunk_id
== SCTP_SHUTDOWN
) ||
5674 (chk
->rec
.chunk_id
== SCTP_SHUTDOWN_ACK
) ||
5675 (chk
->rec
.chunk_id
== SCTP_OPERATION_ERROR
) ||
5676 (chk
->rec
.chunk_id
== SCTP_COOKIE_ACK
) ||
5677 (chk
->rec
.chunk_id
== SCTP_ECN_CWR
) ||
5678 (chk
->rec
.chunk_id
== SCTP_PACKET_DROPPED
) ||
5679 (chk
->rec
.chunk_id
== SCTP_ASCONF_ACK
)) {
5681 if (chk
->rec
.chunk_id
== SCTP_HEARTBEAT_REQUEST
)
5683 /* remove these chunks at the end */
5684 if (chk
->rec
.chunk_id
== SCTP_SELECTIVE_ACK
) {
5685 /* turn off the timer */
5686 if (callout_pending(&stcb
->asoc
.dack_timer
.timer
)) {
5687 sctp_timer_stop(SCTP_TIMER_TYPE_RECV
,
5694 * Other chunks, since they have
5695 * timers running (i.e. COOKIE or
5696 * ASCONF) we just "trust" that it
5697 * gets sent or retransmitted.
5700 if (chk
->rec
.chunk_id
== SCTP_COOKIE_ECHO
) {
5703 } else if (chk
->rec
.chunk_id
== SCTP_ASCONF
) {
5705 * set hb flag since we can use
5711 chk
->sent
= SCTP_DATAGRAM_SENT
;
5716 * Ok we are out of room but we can
5717 * output without effecting the flight
5718 * size since this little guy is a
5719 * control only packet.
5722 sctp_timer_start(SCTP_TIMER_TYPE_ASCONF
, inp
, stcb
, net
);
5726 sctp_timer_start(SCTP_TIMER_TYPE_COOKIE
, inp
, stcb
, net
);
5729 if (outchain
->m_len
== 0) {
5731 * Special case for when you
5732 * get a 0 len mbuf at the
5733 * head due to the lack of a
5734 * MHDR at the beginning.
5736 outchain
->m_len
= sizeof(struct sctphdr
);
5738 M_PREPEND(outchain
, sizeof(struct sctphdr
), MB_DONTWAIT
);
5739 if (outchain
== NULL
) {
5742 goto error_out_again
;
5745 shdr
= mtod(outchain
, struct sctphdr
*);
5746 shdr
->src_port
= inp
->sctp_lport
;
5747 shdr
->dest_port
= stcb
->rport
;
5748 shdr
->v_tag
= htonl(stcb
->asoc
.peer_vtag
);
5751 if ((error
= sctp_lowlevel_chunk_output(inp
, stcb
, net
,
5752 (struct sockaddr
*)&net
->ro
._l_addr
,
5754 no_fragmentflg
, 0, NULL
, asconf
))) {
5755 if (error
== ENOBUFS
) {
5756 asoc
->ifp_had_enobuf
= 1;
5758 sctp_pegs
[SCTP_DATA_OUT_ERR
]++;
5759 if (from_where
== 0) {
5760 sctp_pegs
[SCTP_ERROUT_FRM_USR
]++;
5764 if (sctp_debug_on
& SCTP_DEBUG_OUTPUT2
) {
5765 kprintf("Gak got ctrl error %d\n", error
);
5768 /* error, could not output */
5771 if (sctp_debug_on
& SCTP_DEBUG_OUTPUT3
) {
5772 kprintf("Update HB anyway\n");
5775 if (*now_filled
== 0) {
5776 SCTP_GETTIME_TIMEVAL(&net
->last_sent_time
);
5778 *now
= net
->last_sent_time
;
5780 net
->last_sent_time
= *now
;
5784 if (error
== EHOSTUNREACH
) {
5787 * unreachable during
5791 if (sctp_debug_on
& SCTP_DEBUG_OUTPUT3
) {
5792 kprintf("Moving data to an alterante\n");
5795 sctp_move_to_an_alt(stcb
, asoc
, net
);
5797 sctp_clean_up_ctl (asoc
);
5800 asoc
->ifp_had_enobuf
= 0;
5801 /* Only HB or ASCONF advances time */
5803 if (*now_filled
== 0) {
5804 SCTP_GETTIME_TIMEVAL(&net
->last_sent_time
);
5806 *now
= net
->last_sent_time
;
5808 net
->last_sent_time
= *now
;
5813 * increase the number we sent, if a
5814 * cookie is sent we don't tell them
5818 *num_out
+= ctl_cnt
;
5819 /* recalc a clean slate and setup */
5820 if (stcb
->sctp_ep
->sctp_flags
& SCTP_PCB_FLAGS_BOUND_V6
) {
5821 mtu
= (net
->mtu
- SCTP_MIN_OVERHEAD
);
5823 mtu
= (net
->mtu
- SCTP_MIN_V4_OVERHEAD
);
5829 /*********************/
5830 /* Data transmission */
5831 /*********************/
5832 /* now lets add any data within the MTU constraints */
5833 if (((struct sockaddr
*)&net
->ro
._l_addr
)->sa_family
== AF_INET
) {
5834 omtu
= net
->mtu
- (sizeof(struct ip
) + sizeof(struct sctphdr
));
5836 omtu
= net
->mtu
- (sizeof(struct ip6_hdr
) + sizeof(struct sctphdr
));
5840 if (sctp_debug_on
& SCTP_DEBUG_OUTPUT3
) {
5841 kprintf("Now to data transmission\n");
5845 if (((asoc
->state
& SCTP_STATE_OPEN
) == SCTP_STATE_OPEN
) ||
5847 for (chk
= TAILQ_FIRST(&asoc
->send_queue
); chk
; chk
= nchk
) {
5848 if (no_data_chunks
) {
5849 /* let only control go out */
5851 if (sctp_debug_on
& SCTP_DEBUG_OUTPUT3
) {
5852 kprintf("Either nothing to send or we are full\n");
5857 if (net
->flight_size
>= net
->cwnd
) {
5858 /* skip this net, no room for data */
5860 if (sctp_debug_on
& SCTP_DEBUG_OUTPUT3
) {
5861 kprintf("fs:%d > cwnd:%d\n",
5862 net
->flight_size
, net
->cwnd
);
5865 sctp_pegs
[SCTP_CWND_BLOCKED
]++;
5869 nchk
= TAILQ_NEXT(chk
, sctp_next
);
5870 if (chk
->whoTo
!= net
) {
5871 /* No, not sent to this net */
5873 if (sctp_debug_on
& SCTP_DEBUG_OUTPUT3
) {
5874 kprintf("chk->whoTo:%p not %p\n",
5882 if (sctp_debug_on
& SCTP_DEBUG_OUTPUT3
) {
5883 kprintf("Can we pick up a chunk?\n");
5886 if ((chk
->send_size
> omtu
) && ((chk
->flags
& CHUNK_FLAGS_FRAGMENT_OK
) == 0)) {
5887 /* strange, we have a chunk that is to bit
5888 * for its destination and yet no fragment ok flag.
5889 * Something went wrong when the PMTU changed...we did
5890 * not mark this chunk for some reason?? I will
5891 * fix it here by letting IP fragment it for now and
5892 * printing a warning. This really should not happen ...
5894 /*#ifdef SCTP_DEBUG*/
5895 kprintf("Warning chunk of %d bytes > mtu:%d and yet PMTU disc missed\n",
5896 chk
->send_size
, mtu
);
5898 chk
->flags
|= CHUNK_FLAGS_FRAGMENT_OK
;
5901 if (((chk
->send_size
<= mtu
) && (chk
->send_size
<= r_mtu
)) ||
5902 ((chk
->flags
& CHUNK_FLAGS_FRAGMENT_OK
) && (chk
->send_size
<= asoc
->peers_rwnd
))) {
5903 /* ok we will add this one */
5905 if (sctp_debug_on
& SCTP_DEBUG_OUTPUT3
) {
5906 kprintf("Picking up the chunk\n");
5909 outchain
= sctp_copy_mbufchain(chk
->data
, outchain
);
5910 if (outchain
== NULL
) {
5912 if (sctp_debug_on
& SCTP_DEBUG_OUTPUT3
) {
5913 kprintf("Gakk no memory\n");
5916 if (!callout_pending(&net
->rxt_timer
.timer
)) {
5917 sctp_timer_start(SCTP_TIMER_TYPE_SEND
, inp
, stcb
, net
);
5921 /* upate our MTU size */
5922 /* Do clear IP_DF ? */
5923 if (chk
->flags
& CHUNK_FLAGS_FRAGMENT_OK
) {
5926 mtu
-= chk
->send_size
;
5927 r_mtu
-= chk
->send_size
;
5928 data_list
[bundle_at
++] = chk
;
5929 if (bundle_at
>= SCTP_MAX_DATA_BUNDLING
) {
5937 if ((r_mtu
<= 0) || one_chunk
) {
5943 * Must be sent in order of the TSN's
5947 if (sctp_debug_on
& SCTP_DEBUG_OUTPUT3
) {
5948 kprintf("ok no more chk:%d > mtu:%d || < r_mtu:%d\n",
5949 chk
->send_size
, mtu
, r_mtu
);
5956 } /* if asoc.state OPEN */
5957 /* Is there something to send for this destination? */
5959 if (sctp_debug_on
& SCTP_DEBUG_OUTPUT3
) {
5960 kprintf("ok now is chain assembled? %p\n",
5966 /* We may need to start a control timer or two */
5968 sctp_timer_start(SCTP_TIMER_TYPE_ASCONF
, inp
, stcb
, net
);
5972 sctp_timer_start(SCTP_TIMER_TYPE_COOKIE
, inp
, stcb
, net
);
5975 /* must start a send timer if data is being sent */
5976 if (bundle_at
&& (!callout_pending(&net
->rxt_timer
.timer
))) {
5977 /* no timer running on this destination
5981 if (sctp_debug_on
& SCTP_DEBUG_OUTPUT3
) {
5982 kprintf("ok lets start a send timer .. we will transmit %p\n",
5986 sctp_timer_start(SCTP_TIMER_TYPE_SEND
, inp
, stcb
, net
);
5988 /* Now send it, if there is anything to send :> */
5989 if ((outchain
->m_flags
& M_PKTHDR
) == 0) {
5992 MGETHDR(t
, MB_DONTWAIT
, MT_HEADER
);
5994 sctp_m_freem(outchain
);
5997 t
->m_next
= outchain
;
5998 t
->m_pkthdr
.len
= 0;
5999 t
->m_pkthdr
.rcvif
= 0;
6004 outchain
->m_pkthdr
.len
+= t
->m_len
;
6008 if (outchain
->m_len
== 0) {
6009 /* Special case for when you get a 0 len
6010 * mbuf at the head due to the lack
6011 * of a MHDR at the beginning.
6013 MH_ALIGN(outchain
, sizeof(struct sctphdr
));
6014 outchain
->m_len
= sizeof(struct sctphdr
);
6016 M_PREPEND(outchain
, sizeof(struct sctphdr
), MB_DONTWAIT
);
6017 if (outchain
== NULL
) {
6023 shdr
= mtod(outchain
, struct sctphdr
*);
6024 shdr
->src_port
= inp
->sctp_lport
;
6025 shdr
->dest_port
= stcb
->rport
;
6026 shdr
->v_tag
= htonl(stcb
->asoc
.peer_vtag
);
6028 if ((error
= sctp_lowlevel_chunk_output(inp
, stcb
, net
,
6029 (struct sockaddr
*)&net
->ro
._l_addr
,
6031 no_fragmentflg
, bundle_at
, data_list
[0], asconf
))) {
6032 /* error, we could not output */
6033 if (error
== ENOBUFS
) {
6034 asoc
->ifp_had_enobuf
= 1;
6036 sctp_pegs
[SCTP_DATA_OUT_ERR
]++;
6037 if (from_where
== 0) {
6038 sctp_pegs
[SCTP_ERROUT_FRM_USR
]++;
6043 if (sctp_debug_on
& SCTP_DEBUG_OUTPUT3
) {
6044 kprintf("Gak send error %d\n", error
);
6049 if (sctp_debug_on
& SCTP_DEBUG_OUTPUT3
) {
6050 kprintf("Update HB time anyway\n");
6053 if (*now_filled
== 0) {
6054 SCTP_GETTIME_TIMEVAL(&net
->last_sent_time
);
6056 *now
= net
->last_sent_time
;
6058 net
->last_sent_time
= *now
;
6062 if (error
== EHOSTUNREACH
) {
6064 * Destination went unreachable during
6068 if (sctp_debug_on
& SCTP_DEBUG_OUTPUT3
) {
6069 kprintf("Calling the movement routine\n");
6072 sctp_move_to_an_alt(stcb
, asoc
, net
);
6074 sctp_clean_up_ctl (asoc
);
6077 asoc
->ifp_had_enobuf
= 0;
6079 if (bundle_at
|| hbflag
) {
6080 /* For data/asconf and hb set time */
6081 if (*now_filled
== 0) {
6082 SCTP_GETTIME_TIMEVAL(&net
->last_sent_time
);
6084 *now
= net
->last_sent_time
;
6086 net
->last_sent_time
= *now
;
6091 *num_out
+= (ctl_cnt
+ bundle_at
);
6094 if (!net
->rto_pending
) {
6095 /* setup for a RTO measurement */
6096 net
->rto_pending
= 1;
6097 data_list
[0]->do_rtt
= 1;
6099 data_list
[0]->do_rtt
= 0;
6101 sctp_pegs
[SCTP_PEG_TSNS_SENT
] += bundle_at
;
6102 sctp_clean_up_datalist(stcb
, asoc
, data_list
, bundle_at
, net
);
6109 /* At the end there should be no NON timed
6110 * chunks hanging on this queue.
6112 if ((*num_out
== 0) && (*reason_code
== 0)) {
6115 sctp_clean_up_ctl (asoc
);
/*
 * Prepend an OPERATION_ERROR chunk header to 'op_err' and append the
 * resulting control chunk to the association's control send queue,
 * targeting the primary destination.  Frees 'op_err' on allocation
 * failure.
 *
 * NOTE(review): a few interior initialization lines (snd_count/flags/
 * data assignment, mat walk setup) were dropped by the extraction and
 * are reconstructed — verify against the repository copy.
 */
void
sctp_queue_op_err(struct sctp_tcb *stcb, struct mbuf *op_err)
{
	/* Prepend a OPERATIONAL_ERROR chunk header
	 * and put on the end of the control chunk queue.
	 */
	/* Sender had better have gotten a MGETHDR or else
	 * the control chunk will be forever skipped
	 */
	struct sctp_chunkhdr *hdr;
	struct sctp_tmit_chunk *chk;
	struct mbuf *mat;

	chk = (struct sctp_tmit_chunk *)SCTP_ZONE_GET(sctppcbinfo.ipi_zone_chunk);
	if (chk == NULL) {
		/* no memory */
		sctp_m_freem(op_err);
		return;
	}
	sctppcbinfo.ipi_count_chunk++;
	sctppcbinfo.ipi_gencnt_chunk++;
	M_PREPEND(op_err, sizeof(struct sctp_chunkhdr), MB_DONTWAIT);
	if (op_err == NULL) {
		/* prepend failed and consumed the chain; undo the zone get */
		SCTP_ZONE_FREE(sctppcbinfo.ipi_zone_chunk, chk);
		sctppcbinfo.ipi_count_chunk--;
		if ((int)sctppcbinfo.ipi_count_chunk < 0) {
			panic("Chunk count is negative");
		}
		sctppcbinfo.ipi_gencnt_chunk++;
		return;
	}
	/* total the chain length into the chunk's send size */
	chk->send_size = 0;
	mat = op_err;
	while (mat != NULL) {
		chk->send_size += mat->m_len;
		mat = mat->m_next;
	}
	chk->rec.chunk_id = SCTP_OPERATION_ERROR;
	chk->sent = SCTP_DATAGRAM_UNSENT;
	chk->snd_count = 0;
	chk->flags = 0;
	chk->asoc = &stcb->asoc;
	chk->data = op_err;
	chk->whoTo = chk->asoc->primary_destination;
	chk->whoTo->ref_count++;
	hdr = mtod(op_err, struct sctp_chunkhdr *);
	hdr->chunk_type = SCTP_OPERATION_ERROR;
	hdr->chunk_flags = 0;
	hdr->chunk_length = htons(chk->send_size);
	TAILQ_INSERT_TAIL(&chk->asoc->control_send_queue,
			  chk,
			  sctp_next);
	chk->asoc->ctrl_queue_cnt++;
}
6175 sctp_send_cookie_echo(struct mbuf
*m
,
6177 struct sctp_tcb
*stcb
,
6178 struct sctp_nets
*net
)
6181 * pull out the cookie and put it at the front of the control
6185 struct mbuf
*cookie
, *mat
;
6186 struct sctp_paramhdr parm
, *phdr
;
6187 struct sctp_chunkhdr
*hdr
;
6188 struct sctp_tmit_chunk
*chk
;
6189 uint16_t ptype
, plen
;
6190 /* First find the cookie in the param area */
6192 at
= offset
+ sizeof(struct sctp_init_chunk
);
6195 phdr
= sctp_get_next_param(m
, at
, &parm
, sizeof(parm
));
6199 ptype
= ntohs(phdr
->param_type
);
6200 plen
= ntohs(phdr
->param_length
);
6201 if (ptype
== SCTP_STATE_COOKIE
) {
6203 /* found the cookie */
6204 if ((pad
= (plen
% 4))) {
6207 cookie
= sctp_m_copym(m
, at
, plen
, MB_DONTWAIT
);
6208 if (cookie
== NULL
) {
6214 at
+= SCTP_SIZE32(plen
);
6216 if (cookie
== NULL
) {
6217 /* Did not find the cookie */
6220 /* ok, we got the cookie lets change it into a cookie echo chunk */
6222 /* first the change from param to cookie */
6223 hdr
= mtod(cookie
, struct sctp_chunkhdr
*);
6224 hdr
->chunk_type
= SCTP_COOKIE_ECHO
;
6225 hdr
->chunk_flags
= 0;
6226 /* now we MUST have a PKTHDR on it */
6227 if ((cookie
->m_flags
& M_PKTHDR
) != M_PKTHDR
) {
6228 /* we hope this happens rarely */
6229 MGETHDR(mat
, MB_DONTWAIT
, MT_HEADER
);
6231 sctp_m_freem(cookie
);
6235 mat
->m_pkthdr
.rcvif
= 0;
6236 mat
->m_next
= cookie
;
6239 cookie
->m_pkthdr
.len
= plen
;
6240 /* get the chunk stuff now and place it in the FRONT of the queue */
6241 chk
= (struct sctp_tmit_chunk
*)SCTP_ZONE_GET(sctppcbinfo
.ipi_zone_chunk
);
6244 sctp_m_freem(cookie
);
6247 sctppcbinfo
.ipi_count_chunk
++;
6248 sctppcbinfo
.ipi_gencnt_chunk
++;
6249 chk
->send_size
= cookie
->m_pkthdr
.len
;
6250 chk
->rec
.chunk_id
= SCTP_COOKIE_ECHO
;
6251 chk
->sent
= SCTP_DATAGRAM_UNSENT
;
6254 chk
->asoc
= &stcb
->asoc
;
6256 chk
->whoTo
= chk
->asoc
->primary_destination
;
6257 chk
->whoTo
->ref_count
++;
6258 TAILQ_INSERT_HEAD(&chk
->asoc
->control_send_queue
, chk
, sctp_next
);
6259 chk
->asoc
->ctrl_queue_cnt
++;
6264 sctp_send_heartbeat_ack(struct sctp_tcb
*stcb
,
6268 struct sctp_nets
*net
)
6270 /* take a HB request and make it into a
6271 * HB ack and send it.
6273 struct mbuf
*outchain
;
6274 struct sctp_chunkhdr
*chdr
;
6275 struct sctp_tmit_chunk
*chk
;
6279 /* must have a net pointer */
6282 outchain
= sctp_m_copym(m
, offset
, chk_length
, MB_DONTWAIT
);
6283 if (outchain
== NULL
) {
6284 /* gak out of memory */
6287 chdr
= mtod(outchain
, struct sctp_chunkhdr
*);
6288 chdr
->chunk_type
= SCTP_HEARTBEAT_ACK
;
6289 chdr
->chunk_flags
= 0;
6290 if ((outchain
->m_flags
& M_PKTHDR
) != M_PKTHDR
) {
6291 /* should not happen but we are cautious. */
6293 MGETHDR(tmp
, MB_DONTWAIT
, MT_HEADER
);
6298 tmp
->m_pkthdr
.rcvif
= 0;
6299 tmp
->m_next
= outchain
;
6302 outchain
->m_pkthdr
.len
= chk_length
;
6303 if (chk_length
% 4) {
6307 padlen
= 4 - (outchain
->m_pkthdr
.len
% 4);
6308 m_copyback(outchain
, outchain
->m_pkthdr
.len
, padlen
, (caddr_t
)&cpthis
);
6310 chk
= (struct sctp_tmit_chunk
*)SCTP_ZONE_GET(sctppcbinfo
.ipi_zone_chunk
);
6313 sctp_m_freem(outchain
);
6316 sctppcbinfo
.ipi_count_chunk
++;
6317 sctppcbinfo
.ipi_gencnt_chunk
++;
6319 chk
->send_size
= chk_length
;
6320 chk
->rec
.chunk_id
= SCTP_HEARTBEAT_ACK
;
6321 chk
->sent
= SCTP_DATAGRAM_UNSENT
;
6324 chk
->asoc
= &stcb
->asoc
;
6325 chk
->data
= outchain
;
6327 chk
->whoTo
->ref_count
++;
6328 TAILQ_INSERT_TAIL(&chk
->asoc
->control_send_queue
, chk
, sctp_next
);
6329 chk
->asoc
->ctrl_queue_cnt
++;
6333 sctp_send_cookie_ack(struct sctp_tcb
*stcb
) {
6334 /* formulate and queue a cookie-ack back to sender */
6335 struct mbuf
*cookie_ack
;
6336 struct sctp_chunkhdr
*hdr
;
6337 struct sctp_tmit_chunk
*chk
;
6340 MGETHDR(cookie_ack
, MB_DONTWAIT
, MT_HEADER
);
6341 if (cookie_ack
== NULL
) {
6345 cookie_ack
->m_data
+= SCTP_MIN_OVERHEAD
;
6346 chk
= (struct sctp_tmit_chunk
*)SCTP_ZONE_GET(sctppcbinfo
.ipi_zone_chunk
);
6349 sctp_m_freem(cookie_ack
);
6352 sctppcbinfo
.ipi_count_chunk
++;
6353 sctppcbinfo
.ipi_gencnt_chunk
++;
6355 chk
->send_size
= sizeof(struct sctp_chunkhdr
);
6356 chk
->rec
.chunk_id
= SCTP_COOKIE_ACK
;
6357 chk
->sent
= SCTP_DATAGRAM_UNSENT
;
6360 chk
->asoc
= &stcb
->asoc
;
6361 chk
->data
= cookie_ack
;
6362 if (chk
->asoc
->last_control_chunk_from
!= NULL
) {
6363 chk
->whoTo
= chk
->asoc
->last_control_chunk_from
;
6365 chk
->whoTo
= chk
->asoc
->primary_destination
;
6367 chk
->whoTo
->ref_count
++;
6368 hdr
= mtod(cookie_ack
, struct sctp_chunkhdr
*);
6369 hdr
->chunk_type
= SCTP_COOKIE_ACK
;
6370 hdr
->chunk_flags
= 0;
6371 hdr
->chunk_length
= htons(chk
->send_size
);
6372 cookie_ack
->m_pkthdr
.len
= cookie_ack
->m_len
= chk
->send_size
;
6373 cookie_ack
->m_pkthdr
.rcvif
= 0;
6374 TAILQ_INSERT_TAIL(&chk
->asoc
->control_send_queue
, chk
, sctp_next
);
6375 chk
->asoc
->ctrl_queue_cnt
++;
6381 sctp_send_shutdown_ack(struct sctp_tcb
*stcb
, struct sctp_nets
*net
)
6383 /* formulate and queue a SHUTDOWN-ACK back to the sender */
6384 struct mbuf
*m_shutdown_ack
;
6385 struct sctp_shutdown_ack_chunk
*ack_cp
;
6386 struct sctp_tmit_chunk
*chk
;
6388 m_shutdown_ack
= NULL
;
6389 MGETHDR(m_shutdown_ack
, MB_DONTWAIT
, MT_HEADER
);
6390 if (m_shutdown_ack
== NULL
) {
6394 m_shutdown_ack
->m_data
+= SCTP_MIN_OVERHEAD
;
6395 chk
= (struct sctp_tmit_chunk
*)SCTP_ZONE_GET(sctppcbinfo
.ipi_zone_chunk
);
6398 sctp_m_freem(m_shutdown_ack
);
6401 sctppcbinfo
.ipi_count_chunk
++;
6402 sctppcbinfo
.ipi_gencnt_chunk
++;
6404 chk
->send_size
= sizeof(struct sctp_chunkhdr
);
6405 chk
->rec
.chunk_id
= SCTP_SHUTDOWN_ACK
;
6406 chk
->sent
= SCTP_DATAGRAM_UNSENT
;
6409 chk
->asoc
= &stcb
->asoc
;
6410 chk
->data
= m_shutdown_ack
;
6414 ack_cp
= mtod(m_shutdown_ack
, struct sctp_shutdown_ack_chunk
*);
6415 ack_cp
->ch
.chunk_type
= SCTP_SHUTDOWN_ACK
;
6416 ack_cp
->ch
.chunk_flags
= 0;
6417 ack_cp
->ch
.chunk_length
= htons(chk
->send_size
);
6418 m_shutdown_ack
->m_pkthdr
.len
= m_shutdown_ack
->m_len
= chk
->send_size
;
6419 m_shutdown_ack
->m_pkthdr
.rcvif
= 0;
6420 TAILQ_INSERT_TAIL(&chk
->asoc
->control_send_queue
, chk
, sctp_next
);
6421 chk
->asoc
->ctrl_queue_cnt
++;
6426 sctp_send_shutdown(struct sctp_tcb
*stcb
, struct sctp_nets
*net
)
6428 /* formulate and queue a SHUTDOWN to the sender */
6429 struct mbuf
*m_shutdown
;
6430 struct sctp_shutdown_chunk
*shutdown_cp
;
6431 struct sctp_tmit_chunk
*chk
;
6434 MGETHDR(m_shutdown
, MB_DONTWAIT
, MT_HEADER
);
6435 if (m_shutdown
== NULL
) {
6439 m_shutdown
->m_data
+= SCTP_MIN_OVERHEAD
;
6440 chk
= (struct sctp_tmit_chunk
*)SCTP_ZONE_GET(sctppcbinfo
.ipi_zone_chunk
);
6443 sctp_m_freem(m_shutdown
);
6446 sctppcbinfo
.ipi_count_chunk
++;
6447 sctppcbinfo
.ipi_gencnt_chunk
++;
6449 chk
->send_size
= sizeof(struct sctp_shutdown_chunk
);
6450 chk
->rec
.chunk_id
= SCTP_SHUTDOWN
;
6451 chk
->sent
= SCTP_DATAGRAM_UNSENT
;
6454 chk
->asoc
= &stcb
->asoc
;
6455 chk
->data
= m_shutdown
;
6459 shutdown_cp
= mtod(m_shutdown
, struct sctp_shutdown_chunk
*);
6460 shutdown_cp
->ch
.chunk_type
= SCTP_SHUTDOWN
;
6461 shutdown_cp
->ch
.chunk_flags
= 0;
6462 shutdown_cp
->ch
.chunk_length
= htons(chk
->send_size
);
6463 shutdown_cp
->cumulative_tsn_ack
= htonl(stcb
->asoc
.cumulative_tsn
);
6464 m_shutdown
->m_pkthdr
.len
= m_shutdown
->m_len
= chk
->send_size
;
6465 m_shutdown
->m_pkthdr
.rcvif
= 0;
6466 TAILQ_INSERT_TAIL(&chk
->asoc
->control_send_queue
, chk
, sctp_next
);
6467 chk
->asoc
->ctrl_queue_cnt
++;
6469 if ((stcb
->sctp_ep
->sctp_flags
& SCTP_PCB_FLAGS_TCPTYPE
) ||
6470 (stcb
->sctp_ep
->sctp_flags
& SCTP_PCB_FLAGS_IN_TCPPOOL
)) {
6471 stcb
->sctp_ep
->sctp_socket
->so_snd
.ssb_cc
= 0;
6472 soisdisconnecting(stcb
->sctp_ep
->sctp_socket
);
6478 sctp_send_asconf(struct sctp_tcb
*stcb
, struct sctp_nets
*net
)
6481 * formulate and queue an ASCONF to the peer
6482 * ASCONF parameters should be queued on the assoc queue
6484 struct sctp_tmit_chunk
*chk
;
6485 struct mbuf
*m_asconf
;
6486 struct sctp_asconf_chunk
*acp
;
6489 /* compose an ASCONF chunk, maximum length is PMTU */
6490 m_asconf
= sctp_compose_asconf(stcb
);
6491 if (m_asconf
== NULL
) {
6494 acp
= mtod(m_asconf
, struct sctp_asconf_chunk
*);
6495 chk
= (struct sctp_tmit_chunk
*)SCTP_ZONE_GET(sctppcbinfo
.ipi_zone_chunk
);
6498 sctp_m_freem(m_asconf
);
6501 sctppcbinfo
.ipi_count_chunk
++;
6502 sctppcbinfo
.ipi_gencnt_chunk
++;
6504 chk
->data
= m_asconf
;
6505 chk
->send_size
= m_asconf
->m_pkthdr
.len
;
6506 chk
->rec
.chunk_id
= SCTP_ASCONF
;
6507 chk
->sent
= SCTP_DATAGRAM_UNSENT
;
6510 chk
->asoc
= &stcb
->asoc
;
6511 chk
->whoTo
= chk
->asoc
->primary_destination
;
6512 chk
->whoTo
->ref_count
++;
6513 TAILQ_INSERT_TAIL(&chk
->asoc
->control_send_queue
, chk
, sctp_next
);
6514 chk
->asoc
->ctrl_queue_cnt
++;
6519 sctp_send_asconf_ack(struct sctp_tcb
*stcb
, uint32_t retrans
)
6522 * formulate and queue a asconf-ack back to sender
6523 * the asconf-ack must be stored in the tcb
6525 struct sctp_tmit_chunk
*chk
;
6528 /* is there a asconf-ack mbuf chain to send? */
6529 if (stcb
->asoc
.last_asconf_ack_sent
== NULL
) {
6533 /* copy the asconf_ack */
6534 #if defined(__FreeBSD__) || defined(__NetBSD__)
6535 /* Supposedly the m_copypacket is a optimzation,
6538 if (stcb
->asoc
.last_asconf_ack_sent
->m_flags
& M_PKTHDR
) {
6539 m_ack
= m_copypacket(stcb
->asoc
.last_asconf_ack_sent
, MB_DONTWAIT
);
6540 sctp_pegs
[SCTP_CACHED_SRC
]++;
6542 m_ack
= m_copy(stcb
->asoc
.last_asconf_ack_sent
, 0, M_COPYALL
);
6544 m_ack
= m_copy(stcb
->asoc
.last_asconf_ack_sent
, 0, M_COPYALL
);
6546 if (m_ack
== NULL
) {
6547 /* couldn't copy it */
6551 chk
= (struct sctp_tmit_chunk
*)SCTP_ZONE_GET(sctppcbinfo
.ipi_zone_chunk
);
6555 sctp_m_freem(m_ack
);
6558 sctppcbinfo
.ipi_count_chunk
++;
6559 sctppcbinfo
.ipi_gencnt_chunk
++;
6561 /* figure out where it goes to */
6563 /* we're doing a retransmission */
6564 if (stcb
->asoc
.used_alt_asconfack
> 2) {
6565 /* tried alternate nets already, go back */
6568 /* need to try and alternate net */
6569 chk
->whoTo
= sctp_find_alternate_net(stcb
, stcb
->asoc
.last_control_chunk_from
);
6570 stcb
->asoc
.used_alt_asconfack
++;
6572 if (chk
->whoTo
== NULL
) {
6574 if (stcb
->asoc
.last_control_chunk_from
== NULL
)
6575 chk
->whoTo
= stcb
->asoc
.primary_destination
;
6577 chk
->whoTo
= stcb
->asoc
.last_control_chunk_from
;
6578 stcb
->asoc
.used_alt_asconfack
= 0;
6582 if (stcb
->asoc
.last_control_chunk_from
== NULL
)
6583 chk
->whoTo
= stcb
->asoc
.primary_destination
;
6585 chk
->whoTo
= stcb
->asoc
.last_control_chunk_from
;
6586 stcb
->asoc
.used_alt_asconfack
= 0;
6589 chk
->send_size
= m_ack
->m_pkthdr
.len
;
6590 chk
->rec
.chunk_id
= SCTP_ASCONF_ACK
;
6591 chk
->sent
= SCTP_DATAGRAM_UNSENT
;
6594 chk
->asoc
= &stcb
->asoc
;
6595 chk
->whoTo
->ref_count
++;
6596 TAILQ_INSERT_TAIL(&chk
->asoc
->control_send_queue
, chk
, sctp_next
);
6597 chk
->asoc
->ctrl_queue_cnt
++;
6603 sctp_chunk_retransmission(struct sctp_inpcb
*inp
,
6604 struct sctp_tcb
*stcb
,
6605 struct sctp_association
*asoc
,
6606 int *cnt_out
, struct timeval
*now
, int *now_filled
)
6609 * send out one MTU of retransmission.
6610 * If fast_retransmit is happening we ignore the cwnd.
6611 * Otherwise we obey the cwnd and rwnd.
6612 * For a Cookie or Asconf in the control chunk queue we retransmit
6613 * them by themselves.
6615 * For data chunks we will pick out the lowest TSN's in the
6616 * sent_queue marked for resend and bundle them all together
6617 * (up to a MTU of destination). The address to send to should
6618 * have been selected/changed where the retransmission was
6619 * marked (i.e. in FR or t3-timeout routines).
6621 struct sctp_tmit_chunk
*data_list
[SCTP_MAX_DATA_BUNDLING
];
6622 struct sctp_tmit_chunk
*chk
, *fwd
;
6624 struct sctphdr
*shdr
;
6626 struct sctp_nets
*net
;
6627 int no_fragmentflg
, bundle_at
, cnt_thru
;
6629 int error
, i
, one_chunk
, fwd_tsn
, ctl_cnt
, tmr_started
;
6631 tmr_started
= ctl_cnt
= bundle_at
= error
= 0;
6638 #ifdef SCTP_AUDITING_ENABLED
6639 sctp_audit_log(0xC3, 1);
6641 if (TAILQ_EMPTY(&asoc
->sent_queue
)) {
6643 if (sctp_debug_on
& SCTP_DEBUG_OUTPUT1
) {
6644 kprintf("SCTP hits empty queue with cnt set to %d?\n",
6645 asoc
->sent_queue_retran_cnt
);
6648 asoc
->sent_queue_cnt
= 0;
6649 asoc
->sent_queue_cnt_removeable
= 0;
6651 TAILQ_FOREACH(chk
, &asoc
->control_send_queue
, sctp_next
) {
6652 if (chk
->sent
!= SCTP_DATAGRAM_RESEND
) {
6653 /* we only worry about things marked for resend */
6656 if ((chk
->rec
.chunk_id
== SCTP_COOKIE_ECHO
) ||
6657 (chk
->rec
.chunk_id
== SCTP_ASCONF
) ||
6658 (chk
->rec
.chunk_id
== SCTP_STREAM_RESET
) ||
6659 (chk
->rec
.chunk_id
== SCTP_FORWARD_CUM_TSN
)) {
6660 if (chk
->rec
.chunk_id
== SCTP_STREAM_RESET
) {
6661 /* For stream reset we only retran the request
6664 struct sctp_stream_reset_req
*strreq
;
6665 strreq
= mtod(chk
->data
, struct sctp_stream_reset_req
*);
6666 if (strreq
->sr_req
.ph
.param_type
!= ntohs(SCTP_STR_RESET_REQUEST
)) {
6671 if (chk
->rec
.chunk_id
== SCTP_ASCONF
) {
6675 if (chk
->rec
.chunk_id
== SCTP_FORWARD_CUM_TSN
) {
6679 m
= sctp_copy_mbufchain(chk
->data
, m
);
6685 /* do we have control chunks to retransmit? */
6687 /* Start a timer no matter if we suceed or fail */
6688 if (chk
->rec
.chunk_id
== SCTP_COOKIE_ECHO
) {
6689 sctp_timer_start(SCTP_TIMER_TYPE_COOKIE
, inp
, stcb
, chk
->whoTo
);
6690 } else if (chk
->rec
.chunk_id
== SCTP_ASCONF
)
6691 sctp_timer_start(SCTP_TIMER_TYPE_ASCONF
, inp
, stcb
, chk
->whoTo
);
6693 if (m
->m_len
== 0) {
6694 /* Special case for when you get a 0 len
6695 * mbuf at the head due to the lack
6696 * of a MHDR at the beginning.
6698 m
->m_len
= sizeof(struct sctphdr
);
6700 M_PREPEND(m
, sizeof(struct sctphdr
), MB_DONTWAIT
);
6705 shdr
= mtod(m
, struct sctphdr
*);
6706 shdr
->src_port
= inp
->sctp_lport
;
6707 shdr
->dest_port
= stcb
->rport
;
6708 shdr
->v_tag
= htonl(stcb
->asoc
.peer_vtag
);
6710 chk
->snd_count
++; /* update our count */
6712 if ((error
= sctp_lowlevel_chunk_output(inp
, stcb
, chk
->whoTo
,
6713 (struct sockaddr
*)&chk
->whoTo
->ro
._l_addr
, m
,
6714 no_fragmentflg
, 0, NULL
, asconf
))) {
6715 sctp_pegs
[SCTP_DATA_OUT_ERR
]++;
6719 *We don't want to mark the net->sent time here since this
6720 * we use this for HB and retrans cannot measure RTT
6722 /* SCTP_GETTIME_TIMEVAL(&chk->whoTo->last_sent_time);*/
6724 chk
->sent
= SCTP_DATAGRAM_SENT
;
6725 asoc
->sent_queue_retran_cnt
--;
6726 if (asoc
->sent_queue_retran_cnt
< 0) {
6727 asoc
->sent_queue_retran_cnt
= 0;
6732 /* Clean up the fwd-tsn list */
6733 sctp_clean_up_ctl (asoc
);
6737 /* Ok, it is just data retransmission we need to do or
6738 * that and a fwd-tsn with it all.
6740 if (TAILQ_EMPTY(&asoc
->sent_queue
)) {
6744 if (sctp_debug_on
& SCTP_DEBUG_OUTPUT1
) {
6745 kprintf("Normal chunk retransmission cnt:%d\n",
6746 asoc
->sent_queue_retran_cnt
);
6749 if ((SCTP_GET_STATE(asoc
) == SCTP_STATE_COOKIE_ECHOED
) ||
6750 (SCTP_GET_STATE(asoc
) == SCTP_STATE_COOKIE_WAIT
)) {
6751 /* not yet open, resend the cookie and that is it */
6756 #ifdef SCTP_AUDITING_ENABLED
6757 sctp_auditing(20, inp
, stcb
, NULL
);
6759 TAILQ_FOREACH(chk
, &asoc
->sent_queue
, sctp_next
) {
6760 if (chk
->sent
!= SCTP_DATAGRAM_RESEND
) {
6761 /* No, not sent to this net or not ready for rtx */
6765 /* pick up the net */
6767 if (stcb
->sctp_ep
->sctp_flags
& SCTP_PCB_FLAGS_BOUND_V6
) {
6768 mtu
= (net
->mtu
- SCTP_MIN_OVERHEAD
);
6770 mtu
= net
->mtu
- SCTP_MIN_V4_OVERHEAD
;
6773 if ((asoc
->peers_rwnd
< mtu
) && (asoc
->total_flight
> 0)) {
6774 /* No room in peers rwnd */
6776 tsn
= asoc
->last_acked_seq
+ 1;
6777 if (tsn
== chk
->rec
.data
.TSN_seq
) {
6778 /* we make a special exception for this case.
6779 * The peer has no rwnd but is missing the
6780 * lowest chunk.. which is probably what is
6781 * holding up the rwnd.
6783 goto one_chunk_around
;
6786 if (sctp_debug_on
& SCTP_DEBUG_OUTPUT1
) {
6787 kprintf("blocked-peers_rwnd:%d tf:%d\n",
6788 (int)asoc
->peers_rwnd
,
6789 (int)asoc
->total_flight
);
6792 sctp_pegs
[SCTP_RWND_BLOCKED
]++;
6796 if (asoc
->peers_rwnd
< mtu
) {
6799 #ifdef SCTP_AUDITING_ENABLED
6800 sctp_audit_log(0xC3, 2);
6804 net
->fast_retran_ip
= 0;
6805 if (chk
->rec
.data
.doing_fast_retransmit
== 0) {
6806 /* if no FR in progress skip destination that
6807 * have flight_size > cwnd.
6809 if (net
->flight_size
>= net
->cwnd
) {
6810 sctp_pegs
[SCTP_CWND_BLOCKED
]++;
6814 /* Mark the destination net to have FR recovery
6817 net
->fast_retran_ip
= 1;
6820 if ((chk
->send_size
<= mtu
) || (chk
->flags
& CHUNK_FLAGS_FRAGMENT_OK
)) {
6821 /* ok we will add this one */
6822 m
= sctp_copy_mbufchain(chk
->data
, m
);
6826 /* upate our MTU size */
6827 /* Do clear IP_DF ? */
6828 if (chk
->flags
& CHUNK_FLAGS_FRAGMENT_OK
) {
6831 mtu
-= chk
->send_size
;
6832 data_list
[bundle_at
++] = chk
;
6833 if (one_chunk
&& (asoc
->total_flight
<= 0)) {
6834 sctp_pegs
[SCTP_WINDOW_PROBES
]++;
6835 chk
->rec
.data
.state_flags
|= SCTP_WINDOW_PROBE
;
6838 if (one_chunk
== 0) {
6839 /* now are there anymore forward from chk to pick up?*/
6840 fwd
= TAILQ_NEXT(chk
, sctp_next
);
6842 if (fwd
->sent
!= SCTP_DATAGRAM_RESEND
) {
6843 /* Nope, not for retran */
6844 fwd
= TAILQ_NEXT(fwd
, sctp_next
);
6847 if (fwd
->whoTo
!= net
) {
6848 /* Nope, not the net in question */
6849 fwd
= TAILQ_NEXT(fwd
, sctp_next
);
6852 if (fwd
->send_size
<= mtu
) {
6853 m
= sctp_copy_mbufchain(fwd
->data
, m
);
6857 /* upate our MTU size */
6858 /* Do clear IP_DF ? */
6859 if (fwd
->flags
& CHUNK_FLAGS_FRAGMENT_OK
) {
6862 mtu
-= fwd
->send_size
;
6863 data_list
[bundle_at
++] = fwd
;
6864 if (bundle_at
>= SCTP_MAX_DATA_BUNDLING
) {
6867 fwd
= TAILQ_NEXT(fwd
, sctp_next
);
6869 /* can't fit so we are done */
6874 /* Is there something to send for this destination? */
6876 /* No matter if we fail/or suceed we should
6877 * start a timer. A failure is like a lost
6880 if (!callout_pending(&net
->rxt_timer
.timer
)) {
6881 /* no timer running on this destination
6884 sctp_timer_start(SCTP_TIMER_TYPE_SEND
, inp
, stcb
, net
);
6887 if (m
->m_len
== 0) {
6888 /* Special case for when you get a 0 len
6889 * mbuf at the head due to the lack
6890 * of a MHDR at the beginning.
6892 m
->m_len
= sizeof(struct sctphdr
);
6894 M_PREPEND(m
, sizeof(struct sctphdr
), MB_DONTWAIT
);
6899 shdr
= mtod(m
, struct sctphdr
*);
6900 shdr
->src_port
= inp
->sctp_lport
;
6901 shdr
->dest_port
= stcb
->rport
;
6902 shdr
->v_tag
= htonl(stcb
->asoc
.peer_vtag
);
6905 /* Now lets send it, if there is anything to send :> */
6906 if ((error
= sctp_lowlevel_chunk_output(inp
, stcb
, net
,
6907 (struct sockaddr
*)&net
->ro
._l_addr
,
6909 no_fragmentflg
, 0, NULL
, asconf
))) {
6910 /* error, we could not output */
6911 sctp_pegs
[SCTP_DATA_OUT_ERR
]++;
6916 * We don't want to mark the net->sent time here since
6917 * this we use this for HB and retrans cannot measure
6920 /* SCTP_GETTIME_TIMEVAL(&net->last_sent_time);*/
6922 /* For auto-close */
6924 if (*now_filled
== 0) {
6925 SCTP_GETTIME_TIMEVAL(&asoc
->time_last_sent
);
6926 *now
= asoc
->time_last_sent
;
6929 asoc
->time_last_sent
= *now
;
6931 *cnt_out
+= bundle_at
;
6932 #ifdef SCTP_AUDITING_ENABLED
6933 sctp_audit_log(0xC4, bundle_at
);
6935 for (i
= 0; i
< bundle_at
; i
++) {
6936 sctp_pegs
[SCTP_RETRANTSN_SENT
]++;
6937 data_list
[i
]->sent
= SCTP_DATAGRAM_SENT
;
6938 data_list
[i
]->snd_count
++;
6939 asoc
->sent_queue_retran_cnt
--;
6940 /* record the time */
6941 data_list
[i
]->sent_rcv_time
= asoc
->time_last_sent
;
6942 if (asoc
->sent_queue_retran_cnt
< 0) {
6943 asoc
->sent_queue_retran_cnt
= 0;
6945 net
->flight_size
+= data_list
[i
]->book_size
;
6946 asoc
->total_flight
+= data_list
[i
]->book_size
;
6947 asoc
->total_flight_count
++;
6949 #ifdef SCTP_LOG_RWND
6950 sctp_log_rwnd(SCTP_DECREASE_PEER_RWND
,
6951 asoc
->peers_rwnd
, data_list
[i
]->send_size
, sctp_peer_chunk_oh
);
6953 asoc
->peers_rwnd
= sctp_sbspace_sub(asoc
->peers_rwnd
,
6954 (u_int32_t
)(data_list
[i
]->send_size
+ sctp_peer_chunk_oh
));
6955 if (asoc
->peers_rwnd
< stcb
->sctp_ep
->sctp_ep
.sctp_sws_sender
) {
6956 /* SWS sender side engages */
6957 asoc
->peers_rwnd
= 0;
6961 (data_list
[i
]->rec
.data
.doing_fast_retransmit
)) {
6962 sctp_pegs
[SCTP_FAST_RETRAN
]++;
6963 if ((data_list
[i
] == TAILQ_FIRST(&asoc
->sent_queue
)) &&
6964 (tmr_started
== 0)) {
6966 * ok we just fast-retrans'd
6967 * the lowest TSN, i.e the
6968 * first on the list. In this
6969 * case we want to give some
6970 * more time to get a SACK
6971 * back without a t3-expiring.
6973 sctp_timer_stop(SCTP_TIMER_TYPE_SEND
, inp
, stcb
, net
);
6974 sctp_timer_start(SCTP_TIMER_TYPE_SEND
, inp
, stcb
, net
);
6978 #ifdef SCTP_AUDITING_ENABLED
6979 sctp_auditing(21, inp
, stcb
, NULL
);
6985 if (asoc
->sent_queue_retran_cnt
<= 0) {
6986 /* all done we have no more to retran */
6987 asoc
->sent_queue_retran_cnt
= 0;
6991 /* No more room in rwnd */
6994 /* stop the for loop here. we sent out a packet */
7002 sctp_timer_validation(struct sctp_inpcb
*inp
,
7003 struct sctp_tcb
*stcb
,
7004 struct sctp_association
*asoc
,
7007 struct sctp_nets
*net
;
7008 /* Validate that a timer is running somewhere */
7009 TAILQ_FOREACH(net
, &asoc
->nets
, sctp_next
) {
7010 if (callout_pending(&net
->rxt_timer
.timer
)) {
7011 /* Here is a timer */
7015 /* Gak, we did not have a timer somewhere */
7017 if (sctp_debug_on
& SCTP_DEBUG_OUTPUT3
) {
7018 kprintf("Deadlock avoided starting timer on a dest at retran\n");
7021 sctp_timer_start(SCTP_TIMER_TYPE_SEND
, inp
, stcb
, asoc
->primary_destination
);
7026 sctp_chunk_output(struct sctp_inpcb
*inp
,
7027 struct sctp_tcb
*stcb
,
7030 /* Ok this is the generic chunk service queue.
7031 * we must do the following:
7032 * - See if there are retransmits pending, if so we
7033 * must do these first and return.
7034 * - Service the stream queue that is next,
7035 * moving any message (note I must get a complete
7036 * message i.e. FIRST/MIDDLE and LAST to the out
7037 * queue in one pass) and assigning TSN's
7038 * - Check to see if the cwnd/rwnd allows any output, if
7039 * so we go ahead and fomulate and send the low level
7040 * chunks. Making sure to combine any control in the
7041 * control chunk queue also.
7043 struct sctp_association
*asoc
;
7044 struct sctp_nets
*net
;
7045 int error
, num_out
, tot_out
, ret
, reason_code
, burst_cnt
, burst_limit
;
7053 sctp_pegs
[SCTP_CALLS_TO_CO
]++;
7055 if (sctp_debug_on
& SCTP_DEBUG_OUTPUT3
) {
7056 kprintf("in co - retran count:%d\n", asoc
->sent_queue_retran_cnt
);
7059 while (asoc
->sent_queue_retran_cnt
) {
7060 /* Ok, it is retransmission time only, we send out only ONE
7061 * packet with a single call off to the retran code.
7063 ret
= sctp_chunk_retransmission(inp
, stcb
, asoc
, &num_out
, &now
, &now_filled
);
7065 /* Can't send anymore */
7067 if (sctp_debug_on
& SCTP_DEBUG_OUTPUT1
) {
7068 kprintf("retransmission ret:%d -- full\n", ret
);
7072 * now lets push out control by calling med-level
7073 * output once. this assures that we WILL send HB's
7076 sctp_med_chunk_output(inp
, stcb
, asoc
, &num_out
, &reason_code
, 1,
7077 &cwnd_full
, from_where
,
7080 if (sctp_debug_on
& SCTP_DEBUG_OUTPUT1
) {
7081 kprintf("Control send outputs:%d@full\n", num_out
);
7084 #ifdef SCTP_AUDITING_ENABLED
7085 sctp_auditing(8, inp
, stcb
, NULL
);
7087 return (sctp_timer_validation(inp
, stcb
, asoc
, ret
));
7091 * The count was off.. retran is not happening so do
7092 * the normal retransmission.
7095 if (sctp_debug_on
& SCTP_DEBUG_OUTPUT1
) {
7096 kprintf("Done with retrans, none left fill up window\n");
7099 #ifdef SCTP_AUDITING_ENABLED
7100 sctp_auditing(9, inp
, stcb
, NULL
);
7104 if (from_where
== 1) {
7105 /* Only one transmission allowed out of a timeout */
7107 if (sctp_debug_on
& SCTP_DEBUG_OUTPUT1
) {
7108 kprintf("Only one packet allowed out\n");
7111 #ifdef SCTP_AUDITING_ENABLED
7112 sctp_auditing(10, inp
, stcb
, NULL
);
7114 /* Push out any control */
7115 sctp_med_chunk_output(inp
, stcb
, asoc
, &num_out
, &reason_code
, 1, &cwnd_full
, from_where
,
7119 if ((num_out
== 0) && (ret
== 0)) {
7120 /* No more retrans to send */
7124 #ifdef SCTP_AUDITING_ENABLED
7125 sctp_auditing(12, inp
, stcb
, NULL
);
7127 /* Check for bad destinations, if they exist move chunks around. */
7128 burst_limit
= asoc
->max_burst
;
7129 TAILQ_FOREACH(net
, &asoc
->nets
, sctp_next
) {
7130 if ((net
->dest_state
& SCTP_ADDR_NOT_REACHABLE
) ==
7131 SCTP_ADDR_NOT_REACHABLE
) {
7133 * if possible move things off of this address
7134 * we still may send below due to the dormant state
7135 * but we try to find an alternate address to send
7136 * to and if we have one we move all queued data on
7137 * the out wheel to this alternate address.
7139 sctp_move_to_an_alt(stcb
, asoc
, net
);
7142 if ((asoc->sat_network) || (net->addr_is_local)) {
7143 burst_limit = asoc->max_burst * SCTP_SAT_NETWORK_BURST_INCR;
7147 if (sctp_debug_on
& SCTP_DEBUG_OUTPUT3
) {
7148 kprintf("examined net:%p burst limit:%d\n", net
, asoc
->max_burst
);
7152 #ifdef SCTP_USE_ALLMAN_BURST
7153 if ((net
->flight_size
+(burst_limit
*net
->mtu
)) < net
->cwnd
) {
7154 if (net
->ssthresh
< net
->cwnd
)
7155 net
->ssthresh
= net
->cwnd
;
7156 net
->cwnd
= (net
->flight_size
+(burst_limit
*net
->mtu
));
7157 #ifdef SCTP_LOG_MAXBURST
7158 sctp_log_maxburst(net
, 0, burst_limit
, SCTP_MAX_BURST_APPLIED
);
7160 sctp_pegs
[SCTP_MAX_BURST_APL
]++;
7162 net
->fast_retran_ip
= 0;
7167 /* Fill up what we can to the destination */
7172 if (sctp_debug_on
& SCTP_DEBUG_OUTPUT3
) {
7173 kprintf("Burst count:%d - call m-c-o\n", burst_cnt
);
7176 error
= sctp_med_chunk_output(inp
, stcb
, asoc
, &num_out
,
7177 &reason_code
, 0, &cwnd_full
, from_where
,
7181 if (sctp_debug_on
& SCTP_DEBUG_OUTPUT1
) {
7182 kprintf("Error %d was returned from med-c-op\n", error
);
7185 #ifdef SCTP_LOG_MAXBURST
7186 sctp_log_maxburst(asoc
->primary_destination
, error
, burst_cnt
, SCTP_MAX_BURST_ERROR_STOP
);
7191 if (sctp_debug_on
& SCTP_DEBUG_OUTPUT3
) {
7192 kprintf("m-c-o put out %d\n", num_out
);
7198 #ifndef SCTP_USE_ALLMAN_BURST
7199 && (burst_cnt
< burst_limit
)
7202 #ifndef SCTP_USE_ALLMAN_BURST
7203 if (burst_cnt
>= burst_limit
) {
7204 sctp_pegs
[SCTP_MAX_BURST_APL
]++;
7205 asoc
->burst_limit_applied
= 1;
7206 #ifdef SCTP_LOG_MAXBURST
7207 sctp_log_maxburst(asoc
->primary_destination
, 0 , burst_cnt
, SCTP_MAX_BURST_APPLIED
);
7210 asoc
->burst_limit_applied
= 0;
7215 if (sctp_debug_on
& SCTP_DEBUG_OUTPUT1
) {
7216 kprintf("Ok, we have put out %d chunks\n", tot_out
);
7220 sctp_pegs
[SCTP_CO_NODATASNT
]++;
7221 if (asoc
->stream_queue_cnt
> 0) {
7222 sctp_pegs
[SCTP_SOS_NOSNT
]++;
7224 sctp_pegs
[SCTP_NOS_NOSNT
]++;
7226 if (asoc
->send_queue_cnt
> 0) {
7227 sctp_pegs
[SCTP_SOSE_NOSNT
]++;
7229 sctp_pegs
[SCTP_NOSE_NOSNT
]++;
7232 /* Now we need to clean up the control chunk chain if
7233 * a ECNE is on it. It must be marked as UNSENT again
7234 * so next call will continue to send it until
7235 * such time that we get a CWR, to remove it.
7237 sctp_fix_ecn_echo(asoc
);
7243 sctp_output(struct sctp_inpcb
*inp
, struct mbuf
*m
, struct sockaddr
*addr
,
7244 struct mbuf
*control
, struct thread
*p
, int flags
)
7246 struct inpcb
*ip_inp
;
7247 struct sctp_inpcb
*t_inp
;
7248 struct sctp_tcb
*stcb
;
7249 struct sctp_nets
*net
;
7250 struct sctp_association
*asoc
;
7251 int create_lock_applied
= 0;
7252 int queue_only
, error
= 0;
7253 struct sctp_sndrcvinfo srcv
;
7255 int use_rcvinfo
= 0;
7257 /* struct route ro;*/
7261 ip_inp
= (struct inpcb
*)inp
;
7267 if (sctp_debug_on
& SCTP_DEBUG_OUTPUT1
) {
7268 kprintf("USR Send BEGINS\n");
7272 if ((inp
->sctp_flags
& SCTP_PCB_FLAGS_TCPTYPE
) &&
7273 (inp
->sctp_flags
& SCTP_PCB_FLAGS_ACCEPTING
)) {
7274 /* The listner can NOT send */
7276 sctppcbinfo
.mbuf_track
--;
7277 sctp_m_freem(control
);
7284 /* Can't allow a V6 address on a non-v6 socket */
7286 SCTP_ASOC_CREATE_LOCK(inp
);
7287 if ((inp
->sctp_flags
& SCTP_PCB_FLAGS_SOCKET_GONE
) ||
7288 (inp
->sctp_flags
& SCTP_PCB_FLAGS_SOCKET_GONE
)) {
7289 /* Should I really unlock ? */
7290 SCTP_ASOC_CREATE_UNLOCK(inp
);
7292 sctppcbinfo
.mbuf_track
--;
7293 sctp_m_freem(control
);
7300 create_lock_applied
= 1;
7301 if (((inp
->sctp_flags
& SCTP_PCB_FLAGS_BOUND_V6
) == 0) &&
7302 (addr
->sa_family
== AF_INET6
)) {
7303 SCTP_ASOC_CREATE_UNLOCK(inp
);
7305 sctppcbinfo
.mbuf_track
--;
7306 sctp_m_freem(control
);
7315 sctppcbinfo
.mbuf_track
++;
7316 if (sctp_find_cmsg(SCTP_SNDRCV
, (void *)&srcv
, control
,
7318 if (srcv
.sinfo_flags
& MSG_SENDALL
) {
7320 sctppcbinfo
.mbuf_track
--;
7321 sctp_m_freem(control
);
7323 if (create_lock_applied
) {
7324 SCTP_ASOC_CREATE_UNLOCK(inp
);
7325 create_lock_applied
= 0;
7327 return (sctp_sendall(inp
, NULL
, m
, &srcv
));
7329 if (srcv
.sinfo_assoc_id
) {
7330 if (inp
->sctp_flags
& SCTP_PCB_FLAGS_CONNECTED
) {
7331 SCTP_INP_RLOCK(inp
);
7332 stcb
= LIST_FIRST(&inp
->sctp_asoc_list
);
7334 SCTP_TCB_LOCK(stcb
);
7335 SCTP_INP_RUNLOCK(inp
);
7338 if (create_lock_applied
) {
7339 SCTP_ASOC_CREATE_UNLOCK(inp
);
7340 create_lock_applied
= 0;
7342 sctppcbinfo
.mbuf_track
--;
7343 sctp_m_freem(control
);
7348 net
= stcb
->asoc
.primary_destination
;
7350 stcb
= sctp_findassociation_ep_asocid(inp
, srcv
.sinfo_assoc_id
);
7353 * Question: Should I error here if the
7355 * assoc_id is no longer valid?
7356 * i.e. I can't find it?
7360 /* Must locate the net structure */
7362 net
= sctp_findnet(stcb
, addr
);
7365 net
= stcb
->asoc
.primary_destination
;
7371 if (inp
->sctp_flags
& SCTP_PCB_FLAGS_CONNECTED
) {
7372 SCTP_INP_RLOCK(inp
);
7373 stcb
= LIST_FIRST(&inp
->sctp_asoc_list
);
7375 SCTP_TCB_LOCK(stcb
);
7376 SCTP_INP_RUNLOCK(inp
);
7379 if (create_lock_applied
) {
7380 SCTP_ASOC_CREATE_UNLOCK(inp
);
7381 create_lock_applied
= 0;
7384 sctppcbinfo
.mbuf_track
--;
7385 sctp_m_freem(control
);
7392 net
= stcb
->asoc
.primary_destination
;
7394 net
= sctp_findnet(stcb
, addr
);
7396 net
= stcb
->asoc
.primary_destination
;
7401 SCTP_INP_WLOCK(inp
);
7402 SCTP_INP_INCR_REF(inp
);
7403 SCTP_INP_WUNLOCK(inp
);
7404 stcb
= sctp_findassociation_ep_addr(&t_inp
, addr
, &net
, NULL
, NULL
);
7406 SCTP_INP_WLOCK(inp
);
7407 SCTP_INP_DECR_REF(inp
);
7408 SCTP_INP_WUNLOCK(inp
);
7413 if ((stcb
== NULL
) &&
7414 (inp
->sctp_flags
& SCTP_PCB_FLAGS_TCPTYPE
)) {
7416 sctppcbinfo
.mbuf_track
--;
7417 sctp_m_freem(control
);
7420 if (create_lock_applied
) {
7421 SCTP_ASOC_CREATE_UNLOCK(inp
);
7422 create_lock_applied
= 0;
7427 } else if ((stcb
== NULL
) &&
7430 sctppcbinfo
.mbuf_track
--;
7431 sctp_m_freem(control
);
7434 if (create_lock_applied
) {
7435 SCTP_ASOC_CREATE_UNLOCK(inp
);
7436 create_lock_applied
= 0;
7441 } else if (stcb
== NULL
) {
7442 /* UDP mode, we must go ahead and start the INIT process */
7443 if ((use_rcvinfo
) && (srcv
.sinfo_flags
& MSG_ABORT
)) {
7444 /* Strange user to do this */
7446 sctppcbinfo
.mbuf_track
--;
7447 sctp_m_freem(control
);
7450 if (create_lock_applied
) {
7451 SCTP_ASOC_CREATE_UNLOCK(inp
);
7452 create_lock_applied
= 0;
7458 stcb
= sctp_aloc_assoc(inp
, addr
, 1, &error
, 0);
7461 sctppcbinfo
.mbuf_track
--;
7462 sctp_m_freem(control
);
7465 if (create_lock_applied
) {
7466 SCTP_ASOC_CREATE_UNLOCK(inp
);
7467 create_lock_applied
= 0;
7473 if (create_lock_applied
) {
7474 SCTP_ASOC_CREATE_UNLOCK(inp
);
7475 create_lock_applied
= 0;
7477 kprintf("Huh-1, create lock should have been applied!\n");
7481 asoc
->state
= SCTP_STATE_COOKIE_WAIT
;
7482 SCTP_GETTIME_TIMEVAL(&asoc
->time_entered
);
7484 /* see if a init structure exists in cmsg headers */
7485 struct sctp_initmsg initm
;
7487 if (sctp_find_cmsg(SCTP_INIT
, (void *)&initm
, control
,
7489 /* we have an INIT override of the default */
7490 if (initm
.sinit_max_attempts
)
7491 asoc
->max_init_times
= initm
.sinit_max_attempts
;
7492 if (initm
.sinit_num_ostreams
)
7493 asoc
->pre_open_streams
= initm
.sinit_num_ostreams
;
7494 if (initm
.sinit_max_instreams
)
7495 asoc
->max_inbound_streams
= initm
.sinit_max_instreams
;
7496 if (initm
.sinit_max_init_timeo
)
7497 asoc
->initial_init_rto_max
= initm
.sinit_max_init_timeo
;
7499 if (asoc
->streamoutcnt
< asoc
->pre_open_streams
) {
7500 /* Default is NOT correct */
7502 if (sctp_debug_on
& SCTP_DEBUG_OUTPUT1
) {
7503 kprintf("Ok, defout:%d pre_open:%d\n",
7504 asoc
->streamoutcnt
, asoc
->pre_open_streams
);
7507 FREE(asoc
->strmout
, M_PCB
);
7508 asoc
->strmout
= NULL
;
7509 asoc
->streamoutcnt
= asoc
->pre_open_streams
;
7510 MALLOC(asoc
->strmout
, struct sctp_stream_out
*,
7511 asoc
->streamoutcnt
*
7512 sizeof(struct sctp_stream_out
), M_PCB
,
7514 for (i
= 0; i
< asoc
->streamoutcnt
; i
++) {
7516 * inbound side must be set to 0xffff,
7517 * also NOTE when we get the INIT-ACK
7518 * back (for INIT sender) we MUST
7519 * reduce the count (streamoutcnt) but
7520 * first check if we sent to any of the
7521 * upper streams that were dropped (if
7522 * some were). Those that were dropped
7523 * must be notified to the upper layer
7524 * as failed to send.
7526 asoc
->strmout
[i
].next_sequence_sent
= 0x0;
7527 TAILQ_INIT(&asoc
->strmout
[i
].outqueue
);
7528 asoc
->strmout
[i
].stream_no
= i
;
7529 asoc
->strmout
[i
].next_spoke
.tqe_next
= 0;
7530 asoc
->strmout
[i
].next_spoke
.tqe_prev
= 0;
7534 sctp_send_initiate(inp
, stcb
);
7536 * we may want to dig in after this call and adjust the MTU
7537 * value. It defaulted to 1500 (constant) but the ro structure
7538 * may now have an update and thus we may need to change it
7539 * BEFORE we append the message.
7541 net
= stcb
->asoc
.primary_destination
;
7543 if (create_lock_applied
) {
7544 SCTP_ASOC_CREATE_UNLOCK(inp
);
7545 create_lock_applied
= 0;
7548 if ((SCTP_GET_STATE(asoc
) == SCTP_STATE_COOKIE_WAIT
) ||
7549 (SCTP_GET_STATE(asoc
) == SCTP_STATE_COOKIE_ECHOED
)) {
7552 if ((SCTP_GET_STATE(asoc
) == SCTP_STATE_SHUTDOWN_SENT
) ||
7553 (SCTP_GET_STATE(asoc
) == SCTP_STATE_SHUTDOWN_RECEIVED
) ||
7554 (SCTP_GET_STATE(asoc
) == SCTP_STATE_SHUTDOWN_ACK_SENT
) ||
7555 (asoc
->state
& SCTP_STATE_SHUTDOWN_PENDING
)) {
7557 sctppcbinfo
.mbuf_track
--;
7558 sctp_m_freem(control
);
7561 if ((use_rcvinfo
) &&
7562 (srcv
.sinfo_flags
& MSG_ABORT
)) {
7563 sctp_msg_append(stcb
, net
, m
, &srcv
, flags
);
7571 SCTP_TCB_UNLOCK(stcb
);
7575 if (create_lock_applied
) {
7576 /* we should never hit here with the create lock applied
7579 SCTP_ASOC_CREATE_UNLOCK(inp
);
7580 create_lock_applied
= 0;
7584 if (use_rcvinfo
== 0) {
7585 srcv
= stcb
->asoc
.def_send
;
7589 if (sctp_debug_on
& SCTP_DEBUG_OUTPUT5
) {
7590 kprintf("stream:%d\n", srcv
.sinfo_stream
);
7591 kprintf("flags:%x\n", (u_int
)srcv
.sinfo_flags
);
7592 kprintf("ppid:%d\n", srcv
.sinfo_ppid
);
7593 kprintf("context:%d\n", srcv
.sinfo_context
);
7598 sctppcbinfo
.mbuf_track
--;
7599 sctp_m_freem(control
);
7602 if (net
&& ((srcv
.sinfo_flags
& MSG_ADDR_OVER
))) {
7603 /* we take the override or the unconfirmed */
7606 net
= stcb
->asoc
.primary_destination
;
7608 if ((error
= sctp_msg_append(stcb
, net
, m
, &srcv
, flags
))) {
7609 SCTP_TCB_UNLOCK(stcb
);
7613 if (net
->flight_size
> net
->cwnd
) {
7614 sctp_pegs
[SCTP_SENDTO_FULL_CWND
]++;
7616 } else if (asoc
->ifp_had_enobuf
) {
7617 sctp_pegs
[SCTP_QUEONLY_BURSTLMT
]++;
7620 un_sent
= ((stcb
->asoc
.total_output_queue_size
- stcb
->asoc
.total_flight
) +
7621 ((stcb
->asoc
.chunks_on_out_queue
- stcb
->asoc
.total_flight_count
) * sizeof(struct sctp_data_chunk
)) +
7624 if (((inp
->sctp_flags
& SCTP_PCB_FLAGS_NODELAY
) == 0) &&
7625 (stcb
->asoc
.total_flight
> 0) &&
7626 (un_sent
< (int)stcb
->asoc
.smallest_mtu
)
7629 /* Ok, Nagle is set on and we have
7630 * data outstanding. Don't send anything
7631 * and let the SACK drive out the data.
7633 sctp_pegs
[SCTP_NAGLE_NOQ
]++;
7636 sctp_pegs
[SCTP_NAGLE_OFF
]++;
7639 if ((queue_only
== 0) && stcb
->asoc
.peers_rwnd
) {
7640 /* we can attempt to send too.*/
7642 if (sctp_debug_on
& SCTP_DEBUG_OUTPUT1
) {
7643 kprintf("USR Send calls sctp_chunk_output\n");
7646 #ifdef SCTP_AUDITING_ENABLED
7647 sctp_audit_log(0xC0, 1);
7648 sctp_auditing(6, inp
, stcb
, net
);
7650 sctp_pegs
[SCTP_OUTPUT_FRM_SND
]++;
7651 sctp_chunk_output(inp
, stcb
, 0);
7652 #ifdef SCTP_AUDITING_ENABLED
7653 sctp_audit_log(0xC0, 2);
7654 sctp_auditing(7, inp
, stcb
, net
);
7659 if (sctp_debug_on
& SCTP_DEBUG_OUTPUT1
) {
7660 kprintf("USR Send complete qo:%d prw:%d\n", queue_only
, stcb
->asoc
.peers_rwnd
);
7663 SCTP_TCB_UNLOCK(stcb
);
7669 send_forward_tsn(struct sctp_tcb
*stcb
,
7670 struct sctp_association
*asoc
)
7672 struct sctp_tmit_chunk
*chk
;
7673 struct sctp_forward_tsn_chunk
*fwdtsn
;
7675 TAILQ_FOREACH(chk
, &asoc
->control_send_queue
, sctp_next
) {
7676 if (chk
->rec
.chunk_id
== SCTP_FORWARD_CUM_TSN
) {
7677 /* mark it to unsent */
7678 chk
->sent
= SCTP_DATAGRAM_UNSENT
;
7680 /* Do we correct its output location? */
7681 if (chk
->whoTo
!= asoc
->primary_destination
) {
7682 sctp_free_remote_addr(chk
->whoTo
);
7683 chk
->whoTo
= asoc
->primary_destination
;
7684 chk
->whoTo
->ref_count
++;
7686 goto sctp_fill_in_rest
;
7689 /* Ok if we reach here we must build one */
7690 chk
= (struct sctp_tmit_chunk
*)SCTP_ZONE_GET(sctppcbinfo
.ipi_zone_chunk
);
7694 sctppcbinfo
.ipi_count_chunk
++;
7695 sctppcbinfo
.ipi_gencnt_chunk
++;
7696 chk
->rec
.chunk_id
= SCTP_FORWARD_CUM_TSN
;
7698 MGETHDR(chk
->data
, MB_DONTWAIT
, MT_DATA
);
7699 if (chk
->data
== NULL
) {
7700 chk
->whoTo
->ref_count
--;
7701 SCTP_ZONE_FREE(sctppcbinfo
.ipi_zone_chunk
, chk
);
7702 sctppcbinfo
.ipi_count_chunk
--;
7703 if ((int)sctppcbinfo
.ipi_count_chunk
< 0) {
7704 panic("Chunk count is negative");
7706 sctppcbinfo
.ipi_gencnt_chunk
++;
7709 chk
->data
->m_data
+= SCTP_MIN_OVERHEAD
;
7710 chk
->sent
= SCTP_DATAGRAM_UNSENT
;
7712 chk
->whoTo
= asoc
->primary_destination
;
7713 chk
->whoTo
->ref_count
++;
7714 TAILQ_INSERT_TAIL(&asoc
->control_send_queue
, chk
, sctp_next
);
7715 asoc
->ctrl_queue_cnt
++;
7717 /* Here we go through and fill out the part that
7718 * deals with stream/seq of the ones we skip.
7720 chk
->data
->m_pkthdr
.len
= chk
->data
->m_len
= 0;
7722 struct sctp_tmit_chunk
*at
, *tp1
, *last
;
7723 struct sctp_strseq
*strseq
;
7724 unsigned int cnt_of_space
, i
, ovh
;
7725 unsigned int space_needed
;
7726 unsigned int cnt_of_skipped
= 0;
7727 TAILQ_FOREACH(at
, &asoc
->sent_queue
, sctp_next
) {
7728 if (at
->sent
!= SCTP_FORWARD_TSN_SKIP
) {
7729 /* no more to look at */
7732 if (at
->rec
.data
.rcv_flags
& SCTP_DATA_UNORDERED
) {
7733 /* We don't report these */
7738 space_needed
= (sizeof(struct sctp_forward_tsn_chunk
) +
7739 (cnt_of_skipped
* sizeof(struct sctp_strseq
)));
7740 if ((M_TRAILINGSPACE(chk
->data
) < (int)space_needed
) &&
7741 ((chk
->data
->m_flags
& M_EXT
) == 0)) {
7742 /* Need a M_EXT, get one and move
7743 * fwdtsn to data area.
7745 MCLGET(chk
->data
, MB_DONTWAIT
);
7747 cnt_of_space
= M_TRAILINGSPACE(chk
->data
);
7749 if (stcb
->sctp_ep
->sctp_flags
& SCTP_PCB_FLAGS_BOUND_V6
) {
7750 ovh
= SCTP_MIN_OVERHEAD
;
7752 ovh
= SCTP_MIN_V4_OVERHEAD
;
7754 if (cnt_of_space
> (asoc
->smallest_mtu
-ovh
)) {
7755 /* trim to a mtu size */
7756 cnt_of_space
= asoc
->smallest_mtu
- ovh
;
7758 if (cnt_of_space
< space_needed
) {
7759 /* ok we must trim down the chunk by lowering
7760 * the advance peer ack point.
7762 cnt_of_skipped
= (cnt_of_space
-
7763 ((sizeof(struct sctp_forward_tsn_chunk
))/
7764 sizeof(struct sctp_strseq
)));
7765 /* Go through and find the TSN that
7766 * will be the one we report.
7768 at
= TAILQ_FIRST(&asoc
->sent_queue
);
7769 for (i
= 0; i
< cnt_of_skipped
; i
++) {
7770 tp1
= TAILQ_NEXT(at
, sctp_next
);
7774 /* last now points to last one I can report, update peer ack point */
7775 asoc
->advanced_peer_ack_point
= last
->rec
.data
.TSN_seq
;
7776 space_needed
-= (cnt_of_skipped
* sizeof(struct sctp_strseq
));
7778 chk
->send_size
= space_needed
;
7779 /* Setup the chunk */
7780 fwdtsn
= mtod(chk
->data
, struct sctp_forward_tsn_chunk
*);
7781 fwdtsn
->ch
.chunk_length
= htons(chk
->send_size
);
7782 fwdtsn
->ch
.chunk_flags
= 0;
7783 fwdtsn
->ch
.chunk_type
= SCTP_FORWARD_CUM_TSN
;
7784 fwdtsn
->new_cumulative_tsn
= htonl(asoc
->advanced_peer_ack_point
);
7785 chk
->send_size
= (sizeof(struct sctp_forward_tsn_chunk
) +
7786 (cnt_of_skipped
* sizeof(struct sctp_strseq
)));
7787 chk
->data
->m_pkthdr
.len
= chk
->data
->m_len
= chk
->send_size
;
7789 /* Move pointer to after the fwdtsn and transfer to
7790 * the strseq pointer.
7792 strseq
= (struct sctp_strseq
*)fwdtsn
;
7794 * Now populate the strseq list. This is done blindly
7795 * without pulling out duplicate stream info. This is
7796 * inefficent but won't harm the process since the peer
7797 * will look at these in sequence and will thus release
7798 * anything. It could mean we exceed the PMTU and chop
7799 * off some that we could have included.. but this is
7800 * unlikely (aka 1432/4 would mean 300+ stream seq's would
7801 * have to be reported in one FWD-TSN. With a bit of work
7802 * we can later FIX this to optimize and pull out duplcates..
7803 * but it does add more overhead. So for now... not!
7805 at
= TAILQ_FIRST(&asoc
->sent_queue
);
7806 for (i
= 0; i
< cnt_of_skipped
; i
++) {
7807 tp1
= TAILQ_NEXT(at
, sctp_next
);
7808 if (at
->rec
.data
.rcv_flags
& SCTP_DATA_UNORDERED
) {
7809 /* We don't report these */
7814 strseq
->stream
= ntohs(at
->rec
.data
.stream_number
);
7815 strseq
->sequence
= ntohs(at
->rec
.data
.stream_seq
);
7825 sctp_send_sack(struct sctp_tcb
*stcb
)
7828 * Queue up a SACK in the control queue. We must first check to
7829 * see if a SACK is somehow on the control queue. If so, we will
7830 * take and and remove the old one.
7832 struct sctp_association
*asoc
;
7833 struct sctp_tmit_chunk
*chk
, *a_chk
;
7834 struct sctp_sack_chunk
*sack
;
7835 struct sctp_gap_ack_block
*gap_descriptor
;
7838 unsigned int i
, maxi
, seeing_ones
, m_size
;
7839 unsigned int num_gap_blocks
, space
;
7845 if (asoc
->last_data_chunk_from
== NULL
) {
7846 /* Hmm we never received anything */
7849 sctp_set_rwnd(stcb
, asoc
);
7850 TAILQ_FOREACH(chk
, &asoc
->control_send_queue
, sctp_next
) {
7851 if (chk
->rec
.chunk_id
== SCTP_SELECTIVE_ACK
) {
7852 /* Hmm, found a sack already on queue, remove it */
7853 TAILQ_REMOVE(&asoc
->control_send_queue
, chk
, sctp_next
);
7854 asoc
->ctrl_queue_cnt
++;
7857 sctp_m_freem(a_chk
->data
);
7859 sctp_free_remote_addr(a_chk
->whoTo
);
7860 a_chk
->whoTo
= NULL
;
7864 if (a_chk
== NULL
) {
7865 a_chk
= (struct sctp_tmit_chunk
*)SCTP_ZONE_GET(sctppcbinfo
.ipi_zone_chunk
);
7866 if (a_chk
== NULL
) {
7867 /* No memory so we drop the idea, and set a timer */
7868 sctp_timer_stop(SCTP_TIMER_TYPE_RECV
,
7869 stcb
->sctp_ep
, stcb
, NULL
);
7870 sctp_timer_start(SCTP_TIMER_TYPE_RECV
,
7871 stcb
->sctp_ep
, stcb
, NULL
);
7874 sctppcbinfo
.ipi_count_chunk
++;
7875 sctppcbinfo
.ipi_gencnt_chunk
++;
7876 a_chk
->rec
.chunk_id
= SCTP_SELECTIVE_ACK
;
7879 a_chk
->snd_count
= 0;
7880 a_chk
->send_size
= 0; /* fill in later */
7881 a_chk
->sent
= SCTP_DATAGRAM_UNSENT
;
7882 m_size
= (asoc
->mapping_array_size
<< 3);
7884 if ((asoc
->numduptsns
) ||
7885 (asoc
->last_data_chunk_from
->dest_state
& SCTP_ADDR_NOT_REACHABLE
)
7887 /* Ok, we have some duplicates or the destination for the
7888 * sack is unreachable, lets see if we can select an alternate
7889 * than asoc->last_data_chunk_from
7891 if ((!(asoc
->last_data_chunk_from
->dest_state
&
7892 SCTP_ADDR_NOT_REACHABLE
)) &&
7893 (asoc
->used_alt_onsack
> 2)) {
7894 /* We used an alt last time, don't this time */
7895 a_chk
->whoTo
= NULL
;
7897 asoc
->used_alt_onsack
++;
7898 a_chk
->whoTo
= sctp_find_alternate_net(stcb
, asoc
->last_data_chunk_from
);
7900 if (a_chk
->whoTo
== NULL
) {
7901 /* Nope, no alternate */
7902 a_chk
->whoTo
= asoc
->last_data_chunk_from
;
7903 asoc
->used_alt_onsack
= 0;
7906 /* No duplicates so we use the last
7907 * place we received data from.
7910 if (asoc
->last_data_chunk_from
== NULL
) {
7911 kprintf("Huh, last_data_chunk_from is null when we want to sack??\n");
7914 asoc
->used_alt_onsack
= 0;
7915 a_chk
->whoTo
= asoc
->last_data_chunk_from
;
7918 a_chk
->whoTo
->ref_count
++;
7920 /* Ok now lets formulate a MBUF with our sack */
7921 MGETHDR(a_chk
->data
, MB_DONTWAIT
, MT_DATA
);
7922 if ((a_chk
->data
== NULL
) ||
7923 (a_chk
->whoTo
== NULL
)) {
7924 /* rats, no mbuf memory */
7926 /* was a problem with the destination */
7927 sctp_m_freem(a_chk
->data
);
7930 a_chk
->whoTo
->ref_count
--;
7931 SCTP_ZONE_FREE(sctppcbinfo
.ipi_zone_chunk
, a_chk
);
7932 sctppcbinfo
.ipi_count_chunk
--;
7933 if ((int)sctppcbinfo
.ipi_count_chunk
< 0) {
7934 panic("Chunk count is negative");
7936 sctppcbinfo
.ipi_gencnt_chunk
++;
7937 sctp_timer_stop(SCTP_TIMER_TYPE_RECV
,
7938 stcb
->sctp_ep
, stcb
, NULL
);
7939 sctp_timer_start(SCTP_TIMER_TYPE_RECV
,
7940 stcb
->sctp_ep
, stcb
, NULL
);
7943 /* First count the number of gap ack blocks we need */
7944 if (asoc
->highest_tsn_inside_map
== asoc
->cumulative_tsn
) {
7945 /* We know if there are none above the cum-ack we
7946 * have everything with NO gaps
7950 /* Ok we must count how many gaps we
7954 if (asoc
->highest_tsn_inside_map
>= asoc
->mapping_array_base_tsn
) {
7955 maxi
= (asoc
->highest_tsn_inside_map
- asoc
->mapping_array_base_tsn
);
7957 maxi
= (asoc
->highest_tsn_inside_map
+ (MAX_TSN
- asoc
->mapping_array_base_tsn
) + 1);
7959 if (maxi
> m_size
) {
7960 /* impossible but who knows, someone is playing with us :> */
7962 kprintf("GAK maxi:%d > m_size:%d came out higher than allowed htsn:%u base:%u cumack:%u\n",
7965 asoc
->highest_tsn_inside_map
,
7966 asoc
->mapping_array_base_tsn
,
7967 asoc
->cumulative_tsn
7973 if (asoc
->cumulative_tsn
>= asoc
->mapping_array_base_tsn
) {
7974 start
= (asoc
->cumulative_tsn
- asoc
->mapping_array_base_tsn
);
7976 /* Set it so we start at 0 */
7979 /* Ok move start up one to look at the NEXT past the cum-ack */
7981 for (i
= start
; i
<= maxi
; i
++) {
7983 /* while seeing ones I must
7984 * transition back to 0 before
7985 * finding the next gap and
7986 * counting the segment.
7988 if (SCTP_IS_TSN_PRESENT(asoc
->mapping_array
, i
) == 0) {
7992 if (SCTP_IS_TSN_PRESENT(asoc
->mapping_array
, i
)) {
7999 if (num_gap_blocks
== 0) {
8001 * Traveled all of the bits and NO one,
8004 if (compare_with_wrap(asoc
->cumulative_tsn
, asoc
->highest_tsn_inside_map
, MAX_TSN
)) {
8005 asoc
->highest_tsn_inside_map
= asoc
->cumulative_tsn
;
8006 #ifdef SCTP_MAP_LOGGING
8007 sctp_log_map(0, 4, asoc
->highest_tsn_inside_map
, SCTP_MAP_SLIDE_RESULT
);
8013 /* Now calculate the space needed */
8014 space
= (sizeof(struct sctp_sack_chunk
) +
8015 (num_gap_blocks
* sizeof(struct sctp_gap_ack_block
)) +
8016 (asoc
->numduptsns
* sizeof(int32_t))
8018 if (space
> (asoc
->smallest_mtu
-SCTP_MAX_OVERHEAD
)) {
8019 /* Reduce the size of the sack to fit */
8021 calc
= (asoc
->smallest_mtu
- SCTP_MAX_OVERHEAD
);
8022 calc
-= sizeof(struct sctp_gap_ack_block
);
8023 fit
= calc
/sizeof(struct sctp_gap_ack_block
);
8024 if (fit
> (int)num_gap_blocks
) {
8025 /* discard some dups */
8026 asoc
->numduptsns
= (fit
- num_gap_blocks
);
8028 /* discard all dups and some gaps */
8029 num_gap_blocks
= fit
;
8030 asoc
->numduptsns
= 0;
8033 space
= (sizeof(struct sctp_sack_chunk
) +
8034 (num_gap_blocks
* sizeof(struct sctp_gap_ack_block
)) +
8035 (asoc
->numduptsns
* sizeof(int32_t))
8040 if ((space
+SCTP_MIN_OVERHEAD
) > MHLEN
) {
8041 /* We need a cluster */
8042 MCLGET(a_chk
->data
, MB_DONTWAIT
);
8043 if ((a_chk
->data
->m_flags
& M_EXT
) != M_EXT
) {
8044 /* can't get a cluster
8045 * give up and try later.
8048 sctp_m_freem(a_chk
->data
);
8050 a_chk
->whoTo
->ref_count
--;
8051 SCTP_ZONE_FREE(sctppcbinfo
.ipi_zone_chunk
, a_chk
);
8052 sctppcbinfo
.ipi_count_chunk
--;
8053 if ((int)sctppcbinfo
.ipi_count_chunk
< 0) {
8054 panic("Chunk count is negative");
8056 sctppcbinfo
.ipi_gencnt_chunk
++;
8057 sctp_timer_stop(SCTP_TIMER_TYPE_RECV
,
8058 stcb
->sctp_ep
, stcb
, NULL
);
8059 sctp_timer_start(SCTP_TIMER_TYPE_RECV
,
8060 stcb
->sctp_ep
, stcb
, NULL
);
8065 /* ok, lets go through and fill it in */
8066 a_chk
->data
->m_data
+= SCTP_MIN_OVERHEAD
;
8067 sack
= mtod(a_chk
->data
, struct sctp_sack_chunk
*);
8068 sack
->ch
.chunk_type
= SCTP_SELECTIVE_ACK
;
8069 sack
->ch
.chunk_flags
= asoc
->receiver_nonce_sum
& SCTP_SACK_NONCE_SUM
;
8070 sack
->sack
.cum_tsn_ack
= htonl(asoc
->cumulative_tsn
);
8071 sack
->sack
.a_rwnd
= htonl(asoc
->my_rwnd
);
8072 asoc
->my_last_reported_rwnd
= asoc
->my_rwnd
;
8073 sack
->sack
.num_gap_ack_blks
= htons(num_gap_blocks
);
8074 sack
->sack
.num_dup_tsns
= htons(asoc
->numduptsns
);
8076 a_chk
->send_size
= (sizeof(struct sctp_sack_chunk
) +
8077 (num_gap_blocks
* sizeof(struct sctp_gap_ack_block
)) +
8078 (asoc
->numduptsns
* sizeof(int32_t)));
8079 a_chk
->data
->m_pkthdr
.len
= a_chk
->data
->m_len
= a_chk
->send_size
;
8080 sack
->ch
.chunk_length
= htons(a_chk
->send_size
);
8082 gap_descriptor
= (struct sctp_gap_ack_block
*)((caddr_t
)sack
+ sizeof(struct sctp_sack_chunk
));
8084 for (i
= start
; i
<= maxi
; i
++) {
8085 if (num_gap_blocks
== 0) {
8089 /* while seeing Ones I must
8090 * transition back to 0 before
8091 * finding the next gap
8093 if (SCTP_IS_TSN_PRESENT(asoc
->mapping_array
, i
) == 0) {
8094 gap_descriptor
->end
= htons(((uint16_t)(i
-start
)));
8100 if (SCTP_IS_TSN_PRESENT(asoc
->mapping_array
, i
)) {
8101 gap_descriptor
->start
= htons(((uint16_t)(i
+1-start
)));
8102 /* advance struct to next pointer */
8107 if (num_gap_blocks
) {
8108 /* special case where the array is all 1's
8109 * to the end of the array.
8111 gap_descriptor
->end
= htons(((uint16_t)((i
-start
))));
8114 /* now we must add any dups we are going to report. */
8115 if (asoc
->numduptsns
) {
8116 dup
= (uint32_t *)gap_descriptor
;
8117 for (i
= 0; i
< asoc
->numduptsns
; i
++) {
8118 *dup
= htonl(asoc
->dup_tsns
[i
]);
8121 asoc
->numduptsns
= 0;
8123 /* now that the chunk is prepared queue it to the control
8126 TAILQ_INSERT_TAIL(&asoc
->control_send_queue
, a_chk
, sctp_next
);
8127 asoc
->ctrl_queue_cnt
++;
8128 sctp_pegs
[SCTP_PEG_SACKS_SENT
]++;
8133 sctp_send_abort_tcb(struct sctp_tcb
*stcb
, struct mbuf
*operr
)
8135 struct mbuf
*m_abort
;
8136 struct sctp_abort_msg
*abort_m
;
8139 MGETHDR(m_abort
, MB_DONTWAIT
, MT_HEADER
);
8140 if (m_abort
== NULL
) {
8144 m_abort
->m_data
+= SCTP_MIN_OVERHEAD
;
8145 abort_m
= mtod(m_abort
, struct sctp_abort_msg
*);
8146 m_abort
->m_len
= sizeof(struct sctp_abort_msg
);
8147 m_abort
->m_next
= operr
;
8157 abort_m
->msg
.ch
.chunk_type
= SCTP_ABORT_ASSOCIATION
;
8158 abort_m
->msg
.ch
.chunk_flags
= 0;
8159 abort_m
->msg
.ch
.chunk_length
= htons(sizeof(struct sctp_abort_chunk
) +
8161 abort_m
->sh
.src_port
= stcb
->sctp_ep
->sctp_lport
;
8162 abort_m
->sh
.dest_port
= stcb
->rport
;
8163 abort_m
->sh
.v_tag
= htonl(stcb
->asoc
.peer_vtag
);
8164 abort_m
->sh
.checksum
= 0;
8165 m_abort
->m_pkthdr
.len
= m_abort
->m_len
+ sz
;
8166 m_abort
->m_pkthdr
.rcvif
= 0;
8167 sctp_lowlevel_chunk_output(stcb
->sctp_ep
, stcb
,
8168 stcb
->asoc
.primary_destination
,
8169 (struct sockaddr
*)&stcb
->asoc
.primary_destination
->ro
._l_addr
,
8170 m_abort
, 1, 0, NULL
, 0);
8174 sctp_send_shutdown_complete(struct sctp_tcb
*stcb
,
8175 struct sctp_nets
*net
)
8178 /* formulate and SEND a SHUTDOWN-COMPLETE */
8179 struct mbuf
*m_shutdown_comp
;
8180 struct sctp_shutdown_complete_msg
*comp_cp
;
8182 m_shutdown_comp
= NULL
;
8183 MGETHDR(m_shutdown_comp
, MB_DONTWAIT
, MT_HEADER
);
8184 if (m_shutdown_comp
== NULL
) {
8188 m_shutdown_comp
->m_data
+= sizeof(struct ip6_hdr
);
8189 comp_cp
= mtod(m_shutdown_comp
, struct sctp_shutdown_complete_msg
*);
8190 comp_cp
->shut_cmp
.ch
.chunk_type
= SCTP_SHUTDOWN_COMPLETE
;
8191 comp_cp
->shut_cmp
.ch
.chunk_flags
= 0;
8192 comp_cp
->shut_cmp
.ch
.chunk_length
= htons(sizeof(struct sctp_shutdown_complete_chunk
));
8193 comp_cp
->sh
.src_port
= stcb
->sctp_ep
->sctp_lport
;
8194 comp_cp
->sh
.dest_port
= stcb
->rport
;
8195 comp_cp
->sh
.v_tag
= htonl(stcb
->asoc
.peer_vtag
);
8196 comp_cp
->sh
.checksum
= 0;
8198 m_shutdown_comp
->m_pkthdr
.len
= m_shutdown_comp
->m_len
= sizeof(struct sctp_shutdown_complete_msg
);
8199 m_shutdown_comp
->m_pkthdr
.rcvif
= 0;
8200 sctp_lowlevel_chunk_output(stcb
->sctp_ep
, stcb
, net
,
8201 (struct sockaddr
*)&net
->ro
._l_addr
, m_shutdown_comp
,
8203 if ((stcb
->sctp_ep
->sctp_flags
& SCTP_PCB_FLAGS_TCPTYPE
) ||
8204 (stcb
->sctp_ep
->sctp_flags
& SCTP_PCB_FLAGS_IN_TCPPOOL
)) {
8205 stcb
->sctp_ep
->sctp_flags
&= ~SCTP_PCB_FLAGS_CONNECTED
;
8206 stcb
->sctp_ep
->sctp_socket
->so_snd
.ssb_cc
= 0;
8207 soisdisconnected(stcb
->sctp_ep
->sctp_socket
);
8213 sctp_send_shutdown_complete2(struct mbuf
*m
, int iphlen
, struct sctphdr
*sh
)
8215 /* formulate and SEND a SHUTDOWN-COMPLETE */
8217 struct ip
*iph
, *iph_out
;
8218 struct ip6_hdr
*ip6
, *ip6_out
;
8220 struct sctp_shutdown_complete_msg
*comp_cp
;
8222 MGETHDR(mout
, MB_DONTWAIT
, MT_HEADER
);
8227 iph
= mtod(m
, struct ip
*);
8231 if (iph
->ip_v
== IPVERSION
) {
8232 mout
->m_len
= sizeof(struct ip
) +
8233 sizeof(struct sctp_shutdown_complete_msg
);
8234 mout
->m_next
= NULL
;
8235 iph_out
= mtod(mout
, struct ip
*);
8237 /* Fill in the IP header for the ABORT */
8238 iph_out
->ip_v
= IPVERSION
;
8239 iph_out
->ip_hl
= (sizeof(struct ip
)/4);
8240 iph_out
->ip_tos
= (u_char
)0;
8242 iph_out
->ip_off
= 0;
8243 iph_out
->ip_ttl
= MAXTTL
;
8244 iph_out
->ip_p
= IPPROTO_SCTP
;
8245 iph_out
->ip_src
.s_addr
= iph
->ip_dst
.s_addr
;
8246 iph_out
->ip_dst
.s_addr
= iph
->ip_src
.s_addr
;
8248 /* let IP layer calculate this */
8249 iph_out
->ip_sum
= 0;
8250 offset_out
+= sizeof(*iph_out
);
8251 comp_cp
= (struct sctp_shutdown_complete_msg
*)(
8252 (caddr_t
)iph_out
+ offset_out
);
8253 } else if (iph
->ip_v
== (IPV6_VERSION
>> 4)) {
8254 ip6
= (struct ip6_hdr
*)iph
;
8255 mout
->m_len
= sizeof(struct ip6_hdr
) +
8256 sizeof(struct sctp_shutdown_complete_msg
);
8257 mout
->m_next
= NULL
;
8258 ip6_out
= mtod(mout
, struct ip6_hdr
*);
8260 /* Fill in the IPv6 header for the ABORT */
8261 ip6_out
->ip6_flow
= ip6
->ip6_flow
;
8262 ip6_out
->ip6_hlim
= ip6_defhlim
;
8263 ip6_out
->ip6_nxt
= IPPROTO_SCTP
;
8264 ip6_out
->ip6_src
= ip6
->ip6_dst
;
8265 ip6_out
->ip6_dst
= ip6
->ip6_src
;
8266 ip6_out
->ip6_plen
= mout
->m_len
;
8267 offset_out
+= sizeof(*ip6_out
);
8268 comp_cp
= (struct sctp_shutdown_complete_msg
*)(
8269 (caddr_t
)ip6_out
+ offset_out
);
8271 /* Currently not supported. */
8275 /* Now copy in and fill in the ABORT tags etc. */
8276 comp_cp
->sh
.src_port
= sh
->dest_port
;
8277 comp_cp
->sh
.dest_port
= sh
->src_port
;
8278 comp_cp
->sh
.checksum
= 0;
8279 comp_cp
->sh
.v_tag
= sh
->v_tag
;
8280 comp_cp
->shut_cmp
.ch
.chunk_flags
= SCTP_HAD_NO_TCB
;
8281 comp_cp
->shut_cmp
.ch
.chunk_type
= SCTP_SHUTDOWN_COMPLETE
;
8282 comp_cp
->shut_cmp
.ch
.chunk_length
= htons(sizeof(struct sctp_shutdown_complete_chunk
));
8284 mout
->m_pkthdr
.len
= mout
->m_len
;
8286 if ((sctp_no_csum_on_loopback
) &&
8287 (m
->m_pkthdr
.rcvif
) &&
8288 (m
->m_pkthdr
.rcvif
->if_type
== IFT_LOOP
)) {
8289 comp_cp
->sh
.checksum
= 0;
8291 comp_cp
->sh
.checksum
= sctp_calculate_sum(mout
, NULL
, offset_out
);
8294 /* zap the rcvif, it should be null */
8295 mout
->m_pkthdr
.rcvif
= 0;
8296 /* zap the stack pointer to the route */
8297 if (iph_out
!= NULL
) {
8300 bzero(&ro
, sizeof ro
);
8302 if (sctp_debug_on
& SCTP_DEBUG_OUTPUT2
) {
8303 kprintf("sctp_shutdown_complete2 calling ip_output:\n");
8304 sctp_print_address_pkt(iph_out
, &comp_cp
->sh
);
8307 /* set IPv4 length */
8308 #if defined(__FreeBSD__)
8309 iph_out
->ip_len
= mout
->m_pkthdr
.len
;
8311 iph_out
->ip_len
= htons(mout
->m_pkthdr
.len
);
8314 ip_output(mout
, 0, &ro
, IP_RAWOUTPUT
, NULL
8315 #if defined(__OpenBSD__) || (defined(__FreeBSD__) && __FreeBSD_version >= 480000) \
8316 || defined(__NetBSD__) || defined(__DragonFly__)
8320 /* Free the route if we got one back */
8323 } else if (ip6_out
!= NULL
) {
8324 #ifdef NEW_STRUCT_ROUTE
8327 struct route_in6 ro
;
8330 bzero(&ro
, sizeof(ro
));
8332 if (sctp_debug_on
& SCTP_DEBUG_OUTPUT2
) {
8333 kprintf("sctp_shutdown_complete2 calling ip6_output:\n");
8334 sctp_print_address_pkt((struct ip
*)ip6_out
,
8338 ip6_output(mout
, NULL
, &ro
, 0, NULL
, NULL
8339 #if defined(__NetBSD__)
8342 #if (defined(__FreeBSD__) && __FreeBSD_version >= 480000) || defined(__DragonFly__)
8346 /* Free the route if we got one back */
8350 sctp_pegs
[SCTP_DATAGRAMS_SENT
]++;
8354 static struct sctp_nets
*
8355 sctp_select_hb_destination(struct sctp_tcb
*stcb
, struct timeval
*now
)
8357 struct sctp_nets
*net
, *hnet
;
8358 int ms_goneby
, highest_ms
, state_overide
=0;
8360 SCTP_GETTIME_TIMEVAL(now
);
8363 TAILQ_FOREACH(net
, &stcb
->asoc
.nets
, sctp_next
) {
8365 ((net
->dest_state
& SCTP_ADDR_NOHB
) && ((net
->dest_state
& SCTP_ADDR_UNCONFIRMED
) == 0)) ||
8366 (net
->dest_state
& SCTP_ADDR_OUT_OF_SCOPE
)
8368 /* Skip this guy from consideration if HB is off AND its confirmed*/
8370 if (sctp_debug_on
& SCTP_DEBUG_OUTPUT4
) {
8371 kprintf("Skipping net:%p state:%d nohb/out-of-scope\n",
8372 net
, net
->dest_state
);
8377 if (sctp_destination_is_reachable(stcb
, (struct sockaddr
*)&net
->ro
._l_addr
) == 0) {
8378 /* skip this dest net from consideration */
8380 if (sctp_debug_on
& SCTP_DEBUG_OUTPUT4
) {
8381 kprintf("Skipping net:%p reachable NOT\n",
8387 if (net
->last_sent_time
.tv_sec
) {
8388 /* Sent to so we subtract */
8389 ms_goneby
= (now
->tv_sec
- net
->last_sent_time
.tv_sec
) * 1000;
8391 /* Never been sent to */
8392 ms_goneby
= 0x7fffffff;
8394 if (sctp_debug_on
& SCTP_DEBUG_OUTPUT4
) {
8395 kprintf("net:%p ms_goneby:%d\n",
8399 /* When the address state is unconfirmed but still considered reachable, we
8400 * HB at a higher rate. Once it goes confirmed OR reaches the "unreachable"
8401 * state, thenw we cut it back to HB at a more normal pace.
8403 if ((net
->dest_state
& (SCTP_ADDR_UNCONFIRMED
|SCTP_ADDR_NOT_REACHABLE
)) == SCTP_ADDR_UNCONFIRMED
) {
8409 if ((((unsigned int)ms_goneby
>= net
->RTO
) || (state_overide
)) &&
8410 (ms_goneby
> highest_ms
)) {
8411 highest_ms
= ms_goneby
;
8414 if (sctp_debug_on
& SCTP_DEBUG_OUTPUT4
) {
8415 kprintf("net:%p is the new high\n",
8422 ((hnet
->dest_state
& (SCTP_ADDR_UNCONFIRMED
|SCTP_ADDR_NOT_REACHABLE
)) == SCTP_ADDR_UNCONFIRMED
)) {
8428 if (highest_ms
&& (((unsigned int)highest_ms
>= hnet
->RTO
) || state_overide
)) {
8429 /* Found the one with longest delay bounds
8430 * OR it is unconfirmed and still not marked
8434 if (sctp_debug_on
& SCTP_DEBUG_OUTPUT4
) {
8435 kprintf("net:%p is the hb winner -",
8438 sctp_print_address((struct sockaddr
*)&hnet
->ro
._l_addr
);
8443 /* update the timer now */
8444 hnet
->last_sent_time
= *now
;
8452 sctp_send_hb(struct sctp_tcb
*stcb
, int user_req
, struct sctp_nets
*u_net
)
8454 struct sctp_tmit_chunk
*chk
;
8455 struct sctp_nets
*net
;
8456 struct sctp_heartbeat_chunk
*hb
;
8458 struct sockaddr_in
*sin
;
8459 struct sockaddr_in6
*sin6
;
8461 if (user_req
== 0) {
8462 net
= sctp_select_hb_destination(stcb
, &now
);
8464 /* All our busy none to send to, just
8465 * start the timer again.
8467 if (stcb
->asoc
.state
== 0) {
8470 sctp_timer_start(SCTP_TIMER_TYPE_HEARTBEAT
,
8476 #ifndef SCTP_USE_ALLMAN_BURST
8478 /* found one idle.. decay cwnd on this one
8479 * by 1/2 if none outstanding.
8482 if (net
->flight_size
== 0) {
8484 if (net
->addr_is_local
) {
8485 if (net
->cwnd
< (net
->mtu
*4)) {
8486 net
->cwnd
= net
->mtu
* 4;
8489 if (net
->cwnd
< (net
->mtu
* 2)) {
8490 net
->cwnd
= net
->mtu
* 2;
8503 SCTP_GETTIME_TIMEVAL(&now
);
8505 sin
= (struct sockaddr_in
*)&net
->ro
._l_addr
;
8506 if (sin
->sin_family
!= AF_INET
) {
8507 if (sin
->sin_family
!= AF_INET6
) {
8512 chk
= (struct sctp_tmit_chunk
*)SCTP_ZONE_GET(sctppcbinfo
.ipi_zone_chunk
);
8515 if (sctp_debug_on
& SCTP_DEBUG_OUTPUT4
) {
8516 kprintf("Gak, can't get a chunk for hb\n");
8521 sctppcbinfo
.ipi_gencnt_chunk
++;
8522 sctppcbinfo
.ipi_count_chunk
++;
8523 chk
->rec
.chunk_id
= SCTP_HEARTBEAT_REQUEST
;
8524 chk
->asoc
= &stcb
->asoc
;
8525 chk
->send_size
= sizeof(struct sctp_heartbeat_chunk
);
8526 MGETHDR(chk
->data
, MB_DONTWAIT
, MT_DATA
);
8527 if (chk
->data
== NULL
) {
8528 SCTP_ZONE_FREE(sctppcbinfo
.ipi_zone_chunk
, chk
);
8529 sctppcbinfo
.ipi_count_chunk
--;
8530 if ((int)sctppcbinfo
.ipi_count_chunk
< 0) {
8531 panic("Chunk count is negative");
8533 sctppcbinfo
.ipi_gencnt_chunk
++;
8536 chk
->data
->m_data
+= SCTP_MIN_OVERHEAD
;
8537 chk
->data
->m_pkthdr
.len
= chk
->data
->m_len
= chk
->send_size
;
8538 chk
->sent
= SCTP_DATAGRAM_UNSENT
;
8541 chk
->whoTo
->ref_count
++;
8542 /* Now we have a mbuf that we can fill in with the details */
8543 hb
= mtod(chk
->data
, struct sctp_heartbeat_chunk
*);
8545 /* fill out chunk header */
8546 hb
->ch
.chunk_type
= SCTP_HEARTBEAT_REQUEST
;
8547 hb
->ch
.chunk_flags
= 0;
8548 hb
->ch
.chunk_length
= htons(chk
->send_size
);
8549 /* Fill out hb parameter */
8550 hb
->heartbeat
.hb_info
.ph
.param_type
= htons(SCTP_HEARTBEAT_INFO
);
8551 hb
->heartbeat
.hb_info
.ph
.param_length
= htons(sizeof(struct sctp_heartbeat_info_param
));
8552 hb
->heartbeat
.hb_info
.time_value_1
= now
.tv_sec
;
8553 hb
->heartbeat
.hb_info
.time_value_2
= now
.tv_usec
;
8554 /* Did our user request this one, put it in */
8555 hb
->heartbeat
.hb_info
.user_req
= user_req
;
8556 hb
->heartbeat
.hb_info
.addr_family
= sin
->sin_family
;
8557 hb
->heartbeat
.hb_info
.addr_len
= sin
->sin_len
;
8558 if (net
->dest_state
& SCTP_ADDR_UNCONFIRMED
) {
8559 /* we only take from the entropy pool if the address is
8562 net
->heartbeat_random1
= hb
->heartbeat
.hb_info
.random_value1
= sctp_select_initial_TSN(&stcb
->sctp_ep
->sctp_ep
);
8563 net
->heartbeat_random2
= hb
->heartbeat
.hb_info
.random_value2
= sctp_select_initial_TSN(&stcb
->sctp_ep
->sctp_ep
);
8565 net
->heartbeat_random1
= hb
->heartbeat
.hb_info
.random_value1
= 0;
8566 net
->heartbeat_random2
= hb
->heartbeat
.hb_info
.random_value2
= 0;
8568 if (sin
->sin_family
== AF_INET
) {
8569 memcpy(hb
->heartbeat
.hb_info
.address
, &sin
->sin_addr
, sizeof(sin
->sin_addr
));
8570 } else if (sin
->sin_family
== AF_INET6
) {
8571 /* We leave the scope the way it is in our lookup table. */
8572 sin6
= (struct sockaddr_in6
*)&net
->ro
._l_addr
;
8573 memcpy(hb
->heartbeat
.hb_info
.address
, &sin6
->sin6_addr
, sizeof(sin6
->sin6_addr
));
8575 /* huh compiler bug */
8577 if (sctp_debug_on
& SCTP_DEBUG_OUTPUT1
) {
8578 kprintf("Compiler bug bleeds a mbuf and a chunk\n");
8583 /* ok we have a destination that needs a beat */
8584 /* lets do the theshold management Qiaobing style */
8585 if (user_req
== 0) {
8586 if (sctp_threshold_management(stcb
->sctp_ep
, stcb
, net
,
8587 stcb
->asoc
.max_send_times
)) {
8588 /* we have lost the association, in a way this
8589 * is quite bad since we really are one less time
8590 * since we really did not send yet. This is the
8591 * down side to the Q's style as defined in the RFC
8592 * and not my alternate style defined in the RFC.
8594 if (chk
->data
!= NULL
) {
8595 sctp_m_freem(chk
->data
);
8598 SCTP_ZONE_FREE(sctppcbinfo
.ipi_zone_chunk
, chk
);
8599 sctppcbinfo
.ipi_count_chunk
--;
8600 if ((int)sctppcbinfo
.ipi_count_chunk
< 0) {
8601 panic("Chunk count is negative");
8603 sctppcbinfo
.ipi_gencnt_chunk
++;
8607 net
->hb_responded
= 0;
8609 if (sctp_debug_on
& SCTP_DEBUG_OUTPUT4
) {
8610 kprintf("Inserting chunk for HB\n");
8613 TAILQ_INSERT_TAIL(&stcb
->asoc
.control_send_queue
, chk
, sctp_next
);
8614 stcb
->asoc
.ctrl_queue_cnt
++;
8615 sctp_pegs
[SCTP_HB_SENT
]++;
8617 * Call directly med level routine to put out the chunk. It will
8618 * always tumble out control chunks aka HB but it may even tumble
8621 if (user_req
== 0) {
8622 /* Ok now lets start the HB timer if it is NOT a user req */
8623 sctp_timer_start(SCTP_TIMER_TYPE_HEARTBEAT
, stcb
->sctp_ep
,
/*
 * sctp_send_ecn_echo(): queue an SCTP ECN-Echo (ECNE) chunk carrying
 * `high_tsn` on the association's control_send_queue.  If an ECNE chunk
 * is already sitting on that queue its TSN is updated in place instead
 * of allocating a new chunk; otherwise a new tmit chunk + mbuf is built,
 * filled in (chunk_type SCTP_ECN_ECHO, tsn = htonl(high_tsn)) and
 * appended, bumping ctrl_queue_cnt and the SCTP_ECNE_SENT peg.
 *
 * NOTE(review): this extract is corrupted -- source lines are split
 * mid-token and the embedded original line numbers jump (e.g. 8641 ->
 * 8645), so statements and closing braces are missing.  Do not treat
 * this text as compilable; restore from the pristine sctp_output.c
 * before editing logic.
 */
8630 sctp_send_ecn_echo(struct sctp_tcb
*stcb
, struct sctp_nets
*net
,
8633 struct sctp_association
*asoc
;
8634 struct sctp_ecne_chunk
*ecne
;
8635 struct sctp_tmit_chunk
*chk
;
/* First look for an ECNE chunk already queued; just refresh its TSN. */
8637 TAILQ_FOREACH(chk
, &asoc
->control_send_queue
, sctp_next
) {
8638 if (chk
->rec
.chunk_id
== SCTP_ECN_ECHO
) {
8639 /* found a previous ECN_ECHO update it if needed */
8640 ecne
= mtod(chk
->data
, struct sctp_ecne_chunk
*);
8641 ecne
->tsn
= htonl(high_tsn
);
8645 /* nope could not find one to update so we must build one */
8646 chk
= (struct sctp_tmit_chunk
*)SCTP_ZONE_GET(sctppcbinfo
.ipi_zone_chunk
);
8650 sctp_pegs
[SCTP_ECNE_SENT
]++;
8651 sctppcbinfo
.ipi_count_chunk
++;
8652 sctppcbinfo
.ipi_gencnt_chunk
++;
8653 chk
->rec
.chunk_id
= SCTP_ECN_ECHO
;
8654 chk
->asoc
= &stcb
->asoc
;
8655 chk
->send_size
= sizeof(struct sctp_ecne_chunk
);
8656 MGETHDR(chk
->data
, MB_DONTWAIT
, MT_DATA
);
/* mbuf allocation failed: unwind the zone allocation and chunk count. */
8657 if (chk
->data
== NULL
) {
8658 SCTP_ZONE_FREE(sctppcbinfo
.ipi_zone_chunk
, chk
);
8659 sctppcbinfo
.ipi_count_chunk
--;
8660 if ((int)sctppcbinfo
.ipi_count_chunk
< 0) {
8661 panic("Chunk count is negative");
8663 sctppcbinfo
.ipi_gencnt_chunk
++;
/* Reserve header room, then fill in the ECNE chunk body. */
8666 chk
->data
->m_data
+= SCTP_MIN_OVERHEAD
;
8667 chk
->data
->m_pkthdr
.len
= chk
->data
->m_len
= chk
->send_size
;
8668 chk
->sent
= SCTP_DATAGRAM_UNSENT
;
8671 chk
->whoTo
->ref_count
++;
8672 ecne
= mtod(chk
->data
, struct sctp_ecne_chunk
*);
8673 ecne
->ch
.chunk_type
= SCTP_ECN_ECHO
;
8674 ecne
->ch
.chunk_flags
= 0;
8675 ecne
->ch
.chunk_length
= htons(sizeof(struct sctp_ecne_chunk
));
8676 ecne
->tsn
= htonl(high_tsn
);
8677 TAILQ_INSERT_TAIL(&stcb
->asoc
.control_send_queue
, chk
, sctp_next
);
8678 asoc
->ctrl_queue_cnt
++;
/*
 * sctp_send_packet_dropped(): build a PACKET-DROPPED report chunk from
 * the received packet `m` and queue it on the association's
 * control_send_queue (ctrl_queue_cnt is bumped).  The dropped packet's
 * payload is copied in after the pktdrop header; if the report would
 * exceed the smallest path MTU (capped at MCLBYTES) the payload is
 * truncated and SCTP_PACKET_TRUNCATED is set with trunc_len recording
 * the original size.  `bad_crc` ORs SCTP_BADCRC into the chunk flags.
 * Nothing is sent unless the peer advertised pktdrop support
 * (asoc->peer_supports_pktdrop).  bottle_bw / current_onq advertise the
 * local receive-buffer high-water mark and current queued byte counts.
 *
 * NOTE(review): corrupted extract -- embedded original line numbers have
 * gaps (e.g. 8696 -> 8701), so error paths and braces are missing.  Not
 * compilable as-is; restore from pristine sctp_output.c before editing.
 */
8682 sctp_send_packet_dropped(struct sctp_tcb
*stcb
, struct sctp_nets
*net
,
8683 struct mbuf
*m
, int iphlen
, int bad_crc
)
8685 struct sctp_association
*asoc
;
8686 struct sctp_pktdrop_chunk
*drp
;
8687 struct sctp_tmit_chunk
*chk
;
8690 unsigned int small_one
;
8695 if (asoc
->peer_supports_pktdrop
== 0) {
8696 /* peer must declare support before I
8701 chk
= (struct sctp_tmit_chunk
*)SCTP_ZONE_GET(sctppcbinfo
.ipi_zone_chunk
);
8705 sctppcbinfo
.ipi_count_chunk
++;
8706 sctppcbinfo
.ipi_gencnt_chunk
++;
/* Work out how many payload bytes the dropped packet carried
 * (IPv4 ip_len semantics differ per platform; IPv6 uses ip6_plen). */
8708 iph
= mtod(m
, struct ip
*);
8712 if (iph
->ip_v
== IPVERSION
) {
8714 #if defined(__FreeBSD__)
8715 len
= chk
->send_size
= iph
->ip_len
;
8717 len
= chk
->send_size
= (iph
->ip_len
- iphlen
);
8720 struct ip6_hdr
*ip6h
;
8722 ip6h
= mtod(m
, struct ip6_hdr
*);
8723 len
= chk
->send_size
= htons(ip6h
->ip6_plen
);
/* Never copy more than what is actually in the mbuf chain. */
8725 if ((len
+iphlen
) > m
->m_pkthdr
.len
) {
8727 chk
->send_size
= len
= m
->m_pkthdr
.len
- iphlen
;
8729 chk
->asoc
= &stcb
->asoc
;
8730 MGETHDR(chk
->data
, MB_DONTWAIT
, MT_DATA
);
/* mbuf allocation failed: unwind the zone allocation and chunk count. */
8731 if (chk
->data
== NULL
) {
8733 SCTP_ZONE_FREE(sctppcbinfo
.ipi_zone_chunk
, chk
);
8734 sctppcbinfo
.ipi_count_chunk
--;
8735 if ((int)sctppcbinfo
.ipi_count_chunk
< 0) {
8736 panic("Chunk count is negative");
8738 sctppcbinfo
.ipi_gencnt_chunk
++;
/* Upgrade to a cluster if header + payload won't fit in a plain mbuf. */
8741 if ((chk
->send_size
+sizeof(struct sctp_pktdrop_chunk
)+SCTP_MIN_OVERHEAD
) > MHLEN
) {
8742 MCLGET(chk
->data
, MB_DONTWAIT
);
8743 if ((chk
->data
->m_flags
& M_EXT
) == 0) {
8745 sctp_m_freem(chk
->data
);
8750 chk
->data
->m_data
+= SCTP_MIN_OVERHEAD
;
8751 drp
= mtod(chk
->data
, struct sctp_pktdrop_chunk
*);
8753 sctp_m_freem(chk
->data
);
/* Truncate the report if it would exceed the smallest MTU (<= MCLBYTES). */
8757 small_one
= asoc
->smallest_mtu
;
8758 if (small_one
> MCLBYTES
) {
8759 /* Only one cluster worth of data MAX */
8760 small_one
= MCLBYTES
;
8762 chk
->book_size
= (chk
->send_size
+ sizeof(struct sctp_pktdrop_chunk
) +
8763 sizeof(struct sctphdr
) + SCTP_MED_OVERHEAD
);
8764 if (chk
->book_size
> small_one
) {
8765 drp
->ch
.chunk_flags
= SCTP_PACKET_TRUNCATED
;
8766 drp
->trunc_len
= htons(chk
->send_size
);
8767 chk
->send_size
= small_one
- (SCTP_MED_OVERHEAD
+
8768 sizeof(struct sctp_pktdrop_chunk
) +
8769 sizeof(struct sctphdr
));
8770 len
= chk
->send_size
;
8772 /* no truncation needed */
8773 drp
->ch
.chunk_flags
= 0;
8774 drp
->trunc_len
= htons(0);
8777 drp
->ch
.chunk_flags
|= SCTP_BADCRC
;
8779 chk
->send_size
+= sizeof(struct sctp_pktdrop_chunk
);
8780 chk
->data
->m_pkthdr
.len
= chk
->data
->m_len
= chk
->send_size
;
8781 chk
->sent
= SCTP_DATAGRAM_UNSENT
;
8784 /* we should hit here */
8787 chk
->whoTo
= asoc
->primary_destination
;
8789 chk
->whoTo
->ref_count
++;
8790 chk
->rec
.chunk_id
= SCTP_PACKET_DROPPED
;
8791 drp
->ch
.chunk_type
= SCTP_PACKET_DROPPED
;
8792 drp
->ch
.chunk_length
= htons(chk
->send_size
);
/* Advertise local buffer state: rcv high-water mark and queued bytes. */
8793 spc
= stcb
->sctp_socket
->so_rcv
.ssb_hiwat
;
8797 drp
->bottle_bw
= htonl(spc
);
8798 drp
->current_onq
= htonl(asoc
->size_on_delivery_queue
+
8799 asoc
->size_on_reasm_queue
+
8800 asoc
->size_on_all_streams
+
8801 asoc
->my_rwnd_control_len
+
8802 stcb
->sctp_socket
->so_rcv
.ssb_cc
);
8805 m_copydata(m
, iphlen
, len
, datap
);
8806 TAILQ_INSERT_TAIL(&stcb
->asoc
.control_send_queue
, chk
, sctp_next
);
8807 asoc
->ctrl_queue_cnt
++;
/*
 * sctp_send_cwr(): queue an ECN CWR (Congestion Window Reduced) chunk
 * for `high_tsn` on the association's control_send_queue.  If a CWR
 * chunk is already queued, its TSN is advanced in place when `high_tsn`
 * is newer (compare_with_wrap() handles TSN serial-number wraparound);
 * otherwise a new chunk + mbuf is allocated, filled in and appended.
 *
 * NOTE(review): corrupted extract -- embedded original line numbers have
 * gaps (e.g. 8824 -> 8829), so statements and braces are missing.  Not
 * compilable as-is; restore from pristine sctp_output.c before editing.
 */
8811 sctp_send_cwr(struct sctp_tcb
*stcb
, struct sctp_nets
*net
, uint32_t high_tsn
)
8813 struct sctp_association
*asoc
;
8814 struct sctp_cwr_chunk
*cwr
;
8815 struct sctp_tmit_chunk
*chk
;
/* First look for a CWR chunk already queued; just advance its TSN. */
8818 TAILQ_FOREACH(chk
, &asoc
->control_send_queue
, sctp_next
) {
8819 if (chk
->rec
.chunk_id
== SCTP_ECN_CWR
) {
8820 /* found a previous ECN_CWR update it if needed */
8821 cwr
= mtod(chk
->data
, struct sctp_cwr_chunk
*);
8822 if (compare_with_wrap(high_tsn
, ntohl(cwr
->tsn
),
8824 cwr
->tsn
= htonl(high_tsn
);
8829 /* nope could not find one to update so we must build one */
8830 chk
= (struct sctp_tmit_chunk
*)SCTP_ZONE_GET(sctppcbinfo
.ipi_zone_chunk
);
8834 sctppcbinfo
.ipi_count_chunk
++;
8835 sctppcbinfo
.ipi_gencnt_chunk
++;
8836 chk
->rec
.chunk_id
= SCTP_ECN_CWR
;
8837 chk
->asoc
= &stcb
->asoc
;
8838 chk
->send_size
= sizeof(struct sctp_cwr_chunk
);
8839 MGETHDR(chk
->data
, MB_DONTWAIT
, MT_DATA
);
/* mbuf allocation failed: unwind the zone allocation and chunk count. */
8840 if (chk
->data
== NULL
) {
8841 SCTP_ZONE_FREE(sctppcbinfo
.ipi_zone_chunk
, chk
);
8842 sctppcbinfo
.ipi_count_chunk
--;
8843 if ((int)sctppcbinfo
.ipi_count_chunk
< 0) {
8844 panic("Chunk count is negative");
8846 sctppcbinfo
.ipi_gencnt_chunk
++;
/* Reserve header room, then fill in the CWR chunk body. */
8849 chk
->data
->m_data
+= SCTP_MIN_OVERHEAD
;
8850 chk
->data
->m_pkthdr
.len
= chk
->data
->m_len
= chk
->send_size
;
8851 chk
->sent
= SCTP_DATAGRAM_UNSENT
;
8854 chk
->whoTo
->ref_count
++;
8855 cwr
= mtod(chk
->data
, struct sctp_cwr_chunk
*);
8856 cwr
->ch
.chunk_type
= SCTP_ECN_CWR
;
8857 cwr
->ch
.chunk_flags
= 0;
8858 cwr
->ch
.chunk_length
= htons(sizeof(struct sctp_cwr_chunk
));
8859 cwr
->tsn
= htonl(high_tsn
);
8860 TAILQ_INSERT_TAIL(&stcb
->asoc
.control_send_queue
, chk
, sctp_next
);
8861 asoc
->ctrl_queue_cnt
++;
/*
 * sctp_reset_the_streams(): reset outbound stream sequence numbers.
 * With SCTP_RESET_ALL in the request flags, next_sequence_sent is
 * zeroed for every outbound stream; otherwise only for the stream
 * numbers in `list` (entries >= streamoutcnt are skipped as invalid).
 * Finally the ULP is notified with SCTP_NOTIFY_STR_RESET_SEND.
 *
 * NOTE(review): corrupted extract -- embedded original line numbers have
 * gaps (e.g. 8876 -> 8879), so braces/continue statements are missing.
 * Not compilable as-is; restore from pristine sctp_output.c first.
 */
8864 sctp_reset_the_streams(struct sctp_tcb
*stcb
,
8865 struct sctp_stream_reset_request
*req
, int number_entries
, uint16_t *list
)
8869 if (req
->reset_flags
& SCTP_RESET_ALL
) {
8870 for (i
=0; i
<stcb
->asoc
.streamoutcnt
; i
++) {
8871 stcb
->asoc
.strmout
[i
].next_sequence_sent
= 0;
8873 } else if (number_entries
) {
8874 for (i
=0; i
<number_entries
; i
++) {
8875 if (list
[i
] >= stcb
->asoc
.streamoutcnt
) {
8876 /* no such stream */
8879 stcb
->asoc
.strmout
[(list
[i
])].next_sequence_sent
= 0;
8882 sctp_ulp_notify(SCTP_NOTIFY_STR_RESET_SEND
, stcb
, number_entries
, (void *)list
);
/*
 * sctp_send_str_reset_ack(): build and queue a stream-reset RESPONSE
 * chunk answering the peer's request `req`.  The response echoes the
 * request's stream list (kept in network byte order) and sequence
 * number.  If the request sequence equals the next expected
 * str_reset_seq_in, the reset is actually performed: str_reset_seq_in
 * advances, reset_at_tsn is set from the current sending_seq, our
 * outbound streams are reset when SCTP_RESET_YOUR is set, and a
 * reciprocal request is sent back when SCTP_RECIPRICAL is set.
 * Otherwise the request is treated as a retransmission and only
 * re-acked with the previously recorded str_reset_sending_seq.  The
 * finished chunk lands on the control_send_queue.
 *
 * NOTE(review): corrupted extract -- embedded original line numbers
 * have gaps (e.g. 8898 -> 8901, 8998 -> 9002), so statements and braces
 * are missing.  Not compilable as-is; restore from pristine
 * sctp_output.c before editing logic.
 */
8886 sctp_send_str_reset_ack(struct sctp_tcb
*stcb
,
8887 struct sctp_stream_reset_request
*req
)
8889 struct sctp_association
*asoc
;
8890 struct sctp_stream_reset_resp
*strack
;
8891 struct sctp_tmit_chunk
*chk
;
8893 int number_entries
, i
;
8894 uint8_t two_way
=0, not_peer
=0;
8895 uint16_t *list
=NULL
;
/* Entry count is derived from the request's parameter length. */
8898 if (req
->reset_flags
& SCTP_RESET_ALL
)
8901 number_entries
= (ntohs(req
->ph
.param_length
) - sizeof(struct sctp_stream_reset_request
)) / sizeof(uint16_t);
8903 chk
= (struct sctp_tmit_chunk
*)SCTP_ZONE_GET(sctppcbinfo
.ipi_zone_chunk
);
8907 sctppcbinfo
.ipi_count_chunk
++;
8908 sctppcbinfo
.ipi_gencnt_chunk
++;
8909 chk
->rec
.chunk_id
= SCTP_STREAM_RESET
;
8910 chk
->asoc
= &stcb
->asoc
;
8911 chk
->send_size
= sizeof(struct sctp_stream_reset_resp
) + (number_entries
* sizeof(uint16_t));
8912 MGETHDR(chk
->data
, MB_DONTWAIT
, MT_DATA
);
/* mbuf allocation failed: unwind the zone allocation and chunk count. */
8913 if (chk
->data
== NULL
) {
8915 SCTP_ZONE_FREE(sctppcbinfo
.ipi_zone_chunk
, chk
);
8916 sctppcbinfo
.ipi_count_chunk
--;
8917 if ((int)sctppcbinfo
.ipi_count_chunk
< 0) {
8918 panic("Chunk count is negative");
8920 sctppcbinfo
.ipi_gencnt_chunk
++;
/* Reserve header room; grow to a cluster if the 32-bit-rounded
 * response does not fit in the trailing space. */
8923 chk
->data
->m_data
+= SCTP_MIN_OVERHEAD
;
8924 chk
->data
->m_pkthdr
.len
= chk
->data
->m_len
= SCTP_SIZE32(chk
->send_size
);
8925 if (M_TRAILINGSPACE(chk
->data
) < (int)SCTP_SIZE32(chk
->send_size
)) {
8926 MCLGET(chk
->data
, MB_DONTWAIT
);
8927 if ((chk
->data
->m_flags
& M_EXT
) == 0) {
8929 sctp_m_freem(chk
->data
);
8931 goto strresp_jump_out
;
8933 chk
->data
->m_data
+= SCTP_MIN_OVERHEAD
;
8935 if (M_TRAILINGSPACE(chk
->data
) < (int)SCTP_SIZE32(chk
->send_size
)) {
8936 /* can't do it, no room */
8938 sctp_m_freem(chk
->data
);
8940 goto strresp_jump_out
;
8943 chk
->sent
= SCTP_DATAGRAM_UNSENT
;
8945 chk
->whoTo
= asoc
->primary_destination
;
8946 chk
->whoTo
->ref_count
++;
8947 strack
= mtod(chk
->data
, struct sctp_stream_reset_resp
*);
8949 strack
->ch
.chunk_type
= SCTP_STREAM_RESET
;
8950 strack
->ch
.chunk_flags
= 0;
8951 strack
->ch
.chunk_length
= htons(chk
->send_size
);
8953 memset(strack
->sr_resp
.reset_pad
, 0, sizeof(strack
->sr_resp
.reset_pad
));
8955 strack
->sr_resp
.ph
.param_type
= ntohs(SCTP_STR_RESET_RESPONSE
);
8956 strack
->sr_resp
.ph
.param_length
= htons((chk
->send_size
- sizeof(struct sctp_chunkhdr
)));
/* Zero-pad the chunk out to a 4-byte boundary. */
8960 if (chk
->send_size
% 4) {
8961 /* need a padding for the end */
8964 end
= (uint8_t *)((caddr_t
)strack
+ chk
->send_size
);
8965 pad
= chk
->send_size
% 4;
8966 for (i
= 0; i
< pad
; i
++) {
8969 chk
->send_size
+= pad
;
8972 /* actual response */
8973 if (req
->reset_flags
& SCTP_RESET_YOUR
) {
8974 strack
->sr_resp
.reset_flags
= SCTP_RESET_PERFORMED
;
8976 strack
->sr_resp
.reset_flags
= 0;
8979 /* copied from reset request */
8980 strack
->sr_resp
.reset_req_seq_resp
= req
->reset_req_seq
;
8981 seq
= ntohl(req
->reset_req_seq
);
8983 list
= req
->list_of_streams
;
8984 /* copy the un-converted network byte order streams */
8985 for (i
=0; i
<number_entries
; i
++) {
8986 strack
->sr_resp
.list_of_streams
[i
] = list
[i
];
8988 if (asoc
->str_reset_seq_in
== seq
) {
8989 /* is it the next expected? */
8990 asoc
->str_reset_seq_in
++;
8991 strack
->sr_resp
.reset_at_tsn
= htonl(asoc
->sending_seq
);
8992 asoc
->str_reset_sending_seq
= asoc
->sending_seq
;
8993 if (number_entries
) {
8996 /* convert them to host byte order */
8997 for (i
=0 ; i
<number_entries
; i
++) {
8998 temp
= ntohs(list
[i
]);
9002 if (req
->reset_flags
& SCTP_RESET_YOUR
) {
9003 /* reset my outbound streams */
9004 sctp_reset_the_streams(stcb
, req
, number_entries
, list
);
9006 if (req
->reset_flags
& SCTP_RECIPRICAL
) {
9007 /* reset peer too */
9008 sctp_send_str_reset_req(stcb
, number_entries
, list
, two_way
, not_peer
);
9012 /* no its a retran so I must just ack and do nothing */
9013 strack
->sr_resp
.reset_at_tsn
= htonl(asoc
->str_reset_sending_seq
);
9015 strack
->sr_resp
.cumulative_tsn
= htonl(asoc
->cumulative_tsn
);
9016 TAILQ_INSERT_TAIL(&asoc
->control_send_queue
,
9019 asoc
->ctrl_queue_cnt
++;
/*
 * sctp_send_str_reset_req(): build and queue a stream-reset REQUEST
 * chunk.  number_entrys == 0 / list == NULL means "reset all streams"
 * (SCTP_RESET_ALL); otherwise the listed streams are included in
 * network byte order.  `two_way` / `not_peer` select the
 * SCTP_RESET_YOUR / SCTP_RECIPRICAL flag combination documented in the
 * table in the original comment below; (two_way == 0 && not_peer == 1)
 * is rejected as invalid.  Only one request may be outstanding at a
 * time (stream_reset_outstanding gates entry); on success the chunk is
 * queued on the control_send_queue, the STRRESET timer is started and
 * stream_reset_outstanding is set.
 *
 * NOTE(review): corrupted extract -- embedded original line numbers
 * have gaps (e.g. 9048 -> 9053, 9119 -> 9122), so statements and braces
 * are missing.  Not compilable as-is; restore from pristine
 * sctp_output.c before editing logic.
 */
9024 sctp_send_str_reset_req(struct sctp_tcb
*stcb
,
9025 int number_entrys
, uint16_t *list
, uint8_t two_way
, uint8_t not_peer
)
9027 /* Send a stream reset request. The number_entrys may be 0 and list NULL
9028 * if the request is to reset all streams. If two_way is true then we
9029 * not only request a RESET of the received streams but we also
9030 * request the peer to send a reset req to us too.
9031 * Flag combinations in table:
9033 * two_way | not_peer | = | Flags
9034 * ------------------------------
9035 * 0 | 0 | = | SCTP_RESET_YOUR (just the peer)
9036 * 1 | 0 | = | SCTP_RESET_YOUR | SCTP_RECIPRICAL (both sides)
9037 * 0 | 1 | = | Not a Valid Request (not anyone)
9038 * 1 | 1 | = | SCTP_RESET_RECIPRICAL (Just local host)
9040 struct sctp_association
*asoc
;
9041 struct sctp_stream_reset_req
*strreq
;
9042 struct sctp_tmit_chunk
*chk
;
9046 if (asoc
->stream_reset_outstanding
) {
9047 /* Already one pending, must get ACK back
9048 * to clear the flag.
9053 if ((two_way
== 0) && (not_peer
== 1)) {
9054 /* not a valid request */
9058 chk
= (struct sctp_tmit_chunk
*)SCTP_ZONE_GET(sctppcbinfo
.ipi_zone_chunk
);
9062 sctppcbinfo
.ipi_count_chunk
++;
9063 sctppcbinfo
.ipi_gencnt_chunk
++;
9064 chk
->rec
.chunk_id
= SCTP_STREAM_RESET
;
9065 chk
->asoc
= &stcb
->asoc
;
9066 chk
->send_size
= sizeof(struct sctp_stream_reset_req
) + (number_entrys
* sizeof(uint16_t));
9067 MGETHDR(chk
->data
, MB_DONTWAIT
, MT_DATA
);
/* mbuf allocation failed: unwind the zone allocation and chunk count. */
9068 if (chk
->data
== NULL
) {
9070 SCTP_ZONE_FREE(sctppcbinfo
.ipi_zone_chunk
, chk
);
9071 sctppcbinfo
.ipi_count_chunk
--;
9072 if ((int)sctppcbinfo
.ipi_count_chunk
< 0) {
9073 panic("Chunk count is negative");
9075 sctppcbinfo
.ipi_gencnt_chunk
++;
/* Reserve header room; grow to a cluster if the 32-bit-rounded
 * request does not fit in the trailing space. */
9078 chk
->data
->m_data
+= SCTP_MIN_OVERHEAD
;
9079 chk
->data
->m_pkthdr
.len
= chk
->data
->m_len
= SCTP_SIZE32(chk
->send_size
);
9080 if (M_TRAILINGSPACE(chk
->data
) < (int)SCTP_SIZE32(chk
->send_size
)) {
9081 MCLGET(chk
->data
, MB_DONTWAIT
);
9082 if ((chk
->data
->m_flags
& M_EXT
) == 0) {
9084 sctp_m_freem(chk
->data
);
9086 goto strreq_jump_out
;
9088 chk
->data
->m_data
+= SCTP_MIN_OVERHEAD
;
9090 if (M_TRAILINGSPACE(chk
->data
) < (int)SCTP_SIZE32(chk
->send_size
)) {
9091 /* can't do it, no room */
9093 sctp_m_freem(chk
->data
);
9095 goto strreq_jump_out
;
9097 chk
->sent
= SCTP_DATAGRAM_UNSENT
;
9099 chk
->whoTo
= asoc
->primary_destination
;
9100 chk
->whoTo
->ref_count
++;
9102 strreq
= mtod(chk
->data
, struct sctp_stream_reset_req
*);
9103 strreq
->ch
.chunk_type
= SCTP_STREAM_RESET
;
9104 strreq
->ch
.chunk_flags
= 0;
9105 strreq
->ch
.chunk_length
= htons(chk
->send_size
);
9107 strreq
->sr_req
.ph
.param_type
= ntohs(SCTP_STR_RESET_REQUEST
);
9108 strreq
->sr_req
.ph
.param_length
= htons((chk
->send_size
- sizeof(struct sctp_chunkhdr
)));
/* Zero-pad the chunk out to a 4-byte boundary. */
9110 if (chk
->send_size
% 4) {
9111 /* need a padding for the end */
9114 end
= (uint8_t *)((caddr_t
)strreq
+ chk
->send_size
);
9115 pad
= chk
->send_size
% 4;
9116 for (i
=0; i
<pad
; i
++) {
9119 chk
->send_size
+= pad
;
/* Encode the flag combination from the table above. */
9122 strreq
->sr_req
.reset_flags
= 0;
9123 if (number_entrys
== 0) {
9124 strreq
->sr_req
.reset_flags
|= SCTP_RESET_ALL
;
9127 strreq
->sr_req
.reset_flags
|= SCTP_RESET_YOUR
;
9129 if (not_peer
== 0) {
9130 strreq
->sr_req
.reset_flags
|= SCTP_RECIPRICAL
| SCTP_RESET_YOUR
;
9132 strreq
->sr_req
.reset_flags
|= SCTP_RECIPRICAL
;
9135 memset(strreq
->sr_req
.reset_pad
, 0, sizeof(strreq
->sr_req
.reset_pad
));
9136 strreq
->sr_req
.reset_req_seq
= htonl(asoc
->str_reset_seq_out
);
9137 if (number_entrys
) {
9138 /* populate the specific entry's */
9140 for (i
=0; i
< number_entrys
; i
++) {
9141 strreq
->sr_req
.list_of_streams
[i
] = htons(list
[i
]);
9144 TAILQ_INSERT_TAIL(&asoc
->control_send_queue
,
9147 asoc
->ctrl_queue_cnt
++;
9148 sctp_timer_start(SCTP_TIMER_TYPE_STRRESET
, stcb
->sctp_ep
, stcb
, chk
->whoTo
);
9149 asoc
->stream_reset_outstanding
= 1;
/*
 * sctp_send_abort(): formulate an out-of-the-blue ABORT in reply to the
 * received packet `m` and transmit it directly via ip_output() /
 * ip6_output() (no association state is used).  A fresh IPv4 or IPv6
 * header is built with src/dst addresses and SCTP ports swapped from
 * the incoming packet; `err_cause`, if non-NULL, is chained on as the
 * ABORT's error-cause payload and the chunk is zero-padded to a 4-byte
 * boundary.  The SCTP checksum is computed unless the packet arrived on
 * a loopback interface with sctp_no_csum_on_loopback set.  An incoming
 * packet that itself contains an ABORT is never answered
 * (sctp_is_there_an_abort_here()), in which case err_cause is freed.
 * The verification tag comes from `vtag`, or is reflected from `sh`
 * with SCTP_HAD_NO_TCB set -- the selecting condition is in the dropped
 * lines, so confirm against the pristine source.
 *
 * NOTE(review): corrupted extract -- embedded original line numbers
 * have gaps (e.g. 9216 -> 9220, 9285 -> 9289), so conditions, braces
 * and #else/#endif arms are missing.  Not compilable as-is; restore
 * from pristine sctp_output.c before editing logic.
 */
9153 sctp_send_abort(struct mbuf
*m
, int iphlen
, struct sctphdr
*sh
, uint32_t vtag
,
9154 struct mbuf
*err_cause
)
9157 * Formulate the abort message, and send it back down.
9160 struct sctp_abort_msg
*abm
;
9161 struct ip
*iph
, *iph_out
;
9162 struct ip6_hdr
*ip6
, *ip6_out
;
9165 /* don't respond to ABORT with ABORT */
9166 if (sctp_is_there_an_abort_here(m
, iphlen
, &vtag
)) {
9168 sctp_m_freem(err_cause
);
9171 MGETHDR(mout
, MB_DONTWAIT
, MT_HEADER
);
9174 sctp_m_freem(err_cause
);
/* Mirror the incoming IP header, swapping source and destination. */
9177 iph
= mtod(m
, struct ip
*);
9180 if (iph
->ip_v
== IPVERSION
) {
9181 iph_out
= mtod(mout
, struct ip
*);
9182 mout
->m_len
= sizeof(*iph_out
) + sizeof(*abm
);
9183 mout
->m_next
= err_cause
;
9185 /* Fill in the IP header for the ABORT */
9186 iph_out
->ip_v
= IPVERSION
;
9187 iph_out
->ip_hl
= (sizeof(struct ip
) / 4);
9188 iph_out
->ip_tos
= (u_char
)0;
9190 iph_out
->ip_off
= 0;
9191 iph_out
->ip_ttl
= MAXTTL
;
9192 iph_out
->ip_p
= IPPROTO_SCTP
;
9193 iph_out
->ip_src
.s_addr
= iph
->ip_dst
.s_addr
;
9194 iph_out
->ip_dst
.s_addr
= iph
->ip_src
.s_addr
;
9195 /* let IP layer calculate this */
9196 iph_out
->ip_sum
= 0;
9198 iphlen_out
= sizeof(*iph_out
);
9199 abm
= (struct sctp_abort_msg
*)((caddr_t
)iph_out
+ iphlen_out
);
9200 } else if (iph
->ip_v
== (IPV6_VERSION
>> 4)) {
9201 ip6
= (struct ip6_hdr
*)iph
;
9202 ip6_out
= mtod(mout
, struct ip6_hdr
*);
9203 mout
->m_len
= sizeof(*ip6_out
) + sizeof(*abm
);
9204 mout
->m_next
= err_cause
;
9206 /* Fill in the IP6 header for the ABORT */
9207 ip6_out
->ip6_flow
= ip6
->ip6_flow
;
9208 ip6_out
->ip6_hlim
= ip6_defhlim
;
9209 ip6_out
->ip6_nxt
= IPPROTO_SCTP
;
9210 ip6_out
->ip6_src
= ip6
->ip6_dst
;
9211 ip6_out
->ip6_dst
= ip6
->ip6_src
;
9213 iphlen_out
= sizeof(*ip6_out
);
9214 abm
= (struct sctp_abort_msg
*)((caddr_t
)ip6_out
+ iphlen_out
);
9216 /* Currently not supported */
/* SCTP common header: ports swapped from the incoming header. */
9220 abm
->sh
.src_port
= sh
->dest_port
;
9221 abm
->sh
.dest_port
= sh
->src_port
;
9222 abm
->sh
.checksum
= 0;
9224 abm
->sh
.v_tag
= sh
->v_tag
;
9225 abm
->msg
.ch
.chunk_flags
= SCTP_HAD_NO_TCB
;
9227 abm
->sh
.v_tag
= htonl(vtag
);
9228 abm
->msg
.ch
.chunk_flags
= 0;
9230 abm
->msg
.ch
.chunk_type
= SCTP_ABORT_ASSOCIATION
;
/* Account for the error-cause chain and pad to a 4-byte boundary. */
9233 struct mbuf
*m_tmp
= err_cause
;
9235 /* get length of the err_cause chain */
9236 while (m_tmp
!= NULL
) {
9237 err_len
+= m_tmp
->m_len
;
9238 m_tmp
= m_tmp
->m_next
;
9240 mout
->m_pkthdr
.len
= mout
->m_len
+ err_len
;
9242 /* need pad at end of chunk */
9245 padlen
= 4 - (mout
->m_pkthdr
.len
% 4);
9246 m_copyback(mout
, mout
->m_pkthdr
.len
, padlen
, (caddr_t
)&cpthis
);
9248 abm
->msg
.ch
.chunk_length
= htons(sizeof(abm
->msg
.ch
) + err_len
);
9250 mout
->m_pkthdr
.len
= mout
->m_len
;
9251 abm
->msg
.ch
.chunk_length
= htons(sizeof(abm
->msg
.ch
));
/* Checksum is skipped only for loopback with the sysctl enabled. */
9255 if ((sctp_no_csum_on_loopback
) &&
9256 (m
->m_pkthdr
.rcvif
) &&
9257 (m
->m_pkthdr
.rcvif
->if_type
== IFT_LOOP
)) {
9258 abm
->sh
.checksum
= 0;
9260 abm
->sh
.checksum
= sctp_calculate_sum(mout
, NULL
, iphlen_out
);
9263 /* zap the rcvif, it should be null */
9264 mout
->m_pkthdr
.rcvif
= 0;
9265 if (iph_out
!= NULL
) {
9268 /* zap the stack pointer to the route */
9269 bzero(&ro
, sizeof ro
);
9271 if (sctp_debug_on
& SCTP_DEBUG_OUTPUT2
) {
9272 kprintf("sctp_send_abort calling ip_output:\n");
9273 sctp_print_address_pkt(iph_out
, &abm
->sh
);
9276 /* set IPv4 length */
9277 #if defined(__FreeBSD__)
9278 iph_out
->ip_len
= mout
->m_pkthdr
.len
;
9280 iph_out
->ip_len
= htons(mout
->m_pkthdr
.len
);
9283 ip_output(mout
, 0, &ro
, IP_RAWOUTPUT
, NULL
9284 #if defined(__OpenBSD__) || (defined(__FreeBSD__) && __FreeBSD_version >= 480000) \
9285 || defined(__NetBSD__) || defined(__DragonFly__)
9289 /* Free the route if we got one back */
9292 } else if (ip6_out
!= NULL
) {
9293 #ifdef NEW_STRUCT_ROUTE
9296 struct route_in6 ro
;
9299 /* zap the stack pointer to the route */
9300 bzero(&ro
, sizeof(ro
));
9302 if (sctp_debug_on
& SCTP_DEBUG_OUTPUT2
) {
9303 kprintf("sctp_send_abort calling ip6_output:\n");
9304 sctp_print_address_pkt((struct ip
*)ip6_out
, &abm
->sh
);
9307 ip6_output(mout
, NULL
, &ro
, 0, NULL
, NULL
9308 #if defined(__NetBSD__)
9311 #if (defined(__FreeBSD__) && __FreeBSD_version >= 480000) || defined(__DragonFly__)
9315 /* Free the route if we got one back */
9319 sctp_pegs
[SCTP_DATAGRAMS_SENT
]++;
/*
 * sctp_send_operr_to(): wrap the supplied error-cause mbuf chain `scm`
 * in an OPERATION-ERROR chunk and transmit it directly back to the
 * sender of packet `m`.  An SCTP common header (ports swapped from the
 * incoming header) plus chunk header are prepended, the result is
 * zero-padded to a 4-byte boundary and checksummed (skipped only for
 * loopback when sctp_no_csum_on_loopback is set), and then a mirrored
 * IPv4 or IPv6 header is prepended before handing the packet to
 * ip_output() / ip6_output().  `scm` must carry M_PKTHDR.
 *
 * NOTE(review): corrupted extract -- embedded original line numbers
 * have gaps (e.g. 9341 -> 9345, 9405 -> 9410), so braces, return paths
 * and #else arms are missing.  Not compilable as-is; restore from
 * pristine sctp_output.c before editing logic.
 */
9323 sctp_send_operr_to(struct mbuf
*m
, int iphlen
,
9327 struct sctphdr
*ihdr
;
9329 struct sctphdr
*ohdr
;
9330 struct sctp_chunkhdr
*ophdr
;
9334 struct sockaddr_in6 lsa6
, fsa6
;
9337 iph
= mtod(m
, struct ip
*);
9338 ihdr
= (struct sctphdr
*)((caddr_t
)iph
+ iphlen
);
9339 if (!(scm
->m_flags
& M_PKTHDR
)) {
9340 /* must be a pkthdr */
9341 kprintf("Huh, not a packet header in send_operr\n");
/* Prepend SCTP common header + chunk header in front of the causes. */
9345 M_PREPEND(scm
, (sizeof(struct sctphdr
) + sizeof(struct sctp_chunkhdr
)), MB_DONTWAIT
);
9347 /* can't send because we can't add a mbuf */
9350 ohdr
= mtod(scm
, struct sctphdr
*);
9351 ohdr
->src_port
= ihdr
->dest_port
;
9352 ohdr
->dest_port
= ihdr
->src_port
;
9355 ophdr
= (struct sctp_chunkhdr
*)(ohdr
+ 1);
9356 ophdr
->chunk_type
= SCTP_OPERATION_ERROR
;
9357 ophdr
->chunk_flags
= 0;
9358 ophdr
->chunk_length
= htons(scm
->m_pkthdr
.len
- sizeof(struct sctphdr
));
/* Zero-pad out to a 4-byte boundary. */
9359 if (scm
->m_pkthdr
.len
% 4) {
9363 padlen
= 4 - (scm
->m_pkthdr
.len
% 4);
9364 m_copyback(scm
, scm
->m_pkthdr
.len
, padlen
, (caddr_t
)&cpthis
);
9366 if ((sctp_no_csum_on_loopback
) &&
9367 (m
->m_pkthdr
.rcvif
) &&
9368 (m
->m_pkthdr
.rcvif
->if_type
== IFT_LOOP
)) {
9371 val
= sctp_calculate_sum(scm
, NULL
, 0);
9373 ohdr
->checksum
= val
;
/* IPv4 reply path: mirror the incoming header with src/dst swapped. */
9374 if (iph
->ip_v
== IPVERSION
) {
9378 M_PREPEND(scm
, sizeof(struct ip
), MB_DONTWAIT
);
9381 bzero(&ro
, sizeof ro
);
9382 out
= mtod(scm
, struct ip
*);
9383 out
->ip_v
= iph
->ip_v
;
9384 out
->ip_hl
= (sizeof(struct ip
)/4);
9385 out
->ip_tos
= iph
->ip_tos
;
9386 out
->ip_id
= iph
->ip_id
;
9388 out
->ip_ttl
= MAXTTL
;
9389 out
->ip_p
= IPPROTO_SCTP
;
9391 out
->ip_src
= iph
->ip_dst
;
9392 out
->ip_dst
= iph
->ip_src
;
9393 #if defined(__FreeBSD__)
9394 out
->ip_len
= scm
->m_pkthdr
.len
;
9396 out
->ip_len
= htons(scm
->m_pkthdr
.len
);
9398 retcode
= ip_output(scm
, 0, &ro
, IP_RAWOUTPUT
, NULL
9399 #if defined(__OpenBSD__) || (defined(__FreeBSD__) && __FreeBSD_version >= 480000) \
9400 || defined(__NetBSD__) || defined(__DragonFly__)
9404 sctp_pegs
[SCTP_DATAGRAMS_SENT
]++;
9405 /* Free the route if we got one back */
/* IPv6 reply path. */
9410 #ifdef NEW_STRUCT_ROUTE
9413 struct route_in6 ro
;
9415 struct ip6_hdr
*out6
, *in6
;
9417 M_PREPEND(scm
, sizeof(struct ip6_hdr
), MB_DONTWAIT
);
9420 bzero(&ro
, sizeof ro
);
9421 in6
= mtod(m
, struct ip6_hdr
*);
9422 out6
= mtod(scm
, struct ip6_hdr
*);
9423 out6
->ip6_flow
= in6
->ip6_flow
;
9424 out6
->ip6_hlim
= ip6_defhlim
;
9425 out6
->ip6_nxt
= IPPROTO_SCTP
;
9426 out6
->ip6_src
= in6
->ip6_dst
;
9427 out6
->ip6_dst
= in6
->ip6_src
;
9430 bzero(&lsa6
, sizeof(lsa6
));
9431 lsa6
.sin6_len
= sizeof(lsa6
);
9432 lsa6
.sin6_family
= AF_INET6
;
9433 lsa6
.sin6_addr
= out6
->ip6_src
;
9434 bzero(&fsa6
, sizeof(fsa6
));
9435 fsa6
.sin6_len
= sizeof(fsa6
);
9436 fsa6
.sin6_family
= AF_INET6
;
9437 fsa6
.sin6_addr
= out6
->ip6_dst
;
9438 if (sctp_debug_on
& SCTP_DEBUG_OUTPUT2
) {
9439 kprintf("sctp_operr_to calling ipv6 output:\n");
9441 sctp_print_address((struct sockaddr
*)&lsa6
);
9443 sctp_print_address((struct sockaddr
*)&fsa6
);
9445 #endif /* SCTP_DEBUG */
9446 ip6_output(scm
, NULL
, &ro
, 0, NULL
, NULL
9447 #if defined(__NetBSD__)
9450 #if (defined(__FreeBSD__) && __FreeBSD_version >= 480000) || defined(__DragonFly__)
9454 sctp_pegs
[SCTP_DATAGRAMS_SENT
]++;
9455 /* Free the route if we got one back */
/*
 * sctp_copy_one(): copy `cpsz` bytes from the user `uio` into the mbuf
 * chain headed by `m`, reserving `resv_upfront` bytes of headroom in
 * the first mbuf.  Additional mbufs are appended with MGET (blocking,
 * MB_WAIT) as needed, each mbuf's data is aligned to the end of its
 * buffer (MH_ALIGN / M_ALIGN / MC_ALIGN) before uiomove(), and the
 * external-storage sizes of any cluster mbufs are accumulated into
 * *mbcnt.  Returns the uiomove() error status -- presumably 0 on
 * success; the return statements are in the dropped lines, so confirm
 * against the pristine source.
 *
 * NOTE(review): corrupted extract -- embedded original line numbers
 * have gaps (e.g. 9464 -> 9473, 9502 -> 9510, and everything after
 * 9531), so the cluster-allocation branches, loop structure and returns
 * are missing.  Not compilable as-is; restore from pristine
 * sctp_output.c before editing logic.
 */
9462 sctp_copy_one(struct mbuf
*m
, struct uio
*uio
, int cpsz
, int resv_upfront
, int *mbcnt
)
9464 int left
, cancpy
, willcpy
, error
;
9473 if ((left
+resv_upfront
) > (int)MHLEN
) {
9479 if ((m
->m_flags
& M_EXT
) == 0) {
9483 *mbcnt
+= m
->m_ext
.ext_size
;
/* First mbuf: leave resv_upfront bytes free ahead of the copied data. */
9486 cancpy
= M_TRAILINGSPACE(m
);
9487 willcpy
= min(cancpy
, left
);
9488 if ((willcpy
+ resv_upfront
) > cancpy
) {
9489 willcpy
-= resv_upfront
;
9492 /* Align data to the end */
9493 if ((m
->m_flags
& M_EXT
) == 0) {
9494 if (m
->m_flags
& M_PKTHDR
) {
9495 MH_ALIGN(m
, willcpy
);
9497 M_ALIGN(m
, willcpy
);
9500 MC_ALIGN(m
, willcpy
);
9502 error
= uiomove(mtod(m
, caddr_t
), willcpy
, uio
);
/* Grow the chain (blocking allocation) while bytes remain. */
9510 MGET(m
->m_next
, MB_WAIT
, MT_DATA
);
9511 if (m
->m_next
== NULL
) {
9518 if (left
> (int)MHLEN
) {
9524 if ((m
->m_flags
& M_EXT
) == 0) {
9528 *mbcnt
+= m
->m_ext
.ext_size
;
9530 cancpy
= M_TRAILINGSPACE(m
);
9531 willcpy
= min(cancpy
, left
);
9538 sctp_copy_it_in(struct sctp_inpcb
*inp
,
9539 struct sctp_tcb
*stcb
,
9540 struct sctp_association
*asoc
,
9541 struct sctp_nets
*net
,
9542 struct sctp_sndrcvinfo
*srcv
,
9546 /* This routine must be very careful in
9547 * its work. Protocol processing is
9548 * up and running so care must be taken to
9549 * spl...() when you need to do something
9550 * that may effect the stcb/asoc. The sb is
9551 * locked however. When data is copied the
9552 * protocol processing should be enabled since
9553 * this is a slower operation...
9557 int frag_size
, mbcnt
= 0, mbcnt_e
= 0;
9558 unsigned int sndlen
;
9559 unsigned int tot_demand
;
9560 int tot_out
, dataout
;
9561 struct sctp_tmit_chunk
*chk
;
9563 struct sctp_stream_out
*strq
;
9568 so
= stcb
->sctp_socket
;
9572 sndlen
= uio
->uio_resid
;
9573 /* lock the socket buf */
9574 SOCKBUF_LOCK(&so
->so_snd
);
9575 error
= ssb_lock(&so
->so_snd
, SBLOCKWAIT(flags
));
9579 /* will it ever fit ? */
9580 if (sndlen
> so
->so_snd
.ssb_hiwat
) {
9581 /* It will NEVER fit */
9586 /* Do I need to block? */
9587 if ((so
->so_snd
.ssb_hiwat
<
9588 (sndlen
+ asoc
->total_output_queue_size
)) ||
9589 (asoc
->chunks_on_out_queue
> sctp_max_chunks_on_queue
) ||
9590 (asoc
->total_output_mbuf_queue_size
>
9591 so
->so_snd
.ssb_mbmax
)
9593 /* prune any prsctp bufs out */
9594 if (asoc
->peer_supports_prsctp
) {
9595 sctp_prune_prsctp(stcb
, asoc
, srcv
, sndlen
);
9598 * We store off a pointer to the endpoint.
9599 * Since on return from this we must check to
9600 * see if an so_error is set. If so we may have
9601 * been reset and our stcb destroyed. Returning
9602 * an error will flow back to the user...
9604 while ((so
->so_snd
.ssb_hiwat
<
9605 (sndlen
+ asoc
->total_output_queue_size
)) ||
9606 (asoc
->chunks_on_out_queue
>
9607 sctp_max_chunks_on_queue
) ||
9608 (asoc
->total_output_mbuf_queue_size
>
9609 so
->so_snd
.ssb_mbmax
)
9611 if (flags
& (MSG_FNONBLOCKING
|MSG_DONTWAIT
)) {
9612 /* Non-blocking io in place */
9613 error
= EWOULDBLOCK
;
9616 inp
->sctp_tcb_at_block
= (void *)stcb
;
9617 inp
->error_on_block
= 0;
9618 #ifdef SCTP_BLK_LOGGING
9619 sctp_log_block(SCTP_BLOCK_LOG_INTO_BLK
,
9622 ssb_unlock(&so
->so_snd
);
9623 SCTP_TCB_UNLOCK(stcb
);
9624 error
= ssb_wait(&so
->so_snd
);
9625 SCTP_INP_RLOCK(inp
);
9626 if ((inp
->sctp_flags
& SCTP_PCB_FLAGS_SOCKET_GONE
) ||
9627 (inp
->sctp_flags
& SCTP_PCB_FLAGS_SOCKET_GONE
)) {
9628 /* Should I really unlock ? */
9629 SCTP_INP_RUNLOCK(inp
);
9633 SCTP_TCB_LOCK(stcb
);
9634 SCTP_INP_RUNLOCK(inp
);
9636 inp
->sctp_tcb_at_block
= 0;
9637 #ifdef SCTP_BLK_LOGGING
9638 sctp_log_block(SCTP_BLOCK_LOG_OUTOF_BLK
,
9641 if (inp
->error_on_block
) {
9643 * if our asoc was killed, the free code
9644 * (in sctp_pcb.c) will save a error in
9647 error
= inp
->error_on_block
;
9655 /* did we encounter a socket error? */
9657 error
= so
->so_error
;
9661 error
= ssb_lock(&so
->so_snd
, M_WAITOK
);
9663 /* Can't acquire the lock */
9667 #if defined(__FreeBSD__) && __FreeBSD_version >= 502115
9668 if (so
->so_rcv
.sb_state
& SBS_CANTSENDMORE
) {
9670 if (so
->so_state
& SS_CANTSENDMORE
) {
9672 /* The socket is now set not to sendmore.. its gone */
9678 error
= so
->so_error
;
9682 if (asoc
->peer_supports_prsctp
) {
9683 sctp_prune_prsctp(stcb
, asoc
, srcv
, sndlen
);
9687 dataout
= tot_out
= uio
->uio_resid
;
9688 if (inp
->sctp_flags
& SCTP_PCB_FLAGS_BOUND_V6
) {
9689 resv_in_first
= SCTP_MED_OVERHEAD
;
9691 resv_in_first
= SCTP_MED_V4_OVERHEAD
;
9694 /* Are we aborting? */
9695 if (srcv
->sinfo_flags
& MSG_ABORT
) {
9696 if ((SCTP_GET_STATE(asoc
) != SCTP_STATE_COOKIE_WAIT
) &&
9697 (SCTP_GET_STATE(asoc
) != SCTP_STATE_COOKIE_ECHOED
)) {
9698 /* It has to be up before we abort */
9699 /* how big is the user initiated abort? */
9701 /* I wonder about doing a MGET without a splnet set.
9702 * it is done that way in the sosend code so I guess
9705 MGETHDR(mm
, MB_WAIT
, MT_DATA
);
9707 struct sctp_paramhdr
*ph
;
9709 tot_demand
= (tot_out
+ sizeof(struct sctp_paramhdr
));
9710 if (tot_demand
> MHLEN
) {
9711 if (tot_demand
> MCLBYTES
) {
9712 /* truncate user data */
9713 tot_demand
= MCLBYTES
;
9714 tot_out
= tot_demand
- sizeof(struct sctp_paramhdr
);
9716 MCLGET(mm
, MB_WAIT
);
9717 if ((mm
->m_flags
& M_EXT
) == 0) {
9718 /* truncate further */
9720 tot_out
= tot_demand
- sizeof(struct sctp_paramhdr
);
9723 /* now move forward the data pointer */
9724 ph
= mtod(mm
, struct sctp_paramhdr
*);
9725 ph
->param_type
= htons(SCTP_CAUSE_USER_INITIATED_ABT
);
9726 ph
->param_length
= htons((sizeof(struct sctp_paramhdr
) + tot_out
));
9728 mm
->m_pkthdr
.len
= tot_out
+ sizeof(struct sctp_paramhdr
);
9729 mm
->m_len
= mm
->m_pkthdr
.len
;
9730 error
= uiomove((caddr_t
)ph
, (int)tot_out
, uio
);
9733 * Here if we can't get his data we
9734 * still abort we just don't get to
9735 * send the users note :-0
9741 ssb_unlock(&so
->so_snd
);
9742 SOCKBUF_UNLOCK(&so
->so_snd
);
9743 sctp_abort_an_association(stcb
->sctp_ep
, stcb
,
9744 SCTP_RESPONSE_TO_USER_REQ
,
9754 /* Now can we send this? */
9755 if ((SCTP_GET_STATE(asoc
) == SCTP_STATE_SHUTDOWN_SENT
) ||
9756 (SCTP_GET_STATE(asoc
) == SCTP_STATE_SHUTDOWN_ACK_SENT
) ||
9757 (SCTP_GET_STATE(asoc
) == SCTP_STATE_SHUTDOWN_RECEIVED
) ||
9758 (asoc
->state
& SCTP_STATE_SHUTDOWN_PENDING
)) {
9759 /* got data while shutting down */
9764 /* Is the stream no. valid? */
9765 if (srcv
->sinfo_stream
>= asoc
->streamoutcnt
) {
9766 /* Invalid stream number */
9771 if (asoc
->strmout
== NULL
) {
9772 /* huh? software error */
9774 if (sctp_debug_on
& SCTP_DEBUG_OUTPUT1
) {
9775 kprintf("software error in sctp_copy_it_in\n");
9782 if ((srcv
->sinfo_flags
& MSG_EOF
) &&
9783 (stcb
->sctp_ep
->sctp_flags
& SCTP_PCB_FLAGS_UDPTYPE
) &&
9794 /* save off the tag */
9795 my_vtag
= asoc
->my_vtag
;
9796 strq
= &asoc
->strmout
[srcv
->sinfo_stream
];
9797 /* First lets figure out the "chunking" point */
9798 frag_size
= sctp_get_frag_point(stcb
, asoc
);
9800 /* two choices here, it all fits in one chunk or
9801 * we need multiple chunks.
9804 SOCKBUF_UNLOCK(&so
->so_snd
);
9805 if (tot_out
<= frag_size
) {
9806 /* no need to setup a template */
9807 chk
= (struct sctp_tmit_chunk
*)SCTP_ZONE_GET(sctppcbinfo
.ipi_zone_chunk
);
9810 SOCKBUF_LOCK(&so
->so_snd
);
9813 sctppcbinfo
.ipi_count_chunk
++;
9814 sctppcbinfo
.ipi_gencnt_chunk
++;
9815 asoc
->chunks_on_out_queue
++;
9816 MGETHDR(mm
, MB_WAIT
, MT_DATA
);
9821 error
= sctp_copy_one(mm
, uio
, tot_out
, resv_in_first
, &mbcnt_e
);
9824 sctp_prepare_chunk(chk
, stcb
, srcv
, strq
, net
);
9825 chk
->mbcnt
= mbcnt_e
;
9828 mm
->m_pkthdr
.len
= tot_out
;
9832 /* the actual chunk flags */
9833 chk
->rec
.data
.rcv_flags
|= SCTP_DATA_NOT_FRAG
;
9834 chk
->whoTo
->ref_count
++;
9836 /* fix up the send_size if it is not present */
9837 chk
->send_size
= tot_out
;
9838 chk
->book_size
= chk
->send_size
;
9839 /* ok, we are commited */
9840 if ((srcv
->sinfo_flags
& MSG_UNORDERED
) == 0) {
9841 /* bump the ssn if we are unordered. */
9842 strq
->next_sequence_sent
++;
9844 if (chk
->flags
& SCTP_PR_SCTP_BUFFER
) {
9845 asoc
->sent_queue_cnt_removeable
++;
9848 if ((asoc
->state
== 0) ||
9849 (my_vtag
!= asoc
->my_vtag
) ||
9850 (so
!= inp
->sctp_socket
) ||
9851 (inp
->sctp_socket
== 0)) {
9852 /* connection was aborted */
9857 asoc
->stream_queue_cnt
++;
9858 TAILQ_INSERT_TAIL(&strq
->outqueue
, chk
, sctp_next
);
9859 /* now check if this stream is on the wheel */
9860 if ((strq
->next_spoke
.tqe_next
== NULL
) &&
9861 (strq
->next_spoke
.tqe_prev
== NULL
)) {
9862 /* Insert it on the wheel since it is not
9865 sctp_insert_on_wheel(asoc
, strq
);
9870 SCTP_ZONE_FREE(sctppcbinfo
.ipi_zone_chunk
, chk
);
9871 sctppcbinfo
.ipi_count_chunk
--;
9872 if ((int)sctppcbinfo
.ipi_count_chunk
< 0) {
9873 panic("Chunk count is negative");
9875 SOCKBUF_LOCK(&so
->so_snd
);
9879 /* we need to setup a template */
9880 struct sctp_tmit_chunk
template;
9881 struct sctpchunk_listhead tmp
;
9883 /* setup the template */
9884 sctp_prepare_chunk(&template, stcb
, srcv
, strq
, net
);
9886 /* Prepare the temp list */
9889 /* Template is complete, now time for the work */
9890 while (tot_out
> 0) {
9892 chk
= (struct sctp_tmit_chunk
*)SCTP_ZONE_GET(sctppcbinfo
.ipi_zone_chunk
);
9895 * ok we must spin through and dump anything
9896 * we have allocated and then jump to the
9901 sctppcbinfo
.ipi_count_chunk
++;
9902 asoc
->chunks_on_out_queue
++;
9904 sctppcbinfo
.ipi_gencnt_chunk
++;
9906 chk
->whoTo
->ref_count
++;
9907 MGETHDR(chk
->data
, MB_WAIT
, MT_DATA
);
9908 if (chk
->data
== NULL
) {
9912 tot_demand
= min(tot_out
, frag_size
);
9913 error
= sctp_copy_one(chk
->data
, uio
, tot_demand
, resv_in_first
, &mbcnt_e
);
9916 /* now fix the chk->send_size */
9917 chk
->mbcnt
= mbcnt_e
;
9920 chk
->send_size
= tot_demand
;
9921 chk
->data
->m_pkthdr
.len
= tot_demand
;
9922 chk
->book_size
= chk
->send_size
;
9923 if (chk
->flags
& SCTP_PR_SCTP_BUFFER
) {
9924 asoc
->sent_queue_cnt_removeable
++;
9926 TAILQ_INSERT_TAIL(&tmp
, chk
, sctp_next
);
9927 tot_out
-= tot_demand
;
9929 /* Now the tmp list holds all chunks and data */
9930 if ((srcv
->sinfo_flags
& MSG_UNORDERED
) == 0) {
9931 /* bump the ssn if we are unordered. */
9932 strq
->next_sequence_sent
++;
9934 /* Mark the first/last flags. This will
9935 * result int a 3 for a single item on the list
9937 chk
= TAILQ_FIRST(&tmp
);
9938 chk
->rec
.data
.rcv_flags
|= SCTP_DATA_FIRST_FRAG
;
9939 chk
= TAILQ_LAST(&tmp
, sctpchunk_listhead
);
9940 chk
->rec
.data
.rcv_flags
|= SCTP_DATA_LAST_FRAG
;
9942 /* now move it to the streams actual queue */
9943 /* first stop protocol processing */
9945 if ((asoc
->state
== 0) ||
9946 (my_vtag
!= asoc
->my_vtag
) ||
9947 (so
!= inp
->sctp_socket
) ||
9948 (inp
->sctp_socket
== 0)) {
9949 /* connection was aborted */
9954 chk
= TAILQ_FIRST(&tmp
);
9956 chk
->data
->m_nextpkt
= 0;
9957 TAILQ_REMOVE(&tmp
, chk
, sctp_next
);
9958 asoc
->stream_queue_cnt
++;
9959 TAILQ_INSERT_TAIL(&strq
->outqueue
, chk
, sctp_next
);
9960 chk
= TAILQ_FIRST(&tmp
);
9962 /* now check if this stream is on the wheel */
9963 if ((strq
->next_spoke
.tqe_next
== NULL
) &&
9964 (strq
->next_spoke
.tqe_prev
== NULL
)) {
9965 /* Insert it on the wheel since it is not
9968 sctp_insert_on_wheel(asoc
, strq
);
9970 /* Ok now we can allow pping */
9974 SOCKBUF_LOCK(&so
->so_snd
);
9975 chk
= TAILQ_FIRST(&tmp
);
9978 sctp_m_freem(chk
->data
);
9981 TAILQ_REMOVE(&tmp
, chk
, sctp_next
);
9982 SCTP_ZONE_FREE(sctppcbinfo
.ipi_zone_chunk
, chk
);
9983 sctppcbinfo
.ipi_count_chunk
--;
9984 asoc
->chunks_on_out_queue
--;
9985 if ((int)sctppcbinfo
.ipi_count_chunk
< 0) {
9986 panic("Chunk count is negative");
9988 sctppcbinfo
.ipi_gencnt_chunk
++;
9989 chk
= TAILQ_FIRST(&tmp
);
9995 #ifdef SCTP_MBCNT_LOGGING
9996 sctp_log_mbcnt(SCTP_LOG_MBCNT_INCREASE
,
9997 asoc
->total_output_queue_size
,
9999 asoc
->total_output_mbuf_queue_size
,
10003 SOCKBUF_LOCK(&so
->so_snd
);
10004 asoc
->total_output_queue_size
+= dataout
;
10005 asoc
->total_output_mbuf_queue_size
+= mbcnt
;
10006 if ((stcb
->sctp_ep
->sctp_flags
& SCTP_PCB_FLAGS_TCPTYPE
) ||
10007 (stcb
->sctp_ep
->sctp_flags
& SCTP_PCB_FLAGS_IN_TCPPOOL
)) {
10008 so
->so_snd
.ssb_cc
+= dataout
;
10009 so
->so_snd
.ssb_mbcnt
+= mbcnt
;
10011 if ((srcv
->sinfo_flags
& MSG_EOF
) &&
10012 (stcb
->sctp_ep
->sctp_flags
& SCTP_PCB_FLAGS_UDPTYPE
)
10014 int some_on_streamwheel
= 0;
10016 if (!TAILQ_EMPTY(&asoc
->out_wheel
)) {
10017 /* Check to see if some data queued */
10018 struct sctp_stream_out
*outs
;
10019 TAILQ_FOREACH(outs
, &asoc
->out_wheel
, next_spoke
) {
10020 if (!TAILQ_EMPTY(&outs
->outqueue
)) {
10021 some_on_streamwheel
= 1;
10026 if (TAILQ_EMPTY(&asoc
->send_queue
) &&
10027 TAILQ_EMPTY(&asoc
->sent_queue
) &&
10028 (some_on_streamwheel
== 0)) {
10029 /* there is nothing queued to send, so I'm done... */
10030 if ((SCTP_GET_STATE(asoc
) != SCTP_STATE_SHUTDOWN_SENT
) &&
10031 (SCTP_GET_STATE(asoc
) != SCTP_STATE_SHUTDOWN_ACK_SENT
)) {
10032 /* only send SHUTDOWN the first time through */
10034 if (sctp_debug_on
& SCTP_DEBUG_OUTPUT4
) {
10035 kprintf("%s:%d sends a shutdown\n",
10041 sctp_send_shutdown(stcb
, stcb
->asoc
.primary_destination
);
10042 asoc
->state
= SCTP_STATE_SHUTDOWN_SENT
;
10043 sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWN
, stcb
->sctp_ep
, stcb
,
10044 asoc
->primary_destination
);
10045 sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNGUARD
, stcb
->sctp_ep
, stcb
,
10046 asoc
->primary_destination
);
10050 * we still got (or just got) data to send, so set
10054 * XXX sockets draft says that MSG_EOF should be sent
10055 * with no data. currently, we will allow user data
10056 * to be sent first and move to SHUTDOWN-PENDING
10058 asoc
->state
|= SCTP_STATE_SHUTDOWN_PENDING
;
10063 if (sctp_debug_on
& SCTP_DEBUG_OUTPUT2
) {
10064 kprintf("++total out:%d total_mbuf_out:%d\n",
10065 (int)asoc
->total_output_queue_size
,
10066 (int)asoc
->total_output_mbuf_queue_size
);
10071 ssb_unlock(&so
->so_snd
);
10073 SOCKBUF_UNLOCK(&so
->so_snd
);
10082 sctp_sosend(struct socket
*so
,
10084 struct mbuf
*addr_mbuf
,
10086 struct sockaddr
*addr
,
10090 struct mbuf
*control
,
10091 #if defined(__NetBSD__) || defined(__APPLE__)
10095 #if (defined(__FreeBSD__) && __FreeBSD_version >= 500000) || defined(__DragonFly__)
10103 unsigned int sndlen
;
10104 int error
, use_rcvinfo
;
10105 int queue_only
= 0, queue_only_for_init
=0;
10108 struct sctp_inpcb
*inp
;
10109 struct sctp_tcb
*stcb
=NULL
;
10110 struct sctp_sndrcvinfo srcv
;
10111 struct timeval now
;
10112 struct sctp_nets
*net
;
10113 struct sctp_association
*asoc
;
10114 struct sctp_inpcb
*t_inp
;
10115 int create_lock_applied
= 0;
10116 #if defined(__APPLE__)
10117 struct proc
*p
= current_proc();
10118 #elif defined(__NetBSD__)
10119 struct proc
*p
= curproc
; /* XXX */
10120 struct sockaddr
*addr
= NULL
;
10122 addr
= mtod(addr_mbuf
, struct sockaddr
*);
10125 error
= use_rcvinfo
= 0;
10129 t_inp
= inp
= (struct sctp_inpcb
*)so
->so_pcb
;
10131 sndlen
= uio
->uio_resid
;
10133 sndlen
= top
->m_pkthdr
.len
;
10138 if ((inp
->sctp_flags
& SCTP_PCB_FLAGS_TCPTYPE
) &&
10139 (inp
->sctp_flags
& SCTP_PCB_FLAGS_ACCEPTING
)) {
10140 /* The listner can NOT send */
10146 SCTP_ASOC_CREATE_LOCK(inp
);
10147 if ((inp
->sctp_flags
& SCTP_PCB_FLAGS_SOCKET_GONE
) ||
10148 (inp
->sctp_flags
& SCTP_PCB_FLAGS_SOCKET_GONE
)) {
10149 /* Should I really unlock ? */
10155 create_lock_applied
= 1;
10156 if (((inp
->sctp_flags
& SCTP_PCB_FLAGS_BOUND_V6
) == 0) &&
10157 (addr
->sa_family
== AF_INET6
)) {
10163 /* now we must find the assoc */
10164 if (inp
->sctp_flags
& SCTP_PCB_FLAGS_CONNECTED
) {
10165 SCTP_INP_RLOCK(inp
);
10166 stcb
= LIST_FIRST(&inp
->sctp_asoc_list
);
10167 if (stcb
== NULL
) {
10168 SCTP_INP_RUNLOCK(inp
);
10173 SCTP_TCB_LOCK(stcb
);
10174 SCTP_INP_RUNLOCK(inp
);
10175 net
= stcb
->asoc
.primary_destination
;
10179 /* process cmsg snd/rcv info (maybe a assoc-id) */
10180 if (sctp_find_cmsg(SCTP_SNDRCV
, (void *)&srcv
, control
,
10183 if (srcv
.sinfo_flags
& MSG_SENDALL
) {
10184 /* its a sendall */
10185 sctppcbinfo
.mbuf_track
--;
10186 sctp_m_freem(control
);
10188 if (create_lock_applied
) {
10189 SCTP_ASOC_CREATE_UNLOCK(inp
);
10190 create_lock_applied
= 0;
10192 return (sctp_sendall(inp
, uio
, top
, &srcv
));
10197 if (stcb
== NULL
) {
10198 /* Need to do a lookup */
10199 if (use_rcvinfo
&& srcv
.sinfo_assoc_id
) {
10200 stcb
= sctp_findassociation_ep_asocid(inp
, srcv
.sinfo_assoc_id
);
10202 * Question: Should I error here if the assoc_id is
10203 * no longer valid? i.e. I can't find it?
10207 /* Must locate the net structure */
10208 net
= sctp_findnet(stcb
, addr
);
10211 if (stcb
== NULL
) {
10212 if (addr
!= NULL
) {
10213 /* Since we did not use findep we must
10214 * increment it, and if we don't find a
10215 * tcb decrement it.
10217 SCTP_INP_WLOCK(inp
);
10218 SCTP_INP_INCR_REF(inp
);
10219 SCTP_INP_WUNLOCK(inp
);
10220 stcb
= sctp_findassociation_ep_addr(&t_inp
, addr
, &net
, NULL
, NULL
);
10221 if (stcb
== NULL
) {
10222 SCTP_INP_WLOCK(inp
);
10223 SCTP_INP_DECR_REF(inp
);
10224 SCTP_INP_WUNLOCK(inp
);
10229 if ((stcb
== NULL
) &&
10230 (inp
->sctp_flags
& SCTP_PCB_FLAGS_TCPTYPE
)) {
10234 } else if ((stcb
== NULL
) && (addr
== NULL
)) {
10238 } else if (stcb
== NULL
) {
10239 /* UDP style, we must go ahead and start the INIT process */
10240 if ((use_rcvinfo
) &&
10241 (srcv
.sinfo_flags
& MSG_ABORT
)) {
10242 /* User asks to abort a non-existant asoc */
10247 /* get an asoc/stcb struct */
10248 stcb
= sctp_aloc_assoc(inp
, addr
, 1, &error
, 0);
10249 if (stcb
== NULL
) {
10250 /* Error is setup for us in the call */
10254 if (create_lock_applied
) {
10255 SCTP_ASOC_CREATE_UNLOCK(inp
);
10256 create_lock_applied
= 0;
10258 kprintf("Huh-3? create lock should have been on??\n");
10260 /* Turn on queue only flag to prevent data from being sent */
10262 asoc
= &stcb
->asoc
;
10263 asoc
->state
= SCTP_STATE_COOKIE_WAIT
;
10264 SCTP_GETTIME_TIMEVAL(&asoc
->time_entered
);
10266 /* see if a init structure exists in cmsg headers */
10267 struct sctp_initmsg initm
;
10269 if (sctp_find_cmsg(SCTP_INIT
, (void *)&initm
, control
, sizeof(initm
))) {
10270 /* we have an INIT override of the default */
10271 if (initm
.sinit_max_attempts
)
10272 asoc
->max_init_times
= initm
.sinit_max_attempts
;
10273 if (initm
.sinit_num_ostreams
)
10274 asoc
->pre_open_streams
= initm
.sinit_num_ostreams
;
10275 if (initm
.sinit_max_instreams
)
10276 asoc
->max_inbound_streams
= initm
.sinit_max_instreams
;
10277 if (initm
.sinit_max_init_timeo
)
10278 asoc
->initial_init_rto_max
= initm
.sinit_max_init_timeo
;
10279 if (asoc
->streamoutcnt
< asoc
->pre_open_streams
) {
10280 /* Default is NOT correct */
10282 if (sctp_debug_on
& SCTP_DEBUG_OUTPUT1
) {
10283 kprintf("Ok, defout:%d pre_open:%d\n",
10284 asoc
->streamoutcnt
, asoc
->pre_open_streams
);
10287 FREE(asoc
->strmout
, M_PCB
);
10288 asoc
->strmout
= NULL
;
10289 asoc
->streamoutcnt
= asoc
->pre_open_streams
;
10291 /* What happesn if this fails? .. we panic ...*/
10292 MALLOC(asoc
->strmout
,
10293 struct sctp_stream_out
*,
10294 asoc
->streamoutcnt
*
10295 sizeof(struct sctp_stream_out
),
10297 for (i
= 0; i
< asoc
->streamoutcnt
; i
++) {
10299 * inbound side must be set to 0xffff,
10300 * also NOTE when we get the INIT-ACK
10301 * back (for INIT sender) we MUST
10302 * reduce the count (streamoutcnt) but
10303 * first check if we sent to any of the
10304 * upper streams that were dropped (if
10305 * some were). Those that were dropped
10306 * must be notified to the upper layer
10307 * as failed to send.
10309 asoc
->strmout
[i
].next_sequence_sent
= 0x0;
10310 TAILQ_INIT(&asoc
->strmout
[i
].outqueue
);
10311 asoc
->strmout
[i
].stream_no
= i
;
10312 asoc
->strmout
[i
].next_spoke
.tqe_next
= 0;
10313 asoc
->strmout
[i
].next_spoke
.tqe_prev
= 0;
10319 /* out with the INIT */
10320 queue_only_for_init
= 1;
10321 sctp_send_initiate(inp
, stcb
);
10323 * we may want to dig in after this call and adjust the MTU
10324 * value. It defaulted to 1500 (constant) but the ro structure
10325 * may now have an update and thus we may need to change it
10326 * BEFORE we append the message.
10328 net
= stcb
->asoc
.primary_destination
;
10329 asoc
= &stcb
->asoc
;
10331 asoc
= &stcb
->asoc
;
10333 if (create_lock_applied
) {
10334 SCTP_ASOC_CREATE_UNLOCK(inp
);
10335 create_lock_applied
= 0;
10337 if ((SCTP_GET_STATE(asoc
) == SCTP_STATE_COOKIE_WAIT
) ||
10338 (SCTP_GET_STATE(asoc
) == SCTP_STATE_COOKIE_ECHOED
)) {
10341 if (use_rcvinfo
== 0) {
10342 /* Grab the default stuff from the asoc */
10343 srcv
= stcb
->asoc
.def_send
;
10345 /* we are now done with all control */
10347 sctp_m_freem(control
);
10351 if ((SCTP_GET_STATE(asoc
) == SCTP_STATE_SHUTDOWN_SENT
) ||
10352 (SCTP_GET_STATE(asoc
) == SCTP_STATE_SHUTDOWN_RECEIVED
) ||
10353 (SCTP_GET_STATE(asoc
) == SCTP_STATE_SHUTDOWN_ACK_SENT
) ||
10354 (asoc
->state
& SCTP_STATE_SHUTDOWN_PENDING
)) {
10355 if ((use_rcvinfo
) &&
10356 (srcv
.sinfo_flags
& MSG_ABORT
)) {
10359 error
= ECONNRESET
;
10364 /* Ok, we will attempt a msgsnd :> */
10366 #if (defined(__FreeBSD__) && __FreeBSD_version >= 500000) || defined(__DragonFly__)
10367 p
->td_lwp
->lwp_ru
.ru_msgsnd
++;
10369 p
->p_stats
->p_ru
.ru_msgsnd
++;
10373 if (net
&& ((srcv
.sinfo_flags
& MSG_ADDR_OVER
))) {
10374 /* we take the override or the unconfirmed */
10377 net
= stcb
->asoc
.primary_destination
;
10382 /* Must copy it all in from user land. The
10383 * socket buf is locked but we don't suspend
10384 * protocol processing until we are ready to
10388 error
= sctp_copy_it_in(inp
, stcb
, asoc
, net
, &srcv
, uio
, flags
);
10392 /* Here we must either pull in the user data to chunk
10393 * buffers, or use top to do a msg_append.
10395 error
= sctp_msg_append(stcb
, net
, top
, &srcv
, flags
);
10399 /* zap the top since it is now being used */
10403 if (net
->flight_size
> net
->cwnd
) {
10404 sctp_pegs
[SCTP_SENDTO_FULL_CWND
]++;
10407 } else if (asoc
->ifp_had_enobuf
) {
10408 sctp_pegs
[SCTP_QUEONLY_BURSTLMT
]++;
10411 un_sent
= ((stcb
->asoc
.total_output_queue_size
- stcb
->asoc
.total_flight
) +
10412 ((stcb
->asoc
.chunks_on_out_queue
- stcb
->asoc
.total_flight_count
) * sizeof(struct sctp_data_chunk
)) +
10413 SCTP_MED_OVERHEAD
);
10415 if (((inp
->sctp_flags
& SCTP_PCB_FLAGS_NODELAY
) == 0) &&
10416 (stcb
->asoc
.total_flight
> 0) &&
10417 (un_sent
< (int)stcb
->asoc
.smallest_mtu
)) {
10419 /* Ok, Nagle is set on and we have data outstanding. Don't
10420 * send anything and let SACKs drive out the data unless we
10421 * have a "full" segment to send.
10423 sctp_pegs
[SCTP_NAGLE_NOQ
]++;
10426 sctp_pegs
[SCTP_NAGLE_OFF
]++;
10429 if (queue_only_for_init
) {
10430 /* It is possible to have a turn around of the
10431 * INIT/INIT-ACK/COOKIE before I have a chance to
10432 * copy in the data. In such a case I DO want to
10433 * send it out by reversing the queue only flag.
10435 if ((SCTP_GET_STATE(asoc
) != SCTP_STATE_COOKIE_WAIT
) ||
10436 (SCTP_GET_STATE(asoc
) != SCTP_STATE_COOKIE_ECHOED
)) {
10437 /* yep, reverse it */
10442 if ((queue_only
== 0) && (stcb
->asoc
.peers_rwnd
&& un_sent
)) {
10443 /* we can attempt to send too.*/
10445 if (sctp_debug_on
& SCTP_DEBUG_OUTPUT1
) {
10446 kprintf("USR Send calls sctp_chunk_output\n");
10450 sctp_pegs
[SCTP_OUTPUT_FRM_SND
]++;
10451 sctp_chunk_output(inp
, stcb
, 0);
10453 } else if ((queue_only
== 0) &&
10454 (stcb
->asoc
.peers_rwnd
== 0) &&
10455 (stcb
->asoc
.total_flight
== 0)) {
10456 /* We get to have a probe outstanding */
10458 sctp_from_user_send
= 1;
10459 sctp_chunk_output(inp
, stcb
, 0);
10460 sctp_from_user_send
= 0;
10463 } else if (!TAILQ_EMPTY(&stcb
->asoc
.control_send_queue
)) {
10464 int num_out
, reason
, cwnd_full
;
10465 /* Here we do control only */
10467 sctp_med_chunk_output(inp
, stcb
, &stcb
->asoc
, &num_out
,
10468 &reason
, 1, &cwnd_full
, 1, &now
, &now_filled
);
10472 if (sctp_debug_on
& SCTP_DEBUG_OUTPUT1
) {
10473 kprintf("USR Send complete qo:%d prw:%d unsent:%d tf:%d cooq:%d toqs:%d \n",
10474 queue_only
, stcb
->asoc
.peers_rwnd
, un_sent
,
10475 stcb
->asoc
.total_flight
, stcb
->asoc
.chunks_on_out_queue
,
10476 stcb
->asoc
.total_output_queue_size
);
10480 if (create_lock_applied
) {
10481 SCTP_ASOC_CREATE_UNLOCK(inp
);
10482 create_lock_applied
= 0;
10485 SCTP_TCB_UNLOCK(stcb
);
10489 sctp_m_freem(control
);