1 /* $KAME: sctputil.c,v 1.36 2005/03/06 16:04:19 itojun Exp $ */
4 * Copyright (c) 2001, 2002, 2003, 2004 Cisco Systems, Inc.
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions
10 * 1. Redistributions of source code must retain the above copyright
11 * notice, this list of conditions and the following disclaimer.
12 * 2. Redistributions in binary form must reproduce the above copyright
13 * notice, this list of conditions and the following disclaimer in the
14 * documentation and/or other materials provided with the distribution.
15 * 3. All advertising materials mentioning features or use of this software
16 * must display the following acknowledgement:
17 * This product includes software developed by Cisco Systems, Inc.
18 * 4. Neither the name of the project nor the names of its contributors
19 * may be used to endorse or promote products derived from this software
20 * without specific prior written permission.
22 * THIS SOFTWARE IS PROVIDED BY CISCO SYSTEMS AND CONTRIBUTORS ``AS IS'' AND
23 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
24 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
25 * ARE DISCLAIMED. IN NO EVENT SHALL CISCO SYSTEMS OR CONTRIBUTORS BE LIABLE
26 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
27 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
28 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
29 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
30 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
31 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
35 #if !(defined(__OpenBSD__) || defined(__APPLE__))
36 #include "opt_ipsec.h"
38 #if defined(__FreeBSD__) || defined(__DragonFly__)
39 #include "opt_compat.h"
40 #include "opt_inet6.h"
42 #if !(defined(SCTP_BASE_FREEBSD) || defined(__DragonFly__))
43 #include "opt_mpath.h"
44 #endif /* SCTP_BASE_FREEBSD || __DragonFly__ */
46 #if defined(__NetBSD__)
51 #elif !defined(__OpenBSD__)
55 #include <sys/param.h>
56 #include <sys/systm.h>
57 #include <sys/malloc.h>
59 #include <sys/domain.h>
60 #include <sys/protosw.h>
61 #include <sys/socket.h>
62 #include <sys/socketvar.h>
64 #include <sys/kernel.h>
65 #include <sys/sysctl.h>
66 #include <sys/thread2.h>
68 #if defined(__FreeBSD__) || defined(__DragonFly__)
69 #include <sys/callout.h>
71 #include <netinet/sctp_callout.h> /* for callout_active() */
74 #include <net/radix.h>
75 #include <net/route.h>
79 #include <sys/domain.h>
83 #if (defined(__FreeBSD__) && __FreeBSD_version >= 500000)
84 #include <sys/limits.h>
86 #include <machine/limits.h>
90 #include <net/if_types.h>
91 #include <net/route.h>
93 #include <netinet/in.h>
94 #include <netinet/in_systm.h>
95 #include <netinet/ip.h>
96 #include <netinet/in_pcb.h>
97 #include <netinet/in_var.h>
98 #include <netinet/ip_var.h>
101 #include <netinet/ip6.h>
102 #include <netinet6/ip6_var.h>
104 #if defined(__FreeBSD__) || defined(__NetBSD__) || defined(__APPLE__) || defined(__DragonFly__)
105 #include <netinet6/in6_pcb.h>
106 #elif defined(__OpenBSD__)
107 #include <netinet/in_pcb.h>
112 #include <netinet/sctp_pcb.h>
116 #include <netinet6/ipsec.h>
117 #include <netproto/key/key.h>
123 #include <netinet/sctputil.h>
124 #include <netinet/sctp_var.h>
126 #include <netinet6/sctp6_var.h>
128 #include <netinet/sctp_header.h>
129 #include <netinet/sctp_output.h>
130 #include <netinet/sctp_hashdriver.h>
131 #include <netinet/sctp_uio.h>
132 #include <netinet/sctp_timer.h>
133 #include <netinet/sctp_crc32.h>
134 #include <netinet/sctp_indata.h> /* for sctp_deliver_data() */
135 #define NUMBER_OF_MTU_SIZES 18
138 extern u_int32_t sctp_debug_on
;
141 #ifdef SCTP_STAT_LOGGING
/* Index of the next slot to be written in the circular statistics log. */
142 int sctp_cwnd_log_at
=0;
/* Set to 1 once the log index has wrapped past SCTP_STAT_LOG_SIZE. */
143 int sctp_cwnd_log_rolled
=0;
/* The global circular buffer of logged events itself. */
144 struct sctp_cwnd_log sctp_clog
[SCTP_STAT_LOG_SIZE
];
/*
 * Reset the global statistics log to its empty state.
 * NOTE(review): only a fragment of this function is visible in this
 * extract; the companion reset of sctp_cwnd_log_at appears to be on a
 * line not shown here -- confirm against the full source.
 */
147 sctp_clr_stat_log(void)
150 sctp_cwnd_log_rolled
=0;
/*
 * Record a stream-delivery event (SCTP_LOG_EVENT_STRM) in the global
 * circular log: the newly delivered TSN and stream sequence number are
 * stored, and the "expected" (e_tsn/e_sseq) fields are zeroed since
 * this variant has no positional chunk to compare against.  'from'
 * tags the call site for later decoding of the log.
 */
154 sctp_log_strm_del_alt(u_int32_t tsn
, u_int16_t sseq
, int from
)
/* Tag the entry with the caller-supplied origin code and event type. */
157 sctp_clog
[sctp_cwnd_log_at
].from
= (u_int8_t
)from
;
158 sctp_clog
[sctp_cwnd_log_at
].event_type
= (u_int8_t
)SCTP_LOG_EVENT_STRM
;
/* Record the TSN and stream sequence number being delivered. */
159 sctp_clog
[sctp_cwnd_log_at
].x
.strlog
.n_tsn
= tsn
;
160 sctp_clog
[sctp_cwnd_log_at
].x
.strlog
.n_sseq
= sseq
;
/* No "expected" chunk in this variant; clear those fields. */
161 sctp_clog
[sctp_cwnd_log_at
].x
.strlog
.e_tsn
= 0;
162 sctp_clog
[sctp_cwnd_log_at
].x
.strlog
.e_sseq
= 0;
/* Wrap the circular index and remember that the log rolled over. */
164 if (sctp_cwnd_log_at
>= SCTP_STAT_LOG_SIZE
) {
165 sctp_cwnd_log_at
= 0;
166 sctp_cwnd_log_rolled
= 1;
/*
 * Record a snapshot of the TSN mapping-array state
 * (SCTP_LOG_EVENT_MAP) in the global circular log: the array base,
 * the cumulative-ack point, and the highest TSN seen.  'from' tags
 * the call site.
 */
173 sctp_log_map(uint32_t map
, uint32_t cum
, uint32_t high
, int from
)
/* Tag origin and event type for this log slot. */
176 sctp_clog
[sctp_cwnd_log_at
].from
= (u_int8_t
)from
;
177 sctp_clog
[sctp_cwnd_log_at
].event_type
= (u_int8_t
)SCTP_LOG_EVENT_MAP
;
/* Store the three map values in the 'map' arm of the log union. */
178 sctp_clog
[sctp_cwnd_log_at
].x
.map
.base
= map
;
179 sctp_clog
[sctp_cwnd_log_at
].x
.map
.cum
= cum
;
180 sctp_clog
[sctp_cwnd_log_at
].x
.map
.high
= high
;
/* Wrap the circular index and remember that the log rolled over. */
182 if (sctp_cwnd_log_at
>= SCTP_STAT_LOG_SIZE
) {
183 sctp_cwnd_log_at
= 0;
184 sctp_cwnd_log_rolled
= 1;
/*
 * Record a fast-retransmit event (SCTP_LOG_EVENT_FR) in the global
 * circular log: the largest TSN, the largest newly acked TSN, and the
 * TSN being considered.
 * NOTE(review): the tail of the parameter list (the 'from' argument
 * used below) is on a line not shown in this extract.
 */
189 sctp_log_fr(uint32_t biggest_tsn
, uint32_t biggest_new_tsn
, uint32_t tsn
,
/* Tag origin and event type for this log slot. */
193 sctp_clog
[sctp_cwnd_log_at
].from
= (u_int8_t
)from
;
194 sctp_clog
[sctp_cwnd_log_at
].event_type
= (u_int8_t
)SCTP_LOG_EVENT_FR
;
/* Store the three TSNs in the 'fr' arm of the log union. */
195 sctp_clog
[sctp_cwnd_log_at
].x
.fr
.largest_tsn
= biggest_tsn
;
196 sctp_clog
[sctp_cwnd_log_at
].x
.fr
.largest_new_tsn
= biggest_new_tsn
;
197 sctp_clog
[sctp_cwnd_log_at
].x
.fr
.tsn
= tsn
;
/* Wrap the circular index and remember that the log rolled over. */
199 if (sctp_cwnd_log_at
>= SCTP_STAT_LOG_SIZE
) {
200 sctp_cwnd_log_at
= 0;
201 sctp_cwnd_log_rolled
= 1;
206 sctp_log_strm_del(struct sctp_tmit_chunk
*chk
, struct sctp_tmit_chunk
*poschk
,
211 kprintf("Gak log of NULL?\n");
214 sctp_clog
[sctp_cwnd_log_at
].from
= (u_int8_t
)from
;
215 sctp_clog
[sctp_cwnd_log_at
].event_type
= (u_int8_t
)SCTP_LOG_EVENT_STRM
;
216 sctp_clog
[sctp_cwnd_log_at
].x
.strlog
.n_tsn
= chk
->rec
.data
.TSN_seq
;
217 sctp_clog
[sctp_cwnd_log_at
].x
.strlog
.n_sseq
= chk
->rec
.data
.stream_seq
;
218 if (poschk
!= NULL
) {
219 sctp_clog
[sctp_cwnd_log_at
].x
.strlog
.e_tsn
=
220 poschk
->rec
.data
.TSN_seq
;
221 sctp_clog
[sctp_cwnd_log_at
].x
.strlog
.e_sseq
=
222 poschk
->rec
.data
.stream_seq
;
224 sctp_clog
[sctp_cwnd_log_at
].x
.strlog
.e_tsn
= 0;
225 sctp_clog
[sctp_cwnd_log_at
].x
.strlog
.e_sseq
= 0;
228 if (sctp_cwnd_log_at
>= SCTP_STAT_LOG_SIZE
) {
229 sctp_cwnd_log_at
= 0;
230 sctp_cwnd_log_rolled
= 1;
/*
 * Record a congestion-window change (SCTP_LOG_EVENT_CWND) for one
 * destination in the global circular log: the net, its current cwnd
 * (already updated by the caller), its flight size, and the signed
 * adjustment 'augment' that was applied.  'from' tags the call site.
 */
235 sctp_log_cwnd(struct sctp_nets
*net
, int augment
, uint8_t from
)
/* Tag origin and event type for this log slot. */
238 sctp_clog
[sctp_cwnd_log_at
].from
= (u_int8_t
)from
;
239 sctp_clog
[sctp_cwnd_log_at
].event_type
= (u_int8_t
)SCTP_LOG_EVENT_CWND
;
/* Snapshot the destination and its post-change cwnd / flight size. */
240 sctp_clog
[sctp_cwnd_log_at
].x
.cwnd
.net
= net
;
241 sctp_clog
[sctp_cwnd_log_at
].x
.cwnd
.cwnd_new_value
= net
->cwnd
;
242 sctp_clog
[sctp_cwnd_log_at
].x
.cwnd
.inflight
= net
->flight_size
;
243 sctp_clog
[sctp_cwnd_log_at
].x
.cwnd
.cwnd_augment
= augment
;
/* Wrap the circular index and remember that the log rolled over. */
245 if (sctp_cwnd_log_at
>= SCTP_STAT_LOG_SIZE
) {
246 sctp_cwnd_log_at
= 0;
247 sctp_cwnd_log_rolled
= 1;
/*
 * Record a max-burst limiting event (SCTP_LOG_EVENT_MAXBURST) in the
 * global circular log.  The 'cwnd' arm of the log union is reused
 * here: cwnd_new_value carries the error code and cwnd_augment the
 * burst count.  'from' tags the call site.
 */
252 sctp_log_maxburst(struct sctp_nets
*net
, int error
, int burst
, uint8_t from
)
/* Tag origin and event type for this log slot. */
254 sctp_clog
[sctp_cwnd_log_at
].from
= (u_int8_t
)from
;
255 sctp_clog
[sctp_cwnd_log_at
].event_type
= (u_int8_t
)SCTP_LOG_EVENT_MAXBURST
;
/* Snapshot the destination, overloading the cwnd fields (see above). */
256 sctp_clog
[sctp_cwnd_log_at
].x
.cwnd
.net
= net
;
257 sctp_clog
[sctp_cwnd_log_at
].x
.cwnd
.cwnd_new_value
= error
;
258 sctp_clog
[sctp_cwnd_log_at
].x
.cwnd
.inflight
= net
->flight_size
;
259 sctp_clog
[sctp_cwnd_log_at
].x
.cwnd
.cwnd_augment
= burst
;
/* Wrap the circular index and remember that the log rolled over. */
261 if (sctp_cwnd_log_at
>= SCTP_STAT_LOG_SIZE
) {
262 sctp_cwnd_log_at
= 0;
263 sctp_cwnd_log_rolled
= 1;
/*
 * Record a receive-window event (SCTP_LOG_EVENT_RWND) in the global
 * circular log: the peer's advertised rwnd, the size we want to send,
 * and the per-chunk overhead.  new_rwnd is zeroed in this variant
 * (contrast sctp_log_rwnd_set, which records the computed value).
 */
268 sctp_log_rwnd(uint8_t from
, u_int32_t peers_rwnd
, u_int32_t snd_size
, u_int32_t overhead
)
/* Tag origin and event type for this log slot. */
270 sctp_clog
[sctp_cwnd_log_at
].from
= (u_int8_t
)from
;
271 sctp_clog
[sctp_cwnd_log_at
].event_type
= (u_int8_t
)SCTP_LOG_EVENT_RWND
;
/* Store the rwnd bookkeeping values; no new rwnd in this variant. */
272 sctp_clog
[sctp_cwnd_log_at
].x
.rwnd
.rwnd
= peers_rwnd
;
273 sctp_clog
[sctp_cwnd_log_at
].x
.rwnd
.send_size
= snd_size
;
274 sctp_clog
[sctp_cwnd_log_at
].x
.rwnd
.overhead
= overhead
;
275 sctp_clog
[sctp_cwnd_log_at
].x
.rwnd
.new_rwnd
= 0;
/* Wrap the circular index and remember that the log rolled over. */
277 if (sctp_cwnd_log_at
>= SCTP_STAT_LOG_SIZE
) {
278 sctp_cwnd_log_at
= 0;
279 sctp_cwnd_log_rolled
= 1;
/*
 * Record a receive-window *update* (SCTP_LOG_EVENT_RWND) in the
 * global circular log.  Unlike sctp_log_rwnd(), this variant stores
 * the current flight size in send_size and the newly computed rwnd
 * value (a_rwndval) in new_rwnd.
 */
284 sctp_log_rwnd_set(uint8_t from
, u_int32_t peers_rwnd
, u_int32_t flight_size
, u_int32_t overhead
, u_int32_t a_rwndval
)
/* Tag origin and event type for this log slot. */
286 sctp_clog
[sctp_cwnd_log_at
].from
= (u_int8_t
)from
;
287 sctp_clog
[sctp_cwnd_log_at
].event_type
= (u_int8_t
)SCTP_LOG_EVENT_RWND
;
/* Store the rwnd bookkeeping values, including the computed rwnd. */
288 sctp_clog
[sctp_cwnd_log_at
].x
.rwnd
.rwnd
= peers_rwnd
;
289 sctp_clog
[sctp_cwnd_log_at
].x
.rwnd
.send_size
= flight_size
;
290 sctp_clog
[sctp_cwnd_log_at
].x
.rwnd
.overhead
= overhead
;
291 sctp_clog
[sctp_cwnd_log_at
].x
.rwnd
.new_rwnd
= a_rwndval
;
/* Wrap the circular index and remember that the log rolled over. */
293 if (sctp_cwnd_log_at
>= SCTP_STAT_LOG_SIZE
) {
294 sctp_cwnd_log_at
= 0;
295 sctp_cwnd_log_rolled
= 1;
/*
 * Record an mbuf-count accounting event (SCTP_LOG_EVENT_MBCNT) in the
 * global circular log: total output-queue byte size, the book-size
 * change, the total mbuf-count queue size, and the mbuf-count change.
 * 'from' tags the call site.
 */
300 sctp_log_mbcnt(uint8_t from
, u_int32_t total_oq
, u_int32_t book
, u_int32_t total_mbcnt_q
, u_int32_t mbcnt
)
/* Tag origin and event type for this log slot. */
302 sctp_clog
[sctp_cwnd_log_at
].from
= (u_int8_t
)from
;
303 sctp_clog
[sctp_cwnd_log_at
].event_type
= (u_int8_t
)SCTP_LOG_EVENT_MBCNT
;
/* Store the four accounting values in the 'mbcnt' arm of the union. */
304 sctp_clog
[sctp_cwnd_log_at
].x
.mbcnt
.total_queue_size
= total_oq
;
305 sctp_clog
[sctp_cwnd_log_at
].x
.mbcnt
.size_change
= book
;
306 sctp_clog
[sctp_cwnd_log_at
].x
.mbcnt
.total_queue_mb_size
= total_mbcnt_q
;
307 sctp_clog
[sctp_cwnd_log_at
].x
.mbcnt
.mbcnt_change
= mbcnt
;
/* Wrap the circular index and remember that the log rolled over. */
309 if (sctp_cwnd_log_at
>= SCTP_STAT_LOG_SIZE
) {
310 sctp_cwnd_log_at
= 0;
311 sctp_cwnd_log_rolled
= 1;
316 sctp_log_block(uint8_t from
, struct socket
*so
, struct sctp_association
*asoc
)
319 sctp_clog
[sctp_cwnd_log_at
].from
= (u_int8_t
)from
;
320 sctp_clog
[sctp_cwnd_log_at
].event_type
= (u_int8_t
)SCTP_LOG_EVENT_BLOCK
;
321 sctp_clog
[sctp_cwnd_log_at
].x
.blk
.maxmb
= (u_int16_t
)(so
->so_snd
.ssb_mbmax
/1024);
322 sctp_clog
[sctp_cwnd_log_at
].x
.blk
.onmb
= asoc
->total_output_mbuf_queue_size
;
323 sctp_clog
[sctp_cwnd_log_at
].x
.blk
.maxsb
= (u_int16_t
)(so
->so_snd
.ssb_hiwat
/1024);
324 sctp_clog
[sctp_cwnd_log_at
].x
.blk
.onsb
= asoc
->total_output_queue_size
;
325 sctp_clog
[sctp_cwnd_log_at
].x
.blk
.send_sent_qcnt
= (u_int16_t
)(asoc
->send_queue_cnt
+ asoc
->sent_queue_cnt
);
326 sctp_clog
[sctp_cwnd_log_at
].x
.blk
.stream_qcnt
= (u_int16_t
)asoc
->stream_queue_cnt
;
328 if (sctp_cwnd_log_at
>= SCTP_STAT_LOG_SIZE
) {
329 sctp_cwnd_log_at
= 0;
330 sctp_cwnd_log_rolled
= 1;
335 sctp_fill_stat_log(struct mbuf
*m
)
337 struct sctp_cwnd_log_req
*req
;
338 int size_limit
, num
, i
, at
, cnt_out
=0;
343 size_limit
= (m
->m_len
- sizeof(struct sctp_cwnd_log_req
));
344 if (size_limit
< sizeof(struct sctp_cwnd_log
)) {
347 req
= mtod(m
, struct sctp_cwnd_log_req
*);
348 num
= size_limit
/sizeof(struct sctp_cwnd_log
);
349 if (sctp_cwnd_log_rolled
) {
350 req
->num_in_log
= SCTP_STAT_LOG_SIZE
;
352 req
->num_in_log
= sctp_cwnd_log_at
;
353 /* if the log has not rolled, we don't
354 * let you have old data.
356 if (req
->end_at
> sctp_cwnd_log_at
) {
357 req
->end_at
= sctp_cwnd_log_at
;
360 if ((num
< SCTP_STAT_LOG_SIZE
) &&
361 ((sctp_cwnd_log_rolled
) || (sctp_cwnd_log_at
> num
))) {
362 /* we can't return all of it */
363 if (((req
->start_at
== 0) && (req
->end_at
== 0)) ||
364 (req
->start_at
>= SCTP_STAT_LOG_SIZE
) ||
365 (req
->end_at
>= SCTP_STAT_LOG_SIZE
)) {
366 /* No user request or user is wacked. */
368 req
->end_at
= sctp_cwnd_log_at
- 1;
369 if ((sctp_cwnd_log_at
- num
) < 0) {
371 cc
= num
- sctp_cwnd_log_at
;
372 req
->start_at
= SCTP_STAT_LOG_SIZE
- cc
;
374 req
->start_at
= sctp_cwnd_log_at
- num
;
379 if (req
->start_at
> req
->end_at
) {
380 cc
= (SCTP_STAT_LOG_SIZE
- req
->start_at
) +
384 cc
= req
->end_at
- req
->start_at
;
392 /* We can return all of it */
394 req
->end_at
= sctp_cwnd_log_at
- 1;
395 req
->num_ret
= sctp_cwnd_log_at
;
397 for (i
= 0, at
= req
->start_at
; i
< req
->num_ret
; i
++) {
398 req
->log
[i
] = sctp_clog
[at
];
401 if (at
>= SCTP_STAT_LOG_SIZE
)
404 m
->m_len
= (cnt_out
* sizeof(struct sctp_cwnd_log_req
)) + sizeof(struct sctp_cwnd_log_req
);
410 #ifdef SCTP_AUDITING_ENABLED
411 u_int8_t sctp_audit_data
[SCTP_AUDIT_SIZE
][2];
412 static int sctp_audit_indx
= 0;
415 sctp_print_audit_report(void)
420 for (i
=sctp_audit_indx
;i
<SCTP_AUDIT_SIZE
;i
++) {
421 if ((sctp_audit_data
[i
][0] == 0xe0) &&
422 (sctp_audit_data
[i
][1] == 0x01)) {
425 } else if (sctp_audit_data
[i
][0] == 0xf0) {
428 } else if ((sctp_audit_data
[i
][0] == 0xc0) &&
429 (sctp_audit_data
[i
][1] == 0x01)) {
433 kprintf("%2.2x%2.2x ", (uint32_t)sctp_audit_data
[i
][0],
434 (uint32_t)sctp_audit_data
[i
][1]);
439 for (i
=0;i
<sctp_audit_indx
;i
++) {
440 if ((sctp_audit_data
[i
][0] == 0xe0) &&
441 (sctp_audit_data
[i
][1] == 0x01)) {
444 } else if (sctp_audit_data
[i
][0] == 0xf0) {
447 } else if ((sctp_audit_data
[i
][0] == 0xc0) &&
448 (sctp_audit_data
[i
][1] == 0x01)) {
452 kprintf("%2.2x%2.2x ", (uint32_t)sctp_audit_data
[i
][0],
453 (uint32_t)sctp_audit_data
[i
][1]);
462 sctp_auditing(int from
, struct sctp_inpcb
*inp
, struct sctp_tcb
*stcb
,
463 struct sctp_nets
*net
)
465 int resend_cnt
, tot_out
, rep
, tot_book_cnt
;
466 struct sctp_nets
*lnet
;
467 struct sctp_tmit_chunk
*chk
;
469 sctp_audit_data
[sctp_audit_indx
][0] = 0xAA;
470 sctp_audit_data
[sctp_audit_indx
][1] = 0x000000ff & from
;
472 if (sctp_audit_indx
>= SCTP_AUDIT_SIZE
) {
476 sctp_audit_data
[sctp_audit_indx
][0] = 0xAF;
477 sctp_audit_data
[sctp_audit_indx
][1] = 0x01;
479 if (sctp_audit_indx
>= SCTP_AUDIT_SIZE
) {
485 sctp_audit_data
[sctp_audit_indx
][0] = 0xAF;
486 sctp_audit_data
[sctp_audit_indx
][1] = 0x02;
488 if (sctp_audit_indx
>= SCTP_AUDIT_SIZE
) {
493 sctp_audit_data
[sctp_audit_indx
][0] = 0xA1;
494 sctp_audit_data
[sctp_audit_indx
][1] =
495 (0x000000ff & stcb
->asoc
.sent_queue_retran_cnt
);
497 if (sctp_audit_indx
>= SCTP_AUDIT_SIZE
) {
502 resend_cnt
= tot_out
= 0;
503 TAILQ_FOREACH(chk
, &stcb
->asoc
.sent_queue
, sctp_next
) {
504 if (chk
->sent
== SCTP_DATAGRAM_RESEND
) {
506 } else if (chk
->sent
< SCTP_DATAGRAM_RESEND
) {
507 tot_out
+= chk
->book_size
;
511 if (resend_cnt
!= stcb
->asoc
.sent_queue_retran_cnt
) {
512 sctp_audit_data
[sctp_audit_indx
][0] = 0xAF;
513 sctp_audit_data
[sctp_audit_indx
][1] = 0xA1;
515 if (sctp_audit_indx
>= SCTP_AUDIT_SIZE
) {
518 kprintf("resend_cnt:%d asoc-tot:%d\n",
519 resend_cnt
, stcb
->asoc
.sent_queue_retran_cnt
);
521 stcb
->asoc
.sent_queue_retran_cnt
= resend_cnt
;
522 sctp_audit_data
[sctp_audit_indx
][0] = 0xA2;
523 sctp_audit_data
[sctp_audit_indx
][1] =
524 (0x000000ff & stcb
->asoc
.sent_queue_retran_cnt
);
526 if (sctp_audit_indx
>= SCTP_AUDIT_SIZE
) {
530 if (tot_out
!= stcb
->asoc
.total_flight
) {
531 sctp_audit_data
[sctp_audit_indx
][0] = 0xAF;
532 sctp_audit_data
[sctp_audit_indx
][1] = 0xA2;
534 if (sctp_audit_indx
>= SCTP_AUDIT_SIZE
) {
538 kprintf("tot_flt:%d asoc_tot:%d\n", tot_out
,
539 (int)stcb
->asoc
.total_flight
);
540 stcb
->asoc
.total_flight
= tot_out
;
542 if (tot_book_cnt
!= stcb
->asoc
.total_flight_count
) {
543 sctp_audit_data
[sctp_audit_indx
][0] = 0xAF;
544 sctp_audit_data
[sctp_audit_indx
][1] = 0xA5;
546 if (sctp_audit_indx
>= SCTP_AUDIT_SIZE
) {
550 kprintf("tot_flt_book:%d\n", tot_book
);
552 stcb
->asoc
.total_flight_count
= tot_book_cnt
;
555 TAILQ_FOREACH(lnet
, &stcb
->asoc
.nets
, sctp_next
) {
556 tot_out
+= lnet
->flight_size
;
558 if (tot_out
!= stcb
->asoc
.total_flight
) {
559 sctp_audit_data
[sctp_audit_indx
][0] = 0xAF;
560 sctp_audit_data
[sctp_audit_indx
][1] = 0xA3;
562 if (sctp_audit_indx
>= SCTP_AUDIT_SIZE
) {
566 kprintf("real flight:%d net total was %d\n",
567 stcb
->asoc
.total_flight
, tot_out
);
568 /* now corrective action */
569 TAILQ_FOREACH(lnet
, &stcb
->asoc
.nets
, sctp_next
) {
571 TAILQ_FOREACH(chk
, &stcb
->asoc
.sent_queue
, sctp_next
) {
572 if ((chk
->whoTo
== lnet
) &&
573 (chk
->sent
< SCTP_DATAGRAM_RESEND
)) {
574 tot_out
+= chk
->book_size
;
577 if (lnet
->flight_size
!= tot_out
) {
578 kprintf("net:%x flight was %d corrected to %d\n",
579 (uint32_t)lnet
, lnet
->flight_size
, tot_out
);
580 lnet
->flight_size
= tot_out
;
587 sctp_print_audit_report();
/*
 * Append an (event, detail) byte pair to the global audit trail used
 * by sctp_print_audit_report().
 * NOTE(review): the index increment and wrap-around body are on lines
 * not shown in this extract -- only the wrap test is visible.
 */
592 sctp_audit_log(u_int8_t ev
, u_int8_t fd
)
/* Record the event code and its one-byte detail in the next slot. */
595 sctp_audit_data
[sctp_audit_indx
][0] = ev
;
596 sctp_audit_data
[sctp_audit_indx
][1] = fd
;
/* Wrap the audit index when it reaches the end of the buffer. */
598 if (sctp_audit_indx
>= SCTP_AUDIT_SIZE
) {
607 * a list of sizes based on typical mtu's, used only if next hop
610 static int sctp_mtu_sizes
[] = {
632 find_next_best_mtu(int totsz
)
636 * if we are in here we must find the next best fit based on the
637 * size of the dg that failed to be sent.
640 for (i
= 0; i
< NUMBER_OF_MTU_SIZES
; i
++) {
641 if (totsz
< sctp_mtu_sizes
[i
]) {
648 return (sctp_mtu_sizes
[perfer
]);
/*
 * Refill the endpoint's random_store by hashing its random_numbers
 * together with its random_counter via sctp_hash_digest().  Callers
 * (sctp_select_initial_TSN) consume random_store in sizeof(u_long)
 * slices until it is exhausted, then call back in here.
 */
652 sctp_fill_random_store(struct sctp_pcb
*m
)
655 * Here we use the MD5/SHA-1 to hash with our good randomNumbers
656 * and our counter. The result becomes our good random numbers and
657 * we then setup to give these out. Note that we do no locking
658 * to protect this. This is ok, since if competing folks call
659 * this we will get more gobbled gook in the random store which
660 * is what we want. There is a danger that two guys will use
661 * the same random numbers, but that's ok too since that
662 * is random as well :->
/* Digest(random_numbers || random_counter) -> random_store. */
665 sctp_hash_digest((char *)m
->random_numbers
, sizeof(m
->random_numbers
),
666 (char *)&m
->random_counter
, sizeof(m
->random_counter
),
667 (char *)m
->random_store
);
/*
 * Produce a pseudo-random 32-bit value for use as an initial TSN
 * (also reused for vtags and secret keys).  If the debug override
 * initial_sequence_debug is set, return-and-increment that instead
 * for deterministic testing.  Otherwise consume the next
 * sizeof(u_long) bytes of the endpoint's random_store, refilling it
 * via sctp_fill_random_store() when exhausted.
 * NOTE(review): the return statements and some declarations are on
 * lines not shown in this extract.
 */
672 sctp_select_initial_TSN(struct sctp_pcb
*m
)
675 * A true implementation should use random selection process to
676 * get the initial stream sequence number, using RFC1750 as a
/* Debug path: deterministic, monotonically increasing sequence. */
682 if (m
->initial_sequence_debug
!= 0) {
684 ret
= m
->initial_sequence_debug
;
685 m
->initial_sequence_debug
++;
/* Out of random bytes? Rehash to refill the store first. */
688 if ((m
->store_at
+sizeof(u_long
)) > SCTP_SIGNATURE_SIZE
) {
689 /* Refill the random store */
690 sctp_fill_random_store(m
);
/* Take the next u_long-sized slice and advance the cursor. */
692 p
= &m
->random_store
[(int)m
->store_at
];
695 m
->store_at
+= sizeof(u_long
);
700 sctp_select_a_tag(struct sctp_inpcb
*m
)
705 SCTP_GETTIME_TIMEVAL(&now
);
708 x
= sctp_select_initial_TSN(&m
->sctp_ep
);
713 if (sctp_is_vtag_good(m
, x
, &now
)) {
722 sctp_init_asoc(struct sctp_inpcb
*m
, struct sctp_association
*asoc
,
723 int for_a_init
, uint32_t override_tag
)
726 * Anything set to zero is taken care of by the allocation
731 * Up front select what scoping to apply on addresses I tell my peer
732 * Not sure what to do with these right now, we will need to come up
733 * with a way to set them. We may need to pass them through from the
734 * caller in the sctp_aloc_assoc() function.
737 /* init all variables to a known value.*/
738 asoc
->state
= SCTP_STATE_INUSE
;
739 asoc
->max_burst
= m
->sctp_ep
.max_burst
;
740 asoc
->heart_beat_delay
= m
->sctp_ep
.sctp_timeoutticks
[SCTP_TIMER_HEARTBEAT
];
741 asoc
->cookie_life
= m
->sctp_ep
.def_cookie_life
;
744 asoc
->my_vtag
= override_tag
;
746 asoc
->my_vtag
= sctp_select_a_tag(m
);
748 asoc
->asconf_seq_out
= asoc
->str_reset_seq_out
= asoc
->init_seq_number
= asoc
->sending_seq
=
749 sctp_select_initial_TSN(&m
->sctp_ep
);
750 asoc
->t3timeout_highest_marked
= asoc
->asconf_seq_out
;
751 /* we are optimistic here */
752 asoc
->peer_supports_asconf
= 1;
753 asoc
->peer_supports_asconf_setprim
= 1;
754 asoc
->peer_supports_pktdrop
= 1;
756 asoc
->sent_queue_retran_cnt
= 0;
757 /* This will need to be adjusted */
758 asoc
->last_cwr_tsn
= asoc
->init_seq_number
- 1;
759 asoc
->last_acked_seq
= asoc
->init_seq_number
- 1;
760 asoc
->advanced_peer_ack_point
= asoc
->last_acked_seq
;
761 asoc
->asconf_seq_in
= asoc
->last_acked_seq
;
763 /* here we are different, we hold the next one we expect */
764 asoc
->str_reset_seq_in
= asoc
->last_acked_seq
+ 1;
766 asoc
->initial_init_rto_max
= m
->sctp_ep
.initial_init_rto_max
;
767 asoc
->initial_rto
= m
->sctp_ep
.initial_rto
;
769 asoc
->max_init_times
= m
->sctp_ep
.max_init_times
;
770 asoc
->max_send_times
= m
->sctp_ep
.max_send_times
;
771 asoc
->def_net_failure
= m
->sctp_ep
.def_net_failure
;
773 /* ECN Nonce initialization */
774 asoc
->ecn_nonce_allowed
= 0;
775 asoc
->receiver_nonce_sum
= 1;
776 asoc
->nonce_sum_expect_base
= 1;
777 asoc
->nonce_sum_check
= 1;
778 asoc
->nonce_resync_tsn
= 0;
779 asoc
->nonce_wait_for_ecne
= 0;
780 asoc
->nonce_wait_tsn
= 0;
782 if (m
->sctp_flags
& SCTP_PCB_FLAGS_BOUND_V6
) {
786 /* Its a V6 socket */
787 inp6
= (struct in6pcb
*)m
;
788 asoc
->ipv6_addr_legal
= 1;
789 /* Now look at the binding flag to see if V4 will be legal */
791 #if defined(__OpenBSD__)
792 (0) /* we always do dual bind */
793 #elif defined (__NetBSD__)
794 (inp6
->in6p_flags
& IN6P_IPV6_V6ONLY
)
796 (inp6
->inp_flags
& IN6P_IPV6_V6ONLY
)
799 asoc
->ipv4_addr_legal
= 1;
801 /* V4 addresses are NOT legal on the association */
802 asoc
->ipv4_addr_legal
= 0;
805 /* Its a V4 socket, no - V6 */
806 asoc
->ipv4_addr_legal
= 1;
807 asoc
->ipv6_addr_legal
= 0;
811 asoc
->my_rwnd
= max(m
->sctp_socket
->so_rcv
.ssb_hiwat
, SCTP_MINIMAL_RWND
);
812 asoc
->peers_rwnd
= m
->sctp_socket
->so_rcv
.ssb_hiwat
;
814 asoc
->smallest_mtu
= m
->sctp_frag_point
;
815 asoc
->minrto
= m
->sctp_ep
.sctp_minrto
;
816 asoc
->maxrto
= m
->sctp_ep
.sctp_maxrto
;
818 LIST_INIT(&asoc
->sctp_local_addr_list
);
819 TAILQ_INIT(&asoc
->nets
);
820 TAILQ_INIT(&asoc
->pending_reply_queue
);
821 asoc
->last_asconf_ack_sent
= NULL
;
822 /* Setup to fill the hb random cache at first HB */
823 asoc
->hb_random_idx
= 4;
825 asoc
->sctp_autoclose_ticks
= m
->sctp_ep
.auto_close_time
;
828 * Now the stream parameters, here we allocate space for all
829 * streams that we request by default.
831 asoc
->streamoutcnt
= asoc
->pre_open_streams
=
832 m
->sctp_ep
.pre_open_stream_count
;
833 asoc
->strmout
= kmalloc(asoc
->streamoutcnt
* sizeof(struct sctp_stream_out
),
835 if (asoc
->strmout
== NULL
) {
836 /* big trouble no memory */
839 for (i
= 0; i
< asoc
->streamoutcnt
; i
++) {
841 * inbound side must be set to 0xffff,
842 * also NOTE when we get the INIT-ACK back (for INIT sender)
843 * we MUST reduce the count (streamoutcnt) but first check
844 * if we sent to any of the upper streams that were dropped
845 * (if some were). Those that were dropped must be notified
846 * to the upper layer as failed to send.
848 asoc
->strmout
[i
].next_sequence_sent
= 0x0;
849 TAILQ_INIT(&asoc
->strmout
[i
].outqueue
);
850 asoc
->strmout
[i
].stream_no
= i
;
851 asoc
->strmout
[i
].next_spoke
.tqe_next
= 0;
852 asoc
->strmout
[i
].next_spoke
.tqe_prev
= 0;
854 /* Now the mapping array */
855 asoc
->mapping_array_size
= SCTP_INITIAL_MAPPING_ARRAY
;
857 asoc
->mapping_array
= kmalloc(SCTP_INITIAL_MAPPING_ARRAY
, M_PCB
,
860 asoc
->mapping_array
= kmalloc(asoc
->mapping_array_size
, M_PCB
,
863 if (asoc
->mapping_array
== NULL
) {
864 kfree(asoc
->strmout
, M_PCB
);
867 memset(asoc
->mapping_array
, 0, asoc
->mapping_array_size
);
868 /* Now the init of the other outqueues */
869 TAILQ_INIT(&asoc
->out_wheel
);
870 TAILQ_INIT(&asoc
->control_send_queue
);
871 TAILQ_INIT(&asoc
->send_queue
);
872 TAILQ_INIT(&asoc
->sent_queue
);
873 TAILQ_INIT(&asoc
->reasmqueue
);
874 TAILQ_INIT(&asoc
->delivery_queue
);
875 asoc
->max_inbound_streams
= m
->sctp_ep
.max_open_streams_intome
;
877 TAILQ_INIT(&asoc
->asconf_queue
);
/*
 * Grow the association's TSN mapping array by SCTP_MAPPING_ARRAY_INCR
 * bytes: allocate a larger zeroed buffer, copy the old contents over,
 * free the old buffer, and update the association's pointer and size.
 * On allocation failure the old array is left untouched.
 * NOTE(review): the two kmalloc calls below appear to be alternate
 * #ifdef branches (platform-specific allocator signatures) whose
 * preprocessor lines are not shown in this extract -- confirm against
 * the full source.
 */
882 sctp_expand_mapping_array(struct sctp_association
*asoc
)
884 /* mapping array needs to grow */
888 new_size
= asoc
->mapping_array_size
+ SCTP_MAPPING_ARRAY_INCR
;
890 new_array
= kmalloc(asoc
->mapping_array_size
+ SCTP_MAPPING_ARRAY_INCR
,
/* M_NOWAIT: may return NULL; caller keeps the old array on failure. */
893 new_array
= kmalloc(new_size
, M_PCB
, M_NOWAIT
);
895 if (new_array
== NULL
) {
896 /* can't get more, forget it */
897 kprintf("No memory for expansion of SCTP mapping array %d\n",
/* Zero the new buffer, preserve old contents, then swap and free. */
901 memset(new_array
, 0, new_size
);
902 memcpy(new_array
, asoc
->mapping_array
, asoc
->mapping_array_size
);
903 kfree(asoc
->mapping_array
, M_PCB
);
904 asoc
->mapping_array
= new_array
;
905 asoc
->mapping_array_size
= new_size
;
910 sctp_timeout_handler(void *t
)
912 struct sctp_inpcb
*inp
;
913 struct sctp_tcb
*stcb
;
914 struct sctp_nets
*net
;
915 struct sctp_timer
*tmr
;
917 #if defined(__APPLE__)
918 boolean_t funnel_state
;
920 /* get BSD kernel funnel/mutex */
921 funnel_state
= thread_funnel_set(network_flock
, TRUE
);
925 tmr
= (struct sctp_timer
*)t
;
926 inp
= (struct sctp_inpcb
*)tmr
->ep
;
927 stcb
= (struct sctp_tcb
*)tmr
->tcb
;
928 net
= (struct sctp_nets
*)tmr
->net
;
932 #ifdef SCTP_AUDITING_ENABLED
933 sctp_audit_log(0xF0, (u_int8_t
)tmr
->type
);
934 sctp_auditing(3, inp
, stcb
, net
);
936 sctp_pegs
[SCTP_TIMERS_EXP
]++;
944 if (inp
->sctp_socket
== 0) {
946 #if defined(__APPLE__)
947 /* release BSD kernel funnel/mutex */
948 thread_funnel_set(network_flock
, FALSE
);
950 SCTP_INP_WUNLOCK(inp
);
954 if (stcb
->asoc
.state
== 0) {
956 #if defined(__APPLE__)
957 /* release BSD kernel funnel/mutex */
958 thread_funnel_set(network_flock
, FALSE
);
960 SCTP_INP_WUNLOCK(inp
);
965 if (sctp_debug_on
& SCTP_DEBUG_TIMER1
) {
966 kprintf("Timer type %d goes off\n", tmr
->type
);
968 #endif /* SCTP_DEBUG */
970 if (!callout_active(&tmr
->timer
)) {
972 #if defined(__APPLE__)
973 /* release BSD kernel funnel/mutex */
974 thread_funnel_set(network_flock
, FALSE
);
976 SCTP_INP_WUNLOCK(inp
);
980 #if defined(__APPLE__)
981 /* clear the callout pending status here */
982 callout_stop(&tmr
->timer
);
987 SCTP_INP_INCR_REF(inp
);
988 SCTP_INP_WUNLOCK(inp
);
992 case SCTP_TIMER_TYPE_ITERATOR
:
994 struct sctp_iterator
*it
;
995 it
= (struct sctp_iterator
*)inp
;
996 sctp_iterator_timer(it
);
999 /* call the handler for the appropriate timer type */
1000 case SCTP_TIMER_TYPE_SEND
:
1001 sctp_pegs
[SCTP_TMIT_TIMER
]++;
1002 stcb
->asoc
.num_send_timers_up
--;
1003 if (stcb
->asoc
.num_send_timers_up
< 0) {
1004 stcb
->asoc
.num_send_timers_up
= 0;
1006 if (sctp_t3rxt_timer(inp
, stcb
, net
)) {
1007 /* no need to unlock on tcb its gone */
1011 #ifdef SCTP_AUDITING_ENABLED
1012 sctp_auditing(4, inp
, stcb
, net
);
1014 sctp_chunk_output(inp
, stcb
, 1);
1015 if ((stcb
->asoc
.num_send_timers_up
== 0) &&
1016 (stcb
->asoc
.sent_queue_cnt
> 0)
1018 struct sctp_tmit_chunk
*chk
;
1020 * safeguard. If there on some on the sent queue
1021 * somewhere but no timers running something is
1022 * wrong... so we start a timer on the first chunk
1023 * on the send queue on whatever net it is sent to.
1025 sctp_pegs
[SCTP_T3_SAFEGRD
]++;
1026 chk
= TAILQ_FIRST(&stcb
->asoc
.sent_queue
);
1027 sctp_timer_start(SCTP_TIMER_TYPE_SEND
, inp
, stcb
,
1031 case SCTP_TIMER_TYPE_INIT
:
1032 if (sctp_t1init_timer(inp
, stcb
, net
)) {
1033 /* no need to unlock on tcb its gone */
1036 /* We do output but not here */
1039 case SCTP_TIMER_TYPE_RECV
:
1040 sctp_pegs
[SCTP_RECV_TIMER
]++;
1041 sctp_send_sack(stcb
);
1042 #ifdef SCTP_AUDITING_ENABLED
1043 sctp_auditing(4, inp
, stcb
, net
);
1045 sctp_chunk_output(inp
, stcb
, 4);
1047 case SCTP_TIMER_TYPE_SHUTDOWN
:
1048 if (sctp_shutdown_timer(inp
, stcb
, net
) ) {
1049 /* no need to unlock on tcb its gone */
1052 #ifdef SCTP_AUDITING_ENABLED
1053 sctp_auditing(4, inp
, stcb
, net
);
1055 sctp_chunk_output(inp
, stcb
, 5);
1057 case SCTP_TIMER_TYPE_HEARTBEAT
:
1058 if (sctp_heartbeat_timer(inp
, stcb
, net
)) {
1059 /* no need to unlock on tcb its gone */
1062 #ifdef SCTP_AUDITING_ENABLED
1063 sctp_auditing(4, inp
, stcb
, net
);
1065 sctp_chunk_output(inp
, stcb
, 6);
1067 case SCTP_TIMER_TYPE_COOKIE
:
1068 if (sctp_cookie_timer(inp
, stcb
, net
)) {
1069 /* no need to unlock on tcb its gone */
1072 #ifdef SCTP_AUDITING_ENABLED
1073 sctp_auditing(4, inp
, stcb
, net
);
1075 sctp_chunk_output(inp
, stcb
, 1);
1077 case SCTP_TIMER_TYPE_NEWCOOKIE
:
1081 SCTP_GETTIME_TIMEVAL(&tv
);
1082 SCTP_INP_WLOCK(inp
);
1083 inp
->sctp_ep
.time_of_secret_change
= tv
.tv_sec
;
1084 inp
->sctp_ep
.last_secret_number
=
1085 inp
->sctp_ep
.current_secret_number
;
1086 inp
->sctp_ep
.current_secret_number
++;
1087 if (inp
->sctp_ep
.current_secret_number
>=
1088 SCTP_HOW_MANY_SECRETS
) {
1089 inp
->sctp_ep
.current_secret_number
= 0;
1091 secret
= (int)inp
->sctp_ep
.current_secret_number
;
1092 for (i
= 0; i
< SCTP_NUMBER_OF_SECRETS
; i
++) {
1093 inp
->sctp_ep
.secret_key
[secret
][i
] =
1094 sctp_select_initial_TSN(&inp
->sctp_ep
);
1096 SCTP_INP_WUNLOCK(inp
);
1097 sctp_timer_start(SCTP_TIMER_TYPE_NEWCOOKIE
, inp
, stcb
, net
);
1101 case SCTP_TIMER_TYPE_PATHMTURAISE
:
1102 sctp_pathmtu_timer(inp
, stcb
, net
);
1105 case SCTP_TIMER_TYPE_SHUTDOWNACK
:
1106 if (sctp_shutdownack_timer(inp
, stcb
, net
)) {
1107 /* no need to unlock on tcb its gone */
1110 #ifdef SCTP_AUDITING_ENABLED
1111 sctp_auditing(4, inp
, stcb
, net
);
1113 sctp_chunk_output(inp
, stcb
, 7);
1115 case SCTP_TIMER_TYPE_SHUTDOWNGUARD
:
1116 sctp_abort_an_association(inp
, stcb
,
1117 SCTP_SHUTDOWN_GUARD_EXPIRES
, NULL
);
1118 /* no need to unlock on tcb its gone */
1122 case SCTP_TIMER_TYPE_STRRESET
:
1123 if (sctp_strreset_timer(inp
, stcb
, net
)) {
1124 /* no need to unlock on tcb its gone */
1127 sctp_chunk_output(inp
, stcb
, 9);
1130 case SCTP_TIMER_TYPE_ASCONF
:
1131 if (sctp_asconf_timer(inp
, stcb
, net
)) {
1132 /* no need to unlock on tcb its gone */
1135 #ifdef SCTP_AUDITING_ENABLED
1136 sctp_auditing(4, inp
, stcb
, net
);
1138 sctp_chunk_output(inp
, stcb
, 8);
1141 case SCTP_TIMER_TYPE_AUTOCLOSE
:
1142 sctp_autoclose_timer(inp
, stcb
, net
);
1143 sctp_chunk_output(inp
, stcb
, 10);
1146 case SCTP_TIMER_TYPE_INPKILL
:
1147 /* special case, take away our
1148 * increment since WE are the killer
1150 SCTP_INP_WLOCK(inp
);
1151 SCTP_INP_DECR_REF(inp
);
1152 SCTP_INP_WUNLOCK(inp
);
1153 sctp_timer_stop(SCTP_TIMER_TYPE_INPKILL
, inp
, NULL
, NULL
);
1154 sctp_inpcb_free(inp
, 1);
1159 if (sctp_debug_on
& SCTP_DEBUG_TIMER1
) {
1160 kprintf("sctp_timeout_handler:unknown timer %d\n",
1163 #endif /* SCTP_DEBUG */
1166 #ifdef SCTP_AUDITING_ENABLED
1167 sctp_audit_log(0xF1, (u_int8_t
)tmr
->type
);
1168 sctp_auditing(5, inp
, stcb
, net
);
1172 * Now we need to clean up the control chunk chain if an
1173 * ECNE is on it. It must be marked as UNSENT again so next
1174 * call will continue to send it until such time that we get
1175 * a CWR, to remove it. It is, however, less likely that we
1176 * will find a ecn echo on the chain though.
1178 sctp_fix_ecn_echo(&stcb
->asoc
);
1181 SCTP_TCB_UNLOCK(stcb
);
1184 SCTP_INP_WLOCK(inp
);
1185 SCTP_INP_DECR_REF(inp
);
1186 SCTP_INP_WUNLOCK(inp
);
1191 if (sctp_debug_on
& SCTP_DEBUG_TIMER1
) {
1192 kprintf("Timer now complete (type %d)\n", typ
);
1194 #endif /* SCTP_DEBUG */
1197 #if defined(__APPLE__)
1198 /* release BSD kernel funnel/mutex */
1199 thread_funnel_set(network_flock
, FALSE
);
1204 sctp_timer_start(int t_type
, struct sctp_inpcb
*inp
, struct sctp_tcb
*stcb
,
1205 struct sctp_nets
*net
)
1208 struct sctp_timer
*tmr
;
1217 case SCTP_TIMER_TYPE_ITERATOR
:
1219 struct sctp_iterator
*it
;
1220 it
= (struct sctp_iterator
*)inp
;
1222 to_ticks
= SCTP_ITERATOR_TICKS
;
1225 case SCTP_TIMER_TYPE_SEND
:
1226 /* Here we use the RTO timer */
1229 if ((stcb
== NULL
) || (net
== NULL
)) {
1232 tmr
= &net
->rxt_timer
;
1233 if (net
->RTO
== 0) {
1234 rto_val
= stcb
->asoc
.initial_rto
;
1238 to_ticks
= MSEC_TO_TICKS(rto_val
);
1241 case SCTP_TIMER_TYPE_INIT
:
1243 * Here we use the INIT timer default
1244 * usually about 1 minute.
1246 if ((stcb
== NULL
) || (net
== NULL
)) {
1249 tmr
= &net
->rxt_timer
;
1250 if (net
->RTO
== 0) {
1251 to_ticks
= MSEC_TO_TICKS(stcb
->asoc
.initial_rto
);
1253 to_ticks
= MSEC_TO_TICKS(net
->RTO
);
1256 case SCTP_TIMER_TYPE_RECV
:
1258 * Here we use the Delayed-Ack timer value from the inp
1259 * usually about 200ms.
1264 tmr
= &stcb
->asoc
.dack_timer
;
1265 to_ticks
= inp
->sctp_ep
.sctp_timeoutticks
[SCTP_TIMER_RECV
];
1267 case SCTP_TIMER_TYPE_SHUTDOWN
:
1268 /* Here we use the RTO of the destination. */
1269 if ((stcb
== NULL
) || (net
== NULL
)) {
1273 if (net
->RTO
== 0) {
1274 to_ticks
= MSEC_TO_TICKS(stcb
->asoc
.initial_rto
);
1276 to_ticks
= MSEC_TO_TICKS(net
->RTO
);
1278 tmr
= &net
->rxt_timer
;
1280 case SCTP_TIMER_TYPE_HEARTBEAT
:
1282 * the net is used here so that we can add in the RTO.
1283 * Even though we use a different timer. We also add the
1284 * HB timer PLUS a random jitter.
1291 uint8_t this_random
;
1292 int cnt_of_unconf
=0;
1293 struct sctp_nets
*lnet
;
1295 TAILQ_FOREACH(lnet
, &stcb
->asoc
.nets
, sctp_next
) {
1296 if (lnet
->dest_state
& SCTP_ADDR_UNCONFIRMED
) {
1301 if (sctp_debug_on
& SCTP_DEBUG_TIMER1
) {
1302 kprintf("HB timer to start unconfirmed:%d hb_delay:%d\n",
1303 cnt_of_unconf
, stcb
->asoc
.heart_beat_delay
);
1306 if (stcb
->asoc
.hb_random_idx
> 3) {
1307 rndval
= sctp_select_initial_TSN(&inp
->sctp_ep
);
1308 memcpy(stcb
->asoc
.hb_random_values
, &rndval
,
1309 sizeof(stcb
->asoc
.hb_random_values
));
1310 this_random
= stcb
->asoc
.hb_random_values
[0];
1311 stcb
->asoc
.hb_random_idx
= 0;
1312 stcb
->asoc
.hb_ect_randombit
= 0;
1314 this_random
= stcb
->asoc
.hb_random_values
[stcb
->asoc
.hb_random_idx
];
1315 stcb
->asoc
.hb_random_idx
++;
1316 stcb
->asoc
.hb_ect_randombit
= 0;
1319 * this_random will be 0 - 256 ms
1322 if ((stcb
->asoc
.heart_beat_delay
== 0) &&
1323 (cnt_of_unconf
== 0)) {
1324 /* no HB on this inp after confirmations */
1328 struct sctp_nets
*lnet
;
1330 delay
= stcb
->asoc
.heart_beat_delay
;
1331 TAILQ_FOREACH(lnet
, &stcb
->asoc
.nets
, sctp_next
) {
1332 if ((lnet
->dest_state
& SCTP_ADDR_UNCONFIRMED
) &&
1333 ((lnet
->dest_state
& SCTP_ADDR_OUT_OF_SCOPE
) == 0) &&
1334 (lnet
->dest_state
& SCTP_ADDR_REACHABLE
)) {
1338 if (net
->RTO
== 0) {
1339 /* Never been checked */
1340 to_ticks
= this_random
+ stcb
->asoc
.initial_rto
+ delay
;
1342 /* set rto_val to the ms */
1343 to_ticks
= delay
+ net
->RTO
+ this_random
;
1346 if (cnt_of_unconf
) {
1347 to_ticks
= this_random
+ stcb
->asoc
.initial_rto
;
1349 to_ticks
= stcb
->asoc
.heart_beat_delay
+ this_random
+ stcb
->asoc
.initial_rto
;
1353 * Now we must convert the to_ticks that are now in
1359 if (sctp_debug_on
& SCTP_DEBUG_TIMER1
) {
1360 kprintf("Timer to expire in %d ticks\n", to_ticks
);
1363 tmr
= &stcb
->asoc
.hb_timer
;
1366 case SCTP_TIMER_TYPE_COOKIE
:
1368 * Here we can use the RTO timer from the network since
1369 * one RTT was compelete. If a retran happened then we will
1370 * be using the RTO initial value.
1372 if ((stcb
== NULL
) || (net
== NULL
)) {
1375 if (net
->RTO
== 0) {
1376 to_ticks
= MSEC_TO_TICKS(stcb
->asoc
.initial_rto
);
1378 to_ticks
= MSEC_TO_TICKS(net
->RTO
);
1380 tmr
= &net
->rxt_timer
;
1382 case SCTP_TIMER_TYPE_NEWCOOKIE
:
1384 * nothing needed but the endpoint here
1385 * ususually about 60 minutes.
1387 tmr
= &inp
->sctp_ep
.signature_change
;
1388 to_ticks
= inp
->sctp_ep
.sctp_timeoutticks
[SCTP_TIMER_SIGNATURE
];
1390 case SCTP_TIMER_TYPE_INPKILL
:
1392 * The inp is setup to die. We re-use the
1393 * signature_chage timer since that has
1394 * stopped and we are in the GONE state.
1396 tmr
= &inp
->sctp_ep
.signature_change
;
1397 to_ticks
= (SCTP_INP_KILL_TIMEOUT
* hz
) / 1000;
1399 case SCTP_TIMER_TYPE_PATHMTURAISE
:
1401 * Here we use the value found in the EP for PMTU
1402 * ususually about 10 minutes.
1410 to_ticks
= inp
->sctp_ep
.sctp_timeoutticks
[SCTP_TIMER_PMTU
];
1411 tmr
= &net
->pmtu_timer
;
1413 case SCTP_TIMER_TYPE_SHUTDOWNACK
:
1414 /* Here we use the RTO of the destination */
1415 if ((stcb
== NULL
) || (net
== NULL
)) {
1418 if (net
->RTO
== 0) {
1419 to_ticks
= MSEC_TO_TICKS(stcb
->asoc
.initial_rto
);
1421 to_ticks
= MSEC_TO_TICKS(net
->RTO
);
1423 tmr
= &net
->rxt_timer
;
1425 case SCTP_TIMER_TYPE_SHUTDOWNGUARD
:
1427 * Here we use the endpoints shutdown guard timer
1428 * usually about 3 minutes.
1433 to_ticks
= inp
->sctp_ep
.sctp_timeoutticks
[SCTP_TIMER_MAXSHUTDOWN
];
1434 tmr
= &stcb
->asoc
.shut_guard_timer
;
1436 case SCTP_TIMER_TYPE_STRRESET
:
1438 * Here the timer comes from the inp
1439 * but its value is from the RTO.
1441 if ((stcb
== NULL
) || (net
== NULL
)) {
1444 if (net
->RTO
== 0) {
1445 to_ticks
= MSEC_TO_TICKS(stcb
->asoc
.initial_rto
);
1447 to_ticks
= MSEC_TO_TICKS(net
->RTO
);
1449 tmr
= &stcb
->asoc
.strreset_timer
;
1452 case SCTP_TIMER_TYPE_ASCONF
:
1454 * Here the timer comes from the inp
1455 * but its value is from the RTO.
1457 if ((stcb
== NULL
) || (net
== NULL
)) {
1460 if (net
->RTO
== 0) {
1461 to_ticks
= MSEC_TO_TICKS(stcb
->asoc
.initial_rto
);
1463 to_ticks
= MSEC_TO_TICKS(net
->RTO
);
1465 tmr
= &stcb
->asoc
.asconf_timer
;
1467 case SCTP_TIMER_TYPE_AUTOCLOSE
:
1471 if (stcb
->asoc
.sctp_autoclose_ticks
== 0) {
1472 /* Really an error since stcb is NOT set to autoclose */
1475 to_ticks
= stcb
->asoc
.sctp_autoclose_ticks
;
1476 tmr
= &stcb
->asoc
.autoclose_timer
;
1480 if (sctp_debug_on
& SCTP_DEBUG_TIMER1
) {
1481 kprintf("sctp_timer_start:Unknown timer type %d\n",
1484 #endif /* SCTP_DEBUG */
1488 if ((to_ticks
<= 0) || (tmr
== NULL
)) {
1490 if (sctp_debug_on
& SCTP_DEBUG_TIMER1
) {
1491 kprintf("sctp_timer_start:%d:software error to_ticks:%d tmr:%p not set ??\n",
1492 t_type
, to_ticks
, tmr
);
1494 #endif /* SCTP_DEBUG */
1497 if (callout_pending(&tmr
->timer
)) {
1499 * we do NOT allow you to have it already running.
1500 * if it is we leave the current one up unchanged
1504 /* At this point we can proceed */
1505 if (t_type
== SCTP_TIMER_TYPE_SEND
) {
1506 stcb
->asoc
.num_send_timers_up
++;
1509 tmr
->ep
= (void *)inp
;
1510 tmr
->tcb
= (void *)stcb
;
1511 tmr
->net
= (void *)net
;
1512 callout_reset(&tmr
->timer
, to_ticks
, sctp_timeout_handler
, tmr
);
1517 sctp_timer_stop(int t_type
, struct sctp_inpcb
*inp
, struct sctp_tcb
*stcb
,
1518 struct sctp_nets
*net
)
1520 struct sctp_timer
*tmr
;
1527 case SCTP_TIMER_TYPE_ITERATOR
:
1529 struct sctp_iterator
*it
;
1530 it
= (struct sctp_iterator
*)inp
;
1534 case SCTP_TIMER_TYPE_SEND
:
1535 if ((stcb
== NULL
) || (net
== NULL
)) {
1538 tmr
= &net
->rxt_timer
;
1540 case SCTP_TIMER_TYPE_INIT
:
1541 if ((stcb
== NULL
) || (net
== NULL
)) {
1544 tmr
= &net
->rxt_timer
;
1546 case SCTP_TIMER_TYPE_RECV
:
1550 tmr
= &stcb
->asoc
.dack_timer
;
1552 case SCTP_TIMER_TYPE_SHUTDOWN
:
1553 if ((stcb
== NULL
) || (net
== NULL
)) {
1556 tmr
= &net
->rxt_timer
;
1558 case SCTP_TIMER_TYPE_HEARTBEAT
:
1562 tmr
= &stcb
->asoc
.hb_timer
;
1564 case SCTP_TIMER_TYPE_COOKIE
:
1565 if ((stcb
== NULL
) || (net
== NULL
)) {
1568 tmr
= &net
->rxt_timer
;
1570 case SCTP_TIMER_TYPE_NEWCOOKIE
:
1571 /* nothing needed but the endpoint here */
1572 tmr
= &inp
->sctp_ep
.signature_change
;
1573 /* We re-use the newcookie timer for
1574 * the INP kill timer. We must assure
1575 * that we do not kill it by accident.
1578 case SCTP_TIMER_TYPE_INPKILL
:
1580 * The inp is setup to die. We re-use the
1581 * signature_chage timer since that has
1582 * stopped and we are in the GONE state.
1584 tmr
= &inp
->sctp_ep
.signature_change
;
1586 case SCTP_TIMER_TYPE_PATHMTURAISE
:
1593 tmr
= &net
->pmtu_timer
;
1595 case SCTP_TIMER_TYPE_SHUTDOWNACK
:
1596 if ((stcb
== NULL
) || (net
== NULL
)) {
1599 tmr
= &net
->rxt_timer
;
1601 case SCTP_TIMER_TYPE_SHUTDOWNGUARD
:
1605 tmr
= &stcb
->asoc
.shut_guard_timer
;
1607 case SCTP_TIMER_TYPE_STRRESET
:
1611 tmr
= &stcb
->asoc
.strreset_timer
;
1613 case SCTP_TIMER_TYPE_ASCONF
:
1617 tmr
= &stcb
->asoc
.asconf_timer
;
1619 case SCTP_TIMER_TYPE_AUTOCLOSE
:
1623 tmr
= &stcb
->asoc
.autoclose_timer
;
1627 if (sctp_debug_on
& SCTP_DEBUG_TIMER1
) {
1628 kprintf("sctp_timer_stop:Unknown timer type %d\n",
1631 #endif /* SCTP_DEBUG */
1637 if ((tmr
->type
!= t_type
) && tmr
->type
) {
1639 * Ok we have a timer that is under joint use. Cookie timer
1640 * per chance with the SEND timer. We therefore are NOT
1641 * running the timer that the caller wants stopped. So just
1646 if (t_type
== SCTP_TIMER_TYPE_SEND
) {
1647 stcb
->asoc
.num_send_timers_up
--;
1648 if (stcb
->asoc
.num_send_timers_up
< 0) {
1649 stcb
->asoc
.num_send_timers_up
= 0;
1652 callout_stop(&tmr
->timer
);
1656 #ifdef SCTP_USE_ADLER32
#ifndef SCTP_ADLER32_BASE
#define SCTP_ADLER32_BASE 65521U	/* largest prime < 2^16 (RFC 1950) */
#endif

/*
 * Fold 'len' bytes at 'buf' into a running Adler-32 checksum.
 *
 * 'adler' is the current checksum state (low 16 bits = s1, high 16 bits
 * = s2; the standard initial value is 1). Returns the updated checksum
 * (s2 << 16) + s1 covering the new bytes as well.
 */
static uint32_t
update_adler32(uint32_t adler, uint8_t *buf, int32_t len)
{
	uint32_t s1 = adler & 0xffff;
	uint32_t s2 = (adler >> 16) & 0xffff;
	int32_t n;

	for (n = 0; n < len; n++, buf++) {
		/* s1 = (s1 + buf[n]) % BASE */
		s1 += *buf;
		/*
		 * now if we need to, we do a mod by subtracting. It seems
		 * a bit faster since I really will only ever do one subtract
		 * at the MOST, since buf[n] is a max of 255.
		 */
		if (s1 >= SCTP_ADLER32_BASE) {
			s1 -= SCTP_ADLER32_BASE;
		}
		/* s2 = (s2 + s1) % BASE */
		s2 += s1;
		/*
		 * again, it is more efficient (it seems) to subtract since
		 * the most s2 will ever be is (BASE-1 + BASE-1) in the worst
		 * case. This would then be (2 * BASE) - 2, which will still
		 * only do one subtract. On Intel this is much better to do
		 * this way and avoid the divide. Have not -pg'd on sparc.
		 */
		if (s2 >= SCTP_ADLER32_BASE) {
			s2 -= SCTP_ADLER32_BASE;
		}
	}
	/* Return the adler32 of the bytes buf[0..len-1] */
	return ((s2 << 16) + s1);
}
1698 sctp_calculate_len(struct mbuf
*m
)
1710 #if defined(SCTP_WITH_NO_CSUM)
1713 sctp_calculate_sum(struct mbuf
*m
, int32_t *pktlen
, uint32_t offset
)
1716 * given a mbuf chain with a packetheader offset by 'offset'
1717 * pointing at a sctphdr (with csum set to 0) go through
1718 * the chain of m_next's and calculate the SCTP checksum.
1719 * This is currently Adler32 but will change to CRC32x
1720 * soon. Also has a side bonus calculate the total length
1721 * of the mbuf chain.
1722 * Note: if offset is greater than the total mbuf length,
1723 * checksum=1, pktlen=0 is returned (ie. no real error code)
1727 *pktlen
= sctp_calculate_len(m
);
1731 #elif defined(SCTP_USE_INCHKSUM)
1733 #include <machine/in_cksum.h>
1736 sctp_calculate_sum(struct mbuf
*m
, int32_t *pktlen
, uint32_t offset
)
1739 * given a mbuf chain with a packetheader offset by 'offset'
1740 * pointing at a sctphdr (with csum set to 0) go through
1741 * the chain of m_next's and calculate the SCTP checksum.
1742 * This is currently Adler32 but will change to CRC32x
1743 * soon. Also has a side bonus calculate the total length
1744 * of the mbuf chain.
1745 * Note: if offset is greater than the total mbuf length,
1746 * checksum=1, pktlen=0 is returned (ie. no real error code)
1750 uint32_t the_sum
, retsum
;
1757 the_sum
= (uint32_t)(in_cksum_skip(m
, tlen
, offset
));
1759 *pktlen
= (tlen
-offset
);
1760 retsum
= htons(the_sum
);
1767 sctp_calculate_sum(struct mbuf
*m
, int32_t *pktlen
, uint32_t offset
)
1770 * given a mbuf chain with a packetheader offset by 'offset'
1771 * pointing at a sctphdr (with csum set to 0) go through
1772 * the chain of m_next's and calculate the SCTP checksum.
1773 * This is currently Adler32 but will change to CRC32x
1774 * soon. Also has a side bonus calculate the total length
1775 * of the mbuf chain.
1776 * Note: if offset is greater than the total mbuf length,
1777 * checksum=1, pktlen=0 is returned (ie. no real error code)
1780 #ifdef SCTP_USE_ADLER32
1783 uint32_t base
= 0xffffffff;
1784 #endif /* SCTP_USE_ADLER32 */
1787 /* find the correct mbuf and offset into mbuf */
1788 while ((at
!= NULL
) && (offset
> (uint32_t)at
->m_len
)) {
1789 offset
-= at
->m_len
; /* update remaining offset left */
1793 while (at
!= NULL
) {
1794 #ifdef SCTP_USE_ADLER32
1795 base
= update_adler32(base
, at
->m_data
+ offset
,
1796 at
->m_len
- offset
);
1798 base
= update_crc32(base
, at
->m_data
+ offset
,
1799 at
->m_len
- offset
);
1800 #endif /* SCTP_USE_ADLER32 */
1801 tlen
+= at
->m_len
- offset
;
1802 /* we only offset once into the first mbuf */
1808 if (pktlen
!= NULL
) {
1811 #ifdef SCTP_USE_ADLER32
1816 base
= sctp_csum_finalize(base
);
1825 sctp_mtu_size_reset(struct sctp_inpcb
*inp
,
1826 struct sctp_association
*asoc
, u_long mtu
)
1829 * Reset the P-MTU size on this association, this involves changing
1830 * the asoc MTU, going through ANY chunk+overhead larger than mtu
1831 * to allow the DF flag to be cleared.
1833 struct sctp_tmit_chunk
*chk
;
1834 struct sctp_stream_out
*strm
;
1835 unsigned int eff_mtu
, ovh
;
1836 asoc
->smallest_mtu
= mtu
;
1837 if (inp
->sctp_flags
& SCTP_PCB_FLAGS_BOUND_V6
) {
1838 ovh
= SCTP_MIN_OVERHEAD
;
1840 ovh
= SCTP_MIN_V4_OVERHEAD
;
1842 eff_mtu
= mtu
- ovh
;
1843 /* Now mark any chunks that need to let IP fragment */
1844 TAILQ_FOREACH(strm
, &asoc
->out_wheel
, next_spoke
) {
1845 TAILQ_FOREACH(chk
, &strm
->outqueue
, sctp_next
) {
1846 if (chk
->send_size
> eff_mtu
) {
1847 chk
->flags
&= SCTP_DONT_FRAGMENT
;
1848 chk
->flags
|= CHUNK_FLAGS_FRAGMENT_OK
;
1852 TAILQ_FOREACH(chk
, &asoc
->send_queue
, sctp_next
) {
1853 if (chk
->send_size
> eff_mtu
) {
1854 chk
->flags
&= SCTP_DONT_FRAGMENT
;
1855 chk
->flags
|= CHUNK_FLAGS_FRAGMENT_OK
;
1858 TAILQ_FOREACH(chk
, &asoc
->sent_queue
, sctp_next
) {
1859 if (chk
->send_size
> eff_mtu
) {
1860 chk
->flags
&= SCTP_DONT_FRAGMENT
;
1861 chk
->flags
|= CHUNK_FLAGS_FRAGMENT_OK
;
1868 * given an association and starting time of the current RTT period
1869 * return RTO in number of usecs
1870 * net should point to the current network
1873 sctp_calculate_rto(struct sctp_tcb
*stcb
,
1874 struct sctp_association
*asoc
,
1875 struct sctp_nets
*net
,
1876 struct timeval
*old
)
1879 * given an association and the starting time of the current RTT
1880 * period (in value1/value2) return RTO in number of usecs.
1884 unsigned int new_rto
= 0;
1885 int first_measure
= 0;
1888 /************************/
1889 /* 1. calculate new RTT */
1890 /************************/
1891 /* get the current time */
1892 SCTP_GETTIME_TIMEVAL(&now
);
1893 /* compute the RTT value */
1894 if ((u_long
)now
.tv_sec
> (u_long
)old
->tv_sec
) {
1895 calc_time
= ((u_long
)now
.tv_sec
- (u_long
)old
->tv_sec
) * 1000;
1896 if ((u_long
)now
.tv_usec
> (u_long
)old
->tv_usec
) {
1897 calc_time
+= (((u_long
)now
.tv_usec
-
1898 (u_long
)old
->tv_usec
)/1000);
1899 } else if ((u_long
)now
.tv_usec
< (u_long
)old
->tv_usec
) {
1900 /* Borrow 1,000ms from current calculation */
1902 /* Add in the slop over */
1903 calc_time
+= ((int)now
.tv_usec
/1000);
1904 /* Add in the pre-second ms's */
1905 calc_time
+= (((int)1000000 - (int)old
->tv_usec
)/1000);
1907 } else if ((u_long
)now
.tv_sec
== (u_long
)old
->tv_sec
) {
1908 if ((u_long
)now
.tv_usec
> (u_long
)old
->tv_usec
) {
1909 calc_time
= ((u_long
)now
.tv_usec
-
1910 (u_long
)old
->tv_usec
)/1000;
1911 } else if ((u_long
)now
.tv_usec
< (u_long
)old
->tv_usec
) {
1912 /* impossible .. garbage in nothing out */
1913 return (((net
->lastsa
>> 2) + net
->lastsv
) >> 1);
1915 /* impossible .. garbage in nothing out */
1916 return (((net
->lastsa
>> 2) + net
->lastsv
) >> 1);
1919 /* Clock wrapped? */
1920 return (((net
->lastsa
>> 2) + net
->lastsv
) >> 1);
1922 /***************************/
1923 /* 2. update RTTVAR & SRTT */
1924 /***************************/
1926 /* if (net->lastsv || net->lastsa) {*/
1927 /* per Section 5.3.1 C3 in SCTP */
1928 /* net->lastsv = (int) *//* RTTVAR */
1929 /* (((double)(1.0 - 0.25) * (double)net->lastsv) +
1930 (double)(0.25 * (double)abs(net->lastsa - calc_time)));
1931 net->lastsa = (int) */ /* SRTT */
1932 /*(((double)(1.0 - 0.125) * (double)net->lastsa) +
1933 (double)(0.125 * (double)calc_time));
1935 *//* the first RTT calculation, per C2 Section 5.3.1 */
1936 /* net->lastsa = calc_time; *//* SRTT */
1937 /* net->lastsv = calc_time / 2; *//* RTTVAR */
1939 /* if RTTVAR goes to 0 you set to clock grainularity */
1940 /* if (net->lastsv == 0) {
1941 net->lastsv = SCTP_CLOCK_GRANULARITY;
1943 new_rto = net->lastsa + 4 * net->lastsv;
1946 o_calctime
= calc_time
;
1947 /* this is Van Jacobson's integer version */
1949 calc_time
-= (net
->lastsa
>> 3);
1950 net
->lastsa
+= calc_time
;
1951 if (calc_time
< 0) {
1952 calc_time
= -calc_time
;
1954 calc_time
-= (net
->lastsv
>> 2);
1955 net
->lastsv
+= calc_time
;
1956 if (net
->lastsv
== 0) {
1957 net
->lastsv
= SCTP_CLOCK_GRANULARITY
;
1960 /* First RTO measurment */
1961 net
->lastsa
= calc_time
;
1962 net
->lastsv
= calc_time
>> 1;
1965 new_rto
= ((net
->lastsa
>> 2) + net
->lastsv
) >> 1;
1966 if ((new_rto
> SCTP_SAT_NETWORK_MIN
) &&
1967 (stcb
->asoc
.sat_network_lockout
== 0)) {
1968 stcb
->asoc
.sat_network
= 1;
1969 } else if ((!first_measure
) && stcb
->asoc
.sat_network
) {
1970 stcb
->asoc
.sat_network
= 0;
1971 stcb
->asoc
.sat_network_lockout
= 1;
1973 /* bound it, per C6/C7 in Section 5.3.1 */
1974 if (new_rto
< stcb
->asoc
.minrto
) {
1975 new_rto
= stcb
->asoc
.minrto
;
1977 if (new_rto
> stcb
->asoc
.maxrto
) {
1978 new_rto
= stcb
->asoc
.maxrto
;
1980 /* we are now returning the RTT Smoothed */
1981 return ((u_int32_t
)new_rto
);
1986 * return a pointer to a contiguous piece of data from the given
1987 * mbuf chain starting at 'off' for 'len' bytes. If the desired
1988 * piece spans more than one mbuf, a copy is made at 'ptr'.
1989 * caller must ensure that the buffer size is >= 'len'
1990 * returns NULL if there isn't 'len' bytes in the chain.
1993 sctp_m_getptr(struct mbuf
*m
, int off
, int len
, u_int8_t
*in_ptr
)
1998 if ((off
< 0) || (len
<= 0))
2001 /* find the desired start location */
2002 while ((m
!= NULL
) && (off
> 0)) {
2011 /* is the current mbuf large enough (eg. contiguous)? */
2012 if ((m
->m_len
- off
) >= len
) {
2013 return (mtod(m
, caddr_t
) + off
);
2015 /* else, it spans more than one mbuf, so save a temp copy... */
2016 while ((m
!= NULL
) && (len
> 0)) {
2017 count
= min(m
->m_len
- off
, len
);
2018 bcopy(mtod(m
, caddr_t
) + off
, ptr
, count
);
2024 if ((m
== NULL
) && (len
> 0))
2027 return ((caddr_t
)in_ptr
);
2032 struct sctp_paramhdr
*
2033 sctp_get_next_param(struct mbuf
*m
,
2035 struct sctp_paramhdr
*pull
,
2038 /* This just provides a typed signature to Peter's Pull routine */
2039 return ((struct sctp_paramhdr
*)sctp_m_getptr(m
, offset
, pull_limit
,
2045 sctp_add_pad_tombuf(struct mbuf
*m
, int padlen
)
2048 * add padlen bytes of 0 filled padding to the end of the mbuf.
2049 * If padlen is > 3 this routine will fail.
2056 if (M_TRAILINGSPACE(m
)) {
2059 * We hope the majority of the time we hit here :)
2061 dp
= (u_int8_t
*)(mtod(m
, caddr_t
) + m
->m_len
);
2064 /* Hard way we must grow the mbuf */
2066 MGET(tmp
, MB_DONTWAIT
, MT_DATA
);
2068 /* Out of space GAK! we are in big trouble. */
2071 /* setup and insert in middle */
2072 tmp
->m_next
= m
->m_next
;
2073 tmp
->m_len
= padlen
;
2075 dp
= mtod(tmp
, u_int8_t
*);
2077 /* zero out the pad */
2078 for (i
= 0; i
< padlen
; i
++) {
2086 sctp_pad_lastmbuf(struct mbuf
*m
, int padval
)
2088 /* find the last mbuf in chain and pad it */
2092 if (m_at
->m_next
== NULL
) {
2093 return (sctp_add_pad_tombuf(m_at
, padval
));
2095 m_at
= m_at
->m_next
;
2101 sctp_notify_assoc_change(u_int32_t event
, struct sctp_tcb
*stcb
,
2104 struct mbuf
*m_notify
;
2105 struct sctp_assoc_change
*sac
;
2106 struct sockaddr
*to
;
2107 struct sockaddr_in6 sin6
, lsa6
;
2110 * First if we are are going down dump everything we
2111 * can to the socket rcv queue.
2113 if ((event
== SCTP_SHUTDOWN_COMP
) || (event
== SCTP_COMM_LOST
)) {
2114 sctp_deliver_data(stcb
, &stcb
->asoc
, NULL
, 0);
2118 * For TCP model AND UDP connected sockets we will send
2119 * an error up when an ABORT comes in.
2121 if (((stcb
->sctp_ep
->sctp_flags
& SCTP_PCB_FLAGS_TCPTYPE
) ||
2122 (stcb
->sctp_ep
->sctp_flags
& SCTP_PCB_FLAGS_IN_TCPPOOL
)) &&
2123 (event
== SCTP_COMM_LOST
)) {
2124 stcb
->sctp_socket
->so_error
= ECONNRESET
;
2125 /* Wake ANY sleepers */
2126 sowwakeup(stcb
->sctp_socket
);
2127 sorwakeup(stcb
->sctp_socket
);
2130 if ((event
== SCTP_COMM_UP
) &&
2131 (stcb
->sctp_ep
->sctp_flags
& SCTP_PCB_FLAGS_TCPTYPE
) &&
2132 (stcb
->sctp_ep
->sctp_flags
& SCTP_PCB_FLAGS_CONNECTED
)) {
2133 soisconnected(stcb
->sctp_socket
);
2136 if (!(stcb
->sctp_ep
->sctp_flags
& SCTP_PCB_FLAGS_RECVASSOCEVNT
)) {
2137 /* event not enabled */
2140 MGETHDR(m_notify
, MB_DONTWAIT
, MT_DATA
);
2141 if (m_notify
== NULL
)
2144 m_notify
->m_len
= 0;
2146 sac
= mtod(m_notify
, struct sctp_assoc_change
*);
2147 sac
->sac_type
= SCTP_ASSOC_CHANGE
;
2149 sac
->sac_length
= sizeof(struct sctp_assoc_change
);
2150 sac
->sac_state
= event
;
2151 sac
->sac_error
= error
;
2152 /* XXX verify these stream counts */
2153 sac
->sac_outbound_streams
= stcb
->asoc
.streamoutcnt
;
2154 sac
->sac_inbound_streams
= stcb
->asoc
.streamincnt
;
2155 sac
->sac_assoc_id
= sctp_get_associd(stcb
);
2157 m_notify
->m_flags
|= M_EOR
| M_NOTIFICATION
;
2158 m_notify
->m_pkthdr
.len
= sizeof(struct sctp_assoc_change
);
2159 m_notify
->m_pkthdr
.rcvif
= 0;
2160 m_notify
->m_len
= sizeof(struct sctp_assoc_change
);
2161 m_notify
->m_next
= NULL
;
2163 /* append to socket */
2164 to
= (struct sockaddr
*)&stcb
->asoc
.primary_destination
->ro
._l_addr
;
2165 if ((stcb
->sctp_ep
->sctp_flags
& SCTP_PCB_FLAGS_NEEDS_MAPPED_V4
) &&
2166 to
->sa_family
== AF_INET
) {
2167 struct sockaddr_in
*sin
;
2169 sin
= (struct sockaddr_in
*)to
;
2170 bzero(&sin6
, sizeof(sin6
));
2171 sin6
.sin6_family
= AF_INET6
;
2172 sin6
.sin6_len
= sizeof(struct sockaddr_in6
);
2173 sin6
.sin6_addr
.s6_addr16
[2] = 0xffff;
2174 bcopy(&sin
->sin_addr
, &sin6
.sin6_addr
.s6_addr16
[3],
2175 sizeof(sin6
.sin6_addr
.s6_addr16
[3]));
2176 sin6
.sin6_port
= sin
->sin_port
;
2177 to
= (struct sockaddr
*)&sin6
;
2179 /* check and strip embedded scope junk */
2180 to
= (struct sockaddr
*)sctp_recover_scope((struct sockaddr_in6
*)to
,
2183 * We need to always notify comm changes.
2184 * if (sctp_sbspace(&stcb->sctp_socket->so_rcv) < m_notify->m_len) {
2185 * sctp_m_freem(m_notify);
2189 SCTP_TCB_UNLOCK(stcb
);
2190 SCTP_INP_WLOCK(stcb
->sctp_ep
);
2191 SCTP_TCB_LOCK(stcb
);
2192 lwkt_gettoken(&stcb
->sctp_socket
->so_rcv
.ssb_token
);
2193 if (!sctp_sbappendaddr_nocheck(&stcb
->sctp_socket
->so_rcv
,
2194 to
, m_notify
, NULL
, stcb
->asoc
.my_vtag
, stcb
->sctp_ep
)) {
2195 /* not enough room */
2196 sctp_m_freem(m_notify
);
2197 SCTP_INP_WUNLOCK(stcb
->sctp_ep
);
2198 lwkt_reltoken(&stcb
->sctp_socket
->so_rcv
.ssb_token
);
2201 lwkt_reltoken(&stcb
->sctp_socket
->so_rcv
.ssb_token
);
2202 if (((stcb
->sctp_ep
->sctp_flags
& SCTP_PCB_FLAGS_IN_TCPPOOL
) == 0) &&
2203 ((stcb
->sctp_ep
->sctp_flags
& SCTP_PCB_FLAGS_CONNECTED
) == 0)){
2204 if (sctp_add_to_socket_q(stcb
->sctp_ep
, stcb
)) {
2205 stcb
->asoc
.my_rwnd_control_len
+= sizeof(struct mbuf
);
2208 stcb
->asoc
.my_rwnd_control_len
+= sizeof(struct mbuf
);
2210 SCTP_INP_WUNLOCK(stcb
->sctp_ep
);
2211 /* Wake up any sleeper */
2212 sctp_sorwakeup(stcb
->sctp_ep
, stcb
->sctp_socket
);
2213 sctp_sowwakeup(stcb
->sctp_ep
, stcb
->sctp_socket
);
2217 sctp_notify_peer_addr_change(struct sctp_tcb
*stcb
, uint32_t state
,
2218 struct sockaddr
*sa
, uint32_t error
)
2220 struct mbuf
*m_notify
;
2221 struct sctp_paddr_change
*spc
;
2222 struct sockaddr
*to
;
2223 struct sockaddr_in6 sin6
, lsa6
;
2225 if (!(stcb
->sctp_ep
->sctp_flags
& SCTP_PCB_FLAGS_RECVPADDREVNT
))
2226 /* event not enabled */
2229 MGETHDR(m_notify
, MB_DONTWAIT
, MT_DATA
);
2230 if (m_notify
== NULL
)
2232 m_notify
->m_len
= 0;
2234 MCLGET(m_notify
, MB_DONTWAIT
);
2235 if ((m_notify
->m_flags
& M_EXT
) != M_EXT
) {
2236 sctp_m_freem(m_notify
);
2240 spc
= mtod(m_notify
, struct sctp_paddr_change
*);
2241 spc
->spc_type
= SCTP_PEER_ADDR_CHANGE
;
2243 spc
->spc_length
= sizeof(struct sctp_paddr_change
);
2244 if (sa
->sa_family
== AF_INET
) {
2245 memcpy(&spc
->spc_aaddr
, sa
, sizeof(struct sockaddr_in
));
2247 memcpy(&spc
->spc_aaddr
, sa
, sizeof(struct sockaddr_in6
));
2249 spc
->spc_state
= state
;
2250 spc
->spc_error
= error
;
2251 spc
->spc_assoc_id
= sctp_get_associd(stcb
);
2253 m_notify
->m_flags
|= M_EOR
| M_NOTIFICATION
;
2254 m_notify
->m_pkthdr
.len
= sizeof(struct sctp_paddr_change
);
2255 m_notify
->m_pkthdr
.rcvif
= 0;
2256 m_notify
->m_len
= sizeof(struct sctp_paddr_change
);
2257 m_notify
->m_next
= NULL
;
2259 to
= (struct sockaddr
*)(struct sockaddr
*)
2260 &stcb
->asoc
.primary_destination
->ro
._l_addr
;
2261 if ((stcb
->sctp_ep
->sctp_flags
& SCTP_PCB_FLAGS_NEEDS_MAPPED_V4
) &&
2262 to
->sa_family
== AF_INET
) {
2263 struct sockaddr_in
*sin
;
2265 sin
= (struct sockaddr_in
*)to
;
2266 bzero(&sin6
, sizeof(sin6
));
2267 sin6
.sin6_family
= AF_INET6
;
2268 sin6
.sin6_len
= sizeof(struct sockaddr_in6
);
2269 sin6
.sin6_addr
.s6_addr16
[2] = 0xffff;
2270 bcopy(&sin
->sin_addr
, &sin6
.sin6_addr
.s6_addr16
[3],
2271 sizeof(sin6
.sin6_addr
.s6_addr16
[3]));
2272 sin6
.sin6_port
= sin
->sin_port
;
2273 to
= (struct sockaddr
*)&sin6
;
2275 /* check and strip embedded scope junk */
2276 to
= (struct sockaddr
*)sctp_recover_scope((struct sockaddr_in6
*)to
,
2279 if (sctp_sbspace(&stcb
->sctp_socket
->so_rcv
) < m_notify
->m_len
) {
2280 sctp_m_freem(m_notify
);
2283 /* append to socket */
2284 SCTP_TCB_UNLOCK(stcb
);
2285 SCTP_INP_WLOCK(stcb
->sctp_ep
);
2286 SCTP_TCB_LOCK(stcb
);
2287 lwkt_gettoken(&stcb
->sctp_socket
->so_rcv
.ssb_token
);
2288 if (!sctp_sbappendaddr_nocheck(&stcb
->sctp_socket
->so_rcv
, to
,
2289 m_notify
, NULL
, stcb
->asoc
.my_vtag
, stcb
->sctp_ep
)) {
2290 /* not enough room */
2291 sctp_m_freem(m_notify
);
2292 SCTP_INP_WUNLOCK(stcb
->sctp_ep
);
2293 lwkt_reltoken(&stcb
->sctp_socket
->so_rcv
.ssb_token
);
2296 lwkt_reltoken(&stcb
->sctp_socket
->so_rcv
.ssb_token
);
2297 if (((stcb
->sctp_ep
->sctp_flags
& SCTP_PCB_FLAGS_IN_TCPPOOL
) == 0) &&
2298 ((stcb
->sctp_ep
->sctp_flags
& SCTP_PCB_FLAGS_CONNECTED
) == 0)){
2299 if (sctp_add_to_socket_q(stcb
->sctp_ep
, stcb
)) {
2300 stcb
->asoc
.my_rwnd_control_len
+= sizeof(struct mbuf
);
2303 stcb
->asoc
.my_rwnd_control_len
+= sizeof(struct mbuf
);
2305 SCTP_INP_WUNLOCK(stcb
->sctp_ep
);
2306 sctp_sorwakeup(stcb
->sctp_ep
, stcb
->sctp_socket
);
2311 sctp_notify_send_failed(struct sctp_tcb
*stcb
, u_int32_t error
,
2312 struct sctp_tmit_chunk
*chk
)
2314 struct mbuf
*m_notify
;
2315 struct sctp_send_failed
*ssf
;
2316 struct sockaddr_in6 sin6
, lsa6
;
2317 struct sockaddr
*to
;
2320 if (!(stcb
->sctp_ep
->sctp_flags
& SCTP_PCB_FLAGS_RECVSENDFAILEVNT
))
2321 /* event not enabled */
2324 length
= sizeof(struct sctp_send_failed
) + chk
->send_size
;
2325 MGETHDR(m_notify
, MB_DONTWAIT
, MT_DATA
);
2326 if (m_notify
== NULL
)
2329 m_notify
->m_len
= 0;
2330 ssf
= mtod(m_notify
, struct sctp_send_failed
*);
2331 ssf
->ssf_type
= SCTP_SEND_FAILED
;
2332 if (error
== SCTP_NOTIFY_DATAGRAM_UNSENT
)
2333 ssf
->ssf_flags
= SCTP_DATA_UNSENT
;
2335 ssf
->ssf_flags
= SCTP_DATA_SENT
;
2336 ssf
->ssf_length
= length
;
2337 ssf
->ssf_error
= error
;
2338 /* not exactly what the user sent in, but should be close :) */
2339 ssf
->ssf_info
.sinfo_stream
= chk
->rec
.data
.stream_number
;
2340 ssf
->ssf_info
.sinfo_ssn
= chk
->rec
.data
.stream_seq
;
2341 ssf
->ssf_info
.sinfo_flags
= chk
->rec
.data
.rcv_flags
;
2342 ssf
->ssf_info
.sinfo_ppid
= chk
->rec
.data
.payloadtype
;
2343 ssf
->ssf_info
.sinfo_context
= chk
->rec
.data
.context
;
2344 ssf
->ssf_info
.sinfo_assoc_id
= sctp_get_associd(stcb
);
2345 ssf
->ssf_assoc_id
= sctp_get_associd(stcb
);
2346 m_notify
->m_next
= chk
->data
;
2347 if (m_notify
->m_next
== NULL
)
2348 m_notify
->m_flags
|= M_EOR
| M_NOTIFICATION
;
2351 m_notify
->m_flags
|= M_NOTIFICATION
;
2353 while (m
->m_next
!= NULL
)
2355 m
->m_flags
|= M_EOR
;
2357 m_notify
->m_pkthdr
.len
= length
;
2358 m_notify
->m_pkthdr
.rcvif
= 0;
2359 m_notify
->m_len
= sizeof(struct sctp_send_failed
);
2361 /* Steal off the mbuf */
2363 to
= (struct sockaddr
*)(struct sockaddr
*)&stcb
->asoc
.primary_destination
->ro
._l_addr
;
2364 if ((stcb
->sctp_ep
->sctp_flags
& SCTP_PCB_FLAGS_NEEDS_MAPPED_V4
) &&
2365 to
->sa_family
== AF_INET
) {
2366 struct sockaddr_in
*sin
;
2368 sin
= (struct sockaddr_in
*)to
;
2369 bzero(&sin6
, sizeof(sin6
));
2370 sin6
.sin6_family
= AF_INET6
;
2371 sin6
.sin6_len
= sizeof(struct sockaddr_in6
);
2372 sin6
.sin6_addr
.s6_addr16
[2] = 0xffff;
2373 bcopy(&sin
->sin_addr
, &sin6
.sin6_addr
.s6_addr16
[3],
2374 sizeof(sin6
.sin6_addr
.s6_addr16
[3]));
2375 sin6
.sin6_port
= sin
->sin_port
;
2376 to
= (struct sockaddr
*)&sin6
;
2378 /* check and strip embedded scope junk */
2379 to
= (struct sockaddr
*)sctp_recover_scope((struct sockaddr_in6
*)to
,
2382 if (sctp_sbspace(&stcb
->sctp_socket
->so_rcv
) < m_notify
->m_len
) {
2383 sctp_m_freem(m_notify
);
2387 /* append to socket */
2388 SCTP_TCB_UNLOCK(stcb
);
2389 SCTP_INP_WLOCK(stcb
->sctp_ep
);
2390 SCTP_TCB_LOCK(stcb
);
2391 lwkt_gettoken(&stcb
->sctp_socket
->so_rcv
.ssb_token
);
2392 if (!sctp_sbappendaddr_nocheck(&stcb
->sctp_socket
->so_rcv
, to
,
2393 m_notify
, NULL
, stcb
->asoc
.my_vtag
, stcb
->sctp_ep
)) {
2394 /* not enough room */
2395 sctp_m_freem(m_notify
);
2396 lwkt_reltoken(&stcb
->sctp_socket
->so_rcv
.ssb_token
);
2397 SCTP_INP_WUNLOCK(stcb
->sctp_ep
);
2400 lwkt_reltoken(&stcb
->sctp_socket
->so_rcv
.ssb_token
);
2401 if (((stcb
->sctp_ep
->sctp_flags
& SCTP_PCB_FLAGS_IN_TCPPOOL
) == 0) &&
2402 ((stcb
->sctp_ep
->sctp_flags
& SCTP_PCB_FLAGS_CONNECTED
) == 0)){
2403 if (sctp_add_to_socket_q(stcb
->sctp_ep
, stcb
)) {
2404 stcb
->asoc
.my_rwnd_control_len
+= sizeof(struct mbuf
);
2407 stcb
->asoc
.my_rwnd_control_len
+= sizeof(struct mbuf
);
2409 SCTP_INP_WUNLOCK(stcb
->sctp_ep
);
2410 sctp_sorwakeup(stcb
->sctp_ep
, stcb
->sctp_socket
);
2414 sctp_notify_adaption_layer(struct sctp_tcb
*stcb
,
2417 struct mbuf
*m_notify
;
2418 struct sctp_adaption_event
*sai
;
2419 struct sockaddr_in6 sin6
, lsa6
;
2420 struct sockaddr
*to
;
2422 if (!(stcb
->sctp_ep
->sctp_flags
& SCTP_PCB_FLAGS_ADAPTIONEVNT
))
2423 /* event not enabled */
2426 MGETHDR(m_notify
, MB_DONTWAIT
, MT_DATA
);
2427 if (m_notify
== NULL
)
2430 m_notify
->m_len
= 0;
2431 sai
= mtod(m_notify
, struct sctp_adaption_event
*);
2432 sai
->sai_type
= SCTP_ADAPTION_INDICATION
;
2434 sai
->sai_length
= sizeof(struct sctp_adaption_event
);
2435 sai
->sai_adaption_ind
= error
;
2436 sai
->sai_assoc_id
= sctp_get_associd(stcb
);
2438 m_notify
->m_flags
|= M_EOR
| M_NOTIFICATION
;
2439 m_notify
->m_pkthdr
.len
= sizeof(struct sctp_adaption_event
);
2440 m_notify
->m_pkthdr
.rcvif
= 0;
2441 m_notify
->m_len
= sizeof(struct sctp_adaption_event
);
2442 m_notify
->m_next
= NULL
;
2444 to
= (struct sockaddr
*)(struct sockaddr
*)&stcb
->asoc
.primary_destination
->ro
._l_addr
;
2445 if ((stcb
->sctp_ep
->sctp_flags
& SCTP_PCB_FLAGS_NEEDS_MAPPED_V4
) &&
2446 (to
->sa_family
== AF_INET
)) {
2447 struct sockaddr_in
*sin
;
2449 sin
= (struct sockaddr_in
*)to
;
2450 bzero(&sin6
, sizeof(sin6
));
2451 sin6
.sin6_family
= AF_INET6
;
2452 sin6
.sin6_len
= sizeof(struct sockaddr_in6
);
2453 sin6
.sin6_addr
.s6_addr16
[2] = 0xffff;
2454 bcopy(&sin
->sin_addr
, &sin6
.sin6_addr
.s6_addr16
[3],
2455 sizeof(sin6
.sin6_addr
.s6_addr16
[3]));
2456 sin6
.sin6_port
= sin
->sin_port
;
2457 to
= (struct sockaddr
*)&sin6
;
2459 /* check and strip embedded scope junk */
2460 to
= (struct sockaddr
*)sctp_recover_scope((struct sockaddr_in6
*)to
,
2462 if (sctp_sbspace(&stcb
->sctp_socket
->so_rcv
) < m_notify
->m_len
) {
2463 sctp_m_freem(m_notify
);
2466 /* append to socket */
2467 SCTP_TCB_UNLOCK(stcb
);
2468 SCTP_INP_WLOCK(stcb
->sctp_ep
);
2469 SCTP_TCB_LOCK(stcb
);
2470 lwkt_gettoken(&stcb
->sctp_socket
->so_rcv
.ssb_token
);
2471 if (!sctp_sbappendaddr_nocheck(&stcb
->sctp_socket
->so_rcv
, to
,
2472 m_notify
, NULL
, stcb
->asoc
.my_vtag
, stcb
->sctp_ep
)) {
2473 /* not enough room */
2474 sctp_m_freem(m_notify
);
2475 lwkt_reltoken(&stcb
->sctp_socket
->so_rcv
.ssb_token
);
2476 SCTP_INP_WUNLOCK(stcb
->sctp_ep
);
2479 lwkt_reltoken(&stcb
->sctp_socket
->so_rcv
.ssb_token
);
2480 if (((stcb
->sctp_ep
->sctp_flags
& SCTP_PCB_FLAGS_IN_TCPPOOL
) == 0) &&
2481 ((stcb
->sctp_ep
->sctp_flags
& SCTP_PCB_FLAGS_CONNECTED
) == 0)){
2482 if (sctp_add_to_socket_q(stcb
->sctp_ep
, stcb
)) {
2483 stcb
->asoc
.my_rwnd_control_len
+= sizeof(struct mbuf
);
2486 stcb
->asoc
.my_rwnd_control_len
+= sizeof(struct mbuf
);
2488 SCTP_INP_WUNLOCK(stcb
->sctp_ep
);
2489 sctp_sorwakeup(stcb
->sctp_ep
, stcb
->sctp_socket
);
2493 sctp_notify_partial_delivery_indication(struct sctp_tcb
*stcb
,
2496 struct mbuf
*m_notify
;
2497 struct sctp_pdapi_event
*pdapi
;
2498 struct sockaddr_in6 sin6
, lsa6
;
2499 struct sockaddr
*to
;
2501 if (!(stcb
->sctp_ep
->sctp_flags
& SCTP_PCB_FLAGS_PDAPIEVNT
))
2502 /* event not enabled */
2505 MGETHDR(m_notify
, MB_DONTWAIT
, MT_DATA
);
2506 if (m_notify
== NULL
)
2509 m_notify
->m_len
= 0;
2510 pdapi
= mtod(m_notify
, struct sctp_pdapi_event
*);
2511 pdapi
->pdapi_type
= SCTP_PARTIAL_DELIVERY_EVENT
;
2512 pdapi
->pdapi_flags
= 0;
2513 pdapi
->pdapi_length
= sizeof(struct sctp_pdapi_event
);
2514 pdapi
->pdapi_indication
= error
;
2515 pdapi
->pdapi_assoc_id
= sctp_get_associd(stcb
);
2517 m_notify
->m_flags
|= M_EOR
| M_NOTIFICATION
;
2518 m_notify
->m_pkthdr
.len
= sizeof(struct sctp_pdapi_event
);
2519 m_notify
->m_pkthdr
.rcvif
= 0;
2520 m_notify
->m_len
= sizeof(struct sctp_pdapi_event
);
2521 m_notify
->m_next
= NULL
;
2523 to
= (struct sockaddr
*)(struct sockaddr
*)&stcb
->asoc
.primary_destination
->ro
._l_addr
;
2524 if ((stcb
->sctp_ep
->sctp_flags
& SCTP_PCB_FLAGS_NEEDS_MAPPED_V4
) &&
2525 (to
->sa_family
== AF_INET
)) {
2526 struct sockaddr_in
*sin
;
2528 sin
= (struct sockaddr_in
*)to
;
2529 bzero(&sin6
, sizeof(sin6
));
2530 sin6
.sin6_family
= AF_INET6
;
2531 sin6
.sin6_len
= sizeof(struct sockaddr_in6
);
2532 sin6
.sin6_addr
.s6_addr16
[2] = 0xffff;
2533 bcopy(&sin
->sin_addr
, &sin6
.sin6_addr
.s6_addr16
[3],
2534 sizeof(sin6
.sin6_addr
.s6_addr16
[3]));
2535 sin6
.sin6_port
= sin
->sin_port
;
2536 to
= (struct sockaddr
*)&sin6
;
2538 /* check and strip embedded scope junk */
2539 to
= (struct sockaddr
*)sctp_recover_scope((struct sockaddr_in6
*)to
,
2541 if (sctp_sbspace(&stcb
->sctp_socket
->so_rcv
) < m_notify
->m_len
) {
2542 sctp_m_freem(m_notify
);
2545 /* append to socket */
2546 SCTP_TCB_UNLOCK(stcb
);
2547 SCTP_INP_WLOCK(stcb
->sctp_ep
);
2548 SCTP_TCB_LOCK(stcb
);
2549 lwkt_gettoken(&stcb
->sctp_socket
->so_rcv
.ssb_token
);
2550 if (!sctp_sbappendaddr_nocheck(&stcb
->sctp_socket
->so_rcv
, to
,
2551 m_notify
, NULL
, stcb
->asoc
.my_vtag
, stcb
->sctp_ep
)) {
2552 /* not enough room */
2553 sctp_m_freem(m_notify
);
2554 lwkt_reltoken(&stcb
->sctp_socket
->so_rcv
.ssb_token
);
2555 SCTP_INP_WUNLOCK(stcb
->sctp_ep
);
2558 lwkt_reltoken(&stcb
->sctp_socket
->so_rcv
.ssb_token
);
2559 if (((stcb
->sctp_ep
->sctp_flags
& SCTP_PCB_FLAGS_IN_TCPPOOL
) == 0) &&
2560 ((stcb
->sctp_ep
->sctp_flags
& SCTP_PCB_FLAGS_CONNECTED
) == 0)){
2561 if (sctp_add_to_socket_q(stcb
->sctp_ep
, stcb
)) {
2562 stcb
->asoc
.my_rwnd_control_len
+= sizeof(struct mbuf
);
2565 stcb
->asoc
.my_rwnd_control_len
+= sizeof(struct mbuf
);
2567 SCTP_INP_WUNLOCK(stcb
->sctp_ep
);
2568 sctp_sorwakeup(stcb
->sctp_ep
, stcb
->sctp_socket
);
2572 sctp_notify_shutdown_event(struct sctp_tcb
*stcb
)
2574 struct mbuf
*m_notify
;
2575 struct sctp_shutdown_event
*sse
;
2576 struct sockaddr_in6 sin6
, lsa6
;
2577 struct sockaddr
*to
;
2580 * For TCP model AND UDP connected sockets we will send
2581 * an error up when an SHUTDOWN completes
2583 if ((stcb
->sctp_ep
->sctp_flags
& SCTP_PCB_FLAGS_TCPTYPE
) ||
2584 (stcb
->sctp_ep
->sctp_flags
& SCTP_PCB_FLAGS_IN_TCPPOOL
)) {
2585 /* mark socket closed for read/write and wakeup! */
2586 socantrcvmore(stcb
->sctp_socket
);
2587 socantsendmore(stcb
->sctp_socket
);
2590 if (!(stcb
->sctp_ep
->sctp_flags
& SCTP_PCB_FLAGS_RECVSHUTDOWNEVNT
))
2591 /* event not enabled */
2594 MGETHDR(m_notify
, MB_DONTWAIT
, MT_DATA
);
2595 if (m_notify
== NULL
)
2598 m_notify
->m_len
= 0;
2599 sse
= mtod(m_notify
, struct sctp_shutdown_event
*);
2600 sse
->sse_type
= SCTP_SHUTDOWN_EVENT
;
2602 sse
->sse_length
= sizeof(struct sctp_shutdown_event
);
2603 sse
->sse_assoc_id
= sctp_get_associd(stcb
);
2605 m_notify
->m_flags
|= M_EOR
| M_NOTIFICATION
;
2606 m_notify
->m_pkthdr
.len
= sizeof(struct sctp_shutdown_event
);
2607 m_notify
->m_pkthdr
.rcvif
= 0;
2608 m_notify
->m_len
= sizeof(struct sctp_shutdown_event
);
2609 m_notify
->m_next
= NULL
;
2611 to
= (struct sockaddr
*)(struct sockaddr
*)&stcb
->asoc
.primary_destination
->ro
._l_addr
;
2612 if ((stcb
->sctp_ep
->sctp_flags
& SCTP_PCB_FLAGS_NEEDS_MAPPED_V4
) &&
2613 to
->sa_family
== AF_INET
) {
2614 struct sockaddr_in
*sin
;
2616 sin
= (struct sockaddr_in
*)to
;
2617 bzero(&sin6
, sizeof(sin6
));
2618 sin6
.sin6_family
= AF_INET6
;
2619 sin6
.sin6_len
= sizeof(struct sockaddr_in6
);
2620 sin6
.sin6_addr
.s6_addr16
[2] = 0xffff;
2621 bcopy(&sin
->sin_addr
, &sin6
.sin6_addr
.s6_addr16
[3],
2622 sizeof(sin6
.sin6_addr
.s6_addr16
[3]));
2623 sin6
.sin6_port
= sin
->sin_port
;
2624 to
= (struct sockaddr
*)&sin6
;
2626 /* check and strip embedded scope junk */
2627 to
= (struct sockaddr
*)sctp_recover_scope((struct sockaddr_in6
*)to
,
2629 if (sctp_sbspace(&stcb
->sctp_socket
->so_rcv
) < m_notify
->m_len
) {
2630 sctp_m_freem(m_notify
);
2633 /* append to socket */
2634 SCTP_TCB_UNLOCK(stcb
);
2635 SCTP_INP_WLOCK(stcb
->sctp_ep
);
2636 SCTP_TCB_LOCK(stcb
);
2637 lwkt_gettoken(&stcb
->sctp_socket
->so_rcv
.ssb_token
);
2638 if (!sctp_sbappendaddr_nocheck(&stcb
->sctp_socket
->so_rcv
, to
,
2639 m_notify
, NULL
, stcb
->asoc
.my_vtag
, stcb
->sctp_ep
)) {
2640 /* not enough room */
2641 sctp_m_freem(m_notify
);
2642 lwkt_reltoken(&stcb
->sctp_socket
->so_rcv
.ssb_token
);
2643 SCTP_INP_WUNLOCK(stcb
->sctp_ep
);
2646 lwkt_reltoken(&stcb
->sctp_socket
->so_rcv
.ssb_token
);
2647 if (((stcb
->sctp_ep
->sctp_flags
& SCTP_PCB_FLAGS_IN_TCPPOOL
) == 0) &&
2648 ((stcb
->sctp_ep
->sctp_flags
& SCTP_PCB_FLAGS_CONNECTED
) == 0)){
2649 if (sctp_add_to_socket_q(stcb
->sctp_ep
, stcb
)) {
2650 stcb
->asoc
.my_rwnd_control_len
+= sizeof(struct mbuf
);
2653 stcb
->asoc
.my_rwnd_control_len
+= sizeof(struct mbuf
);
2655 SCTP_INP_WUNLOCK(stcb
->sctp_ep
);
2656 sctp_sorwakeup(stcb
->sctp_ep
, stcb
->sctp_socket
);
2660 sctp_notify_stream_reset(struct sctp_tcb
*stcb
,
2661 int number_entries
, uint16_t *list
, int flag
)
2663 struct mbuf
*m_notify
;
2664 struct sctp_stream_reset_event
*strreset
;
2665 struct sockaddr_in6 sin6
, lsa6
;
2666 struct sockaddr
*to
;
2669 if (!(stcb
->sctp_ep
->sctp_flags
& SCTP_PCB_FLAGS_STREAM_RESETEVNT
))
2670 /* event not enabled */
2673 MGETHDR(m_notify
, MB_DONTWAIT
, MT_DATA
);
2674 if (m_notify
== NULL
)
2677 m_notify
->m_len
= 0;
2678 len
= sizeof(struct sctp_stream_reset_event
) + (number_entries
* sizeof(uint16_t));
2679 if (len
> M_TRAILINGSPACE(m_notify
)) {
2680 MCLGET(m_notify
, MB_WAIT
);
2682 if (m_notify
== NULL
)
2686 if (len
> M_TRAILINGSPACE(m_notify
)) {
2687 /* never enough room */
2691 strreset
= mtod(m_notify
, struct sctp_stream_reset_event
*);
2692 strreset
->strreset_type
= SCTP_STREAM_RESET_EVENT
;
2693 if (number_entries
== 0) {
2694 strreset
->strreset_flags
= flag
| SCTP_STRRESET_ALL_STREAMS
;
2696 strreset
->strreset_flags
= flag
| SCTP_STRRESET_STREAM_LIST
;
2698 strreset
->strreset_length
= len
;
2699 strreset
->strreset_assoc_id
= sctp_get_associd(stcb
);
2700 if (number_entries
) {
2702 for (i
=0; i
<number_entries
; i
++) {
2703 strreset
->strreset_list
[i
] = list
[i
];
2706 m_notify
->m_flags
|= M_EOR
| M_NOTIFICATION
;
2707 m_notify
->m_pkthdr
.len
= len
;
2708 m_notify
->m_pkthdr
.rcvif
= 0;
2709 m_notify
->m_len
= len
;
2710 m_notify
->m_next
= NULL
;
2711 if (sctp_sbspace(&stcb
->sctp_socket
->so_rcv
) < m_notify
->m_len
) {
2713 sctp_m_freem(m_notify
);
2716 to
= (struct sockaddr
*)(struct sockaddr
*)&stcb
->asoc
.primary_destination
->ro
._l_addr
;
2717 if ((stcb
->sctp_ep
->sctp_flags
& SCTP_PCB_FLAGS_NEEDS_MAPPED_V4
) &&
2718 to
->sa_family
== AF_INET
) {
2719 struct sockaddr_in
*sin
;
2721 sin
= (struct sockaddr_in
*)to
;
2722 bzero(&sin6
, sizeof(sin6
));
2723 sin6
.sin6_family
= AF_INET6
;
2724 sin6
.sin6_len
= sizeof(struct sockaddr_in6
);
2725 sin6
.sin6_addr
.s6_addr16
[2] = 0xffff;
2726 bcopy(&sin
->sin_addr
, &sin6
.sin6_addr
.s6_addr16
[3],
2727 sizeof(sin6
.sin6_addr
.s6_addr16
[3]));
2728 sin6
.sin6_port
= sin
->sin_port
;
2729 to
= (struct sockaddr
*)&sin6
;
2731 /* check and strip embedded scope junk */
2732 to
= (struct sockaddr
*)sctp_recover_scope((struct sockaddr_in6
*)to
,
2734 /* append to socket */
2735 SCTP_TCB_UNLOCK(stcb
);
2736 SCTP_INP_WLOCK(stcb
->sctp_ep
);
2737 SCTP_TCB_LOCK(stcb
);
2738 lwkt_gettoken(&stcb
->sctp_socket
->so_rcv
.ssb_token
);
2739 if (!sctp_sbappendaddr_nocheck(&stcb
->sctp_socket
->so_rcv
, to
,
2740 m_notify
, NULL
, stcb
->asoc
.my_vtag
, stcb
->sctp_ep
)) {
2741 /* not enough room */
2742 sctp_m_freem(m_notify
);
2743 lwkt_reltoken(&stcb
->sctp_socket
->so_rcv
.ssb_token
);
2744 SCTP_INP_WUNLOCK(stcb
->sctp_ep
);
2747 lwkt_reltoken(&stcb
->sctp_socket
->so_rcv
.ssb_token
);
2748 if (((stcb
->sctp_ep
->sctp_flags
& SCTP_PCB_FLAGS_IN_TCPPOOL
) == 0) &&
2749 ((stcb
->sctp_ep
->sctp_flags
& SCTP_PCB_FLAGS_CONNECTED
) == 0)){
2750 if (sctp_add_to_socket_q(stcb
->sctp_ep
, stcb
)) {
2751 stcb
->asoc
.my_rwnd_control_len
+= sizeof(struct mbuf
);
2754 stcb
->asoc
.my_rwnd_control_len
+= sizeof(struct mbuf
);
2756 SCTP_INP_WUNLOCK(stcb
->sctp_ep
);
2757 sctp_sorwakeup(stcb
->sctp_ep
, stcb
->sctp_socket
);
2762 sctp_ulp_notify(u_int32_t notification
, struct sctp_tcb
*stcb
,
2763 u_int32_t error
, void *data
)
2765 if (stcb
->sctp_ep
->sctp_flags
& SCTP_PCB_FLAGS_SOCKET_GONE
) {
2766 /* No notifications up when we are in a no socket state */
2769 if (stcb
->asoc
.state
& SCTP_STATE_CLOSED_SOCKET
) {
2770 /* Can't send up to a closed socket any notifications */
2773 switch (notification
) {
2774 case SCTP_NOTIFY_ASSOC_UP
:
2775 sctp_notify_assoc_change(SCTP_COMM_UP
, stcb
, error
);
2777 case SCTP_NOTIFY_ASSOC_DOWN
:
2778 sctp_notify_assoc_change(SCTP_SHUTDOWN_COMP
, stcb
, error
);
2780 case SCTP_NOTIFY_INTERFACE_DOWN
:
2782 struct sctp_nets
*net
;
2783 net
= (struct sctp_nets
*)data
;
2784 sctp_notify_peer_addr_change(stcb
, SCTP_ADDR_UNREACHABLE
,
2785 (struct sockaddr
*)&net
->ro
._l_addr
, error
);
2788 case SCTP_NOTIFY_INTERFACE_UP
:
2790 struct sctp_nets
*net
;
2791 net
= (struct sctp_nets
*)data
;
2792 sctp_notify_peer_addr_change(stcb
, SCTP_ADDR_AVAILABLE
,
2793 (struct sockaddr
*)&net
->ro
._l_addr
, error
);
2796 case SCTP_NOTIFY_INTERFACE_CONFIRMED
:
2798 struct sctp_nets
*net
;
2799 net
= (struct sctp_nets
*)data
;
2800 sctp_notify_peer_addr_change(stcb
, SCTP_ADDR_CONFIRMED
,
2801 (struct sockaddr
*)&net
->ro
._l_addr
, error
);
2804 case SCTP_NOTIFY_DG_FAIL
:
2805 sctp_notify_send_failed(stcb
, error
,
2806 (struct sctp_tmit_chunk
*)data
);
2808 case SCTP_NOTIFY_ADAPTION_INDICATION
:
2809 /* Here the error is the adaption indication */
2810 sctp_notify_adaption_layer(stcb
, error
);
2812 case SCTP_NOTIFY_PARTIAL_DELVIERY_INDICATION
:
2813 sctp_notify_partial_delivery_indication(stcb
, error
);
2815 case SCTP_NOTIFY_STRDATA_ERR
:
2817 case SCTP_NOTIFY_ASSOC_ABORTED
:
2818 sctp_notify_assoc_change(SCTP_COMM_LOST
, stcb
, error
);
2820 case SCTP_NOTIFY_PEER_OPENED_STREAM
:
2822 case SCTP_NOTIFY_STREAM_OPENED_OK
:
2824 case SCTP_NOTIFY_ASSOC_RESTART
:
2825 sctp_notify_assoc_change(SCTP_RESTART
, stcb
, error
);
2827 case SCTP_NOTIFY_HB_RESP
:
2829 case SCTP_NOTIFY_STR_RESET_SEND
:
2830 sctp_notify_stream_reset(stcb
, error
, ((uint16_t *)data
), SCTP_STRRESET_OUTBOUND_STR
);
2832 case SCTP_NOTIFY_STR_RESET_RECV
:
2833 sctp_notify_stream_reset(stcb
, error
, ((uint16_t *)data
), SCTP_STRRESET_INBOUND_STR
);
2835 case SCTP_NOTIFY_ASCONF_ADD_IP
:
2836 sctp_notify_peer_addr_change(stcb
, SCTP_ADDR_ADDED
, data
,
2839 case SCTP_NOTIFY_ASCONF_DELETE_IP
:
2840 sctp_notify_peer_addr_change(stcb
, SCTP_ADDR_REMOVED
, data
,
2843 case SCTP_NOTIFY_ASCONF_SET_PRIMARY
:
2844 sctp_notify_peer_addr_change(stcb
, SCTP_ADDR_MADE_PRIM
, data
,
2847 case SCTP_NOTIFY_ASCONF_SUCCESS
:
2849 case SCTP_NOTIFY_ASCONF_FAILED
:
2851 case SCTP_NOTIFY_PEER_SHUTDOWN
:
2852 sctp_notify_shutdown_event(stcb
);
2856 if (sctp_debug_on
& SCTP_DEBUG_UTIL1
) {
2857 kprintf("NOTIFY: unknown notification %xh (%u)\n",
2858 notification
, notification
);
2860 #endif /* SCTP_DEBUG */
2866 sctp_report_all_outbound(struct sctp_tcb
*stcb
)
2868 struct sctp_association
*asoc
;
2869 struct sctp_stream_out
*outs
;
2870 struct sctp_tmit_chunk
*chk
;
2874 if (stcb
->sctp_ep
->sctp_flags
& SCTP_PCB_FLAGS_SOCKET_GONE
) {
2877 /* now through all the gunk freeing chunks */
2878 TAILQ_FOREACH(outs
, &asoc
->out_wheel
, next_spoke
) {
2879 /* now clean up any chunks here */
2880 chk
= TAILQ_FIRST(&outs
->outqueue
);
2882 stcb
->asoc
.stream_queue_cnt
--;
2883 TAILQ_REMOVE(&outs
->outqueue
, chk
, sctp_next
);
2884 sctp_ulp_notify(SCTP_NOTIFY_DG_FAIL
, stcb
,
2885 SCTP_NOTIFY_DATAGRAM_UNSENT
, chk
);
2887 sctp_m_freem(chk
->data
);
2891 sctp_free_remote_addr(chk
->whoTo
);
2894 /* Free the chunk */
2895 SCTP_ZONE_FREE(sctppcbinfo
.ipi_zone_chunk
, chk
);
2896 sctppcbinfo
.ipi_count_chunk
--;
2897 if ((int)sctppcbinfo
.ipi_count_chunk
< 0) {
2898 panic("Chunk count is negative");
2900 sctppcbinfo
.ipi_gencnt_chunk
++;
2901 chk
= TAILQ_FIRST(&outs
->outqueue
);
2904 /* pending send queue SHOULD be empty */
2905 if (!TAILQ_EMPTY(&asoc
->send_queue
)) {
2906 chk
= TAILQ_FIRST(&asoc
->send_queue
);
2908 TAILQ_REMOVE(&asoc
->send_queue
, chk
, sctp_next
);
2909 sctp_ulp_notify(SCTP_NOTIFY_DG_FAIL
, stcb
, SCTP_NOTIFY_DATAGRAM_UNSENT
, chk
);
2911 sctp_m_freem(chk
->data
);
2915 sctp_free_remote_addr(chk
->whoTo
);
2917 SCTP_ZONE_FREE(sctppcbinfo
.ipi_zone_chunk
, chk
);
2918 sctppcbinfo
.ipi_count_chunk
--;
2919 if ((int)sctppcbinfo
.ipi_count_chunk
< 0) {
2920 panic("Chunk count is negative");
2922 sctppcbinfo
.ipi_gencnt_chunk
++;
2923 chk
= TAILQ_FIRST(&asoc
->send_queue
);
2926 /* sent queue SHOULD be empty */
2927 if (!TAILQ_EMPTY(&asoc
->sent_queue
)) {
2928 chk
= TAILQ_FIRST(&asoc
->sent_queue
);
2930 TAILQ_REMOVE(&asoc
->sent_queue
, chk
, sctp_next
);
2931 sctp_ulp_notify(SCTP_NOTIFY_DG_FAIL
, stcb
,
2932 SCTP_NOTIFY_DATAGRAM_SENT
, chk
);
2934 sctp_m_freem(chk
->data
);
2938 sctp_free_remote_addr(chk
->whoTo
);
2940 SCTP_ZONE_FREE(sctppcbinfo
.ipi_zone_chunk
, chk
);
2941 sctppcbinfo
.ipi_count_chunk
--;
2942 if ((int)sctppcbinfo
.ipi_count_chunk
< 0) {
2943 panic("Chunk count is negative");
2945 sctppcbinfo
.ipi_gencnt_chunk
++;
2946 chk
= TAILQ_FIRST(&asoc
->sent_queue
);
/*
 * sctp_abort_notification(): unless the owning socket is already gone
 * (SCTP_PCB_FLAGS_SOCKET_GONE), report every queued outbound datagram as
 * failed and deliver an SCTP_NOTIFY_ASSOC_ABORTED event to the ULP.
 * NOTE(review): this file's extraction dropped interior lines (braces,
 * returns); surviving tokens are preserved byte-for-byte below.
 */
2952 sctp_abort_notification(struct sctp_tcb
*stcb
, int error
)
/* socket already torn down: nothing to notify -- guard body was lost in
 * extraction, presumably an early return */
2955 if (stcb
->sctp_ep
->sctp_flags
& SCTP_PCB_FLAGS_SOCKET_GONE
) {
2958 /* Tell them we lost the asoc */
2959 sctp_report_all_outbound(stcb
);
2960 sctp_ulp_notify(SCTP_NOTIFY_ASSOC_ABORTED
, stcb
, error
, NULL
);
/*
 * sctp_abort_association(): abort an association for which we hold a TCB:
 * notify the ULP, send an ABORT built from the received packet (m/iphlen/sh)
 * carrying the peer's vtag and optional operational error, then free the
 * association.  If the socket is gone and no associations remain on the
 * endpoint, the inpcb itself is freed too.
 * NOTE(review): interior lines were lost in extraction; code kept verbatim.
 */
2964 sctp_abort_association(struct sctp_inpcb
*inp
, struct sctp_tcb
*stcb
,
2965 struct mbuf
*m
, int iphlen
, struct sctphdr
*sh
, struct mbuf
*op_err
)
2971 /* We have a TCB to abort, send notification too */
2972 vtag
= stcb
->asoc
.peer_vtag
;
2973 sctp_abort_notification(stcb
, 0);
/* send the ABORT chunk to the peer using the saved peer vtag */
2975 sctp_send_abort(m
, iphlen
, sh
, vtag
, op_err
);
2977 /* Ok, now lets free it */
2978 sctp_free_assoc(inp
, stcb
);
/* last association on a dead socket: release the endpoint as well */
2980 if (inp
->sctp_flags
& SCTP_PCB_FLAGS_SOCKET_GONE
) {
2981 if (LIST_FIRST(&inp
->sctp_asoc_list
) == NULL
) {
2982 sctp_inpcb_free(inp
, 1);
/*
 * sctp_abort_an_association(): abort an established association from the
 * local side: notify the ULP (only if the socket still exists), send an
 * ABORT to the peer via sctp_send_abort_tcb(), then free the association.
 * A dead socket whose last association is gone also frees the inpcb.
 * NOTE(review): interior lines were lost in extraction; code kept verbatim.
 */
2989 sctp_abort_an_association(struct sctp_inpcb
*inp
, struct sctp_tcb
*stcb
,
2990 int error
, struct mbuf
*op_err
)
2995 /* Got to have a TCB */
2996 if (inp
->sctp_flags
& SCTP_PCB_FLAGS_SOCKET_GONE
) {
2997 if (LIST_FIRST(&inp
->sctp_asoc_list
) == NULL
) {
2998 sctp_inpcb_free(inp
, 1);
/* vtag saved here; its later use was lost in extraction -- TODO confirm */
3003 vtag
= stcb
->asoc
.peer_vtag
;
3004 /* notify the ulp */
3005 if ((inp
->sctp_flags
& SCTP_PCB_FLAGS_SOCKET_GONE
) == 0)
3006 sctp_abort_notification(stcb
, error
);
3007 /* notify the peer */
3008 sctp_send_abort_tcb(stcb
, op_err
);
3009 /* now free the asoc */
3010 sctp_free_assoc(inp
, stcb
);
/*
 * sctp_handle_ootb(): handle an out-of-the-blue packet (no matching
 * association).  Walks the chunk list: PACKET-DROPPED and ABORT get no
 * response, SHUTDOWN-COMPLETE is ignored, SHUTDOWN-ACK is answered with a
 * SHUTDOWN-COMPLETE; anything else falls through to sending an ABORT.
 * NOTE(review): interior lines (braces, breaks, returns) were lost in
 * extraction; code kept verbatim.
 */
3014 sctp_handle_ootb(struct mbuf
*m
, int iphlen
, int offset
, struct sctphdr
*sh
,
3015 struct sctp_inpcb
*inp
, struct mbuf
*op_err
)
3017 struct sctp_chunkhdr
*ch
, chunk_buf
;
3018 unsigned int chk_length
;
3020 /* Generate a TO address for future reference */
3021 if (inp
&& (inp
->sctp_flags
& SCTP_PCB_FLAGS_SOCKET_GONE
)) {
3022 if (LIST_FIRST(&inp
->sctp_asoc_list
) == NULL
) {
3023 sctp_inpcb_free(inp
, 1);
/* pull the first chunk header out of the mbuf chain */
3026 ch
= (struct sctp_chunkhdr
*)sctp_m_getptr(m
, offset
,
3027 sizeof(*ch
), (u_int8_t
*)&chunk_buf
);
3028 while (ch
!= NULL
) {
3029 chk_length
= ntohs(ch
->chunk_length
);
/* a chunk shorter than its own header means a corrupt packet */
3030 if (chk_length
< sizeof(*ch
)) {
3031 /* break to abort land */
3034 switch (ch
->chunk_type
) {
3035 case SCTP_PACKET_DROPPED
:
3036 /* we don't respond to pkt-dropped */
3038 case SCTP_ABORT_ASSOCIATION
:
3039 /* we don't respond with an ABORT to an ABORT */
3041 case SCTP_SHUTDOWN_COMPLETE
:
3043 * we ignore it since we are not waiting for it
3047 case SCTP_SHUTDOWN_ACK
:
3048 sctp_send_shutdown_complete2(m
, iphlen
, sh
);
/* advance to the next 32-bit padded chunk */
3053 offset
+= SCTP_SIZE32(chk_length
);
3054 ch
= (struct sctp_chunkhdr
*)sctp_m_getptr(m
, offset
,
3055 sizeof(*ch
), (u_int8_t
*)&chunk_buf
);
/* fell out of the loop without a benign chunk type: send ABORT (vtag 0) */
3057 sctp_send_abort(m
, iphlen
, sh
, 0, op_err
);
3061 * check the inbound datagram to make sure there is not an abort
3062 * inside it, if there is return 1, else return 0.
/*
 * sctp_is_there_an_abort_here(): scan the chunk list of an inbound packet.
 * If an ABORT chunk is present the caller is told (return 1, per comment
 * above); if an INIT chunk is seen, *vtagfill is updated with the
 * initiate_tag from it.  NOTE(review): interior lines (returns, braces)
 * were lost in extraction; code kept verbatim.
 */
3065 sctp_is_there_an_abort_here(struct mbuf
*m
, int iphlen
, int *vtagfill
)
3067 struct sctp_chunkhdr
*ch
;
3068 struct sctp_init_chunk
*init_chk
, chunk_buf
;
3070 unsigned int chk_length
;
/* chunks start right after the common SCTP header */
3072 offset
= iphlen
+ sizeof(struct sctphdr
);
3073 ch
= (struct sctp_chunkhdr
*)sctp_m_getptr(m
, offset
, sizeof(*ch
),
3074 (u_int8_t
*)&chunk_buf
);
3075 while (ch
!= NULL
) {
3076 chk_length
= ntohs(ch
->chunk_length
);
3077 if (chk_length
< sizeof(*ch
)) {
3078 /* packet is probably corrupt */
3081 /* we seem to be ok, is it an abort? */
3082 if (ch
->chunk_type
== SCTP_ABORT_ASSOCIATION
) {
3083 /* yep, tell them */
3086 if (ch
->chunk_type
== SCTP_INITIATION
) {
3087 /* need to update the Vtag */
3088 init_chk
= (struct sctp_init_chunk
*)sctp_m_getptr(m
,
3089 offset
, sizeof(*init_chk
), (u_int8_t
*)&chunk_buf
);
3090 if (init_chk
!= NULL
) {
3091 *vtagfill
= ntohl(init_chk
->init
.initiate_tag
);
3094 /* Nope, move to the next chunk */
3095 offset
+= SCTP_SIZE32(chk_length
);
3096 ch
= (struct sctp_chunkhdr
*)sctp_m_getptr(m
, offset
,
3097 sizeof(*ch
), (u_int8_t
*)&chunk_buf
);
3103 * currently (2/02), ifa_addr embeds scope_id's and don't
3104 * have sin6_scope_id set (i.e. it's 0)
3105 * so, create this function to compare link local scopes
/*
 * sctp_is_same_scope(): compare the link-local scopes of two IPv6
 * addresses on local copies a/b, recovering a missing sin6_scope_id via
 * in6_recoverscope() before comparing.  NOTE(review): the lines copying
 * addr1/addr2 into a/b and the return statements were lost in extraction;
 * code kept verbatim.
 */
3108 sctp_is_same_scope(struct sockaddr_in6
*addr1
, struct sockaddr_in6
*addr2
)
3110 struct sockaddr_in6 a
, b
;
3116 if (a
.sin6_scope_id
== 0)
3117 if (in6_recoverscope(&a
, &a
.sin6_addr
, NULL
)) {
3118 /* can't get scope, so can't match */
3121 if (b
.sin6_scope_id
== 0)
3122 if (in6_recoverscope(&b
, &b
.sin6_addr
, NULL
)) {
3123 /* can't get scope, so can't match */
3126 if (a
.sin6_scope_id
!= b
.sin6_scope_id
)
3133 * returns a sockaddr_in6 with embedded scope recovered and removed
/*
 * sctp_recover_scope(): for a link-local IPv6 address whose sin6_scope_id
 * is 0, recover the scope (via in6_recoverscope into *store) and return
 * the recovered address; otherwise the original address is returned (per
 * comments below).  NOTE(review): several interior lines, including the
 * second argument of in6_recoverscope and the returns, were lost in
 * extraction; code kept verbatim.
 */
3135 struct sockaddr_in6
*
3136 sctp_recover_scope(struct sockaddr_in6
*addr
, struct sockaddr_in6
*store
)
3139 /* check and strip embedded scope junk */
3140 if (addr
->sin6_family
== AF_INET6
) {
3141 if (IN6_IS_SCOPE_LINKLOCAL(&addr
->sin6_addr
)) {
3142 if (addr
->sin6_scope_id
== 0) {
3144 if (!in6_recoverscope(store
, &store
->sin6_addr
,
3146 /* use the recovered scope */
3149 /* else, return the original "to" addr */
3157 * are the two addresses the same? currently a "scopeless" check
3158 * returns: 1 if same, 0 if not
/*
 * sctp_cmpaddr(): scope-less address equality.  NULL or family mismatch
 * fails; AF_INET6 compares via SCTP6_ARE_ADDR_EQUAL, AF_INET compares raw
 * s_addr; other families are not handled (see trailing comment).
 * NOTE(review): the early-return lines after the guards were lost in
 * extraction; code kept verbatim.
 */
3161 sctp_cmpaddr(struct sockaddr
*sa1
, struct sockaddr
*sa2
)
3165 if (sa1
== NULL
|| sa2
== NULL
)
3168 /* must be the same family */
3169 if (sa1
->sa_family
!= sa2
->sa_family
)
3172 if (sa1
->sa_family
== AF_INET6
) {
3173 /* IPv6 addresses */
3174 struct sockaddr_in6
*sin6_1
, *sin6_2
;
3176 sin6_1
= (struct sockaddr_in6
*)sa1
;
3177 sin6_2
= (struct sockaddr_in6
*)sa2
;
3178 return (SCTP6_ARE_ADDR_EQUAL(&sin6_1
->sin6_addr
,
3179 &sin6_2
->sin6_addr
));
3180 } else if (sa1
->sa_family
== AF_INET
) {
3181 /* IPv4 addresses */
3182 struct sockaddr_in
*sin_1
, *sin_2
;
3184 sin_1
= (struct sockaddr_in
*)sa1
;
3185 sin_2
= (struct sockaddr_in
*)sa2
;
3186 return (sin_1
->sin_addr
.s_addr
== sin_2
->sin_addr
.s_addr
);
3188 /* we don't do these... */
/*
 * sctp_print_address(): debug helper -- kprintf() a sockaddr as
 * "IPv6 address: addr:port scope:id" or "IPv4 address: addr:port"
 * depending on sa_family.  NOTE(review): interior lines were lost in
 * extraction; code kept verbatim.
 */
3194 sctp_print_address(struct sockaddr
*sa
)
3197 if (sa
->sa_family
== AF_INET6
) {
3198 struct sockaddr_in6
*sin6
;
3199 sin6
= (struct sockaddr_in6
*)sa
;
3200 kprintf("IPv6 address: %s:%d scope:%u\n",
3201 ip6_sprintf(&sin6
->sin6_addr
), ntohs(sin6
->sin6_port
),
3202 sin6
->sin6_scope_id
);
3203 } else if (sa
->sa_family
== AF_INET
) {
3204 struct sockaddr_in
*sin
;
3205 sin
= (struct sockaddr_in
*)sa
;
3206 kprintf("IPv4 address: %s:%d\n", inet_ntoa(sin
->sin_addr
),
3207 ntohs(sin
->sin_port
));
3214 sctp_print_address_pkt(struct ip
*iph
, struct sctphdr
*sh
)
3216 if (iph
->ip_v
== IPVERSION
) {
3217 struct sockaddr_in lsa
, fsa
;
3219 bzero(&lsa
, sizeof(lsa
));
3220 lsa
.sin_len
= sizeof(lsa
);
3221 lsa
.sin_family
= AF_INET
;
3222 lsa
.sin_addr
= iph
->ip_src
;
3223 lsa
.sin_port
= sh
->src_port
;
3224 bzero(&fsa
, sizeof(fsa
));
3225 fsa
.sin_len
= sizeof(fsa
);
3226 fsa
.sin_family
= AF_INET
;
3227 fsa
.sin_addr
= iph
->ip_dst
;
3228 fsa
.sin_port
= sh
->dest_port
;
3230 sctp_print_address((struct sockaddr
*)&lsa
);
3232 sctp_print_address((struct sockaddr
*)&fsa
);
3233 } else if (iph
->ip_v
== (IPV6_VERSION
>> 4)) {
3234 struct ip6_hdr
*ip6
;
3235 struct sockaddr_in6 lsa6
, fsa6
;
3237 ip6
= (struct ip6_hdr
*)iph
;
3238 bzero(&lsa6
, sizeof(lsa6
));
3239 lsa6
.sin6_len
= sizeof(lsa6
);
3240 lsa6
.sin6_family
= AF_INET6
;
3241 lsa6
.sin6_addr
= ip6
->ip6_src
;
3242 lsa6
.sin6_port
= sh
->src_port
;
3243 bzero(&fsa6
, sizeof(fsa6
));
3244 fsa6
.sin6_len
= sizeof(fsa6
);
3245 fsa6
.sin6_family
= AF_INET6
;
3246 fsa6
.sin6_addr
= ip6
->ip6_dst
;
3247 fsa6
.sin6_port
= sh
->dest_port
;
3249 sctp_print_address((struct sockaddr
*)&lsa6
);
3251 sctp_print_address((struct sockaddr
*)&fsa6
);
3255 #if defined(__FreeBSD__) || defined(__APPLE__)
3257 /* cloned from uipc_socket.c */
3259 #define SCTP_SBLINKRECORD(sb, m0) do { \
3260 if ((sb)->sb_lastrecord != NULL) \
3261 (sb)->sb_lastrecord->m_nextpkt = (m0); \
3263 (sb)->sb_mb = (m0); \
3264 (sb)->sb_lastrecord = (m0); \
3265 } while (/*CONSTCOND*/0)
3270 sctp_sbappendaddr_nocheck(struct signalsockbuf
*ssb
, struct sockaddr
*asa
, struct mbuf
*m0
,
3271 struct mbuf
*control
, u_int32_t tag
,
3272 struct sctp_inpcb
*inp
)
3274 struct mbuf
*m
, *n
, *nlast
;
3277 if (m0
&& (m0
->m_flags
& M_PKTHDR
) == 0)
3278 panic("sctp_sbappendaddr_nocheck");
3280 for (n
= control
; n
; n
= n
->m_next
) {
3281 if (n
->m_next
== 0) /* get pointer to last control buf */
3284 if (((inp
->sctp_flags
& SCTP_PCB_FLAGS_TCPTYPE
) == 0) ||
3285 ((inp
->sctp_flags
& SCTP_PCB_FLAGS_IN_TCPPOOL
)== 0)) {
3286 uint32_t len
= asa
->sa_len
; /* workaround GCC stupidity */
3290 MGETHDR(m
, MB_DONTWAIT
, MT_SONAME
);
3296 kprintf("Duplicate mbuf allocated %p in and mget returned %p?\n",
3299 panic("more than once");
3304 m
->m_len
= asa
->sa_len
;
3305 bcopy((caddr_t
)asa
, mtod(m
, caddr_t
), asa
->sa_len
);
3311 n
->m_next
= m0
; /* concatenate data to control */
3315 m
->m_next
= control
;
3318 m
->m_pkthdr
.csum_data
= (int)tag
;
3321 for (n
= m
; n
; n
= n
->m_next
)
3322 sballoc(&ssb
->sb
, n
);
3324 if (ssb
->ssb_mb
== NULL
) {
3325 inp
->sctp_vtag_first
= tag
;
3327 if ((n
= ssb
->ssb_mb
) != NULL
) {
3328 if ((n
->m_nextpkt
!= inp
->sb_last_mpkt
) && (n
->m_nextpkt
== NULL
)) {
3329 inp
->sb_last_mpkt
= NULL
;
3331 if (inp
->sb_last_mpkt
)
3332 inp
->sb_last_mpkt
->m_nextpkt
= m
;
3334 while (n
->m_nextpkt
) {
3339 inp
->sb_last_mpkt
= m
;
3341 inp
->sb_last_mpkt
= ssb
->ssb_mb
= m
;
3342 inp
->sctp_vtag_first
= tag
;
3344 SOCKBUF_UNLOCK(ssb
);
3348 /*************HOLD THIS COMMENT FOR PATCH FILE OF
3349 *************ALTERNATE ROUTING CODE
3352 /*************HOLD THIS COMMENT FOR END OF PATCH FILE OF
3353 *************ALTERNATE ROUTING CODE
/*
 * sctp_generate_invmanparam(): allocate an mbuf holding a single SCTP
 * parameter header whose type is htons(err) -- used to build an
 * "invalid mandatory parameter" error cause (see comment below).
 * NOTE(review): the mbuf declaration, the NULL-check branch and the
 * return were lost in extraction; code kept verbatim.
 */
3357 sctp_generate_invmanparam(int err
)
3359 /* Return a MBUF with a invalid mandatory parameter */
3362 MGET(m
, MB_DONTWAIT
, MT_DATA
);
3364 struct sctp_paramhdr
*ph
;
3365 m
->m_len
= sizeof(struct sctp_paramhdr
);
3366 ph
= mtod(m
, struct sctp_paramhdr
*);
3367 ph
->param_length
= htons(sizeof(struct sctp_paramhdr
));
3368 ph
->param_type
= htons(err
);
/*
 * sctp_should_be_moved(): walk an mbuf chain for the M_PKTHDR mbuf and
 * decide whether the packet belongs to the given association, by matching
 * the association's my_vtag stashed in m_pkthdr.csum (OpenBSD) or
 * m_pkthdr.csum_data (elsewhere) -- see the in-code comment's own
 * description of this kludge.  NOTE(review): loop/return lines were lost
 * in extraction; code kept verbatim.
 */
3374 sctp_should_be_moved(struct mbuf
*this, struct sctp_association
*asoc
)
3378 * given a mbuf chain, look through it finding
3379 * the M_PKTHDR and return 1 if it belongs to
3380 * the association given. We tell this by
3381 * a kludge where we stuff the my_vtag of the asoc
3382 * into the m->m_pkthdr.csum_data/csum field.
3386 if (m
->m_flags
& M_PKTHDR
) {
3388 #if defined(__OpenBSD__)
3389 if ((u_int32_t
)m
->m_pkthdr
.csum
== asoc
->my_vtag
)
3391 if ((u_int32_t
)m
->m_pkthdr
.csum_data
== asoc
->my_vtag
)
3404 sctp_get_first_vtag_from_sb(struct socket
*so
)
3406 struct mbuf
*this, *at
;
3410 lwkt_gettoken(&so
->so_rcv
.ssb_token
);
3411 if (so
->so_rcv
.ssb_mb
) {
3413 this = so
->so_rcv
.ssb_mb
;
3416 /* get to the m_pkthdr */
3418 if (at
->m_flags
& M_PKTHDR
)
3424 /* now do we have a m_pkthdr */
3425 if (at
&& (at
->m_flags
& M_PKTHDR
)) {
3427 #if defined(__OpenBSD__)
3428 if ((u_int32_t
)at
->m_pkthdr
.csum
!= 0)
3430 if ((u_int32_t
)at
->m_pkthdr
.csum_data
!= 0)
3434 #if defined(__OpenBSD__)
3435 retval
= (u_int32_t
)at
->m_pkthdr
.csum
;
3438 (u_int32_t
)at
->m_pkthdr
.csum_data
;
3443 this = this->m_nextpkt
;
3447 lwkt_reltoken(&so
->so_rcv
.ssb_token
);
3452 sctp_grub_through_socket_buffer(struct sctp_inpcb
*inp
, struct socket
*old
,
3453 struct socket
*new, struct sctp_tcb
*stcb
)
3455 struct mbuf
**put
, **take
, *next
, *this;
3456 struct signalsockbuf
*old_sb
, *new_sb
;
3457 struct sctp_association
*asoc
;
3461 old_sb
= &old
->so_rcv
;
3462 new_sb
= &new->so_rcv
;
3463 if (old_sb
->ssb_mb
== NULL
) {
3464 /* Nothing to move */
3467 SOCKBUF_LOCK(old_sb
);
3468 SOCKBUF_LOCK(new_sb
);
3469 lwkt_gettoken(&old_sb
->ssb_token
);
3470 lwkt_gettoken(&new_sb
->ssb_token
);
3472 if (inp
->sctp_vtag_first
== asoc
->my_vtag
) {
3473 /* First one must be moved */
3475 for (mm
= old_sb
->ssb_mb
; mm
; mm
= mm
->m_next
) {
3477 * Go down the chain and fix
3478 * the space allocation of the
3481 sbfree(&old_sb
->sb
, mm
);
3482 sballoc(&new_sb
->sb
, mm
);
3484 new_sb
->ssb_mb
= old_sb
->ssb_mb
;
3485 old_sb
->ssb_mb
= new_sb
->ssb_mb
->m_nextpkt
;
3486 new_sb
->ssb_mb
->m_nextpkt
= NULL
;
3487 put
= &new_sb
->ssb_mb
->m_nextpkt
;
3490 put
= &new_sb
->ssb_mb
;
3493 take
= &old_sb
->ssb_mb
;
3494 next
= old_sb
->ssb_mb
;
3497 /* postion for next one */
3498 next
= this->m_nextpkt
;
3499 /* check the tag of this packet */
3500 if (sctp_should_be_moved(this, asoc
)) {
3501 /* yes this needs to be moved */
3503 *take
= this->m_nextpkt
;
3504 this->m_nextpkt
= NULL
;
3506 for (mm
= this; mm
; mm
= mm
->m_next
) {
3508 * Go down the chain and fix
3509 * the space allocation of the
3512 sbfree(&old_sb
->sb
, mm
);
3513 sballoc(&new_sb
->sb
, mm
);
3515 put
= &this->m_nextpkt
;
3518 /* no advance our take point. */
3519 take
= &this->m_nextpkt
;
3524 * Ok so now we must re-postion vtag_first to
3525 * match the new first one since we moved the
3528 inp
->sctp_vtag_first
= sctp_get_first_vtag_from_sb(old
);
3530 lwkt_reltoken(&new_sb
->ssb_token
);
3531 lwkt_reltoken(&old_sb
->ssb_token
);
3532 SOCKBUF_UNLOCK(old_sb
);
3533 SOCKBUF_UNLOCK(new_sb
);
3537 sctp_free_bufspace(struct sctp_tcb
*stcb
, struct sctp_association
*asoc
,
3538 struct sctp_tmit_chunk
*tp1
)
3540 if (tp1
->data
== NULL
) {
3543 #ifdef SCTP_MBCNT_LOGGING
3544 sctp_log_mbcnt(SCTP_LOG_MBCNT_DECREASE
,
3545 asoc
->total_output_queue_size
,
3547 asoc
->total_output_mbuf_queue_size
,
3550 if (asoc
->total_output_queue_size
>= tp1
->book_size
) {
3551 asoc
->total_output_queue_size
-= tp1
->book_size
;
3553 asoc
->total_output_queue_size
= 0;
3556 /* Now free the mbuf */
3557 if (asoc
->total_output_mbuf_queue_size
>= tp1
->mbcnt
) {
3558 asoc
->total_output_mbuf_queue_size
-= tp1
->mbcnt
;
3560 asoc
->total_output_mbuf_queue_size
= 0;
3562 if ((stcb
->sctp_ep
->sctp_flags
& SCTP_PCB_FLAGS_TCPTYPE
) ||
3563 (stcb
->sctp_ep
->sctp_flags
& SCTP_PCB_FLAGS_IN_TCPPOOL
)) {
3564 if (stcb
->sctp_socket
->so_snd
.ssb_cc
>= tp1
->book_size
) {
3565 stcb
->sctp_socket
->so_snd
.ssb_cc
-= tp1
->book_size
;
3567 stcb
->sctp_socket
->so_snd
.ssb_cc
= 0;
3570 if (stcb
->sctp_socket
->so_snd
.ssb_mbcnt
>= tp1
->mbcnt
) {
3571 stcb
->sctp_socket
->so_snd
.ssb_mbcnt
-= tp1
->mbcnt
;
3573 stcb
->sctp_socket
->so_snd
.ssb_mbcnt
= 0;
3579 sctp_release_pr_sctp_chunk(struct sctp_tcb
*stcb
, struct sctp_tmit_chunk
*tp1
,
3580 int reason
, struct sctpchunk_listhead
*queue
)
3584 uint8_t foundeom
= 0;
3587 ret_sz
+= tp1
->book_size
;
3588 tp1
->sent
= SCTP_FORWARD_TSN_SKIP
;
3590 sctp_free_bufspace(stcb
, &stcb
->asoc
, tp1
);
3591 sctp_ulp_notify(SCTP_NOTIFY_DG_FAIL
, stcb
, reason
, tp1
);
3592 sctp_m_freem(tp1
->data
);
3594 sctp_sowwakeup(stcb
->sctp_ep
, stcb
->sctp_socket
);
3596 if (tp1
->flags
& SCTP_PR_SCTP_BUFFER
) {
3597 stcb
->asoc
.sent_queue_cnt_removeable
--;
3599 if (queue
== &stcb
->asoc
.send_queue
) {
3600 TAILQ_REMOVE(&stcb
->asoc
.send_queue
, tp1
, sctp_next
);
3601 /* on to the sent queue */
3602 TAILQ_INSERT_TAIL(&stcb
->asoc
.sent_queue
, tp1
,
3604 stcb
->asoc
.sent_queue_cnt
++;
3606 if ((tp1
->rec
.data
.rcv_flags
& SCTP_DATA_NOT_FRAG
) ==
3607 SCTP_DATA_NOT_FRAG
) {
3608 /* not frag'ed we ae done */
3611 } else if (tp1
->rec
.data
.rcv_flags
& SCTP_DATA_LAST_FRAG
) {
3612 /* end of frag, we are done */
3616 /* Its a begin or middle piece, we must mark all of it */
3618 tp1
= TAILQ_NEXT(tp1
, sctp_next
);
3620 } while (tp1
&& notdone
);
3621 if ((foundeom
== 0) && (queue
== &stcb
->asoc
.sent_queue
)) {
3623 * The multi-part message was scattered
3624 * across the send and sent queue.
3626 tp1
= TAILQ_FIRST(&stcb
->asoc
.send_queue
);
3628 * recurse throught the send_queue too, starting at the
3632 ret_sz
+= sctp_release_pr_sctp_chunk(stcb
, tp1
, reason
,
3633 &stcb
->asoc
.send_queue
);
3635 kprintf("hmm, nothing on the send queue and no EOM?\n");
3642 * checks to see if the given address, sa, is one that is currently
3643 * known by the kernel
3644 * note: can't distinguish the same address on multiple interfaces and
3645 * doesn't handle multiple addresses with different zone/scope id's
3646 * note: ifa_ifwithaddr() compares the entire sockaddr struct
3649 sctp_find_ifa_by_addr(struct sockaddr
*sa
)
3653 /* go through all our known interfaces */
3654 TAILQ_FOREACH(ifn
, &ifnet
, if_list
) {
3655 struct ifaddr_container
*ifac
;
3657 /* go through each interface addresses */
3658 TAILQ_FOREACH(ifac
, &ifn
->if_addrheads
[mycpuid
], ifa_link
) {
3659 struct ifaddr
*ifa
= ifac
->ifa
;
3661 /* correct family? */
3662 if (ifa
->ifa_addr
->sa_family
!= sa
->sa_family
)
3666 if (ifa
->ifa_addr
->sa_family
== AF_INET6
) {
3668 struct sockaddr_in6
*sin1
, *sin2
, sin6_tmp
;
3669 sin1
= (struct sockaddr_in6
*)ifa
->ifa_addr
;
3670 if (IN6_IS_SCOPE_LINKLOCAL(&sin1
->sin6_addr
)) {
3671 /* create a copy and clear scope */
3672 memcpy(&sin6_tmp
, sin1
,
3673 sizeof(struct sockaddr_in6
));
3675 in6_clearscope(&sin1
->sin6_addr
);
3677 sin2
= (struct sockaddr_in6
*)sa
;
3678 if (memcmp(&sin1
->sin6_addr
, &sin2
->sin6_addr
,
3679 sizeof(struct in6_addr
)) == 0) {
3685 if (ifa
->ifa_addr
->sa_family
== AF_INET
) {
3687 struct sockaddr_in
*sin1
, *sin2
;
3688 sin1
= (struct sockaddr_in
*)ifa
->ifa_addr
;
3689 sin2
= (struct sockaddr_in
*)sa
;
3690 if (sin1
->sin_addr
.s_addr
==
3691 sin2
->sin_addr
.s_addr
) {
3696 /* else, not AF_INET or AF_INET6, so skip */
3697 } /* end foreach ifa */
3698 } /* end foreach ifn */
3706 * here we hack in a fix for Apple's m_copym for the case where the first mbuf
3707 * in the chain is a M_PKTHDR and the length is zero
/*
 * sctp_pkthdr_fix(): when the leading M_PKTHDR mbuf has zero length, pull
 * sizeof(long) bytes of data forward from the second mbuf into it and
 * adjust both mbufs' m_data/m_len, so m_copym behaves (Apple workaround
 * per the comment above).  NOTE(review): the m_nxt assignment and early
 * returns were lost in extraction; code kept verbatim.
 */
3710 sctp_pkthdr_fix(struct mbuf
*m
)
3714 if ((m
->m_flags
& M_PKTHDR
) == 0) {
3719 if (m
->m_len
!= 0) {
3720 /* not a zero length PKTHDR mbuf */
3724 /* let's move in a word into the first mbuf... yes, ugly! */
3726 if (m_nxt
== NULL
) {
3727 /* umm... not a very useful mbuf chain... */
3730 if ((size_t)m_nxt
->m_len
> sizeof(long)) {
3731 /* move over a long */
3732 bcopy(mtod(m_nxt
, caddr_t
), mtod(m
, caddr_t
), sizeof(long));
3733 /* update mbuf data pointers and lengths */
3734 m
->m_len
+= sizeof(long);
3735 m_nxt
->m_data
+= sizeof(long);
3736 m_nxt
->m_len
-= sizeof(long);
/*
 * sctp_m_copym(): thin inline wrapper delegating straight to m_copym()
 * (the sctp_pkthdr_fix() call that presumably precedes this on Apple was
 * lost in extraction -- TODO confirm against upstream KAME source).
 */
3740 inline struct mbuf
*
3741 sctp_m_copym(struct mbuf
*m
, int off
, int len
, int wait
)
3744 return (m_copym(m
, off
, len
, wait
));
3746 #endif /* __APPLE__ */