arcmsr(4): Use MSI if it is supported by the device.
[dragonfly.git] / sys / netinet / sctputil.c
blob00f6d52536744316a54b8825e4a2b5398f7f8a5b
1 /* $KAME: sctputil.c,v 1.36 2005/03/06 16:04:19 itojun Exp $ */
3 /*
4 * Copyright (c) 2001, 2002, 2003, 2004 Cisco Systems, Inc.
5 * All rights reserved.
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions
9 * are met:
10 * 1. Redistributions of source code must retain the above copyright
11 * notice, this list of conditions and the following disclaimer.
12 * 2. Redistributions in binary form must reproduce the above copyright
13 * notice, this list of conditions and the following disclaimer in the
14 * documentation and/or other materials provided with the distribution.
15 * 3. All advertising materials mentioning features or use of this software
16 * must display the following acknowledgement:
17 * This product includes software developed by Cisco Systems, Inc.
18 * 4. Neither the name of the project nor the names of its contributors
19 * may be used to endorse or promote products derived from this software
20 * without specific prior written permission.
22 * THIS SOFTWARE IS PROVIDED BY CISCO SYSTEMS AND CONTRIBUTORS ``AS IS'' AND
23 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
24 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
25 * ARE DISCLAIMED. IN NO EVENT SHALL CISCO SYSTEMS OR CONTRIBUTORS BE LIABLE
26 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
27 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
28 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
29 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
30 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
31 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
32 * SUCH DAMAGE.
35 #if !(defined(__OpenBSD__) || defined(__APPLE__))
36 #include "opt_ipsec.h"
37 #endif
38 #if defined(__FreeBSD__) || defined(__DragonFly__)
39 #include "opt_compat.h"
40 #include "opt_inet6.h"
41 #include "opt_inet.h"
42 #if !(defined(SCTP_BASE_FREEBSD) || defined(__DragonFly__))
43 #include "opt_mpath.h"
44 #endif /* SCTP_BASE_FREEBSD || __DragonFly__ */
45 #endif /* FreeBSD */
46 #if defined(__NetBSD__)
47 #include "opt_inet.h"
48 #endif
49 #ifdef __APPLE__
50 #include <sctp.h>
51 #elif !defined(__OpenBSD__)
52 #include "opt_sctp.h"
53 #endif
55 #include <sys/param.h>
56 #include <sys/systm.h>
57 #include <sys/malloc.h>
58 #include <sys/mbuf.h>
59 #include <sys/domain.h>
60 #include <sys/protosw.h>
61 #include <sys/socket.h>
62 #include <sys/socketvar.h>
63 #include <sys/proc.h>
64 #include <sys/kernel.h>
65 #include <sys/sysctl.h>
66 #include <sys/thread2.h>
68 #if defined(__FreeBSD__) || defined(__DragonFly__)
69 #include <sys/callout.h>
70 #else
71 #include <netinet/sctp_callout.h> /* for callout_active() */
72 #endif
74 #include <net/radix.h>
75 #include <net/route.h>
77 #ifdef INET6
78 #ifndef __OpenBSD__
79 #include <sys/domain.h>
80 #endif
81 #endif
83 #if (defined(__FreeBSD__) && __FreeBSD_version >= 500000)
84 #include <sys/limits.h>
85 #else
86 #include <machine/limits.h>
87 #endif
89 #include <net/if.h>
90 #include <net/if_types.h>
91 #include <net/route.h>
93 #include <netinet/in.h>
94 #include <netinet/in_systm.h>
95 #include <netinet/ip.h>
96 #include <netinet/in_pcb.h>
97 #include <netinet/in_var.h>
98 #include <netinet/ip_var.h>
100 #ifdef INET6
101 #include <netinet/ip6.h>
102 #include <netinet6/ip6_var.h>
104 #if defined(__FreeBSD__) || defined(__NetBSD__) || defined(__APPLE__) || defined(__DragonFly__)
105 #include <netinet6/in6_pcb.h>
106 #elif defined(__OpenBSD__)
107 #include <netinet/in_pcb.h>
108 #endif
110 #endif /* INET6 */
112 #include <netinet/sctp_pcb.h>
114 #ifdef IPSEC
115 #ifndef __OpenBSD__
116 #include <netinet6/ipsec.h>
117 #include <netproto/key/key.h>
118 #else
119 #undef IPSEC
120 #endif
121 #endif /* IPSEC */
123 #include <netinet/sctputil.h>
124 #include <netinet/sctp_var.h>
125 #ifdef INET6
126 #include <netinet6/sctp6_var.h>
127 #endif
128 #include <netinet/sctp_header.h>
129 #include <netinet/sctp_output.h>
130 #include <netinet/sctp_hashdriver.h>
131 #include <netinet/sctp_uio.h>
132 #include <netinet/sctp_timer.h>
133 #include <netinet/sctp_crc32.h>
134 #include <netinet/sctp_indata.h> /* for sctp_deliver_data() */
135 #define NUMBER_OF_MTU_SIZES 18
137 #ifdef SCTP_DEBUG
138 extern u_int32_t sctp_debug_on;
139 #endif
141 #ifdef SCTP_STAT_LOGGING
142 int sctp_cwnd_log_at=0;
143 int sctp_cwnd_log_rolled=0;
144 struct sctp_cwnd_log sctp_clog[SCTP_STAT_LOG_SIZE];
146 void
147 sctp_clr_stat_log(void)
149 sctp_cwnd_log_at=0;
150 sctp_cwnd_log_rolled=0;
153 void
154 sctp_log_strm_del_alt(u_int32_t tsn, u_int16_t sseq, int from)
157 sctp_clog[sctp_cwnd_log_at].from = (u_int8_t)from;
158 sctp_clog[sctp_cwnd_log_at].event_type = (u_int8_t)SCTP_LOG_EVENT_STRM;
159 sctp_clog[sctp_cwnd_log_at].x.strlog.n_tsn = tsn;
160 sctp_clog[sctp_cwnd_log_at].x.strlog.n_sseq = sseq;
161 sctp_clog[sctp_cwnd_log_at].x.strlog.e_tsn = 0;
162 sctp_clog[sctp_cwnd_log_at].x.strlog.e_sseq = 0;
163 sctp_cwnd_log_at++;
164 if (sctp_cwnd_log_at >= SCTP_STAT_LOG_SIZE) {
165 sctp_cwnd_log_at = 0;
166 sctp_cwnd_log_rolled = 1;
172 void
173 sctp_log_map(uint32_t map, uint32_t cum, uint32_t high, int from)
176 sctp_clog[sctp_cwnd_log_at].from = (u_int8_t)from;
177 sctp_clog[sctp_cwnd_log_at].event_type = (u_int8_t)SCTP_LOG_EVENT_MAP;
178 sctp_clog[sctp_cwnd_log_at].x.map.base = map;
179 sctp_clog[sctp_cwnd_log_at].x.map.cum = cum;
180 sctp_clog[sctp_cwnd_log_at].x.map.high = high;
181 sctp_cwnd_log_at++;
182 if (sctp_cwnd_log_at >= SCTP_STAT_LOG_SIZE) {
183 sctp_cwnd_log_at = 0;
184 sctp_cwnd_log_rolled = 1;
188 void
189 sctp_log_fr(uint32_t biggest_tsn, uint32_t biggest_new_tsn, uint32_t tsn,
190 int from)
193 sctp_clog[sctp_cwnd_log_at].from = (u_int8_t)from;
194 sctp_clog[sctp_cwnd_log_at].event_type = (u_int8_t)SCTP_LOG_EVENT_FR;
195 sctp_clog[sctp_cwnd_log_at].x.fr.largest_tsn = biggest_tsn;
196 sctp_clog[sctp_cwnd_log_at].x.fr.largest_new_tsn = biggest_new_tsn;
197 sctp_clog[sctp_cwnd_log_at].x.fr.tsn = tsn;
198 sctp_cwnd_log_at++;
199 if (sctp_cwnd_log_at >= SCTP_STAT_LOG_SIZE) {
200 sctp_cwnd_log_at = 0;
201 sctp_cwnd_log_rolled = 1;
205 void
206 sctp_log_strm_del(struct sctp_tmit_chunk *chk, struct sctp_tmit_chunk *poschk,
207 int from)
210 if (chk == NULL) {
211 kprintf("Gak log of NULL?\n");
212 return;
214 sctp_clog[sctp_cwnd_log_at].from = (u_int8_t)from;
215 sctp_clog[sctp_cwnd_log_at].event_type = (u_int8_t)SCTP_LOG_EVENT_STRM;
216 sctp_clog[sctp_cwnd_log_at].x.strlog.n_tsn = chk->rec.data.TSN_seq;
217 sctp_clog[sctp_cwnd_log_at].x.strlog.n_sseq = chk->rec.data.stream_seq;
218 if (poschk != NULL) {
219 sctp_clog[sctp_cwnd_log_at].x.strlog.e_tsn =
220 poschk->rec.data.TSN_seq;
221 sctp_clog[sctp_cwnd_log_at].x.strlog.e_sseq =
222 poschk->rec.data.stream_seq;
223 } else {
224 sctp_clog[sctp_cwnd_log_at].x.strlog.e_tsn = 0;
225 sctp_clog[sctp_cwnd_log_at].x.strlog.e_sseq = 0;
227 sctp_cwnd_log_at++;
228 if (sctp_cwnd_log_at >= SCTP_STAT_LOG_SIZE) {
229 sctp_cwnd_log_at = 0;
230 sctp_cwnd_log_rolled = 1;
234 void
235 sctp_log_cwnd(struct sctp_nets *net, int augment, uint8_t from)
238 sctp_clog[sctp_cwnd_log_at].from = (u_int8_t)from;
239 sctp_clog[sctp_cwnd_log_at].event_type = (u_int8_t)SCTP_LOG_EVENT_CWND;
240 sctp_clog[sctp_cwnd_log_at].x.cwnd.net = net;
241 sctp_clog[sctp_cwnd_log_at].x.cwnd.cwnd_new_value = net->cwnd;
242 sctp_clog[sctp_cwnd_log_at].x.cwnd.inflight = net->flight_size;
243 sctp_clog[sctp_cwnd_log_at].x.cwnd.cwnd_augment = augment;
244 sctp_cwnd_log_at++;
245 if (sctp_cwnd_log_at >= SCTP_STAT_LOG_SIZE) {
246 sctp_cwnd_log_at = 0;
247 sctp_cwnd_log_rolled = 1;
251 void
252 sctp_log_maxburst(struct sctp_nets *net, int error, int burst, uint8_t from)
254 sctp_clog[sctp_cwnd_log_at].from = (u_int8_t)from;
255 sctp_clog[sctp_cwnd_log_at].event_type = (u_int8_t)SCTP_LOG_EVENT_MAXBURST;
256 sctp_clog[sctp_cwnd_log_at].x.cwnd.net = net;
257 sctp_clog[sctp_cwnd_log_at].x.cwnd.cwnd_new_value = error;
258 sctp_clog[sctp_cwnd_log_at].x.cwnd.inflight = net->flight_size;
259 sctp_clog[sctp_cwnd_log_at].x.cwnd.cwnd_augment = burst;
260 sctp_cwnd_log_at++;
261 if (sctp_cwnd_log_at >= SCTP_STAT_LOG_SIZE) {
262 sctp_cwnd_log_at = 0;
263 sctp_cwnd_log_rolled = 1;
267 void
268 sctp_log_rwnd(uint8_t from, u_int32_t peers_rwnd , u_int32_t snd_size, u_int32_t overhead)
270 sctp_clog[sctp_cwnd_log_at].from = (u_int8_t)from;
271 sctp_clog[sctp_cwnd_log_at].event_type = (u_int8_t)SCTP_LOG_EVENT_RWND;
272 sctp_clog[sctp_cwnd_log_at].x.rwnd.rwnd = peers_rwnd;
273 sctp_clog[sctp_cwnd_log_at].x.rwnd.send_size = snd_size;
274 sctp_clog[sctp_cwnd_log_at].x.rwnd.overhead = overhead;
275 sctp_clog[sctp_cwnd_log_at].x.rwnd.new_rwnd = 0;
276 sctp_cwnd_log_at++;
277 if (sctp_cwnd_log_at >= SCTP_STAT_LOG_SIZE) {
278 sctp_cwnd_log_at = 0;
279 sctp_cwnd_log_rolled = 1;
283 void
284 sctp_log_rwnd_set(uint8_t from, u_int32_t peers_rwnd , u_int32_t flight_size, u_int32_t overhead, u_int32_t a_rwndval)
286 sctp_clog[sctp_cwnd_log_at].from = (u_int8_t)from;
287 sctp_clog[sctp_cwnd_log_at].event_type = (u_int8_t)SCTP_LOG_EVENT_RWND;
288 sctp_clog[sctp_cwnd_log_at].x.rwnd.rwnd = peers_rwnd;
289 sctp_clog[sctp_cwnd_log_at].x.rwnd.send_size = flight_size;
290 sctp_clog[sctp_cwnd_log_at].x.rwnd.overhead = overhead;
291 sctp_clog[sctp_cwnd_log_at].x.rwnd.new_rwnd = a_rwndval;
292 sctp_cwnd_log_at++;
293 if (sctp_cwnd_log_at >= SCTP_STAT_LOG_SIZE) {
294 sctp_cwnd_log_at = 0;
295 sctp_cwnd_log_rolled = 1;
299 void
300 sctp_log_mbcnt(uint8_t from, u_int32_t total_oq , u_int32_t book, u_int32_t total_mbcnt_q, u_int32_t mbcnt)
302 sctp_clog[sctp_cwnd_log_at].from = (u_int8_t)from;
303 sctp_clog[sctp_cwnd_log_at].event_type = (u_int8_t)SCTP_LOG_EVENT_MBCNT;
304 sctp_clog[sctp_cwnd_log_at].x.mbcnt.total_queue_size = total_oq;
305 sctp_clog[sctp_cwnd_log_at].x.mbcnt.size_change = book;
306 sctp_clog[sctp_cwnd_log_at].x.mbcnt.total_queue_mb_size = total_mbcnt_q;
307 sctp_clog[sctp_cwnd_log_at].x.mbcnt.mbcnt_change = mbcnt;
308 sctp_cwnd_log_at++;
309 if (sctp_cwnd_log_at >= SCTP_STAT_LOG_SIZE) {
310 sctp_cwnd_log_at = 0;
311 sctp_cwnd_log_rolled = 1;
315 void
316 sctp_log_block(uint8_t from, struct socket *so, struct sctp_association *asoc)
319 sctp_clog[sctp_cwnd_log_at].from = (u_int8_t)from;
320 sctp_clog[sctp_cwnd_log_at].event_type = (u_int8_t)SCTP_LOG_EVENT_BLOCK;
321 sctp_clog[sctp_cwnd_log_at].x.blk.maxmb = (u_int16_t)(so->so_snd.ssb_mbmax/1024);
322 sctp_clog[sctp_cwnd_log_at].x.blk.onmb = asoc->total_output_mbuf_queue_size;
323 sctp_clog[sctp_cwnd_log_at].x.blk.maxsb = (u_int16_t)(so->so_snd.ssb_hiwat/1024);
324 sctp_clog[sctp_cwnd_log_at].x.blk.onsb = asoc->total_output_queue_size;
325 sctp_clog[sctp_cwnd_log_at].x.blk.send_sent_qcnt = (u_int16_t)(asoc->send_queue_cnt + asoc->sent_queue_cnt);
326 sctp_clog[sctp_cwnd_log_at].x.blk.stream_qcnt = (u_int16_t)asoc->stream_queue_cnt;
327 sctp_cwnd_log_at++;
328 if (sctp_cwnd_log_at >= SCTP_STAT_LOG_SIZE) {
329 sctp_cwnd_log_at = 0;
330 sctp_cwnd_log_rolled = 1;
335 sctp_fill_stat_log(struct mbuf *m)
337 struct sctp_cwnd_log_req *req;
338 int size_limit, num, i, at, cnt_out=0;
340 if (m == NULL)
341 return (EINVAL);
343 size_limit = (m->m_len - sizeof(struct sctp_cwnd_log_req));
344 if (size_limit < sizeof(struct sctp_cwnd_log)) {
345 return (EINVAL);
347 req = mtod(m, struct sctp_cwnd_log_req *);
348 num = size_limit/sizeof(struct sctp_cwnd_log);
349 if (sctp_cwnd_log_rolled) {
350 req->num_in_log = SCTP_STAT_LOG_SIZE;
351 } else {
352 req->num_in_log = sctp_cwnd_log_at;
353 /* if the log has not rolled, we don't
354 * let you have old data.
356 if (req->end_at > sctp_cwnd_log_at) {
357 req->end_at = sctp_cwnd_log_at;
360 if ((num < SCTP_STAT_LOG_SIZE) &&
361 ((sctp_cwnd_log_rolled) || (sctp_cwnd_log_at > num))) {
362 /* we can't return all of it */
363 if (((req->start_at == 0) && (req->end_at == 0)) ||
364 (req->start_at >= SCTP_STAT_LOG_SIZE) ||
365 (req->end_at >= SCTP_STAT_LOG_SIZE)) {
366 /* No user request or user is wacked. */
367 req->num_ret = num;
368 req->end_at = sctp_cwnd_log_at - 1;
369 if ((sctp_cwnd_log_at - num) < 0) {
370 int cc;
371 cc = num - sctp_cwnd_log_at;
372 req->start_at = SCTP_STAT_LOG_SIZE - cc;
373 } else {
374 req->start_at = sctp_cwnd_log_at - num;
376 } else {
377 /* a user request */
378 int cc;
379 if (req->start_at > req->end_at) {
380 cc = (SCTP_STAT_LOG_SIZE - req->start_at) +
381 (req->end_at + 1);
382 } else {
384 cc = req->end_at - req->start_at;
386 if (cc < num) {
387 num = cc;
389 req->num_ret = num;
391 } else {
392 /* We can return all of it */
393 req->start_at = 0;
394 req->end_at = sctp_cwnd_log_at - 1;
395 req->num_ret = sctp_cwnd_log_at;
397 for (i = 0, at = req->start_at; i < req->num_ret; i++) {
398 req->log[i] = sctp_clog[at];
399 cnt_out++;
400 at++;
401 if (at >= SCTP_STAT_LOG_SIZE)
402 at = 0;
404 m->m_len = (cnt_out * sizeof(struct sctp_cwnd_log_req)) + sizeof(struct sctp_cwnd_log_req);
405 return (0);
408 #endif
410 #ifdef SCTP_AUDITING_ENABLED
411 u_int8_t sctp_audit_data[SCTP_AUDIT_SIZE][2];
412 static int sctp_audit_indx = 0;
414 static void
415 sctp_print_audit_report(void)
417 int i;
418 int cnt;
419 cnt = 0;
420 for (i=sctp_audit_indx;i<SCTP_AUDIT_SIZE;i++) {
421 if ((sctp_audit_data[i][0] == 0xe0) &&
422 (sctp_audit_data[i][1] == 0x01)) {
423 cnt = 0;
424 kprintf("\n");
425 } else if (sctp_audit_data[i][0] == 0xf0) {
426 cnt = 0;
427 kprintf("\n");
428 } else if ((sctp_audit_data[i][0] == 0xc0) &&
429 (sctp_audit_data[i][1] == 0x01)) {
430 kprintf("\n");
431 cnt = 0;
433 kprintf("%2.2x%2.2x ", (uint32_t)sctp_audit_data[i][0],
434 (uint32_t)sctp_audit_data[i][1]);
435 cnt++;
436 if ((cnt % 14) == 0)
437 kprintf("\n");
439 for (i=0;i<sctp_audit_indx;i++) {
440 if ((sctp_audit_data[i][0] == 0xe0) &&
441 (sctp_audit_data[i][1] == 0x01)) {
442 cnt = 0;
443 kprintf("\n");
444 } else if (sctp_audit_data[i][0] == 0xf0) {
445 cnt = 0;
446 kprintf("\n");
447 } else if ((sctp_audit_data[i][0] == 0xc0) &&
448 (sctp_audit_data[i][1] == 0x01)) {
449 kprintf("\n");
450 cnt = 0;
452 kprintf("%2.2x%2.2x ", (uint32_t)sctp_audit_data[i][0],
453 (uint32_t)sctp_audit_data[i][1]);
454 cnt++;
455 if ((cnt % 14) == 0)
456 kprintf("\n");
458 kprintf("\n");
461 void
462 sctp_auditing(int from, struct sctp_inpcb *inp, struct sctp_tcb *stcb,
463 struct sctp_nets *net)
465 int resend_cnt, tot_out, rep, tot_book_cnt;
466 struct sctp_nets *lnet;
467 struct sctp_tmit_chunk *chk;
469 sctp_audit_data[sctp_audit_indx][0] = 0xAA;
470 sctp_audit_data[sctp_audit_indx][1] = 0x000000ff & from;
471 sctp_audit_indx++;
472 if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
473 sctp_audit_indx = 0;
475 if (inp == NULL) {
476 sctp_audit_data[sctp_audit_indx][0] = 0xAF;
477 sctp_audit_data[sctp_audit_indx][1] = 0x01;
478 sctp_audit_indx++;
479 if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
480 sctp_audit_indx = 0;
482 return;
484 if (stcb == NULL) {
485 sctp_audit_data[sctp_audit_indx][0] = 0xAF;
486 sctp_audit_data[sctp_audit_indx][1] = 0x02;
487 sctp_audit_indx++;
488 if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
489 sctp_audit_indx = 0;
491 return;
493 sctp_audit_data[sctp_audit_indx][0] = 0xA1;
494 sctp_audit_data[sctp_audit_indx][1] =
495 (0x000000ff & stcb->asoc.sent_queue_retran_cnt);
496 sctp_audit_indx++;
497 if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
498 sctp_audit_indx = 0;
500 rep = 0;
501 tot_book_cnt = 0;
502 resend_cnt = tot_out = 0;
503 TAILQ_FOREACH(chk, &stcb->asoc.sent_queue, sctp_next) {
504 if (chk->sent == SCTP_DATAGRAM_RESEND) {
505 resend_cnt++;
506 } else if (chk->sent < SCTP_DATAGRAM_RESEND) {
507 tot_out += chk->book_size;
508 tot_book_cnt++;
511 if (resend_cnt != stcb->asoc.sent_queue_retran_cnt) {
512 sctp_audit_data[sctp_audit_indx][0] = 0xAF;
513 sctp_audit_data[sctp_audit_indx][1] = 0xA1;
514 sctp_audit_indx++;
515 if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
516 sctp_audit_indx = 0;
518 kprintf("resend_cnt:%d asoc-tot:%d\n",
519 resend_cnt, stcb->asoc.sent_queue_retran_cnt);
520 rep = 1;
521 stcb->asoc.sent_queue_retran_cnt = resend_cnt;
522 sctp_audit_data[sctp_audit_indx][0] = 0xA2;
523 sctp_audit_data[sctp_audit_indx][1] =
524 (0x000000ff & stcb->asoc.sent_queue_retran_cnt);
525 sctp_audit_indx++;
526 if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
527 sctp_audit_indx = 0;
530 if (tot_out != stcb->asoc.total_flight) {
531 sctp_audit_data[sctp_audit_indx][0] = 0xAF;
532 sctp_audit_data[sctp_audit_indx][1] = 0xA2;
533 sctp_audit_indx++;
534 if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
535 sctp_audit_indx = 0;
537 rep = 1;
538 kprintf("tot_flt:%d asoc_tot:%d\n", tot_out,
539 (int)stcb->asoc.total_flight);
540 stcb->asoc.total_flight = tot_out;
542 if (tot_book_cnt != stcb->asoc.total_flight_count) {
543 sctp_audit_data[sctp_audit_indx][0] = 0xAF;
544 sctp_audit_data[sctp_audit_indx][1] = 0xA5;
545 sctp_audit_indx++;
546 if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
547 sctp_audit_indx = 0;
549 rep = 1;
550 kprintf("tot_flt_book:%d\n", tot_book);
552 stcb->asoc.total_flight_count = tot_book_cnt;
554 tot_out = 0;
555 TAILQ_FOREACH(lnet, &stcb->asoc.nets, sctp_next) {
556 tot_out += lnet->flight_size;
558 if (tot_out != stcb->asoc.total_flight) {
559 sctp_audit_data[sctp_audit_indx][0] = 0xAF;
560 sctp_audit_data[sctp_audit_indx][1] = 0xA3;
561 sctp_audit_indx++;
562 if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
563 sctp_audit_indx = 0;
565 rep = 1;
566 kprintf("real flight:%d net total was %d\n",
567 stcb->asoc.total_flight, tot_out);
568 /* now corrective action */
569 TAILQ_FOREACH(lnet, &stcb->asoc.nets, sctp_next) {
570 tot_out = 0;
571 TAILQ_FOREACH(chk, &stcb->asoc.sent_queue, sctp_next) {
572 if ((chk->whoTo == lnet) &&
573 (chk->sent < SCTP_DATAGRAM_RESEND)) {
574 tot_out += chk->book_size;
577 if (lnet->flight_size != tot_out) {
578 kprintf("net:%x flight was %d corrected to %d\n",
579 (uint32_t)lnet, lnet->flight_size, tot_out);
580 lnet->flight_size = tot_out;
586 if (rep) {
587 sctp_print_audit_report();
591 void
592 sctp_audit_log(u_int8_t ev, u_int8_t fd)
594 crit_enter();
595 sctp_audit_data[sctp_audit_indx][0] = ev;
596 sctp_audit_data[sctp_audit_indx][1] = fd;
597 sctp_audit_indx++;
598 if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
599 sctp_audit_indx = 0;
601 crit_exit();
604 #endif
/*
 * a list of sizes based on typical mtu's, used only if next hop
 * size not returned.  Must stay sorted ascending.  The 68-byte entry
 * (IPv4 minimum MTU) restores the first element so the table again
 * holds NUMBER_OF_MTU_SIZES (18) values.
 */
static int sctp_mtu_sizes[] = {
	68,
	296,
	508,
	512,
	544,
	576,
	1006,
	1492,
	1500,
	1536,
	2002,
	2048,
	4352,
	4464,
	8166,
	17914,
	32000,
	65535
};

/*
 * Given the size of a datagram that failed to fit, return the next
 * smaller typical MTU from the table above.  If totsz is below the
 * smallest entry, or above every entry, the smallest MTU is returned
 * (historical behavior, preserved).
 */
static int
find_next_best_mtu(int totsz)
{
	int i, perfer;

	/*
	 * if we are in here we must find the next best fit based on the
	 * size of the dg that failed to be sent.
	 */
	perfer = 0;
	/* derive the bound from the table so it can never drift out of sync */
	for (i = 0; i < (int)(sizeof(sctp_mtu_sizes) / sizeof(sctp_mtu_sizes[0])); i++) {
		if (totsz < sctp_mtu_sizes[i]) {
			perfer = i - 1;
			if (perfer < 0)
				perfer = 0;
			break;
		}
	}
	return (sctp_mtu_sizes[perfer]);
}
651 void
652 sctp_fill_random_store(struct sctp_pcb *m)
655 * Here we use the MD5/SHA-1 to hash with our good randomNumbers
656 * and our counter. The result becomes our good random numbers and
657 * we then setup to give these out. Note that we do no lockig
658 * to protect this. This is ok, since if competing folks call
659 * this we will get more gobbled gook in the random store whic
660 * is what we want. There is a danger that two guys will use
661 * the same random numbers, but thats ok too since that
662 * is random as well :->
664 m->store_at = 0;
665 sctp_hash_digest((char *)m->random_numbers, sizeof(m->random_numbers),
666 (char *)&m->random_counter, sizeof(m->random_counter),
667 (char *)m->random_store);
668 m->random_counter++;
671 uint32_t
672 sctp_select_initial_TSN(struct sctp_pcb *m)
675 * A true implementation should use random selection process to
676 * get the initial stream sequence number, using RFC1750 as a
677 * good guideline
679 u_long x, *xp;
680 uint8_t *p;
682 if (m->initial_sequence_debug != 0) {
683 u_int32_t ret;
684 ret = m->initial_sequence_debug;
685 m->initial_sequence_debug++;
686 return (ret);
688 if ((m->store_at+sizeof(u_long)) > SCTP_SIGNATURE_SIZE) {
689 /* Refill the random store */
690 sctp_fill_random_store(m);
692 p = &m->random_store[(int)m->store_at];
693 xp = (u_long *)p;
694 x = *xp;
695 m->store_at += sizeof(u_long);
696 return (x);
699 u_int32_t
700 sctp_select_a_tag(struct sctp_inpcb *m)
702 u_long x, not_done;
703 struct timeval now;
705 SCTP_GETTIME_TIMEVAL(&now);
706 not_done = 1;
707 while (not_done) {
708 x = sctp_select_initial_TSN(&m->sctp_ep);
709 if (x == 0) {
710 /* we never use 0 */
711 continue;
713 if (sctp_is_vtag_good(m, x, &now)) {
714 not_done = 0;
717 return (x);
722 sctp_init_asoc(struct sctp_inpcb *m, struct sctp_association *asoc,
723 int for_a_init, uint32_t override_tag )
726 * Anything set to zero is taken care of by the allocation
727 * routine's bzero
731 * Up front select what scoping to apply on addresses I tell my peer
732 * Not sure what to do with these right now, we will need to come up
733 * with a way to set them. We may need to pass them through from the
734 * caller in the sctp_aloc_assoc() function.
736 int i;
737 /* init all variables to a known value.*/
738 asoc->state = SCTP_STATE_INUSE;
739 asoc->max_burst = m->sctp_ep.max_burst;
740 asoc->heart_beat_delay = m->sctp_ep.sctp_timeoutticks[SCTP_TIMER_HEARTBEAT];
741 asoc->cookie_life = m->sctp_ep.def_cookie_life;
743 if (override_tag) {
744 asoc->my_vtag = override_tag;
745 } else {
746 asoc->my_vtag = sctp_select_a_tag(m);
748 asoc->asconf_seq_out = asoc->str_reset_seq_out = asoc->init_seq_number = asoc->sending_seq =
749 sctp_select_initial_TSN(&m->sctp_ep);
750 asoc->t3timeout_highest_marked = asoc->asconf_seq_out;
751 /* we are opptimisitic here */
752 asoc->peer_supports_asconf = 1;
753 asoc->peer_supports_asconf_setprim = 1;
754 asoc->peer_supports_pktdrop = 1;
756 asoc->sent_queue_retran_cnt = 0;
757 /* This will need to be adjusted */
758 asoc->last_cwr_tsn = asoc->init_seq_number - 1;
759 asoc->last_acked_seq = asoc->init_seq_number - 1;
760 asoc->advanced_peer_ack_point = asoc->last_acked_seq;
761 asoc->asconf_seq_in = asoc->last_acked_seq;
763 /* here we are different, we hold the next one we expect */
764 asoc->str_reset_seq_in = asoc->last_acked_seq + 1;
766 asoc->initial_init_rto_max = m->sctp_ep.initial_init_rto_max;
767 asoc->initial_rto = m->sctp_ep.initial_rto;
769 asoc->max_init_times = m->sctp_ep.max_init_times;
770 asoc->max_send_times = m->sctp_ep.max_send_times;
771 asoc->def_net_failure = m->sctp_ep.def_net_failure;
773 /* ECN Nonce initialization */
774 asoc->ecn_nonce_allowed = 0;
775 asoc->receiver_nonce_sum = 1;
776 asoc->nonce_sum_expect_base = 1;
777 asoc->nonce_sum_check = 1;
778 asoc->nonce_resync_tsn = 0;
779 asoc->nonce_wait_for_ecne = 0;
780 asoc->nonce_wait_tsn = 0;
782 if (m->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) {
783 struct in6pcb *inp6;
786 /* Its a V6 socket */
787 inp6 = (struct in6pcb *)m;
788 asoc->ipv6_addr_legal = 1;
789 /* Now look at the binding flag to see if V4 will be legal */
790 if (
791 #if defined(__OpenBSD__)
792 (0) /* we always do dual bind */
793 #elif defined (__NetBSD__)
794 (inp6->in6p_flags & IN6P_IPV6_V6ONLY)
795 #else
796 (inp6->inp_flags & IN6P_IPV6_V6ONLY)
797 #endif
798 == 0) {
799 asoc->ipv4_addr_legal = 1;
800 } else {
801 /* V4 addresses are NOT legal on the association */
802 asoc->ipv4_addr_legal = 0;
804 } else {
805 /* Its a V4 socket, no - V6 */
806 asoc->ipv4_addr_legal = 1;
807 asoc->ipv6_addr_legal = 0;
811 asoc->my_rwnd = max(m->sctp_socket->so_rcv.ssb_hiwat, SCTP_MINIMAL_RWND);
812 asoc->peers_rwnd = m->sctp_socket->so_rcv.ssb_hiwat;
814 asoc->smallest_mtu = m->sctp_frag_point;
815 asoc->minrto = m->sctp_ep.sctp_minrto;
816 asoc->maxrto = m->sctp_ep.sctp_maxrto;
818 LIST_INIT(&asoc->sctp_local_addr_list);
819 TAILQ_INIT(&asoc->nets);
820 TAILQ_INIT(&asoc->pending_reply_queue);
821 asoc->last_asconf_ack_sent = NULL;
822 /* Setup to fill the hb random cache at first HB */
823 asoc->hb_random_idx = 4;
825 asoc->sctp_autoclose_ticks = m->sctp_ep.auto_close_time;
828 * Now the stream parameters, here we allocate space for all
829 * streams that we request by default.
831 asoc->streamoutcnt = asoc->pre_open_streams =
832 m->sctp_ep.pre_open_stream_count;
833 asoc->strmout = kmalloc(asoc->streamoutcnt * sizeof(struct sctp_stream_out),
834 M_PCB, M_NOWAIT);
835 if (asoc->strmout == NULL) {
836 /* big trouble no memory */
837 return (ENOMEM);
839 for (i = 0; i < asoc->streamoutcnt; i++) {
841 * inbound side must be set to 0xffff,
842 * also NOTE when we get the INIT-ACK back (for INIT sender)
843 * we MUST reduce the count (streamoutcnt) but first check
844 * if we sent to any of the upper streams that were dropped
845 * (if some were). Those that were dropped must be notified
846 * to the upper layer as failed to send.
848 asoc->strmout[i].next_sequence_sent = 0x0;
849 TAILQ_INIT(&asoc->strmout[i].outqueue);
850 asoc->strmout[i].stream_no = i;
851 asoc->strmout[i].next_spoke.tqe_next = 0;
852 asoc->strmout[i].next_spoke.tqe_prev = 0;
854 /* Now the mapping array */
855 asoc->mapping_array_size = SCTP_INITIAL_MAPPING_ARRAY;
856 #ifdef __NetBSD__
857 asoc->mapping_array = kmalloc(SCTP_INITIAL_MAPPING_ARRAY, M_PCB,
858 M_NOWAIT);
859 #else
860 asoc->mapping_array = kmalloc(asoc->mapping_array_size, M_PCB,
861 M_NOWAIT);
862 #endif
863 if (asoc->mapping_array == NULL) {
864 kfree(asoc->strmout, M_PCB);
865 return (ENOMEM);
867 memset(asoc->mapping_array, 0, asoc->mapping_array_size);
868 /* Now the init of the other outqueues */
869 TAILQ_INIT(&asoc->out_wheel);
870 TAILQ_INIT(&asoc->control_send_queue);
871 TAILQ_INIT(&asoc->send_queue);
872 TAILQ_INIT(&asoc->sent_queue);
873 TAILQ_INIT(&asoc->reasmqueue);
874 TAILQ_INIT(&asoc->delivery_queue);
875 asoc->max_inbound_streams = m->sctp_ep.max_open_streams_intome;
877 TAILQ_INIT(&asoc->asconf_queue);
878 return (0);
882 sctp_expand_mapping_array(struct sctp_association *asoc)
884 /* mapping array needs to grow */
885 u_int8_t *new_array;
886 uint16_t new_size;
888 new_size = asoc->mapping_array_size + SCTP_MAPPING_ARRAY_INCR;
889 #ifdef __NetBSD__
890 new_array = kmalloc(asoc->mapping_array_size + SCTP_MAPPING_ARRAY_INCR,
891 M_PCB, M_NOWAIT);
892 #else
893 new_array = kmalloc(new_size, M_PCB, M_NOWAIT);
894 #endif
895 if (new_array == NULL) {
896 /* can't get more, forget it */
897 kprintf("No memory for expansion of SCTP mapping array %d\n",
898 new_size);
899 return (-1);
901 memset(new_array, 0, new_size);
902 memcpy(new_array, asoc->mapping_array, asoc->mapping_array_size);
903 kfree(asoc->mapping_array, M_PCB);
904 asoc->mapping_array = new_array;
905 asoc->mapping_array_size = new_size;
906 return (0);
909 static void
910 sctp_timeout_handler(void *t)
912 struct sctp_inpcb *inp;
913 struct sctp_tcb *stcb;
914 struct sctp_nets *net;
915 struct sctp_timer *tmr;
916 int did_output, typ;
917 #if defined(__APPLE__)
918 boolean_t funnel_state;
920 /* get BSD kernel funnel/mutex */
921 funnel_state = thread_funnel_set(network_flock, TRUE);
922 #endif
924 crit_enter();
925 tmr = (struct sctp_timer *)t;
926 inp = (struct sctp_inpcb *)tmr->ep;
927 stcb = (struct sctp_tcb *)tmr->tcb;
928 net = (struct sctp_nets *)tmr->net;
929 did_output = 1;
932 #ifdef SCTP_AUDITING_ENABLED
933 sctp_audit_log(0xF0, (u_int8_t)tmr->type);
934 sctp_auditing(3, inp, stcb, net);
935 #endif
936 sctp_pegs[SCTP_TIMERS_EXP]++;
938 if (inp == NULL) {
939 crit_exit();
940 return;
943 SCTP_INP_WLOCK(inp);
944 if (inp->sctp_socket == 0) {
945 crit_exit();
946 #if defined(__APPLE__)
947 /* release BSD kernel funnel/mutex */
948 thread_funnel_set(network_flock, FALSE);
949 #endif
950 SCTP_INP_WUNLOCK(inp);
951 return;
953 if (stcb) {
954 if (stcb->asoc.state == 0) {
955 crit_exit();
956 #if defined(__APPLE__)
957 /* release BSD kernel funnel/mutex */
958 thread_funnel_set(network_flock, FALSE);
959 #endif
960 SCTP_INP_WUNLOCK(inp);
961 return;
964 #ifdef SCTP_DEBUG
965 if (sctp_debug_on & SCTP_DEBUG_TIMER1) {
966 kprintf("Timer type %d goes off\n", tmr->type);
968 #endif /* SCTP_DEBUG */
969 #ifndef __NetBSD__
970 if (!callout_active(&tmr->timer)) {
971 crit_exit();
972 #if defined(__APPLE__)
973 /* release BSD kernel funnel/mutex */
974 thread_funnel_set(network_flock, FALSE);
975 #endif
976 SCTP_INP_WUNLOCK(inp);
977 return;
979 #endif
980 #if defined(__APPLE__)
981 /* clear the callout pending status here */
982 callout_stop(&tmr->timer);
983 #endif
984 if (stcb) {
985 SCTP_TCB_LOCK(stcb);
987 SCTP_INP_INCR_REF(inp);
988 SCTP_INP_WUNLOCK(inp);
990 typ = tmr->type;
991 switch (tmr->type) {
992 case SCTP_TIMER_TYPE_ITERATOR:
994 struct sctp_iterator *it;
995 it = (struct sctp_iterator *)inp;
996 sctp_iterator_timer(it);
998 break;
999 /* call the handler for the appropriate timer type */
1000 case SCTP_TIMER_TYPE_SEND:
1001 sctp_pegs[SCTP_TMIT_TIMER]++;
1002 stcb->asoc.num_send_timers_up--;
1003 if (stcb->asoc.num_send_timers_up < 0) {
1004 stcb->asoc.num_send_timers_up = 0;
1006 if (sctp_t3rxt_timer(inp, stcb, net)) {
1007 /* no need to unlock on tcb its gone */
1009 goto out_decr;
1011 #ifdef SCTP_AUDITING_ENABLED
1012 sctp_auditing(4, inp, stcb, net);
1013 #endif
1014 sctp_chunk_output(inp, stcb, 1);
1015 if ((stcb->asoc.num_send_timers_up == 0) &&
1016 (stcb->asoc.sent_queue_cnt > 0)
1018 struct sctp_tmit_chunk *chk;
1020 * safeguard. If there on some on the sent queue
1021 * somewhere but no timers running something is
1022 * wrong... so we start a timer on the first chunk
1023 * on the send queue on whatever net it is sent to.
1025 sctp_pegs[SCTP_T3_SAFEGRD]++;
1026 chk = TAILQ_FIRST(&stcb->asoc.sent_queue);
1027 sctp_timer_start(SCTP_TIMER_TYPE_SEND, inp, stcb,
1028 chk->whoTo);
1030 break;
1031 case SCTP_TIMER_TYPE_INIT:
1032 if (sctp_t1init_timer(inp, stcb, net)) {
1033 /* no need to unlock on tcb its gone */
1034 goto out_decr;
1036 /* We do output but not here */
1037 did_output = 0;
1038 break;
1039 case SCTP_TIMER_TYPE_RECV:
1040 sctp_pegs[SCTP_RECV_TIMER]++;
1041 sctp_send_sack(stcb);
1042 #ifdef SCTP_AUDITING_ENABLED
1043 sctp_auditing(4, inp, stcb, net);
1044 #endif
1045 sctp_chunk_output(inp, stcb, 4);
1046 break;
1047 case SCTP_TIMER_TYPE_SHUTDOWN:
1048 if (sctp_shutdown_timer(inp, stcb, net) ) {
1049 /* no need to unlock on tcb its gone */
1050 goto out_decr;
1052 #ifdef SCTP_AUDITING_ENABLED
1053 sctp_auditing(4, inp, stcb, net);
1054 #endif
1055 sctp_chunk_output(inp, stcb, 5);
1056 break;
1057 case SCTP_TIMER_TYPE_HEARTBEAT:
1058 if (sctp_heartbeat_timer(inp, stcb, net)) {
1059 /* no need to unlock on tcb its gone */
1060 goto out_decr;
1062 #ifdef SCTP_AUDITING_ENABLED
1063 sctp_auditing(4, inp, stcb, net);
1064 #endif
1065 sctp_chunk_output(inp, stcb, 6);
1066 break;
1067 case SCTP_TIMER_TYPE_COOKIE:
1068 if (sctp_cookie_timer(inp, stcb, net)) {
1069 /* no need to unlock on tcb its gone */
1070 goto out_decr;
1072 #ifdef SCTP_AUDITING_ENABLED
1073 sctp_auditing(4, inp, stcb, net);
1074 #endif
1075 sctp_chunk_output(inp, stcb, 1);
1076 break;
1077 case SCTP_TIMER_TYPE_NEWCOOKIE:
1079 struct timeval tv;
1080 int i, secret;
1081 SCTP_GETTIME_TIMEVAL(&tv);
1082 SCTP_INP_WLOCK(inp);
1083 inp->sctp_ep.time_of_secret_change = tv.tv_sec;
1084 inp->sctp_ep.last_secret_number =
1085 inp->sctp_ep.current_secret_number;
1086 inp->sctp_ep.current_secret_number++;
1087 if (inp->sctp_ep.current_secret_number >=
1088 SCTP_HOW_MANY_SECRETS) {
1089 inp->sctp_ep.current_secret_number = 0;
1091 secret = (int)inp->sctp_ep.current_secret_number;
1092 for (i = 0; i < SCTP_NUMBER_OF_SECRETS; i++) {
1093 inp->sctp_ep.secret_key[secret][i] =
1094 sctp_select_initial_TSN(&inp->sctp_ep);
1096 SCTP_INP_WUNLOCK(inp);
1097 sctp_timer_start(SCTP_TIMER_TYPE_NEWCOOKIE, inp, stcb, net);
1099 did_output = 0;
1100 break;
1101 case SCTP_TIMER_TYPE_PATHMTURAISE:
1102 sctp_pathmtu_timer(inp, stcb, net);
1103 did_output = 0;
1104 break;
1105 case SCTP_TIMER_TYPE_SHUTDOWNACK:
1106 if (sctp_shutdownack_timer(inp, stcb, net)) {
1107 /* no need to unlock on tcb its gone */
1108 goto out_decr;
1110 #ifdef SCTP_AUDITING_ENABLED
1111 sctp_auditing(4, inp, stcb, net);
1112 #endif
1113 sctp_chunk_output(inp, stcb, 7);
1114 break;
1115 case SCTP_TIMER_TYPE_SHUTDOWNGUARD:
1116 sctp_abort_an_association(inp, stcb,
1117 SCTP_SHUTDOWN_GUARD_EXPIRES, NULL);
1118 /* no need to unlock on tcb its gone */
1119 goto out_decr;
1120 break;
1122 case SCTP_TIMER_TYPE_STRRESET:
1123 if (sctp_strreset_timer(inp, stcb, net)) {
1124 /* no need to unlock on tcb its gone */
1125 goto out_decr;
1127 sctp_chunk_output(inp, stcb, 9);
1128 break;
1130 case SCTP_TIMER_TYPE_ASCONF:
1131 if (sctp_asconf_timer(inp, stcb, net)) {
1132 /* no need to unlock on tcb its gone */
1133 goto out_decr;
1135 #ifdef SCTP_AUDITING_ENABLED
1136 sctp_auditing(4, inp, stcb, net);
1137 #endif
1138 sctp_chunk_output(inp, stcb, 8);
1139 break;
1141 case SCTP_TIMER_TYPE_AUTOCLOSE:
1142 sctp_autoclose_timer(inp, stcb, net);
1143 sctp_chunk_output(inp, stcb, 10);
1144 did_output = 0;
1145 break;
1146 case SCTP_TIMER_TYPE_INPKILL:
1147 /* special case, take away our
1148 * increment since WE are the killer
1150 SCTP_INP_WLOCK(inp);
1151 SCTP_INP_DECR_REF(inp);
1152 SCTP_INP_WUNLOCK(inp);
1153 sctp_timer_stop(SCTP_TIMER_TYPE_INPKILL, inp, NULL, NULL);
1154 sctp_inpcb_free(inp, 1);
1155 goto out_no_decr;
1156 break;
1157 default:
1158 #ifdef SCTP_DEBUG
1159 if (sctp_debug_on & SCTP_DEBUG_TIMER1) {
1160 kprintf("sctp_timeout_handler:unknown timer %d\n",
1161 tmr->type);
1163 #endif /* SCTP_DEBUG */
1164 break;
1166 #ifdef SCTP_AUDITING_ENABLED
1167 sctp_audit_log(0xF1, (u_int8_t)tmr->type);
1168 sctp_auditing(5, inp, stcb, net);
1169 #endif
1170 if (did_output) {
1172 * Now we need to clean up the control chunk chain if an
1173 * ECNE is on it. It must be marked as UNSENT again so next
1174 * call will continue to send it until such time that we get
1175 * a CWR, to remove it. It is, however, less likely that we
1176 * will find a ecn echo on the chain though.
1178 sctp_fix_ecn_echo(&stcb->asoc);
1180 if (stcb) {
1181 SCTP_TCB_UNLOCK(stcb);
1183 out_decr:
1184 SCTP_INP_WLOCK(inp);
1185 SCTP_INP_DECR_REF(inp);
1186 SCTP_INP_WUNLOCK(inp);
1188 out_no_decr:
1190 #ifdef SCTP_DEBUG
1191 if (sctp_debug_on & SCTP_DEBUG_TIMER1) {
1192 kprintf("Timer now complete (type %d)\n", typ);
1194 #endif /* SCTP_DEBUG */
1196 crit_exit();
1197 #if defined(__APPLE__)
1198 /* release BSD kernel funnel/mutex */
1199 thread_funnel_set(network_flock, FALSE);
1200 #endif
1204 sctp_timer_start(int t_type, struct sctp_inpcb *inp, struct sctp_tcb *stcb,
1205 struct sctp_nets *net)
1207 int to_ticks;
1208 struct sctp_timer *tmr;
1210 if (inp == NULL)
1211 return (EFAULT);
1213 to_ticks = 0;
1215 tmr = NULL;
1216 switch (t_type) {
1217 case SCTP_TIMER_TYPE_ITERATOR:
1219 struct sctp_iterator *it;
1220 it = (struct sctp_iterator *)inp;
1221 tmr = &it->tmr;
1222 to_ticks = SCTP_ITERATOR_TICKS;
1224 break;
1225 case SCTP_TIMER_TYPE_SEND:
1226 /* Here we use the RTO timer */
1228 int rto_val;
1229 if ((stcb == NULL) || (net == NULL)) {
1230 return (EFAULT);
1232 tmr = &net->rxt_timer;
1233 if (net->RTO == 0) {
1234 rto_val = stcb->asoc.initial_rto;
1235 } else {
1236 rto_val = net->RTO;
1238 to_ticks = MSEC_TO_TICKS(rto_val);
1240 break;
1241 case SCTP_TIMER_TYPE_INIT:
1243 * Here we use the INIT timer default
1244 * usually about 1 minute.
1246 if ((stcb == NULL) || (net == NULL)) {
1247 return (EFAULT);
1249 tmr = &net->rxt_timer;
1250 if (net->RTO == 0) {
1251 to_ticks = MSEC_TO_TICKS(stcb->asoc.initial_rto);
1252 } else {
1253 to_ticks = MSEC_TO_TICKS(net->RTO);
1255 break;
1256 case SCTP_TIMER_TYPE_RECV:
1258 * Here we use the Delayed-Ack timer value from the inp
1259 * ususually about 200ms.
1261 if (stcb == NULL) {
1262 return (EFAULT);
1264 tmr = &stcb->asoc.dack_timer;
1265 to_ticks = inp->sctp_ep.sctp_timeoutticks[SCTP_TIMER_RECV];
1266 break;
1267 case SCTP_TIMER_TYPE_SHUTDOWN:
1268 /* Here we use the RTO of the destination. */
1269 if ((stcb == NULL) || (net == NULL)) {
1270 return (EFAULT);
1273 if (net->RTO == 0) {
1274 to_ticks = MSEC_TO_TICKS(stcb->asoc.initial_rto);
1275 } else {
1276 to_ticks = MSEC_TO_TICKS(net->RTO);
1278 tmr = &net->rxt_timer;
1279 break;
1280 case SCTP_TIMER_TYPE_HEARTBEAT:
1282 * the net is used here so that we can add in the RTO.
1283 * Even though we use a different timer. We also add the
1284 * HB timer PLUS a random jitter.
1286 if (stcb == NULL) {
1287 return (EFAULT);
1290 uint32_t rndval;
1291 uint8_t this_random;
1292 int cnt_of_unconf=0;
1293 struct sctp_nets *lnet;
1295 TAILQ_FOREACH(lnet, &stcb->asoc.nets, sctp_next) {
1296 if (lnet->dest_state & SCTP_ADDR_UNCONFIRMED) {
1297 cnt_of_unconf++;
1300 #ifdef SCTP_DEBUG
1301 if (sctp_debug_on & SCTP_DEBUG_TIMER1) {
1302 kprintf("HB timer to start unconfirmed:%d hb_delay:%d\n",
1303 cnt_of_unconf, stcb->asoc.heart_beat_delay);
1305 #endif
1306 if (stcb->asoc.hb_random_idx > 3) {
1307 rndval = sctp_select_initial_TSN(&inp->sctp_ep);
1308 memcpy(stcb->asoc.hb_random_values, &rndval,
1309 sizeof(stcb->asoc.hb_random_values));
1310 this_random = stcb->asoc.hb_random_values[0];
1311 stcb->asoc.hb_random_idx = 0;
1312 stcb->asoc.hb_ect_randombit = 0;
1313 } else {
1314 this_random = stcb->asoc.hb_random_values[stcb->asoc.hb_random_idx];
1315 stcb->asoc.hb_random_idx++;
1316 stcb->asoc.hb_ect_randombit = 0;
1319 * this_random will be 0 - 256 ms
1320 * RTO is in ms.
1322 if ((stcb->asoc.heart_beat_delay == 0) &&
1323 (cnt_of_unconf == 0)) {
1324 /* no HB on this inp after confirmations */
1325 return (0);
1327 if (net) {
1328 struct sctp_nets *lnet;
1329 int delay;
1330 delay = stcb->asoc.heart_beat_delay;
1331 TAILQ_FOREACH(lnet, &stcb->asoc.nets, sctp_next) {
1332 if ((lnet->dest_state & SCTP_ADDR_UNCONFIRMED) &&
1333 ((lnet->dest_state & SCTP_ADDR_OUT_OF_SCOPE) == 0) &&
1334 (lnet->dest_state & SCTP_ADDR_REACHABLE)) {
1335 delay = 0;
1338 if (net->RTO == 0) {
1339 /* Never been checked */
1340 to_ticks = this_random + stcb->asoc.initial_rto + delay;
1341 } else {
1342 /* set rto_val to the ms */
1343 to_ticks = delay + net->RTO + this_random;
1345 } else {
1346 if (cnt_of_unconf) {
1347 to_ticks = this_random + stcb->asoc.initial_rto;
1348 } else {
1349 to_ticks = stcb->asoc.heart_beat_delay + this_random + stcb->asoc.initial_rto;
1353 * Now we must convert the to_ticks that are now in
1354 * ms to ticks.
1356 to_ticks *= hz;
1357 to_ticks /= 1000;
1358 #ifdef SCTP_DEBUG
1359 if (sctp_debug_on & SCTP_DEBUG_TIMER1) {
1360 kprintf("Timer to expire in %d ticks\n", to_ticks);
1362 #endif
1363 tmr = &stcb->asoc.hb_timer;
1365 break;
1366 case SCTP_TIMER_TYPE_COOKIE:
1368 * Here we can use the RTO timer from the network since
1369 * one RTT was compelete. If a retran happened then we will
1370 * be using the RTO initial value.
1372 if ((stcb == NULL) || (net == NULL)) {
1373 return (EFAULT);
1375 if (net->RTO == 0) {
1376 to_ticks = MSEC_TO_TICKS(stcb->asoc.initial_rto);
1377 } else {
1378 to_ticks = MSEC_TO_TICKS(net->RTO);
1380 tmr = &net->rxt_timer;
1381 break;
1382 case SCTP_TIMER_TYPE_NEWCOOKIE:
1384 * nothing needed but the endpoint here
1385 * ususually about 60 minutes.
1387 tmr = &inp->sctp_ep.signature_change;
1388 to_ticks = inp->sctp_ep.sctp_timeoutticks[SCTP_TIMER_SIGNATURE];
1389 break;
1390 case SCTP_TIMER_TYPE_INPKILL:
1392 * The inp is setup to die. We re-use the
1393 * signature_chage timer since that has
1394 * stopped and we are in the GONE state.
1396 tmr = &inp->sctp_ep.signature_change;
1397 to_ticks = (SCTP_INP_KILL_TIMEOUT * hz) / 1000;
1398 break;
1399 case SCTP_TIMER_TYPE_PATHMTURAISE:
1401 * Here we use the value found in the EP for PMTU
1402 * ususually about 10 minutes.
1404 if (stcb == NULL) {
1405 return (EFAULT);
1407 if (net == NULL) {
1408 return (EFAULT);
1410 to_ticks = inp->sctp_ep.sctp_timeoutticks[SCTP_TIMER_PMTU];
1411 tmr = &net->pmtu_timer;
1412 break;
1413 case SCTP_TIMER_TYPE_SHUTDOWNACK:
1414 /* Here we use the RTO of the destination */
1415 if ((stcb == NULL) || (net == NULL)) {
1416 return (EFAULT);
1418 if (net->RTO == 0) {
1419 to_ticks = MSEC_TO_TICKS(stcb->asoc.initial_rto);
1420 } else {
1421 to_ticks = MSEC_TO_TICKS(net->RTO);
1423 tmr = &net->rxt_timer;
1424 break;
1425 case SCTP_TIMER_TYPE_SHUTDOWNGUARD:
1427 * Here we use the endpoints shutdown guard timer
1428 * usually about 3 minutes.
1430 if (stcb == NULL) {
1431 return (EFAULT);
1433 to_ticks = inp->sctp_ep.sctp_timeoutticks[SCTP_TIMER_MAXSHUTDOWN];
1434 tmr = &stcb->asoc.shut_guard_timer;
1435 break;
1436 case SCTP_TIMER_TYPE_STRRESET:
1438 * Here the timer comes from the inp
1439 * but its value is from the RTO.
1441 if ((stcb == NULL) || (net == NULL)) {
1442 return (EFAULT);
1444 if (net->RTO == 0) {
1445 to_ticks = MSEC_TO_TICKS(stcb->asoc.initial_rto);
1446 } else {
1447 to_ticks = MSEC_TO_TICKS(net->RTO);
1449 tmr = &stcb->asoc.strreset_timer;
1450 break;
1452 case SCTP_TIMER_TYPE_ASCONF:
1454 * Here the timer comes from the inp
1455 * but its value is from the RTO.
1457 if ((stcb == NULL) || (net == NULL)) {
1458 return (EFAULT);
1460 if (net->RTO == 0) {
1461 to_ticks = MSEC_TO_TICKS(stcb->asoc.initial_rto);
1462 } else {
1463 to_ticks = MSEC_TO_TICKS(net->RTO);
1465 tmr = &stcb->asoc.asconf_timer;
1466 break;
1467 case SCTP_TIMER_TYPE_AUTOCLOSE:
1468 if (stcb == NULL) {
1469 return (EFAULT);
1471 if (stcb->asoc.sctp_autoclose_ticks == 0) {
1472 /* Really an error since stcb is NOT set to autoclose */
1473 return (0);
1475 to_ticks = stcb->asoc.sctp_autoclose_ticks;
1476 tmr = &stcb->asoc.autoclose_timer;
1477 break;
1478 default:
1479 #ifdef SCTP_DEBUG
1480 if (sctp_debug_on & SCTP_DEBUG_TIMER1) {
1481 kprintf("sctp_timer_start:Unknown timer type %d\n",
1482 t_type);
1484 #endif /* SCTP_DEBUG */
1485 return (EFAULT);
1486 break;
1488 if ((to_ticks <= 0) || (tmr == NULL)) {
1489 #ifdef SCTP_DEBUG
1490 if (sctp_debug_on & SCTP_DEBUG_TIMER1) {
1491 kprintf("sctp_timer_start:%d:software error to_ticks:%d tmr:%p not set ??\n",
1492 t_type, to_ticks, tmr);
1494 #endif /* SCTP_DEBUG */
1495 return (EFAULT);
1497 if (callout_pending(&tmr->timer)) {
1499 * we do NOT allow you to have it already running.
1500 * if it is we leave the current one up unchanged
1502 return (EALREADY);
1504 /* At this point we can proceed */
1505 if (t_type == SCTP_TIMER_TYPE_SEND) {
1506 stcb->asoc.num_send_timers_up++;
1508 tmr->type = t_type;
1509 tmr->ep = (void *)inp;
1510 tmr->tcb = (void *)stcb;
1511 tmr->net = (void *)net;
1512 callout_reset(&tmr->timer, to_ticks, sctp_timeout_handler, tmr);
1513 return (0);
1517 sctp_timer_stop(int t_type, struct sctp_inpcb *inp, struct sctp_tcb *stcb,
1518 struct sctp_nets *net)
1520 struct sctp_timer *tmr;
1522 if (inp == NULL)
1523 return (EFAULT);
1525 tmr = NULL;
1526 switch (t_type) {
1527 case SCTP_TIMER_TYPE_ITERATOR:
1529 struct sctp_iterator *it;
1530 it = (struct sctp_iterator *)inp;
1531 tmr = &it->tmr;
1533 break;
1534 case SCTP_TIMER_TYPE_SEND:
1535 if ((stcb == NULL) || (net == NULL)) {
1536 return (EFAULT);
1538 tmr = &net->rxt_timer;
1539 break;
1540 case SCTP_TIMER_TYPE_INIT:
1541 if ((stcb == NULL) || (net == NULL)) {
1542 return (EFAULT);
1544 tmr = &net->rxt_timer;
1545 break;
1546 case SCTP_TIMER_TYPE_RECV:
1547 if (stcb == NULL) {
1548 return (EFAULT);
1550 tmr = &stcb->asoc.dack_timer;
1551 break;
1552 case SCTP_TIMER_TYPE_SHUTDOWN:
1553 if ((stcb == NULL) || (net == NULL)) {
1554 return (EFAULT);
1556 tmr = &net->rxt_timer;
1557 break;
1558 case SCTP_TIMER_TYPE_HEARTBEAT:
1559 if (stcb == NULL) {
1560 return (EFAULT);
1562 tmr = &stcb->asoc.hb_timer;
1563 break;
1564 case SCTP_TIMER_TYPE_COOKIE:
1565 if ((stcb == NULL) || (net == NULL)) {
1566 return (EFAULT);
1568 tmr = &net->rxt_timer;
1569 break;
1570 case SCTP_TIMER_TYPE_NEWCOOKIE:
1571 /* nothing needed but the endpoint here */
1572 tmr = &inp->sctp_ep.signature_change;
1573 /* We re-use the newcookie timer for
1574 * the INP kill timer. We must assure
1575 * that we do not kill it by accident.
1577 break;
1578 case SCTP_TIMER_TYPE_INPKILL:
1580 * The inp is setup to die. We re-use the
1581 * signature_chage timer since that has
1582 * stopped and we are in the GONE state.
1584 tmr = &inp->sctp_ep.signature_change;
1585 break;
1586 case SCTP_TIMER_TYPE_PATHMTURAISE:
1587 if (stcb == NULL) {
1588 return (EFAULT);
1590 if (net == NULL) {
1591 return (EFAULT);
1593 tmr = &net->pmtu_timer;
1594 break;
1595 case SCTP_TIMER_TYPE_SHUTDOWNACK:
1596 if ((stcb == NULL) || (net == NULL)) {
1597 return (EFAULT);
1599 tmr = &net->rxt_timer;
1600 break;
1601 case SCTP_TIMER_TYPE_SHUTDOWNGUARD:
1602 if (stcb == NULL) {
1603 return (EFAULT);
1605 tmr = &stcb->asoc.shut_guard_timer;
1606 break;
1607 case SCTP_TIMER_TYPE_STRRESET:
1608 if (stcb == NULL) {
1609 return (EFAULT);
1611 tmr = &stcb->asoc.strreset_timer;
1612 break;
1613 case SCTP_TIMER_TYPE_ASCONF:
1614 if (stcb == NULL) {
1615 return (EFAULT);
1617 tmr = &stcb->asoc.asconf_timer;
1618 break;
1619 case SCTP_TIMER_TYPE_AUTOCLOSE:
1620 if (stcb == NULL) {
1621 return (EFAULT);
1623 tmr = &stcb->asoc.autoclose_timer;
1624 break;
1625 default:
1626 #ifdef SCTP_DEBUG
1627 if (sctp_debug_on & SCTP_DEBUG_TIMER1) {
1628 kprintf("sctp_timer_stop:Unknown timer type %d\n",
1629 t_type);
1631 #endif /* SCTP_DEBUG */
1632 break;
1634 if (tmr == NULL)
1635 return (EFAULT);
1637 if ((tmr->type != t_type) && tmr->type) {
1639 * Ok we have a timer that is under joint use. Cookie timer
1640 * per chance with the SEND timer. We therefore are NOT
1641 * running the timer that the caller wants stopped. So just
1642 * return.
1644 return (0);
1646 if (t_type == SCTP_TIMER_TYPE_SEND) {
1647 stcb->asoc.num_send_timers_up--;
1648 if (stcb->asoc.num_send_timers_up < 0) {
1649 stcb->asoc.num_send_timers_up = 0;
1652 callout_stop(&tmr->timer);
1653 return (0);
#ifdef SCTP_USE_ADLER32
/*
 * Incrementally fold 'len' bytes at 'buf' into an Adler-32 checksum.
 * 'adler' is the running checksum (seed with 1 for a fresh computation);
 * the updated checksum is returned as (s2 << 16) | s1.
 */
static uint32_t
update_adler32(uint32_t adler, uint8_t *buf, int32_t len)
{
	u_int32_t s1 = adler & 0xffff;
	u_int32_t s2 = (adler >> 16) & 0xffff;
	int n;

	for (n = 0; n < len; n++, buf++) {
		/* s1 = (s1 + buf[n]) % BASE */
		/* first we add */
		s1 = (s1 + *buf);
		/*
		 * now if we need to, we do a mod by subtracting. It seems
		 * a bit faster since I really will only ever do one subtract
		 * at the MOST, since buf[n] is a max of 255 (so s1 < 2*BASE
		 * after the add).
		 */
		if (s1 >= SCTP_ADLER32_BASE) {
			s1 -= SCTP_ADLER32_BASE;
		}
		/* s2 = (s2 + s1) % BASE */
		/* first we add */
		s2 = (s2 + s1);
		/*
		 * again, it is more efficient (it seems) to subtract since
		 * the most s2 will ever be is (BASE-1 + BASE-1) in the worst
		 * case. This would then be (2 * BASE) - 2, which will still
		 * only need one subtract. On Intel this is much better than
		 * the divide. Have not -pg'd on sparc.
		 */
		if (s2 >= SCTP_ADLER32_BASE) {
			s2 -= SCTP_ADLER32_BASE;
		}
	}
	/* Return the adler32 of the bytes buf[0..len-1] */
	return ((s2 << 16) + s1);
}
#endif
1697 u_int32_t
1698 sctp_calculate_len(struct mbuf *m)
1700 u_int32_t tlen=0;
1701 struct mbuf *at;
1702 at = m;
1703 while (at) {
1704 tlen += at->m_len;
1705 at = at->m_next;
1707 return (tlen);
1710 #if defined(SCTP_WITH_NO_CSUM)
1712 uint32_t
1713 sctp_calculate_sum(struct mbuf *m, int32_t *pktlen, uint32_t offset)
1716 * given a mbuf chain with a packetheader offset by 'offset'
1717 * pointing at a sctphdr (with csum set to 0) go through
1718 * the chain of m_next's and calculate the SCTP checksum.
1719 * This is currently Adler32 but will change to CRC32x
1720 * soon. Also has a side bonus calculate the total length
1721 * of the mbuf chain.
1722 * Note: if offset is greater than the total mbuf length,
1723 * checksum=1, pktlen=0 is returned (ie. no real error code)
1725 if (pktlen == NULL)
1726 return (0);
1727 *pktlen = sctp_calculate_len(m);
1728 return (0);
1731 #elif defined(SCTP_USE_INCHKSUM)
1733 #include <machine/in_cksum.h>
1735 uint32_t
1736 sctp_calculate_sum(struct mbuf *m, int32_t *pktlen, uint32_t offset)
1739 * given a mbuf chain with a packetheader offset by 'offset'
1740 * pointing at a sctphdr (with csum set to 0) go through
1741 * the chain of m_next's and calculate the SCTP checksum.
1742 * This is currently Adler32 but will change to CRC32x
1743 * soon. Also has a side bonus calculate the total length
1744 * of the mbuf chain.
1745 * Note: if offset is greater than the total mbuf length,
1746 * checksum=1, pktlen=0 is returned (ie. no real error code)
1748 int32_t tlen=0;
1749 struct mbuf *at;
1750 uint32_t the_sum, retsum;
1752 at = m;
1753 while (at) {
1754 tlen += at->m_len;
1755 at = at->m_next;
1757 the_sum = (uint32_t)(in_cksum_skip(m, tlen, offset));
1758 if (pktlen != NULL)
1759 *pktlen = (tlen-offset);
1760 retsum = htons(the_sum);
1761 return (the_sum);
1764 #else
1766 uint32_t
1767 sctp_calculate_sum(struct mbuf *m, int32_t *pktlen, uint32_t offset)
1770 * given a mbuf chain with a packetheader offset by 'offset'
1771 * pointing at a sctphdr (with csum set to 0) go through
1772 * the chain of m_next's and calculate the SCTP checksum.
1773 * This is currently Adler32 but will change to CRC32x
1774 * soon. Also has a side bonus calculate the total length
1775 * of the mbuf chain.
1776 * Note: if offset is greater than the total mbuf length,
1777 * checksum=1, pktlen=0 is returned (ie. no real error code)
1779 int32_t tlen=0;
1780 #ifdef SCTP_USE_ADLER32
1781 uint32_t base = 1L;
1782 #else
1783 uint32_t base = 0xffffffff;
1784 #endif /* SCTP_USE_ADLER32 */
1785 struct mbuf *at;
1786 at = m;
1787 /* find the correct mbuf and offset into mbuf */
1788 while ((at != NULL) && (offset > (uint32_t)at->m_len)) {
1789 offset -= at->m_len; /* update remaining offset left */
1790 at = at->m_next;
1793 while (at != NULL) {
1794 #ifdef SCTP_USE_ADLER32
1795 base = update_adler32(base, at->m_data + offset,
1796 at->m_len - offset);
1797 #else
1798 base = update_crc32(base, at->m_data + offset,
1799 at->m_len - offset);
1800 #endif /* SCTP_USE_ADLER32 */
1801 tlen += at->m_len - offset;
1802 /* we only offset once into the first mbuf */
1803 if (offset) {
1804 offset = 0;
1806 at = at->m_next;
1808 if (pktlen != NULL) {
1809 *pktlen = tlen;
1811 #ifdef SCTP_USE_ADLER32
1812 /* Adler32 */
1813 base = htonl(base);
1814 #else
1815 /* CRC-32c */
1816 base = sctp_csum_finalize(base);
1817 #endif
1818 return (base);
1822 #endif
1824 void
1825 sctp_mtu_size_reset(struct sctp_inpcb *inp,
1826 struct sctp_association *asoc, u_long mtu)
1829 * Reset the P-MTU size on this association, this involves changing
1830 * the asoc MTU, going through ANY chunk+overhead larger than mtu
1831 * to allow the DF flag to be cleared.
1833 struct sctp_tmit_chunk *chk;
1834 struct sctp_stream_out *strm;
1835 unsigned int eff_mtu, ovh;
1836 asoc->smallest_mtu = mtu;
1837 if (inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) {
1838 ovh = SCTP_MIN_OVERHEAD;
1839 } else {
1840 ovh = SCTP_MIN_V4_OVERHEAD;
1842 eff_mtu = mtu - ovh;
1843 /* Now mark any chunks that need to let IP fragment */
1844 TAILQ_FOREACH(strm, &asoc->out_wheel, next_spoke) {
1845 TAILQ_FOREACH(chk, &strm->outqueue, sctp_next) {
1846 if (chk->send_size > eff_mtu) {
1847 chk->flags &= SCTP_DONT_FRAGMENT;
1848 chk->flags |= CHUNK_FLAGS_FRAGMENT_OK;
1852 TAILQ_FOREACH(chk, &asoc->send_queue, sctp_next) {
1853 if (chk->send_size > eff_mtu) {
1854 chk->flags &= SCTP_DONT_FRAGMENT;
1855 chk->flags |= CHUNK_FLAGS_FRAGMENT_OK;
1858 TAILQ_FOREACH(chk, &asoc->sent_queue, sctp_next) {
1859 if (chk->send_size > eff_mtu) {
1860 chk->flags &= SCTP_DONT_FRAGMENT;
1861 chk->flags |= CHUNK_FLAGS_FRAGMENT_OK;
1868 * given an association and starting time of the current RTT period
1869 * return RTO in number of usecs
1870 * net should point to the current network
1872 u_int32_t
1873 sctp_calculate_rto(struct sctp_tcb *stcb,
1874 struct sctp_association *asoc,
1875 struct sctp_nets *net,
1876 struct timeval *old)
1879 * given an association and the starting time of the current RTT
1880 * period (in value1/value2) return RTO in number of usecs.
1882 int calc_time = 0;
1883 int o_calctime;
1884 unsigned int new_rto = 0;
1885 int first_measure = 0;
1886 struct timeval now;
1888 /************************/
1889 /* 1. calculate new RTT */
1890 /************************/
1891 /* get the current time */
1892 SCTP_GETTIME_TIMEVAL(&now);
1893 /* compute the RTT value */
1894 if ((u_long)now.tv_sec > (u_long)old->tv_sec) {
1895 calc_time = ((u_long)now.tv_sec - (u_long)old->tv_sec) * 1000;
1896 if ((u_long)now.tv_usec > (u_long)old->tv_usec) {
1897 calc_time += (((u_long)now.tv_usec -
1898 (u_long)old->tv_usec)/1000);
1899 } else if ((u_long)now.tv_usec < (u_long)old->tv_usec) {
1900 /* Borrow 1,000ms from current calculation */
1901 calc_time -= 1000;
1902 /* Add in the slop over */
1903 calc_time += ((int)now.tv_usec/1000);
1904 /* Add in the pre-second ms's */
1905 calc_time += (((int)1000000 - (int)old->tv_usec)/1000);
1907 } else if ((u_long)now.tv_sec == (u_long)old->tv_sec) {
1908 if ((u_long)now.tv_usec > (u_long)old->tv_usec) {
1909 calc_time = ((u_long)now.tv_usec -
1910 (u_long)old->tv_usec)/1000;
1911 } else if ((u_long)now.tv_usec < (u_long)old->tv_usec) {
1912 /* impossible .. garbage in nothing out */
1913 return (((net->lastsa >> 2) + net->lastsv) >> 1);
1914 } else {
1915 /* impossible .. garbage in nothing out */
1916 return (((net->lastsa >> 2) + net->lastsv) >> 1);
1918 } else {
1919 /* Clock wrapped? */
1920 return (((net->lastsa >> 2) + net->lastsv) >> 1);
1922 /***************************/
1923 /* 2. update RTTVAR & SRTT */
1924 /***************************/
1925 #if 0
1926 /* if (net->lastsv || net->lastsa) {*/
1927 /* per Section 5.3.1 C3 in SCTP */
1928 /* net->lastsv = (int) *//* RTTVAR */
1929 /* (((double)(1.0 - 0.25) * (double)net->lastsv) +
1930 (double)(0.25 * (double)abs(net->lastsa - calc_time)));
1931 net->lastsa = (int) */ /* SRTT */
1932 /*(((double)(1.0 - 0.125) * (double)net->lastsa) +
1933 (double)(0.125 * (double)calc_time));
1934 } else {
1935 *//* the first RTT calculation, per C2 Section 5.3.1 */
1936 /* net->lastsa = calc_time; *//* SRTT */
1937 /* net->lastsv = calc_time / 2; *//* RTTVAR */
1938 /* }*/
1939 /* if RTTVAR goes to 0 you set to clock grainularity */
1940 /* if (net->lastsv == 0) {
1941 net->lastsv = SCTP_CLOCK_GRANULARITY;
1943 new_rto = net->lastsa + 4 * net->lastsv;
1945 #endif
1946 o_calctime = calc_time;
1947 /* this is Van Jacobson's integer version */
1948 if (net->RTO) {
1949 calc_time -= (net->lastsa >> 3);
1950 net->lastsa += calc_time;
1951 if (calc_time < 0) {
1952 calc_time = -calc_time;
1954 calc_time -= (net->lastsv >> 2);
1955 net->lastsv += calc_time;
1956 if (net->lastsv == 0) {
1957 net->lastsv = SCTP_CLOCK_GRANULARITY;
1959 } else {
1960 /* First RTO measurment */
1961 net->lastsa = calc_time;
1962 net->lastsv = calc_time >> 1;
1963 first_measure = 1;
1965 new_rto = ((net->lastsa >> 2) + net->lastsv) >> 1;
1966 if ((new_rto > SCTP_SAT_NETWORK_MIN) &&
1967 (stcb->asoc.sat_network_lockout == 0)) {
1968 stcb->asoc.sat_network = 1;
1969 } else if ((!first_measure) && stcb->asoc.sat_network) {
1970 stcb->asoc.sat_network = 0;
1971 stcb->asoc.sat_network_lockout = 1;
1973 /* bound it, per C6/C7 in Section 5.3.1 */
1974 if (new_rto < stcb->asoc.minrto) {
1975 new_rto = stcb->asoc.minrto;
1977 if (new_rto > stcb->asoc.maxrto) {
1978 new_rto = stcb->asoc.maxrto;
1980 /* we are now returning the RTT Smoothed */
1981 return ((u_int32_t)new_rto);
1986 * return a pointer to a contiguous piece of data from the given
1987 * mbuf chain starting at 'off' for 'len' bytes. If the desired
1988 * piece spans more than one mbuf, a copy is made at 'ptr'.
1989 * caller must ensure that the buffer size is >= 'len'
1990 * returns NULL if there there isn't 'len' bytes in the chain.
1992 caddr_t
1993 sctp_m_getptr(struct mbuf *m, int off, int len, u_int8_t *in_ptr)
1995 uint32_t count;
1996 uint8_t *ptr;
1997 ptr = in_ptr;
1998 if ((off < 0) || (len <= 0))
1999 return (NULL);
2001 /* find the desired start location */
2002 while ((m != NULL) && (off > 0)) {
2003 if (off < m->m_len)
2004 break;
2005 off -= m->m_len;
2006 m = m->m_next;
2008 if (m == NULL)
2009 return (NULL);
2011 /* is the current mbuf large enough (eg. contiguous)? */
2012 if ((m->m_len - off) >= len) {
2013 return (mtod(m, caddr_t) + off);
2014 } else {
2015 /* else, it spans more than one mbuf, so save a temp copy... */
2016 while ((m != NULL) && (len > 0)) {
2017 count = min(m->m_len - off, len);
2018 bcopy(mtod(m, caddr_t) + off, ptr, count);
2019 len -= count;
2020 ptr += count;
2021 off = 0;
2022 m = m->m_next;
2024 if ((m == NULL) && (len > 0))
2025 return (NULL);
2026 else
2027 return ((caddr_t)in_ptr);
2032 struct sctp_paramhdr *
2033 sctp_get_next_param(struct mbuf *m,
2034 int offset,
2035 struct sctp_paramhdr *pull,
2036 int pull_limit)
2038 /* This just provides a typed signature to Peter's Pull routine */
2039 return ((struct sctp_paramhdr *)sctp_m_getptr(m, offset, pull_limit,
2040 (u_int8_t *)pull));
2045 sctp_add_pad_tombuf(struct mbuf *m, int padlen)
2048 * add padlen bytes of 0 filled padding to the end of the mbuf.
2049 * If padlen is > 3 this routine will fail.
2051 u_int8_t *dp;
2052 int i;
2053 if (padlen > 3) {
2054 return (ENOBUFS);
2056 if (M_TRAILINGSPACE(m)) {
2058 * The easy way.
2059 * We hope the majority of the time we hit here :)
2061 dp = (u_int8_t *)(mtod(m, caddr_t) + m->m_len);
2062 m->m_len += padlen;
2063 } else {
2064 /* Hard way we must grow the mbuf */
2065 struct mbuf *tmp;
2066 MGET(tmp, MB_DONTWAIT, MT_DATA);
2067 if (tmp == NULL) {
2068 /* Out of space GAK! we are in big trouble. */
2069 return (ENOSPC);
2071 /* setup and insert in middle */
2072 tmp->m_next = m->m_next;
2073 tmp->m_len = padlen;
2074 m->m_next = tmp;
2075 dp = mtod(tmp, u_int8_t *);
2077 /* zero out the pad */
2078 for (i= 0; i < padlen; i++) {
2079 *dp = 0;
2080 dp++;
2082 return (0);
2086 sctp_pad_lastmbuf(struct mbuf *m, int padval)
2088 /* find the last mbuf in chain and pad it */
2089 struct mbuf *m_at;
2090 m_at = m;
2091 while (m_at) {
2092 if (m_at->m_next == NULL) {
2093 return (sctp_add_pad_tombuf(m_at, padval));
2095 m_at = m_at->m_next;
2097 return (EFAULT);
2100 static void
2101 sctp_notify_assoc_change(u_int32_t event, struct sctp_tcb *stcb,
2102 u_int32_t error)
2104 struct mbuf *m_notify;
2105 struct sctp_assoc_change *sac;
2106 struct sockaddr *to;
2107 struct sockaddr_in6 sin6, lsa6;
2110 * First if we are are going down dump everything we
2111 * can to the socket rcv queue.
2113 if ((event == SCTP_SHUTDOWN_COMP) || (event == SCTP_COMM_LOST)) {
2114 sctp_deliver_data(stcb, &stcb->asoc, NULL, 0);
2118 * For TCP model AND UDP connected sockets we will send
2119 * an error up when an ABORT comes in.
2121 if (((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) ||
2122 (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL)) &&
2123 (event == SCTP_COMM_LOST)) {
2124 stcb->sctp_socket->so_error = ECONNRESET;
2125 /* Wake ANY sleepers */
2126 sowwakeup(stcb->sctp_socket);
2127 sorwakeup(stcb->sctp_socket);
2129 #if 0
2130 if ((event == SCTP_COMM_UP) &&
2131 (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) &&
2132 (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_CONNECTED)) {
2133 soisconnected(stcb->sctp_socket);
2135 #endif
2136 if (!(stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_RECVASSOCEVNT)) {
2137 /* event not enabled */
2138 return;
2140 MGETHDR(m_notify, MB_DONTWAIT, MT_DATA);
2141 if (m_notify == NULL)
2142 /* no space left */
2143 return;
2144 m_notify->m_len = 0;
2146 sac = mtod(m_notify, struct sctp_assoc_change *);
2147 sac->sac_type = SCTP_ASSOC_CHANGE;
2148 sac->sac_flags = 0;
2149 sac->sac_length = sizeof(struct sctp_assoc_change);
2150 sac->sac_state = event;
2151 sac->sac_error = error;
2152 /* XXX verify these stream counts */
2153 sac->sac_outbound_streams = stcb->asoc.streamoutcnt;
2154 sac->sac_inbound_streams = stcb->asoc.streamincnt;
2155 sac->sac_assoc_id = sctp_get_associd(stcb);
2157 m_notify->m_flags |= M_EOR | M_NOTIFICATION;
2158 m_notify->m_pkthdr.len = sizeof(struct sctp_assoc_change);
2159 m_notify->m_pkthdr.rcvif = 0;
2160 m_notify->m_len = sizeof(struct sctp_assoc_change);
2161 m_notify->m_next = NULL;
2163 /* append to socket */
2164 to = (struct sockaddr *)&stcb->asoc.primary_destination->ro._l_addr;
2165 if ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_NEEDS_MAPPED_V4) &&
2166 to->sa_family == AF_INET) {
2167 struct sockaddr_in *sin;
2169 sin = (struct sockaddr_in *)to;
2170 bzero(&sin6, sizeof(sin6));
2171 sin6.sin6_family = AF_INET6;
2172 sin6.sin6_len = sizeof(struct sockaddr_in6);
2173 sin6.sin6_addr.s6_addr16[2] = 0xffff;
2174 bcopy(&sin->sin_addr, &sin6.sin6_addr.s6_addr16[3],
2175 sizeof(sin6.sin6_addr.s6_addr16[3]));
2176 sin6.sin6_port = sin->sin_port;
2177 to = (struct sockaddr *)&sin6;
2179 /* check and strip embedded scope junk */
2180 to = (struct sockaddr *)sctp_recover_scope((struct sockaddr_in6 *)to,
2181 &lsa6);
2183 * We need to always notify comm changes.
2184 * if (sctp_sbspace(&stcb->sctp_socket->so_rcv) < m_notify->m_len) {
2185 * sctp_m_freem(m_notify);
2186 * return;
2189 SCTP_TCB_UNLOCK(stcb);
2190 SCTP_INP_WLOCK(stcb->sctp_ep);
2191 SCTP_TCB_LOCK(stcb);
2192 lwkt_gettoken(&stcb->sctp_socket->so_rcv.ssb_token);
2193 if (!sctp_sbappendaddr_nocheck(&stcb->sctp_socket->so_rcv,
2194 to, m_notify, NULL, stcb->asoc.my_vtag, stcb->sctp_ep)) {
2195 /* not enough room */
2196 sctp_m_freem(m_notify);
2197 SCTP_INP_WUNLOCK(stcb->sctp_ep);
2198 lwkt_reltoken(&stcb->sctp_socket->so_rcv.ssb_token);
2199 return;
2201 lwkt_reltoken(&stcb->sctp_socket->so_rcv.ssb_token);
2202 if (((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL) == 0) &&
2203 ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_CONNECTED) == 0)){
2204 if (sctp_add_to_socket_q(stcb->sctp_ep, stcb)) {
2205 stcb->asoc.my_rwnd_control_len += sizeof(struct mbuf);
2207 } else {
2208 stcb->asoc.my_rwnd_control_len += sizeof(struct mbuf);
2210 SCTP_INP_WUNLOCK(stcb->sctp_ep);
2211 /* Wake up any sleeper */
2212 sctp_sorwakeup(stcb->sctp_ep, stcb->sctp_socket);
2213 sctp_sowwakeup(stcb->sctp_ep, stcb->sctp_socket);
2216 static void
2217 sctp_notify_peer_addr_change(struct sctp_tcb *stcb, uint32_t state,
2218 struct sockaddr *sa, uint32_t error)
2220 struct mbuf *m_notify;
2221 struct sctp_paddr_change *spc;
2222 struct sockaddr *to;
2223 struct sockaddr_in6 sin6, lsa6;
2225 if (!(stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_RECVPADDREVNT))
2226 /* event not enabled */
2227 return;
2229 MGETHDR(m_notify, MB_DONTWAIT, MT_DATA);
2230 if (m_notify == NULL)
2231 return;
2232 m_notify->m_len = 0;
2234 MCLGET(m_notify, MB_DONTWAIT);
2235 if ((m_notify->m_flags & M_EXT) != M_EXT) {
2236 sctp_m_freem(m_notify);
2237 return;
2240 spc = mtod(m_notify, struct sctp_paddr_change *);
2241 spc->spc_type = SCTP_PEER_ADDR_CHANGE;
2242 spc->spc_flags = 0;
2243 spc->spc_length = sizeof(struct sctp_paddr_change);
2244 if (sa->sa_family == AF_INET) {
2245 memcpy(&spc->spc_aaddr, sa, sizeof(struct sockaddr_in));
2246 } else {
2247 memcpy(&spc->spc_aaddr, sa, sizeof(struct sockaddr_in6));
2249 spc->spc_state = state;
2250 spc->spc_error = error;
2251 spc->spc_assoc_id = sctp_get_associd(stcb);
2253 m_notify->m_flags |= M_EOR | M_NOTIFICATION;
2254 m_notify->m_pkthdr.len = sizeof(struct sctp_paddr_change);
2255 m_notify->m_pkthdr.rcvif = 0;
2256 m_notify->m_len = sizeof(struct sctp_paddr_change);
2257 m_notify->m_next = NULL;
2259 to = (struct sockaddr *)(struct sockaddr *)
2260 &stcb->asoc.primary_destination->ro._l_addr;
2261 if ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_NEEDS_MAPPED_V4) &&
2262 to->sa_family == AF_INET) {
2263 struct sockaddr_in *sin;
2265 sin = (struct sockaddr_in *)to;
2266 bzero(&sin6, sizeof(sin6));
2267 sin6.sin6_family = AF_INET6;
2268 sin6.sin6_len = sizeof(struct sockaddr_in6);
2269 sin6.sin6_addr.s6_addr16[2] = 0xffff;
2270 bcopy(&sin->sin_addr, &sin6.sin6_addr.s6_addr16[3],
2271 sizeof(sin6.sin6_addr.s6_addr16[3]));
2272 sin6.sin6_port = sin->sin_port;
2273 to = (struct sockaddr *)&sin6;
2275 /* check and strip embedded scope junk */
2276 to = (struct sockaddr *)sctp_recover_scope((struct sockaddr_in6 *)to,
2277 &lsa6);
2279 if (sctp_sbspace(&stcb->sctp_socket->so_rcv) < m_notify->m_len) {
2280 sctp_m_freem(m_notify);
2281 return;
2283 /* append to socket */
2284 SCTP_TCB_UNLOCK(stcb);
2285 SCTP_INP_WLOCK(stcb->sctp_ep);
2286 SCTP_TCB_LOCK(stcb);
2287 lwkt_gettoken(&stcb->sctp_socket->so_rcv.ssb_token);
2288 if (!sctp_sbappendaddr_nocheck(&stcb->sctp_socket->so_rcv, to,
2289 m_notify, NULL, stcb->asoc.my_vtag, stcb->sctp_ep)) {
2290 /* not enough room */
2291 sctp_m_freem(m_notify);
2292 SCTP_INP_WUNLOCK(stcb->sctp_ep);
2293 lwkt_reltoken(&stcb->sctp_socket->so_rcv.ssb_token);
2294 return;
2296 lwkt_reltoken(&stcb->sctp_socket->so_rcv.ssb_token);
2297 if (((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL) == 0) &&
2298 ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_CONNECTED) == 0)){
2299 if (sctp_add_to_socket_q(stcb->sctp_ep, stcb)) {
2300 stcb->asoc.my_rwnd_control_len += sizeof(struct mbuf);
2302 } else {
2303 stcb->asoc.my_rwnd_control_len += sizeof(struct mbuf);
2305 SCTP_INP_WUNLOCK(stcb->sctp_ep);
2306 sctp_sorwakeup(stcb->sctp_ep, stcb->sctp_socket);
2310 static void
2311 sctp_notify_send_failed(struct sctp_tcb *stcb, u_int32_t error,
2312 struct sctp_tmit_chunk *chk)
2314 struct mbuf *m_notify;
2315 struct sctp_send_failed *ssf;
2316 struct sockaddr_in6 sin6, lsa6;
2317 struct sockaddr *to;
2318 int length;
2320 if (!(stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_RECVSENDFAILEVNT))
2321 /* event not enabled */
2322 return;
2324 length = sizeof(struct sctp_send_failed) + chk->send_size;
2325 MGETHDR(m_notify, MB_DONTWAIT, MT_DATA);
2326 if (m_notify == NULL)
2327 /* no space left */
2328 return;
2329 m_notify->m_len = 0;
2330 ssf = mtod(m_notify, struct sctp_send_failed *);
2331 ssf->ssf_type = SCTP_SEND_FAILED;
2332 if (error == SCTP_NOTIFY_DATAGRAM_UNSENT)
2333 ssf->ssf_flags = SCTP_DATA_UNSENT;
2334 else
2335 ssf->ssf_flags = SCTP_DATA_SENT;
2336 ssf->ssf_length = length;
2337 ssf->ssf_error = error;
2338 /* not exactly what the user sent in, but should be close :) */
2339 ssf->ssf_info.sinfo_stream = chk->rec.data.stream_number;
2340 ssf->ssf_info.sinfo_ssn = chk->rec.data.stream_seq;
2341 ssf->ssf_info.sinfo_flags = chk->rec.data.rcv_flags;
2342 ssf->ssf_info.sinfo_ppid = chk->rec.data.payloadtype;
2343 ssf->ssf_info.sinfo_context = chk->rec.data.context;
2344 ssf->ssf_info.sinfo_assoc_id = sctp_get_associd(stcb);
2345 ssf->ssf_assoc_id = sctp_get_associd(stcb);
2346 m_notify->m_next = chk->data;
2347 if (m_notify->m_next == NULL)
2348 m_notify->m_flags |= M_EOR | M_NOTIFICATION;
2349 else {
2350 struct mbuf *m;
2351 m_notify->m_flags |= M_NOTIFICATION;
2352 m = m_notify;
2353 while (m->m_next != NULL)
2354 m = m->m_next;
2355 m->m_flags |= M_EOR;
2357 m_notify->m_pkthdr.len = length;
2358 m_notify->m_pkthdr.rcvif = 0;
2359 m_notify->m_len = sizeof(struct sctp_send_failed);
2361 /* Steal off the mbuf */
2362 chk->data = NULL;
2363 to = (struct sockaddr *)(struct sockaddr *)&stcb->asoc.primary_destination->ro._l_addr;
2364 if ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_NEEDS_MAPPED_V4) &&
2365 to->sa_family == AF_INET) {
2366 struct sockaddr_in *sin;
2368 sin = (struct sockaddr_in *)to;
2369 bzero(&sin6, sizeof(sin6));
2370 sin6.sin6_family = AF_INET6;
2371 sin6.sin6_len = sizeof(struct sockaddr_in6);
2372 sin6.sin6_addr.s6_addr16[2] = 0xffff;
2373 bcopy(&sin->sin_addr, &sin6.sin6_addr.s6_addr16[3],
2374 sizeof(sin6.sin6_addr.s6_addr16[3]));
2375 sin6.sin6_port = sin->sin_port;
2376 to = (struct sockaddr *)&sin6;
2378 /* check and strip embedded scope junk */
2379 to = (struct sockaddr *)sctp_recover_scope((struct sockaddr_in6 *)to,
2380 &lsa6);
2382 if (sctp_sbspace(&stcb->sctp_socket->so_rcv) < m_notify->m_len) {
2383 sctp_m_freem(m_notify);
2384 return;
2387 /* append to socket */
2388 SCTP_TCB_UNLOCK(stcb);
2389 SCTP_INP_WLOCK(stcb->sctp_ep);
2390 SCTP_TCB_LOCK(stcb);
2391 lwkt_gettoken(&stcb->sctp_socket->so_rcv.ssb_token);
2392 if (!sctp_sbappendaddr_nocheck(&stcb->sctp_socket->so_rcv, to,
2393 m_notify, NULL, stcb->asoc.my_vtag, stcb->sctp_ep)) {
2394 /* not enough room */
2395 sctp_m_freem(m_notify);
2396 lwkt_reltoken(&stcb->sctp_socket->so_rcv.ssb_token);
2397 SCTP_INP_WUNLOCK(stcb->sctp_ep);
2398 return;
2400 lwkt_reltoken(&stcb->sctp_socket->so_rcv.ssb_token);
2401 if (((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL) == 0) &&
2402 ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_CONNECTED) == 0)){
2403 if (sctp_add_to_socket_q(stcb->sctp_ep, stcb)) {
2404 stcb->asoc.my_rwnd_control_len += sizeof(struct mbuf);
2406 } else {
2407 stcb->asoc.my_rwnd_control_len += sizeof(struct mbuf);
2409 SCTP_INP_WUNLOCK(stcb->sctp_ep);
2410 sctp_sorwakeup(stcb->sctp_ep, stcb->sctp_socket);
2413 static void
2414 sctp_notify_adaption_layer(struct sctp_tcb *stcb,
2415 u_int32_t error)
2417 struct mbuf *m_notify;
2418 struct sctp_adaption_event *sai;
2419 struct sockaddr_in6 sin6, lsa6;
2420 struct sockaddr *to;
2422 if (!(stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_ADAPTIONEVNT))
2423 /* event not enabled */
2424 return;
2426 MGETHDR(m_notify, MB_DONTWAIT, MT_DATA);
2427 if (m_notify == NULL)
2428 /* no space left */
2429 return;
2430 m_notify->m_len = 0;
2431 sai = mtod(m_notify, struct sctp_adaption_event *);
2432 sai->sai_type = SCTP_ADAPTION_INDICATION;
2433 sai->sai_flags = 0;
2434 sai->sai_length = sizeof(struct sctp_adaption_event);
2435 sai->sai_adaption_ind = error;
2436 sai->sai_assoc_id = sctp_get_associd(stcb);
2438 m_notify->m_flags |= M_EOR | M_NOTIFICATION;
2439 m_notify->m_pkthdr.len = sizeof(struct sctp_adaption_event);
2440 m_notify->m_pkthdr.rcvif = 0;
2441 m_notify->m_len = sizeof(struct sctp_adaption_event);
2442 m_notify->m_next = NULL;
2444 to = (struct sockaddr *)(struct sockaddr *)&stcb->asoc.primary_destination->ro._l_addr;
2445 if ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_NEEDS_MAPPED_V4) &&
2446 (to->sa_family == AF_INET)) {
2447 struct sockaddr_in *sin;
2449 sin = (struct sockaddr_in *)to;
2450 bzero(&sin6, sizeof(sin6));
2451 sin6.sin6_family = AF_INET6;
2452 sin6.sin6_len = sizeof(struct sockaddr_in6);
2453 sin6.sin6_addr.s6_addr16[2] = 0xffff;
2454 bcopy(&sin->sin_addr, &sin6.sin6_addr.s6_addr16[3],
2455 sizeof(sin6.sin6_addr.s6_addr16[3]));
2456 sin6.sin6_port = sin->sin_port;
2457 to = (struct sockaddr *)&sin6;
2459 /* check and strip embedded scope junk */
2460 to = (struct sockaddr *)sctp_recover_scope((struct sockaddr_in6 *)to,
2461 &lsa6);
2462 if (sctp_sbspace(&stcb->sctp_socket->so_rcv) < m_notify->m_len) {
2463 sctp_m_freem(m_notify);
2464 return;
2466 /* append to socket */
2467 SCTP_TCB_UNLOCK(stcb);
2468 SCTP_INP_WLOCK(stcb->sctp_ep);
2469 SCTP_TCB_LOCK(stcb);
2470 lwkt_gettoken(&stcb->sctp_socket->so_rcv.ssb_token);
2471 if (!sctp_sbappendaddr_nocheck(&stcb->sctp_socket->so_rcv, to,
2472 m_notify, NULL, stcb->asoc.my_vtag, stcb->sctp_ep)) {
2473 /* not enough room */
2474 sctp_m_freem(m_notify);
2475 lwkt_reltoken(&stcb->sctp_socket->so_rcv.ssb_token);
2476 SCTP_INP_WUNLOCK(stcb->sctp_ep);
2477 return;
2479 lwkt_reltoken(&stcb->sctp_socket->so_rcv.ssb_token);
2480 if (((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL) == 0) &&
2481 ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_CONNECTED) == 0)){
2482 if (sctp_add_to_socket_q(stcb->sctp_ep, stcb)) {
2483 stcb->asoc.my_rwnd_control_len += sizeof(struct mbuf);
2485 } else {
2486 stcb->asoc.my_rwnd_control_len += sizeof(struct mbuf);
2488 SCTP_INP_WUNLOCK(stcb->sctp_ep);
2489 sctp_sorwakeup(stcb->sctp_ep, stcb->sctp_socket);
2492 static void
2493 sctp_notify_partial_delivery_indication(struct sctp_tcb *stcb,
2494 u_int32_t error)
2496 struct mbuf *m_notify;
2497 struct sctp_pdapi_event *pdapi;
2498 struct sockaddr_in6 sin6, lsa6;
2499 struct sockaddr *to;
2501 if (!(stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_PDAPIEVNT))
2502 /* event not enabled */
2503 return;
2505 MGETHDR(m_notify, MB_DONTWAIT, MT_DATA);
2506 if (m_notify == NULL)
2507 /* no space left */
2508 return;
2509 m_notify->m_len = 0;
2510 pdapi = mtod(m_notify, struct sctp_pdapi_event *);
2511 pdapi->pdapi_type = SCTP_PARTIAL_DELIVERY_EVENT;
2512 pdapi->pdapi_flags = 0;
2513 pdapi->pdapi_length = sizeof(struct sctp_pdapi_event);
2514 pdapi->pdapi_indication = error;
2515 pdapi->pdapi_assoc_id = sctp_get_associd(stcb);
2517 m_notify->m_flags |= M_EOR | M_NOTIFICATION;
2518 m_notify->m_pkthdr.len = sizeof(struct sctp_pdapi_event);
2519 m_notify->m_pkthdr.rcvif = 0;
2520 m_notify->m_len = sizeof(struct sctp_pdapi_event);
2521 m_notify->m_next = NULL;
2523 to = (struct sockaddr *)(struct sockaddr *)&stcb->asoc.primary_destination->ro._l_addr;
2524 if ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_NEEDS_MAPPED_V4) &&
2525 (to->sa_family == AF_INET)) {
2526 struct sockaddr_in *sin;
2528 sin = (struct sockaddr_in *)to;
2529 bzero(&sin6, sizeof(sin6));
2530 sin6.sin6_family = AF_INET6;
2531 sin6.sin6_len = sizeof(struct sockaddr_in6);
2532 sin6.sin6_addr.s6_addr16[2] = 0xffff;
2533 bcopy(&sin->sin_addr, &sin6.sin6_addr.s6_addr16[3],
2534 sizeof(sin6.sin6_addr.s6_addr16[3]));
2535 sin6.sin6_port = sin->sin_port;
2536 to = (struct sockaddr *)&sin6;
2538 /* check and strip embedded scope junk */
2539 to = (struct sockaddr *)sctp_recover_scope((struct sockaddr_in6 *)to,
2540 &lsa6);
2541 if (sctp_sbspace(&stcb->sctp_socket->so_rcv) < m_notify->m_len) {
2542 sctp_m_freem(m_notify);
2543 return;
2545 /* append to socket */
2546 SCTP_TCB_UNLOCK(stcb);
2547 SCTP_INP_WLOCK(stcb->sctp_ep);
2548 SCTP_TCB_LOCK(stcb);
2549 lwkt_gettoken(&stcb->sctp_socket->so_rcv.ssb_token);
2550 if (!sctp_sbappendaddr_nocheck(&stcb->sctp_socket->so_rcv, to,
2551 m_notify, NULL, stcb->asoc.my_vtag, stcb->sctp_ep)) {
2552 /* not enough room */
2553 sctp_m_freem(m_notify);
2554 lwkt_reltoken(&stcb->sctp_socket->so_rcv.ssb_token);
2555 SCTP_INP_WUNLOCK(stcb->sctp_ep);
2556 return;
2558 lwkt_reltoken(&stcb->sctp_socket->so_rcv.ssb_token);
2559 if (((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL) == 0) &&
2560 ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_CONNECTED) == 0)){
2561 if (sctp_add_to_socket_q(stcb->sctp_ep, stcb)) {
2562 stcb->asoc.my_rwnd_control_len += sizeof(struct mbuf);
2564 } else {
2565 stcb->asoc.my_rwnd_control_len += sizeof(struct mbuf);
2567 SCTP_INP_WUNLOCK(stcb->sctp_ep);
2568 sctp_sorwakeup(stcb->sctp_ep, stcb->sctp_socket);
2571 static void
2572 sctp_notify_shutdown_event(struct sctp_tcb *stcb)
2574 struct mbuf *m_notify;
2575 struct sctp_shutdown_event *sse;
2576 struct sockaddr_in6 sin6, lsa6;
2577 struct sockaddr *to;
2580 * For TCP model AND UDP connected sockets we will send
2581 * an error up when an SHUTDOWN completes
2583 if ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) ||
2584 (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL)) {
2585 /* mark socket closed for read/write and wakeup! */
2586 socantrcvmore(stcb->sctp_socket);
2587 socantsendmore(stcb->sctp_socket);
2590 if (!(stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_RECVSHUTDOWNEVNT))
2591 /* event not enabled */
2592 return;
2594 MGETHDR(m_notify, MB_DONTWAIT, MT_DATA);
2595 if (m_notify == NULL)
2596 /* no space left */
2597 return;
2598 m_notify->m_len = 0;
2599 sse = mtod(m_notify, struct sctp_shutdown_event *);
2600 sse->sse_type = SCTP_SHUTDOWN_EVENT;
2601 sse->sse_flags = 0;
2602 sse->sse_length = sizeof(struct sctp_shutdown_event);
2603 sse->sse_assoc_id = sctp_get_associd(stcb);
2605 m_notify->m_flags |= M_EOR | M_NOTIFICATION;
2606 m_notify->m_pkthdr.len = sizeof(struct sctp_shutdown_event);
2607 m_notify->m_pkthdr.rcvif = 0;
2608 m_notify->m_len = sizeof(struct sctp_shutdown_event);
2609 m_notify->m_next = NULL;
2611 to = (struct sockaddr *)(struct sockaddr *)&stcb->asoc.primary_destination->ro._l_addr;
2612 if ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_NEEDS_MAPPED_V4) &&
2613 to->sa_family == AF_INET) {
2614 struct sockaddr_in *sin;
2616 sin = (struct sockaddr_in *)to;
2617 bzero(&sin6, sizeof(sin6));
2618 sin6.sin6_family = AF_INET6;
2619 sin6.sin6_len = sizeof(struct sockaddr_in6);
2620 sin6.sin6_addr.s6_addr16[2] = 0xffff;
2621 bcopy(&sin->sin_addr, &sin6.sin6_addr.s6_addr16[3],
2622 sizeof(sin6.sin6_addr.s6_addr16[3]));
2623 sin6.sin6_port = sin->sin_port;
2624 to = (struct sockaddr *)&sin6;
2626 /* check and strip embedded scope junk */
2627 to = (struct sockaddr *)sctp_recover_scope((struct sockaddr_in6 *)to,
2628 &lsa6);
2629 if (sctp_sbspace(&stcb->sctp_socket->so_rcv) < m_notify->m_len) {
2630 sctp_m_freem(m_notify);
2631 return;
2633 /* append to socket */
2634 SCTP_TCB_UNLOCK(stcb);
2635 SCTP_INP_WLOCK(stcb->sctp_ep);
2636 SCTP_TCB_LOCK(stcb);
2637 lwkt_gettoken(&stcb->sctp_socket->so_rcv.ssb_token);
2638 if (!sctp_sbappendaddr_nocheck(&stcb->sctp_socket->so_rcv, to,
2639 m_notify, NULL, stcb->asoc.my_vtag, stcb->sctp_ep)) {
2640 /* not enough room */
2641 sctp_m_freem(m_notify);
2642 lwkt_reltoken(&stcb->sctp_socket->so_rcv.ssb_token);
2643 SCTP_INP_WUNLOCK(stcb->sctp_ep);
2644 return;
2646 lwkt_reltoken(&stcb->sctp_socket->so_rcv.ssb_token);
2647 if (((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL) == 0) &&
2648 ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_CONNECTED) == 0)){
2649 if (sctp_add_to_socket_q(stcb->sctp_ep, stcb)) {
2650 stcb->asoc.my_rwnd_control_len += sizeof(struct mbuf);
2652 } else {
2653 stcb->asoc.my_rwnd_control_len += sizeof(struct mbuf);
2655 SCTP_INP_WUNLOCK(stcb->sctp_ep);
2656 sctp_sorwakeup(stcb->sctp_ep, stcb->sctp_socket);
2659 static void
2660 sctp_notify_stream_reset(struct sctp_tcb *stcb,
2661 int number_entries, uint16_t *list, int flag)
2663 struct mbuf *m_notify;
2664 struct sctp_stream_reset_event *strreset;
2665 struct sockaddr_in6 sin6, lsa6;
2666 struct sockaddr *to;
2667 int len;
2669 if (!(stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_STREAM_RESETEVNT))
2670 /* event not enabled */
2671 return;
2673 MGETHDR(m_notify, MB_DONTWAIT, MT_DATA);
2674 if (m_notify == NULL)
2675 /* no space left */
2676 return;
2677 m_notify->m_len = 0;
2678 len = sizeof(struct sctp_stream_reset_event) + (number_entries * sizeof(uint16_t));
2679 if (len > M_TRAILINGSPACE(m_notify)) {
2680 MCLGET(m_notify, MB_WAIT);
2682 if (m_notify == NULL)
2683 /* no clusters */
2684 return;
2686 if (len > M_TRAILINGSPACE(m_notify)) {
2687 /* never enough room */
2688 m_freem(m_notify);
2689 return;
2691 strreset = mtod(m_notify, struct sctp_stream_reset_event *);
2692 strreset->strreset_type = SCTP_STREAM_RESET_EVENT;
2693 if (number_entries == 0) {
2694 strreset->strreset_flags = flag | SCTP_STRRESET_ALL_STREAMS;
2695 } else {
2696 strreset->strreset_flags = flag | SCTP_STRRESET_STREAM_LIST;
2698 strreset->strreset_length = len;
2699 strreset->strreset_assoc_id = sctp_get_associd(stcb);
2700 if (number_entries) {
2701 int i;
2702 for (i=0; i<number_entries; i++) {
2703 strreset->strreset_list[i] = list[i];
2706 m_notify->m_flags |= M_EOR | M_NOTIFICATION;
2707 m_notify->m_pkthdr.len = len;
2708 m_notify->m_pkthdr.rcvif = 0;
2709 m_notify->m_len = len;
2710 m_notify->m_next = NULL;
2711 if (sctp_sbspace(&stcb->sctp_socket->so_rcv) < m_notify->m_len) {
2712 /* no space */
2713 sctp_m_freem(m_notify);
2714 return;
2716 to = (struct sockaddr *)(struct sockaddr *)&stcb->asoc.primary_destination->ro._l_addr;
2717 if ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_NEEDS_MAPPED_V4) &&
2718 to->sa_family == AF_INET) {
2719 struct sockaddr_in *sin;
2721 sin = (struct sockaddr_in *)to;
2722 bzero(&sin6, sizeof(sin6));
2723 sin6.sin6_family = AF_INET6;
2724 sin6.sin6_len = sizeof(struct sockaddr_in6);
2725 sin6.sin6_addr.s6_addr16[2] = 0xffff;
2726 bcopy(&sin->sin_addr, &sin6.sin6_addr.s6_addr16[3],
2727 sizeof(sin6.sin6_addr.s6_addr16[3]));
2728 sin6.sin6_port = sin->sin_port;
2729 to = (struct sockaddr *)&sin6;
2731 /* check and strip embedded scope junk */
2732 to = (struct sockaddr *)sctp_recover_scope((struct sockaddr_in6 *)to,
2733 &lsa6);
2734 /* append to socket */
2735 SCTP_TCB_UNLOCK(stcb);
2736 SCTP_INP_WLOCK(stcb->sctp_ep);
2737 SCTP_TCB_LOCK(stcb);
2738 lwkt_gettoken(&stcb->sctp_socket->so_rcv.ssb_token);
2739 if (!sctp_sbappendaddr_nocheck(&stcb->sctp_socket->so_rcv, to,
2740 m_notify, NULL, stcb->asoc.my_vtag, stcb->sctp_ep)) {
2741 /* not enough room */
2742 sctp_m_freem(m_notify);
2743 lwkt_reltoken(&stcb->sctp_socket->so_rcv.ssb_token);
2744 SCTP_INP_WUNLOCK(stcb->sctp_ep);
2745 return;
2747 lwkt_reltoken(&stcb->sctp_socket->so_rcv.ssb_token);
2748 if (((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL) == 0) &&
2749 ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_CONNECTED) == 0)){
2750 if (sctp_add_to_socket_q(stcb->sctp_ep, stcb)) {
2751 stcb->asoc.my_rwnd_control_len += sizeof(struct mbuf);
2753 } else {
2754 stcb->asoc.my_rwnd_control_len += sizeof(struct mbuf);
2756 SCTP_INP_WUNLOCK(stcb->sctp_ep);
2757 sctp_sorwakeup(stcb->sctp_ep, stcb->sctp_socket);
2761 void
2762 sctp_ulp_notify(u_int32_t notification, struct sctp_tcb *stcb,
2763 u_int32_t error, void *data)
2765 if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) {
2766 /* No notifications up when we are in a no socket state */
2767 return;
2769 if (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET) {
2770 /* Can't send up to a closed socket any notifications */
2771 return;
2773 switch (notification) {
2774 case SCTP_NOTIFY_ASSOC_UP:
2775 sctp_notify_assoc_change(SCTP_COMM_UP, stcb, error);
2776 break;
2777 case SCTP_NOTIFY_ASSOC_DOWN:
2778 sctp_notify_assoc_change(SCTP_SHUTDOWN_COMP, stcb, error);
2779 break;
2780 case SCTP_NOTIFY_INTERFACE_DOWN:
2782 struct sctp_nets *net;
2783 net = (struct sctp_nets *)data;
2784 sctp_notify_peer_addr_change(stcb, SCTP_ADDR_UNREACHABLE,
2785 (struct sockaddr *)&net->ro._l_addr, error);
2786 break;
2788 case SCTP_NOTIFY_INTERFACE_UP:
2790 struct sctp_nets *net;
2791 net = (struct sctp_nets *)data;
2792 sctp_notify_peer_addr_change(stcb, SCTP_ADDR_AVAILABLE,
2793 (struct sockaddr *)&net->ro._l_addr, error);
2794 break;
2796 case SCTP_NOTIFY_INTERFACE_CONFIRMED:
2798 struct sctp_nets *net;
2799 net = (struct sctp_nets *)data;
2800 sctp_notify_peer_addr_change(stcb, SCTP_ADDR_CONFIRMED,
2801 (struct sockaddr *)&net->ro._l_addr, error);
2802 break;
2804 case SCTP_NOTIFY_DG_FAIL:
2805 sctp_notify_send_failed(stcb, error,
2806 (struct sctp_tmit_chunk *)data);
2807 break;
2808 case SCTP_NOTIFY_ADAPTION_INDICATION:
2809 /* Here the error is the adaption indication */
2810 sctp_notify_adaption_layer(stcb, error);
2811 break;
2812 case SCTP_NOTIFY_PARTIAL_DELVIERY_INDICATION:
2813 sctp_notify_partial_delivery_indication(stcb, error);
2814 break;
2815 case SCTP_NOTIFY_STRDATA_ERR:
2816 break;
2817 case SCTP_NOTIFY_ASSOC_ABORTED:
2818 sctp_notify_assoc_change(SCTP_COMM_LOST, stcb, error);
2819 break;
2820 case SCTP_NOTIFY_PEER_OPENED_STREAM:
2821 break;
2822 case SCTP_NOTIFY_STREAM_OPENED_OK:
2823 break;
2824 case SCTP_NOTIFY_ASSOC_RESTART:
2825 sctp_notify_assoc_change(SCTP_RESTART, stcb, error);
2826 break;
2827 case SCTP_NOTIFY_HB_RESP:
2828 break;
2829 case SCTP_NOTIFY_STR_RESET_SEND:
2830 sctp_notify_stream_reset(stcb, error, ((uint16_t *)data), SCTP_STRRESET_OUTBOUND_STR);
2831 break;
2832 case SCTP_NOTIFY_STR_RESET_RECV:
2833 sctp_notify_stream_reset(stcb, error, ((uint16_t *)data), SCTP_STRRESET_INBOUND_STR);
2834 break;
2835 case SCTP_NOTIFY_ASCONF_ADD_IP:
2836 sctp_notify_peer_addr_change(stcb, SCTP_ADDR_ADDED, data,
2837 error);
2838 break;
2839 case SCTP_NOTIFY_ASCONF_DELETE_IP:
2840 sctp_notify_peer_addr_change(stcb, SCTP_ADDR_REMOVED, data,
2841 error);
2842 break;
2843 case SCTP_NOTIFY_ASCONF_SET_PRIMARY:
2844 sctp_notify_peer_addr_change(stcb, SCTP_ADDR_MADE_PRIM, data,
2845 error);
2846 break;
2847 case SCTP_NOTIFY_ASCONF_SUCCESS:
2848 break;
2849 case SCTP_NOTIFY_ASCONF_FAILED:
2850 break;
2851 case SCTP_NOTIFY_PEER_SHUTDOWN:
2852 sctp_notify_shutdown_event(stcb);
2853 break;
2854 default:
2855 #ifdef SCTP_DEBUG
2856 if (sctp_debug_on & SCTP_DEBUG_UTIL1) {
2857 kprintf("NOTIFY: unknown notification %xh (%u)\n",
2858 notification, notification);
2860 #endif /* SCTP_DEBUG */
2861 break;
2862 } /* end switch */
2865 void
2866 sctp_report_all_outbound(struct sctp_tcb *stcb)
2868 struct sctp_association *asoc;
2869 struct sctp_stream_out *outs;
2870 struct sctp_tmit_chunk *chk;
2872 asoc = &stcb->asoc;
2874 if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) {
2875 return;
2877 /* now through all the gunk freeing chunks */
2878 TAILQ_FOREACH(outs, &asoc->out_wheel, next_spoke) {
2879 /* now clean up any chunks here */
2880 chk = TAILQ_FIRST(&outs->outqueue);
2881 while (chk) {
2882 stcb->asoc.stream_queue_cnt--;
2883 TAILQ_REMOVE(&outs->outqueue, chk, sctp_next);
2884 sctp_ulp_notify(SCTP_NOTIFY_DG_FAIL, stcb,
2885 SCTP_NOTIFY_DATAGRAM_UNSENT, chk);
2886 if (chk->data) {
2887 sctp_m_freem(chk->data);
2888 chk->data = NULL;
2890 if (chk->whoTo)
2891 sctp_free_remote_addr(chk->whoTo);
2892 chk->whoTo = NULL;
2893 chk->asoc = NULL;
2894 /* Free the chunk */
2895 SCTP_ZONE_FREE(sctppcbinfo.ipi_zone_chunk, chk);
2896 sctppcbinfo.ipi_count_chunk--;
2897 if ((int)sctppcbinfo.ipi_count_chunk < 0) {
2898 panic("Chunk count is negative");
2900 sctppcbinfo.ipi_gencnt_chunk++;
2901 chk = TAILQ_FIRST(&outs->outqueue);
2904 /* pending send queue SHOULD be empty */
2905 if (!TAILQ_EMPTY(&asoc->send_queue)) {
2906 chk = TAILQ_FIRST(&asoc->send_queue);
2907 while (chk) {
2908 TAILQ_REMOVE(&asoc->send_queue, chk, sctp_next);
2909 sctp_ulp_notify(SCTP_NOTIFY_DG_FAIL, stcb, SCTP_NOTIFY_DATAGRAM_UNSENT, chk);
2910 if (chk->data) {
2911 sctp_m_freem(chk->data);
2912 chk->data = NULL;
2914 if (chk->whoTo)
2915 sctp_free_remote_addr(chk->whoTo);
2916 chk->whoTo = NULL;
2917 SCTP_ZONE_FREE(sctppcbinfo.ipi_zone_chunk, chk);
2918 sctppcbinfo.ipi_count_chunk--;
2919 if ((int)sctppcbinfo.ipi_count_chunk < 0) {
2920 panic("Chunk count is negative");
2922 sctppcbinfo.ipi_gencnt_chunk++;
2923 chk = TAILQ_FIRST(&asoc->send_queue);
2926 /* sent queue SHOULD be empty */
2927 if (!TAILQ_EMPTY(&asoc->sent_queue)) {
2928 chk = TAILQ_FIRST(&asoc->sent_queue);
2929 while (chk) {
2930 TAILQ_REMOVE(&asoc->sent_queue, chk, sctp_next);
2931 sctp_ulp_notify(SCTP_NOTIFY_DG_FAIL, stcb,
2932 SCTP_NOTIFY_DATAGRAM_SENT, chk);
2933 if (chk->data) {
2934 sctp_m_freem(chk->data);
2935 chk->data = NULL;
2937 if (chk->whoTo)
2938 sctp_free_remote_addr(chk->whoTo);
2939 chk->whoTo = NULL;
2940 SCTP_ZONE_FREE(sctppcbinfo.ipi_zone_chunk, chk);
2941 sctppcbinfo.ipi_count_chunk--;
2942 if ((int)sctppcbinfo.ipi_count_chunk < 0) {
2943 panic("Chunk count is negative");
2945 sctppcbinfo.ipi_gencnt_chunk++;
2946 chk = TAILQ_FIRST(&asoc->sent_queue);
2951 void
2952 sctp_abort_notification(struct sctp_tcb *stcb, int error)
2955 if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) {
2956 return;
2958 /* Tell them we lost the asoc */
2959 sctp_report_all_outbound(stcb);
2960 sctp_ulp_notify(SCTP_NOTIFY_ASSOC_ABORTED, stcb, error, NULL);
2963 void
2964 sctp_abort_association(struct sctp_inpcb *inp, struct sctp_tcb *stcb,
2965 struct mbuf *m, int iphlen, struct sctphdr *sh, struct mbuf *op_err)
2967 u_int32_t vtag;
2969 vtag = 0;
2970 if (stcb != NULL) {
2971 /* We have a TCB to abort, send notification too */
2972 vtag = stcb->asoc.peer_vtag;
2973 sctp_abort_notification(stcb, 0);
2975 sctp_send_abort(m, iphlen, sh, vtag, op_err);
2976 if (stcb != NULL) {
2977 /* Ok, now lets free it */
2978 sctp_free_assoc(inp, stcb);
2979 } else {
2980 if (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) {
2981 if (LIST_FIRST(&inp->sctp_asoc_list) == NULL) {
2982 sctp_inpcb_free(inp, 1);
2988 void
2989 sctp_abort_an_association(struct sctp_inpcb *inp, struct sctp_tcb *stcb,
2990 int error, struct mbuf *op_err)
2992 u_int32_t vtag;
2994 if (stcb == NULL) {
2995 /* Got to have a TCB */
2996 if (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) {
2997 if (LIST_FIRST(&inp->sctp_asoc_list) == NULL) {
2998 sctp_inpcb_free(inp, 1);
3001 return;
3003 vtag = stcb->asoc.peer_vtag;
3004 /* notify the ulp */
3005 if ((inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) == 0)
3006 sctp_abort_notification(stcb, error);
3007 /* notify the peer */
3008 sctp_send_abort_tcb(stcb, op_err);
3009 /* now free the asoc */
3010 sctp_free_assoc(inp, stcb);
3013 void
3014 sctp_handle_ootb(struct mbuf *m, int iphlen, int offset, struct sctphdr *sh,
3015 struct sctp_inpcb *inp, struct mbuf *op_err)
3017 struct sctp_chunkhdr *ch, chunk_buf;
3018 unsigned int chk_length;
3020 /* Generate a TO address for future reference */
3021 if (inp && (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE)) {
3022 if (LIST_FIRST(&inp->sctp_asoc_list) == NULL) {
3023 sctp_inpcb_free(inp, 1);
3026 ch = (struct sctp_chunkhdr *)sctp_m_getptr(m, offset,
3027 sizeof(*ch), (u_int8_t *)&chunk_buf);
3028 while (ch != NULL) {
3029 chk_length = ntohs(ch->chunk_length);
3030 if (chk_length < sizeof(*ch)) {
3031 /* break to abort land */
3032 break;
3034 switch (ch->chunk_type) {
3035 case SCTP_PACKET_DROPPED:
3036 /* we don't respond to pkt-dropped */
3037 return;
3038 case SCTP_ABORT_ASSOCIATION:
3039 /* we don't respond with an ABORT to an ABORT */
3040 return;
3041 case SCTP_SHUTDOWN_COMPLETE:
3043 * we ignore it since we are not waiting for it
3044 * and peer is gone
3046 return;
3047 case SCTP_SHUTDOWN_ACK:
3048 sctp_send_shutdown_complete2(m, iphlen, sh);
3049 return;
3050 default:
3051 break;
3053 offset += SCTP_SIZE32(chk_length);
3054 ch = (struct sctp_chunkhdr *)sctp_m_getptr(m, offset,
3055 sizeof(*ch), (u_int8_t *)&chunk_buf);
3057 sctp_send_abort(m, iphlen, sh, 0, op_err);
3061 * check the inbound datagram to make sure there is not an abort
3062 * inside it, if there is return 1, else return 0.
3065 sctp_is_there_an_abort_here(struct mbuf *m, int iphlen, int *vtagfill)
3067 struct sctp_chunkhdr *ch;
3068 struct sctp_init_chunk *init_chk, chunk_buf;
3069 int offset;
3070 unsigned int chk_length;
3072 offset = iphlen + sizeof(struct sctphdr);
3073 ch = (struct sctp_chunkhdr *)sctp_m_getptr(m, offset, sizeof(*ch),
3074 (u_int8_t *)&chunk_buf);
3075 while (ch != NULL) {
3076 chk_length = ntohs(ch->chunk_length);
3077 if (chk_length < sizeof(*ch)) {
3078 /* packet is probably corrupt */
3079 break;
3081 /* we seem to be ok, is it an abort? */
3082 if (ch->chunk_type == SCTP_ABORT_ASSOCIATION) {
3083 /* yep, tell them */
3084 return (1);
3086 if (ch->chunk_type == SCTP_INITIATION) {
3087 /* need to update the Vtag */
3088 init_chk = (struct sctp_init_chunk *)sctp_m_getptr(m,
3089 offset, sizeof(*init_chk), (u_int8_t *)&chunk_buf);
3090 if (init_chk != NULL) {
3091 *vtagfill = ntohl(init_chk->init.initiate_tag);
3094 /* Nope, move to the next chunk */
3095 offset += SCTP_SIZE32(chk_length);
3096 ch = (struct sctp_chunkhdr *)sctp_m_getptr(m, offset,
3097 sizeof(*ch), (u_int8_t *)&chunk_buf);
3099 return (0);
3103 * currently (2/02), ifa_addr embeds scope_id's and don't
3104 * have sin6_scope_id set (i.e. it's 0)
3105 * so, create this function to compare link local scopes
3107 uint32_t
3108 sctp_is_same_scope(struct sockaddr_in6 *addr1, struct sockaddr_in6 *addr2)
3110 struct sockaddr_in6 a, b;
3112 /* save copies */
3113 a = *addr1;
3114 b = *addr2;
3116 if (a.sin6_scope_id == 0)
3117 if (in6_recoverscope(&a, &a.sin6_addr, NULL)) {
3118 /* can't get scope, so can't match */
3119 return (0);
3121 if (b.sin6_scope_id == 0)
3122 if (in6_recoverscope(&b, &b.sin6_addr, NULL)) {
3123 /* can't get scope, so can't match */
3124 return (0);
3126 if (a.sin6_scope_id != b.sin6_scope_id)
3127 return (0);
3129 return (1);
3133 * returns a sockaddr_in6 with embedded scope recovered and removed
3135 struct sockaddr_in6 *
3136 sctp_recover_scope(struct sockaddr_in6 *addr, struct sockaddr_in6 *store)
3139 /* check and strip embedded scope junk */
3140 if (addr->sin6_family == AF_INET6) {
3141 if (IN6_IS_SCOPE_LINKLOCAL(&addr->sin6_addr)) {
3142 if (addr->sin6_scope_id == 0) {
3143 *store = *addr;
3144 if (!in6_recoverscope(store, &store->sin6_addr,
3145 NULL)) {
3146 /* use the recovered scope */
3147 addr = store;
3149 /* else, return the original "to" addr */
3153 return (addr);
3157 * are the two addresses the same? currently a "scopeless" check
3158 * returns: 1 if same, 0 if not
3161 sctp_cmpaddr(struct sockaddr *sa1, struct sockaddr *sa2)
3164 /* must be valid */
3165 if (sa1 == NULL || sa2 == NULL)
3166 return (0);
3168 /* must be the same family */
3169 if (sa1->sa_family != sa2->sa_family)
3170 return (0);
3172 if (sa1->sa_family == AF_INET6) {
3173 /* IPv6 addresses */
3174 struct sockaddr_in6 *sin6_1, *sin6_2;
3176 sin6_1 = (struct sockaddr_in6 *)sa1;
3177 sin6_2 = (struct sockaddr_in6 *)sa2;
3178 return (SCTP6_ARE_ADDR_EQUAL(&sin6_1->sin6_addr,
3179 &sin6_2->sin6_addr));
3180 } else if (sa1->sa_family == AF_INET) {
3181 /* IPv4 addresses */
3182 struct sockaddr_in *sin_1, *sin_2;
3184 sin_1 = (struct sockaddr_in *)sa1;
3185 sin_2 = (struct sockaddr_in *)sa2;
3186 return (sin_1->sin_addr.s_addr == sin_2->sin_addr.s_addr);
3187 } else {
3188 /* we don't do these... */
3189 return (0);
3193 void
3194 sctp_print_address(struct sockaddr *sa)
3197 if (sa->sa_family == AF_INET6) {
3198 struct sockaddr_in6 *sin6;
3199 sin6 = (struct sockaddr_in6 *)sa;
3200 kprintf("IPv6 address: %s:%d scope:%u\n",
3201 ip6_sprintf(&sin6->sin6_addr), ntohs(sin6->sin6_port),
3202 sin6->sin6_scope_id);
3203 } else if (sa->sa_family == AF_INET) {
3204 struct sockaddr_in *sin;
3205 sin = (struct sockaddr_in *)sa;
3206 kprintf("IPv4 address: %s:%d\n", inet_ntoa(sin->sin_addr),
3207 ntohs(sin->sin_port));
3208 } else {
3209 kprintf("?\n");
3213 void
3214 sctp_print_address_pkt(struct ip *iph, struct sctphdr *sh)
3216 if (iph->ip_v == IPVERSION) {
3217 struct sockaddr_in lsa, fsa;
3219 bzero(&lsa, sizeof(lsa));
3220 lsa.sin_len = sizeof(lsa);
3221 lsa.sin_family = AF_INET;
3222 lsa.sin_addr = iph->ip_src;
3223 lsa.sin_port = sh->src_port;
3224 bzero(&fsa, sizeof(fsa));
3225 fsa.sin_len = sizeof(fsa);
3226 fsa.sin_family = AF_INET;
3227 fsa.sin_addr = iph->ip_dst;
3228 fsa.sin_port = sh->dest_port;
3229 kprintf("src: ");
3230 sctp_print_address((struct sockaddr *)&lsa);
3231 kprintf("dest: ");
3232 sctp_print_address((struct sockaddr *)&fsa);
3233 } else if (iph->ip_v == (IPV6_VERSION >> 4)) {
3234 struct ip6_hdr *ip6;
3235 struct sockaddr_in6 lsa6, fsa6;
3237 ip6 = (struct ip6_hdr *)iph;
3238 bzero(&lsa6, sizeof(lsa6));
3239 lsa6.sin6_len = sizeof(lsa6);
3240 lsa6.sin6_family = AF_INET6;
3241 lsa6.sin6_addr = ip6->ip6_src;
3242 lsa6.sin6_port = sh->src_port;
3243 bzero(&fsa6, sizeof(fsa6));
3244 fsa6.sin6_len = sizeof(fsa6);
3245 fsa6.sin6_family = AF_INET6;
3246 fsa6.sin6_addr = ip6->ip6_dst;
3247 fsa6.sin6_port = sh->dest_port;
3248 kprintf("src: ");
3249 sctp_print_address((struct sockaddr *)&lsa6);
3250 kprintf("dest: ");
3251 sctp_print_address((struct sockaddr *)&fsa6);
#if defined(__FreeBSD__) || defined(__APPLE__)

/* cloned from uipc_socket.c */

/*
 * Link record m0 onto the end of sockbuf sb's record chain,
 * updating sb_lastrecord (and sb_mb when the buffer was empty).
 */
#define SCTP_SBLINKRECORD(sb, m0) do { \
	if ((sb)->sb_lastrecord != NULL) \
		(sb)->sb_lastrecord->m_nextpkt = (m0); \
	else \
		(sb)->sb_mb = (m0); \
	(sb)->sb_lastrecord = (m0); \
} while (/*CONSTCOND*/0)
#endif
/*
 * sctp_sbappendaddr_nocheck() -- append one record (peer-address mbuf +
 * control chain + data chain) to receive socket buffer 'ssb' with NO
 * space checking, stashing the association's verification tag 'tag' in
 * the record's packet header so later code can tell which association
 * the record belongs to (see sctp_should_be_moved()).  Returns 1 on
 * success, 0 if the address is too large or an mbuf cannot be had.
 *
 * NOTE(review): this listing has lost the blank/brace-only lines of the
 * original file; the code tokens below are otherwise unchanged.
 */
3270 sctp_sbappendaddr_nocheck(struct signalsockbuf *ssb, struct sockaddr *asa, struct mbuf *m0,
3271 struct mbuf *control, u_int32_t tag,
3272 struct sctp_inpcb *inp)
3274 struct mbuf *m, *n, *nlast;
3275 int cnt=0;
/* a data chain, when present, must lead with a packet header */
3277 if (m0 && (m0->m_flags & M_PKTHDR) == 0)
3278 panic("sctp_sbappendaddr_nocheck");
/* locate the last control mbuf so data can be chained after it */
3280 for (n = control; n; n = n->m_next) {
3281 if (n->m_next == 0) /* get pointer to last control buf */
3282 break;
/*
 * Non-TCP-model socket: prepend the peer address in its own
 * MT_SONAME header mbuf (address must fit a single MHLEN mbuf).
 */
3284 if (((inp->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) == 0) ||
3285 ((inp->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL)== 0)) {
3286 uint32_t len = asa->sa_len; /* workaround GCC stupidity */
3287 if (len > MHLEN)
3288 return (0);
3289 try_again:
3290 MGETHDR(m, MB_DONTWAIT, MT_SONAME);
3291 if (m == NULL)
3292 return (0);
3293 m->m_len = 0;
/* safety: the allocator should never hand back the caller's data mbuf */
3294 /* safety */
3295 if (m == m0) {
3296 kprintf("Duplicate mbuf allocated %p in and mget returned %p?\n",
3297 m0, m);
3298 if (cnt) {
3299 panic("more than once");
3301 cnt++;
3302 goto try_again;
3304 m->m_len = asa->sa_len;
3305 bcopy((caddr_t)asa, mtod(m, caddr_t), asa->sa_len);
3307 else {
3308 m = NULL;
/* splice the record together: address mbuf (if any) -> control -> data */
3310 if (n)
3311 n->m_next = m0; /* concatenate data to control */
3312 else
3313 control = m0;
3314 if (m)
3315 m->m_next = control;
3316 else
3317 m = control;
/* kludge: stash the association's vtag in the head packet header */
3318 m->m_pkthdr.csum_data = (int)tag;
/* charge every mbuf of the record to the socket buffer */
3320 SOCKBUF_LOCK(ssb);
3321 for (n = m; n; n = n->m_next)
3322 sballoc(&ssb->sb, n);
3323 nlast = n;
3324 if (ssb->ssb_mb == NULL) {
3325 inp->sctp_vtag_first = tag;
/*
 * Append the record to the buffer's packet list, keeping the
 * inp->sb_last_mpkt tail cache coherent (reset and re-walk the
 * list when the cache looks stale).
 */
3327 if ((n = ssb->ssb_mb) != NULL) {
3328 if ((n->m_nextpkt != inp->sb_last_mpkt) && (n->m_nextpkt == NULL)) {
3329 inp->sb_last_mpkt = NULL;
3331 if (inp->sb_last_mpkt)
3332 inp->sb_last_mpkt->m_nextpkt = m;
3333 else {
3334 while (n->m_nextpkt) {
3335 n = n->m_nextpkt;
3337 n->m_nextpkt = m;
3339 inp->sb_last_mpkt = m;
3340 } else {
/* buffer was empty: this record becomes the head */
3341 inp->sb_last_mpkt = ssb->ssb_mb = m;
3342 inp->sctp_vtag_first = tag;
3344 SOCKBUF_UNLOCK(ssb);
3345 return (1);
3348 /*************HOLD THIS COMMENT FOR PATCH FILE OF
3349 *************ALTERNATE ROUTING CODE
3352 /*************HOLD THIS COMMENT FOR END OF PATCH FILE OF
3353 *************ALTERNATE ROUTING CODE
3356 struct mbuf *
3357 sctp_generate_invmanparam(int err)
3359 /* Return a MBUF with a invalid mandatory parameter */
3360 struct mbuf *m;
3362 MGET(m, MB_DONTWAIT, MT_DATA);
3363 if (m) {
3364 struct sctp_paramhdr *ph;
3365 m->m_len = sizeof(struct sctp_paramhdr);
3366 ph = mtod(m, struct sctp_paramhdr *);
3367 ph->param_length = htons(sizeof(struct sctp_paramhdr));
3368 ph->param_type = htons(err);
3370 return (m);
3373 static int
3374 sctp_should_be_moved(struct mbuf *this, struct sctp_association *asoc)
3376 struct mbuf *m;
3378 * given a mbuf chain, look through it finding
3379 * the M_PKTHDR and return 1 if it belongs to
3380 * the association given. We tell this by
3381 * a kludge where we stuff the my_vtag of the asoc
3382 * into the m->m_pkthdr.csum_data/csum field.
3384 m = this;
3385 while (m) {
3386 if (m->m_flags & M_PKTHDR) {
3387 /* check it */
3388 #if defined(__OpenBSD__)
3389 if ((u_int32_t)m->m_pkthdr.csum == asoc->my_vtag)
3390 #else
3391 if ((u_int32_t)m->m_pkthdr.csum_data == asoc->my_vtag)
3392 #endif
3394 /* Yep */
3395 return (1);
3398 m = m->m_next;
3400 return (0);
3403 u_int32_t
3404 sctp_get_first_vtag_from_sb(struct socket *so)
3406 struct mbuf *this, *at;
3407 u_int32_t retval;
3409 retval = 0;
3410 lwkt_gettoken(&so->so_rcv.ssb_token);
3411 if (so->so_rcv.ssb_mb) {
3412 /* grubbing time */
3413 this = so->so_rcv.ssb_mb;
3414 while (this) {
3415 at = this;
3416 /* get to the m_pkthdr */
3417 while (at) {
3418 if (at->m_flags & M_PKTHDR)
3419 break;
3420 else {
3421 at = at->m_next;
3424 /* now do we have a m_pkthdr */
3425 if (at && (at->m_flags & M_PKTHDR)) {
3426 /* check it */
3427 #if defined(__OpenBSD__)
3428 if ((u_int32_t)at->m_pkthdr.csum != 0)
3429 #else
3430 if ((u_int32_t)at->m_pkthdr.csum_data != 0)
3431 #endif
3433 /* its the one */
3434 #if defined(__OpenBSD__)
3435 retval = (u_int32_t)at->m_pkthdr.csum;
3436 #else
3437 retval =
3438 (u_int32_t)at->m_pkthdr.csum_data;
3439 #endif
3440 break;
3443 this = this->m_nextpkt;
3447 lwkt_reltoken(&so->so_rcv.ssb_token);
3448 return (retval);
/*
 * sctp_grub_through_socket_buffer() -- move every receive-buffer record
 * belonging to association 'stcb' from socket 'old' to socket 'new'
 * (used when an association is peeled off onto its own socket).
 * Records are matched by the vtag stashed in their M_PKTHDR (see
 * sctp_should_be_moved()) and unlinked/relinked with take/put
 * pointer-to-pointer surgery; the sb space accounting of both buffers
 * is adjusted mbuf by mbuf.
 *
 * NOTE(review): this listing has lost the blank/brace-only lines of the
 * original file; the code tokens below are otherwise unchanged.
 */
3451 void
3452 sctp_grub_through_socket_buffer(struct sctp_inpcb *inp, struct socket *old,
3453 struct socket *new, struct sctp_tcb *stcb)
3455 struct mbuf **put, **take, *next, *this;
3456 struct signalsockbuf *old_sb, *new_sb;
3457 struct sctp_association *asoc;
3458 int moved_top = 0;
3460 asoc = &stcb->asoc;
3461 old_sb = &old->so_rcv;
3462 new_sb = &new->so_rcv;
3463 if (old_sb->ssb_mb == NULL) {
3464 /* Nothing to move */
3465 return;
/* lock both buffers: locks then tokens, old before new in each case */
3467 SOCKBUF_LOCK(old_sb);
3468 SOCKBUF_LOCK(new_sb);
3469 lwkt_gettoken(&old_sb->ssb_token);
3470 lwkt_gettoken(&new_sb->ssb_token);
/*
 * The head record is special-cased via the inp's cached first-record
 * vtag: when it matches, the whole head record moves to 'new'.
 */
3472 if (inp->sctp_vtag_first == asoc->my_vtag) {
3473 /* First one must be moved */
3474 struct mbuf *mm;
3475 for (mm = old_sb->ssb_mb; mm; mm = mm->m_next) {
3477 * Go down the chain and fix
3478 * the space allocation of the
3479 * two sockets.
3481 sbfree(&old_sb->sb, mm);
3482 sballoc(&new_sb->sb, mm);
3484 new_sb->ssb_mb = old_sb->ssb_mb;
3485 old_sb->ssb_mb = new_sb->ssb_mb->m_nextpkt;
3486 new_sb->ssb_mb->m_nextpkt = NULL;
3487 put = &new_sb->ssb_mb->m_nextpkt;
3488 moved_top = 1;
3489 } else {
3490 put = &new_sb->ssb_mb;
/*
 * Walk the remaining records: 'take' tracks the unlink point in the
 * old list, 'put' the append point in the new list.
 */
3493 take = &old_sb->ssb_mb;
3494 next = old_sb->ssb_mb;
3495 while (next) {
3496 this = next;
3497 /* postion for next one */
3498 next = this->m_nextpkt;
3499 /* check the tag of this packet */
3500 if (sctp_should_be_moved(this, asoc)) {
3501 /* yes this needs to be moved */
3502 struct mbuf *mm;
3503 *take = this->m_nextpkt;
3504 this->m_nextpkt = NULL;
3505 *put = this;
3506 for (mm = this; mm; mm = mm->m_next) {
3508 * Go down the chain and fix
3509 * the space allocation of the
3510 * two sockets.
3512 sbfree(&old_sb->sb, mm);
3513 sballoc(&new_sb->sb, mm);
3515 put = &this->m_nextpkt;
3517 } else {
3518 /* no advance our take point. */
3519 take = &this->m_nextpkt;
3522 if (moved_top) {
3524 * Ok so now we must re-postion vtag_first to
3525 * match the new first one since we moved the
3526 * mbuf at the top.
3528 inp->sctp_vtag_first = sctp_get_first_vtag_from_sb(old);
/* release in reverse acquisition order */
3530 lwkt_reltoken(&new_sb->ssb_token);
3531 lwkt_reltoken(&old_sb->ssb_token);
3532 SOCKBUF_UNLOCK(old_sb);
3533 SOCKBUF_UNLOCK(new_sb);
3536 void
3537 sctp_free_bufspace(struct sctp_tcb *stcb, struct sctp_association *asoc,
3538 struct sctp_tmit_chunk *tp1)
3540 if (tp1->data == NULL) {
3541 return;
3543 #ifdef SCTP_MBCNT_LOGGING
3544 sctp_log_mbcnt(SCTP_LOG_MBCNT_DECREASE,
3545 asoc->total_output_queue_size,
3546 tp1->book_size,
3547 asoc->total_output_mbuf_queue_size,
3548 tp1->mbcnt);
3549 #endif
3550 if (asoc->total_output_queue_size >= tp1->book_size) {
3551 asoc->total_output_queue_size -= tp1->book_size;
3552 } else {
3553 asoc->total_output_queue_size = 0;
3556 /* Now free the mbuf */
3557 if (asoc->total_output_mbuf_queue_size >= tp1->mbcnt) {
3558 asoc->total_output_mbuf_queue_size -= tp1->mbcnt;
3559 } else {
3560 asoc->total_output_mbuf_queue_size = 0;
3562 if ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) ||
3563 (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL)) {
3564 if (stcb->sctp_socket->so_snd.ssb_cc >= tp1->book_size) {
3565 stcb->sctp_socket->so_snd.ssb_cc -= tp1->book_size;
3566 } else {
3567 stcb->sctp_socket->so_snd.ssb_cc = 0;
3570 if (stcb->sctp_socket->so_snd.ssb_mbcnt >= tp1->mbcnt) {
3571 stcb->sctp_socket->so_snd.ssb_mbcnt -= tp1->mbcnt;
3572 } else {
3573 stcb->sctp_socket->so_snd.ssb_mbcnt = 0;
/*
 * sctp_release_pr_sctp_chunk() -- abandon the PR-SCTP message that
 * chunk tp1 belongs to: mark each of its chunks SCTP_FORWARD_TSN_SKIP,
 * free their data, notify the ULP of the datagram failure, and (when
 * walking the send queue) migrate the chunks onto the sent queue.  If
 * the fragmented message spans both queues, recurse into the send
 * queue to finish it.  Returns the total book_size released.
 *
 * NOTE(review): this listing has lost the blank/brace-only lines of the
 * original file; the code tokens below are otherwise unchanged.
 */
3579 sctp_release_pr_sctp_chunk(struct sctp_tcb *stcb, struct sctp_tmit_chunk *tp1,
3580 int reason, struct sctpchunk_listhead *queue)
3582 int ret_sz = 0;
3583 int notdone;
3584 uint8_t foundeom = 0;
3586 do {
3587 ret_sz += tp1->book_size;
3588 tp1->sent = SCTP_FORWARD_TSN_SKIP;
3589 if (tp1->data) {
/* release accounting, tell the ULP, then drop the data itself */
3590 sctp_free_bufspace(stcb, &stcb->asoc, tp1);
3591 sctp_ulp_notify(SCTP_NOTIFY_DG_FAIL, stcb, reason, tp1);
3592 sctp_m_freem(tp1->data);
3593 tp1->data = NULL;
/* freed send space may unblock a writer */
3594 sctp_sowwakeup(stcb->sctp_ep, stcb->sctp_socket);
3596 if (tp1->flags & SCTP_PR_SCTP_BUFFER) {
3597 stcb->asoc.sent_queue_cnt_removeable--;
/* chunks found on the send queue migrate to the sent queue */
3599 if (queue == &stcb->asoc.send_queue) {
3600 TAILQ_REMOVE(&stcb->asoc.send_queue, tp1, sctp_next);
3601 /* on to the sent queue */
3602 TAILQ_INSERT_TAIL(&stcb->asoc.sent_queue, tp1,
3603 sctp_next);
3604 stcb->asoc.sent_queue_cnt++;
/* decide whether this chunk ends the message */
3606 if ((tp1->rec.data.rcv_flags & SCTP_DATA_NOT_FRAG) ==
3607 SCTP_DATA_NOT_FRAG) {
3608 /* not frag'ed we ae done */
3609 notdone = 0;
3610 foundeom = 1;
3611 } else if (tp1->rec.data.rcv_flags & SCTP_DATA_LAST_FRAG) {
3612 /* end of frag, we are done */
3613 notdone = 0;
3614 foundeom = 1;
3615 } else {
3616 /* Its a begin or middle piece, we must mark all of it */
3617 notdone = 1;
3618 tp1 = TAILQ_NEXT(tp1, sctp_next);
3620 } while (tp1 && notdone);
3621 if ((foundeom == 0) && (queue == &stcb->asoc.sent_queue)) {
3623 * The multi-part message was scattered
3624 * across the send and sent queue.
3626 tp1 = TAILQ_FIRST(&stcb->asoc.send_queue);
3628 * recurse throught the send_queue too, starting at the
3629 * beginning.
3631 if (tp1) {
3632 ret_sz += sctp_release_pr_sctp_chunk(stcb, tp1, reason,
3633 &stcb->asoc.send_queue);
3634 } else {
3635 kprintf("hmm, nothing on the send queue and no EOM?\n");
3638 return (ret_sz);
3642 * checks to see if the given address, sa, is one that is currently
3643 * known by the kernel
3644 * note: can't distinguish the same address on multiple interfaces and
3645 * doesn't handle multiple addresses with different zone/scope id's
3646 * note: ifa_ifwithaddr() compares the entire sockaddr struct
3648 struct ifaddr *
3649 sctp_find_ifa_by_addr(struct sockaddr *sa)
3651 struct ifnet *ifn;
3653 /* go through all our known interfaces */
3654 TAILQ_FOREACH(ifn, &ifnet, if_list) {
3655 struct ifaddr_container *ifac;
3657 /* go through each interface addresses */
3658 TAILQ_FOREACH(ifac, &ifn->if_addrheads[mycpuid], ifa_link) {
3659 struct ifaddr *ifa = ifac->ifa;
3661 /* correct family? */
3662 if (ifa->ifa_addr->sa_family != sa->sa_family)
3663 continue;
3665 #ifdef INET6
3666 if (ifa->ifa_addr->sa_family == AF_INET6) {
3667 /* IPv6 address */
3668 struct sockaddr_in6 *sin1, *sin2, sin6_tmp;
3669 sin1 = (struct sockaddr_in6 *)ifa->ifa_addr;
3670 if (IN6_IS_SCOPE_LINKLOCAL(&sin1->sin6_addr)) {
3671 /* create a copy and clear scope */
3672 memcpy(&sin6_tmp, sin1,
3673 sizeof(struct sockaddr_in6));
3674 sin1 = &sin6_tmp;
3675 in6_clearscope(&sin1->sin6_addr);
3677 sin2 = (struct sockaddr_in6 *)sa;
3678 if (memcmp(&sin1->sin6_addr, &sin2->sin6_addr,
3679 sizeof(struct in6_addr)) == 0) {
3680 /* found it */
3681 return (ifa);
3683 } else
3684 #endif
3685 if (ifa->ifa_addr->sa_family == AF_INET) {
3686 /* IPv4 address */
3687 struct sockaddr_in *sin1, *sin2;
3688 sin1 = (struct sockaddr_in *)ifa->ifa_addr;
3689 sin2 = (struct sockaddr_in *)sa;
3690 if (sin1->sin_addr.s_addr ==
3691 sin2->sin_addr.s_addr) {
3692 /* found it */
3693 return (ifa);
3696 /* else, not AF_INET or AF_INET6, so skip */
3697 } /* end foreach ifa */
3698 } /* end foreach ifn */
3699 /* not found! */
3700 return (NULL);
3704 #ifdef __APPLE__
3706 * here we hack in a fix for Apple's m_copym for the case where the first mbuf
3707 * in the chain is a M_PKTHDR and the length is zero
3709 static void
3710 sctp_pkthdr_fix(struct mbuf *m)
3712 struct mbuf *m_nxt;
3714 if ((m->m_flags & M_PKTHDR) == 0) {
3715 /* not a PKTHDR */
3716 return;
3719 if (m->m_len != 0) {
3720 /* not a zero length PKTHDR mbuf */
3721 return;
3724 /* let's move in a word into the first mbuf... yes, ugly! */
3725 m_nxt = m->m_next;
3726 if (m_nxt == NULL) {
3727 /* umm... not a very useful mbuf chain... */
3728 return;
3730 if ((size_t)m_nxt->m_len > sizeof(long)) {
3731 /* move over a long */
3732 bcopy(mtod(m_nxt, caddr_t), mtod(m, caddr_t), sizeof(long));
3733 /* update mbuf data pointers and lengths */
3734 m->m_len += sizeof(long);
3735 m_nxt->m_data += sizeof(long);
3736 m_nxt->m_len -= sizeof(long);
inline struct mbuf *
sctp_m_copym(struct mbuf *m, int off, int len, int wait)
{
	/*
	 * m_copym() wrapper that first repairs a zero-length M_PKTHDR
	 * at the head of the chain (Apple workaround, see
	 * sctp_pkthdr_fix() above).
	 */
	sctp_pkthdr_fix(m);
	return (m_copym(m, off, len, wait));
}
3746 #endif /* __APPLE__ */