Reduce differences between our VKERNEL and VKERNEL64 configurations.
[dragonfly.git] / sys / netinet / sctp_timer.c
bloba9f7b0ef79fa5c289d2568312d7b0cb70c1e3bdb
1 /* $KAME: sctp_timer.c,v 1.28 2004/08/17 04:06:20 itojun Exp $ */
3 /*
4 * Copyright (C) 2002, 2003, 2004 Cisco Systems Inc,
5 * All rights reserved.
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions
9 * are met:
10 * 1. Redistributions of source code must retain the above copyright
11 * notice, this list of conditions and the following disclaimer.
12 * 2. Redistributions in binary form must reproduce the above copyright
13 * notice, this list of conditions and the following disclaimer in the
14 * documentation and/or other materials provided with the distribution.
15 * 3. Neither the name of the project nor the names of its contributors
16 * may be used to endorse or promote products derived from this software
17 * without specific prior written permission.
19 * THIS SOFTWARE IS PROVIDED BY THE PROJECT AND CONTRIBUTORS ``AS IS'' AND
20 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
21 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
22 * ARE DISCLAIMED. IN NO EVENT SHALL THE PROJECT OR CONTRIBUTORS BE LIABLE
23 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
24 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
25 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
26 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
27 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
28 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
29 * SUCH DAMAGE.
31 #if !(defined(__OpenBSD__) || defined(__APPLE__))
32 #include "opt_ipsec.h"
33 #endif
34 #if defined(__FreeBSD__) || defined(__DragonFly__)
35 #include "opt_compat.h"
36 #include "opt_inet6.h"
37 #include "opt_inet.h"
38 #endif
39 #if defined(__NetBSD__)
40 #include "opt_inet.h"
41 #endif
42 #ifdef __APPLE__
43 #include <sctp.h>
44 #elif !defined(__OpenBSD__)
45 #include "opt_sctp.h"
46 #endif
48 #include <sys/param.h>
49 #include <sys/systm.h>
50 #include <sys/malloc.h>
51 #include <sys/mbuf.h>
52 #ifndef __OpenBSD__
53 #include <sys/domain.h>
54 #endif
55 #include <sys/protosw.h>
56 #include <sys/socket.h>
57 #include <sys/socketvar.h>
58 #include <sys/proc.h>
59 #include <sys/kernel.h>
60 #include <sys/sysctl.h>
61 #ifdef INET6
62 #include <sys/domain.h>
63 #endif
65 #if (defined(__FreeBSD__) && __FreeBSD_version >= 500000)
66 #include <sys/limits.h>
67 #else
68 #include <machine/limits.h>
69 #endif
71 #include <net/if.h>
72 #include <net/if_types.h>
73 #include <net/route.h>
74 #include <netinet/in.h>
75 #include <netinet/in_systm.h>
76 #define _IP_VHL
77 #include <netinet/ip.h>
78 #include <netinet/in_pcb.h>
79 #include <netinet/in_var.h>
80 #include <netinet/ip_var.h>
82 #ifdef INET6
83 #include <netinet/ip6.h>
84 #include <netinet6/ip6_var.h>
85 #endif /* INET6 */
87 #include <netinet/sctp_pcb.h>
89 #ifdef IPSEC
90 #ifndef __OpenBSD__
91 #include <netinet6/ipsec.h>
92 #include <netproto/key/key.h>
93 #else
94 #undef IPSEC
95 #endif
96 #endif /* IPSEC */
97 #ifdef INET6
98 #include <netinet6/sctp6_var.h>
99 #endif
100 #include <netinet/sctp_var.h>
101 #include <netinet/sctp_timer.h>
102 #include <netinet/sctputil.h>
103 #include <netinet/sctp_output.h>
104 #include <netinet/sctp_hashdriver.h>
105 #include <netinet/sctp_header.h>
106 #include <netinet/sctp_indata.h>
107 #include <netinet/sctp_asconf.h>
109 #include <netinet/sctp.h>
110 #include <netinet/sctp_uio.h>
112 #include <net/net_osdep.h>
114 #ifdef SCTP_DEBUG
115 extern u_int32_t sctp_debug_on;
116 #endif /* SCTP_DEBUG */
/*
 * sctp_audit_retranmission_queue() -- rebuild the association's cached
 * sent_queue_retran_cnt and sent_queue_cnt counters from scratch by
 * walking the sent queue and the control-send queue.  Corrects any
 * drift that accumulated in the cached counters.
 */
118 void
119 sctp_audit_retranmission_queue(struct sctp_association *asoc)
121 struct sctp_tmit_chunk *chk;
123 #ifdef SCTP_DEBUG
124 if (sctp_debug_on & SCTP_DEBUG_TIMER4) {
125 kprintf("Audit invoked on send queue cnt:%d onqueue:%d\n",
126 asoc->sent_queue_retran_cnt,
127 asoc->sent_queue_cnt);
129 #endif /* SCTP_DEBUG */
/* Zero both counters, then recount below. */
130 asoc->sent_queue_retran_cnt = 0;
131 asoc->sent_queue_cnt = 0;
/* Every sent-queue chunk counts; RESEND-marked ones also count as retrans. */
132 TAILQ_FOREACH(chk, &asoc->sent_queue, sctp_next) {
133 if (chk->sent == SCTP_DATAGRAM_RESEND) {
134 asoc->sent_queue_retran_cnt++;
136 asoc->sent_queue_cnt++;
/* Control chunks awaiting retransmission add to the retran count only. */
138 TAILQ_FOREACH(chk, &asoc->control_send_queue, sctp_next) {
139 if (chk->sent == SCTP_DATAGRAM_RESEND) {
140 asoc->sent_queue_retran_cnt++;
143 #ifdef SCTP_DEBUG
144 if (sctp_debug_on & SCTP_DEBUG_TIMER4) {
145 kprintf("Audit completes retran:%d onqueue:%d\n",
146 asoc->sent_queue_retran_cnt,
147 asoc->sent_queue_cnt);
149 #endif /* SCTP_DEBUG */
/*
 * sctp_threshold_management() -- error-threshold bookkeeping run on a
 * timeout.  Bumps the per-destination error count (when 'net' is given)
 * and the association-wide overall_error_count; once a destination's
 * failure_threshold is reached it is flagged NOT_REACHABLE and the ULP
 * is told via SCTP_NOTIFY_INTERFACE_DOWN.  When overall_error_count
 * exceeds 'threshold' the association is aborted with a
 * PROTOCOL_VIOLATION cause.
 *
 * Returns 1 if the association was aborted/destroyed, 0 otherwise.
 */
153 sctp_threshold_management(struct sctp_inpcb *inp, struct sctp_tcb *stcb,
154 struct sctp_nets *net, uint16_t threshold)
156 if (net) {
157 net->error_count++;
158 #ifdef SCTP_DEBUG
159 if (sctp_debug_on & SCTP_DEBUG_TIMER4) {
160 kprintf("Error count for %p now %d thresh:%d\n",
161 net, net->error_count,
162 net->failure_threshold);
164 #endif /* SCTP_DEBUG */
165 if (net->error_count >= net->failure_threshold) {
166 /* We had a threshold failure */
167 if (net->dest_state & SCTP_ADDR_REACHABLE) {
168 net->dest_state &= ~SCTP_ADDR_REACHABLE;
169 net->dest_state |= SCTP_ADDR_NOT_REACHABLE;
170 if (net == stcb->asoc.primary_destination) {
171 net->dest_state |= SCTP_ADDR_WAS_PRIMARY;
173 sctp_ulp_notify(SCTP_NOTIFY_INTERFACE_DOWN,
174 stcb,
175 SCTP_FAILED_THRESHOLD,
176 (void *)net);
179 /*********HOLD THIS COMMENT FOR PATCH OF ALTERNATE
180 *********ROUTING CODE
*********/
182 /*********HOLD THIS COMMENT FOR END OF PATCH OF ALTERNATE
183 *********ROUTING CODE
*********/
186 if (stcb == NULL)
187 return (0);
189 if (net) {
/*
 * NOTE(review): both branches below increment overall_error_count;
 * presumably errors on an UNCONFIRMED address were meant to be treated
 * differently -- confirm against upstream history.
 */
190 if ((net->dest_state & SCTP_ADDR_UNCONFIRMED) == 0) {
191 stcb->asoc.overall_error_count++;
193 } else {
194 stcb->asoc.overall_error_count++;
196 #ifdef SCTP_DEBUG
197 if (sctp_debug_on & SCTP_DEBUG_TIMER4) {
198 kprintf("Overall error count for %p now %d thresh:%u state:%x\n",
199 &stcb->asoc,
200 stcb->asoc.overall_error_count,
201 (u_int)threshold,
202 ((net == NULL) ? (u_int)0 : (u_int)net->dest_state));
204 #endif /* SCTP_DEBUG */
205 /* We specifically do not do >= to give the assoc one more
206 * change before we fail it.
 */
208 if (stcb->asoc.overall_error_count > threshold) {
209 /* Abort notification sends a ULP notify */
210 struct mbuf *oper;
211 MGET(oper, MB_DONTWAIT, MT_DATA);
212 if (oper) {
213 struct sctp_paramhdr *ph;
214 u_int32_t *ippp;
/* Build a PROTOCOL_VIOLATION error cause to carry in the ABORT. */
216 oper->m_len = sizeof(struct sctp_paramhdr) +
217 sizeof(*ippp);
218 ph = mtod(oper, struct sctp_paramhdr *);
219 ph->param_type = htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
220 ph->param_length = htons(oper->m_len);
221 ippp = (u_int32_t *)(ph + 1);
222 *ippp = htonl(0x40000001);
224 sctp_abort_an_association(inp, stcb, SCTP_FAILED_THRESHOLD, oper);
225 return (1);
227 return (0);
/*
 * sctp_find_alternate_net() -- pick an alternate destination network
 * for retransmissions.  First pass: walk the net list (wrapping once)
 * looking for an address that is REACHABLE, confirmed, and has a
 * cached route (forcing a route lookup if none is cached).  Second
 * pass (dormant case, nothing reachable): simply rotate to the next
 * confirmed address different from 'net'.  Falls back to returning
 * 'net' itself when no alternate exists.
 */
230 struct sctp_nets *
231 sctp_find_alternate_net(struct sctp_tcb *stcb,
232 struct sctp_nets *net)
234 /* Find and return an alternate network if possible */
235 struct sctp_nets *alt, *mnet;
236 int once;
238 if (stcb->asoc.numnets == 1) {
239 /* No others but net */
240 return (TAILQ_FIRST(&stcb->asoc.nets));
242 mnet = net;
243 once = 0;
245 if (mnet == NULL) {
246 mnet = TAILQ_FIRST(&stcb->asoc.nets);
248 do {
249 alt = TAILQ_NEXT(mnet, sctp_next);
250 if (alt == NULL) {
/* Wrapped off the end of the list; allow exactly one wrap-around. */
251 once++;
252 if (once > 1) {
253 break;
255 alt = TAILQ_FIRST(&stcb->asoc.nets);
/* No cached route: embed the v6 scope if needed, then look one up. */
257 if (alt->ro.ro_rt == NULL) {
258 #ifndef SCOPEDROUTING
259 struct sockaddr_in6 *sin6;
260 sin6 = (struct sockaddr_in6 *)&alt->ro._l_addr;
261 if (sin6->sin6_family == AF_INET6) {
262 #if defined(SCTP_BASE_FREEBSD) || defined(__APPLE__) || defined(__DragonFly__)
263 in6_embedscope(&sin6->sin6_addr, sin6,
264 NULL, NULL);
265 #else
266 in6_embedscope(&sin6->sin6_addr, sin6);
267 #endif
269 #endif
270 #if defined(__FreeBSD__) || defined(__APPLE__) || defined(__DragonFly__)
271 rtalloc_ign((struct route*)&alt->ro, 0UL);
272 #else
273 rtalloc((struct route*)&alt->ro);
274 #endif
275 #ifndef SCOPEDROUTING
276 if (sin6->sin6_family == AF_INET6) {
277 in6_recoverscope(sin6, &sin6->sin6_addr, NULL);
279 #endif
/* Route changed: force source-address re-selection for this net. */
280 alt->src_addr_selected = 0;
282 if (
283 ((alt->dest_state & SCTP_ADDR_REACHABLE) == SCTP_ADDR_REACHABLE) &&
284 (alt->ro.ro_rt != NULL) &&
285 (!(alt->dest_state & SCTP_ADDR_UNCONFIRMED))
287 /* Found a reachable address */
288 break;
290 mnet = alt;
291 } while (alt != NULL);
293 if (alt == NULL) {
294 /* Case where NO insv network exists (dormant state) */
295 /* we rotate destinations */
296 once = 0;
297 mnet = net;
298 do {
299 alt = TAILQ_NEXT(mnet, sctp_next);
300 if (alt == NULL) {
301 once++;
302 if (once > 1) {
303 break;
305 alt = TAILQ_FIRST(&stcb->asoc.nets);
307 if ((!(alt->dest_state & SCTP_ADDR_UNCONFIRMED)) &&
308 (alt != net)) {
309 /* Found an alternate address */
310 break;
312 mnet = alt;
313 } while (alt != NULL);
315 if (alt == NULL) {
316 return (net);
318 return (alt);
/*
 * sctp_backoff_on_timeout() -- RTO/cwnd backoff after a timer expiry.
 * Doubles the destination's RTO (capped at asoc.maxrto).  Unless this
 * was a window probe (or nothing was marked), also applies the loss
 * penalty: ssthresh = cwnd/2 floored at 2*MTU, cwnd collapsed to one
 * MTU, and partial_bytes_acked reset.
 */
321 static void
322 sctp_backoff_on_timeout(struct sctp_tcb *stcb,
323 struct sctp_nets *net,
324 int win_probe,
325 int num_marked)
327 #ifdef SCTP_DEBUG
328 int oldRTO;
330 oldRTO = net->RTO;
331 #endif /* SCTP_DEBUG */
/* Exponential backoff of the retransmission timeout. */
332 net->RTO <<= 1;
333 #ifdef SCTP_DEBUG
334 if (sctp_debug_on & SCTP_DEBUG_TIMER2) {
335 kprintf("Timer doubles from %d ms -to-> %d ms\n",
336 oldRTO, net->RTO);
338 #endif /* SCTP_DEBUG */
340 if (net->RTO > stcb->asoc.maxrto) {
341 net->RTO = stcb->asoc.maxrto;
342 #ifdef SCTP_DEBUG
343 if (sctp_debug_on & SCTP_DEBUG_TIMER2) {
344 kprintf("Growth capped by maxrto %d\n",
345 net->RTO);
347 #endif /* SCTP_DEBUG */
351 if ((win_probe == 0) && num_marked) {
352 /* We don't apply penalty to window probe scenarios */
353 #ifdef SCTP_CWND_LOGGING
354 int old_cwnd=net->cwnd;
355 #endif
356 net->ssthresh = net->cwnd >> 1;
357 if (net->ssthresh < (net->mtu << 1)) {
358 net->ssthresh = (net->mtu << 1);
360 net->cwnd = net->mtu;
361 /* floor of 1 mtu */
362 if (net->cwnd < net->mtu)
363 net->cwnd = net->mtu;
364 #ifdef SCTP_CWND_LOGGING
365 sctp_log_cwnd(net, net->cwnd-old_cwnd, SCTP_CWND_LOG_FROM_RTX);
366 #endif
368 net->partial_bytes_acked = 0;
369 #ifdef SCTP_DEBUG
370 if (sctp_debug_on & SCTP_DEBUG_TIMER1) {
371 kprintf("collapse cwnd to 1MTU ssthresh to %d\n",
372 net->ssthresh);
374 #endif
/*
 * sctp_mark_all_for_resend() -- core of T3-rtx handling.  Walks the
 * sent queue and marks chunks that were sent to 'net' (and have been
 * outstanding at least one RTO) as SCTP_DATAGRAM_RESEND, re-homing
 * them to 'alt'.  Also expires PR-SCTP chunks whose lifetime passed,
 * re-homes stranded ECN-ECHO control chunks, fixes up flight-size and
 * peer-rwnd accounting, and establishes the ECN nonce re-sync point.
 * *num_marked is set to the number of data chunks marked.
 *
 * Returns 1 when only a window probe was outstanding, else 0.
 */
380 static int
381 sctp_mark_all_for_resend(struct sctp_tcb *stcb,
382 struct sctp_nets *net,
383 struct sctp_nets *alt,
384 int *num_marked)
/*
388 * Mark all chunks (well not all) that were sent to *net for retransmission.
389 * Move them to alt for there destination as well... We only
390 * mark chunks that have been outstanding long enough to have
391 * received feed-back.
 */
393 struct sctp_tmit_chunk *chk, *tp2;
394 struct sctp_nets *lnets;
395 struct timeval now, min_wait, tv;
396 int cur_rto;
397 int win_probes, non_win_probes, orig_rwnd, audit_tf, num_mk, fir;
398 unsigned int cnt_mk;
399 u_int32_t orig_flight;
400 u_int32_t tsnlast, tsnfirst;
402 /* none in flight now */
403 audit_tf = 0;
404 fir=0;
405 /* figure out how long a data chunk must be pending
406 * before we can mark it ..
 */
408 SCTP_GETTIME_TIMEVAL(&now);
409 /* get cur rto in micro-seconds */
410 cur_rto = (((net->lastsa >> 2) + net->lastsv) >> 1);
411 #ifdef SCTP_FR_LOGGING
412 sctp_log_fr(cur_rto, 0, 0, SCTP_FR_T3_MARK_TIME);
413 #endif
414 cur_rto *= 1000;
415 #ifdef SCTP_FR_LOGGING
416 sctp_log_fr(cur_rto, 0, 0, SCTP_FR_T3_MARK_TIME);
417 #endif
418 tv.tv_sec = cur_rto / 1000000;
419 tv.tv_usec = cur_rto % 1000000;
/* min_wait = now - RTO: only chunks sent before this instant qualify. */
420 #ifndef __FreeBSD__
421 timersub(&now, &tv, &min_wait);
422 #else
423 min_wait = now;
424 timevalsub(&min_wait, &tv);
425 #endif
426 if (min_wait.tv_sec < 0 || min_wait.tv_usec < 0) {
/*
428 * if we hit here, we don't
429 * have enough seconds on the clock to account
430 * for the RTO. We just let the lower seconds
431 * be the bounds and don't worry about it. This
432 * may mean we will mark a lot more than we should.
 */
434 min_wait.tv_sec = min_wait.tv_usec = 0;
436 #ifdef SCTP_FR_LOGGING
437 sctp_log_fr(cur_rto, now.tv_sec, now.tv_usec, SCTP_FR_T3_MARK_TIME);
438 sctp_log_fr(0, min_wait.tv_sec, min_wait.tv_usec, SCTP_FR_T3_MARK_TIME);
439 #endif
/* Pull this net's flight out of the assoc total; re-added below if unmarked. */
440 stcb->asoc.total_flight -= net->flight_size;
441 if (stcb->asoc.total_flight < 0) {
442 audit_tf = 1;
443 stcb->asoc.total_flight = 0;
445 /* Our rwnd will be incorrect here since we are not adding
446 * back the cnt * mbuf but we will fix that down below.
 */
448 orig_rwnd = stcb->asoc.peers_rwnd;
449 orig_flight = net->flight_size;
450 stcb->asoc.peers_rwnd += net->flight_size;
451 net->flight_size = 0;
452 net->rto_pending = 0;
453 net->fast_retran_ip= 0;
454 win_probes = non_win_probes = 0;
455 #ifdef SCTP_DEBUG
456 if (sctp_debug_on & SCTP_DEBUG_TIMER2) {
457 kprintf("Marking ALL un-acked for retransmission at t3-timeout\n");
459 #endif /* SCTP_DEBUG */
460 /* Now on to each chunk */
461 num_mk = cnt_mk = 0;
462 tsnfirst = tsnlast = 0;
463 chk = TAILQ_FIRST(&stcb->asoc.sent_queue);
464 for (;chk != NULL; chk = tp2) {
465 tp2 = TAILQ_NEXT(chk, sctp_next);
/* Chunks at or below the cumulative ack should not still be queued. */
466 if ((compare_with_wrap(stcb->asoc.last_acked_seq,
467 chk->rec.data.TSN_seq,
468 MAX_TSN)) ||
469 (stcb->asoc.last_acked_seq == chk->rec.data.TSN_seq)) {
470 /* Strange case our list got out of order? */
471 kprintf("Our list is out of order?\n");
472 TAILQ_REMOVE(&stcb->asoc.sent_queue, chk, sctp_next);
473 if (chk->data) {
474 sctp_release_pr_sctp_chunk(stcb, chk, 0xffff,
475 &stcb->asoc.sent_queue);
476 if (chk->flags & SCTP_PR_SCTP_BUFFER) {
477 stcb->asoc.sent_queue_cnt_removeable--;
480 stcb->asoc.sent_queue_cnt--;
481 sctp_free_remote_addr(chk->whoTo);
482 sctppcbinfo.ipi_count_chunk--;
483 if ((int)sctppcbinfo.ipi_count_chunk < 0) {
484 panic("Chunk count is going negative");
486 SCTP_ZONE_FREE(sctppcbinfo.ipi_zone_chunk, chk);
487 sctppcbinfo.ipi_gencnt_chunk++;
488 continue;
490 if ((chk->whoTo == net) && (chk->sent < SCTP_DATAGRAM_ACKED)) {
491 /* found one to mark:
492 * If it is less than DATAGRAM_ACKED it MUST
493 * not be a skipped or marked TSN but instead
494 * one that is either already set for retransmission OR
495 * one that needs retransmission.
 */
498 /* validate its been outstanding long enough */
499 #ifdef SCTP_FR_LOGGING
500 sctp_log_fr(chk->rec.data.TSN_seq,
501 chk->sent_rcv_time.tv_sec,
502 chk->sent_rcv_time.tv_usec,
503 SCTP_FR_T3_MARK_TIME);
504 #endif
505 if (chk->sent_rcv_time.tv_sec > min_wait.tv_sec) {
506 /* we have reached a chunk that was sent some
507 * seconds past our min.. forget it we will
508 * find no more to send.
 */
510 #ifdef SCTP_FR_LOGGING
511 sctp_log_fr(0,
512 chk->sent_rcv_time.tv_sec,
513 chk->sent_rcv_time.tv_usec,
514 SCTP_FR_T3_STOPPED);
515 #endif
516 continue;
517 } else if (chk->sent_rcv_time.tv_sec == min_wait.tv_sec) {
518 /* we must look at the micro seconds to know.
 */
520 if (chk->sent_rcv_time.tv_usec >= min_wait.tv_usec) {
521 /* ok it was sent after our boundary time. */
522 #ifdef SCTP_FR_LOGGING
523 sctp_log_fr(0,
524 chk->sent_rcv_time.tv_sec,
525 chk->sent_rcv_time.tv_usec,
526 SCTP_FR_T3_STOPPED);
527 #endif
528 continue;
531 stcb->asoc.total_flight_count--;
532 if (stcb->asoc.total_flight_count < 0) {
533 stcb->asoc.total_flight_count = 0;
/* PR-SCTP timed-reliability: drop the chunk if its lifetime expired. */
535 if ((chk->flags & (SCTP_PR_SCTP_ENABLED|SCTP_PR_SCTP_BUFFER)) == SCTP_PR_SCTP_ENABLED) {
536 /* Is it expired? */
537 if ((now.tv_sec > chk->rec.data.timetodrop.tv_sec) ||
538 ((chk->rec.data.timetodrop.tv_sec == now.tv_sec) &&
539 (now.tv_usec > chk->rec.data.timetodrop.tv_usec))) {
540 /* Yes so drop it */
541 if (chk->data) {
542 sctp_release_pr_sctp_chunk(stcb,
543 chk,
544 (SCTP_RESPONSE_TO_USER_REQ|SCTP_NOTIFY_DATAGRAM_SENT),
545 &stcb->asoc.sent_queue);
548 continue;
550 if (chk->sent != SCTP_DATAGRAM_RESEND) {
551 stcb->asoc.sent_queue_retran_cnt++;
552 num_mk++;
553 if (fir == 0) {
554 fir = 1;
555 #ifdef SCTP_DEBUG
556 if (sctp_debug_on & SCTP_DEBUG_TIMER1) {
557 kprintf("First TSN marked was %x\n",
558 chk->rec.data.TSN_seq);
560 #endif
561 tsnfirst = chk->rec.data.TSN_seq;
563 tsnlast = chk->rec.data.TSN_seq;
564 #ifdef SCTP_FR_LOGGING
565 sctp_log_fr(chk->rec.data.TSN_seq, chk->snd_count,
566 0, SCTP_FR_T3_MARKED);
568 #endif
570 chk->sent = SCTP_DATAGRAM_RESEND;
571 /* reset the TSN for striking and other FR stuff */
572 chk->rec.data.doing_fast_retransmit = 0;
573 #ifdef SCTP_DEBUG
574 if (sctp_debug_on & SCTP_DEBUG_TIMER3) {
575 kprintf("mark TSN:%x for retransmission\n", chk->rec.data.TSN_seq);
577 #endif /* SCTP_DEBUG */
578 /* Clear any time so NO RTT is being done */
579 chk->do_rtt = 0;
580 /* Bump up the count */
581 if (compare_with_wrap(chk->rec.data.TSN_seq,
582 stcb->asoc.t3timeout_highest_marked,
583 MAX_TSN)) {
584 /* TSN_seq > than t3timeout so update */
585 stcb->asoc.t3timeout_highest_marked = chk->rec.data.TSN_seq;
/* Re-home the chunk onto the alternate destination. */
587 if (alt != net) {
588 sctp_free_remote_addr(chk->whoTo);
589 chk->whoTo = alt;
590 alt->ref_count++;
592 if ((chk->rec.data.state_flags & SCTP_WINDOW_PROBE) !=
593 SCTP_WINDOW_PROBE) {
594 non_win_probes++;
595 } else {
596 chk->rec.data.state_flags &= ~SCTP_WINDOW_PROBE;
597 win_probes++;
600 if (chk->sent == SCTP_DATAGRAM_RESEND) {
601 cnt_mk++;
605 #ifdef SCTP_FR_LOGGING
606 sctp_log_fr(tsnfirst, tsnlast, num_mk, SCTP_FR_T3_TIMEOUT);
607 #endif
608 /* compensate for the number we marked */
609 stcb->asoc.peers_rwnd += (num_mk /* * sizeof(struct mbuf)*/);
611 #ifdef SCTP_DEBUG
612 if (sctp_debug_on & SCTP_DEBUG_TIMER1) {
613 if (num_mk) {
614 kprintf("LAST TSN marked was %x\n", tsnlast);
615 kprintf("Num marked for retransmission was %d peer-rwd:%ld\n",
616 num_mk, (u_long)stcb->asoc.peers_rwnd);
617 kprintf("LAST TSN marked was %x\n", tsnlast);
618 kprintf("Num marked for retransmission was %d peer-rwd:%d\n",
619 num_mk,
620 (int)stcb->asoc.peers_rwnd
624 #endif
625 *num_marked = num_mk;
/* Local recount disagrees with the cached counter: repair it. */
626 if (stcb->asoc.sent_queue_retran_cnt != cnt_mk) {
627 kprintf("Local Audit says there are %d for retran asoc cnt:%d\n",
628 cnt_mk, stcb->asoc.sent_queue_retran_cnt);
629 #ifndef SCTP_AUDITING_ENABLED
630 stcb->asoc.sent_queue_retran_cnt = cnt_mk;
631 #endif
633 #ifdef SCTP_DEBUG
634 if (sctp_debug_on & SCTP_DEBUG_TIMER3) {
635 kprintf("**************************\n");
637 #endif /* SCTP_DEBUG */
639 /* Now check for a ECN Echo that may be stranded */
640 TAILQ_FOREACH(chk, &stcb->asoc.control_send_queue, sctp_next) {
641 if ((chk->whoTo == net) &&
642 (chk->rec.chunk_id == SCTP_ECN_ECHO)) {
643 sctp_free_remote_addr(chk->whoTo);
644 chk->whoTo = alt;
645 if (chk->sent != SCTP_DATAGRAM_RESEND) {
646 chk->sent = SCTP_DATAGRAM_RESEND;
647 stcb->asoc.sent_queue_retran_cnt++;
649 alt->ref_count++;
652 if ((orig_rwnd == 0) && (stcb->asoc.total_flight == 0) &&
653 (orig_flight <= net->mtu)) {
/*
655 * If the LAST packet sent was not acked and our rwnd is 0
656 * then we are in a win-probe state.
 */
658 win_probes = 1;
659 non_win_probes = 0;
660 #ifdef SCTP_DEBUG
661 if (sctp_debug_on & SCTP_DEBUG_TIMER1) {
662 kprintf("WIN_PROBE set via o_rwnd=0 tf=0 and all:%d fit in mtu:%d\n",
663 orig_flight, net->mtu);
665 #endif
/* total_flight went negative earlier: rebuild flight accounting. */
668 if (audit_tf) {
669 #ifdef SCTP_DEBUG
670 if (sctp_debug_on & SCTP_DEBUG_TIMER4) {
671 kprintf("Audit total flight due to negative value net:%p\n",
672 net);
674 #endif /* SCTP_DEBUG */
675 stcb->asoc.total_flight = 0;
676 stcb->asoc.total_flight_count = 0;
677 /* Clear all networks flight size */
678 TAILQ_FOREACH(lnets, &stcb->asoc.nets, sctp_next) {
679 lnets->flight_size = 0;
680 #ifdef SCTP_DEBUG
681 if (sctp_debug_on & SCTP_DEBUG_TIMER4) {
682 kprintf("Net:%p c-f cwnd:%d ssthresh:%d\n",
683 lnets, lnets->cwnd, lnets->ssthresh);
685 #endif /* SCTP_DEBUG */
687 TAILQ_FOREACH(chk, &stcb->asoc.sent_queue, sctp_next) {
688 if (chk->sent < SCTP_DATAGRAM_RESEND) {
689 stcb->asoc.total_flight += chk->book_size;
690 chk->whoTo->flight_size += chk->book_size;
691 stcb->asoc.total_flight_count++;
695 /* Setup the ecn nonce re-sync point. We
696 * do this since retranmissions are NOT
697 * setup for ECN. This means that do to
698 * Karn's rule, we don't know the total
699 * of the peers ecn bits.
 */
701 chk = TAILQ_FIRST(&stcb->asoc.send_queue);
702 if (chk == NULL) {
703 stcb->asoc.nonce_resync_tsn = stcb->asoc.sending_seq;
704 } else {
705 stcb->asoc.nonce_resync_tsn = chk->rec.data.TSN_seq;
707 stcb->asoc.nonce_wait_for_ecne = 0;
708 stcb->asoc.nonce_sum_check = 0;
709 /* We return 1 if we only have a window probe outstanding */
710 if (win_probes && (non_win_probes == 0)) {
711 return (1);
713 return (0);
/*
 * sctp_move_all_chunks_to_alt() -- re-home every not-yet-sent chunk
 * whose destination is 'net' onto 'alt': scans each stream's output
 * queue and the association's pending send queue, swapping whoTo and
 * adjusting reference counts.  No-op when net == alt.
 */
716 static void
717 sctp_move_all_chunks_to_alt(struct sctp_tcb *stcb,
718 struct sctp_nets *net,
719 struct sctp_nets *alt)
721 struct sctp_association *asoc;
722 struct sctp_stream_out *outs;
723 struct sctp_tmit_chunk *chk;
725 if (net == alt)
726 /* nothing to do */
727 return;
729 asoc = &stcb->asoc;
/*
732 * now through all the streams checking for chunks sent to our
733 * bad network.
 */
735 TAILQ_FOREACH(outs, &asoc->out_wheel, next_spoke) {
736 /* now clean up any chunks here */
737 TAILQ_FOREACH(chk, &outs->outqueue, sctp_next) {
738 if (chk->whoTo == net) {
739 sctp_free_remote_addr(chk->whoTo);
740 chk->whoTo = alt;
741 alt->ref_count++;
745 /* Now check the pending queue */
746 TAILQ_FOREACH(chk, &asoc->send_queue, sctp_next) {
747 if (chk->whoTo == net) {
748 sctp_free_remote_addr(chk->whoTo);
749 chk->whoTo = alt;
750 alt->ref_count++;
/*
 * sctp_t3rxt_timer() -- T3-rtx (retransmission) timer expiry handler.
 * Picks an alternate net, marks outstanding chunks for resend, backs
 * off RTO/cwnd, runs threshold management (which may destroy the
 * association), moves pending chunks off an unreachable net, handles
 * the COOKIE-ECHOED special case, and advances the PR-SCTP peer ack
 * point (sending FWD-TSN) when supported.
 *
 * Returns 1 if the association was destroyed, 0 otherwise.
 */
757 sctp_t3rxt_timer(struct sctp_inpcb *inp,
758 struct sctp_tcb *stcb,
759 struct sctp_nets *net)
761 struct sctp_nets *alt;
762 int win_probe, num_mk;
765 #ifdef SCTP_FR_LOGGING
766 sctp_log_fr(0, 0, 0, SCTP_FR_T3_TIMEOUT);
767 #endif
768 /* Find an alternate and mark those for retransmission */
769 alt = sctp_find_alternate_net(stcb, net);
770 win_probe = sctp_mark_all_for_resend(stcb, net, alt, &num_mk);
772 /* FR Loss recovery just ended with the T3. */
773 stcb->asoc.fast_retran_loss_recovery = 0;
775 /* setup the sat loss recovery that prevents
776 * satellite cwnd advance.
 */
778 stcb->asoc.sat_t3_loss_recovery = 1;
779 stcb->asoc.sat_t3_recovery_tsn = stcb->asoc.sending_seq;
781 /* Backoff the timer and cwnd */
782 sctp_backoff_on_timeout(stcb, net, win_probe, num_mk);
783 if (win_probe == 0) {
784 /* We don't do normal threshold management on window probes */
785 if (sctp_threshold_management(inp, stcb, net,
786 stcb->asoc.max_send_times)) {
787 /* Association was destroyed */
788 return (1);
789 } else {
790 if (net != stcb->asoc.primary_destination) {
791 /* send a immediate HB if our RTO is stale */
792 struct timeval now;
793 unsigned int ms_goneby;
794 SCTP_GETTIME_TIMEVAL(&now);
795 if (net->last_sent_time.tv_sec) {
796 ms_goneby = (now.tv_sec - net->last_sent_time.tv_sec) * 1000;
797 } else {
798 ms_goneby = 0;
800 if ((ms_goneby > net->RTO) || (net->RTO == 0)) {
801 /* no recent feed back in an RTO or more, request a RTT update */
802 sctp_send_hb(stcb, 1, net);
806 } else {
/*
808 * For a window probe we don't penalize the net's but only
809 * the association. This may fail it if SACKs are not coming
810 * back. If sack's are coming with rwnd locked at 0, we will
811 * continue to hold things waiting for rwnd to raise
 */
813 if (sctp_threshold_management(inp, stcb, NULL,
814 stcb->asoc.max_send_times)) {
815 /* Association was destroyed */
816 return (1);
819 if (net->dest_state & SCTP_ADDR_NOT_REACHABLE) {
820 /* Move all pending over too */
821 sctp_move_all_chunks_to_alt(stcb, net, alt);
822 /* Was it our primary? */
823 if ((stcb->asoc.primary_destination == net) && (alt != net)) {
/*
825 * Yes, note it as such and find an alternate
826 * note: this means HB code must use this to resent
827 * the primary if it goes active AND if someone does
828 * a change-primary then this flag must be cleared
829 * from any net structures.
 */
831 if (sctp_set_primary_addr(stcb,
832 NULL,
833 alt) == 0) {
834 net->dest_state |= SCTP_ADDR_WAS_PRIMARY;
835 net->src_addr_selected = 0;
/*
840 * Special case for cookie-echo'ed case, we don't do output
841 * but must await the COOKIE-ACK before retransmission
 */
843 if (SCTP_GET_STATE(&stcb->asoc) == SCTP_STATE_COOKIE_ECHOED) {
/*
845 * Here we just reset the timer and start again since we
846 * have not established the asoc
 */
848 #ifdef SCTP_DEBUG
849 if (sctp_debug_on & SCTP_DEBUG_TIMER1) {
850 kprintf("Special cookie case return\n");
852 #endif /* SCTP_DEBUG */
853 sctp_timer_start(SCTP_TIMER_TYPE_SEND, inp, stcb, net);
854 return (0);
856 if (stcb->asoc.peer_supports_prsctp) {
857 struct sctp_tmit_chunk *lchk;
858 lchk = sctp_try_advance_peer_ack_point(stcb, &stcb->asoc);
859 /* C3. See if we need to send a Fwd-TSN */
860 if (compare_with_wrap(stcb->asoc.advanced_peer_ack_point,
861 stcb->asoc.last_acked_seq, MAX_TSN)) {
/*
863 * ISSUE with ECN, see FWD-TSN processing for notes
864 * on issues that will occur when the ECN NONCE stuff
865 * is put into SCTP for cross checking.
 */
867 #ifdef SCTP_DEBUG
868 if (sctp_debug_on & SCTP_DEBUG_TIMER1) {
869 kprintf("Forward TSN time\n");
871 #endif /* SCTP_DEBUG */
872 send_forward_tsn(stcb, &stcb->asoc);
873 if (lchk) {
874 /* Assure a timer is up */
875 sctp_timer_start(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep, stcb, lchk->whoTo);
879 return (0);
/*
 * sctp_t1init_timer() -- T1-init timer expiry.  Handles the delayed-
 * connection hook, runs threshold management against max_init_times
 * (which may destroy the assoc), backs off the primary destination,
 * clamps RTO to initial_init_rto_max, optionally fails over to an
 * alternate primary, and re-sends the INIT.
 *
 * Returns 1 if the association was destroyed, 0 otherwise.
 */
883 sctp_t1init_timer(struct sctp_inpcb *inp,
884 struct sctp_tcb *stcb,
885 struct sctp_nets *net)
887 /* bump the thresholds */
888 if (stcb->asoc.delayed_connection) {
889 /* special hook for delayed connection. The
890 * library did NOT complete the rest of its
891 * sends.
 */
893 stcb->asoc.delayed_connection = 0;
894 sctp_send_initiate(inp, stcb);
895 return (0);
897 if (sctp_threshold_management(inp, stcb, net,
898 stcb->asoc.max_init_times)) {
899 /* Association was destroyed */
900 return (1);
902 stcb->asoc.dropped_special_cnt = 0;
/* Treat as a window probe (no cwnd penalty) with nothing marked. */
903 sctp_backoff_on_timeout(stcb, stcb->asoc.primary_destination, 1, 0);
904 if (stcb->asoc.initial_init_rto_max < net->RTO) {
905 net->RTO = stcb->asoc.initial_init_rto_max;
907 if (stcb->asoc.numnets > 1) {
908 /* If we have more than one addr use it */
909 struct sctp_nets *alt;
910 alt = sctp_find_alternate_net(stcb, stcb->asoc.primary_destination);
911 if ((alt != NULL) && (alt != stcb->asoc.primary_destination)) {
912 sctp_move_all_chunks_to_alt(stcb, stcb->asoc.primary_destination, alt);
913 stcb->asoc.primary_destination = alt;
916 /* Send out a new init */
917 sctp_send_initiate(inp, stcb);
918 return (0);
/*
922 * For cookie and asconf we actually need to find and mark for resend,
923 * then increment the resend counter (after all the threshold management
924 * stuff of course).
 */
/*
 * sctp_cookie_timer() -- COOKIE-ECHO retransmission timer.  Locates
 * the COOKIE-ECHO on the control-send queue (aborting the assoc with
 * a PROTOCOL_VIOLATION cause if missing while COOKIE-ECHOED), runs
 * threshold management against max_init_times, backs off its
 * destination, re-homes the cookie to an alternate net, and marks it
 * SCTP_DATAGRAM_RESEND for the output path.
 *
 * Returns 1 if the association was destroyed, 0 otherwise.
 */
927 sctp_cookie_timer(struct sctp_inpcb *inp,
928 struct sctp_tcb *stcb,
929 struct sctp_nets *net)
931 struct sctp_nets *alt;
932 struct sctp_tmit_chunk *cookie;
933 /* first before all else we must find the cookie */
934 TAILQ_FOREACH(cookie, &stcb->asoc.control_send_queue, sctp_next) {
935 if (cookie->rec.chunk_id == SCTP_COOKIE_ECHO) {
936 break;
939 if (cookie == NULL) {
940 if (SCTP_GET_STATE(&stcb->asoc) == SCTP_STATE_COOKIE_ECHOED) {
941 /* FOOBAR! */
942 struct mbuf *oper;
943 MGET(oper, MB_DONTWAIT, MT_DATA);
944 if (oper) {
945 struct sctp_paramhdr *ph;
946 u_int32_t *ippp;
/* Build a PROTOCOL_VIOLATION error cause for the ABORT. */
948 oper->m_len = sizeof(struct sctp_paramhdr) +
949 sizeof(*ippp);
950 ph = mtod(oper, struct sctp_paramhdr *);
951 ph->param_type = htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
952 ph->param_length = htons(oper->m_len);
953 ippp = (u_int32_t *)(ph + 1);
954 *ippp = htonl(0x40000002);
956 sctp_abort_an_association(inp, stcb, SCTP_INTERNAL_ERROR,
957 oper);
959 return (1);
961 /* Ok we found the cookie, threshold management next */
962 if (sctp_threshold_management(inp, stcb, cookie->whoTo,
963 stcb->asoc.max_init_times)) {
964 /* Assoc is over */
965 return (1);
/*
968 * cleared theshold management now lets backoff the address &
969 * select an alternate
 */
971 stcb->asoc.dropped_special_cnt = 0;
972 sctp_backoff_on_timeout(stcb, cookie->whoTo, 1, 0);
973 alt = sctp_find_alternate_net(stcb, cookie->whoTo);
974 if (alt != cookie->whoTo) {
975 sctp_free_remote_addr(cookie->whoTo);
976 cookie->whoTo = alt;
977 alt->ref_count++;
979 /* Now mark the retran info */
980 if (cookie->sent != SCTP_DATAGRAM_RESEND) {
981 stcb->asoc.sent_queue_retran_cnt++;
983 cookie->sent = SCTP_DATAGRAM_RESEND;
/*
985 * Now call the output routine to kick out the cookie again, Note we
986 * don't mark any chunks for retran so that FR will need to kick in
987 * to move these (or a send timer).
 */
989 return (0);
/*
 * sctp_strreset_timer() -- STREAM-RESET retransmission timer.  Finds
 * the outstanding stream-reset request on the control-send queue,
 * runs threshold management against max_send_times, backs off and
 * re-homes the request (and any stranded ECN-ECHO) to an alternate
 * net, marks it for resend, and restarts the STRRESET timer.
 *
 * Returns 1 if the association was destroyed, 0 otherwise.
 */
993 sctp_strreset_timer(struct sctp_inpcb *inp, struct sctp_tcb *stcb,
994 struct sctp_nets *net)
996 struct sctp_nets *alt;
997 struct sctp_tmit_chunk *strrst, *chk;
998 struct sctp_stream_reset_req *strreq;
999 /* find the existing STRRESET */
1000 TAILQ_FOREACH(strrst, &stcb->asoc.control_send_queue,
1001 sctp_next) {
1002 if (strrst->rec.chunk_id == SCTP_STREAM_RESET) {
1003 /* is it what we want */
1004 strreq = mtod(strrst->data, struct sctp_stream_reset_req *);
1005 if (strreq->sr_req.ph.param_type == ntohs(SCTP_STR_RESET_REQUEST)) {
1006 break;
1010 if (strrst == NULL) {
1011 #ifdef SCTP_DEBUG
1012 if (sctp_debug_on & SCTP_DEBUG_TIMER1) {
1013 kprintf("Strange, strreset timer fires, but I can't find an str-reset?\n");
1015 #endif /* SCTP_DEBUG */
1016 return (0);
1018 /* do threshold management */
1019 if (sctp_threshold_management(inp, stcb, strrst->whoTo,
1020 stcb->asoc.max_send_times)) {
1021 /* Assoc is over */
1022 return (1);
/*
1026 * cleared theshold management
1027 * now lets backoff the address & select an alternate
 */
1029 sctp_backoff_on_timeout(stcb, strrst->whoTo, 1, 0);
1030 alt = sctp_find_alternate_net(stcb, strrst->whoTo);
1031 sctp_free_remote_addr(strrst->whoTo);
1032 strrst->whoTo = alt;
1033 alt->ref_count++;
1035 /* See if a ECN Echo is also stranded */
1036 TAILQ_FOREACH(chk, &stcb->asoc.control_send_queue, sctp_next) {
1037 if ((chk->whoTo == net) &&
1038 (chk->rec.chunk_id == SCTP_ECN_ECHO)) {
1039 sctp_free_remote_addr(chk->whoTo);
1040 if (chk->sent != SCTP_DATAGRAM_RESEND) {
1041 chk->sent = SCTP_DATAGRAM_RESEND;
1042 stcb->asoc.sent_queue_retran_cnt++;
1044 chk->whoTo = alt;
1045 alt->ref_count++;
1048 if (net->dest_state & SCTP_ADDR_NOT_REACHABLE) {
/*
1050 * If the address went un-reachable, we need to move
1051 * to alternates for ALL chk's in queue
 */
1053 sctp_move_all_chunks_to_alt(stcb, net, alt);
1055 /* mark the retran info */
1056 if (strrst->sent != SCTP_DATAGRAM_RESEND)
1057 stcb->asoc.sent_queue_retran_cnt++;
1058 strrst->sent = SCTP_DATAGRAM_RESEND;
1060 /* restart the timer */
1061 sctp_timer_start(SCTP_TIMER_TYPE_STRRESET, inp, stcb, strrst->whoTo);
1062 return (0);
/*
 * sctp_asconf_timer() -- ASCONF retransmission timer.  On first fire
 * (nothing sent yet) composes and sends a new ASCONF; otherwise finds
 * the queued ASCONF, runs threshold management against max_send_times,
 * gives up on the peer (cleanup) if it has ignored too many ASCONFs,
 * backs off and re-homes the ASCONF (and stranded ECN-ECHO chunks) to
 * an alternate net, and marks it for resend.
 *
 * Returns 1 if the association was destroyed, 0 otherwise.
 */
1066 sctp_asconf_timer(struct sctp_inpcb *inp, struct sctp_tcb *stcb,
1067 struct sctp_nets *net)
1069 struct sctp_nets *alt;
1070 struct sctp_tmit_chunk *asconf, *chk;
1072 /* is this the first send, or a retransmission? */
1073 if (stcb->asoc.asconf_sent == 0) {
1074 /* compose a new ASCONF chunk and send it */
1075 sctp_send_asconf(stcb, net);
1076 } else {
1077 /* Retransmission of the existing ASCONF needed... */
1079 /* find the existing ASCONF */
1080 TAILQ_FOREACH(asconf, &stcb->asoc.control_send_queue,
1081 sctp_next) {
1082 if (asconf->rec.chunk_id == SCTP_ASCONF) {
1083 break;
1086 if (asconf == NULL) {
1087 #ifdef SCTP_DEBUG
1088 if (sctp_debug_on & SCTP_DEBUG_TIMER1) {
1089 kprintf("Strange, asconf timer fires, but I can't find an asconf?\n");
1091 #endif /* SCTP_DEBUG */
1092 return (0);
1094 /* do threshold management */
1095 if (sctp_threshold_management(inp, stcb, asconf->whoTo,
1096 stcb->asoc.max_send_times)) {
1097 /* Assoc is over */
1098 return (1);
1101 /* PETER? FIX? How will the following code ever run? If
1102 * the max_send_times is hit, threshold managment will
1103 * blow away the association?
 */
1105 if (asconf->snd_count > stcb->asoc.max_send_times) {
/*
1107 * Something is rotten, peer is not responding to
1108 * ASCONFs but maybe is to data etc. e.g. it is not
1109 * properly handling the chunk type upper bits
1110 * Mark this peer as ASCONF incapable and cleanup
 */
1112 #ifdef SCTP_DEBUG
1113 if (sctp_debug_on & SCTP_DEBUG_TIMER1) {
1114 kprintf("asconf_timer: Peer has not responded to our repeated ASCONFs\n");
1116 #endif /* SCTP_DEBUG */
1117 sctp_asconf_cleanup(stcb, net);
1118 return (0);
/*
1121 * cleared theshold management
1122 * now lets backoff the address & select an alternate
 */
1124 sctp_backoff_on_timeout(stcb, asconf->whoTo, 1, 0);
1125 alt = sctp_find_alternate_net(stcb, asconf->whoTo);
1126 sctp_free_remote_addr(asconf->whoTo);
1127 asconf->whoTo = alt;
1128 alt->ref_count++;
1130 /* See if a ECN Echo is also stranded */
1131 TAILQ_FOREACH(chk, &stcb->asoc.control_send_queue, sctp_next) {
1132 if ((chk->whoTo == net) &&
1133 (chk->rec.chunk_id == SCTP_ECN_ECHO)) {
1134 sctp_free_remote_addr(chk->whoTo);
1135 chk->whoTo = alt;
1136 if (chk->sent != SCTP_DATAGRAM_RESEND) {
1137 chk->sent = SCTP_DATAGRAM_RESEND;
1138 stcb->asoc.sent_queue_retran_cnt++;
1140 alt->ref_count++;
1144 if (net->dest_state & SCTP_ADDR_NOT_REACHABLE) {
/*
1146 * If the address went un-reachable, we need to move
1147 * to alternates for ALL chk's in queue
 */
1149 sctp_move_all_chunks_to_alt(stcb, net, alt);
1151 /* mark the retran info */
1152 if (asconf->sent != SCTP_DATAGRAM_RESEND)
1153 stcb->asoc.sent_queue_retran_cnt++;
1154 asconf->sent = SCTP_DATAGRAM_RESEND;
1156 return (0);
/*
 * For the shutdown and shutdown-ack, we do not keep one around on the
 * control queue. This means we must generate a new one and call the
 * general chunk output routine, AFTER having done threshold management.
 */
1166 sctp_shutdown_timer(struct sctp_inpcb *inp, struct sctp_tcb *stcb,
1167 struct sctp_nets *net)
1169 struct sctp_nets *alt;
1170 /* first threshold managment */
1171 if (sctp_threshold_management(inp, stcb, net, stcb->asoc.max_send_times)) {
1172 /* Assoc is over */
1173 return (1);
1175 /* second select an alternative */
1176 alt = sctp_find_alternate_net(stcb, net);
1178 /* third generate a shutdown into the queue for out net */
1179 #ifdef SCTP_DEBUG
1180 if (sctp_debug_on & SCTP_DEBUG_OUTPUT4) {
1181 kprintf("%s:%d sends a shutdown\n",
1182 __FILE__,
1183 __LINE__
1186 #endif
1187 if (alt) {
1188 sctp_send_shutdown(stcb, alt);
1189 } else {
1190 /* if alt is NULL, there is no dest
1191 * to send to??
1193 return (0);
1195 /* fourth restart timer */
1196 sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWN, inp, stcb, alt);
1197 return (0);
1201 sctp_shutdownack_timer(struct sctp_inpcb *inp, struct sctp_tcb *stcb,
1202 struct sctp_nets *net)
1204 struct sctp_nets *alt;
1205 /* first threshold managment */
1206 if (sctp_threshold_management(inp, stcb, net, stcb->asoc.max_send_times)) {
1207 /* Assoc is over */
1208 return (1);
1210 /* second select an alternative */
1211 alt = sctp_find_alternate_net(stcb, net);
1213 /* third generate a shutdown into the queue for out net */
1214 sctp_send_shutdown_ack(stcb, alt);
1216 /* fourth restart timer */
1217 sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNACK, inp, stcb, alt);
1218 return (0);
1221 static void
1222 sctp_audit_stream_queues_for_size(struct sctp_inpcb *inp,
1223 struct sctp_tcb *stcb)
1225 struct sctp_stream_out *outs;
1226 struct sctp_tmit_chunk *chk;
1227 unsigned int chks_in_queue=0;
1229 if ((stcb == NULL) || (inp == NULL))
1230 return;
1231 if (TAILQ_EMPTY(&stcb->asoc.out_wheel)) {
1232 kprintf("Strange, out_wheel empty nothing on sent/send and tot=%lu?\n",
1233 (u_long)stcb->asoc.total_output_queue_size);
1234 stcb->asoc.total_output_queue_size = 0;
1235 return;
1237 if (stcb->asoc.sent_queue_retran_cnt) {
1238 kprintf("Hmm, sent_queue_retran_cnt is non-zero %d\n",
1239 stcb->asoc.sent_queue_retran_cnt);
1240 stcb->asoc.sent_queue_retran_cnt = 0;
1242 /* Check to see if some data queued, if so report it */
1243 TAILQ_FOREACH(outs, &stcb->asoc.out_wheel, next_spoke) {
1244 if (!TAILQ_EMPTY(&outs->outqueue)) {
1245 TAILQ_FOREACH(chk, &outs->outqueue, sctp_next) {
1246 chks_in_queue++;
1250 if (chks_in_queue != stcb->asoc.stream_queue_cnt) {
1251 kprintf("Hmm, stream queue cnt at %d I counted %d in stream out wheel\n",
1252 stcb->asoc.stream_queue_cnt, chks_in_queue);
1254 if (chks_in_queue) {
1255 /* call the output queue function */
1256 sctp_chunk_output(inp, stcb, 1);
1257 if ((TAILQ_EMPTY(&stcb->asoc.send_queue)) &&
1258 (TAILQ_EMPTY(&stcb->asoc.sent_queue))) {
1259 /* Probably should go in and make it go back through and add fragments allowed */
1260 kprintf("Still nothing moved %d chunks are stuck\n", chks_in_queue);
1262 } else {
1263 kprintf("Found no chunks on any queue tot:%lu\n",
1264 (u_long)stcb->asoc.total_output_queue_size);
1265 stcb->asoc.total_output_queue_size = 0;
1270 sctp_heartbeat_timer(struct sctp_inpcb *inp, struct sctp_tcb *stcb,
1271 struct sctp_nets *net)
1273 int cnt_of_unconf=0;
1275 if (net) {
1276 if (net->hb_responded == 0) {
1277 sctp_backoff_on_timeout(stcb, net, 1, 0);
1279 /* Zero PBA, if it needs it */
1280 if (net->partial_bytes_acked) {
1281 net->partial_bytes_acked = 0;
1284 TAILQ_FOREACH(net, &stcb->asoc.nets, sctp_next) {
1285 if ((net->dest_state & SCTP_ADDR_UNCONFIRMED) &&
1286 (net->dest_state & SCTP_ADDR_REACHABLE)) {
1287 cnt_of_unconf++;
1290 if ((stcb->asoc.total_output_queue_size > 0) &&
1291 (TAILQ_EMPTY(&stcb->asoc.send_queue)) &&
1292 (TAILQ_EMPTY(&stcb->asoc.sent_queue))) {
1293 sctp_audit_stream_queues_for_size(inp, stcb);
1295 /* Send a new HB, this will do threshold managment, pick a new dest */
1296 if (sctp_send_hb(stcb, 0, NULL) < 0) {
1297 return (1);
1299 if (cnt_of_unconf > 1) {
1301 * this will send out extra hb's up to maxburst if
1302 * there are any unconfirmed addresses.
1304 int cnt_sent = 1;
1305 while ((cnt_sent < stcb->asoc.max_burst) && (cnt_of_unconf > 1)) {
1306 if (sctp_send_hb(stcb, 0, NULL) == 0)
1307 break;
1308 cnt_of_unconf--;
1309 cnt_sent++;
1312 return (0);
#define SCTP_NUMBER_OF_MTU_SIZES 18
/*
 * Table of common link MTU sizes, in ascending order, scanned by
 * sctp_getnext_mtu() to probe the next larger path MTU.
 * Invariant: must hold exactly SCTP_NUMBER_OF_MTU_SIZES entries
 * (the loop in sctp_getnext_mtu() indexes all of them); 68 is the
 * IPv4 minimum MTU.
 */
static u_int32_t mtu_sizes[] = {
	68,
	296,
	508,
	512,
	544,
	576,
	1006,
	1492,
	1500,
	1536,
	2002,
	2048,
	4352,
	4464,
	8166,
	17914,
	32000,
	65535
};
1338 static u_int32_t
1339 sctp_getnext_mtu(struct sctp_inpcb *inp, u_int32_t cur_mtu)
1341 /* select another MTU that is just bigger than this one */
1342 int i;
1344 for (i = 0; i < SCTP_NUMBER_OF_MTU_SIZES; i++) {
1345 if (cur_mtu < mtu_sizes[i]) {
1346 /* no max_mtu is bigger than this one */
1347 return (mtu_sizes[i]);
1350 /* here return the highest allowable */
1351 return (cur_mtu);
1355 void
1356 sctp_pathmtu_timer(struct sctp_inpcb *inp,
1357 struct sctp_tcb *stcb,
1358 struct sctp_nets *net)
1360 u_int32_t next_mtu;
1362 /* restart the timer in any case */
1363 next_mtu = sctp_getnext_mtu(inp, net->mtu);
1364 if (next_mtu <= net->mtu) {
1365 /* nothing to do */
1366 return;
1368 if (net->ro.ro_rt != NULL) {
1369 /* only if we have a route and interface do we
1370 * set anything. Note we always restart
1371 * the timer though just in case it is updated
1372 * (i.e. the ifp) or route/ifp is populated.
1374 if (net->ro.ro_rt->rt_ifp != NULL) {
1375 if (net->ro.ro_rt->rt_ifp->if_mtu > next_mtu) {
1376 /* ok it will fit out the door */
1377 net->mtu = next_mtu;
1381 /* restart the timer */
1382 sctp_timer_start(SCTP_TIMER_TYPE_PATHMTURAISE, inp, stcb, net);
1385 void
1386 sctp_autoclose_timer(struct sctp_inpcb *inp,
1387 struct sctp_tcb *stcb,
1388 struct sctp_nets *net)
1390 struct timeval tn, *tim_touse;
1391 struct sctp_association *asoc;
1392 int ticks_gone_by;
1394 SCTP_GETTIME_TIMEVAL(&tn);
1395 if (stcb->asoc.sctp_autoclose_ticks &&
1396 (inp->sctp_flags & SCTP_PCB_FLAGS_AUTOCLOSE)) {
1397 /* Auto close is on */
1398 asoc = &stcb->asoc;
1399 /* pick the time to use */
1400 if (asoc->time_last_rcvd.tv_sec >
1401 asoc->time_last_sent.tv_sec) {
1402 tim_touse = &asoc->time_last_rcvd;
1403 } else {
1404 tim_touse = &asoc->time_last_sent;
1406 /* Now has long enough transpired to autoclose? */
1407 ticks_gone_by = ((tn.tv_sec - tim_touse->tv_sec) * hz);
1408 if ((ticks_gone_by > 0) &&
1409 (ticks_gone_by >= (int)asoc->sctp_autoclose_ticks)) {
1411 * autoclose time has hit, call the output routine,
1412 * which should do nothing just to be SURE we don't
1413 * have hanging data. We can then safely check the
1414 * queues and know that we are clear to send shutdown
1416 sctp_chunk_output(inp, stcb, 9);
1417 /* Are we clean? */
1418 if (TAILQ_EMPTY(&asoc->send_queue) &&
1419 TAILQ_EMPTY(&asoc->sent_queue)) {
1421 * there is nothing queued to send,
1422 * so I'm done...
1424 if (SCTP_GET_STATE(asoc) !=
1425 SCTP_STATE_SHUTDOWN_SENT) {
1426 /* only send SHUTDOWN 1st time thru */
1427 #ifdef SCTP_DEBUG
1428 if (sctp_debug_on & SCTP_DEBUG_OUTPUT4) {
1429 kprintf("%s:%d sends a shutdown\n",
1430 __FILE__,
1431 __LINE__
1434 #endif
1435 sctp_send_shutdown(stcb, stcb->asoc.primary_destination);
1436 asoc->state = SCTP_STATE_SHUTDOWN_SENT;
1437 sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWN,
1438 stcb->sctp_ep, stcb,
1439 asoc->primary_destination);
1440 sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNGUARD,
1441 stcb->sctp_ep, stcb,
1442 asoc->primary_destination);
1445 } else {
1447 * No auto close at this time, reset t-o to
1448 * check later
1450 int tmp;
1451 /* fool the timer startup to use the time left */
1452 tmp = asoc->sctp_autoclose_ticks;
1453 asoc->sctp_autoclose_ticks -= ticks_gone_by;
1454 sctp_timer_start(SCTP_TIMER_TYPE_AUTOCLOSE, inp, stcb,
1455 net);
1456 /* restore the real tick value */
1457 asoc->sctp_autoclose_ticks = tmp;
1462 void
1463 sctp_iterator_timer(struct sctp_iterator *it)
1465 int cnt= 0;
1466 /* only one iterator can run at a
1467 * time. This is the only way we
1468 * can cleanly pull ep's from underneath
1469 * all the running interators when a
1470 * ep is freed.
1472 SCTP_ITERATOR_LOCK();
1473 if (it->inp == NULL) {
1474 /* iterator is complete */
1475 done_with_iterator:
1476 SCTP_ITERATOR_UNLOCK();
1477 SCTP_INP_INFO_WLOCK();
1478 LIST_REMOVE(it, sctp_nxt_itr);
1479 /* stopping the callout is not needed, in theory,
1480 * but I am paranoid.
1482 SCTP_INP_INFO_WUNLOCK();
1483 callout_stop(&it->tmr.timer);
1484 if (it->function_atend != NULL) {
1485 (*it->function_atend)(it->pointer, it->val);
1487 kfree(it, M_PCB);
1488 return;
1490 select_a_new_ep:
1491 SCTP_INP_WLOCK(it->inp);
1492 while ((it->pcb_flags) && ((it->inp->sctp_flags & it->pcb_flags) != it->pcb_flags)) {
1493 /* we do not like this ep */
1494 if (it->iterator_flags & SCTP_ITERATOR_DO_SINGLE_INP) {
1495 SCTP_INP_WUNLOCK(it->inp);
1496 goto done_with_iterator;
1498 SCTP_INP_WUNLOCK(it->inp);
1499 it->inp = LIST_NEXT(it->inp, sctp_list);
1500 if (it->inp == NULL) {
1501 goto done_with_iterator;
1503 SCTP_INP_WLOCK(it->inp);
1505 if ((it->inp->inp_starting_point_for_iterator != NULL) &&
1506 (it->inp->inp_starting_point_for_iterator != it)) {
1507 kprintf("Iterator collision, we must wait for other iterator at %p\n",
1508 it->inp);
1509 SCTP_INP_WUNLOCK(it->inp);
1510 goto start_timer_return;
1512 /* now we do the actual write to this guy */
1513 it->inp->inp_starting_point_for_iterator = it;
1514 SCTP_INP_WUNLOCK(it->inp);
1515 SCTP_INP_RLOCK(it->inp);
1516 /* if we reach here we found a inp acceptable, now through each
1517 * one that has the association in the right state
1519 if (it->stcb == NULL) {
1520 it->stcb = LIST_FIRST(&it->inp->sctp_asoc_list);
1522 if (it->stcb->asoc.stcb_starting_point_for_iterator == it) {
1523 it->stcb->asoc.stcb_starting_point_for_iterator = NULL;
1525 while (it->stcb) {
1526 SCTP_TCB_LOCK(it->stcb);
1527 if (it->asoc_state && ((it->stcb->asoc.state & it->asoc_state) != it->asoc_state)) {
1528 SCTP_TCB_UNLOCK(it->stcb);
1529 it->stcb = LIST_NEXT(it->stcb, sctp_tcblist);
1530 continue;
1532 cnt++;
1533 /* run function on this one */
1534 SCTP_INP_RUNLOCK(it->inp);
1535 (*it->function_toapply)(it->inp, it->stcb, it->pointer, it->val);
1536 sctp_chunk_output(it->inp, it->stcb, 1);
1537 SCTP_TCB_UNLOCK(it->stcb);
1538 /* see if we have limited out */
1539 if (cnt > SCTP_MAX_ITERATOR_AT_ONCE) {
1540 it->stcb->asoc.stcb_starting_point_for_iterator = it;
1541 start_timer_return:
1542 SCTP_ITERATOR_UNLOCK();
1543 sctp_timer_start(SCTP_TIMER_TYPE_ITERATOR, (struct sctp_inpcb *)it, NULL, NULL);
1544 return;
1546 SCTP_INP_RLOCK(it->inp);
1547 it->stcb = LIST_NEXT(it->stcb, sctp_tcblist);
1549 /* if we reach here, we ran out of stcb's in the inp we are looking at */
1550 SCTP_INP_RUNLOCK(it->inp);
1551 SCTP_INP_WLOCK(it->inp);
1552 it->inp->inp_starting_point_for_iterator = NULL;
1553 SCTP_INP_WUNLOCK(it->inp);
1554 if (it->iterator_flags & SCTP_ITERATOR_DO_SINGLE_INP) {
1555 it->inp = NULL;
1556 } else {
1557 SCTP_INP_INFO_RLOCK();
1558 it->inp = LIST_NEXT(it->inp, sctp_list);
1559 SCTP_INP_INFO_RUNLOCK();
1561 if (it->inp == NULL) {
1562 goto done_with_iterator;
1564 goto select_a_new_ep;