HAMMER 61E/Many: Stabilization, Performance
[dragonfly.git] / sys / netinet / sctp_timer.c
blob5f9f7ea1a27574b0c5a62716ccead1698fe332a9
1 /* $KAME: sctp_timer.c,v 1.28 2004/08/17 04:06:20 itojun Exp $ */
2 /* $DragonFly: src/sys/netinet/sctp_timer.c,v 1.6 2006/12/22 23:57:52 swildner Exp $ */
4 /*
5 * Copyright (C) 2002, 2003, 2004 Cisco Systems Inc,
6 * All rights reserved.
8 * Redistribution and use in source and binary forms, with or without
9 * modification, are permitted provided that the following conditions
10 * are met:
11 * 1. Redistributions of source code must retain the above copyright
12 * notice, this list of conditions and the following disclaimer.
13 * 2. Redistributions in binary form must reproduce the above copyright
14 * notice, this list of conditions and the following disclaimer in the
15 * documentation and/or other materials provided with the distribution.
16 * 3. Neither the name of the project nor the names of its contributors
17 * may be used to endorse or promote products derived from this software
18 * without specific prior written permission.
20 * THIS SOFTWARE IS PROVIDED BY THE PROJECT AND CONTRIBUTORS ``AS IS'' AND
21 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
22 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
23 * ARE DISCLAIMED. IN NO EVENT SHALL THE PROJECT OR CONTRIBUTORS BE LIABLE
24 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
25 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
26 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
27 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
28 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
29 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
30 * SUCH DAMAGE.
32 #if !(defined(__OpenBSD__) || defined(__APPLE__))
33 #include "opt_ipsec.h"
34 #endif
35 #if defined(__FreeBSD__) || defined(__DragonFly__)
36 #include "opt_compat.h"
37 #include "opt_inet6.h"
38 #include "opt_inet.h"
39 #endif
40 #if defined(__NetBSD__)
41 #include "opt_inet.h"
42 #endif
43 #ifdef __APPLE__
44 #include <sctp.h>
45 #elif !defined(__OpenBSD__)
46 #include "opt_sctp.h"
47 #endif
49 #include <sys/param.h>
50 #include <sys/systm.h>
51 #include <sys/malloc.h>
52 #include <sys/mbuf.h>
53 #ifndef __OpenBSD__
54 #include <sys/domain.h>
55 #endif
56 #include <sys/protosw.h>
57 #include <sys/socket.h>
58 #include <sys/socketvar.h>
59 #include <sys/proc.h>
60 #include <sys/kernel.h>
61 #include <sys/sysctl.h>
62 #ifdef INET6
63 #include <sys/domain.h>
64 #endif
66 #if (defined(__FreeBSD__) && __FreeBSD_version >= 500000)
67 #include <sys/limits.h>
68 #else
69 #include <machine/limits.h>
70 #endif
72 #include <net/if.h>
73 #include <net/if_types.h>
74 #include <net/route.h>
75 #include <netinet/in.h>
76 #include <netinet/in_systm.h>
77 #define _IP_VHL
78 #include <netinet/ip.h>
79 #include <netinet/in_pcb.h>
80 #include <netinet/in_var.h>
81 #include <netinet/ip_var.h>
83 #ifdef INET6
84 #include <netinet/ip6.h>
85 #include <netinet6/ip6_var.h>
86 #endif /* INET6 */
88 #include <netinet/sctp_pcb.h>
90 #ifdef IPSEC
91 #ifndef __OpenBSD__
92 #include <netinet6/ipsec.h>
93 #include <netproto/key/key.h>
94 #else
95 #undef IPSEC
96 #endif
97 #endif /* IPSEC */
98 #ifdef INET6
99 #include <netinet6/sctp6_var.h>
100 #endif
101 #include <netinet/sctp_var.h>
102 #include <netinet/sctp_timer.h>
103 #include <netinet/sctputil.h>
104 #include <netinet/sctp_output.h>
105 #include <netinet/sctp_hashdriver.h>
106 #include <netinet/sctp_header.h>
107 #include <netinet/sctp_indata.h>
108 #include <netinet/sctp_asconf.h>
110 #include <netinet/sctp.h>
111 #include <netinet/sctp_uio.h>
113 #include <net/net_osdep.h>
115 #ifdef SCTP_DEBUG
116 extern u_int32_t sctp_debug_on;
117 #endif /* SCTP_DEBUG */
119 void
120 sctp_audit_retranmission_queue(struct sctp_association *asoc)
122 struct sctp_tmit_chunk *chk;
124 #ifdef SCTP_DEBUG
125 if (sctp_debug_on & SCTP_DEBUG_TIMER4) {
126 kprintf("Audit invoked on send queue cnt:%d onqueue:%d\n",
127 asoc->sent_queue_retran_cnt,
128 asoc->sent_queue_cnt);
130 #endif /* SCTP_DEBUG */
131 asoc->sent_queue_retran_cnt = 0;
132 asoc->sent_queue_cnt = 0;
133 TAILQ_FOREACH(chk, &asoc->sent_queue, sctp_next) {
134 if (chk->sent == SCTP_DATAGRAM_RESEND) {
135 asoc->sent_queue_retran_cnt++;
137 asoc->sent_queue_cnt++;
139 TAILQ_FOREACH(chk, &asoc->control_send_queue, sctp_next) {
140 if (chk->sent == SCTP_DATAGRAM_RESEND) {
141 asoc->sent_queue_retran_cnt++;
144 #ifdef SCTP_DEBUG
145 if (sctp_debug_on & SCTP_DEBUG_TIMER4) {
146 kprintf("Audit completes retran:%d onqueue:%d\n",
147 asoc->sent_queue_retran_cnt,
148 asoc->sent_queue_cnt);
150 #endif /* SCTP_DEBUG */
154 sctp_threshold_management(struct sctp_inpcb *inp, struct sctp_tcb *stcb,
155 struct sctp_nets *net, uint16_t threshold)
157 if (net) {
158 net->error_count++;
159 #ifdef SCTP_DEBUG
160 if (sctp_debug_on & SCTP_DEBUG_TIMER4) {
161 kprintf("Error count for %p now %d thresh:%d\n",
162 net, net->error_count,
163 net->failure_threshold);
165 #endif /* SCTP_DEBUG */
166 if (net->error_count >= net->failure_threshold) {
167 /* We had a threshold failure */
168 if (net->dest_state & SCTP_ADDR_REACHABLE) {
169 net->dest_state &= ~SCTP_ADDR_REACHABLE;
170 net->dest_state |= SCTP_ADDR_NOT_REACHABLE;
171 if (net == stcb->asoc.primary_destination) {
172 net->dest_state |= SCTP_ADDR_WAS_PRIMARY;
174 sctp_ulp_notify(SCTP_NOTIFY_INTERFACE_DOWN,
175 stcb,
176 SCTP_FAILED_THRESHOLD,
177 (void *)net);
180 /*********HOLD THIS COMMENT FOR PATCH OF ALTERNATE
181 *********ROUTING CODE
183 /*********HOLD THIS COMMENT FOR END OF PATCH OF ALTERNATE
184 *********ROUTING CODE
187 if (stcb == NULL)
188 return (0);
190 if (net) {
191 if ((net->dest_state & SCTP_ADDR_UNCONFIRMED) == 0) {
192 stcb->asoc.overall_error_count++;
194 } else {
195 stcb->asoc.overall_error_count++;
197 #ifdef SCTP_DEBUG
198 if (sctp_debug_on & SCTP_DEBUG_TIMER4) {
199 kprintf("Overall error count for %p now %d thresh:%u state:%x\n",
200 &stcb->asoc,
201 stcb->asoc.overall_error_count,
202 (u_int)threshold,
203 ((net == NULL) ? (u_int)0 : (u_int)net->dest_state));
205 #endif /* SCTP_DEBUG */
206 /* We specifically do not do >= to give the assoc one more
207 * change before we fail it.
209 if (stcb->asoc.overall_error_count > threshold) {
210 /* Abort notification sends a ULP notify */
211 struct mbuf *oper;
212 MGET(oper, MB_DONTWAIT, MT_DATA);
213 if (oper) {
214 struct sctp_paramhdr *ph;
215 u_int32_t *ippp;
217 oper->m_len = sizeof(struct sctp_paramhdr) +
218 sizeof(*ippp);
219 ph = mtod(oper, struct sctp_paramhdr *);
220 ph->param_type = htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
221 ph->param_length = htons(oper->m_len);
222 ippp = (u_int32_t *)(ph + 1);
223 *ippp = htonl(0x40000001);
225 sctp_abort_an_association(inp, stcb, SCTP_FAILED_THRESHOLD, oper);
226 return (1);
228 return (0);
231 struct sctp_nets *
232 sctp_find_alternate_net(struct sctp_tcb *stcb,
233 struct sctp_nets *net)
235 /* Find and return an alternate network if possible */
236 struct sctp_nets *alt, *mnet;
237 int once;
239 if (stcb->asoc.numnets == 1) {
240 /* No others but net */
241 return (TAILQ_FIRST(&stcb->asoc.nets));
243 mnet = net;
244 once = 0;
246 if (mnet == NULL) {
247 mnet = TAILQ_FIRST(&stcb->asoc.nets);
249 do {
250 alt = TAILQ_NEXT(mnet, sctp_next);
251 if (alt == NULL) {
252 once++;
253 if (once > 1) {
254 break;
256 alt = TAILQ_FIRST(&stcb->asoc.nets);
258 if (alt->ro.ro_rt == NULL) {
259 #ifndef SCOPEDROUTING
260 struct sockaddr_in6 *sin6;
261 sin6 = (struct sockaddr_in6 *)&alt->ro._l_addr;
262 if (sin6->sin6_family == AF_INET6) {
263 #if defined(SCTP_BASE_FREEBSD) || defined(__APPLE__) || defined(__DragonFly__)
264 in6_embedscope(&sin6->sin6_addr, sin6,
265 NULL, NULL);
266 #else
267 in6_embedscope(&sin6->sin6_addr, sin6);
268 #endif
270 #endif
271 #if defined(__FreeBSD__) || defined(__APPLE__) || defined(__DragonFly__)
272 rtalloc_ign((struct route*)&alt->ro, 0UL);
273 #else
274 rtalloc((struct route*)&alt->ro);
275 #endif
276 #ifndef SCOPEDROUTING
277 if (sin6->sin6_family == AF_INET6) {
278 in6_recoverscope(sin6, &sin6->sin6_addr, NULL);
280 #endif
281 alt->src_addr_selected = 0;
283 if (
284 ((alt->dest_state & SCTP_ADDR_REACHABLE) == SCTP_ADDR_REACHABLE) &&
285 (alt->ro.ro_rt != NULL) &&
286 (!(alt->dest_state & SCTP_ADDR_UNCONFIRMED))
288 /* Found a reachable address */
289 break;
291 mnet = alt;
292 } while (alt != NULL);
294 if (alt == NULL) {
295 /* Case where NO insv network exists (dormant state) */
296 /* we rotate destinations */
297 once = 0;
298 mnet = net;
299 do {
300 alt = TAILQ_NEXT(mnet, sctp_next);
301 if (alt == NULL) {
302 once++;
303 if (once > 1) {
304 break;
306 alt = TAILQ_FIRST(&stcb->asoc.nets);
308 if ((!(alt->dest_state & SCTP_ADDR_UNCONFIRMED)) &&
309 (alt != net)) {
310 /* Found an alternate address */
311 break;
313 mnet = alt;
314 } while (alt != NULL);
316 if (alt == NULL) {
317 return (net);
319 return (alt);
322 static void
323 sctp_backoff_on_timeout(struct sctp_tcb *stcb,
324 struct sctp_nets *net,
325 int win_probe,
326 int num_marked)
328 #ifdef SCTP_DEBUG
329 int oldRTO;
331 oldRTO = net->RTO;
332 #endif /* SCTP_DEBUG */
333 net->RTO <<= 1;
334 #ifdef SCTP_DEBUG
335 if (sctp_debug_on & SCTP_DEBUG_TIMER2) {
336 kprintf("Timer doubles from %d ms -to-> %d ms\n",
337 oldRTO, net->RTO);
339 #endif /* SCTP_DEBUG */
341 if (net->RTO > stcb->asoc.maxrto) {
342 net->RTO = stcb->asoc.maxrto;
343 #ifdef SCTP_DEBUG
344 if (sctp_debug_on & SCTP_DEBUG_TIMER2) {
345 kprintf("Growth capped by maxrto %d\n",
346 net->RTO);
348 #endif /* SCTP_DEBUG */
352 if ((win_probe == 0) && num_marked) {
353 /* We don't apply penalty to window probe scenarios */
354 #ifdef SCTP_CWND_LOGGING
355 int old_cwnd=net->cwnd;
356 #endif
357 net->ssthresh = net->cwnd >> 1;
358 if (net->ssthresh < (net->mtu << 1)) {
359 net->ssthresh = (net->mtu << 1);
361 net->cwnd = net->mtu;
362 /* floor of 1 mtu */
363 if (net->cwnd < net->mtu)
364 net->cwnd = net->mtu;
365 #ifdef SCTP_CWND_LOGGING
366 sctp_log_cwnd(net, net->cwnd-old_cwnd, SCTP_CWND_LOG_FROM_RTX);
367 #endif
369 net->partial_bytes_acked = 0;
370 #ifdef SCTP_DEBUG
371 if (sctp_debug_on & SCTP_DEBUG_TIMER1) {
372 kprintf("collapse cwnd to 1MTU ssthresh to %d\n",
373 net->ssthresh);
375 #endif
381 static int
382 sctp_mark_all_for_resend(struct sctp_tcb *stcb,
383 struct sctp_nets *net,
384 struct sctp_nets *alt,
385 int *num_marked)
389 * Mark all chunks (well not all) that were sent to *net for retransmission.
390 * Move them to alt for there destination as well... We only
391 * mark chunks that have been outstanding long enough to have
392 * received feed-back.
394 struct sctp_tmit_chunk *chk, *tp2;
395 struct sctp_nets *lnets;
396 struct timeval now, min_wait, tv;
397 int cur_rto;
398 int win_probes, non_win_probes, orig_rwnd, audit_tf, num_mk, fir;
399 unsigned int cnt_mk;
400 u_int32_t orig_flight;
401 u_int32_t tsnlast, tsnfirst;
403 /* none in flight now */
404 audit_tf = 0;
405 fir=0;
406 /* figure out how long a data chunk must be pending
407 * before we can mark it ..
409 SCTP_GETTIME_TIMEVAL(&now);
410 /* get cur rto in micro-seconds */
411 cur_rto = (((net->lastsa >> 2) + net->lastsv) >> 1);
412 #ifdef SCTP_FR_LOGGING
413 sctp_log_fr(cur_rto, 0, 0, SCTP_FR_T3_MARK_TIME);
414 #endif
415 cur_rto *= 1000;
416 #ifdef SCTP_FR_LOGGING
417 sctp_log_fr(cur_rto, 0, 0, SCTP_FR_T3_MARK_TIME);
418 #endif
419 tv.tv_sec = cur_rto / 1000000;
420 tv.tv_usec = cur_rto % 1000000;
421 #ifndef __FreeBSD__
422 timersub(&now, &tv, &min_wait);
423 #else
424 min_wait = now;
425 timevalsub(&min_wait, &tv);
426 #endif
427 if (min_wait.tv_sec < 0 || min_wait.tv_usec < 0) {
429 * if we hit here, we don't
430 * have enough seconds on the clock to account
431 * for the RTO. We just let the lower seconds
432 * be the bounds and don't worry about it. This
433 * may mean we will mark a lot more than we should.
435 min_wait.tv_sec = min_wait.tv_usec = 0;
437 #ifdef SCTP_FR_LOGGING
438 sctp_log_fr(cur_rto, now.tv_sec, now.tv_usec, SCTP_FR_T3_MARK_TIME);
439 sctp_log_fr(0, min_wait.tv_sec, min_wait.tv_usec, SCTP_FR_T3_MARK_TIME);
440 #endif
441 stcb->asoc.total_flight -= net->flight_size;
442 if (stcb->asoc.total_flight < 0) {
443 audit_tf = 1;
444 stcb->asoc.total_flight = 0;
446 /* Our rwnd will be incorrect here since we are not adding
447 * back the cnt * mbuf but we will fix that down below.
449 orig_rwnd = stcb->asoc.peers_rwnd;
450 orig_flight = net->flight_size;
451 stcb->asoc.peers_rwnd += net->flight_size;
452 net->flight_size = 0;
453 net->rto_pending = 0;
454 net->fast_retran_ip= 0;
455 win_probes = non_win_probes = 0;
456 #ifdef SCTP_DEBUG
457 if (sctp_debug_on & SCTP_DEBUG_TIMER2) {
458 kprintf("Marking ALL un-acked for retransmission at t3-timeout\n");
460 #endif /* SCTP_DEBUG */
461 /* Now on to each chunk */
462 num_mk = cnt_mk = 0;
463 tsnfirst = tsnlast = 0;
464 chk = TAILQ_FIRST(&stcb->asoc.sent_queue);
465 for (;chk != NULL; chk = tp2) {
466 tp2 = TAILQ_NEXT(chk, sctp_next);
467 if ((compare_with_wrap(stcb->asoc.last_acked_seq,
468 chk->rec.data.TSN_seq,
469 MAX_TSN)) ||
470 (stcb->asoc.last_acked_seq == chk->rec.data.TSN_seq)) {
471 /* Strange case our list got out of order? */
472 kprintf("Our list is out of order?\n");
473 TAILQ_REMOVE(&stcb->asoc.sent_queue, chk, sctp_next);
474 if (chk->data) {
475 sctp_release_pr_sctp_chunk(stcb, chk, 0xffff,
476 &stcb->asoc.sent_queue);
477 if (chk->flags & SCTP_PR_SCTP_BUFFER) {
478 stcb->asoc.sent_queue_cnt_removeable--;
481 stcb->asoc.sent_queue_cnt--;
482 sctp_free_remote_addr(chk->whoTo);
483 sctppcbinfo.ipi_count_chunk--;
484 if ((int)sctppcbinfo.ipi_count_chunk < 0) {
485 panic("Chunk count is going negative");
487 SCTP_ZONE_FREE(sctppcbinfo.ipi_zone_chunk, chk);
488 sctppcbinfo.ipi_gencnt_chunk++;
489 continue;
491 if ((chk->whoTo == net) && (chk->sent < SCTP_DATAGRAM_ACKED)) {
492 /* found one to mark:
493 * If it is less than DATAGRAM_ACKED it MUST
494 * not be a skipped or marked TSN but instead
495 * one that is either already set for retransmission OR
496 * one that needs retransmission.
499 /* validate its been outstanding long enough */
500 #ifdef SCTP_FR_LOGGING
501 sctp_log_fr(chk->rec.data.TSN_seq,
502 chk->sent_rcv_time.tv_sec,
503 chk->sent_rcv_time.tv_usec,
504 SCTP_FR_T3_MARK_TIME);
505 #endif
506 if (chk->sent_rcv_time.tv_sec > min_wait.tv_sec) {
507 /* we have reached a chunk that was sent some
508 * seconds past our min.. forget it we will
509 * find no more to send.
511 #ifdef SCTP_FR_LOGGING
512 sctp_log_fr(0,
513 chk->sent_rcv_time.tv_sec,
514 chk->sent_rcv_time.tv_usec,
515 SCTP_FR_T3_STOPPED);
516 #endif
517 continue;
518 } else if (chk->sent_rcv_time.tv_sec == min_wait.tv_sec) {
519 /* we must look at the micro seconds to know.
521 if (chk->sent_rcv_time.tv_usec >= min_wait.tv_usec) {
522 /* ok it was sent after our boundary time. */
523 #ifdef SCTP_FR_LOGGING
524 sctp_log_fr(0,
525 chk->sent_rcv_time.tv_sec,
526 chk->sent_rcv_time.tv_usec,
527 SCTP_FR_T3_STOPPED);
528 #endif
529 continue;
532 stcb->asoc.total_flight_count--;
533 if (stcb->asoc.total_flight_count < 0) {
534 stcb->asoc.total_flight_count = 0;
536 if ((chk->flags & (SCTP_PR_SCTP_ENABLED|SCTP_PR_SCTP_BUFFER)) == SCTP_PR_SCTP_ENABLED) {
537 /* Is it expired? */
538 if ((now.tv_sec > chk->rec.data.timetodrop.tv_sec) ||
539 ((chk->rec.data.timetodrop.tv_sec == now.tv_sec) &&
540 (now.tv_usec > chk->rec.data.timetodrop.tv_usec))) {
541 /* Yes so drop it */
542 if (chk->data) {
543 sctp_release_pr_sctp_chunk(stcb,
544 chk,
545 (SCTP_RESPONSE_TO_USER_REQ|SCTP_NOTIFY_DATAGRAM_SENT),
546 &stcb->asoc.sent_queue);
549 continue;
551 if (chk->sent != SCTP_DATAGRAM_RESEND) {
552 stcb->asoc.sent_queue_retran_cnt++;
553 num_mk++;
554 if (fir == 0) {
555 fir = 1;
556 #ifdef SCTP_DEBUG
557 if (sctp_debug_on & SCTP_DEBUG_TIMER1) {
558 kprintf("First TSN marked was %x\n",
559 chk->rec.data.TSN_seq);
561 #endif
562 tsnfirst = chk->rec.data.TSN_seq;
564 tsnlast = chk->rec.data.TSN_seq;
565 #ifdef SCTP_FR_LOGGING
566 sctp_log_fr(chk->rec.data.TSN_seq, chk->snd_count,
567 0, SCTP_FR_T3_MARKED);
569 #endif
571 chk->sent = SCTP_DATAGRAM_RESEND;
572 /* reset the TSN for striking and other FR stuff */
573 chk->rec.data.doing_fast_retransmit = 0;
574 #ifdef SCTP_DEBUG
575 if (sctp_debug_on & SCTP_DEBUG_TIMER3) {
576 kprintf("mark TSN:%x for retransmission\n", chk->rec.data.TSN_seq);
578 #endif /* SCTP_DEBUG */
579 /* Clear any time so NO RTT is being done */
580 chk->do_rtt = 0;
581 /* Bump up the count */
582 if (compare_with_wrap(chk->rec.data.TSN_seq,
583 stcb->asoc.t3timeout_highest_marked,
584 MAX_TSN)) {
585 /* TSN_seq > than t3timeout so update */
586 stcb->asoc.t3timeout_highest_marked = chk->rec.data.TSN_seq;
588 if (alt != net) {
589 sctp_free_remote_addr(chk->whoTo);
590 chk->whoTo = alt;
591 alt->ref_count++;
593 if ((chk->rec.data.state_flags & SCTP_WINDOW_PROBE) !=
594 SCTP_WINDOW_PROBE) {
595 non_win_probes++;
596 } else {
597 chk->rec.data.state_flags &= ~SCTP_WINDOW_PROBE;
598 win_probes++;
601 if (chk->sent == SCTP_DATAGRAM_RESEND) {
602 cnt_mk++;
606 #ifdef SCTP_FR_LOGGING
607 sctp_log_fr(tsnfirst, tsnlast, num_mk, SCTP_FR_T3_TIMEOUT);
608 #endif
609 /* compensate for the number we marked */
610 stcb->asoc.peers_rwnd += (num_mk /* * sizeof(struct mbuf)*/);
612 #ifdef SCTP_DEBUG
613 if (sctp_debug_on & SCTP_DEBUG_TIMER1) {
614 if (num_mk) {
615 kprintf("LAST TSN marked was %x\n", tsnlast);
616 kprintf("Num marked for retransmission was %d peer-rwd:%ld\n",
617 num_mk, (u_long)stcb->asoc.peers_rwnd);
618 kprintf("LAST TSN marked was %x\n", tsnlast);
619 kprintf("Num marked for retransmission was %d peer-rwd:%d\n",
620 num_mk,
621 (int)stcb->asoc.peers_rwnd
625 #endif
626 *num_marked = num_mk;
627 if (stcb->asoc.sent_queue_retran_cnt != cnt_mk) {
628 kprintf("Local Audit says there are %d for retran asoc cnt:%d\n",
629 cnt_mk, stcb->asoc.sent_queue_retran_cnt);
630 #ifndef SCTP_AUDITING_ENABLED
631 stcb->asoc.sent_queue_retran_cnt = cnt_mk;
632 #endif
634 #ifdef SCTP_DEBUG
635 if (sctp_debug_on & SCTP_DEBUG_TIMER3) {
636 kprintf("**************************\n");
638 #endif /* SCTP_DEBUG */
640 /* Now check for a ECN Echo that may be stranded */
641 TAILQ_FOREACH(chk, &stcb->asoc.control_send_queue, sctp_next) {
642 if ((chk->whoTo == net) &&
643 (chk->rec.chunk_id == SCTP_ECN_ECHO)) {
644 sctp_free_remote_addr(chk->whoTo);
645 chk->whoTo = alt;
646 if (chk->sent != SCTP_DATAGRAM_RESEND) {
647 chk->sent = SCTP_DATAGRAM_RESEND;
648 stcb->asoc.sent_queue_retran_cnt++;
650 alt->ref_count++;
653 if ((orig_rwnd == 0) && (stcb->asoc.total_flight == 0) &&
654 (orig_flight <= net->mtu)) {
656 * If the LAST packet sent was not acked and our rwnd is 0
657 * then we are in a win-probe state.
659 win_probes = 1;
660 non_win_probes = 0;
661 #ifdef SCTP_DEBUG
662 if (sctp_debug_on & SCTP_DEBUG_TIMER1) {
663 kprintf("WIN_PROBE set via o_rwnd=0 tf=0 and all:%d fit in mtu:%d\n",
664 orig_flight, net->mtu);
666 #endif
669 if (audit_tf) {
670 #ifdef SCTP_DEBUG
671 if (sctp_debug_on & SCTP_DEBUG_TIMER4) {
672 kprintf("Audit total flight due to negative value net:%p\n",
673 net);
675 #endif /* SCTP_DEBUG */
676 stcb->asoc.total_flight = 0;
677 stcb->asoc.total_flight_count = 0;
678 /* Clear all networks flight size */
679 TAILQ_FOREACH(lnets, &stcb->asoc.nets, sctp_next) {
680 lnets->flight_size = 0;
681 #ifdef SCTP_DEBUG
682 if (sctp_debug_on & SCTP_DEBUG_TIMER4) {
683 kprintf("Net:%p c-f cwnd:%d ssthresh:%d\n",
684 lnets, lnets->cwnd, lnets->ssthresh);
686 #endif /* SCTP_DEBUG */
688 TAILQ_FOREACH(chk, &stcb->asoc.sent_queue, sctp_next) {
689 if (chk->sent < SCTP_DATAGRAM_RESEND) {
690 stcb->asoc.total_flight += chk->book_size;
691 chk->whoTo->flight_size += chk->book_size;
692 stcb->asoc.total_flight_count++;
696 /* Setup the ecn nonce re-sync point. We
697 * do this since retranmissions are NOT
698 * setup for ECN. This means that do to
699 * Karn's rule, we don't know the total
700 * of the peers ecn bits.
702 chk = TAILQ_FIRST(&stcb->asoc.send_queue);
703 if (chk == NULL) {
704 stcb->asoc.nonce_resync_tsn = stcb->asoc.sending_seq;
705 } else {
706 stcb->asoc.nonce_resync_tsn = chk->rec.data.TSN_seq;
708 stcb->asoc.nonce_wait_for_ecne = 0;
709 stcb->asoc.nonce_sum_check = 0;
710 /* We return 1 if we only have a window probe outstanding */
711 if (win_probes && (non_win_probes == 0)) {
712 return (1);
714 return (0);
717 static void
718 sctp_move_all_chunks_to_alt(struct sctp_tcb *stcb,
719 struct sctp_nets *net,
720 struct sctp_nets *alt)
722 struct sctp_association *asoc;
723 struct sctp_stream_out *outs;
724 struct sctp_tmit_chunk *chk;
726 if (net == alt)
727 /* nothing to do */
728 return;
730 asoc = &stcb->asoc;
733 * now through all the streams checking for chunks sent to our
734 * bad network.
736 TAILQ_FOREACH(outs, &asoc->out_wheel, next_spoke) {
737 /* now clean up any chunks here */
738 TAILQ_FOREACH(chk, &outs->outqueue, sctp_next) {
739 if (chk->whoTo == net) {
740 sctp_free_remote_addr(chk->whoTo);
741 chk->whoTo = alt;
742 alt->ref_count++;
746 /* Now check the pending queue */
747 TAILQ_FOREACH(chk, &asoc->send_queue, sctp_next) {
748 if (chk->whoTo == net) {
749 sctp_free_remote_addr(chk->whoTo);
750 chk->whoTo = alt;
751 alt->ref_count++;
758 sctp_t3rxt_timer(struct sctp_inpcb *inp,
759 struct sctp_tcb *stcb,
760 struct sctp_nets *net)
762 struct sctp_nets *alt;
763 int win_probe, num_mk;
766 #ifdef SCTP_FR_LOGGING
767 sctp_log_fr(0, 0, 0, SCTP_FR_T3_TIMEOUT);
768 #endif
769 /* Find an alternate and mark those for retransmission */
770 alt = sctp_find_alternate_net(stcb, net);
771 win_probe = sctp_mark_all_for_resend(stcb, net, alt, &num_mk);
773 /* FR Loss recovery just ended with the T3. */
774 stcb->asoc.fast_retran_loss_recovery = 0;
776 /* setup the sat loss recovery that prevents
777 * satellite cwnd advance.
779 stcb->asoc.sat_t3_loss_recovery = 1;
780 stcb->asoc.sat_t3_recovery_tsn = stcb->asoc.sending_seq;
782 /* Backoff the timer and cwnd */
783 sctp_backoff_on_timeout(stcb, net, win_probe, num_mk);
784 if (win_probe == 0) {
785 /* We don't do normal threshold management on window probes */
786 if (sctp_threshold_management(inp, stcb, net,
787 stcb->asoc.max_send_times)) {
788 /* Association was destroyed */
789 return (1);
790 } else {
791 if (net != stcb->asoc.primary_destination) {
792 /* send a immediate HB if our RTO is stale */
793 struct timeval now;
794 unsigned int ms_goneby;
795 SCTP_GETTIME_TIMEVAL(&now);
796 if (net->last_sent_time.tv_sec) {
797 ms_goneby = (now.tv_sec - net->last_sent_time.tv_sec) * 1000;
798 } else {
799 ms_goneby = 0;
801 if ((ms_goneby > net->RTO) || (net->RTO == 0)) {
802 /* no recent feed back in an RTO or more, request a RTT update */
803 sctp_send_hb(stcb, 1, net);
807 } else {
809 * For a window probe we don't penalize the net's but only
810 * the association. This may fail it if SACKs are not coming
811 * back. If sack's are coming with rwnd locked at 0, we will
812 * continue to hold things waiting for rwnd to raise
814 if (sctp_threshold_management(inp, stcb, NULL,
815 stcb->asoc.max_send_times)) {
816 /* Association was destroyed */
817 return (1);
820 if (net->dest_state & SCTP_ADDR_NOT_REACHABLE) {
821 /* Move all pending over too */
822 sctp_move_all_chunks_to_alt(stcb, net, alt);
823 /* Was it our primary? */
824 if ((stcb->asoc.primary_destination == net) && (alt != net)) {
826 * Yes, note it as such and find an alternate
827 * note: this means HB code must use this to resent
828 * the primary if it goes active AND if someone does
829 * a change-primary then this flag must be cleared
830 * from any net structures.
832 if (sctp_set_primary_addr(stcb,
833 (struct sockaddr *)NULL,
834 alt) == 0) {
835 net->dest_state |= SCTP_ADDR_WAS_PRIMARY;
836 net->src_addr_selected = 0;
841 * Special case for cookie-echo'ed case, we don't do output
842 * but must await the COOKIE-ACK before retransmission
844 if (SCTP_GET_STATE(&stcb->asoc) == SCTP_STATE_COOKIE_ECHOED) {
846 * Here we just reset the timer and start again since we
847 * have not established the asoc
849 #ifdef SCTP_DEBUG
850 if (sctp_debug_on & SCTP_DEBUG_TIMER1) {
851 kprintf("Special cookie case return\n");
853 #endif /* SCTP_DEBUG */
854 sctp_timer_start(SCTP_TIMER_TYPE_SEND, inp, stcb, net);
855 return (0);
857 if (stcb->asoc.peer_supports_prsctp) {
858 struct sctp_tmit_chunk *lchk;
859 lchk = sctp_try_advance_peer_ack_point(stcb, &stcb->asoc);
860 /* C3. See if we need to send a Fwd-TSN */
861 if (compare_with_wrap(stcb->asoc.advanced_peer_ack_point,
862 stcb->asoc.last_acked_seq, MAX_TSN)) {
864 * ISSUE with ECN, see FWD-TSN processing for notes
865 * on issues that will occur when the ECN NONCE stuff
866 * is put into SCTP for cross checking.
868 #ifdef SCTP_DEBUG
869 if (sctp_debug_on & SCTP_DEBUG_TIMER1) {
870 kprintf("Forward TSN time\n");
872 #endif /* SCTP_DEBUG */
873 send_forward_tsn(stcb, &stcb->asoc);
874 if (lchk) {
875 /* Assure a timer is up */
876 sctp_timer_start(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep, stcb, lchk->whoTo);
880 return (0);
884 sctp_t1init_timer(struct sctp_inpcb *inp,
885 struct sctp_tcb *stcb,
886 struct sctp_nets *net)
888 /* bump the thresholds */
889 if (stcb->asoc.delayed_connection) {
890 /* special hook for delayed connection. The
891 * library did NOT complete the rest of its
892 * sends.
894 stcb->asoc.delayed_connection = 0;
895 sctp_send_initiate(inp, stcb);
896 return (0);
898 if (sctp_threshold_management(inp, stcb, net,
899 stcb->asoc.max_init_times)) {
900 /* Association was destroyed */
901 return (1);
903 stcb->asoc.dropped_special_cnt = 0;
904 sctp_backoff_on_timeout(stcb, stcb->asoc.primary_destination, 1, 0);
905 if (stcb->asoc.initial_init_rto_max < net->RTO) {
906 net->RTO = stcb->asoc.initial_init_rto_max;
908 if (stcb->asoc.numnets > 1) {
909 /* If we have more than one addr use it */
910 struct sctp_nets *alt;
911 alt = sctp_find_alternate_net(stcb, stcb->asoc.primary_destination);
912 if ((alt != NULL) && (alt != stcb->asoc.primary_destination)) {
913 sctp_move_all_chunks_to_alt(stcb, stcb->asoc.primary_destination, alt);
914 stcb->asoc.primary_destination = alt;
917 /* Send out a new init */
918 sctp_send_initiate(inp, stcb);
919 return (0);
923 * For cookie and asconf we actually need to find and mark for resend,
924 * then increment the resend counter (after all the threshold management
925 * stuff of course).
928 sctp_cookie_timer(struct sctp_inpcb *inp,
929 struct sctp_tcb *stcb,
930 struct sctp_nets *net)
932 struct sctp_nets *alt;
933 struct sctp_tmit_chunk *cookie;
934 /* first before all else we must find the cookie */
935 TAILQ_FOREACH(cookie, &stcb->asoc.control_send_queue, sctp_next) {
936 if (cookie->rec.chunk_id == SCTP_COOKIE_ECHO) {
937 break;
940 if (cookie == NULL) {
941 if (SCTP_GET_STATE(&stcb->asoc) == SCTP_STATE_COOKIE_ECHOED) {
942 /* FOOBAR! */
943 struct mbuf *oper;
944 MGET(oper, MB_DONTWAIT, MT_DATA);
945 if (oper) {
946 struct sctp_paramhdr *ph;
947 u_int32_t *ippp;
949 oper->m_len = sizeof(struct sctp_paramhdr) +
950 sizeof(*ippp);
951 ph = mtod(oper, struct sctp_paramhdr *);
952 ph->param_type = htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
953 ph->param_length = htons(oper->m_len);
954 ippp = (u_int32_t *)(ph + 1);
955 *ippp = htonl(0x40000002);
957 sctp_abort_an_association(inp, stcb, SCTP_INTERNAL_ERROR,
958 oper);
960 return (1);
962 /* Ok we found the cookie, threshold management next */
963 if (sctp_threshold_management(inp, stcb, cookie->whoTo,
964 stcb->asoc.max_init_times)) {
965 /* Assoc is over */
966 return (1);
969 * cleared theshold management now lets backoff the address &
970 * select an alternate
972 stcb->asoc.dropped_special_cnt = 0;
973 sctp_backoff_on_timeout(stcb, cookie->whoTo, 1, 0);
974 alt = sctp_find_alternate_net(stcb, cookie->whoTo);
975 if (alt != cookie->whoTo) {
976 sctp_free_remote_addr(cookie->whoTo);
977 cookie->whoTo = alt;
978 alt->ref_count++;
980 /* Now mark the retran info */
981 if (cookie->sent != SCTP_DATAGRAM_RESEND) {
982 stcb->asoc.sent_queue_retran_cnt++;
984 cookie->sent = SCTP_DATAGRAM_RESEND;
986 * Now call the output routine to kick out the cookie again, Note we
987 * don't mark any chunks for retran so that FR will need to kick in
988 * to move these (or a send timer).
990 return (0);
994 sctp_strreset_timer(struct sctp_inpcb *inp, struct sctp_tcb *stcb,
995 struct sctp_nets *net)
997 struct sctp_nets *alt;
998 struct sctp_tmit_chunk *strrst, *chk;
999 struct sctp_stream_reset_req *strreq;
1000 /* find the existing STRRESET */
1001 TAILQ_FOREACH(strrst, &stcb->asoc.control_send_queue,
1002 sctp_next) {
1003 if (strrst->rec.chunk_id == SCTP_STREAM_RESET) {
1004 /* is it what we want */
1005 strreq = mtod(strrst->data, struct sctp_stream_reset_req *);
1006 if (strreq->sr_req.ph.param_type == ntohs(SCTP_STR_RESET_REQUEST)) {
1007 break;
1011 if (strrst == NULL) {
1012 #ifdef SCTP_DEBUG
1013 if (sctp_debug_on & SCTP_DEBUG_TIMER1) {
1014 kprintf("Strange, strreset timer fires, but I can't find an str-reset?\n");
1016 #endif /* SCTP_DEBUG */
1017 return (0);
1019 /* do threshold management */
1020 if (sctp_threshold_management(inp, stcb, strrst->whoTo,
1021 stcb->asoc.max_send_times)) {
1022 /* Assoc is over */
1023 return (1);
1027 * cleared theshold management
1028 * now lets backoff the address & select an alternate
1030 sctp_backoff_on_timeout(stcb, strrst->whoTo, 1, 0);
1031 alt = sctp_find_alternate_net(stcb, strrst->whoTo);
1032 sctp_free_remote_addr(strrst->whoTo);
1033 strrst->whoTo = alt;
1034 alt->ref_count++;
1036 /* See if a ECN Echo is also stranded */
1037 TAILQ_FOREACH(chk, &stcb->asoc.control_send_queue, sctp_next) {
1038 if ((chk->whoTo == net) &&
1039 (chk->rec.chunk_id == SCTP_ECN_ECHO)) {
1040 sctp_free_remote_addr(chk->whoTo);
1041 if (chk->sent != SCTP_DATAGRAM_RESEND) {
1042 chk->sent = SCTP_DATAGRAM_RESEND;
1043 stcb->asoc.sent_queue_retran_cnt++;
1045 chk->whoTo = alt;
1046 alt->ref_count++;
1049 if (net->dest_state & SCTP_ADDR_NOT_REACHABLE) {
1051 * If the address went un-reachable, we need to move
1052 * to alternates for ALL chk's in queue
1054 sctp_move_all_chunks_to_alt(stcb, net, alt);
1056 /* mark the retran info */
1057 if (strrst->sent != SCTP_DATAGRAM_RESEND)
1058 stcb->asoc.sent_queue_retran_cnt++;
1059 strrst->sent = SCTP_DATAGRAM_RESEND;
1061 /* restart the timer */
1062 sctp_timer_start(SCTP_TIMER_TYPE_STRRESET, inp, stcb, strrst->whoTo);
1063 return (0);
1067 sctp_asconf_timer(struct sctp_inpcb *inp, struct sctp_tcb *stcb,
1068 struct sctp_nets *net)
1070 struct sctp_nets *alt;
1071 struct sctp_tmit_chunk *asconf, *chk;
1073 /* is this the first send, or a retransmission? */
1074 if (stcb->asoc.asconf_sent == 0) {
1075 /* compose a new ASCONF chunk and send it */
1076 sctp_send_asconf(stcb, net);
1077 } else {
1078 /* Retransmission of the existing ASCONF needed... */
1080 /* find the existing ASCONF */
1081 TAILQ_FOREACH(asconf, &stcb->asoc.control_send_queue,
1082 sctp_next) {
1083 if (asconf->rec.chunk_id == SCTP_ASCONF) {
1084 break;
1087 if (asconf == NULL) {
1088 #ifdef SCTP_DEBUG
1089 if (sctp_debug_on & SCTP_DEBUG_TIMER1) {
1090 kprintf("Strange, asconf timer fires, but I can't find an asconf?\n");
1092 #endif /* SCTP_DEBUG */
1093 return (0);
1095 /* do threshold management */
1096 if (sctp_threshold_management(inp, stcb, asconf->whoTo,
1097 stcb->asoc.max_send_times)) {
1098 /* Assoc is over */
1099 return (1);
1102 /* PETER? FIX? How will the following code ever run? If
1103 * the max_send_times is hit, threshold managment will
1104 * blow away the association?
1106 if (asconf->snd_count > stcb->asoc.max_send_times) {
1108 * Something is rotten, peer is not responding to
1109 * ASCONFs but maybe is to data etc. e.g. it is not
1110 * properly handling the chunk type upper bits
1111 * Mark this peer as ASCONF incapable and cleanup
1113 #ifdef SCTP_DEBUG
1114 if (sctp_debug_on & SCTP_DEBUG_TIMER1) {
1115 kprintf("asconf_timer: Peer has not responded to our repeated ASCONFs\n");
1117 #endif /* SCTP_DEBUG */
1118 sctp_asconf_cleanup(stcb, net);
1119 return (0);
1122 * cleared theshold management
1123 * now lets backoff the address & select an alternate
1125 sctp_backoff_on_timeout(stcb, asconf->whoTo, 1, 0);
1126 alt = sctp_find_alternate_net(stcb, asconf->whoTo);
1127 sctp_free_remote_addr(asconf->whoTo);
1128 asconf->whoTo = alt;
1129 alt->ref_count++;
1131 /* See if a ECN Echo is also stranded */
1132 TAILQ_FOREACH(chk, &stcb->asoc.control_send_queue, sctp_next) {
1133 if ((chk->whoTo == net) &&
1134 (chk->rec.chunk_id == SCTP_ECN_ECHO)) {
1135 sctp_free_remote_addr(chk->whoTo);
1136 chk->whoTo = alt;
1137 if (chk->sent != SCTP_DATAGRAM_RESEND) {
1138 chk->sent = SCTP_DATAGRAM_RESEND;
1139 stcb->asoc.sent_queue_retran_cnt++;
1141 alt->ref_count++;
1145 if (net->dest_state & SCTP_ADDR_NOT_REACHABLE) {
1147 * If the address went un-reachable, we need to move
1148 * to alternates for ALL chk's in queue
1150 sctp_move_all_chunks_to_alt(stcb, net, alt);
1152 /* mark the retran info */
1153 if (asconf->sent != SCTP_DATAGRAM_RESEND)
1154 stcb->asoc.sent_queue_retran_cnt++;
1155 asconf->sent = SCTP_DATAGRAM_RESEND;
1157 return (0);
/*
 * For the shutdown and shutdown-ack, we do not keep one around on the
 * control queue. This means we must generate a new one and call the
 * general chunk output routine, AFTER having done threshold management.
 */
1167 sctp_shutdown_timer(struct sctp_inpcb *inp, struct sctp_tcb *stcb,
1168 struct sctp_nets *net)
1170 struct sctp_nets *alt;
1171 /* first threshold managment */
1172 if (sctp_threshold_management(inp, stcb, net, stcb->asoc.max_send_times)) {
1173 /* Assoc is over */
1174 return (1);
1176 /* second select an alternative */
1177 alt = sctp_find_alternate_net(stcb, net);
1179 /* third generate a shutdown into the queue for out net */
1180 #ifdef SCTP_DEBUG
1181 if (sctp_debug_on & SCTP_DEBUG_OUTPUT4) {
1182 kprintf("%s:%d sends a shutdown\n",
1183 __FILE__,
1184 __LINE__
1187 #endif
1188 if (alt) {
1189 sctp_send_shutdown(stcb, alt);
1190 } else {
1191 /* if alt is NULL, there is no dest
1192 * to send to??
1194 return (0);
1196 /* fourth restart timer */
1197 sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWN, inp, stcb, alt);
1198 return (0);
1202 sctp_shutdownack_timer(struct sctp_inpcb *inp, struct sctp_tcb *stcb,
1203 struct sctp_nets *net)
1205 struct sctp_nets *alt;
1206 /* first threshold managment */
1207 if (sctp_threshold_management(inp, stcb, net, stcb->asoc.max_send_times)) {
1208 /* Assoc is over */
1209 return (1);
1211 /* second select an alternative */
1212 alt = sctp_find_alternate_net(stcb, net);
1214 /* third generate a shutdown into the queue for out net */
1215 sctp_send_shutdown_ack(stcb, alt);
1217 /* fourth restart timer */
1218 sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNACK, inp, stcb, alt);
1219 return (0);
1222 static void
1223 sctp_audit_stream_queues_for_size(struct sctp_inpcb *inp,
1224 struct sctp_tcb *stcb)
1226 struct sctp_stream_out *outs;
1227 struct sctp_tmit_chunk *chk;
1228 unsigned int chks_in_queue=0;
1230 if ((stcb == NULL) || (inp == NULL))
1231 return;
1232 if (TAILQ_EMPTY(&stcb->asoc.out_wheel)) {
1233 kprintf("Strange, out_wheel empty nothing on sent/send and tot=%lu?\n",
1234 (u_long)stcb->asoc.total_output_queue_size);
1235 stcb->asoc.total_output_queue_size = 0;
1236 return;
1238 if (stcb->asoc.sent_queue_retran_cnt) {
1239 kprintf("Hmm, sent_queue_retran_cnt is non-zero %d\n",
1240 stcb->asoc.sent_queue_retran_cnt);
1241 stcb->asoc.sent_queue_retran_cnt = 0;
1243 /* Check to see if some data queued, if so report it */
1244 TAILQ_FOREACH(outs, &stcb->asoc.out_wheel, next_spoke) {
1245 if (!TAILQ_EMPTY(&outs->outqueue)) {
1246 TAILQ_FOREACH(chk, &outs->outqueue, sctp_next) {
1247 chks_in_queue++;
1251 if (chks_in_queue != stcb->asoc.stream_queue_cnt) {
1252 kprintf("Hmm, stream queue cnt at %d I counted %d in stream out wheel\n",
1253 stcb->asoc.stream_queue_cnt, chks_in_queue);
1255 if (chks_in_queue) {
1256 /* call the output queue function */
1257 sctp_chunk_output(inp, stcb, 1);
1258 if ((TAILQ_EMPTY(&stcb->asoc.send_queue)) &&
1259 (TAILQ_EMPTY(&stcb->asoc.sent_queue))) {
1260 /* Probably should go in and make it go back through and add fragments allowed */
1261 kprintf("Still nothing moved %d chunks are stuck\n", chks_in_queue);
1263 } else {
1264 kprintf("Found no chunks on any queue tot:%lu\n",
1265 (u_long)stcb->asoc.total_output_queue_size);
1266 stcb->asoc.total_output_queue_size = 0;
1271 sctp_heartbeat_timer(struct sctp_inpcb *inp, struct sctp_tcb *stcb,
1272 struct sctp_nets *net)
1274 int cnt_of_unconf=0;
1276 if (net) {
1277 if (net->hb_responded == 0) {
1278 sctp_backoff_on_timeout(stcb, net, 1, 0);
1280 /* Zero PBA, if it needs it */
1281 if (net->partial_bytes_acked) {
1282 net->partial_bytes_acked = 0;
1285 TAILQ_FOREACH(net, &stcb->asoc.nets, sctp_next) {
1286 if ((net->dest_state & SCTP_ADDR_UNCONFIRMED) &&
1287 (net->dest_state & SCTP_ADDR_REACHABLE)) {
1288 cnt_of_unconf++;
1291 if ((stcb->asoc.total_output_queue_size > 0) &&
1292 (TAILQ_EMPTY(&stcb->asoc.send_queue)) &&
1293 (TAILQ_EMPTY(&stcb->asoc.sent_queue))) {
1294 sctp_audit_stream_queues_for_size(inp, stcb);
1296 /* Send a new HB, this will do threshold managment, pick a new dest */
1297 if (sctp_send_hb(stcb, 0, NULL) < 0) {
1298 return (1);
1300 if (cnt_of_unconf > 1) {
1302 * this will send out extra hb's up to maxburst if
1303 * there are any unconfirmed addresses.
1305 int cnt_sent = 1;
1306 while ((cnt_sent < stcb->asoc.max_burst) && (cnt_of_unconf > 1)) {
1307 if (sctp_send_hb(stcb, 0, NULL) == 0)
1308 break;
1309 cnt_of_unconf--;
1310 cnt_sent++;
1313 return (0);
#define SCTP_NUMBER_OF_MTU_SIZES 18

/*
 * Table of well-known link MTU plateaus, ascending. Must contain exactly
 * SCTP_NUMBER_OF_MTU_SIZES entries (the first entry, 68, is the IPv4
 * minimum-MTU plateau).
 */
static u_int32_t mtu_sizes[] = {
	68,
	296,
	508,
	512,
	544,
	576,
	1006,
	1492,
	1500,
	1536,
	2002,
	2048,
	4352,
	4464,
	8166,
	17914,
	32000,
	65535
};

/*
 * Return the smallest plateau MTU strictly greater than cur_mtu, or
 * cur_mtu itself when no table entry is bigger (inp is unused here).
 */
static u_int32_t
sctp_getnext_mtu(struct sctp_inpcb *inp, u_int32_t cur_mtu)
{
	/* select another MTU that is just bigger than this one */
	int i;

	for (i = 0; i < SCTP_NUMBER_OF_MTU_SIZES; i++) {
		if (cur_mtu < mtu_sizes[i]) {
			/* no max_mtu is bigger than this one */
			return (mtu_sizes[i]);
		}
	}
	/* here return the highest allowable */
	return (cur_mtu);
}
1356 void
1357 sctp_pathmtu_timer(struct sctp_inpcb *inp,
1358 struct sctp_tcb *stcb,
1359 struct sctp_nets *net)
1361 u_int32_t next_mtu;
1363 /* restart the timer in any case */
1364 next_mtu = sctp_getnext_mtu(inp, net->mtu);
1365 if (next_mtu <= net->mtu) {
1366 /* nothing to do */
1367 return;
1369 if (net->ro.ro_rt != NULL) {
1370 /* only if we have a route and interface do we
1371 * set anything. Note we always restart
1372 * the timer though just in case it is updated
1373 * (i.e. the ifp) or route/ifp is populated.
1375 if (net->ro.ro_rt->rt_ifp != NULL) {
1376 if (net->ro.ro_rt->rt_ifp->if_mtu > next_mtu) {
1377 /* ok it will fit out the door */
1378 net->mtu = next_mtu;
1382 /* restart the timer */
1383 sctp_timer_start(SCTP_TIMER_TYPE_PATHMTURAISE, inp, stcb, net);
1386 void
1387 sctp_autoclose_timer(struct sctp_inpcb *inp,
1388 struct sctp_tcb *stcb,
1389 struct sctp_nets *net)
1391 struct timeval tn, *tim_touse;
1392 struct sctp_association *asoc;
1393 int ticks_gone_by;
1395 SCTP_GETTIME_TIMEVAL(&tn);
1396 if (stcb->asoc.sctp_autoclose_ticks &&
1397 (inp->sctp_flags & SCTP_PCB_FLAGS_AUTOCLOSE)) {
1398 /* Auto close is on */
1399 asoc = &stcb->asoc;
1400 /* pick the time to use */
1401 if (asoc->time_last_rcvd.tv_sec >
1402 asoc->time_last_sent.tv_sec) {
1403 tim_touse = &asoc->time_last_rcvd;
1404 } else {
1405 tim_touse = &asoc->time_last_sent;
1407 /* Now has long enough transpired to autoclose? */
1408 ticks_gone_by = ((tn.tv_sec - tim_touse->tv_sec) * hz);
1409 if ((ticks_gone_by > 0) &&
1410 (ticks_gone_by >= (int)asoc->sctp_autoclose_ticks)) {
1412 * autoclose time has hit, call the output routine,
1413 * which should do nothing just to be SURE we don't
1414 * have hanging data. We can then safely check the
1415 * queues and know that we are clear to send shutdown
1417 sctp_chunk_output(inp, stcb, 9);
1418 /* Are we clean? */
1419 if (TAILQ_EMPTY(&asoc->send_queue) &&
1420 TAILQ_EMPTY(&asoc->sent_queue)) {
1422 * there is nothing queued to send,
1423 * so I'm done...
1425 if (SCTP_GET_STATE(asoc) !=
1426 SCTP_STATE_SHUTDOWN_SENT) {
1427 /* only send SHUTDOWN 1st time thru */
1428 #ifdef SCTP_DEBUG
1429 if (sctp_debug_on & SCTP_DEBUG_OUTPUT4) {
1430 kprintf("%s:%d sends a shutdown\n",
1431 __FILE__,
1432 __LINE__
1435 #endif
1436 sctp_send_shutdown(stcb, stcb->asoc.primary_destination);
1437 asoc->state = SCTP_STATE_SHUTDOWN_SENT;
1438 sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWN,
1439 stcb->sctp_ep, stcb,
1440 asoc->primary_destination);
1441 sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNGUARD,
1442 stcb->sctp_ep, stcb,
1443 asoc->primary_destination);
1446 } else {
1448 * No auto close at this time, reset t-o to
1449 * check later
1451 int tmp;
1452 /* fool the timer startup to use the time left */
1453 tmp = asoc->sctp_autoclose_ticks;
1454 asoc->sctp_autoclose_ticks -= ticks_gone_by;
1455 sctp_timer_start(SCTP_TIMER_TYPE_AUTOCLOSE, inp, stcb,
1456 net);
1457 /* restore the real tick value */
1458 asoc->sctp_autoclose_ticks = tmp;
1463 void
1464 sctp_iterator_timer(struct sctp_iterator *it)
1466 int cnt= 0;
1467 /* only one iterator can run at a
1468 * time. This is the only way we
1469 * can cleanly pull ep's from underneath
1470 * all the running interators when a
1471 * ep is freed.
1473 SCTP_ITERATOR_LOCK();
1474 if (it->inp == NULL) {
1475 /* iterator is complete */
1476 done_with_iterator:
1477 SCTP_ITERATOR_UNLOCK();
1478 SCTP_INP_INFO_WLOCK();
1479 LIST_REMOVE(it, sctp_nxt_itr);
1480 /* stopping the callout is not needed, in theory,
1481 * but I am paranoid.
1483 SCTP_INP_INFO_WUNLOCK();
1484 callout_stop(&it->tmr.timer);
1485 if (it->function_atend != NULL) {
1486 (*it->function_atend)(it->pointer, it->val);
1488 FREE(it, M_PCB);
1489 return;
1491 select_a_new_ep:
1492 SCTP_INP_WLOCK(it->inp);
1493 while ((it->pcb_flags) && ((it->inp->sctp_flags & it->pcb_flags) != it->pcb_flags)) {
1494 /* we do not like this ep */
1495 if (it->iterator_flags & SCTP_ITERATOR_DO_SINGLE_INP) {
1496 SCTP_INP_WUNLOCK(it->inp);
1497 goto done_with_iterator;
1499 SCTP_INP_WUNLOCK(it->inp);
1500 it->inp = LIST_NEXT(it->inp, sctp_list);
1501 if (it->inp == NULL) {
1502 goto done_with_iterator;
1504 SCTP_INP_WLOCK(it->inp);
1506 if ((it->inp->inp_starting_point_for_iterator != NULL) &&
1507 (it->inp->inp_starting_point_for_iterator != it)) {
1508 kprintf("Iterator collision, we must wait for other iterator at %x\n",
1509 (u_int)it->inp);
1510 SCTP_INP_WUNLOCK(it->inp);
1511 goto start_timer_return;
1513 /* now we do the actual write to this guy */
1514 it->inp->inp_starting_point_for_iterator = it;
1515 SCTP_INP_WUNLOCK(it->inp);
1516 SCTP_INP_RLOCK(it->inp);
1517 /* if we reach here we found a inp acceptable, now through each
1518 * one that has the association in the right state
1520 if (it->stcb == NULL) {
1521 it->stcb = LIST_FIRST(&it->inp->sctp_asoc_list);
1523 if (it->stcb->asoc.stcb_starting_point_for_iterator == it) {
1524 it->stcb->asoc.stcb_starting_point_for_iterator = NULL;
1526 while (it->stcb) {
1527 SCTP_TCB_LOCK(it->stcb);
1528 if (it->asoc_state && ((it->stcb->asoc.state & it->asoc_state) != it->asoc_state)) {
1529 SCTP_TCB_UNLOCK(it->stcb);
1530 it->stcb = LIST_NEXT(it->stcb, sctp_tcblist);
1531 continue;
1533 cnt++;
1534 /* run function on this one */
1535 SCTP_INP_RUNLOCK(it->inp);
1536 (*it->function_toapply)(it->inp, it->stcb, it->pointer, it->val);
1537 sctp_chunk_output(it->inp, it->stcb, 1);
1538 SCTP_TCB_UNLOCK(it->stcb);
1539 /* see if we have limited out */
1540 if (cnt > SCTP_MAX_ITERATOR_AT_ONCE) {
1541 it->stcb->asoc.stcb_starting_point_for_iterator = it;
1542 start_timer_return:
1543 SCTP_ITERATOR_UNLOCK();
1544 sctp_timer_start(SCTP_TIMER_TYPE_ITERATOR, (struct sctp_inpcb *)it, NULL, NULL);
1545 return;
1547 SCTP_INP_RLOCK(it->inp);
1548 it->stcb = LIST_NEXT(it->stcb, sctp_tcblist);
1550 /* if we reach here, we ran out of stcb's in the inp we are looking at */
1551 SCTP_INP_RUNLOCK(it->inp);
1552 SCTP_INP_WLOCK(it->inp);
1553 it->inp->inp_starting_point_for_iterator = NULL;
1554 SCTP_INP_WUNLOCK(it->inp);
1555 if (it->iterator_flags & SCTP_ITERATOR_DO_SINGLE_INP) {
1556 it->inp = NULL;
1557 } else {
1558 SCTP_INP_INFO_RLOCK();
1559 it->inp = LIST_NEXT(it->inp, sctp_list);
1560 SCTP_INP_INFO_RUNLOCK();
1562 if (it->inp == NULL) {
1563 goto done_with_iterator;
1565 goto select_a_new_ep;