1 /* $KAME: sctp_timer.c,v 1.28 2004/08/17 04:06:20 itojun Exp $ */
2 /* $DragonFly: src/sys/netinet/sctp_timer.c,v 1.6 2006/12/22 23:57:52 swildner Exp $ */
5 * Copyright (C) 2002, 2003, 2004 Cisco Systems Inc,
8 * Redistribution and use in source and binary forms, with or without
9 * modification, are permitted provided that the following conditions
11 * 1. Redistributions of source code must retain the above copyright
12 * notice, this list of conditions and the following disclaimer.
13 * 2. Redistributions in binary form must reproduce the above copyright
14 * notice, this list of conditions and the following disclaimer in the
15 * documentation and/or other materials provided with the distribution.
16 * 3. Neither the name of the project nor the names of its contributors
17 * may be used to endorse or promote products derived from this software
18 * without specific prior written permission.
20 * THIS SOFTWARE IS PROVIDED BY THE PROJECT AND CONTRIBUTORS ``AS IS'' AND
21 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
22 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
23 * ARE DISCLAIMED. IN NO EVENT SHALL THE PROJECT OR CONTRIBUTORS BE LIABLE
24 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
25 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
26 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
27 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
28 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
29 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
32 #if !(defined(__OpenBSD__) || defined(__APPLE__))
33 #include "opt_ipsec.h"
35 #if defined(__FreeBSD__) || defined(__DragonFly__)
36 #include "opt_compat.h"
37 #include "opt_inet6.h"
40 #if defined(__NetBSD__)
45 #elif !defined(__OpenBSD__)
49 #include <sys/param.h>
50 #include <sys/systm.h>
51 #include <sys/malloc.h>
54 #include <sys/domain.h>
56 #include <sys/protosw.h>
57 #include <sys/socket.h>
58 #include <sys/socketvar.h>
60 #include <sys/kernel.h>
61 #include <sys/sysctl.h>
63 #include <sys/domain.h>
66 #if (defined(__FreeBSD__) && __FreeBSD_version >= 500000)
67 #include <sys/limits.h>
69 #include <machine/limits.h>
73 #include <net/if_types.h>
74 #include <net/route.h>
75 #include <netinet/in.h>
76 #include <netinet/in_systm.h>
78 #include <netinet/ip.h>
79 #include <netinet/in_pcb.h>
80 #include <netinet/in_var.h>
81 #include <netinet/ip_var.h>
84 #include <netinet/ip6.h>
85 #include <netinet6/ip6_var.h>
88 #include <netinet/sctp_pcb.h>
92 #include <netinet6/ipsec.h>
93 #include <netproto/key/key.h>
99 #include <netinet6/sctp6_var.h>
101 #include <netinet/sctp_var.h>
102 #include <netinet/sctp_timer.h>
103 #include <netinet/sctputil.h>
104 #include <netinet/sctp_output.h>
105 #include <netinet/sctp_hashdriver.h>
106 #include <netinet/sctp_header.h>
107 #include <netinet/sctp_indata.h>
108 #include <netinet/sctp_asconf.h>
110 #include <netinet/sctp.h>
111 #include <netinet/sctp_uio.h>
113 #include <net/net_osdep.h>
116 extern u_int32_t sctp_debug_on
;
117 #endif /* SCTP_DEBUG */
/*
 * NOTE(review): this chunk is a lossy extraction (interior source lines are
 * missing and tokens are wrapped mid-expression), so only comments are added;
 * no code is altered.  Debug audit helper: zeroes sent_queue_retran_cnt and
 * sent_queue_cnt, then recounts them by walking asoc->sent_queue and
 * asoc->control_send_queue, counting SCTP_DATAGRAM_RESEND entries as retrans.
 */
120 sctp_audit_retranmission_queue(struct sctp_association
*asoc
)
122 struct sctp_tmit_chunk
*chk
;
125 if (sctp_debug_on
& SCTP_DEBUG_TIMER4
) {
126 kprintf("Audit invoked on send queue cnt:%d onqueue:%d\n",
127 asoc
->sent_queue_retran_cnt
,
128 asoc
->sent_queue_cnt
);
130 #endif /* SCTP_DEBUG */
/* Reset both counters, then rebuild them from the live queues. */
131 asoc
->sent_queue_retran_cnt
= 0;
132 asoc
->sent_queue_cnt
= 0;
133 TAILQ_FOREACH(chk
, &asoc
->sent_queue
, sctp_next
) {
134 if (chk
->sent
== SCTP_DATAGRAM_RESEND
) {
135 asoc
->sent_queue_retran_cnt
++;
137 asoc
->sent_queue_cnt
++;
/* Control chunks marked for resend also count toward the retran total. */
139 TAILQ_FOREACH(chk
, &asoc
->control_send_queue
, sctp_next
) {
140 if (chk
->sent
== SCTP_DATAGRAM_RESEND
) {
141 asoc
->sent_queue_retran_cnt
++;
145 if (sctp_debug_on
& SCTP_DEBUG_TIMER4
) {
146 kprintf("Audit completes retran:%d onqueue:%d\n",
147 asoc
->sent_queue_retran_cnt
,
148 asoc
->sent_queue_cnt
);
150 #endif /* SCTP_DEBUG */
/*
 * NOTE(review): lossy extraction — interior lines (error_count bump, some
 * returns/braces) are missing; comments only, code untouched.
 * Path/association error-threshold handling: when a net's error_count
 * reaches its failure_threshold the destination is flagged unreachable
 * (SCTP_ADDR_NOT_REACHABLE, plus SCTP_ADDR_WAS_PRIMARY if it was primary)
 * and the ULP is notified.  When the association-wide overall_error_count
 * exceeds `threshold` the association is aborted with a protocol-violation
 * cause (marker 0x40000001).  Return value presumably non-zero when the
 * association is destroyed — callers test it that way; confirm against
 * the unmangled source.
 */
154 sctp_threshold_management(struct sctp_inpcb
*inp
, struct sctp_tcb
*stcb
,
155 struct sctp_nets
*net
, uint16_t threshold
)
160 if (sctp_debug_on
& SCTP_DEBUG_TIMER4
) {
161 kprintf("Error count for %p now %d thresh:%d\n",
162 net
, net
->error_count
,
163 net
->failure_threshold
);
165 #endif /* SCTP_DEBUG */
166 if (net
->error_count
>= net
->failure_threshold
) {
167 /* We had a threshold failure */
168 if (net
->dest_state
& SCTP_ADDR_REACHABLE
) {
169 net
->dest_state
&= ~SCTP_ADDR_REACHABLE
;
170 net
->dest_state
|= SCTP_ADDR_NOT_REACHABLE
;
171 if (net
== stcb
->asoc
.primary_destination
) {
172 net
->dest_state
|= SCTP_ADDR_WAS_PRIMARY
;
174 sctp_ulp_notify(SCTP_NOTIFY_INTERFACE_DOWN
,
176 SCTP_FAILED_THRESHOLD
,
180 /*********HOLD THIS COMMENT FOR PATCH OF ALTERNATE
181 *********ROUTING CODE
183 /*********HOLD THIS COMMENT FOR END OF PATCH OF ALTERNATE
184 *********ROUTING CODE
/* Unconfirmed addresses do not charge the association-wide error count. */
191 if ((net
->dest_state
& SCTP_ADDR_UNCONFIRMED
) == 0) {
192 stcb
->asoc
.overall_error_count
++;
195 stcb
->asoc
.overall_error_count
++;
198 if (sctp_debug_on
& SCTP_DEBUG_TIMER4
) {
199 kprintf("Overall error count for %p now %d thresh:%u state:%x\n",
201 stcb
->asoc
.overall_error_count
,
203 ((net
== NULL
) ? (u_int
)0 : (u_int
)net
->dest_state
));
205 #endif /* SCTP_DEBUG */
206 /* We specifically do not do >= to give the assoc one more
207 * change before we fail it.
209 if (stcb
->asoc
.overall_error_count
> threshold
) {
210 /* Abort notification sends a ULP notify */
/* Build an operational-error mbuf carrying a protocol-violation cause. */
212 MGET(oper
, MB_DONTWAIT
, MT_DATA
);
214 struct sctp_paramhdr
*ph
;
217 oper
->m_len
= sizeof(struct sctp_paramhdr
) +
219 ph
= mtod(oper
, struct sctp_paramhdr
*);
220 ph
->param_type
= htons(SCTP_CAUSE_PROTOCOL_VIOLATION
);
221 ph
->param_length
= htons(oper
->m_len
);
222 ippp
= (u_int32_t
*)(ph
+ 1);
223 *ippp
= htonl(0x40000001);
225 sctp_abort_an_association(inp
, stcb
, SCTP_FAILED_THRESHOLD
, oper
);
/*
 * NOTE(review): lossy extraction — loop heads/returns are missing; comments
 * only, code untouched.  Selects an alternate destination net for *net:
 * with a single net it simply returns it; otherwise it cycles through
 * asoc.nets (wrapping with TAILQ_FIRST) looking for a net that is
 * SCTP_ADDR_REACHABLE, has a route (forcing a route lookup via
 * rtalloc/rtalloc_ign when ro_rt is NULL, with IPv6 scope embed/recover
 * around it on non-SCOPEDROUTING builds), and is not UNCONFIRMED.
 * A second rotation pass (dormant case) relaxes the reachability test.
 */
232 sctp_find_alternate_net(struct sctp_tcb
*stcb
,
233 struct sctp_nets
*net
)
235 /* Find and return an alternate network if possible */
236 struct sctp_nets
*alt
, *mnet
;
239 if (stcb
->asoc
.numnets
== 1) {
240 /* No others but net */
241 return (TAILQ_FIRST(&stcb
->asoc
.nets
));
247 mnet
= TAILQ_FIRST(&stcb
->asoc
.nets
);
250 alt
= TAILQ_NEXT(mnet
, sctp_next
);
/* Wrap to the head of the net list when we fall off the tail. */
256 alt
= TAILQ_FIRST(&stcb
->asoc
.nets
);
258 if (alt
->ro
.ro_rt
== NULL
) {
259 #ifndef SCOPEDROUTING
260 struct sockaddr_in6
*sin6
;
261 sin6
= (struct sockaddr_in6
*)&alt
->ro
._l_addr
;
262 if (sin6
->sin6_family
== AF_INET6
) {
263 #if defined(SCTP_BASE_FREEBSD) || defined(__APPLE__) || defined(__DragonFly__)
264 in6_embedscope(&sin6
->sin6_addr
, sin6
,
267 in6_embedscope(&sin6
->sin6_addr
, sin6
);
271 #if defined(__FreeBSD__) || defined(__APPLE__) || defined(__DragonFly__)
272 rtalloc_ign((struct route
*)&alt
->ro
, 0UL);
274 rtalloc((struct route
*)&alt
->ro
);
276 #ifndef SCOPEDROUTING
277 if (sin6
->sin6_family
== AF_INET6
) {
278 in6_recoverscope(sin6
, &sin6
->sin6_addr
, NULL
);
/* New route means the cached source address is stale. */
281 alt
->src_addr_selected
= 0;
284 ((alt
->dest_state
& SCTP_ADDR_REACHABLE
) == SCTP_ADDR_REACHABLE
) &&
285 (alt
->ro
.ro_rt
!= NULL
) &&
286 (!(alt
->dest_state
& SCTP_ADDR_UNCONFIRMED
))
288 /* Found a reachable address */
292 } while (alt
!= NULL
);
295 /* Case where NO insv network exists (dormant state) */
296 /* we rotate destinations */
300 alt
= TAILQ_NEXT(mnet
, sctp_next
);
306 alt
= TAILQ_FIRST(&stcb
->asoc
.nets
);
308 if ((!(alt
->dest_state
& SCTP_ADDR_UNCONFIRMED
)) &&
310 /* Found an alternate address */
314 } while (alt
!= NULL
);
/*
 * NOTE(review): lossy extraction — the RTO-doubling statement itself is
 * among the dropped lines (only its debug print and the maxrto cap remain
 * visible); comments only, code untouched.  On a timeout: caps net->RTO at
 * asoc.maxrto, and — unless this was a window probe (win_probe != 0) or
 * nothing was marked (num_marked == 0) — applies the RFC 4960 loss
 * response: ssthresh = cwnd/2 floored at 2*MTU, cwnd collapsed to 1 MTU,
 * partial_bytes_acked reset.
 */
323 sctp_backoff_on_timeout(struct sctp_tcb
*stcb
,
324 struct sctp_nets
*net
,
332 #endif /* SCTP_DEBUG */
335 if (sctp_debug_on
& SCTP_DEBUG_TIMER2
) {
336 kprintf("Timer doubles from %d ms -to-> %d ms\n",
339 #endif /* SCTP_DEBUG */
/* Clamp the (elsewhere-doubled) RTO to the association maximum. */
341 if (net
->RTO
> stcb
->asoc
.maxrto
) {
342 net
->RTO
= stcb
->asoc
.maxrto
;
344 if (sctp_debug_on
& SCTP_DEBUG_TIMER2
) {
345 kprintf("Growth capped by maxrto %d\n",
348 #endif /* SCTP_DEBUG */
352 if ((win_probe
== 0) && num_marked
) {
353 /* We don't apply penalty to window probe scenarios */
354 #ifdef SCTP_CWND_LOGGING
355 int old_cwnd
=net
->cwnd
;
357 net
->ssthresh
= net
->cwnd
>> 1;
358 if (net
->ssthresh
< (net
->mtu
<< 1)) {
359 net
->ssthresh
= (net
->mtu
<< 1);
361 net
->cwnd
= net
->mtu
;
363 if (net
->cwnd
< net
->mtu
)
364 net
->cwnd
= net
->mtu
;
365 #ifdef SCTP_CWND_LOGGING
366 sctp_log_cwnd(net
, net
->cwnd
-old_cwnd
, SCTP_CWND_LOG_FROM_RTX
);
369 net
->partial_bytes_acked
= 0;
371 if (sctp_debug_on
& SCTP_DEBUG_TIMER1
) {
372 kprintf("collapse cwnd to 1MTU ssthresh to %d\n",
/*
 * NOTE(review): lossy extraction — many interior lines are missing
 * (declarations of cur_rto/cnt_mk, several if-conditions, braces,
 * return statements); comments only, code untouched.
 * T3-timeout marking pass: walks asoc.sent_queue and marks every
 * un-acked chunk destined to *net as SCTP_DATAGRAM_RESEND, provided the
 * chunk has been outstanding at least one smoothed RTO (min_wait bound),
 * moving flight-size accounting off the failed net, expiring PR-SCTP
 * chunks whose timetodrop has passed, and rehoming stranded ECN-ECHO
 * control chunks.  Rebuilds peers_rwnd/total_flight, re-syncs the ECN
 * nonce point, and reports the mark count via *num_marked.  Returns 1
 * when only a window probe remains outstanding (win-probe state) —
 * presumably 0 otherwise; the return statements were dropped by the
 * extraction, so confirm against the unmangled source.
 */
382 sctp_mark_all_for_resend(struct sctp_tcb
*stcb
,
383 struct sctp_nets
*net
,
384 struct sctp_nets
*alt
,
389 * Mark all chunks (well not all) that were sent to *net for retransmission.
390 * Move them to alt for there destination as well... We only
391 * mark chunks that have been outstanding long enough to have
392 * received feed-back.
394 struct sctp_tmit_chunk
*chk
, *tp2
;
395 struct sctp_nets
*lnets
;
396 struct timeval now
, min_wait
, tv
;
398 int win_probes
, non_win_probes
, orig_rwnd
, audit_tf
, num_mk
, fir
;
400 u_int32_t orig_flight
;
401 u_int32_t tsnlast
, tsnfirst
;
403 /* none in flight now */
406 /* figure out how long a data chunk must be pending
407 * before we can mark it ..
409 SCTP_GETTIME_TIMEVAL(&now
);
410 /* get cur rto in micro-seconds */
411 cur_rto
= (((net
->lastsa
>> 2) + net
->lastsv
) >> 1);
412 #ifdef SCTP_FR_LOGGING
413 sctp_log_fr(cur_rto
, 0, 0, SCTP_FR_T3_MARK_TIME
);
416 #ifdef SCTP_FR_LOGGING
417 sctp_log_fr(cur_rto
, 0, 0, SCTP_FR_T3_MARK_TIME
);
/* min_wait = now - one RTO: only chunks sent before this may be marked. */
419 tv
.tv_sec
= cur_rto
/ 1000000;
420 tv
.tv_usec
= cur_rto
% 1000000;
422 timersub(&now
, &tv
, &min_wait
);
425 timevalsub(&min_wait
, &tv
);
427 if (min_wait
.tv_sec
< 0 || min_wait
.tv_usec
< 0) {
429 * if we hit here, we don't
430 * have enough seconds on the clock to account
431 * for the RTO. We just let the lower seconds
432 * be the bounds and don't worry about it. This
433 * may mean we will mark a lot more than we should.
435 min_wait
.tv_sec
= min_wait
.tv_usec
= 0;
437 #ifdef SCTP_FR_LOGGING
438 sctp_log_fr(cur_rto
, now
.tv_sec
, now
.tv_usec
, SCTP_FR_T3_MARK_TIME
);
439 sctp_log_fr(0, min_wait
.tv_sec
, min_wait
.tv_usec
, SCTP_FR_T3_MARK_TIME
);
/* Pull this net's flight out of the association totals; re-add below. */
441 stcb
->asoc
.total_flight
-= net
->flight_size
;
442 if (stcb
->asoc
.total_flight
< 0) {
444 stcb
->asoc
.total_flight
= 0;
446 /* Our rwnd will be incorrect here since we are not adding
447 * back the cnt * mbuf but we will fix that down below.
449 orig_rwnd
= stcb
->asoc
.peers_rwnd
;
450 orig_flight
= net
->flight_size
;
451 stcb
->asoc
.peers_rwnd
+= net
->flight_size
;
452 net
->flight_size
= 0;
453 net
->rto_pending
= 0;
454 net
->fast_retran_ip
= 0;
455 win_probes
= non_win_probes
= 0;
457 if (sctp_debug_on
& SCTP_DEBUG_TIMER2
) {
458 kprintf("Marking ALL un-acked for retransmission at t3-timeout\n");
460 #endif /* SCTP_DEBUG */
461 /* Now on to each chunk */
463 tsnfirst
= tsnlast
= 0;
464 chk
= TAILQ_FIRST(&stcb
->asoc
.sent_queue
);
465 for (;chk
!= NULL
; chk
= tp2
) {
466 tp2
= TAILQ_NEXT(chk
, sctp_next
);
467 if ((compare_with_wrap(stcb
->asoc
.last_acked_seq
,
468 chk
->rec
.data
.TSN_seq
,
470 (stcb
->asoc
.last_acked_seq
== chk
->rec
.data
.TSN_seq
)) {
471 /* Strange case our list got out of order? */
472 kprintf("Our list is out of order?\n");
473 TAILQ_REMOVE(&stcb
->asoc
.sent_queue
, chk
, sctp_next
);
475 sctp_release_pr_sctp_chunk(stcb
, chk
, 0xffff,
476 &stcb
->asoc
.sent_queue
);
477 if (chk
->flags
& SCTP_PR_SCTP_BUFFER
) {
478 stcb
->asoc
.sent_queue_cnt_removeable
--;
481 stcb
->asoc
.sent_queue_cnt
--;
482 sctp_free_remote_addr(chk
->whoTo
);
483 sctppcbinfo
.ipi_count_chunk
--;
484 if ((int)sctppcbinfo
.ipi_count_chunk
< 0) {
485 panic("Chunk count is going negative");
487 SCTP_ZONE_FREE(sctppcbinfo
.ipi_zone_chunk
, chk
);
488 sctppcbinfo
.ipi_gencnt_chunk
++;
491 if ((chk
->whoTo
== net
) && (chk
->sent
< SCTP_DATAGRAM_ACKED
)) {
492 /* found one to mark:
493 * If it is less than DATAGRAM_ACKED it MUST
494 * not be a skipped or marked TSN but instead
495 * one that is either already set for retransmission OR
496 * one that needs retransmission.
499 /* validate its been outstanding long enough */
500 #ifdef SCTP_FR_LOGGING
501 sctp_log_fr(chk
->rec
.data
.TSN_seq
,
502 chk
->sent_rcv_time
.tv_sec
,
503 chk
->sent_rcv_time
.tv_usec
,
504 SCTP_FR_T3_MARK_TIME
);
506 if (chk
->sent_rcv_time
.tv_sec
> min_wait
.tv_sec
) {
507 /* we have reached a chunk that was sent some
508 * seconds past our min.. forget it we will
509 * find no more to send.
511 #ifdef SCTP_FR_LOGGING
513 chk
->sent_rcv_time
.tv_sec
,
514 chk
->sent_rcv_time
.tv_usec
,
518 } else if (chk
->sent_rcv_time
.tv_sec
== min_wait
.tv_sec
) {
519 /* we must look at the micro seconds to know.
521 if (chk
->sent_rcv_time
.tv_usec
>= min_wait
.tv_usec
) {
522 /* ok it was sent after our boundary time. */
523 #ifdef SCTP_FR_LOGGING
525 chk
->sent_rcv_time
.tv_sec
,
526 chk
->sent_rcv_time
.tv_usec
,
532 stcb
->asoc
.total_flight_count
--;
533 if (stcb
->asoc
.total_flight_count
< 0) {
534 stcb
->asoc
.total_flight_count
= 0;
/* PR-SCTP (timed reliability, not buffer-bound): drop expired chunks. */
536 if ((chk
->flags
& (SCTP_PR_SCTP_ENABLED
|SCTP_PR_SCTP_BUFFER
)) == SCTP_PR_SCTP_ENABLED
) {
538 if ((now
.tv_sec
> chk
->rec
.data
.timetodrop
.tv_sec
) ||
539 ((chk
->rec
.data
.timetodrop
.tv_sec
== now
.tv_sec
) &&
540 (now
.tv_usec
> chk
->rec
.data
.timetodrop
.tv_usec
))) {
543 sctp_release_pr_sctp_chunk(stcb
,
545 (SCTP_RESPONSE_TO_USER_REQ
|SCTP_NOTIFY_DATAGRAM_SENT
),
546 &stcb
->asoc
.sent_queue
);
551 if (chk
->sent
!= SCTP_DATAGRAM_RESEND
) {
552 stcb
->asoc
.sent_queue_retran_cnt
++;
557 if (sctp_debug_on
& SCTP_DEBUG_TIMER1
) {
558 kprintf("First TSN marked was %x\n",
559 chk
->rec
.data
.TSN_seq
);
562 tsnfirst
= chk
->rec
.data
.TSN_seq
;
564 tsnlast
= chk
->rec
.data
.TSN_seq
;
565 #ifdef SCTP_FR_LOGGING
566 sctp_log_fr(chk
->rec
.data
.TSN_seq
, chk
->snd_count
,
567 0, SCTP_FR_T3_MARKED
);
571 chk
->sent
= SCTP_DATAGRAM_RESEND
;
572 /* reset the TSN for striking and other FR stuff */
573 chk
->rec
.data
.doing_fast_retransmit
= 0;
575 if (sctp_debug_on
& SCTP_DEBUG_TIMER3
) {
576 kprintf("mark TSN:%x for retransmission\n", chk
->rec
.data
.TSN_seq
);
578 #endif /* SCTP_DEBUG */
579 /* Clear any time so NO RTT is being done */
581 /* Bump up the count */
582 if (compare_with_wrap(chk
->rec
.data
.TSN_seq
,
583 stcb
->asoc
.t3timeout_highest_marked
,
585 /* TSN_seq > than t3timeout so update */
586 stcb
->asoc
.t3timeout_highest_marked
= chk
->rec
.data
.TSN_seq
;
589 sctp_free_remote_addr(chk
->whoTo
);
593 if ((chk
->rec
.data
.state_flags
& SCTP_WINDOW_PROBE
) !=
597 chk
->rec
.data
.state_flags
&= ~SCTP_WINDOW_PROBE
;
601 if (chk
->sent
== SCTP_DATAGRAM_RESEND
) {
606 #ifdef SCTP_FR_LOGGING
607 sctp_log_fr(tsnfirst
, tsnlast
, num_mk
, SCTP_FR_T3_TIMEOUT
);
609 /* compensate for the number we marked */
610 stcb
->asoc
.peers_rwnd
+= (num_mk
/* * sizeof(struct mbuf)*/);
613 if (sctp_debug_on
& SCTP_DEBUG_TIMER1
) {
615 kprintf("LAST TSN marked was %x\n", tsnlast
);
616 kprintf("Num marked for retransmission was %d peer-rwd:%ld\n",
617 num_mk
, (u_long
)stcb
->asoc
.peers_rwnd
);
618 kprintf("LAST TSN marked was %x\n", tsnlast
);
619 kprintf("Num marked for retransmission was %d peer-rwd:%d\n",
621 (int)stcb
->asoc
.peers_rwnd
626 *num_marked
= num_mk
;
/* Local recount disagrees with the running counter: trust the recount. */
627 if (stcb
->asoc
.sent_queue_retran_cnt
!= cnt_mk
) {
628 kprintf("Local Audit says there are %d for retran asoc cnt:%d\n",
629 cnt_mk
, stcb
->asoc
.sent_queue_retran_cnt
);
630 #ifndef SCTP_AUDITING_ENABLED
631 stcb
->asoc
.sent_queue_retran_cnt
= cnt_mk
;
635 if (sctp_debug_on
& SCTP_DEBUG_TIMER3
) {
636 kprintf("**************************\n");
638 #endif /* SCTP_DEBUG */
640 /* Now check for a ECN Echo that may be stranded */
641 TAILQ_FOREACH(chk
, &stcb
->asoc
.control_send_queue
, sctp_next
) {
642 if ((chk
->whoTo
== net
) &&
643 (chk
->rec
.chunk_id
== SCTP_ECN_ECHO
)) {
644 sctp_free_remote_addr(chk
->whoTo
);
646 if (chk
->sent
!= SCTP_DATAGRAM_RESEND
) {
647 chk
->sent
= SCTP_DATAGRAM_RESEND
;
648 stcb
->asoc
.sent_queue_retran_cnt
++;
653 if ((orig_rwnd
== 0) && (stcb
->asoc
.total_flight
== 0) &&
654 (orig_flight
<= net
->mtu
)) {
656 * If the LAST packet sent was not acked and our rwnd is 0
657 * then we are in a win-probe state.
662 if (sctp_debug_on
& SCTP_DEBUG_TIMER1
) {
663 kprintf("WIN_PROBE set via o_rwnd=0 tf=0 and all:%d fit in mtu:%d\n",
664 orig_flight
, net
->mtu
);
671 if (sctp_debug_on
& SCTP_DEBUG_TIMER4
) {
672 kprintf("Audit total flight due to negative value net:%p\n",
675 #endif /* SCTP_DEBUG */
/* Audit path: rebuild all flight-size accounting from the sent queue. */
676 stcb
->asoc
.total_flight
= 0;
677 stcb
->asoc
.total_flight_count
= 0;
678 /* Clear all networks flight size */
679 TAILQ_FOREACH(lnets
, &stcb
->asoc
.nets
, sctp_next
) {
680 lnets
->flight_size
= 0;
682 if (sctp_debug_on
& SCTP_DEBUG_TIMER4
) {
683 kprintf("Net:%p c-f cwnd:%d ssthresh:%d\n",
684 lnets
, lnets
->cwnd
, lnets
->ssthresh
);
686 #endif /* SCTP_DEBUG */
688 TAILQ_FOREACH(chk
, &stcb
->asoc
.sent_queue
, sctp_next
) {
689 if (chk
->sent
< SCTP_DATAGRAM_RESEND
) {
690 stcb
->asoc
.total_flight
+= chk
->book_size
;
691 chk
->whoTo
->flight_size
+= chk
->book_size
;
692 stcb
->asoc
.total_flight_count
++;
696 /* Setup the ecn nonce re-sync point. We
697 * do this since retranmissions are NOT
698 * setup for ECN. This means that do to
699 * Karn's rule, we don't know the total
700 * of the peers ecn bits.
702 chk
= TAILQ_FIRST(&stcb
->asoc
.send_queue
);
704 stcb
->asoc
.nonce_resync_tsn
= stcb
->asoc
.sending_seq
;
706 stcb
->asoc
.nonce_resync_tsn
= chk
->rec
.data
.TSN_seq
;
708 stcb
->asoc
.nonce_wait_for_ecne
= 0;
709 stcb
->asoc
.nonce_sum_check
= 0;
710 /* We return 1 if we only have a window probe outstanding */
711 if (win_probes
&& (non_win_probes
== 0)) {
/*
 * NOTE(review): lossy extraction — the re-assignment of chk->whoTo to alt
 * (and the accompanying refcount bump) is among the dropped lines; only
 * the frees remain visible.  Comments only, code untouched.
 * Rehomes queued-but-unsent chunks: walks every stream outqueue on the
 * out_wheel and the association send_queue, and for chunks addressed to
 * the failed *net releases that address reference (presumably followed by
 * pointing the chunk at *alt — confirm against the unmangled source).
 */
718 sctp_move_all_chunks_to_alt(struct sctp_tcb
*stcb
,
719 struct sctp_nets
*net
,
720 struct sctp_nets
*alt
)
722 struct sctp_association
*asoc
;
723 struct sctp_stream_out
*outs
;
724 struct sctp_tmit_chunk
*chk
;
733 * now through all the streams checking for chunks sent to our
736 TAILQ_FOREACH(outs
, &asoc
->out_wheel
, next_spoke
) {
737 /* now clean up any chunks here */
738 TAILQ_FOREACH(chk
, &outs
->outqueue
, sctp_next
) {
739 if (chk
->whoTo
== net
) {
740 sctp_free_remote_addr(chk
->whoTo
);
746 /* Now check the pending queue */
747 TAILQ_FOREACH(chk
, &asoc
->send_queue
, sctp_next
) {
748 if (chk
->whoTo
== net
) {
749 sctp_free_remote_addr(chk
->whoTo
);
/*
 * NOTE(review): lossy extraction — several returns/braces and the
 * ms_goneby else-branch are missing; comments only, code untouched.
 * T3-rtx timer expiry for *net: picks an alternate net, marks un-acked
 * chunks for resend (sctp_mark_all_for_resend), ends any FR loss
 * recovery, arms satellite-loss recovery, backs off RTO/cwnd, runs
 * threshold management (against the net for real timeouts, against the
 * association only for window probes), rehomes chunks if the net became
 * unreachable (demoting it from primary via sctp_set_primary_addr), and
 * — unless still in COOKIE_ECHOED, where it merely restarts the send
 * timer — handles PR-SCTP FWD-TSN advancement.
 */
758 sctp_t3rxt_timer(struct sctp_inpcb
*inp
,
759 struct sctp_tcb
*stcb
,
760 struct sctp_nets
*net
)
762 struct sctp_nets
*alt
;
763 int win_probe
, num_mk
;
766 #ifdef SCTP_FR_LOGGING
767 sctp_log_fr(0, 0, 0, SCTP_FR_T3_TIMEOUT
);
769 /* Find an alternate and mark those for retransmission */
770 alt
= sctp_find_alternate_net(stcb
, net
);
771 win_probe
= sctp_mark_all_for_resend(stcb
, net
, alt
, &num_mk
);
773 /* FR Loss recovery just ended with the T3. */
774 stcb
->asoc
.fast_retran_loss_recovery
= 0;
776 /* setup the sat loss recovery that prevents
777 * satellite cwnd advance.
779 stcb
->asoc
.sat_t3_loss_recovery
= 1;
780 stcb
->asoc
.sat_t3_recovery_tsn
= stcb
->asoc
.sending_seq
;
782 /* Backoff the timer and cwnd */
783 sctp_backoff_on_timeout(stcb
, net
, win_probe
, num_mk
);
784 if (win_probe
== 0) {
785 /* We don't do normal threshold management on window probes */
786 if (sctp_threshold_management(inp
, stcb
, net
,
787 stcb
->asoc
.max_send_times
)) {
788 /* Association was destroyed */
791 if (net
!= stcb
->asoc
.primary_destination
) {
792 /* send a immediate HB if our RTO is stale */
794 unsigned int ms_goneby
;
795 SCTP_GETTIME_TIMEVAL(&now
);
796 if (net
->last_sent_time
.tv_sec
) {
797 ms_goneby
= (now
.tv_sec
- net
->last_sent_time
.tv_sec
) * 1000;
801 if ((ms_goneby
> net
->RTO
) || (net
->RTO
== 0)) {
802 /* no recent feed back in an RTO or more, request a RTT update */
803 sctp_send_hb(stcb
, 1, net
);
809 * For a window probe we don't penalize the net's but only
810 * the association. This may fail it if SACKs are not coming
811 * back. If sack's are coming with rwnd locked at 0, we will
812 * continue to hold things waiting for rwnd to raise
814 if (sctp_threshold_management(inp
, stcb
, NULL
,
815 stcb
->asoc
.max_send_times
)) {
816 /* Association was destroyed */
820 if (net
->dest_state
& SCTP_ADDR_NOT_REACHABLE
) {
821 /* Move all pending over too */
822 sctp_move_all_chunks_to_alt(stcb
, net
, alt
);
823 /* Was it our primary? */
824 if ((stcb
->asoc
.primary_destination
== net
) && (alt
!= net
)) {
826 * Yes, note it as such and find an alternate
827 * note: this means HB code must use this to resent
828 * the primary if it goes active AND if someone does
829 * a change-primary then this flag must be cleared
830 * from any net structures.
832 if (sctp_set_primary_addr(stcb
,
833 (struct sockaddr
*)NULL
,
835 net
->dest_state
|= SCTP_ADDR_WAS_PRIMARY
;
836 net
->src_addr_selected
= 0;
841 * Special case for cookie-echo'ed case, we don't do output
842 * but must await the COOKIE-ACK before retransmission
844 if (SCTP_GET_STATE(&stcb
->asoc
) == SCTP_STATE_COOKIE_ECHOED
) {
846 * Here we just reset the timer and start again since we
847 * have not established the asoc
850 if (sctp_debug_on
& SCTP_DEBUG_TIMER1
) {
851 kprintf("Special cookie case return\n");
853 #endif /* SCTP_DEBUG */
854 sctp_timer_start(SCTP_TIMER_TYPE_SEND
, inp
, stcb
, net
);
857 if (stcb
->asoc
.peer_supports_prsctp
) {
858 struct sctp_tmit_chunk
*lchk
;
859 lchk
= sctp_try_advance_peer_ack_point(stcb
, &stcb
->asoc
);
860 /* C3. See if we need to send a Fwd-TSN */
861 if (compare_with_wrap(stcb
->asoc
.advanced_peer_ack_point
,
862 stcb
->asoc
.last_acked_seq
, MAX_TSN
)) {
864 * ISSUE with ECN, see FWD-TSN processing for notes
865 * on issues that will occur when the ECN NONCE stuff
866 * is put into SCTP for cross checking.
869 if (sctp_debug_on
& SCTP_DEBUG_TIMER1
) {
870 kprintf("Forward TSN time\n");
872 #endif /* SCTP_DEBUG */
873 send_forward_tsn(stcb
, &stcb
->asoc
);
875 /* Assure a timer is up */
876 sctp_timer_start(SCTP_TIMER_TYPE_SEND
, stcb
->sctp_ep
, stcb
, lchk
->whoTo
);
/*
 * NOTE(review): lossy extraction — returns/braces are missing; comments
 * only, code untouched.  T1-INIT timer expiry: for a delayed connection
 * just clears the flag and (re)sends the INIT; otherwise runs threshold
 * management against max_init_times, backs off the primary destination,
 * caps net->RTO at initial_init_rto_max, optionally switches the primary
 * to an alternate when multiple nets exist, and re-sends the INIT.
 */
884 sctp_t1init_timer(struct sctp_inpcb
*inp
,
885 struct sctp_tcb
*stcb
,
886 struct sctp_nets
*net
)
888 /* bump the thresholds */
889 if (stcb
->asoc
.delayed_connection
) {
890 /* special hook for delayed connection. The
891 * library did NOT complete the rest of its
894 stcb
->asoc
.delayed_connection
= 0;
895 sctp_send_initiate(inp
, stcb
);
898 if (sctp_threshold_management(inp
, stcb
, net
,
899 stcb
->asoc
.max_init_times
)) {
900 /* Association was destroyed */
903 stcb
->asoc
.dropped_special_cnt
= 0;
904 sctp_backoff_on_timeout(stcb
, stcb
->asoc
.primary_destination
, 1, 0);
905 if (stcb
->asoc
.initial_init_rto_max
< net
->RTO
) {
906 net
->RTO
= stcb
->asoc
.initial_init_rto_max
;
908 if (stcb
->asoc
.numnets
> 1) {
909 /* If we have more than one addr use it */
910 struct sctp_nets
*alt
;
911 alt
= sctp_find_alternate_net(stcb
, stcb
->asoc
.primary_destination
);
912 if ((alt
!= NULL
) && (alt
!= stcb
->asoc
.primary_destination
)) {
913 sctp_move_all_chunks_to_alt(stcb
, stcb
->asoc
.primary_destination
, alt
);
914 stcb
->asoc
.primary_destination
= alt
;
917 /* Send out a new init */
918 sctp_send_initiate(inp
, stcb
);
923 * For cookie and asconf we actually need to find and mark for resend,
924 * then increment the resend counter (after all the threshold management
/*
 * NOTE(review): lossy extraction — returns, the panic/abort branch for a
 * missing cookie outside COOKIE_ECHOED, and several braces are dropped;
 * comments only, code untouched.  Cookie timer expiry: locates the
 * COOKIE-ECHO chunk on the control send queue; if absent while still in
 * COOKIE_ECHOED the association is aborted with a protocol-violation
 * cause (marker 0x40000002).  Otherwise runs threshold management
 * (max_init_times), backs off and rehomes the cookie's destination to an
 * alternate, and marks the cookie SCTP_DATAGRAM_RESEND so the output
 * path retransmits it.
 */
928 sctp_cookie_timer(struct sctp_inpcb
*inp
,
929 struct sctp_tcb
*stcb
,
930 struct sctp_nets
*net
)
932 struct sctp_nets
*alt
;
933 struct sctp_tmit_chunk
*cookie
;
934 /* first before all else we must find the cookie */
935 TAILQ_FOREACH(cookie
, &stcb
->asoc
.control_send_queue
, sctp_next
) {
936 if (cookie
->rec
.chunk_id
== SCTP_COOKIE_ECHO
) {
940 if (cookie
== NULL
) {
941 if (SCTP_GET_STATE(&stcb
->asoc
) == SCTP_STATE_COOKIE_ECHOED
) {
944 MGET(oper
, MB_DONTWAIT
, MT_DATA
);
946 struct sctp_paramhdr
*ph
;
949 oper
->m_len
= sizeof(struct sctp_paramhdr
) +
951 ph
= mtod(oper
, struct sctp_paramhdr
*);
952 ph
->param_type
= htons(SCTP_CAUSE_PROTOCOL_VIOLATION
);
953 ph
->param_length
= htons(oper
->m_len
);
954 ippp
= (u_int32_t
*)(ph
+ 1);
955 *ippp
= htonl(0x40000002);
957 sctp_abort_an_association(inp
, stcb
, SCTP_INTERNAL_ERROR
,
962 /* Ok we found the cookie, threshold management next */
963 if (sctp_threshold_management(inp
, stcb
, cookie
->whoTo
,
964 stcb
->asoc
.max_init_times
)) {
969 * cleared theshold management now lets backoff the address &
970 * select an alternate
972 stcb
->asoc
.dropped_special_cnt
= 0;
973 sctp_backoff_on_timeout(stcb
, cookie
->whoTo
, 1, 0);
974 alt
= sctp_find_alternate_net(stcb
, cookie
->whoTo
);
975 if (alt
!= cookie
->whoTo
) {
976 sctp_free_remote_addr(cookie
->whoTo
);
980 /* Now mark the retran info */
981 if (cookie
->sent
!= SCTP_DATAGRAM_RESEND
) {
982 stcb
->asoc
.sent_queue_retran_cnt
++;
984 cookie
->sent
= SCTP_DATAGRAM_RESEND
;
986 * Now call the output routine to kick out the cookie again, Note we
987 * don't mark any chunks for retran so that FR will need to kick in
988 * to move these (or a send timer).
/*
 * NOTE(review): lossy extraction — returns/braces missing; comments only,
 * code untouched.  Stream-reset timer expiry: finds the pending
 * STREAM-RESET request chunk on the control send queue (bails with a
 * debug message if absent), runs threshold management (max_send_times),
 * backs off and rehomes its destination to an alternate, rehomes any
 * stranded ECN-ECHO for the failed net, moves all queued chunks if the
 * net became unreachable, marks the request SCTP_DATAGRAM_RESEND, and
 * restarts the STRRESET timer.
 */
994 sctp_strreset_timer(struct sctp_inpcb
*inp
, struct sctp_tcb
*stcb
,
995 struct sctp_nets
*net
)
997 struct sctp_nets
*alt
;
998 struct sctp_tmit_chunk
*strrst
, *chk
;
999 struct sctp_stream_reset_req
*strreq
;
1000 /* find the existing STRRESET */
1001 TAILQ_FOREACH(strrst
, &stcb
->asoc
.control_send_queue
,
1003 if (strrst
->rec
.chunk_id
== SCTP_STREAM_RESET
) {
1004 /* is it what we want */
1005 strreq
= mtod(strrst
->data
, struct sctp_stream_reset_req
*);
1006 if (strreq
->sr_req
.ph
.param_type
== ntohs(SCTP_STR_RESET_REQUEST
)) {
1011 if (strrst
== NULL
) {
1013 if (sctp_debug_on
& SCTP_DEBUG_TIMER1
) {
1014 kprintf("Strange, strreset timer fires, but I can't find an str-reset?\n");
1016 #endif /* SCTP_DEBUG */
1019 /* do threshold management */
1020 if (sctp_threshold_management(inp
, stcb
, strrst
->whoTo
,
1021 stcb
->asoc
.max_send_times
)) {
1027 * cleared theshold management
1028 * now lets backoff the address & select an alternate
1030 sctp_backoff_on_timeout(stcb
, strrst
->whoTo
, 1, 0);
1031 alt
= sctp_find_alternate_net(stcb
, strrst
->whoTo
);
1032 sctp_free_remote_addr(strrst
->whoTo
);
1033 strrst
->whoTo
= alt
;
1036 /* See if a ECN Echo is also stranded */
1037 TAILQ_FOREACH(chk
, &stcb
->asoc
.control_send_queue
, sctp_next
) {
1038 if ((chk
->whoTo
== net
) &&
1039 (chk
->rec
.chunk_id
== SCTP_ECN_ECHO
)) {
1040 sctp_free_remote_addr(chk
->whoTo
);
1041 if (chk
->sent
!= SCTP_DATAGRAM_RESEND
) {
1042 chk
->sent
= SCTP_DATAGRAM_RESEND
;
1043 stcb
->asoc
.sent_queue_retran_cnt
++;
1049 if (net
->dest_state
& SCTP_ADDR_NOT_REACHABLE
) {
1051 * If the address went un-reachable, we need to move
1052 * to alternates for ALL chk's in queue
1054 sctp_move_all_chunks_to_alt(stcb
, net
, alt
);
1056 /* mark the retran info */
1057 if (strrst
->sent
!= SCTP_DATAGRAM_RESEND
)
1058 stcb
->asoc
.sent_queue_retran_cnt
++;
1059 strrst
->sent
= SCTP_DATAGRAM_RESEND
;
1061 /* restart the timer */
1062 sctp_timer_start(SCTP_TIMER_TYPE_STRRESET
, inp
, stcb
, strrst
->whoTo
);
/*
 * NOTE(review): lossy extraction — returns/braces missing; comments only,
 * code untouched.  ASCONF timer expiry: if no ASCONF has been sent yet,
 * composes and sends one; otherwise locates the pending ASCONF on the
 * control send queue, runs threshold management (max_send_times), and if
 * snd_count has exceeded max_send_times treats the peer as
 * ASCONF-incapable and cleans up (sctp_asconf_cleanup).  Otherwise backs
 * off and rehomes the ASCONF's destination, rehomes stranded ECN-ECHO
 * chunks, moves all queued chunks if the net became unreachable, and
 * marks the ASCONF SCTP_DATAGRAM_RESEND.  The in-code "PETER? FIX?"
 * comment already questions the reachability of the incapable branch.
 */
1067 sctp_asconf_timer(struct sctp_inpcb
*inp
, struct sctp_tcb
*stcb
,
1068 struct sctp_nets
*net
)
1070 struct sctp_nets
*alt
;
1071 struct sctp_tmit_chunk
*asconf
, *chk
;
1073 /* is this the first send, or a retransmission? */
1074 if (stcb
->asoc
.asconf_sent
== 0) {
1075 /* compose a new ASCONF chunk and send it */
1076 sctp_send_asconf(stcb
, net
);
1078 /* Retransmission of the existing ASCONF needed... */
1080 /* find the existing ASCONF */
1081 TAILQ_FOREACH(asconf
, &stcb
->asoc
.control_send_queue
,
1083 if (asconf
->rec
.chunk_id
== SCTP_ASCONF
) {
1087 if (asconf
== NULL
) {
1089 if (sctp_debug_on
& SCTP_DEBUG_TIMER1
) {
1090 kprintf("Strange, asconf timer fires, but I can't find an asconf?\n");
1092 #endif /* SCTP_DEBUG */
1095 /* do threshold management */
1096 if (sctp_threshold_management(inp
, stcb
, asconf
->whoTo
,
1097 stcb
->asoc
.max_send_times
)) {
1102 /* PETER? FIX? How will the following code ever run? If
1103 * the max_send_times is hit, threshold managment will
1104 * blow away the association?
1106 if (asconf
->snd_count
> stcb
->asoc
.max_send_times
) {
1108 * Something is rotten, peer is not responding to
1109 * ASCONFs but maybe is to data etc. e.g. it is not
1110 * properly handling the chunk type upper bits
1111 * Mark this peer as ASCONF incapable and cleanup
1114 if (sctp_debug_on
& SCTP_DEBUG_TIMER1
) {
1115 kprintf("asconf_timer: Peer has not responded to our repeated ASCONFs\n");
1117 #endif /* SCTP_DEBUG */
1118 sctp_asconf_cleanup(stcb
, net
);
1122 * cleared theshold management
1123 * now lets backoff the address & select an alternate
1125 sctp_backoff_on_timeout(stcb
, asconf
->whoTo
, 1, 0);
1126 alt
= sctp_find_alternate_net(stcb
, asconf
->whoTo
);
1127 sctp_free_remote_addr(asconf
->whoTo
);
1128 asconf
->whoTo
= alt
;
1131 /* See if a ECN Echo is also stranded */
1132 TAILQ_FOREACH(chk
, &stcb
->asoc
.control_send_queue
, sctp_next
) {
1133 if ((chk
->whoTo
== net
) &&
1134 (chk
->rec
.chunk_id
== SCTP_ECN_ECHO
)) {
1135 sctp_free_remote_addr(chk
->whoTo
);
1137 if (chk
->sent
!= SCTP_DATAGRAM_RESEND
) {
1138 chk
->sent
= SCTP_DATAGRAM_RESEND
;
1139 stcb
->asoc
.sent_queue_retran_cnt
++;
1145 if (net
->dest_state
& SCTP_ADDR_NOT_REACHABLE
) {
1147 * If the address went un-reachable, we need to move
1148 * to alternates for ALL chk's in queue
1150 sctp_move_all_chunks_to_alt(stcb
, net
, alt
);
1152 /* mark the retran info */
1153 if (asconf
->sent
!= SCTP_DATAGRAM_RESEND
)
1154 stcb
->asoc
.sent_queue_retran_cnt
++;
1155 asconf
->sent
= SCTP_DATAGRAM_RESEND
;
1161 * For the shutdown and shutdown-ack, we do not keep one around on the
1162 * control queue. This means we must generate a new one and call the general
1163 * chunk output routine, AFTER having done threshold
/*
 * NOTE(review): lossy extraction — returns and the chunk-output call are
 * missing; comments only, code untouched.  SHUTDOWN timer expiry:
 * threshold management (max_send_times), pick an alternate net, queue a
 * fresh SHUTDOWN to it, and restart the shutdown timer.
 */
1167 sctp_shutdown_timer(struct sctp_inpcb
*inp
, struct sctp_tcb
*stcb
,
1168 struct sctp_nets
*net
)
1170 struct sctp_nets
*alt
;
1171 /* first threshold managment */
1172 if (sctp_threshold_management(inp
, stcb
, net
, stcb
->asoc
.max_send_times
)) {
1176 /* second select an alternative */
1177 alt
= sctp_find_alternate_net(stcb
, net
);
1179 /* third generate a shutdown into the queue for out net */
1181 if (sctp_debug_on
& SCTP_DEBUG_OUTPUT4
) {
1182 kprintf("%s:%d sends a shutdown\n",
1189 sctp_send_shutdown(stcb
, alt
);
1191 /* if alt is NULL, there is no dest
1196 /* fourth restart timer */
1197 sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWN
, inp
, stcb
, alt
);
/*
 * NOTE(review): lossy extraction — returns/braces missing; comments only,
 * code untouched.  SHUTDOWN-ACK timer expiry: mirror of
 * sctp_shutdown_timer — threshold management, pick an alternate net,
 * queue a fresh SHUTDOWN-ACK to it, restart the shutdown-ack timer.
 */
1202 sctp_shutdownack_timer(struct sctp_inpcb
*inp
, struct sctp_tcb
*stcb
,
1203 struct sctp_nets
*net
)
1205 struct sctp_nets
*alt
;
1206 /* first threshold managment */
1207 if (sctp_threshold_management(inp
, stcb
, net
, stcb
->asoc
.max_send_times
)) {
1211 /* second select an alternative */
1212 alt
= sctp_find_alternate_net(stcb
, net
);
1214 /* third generate a shutdown into the queue for out net */
1215 sctp_send_shutdown_ack(stcb
, alt
);
1217 /* fourth restart timer */
1218 sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNACK
, inp
, stcb
, alt
);
/*
 * NOTE(review): lossy extraction — the chks_in_queue increment and
 * several returns/braces are missing; comments only, code untouched.
 * Debug consistency audit invoked when data is queued but nothing is on
 * send/sent: counts chunks across all stream outqueues, reports
 * mismatches with stream_queue_cnt and a stale sent_queue_retran_cnt
 * (resetting the latter), then kicks sctp_chunk_output to try to move
 * the stuck chunks; zeroes total_output_queue_size when no chunks exist.
 */
1223 sctp_audit_stream_queues_for_size(struct sctp_inpcb
*inp
,
1224 struct sctp_tcb
*stcb
)
1226 struct sctp_stream_out
*outs
;
1227 struct sctp_tmit_chunk
*chk
;
1228 unsigned int chks_in_queue
=0;
1230 if ((stcb
== NULL
) || (inp
== NULL
))
1232 if (TAILQ_EMPTY(&stcb
->asoc
.out_wheel
)) {
1233 kprintf("Strange, out_wheel empty nothing on sent/send and tot=%lu?\n",
1234 (u_long
)stcb
->asoc
.total_output_queue_size
);
1235 stcb
->asoc
.total_output_queue_size
= 0;
1238 if (stcb
->asoc
.sent_queue_retran_cnt
) {
1239 kprintf("Hmm, sent_queue_retran_cnt is non-zero %d\n",
1240 stcb
->asoc
.sent_queue_retran_cnt
);
1241 stcb
->asoc
.sent_queue_retran_cnt
= 0;
1243 /* Check to see if some data queued, if so report it */
1244 TAILQ_FOREACH(outs
, &stcb
->asoc
.out_wheel
, next_spoke
) {
1245 if (!TAILQ_EMPTY(&outs
->outqueue
)) {
1246 TAILQ_FOREACH(chk
, &outs
->outqueue
, sctp_next
) {
1251 if (chks_in_queue
!= stcb
->asoc
.stream_queue_cnt
) {
1252 kprintf("Hmm, stream queue cnt at %d I counted %d in stream out wheel\n",
1253 stcb
->asoc
.stream_queue_cnt
, chks_in_queue
);
1255 if (chks_in_queue
) {
1256 /* call the output queue function */
1257 sctp_chunk_output(inp
, stcb
, 1);
1258 if ((TAILQ_EMPTY(&stcb
->asoc
.send_queue
)) &&
1259 (TAILQ_EMPTY(&stcb
->asoc
.sent_queue
))) {
1260 /* Probably should go in and make it go back through and add fragments allowed */
1261 kprintf("Still nothing moved %d chunks are stuck\n", chks_in_queue
);
1264 kprintf("Found no chunks on any queue tot:%lu\n",
1265 (u_long
)stcb
->asoc
.total_output_queue_size
);
1266 stcb
->asoc
.total_output_queue_size
= 0;
1271 sctp_heartbeat_timer(struct sctp_inpcb
*inp
, struct sctp_tcb
*stcb
,
1272 struct sctp_nets
*net
)
1274 int cnt_of_unconf
=0;
1277 if (net
->hb_responded
== 0) {
1278 sctp_backoff_on_timeout(stcb
, net
, 1, 0);
1280 /* Zero PBA, if it needs it */
1281 if (net
->partial_bytes_acked
) {
1282 net
->partial_bytes_acked
= 0;
1285 TAILQ_FOREACH(net
, &stcb
->asoc
.nets
, sctp_next
) {
1286 if ((net
->dest_state
& SCTP_ADDR_UNCONFIRMED
) &&
1287 (net
->dest_state
& SCTP_ADDR_REACHABLE
)) {
1291 if ((stcb
->asoc
.total_output_queue_size
> 0) &&
1292 (TAILQ_EMPTY(&stcb
->asoc
.send_queue
)) &&
1293 (TAILQ_EMPTY(&stcb
->asoc
.sent_queue
))) {
1294 sctp_audit_stream_queues_for_size(inp
, stcb
);
1296 /* Send a new HB, this will do threshold managment, pick a new dest */
1297 if (sctp_send_hb(stcb
, 0, NULL
) < 0) {
1300 if (cnt_of_unconf
> 1) {
1302 * this will send out extra hb's up to maxburst if
1303 * there are any unconfirmed addresses.
1306 while ((cnt_sent
< stcb
->asoc
.max_burst
) && (cnt_of_unconf
> 1)) {
1307 if (sctp_send_hb(stcb
, 0, NULL
) == 0)
1316 #define SCTP_NUMBER_OF_MTU_SIZES 18
1317 static u_int32_t mtu_sizes
[]={
1340 sctp_getnext_mtu(struct sctp_inpcb
*inp
, u_int32_t cur_mtu
)
1342 /* select another MTU that is just bigger than this one */
1345 for (i
= 0; i
< SCTP_NUMBER_OF_MTU_SIZES
; i
++) {
1346 if (cur_mtu
< mtu_sizes
[i
]) {
1347 /* no max_mtu is bigger than this one */
1348 return (mtu_sizes
[i
]);
1351 /* here return the highest allowable */
1357 sctp_pathmtu_timer(struct sctp_inpcb
*inp
,
1358 struct sctp_tcb
*stcb
,
1359 struct sctp_nets
*net
)
1363 /* restart the timer in any case */
1364 next_mtu
= sctp_getnext_mtu(inp
, net
->mtu
);
1365 if (next_mtu
<= net
->mtu
) {
1369 if (net
->ro
.ro_rt
!= NULL
) {
1370 /* only if we have a route and interface do we
1371 * set anything. Note we always restart
1372 * the timer though just in case it is updated
1373 * (i.e. the ifp) or route/ifp is populated.
1375 if (net
->ro
.ro_rt
->rt_ifp
!= NULL
) {
1376 if (net
->ro
.ro_rt
->rt_ifp
->if_mtu
> next_mtu
) {
1377 /* ok it will fit out the door */
1378 net
->mtu
= next_mtu
;
1382 /* restart the timer */
1383 sctp_timer_start(SCTP_TIMER_TYPE_PATHMTURAISE
, inp
, stcb
, net
);
1387 sctp_autoclose_timer(struct sctp_inpcb
*inp
,
1388 struct sctp_tcb
*stcb
,
1389 struct sctp_nets
*net
)
1391 struct timeval tn
, *tim_touse
;
1392 struct sctp_association
*asoc
;
1395 SCTP_GETTIME_TIMEVAL(&tn
);
1396 if (stcb
->asoc
.sctp_autoclose_ticks
&&
1397 (inp
->sctp_flags
& SCTP_PCB_FLAGS_AUTOCLOSE
)) {
1398 /* Auto close is on */
1400 /* pick the time to use */
1401 if (asoc
->time_last_rcvd
.tv_sec
>
1402 asoc
->time_last_sent
.tv_sec
) {
1403 tim_touse
= &asoc
->time_last_rcvd
;
1405 tim_touse
= &asoc
->time_last_sent
;
1407 /* Now has long enough transpired to autoclose? */
1408 ticks_gone_by
= ((tn
.tv_sec
- tim_touse
->tv_sec
) * hz
);
1409 if ((ticks_gone_by
> 0) &&
1410 (ticks_gone_by
>= (int)asoc
->sctp_autoclose_ticks
)) {
1412 * autoclose time has hit, call the output routine,
1413 * which should do nothing just to be SURE we don't
1414 * have hanging data. We can then safely check the
1415 * queues and know that we are clear to send shutdown
1417 sctp_chunk_output(inp
, stcb
, 9);
1419 if (TAILQ_EMPTY(&asoc
->send_queue
) &&
1420 TAILQ_EMPTY(&asoc
->sent_queue
)) {
1422 * there is nothing queued to send,
1425 if (SCTP_GET_STATE(asoc
) !=
1426 SCTP_STATE_SHUTDOWN_SENT
) {
1427 /* only send SHUTDOWN 1st time thru */
1429 if (sctp_debug_on
& SCTP_DEBUG_OUTPUT4
) {
1430 kprintf("%s:%d sends a shutdown\n",
1436 sctp_send_shutdown(stcb
, stcb
->asoc
.primary_destination
);
1437 asoc
->state
= SCTP_STATE_SHUTDOWN_SENT
;
1438 sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWN
,
1439 stcb
->sctp_ep
, stcb
,
1440 asoc
->primary_destination
);
1441 sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNGUARD
,
1442 stcb
->sctp_ep
, stcb
,
1443 asoc
->primary_destination
);
1448 * No auto close at this time, reset t-o to
1452 /* fool the timer startup to use the time left */
1453 tmp
= asoc
->sctp_autoclose_ticks
;
1454 asoc
->sctp_autoclose_ticks
-= ticks_gone_by
;
1455 sctp_timer_start(SCTP_TIMER_TYPE_AUTOCLOSE
, inp
, stcb
,
1457 /* restore the real tick value */
1458 asoc
->sctp_autoclose_ticks
= tmp
;
1464 sctp_iterator_timer(struct sctp_iterator
*it
)
1467 /* only one iterator can run at a
1468 * time. This is the only way we
1469 * can cleanly pull ep's from underneath
1470 * all the running interators when a
1473 SCTP_ITERATOR_LOCK();
1474 if (it
->inp
== NULL
) {
1475 /* iterator is complete */
1477 SCTP_ITERATOR_UNLOCK();
1478 SCTP_INP_INFO_WLOCK();
1479 LIST_REMOVE(it
, sctp_nxt_itr
);
1480 /* stopping the callout is not needed, in theory,
1481 * but I am paranoid.
1483 SCTP_INP_INFO_WUNLOCK();
1484 callout_stop(&it
->tmr
.timer
);
1485 if (it
->function_atend
!= NULL
) {
1486 (*it
->function_atend
)(it
->pointer
, it
->val
);
1492 SCTP_INP_WLOCK(it
->inp
);
1493 while ((it
->pcb_flags
) && ((it
->inp
->sctp_flags
& it
->pcb_flags
) != it
->pcb_flags
)) {
1494 /* we do not like this ep */
1495 if (it
->iterator_flags
& SCTP_ITERATOR_DO_SINGLE_INP
) {
1496 SCTP_INP_WUNLOCK(it
->inp
);
1497 goto done_with_iterator
;
1499 SCTP_INP_WUNLOCK(it
->inp
);
1500 it
->inp
= LIST_NEXT(it
->inp
, sctp_list
);
1501 if (it
->inp
== NULL
) {
1502 goto done_with_iterator
;
1504 SCTP_INP_WLOCK(it
->inp
);
1506 if ((it
->inp
->inp_starting_point_for_iterator
!= NULL
) &&
1507 (it
->inp
->inp_starting_point_for_iterator
!= it
)) {
1508 kprintf("Iterator collision, we must wait for other iterator at %x\n",
1510 SCTP_INP_WUNLOCK(it
->inp
);
1511 goto start_timer_return
;
1513 /* now we do the actual write to this guy */
1514 it
->inp
->inp_starting_point_for_iterator
= it
;
1515 SCTP_INP_WUNLOCK(it
->inp
);
1516 SCTP_INP_RLOCK(it
->inp
);
1517 /* if we reach here we found a inp acceptable, now through each
1518 * one that has the association in the right state
1520 if (it
->stcb
== NULL
) {
1521 it
->stcb
= LIST_FIRST(&it
->inp
->sctp_asoc_list
);
1523 if (it
->stcb
->asoc
.stcb_starting_point_for_iterator
== it
) {
1524 it
->stcb
->asoc
.stcb_starting_point_for_iterator
= NULL
;
1527 SCTP_TCB_LOCK(it
->stcb
);
1528 if (it
->asoc_state
&& ((it
->stcb
->asoc
.state
& it
->asoc_state
) != it
->asoc_state
)) {
1529 SCTP_TCB_UNLOCK(it
->stcb
);
1530 it
->stcb
= LIST_NEXT(it
->stcb
, sctp_tcblist
);
1534 /* run function on this one */
1535 SCTP_INP_RUNLOCK(it
->inp
);
1536 (*it
->function_toapply
)(it
->inp
, it
->stcb
, it
->pointer
, it
->val
);
1537 sctp_chunk_output(it
->inp
, it
->stcb
, 1);
1538 SCTP_TCB_UNLOCK(it
->stcb
);
1539 /* see if we have limited out */
1540 if (cnt
> SCTP_MAX_ITERATOR_AT_ONCE
) {
1541 it
->stcb
->asoc
.stcb_starting_point_for_iterator
= it
;
1543 SCTP_ITERATOR_UNLOCK();
1544 sctp_timer_start(SCTP_TIMER_TYPE_ITERATOR
, (struct sctp_inpcb
*)it
, NULL
, NULL
);
1547 SCTP_INP_RLOCK(it
->inp
);
1548 it
->stcb
= LIST_NEXT(it
->stcb
, sctp_tcblist
);
1550 /* if we reach here, we ran out of stcb's in the inp we are looking at */
1551 SCTP_INP_RUNLOCK(it
->inp
);
1552 SCTP_INP_WLOCK(it
->inp
);
1553 it
->inp
->inp_starting_point_for_iterator
= NULL
;
1554 SCTP_INP_WUNLOCK(it
->inp
);
1555 if (it
->iterator_flags
& SCTP_ITERATOR_DO_SINGLE_INP
) {
1558 SCTP_INP_INFO_RLOCK();
1559 it
->inp
= LIST_NEXT(it
->inp
, sctp_list
);
1560 SCTP_INP_INFO_RUNLOCK();
1562 if (it
->inp
== NULL
) {
1563 goto done_with_iterator
;
1565 goto select_a_new_ep
;