/*	$KAME: sctp_indata.c,v 1.35 2004/08/17 04:06:17 itojun Exp $	*/
/*	$DragonFly: src/sys/netinet/sctp_indata.c,v 1.7 2007/04/22 01:13:14 dillon Exp $	*/

/*
 * Copyright (C) 2002, 2003, 2004 Cisco Systems Inc,
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the project nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE PROJECT AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE PROJECT OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */
#if !(defined(__OpenBSD__) || defined(__APPLE__))
#include "opt_ipsec.h"
#endif
#if defined(__FreeBSD__) || defined(__DragonFly__)
#include "opt_inet6.h"
#include "opt_inet.h"
#endif
#if defined(__NetBSD__)
#include "opt_inet.h"
#endif

#ifdef __APPLE__
#include <sctp.h>
#elif !defined(__OpenBSD__)
#include "opt_sctp.h"
#endif
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/mbuf.h>
#include <sys/malloc.h>
#include <sys/socket.h>
#include <sys/socketvar.h>
#include <sys/sysctl.h>

#include <net/if.h>
#include <net/route.h>

#if defined(__FreeBSD__) && __FreeBSD_version >= 500000
#include <sys/limits.h>
#else
#include <machine/limits.h>
#endif
#include <machine/cpu.h>

#include <netinet/in.h>
#include <netinet/in_systm.h>
#include <netinet/ip.h>
#ifdef INET6
#include <netinet/ip6.h>
#endif /* INET6 */
#include <netinet/in_pcb.h>
#include <netinet/in_var.h>
#include <netinet/ip_var.h>
#ifdef INET6
#include <netinet6/ip6_var.h>
#endif /* INET6 */
#include <netinet/ip_icmp.h>
#include <netinet/icmp_var.h>
#include <netinet/sctp_var.h>
#include <netinet/sctp_pcb.h>
#include <netinet/sctp_header.h>
#include <netinet/sctputil.h>
#include <netinet/sctp_output.h>
#include <netinet/sctp_input.h>
#include <netinet/sctp_hashdriver.h>
#include <netinet/sctp_indata.h>
#include <netinet/sctp_uio.h>
#include <netinet/sctp_timer.h>
#ifdef IPSEC
#ifndef __OpenBSD__
#include <netinet6/ipsec.h>
#include <netproto/key/key.h>
#else
#undef IPSEC
#endif
#endif /* IPSEC */

#include <net/net_osdep.h>

#ifdef SCTP_DEBUG
extern u_int32_t sctp_debug_on;
#endif
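
/*
 * Note on the kprintf() tracing throughout this file: it is compiled in
 * only under SCTP_DEBUG and is then gated at run time by the bits set
 * in sctp_debug_on (SCTP_DEBUG_INDATA1 covers the delivery paths,
 * SCTP_DEBUG_INDATA4 the rwnd computation in sctp_set_rwnd() below).
 */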
/*
 * NOTES: On the outbound side of things I need to check the sack timer to
 * see if I should generate a sack into the chunk queue (only if I have
 * data to send and will be sending it, so the sack can be bundled).
 *
 * The callback in sctp_usrreq.c will get called when the socket is read
 * from. This will cause sctp_service_queues() to get called on the top
 * entry in the list.
 */

extern int sctp_strict_sacks;
void
sctp_set_rwnd(struct sctp_tcb *stcb, struct sctp_association *asoc)
{
	u_int32_t calc, calc_w_oh;

#ifdef SCTP_DEBUG
	if (sctp_debug_on & SCTP_DEBUG_INDATA4) {
		kprintf("cc:%lu hiwat:%lu lowat:%lu mbcnt:%lu mbmax:%lu\n",
		    (u_long)stcb->sctp_socket->so_rcv.ssb_cc,
		    (u_long)stcb->sctp_socket->so_rcv.ssb_hiwat,
		    (u_long)stcb->sctp_socket->so_rcv.ssb_lowat,
		    (u_long)stcb->sctp_socket->so_rcv.ssb_mbcnt,
		    (u_long)stcb->sctp_socket->so_rcv.ssb_mbmax);
		kprintf("Setting rwnd to: sb:%ld - (del:%d + reasm:%d str:%d)\n",
		    sctp_sbspace(&stcb->sctp_socket->so_rcv),
		    asoc->size_on_delivery_queue,
		    asoc->size_on_reasm_queue,
		    asoc->size_on_all_streams);
	}
#endif
	if (stcb->sctp_socket->so_rcv.ssb_cc == 0 &&
	    asoc->size_on_delivery_queue == 0 &&
	    asoc->size_on_reasm_queue == 0 &&
	    asoc->size_on_all_streams == 0) {
		/* Full rwnd granted */
		asoc->my_rwnd = max(stcb->sctp_socket->so_rcv.ssb_hiwat,
		    SCTP_MINIMAL_RWND);
		return;
	}
	/* get actual space */
	calc = (u_int32_t)sctp_sbspace(&stcb->sctp_socket->so_rcv);

	/*
	 * take out what has NOT been put on socket queue and
	 * we yet hold for putting up.
	 */
	calc = sctp_sbspace_sub(calc, (u_int32_t)asoc->size_on_delivery_queue);
	calc = sctp_sbspace_sub(calc, (u_int32_t)asoc->size_on_reasm_queue);
	calc = sctp_sbspace_sub(calc, (u_int32_t)asoc->size_on_all_streams);

	/* what is the overhead of all these rwnd's */
	calc_w_oh = sctp_sbspace_sub(calc, stcb->asoc.my_rwnd_control_len);

	asoc->my_rwnd = calc;
	if (calc_w_oh == 0) {
		/*
		 * If our overhead is greater than the advertised
		 * rwnd, we clamp the rwnd to 1. This lets us
		 * still accept inbound segments, but hopefully will
		 * shut the sender down when he finally gets the message.
		 */
		asoc->my_rwnd = 1;
	} else {
		/* SWS threshold */
		if (asoc->my_rwnd &&
		    (asoc->my_rwnd < stcb->sctp_ep->sctp_ep.sctp_sws_receiver)) {
			/* SWS engaged, tell peer none left */
			asoc->my_rwnd = 1;
#ifdef SCTP_DEBUG
			if (sctp_debug_on & SCTP_DEBUG_INDATA4) {
				kprintf(" - SWS zeros\n");
			}
		} else {
			if (sctp_debug_on & SCTP_DEBUG_INDATA4) {
				kprintf("\n");
			}
#endif
		}
	}
}
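
/*
 * Worked example for the calculation above (illustrative numbers,
 * assuming sctp_sbspace_sub() floors at zero): with sbspace = 64000
 * and 1500/3000/500 bytes pending on the delivery, reasm and stream
 * queues, calc = 64000 - 1500 - 3000 - 500 = 59000 and that is what
 * my_rwnd advertises.  If my_rwnd_control_len were >= 59000, or the
 * result fell below sctp_sws_receiver, the peer is told rwnd = 1
 * instead.
 */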
/*
 * Take a chk structure and build it into an mbuf. Hmm should we change things
 * so that instead we store the data side in a chunk?
 */
static struct mbuf *
sctp_build_ctl_nchunk(struct sctp_tcb *stcb, uint32_t tsn, uint32_t ppid,
    uint32_t context, uint16_t stream_no, uint16_t stream_seq, uint8_t flags)
{
	struct sctp_sndrcvinfo *outinfo;
	struct cmsghdr *cmh;
	struct mbuf *ret;

	if ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_RECVDATAIOEVNT) == 0) {
		/* user does not want the sndrcv ctl */
		return (NULL);
	}

	MGETHDR(ret, MB_DONTWAIT, MT_CONTROL);
	if (ret == NULL) {
		/* No space */
		return (ret);
	}
	/* We need a CMSG header followed by the struct */
	cmh = mtod(ret, struct cmsghdr *);
	outinfo = (struct sctp_sndrcvinfo *)CMSG_DATA(cmh);
	cmh->cmsg_level = IPPROTO_SCTP;
	cmh->cmsg_type = SCTP_SNDRCV;
	cmh->cmsg_len = CMSG_LEN(sizeof(struct sctp_sndrcvinfo));
	outinfo->sinfo_stream = stream_no;
	outinfo->sinfo_ssn = stream_seq;
	if (flags & SCTP_DATA_UNORDERED) {
		outinfo->sinfo_flags = MSG_UNORDERED;
	} else {
		outinfo->sinfo_flags = 0;
	}
	outinfo->sinfo_ppid = ppid;
	outinfo->sinfo_context = context;
	outinfo->sinfo_assoc_id = sctp_get_associd(stcb);
	outinfo->sinfo_tsn = tsn;
	outinfo->sinfo_cumtsn = stcb->asoc.cumulative_tsn;
	ret->m_len = cmh->cmsg_len;
	ret->m_pkthdr.len = ret->m_len;
	/*
	 * We track how much control-message length has gone onto the sb
	 * and do not count it in the rwnd calculation.
	 */
	stcb->asoc.my_rwnd_control_len +=
	    CMSG_LEN(sizeof(struct sctp_sndrcvinfo));

	return (ret);
}
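
/*
 * Illustrative only: a userland receiver consuming the control mbuf
 * built above would walk its recvmsg() msghdr roughly like so:
 *
 *	struct cmsghdr *cmh = CMSG_FIRSTHDR(&msg);
 *
 *	if (cmh != NULL && cmh->cmsg_level == IPPROTO_SCTP &&
 *	    cmh->cmsg_type == SCTP_SNDRCV) {
 *		struct sctp_sndrcvinfo *info =
 *		    (struct sctp_sndrcvinfo *)CMSG_DATA(cmh);
 *		... use info->sinfo_stream, sinfo_ssn, sinfo_tsn ...
 *	}
 */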
/*
 * Take a chk structure and build it into an mbuf. Should we change things
 * so that instead we store the data side in a chunk?
 */
static
struct mbuf *
sctp_build_ctl(struct sctp_tcb *stcb, struct sctp_tmit_chunk *chk)
{
	struct sctp_sndrcvinfo *outinfo;
	struct cmsghdr *cmh;
	struct mbuf *ret;

	if ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_RECVDATAIOEVNT) == 0) {
		/* user does not want the sndrcv ctl */
		return (NULL);
	}
	MGET(ret, MB_DONTWAIT, MT_CONTROL);
	if (ret == NULL) {
		/* No space */
		return (ret);
	}
	/* We need a CMSG header followed by the struct */
	cmh = mtod(ret, struct cmsghdr *);
	outinfo = (struct sctp_sndrcvinfo *)CMSG_DATA(cmh);
	cmh->cmsg_level = IPPROTO_SCTP;
	cmh->cmsg_type = SCTP_SNDRCV;
	cmh->cmsg_len = CMSG_LEN(sizeof(struct sctp_sndrcvinfo));
	outinfo->sinfo_stream = chk->rec.data.stream_number;
	outinfo->sinfo_ssn = chk->rec.data.stream_seq;
	if (chk->rec.data.rcv_flags & SCTP_DATA_UNORDERED) {
		outinfo->sinfo_flags = MSG_UNORDERED;
	} else {
		outinfo->sinfo_flags = 0;
	}
	outinfo->sinfo_ppid = chk->rec.data.payloadtype;
	outinfo->sinfo_context = chk->rec.data.context;
	outinfo->sinfo_assoc_id = sctp_get_associd(stcb);
	outinfo->sinfo_tsn = chk->rec.data.TSN_seq;
	outinfo->sinfo_cumtsn = stcb->asoc.cumulative_tsn;
	ret->m_len = cmh->cmsg_len;
	stcb->asoc.my_rwnd_control_len +=
	    CMSG_LEN(sizeof(struct sctp_sndrcvinfo));

	return (ret);
}
int
sctp_deliver_data(struct sctp_tcb *stcb, struct sctp_association *asoc,
    struct sctp_tmit_chunk *chk, int hold_locks)
{
	struct mbuf *control, *m;
	int free_it;
	struct sockaddr_in6 sin6;
	struct sockaddr *to;

#ifdef SCTP_DEBUG
	if (sctp_debug_on & SCTP_DEBUG_INDATA1) {
		kprintf("I am now in Deliver data! (%p)\n", chk);
	}
#endif
	/* get a write lock on the inp if not already */
	if (hold_locks == 0) {
		SCTP_TCB_UNLOCK(stcb);
		SCTP_INP_WLOCK(stcb->sctp_ep);
		SCTP_TCB_LOCK(stcb);
	}
	free_it = 0;
	/* We always add it to the queue */
	if (stcb && (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE)) {
		/* socket above is long gone */
#ifdef SCTP_DEBUG
		if (sctp_debug_on & SCTP_DEBUG_INDATA1) {
			kprintf("gone is gone!\n");
		}
#endif
		if (chk != NULL) {
			if (chk->data)
				sctp_m_freem(chk->data);
			chk->data = NULL;
			sctp_free_remote_addr(chk->whoTo);
			SCTP_ZONE_FREE(sctppcbinfo.ipi_zone_chunk, chk);
			sctppcbinfo.ipi_count_chunk--;
			if ((int)sctppcbinfo.ipi_count_chunk < 0) {
				panic("Chunk count is negative");
			}
			sctppcbinfo.ipi_gencnt_chunk++;
		}
		TAILQ_FOREACH(chk, &asoc->delivery_queue, sctp_next) {
			asoc->size_on_delivery_queue -= chk->send_size;
			asoc->cnt_on_delivery_queue--;
			/*
			 * Lose the data pointer, since it's in the socket buffer
			 */
			if (chk->data)
				sctp_m_freem(chk->data);
			chk->data = NULL;
			/* Now free the address and data */
			sctp_free_remote_addr(chk->whoTo);
			SCTP_ZONE_FREE(sctppcbinfo.ipi_zone_chunk, chk);
			sctppcbinfo.ipi_count_chunk--;
			if ((int)sctppcbinfo.ipi_count_chunk < 0) {
				panic("Chunk count is negative");
			}
			sctppcbinfo.ipi_gencnt_chunk++;
		}
		if (hold_locks == 0)
			SCTP_INP_WUNLOCK(stcb->sctp_ep);
		return (0);
	}
	if (chk != NULL) {
		TAILQ_INSERT_TAIL(&asoc->delivery_queue, chk, sctp_next);
		asoc->size_on_delivery_queue += chk->send_size;
		asoc->cnt_on_delivery_queue++;
	}
	if (asoc->fragmented_delivery_inprogress) {
		/*
		 * oh oh, fragmented delivery in progress,
		 * return out of here.
		 */
#ifdef SCTP_DEBUG
		if (sctp_debug_on & SCTP_DEBUG_INDATA1) {
			kprintf("Fragmented delivery in progress?\n");
		}
#endif
		if (hold_locks == 0)
			SCTP_INP_WUNLOCK(stcb->sctp_ep);
		return (0);
	}
	/* Now grab the first one */
	chk = TAILQ_FIRST(&asoc->delivery_queue);
	if (chk == NULL) {
		/* Nothing in queue */
#ifdef SCTP_DEBUG
		if (sctp_debug_on & SCTP_DEBUG_INDATA1) {
			kprintf("Nothing in queue?\n");
		}
#endif
		asoc->size_on_delivery_queue = 0;
		asoc->cnt_on_delivery_queue = 0;
		if (hold_locks == 0)
			SCTP_INP_WUNLOCK(stcb->sctp_ep);
		return (0);
	}

	if (stcb->sctp_socket->so_rcv.ssb_cc >= stcb->sctp_socket->so_rcv.ssb_hiwat) {
		/* Boy, there really is NO room */
		if (hold_locks == 0)
			SCTP_INP_WUNLOCK(stcb->sctp_ep);
		return (0);
	}
#ifdef SCTP_DEBUG
	if (sctp_debug_on & SCTP_DEBUG_INDATA1) {
		kprintf("Now to the delivery with chk(%p)!\n", chk);
	}
#endif
	/* XXX need to append PKTHDR to the socket buffer first */
	if ((chk->data->m_flags & M_PKTHDR) == 0) {
		MGETHDR(m, MB_DONTWAIT, MT_DATA);
		if (m == NULL) {
			/* no room! */
			if (hold_locks == 0)
				SCTP_INP_WUNLOCK(stcb->sctp_ep);
			return (0);
		}
		m->m_pkthdr.len = chk->send_size;
		m->m_len = 0;
		m->m_next = chk->data;
		chk->data = m;
	}
	if (chk->rec.data.rcv_flags & SCTP_DATA_LAST_FRAG) {
		if (chk->data->m_next == NULL) {
			/* hopefully we hit here most of the time */
			chk->data->m_flags |= M_EOR;
		} else {
			/* Add the flag to the LAST mbuf in the chain */
			m = chk->data;
			while (m->m_next != NULL) {
				m = m->m_next;
			}
			m->m_flags |= M_EOR;
		}
	}

	if (chk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) {
		struct sockaddr_in6 lsa6;

		control = sctp_build_ctl(stcb, chk);
		to = (struct sockaddr *)&chk->whoTo->ro._l_addr;
		if ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_NEEDS_MAPPED_V4) &&
		    to->sa_family == AF_INET) {
			struct sockaddr_in *sin;

			sin = (struct sockaddr_in *)to;
			bzero(&sin6, sizeof(sin6));
			sin6.sin6_family = AF_INET6;
			sin6.sin6_len = sizeof(struct sockaddr_in6);
			sin6.sin6_addr.s6_addr16[2] = 0xffff;
			bcopy(&sin->sin_addr, &sin6.sin6_addr.s6_addr16[3],
			    sizeof(sin6.sin6_addr.s6_addr16[3]));
			sin6.sin6_port = sin->sin_port;
			to = (struct sockaddr *)&sin6;
		}
		/* check and strip embedded scope junk */
		to = (struct sockaddr *)sctp_recover_scope((struct sockaddr_in6 *)to,
		    &lsa6);
		if (((struct sockaddr_in *)to)->sin_port == 0) {
			kprintf("Huh a, port is %d not net:%x %d?\n",
			    ((struct sockaddr_in *)to)->sin_port,
			    (u_int)chk->whoTo,
			    (int)(ntohs(stcb->rport)));
			((struct sockaddr_in *)to)->sin_port = stcb->rport;
		}
		if (sctp_sbspace(&stcb->sctp_socket->so_rcv) < (long)chk->send_size) {
			/* Gak not enough room */
			if (control) {
				sctp_m_freem(control);
				stcb->asoc.my_rwnd_control_len -=
				    CMSG_LEN(sizeof(struct sctp_sndrcvinfo));
			}
			goto skip;
		}
		if (!sctp_sbappendaddr_nocheck(&stcb->sctp_socket->so_rcv,
		    to, chk->data, control, stcb->asoc.my_vtag,
		    stcb->sctp_ep)) {
			/* Gak not enough room */
			if (control) {
				sctp_m_freem(control);
				stcb->asoc.my_rwnd_control_len -=
				    CMSG_LEN(sizeof(struct sctp_sndrcvinfo));
			}
		} else {
			if ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL) == 0) {
				if (sctp_add_to_socket_q(stcb->sctp_ep, stcb)) {
					stcb->asoc.my_rwnd_control_len +=
					    sizeof(struct mbuf);
				}
			} else {
				stcb->asoc.my_rwnd_control_len += sizeof(struct mbuf);
			}
			free_it = 1;
		}
	} else {
		/* append to an already started message. */
		if (sctp_sbspace(&stcb->sctp_socket->so_rcv) >=
		    (long)chk->send_size) {
			ssb_append(&stcb->sctp_socket->so_rcv, chk->data);
			free_it = 1;
		}
	}
skip:
	if (hold_locks == 0)
		SCTP_INP_WUNLOCK(stcb->sctp_ep);
	/* free up the one we inserted */
	if (free_it) {
		/* Pull it off the queue */
#ifdef SCTP_DEBUG
		if (sctp_debug_on & SCTP_DEBUG_INDATA1) {
			kprintf("Free_it true, doing tickle wakeup\n");
		}
#endif
		sctp_sorwakeup(stcb->sctp_ep, stcb->sctp_socket);
		TAILQ_REMOVE(&asoc->delivery_queue, chk, sctp_next);
		asoc->size_on_delivery_queue -= chk->send_size;
		asoc->cnt_on_delivery_queue--;
		/* Lose the data pointer, since it's in the socket buffer */
		chk->data = NULL;
		/* Now free the address and data */
		sctp_free_remote_addr(chk->whoTo);
		SCTP_ZONE_FREE(sctppcbinfo.ipi_zone_chunk, chk);
		sctppcbinfo.ipi_count_chunk--;
		if ((int)sctppcbinfo.ipi_count_chunk < 0) {
			panic("Chunk count is negative");
		}
		sctppcbinfo.ipi_gencnt_chunk++;
	}
	return (free_it);
}
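
/*
 * Locking convention for the hold_locks argument above and in
 * sctp_service_reassembly(): a caller that already holds the INP write
 * lock passes 1 (e.g. the stream-queue drain loop in
 * sctp_service_reassembly() calls sctp_deliver_data(stcb, asoc, chk, 1)),
 * while a top-level caller passes 0 and this routine takes and drops
 * the lock itself.
 */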
/*
 * We are delivering currently from the reassembly queue. We must continue to
 * deliver until we either:
 * 1) run out of space.
 * 2) run out of sequential TSN's
 * 3) hit the SCTP_DATA_LAST_FRAG flag.
 */
static void
sctp_service_reassembly(struct sctp_tcb *stcb, struct sctp_association *asoc, int hold_locks)
{
	struct sockaddr *to;
	struct sockaddr_in6 sin6;
	struct sctp_tmit_chunk *chk, *at;
	struct mbuf *control, *m;
	u_int16_t nxt_todel;
	u_int16_t stream_no;
	int cntDel;

	cntDel = stream_no = 0;
	if (hold_locks == 0) {
		/*
		 * you always have the TCB lock, we need
		 * to have the inp write lock as well.
		 */
		SCTP_TCB_UNLOCK(stcb);
		SCTP_INP_WLOCK(stcb->sctp_ep);
		SCTP_TCB_LOCK(stcb);
	}
	if (stcb && (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE)) {
		/* socket above is long gone */
		asoc->fragmented_delivery_inprogress = 0;
		TAILQ_FOREACH(chk, &asoc->reasmqueue, sctp_next) {
			asoc->size_on_delivery_queue -= chk->send_size;
			asoc->cnt_on_delivery_queue--;
			/*
			 * Lose the data pointer, since it's in the socket buffer
			 */
			if (chk->data)
				sctp_m_freem(chk->data);
			chk->data = NULL;
			/* Now free the address and data */
			sctp_free_remote_addr(chk->whoTo);
			SCTP_ZONE_FREE(sctppcbinfo.ipi_zone_chunk, chk);
			sctppcbinfo.ipi_count_chunk--;
			if ((int)sctppcbinfo.ipi_count_chunk < 0) {
				panic("Chunk count is negative");
			}
			sctppcbinfo.ipi_gencnt_chunk++;
		}
		if (hold_locks == 0)
			SCTP_INP_WUNLOCK(stcb->sctp_ep);
		return;
	}
	do {
		if (stcb->sctp_socket->so_rcv.ssb_cc >=
		    stcb->sctp_socket->so_rcv.ssb_hiwat) {
			if (cntDel) {
				sctp_sorwakeup(stcb->sctp_ep,
				    stcb->sctp_socket);
			}
			if (hold_locks == 0)
				SCTP_INP_WUNLOCK(stcb->sctp_ep);
			return;
		}
		chk = TAILQ_FIRST(&asoc->reasmqueue);
		if (chk == NULL) {
			if (cntDel) {
				sctp_sorwakeup(stcb->sctp_ep,
				    stcb->sctp_socket);
			}
			if (hold_locks == 0)
				SCTP_INP_WUNLOCK(stcb->sctp_ep);
			return;
		}
		if (chk->rec.data.TSN_seq != (asoc->tsn_last_delivered + 1)) {
			/* Can't deliver more :< */
			if (cntDel) {
				sctp_sorwakeup(stcb->sctp_ep,
				    stcb->sctp_socket);
			}
			if (hold_locks == 0)
				SCTP_INP_WUNLOCK(stcb->sctp_ep);
			return;
		}
		stream_no = chk->rec.data.stream_number;
		nxt_todel = asoc->strmin[stream_no].last_sequence_delivered + 1;
		if (nxt_todel != chk->rec.data.stream_seq &&
		    (chk->rec.data.rcv_flags & SCTP_DATA_UNORDERED) == 0) {
			/*
			 * Not the next sequence to deliver in its stream
			 * and not unordered, so we must wait.
			 */
			if (cntDel) {
				sctp_sorwakeup(stcb->sctp_ep,
				    stcb->sctp_socket);
			}
			if (hold_locks == 0)
				SCTP_INP_WUNLOCK(stcb->sctp_ep);
			return;
		}

		if ((chk->data->m_flags & M_PKTHDR) == 0) {
			MGETHDR(m, MB_DONTWAIT, MT_DATA);
			if (m == NULL) {
				/* no room! */
				if (hold_locks == 0)
					SCTP_INP_WUNLOCK(stcb->sctp_ep);
				return;
			}
			m->m_pkthdr.len = chk->send_size;
			m->m_len = 0;
			m->m_next = chk->data;
			chk->data = m;
		}
		if (chk->rec.data.rcv_flags & SCTP_DATA_LAST_FRAG) {
			if (chk->data->m_next == NULL) {
				/* hopefully we hit here most of the time */
				chk->data->m_flags |= M_EOR;
			} else {
				/* Add the flag to the LAST mbuf in the chain */
				m = chk->data;
				while (m->m_next != NULL) {
					m = m->m_next;
				}
				m->m_flags |= M_EOR;
			}
		}
		if (chk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) {
			struct sockaddr_in6 lsa6;

			control = sctp_build_ctl(stcb, chk);
			to = (struct sockaddr *)&chk->whoTo->ro._l_addr;
			if ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_NEEDS_MAPPED_V4) &&
			    to->sa_family == AF_INET) {
				struct sockaddr_in *sin;

				sin = (struct sockaddr_in *)to;
				bzero(&sin6, sizeof(sin6));
				sin6.sin6_family = AF_INET6;
				sin6.sin6_len = sizeof(struct sockaddr_in6);
				sin6.sin6_addr.s6_addr16[2] = 0xffff;
				bcopy(&sin->sin_addr,
				    &sin6.sin6_addr.s6_addr16[3],
				    sizeof(sin6.sin6_addr.s6_addr16[3]));
				sin6.sin6_port = sin->sin_port;
				to = (struct sockaddr *)&sin6;
			}
			/* check and strip embedded scope junk */
			to = (struct sockaddr *)sctp_recover_scope((struct sockaddr_in6 *)to,
			    &lsa6);
			if (((struct sockaddr_in *)to)->sin_port == 0) {
				kprintf("Huh b, port is %d not net:%x %d?\n",
				    ((struct sockaddr_in *)to)->sin_port,
				    (u_int)chk->whoTo,
				    (int)(ntohs(stcb->rport)));
				((struct sockaddr_in *)to)->sin_port = stcb->rport;
			}
			if (sctp_sbspace(&stcb->sctp_socket->so_rcv) <
			    (long)chk->send_size) {
				if (control) {
					sctp_m_freem(control);
					stcb->asoc.my_rwnd_control_len -=
					    CMSG_LEN(sizeof(struct sctp_sndrcvinfo));
				}
				sctp_sorwakeup(stcb->sctp_ep,
				    stcb->sctp_socket);
				if (hold_locks == 0)
					SCTP_INP_WUNLOCK(stcb->sctp_ep);
				return;
			}
			if (!sctp_sbappendaddr_nocheck(&stcb->sctp_socket->so_rcv,
			    to, chk->data, control, stcb->asoc.my_vtag,
			    stcb->sctp_ep)) {
				/* Gak not enough room */
				if (control) {
					sctp_m_freem(control);
					stcb->asoc.my_rwnd_control_len -=
					    CMSG_LEN(sizeof(struct sctp_sndrcvinfo));
				}
				sctp_sorwakeup(stcb->sctp_ep,
				    stcb->sctp_socket);
				if (hold_locks == 0)
					SCTP_INP_WUNLOCK(stcb->sctp_ep);
				return;
			}
			if ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL) == 0) {
				if (sctp_add_to_socket_q(stcb->sctp_ep, stcb)) {
					stcb->asoc.my_rwnd_control_len +=
					    sizeof(struct mbuf);
				}
			} else {
				stcb->asoc.my_rwnd_control_len += sizeof(struct mbuf);
			}
			cntDel++;
		} else {
			if (sctp_sbspace(&stcb->sctp_socket->so_rcv) >=
			    (long)chk->send_size) {
				ssb_append(&stcb->sctp_socket->so_rcv, chk->data);
				cntDel++;
			} else {
				/* out of space in the sb */
				sctp_sorwakeup(stcb->sctp_ep,
				    stcb->sctp_socket);
				if (hold_locks == 0)
					SCTP_INP_WUNLOCK(stcb->sctp_ep);
				return;
			}
		}
		/* pull it we did it */
		TAILQ_REMOVE(&asoc->reasmqueue, chk, sctp_next);
		if (chk->rec.data.rcv_flags & SCTP_DATA_LAST_FRAG) {
			asoc->fragmented_delivery_inprogress = 0;
			if ((chk->rec.data.rcv_flags & SCTP_DATA_UNORDERED) == 0) {
				asoc->strmin[stream_no].last_sequence_delivered++;
			}
		}
		asoc->tsn_last_delivered = chk->rec.data.TSN_seq;
		asoc->size_on_reasm_queue -= chk->send_size;
		asoc->cnt_on_reasm_queue--;
		/* free up the chk */
		sctp_free_remote_addr(chk->whoTo);
		chk->data = NULL;
		SCTP_ZONE_FREE(sctppcbinfo.ipi_zone_chunk, chk);
		sctppcbinfo.ipi_count_chunk--;
		if ((int)sctppcbinfo.ipi_count_chunk < 0) {
			panic("Chunk count is negative");
		}
		sctppcbinfo.ipi_gencnt_chunk++;
		if (asoc->fragmented_delivery_inprogress == 0) {
			/*
			 * Now let's see if we can deliver the next one on the
			 * stream
			 */
			u_int16_t nxt_todel;
			struct sctp_stream_in *strm;

			strm = &asoc->strmin[stream_no];
			nxt_todel = strm->last_sequence_delivered + 1;
			chk = TAILQ_FIRST(&strm->inqueue);
			if (chk && (nxt_todel == chk->rec.data.stream_seq)) {
				while (chk != NULL) {
					/* all delivered */
					if (nxt_todel ==
					    chk->rec.data.stream_seq) {
						at = TAILQ_NEXT(chk, sctp_next);
						TAILQ_REMOVE(&strm->inqueue,
						    chk, sctp_next);
						asoc->size_on_all_streams -=
						    chk->send_size;
						asoc->cnt_on_all_streams--;
						strm->last_sequence_delivered++;
						/*
						 * We ignore the return of
						 * deliver_data here since we
						 * always can hold the chunk on
						 * the d-queue. And we have a
						 * finite number that can be
						 * delivered from the strq.
						 */
						sctp_deliver_data(stcb, asoc, chk, 1);
						chk = at;
					} else {
						break;
					}
					nxt_todel =
					    strm->last_sequence_delivered + 1;
				}
			}
			if (!TAILQ_EMPTY(&asoc->delivery_queue)) {
				/* Here if deliver_data fails, we must break */
				if (sctp_deliver_data(stcb, asoc, NULL, 1) == 0)
					break;
			}
			sctp_sorwakeup(stcb->sctp_ep, stcb->sctp_socket);
			if (hold_locks == 0)
				SCTP_INP_WUNLOCK(stcb->sctp_ep);
			return;
		}
		chk = TAILQ_FIRST(&asoc->reasmqueue);
	} while (chk);
	if (cntDel) {
		sctp_sorwakeup(stcb->sctp_ep, stcb->sctp_socket);
	}
	if (hold_locks == 0)
		SCTP_INP_WUNLOCK(stcb->sctp_ep);
}
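
/*
 * Partial-delivery (PD-API) state, for reference: sctp_queue_data_for_reasm()
 * below arms asoc->fragmented_delivery_inprogress and records
 * str_of_pdapi, ssn_of_pdapi and fragment_flags before calling in here;
 * this routine disarms the flag again once the SCTP_DATA_LAST_FRAG
 * chunk has been pushed to the socket buffer.
 */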
/*
 * Queue the chunk either right into the socket buffer if it is the next one
 * to go OR put it in the correct place in the delivery queue.  If we do
 * append to the so_buf, keep doing so until we are out of order.
 * One big question still remains, what to do when the socket buffer is FULL??
 */
static void
sctp_queue_data_to_stream(struct sctp_tcb *stcb, struct sctp_association *asoc,
    struct sctp_tmit_chunk *chk, int *abort_flag)
{
	struct sctp_stream_in *strm;
	struct sctp_tmit_chunk *at;
	int queue_needed;
	u_int16_t nxt_todel;
	struct mbuf *oper;

	/*** FIX FIX FIX ???
	 * Need to add code to deal with 16 bit seq wrap
	 * without a TSN wrap for ordered delivery (maybe).
	 * FIX FIX FIX ???
	 */
	queue_needed = 1;
	asoc->size_on_all_streams += chk->send_size;
	asoc->cnt_on_all_streams++;
	strm = &asoc->strmin[chk->rec.data.stream_number];
	nxt_todel = strm->last_sequence_delivered + 1;
#ifdef SCTP_STR_LOGGING
	sctp_log_strm_del(chk, NULL, SCTP_STR_LOG_FROM_INTO_STRD);
#endif
#ifdef SCTP_DEBUG
	if (sctp_debug_on & SCTP_DEBUG_INDATA1) {
		kprintf("queue to stream called for ssn:%u lastdel:%u nxt:%u\n",
		    (u_int)chk->rec.data.stream_seq,
		    (u_int)strm->last_sequence_delivered, (u_int)nxt_todel);
	}
#endif
	if (compare_with_wrap(strm->last_sequence_delivered,
	    chk->rec.data.stream_seq, MAX_SEQ) ||
	    (strm->last_sequence_delivered == chk->rec.data.stream_seq)) {
		/* The incoming sseq is behind where we last delivered? */
#ifdef SCTP_DEBUG
		if (sctp_debug_on & SCTP_DEBUG_INDATA1) {
			kprintf("Duplicate S-SEQ:%d delivered:%d from peer, Abort association\n",
			    chk->rec.data.stream_seq,
			    strm->last_sequence_delivered);
		}
#endif
		/*
		 * throw it in the stream so it gets cleaned up in
		 * association destruction
		 */
		TAILQ_INSERT_HEAD(&strm->inqueue, chk, sctp_next);
		MGET(oper, MB_DONTWAIT, MT_DATA);
		if (oper) {
			struct sctp_paramhdr *ph;
			u_int32_t *ippp;

			oper->m_len = sizeof(struct sctp_paramhdr) +
			    sizeof(*ippp);
			ph = mtod(oper, struct sctp_paramhdr *);
			ph->param_type = htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
			ph->param_length = htons(oper->m_len);
			ippp = (u_int32_t *)(ph + 1);
			*ippp = htonl(0x00000001);
		}
		sctp_abort_an_association(stcb->sctp_ep, stcb,
		    SCTP_PEER_FAULTY, oper);

		*abort_flag = 1;
		return;
	}

	if (nxt_todel == chk->rec.data.stream_seq) {
		/* can be delivered right away */
#ifdef SCTP_DEBUG
		if (sctp_debug_on & SCTP_DEBUG_INDATA1) {
			kprintf("It's NEXT!\n");
		}
#endif
#ifdef SCTP_STR_LOGGING
		sctp_log_strm_del(chk, NULL, SCTP_STR_LOG_FROM_IMMED_DEL);
#endif
		queue_needed = 0;
		asoc->size_on_all_streams -= chk->send_size;
		asoc->cnt_on_all_streams--;
		strm->last_sequence_delivered++;
		sctp_deliver_data(stcb, asoc, chk, 0);
		chk = TAILQ_FIRST(&strm->inqueue);
		while (chk != NULL) {
			/* all delivered */
			nxt_todel = strm->last_sequence_delivered + 1;
			if (nxt_todel == chk->rec.data.stream_seq) {
				at = TAILQ_NEXT(chk, sctp_next);
				TAILQ_REMOVE(&strm->inqueue, chk, sctp_next);
				asoc->size_on_all_streams -= chk->send_size;
				asoc->cnt_on_all_streams--;
				strm->last_sequence_delivered++;
				/*
				 * We ignore the return of deliver_data here
				 * since we always can hold the chunk on the
				 * d-queue. And we have a finite number that
				 * can be delivered from the strq.
				 */
#ifdef SCTP_STR_LOGGING
				sctp_log_strm_del(chk, NULL,
				    SCTP_STR_LOG_FROM_IMMED_DEL);
#endif
				sctp_deliver_data(stcb, asoc, chk, 0);
				chk = at;
				continue;
			}
			break;
		}
	}
	if (queue_needed) {
		/*
		 * Ok, we did not deliver this guy, find
		 * the correct place to put it on the queue.
		 */
#ifdef SCTP_DEBUG
		if (sctp_debug_on & SCTP_DEBUG_INDATA1) {
			kprintf("Queue Needed!\n");
		}
#endif
		if (TAILQ_EMPTY(&strm->inqueue)) {
			/* Empty queue */
#ifdef SCTP_STR_LOGGING
			sctp_log_strm_del(chk, NULL, SCTP_STR_LOG_FROM_INSERT_HD);
#endif
			TAILQ_INSERT_HEAD(&strm->inqueue, chk, sctp_next);
		} else {
			TAILQ_FOREACH(at, &strm->inqueue, sctp_next) {
				if (compare_with_wrap(at->rec.data.stream_seq,
				    chk->rec.data.stream_seq, MAX_SEQ)) {
					/*
					 * one in queue is bigger than the new
					 * one, insert before this one
					 */
#ifdef SCTP_STR_LOGGING
					sctp_log_strm_del(chk, at,
					    SCTP_STR_LOG_FROM_INSERT_MD);
#endif
					TAILQ_INSERT_BEFORE(at, chk, sctp_next);
					break;
				} else if (at->rec.data.stream_seq ==
				    chk->rec.data.stream_seq) {
					/*
					 * Gak, He sent me a duplicate str seq
					 * number
					 */
					/*
					 * foo bar, I guess I will just free
					 * this new guy, should we abort too?
					 * FIX ME MAYBE? Or it COULD be that
					 * the SSN's have wrapped. Maybe I
					 * should compare to TSN somehow...
					 * sigh for now just blow away the
					 * chunk!
					 */

					if (chk->data)
						sctp_m_freem(chk->data);
					chk->data = NULL;
					asoc->size_on_all_streams -= chk->send_size;
					asoc->cnt_on_all_streams--;
					sctp_pegs[SCTP_DUP_SSN_RCVD]++;
					sctp_free_remote_addr(chk->whoTo);
					SCTP_ZONE_FREE(sctppcbinfo.ipi_zone_chunk, chk);
					sctppcbinfo.ipi_count_chunk--;
					if ((int)sctppcbinfo.ipi_count_chunk <
					    0) {
						panic("Chunk count is negative");
					}
					sctppcbinfo.ipi_gencnt_chunk++;
					return;
				} else {
					if (TAILQ_NEXT(at, sctp_next) == NULL) {
						/*
						 * We are at the end, insert it
						 * after this one
						 */
#ifdef SCTP_STR_LOGGING
						sctp_log_strm_del(chk, at,
						    SCTP_STR_LOG_FROM_INSERT_TL);
#endif
						TAILQ_INSERT_AFTER(&strm->inqueue,
						    at, chk, sctp_next);
						break;
					}
				}
			}
		}
	} else {
		/* We delivered some chunks, wake them up */

#ifdef SCTP_DEBUG
		if (sctp_debug_on & SCTP_DEBUG_INDATA1) {
			kprintf("Doing WAKEUP!\n");
		}
#endif
		sctp_sorwakeup(stcb->sctp_ep, stcb->sctp_socket);
	}
}
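
/*
 * Example of the ordered-delivery walk above (illustrative SSNs): with
 * last_sequence_delivered = 4 and the inqueue holding 6, 7 and 9, an
 * arriving ssn 5 is handed to sctp_deliver_data() immediately, the
 * drain loop then delivers 6 and 7, and stops at 9 because 8 is still
 * missing; 9 stays queued and queue_needed remains 0.
 */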
/*
 * Returns two things: You get the total size of the deliverable parts of the
 * first fragmented message on the reassembly queue. And you get a 1 back if
 * all of the message is ready or a 0 back if the message is still incomplete.
 */
static int
sctp_is_all_msg_on_reasm(struct sctp_association *asoc, int *t_size)
{
	struct sctp_tmit_chunk *chk;
	u_int32_t tsn;

	*t_size = 0;
	chk = TAILQ_FIRST(&asoc->reasmqueue);
	if (chk == NULL) {
		/* nothing on the queue */
		return (0);
	}
	if ((chk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) == 0) {
		/* Not a first on the queue */
		return (0);
	}
	tsn = chk->rec.data.TSN_seq;
	while (chk) {
		if (tsn != chk->rec.data.TSN_seq) {
			return (0);
		}
		*t_size += chk->send_size;
		if (chk->rec.data.rcv_flags & SCTP_DATA_LAST_FRAG) {
			return (1);
		}
		tsn++;
		chk = TAILQ_NEXT(chk, sctp_next);
	}
	return (0);
}
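
/*
 * Worked example (sizes illustrative): a queue of TSN 10 (FIRST, 1200
 * bytes), 11 (1200) and 12 (LAST, 400) yields *t_size = 2800 and a
 * return of 1.  If TSN 12 has not arrived yet, the walk falls off the
 * contiguous run and returns 0, with *t_size = 2400 covering only the
 * deliverable prefix.
 */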
/*
 * Dump onto the re-assembly queue, in its proper place.  After dumping on
 * the queue, see if anything can be delivered.  If so pull it off (or as
 * much as we can).  If we run out of space then we must dump what we can and
 * set the appropriate flag to say we queued what we could.
 */
static void
sctp_queue_data_for_reasm(struct sctp_tcb *stcb, struct sctp_association *asoc,
    struct sctp_tmit_chunk *chk, int *abort_flag)
{
	struct mbuf *oper;
	u_int16_t nxt_todel;
	u_int32_t cum_ackp1, last_tsn, prev_tsn, post_tsn;
	int tsize;
	u_char last_flags;
	struct sctp_tmit_chunk *at, *prev, *next;

	prev = next = NULL;
	cum_ackp1 = asoc->tsn_last_delivered + 1;

	if (TAILQ_EMPTY(&asoc->reasmqueue)) {
		/* This is the first one on the queue */
		TAILQ_INSERT_HEAD(&asoc->reasmqueue, chk, sctp_next);
		/*
		 * we do not check for delivery of anything when
		 * only one fragment is here
		 */
		asoc->size_on_reasm_queue = chk->send_size;
		asoc->cnt_on_reasm_queue++;
		if (chk->rec.data.TSN_seq == cum_ackp1) {
			if (asoc->fragmented_delivery_inprogress == 0 &&
			    (chk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) !=
			    SCTP_DATA_FIRST_FRAG) {
				/*
				 * An empty queue, no delivery inprogress, we
				 * hit the next one and it does NOT have a
				 * FIRST fragment mark.
				 */
#ifdef SCTP_DEBUG
				if (sctp_debug_on & SCTP_DEBUG_INDATA1) {
					kprintf("Gak, Evil plot, it's not first, no fragmented delivery in progress\n");
				}
#endif
				MGET(oper, MB_DONTWAIT, MT_DATA);
				if (oper) {
					struct sctp_paramhdr *ph;
					u_int32_t *ippp;

					oper->m_len =
					    sizeof(struct sctp_paramhdr) +
					    sizeof(*ippp);
					ph = mtod(oper, struct sctp_paramhdr *);
					ph->param_type =
					    htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
					ph->param_length = htons(oper->m_len);
					ippp = (u_int32_t *)(ph + 1);
					*ippp = htonl(0x10000001);
				}
				sctp_abort_an_association(stcb->sctp_ep, stcb,
				    SCTP_PEER_FAULTY, oper);
				*abort_flag = 1;
			} else if (asoc->fragmented_delivery_inprogress &&
			    (chk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) == SCTP_DATA_FIRST_FRAG) {
				/*
				 * We are doing a partial delivery and the NEXT
				 * chunk MUST be either the LAST or MIDDLE
				 * fragment NOT a FIRST
				 */
#ifdef SCTP_DEBUG
				if (sctp_debug_on & SCTP_DEBUG_INDATA1) {
					kprintf("Gak, Evil plot, it IS a first and fragmented delivery in progress\n");
				}
#endif
				MGET(oper, MB_DONTWAIT, MT_DATA);
				if (oper) {
					struct sctp_paramhdr *ph;
					u_int32_t *ippp;

					oper->m_len =
					    sizeof(struct sctp_paramhdr) +
					    sizeof(*ippp);
					ph = mtod(oper, struct sctp_paramhdr *);
					ph->param_type =
					    htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
					ph->param_length = htons(oper->m_len);
					ippp = (u_int32_t *)(ph + 1);
					*ippp = htonl(0x10000002);
				}
				sctp_abort_an_association(stcb->sctp_ep, stcb,
				    SCTP_PEER_FAULTY, oper);
				*abort_flag = 1;
			} else if (asoc->fragmented_delivery_inprogress) {
				/* Here we are ok with a MIDDLE or LAST piece */
				if (chk->rec.data.stream_number !=
				    asoc->str_of_pdapi) {
					/* Got to be the right STR No */
#ifdef SCTP_DEBUG
					if (sctp_debug_on & SCTP_DEBUG_INDATA1) {
						kprintf("Gak, Evil plot, it IS not same stream number %d vs %d\n",
						    chk->rec.data.stream_number,
						    asoc->str_of_pdapi);
					}
#endif
					MGET(oper, MB_DONTWAIT, MT_DATA);
					if (oper) {
						struct sctp_paramhdr *ph;
						u_int32_t *ippp;
						oper->m_len =
						    sizeof(struct sctp_paramhdr) +
						    sizeof(*ippp);
						ph = mtod(oper,
						    struct sctp_paramhdr *);
						ph->param_type =
						    htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
						ph->param_length =
						    htons(oper->m_len);
						ippp = (u_int32_t *)(ph + 1);
						*ippp = htonl(0x10000003);
					}
					sctp_abort_an_association(stcb->sctp_ep,
					    stcb, SCTP_PEER_FAULTY, oper);
					*abort_flag = 1;
				} else if ((asoc->fragment_flags & SCTP_DATA_UNORDERED) !=
				    SCTP_DATA_UNORDERED &&
				    chk->rec.data.stream_seq !=
				    asoc->ssn_of_pdapi) {
					/* Got to be the right STR Seq */
#ifdef SCTP_DEBUG
					if (sctp_debug_on & SCTP_DEBUG_INDATA1) {
						kprintf("Gak, Evil plot, it IS not same stream seq %d vs %d\n",
						    chk->rec.data.stream_seq,
						    asoc->ssn_of_pdapi);
					}
#endif
					MGET(oper, MB_DONTWAIT, MT_DATA);
					if (oper) {
						struct sctp_paramhdr *ph;
						u_int32_t *ippp;
						oper->m_len =
						    sizeof(struct sctp_paramhdr) +
						    sizeof(*ippp);
						ph = mtod(oper,
						    struct sctp_paramhdr *);
						ph->param_type =
						    htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
						ph->param_length =
						    htons(oper->m_len);
						ippp = (u_int32_t *)(ph + 1);
						*ippp = htonl(0x10000004);
					}
					sctp_abort_an_association(stcb->sctp_ep,
					    stcb, SCTP_PEER_FAULTY, oper);
					*abort_flag = 1;
				}
			}
		}
		return;
	}
	/* Find its place */
	at = TAILQ_FIRST(&asoc->reasmqueue);

	/* Grab the top flags */
	TAILQ_FOREACH(at, &asoc->reasmqueue, sctp_next) {
		if (compare_with_wrap(at->rec.data.TSN_seq,
		    chk->rec.data.TSN_seq, MAX_TSN)) {
			/*
			 * one in queue is bigger than the new one, insert
			 * before this one
			 */
			/* A check */
			asoc->size_on_reasm_queue += chk->send_size;
			asoc->cnt_on_reasm_queue++;
			next = at;
			TAILQ_INSERT_BEFORE(at, chk, sctp_next);
			break;
		} else if (at->rec.data.TSN_seq == chk->rec.data.TSN_seq) {
			/* Gak, He sent me a duplicate str seq number */
			/*
			 * foo bar, I guess I will just free this new guy,
			 * should we abort too? FIX ME MAYBE? Or it COULD be
			 * that the SSN's have wrapped. Maybe I should compare
			 * to TSN somehow... sigh for now just blow away the
			 * chunk!
			 */
			if (chk->data)
				sctp_m_freem(chk->data);
			chk->data = NULL;
			sctp_free_remote_addr(chk->whoTo);
			SCTP_ZONE_FREE(sctppcbinfo.ipi_zone_chunk, chk);
			sctppcbinfo.ipi_count_chunk--;
			if ((int)sctppcbinfo.ipi_count_chunk < 0) {
				panic("Chunk count is negative");
			}
			sctppcbinfo.ipi_gencnt_chunk++;
			return;
		} else {
			last_flags = at->rec.data.rcv_flags;
			last_tsn = at->rec.data.TSN_seq;
			prev = at;
			if (TAILQ_NEXT(at, sctp_next) == NULL) {
				/*
				 * We are at the end, insert it after this one
				 */
				/* check it first */
				asoc->size_on_reasm_queue += chk->send_size;
				asoc->cnt_on_reasm_queue++;
				TAILQ_INSERT_AFTER(&asoc->reasmqueue, at, chk, sctp_next);
				break;
			}
		}
	}
	/* Now the audits */
	if (prev) {
		prev_tsn = chk->rec.data.TSN_seq - 1;
		if (prev_tsn == prev->rec.data.TSN_seq) {
			/*
			 * Ok the one I am dropping onto the end
			 * is the NEXT. A bit of validation here.
			 */
			if ((prev->rec.data.rcv_flags & SCTP_DATA_FRAG_MASK) ==
			    SCTP_DATA_FIRST_FRAG ||
			    (prev->rec.data.rcv_flags & SCTP_DATA_FRAG_MASK) ==
			    SCTP_DATA_MIDDLE_FRAG) {
				/*
				 * Insert chk MUST be a MIDDLE or LAST fragment
				 */
				if ((chk->rec.data.rcv_flags & SCTP_DATA_FRAG_MASK) ==
				    SCTP_DATA_FIRST_FRAG) {
#ifdef SCTP_DEBUG
					if (sctp_debug_on & SCTP_DEBUG_INDATA1) {
						kprintf("Prev check - It can be a middle or last but not a first\n");
						kprintf("Gak, Evil plot, it's a FIRST!\n");
					}
#endif
					MGET(oper, MB_DONTWAIT, MT_DATA);
					if (oper) {
						struct sctp_paramhdr *ph;
						u_int32_t *ippp;

						oper->m_len =
						    sizeof(struct sctp_paramhdr) +
						    sizeof(*ippp);
						ph = mtod(oper,
						    struct sctp_paramhdr *);
						ph->param_type =
						    htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
						ph->param_length =
						    htons(oper->m_len);

						ippp = (u_int32_t *)(ph + 1);
						*ippp = htonl(0x10000005);
					}
					sctp_abort_an_association(stcb->sctp_ep,
					    stcb, SCTP_PEER_FAULTY, oper);
					*abort_flag = 1;
					return;
				}
				if (chk->rec.data.stream_number !=
				    prev->rec.data.stream_number) {
					/*
					 * Huh, need the correct STR here, they
					 * must be the same.
					 */
#ifdef SCTP_DEBUG
					if (sctp_debug_on & SCTP_DEBUG_INDATA1) {
						kprintf("Prev check - Gak, Evil plot, ssn:%d not the same as at:%d\n",
						    chk->rec.data.stream_number,
						    prev->rec.data.stream_number);
					}
#endif
					MGET(oper, MB_DONTWAIT, MT_DATA);
					if (oper) {
						struct sctp_paramhdr *ph;
						u_int32_t *ippp;

						oper->m_len =
						    sizeof(struct sctp_paramhdr) +
						    sizeof(*ippp);
						ph = mtod(oper,
						    struct sctp_paramhdr *);
						ph->param_type =
						    htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
						ph->param_length =
						    htons(oper->m_len);
						ippp = (u_int32_t *)(ph + 1);
						*ippp = htonl(0x10000006);
					}

					sctp_abort_an_association(stcb->sctp_ep,
					    stcb, SCTP_PEER_FAULTY, oper);

					*abort_flag = 1;
					return;
				}
				if ((prev->rec.data.rcv_flags & SCTP_DATA_UNORDERED) == 0 &&
				    chk->rec.data.stream_seq !=
				    prev->rec.data.stream_seq) {
					/*
					 * Huh, need the correct STR here, they
					 * must be the same.
					 */
#ifdef SCTP_DEBUG
					if (sctp_debug_on & SCTP_DEBUG_INDATA1) {
						kprintf("Prev check - Gak, Evil plot, sseq:%d not the same as at:%d\n",
						    chk->rec.data.stream_seq,
						    prev->rec.data.stream_seq);
					}
#endif
					MGET(oper, MB_DONTWAIT, MT_DATA);
					if (oper) {
						struct sctp_paramhdr *ph;
						u_int32_t *ippp;

						oper->m_len =
						    sizeof(struct sctp_paramhdr) +
						    sizeof(*ippp);
						ph = mtod(oper,
						    struct sctp_paramhdr *);
						ph->param_type =
						    htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
						ph->param_length =
						    htons(oper->m_len);
						ippp = (u_int32_t *)(ph + 1);
						*ippp = htonl(0x10000007);
					}

					sctp_abort_an_association(stcb->sctp_ep,
					    stcb, SCTP_PEER_FAULTY, oper);

					*abort_flag = 1;
					return;
				}
			} else if ((prev->rec.data.rcv_flags & SCTP_DATA_FRAG_MASK) ==
			    SCTP_DATA_LAST_FRAG) {
				/* Insert chk MUST be a FIRST */
				if ((chk->rec.data.rcv_flags & SCTP_DATA_FRAG_MASK) !=
				    SCTP_DATA_FIRST_FRAG) {
#ifdef SCTP_DEBUG
					if (sctp_debug_on & SCTP_DEBUG_INDATA1) {
						kprintf("Prev check - Gak, evil plot, it's not FIRST and it must be!\n");
					}
#endif
					MGET(oper, MB_DONTWAIT, MT_DATA);
					if (oper) {
						struct sctp_paramhdr *ph;
						u_int32_t *ippp;

						oper->m_len =
						    sizeof(struct sctp_paramhdr) +
						    sizeof(*ippp);
						ph = mtod(oper,
						    struct sctp_paramhdr *);
						ph->param_type =
						    htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
						ph->param_length =
						    htons(oper->m_len);
						ippp = (u_int32_t *)(ph + 1);
						*ippp = htonl(0x10000008);
					}

					sctp_abort_an_association(stcb->sctp_ep,
					    stcb, SCTP_PEER_FAULTY, oper);

					*abort_flag = 1;
					return;
				}
			}
		}
	}
	if (next) {
		post_tsn = chk->rec.data.TSN_seq + 1;
		if (post_tsn == next->rec.data.TSN_seq) {
			/*
			 * Ok the one I am inserting ahead of
			 * is my NEXT one. A bit of validation here.
			 */
			if (next->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) {
				/* Insert chk MUST be a last fragment */
				if ((chk->rec.data.rcv_flags & SCTP_DATA_FRAG_MASK)
				    != SCTP_DATA_LAST_FRAG) {
#ifdef SCTP_DEBUG
					if (sctp_debug_on & SCTP_DEBUG_INDATA1) {
						kprintf("Next chk - Next is FIRST, we must be LAST\n");
						kprintf("Gak, Evil plot, it's not a last!\n");
					}
#endif
					MGET(oper, MB_DONTWAIT, MT_DATA);
					if (oper) {
						struct sctp_paramhdr *ph;
						u_int32_t *ippp;

						oper->m_len =
						    sizeof(struct sctp_paramhdr) +
						    sizeof(*ippp);
						ph = mtod(oper,
						    struct sctp_paramhdr *);
						ph->param_type =
						    htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
						ph->param_length =
						    htons(oper->m_len);
						ippp = (u_int32_t *)(ph + 1);
						*ippp = htonl(0x10000009);
					}

					sctp_abort_an_association(stcb->sctp_ep,
					    stcb, SCTP_PEER_FAULTY, oper);

					*abort_flag = 1;
					return;
				}
			} else if ((next->rec.data.rcv_flags & SCTP_DATA_FRAG_MASK) ==
			    SCTP_DATA_MIDDLE_FRAG ||
			    (next->rec.data.rcv_flags & SCTP_DATA_FRAG_MASK) ==
			    SCTP_DATA_LAST_FRAG) {
				/* Insert chk CAN be MIDDLE or FIRST NOT LAST */
				if ((chk->rec.data.rcv_flags & SCTP_DATA_FRAG_MASK) ==
				    SCTP_DATA_LAST_FRAG) {
#ifdef SCTP_DEBUG
					if (sctp_debug_on & SCTP_DEBUG_INDATA1) {
						kprintf("Next chk - Next is a MIDDLE/LAST\n");
						kprintf("Gak, Evil plot, new prev chunk is a LAST\n");
					}
#endif
					MGET(oper, MB_DONTWAIT, MT_DATA);
					if (oper) {
						struct sctp_paramhdr *ph;
						u_int32_t *ippp;

						oper->m_len =
						    sizeof(struct sctp_paramhdr) +
						    sizeof(*ippp);
						ph = mtod(oper,
						    struct sctp_paramhdr *);
						ph->param_type =
						    htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
						ph->param_length =
						    htons(oper->m_len);
						ippp = (u_int32_t *)(ph + 1);
						*ippp = htonl(0x1000000a);
					}
					sctp_abort_an_association(stcb->sctp_ep,
					    stcb, SCTP_PEER_FAULTY, oper);

					*abort_flag = 1;
					return;
				}
				if (chk->rec.data.stream_number !=
				    next->rec.data.stream_number) {
					/*
					 * Huh, need the correct STR here, they
					 * must be the same.
					 */
#ifdef SCTP_DEBUG
					if (sctp_debug_on & SCTP_DEBUG_INDATA1) {
						kprintf("Next chk - Gak, Evil plot, ssn:%d not the same as at:%d\n",
						    chk->rec.data.stream_number,
						    next->rec.data.stream_number);
					}
#endif
					MGET(oper, MB_DONTWAIT, MT_DATA);
					if (oper) {
						struct sctp_paramhdr *ph;
						u_int32_t *ippp;

						oper->m_len =
						    sizeof(struct sctp_paramhdr) +
						    sizeof(*ippp);
						ph = mtod(oper,
						    struct sctp_paramhdr *);
						ph->param_type =
						    htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
						ph->param_length =
						    htons(oper->m_len);
						ippp = (u_int32_t *)(ph + 1);
						*ippp = htonl(0x1000000b);
					}

					sctp_abort_an_association(stcb->sctp_ep,
					    stcb, SCTP_PEER_FAULTY, oper);

					*abort_flag = 1;
					return;
				}
				if ((next->rec.data.rcv_flags & SCTP_DATA_UNORDERED) == 0 &&
				    chk->rec.data.stream_seq !=
				    next->rec.data.stream_seq) {
					/*
					 * Huh, need the correct STR here, they
					 * must be the same.
					 */
#ifdef SCTP_DEBUG
					if (sctp_debug_on & SCTP_DEBUG_INDATA1) {
						kprintf("Next chk - Gak, Evil plot, sseq:%d not the same as at:%d\n",
						    chk->rec.data.stream_seq,
						    next->rec.data.stream_seq);
					}
#endif
					MGET(oper, MB_DONTWAIT, MT_DATA);
					if (oper) {
						struct sctp_paramhdr *ph;
						u_int32_t *ippp;

						oper->m_len =
						    sizeof(struct sctp_paramhdr) +
						    sizeof(*ippp);
						ph = mtod(oper,
						    struct sctp_paramhdr *);
						ph->param_type =
						    htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
						ph->param_length =
						    htons(oper->m_len);
						ippp = (u_int32_t *)(ph + 1);
						*ippp = htonl(0x1000000c);
					}

					sctp_abort_an_association(stcb->sctp_ep,
					    stcb, SCTP_PEER_FAULTY, oper);

					*abort_flag = 1;
					return;
				}
			}
		}
	}

	/*
	 * now that we have all in their place we must check a number of
	 * things to see if we can send data to the ULP.
	 */
	/* we need to do some delivery, if we can */
	chk = TAILQ_FIRST(&asoc->reasmqueue);
	if (chk == NULL) {
		/* Huh? */
		asoc->size_on_reasm_queue = 0;
		asoc->cnt_on_reasm_queue = 0;
		return;
	}
	if (asoc->fragmented_delivery_inprogress == 0) {
		nxt_todel =
		    asoc->strmin[chk->rec.data.stream_number].last_sequence_delivered + 1;
		if ((chk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) &&
		    (nxt_todel == chk->rec.data.stream_seq ||
		    (chk->rec.data.rcv_flags & SCTP_DATA_UNORDERED))) {
			/*
			 * Yep the first one is here and it's
			 * ok to deliver but should we?
			 */
			if (TAILQ_EMPTY(&asoc->delivery_queue) &&
			    (sctp_is_all_msg_on_reasm(asoc, &tsize) ||
			    (asoc->size_on_reasm_queue >=
			    (stcb->sctp_socket->so_rcv.ssb_hiwat >> 2) &&
			    tsize))) {
				/*
				 * Yes, we setup to start reception, by
				 * backing down the TSN just in case we
				 * can't deliver.
				 */
				asoc->fragmented_delivery_inprogress = 1;
				asoc->tsn_last_delivered =
				    chk->rec.data.TSN_seq - 1;
				asoc->str_of_pdapi =
				    chk->rec.data.stream_number;
				asoc->ssn_of_pdapi = chk->rec.data.stream_seq;
				asoc->fragment_flags = chk->rec.data.rcv_flags;
				sctp_service_reassembly(stcb, asoc, 0);
			}
		}
	} else {
		sctp_service_reassembly(stcb, asoc, 0);
	}
}
/*
 * This is an unfortunate routine. It checks to make sure an evil guy is not
 * stuffing us full of bad packet fragments. A broken peer could also do this
 * but this is doubtful. It is too bad I must worry about evil crackers sigh
 * :< more cycles.
 */
static int
sctp_does_chk_belong_to_reasm(struct sctp_association *asoc,
    struct sctp_tmit_chunk *chk)
{
	struct sctp_tmit_chunk *at;
	u_int32_t tsn_est;

	TAILQ_FOREACH(at, &asoc->reasmqueue, sctp_next) {
		if (compare_with_wrap(chk->rec.data.TSN_seq,
		    at->rec.data.TSN_seq, MAX_TSN)) {
			/* is it one bigger? */
			tsn_est = at->rec.data.TSN_seq + 1;
			if (tsn_est == chk->rec.data.TSN_seq) {
				/* yep. It better be a last then */
				if ((at->rec.data.rcv_flags & SCTP_DATA_FRAG_MASK) !=
				    SCTP_DATA_LAST_FRAG) {
					/*
					 * Ok this guy belongs next to a guy
					 * that is NOT last, it should be a
					 * middle/last, not a complete chunk.
					 */
					return (1);
				} else {
					/*
					 * This guy is ok since it's a LAST and
					 * the new chunk is a fully self-
					 * contained one.
					 */
					return (0);
				}
			}
		} else if (chk->rec.data.TSN_seq == at->rec.data.TSN_seq) {
			/* Software error since I have a dup? */
			return (1);
		} else {
			/*
			 * Ok, 'at' is larger than new chunk but does it
			 * need to be right before it.
			 */
			tsn_est = chk->rec.data.TSN_seq + 1;
			if (tsn_est == at->rec.data.TSN_seq) {
				/* Yep, It better be a first */
				if ((at->rec.data.rcv_flags & SCTP_DATA_FRAG_MASK) !=
				    SCTP_DATA_FIRST_FRAG) {
					return (1);
				} else {
					return (0);
				}
			}
		}
	}
	return (0);
}
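
/*
 * E.g. with TSN 20 marked LAST already on the reasm queue, an
 * unfragmented chunk with TSN 21 is fine (returns 0), but if 20 were a
 * MIDDLE fragment then TSN 21 would have to be part of that message,
 * so a complete chunk landing there returns 1 and is treated as
 * hostile/bogus by the caller.
 */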
extern unsigned int sctp_max_chunks_on_queue;

static int
sctp_process_a_data_chunk(struct sctp_tcb *stcb, struct sctp_association *asoc,
    struct mbuf **m, int offset, struct sctp_data_chunk *ch, int chk_length,
    struct sctp_nets *net, u_int32_t *high_tsn, int *abort_flag,
    int *break_flag, int last_chunk)
{
	/* Process a data chunk */
	/* struct sctp_tmit_chunk *chk;*/
	struct sctp_tmit_chunk *chk;
	u_int32_t tsn, gap;
	struct mbuf *dmbuf;
	int indx, the_len;
	u_int16_t strmno, strmseq;
	struct mbuf *oper;

	chk = NULL;
	tsn = ntohl(ch->dp.tsn);
#ifdef SCTP_MAP_LOGGING
	sctp_log_map(0, tsn, asoc->cumulative_tsn, SCTP_MAP_PREPARE_SLIDE);
#endif
	if (compare_with_wrap(asoc->cumulative_tsn, tsn, MAX_TSN) ||
	    asoc->cumulative_tsn == tsn) {
		/* It is a duplicate */
		sctp_pegs[SCTP_DUPTSN_RECVD]++;
		if (asoc->numduptsns < SCTP_MAX_DUP_TSNS) {
			/* Record a dup for the next outbound sack */
			asoc->dup_tsns[asoc->numduptsns] = tsn;
			asoc->numduptsns++;
		}
		return (0);
	}
	/* Calculate the number of TSN's between the base and this TSN */
	if (tsn >= asoc->mapping_array_base_tsn) {
		gap = tsn - asoc->mapping_array_base_tsn;
	} else {
		gap = (MAX_TSN - asoc->mapping_array_base_tsn) + tsn + 1;
	}
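	/*
	 * Wrap example: with mapping_array_base_tsn = 0xfffffffd and
	 * tsn = 2, the else branch gives gap = (0xffffffff - 0xfffffffd)
	 * + 2 + 1 = 5, i.e. the bit five slots past the base, exactly as
	 * if the 32-bit TSN space had not wrapped.
	 */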
	if (gap >= (SCTP_MAPPING_ARRAY << 3)) {
		/* Can't hold the bit in the mapping at max array, toss it */
		return (0);
	}
	if (gap >= (uint32_t)(asoc->mapping_array_size << 3)) {
		if (sctp_expand_mapping_array(asoc)) {
			/* Can't expand, drop it */
			return (0);
		}
	}
	if (compare_with_wrap(tsn, *high_tsn, MAX_TSN)) {
		*high_tsn = tsn;
	}
	/* See if we have received this one already */
	if (SCTP_IS_TSN_PRESENT(asoc->mapping_array, gap)) {
		sctp_pegs[SCTP_DUPTSN_RECVD]++;
		if (asoc->numduptsns < SCTP_MAX_DUP_TSNS) {
			/* Record a dup for the next outbound sack */
			asoc->dup_tsns[asoc->numduptsns] = tsn;
			asoc->numduptsns++;
		}
		if (!callout_pending(&asoc->dack_timer.timer)) {
			/*
			 * By starting the timer we assure that we
			 * WILL sack at the end of the packet
			 * when sctp_sack_check gets called.
			 */
			sctp_timer_start(SCTP_TIMER_TYPE_RECV, stcb->sctp_ep,
			    stcb, NULL);
		}
		return (0);
	}
	/*
	 * Check to see about the GONE flag, duplicates would cause
	 * a sack to be sent up above
	 */
	if (stcb && (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE)) {
		/*
		 * wait a minute, this guy is gone, there is no
		 * longer a receiver. Send peer an ABORT!
		 */
		struct mbuf *op_err;
		op_err = sctp_generate_invmanparam(SCTP_CAUSE_OUT_OF_RESC);
		sctp_abort_an_association(stcb->sctp_ep, stcb, 0, op_err);
		*abort_flag = 1;
		return (0);
	}
	/*
	 * Now before going further we see if there is room. If NOT then
	 * we MAY let one through only IF this TSN is the one we are
	 * waiting for on a partial delivery API.
	 */

	/* now do the tests */
	if (((asoc->cnt_on_all_streams +
	    asoc->cnt_on_delivery_queue +
	    asoc->cnt_on_reasm_queue +
	    asoc->cnt_msg_on_sb) > sctp_max_chunks_on_queue) ||
	    (((int)asoc->my_rwnd) <= 0)) {
		/*
		 * When we have NO room in the rwnd we check
		 * to make sure the reader is doing its job...
		 */
		if (stcb->sctp_socket->so_rcv.ssb_cc) {
			/* some to read, wake-up */
			sctp_sorwakeup(stcb->sctp_ep, stcb->sctp_socket);
		}
		/* now is it in the mapping array of what we have accepted? */
		if (compare_with_wrap(tsn,
		    asoc->highest_tsn_inside_map, MAX_TSN)) {

			/* Nope not in the valid range dump it */
#ifdef SCTP_DEBUG
			if (sctp_debug_on & SCTP_DEBUG_INDATA1) {
				kprintf("My rwnd overrun1:tsn:%lx rwnd %lu sbspace:%ld delq:%d!\n",
				    (u_long)tsn, (u_long)asoc->my_rwnd,
				    sctp_sbspace(&stcb->sctp_socket->so_rcv),
				    stcb->asoc.cnt_on_delivery_queue);
			}
#endif
			sctp_set_rwnd(stcb, asoc);
			if ((asoc->cnt_on_all_streams +
			    asoc->cnt_on_delivery_queue +
			    asoc->cnt_on_reasm_queue +
			    asoc->cnt_msg_on_sb) > sctp_max_chunks_on_queue) {
				sctp_pegs[SCTP_MSGC_DROP]++;
			} else {
				sctp_pegs[SCTP_RWND_DROPS]++;
			}
			indx = *break_flag;
			*break_flag = 1;
			return (0);
		}
	}
	strmno = ntohs(ch->dp.stream_id);
	if (strmno >= asoc->streamincnt) {
		struct sctp_paramhdr *phdr;
		struct mbuf *mb;

		MGETHDR(mb, MB_DONTWAIT, MT_DATA);
		if (mb != NULL) {
			/* add some space up front so prepend will work well */
			mb->m_data += sizeof(struct sctp_chunkhdr);
			phdr = mtod(mb, struct sctp_paramhdr *);
			/*
			 * Error causes are just param's and this one has
			 * two back to back phdr, one with the error type
			 * and size, the other with the streamid and a rsvd
			 */
			mb->m_pkthdr.len = mb->m_len =
			    (sizeof(struct sctp_paramhdr) * 2);
			phdr->param_type = htons(SCTP_CAUSE_INV_STRM);
			phdr->param_length =
			    htons(sizeof(struct sctp_paramhdr) * 2);
			phdr++;
			/* We insert the stream in the type field */
			phdr->param_type = ch->dp.stream_id;
			/* And set the length to 0 for the rsvd field */
			phdr->param_length = 0;
			sctp_queue_op_err(stcb, mb);
		}
		sctp_pegs[SCTP_BAD_STRMNO]++;
		return (0);
	}
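	/*
	 * For reference, the Invalid Stream Identifier cause built above
	 * lays out on the wire as two back-to-back parameter headers:
	 *
	 *	u_int16_t cause code   = SCTP_CAUSE_INV_STRM
	 *	u_int16_t cause length = 8
	 *	u_int16_t stream id    (as received, network order)
	 *	u_int16_t reserved     = 0
	 */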
	/*
	 * Before we continue let's validate that we are not
	 * being fooled by an evil attacker. We can only
	 * have 4k chunks based on our TSN spread allowed
	 * by the mapping array 512 * 8 bits, so there is
	 * no way our stream sequence numbers could have wrapped.
	 * We of course only validate the FIRST fragment so the
	 * bit must be set.
	 */
	strmseq = ntohs(ch->dp.stream_sequence);
	if ((ch->ch.chunk_flags & SCTP_DATA_FIRST_FRAG) &&
	    (ch->ch.chunk_flags & SCTP_DATA_UNORDERED) == 0 &&
	    (compare_with_wrap(asoc->strmin[strmno].last_sequence_delivered,
	    strmseq, MAX_SEQ) ||
	    asoc->strmin[strmno].last_sequence_delivered == strmseq)) {
		/* The incoming sseq is behind where we last delivered? */
#ifdef SCTP_DEBUG
		if (sctp_debug_on & SCTP_DEBUG_INDATA1) {
			kprintf("EVIL/Broken-Dup S-SEQ:%d delivered:%d from peer, Abort!\n",
			    strmseq,
			    asoc->strmin[strmno].last_sequence_delivered);
		}
#endif
		/*
		 * throw it in the stream so it gets cleaned up in
		 * association destruction
		 */
		MGET(oper, MB_DONTWAIT, MT_DATA);
		if (oper) {
			struct sctp_paramhdr *ph;
			u_int32_t *ippp;

			oper->m_len = sizeof(struct sctp_paramhdr) +
			    sizeof(*ippp);
			ph = mtod(oper, struct sctp_paramhdr *);
			ph->param_type = htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
			ph->param_length = htons(oper->m_len);
			ippp = (u_int32_t *)(ph + 1);
			*ippp = htonl(0x20000001);
		}
		sctp_abort_an_association(stcb->sctp_ep, stcb, SCTP_PEER_FAULTY,
		    oper);
		sctp_pegs[SCTP_BAD_SSN_WRAP]++;
		*abort_flag = 1;
		return (0);
	}
	the_len = (chk_length - sizeof(struct sctp_data_chunk));
	if (last_chunk == 0) {
		dmbuf = sctp_m_copym(*m,
		    (offset + sizeof(struct sctp_data_chunk)),
		    the_len, MB_DONTWAIT);
	} else {
		/* We can steal the last chunk */
		dmbuf = *m;
		/* lop off the top part */
		m_adj(dmbuf, (offset + sizeof(struct sctp_data_chunk)));
		if (dmbuf->m_pkthdr.len > the_len) {
			/* Trim the end round bytes off too */
			m_adj(dmbuf, -(dmbuf->m_pkthdr.len - the_len));
		}
		sctp_pegs[SCTP_NO_COPY_IN]++;
	}
1902 if (dmbuf == NULL) {
1903 sctp_pegs[SCTP_DROP_NOMEMORY]++;
1904 return (0);
1906 if ((ch->ch.chunk_flags & SCTP_DATA_NOT_FRAG) == SCTP_DATA_NOT_FRAG &&
1907 asoc->fragmented_delivery_inprogress == 0 &&
1908 TAILQ_EMPTY(&asoc->delivery_queue) &&
1909 ((ch->ch.chunk_flags & SCTP_DATA_UNORDERED) ||
1910 ((asoc->strmin[strmno].last_sequence_delivered + 1) == strmseq &&
1911 TAILQ_EMPTY(&asoc->strmin[strmno].inqueue))) &&
1912 ((long)(stcb->sctp_socket->so_rcv.ssb_hiwat -
1913 stcb->sctp_socket->so_rcv.ssb_cc) >= (long)the_len)) {
1914 /* Candidate for express delivery */
1916 * Its not fragmented,
1917 * No PD-API is up,
1918 * Nothing in the delivery queue,
1919 * Its un-ordered OR ordered and the next to deliver AND
1920 * nothing else is stuck on the stream queue,
1921 * And there is room for it in the socket buffer.
1922 * Lets just stuff it up the buffer....
1925 struct mbuf *control, *mmm;
1926 struct sockaddr_in6 sin6;
1927 struct sockaddr_in6 lsa6;
1928 struct sockaddr *to;
1930 /* It would be nice to avoid this copy if we could :< */
1931 control = sctp_build_ctl_nchunk(stcb, tsn,
1932 ch->dp.protocol_id, 0, strmno, strmseq,
1933 ch->ch.chunk_flags);
1934 /* XXX need to append PKTHDR to the socket buffer first */
1936 if ((dmbuf->m_flags & M_PKTHDR) == 0) {
1937 struct mbuf *tmp;
1938 MGETHDR(tmp, MB_DONTWAIT, MT_DATA);
1939 if (tmp == NULL) {
1941 /* no room! */
1942 if (control) {
1943 sctp_m_freem(control);
1944 stcb->asoc.my_rwnd_control_len -=
1945 CMSG_LEN(sizeof(struct sctp_sndrcvinfo));
1948 goto failed_express_del;
1950 tmp->m_pkthdr.len = the_len;
1951 tmp->m_len = 0;
1952 tmp->m_next = dmbuf;
1953 dmbuf = tmp;
1955 to = (struct sockaddr *)&net->ro._l_addr;
1956 if ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_NEEDS_MAPPED_V4) &&
1957 to->sa_family == AF_INET) {
1958 struct sockaddr_in *sin;
1960 sin = (struct sockaddr_in *)to;
1961 bzero(&sin6, sizeof(sin6));
1962 sin6.sin6_family = AF_INET6;
1963 sin6.sin6_len = sizeof(struct sockaddr_in6);
1964 sin6.sin6_addr.s6_addr16[5] = 0xffff;
1965 bcopy(&sin->sin_addr,
1966 &sin6.sin6_addr.s6_addr16[6],
1967 sizeof(sin->sin_addr));
1968 sin6.sin6_port = sin->sin_port;
1969 to = (struct sockaddr *)&sin6;
1972 /* check and strip embedded scope junk */
1973 to = (struct sockaddr *)sctp_recover_scope((struct sockaddr_in6 *)to,
1974 &lsa6);
1975 if (((struct sockaddr_in *)to)->sin_port == 0) {
1976 kprintf("Huh c, port is %d not net:%x %d?\n",
1977 ((struct sockaddr_in *)to)->sin_port,
1978 (u_int)net,
1979 (int)(ntohs(stcb->rport)));
1980 ((struct sockaddr_in *)to)->sin_port = stcb->rport;
1983 mmm = dmbuf;
1984 /* Mark the EOR */
1985 while (mmm->m_next != NULL) {
1986 mmm = mmm->m_next;
1988 mmm->m_flags |= M_EOR;
1989 if (compare_with_wrap(tsn, asoc->highest_tsn_inside_map, MAX_TSN)) {
1990 /* we have a new high score */
1991 asoc->highest_tsn_inside_map = tsn;
1992 #ifdef SCTP_MAP_LOGGING
1993 sctp_log_map(0, 1, asoc->highest_tsn_inside_map, SCTP_MAP_SLIDE_RESULT);
1994 #endif
1996 SCTP_TCB_UNLOCK(stcb);
1997 SCTP_INP_WLOCK(stcb->sctp_ep);
1998 SCTP_TCB_LOCK(stcb);
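/*
 * Lock-order note: the INP write lock ranks ahead of the TCB lock,
 * so the TCB lock is released and re-taken around acquiring it
 * instead of grabbing the two in the wrong order.
 */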
1999 if (!sctp_sbappendaddr_nocheck(&stcb->sctp_socket->so_rcv, to, dmbuf,
2000 control, stcb->asoc.my_vtag, stcb->sctp_ep)) {
2001 if (control) {
2002 sctp_m_freem(control);
2003 stcb->asoc.my_rwnd_control_len -=
2004 CMSG_LEN(sizeof(struct sctp_sndrcvinfo));
2006 sctp_m_freem(dmbuf);
2007 goto failed_express_del;
2009 if ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL) == 0) {
2010 if (sctp_add_to_socket_q(stcb->sctp_ep, stcb)) {
2011 stcb->asoc.my_rwnd_control_len += sizeof(struct mbuf);
2013 } else {
2014 stcb->asoc.my_rwnd_control_len += sizeof(struct mbuf);
2016 SCTP_INP_WUNLOCK(stcb->sctp_ep);
2017 sctp_sorwakeup(stcb->sctp_ep, stcb->sctp_socket);
2018 if ((ch->ch.chunk_flags & SCTP_DATA_UNORDERED) == 0) {
2020 /* for ordered, bump what we delivered */
2021 asoc->strmin[strmno].last_sequence_delivered++;
2023 sctp_pegs[SCTP_EXPRESS_ROUTE]++;
2024 #ifdef SCTP_STR_LOGGING
2025 sctp_log_strm_del_alt(tsn, strmseq,
2026 SCTP_STR_LOG_FROM_EXPRS_DEL);
2027 #endif
2028 #ifdef SCTP_DEBUG
2029 if (sctp_debug_on & SCTP_DEBUG_INDATA1) {
2030 kprintf("Express Delivery succeeds\n");
2032 #endif
2033 goto finish_express_del;
2036 failed_express_del:
2037 /* If we reach here this is a new chunk */
2038 chk = (struct sctp_tmit_chunk *)SCTP_ZONE_GET(sctppcbinfo.ipi_zone_chunk);
2039 if (chk == NULL) {
2040 /* No memory so we drop the chunk */
2041 sctp_pegs[SCTP_DROP_NOMEMORY]++;
2042 if (last_chunk == 0) {
2043 /* we copied it, free the copy */
2044 sctp_m_freem(dmbuf);
2046 return (0);
2048 sctppcbinfo.ipi_count_chunk++;
2049 sctppcbinfo.ipi_gencnt_chunk++;
2050 chk->rec.data.TSN_seq = tsn;
2051 chk->rec.data.stream_seq = strmseq;
2052 chk->rec.data.stream_number = strmno;
2053 chk->rec.data.payloadtype = ch->dp.protocol_id;
2054 chk->rec.data.context = 0;
2055 chk->rec.data.doing_fast_retransmit = 0;
2056 chk->rec.data.rcv_flags = ch->ch.chunk_flags;
2057 chk->asoc = asoc;
2058 chk->send_size = the_len;
2059 chk->whoTo = net;
2060 net->ref_count++;
2061 chk->data = dmbuf;
2064 /* Mark it as received */
2065 /* Now queue it where it belongs */
2066 if ((chk->rec.data.rcv_flags & SCTP_DATA_NOT_FRAG) ==
2067 SCTP_DATA_NOT_FRAG) {
2068 /* First a sanity check */
2069 if (asoc->fragmented_delivery_inprogress) {
2071 * Ok, we have a fragmented delivery in progress;
2072 * if this chunk is next to deliver OR belongs,
2073 * in our view, in the reassembly queue, the peer
2074 * is evil or broken.
2076 u_int32_t estimate_tsn;
2077 estimate_tsn = asoc->tsn_last_delivered + 1;
2078 if (TAILQ_EMPTY(&asoc->reasmqueue) &&
2079 (estimate_tsn == chk->rec.data.TSN_seq)) {
2080 /* Evil/Broken peer */
2081 MGET(oper, MB_DONTWAIT, MT_DATA);
2082 if (oper) {
2083 struct sctp_paramhdr *ph;
2084 u_int32_t *ippp;
2086 oper->m_len =
2087 sizeof(struct sctp_paramhdr) +
2088 sizeof(*ippp);
2089 ph = mtod(oper, struct sctp_paramhdr *);
2090 ph->param_type =
2091 htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
2092 ph->param_length = htons(oper->m_len);
2093 ippp = (u_int32_t *)(ph + 1);
2094 *ippp = htonl(0x20000002);
2096 sctp_abort_an_association(stcb->sctp_ep, stcb,
2097 SCTP_PEER_FAULTY, oper);
2099 *abort_flag = 1;
2100 sctp_pegs[SCTP_DROP_FRAG]++;
2101 return (0);
2102 } else {
2103 if (sctp_does_chk_belong_to_reasm(asoc, chk)) {
2104 MGET(oper, MB_DONTWAIT, MT_DATA);
2105 if (oper) {
2106 struct sctp_paramhdr *ph;
2107 u_int32_t *ippp;
2109 oper->m_len =
2110 sizeof(struct sctp_paramhdr) +
2111 sizeof(*ippp);
2112 ph = mtod(oper,
2113 struct sctp_paramhdr *);
2114 ph->param_type =
2115 htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
2116 ph->param_length =
2117 htons(oper->m_len);
2118 ippp = (u_int32_t *)(ph + 1);
2119 *ippp = htonl(0x20000003);
2121 sctp_abort_an_association(stcb->sctp_ep,
2122 stcb, SCTP_PEER_FAULTY, oper);
2124 *abort_flag = 1;
2125 sctp_pegs[SCTP_DROP_FRAG]++;
2126 return (0);
2129 } else {
2130 if (!TAILQ_EMPTY(&asoc->reasmqueue)) {
2132 * Reassembly queue is NOT empty;
2133 * validate that this chk does not need to
2134 * be in the reassembly queue. If it does then
2135 * our peer is broken or evil.
2137 if (sctp_does_chk_belong_to_reasm(asoc, chk)) {
2138 MGET(oper, MB_DONTWAIT, MT_DATA);
2139 if (oper) {
2140 struct sctp_paramhdr *ph;
2141 u_int32_t *ippp;
2143 oper->m_len =
2144 sizeof(struct sctp_paramhdr) +
2145 sizeof(*ippp);
2146 ph = mtod(oper,
2147 struct sctp_paramhdr *);
2148 ph->param_type =
2149 htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
2150 ph->param_length =
2151 htons(oper->m_len);
2152 ippp = (u_int32_t *)(ph + 1);
2153 *ippp = htonl(0x20000004);
2155 sctp_abort_an_association(stcb->sctp_ep,
2156 stcb, SCTP_PEER_FAULTY, oper);
2158 *abort_flag = 1;
2159 sctp_pegs[SCTP_DROP_FRAG]++;
2160 return (0);
2164 if (chk->rec.data.rcv_flags & SCTP_DATA_UNORDERED) {
2165 /* queue directly into socket buffer */
2166 sctp_deliver_data(stcb, asoc, chk, 0);
2167 sctp_sorwakeup(stcb->sctp_ep, stcb->sctp_socket);
2168 } else {
2169 /* Special check for when streams are resetting.
2170 * We could be smarter about this and check the
2171 * actual stream to see if it is not being reset.. that
2172 * way we would not create a HOLB when amongst streams
2173 * being reset and those not being reset.
2175 * We take complete messages that have a stream reset
2176 * intervening (aka the TSN is after where our cum-ack needs
2177 * to be) off and put them on a pending_reply_queue. The
2178 * reassembly ones we do not have to worry about since
2179 * they are all sorted and processed by TSN order. It
2180 * is only the singletons I must worry about.
2182 if ((asoc->pending_reply) &&
2183 ((compare_with_wrap(tsn, ntohl(asoc->pending_reply->reset_at_tsn), MAX_TSN)) ||
2184 (tsn == ntohl(asoc->pending_reply->reset_at_tsn)))
2186 /* yep it's past where we need to reset... go ahead and
2187 * queue it.
2189 TAILQ_INSERT_TAIL(&asoc->pending_reply_queue , chk, sctp_next);
2190 } else {
2191 sctp_queue_data_to_stream(stcb, asoc, chk, abort_flag);
2194 } else {
2195 /* Into the re-assembly queue */
2196 sctp_queue_data_for_reasm(stcb, asoc, chk, abort_flag);
2197 if (*abort_flag) {
2198 sctp_pegs[SCTP_DROP_FRAG]++;
2199 return (0);
2202 if (compare_with_wrap(tsn, asoc->highest_tsn_inside_map, MAX_TSN)) {
2203 /* we have a new high score */
2204 asoc->highest_tsn_inside_map = tsn;
2205 #ifdef SCTP_MAP_LOGGING
2206 sctp_log_map(0, 2, asoc->highest_tsn_inside_map, SCTP_MAP_SLIDE_RESULT);
2207 #endif
2209 finish_express_del:
2210 if (last_chunk) {
2211 *m = NULL;
2213 sctp_pegs[SCTP_PEG_TSNS_RCVD]++;
2214 /* Set it present please */
2215 #ifdef SCTP_STR_LOGGING
2216 sctp_log_strm_del_alt(tsn, strmseq, SCTP_STR_LOG_FROM_MARK_TSN);
2217 #endif
2218 #ifdef SCTP_MAP_LOGGING
2219 sctp_log_map(asoc->mapping_array_base_tsn, asoc->cumulative_tsn,
2220 asoc->highest_tsn_inside_map, SCTP_MAP_PREPARE_SLIDE);
2221 #endif
2222 SCTP_SET_TSN_PRESENT(asoc->mapping_array, gap);
2223 return (1);
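/*
 * sctp_sack_check() recomputes the cumulative TSN from the mapping
 * array, clears or slides the array when it can, finishes any
 * pending stream-reset work, and then either sends a SACK right
 * away or just (re)arms the delayed-ack timer; in SHUTDOWN-SENT it
 * sends a SHUTDOWN along with the SACK.
 */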
2226 void
2227 sctp_sack_check(struct sctp_tcb *stcb, int ok_to_sack, int was_a_gap, int *abort_flag)
2230 * Now we also need to check the mapping array in a couple of ways.
2231 * 1) Did we move the cum-ack point?
2233 struct sctp_association *asoc;
2234 int i, at;
2235 int m_size, all_ones;
2236 int slide_from, slide_end, lgap, distance;
2237 #ifdef SCTP_MAP_LOGGING
2238 uint32_t old_cumack, old_base, old_highest;
2239 unsigned char aux_array[64];
2240 #endif
2242 asoc = &stcb->asoc;
2243 at = 0;
2245 #ifdef SCTP_MAP_LOGGING
2246 old_cumack = asoc->cumulative_tsn;
2247 old_base = asoc->mapping_array_base_tsn;
2248 old_highest = asoc->highest_tsn_inside_map;
2249 if (asoc->mapping_array_size < 64)
2250 memcpy(aux_array, asoc->mapping_array,
2251 asoc->mapping_array_size);
2252 else
2253 memcpy(aux_array, asoc->mapping_array, 64);
2254 #endif
2257 * We could probably improve this a small bit by calculating the
2258 * offset of the current cum-ack as the starting point.
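/*
 * Example: with mapping_array_base_tsn = 100 and bits 0..2 set but
 * bit 3 clear, the loop below stops at i = 3 and cumulative_tsn
 * becomes 100 + (3 - 1) = 102, i.e. TSNs 100..102 were received.
 */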
2260 all_ones = 1;
2261 m_size = stcb->asoc.mapping_array_size << 3;
2262 for (i = 0; i < m_size; i++) {
2263 if (!SCTP_IS_TSN_PRESENT(asoc->mapping_array, i)) {
2265 * Ok we found the first place that we are
2266 * missing a TSN.
2268 at = i;
2269 all_ones = 0;
2270 asoc->cumulative_tsn = asoc->mapping_array_base_tsn +
2271 (i - 1);
2272 break;
2275 if (compare_with_wrap(asoc->cumulative_tsn,
2276 asoc->highest_tsn_inside_map,
2277 MAX_TSN)) {
2278 panic("huh, cumack greater than high-tsn in map");
2280 if (all_ones ||
2281 (asoc->cumulative_tsn == asoc->highest_tsn_inside_map && at >= 8)) {
2282 /* The complete array was completed by a single FR */
2283 /* highest becomes the cum-ack */
2284 int clr;
2285 asoc->cumulative_tsn = asoc->highest_tsn_inside_map;
2286 /* clear the array */
2287 if (all_ones)
2288 clr = asoc->mapping_array_size;
2289 else {
2290 clr = (at >> 3) + 1;
2292 * this should be the all-ones case
2293 * but just in case :>
2295 if (clr > asoc->mapping_array_size)
2296 clr = asoc->mapping_array_size;
2298 memset(asoc->mapping_array, 0, clr);
2299 /* base becomes one ahead of the cum-ack */
2300 asoc->mapping_array_base_tsn = asoc->cumulative_tsn + 1;
2301 #ifdef SCTP_MAP_LOGGING
2302 sctp_log_map(old_base, old_cumack, old_highest,
2303 SCTP_MAP_PREPARE_SLIDE);
2304 sctp_log_map(asoc->mapping_array_base_tsn, asoc->cumulative_tsn,
2305 asoc->highest_tsn_inside_map, SCTP_MAP_SLIDE_CLEARED);
2306 #endif
2307 } else if (at >= 8) {
2308 /* we can slide the mapping array down */
2309 /* Calculate the new byte position we can move down */
2310 slide_from = at >> 3;
2311 /* now calculate the ceiling of the move using our highest TSN value */
2312 if (asoc->highest_tsn_inside_map >= asoc->mapping_array_base_tsn) {
2313 lgap = asoc->highest_tsn_inside_map -
2314 asoc->mapping_array_base_tsn;
2315 } else {
2316 lgap = (MAX_TSN - asoc->mapping_array_base_tsn) +
2317 asoc->highest_tsn_inside_map + 1;
2319 slide_end = lgap >> 3;
2320 if (slide_end < slide_from) {
2321 panic("impossible slide");
2323 distance = (slide_end-slide_from) + 1;
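/*
 * Example: at = 20 and lgap = 50 give slide_from = 2 and
 * slide_end = 6, so distance = 5 bytes are copied down and
 * mapping_array_base_tsn later advances by slide_from * 8 = 16 TSNs.
 */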
2324 #ifdef SCTP_MAP_LOGGING
2325 sctp_log_map(old_base, old_cumack, old_highest,
2326 SCTP_MAP_PREPARE_SLIDE);
2327 sctp_log_map((uint32_t)slide_from, (uint32_t)slide_end,
2328 (uint32_t)lgap, SCTP_MAP_SLIDE_FROM);
2329 #endif
2330 if (distance + slide_from > asoc->mapping_array_size ||
2331 distance < 0) {
2332 #ifdef SCTP_DEBUG
2333 if (sctp_debug_on & SCTP_DEBUG_INDATA1) {
2334 kprintf("Ugh bad addition.. you can't hrumpp!\n");
2336 #endif
2338 * Here we do NOT slide forward the array so that
2339 * hopefully when more data comes in to fill it up
2340 * we will be able to slide it forward. Really
2341 * I don't think this should happen :-0
2344 #ifdef SCTP_MAP_LOGGING
2345 sctp_log_map((uint32_t)distance, (uint32_t)slide_from,
2346 (uint32_t)asoc->mapping_array_size,
2347 SCTP_MAP_SLIDE_NONE);
2348 #endif
2349 } else {
2350 int ii;
2351 for (ii = 0; ii < distance; ii++) {
2352 asoc->mapping_array[ii] =
2353 asoc->mapping_array[slide_from + ii];
2355 for (ii = distance; ii <= slide_end; ii++) {
2356 asoc->mapping_array[ii] = 0;
2358 asoc->mapping_array_base_tsn += (slide_from << 3);
2359 #ifdef SCTP_MAP_LOGGING
2360 sctp_log_map(asoc->mapping_array_base_tsn,
2361 asoc->cumulative_tsn, asoc->highest_tsn_inside_map,
2362 SCTP_MAP_SLIDE_RESULT);
2363 #endif
2367 /* check the special flag for stream resets */
2368 if ((asoc->pending_reply) &&
2369 ((compare_with_wrap((asoc->cumulative_tsn+1), ntohl(asoc->pending_reply->reset_at_tsn), MAX_TSN)) ||
2370 ((asoc->cumulative_tsn+1) == ntohl(asoc->pending_reply->reset_at_tsn)))
2372 /* we have finished working through the backlogged TSNs; now
2373 * time to reset streams.
2374 * 1: call reset function.
2375 * 2: free pending_reply space
2376 * 3: distribute any chunks in pending_reply_queue.
2378 struct sctp_tmit_chunk *chk;
2379 sctp_handle_stream_reset_response(stcb, asoc->pending_reply);
2380 FREE(asoc->pending_reply, M_PCB);
2381 asoc->pending_reply = NULL;
2382 chk = TAILQ_FIRST(&asoc->pending_reply_queue);
2383 while (chk) {
2384 TAILQ_REMOVE(&asoc->pending_reply_queue, chk, sctp_next);
2385 sctp_queue_data_to_stream(stcb, asoc, chk, abort_flag);
2386 if (*abort_flag) {
2387 return;
2389 chk = TAILQ_FIRST(&asoc->pending_reply_queue);
2393 * Now we need to see if we need to queue a sack or just start
2394 * the timer (if allowed).
2396 if (ok_to_sack) {
2397 if (SCTP_GET_STATE(asoc) == SCTP_STATE_SHUTDOWN_SENT) {
2399 * Ok, special case: in the SHUTDOWN-SENT state
2400 * we make sure the SACK timer is off and
2401 * instead send a SHUTDOWN and a SACK
2403 if (callout_pending(&stcb->asoc.dack_timer.timer)) {
2404 sctp_timer_stop(SCTP_TIMER_TYPE_RECV,
2405 stcb->sctp_ep, stcb, NULL);
2407 #ifdef SCTP_DEBUG
2408 if (sctp_debug_on & SCTP_DEBUG_OUTPUT4) {
2409 kprintf("%s:%d sends a shutdown\n",
2410 __FILE__,
2411 __LINE__
2414 #endif
2415 sctp_send_shutdown(stcb, stcb->asoc.primary_destination);
2416 sctp_send_sack(stcb);
2417 } else {
2418 int is_a_gap;
2419 /* is there a gap now ? */
2420 is_a_gap = compare_with_wrap(stcb->asoc.highest_tsn_inside_map,
2421 stcb->asoc.cumulative_tsn, MAX_TSN);
2422 if ((stcb->asoc.first_ack_sent == 0) || /* First time we send a sack */
2423 ((was_a_gap) && (is_a_gap == 0)) || /* was a gap, but no longer is one */
2424 (stcb->asoc.numduptsns) || /* we have dup's */
2425 (is_a_gap) || /* is still a gap */
2426 (callout_pending(&stcb->asoc.dack_timer.timer)) /* timer was up; second packet */
2429 * Ok we must build a SACK since the timer
2430 * is pending, we got our first packet OR
2431 * there are gaps or duplicates.
2433 stcb->asoc.first_ack_sent = 1;
2434 sctp_send_sack(stcb);
2435 /* The sending will stop the timer */
2436 } else {
2437 sctp_timer_start(SCTP_TIMER_TYPE_RECV,
2438 stcb->sctp_ep, stcb, NULL);
2444 void
2445 sctp_service_queues(struct sctp_tcb *stcb, struct sctp_association *asoc, int hold_locks)
2447 struct sctp_tmit_chunk *chk;
2448 int tsize, cntDel;
2449 u_int16_t nxt_todel;
2451 cntDel = 0;
2452 if (asoc->fragmented_delivery_inprogress) {
2453 sctp_service_reassembly(stcb, asoc, hold_locks);
2455 /* Can we proceed further, i.e. the PD-API is complete */
2456 if (asoc->fragmented_delivery_inprogress) {
2457 /* no */
2458 return;
2462 * Yes, reassembly delivery is no longer in progress; see if we
2463 * have some on the sb hold queue.
2465 do {
2466 if (stcb->sctp_socket->so_rcv.ssb_cc >= stcb->sctp_socket->so_rcv.ssb_hiwat) {
2467 if (cntDel == 0)
2468 sctp_sorwakeup(stcb->sctp_ep, stcb->sctp_socket);
2469 break;
2471 /* If deliver_data says no we must stop */
2472 if (sctp_deliver_data(stcb, asoc, (struct sctp_tmit_chunk *)NULL, hold_locks) == 0)
2473 break;
2474 cntDel++;
2475 chk = TAILQ_FIRST(&asoc->delivery_queue);
2476 } while (chk);
2477 if (cntDel) {
2478 sctp_sorwakeup(stcb->sctp_ep, stcb->sctp_socket);
2481 * Now is there some other chunk I can deliver
2482 * from the reassembly queue.
2484 chk = TAILQ_FIRST(&asoc->reasmqueue);
2485 if (chk == NULL) {
2486 asoc->size_on_reasm_queue = 0;
2487 asoc->cnt_on_reasm_queue = 0;
2488 return;
2490 nxt_todel = asoc->strmin[chk->rec.data.stream_number].last_sequence_delivered + 1;
2491 if ((chk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) &&
2492 ((nxt_todel == chk->rec.data.stream_seq) ||
2493 (chk->rec.data.rcv_flags & SCTP_DATA_UNORDERED))) {
2495 * Yep the first one is here. We set up to
2496 * start reception by backing down the TSN,
2497 * just in case we can't deliver.
2501 * Before we start, though, the delivery queue must
2502 * be empty and either the whole message must be here
2503 * or at least a quarter of the socket buffer max's
2504 * worth of it.
2506 if (TAILQ_EMPTY(&asoc->delivery_queue) &&
2507 (sctp_is_all_msg_on_reasm(asoc, &tsize) ||
2508 (asoc->size_on_reasm_queue >=
2509 (stcb->sctp_socket->so_rcv.ssb_hiwat >> 2) && tsize))) {
2510 asoc->fragmented_delivery_inprogress = 1;
2511 asoc->tsn_last_delivered = chk->rec.data.TSN_seq-1;
2512 asoc->str_of_pdapi = chk->rec.data.stream_number;
2513 asoc->ssn_of_pdapi = chk->rec.data.stream_seq;
2514 asoc->fragment_flags = chk->rec.data.rcv_flags;
2515 sctp_service_reassembly(stcb, asoc, hold_locks);
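/*
 * sctp_process_data() walks every DATA chunk in the packet: it
 * validates each chunk length, hands the chunk to
 * sctp_process_a_data_chunk(), then services the reassembly and
 * delivery queues and decides whether to SACK now or start the
 * delayed-ack timer. It returns 0 normally and non-zero when
 * processing had to stop (e.g. the association was aborted).
 */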
2520 int
2521 sctp_process_data(struct mbuf **mm, int iphlen, int *offset, int length,
2522 struct sctphdr *sh, struct sctp_inpcb *inp, struct sctp_tcb *stcb,
2523 struct sctp_nets *net, u_int32_t *high_tsn)
2525 struct sctp_data_chunk *ch, chunk_buf;
2526 struct sctp_association *asoc;
2527 int num_chunks = 0; /* number of DATA chunks processed */
2528 int chk_length, break_flag, last_chunk;
2529 int abort_flag = 0, was_a_gap = 0;
2530 struct mbuf *m;
2532 /* set the rwnd */
2533 sctp_set_rwnd(stcb, &stcb->asoc);
2535 m = *mm;
2536 asoc = &stcb->asoc;
2537 if (compare_with_wrap(stcb->asoc.highest_tsn_inside_map,
2538 stcb->asoc.cumulative_tsn, MAX_TSN)) {
2539 /* there was a gap before this data was processed */
2540 was_a_gap = 1;
2543 * setup where we got the last DATA packet from for
2544 * any SACK that may need to go out. Don't bump
2545 * the net. This is done ONLY when a chunk
2546 * is assigned.
2548 asoc->last_data_chunk_from = net;
2551 * Now before we proceed we must figure out if this
2552 * is a wasted cluster... i.e. it is a small packet
2553 * sent in and yet the driver underneath allocated a
2554 * full cluster for it. If so we must copy it to a
2555 * smaller mbuf and free up the cluster mbuf. This
2556 * will help with cluster starvation.
2558 if (m->m_len < (long)MHLEN && m->m_next == NULL) {
2559 /* we only handle mbufs that are singletons.. not chains */
2560 #ifdef __DragonFly__
2561 if ((*mm)->m_flags & M_PKTHDR)
2562 MGETHDR(m, MB_DONTWAIT, MT_HEADER);
2563 else
2564 #endif
2565 MGET(m, MB_DONTWAIT, MT_DATA);
2566 if (m) {
2567 /* ok lets see if we can copy the data up */
2568 caddr_t *from, *to;
2570 if ((*mm)->m_flags & M_PKTHDR) {
2571 /* got to copy the header first */
2572 #ifdef __APPLE__
2573 M_COPY_PKTHDR(m, (*mm));
2574 #else
2575 M_MOVE_PKTHDR(m, (*mm));
2576 #endif
2578 /* get the pointers and copy */
2579 to = mtod(m, caddr_t *);
2580 from = mtod((*mm), caddr_t *);
2581 memcpy(to, from, (*mm)->m_len);
2582 /* copy the length and free up the old */
2583 m->m_len = (*mm)->m_len;
2584 sctp_m_freem(*mm);
2585 /* success, hand back the copy */
2586 *mm = m;
2587 } else {
2588 /* We are in trouble in the mbuf world .. yikes */
2589 m = *mm;
2592 /* get pointer to the first chunk header */
2593 ch = (struct sctp_data_chunk *)sctp_m_getptr(m, *offset,
2594 sizeof(chunk_buf), (u_int8_t *)&chunk_buf);
2595 if (ch == NULL) {
2596 kprintf(" ... it's short\n");
2597 return (1);
2600 * process all DATA chunks...
2603 #ifdef SCTP_DEBUG
2604 if (sctp_debug_on & SCTP_DEBUG_INPUT1) {
2605 kprintf("In process data off:%d length:%d iphlen:%d ch->type:%d\n",
2606 *offset, length, iphlen, (int)ch->ch.chunk_type);
2608 #endif
2610 *high_tsn = asoc->cumulative_tsn;
2611 break_flag = 0;
2612 while (ch->ch.chunk_type == SCTP_DATA) {
2613 /* validate chunk length */
2614 chk_length = ntohs(ch->ch.chunk_length);
2615 if ((size_t)chk_length < sizeof(struct sctp_data_chunk) + 1 ||
2616 length - *offset < chk_length) {
2618 * Need to send an abort since we had an invalid
2619 * data chunk.
2621 struct mbuf *op_err;
2622 MGET(op_err, MB_DONTWAIT, MT_DATA);
2623 if (op_err) {
2624 struct sctp_paramhdr *ph;
2625 u_int32_t *ippp;
2627 op_err->m_len = sizeof(struct sctp_paramhdr) +
2628 sizeof(*ippp);
2629 ph = mtod(op_err, struct sctp_paramhdr *);
2630 ph->param_type =
2631 htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
2632 ph->param_length = htons(op_err->m_len);
2633 ippp = (u_int32_t *)(ph + 1);
2634 *ippp = htonl(0x30000001);
2636 sctp_abort_association(inp, stcb, m, iphlen, sh,
2637 op_err);
2638 return (2);
2640 #ifdef SCTP_DEBUG
2641 if (sctp_debug_on & SCTP_DEBUG_INPUT1) {
2642 kprintf("A chunk of len:%d to process (tot:%d)\n",
2643 chk_length, length - *offset);
2645 #endif
2647 #ifdef SCTP_AUDITING_ENABLED
2648 sctp_audit_log(0xB1, 0);
2649 #endif
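/*
 * A chunk that ends exactly at the end of the packet can be stolen
 * (consumed in place) by sctp_process_a_data_chunk() instead of
 * being copied.
 */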
2650 if (SCTP_SIZE32(chk_length) == length - *offset) {
2651 last_chunk = 1;
2652 } else {
2653 last_chunk = 0;
2655 if (sctp_process_a_data_chunk(stcb, asoc, mm, *offset, ch,
2656 chk_length, net, high_tsn, &abort_flag, &break_flag,
2657 last_chunk)) {
2658 num_chunks++;
2659 #ifdef SCTP_DEBUG
2660 if (sctp_debug_on & SCTP_DEBUG_INPUT1) {
2661 kprintf("Now incr num_chunks to %d\n",
2662 num_chunks);
2664 #endif
2666 if (abort_flag)
2667 return (2);
2669 if (break_flag) {
2671 * Set because we ran out of rwnd space and have no
2672 * drop-report space left.
2674 break;
2677 *offset += SCTP_SIZE32(chk_length);
2678 if (*offset >= length) {
2679 /* no more data left in the mbuf chain */
2680 break;
2682 ch = (struct sctp_data_chunk *)sctp_m_getptr(m, *offset,
2683 sizeof(chunk_buf), (u_int8_t *)&chunk_buf);
2684 if (ch == NULL) {
2685 *offset = length;
2686 break;
2688 } /* while */
2689 if (break_flag) {
2691 * we need to report rwnd overrun drops.
2693 sctp_send_packet_dropped(stcb, net, *mm, iphlen, 0);
2695 if (num_chunks) {
2697 * Did we get data? If so, update the time for
2698 * auto-close and give peer credit for being
2699 * alive.
2701 sctp_pegs[SCTP_DATA_DG_RECV]++;
2702 stcb->asoc.overall_error_count = 0;
2703 SCTP_GETTIME_TIMEVAL(&stcb->asoc.time_last_rcvd);
2705 /* now service all of the reassm queue and delivery queue */
2706 sctp_service_queues(stcb, asoc, 0);
2707 if (SCTP_GET_STATE(asoc) == SCTP_STATE_SHUTDOWN_SENT) {
2709 * Assure that we ack right away by making
2710 * sure that a d-ack timer is running. So the
2711 * sack_check will send a sack.
2713 sctp_timer_start(SCTP_TIMER_TYPE_RECV, stcb->sctp_ep, stcb,
2714 net);
2716 /* Start a sack timer or QUEUE a SACK for sending */
2717 sctp_sack_check(stcb, 1, was_a_gap, &abort_flag);
2718 if (abort_flag)
2719 return (2);
2721 return (0);
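/*
 * sctp_handle_segments() walks the gap-ack blocks of a SACK. Each
 * block carries 16-bit start/end offsets relative to the cumulative
 * TSN, so a block acks TSNs (last_tsn + start) through
 * (last_tsn + end); every sent chunk in that range is pulled out of
 * flight (if it still was) and set to SCTP_DATAGRAM_MARKED.
 */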
2724 static void
2725 sctp_handle_segments(struct sctp_tcb *stcb, struct sctp_association *asoc,
2726 struct sctp_sack_chunk *ch, u_long last_tsn, u_long *biggest_tsn_acked,
2727 u_long *biggest_newly_acked_tsn, int num_seg, int *ecn_seg_sums)
2729 /************************************************/
2730 /* process fragments and update sendqueue */
2731 /************************************************/
2732 struct sctp_sack *sack;
2733 struct sctp_gap_ack_block *frag;
2734 struct sctp_tmit_chunk *tp1;
2735 int i;
2736 unsigned int j;
2737 #ifdef SCTP_FR_LOGGING
2738 int num_frs=0;
2739 #endif
2740 uint16_t frag_strt, frag_end, primary_flag_set;
2741 u_long last_frag_high;
2743 if (asoc->primary_destination->dest_state & SCTP_ADDR_SWITCH_PRIMARY) {
2744 primary_flag_set = 1;
2745 } else {
2746 primary_flag_set = 0;
2749 sack = &ch->sack;
2750 frag = (struct sctp_gap_ack_block *)((caddr_t)sack +
2751 sizeof(struct sctp_sack));
2752 tp1 = NULL;
2753 last_frag_high = 0;
2754 for (i = 0; i < num_seg; i++) {
2755 frag_strt = ntohs(frag->start);
2756 frag_end = ntohs(frag->end);
2757 /* some sanity checks on the fragment offsets */
2758 if (frag_strt > frag_end) {
2759 /* this one is malformed, skip */
2760 frag++;
2761 continue;
2763 if (compare_with_wrap((frag_end+last_tsn), *biggest_tsn_acked,
2764 MAX_TSN))
2765 *biggest_tsn_acked = frag_end+last_tsn;
2767 /* mark acked dgs and find out the highest TSN being acked */
2768 if (tp1 == NULL) {
2769 tp1 = TAILQ_FIRST(&asoc->sent_queue);
2771 /* save the locations of the last frags */
2772 last_frag_high = frag_end + last_tsn;
2773 } else {
2775 * now let's see if we need to reset the queue
2776 * due to an out-of-order SACK fragment
2778 if (compare_with_wrap(frag_strt+last_tsn,
2779 last_frag_high, MAX_TSN)) {
2781 * if the new frag starts after the last TSN
2782 * frag covered, we are ok
2783 * and this one is beyond the last one
2786 } else {
2788 * ok, they have reset us, so we need to reset
2789 * the queue; this will cause extra hunting, but
2790 * hey, they chose the performance
2791 * hit when they failed to order their gaps..
2793 tp1 = TAILQ_FIRST(&asoc->sent_queue);
2795 last_frag_high = frag_end + last_tsn;
2797 for (j = frag_strt + last_tsn; j <= frag_end + last_tsn; j++) {
2798 while (tp1) {
2799 #ifdef SCTP_FR_LOGGING
2800 if (tp1->rec.data.doing_fast_retransmit)
2801 num_frs++;
2802 #endif
2804 if (tp1->rec.data.TSN_seq == j) {
2805 if (tp1->sent != SCTP_DATAGRAM_UNSENT) {
2806 /* must be held until cum-ack passes */
2807 /* ECN Nonce: Add the nonce value to the sender's nonce sum */
2808 if (tp1->sent < SCTP_DATAGRAM_ACKED) {
2810 * If it is less than
2811 * ACKED, it is now
2812 * no longer in flight.
2813 * Higher values may
2814 * already be set via
2815 * previous Gap Ack
2816 * Blocks...
2817 * i.e. ACKED or MARKED.
2819 if (compare_with_wrap(tp1->rec.data.TSN_seq,
2820 *biggest_newly_acked_tsn,
2821 MAX_TSN)) {
2822 *biggest_newly_acked_tsn =
2823 tp1->rec.data.TSN_seq;
2825 tp1->whoTo->flight_size -= tp1->book_size;
2826 if (tp1->whoTo->flight_size < 0) {
2827 tp1->whoTo->flight_size = 0;
2829 asoc->total_flight -=
2830 tp1->book_size;
2832 if (asoc->total_flight < 0) {
2833 asoc->total_flight = 0;
2836 asoc->total_flight_count--;
2837 if (asoc->total_flight_count < 0) {
2838 asoc->total_flight_count = 0;
2841 if (tp1->snd_count < 2) {
2842 /* True non-retransmitted chunk */
2843 tp1->whoTo->net_ack2 +=
2844 tp1->send_size;
2846 /* update RTO too? */
2847 if (tp1->do_rtt) {
2848 tp1->whoTo->RTO =
2849 sctp_calculate_rto(stcb,
2850 asoc,
2851 tp1->whoTo,
2852 &tp1->sent_rcv_time);
2853 tp1->whoTo->rto_pending = 0;
2854 tp1->do_rtt = 0;
2858 if (tp1->sent <= SCTP_DATAGRAM_RESEND &&
2859 tp1->sent != SCTP_DATAGRAM_UNSENT &&
2860 compare_with_wrap(tp1->rec.data.TSN_seq,
2861 asoc->this_sack_highest_gap,
2862 MAX_TSN)) {
2863 asoc->this_sack_highest_gap =
2864 tp1->rec.data.TSN_seq;
2865 if (primary_flag_set) {
2866 tp1->whoTo->cacc_saw_newack = 1;
2869 if (tp1->sent == SCTP_DATAGRAM_RESEND) {
2870 #ifdef SCTP_DEBUG
2871 if (sctp_debug_on &
2872 SCTP_DEBUG_INDATA3) {
2873 kprintf("Hmm. one that is in RESEND that is now ACKED\n");
2875 #endif
2876 asoc->sent_queue_retran_cnt--;
2877 #ifdef SCTP_AUDITING_ENABLED
2878 sctp_audit_log(0xB2,
2879 (asoc->sent_queue_retran_cnt & 0x000000ff));
2880 #endif
2882 if (asoc->sent_queue_retran_cnt < 0) {
2883 kprintf("huh3 retran went negative?\n");
2884 #ifdef SCTP_AUDITING_ENABLED
2885 sctp_auditing(30,
2886 stcb->sctp_ep, stcb,
2887 NULL);
2888 #else
2889 asoc->sent_queue_retran_cnt = 0;
2890 #endif
2893 (*ecn_seg_sums) += tp1->rec.data.ect_nonce;
2894 (*ecn_seg_sums) &= SCTP_SACK_NONCE_SUM;
2895 tp1->sent = SCTP_DATAGRAM_MARKED;
2897 break;
2898 } /* if (tp1->TSN_seq == j) */
2899 if (compare_with_wrap(tp1->rec.data.TSN_seq, j,
2900 MAX_TSN))
2901 break;
2902 tp1 = TAILQ_NEXT(tp1, sctp_next);
2903 }/* end while (tp1) */
2904 } /* end for (j = fragStart */
2905 frag++; /* next one */
2907 #ifdef SCTP_FR_LOGGING
2908 if (num_frs)
2909 sctp_log_fr(*biggest_tsn_acked, *biggest_newly_acked_tsn,
2910 last_tsn, SCTP_FR_LOG_BIGGEST_TSNS);
2911 #endif
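/*
 * sctp_check_for_revoked(): a chunk above the cum-ack that a prior
 * SACK reported (state ACKED) but that this SACK no longer covers
 * has been revoked, so it drops back to SENT; chunks re-acked this
 * round move from MARKED to ACKED.
 */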
2914 static void
2915 sctp_check_for_revoked(struct sctp_association *asoc, u_long cum_ack,
2916 u_long biggest_tsn_acked)
2918 struct sctp_tmit_chunk *tp1;
2919 int tot_revoked=0;
2921 tp1 = TAILQ_FIRST(&asoc->sent_queue);
2922 while (tp1) {
2923 if (compare_with_wrap(tp1->rec.data.TSN_seq, cum_ack,
2924 MAX_TSN)) {
2926 * ok this guy is either ACKED or MARKED. If it is ACKED
2927 * it has been previously acked but not this time i.e.
2928 * revoked. If it is MARKED it was ACK'ed again.
2930 if (tp1->sent == SCTP_DATAGRAM_ACKED) {
2931 /* it has been revoked */
2933 * We do NOT add back to flight size here since
2934 * it is really NOT in flight. Resend (when/if
2935 * it occurs) will add to flight size.
2937 tp1->sent = SCTP_DATAGRAM_SENT;
2938 tot_revoked++;
2939 } else if (tp1->sent == SCTP_DATAGRAM_MARKED) {
2940 /* it has been re-acked in this SACK */
2941 tp1->sent = SCTP_DATAGRAM_ACKED;
2944 if (compare_with_wrap(tp1->rec.data.TSN_seq, biggest_tsn_acked,
2945 MAX_TSN)) {
2946 /* above the sack */
2947 break;
2949 if (tp1->sent == SCTP_DATAGRAM_UNSENT)
2950 break;
2951 tp1 = TAILQ_NEXT(tp1, sctp_next);
2953 if (tot_revoked > 0) {
2954 /* Setup the ecn nonce re-sync point. We
2955 * do this since once data is revoked
2956 * we begin to retransmit things, which
2957 * do NOT have the ECN bits set. This means
2958 * we are now out of sync and must wait until
2959 * we get back in sync with the peer to
2960 * check ECN bits.
2962 tp1 = TAILQ_FIRST(&asoc->send_queue);
2963 if (tp1 == NULL) {
2964 asoc->nonce_resync_tsn = asoc->sending_seq;
2965 } else {
2966 asoc->nonce_resync_tsn = tp1->rec.data.TSN_seq;
2968 asoc->nonce_wait_for_ecne = 0;
2969 asoc->nonce_sum_check = 0;
2974 extern int sctp_peer_chunk_oh;
2976 static void
2977 sctp_strike_gap_ack_chunks(struct sctp_tcb *stcb, struct sctp_association *asoc,
2978 u_long biggest_tsn_acked, int strike_enabled,
2979 u_long biggest_tsn_newly_acked, int accum_moved)
2981 struct sctp_tmit_chunk *tp1;
2982 int strike_flag=0;
2983 struct timeval now;
2984 int tot_retrans=0;
2985 u_int32_t sending_seq;
2986 int primary_switch_active = 0;
2987 int double_switch_active = 0;
2989 /* select the sending_seq, this is
2990 * either the next thing ready to
2991 * be sent but not transmitted, OR,
2992 * the next seq we assign.
2994 tp1 = TAILQ_FIRST(&stcb->asoc.send_queue);
2995 if (tp1 == NULL) {
2996 sending_seq = asoc->sending_seq;
2997 } else {
2998 sending_seq = tp1->rec.data.TSN_seq;
3001 if (asoc->primary_destination->dest_state & SCTP_ADDR_SWITCH_PRIMARY) {
3002 primary_switch_active = 1;
3004 if (asoc->primary_destination->dest_state & SCTP_ADDR_DOUBLE_SWITCH) {
3005 double_switch_active = 1;
3007 if (stcb->asoc.peer_supports_prsctp ) {
3008 SCTP_GETTIME_TIMEVAL(&now);
3010 tp1 = TAILQ_FIRST(&asoc->sent_queue);
3011 while (tp1) {
3012 strike_flag=0;
3013 if (compare_with_wrap(tp1->rec.data.TSN_seq, biggest_tsn_acked,
3014 MAX_TSN) ||
3015 tp1->sent == SCTP_DATAGRAM_UNSENT) {
3016 /* done */
3017 break;
3019 if ((tp1->flags & (SCTP_PR_SCTP_ENABLED|SCTP_PR_SCTP_BUFFER)) ==
3020 SCTP_PR_SCTP_ENABLED &&
3021 tp1->sent < SCTP_DATAGRAM_ACKED) {
3022 /* Is it expired? */
3023 #ifndef __FreeBSD__
3024 if (timercmp(&now, &tp1->rec.data.timetodrop, >))
3025 #else
3026 if (timevalcmp(&now, &tp1->rec.data.timetodrop, >))
3027 #endif
3029 /* Yes so drop it */
3030 if (tp1->data != NULL) {
3031 sctp_release_pr_sctp_chunk(stcb, tp1,
3032 (SCTP_RESPONSE_TO_USER_REQ|SCTP_NOTIFY_DATAGRAM_SENT),
3033 &asoc->sent_queue);
3035 tp1 = TAILQ_NEXT(tp1, sctp_next);
3036 continue;
3040 if (compare_with_wrap(tp1->rec.data.TSN_seq,
3041 asoc->this_sack_highest_gap, MAX_TSN)) {
3042 /* we are beyond the tsn in the sack */
3043 break;
3045 if (tp1->sent >= SCTP_DATAGRAM_RESEND) {
3046 /* either a RESEND, ACKED, or MARKED */
3047 /* skip */
3048 tp1 = TAILQ_NEXT(tp1, sctp_next);
3049 continue;
3051 if (primary_switch_active && (strike_enabled == 0)) {
3052 if (tp1->whoTo != asoc->primary_destination) {
3054 * We can only strike things on the primary if
3055 * the strike_enabled flag is clear
3057 tp1 = TAILQ_NEXT(tp1, sctp_next);
3058 continue;
3060 } else if (primary_switch_active) {
3061 if (tp1->whoTo->cacc_saw_newack == 0) {
3063 * Only one was received but it was NOT
3064 * this one.
3066 tp1 = TAILQ_NEXT(tp1, sctp_next);
3067 continue;
3070 if (double_switch_active &&
3071 (compare_with_wrap(asoc->primary_destination->next_tsn_at_change,
3072 tp1->rec.data.TSN_seq, MAX_TSN))) {
3074 * With a double switch we do NOT mark unless we
3075 * are beyond the switch point.
3077 tp1 = TAILQ_NEXT(tp1, sctp_next);
3078 continue;
3081 * Here we check to see if we have already done a FR
3082 * and if so we see if the biggest TSN we saw in the sack is
3083 * smaller than the recovery point. If so we don't strike the
3084 * tsn... otherwise we CAN strike the TSN.
3086 if (accum_moved && asoc->fast_retran_loss_recovery) {
3088 * Strike the TSN if in fast-recovery and
3089 * cum-ack moved.
3091 tp1->sent++;
3092 } else if (tp1->rec.data.doing_fast_retransmit) {
3094 * For those that have done a FR we must
3095 * take special consideration if we strike. I.e.
3096 * the biggest_newly_acked must be higher
3097 * than the sending_seq at the time we did
3098 * the FR.
3100 #ifdef SCTP_FR_TO_ALTERNATE
3102 * If FR's go to new networks, then we
3103 * must only do this for singly homed asoc's. However
3104 * if the FR's go to the same network (Armando's work)
3105 * then it's ok to FR multiple times.
3107 if (asoc->numnets < 2)
3108 #else
3109 if (1)
3110 #endif
3112 if ((compare_with_wrap(biggest_tsn_newly_acked,
3113 tp1->rec.data.fast_retran_tsn, MAX_TSN)) ||
3114 (biggest_tsn_newly_acked ==
3115 tp1->rec.data.fast_retran_tsn)) {
3117 * Strike the TSN, since this ack is
3118 * beyond where things were when we did
3119 * a FR.
3121 #ifdef SCTP_FR_LOGGING
3122 sctp_log_fr(biggest_tsn_newly_acked,
3123 tp1->rec.data.TSN_seq,
3124 tp1->rec.data.fast_retran_tsn,
3125 SCTP_FR_LOG_STRIKE_CHUNK);
3126 #endif
3127 tp1->sent++;
3128 strike_flag=1;
3131 } else if (compare_with_wrap(tp1->rec.data.TSN_seq,
3132 biggest_tsn_newly_acked, MAX_TSN)) {
3134 * We don't strike these:
3135 * This is the HTNA algorithm, i.e. we don't strike
3136 * if our TSN is larger than the Highest TSN Newly
3137 * Acked.
3140 } else {
3141 /* Strike the TSN */
3142 tp1->sent++;
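/*
 * Each strike above moves tp1->sent one step closer to
 * SCTP_DATAGRAM_RESEND; once it reaches that value the chunk is
 * set up for fast retransmit below.
 */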
3144 if (tp1->sent == SCTP_DATAGRAM_RESEND) {
3145 /* Increment the count to resend */
3146 struct sctp_nets *alt;
3148 #ifdef SCTP_FR_LOGGING
3149 sctp_log_fr(tp1->rec.data.TSN_seq, tp1->snd_count,
3150 0, SCTP_FR_MARKED);
3151 #endif
3152 if (strike_flag) {
3153 /* This is a subsequent FR */
3154 sctp_pegs[SCTP_DUP_FR]++;
3156 asoc->sent_queue_retran_cnt++;
3157 #ifdef SCTP_FR_TO_ALTERNATE
3158 /* Can we find an alternate? */
3159 alt = sctp_find_alternate_net(stcb, tp1->whoTo);
3160 #else
3162 * default behavior is to NOT retransmit FR's
3163 * to an alternate. Armando Caro's paper details
3164 * why.
3166 alt = tp1->whoTo;
3167 #endif
3168 tp1->rec.data.doing_fast_retransmit = 1;
3169 tot_retrans++;
3170 /* mark the sending seq for possible subsequent FR's */
3171 if (TAILQ_EMPTY(&asoc->send_queue)) {
3173 * If the queue of send is empty then its the
3174 * next sequence number that will be assigned so
3175 * we subtract one from this to get the one we
3176 * last sent.
3178 tp1->rec.data.fast_retran_tsn = sending_seq - 1;
3179 } else {
3181 * If there are chunks on the send queue
3182 * (unsent data that has made it from the
3183 * stream queues but not out the door), we take
3184 * the first one (which will have the lowest
3185 * TSN) and subtract one to get the one we last
3186 * sent.
3188 struct sctp_tmit_chunk *ttt;
3189 ttt = TAILQ_FIRST(&asoc->send_queue);
3190 tp1->rec.data.fast_retran_tsn =
3191 ttt->rec.data.TSN_seq - 1;
3193 if (tp1->do_rtt) {
3195 * this guy had a RTO calculation pending on it,
3196 * cancel it
3198 tp1->whoTo->rto_pending = 0;
3199 tp1->do_rtt = 0;
3201 /* fix counts and things */
3203 tp1->whoTo->net_ack++;
3204 tp1->whoTo->flight_size -= tp1->book_size;
3205 if (tp1->whoTo->flight_size < 0) {
3206 tp1->whoTo->flight_size = 0;
3208 #ifdef SCTP_LOG_RWND
3209 sctp_log_rwnd(SCTP_INCREASE_PEER_RWND,
3210 asoc->peers_rwnd , tp1->send_size, sctp_peer_chunk_oh);
3211 #endif
3212 /* add back to the rwnd */
3213 asoc->peers_rwnd += (tp1->send_size + sctp_peer_chunk_oh);
3215 /* remove from the total flight */
3216 asoc->total_flight -= tp1->book_size;
3217 if (asoc->total_flight < 0) {
3218 asoc->total_flight = 0;
3220 asoc->total_flight_count--;
3221 if (asoc->total_flight_count < 0) {
3222 asoc->total_flight_count = 0;
3224 if (alt != tp1->whoTo) {
3225 /* yes, there is an alternate. */
3226 sctp_free_remote_addr(tp1->whoTo);
3227 tp1->whoTo = alt;
3228 alt->ref_count++;
3231 tp1 = TAILQ_NEXT(tp1, sctp_next);
3232 } /* while (tp1) */
3234 if (tot_retrans > 0) {
3235 /* Setup the ecn nonce re-sync point. We
3236 * do this since once we fast-retransmit something,
3237 * we introduce a Karn's rule scenario and
3238 * won't know the totals for the ECN bits.
3240 asoc->nonce_resync_tsn = sending_seq;
3241 asoc->nonce_wait_for_ecne = 0;
3242 asoc->nonce_sum_check = 0;
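/*
 * PR-SCTP: sctp_try_advance_peer_ack_point() walks the sent queue
 * over chunks whose lifetime has expired, advances
 * advanced_peer_ack_point past them, and returns the last chunk
 * skipped (or NULL) so the caller can build a FORWARD-TSN.
 */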
3247 struct sctp_tmit_chunk *
3248 sctp_try_advance_peer_ack_point(struct sctp_tcb *stcb,
3249 struct sctp_association *asoc)
3251 struct sctp_tmit_chunk *tp1, *tp2, *a_adv=NULL;
3252 struct timeval now;
3253 int now_filled=0;
3255 if (asoc->peer_supports_prsctp == 0) {
3256 return (NULL);
3258 tp1 = TAILQ_FIRST(&asoc->sent_queue);
3259 while (tp1) {
3260 if (tp1->sent != SCTP_FORWARD_TSN_SKIP &&
3261 tp1->sent != SCTP_DATAGRAM_RESEND) {
3262 /* no chance to advance, out of here */
3263 break;
3265 if ((tp1->flags & SCTP_PR_SCTP_ENABLED) == 0) {
3267 * We can't fwd-tsn past any that are reliable
3268 * aka retransmitted until the asoc fails.
3270 break;
3272 if (!now_filled) {
3273 SCTP_GETTIME_TIMEVAL(&now);
3274 now_filled = 1;
3276 tp2 = TAILQ_NEXT(tp1, sctp_next);
3278 * now we have a chunk which is marked for another
3279 * retransmission to a PR-stream but may have run
3280 * out of chances already, OR has been
3281 * marked to skip now. Can we skip it if it's a
3282 * resend?
3284 if (tp1->sent == SCTP_DATAGRAM_RESEND &&
3285 (tp1->flags & SCTP_PR_SCTP_BUFFER) == 0) {
3287 * Now is this one marked for resend and its time
3288 * is now up?
3290 #ifndef __FreeBSD__
3291 if (timercmp(&now, &tp1->rec.data.timetodrop, >))
3292 #else
3293 if (timevalcmp(&now, &tp1->rec.data.timetodrop, >))
3294 #endif
3296 /* Yes so drop it */
3297 if (tp1->data) {
3298 sctp_release_pr_sctp_chunk(stcb, tp1,
3299 (SCTP_RESPONSE_TO_USER_REQ|SCTP_NOTIFY_DATAGRAM_SENT),
3300 &asoc->sent_queue);
3302 } else {
3304 * No, we are done when we hit one marked for resend
3305 * whose time has not expired.
3307 break;
3311 * Ok, now if this chunk is marked to drop,
3312 * we can clean up the chunk, advance our peer ack point,
3313 * and check the next chunk.
3315 if (tp1->sent == SCTP_FORWARD_TSN_SKIP) {
3316 /* advance PeerAckPoint goes forward */
3317 asoc->advanced_peer_ack_point = tp1->rec.data.TSN_seq;
3318 a_adv = tp1;
3320 * we don't want to de-queue it here. Just wait for the
3321 * next peer SACK to come with a new cumTSN and then
3322 * the chunk will be dropped in the normal fashion.
3324 if (tp1->data) {
3325 sctp_free_bufspace(stcb, asoc, tp1);
3326 #ifdef SCTP_DEBUG
3327 if (sctp_debug_on & SCTP_DEBUG_OUTPUT2) {
3328 kprintf("--total out:%lu total_mbuf_out:%lu\n",
3329 (u_long)asoc->total_output_queue_size,
3330 (u_long)asoc->total_output_mbuf_queue_size);
3332 #endif
3334 * Maybe there should be another notification
3335 * type
3337 sctp_ulp_notify(SCTP_NOTIFY_DG_FAIL, stcb,
3338 (SCTP_RESPONSE_TO_USER_REQ|SCTP_NOTIFY_DATAGRAM_SENT),
3339 tp1);
3340 sctp_m_freem(tp1->data);
3341 tp1->data = NULL;
3342 sctp_sowwakeup(stcb->sctp_ep,
3343 stcb->sctp_socket);
3345 } else {
3346 /* If it is still in RESEND we can advance no further */
3347 break;
3350 * If we hit here we just dumped tp1, move to next
3351 * tsn on sent queue.
3353 tp1 = tp2;
3355 return (a_adv);
3358 #ifdef SCTP_HIGH_SPEED
3359 struct sctp_hs_raise_drop {
3360 int32_t cwnd;
3361 int32_t increase;
3362 int32_t drop_percent;
3365 #define SCTP_HS_TABLE_SIZE 73
3367 struct sctp_hs_raise_drop sctp_cwnd_adjust[SCTP_HS_TABLE_SIZE] = {
3368 {38,1,50}, /* 0 */
3369 {118,2,44}, /* 1 */
3370 {221,3,41}, /* 2 */
3371 {347,4,38}, /* 3 */
3372 {495,5,37}, /* 4 */
3373 {663,6,35}, /* 5 */
3374 {851,7,34}, /* 6 */
3375 {1058,8,33}, /* 7 */
3376 {1284,9,32}, /* 8 */
3377 {1529,10,31}, /* 9 */
3378 {1793,11,30}, /* 10 */
3379 {2076,12,29}, /* 11 */
3380 {2378,13,28}, /* 12 */
3381 {2699,14,28}, /* 13 */
3382 {3039,15,27}, /* 14 */
3383 {3399,16,27}, /* 15 */
3384 {3778,17,26}, /* 16 */
3385 {4177,18,26}, /* 17 */
3386 {4596,19,25}, /* 18 */
3387 {5036,20,25}, /* 19 */
3388 {5497,21,24}, /* 20 */
3389 {5979,22,24}, /* 21 */
3390 {6483,23,23}, /* 22 */
3391 {7009,24,23}, /* 23 */
3392 {7558,25,22}, /* 24 */
3393 {8130,26,22}, /* 25 */
3394 {8726,27,22}, /* 26 */
3395 {9346,28,21}, /* 27 */
3396 {9991,29,21}, /* 28 */
3397 {10661,30,21}, /* 29 */
3398 {11358,31,20}, /* 30 */
3399 {12082,32,20}, /* 31 */
3400 {12834,33,20}, /* 32 */
3401 {13614,34,19}, /* 33 */
3402 {14424,35,19}, /* 34 */
3403 {15265,36,19}, /* 35 */
3404 {16137,37,19}, /* 36 */
3405 {17042,38,18}, /* 37 */
3406 {17981,39,18}, /* 38 */
3407 {18955,40,18}, /* 39 */
3408 {19965,41,17}, /* 40 */
3409 {21013,42,17}, /* 41 */
3410 {22101,43,17}, /* 42 */
3411 {23230,44,17}, /* 43 */
3412 {24402,45,16}, /* 44 */
3413 {25618,46,16}, /* 45 */
3414 {26881,47,16}, /* 46 */
3415 {28193,48,16}, /* 47 */
3416 {29557,49,15}, /* 48 */
3417 {30975,50,15}, /* 49 */
3418 {32450,51,15}, /* 50 */
3419 {33986,52,15}, /* 51 */
3420 {35586,53,14}, /* 52 */
3421 {37253,54,14}, /* 53 */
3422 {38992,55,14}, /* 54 */
3423 {40808,56,14}, /* 55 */
3424 {42707,57,13}, /* 56 */
3425 {44694,58,13}, /* 57 */
3426 {46776,59,13}, /* 58 */
3427 {48961,60,13}, /* 59 */
3428 {51258,61,13}, /* 60 */
3429 {53677,62,12}, /* 61 */
3430 {56230,63,12}, /* 62 */
3431 {58932,64,12}, /* 63 */
3432 {61799,65,12}, /* 64 */
3433 {64851,66,11}, /* 65 */
3434 {68113,67,11}, /* 66 */
3435 {71617,68,11}, /* 67 */
3436 {75401,69,10}, /* 68 */
3437 {79517,70,10}, /* 69 */
3438 {84035,71,10}, /* 70 */
3439 {89053,72,10}, /* 71 */
3440 {94717,73,9} /* 72 */
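/*
 * The table above follows the HighSpeed TCP proposal (apparently
 * RFC 3649): the current cwnd in KB (cur_val = net->cwnd >> 10)
 * selects a row, which supplies the additive increase in KB and the
 * percentage used for the multiplicative decrease. Below row 0
 * (38 KB) the code falls back to standard slow-start/AIMD behavior.
 */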
3443 static void
3444 sctp_hs_cwnd_increase(struct sctp_nets *net)
3446 int cur_val, i, indx, incr;
3448 cur_val = net->cwnd >> 10;
3449 indx = SCTP_HS_TABLE_SIZE - 1;
3451 if (cur_val < sctp_cwnd_adjust[0].cwnd) {
3452 /* normal mode */
3453 if (net->net_ack > net->mtu) {
3454 net->cwnd += net->mtu;
3455 #ifdef SCTP_CWND_LOGGING
3456 sctp_log_cwnd(net, net->mtu, SCTP_CWND_LOG_FROM_SS);
3457 #endif
3458 } else {
3459 net->cwnd += net->net_ack;
3460 #ifdef SCTP_CWND_LOGGING
3461 sctp_log_cwnd(net, net->net_ack, SCTP_CWND_LOG_FROM_SS);
3462 #endif
3464 } else {
3465 for (i=net->last_hs_used; i<SCTP_HS_TABLE_SIZE; i++) {
3466 if (cur_val < sctp_cwnd_adjust[i].cwnd) {
3467 indx = i;
3468 break;
3471 net->last_hs_used = indx;
3472 incr = ((sctp_cwnd_adjust[indx].increase) << 10);
3473 net->cwnd += incr;
3474 #ifdef SCTP_CWND_LOGGING
3475 sctp_log_cwnd(net, incr, SCTP_CWND_LOG_FROM_SS);
3476 #endif
3480 static void
3481 sctp_hs_cwnd_decrease(struct sctp_nets *net)
3483 int cur_val, i, indx;
3484 #ifdef SCTP_CWND_LOGGING
3485 int old_cwnd = net->cwnd;
3486 #endif
3488 cur_val = net->cwnd >> 10;
3489 indx = net->last_hs_used;
3490 if (cur_val < sctp_cwnd_adjust[0].cwnd) {
3491 /* normal mode */
3492 net->ssthresh = net->cwnd / 2;
3493 if (net->ssthresh < (net->mtu*2)) {
3494 net->ssthresh = 2 * net->mtu;
3496 net->cwnd = net->ssthresh;
3497 #ifdef SCTP_CWND_LOGGING
3498 sctp_log_cwnd(net, (net->cwnd-old_cwnd), SCTP_CWND_LOG_FROM_FR);
3499 #endif
3500 } else {
3501 /* drop by the proper amount */
3502 net->ssthresh = net->cwnd - (int)((net->cwnd / 100) *
3503 sctp_cwnd_adjust[net->last_hs_used].drop_percent);
3504 net->cwnd = net->ssthresh;
3505 /* now where are we */
3506 indx = net->last_hs_used;
3507 cur_val = net->cwnd >> 10;
3508 /* reset where we are in the table */
3509 if (cur_val < sctp_cwnd_adjust[0].cwnd) {
3510 /* fell out of hs */
3511 net->last_hs_used = 0;
3512 } else {
3513 for (i = indx; i >= 1; i--) {
3514 if (cur_val > sctp_cwnd_adjust[i - 1].cwnd) {
3515 break;
3518 net->last_hs_used = i;
3522 #endif
3524 void
3525 sctp_handle_sack(struct sctp_sack_chunk *ch, struct sctp_tcb *stcb,
3526 struct sctp_nets *net_from, int *abort_now)
3528 struct sctp_association *asoc;
3529 struct sctp_sack *sack;
3530 struct sctp_tmit_chunk *tp1, *tp2;
3531 u_long cum_ack, last_tsn, biggest_tsn_acked, biggest_tsn_newly_acked;
3532 uint16_t num_seg;
3533 unsigned int sack_length;
3534 uint32_t send_s;
3535 int some_on_streamwheel;
3536 long j;
3537 int strike_enabled = 0, cnt_of_cacc = 0;
3538 int accum_moved = 0;
3539 int marking_allowed = 1;
3540 int will_exit_fast_recovery=0;
3541 u_int32_t a_rwnd;
3542 struct sctp_nets *net = NULL;
3543 int nonce_sum_flag, ecn_seg_sums=0;
3544 asoc = &stcb->asoc;
3547 * Handle the incoming sack on data I have been sending.
3551 * we take any chance we can to service our queues since we
3552 * cannot get awoken when the socket is read from :<
3554 asoc->overall_error_count = 0;
3556 if (asoc->sent_queue_retran_cnt) {
3557 #ifdef SCTP_DEBUG
3558 if (sctp_debug_on & SCTP_DEBUG_INDATA1) {
3559 kprintf("Handling SACK for asoc:%p retran:%d\n",
3560 asoc, asoc->sent_queue_retran_cnt);
3562 #endif
3565 sctp_service_queues(stcb, asoc, 0);
3568 * Now perform the actual SACK handling:
3569 * 1) Verify that it is not an old sack, if so discard.
3570 * 2) If there is nothing left in the send queue (cum-ack is equal
3571 * to last acked) then you have a duplicate too, update any rwnd
3572 * change and verify no timers are running. then return.
3573 * 3) Process any new consequtive data i.e. cum-ack moved
3574 * process these first and note that it moved.
3575 * 4) Process any sack blocks.
3576 * 5) Drop any acked from the queue.
3577 * 6) Check for any revoked blocks and mark.
3578 * 7) Update the cwnd.
3579 * 8) Nothing left, sync up flightsizes and things, stop all timers
3580 * and also check for shutdown_pending state. If so then go ahead
3581 * and send off the shutdown. If in shutdown recv, send off the
3582 * shutdown-ack and start that timer, Ret.
3583 * 9) Strike any non-acked things and do FR procedure if needed being
3584 * sure to set the FR flag.
3585 * 10) Do pr-sctp procedures.
3586 * 11) Apply any FR penalties.
3587 * 12) Assure we will SACK if in shutdown_recv state.
3590 j = 0;
3591 sack_length = ntohs(ch->ch.chunk_length);
3592 if (sack_length < sizeof(struct sctp_sack_chunk)) {
3593 #ifdef SCTP_DEBUG
3594 if (sctp_debug_on & SCTP_DEBUG_INDATA1) {
3595 kprintf("Bad size on sack chunk .. too small\n");
3597 #endif
3598 return;
3600 /* ECN Nonce */
3601 nonce_sum_flag = ch->ch.chunk_flags & SCTP_SACK_NONCE_SUM;
3602 sack = &ch->sack;
3603 cum_ack = last_tsn = ntohl(sack->cum_tsn_ack);
3604 num_seg = ntohs(sack->num_gap_ack_blks);
3606 /* reality check */
3607 if (TAILQ_EMPTY(&asoc->send_queue)) {
3608 send_s = asoc->sending_seq;
3609 } else {
3610 tp1 = TAILQ_FIRST(&asoc->send_queue);
3611 send_s = tp1->rec.data.TSN_seq;
3614 if (sctp_strict_sacks) {
3615 if (cum_ack == send_s ||
3616 compare_with_wrap(cum_ack, send_s, MAX_TSN)) {
3617 struct mbuf *oper;
3619 * no way, we have not even sent this TSN out yet.
3620 * Peer is hopelessly messed up with us.
3622 hopeless_peer:
3623 *abort_now = 1;
3624 /* XXX */
3625 MGET(oper, MB_DONTWAIT, MT_DATA);
3626 if (oper) {
3627 struct sctp_paramhdr *ph;
3628 u_int32_t *ippp;
3630 oper->m_len = sizeof(struct sctp_paramhdr) +
3631 sizeof(*ippp);
3632 ph = mtod(oper, struct sctp_paramhdr *);
3633 ph->param_type = htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
3634 ph->param_length = htons(oper->m_len);
3635 ippp = (u_int32_t *)(ph + 1);
3636 *ippp = htonl(0x30000002);
3638 sctp_abort_an_association(stcb->sctp_ep, stcb, SCTP_PEER_FAULTY, oper);
3639 return;
3642 /* update the Rwnd of the peer */
3643 a_rwnd = (u_int32_t)ntohl(sack->a_rwnd);
3644 if (asoc->sent_queue_retran_cnt) {
3645 #ifdef SCTP_DEBUG
3646 if (sctp_debug_on & SCTP_DEBUG_INDATA1) {
3647 kprintf("cum_ack:%lx num_seg:%u last_acked_seq:%x\n",
3648 cum_ack, (u_int)num_seg, asoc->last_acked_seq);
3650 #endif
3652 if (compare_with_wrap(asoc->t3timeout_highest_marked, cum_ack, MAX_TSN)) {
3653 /* we are not allowed to mark for FR */
3654 marking_allowed = 0;
3656 /**********************/
3657 /* 1) check the range */
3658 /**********************/
3659 if (compare_with_wrap(asoc->last_acked_seq, last_tsn, MAX_TSN)) {
3660 /* acking something behind */
3661 if (asoc->sent_queue_retran_cnt) {
3662 #ifdef SCTP_DEBUG
3663 if (sctp_debug_on & SCTP_DEBUG_INDATA1) {
3664 kprintf("The cum-ack is behind us\n");
3666 #endif
3668 return;
3671 if (TAILQ_EMPTY(&asoc->sent_queue)) {
3672 /* nothing left on sendqueue.. consider done */
3673 #ifdef SCTP_LOG_RWND
3674 sctp_log_rwnd_set(SCTP_SET_PEER_RWND_VIA_SACK,
3675 asoc->peers_rwnd, 0, 0, a_rwnd);
3676 #endif
3677 asoc->peers_rwnd = a_rwnd;
3678 if (asoc->sent_queue_retran_cnt) {
3679 #ifdef SCTP_DEBUG
3680 if (sctp_debug_on & SCTP_DEBUG_INDATA1) {
3681 kprintf("Huh? retran set but none on queue\n");
3683 #endif
3684 asoc->sent_queue_retran_cnt = 0;
3686 if (asoc->peers_rwnd < stcb->sctp_ep->sctp_ep.sctp_sws_sender) {
3687 /* SWS sender side engages */
3688 asoc->peers_rwnd = 0;
3690 /* stop any timers */
3691 TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
3692 sctp_timer_stop(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep,
3693 stcb, net);
3694 net->partial_bytes_acked = 0;
3695 net->flight_size = 0;
3697 asoc->total_flight = 0;
3698 asoc->total_flight_count = 0;
3699 return;
3702 * We init netAckSz and netAckSz2 to 0. These are used to track 2
3703 * things. The total byte count acked is tracked in netAckSz AND
3704 * netAck2 is used to track the total bytes acked that are
3705 * unambiguous and were never retransmitted. We track these on a
3706 * per destination address basis.
3708 TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
3709 net->prev_cwnd = net->cwnd;
3710 net->net_ack = 0;
3711 net->net_ack2 = 0;
3713 /* process the new consecutive TSN first */
3714 tp1 = TAILQ_FIRST(&asoc->sent_queue);
3715 while (tp1) {
3716 if (compare_with_wrap(last_tsn, tp1->rec.data.TSN_seq,
3717 MAX_TSN) ||
3718 last_tsn == tp1->rec.data.TSN_seq) {
3719 if (tp1->sent != SCTP_DATAGRAM_UNSENT) {
3720 /* ECN Nonce: Add the nonce to the sender's nonce sum */
3721 asoc->nonce_sum_expect_base += tp1->rec.data.ect_nonce;
3722 accum_moved = 1;
3723 if (tp1->sent < SCTP_DATAGRAM_ACKED) {
3725 * If it is less than ACKED, it is now
3726 * no longer in flight. Higher values
3727 * may occur during marking
3729 if ((tp1->whoTo->dest_state &
3730 SCTP_ADDR_UNCONFIRMED) &&
3731 (tp1->snd_count < 2) ) {
3733 * If there was no retran and
3734 * the address is un-confirmed
3735 * and we sent there and are
3736 * now sacked.. it's confirmed,
3737 * mark it so.
3739 tp1->whoTo->dest_state &=
3740 ~SCTP_ADDR_UNCONFIRMED;
3742 tp1->whoTo->flight_size -=
3743 tp1->book_size;
3744 if (tp1->whoTo->flight_size < 0) {
3745 tp1->whoTo->flight_size = 0;
3747 asoc->total_flight -= tp1->book_size;
3748 if (asoc->total_flight < 0) {
3749 asoc->total_flight = 0;
3751 asoc->total_flight_count--;
3752 if (asoc->total_flight_count < 0) {
3753 asoc->total_flight_count = 0;
3755 tp1->whoTo->net_ack += tp1->send_size;
3756 if (tp1->snd_count < 2) {
3757 /* True non-retransmitted chunk */
3758 tp1->whoTo->net_ack2 +=
3759 tp1->send_size;
3760 /* update RTO too? */
3761 if (tp1->do_rtt) {
3762 tp1->whoTo->RTO =
3763 sctp_calculate_rto(stcb,
3764 asoc, tp1->whoTo,
3765 &tp1->sent_rcv_time);
3766 tp1->whoTo->rto_pending = 0;
3767 tp1->do_rtt = 0;
3771 if (tp1->sent == SCTP_DATAGRAM_RESEND) {
3772 #ifdef SCTP_DEBUG
3773 if (sctp_debug_on & SCTP_DEBUG_INDATA3) {
3774 kprintf("Hmm. one that is in RESEND that is now ACKED\n");
3776 #endif
3777 asoc->sent_queue_retran_cnt--;
3778 #ifdef SCTP_AUDITING_ENABLED
3779 sctp_audit_log(0xB3,
3780 (asoc->sent_queue_retran_cnt & 0x000000ff));
3781 #endif
3782 if (asoc->sent_queue_retran_cnt < 0) {
3783 kprintf("huh4 retran went negative?\n");
3784 #ifdef SCTP_AUDITING_ENABLED
3785 sctp_auditing(31, stcb->sctp_ep, stcb,
3786 NULL);
3787 #else
3788 asoc->sent_queue_retran_cnt = 0;
3789 #endif
3793 tp1->sent = SCTP_DATAGRAM_ACKED;
3795 } else {
3796 break;
3798 tp1 = TAILQ_NEXT(tp1, sctp_next);
3800 /*******************************************/
3801 /* cancel ALL T3-send timer if accum moved */
3802 /*******************************************/
3803 if (accum_moved) {
3804 TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
3805 sctp_timer_stop(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep,
3806 stcb, net);
3809 biggest_tsn_newly_acked = biggest_tsn_acked = last_tsn;
3810 /* always set this up to cum-ack */
3811 asoc->this_sack_highest_gap = last_tsn;
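/*
 * Sanity check: num_seg gap-ack blocks plus the fixed SACK chunk
 * header must fit within the advertised chunk length, else the
 * segment list is corrupt and is skipped.
 */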
3813 if (((num_seg * sizeof(struct sctp_gap_ack_block)) + sizeof(struct sctp_sack_chunk)) > sack_length) {
3814 /* skip corrupt segments */
3815 strike_enabled = 0;
3816 goto skip_segments;
3819 if (num_seg > 0) {
3820 if (asoc->primary_destination->dest_state &
3821 SCTP_ADDR_SWITCH_PRIMARY) {
3822 /* clear the nets CACC flags */
3823 TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
3824 net->cacc_saw_newack = 0;
3828 * thisSackHighestGap will increase while handling NEW segments
3831 sctp_handle_segments(stcb, asoc, ch, last_tsn,
3832 &biggest_tsn_acked, &biggest_tsn_newly_acked,
3833 num_seg, &ecn_seg_sums);
3835 if (sctp_strict_sacks) {
3836 /* validate the biggest_tsn_acked in the gap acks
3837 * if strict adherence is wanted.
3839 if ((biggest_tsn_acked == send_s) ||
3840 (compare_with_wrap(biggest_tsn_acked, send_s, MAX_TSN))) {
3842 * peer is either confused or we are under
3843 * attack. We must abort.
3845 goto hopeless_peer;
3849 if (asoc->primary_destination->dest_state &
3850 SCTP_ADDR_SWITCH_PRIMARY) {
3851 /* clear the nets CACC flags */
3852 TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
3853 if (net->cacc_saw_newack) {
3854 cnt_of_cacc++;
3861 if (cnt_of_cacc < 2) {
3862 strike_enabled = 1;
3863 } else {
3864 strike_enabled = 0;
3866 skip_segments:
3867 /********************************************/
3868 /* drop the acked chunks from the sendqueue */
3869 /********************************************/
3870 asoc->last_acked_seq = cum_ack;
3871 if (asoc->primary_destination->dest_state & SCTP_ADDR_SWITCH_PRIMARY) {
3872 if ((cum_ack == asoc->primary_destination->next_tsn_at_change) ||
3873 (compare_with_wrap(cum_ack,
3874 asoc->primary_destination->next_tsn_at_change, MAX_TSN))) {
3875 struct sctp_nets *lnet;
3876 /* Turn off the switch flag for ALL addresses */
3877 TAILQ_FOREACH(lnet, &asoc->nets, sctp_next) {
3878 lnet->dest_state &=
3879 ~(SCTP_ADDR_SWITCH_PRIMARY|SCTP_ADDR_DOUBLE_SWITCH);
3883 /* Drag along the t3 timeout point so we don't have a problem at wrap */
3884 if (marking_allowed) {
3885 asoc->t3timeout_highest_marked = cum_ack;
3887 tp1 = TAILQ_FIRST(&asoc->sent_queue);
3888 do {
3889 if (compare_with_wrap(tp1->rec.data.TSN_seq, cum_ack,
3890 MAX_TSN)) {
3891 break;
3893 if (tp1->sent == SCTP_DATAGRAM_UNSENT) {
3894 /* no more sent on list */
3895 break;
3897 tp2 = TAILQ_NEXT(tp1, sctp_next);
3898 TAILQ_REMOVE(&asoc->sent_queue, tp1, sctp_next);
3899 if (tp1->data) {
3900 sctp_free_bufspace(stcb, asoc, tp1);
3901 #ifdef SCTP_DEBUG
3902 if (sctp_debug_on & SCTP_DEBUG_OUTPUT2) {
3903 kprintf("--total out:%lu total_mbuf_out:%lu\n",
3904 (u_long)asoc->total_output_queue_size,
3905 (u_long)asoc->total_output_mbuf_queue_size);
3907 #endif
3909 sctp_m_freem(tp1->data);
3910 if (tp1->flags & SCTP_PR_SCTP_BUFFER) {
3911 asoc->sent_queue_cnt_removeable--;
3915 tp1->data = NULL;
3916 asoc->sent_queue_cnt--;
3917 sctp_free_remote_addr(tp1->whoTo);
3918 sctppcbinfo.ipi_count_chunk--;
3919 asoc->chunks_on_out_queue--;
3921 if ((int)sctppcbinfo.ipi_count_chunk < 0) {
3922 panic("Chunk count is going negative");
3924 SCTP_ZONE_FREE(sctppcbinfo.ipi_zone_chunk, tp1);
3925 sctppcbinfo.ipi_gencnt_chunk++;
3926 sctp_sowwakeup(stcb->sctp_ep, stcb->sctp_socket);
3927 tp1 = tp2;
3928 }
3931 if (asoc->fast_retran_loss_recovery && accum_moved) {
3932 if (compare_with_wrap(asoc->last_acked_seq,
3933 asoc->fast_recovery_tsn, MAX_TSN) ||
3934 asoc->last_acked_seq == asoc->fast_recovery_tsn) {
3935 /* Setup so we will exit RFC2582 fast recovery */
3936 will_exit_fast_recovery = 1;
3940 /* Check for revoked fragments if we had
3941 * fragments in a previous segment. If we
3942 * had no previous fragments we cannot have
3943 * a revoke issue. */
3945 if (asoc->saw_sack_with_frags)
3946 sctp_check_for_revoked(asoc, cum_ack, biggest_tsn_acked);
3948 if (num_seg)
3949 asoc->saw_sack_with_frags = 1;
3950 else
3951 asoc->saw_sack_with_frags = 0;
3953 /******************************/
3954 /* update cwnd */
3955 /******************************/
3956 TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
3957 /* if nothing was acked on this destination skip it */
3958 if (net->net_ack == 0)
3959 continue;
3961 if (net->net_ack2 > 0) {
3963 /* Karn's rule applies to clearing error count,
3964 * this is optional. */
3966 net->error_count = 0;
3967 if ((net->dest_state&SCTP_ADDR_NOT_REACHABLE) ==
3968 SCTP_ADDR_NOT_REACHABLE) {
3969 /* addr came good */
3970 net->dest_state &= ~SCTP_ADDR_NOT_REACHABLE;
3971 net->dest_state |= SCTP_ADDR_REACHABLE;
3972 sctp_ulp_notify(SCTP_NOTIFY_INTERFACE_UP, stcb,
3973 SCTP_RECEIVED_SACK, (void *)net);
3974 /* now was it the primary? if so restore */
3975 if (net->dest_state & SCTP_ADDR_WAS_PRIMARY) {
3976 sctp_set_primary_addr(stcb, (struct sockaddr *)NULL, net);
3981 if (asoc->fast_retran_loss_recovery &&
3982 will_exit_fast_recovery == 0) {
3983 /* If we are in loss recovery we skip any cwnd update */
3984 sctp_pegs[SCTP_CWND_SKIP]++;
3985 goto skip_cwnd_update;
3987 if (accum_moved) {
3988 /* If the cumulative ack moved we can proceed */
3989 if (net->cwnd <= net->ssthresh) {
3990 /* We are in slow start */
3991 if (net->flight_size + net->net_ack >=
3992 net->cwnd ) {
3993 #ifdef SCTP_HIGH_SPEED
3994 sctp_hs_cwnd_increase(net);
3995 #else
3996 if (net->net_ack > net->mtu) {
3997 net->cwnd += net->mtu;
3998 #ifdef SCTP_CWND_LOGGING
3999 sctp_log_cwnd(net, net->mtu,
4000 SCTP_CWND_LOG_FROM_SS);
4001 #endif
4003 } else {
4004 net->cwnd += net->net_ack;
4005 #ifdef SCTP_CWND_LOGGING
4006 sctp_log_cwnd(net, net->net_ack,
4007 SCTP_CWND_LOG_FROM_SS);
4008 #endif
4011 #endif
4012 sctp_pegs[SCTP_CWND_SS]++;
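/*
 * Worked example (not from the source): with mtu 1500 and the window
 * fully used, a SACK newly acking net_ack = 4380 bytes grows cwnd by
 * min(net_ack, mtu) = 1500, per RFC 2960 section 7.2.1; had net_ack
 * been only 800, cwnd would grow by just those 800 bytes instead, as
 * coded above.
 */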
4013 } else {
4014 unsigned int dif;
4015 sctp_pegs[SCTP_CWND_NOUSE_SS]++;
4016 dif = net->cwnd - (net->flight_size +
4017 net->net_ack);
4018 #ifdef SCTP_CWND_LOGGING
4019 /* sctp_log_cwnd(net, net->net_ack,
4020 SCTP_CWND_LOG_NOADV_SS);*/
4021 #endif
4022 if (dif > sctp_pegs[SCTP_CWND_DIFF_SA]) {
4023 sctp_pegs[SCTP_CWND_DIFF_SA] =
4024 dif;
4025 sctp_pegs[SCTP_OQS_AT_SS] =
4026 asoc->total_output_queue_size;
4027 sctp_pegs[SCTP_SQQ_AT_SS] =
4028 asoc->sent_queue_cnt;
4029 sctp_pegs[SCTP_SQC_AT_SS] =
4030 asoc->send_queue_cnt;
4033 } else {
4034 /* We are in congestion avoidance */
4035 if (net->flight_size + net->net_ack >=
4036 net->cwnd) {
4038 /* add to pba only if we had a cwnd's
4039 * worth (or so) in flight OR the
4040 * burst limit was applied. */
4042 net->partial_bytes_acked +=
4043 net->net_ack;
4046 /* Do we need to increase
4047 * (if pba is > cwnd)? */
4049 if (net->partial_bytes_acked >=
4050 net->cwnd) {
4051 if (net->cwnd <
4052 net->partial_bytes_acked) {
4053 net->partial_bytes_acked -=
4054 net->cwnd;
4055 } else {
4056 net->partial_bytes_acked = 0;
4059 net->cwnd += net->mtu;
4060 #ifdef SCTP_CWND_LOGGING
4061 sctp_log_cwnd(net, net->mtu,
4062 SCTP_CWND_LOG_FROM_CA);
4063 #endif
4064 sctp_pegs[SCTP_CWND_CA]++;
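/*
 * Worked example (not from the source): in congestion avoidance with
 * cwnd 6000 and mtu 1500, partial_bytes_acked accumulates newly acked
 * bytes; once it reaches 6000 the old cwnd is subtracted back out and
 * cwnd grows by one mtu to 7500, i.e. roughly one mtu of growth per
 * window's worth of data acked (RFC 2960 section 7.2.2).
 */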
4066 } else {
4067 unsigned int dif;
4068 sctp_pegs[SCTP_CWND_NOUSE_CA]++;
4069 #ifdef SCTP_CWND_LOGGING
4070 /* sctp_log_cwnd(net, net->net_ack,
4071 SCTP_CWND_LOG_NOADV_CA); */
4073 #endif
4074 dif = net->cwnd - (net->flight_size +
4075 net->net_ack);
4076 if (dif > sctp_pegs[SCTP_CWND_DIFF_CA]) {
4077 sctp_pegs[SCTP_CWND_DIFF_CA] =
4078 dif;
4079 sctp_pegs[SCTP_OQS_AT_CA] =
4080 asoc->total_output_queue_size;
4081 sctp_pegs[SCTP_SQQ_AT_CA] =
4082 asoc->sent_queue_cnt;
4083 sctp_pegs[SCTP_SQC_AT_CA] =
4084 asoc->send_queue_cnt;
4090 } else {
4091 sctp_pegs[SCTP_CWND_NOCUM]++;
4093 skip_cwnd_update:
4095 /* NOW, according to Karn's rule do we need to restore the
4096 * RTO timer back? Check our net_ack2. If not set then we
4097 * have an ambiguity, i.e. all data ack'd was sent to more
4098 * than one place. */
4101 if (net->net_ack2) {
4102 /* restore any doubled timers */
4103 net->RTO = ((net->lastsa >> 2) + net->lastsv) >> 1;
4104 if (net->RTO < stcb->asoc.minrto) {
4105 net->RTO = stcb->asoc.minrto;
4107 if (net->RTO > stcb->asoc.maxrto) {
4108 net->RTO = stcb->asoc.maxrto;
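/*
 * Assumption worth flagging: lastsa/lastsv appear to hold the smoothed
 * RTT and RTT variance in scaled fixed point, as in the classic Van
 * Jacobson srtt/rttvar code, so the shift expression above recovers an
 * RTO of roughly the smoothed RTT plus a multiple of the variance
 * before the [minrto, maxrto] clamp is applied.
 */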
4111 if (net->cwnd > sctp_pegs[SCTP_MAX_CWND]) {
4112 sctp_pegs[SCTP_MAX_CWND] = net->cwnd;
4115 /**********************************/
4116 /* Now what about shutdown issues */
4117 /**********************************/
4118 some_on_streamwheel = 0;
4119 if (!TAILQ_EMPTY(&asoc->out_wheel)) {
4120 /* Check to see if some data queued */
4121 struct sctp_stream_out *outs;
4122 TAILQ_FOREACH(outs, &asoc->out_wheel, next_spoke) {
4123 if (!TAILQ_EMPTY(&outs->outqueue)) {
4124 some_on_streamwheel = 1;
4125 break;
4129 if (TAILQ_EMPTY(&asoc->send_queue) && TAILQ_EMPTY(&asoc->sent_queue) &&
4130 some_on_streamwheel == 0) {
4131 /* nothing left on sendqueue.. consider done */
4132 /* stop all timers */
4133 #ifdef SCTP_LOG_RWND
4134 sctp_log_rwnd_set(SCTP_SET_PEER_RWND_VIA_SACK,
4135 asoc->peers_rwnd, 0, 0, a_rwnd);
4136 #endif
4137 asoc->peers_rwnd = a_rwnd;
4138 if (asoc->peers_rwnd < stcb->sctp_ep->sctp_ep.sctp_sws_sender) {
4139 /* SWS sender side engages */
4140 asoc->peers_rwnd = 0;
4142 /* stop any timers */
4143 TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4144 sctp_timer_stop(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep,
4145 stcb, net);
4146 net->flight_size = 0;
4147 net->partial_bytes_acked = 0;
4149 asoc->total_flight = 0;
4150 asoc->total_flight_count = 0;
4151 /* clean up */
4152 if (asoc->state & SCTP_STATE_SHUTDOWN_PENDING) {
4153 asoc->state = SCTP_STATE_SHUTDOWN_SENT;
4154 #ifdef SCTP_DEBUG
4155 if (sctp_debug_on & SCTP_DEBUG_OUTPUT4) {
4156 kprintf("%s:%d sends a shutdown\n",
4157 __FILE__,
4158 __LINE__);
4161 #endif
4162 sctp_send_shutdown(stcb,
4163 stcb->asoc.primary_destination);
4164 sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWN,
4165 stcb->sctp_ep, stcb, asoc->primary_destination);
4166 sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNGUARD,
4167 stcb->sctp_ep, stcb, asoc->primary_destination);
4168 } else if (SCTP_GET_STATE(asoc) == SCTP_STATE_SHUTDOWN_RECEIVED) {
4169 asoc->state = SCTP_STATE_SHUTDOWN_ACK_SENT;
4171 sctp_send_shutdown_ack(stcb,
4172 stcb->asoc.primary_destination);
4174 sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNACK,
4175 stcb->sctp_ep, stcb, asoc->primary_destination);
4177 return;
4180 /* Now here we are going to recycle net_ack for a different
4181 * use... HEADS UP. */
4183 TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4184 net->net_ack = 0;
4186 if ((num_seg > 0) && marking_allowed) {
4187 sctp_strike_gap_ack_chunks(stcb, asoc, biggest_tsn_acked,
4188 strike_enabled, biggest_tsn_newly_acked, accum_moved);
4191 /*********************************************/
4192 /* Here we perform PR-SCTP procedures */
4193 /* (section 4.2) */
4194 /*********************************************/
4195 /* C1. update advancedPeerAckPoint */
4196 if (compare_with_wrap(cum_ack, asoc->advanced_peer_ack_point, MAX_TSN)) {
4197 asoc->advanced_peer_ack_point = cum_ack;
4199 /* C2. try to further move advancedPeerAckPoint ahead */
4200 if (asoc->peer_supports_prsctp) {
4201 struct sctp_tmit_chunk *lchk;
4202 lchk = sctp_try_advance_peer_ack_point(stcb, asoc);
4203 /* C3. See if we need to send a Fwd-TSN */
4204 if (compare_with_wrap(asoc->advanced_peer_ack_point, cum_ack,
4205 MAX_TSN)) {
4207 /* ISSUE with ECN, see FWD-TSN processing for notes
4208 * on issues that will occur when the ECN NONCE stuff
4209 * is put into SCTP for cross checking. */
4211 send_forward_tsn(stcb, asoc);
4213 /* ECN Nonce: Disable Nonce Sum check when FWD TSN is sent and store resync tsn*/
4214 asoc->nonce_sum_check = 0;
4215 asoc->nonce_resync_tsn = asoc->advanced_peer_ack_point;
4216 if (lchk) {
4217 /* Assure a timer is up */
4218 sctp_timer_start(SCTP_TIMER_TYPE_SEND,
4219 stcb->sctp_ep, stcb, lchk->whoTo);
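/*
 * Worked example (not from the source): if cum_ack is 100 but TSNs
 * 101-103 were abandoned under PR-SCTP, the advance above can move
 * advancedPeerAckPoint to 103; since 103 is beyond cum_ack we emit
 * FWD-TSN(103), telling the peer to treat everything through 103 as
 * received.
 */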
4223 TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4224 if (asoc->fast_retran_loss_recovery == 0) {
4225 /* out of a RFC2582 Fast recovery window? */
4226 if (net->net_ack > 0) {
4228 /* per section 7.2.3, are there
4229 * any destinations that had a fast
4230 * retransmit to them. If so what we
4231 * need to do is adjust ssthresh and
4232 * cwnd. */
4234 struct sctp_tmit_chunk *lchk;
4235 #ifdef SCTP_HIGH_SPEED
4236 sctp_hs_cwnd_decrease(net);
4237 #else
4238 #ifdef SCTP_CWND_LOGGING
4239 int old_cwnd = net->cwnd;
4240 #endif
4241 net->ssthresh = net->cwnd / 2;
4242 if (net->ssthresh < (net->mtu*2)) {
4243 net->ssthresh = 2 * net->mtu;
4245 net->cwnd = net->ssthresh;
4246 #ifdef SCTP_CWND_LOGGING
4247 sctp_log_cwnd(net, (net->cwnd-old_cwnd),
4248 SCTP_CWND_LOG_FROM_FR);
4249 #endif
4250 #endif
4252 lchk = TAILQ_FIRST(&asoc->send_queue);
4254 net->partial_bytes_acked = 0;
4255 /* Turn on fast recovery window */
4256 asoc->fast_retran_loss_recovery = 1;
4257 if (lchk == NULL) {
4258 /* Mark end of the window */
4259 asoc->fast_recovery_tsn = asoc->sending_seq - 1;
4260 } else {
4261 asoc->fast_recovery_tsn = lchk->rec.data.TSN_seq - 1;
4265 /* Disable Nonce Sum Checking and store the resync tsn*/
4266 asoc->nonce_sum_check = 0;
4267 asoc->nonce_resync_tsn = asoc->fast_recovery_tsn + 1;
4269 sctp_timer_stop(SCTP_TIMER_TYPE_SEND,
4270 stcb->sctp_ep, stcb, net);
4271 sctp_timer_start(SCTP_TIMER_TYPE_SEND,
4272 stcb->sctp_ep, stcb, net);
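/*
 * Worked example (not from the source): entering fast recovery with
 * cwnd 12000 and mtu 1500 halves ssthresh to 6000 and sets cwnd to
 * ssthresh; with a small cwnd of 2000 the 2*mtu floor above would clamp
 * ssthresh (and so cwnd) to 3000 instead. The recovery window then runs
 * until fast_recovery_tsn is cumulatively acked.
 */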
4274 } else if (net->net_ack > 0) {
4276 /* Mark a peg that we WOULD have done a cwnd reduction
4277 * but RFC2582 prevented this action. */
4279 sctp_pegs[SCTP_FR_INAWINDOW]++;
4284 /******************************************************************
4285 * Here we do the stuff with ECN Nonce checking.
4286 * We basically check to see if the nonce sum flag was incorrect
4287 * or if resynchronization needs to be done. Also if we catch a
4288 * misbehaving receiver we give him the kick.
4289 ******************************************************************/
4291 if (asoc->ecn_nonce_allowed) {
4292 if (asoc->nonce_sum_check) {
4293 if (nonce_sum_flag != ((asoc->nonce_sum_expect_base + ecn_seg_sums) & SCTP_SACK_NONCE_SUM)) {
4294 if (asoc->nonce_wait_for_ecne == 0) {
4295 struct sctp_tmit_chunk *lchk;
4296 lchk = TAILQ_FIRST(&asoc->send_queue);
4297 asoc->nonce_wait_for_ecne = 1;
4298 if (lchk) {
4299 asoc->nonce_wait_tsn = lchk->rec.data.TSN_seq;
4300 } else {
4301 asoc->nonce_wait_tsn = asoc->sending_seq;
4303 } else {
4304 if (compare_with_wrap(asoc->last_acked_seq, asoc->nonce_wait_tsn, MAX_TSN) ||
4305 (asoc->last_acked_seq == asoc->nonce_wait_tsn)) {
4306 /* Misbehaving peer. We need to react to this guy */
4307 kprintf("Mis-behaving peer detected\n");
4308 asoc->ecn_allowed = 0;
4309 asoc->ecn_nonce_allowed = 0;
4313 } else {
4314 /* See if Resynchronization Possible */
4315 if (compare_with_wrap(asoc->last_acked_seq, asoc->nonce_resync_tsn, MAX_TSN)) {
4316 asoc->nonce_sum_check = 1;
4317 /* now we must calculate what the base
4318 * is. We do this based on two things: we know
4319 * the totals for all the segments gap-acked
4320 * in the SACK; they are stored in ecn_seg_sums.
4321 * We also know the SACK's nonce sum, it's
4322 * in nonce_sum_flag. So we can build a truth
4323 * table to back-calculate the new value of asoc->nonce_sum_expect_base:
4325 * SACK-flag-Value Seg-Sums Base
4326 * 0 0 0
4327 * 1 0 1
4328 * 0 1 1
4329 * 1 1 0 */
4331 asoc->nonce_sum_expect_base = (ecn_seg_sums ^ nonce_sum_flag) & SCTP_SACK_NONCE_SUM;
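/*
 * Note: the XOR above is exactly that truth table; the new base bit is
 * set precisely when the SACK's nonce-sum flag and our per-segment sum
 * disagree, i.e. base = (seg_sums ^ flag) masked to the nonce-sum bit.
 */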
4335 /* Now are we exiting loss recovery ? */
4336 if (will_exit_fast_recovery) {
4337 /* Ok, we must exit fast recovery */
4338 asoc->fast_retran_loss_recovery = 0;
4340 if ((asoc->sat_t3_loss_recovery) &&
4341 ((compare_with_wrap(asoc->last_acked_seq, asoc->sat_t3_recovery_tsn,
4342 MAX_TSN) ||
4343 (asoc->last_acked_seq == asoc->sat_t3_recovery_tsn)))) {
4344 /* end satellite t3 loss recovery */
4345 asoc->sat_t3_loss_recovery = 0;
4347 /* Adjust and set the new rwnd value */
4348 #ifdef SCTP_LOG_RWND
4349 sctp_log_rwnd_set(SCTP_SET_PEER_RWND_VIA_SACK,
4350 asoc->peers_rwnd, asoc->total_flight, (asoc->sent_queue_cnt * sctp_peer_chunk_oh), a_rwnd);
4351 #endif
4353 asoc->peers_rwnd = sctp_sbspace_sub(a_rwnd,
4354 (u_int32_t)(asoc->total_flight + (asoc->sent_queue_cnt * sctp_peer_chunk_oh)));
4355 if (asoc->peers_rwnd < stcb->sctp_ep->sctp_ep.sctp_sws_sender) {
4356 /* SWS sender side engages */
4357 asoc->peers_rwnd = 0;
4360 /* Now we must set up so we have a timer up for anyone with
4361 * outstanding data. */
4363 TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4364 struct sctp_tmit_chunk *chk;
4365 TAILQ_FOREACH(chk, &asoc->sent_queue, sctp_next) {
4366 if (chk->whoTo == net &&
4367 (chk->sent < SCTP_DATAGRAM_ACKED ||
4368 chk->sent == SCTP_FORWARD_TSN_SKIP)) {
4370 /* Not ack'ed and still outstanding to this
4371 * destination or marked and must be
4372 * sacked after fwd-tsn sent. */
4374 sctp_timer_start(SCTP_TIMER_TYPE_SEND,
4375 stcb->sctp_ep, stcb, net);
4376 break;
4382 void
4383 sctp_update_acked(struct sctp_tcb *stcb, struct sctp_shutdown_chunk *cp,
4384 struct sctp_nets *netp, int *abort_flag)
4386 /* Mutate a shutdown into a SACK */
4387 struct sctp_sack_chunk sack;
4389 /* Copy cum-ack */
4390 sack.sack.cum_tsn_ack = cp->cumulative_tsn_ack;
4391 /* Arrange so a_rwnd does NOT change */
4392 sack.ch.chunk_type = SCTP_SELECTIVE_ACK;
4393 sack.ch.chunk_flags = 0;
4394 sack.ch.chunk_length = htons(sizeof(struct sctp_sack_chunk));
4395 sack.sack.a_rwnd =
4396 htonl(stcb->asoc.peers_rwnd + stcb->asoc.total_flight);
4398 /* no gaps in this one. This may give a temporary appearance of
4399 * reneging, but hopefully the second chunk in the packet is a true
4400 * SACK and will correct this view. One will come soon after no
4401 * matter what, to fix this. */
4403 sack.sack.num_gap_ack_blks = 0;
4404 sack.sack.num_dup_tsns = 0;
4405 /* Now call the SACK processor */
4406 sctp_handle_sack(&sack, stcb, netp, abort_flag);
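/*
 * A hedged usage sketch (the caller is assumed, not shown here): the
 * SHUTDOWN handler in sctp_input.c would invoke
 *
 *	sctp_update_acked(stcb, shutdown_cp, netp, &abort_flag);
 *
 * so the cumulative TSN carried by a SHUTDOWN still retires chunks from
 * the sent queue. Advertising peers_rwnd + total_flight keeps the
 * peer's window estimate unchanged by this synthesized SACK.
 */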
4409 static void
4410 sctp_kick_prsctp_reorder_queue(struct sctp_tcb *stcb,
4411 struct sctp_stream_in *strmin)
4413 struct sctp_tmit_chunk *chk, *nchk;
4414 struct sctp_association *asoc;
4415 int tt;
4417 asoc = &stcb->asoc;
4418 tt = strmin->last_sequence_delivered;
4420 /* First deliver anything prior to and including the stream
4421 * sequence number that came in */
4423 chk = TAILQ_FIRST(&strmin->inqueue);
4424 while (chk) {
4425 nchk = TAILQ_NEXT(chk, sctp_next);
4426 if (compare_with_wrap(tt, chk->rec.data.stream_seq, MAX_SEQ) ||
4427 (tt == chk->rec.data.stream_seq)) {
4428 /* this is deliverable now */
4429 TAILQ_REMOVE(&strmin->inqueue, chk, sctp_next);
4430 /* subtract pending on streams */
4431 asoc->size_on_all_streams -= chk->send_size;
4432 asoc->cnt_on_all_streams--;
4433 /* deliver it to at least the delivery-q */
4434 sctp_deliver_data(stcb, &stcb->asoc, chk, 0);
4435 } else {
4436 /* no more delivery now. */
4437 break;
4439 chk = nchk;
4442 /* now we must deliver things in queue the normal way if any
4443 * are now ready. */
4445 tt = strmin->last_sequence_delivered + 1;
4446 chk = TAILQ_FIRST(&strmin->inqueue);
4447 while (chk) {
4448 nchk = TAILQ_NEXT(chk, sctp_next);
4449 if (tt == chk->rec.data.stream_seq) {
4450 /* this is deliverable now */
4451 TAILQ_REMOVE(&strmin->inqueue, chk, sctp_next);
4452 /* subtract pending on streams */
4453 asoc->size_on_all_streams -= chk->send_size;
4454 asoc->cnt_on_all_streams--;
4455 /* deliver it to at least the delivery-q */
4456 strmin->last_sequence_delivered =
4457 chk->rec.data.stream_seq;
4458 sctp_deliver_data(stcb, &stcb->asoc, chk, 0);
4459 tt = strmin->last_sequence_delivered + 1;
4460 } else {
4461 break;
4463 chk = nchk;
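/*
 * Note: stream sequence numbers are 16-bit, so the comparisons above use
 * MAX_SEQ rather than MAX_TSN; e.g. ssn 2 counts as "after" ssn 65534
 * because the forward distance, mod 65536, is only 4.
 */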
4468 void
4469 sctp_handle_forward_tsn(struct sctp_tcb *stcb,
4470 struct sctp_forward_tsn_chunk *fwd, int *abort_flag)
4473 /* ISSUES that MUST be fixed for ECN! When we are the
4474 * sender of the forward TSN, when the SACK comes back
4475 * that acknowledges the FWD-TSN we must reset the
4476 * NONCE sum to match correctly. This will get quite
4477 * tricky since we may have sent more data intervening and
4478 * must carefully account for what the SACK says on the
4479 * nonce and any gaps that are reported. This work
4480 * will NOT be done here, but I note it here since
4481 * it is really related to PR-SCTP and FWD-TSN's */
4484 /* The pr-sctp fwd tsn */
4486 /* here we will perform all the data receiver side steps for
4487 * processing FwdTSN, as required by the pr-sctp draft:
4489 * Assume we get FwdTSN(x):
4491 * 1) update local cumTSN to x
4492 * 2) try to further advance cumTSN to x + others we have
4493 * 3) examine and update re-ordering queue on pr-in-streams
4494 * 4) clean up re-assembly queue
4495 * 5) Send a sack to report where we are. */
4497 struct sctp_strseq *stseq;
4498 struct sctp_association *asoc;
4499 u_int32_t new_cum_tsn, gap, back_out_htsn;
4500 unsigned int i, cnt_gone, fwd_sz, cumack_set_flag, m_size;
4501 struct sctp_stream_in *strm;
4502 struct sctp_tmit_chunk *chk, *at;
4504 cumack_set_flag = 0;
4505 asoc = &stcb->asoc;
4506 cnt_gone = 0;
4507 if ((fwd_sz = ntohs(fwd->ch.chunk_length)) < sizeof(struct sctp_forward_tsn_chunk)) {
4508 #ifdef SCTP_DEBUG
4509 if (sctp_debug_on & SCTP_DEBUG_INDATA1) {
4510 kprintf("Bad size too small/big fwd-tsn\n");
4512 #endif
4513 return;
4515 m_size = (stcb->asoc.mapping_array_size << 3);
4516 /*************************************************************/
4517 /* 1. Here we update local cumTSN and shift the bitmap array */
4518 /*************************************************************/
4519 new_cum_tsn = ntohl(fwd->new_cumulative_tsn);
4521 if (compare_with_wrap(asoc->cumulative_tsn, new_cum_tsn, MAX_TSN) ||
4522 asoc->cumulative_tsn == new_cum_tsn) {
4523 /* Already got there ... */
4524 return;
4527 back_out_htsn = asoc->highest_tsn_inside_map;
4528 if (compare_with_wrap(new_cum_tsn, asoc->highest_tsn_inside_map,
4529 MAX_TSN)) {
4530 asoc->highest_tsn_inside_map = new_cum_tsn;
4531 #ifdef SCTP_MAP_LOGGING
4532 sctp_log_map(0, 0, asoc->highest_tsn_inside_map, SCTP_MAP_SLIDE_RESULT);
4533 #endif
4536 /* now we know the new TSN is more advanced, let's find the
4537 * actual gap */
4539 if ((compare_with_wrap(new_cum_tsn, asoc->mapping_array_base_tsn,
4540 MAX_TSN)) ||
4541 (new_cum_tsn == asoc->mapping_array_base_tsn)) {
4542 gap = new_cum_tsn - asoc->mapping_array_base_tsn;
4543 } else {
4544 /* try to prevent underflow here */
4545 gap = new_cum_tsn + (MAX_TSN - asoc->mapping_array_base_tsn) + 1;
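/*
 * Worked example (not from the source): with mapping_array_base_tsn
 * 0xfffffff0 and new_cum_tsn 0x0000000f, the else branch gives
 * gap = 0x0f + (0xffffffff - 0xfffffff0) + 1 = 31, i.e. the bit index
 * of the new cumulative TSN within the mapping array.
 */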
4548 if (gap > m_size) { /* gap is unsigned, so it can never be negative */
4549 asoc->highest_tsn_inside_map = back_out_htsn;
4550 if ((long)gap > sctp_sbspace(&stcb->sctp_socket->so_rcv)) {
4552 /* out of range (even counting single-byte chunks
4553 * against the rwnd we give out);
4554 * too questionable, better to drop it silently */
4556 return;
4558 if (asoc->highest_tsn_inside_map >
4559 asoc->mapping_array_base_tsn) {
4560 gap = asoc->highest_tsn_inside_map -
4561 asoc->mapping_array_base_tsn;
4562 } else {
4563 gap = asoc->highest_tsn_inside_map +
4564 (MAX_TSN - asoc->mapping_array_base_tsn) + 1;
4566 cumack_set_flag = 1;
4568 for (i = 0; i <= gap; i++) {
4569 SCTP_SET_TSN_PRESENT(asoc->mapping_array, i);
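/*
 * Assumption worth flagging: SCTP_SET_TSN_PRESENT is taken to be the
 * usual bitmap setter, something like
 *
 *	array[gap >> 3] |= (0x01 << (gap & 0x07));
 *
 * so this loop simply marks every TSN up to the new cum-ack as received
 * before the map is slid forward below.
 */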
4572 /* Now after marking all, slide things forward but no
4573 * sack please. */
4575 sctp_sack_check(stcb, 0, 0, abort_flag);
4576 if (*abort_flag)
4577 return;
4579 if (cumack_set_flag) {
4581 /* fwd-tsn went outside my gap array - not a
4582 * common occurrence. Do the same thing we
4583 * do when a cookie-echo arrives. */
4585 asoc->highest_tsn_inside_map = new_cum_tsn - 1;
4586 asoc->mapping_array_base_tsn = new_cum_tsn;
4587 asoc->cumulative_tsn = asoc->highest_tsn_inside_map;
4588 #ifdef SCTP_MAP_LOGGING
4589 sctp_log_map(0, 3, asoc->highest_tsn_inside_map, SCTP_MAP_SLIDE_RESULT);
4590 #endif
4591 asoc->last_echo_tsn = asoc->highest_tsn_inside_map;
4593 /*************************************************************/
4594 /* 2. Clear up re-assembly queue */
4595 /*************************************************************/
4598 /* First service it if pd-api is up, just in case we can
4599 * progress it forward */
4601 if (asoc->fragmented_delivery_inprogress) {
4602 sctp_service_reassembly(stcb, asoc, 0);
4604 if (!TAILQ_EMPTY(&asoc->reasmqueue)) {
4605 /* For each one on here see if we need to toss it */
4607 /* For now large messages held on the reasmqueue that are
4608 * complete will be tossed too. We could in theory do more
4609 * work to spin through and stop after dumping one msg
4610 * aka seeing the start of a new msg at the head, and call
4611 * the delivery function... to see if it can be delivered...
4612 * But for now we just dump everything on the queue. */
4614 chk = TAILQ_FIRST(&asoc->reasmqueue);
4615 while (chk) {
4616 at = TAILQ_NEXT(chk, sctp_next);
4617 if (compare_with_wrap(asoc->cumulative_tsn,
4618 chk->rec.data.TSN_seq, MAX_TSN) ||
4619 asoc->cumulative_tsn == chk->rec.data.TSN_seq) {
4620 /* It needs to be tossed */
4621 TAILQ_REMOVE(&asoc->reasmqueue, chk, sctp_next);
4622 if (compare_with_wrap(chk->rec.data.TSN_seq,
4623 asoc->tsn_last_delivered, MAX_TSN)) {
4624 asoc->tsn_last_delivered =
4625 chk->rec.data.TSN_seq;
4626 asoc->str_of_pdapi =
4627 chk->rec.data.stream_number;
4628 asoc->ssn_of_pdapi =
4629 chk->rec.data.stream_seq;
4630 asoc->fragment_flags =
4631 chk->rec.data.rcv_flags;
4633 asoc->size_on_reasm_queue -= chk->send_size;
4634 asoc->cnt_on_reasm_queue--;
4635 cnt_gone++;
4637 /* Clear up any stream problem */
4638 if ((chk->rec.data.rcv_flags & SCTP_DATA_UNORDERED) !=
4639 SCTP_DATA_UNORDERED &&
4640 (compare_with_wrap(chk->rec.data.stream_seq,
4641 asoc->strmin[chk->rec.data.stream_number].last_sequence_delivered,
4642 MAX_SEQ))) {
4644 /* We must dump forward this stream's
4645 * sequence number if the chunk is not
4646 * unordered that is being skipped.
4647 * There is a chance that if the peer
4648 * does not include the last fragment
4649 * in its FWD-TSN we WILL have a problem
4650 * here since you would have a partial
4651 * chunk in queue that may not be
4652 * deliverable.
4653 * Also if a Partial delivery API has
4654 * started, the user may get a partial
4655 * chunk. The next read returning a new
4656 * chunk... really ugly but I see no way
4657 * around it! Maybe a notify?? */
4659 asoc->strmin[chk->rec.data.stream_number].last_sequence_delivered =
4660 chk->rec.data.stream_seq;
4662 if (chk->data) {
4663 sctp_m_freem(chk->data);
4664 chk->data = NULL;
4666 sctp_free_remote_addr(chk->whoTo);
4667 SCTP_ZONE_FREE(sctppcbinfo.ipi_zone_chunk, chk);
4668 sctppcbinfo.ipi_count_chunk--;
4669 if ((int)sctppcbinfo.ipi_count_chunk < 0) {
4670 panic("Chunk count is negative");
4672 sctppcbinfo.ipi_gencnt_chunk++;
4673 } else {
4675 /* Ok we have gone beyond the end of the
4676 * fwd-tsn's mark. Some checks... */
4678 if ((asoc->fragmented_delivery_inprogress) &&
4679 (chk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG)) {
4680 /* Special case: PD-API is up and what we fwd-tsn'd
4681 * over includes one that had the LAST_FRAG. We
4682 * no longer need to do the PD-API. */
4684 asoc->fragmented_delivery_inprogress = 0;
4685 sctp_ulp_notify(SCTP_NOTIFY_PARTIAL_DELVIERY_INDICATION,
4686 stcb, SCTP_PARTIAL_DELIVERY_ABORTED, (void *)NULL);
4689 break;
4691 chk = at;
4694 if (asoc->fragmented_delivery_inprogress) {
4696 /* Ok we removed cnt_gone chunks in the PD-API queue that
4697 * were being delivered. So now we must turn off the
4698 * flag. */
4700 sctp_ulp_notify(SCTP_NOTIFY_PARTIAL_DELVIERY_INDICATION,
4701 stcb, SCTP_PARTIAL_DELIVERY_ABORTED, (void *)NULL);
4702 asoc->fragmented_delivery_inprogress = 0;
4704 /*************************************************************/
4705 /* 3. Update the PR-stream re-ordering queues */
4706 /*************************************************************/
4707 stseq = (struct sctp_strseq *)((caddr_t)fwd + sizeof(*fwd));
4708 fwd_sz -= sizeof(*fwd);
4710 /* New method. */
4711 int num_str, i;
4712 num_str = fwd_sz/sizeof(struct sctp_strseq);
4713 #ifdef SCTP_DEBUG
4714 if (sctp_debug_on & SCTP_DEBUG_INDATA1) {
4715 kprintf("Using NEW method, %d strseq's reported in FWD-TSN\n",
4716 num_str);
4718 #endif
4719 for (i = 0; i < num_str; i++) {
4720 u_int16_t st;
4721 unsigned char *xx;
4722 /* Convert */
4723 xx = (unsigned char *)&stseq[i];
4724 st = ntohs(stseq[i].stream);
4725 stseq[i].stream = st;
4726 st = ntohs(stseq[i].sequence);
4727 stseq[i].sequence = st;
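/*
 * A hedged note on the wire format: each sctp_strseq entry is assumed to
 * carry a 16-bit stream id followed by a 16-bit stream sequence; the two
 * ntohs() stores above convert the entry to host order in place so the
 * checks below can compare directly.
 */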
4728 /* now process */
4729 if (stseq[i].stream >= asoc->streamincnt) {
4730 #ifdef SCTP_DEBUG
4731 if (sctp_debug_on & SCTP_DEBUG_INDATA1) {
4732 kprintf("Bogus stream number %d "
4733 "streamincnt is %d\n",
4734 stseq[i].stream, asoc->streamincnt);
4736 #endif
4738 /* It is arguable whether we should continue. Since
4739 * the peer sent bogus stream info we may be in
4740 * deep trouble...
4741 * a return may be a better choice? */
4743 continue;
4745 strm = &asoc->strmin[stseq[i].stream];
4746 if (compare_with_wrap(stseq[i].sequence,
4747 strm->last_sequence_delivered, MAX_SEQ)) {
4748 /* Update the sequence number */
4749 strm->last_sequence_delivered =
4750 stseq[i].sequence;
4752 /* now kick the stream the new way */
4753 sctp_kick_prsctp_reorder_queue(stcb, strm);