/*
 * Copyright (c) 2003, 2004 Jeffrey M. Hsu.  All rights reserved.
 * Copyright (c) 2003, 2004 The DragonFly Project.  All rights reserved.
 *
 * This code is derived from software contributed to The DragonFly Project
 * by Jeffrey M. Hsu.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of The DragonFly Project nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific, prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $DragonFly: src/sys/netinet/tcp_sack.c,v 1.8 2008/08/15 21:37:16 nth Exp $
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/malloc.h>
#include <sys/queue.h>
#include <sys/thread.h>
#include <sys/types.h>
#include <sys/socket.h>
#include <sys/socketvar.h>
#include <sys/resourcevar.h>

#include <netinet/in.h>
#include <netinet/in_systm.h>
#include <netinet/ip.h>
#include <netinet/in_var.h>
#include <netinet/in_pcb.h>
#include <netinet/ip_var.h>
#include <netinet/tcp.h>
#include <netinet/tcp_seq.h>
#include <netinet/tcp_var.h>

struct sackblock {
        tcp_seq                 sblk_start;
        tcp_seq                 sblk_end;
        TAILQ_ENTRY(sackblock)  sblk_list;
};

#define MAXSAVEDBLOCKS  8       /* per connection limit */
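
/*
 * Scoreboard invariant: the per-connection list of SACKed blocks is
 * kept sorted by sequence number, with no two blocks overlapping or
 * abutting.  insert_block() below merges each incoming raw block into
 * the list so that this invariant always holds.
 */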

static int      insert_block(struct scoreboard *scb,
                    const struct raw_sackblock *raw_sb, boolean_t *update);

static MALLOC_DEFINE(M_SACKBLOCK, "sblk", "sackblock struct");

/*
 * Per-tcpcb initialization.
 */
void
tcp_sack_tcpcb_init(struct tcpcb *tp)
{
        struct scoreboard *scb = &tp->scb;

        scb->nblocks = 0;
        TAILQ_INIT(&scb->sackblocks);
        scb->lastfound = NULL;
}

/*
 * Find the SACK block containing or immediately preceding "seq".
 * The boolean result indicates whether the sequence is actually
 * contained in the SACK block.
 */
static boolean_t
sack_block_lookup(struct scoreboard *scb, tcp_seq seq, struct sackblock **sb)
{
        static struct krate sackkrate = { .freq = 1 };
        struct sackblock *hint = scb->lastfound;
        struct sackblock *cur, *last, *prev;
        int loop = 0;

        if (TAILQ_EMPTY(&scb->sackblocks)) {
                *sb = NULL;
                return FALSE;
        }

        if (hint == NULL) {
                /* No hint.  Search from start to end. */
                cur = TAILQ_FIRST(&scb->sackblocks);
                last = NULL;
                prev = TAILQ_LAST(&scb->sackblocks, sackblock_list);
        } else {
                if (SEQ_GEQ(seq, hint->sblk_start)) {
                        /* Search from hint to end of list. */
                        cur = hint;
                        last = NULL;
                        prev = TAILQ_LAST(&scb->sackblocks, sackblock_list);
                } else {
                        /* Search from front of list to hint. */
                        cur = TAILQ_FIRST(&scb->sackblocks);
                        last = hint;
                        prev = TAILQ_PREV(hint, sackblock_list, sblk_list);
                }
        }

        do {
                /*
                 * Ensure we can't crash if the list really blows up due to
                 * delta sign wraps when comparing seq against sblk_start vs
                 * sblk_end (see the corruption check below).
                 */
                if (++loop > MAXSAVEDBLOCKS * 4) {
                        krateprintf(&sackkrate,
                                    "tcp_sack: fatal corrupt seq\n");
                        break;
                }

                if (SEQ_GT(cur->sblk_end, seq)) {
                        if (SEQ_GEQ(seq, cur->sblk_start)) {
                                *sb = scb->lastfound = cur;
                                return TRUE;
                        } else {
                                *sb = scb->lastfound =
                                    TAILQ_PREV(cur, sackblock_list, sblk_list);
                                return FALSE;
                        }
                }

                /*
                 * seq is greater than sblk_end, nominally proceed to the
                 * next block.
                 *
                 * It is possible for an overflow to cause the comparison
                 * between seq and sblk_start vs sblk_end to make it appear
                 * that seq is less than sblk_start and also greater than
                 * sblk_end.  If we allow the case to fall through we can
                 * end up with cur == NULL on the next loop.
                 */
                if (SEQ_LT(seq, cur->sblk_start)) {
                        krateprintf(&sackkrate,
                                    "tcp_sack: corrupt seq "
                                    "0x%08x vs 0x%08x-0x%08x\n",
                                    seq, cur->sblk_start, cur->sblk_end);
                        if (SEQ_GEQ(seq, cur->sblk_start)) {
                                *sb = scb->lastfound = cur;
                                return TRUE;
                        } else {
                                *sb = scb->lastfound =
                                    TAILQ_PREV(cur, sackblock_list,
                                               sblk_list);
                                return FALSE;
                        }
                }
                cur = TAILQ_NEXT(cur, sblk_list);
        } while (cur != last);

        *sb = scb->lastfound = prev;

        return FALSE;
}
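
/*
 * Illustrative example: with scoreboard blocks [100,200) and [300,400),
 * sack_block_lookup(scb, 150, &sb) returns TRUE with *sb = [100,200);
 * sack_block_lookup(scb, 250, &sb) returns FALSE with *sb = [100,200),
 * the block immediately preceding seq 250; and a seq below the first
 * block returns FALSE with *sb = NULL via TAILQ_PREV().
 */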

/*
 * Allocate a SACK block.
 */
static __inline struct sackblock *
alloc_sackblock(struct scoreboard *scb, const struct raw_sackblock *raw_sb)
{
        struct sackblock *sb;

        if (scb->freecache != NULL) {
                sb = scb->freecache;
                scb->freecache = NULL;
                tcpstat.tcps_sacksbfast++;
        } else {
                sb = kmalloc(sizeof(struct sackblock), M_SACKBLOCK, M_NOWAIT);
                if (sb == NULL) {
                        tcpstat.tcps_sacksbfailed++;
                        return NULL;
                }
        }
        sb->sblk_start = raw_sb->rblk_start;
        sb->sblk_end = raw_sb->rblk_end;
        return sb;
}

/*
 * Allocate a SACK block, respecting the per-connection block limit.
 */
static __inline struct sackblock *
alloc_sackblock_limit(struct scoreboard *scb,
    const struct raw_sackblock *raw_sb)
{
        if (scb->nblocks == MAXSAVEDBLOCKS) {
                /*
                 * Should try to kick out older blocks XXX JH
                 * May be able to coalesce with existing block.
                 * Or, go other way and free all blocks if we hit
                 * this limit.
                 */
                tcpstat.tcps_sacksboverflow++;
                return NULL;
        }
        return alloc_sackblock(scb, raw_sb);
}

/*
 * Free a SACK block.
 */
static __inline void
free_sackblock(struct scoreboard *scb, struct sackblock *s)
{
        if (scb->freecache == NULL) {
                /* YYY Maybe use the latest freed block? */
                scb->freecache = s;
                return;
        }
        kfree(s, M_SACKBLOCK);
}
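
/*
 * Design note: freecache is a one-slot cache holding the first freed
 * block; alloc_sackblock() consumes it before falling back to
 * kmalloc(M_NOWAIT).  Since a scoreboard typically churns one block
 * per ACK, this avoids most allocator round trips (counted by the
 * tcps_sacksbfast statistic).
 */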

/*
 * Free up SACK blocks for data that's been acked.
 */
static void
tcp_sack_ack_blocks(struct tcpcb *tp, tcp_seq th_ack)
{
        struct scoreboard *scb = &tp->scb;
        struct sackblock *sb, *nb;

        sb = TAILQ_FIRST(&scb->sackblocks);
        while (sb && SEQ_LEQ(sb->sblk_end, th_ack)) {
                nb = TAILQ_NEXT(sb, sblk_list);
                if (scb->lastfound == sb)
                        scb->lastfound = NULL;
                TAILQ_REMOVE(&scb->sackblocks, sb, sblk_list);
                free_sackblock(scb, sb);
                --scb->nblocks;
                KASSERT(scb->nblocks >= 0,
                    ("SACK block count underflow: %d < 0", scb->nblocks));
                sb = nb;
        }
        if (sb && SEQ_GEQ(th_ack, sb->sblk_start)) {
                /* Other side reneged? XXX */
                tcpstat.tcps_sackrenege++;
                tcp_sack_discard(tp);
        }
}

/*
 * Delete and free SACK blocks saved in scoreboard.
 */
void
tcp_sack_cleanup(struct scoreboard *scb)
{
        struct sackblock *sb, *nb;

        TAILQ_FOREACH_MUTABLE(sb, &scb->sackblocks, sblk_list, nb) {
                free_sackblock(scb, sb);
                --scb->nblocks;
        }
        KASSERT(scb->nblocks == 0,
            ("SACK block %d count not zero", scb->nblocks));
        TAILQ_INIT(&scb->sackblocks);
        scb->lastfound = NULL;
}

/*
 * Discard SACK scoreboard, HighRxt, RescueRxt and LostSeq.
 */
void
tcp_sack_discard(struct tcpcb *tp)
{
        tcp_sack_cleanup(&tp->scb);
        tp->rexmt_high = tp->snd_una;
        tp->sack_flags &= ~TSACK_F_SACKRESCUED;
        tp->scb.lostseq = tp->snd_una;
}

/*
 * Delete and free SACK blocks saved in scoreboard.
 * Delete the one slot block cache.
 */
void
tcp_sack_destroy(struct scoreboard *scb)
{
        tcp_sack_cleanup(scb);
        if (scb->freecache != NULL) {
                kfree(scb->freecache, M_SACKBLOCK);
                scb->freecache = NULL;
        }
}

/*
 * Cleanup the reported SACK block information.
 */
void
tcp_sack_report_cleanup(struct tcpcb *tp)
{
        tp->sack_flags &=
            ~(TSACK_F_DUPSEG | TSACK_F_ENCLOSESEG | TSACK_F_SACKLEFT);
        tp->reportblk.rblk_start = tp->reportblk.rblk_end;
}

/*
 * Whether a SACK report is needed or not.
 */
boolean_t
tcp_sack_report_needed(const struct tcpcb *tp)
{
        if ((tp->sack_flags &
             (TSACK_F_DUPSEG | TSACK_F_ENCLOSESEG | TSACK_F_SACKLEFT)) ||
            tp->reportblk.rblk_start != tp->reportblk.rblk_end)
                return TRUE;
        else
                return FALSE;
}

/*
 * Returns     0 if not D-SACK block,
 *             1 if D-SACK,
 *             2 if duplicate of out-of-order D-SACK block.
 */
int
tcp_sack_ndsack_blocks(const struct raw_sackblock *blocks, const int numblocks,
    tcp_seq snd_una)
{
        if (numblocks == 0)
                return 0;

        if (SEQ_LT(blocks[0].rblk_start, snd_una))
                return 1;

        /* block 0 inside block 1 */
        if (numblocks > 1 &&
            SEQ_GEQ(blocks[0].rblk_start, blocks[1].rblk_start) &&
            SEQ_LEQ(blocks[0].rblk_end, blocks[1].rblk_end))
                return 2;

        return 0;
}
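
/*
 * Background (RFC 2883): a receiver reports duplicate segments by
 * placing a D-SACK block first in the SACK option.  The block either
 * covers already-acknowledged data (below snd_una, case 1 above) or,
 * for a duplicate of out-of-order data, is enclosed by the second
 * SACK block (case 2 above).  Callers use a nonzero return to skip
 * the D-SACK block when updating the scoreboard.
 */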

/*
 * Update scoreboard on new incoming ACK.
 */
static void
tcp_sack_add_blocks(struct tcpcb *tp, struct tcpopt *to)
{
        const int numblocks = to->to_nsackblocks;
        struct raw_sackblock *blocks = to->to_sackblocks;
        struct scoreboard *scb = &tp->scb;
        int startblock, i;

        if (tcp_sack_ndsack_blocks(blocks, numblocks, tp->snd_una) > 0)
                startblock = 1;
        else
                startblock = 0;

        to->to_flags |= TOF_SACK_REDUNDANT;
        for (i = startblock; i < numblocks; i++) {
                struct raw_sackblock *newsackblock = &blocks[i];
                boolean_t update;
                int error;

                /* Guard against ACK reordering */
                if (SEQ_LEQ(newsackblock->rblk_start, tp->snd_una))
                        continue;

                /* Don't accept bad SACK blocks */
                if (SEQ_GT(newsackblock->rblk_end, tp->snd_max)) {
                        tcpstat.tcps_rcvbadsackopt++;
                        break;          /* skip all other blocks */
                }
                tcpstat.tcps_sacksbupdate++;

                error = insert_block(scb, newsackblock, &update);
                if (update)
                        to->to_flags &= ~TOF_SACK_REDUNDANT;
                if (error)
                        break;
        }
}

void
tcp_sack_update_scoreboard(struct tcpcb *tp, struct tcpopt *to)
{
        struct scoreboard *scb = &tp->scb;
        int rexmt_high_update = 0;

        tcp_sack_ack_blocks(tp, tp->snd_una);
        tcp_sack_add_blocks(tp, to);
        tcp_sack_update_lostseq(scb, tp->snd_una, tp->t_maxseg,
            tp->t_rxtthresh);
        if (SEQ_LT(tp->rexmt_high, tp->snd_una)) {
                tp->rexmt_high = tp->snd_una;
                rexmt_high_update = 1;
        }
        if (tp->sack_flags & TSACK_F_SACKRESCUED) {
                if (SEQ_LEQ(tp->rexmt_rescue, tp->snd_una)) {
                        tp->sack_flags &= ~TSACK_F_SACKRESCUED;
                } else if (tcp_aggressive_rescuesack && rexmt_high_update &&
                    SEQ_LT(tp->rexmt_rescue, tp->rexmt_high)) {
                        /* Drag RescueRxt along with HighRxt */
                        tp->rexmt_rescue = tp->rexmt_high;
                }
        }
}

/*
 * Insert SACK block into sender's scoreboard.
 */
static int
insert_block(struct scoreboard *scb, const struct raw_sackblock *raw_sb,
    boolean_t *update)
{
        struct sackblock *sb, *workingblock;
        boolean_t overlap_front;

        *update = TRUE;
        if (TAILQ_EMPTY(&scb->sackblocks)) {
                struct sackblock *newblock;

                KASSERT(scb->nblocks == 0, ("empty scb w/ blocks"));

                newblock = alloc_sackblock(scb, raw_sb);
                if (newblock == NULL)
                        return ENOMEM;
                TAILQ_INSERT_HEAD(&scb->sackblocks, newblock, sblk_list);
                scb->nblocks = 1;
                return 0;
        }

        KASSERT(scb->nblocks > 0, ("insert_block() called w/ no blocks"));
        KASSERT(scb->nblocks <= MAXSAVEDBLOCKS,
            ("too many SACK blocks %d", scb->nblocks));

        overlap_front = sack_block_lookup(scb, raw_sb->rblk_start, &sb);

        if (sb == NULL) {
                workingblock = alloc_sackblock_limit(scb, raw_sb);
                if (workingblock == NULL)
                        return ENOMEM;
                TAILQ_INSERT_HEAD(&scb->sackblocks, workingblock, sblk_list);
                ++scb->nblocks;
        } else {
                if (overlap_front || sb->sblk_end == raw_sb->rblk_start) {
                        tcpstat.tcps_sacksbreused++;

                        /* Extend old block */
                        workingblock = sb;
                        if (SEQ_GT(raw_sb->rblk_end, sb->sblk_end)) {
                                sb->sblk_end = raw_sb->rblk_end;
                        } else {
                                /* Exact match, nothing to consolidate */
                                *update = FALSE;
                                return 0;
                        }
                } else {
                        workingblock = alloc_sackblock_limit(scb, raw_sb);
                        if (workingblock == NULL)
                                return ENOMEM;
                        TAILQ_INSERT_AFTER(&scb->sackblocks, sb, workingblock,
                            sblk_list);
                        ++scb->nblocks;
                }
        }

        /* Consolidate right-hand side. */
        sb = TAILQ_NEXT(workingblock, sblk_list);
        while (sb != NULL &&
            SEQ_GEQ(workingblock->sblk_end, sb->sblk_end)) {
                struct sackblock *nextblock;

                nextblock = TAILQ_NEXT(sb, sblk_list);
                if (scb->lastfound == sb)
                        scb->lastfound = NULL;
                /* Remove completely overlapped block */
                TAILQ_REMOVE(&scb->sackblocks, sb, sblk_list);
                free_sackblock(scb, sb);
                --scb->nblocks;
                KASSERT(scb->nblocks > 0,
                    ("removed overlapped block: %d blocks left",
                     scb->nblocks));
                sb = nextblock;
        }

        if (sb != NULL &&
            SEQ_GEQ(workingblock->sblk_end, sb->sblk_start)) {
                /* Extend new block to cover partially overlapped old block. */
                workingblock->sblk_end = sb->sblk_end;
                if (scb->lastfound == sb)
                        scb->lastfound = NULL;
                TAILQ_REMOVE(&scb->sackblocks, sb, sblk_list);
                free_sackblock(scb, sb);
                --scb->nblocks;
                KASSERT(scb->nblocks > 0,
                    ("removed partial right: %d blocks left",
                     scb->nblocks));
        }

        return 0;
}
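
/*
 * Illustrative example for insert_block(): with existing blocks
 * [100,200) and [300,400), inserting raw block [150,350) first extends
 * [100,200) to [100,350) (overlap_front), then the right-hand pass
 * merges the partially overlapped [300,400), leaving a single block
 * [100,400).  An exact duplicate of an existing block sets *update to
 * FALSE so the caller can flag the SACK option as redundant.
 */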

#ifdef DEBUG_SACK_BLOCKS
static void
tcp_sack_dump_blocks(const struct scoreboard *scb)
{
        const struct sackblock *sb;

        kprintf("%d blocks:", scb->nblocks);
        TAILQ_FOREACH(sb, &scb->sackblocks, sblk_list)
                kprintf(" [%u, %u)", sb->sblk_start, sb->sblk_end);
        kprintf("\n");
}
#else
static __inline void
tcp_sack_dump_blocks(const struct scoreboard *scb)
{
}
#endif

/*
 * Optimization to quickly determine which packets are lost.
 */
void
tcp_sack_update_lostseq(struct scoreboard *scb, tcp_seq snd_una, u_int maxseg,
    int rxtthresh)
{
        struct sackblock *sb;
        int nsackblocks = 0;
        int bytes_sacked = 0;
        int rxtthresh_bytes;

        if (tcp_do_rfc6675)
                rxtthresh_bytes = (rxtthresh - 1) * maxseg;
        else
                rxtthresh_bytes = rxtthresh * maxseg;

        sb = TAILQ_LAST(&scb->sackblocks, sackblock_list);
        while (sb != NULL) {
                ++nsackblocks;
                bytes_sacked += sb->sblk_end - sb->sblk_start;
                if (nsackblocks == rxtthresh ||
                    bytes_sacked >= rxtthresh_bytes) {
                        scb->lostseq = sb->sblk_start;
                        return;
                }
                sb = TAILQ_PREV(sb, sackblock_list, sblk_list);
        }
        scb->lostseq = snd_una;
}
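
/*
 * Example (illustrative): with rxtthresh = 3, maxseg = 1000 and
 * tcp_do_rfc6675 disabled, rxtthresh_bytes = 3000.  Given scoreboard
 * blocks [5000,6000), [8000,9000) and [12000,15000), the walk from the
 * right finds that [12000,15000) alone SACKs 3000 bytes, meeting the
 * threshold, so lostseq = 12000; unSACKed sequence numbers below 12000
 * are then reported lost by tcp_sack_islost().
 */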

/*
 * Return whether the given sequence number is considered lost.
 */
boolean_t
tcp_sack_islost(const struct scoreboard *scb, tcp_seq seqnum)
{
        return SEQ_LT(seqnum, scb->lostseq);
}

/*
 * True if at least "amount" has been SACKed.  Used by Early Retransmit.
 */
boolean_t
tcp_sack_has_sacked(const struct scoreboard *scb, u_int amount)
{
        const struct sackblock *sb;
        int bytes_sacked = 0;

        TAILQ_FOREACH(sb, &scb->sackblocks, sblk_list) {
                bytes_sacked += sb->sblk_end - sb->sblk_start;
                if (bytes_sacked >= amount)
                        return TRUE;
        }
        return FALSE;
}

/*
 * Number of bytes SACKed below seq.
 */
int
tcp_sack_bytes_below(const struct scoreboard *scb, tcp_seq seq)
{
        const struct sackblock *sb;
        int bytes_sacked = 0;

        sb = TAILQ_FIRST(&scb->sackblocks);
        while (sb && SEQ_GT(seq, sb->sblk_start)) {
                bytes_sacked += seq_min(seq, sb->sblk_end) - sb->sblk_start;
                sb = TAILQ_NEXT(sb, sblk_list);
        }
        return bytes_sacked;
}

/*
 * Return estimate of the number of bytes outstanding in the network.
 */
int
tcp_sack_compute_pipe(const struct tcpcb *tp)
{
        const struct scoreboard *scb = &tp->scb;
        const struct sackblock *sb;
        int nlost, nretransmitted;
        tcp_seq end;

        nlost = tp->snd_max - scb->lostseq;
        nretransmitted = tp->rexmt_high - tp->snd_una;

        TAILQ_FOREACH(sb, &scb->sackblocks, sblk_list) {
                if (SEQ_LT(sb->sblk_start, tp->rexmt_high)) {
                        end = seq_min(sb->sblk_end, tp->rexmt_high);
                        nretransmitted -= end - sb->sblk_start;
                }
                if (SEQ_GEQ(sb->sblk_start, scb->lostseq))
                        nlost -= sb->sblk_end - sb->sblk_start;
        }

        return (nlost + nretransmitted);
}
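
/*
 * Rationale: this is in the spirit of the "pipe" estimate used by
 * SACK-based loss recovery (RFC 6675).  nlost starts as everything
 * above lostseq and nretransmitted as everything already retransmitted
 * (snd_una..rexmt_high); SACKed bytes are then subtracted from each so
 * that data known to have left the network is not counted as
 * outstanding.
 */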

/*
 * Return the sequence number and length of the next segment to transmit
 * when in Fast Recovery.
 */
void
tcp_sack_nextseg(struct tcpcb *tp, tcp_seq *nextrexmt, uint32_t *plen,
    boolean_t *rescue)
{
        struct scoreboard *scb = &tp->scb;
        struct socket *so = tp->t_inpcb->inp_socket;
        struct sackblock *sb;
        const struct sackblock *lastblock =
            TAILQ_LAST(&scb->sackblocks, sackblock_list);
        tcp_seq torexmt;
        long len, off, sendwin;

        /* skip SACKed data */
        tcp_sack_skip_sacked(scb, &tp->rexmt_high);

        /* Look for lost data. */
        torexmt = tp->rexmt_high;
        *rescue = FALSE;
        if (lastblock != NULL) {
                if (SEQ_LT(torexmt, lastblock->sblk_end) &&
                    tcp_sack_islost(scb, torexmt)) {
sendunsacked:
                        *nextrexmt = torexmt;
                        /* If the left-hand edge has been SACKed, pull it in. */
                        if (sack_block_lookup(scb, torexmt + tp->t_maxseg, &sb))
                                *plen = sb->sblk_start - torexmt;
                        else
                                *plen = tp->t_maxseg;
                        return;
                }
        }

        /* See if unsent data available within send window. */
        off = tp->snd_max - tp->snd_una;
        sendwin = min(tp->snd_wnd, tp->snd_bwnd);
        len = (long) ulmin(so->so_snd.ssb_cc, sendwin) - off;
        if (len > 0) {
                *nextrexmt = tp->snd_max;       /* Send new data. */
                *plen = tp->t_maxseg;
                return;
        }

        /* We're less certain this data has been lost. */
        if (lastblock != NULL && SEQ_LT(torexmt, lastblock->sblk_end))
                goto sendunsacked;

        /* Rescue retransmission */
        if (tcp_do_rescuesack || tcp_do_rfc6675) {
                tcpstat.tcps_sackrescue_try++;
                if (tp->sack_flags & TSACK_F_SACKRESCUED) {
                        if (!tcp_aggressive_rescuesack)
                                return;

                        /*
                         * Aggressive variant of the rescue retransmission.
                         *
                         * The idea of the rescue retransmission is to sustain
                         * the ACK clock and thus avoid timeout retransmission.
                         *
                         * Under some situations, the conservative approach
                         * suggested in the draft
                         * http://tools.ietf.org/html/
                         * draft-nishida-tcpm-rescue-retransmission-00
                         * could not sustain the ACK clock, since it only
                         * allows one rescue retransmission before a
                         * cumulative ACK covers the segment transmitted by
                         * the rescue retransmission.
                         *
                         * We try to locate the next unSACKed segment which
                         * follows the previously sent rescue segment.  If
                         * there is no such segment, we loop back to the first
                         * unacknowledged segment.
                         */

                        /*
                         * Skip SACKed data, but here we follow
                         * the last transmitted rescue segment.
                         */
                        torexmt = tp->rexmt_rescue;
                        tcp_sack_skip_sacked(scb, &torexmt);
                }
                if (torexmt == tp->snd_max) {
                        /* Nothing left to retransmit; restart */
                        torexmt = tp->snd_una;
                }
                *rescue = TRUE;
                goto sendunsacked;
        } else if (tcp_do_smartsack && lastblock == NULL) {
                tcpstat.tcps_sackrescue_try++;
                *rescue = TRUE;
                goto sendunsacked;
        }
}
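
/*
 * Selection order above, in brief: (1) retransmit the first unSACKed
 * segment that is considered lost; (2) otherwise send new data if the
 * send window allows; (3) otherwise retransmit unSACKed data below the
 * highest SACK block even though it is not yet marked lost; (4) as a
 * last resort perform a rescue retransmission to keep the ACK clock
 * ticking.
 */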

/*
 * Return the next sequence number higher than "*prexmt" that has
 * not been SACKed.
 */
void
tcp_sack_skip_sacked(struct scoreboard *scb, tcp_seq *prexmt)
{
        struct sackblock *sb;

        /* skip SACKed data */
        if (sack_block_lookup(scb, *prexmt, &sb))
                *prexmt = sb->sblk_end;
}

/*
 * The length of the first amount of unSACKed data.
 */
uint32_t
tcp_sack_first_unsacked_len(const struct tcpcb *tp)
{
        const struct sackblock *sb;

        sb = TAILQ_FIRST(&tp->scb.sackblocks);
        if (sb == NULL)
                return tp->t_maxseg;

        KASSERT(SEQ_LT(tp->snd_una, sb->sblk_start),
            ("invalid sb start %u, snd_una %u",
             sb->sblk_start, tp->snd_una));
        return (sb->sblk_start - tp->snd_una);
}

#ifdef later
void
tcp_sack_save_scoreboard(struct scoreboard *scb)
{
        struct scoreboard *scb = &tp->scb;

        scb->sackblocks_prev = scb->sackblocks;
        TAILQ_INIT(&scb->sackblocks);
}

void
tcp_sack_revert_scoreboard(struct scoreboard *scb, tcp_seq snd_una,
    u_int maxseg)
{
        struct sackblock *sb;

        scb->sackblocks = scb->sackblocks_prev;
        scb->nblocks = 0;
        TAILQ_FOREACH(sb, &scb->sackblocks, sblk_list)
                ++scb->nblocks;
        tcp_sack_ack_blocks(scb, snd_una);
        scb->lastfound = NULL;
}
#endif

#ifdef DEBUG_SACK_HISTORY
static void
tcp_sack_dump_history(const char *msg, const struct tcpcb *tp)
{
        int i;
        static int ndumped;

        /* only need a couple of these to debug most problems */
        if (++ndumped > 900)
                return;

        kprintf("%s:\tnsackhistory %d: ", msg, tp->nsackhistory);
        for (i = 0; i < tp->nsackhistory; ++i)
                kprintf("[%u, %u) ", tp->sackhistory[i].rblk_start,
                    tp->sackhistory[i].rblk_end);
        kprintf("\n");
}
#else
static __inline void
tcp_sack_dump_history(const char *msg, const struct tcpcb *tp)
{
}
#endif

/*
 * Remove old SACK blocks from the SACK history that have already been ACKed.
 */
static void
tcp_sack_ack_history(struct tcpcb *tp)
{
        int i, nblocks, openslot;

        tcp_sack_dump_history("before tcp_sack_ack_history", tp);
        nblocks = tp->nsackhistory;
        for (i = openslot = 0; i < nblocks; ++i) {
                if (SEQ_LEQ(tp->sackhistory[i].rblk_end, tp->rcv_nxt)) {
                        --tp->nsackhistory;
                        continue;
                }
                if (SEQ_LT(tp->sackhistory[i].rblk_start, tp->rcv_nxt))
                        tp->sackhistory[i].rblk_start = tp->rcv_nxt;

                /* Copy the block down to fill any open slots. */
                tp->sackhistory[openslot++] = tp->sackhistory[i];
        }
        tcp_sack_dump_history("after tcp_sack_ack_history", tp);
        KASSERT(openslot == tp->nsackhistory,
            ("tcp_sack_ack_history miscounted: %d != %d",
             openslot, tp->nsackhistory));
}

/*
 * Add or merge newblock into reported history.
 * Also remove or update SACK blocks that will be acked.
 */
static void
tcp_sack_update_reported_history(struct tcpcb *tp, tcp_seq start, tcp_seq end)
{
        struct raw_sackblock copy[MAX_SACK_REPORT_BLOCKS];
        int i, cindex;

        tcp_sack_dump_history("before tcp_sack_update_reported_history", tp);
        /*
         * Six cases:
         *      0) no overlap
         *      1) newblock == oldblock
         *      2) oldblock contains newblock
         *      3) newblock contains oldblock
         *      4) tail of oldblock overlaps or abuts start of newblock
         *      5) tail of newblock overlaps or abuts head of oldblock
         */
        for (i = cindex = 0; i < tp->nsackhistory; ++i) {
                struct raw_sackblock *oldblock = &tp->sackhistory[i];
                tcp_seq old_start = oldblock->rblk_start;
                tcp_seq old_end = oldblock->rblk_end;

                if (SEQ_LT(end, old_start) || SEQ_GT(start, old_end)) {
                        /* Case 0:  no overlap.  Copy old block. */
                        copy[cindex++] = *oldblock;
                        continue;
                }

                if (SEQ_GEQ(start, old_start) && SEQ_LEQ(end, old_end)) {
                        /* Cases 1 & 2.  Move block to front of history. */
                        int j;

                        start = old_start;
                        end = old_end;
                        /* no need to check rest of blocks */
                        for (j = i + 1; j < tp->nsackhistory; ++j)
                                copy[cindex++] = tp->sackhistory[j];
                        break;
                }

                if (SEQ_GEQ(old_end, start) && SEQ_LT(old_start, start)) {
                        /* Case 4:  extend start of new block. */
                        start = old_start;
                } else if (SEQ_GEQ(end, old_start) && SEQ_GT(old_end, end)) {
                        /* Case 5:  extend end of new block. */
                        end = old_end;
                } else {
                        /* Case 3.  Delete old block by not copying it. */
                        KASSERT(SEQ_LEQ(start, old_start) &&
                            SEQ_GEQ(end, old_end),
                            ("bad logic: old [%u, %u), new [%u, %u)",
                             old_start, old_end, start, end));
                }
        }

        /* insert new block */
        tp->sackhistory[0].rblk_start = start;
        tp->sackhistory[0].rblk_end = end;
        cindex = min(cindex, MAX_SACK_REPORT_BLOCKS - 1);
        for (i = 0; i < cindex; ++i)
                tp->sackhistory[i + 1] = copy[i];
        tp->nsackhistory = cindex + 1;
        tcp_sack_dump_history("after tcp_sack_update_reported_history", tp);
}
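
/*
 * Illustrative example: history { [100,200) }, new report [150,250)
 * hits case 4 (tail of the old block overlaps the start of the new
 * block), so the merged block [100,250) is installed at slot 0 and the
 * old entry is dropped.  The most recently reported range always ends
 * up first, matching the RFC 2018 recommendation to repeat the newest
 * block first.
 */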

/*
 * Fill in SACK report to return to data sender.
 */
void
tcp_sack_fill_report(struct tcpcb *tp, u_char *opt, u_int *plen)
{
        u_int optlen = *plen;
        uint32_t *lp = (uint32_t *)(opt + optlen);
        uint32_t *olp;
        tcp_seq hstart = tp->rcv_nxt, hend;
        int nblocks;

        KASSERT(TCP_MAXOLEN - optlen >=
            TCPOLEN_SACK_ALIGNED + TCPOLEN_SACK_BLOCK,
            ("no room for SACK header and one block: optlen %d", optlen));

        if (tp->sack_flags & TSACK_F_DUPSEG)
                tcpstat.tcps_snddsackopt++;
        else
                tcpstat.tcps_sndsackopt++;

        olp = lp++;
        optlen += TCPOLEN_SACK_ALIGNED;

        tcp_sack_ack_history(tp);
        if (tp->reportblk.rblk_start != tp->reportblk.rblk_end) {
                *lp++ = htonl(tp->reportblk.rblk_start);
                *lp++ = htonl(tp->reportblk.rblk_end);
                optlen += TCPOLEN_SACK_BLOCK;
                hstart = tp->reportblk.rblk_start;
                hend = tp->reportblk.rblk_end;
                if (tp->sack_flags & TSACK_F_ENCLOSESEG) {
                        KASSERT(TCP_MAXOLEN - optlen >= TCPOLEN_SACK_BLOCK,
                            ("no room for enclosing SACK block: oplen %d",
                             optlen));
                        *lp++ = htonl(tp->encloseblk.rblk_start);
                        *lp++ = htonl(tp->encloseblk.rblk_end);
                        optlen += TCPOLEN_SACK_BLOCK;
                        hstart = tp->encloseblk.rblk_start;
                        hend = tp->encloseblk.rblk_end;
                }
                if (SEQ_GT(hstart, tp->rcv_nxt))
                        tcp_sack_update_reported_history(tp, hstart, hend);
        }
        if (tcp_do_smartsack && (tp->sack_flags & TSACK_F_SACKLEFT)) {
                /* Fill in from left!  Walk re-assembly queue. */
                struct tseg_qent *q;

                q = TAILQ_FIRST(&tp->t_segq);
                while (q != NULL &&
                    TCP_MAXOLEN - optlen >= TCPOLEN_SACK_BLOCK) {
                        *lp++ = htonl(q->tqe_th->th_seq);
                        *lp++ = htonl(TCP_SACK_BLKEND(
                            q->tqe_th->th_seq + q->tqe_len,
                            q->tqe_th->th_flags));
                        optlen += TCPOLEN_SACK_BLOCK;
                        q = TAILQ_NEXT(q, tqe_q);
                }
        } else {
                int n = 0;

                /* Fill in SACK blocks from right side. */
                while (n < tp->nsackhistory &&
                    TCP_MAXOLEN - optlen >= TCPOLEN_SACK_BLOCK) {
                        if (tp->sackhistory[n].rblk_start != hstart) {
                                *lp++ = htonl(tp->sackhistory[n].rblk_start);
                                *lp++ = htonl(tp->sackhistory[n].rblk_end);
                                optlen += TCPOLEN_SACK_BLOCK;
                        }
                        ++n;
                }
        }
        tp->reportblk.rblk_start = tp->reportblk.rblk_end;
        tp->sack_flags &=
            ~(TSACK_F_DUPSEG | TSACK_F_ENCLOSESEG | TSACK_F_SACKLEFT);
        nblocks = (lp - olp - 1) / 2;
        *olp = htonl(TCPOPT_SACK_ALIGNED |
            (TCPOLEN_SACK + nblocks * TCPOLEN_SACK_BLOCK));
        *plen = optlen;
}