/*
 * Copyright (c) 2002, 2003, 2004 Jeffrey M. Hsu.  All rights reserved.
 * Copyright (c) 2002, 2003, 2004 The DragonFly Project.  All rights reserved.
 *
 * This code is derived from software contributed to The DragonFly Project
 * by Jeffrey M. Hsu.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of The DragonFly Project nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific, prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

/*
 * Copyright (c) 1982, 1986, 1988, 1990, 1993, 1994, 1995
 *	The Regents of the University of California.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)tcp_input.c	8.12 (Berkeley) 5/24/95
 * $FreeBSD: src/sys/netinet/tcp_input.c,v 1.107.2.38 2003/05/21 04:46:41 cjc Exp $
 * $DragonFly: src/sys/netinet/tcp_input.c,v 1.68 2008/08/22 09:14:17 sephe Exp $
 */

#include "opt_ipfw.h"		/* for ipfw_fwd */
#include "opt_inet6.h"
#include "opt_ipsec.h"
#include "opt_tcpdebug.h"
#include "opt_tcp_input.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/sysctl.h>
#include <sys/malloc.h>
#include <sys/proc.h>		/* for proc0 declaration */
#include <sys/protosw.h>
#include <sys/socket.h>
#include <sys/socketvar.h>
#include <sys/syslog.h>
#include <sys/in_cksum.h>

#include <machine/cpu.h>	/* before tcp_seq.h, for tcp_random18() */
#include <machine/stdarg.h>

#include <net/route.h>

#include <netinet/in.h>
#include <netinet/in_systm.h>
#include <netinet/ip.h>
#include <netinet/ip_icmp.h>	/* for ICMP_BANDLIM */
#include <netinet/in_var.h>
#include <netinet/icmp_var.h>	/* for ICMP_BANDLIM */
#include <netinet/in_pcb.h>
#include <netinet/ip_var.h>
#include <netinet/ip6.h>
#include <netinet/icmp6.h>
#include <netinet6/nd6.h>
#include <netinet6/ip6_var.h>
#include <netinet6/in6_pcb.h>
#include <netinet/tcp.h>
#include <netinet/tcp_fsm.h>
#include <netinet/tcp_seq.h>
#include <netinet/tcp_timer.h>
#include <netinet/tcp_timer2.h>
#include <netinet/tcp_var.h>
#include <netinet6/tcp6_var.h>
#include <netinet/tcpip.h>
#ifdef TCPDEBUG
#include <netinet/tcp_debug.h>

u_char tcp_saveipgen[40];	/* the size must be of max ip header, now IPv6 */
struct tcphdr tcp_savetcp;
#endif

#ifdef FAST_IPSEC
#include <netproto/ipsec/ipsec.h>
#include <netproto/ipsec/ipsec6.h>
#endif

#ifdef IPSEC
#include <netinet6/ipsec.h>
#include <netinet6/ipsec6.h>
#include <netproto/key/key.h>
#endif

MALLOC_DEFINE(M_TSEGQ, "tseg_qent", "TCP segment queue entry");

static int log_in_vain = 0;
SYSCTL_INT(_net_inet_tcp, OID_AUTO, log_in_vain, CTLFLAG_RW,
    &log_in_vain, 0, "Log all incoming TCP connections");

static int blackhole = 0;
SYSCTL_INT(_net_inet_tcp, OID_AUTO, blackhole, CTLFLAG_RW,
    &blackhole, 0, "Do not send RST when dropping refused connections");

int tcp_delack_enabled = 1;
SYSCTL_INT(_net_inet_tcp, OID_AUTO, delayed_ack, CTLFLAG_RW,
    &tcp_delack_enabled, 0,
    "Delay ACK to try and piggyback it onto a data packet");

#ifdef TCP_DROP_SYNFIN
static int drop_synfin = 0;
SYSCTL_INT(_net_inet_tcp, OID_AUTO, drop_synfin, CTLFLAG_RW,
    &drop_synfin, 0, "Drop TCP packets with SYN+FIN set");
#endif

static int tcp_do_limitedtransmit = 1;
SYSCTL_INT(_net_inet_tcp, OID_AUTO, limitedtransmit, CTLFLAG_RW,
    &tcp_do_limitedtransmit, 0, "Enable RFC 3042 (Limited Transmit)");

static int tcp_do_early_retransmit = 1;
SYSCTL_INT(_net_inet_tcp, OID_AUTO, earlyretransmit, CTLFLAG_RW,
    &tcp_do_early_retransmit, 0, "Early retransmit");

int tcp_aggregate_acks = 1;
SYSCTL_INT(_net_inet_tcp, OID_AUTO, aggregate_acks, CTLFLAG_RW,
    &tcp_aggregate_acks, 0, "Aggregate built-up acks into one ack");

int tcp_do_rfc3390 = 1;
SYSCTL_INT(_net_inet_tcp, OID_AUTO, rfc3390, CTLFLAG_RW,
    &tcp_do_rfc3390, 0,
    "Enable RFC 3390 (Increasing TCP's Initial Congestion Window)");

static int tcp_do_eifel_detect = 1;
SYSCTL_INT(_net_inet_tcp, OID_AUTO, eifel, CTLFLAG_RW,
    &tcp_do_eifel_detect, 0, "Eifel detection algorithm (RFC 3522)");

static int tcp_do_abc = 1;
SYSCTL_INT(_net_inet_tcp, OID_AUTO, abc, CTLFLAG_RW,
    &tcp_do_abc, 0,
    "TCP Appropriate Byte Counting (RFC 3465)");

/*
 * Define as tunable for easy testing with SACK on and off.
 * Warning: do not change setting in the middle of an existing active TCP flow,
 * else strange things might happen to that flow.
 */
int tcp_do_sack = 1;
SYSCTL_INT(_net_inet_tcp, OID_AUTO, sack, CTLFLAG_RW,
    &tcp_do_sack, 0, "Enable SACK Algorithms");

int tcp_do_smartsack = 1;
SYSCTL_INT(_net_inet_tcp, OID_AUTO, smartsack, CTLFLAG_RW,
    &tcp_do_smartsack, 0, "Enable Smart SACK Algorithms");

SYSCTL_NODE(_net_inet_tcp, OID_AUTO, reass, CTLFLAG_RW, 0,
    "TCP Segment Reassembly Queue");

int tcp_reass_maxseg = 0;
SYSCTL_INT(_net_inet_tcp_reass, OID_AUTO, maxsegments, CTLFLAG_RD,
    &tcp_reass_maxseg, 0,
    "Global maximum number of TCP Segments in Reassembly Queue");

int tcp_reass_qsize = 0;
SYSCTL_INT(_net_inet_tcp_reass, OID_AUTO, cursegments, CTLFLAG_RD,
    &tcp_reass_qsize, 0,
    "Global number of TCP Segments currently in Reassembly Queue");

static int tcp_reass_overflows = 0;
SYSCTL_INT(_net_inet_tcp_reass, OID_AUTO, overflows, CTLFLAG_RD,
    &tcp_reass_overflows, 0,
    "Global number of TCP Segment Reassembly Queue Overflows");

int tcp_do_autorcvbuf = 1;
SYSCTL_INT(_net_inet_tcp, OID_AUTO, recvbuf_auto, CTLFLAG_RW,
    &tcp_do_autorcvbuf, 0, "Enable automatic receive buffer sizing");

int tcp_autorcvbuf_inc = 16*1024;
SYSCTL_INT(_net_inet_tcp, OID_AUTO, recvbuf_inc, CTLFLAG_RW,
    &tcp_autorcvbuf_inc, 0,
    "Incrementor step size of automatic receive buffer");

int tcp_autorcvbuf_max = 2*1024*1024;
SYSCTL_INT(_net_inet_tcp, OID_AUTO, recvbuf_max, CTLFLAG_RW,
    &tcp_autorcvbuf_max, 0, "Max size of automatic receive buffer");

static void	 tcp_dooptions(struct tcpopt *, u_char *, int, boolean_t);
static void	 tcp_pulloutofband(struct socket *,
		     struct tcphdr *, struct mbuf *, int);
static int	 tcp_reass(struct tcpcb *, struct tcphdr *, int *,
		     struct mbuf *);
static void	 tcp_xmit_timer(struct tcpcb *, int);
static void	 tcp_newreno_partial_ack(struct tcpcb *, struct tcphdr *, int);
static void	 tcp_sack_rexmt(struct tcpcb *, struct tcphdr *);

/* Neighbor Discovery, Neighbor Unreachability Detection Upper layer hint. */
#ifdef INET6
#define ND6_HINT(tp) \
do { \
	if ((tp) && (tp)->t_inpcb && \
	    ((tp)->t_inpcb->inp_vflag & INP_IPV6) && \
	    (tp)->t_inpcb->in6p_route.ro_rt) \
		nd6_nud_hint((tp)->t_inpcb->in6p_route.ro_rt, NULL, 0); \
} while (0)
#else
#define ND6_HINT(tp)
#endif

/*
 * Indicate whether this ack should be delayed.  We can delay the ack if
 *	- delayed acks are enabled and
 *	- there is no delayed ack timer in progress and
 *	- our last ack wasn't a 0-sized window.  We never want to delay
 *	  the ack that opens up a 0-sized window.
 */
#define DELAY_ACK(tp) \
	(tcp_delack_enabled && !tcp_callout_pending(tp, tp->tt_delack) && \
	 !(tp->t_flags & TF_RXWIN0SENT))

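/*
 * Descriptive note (added): a window update carried by segment (th, tiwin)
 * is acceptable if it has a newer sequence number than the last update
 * (snd_wl1), or the same sequence number with a newer ACK (snd_wl2), or the
 * same sequence and ACK but a larger advertised window -- the usual RFC 793
 * window-update rule.
 */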
#define acceptable_window_update(tp, th, tiwin)	\
	(SEQ_LT(tp->snd_wl1, th->th_seq) || \
	 (tp->snd_wl1 == th->th_seq && \
	  (SEQ_LT(tp->snd_wl2, th->th_ack) || \
	   (tp->snd_wl2 == th->th_ack && tiwin > tp->snd_wnd))))

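/*
 * Descriptive note (added): tcp_reass() inserts the segment into the
 * per-connection reassembly queue, trimming or merging overlaps and
 * maintaining the SACK report/enclosing blocks, then delivers any data that
 * is now in sequence to the socket.  It is also called with th == NULL after
 * the connection becomes established, purely to flush queued data to the
 * user.  It returns the TH_FIN flag of the segment delivered, if any.
 */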
static int
tcp_reass(struct tcpcb *tp, struct tcphdr *th, int *tlenp, struct mbuf *m)
{
	struct tseg_qent *q;
	struct tseg_qent *p = NULL;
	struct tseg_qent *te;
	struct socket *so = tp->t_inpcb->inp_socket;
	int flags;

	/*
	 * Call with th == NULL after become established to
	 * force pre-ESTABLISHED data up to user socket.
	 */
	if (th == NULL)
		goto present;

	/*
	 * Limit the number of segments in the reassembly queue to prevent
	 * holding on to too many segments (and thus running out of mbufs).
	 * Make sure to let the missing segment through which caused this
	 * queue.  Always keep one global queue entry spare to be able to
	 * process the missing segment.
	 */
	if (th->th_seq != tp->rcv_nxt &&
	    tcp_reass_qsize + 1 >= tcp_reass_maxseg) {
		tcp_reass_overflows++;
		tcpstat.tcps_rcvmemdrop++;
		m_freem(m);
		/* no SACK block to report */
		tp->reportblk.rblk_start = tp->reportblk.rblk_end;
		return (0);
	}

	/* Allocate a new queue entry. */
	MALLOC(te, struct tseg_qent *, sizeof(struct tseg_qent), M_TSEGQ,
	       M_INTWAIT | M_NULLOK);
	if (te == NULL) {
		tcpstat.tcps_rcvmemdrop++;
		m_freem(m);
		/* no SACK block to report */
		tp->reportblk.rblk_start = tp->reportblk.rblk_end;
		return (0);
	}
	tcp_reass_qsize++;

	/*
	 * Find a segment which begins after this one does.
	 */
	LIST_FOREACH(q, &tp->t_segq, tqe_q) {
		if (SEQ_GT(q->tqe_th->th_seq, th->th_seq))
			break;
		p = q;
	}

	/*
	 * If there is a preceding segment, it may provide some of
	 * our data already.  If so, drop the data from the incoming
	 * segment.  If it provides all of our data, drop us.
	 */
	if (p != NULL) {
		tcp_seq_diff_t i;

		/* conversion to int (in i) handles seq wraparound */
		i = p->tqe_th->th_seq + p->tqe_len - th->th_seq;
		if (i > 0) {		/* overlaps preceding segment */
			tp->t_flags |= (TF_DUPSEG | TF_ENCLOSESEG);
			/* enclosing block starts w/ preceding segment */
			tp->encloseblk.rblk_start = p->tqe_th->th_seq;
			if (i >= *tlenp) {
				/* preceding encloses incoming segment */
				tp->encloseblk.rblk_end = p->tqe_th->th_seq +
				    p->tqe_len;
				tcpstat.tcps_rcvduppack++;
				tcpstat.tcps_rcvdupbyte += *tlenp;
				m_freem(m);
				kfree(te, M_TSEGQ);
				tcp_reass_qsize--;
				/*
				 * Try to present any queued data
				 * at the left window edge to the user.
				 * This is needed after the 3-WHS
				 * completes.
				 */
				goto present;	/* ??? */
			}
			m_adj(m, i);
			*tlenp -= i;
			th->th_seq += i;
			/* incoming segment end is enclosing block end */
			tp->encloseblk.rblk_end = th->th_seq + *tlenp +
			    ((th->th_flags & TH_FIN) != 0);
			/* trim end of reported D-SACK block */
			tp->reportblk.rblk_end = th->th_seq;
		}
	}
	tcpstat.tcps_rcvoopack++;
	tcpstat.tcps_rcvoobyte += *tlenp;

	/*
	 * While we overlap succeeding segments trim them or,
	 * if they are completely covered, dequeue them.
	 */
	while (q) {
		tcp_seq_diff_t i = (th->th_seq + *tlenp) - q->tqe_th->th_seq;
		tcp_seq qend = q->tqe_th->th_seq + q->tqe_len;
		struct tseg_qent *nq;

		if (i <= 0)
			break;
		if (!(tp->t_flags & TF_DUPSEG)) {	/* first time through */
			tp->t_flags |= (TF_DUPSEG | TF_ENCLOSESEG);
			tp->encloseblk = tp->reportblk;
			/* report trailing duplicate D-SACK segment */
			tp->reportblk.rblk_start = q->tqe_th->th_seq;
		}
		if ((tp->t_flags & TF_ENCLOSESEG) &&
		    SEQ_GT(qend, tp->encloseblk.rblk_end)) {
			/* extend enclosing block if one exists */
			tp->encloseblk.rblk_end = qend;
		}
		if (i < q->tqe_len) {
			q->tqe_th->th_seq += i;
			q->tqe_len -= i;
			m_adj(q->tqe_m, i);
			break;
		}

		nq = LIST_NEXT(q, tqe_q);
		LIST_REMOVE(q, tqe_q);
		m_freem(q->tqe_m);
		kfree(q, M_TSEGQ);
		tcp_reass_qsize--;
		q = nq;
	}

	/* Insert the new segment queue entry into place. */
	te->tqe_m = m;
	te->tqe_th = th;
	te->tqe_len = *tlenp;

	/* check if can coalesce with following segment */
	if (q != NULL && (th->th_seq + *tlenp == q->tqe_th->th_seq)) {
		tcp_seq tend = te->tqe_th->th_seq + te->tqe_len;

		te->tqe_len += q->tqe_len;
		if (q->tqe_th->th_flags & TH_FIN)
			te->tqe_th->th_flags |= TH_FIN;
		m_cat(te->tqe_m, q->tqe_m);
		tp->encloseblk.rblk_end = tend;
		/*
		 * When not reporting a duplicate segment, use
		 * the larger enclosing block as the SACK block.
		 */
		if (!(tp->t_flags & TF_DUPSEG))
			tp->reportblk.rblk_end = tend;
		LIST_REMOVE(q, tqe_q);
		kfree(q, M_TSEGQ);
		tcp_reass_qsize--;
	}

	if (p == NULL) {
		LIST_INSERT_HEAD(&tp->t_segq, te, tqe_q);
	} else {
		/* check if can coalesce with preceding segment */
		if (p->tqe_th->th_seq + p->tqe_len == th->th_seq) {
			p->tqe_len += te->tqe_len;
			m_cat(p->tqe_m, te->tqe_m);
			tp->encloseblk.rblk_start = p->tqe_th->th_seq;
			/*
			 * When not reporting a duplicate segment, use
			 * the larger enclosing block as the SACK block.
			 */
			if (!(tp->t_flags & TF_DUPSEG))
				tp->reportblk.rblk_start = p->tqe_th->th_seq;
			kfree(te, M_TSEGQ);
			tcp_reass_qsize--;
		} else {
			LIST_INSERT_AFTER(p, te, tqe_q);
		}
	}

present:
	/*
	 * Present data to user, advancing rcv_nxt through
	 * completed sequence space.
	 */
	if (!TCPS_HAVEESTABLISHED(tp->t_state))
		return (0);
	q = LIST_FIRST(&tp->t_segq);
	if (q == NULL || q->tqe_th->th_seq != tp->rcv_nxt)
		return (0);
	tp->rcv_nxt += q->tqe_len;
	if (!(tp->t_flags & TF_DUPSEG)) {
		/* no SACK block to report since ACK advanced */
		tp->reportblk.rblk_start = tp->reportblk.rblk_end;
	}
	/* no enclosing block to report since ACK advanced */
	tp->t_flags &= ~TF_ENCLOSESEG;
	flags = q->tqe_th->th_flags & TH_FIN;
	LIST_REMOVE(q, tqe_q);
	KASSERT(LIST_EMPTY(&tp->t_segq) ||
		LIST_FIRST(&tp->t_segq)->tqe_th->th_seq != tp->rcv_nxt,
		("segment not coalesced"));
	if (so->so_state & SS_CANTRCVMORE)
		m_freem(q->tqe_m);
	else
		ssb_appendstream(&so->so_rcv, q->tqe_m);
	kfree(q, M_TSEGQ);
	tcp_reass_qsize--;
	sorwakeup(so);
	return (flags);
}

/*
 * TCP input routine, follows pages 65-76 of the
 * protocol specification dated September, 1981 very closely.
 */
#ifdef INET6
int
tcp6_input(struct mbuf **mp, int *offp, int proto)
{
	struct mbuf *m = *mp;
	struct in6_ifaddr *ia6;

	IP6_EXTHDR_CHECK(m, *offp, sizeof(struct tcphdr), IPPROTO_DONE);

	/*
	 * draft-itojun-ipv6-tcp-to-anycast
	 * better place to put this in?
	 */
	ia6 = ip6_getdstifaddr(m);
	if (ia6 && (ia6->ia6_flags & IN6_IFF_ANYCAST)) {
		struct ip6_hdr *ip6;

		ip6 = mtod(m, struct ip6_hdr *);
		icmp6_error(m, ICMP6_DST_UNREACH, ICMP6_DST_UNREACH_ADDR,
			    offsetof(struct ip6_hdr, ip6_dst));
		return (IPPROTO_DONE);
	}

	tcp_input(m, *offp, proto);
	return (IPPROTO_DONE);
}
#endif

void
tcp_input(struct mbuf *m, ...)
{
518 struct ip
*ip
= NULL
;
520 struct inpcb
*inp
= NULL
;
525 struct tcpcb
*tp
= NULL
;
527 struct socket
*so
= 0;
529 boolean_t ourfinisacked
, needoutput
= FALSE
;
532 struct tcpopt to
; /* options in this segment */
533 struct sockaddr_in
*next_hop
= NULL
;
534 int rstreason
; /* For badport_bandlim accounting purposes */
536 struct ip6_hdr
*ip6
= NULL
;
540 const boolean_t isipv6
= FALSE
;
547 off0
= __va_arg(ap
, int);
548 proto
= __va_arg(ap
, int);
551 tcpstat
.tcps_rcvtotal
++;
553 if (m
->m_pkthdr
.fw_flags
& IPFORWARD_MBUF_TAGGED
) {
556 mtag
= m_tag_find(m
, PACKET_TAG_IPFORWARD
, NULL
);
557 KKASSERT(mtag
!= NULL
);
558 next_hop
= m_tag_data(mtag
);
562 isipv6
= (mtod(m
, struct ip
*)->ip_v
== 6) ? TRUE
: FALSE
;
566 /* IP6_EXTHDR_CHECK() is already done at tcp6_input() */
567 ip6
= mtod(m
, struct ip6_hdr
*);
568 tlen
= (sizeof *ip6
) + ntohs(ip6
->ip6_plen
) - off0
;
569 if (in6_cksum(m
, IPPROTO_TCP
, off0
, tlen
)) {
570 tcpstat
.tcps_rcvbadsum
++;
573 th
= (struct tcphdr
*)((caddr_t
)ip6
+ off0
);
576 * Be proactive about unspecified IPv6 address in source.
577 * As we use all-zero to indicate unbounded/unconnected pcb,
578 * unspecified IPv6 address can be used to confuse us.
580 * Note that packets with unspecified IPv6 destination is
581 * already dropped in ip6_input.
583 if (IN6_IS_ADDR_UNSPECIFIED(&ip6
->ip6_src
)) {
589 * Get IP and TCP header together in first mbuf.
590 * Note: IP leaves IP header in first mbuf.
592 if (off0
> sizeof(struct ip
)) {
594 off0
= sizeof(struct ip
);
596 /* already checked and pulled up in ip_demux() */
597 KASSERT(m
->m_len
>= sizeof(struct tcpiphdr
),
598 ("TCP header not in one mbuf: m->m_len %d", m
->m_len
));
599 ip
= mtod(m
, struct ip
*);
600 ipov
= (struct ipovly
*)ip
;
601 th
= (struct tcphdr
*)((caddr_t
)ip
+ off0
);
604 if (m
->m_pkthdr
.csum_flags
& CSUM_DATA_VALID
) {
605 if (m
->m_pkthdr
.csum_flags
& CSUM_PSEUDO_HDR
)
606 th
->th_sum
= m
->m_pkthdr
.csum_data
;
608 th
->th_sum
= in_pseudo(ip
->ip_src
.s_addr
,
610 htonl(m
->m_pkthdr
.csum_data
+
613 th
->th_sum
^= 0xffff;
616 * Checksum extended TCP header and data.
618 len
= sizeof(struct ip
) + tlen
;
619 bzero(ipov
->ih_x1
, sizeof ipov
->ih_x1
);
620 ipov
->ih_len
= (u_short
)tlen
;
621 ipov
->ih_len
= htons(ipov
->ih_len
);
622 th
->th_sum
= in_cksum(m
, len
);
625 tcpstat
.tcps_rcvbadsum
++;
629 /* Re-initialization for later version check */
630 ip
->ip_v
= IPVERSION
;
635 * Check that TCP offset makes sense,
636 * pull out TCP options and adjust length. XXX
638 off
= th
->th_off
<< 2;
639 /* already checked and pulled up in ip_demux() */
640 KASSERT(off
>= sizeof(struct tcphdr
) && off
<= tlen
,
641 ("bad TCP data offset %d (tlen %d)", off
, tlen
));
642 tlen
-= off
; /* tlen is used instead of ti->ti_len */
643 if (off
> sizeof(struct tcphdr
)) {
645 IP6_EXTHDR_CHECK(m
, off0
, off
, );
646 ip6
= mtod(m
, struct ip6_hdr
*);
647 th
= (struct tcphdr
*)((caddr_t
)ip6
+ off0
);
649 /* already pulled up in ip_demux() */
650 KASSERT(m
->m_len
>= sizeof(struct ip
) + off
,
651 ("TCP header and options not in one mbuf: "
652 "m_len %d, off %d", m
->m_len
, off
));
654 optlen
= off
- sizeof(struct tcphdr
);
655 optp
= (u_char
*)(th
+ 1);
657 thflags
= th
->th_flags
;
659 #ifdef TCP_DROP_SYNFIN
661 * If the drop_synfin option is enabled, drop all packets with
662 * both the SYN and FIN bits set. This prevents e.g. nmap from
663 * identifying the TCP/IP stack.
665 * This is a violation of the TCP specification.
667 if (drop_synfin
&& (thflags
& (TH_SYN
| TH_FIN
)) == (TH_SYN
| TH_FIN
))
672 * Convert TCP protocol specific fields to host format.
674 th
->th_seq
= ntohl(th
->th_seq
);
675 th
->th_ack
= ntohl(th
->th_ack
);
676 th
->th_win
= ntohs(th
->th_win
);
677 th
->th_urp
= ntohs(th
->th_urp
);
680 * Delay dropping TCP, IP headers, IPv6 ext headers, and TCP options,
681 * until after ip6_savecontrol() is called and before other functions
682 * which don't want those proto headers.
683 * Because ip6_savecontrol() is going to parse the mbuf to
684 * search for data to be passed up to user-land, it wants mbuf
685 * parameters to be unchanged.
686 * XXX: the call of ip6_savecontrol() has been obsoleted based on
687 * latest version of the advanced API (20020110).
689 drop_hdrlen
= off0
+ off
;
692 * Locate pcb for segment.
695 /* IPFIREWALL_FORWARD section */
696 if (next_hop
!= NULL
&& !isipv6
) { /* IPv6 support is not there yet */
698 * Transparently forwarded. Pretend to be the destination.
699 * already got one like this?
701 cpu
= mycpu
->gd_cpuid
;
702 inp
= in_pcblookup_hash(&tcbinfo
[cpu
],
703 ip
->ip_src
, th
->th_sport
,
704 ip
->ip_dst
, th
->th_dport
,
705 0, m
->m_pkthdr
.rcvif
);
708 * It's new. Try to find the ambushing socket.
712 * The rest of the ipfw code stores the port in
714 * (The IP address is still in network order.)
716 in_port_t dport
= next_hop
->sin_port
?
717 htons(next_hop
->sin_port
) :
720 cpu
= tcp_addrcpu(ip
->ip_src
.s_addr
, th
->th_sport
,
721 next_hop
->sin_addr
.s_addr
, dport
);
722 inp
= in_pcblookup_hash(&tcbinfo
[cpu
],
723 ip
->ip_src
, th
->th_sport
,
724 next_hop
->sin_addr
, dport
,
725 1, m
->m_pkthdr
.rcvif
);
729 inp
= in6_pcblookup_hash(&tcbinfo
[0],
730 &ip6
->ip6_src
, th
->th_sport
,
731 &ip6
->ip6_dst
, th
->th_dport
,
732 1, m
->m_pkthdr
.rcvif
);
734 cpu
= mycpu
->gd_cpuid
;
735 inp
= in_pcblookup_hash(&tcbinfo
[cpu
],
736 ip
->ip_src
, th
->th_sport
,
737 ip
->ip_dst
, th
->th_dport
,
738 1, m
->m_pkthdr
.rcvif
);
743 * If the state is CLOSED (i.e., TCB does not exist) then
744 * all data in the incoming segment is discarded.
745 * If the TCB exists but is in CLOSED state, it is embryonic,
746 * but should either do a listen or a connect soon.
751 char dbuf
[INET6_ADDRSTRLEN
+2], sbuf
[INET6_ADDRSTRLEN
+2];
753 char dbuf
[sizeof "aaa.bbb.ccc.ddd"];
754 char sbuf
[sizeof "aaa.bbb.ccc.ddd"];
758 strcat(dbuf
, ip6_sprintf(&ip6
->ip6_dst
));
761 strcat(sbuf
, ip6_sprintf(&ip6
->ip6_src
));
764 strcpy(dbuf
, inet_ntoa(ip
->ip_dst
));
765 strcpy(sbuf
, inet_ntoa(ip
->ip_src
));
767 switch (log_in_vain
) {
769 if (!(thflags
& TH_SYN
))
773 "Connection attempt to TCP %s:%d "
774 "from %s:%d flags:0x%02x\n",
775 dbuf
, ntohs(th
->th_dport
), sbuf
,
776 ntohs(th
->th_sport
), thflags
);
785 if (thflags
& TH_SYN
)
794 rstreason
= BANDLIM_RST_CLOSEDPORT
;
800 if (ipsec6_in_reject_so(m
, inp
->inp_socket
)) {
801 ipsec6stat
.in_polvio
++;
805 if (ipsec4_in_reject_so(m
, inp
->inp_socket
)) {
806 ipsecstat
.in_polvio
++;
813 if (ipsec6_in_reject(m
, inp
))
816 if (ipsec4_in_reject(m
, inp
))
820 /* Check the minimum TTL for socket. */
822 if ((isipv6
? ip6
->ip6_hlim
: ip
->ip_ttl
) < inp
->inp_ip_minttl
)
828 rstreason
= BANDLIM_RST_CLOSEDPORT
;
831 if (tp
->t_state
<= TCPS_CLOSED
)
834 /* Unscale the window into a 32-bit value. */
835 if (!(thflags
& TH_SYN
))
836 tiwin
= th
->th_win
<< tp
->snd_scale
;
840 so
= inp
->inp_socket
;
843 if (so
->so_options
& SO_DEBUG
) {
844 ostate
= tp
->t_state
;
846 bcopy(ip6
, tcp_saveipgen
, sizeof(*ip6
));
848 bcopy(ip
, tcp_saveipgen
, sizeof(*ip
));
853 bzero(&to
, sizeof to
);
855 if (so
->so_options
& SO_ACCEPTCONN
) {
856 struct in_conninfo inc
;
859 inc
.inc_isipv6
= (isipv6
== TRUE
);
862 inc
.inc6_faddr
= ip6
->ip6_src
;
863 inc
.inc6_laddr
= ip6
->ip6_dst
;
864 inc
.inc6_route
.ro_rt
= NULL
; /* XXX */
866 inc
.inc_faddr
= ip
->ip_src
;
867 inc
.inc_laddr
= ip
->ip_dst
;
868 inc
.inc_route
.ro_rt
= NULL
; /* XXX */
870 inc
.inc_fport
= th
->th_sport
;
871 inc
.inc_lport
= th
->th_dport
;
874 * If the state is LISTEN then ignore segment if it contains
875 * a RST. If the segment contains an ACK then it is bad and
876 * send a RST. If it does not contain a SYN then it is not
877 * interesting; drop it.
879 * If the state is SYN_RECEIVED (syncache) and seg contains
880 * an ACK, but not for our SYN/ACK, send a RST. If the seg
881 * contains a RST, check the sequence number to see if it
882 * is a valid reset segment.
884 if ((thflags
& (TH_RST
| TH_ACK
| TH_SYN
)) != TH_SYN
) {
885 if ((thflags
& (TH_RST
| TH_ACK
| TH_SYN
)) == TH_ACK
) {
886 if (!syncache_expand(&inc
, th
, &so
, m
)) {
888 * No syncache entry, or ACK was not
889 * for our SYN/ACK. Send a RST.
891 tcpstat
.tcps_badsyn
++;
892 rstreason
= BANDLIM_RST_OPENPORT
;
897 * Could not complete 3-way handshake,
898 * connection is being closed down, and
899 * syncache will free mbuf.
903 * Socket is created in state SYN_RECEIVED.
904 * Continue processing segment.
909 * This is what would have happened in
910 * tcp_output() when the SYN,ACK was sent.
912 tp
->snd_up
= tp
->snd_una
;
913 tp
->snd_max
= tp
->snd_nxt
= tp
->iss
+ 1;
914 tp
->last_ack_sent
= tp
->rcv_nxt
;
916 * XXX possible bug - it doesn't appear that tp->snd_wnd is unscaled
917 * until the _second_ ACK is received:
918 * rcv SYN (set wscale opts) --> send SYN/ACK, set snd_wnd = window.
919 * rcv ACK, calculate tiwin --> process SYN_RECEIVED, determine wscale,
920 * move to ESTAB, set snd_wnd to tiwin.
922 tp
->snd_wnd
= tiwin
; /* unscaled */
925 if (thflags
& TH_RST
) {
926 syncache_chkrst(&inc
, th
);
929 if (thflags
& TH_ACK
) {
930 syncache_badack(&inc
);
931 tcpstat
.tcps_badsyn
++;
932 rstreason
= BANDLIM_RST_OPENPORT
;
939 * Segment's flags are (SYN) or (SYN | FIN).
943 * If deprecated address is forbidden,
944 * we do not accept SYN to deprecated interface
945 * address to prevent any new inbound connection from
946 * getting established.
947 * When we do not accept SYN, we send a TCP RST,
948 * with deprecated source address (instead of dropping
949 * it). We compromise it as it is much better for peer
950 * to send a RST, and RST will be the final packet
953 * If we do not forbid deprecated addresses, we accept
954 * the SYN packet. RFC2462 does not suggest dropping
956 * If we decipher RFC2462 5.5.4, it says like this:
957 * 1. use of deprecated addr with existing
958 * communication is okay - "SHOULD continue to be
960 * 2. use of it with new communication:
961 * (2a) "SHOULD NOT be used if alternate address
962 * with sufficient scope is available"
963 * (2b) nothing mentioned otherwise.
964 * Here we fall into (2b) case as we have no choice in
965 * our source address selection - we must obey the peer.
967 * The wording in RFC2462 is confusing, and there are
968 * multiple description text for deprecated address
969 * handling - worse, they are not exactly the same.
970 * I believe 5.5.4 is the best one, so we follow 5.5.4.
972 if (isipv6
&& !ip6_use_deprecated
) {
973 struct in6_ifaddr
*ia6
;
975 if ((ia6
= ip6_getdstifaddr(m
)) &&
976 (ia6
->ia6_flags
& IN6_IFF_DEPRECATED
)) {
978 rstreason
= BANDLIM_RST_OPENPORT
;
984 * If it is from this socket, drop it, it must be forged.
985 * Don't bother responding if the destination was a broadcast.
987 if (th
->th_dport
== th
->th_sport
) {
989 if (IN6_ARE_ADDR_EQUAL(&ip6
->ip6_dst
,
993 if (ip
->ip_dst
.s_addr
== ip
->ip_src
.s_addr
)
998 * RFC1122 4.2.3.10, p. 104: discard bcast/mcast SYN
1000 * Note that it is quite possible to receive unicast
1001 * link-layer packets with a broadcast IP address. Use
1002 * in_broadcast() to find them.
1004 if (m
->m_flags
& (M_BCAST
| M_MCAST
))
1007 if (IN6_IS_ADDR_MULTICAST(&ip6
->ip6_dst
) ||
1008 IN6_IS_ADDR_MULTICAST(&ip6
->ip6_src
))
1011 if (IN_MULTICAST(ntohl(ip
->ip_dst
.s_addr
)) ||
1012 IN_MULTICAST(ntohl(ip
->ip_src
.s_addr
)) ||
1013 ip
->ip_src
.s_addr
== htonl(INADDR_BROADCAST
) ||
1014 in_broadcast(ip
->ip_dst
, m
->m_pkthdr
.rcvif
))
1018 * SYN appears to be valid; create compressed TCP state
1019 * for syncache, or perform t/tcp connection.
1021 if (so
->so_qlen
<= so
->so_qlimit
) {
1022 tcp_dooptions(&to
, optp
, optlen
, TRUE
);
1023 if (!syncache_add(&inc
, &to
, th
, &so
, m
))
1027 * Entry added to syncache, mbuf used to
1028 * send SYN,ACK packet.
1032 tp
= intotcpcb(inp
);
1033 tp
->snd_wnd
= tiwin
;
1034 tp
->t_starttime
= ticks
;
1035 tp
->t_state
= TCPS_ESTABLISHED
;
1038 * If there is a FIN, or if there is data and the
1039 * connection is local, then delay SYN,ACK(SYN) in
1040 * the hope of piggy-backing it on a response
1041 * segment. Otherwise must send ACK now in case
1042 * the other side is slow starting.
1044 if (DELAY_ACK(tp
) &&
1045 ((thflags
& TH_FIN
) ||
1047 ((isipv6
&& in6_localaddr(&inp
->in6p_faddr
)) ||
1048 (!isipv6
&& in_localaddr(inp
->inp_faddr
)))))) {
1049 tcp_callout_reset(tp
, tp
->tt_delack
,
1050 tcp_delacktime
, tcp_timer_delack
);
1051 tp
->t_flags
|= TF_NEEDSYN
;
1053 tp
->t_flags
|= (TF_ACKNOW
| TF_NEEDSYN
);
1056 tcpstat
.tcps_connects
++;
1064 /* should not happen - syncache should pick up these connections */
1065 KASSERT(tp
->t_state
!= TCPS_LISTEN
, ("tcp_input: TCPS_LISTEN state"));
1068 * This is the second part of the MSS DoS prevention code (after
1069 * minmss on the sending side) and it deals with too many too small
1070 * tcp packets in a too short timeframe (1 second).
1072 * XXX Removed. This code was crap. It does not scale to network
1073 * speed, and default values break NFS. Gone.
1078 * Segment received on connection.
1080 * Reset idle time and keep-alive timer. Don't waste time if less
 * than a second has elapsed.  Only update t_rcvtime for non-SYN
1084 * Handle the case where one side thinks the connection is established
1085 * but the other side has, say, rebooted without cleaning out the
1086 * connection. The SYNs could be construed as an attack and wind
1087 * up ignored, but in case it isn't an attack we can validate the
1088 * connection by forcing a keepalive.
1090 if (TCPS_HAVEESTABLISHED(tp
->t_state
) && (ticks
- tp
->t_rcvtime
) > hz
) {
1091 if ((thflags
& (TH_SYN
| TH_ACK
)) == TH_SYN
) {
1092 tp
->t_flags
|= TF_KEEPALIVE
;
1093 tcp_callout_reset(tp
, tp
->tt_keep
, hz
/ 2,
1096 tp
->t_rcvtime
= ticks
;
1097 tp
->t_flags
&= ~TF_KEEPALIVE
;
1098 tcp_callout_reset(tp
, tp
->tt_keep
, tcp_keepidle
,
 * XXX this is traditional behavior, may need to be cleaned up.
1107 tcp_dooptions(&to
, optp
, optlen
, (thflags
& TH_SYN
) != 0);
1108 if (tp
->t_state
== TCPS_SYN_SENT
&& (thflags
& TH_SYN
)) {
1109 if (to
.to_flags
& TOF_SCALE
) {
1110 tp
->t_flags
|= TF_RCVD_SCALE
;
1111 tp
->requested_s_scale
= to
.to_requested_s_scale
;
1113 if (to
.to_flags
& TOF_TS
) {
1114 tp
->t_flags
|= TF_RCVD_TSTMP
;
1115 tp
->ts_recent
= to
.to_tsval
;
1116 tp
->ts_recent_age
= ticks
;
1118 if (to
.to_flags
& TOF_MSS
)
1119 tcp_mss(tp
, to
.to_mss
);
1121 * Only set the TF_SACK_PERMITTED per-connection flag
1122 * if we got a SACK_PERMITTED option from the other side
1123 * and the global tcp_do_sack variable is true.
1125 if (tcp_do_sack
&& (to
.to_flags
& TOF_SACK_PERMITTED
))
1126 tp
->t_flags
|= TF_SACK_PERMITTED
;
1130 * Header prediction: check for the two common cases
1131 * of a uni-directional data xfer. If the packet has
1132 * no control flags, is in-sequence, the window didn't
1133 * change and we're not retransmitting, it's a
1134 * candidate. If the length is zero and the ack moved
1135 * forward, we're the sender side of the xfer. Just
1136 * free the data acked & wake any higher level process
1137 * that was blocked waiting for space. If the length
1138 * is non-zero and the ack didn't move, we're the
1139 * receiver side. If we're getting packets in-order
1140 * (the reassembly queue is empty), add the data to
1141 * the socket buffer and note that we need a delayed ack.
1142 * Make sure that the hidden state-flags are also off.
1143 * Since we check for TCPS_ESTABLISHED above, it can only
1146 if (tp
->t_state
== TCPS_ESTABLISHED
&&
1147 (thflags
& (TH_SYN
|TH_FIN
|TH_RST
|TH_URG
|TH_ACK
)) == TH_ACK
&&
1148 !(tp
->t_flags
& (TF_NEEDSYN
| TF_NEEDFIN
)) &&
1149 (!(to
.to_flags
& TOF_TS
) ||
1150 TSTMP_GEQ(to
.to_tsval
, tp
->ts_recent
)) &&
1151 th
->th_seq
== tp
->rcv_nxt
&&
1152 tp
->snd_nxt
== tp
->snd_max
) {
1155 * If last ACK falls within this segment's sequence numbers,
1156 * record the timestamp.
1157 * NOTE that the test is modified according to the latest
1158 * proposal of the tcplw@cray.com list (Braden 1993/04/26).
1160 if ((to
.to_flags
& TOF_TS
) &&
1161 SEQ_LEQ(th
->th_seq
, tp
->last_ack_sent
)) {
1162 tp
->ts_recent_age
= ticks
;
1163 tp
->ts_recent
= to
.to_tsval
;
1167 if (SEQ_GT(th
->th_ack
, tp
->snd_una
) &&
1168 SEQ_LEQ(th
->th_ack
, tp
->snd_max
) &&
1169 tp
->snd_cwnd
>= tp
->snd_wnd
&&
1170 !IN_FASTRECOVERY(tp
)) {
1172 * This is a pure ack for outstanding data.
1174 ++tcpstat
.tcps_predack
;
1176 * "bad retransmit" recovery
1178 * If Eifel detection applies, then
1179 * it is deterministic, so use it
1180 * unconditionally over the old heuristic.
1181 * Otherwise, fall back to the old heuristic.
1183 if (tcp_do_eifel_detect
&&
1184 (to
.to_flags
& TOF_TS
) && to
.to_tsecr
&&
1185 (tp
->t_flags
& TF_FIRSTACCACK
)) {
1186 /* Eifel detection applicable. */
1187 if (to
.to_tsecr
< tp
->t_rexmtTS
) {
1188 tcp_revert_congestion_state(tp
);
1189 ++tcpstat
.tcps_eifeldetected
;
1191 } else if (tp
->t_rxtshift
== 1 &&
1192 ticks
< tp
->t_badrxtwin
) {
1193 tcp_revert_congestion_state(tp
);
1194 ++tcpstat
.tcps_rttdetected
;
1196 tp
->t_flags
&= ~(TF_FIRSTACCACK
|
1197 TF_FASTREXMT
| TF_EARLYREXMT
);
1199 * Recalculate the retransmit timer / rtt.
1201 * Some machines (certain windows boxes)
1202 * send broken timestamp replies during the
1203 * SYN+ACK phase, ignore timestamps of 0.
1205 if ((to
.to_flags
& TOF_TS
) && to
.to_tsecr
) {
1207 ticks
- to
.to_tsecr
+ 1);
1208 } else if (tp
->t_rtttime
&&
1209 SEQ_GT(th
->th_ack
, tp
->t_rtseq
)) {
1211 ticks
- tp
->t_rtttime
);
1213 tcp_xmit_bandwidth_limit(tp
, th
->th_ack
);
1214 acked
= th
->th_ack
- tp
->snd_una
;
1215 tcpstat
.tcps_rcvackpack
++;
1216 tcpstat
.tcps_rcvackbyte
+= acked
;
1217 sbdrop(&so
->so_snd
.sb
, acked
);
1218 tp
->snd_recover
= th
->th_ack
- 1;
1219 tp
->snd_una
= th
->th_ack
;
1222 * Update window information.
1224 if (tiwin
!= tp
->snd_wnd
&&
1225 acceptable_window_update(tp
, th
, tiwin
)) {
1226 /* keep track of pure window updates */
1227 if (tp
->snd_wl2
== th
->th_ack
&&
1228 tiwin
> tp
->snd_wnd
)
1229 tcpstat
.tcps_rcvwinupd
++;
1230 tp
->snd_wnd
= tiwin
;
1231 tp
->snd_wl1
= th
->th_seq
;
1232 tp
->snd_wl2
= th
->th_ack
;
1233 if (tp
->snd_wnd
> tp
->max_sndwnd
)
1234 tp
->max_sndwnd
= tp
->snd_wnd
;
1237 ND6_HINT(tp
); /* some progress has been done */
1239 * If all outstanding data are acked, stop
1240 * retransmit timer, otherwise restart timer
1241 * using current (possibly backed-off) value.
1242 * If process is waiting for space,
1243 * wakeup/selwakeup/signal. If data
1244 * are ready to send, let tcp_output
1245 * decide between more output or persist.
1247 if (tp
->snd_una
== tp
->snd_max
) {
1248 tcp_callout_stop(tp
, tp
->tt_rexmt
);
1249 } else if (!tcp_callout_active(tp
,
1251 tcp_callout_reset(tp
, tp
->tt_rexmt
,
1252 tp
->t_rxtcur
, tcp_timer_rexmt
);
1255 if (so
->so_snd
.ssb_cc
> 0)
1259 } else if (tiwin
== tp
->snd_wnd
&&
1260 th
->th_ack
== tp
->snd_una
&&
1261 LIST_EMPTY(&tp
->t_segq
) &&
1262 tlen
<= ssb_space(&so
->so_rcv
)) {
1263 u_long newsize
= 0; /* automatic sockbuf scaling */
1265 * This is a pure, in-sequence data packet
1266 * with nothing on the reassembly queue and
1267 * we have enough buffer space to take it.
1269 ++tcpstat
.tcps_preddat
;
1270 tp
->rcv_nxt
+= tlen
;
1271 tcpstat
.tcps_rcvpack
++;
1272 tcpstat
.tcps_rcvbyte
+= tlen
;
1273 ND6_HINT(tp
); /* some progress has been done */
		/*
		 * Automatic sizing of receive socket buffer.  Often the send
		 * buffer size is not optimally adjusted to the actual network
		 * conditions at hand (delay bandwidth product).  Setting the
		 * buffer size too small limits throughput on links with high
		 * bandwidth and high delay (eg. trans-continental/oceanic links).
		 *
		 * On the receive side the socket buffer memory is only rarely
		 * used to any significant extent.  This allows us to be much
		 * more aggressive in scaling the receive socket buffer.  For
		 * the case that the buffer space is actually used to a large
		 * extent and we run out of kernel memory we can simply drop
		 * the new segments; TCP on the sender will just retransmit it
		 * later.  Setting the buffer size too big may only consume too
		 * much kernel memory if the application doesn't read() from
		 * the socket or packet loss or reordering makes use of the
		 * reassembly queue.
		 *
		 * The criteria to step up the receive buffer one notch are:
		 *  1. the number of bytes received during the time it takes
		 *     one timestamp to be reflected back to us (the RTT);
		 *  2. received bytes per RTT is within seven eighth of the
		 *     current socket buffer size;
		 *  3. receive buffer size has not hit maximal automatic size;
		 *
		 * This algorithm does one step per RTT at most and only if
		 * we receive a bulk stream w/o packet losses or reorderings.
		 * Shrinking the buffer during idle times is not necessary as
		 * it doesn't consume any memory when idle.
		 *
		 * TODO: Only step up if the application is actually serving
		 * the buffer to better manage the socket buffer resources.
		 */
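
		/*
		 * Illustrative note (added, informal): with ssb_hiwat of
		 * 64KB, a step up happens only if at least 56KB (7/8 of the
		 * buffer) arrived within roughly one RTT; the buffer then
		 * grows by tcp_autorcvbuf_inc, capped at tcp_autorcvbuf_max.
		 */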
1307 if (tcp_do_autorcvbuf
&&
1309 (so
->so_rcv
.ssb_flags
& SSB_AUTOSIZE
)) {
1310 if (to
.to_tsecr
> tp
->rfbuf_ts
&&
1311 to
.to_tsecr
- tp
->rfbuf_ts
< hz
) {
1313 (so
->so_rcv
.ssb_hiwat
/ 8 * 7) &&
1314 so
->so_rcv
.ssb_hiwat
<
1315 tcp_autorcvbuf_max
) {
1317 ulmin(so
->so_rcv
.ssb_hiwat
+
1319 tcp_autorcvbuf_max
);
1321 /* Start over with next RTT. */
1325 tp
->rfbuf_cnt
+= tlen
; /* add up */
1328 * Add data to socket buffer.
1330 if (so
->so_state
& SS_CANTRCVMORE
) {
1334 * Set new socket buffer size, give up when
1337 * Adjusting the size can mess up ACK
1338 * sequencing when pure window updates are
1339 * being avoided (which is the default),
1343 tp
->t_flags
|= TF_RXRESIZED
;
1344 if (!ssb_reserve(&so
->so_rcv
, newsize
,
1346 so
->so_rcv
.ssb_flags
&= ~SSB_AUTOSIZE
;
1349 (TCP_MAXWIN
<< tp
->rcv_scale
)) {
1350 so
->so_rcv
.ssb_flags
&= ~SSB_AUTOSIZE
;
1353 m_adj(m
, drop_hdrlen
); /* delayed header drop */
1354 ssb_appendstream(&so
->so_rcv
, m
);
1358 * This code is responsible for most of the ACKs
1359 * the TCP stack sends back after receiving a data
1360 * packet. Note that the DELAY_ACK check fails if
1361 * the delack timer is already running, which results
1362 * in an ack being sent every other packet (which is
1365 * We then further aggregate acks by not actually
1366 * sending one until the protocol thread has completed
1367 * processing the current backlog of packets. This
1368 * does not delay the ack any further, but allows us
1369 * to take advantage of the packet aggregation that
1370 * high speed NICs do (usually blocks of 8-10 packets)
 * to send a single ack rather than four or five acks,
1372 * greatly reducing the ack rate, the return channel
1373 * bandwidth, and the protocol overhead on both ends.
1375 * Since this also has the effect of slowing down
1376 * the exponential slow-start ramp-up, systems with
1377 * very large bandwidth-delay products might want
1378 * to turn the feature off.
1380 if (DELAY_ACK(tp
)) {
1381 tcp_callout_reset(tp
, tp
->tt_delack
,
1382 tcp_delacktime
, tcp_timer_delack
);
1383 } else if (tcp_aggregate_acks
) {
1384 tp
->t_flags
|= TF_ACKNOW
;
1385 if (!(tp
->t_flags
& TF_ONOUTPUTQ
)) {
1386 tp
->t_flags
|= TF_ONOUTPUTQ
;
1387 tp
->tt_cpu
= mycpu
->gd_cpuid
;
1389 &tcpcbackq
[tp
->tt_cpu
],
1393 tp
->t_flags
|= TF_ACKNOW
;
1401 * Calculate amount of space in receive window,
1402 * and then do TCP input processing.
1403 * Receive window is amount of space in rcv queue,
1404 * but not less than advertised window.
1406 recvwin
= ssb_space(&so
->so_rcv
);
1409 tp
->rcv_wnd
= imax(recvwin
, (int)(tp
->rcv_adv
- tp
->rcv_nxt
));
1411 /* Reset receive buffer auto scaling when not in bulk receive mode. */
1415 switch (tp
->t_state
) {
1417 * If the state is SYN_RECEIVED:
1418 * if seg contains an ACK, but not for our SYN/ACK, send a RST.
1420 case TCPS_SYN_RECEIVED
:
1421 if ((thflags
& TH_ACK
) &&
1422 (SEQ_LEQ(th
->th_ack
, tp
->snd_una
) ||
1423 SEQ_GT(th
->th_ack
, tp
->snd_max
))) {
1424 rstreason
= BANDLIM_RST_OPENPORT
;
1430 * If the state is SYN_SENT:
1431 * if seg contains an ACK, but not for our SYN, drop the input.
1432 * if seg contains a RST, then drop the connection.
1433 * if seg does not contain SYN, then drop it.
1434 * Otherwise this is an acceptable SYN segment
1435 * initialize tp->rcv_nxt and tp->irs
1436 * if seg contains ack then advance tp->snd_una
1437 * if SYN has been acked change to ESTABLISHED else SYN_RCVD state
1438 * arrange for segment to be acked (eventually)
1439 * continue processing rest of data/controls, beginning with URG
1442 if ((thflags
& TH_ACK
) &&
1443 (SEQ_LEQ(th
->th_ack
, tp
->iss
) ||
1444 SEQ_GT(th
->th_ack
, tp
->snd_max
))) {
1445 rstreason
= BANDLIM_UNLIMITED
;
1448 if (thflags
& TH_RST
) {
1449 if (thflags
& TH_ACK
)
1450 tp
= tcp_drop(tp
, ECONNREFUSED
);
1453 if (!(thflags
& TH_SYN
))
1455 tp
->snd_wnd
= th
->th_win
; /* initial send window */
1457 tp
->irs
= th
->th_seq
;
1459 if (thflags
& TH_ACK
) {
1460 /* Our SYN was acked. */
1461 tcpstat
.tcps_connects
++;
1463 /* Do window scaling on this connection? */
1464 if ((tp
->t_flags
& (TF_RCVD_SCALE
| TF_REQ_SCALE
)) ==
1465 (TF_RCVD_SCALE
| TF_REQ_SCALE
)) {
1466 tp
->snd_scale
= tp
->requested_s_scale
;
1467 tp
->rcv_scale
= tp
->request_r_scale
;
1469 tp
->rcv_adv
+= tp
->rcv_wnd
;
1470 tp
->snd_una
++; /* SYN is acked */
1471 tcp_callout_stop(tp
, tp
->tt_rexmt
);
1473 * If there's data, delay ACK; if there's also a FIN
1474 * ACKNOW will be turned on later.
1476 if (DELAY_ACK(tp
) && tlen
!= 0) {
1477 tcp_callout_reset(tp
, tp
->tt_delack
,
1478 tcp_delacktime
, tcp_timer_delack
);
1480 tp
->t_flags
|= TF_ACKNOW
;
1483 * Received <SYN,ACK> in SYN_SENT[*] state.
1485 * SYN_SENT --> ESTABLISHED
1486 * SYN_SENT* --> FIN_WAIT_1
1488 tp
->t_starttime
= ticks
;
1489 if (tp
->t_flags
& TF_NEEDFIN
) {
1490 tp
->t_state
= TCPS_FIN_WAIT_1
;
1491 tp
->t_flags
&= ~TF_NEEDFIN
;
1494 tp
->t_state
= TCPS_ESTABLISHED
;
1495 tcp_callout_reset(tp
, tp
->tt_keep
, tcp_keepidle
,
1500 * Received initial SYN in SYN-SENT[*] state =>
1501 * simultaneous open.
1502 * Do 3-way handshake:
1503 * SYN-SENT -> SYN-RECEIVED
1504 * SYN-SENT* -> SYN-RECEIVED*
1506 tp
->t_flags
|= TF_ACKNOW
;
1507 tcp_callout_stop(tp
, tp
->tt_rexmt
);
1508 tp
->t_state
= TCPS_SYN_RECEIVED
;
1513 * Advance th->th_seq to correspond to first data byte.
1514 * If data, trim to stay within window,
1515 * dropping FIN if necessary.
1518 if (tlen
> tp
->rcv_wnd
) {
1519 todrop
= tlen
- tp
->rcv_wnd
;
1523 tcpstat
.tcps_rcvpackafterwin
++;
1524 tcpstat
.tcps_rcvbyteafterwin
+= todrop
;
1526 tp
->snd_wl1
= th
->th_seq
- 1;
1527 tp
->rcv_up
= th
->th_seq
;
1529 * Client side of transaction: already sent SYN and data.
1530 * If the remote host used T/TCP to validate the SYN,
1531 * our data will be ACK'd; if so, enter normal data segment
1532 * processing in the middle of step 5, ack processing.
1533 * Otherwise, goto step 6.
1535 if (thflags
& TH_ACK
)
1541 * If the state is LAST_ACK or CLOSING or TIME_WAIT:
1542 * do normal processing (we no longer bother with T/TCP).
1546 case TCPS_TIME_WAIT
:
1547 break; /* continue normal processing */
1551 * States other than LISTEN or SYN_SENT.
1552 * First check the RST flag and sequence number since reset segments
1553 * are exempt from the timestamp and connection count tests. This
1554 * fixes a bug introduced by the Stevens, vol. 2, p. 960 bugfix
1555 * below which allowed reset segments in half the sequence space
1556 * to fall though and be processed (which gives forged reset
1557 * segments with a random sequence number a 50 percent chance of
1558 * killing a connection).
1559 * Then check timestamp, if present.
1560 * Then check the connection count, if present.
1561 * Then check that at least some bytes of segment are within
1562 * receive window. If segment begins before rcv_nxt,
1563 * drop leading data (and SYN); if nothing left, just ack.
1566 * If the RST bit is set, check the sequence number to see
1567 * if this is a valid reset segment.
1569 * In all states except SYN-SENT, all reset (RST) segments
1570 * are validated by checking their SEQ-fields. A reset is
1571 * valid if its sequence number is in the window.
1572 * Note: this does not take into account delayed ACKs, so
1573 * we should test against last_ack_sent instead of rcv_nxt.
1574 * The sequence number in the reset segment is normally an
1575 * echo of our outgoing acknowledgement numbers, but some hosts
1576 * send a reset with the sequence number at the rightmost edge
1577 * of our receive window, and we have to handle this case.
 * If we have multiple segments in flight, the initial reset
1579 * segment sequence numbers will be to the left of last_ack_sent,
1580 * but they will eventually catch up.
1581 * In any case, it never made sense to trim reset segments to
1582 * fit the receive window since RFC 1122 says:
1583 * 4.2.2.12 RST Segment: RFC-793 Section 3.4
1585 * A TCP SHOULD allow a received RST segment to include data.
1588 * It has been suggested that a RST segment could contain
1589 * ASCII text that encoded and explained the cause of the
1590 * RST. No standard has yet been established for such
1593 * If the reset segment passes the sequence number test examine
1595 * SYN_RECEIVED STATE:
1596 * If passive open, return to LISTEN state.
1597 * If active open, inform user that connection was refused.
1598 * ESTABLISHED, FIN_WAIT_1, FIN_WAIT_2, CLOSE_WAIT STATES:
1599 * Inform user that connection was reset, and close tcb.
1600 * CLOSING, LAST_ACK STATES:
1603 * Drop the segment - see Stevens, vol. 2, p. 964 and
1606 if (thflags
& TH_RST
) {
1607 if (SEQ_GEQ(th
->th_seq
, tp
->last_ack_sent
) &&
1608 SEQ_LEQ(th
->th_seq
, tp
->last_ack_sent
+ tp
->rcv_wnd
)) {
1609 switch (tp
->t_state
) {
1611 case TCPS_SYN_RECEIVED
:
1612 so
->so_error
= ECONNREFUSED
;
1615 case TCPS_ESTABLISHED
:
1616 case TCPS_FIN_WAIT_1
:
1617 case TCPS_FIN_WAIT_2
:
1618 case TCPS_CLOSE_WAIT
:
1619 so
->so_error
= ECONNRESET
;
1621 tp
->t_state
= TCPS_CLOSED
;
1622 tcpstat
.tcps_drops
++;
1631 case TCPS_TIME_WAIT
:
1639 * RFC 1323 PAWS: If we have a timestamp reply on this segment
1640 * and it's less than ts_recent, drop it.
1642 if ((to
.to_flags
& TOF_TS
) && tp
->ts_recent
!= 0 &&
1643 TSTMP_LT(to
.to_tsval
, tp
->ts_recent
)) {
1645 /* Check to see if ts_recent is over 24 days old. */
1646 if ((int)(ticks
- tp
->ts_recent_age
) > TCP_PAWS_IDLE
) {
1648 * Invalidate ts_recent. If this segment updates
1649 * ts_recent, the age will be reset later and ts_recent
1650 * will get a valid value. If it does not, setting
1651 * ts_recent to zero will at least satisfy the
1652 * requirement that zero be placed in the timestamp
1653 * echo reply when ts_recent isn't valid. The
1654 * age isn't reset until we get a valid ts_recent
1655 * because we don't want out-of-order segments to be
1656 * dropped when ts_recent is old.
1660 tcpstat
.tcps_rcvduppack
++;
1661 tcpstat
.tcps_rcvdupbyte
+= tlen
;
1662 tcpstat
.tcps_pawsdrop
++;
1670 * In the SYN-RECEIVED state, validate that the packet belongs to
1671 * this connection before trimming the data to fit the receive
1672 * window. Check the sequence number versus IRS since we know
1673 * the sequence numbers haven't wrapped. This is a partial fix
1674 * for the "LAND" DoS attack.
1676 if (tp
->t_state
== TCPS_SYN_RECEIVED
&& SEQ_LT(th
->th_seq
, tp
->irs
)) {
1677 rstreason
= BANDLIM_RST_OPENPORT
;
1681 todrop
= tp
->rcv_nxt
- th
->th_seq
;
1683 if (TCP_DO_SACK(tp
)) {
1684 /* Report duplicate segment at head of packet. */
1685 tp
->reportblk
.rblk_start
= th
->th_seq
;
1686 tp
->reportblk
.rblk_end
= th
->th_seq
+ tlen
;
1687 if (thflags
& TH_FIN
)
1688 ++tp
->reportblk
.rblk_end
;
1689 if (SEQ_GT(tp
->reportblk
.rblk_end
, tp
->rcv_nxt
))
1690 tp
->reportblk
.rblk_end
= tp
->rcv_nxt
;
1691 tp
->t_flags
|= (TF_DUPSEG
| TF_SACKLEFT
| TF_ACKNOW
);
1693 if (thflags
& TH_SYN
) {
1703 * Following if statement from Stevens, vol. 2, p. 960.
1705 if (todrop
> tlen
||
1706 (todrop
== tlen
&& !(thflags
& TH_FIN
))) {
1708 * Any valid FIN must be to the left of the window.
1709 * At this point the FIN must be a duplicate or out
1710 * of sequence; drop it.
1715 * Send an ACK to resynchronize and drop any data.
1716 * But keep on processing for RST or ACK.
1718 tp
->t_flags
|= TF_ACKNOW
;
1720 tcpstat
.tcps_rcvduppack
++;
1721 tcpstat
.tcps_rcvdupbyte
+= todrop
;
1723 tcpstat
.tcps_rcvpartduppack
++;
1724 tcpstat
.tcps_rcvpartdupbyte
+= todrop
;
1726 drop_hdrlen
+= todrop
; /* drop from the top afterwards */
1727 th
->th_seq
+= todrop
;
1729 if (th
->th_urp
> todrop
)
1730 th
->th_urp
-= todrop
;
1738 * If new data are received on a connection after the
1739 * user processes are gone, then RST the other end.
1741 if ((so
->so_state
& SS_NOFDREF
) &&
1742 tp
->t_state
> TCPS_CLOSE_WAIT
&& tlen
) {
1744 tcpstat
.tcps_rcvafterclose
++;
1745 rstreason
= BANDLIM_UNLIMITED
;
1750 * If segment ends after window, drop trailing data
1751 * (and PUSH and FIN); if nothing left, just ACK.
1753 todrop
= (th
->th_seq
+ tlen
) - (tp
->rcv_nxt
+ tp
->rcv_wnd
);
1755 tcpstat
.tcps_rcvpackafterwin
++;
1756 if (todrop
>= tlen
) {
1757 tcpstat
.tcps_rcvbyteafterwin
+= tlen
;
1759 * If a new connection request is received
1760 * while in TIME_WAIT, drop the old connection
1761 * and start over if the sequence numbers
1762 * are above the previous ones.
1764 if (thflags
& TH_SYN
&&
1765 tp
->t_state
== TCPS_TIME_WAIT
&&
1766 SEQ_GT(th
->th_seq
, tp
->rcv_nxt
)) {
1771 * If window is closed can only take segments at
1772 * window edge, and have to drop data and PUSH from
1773 * incoming segments. Continue processing, but
1774 * remember to ack. Otherwise, drop segment
1777 if (tp
->rcv_wnd
== 0 && th
->th_seq
== tp
->rcv_nxt
) {
1778 tp
->t_flags
|= TF_ACKNOW
;
1779 tcpstat
.tcps_rcvwinprobe
++;
1783 tcpstat
.tcps_rcvbyteafterwin
+= todrop
;
1786 thflags
&= ~(TH_PUSH
| TH_FIN
);
1790 * If last ACK falls within this segment's sequence numbers,
1791 * record its timestamp.
1793 * 1) That the test incorporates suggestions from the latest
1794 * proposal of the tcplw@cray.com list (Braden 1993/04/26).
1795 * 2) That updating only on newer timestamps interferes with
1796 * our earlier PAWS tests, so this check should be solely
1797 * predicated on the sequence space of this segment.
1798 * 3) That we modify the segment boundary check to be
1799 * Last.ACK.Sent <= SEG.SEQ + SEG.LEN
1800 * instead of RFC1323's
1801 * Last.ACK.Sent < SEG.SEQ + SEG.LEN,
1802 * This modified check allows us to overcome RFC1323's
1803 * limitations as described in Stevens TCP/IP Illustrated
1804 * Vol. 2 p.869. In such cases, we can still calculate the
1805 * RTT correctly when RCV.NXT == Last.ACK.Sent.
1807 if ((to
.to_flags
& TOF_TS
) && SEQ_LEQ(th
->th_seq
, tp
->last_ack_sent
) &&
1808 SEQ_LEQ(tp
->last_ack_sent
, (th
->th_seq
+ tlen
1809 + ((thflags
& TH_SYN
) != 0)
1810 + ((thflags
& TH_FIN
) != 0)))) {
1811 tp
->ts_recent_age
= ticks
;
1812 tp
->ts_recent
= to
.to_tsval
;
1816 * If a SYN is in the window, then this is an
1817 * error and we send an RST and drop the connection.
1819 if (thflags
& TH_SYN
) {
1820 tp
= tcp_drop(tp
, ECONNRESET
);
1821 rstreason
= BANDLIM_UNLIMITED
;
1826 * If the ACK bit is off: if in SYN-RECEIVED state or SENDSYN
1827 * flag is on (half-synchronized state), then queue data for
1828 * later processing; else drop segment and return.
1830 if (!(thflags
& TH_ACK
)) {
1831 if (tp
->t_state
== TCPS_SYN_RECEIVED
||
1832 (tp
->t_flags
& TF_NEEDSYN
))
1841 switch (tp
->t_state
) {
1843 * In SYN_RECEIVED state, the ACK acknowledges our SYN, so enter
1844 * ESTABLISHED state and continue processing.
1845 * The ACK was checked above.
1847 case TCPS_SYN_RECEIVED
:
1849 tcpstat
.tcps_connects
++;
1851 /* Do window scaling? */
1852 if ((tp
->t_flags
& (TF_RCVD_SCALE
| TF_REQ_SCALE
)) ==
1853 (TF_RCVD_SCALE
| TF_REQ_SCALE
)) {
1854 tp
->snd_scale
= tp
->requested_s_scale
;
1855 tp
->rcv_scale
= tp
->request_r_scale
;
1859 * SYN-RECEIVED -> ESTABLISHED
1860 * SYN-RECEIVED* -> FIN-WAIT-1
1862 tp
->t_starttime
= ticks
;
1863 if (tp
->t_flags
& TF_NEEDFIN
) {
1864 tp
->t_state
= TCPS_FIN_WAIT_1
;
1865 tp
->t_flags
&= ~TF_NEEDFIN
;
1867 tp
->t_state
= TCPS_ESTABLISHED
;
1868 tcp_callout_reset(tp
, tp
->tt_keep
, tcp_keepidle
,
1872 * If segment contains data or ACK, will call tcp_reass()
1873 * later; if not, do so now to pass queued data to user.
1875 if (tlen
== 0 && !(thflags
& TH_FIN
))
1876 tcp_reass(tp
, NULL
, NULL
, NULL
);
1880 * In ESTABLISHED state: drop duplicate ACKs; ACK out of range
1881 * ACKs. If the ack is in the range
1882 * tp->snd_una < th->th_ack <= tp->snd_max
1883 * then advance tp->snd_una to th->th_ack and drop
1884 * data from the retransmission queue. If this ACK reflects
1885 * more up to date window information we update our window information.
1887 case TCPS_ESTABLISHED
:
1888 case TCPS_FIN_WAIT_1
:
1889 case TCPS_FIN_WAIT_2
:
1890 case TCPS_CLOSE_WAIT
:
1893 case TCPS_TIME_WAIT
:
1895 if (SEQ_LEQ(th
->th_ack
, tp
->snd_una
)) {
1896 if (TCP_DO_SACK(tp
))
1897 tcp_sack_update_scoreboard(tp
, &to
);
1898 if (tlen
!= 0 || tiwin
!= tp
->snd_wnd
) {
1902 tcpstat
.tcps_rcvdupack
++;
1903 if (!tcp_callout_active(tp
, tp
->tt_rexmt
) ||
1904 th
->th_ack
!= tp
->snd_una
) {
1909 * We have outstanding data (other than
1910 * a window probe), this is a completely
1911 * duplicate ack (ie, window info didn't
1912 * change), the ack is the biggest we've
1913 * seen and we've seen exactly our rexmt
 * threshold of them, so assume a packet
1915 * has been dropped and retransmit it.
1916 * Kludge snd_nxt & the congestion
1917 * window so we send only this one
1920 if (IN_FASTRECOVERY(tp
)) {
1921 if (TCP_DO_SACK(tp
)) {
/* No artificial cwnd inflation. */
1923 tcp_sack_rexmt(tp
, th
);
1926 * Dup acks mean that packets
1927 * have left the network
1928 * (they're now cached at the
1929 * receiver) so bump cwnd by
1930 * the amount in the receiver
1931 * to keep a constant cwnd
1932 * packets in the network.
1934 tp
->snd_cwnd
+= tp
->t_maxseg
;
1937 } else if (SEQ_LT(th
->th_ack
, tp
->snd_recover
)) {
1940 } else if (++tp
->t_dupacks
== tcprexmtthresh
) {
1941 tcp_seq old_snd_nxt
;
1945 if (tcp_do_eifel_detect
&&
1946 (tp
->t_flags
& TF_RCVD_TSTMP
)) {
1947 tcp_save_congestion_state(tp
);
1948 tp
->t_flags
|= TF_FASTREXMT
;
1951 * We know we're losing at the current
1952 * window size, so do congestion avoidance:
1953 * set ssthresh to half the current window
1954 * and pull our congestion window back to the
1957 win
= min(tp
->snd_wnd
, tp
->snd_cwnd
) / 2 /
1961 tp
->snd_ssthresh
= win
* tp
->t_maxseg
;
1962 ENTER_FASTRECOVERY(tp
);
1963 tp
->snd_recover
= tp
->snd_max
;
1964 tcp_callout_stop(tp
, tp
->tt_rexmt
);
1966 old_snd_nxt
= tp
->snd_nxt
;
1967 tp
->snd_nxt
= th
->th_ack
;
1968 tp
->snd_cwnd
= tp
->t_maxseg
;
1970 ++tcpstat
.tcps_sndfastrexmit
;
1971 tp
->snd_cwnd
= tp
->snd_ssthresh
;
1972 tp
->rexmt_high
= tp
->snd_nxt
;
1973 if (SEQ_GT(old_snd_nxt
, tp
->snd_nxt
))
1974 tp
->snd_nxt
= old_snd_nxt
;
1975 KASSERT(tp
->snd_limited
<= 2,
1976 ("tp->snd_limited too big"));
1977 if (TCP_DO_SACK(tp
))
1978 tcp_sack_rexmt(tp
, th
);
1980 tp
->snd_cwnd
+= tp
->t_maxseg
*
1981 (tp
->t_dupacks
- tp
->snd_limited
);
1982 } else if (tcp_do_limitedtransmit
) {
1983 u_long oldcwnd
= tp
->snd_cwnd
;
1984 tcp_seq oldsndmax
= tp
->snd_max
;
1985 tcp_seq oldsndnxt
= tp
->snd_nxt
;
1986 /* outstanding data */
1987 uint32_t ownd
= tp
->snd_max
- tp
->snd_una
;
#define iceildiv(n, d)		(((n)+(d)-1) / (d))
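/* iceildiv(n, d): integer division of n by d, rounded up (ceiling). */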
1992 KASSERT(tp
->t_dupacks
== 1 ||
1994 ("dupacks not 1 or 2"));
1995 if (tp
->t_dupacks
== 1)
1996 tp
->snd_limited
= 0;
1997 tp
->snd_nxt
= tp
->snd_max
;
1998 tp
->snd_cwnd
= ownd
+
1999 (tp
->t_dupacks
- tp
->snd_limited
) *
				/*
				 * Other acks may have been processed,
				 * snd_nxt cannot be reset to a value less
				 * than snd_una.
				 */
				if (SEQ_LT(oldsndnxt, oldsndmax)) {
					if (SEQ_GT(oldsndnxt, tp->snd_una))
						tp->snd_nxt = oldsndnxt;
					else
						tp->snd_nxt = tp->snd_una;
				}
				tp->snd_cwnd = oldcwnd;
				sent = tp->snd_max - oldsndmax;
				if (sent > tp->t_maxseg) {
					KASSERT((tp->t_dupacks == 2 &&
						 tp->snd_limited == 0) ||
						(sent == tp->t_maxseg + 1 &&
						 tp->t_flags & TF_SENTFIN),
					    ("sent too much"));
					KASSERT(sent <= tp->t_maxseg * 2,
					    ("sent too many segments"));
					tp->snd_limited = 2;
					tcpstat.tcps_sndlimited += 2;
				} else if (sent > 0) {
					++tp->snd_limited;
					++tcpstat.tcps_sndlimited;
				} else if (tcp_do_early_retransmit &&
				    (tcp_do_eifel_detect &&
				     (tp->t_flags & TF_RCVD_TSTMP)) &&
				    ownd < 4 * tp->t_maxseg &&
				    tp->t_dupacks + 1 >=
				      iceildiv(ownd, tp->t_maxseg) &&
				    (!TCP_DO_SACK(tp) ||
				     ownd <= tp->t_maxseg ||
				     tcp_sack_has_sacked(&tp->scb,
					ownd - tp->t_maxseg))) {
					++tcpstat.tcps_sndearlyrexmit;
					tp->t_flags |= TF_EARLYREXMT;
					goto fastretransmit;
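					/*
					 * Worked example (figures assumed):
					 * with only ownd = 3 * t_maxseg bytes
					 * in flight, iceildiv(ownd, t_maxseg)
					 * is 3, so the second duplicate ACK
					 * (t_dupacks == 2) already satisfies
					 * t_dupacks + 1 >= 3 and triggers the
					 * fast retransmit above.  With so
					 * little data outstanding, the usual
					 * three duplicate ACKs may never
					 * arrive.
					 */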
				}
			}
			goto drop;
		}

		KASSERT(SEQ_GT(th->th_ack, tp->snd_una), ("th_ack <= snd_una"));

		if (SEQ_GT(th->th_ack, tp->snd_max)) {
			/*
			 * Detected optimistic ACK attack.
			 * Force slow-start to de-synchronize attack.
			 */
			tp->snd_cwnd = tp->t_maxseg;

			tcpstat.tcps_rcvacktoomuch++;
			goto dropafterack;
		}

		/*
		 * If we reach this point, ACK is not a duplicate,
		 * i.e., it ACKs something we sent.
		 */
		if (tp->t_flags & TF_NEEDSYN) {
			/*
			 * T/TCP: Connection was half-synchronized, and our
			 * SYN has been ACK'd (so connection is now fully
			 * synchronized).  Go to non-starred state,
			 * increment snd_una for ACK of SYN, and check if
			 * we can do window scaling.
			 */
			tp->t_flags &= ~TF_NEEDSYN;
			tp->snd_una++;
			/* Do window scaling? */
			if ((tp->t_flags & (TF_RCVD_SCALE | TF_REQ_SCALE)) ==
			    (TF_RCVD_SCALE | TF_REQ_SCALE)) {
				tp->snd_scale = tp->requested_s_scale;
				tp->rcv_scale = tp->request_r_scale;
			}
		}

		acked = th->th_ack - tp->snd_una;
		tcpstat.tcps_rcvackpack++;
		tcpstat.tcps_rcvackbyte += acked;

		if (tcp_do_eifel_detect && acked > 0 &&
		    (to.to_flags & TOF_TS) && (to.to_tsecr != 0) &&
		    (tp->t_flags & TF_FIRSTACCACK)) {
			/* Eifel detection applicable. */
			if (to.to_tsecr < tp->t_rexmtTS) {
				++tcpstat.tcps_eifeldetected;
				tcp_revert_congestion_state(tp);
				if (tp->t_rxtshift == 1 &&
				    ticks >= tp->t_badrxtwin)
					++tcpstat.tcps_rttcantdetect;
			}
		} else if (tp->t_rxtshift == 1 && ticks < tp->t_badrxtwin) {
			/*
			 * If we just performed our first retransmit,
			 * and the ACK arrives within our recovery window,
			 * then it was a mistake to do the retransmit
			 * in the first place.  Recover our original cwnd
			 * and ssthresh, and proceed to transmit where we
			 * left off.
			 */
			tcp_revert_congestion_state(tp);
			++tcpstat.tcps_rttdetected;
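			/*
			 * Example of the two cases above (timestamps
			 * assumed): if the original segment carried TSval
			 * 1000 and its retransmission TSval 1050 (saved in
			 * t_rexmtTS), an ACK echoing 1000 shows that the
			 * original, not the retransmission, was received,
			 * so the retransmit was spurious and the saved
			 * congestion state is restored.  The t_badrxtwin
			 * test catches the same situation without
			 * timestamps, using the arrival time of the first
			 * ACK after the retransmit.
			 */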
		}

		/*
		 * If we have a timestamp reply, update smoothed
		 * round trip time.  If no timestamp is present but
		 * transmit timer is running and timed sequence
		 * number was acked, update smoothed round trip time.
		 * Since we now have an rtt measurement, cancel the
		 * timer backoff (cf., Phil Karn's retransmit alg.).
		 * Recompute the initial retransmit timer.
		 *
		 * Some machines (certain windows boxes) send broken
		 * timestamp replies during the SYN+ACK phase, ignore
		 * timestamps of 0.
		 */
		if ((to.to_flags & TOF_TS) && (to.to_tsecr != 0))
			tcp_xmit_timer(tp, ticks - to.to_tsecr + 1);
		else if (tp->t_rtttime && SEQ_GT(th->th_ack, tp->t_rtseq))
			tcp_xmit_timer(tp, ticks - tp->t_rtttime);
		tcp_xmit_bandwidth_limit(tp, th->th_ack);

		/*
		 * If no data (only SYN) was ACK'd,
		 * skip rest of ACK processing.
		 */
		if (acked == 0)
			goto step6;

		/* Stop looking for an acceptable ACK since one was received. */
		tp->t_flags &= ~(TF_FIRSTACCACK | TF_FASTREXMT | TF_EARLYREXMT);

		if (acked > so->so_snd.ssb_cc) {
			tp->snd_wnd -= so->so_snd.ssb_cc;
			sbdrop(&so->so_snd.sb, (int)so->so_snd.ssb_cc);
			ourfinisacked = TRUE;
		} else {
			sbdrop(&so->so_snd.sb, acked);
			tp->snd_wnd -= acked;
			ourfinisacked = FALSE;
		}
		sowwakeup(so);

		/*
		 * Update window information.
		 * Don't look at window if no ACK:
		 * TAC's send garbage on first SYN.
		 */
		if (SEQ_LT(tp->snd_wl1, th->th_seq) ||
		    (tp->snd_wl1 == th->th_seq &&
		     (SEQ_LT(tp->snd_wl2, th->th_ack) ||
		      (tp->snd_wl2 == th->th_ack && tiwin > tp->snd_wnd)))) {
			/* keep track of pure window updates */
			if (tlen == 0 && tp->snd_wl2 == th->th_ack &&
			    tiwin > tp->snd_wnd)
				tcpstat.tcps_rcvwinupd++;
			tp->snd_wnd = tiwin;
			tp->snd_wl1 = th->th_seq;
			tp->snd_wl2 = th->th_ack;
			if (tp->snd_wnd > tp->max_sndwnd)
				tp->max_sndwnd = tp->snd_wnd;
		}

		tp->snd_una = th->th_ack;
		if (TCP_DO_SACK(tp))
			tcp_sack_update_scoreboard(tp, &to);
		if (IN_FASTRECOVERY(tp)) {
			if (SEQ_GEQ(th->th_ack, tp->snd_recover)) {
				EXIT_FASTRECOVERY(tp);
				/*
				 * If the congestion window was inflated
				 * to account for the other side's
				 * cached packets, retract it.
				 */
				if (!TCP_DO_SACK(tp))
					tp->snd_cwnd = tp->snd_ssthresh;

				/*
				 * Window inflation should have left us
				 * with approximately snd_ssthresh outstanding
				 * data.  But, in case we would be inclined
				 * to send a burst, better do it using
				 * the slow start mechanism.
				 */
				if (SEQ_GT(th->th_ack + tp->snd_cwnd,
					   tp->snd_max + 2 * tp->t_maxseg)) {
					tp->snd_cwnd =
					    (tp->snd_max - tp->snd_una) +
					    2 * tp->t_maxseg;
				}
			} else {
				if (TCP_DO_SACK(tp)) {
					tp->snd_max_rexmt = tp->snd_max;
					tcp_sack_rexmt(tp, th);
				} else {
					tcp_newreno_partial_ack(tp, th, acked);
				}
			}
		} else {
			/*
			 * Open the congestion window.  When in slow-start,
			 * open exponentially: maxseg per packet.  Otherwise,
			 * open linearly: maxseg per window.
			 */
			if (tp->snd_cwnd <= tp->snd_ssthresh) {
				u_int abc_sslimit =
				    (SEQ_LT(tp->snd_nxt, tp->snd_max) ?
				     tp->t_maxseg : 2 * tp->t_maxseg);

				/* slow-start */
				tp->snd_cwnd += tcp_do_abc ?
				    min(acked, abc_sslimit) : tp->t_maxseg;
			} else {
				/* linear increase */
				tp->snd_wacked += tcp_do_abc ? acked :
				    tp->t_maxseg;
				if (tp->snd_wacked >= tp->snd_cwnd) {
					tp->snd_wacked -= tp->snd_cwnd;
					tp->snd_cwnd += tp->t_maxseg;
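					/*
					 * Worked example (figures assumed):
					 * in congestion avoidance with
					 * snd_cwnd = 10 * t_maxseg, roughly a
					 * full window of ACKed data must
					 * accumulate in snd_wacked before
					 * snd_cwnd grows by one segment, i.e.
					 * about one segment per round trip.
					 * In slow start the window instead
					 * grows by one segment per ACK (or by
					 * the bytes ACKed, capped at
					 * abc_sslimit, with ABC), roughly
					 * doubling every round trip.
					 */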
				}
			}
		}
		tp->snd_cwnd = min(tp->snd_cwnd,
				   TCP_MAXWIN << tp->snd_scale);
		tp->snd_recover = th->th_ack - 1;

		if (SEQ_LT(tp->snd_nxt, tp->snd_una))
			tp->snd_nxt = tp->snd_una;

		/*
		 * If all outstanding data is acked, stop retransmit
		 * timer and remember to restart (more output or persist).
		 * If there is more data to be acked, restart retransmit
		 * timer, using current (possibly backed-off) value.
		 */
		if (th->th_ack == tp->snd_max) {
			tcp_callout_stop(tp, tp->tt_rexmt);
			needoutput = TRUE;
		} else if (!tcp_callout_active(tp, tp->tt_persist)) {
			tcp_callout_reset(tp, tp->tt_rexmt, tp->t_rxtcur,
			    tcp_timer_rexmt);
		}

		switch (tp->t_state) {
		/*
		 * In FIN_WAIT_1 STATE in addition to the processing
		 * for the ESTABLISHED state if our FIN is now acknowledged
		 * then enter FIN_WAIT_2.
		 */
		case TCPS_FIN_WAIT_1:
			if (ourfinisacked) {
				/*
				 * If we can't receive any more
				 * data, then closing user can proceed.
				 * Starting the timer is contrary to the
				 * specification, but if we don't get a FIN
				 * we'll hang forever.
				 */
				if (so->so_state & SS_CANTRCVMORE) {
					soisdisconnected(so);
					tcp_callout_reset(tp, tp->tt_2msl,
					    tcp_maxidle, tcp_timer_2msl);
				}
				tp->t_state = TCPS_FIN_WAIT_2;
			}
			break;

		/*
		 * In CLOSING STATE in addition to the processing for
		 * the ESTABLISHED state if the ACK acknowledges our FIN
		 * then enter the TIME-WAIT state, otherwise ignore
		 * the segment.
		 */
		case TCPS_CLOSING:
			if (ourfinisacked) {
				tp->t_state = TCPS_TIME_WAIT;
				tcp_canceltimers(tp);
				tcp_callout_reset(tp, tp->tt_2msl,
				    2 * tcp_msl, tcp_timer_2msl);
				soisdisconnected(so);
			}
			break;

		/*
		 * In LAST_ACK, we may still be waiting for data to drain
		 * and/or to be acked, as well as for the ack of our FIN.
		 * If our FIN is now acknowledged, delete the TCB,
		 * enter the closed state and return.
		 */
		case TCPS_LAST_ACK:
			if (ourfinisacked) {
				tp = tcp_close(tp);
				goto drop;
			}
			break;

		/*
		 * In TIME_WAIT state the only thing that should arrive
		 * is a retransmission of the remote FIN.  Acknowledge
		 * it and restart the finack timer.
		 */
		case TCPS_TIME_WAIT:
			tcp_callout_reset(tp, tp->tt_2msl, 2 * tcp_msl,
			    tcp_timer_2msl);
			goto dropafterack;
		}
	}

step6:
	/*
	 * Update window information.
	 * Don't look at window if no ACK: TAC's send garbage on first SYN.
	 */
	if ((thflags & TH_ACK) &&
	    acceptable_window_update(tp, th, tiwin)) {
		/* keep track of pure window updates */
		if (tlen == 0 && tp->snd_wl2 == th->th_ack &&
		    tiwin > tp->snd_wnd)
			tcpstat.tcps_rcvwinupd++;
		tp->snd_wnd = tiwin;
		tp->snd_wl1 = th->th_seq;
		tp->snd_wl2 = th->th_ack;
		if (tp->snd_wnd > tp->max_sndwnd)
			tp->max_sndwnd = tp->snd_wnd;
		needoutput = TRUE;
	}

	/*
	 * Process segments with URG.
	 */
	if ((thflags & TH_URG) && th->th_urp &&
	    !TCPS_HAVERCVDFIN(tp->t_state)) {
		/*
		 * This is a kludge, but if we receive and accept
		 * random urgent pointers, we'll crash in
		 * soreceive.  It's hard to imagine someone
		 * actually wanting to send this much urgent data.
		 */
		if (th->th_urp + so->so_rcv.ssb_cc > sb_max) {
			th->th_urp = 0;			/* XXX */
			thflags &= ~TH_URG;		/* XXX */
			goto dodata;			/* XXX */
		}

		/*
		 * If this segment advances the known urgent pointer,
		 * then mark the data stream.  This should not happen
		 * in CLOSE_WAIT, CLOSING, LAST_ACK or TIME_WAIT STATES since
		 * a FIN has been received from the remote side.
		 * In these states we ignore the URG.
		 *
		 * According to RFC961 (Assigned Protocols),
		 * the urgent pointer points to the last octet
		 * of urgent data.  We continue, however,
		 * to consider it to indicate the first octet
		 * of data past the urgent section as the original
		 * spec states (in one of two places).
		 */
		if (SEQ_GT(th->th_seq + th->th_urp, tp->rcv_up)) {
			tp->rcv_up = th->th_seq + th->th_urp;
			so->so_oobmark = so->so_rcv.ssb_cc +
			    (tp->rcv_up - tp->rcv_nxt) - 1;
			if (so->so_oobmark == 0)
				so->so_state |= SS_RCVATMARK;
			sohasoutofband(so);
			tp->t_oobflags &= ~(TCPOOB_HAVEDATA | TCPOOB_HADDATA);
		}

		/*
		 * Remove out of band data so doesn't get presented to user.
		 * This can happen independent of advancing the URG pointer,
		 * but if two URG's are pending at once, some out-of-band
		 * data may creep in... ick.
		 */
		if (th->th_urp <= (u_long)tlen &&
		    !(so->so_options & SO_OOBINLINE)) {
			/* hdr drop is delayed */
			tcp_pulloutofband(so, th, m, drop_hdrlen);
		}
	} else {
		/*
		 * If no out of band data is expected,
		 * pull receive urgent pointer along
		 * with the receive window.
		 */
		if (SEQ_GT(tp->rcv_nxt, tp->rcv_up))
			tp->rcv_up = tp->rcv_nxt;
	}

dodata:							/* XXX */
	/*
	 * Process the segment text, merging it into the TCP sequencing queue,
	 * and arranging for acknowledgment of receipt if necessary.
	 * This process logically involves adjusting tp->rcv_wnd as data
	 * is presented to the user (this happens in tcp_usrreq.c,
	 * case PRU_RCVD).  If a FIN has already been received on this
	 * connection then we just ignore the text.
	 */
	if ((tlen || (thflags & TH_FIN)) && !TCPS_HAVERCVDFIN(tp->t_state)) {
		m_adj(m, drop_hdrlen);	/* delayed header drop */
		/*
		 * Insert segment which includes th into TCP reassembly queue
		 * with control block tp.  Set thflags to whether reassembly now
		 * includes a segment with FIN.  This handles the common case
		 * inline (segment is the next to be received on an established
		 * connection, and the queue is empty), avoiding linkage into
		 * and removal from the queue and repetition of various
		 * conversions.
		 * Set DELACK for segments received in order, but ack
		 * immediately when segments are out of order (so
		 * fast retransmit can work).
		 */
		if (th->th_seq == tp->rcv_nxt &&
		    LIST_EMPTY(&tp->t_segq) &&
		    TCPS_HAVEESTABLISHED(tp->t_state)) {
			if (DELAY_ACK(tp)) {
				tcp_callout_reset(tp, tp->tt_delack,
				    tcp_delacktime, tcp_timer_delack);
			} else {
				tp->t_flags |= TF_ACKNOW;
			}
			tp->rcv_nxt += tlen;
			thflags = th->th_flags & TH_FIN;
			tcpstat.tcps_rcvpack++;
			tcpstat.tcps_rcvbyte += tlen;
			if (so->so_state & SS_CANTRCVMORE)
				m_freem(m);
			else
				ssb_appendstream(&so->so_rcv, m);
			sorwakeup(so);
		} else {
			if (!(tp->t_flags & TF_DUPSEG)) {
				/* Initialize SACK report block. */
				tp->reportblk.rblk_start = th->th_seq;
				tp->reportblk.rblk_end = th->th_seq + tlen +
				    ((thflags & TH_FIN) != 0);
			}
			thflags = tcp_reass(tp, th, &tlen, m);
			tp->t_flags |= TF_ACKNOW;
		}

		/*
		 * Note the amount of data that peer has sent into
		 * our window, in order to estimate the sender's
		 * buffer size.
		 */
		len = so->so_rcv.ssb_hiwat - (tp->rcv_adv - tp->rcv_nxt);
	} else {
		m_freem(m);
		thflags &= ~TH_FIN;
	}

	/*
	 * If FIN is received ACK the FIN and let the user know
	 * that the connection is closing.
	 */
	if (thflags & TH_FIN) {
		if (!TCPS_HAVERCVDFIN(tp->t_state)) {
			socantrcvmore(so);
			/*
			 * If connection is half-synchronized
			 * (ie NEEDSYN flag on) then delay ACK,
			 * so it may be piggybacked when SYN is sent.
			 * Otherwise, since we received a FIN then no
			 * more input can be expected, send ACK now.
			 */
			if (DELAY_ACK(tp) && (tp->t_flags & TF_NEEDSYN)) {
				tcp_callout_reset(tp, tp->tt_delack,
				    tcp_delacktime, tcp_timer_delack);
			} else {
				tp->t_flags |= TF_ACKNOW;
			}
			tp->rcv_nxt++;
		}

		switch (tp->t_state) {
		/*
		 * In SYN_RECEIVED and ESTABLISHED STATES
		 * enter the CLOSE_WAIT state.
		 */
		case TCPS_SYN_RECEIVED:
			tp->t_starttime = ticks;
			/* FALL THROUGH */
		case TCPS_ESTABLISHED:
			tp->t_state = TCPS_CLOSE_WAIT;
			break;

		/*
		 * If still in FIN_WAIT_1 STATE FIN has not been acked so
		 * enter the CLOSING state.
		 */
		case TCPS_FIN_WAIT_1:
			tp->t_state = TCPS_CLOSING;
			break;

		/*
		 * In FIN_WAIT_2 state enter the TIME_WAIT state,
		 * starting the time-wait timer, turning off the other
		 * standard timers.
		 */
		case TCPS_FIN_WAIT_2:
			tp->t_state = TCPS_TIME_WAIT;
			tcp_canceltimers(tp);
			tcp_callout_reset(tp, tp->tt_2msl, 2 * tcp_msl,
			    tcp_timer_2msl);
			soisdisconnected(so);
			break;

		/*
		 * In TIME_WAIT state restart the 2 MSL time_wait timer.
		 */
		case TCPS_TIME_WAIT:
			tcp_callout_reset(tp, tp->tt_2msl, 2 * tcp_msl,
			    tcp_timer_2msl);
			break;
		}
	}

#ifdef TCPDEBUG
	if (so->so_options & SO_DEBUG)
		tcp_trace(TA_INPUT, ostate, tp, tcp_saveipgen, &tcp_savetcp, 0);
#endif

	/*
	 * Return any desired output.
	 */
	if (needoutput || (tp->t_flags & TF_ACKNOW))
		tcp_output(tp);
	return;

dropafterack:
	/*
	 * Generate an ACK dropping incoming segment if it occupies
	 * sequence space, where the ACK reflects our state.
	 *
	 * We can now skip the test for the RST flag since all
	 * paths to this code happen after packets containing
	 * RST have been dropped.
	 *
	 * In the SYN-RECEIVED state, don't send an ACK unless the
	 * segment we received passes the SYN-RECEIVED ACK test.
	 * If it fails send a RST.  This breaks the loop in the
	 * "LAND" DoS attack, and also prevents an ACK storm
	 * between two listening ports that have been sent forged
	 * SYN segments, each with the source address of the other.
	 */
	if (tp->t_state == TCPS_SYN_RECEIVED && (thflags & TH_ACK) &&
	    (SEQ_GT(tp->snd_una, th->th_ack) ||
	     SEQ_GT(th->th_ack, tp->snd_max))) {
		rstreason = BANDLIM_RST_OPENPORT;
		goto dropwithreset;
	}
#ifdef TCPDEBUG
	if (so->so_options & SO_DEBUG)
		tcp_trace(TA_DROP, ostate, tp, tcp_saveipgen, &tcp_savetcp, 0);
#endif
	m_freem(m);
	tp->t_flags |= TF_ACKNOW;
	tcp_output(tp);
	return;

dropwithreset:
	/*
	 * Generate a RST, dropping incoming segment.
	 * Make ACK acceptable to originator of segment.
	 * Don't bother to respond if destination was broadcast/multicast.
	 */
	if ((thflags & TH_RST) || m->m_flags & (M_BCAST | M_MCAST))
		goto drop;
	if (isipv6) {
		if (IN6_IS_ADDR_MULTICAST(&ip6->ip6_dst) ||
		    IN6_IS_ADDR_MULTICAST(&ip6->ip6_src))
			goto drop;
	} else {
		if (IN_MULTICAST(ntohl(ip->ip_dst.s_addr)) ||
		    IN_MULTICAST(ntohl(ip->ip_src.s_addr)) ||
		    ip->ip_src.s_addr == htonl(INADDR_BROADCAST) ||
		    in_broadcast(ip->ip_dst, m->m_pkthdr.rcvif))
			goto drop;
	}
	/* IPv6 anycast check is done at tcp6_input() */

	/*
	 * Perform bandwidth limiting.
	 */
	if (badport_bandlim(rstreason) < 0)
		goto drop;

#ifdef TCPDEBUG
	if (tp == NULL || (tp->t_inpcb->inp_socket->so_options & SO_DEBUG))
		tcp_trace(TA_DROP, ostate, tp, tcp_saveipgen, &tcp_savetcp, 0);
#endif
	if (thflags & TH_ACK) {
		/* mtod() below is safe as long as hdr dropping is delayed */
		tcp_respond(tp, mtod(m, void *), th, m, (tcp_seq)0, th->th_ack,
			    TH_RST);
	} else {
		if (thflags & TH_SYN)
			tlen++;
		/* mtod() below is safe as long as hdr dropping is delayed */
		tcp_respond(tp, mtod(m, void *), th, m, th->th_seq + tlen,
			    (tcp_seq)0, TH_RST | TH_ACK);
	}
	return;

drop:
	/*
	 * Drop space held by incoming segment and return.
	 */
#ifdef TCPDEBUG
	if (tp == NULL || (tp->t_inpcb->inp_socket->so_options & SO_DEBUG))
		tcp_trace(TA_DROP, ostate, tp, tcp_saveipgen, &tcp_savetcp, 0);
#endif
	m_freem(m);
	return;
}

/*
 * Parse TCP options and place in tcpopt.
 */
static void
tcp_dooptions(struct tcpopt *to, u_char *cp, int cnt, boolean_t is_syn)
{
	int opt, optlen, i;

	to->to_flags = 0;
	for (; cnt > 0; cnt -= optlen, cp += optlen) {
		opt = cp[0];
		if (opt == TCPOPT_EOL)
			break;
		if (opt == TCPOPT_NOP) {
			optlen = 1;
		} else {
			if (cnt < 2)
				break;
			optlen = cp[1];
			if (optlen < 2 || optlen > cnt)
				break;
		}
		switch (opt) {
		case TCPOPT_MAXSEG:
			if (optlen != TCPOLEN_MAXSEG)
				continue;
			if (!is_syn)
				continue;
			to->to_flags |= TOF_MSS;
			bcopy(cp + 2, &to->to_mss, sizeof to->to_mss);
			to->to_mss = ntohs(to->to_mss);
			break;
		case TCPOPT_WINDOW:
			if (optlen != TCPOLEN_WINDOW)
				continue;
			if (!is_syn)
				continue;
			to->to_flags |= TOF_SCALE;
			to->to_requested_s_scale = min(cp[2], TCP_MAX_WINSHIFT);
			break;
		case TCPOPT_TIMESTAMP:
			if (optlen != TCPOLEN_TIMESTAMP)
				continue;
			to->to_flags |= TOF_TS;
			bcopy(cp + 2, &to->to_tsval, sizeof to->to_tsval);
			to->to_tsval = ntohl(to->to_tsval);
			bcopy(cp + 6, &to->to_tsecr, sizeof to->to_tsecr);
			to->to_tsecr = ntohl(to->to_tsecr);
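			/*
			 * Wire format note: the timestamp option is kind 8,
			 * length 10; TSval occupies option bytes 2-5 and the
			 * echo reply TSecr bytes 6-9, which is why the two
			 * bcopy() calls above read from cp + 2 and cp + 6.
			 */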
			/*
			 * If echoed timestamp is later than the current time,
			 * fall back to non RFC1323 RTT calculation.
			 */
			if (to->to_tsecr != 0 && TSTMP_GT(to->to_tsecr, ticks))
				to->to_tsecr = 0;
			break;
		case TCPOPT_SACK_PERMITTED:
			if (optlen != TCPOLEN_SACK_PERMITTED)
				continue;
			if (!is_syn)
				continue;
			to->to_flags |= TOF_SACK_PERMITTED;
			break;
		case TCPOPT_SACK:
			if ((optlen - 2) & 0x07)	/* not multiple of 8 */
				return;
			to->to_nsackblocks = (optlen - 2) / 8;
			to->to_sackblocks = (struct raw_sackblock *)(cp + 2);
			to->to_flags |= TOF_SACK;
			for (i = 0; i < to->to_nsackblocks; i++) {
				struct raw_sackblock *r = &to->to_sackblocks[i];

				r->rblk_start = ntohl(r->rblk_start);
				r->rblk_end = ntohl(r->rblk_end);
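				/*
				 * Each SACK block on the wire is a pair of
				 * 32-bit sequence numbers (left and right
				 * edge) in network byte order, hence the two
				 * ntohl() conversions above; the option body
				 * is 8 * n bytes, which the
				 * (optlen - 2) & 0x07 check enforces.
				 */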
			}
			break;
		default:
			continue;
		}
	}
}

/*
 * Pull out of band byte out of a segment so
 * it doesn't appear in the user's data queue.
 * It is still reflected in the segment length for
 * sequencing purposes.
 * "off" is the delayed to be dropped hdrlen.
 */
static void
tcp_pulloutofband(struct socket *so, struct tcphdr *th, struct mbuf *m, int off)
{
	int cnt = off + th->th_urp - 1;

	while (cnt >= 0) {
		if (m->m_len > cnt) {
			char *cp = mtod(m, caddr_t) + cnt;
			struct tcpcb *tp = sototcpcb(so);

			tp->t_iobc = *cp;
			tp->t_oobflags |= TCPOOB_HAVEDATA;
			bcopy(cp + 1, cp, m->m_len - cnt - 1);
			m->m_len--;
			if (m->m_flags & M_PKTHDR)
				m->m_pkthdr.len--;
			return;
		}
		cnt -= m->m_len;
		m = m->m_next;
		if (m == NULL)
			break;
	}
	panic("tcp_pulloutofband");
}

/*
 * Collect new round-trip time estimate
 * and update averages and current timeout.
 */
static void
tcp_xmit_timer(struct tcpcb *tp, int rtt)
{
	int delta;

	tcpstat.tcps_rttupdated++;
	tp->t_rttupdated++;
	if (tp->t_srtt != 0) {
		/*
		 * srtt is stored as fixed point with 5 bits after the
		 * binary point (i.e., scaled by 32).  The following magic
		 * is equivalent to the smoothing algorithm in rfc793 with
		 * an alpha of .875 (srtt = rtt/8 + srtt*7/8 in fixed
		 * point).  Adjust rtt to origin 0.
		 */
		delta = ((rtt - 1) << TCP_DELTA_SHIFT)
			- (tp->t_srtt >> (TCP_RTT_SHIFT - TCP_DELTA_SHIFT));

		if ((tp->t_srtt += delta) <= 0)
			tp->t_srtt = 1;

		/*
		 * We accumulate a smoothed rtt variance (actually, a
		 * smoothed mean difference), then set the retransmit
		 * timer to smoothed rtt + 4 times the smoothed variance.
		 * rttvar is stored as fixed point with 4 bits after the
		 * binary point (scaled by 16).  The following is
		 * equivalent to rfc793 smoothing with an alpha of .75
		 * (rttvar = rttvar*3/4 + |delta| / 4).  This replaces
		 * rfc793's wired-in beta.
		 */
		if (delta < 0)
			delta = -delta;
		delta -= tp->t_rttvar >> (TCP_RTTVAR_SHIFT - TCP_DELTA_SHIFT);
		if ((tp->t_rttvar += delta) <= 0)
			tp->t_rttvar = 1;
		if (tp->t_rttbest > tp->t_srtt + tp->t_rttvar)
			tp->t_rttbest = tp->t_srtt + tp->t_rttvar;
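		/*
		 * Worked example (assuming the usual TCP_RTT_SHIFT = 5,
		 * TCP_RTTVAR_SHIFT = 4 and TCP_DELTA_SHIFT = 2): with a
		 * smoothed t_srtt of 320 (10 ticks scaled by 32) and a new
		 * measurement rtt = 18 ticks, delta = (17 << 2) - (320 >> 3)
		 * = 68 - 40 = 28, so t_srtt becomes 348, about 10.9 ticks.
		 * The estimate moved 1/8 of the way toward the new sample,
		 * matching the alpha of .875 described above.
		 */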
	} else {
		/*
		 * No rtt measurement yet - use the unsmoothed rtt.
		 * Set the variance to half the rtt (so our first
		 * retransmit happens at 3*rtt).
		 */
		tp->t_srtt = rtt << TCP_RTT_SHIFT;
		tp->t_rttvar = rtt << (TCP_RTTVAR_SHIFT - 1);
		tp->t_rttbest = tp->t_srtt + tp->t_rttvar;
	}

	/*
	 * the retransmit should happen at rtt + 4 * rttvar.
	 * Because of the way we do the smoothing, srtt and rttvar
	 * will each average +1/2 tick of bias.  When we compute
	 * the retransmit timer, we want 1/2 tick of rounding and
	 * 1 extra tick because of +-1/2 tick uncertainty in the
	 * firing of the timer.  The bias will give us exactly the
	 * 1.5 tick we need.  But, because the bias is
	 * statistical, we have to test that we don't drop below
	 * the minimum feasible timer (which is 2 ticks).
	 */
	TCPT_RANGESET(tp->t_rxtcur, TCP_REXMTVAL(tp),
		      max(tp->t_rttmin, rtt + 2), TCPTV_REXMTMAX);

	/*
	 * We received an ack for a packet that wasn't retransmitted;
	 * it is probably safe to discard any error indications we've
	 * received recently.  This isn't quite right, but close enough
	 * for now (a route might have failed after we sent a segment,
	 * and the return path might not be symmetrical).
	 */
	tp->t_softerror = 0;
}

/*
 * Determine a reasonable value for maxseg size.
 * If the route is known, check route for mtu.
 * If none, use an mss that can be handled on the outgoing
 * interface without forcing IP to fragment; if bigger than
 * an mbuf cluster (MCLBYTES), round down to nearest multiple of MCLBYTES
 * to utilize large mbufs.  If no route is found, route has no mtu,
 * or the destination isn't local, use a default, hopefully conservative
 * size (usually 512 or the default IP max size, but no more than the mtu
 * of the interface), as we can't discover anything about intervening
 * gateways or networks.  We also initialize the congestion/slow start
 * window to be a single segment if the destination isn't local.
 * While looking at the routing entry, we also initialize other path-dependent
 * parameters from pre-set or cached values in the routing entry.
 *
 * Also take into account the space needed for options that we
 * send regularly.  Make maxseg shorter by that amount to assure
 * that we can send maxseg amount of data even when the options
 * are present.  Store the upper limit of the length of options plus
 * data in maxopd.
 *
 * NOTE that this routine is only called when we process an incoming
 * segment, for outgoing segments only tcp_mssopt is called.
 */
void
tcp_mss(struct tcpcb *tp, int offer)
{
	struct rtentry *rt;
	struct ifnet *ifp;
	int rtt, mss;
	u_long bufsize;
	struct inpcb *inp = tp->t_inpcb;
	struct socket *so;
#ifdef INET6
	boolean_t isipv6 = ((inp->inp_vflag & INP_IPV6) ? TRUE : FALSE);
	size_t min_protoh = isipv6 ?
			    sizeof(struct ip6_hdr) + sizeof(struct tcphdr) :
			    sizeof(struct tcpiphdr);
#else
	const boolean_t isipv6 = FALSE;
	const size_t min_protoh = sizeof(struct tcpiphdr);
#endif

	if (isipv6)
		rt = tcp_rtlookup6(&inp->inp_inc);
	else
		rt = tcp_rtlookup(&inp->inp_inc);
	if (rt == NULL) {
		tp->t_maxopd = tp->t_maxseg =
		    (isipv6 ? tcp_v6mssdflt : tcp_mssdflt);
		return;
	}
	ifp = rt->rt_ifp;
	so = inp->inp_socket;

	/*
	 * Offer == 0 means that there was no MSS on the SYN segment,
	 * in this case we use either the interface mtu or tcp_mssdflt.
	 *
	 * An offer which is too large will be cut down later.
	 */
	if (offer == 0) {
		if (isipv6) {
			if (in6_localaddr(&inp->in6p_faddr)) {
				offer = ND_IFINFO(rt->rt_ifp)->linkmtu -
					min_protoh;
			} else {
				offer = tcp_v6mssdflt;
			}
		} else {
			if (in_localaddr(inp->inp_faddr))
				offer = ifp->if_mtu - min_protoh;
			else
				offer = tcp_mssdflt;
		}
	}

	/*
	 * Prevent DoS attack with too small MSS.  Round up
	 * to at least minmss.
	 *
	 * Sanity check: make sure that maxopd will be large
	 * enough to allow some data on segments even if all
	 * the option space is used (40 bytes).  Otherwise
	 * funny things may happen in tcp_output.
	 */
	offer = max(offer, tcp_minmss);
	offer = max(offer, 64);
	rt->rt_rmx.rmx_mssopt = offer;

	/*
	 * While we're here, check if there's an initial rtt
	 * or rttvar.  Convert from the route-table units
	 * to scaled multiples of the slow timeout timer.
	 */
	if (tp->t_srtt == 0 && (rtt = rt->rt_rmx.rmx_rtt)) {
		/*
		 * XXX the lock bit for RTT indicates that the value
		 * is also a minimum value; this is subject to time.
		 */
		if (rt->rt_rmx.rmx_locks & RTV_RTT)
			tp->t_rttmin = rtt / (RTM_RTTUNIT / hz);
		tp->t_srtt = rtt / (RTM_RTTUNIT / (hz * TCP_RTT_SCALE));
		tp->t_rttbest = tp->t_srtt + TCP_RTT_SCALE;
		tcpstat.tcps_usedrtt++;
		if (rt->rt_rmx.rmx_rttvar) {
			tp->t_rttvar = rt->rt_rmx.rmx_rttvar /
				       (RTM_RTTUNIT / (hz * TCP_RTTVAR_SCALE));
			tcpstat.tcps_usedrttvar++;
		} else {
			/* default variation is +- 1 rtt */
			tp->t_rttvar =
			    tp->t_srtt * TCP_RTTVAR_SCALE / TCP_RTT_SCALE;
		}
		TCPT_RANGESET(tp->t_rxtcur,
			      ((tp->t_srtt >> 2) + tp->t_rttvar) >> 1,
			      tp->t_rttmin, TCPTV_REXMTMAX);
	}

	/*
	 * if there's an mtu associated with the route, use it
	 * else, use the link mtu.  Take the smaller of mss or offer
	 * as our final mss.
	 */
	if (rt->rt_rmx.rmx_mtu) {
		mss = rt->rt_rmx.rmx_mtu - min_protoh;
	} else {
		if (isipv6)
			mss = ND_IFINFO(rt->rt_ifp)->linkmtu - min_protoh;
		else
			mss = ifp->if_mtu - min_protoh;
	}
	mss = min(mss, offer);

	/*
	 * maxopd stores the maximum length of data AND options
	 * in a segment; maxseg is the amount of data in a normal
	 * segment.  We need to store this value (maxopd) apart
	 * from maxseg, because now every segment carries options
	 * and thus we normally have somewhat less data in segments.
	 */
	tp->t_maxopd = mss;

	if ((tp->t_flags & (TF_REQ_TSTMP | TF_NOOPT)) == TF_REQ_TSTMP &&
	    ((tp->t_flags & TF_RCVD_TSTMP) == TF_RCVD_TSTMP))
		mss -= TCPOLEN_TSTAMP_APPA;

#if (MCLBYTES & (MCLBYTES - 1)) == 0
	if (mss > MCLBYTES)
		mss &= ~(MCLBYTES - 1);
#else
	if (mss > MCLBYTES)
		mss = mss / MCLBYTES * MCLBYTES;
#endif

	/*
	 * If there's a pipesize, change the socket buffer
	 * to that size.  Make the socket buffers an integral
	 * number of mss units; if the mss is larger than
	 * the socket buffer, decrease the mss.
	 */
	if ((bufsize = rt->rt_rmx.rmx_sendpipe) == 0)
		bufsize = so->so_snd.ssb_hiwat;
	if (bufsize < mss) {
		mss = bufsize;
	} else {
		bufsize = roundup(bufsize, mss);
		if (bufsize > sb_max)
			bufsize = sb_max;
		if (bufsize > so->so_snd.ssb_hiwat)
			ssb_reserve(&so->so_snd, bufsize, so, NULL);
	}
	tp->t_maxseg = mss;

	if ((bufsize = rt->rt_rmx.rmx_recvpipe) == 0)
		bufsize = so->so_rcv.ssb_hiwat;
	if (bufsize > mss) {
		bufsize = roundup(bufsize, mss);
		if (bufsize > sb_max)
			bufsize = sb_max;
		if (bufsize > so->so_rcv.ssb_hiwat)
			ssb_reserve(&so->so_rcv, bufsize, so, NULL);
	}

	/*
	 * Set the slow-start flight size depending on whether this
	 * is a local network or not.
	 */
	if (tcp_do_rfc3390)
		tp->snd_cwnd = min(4 * mss, max(2 * mss, 4380));
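	/*
	 * The expression above matches RFC 3390: an initial window of
	 * min(4 * MSS, max(2 * MSS, 4380 bytes)).  For example, with
	 * mss = 1460 this yields 4380 bytes (three segments); with
	 * mss = 512 it yields 2048 bytes (four segments).
	 */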
	if (rt->rt_rmx.rmx_ssthresh) {
		/*
		 * There's some sort of gateway or interface
		 * buffer limit on the path.  Use this to set
		 * the slow start threshold, but set the
		 * threshold to no less than 2*mss.
		 */
		tp->snd_ssthresh = max(2 * mss, rt->rt_rmx.rmx_ssthresh);
		tcpstat.tcps_usedssthresh++;
	}
}

/*
 * Determine the MSS option to send on an outgoing SYN.
 */
int
tcp_mssopt(struct tcpcb *tp)
{
	struct rtentry *rt;
#ifdef INET6
	boolean_t isipv6 =
	    ((tp->t_inpcb->inp_vflag & INP_IPV6) ? TRUE : FALSE);
	int min_protoh = isipv6 ?
			     sizeof(struct ip6_hdr) + sizeof(struct tcphdr) :
			     sizeof(struct tcpiphdr);
#else
	const boolean_t isipv6 = FALSE;
	const size_t min_protoh = sizeof(struct tcpiphdr);
#endif

	if (isipv6)
		rt = tcp_rtlookup6(&tp->t_inpcb->inp_inc);
	else
		rt = tcp_rtlookup(&tp->t_inpcb->inp_inc);
	if (rt == NULL)
		return (isipv6 ? tcp_v6mssdflt : tcp_mssdflt);

	return (rt->rt_ifp->if_mtu - min_protoh);
}

/*
 * When a partial ack arrives, force the retransmission of the
 * next unacknowledged segment.  Do not exit Fast Recovery.
 *
 * Implement the Slow-but-Steady variant of NewReno by restarting the
 * retransmission timer.  Turn it off here so it can be restarted
 * later in tcp_output().
 */
static void
tcp_newreno_partial_ack(struct tcpcb *tp, struct tcphdr *th, int acked)
{
	tcp_seq old_snd_nxt = tp->snd_nxt;
	u_long ocwnd = tp->snd_cwnd;

	tcp_callout_stop(tp, tp->tt_rexmt);
	tp->t_rtttime = 0;
	tp->snd_nxt = th->th_ack;
	/* Set snd_cwnd to one segment beyond acknowledged offset. */
	tp->snd_cwnd = tp->t_maxseg;
	tp->t_flags |= TF_ACKNOW;
	tcp_output(tp);
	if (SEQ_GT(old_snd_nxt, tp->snd_nxt))
		tp->snd_nxt = old_snd_nxt;
	/* partial window deflation */
	if (ocwnd > acked)
		tp->snd_cwnd = ocwnd - acked + tp->t_maxseg;
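	/*
	 * Worked example (figures assumed): if ocwnd was 10 segments and
	 * the partial ACK covered 3 segments' worth of data, the window
	 * deflates to 10 - 3 + 1 = 8 segments, allowing the forced
	 * retransmission above plus roughly one new segment, per the
	 * NewReno partial-ACK rule.
	 */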
	else
		tp->snd_cwnd = tp->t_maxseg;
}

/*
 * In contrast to the Slow-but-Steady NewReno variant,
 * we do not reset the retransmission timer for SACK retransmissions,
 * except when retransmitting snd_una.
 */
static void
tcp_sack_rexmt(struct tcpcb *tp, struct tcphdr *th)
{
	uint32_t pipe, seglen;
	tcp_seq nextrexmt;
	boolean_t lostdup;
	tcp_seq old_snd_nxt = tp->snd_nxt;
	u_long ocwnd = tp->snd_cwnd;
	int nseg = 0;		/* consecutive new segments */
#define MAXBURST 4		/* limit burst of new packets on partial ack */

	pipe = tcp_sack_compute_pipe(tp);
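	/*
	 * "pipe" is the scoreboard's estimate of how much data is still in
	 * flight: outstanding bytes minus the holes reported by SACK.  The
	 * loop below keeps that estimate within the saved congestion
	 * window; for example, with ocwnd = 8 segments and pipe = 6
	 * segments, at most two more segments can be (re)transmitted in
	 * this pass.
	 */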
	while ((tcp_seq_diff_t)(ocwnd - pipe) >= (tcp_seq_diff_t)tp->t_maxseg &&
	    (!tcp_do_smartsack || nseg < MAXBURST) &&
	    tcp_sack_nextseg(tp, &nextrexmt, &seglen, &lostdup)) {
		uint32_t sent;
		tcp_seq old_snd_max;
		int error;

		if (nextrexmt == tp->snd_max)
			++nseg;
		tp->snd_nxt = nextrexmt;
		tp->snd_cwnd = nextrexmt - tp->snd_una + seglen;
		old_snd_max = tp->snd_max;
		if (nextrexmt == tp->snd_una)
			tcp_callout_stop(tp, tp->tt_rexmt);
		error = tcp_output(tp);
		if (error != 0)
			break;
		sent = tp->snd_nxt - nextrexmt;
		if (sent == 0)
			break;
		if (!lostdup)
			pipe += sent;
		tcpstat.tcps_sndsackpack++;
		tcpstat.tcps_sndsackbyte += sent;
		if (SEQ_LT(nextrexmt, old_snd_max) &&
		    SEQ_LT(tp->rexmt_high, tp->snd_nxt))
			tp->rexmt_high = seq_min(tp->snd_nxt, old_snd_max);
	}
	if (SEQ_GT(old_snd_nxt, tp->snd_nxt))
		tp->snd_nxt = old_snd_nxt;
	tp->snd_cwnd = ocwnd;
}