/*
 * Copyright (c) 2003, 2004 Jeffrey M. Hsu.  All rights reserved.
 * Copyright (c) 2003, 2004 The DragonFly Project.  All rights reserved.
 *
 * This code is derived from software contributed to The DragonFly Project
 * by Jeffrey M. Hsu.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of The DragonFly Project nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific, prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

/*
 * Copyright (c) 1982, 1986, 1988, 1990, 1993, 1995
 *	The Regents of the University of California.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)tcp_subr.c	8.2 (Berkeley) 5/24/95
 * $FreeBSD: src/sys/netinet/tcp_subr.c,v 1.73.2.31 2003/01/24 05:11:34 sam Exp $
 */

#include "opt_inet6.h"
#include "opt_ipsec.h"
#include "opt_tcpdebug.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/callout.h>
#include <sys/kernel.h>
#include <sys/sysctl.h>
#include <sys/malloc.h>
#include <sys/mpipe.h>
#include <sys/mbuf.h>
#include <sys/md5.h>
#include <sys/proc.h>
#include <sys/priv.h>
#include <sys/domain.h>
#include <sys/socket.h>
#include <sys/socketops.h>
#include <sys/socketvar.h>
#include <sys/protosw.h>
#include <sys/random.h>
#include <sys/in_cksum.h>
#include <sys/ktr.h>

#include <net/route.h>
#include <net/if.h>
#include <net/netisr2.h>

#include <netinet/in.h>
#include <netinet/in_systm.h>
#include <netinet/ip.h>
#include <netinet/ip6.h>
#include <netinet/in_pcb.h>
#include <netinet6/in6_pcb.h>
#include <netinet/in_var.h>
#include <netinet/ip_var.h>
#include <netinet6/ip6_var.h>
#include <netinet/ip_icmp.h>
#include <netinet/icmp6.h>
#include <netinet/tcp.h>
#include <netinet/tcp_fsm.h>
#include <netinet/tcp_seq.h>
#include <netinet/tcp_timer.h>
#include <netinet/tcp_timer2.h>
#include <netinet/tcp_var.h>
#include <netinet6/tcp6_var.h>
#include <netinet/tcpip.h>
#ifdef TCPDEBUG
#include <netinet/tcp_debug.h>
#endif
#include <netinet6/ip6protosw.h>

#ifdef IPSEC
#include <netinet6/ipsec.h>
#include <netproto/key/key.h>
#ifdef INET6
#include <netinet6/ipsec6.h>
#endif
#endif

#ifdef FAST_IPSEC
#include <netproto/ipsec/ipsec.h>
#ifdef INET6
#include <netproto/ipsec/ipsec6.h>
#endif
#endif

#include <machine/smp.h>

#include <sys/msgport2.h>
#include <sys/mplock2.h>
#include <net/netmsg2.h>

#if !defined(KTR_TCP)
#define KTR_TCP		KTR_ALL
#endif
KTR_INFO_MASTER(tcp);
KTR_INFO(KTR_TCP, tcp, rxmsg, 0, "tcp getmsg", 0);
KTR_INFO(KTR_TCP, tcp, wait, 1, "tcp waitmsg", 0);
KTR_INFO(KTR_TCP, tcp, delayed, 2, "tcp execute delayed ops", 0);
#define logtcp(name)	KTR_LOG(tcp_ ## name)

#define TCP_IW_MAXSEGS_DFLT	4
#define TCP_IW_CAPSEGS_DFLT	4

struct inpcbinfo tcbinfo[MAXCPU];
struct tcpcbackqhead tcpcbackq[MAXCPU];

int tcp_mssdflt = TCP_MSS;
SYSCTL_INT(_net_inet_tcp, TCPCTL_MSSDFLT, mssdflt, CTLFLAG_RW,
    &tcp_mssdflt, 0, "Default TCP Maximum Segment Size");

int tcp_v6mssdflt = TCP6_MSS;
SYSCTL_INT(_net_inet_tcp, TCPCTL_V6MSSDFLT, v6mssdflt, CTLFLAG_RW,
    &tcp_v6mssdflt, 0, "Default TCP Maximum Segment Size for IPv6");

/*
 * Minimum MSS we accept and use.  This prevents DoS attacks where
 * we are forced to a ridiculously low MSS like 20 and send hundreds
 * of packets instead of one.  The effect scales with the available
 * bandwidth and quickly saturates the CPU and network interface
 * with packet generation and sending.  Set to zero to disable MINMSS
 * checking.  This setting prevents us from sending too small packets.
 */
int tcp_minmss = TCP_MINMSS;
SYSCTL_INT(_net_inet_tcp, OID_AUTO, minmss, CTLFLAG_RW,
    &tcp_minmss, 0, "Minimum TCP Maximum Segment Size");

static int tcp_rttdflt = TCPTV_SRTTDFLT / PR_SLOWHZ;
SYSCTL_INT(_net_inet_tcp, TCPCTL_RTTDFLT, rttdflt, CTLFLAG_RW,
    &tcp_rttdflt, 0, "Default maximum TCP Round Trip Time");

int tcp_do_rfc1323 = 1;
SYSCTL_INT(_net_inet_tcp, TCPCTL_DO_RFC1323, rfc1323, CTLFLAG_RW,
    &tcp_do_rfc1323, 0, "Enable rfc1323 (high performance TCP) extensions");

static int tcp_tcbhashsize = 0;
SYSCTL_INT(_net_inet_tcp, OID_AUTO, tcbhashsize, CTLFLAG_RD,
    &tcp_tcbhashsize, 0, "Size of TCP control block hashtable");

static int do_tcpdrain = 1;
SYSCTL_INT(_net_inet_tcp, OID_AUTO, do_tcpdrain, CTLFLAG_RW, &do_tcpdrain, 0,
    "Enable tcp_drain routine for extra help when low on mbufs");

static int icmp_may_rst = 1;
SYSCTL_INT(_net_inet_tcp, OID_AUTO, icmp_may_rst, CTLFLAG_RW, &icmp_may_rst, 0,
    "Certain ICMP unreachable messages may abort connections in SYN_SENT");

static int tcp_isn_reseed_interval = 0;
SYSCTL_INT(_net_inet_tcp, OID_AUTO, isn_reseed_interval, CTLFLAG_RW,
    &tcp_isn_reseed_interval, 0, "Seconds between reseeding of ISN secret");

/*
 * TCP bandwidth limiting sysctls.  The inflight limiter is now turned on
 * by default, but with generous values which should allow maximal
 * bandwidth.  In particular, the slop defaults to 50 (5 packets).
 *
 * The reason for doing this is that the limiter is the only mechanism we
 * have which seems to do a really good job preventing receiver RX rings
 * on network interfaces from getting blown out.  Even though GigE/10GigE
 * is supposed to flow control it looks like either it doesn't actually
 * do it or Open Source drivers do not properly enable it.
 *
 * People using the limiter to reduce bottlenecks on slower WAN connections
 * should set the slop to 20 (2 packets).
 */
static int tcp_inflight_enable = 1;
SYSCTL_INT(_net_inet_tcp, OID_AUTO, inflight_enable, CTLFLAG_RW,
    &tcp_inflight_enable, 0, "Enable automatic TCP inflight data limiting");

static int tcp_inflight_debug = 0;
SYSCTL_INT(_net_inet_tcp, OID_AUTO, inflight_debug, CTLFLAG_RW,
    &tcp_inflight_debug, 0, "Debug TCP inflight calculations");

/*
 * NOTE: tcp_inflight_start is essentially the starting receive window
 *	 for a connection.  If set too low then fetches over tcp
 *	 connections will take noticeably longer to ramp-up over
 *	 high-latency connections.  6144 is too low for a default,
 *	 use something more reasonable.
 */
static int tcp_inflight_start = 33792;
SYSCTL_INT(_net_inet_tcp, OID_AUTO, inflight_start, CTLFLAG_RW,
    &tcp_inflight_start, 0, "Start value for TCP inflight window");

static int tcp_inflight_min = 6144;
SYSCTL_INT(_net_inet_tcp, OID_AUTO, inflight_min, CTLFLAG_RW,
    &tcp_inflight_min, 0, "Lower bound for TCP inflight window");

static int tcp_inflight_max = TCP_MAXWIN << TCP_MAX_WINSHIFT;
SYSCTL_INT(_net_inet_tcp, OID_AUTO, inflight_max, CTLFLAG_RW,
    &tcp_inflight_max, 0, "Upper bound for TCP inflight window");

static int tcp_inflight_stab = 50;
SYSCTL_INT(_net_inet_tcp, OID_AUTO, inflight_stab, CTLFLAG_RW,
    &tcp_inflight_stab, 0, "Fudge bw 1/10% (50=5%)");

static int tcp_inflight_adjrtt = 2;
SYSCTL_INT(_net_inet_tcp, OID_AUTO, inflight_adjrtt, CTLFLAG_RW,
    &tcp_inflight_adjrtt, 0, "Slop for rtt 1/(hz*32)");

static int tcp_do_rfc3390 = 1;
SYSCTL_INT(_net_inet_tcp, OID_AUTO, rfc3390, CTLFLAG_RW,
    &tcp_do_rfc3390, 0,
    "Enable RFC 3390 (Increasing TCP's Initial Congestion Window)");

static u_long tcp_iw_maxsegs = TCP_IW_MAXSEGS_DFLT;
SYSCTL_ULONG(_net_inet_tcp, OID_AUTO, iwmaxsegs, CTLFLAG_RW,
    &tcp_iw_maxsegs, 0, "TCP IW segments max");

static u_long tcp_iw_capsegs = TCP_IW_CAPSEGS_DFLT;
SYSCTL_ULONG(_net_inet_tcp, OID_AUTO, iwcapsegs, CTLFLAG_RW,
    &tcp_iw_capsegs, 0, "TCP IW segments");

int tcp_low_rtobase = 1;
SYSCTL_INT(_net_inet_tcp, OID_AUTO, low_rtobase, CTLFLAG_RW,
    &tcp_low_rtobase, 0, "Lowering the Initial RTO (RFC 6298)");

static int tcp_do_ncr = 1;
SYSCTL_INT(_net_inet_tcp, OID_AUTO, ncr, CTLFLAG_RW,
    &tcp_do_ncr, 0, "Non-Congestion Robustness (RFC 4653)");

int tcp_ncr_rxtthresh_max = 16;
SYSCTL_INT(_net_inet_tcp, OID_AUTO, ncr_rxtthresh_max, CTLFLAG_RW,
    &tcp_ncr_rxtthresh_max, 0,
    "Non-Congestion Robustness (RFC 4653), DupThresh upper limit");

static MALLOC_DEFINE(M_TCPTEMP, "tcptemp", "TCP Templates for Keepalives");
static struct malloc_pipe tcptemp_mpipe;

static void tcp_willblock(void);
static void tcp_notify(struct inpcb *, int);

struct tcp_stats tcpstats_percpu[MAXCPU] __cachealign;
struct tcp_state_count tcpstate_count[MAXCPU] __cachealign;

static struct netmsg_base tcp_drain_netmsg[MAXCPU];
static void tcp_drain_dispatch(netmsg_t nmsg);

static int
sysctl_tcpstats(SYSCTL_HANDLER_ARGS)
{
	int cpu, error = 0;

	for (cpu = 0; cpu < ncpus2; ++cpu) {
		if ((error = SYSCTL_OUT(req, &tcpstats_percpu[cpu],
					sizeof(struct tcp_stats))))
			break;
		if ((error = SYSCTL_IN(req, &tcpstats_percpu[cpu],
				       sizeof(struct tcp_stats))))
			break;
	}

	return (error);
}
SYSCTL_PROC(_net_inet_tcp, TCPCTL_STATS, stats, (CTLTYPE_OPAQUE | CTLFLAG_RW),
    0, 0, sysctl_tcpstats, "S,tcp_stats", "TCP statistics");

/*
 * Target size of TCP PCB hash tables.  Must be a power of two.
 *
 * Note that this can be overridden by the kernel environment
 * variable net.inet.tcp.tcbhashsize
 */
#ifndef TCBHASHSIZE
#define TCBHASHSIZE	512
#endif

/*
 * This is the actual shape of what we allocate using the zone
 * allocator.  Doing it this way allows us to protect both structures
 * using the same generation count, and also eliminates the overhead
 * of allocating tcpcbs separately.  By hiding the structure here,
 * we avoid changing most of the rest of the code (although it needs
 * to be changed, eventually, for greater efficiency).
 */
#define	ALIGNMENT	32
#define	ALIGNM1		(ALIGNMENT - 1)
struct	inp_tp {
	union {
		struct	inpcb inp;
		char	align[(sizeof(struct inpcb) + ALIGNM1) & ~ALIGNM1];
	} inp_tp_u;
	struct	tcpcb tcb;
	struct	tcp_callout inp_tp_rexmt;
	struct	tcp_callout inp_tp_persist;
	struct	tcp_callout inp_tp_keep;
	struct	tcp_callout inp_tp_2msl;
	struct	tcp_callout inp_tp_delack;
	struct	netmsg_tcp_timer inp_tp_timermsg;
	struct	netmsg_base inp_tp_sndmore;
};
#undef ALIGNMENT
#undef ALIGNM1

/*
 * Tcp initialization
 */
void
tcp_init(void)
{
	struct inpcbportinfo *portinfo;
	struct inpcbinfo *ticb;
	int hashsize = TCBHASHSIZE;
	int cpu;

	/*
	 * note: tcptemp is used for keepalives, and it is ok for an
	 * allocation to fail so do not specify MPF_INT.
	 */
	mpipe_init(&tcptemp_mpipe, M_TCPTEMP, sizeof(struct tcptemp),
		    25, -1, 0, NULL, NULL, NULL);

	tcp_delacktime = TCPTV_DELACK;
	tcp_keepinit = TCPTV_KEEP_INIT;
	tcp_keepidle = TCPTV_KEEP_IDLE;
	tcp_keepintvl = TCPTV_KEEPINTVL;
	tcp_maxpersistidle = TCPTV_KEEP_IDLE;
	tcp_rexmit_min = TCPTV_MIN;
	tcp_rexmit_slop = TCPTV_CPU_VAR;

	TUNABLE_INT_FETCH("net.inet.tcp.tcbhashsize", &hashsize);
	if (!powerof2(hashsize)) {
		kprintf("WARNING: TCB hash size not a power of 2\n");
		hashsize = 512;	/* safe default */
	}
	tcp_tcbhashsize = hashsize;

	portinfo = kmalloc_cachealign(sizeof(*portinfo) * ncpus2, M_PCB,
	    M_WAITOK);

	for (cpu = 0; cpu < ncpus2; cpu++) {
		ticb = &tcbinfo[cpu];
		in_pcbinfo_init(ticb, cpu, FALSE);
		ticb->hashbase = hashinit(hashsize, M_PCB,
					  &ticb->hashmask);
		in_pcbportinfo_init(&portinfo[cpu], hashsize, cpu);
		ticb->portinfo = portinfo;
		ticb->portinfo_mask = ncpus2_mask;
		ticb->wildcardhashbase = hashinit(hashsize, M_PCB,
						  &ticb->wildcardhashmask);
		ticb->localgrphashbase = hashinit(hashsize, M_PCB,
						  &ticb->localgrphashmask);
		ticb->ipi_size = sizeof(struct inp_tp);
		TAILQ_INIT(&tcpcbackq[cpu]);
	}

	tcp_reass_maxseg = nmbclusters / 16;
	TUNABLE_INT_FETCH("net.inet.tcp.reass.maxsegments", &tcp_reass_maxseg);

#ifdef INET6
#define TCP_MINPROTOHDR	(sizeof(struct ip6_hdr) + sizeof(struct tcphdr))
#else
#define TCP_MINPROTOHDR	(sizeof(struct tcpiphdr))
#endif
	if (max_protohdr < TCP_MINPROTOHDR)
		max_protohdr = TCP_MINPROTOHDR;
	if (max_linkhdr + TCP_MINPROTOHDR > MHLEN)
		panic("tcp_init");
#undef TCP_MINPROTOHDR

	/*
	 * Initialize TCP statistics counters for each CPU.
	 */
	for (cpu = 0; cpu < ncpus2; ++cpu)
		bzero(&tcpstats_percpu[cpu], sizeof(struct tcp_stats));

	/*
	 * Initialize netmsgs for TCP drain
	 */
	for (cpu = 0; cpu < ncpus2; ++cpu) {
		netmsg_init(&tcp_drain_netmsg[cpu], NULL, &netisr_adone_rport,
		    MSGF_PRIORITY, tcp_drain_dispatch);
	}

	netisr_register_rollup(tcp_willblock, NETISR_ROLLUP_PRIO_TCP);
}

static void
tcp_willblock(void)
{
	struct tcpcb *tp;
	int cpu = mycpu->gd_cpuid;

	while ((tp = TAILQ_FIRST(&tcpcbackq[cpu])) != NULL) {
		KKASSERT(tp->t_flags & TF_ONOUTPUTQ);
		tp->t_flags &= ~TF_ONOUTPUTQ;
		TAILQ_REMOVE(&tcpcbackq[cpu], tp, t_outputq);
		tcp_output(tp);
	}
}

/*
 * Fill in the IP and TCP headers for an outgoing packet, given the tcpcb.
 * tcp_template used to store this data in mbufs, but we now recopy it out
 * of the tcpcb each time to conserve mbufs.
 */
void
tcp_fillheaders(struct tcpcb *tp, void *ip_ptr, void *tcp_ptr, boolean_t tso)
{
	struct inpcb *inp = tp->t_inpcb;
	struct tcphdr *tcp_hdr = (struct tcphdr *)tcp_ptr;

	if (INP_ISIPV6(inp)) {
		struct ip6_hdr *ip6;

		ip6 = (struct ip6_hdr *)ip_ptr;
		ip6->ip6_flow = (ip6->ip6_flow & ~IPV6_FLOWINFO_MASK) |
			(inp->in6p_flowinfo & IPV6_FLOWINFO_MASK);
		ip6->ip6_vfc = (ip6->ip6_vfc & ~IPV6_VERSION_MASK) |
			(IPV6_VERSION & IPV6_VERSION_MASK);
		ip6->ip6_nxt = IPPROTO_TCP;
		ip6->ip6_plen = sizeof(struct tcphdr);
		ip6->ip6_src = inp->in6p_laddr;
		ip6->ip6_dst = inp->in6p_faddr;
	} else {
		struct ip *ip = (struct ip *) ip_ptr;
		u_int plen;

		ip->ip_vhl = IP_VHL_BORING;
		ip->ip_tos = 0;
		ip->ip_len = 0;
		ip->ip_id = 0;
		ip->ip_off = 0;
		ip->ip_ttl = 0;
		ip->ip_sum = 0;
		ip->ip_p = IPPROTO_TCP;
		ip->ip_src = inp->inp_laddr;
		ip->ip_dst = inp->inp_faddr;

		if (tso)
			plen = htons(IPPROTO_TCP);
		else
			plen = htons(sizeof(struct tcphdr) + IPPROTO_TCP);
		tcp_hdr->th_sum = in_pseudo(ip->ip_src.s_addr,
		    ip->ip_dst.s_addr, plen);
	}

	tcp_hdr->th_sport = inp->inp_lport;
	tcp_hdr->th_dport = inp->inp_fport;
	tcp_hdr->th_seq = 0;
	tcp_hdr->th_ack = 0;
	tcp_hdr->th_x2 = 0;
	tcp_hdr->th_off = 5;
	tcp_hdr->th_flags = 0;
	tcp_hdr->th_win = 0;
	tcp_hdr->th_urp = 0;
}

/*
 * Create template to be used to send tcp packets on a connection.
 * Allocates an mbuf and fills in a skeletal tcp/ip header.  The only
 * use for this function is in keepalives, which use tcp_respond.
 */
struct tcptemp *
tcp_maketemplate(struct tcpcb *tp)
{
	struct tcptemp *tmp;

	if ((tmp = mpipe_alloc_nowait(&tcptemp_mpipe)) == NULL)
		return (NULL);
	tcp_fillheaders(tp, &tmp->tt_ipgen, &tmp->tt_t, FALSE);
	return (tmp);
}

void
tcp_freetemplate(struct tcptemp *tmp)
{
	mpipe_free(&tcptemp_mpipe, tmp);
}

/*
 * Send a single message to the TCP at address specified by
 * the given TCP/IP header.  If m == NULL, then we make a copy
 * of the tcpiphdr at ti and send directly to the addressed host.
 * This is used to force keep alive messages out using the TCP
 * template for a connection.  If flags are given then we send
 * a message back to the TCP which originated the segment ti,
 * and discard the mbuf containing it and any other attached mbufs.
 *
 * In any case the ack and sequence number of the transmitted
 * segment are as specified by the parameters.
 *
 * NOTE: If m != NULL, then ti must point to *inside* the mbuf.
 */
void
tcp_respond(struct tcpcb *tp, void *ipgen, struct tcphdr *th, struct mbuf *m,
	    tcp_seq ack, tcp_seq seq, int flags)
{
	int tlen;
	long win = 0;
	int ipflags = 0;
	struct route *ro = NULL;
	struct route sro;
	struct ip *ip = ipgen;
	struct tcphdr *nth;
	struct route_in6 *ro6 = NULL;
	struct route_in6 sro6;
	struct ip6_hdr *ip6 = ipgen;
	struct inpcb *inp = NULL;
	boolean_t use_tmpro = TRUE;
#ifdef INET6
	boolean_t isipv6 = (IP_VHL_V(ip->ip_vhl) == 6);
#else
	const boolean_t isipv6 = FALSE;
#endif

	if (tp != NULL) {
		inp = tp->t_inpcb;
		if (!(flags & TH_RST)) {
			win = ssb_space(&inp->inp_socket->so_rcv);
			if (win < 0)
				win = 0;
			if (win > (long)TCP_MAXWIN << tp->rcv_scale)
				win = (long)TCP_MAXWIN << tp->rcv_scale;
		}
		/*
		 * Don't use the route cache of a listen socket,
		 * it is not MPSAFE; use temporary route cache.
		 */
		if (tp->t_state != TCPS_LISTEN) {
			if (isipv6)
				ro6 = &inp->in6p_route;
			else
				ro = &inp->inp_route;
			use_tmpro = FALSE;
		}
	}
	if (use_tmpro) {
		if (isipv6) {
			ro6 = &sro6;
			bzero(ro6, sizeof *ro6);
		} else {
			ro = &sro;
			bzero(ro, sizeof *ro);
		}
	}

	if (m == NULL) {
		m = m_gethdr(M_NOWAIT, MT_HEADER);
		if (m == NULL)
			return;
		tlen = 0;
		m->m_data += max_linkhdr;
		if (isipv6) {
			bcopy(ip6, mtod(m, caddr_t), sizeof(struct ip6_hdr));
			ip6 = mtod(m, struct ip6_hdr *);
			nth = (struct tcphdr *)(ip6 + 1);
		} else {
			bcopy(ip, mtod(m, caddr_t), sizeof(struct ip));
			ip = mtod(m, struct ip *);
			nth = (struct tcphdr *)(ip + 1);
		}
		bcopy(th, nth, sizeof(struct tcphdr));
		flags = TH_ACK;
	} else {
		m_freem(m->m_next);
		m->m_next = NULL;
		m->m_data = (caddr_t)ipgen;
		/* m_len is set later */
		tlen = 0;
#define	xchg(a, b, type) { type t; t = a; a = b; b = t; }
		if (isipv6) {
			xchg(ip6->ip6_dst, ip6->ip6_src, struct in6_addr);
			nth = (struct tcphdr *)(ip6 + 1);
		} else {
			xchg(ip->ip_dst.s_addr, ip->ip_src.s_addr, n_long);
			nth = (struct tcphdr *)(ip + 1);
		}
		if (th != nth) {
			/*
			 * this is usually a case when an extension header
			 * exists between the IPv6 header and the
			 * TCP header.
			 */
			nth->th_sport = th->th_sport;
			nth->th_dport = th->th_dport;
		}
		xchg(nth->th_dport, nth->th_sport, n_short);
#undef xchg
	}
	if (isipv6) {
		ip6->ip6_vfc = IPV6_VERSION;
		ip6->ip6_nxt = IPPROTO_TCP;
		ip6->ip6_plen = htons((u_short)(sizeof(struct tcphdr) + tlen));
		tlen += sizeof(struct ip6_hdr) + sizeof(struct tcphdr);
	} else {
		tlen += sizeof(struct tcpiphdr);
		ip->ip_len = tlen;
		ip->ip_ttl = ip_defttl;
	}
	m->m_len = tlen;
	m->m_pkthdr.len = tlen;
	m->m_pkthdr.rcvif = NULL;
	nth->th_seq = htonl(seq);
	nth->th_ack = htonl(ack);
	nth->th_x2 = 0;
	nth->th_off = sizeof(struct tcphdr) >> 2;
	nth->th_flags = flags;
	if (tp != NULL)
		nth->th_win = htons((u_short) (win >> tp->rcv_scale));
	else
		nth->th_win = htons((u_short)win);
	nth->th_urp = 0;
	if (isipv6) {
		nth->th_sum = 0;
		nth->th_sum = in6_cksum(m, IPPROTO_TCP,
					sizeof(struct ip6_hdr),
					tlen - sizeof(struct ip6_hdr));
		ip6->ip6_hlim = in6_selecthlim(inp,
		    (ro6 && ro6->ro_rt) ? ro6->ro_rt->rt_ifp : NULL);
	} else {
		nth->th_sum = in_pseudo(ip->ip_src.s_addr, ip->ip_dst.s_addr,
		    htons((u_short)(tlen - sizeof(struct ip) + ip->ip_p)));
		m->m_pkthdr.csum_flags = CSUM_TCP;
		m->m_pkthdr.csum_data = offsetof(struct tcphdr, th_sum);
		m->m_pkthdr.csum_thlen = sizeof(struct tcphdr);
	}
#ifdef TCPDEBUG
	if (tp == NULL || (inp->inp_socket->so_options & SO_DEBUG))
		tcp_trace(TA_OUTPUT, 0, tp, mtod(m, void *), th, 0);
#endif
	if (isipv6) {
		ip6_output(m, NULL, ro6, ipflags, NULL, NULL, inp);
		if ((ro6 == &sro6) && (ro6->ro_rt != NULL)) {
			RTFREE(ro6->ro_rt);
			ro6->ro_rt = NULL;
		}
	} else {
		if (inp != NULL && (inp->inp_flags & INP_HASH))
			m_sethash(m, inp->inp_hashval);
		ipflags |= IP_DEBUGROUTE;
		ip_output(m, NULL, ro, ipflags, NULL, inp);
		if ((ro == &sro) && (ro->ro_rt != NULL)) {
			RTFREE(ro->ro_rt);
			ro->ro_rt = NULL;
		}
	}
}

/*
 * Create a new TCP control block, making an
 * empty reassembly queue and hooking it to the argument
 * protocol control block.  The `inp' parameter must have
 * come from the zone allocator set up in tcp_init().
 */
struct tcpcb *
tcp_newtcpcb(struct inpcb *inp)
{
	struct inp_tp *it;
	struct tcpcb *tp;
#ifdef INET6
	boolean_t isipv6 = INP_ISIPV6(inp);
#else
	const boolean_t isipv6 = FALSE;
#endif

	it = (struct inp_tp *)inp;
	tp = &it->tcb;
	bzero(tp, sizeof(struct tcpcb));
	TAILQ_INIT(&tp->t_segq);
	tp->t_maxseg = tp->t_maxopd = isipv6 ? tcp_v6mssdflt : tcp_mssdflt;
	tp->t_rxtthresh = tcprexmtthresh;

	/* Set up our timeouts. */
	tp->tt_rexmt = &it->inp_tp_rexmt;
	tp->tt_persist = &it->inp_tp_persist;
	tp->tt_keep = &it->inp_tp_keep;
	tp->tt_2msl = &it->inp_tp_2msl;
	tp->tt_delack = &it->inp_tp_delack;

	/*
	 * Zero out timer message.  We don't create it here,
	 * since the current CPU may not be the owner of this
	 * tcpcb.
	 */
	tp->tt_msg = &it->inp_tp_timermsg;
	bzero(tp->tt_msg, sizeof(*tp->tt_msg));

	tp->t_keepinit = tcp_keepinit;
	tp->t_keepidle = tcp_keepidle;
	tp->t_keepintvl = tcp_keepintvl;
	tp->t_keepcnt = tcp_keepcnt;
	tp->t_maxidle = tp->t_keepintvl * tp->t_keepcnt;

	if (tcp_do_ncr)
		tp->t_flags |= TF_NCR;
	if (tcp_do_rfc1323)
		tp->t_flags |= (TF_REQ_SCALE | TF_REQ_TSTMP);

	tp->t_inpcb = inp;	/* XXX */

	/*
	 * Init srtt to TCPTV_SRTTBASE (0), so we can tell that we have no
	 * rtt estimate.  Set rttvar so that srtt + 4 * rttvar gives
	 * reasonable initial retransmit time.
	 */
	tp->t_srtt = TCPTV_SRTTBASE;
	tp->t_rttvar =
	    ((TCPTV_RTOBASE - TCPTV_SRTTBASE) << TCP_RTTVAR_SHIFT) / 4;
	tp->t_rttmin = tcp_rexmit_min;
	tp->t_rxtcur = TCPTV_RTOBASE;
	tp->snd_cwnd = TCP_MAXWIN << TCP_MAX_WINSHIFT;
	tp->snd_bwnd = TCP_MAXWIN << TCP_MAX_WINSHIFT;
	tp->snd_ssthresh = TCP_MAXWIN << TCP_MAX_WINSHIFT;
	tp->snd_last = ticks;
	tp->t_rcvtime = ticks;

	/*
	 * IPv4 TTL initialization is necessary for an IPv6 socket as well,
	 * because the socket may be bound to an IPv6 wildcard address,
	 * which may match an IPv4-mapped IPv6 address.
	 */
	inp->inp_ip_ttl = ip_defttl;

	tcp_sack_tcpcb_init(tp);

	tp->tt_sndmore = &it->inp_tp_sndmore;

	return (tp);		/* XXX */
}

/*
 * Drop a TCP connection, reporting the specified error.
 * If connection is synchronized, then send a RST to peer.
 */
struct tcpcb *
tcp_drop(struct tcpcb *tp, int error)
{
	struct socket *so = tp->t_inpcb->inp_socket;

	if (TCPS_HAVERCVDSYN(tp->t_state)) {
		TCP_STATE_CHANGE(tp, TCPS_CLOSED);
		tcp_output(tp);
		tcpstat.tcps_drops++;
	} else
		tcpstat.tcps_conndrops++;
	if (error == ETIMEDOUT && tp->t_softerror)
		error = tp->t_softerror;
	so->so_error = error;
	return (tcp_close(tp));
}
{
808 struct netmsg_base base
;
810 struct tcpcb
*nm_tp_inh
;
814 tcp_listen_detach_handler(netmsg_t msg
)
816 struct netmsg_listen_detach
*nmsg
= (struct netmsg_listen_detach
*)msg
;
817 struct tcpcb
*tp
= nmsg
->nm_tp
;
818 int cpu
= mycpuid
, nextcpu
;
820 if (tp
->t_flags
& TF_LISTEN
) {
821 syncache_destroy(tp
, nmsg
->nm_tp_inh
);
822 tcp_pcbport_merge_oncpu(tp
);
825 in_pcbremwildcardhash_oncpu(tp
->t_inpcb
, &tcbinfo
[cpu
]);
828 if (nextcpu
< ncpus2
)
829 lwkt_forwardmsg(netisr_cpuport(nextcpu
), &nmsg
->base
.lmsg
);
831 lwkt_replymsg(&nmsg
->base
.lmsg
, 0);

/*
 * Close a TCP control block:
 *	discard all space held by the tcp
 *	discard internet protocol block
 *	wake up any sleepers
 */
struct tcpcb *
tcp_close(struct tcpcb *tp)
{
	struct tseg_qent *q;
	struct inpcb *inp = tp->t_inpcb;
	struct inpcb *inp_inh = NULL;
	struct tcpcb *tp_inh = NULL;
	struct socket *so = inp->inp_socket;
	struct rtentry *rt;
	boolean_t dosavessthresh;
#ifdef INET6
	boolean_t isipv6 = INP_ISIPV6(inp);
#else
	const boolean_t isipv6 = FALSE;
#endif

	if (tp->t_flags & TF_LISTEN) {
		/*
		 * Pending socket/syncache inheritance
		 *
		 * If this is a listen(2) socket, find another listen(2)
		 * socket in the same local group, which could inherit
		 * the syncache and sockets pending on the completion
		 * and incompletion queues.
		 *
		 * NOTE:
		 * Currently the inheritance could only happen on the
		 * listen(2) sockets w/ SO_REUSEPORT set.
		 */
		inp_inh = in_pcblocalgroup_last(&tcbinfo[0], inp);
		if (inp_inh != NULL)
			tp_inh = intotcpcb(inp_inh);
	}

	/*
	 * INP_WILDCARD indicates that listen(2) has been called on
	 * this socket.  This implies:
	 * - A wildcard inp's hash is replicated for each protocol thread.
	 * - Syncache for this inp grows independently in each protocol
	 *   thread.
	 * - There is more than one cpu
	 *
	 * We have to chain a message to the rest of the protocol threads
	 * to cleanup the wildcard hash and the syncache.  The cleanup
	 * in the current protocol thread is deferred till the end of this
	 * function (syncache_destroy and in_pcbdetach).
	 *
	 * NOTE:
	 * After cleanup the inp's hash and syncache entries, this inp will
	 * no longer be available to the rest of the protocol threads, so we
	 * are safe to whack the inp in the following code.
	 */
	if ((inp->inp_flags & INP_WILDCARD) && ncpus2 > 1) {
		struct netmsg_listen_detach nmsg;

		KKASSERT(so->so_port == netisr_cpuport(0));
		KKASSERT(inp->inp_pcbinfo == &tcbinfo[0]);

		netmsg_init(&nmsg.base, NULL, &curthread->td_msgport,
			    MSGF_PRIORITY, tcp_listen_detach_handler);
		nmsg.nm_tp = tp;
		nmsg.nm_tp_inh = tp_inh;
		lwkt_domsg(netisr_cpuport(1), &nmsg.base.lmsg, 0);
	}

	/*
	 * Make sure that all of our timers are stopped before we
	 * delete the PCB.  For listen TCP socket (tp->tt_msg == NULL),
	 * timers are never used.  If timer message is never created
	 * (tp->tt_msg->tt_tcb == NULL), timers are never used too.
	 */
	if (tp->tt_msg != NULL && tp->tt_msg->tt_tcb != NULL) {
		tcp_callout_stop(tp, tp->tt_rexmt);
		tcp_callout_stop(tp, tp->tt_persist);
		tcp_callout_stop(tp, tp->tt_keep);
		tcp_callout_stop(tp, tp->tt_2msl);
		tcp_callout_stop(tp, tp->tt_delack);
	}

	if (tp->t_flags & TF_ONOUTPUTQ) {
		KKASSERT(tp->tt_cpu == mycpu->gd_cpuid);
		TAILQ_REMOVE(&tcpcbackq[tp->tt_cpu], tp, t_outputq);
		tp->t_flags &= ~TF_ONOUTPUTQ;
	}

	/*
	 * If we got enough samples through the srtt filter,
	 * save the rtt and rttvar in the routing entry.
	 * 'Enough' is arbitrarily defined as the 16 samples.
	 * 16 samples is enough for the srtt filter to converge
	 * to within 5% of the correct value; fewer samples and
	 * we could save a very bogus rtt.
	 *
	 * Don't update the default route's characteristics and don't
	 * update anything that the user "locked".
	 */
	if (tp->t_rttupdated >= 16) {
		u_long i = 0;

		if (isipv6) {
			struct sockaddr_in6 *sin6;

			if ((rt = inp->in6p_route.ro_rt) == NULL)
				goto no_valid_rt;
			sin6 = (struct sockaddr_in6 *)rt_key(rt);
			if (IN6_IS_ADDR_UNSPECIFIED(&sin6->sin6_addr))
				goto no_valid_rt;
		} else {
			if ((rt = inp->inp_route.ro_rt) == NULL ||
			    ((struct sockaddr_in *)rt_key(rt))->
			     sin_addr.s_addr == INADDR_ANY)
				goto no_valid_rt;
		}

		if (!(rt->rt_rmx.rmx_locks & RTV_RTT)) {
			i = tp->t_srtt * (RTM_RTTUNIT / (hz * TCP_RTT_SCALE));
			if (rt->rt_rmx.rmx_rtt && i)
				/*
				 * filter this update to half the old & half
				 * the new values, converting scale.
				 * See route.h and tcp_var.h for a
				 * description of the scaling constants.
				 */
				rt->rt_rmx.rmx_rtt =
				    (rt->rt_rmx.rmx_rtt + i) / 2;
			else
				rt->rt_rmx.rmx_rtt = i;
			tcpstat.tcps_cachedrtt++;
		}
		if (!(rt->rt_rmx.rmx_locks & RTV_RTTVAR)) {
			i = tp->t_rttvar *
			    (RTM_RTTUNIT / (hz * TCP_RTTVAR_SCALE));
			if (rt->rt_rmx.rmx_rttvar && i)
				rt->rt_rmx.rmx_rttvar =
				    (rt->rt_rmx.rmx_rttvar + i) / 2;
			else
				rt->rt_rmx.rmx_rttvar = i;
			tcpstat.tcps_cachedrttvar++;
		}
		/*
		 * The old comment here said:
		 * update the pipelimit (ssthresh) if it has been updated
		 * already or if a pipesize was specified & the threshold
		 * got below half the pipesize.  I.e., wait for bad news
		 * before we start updating, then update on both good
		 * and bad news.
		 *
		 * But we want to save the ssthresh even if no pipesize is
		 * specified explicitly in the route, because such
		 * connections still have an implicit pipesize specified
		 * by the global tcp_sendspace.  In the absence of a reliable
		 * way to calculate the pipesize, it will have to do.
		 */
		i = tp->snd_ssthresh;
		if (rt->rt_rmx.rmx_sendpipe != 0)
			dosavessthresh = (i < rt->rt_rmx.rmx_sendpipe / 2);
		else
			dosavessthresh = (i < so->so_snd.ssb_hiwat / 2);
		if (dosavessthresh ||
		    (!(rt->rt_rmx.rmx_locks & RTV_SSTHRESH) && (i != 0) &&
		     (rt->rt_rmx.rmx_ssthresh != 0))) {
			/*
			 * convert the limit from user data bytes to
			 * packets then to packet data bytes.
			 */
			i = (i + tp->t_maxseg / 2) / tp->t_maxseg;
			if (i < 2)
				i = 2;
			i *= (u_long)(tp->t_maxseg +
				      (isipv6 ?
				       sizeof(struct ip6_hdr) +
				       sizeof(struct tcphdr) :
				       sizeof(struct tcpiphdr)));
			if (rt->rt_rmx.rmx_ssthresh)
				rt->rt_rmx.rmx_ssthresh =
				    (rt->rt_rmx.rmx_ssthresh + i) / 2;
			else
				rt->rt_rmx.rmx_ssthresh = i;
			tcpstat.tcps_cachedssthresh++;
		}
	}

no_valid_rt:
	/* free the reassembly queue, if any */
	while ((q = TAILQ_FIRST(&tp->t_segq)) != NULL) {
		TAILQ_REMOVE(&tp->t_segq, q, tqe_q);
		m_freem(q->tqe_m);
		kfree(q, M_TSEGQ);
		atomic_add_int(&tcp_reass_qsize, -1);
	}

	/* throw away SACK blocks in scoreboard*/
	if (TCP_DO_SACK(tp))
		tcp_sack_destroy(&tp->scb);

	inp->inp_ppcb = NULL;
	soisdisconnected(so);
	/* note: pcb detached later on */

	tcp_destroy_timermsg(tp);
	tcp_output_cancel(tp);

	if (tp->t_flags & TF_LISTEN) {
		syncache_destroy(tp, tp_inh);
		tcp_pcbport_merge_oncpu(tp);
		tcp_pcbport_destroy(tp);
		if (inp_inh != NULL && inp_inh->inp_socket != NULL) {
			/*
			 * Pending sockets inheritance only needs
			 * to be done once in the current thread.
			 */
			soinherit(so, inp_inh->inp_socket);
		}
	}
	KASSERT(tp->t_pcbport == NULL, ("tcpcb port cache is not destroyed"));

	so_async_rcvd_drop(so);
	/* Drop the reference for the asynchronized pru_rcvd */
	sofree(so);

	/*
	 * NOTE:
	 * - Remove self from listen tcpcb per-cpu port cache _before_
	 *   pcbdetach.
	 * - pcbdetach removes any wildcard hash entry on the current CPU.
	 */
	tcp_pcbport_remove(inp);
#ifdef INET6
	if (isipv6)
		in6_pcbdetach(inp);
	else
#endif
		in_pcbdetach(inp);

	tcpstat.tcps_closed++;
	return (NULL);
}

static __inline void
tcp_drain_oncpu(struct inpcbinfo *pcbinfo)
{
	struct inpcbhead *head = &pcbinfo->pcblisthead;
	struct inpcb *inpb;

	/*
	 * Since we run in netisr, it is MP safe, even if
	 * we block during the inpcb list iteration, i.e.
	 * we don't need to use inpcb marker here.
	 */
	ASSERT_IN_NETISR(pcbinfo->cpu);

	LIST_FOREACH(inpb, head, inp_list) {
		struct tcpcb *tcpb;
		struct tseg_qent *te;

		if (inpb->inp_flags & INP_PLACEMARKER)
			continue;

		tcpb = intotcpcb(inpb);
		KASSERT(tcpb != NULL, ("tcp_drain_oncpu: tcpb is NULL"));

		if ((te = TAILQ_FIRST(&tcpb->t_segq)) != NULL) {
			TAILQ_REMOVE(&tcpb->t_segq, te, tqe_q);
			if (te->tqe_th->th_flags & TH_FIN)
				tcpb->t_flags &= ~TF_QUEDFIN;
			m_freem(te->tqe_m);
			kfree(te, M_TSEGQ);
			atomic_add_int(&tcp_reass_qsize, -1);
		}
	}
}

static void
tcp_drain_dispatch(netmsg_t nmsg)
{
	lwkt_replymsg(&nmsg->lmsg, 0);	/* reply ASAP */

	tcp_drain_oncpu(&tcbinfo[mycpuid]);
}

static void
tcp_drain_ipi(void *arg __unused)
{
	int cpu = mycpuid;
	struct lwkt_msg *msg = &tcp_drain_netmsg[cpu].lmsg;

	if (msg->ms_flags & MSGF_DONE)
		lwkt_sendmsg_oncpu(netisr_cpuport(cpu), msg);
}

void
tcp_drain(void)
{
	cpumask_t mask;

	if (!do_tcpdrain)
		return;

	/*
	 * Walk the tcpbs, if existing, and flush the reassembly queue,
	 * if there is one...
	 * XXX: The "Net/3" implementation doesn't imply that the TCP
	 *	reassembly queue should be flushed, but in a situation
	 *	where we're really low on mbufs, this is potentially
	 *	useful.
	 * YYY: We may consider running tcp_drain_oncpu directly here,
	 *	however, that will require M_WAITOK memory allocation
	 *	for the inpcb marker.
	 */
	CPUMASK_ASSBMASK(mask, ncpus2);
	CPUMASK_ANDMASK(mask, smp_active_mask);
	if (CPUMASK_TESTNZERO(mask))
		lwkt_send_ipiq_mask(mask, tcp_drain_ipi, NULL);
}

/*
 * Notify a tcp user of an asynchronous error;
 * store error as soft error, but wake up user
 * (for now, won't do anything until can select for soft error).
 *
 * Do not wake up user since there currently is no mechanism for
 * reporting soft errors (yet - a kqueue filter may be added).
 */
static void
tcp_notify(struct inpcb *inp, int error)
{
	struct tcpcb *tp = intotcpcb(inp);

	/*
	 * Ignore some errors if we are hooked up.
	 * If connection hasn't completed, has retransmitted several times,
	 * and receives a second error, give up now.  This is better
	 * than waiting a long time to establish a connection that
	 * can never complete.
	 */
	if (tp->t_state == TCPS_ESTABLISHED &&
	    (error == EHOSTUNREACH || error == ENETUNREACH ||
	     error == EHOSTDOWN)) {
		return;
	} else if (tp->t_state < TCPS_ESTABLISHED && tp->t_rxtshift > 3 &&
	    tp->t_softerror)
		tcp_drop(tp, error);
	else
		tp->t_softerror = error;
#if 0
	wakeup(&so->so_timeo);
#endif
}

static int
tcp_pcblist(SYSCTL_HANDLER_ARGS)
{
	int error, i, n;
	struct inpcb *marker;
	struct inpcb *inp;
	int origcpu, ccpu;

	error = 0;
	n = 0;

	/*
	 * The process of preparing the TCB list is too time-consuming and
	 * resource-intensive to repeat twice on every request.
	 */
	if (req->oldptr == NULL) {
		for (ccpu = 0; ccpu < ncpus2; ++ccpu)
			n += tcbinfo[ccpu].ipi_count;
		req->oldidx = (n + n/8 + 10) * sizeof(struct xtcpcb);
		return (0);
	}

	if (req->newptr != NULL)
		return (EPERM);

	marker = kmalloc(sizeof(struct inpcb), M_TEMP, M_WAITOK|M_ZERO);
	marker->inp_flags |= INP_PLACEMARKER;

	/*
	 * OK, now we're committed to doing something.  Run the inpcb list
	 * for each cpu in the system and construct the output.  Use a
	 * list placemarker to deal with list changes occurring during
	 * copyout blockages (but otherwise depend on being on the correct
	 * cpu to avoid races).
	 */
	origcpu = mycpu->gd_cpuid;
	for (ccpu = 0; ccpu < ncpus2 && error == 0; ++ccpu) {
		struct xtcpcb xt;
		void *inp_ppcb;

		lwkt_migratecpu(ccpu);

		n = tcbinfo[ccpu].ipi_count;

		LIST_INSERT_HEAD(&tcbinfo[ccpu].pcblisthead, marker, inp_list);
		i = 0;
		while ((inp = LIST_NEXT(marker, inp_list)) != NULL && i < n) {
			/*
			 * process a snapshot of pcbs, ignoring placemarkers
			 * and using our own to allow SYSCTL_OUT to block.
			 */
			LIST_REMOVE(marker, inp_list);
			LIST_INSERT_AFTER(inp, marker, inp_list);

			if (inp->inp_flags & INP_PLACEMARKER)
				continue;
			if (prison_xinpcb(req->td, inp))
				continue;

			xt.xt_len = sizeof xt;
			bcopy(inp, &xt.xt_inp, sizeof *inp);
			inp_ppcb = inp->inp_ppcb;
			if (inp_ppcb != NULL)
				bcopy(inp_ppcb, &xt.xt_tp, sizeof xt.xt_tp);
			else
				bzero(&xt.xt_tp, sizeof xt.xt_tp);
			if (inp->inp_socket)
				sotoxsocket(inp->inp_socket, &xt.xt_socket);
			if ((error = SYSCTL_OUT(req, &xt, sizeof xt)) != 0)
				break;
			++i;
		}
		LIST_REMOVE(marker, inp_list);
		if (error == 0 && i < n) {
			bzero(&xt, sizeof xt);
			xt.xt_len = sizeof xt;
			while (i < n) {
				error = SYSCTL_OUT(req, &xt, sizeof xt);
				if (error)
					break;
				++i;
			}
		}
	}

	/*
	 * Make sure we are on the same cpu we were on originally, since
	 * higher level callers expect this.  Also don't pollute caches with
	 * migrated userland data by (eventually) returning to userland
	 * on a different cpu.
	 */
	lwkt_migratecpu(origcpu);
	kfree(marker, M_TEMP);
	return (error);
}

SYSCTL_PROC(_net_inet_tcp, TCPCTL_PCBLIST, pcblist, CTLFLAG_RD, 0, 0,
    tcp_pcblist, "S,xtcpcb", "List of active TCP connections");

static int
tcp_getcred(SYSCTL_HANDLER_ARGS)
{
	struct sockaddr_in addrs[2];
	struct ucred cred0, *cred = NULL;
	struct inpcb *inp;
	int cpu, origcpu, error;

	error = priv_check(req->td, PRIV_ROOT);
	if (error != 0)
		return (error);
	error = SYSCTL_IN(req, addrs, sizeof addrs);
	if (error != 0)
		return (error);

	origcpu = mycpu->gd_cpuid;
	cpu = tcp_addrcpu(addrs[1].sin_addr.s_addr, addrs[1].sin_port,
	    addrs[0].sin_addr.s_addr, addrs[0].sin_port);

	lwkt_migratecpu(cpu);

	inp = in_pcblookup_hash(&tcbinfo[cpu], addrs[1].sin_addr,
	    addrs[1].sin_port, addrs[0].sin_addr, addrs[0].sin_port, 0, NULL);
	if (inp == NULL || inp->inp_socket == NULL) {
		error = ENOENT;
	} else if (inp->inp_socket->so_cred != NULL) {
		cred0 = *(inp->inp_socket->so_cred);
		cred = &cred0;
	}

	lwkt_migratecpu(origcpu);

	if (error)
		return (error);

	return SYSCTL_OUT(req, cred, sizeof(struct ucred));
}

SYSCTL_PROC(_net_inet_tcp, OID_AUTO, getcred, (CTLTYPE_OPAQUE | CTLFLAG_RW),
    0, 0, tcp_getcred, "S,ucred", "Get the ucred of a TCP connection");

#ifdef INET6
static int
tcp6_getcred(SYSCTL_HANDLER_ARGS)
{
	struct sockaddr_in6 addrs[2];
	struct inpcb *inp;
	int error;

	error = priv_check(req->td, PRIV_ROOT);
	if (error != 0)
		return (error);
	error = SYSCTL_IN(req, addrs, sizeof addrs);
	if (error != 0)
		return (error);

	inp = in6_pcblookup_hash(&tcbinfo[0],
	    &addrs[1].sin6_addr, addrs[1].sin6_port,
	    &addrs[0].sin6_addr, addrs[0].sin6_port, 0, NULL);
	if (inp == NULL || inp->inp_socket == NULL) {
		error = ENOENT;
	} else {
		error = SYSCTL_OUT(req, inp->inp_socket->so_cred,
		    sizeof(struct ucred));
	}
	return (error);
}

SYSCTL_PROC(_net_inet6_tcp6, OID_AUTO, getcred, (CTLTYPE_OPAQUE | CTLFLAG_RW),
    0, 0,
    tcp6_getcred, "S,ucred", "Get the ucred of a TCP6 connection");
#endif

struct netmsg_tcp_notify {
	struct netmsg_base	base;
	inp_notify_t		nm_notify;
	struct in_addr		nm_faddr;
	int			nm_arg;
};

static void
tcp_notifyall_oncpu(netmsg_t msg)
{
	struct netmsg_tcp_notify *nm = (struct netmsg_tcp_notify *)msg;
	int nextcpu;

	in_pcbnotifyall(&tcbinfo[mycpuid], nm->nm_faddr,
			nm->nm_arg, nm->nm_notify);

	nextcpu = mycpuid + 1;
	if (nextcpu < ncpus2)
		lwkt_forwardmsg(netisr_cpuport(nextcpu), &nm->base.lmsg);
	else
		lwkt_replymsg(&nm->base.lmsg, 0);
}
, const struct sockaddr
*sa
,
1396 int *arg
, struct ip
**ip0
, int *cpuid
)
1398 struct ip
*ip
= *ip0
;
1399 struct in_addr faddr
;
1400 inp_notify_t notify
= tcp_notify
;
1402 faddr
= ((const struct sockaddr_in
*)sa
)->sin_addr
;
1403 if (sa
->sa_family
!= AF_INET
|| faddr
.s_addr
== INADDR_ANY
)
1406 *arg
= inetctlerrmap
[cmd
];
1407 if (cmd
== PRC_QUENCH
) {
1408 notify
= tcp_quench
;
1409 } else if (icmp_may_rst
&&
1410 (cmd
== PRC_UNREACH_ADMIN_PROHIB
||
1411 cmd
== PRC_UNREACH_PORT
||
1412 cmd
== PRC_TIMXCEED_INTRANS
) &&
1414 notify
= tcp_drop_syn_sent
;
1415 } else if (cmd
== PRC_MSGSIZE
) {
1416 const struct icmp
*icmp
= (const struct icmp
*)
1417 ((caddr_t
)ip
- offsetof(struct icmp
, icmp_ip
));
1419 *arg
= ntohs(icmp
->icmp_nextmtu
);
1420 notify
= tcp_mtudisc
;
1421 } else if (PRC_IS_REDIRECT(cmd
)) {
1423 notify
= in_rtchange
;
1424 } else if (cmd
== PRC_HOSTDEAD
) {
1426 } else if ((unsigned)cmd
>= PRC_NCMDS
|| inetctlerrmap
[cmd
] == 0) {
1430 if (cpuid
!= NULL
) {
1432 /* Go through all CPUs */
1435 const struct tcphdr
*th
;
1437 th
= (const struct tcphdr
*)
1438 ((caddr_t
)ip
+ (IP_VHL_HL(ip
->ip_vhl
) << 2));
1439 *cpuid
= tcp_addrcpu(faddr
.s_addr
, th
->th_dport
,
1440 ip
->ip_src
.s_addr
, th
->th_sport
);

void
tcp_ctlinput(netmsg_t msg)
{
	int cmd = msg->ctlinput.nm_cmd;
	struct sockaddr *sa = msg->ctlinput.nm_arg;
	struct ip *ip = msg->ctlinput.nm_extra;
	struct in_addr faddr;
	inp_notify_t notify;
	int arg, cpuid;

	notify = tcp_get_inpnotify(cmd, sa, &arg, &ip, &cpuid);
	if (notify == NULL)
		goto done;

	faddr = ((struct sockaddr_in *)sa)->sin_addr;
	if (ip != NULL) {
		const struct tcphdr *th;
		struct inpcb *inp;

		if (cpuid != mycpuid)
			goto done;

		th = (const struct tcphdr *)
		    ((caddr_t)ip + (IP_VHL_HL(ip->ip_vhl) << 2));
		inp = in_pcblookup_hash(&tcbinfo[mycpuid], faddr, th->th_dport,
		    ip->ip_src, th->th_sport, 0, NULL);
		if (inp != NULL && inp->inp_socket != NULL) {
			tcp_seq icmpseq = htonl(th->th_seq);
			struct tcpcb *tp = intotcpcb(inp);

			if (SEQ_GEQ(icmpseq, tp->snd_una) &&
			    SEQ_LT(icmpseq, tp->snd_max))
				notify(inp, arg);
		} else {
			struct in_conninfo inc;

			inc.inc_fport = th->th_dport;
			inc.inc_lport = th->th_sport;
			inc.inc_faddr = faddr;
			inc.inc_laddr = ip->ip_src;
#ifdef INET6
			inc.inc_isipv6 = 0;
#endif
			syncache_unreach(&inc, th);
		}
	} else if (msg->ctlinput.nm_direct) {
		if (cpuid != ncpus && cpuid != mycpuid)
			goto done;
		if (mycpuid >= ncpus2)
			goto done;

		in_pcbnotifyall(&tcbinfo[mycpuid], faddr, arg, notify);
	} else {
		struct netmsg_tcp_notify *nm;

		ASSERT_IN_NETISR(0);
		nm = kmalloc(sizeof(*nm), M_LWKTMSG, M_INTWAIT);
		netmsg_init(&nm->base, NULL, &netisr_afree_rport,
			    0, tcp_notifyall_oncpu);
		nm->nm_faddr = faddr;
		nm->nm_arg = arg;
		nm->nm_notify = notify;

		lwkt_sendmsg(netisr_cpuport(0), &nm->base.lmsg);
	}
done:
	lwkt_replymsg(&msg->lmsg, 0);
}

#ifdef INET6
void
tcp6_ctlinput(netmsg_t msg)
{
	int cmd = msg->ctlinput.nm_cmd;
	struct sockaddr *sa = msg->ctlinput.nm_arg;
	void *d = msg->ctlinput.nm_extra;
	struct tcphdr th;
	inp_notify_t notify = tcp_notify;
	struct ip6_hdr *ip6;
	struct mbuf *m;
	struct ip6ctlparam *ip6cp = NULL;
	const struct sockaddr_in6 *sa6_src = NULL;
	int off;
	struct tcp_portonly {
		u_int16_t th_dport;
		u_int16_t th_sport;
	} *thp;
	int arg = 0;

	if (sa->sa_family != AF_INET6 ||
	    sa->sa_len != sizeof(struct sockaddr_in6)) {
		goto out;
	}

	if (cmd == PRC_QUENCH)
		notify = tcp_quench;
	else if (cmd == PRC_MSGSIZE) {
		struct ip6ctlparam *ip6cp = d;
		struct icmp6_hdr *icmp6 = ip6cp->ip6c_icmp6;

		arg = ntohl(icmp6->icmp6_mtu);
		notify = tcp_mtudisc;
	} else if (!PRC_IS_REDIRECT(cmd) &&
		 ((unsigned)cmd > PRC_NCMDS || inet6ctlerrmap[cmd] == 0)) {
		goto out;
	}

	/* if the parameter is from icmp6, decode it. */
	if (d != NULL) {
		ip6cp = (struct ip6ctlparam *)d;
		m = ip6cp->ip6c_m;
		ip6 = ip6cp->ip6c_ip6;
		off = ip6cp->ip6c_off;
		sa6_src = ip6cp->ip6c_src;
	} else {
		m = NULL;
		ip6 = NULL;
		off = 0;	/* fool gcc */
		sa6_src = &sa6_any;
	}

	if (ip6 != NULL) {
		struct in_conninfo inc;
		/*
		 * XXX: We assume that when IPV6 is non NULL,
		 * M and OFF are valid.
		 */

		/* check if we can safely examine src and dst ports */
		if (m->m_pkthdr.len < off + sizeof *thp)
			goto out;

		bzero(&th, sizeof th);
		m_copydata(m, off, sizeof *thp, (caddr_t)&th);

		in6_pcbnotify(&tcbinfo[0], sa, th.th_dport,
		    (struct sockaddr *)ip6cp->ip6c_src,
		    th.th_sport, cmd, arg, notify);

		inc.inc_fport = th.th_dport;
		inc.inc_lport = th.th_sport;
		inc.inc6_faddr = ((struct sockaddr_in6 *)sa)->sin6_addr;
		inc.inc6_laddr = ip6cp->ip6c_src->sin6_addr;
		inc.inc_isipv6 = 1;
		syncache_unreach(&inc, &th);
	} else {
		in6_pcbnotify(&tcbinfo[0], sa, 0,
		    (const struct sockaddr *)sa6_src, 0, cmd, arg, notify);
	}
out:
	lwkt_replymsg(&msg->ctlinput.base.lmsg, 0);
}
#endif

/*
 * Following is where TCP initial sequence number generation occurs.
 *
 * There are two places where we must use initial sequence numbers:
 * 1.  In SYN-ACK packets.
 * 2.  In SYN packets.
 *
 * All ISNs for SYN-ACK packets are generated by the syncache.  See
 * tcp_syncache.c for details.
 *
 * The ISNs in SYN packets must be monotonic; TIME_WAIT recycling
 * depends on this property.  In addition, these ISNs should be
 * unguessable so as to prevent connection hijacking.  To satisfy
 * the requirements of this situation, the algorithm outlined in
 * RFC 1948 is used to generate sequence numbers.
 *
 * Implementation details:
 *
 * Time is based off the system timer, and is corrected so that it
 * increases by one megabyte per second.  This allows for proper
 * recycling on high speed LANs while still leaving over an hour
 * before rollover.
 *
 * net.inet.tcp.isn_reseed_interval controls the number of seconds
 * between seeding of isn_secret.  This is normally set to zero,
 * as reseeding should not be necessary.
 */

#define ISN_BYTES_PER_SECOND 1048576
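
/*
 * Illustrative sketch of the RFC 1948 style computation performed by
 * tcp_new_isn() below (IPv4 case assumed for the example):
 *
 *	ISN = MD5(foreign port, local port, foreign addr, local addr, secret)
 *	      + ticks * (ISN_BYTES_PER_SECOND / hz)
 *
 * The MD5 term makes the sequence space of each connection 4-tuple
 * unguessable, while the ticks term advances every ISN monotonically at
 * roughly ISN_BYTES_PER_SECOND (1 MB/s), which is what the comment above
 * relies on for TIME_WAIT recycling.
 */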

u_char isn_secret[32];
int isn_last_reseed;
MD5_CTX isn_ctx;

tcp_seq
tcp_new_isn(struct tcpcb *tp)
{
	u_int32_t md5_buffer[4];
	tcp_seq new_isn;

	/* Seed if this is the first use, reseed if requested. */
	if ((isn_last_reseed == 0) || ((tcp_isn_reseed_interval > 0) &&
	    (((u_int)isn_last_reseed + (u_int)tcp_isn_reseed_interval*hz)
	     < (u_int)ticks))) {
		read_random_unlimited(&isn_secret, sizeof isn_secret);
		isn_last_reseed = ticks;
	}

	/* Compute the md5 hash and return the ISN. */
	MD5Init(&isn_ctx);
	MD5Update(&isn_ctx, (u_char *)&tp->t_inpcb->inp_fport, sizeof(u_short));
	MD5Update(&isn_ctx, (u_char *)&tp->t_inpcb->inp_lport, sizeof(u_short));
#ifdef INET6
	if (INP_ISIPV6(tp->t_inpcb)) {
		MD5Update(&isn_ctx, (u_char *) &tp->t_inpcb->in6p_faddr,
			  sizeof(struct in6_addr));
		MD5Update(&isn_ctx, (u_char *) &tp->t_inpcb->in6p_laddr,
			  sizeof(struct in6_addr));
	} else
#endif
	{
		MD5Update(&isn_ctx, (u_char *) &tp->t_inpcb->inp_faddr,
			  sizeof(struct in_addr));
		MD5Update(&isn_ctx, (u_char *) &tp->t_inpcb->inp_laddr,
			  sizeof(struct in_addr));
	}
	MD5Update(&isn_ctx, (u_char *) &isn_secret, sizeof(isn_secret));
	MD5Final((u_char *) &md5_buffer, &isn_ctx);
	new_isn = (tcp_seq) md5_buffer[0];
	new_isn += ticks * (ISN_BYTES_PER_SECOND / hz);
	return (new_isn);
}

/*
 * When a source quench is received, close congestion window
 * to one segment.  We will gradually open it again as we proceed.
 */
void
tcp_quench(struct inpcb *inp, int error)
{
	struct tcpcb *tp = intotcpcb(inp);

	KASSERT(tp != NULL, ("tcp_quench: tp is NULL"));
	tp->snd_cwnd = tp->t_maxseg;
}

/*
 * When a specific ICMP unreachable message is received and the
 * connection state is SYN-SENT, drop the connection.  This behavior
 * is controlled by the icmp_may_rst sysctl.
 */
void
tcp_drop_syn_sent(struct inpcb *inp, int error)
{
	struct tcpcb *tp = intotcpcb(inp);

	KASSERT(tp != NULL, ("tcp_drop_syn_sent: tp is NULL"));
	if (tp->t_state == TCPS_SYN_SENT)
		tcp_drop(tp, error);
}

/*
 * When a `need fragmentation' ICMP is received, update our idea of the MSS
 * based on the new value in the route.  Also nudge TCP to send something,
 * since we know the packet we just sent was dropped.
 * This duplicates some code in the tcp_mss() function in tcp_input.c.
 */
void
tcp_mtudisc(struct inpcb *inp, int mtu)
{
	struct tcpcb *tp = intotcpcb(inp);
	struct rtentry *rt;
	struct socket *so = inp->inp_socket;
	int maxopd, mss;
#ifdef INET6
	boolean_t isipv6 = INP_ISIPV6(inp);
#else
	const boolean_t isipv6 = FALSE;
#endif

	KASSERT(tp != NULL, ("tcp_mtudisc: tp is NULL"));

	/*
	 * If no MTU is provided in the ICMP message, use the
	 * next lower likely value, as specified in RFC 1191.
	 */
	if (mtu == 0) {
		int oldmtu;

		oldmtu = tp->t_maxopd +
		    (isipv6 ?
		     sizeof(struct ip6_hdr) + sizeof(struct tcphdr) :
		     sizeof(struct tcpiphdr));
		mtu = ip_next_mtu(oldmtu, 0);
	}

	if (isipv6)
		rt = tcp_rtlookup6(&inp->inp_inc);
	else
		rt = tcp_rtlookup(&inp->inp_inc);
	if (rt != NULL) {
		if (rt->rt_rmx.rmx_mtu != 0 && rt->rt_rmx.rmx_mtu < mtu)
			mtu = rt->rt_rmx.rmx_mtu;

		maxopd = mtu -
		    (isipv6 ?
		     sizeof(struct ip6_hdr) + sizeof(struct tcphdr) :
		     sizeof(struct tcpiphdr));

		/*
		 * XXX - The following conditional probably violates the TCP
		 * spec.  The problem is that, since we don't know the
		 * other end's MSS, we are supposed to use a conservative
		 * default.  But, if we do that, then MTU discovery will
		 * never actually take place, because the conservative
		 * default is much less than the MTUs typically seen
		 * on the Internet today.  For the moment, we'll sweep
		 * this under the carpet.
		 *
		 * The conservative default might not actually be a problem
		 * if the only case this occurs is when sending an initial
		 * SYN with options and data to a host we've never talked
		 * to before.  Then, they will reply with an MSS value which
		 * will get recorded and the new parameters should get
		 * recomputed.  For Further Study.
		 */
		if (rt->rt_rmx.rmx_mssopt && rt->rt_rmx.rmx_mssopt < maxopd)
			maxopd = rt->rt_rmx.rmx_mssopt;
	} else {
		maxopd = mtu -
		    (isipv6 ?
		     sizeof(struct ip6_hdr) + sizeof(struct tcphdr) :
		     sizeof(struct tcpiphdr));
	}

	if (tp->t_maxopd <= maxopd)
		return;
	tp->t_maxopd = maxopd;

	mss = maxopd;
	if ((tp->t_flags & (TF_REQ_TSTMP | TF_RCVD_TSTMP | TF_NOOPT)) ==
	    (TF_REQ_TSTMP | TF_RCVD_TSTMP))
		mss -= TCPOLEN_TSTAMP_APPA;

	/* round down to multiple of MCLBYTES */
#if	(MCLBYTES & (MCLBYTES - 1)) == 0    /* test if MCLBYTES power of 2 */
	if (mss > MCLBYTES)
		mss &= ~(MCLBYTES - 1);
#else
	if (mss > MCLBYTES)
		mss = (mss / MCLBYTES) * MCLBYTES;
#endif

	if (so->so_snd.ssb_hiwat < mss)
		mss = so->so_snd.ssb_hiwat;

	tp->t_maxseg = mss;
	tp->snd_nxt = tp->snd_una;
	tcp_output(tp);
	tcpstat.tcps_mturesent++;
}
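
/*
 * Worked example (illustrative, assuming sizeof(struct tcpiphdr) == 40 and
 * TCPOLEN_TSTAMP_APPA == 12): for a plain IPv4 path where the ICMP "need
 * fragmentation" message reports an MTU of 1400 and no smaller route MTU
 * or cached MSS option applies:
 *
 *	maxopd = 1400 - 40 = 1360
 *	mss    = 1360 - 12 = 1348	(timestamps in use)
 *
 * 1348 is below MCLBYTES, so the cluster rounding above leaves it alone;
 * the connection then retransmits from snd_una with the smaller segments.
 */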

/*
 * Look-up the routing entry to the peer of this inpcb.  If no route
 * is found and it cannot be allocated then return NULL.  This routine
 * is called by TCP routines that access the rmx structure and by tcp_mss
 * to get the interface MTU.
 */
struct rtentry *
tcp_rtlookup(struct in_conninfo *inc)
{
	struct route *ro = &inc->inc_route;

	if (ro->ro_rt == NULL || !(ro->ro_rt->rt_flags & RTF_UP)) {
		/* No route yet, so try to acquire one */
		if (inc->inc_faddr.s_addr != INADDR_ANY) {
			/*
			 * unused portions of the structure MUST be zero'd
			 * out because rtalloc() treats it as opaque data
			 */
			bzero(&ro->ro_dst, sizeof(struct sockaddr_in));
			ro->ro_dst.sa_family = AF_INET;
			ro->ro_dst.sa_len = sizeof(struct sockaddr_in);
			((struct sockaddr_in *) &ro->ro_dst)->sin_addr =
			    inc->inc_faddr;
			rtalloc(ro);
		}
	}
	return (ro->ro_rt);
}

#ifdef INET6
struct rtentry *
tcp_rtlookup6(struct in_conninfo *inc)
{
	struct route_in6 *ro6 = &inc->inc6_route;

	if (ro6->ro_rt == NULL || !(ro6->ro_rt->rt_flags & RTF_UP)) {
		/* No route yet, so try to acquire one */
		if (!IN6_IS_ADDR_UNSPECIFIED(&inc->inc6_faddr)) {
			/*
			 * unused portions of the structure MUST be zero'd
			 * out because rtalloc() treats it as opaque data
			 */
			bzero(&ro6->ro_dst, sizeof(struct sockaddr_in6));
			ro6->ro_dst.sin6_family = AF_INET6;
			ro6->ro_dst.sin6_len = sizeof(struct sockaddr_in6);
			ro6->ro_dst.sin6_addr = inc->inc6_faddr;
			rtalloc((struct route *)ro6);
		}
	}
	return (ro6->ro_rt);
}
#endif

#ifdef IPSEC
/* compute ESP/AH header size for TCP, including outer IP header. */
size_t
ipsec_hdrsiz_tcp(struct tcpcb *tp)
{
	struct inpcb *inp;
	struct mbuf *m;
	size_t hdrsiz = 0;
	struct ip *ip;
	struct tcphdr *th;

	if ((tp == NULL) || ((inp = tp->t_inpcb) == NULL))
		return (0);
	MGETHDR(m, M_NOWAIT, MT_DATA);
	if (!m)
		return (0);

#ifdef INET6
	if (INP_ISIPV6(inp)) {
		struct ip6_hdr *ip6 = mtod(m, struct ip6_hdr *);

		th = (struct tcphdr *)(ip6 + 1);
		m->m_pkthdr.len = m->m_len =
		    sizeof(struct ip6_hdr) + sizeof(struct tcphdr);
		tcp_fillheaders(tp, ip6, th, FALSE);
		hdrsiz = ipsec6_hdrsiz(m, IPSEC_DIR_OUTBOUND, inp);
	} else
#endif
	{
		ip = mtod(m, struct ip *);
		th = (struct tcphdr *)(ip + 1);
		m->m_pkthdr.len = m->m_len = sizeof(struct tcpiphdr);
		tcp_fillheaders(tp, ip, th, FALSE);
		hdrsiz = ipsec4_hdrsiz(m, IPSEC_DIR_OUTBOUND, inp);
	}

	m_free(m);
	return (hdrsiz);
}
#endif

/*
 * TCP BANDWIDTH DELAY PRODUCT WINDOW LIMITING
 *
 * This code attempts to calculate the bandwidth-delay product as a
 * means of determining the optimal window size to maximize bandwidth,
 * minimize RTT, and avoid the over-allocation of buffers on interfaces and
 * routers.  This code also does a fairly good job keeping RTTs in check
 * across slow links like modems.  We implement an algorithm which is very
 * similar to (but not meant to be) TCP/Vegas.  The code operates on the
 * transmitter side of a TCP connection and so only affects the transmit
 * side of the connection.
 *
 * BACKGROUND: TCP makes no provision for the management of buffer space
 * at the end points or at the intermediate routers and switches.  A TCP
 * stream, whether using NewReno or not, will eventually buffer as
 * many packets as it is able and the only reason this typically works is
 * due to the fairly small default buffers made available for a connection
 * (typically 16K or 32K).  As machines use larger windows and/or window
 * scaling it is now fairly easy for even a single TCP connection to blow-out
 * all available buffer space not only on the local interface, but on
 * intermediate routers and switches as well.  NewReno makes a misguided
 * attempt to 'solve' this problem by waiting for an actual failure to occur,
 * then backing off, then steadily increasing the window again until another
 * failure occurs, ad-infinitum.  This results in terrible oscillation that
 * is only made worse as network loads increase and the idea of intentionally
 * blowing out network buffers is, frankly, a terrible way to manage network
 * resources.
 *
 * It is far better to limit the transmit window prior to the failure
 * condition being achieved.  There are two general ways to do this:  First
 * you can 'scan' through different transmit window sizes and locate the
 * point where the RTT stops increasing, indicating that you have filled the
 * pipe, then scan backwards until you note that RTT stops decreasing, then
 * repeat ad-infinitum.  This method works in principle but has severe
 * implementation issues due to RTT variances, timer granularity, and
 * instability in the algorithm which can lead to many false positives and
 * create oscillations as well as interact badly with other TCP streams
 * implementing the same algorithm.
 *
 * The second method is to limit the window to the bandwidth delay product
 * of the link.  This is the method we implement.  RTT variances and our
 * own manipulation of the congestion window, bwnd, can potentially
 * destabilize the algorithm.  For this reason we have to stabilize the
 * elements used to calculate the window.  We do this by using the minimum
 * observed RTT, the long term average of the observed bandwidth, and
 * by adding two segments worth of slop.  It isn't perfect but it is able
 * to react to changing conditions and gives us a very stable basis on
 * which to extend the algorithm.
 */
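
/*
 * Sketch of the resulting window computation in tcp_xmit_bandwidth_limit()
 * below (illustrative; t_srtt/t_rttvar are the scaled in-kernel values):
 *
 *	bw   = long term average of (acked bytes * hz / delta_ticks)
 *	bw  += bw * tcp_inflight_stab / 1000	(e.g. +5% for stab = 50)
 *	bwnd = bw * USERTT / (hz << TCP_RTT_SHIFT) + 2 * t_maxseg
 *
 * where USERTT = (t_srtt + t_rttvar) + tcp_inflight_adjrtt.  The result is
 * then clamped between tcp_inflight_min, tcp_inflight_max and a floor of
 * two segments before being stored in snd_bwnd.
 */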
1953 tcp_xmit_bandwidth_limit(struct tcpcb
*tp
, tcp_seq ack_seq
)
1962 * If inflight_enable is disabled in the middle of a tcp connection,
1963 * make sure snd_bwnd is effectively disabled.
1965 if (!tcp_inflight_enable
) {
1966 tp
->snd_bwnd
= TCP_MAXWIN
<< TCP_MAX_WINSHIFT
;
1967 tp
->snd_bandwidth
= 0;
1972 * Validate the delta time. If a connection is new or has been idle
1973 * a long time we have to reset the bandwidth calculator.
1977 delta_ticks
= save_ticks
- tp
->t_bw_rtttime
;
1978 if (tp
->t_bw_rtttime
== 0 || delta_ticks
< 0 || delta_ticks
> hz
* 10) {
1979 tp
->t_bw_rtttime
= save_ticks
;
1980 tp
->t_bw_rtseq
= ack_seq
;
1981 if (tp
->snd_bandwidth
== 0)
1982 tp
->snd_bandwidth
= tcp_inflight_start
;
1987 * A delta of at least 1 tick is required. Waiting 2 ticks will
1988 * result in better (bw) accuracy. More than that and the ramp-up
1991 if (delta_ticks
== 0 || delta_ticks
== 1)
1995 * Sanity check, plus ignore pure window update acks.
1997 if ((int)(ack_seq
- tp
->t_bw_rtseq
) <= 0)
2001 * Figure out the bandwidth. Due to the tick granularity this
2002 * is a very rough number and it MUST be averaged over a fairly
2003 * long period of time. XXX we need to take into account a link
2004 * that is not using all available bandwidth, but for now our
2005 * slop will ramp us up if this case occurs and the bandwidth later
2008 ibw
= (int64_t)(ack_seq
- tp
->t_bw_rtseq
) * hz
/ delta_ticks
;
2009 tp
->t_bw_rtttime
= save_ticks
;
2010 tp
->t_bw_rtseq
= ack_seq
;
2011 bw
= ((int64_t)tp
->snd_bandwidth
* 15 + ibw
) >> 4;
2013 tp
->snd_bandwidth
= bw
;
2016 * Calculate the semi-static bandwidth delay product, plus two maximal
2017 * segments. The additional slop puts us squarely in the sweet
2018 * spot and also handles the bandwidth run-up case. Without the
2019 * slop we could be locking ourselves into a lower bandwidth.
2021 * At very high speeds the bw calculation can become overly sensitive
2022 * and error prone when delta_ticks is low (e.g. usually 1). To deal
2023 * with the problem the stab must be scaled to the bw. A stab of 50
2024 * (the default) increases the bw for the purposes of the bwnd
2025 * calculation by 5%.
2027 * Situations Handled:
2028 * (1) Prevents over-queueing of packets on LANs, especially on
2029 * high speed LANs, allowing larger TCP buffers to be
2030 * specified, and also does a good job preventing
2031 * over-queueing of packets over choke points like modems
2032 * (at least for the transmit side).
2034 * (2) Is able to handle changing network loads (bandwidth
2035 * drops so bwnd drops, bandwidth increases so bwnd
2038 * (3) Theoretically should stabilize in the face of multiple
2039 * connections implementing the same algorithm (this may need
2042 * (4) Stability value (defaults to 20 = 2 maximal packets) can
2043 * be adjusted with a sysctl but typically only needs to be on
2044 * very slow connections. A value no smaller then 5 should
2045 * be used, but only reduce this default if you have no other
2049 #define USERTT ((tp->t_srtt + tp->t_rttvar) + tcp_inflight_adjrtt)
2050 bw
+= bw
* tcp_inflight_stab
/ 1000;
2051 bwnd
= (int64_t)bw
* USERTT
/ (hz
<< TCP_RTT_SHIFT
) +
2052 (int)tp
->t_maxseg
* 2;
2055 if (tcp_inflight_debug
> 0) {
2057 if ((u_int
)(save_ticks
- ltime
) >= hz
/ tcp_inflight_debug
) {
2059 kprintf("%p ibw %ld bw %ld rttvar %d srtt %d "
2060 "bwnd %ld delta %d snd_win %ld\n",
2061 tp
, ibw
, bw
, tp
->t_rttvar
, tp
->t_srtt
,
2062 bwnd
, delta_ticks
, tp
->snd_wnd
);
2065 if ((long)bwnd
< tcp_inflight_min
)
2066 bwnd
= tcp_inflight_min
;
2067 if (bwnd
> tcp_inflight_max
)
2068 bwnd
= tcp_inflight_max
;
2069 if ((long)bwnd
< tp
->t_maxseg
* 2)
2070 bwnd
= tp
->t_maxseg
* 2;
2071 tp
->snd_bwnd
= bwnd
;
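
/*
 * Rough worked example of the bwnd calculation above (the numbers are
 * illustrative only, not taken from any particular measurement): assume
 * the averaged bandwidth bw is 1,000,000 bytes/sec and tcp_inflight_stab
 * is at its default of 50, so bw is bumped by 5% to 1,050,000.  If USERTT
 * scaled by (hz << TCP_RTT_SHIFT) corresponds to roughly a 40ms smoothed
 * RTT and t_maxseg is 1448, then
 *
 *	bwnd ~= 1,050,000 * 0.040 + 2 * 1448 ~= 44,896 bytes
 *
 * which, after the min/max clamps, becomes snd_bwnd and limits how much
 * unacknowledged data this connection keeps in flight.
 */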

static void
tcp_rmx_iwsegs(struct tcpcb *tp, u_long *maxsegs, u_long *capsegs)
{
	struct inpcb *inp = tp->t_inpcb;
	struct rtentry *rt;
#ifdef INET6
	boolean_t isipv6 = INP_ISIPV6(inp);
#else
	const boolean_t isipv6 = FALSE;
#endif

	/* Clamp the global defaults to sane values. */
	if (tcp_iw_maxsegs < TCP_IW_MAXSEGS_DFLT)
		tcp_iw_maxsegs = TCP_IW_MAXSEGS_DFLT;
	if (tcp_iw_capsegs < TCP_IW_CAPSEGS_DFLT)
		tcp_iw_capsegs = TCP_IW_CAPSEGS_DFLT;

	if (isipv6)
		rt = tcp_rtlookup6(&inp->inp_inc);
	else
		rt = tcp_rtlookup(&inp->inp_inc);
	if (rt == NULL ||
	    rt->rt_rmx.rmx_iwmaxsegs < TCP_IW_MAXSEGS_DFLT ||
	    rt->rt_rmx.rmx_iwcapsegs < TCP_IW_CAPSEGS_DFLT) {
		*maxsegs = tcp_iw_maxsegs;
		*capsegs = tcp_iw_capsegs;
	} else {
		*maxsegs = rt->rt_rmx.rmx_iwmaxsegs;
		*capsegs = rt->rt_rmx.rmx_iwcapsegs;
	}
}
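
/*
 * Note: tcp_rmx_iwsegs() prefers the per-route rmx_iwmaxsegs/rmx_iwcapsegs
 * metrics only when both are at least the compiled-in defaults; a missing
 * route or undersized metrics fall back to the tcp_iw_maxsegs and
 * tcp_iw_capsegs globals, which are themselves clamped to the defaults
 * above.
 */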

u_long
tcp_initial_window(struct tcpcb *tp)
{
	if (tcp_do_rfc3390) {
		/*
		 * RFC3390:
		 * "If the SYN or SYN/ACK is lost, the initial window
		 *  used by a sender after a correctly transmitted SYN
		 *  MUST be one segment consisting of MSS bytes."
		 *
		 * However, we do something a little bit more aggressive
		 * than RFC3390 here:
		 * - Only if the time spent in SYN or SYN|ACK retransmission
		 *   is >= 3 seconds, the IW is reduced.  We do this mainly
		 *   because when RFC3390 was published, the initial RTO was
		 *   still 3 seconds (the threshold we test here), while
		 *   after RFC6298, the initial RTO is 1 second.  This
		 *   behaviour probably still falls within the spirit of
		 *   RFC3390.
		 * - When IW is reduced, 2*MSS is used instead of 1*MSS.
		 *   Mainly to avoid sender and receiver deadlock until
		 *   the delayed ACK timer expires.  And even RFC2581 does
		 *   not try to reduce IW upon SYN or SYN|ACK retransmission
		 *   timeout.
		 *
		 * See also:
		 * http://tools.ietf.org/html/draft-ietf-tcpm-initcwnd-03
		 */
		if (tp->t_rxtsyn >= TCPTV_RTOBASE3) {
			return (2 * tp->t_maxseg);
		} else {
			u_long maxsegs, capsegs;

			tcp_rmx_iwsegs(tp, &maxsegs, &capsegs);
			return min(maxsegs * tp->t_maxseg,
				   max(2 * tp->t_maxseg, capsegs * 1460));
		}
	} else {
		/*
		 * Even RFC2581 (back to 1999) allows 2*SMSS IW.
		 *
		 * Mainly to avoid sender and receiver deadlock
		 * until the delayed ACK timer expires.
		 */
		return (2 * tp->t_maxseg);
	}
}
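
/*
 * Example of the RFC3390-style computation in tcp_initial_window() above,
 * assuming maxsegs/capsegs come back as RFC3390's own numbers (4 and 3,
 * purely illustrative here) and an MSS of 1460:
 *
 *	min(4 * 1460, max(2 * 1460, 3 * 1460)) = 4380 bytes
 *
 * i.e. a three segment initial window.  With a small MSS such as 536 the
 * maxsegs cap wins instead:
 *
 *	min(4 * 536, max(2 * 536, 3 * 1460)) = 2144 bytes
 */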

#ifdef TCP_SIGNATURE
/*
 * Compute TCP-MD5 hash of a TCP segment. (RFC2385)
 *
 * We do this over ip, tcphdr, segment data, and the key in the SADB.
 * When called from tcp_input(), we can be sure that th_sum has been
 * zeroed out and verified already.
 *
 * Return 0 if successful, otherwise return -1.
 *
 * XXX The key is retrieved from the system's PF_KEY SADB, by keying a
 * search with the destination IP address, and a 'magic SPI' to be
 * determined by the application.  This is hardcoded elsewhere to 1179
 * right now.  Another branch of this code exists which uses the SPD to
 * specify per-application flows but it is unstable.
 */
int
tcpsignature_compute(
	struct mbuf *m,		/* mbuf chain */
	int len,		/* length of TCP data */
	int optlen,		/* length of TCP options */
	u_char *buf,		/* storage for MD5 digest */
	u_int direction)	/* direction of flow */
{
	struct ippseudo ippseudo;
	MD5_CTX ctx;
	int doff;
	struct ip *ip;
	struct ipovly *ipovly;
	struct secasvar *sav;
	struct tcphdr *th;
#ifdef INET6
	struct ip6_hdr *ip6;
	struct in6_addr in6;
	uint32_t plen;
	uint16_t nhdr;
#endif /* INET6 */
	u_short savecsum;

	KASSERT(m != NULL, ("passed NULL mbuf. Game over."));
	KASSERT(buf != NULL, ("passed NULL storage pointer for MD5 signature"));

	/*
	 * Extract the destination from the IP header in the mbuf.
	 */
	ip = mtod(m, struct ip *);
#ifdef INET6
	ip6 = NULL;	/* Make the compiler happy. */
#endif /* INET6 */

	/*
	 * Look up an SADB entry which matches the address found in
	 * the segment.
	 */
	switch (IP_VHL_V(ip->ip_vhl)) {
	case IPVERSION:
		sav = key_allocsa(AF_INET, (caddr_t)&ip->ip_src, (caddr_t)&ip->ip_dst,
				  IPPROTO_TCP, htonl(TCP_SIG_SPI));
		break;
#ifdef INET6
	case (IPV6_VERSION >> 4):
		ip6 = mtod(m, struct ip6_hdr *);
		sav = key_allocsa(AF_INET6, (caddr_t)&ip6->ip6_src, (caddr_t)&ip6->ip6_dst,
				  IPPROTO_TCP, htonl(TCP_SIG_SPI));
		break;
#endif /* INET6 */
	default:
		return (EINVAL);
		/* NOTREACHED */
	}
	if (sav == NULL) {
		kprintf("%s: SADB lookup failed\n", __func__);
		return (EINVAL);
	}

	MD5Init(&ctx);

	/*
	 * Step 1: Update MD5 hash with IP pseudo-header.
	 *
	 * XXX The ippseudo header MUST be digested in network byte order,
	 * or else we'll fail the regression test.  Assume all fields we've
	 * been doing arithmetic on have been in host byte order.
	 * XXX One cannot depend on ipovly->ih_len here.  When called from
	 * tcp_output(), the underlying ip_len member has not yet been set.
	 */
	switch (IP_VHL_V(ip->ip_vhl)) {
	case IPVERSION:
		ipovly = (struct ipovly *)ip;
		ippseudo.ippseudo_src = ipovly->ih_src;
		ippseudo.ippseudo_dst = ipovly->ih_dst;
		ippseudo.ippseudo_pad = 0;
		ippseudo.ippseudo_p = IPPROTO_TCP;
		ippseudo.ippseudo_len = htons(len + sizeof(struct tcphdr) + optlen);
		MD5Update(&ctx, (char *)&ippseudo, sizeof(struct ippseudo));
		th = (struct tcphdr *)((u_char *)ip + sizeof(struct ip));
		doff = sizeof(struct ip) + sizeof(struct tcphdr) + optlen;
		break;
#ifdef INET6
	/*
	 * RFC 2385, 2.0  Proposal
	 * For IPv6, the pseudo-header is as described in RFC 2460, namely the
	 * 128-bit source IPv6 address, 128-bit destination IPv6 address, zero-
	 * extended next header value (to form 32 bits), and 32-bit segment
	 * length.
	 * Note: Upper-Layer Packet Length comes before Next Header.
	 */
	case (IPV6_VERSION >> 4):
		in6 = ip6->ip6_src;
		in6_clearscope(&in6);
		MD5Update(&ctx, (char *)&in6, sizeof(struct in6_addr));
		in6 = ip6->ip6_dst;
		in6_clearscope(&in6);
		MD5Update(&ctx, (char *)&in6, sizeof(struct in6_addr));
		plen = htonl(len + sizeof(struct tcphdr) + optlen);
		MD5Update(&ctx, (char *)&plen, sizeof(uint32_t));
		nhdr = 0;
		MD5Update(&ctx, (char *)&nhdr, sizeof(uint8_t));
		MD5Update(&ctx, (char *)&nhdr, sizeof(uint8_t));
		MD5Update(&ctx, (char *)&nhdr, sizeof(uint8_t));
		nhdr = IPPROTO_TCP;
		MD5Update(&ctx, (char *)&nhdr, sizeof(uint8_t));
		th = (struct tcphdr *)((u_char *)ip6 + sizeof(struct ip6_hdr));
		doff = sizeof(struct ip6_hdr) + sizeof(struct tcphdr) + optlen;
		break;
#endif /* INET6 */
	default:
		return (EINVAL);
		/* NOTREACHED */
	}

	/*
	 * Step 2: Update MD5 hash with TCP header, excluding options.
	 *	   The TCP checksum must be set to zero.
	 */
	savecsum = th->th_sum;
	th->th_sum = 0;
	MD5Update(&ctx, (char *)th, sizeof(struct tcphdr));
	th->th_sum = savecsum;

	/*
	 * Step 3: Update MD5 hash with TCP segment data.
	 *	   Use m_apply() to avoid an early m_pullup().
	 */
	if (len > 0)
		m_apply(m, doff, len, tcpsignature_apply, &ctx);

	/*
	 * Step 4: Update MD5 hash with shared secret.
	 */
	MD5Update(&ctx, _KEYBUF(sav->key_auth), _KEYLEN(sav->key_auth));
	MD5Final(buf, &ctx);
	key_sa_recordxfer(sav, m);
	key_freesav(sav);
	return (0);
}

int
tcpsignature_apply(void *fstate, void *data, unsigned int len)
{
	MD5Update((MD5_CTX *)fstate, (unsigned char *)data, len);
	return (0);
}
#endif /* TCP_SIGNATURE */
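
/*
 * For reference, the MD5 digest computed by tcpsignature_compute() above
 * covers, in this order and per RFC2385: the IP (or IPv6) pseudo-header,
 * the TCP header with its checksum forced to zero and options excluded,
 * the TCP segment data (if any), and finally the connection key from the
 * SADB.  The four steps therefore must not be reordered.
 */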

static void
tcp_drop_sysctl_dispatch(netmsg_t nmsg)
{
	struct lwkt_msg *lmsg = &nmsg->lmsg;
	/* addrs[0] is a foreign socket, addrs[1] is a local one. */
	struct sockaddr_storage *addrs = lmsg->u.ms_resultp;
	int error;
	struct sockaddr_in *fin, *lin;
#ifdef INET6
	struct sockaddr_in6 *fin6, *lin6;
	struct in6_addr f6, l6;
#endif
	struct inpcb *inp;

	switch (addrs[0].ss_family) {
#ifdef INET6
	case AF_INET6:
		fin6 = (struct sockaddr_in6 *)&addrs[0];
		lin6 = (struct sockaddr_in6 *)&addrs[1];
		error = in6_embedscope(&f6, fin6, NULL, NULL);
		if (error)
			goto done;
		error = in6_embedscope(&l6, lin6, NULL, NULL);
		if (error)
			goto done;
		inp = in6_pcblookup_hash(&tcbinfo[mycpuid], &f6,
		    fin6->sin6_port, &l6, lin6->sin6_port, FALSE, NULL);
		break;
#endif
	case AF_INET:
		fin = (struct sockaddr_in *)&addrs[0];
		lin = (struct sockaddr_in *)&addrs[1];
		inp = in_pcblookup_hash(&tcbinfo[mycpuid], fin->sin_addr,
		    fin->sin_port, lin->sin_addr, lin->sin_port, FALSE, NULL);
		break;
	default:
		/*
		 * Must not reach here, since the address family was
		 * checked in the sysctl handler.
		 */
		panic("unknown address family %d", addrs[0].ss_family);
	}

	if (inp != NULL) {
		struct tcpcb *tp = intotcpcb(inp);

		KASSERT((inp->inp_flags & INP_WILDCARD) == 0,
		    ("in wildcard hash"));
		KASSERT(tp != NULL, ("tcp_drop_sysctl_dispatch: tp is NULL"));
		KASSERT((tp->t_flags & TF_LISTEN) == 0, ("listen socket"));
		tcp_drop(tp, ECONNABORTED);
		error = 0;
	} else {
		error = ESRCH;
	}
#ifdef INET6
done:
#endif
	lwkt_replymsg(lmsg, error);
}

static int
sysctl_tcp_drop(SYSCTL_HANDLER_ARGS)
{
	/* addrs[0] is a foreign socket, addrs[1] is a local one. */
	struct sockaddr_storage addrs[2];
	struct sockaddr_in *fin, *lin;
#ifdef INET6
	struct sockaddr_in6 *fin6, *lin6;
#endif
	struct netmsg_base nmsg;
	struct lwkt_msg *lmsg = &nmsg.lmsg;
	struct lwkt_port *port = NULL;
	int error;

	if (req->oldptr != NULL || req->oldlen != 0)
		return (EINVAL);
	if (req->newptr == NULL)
		return (EPERM);
	if (req->newlen < sizeof(addrs))
		return (ENOMEM);
	error = SYSCTL_IN(req, &addrs, sizeof(addrs));
	if (error)
		return (error);

	switch (addrs[0].ss_family) {
#ifdef INET6
	case AF_INET6:
		fin6 = (struct sockaddr_in6 *)&addrs[0];
		lin6 = (struct sockaddr_in6 *)&addrs[1];
		if (fin6->sin6_len != sizeof(struct sockaddr_in6) ||
		    lin6->sin6_len != sizeof(struct sockaddr_in6))
			return (EINVAL);
		if (IN6_IS_ADDR_V4MAPPED(&fin6->sin6_addr) ||
		    IN6_IS_ADDR_V4MAPPED(&lin6->sin6_addr))
			return (EADDRNOTAVAIL);
		error = sa6_embedscope(fin6, V_ip6_use_defzone);
		if (error)
			return (error);
		error = sa6_embedscope(lin6, V_ip6_use_defzone);
		if (error)
			return (error);
		port = tcp6_addrport();
		break;
#endif
	case AF_INET:
		fin = (struct sockaddr_in *)&addrs[0];
		lin = (struct sockaddr_in *)&addrs[1];
		if (fin->sin_len != sizeof(struct sockaddr_in) ||
		    lin->sin_len != sizeof(struct sockaddr_in))
			return (EINVAL);
		port = tcp_addrport(fin->sin_addr.s_addr, fin->sin_port,
		    lin->sin_addr.s_addr, lin->sin_port);
		break;
	default:
		return (EINVAL);
	}

	netmsg_init(&nmsg, NULL, &curthread->td_msgport, 0,
	    tcp_drop_sysctl_dispatch);
	lmsg->u.ms_resultp = addrs;
	return lwkt_domsg(port, lmsg, 0);
}

SYSCTL_PROC(_net_inet_tcp, OID_AUTO, drop,
    CTLTYPE_STRUCT | CTLFLAG_WR | CTLFLAG_SKIP, NULL,
    0, sysctl_tcp_drop, "", "Drop TCP connection");

static int
sysctl_tcps_count(SYSCTL_HANDLER_ARGS)
{
	u_long state_count[TCP_NSTATES];
	int cpu;

	memset(state_count, 0, sizeof(state_count));
	for (cpu = 0; cpu < ncpus2; ++cpu) {
		int i;

		for (i = 0; i < TCP_NSTATES; ++i)
			state_count[i] += tcpstate_count[cpu].tcps_count[i];
	}

	return sysctl_handle_opaque(oidp, state_count, sizeof(state_count), req);
}

SYSCTL_PROC(_net_inet_tcp, OID_AUTO, state_count,
    CTLTYPE_OPAQUE | CTLFLAG_RD, NULL, 0,
    sysctl_tcps_count, "LU", "TCP connection counts by state");

void
tcp_pcbport_create(struct tcpcb *tp)
{
	int cpu;

	KASSERT((tp->t_flags & TF_LISTEN) && tp->t_state == TCPS_LISTEN,
	    ("not a listen tcpcb"));

	KASSERT(tp->t_pcbport == NULL, ("tcpcb port cache was created"));
	tp->t_pcbport = kmalloc_cachealign(sizeof(struct tcp_pcbport) * ncpus2,
	    M_PCB, M_WAITOK);

	for (cpu = 0; cpu < ncpus2; ++cpu) {
		struct inpcbport *phd;

		phd = &tp->t_pcbport[cpu].t_phd;
		LIST_INIT(&phd->phd_pcblist);
		/* Though, not used ... */
		phd->phd_port = tp->t_inpcb->inp_lport;
	}
}

void
tcp_pcbport_merge_oncpu(struct tcpcb *tp)
{
	struct inpcbport *phd;
	struct inpcb *inp;
	int cpu = mycpuid;

	KASSERT(cpu < ncpus2, ("invalid cpu%d", cpu));
	phd = &tp->t_pcbport[cpu].t_phd;

	while ((inp = LIST_FIRST(&phd->phd_pcblist)) != NULL) {
		KASSERT(inp->inp_phd == phd && inp->inp_porthash == NULL,
		    ("not on tcpcb port cache"));
		LIST_REMOVE(inp, inp_portlist);
		in_pcbinsporthash_lport(inp);
		KASSERT(inp->inp_phd == tp->t_inpcb->inp_phd &&
		    inp->inp_porthash == tp->t_inpcb->inp_porthash,
		    ("tcpcb port cache merge failed"));
	}
}

void
tcp_pcbport_destroy(struct tcpcb *tp)
{
	int cpu;

	for (cpu = 0; cpu < ncpus2; ++cpu) {
		KASSERT(LIST_EMPTY(&tp->t_pcbport[cpu].t_phd.phd_pcblist),
		    ("tcpcb port cache is not empty"));
	}
	kfree(tp->t_pcbport, M_PCB);
	tp->t_pcbport = NULL;
}