/*
 * Copyright (c) 2003, 2004 Jeffrey M. Hsu.  All rights reserved.
 * Copyright (c) 2003, 2004 The DragonFly Project.  All rights reserved.
 *
 * This code is derived from software contributed to The DragonFly Project
 * by Jeffrey M. Hsu.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of The DragonFly Project nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific, prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

/*
 * Copyright (c) 1982, 1986, 1988, 1990, 1993, 1995
 *	The Regents of the University of California.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)tcp_subr.c	8.2 (Berkeley) 5/24/95
 * $FreeBSD: src/sys/netinet/tcp_subr.c,v 1.73.2.31 2003/01/24 05:11:34 sam Exp $
 * $DragonFly: src/sys/netinet/tcp_subr.c,v 1.63 2008/11/11 10:46:58 sephe Exp $
 */
71 #include "opt_compat.h"
73 #include "opt_inet6.h"
74 #include "opt_ipsec.h"
75 #include "opt_tcpdebug.h"
77 #include <sys/param.h>
78 #include <sys/systm.h>
79 #include <sys/callout.h>
80 #include <sys/kernel.h>
81 #include <sys/sysctl.h>
82 #include <sys/malloc.h>
83 #include <sys/mpipe.h>
86 #include <sys/domain.h>
90 #include <sys/socket.h>
91 #include <sys/socketvar.h>
92 #include <sys/protosw.h>
93 #include <sys/random.h>
94 #include <sys/in_cksum.h>
97 #include <net/route.h>
99 #include <net/netisr.h>
102 #include <netinet/in.h>
103 #include <netinet/in_systm.h>
104 #include <netinet/ip.h>
105 #include <netinet/ip6.h>
106 #include <netinet/in_pcb.h>
107 #include <netinet6/in6_pcb.h>
108 #include <netinet/in_var.h>
109 #include <netinet/ip_var.h>
110 #include <netinet6/ip6_var.h>
111 #include <netinet/ip_icmp.h>
113 #include <netinet/icmp6.h>
115 #include <netinet/tcp.h>
116 #include <netinet/tcp_fsm.h>
117 #include <netinet/tcp_seq.h>
118 #include <netinet/tcp_timer.h>
119 #include <netinet/tcp_timer2.h>
120 #include <netinet/tcp_var.h>
121 #include <netinet6/tcp6_var.h>
122 #include <netinet/tcpip.h>
124 #include <netinet/tcp_debug.h>
126 #include <netinet6/ip6protosw.h>
129 #include <netinet6/ipsec.h>
130 #include <netproto/key/key.h>
132 #include <netinet6/ipsec6.h>
137 #include <netproto/ipsec/ipsec.h>
139 #include <netproto/ipsec/ipsec6.h>
145 #include <machine/smp.h>
147 #include <sys/msgport2.h>
148 #include <sys/mplock2.h>
149 #include <net/netmsg2.h>
#if !defined(KTR_TCP)
#define KTR_TCP		KTR_ALL
#endif
KTR_INFO_MASTER(tcp);
KTR_INFO(KTR_TCP, tcp, rxmsg, 0, "tcp getmsg", 0);
KTR_INFO(KTR_TCP, tcp, wait, 1, "tcp waitmsg", 0);
KTR_INFO(KTR_TCP, tcp, delayed, 2, "tcp execute delayed ops", 0);
#define logtcp(name)	KTR_LOG(tcp_ ## name)
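/*
 * Usage sketch (illustrative): logtcp() pastes its argument onto the
 * "tcp_" prefix, so a call such as
 *
 *	logtcp(wait);
 *
 * expands to KTR_LOG(tcp_wait) and emits the "tcp waitmsg" event
 * registered by the KTR_INFO() entries above when KTR tracing is
 * compiled in.
 */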
struct inpcbinfo tcbinfo[MAXCPU];
struct tcpcbackqhead tcpcbackq[MAXCPU];

static struct lwkt_token tcp_port_token =
		LWKT_TOKEN_MP_INITIALIZER(tcp_port_token);

int tcp_mssdflt = TCP_MSS;
SYSCTL_INT(_net_inet_tcp, TCPCTL_MSSDFLT, mssdflt, CTLFLAG_RW,
    &tcp_mssdflt, 0, "Default TCP Maximum Segment Size");

int tcp_v6mssdflt = TCP6_MSS;
SYSCTL_INT(_net_inet_tcp, TCPCTL_V6MSSDFLT, v6mssdflt, CTLFLAG_RW,
    &tcp_v6mssdflt, 0, "Default TCP Maximum Segment Size for IPv6");
/*
 * Minimum MSS we accept and use.  This prevents DoS attacks where
 * we are forced to a ridiculously low MSS like 20 and send hundreds
 * of packets instead of one.  The effect scales with the available
 * bandwidth and quickly saturates the CPU and network interface
 * with packet generation and sending.  Set to zero to disable MINMSS
 * checking.  This setting prevents us from sending too small packets.
 */
int tcp_minmss = TCP_MINMSS;
SYSCTL_INT(_net_inet_tcp, OID_AUTO, minmss, CTLFLAG_RW,
    &tcp_minmss, 0, "Minimum TCP Maximum Segment Size");
static int tcp_rttdflt = TCPTV_SRTTDFLT / PR_SLOWHZ;
SYSCTL_INT(_net_inet_tcp, TCPCTL_RTTDFLT, rttdflt, CTLFLAG_RW,
    &tcp_rttdflt, 0, "Default maximum TCP Round Trip Time");

int tcp_do_rfc1323 = 1;
SYSCTL_INT(_net_inet_tcp, TCPCTL_DO_RFC1323, rfc1323, CTLFLAG_RW,
    &tcp_do_rfc1323, 0, "Enable rfc1323 (high performance TCP) extensions");

static int tcp_tcbhashsize = 0;
SYSCTL_INT(_net_inet_tcp, OID_AUTO, tcbhashsize, CTLFLAG_RD,
    &tcp_tcbhashsize, 0, "Size of TCP control block hashtable");

static int do_tcpdrain = 1;
SYSCTL_INT(_net_inet_tcp, OID_AUTO, do_tcpdrain, CTLFLAG_RW, &do_tcpdrain, 0,
    "Enable tcp_drain routine for extra help when low on mbufs");

static int icmp_may_rst = 1;
SYSCTL_INT(_net_inet_tcp, OID_AUTO, icmp_may_rst, CTLFLAG_RW, &icmp_may_rst, 0,
    "Certain ICMP unreachable messages may abort connections in SYN_SENT");

static int tcp_isn_reseed_interval = 0;
SYSCTL_INT(_net_inet_tcp, OID_AUTO, isn_reseed_interval, CTLFLAG_RW,
    &tcp_isn_reseed_interval, 0, "Seconds between reseeding of ISN secret");
/*
 * TCP bandwidth limiting sysctls.  The inflight limiter is now turned on
 * by default, but with generous values which should allow maximal
 * bandwidth.  In particular, the slop defaults to 50 (5 packets).
 *
 * The reason for doing this is that the limiter is the only mechanism we
 * have which seems to do a really good job preventing receiver RX rings
 * on network interfaces from getting blown out.  Even though GigE/10GigE
 * is supposed to do flow control, it looks like either it doesn't actually
 * do it or Open Source drivers do not properly enable it.
 *
 * People using the limiter to reduce bottlenecks on slower WAN connections
 * should set the slop to 20 (2 packets).
 */
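/*
 * Tuning sketch (assumed workflow, not from the original): the knobs
 * below are ordinary sysctls, so the slow-WAN tuning suggested above
 * would typically be applied as
 *
 *	sysctl net.inet.tcp.inflight_stab=20
 *	sysctl net.inet.tcp.inflight_debug=1
 *
 * with inflight_debug temporarily enabled to watch the bandwidth and
 * window computed by tcp_xmit_bandwidth_limit() further below.
 */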
static int tcp_inflight_enable = 1;
SYSCTL_INT(_net_inet_tcp, OID_AUTO, inflight_enable, CTLFLAG_RW,
    &tcp_inflight_enable, 0, "Enable automatic TCP inflight data limiting");

static int tcp_inflight_debug = 0;
SYSCTL_INT(_net_inet_tcp, OID_AUTO, inflight_debug, CTLFLAG_RW,
    &tcp_inflight_debug, 0, "Debug TCP inflight calculations");

static int tcp_inflight_min = 6144;
SYSCTL_INT(_net_inet_tcp, OID_AUTO, inflight_min, CTLFLAG_RW,
    &tcp_inflight_min, 0, "Lower bound for TCP inflight window");

static int tcp_inflight_max = TCP_MAXWIN << TCP_MAX_WINSHIFT;
SYSCTL_INT(_net_inet_tcp, OID_AUTO, inflight_max, CTLFLAG_RW,
    &tcp_inflight_max, 0, "Upper bound for TCP inflight window");

static int tcp_inflight_stab = 50;
SYSCTL_INT(_net_inet_tcp, OID_AUTO, inflight_stab, CTLFLAG_RW,
    &tcp_inflight_stab, 0, "Slop in maximal packets / 10 (20 = 2 packets)");
static MALLOC_DEFINE(M_TCPTEMP, "tcptemp", "TCP Templates for Keepalives");
static struct malloc_pipe tcptemp_mpipe;

static void tcp_willblock(void);
static void tcp_notify (struct inpcb *, int);

struct tcp_stats tcpstats_percpu[MAXCPU];
#ifdef SMP
static int
sysctl_tcpstats(SYSCTL_HANDLER_ARGS)
{
	int cpu, error = 0;

	for (cpu = 0; cpu < ncpus; ++cpu) {
		if ((error = SYSCTL_OUT(req, &tcpstats_percpu[cpu],
					sizeof(struct tcp_stats))))
			break;
		if ((error = SYSCTL_IN(req, &tcpstats_percpu[cpu],
				       sizeof(struct tcp_stats))))
			break;
	}

	return (error);
}
SYSCTL_PROC(_net_inet_tcp, TCPCTL_STATS, stats, (CTLTYPE_OPAQUE | CTLFLAG_RW),
    0, 0, sysctl_tcpstats, "S,tcp_stats", "TCP statistics");
#else
SYSCTL_STRUCT(_net_inet_tcp, TCPCTL_STATS, stats, CTLFLAG_RW,
    &tcpstat, tcp_stats, "TCP statistics");
#endif
/*
 * Target size of TCP PCB hash tables.  Must be a power of two.
 *
 * Note that this can be overridden by the kernel environment
 * variable net.inet.tcp.tcbhashsize
 */
#define TCBHASHSIZE	512
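/*
 * Override sketch (assumed usage, not from the original): because the
 * value is fetched with TUNABLE_INT_FETCH() in tcp_init() below, it can
 * be set from the loader before the kernel boots, e.g. in
 * /boot/loader.conf:
 *
 *	net.inet.tcp.tcbhashsize="1024"
 *
 * The value must be a power of 2, or tcp_init() falls back to 512.
 */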
/*
 * This is the actual shape of what we allocate using the zone
 * allocator.  Doing it this way allows us to protect both structures
 * using the same generation count, and also eliminates the overhead
 * of allocating tcpcbs separately.  By hiding the structure here,
 * we avoid changing most of the rest of the code (although it needs
 * to be changed, eventually, for greater efficiency).
 */
#define ALIGNMENT	32
#define ALIGNM1		(ALIGNMENT - 1)
struct	inp_tp {
	union {
		struct	inpcb inp;
		char	align[(sizeof(struct inpcb) + ALIGNM1) & ~ALIGNM1];
	} inp_tp_u;
	struct	tcpcb tcb;
	struct	tcp_callout inp_tp_rexmt;
	struct	tcp_callout inp_tp_persist;
	struct	tcp_callout inp_tp_keep;
	struct	tcp_callout inp_tp_2msl;
	struct	tcp_callout inp_tp_delack;
	struct	netmsg_tcp_timer inp_tp_timermsg;
};
#undef ALIGNMENT
#undef ALIGNM1
/*
 * Tcp initialization
 */
void
tcp_init(void)
{
	struct inpcbporthead *porthashbase;
	struct inpcbinfo *ticb;
	u_long porthashmask;
	int hashsize = TCBHASHSIZE;
	int cpu;

	/*
	 * note: tcptemp is used for keepalives, and it is ok for an
	 * allocation to fail so do not specify MPF_INT.
	 */
	mpipe_init(&tcptemp_mpipe, M_TCPTEMP, sizeof(struct tcptemp),
		    25, -1, 0, NULL, NULL, NULL);

	tcp_delacktime = TCPTV_DELACK;
	tcp_keepinit = TCPTV_KEEP_INIT;
	tcp_keepidle = TCPTV_KEEP_IDLE;
	tcp_keepintvl = TCPTV_KEEPINTVL;
	tcp_maxpersistidle = TCPTV_KEEP_IDLE;

	tcp_rexmit_min = TCPTV_MIN;
	tcp_rexmit_slop = TCPTV_CPU_VAR;

	TUNABLE_INT_FETCH("net.inet.tcp.tcbhashsize", &hashsize);
	if (!powerof2(hashsize)) {
		kprintf("WARNING: TCB hash size not a power of 2\n");
		hashsize = 512; /* safe default */
	}
	tcp_tcbhashsize = hashsize;
	porthashbase = hashinit(hashsize, M_PCB, &porthashmask);
	for (cpu = 0; cpu < ncpus2; cpu++) {
		ticb = &tcbinfo[cpu];
		in_pcbinfo_init(ticb);
		ticb->cpu = cpu;
		ticb->hashbase = hashinit(hashsize, M_PCB,
					  &ticb->hashmask);
		ticb->porthashbase = porthashbase;
		ticb->porthashmask = porthashmask;
		ticb->porttoken = &tcp_port_token;
#if 0
		ticb->porthashbase = hashinit(hashsize, M_PCB,
					      &ticb->porthashmask);
#endif
		ticb->wildcardhashbase = hashinit(hashsize, M_PCB,
						  &ticb->wildcardhashmask);
		ticb->ipi_size = sizeof(struct inp_tp);
		TAILQ_INIT(&tcpcbackq[cpu]);
	}

	tcp_reass_maxseg = nmbclusters / 16;
	TUNABLE_INT_FETCH("net.inet.tcp.reass.maxsegments", &tcp_reass_maxseg);
#ifdef INET6
#define TCP_MINPROTOHDR (sizeof(struct ip6_hdr) + sizeof(struct tcphdr))
#else
#define TCP_MINPROTOHDR (sizeof(struct tcpiphdr))
#endif
	if (max_protohdr < TCP_MINPROTOHDR)
		max_protohdr = TCP_MINPROTOHDR;
	if (max_linkhdr + TCP_MINPROTOHDR > MHLEN)
		panic("tcp_init");
#undef TCP_MINPROTOHDR
	/*
	 * Initialize TCP statistics counters for each CPU.
	 */
#ifdef SMP
	for (cpu = 0; cpu < ncpus; ++cpu)
		bzero(&tcpstats_percpu[cpu], sizeof(struct tcp_stats));
#else
	bzero(&tcpstat, sizeof(struct tcp_stats));
#endif

	netisr_register_rollup(tcp_willblock);
}
static void
tcp_willblock(void)
{
	struct tcpcb *tp;
	int cpu = mycpu->gd_cpuid;

	while ((tp = TAILQ_FIRST(&tcpcbackq[cpu])) != NULL) {
		KKASSERT(tp->t_flags & TF_ONOUTPUTQ);
		tp->t_flags &= ~TF_ONOUTPUTQ;
		TAILQ_REMOVE(&tcpcbackq[cpu], tp, t_outputq);
		tcp_output(tp);
	}
}
/*
 * Fill in the IP and TCP headers for an outgoing packet, given the tcpcb.
 * tcp_template used to store this data in mbufs, but we now recopy it out
 * of the tcpcb each time to conserve mbufs.
 */
void
tcp_fillheaders(struct tcpcb *tp, void *ip_ptr, void *tcp_ptr)
{
	struct inpcb *inp = tp->t_inpcb;
	struct tcphdr *tcp_hdr = (struct tcphdr *)tcp_ptr;

#ifdef INET6
	if (inp->inp_vflag & INP_IPV6) {
		struct ip6_hdr *ip6;

		ip6 = (struct ip6_hdr *)ip_ptr;
		ip6->ip6_flow = (ip6->ip6_flow & ~IPV6_FLOWINFO_MASK) |
			(inp->in6p_flowinfo & IPV6_FLOWINFO_MASK);
		ip6->ip6_vfc = (ip6->ip6_vfc & ~IPV6_VERSION_MASK) |
			(IPV6_VERSION & IPV6_VERSION_MASK);
		ip6->ip6_nxt = IPPROTO_TCP;
		ip6->ip6_plen = sizeof(struct tcphdr);
		ip6->ip6_src = inp->in6p_laddr;
		ip6->ip6_dst = inp->in6p_faddr;
		tcp_hdr->th_sum = 0;
	} else
#endif
	{
		struct ip *ip = (struct ip *) ip_ptr;

		ip->ip_vhl = IP_VHL_BORING;
		ip->ip_p = IPPROTO_TCP;
		ip->ip_src = inp->inp_laddr;
		ip->ip_dst = inp->inp_faddr;
		tcp_hdr->th_sum = in_pseudo(ip->ip_src.s_addr,
				    ip->ip_dst.s_addr,
				    htons(sizeof(struct tcphdr) + IPPROTO_TCP));
	}

	tcp_hdr->th_sport = inp->inp_lport;
	tcp_hdr->th_dport = inp->inp_fport;
	tcp_hdr->th_flags = 0;
}
/*
 * Create template to be used to send tcp packets on a connection.
 * Allocates an mbuf and fills in a skeletal tcp/ip header.  The only
 * use for this function is in keepalives, which use tcp_respond.
 */
struct tcptemp *
tcp_maketemplate(struct tcpcb *tp)
{
	struct tcptemp *tmp;

	if ((tmp = mpipe_alloc_nowait(&tcptemp_mpipe)) == NULL)
		return (NULL);
	tcp_fillheaders(tp, &tmp->tt_ipgen, &tmp->tt_t);
	return (tmp);
}

void
tcp_freetemplate(struct tcptemp *tmp)
{
	mpipe_free(&tcptemp_mpipe, tmp);
}
/*
 * Send a single message to the TCP at address specified by
 * the given TCP/IP header.  If m == NULL, then we make a copy
 * of the tcpiphdr at ti and send directly to the addressed host.
 * This is used to force keep alive messages out using the TCP
 * template for a connection.  If flags are given then we send
 * a message back to the TCP which originated the segment ti,
 * and discard the mbuf containing it and any other attached mbufs.
 *
 * In any case the ack and sequence number of the transmitted
 * segment are as specified by the parameters.
 *
 * NOTE: If m != NULL, then ti must point to *inside* the mbuf.
 */
void
tcp_respond(struct tcpcb *tp, void *ipgen, struct tcphdr *th, struct mbuf *m,
	    tcp_seq ack, tcp_seq seq, int flags)
{
	int tlen;
	long win = 0;
	struct route *ro = NULL;
	struct route sro;
	struct ip *ip = ipgen;
	struct tcphdr *nth;
	struct route_in6 *ro6 = NULL;
	struct route_in6 sro6;
	struct ip6_hdr *ip6 = ipgen;
	boolean_t use_tmpro = TRUE;
	int ipflags = 0;
#ifdef INET6
	boolean_t isipv6 = (IP_VHL_V(ip->ip_vhl) == 6);
#else
	const boolean_t isipv6 = FALSE;
#endif
& TH_RST
)) {
530 win
= ssb_space(&tp
->t_inpcb
->inp_socket
->so_rcv
);
533 if (win
> (long)TCP_MAXWIN
<< tp
->rcv_scale
)
534 win
= (long)TCP_MAXWIN
<< tp
->rcv_scale
;
537 * Don't use the route cache of a listen socket,
538 * it is not MPSAFE; use temporary route cache.
540 if (tp
->t_state
!= TCPS_LISTEN
) {
542 ro6
= &tp
->t_inpcb
->in6p_route
;
544 ro
= &tp
->t_inpcb
->inp_route
;
551 bzero(ro6
, sizeof *ro6
);
554 bzero(ro
, sizeof *ro
);
	if (m == NULL) {
		m = m_gethdr(MB_DONTWAIT, MT_HEADER);
		if (m == NULL)
			return;
		tlen = 0;
		m->m_data += max_linkhdr;
		if (isipv6) {
			bcopy(ip6, mtod(m, caddr_t), sizeof(struct ip6_hdr));
			ip6 = mtod(m, struct ip6_hdr *);
			nth = (struct tcphdr *)(ip6 + 1);
		} else {
			bcopy(ip, mtod(m, caddr_t), sizeof(struct ip));
			ip = mtod(m, struct ip *);
			nth = (struct tcphdr *)(ip + 1);
		}
		bcopy(th, nth, sizeof(struct tcphdr));
		flags = TH_ACK;
	} else {
		m_freem(m->m_next);
		m->m_next = NULL;
		m->m_data = (caddr_t)ipgen;
		/* m_len is set later */
		tlen = 0;
#define xchg(a, b, type) { type t; t = a; a = b; b = t; }
		if (isipv6) {
			xchg(ip6->ip6_dst, ip6->ip6_src, struct in6_addr);
			nth = (struct tcphdr *)(ip6 + 1);
		} else {
			xchg(ip->ip_dst.s_addr, ip->ip_src.s_addr, n_long);
			nth = (struct tcphdr *)(ip + 1);
		}
		if (th != nth) {
			/*
			 * this is usually a case when an extension header
			 * exists between the IPv6 header and the
			 * TCP header.
			 */
			nth->th_sport = th->th_sport;
			nth->th_dport = th->th_dport;
		}
		xchg(nth->th_dport, nth->th_sport, n_short);
#undef xchg
	}
	if (isipv6) {
		ip6->ip6_vfc = IPV6_VERSION;
		ip6->ip6_nxt = IPPROTO_TCP;
		ip6->ip6_plen = htons((u_short)(sizeof(struct tcphdr) + tlen));
		tlen += sizeof(struct ip6_hdr) + sizeof(struct tcphdr);
	} else {
		tlen += sizeof(struct tcpiphdr);
		ip->ip_len = tlen;
		ip->ip_ttl = ip_defttl;
	}
	m->m_len = tlen;
	m->m_pkthdr.len = tlen;
	m->m_pkthdr.rcvif = NULL;
	nth->th_seq = htonl(seq);
	nth->th_ack = htonl(ack);
	nth->th_x2 = 0;
	nth->th_off = sizeof(struct tcphdr) >> 2;
	nth->th_flags = flags;
	if (tp != NULL)
		nth->th_win = htons((u_short) (win >> tp->rcv_scale));
	else
		nth->th_win = htons((u_short)win);
	nth->th_urp = 0;
	if (isipv6) {
		nth->th_sum = 0;
		nth->th_sum = in6_cksum(m, IPPROTO_TCP,
					sizeof(struct ip6_hdr),
					tlen - sizeof(struct ip6_hdr));
		ip6->ip6_hlim = in6_selecthlim(tp ? tp->t_inpcb : NULL,
					       (ro6 && ro6->ro_rt) ?
						ro6->ro_rt->rt_ifp : NULL);
	} else {
		nth->th_sum = in_pseudo(ip->ip_src.s_addr, ip->ip_dst.s_addr,
		    htons((u_short)(tlen - sizeof(struct ip) + ip->ip_p)));
		m->m_pkthdr.csum_flags = CSUM_TCP;
		m->m_pkthdr.csum_data = offsetof(struct tcphdr, th_sum);
	}
#ifdef TCPDEBUG
	if (tp == NULL || (tp->t_inpcb->inp_socket->so_options & SO_DEBUG))
		tcp_trace(TA_OUTPUT, 0, tp, mtod(m, void *), th, 0);
#endif
	if (isipv6) {
		ip6_output(m, NULL, ro6, ipflags, NULL, NULL,
			   tp ? tp->t_inpcb : NULL);
		if ((ro6 == &sro6) && (ro6->ro_rt != NULL)) {
			RTFREE(ro6->ro_rt);
			ro6->ro_rt = NULL;
		}
	} else {
		ipflags |= IP_DEBUGROUTE;
		ip_output(m, NULL, ro, ipflags, NULL, tp ? tp->t_inpcb : NULL);
		if ((ro == &sro) && (ro->ro_rt != NULL)) {
			RTFREE(ro->ro_rt);
			ro->ro_rt = NULL;
		}
	}
}
/*
 * Create a new TCP control block, making an
 * empty reassembly queue and hooking it to the argument
 * protocol control block.  The `inp' parameter must have
 * come from the zone allocator set up in tcp_init().
 */
struct tcpcb *
tcp_newtcpcb(struct inpcb *inp)
{
	struct inp_tp *it;
	struct tcpcb *tp;
#ifdef INET6
	boolean_t isipv6 = ((inp->inp_vflag & INP_IPV6) != 0);
#else
	const boolean_t isipv6 = FALSE;
#endif

	it = (struct inp_tp *)inp;
	tp = &it->tcb;
	bzero(tp, sizeof(struct tcpcb));
	LIST_INIT(&tp->t_segq);
	tp->t_maxseg = tp->t_maxopd = isipv6 ? tcp_v6mssdflt : tcp_mssdflt;

	/* Set up our timeouts. */
	tp->tt_rexmt = &it->inp_tp_rexmt;
	tp->tt_persist = &it->inp_tp_persist;
	tp->tt_keep = &it->inp_tp_keep;
	tp->tt_2msl = &it->inp_tp_2msl;
	tp->tt_delack = &it->inp_tp_delack;

	/*
	 * Zero out timer message.  We don't create it here,
	 * since the current CPU may not be the owner of this
	 * inpcb.
	 */
	tp->tt_msg = &it->inp_tp_timermsg;
	bzero(tp->tt_msg, sizeof(*tp->tt_msg));

	tp->t_flags = (TF_REQ_SCALE | TF_REQ_TSTMP);
	tp->t_inpcb = inp;	/* XXX */
	tp->t_state = TCPS_CLOSED;
	/*
	 * Init srtt to TCPTV_SRTTBASE (0), so we can tell that we have no
	 * rtt estimate.  Set rttvar so that srtt + 4 * rttvar gives
	 * reasonable initial retransmit time.
	 */
	tp->t_srtt = TCPTV_SRTTBASE;
	tp->t_rttvar =
	    ((TCPTV_RTOBASE - TCPTV_SRTTBASE) << TCP_RTTVAR_SHIFT) / 4;
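	/*
	 * Worked check of the initialization above (illustrative, not from
	 * the original): with srtt = TCPTV_SRTTBASE = 0, the standard
	 * "srtt + 4 * rttvar" RTO formula evaluates, once the fixed-point
	 * scaling shifts are removed, to exactly TCPTV_RTOBASE -- the same
	 * value assigned to t_rxtcur below, so the first retransmit timeout
	 * fires after TCPTV_RTOBASE ticks.
	 */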
	tp->t_rttmin = tcp_rexmit_min;
	tp->t_rxtcur = TCPTV_RTOBASE;
	tp->snd_cwnd = TCP_MAXWIN << TCP_MAX_WINSHIFT;
	tp->snd_bwnd = TCP_MAXWIN << TCP_MAX_WINSHIFT;
	tp->snd_ssthresh = TCP_MAXWIN << TCP_MAX_WINSHIFT;
	tp->t_rcvtime = ticks;
	/*
	 * IPv4 TTL initialization is necessary for an IPv6 socket as well,
	 * because the socket may be bound to an IPv6 wildcard address,
	 * which may match an IPv4-mapped IPv6 address.
	 */
	inp->inp_ip_ttl = ip_defttl;
	inp->inp_ppcb = tp;
	tcp_sack_tcpcb_init(tp);
	return (tp);		/* XXX */
}
/*
 * Drop a TCP connection, reporting the specified error.
 * If connection is synchronized, then send a RST to peer.
 */
struct tcpcb *
tcp_drop(struct tcpcb *tp, int error)
{
	struct socket *so = tp->t_inpcb->inp_socket;

	if (TCPS_HAVERCVDSYN(tp->t_state)) {
		tp->t_state = TCPS_CLOSED;
		tcp_output(tp);
		tcpstat.tcps_drops++;
	} else
		tcpstat.tcps_conndrops++;
	if (error == ETIMEDOUT && tp->t_softerror)
		error = tp->t_softerror;
	so->so_error = error;
	return (tcp_close(tp));
}
#ifdef SMP
struct netmsg_remwildcard {
	struct netmsg_base	base;
	struct inpcb		*nm_inp;
	struct inpcbinfo	*nm_pcbinfo;
#if defined(INET6)
	boolean_t		nm_isinet6;
#else
	uint8_t			nm_unused01;
#endif
};

/*
 * Wildcard inpcb's on SMP boxes must be removed from all cpus before the
 * inp can be detached.  We do this by cycling through the cpus, ending up
 * on the cpu controlling the inp last and then doing the disconnect.
 */
static void
in_pcbremwildcardhash_handler(netmsg_t msg)
{
	struct netmsg_remwildcard *nmsg = (struct netmsg_remwildcard *)msg;
	int cpu;

	cpu = nmsg->nm_pcbinfo->cpu;

	if (cpu == nmsg->nm_inp->inp_pcbinfo->cpu) {
		/* note: detach removes any wildcard hash entry */
#ifdef INET6
		if (nmsg->nm_isinet6)
			in6_pcbdetach(nmsg->nm_inp);
		else
#endif
			in_pcbdetach(nmsg->nm_inp);
		lwkt_replymsg(&nmsg->base.lmsg, 0);
	} else {
		in_pcbremwildcardhash_oncpu(nmsg->nm_inp, nmsg->nm_pcbinfo);
		cpu = (cpu + 1) % ncpus2;
		nmsg->nm_pcbinfo = &tcbinfo[cpu];
		lwkt_forwardmsg(cpu_portfn(cpu), &nmsg->base.lmsg);
	}
}
#endif
/*
 * Close a TCP control block:
 *	discard all space held by the tcp
 *	discard internet protocol block
 *	wake up any sleepers
 */
struct tcpcb *
tcp_close(struct tcpcb *tp)
{
	struct tseg_qent *q;
	struct inpcb *inp = tp->t_inpcb;
	struct socket *so = inp->inp_socket;
	struct rtentry *rt;
	boolean_t dosavessthresh;
#ifdef SMP
	int cpu;
#endif
#ifdef INET6
	boolean_t isipv6 = ((inp->inp_vflag & INP_IPV6) != 0);
	boolean_t isafinet6 = (INP_CHECK_SOCKAF(so, AF_INET6) != 0);
#else
	const boolean_t isipv6 = FALSE;
#endif
	/*
	 * The tp is not instantly destroyed in the wildcard case.  Setting
	 * the state to TCPS_TERMINATING will prevent the TCP stack from
	 * messing with it, though it should be noted that this change may
	 * not take effect on other cpus until we have chained the wildcard
	 * hash removal.
	 *
	 * XXX we currently depend on the BGL to synchronize the tp->t_state
	 * update and prevent other tcp protocol threads from accepting new
	 * connections on the listen socket we might be trying to close down.
	 */
	KKASSERT(tp->t_state != TCPS_TERMINATING);
	tp->t_state = TCPS_TERMINATING;

	/*
	 * Make sure that all of our timers are stopped before we
	 * delete the PCB.  For listen TCP socket (tp->tt_msg == NULL),
	 * timers are never used.  If timer message is never created
	 * (tp->tt_msg->tt_tcb == NULL), timers are never used either.
	 */
	if (tp->tt_msg != NULL && tp->tt_msg->tt_tcb != NULL) {
		tcp_callout_stop(tp, tp->tt_rexmt);
		tcp_callout_stop(tp, tp->tt_persist);
		tcp_callout_stop(tp, tp->tt_keep);
		tcp_callout_stop(tp, tp->tt_2msl);
		tcp_callout_stop(tp, tp->tt_delack);
	}

	if (tp->t_flags & TF_ONOUTPUTQ) {
		KKASSERT(tp->tt_cpu == mycpu->gd_cpuid);
		TAILQ_REMOVE(&tcpcbackq[tp->tt_cpu], tp, t_outputq);
		tp->t_flags &= ~TF_ONOUTPUTQ;
	}
	/*
	 * If we got enough samples through the srtt filter,
	 * save the rtt and rttvar in the routing entry.
	 * 'Enough' is arbitrarily defined as the 16 samples.
	 * 16 samples is enough for the srtt filter to converge
	 * to within 5% of the correct value; fewer samples and
	 * we could save a very bogus rtt.
	 *
	 * Don't update the default route's characteristics and don't
	 * update anything that the user "locked".
	 */
	if (tp->t_rttupdated >= 16) {
		u_long i = 0;

		if (isipv6) {
			struct sockaddr_in6 *sin6;

			if ((rt = inp->in6p_route.ro_rt) == NULL)
				goto no_valid_rt;
			sin6 = (struct sockaddr_in6 *)rt_key(rt);
			if (IN6_IS_ADDR_UNSPECIFIED(&sin6->sin6_addr))
				goto no_valid_rt;
		} else {
			if ((rt = inp->inp_route.ro_rt) == NULL ||
			    ((struct sockaddr_in *)rt_key(rt))->
			     sin_addr.s_addr == INADDR_ANY)
				goto no_valid_rt;
		}

		if (!(rt->rt_rmx.rmx_locks & RTV_RTT)) {
			i = tp->t_srtt * (RTM_RTTUNIT / (hz * TCP_RTT_SCALE));
			if (rt->rt_rmx.rmx_rtt && i)
				/*
				 * filter this update to half the old & half
				 * the new values, converting scale.
				 * See route.h and tcp_var.h for a
				 * description of the scaling constants.
				 */
				rt->rt_rmx.rmx_rtt =
				    (rt->rt_rmx.rmx_rtt + i) / 2;
			else
				rt->rt_rmx.rmx_rtt = i;
			tcpstat.tcps_cachedrtt++;
		}
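		/*
		 * Units sketch for the conversion above (illustrative, not
		 * from the original): t_srtt is kept in ticks scaled by
		 * TCP_RTT_SCALE, while rmx_rtt is in units of RTM_RTTUNIT
		 * per second, so multiplying by RTM_RTTUNIT / (hz *
		 * TCP_RTT_SCALE) rescales one to the other; the (old + new)
		 * / 2 step is then a simple one-pole smoothing filter.
		 */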
		if (!(rt->rt_rmx.rmx_locks & RTV_RTTVAR)) {
			i = tp->t_rttvar *
			    (RTM_RTTUNIT / (hz * TCP_RTTVAR_SCALE));
			if (rt->rt_rmx.rmx_rttvar && i)
				rt->rt_rmx.rmx_rttvar =
				    (rt->rt_rmx.rmx_rttvar + i) / 2;
			else
				rt->rt_rmx.rmx_rttvar = i;
			tcpstat.tcps_cachedrttvar++;
		}
		/*
		 * The old comment here said:
		 * update the pipelimit (ssthresh) if it has been updated
		 * already or if a pipesize was specified & the threshold
		 * got below half the pipesize.  I.e., wait for bad news
		 * before we start updating, then update on both good
		 * and bad news.
		 *
		 * But we want to save the ssthresh even if no pipesize is
		 * specified explicitly in the route, because such
		 * connections still have an implicit pipesize specified
		 * by the global tcp_sendspace.  In the absence of a reliable
		 * way to calculate the pipesize, it will have to do.
		 */
		i = tp->snd_ssthresh;
		if (rt->rt_rmx.rmx_sendpipe != 0)
			dosavessthresh = (i < rt->rt_rmx.rmx_sendpipe / 2);
		else
			dosavessthresh = (i < so->so_snd.ssb_hiwat / 2);
		if (dosavessthresh ||
		    (!(rt->rt_rmx.rmx_locks & RTV_SSTHRESH) && (i != 0) &&
		     (rt->rt_rmx.rmx_ssthresh != 0))) {
			/*
			 * convert the limit from user data bytes to
			 * packets then to packet data bytes.
			 */
			i = (i + tp->t_maxseg / 2) / tp->t_maxseg;
			if (i < 2)
				i = 2;
			i *= tp->t_maxseg +
			     (isipv6 ?
			      sizeof(struct ip6_hdr) + sizeof(struct tcphdr) :
			      sizeof(struct tcpiphdr));
			if (rt->rt_rmx.rmx_ssthresh)
				rt->rt_rmx.rmx_ssthresh =
				    (rt->rt_rmx.rmx_ssthresh + i) / 2;
			else
				rt->rt_rmx.rmx_ssthresh = i;
			tcpstat.tcps_cachedssthresh++;
		}
	}
no_valid_rt:
	/* free the reassembly queue, if any */
	while ((q = LIST_FIRST(&tp->t_segq)) != NULL) {
		LIST_REMOVE(q, tqe_q);
		m_freem(q->tqe_m);
		kfree(q, M_TSEGQ);
		atomic_add_int(&tcp_reass_qsize, -1);
	}
	/* throw away SACK blocks in scoreboard */
	if (TCP_DO_SACK(tp))
		tcp_sack_cleanup(&tp->scb);

	inp->inp_ppcb = NULL;
	soisdisconnected(so);
	/* note: pcb detached later on */

	tcp_destroy_timermsg(tp);
	if (tp->t_flags & TF_SYNCACHE)
		syncache_destroy(tp);
	/*
	 * Discard the inp.  In the SMP case a wildcard inp's hash (created
	 * by a listen socket or an INADDR_ANY udp socket) is replicated
	 * for each protocol thread and must be removed in the context of
	 * that thread.  This is accomplished by chaining the message
	 * through the cpus.
	 *
	 * If the inp is not wildcarded we simply detach, which will remove
	 * any hashes still present for this inp.
	 */
#ifdef SMP
	if (inp->inp_flags & INP_WILDCARD_MP) {
		struct netmsg_remwildcard *nmsg;

		cpu = (inp->inp_pcbinfo->cpu + 1) % ncpus2;
		nmsg = kmalloc(sizeof(struct netmsg_remwildcard),
			       M_LWKTMSG, M_INTWAIT);
		netmsg_init(&nmsg->base, NULL, &netisr_afree_rport,
			    0, in_pcbremwildcardhash_handler);
#ifdef INET6
		nmsg->nm_isinet6 = isafinet6;
#endif
		nmsg->nm_inp = inp;
		nmsg->nm_pcbinfo = &tcbinfo[cpu];
		lwkt_sendmsg(cpu_portfn(cpu), &nmsg->base.lmsg);
	} else
#endif
	{
		/* note: detach removes any wildcard hash entry */
#ifdef INET6
		if (isafinet6)
			in6_pcbdetach(inp);
		else
#endif
			in_pcbdetach(inp);
	}
	tcpstat.tcps_closed++;
	return (NULL);
}
static __inline void
tcp_drain_oncpu(struct inpcbhead *head)
{
	struct inpcb *marker;
	struct inpcb *inpb;
	struct tcpcb *tcpb;
	struct tseg_qent *te;

	/*
	 * Allows us to block while running the list
	 */
	marker = kmalloc(sizeof(struct inpcb), M_TEMP, M_WAITOK|M_ZERO);
	marker->inp_flags |= INP_PLACEMARKER;
	LIST_INSERT_HEAD(head, marker, inp_list);

	while ((inpb = LIST_NEXT(marker, inp_list)) != NULL) {
		if ((inpb->inp_flags & INP_PLACEMARKER) == 0 &&
		    (tcpb = intotcpcb(inpb)) != NULL &&
		    (te = LIST_FIRST(&tcpb->t_segq)) != NULL) {
			LIST_REMOVE(te, tqe_q);
			m_freem(te->tqe_m);
			kfree(te, M_TSEGQ);
			atomic_add_int(&tcp_reass_qsize, -1);
			/* retry */
		} else {
			LIST_REMOVE(marker, inp_list);
			LIST_INSERT_AFTER(inpb, marker, inp_list);
		}
	}
	LIST_REMOVE(marker, inp_list);
	kfree(marker, M_TEMP);
}
#ifdef SMP
struct netmsg_tcp_drain {
	struct netmsg_base	base;
	struct inpcbhead	*nm_head;
};

static void
tcp_drain_handler(netmsg_t msg)
{
	struct netmsg_tcp_drain *nm = (void *)msg;

	tcp_drain_oncpu(nm->nm_head);
	lwkt_replymsg(&nm->base.lmsg, 0);
}
#endif
void
tcp_drain(void)
{
#ifdef SMP
	int cpu;
#endif

	if (!do_tcpdrain)
		return;

	/*
	 * Walk the tcpbs, if existing, and flush the reassembly queue,
	 * if there is one...
	 * XXX: The "Net/3" implementation doesn't imply that the TCP
	 *	reassembly queue should be flushed, but in a situation
	 *	where we're really low on mbufs, this is potentially
	 *	useful.
	 */
#ifdef SMP
	for (cpu = 0; cpu < ncpus2; cpu++) {
		struct netmsg_tcp_drain *nm;

		if (cpu == mycpu->gd_cpuid) {
			tcp_drain_oncpu(&tcbinfo[cpu].pcblisthead);
		} else {
			nm = kmalloc(sizeof(struct netmsg_tcp_drain),
				     M_LWKTMSG, M_NOWAIT);
			if (nm == NULL)
				continue;
			netmsg_init(&nm->base, NULL, &netisr_afree_rport,
				    0, tcp_drain_handler);
			nm->nm_head = &tcbinfo[cpu].pcblisthead;
			lwkt_sendmsg(cpu_portfn(cpu), &nm->base.lmsg);
		}
	}
#else
	tcp_drain_oncpu(&tcbinfo[0].pcblisthead);
#endif
}
/*
 * Notify a tcp user of an asynchronous error;
 * store error as soft error, but wake up user
 * (for now, won't do anything until can select for soft error).
 *
 * Do not wake up user since there currently is no mechanism for
 * reporting soft errors (yet - a kqueue filter may be added).
 */
static void
tcp_notify(struct inpcb *inp, int error)
{
	struct tcpcb *tp = intotcpcb(inp);

	/*
	 * Ignore some errors if we are hooked up.
	 * If connection hasn't completed, has retransmitted several times,
	 * and receives a second error, give up now.  This is better
	 * than waiting a long time to establish a connection that
	 * can never complete.
	 */
	if (tp->t_state == TCPS_ESTABLISHED &&
	    (error == EHOSTUNREACH || error == ENETUNREACH ||
	     error == EHOSTDOWN)) {
		return;
	} else if (tp->t_state < TCPS_ESTABLISHED && tp->t_rxtshift > 3 &&
	    tp->t_softerror)
		tcp_drop(tp, error);
	else
		tp->t_softerror = error;
#if 0
	wakeup(&so->so_timeo);
	sorwakeup(so);
	sowwakeup(so);
#endif
}
static int
tcp_pcblist(SYSCTL_HANDLER_ARGS)
{
	int error, i, n;
	struct inpcb *marker;
	struct inpcb *inp;
	globaldata_t gd;
	int origcpu, ccpu;

	error = 0;
	n = 0;

	/*
	 * The process of preparing the TCB list is too time-consuming and
	 * resource-intensive to repeat twice on every request.
	 */
	if (req->oldptr == NULL) {
		for (ccpu = 0; ccpu < ncpus; ++ccpu) {
			gd = globaldata_find(ccpu);
			n += tcbinfo[gd->gd_cpuid].ipi_count;
		}
		req->oldidx = (n + n/8 + 10) * sizeof(struct xtcpcb);
		return (0);
	}

	if (req->newptr != NULL)
		return (EPERM);

	marker = kmalloc(sizeof(struct inpcb), M_TEMP, M_WAITOK|M_ZERO);
	marker->inp_flags |= INP_PLACEMARKER;

	/*
	 * OK, now we're committed to doing something.  Run the inpcb list
	 * for each cpu in the system and construct the output.  Use a
	 * list placemarker to deal with list changes occurring during
	 * copyout blockages (but otherwise depend on being on the correct
	 * cpu to avoid races).
	 */
	origcpu = mycpu->gd_cpuid;
	for (ccpu = 1; ccpu <= ncpus && error == 0; ++ccpu) {
		globaldata_t rgd;
		caddr_t inp_ppcb;
		struct xtcpcb xt;
		u_quad_t gencnt;
		int cpu_id;

		cpu_id = (origcpu + ccpu) % ncpus;
		if ((smp_active_mask & (1 << cpu_id)) == 0)
			continue;
		rgd = globaldata_find(cpu_id);
		lwkt_setcpu_self(rgd);

		gencnt = tcbinfo[cpu_id].ipi_gencnt;
		n = tcbinfo[cpu_id].ipi_count;

		LIST_INSERT_HEAD(&tcbinfo[cpu_id].pcblisthead, marker, inp_list);
		i = 0;
		while ((inp = LIST_NEXT(marker, inp_list)) != NULL && i < n) {
			/*
			 * process a snapshot of pcbs, ignoring placemarkers
			 * and using our own to allow SYSCTL_OUT to block.
			 */
			LIST_REMOVE(marker, inp_list);
			LIST_INSERT_AFTER(inp, marker, inp_list);

			if (inp->inp_flags & INP_PLACEMARKER)
				continue;
			if (inp->inp_gencnt > gencnt)
				continue;
			if (prison_xinpcb(req->td, inp))
				continue;

			xt.xt_len = sizeof xt;
			bcopy(inp, &xt.xt_inp, sizeof *inp);
			inp_ppcb = inp->inp_ppcb;
			if (inp_ppcb != NULL)
				bcopy(inp_ppcb, &xt.xt_tp, sizeof xt.xt_tp);
			else
				bzero(&xt.xt_tp, sizeof xt.xt_tp);
			if (inp->inp_socket)
				sotoxsocket(inp->inp_socket, &xt.xt_socket);
			if ((error = SYSCTL_OUT(req, &xt, sizeof xt)) != 0)
				break;
			++i;
		}
		LIST_REMOVE(marker, inp_list);
		if (error == 0 && i < n) {
			bzero(&xt, sizeof xt);
			xt.xt_len = sizeof xt;
			while (i < n) {
				error = SYSCTL_OUT(req, &xt, sizeof xt);
				if (error)
					break;
				++i;
			}
		}
	}

	/*
	 * Make sure we are on the same cpu we were on originally, since
	 * higher level callers expect this.  Also don't pollute caches with
	 * migrated userland data by (eventually) returning to userland
	 * on a different cpu.
	 */
	lwkt_setcpu_self(globaldata_find(origcpu));
	kfree(marker, M_TEMP);
	return (error);
}
SYSCTL_PROC(_net_inet_tcp, TCPCTL_PCBLIST, pcblist, CTLFLAG_RD, 0, 0,
	    tcp_pcblist, "S,xtcpcb", "List of active TCP connections");
static int
tcp_getcred(SYSCTL_HANDLER_ARGS)
{
	struct sockaddr_in addrs[2];
	struct inpcb *inp;
	int cpu;
	int error;

	error = priv_check(req->td, PRIV_ROOT);
	if (error != 0)
		return (error);
	error = SYSCTL_IN(req, addrs, sizeof addrs);
	if (error != 0)
		return (error);
	crit_enter();
	cpu = tcp_addrcpu(addrs[1].sin_addr.s_addr, addrs[1].sin_port,
	    addrs[0].sin_addr.s_addr, addrs[0].sin_port);
	inp = in_pcblookup_hash(&tcbinfo[cpu], addrs[1].sin_addr,
	    addrs[1].sin_port, addrs[0].sin_addr, addrs[0].sin_port, 0, NULL);
	if (inp == NULL || inp->inp_socket == NULL) {
		error = ENOENT;
		goto out;
	}
	error = SYSCTL_OUT(req, inp->inp_socket->so_cred, sizeof(struct ucred));
out:
	crit_exit();
	return (error);
}

SYSCTL_PROC(_net_inet_tcp, OID_AUTO, getcred, (CTLTYPE_OPAQUE | CTLFLAG_RW),
    0, 0, tcp_getcred, "S,ucred", "Get the ucred of a TCP connection");
#ifdef INET6
static int
tcp6_getcred(SYSCTL_HANDLER_ARGS)
{
	struct sockaddr_in6 addrs[2];
	struct inpcb *inp;
	int error;
	boolean_t mapped = FALSE;

	error = priv_check(req->td, PRIV_ROOT);
	if (error != 0)
		return (error);
	error = SYSCTL_IN(req, addrs, sizeof addrs);
	if (error != 0)
		return (error);
	if (IN6_IS_ADDR_V4MAPPED(&addrs[0].sin6_addr)) {
		if (IN6_IS_ADDR_V4MAPPED(&addrs[1].sin6_addr))
			mapped = TRUE;
		else
			return (EINVAL);
	}
	crit_enter();
	if (mapped) {
		inp = in_pcblookup_hash(&tcbinfo[0],
		    *(struct in_addr *)&addrs[1].sin6_addr.s6_addr[12],
		    addrs[1].sin6_port,
		    *(struct in_addr *)&addrs[0].sin6_addr.s6_addr[12],
		    addrs[0].sin6_port, 0, NULL);
	} else {
		inp = in6_pcblookup_hash(&tcbinfo[0],
		    &addrs[1].sin6_addr, addrs[1].sin6_port,
		    &addrs[0].sin6_addr, addrs[0].sin6_port, 0, NULL);
	}
	if (inp == NULL || inp->inp_socket == NULL) {
		error = ENOENT;
		goto out;
	}
	error = SYSCTL_OUT(req, inp->inp_socket->so_cred, sizeof(struct ucred));
out:
	crit_exit();
	return (error);
}

SYSCTL_PROC(_net_inet6_tcp6, OID_AUTO, getcred, (CTLTYPE_OPAQUE | CTLFLAG_RW),
	    0, 0,
	    tcp6_getcred, "S,ucred", "Get the ucred of a TCP6 connection");
#endif
struct netmsg_tcp_notify {
	struct netmsg_base base;
	void		(*nm_notify)(struct inpcb *, int);
	struct in_addr	nm_faddr;
	int		nm_arg;
};

static void
tcp_notifyall_oncpu(netmsg_t msg)
{
	struct netmsg_tcp_notify *nm = (struct netmsg_tcp_notify *)msg;
	int nextcpu;

	in_pcbnotifyall(&tcbinfo[mycpuid].pcblisthead, nm->nm_faddr,
			nm->nm_arg, nm->nm_notify);

	nextcpu = mycpuid + 1;
	if (nextcpu < ncpus2)
		lwkt_forwardmsg(cpu_portfn(nextcpu), &nm->base.lmsg);
	else
		lwkt_replymsg(&nm->base.lmsg, 0);
}
)
1351 int cmd
= msg
->ctlinput
.nm_cmd
;
1352 struct sockaddr
*sa
= msg
->ctlinput
.nm_arg
;
1353 struct ip
*ip
= msg
->ctlinput
.nm_extra
;
1355 struct in_addr faddr
;
1358 void (*notify
)(struct inpcb
*, int) = tcp_notify
;
1362 if ((unsigned)cmd
>= PRC_NCMDS
|| inetctlerrmap
[cmd
] == 0) {
1366 faddr
= ((struct sockaddr_in
*)sa
)->sin_addr
;
1367 if (sa
->sa_family
!= AF_INET
|| faddr
.s_addr
== INADDR_ANY
)
1370 arg
= inetctlerrmap
[cmd
];
1371 if (cmd
== PRC_QUENCH
) {
1372 notify
= tcp_quench
;
1373 } else if (icmp_may_rst
&&
1374 (cmd
== PRC_UNREACH_ADMIN_PROHIB
||
1375 cmd
== PRC_UNREACH_PORT
||
1376 cmd
== PRC_TIMXCEED_INTRANS
) &&
1378 notify
= tcp_drop_syn_sent
;
1379 } else if (cmd
== PRC_MSGSIZE
) {
1380 struct icmp
*icmp
= (struct icmp
*)
1381 ((caddr_t
)ip
- offsetof(struct icmp
, icmp_ip
));
1383 arg
= ntohs(icmp
->icmp_nextmtu
);
1384 notify
= tcp_mtudisc
;
1385 } else if (PRC_IS_REDIRECT(cmd
)) {
1387 notify
= in_rtchange
;
1388 } else if (cmd
== PRC_HOSTDEAD
) {
1394 th
= (struct tcphdr
*)((caddr_t
)ip
+
1395 (IP_VHL_HL(ip
->ip_vhl
) << 2));
1396 cpu
= tcp_addrcpu(faddr
.s_addr
, th
->th_dport
,
1397 ip
->ip_src
.s_addr
, th
->th_sport
);
1398 inp
= in_pcblookup_hash(&tcbinfo
[cpu
], faddr
, th
->th_dport
,
1399 ip
->ip_src
, th
->th_sport
, 0, NULL
);
1400 if ((inp
!= NULL
) && (inp
->inp_socket
!= NULL
)) {
1401 icmpseq
= htonl(th
->th_seq
);
1402 tp
= intotcpcb(inp
);
1403 if (SEQ_GEQ(icmpseq
, tp
->snd_una
) &&
1404 SEQ_LT(icmpseq
, tp
->snd_max
))
1405 (*notify
)(inp
, arg
);
1407 struct in_conninfo inc
;
1409 inc
.inc_fport
= th
->th_dport
;
1410 inc
.inc_lport
= th
->th_sport
;
1411 inc
.inc_faddr
= faddr
;
1412 inc
.inc_laddr
= ip
->ip_src
;
1416 syncache_unreach(&inc
, th
);
1420 struct netmsg_tcp_notify
*nm
;
1422 KKASSERT(&curthread
->td_msgport
== cpu_portfn(0));
1423 nm
= kmalloc(sizeof(*nm
), M_LWKTMSG
, M_INTWAIT
);
1424 netmsg_init(&nm
->base
, NULL
, &netisr_afree_rport
,
1425 0, tcp_notifyall_oncpu
);
1426 nm
->nm_faddr
= faddr
;
1428 nm
->nm_notify
= notify
;
1430 lwkt_sendmsg(cpu_portfn(0), &nm
->base
.lmsg
);
1433 lwkt_replymsg(&msg
->lmsg
, 0);
#ifdef INET6
void
tcp6_ctlinput(netmsg_t msg)
{
	int cmd = msg->ctlinput.nm_cmd;
	struct sockaddr *sa = msg->ctlinput.nm_arg;
	void *d = msg->ctlinput.nm_extra;
	struct tcphdr th;
	void (*notify) (struct inpcb *, int) = tcp_notify;
	struct ip6_hdr *ip6;
	struct mbuf *m;
	struct ip6ctlparam *ip6cp = NULL;
	const struct sockaddr_in6 *sa6_src = NULL;
	int off;
	struct tcp_portonly {
		u_int16_t th_sport;
		u_int16_t th_dport;
	} *thp;
	int arg;

	if (sa->sa_family != AF_INET6 ||
	    sa->sa_len != sizeof(struct sockaddr_in6)) {
		goto out;
	}

	arg = 0;
	if (cmd == PRC_QUENCH)
		notify = tcp_quench;
	else if (cmd == PRC_MSGSIZE) {
		struct ip6ctlparam *ip6cp = d;
		struct icmp6_hdr *icmp6 = ip6cp->ip6c_icmp6;

		arg = ntohl(icmp6->icmp6_mtu);
		notify = tcp_mtudisc;
	} else if (!PRC_IS_REDIRECT(cmd) &&
		 ((unsigned)cmd > PRC_NCMDS || inet6ctlerrmap[cmd] == 0)) {
		goto out;
	}

	/* if the parameter is from icmp6, decode it. */
	if (d != NULL) {
		ip6cp = (struct ip6ctlparam *)d;
		m = ip6cp->ip6c_m;
		ip6 = ip6cp->ip6c_ip6;
		off = ip6cp->ip6c_off;
		sa6_src = ip6cp->ip6c_src;
	} else {
		m = NULL;
		ip6 = NULL;
		off = 0;	/* fool gcc */
		sa6_src = &sa6_any;
	}

	if (ip6 != NULL) {
		struct in_conninfo inc;
		/*
		 * XXX: We assume that when IPV6 is non NULL,
		 * M and OFF are valid.
		 */

		/* check if we can safely examine src and dst ports */
		if (m->m_pkthdr.len < off + sizeof *thp)
			goto out;

		bzero(&th, sizeof th);
		m_copydata(m, off, sizeof *thp, (caddr_t)&th);

		in6_pcbnotify(&tcbinfo[0].pcblisthead, sa, th.th_dport,
			      (struct sockaddr *)ip6cp->ip6c_src,
			      th.th_sport, cmd, arg, notify);

		inc.inc_fport = th.th_dport;
		inc.inc_lport = th.th_sport;
		inc.inc6_faddr = ((struct sockaddr_in6 *)sa)->sin6_addr;
		inc.inc6_laddr = ip6cp->ip6c_src->sin6_addr;
		inc.inc_isipv6 = 1;
		syncache_unreach(&inc, &th);
	} else {
		in6_pcbnotify(&tcbinfo[0].pcblisthead, sa, 0,
			      (const struct sockaddr *)sa6_src,
			      0, cmd, arg, notify);
	}
out:
	lwkt_replymsg(&msg->ctlinput.base.lmsg, 0);
}
#endif
/*
 * Following is where TCP initial sequence number generation occurs.
 *
 * There are two places where we must use initial sequence numbers:
 * 1.  In SYN-ACK packets.
 * 2.  In SYN packets.
 *
 * All ISNs for SYN-ACK packets are generated by the syncache.  See
 * tcp_syncache.c for details.
 *
 * The ISNs in SYN packets must be monotonic; TIME_WAIT recycling
 * depends on this property.  In addition, these ISNs should be
 * unguessable so as to prevent connection hijacking.  To satisfy
 * the requirements of this situation, the algorithm outlined in
 * RFC 1948 is used to generate sequence numbers.
 *
 * Implementation details:
 *
 * Time is based off the system timer, and is corrected so that it
 * increases by one megabyte per second.  This allows for proper
 * recycling on high speed LANs while still leaving over an hour
 * of lifetime for this single connection.
 *
 * net.inet.tcp.isn_reseed_interval controls the number of seconds
 * between seeding of isn_secret.  This is normally set to zero,
 * as reseeding should not be necessary.
 */
#define ISN_BYTES_PER_SECOND 1048576
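/*
 * Illustrative arithmetic for the constant above (not from the
 * original): at 1048576 bytes/second the time-dependent term advances
 * the ISN by ISN_BYTES_PER_SECOND / hz per tick (10485 with hz = 100),
 * and the full 32-bit sequence space wraps after 2^32 / 1048576 = 4096
 * seconds, i.e. roughly 68 minutes -- the "over an hour of lifetime"
 * cited in the comment above.
 */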
u_char isn_secret[32];
int isn_last_reseed;
MD5_CTX isn_ctx;

tcp_seq
tcp_new_isn(struct tcpcb *tp)
{
	u_int32_t md5_buffer[4];
	tcp_seq new_isn;

	/* Seed if this is the first use, reseed if requested. */
	if ((isn_last_reseed == 0) || ((tcp_isn_reseed_interval > 0) &&
	    (((u_int)isn_last_reseed + (u_int)tcp_isn_reseed_interval*hz)
	     < (u_int)ticks))) {
		read_random_unlimited(&isn_secret, sizeof isn_secret);
		isn_last_reseed = ticks;
	}

	/* Compute the md5 hash and return the ISN. */
	MD5Init(&isn_ctx);
	MD5Update(&isn_ctx, (u_char *)&tp->t_inpcb->inp_fport, sizeof(u_short));
	MD5Update(&isn_ctx, (u_char *)&tp->t_inpcb->inp_lport, sizeof(u_short));
#ifdef INET6
	if (tp->t_inpcb->inp_vflag & INP_IPV6) {
		MD5Update(&isn_ctx, (u_char *) &tp->t_inpcb->in6p_faddr,
			  sizeof(struct in6_addr));
		MD5Update(&isn_ctx, (u_char *) &tp->t_inpcb->in6p_laddr,
			  sizeof(struct in6_addr));
	} else
#endif
	{
		MD5Update(&isn_ctx, (u_char *) &tp->t_inpcb->inp_faddr,
			  sizeof(struct in_addr));
		MD5Update(&isn_ctx, (u_char *) &tp->t_inpcb->inp_laddr,
			  sizeof(struct in_addr));
	}
	MD5Update(&isn_ctx, (u_char *) &isn_secret, sizeof(isn_secret));
	MD5Final((u_char *) &md5_buffer, &isn_ctx);
	new_isn = (tcp_seq) md5_buffer[0];
	new_isn += ticks * (ISN_BYTES_PER_SECOND / hz);
	return (new_isn);
}
/*
 * When a source quench is received, close congestion window
 * to one segment.  We will gradually open it again as we proceed.
 */
void
tcp_quench(struct inpcb *inp, int error)
{
	struct tcpcb *tp = intotcpcb(inp);

	if (tp != NULL)
		tp->snd_cwnd = tp->t_maxseg;
}

/*
 * When a specific ICMP unreachable message is received and the
 * connection state is SYN-SENT, drop the connection.  This behavior
 * is controlled by the icmp_may_rst sysctl.
 */
void
tcp_drop_syn_sent(struct inpcb *inp, int error)
{
	struct tcpcb *tp = intotcpcb(inp);

	if ((tp != NULL) && (tp->t_state == TCPS_SYN_SENT))
		tcp_drop(tp, error);
}
/*
 * When a `need fragmentation' ICMP is received, update our idea of the MSS
 * based on the new value in the route.  Also nudge TCP to send something,
 * since we know the packet we just sent was dropped.
 * This duplicates some code in the tcp_mss() function in tcp_input.c.
 */
void
tcp_mtudisc(struct inpcb *inp, int mtu)
{
	struct tcpcb *tp = intotcpcb(inp);
	struct rtentry *rt;
	struct socket *so = inp->inp_socket;
	int maxopd, mss;
#ifdef INET6
	boolean_t isipv6 = ((tp->t_inpcb->inp_vflag & INP_IPV6) != 0);
#else
	const boolean_t isipv6 = FALSE;
#endif

	if (tp == NULL)
		return;

	/*
	 * If no MTU is provided in the ICMP message, use the
	 * next lower likely value, as specified in RFC 1191.
	 */
	if (mtu == 0) {
		int oldmtu;

		oldmtu = tp->t_maxopd +
			 (isipv6 ?
			  sizeof(struct ip6_hdr) + sizeof(struct tcphdr) :
			  sizeof(struct tcpiphdr));
		mtu = ip_next_mtu(oldmtu, 0);
	}
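	/*
	 * Worked example of the RFC 1191 fallback above (illustrative, not
	 * from the original): for a connection that had been using a
	 * 1500-byte path, ip_next_mtu() steps down to the next common MTU
	 * plateau (1492, for IEEE 802.3), so path MTU probing continues
	 * even when the ICMP message carries no explicit MTU value.
	 */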
	if (isipv6)
		rt = tcp_rtlookup6(&inp->inp_inc);
	else
		rt = tcp_rtlookup(&inp->inp_inc);
	if (rt != NULL) {
		if (rt->rt_rmx.rmx_mtu != 0 && rt->rt_rmx.rmx_mtu < mtu)
			mtu = rt->rt_rmx.rmx_mtu;

		maxopd = mtu -
			 (isipv6 ?
			  sizeof(struct ip6_hdr) + sizeof(struct tcphdr) :
			  sizeof(struct tcpiphdr));

		/*
		 * XXX - The following conditional probably violates the TCP
		 * spec.  The problem is that, since we don't know the
		 * other end's MSS, we are supposed to use a conservative
		 * default.  But, if we do that, then MTU discovery will
		 * never actually take place, because the conservative
		 * default is much less than the MTUs typically seen
		 * on the Internet today.  For the moment, we'll sweep
		 * this under the carpet.
		 *
		 * The conservative default might not actually be a problem
		 * if the only case this occurs is when sending an initial
		 * SYN with options and data to a host we've never talked
		 * to before.  Then, they will reply with an MSS value which
		 * will get recorded and the new parameters should get
		 * recomputed.  For Further Study.
		 */
		if (rt->rt_rmx.rmx_mssopt && rt->rt_rmx.rmx_mssopt < maxopd)
			maxopd = rt->rt_rmx.rmx_mssopt;
	} else
		maxopd = mtu -
			 (isipv6 ?
			  sizeof(struct ip6_hdr) + sizeof(struct tcphdr) :
			  sizeof(struct tcpiphdr));

	if (tp->t_maxopd <= maxopd)
		return;
	tp->t_maxopd = maxopd;

	mss = maxopd;
	if ((tp->t_flags & (TF_REQ_TSTMP | TF_RCVD_TSTMP | TF_NOOPT)) ==
			   (TF_REQ_TSTMP | TF_RCVD_TSTMP))
		mss -= TCPOLEN_TSTAMP_APPA;

	/* round down to multiple of MCLBYTES */
#if	(MCLBYTES & (MCLBYTES - 1)) == 0    /* test if MCLBYTES power of 2 */
	if (mss > MCLBYTES)
		mss &= ~(MCLBYTES - 1);
#else
	if (mss > MCLBYTES)
		mss = (mss / MCLBYTES) * MCLBYTES;
#endif

	if (so->so_snd.ssb_hiwat < mss)
		mss = so->so_snd.ssb_hiwat;

	tp->t_maxseg = mss;
	tp->snd_nxt = tp->snd_una;
	tcp_output(tp);
	tcpstat.tcps_mturesent++;
}
/*
 * Look-up the routing entry to the peer of this inpcb.  If no route
 * is found and it cannot be allocated then return NULL.  This routine
 * is called by TCP routines that access the rmx structure and by tcp_mss
 * to get the interface MTU.
 */
struct rtentry *
tcp_rtlookup(struct in_conninfo *inc)
{
	struct route *ro = &inc->inc_route;

	if (ro->ro_rt == NULL || !(ro->ro_rt->rt_flags & RTF_UP)) {
		/* No route yet, so try to acquire one */
		if (inc->inc_faddr.s_addr != INADDR_ANY) {
			/*
			 * unused portions of the structure MUST be zero'd
			 * out because rtalloc() treats it as opaque data
			 */
			bzero(&ro->ro_dst, sizeof(struct sockaddr_in));
			ro->ro_dst.sa_family = AF_INET;
			ro->ro_dst.sa_len = sizeof(struct sockaddr_in);
			((struct sockaddr_in *) &ro->ro_dst)->sin_addr =
			    inc->inc_faddr;
			rtalloc(ro);
		}
	}
	return (ro->ro_rt);
}

#ifdef INET6
struct rtentry *
tcp_rtlookup6(struct in_conninfo *inc)
{
	struct route_in6 *ro6 = &inc->inc6_route;

	if (ro6->ro_rt == NULL || !(ro6->ro_rt->rt_flags & RTF_UP)) {
		/* No route yet, so try to acquire one */
		if (!IN6_IS_ADDR_UNSPECIFIED(&inc->inc6_faddr)) {
			/*
			 * unused portions of the structure MUST be zero'd
			 * out because rtalloc() treats it as opaque data
			 */
			bzero(&ro6->ro_dst, sizeof(struct sockaddr_in6));
			ro6->ro_dst.sin6_family = AF_INET6;
			ro6->ro_dst.sin6_len = sizeof(struct sockaddr_in6);
			ro6->ro_dst.sin6_addr = inc->inc6_faddr;
			rtalloc((struct route *)ro6);
		}
	}
	return (ro6->ro_rt);
}
#endif
#ifdef IPSEC
/* compute ESP/AH header size for TCP, including outer IP header. */
size_t
ipsec_hdrsiz_tcp(struct tcpcb *tp)
{
	struct inpcb *inp;
	struct mbuf *m;
	size_t hdrsiz = 0;
	struct ip *ip;
	struct tcphdr *th;

	if ((tp == NULL) || ((inp = tp->t_inpcb) == NULL))
		return (0);
	MGETHDR(m, MB_DONTWAIT, MT_DATA);
	if (!m)
		return (0);

#ifdef INET6
	if (inp->inp_vflag & INP_IPV6) {
		struct ip6_hdr *ip6 = mtod(m, struct ip6_hdr *);

		th = (struct tcphdr *)(ip6 + 1);
		m->m_pkthdr.len = m->m_len =
		    sizeof(struct ip6_hdr) + sizeof(struct tcphdr);
		tcp_fillheaders(tp, ip6, th);
		hdrsiz = ipsec6_hdrsiz(m, IPSEC_DIR_OUTBOUND, inp);
	} else
#endif
	{
		ip = mtod(m, struct ip *);
		th = (struct tcphdr *)(ip + 1);
		m->m_pkthdr.len = m->m_len = sizeof(struct tcpiphdr);
		tcp_fillheaders(tp, ip, th);
		hdrsiz = ipsec4_hdrsiz(m, IPSEC_DIR_OUTBOUND, inp);
	}

	m_free(m);
	return (hdrsiz);
}
#endif
/*
 * TCP BANDWIDTH DELAY PRODUCT WINDOW LIMITING
 *
 * This code attempts to calculate the bandwidth-delay product as a
 * means of determining the optimal window size to maximize bandwidth,
 * minimize RTT, and avoid the over-allocation of buffers on interfaces and
 * routers.  This code also does a fairly good job keeping RTTs in check
 * across slow links like modems.  We implement an algorithm which is very
 * similar to (but not meant to be) TCP/Vegas.  The code operates on the
 * transmitter side of a TCP connection and so only affects the transmit
 * side of the connection.
 *
 * BACKGROUND: TCP makes no provision for the management of buffer space
 * at the end points or at the intermediate routers and switches.  A TCP
 * stream, whether using NewReno or not, will eventually buffer as
 * many packets as it is able and the only reason this typically works is
 * due to the fairly small default buffers made available for a connection
 * (typically 16K or 32K).  As machines use larger windows and/or window
 * scaling it is now fairly easy for even a single TCP connection to blow-out
 * all available buffer space not only on the local interface, but on
 * intermediate routers and switches as well.  NewReno makes a misguided
 * attempt to 'solve' this problem by waiting for an actual failure to occur,
 * then backing off, then steadily increasing the window again until another
 * failure occurs, ad-infinitum.  This results in terrible oscillation that
 * is only made worse as network loads increase and the idea of intentionally
 * blowing out network buffers is, frankly, a terrible way to manage network
 * resources.
 *
 * It is far better to limit the transmit window prior to the failure
 * condition being achieved.  There are two general ways to do this:  First
 * you can 'scan' through different transmit window sizes and locate the
 * point where the RTT stops increasing, indicating that you have filled the
 * pipe, then scan backwards until you note that RTT stops decreasing, then
 * repeat ad-infinitum.  This method works in principle but has severe
 * implementation issues due to RTT variances, timer granularity, and
 * instability in the algorithm which can lead to many false positives and
 * create oscillations as well as interact badly with other TCP streams
 * implementing the same algorithm.
 *
 * The second method is to limit the window to the bandwidth delay product
 * of the link.  This is the method we implement.  RTT variances and our
 * own manipulation of the congestion window, bwnd, can potentially
 * destabilize the algorithm.  For this reason we have to stabilize the
 * elements used to calculate the window.  We do this by using the minimum
 * observed RTT, the long term average of the observed bandwidth, and
 * by adding two segments worth of slop.  It isn't perfect but it is able
 * to react to changing conditions and gives us a very stable basis on
 * which to extend the algorithm.
 */
void
tcp_xmit_bandwidth_limit(struct tcpcb *tp, tcp_seq ack_seq)
{
	u_long bw;
	u_long bwnd;
	int save_ticks;
	int delta_ticks;

	/*
	 * If inflight_enable is disabled in the middle of a tcp connection,
	 * make sure snd_bwnd is effectively disabled.
	 */
	if (!tcp_inflight_enable) {
		tp->snd_bwnd = TCP_MAXWIN << TCP_MAX_WINSHIFT;
		tp->snd_bandwidth = 0;
		return;
	}

	/*
	 * Validate the delta time.  If a connection is new or has been idle
	 * a long time we have to reset the bandwidth calculator.
	 */
	save_ticks = ticks;
	delta_ticks = save_ticks - tp->t_bw_rtttime;
	if (tp->t_bw_rtttime == 0 || delta_ticks < 0 || delta_ticks > hz * 10) {
		tp->t_bw_rtttime = ticks;
		tp->t_bw_rtseq = ack_seq;
		if (tp->snd_bandwidth == 0)
			tp->snd_bandwidth = tcp_inflight_min;
		return;
	}
	if (delta_ticks == 0)
		return;

	/*
	 * Sanity check, plus ignore pure window update acks.
	 */
	if ((int)(ack_seq - tp->t_bw_rtseq) <= 0)
		return;

	/*
	 * Figure out the bandwidth.  Due to the tick granularity this
	 * is a very rough number and it MUST be averaged over a fairly
	 * long period of time.  XXX we need to take into account a link
	 * that is not using all available bandwidth, but for now our
	 * slop will ramp us up if this case occurs and the bandwidth later
	 * increases.
	 */
	bw = (int64_t)(ack_seq - tp->t_bw_rtseq) * hz / delta_ticks;
	tp->t_bw_rtttime = save_ticks;
	tp->t_bw_rtseq = ack_seq;
	bw = ((int64_t)tp->snd_bandwidth * 15 + bw) >> 4;
	tp->snd_bandwidth = bw;
	/*
	 * Calculate the semi-static bandwidth delay product, plus two maximal
	 * segments.  The additional slop puts us squarely in the sweet
	 * spot and also handles the bandwidth run-up case.  Without the
	 * slop we could be locking ourselves into a lower bandwidth.
	 *
	 * Situations Handled:
	 *	(1) Prevents over-queueing of packets on LANs, especially on
	 *	    high speed LANs, allowing larger TCP buffers to be
	 *	    specified, and also does a good job preventing
	 *	    over-queueing of packets over choke points like modems
	 *	    (at least for the transmit side).
	 *
	 *	(2) Is able to handle changing network loads (bandwidth
	 *	    drops so bwnd drops, bandwidth increases so bwnd
	 *	    increases).
	 *
	 *	(3) Theoretically should stabilize in the face of multiple
	 *	    connections implementing the same algorithm (this may need
	 *	    a bit more work).
	 *
	 *	(4) Stability value (defaults to 50 = 5 maximal packets) can
	 *	    be adjusted with a sysctl but typically only needs to be
	 *	    adjusted on very slow connections.  A value no smaller
	 *	    than 5 should be used, but only reduce this default if
	 *	    you have no other choice.
	 */
#define USERTT	((tp->t_srtt + tp->t_rttbest) / 2)
	bwnd = (int64_t)bw * USERTT / (hz << TCP_RTT_SHIFT) +
	       tcp_inflight_stab * (int)tp->t_maxseg / 10;
#undef USERTT
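	/*
	 * Worked example of the formula above (illustrative figures, not
	 * from the original): with a smoothed bandwidth of 1,000,000
	 * bytes/sec and a USERTT equivalent to 50 ms, the bandwidth delay
	 * product term is 1000000 * 0.050 = 50000 bytes; the default
	 * inflight_stab of 50 with a 1460-byte t_maxseg adds
	 * 50 * 1460 / 10 = 7300 bytes of slop, giving bwnd of about 57300
	 * bytes before the clamps below are applied.
	 */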
	if (tcp_inflight_debug > 0) {
		static int ltime;

		if ((u_int)(ticks - ltime) >= hz / tcp_inflight_debug) {
			ltime = ticks;
			kprintf("%p bw %ld rttbest %d srtt %d bwnd %ld\n",
				tp, bw, tp->t_rttbest, tp->t_srtt, bwnd);
		}
	}
	if ((long)bwnd < tcp_inflight_min)
		bwnd = tcp_inflight_min;
	if (bwnd > tcp_inflight_max)
		bwnd = tcp_inflight_max;
	if ((long)bwnd < tp->t_maxseg * 2)
		bwnd = tp->t_maxseg * 2;
	tp->snd_bwnd = bwnd;
}
#ifdef TCP_SIGNATURE
/*
 * Compute TCP-MD5 hash of a TCP segment. (RFC2385)
 *
 * We do this over ip, tcphdr, segment data, and the key in the SADB.
 * When called from tcp_input(), we can be sure that th_sum has been
 * zeroed out and verified already.
 *
 * Return 0 if successful, otherwise return -1.
 *
 * XXX The key is retrieved from the system's PF_KEY SADB, by keying a
 * search with the destination IP address, and a 'magic SPI' to be
 * determined by the application. This is hardcoded elsewhere to 1179
 * right now. Another branch of this code exists which uses the SPD to
 * specify per-application flows but it is unstable.
 */
int
tcpsignature_compute(
	struct mbuf *m,		/* mbuf chain */
	int len,		/* length of TCP data */
	int optlen,		/* length of TCP options */
	u_char *buf,		/* storage for MD5 digest */
	u_int direction)	/* direction of flow */
{
	struct ippseudo ippseudo;
	MD5_CTX ctx;
	int doff;
	struct ip *ip;
	struct ipovly *ipovly;
	struct secasvar *sav;
	struct tcphdr *th;
#ifdef INET6
	struct ip6_hdr *ip6;
	struct in6_addr in6;
	uint32_t plen;
	uint16_t nhdr;
#endif
	u_short savecsum;
	KASSERT(m != NULL, ("passed NULL mbuf. Game over."));
	KASSERT(buf != NULL, ("passed NULL storage pointer for MD5 signature"));

	/*
	 * Extract the destination from the IP header in the mbuf.
	 */
	ip = mtod(m, struct ip *);
#ifdef INET6
	ip6 = NULL;	/* Make the compiler happy. */
#endif

	/*
	 * Look up an SADB entry which matches the address found in
	 * the packet.
	 */
	switch (IP_VHL_V(ip->ip_vhl)) {
	case IPVERSION:
		sav = key_allocsa(AF_INET, (caddr_t)&ip->ip_src,
				  (caddr_t)&ip->ip_dst,
				  IPPROTO_TCP, htonl(TCP_SIG_SPI));
		break;
#ifdef INET6
	case (IPV6_VERSION >> 4):
		ip6 = mtod(m, struct ip6_hdr *);
		sav = key_allocsa(AF_INET6, (caddr_t)&ip6->ip6_src,
				  (caddr_t)&ip6->ip6_dst,
				  IPPROTO_TCP, htonl(TCP_SIG_SPI));
		break;
#endif
	default:
		return (EINVAL);
		/* NOTREACHED */
	}
	if (sav == NULL) {
		kprintf("%s: SADB lookup failed\n", __func__);
		return (EINVAL);
	}

	MD5Init(&ctx);
	/*
	 * Step 1: Update MD5 hash with IP pseudo-header.
	 *
	 * XXX The ippseudo header MUST be digested in network byte order,
	 * or else we'll fail the regression test. Assume all fields we've
	 * been doing arithmetic on have been in host byte order.
	 * XXX One cannot depend on ipovly->ih_len here. When called from
	 * tcp_output(), the underlying ip_len member has not yet been set.
	 */
	switch (IP_VHL_V(ip->ip_vhl)) {
	case IPVERSION:
		ipovly = (struct ipovly *)ip;
		ippseudo.ippseudo_src = ipovly->ih_src;
		ippseudo.ippseudo_dst = ipovly->ih_dst;
		ippseudo.ippseudo_pad = 0;
		ippseudo.ippseudo_p = IPPROTO_TCP;
		ippseudo.ippseudo_len = htons(len + sizeof(struct tcphdr) + optlen);
		MD5Update(&ctx, (char *)&ippseudo, sizeof(struct ippseudo));
		th = (struct tcphdr *)((u_char *)ip + sizeof(struct ip));
		doff = sizeof(struct ip) + sizeof(struct tcphdr) + optlen;
		break;
#ifdef INET6
	/*
	 * RFC 2385, 2.0  Proposal
	 * For IPv6, the pseudo-header is as described in RFC 2460, namely the
	 * 128-bit source IPv6 address, 128-bit destination IPv6 address, zero-
	 * extended next header value (to form 32 bits), and 32-bit segment
	 * length.
	 * Note: Upper-Layer Packet Length comes before Next Header.
	 */
	case (IPV6_VERSION >> 4):
		in6 = ip6->ip6_src;
		in6_clearscope(&in6);
		MD5Update(&ctx, (char *)&in6, sizeof(struct in6_addr));
		in6 = ip6->ip6_dst;
		in6_clearscope(&in6);
		MD5Update(&ctx, (char *)&in6, sizeof(struct in6_addr));
		plen = htonl(len + sizeof(struct tcphdr) + optlen);
		MD5Update(&ctx, (char *)&plen, sizeof(uint32_t));
		nhdr = 0;
		MD5Update(&ctx, (char *)&nhdr, sizeof(uint8_t));
		MD5Update(&ctx, (char *)&nhdr, sizeof(uint8_t));
		MD5Update(&ctx, (char *)&nhdr, sizeof(uint8_t));
		nhdr = IPPROTO_TCP;
		MD5Update(&ctx, (char *)&nhdr, sizeof(uint8_t));
		th = (struct tcphdr *)((u_char *)ip6 + sizeof(struct ip6_hdr));
		doff = sizeof(struct ip6_hdr) + sizeof(struct tcphdr) + optlen;
		break;
#endif
	default:
		return (EINVAL);
		/* NOTREACHED */
	}
	/*
	 * Step 2: Update MD5 hash with TCP header, excluding options.
	 * The TCP checksum must be set to zero.
	 */
	savecsum = th->th_sum;
	th->th_sum = 0;
	MD5Update(&ctx, (char *)th, sizeof(struct tcphdr));
	th->th_sum = savecsum;
	/*
	 * Step 3: Update MD5 hash with TCP segment data.
	 *	   Use m_apply() to avoid an early m_pullup().
	 */
	if (len > 0)
		m_apply(m, doff, len, tcpsignature_apply, &ctx);
	/*
	 * Step 4: Update MD5 hash with shared secret.
	 */
	MD5Update(&ctx, _KEYBUF(sav->key_auth), _KEYLEN(sav->key_auth));
	MD5Final(buf, &ctx);
	key_sa_recordxfer(sav, m);
	return (0);
}
int
tcpsignature_apply(void *fstate, void *data, unsigned int len)
{
	MD5Update((MD5_CTX *)fstate, (unsigned char *)data, len);
	return (0);
}
#endif /* TCP_SIGNATURE */