/*
 * INET		An implementation of the TCP/IP protocol suite for the LINUX
 *		operating system.  INET is implemented using the  BSD Socket
 *		interface as the means of communication with the user level.
 *
 *		Implementation of the Transmission Control Protocol(TCP).
 *
 *		IPv4 specific functions
 *
 *		See also:
 *		linux/ipv4/tcp_input.c
 *		linux/ipv4/tcp_output.c
 *
 *		See tcp.c for author information
 *
 *	This program is free software; you can redistribute it and/or
 *	modify it under the terms of the GNU General Public License
 *	as published by the Free Software Foundation; either version
 *	2 of the License, or (at your option) any later version.
 */

/*
 * Changes:
 *		David S. Miller	:	New socket lookup architecture.
 *					This code is dedicated to John Dyson.
 *		David S. Miller :	Change semantics of established hash,
 *					half is devoted to TIME_WAIT sockets
 *					and the rest go in the other half.
 *		Andi Kleen :		Add support for syncookies and fixed
 *					some bugs: ip options weren't passed to
 *					the TCP layer, missed a check for an
 *					ACK bit.
 *		Andi Kleen :		Implemented fast path mtu discovery.
 *					Fixed many serious bugs in the
 *					request_sock handling and moved
 *					most of it into the af independent code.
 *					Added tail drop and some other bugfixes.
 *					Added new listen semantics.
 *		Mike McLagan	:	Routing by source
 *	Juan Jose Ciarlante:		ip_dynaddr bits
 *		Andi Kleen:		various fixes.
 *	Vitaly E. Lavrov	:	Transparent proxy revived after year
 *					coma.
 *	Andi Kleen		:	Fix new listen.
 *	Andi Kleen		:	Fix accept error reporting.
 *	YOSHIFUJI Hideaki @USAGI and:	Support IPV6_V6ONLY socket option, which
 *	Alexey Kuznetsov		allow both IPv4 and IPv6 sockets to bind
 *					a single port at the same time.
 */
#define pr_fmt(fmt) "TCP: " fmt

#include <linux/bottom_half.h>
#include <linux/types.h>
#include <linux/fcntl.h>
#include <linux/module.h>
#include <linux/random.h>
#include <linux/cache.h>
#include <linux/jhash.h>
#include <linux/init.h>
#include <linux/times.h>
#include <linux/slab.h>

#include <net/net_namespace.h>
#include <net/icmp.h>
#include <net/inet_hashtables.h>
#include <net/tcp.h>
#include <net/transp_v6.h>
#include <net/ipv6.h>
#include <net/inet_common.h>
#include <net/timewait_sock.h>
#include <net/xfrm.h>
#include <net/secure_seq.h>
#include <net/busy_poll.h>

#include <linux/inet.h>
#include <linux/ipv6.h>
#include <linux/stddef.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>

#include <crypto/hash.h>
#include <linux/scatterlist.h>

int sysctl_tcp_low_latency __read_mostly;
#ifdef CONFIG_TCP_MD5SIG
static int tcp_v4_md5_hash_hdr(char *md5_hash, const struct tcp_md5sig_key *key,
			       __be32 daddr, __be32 saddr, const struct tcphdr *th);
#endif
struct inet_hashinfo tcp_hashinfo;
EXPORT_SYMBOL(tcp_hashinfo);
static u32 tcp_v4_init_sequence(const struct sk_buff *skb, u32 *tsoff)
{
	return secure_tcp_sequence_number(ip_hdr(skb)->daddr,
					  ip_hdr(skb)->saddr,
					  tcp_hdr(skb)->dest,
					  tcp_hdr(skb)->source, tsoff);
}
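/*
 * Illustrative sketch (not part of this file): the ISN scheme above follows
 * RFC 6528, ISN = M + F(saddr, daddr, sport, dport, secret), where M is a
 * slow clock and F a keyed hash over the connection 4-tuple. A toy userspace
 * model, with a stand-in mix() instead of the kernel's keyed hash:
 */
#if 0
#include <stdint.h>

static uint32_t mix(uint32_t h, uint32_t v)
{
	h ^= v;			/* fold in one input word */
	h *= 0x9e3779b1u;	/* multiply by a large odd constant */
	return h ^ (h >> 16);
}

/* Toy ISN: keyed 4-tuple hash plus a ~4 us clock, per RFC 6528. */
static uint32_t toy_isn(uint32_t saddr, uint32_t daddr,
			uint16_t sport, uint16_t dport,
			uint32_t secret, uint32_t usec_clock)
{
	uint32_t h = secret;

	h = mix(h, saddr);
	h = mix(h, daddr);
	h = mix(h, ((uint32_t)sport << 16) | dport);
	return h + (usec_clock >> 2);	/* M increments every 4 microseconds */
}
#endif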
int tcp_twsk_unique(struct sock *sk, struct sock *sktw, void *twp)
{
	const struct tcp_timewait_sock *tcptw = tcp_twsk(sktw);
	struct tcp_sock *tp = tcp_sk(sk);

	/* With PAWS, it is safe from the viewpoint
	   of data integrity. Even without PAWS it is safe provided sequence
	   spaces do not overlap i.e. at data rates <= 80Mbit/sec.

	   Actually, the idea is close to VJ's one, only timestamp cache is
	   held not per host, but per port pair and TW bucket is used as state
	   holder.

	   If TW bucket has been already destroyed we fall back to VJ's scheme
	   and use initial timestamp retrieved from peer table.
	 */
	if (tcptw->tw_ts_recent_stamp &&
	    (!twp || (sock_net(sk)->ipv4.sysctl_tcp_tw_reuse &&
		      get_seconds() - tcptw->tw_ts_recent_stamp > 1))) {
		tp->write_seq = tcptw->tw_snd_nxt + 65535 + 2;
		if (tp->write_seq == 0)
			tp->write_seq = 1;
		tp->rx_opt.ts_recent	   = tcptw->tw_ts_recent;
		tp->rx_opt.ts_recent_stamp = tcptw->tw_ts_recent_stamp;
		sock_hold(sktw);
		return 1;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(tcp_twsk_unique);
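/*
 * Usage note (illustrative, userspace): the sysctl_tcp_tw_reuse knob checked
 * above is exposed as /proc/sys/net/ipv4/tcp_tw_reuse. A minimal sketch that
 * enables it, assuming sufficient privileges:
 */
#if 0
#include <stdio.h>

int main(void)
{
	FILE *f = fopen("/proc/sys/net/ipv4/tcp_tw_reuse", "w");

	if (!f)
		return 1;
	fputs("1", f);	/* allow reusing TIME-WAIT ports for outgoing connections */
	fclose(f);
	return 0;
}
#endif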
/* This will initiate an outgoing connection. */
int tcp_v4_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len)
{
	struct sockaddr_in *usin = (struct sockaddr_in *)uaddr;
	struct inet_sock *inet = inet_sk(sk);
	struct tcp_sock *tp = tcp_sk(sk);
	__be16 orig_sport, orig_dport;
	__be32 daddr, nexthop;
	struct flowi4 *fl4;
	struct rtable *rt;
	int err;
	struct ip_options_rcu *inet_opt;
	struct inet_timewait_death_row *tcp_death_row = &sock_net(sk)->ipv4.tcp_death_row;

	if (addr_len < sizeof(struct sockaddr_in))
		return -EINVAL;

	if (usin->sin_family != AF_INET)
		return -EAFNOSUPPORT;

	nexthop = daddr = usin->sin_addr.s_addr;
	inet_opt = rcu_dereference_protected(inet->inet_opt,
					     lockdep_sock_is_held(sk));
	if (inet_opt && inet_opt->opt.srr) {
		if (!daddr)
			return -EINVAL;
		nexthop = inet_opt->opt.faddr;
	}

	orig_sport = inet->inet_sport;
	orig_dport = usin->sin_port;
	fl4 = &inet->cork.fl.u.ip4;
	rt = ip_route_connect(fl4, nexthop, inet->inet_saddr,
			      RT_CONN_FLAGS(sk), sk->sk_bound_dev_if,
			      IPPROTO_TCP,
			      orig_sport, orig_dport, sk);
	if (IS_ERR(rt)) {
		err = PTR_ERR(rt);
		if (err == -ENETUNREACH)
			IP_INC_STATS(sock_net(sk), IPSTATS_MIB_OUTNOROUTES);
		return err;
	}

	if (rt->rt_flags & (RTCF_MULTICAST | RTCF_BROADCAST)) {
		ip_rt_put(rt);
		return -ENETUNREACH;
	}

	if (!inet_opt || !inet_opt->opt.srr)
		daddr = fl4->daddr;

	if (!inet->inet_saddr)
		inet->inet_saddr = fl4->saddr;
	sk_rcv_saddr_set(sk, inet->inet_saddr);

	if (tp->rx_opt.ts_recent_stamp && inet->inet_daddr != daddr) {
		/* Reset inherited state */
		tp->rx_opt.ts_recent	   = 0;
		tp->rx_opt.ts_recent_stamp = 0;
		if (likely(!tp->repair))
			tp->write_seq	   = 0;
	}

	if (tcp_death_row->sysctl_tw_recycle &&
	    !tp->rx_opt.ts_recent_stamp && fl4->daddr == daddr)
		tcp_fetch_timewait_stamp(sk, &rt->dst);

	inet->inet_dport = usin->sin_port;
	sk_daddr_set(sk, daddr);

	inet_csk(sk)->icsk_ext_hdr_len = 0;
	if (inet_opt)
		inet_csk(sk)->icsk_ext_hdr_len = inet_opt->opt.optlen;

	tp->rx_opt.mss_clamp = TCP_MSS_DEFAULT;

	/* Socket identity is still unknown (sport may be zero).
	 * However we set state to SYN-SENT and not releasing socket
	 * lock select source port, enter ourselves into the hash tables and
	 * complete initialization after this.
	 */
	tcp_set_state(sk, TCP_SYN_SENT);
	err = inet_hash_connect(tcp_death_row, sk);
	if (err)
		goto failure;

	sk_set_txhash(sk);

	rt = ip_route_newports(fl4, rt, orig_sport, orig_dport,
			       inet->inet_sport, inet->inet_dport, sk);
	if (IS_ERR(rt)) {
		err = PTR_ERR(rt);
		rt = NULL;
		goto failure;
	}
	/* OK, now commit destination to socket.  */
	sk->sk_gso_type = SKB_GSO_TCPV4;
	sk_setup_caps(sk, &rt->dst);

	if (!tp->write_seq && likely(!tp->repair))
		tp->write_seq = secure_tcp_sequence_number(inet->inet_saddr,
							   inet->inet_daddr,
							   inet->inet_sport,
							   usin->sin_port,
							   &tp->tsoffset);

	inet->inet_id = tp->write_seq ^ jiffies;

	err = tcp_connect(sk);

	rt = NULL;
	if (err)
		goto failure;

	return 0;

failure:
	/*
	 * This unhashes the socket and releases the local port,
	 * if necessary.
	 */
	tcp_set_state(sk, TCP_CLOSE);
	ip_rt_put(rt);
	sk->sk_route_caps = 0;
	inet->inet_dport = 0;
	return err;
}
EXPORT_SYMBOL(tcp_v4_connect);
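/*
 * Usage sketch (illustrative, userspace): tcp_v4_connect() is what runs in
 * the kernel when an application calls connect() on an AF_INET stream
 * socket. A minimal client; the address and port are arbitrary examples:
 */
#if 0
#include <arpa/inet.h>
#include <sys/socket.h>
#include <unistd.h>

int main(void)
{
	struct sockaddr_in dst = {
		.sin_family = AF_INET,
		.sin_port   = htons(80),	/* arbitrary example port */
	};
	int fd = socket(AF_INET, SOCK_STREAM, 0);

	if (fd < 0)
		return 1;
	inet_pton(AF_INET, "192.0.2.1", &dst.sin_addr);	/* documentation address */
	/* Kernel side: route lookup, source selection, SYN-SENT, tcp_connect() */
	if (connect(fd, (struct sockaddr *)&dst, sizeof(dst)) < 0) {
		close(fd);
		return 1;
	}
	close(fd);
	return 0;
}
#endif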
/*
 * This routine reacts to ICMP_FRAG_NEEDED mtu indications as defined in RFC1191.
 * It can be called through tcp_release_cb() if socket was owned by user
 * at the time tcp_v4_err() was called to handle ICMP message.
 */
void tcp_v4_mtu_reduced(struct sock *sk)
{
	struct dst_entry *dst;
	struct inet_sock *inet = inet_sk(sk);
	u32 mtu = tcp_sk(sk)->mtu_info;

	dst = inet_csk_update_pmtu(sk, mtu);
	if (!dst)
		return;

	/* Something is about to be wrong... Remember soft error
	 * for the case, if this connection will not able to recover.
	 */
	if (mtu < dst_mtu(dst) && ip_dont_fragment(sk, dst))
		sk->sk_err_soft = EMSGSIZE;

	mtu = dst_mtu(dst);

	if (inet->pmtudisc != IP_PMTUDISC_DONT &&
	    ip_sk_accept_pmtu(sk) &&
	    inet_csk(sk)->icsk_pmtu_cookie > mtu) {
		tcp_sync_mss(sk, mtu);

		/* Resend the TCP packet because it's
		 * clear that the old packet has been
		 * dropped. This is the new "fast" path mtu
		 * discovery.
		 */
		tcp_simple_retransmit(sk);
	} /* else let the usual retransmit timer handle it */
}
EXPORT_SYMBOL(tcp_v4_mtu_reduced);
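/* Worked example: if an ICMP_FRAG_NEEDED quotes a next-hop MTU of 1400 while
 * icsk_pmtu_cookie is 1500, tcp_sync_mss() re-clamps the MSS to roughly
 * 1400 - 40 = 1360 bytes (20-byte IPv4 header + 20-byte TCP header, before
 * options), and tcp_simple_retransmit() resends the too-big segments.
 */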
static void do_redirect(struct sk_buff *skb, struct sock *sk)
{
	struct dst_entry *dst = __sk_dst_check(sk, 0);

	if (dst)
		dst->ops->redirect(dst, sk, skb);
}
/* handle ICMP messages on TCP_NEW_SYN_RECV request sockets */
void tcp_req_err(struct sock *sk, u32 seq, bool abort)
{
	struct request_sock *req = inet_reqsk(sk);
	struct net *net = sock_net(sk);

	/* ICMPs are not backlogged, hence we cannot get
	 * an established socket here.
	 */
	if (seq != tcp_rsk(req)->snt_isn) {
		__NET_INC_STATS(net, LINUX_MIB_OUTOFWINDOWICMPS);
	} else if (abort) {
		/*
		 * Still in SYN_RECV, just remove it silently.
		 * There is no good way to pass the error to the newly
		 * created socket, and POSIX does not want network
		 * errors returned from accept().
		 */
		inet_csk_reqsk_queue_drop(req->rsk_listener, req);
		tcp_listendrop(req->rsk_listener);
	}
	reqsk_put(req);
}
EXPORT_SYMBOL(tcp_req_err);
/*
 * This routine is called by the ICMP module when it gets some
 * sort of error condition.  If err < 0 then the socket should
 * be closed and the error returned to the user.  If err > 0
 * it's just the icmp type << 8 | icmp code.  After adjustment
 * header points to the first 8 bytes of the tcp header.  We need
 * to find the appropriate port.
 *
 * The locking strategy used here is very "optimistic". When
 * someone else accesses the socket the ICMP is just dropped
 * and for some paths there is no check at all.
 * A more general error queue to queue errors for later handling
 * is probably better.
 */

void tcp_v4_err(struct sk_buff *icmp_skb, u32 info)
{
	const struct iphdr *iph = (const struct iphdr *)icmp_skb->data;
	struct tcphdr *th = (struct tcphdr *)(icmp_skb->data + (iph->ihl << 2));
	struct inet_connection_sock *icsk;
	struct tcp_sock *tp;
	struct inet_sock *inet;
	const int type = icmp_hdr(icmp_skb)->type;
	const int code = icmp_hdr(icmp_skb)->code;
	struct sock *sk;
	struct sk_buff *skb;
	struct request_sock *fastopen;
	u32 seq, snd_una;
	s32 remaining;
	int err;
	struct net *net = dev_net(icmp_skb->dev);

	sk = __inet_lookup_established(net, &tcp_hashinfo, iph->daddr,
				       th->dest, iph->saddr, ntohs(th->source),
				       inet_iif(icmp_skb));
	if (!sk) {
		__ICMP_INC_STATS(net, ICMP_MIB_INERRORS);
		return;
	}
	if (sk->sk_state == TCP_TIME_WAIT) {
		inet_twsk_put(inet_twsk(sk));
		return;
	}
	seq = ntohl(th->seq);
	if (sk->sk_state == TCP_NEW_SYN_RECV)
		return tcp_req_err(sk, seq,
				   type == ICMP_PARAMETERPROB ||
				   type == ICMP_TIME_EXCEEDED ||
				   (type == ICMP_DEST_UNREACH &&
				    (code == ICMP_NET_UNREACH ||
				     code == ICMP_HOST_UNREACH)));

	bh_lock_sock(sk);
	/* If too many ICMPs get dropped on busy
	 * servers this needs to be solved differently.
	 * We do take care of PMTU discovery (RFC1191) special case :
	 * we can receive locally generated ICMP messages while socket is held.
	 */
	if (sock_owned_by_user(sk)) {
		if (!(type == ICMP_DEST_UNREACH && code == ICMP_FRAG_NEEDED))
			__NET_INC_STATS(net, LINUX_MIB_LOCKDROPPEDICMPS);
	}
	if (sk->sk_state == TCP_CLOSE)
		goto out;

	if (unlikely(iph->ttl < inet_sk(sk)->min_ttl)) {
		__NET_INC_STATS(net, LINUX_MIB_TCPMINTTLDROP);
		goto out;
	}

	icsk = inet_csk(sk);
	tp = tcp_sk(sk);
	/* XXX (TFO) - tp->snd_una should be ISN (tcp_create_openreq_child() */
	fastopen = tp->fastopen_rsk;
	snd_una = fastopen ? tcp_rsk(fastopen)->snt_isn : tp->snd_una;
	if (sk->sk_state != TCP_LISTEN &&
	    !between(seq, snd_una, tp->snd_nxt)) {
		__NET_INC_STATS(net, LINUX_MIB_OUTOFWINDOWICMPS);
		goto out;
	}

	switch (type) {
	case ICMP_REDIRECT:
		do_redirect(icmp_skb, sk);
		goto out;
	case ICMP_SOURCE_QUENCH:
		/* Just silently ignore these. */
		goto out;
	case ICMP_PARAMETERPROB:
		err = EPROTO;
		break;
	case ICMP_DEST_UNREACH:
		if (code > NR_ICMP_UNREACH)
			goto out;

		if (code == ICMP_FRAG_NEEDED) { /* PMTU discovery (RFC1191) */
			/* We are not interested in TCP_LISTEN and open_requests
			 * (SYN-ACKs send out by Linux are always <576bytes so
			 * they should go through unfragmented).
			 */
			if (sk->sk_state == TCP_LISTEN)
				goto out;

			tp->mtu_info = info;
			if (!sock_owned_by_user(sk)) {
				tcp_v4_mtu_reduced(sk);
			} else {
				if (!test_and_set_bit(TCP_MTU_REDUCED_DEFERRED, &sk->sk_tsq_flags))
					sock_hold(sk);
			}
			goto out;
		}

		err = icmp_err_convert[code].errno;
		/* check if icmp_skb allows revert of backoff
		 * (see draft-zimmermann-tcp-lcd) */
		if (code != ICMP_NET_UNREACH && code != ICMP_HOST_UNREACH)
			break;
		if (seq != tp->snd_una  || !icsk->icsk_retransmits ||
		    !icsk->icsk_backoff || fastopen)
			break;

		if (sock_owned_by_user(sk))
			break;

		icsk->icsk_backoff--;
		icsk->icsk_rto = tp->srtt_us ? __tcp_set_rto(tp) :
					       TCP_TIMEOUT_INIT;
		icsk->icsk_rto = inet_csk_rto_backoff(icsk, TCP_RTO_MAX);

		skb = tcp_write_queue_head(sk);
		BUG_ON(!skb);

		remaining = icsk->icsk_rto -
			    min(icsk->icsk_rto,
				tcp_time_stamp - tcp_skb_timestamp(skb));

		if (remaining) {
			inet_csk_reset_xmit_timer(sk, ICSK_TIME_RETRANS,
						  remaining, TCP_RTO_MAX);
		} else {
			/* RTO revert clocked out retransmission.
			 * Will retransmit now */
			tcp_retransmit_timer(sk);
		}

		break;
	case ICMP_TIME_EXCEEDED:
		err = EHOSTUNREACH;
		break;
	default:
		goto out;
	}

	switch (sk->sk_state) {
	case TCP_SYN_SENT:
	case TCP_SYN_RECV:
		/* Only in fast or simultaneous open. If a fast open socket is
		 * is already accepted it is treated as a connected one below.
		 */
		if (fastopen && !fastopen->sk)
			break;

		if (!sock_owned_by_user(sk)) {
			sk->sk_err = err;

			sk->sk_error_report(sk);

			tcp_done(sk);
		} else {
			sk->sk_err_soft = err;
		}
		goto out;
	}

	/* If we've already connected we will keep trying
	 * until we time out, or the user gives up.
	 *
	 * rfc1122 4.2.3.9 allows to consider as hard errors
	 * only PROTO_UNREACH and PORT_UNREACH (well, FRAG_FAILED too,
	 * but it is obsoleted by pmtu discovery).
	 *
	 * Note, that in modern internet, where routing is unreliable
	 * and in each dark corner broken firewalls sit, sending random
	 * errors ordered by their masters even this two messages finally lose
	 * their original sense (even Linux sends invalid PORT_UNREACHs)
	 *
	 * Now we are in compliance with RFCs.
	 */

	inet = inet_sk(sk);
	if (!sock_owned_by_user(sk) && inet->recverr) {
		sk->sk_err = err;
		sk->sk_error_report(sk);
	} else	{ /* Only an error on timeout */
		sk->sk_err_soft = err;
	}

out:
	bh_unlock_sock(sk);
	sock_put(sk);
}
void __tcp_v4_send_check(struct sk_buff *skb, __be32 saddr, __be32 daddr)
{
	struct tcphdr *th = tcp_hdr(skb);

	if (skb->ip_summed == CHECKSUM_PARTIAL) {
		th->check = ~tcp_v4_check(skb->len, saddr, daddr, 0);
		skb->csum_start = skb_transport_header(skb) - skb->head;
		skb->csum_offset = offsetof(struct tcphdr, check);
	} else {
		th->check = tcp_v4_check(skb->len, saddr, daddr,
					 csum_partial(th,
						      th->doff << 2,
						      skb->csum));
	}
}

/* This routine computes an IPv4 TCP checksum. */
void tcp_v4_send_check(struct sock *sk, struct sk_buff *skb)
{
	const struct inet_sock *inet = inet_sk(sk);

	__tcp_v4_send_check(skb, inet->inet_saddr, inet->inet_daddr);
}
EXPORT_SYMBOL(tcp_v4_send_check);
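/*
 * Illustrative sketch (not part of this file): the non-offloaded branch above
 * folds a ones'-complement sum over the IPv4 pseudo-header {saddr, daddr, 0,
 * protocol, tcp_len} plus the TCP header and payload. A self-contained
 * userspace version of that arithmetic, addresses in host byte order and
 * segment bytes in wire order:
 */
#if 0
#include <stdint.h>
#include <stddef.h>

static uint16_t tcp4_checksum(uint32_t saddr, uint32_t daddr,
			      const uint8_t *seg, size_t len)
{
	uint64_t sum = 0;
	size_t i;

	sum += (saddr >> 16) + (saddr & 0xffff);	/* pseudo-header */
	sum += (daddr >> 16) + (daddr & 0xffff);
	sum += 6;					/* IPPROTO_TCP */
	sum += len;					/* TCP length */

	for (i = 0; i + 1 < len; i += 2)		/* segment, 16 bits at a time */
		sum += ((uint16_t)seg[i] << 8) | seg[i + 1];
	if (len & 1)
		sum += (uint16_t)seg[len - 1] << 8;	/* odd trailing byte */

	while (sum >> 16)				/* fold the carries back in */
		sum = (sum & 0xffff) + (sum >> 16);
	return (uint16_t)~sum;
}
#endif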
/*
 *	This routine will send an RST to the other tcp.
 *
 *	Someone asks: why I NEVER use socket parameters (TOS, TTL etc.)
 *		      for reset.
 *	Answer: if a packet caused RST, it is not for a socket
 *		existing in our system, if it is matched to a socket,
 *		it is just duplicate segment or bug in other side's TCP.
 *		So that we build reply only basing on parameters
 *		arrived with segment.
 *	Exception: precedence violation. We do not implement it in any case.
 */

static void tcp_v4_send_reset(const struct sock *sk, struct sk_buff *skb)
{
	const struct tcphdr *th = tcp_hdr(skb);
	struct {
		struct tcphdr th;
#ifdef CONFIG_TCP_MD5SIG
		__be32 opt[(TCPOLEN_MD5SIG_ALIGNED >> 2)];
#endif
	} rep;
	struct ip_reply_arg arg;
#ifdef CONFIG_TCP_MD5SIG
	struct tcp_md5sig_key *key = NULL;
	const __u8 *hash_location = NULL;
	unsigned char newhash[16];
	int genhash;
	struct sock *sk1 = NULL;
#endif
	struct net *net;

	/* Never send a reset in response to a reset. */
	if (th->rst)
		return;

	/* If sk not NULL, it means we did a successful lookup and incoming
	 * route had to be correct. prequeue might have dropped our dst.
	 */
	if (!sk && skb_rtable(skb)->rt_type != RTN_LOCAL)
		return;

	/* Swap the send and the receive. */
	memset(&rep, 0, sizeof(rep));
	rep.th.dest   = th->source;
	rep.th.source = th->dest;
	rep.th.doff   = sizeof(struct tcphdr) / 4;
	rep.th.rst    = 1;

	if (th->ack) {
		rep.th.seq = th->ack_seq;
	} else {
		rep.th.ack = 1;
		rep.th.ack_seq = htonl(ntohl(th->seq) + th->syn + th->fin +
				       skb->len - (th->doff << 2));
	}
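	/* Worked example: for an unacceptable SYN carrying seq S and no
	 * payload, syn = 1 and skb->len == th->doff << 2, so the RST
	 * acknowledges S + 1; a FIN likewise consumes one sequence number,
	 * and any payload counts as skb->len - (th->doff << 2) bytes.
	 */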
	memset(&arg, 0, sizeof(arg));
	arg.iov[0].iov_base = (unsigned char *)&rep;
	arg.iov[0].iov_len  = sizeof(rep.th);

	net = sk ? sock_net(sk) : dev_net(skb_dst(skb)->dev);
#ifdef CONFIG_TCP_MD5SIG
	rcu_read_lock();
	hash_location = tcp_parse_md5sig_option(th);
	if (sk && sk_fullsock(sk)) {
		key = tcp_md5_do_lookup(sk, (union tcp_md5_addr *)
					&ip_hdr(skb)->saddr, AF_INET);
	} else if (hash_location) {
		/*
		 * active side is lost. Try to find listening socket through
		 * source port, and then find md5 key through listening socket.
		 * we are not loose security here:
		 * Incoming packet is checked with md5 hash with finding key,
		 * no RST generated if md5 hash doesn't match.
		 */
		sk1 = __inet_lookup_listener(net, &tcp_hashinfo, NULL, 0,
					     ip_hdr(skb)->saddr,
					     th->source, ip_hdr(skb)->daddr,
					     ntohs(th->source), inet_iif(skb));
		/* don't send rst if it can't find key */
		if (!sk1)
			goto out;

		key = tcp_md5_do_lookup(sk1, (union tcp_md5_addr *)
					&ip_hdr(skb)->saddr, AF_INET);
		if (!key)
			goto out;

		genhash = tcp_v4_md5_hash_skb(newhash, key, NULL, skb);
		if (genhash || memcmp(hash_location, newhash, 16) != 0)
			goto out;
	}

	if (key) {
		rep.opt[0] = htonl((TCPOPT_NOP << 24) |
				   (TCPOPT_NOP << 16) |
				   (TCPOPT_MD5SIG << 8) |
				   TCPOLEN_MD5SIG);
		/* Update length and the length the header thinks exists */
		arg.iov[0].iov_len += TCPOLEN_MD5SIG_ALIGNED;
		rep.th.doff = arg.iov[0].iov_len / 4;

		tcp_v4_md5_hash_hdr((__u8 *) &rep.opt[1],
				    key, ip_hdr(skb)->saddr,
				    ip_hdr(skb)->daddr, &rep.th);
	}
#endif
	arg.csum = csum_tcpudp_nofold(ip_hdr(skb)->daddr,
				      ip_hdr(skb)->saddr, /* XXX */
				      arg.iov[0].iov_len, IPPROTO_TCP, 0);
	arg.csumoffset = offsetof(struct tcphdr, check) / 2;
	arg.flags = (sk && inet_sk_transparent(sk)) ? IP_REPLY_ARG_NOSRCCHECK : 0;

	/* When socket is gone, all binding information is lost.
	 * routing might fail in this case. No choice here, if we choose to force
	 * input interface, we will misroute in case of asymmetric route.
	 */
	if (sk)
		arg.bound_dev_if = sk->sk_bound_dev_if;

	BUILD_BUG_ON(offsetof(struct sock, sk_bound_dev_if) !=
		     offsetof(struct inet_timewait_sock, tw_bound_dev_if));

	arg.tos = ip_hdr(skb)->tos;
	arg.uid = sock_net_uid(net, sk && sk_fullsock(sk) ? sk : NULL);
	local_bh_disable();
	ip_send_unicast_reply(*this_cpu_ptr(net->ipv4.tcp_sk),
			      skb, &TCP_SKB_CB(skb)->header.h4.opt,
			      ip_hdr(skb)->saddr, ip_hdr(skb)->daddr,
			      &arg, arg.iov[0].iov_len);

	__TCP_INC_STATS(net, TCP_MIB_OUTSEGS);
	__TCP_INC_STATS(net, TCP_MIB_OUTRSTS);
	local_bh_enable();

#ifdef CONFIG_TCP_MD5SIG
out:
	rcu_read_unlock();
#endif
}
/* The code following below sending ACKs in SYN-RECV and TIME-WAIT states
   outside socket context is ugly, certainly. What can I do?
 */

static void tcp_v4_send_ack(const struct sock *sk,
			    struct sk_buff *skb, u32 seq, u32 ack,
			    u32 win, u32 tsval, u32 tsecr, int oif,
			    struct tcp_md5sig_key *key,
			    int reply_flags, u8 tos)
{
	const struct tcphdr *th = tcp_hdr(skb);
	struct {
		struct tcphdr th;
		__be32 opt[(TCPOLEN_TSTAMP_ALIGNED >> 2)
#ifdef CONFIG_TCP_MD5SIG
			   + (TCPOLEN_MD5SIG_ALIGNED >> 2)
#endif
			];
	} rep;
	struct net *net = sock_net(sk);
	struct ip_reply_arg arg;

	memset(&rep.th, 0, sizeof(struct tcphdr));
	memset(&arg, 0, sizeof(arg));

	arg.iov[0].iov_base = (unsigned char *)&rep;
	arg.iov[0].iov_len  = sizeof(rep.th);
	if (tsecr) {
		rep.opt[0] = htonl((TCPOPT_NOP << 24) | (TCPOPT_NOP << 16) |
				   (TCPOPT_TIMESTAMP << 8) |
				   TCPOLEN_TIMESTAMP);
		rep.opt[1] = htonl(tsval);
		rep.opt[2] = htonl(tsecr);
		arg.iov[0].iov_len += TCPOLEN_TSTAMP_ALIGNED;
	}

	/* Swap the send and the receive. */
	rep.th.dest    = th->source;
	rep.th.source  = th->dest;
	rep.th.doff    = arg.iov[0].iov_len / 4;
	rep.th.seq     = htonl(seq);
	rep.th.ack_seq = htonl(ack);
	rep.th.ack     = 1;
	rep.th.window  = htons(win);

#ifdef CONFIG_TCP_MD5SIG
	if (key) {
		int offset = (tsecr) ? 3 : 0;

		rep.opt[offset++] = htonl((TCPOPT_NOP << 24) |
					  (TCPOPT_NOP << 16) |
					  (TCPOPT_MD5SIG << 8) |
					  TCPOLEN_MD5SIG);
		arg.iov[0].iov_len += TCPOLEN_MD5SIG_ALIGNED;
		rep.th.doff = arg.iov[0].iov_len/4;

		tcp_v4_md5_hash_hdr((__u8 *) &rep.opt[offset],
				    key, ip_hdr(skb)->saddr,
				    ip_hdr(skb)->daddr, &rep.th);
	}
#endif
	arg.flags = reply_flags;
	arg.csum = csum_tcpudp_nofold(ip_hdr(skb)->daddr,
				      ip_hdr(skb)->saddr, /* XXX */
				      arg.iov[0].iov_len, IPPROTO_TCP, 0);
	arg.csumoffset = offsetof(struct tcphdr, check) / 2;
	if (oif)
		arg.bound_dev_if = oif;
	arg.tos = tos;
	arg.uid = sock_net_uid(net, sk_fullsock(sk) ? sk : NULL);
	local_bh_disable();
	ip_send_unicast_reply(*this_cpu_ptr(net->ipv4.tcp_sk),
			      skb, &TCP_SKB_CB(skb)->header.h4.opt,
			      ip_hdr(skb)->saddr, ip_hdr(skb)->daddr,
			      &arg, arg.iov[0].iov_len);

	__TCP_INC_STATS(net, TCP_MIB_OUTSEGS);
	local_bh_enable();
}
static void tcp_v4_timewait_ack(struct sock *sk, struct sk_buff *skb)
{
	struct inet_timewait_sock *tw = inet_twsk(sk);
	struct tcp_timewait_sock *tcptw = tcp_twsk(sk);

	tcp_v4_send_ack(sk, skb,
			tcptw->tw_snd_nxt, tcptw->tw_rcv_nxt,
			tcptw->tw_rcv_wnd >> tw->tw_rcv_wscale,
			tcp_time_stamp + tcptw->tw_ts_offset,
			tcptw->tw_ts_recent,
			tw->tw_bound_dev_if,
			tcp_twsk_md5_key(tcptw),
			tw->tw_transparent ? IP_REPLY_ARG_NOSRCCHECK : 0,
			tw->tw_tos
			);

	inet_twsk_put(tw);
}
static void tcp_v4_reqsk_send_ack(const struct sock *sk, struct sk_buff *skb,
				  struct request_sock *req)
{
	/* sk->sk_state == TCP_LISTEN -> for regular TCP_SYN_RECV
	 * sk->sk_state == TCP_SYN_RECV -> for Fast Open.
	 */
	u32 seq = (sk->sk_state == TCP_LISTEN) ? tcp_rsk(req)->snt_isn + 1 :
					     tcp_sk(sk)->snd_nxt;

	/* RFC 7323 2.3
	 * The window field (SEG.WND) of every outgoing segment, with the
	 * exception of <SYN> segments, MUST be right-shifted by
	 * Rcv.Wind.Shift bits:
	 */
	tcp_v4_send_ack(sk, skb, seq,
			tcp_rsk(req)->rcv_nxt,
			req->rsk_rcv_wnd >> inet_rsk(req)->rcv_wscale,
			tcp_time_stamp + tcp_rsk(req)->ts_off,
			req->ts_recent,
			0,
			tcp_md5_do_lookup(sk, (union tcp_md5_addr *)&ip_hdr(skb)->daddr,
					  AF_INET),
			inet_rsk(req)->no_srccheck ? IP_REPLY_ARG_NOSRCCHECK : 0,
			ip_hdr(skb)->tos);
}
/*
 *	Send a SYN-ACK after having received a SYN.
 *	This still operates on a request_sock only, not on a big
 *	socket.
 */
static int tcp_v4_send_synack(const struct sock *sk, struct dst_entry *dst,
			      struct flowi *fl,
			      struct request_sock *req,
			      struct tcp_fastopen_cookie *foc,
			      enum tcp_synack_type synack_type)
{
	const struct inet_request_sock *ireq = inet_rsk(req);
	struct flowi4 fl4;
	int err = -1;
	struct sk_buff *skb;

	/* First, grab a route. */
	if (!dst && (dst = inet_csk_route_req(sk, &fl4, req)) == NULL)
		return -1;

	skb = tcp_make_synack(sk, dst, req, foc, synack_type);

	if (skb) {
		__tcp_v4_send_check(skb, ireq->ir_loc_addr, ireq->ir_rmt_addr);

		err = ip_build_and_send_pkt(skb, sk, ireq->ir_loc_addr,
					    ireq->ir_rmt_addr,
					    ireq->opt);
		err = net_xmit_eval(err);
	}

	return err;
}
/*
 *	IPv4 request_sock destructor.
 */
static void tcp_v4_reqsk_destructor(struct request_sock *req)
{
	kfree(inet_rsk(req)->opt);
}
#ifdef CONFIG_TCP_MD5SIG
/*
 * RFC2385 MD5 checksumming requires a mapping of
 * IP address->MD5 Key.
 * We need to maintain these in the sk structure.
 */

/* Find the Key structure for an address.  */
struct tcp_md5sig_key *tcp_md5_do_lookup(const struct sock *sk,
					 const union tcp_md5_addr *addr,
					 int family)
{
	const struct tcp_sock *tp = tcp_sk(sk);
	struct tcp_md5sig_key *key;
	unsigned int size = sizeof(struct in_addr);
	const struct tcp_md5sig_info *md5sig;

	/* caller either holds rcu_read_lock() or socket lock */
	md5sig = rcu_dereference_check(tp->md5sig_info,
				       lockdep_sock_is_held(sk));
	if (!md5sig)
		return NULL;
#if IS_ENABLED(CONFIG_IPV6)
	if (family == AF_INET6)
		size = sizeof(struct in6_addr);
#endif
	hlist_for_each_entry_rcu(key, &md5sig->head, node) {
		if (key->family != family)
			continue;
		if (!memcmp(&key->addr, addr, size))
			return key;
	}
	return NULL;
}
EXPORT_SYMBOL(tcp_md5_do_lookup);
struct tcp_md5sig_key *tcp_v4_md5_lookup(const struct sock *sk,
					 const struct sock *addr_sk)
{
	const union tcp_md5_addr *addr;

	addr = (const union tcp_md5_addr *)&addr_sk->sk_daddr;
	return tcp_md5_do_lookup(sk, addr, AF_INET);
}
EXPORT_SYMBOL(tcp_v4_md5_lookup);
/* This can be called on a newly created socket, from other files */
int tcp_md5_do_add(struct sock *sk, const union tcp_md5_addr *addr,
		   int family, const u8 *newkey, u8 newkeylen, gfp_t gfp)
{
	/* Add Key to the list */
	struct tcp_md5sig_key *key;
	struct tcp_sock *tp = tcp_sk(sk);
	struct tcp_md5sig_info *md5sig;

	key = tcp_md5_do_lookup(sk, addr, family);
	if (key) {
		/* Pre-existing entry - just update that one. */
		memcpy(key->key, newkey, newkeylen);
		key->keylen = newkeylen;
		return 0;
	}

	md5sig = rcu_dereference_protected(tp->md5sig_info,
					   lockdep_sock_is_held(sk));
	if (!md5sig) {
		md5sig = kmalloc(sizeof(*md5sig), gfp);
		if (!md5sig)
			return -ENOMEM;

		sk_nocaps_add(sk, NETIF_F_GSO_MASK);
		INIT_HLIST_HEAD(&md5sig->head);
		rcu_assign_pointer(tp->md5sig_info, md5sig);
	}

	key = sock_kmalloc(sk, sizeof(*key), gfp);
	if (!key)
		return -ENOMEM;
	if (!tcp_alloc_md5sig_pool()) {
		sock_kfree_s(sk, key, sizeof(*key));
		return -ENOMEM;
	}

	memcpy(key->key, newkey, newkeylen);
	key->keylen = newkeylen;
	key->family = family;
	memcpy(&key->addr, addr,
	       (family == AF_INET6) ? sizeof(struct in6_addr) :
				      sizeof(struct in_addr));
	hlist_add_head_rcu(&key->node, &md5sig->head);
	return 0;
}
EXPORT_SYMBOL(tcp_md5_do_add);
int tcp_md5_do_del(struct sock *sk, const union tcp_md5_addr *addr, int family)
{
	struct tcp_md5sig_key *key;

	key = tcp_md5_do_lookup(sk, addr, family);
	if (!key)
		return -ENOENT;
	hlist_del_rcu(&key->node);
	atomic_sub(sizeof(*key), &sk->sk_omem_alloc);
	kfree_rcu(key, rcu);
	return 0;
}
EXPORT_SYMBOL(tcp_md5_do_del);
static void tcp_clear_md5_list(struct sock *sk)
{
	struct tcp_sock *tp = tcp_sk(sk);
	struct tcp_md5sig_key *key;
	struct hlist_node *n;
	struct tcp_md5sig_info *md5sig;

	md5sig = rcu_dereference_protected(tp->md5sig_info, 1);

	hlist_for_each_entry_safe(key, n, &md5sig->head, node) {
		hlist_del_rcu(&key->node);
		atomic_sub(sizeof(*key), &sk->sk_omem_alloc);
		kfree_rcu(key, rcu);
	}
}
static int tcp_v4_parse_md5_keys(struct sock *sk, char __user *optval,
				 int optlen)
{
	struct tcp_md5sig cmd;
	struct sockaddr_in *sin = (struct sockaddr_in *)&cmd.tcpm_addr;

	if (optlen < sizeof(cmd))
		return -EINVAL;

	if (copy_from_user(&cmd, optval, sizeof(cmd)))
		return -EFAULT;

	if (sin->sin_family != AF_INET)
		return -EINVAL;

	if (!cmd.tcpm_keylen)
		return tcp_md5_do_del(sk, (union tcp_md5_addr *)&sin->sin_addr.s_addr,
				      AF_INET);

	if (cmd.tcpm_keylen > TCP_MD5SIG_MAXKEYLEN)
		return -EINVAL;

	return tcp_md5_do_add(sk, (union tcp_md5_addr *)&sin->sin_addr.s_addr,
			      AF_INET, cmd.tcpm_key, cmd.tcpm_keylen,
			      GFP_KERNEL);
}
*hp
,
1031 __be32 daddr
, __be32 saddr
,
1032 const struct tcphdr
*th
, int nbytes
)
1034 struct tcp4_pseudohdr
*bp
;
1035 struct scatterlist sg
;
1042 bp
->protocol
= IPPROTO_TCP
;
1043 bp
->len
= cpu_to_be16(nbytes
);
1045 _th
= (struct tcphdr
*)(bp
+ 1);
1046 memcpy(_th
, th
, sizeof(*th
));
1049 sg_init_one(&sg
, bp
, sizeof(*bp
) + sizeof(*th
));
1050 ahash_request_set_crypt(hp
->md5_req
, &sg
, NULL
,
1051 sizeof(*bp
) + sizeof(*th
));
1052 return crypto_ahash_update(hp
->md5_req
);
static int tcp_v4_md5_hash_hdr(char *md5_hash, const struct tcp_md5sig_key *key,
			       __be32 daddr, __be32 saddr, const struct tcphdr *th)
{
	struct tcp_md5sig_pool *hp;
	struct ahash_request *req;

	hp = tcp_get_md5sig_pool();
	if (!hp)
		goto clear_hash_noput;
	req = hp->md5_req;

	if (crypto_ahash_init(req))
		goto clear_hash;
	if (tcp_v4_md5_hash_headers(hp, daddr, saddr, th, th->doff << 2))
		goto clear_hash;
	if (tcp_md5_hash_key(hp, key))
		goto clear_hash;
	ahash_request_set_crypt(req, NULL, md5_hash, 0);
	if (crypto_ahash_final(req))
		goto clear_hash;

	tcp_put_md5sig_pool();
	return 0;

clear_hash:
	tcp_put_md5sig_pool();
clear_hash_noput:
	memset(md5_hash, 0, 16);
	return 1;
}
int tcp_v4_md5_hash_skb(char *md5_hash, const struct tcp_md5sig_key *key,
			const struct sock *sk,
			const struct sk_buff *skb)
{
	struct tcp_md5sig_pool *hp;
	struct ahash_request *req;
	const struct tcphdr *th = tcp_hdr(skb);
	__be32 saddr, daddr;

	if (sk) { /* valid for establish/request sockets */
		saddr = sk->sk_rcv_saddr;
		daddr = sk->sk_daddr;
	} else {
		const struct iphdr *iph = ip_hdr(skb);
		saddr = iph->saddr;
		daddr = iph->daddr;
	}

	hp = tcp_get_md5sig_pool();
	if (!hp)
		goto clear_hash_noput;
	req = hp->md5_req;

	if (crypto_ahash_init(req))
		goto clear_hash;

	if (tcp_v4_md5_hash_headers(hp, daddr, saddr, th, skb->len))
		goto clear_hash;
	if (tcp_md5_hash_skb_data(hp, skb, th->doff << 2))
		goto clear_hash;
	if (tcp_md5_hash_key(hp, key))
		goto clear_hash;
	ahash_request_set_crypt(req, NULL, md5_hash, 0);
	if (crypto_ahash_final(req))
		goto clear_hash;

	tcp_put_md5sig_pool();
	return 0;

clear_hash:
	tcp_put_md5sig_pool();
clear_hash_noput:
	memset(md5_hash, 0, 16);
	return 1;
}
EXPORT_SYMBOL(tcp_v4_md5_hash_skb);
#endif
/* Called with rcu_read_lock() */
static bool tcp_v4_inbound_md5_hash(const struct sock *sk,
				    const struct sk_buff *skb)
{
#ifdef CONFIG_TCP_MD5SIG
	/*
	 * This gets called for each TCP segment that arrives
	 * so we want to be efficient.
	 * We have 3 drop cases:
	 * o No MD5 hash and one expected.
	 * o MD5 hash and we're not expecting one.
	 * o MD5 hash and its wrong.
	 */
	const __u8 *hash_location = NULL;
	struct tcp_md5sig_key *hash_expected;
	const struct iphdr *iph = ip_hdr(skb);
	const struct tcphdr *th = tcp_hdr(skb);
	int genhash;
	unsigned char newhash[16];

	hash_expected = tcp_md5_do_lookup(sk, (union tcp_md5_addr *)&iph->saddr,
					  AF_INET);
	hash_location = tcp_parse_md5sig_option(th);

	/* We've parsed the options - do we have a hash? */
	if (!hash_expected && !hash_location)
		return false;

	if (hash_expected && !hash_location) {
		NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPMD5NOTFOUND);
		return true;
	}

	if (!hash_expected && hash_location) {
		NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPMD5UNEXPECTED);
		return true;
	}

	/* Okay, so this is hash_expected and hash_location -
	 * so we need to calculate the checksum.
	 */
	genhash = tcp_v4_md5_hash_skb(newhash,
				      hash_expected,
				      NULL, skb);

	if (genhash || memcmp(hash_location, newhash, 16) != 0) {
		NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPMD5FAILURE);
		net_info_ratelimited("MD5 Hash failed for (%pI4, %d)->(%pI4, %d)%s\n",
				     &iph->saddr, ntohs(th->source),
				     &iph->daddr, ntohs(th->dest),
				     genhash ? " tcp_v4_calc_md5_hash failed"
					     : "");
		return true;
	}
	return false;
#endif
	return false;
}
static void tcp_v4_init_req(struct request_sock *req,
			    const struct sock *sk_listener,
			    struct sk_buff *skb)
{
	struct inet_request_sock *ireq = inet_rsk(req);

	sk_rcv_saddr_set(req_to_sk(req), ip_hdr(skb)->daddr);
	sk_daddr_set(req_to_sk(req), ip_hdr(skb)->saddr);
	ireq->opt = tcp_v4_save_options(skb);
}
static struct dst_entry *tcp_v4_route_req(const struct sock *sk,
					  struct flowi *fl,
					  const struct request_sock *req,
					  bool *strict)
{
	struct dst_entry *dst = inet_csk_route_req(sk, &fl->u.ip4, req);

	if (strict) {
		if (fl->u.ip4.daddr == inet_rsk(req)->ir_rmt_addr)
			*strict = true;
		else
			*strict = false;
	}

	return dst;
}
struct request_sock_ops tcp_request_sock_ops __read_mostly = {
	.family		=	PF_INET,
	.obj_size	=	sizeof(struct tcp_request_sock),
	.rtx_syn_ack	=	tcp_rtx_synack,
	.send_ack	=	tcp_v4_reqsk_send_ack,
	.destructor	=	tcp_v4_reqsk_destructor,
	.send_reset	=	tcp_v4_send_reset,
	.syn_ack_timeout =	tcp_syn_ack_timeout,
};
static const struct tcp_request_sock_ops tcp_request_sock_ipv4_ops = {
	.mss_clamp	=	TCP_MSS_DEFAULT,
#ifdef CONFIG_TCP_MD5SIG
	.req_md5_lookup	=	tcp_v4_md5_lookup,
	.calc_md5_hash	=	tcp_v4_md5_hash_skb,
#endif
	.init_req	=	tcp_v4_init_req,
#ifdef CONFIG_SYN_COOKIES
	.cookie_init_seq =	cookie_v4_init_sequence,
#endif
	.route_req	=	tcp_v4_route_req,
	.init_seq	=	tcp_v4_init_sequence,
	.send_synack	=	tcp_v4_send_synack,
};
int tcp_v4_conn_request(struct sock *sk, struct sk_buff *skb)
{
	/* Never answer to SYNs send to broadcast or multicast */
	if (skb_rtable(skb)->rt_flags & (RTCF_BROADCAST | RTCF_MULTICAST))
		goto drop;

	return tcp_conn_request(&tcp_request_sock_ops,
				&tcp_request_sock_ipv4_ops, sk, skb);

drop:
	tcp_listendrop(sk);
	return 0;
}
EXPORT_SYMBOL(tcp_v4_conn_request);
/*
 * The three way handshake has completed - we got a valid synack -
 * now create the new socket.
 */
struct sock *tcp_v4_syn_recv_sock(const struct sock *sk, struct sk_buff *skb,
				  struct request_sock *req,
				  struct dst_entry *dst,
				  struct request_sock *req_unhash,
				  bool *own_req)
{
	struct inet_request_sock *ireq;
	struct inet_sock *newinet;
	struct tcp_sock *newtp;
	struct sock *newsk;
#ifdef CONFIG_TCP_MD5SIG
	struct tcp_md5sig_key *key;
#endif
	struct ip_options_rcu *inet_opt;

	if (sk_acceptq_is_full(sk))
		goto exit_overflow;

	newsk = tcp_create_openreq_child(sk, req, skb);
	if (!newsk)
		goto exit_nonewsk;

	newsk->sk_gso_type = SKB_GSO_TCPV4;
	inet_sk_rx_dst_set(newsk, skb);

	newtp		      = tcp_sk(newsk);
	newinet		      = inet_sk(newsk);
	ireq		      = inet_rsk(req);
	sk_daddr_set(newsk, ireq->ir_rmt_addr);
	sk_rcv_saddr_set(newsk, ireq->ir_loc_addr);
	newsk->sk_bound_dev_if = ireq->ir_iif;
	newinet->inet_saddr   = ireq->ir_loc_addr;
	inet_opt	      = ireq->opt;
	rcu_assign_pointer(newinet->inet_opt, inet_opt);
	ireq->opt	      = NULL;
	newinet->mc_index     = inet_iif(skb);
	newinet->mc_ttl	      = ip_hdr(skb)->ttl;
	newinet->rcv_tos      = ip_hdr(skb)->tos;
	inet_csk(newsk)->icsk_ext_hdr_len = 0;
	if (inet_opt)
		inet_csk(newsk)->icsk_ext_hdr_len = inet_opt->opt.optlen;
	newinet->inet_id = newtp->write_seq ^ jiffies;

	if (!dst) {
		dst = inet_csk_route_child_sock(sk, newsk, req);
		if (!dst)
			goto put_and_exit;
	} else {
		/* syncookie case : see end of cookie_v4_check() */
	}
	sk_setup_caps(newsk, dst);

	tcp_ca_openreq_child(newsk, dst);

	tcp_sync_mss(newsk, dst_mtu(dst));
	newtp->advmss = dst_metric_advmss(dst);
	if (tcp_sk(sk)->rx_opt.user_mss &&
	    tcp_sk(sk)->rx_opt.user_mss < newtp->advmss)
		newtp->advmss = tcp_sk(sk)->rx_opt.user_mss;

	tcp_initialize_rcv_mss(newsk);

#ifdef CONFIG_TCP_MD5SIG
	/* Copy over the MD5 key from the original socket */
	key = tcp_md5_do_lookup(sk, (union tcp_md5_addr *)&newinet->inet_daddr,
				AF_INET);
	if (key) {
		/*
		 * We're using one, so create a matching key
		 * on the newsk structure. If we fail to get
		 * memory, then we end up not copying the key
		 * across. Shucks.
		 */
		tcp_md5_do_add(newsk, (union tcp_md5_addr *)&newinet->inet_daddr,
			       AF_INET, key->key, key->keylen, GFP_ATOMIC);
		sk_nocaps_add(newsk, NETIF_F_GSO_MASK);
	}
#endif

	if (__inet_inherit_port(sk, newsk) < 0)
		goto put_and_exit;
	*own_req = inet_ehash_nolisten(newsk, req_to_sk(req_unhash));
	if (*own_req)
		tcp_move_syn(newtp, req);

	return newsk;

exit_overflow:
	NET_INC_STATS(sock_net(sk), LINUX_MIB_LISTENOVERFLOWS);
exit_nonewsk:
	dst_release(dst);
exit:
	tcp_listendrop(sk);
	return NULL;
put_and_exit:
	inet_csk_prepare_forced_close(newsk);
	tcp_done(newsk);
	goto exit;
}
EXPORT_SYMBOL(tcp_v4_syn_recv_sock);
static struct sock *tcp_v4_cookie_check(struct sock *sk, struct sk_buff *skb)
{
#ifdef CONFIG_SYN_COOKIES
	const struct tcphdr *th = tcp_hdr(skb);

	if (!th->syn)
		sk = cookie_v4_check(sk, skb);
#endif
	return sk;
}
/* The socket must have it's spinlock held when we get
 * here, unless it is a TCP_LISTEN socket.
 *
 * We have a potential double-lock case here, so even when
 * doing backlog processing we use the BH locking scheme.
 * This is because we cannot sleep with the original spinlock
 * held.
 */
int tcp_v4_do_rcv(struct sock *sk, struct sk_buff *skb)
{
	struct sock *rsk;

	if (sk->sk_state == TCP_ESTABLISHED) { /* Fast path */
		struct dst_entry *dst = sk->sk_rx_dst;

		sock_rps_save_rxhash(sk, skb);
		sk_mark_napi_id(sk, skb);
		if (dst) {
			if (inet_sk(sk)->rx_dst_ifindex != skb->skb_iif ||
			    !dst->ops->check(dst, 0)) {
				dst_release(dst);
				sk->sk_rx_dst = NULL;
			}
		}
		tcp_rcv_established(sk, skb, tcp_hdr(skb), skb->len);
		return 0;
	}

	if (tcp_checksum_complete(skb))
		goto csum_err;

	if (sk->sk_state == TCP_LISTEN) {
		struct sock *nsk = tcp_v4_cookie_check(sk, skb);

		if (!nsk)
			goto discard;
		if (nsk != sk) {
			sock_rps_save_rxhash(nsk, skb);
			sk_mark_napi_id(nsk, skb);
			if (tcp_child_process(sk, nsk, skb)) {
				rsk = nsk;
				goto reset;
			}
			return 0;
		}
	} else
		sock_rps_save_rxhash(sk, skb);

	if (tcp_rcv_state_process(sk, skb)) {
		rsk = sk;
		goto reset;
	}
	return 0;

reset:
	tcp_v4_send_reset(rsk, skb);
discard:
	kfree_skb(skb);
	/* Be careful here. If this function gets more complicated and
	 * gcc suffers from register pressure on the x86, sk (in %ebx)
	 * might be destroyed here. This current version compiles correctly,
	 * but you have been warned.
	 */
	return 0;

csum_err:
	TCP_INC_STATS(sock_net(sk), TCP_MIB_CSUMERRORS);
	TCP_INC_STATS(sock_net(sk), TCP_MIB_INERRS);
	goto discard;
}
EXPORT_SYMBOL(tcp_v4_do_rcv);
void tcp_v4_early_demux(struct sk_buff *skb)
{
	const struct iphdr *iph;
	const struct tcphdr *th;
	struct sock *sk;

	if (skb->pkt_type != PACKET_HOST)
		return;

	if (!pskb_may_pull(skb, skb_transport_offset(skb) + sizeof(struct tcphdr)))
		return;

	iph = ip_hdr(skb);
	th = tcp_hdr(skb);

	if (th->doff < sizeof(struct tcphdr) / 4)
		return;

	sk = __inet_lookup_established(dev_net(skb->dev), &tcp_hashinfo,
				       iph->saddr, th->source,
				       iph->daddr, ntohs(th->dest),
				       skb->skb_iif);
	if (sk) {
		skb->sk = sk;
		skb->destructor = sock_edemux;
		if (sk_fullsock(sk)) {
			struct dst_entry *dst = READ_ONCE(sk->sk_rx_dst);

			if (dst)
				dst = dst_check(dst, 0);
			if (dst &&
			    inet_sk(sk)->rx_dst_ifindex == skb->skb_iif)
				skb_dst_set_noref(skb, dst);
		}
	}
}
/* Packet is added to VJ-style prequeue for processing in process
 * context, if a reader task is waiting. Apparently, this exciting
 * idea (VJ's mail "Re: query about TCP header on tcp-ip" of 07 Sep 93)
 * failed somewhere. Latency? Burstiness? Well, at least now we will
 * see, why it failed. 8)8)				  --ANK
 *
 */
bool tcp_prequeue(struct sock *sk, struct sk_buff *skb)
{
	struct tcp_sock *tp = tcp_sk(sk);

	if (sysctl_tcp_low_latency || !tp->ucopy.task)
		return false;

	if (skb->len <= tcp_hdrlen(skb) &&
	    skb_queue_len(&tp->ucopy.prequeue) == 0)
		return false;

	/* Before escaping RCU protected region, we need to take care of skb
	 * dst. Prequeue is only enabled for established sockets.
	 * For such sockets, we might need the skb dst only to set sk->sk_rx_dst
	 * Instead of doing full sk_rx_dst validity here, let's perform
	 * an optimistic check.
	 */
	if (likely(sk->sk_rx_dst))
		skb_dst_drop(skb);
	else
		skb_dst_force_safe(skb);

	__skb_queue_tail(&tp->ucopy.prequeue, skb);
	tp->ucopy.memory += skb->truesize;
	if (skb_queue_len(&tp->ucopy.prequeue) >= 32 ||
	    tp->ucopy.memory + atomic_read(&sk->sk_rmem_alloc) > sk->sk_rcvbuf) {
		struct sk_buff *skb1;

		BUG_ON(sock_owned_by_user(sk));
		__NET_ADD_STATS(sock_net(sk), LINUX_MIB_TCPPREQUEUEDROPPED,
				skb_queue_len(&tp->ucopy.prequeue));

		while ((skb1 = __skb_dequeue(&tp->ucopy.prequeue)) != NULL)
			sk_backlog_rcv(sk, skb1);

		tp->ucopy.memory = 0;
	} else if (skb_queue_len(&tp->ucopy.prequeue) == 1) {
		wake_up_interruptible_sync_poll(sk_sleep(sk),
					   POLLIN | POLLRDNORM | POLLRDBAND);
		if (!inet_csk_ack_scheduled(sk))
			inet_csk_reset_xmit_timer(sk, ICSK_TIME_DACK,
						  (3 * tcp_rto_min(sk)) / 4,
						  TCP_RTO_MAX);
	}
	return true;
}
EXPORT_SYMBOL(tcp_prequeue);
bool tcp_add_backlog(struct sock *sk, struct sk_buff *skb)
{
	u32 limit = sk->sk_rcvbuf + sk->sk_sndbuf;

	/* Only socket owner can try to collapse/prune rx queues
	 * to reduce memory overhead, so add a little headroom here.
	 * Few sockets backlog are possibly concurrently non empty.
	 */
	limit += 64*1024;

	/* In case all data was pulled from skb frags (in __pskb_pull_tail()),
	 * we can fix skb->truesize to its real value to avoid future drops.
	 * This is valid because skb is not yet charged to the socket.
	 * It has been noticed pure SACK packets were sometimes dropped
	 * (if cooked by drivers without copybreak feature).
	 */
	if (!skb->data_len)
		skb->truesize = SKB_TRUESIZE(skb_end_offset(skb));

	if (unlikely(sk_add_backlog(sk, skb, limit))) {
		bh_unlock_sock(sk);
		__NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPBACKLOGDROP);
		return true;
	}
	return false;
}
EXPORT_SYMBOL(tcp_add_backlog);
int tcp_filter(struct sock *sk, struct sk_buff *skb)
{
	struct tcphdr *th = (struct tcphdr *)skb->data;
	unsigned int eaten = skb->len;
	int err;

	err = sk_filter_trim_cap(sk, skb, th->doff * 4);
	if (!err) {
		eaten -= skb->len;
		TCP_SKB_CB(skb)->end_seq -= eaten;
	}
	return err;
}
EXPORT_SYMBOL(tcp_filter);
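/*
 * Related usage sketch (illustrative, userspace): tcp_filter() runs any
 * BPF program attached to the socket via sk_filter_trim_cap(). A classic-BPF
 * filter that accepts everything can be attached with SO_ATTACH_FILTER:
 */
#if 0
#include <linux/filter.h>
#include <sys/socket.h>

static int attach_accept_all(int fd)
{
	/* Return ~0: accept the whole packet (a trimming filter would
	 * return fewer bytes, which sk_filter_trim_cap() honours).
	 */
	struct sock_filter code[] = {
		{ 0x06, 0, 0, 0xffffffff },	/* BPF_RET | BPF_K, 0xffffffff */
	};
	struct sock_fprog prog = {
		.len = 1,
		.filter = code,
	};

	return setsockopt(fd, SOL_SOCKET, SO_ATTACH_FILTER,
			  &prog, sizeof(prog));
}
#endif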
/*
 *	From tcp_input.c
 */

int tcp_v4_rcv(struct sk_buff *skb)
{
	struct net *net = dev_net(skb->dev);
	const struct iphdr *iph;
	const struct tcphdr *th;
	bool refcounted;
	struct sock *sk;
	int ret;

	if (skb->pkt_type != PACKET_HOST)
		goto discard_it;

	/* Count it even if it's bad */
	__TCP_INC_STATS(net, TCP_MIB_INSEGS);

	if (!pskb_may_pull(skb, sizeof(struct tcphdr)))
		goto discard_it;

	th = (const struct tcphdr *)skb->data;

	if (unlikely(th->doff < sizeof(struct tcphdr) / 4))
		goto bad_packet;
	if (!pskb_may_pull(skb, th->doff * 4))
		goto discard_it;

	/* An explanation is required here, I think.
	 * Packet length and doff are validated by header prediction,
	 * provided case of th->doff==0 is eliminated.
	 * So, we defer the checks. */

	if (skb_checksum_init(skb, IPPROTO_TCP, inet_compute_pseudo))
		goto csum_error;

	th = (const struct tcphdr *)skb->data;
	iph = ip_hdr(skb);
	/* This is tricky : We move IPCB at its correct location into TCP_SKB_CB()
	 * barrier() makes sure compiler wont play fool^Waliasing games.
	 */
	memmove(&TCP_SKB_CB(skb)->header.h4, IPCB(skb),
		sizeof(struct inet_skb_parm));
	barrier();

	TCP_SKB_CB(skb)->seq = ntohl(th->seq);
	TCP_SKB_CB(skb)->end_seq = (TCP_SKB_CB(skb)->seq + th->syn + th->fin +
				    skb->len - th->doff * 4);
* 4);
1635 TCP_SKB_CB(skb
)->ack_seq
= ntohl(th
->ack_seq
);
1636 TCP_SKB_CB(skb
)->tcp_flags
= tcp_flag_byte(th
);
1637 TCP_SKB_CB(skb
)->tcp_tw_isn
= 0;
1638 TCP_SKB_CB(skb
)->ip_dsfield
= ipv4_get_dsfield(iph
);
1639 TCP_SKB_CB(skb
)->sacked
= 0;
1642 sk
= __inet_lookup_skb(&tcp_hashinfo
, skb
, __tcp_hdrlen(th
), th
->source
,
1643 th
->dest
, &refcounted
);
1648 if (sk
->sk_state
== TCP_TIME_WAIT
)
1651 if (sk
->sk_state
== TCP_NEW_SYN_RECV
) {
1652 struct request_sock
*req
= inet_reqsk(sk
);
1655 sk
= req
->rsk_listener
;
1656 if (unlikely(tcp_v4_inbound_md5_hash(sk
, skb
))) {
1657 sk_drops_add(sk
, skb
);
1661 if (unlikely(sk
->sk_state
!= TCP_LISTEN
)) {
1662 inet_csk_reqsk_queue_drop_and_put(sk
, req
);
1665 /* We own a reference on the listener, increase it again
1666 * as we might lose it too soon.
1670 nsk
= tcp_check_req(sk
, skb
, req
, false);
1673 goto discard_and_relse
;
1677 } else if (tcp_child_process(sk
, nsk
, skb
)) {
1678 tcp_v4_send_reset(nsk
, skb
);
1679 goto discard_and_relse
;
1685 if (unlikely(iph
->ttl
< inet_sk(sk
)->min_ttl
)) {
1686 __NET_INC_STATS(net
, LINUX_MIB_TCPMINTTLDROP
);
1687 goto discard_and_relse
;
1690 if (!xfrm4_policy_check(sk
, XFRM_POLICY_IN
, skb
))
1691 goto discard_and_relse
;
1693 if (tcp_v4_inbound_md5_hash(sk
, skb
))
1694 goto discard_and_relse
;
1698 if (tcp_filter(sk
, skb
))
1699 goto discard_and_relse
;
1700 th
= (const struct tcphdr
*)skb
->data
;
1705 if (sk
->sk_state
== TCP_LISTEN
) {
1706 ret
= tcp_v4_do_rcv(sk
, skb
);
1707 goto put_and_return
;
1710 sk_incoming_cpu_update(sk
);
1712 bh_lock_sock_nested(sk
);
1713 tcp_segs_in(tcp_sk(sk
), skb
);
1715 if (!sock_owned_by_user(sk
)) {
1716 if (!tcp_prequeue(sk
, skb
))
1717 ret
= tcp_v4_do_rcv(sk
, skb
);
1718 } else if (tcp_add_backlog(sk
, skb
)) {
1719 goto discard_and_relse
;
1730 if (!xfrm4_policy_check(NULL
, XFRM_POLICY_IN
, skb
))
1733 if (tcp_checksum_complete(skb
)) {
1735 __TCP_INC_STATS(net
, TCP_MIB_CSUMERRORS
);
1737 __TCP_INC_STATS(net
, TCP_MIB_INERRS
);
1739 tcp_v4_send_reset(NULL
, skb
);
1743 /* Discard frame. */
1748 sk_drops_add(sk
, skb
);
1754 if (!xfrm4_policy_check(NULL
, XFRM_POLICY_IN
, skb
)) {
1755 inet_twsk_put(inet_twsk(sk
));
1759 if (tcp_checksum_complete(skb
)) {
1760 inet_twsk_put(inet_twsk(sk
));
1763 switch (tcp_timewait_state_process(inet_twsk(sk
), skb
, th
)) {
1765 struct sock
*sk2
= inet_lookup_listener(dev_net(skb
->dev
),
1768 iph
->saddr
, th
->source
,
1769 iph
->daddr
, th
->dest
,
1772 inet_twsk_deschedule_put(inet_twsk(sk
));
1777 /* Fall through to ACK */
1780 tcp_v4_timewait_ack(sk
, skb
);
1783 tcp_v4_send_reset(sk
, skb
);
1784 inet_twsk_deschedule_put(inet_twsk(sk
));
1786 case TCP_TW_SUCCESS
:;
static struct timewait_sock_ops tcp_timewait_sock_ops = {
	.twsk_obj_size	= sizeof(struct tcp_timewait_sock),
	.twsk_unique	= tcp_twsk_unique,
	.twsk_destructor= tcp_twsk_destructor,
};
void inet_sk_rx_dst_set(struct sock *sk, const struct sk_buff *skb)
{
	struct dst_entry *dst = skb_dst(skb);

	if (dst && dst_hold_safe(dst)) {
		sk->sk_rx_dst = dst;
		inet_sk(sk)->rx_dst_ifindex = skb->skb_iif;
	}
}
EXPORT_SYMBOL(inet_sk_rx_dst_set);
const struct inet_connection_sock_af_ops ipv4_specific = {
	.queue_xmit	   = ip_queue_xmit,
	.send_check	   = tcp_v4_send_check,
	.rebuild_header	   = inet_sk_rebuild_header,
	.sk_rx_dst_set	   = inet_sk_rx_dst_set,
	.conn_request	   = tcp_v4_conn_request,
	.syn_recv_sock	   = tcp_v4_syn_recv_sock,
	.net_header_len	   = sizeof(struct iphdr),
	.setsockopt	   = ip_setsockopt,
	.getsockopt	   = ip_getsockopt,
	.addr2sockaddr	   = inet_csk_addr2sockaddr,
	.sockaddr_len	   = sizeof(struct sockaddr_in),
#ifdef CONFIG_COMPAT
	.compat_setsockopt = compat_ip_setsockopt,
	.compat_getsockopt = compat_ip_getsockopt,
#endif
	.mtu_reduced	   = tcp_v4_mtu_reduced,
};
EXPORT_SYMBOL(ipv4_specific);
#ifdef CONFIG_TCP_MD5SIG
static const struct tcp_sock_af_ops tcp_sock_ipv4_specific = {
	.md5_lookup	= tcp_v4_md5_lookup,
	.calc_md5_hash	= tcp_v4_md5_hash_skb,
	.md5_parse	= tcp_v4_parse_md5_keys,
};
#endif
/* NOTE: A lot of things set to zero explicitly by call to
 *       sk_alloc() so need not be done here.
 */
static int tcp_v4_init_sock(struct sock *sk)
{
	struct inet_connection_sock *icsk = inet_csk(sk);

	tcp_init_sock(sk);

	icsk->icsk_af_ops = &ipv4_specific;

#ifdef CONFIG_TCP_MD5SIG
	tcp_sk(sk)->af_specific = &tcp_sock_ipv4_specific;
#endif

	return 0;
}
void tcp_v4_destroy_sock(struct sock *sk)
{
	struct tcp_sock *tp = tcp_sk(sk);

	tcp_clear_xmit_timers(sk);

	tcp_cleanup_congestion_control(sk);

	/* Cleanup up the write buffer. */
	tcp_write_queue_purge(sk);

	/* Cleans up our, hopefully empty, out_of_order_queue. */
	skb_rbtree_purge(&tp->out_of_order_queue);

#ifdef CONFIG_TCP_MD5SIG
	/* Clean up the MD5 key list, if any */
	if (tp->md5sig_info) {
		tcp_clear_md5_list(sk);
		kfree_rcu(tp->md5sig_info, rcu);
		tp->md5sig_info = NULL;
	}
#endif

	/* Clean prequeue, it must be empty really */
	__skb_queue_purge(&tp->ucopy.prequeue);

	/* Clean up a referenced TCP bind bucket. */
	if (inet_csk(sk)->icsk_bind_hash)
		inet_put_port(sk);

	BUG_ON(tp->fastopen_rsk);

	/* If socket is aborted during connect operation */
	tcp_free_fastopen_req(tp);
	tcp_saved_syn_free(tp);

	sk_sockets_allocated_dec(sk);
}
EXPORT_SYMBOL(tcp_v4_destroy_sock);
#ifdef CONFIG_PROC_FS
/* Proc filesystem TCP sock list dumping. */

/*
 * Get next listener socket follow cur.  If cur is NULL, get first socket
 * starting from bucket given in st->bucket; when st->bucket is zero the
 * very first socket in the hash table is returned.
 */
static void *listening_get_next(struct seq_file *seq, void *cur)
{
	struct tcp_iter_state *st = seq->private;
	struct net *net = seq_file_net(seq);
	struct inet_listen_hashbucket *ilb;
	struct sock *sk = cur;

	if (!sk) {
get_head:
		ilb = &tcp_hashinfo.listening_hash[st->bucket];
		spin_lock(&ilb->lock);
		sk = sk_head(&ilb->head);
		st->offset = 0;
		goto get_sk;
	}
	ilb = &tcp_hashinfo.listening_hash[st->bucket];
	++st->num;
	++st->offset;

	sk = sk_next(sk);
get_sk:
	sk_for_each_from(sk) {
		if (!net_eq(sock_net(sk), net))
			continue;
		if (sk->sk_family == st->family)
			return sk;
	}
	spin_unlock(&ilb->lock);
	st->offset = 0;
	if (++st->bucket < INET_LHTABLE_SIZE)
		goto get_head;
	return NULL;
}
static void *listening_get_idx(struct seq_file *seq, loff_t *pos)
{
	struct tcp_iter_state *st = seq->private;
	void *rc;

	st->bucket = 0;
	st->offset = 0;
	rc = listening_get_next(seq, NULL);

	while (rc && *pos) {
		rc = listening_get_next(seq, rc);
		--*pos;
	}
	return rc;
}
static inline bool empty_bucket(const struct tcp_iter_state *st)
{
	return hlist_nulls_empty(&tcp_hashinfo.ehash[st->bucket].chain);
}
/*
 * Get first established socket starting from bucket given in st->bucket.
 * If st->bucket is zero, the very first socket in the hash is returned.
 */
static void *established_get_first(struct seq_file *seq)
{
	struct tcp_iter_state *st = seq->private;
	struct net *net = seq_file_net(seq);
	void *rc = NULL;

	st->offset = 0;
	for (; st->bucket <= tcp_hashinfo.ehash_mask; ++st->bucket) {
		struct sock *sk;
		struct hlist_nulls_node *node;
		spinlock_t *lock = inet_ehash_lockp(&tcp_hashinfo, st->bucket);

		/* Lockless fast path for the common case of empty buckets */
		if (empty_bucket(st))
			continue;

		spin_lock_bh(lock);
		sk_nulls_for_each(sk, node, &tcp_hashinfo.ehash[st->bucket].chain) {
			if (sk->sk_family != st->family ||
			    !net_eq(sock_net(sk), net)) {
				continue;
			}
			rc = sk;
			goto out;
		}
		spin_unlock_bh(lock);
	}
out:
	return rc;
}
static void *established_get_next(struct seq_file *seq, void *cur)
{
	struct sock *sk = cur;
	struct hlist_nulls_node *node;
	struct tcp_iter_state *st = seq->private;
	struct net *net = seq_file_net(seq);

	++st->num;
	++st->offset;

	sk = sk_nulls_next(sk);

	sk_nulls_for_each_from(sk, node) {
		if (sk->sk_family == st->family && net_eq(sock_net(sk), net))
			return sk;
	}

	spin_unlock_bh(inet_ehash_lockp(&tcp_hashinfo, st->bucket));
	++st->bucket;
	return established_get_first(seq);
}
static void *established_get_idx(struct seq_file *seq, loff_t pos)
{
	struct tcp_iter_state *st = seq->private;
	void *rc;

	st->bucket = 0;
	rc = established_get_first(seq);

	while (rc && pos) {
		rc = established_get_next(seq, rc);
		--pos;
	}
	return rc;
}
static void *tcp_get_idx(struct seq_file *seq, loff_t pos)
{
	void *rc;
	struct tcp_iter_state *st = seq->private;

	st->state = TCP_SEQ_STATE_LISTENING;
	rc	  = listening_get_idx(seq, &pos);

	if (!rc) {
		st->state = TCP_SEQ_STATE_ESTABLISHED;
		rc	  = established_get_idx(seq, pos);
	}

	return rc;
}
static void *tcp_seek_last_pos(struct seq_file *seq)
{
	struct tcp_iter_state *st = seq->private;
	int offset = st->offset;
	int orig_num = st->num;
	void *rc = NULL;

	switch (st->state) {
	case TCP_SEQ_STATE_LISTENING:
		if (st->bucket >= INET_LHTABLE_SIZE)
			break;
		st->state = TCP_SEQ_STATE_LISTENING;
		rc = listening_get_next(seq, NULL);
		while (offset-- && rc)
			rc = listening_get_next(seq, rc);
		if (rc)
			break;
		st->bucket = 0;
		st->state = TCP_SEQ_STATE_ESTABLISHED;
		/* Fallthrough */
	case TCP_SEQ_STATE_ESTABLISHED:
		if (st->bucket > tcp_hashinfo.ehash_mask)
			break;
		rc = established_get_first(seq);
		while (offset-- && rc)
			rc = established_get_next(seq, rc);
	}

	st->num = orig_num;

	return rc;
}
static void *tcp_seq_start(struct seq_file *seq, loff_t *pos)
{
	struct tcp_iter_state *st = seq->private;
	void *rc;

	if (*pos && *pos == st->last_pos) {
		rc = tcp_seek_last_pos(seq);
		if (rc)
			goto out;
	}

	st->state = TCP_SEQ_STATE_LISTENING;
	st->num = 0;
	st->bucket = 0;
	st->offset = 0;
	rc = *pos ? tcp_get_idx(seq, *pos - 1) : SEQ_START_TOKEN;

out:
	st->last_pos = *pos;
	return rc;
}
static void *tcp_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
	struct tcp_iter_state *st = seq->private;
	void *rc = NULL;

	if (v == SEQ_START_TOKEN) {
		rc = tcp_get_idx(seq, 0);
		goto out;
	}

	switch (st->state) {
	case TCP_SEQ_STATE_LISTENING:
		rc = listening_get_next(seq, v);
		if (!rc) {
			st->state = TCP_SEQ_STATE_ESTABLISHED;
			st->bucket = 0;
			st->offset = 0;
			rc	  = established_get_first(seq);
		}
		break;
	case TCP_SEQ_STATE_ESTABLISHED:
		rc = established_get_next(seq, v);
		break;
	}
out:
	++*pos;
	st->last_pos = *pos;
	return rc;
}
static void tcp_seq_stop(struct seq_file *seq, void *v)
{
	struct tcp_iter_state *st = seq->private;

	switch (st->state) {
	case TCP_SEQ_STATE_LISTENING:
		if (v != SEQ_START_TOKEN)
			spin_unlock(&tcp_hashinfo.listening_hash[st->bucket].lock);
		break;
	case TCP_SEQ_STATE_ESTABLISHED:
		if (v)
			spin_unlock_bh(inet_ehash_lockp(&tcp_hashinfo, st->bucket));
		break;
	}
}
int tcp_seq_open(struct inode *inode, struct file *file)
{
	struct tcp_seq_afinfo *afinfo = PDE_DATA(inode);
	struct tcp_iter_state *s;
	int err;

	err = seq_open_net(inode, file, &afinfo->seq_ops,
			  sizeof(struct tcp_iter_state));
	if (err < 0)
		return err;

	s = ((struct seq_file *)file->private_data)->private;
	s->family		= afinfo->family;
	s->last_pos		= 0;
	return 0;
}
EXPORT_SYMBOL(tcp_seq_open);
int tcp_proc_register(struct net *net, struct tcp_seq_afinfo *afinfo)
{
	int rc = 0;
	struct proc_dir_entry *p;

	afinfo->seq_ops.start		= tcp_seq_start;
	afinfo->seq_ops.next		= tcp_seq_next;
	afinfo->seq_ops.stop		= tcp_seq_stop;

	p = proc_create_data(afinfo->name, S_IRUGO, net->proc_net,
			     afinfo->seq_fops, afinfo);
	if (!p)
		rc = -ENOMEM;
	return rc;
}
EXPORT_SYMBOL(tcp_proc_register);
void tcp_proc_unregister(struct net *net, struct tcp_seq_afinfo *afinfo)
{
	remove_proc_entry(afinfo->name, net->proc_net);
}
EXPORT_SYMBOL(tcp_proc_unregister);
static void get_openreq4(const struct request_sock *req,
			 struct seq_file *f, int i)
{
	const struct inet_request_sock *ireq = inet_rsk(req);
	long delta = req->rsk_timer.expires - jiffies;

	seq_printf(f, "%4d: %08X:%04X %08X:%04X"
		" %02X %08X:%08X %02X:%08lX %08X %5u %8d %u %d %pK",
		i,
		ireq->ir_loc_addr,
		ireq->ir_num,
		ireq->ir_rmt_addr,
		ntohs(ireq->ir_rmt_port),
		TCP_SYN_RECV,
		0, 0, /* could print option size, but that is af dependent. */
		1,    /* timers active (only the expire timer) */
		jiffies_delta_to_clock_t(delta),
		req->num_timeout,
		from_kuid_munged(seq_user_ns(f),
				 sock_i_uid(req->rsk_listener)),
		0,  /* non standard timer */
		0, /* open_requests have no inode */
		0,
		req);
}
static void get_tcp4_sock(struct sock *sk, struct seq_file *f, int i)
{
	int timer_active;
	unsigned long timer_expires;
	const struct tcp_sock *tp = tcp_sk(sk);
	const struct inet_connection_sock *icsk = inet_csk(sk);
	const struct inet_sock *inet = inet_sk(sk);
	const struct fastopen_queue *fastopenq = &icsk->icsk_accept_queue.fastopenq;
	__be32 dest = inet->inet_daddr;
	__be32 src = inet->inet_rcv_saddr;
	__u16 destp = ntohs(inet->inet_dport);
	__u16 srcp = ntohs(inet->inet_sport);
	int rx_queue;
	int state;

	if (icsk->icsk_pending == ICSK_TIME_RETRANS ||
	    icsk->icsk_pending == ICSK_TIME_REO_TIMEOUT ||
	    icsk->icsk_pending == ICSK_TIME_LOSS_PROBE) {
		timer_active	= 1;
		timer_expires	= icsk->icsk_timeout;
	} else if (icsk->icsk_pending == ICSK_TIME_PROBE0) {
		timer_active	= 4;
		timer_expires	= icsk->icsk_timeout;
	} else if (timer_pending(&sk->sk_timer)) {
		timer_active	= 2;
		timer_expires	= sk->sk_timer.expires;
	} else {
		timer_active	= 0;
		timer_expires = jiffies;
	}

	state = sk_state_load(sk);
	if (state == TCP_LISTEN)
		rx_queue = sk->sk_ack_backlog;
	else
		/* Because we don't lock the socket,
		 * we might find a transient negative value.
		 */
		rx_queue = max_t(int, tp->rcv_nxt - tp->copied_seq, 0);

	seq_printf(f, "%4d: %08X:%04X %08X:%04X %02X %08X:%08X %02X:%08lX "
			"%08X %5u %8d %lu %d %pK %lu %lu %u %u %d",
		i, src, srcp, dest, destp, state,
		tp->write_seq - tp->snd_una,
		rx_queue,
		timer_active,
		jiffies_delta_to_clock_t(timer_expires - jiffies),
		icsk->icsk_retransmits,
		from_kuid_munged(seq_user_ns(f), sock_i_uid(sk)),
		icsk->icsk_probes_out,
		sock_i_ino(sk),
		atomic_read(&sk->sk_refcnt), sk,
		jiffies_to_clock_t(icsk->icsk_rto),
		jiffies_to_clock_t(icsk->icsk_ack.ato),
		(icsk->icsk_ack.quick << 1) | icsk->icsk_ack.pingpong,
		tp->snd_cwnd,
		state == TCP_LISTEN ?
		    fastopenq->max_qlen :
		    (tcp_in_initial_slowstart(tp) ? -1 : tp->snd_ssthresh));
}
static void get_timewait4_sock(const struct inet_timewait_sock *tw,
			       struct seq_file *f, int i)
{
	long delta = tw->tw_timer.expires - jiffies;
	__be32 dest, src;
	__u16 destp, srcp;

	dest  = tw->tw_daddr;
	src   = tw->tw_rcv_saddr;
	destp = ntohs(tw->tw_dport);
	srcp  = ntohs(tw->tw_sport);

	seq_printf(f, "%4d: %08X:%04X %08X:%04X"
		" %02X %08X:%08X %02X:%08lX %08X %5d %8d %d %d %pK",
		i, src, srcp, dest, destp, tw->tw_substate, 0, 0,
		3, jiffies_delta_to_clock_t(delta), 0, 0, 0, 0,
		atomic_read(&tw->tw_refcnt), tw);
}

#define TMPSZ 150
static int tcp4_seq_show(struct seq_file *seq, void *v)
{
	struct tcp_iter_state *st;
	struct sock *sk = v;

	seq_setwidth(seq, TMPSZ - 1);
	if (v == SEQ_START_TOKEN) {
		seq_puts(seq, "  sl  local_address rem_address   st tx_queue "
			   "rx_queue tr tm->when retrnsmt   uid  timeout "
			   "inode");
		goto out;
	}
	st = seq->private;

	if (sk->sk_state == TCP_TIME_WAIT)
		get_timewait4_sock(v, seq, st->num);
	else if (sk->sk_state == TCP_NEW_SYN_RECV)
		get_openreq4(v, seq, st->num);
	else
		get_tcp4_sock(v, seq, st->num);
out:
	seq_pad(seq, '\n');
	return 0;
}
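/*
 * Usage sketch (illustrative, userspace): the show routine above produces
 * one line per socket in /proc/net/tcp. A minimal reader that extracts the
 * hex local address:port and state field of each entry:
 */
#if 0
#include <stdio.h>

int main(void)
{
	char line[256];
	FILE *f = fopen("/proc/net/tcp", "r");
	unsigned int laddr, lport, state;

	if (!f)
		return 1;
	fgets(line, sizeof(line), f);	/* skip the header line */
	while (fgets(line, sizeof(line), f)) {
		/* "  sl  local_address rem_address   st ..." */
		if (sscanf(line, "%*d: %8X:%4X %*8X:%*4X %2X",
			   &laddr, &lport, &state) == 3)
			printf("%08X:%04X st=%02X\n", laddr, lport, state);
	}
	fclose(f);
	return 0;
}
#endif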
static const struct file_operations tcp_afinfo_seq_fops = {
	.owner   = THIS_MODULE,
	.open    = tcp_seq_open,
	.read    = seq_read,
	.llseek  = seq_lseek,
	.release = seq_release_net
};
static struct tcp_seq_afinfo tcp4_seq_afinfo = {
	.name		= "tcp",
	.family		= AF_INET,
	.seq_fops	= &tcp_afinfo_seq_fops,
	.seq_ops	= {
		.show		= tcp4_seq_show,
	},
};
static int __net_init tcp4_proc_init_net(struct net *net)
{
	return tcp_proc_register(net, &tcp4_seq_afinfo);
}

static void __net_exit tcp4_proc_exit_net(struct net *net)
{
	tcp_proc_unregister(net, &tcp4_seq_afinfo);
}
static struct pernet_operations tcp4_net_ops = {
	.init = tcp4_proc_init_net,
	.exit = tcp4_proc_exit_net,
};

int __init tcp4_proc_init(void)
{
	return register_pernet_subsys(&tcp4_net_ops);
}

void tcp4_proc_exit(void)
{
	unregister_pernet_subsys(&tcp4_net_ops);
}
#endif /* CONFIG_PROC_FS */
struct proto tcp_prot = {
	.name			= "TCP",
	.owner			= THIS_MODULE,
	.close			= tcp_close,
	.connect		= tcp_v4_connect,
	.disconnect		= tcp_disconnect,
	.accept			= inet_csk_accept,
	.ioctl			= tcp_ioctl,
	.init			= tcp_v4_init_sock,
	.destroy		= tcp_v4_destroy_sock,
	.shutdown		= tcp_shutdown,
	.setsockopt		= tcp_setsockopt,
	.getsockopt		= tcp_getsockopt,
	.keepalive		= tcp_set_keepalive,
	.recvmsg		= tcp_recvmsg,
	.sendmsg		= tcp_sendmsg,
	.sendpage		= tcp_sendpage,
	.backlog_rcv		= tcp_v4_do_rcv,
	.release_cb		= tcp_release_cb,
	.hash			= inet_hash,
	.unhash			= inet_unhash,
	.get_port		= inet_csk_get_port,
	.enter_memory_pressure	= tcp_enter_memory_pressure,
	.stream_memory_free	= tcp_stream_memory_free,
	.sockets_allocated	= &tcp_sockets_allocated,
	.orphan_count		= &tcp_orphan_count,
	.memory_allocated	= &tcp_memory_allocated,
	.memory_pressure	= &tcp_memory_pressure,
	.sysctl_mem		= sysctl_tcp_mem,
	.sysctl_wmem		= sysctl_tcp_wmem,
	.sysctl_rmem		= sysctl_tcp_rmem,
	.max_header		= MAX_TCP_HEADER,
	.obj_size		= sizeof(struct tcp_sock),
	.slab_flags		= SLAB_DESTROY_BY_RCU,
	.twsk_prot		= &tcp_timewait_sock_ops,
	.rsk_prot		= &tcp_request_sock_ops,
	.h.hashinfo		= &tcp_hashinfo,
	.no_autobind		= true,
#ifdef CONFIG_COMPAT
	.compat_setsockopt	= compat_tcp_setsockopt,
	.compat_getsockopt	= compat_tcp_getsockopt,
#endif
	.diag_destroy		= tcp_abort,
};
EXPORT_SYMBOL(tcp_prot);
static void __net_exit tcp_sk_exit(struct net *net)
{
	int cpu;

	for_each_possible_cpu(cpu)
		inet_ctl_sock_destroy(*per_cpu_ptr(net->ipv4.tcp_sk, cpu));
	free_percpu(net->ipv4.tcp_sk);
}
static int __net_init tcp_sk_init(struct net *net)
{
	int res, cpu, cnt;

	net->ipv4.tcp_sk = alloc_percpu(struct sock *);
	if (!net->ipv4.tcp_sk)
		return -ENOMEM;

	for_each_possible_cpu(cpu) {
		struct sock *sk;

		res = inet_ctl_sock_create(&sk, PF_INET, SOCK_RAW,
					   IPPROTO_TCP, net);
		if (res)
			goto fail;
		sock_set_flag(sk, SOCK_USE_WRITE_QUEUE);
		*per_cpu_ptr(net->ipv4.tcp_sk, cpu) = sk;
	}

	net->ipv4.sysctl_tcp_ecn = 2;
	net->ipv4.sysctl_tcp_ecn_fallback = 1;

	net->ipv4.sysctl_tcp_base_mss = TCP_BASE_MSS;
	net->ipv4.sysctl_tcp_probe_threshold = TCP_PROBE_THRESHOLD;
	net->ipv4.sysctl_tcp_probe_interval = TCP_PROBE_INTERVAL;

	net->ipv4.sysctl_tcp_keepalive_time = TCP_KEEPALIVE_TIME;
	net->ipv4.sysctl_tcp_keepalive_probes = TCP_KEEPALIVE_PROBES;
	net->ipv4.sysctl_tcp_keepalive_intvl = TCP_KEEPALIVE_INTVL;

	net->ipv4.sysctl_tcp_syn_retries = TCP_SYN_RETRIES;
	net->ipv4.sysctl_tcp_synack_retries = TCP_SYNACK_RETRIES;
	net->ipv4.sysctl_tcp_syncookies = 1;
	net->ipv4.sysctl_tcp_reordering = TCP_FASTRETRANS_THRESH;
	net->ipv4.sysctl_tcp_retries1 = TCP_RETR1;
	net->ipv4.sysctl_tcp_retries2 = TCP_RETR2;
	net->ipv4.sysctl_tcp_orphan_retries = 0;
	net->ipv4.sysctl_tcp_fin_timeout = TCP_FIN_TIMEOUT;
	net->ipv4.sysctl_tcp_notsent_lowat = UINT_MAX;
	net->ipv4.sysctl_tcp_tw_reuse = 0;

	cnt = tcp_hashinfo.ehash_mask + 1;
	net->ipv4.tcp_death_row.sysctl_tw_recycle = 0;
	net->ipv4.tcp_death_row.sysctl_max_tw_buckets = (cnt + 1) / 2;
	net->ipv4.tcp_death_row.hashinfo = &tcp_hashinfo;

	net->ipv4.sysctl_max_syn_backlog = max(128, cnt / 256);

	return 0;

fail:
	tcp_sk_exit(net);

	return res;
}
static void __net_exit tcp_sk_exit_batch(struct list_head *net_exit_list)
{
	inet_twsk_purge(&tcp_hashinfo, AF_INET);
}
static struct pernet_operations __net_initdata tcp_sk_ops = {
       .init	   = tcp_sk_init,
       .exit	   = tcp_sk_exit,
       .exit_batch = tcp_sk_exit_batch,
};
void __init tcp_v4_init(void)
{
	if (register_pernet_subsys(&tcp_sk_ops))
		panic("Failed to create the TCP control socket.\n");
}