net/ipv4/tcp_ipv4.c
1 /*
2 * INET An implementation of the TCP/IP protocol suite for the LINUX
3 * operating system. INET is implemented using the BSD Socket
4 * interface as the means of communication with the user level.
6 * Implementation of the Transmission Control Protocol(TCP).
8 * IPv4 specific functions
11 * code split from:
12 * linux/ipv4/tcp.c
13 * linux/ipv4/tcp_input.c
14 * linux/ipv4/tcp_output.c
16 * See tcp.c for author information
18 * This program is free software; you can redistribute it and/or
19 * modify it under the terms of the GNU General Public License
20 * as published by the Free Software Foundation; either version
21 * 2 of the License, or (at your option) any later version.
25 * Changes:
26 * David S. Miller : New socket lookup architecture.
27 * This code is dedicated to John Dyson.
28 * David S. Miller : Change semantics of established hash,
29 * half is devoted to TIME_WAIT sockets
30 * and the rest go in the other half.
31 * Andi Kleen : Add support for syncookies and fixed
32 * some bugs: ip options weren't passed to
33 * the TCP layer, missed a check for an
34 * ACK bit.
35 * Andi Kleen : Implemented fast path mtu discovery.
36 * Fixed many serious bugs in the
37 * request_sock handling and moved
38 * most of it into the af independent code.
39 * Added tail drop and some other bugfixes.
40 * Added new listen semantics.
41 * Mike McLagan : Routing by source
42 * Juan Jose Ciarlante: ip_dynaddr bits
43 * Andi Kleen: various fixes.
44 * Vitaly E. Lavrov : Transparent proxy revived after year
45 * coma.
46 * Andi Kleen : Fix new listen.
47 * Andi Kleen : Fix accept error reporting.
48 * YOSHIFUJI Hideaki @USAGI and: Support IPV6_V6ONLY socket option, which
49 * Alexey Kuznetsov allow both IPv4 and IPv6 sockets to bind
50 * a single port at the same time.
53 #define pr_fmt(fmt) "TCP: " fmt
55 #include <linux/bottom_half.h>
56 #include <linux/types.h>
57 #include <linux/fcntl.h>
58 #include <linux/module.h>
59 #include <linux/random.h>
60 #include <linux/cache.h>
61 #include <linux/jhash.h>
62 #include <linux/init.h>
63 #include <linux/times.h>
64 #include <linux/slab.h>
66 #include <net/net_namespace.h>
67 #include <net/icmp.h>
68 #include <net/inet_hashtables.h>
69 #include <net/tcp.h>
70 #include <net/transp_v6.h>
71 #include <net/ipv6.h>
72 #include <net/inet_common.h>
73 #include <net/timewait_sock.h>
74 #include <net/xfrm.h>
75 #include <net/secure_seq.h>
76 #include <net/busy_poll.h>
78 #include <linux/inet.h>
79 #include <linux/ipv6.h>
80 #include <linux/stddef.h>
81 #include <linux/proc_fs.h>
82 #include <linux/seq_file.h>
83 #include <linux/inetdevice.h>
85 #include <crypto/hash.h>
86 #include <linux/scatterlist.h>
88 #ifdef CONFIG_TCP_MD5SIG
89 static int tcp_v4_md5_hash_hdr(char *md5_hash, const struct tcp_md5sig_key *key,
90 __be32 daddr, __be32 saddr, const struct tcphdr *th);
91 #endif
93 struct inet_hashinfo tcp_hashinfo;
94 EXPORT_SYMBOL(tcp_hashinfo);
96 static u32 tcp_v4_init_seq(const struct sk_buff *skb)
98 return secure_tcp_seq(ip_hdr(skb)->daddr,
99 ip_hdr(skb)->saddr,
100 tcp_hdr(skb)->dest,
101 tcp_hdr(skb)->source);
104 static u32 tcp_v4_init_ts_off(const struct net *net, const struct sk_buff *skb)
106 return secure_tcp_ts_off(net, ip_hdr(skb)->daddr, ip_hdr(skb)->saddr);
109 int tcp_twsk_unique(struct sock *sk, struct sock *sktw, void *twp)
111 const struct tcp_timewait_sock *tcptw = tcp_twsk(sktw);
112 struct tcp_sock *tp = tcp_sk(sk);
114 /* With PAWS, it is safe from the viewpoint
115 of data integrity. Even without PAWS it is safe provided sequence
116 spaces do not overlap i.e. at data rates <= 80Mbit/sec.
118 Actually, the idea is close to VJ's, only the timestamp cache is
119 held not per host but per port pair, and the TW bucket is used as the
120 state holder.
122 If TW bucket has been already destroyed we fall back to VJ's scheme
123 and use initial timestamp retrieved from peer table.
125 if (tcptw->tw_ts_recent_stamp &&
126 (!twp || (sock_net(sk)->ipv4.sysctl_tcp_tw_reuse &&
127 get_seconds() - tcptw->tw_ts_recent_stamp > 1))) {
128 tp->write_seq = tcptw->tw_snd_nxt + 65535 + 2;
129 if (tp->write_seq == 0)
130 tp->write_seq = 1;
131 tp->rx_opt.ts_recent = tcptw->tw_ts_recent;
132 tp->rx_opt.ts_recent_stamp = tcptw->tw_ts_recent_stamp;
133 sock_hold(sktw);
134 return 1;
137 return 0;
139 EXPORT_SYMBOL_GPL(tcp_twsk_unique);
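/*
 * Editor's illustrative sketch, not part of the original source: the
 * TIME-WAIT reuse decision above is gated by the per-namespace sysctl
 * net.ipv4.tcp_tw_reuse (plus at least one second of timestamp history).
 * A minimal standalone userspace program that reads the procfs view of
 * that knob, assuming the usual /proc mount, might look like this:
 */
#include <stdio.h>

int main(void)
{
	char val[16] = "";
	FILE *f = fopen("/proc/sys/net/ipv4/tcp_tw_reuse", "r");

	if (!f) {
		perror("tcp_tw_reuse");
		return 1;
	}
	if (fgets(val, sizeof(val), f))
		printf("net.ipv4.tcp_tw_reuse = %s", val);
	fclose(f);
	return 0;
}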
141 /* This will initiate an outgoing connection. */
142 int tcp_v4_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len)
144 struct sockaddr_in *usin = (struct sockaddr_in *)uaddr;
145 struct inet_sock *inet = inet_sk(sk);
146 struct tcp_sock *tp = tcp_sk(sk);
147 __be16 orig_sport, orig_dport;
148 __be32 daddr, nexthop;
149 struct flowi4 *fl4;
150 struct rtable *rt;
151 int err;
152 struct ip_options_rcu *inet_opt;
153 struct inet_timewait_death_row *tcp_death_row = &sock_net(sk)->ipv4.tcp_death_row;
155 if (addr_len < sizeof(struct sockaddr_in))
156 return -EINVAL;
158 if (usin->sin_family != AF_INET)
159 return -EAFNOSUPPORT;
161 nexthop = daddr = usin->sin_addr.s_addr;
162 inet_opt = rcu_dereference_protected(inet->inet_opt,
163 lockdep_sock_is_held(sk));
164 if (inet_opt && inet_opt->opt.srr) {
165 if (!daddr)
166 return -EINVAL;
167 nexthop = inet_opt->opt.faddr;
170 orig_sport = inet->inet_sport;
171 orig_dport = usin->sin_port;
172 fl4 = &inet->cork.fl.u.ip4;
173 rt = ip_route_connect(fl4, nexthop, inet->inet_saddr,
174 RT_CONN_FLAGS(sk), sk->sk_bound_dev_if,
175 IPPROTO_TCP,
176 orig_sport, orig_dport, sk);
177 if (IS_ERR(rt)) {
178 err = PTR_ERR(rt);
179 if (err == -ENETUNREACH)
180 IP_INC_STATS(sock_net(sk), IPSTATS_MIB_OUTNOROUTES);
181 return err;
184 if (rt->rt_flags & (RTCF_MULTICAST | RTCF_BROADCAST)) {
185 ip_rt_put(rt);
186 return -ENETUNREACH;
189 if (!inet_opt || !inet_opt->opt.srr)
190 daddr = fl4->daddr;
192 if (!inet->inet_saddr)
193 inet->inet_saddr = fl4->saddr;
194 sk_rcv_saddr_set(sk, inet->inet_saddr);
196 if (tp->rx_opt.ts_recent_stamp && inet->inet_daddr != daddr) {
197 /* Reset inherited state */
198 tp->rx_opt.ts_recent = 0;
199 tp->rx_opt.ts_recent_stamp = 0;
200 if (likely(!tp->repair))
201 tp->write_seq = 0;
204 inet->inet_dport = usin->sin_port;
205 sk_daddr_set(sk, daddr);
207 inet_csk(sk)->icsk_ext_hdr_len = 0;
208 if (inet_opt)
209 inet_csk(sk)->icsk_ext_hdr_len = inet_opt->opt.optlen;
211 tp->rx_opt.mss_clamp = TCP_MSS_DEFAULT;
213 /* Socket identity is still unknown (sport may be zero).
214 * However we set the state to SYN-SENT and, without releasing the socket
215 * lock, select a source port, enter ourselves into the hash tables and
216 * complete initialization after this.
218 tcp_set_state(sk, TCP_SYN_SENT);
219 err = inet_hash_connect(tcp_death_row, sk);
220 if (err)
221 goto failure;
223 sk_set_txhash(sk);
225 rt = ip_route_newports(fl4, rt, orig_sport, orig_dport,
226 inet->inet_sport, inet->inet_dport, sk);
227 if (IS_ERR(rt)) {
228 err = PTR_ERR(rt);
229 rt = NULL;
230 goto failure;
232 /* OK, now commit destination to socket. */
233 sk->sk_gso_type = SKB_GSO_TCPV4;
234 sk_setup_caps(sk, &rt->dst);
235 rt = NULL;
237 if (likely(!tp->repair)) {
238 if (!tp->write_seq)
239 tp->write_seq = secure_tcp_seq(inet->inet_saddr,
240 inet->inet_daddr,
241 inet->inet_sport,
242 usin->sin_port);
243 tp->tsoffset = secure_tcp_ts_off(sock_net(sk),
244 inet->inet_saddr,
245 inet->inet_daddr);
248 inet->inet_id = tp->write_seq ^ jiffies;
250 if (tcp_fastopen_defer_connect(sk, &err))
251 return err;
252 if (err)
253 goto failure;
255 err = tcp_connect(sk);
257 if (err)
258 goto failure;
260 return 0;
262 failure:
264 * This unhashes the socket and releases the local port,
265 * if necessary.
267 tcp_set_state(sk, TCP_CLOSE);
268 ip_rt_put(rt);
269 sk->sk_route_caps = 0;
270 inet->inet_dport = 0;
271 return err;
273 EXPORT_SYMBOL(tcp_v4_connect);
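/*
 * Editor's illustrative sketch, not part of the original source: the
 * kernel path above is reached from an ordinary blocking connect(2).
 * A minimal standalone userspace caller (192.0.2.1 is a documentation
 * address used purely as a placeholder):
 */
#include <arpa/inet.h>
#include <netinet/in.h>
#include <stdio.h>
#include <string.h>
#include <sys/socket.h>
#include <unistd.h>

int main(void)
{
	struct sockaddr_in dst;
	int fd = socket(AF_INET, SOCK_STREAM, 0);

	if (fd < 0) {
		perror("socket");
		return 1;
	}

	memset(&dst, 0, sizeof(dst));
	dst.sin_family = AF_INET;	/* anything else gets -EAFNOSUPPORT above */
	dst.sin_port = htons(80);
	inet_pton(AF_INET, "192.0.2.1", &dst.sin_addr);

	/*
	 * connect(2) ends up in tcp_v4_connect(): route lookup, source port
	 * selection via inet_hash_connect(), then tcp_connect() emits the SYN.
	 */
	if (connect(fd, (struct sockaddr *)&dst, sizeof(dst)) < 0)
		perror("connect");

	close(fd);
	return 0;
}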
276 * This routine reacts to ICMP_FRAG_NEEDED mtu indications as defined in RFC1191.
277 * It can be called through tcp_release_cb() if socket was owned by user
278 * at the time tcp_v4_err() was called to handle ICMP message.
280 void tcp_v4_mtu_reduced(struct sock *sk)
282 struct inet_sock *inet = inet_sk(sk);
283 struct dst_entry *dst;
284 u32 mtu;
286 if ((1 << sk->sk_state) & (TCPF_LISTEN | TCPF_CLOSE))
287 return;
288 mtu = tcp_sk(sk)->mtu_info;
289 dst = inet_csk_update_pmtu(sk, mtu);
290 if (!dst)
291 return;
293 /* Something is about to go wrong... Remember the soft error
294 * in case this connection is not able to recover.
296 if (mtu < dst_mtu(dst) && ip_dont_fragment(sk, dst))
297 sk->sk_err_soft = EMSGSIZE;
299 mtu = dst_mtu(dst);
301 if (inet->pmtudisc != IP_PMTUDISC_DONT &&
302 ip_sk_accept_pmtu(sk) &&
303 inet_csk(sk)->icsk_pmtu_cookie > mtu) {
304 tcp_sync_mss(sk, mtu);
306 /* Resend the TCP packet because it's
307 * clear that the old packet has been
308 * dropped. This is the new "fast" path mtu
309 * discovery.
311 tcp_simple_retransmit(sk);
312 } /* else let the usual retransmit timer handle it */
314 EXPORT_SYMBOL(tcp_v4_mtu_reduced);
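/*
 * Editor's illustrative sketch, not part of the original source: the
 * inet->pmtudisc check above is driven by the IP_MTU_DISCOVER socket
 * option, and the path MTU that dst_mtu() reports can be read back with
 * IP_MTU on a connected socket.  Userspace fragments (compiled
 * separately and called with an existing TCP socket fd):
 */
#include <netinet/in.h>
#include <sys/socket.h>

/* Always set DF and let the stack perform PMTU discovery on this socket. */
static int enable_pmtu_discovery(int fd)
{
	int val = IP_PMTUDISC_DO;

	return setsockopt(fd, IPPROTO_IP, IP_MTU_DISCOVER, &val, sizeof(val));
}

/* Current path MTU cached in the socket's route (connected sockets only). */
static int current_path_mtu(int fd)
{
	int mtu = 0;
	socklen_t len = sizeof(mtu);

	if (getsockopt(fd, IPPROTO_IP, IP_MTU, &mtu, &len) < 0)
		return -1;
	return mtu;
}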
316 static void do_redirect(struct sk_buff *skb, struct sock *sk)
318 struct dst_entry *dst = __sk_dst_check(sk, 0);
320 if (dst)
321 dst->ops->redirect(dst, sk, skb);
325 /* handle ICMP messages on TCP_NEW_SYN_RECV request sockets */
326 void tcp_req_err(struct sock *sk, u32 seq, bool abort)
328 struct request_sock *req = inet_reqsk(sk);
329 struct net *net = sock_net(sk);
331 /* ICMPs are not backlogged, hence we cannot get
332 * an established socket here.
334 if (seq != tcp_rsk(req)->snt_isn) {
335 __NET_INC_STATS(net, LINUX_MIB_OUTOFWINDOWICMPS);
336 } else if (abort) {
338 * Still in SYN_RECV, just remove it silently.
339 * There is no good way to pass the error to the newly
340 * created socket, and POSIX does not want network
341 * errors returned from accept().
343 inet_csk_reqsk_queue_drop(req->rsk_listener, req);
344 tcp_listendrop(req->rsk_listener);
346 reqsk_put(req);
348 EXPORT_SYMBOL(tcp_req_err);
351 * This routine is called by the ICMP module when it gets some
352 * sort of error condition. If err < 0 then the socket should
353 * be closed and the error returned to the user. If err > 0
354 * it's just the icmp type << 8 | icmp code. After adjustment
355 * header points to the first 8 bytes of the tcp header. We need
356 * to find the appropriate port.
358 * The locking strategy used here is very "optimistic". When
359 * someone else accesses the socket the ICMP is just dropped
360 * and for some paths there is no check at all.
361 * A more general error queue to queue errors for later handling
362 * is probably better.
366 void tcp_v4_err(struct sk_buff *icmp_skb, u32 info)
368 const struct iphdr *iph = (const struct iphdr *)icmp_skb->data;
369 struct tcphdr *th = (struct tcphdr *)(icmp_skb->data + (iph->ihl << 2));
370 struct inet_connection_sock *icsk;
371 struct tcp_sock *tp;
372 struct inet_sock *inet;
373 const int type = icmp_hdr(icmp_skb)->type;
374 const int code = icmp_hdr(icmp_skb)->code;
375 struct sock *sk;
376 struct sk_buff *skb;
377 struct request_sock *fastopen;
378 u32 seq, snd_una;
379 s32 remaining;
380 u32 delta_us;
381 int err;
382 struct net *net = dev_net(icmp_skb->dev);
384 sk = __inet_lookup_established(net, &tcp_hashinfo, iph->daddr,
385 th->dest, iph->saddr, ntohs(th->source),
386 inet_iif(icmp_skb), 0);
387 if (!sk) {
388 __ICMP_INC_STATS(net, ICMP_MIB_INERRORS);
389 return;
391 if (sk->sk_state == TCP_TIME_WAIT) {
392 inet_twsk_put(inet_twsk(sk));
393 return;
395 seq = ntohl(th->seq);
396 if (sk->sk_state == TCP_NEW_SYN_RECV)
397 return tcp_req_err(sk, seq,
398 type == ICMP_PARAMETERPROB ||
399 type == ICMP_TIME_EXCEEDED ||
400 (type == ICMP_DEST_UNREACH &&
401 (code == ICMP_NET_UNREACH ||
402 code == ICMP_HOST_UNREACH)));
404 bh_lock_sock(sk);
405 /* If too many ICMPs get dropped on busy
406 * servers this needs to be solved differently.
407 * We do take care of PMTU discovery (RFC1191) special case :
408 * we can receive locally generated ICMP messages while socket is held.
410 if (sock_owned_by_user(sk)) {
411 if (!(type == ICMP_DEST_UNREACH && code == ICMP_FRAG_NEEDED))
412 __NET_INC_STATS(net, LINUX_MIB_LOCKDROPPEDICMPS);
414 if (sk->sk_state == TCP_CLOSE)
415 goto out;
417 if (unlikely(iph->ttl < inet_sk(sk)->min_ttl)) {
418 __NET_INC_STATS(net, LINUX_MIB_TCPMINTTLDROP);
419 goto out;
422 icsk = inet_csk(sk);
423 tp = tcp_sk(sk);
424 /* XXX (TFO) - tp->snd_una should be ISN (tcp_create_openreq_child() */
425 fastopen = tp->fastopen_rsk;
426 snd_una = fastopen ? tcp_rsk(fastopen)->snt_isn : tp->snd_una;
427 if (sk->sk_state != TCP_LISTEN &&
428 !between(seq, snd_una, tp->snd_nxt)) {
429 __NET_INC_STATS(net, LINUX_MIB_OUTOFWINDOWICMPS);
430 goto out;
433 switch (type) {
434 case ICMP_REDIRECT:
435 if (!sock_owned_by_user(sk))
436 do_redirect(icmp_skb, sk);
437 goto out;
438 case ICMP_SOURCE_QUENCH:
439 /* Just silently ignore these. */
440 goto out;
441 case ICMP_PARAMETERPROB:
442 err = EPROTO;
443 break;
444 case ICMP_DEST_UNREACH:
445 if (code > NR_ICMP_UNREACH)
446 goto out;
448 if (code == ICMP_FRAG_NEEDED) { /* PMTU discovery (RFC1191) */
449 /* We are not interested in TCP_LISTEN and open_requests
450 * (SYN-ACKs sent out by Linux are always < 576 bytes so
451 * they should go through unfragmented).
453 if (sk->sk_state == TCP_LISTEN)
454 goto out;
456 tp->mtu_info = info;
457 if (!sock_owned_by_user(sk)) {
458 tcp_v4_mtu_reduced(sk);
459 } else {
460 if (!test_and_set_bit(TCP_MTU_REDUCED_DEFERRED, &sk->sk_tsq_flags))
461 sock_hold(sk);
463 goto out;
466 err = icmp_err_convert[code].errno;
467 /* check if icmp_skb allows revert of backoff
468 * (see draft-zimmermann-tcp-lcd) */
469 if (code != ICMP_NET_UNREACH && code != ICMP_HOST_UNREACH)
470 break;
471 if (seq != tp->snd_una || !icsk->icsk_retransmits ||
472 !icsk->icsk_backoff || fastopen)
473 break;
475 if (sock_owned_by_user(sk))
476 break;
478 icsk->icsk_backoff--;
479 icsk->icsk_rto = tp->srtt_us ? __tcp_set_rto(tp) :
480 TCP_TIMEOUT_INIT;
481 icsk->icsk_rto = inet_csk_rto_backoff(icsk, TCP_RTO_MAX);
483 skb = tcp_write_queue_head(sk);
484 BUG_ON(!skb);
486 tcp_mstamp_refresh(tp);
487 delta_us = (u32)(tp->tcp_mstamp - skb->skb_mstamp);
488 remaining = icsk->icsk_rto -
489 usecs_to_jiffies(delta_us);
491 if (remaining > 0) {
492 inet_csk_reset_xmit_timer(sk, ICSK_TIME_RETRANS,
493 remaining, TCP_RTO_MAX);
494 } else {
495 /* RTO revert clocked out retransmission.
496 * Will retransmit now */
497 tcp_retransmit_timer(sk);
500 break;
501 case ICMP_TIME_EXCEEDED:
502 err = EHOSTUNREACH;
503 break;
504 default:
505 goto out;
508 switch (sk->sk_state) {
509 case TCP_SYN_SENT:
510 case TCP_SYN_RECV:
511 /* Only in fast or simultaneous open. If a fast open socket is
512 * already accepted, it is treated as a connected one below.
514 if (fastopen && !fastopen->sk)
515 break;
517 if (!sock_owned_by_user(sk)) {
518 sk->sk_err = err;
520 sk->sk_error_report(sk);
522 tcp_done(sk);
523 } else {
524 sk->sk_err_soft = err;
526 goto out;
529 /* If we've already connected we will keep trying
530 * until we time out, or the user gives up.
532 * rfc1122 4.2.3.9 allows us to consider as hard errors
533 * only PROTO_UNREACH and PORT_UNREACH (well, FRAG_FAILED too,
534 * but it is obsoleted by pmtu discovery).
536 * Note that in the modern internet, where routing is unreliable
537 * and broken firewalls sit in every dark corner sending random
538 * errors ordered by their masters, even these two messages finally lose
539 * their original sense (even Linux sends invalid PORT_UNREACHs).
541 * Now we are in compliance with RFCs.
542 * --ANK (980905)
545 inet = inet_sk(sk);
546 if (!sock_owned_by_user(sk) && inet->recverr) {
547 sk->sk_err = err;
548 sk->sk_error_report(sk);
549 } else { /* Only an error on timeout */
550 sk->sk_err_soft = err;
553 out:
554 bh_unlock_sock(sk);
555 sock_put(sk);
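/*
 * Editor's illustrative sketch, not part of the original source: the
 * backoff-revert branch above recomputes the retransmission timer as
 * (RTO << backoff) clamped to TCP_RTO_MAX, minus the time already spent
 * waiting.  A toy standalone recomputation with hypothetical values, in
 * plain microseconds rather than jiffies:
 */
#include <stdint.h>
#include <stdio.h>

static uint64_t backed_off_rto(uint64_t base_rto_us, unsigned int backoff,
			       uint64_t rto_max_us)
{
	uint64_t when = base_rto_us << backoff;		/* exponential backoff */

	return when < rto_max_us ? when : rto_max_us;	/* clamp, as inet_csk_rto_backoff() does */
}

int main(void)
{
	uint64_t base_rto = 200000;	/* 200 ms base RTO */
	unsigned int backoff = 3;	/* three timeouts so far */

	backoff--;			/* one step undone, like icsk->icsk_backoff-- above */
	printf("reverted RTO: %llu us\n",
	       (unsigned long long)backed_off_rto(base_rto, backoff, 120000000ULL));
	return 0;
}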
558 void __tcp_v4_send_check(struct sk_buff *skb, __be32 saddr, __be32 daddr)
560 struct tcphdr *th = tcp_hdr(skb);
562 if (skb->ip_summed == CHECKSUM_PARTIAL) {
563 th->check = ~tcp_v4_check(skb->len, saddr, daddr, 0);
564 skb->csum_start = skb_transport_header(skb) - skb->head;
565 skb->csum_offset = offsetof(struct tcphdr, check);
566 } else {
567 th->check = tcp_v4_check(skb->len, saddr, daddr,
568 csum_partial(th,
569 th->doff << 2,
570 skb->csum));
574 /* This routine computes an IPv4 TCP checksum. */
575 void tcp_v4_send_check(struct sock *sk, struct sk_buff *skb)
577 const struct inet_sock *inet = inet_sk(sk);
579 __tcp_v4_send_check(skb, inet->inet_saddr, inet->inet_daddr);
581 EXPORT_SYMBOL(tcp_v4_send_check);
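/*
 * Editor's illustrative sketch, not part of the original source: when
 * the hardware does not do CHECKSUM_PARTIAL, __tcp_v4_send_check() folds
 * an RFC 1071 one's-complement sum over the IPv4 pseudo header plus the
 * TCP segment.  A self-contained userspace version of that arithmetic
 * (addresses in network byte order, th->check assumed zeroed beforehand;
 * store the result into the header with htons()):
 */
#include <netinet/in.h>
#include <stddef.h>
#include <stdint.h>
#include <string.h>

static uint32_t csum_add(uint32_t sum, const void *data, size_t len)
{
	const uint8_t *p = data;

	while (len > 1) {
		sum += ((uint32_t)p[0] << 8) | p[1];	/* big-endian 16-bit words */
		p += 2;
		len -= 2;
	}
	if (len)
		sum += (uint32_t)p[0] << 8;		/* odd trailing byte */
	return sum;
}

static uint16_t tcp4_checksum(uint32_t saddr_be, uint32_t daddr_be,
			      const void *segment, size_t len)
{
	uint8_t pseudo[12];
	uint32_t sum;

	memcpy(pseudo, &saddr_be, 4);			/* source address */
	memcpy(pseudo + 4, &daddr_be, 4);		/* destination address */
	pseudo[8] = 0;					/* zero pad */
	pseudo[9] = IPPROTO_TCP;			/* protocol */
	pseudo[10] = len >> 8;				/* TCP length (header + data) */
	pseudo[11] = len & 0xff;

	sum = csum_add(0, pseudo, sizeof(pseudo));
	sum = csum_add(sum, segment, len);
	while (sum >> 16)				/* fold carries */
		sum = (sum & 0xffff) + (sum >> 16);
	return (uint16_t)~sum;
}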
584 * This routine will send an RST to the other tcp.
586 * Someone asks: why I NEVER use socket parameters (TOS, TTL etc.)
587 * for reset.
588 * Answer: if a packet caused RST, it is not for a socket
589 * existing in our system, if it is matched to a socket,
590 * it is just a duplicate segment or a bug in the other side's TCP.
591 * So we build the reply based only on the parameters
592 * that arrived with the segment.
593 * Exception: precedence violation. We do not implement it in any case.
596 static void tcp_v4_send_reset(const struct sock *sk, struct sk_buff *skb)
598 const struct tcphdr *th = tcp_hdr(skb);
599 struct {
600 struct tcphdr th;
601 #ifdef CONFIG_TCP_MD5SIG
602 __be32 opt[(TCPOLEN_MD5SIG_ALIGNED >> 2)];
603 #endif
604 } rep;
605 struct ip_reply_arg arg;
606 #ifdef CONFIG_TCP_MD5SIG
607 struct tcp_md5sig_key *key = NULL;
608 const __u8 *hash_location = NULL;
609 unsigned char newhash[16];
610 int genhash;
611 struct sock *sk1 = NULL;
612 #endif
613 struct net *net;
615 /* Never send a reset in response to a reset. */
616 if (th->rst)
617 return;
619 /* If sk is not NULL, it means we did a successful lookup and the incoming
620 * route had to be correct. prequeue might have dropped our dst.
622 if (!sk && skb_rtable(skb)->rt_type != RTN_LOCAL)
623 return;
625 /* Swap the send and the receive. */
626 memset(&rep, 0, sizeof(rep));
627 rep.th.dest = th->source;
628 rep.th.source = th->dest;
629 rep.th.doff = sizeof(struct tcphdr) / 4;
630 rep.th.rst = 1;
632 if (th->ack) {
633 rep.th.seq = th->ack_seq;
634 } else {
635 rep.th.ack = 1;
636 rep.th.ack_seq = htonl(ntohl(th->seq) + th->syn + th->fin +
637 skb->len - (th->doff << 2));
640 memset(&arg, 0, sizeof(arg));
641 arg.iov[0].iov_base = (unsigned char *)&rep;
642 arg.iov[0].iov_len = sizeof(rep.th);
644 net = sk ? sock_net(sk) : dev_net(skb_dst(skb)->dev);
645 #ifdef CONFIG_TCP_MD5SIG
646 rcu_read_lock();
647 hash_location = tcp_parse_md5sig_option(th);
648 if (sk && sk_fullsock(sk)) {
649 key = tcp_md5_do_lookup(sk, (union tcp_md5_addr *)
650 &ip_hdr(skb)->saddr, AF_INET);
651 } else if (hash_location) {
653 * active side is lost. Try to find the listening socket through the
654 * source port, and then find the md5 key through the listening socket.
655 * We do not loosen security here:
656 * the incoming packet is checked against the md5 hash of the key we find;
657 * no RST is generated if the md5 hash doesn't match.
659 sk1 = __inet_lookup_listener(net, &tcp_hashinfo, NULL, 0,
660 ip_hdr(skb)->saddr,
661 th->source, ip_hdr(skb)->daddr,
662 ntohs(th->source), inet_iif(skb),
663 tcp_v4_sdif(skb));
664 /* don't send rst if it can't find key */
665 if (!sk1)
666 goto out;
668 key = tcp_md5_do_lookup(sk1, (union tcp_md5_addr *)
669 &ip_hdr(skb)->saddr, AF_INET);
670 if (!key)
671 goto out;
674 genhash = tcp_v4_md5_hash_skb(newhash, key, NULL, skb);
675 if (genhash || memcmp(hash_location, newhash, 16) != 0)
676 goto out;
680 if (key) {
681 rep.opt[0] = htonl((TCPOPT_NOP << 24) |
682 (TCPOPT_NOP << 16) |
683 (TCPOPT_MD5SIG << 8) |
684 TCPOLEN_MD5SIG);
685 /* Update length and the length the header thinks exists */
686 arg.iov[0].iov_len += TCPOLEN_MD5SIG_ALIGNED;
687 rep.th.doff = arg.iov[0].iov_len / 4;
689 tcp_v4_md5_hash_hdr((__u8 *) &rep.opt[1],
690 key, ip_hdr(skb)->saddr,
691 ip_hdr(skb)->daddr, &rep.th);
693 #endif
694 arg.csum = csum_tcpudp_nofold(ip_hdr(skb)->daddr,
695 ip_hdr(skb)->saddr, /* XXX */
696 arg.iov[0].iov_len, IPPROTO_TCP, 0);
697 arg.csumoffset = offsetof(struct tcphdr, check) / 2;
698 arg.flags = (sk && inet_sk_transparent(sk)) ? IP_REPLY_ARG_NOSRCCHECK : 0;
700 /* When the socket is gone, all binding information is lost and
701 * routing might fail in this case. No choice here: if we choose to force the
702 * input interface, we will misroute in the case of an asymmetric route.
704 if (sk)
705 arg.bound_dev_if = sk->sk_bound_dev_if;
707 BUILD_BUG_ON(offsetof(struct sock, sk_bound_dev_if) !=
708 offsetof(struct inet_timewait_sock, tw_bound_dev_if));
710 arg.tos = ip_hdr(skb)->tos;
711 arg.uid = sock_net_uid(net, sk && sk_fullsock(sk) ? sk : NULL);
712 local_bh_disable();
713 ip_send_unicast_reply(*this_cpu_ptr(net->ipv4.tcp_sk),
714 skb, &TCP_SKB_CB(skb)->header.h4.opt,
715 ip_hdr(skb)->saddr, ip_hdr(skb)->daddr,
716 &arg, arg.iov[0].iov_len);
718 __TCP_INC_STATS(net, TCP_MIB_OUTSEGS);
719 __TCP_INC_STATS(net, TCP_MIB_OUTRSTS);
720 local_bh_enable();
722 #ifdef CONFIG_TCP_MD5SIG
723 out:
724 rcu_read_unlock();
725 #endif
728 /* The code below, which sends ACKs in SYN-RECV and TIME-WAIT states
729 outside socket context, is certainly ugly. What can I do?
732 static void tcp_v4_send_ack(const struct sock *sk,
733 struct sk_buff *skb, u32 seq, u32 ack,
734 u32 win, u32 tsval, u32 tsecr, int oif,
735 struct tcp_md5sig_key *key,
736 int reply_flags, u8 tos)
738 const struct tcphdr *th = tcp_hdr(skb);
739 struct {
740 struct tcphdr th;
741 __be32 opt[(TCPOLEN_TSTAMP_ALIGNED >> 2)
742 #ifdef CONFIG_TCP_MD5SIG
743 + (TCPOLEN_MD5SIG_ALIGNED >> 2)
744 #endif
746 } rep;
747 struct net *net = sock_net(sk);
748 struct ip_reply_arg arg;
750 memset(&rep.th, 0, sizeof(struct tcphdr));
751 memset(&arg, 0, sizeof(arg));
753 arg.iov[0].iov_base = (unsigned char *)&rep;
754 arg.iov[0].iov_len = sizeof(rep.th);
755 if (tsecr) {
756 rep.opt[0] = htonl((TCPOPT_NOP << 24) | (TCPOPT_NOP << 16) |
757 (TCPOPT_TIMESTAMP << 8) |
758 TCPOLEN_TIMESTAMP);
759 rep.opt[1] = htonl(tsval);
760 rep.opt[2] = htonl(tsecr);
761 arg.iov[0].iov_len += TCPOLEN_TSTAMP_ALIGNED;
764 /* Swap the send and the receive. */
765 rep.th.dest = th->source;
766 rep.th.source = th->dest;
767 rep.th.doff = arg.iov[0].iov_len / 4;
768 rep.th.seq = htonl(seq);
769 rep.th.ack_seq = htonl(ack);
770 rep.th.ack = 1;
771 rep.th.window = htons(win);
773 #ifdef CONFIG_TCP_MD5SIG
774 if (key) {
775 int offset = (tsecr) ? 3 : 0;
777 rep.opt[offset++] = htonl((TCPOPT_NOP << 24) |
778 (TCPOPT_NOP << 16) |
779 (TCPOPT_MD5SIG << 8) |
780 TCPOLEN_MD5SIG);
781 arg.iov[0].iov_len += TCPOLEN_MD5SIG_ALIGNED;
782 rep.th.doff = arg.iov[0].iov_len/4;
784 tcp_v4_md5_hash_hdr((__u8 *) &rep.opt[offset],
785 key, ip_hdr(skb)->saddr,
786 ip_hdr(skb)->daddr, &rep.th);
788 #endif
789 arg.flags = reply_flags;
790 arg.csum = csum_tcpudp_nofold(ip_hdr(skb)->daddr,
791 ip_hdr(skb)->saddr, /* XXX */
792 arg.iov[0].iov_len, IPPROTO_TCP, 0);
793 arg.csumoffset = offsetof(struct tcphdr, check) / 2;
794 if (oif)
795 arg.bound_dev_if = oif;
796 arg.tos = tos;
797 arg.uid = sock_net_uid(net, sk_fullsock(sk) ? sk : NULL);
798 local_bh_disable();
799 ip_send_unicast_reply(*this_cpu_ptr(net->ipv4.tcp_sk),
800 skb, &TCP_SKB_CB(skb)->header.h4.opt,
801 ip_hdr(skb)->saddr, ip_hdr(skb)->daddr,
802 &arg, arg.iov[0].iov_len);
804 __TCP_INC_STATS(net, TCP_MIB_OUTSEGS);
805 local_bh_enable();
808 static void tcp_v4_timewait_ack(struct sock *sk, struct sk_buff *skb)
810 struct inet_timewait_sock *tw = inet_twsk(sk);
811 struct tcp_timewait_sock *tcptw = tcp_twsk(sk);
813 tcp_v4_send_ack(sk, skb,
814 tcptw->tw_snd_nxt, tcptw->tw_rcv_nxt,
815 tcptw->tw_rcv_wnd >> tw->tw_rcv_wscale,
816 tcp_time_stamp_raw() + tcptw->tw_ts_offset,
817 tcptw->tw_ts_recent,
818 tw->tw_bound_dev_if,
819 tcp_twsk_md5_key(tcptw),
820 tw->tw_transparent ? IP_REPLY_ARG_NOSRCCHECK : 0,
821 tw->tw_tos
824 inet_twsk_put(tw);
827 static void tcp_v4_reqsk_send_ack(const struct sock *sk, struct sk_buff *skb,
828 struct request_sock *req)
830 /* sk->sk_state == TCP_LISTEN -> for regular TCP_SYN_RECV
831 * sk->sk_state == TCP_SYN_RECV -> for Fast Open.
833 u32 seq = (sk->sk_state == TCP_LISTEN) ? tcp_rsk(req)->snt_isn + 1 :
834 tcp_sk(sk)->snd_nxt;
836 /* RFC 7323 2.3
837 * The window field (SEG.WND) of every outgoing segment, with the
838 * exception of <SYN> segments, MUST be right-shifted by
839 * Rcv.Wind.Shift bits:
841 tcp_v4_send_ack(sk, skb, seq,
842 tcp_rsk(req)->rcv_nxt,
843 req->rsk_rcv_wnd >> inet_rsk(req)->rcv_wscale,
844 tcp_time_stamp_raw() + tcp_rsk(req)->ts_off,
845 req->ts_recent,
847 tcp_md5_do_lookup(sk, (union tcp_md5_addr *)&ip_hdr(skb)->daddr,
848 AF_INET),
849 inet_rsk(req)->no_srccheck ? IP_REPLY_ARG_NOSRCCHECK : 0,
850 ip_hdr(skb)->tos);
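/*
 * Editor's illustrative sketch, not part of the original source: a tiny
 * worked example of the RFC 7323 rule quoted above, with hypothetical
 * numbers.  The 16-bit window field of every non-SYN segment carries the
 * receive window right-shifted by the negotiated scale.
 */
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t rcv_wnd = 1u << 20;	/* 1 MiB of receive window */
	uint8_t rcv_wscale = 7;		/* Rcv.Wind.Shift negotiated on the SYN */

	/* Value placed in rep.th.window by tcp_v4_send_ack(): 8192 here. */
	printf("SEG.WND = %u\n", rcv_wnd >> rcv_wscale);
	return 0;
}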
854 * Send a SYN-ACK after having received a SYN.
855 * This still operates on a request_sock only, not on a big
856 * socket.
858 static int tcp_v4_send_synack(const struct sock *sk, struct dst_entry *dst,
859 struct flowi *fl,
860 struct request_sock *req,
861 struct tcp_fastopen_cookie *foc,
862 enum tcp_synack_type synack_type)
864 const struct inet_request_sock *ireq = inet_rsk(req);
865 struct flowi4 fl4;
866 int err = -1;
867 struct sk_buff *skb;
869 /* First, grab a route. */
870 if (!dst && (dst = inet_csk_route_req(sk, &fl4, req)) == NULL)
871 return -1;
873 skb = tcp_make_synack(sk, dst, req, foc, synack_type);
875 if (skb) {
876 __tcp_v4_send_check(skb, ireq->ir_loc_addr, ireq->ir_rmt_addr);
878 err = ip_build_and_send_pkt(skb, sk, ireq->ir_loc_addr,
879 ireq->ir_rmt_addr,
880 ireq->opt);
881 err = net_xmit_eval(err);
884 return err;
888 * IPv4 request_sock destructor.
890 static void tcp_v4_reqsk_destructor(struct request_sock *req)
892 kfree(inet_rsk(req)->opt);
895 #ifdef CONFIG_TCP_MD5SIG
897 * RFC2385 MD5 checksumming requires a mapping of
898 * IP address->MD5 Key.
899 * We need to maintain these in the sk structure.
902 /* Find the Key structure for an address. */
903 struct tcp_md5sig_key *tcp_md5_do_lookup(const struct sock *sk,
904 const union tcp_md5_addr *addr,
905 int family)
907 const struct tcp_sock *tp = tcp_sk(sk);
908 struct tcp_md5sig_key *key;
909 const struct tcp_md5sig_info *md5sig;
910 __be32 mask;
911 struct tcp_md5sig_key *best_match = NULL;
912 bool match;
914 /* caller either holds rcu_read_lock() or socket lock */
915 md5sig = rcu_dereference_check(tp->md5sig_info,
916 lockdep_sock_is_held(sk));
917 if (!md5sig)
918 return NULL;
920 hlist_for_each_entry_rcu(key, &md5sig->head, node) {
921 if (key->family != family)
922 continue;
924 if (family == AF_INET) {
925 mask = inet_make_mask(key->prefixlen);
926 match = (key->addr.a4.s_addr & mask) ==
927 (addr->a4.s_addr & mask);
928 #if IS_ENABLED(CONFIG_IPV6)
929 } else if (family == AF_INET6) {
930 match = ipv6_prefix_equal(&key->addr.a6, &addr->a6,
931 key->prefixlen);
932 #endif
933 } else {
934 match = false;
937 if (match && (!best_match ||
938 key->prefixlen > best_match->prefixlen))
939 best_match = key;
941 return best_match;
943 EXPORT_SYMBOL(tcp_md5_do_lookup);
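/*
 * Editor's illustrative sketch, not part of the original source: the
 * IPv4 branch above is a masked prefix comparison, with the widest
 * matching prefix winning.  A standalone userspace equivalent of the
 * per-key test (addresses and mask in network byte order):
 */
#include <arpa/inet.h>
#include <stdbool.h>
#include <stdint.h>

static bool v4_prefix_match(uint32_t addr_be, uint32_t key_addr_be,
			    unsigned int prefixlen)
{
	/* Same leading-ones netmask that inet_make_mask() builds. */
	uint32_t mask = prefixlen ? htonl(~0u << (32 - prefixlen)) : 0;

	/* e.g. a /24 key for 192.0.2.0 matches the peer 192.0.2.17 */
	return (addr_be & mask) == (key_addr_be & mask);
}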
945 static struct tcp_md5sig_key *tcp_md5_do_lookup_exact(const struct sock *sk,
946 const union tcp_md5_addr *addr,
947 int family, u8 prefixlen)
949 const struct tcp_sock *tp = tcp_sk(sk);
950 struct tcp_md5sig_key *key;
951 unsigned int size = sizeof(struct in_addr);
952 const struct tcp_md5sig_info *md5sig;
954 /* caller either holds rcu_read_lock() or socket lock */
955 md5sig = rcu_dereference_check(tp->md5sig_info,
956 lockdep_sock_is_held(sk));
957 if (!md5sig)
958 return NULL;
959 #if IS_ENABLED(CONFIG_IPV6)
960 if (family == AF_INET6)
961 size = sizeof(struct in6_addr);
962 #endif
963 hlist_for_each_entry_rcu(key, &md5sig->head, node) {
964 if (key->family != family)
965 continue;
966 if (!memcmp(&key->addr, addr, size) &&
967 key->prefixlen == prefixlen)
968 return key;
970 return NULL;
973 struct tcp_md5sig_key *tcp_v4_md5_lookup(const struct sock *sk,
974 const struct sock *addr_sk)
976 const union tcp_md5_addr *addr;
978 addr = (const union tcp_md5_addr *)&addr_sk->sk_daddr;
979 return tcp_md5_do_lookup(sk, addr, AF_INET);
981 EXPORT_SYMBOL(tcp_v4_md5_lookup);
983 /* This can be called on a newly created socket, from other files */
984 int tcp_md5_do_add(struct sock *sk, const union tcp_md5_addr *addr,
985 int family, u8 prefixlen, const u8 *newkey, u8 newkeylen,
986 gfp_t gfp)
988 /* Add Key to the list */
989 struct tcp_md5sig_key *key;
990 struct tcp_sock *tp = tcp_sk(sk);
991 struct tcp_md5sig_info *md5sig;
993 key = tcp_md5_do_lookup_exact(sk, addr, family, prefixlen);
994 if (key) {
995 /* Pre-existing entry - just update that one. */
996 memcpy(key->key, newkey, newkeylen);
997 key->keylen = newkeylen;
998 return 0;
1001 md5sig = rcu_dereference_protected(tp->md5sig_info,
1002 lockdep_sock_is_held(sk));
1003 if (!md5sig) {
1004 md5sig = kmalloc(sizeof(*md5sig), gfp);
1005 if (!md5sig)
1006 return -ENOMEM;
1008 sk_nocaps_add(sk, NETIF_F_GSO_MASK);
1009 INIT_HLIST_HEAD(&md5sig->head);
1010 rcu_assign_pointer(tp->md5sig_info, md5sig);
1013 key = sock_kmalloc(sk, sizeof(*key), gfp);
1014 if (!key)
1015 return -ENOMEM;
1016 if (!tcp_alloc_md5sig_pool()) {
1017 sock_kfree_s(sk, key, sizeof(*key));
1018 return -ENOMEM;
1021 memcpy(key->key, newkey, newkeylen);
1022 key->keylen = newkeylen;
1023 key->family = family;
1024 key->prefixlen = prefixlen;
1025 memcpy(&key->addr, addr,
1026 (family == AF_INET6) ? sizeof(struct in6_addr) :
1027 sizeof(struct in_addr));
1028 hlist_add_head_rcu(&key->node, &md5sig->head);
1029 return 0;
1031 EXPORT_SYMBOL(tcp_md5_do_add);
1033 int tcp_md5_do_del(struct sock *sk, const union tcp_md5_addr *addr, int family,
1034 u8 prefixlen)
1036 struct tcp_md5sig_key *key;
1038 key = tcp_md5_do_lookup_exact(sk, addr, family, prefixlen);
1039 if (!key)
1040 return -ENOENT;
1041 hlist_del_rcu(&key->node);
1042 atomic_sub(sizeof(*key), &sk->sk_omem_alloc);
1043 kfree_rcu(key, rcu);
1044 return 0;
1046 EXPORT_SYMBOL(tcp_md5_do_del);
1048 static void tcp_clear_md5_list(struct sock *sk)
1050 struct tcp_sock *tp = tcp_sk(sk);
1051 struct tcp_md5sig_key *key;
1052 struct hlist_node *n;
1053 struct tcp_md5sig_info *md5sig;
1055 md5sig = rcu_dereference_protected(tp->md5sig_info, 1);
1057 hlist_for_each_entry_safe(key, n, &md5sig->head, node) {
1058 hlist_del_rcu(&key->node);
1059 atomic_sub(sizeof(*key), &sk->sk_omem_alloc);
1060 kfree_rcu(key, rcu);
1064 static int tcp_v4_parse_md5_keys(struct sock *sk, int optname,
1065 char __user *optval, int optlen)
1067 struct tcp_md5sig cmd;
1068 struct sockaddr_in *sin = (struct sockaddr_in *)&cmd.tcpm_addr;
1069 u8 prefixlen = 32;
1071 if (optlen < sizeof(cmd))
1072 return -EINVAL;
1074 if (copy_from_user(&cmd, optval, sizeof(cmd)))
1075 return -EFAULT;
1077 if (sin->sin_family != AF_INET)
1078 return -EINVAL;
1080 if (optname == TCP_MD5SIG_EXT &&
1081 cmd.tcpm_flags & TCP_MD5SIG_FLAG_PREFIX) {
1082 prefixlen = cmd.tcpm_prefixlen;
1083 if (prefixlen > 32)
1084 return -EINVAL;
1087 if (!cmd.tcpm_keylen)
1088 return tcp_md5_do_del(sk, (union tcp_md5_addr *)&sin->sin_addr.s_addr,
1089 AF_INET, prefixlen);
1091 if (cmd.tcpm_keylen > TCP_MD5SIG_MAXKEYLEN)
1092 return -EINVAL;
1094 return tcp_md5_do_add(sk, (union tcp_md5_addr *)&sin->sin_addr.s_addr,
1095 AF_INET, prefixlen, cmd.tcpm_key, cmd.tcpm_keylen,
1096 GFP_KERNEL);
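/*
 * Editor's illustrative sketch, not part of the original source: the
 * setsockopt() that lands in tcp_v4_parse_md5_keys() above.  A userspace
 * helper installing an RFC 2385 key for one IPv4 peer; struct tcp_md5sig
 * and TCP_MD5SIG come from the UAPI <linux/tcp.h> (header availability
 * and mixing with libc headers may vary by distribution):
 */
#include <linux/tcp.h>		/* struct tcp_md5sig, TCP_MD5SIG, TCP_MD5SIG_MAXKEYLEN */
#include <netinet/in.h>
#include <string.h>
#include <sys/socket.h>

static int set_tcp_md5_key(int fd, const struct sockaddr_in *peer,
			   const void *key, int keylen)
{
	struct tcp_md5sig md5;

	if (keylen > TCP_MD5SIG_MAXKEYLEN)
		return -1;

	memset(&md5, 0, sizeof(md5));
	memcpy(&md5.tcpm_addr, peer, sizeof(*peer));	/* must be AF_INET, as checked above */
	md5.tcpm_keylen = keylen;
	memcpy(md5.tcpm_key, key, keylen);

	return setsockopt(fd, IPPROTO_TCP, TCP_MD5SIG, &md5, sizeof(md5));
}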
1099 static int tcp_v4_md5_hash_headers(struct tcp_md5sig_pool *hp,
1100 __be32 daddr, __be32 saddr,
1101 const struct tcphdr *th, int nbytes)
1103 struct tcp4_pseudohdr *bp;
1104 struct scatterlist sg;
1105 struct tcphdr *_th;
1107 bp = hp->scratch;
1108 bp->saddr = saddr;
1109 bp->daddr = daddr;
1110 bp->pad = 0;
1111 bp->protocol = IPPROTO_TCP;
1112 bp->len = cpu_to_be16(nbytes);
1114 _th = (struct tcphdr *)(bp + 1);
1115 memcpy(_th, th, sizeof(*th));
1116 _th->check = 0;
1118 sg_init_one(&sg, bp, sizeof(*bp) + sizeof(*th));
1119 ahash_request_set_crypt(hp->md5_req, &sg, NULL,
1120 sizeof(*bp) + sizeof(*th));
1121 return crypto_ahash_update(hp->md5_req);
1124 static int tcp_v4_md5_hash_hdr(char *md5_hash, const struct tcp_md5sig_key *key,
1125 __be32 daddr, __be32 saddr, const struct tcphdr *th)
1127 struct tcp_md5sig_pool *hp;
1128 struct ahash_request *req;
1130 hp = tcp_get_md5sig_pool();
1131 if (!hp)
1132 goto clear_hash_noput;
1133 req = hp->md5_req;
1135 if (crypto_ahash_init(req))
1136 goto clear_hash;
1137 if (tcp_v4_md5_hash_headers(hp, daddr, saddr, th, th->doff << 2))
1138 goto clear_hash;
1139 if (tcp_md5_hash_key(hp, key))
1140 goto clear_hash;
1141 ahash_request_set_crypt(req, NULL, md5_hash, 0);
1142 if (crypto_ahash_final(req))
1143 goto clear_hash;
1145 tcp_put_md5sig_pool();
1146 return 0;
1148 clear_hash:
1149 tcp_put_md5sig_pool();
1150 clear_hash_noput:
1151 memset(md5_hash, 0, 16);
1152 return 1;
1155 int tcp_v4_md5_hash_skb(char *md5_hash, const struct tcp_md5sig_key *key,
1156 const struct sock *sk,
1157 const struct sk_buff *skb)
1159 struct tcp_md5sig_pool *hp;
1160 struct ahash_request *req;
1161 const struct tcphdr *th = tcp_hdr(skb);
1162 __be32 saddr, daddr;
1164 if (sk) { /* valid for establish/request sockets */
1165 saddr = sk->sk_rcv_saddr;
1166 daddr = sk->sk_daddr;
1167 } else {
1168 const struct iphdr *iph = ip_hdr(skb);
1169 saddr = iph->saddr;
1170 daddr = iph->daddr;
1173 hp = tcp_get_md5sig_pool();
1174 if (!hp)
1175 goto clear_hash_noput;
1176 req = hp->md5_req;
1178 if (crypto_ahash_init(req))
1179 goto clear_hash;
1181 if (tcp_v4_md5_hash_headers(hp, daddr, saddr, th, skb->len))
1182 goto clear_hash;
1183 if (tcp_md5_hash_skb_data(hp, skb, th->doff << 2))
1184 goto clear_hash;
1185 if (tcp_md5_hash_key(hp, key))
1186 goto clear_hash;
1187 ahash_request_set_crypt(req, NULL, md5_hash, 0);
1188 if (crypto_ahash_final(req))
1189 goto clear_hash;
1191 tcp_put_md5sig_pool();
1192 return 0;
1194 clear_hash:
1195 tcp_put_md5sig_pool();
1196 clear_hash_noput:
1197 memset(md5_hash, 0, 16);
1198 return 1;
1200 EXPORT_SYMBOL(tcp_v4_md5_hash_skb);
1202 #endif
1204 /* Called with rcu_read_lock() */
1205 static bool tcp_v4_inbound_md5_hash(const struct sock *sk,
1206 const struct sk_buff *skb)
1208 #ifdef CONFIG_TCP_MD5SIG
1210 * This gets called for each TCP segment that arrives
1211 * so we want to be efficient.
1212 * We have 3 drop cases:
1213 * o No MD5 hash and one expected.
1214 * o MD5 hash and we're not expecting one.
1215 * o MD5 hash and its wrong.
1217 const __u8 *hash_location = NULL;
1218 struct tcp_md5sig_key *hash_expected;
1219 const struct iphdr *iph = ip_hdr(skb);
1220 const struct tcphdr *th = tcp_hdr(skb);
1221 int genhash;
1222 unsigned char newhash[16];
1224 hash_expected = tcp_md5_do_lookup(sk, (union tcp_md5_addr *)&iph->saddr,
1225 AF_INET);
1226 hash_location = tcp_parse_md5sig_option(th);
1228 /* We've parsed the options - do we have a hash? */
1229 if (!hash_expected && !hash_location)
1230 return false;
1232 if (hash_expected && !hash_location) {
1233 NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPMD5NOTFOUND);
1234 return true;
1237 if (!hash_expected && hash_location) {
1238 NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPMD5UNEXPECTED);
1239 return true;
1242 /* Okay, so this is hash_expected and hash_location -
1243 * so we need to calculate the checksum.
1245 genhash = tcp_v4_md5_hash_skb(newhash,
1246 hash_expected,
1247 NULL, skb);
1249 if (genhash || memcmp(hash_location, newhash, 16) != 0) {
1250 NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPMD5FAILURE);
1251 net_info_ratelimited("MD5 Hash failed for (%pI4, %d)->(%pI4, %d)%s\n",
1252 &iph->saddr, ntohs(th->source),
1253 &iph->daddr, ntohs(th->dest),
1254 genhash ? " tcp_v4_calc_md5_hash failed"
1255 : "");
1256 return true;
1258 return false;
1259 #endif
1260 return false;
1263 static void tcp_v4_init_req(struct request_sock *req,
1264 const struct sock *sk_listener,
1265 struct sk_buff *skb)
1267 struct inet_request_sock *ireq = inet_rsk(req);
1269 sk_rcv_saddr_set(req_to_sk(req), ip_hdr(skb)->daddr);
1270 sk_daddr_set(req_to_sk(req), ip_hdr(skb)->saddr);
1271 ireq->opt = tcp_v4_save_options(sock_net(sk_listener), skb);
1274 static struct dst_entry *tcp_v4_route_req(const struct sock *sk,
1275 struct flowi *fl,
1276 const struct request_sock *req)
1278 return inet_csk_route_req(sk, &fl->u.ip4, req);
1281 struct request_sock_ops tcp_request_sock_ops __read_mostly = {
1282 .family = PF_INET,
1283 .obj_size = sizeof(struct tcp_request_sock),
1284 .rtx_syn_ack = tcp_rtx_synack,
1285 .send_ack = tcp_v4_reqsk_send_ack,
1286 .destructor = tcp_v4_reqsk_destructor,
1287 .send_reset = tcp_v4_send_reset,
1288 .syn_ack_timeout = tcp_syn_ack_timeout,
1291 static const struct tcp_request_sock_ops tcp_request_sock_ipv4_ops = {
1292 .mss_clamp = TCP_MSS_DEFAULT,
1293 #ifdef CONFIG_TCP_MD5SIG
1294 .req_md5_lookup = tcp_v4_md5_lookup,
1295 .calc_md5_hash = tcp_v4_md5_hash_skb,
1296 #endif
1297 .init_req = tcp_v4_init_req,
1298 #ifdef CONFIG_SYN_COOKIES
1299 .cookie_init_seq = cookie_v4_init_sequence,
1300 #endif
1301 .route_req = tcp_v4_route_req,
1302 .init_seq = tcp_v4_init_seq,
1303 .init_ts_off = tcp_v4_init_ts_off,
1304 .send_synack = tcp_v4_send_synack,
1307 int tcp_v4_conn_request(struct sock *sk, struct sk_buff *skb)
1309 /* Never answer SYNs sent to broadcast or multicast */
1310 if (skb_rtable(skb)->rt_flags & (RTCF_BROADCAST | RTCF_MULTICAST))
1311 goto drop;
1313 return tcp_conn_request(&tcp_request_sock_ops,
1314 &tcp_request_sock_ipv4_ops, sk, skb);
1316 drop:
1317 tcp_listendrop(sk);
1318 return 0;
1320 EXPORT_SYMBOL(tcp_v4_conn_request);
1324 * The three way handshake has completed - we got a valid synack -
1325 * now create the new socket.
1327 struct sock *tcp_v4_syn_recv_sock(const struct sock *sk, struct sk_buff *skb,
1328 struct request_sock *req,
1329 struct dst_entry *dst,
1330 struct request_sock *req_unhash,
1331 bool *own_req)
1333 struct inet_request_sock *ireq;
1334 struct inet_sock *newinet;
1335 struct tcp_sock *newtp;
1336 struct sock *newsk;
1337 #ifdef CONFIG_TCP_MD5SIG
1338 struct tcp_md5sig_key *key;
1339 #endif
1340 struct ip_options_rcu *inet_opt;
1342 if (sk_acceptq_is_full(sk))
1343 goto exit_overflow;
1345 newsk = tcp_create_openreq_child(sk, req, skb);
1346 if (!newsk)
1347 goto exit_nonewsk;
1349 newsk->sk_gso_type = SKB_GSO_TCPV4;
1350 inet_sk_rx_dst_set(newsk, skb);
1352 newtp = tcp_sk(newsk);
1353 newinet = inet_sk(newsk);
1354 ireq = inet_rsk(req);
1355 sk_daddr_set(newsk, ireq->ir_rmt_addr);
1356 sk_rcv_saddr_set(newsk, ireq->ir_loc_addr);
1357 newsk->sk_bound_dev_if = ireq->ir_iif;
1358 newinet->inet_saddr = ireq->ir_loc_addr;
1359 inet_opt = ireq->opt;
1360 rcu_assign_pointer(newinet->inet_opt, inet_opt);
1361 ireq->opt = NULL;
1362 newinet->mc_index = inet_iif(skb);
1363 newinet->mc_ttl = ip_hdr(skb)->ttl;
1364 newinet->rcv_tos = ip_hdr(skb)->tos;
1365 inet_csk(newsk)->icsk_ext_hdr_len = 0;
1366 if (inet_opt)
1367 inet_csk(newsk)->icsk_ext_hdr_len = inet_opt->opt.optlen;
1368 newinet->inet_id = newtp->write_seq ^ jiffies;
1370 if (!dst) {
1371 dst = inet_csk_route_child_sock(sk, newsk, req);
1372 if (!dst)
1373 goto put_and_exit;
1374 } else {
1375 /* syncookie case : see end of cookie_v4_check() */
1377 sk_setup_caps(newsk, dst);
1379 tcp_ca_openreq_child(newsk, dst);
1381 tcp_sync_mss(newsk, dst_mtu(dst));
1382 newtp->advmss = tcp_mss_clamp(tcp_sk(sk), dst_metric_advmss(dst));
1384 tcp_initialize_rcv_mss(newsk);
1386 #ifdef CONFIG_TCP_MD5SIG
1387 /* Copy over the MD5 key from the original socket */
1388 key = tcp_md5_do_lookup(sk, (union tcp_md5_addr *)&newinet->inet_daddr,
1389 AF_INET);
1390 if (key) {
1392 * We're using one, so create a matching key
1393 * on the newsk structure. If we fail to get
1394 * memory, then we end up not copying the key
1395 * across. Shucks.
1397 tcp_md5_do_add(newsk, (union tcp_md5_addr *)&newinet->inet_daddr,
1398 AF_INET, 32, key->key, key->keylen, GFP_ATOMIC);
1399 sk_nocaps_add(newsk, NETIF_F_GSO_MASK);
1401 #endif
1403 if (__inet_inherit_port(sk, newsk) < 0)
1404 goto put_and_exit;
1405 *own_req = inet_ehash_nolisten(newsk, req_to_sk(req_unhash));
1406 if (*own_req)
1407 tcp_move_syn(newtp, req);
1409 return newsk;
1411 exit_overflow:
1412 NET_INC_STATS(sock_net(sk), LINUX_MIB_LISTENOVERFLOWS);
1413 exit_nonewsk:
1414 dst_release(dst);
1415 exit:
1416 tcp_listendrop(sk);
1417 return NULL;
1418 put_and_exit:
1419 inet_csk_prepare_forced_close(newsk);
1420 tcp_done(newsk);
1421 goto exit;
1423 EXPORT_SYMBOL(tcp_v4_syn_recv_sock);
1425 static struct sock *tcp_v4_cookie_check(struct sock *sk, struct sk_buff *skb)
1427 #ifdef CONFIG_SYN_COOKIES
1428 const struct tcphdr *th = tcp_hdr(skb);
1430 if (!th->syn)
1431 sk = cookie_v4_check(sk, skb);
1432 #endif
1433 return sk;
1436 /* The socket must have its spinlock held when we get
1437 * here, unless it is a TCP_LISTEN socket.
1439 * We have a potential double-lock case here, so even when
1440 * doing backlog processing we use the BH locking scheme.
1441 * This is because we cannot sleep with the original spinlock
1442 * held.
1444 int tcp_v4_do_rcv(struct sock *sk, struct sk_buff *skb)
1446 struct sock *rsk;
1448 if (sk->sk_state == TCP_ESTABLISHED) { /* Fast path */
1449 struct dst_entry *dst = sk->sk_rx_dst;
1451 sock_rps_save_rxhash(sk, skb);
1452 sk_mark_napi_id(sk, skb);
1453 if (dst) {
1454 if (inet_sk(sk)->rx_dst_ifindex != skb->skb_iif ||
1455 !dst->ops->check(dst, 0)) {
1456 dst_release(dst);
1457 sk->sk_rx_dst = NULL;
1460 tcp_rcv_established(sk, skb, tcp_hdr(skb));
1461 return 0;
1464 if (tcp_checksum_complete(skb))
1465 goto csum_err;
1467 if (sk->sk_state == TCP_LISTEN) {
1468 struct sock *nsk = tcp_v4_cookie_check(sk, skb);
1470 if (!nsk)
1471 goto discard;
1472 if (nsk != sk) {
1473 if (tcp_child_process(sk, nsk, skb)) {
1474 rsk = nsk;
1475 goto reset;
1477 return 0;
1479 } else
1480 sock_rps_save_rxhash(sk, skb);
1482 if (tcp_rcv_state_process(sk, skb)) {
1483 rsk = sk;
1484 goto reset;
1486 return 0;
1488 reset:
1489 tcp_v4_send_reset(rsk, skb);
1490 discard:
1491 kfree_skb(skb);
1492 /* Be careful here. If this function gets more complicated and
1493 * gcc suffers from register pressure on the x86, sk (in %ebx)
1494 * might be destroyed here. This current version compiles correctly,
1495 * but you have been warned.
1497 return 0;
1499 csum_err:
1500 TCP_INC_STATS(sock_net(sk), TCP_MIB_CSUMERRORS);
1501 TCP_INC_STATS(sock_net(sk), TCP_MIB_INERRS);
1502 goto discard;
1504 EXPORT_SYMBOL(tcp_v4_do_rcv);
1506 void tcp_v4_early_demux(struct sk_buff *skb)
1508 const struct iphdr *iph;
1509 const struct tcphdr *th;
1510 struct sock *sk;
1512 if (skb->pkt_type != PACKET_HOST)
1513 return;
1515 if (!pskb_may_pull(skb, skb_transport_offset(skb) + sizeof(struct tcphdr)))
1516 return;
1518 iph = ip_hdr(skb);
1519 th = tcp_hdr(skb);
1521 if (th->doff < sizeof(struct tcphdr) / 4)
1522 return;
1524 sk = __inet_lookup_established(dev_net(skb->dev), &tcp_hashinfo,
1525 iph->saddr, th->source,
1526 iph->daddr, ntohs(th->dest),
1527 skb->skb_iif, inet_sdif(skb));
1528 if (sk) {
1529 skb->sk = sk;
1530 skb->destructor = sock_edemux;
1531 if (sk_fullsock(sk)) {
1532 struct dst_entry *dst = READ_ONCE(sk->sk_rx_dst);
1534 if (dst)
1535 dst = dst_check(dst, 0);
1536 if (dst &&
1537 inet_sk(sk)->rx_dst_ifindex == skb->skb_iif)
1538 skb_dst_set_noref(skb, dst);
1543 bool tcp_add_backlog(struct sock *sk, struct sk_buff *skb)
1545 u32 limit = sk->sk_rcvbuf + sk->sk_sndbuf;
1547 /* Only socket owner can try to collapse/prune rx queues
1548 * to reduce memory overhead, so add a little headroom here.
1549 * Only a few socket backlogs are likely to be concurrently non-empty.
1551 limit += 64*1024;
1553 /* In case all data was pulled from skb frags (in __pskb_pull_tail()),
1554 * we can fix skb->truesize to its real value to avoid future drops.
1555 * This is valid because skb is not yet charged to the socket.
1556 * It has been noticed pure SACK packets were sometimes dropped
1557 * (if cooked by drivers without copybreak feature).
1559 skb_condense(skb);
1561 if (unlikely(sk_add_backlog(sk, skb, limit))) {
1562 bh_unlock_sock(sk);
1563 __NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPBACKLOGDROP);
1564 return true;
1566 return false;
1568 EXPORT_SYMBOL(tcp_add_backlog);
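/*
 * Editor's illustrative sketch, not part of the original source: a rough
 * userspace view of the backlog budget computed above, i.e. the socket's
 * receive and send buffer sizes plus 64 KB of headroom (getsockopt()
 * reports the same sk_rcvbuf/sk_sndbuf values the kernel uses):
 */
#include <sys/socket.h>

static long backlog_budget(int fd)
{
	int rcvbuf = 0, sndbuf = 0;
	socklen_t len = sizeof(rcvbuf);

	getsockopt(fd, SOL_SOCKET, SO_RCVBUF, &rcvbuf, &len);
	len = sizeof(sndbuf);
	getsockopt(fd, SOL_SOCKET, SO_SNDBUF, &sndbuf, &len);

	return (long)rcvbuf + sndbuf + 64 * 1024;
}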
1570 int tcp_filter(struct sock *sk, struct sk_buff *skb)
1572 struct tcphdr *th = (struct tcphdr *)skb->data;
1573 unsigned int eaten = skb->len;
1574 int err;
1576 err = sk_filter_trim_cap(sk, skb, th->doff * 4);
1577 if (!err) {
1578 eaten -= skb->len;
1579 TCP_SKB_CB(skb)->end_seq -= eaten;
1581 return err;
1583 EXPORT_SYMBOL(tcp_filter);
1586 * From tcp_input.c
1589 int tcp_v4_rcv(struct sk_buff *skb)
1591 struct net *net = dev_net(skb->dev);
1592 int sdif = inet_sdif(skb);
1593 const struct iphdr *iph;
1594 const struct tcphdr *th;
1595 bool refcounted;
1596 struct sock *sk;
1597 int ret;
1599 if (skb->pkt_type != PACKET_HOST)
1600 goto discard_it;
1602 /* Count it even if it's bad */
1603 __TCP_INC_STATS(net, TCP_MIB_INSEGS);
1605 if (!pskb_may_pull(skb, sizeof(struct tcphdr)))
1606 goto discard_it;
1608 th = (const struct tcphdr *)skb->data;
1610 if (unlikely(th->doff < sizeof(struct tcphdr) / 4))
1611 goto bad_packet;
1612 if (!pskb_may_pull(skb, th->doff * 4))
1613 goto discard_it;
1615 /* An explanation is required here, I think.
1616 * Packet length and doff are validated by header prediction,
1617 * provided case of th->doff==0 is eliminated.
1618 * So, we defer the checks. */
1620 if (skb_checksum_init(skb, IPPROTO_TCP, inet_compute_pseudo))
1621 goto csum_error;
1623 th = (const struct tcphdr *)skb->data;
1624 iph = ip_hdr(skb);
1625 /* This is tricky : We move IPCB at its correct location into TCP_SKB_CB()
1626 * barrier() makes sure the compiler won't play fool^Waliasing games.
1628 memmove(&TCP_SKB_CB(skb)->header.h4, IPCB(skb),
1629 sizeof(struct inet_skb_parm));
1630 barrier();
1632 TCP_SKB_CB(skb)->seq = ntohl(th->seq);
1633 TCP_SKB_CB(skb)->end_seq = (TCP_SKB_CB(skb)->seq + th->syn + th->fin +
1634 skb->len - th->doff * 4);
1635 TCP_SKB_CB(skb)->ack_seq = ntohl(th->ack_seq);
1636 TCP_SKB_CB(skb)->tcp_flags = tcp_flag_byte(th);
1637 TCP_SKB_CB(skb)->tcp_tw_isn = 0;
1638 TCP_SKB_CB(skb)->ip_dsfield = ipv4_get_dsfield(iph);
1639 TCP_SKB_CB(skb)->sacked = 0;
1640 TCP_SKB_CB(skb)->has_rxtstamp =
1641 skb->tstamp || skb_hwtstamps(skb)->hwtstamp;
1643 lookup:
1644 sk = __inet_lookup_skb(&tcp_hashinfo, skb, __tcp_hdrlen(th), th->source,
1645 th->dest, sdif, &refcounted);
1646 if (!sk)
1647 goto no_tcp_socket;
1649 process:
1650 if (sk->sk_state == TCP_TIME_WAIT)
1651 goto do_time_wait;
1653 if (sk->sk_state == TCP_NEW_SYN_RECV) {
1654 struct request_sock *req = inet_reqsk(sk);
1655 struct sock *nsk;
1657 sk = req->rsk_listener;
1658 if (unlikely(tcp_v4_inbound_md5_hash(sk, skb))) {
1659 sk_drops_add(sk, skb);
1660 reqsk_put(req);
1661 goto discard_it;
1663 if (unlikely(sk->sk_state != TCP_LISTEN)) {
1664 inet_csk_reqsk_queue_drop_and_put(sk, req);
1665 goto lookup;
1667 /* We own a reference on the listener, increase it again
1668 * as we might lose it too soon.
1670 sock_hold(sk);
1671 refcounted = true;
1672 if (tcp_filter(sk, skb))
1673 goto discard_and_relse;
1674 nsk = tcp_check_req(sk, skb, req, false);
1675 if (!nsk) {
1676 reqsk_put(req);
1677 goto discard_and_relse;
1679 if (nsk == sk) {
1680 reqsk_put(req);
1681 } else if (tcp_child_process(sk, nsk, skb)) {
1682 tcp_v4_send_reset(nsk, skb);
1683 goto discard_and_relse;
1684 } else {
1685 sock_put(sk);
1686 return 0;
1689 if (unlikely(iph->ttl < inet_sk(sk)->min_ttl)) {
1690 __NET_INC_STATS(net, LINUX_MIB_TCPMINTTLDROP);
1691 goto discard_and_relse;
1694 if (!xfrm4_policy_check(sk, XFRM_POLICY_IN, skb))
1695 goto discard_and_relse;
1697 if (tcp_v4_inbound_md5_hash(sk, skb))
1698 goto discard_and_relse;
1700 nf_reset(skb);
1702 if (tcp_filter(sk, skb))
1703 goto discard_and_relse;
1704 th = (const struct tcphdr *)skb->data;
1705 iph = ip_hdr(skb);
1707 skb->dev = NULL;
1709 if (sk->sk_state == TCP_LISTEN) {
1710 ret = tcp_v4_do_rcv(sk, skb);
1711 goto put_and_return;
1714 sk_incoming_cpu_update(sk);
1716 bh_lock_sock_nested(sk);
1717 tcp_segs_in(tcp_sk(sk), skb);
1718 ret = 0;
1719 if (!sock_owned_by_user(sk)) {
1720 ret = tcp_v4_do_rcv(sk, skb);
1721 } else if (tcp_add_backlog(sk, skb)) {
1722 goto discard_and_relse;
1724 bh_unlock_sock(sk);
1726 put_and_return:
1727 if (refcounted)
1728 sock_put(sk);
1730 return ret;
1732 no_tcp_socket:
1733 if (!xfrm4_policy_check(NULL, XFRM_POLICY_IN, skb))
1734 goto discard_it;
1736 if (tcp_checksum_complete(skb)) {
1737 csum_error:
1738 __TCP_INC_STATS(net, TCP_MIB_CSUMERRORS);
1739 bad_packet:
1740 __TCP_INC_STATS(net, TCP_MIB_INERRS);
1741 } else {
1742 tcp_v4_send_reset(NULL, skb);
1745 discard_it:
1746 /* Discard frame. */
1747 kfree_skb(skb);
1748 return 0;
1750 discard_and_relse:
1751 sk_drops_add(sk, skb);
1752 if (refcounted)
1753 sock_put(sk);
1754 goto discard_it;
1756 do_time_wait:
1757 if (!xfrm4_policy_check(NULL, XFRM_POLICY_IN, skb)) {
1758 inet_twsk_put(inet_twsk(sk));
1759 goto discard_it;
1762 if (tcp_checksum_complete(skb)) {
1763 inet_twsk_put(inet_twsk(sk));
1764 goto csum_error;
1766 switch (tcp_timewait_state_process(inet_twsk(sk), skb, th)) {
1767 case TCP_TW_SYN: {
1768 struct sock *sk2 = inet_lookup_listener(dev_net(skb->dev),
1769 &tcp_hashinfo, skb,
1770 __tcp_hdrlen(th),
1771 iph->saddr, th->source,
1772 iph->daddr, th->dest,
1773 inet_iif(skb),
1774 sdif);
1775 if (sk2) {
1776 inet_twsk_deschedule_put(inet_twsk(sk));
1777 sk = sk2;
1778 refcounted = false;
1779 goto process;
1781 /* Fall through to ACK */
1783 case TCP_TW_ACK:
1784 tcp_v4_timewait_ack(sk, skb);
1785 break;
1786 case TCP_TW_RST:
1787 tcp_v4_send_reset(sk, skb);
1788 inet_twsk_deschedule_put(inet_twsk(sk));
1789 goto discard_it;
1790 case TCP_TW_SUCCESS:;
1792 goto discard_it;
1795 static struct timewait_sock_ops tcp_timewait_sock_ops = {
1796 .twsk_obj_size = sizeof(struct tcp_timewait_sock),
1797 .twsk_unique = tcp_twsk_unique,
1798 .twsk_destructor= tcp_twsk_destructor,
1801 void inet_sk_rx_dst_set(struct sock *sk, const struct sk_buff *skb)
1803 struct dst_entry *dst = skb_dst(skb);
1805 if (dst && dst_hold_safe(dst)) {
1806 sk->sk_rx_dst = dst;
1807 inet_sk(sk)->rx_dst_ifindex = skb->skb_iif;
1810 EXPORT_SYMBOL(inet_sk_rx_dst_set);
1812 const struct inet_connection_sock_af_ops ipv4_specific = {
1813 .queue_xmit = ip_queue_xmit,
1814 .send_check = tcp_v4_send_check,
1815 .rebuild_header = inet_sk_rebuild_header,
1816 .sk_rx_dst_set = inet_sk_rx_dst_set,
1817 .conn_request = tcp_v4_conn_request,
1818 .syn_recv_sock = tcp_v4_syn_recv_sock,
1819 .net_header_len = sizeof(struct iphdr),
1820 .setsockopt = ip_setsockopt,
1821 .getsockopt = ip_getsockopt,
1822 .addr2sockaddr = inet_csk_addr2sockaddr,
1823 .sockaddr_len = sizeof(struct sockaddr_in),
1824 #ifdef CONFIG_COMPAT
1825 .compat_setsockopt = compat_ip_setsockopt,
1826 .compat_getsockopt = compat_ip_getsockopt,
1827 #endif
1828 .mtu_reduced = tcp_v4_mtu_reduced,
1830 EXPORT_SYMBOL(ipv4_specific);
1832 #ifdef CONFIG_TCP_MD5SIG
1833 static const struct tcp_sock_af_ops tcp_sock_ipv4_specific = {
1834 .md5_lookup = tcp_v4_md5_lookup,
1835 .calc_md5_hash = tcp_v4_md5_hash_skb,
1836 .md5_parse = tcp_v4_parse_md5_keys,
1838 #endif
1840 /* NOTE: A lot of things set to zero explicitly by call to
1841 * sk_alloc() so need not be done here.
1843 static int tcp_v4_init_sock(struct sock *sk)
1845 struct inet_connection_sock *icsk = inet_csk(sk);
1847 tcp_init_sock(sk);
1849 icsk->icsk_af_ops = &ipv4_specific;
1851 #ifdef CONFIG_TCP_MD5SIG
1852 tcp_sk(sk)->af_specific = &tcp_sock_ipv4_specific;
1853 #endif
1855 return 0;
1858 void tcp_v4_destroy_sock(struct sock *sk)
1860 struct tcp_sock *tp = tcp_sk(sk);
1862 tcp_clear_xmit_timers(sk);
1864 tcp_cleanup_congestion_control(sk);
1866 tcp_cleanup_ulp(sk);
1868 /* Cleanup up the write buffer. */
1869 tcp_write_queue_purge(sk);
1871 /* Check if we want to disable active TFO */
1872 tcp_fastopen_active_disable_ofo_check(sk);
1874 /* Cleans up our, hopefully empty, out_of_order_queue. */
1875 skb_rbtree_purge(&tp->out_of_order_queue);
1877 #ifdef CONFIG_TCP_MD5SIG
1878 /* Clean up the MD5 key list, if any */
1879 if (tp->md5sig_info) {
1880 tcp_clear_md5_list(sk);
1881 kfree_rcu(tp->md5sig_info, rcu);
1882 tp->md5sig_info = NULL;
1884 #endif
1886 /* Clean up a referenced TCP bind bucket. */
1887 if (inet_csk(sk)->icsk_bind_hash)
1888 inet_put_port(sk);
1890 BUG_ON(tp->fastopen_rsk);
1892 /* If socket is aborted during connect operation */
1893 tcp_free_fastopen_req(tp);
1894 tcp_saved_syn_free(tp);
1896 sk_sockets_allocated_dec(sk);
1898 EXPORT_SYMBOL(tcp_v4_destroy_sock);
1900 #ifdef CONFIG_PROC_FS
1901 /* Proc filesystem TCP sock list dumping. */
1904 * Get the next listener socket following cur. If cur is NULL, get the first socket
1905 * starting from bucket given in st->bucket; when st->bucket is zero the
1906 * very first socket in the hash table is returned.
1908 static void *listening_get_next(struct seq_file *seq, void *cur)
1910 struct tcp_iter_state *st = seq->private;
1911 struct net *net = seq_file_net(seq);
1912 struct inet_listen_hashbucket *ilb;
1913 struct sock *sk = cur;
1915 if (!sk) {
1916 get_head:
1917 ilb = &tcp_hashinfo.listening_hash[st->bucket];
1918 spin_lock(&ilb->lock);
1919 sk = sk_head(&ilb->head);
1920 st->offset = 0;
1921 goto get_sk;
1923 ilb = &tcp_hashinfo.listening_hash[st->bucket];
1924 ++st->num;
1925 ++st->offset;
1927 sk = sk_next(sk);
1928 get_sk:
1929 sk_for_each_from(sk) {
1930 if (!net_eq(sock_net(sk), net))
1931 continue;
1932 if (sk->sk_family == st->family)
1933 return sk;
1935 spin_unlock(&ilb->lock);
1936 st->offset = 0;
1937 if (++st->bucket < INET_LHTABLE_SIZE)
1938 goto get_head;
1939 return NULL;
1942 static void *listening_get_idx(struct seq_file *seq, loff_t *pos)
1944 struct tcp_iter_state *st = seq->private;
1945 void *rc;
1947 st->bucket = 0;
1948 st->offset = 0;
1949 rc = listening_get_next(seq, NULL);
1951 while (rc && *pos) {
1952 rc = listening_get_next(seq, rc);
1953 --*pos;
1955 return rc;
1958 static inline bool empty_bucket(const struct tcp_iter_state *st)
1960 return hlist_nulls_empty(&tcp_hashinfo.ehash[st->bucket].chain);
1964 * Get first established socket starting from bucket given in st->bucket.
1965 * If st->bucket is zero, the very first socket in the hash is returned.
1967 static void *established_get_first(struct seq_file *seq)
1969 struct tcp_iter_state *st = seq->private;
1970 struct net *net = seq_file_net(seq);
1971 void *rc = NULL;
1973 st->offset = 0;
1974 for (; st->bucket <= tcp_hashinfo.ehash_mask; ++st->bucket) {
1975 struct sock *sk;
1976 struct hlist_nulls_node *node;
1977 spinlock_t *lock = inet_ehash_lockp(&tcp_hashinfo, st->bucket);
1979 /* Lockless fast path for the common case of empty buckets */
1980 if (empty_bucket(st))
1981 continue;
1983 spin_lock_bh(lock);
1984 sk_nulls_for_each(sk, node, &tcp_hashinfo.ehash[st->bucket].chain) {
1985 if (sk->sk_family != st->family ||
1986 !net_eq(sock_net(sk), net)) {
1987 continue;
1989 rc = sk;
1990 goto out;
1992 spin_unlock_bh(lock);
1994 out:
1995 return rc;
1998 static void *established_get_next(struct seq_file *seq, void *cur)
2000 struct sock *sk = cur;
2001 struct hlist_nulls_node *node;
2002 struct tcp_iter_state *st = seq->private;
2003 struct net *net = seq_file_net(seq);
2005 ++st->num;
2006 ++st->offset;
2008 sk = sk_nulls_next(sk);
2010 sk_nulls_for_each_from(sk, node) {
2011 if (sk->sk_family == st->family && net_eq(sock_net(sk), net))
2012 return sk;
2015 spin_unlock_bh(inet_ehash_lockp(&tcp_hashinfo, st->bucket));
2016 ++st->bucket;
2017 return established_get_first(seq);
2020 static void *established_get_idx(struct seq_file *seq, loff_t pos)
2022 struct tcp_iter_state *st = seq->private;
2023 void *rc;
2025 st->bucket = 0;
2026 rc = established_get_first(seq);
2028 while (rc && pos) {
2029 rc = established_get_next(seq, rc);
2030 --pos;
2032 return rc;
2035 static void *tcp_get_idx(struct seq_file *seq, loff_t pos)
2037 void *rc;
2038 struct tcp_iter_state *st = seq->private;
2040 st->state = TCP_SEQ_STATE_LISTENING;
2041 rc = listening_get_idx(seq, &pos);
2043 if (!rc) {
2044 st->state = TCP_SEQ_STATE_ESTABLISHED;
2045 rc = established_get_idx(seq, pos);
2048 return rc;
2051 static void *tcp_seek_last_pos(struct seq_file *seq)
2053 struct tcp_iter_state *st = seq->private;
2054 int offset = st->offset;
2055 int orig_num = st->num;
2056 void *rc = NULL;
2058 switch (st->state) {
2059 case TCP_SEQ_STATE_LISTENING:
2060 if (st->bucket >= INET_LHTABLE_SIZE)
2061 break;
2062 st->state = TCP_SEQ_STATE_LISTENING;
2063 rc = listening_get_next(seq, NULL);
2064 while (offset-- && rc)
2065 rc = listening_get_next(seq, rc);
2066 if (rc)
2067 break;
2068 st->bucket = 0;
2069 st->state = TCP_SEQ_STATE_ESTABLISHED;
2070 /* Fallthrough */
2071 case TCP_SEQ_STATE_ESTABLISHED:
2072 if (st->bucket > tcp_hashinfo.ehash_mask)
2073 break;
2074 rc = established_get_first(seq);
2075 while (offset-- && rc)
2076 rc = established_get_next(seq, rc);
2079 st->num = orig_num;
2081 return rc;
2084 static void *tcp_seq_start(struct seq_file *seq, loff_t *pos)
2086 struct tcp_iter_state *st = seq->private;
2087 void *rc;
2089 if (*pos && *pos == st->last_pos) {
2090 rc = tcp_seek_last_pos(seq);
2091 if (rc)
2092 goto out;
2095 st->state = TCP_SEQ_STATE_LISTENING;
2096 st->num = 0;
2097 st->bucket = 0;
2098 st->offset = 0;
2099 rc = *pos ? tcp_get_idx(seq, *pos - 1) : SEQ_START_TOKEN;
2101 out:
2102 st->last_pos = *pos;
2103 return rc;
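/*
 * Why st->last_pos matters (a sketch, assuming the usual seq_file read
 * pattern): each read() on /proc/net/tcp re-enters ->start() with the
 * position where the previous chunk stopped.  Without the fast path a table
 * with N entries would be rescanned from bucket 0 on every chunk, i.e.
 * O(N^2) work for one full dump.  With it, a call sequence like
 *
 *	start(*pos = 0)   -> header
 *	next() ... next() -> entries, chunk ends at *pos == 64, last_pos = 64
 *	start(*pos = 64)  -> matches last_pos, tcp_seek_last_pos() resumes at
 *	                     the saved st->bucket/st->offset
 *
 * only pays for the sockets actually emitted.  The chunk boundary (64 here)
 * is illustrative; it depends on the user-supplied buffer size.
 */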
2106 static void *tcp_seq_next(struct seq_file *seq, void *v, loff_t *pos)
2108 struct tcp_iter_state *st = seq->private;
2109 void *rc = NULL;
2111 if (v == SEQ_START_TOKEN) {
2112 rc = tcp_get_idx(seq, 0);
2113 goto out;
2116 switch (st->state) {
2117 case TCP_SEQ_STATE_LISTENING:
2118 rc = listening_get_next(seq, v);
2119 if (!rc) {
2120 st->state = TCP_SEQ_STATE_ESTABLISHED;
2121 st->bucket = 0;
2122 st->offset = 0;
2123 rc = established_get_first(seq);
2125 break;
2126 case TCP_SEQ_STATE_ESTABLISHED:
2127 rc = established_get_next(seq, v);
2128 break;
2130 out:
2131 ++*pos;
2132 st->last_pos = *pos;
2133 return rc;
2136 static void tcp_seq_stop(struct seq_file *seq, void *v)
2138 struct tcp_iter_state *st = seq->private;
2140 switch (st->state) {
2141 case TCP_SEQ_STATE_LISTENING:
2142 if (v != SEQ_START_TOKEN)
2143 spin_unlock(&tcp_hashinfo.listening_hash[st->bucket].lock);
2144 break;
2145 case TCP_SEQ_STATE_ESTABLISHED:
2146 if (v)
2147 spin_unlock_bh(inet_ehash_lockp(&tcp_hashinfo, st->bucket));
2148 break;
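/*
 * Locking invariant behind tcp_seq_stop() (informal): whichever get_*()
 * helper returned the entry currently being shown leaves that entry's bucket
 * lock held, so ->show() can dereference it safely:
 *
 *	SEQ_START_TOKEN            - no lock held
 *	TCP_SEQ_STATE_LISTENING    - listening_hash[st->bucket].lock, taken
 *	                             with plain spin_lock()
 *	TCP_SEQ_STATE_ESTABLISHED  - inet_ehash_lockp(..., st->bucket), taken
 *	                             with spin_lock_bh()
 *
 * tcp_seq_stop() drops exactly that lock, which is why it mirrors the
 * BH vs. non-BH flavour used on the acquire side.
 */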
2152 int tcp_seq_open(struct inode *inode, struct file *file)
2154 struct tcp_seq_afinfo *afinfo = PDE_DATA(inode);
2155 struct tcp_iter_state *s;
2156 int err;
2158 err = seq_open_net(inode, file, &afinfo->seq_ops,
2159 sizeof(struct tcp_iter_state));
2160 if (err < 0)
2161 return err;
2163 s = ((struct seq_file *)file->private_data)->private;
2164 s->family = afinfo->family;
2165 s->last_pos = 0;
2166 return 0;
2168 EXPORT_SYMBOL(tcp_seq_open);
2170 int tcp_proc_register(struct net *net, struct tcp_seq_afinfo *afinfo)
2172 int rc = 0;
2173 struct proc_dir_entry *p;
2175 afinfo->seq_ops.start = tcp_seq_start;
2176 afinfo->seq_ops.next = tcp_seq_next;
2177 afinfo->seq_ops.stop = tcp_seq_stop;
2179 p = proc_create_data(afinfo->name, S_IRUGO, net->proc_net,
2180 afinfo->seq_fops, afinfo);
2181 if (!p)
2182 rc = -ENOMEM;
2183 return rc;
2185 EXPORT_SYMBOL(tcp_proc_register);
2187 void tcp_proc_unregister(struct net *net, struct tcp_seq_afinfo *afinfo)
2189 remove_proc_entry(afinfo->name, net->proc_net);
2191 EXPORT_SYMBOL(tcp_proc_unregister);
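/*
 * These exports exist so that other address families can reuse the same
 * iterator.  Roughly what net/ipv6/tcp_ipv6.c does for /proc/net/tcp6
 * (a sketch; names as in that file):
 *
 *	static struct tcp_seq_afinfo tcp6_seq_afinfo = {
 *		.name		= "tcp6",
 *		.family		= AF_INET6,
 *		.seq_fops	= &tcp6_afinfo_seq_fops,
 *		.seq_ops	= {
 *			.show		= tcp6_seq_show,
 *		},
 *	};
 *
 *	static int __net_init tcp6_proc_init(struct net *net)
 *	{
 *		return tcp_proc_register(net, &tcp6_seq_afinfo);
 *	}
 *
 * Only ->show and ->family differ; start/next/stop are filled in by
 * tcp_proc_register() below.
 */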
2193 static void get_openreq4(const struct request_sock *req,
2194 struct seq_file *f, int i)
2196 const struct inet_request_sock *ireq = inet_rsk(req);
2197 long delta = req->rsk_timer.expires - jiffies;
2199 seq_printf(f, "%4d: %08X:%04X %08X:%04X"
2200 " %02X %08X:%08X %02X:%08lX %08X %5u %8d %u %d %pK",
2201 i,
2202 ireq->ir_loc_addr,
2203 ireq->ir_num,
2204 ireq->ir_rmt_addr,
2205 ntohs(ireq->ir_rmt_port),
2206 TCP_SYN_RECV,
2207 0, 0, /* could print option size, but that is af dependent. */
2208 1, /* timers active (only the expire timer) */
2209 jiffies_delta_to_clock_t(delta),
2210 req->num_timeout,
2211 from_kuid_munged(seq_user_ns(f),
2212 sock_i_uid(req->rsk_listener)),
2213 0, /* non standard timer */
2214 0, /* open_requests have no inode */
2215 0,
2216 req);
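/*
 * What this prints, column by column (an illustrative SYN_RECV row; values
 * and spacing are approximate):
 *
 *	  12: 0100007F:1F90 0200007F:D2F0 03 00000000:00000000 01:00000012 00000000  1000        0 0 0 ffff8800...
 *
 * st is always 03 (TCP_SYN_RECV), tx_queue/rx_queue are printed as 0,
 * tr:tm->when is 01:<remaining SYN-ACK timer>, retrnsmt is req->num_timeout,
 * uid is the listening socket's owner, and both the timeout and inode
 * columns are 0 because a request sock has neither.
 */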
2219 static void get_tcp4_sock(struct sock *sk, struct seq_file *f, int i)
2221 int timer_active;
2222 unsigned long timer_expires;
2223 const struct tcp_sock *tp = tcp_sk(sk);
2224 const struct inet_connection_sock *icsk = inet_csk(sk);
2225 const struct inet_sock *inet = inet_sk(sk);
2226 const struct fastopen_queue *fastopenq = &icsk->icsk_accept_queue.fastopenq;
2227 __be32 dest = inet->inet_daddr;
2228 __be32 src = inet->inet_rcv_saddr;
2229 __u16 destp = ntohs(inet->inet_dport);
2230 __u16 srcp = ntohs(inet->inet_sport);
2231 int rx_queue;
2232 int state;
2234 if (icsk->icsk_pending == ICSK_TIME_RETRANS ||
2235 icsk->icsk_pending == ICSK_TIME_REO_TIMEOUT ||
2236 icsk->icsk_pending == ICSK_TIME_LOSS_PROBE) {
2237 timer_active = 1;
2238 timer_expires = icsk->icsk_timeout;
2239 } else if (icsk->icsk_pending == ICSK_TIME_PROBE0) {
2240 timer_active = 4;
2241 timer_expires = icsk->icsk_timeout;
2242 } else if (timer_pending(&sk->sk_timer)) {
2243 timer_active = 2;
2244 timer_expires = sk->sk_timer.expires;
2245 } else {
2246 timer_active = 0;
2247 timer_expires = jiffies;
2250 state = sk_state_load(sk);
2251 if (state == TCP_LISTEN)
2252 rx_queue = sk->sk_ack_backlog;
2253 else
2254 /* Because we don't lock the socket,
2255 * we might find a transient negative value.
2256 */
2257 rx_queue = max_t(int, tp->rcv_nxt - tp->copied_seq, 0);
2259 seq_printf(f, "%4d: %08X:%04X %08X:%04X %02X %08X:%08X %02X:%08lX "
2260 "%08X %5u %8d %lu %d %pK %lu %lu %u %u %d",
2261 i, src, srcp, dest, destp, state,
2262 tp->write_seq - tp->snd_una,
2263 rx_queue,
2264 timer_active,
2265 jiffies_delta_to_clock_t(timer_expires - jiffies),
2266 icsk->icsk_retransmits,
2267 from_kuid_munged(seq_user_ns(f), sock_i_uid(sk)),
2268 icsk->icsk_probes_out,
2269 sock_i_ino(sk),
2270 refcount_read(&sk->sk_refcnt), sk,
2271 jiffies_to_clock_t(icsk->icsk_rto),
2272 jiffies_to_clock_t(icsk->icsk_ack.ato),
2273 (icsk->icsk_ack.quick << 1) | icsk->icsk_ack.pingpong,
2274 tp->snd_cwnd,
2275 state == TCP_LISTEN ?
2276 fastopenq->max_qlen :
2277 (tcp_in_initial_slowstart(tp) ? -1 : tp->snd_ssthresh));
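/*
 * Decoding hint (a sketch, little-endian host assumed): the address is the
 * raw __be32 printed with %08X, so it appears byte-reversed relative to
 * dotted-quad notation, while the port has already been ntohs()ed.
 * "0100007F:0016" is therefore 127.0.0.1:22, an st value of 0A is TCP_LISTEN
 * (10) and 01 is TCP_ESTABLISHED.  A user-space decoder can do:
 *
 *	unsigned int a, p;
 *
 *	sscanf(token, "%8X:%X", &a, &p);
 *	printf("%u.%u.%u.%u:%u\n",
 *	       a & 0xff, (a >> 8) & 0xff, (a >> 16) & 0xff, (a >> 24) & 0xff,
 *	       p);
 *
 * On a big-endian host the printed hex already reads in address order.
 */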
2280 static void get_timewait4_sock(const struct inet_timewait_sock *tw,
2281 struct seq_file *f, int i)
2283 long delta = tw->tw_timer.expires - jiffies;
2284 __be32 dest, src;
2285 __u16 destp, srcp;
2287 dest = tw->tw_daddr;
2288 src = tw->tw_rcv_saddr;
2289 destp = ntohs(tw->tw_dport);
2290 srcp = ntohs(tw->tw_sport);
2292 seq_printf(f, "%4d: %08X:%04X %08X:%04X"
2293 " %02X %08X:%08X %02X:%08lX %08X %5d %8d %d %d %pK",
2294 i, src, srcp, dest, destp, tw->tw_substate, 0, 0,
2295 3, jiffies_delta_to_clock_t(delta), 0, 0, 0, 0,
2296 refcount_read(&tw->tw_refcnt), tw);
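/*
 * Note on TIME_WAIT rows: the st column shows tw_substate, i.e. 06
 * (TCP_TIME_WAIT) or 05 (TCP_FIN_WAIT2) for sockets parked in the timewait
 * infrastructure; tr is hard-coded to 3 with tm->when set to the remaining
 * timewait timer, and the uid and inode columns are 0 because a timewait
 * sock keeps neither.
 */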
2299 #define TMPSZ 150
2301 static int tcp4_seq_show(struct seq_file *seq, void *v)
2303 struct tcp_iter_state *st;
2304 struct sock *sk = v;
2306 seq_setwidth(seq, TMPSZ - 1);
2307 if (v == SEQ_START_TOKEN) {
2308 seq_puts(seq, " sl local_address rem_address st tx_queue "
2309 "rx_queue tr tm->when retrnsmt uid timeout "
2310 "inode");
2311 goto out;
2313 st = seq->private;
2315 if (sk->sk_state == TCP_TIME_WAIT)
2316 get_timewait4_sock(v, seq, st->num);
2317 else if (sk->sk_state == TCP_NEW_SYN_RECV)
2318 get_openreq4(v, seq, st->num);
2319 else
2320 get_tcp4_sock(v, seq, st->num);
2321 out:
2322 seq_pad(seq, '\n');
2323 return 0;
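/*
 * For illustration only (user space, not built with this file): a minimal
 * reader that consumes the format emitted above.  The printed fields are
 * just an example.
 *
 *	#include <stdio.h>
 *
 *	int main(void)
 *	{
 *		char line[512];
 *		FILE *f = fopen("/proc/net/tcp", "r");
 *
 *		if (!f)
 *			return 1;
 *		fgets(line, sizeof(line), f);	// skip the header line
 *		while (fgets(line, sizeof(line), f)) {
 *			unsigned int laddr, lport, raddr, rport, st;
 *
 *			if (sscanf(line, "%*d: %8X:%4X %8X:%4X %2X",
 *				   &laddr, &lport, &raddr, &rport, &st) == 5)
 *				printf("%08X:%u -> %08X:%u st %02X\n",
 *				       laddr, lport, raddr, rport, st);
 *		}
 *		fclose(f);
 *		return 0;
 *	}
 */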
2326 static const struct file_operations tcp_afinfo_seq_fops = {
2327 .owner = THIS_MODULE,
2328 .open = tcp_seq_open,
2329 .read = seq_read,
2330 .llseek = seq_lseek,
2331 .release = seq_release_net
2332 };
2334 static struct tcp_seq_afinfo tcp4_seq_afinfo = {
2335 .name = "tcp",
2336 .family = AF_INET,
2337 .seq_fops = &tcp_afinfo_seq_fops,
2338 .seq_ops = {
2339 .show = tcp4_seq_show,
2340 },
2341 };
2343 static int __net_init tcp4_proc_init_net(struct net *net)
2345 return tcp_proc_register(net, &tcp4_seq_afinfo);
2348 static void __net_exit tcp4_proc_exit_net(struct net *net)
2350 tcp_proc_unregister(net, &tcp4_seq_afinfo);
2353 static struct pernet_operations tcp4_net_ops = {
2354 .init = tcp4_proc_init_net,
2355 .exit = tcp4_proc_exit_net,
2356 };
2358 int __init tcp4_proc_init(void)
2360 return register_pernet_subsys(&tcp4_net_ops);
2363 void tcp4_proc_exit(void)
2365 unregister_pernet_subsys(&tcp4_net_ops);
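/*
 * Because registration goes through pernet_operations, every network
 * namespace gets its own /proc/net/tcp, and the net_eq() checks in the
 * iterators above filter out sockets belonging to other namespaces.  A quick
 * way to observe this (illustrative sketch, needs CAP_SYS_ADMIN):
 *
 *	#define _GNU_SOURCE
 *	#include <sched.h>
 *	#include <stdlib.h>
 *
 *	int main(void)
 *	{
 *		if (unshare(CLONE_NEWNET))	// brand-new, empty netns
 *			return 1;
 *		// /proc/net is a symlink to /proc/self/net, so this now
 *		// lists only the sockets of the fresh namespace: none.
 *		return system("cat /proc/net/tcp");
 *	}
 */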
2367 #endif /* CONFIG_PROC_FS */
2369 struct proto tcp_prot = {
2370 .name = "TCP",
2371 .owner = THIS_MODULE,
2372 .close = tcp_close,
2373 .connect = tcp_v4_connect,
2374 .disconnect = tcp_disconnect,
2375 .accept = inet_csk_accept,
2376 .ioctl = tcp_ioctl,
2377 .init = tcp_v4_init_sock,
2378 .destroy = tcp_v4_destroy_sock,
2379 .shutdown = tcp_shutdown,
2380 .setsockopt = tcp_setsockopt,
2381 .getsockopt = tcp_getsockopt,
2382 .keepalive = tcp_set_keepalive,
2383 .recvmsg = tcp_recvmsg,
2384 .sendmsg = tcp_sendmsg,
2385 .sendpage = tcp_sendpage,
2386 .backlog_rcv = tcp_v4_do_rcv,
2387 .release_cb = tcp_release_cb,
2388 .hash = inet_hash,
2389 .unhash = inet_unhash,
2390 .get_port = inet_csk_get_port,
2391 .enter_memory_pressure = tcp_enter_memory_pressure,
2392 .leave_memory_pressure = tcp_leave_memory_pressure,
2393 .stream_memory_free = tcp_stream_memory_free,
2394 .sockets_allocated = &tcp_sockets_allocated,
2395 .orphan_count = &tcp_orphan_count,
2396 .memory_allocated = &tcp_memory_allocated,
2397 .memory_pressure = &tcp_memory_pressure,
2398 .sysctl_mem = sysctl_tcp_mem,
2399 .sysctl_wmem = sysctl_tcp_wmem,
2400 .sysctl_rmem = sysctl_tcp_rmem,
2401 .max_header = MAX_TCP_HEADER,
2402 .obj_size = sizeof(struct tcp_sock),
2403 .slab_flags = SLAB_TYPESAFE_BY_RCU,
2404 .twsk_prot = &tcp_timewait_sock_ops,
2405 .rsk_prot = &tcp_request_sock_ops,
2406 .h.hashinfo = &tcp_hashinfo,
2407 .no_autobind = true,
2408 #ifdef CONFIG_COMPAT
2409 .compat_setsockopt = compat_tcp_setsockopt,
2410 .compat_getsockopt = compat_tcp_getsockopt,
2411 #endif
2412 .diag_destroy = tcp_abort,
2413 };
2414 EXPORT_SYMBOL(tcp_prot);
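/*
 * tcp_prot is not used directly by socket(2); net/ipv4/af_inet.c binds it to
 * the (SOCK_STREAM, IPPROTO_TCP) pair through an inet_protosw entry, roughly
 * (a sketch of that table entry, see inetsw_array):
 *
 *	{
 *		.type     = SOCK_STREAM,
 *		.protocol = IPPROTO_TCP,
 *		.prot     = &tcp_prot,
 *		.ops      = &inet_stream_ops,
 *		.flags    = INET_PROTOSW_PERMANENT | INET_PROTOSW_ICSK,
 *	},
 *
 * so a socket(AF_INET, SOCK_STREAM, 0) call ends up with sk->sk_prot
 * pointing at this structure.
 */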
2416 static void __net_exit tcp_sk_exit(struct net *net)
2418 int cpu;
2420 for_each_possible_cpu(cpu)
2421 inet_ctl_sock_destroy(*per_cpu_ptr(net->ipv4.tcp_sk, cpu));
2422 free_percpu(net->ipv4.tcp_sk);
2425 static int __net_init tcp_sk_init(struct net *net)
2427 int res, cpu, cnt;
2429 net->ipv4.tcp_sk = alloc_percpu(struct sock *);
2430 if (!net->ipv4.tcp_sk)
2431 return -ENOMEM;
2433 for_each_possible_cpu(cpu) {
2434 struct sock *sk;
2436 res = inet_ctl_sock_create(&sk, PF_INET, SOCK_RAW,
2437 IPPROTO_TCP, net);
2438 if (res)
2439 goto fail;
2440 sock_set_flag(sk, SOCK_USE_WRITE_QUEUE);
2441 *per_cpu_ptr(net->ipv4.tcp_sk, cpu) = sk;
2444 net->ipv4.sysctl_tcp_ecn = 2;
2445 net->ipv4.sysctl_tcp_ecn_fallback = 1;
2447 net->ipv4.sysctl_tcp_base_mss = TCP_BASE_MSS;
2448 net->ipv4.sysctl_tcp_probe_threshold = TCP_PROBE_THRESHOLD;
2449 net->ipv4.sysctl_tcp_probe_interval = TCP_PROBE_INTERVAL;
2451 net->ipv4.sysctl_tcp_keepalive_time = TCP_KEEPALIVE_TIME;
2452 net->ipv4.sysctl_tcp_keepalive_probes = TCP_KEEPALIVE_PROBES;
2453 net->ipv4.sysctl_tcp_keepalive_intvl = TCP_KEEPALIVE_INTVL;
2455 net->ipv4.sysctl_tcp_syn_retries = TCP_SYN_RETRIES;
2456 net->ipv4.sysctl_tcp_synack_retries = TCP_SYNACK_RETRIES;
2457 net->ipv4.sysctl_tcp_syncookies = 1;
2458 net->ipv4.sysctl_tcp_reordering = TCP_FASTRETRANS_THRESH;
2459 net->ipv4.sysctl_tcp_retries1 = TCP_RETR1;
2460 net->ipv4.sysctl_tcp_retries2 = TCP_RETR2;
2461 net->ipv4.sysctl_tcp_orphan_retries = 0;
2462 net->ipv4.sysctl_tcp_fin_timeout = TCP_FIN_TIMEOUT;
2463 net->ipv4.sysctl_tcp_notsent_lowat = UINT_MAX;
2464 net->ipv4.sysctl_tcp_tw_reuse = 0;
2466 cnt = tcp_hashinfo.ehash_mask + 1;
2467 net->ipv4.tcp_death_row.sysctl_max_tw_buckets = (cnt + 1) / 2;
2468 net->ipv4.tcp_death_row.hashinfo = &tcp_hashinfo;
2470 net->ipv4.sysctl_max_syn_backlog = max(128, cnt / 256);
2471 net->ipv4.sysctl_tcp_sack = 1;
2472 net->ipv4.sysctl_tcp_window_scaling = 1;
2473 net->ipv4.sysctl_tcp_timestamps = 1;
2475 return 0;
2476 fail:
2477 tcp_sk_exit(net);
2479 return res;
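/*
 * The sysctl_tcp_* fields initialised above are the per-namespace backing
 * store for the knobs under /proc/sys/net/ipv4/ (registered in
 * net/ipv4/sysctl_net_ipv4.c); e.g. sysctl_tcp_syncookies is
 * net.ipv4.tcp_syncookies and defaults to 1 here.  Reading one back from
 * user space (illustrative):
 *
 *	FILE *f = fopen("/proc/sys/net/ipv4/tcp_syncookies", "r");
 *	int val;
 *
 *	if (f && fscanf(f, "%d", &val) == 1)
 *		printf("tcp_syncookies = %d\n", val);	// 1 unless overridden
 */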
2482 static void __net_exit tcp_sk_exit_batch(struct list_head *net_exit_list)
2484 inet_twsk_purge(&tcp_hashinfo, AF_INET);
2487 static struct pernet_operations __net_initdata tcp_sk_ops = {
2488 .init = tcp_sk_init,
2489 .exit = tcp_sk_exit,
2490 .exit_batch = tcp_sk_exit_batch,
2491 };
2493 void __init tcp_v4_init(void)
2495 if (register_pernet_subsys(&tcp_sk_ops))
2496 panic("Failed to create the TCP control socket.\n");