tcp: sk_add_backlog() is too aggressive for TCP
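Context for this change: TCP used to queue packets to a socket's backlog without any bound while the socket was owned by user space, so a flood could pin down arbitrary amounts of memory. The fix caps the backlog at roughly sk_rcvbuf + sk_sndbuf, passed to the three-argument sk_add_backlog() at the call site in tcp_v4_rcv() below. A minimal sketch of the bounded test, with backlog_would_overflow() as a hypothetical name for illustration (field names assume struct sock of this kernel's vintage):

	/* Hypothetical restatement of the bounded-backlog check: once the
	 * memory already charged to the socket exceeds the caller's limit,
	 * further packets are dropped rather than queued. tcp_v4_rcv()
	 * passes sk->sk_rcvbuf + sk->sk_sndbuf as the limit. */
	static inline bool backlog_would_overflow(const struct sock *sk,
						  unsigned int limit)
	{
		unsigned int qsize = sk->sk_backlog.len +
				     atomic_read(&sk->sk_rmem_alloc);

		return qsize > limit;
	}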
[linux-2.6.git] net/ipv4/tcp_ipv4.c
1 /*
2 * INET An implementation of the TCP/IP protocol suite for the LINUX
3 * operating system. INET is implemented using the BSD Socket
4 * interface as the means of communication with the user level.
6 * Implementation of the Transmission Control Protocol(TCP).
8 * IPv4 specific functions
11 * code split from:
12 * linux/ipv4/tcp.c
13 * linux/ipv4/tcp_input.c
14 * linux/ipv4/tcp_output.c
16 * See tcp.c for author information
18 * This program is free software; you can redistribute it and/or
19 * modify it under the terms of the GNU General Public License
20 * as published by the Free Software Foundation; either version
21 * 2 of the License, or (at your option) any later version.
25 * Changes:
26 * David S. Miller : New socket lookup architecture.
27 * This code is dedicated to John Dyson.
28 * David S. Miller : Change semantics of established hash,
29 * half is devoted to TIME_WAIT sockets
30 * and the rest go in the other half.
31 * Andi Kleen : Add support for syncookies and fixed
32 * some bugs: ip options weren't passed to
33 * the TCP layer, missed a check for an
34 * ACK bit.
35 * Andi Kleen : Implemented fast path mtu discovery.
36 * Fixed many serious bugs in the
37 * request_sock handling and moved
38 * most of it into the af independent code.
39 * Added tail drop and some other bugfixes.
40 * Added new listen semantics.
41 * Mike McLagan : Routing by source
42 * Juan Jose Ciarlante: ip_dynaddr bits
43 * Andi Kleen: various fixes.
44 * Vitaly E. Lavrov : Transparent proxy revived after year
45 * coma.
46 * Andi Kleen : Fix new listen.
47 * Andi Kleen : Fix accept error reporting.
48 * YOSHIFUJI Hideaki @USAGI and: Support IPV6_V6ONLY socket option, which
49 * Alexey Kuznetsov allow both IPv4 and IPv6 sockets to bind
50 * a single port at the same time.
53 #define pr_fmt(fmt) "TCP: " fmt
55 #include <linux/bottom_half.h>
56 #include <linux/types.h>
57 #include <linux/fcntl.h>
58 #include <linux/module.h>
59 #include <linux/random.h>
60 #include <linux/cache.h>
61 #include <linux/jhash.h>
62 #include <linux/init.h>
63 #include <linux/times.h>
64 #include <linux/slab.h>
66 #include <net/net_namespace.h>
67 #include <net/icmp.h>
68 #include <net/inet_hashtables.h>
69 #include <net/tcp.h>
70 #include <net/transp_v6.h>
71 #include <net/ipv6.h>
72 #include <net/inet_common.h>
73 #include <net/timewait_sock.h>
74 #include <net/xfrm.h>
75 #include <net/netdma.h>
76 #include <net/secure_seq.h>
77 #include <net/tcp_memcontrol.h>
79 #include <linux/inet.h>
80 #include <linux/ipv6.h>
81 #include <linux/stddef.h>
82 #include <linux/proc_fs.h>
83 #include <linux/seq_file.h>
85 #include <linux/crypto.h>
86 #include <linux/scatterlist.h>
88 int sysctl_tcp_tw_reuse __read_mostly;
89 int sysctl_tcp_low_latency __read_mostly;
90 EXPORT_SYMBOL(sysctl_tcp_low_latency);
93 #ifdef CONFIG_TCP_MD5SIG
94 static int tcp_v4_md5_hash_hdr(char *md5_hash, const struct tcp_md5sig_key *key,
95 __be32 daddr, __be32 saddr, const struct tcphdr *th);
96 #endif
98 struct inet_hashinfo tcp_hashinfo;
99 EXPORT_SYMBOL(tcp_hashinfo);
101 static inline __u32 tcp_v4_init_sequence(const struct sk_buff *skb)
103 return secure_tcp_sequence_number(ip_hdr(skb)->daddr,
104 ip_hdr(skb)->saddr,
105 tcp_hdr(skb)->dest,
106 tcp_hdr(skb)->source);
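tcp_v4_init_sequence() above feeds the connection 4-tuple into secure_tcp_sequence_number(), which follows the RFC 1948 scheme: a secret-keyed hash of the 4-tuple plus a slowly advancing clock, so ISNs are unpredictable to outsiders while successive connections on the same 4-tuple still get advancing sequence space. A self-contained userspace sketch of the idea; mix() is a toy stand-in for the kernel's keyed MD5 transform, and the clock scaling only approximates the real ~64 ns granularity:

	#include <stdint.h>
	#include <sys/time.h>

	/* Toy keyed mix; the kernel uses a secret-keyed MD5 transform. */
	static uint32_t mix(uint32_t a, uint32_t b, uint32_t c, uint32_t key)
	{
		uint32_t h = (a * 2654435761u) ^ (b * 40503u) ^ (c + key);

		return h ^ (h >> 16);
	}

	/* RFC 1948-style ISN: keyed hash of the 4-tuple plus a clock term. */
	static uint32_t example_isn(uint32_t saddr, uint32_t daddr,
				    uint16_t sport, uint16_t dport,
				    uint32_t secret)
	{
		struct timeval tv;

		gettimeofday(&tv, 0);
		return mix(saddr, daddr,
			   ((uint32_t)sport << 16) | dport, secret) +
		       (uint32_t)((tv.tv_sec * 1000000ull + tv.tv_usec) << 4);
	}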
109 int tcp_twsk_unique(struct sock *sk, struct sock *sktw, void *twp)
111 const struct tcp_timewait_sock *tcptw = tcp_twsk(sktw);
112 struct tcp_sock *tp = tcp_sk(sk);
114 /* With PAWS, it is safe from the viewpoint
115 of data integrity. Even without PAWS it is safe provided sequence
116 spaces do not overlap i.e. at data rates <= 80Mbit/sec.
118 Actually, the idea is close to VJ's one, only timestamp cache is
119 held not per host, but per port pair and TW bucket is used as state
120 holder.
122 If TW bucket has been already destroyed we fall back to VJ's scheme
123 and use initial timestamp retrieved from peer table.
125 if (tcptw->tw_ts_recent_stamp &&
126 (twp == NULL || (sysctl_tcp_tw_reuse &&
127 get_seconds() - tcptw->tw_ts_recent_stamp > 1))) {
128 tp->write_seq = tcptw->tw_snd_nxt + 65535 + 2;
129 if (tp->write_seq == 0)
130 tp->write_seq = 1;
131 tp->rx_opt.ts_recent = tcptw->tw_ts_recent;
132 tp->rx_opt.ts_recent_stamp = tcptw->tw_ts_recent_stamp;
133 sock_hold(sktw);
134 return 1;
137 return 0;
139 EXPORT_SYMBOL_GPL(tcp_twsk_unique);
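tcp_twsk_unique() is the mechanism behind the tcp_tw_reuse sysctl declared above: when the knob is set and the TIME-WAIT socket's last timestamp is more than a second old, the port pair may be reused for a new outgoing connection, with write_seq bumped past the old window. A sketch of flipping the knob from C, equivalent to `sysctl net.ipv4.tcp_tw_reuse=1` (TCP timestamps must also be enabled for the reuse test to pass):

	#include <fcntl.h>
	#include <unistd.h>

	/* Enable TIME-WAIT reuse for outgoing connections. */
	static int enable_tw_reuse(void)
	{
		int fd = open("/proc/sys/net/ipv4/tcp_tw_reuse", O_WRONLY);
		int ok;

		if (fd < 0)
			return -1;
		ok = write(fd, "1", 1) == 1;
		close(fd);
		return ok ? 0 : -1;
	}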
141 static int tcp_repair_connect(struct sock *sk)
143 tcp_connect_init(sk);
144 tcp_finish_connect(sk, NULL);
146 return 0;
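tcp_repair_connect() above is the checkpoint/restore path: when a socket is in repair mode (the TCP_REPAIR socket option, used by CRIU), connect() skips the SYN handshake and moves straight to the established state with whatever sequence numbers user space restored. A hedged sketch of entering repair mode before connect(); TCP_REPAIR is 19 in this kernel series and requires CAP_NET_ADMIN:

	#include <netinet/in.h>
	#include <sys/socket.h>

	#ifndef TCP_REPAIR
	#define TCP_REPAIR 19	/* linux/tcp.h value in this kernel series */
	#endif

	/* A later connect() on this socket takes the tcp_repair_connect()
	 * path instead of emitting a SYN. */
	static int enter_repair_mode(int fd)
	{
		int one = 1;

		return setsockopt(fd, IPPROTO_TCP, TCP_REPAIR,
				  &one, sizeof(one));
	}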
149 /* This will initiate an outgoing connection. */
150 int tcp_v4_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len)
152 struct sockaddr_in *usin = (struct sockaddr_in *)uaddr;
153 struct inet_sock *inet = inet_sk(sk);
154 struct tcp_sock *tp = tcp_sk(sk);
155 __be16 orig_sport, orig_dport;
156 __be32 daddr, nexthop;
157 struct flowi4 *fl4;
158 struct rtable *rt;
159 int err;
160 struct ip_options_rcu *inet_opt;
162 if (addr_len < sizeof(struct sockaddr_in))
163 return -EINVAL;
165 if (usin->sin_family != AF_INET)
166 return -EAFNOSUPPORT;
168 nexthop = daddr = usin->sin_addr.s_addr;
169 inet_opt = rcu_dereference_protected(inet->inet_opt,
170 sock_owned_by_user(sk));
171 if (inet_opt && inet_opt->opt.srr) {
172 if (!daddr)
173 return -EINVAL;
174 nexthop = inet_opt->opt.faddr;
177 orig_sport = inet->inet_sport;
178 orig_dport = usin->sin_port;
179 fl4 = &inet->cork.fl.u.ip4;
180 rt = ip_route_connect(fl4, nexthop, inet->inet_saddr,
181 RT_CONN_FLAGS(sk), sk->sk_bound_dev_if,
182 IPPROTO_TCP,
183 orig_sport, orig_dport, sk, true);
184 if (IS_ERR(rt)) {
185 err = PTR_ERR(rt);
186 if (err == -ENETUNREACH)
187 IP_INC_STATS_BH(sock_net(sk), IPSTATS_MIB_OUTNOROUTES);
188 return err;
191 if (rt->rt_flags & (RTCF_MULTICAST | RTCF_BROADCAST)) {
192 ip_rt_put(rt);
193 return -ENETUNREACH;
196 if (!inet_opt || !inet_opt->opt.srr)
197 daddr = fl4->daddr;
199 if (!inet->inet_saddr)
200 inet->inet_saddr = fl4->saddr;
201 inet->inet_rcv_saddr = inet->inet_saddr;
203 if (tp->rx_opt.ts_recent_stamp && inet->inet_daddr != daddr) {
204 /* Reset inherited state */
205 tp->rx_opt.ts_recent = 0;
206 tp->rx_opt.ts_recent_stamp = 0;
207 if (likely(!tp->repair))
208 tp->write_seq = 0;
211 if (tcp_death_row.sysctl_tw_recycle &&
212 !tp->rx_opt.ts_recent_stamp && fl4->daddr == daddr) {
213 struct inet_peer *peer = rt_get_peer(rt, fl4->daddr);
215 * VJ's idea. We save last timestamp seen from
216 * the destination in peer table, when entering state
217 * TIME-WAIT, and initialize rx_opt.ts_recent from it
218 * when trying a new connection.
220 if (peer) {
221 inet_peer_refcheck(peer);
222 if ((u32)get_seconds() - peer->tcp_ts_stamp <= TCP_PAWS_MSL) {
223 tp->rx_opt.ts_recent_stamp = peer->tcp_ts_stamp;
224 tp->rx_opt.ts_recent = peer->tcp_ts;
229 inet->inet_dport = usin->sin_port;
230 inet->inet_daddr = daddr;
232 inet_csk(sk)->icsk_ext_hdr_len = 0;
233 if (inet_opt)
234 inet_csk(sk)->icsk_ext_hdr_len = inet_opt->opt.optlen;
236 tp->rx_opt.mss_clamp = TCP_MSS_DEFAULT;
238 /* Socket identity is still unknown (sport may be zero).
239 * However we set state to SYN-SENT and, without releasing the socket
240 * lock, select a source port, enter ourselves into the hash tables and
241 * complete initialization after this.
243 tcp_set_state(sk, TCP_SYN_SENT);
244 err = inet_hash_connect(&tcp_death_row, sk);
245 if (err)
246 goto failure;
248 rt = ip_route_newports(fl4, rt, orig_sport, orig_dport,
249 inet->inet_sport, inet->inet_dport, sk);
250 if (IS_ERR(rt)) {
251 err = PTR_ERR(rt);
252 rt = NULL;
253 goto failure;
255 /* OK, now commit destination to socket. */
256 sk->sk_gso_type = SKB_GSO_TCPV4;
257 sk_setup_caps(sk, &rt->dst);
259 if (!tp->write_seq && likely(!tp->repair))
260 tp->write_seq = secure_tcp_sequence_number(inet->inet_saddr,
261 inet->inet_daddr,
262 inet->inet_sport,
263 usin->sin_port);
265 inet->inet_id = tp->write_seq ^ jiffies;
267 if (likely(!tp->repair))
268 err = tcp_connect(sk);
269 else
270 err = tcp_repair_connect(sk);
272 rt = NULL;
273 if (err)
274 goto failure;
276 return 0;
278 failure:
280 * This unhashes the socket and releases the local port,
281 * if necessary.
283 tcp_set_state(sk, TCP_CLOSE);
284 ip_rt_put(rt);
285 sk->sk_route_caps = 0;
286 inet->inet_dport = 0;
287 return err;
289 EXPORT_SYMBOL(tcp_v4_connect);
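For reference, everything in tcp_v4_connect() runs beneath an ordinary connect(2); a minimal blocking client that exercises this path:

	#include <arpa/inet.h>
	#include <netinet/in.h>
	#include <string.h>
	#include <sys/socket.h>
	#include <unistd.h>

	/* socket() + connect() drives tcp_v4_connect(): the kernel picks a
	 * source port, hashes the socket, sends the SYN and blocks in
	 * SYN-SENT until the handshake completes or fails. */
	static int tcp_client(const char *ip, unsigned short port)
	{
		struct sockaddr_in sin;
		int fd = socket(AF_INET, SOCK_STREAM, 0);

		if (fd < 0)
			return -1;
		memset(&sin, 0, sizeof(sin));
		sin.sin_family = AF_INET;
		sin.sin_port = htons(port);
		if (inet_pton(AF_INET, ip, &sin.sin_addr) != 1 ||
		    connect(fd, (struct sockaddr *)&sin, sizeof(sin)) < 0) {
			close(fd);
			return -1;
		}
		return fd;
	}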
292 * This routine does path mtu discovery as defined in RFC1191.
294 static void do_pmtu_discovery(struct sock *sk, const struct iphdr *iph, u32 mtu)
296 struct dst_entry *dst;
297 struct inet_sock *inet = inet_sk(sk);
299 /* We are not interested in TCP_LISTEN and open_requests (SYN-ACKs
300 * sent out by Linux are always < 576 bytes, so they should go through
301 * unfragmented).
303 if (sk->sk_state == TCP_LISTEN)
304 return;
306 /* We don't check in the dst entry if pmtu discovery is forbidden
307 * on this route. We just assume that no packet-too-big packets
308 * are sent back when pmtu discovery is not active.
309 * There is a small race when the user changes this flag in the
310 * route, but I think that's acceptable.
312 if ((dst = __sk_dst_check(sk, 0)) == NULL)
313 return;
315 dst->ops->update_pmtu(dst, mtu);
317 /* Something is about to be wrong... Remember soft error
318 * for the case that this connection will not be able to recover.
320 if (mtu < dst_mtu(dst) && ip_dont_fragment(sk, dst))
321 sk->sk_err_soft = EMSGSIZE;
323 mtu = dst_mtu(dst);
325 if (inet->pmtudisc != IP_PMTUDISC_DONT &&
326 inet_csk(sk)->icsk_pmtu_cookie > mtu) {
327 tcp_sync_mss(sk, mtu);
329 /* Resend the TCP packet because it's
330 * clear that the old packet has been
331 * dropped. This is the new "fast" path mtu
332 * discovery.
334 tcp_simple_retransmit(sk);
335 } /* else let the usual retransmit timer handle it */
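do_pmtu_discovery() only has an effect when the socket has path-MTU discovery enabled (the inet->pmtudisc check above). An application can opt a socket out, after which FRAG_NEEDED feedback is not acted upon for it:

	#include <netinet/in.h>
	#include <sys/socket.h>

	/* Disable per-socket path-MTU discovery; the kernel stops setting
	 * the DF bit, so ICMP FRAG_NEEDED feedback no longer applies. */
	static int disable_pmtud(int fd)
	{
		int val = IP_PMTUDISC_DONT;

		return setsockopt(fd, IPPROTO_IP, IP_MTU_DISCOVER,
				  &val, sizeof(val));
	}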
339 * This routine is called by the ICMP module when it gets some
340 * sort of error condition. If err < 0 then the socket should
341 * be closed and the error returned to the user. If err > 0
342 * it's just the icmp type << 8 | icmp code. After adjustment
343 * header points to the first 8 bytes of the tcp header. We need
344 * to find the appropriate port.
346 * The locking strategy used here is very "optimistic". When
347 * someone else accesses the socket the ICMP is just dropped
348 * and for some paths there is no check at all.
349 * A more general error queue to queue errors for later handling
350 * is probably better.
354 void tcp_v4_err(struct sk_buff *icmp_skb, u32 info)
356 const struct iphdr *iph = (const struct iphdr *)icmp_skb->data;
357 struct tcphdr *th = (struct tcphdr *)(icmp_skb->data + (iph->ihl << 2));
358 struct inet_connection_sock *icsk;
359 struct tcp_sock *tp;
360 struct inet_sock *inet;
361 const int type = icmp_hdr(icmp_skb)->type;
362 const int code = icmp_hdr(icmp_skb)->code;
363 struct sock *sk;
364 struct sk_buff *skb;
365 __u32 seq;
366 __u32 remaining;
367 int err;
368 struct net *net = dev_net(icmp_skb->dev);
370 if (icmp_skb->len < (iph->ihl << 2) + 8) {
371 ICMP_INC_STATS_BH(net, ICMP_MIB_INERRORS);
372 return;
375 sk = inet_lookup(net, &tcp_hashinfo, iph->daddr, th->dest,
376 iph->saddr, th->source, inet_iif(icmp_skb));
377 if (!sk) {
378 ICMP_INC_STATS_BH(net, ICMP_MIB_INERRORS);
379 return;
381 if (sk->sk_state == TCP_TIME_WAIT) {
382 inet_twsk_put(inet_twsk(sk));
383 return;
386 bh_lock_sock(sk);
387 /* If too many ICMPs get dropped on busy
388 * servers this needs to be solved differently.
390 if (sock_owned_by_user(sk))
391 NET_INC_STATS_BH(net, LINUX_MIB_LOCKDROPPEDICMPS);
393 if (sk->sk_state == TCP_CLOSE)
394 goto out;
396 if (unlikely(iph->ttl < inet_sk(sk)->min_ttl)) {
397 NET_INC_STATS_BH(net, LINUX_MIB_TCPMINTTLDROP);
398 goto out;
401 icsk = inet_csk(sk);
402 tp = tcp_sk(sk);
403 seq = ntohl(th->seq);
404 if (sk->sk_state != TCP_LISTEN &&
405 !between(seq, tp->snd_una, tp->snd_nxt)) {
406 NET_INC_STATS_BH(net, LINUX_MIB_OUTOFWINDOWICMPS);
407 goto out;
410 switch (type) {
411 case ICMP_SOURCE_QUENCH:
412 /* Just silently ignore these. */
413 goto out;
414 case ICMP_PARAMETERPROB:
415 err = EPROTO;
416 break;
417 case ICMP_DEST_UNREACH:
418 if (code > NR_ICMP_UNREACH)
419 goto out;
421 if (code == ICMP_FRAG_NEEDED) { /* PMTU discovery (RFC1191) */
422 if (!sock_owned_by_user(sk))
423 do_pmtu_discovery(sk, iph, info);
424 goto out;
427 err = icmp_err_convert[code].errno;
428 /* check if icmp_skb allows revert of backoff
429 * (see draft-zimmermann-tcp-lcd) */
430 if (code != ICMP_NET_UNREACH && code != ICMP_HOST_UNREACH)
431 break;
432 if (seq != tp->snd_una || !icsk->icsk_retransmits ||
433 !icsk->icsk_backoff)
434 break;
436 if (sock_owned_by_user(sk))
437 break;
439 icsk->icsk_backoff--;
440 inet_csk(sk)->icsk_rto = (tp->srtt ? __tcp_set_rto(tp) :
441 TCP_TIMEOUT_INIT) << icsk->icsk_backoff;
442 tcp_bound_rto(sk);
444 skb = tcp_write_queue_head(sk);
445 BUG_ON(!skb);
447 remaining = icsk->icsk_rto - min(icsk->icsk_rto,
448 tcp_time_stamp - TCP_SKB_CB(skb)->when);
450 if (remaining) {
451 inet_csk_reset_xmit_timer(sk, ICSK_TIME_RETRANS,
452 remaining, TCP_RTO_MAX);
453 } else {
454 /* RTO revert clocked out retransmission.
455 * Will retransmit now */
456 tcp_retransmit_timer(sk);
459 break;
460 case ICMP_TIME_EXCEEDED:
461 err = EHOSTUNREACH;
462 break;
463 default:
464 goto out;
467 switch (sk->sk_state) {
468 struct request_sock *req, **prev;
469 case TCP_LISTEN:
470 if (sock_owned_by_user(sk))
471 goto out;
473 req = inet_csk_search_req(sk, &prev, th->dest,
474 iph->daddr, iph->saddr);
475 if (!req)
476 goto out;
478 /* ICMPs are not backlogged, hence we cannot get
479 an established socket here.
481 WARN_ON(req->sk);
483 if (seq != tcp_rsk(req)->snt_isn) {
484 NET_INC_STATS_BH(net, LINUX_MIB_OUTOFWINDOWICMPS);
485 goto out;
489 * Still in SYN_RECV, just remove it silently.
490 * There is no good way to pass the error to the newly
491 * created socket, and POSIX does not want network
492 * errors returned from accept().
494 inet_csk_reqsk_queue_drop(sk, req, prev);
495 goto out;
497 case TCP_SYN_SENT:
498 case TCP_SYN_RECV: /* Cannot happen.
499 It can, e.g., occur if SYNs crossed.
501 if (!sock_owned_by_user(sk)) {
502 sk->sk_err = err;
504 sk->sk_error_report(sk);
506 tcp_done(sk);
507 } else {
508 sk->sk_err_soft = err;
510 goto out;
513 /* If we've already connected we will keep trying
514 * until we time out, or the user gives up.
516 * rfc1122 4.2.3.9 allows to consider as hard errors
517 * only PROTO_UNREACH and PORT_UNREACH (well, FRAG_FAILED too,
518 * but it is obsoleted by pmtu discovery).
520 * Note, that in modern internet, where routing is unreliable
521 * and in each dark corner broken firewalls sit, sending random
522 * errors ordered by their masters, even these two messages finally lose
523 * their original sense (even Linux sends invalid PORT_UNREACHs)
525 * Now we are in compliance with RFCs.
526 * --ANK (980905)
529 inet = inet_sk(sk);
530 if (!sock_owned_by_user(sk) && inet->recverr) {
531 sk->sk_err = err;
532 sk->sk_error_report(sk);
533 } else { /* Only an error on timeout */
534 sk->sk_err_soft = err;
537 out:
538 bh_unlock_sock(sk);
539 sock_put(sk);
542 static void __tcp_v4_send_check(struct sk_buff *skb,
543 __be32 saddr, __be32 daddr)
545 struct tcphdr *th = tcp_hdr(skb);
547 if (skb->ip_summed == CHECKSUM_PARTIAL) {
548 th->check = ~tcp_v4_check(skb->len, saddr, daddr, 0);
549 skb->csum_start = skb_transport_header(skb) - skb->head;
550 skb->csum_offset = offsetof(struct tcphdr, check);
551 } else {
552 th->check = tcp_v4_check(skb->len, saddr, daddr,
553 csum_partial(th,
554 th->doff << 2,
555 skb->csum));
559 /* This routine computes an IPv4 TCP checksum. */
560 void tcp_v4_send_check(struct sock *sk, struct sk_buff *skb)
562 const struct inet_sock *inet = inet_sk(sk);
564 __tcp_v4_send_check(skb, inet->inet_saddr, inet->inet_daddr);
566 EXPORT_SYMBOL(tcp_v4_send_check);
568 int tcp_v4_gso_send_check(struct sk_buff *skb)
570 const struct iphdr *iph;
571 struct tcphdr *th;
573 if (!pskb_may_pull(skb, sizeof(*th)))
574 return -EINVAL;
576 iph = ip_hdr(skb);
577 th = tcp_hdr(skb);
579 th->check = 0;
580 skb->ip_summed = CHECKSUM_PARTIAL;
581 __tcp_v4_send_check(skb, iph->saddr, iph->daddr);
582 return 0;
586 * This routine will send an RST to the other tcp.
588 * Someone asks: why do I NEVER use socket parameters (TOS, TTL, etc.)
589 * for reset.
590 * Answer: if a packet caused RST, it is not for a socket
591 * existing in our system, if it is matched to a socket,
592 * it is just duplicate segment or bug in other side's TCP.
593 * So we build the reply based only on parameters
594 * that arrived with the segment.
595 * Exception: precedence violation. We do not implement it in any case.
598 static void tcp_v4_send_reset(struct sock *sk, struct sk_buff *skb)
600 const struct tcphdr *th = tcp_hdr(skb);
601 struct {
602 struct tcphdr th;
603 #ifdef CONFIG_TCP_MD5SIG
604 __be32 opt[(TCPOLEN_MD5SIG_ALIGNED >> 2)];
605 #endif
606 } rep;
607 struct ip_reply_arg arg;
608 #ifdef CONFIG_TCP_MD5SIG
609 struct tcp_md5sig_key *key;
610 const __u8 *hash_location = NULL;
611 unsigned char newhash[16];
612 int genhash;
613 struct sock *sk1 = NULL;
614 #endif
615 struct net *net;
617 /* Never send a reset in response to a reset. */
618 if (th->rst)
619 return;
621 if (skb_rtable(skb)->rt_type != RTN_LOCAL)
622 return;
624 /* Swap the send and the receive. */
625 memset(&rep, 0, sizeof(rep));
626 rep.th.dest = th->source;
627 rep.th.source = th->dest;
628 rep.th.doff = sizeof(struct tcphdr) / 4;
629 rep.th.rst = 1;
631 if (th->ack) {
632 rep.th.seq = th->ack_seq;
633 } else {
634 rep.th.ack = 1;
635 rep.th.ack_seq = htonl(ntohl(th->seq) + th->syn + th->fin +
636 skb->len - (th->doff << 2));
639 memset(&arg, 0, sizeof(arg));
640 arg.iov[0].iov_base = (unsigned char *)&rep;
641 arg.iov[0].iov_len = sizeof(rep.th);
643 #ifdef CONFIG_TCP_MD5SIG
644 hash_location = tcp_parse_md5sig_option(th);
645 if (!sk && hash_location) {
647 * active side is lost. Try to find listening socket through
648 * source port, and then find md5 key through listening socket.
649 * We do not lose security here:
650 * the incoming packet is checked with the md5 hash of the found key;
651 * no RST is generated if the md5 hash doesn't match.
653 sk1 = __inet_lookup_listener(dev_net(skb_dst(skb)->dev),
654 &tcp_hashinfo, ip_hdr(skb)->daddr,
655 ntohs(th->source), inet_iif(skb));
656 /* don't send rst if it can't find key */
657 if (!sk1)
658 return;
659 rcu_read_lock();
660 key = tcp_md5_do_lookup(sk1, (union tcp_md5_addr *)
661 &ip_hdr(skb)->saddr, AF_INET);
662 if (!key)
663 goto release_sk1;
665 genhash = tcp_v4_md5_hash_skb(newhash, key, NULL, NULL, skb);
666 if (genhash || memcmp(hash_location, newhash, 16) != 0)
667 goto release_sk1;
668 } else {
669 key = sk ? tcp_md5_do_lookup(sk, (union tcp_md5_addr *)
670 &ip_hdr(skb)->saddr,
671 AF_INET) : NULL;
674 if (key) {
675 rep.opt[0] = htonl((TCPOPT_NOP << 24) |
676 (TCPOPT_NOP << 16) |
677 (TCPOPT_MD5SIG << 8) |
678 TCPOLEN_MD5SIG);
679 /* Update length and the length the header thinks exists */
680 arg.iov[0].iov_len += TCPOLEN_MD5SIG_ALIGNED;
681 rep.th.doff = arg.iov[0].iov_len / 4;
683 tcp_v4_md5_hash_hdr((__u8 *) &rep.opt[1],
684 key, ip_hdr(skb)->saddr,
685 ip_hdr(skb)->daddr, &rep.th);
687 #endif
688 arg.csum = csum_tcpudp_nofold(ip_hdr(skb)->daddr,
689 ip_hdr(skb)->saddr, /* XXX */
690 arg.iov[0].iov_len, IPPROTO_TCP, 0);
691 arg.csumoffset = offsetof(struct tcphdr, check) / 2;
692 arg.flags = (sk && inet_sk(sk)->transparent) ? IP_REPLY_ARG_NOSRCCHECK : 0;
693 /* When socket is gone, all binding information is lost.
694 * Routing might fail in this case. Use iif for oif to
695 * make sure we can deliver it.
697 arg.bound_dev_if = sk ? sk->sk_bound_dev_if : inet_iif(skb);
699 net = dev_net(skb_dst(skb)->dev);
700 arg.tos = ip_hdr(skb)->tos;
701 ip_send_reply(net->ipv4.tcp_sock, skb, ip_hdr(skb)->saddr,
702 &arg, arg.iov[0].iov_len);
704 TCP_INC_STATS_BH(net, TCP_MIB_OUTSEGS);
705 TCP_INC_STATS_BH(net, TCP_MIB_OUTRSTS);
707 #ifdef CONFIG_TCP_MD5SIG
708 release_sk1:
709 if (sk1) {
710 rcu_read_unlock();
711 sock_put(sk1);
713 #endif
716 /* The code below, which sends ACKs in SYN-RECV and TIME-WAIT states
717 outside socket context, is certainly ugly. What can I do?
720 static void tcp_v4_send_ack(struct sk_buff *skb, u32 seq, u32 ack,
721 u32 win, u32 ts, int oif,
722 struct tcp_md5sig_key *key,
723 int reply_flags, u8 tos)
725 const struct tcphdr *th = tcp_hdr(skb);
726 struct {
727 struct tcphdr th;
728 __be32 opt[(TCPOLEN_TSTAMP_ALIGNED >> 2)
729 #ifdef CONFIG_TCP_MD5SIG
730 + (TCPOLEN_MD5SIG_ALIGNED >> 2)
731 #endif
733 } rep;
734 struct ip_reply_arg arg;
735 struct net *net = dev_net(skb_dst(skb)->dev);
737 memset(&rep.th, 0, sizeof(struct tcphdr));
738 memset(&arg, 0, sizeof(arg));
740 arg.iov[0].iov_base = (unsigned char *)&rep;
741 arg.iov[0].iov_len = sizeof(rep.th);
742 if (ts) {
743 rep.opt[0] = htonl((TCPOPT_NOP << 24) | (TCPOPT_NOP << 16) |
744 (TCPOPT_TIMESTAMP << 8) |
745 TCPOLEN_TIMESTAMP);
746 rep.opt[1] = htonl(tcp_time_stamp);
747 rep.opt[2] = htonl(ts);
748 arg.iov[0].iov_len += TCPOLEN_TSTAMP_ALIGNED;
751 /* Swap the send and the receive. */
752 rep.th.dest = th->source;
753 rep.th.source = th->dest;
754 rep.th.doff = arg.iov[0].iov_len / 4;
755 rep.th.seq = htonl(seq);
756 rep.th.ack_seq = htonl(ack);
757 rep.th.ack = 1;
758 rep.th.window = htons(win);
760 #ifdef CONFIG_TCP_MD5SIG
761 if (key) {
762 int offset = (ts) ? 3 : 0;
764 rep.opt[offset++] = htonl((TCPOPT_NOP << 24) |
765 (TCPOPT_NOP << 16) |
766 (TCPOPT_MD5SIG << 8) |
767 TCPOLEN_MD5SIG);
768 arg.iov[0].iov_len += TCPOLEN_MD5SIG_ALIGNED;
769 rep.th.doff = arg.iov[0].iov_len/4;
771 tcp_v4_md5_hash_hdr((__u8 *) &rep.opt[offset],
772 key, ip_hdr(skb)->saddr,
773 ip_hdr(skb)->daddr, &rep.th);
775 #endif
776 arg.flags = reply_flags;
777 arg.csum = csum_tcpudp_nofold(ip_hdr(skb)->daddr,
778 ip_hdr(skb)->saddr, /* XXX */
779 arg.iov[0].iov_len, IPPROTO_TCP, 0);
780 arg.csumoffset = offsetof(struct tcphdr, check) / 2;
781 if (oif)
782 arg.bound_dev_if = oif;
783 arg.tos = tos;
784 ip_send_reply(net->ipv4.tcp_sock, skb, ip_hdr(skb)->saddr,
785 &arg, arg.iov[0].iov_len);
787 TCP_INC_STATS_BH(net, TCP_MIB_OUTSEGS);
790 static void tcp_v4_timewait_ack(struct sock *sk, struct sk_buff *skb)
792 struct inet_timewait_sock *tw = inet_twsk(sk);
793 struct tcp_timewait_sock *tcptw = tcp_twsk(sk);
795 tcp_v4_send_ack(skb, tcptw->tw_snd_nxt, tcptw->tw_rcv_nxt,
796 tcptw->tw_rcv_wnd >> tw->tw_rcv_wscale,
797 tcptw->tw_ts_recent,
798 tw->tw_bound_dev_if,
799 tcp_twsk_md5_key(tcptw),
800 tw->tw_transparent ? IP_REPLY_ARG_NOSRCCHECK : 0,
801 tw->tw_tos
804 inet_twsk_put(tw);
807 static void tcp_v4_reqsk_send_ack(struct sock *sk, struct sk_buff *skb,
808 struct request_sock *req)
810 tcp_v4_send_ack(skb, tcp_rsk(req)->snt_isn + 1,
811 tcp_rsk(req)->rcv_isn + 1, req->rcv_wnd,
812 req->ts_recent,
814 tcp_md5_do_lookup(sk, (union tcp_md5_addr *)&ip_hdr(skb)->daddr,
815 AF_INET),
816 inet_rsk(req)->no_srccheck ? IP_REPLY_ARG_NOSRCCHECK : 0,
817 ip_hdr(skb)->tos);
821 * Send a SYN-ACK after having received a SYN.
822 * This still operates on a request_sock only, not on a big
823 * socket.
825 static int tcp_v4_send_synack(struct sock *sk, struct dst_entry *dst,
826 struct request_sock *req,
827 struct request_values *rvp)
829 const struct inet_request_sock *ireq = inet_rsk(req);
830 struct flowi4 fl4;
831 int err = -1;
832 struct sk_buff * skb;
834 /* First, grab a route. */
835 if (!dst && (dst = inet_csk_route_req(sk, &fl4, req)) == NULL)
836 return -1;
838 skb = tcp_make_synack(sk, dst, req, rvp);
840 if (skb) {
841 __tcp_v4_send_check(skb, ireq->loc_addr, ireq->rmt_addr);
843 err = ip_build_and_send_pkt(skb, sk, ireq->loc_addr,
844 ireq->rmt_addr,
845 ireq->opt);
846 err = net_xmit_eval(err);
849 dst_release(dst);
850 return err;
853 static int tcp_v4_rtx_synack(struct sock *sk, struct request_sock *req,
854 struct request_values *rvp)
856 TCP_INC_STATS_BH(sock_net(sk), TCP_MIB_RETRANSSEGS);
857 return tcp_v4_send_synack(sk, NULL, req, rvp);
861 * IPv4 request_sock destructor.
863 static void tcp_v4_reqsk_destructor(struct request_sock *req)
865 kfree(inet_rsk(req)->opt);
869 * Return 1 if a syncookie should be sent
871 int tcp_syn_flood_action(struct sock *sk,
872 const struct sk_buff *skb,
873 const char *proto)
875 const char *msg = "Dropping request";
876 int want_cookie = 0;
877 struct listen_sock *lopt;
881 #ifdef CONFIG_SYN_COOKIES
882 if (sysctl_tcp_syncookies) {
883 msg = "Sending cookies";
884 want_cookie = 1;
885 NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPREQQFULLDOCOOKIES);
886 } else
887 #endif
888 NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPREQQFULLDROP);
890 lopt = inet_csk(sk)->icsk_accept_queue.listen_opt;
891 if (!lopt->synflood_warned) {
892 lopt->synflood_warned = 1;
893 pr_info("%s: Possible SYN flooding on port %d. %s. Check SNMP counters.\n",
894 proto, ntohs(tcp_hdr(skb)->dest), msg);
896 return want_cookie;
898 EXPORT_SYMBOL(tcp_syn_flood_action);
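The "Check SNMP counters" hint refers to the two MIB entries bumped above; they are exported (under names matching the identifiers, TCPReqQFullDoCookies and TCPReqQFullDrop on kernels of this era) in the TcpExt rows of /proc/net/netstat. A small dumper:

	#include <stdio.h>

	/* Print /proc/net/netstat, whose TcpExt rows carry the SYN-flood
	 * counters incremented by tcp_syn_flood_action(). */
	static void dump_tcpext(void)
	{
		char line[2048];
		FILE *f = fopen("/proc/net/netstat", "r");

		if (!f)
			return;
		while (fgets(line, sizeof(line), f))
			fputs(line, stdout);
		fclose(f);
	}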
901 * Save and compile IPv4 options into the request_sock if needed.
903 static struct ip_options_rcu *tcp_v4_save_options(struct sock *sk,
904 struct sk_buff *skb)
906 const struct ip_options *opt = &(IPCB(skb)->opt);
907 struct ip_options_rcu *dopt = NULL;
909 if (opt && opt->optlen) {
910 int opt_size = sizeof(*dopt) + opt->optlen;
912 dopt = kmalloc(opt_size, GFP_ATOMIC);
913 if (dopt) {
914 if (ip_options_echo(&dopt->opt, skb)) {
915 kfree(dopt);
916 dopt = NULL;
920 return dopt;
923 #ifdef CONFIG_TCP_MD5SIG
925 * RFC2385 MD5 checksumming requires a mapping of
926 * IP address->MD5 Key.
927 * We need to maintain these in the sk structure.
930 /* Find the Key structure for an address. */
931 struct tcp_md5sig_key *tcp_md5_do_lookup(struct sock *sk,
932 const union tcp_md5_addr *addr,
933 int family)
935 struct tcp_sock *tp = tcp_sk(sk);
936 struct tcp_md5sig_key *key;
937 struct hlist_node *pos;
938 unsigned int size = sizeof(struct in_addr);
939 struct tcp_md5sig_info *md5sig;
941 /* caller either holds rcu_read_lock() or socket lock */
942 md5sig = rcu_dereference_check(tp->md5sig_info,
943 sock_owned_by_user(sk) ||
944 lockdep_is_held(&sk->sk_lock.slock));
945 if (!md5sig)
946 return NULL;
947 #if IS_ENABLED(CONFIG_IPV6)
948 if (family == AF_INET6)
949 size = sizeof(struct in6_addr);
950 #endif
951 hlist_for_each_entry_rcu(key, pos, &md5sig->head, node) {
952 if (key->family != family)
953 continue;
954 if (!memcmp(&key->addr, addr, size))
955 return key;
957 return NULL;
959 EXPORT_SYMBOL(tcp_md5_do_lookup);
961 struct tcp_md5sig_key *tcp_v4_md5_lookup(struct sock *sk,
962 struct sock *addr_sk)
964 union tcp_md5_addr *addr;
966 addr = (union tcp_md5_addr *)&inet_sk(addr_sk)->inet_daddr;
967 return tcp_md5_do_lookup(sk, addr, AF_INET);
969 EXPORT_SYMBOL(tcp_v4_md5_lookup);
971 static struct tcp_md5sig_key *tcp_v4_reqsk_md5_lookup(struct sock *sk,
972 struct request_sock *req)
974 union tcp_md5_addr *addr;
976 addr = (union tcp_md5_addr *)&inet_rsk(req)->rmt_addr;
977 return tcp_md5_do_lookup(sk, addr, AF_INET);
980 /* This can be called on a newly created socket, from other files */
981 int tcp_md5_do_add(struct sock *sk, const union tcp_md5_addr *addr,
982 int family, const u8 *newkey, u8 newkeylen, gfp_t gfp)
984 /* Add Key to the list */
985 struct tcp_md5sig_key *key;
986 struct tcp_sock *tp = tcp_sk(sk);
987 struct tcp_md5sig_info *md5sig;
989 key = tcp_md5_do_lookup(sk, (union tcp_md5_addr *)&addr, AF_INET);
990 if (key) {
991 /* Pre-existing entry - just update that one. */
992 memcpy(key->key, newkey, newkeylen);
993 key->keylen = newkeylen;
994 return 0;
997 md5sig = rcu_dereference_protected(tp->md5sig_info,
998 sock_owned_by_user(sk));
999 if (!md5sig) {
1000 md5sig = kmalloc(sizeof(*md5sig), gfp);
1001 if (!md5sig)
1002 return -ENOMEM;
1004 sk_nocaps_add(sk, NETIF_F_GSO_MASK);
1005 INIT_HLIST_HEAD(&md5sig->head);
1006 rcu_assign_pointer(tp->md5sig_info, md5sig);
1009 key = sock_kmalloc(sk, sizeof(*key), gfp);
1010 if (!key)
1011 return -ENOMEM;
1012 if (hlist_empty(&md5sig->head) && !tcp_alloc_md5sig_pool(sk)) {
1013 sock_kfree_s(sk, key, sizeof(*key));
1014 return -ENOMEM;
1017 memcpy(key->key, newkey, newkeylen);
1018 key->keylen = newkeylen;
1019 key->family = family;
1020 memcpy(&key->addr, addr,
1021 (family == AF_INET6) ? sizeof(struct in6_addr) :
1022 sizeof(struct in_addr));
1023 hlist_add_head_rcu(&key->node, &md5sig->head);
1024 return 0;
1026 EXPORT_SYMBOL(tcp_md5_do_add);
1028 int tcp_md5_do_del(struct sock *sk, const union tcp_md5_addr *addr, int family)
1030 struct tcp_sock *tp = tcp_sk(sk);
1031 struct tcp_md5sig_key *key;
1032 struct tcp_md5sig_info *md5sig;
1034 key = tcp_md5_do_lookup(sk, (union tcp_md5_addr *)&addr, AF_INET);
1035 if (!key)
1036 return -ENOENT;
1037 hlist_del_rcu(&key->node);
1038 atomic_sub(sizeof(*key), &sk->sk_omem_alloc);
1039 kfree_rcu(key, rcu);
1040 md5sig = rcu_dereference_protected(tp->md5sig_info,
1041 sock_owned_by_user(sk));
1042 if (hlist_empty(&md5sig->head))
1043 tcp_free_md5sig_pool();
1044 return 0;
1046 EXPORT_SYMBOL(tcp_md5_do_del);
1048 void tcp_clear_md5_list(struct sock *sk)
1050 struct tcp_sock *tp = tcp_sk(sk);
1051 struct tcp_md5sig_key *key;
1052 struct hlist_node *pos, *n;
1053 struct tcp_md5sig_info *md5sig;
1055 md5sig = rcu_dereference_protected(tp->md5sig_info, 1);
1057 if (!hlist_empty(&md5sig->head))
1058 tcp_free_md5sig_pool();
1059 hlist_for_each_entry_safe(key, pos, n, &md5sig->head, node) {
1060 hlist_del_rcu(&key->node);
1061 atomic_sub(sizeof(*key), &sk->sk_omem_alloc);
1062 kfree_rcu(key, rcu);
1066 static int tcp_v4_parse_md5_keys(struct sock *sk, char __user *optval,
1067 int optlen)
1069 struct tcp_md5sig cmd;
1070 struct sockaddr_in *sin = (struct sockaddr_in *)&cmd.tcpm_addr;
1072 if (optlen < sizeof(cmd))
1073 return -EINVAL;
1075 if (copy_from_user(&cmd, optval, sizeof(cmd)))
1076 return -EFAULT;
1078 if (sin->sin_family != AF_INET)
1079 return -EINVAL;
1081 if (!cmd.tcpm_key || !cmd.tcpm_keylen)
1082 return tcp_md5_do_del(sk, (union tcp_md5_addr *)&sin->sin_addr.s_addr,
1083 AF_INET);
1085 if (cmd.tcpm_keylen > TCP_MD5SIG_MAXKEYLEN)
1086 return -EINVAL;
1088 return tcp_md5_do_add(sk, (union tcp_md5_addr *)&sin->sin_addr.s_addr,
1089 AF_INET, cmd.tcpm_key, cmd.tcpm_keylen,
1090 GFP_KERNEL);
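tcp_v4_parse_md5_keys() is the kernel half of the TCP_MD5SIG socket option (RFC 2385 segment signatures, used mostly to protect BGP sessions). A userspace sketch installing a per-peer key, assuming the uapi definitions from <linux/tcp.h>; per the checks above, passing a zero-length key deletes the entry:

	#include <linux/tcp.h>	/* struct tcp_md5sig, TCP_MD5SIG */
	#include <netinet/in.h>
	#include <string.h>
	#include <sys/socket.h>

	/* Associate an RFC 2385 MD5 key with one IPv4 peer. */
	static int set_md5_key(int fd, const struct sockaddr_in *peer,
			       const void *key, unsigned int keylen)
	{
		struct tcp_md5sig md5;

		if (keylen > TCP_MD5SIG_MAXKEYLEN)
			return -1;
		memset(&md5, 0, sizeof(md5));
		memcpy(&md5.tcpm_addr, peer, sizeof(*peer));
		md5.tcpm_keylen = keylen;
		memcpy(md5.tcpm_key, key, keylen);

		return setsockopt(fd, IPPROTO_TCP, TCP_MD5SIG,
				  &md5, sizeof(md5));
	}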
1093 static int tcp_v4_md5_hash_pseudoheader(struct tcp_md5sig_pool *hp,
1094 __be32 daddr, __be32 saddr, int nbytes)
1096 struct tcp4_pseudohdr *bp;
1097 struct scatterlist sg;
1099 bp = &hp->md5_blk.ip4;
1102 * 1. the TCP pseudo-header (in the order: source IP address,
1103 * destination IP address, zero-padded protocol number, and
1104 * segment length)
1106 bp->saddr = saddr;
1107 bp->daddr = daddr;
1108 bp->pad = 0;
1109 bp->protocol = IPPROTO_TCP;
1110 bp->len = cpu_to_be16(nbytes);
1112 sg_init_one(&sg, bp, sizeof(*bp));
1113 return crypto_hash_update(&hp->md5_desc, &sg, sizeof(*bp));
1116 static int tcp_v4_md5_hash_hdr(char *md5_hash, const struct tcp_md5sig_key *key,
1117 __be32 daddr, __be32 saddr, const struct tcphdr *th)
1119 struct tcp_md5sig_pool *hp;
1120 struct hash_desc *desc;
1122 hp = tcp_get_md5sig_pool();
1123 if (!hp)
1124 goto clear_hash_noput;
1125 desc = &hp->md5_desc;
1127 if (crypto_hash_init(desc))
1128 goto clear_hash;
1129 if (tcp_v4_md5_hash_pseudoheader(hp, daddr, saddr, th->doff << 2))
1130 goto clear_hash;
1131 if (tcp_md5_hash_header(hp, th))
1132 goto clear_hash;
1133 if (tcp_md5_hash_key(hp, key))
1134 goto clear_hash;
1135 if (crypto_hash_final(desc, md5_hash))
1136 goto clear_hash;
1138 tcp_put_md5sig_pool();
1139 return 0;
1141 clear_hash:
1142 tcp_put_md5sig_pool();
1143 clear_hash_noput:
1144 memset(md5_hash, 0, 16);
1145 return 1;
1148 int tcp_v4_md5_hash_skb(char *md5_hash, struct tcp_md5sig_key *key,
1149 const struct sock *sk, const struct request_sock *req,
1150 const struct sk_buff *skb)
1152 struct tcp_md5sig_pool *hp;
1153 struct hash_desc *desc;
1154 const struct tcphdr *th = tcp_hdr(skb);
1155 __be32 saddr, daddr;
1157 if (sk) {
1158 saddr = inet_sk(sk)->inet_saddr;
1159 daddr = inet_sk(sk)->inet_daddr;
1160 } else if (req) {
1161 saddr = inet_rsk(req)->loc_addr;
1162 daddr = inet_rsk(req)->rmt_addr;
1163 } else {
1164 const struct iphdr *iph = ip_hdr(skb);
1165 saddr = iph->saddr;
1166 daddr = iph->daddr;
1169 hp = tcp_get_md5sig_pool();
1170 if (!hp)
1171 goto clear_hash_noput;
1172 desc = &hp->md5_desc;
1174 if (crypto_hash_init(desc))
1175 goto clear_hash;
1177 if (tcp_v4_md5_hash_pseudoheader(hp, daddr, saddr, skb->len))
1178 goto clear_hash;
1179 if (tcp_md5_hash_header(hp, th))
1180 goto clear_hash;
1181 if (tcp_md5_hash_skb_data(hp, skb, th->doff << 2))
1182 goto clear_hash;
1183 if (tcp_md5_hash_key(hp, key))
1184 goto clear_hash;
1185 if (crypto_hash_final(desc, md5_hash))
1186 goto clear_hash;
1188 tcp_put_md5sig_pool();
1189 return 0;
1191 clear_hash:
1192 tcp_put_md5sig_pool();
1193 clear_hash_noput:
1194 memset(md5_hash, 0, 16);
1195 return 1;
1197 EXPORT_SYMBOL(tcp_v4_md5_hash_skb);
1199 static int tcp_v4_inbound_md5_hash(struct sock *sk, const struct sk_buff *skb)
1202 * This gets called for each TCP segment that arrives
1203 * so we want to be efficient.
1204 * We have 3 drop cases:
1205 * o No MD5 hash and one expected.
1206 * o MD5 hash and we're not expecting one.
1207 * o MD5 hash and it's wrong.
1209 const __u8 *hash_location = NULL;
1210 struct tcp_md5sig_key *hash_expected;
1211 const struct iphdr *iph = ip_hdr(skb);
1212 const struct tcphdr *th = tcp_hdr(skb);
1213 int genhash;
1214 unsigned char newhash[16];
1216 hash_expected = tcp_md5_do_lookup(sk, (union tcp_md5_addr *)&iph->saddr,
1217 AF_INET);
1218 hash_location = tcp_parse_md5sig_option(th);
1220 /* We've parsed the options - do we have a hash? */
1221 if (!hash_expected && !hash_location)
1222 return 0;
1224 if (hash_expected && !hash_location) {
1225 NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPMD5NOTFOUND);
1226 return 1;
1229 if (!hash_expected && hash_location) {
1230 NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPMD5UNEXPECTED);
1231 return 1;
1234 /* Okay, so this is hash_expected and hash_location -
1235 * so we need to calculate the checksum.
1237 genhash = tcp_v4_md5_hash_skb(newhash,
1238 hash_expected,
1239 NULL, NULL, skb);
1241 if (genhash || memcmp(hash_location, newhash, 16) != 0) {
1242 if (net_ratelimit()) {
1243 pr_info("MD5 Hash failed for (%pI4, %d)->(%pI4, %d)%s\n",
1244 &iph->saddr, ntohs(th->source),
1245 &iph->daddr, ntohs(th->dest),
1246 genhash ? " tcp_v4_calc_md5_hash failed" : "");
1248 return 1;
1250 return 0;
1253 #endif
1255 struct request_sock_ops tcp_request_sock_ops __read_mostly = {
1256 .family = PF_INET,
1257 .obj_size = sizeof(struct tcp_request_sock),
1258 .rtx_syn_ack = tcp_v4_rtx_synack,
1259 .send_ack = tcp_v4_reqsk_send_ack,
1260 .destructor = tcp_v4_reqsk_destructor,
1261 .send_reset = tcp_v4_send_reset,
1262 .syn_ack_timeout = tcp_syn_ack_timeout,
1265 #ifdef CONFIG_TCP_MD5SIG
1266 static const struct tcp_request_sock_ops tcp_request_sock_ipv4_ops = {
1267 .md5_lookup = tcp_v4_reqsk_md5_lookup,
1268 .calc_md5_hash = tcp_v4_md5_hash_skb,
1270 #endif
1272 int tcp_v4_conn_request(struct sock *sk, struct sk_buff *skb)
1274 struct tcp_extend_values tmp_ext;
1275 struct tcp_options_received tmp_opt;
1276 const u8 *hash_location;
1277 struct request_sock *req;
1278 struct inet_request_sock *ireq;
1279 struct tcp_sock *tp = tcp_sk(sk);
1280 struct dst_entry *dst = NULL;
1281 __be32 saddr = ip_hdr(skb)->saddr;
1282 __be32 daddr = ip_hdr(skb)->daddr;
1283 __u32 isn = TCP_SKB_CB(skb)->when;
1284 int want_cookie = 0;
1286 /* Never answer SYNs sent to broadcast or multicast */
1287 if (skb_rtable(skb)->rt_flags & (RTCF_BROADCAST | RTCF_MULTICAST))
1288 goto drop;
1290 /* TW buckets are converted to open requests without
1291 * limitations; they conserve resources and the peer is
1292 * evidently a real one.
1294 if (inet_csk_reqsk_queue_is_full(sk) && !isn) {
1295 want_cookie = tcp_syn_flood_action(sk, skb, "TCP");
1296 if (!want_cookie)
1297 goto drop;
1300 /* Accept backlog is full. If we have already queued enough
1301 * warm entries in the syn queue, drop the request. That is better
1302 * than clogging the syn queue with openreqs whose timeouts increase
1303 * exponentially.
1305 if (sk_acceptq_is_full(sk) && inet_csk_reqsk_queue_young(sk) > 1)
1306 goto drop;
1308 req = inet_reqsk_alloc(&tcp_request_sock_ops);
1309 if (!req)
1310 goto drop;
1312 #ifdef CONFIG_TCP_MD5SIG
1313 tcp_rsk(req)->af_specific = &tcp_request_sock_ipv4_ops;
1314 #endif
1316 tcp_clear_options(&tmp_opt);
1317 tmp_opt.mss_clamp = TCP_MSS_DEFAULT;
1318 tmp_opt.user_mss = tp->rx_opt.user_mss;
1319 tcp_parse_options(skb, &tmp_opt, &hash_location, 0);
1321 if (tmp_opt.cookie_plus > 0 &&
1322 tmp_opt.saw_tstamp &&
1323 !tp->rx_opt.cookie_out_never &&
1324 (sysctl_tcp_cookie_size > 0 ||
1325 (tp->cookie_values != NULL &&
1326 tp->cookie_values->cookie_desired > 0))) {
1327 u8 *c;
1328 u32 *mess = &tmp_ext.cookie_bakery[COOKIE_DIGEST_WORDS];
1329 int l = tmp_opt.cookie_plus - TCPOLEN_COOKIE_BASE;
1331 if (tcp_cookie_generator(&tmp_ext.cookie_bakery[0]) != 0)
1332 goto drop_and_release;
1334 /* Secret recipe starts with IP addresses */
1335 *mess++ ^= (__force u32)daddr;
1336 *mess++ ^= (__force u32)saddr;
1338 /* plus variable length Initiator Cookie */
1339 c = (u8 *)mess;
1340 while (l-- > 0)
1341 *c++ ^= *hash_location++;
1343 want_cookie = 0; /* not our kind of cookie */
1344 tmp_ext.cookie_out_never = 0; /* false */
1345 tmp_ext.cookie_plus = tmp_opt.cookie_plus;
1346 } else if (!tp->rx_opt.cookie_in_always) {
1347 /* redundant indications, but ensure initialization. */
1348 tmp_ext.cookie_out_never = 1; /* true */
1349 tmp_ext.cookie_plus = 0;
1350 } else {
1351 goto drop_and_release;
1353 tmp_ext.cookie_in_always = tp->rx_opt.cookie_in_always;
1355 if (want_cookie && !tmp_opt.saw_tstamp)
1356 tcp_clear_options(&tmp_opt);
1358 tmp_opt.tstamp_ok = tmp_opt.saw_tstamp;
1359 tcp_openreq_init(req, &tmp_opt, skb);
1361 ireq = inet_rsk(req);
1362 ireq->loc_addr = daddr;
1363 ireq->rmt_addr = saddr;
1364 ireq->no_srccheck = inet_sk(sk)->transparent;
1365 ireq->opt = tcp_v4_save_options(sk, skb);
1367 if (security_inet_conn_request(sk, skb, req))
1368 goto drop_and_free;
1370 if (!want_cookie || tmp_opt.tstamp_ok)
1371 TCP_ECN_create_request(req, tcp_hdr(skb));
1373 if (want_cookie) {
1374 isn = cookie_v4_init_sequence(sk, skb, &req->mss);
1375 req->cookie_ts = tmp_opt.tstamp_ok;
1376 } else if (!isn) {
1377 struct inet_peer *peer = NULL;
1378 struct flowi4 fl4;
1380 /* VJ's idea. We save last timestamp seen
1381 * from the destination in peer table, when entering
1382 * state TIME-WAIT, and check against it before
1383 * accepting new connection request.
1385 * If "isn" is not zero, this request hit an alive
1386 * timewait bucket, so all the necessary checks
1387 * are made in the function processing the timewait state.
1389 if (tmp_opt.saw_tstamp &&
1390 tcp_death_row.sysctl_tw_recycle &&
1391 (dst = inet_csk_route_req(sk, &fl4, req)) != NULL &&
1392 fl4.daddr == saddr &&
1393 (peer = rt_get_peer((struct rtable *)dst, fl4.daddr)) != NULL) {
1394 inet_peer_refcheck(peer);
1395 if ((u32)get_seconds() - peer->tcp_ts_stamp < TCP_PAWS_MSL &&
1396 (s32)(peer->tcp_ts - req->ts_recent) >
1397 TCP_PAWS_WINDOW) {
1398 NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_PAWSPASSIVEREJECTED);
1399 goto drop_and_release;
1402 /* Kill the following clause, if you dislike this way. */
1403 else if (!sysctl_tcp_syncookies &&
1404 (sysctl_max_syn_backlog - inet_csk_reqsk_queue_len(sk) <
1405 (sysctl_max_syn_backlog >> 2)) &&
1406 (!peer || !peer->tcp_ts_stamp) &&
1407 (!dst || !dst_metric(dst, RTAX_RTT))) {
1408 /* Without syncookies, the last quarter of the
1409 * backlog is reserved for destinations
1410 * proven to be alive.
1411 * It means that we continue to communicate
1412 * with destinations already remembered
1413 * at the moment of the synflood.
1415 LIMIT_NETDEBUG(KERN_DEBUG pr_fmt("drop open request from %pI4/%u\n"),
1416 &saddr, ntohs(tcp_hdr(skb)->source));
1417 goto drop_and_release;
1420 isn = tcp_v4_init_sequence(skb);
1422 tcp_rsk(req)->snt_isn = isn;
1423 tcp_rsk(req)->snt_synack = tcp_time_stamp;
1425 if (tcp_v4_send_synack(sk, dst, req,
1426 (struct request_values *)&tmp_ext) ||
1427 want_cookie)
1428 goto drop_and_free;
1430 inet_csk_reqsk_queue_hash_add(sk, req, TCP_TIMEOUT_INIT);
1431 return 0;
1433 drop_and_release:
1434 dst_release(dst);
1435 drop_and_free:
1436 reqsk_free(req);
1437 drop:
1438 return 0;
1440 EXPORT_SYMBOL(tcp_v4_conn_request);
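The "last quarter" clause in tcp_v4_conn_request() is easier to see with numbers: with syncookies off and sysctl_max_syn_backlog at, say, 1024, a SYN from a peer with no cached timestamp and no cached RTT is dropped once fewer than 1024 >> 2 = 256 request slots remain, i.e. once the SYN queue holds more than 768 entries. Restated as a predicate:

	/* Mirrors the reserve test above: keep the final quarter of the
	 * SYN backlog for peers we have previously proven alive. */
	static int reserve_exhausted(int queue_len, int max_syn_backlog)
	{
		return max_syn_backlog - queue_len < (max_syn_backlog >> 2);
	}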
1444 * The three way handshake has completed - we got a valid synack -
1445 * now create the new socket.
1447 struct sock *tcp_v4_syn_recv_sock(struct sock *sk, struct sk_buff *skb,
1448 struct request_sock *req,
1449 struct dst_entry *dst)
1451 struct inet_request_sock *ireq;
1452 struct inet_sock *newinet;
1453 struct tcp_sock *newtp;
1454 struct sock *newsk;
1455 #ifdef CONFIG_TCP_MD5SIG
1456 struct tcp_md5sig_key *key;
1457 #endif
1458 struct ip_options_rcu *inet_opt;
1460 if (sk_acceptq_is_full(sk))
1461 goto exit_overflow;
1463 newsk = tcp_create_openreq_child(sk, req, skb);
1464 if (!newsk)
1465 goto exit_nonewsk;
1467 newsk->sk_gso_type = SKB_GSO_TCPV4;
1469 newtp = tcp_sk(newsk);
1470 newinet = inet_sk(newsk);
1471 ireq = inet_rsk(req);
1472 newinet->inet_daddr = ireq->rmt_addr;
1473 newinet->inet_rcv_saddr = ireq->loc_addr;
1474 newinet->inet_saddr = ireq->loc_addr;
1475 inet_opt = ireq->opt;
1476 rcu_assign_pointer(newinet->inet_opt, inet_opt);
1477 ireq->opt = NULL;
1478 newinet->mc_index = inet_iif(skb);
1479 newinet->mc_ttl = ip_hdr(skb)->ttl;
1480 newinet->rcv_tos = ip_hdr(skb)->tos;
1481 inet_csk(newsk)->icsk_ext_hdr_len = 0;
1482 if (inet_opt)
1483 inet_csk(newsk)->icsk_ext_hdr_len = inet_opt->opt.optlen;
1484 newinet->inet_id = newtp->write_seq ^ jiffies;
1486 if (!dst) {
1487 dst = inet_csk_route_child_sock(sk, newsk, req);
1488 if (!dst)
1489 goto put_and_exit;
1490 } else {
1491 /* syncookie case : see end of cookie_v4_check() */
1493 sk_setup_caps(newsk, dst);
1495 tcp_mtup_init(newsk);
1496 tcp_sync_mss(newsk, dst_mtu(dst));
1497 newtp->advmss = dst_metric_advmss(dst);
1498 if (tcp_sk(sk)->rx_opt.user_mss &&
1499 tcp_sk(sk)->rx_opt.user_mss < newtp->advmss)
1500 newtp->advmss = tcp_sk(sk)->rx_opt.user_mss;
1502 tcp_initialize_rcv_mss(newsk);
1503 if (tcp_rsk(req)->snt_synack)
1504 tcp_valid_rtt_meas(newsk,
1505 tcp_time_stamp - tcp_rsk(req)->snt_synack);
1506 newtp->total_retrans = req->retrans;
1508 #ifdef CONFIG_TCP_MD5SIG
1509 /* Copy over the MD5 key from the original socket */
1510 key = tcp_md5_do_lookup(sk, (union tcp_md5_addr *)&newinet->inet_daddr,
1511 AF_INET);
1512 if (key != NULL) {
1514 * We're using one, so create a matching key
1515 * on the newsk structure. If we fail to get
1516 * memory, then we end up not copying the key
1517 * across. Shucks.
1519 tcp_md5_do_add(newsk, (union tcp_md5_addr *)&newinet->inet_daddr,
1520 AF_INET, key->key, key->keylen, GFP_ATOMIC);
1521 sk_nocaps_add(newsk, NETIF_F_GSO_MASK);
1523 #endif
1525 if (__inet_inherit_port(sk, newsk) < 0)
1526 goto put_and_exit;
1527 __inet_hash_nolisten(newsk, NULL);
1529 return newsk;
1531 exit_overflow:
1532 NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_LISTENOVERFLOWS);
1533 exit_nonewsk:
1534 dst_release(dst);
1535 exit:
1536 NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_LISTENDROPS);
1537 return NULL;
1538 put_and_exit:
1539 tcp_clear_xmit_timers(newsk);
1540 tcp_cleanup_congestion_control(newsk);
1541 bh_unlock_sock(newsk);
1542 sock_put(newsk);
1543 goto exit;
1545 EXPORT_SYMBOL(tcp_v4_syn_recv_sock);
1547 static struct sock *tcp_v4_hnd_req(struct sock *sk, struct sk_buff *skb)
1549 struct tcphdr *th = tcp_hdr(skb);
1550 const struct iphdr *iph = ip_hdr(skb);
1551 struct sock *nsk;
1552 struct request_sock **prev;
1553 /* Find possible connection requests. */
1554 struct request_sock *req = inet_csk_search_req(sk, &prev, th->source,
1555 iph->saddr, iph->daddr);
1556 if (req)
1557 return tcp_check_req(sk, skb, req, prev);
1559 nsk = inet_lookup_established(sock_net(sk), &tcp_hashinfo, iph->saddr,
1560 th->source, iph->daddr, th->dest, inet_iif(skb));
1562 if (nsk) {
1563 if (nsk->sk_state != TCP_TIME_WAIT) {
1564 bh_lock_sock(nsk);
1565 return nsk;
1567 inet_twsk_put(inet_twsk(nsk));
1568 return NULL;
1571 #ifdef CONFIG_SYN_COOKIES
1572 if (!th->syn)
1573 sk = cookie_v4_check(sk, skb, &(IPCB(skb)->opt));
1574 #endif
1575 return sk;
1578 static __sum16 tcp_v4_checksum_init(struct sk_buff *skb)
1580 const struct iphdr *iph = ip_hdr(skb);
1582 if (skb->ip_summed == CHECKSUM_COMPLETE) {
1583 if (!tcp_v4_check(skb->len, iph->saddr,
1584 iph->daddr, skb->csum)) {
1585 skb->ip_summed = CHECKSUM_UNNECESSARY;
1586 return 0;
1590 skb->csum = csum_tcpudp_nofold(iph->saddr, iph->daddr,
1591 skb->len, IPPROTO_TCP, 0);
1593 if (skb->len <= 76) {
1594 return __skb_checksum_complete(skb);
1596 return 0;
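tcp_v4_checksum_init() defers full verification for packets longer than 76 bytes, but the arithmetic it ultimately relies on is the plain RFC 1071 one's-complement sum over pseudo-header plus segment. A self-contained reference fold (the kernel's csum_partial()/csum_fold() pair computes the same value far faster):

	#include <stddef.h>
	#include <stdint.h>

	/* RFC 1071 Internet checksum over a byte buffer. */
	static uint16_t inet_checksum(const void *data, size_t len)
	{
		const uint8_t *p = data;
		uint32_t sum = 0;

		while (len > 1) {
			sum += ((uint32_t)p[0] << 8) | p[1];
			p += 2;
			len -= 2;
		}
		if (len)			/* odd trailing byte */
			sum += (uint32_t)p[0] << 8;
		while (sum >> 16)		/* fold carries */
			sum = (sum & 0xffff) + (sum >> 16);

		return (uint16_t)~sum;
	}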
1600 /* The socket must have its spinlock held when we get
1601 * here.
1603 * We have a potential double-lock case here, so even when
1604 * doing backlog processing we use the BH locking scheme.
1605 * This is because we cannot sleep with the original spinlock
1606 * held.
1608 int tcp_v4_do_rcv(struct sock *sk, struct sk_buff *skb)
1610 struct sock *rsk;
1611 #ifdef CONFIG_TCP_MD5SIG
1613 * We really want to reject the packet as early as possible
1614 * if:
1615 * o We're expecting an MD5'd packet and there is no MD5 tcp option
1616 * o There is an MD5 option and we're not expecting one
1618 if (tcp_v4_inbound_md5_hash(sk, skb))
1619 goto discard;
1620 #endif
1622 if (sk->sk_state == TCP_ESTABLISHED) { /* Fast path */
1623 sock_rps_save_rxhash(sk, skb);
1624 if (tcp_rcv_established(sk, skb, tcp_hdr(skb), skb->len)) {
1625 rsk = sk;
1626 goto reset;
1628 return 0;
1631 if (skb->len < tcp_hdrlen(skb) || tcp_checksum_complete(skb))
1632 goto csum_err;
1634 if (sk->sk_state == TCP_LISTEN) {
1635 struct sock *nsk = tcp_v4_hnd_req(sk, skb);
1636 if (!nsk)
1637 goto discard;
1639 if (nsk != sk) {
1640 sock_rps_save_rxhash(nsk, skb);
1641 if (tcp_child_process(sk, nsk, skb)) {
1642 rsk = nsk;
1643 goto reset;
1645 return 0;
1647 } else
1648 sock_rps_save_rxhash(sk, skb);
1650 if (tcp_rcv_state_process(sk, skb, tcp_hdr(skb), skb->len)) {
1651 rsk = sk;
1652 goto reset;
1654 return 0;
1656 reset:
1657 tcp_v4_send_reset(rsk, skb);
1658 discard:
1659 kfree_skb(skb);
1660 /* Be careful here. If this function gets more complicated and
1661 * gcc suffers from register pressure on the x86, sk (in %ebx)
1662 * might be destroyed here. This current version compiles correctly,
1663 * but you have been warned.
1665 return 0;
1667 csum_err:
1668 TCP_INC_STATS_BH(sock_net(sk), TCP_MIB_INERRS);
1669 goto discard;
1671 EXPORT_SYMBOL(tcp_v4_do_rcv);
1674 * From tcp_input.c
1677 int tcp_v4_rcv(struct sk_buff *skb)
1679 const struct iphdr *iph;
1680 const struct tcphdr *th;
1681 struct sock *sk;
1682 int ret;
1683 struct net *net = dev_net(skb->dev);
1685 if (skb->pkt_type != PACKET_HOST)
1686 goto discard_it;
1688 /* Count it even if it's bad */
1689 TCP_INC_STATS_BH(net, TCP_MIB_INSEGS);
1691 if (!pskb_may_pull(skb, sizeof(struct tcphdr)))
1692 goto discard_it;
1694 th = tcp_hdr(skb);
1696 if (th->doff < sizeof(struct tcphdr) / 4)
1697 goto bad_packet;
1698 if (!pskb_may_pull(skb, th->doff * 4))
1699 goto discard_it;
1701 /* An explanation is required here, I think.
1702 * Packet length and doff are validated by header prediction,
1703 * provided case of th->doff==0 is eliminated.
1704 * So, we defer the checks. */
1705 if (!skb_csum_unnecessary(skb) && tcp_v4_checksum_init(skb))
1706 goto bad_packet;
1708 th = tcp_hdr(skb);
1709 iph = ip_hdr(skb);
1710 TCP_SKB_CB(skb)->seq = ntohl(th->seq);
1711 TCP_SKB_CB(skb)->end_seq = (TCP_SKB_CB(skb)->seq + th->syn + th->fin +
1712 skb->len - th->doff * 4);
1713 TCP_SKB_CB(skb)->ack_seq = ntohl(th->ack_seq);
1714 TCP_SKB_CB(skb)->when = 0;
1715 TCP_SKB_CB(skb)->ip_dsfield = ipv4_get_dsfield(iph);
1716 TCP_SKB_CB(skb)->sacked = 0;
1718 sk = __inet_lookup_skb(&tcp_hashinfo, skb, th->source, th->dest);
1719 if (!sk)
1720 goto no_tcp_socket;
1722 process:
1723 if (sk->sk_state == TCP_TIME_WAIT)
1724 goto do_time_wait;
1726 if (unlikely(iph->ttl < inet_sk(sk)->min_ttl)) {
1727 NET_INC_STATS_BH(net, LINUX_MIB_TCPMINTTLDROP);
1728 goto discard_and_relse;
1731 if (!xfrm4_policy_check(sk, XFRM_POLICY_IN, skb))
1732 goto discard_and_relse;
1733 nf_reset(skb);
1735 if (sk_filter(sk, skb))
1736 goto discard_and_relse;
1738 skb->dev = NULL;
1740 bh_lock_sock_nested(sk);
1741 ret = 0;
1742 if (!sock_owned_by_user(sk)) {
1743 #ifdef CONFIG_NET_DMA
1744 struct tcp_sock *tp = tcp_sk(sk);
1745 if (!tp->ucopy.dma_chan && tp->ucopy.pinned_list)
1746 tp->ucopy.dma_chan = net_dma_find_channel();
1747 if (tp->ucopy.dma_chan)
1748 ret = tcp_v4_do_rcv(sk, skb);
1749 else
1750 #endif
1752 if (!tcp_prequeue(sk, skb))
1753 ret = tcp_v4_do_rcv(sk, skb);
1755 } else if (unlikely(sk_add_backlog(sk, skb,
1756 sk->sk_rcvbuf + sk->sk_sndbuf))) {
1757 bh_unlock_sock(sk);
1758 NET_INC_STATS_BH(net, LINUX_MIB_TCPBACKLOGDROP);
1759 goto discard_and_relse;
1761 bh_unlock_sock(sk);
1763 sock_put(sk);
1765 return ret;
1767 no_tcp_socket:
1768 if (!xfrm4_policy_check(NULL, XFRM_POLICY_IN, skb))
1769 goto discard_it;
1771 if (skb->len < (th->doff << 2) || tcp_checksum_complete(skb)) {
1772 bad_packet:
1773 TCP_INC_STATS_BH(net, TCP_MIB_INERRS);
1774 } else {
1775 tcp_v4_send_reset(NULL, skb);
1778 discard_it:
1779 /* Discard frame. */
1780 kfree_skb(skb);
1781 return 0;
1783 discard_and_relse:
1784 sock_put(sk);
1785 goto discard_it;
1787 do_time_wait:
1788 if (!xfrm4_policy_check(NULL, XFRM_POLICY_IN, skb)) {
1789 inet_twsk_put(inet_twsk(sk));
1790 goto discard_it;
1793 if (skb->len < (th->doff << 2) || tcp_checksum_complete(skb)) {
1794 TCP_INC_STATS_BH(net, TCP_MIB_INERRS);
1795 inet_twsk_put(inet_twsk(sk));
1796 goto discard_it;
1798 switch (tcp_timewait_state_process(inet_twsk(sk), skb, th)) {
1799 case TCP_TW_SYN: {
1800 struct sock *sk2 = inet_lookup_listener(dev_net(skb->dev),
1801 &tcp_hashinfo,
1802 iph->daddr, th->dest,
1803 inet_iif(skb));
1804 if (sk2) {
1805 inet_twsk_deschedule(inet_twsk(sk), &tcp_death_row);
1806 inet_twsk_put(inet_twsk(sk));
1807 sk = sk2;
1808 goto process;
1810 /* Fall through to ACK */
1812 case TCP_TW_ACK:
1813 tcp_v4_timewait_ack(sk, skb);
1814 break;
1815 case TCP_TW_RST:
1816 goto no_tcp_socket;
1817 case TCP_TW_SUCCESS:;
1819 goto discard_it;
1822 struct inet_peer *tcp_v4_get_peer(struct sock *sk, bool *release_it)
1824 struct rtable *rt = (struct rtable *) __sk_dst_get(sk);
1825 struct inet_sock *inet = inet_sk(sk);
1826 struct inet_peer *peer;
1828 if (!rt ||
1829 inet->cork.fl.u.ip4.daddr != inet->inet_daddr) {
1830 peer = inet_getpeer_v4(inet->inet_daddr, 1);
1831 *release_it = true;
1832 } else {
1833 if (!rt->peer)
1834 rt_bind_peer(rt, inet->inet_daddr, 1);
1835 peer = rt->peer;
1836 *release_it = false;
1839 return peer;
1841 EXPORT_SYMBOL(tcp_v4_get_peer);
1843 void *tcp_v4_tw_get_peer(struct sock *sk)
1845 const struct inet_timewait_sock *tw = inet_twsk(sk);
1847 return inet_getpeer_v4(tw->tw_daddr, 1);
1849 EXPORT_SYMBOL(tcp_v4_tw_get_peer);
1851 static struct timewait_sock_ops tcp_timewait_sock_ops = {
1852 .twsk_obj_size = sizeof(struct tcp_timewait_sock),
1853 .twsk_unique = tcp_twsk_unique,
1854 .twsk_destructor= tcp_twsk_destructor,
1855 .twsk_getpeer = tcp_v4_tw_get_peer,
1858 const struct inet_connection_sock_af_ops ipv4_specific = {
1859 .queue_xmit = ip_queue_xmit,
1860 .send_check = tcp_v4_send_check,
1861 .rebuild_header = inet_sk_rebuild_header,
1862 .conn_request = tcp_v4_conn_request,
1863 .syn_recv_sock = tcp_v4_syn_recv_sock,
1864 .get_peer = tcp_v4_get_peer,
1865 .net_header_len = sizeof(struct iphdr),
1866 .setsockopt = ip_setsockopt,
1867 .getsockopt = ip_getsockopt,
1868 .addr2sockaddr = inet_csk_addr2sockaddr,
1869 .sockaddr_len = sizeof(struct sockaddr_in),
1870 .bind_conflict = inet_csk_bind_conflict,
1871 #ifdef CONFIG_COMPAT
1872 .compat_setsockopt = compat_ip_setsockopt,
1873 .compat_getsockopt = compat_ip_getsockopt,
1874 #endif
1876 EXPORT_SYMBOL(ipv4_specific);
1878 #ifdef CONFIG_TCP_MD5SIG
1879 static const struct tcp_sock_af_ops tcp_sock_ipv4_specific = {
1880 .md5_lookup = tcp_v4_md5_lookup,
1881 .calc_md5_hash = tcp_v4_md5_hash_skb,
1882 .md5_parse = tcp_v4_parse_md5_keys,
1884 #endif
1886 /* NOTE: A lot of things set to zero explicitly by call to
1887 * sk_alloc() so need not be done here.
1889 static int tcp_v4_init_sock(struct sock *sk)
1891 struct inet_connection_sock *icsk = inet_csk(sk);
1893 tcp_init_sock(sk);
1895 icsk->icsk_af_ops = &ipv4_specific;
1897 #ifdef CONFIG_TCP_MD5SIG
1898 tcp_sk(sk)->af_specific = &tcp_sock_ipv4_specific;
1899 #endif
1901 return 0;
1904 void tcp_v4_destroy_sock(struct sock *sk)
1906 struct tcp_sock *tp = tcp_sk(sk);
1908 tcp_clear_xmit_timers(sk);
1910 tcp_cleanup_congestion_control(sk);
1912 /* Clean up the write buffer. */
1913 tcp_write_queue_purge(sk);
1915 /* Cleans up our, hopefully empty, out_of_order_queue. */
1916 __skb_queue_purge(&tp->out_of_order_queue);
1918 #ifdef CONFIG_TCP_MD5SIG
1919 /* Clean up the MD5 key list, if any */
1920 if (tp->md5sig_info) {
1921 tcp_clear_md5_list(sk);
1922 kfree_rcu(tp->md5sig_info, rcu);
1923 tp->md5sig_info = NULL;
1925 #endif
1927 #ifdef CONFIG_NET_DMA
1928 /* Cleans up our sk_async_wait_queue */
1929 __skb_queue_purge(&sk->sk_async_wait_queue);
1930 #endif
1932 /* Clean the prequeue; it really must be empty */
1933 __skb_queue_purge(&tp->ucopy.prequeue);
1935 /* Clean up a referenced TCP bind bucket. */
1936 if (inet_csk(sk)->icsk_bind_hash)
1937 inet_put_port(sk);
1940 * If sendmsg cached page exists, toss it.
1942 if (sk->sk_sndmsg_page) {
1943 __free_page(sk->sk_sndmsg_page);
1944 sk->sk_sndmsg_page = NULL;
1947 /* TCP Cookie Transactions */
1948 if (tp->cookie_values != NULL) {
1949 kref_put(&tp->cookie_values->kref,
1950 tcp_cookie_values_release);
1951 tp->cookie_values = NULL;
1954 sk_sockets_allocated_dec(sk);
1955 sock_release_memcg(sk);
1957 EXPORT_SYMBOL(tcp_v4_destroy_sock);
1959 #ifdef CONFIG_PROC_FS
1960 /* Proc filesystem TCP sock list dumping. */
1962 static inline struct inet_timewait_sock *tw_head(struct hlist_nulls_head *head)
1964 return hlist_nulls_empty(head) ? NULL :
1965 list_entry(head->first, struct inet_timewait_sock, tw_node);
1968 static inline struct inet_timewait_sock *tw_next(struct inet_timewait_sock *tw)
1970 return !is_a_nulls(tw->tw_node.next) ?
1971 hlist_nulls_entry(tw->tw_node.next, typeof(*tw), tw_node) : NULL;
1975 * Get the next listener socket following cur. If cur is NULL, get the first socket
1976 * starting from bucket given in st->bucket; when st->bucket is zero the
1977 * very first socket in the hash table is returned.
1979 static void *listening_get_next(struct seq_file *seq, void *cur)
1981 struct inet_connection_sock *icsk;
1982 struct hlist_nulls_node *node;
1983 struct sock *sk = cur;
1984 struct inet_listen_hashbucket *ilb;
1985 struct tcp_iter_state *st = seq->private;
1986 struct net *net = seq_file_net(seq);
1988 if (!sk) {
1989 ilb = &tcp_hashinfo.listening_hash[st->bucket];
1990 spin_lock_bh(&ilb->lock);
1991 sk = sk_nulls_head(&ilb->head);
1992 st->offset = 0;
1993 goto get_sk;
1995 ilb = &tcp_hashinfo.listening_hash[st->bucket];
1996 ++st->num;
1997 ++st->offset;
1999 if (st->state == TCP_SEQ_STATE_OPENREQ) {
2000 struct request_sock *req = cur;
2002 icsk = inet_csk(st->syn_wait_sk);
2003 req = req->dl_next;
2004 while (1) {
2005 while (req) {
2006 if (req->rsk_ops->family == st->family) {
2007 cur = req;
2008 goto out;
2010 req = req->dl_next;
2012 if (++st->sbucket >= icsk->icsk_accept_queue.listen_opt->nr_table_entries)
2013 break;
2014 get_req:
2015 req = icsk->icsk_accept_queue.listen_opt->syn_table[st->sbucket];
2017 sk = sk_nulls_next(st->syn_wait_sk);
2018 st->state = TCP_SEQ_STATE_LISTENING;
2019 read_unlock_bh(&icsk->icsk_accept_queue.syn_wait_lock);
2020 } else {
2021 icsk = inet_csk(sk);
2022 read_lock_bh(&icsk->icsk_accept_queue.syn_wait_lock);
2023 if (reqsk_queue_len(&icsk->icsk_accept_queue))
2024 goto start_req;
2025 read_unlock_bh(&icsk->icsk_accept_queue.syn_wait_lock);
2026 sk = sk_nulls_next(sk);
2028 get_sk:
2029 sk_nulls_for_each_from(sk, node) {
2030 if (!net_eq(sock_net(sk), net))
2031 continue;
2032 if (sk->sk_family == st->family) {
2033 cur = sk;
2034 goto out;
2036 icsk = inet_csk(sk);
2037 read_lock_bh(&icsk->icsk_accept_queue.syn_wait_lock);
2038 if (reqsk_queue_len(&icsk->icsk_accept_queue)) {
2039 start_req:
2040 st->uid = sock_i_uid(sk);
2041 st->syn_wait_sk = sk;
2042 st->state = TCP_SEQ_STATE_OPENREQ;
2043 st->sbucket = 0;
2044 goto get_req;
2046 read_unlock_bh(&icsk->icsk_accept_queue.syn_wait_lock);
2048 spin_unlock_bh(&ilb->lock);
2049 st->offset = 0;
2050 if (++st->bucket < INET_LHTABLE_SIZE) {
2051 ilb = &tcp_hashinfo.listening_hash[st->bucket];
2052 spin_lock_bh(&ilb->lock);
2053 sk = sk_nulls_head(&ilb->head);
2054 goto get_sk;
2056 cur = NULL;
2057 out:
2058 return cur;
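
/*
 * The walk above is effectively a two-level iterator: for each listening
 * socket it also descends into that socket's SYN table (the embryonic
 * open requests), tracked via st->state == TCP_SEQ_STATE_OPENREQ and
 * st->sbucket.  Lock discipline: ilb->lock is held for the whole bucket,
 * and syn_wait_lock is additionally read-locked while a request list is
 * being reported; both are dropped before moving on to the next bucket.
 */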

static void *listening_get_idx(struct seq_file *seq, loff_t *pos)
{
	struct tcp_iter_state *st = seq->private;
	void *rc;

	st->bucket = 0;
	st->offset = 0;
	rc = listening_get_next(seq, NULL);

	while (rc && *pos) {
		rc = listening_get_next(seq, rc);
		--*pos;
	}
	return rc;
}

static inline int empty_bucket(struct tcp_iter_state *st)
{
	return hlist_nulls_empty(&tcp_hashinfo.ehash[st->bucket].chain) &&
		hlist_nulls_empty(&tcp_hashinfo.ehash[st->bucket].twchain);
}

/*
 * Get first established socket starting from bucket given in st->bucket.
 * If st->bucket is zero, the very first socket in the hash is returned.
 */
static void *established_get_first(struct seq_file *seq)
{
	struct tcp_iter_state *st = seq->private;
	struct net *net = seq_file_net(seq);
	void *rc = NULL;

	st->offset = 0;
	for (; st->bucket <= tcp_hashinfo.ehash_mask; ++st->bucket) {
		struct sock *sk;
		struct hlist_nulls_node *node;
		struct inet_timewait_sock *tw;
		spinlock_t *lock = inet_ehash_lockp(&tcp_hashinfo, st->bucket);

		/* Lockless fast path for the common case of empty buckets */
		if (empty_bucket(st))
			continue;

		spin_lock_bh(lock);
		sk_nulls_for_each(sk, node, &tcp_hashinfo.ehash[st->bucket].chain) {
			if (sk->sk_family != st->family ||
			    !net_eq(sock_net(sk), net)) {
				continue;
			}
			rc = sk;
			goto out;
		}
		st->state = TCP_SEQ_STATE_TIME_WAIT;
		inet_twsk_for_each(tw, node,
				   &tcp_hashinfo.ehash[st->bucket].twchain) {
			if (tw->tw_family != st->family ||
			    !net_eq(twsk_net(tw), net)) {
				continue;
			}
			rc = tw;
			goto out;
		}
		spin_unlock_bh(lock);
		st->state = TCP_SEQ_STATE_ESTABLISHED;
	}
out:
	return rc;
}
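
/*
 * Note: on success the ehash bucket lock is deliberately left held; it
 * is dropped either by established_get_next() once the bucket is
 * exhausted, or by tcp_seq_stop().  empty_bucket() lets the walker skip
 * empty buckets without taking their lock at all.
 */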

static void *established_get_next(struct seq_file *seq, void *cur)
{
	struct sock *sk = cur;
	struct inet_timewait_sock *tw;
	struct hlist_nulls_node *node;
	struct tcp_iter_state *st = seq->private;
	struct net *net = seq_file_net(seq);

	++st->num;
	++st->offset;

	if (st->state == TCP_SEQ_STATE_TIME_WAIT) {
		tw = cur;
		tw = tw_next(tw);
get_tw:
		while (tw && (tw->tw_family != st->family || !net_eq(twsk_net(tw), net))) {
			tw = tw_next(tw);
		}
		if (tw) {
			cur = tw;
			goto out;
		}
		spin_unlock_bh(inet_ehash_lockp(&tcp_hashinfo, st->bucket));
		st->state = TCP_SEQ_STATE_ESTABLISHED;

		/* Look for the next non-empty bucket */
		st->offset = 0;
		while (++st->bucket <= tcp_hashinfo.ehash_mask &&
				empty_bucket(st))
			;
		if (st->bucket > tcp_hashinfo.ehash_mask)
			return NULL;

		spin_lock_bh(inet_ehash_lockp(&tcp_hashinfo, st->bucket));
		sk = sk_nulls_head(&tcp_hashinfo.ehash[st->bucket].chain);
	} else
		sk = sk_nulls_next(sk);

	sk_nulls_for_each_from(sk, node) {
		if (sk->sk_family == st->family && net_eq(sock_net(sk), net))
			goto found;
	}

	st->state = TCP_SEQ_STATE_TIME_WAIT;
	tw = tw_head(&tcp_hashinfo.ehash[st->bucket].twchain);
	goto get_tw;
found:
	cur = sk;
out:
	return cur;
}

static void *established_get_idx(struct seq_file *seq, loff_t pos)
{
	struct tcp_iter_state *st = seq->private;
	void *rc;

	st->bucket = 0;
	rc = established_get_first(seq);

	while (rc && pos) {
		rc = established_get_next(seq, rc);
		--pos;
	}
	return rc;
}

static void *tcp_get_idx(struct seq_file *seq, loff_t pos)
{
	void *rc;
	struct tcp_iter_state *st = seq->private;

	st->state = TCP_SEQ_STATE_LISTENING;
	rc	  = listening_get_idx(seq, &pos);

	if (!rc) {
		st->state = TCP_SEQ_STATE_ESTABLISHED;
		rc	  = established_get_idx(seq, pos);
	}

	return rc;
}

static void *tcp_seek_last_pos(struct seq_file *seq)
{
	struct tcp_iter_state *st = seq->private;
	int offset = st->offset;
	int orig_num = st->num;
	void *rc = NULL;

	switch (st->state) {
	case TCP_SEQ_STATE_OPENREQ:
	case TCP_SEQ_STATE_LISTENING:
		if (st->bucket >= INET_LHTABLE_SIZE)
			break;
		st->state = TCP_SEQ_STATE_LISTENING;
		rc = listening_get_next(seq, NULL);
		while (offset-- && rc)
			rc = listening_get_next(seq, rc);
		if (rc)
			break;
		st->bucket = 0;
		/* Fallthrough */
	case TCP_SEQ_STATE_ESTABLISHED:
	case TCP_SEQ_STATE_TIME_WAIT:
		st->state = TCP_SEQ_STATE_ESTABLISHED;
		if (st->bucket > tcp_hashinfo.ehash_mask)
			break;
		rc = established_get_first(seq);
		while (offset-- && rc)
			rc = established_get_next(seq, rc);
	}

	st->num = orig_num;

	return rc;
}
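
/*
 * tcp_seek_last_pos() exists so that a sequential reader of /proc/net/tcp
 * does not restart the O(hash-size) scan on every read(2) chunk:
 * st->bucket and st->offset remember where the previous show stopped, so
 * only st->offset entries of a single bucket have to be re-walked to
 * resume.  tcp_seq_start() takes this fast path when *pos matches the
 * recorded st->last_pos.
 */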

static void *tcp_seq_start(struct seq_file *seq, loff_t *pos)
{
	struct tcp_iter_state *st = seq->private;
	void *rc;

	if (*pos && *pos == st->last_pos) {
		rc = tcp_seek_last_pos(seq);
		if (rc)
			goto out;
	}

	st->state = TCP_SEQ_STATE_LISTENING;
	st->num = 0;
	st->bucket = 0;
	st->offset = 0;
	rc = *pos ? tcp_get_idx(seq, *pos - 1) : SEQ_START_TOKEN;

out:
	st->last_pos = *pos;
	return rc;
}

static void *tcp_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
	struct tcp_iter_state *st = seq->private;
	void *rc = NULL;

	if (v == SEQ_START_TOKEN) {
		rc = tcp_get_idx(seq, 0);
		goto out;
	}

	switch (st->state) {
	case TCP_SEQ_STATE_OPENREQ:
	case TCP_SEQ_STATE_LISTENING:
		rc = listening_get_next(seq, v);
		if (!rc) {
			st->state = TCP_SEQ_STATE_ESTABLISHED;
			st->bucket = 0;
			st->offset = 0;
			rc	  = established_get_first(seq);
		}
		break;
	case TCP_SEQ_STATE_ESTABLISHED:
	case TCP_SEQ_STATE_TIME_WAIT:
		rc = established_get_next(seq, v);
		break;
	}
out:
	++*pos;
	st->last_pos = *pos;
	return rc;
}

static void tcp_seq_stop(struct seq_file *seq, void *v)
{
	struct tcp_iter_state *st = seq->private;

	switch (st->state) {
	case TCP_SEQ_STATE_OPENREQ:
		if (v) {
			struct inet_connection_sock *icsk = inet_csk(st->syn_wait_sk);
			read_unlock_bh(&icsk->icsk_accept_queue.syn_wait_lock);
		}
	case TCP_SEQ_STATE_LISTENING:
		if (v != SEQ_START_TOKEN)
			spin_unlock_bh(&tcp_hashinfo.listening_hash[st->bucket].lock);
		break;
	case TCP_SEQ_STATE_TIME_WAIT:
	case TCP_SEQ_STATE_ESTABLISHED:
		if (v)
			spin_unlock_bh(inet_ehash_lockp(&tcp_hashinfo, st->bucket));
		break;
	}
}
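
/*
 * tcp_seq_stop() undoes whatever lock tcp_seq_start()/tcp_seq_next()
 * left held for the current element: syn_wait_lock plus the listening
 * bucket lock (via the intentional fallthrough above) in the OPENREQ
 * state, the listening bucket lock alone in the LISTENING state, and
 * the ehash bucket lock in the ESTABLISHED/TIME_WAIT states.
 */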

int tcp_seq_open(struct inode *inode, struct file *file)
{
	struct tcp_seq_afinfo *afinfo = PDE(inode)->data;
	struct tcp_iter_state *s;
	int err;

	err = seq_open_net(inode, file, &afinfo->seq_ops,
			   sizeof(struct tcp_iter_state));
	if (err < 0)
		return err;

	s = ((struct seq_file *)file->private_data)->private;
	s->family	= afinfo->family;
	s->last_pos	= 0;
	return 0;
}
EXPORT_SYMBOL(tcp_seq_open);

int tcp_proc_register(struct net *net, struct tcp_seq_afinfo *afinfo)
{
	int rc = 0;
	struct proc_dir_entry *p;

	afinfo->seq_ops.start	= tcp_seq_start;
	afinfo->seq_ops.next	= tcp_seq_next;
	afinfo->seq_ops.stop	= tcp_seq_stop;

	p = proc_create_data(afinfo->name, S_IRUGO, net->proc_net,
			     afinfo->seq_fops, afinfo);
	if (!p)
		rc = -ENOMEM;
	return rc;
}
EXPORT_SYMBOL(tcp_proc_register);

void tcp_proc_unregister(struct net *net, struct tcp_seq_afinfo *afinfo)
{
	proc_net_remove(net, afinfo->name);
}
EXPORT_SYMBOL(tcp_proc_unregister);
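
/*
 * Usage sketch (illustrative only, not part of this file): a protocol
 * flavour registers a tcp_seq_afinfo from its pernet init hook, exactly
 * as tcp4_proc_init_net() does further down with tcp4_seq_afinfo.  The
 * example_* names below are hypothetical.
 */
#if 0
static struct tcp_seq_afinfo example_seq_afinfo = {
	.name		= "tcp_example",
	.family		= AF_INET,
	.seq_fops	= &tcp_afinfo_seq_fops,	/* defined later in this file */
	.seq_ops	= {
		.show		= tcp4_seq_show,
	},
};

static int __net_init example_proc_init_net(struct net *net)
{
	/* tcp_proc_register() fills in .start/.next/.stop itself */
	return tcp_proc_register(net, &example_seq_afinfo);
}
#endif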

static void get_openreq4(const struct sock *sk, const struct request_sock *req,
			 struct seq_file *f, int i, int uid, int *len)
{
	const struct inet_request_sock *ireq = inet_rsk(req);
	int ttd = req->expires - jiffies;

	seq_printf(f, "%4d: %08X:%04X %08X:%04X"
		" %02X %08X:%08X %02X:%08lX %08X %5d %8d %u %d %pK%n",
		i,
		ireq->loc_addr,
		ntohs(inet_sk(sk)->inet_sport),
		ireq->rmt_addr,
		ntohs(ireq->rmt_port),
		TCP_SYN_RECV,
		0, 0, /* could print option size, but that is af dependent. */
		1,    /* timers active (only the expire timer) */
		jiffies_to_clock_t(ttd),
		req->retrans,
		uid,
		0,  /* non standard timer */
		0, /* open_requests have no inode */
		atomic_read(&sk->sk_refcnt),
		req,
		len);
}

static void get_tcp4_sock(struct sock *sk, struct seq_file *f, int i, int *len)
{
	int timer_active;
	unsigned long timer_expires;
	const struct tcp_sock *tp = tcp_sk(sk);
	const struct inet_connection_sock *icsk = inet_csk(sk);
	const struct inet_sock *inet = inet_sk(sk);
	__be32 dest = inet->inet_daddr;
	__be32 src = inet->inet_rcv_saddr;
	__u16 destp = ntohs(inet->inet_dport);
	__u16 srcp = ntohs(inet->inet_sport);
	int rx_queue;

	if (icsk->icsk_pending == ICSK_TIME_RETRANS) {
		timer_active	= 1;
		timer_expires	= icsk->icsk_timeout;
	} else if (icsk->icsk_pending == ICSK_TIME_PROBE0) {
		timer_active	= 4;
		timer_expires	= icsk->icsk_timeout;
	} else if (timer_pending(&sk->sk_timer)) {
		timer_active	= 2;
		timer_expires	= sk->sk_timer.expires;
	} else {
		timer_active	= 0;
		timer_expires	= jiffies;
	}

	if (sk->sk_state == TCP_LISTEN)
		rx_queue = sk->sk_ack_backlog;
	else
		/*
		 * Because we don't lock the socket, we might find a
		 * transient negative value.
		 */
		rx_queue = max_t(int, tp->rcv_nxt - tp->copied_seq, 0);

	seq_printf(f, "%4d: %08X:%04X %08X:%04X %02X %08X:%08X %02X:%08lX "
			"%08X %5d %8d %lu %d %pK %lu %lu %u %u %d%n",
		i, src, srcp, dest, destp, sk->sk_state,
		tp->write_seq - tp->snd_una,
		rx_queue,
		timer_active,
		jiffies_to_clock_t(timer_expires - jiffies),
		icsk->icsk_retransmits,
		sock_i_uid(sk),
		icsk->icsk_probes_out,
		sock_i_ino(sk),
		atomic_read(&sk->sk_refcnt), sk,
		jiffies_to_clock_t(icsk->icsk_rto),
		jiffies_to_clock_t(icsk->icsk_ack.ato),
		(icsk->icsk_ack.quick << 1) | icsk->icsk_ack.pingpong,
		tp->snd_cwnd,
		tcp_in_initial_slowstart(tp) ? -1 : tp->snd_ssthresh,
		len);
}
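
/*
 * For illustration (the values below are invented), a /proc/net/tcp
 * entry produced by get_tcp4_sock() looks like:
 *
 *   0: 0100007F:0016 00000000:0000 0A 00000000:00000000 00:00000000 00000000     0        0 12345 1 ffff88003d3af3c0 100 0 0 10 -1
 *
 * Local and remote address:port are hex in network byte order
 * (0100007F:0016 is 127.0.0.1:22 on little-endian), "0A" is TCP_LISTEN,
 * and the trailing %n stores the line length into *len so that
 * tcp4_seq_show() can pad the record to a fixed width.
 */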

static void get_timewait4_sock(const struct inet_timewait_sock *tw,
			       struct seq_file *f, int i, int *len)
{
	__be32 dest, src;
	__u16 destp, srcp;
	int ttd = tw->tw_ttd - jiffies;

	if (ttd < 0)
		ttd = 0;

	dest  = tw->tw_daddr;
	src   = tw->tw_rcv_saddr;
	destp = ntohs(tw->tw_dport);
	srcp  = ntohs(tw->tw_sport);

	seq_printf(f, "%4d: %08X:%04X %08X:%04X"
		" %02X %08X:%08X %02X:%08lX %08X %5d %8d %d %d %pK%n",
		i, src, srcp, dest, destp, tw->tw_substate, 0, 0,
		3, jiffies_to_clock_t(ttd), 0, 0, 0, 0,
		atomic_read(&tw->tw_refcnt), tw, len);
}

#define TMPSZ 150

static int tcp4_seq_show(struct seq_file *seq, void *v)
{
	struct tcp_iter_state *st;
	int len;

	if (v == SEQ_START_TOKEN) {
		seq_printf(seq, "%-*s\n", TMPSZ - 1,
			   "  sl  local_address rem_address   st tx_queue "
			   "rx_queue tr tm->when retrnsmt   uid  timeout "
			   "inode");
		goto out;
	}
	st = seq->private;

	switch (st->state) {
	case TCP_SEQ_STATE_LISTENING:
	case TCP_SEQ_STATE_ESTABLISHED:
		get_tcp4_sock(v, seq, st->num, &len);
		break;
	case TCP_SEQ_STATE_OPENREQ:
		get_openreq4(st->syn_wait_sk, v, seq, st->num, st->uid, &len);
		break;
	case TCP_SEQ_STATE_TIME_WAIT:
		get_timewait4_sock(v, seq, st->num, &len);
		break;
	}
	seq_printf(seq, "%*s\n", TMPSZ - 1 - len, "");
out:
	return 0;
}
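
/*
 * Every record is padded with spaces up to TMPSZ - 1 columns so that
 * userspace can treat /proc/net/tcp as fixed-width lines; the %n in
 * each get_*4_sock() format string reports how many columns were
 * already written, and the final seq_printf() above emits the rest.
 */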

static const struct file_operations tcp_afinfo_seq_fops = {
	.owner   = THIS_MODULE,
	.open    = tcp_seq_open,
	.read    = seq_read,
	.llseek  = seq_lseek,
	.release = seq_release_net
};

static struct tcp_seq_afinfo tcp4_seq_afinfo = {
	.name		= "tcp",
	.family		= AF_INET,
	.seq_fops	= &tcp_afinfo_seq_fops,
	.seq_ops	= {
		.show		= tcp4_seq_show,
	},
};

static int __net_init tcp4_proc_init_net(struct net *net)
{
	return tcp_proc_register(net, &tcp4_seq_afinfo);
}

static void __net_exit tcp4_proc_exit_net(struct net *net)
{
	tcp_proc_unregister(net, &tcp4_seq_afinfo);
}

static struct pernet_operations tcp4_net_ops = {
	.init = tcp4_proc_init_net,
	.exit = tcp4_proc_exit_net,
};

int __init tcp4_proc_init(void)
{
	return register_pernet_subsys(&tcp4_net_ops);
}

void tcp4_proc_exit(void)
{
	unregister_pernet_subsys(&tcp4_net_ops);
}
#endif /* CONFIG_PROC_FS */

struct sk_buff **tcp4_gro_receive(struct sk_buff **head, struct sk_buff *skb)
{
	const struct iphdr *iph = skb_gro_network_header(skb);

	switch (skb->ip_summed) {
	case CHECKSUM_COMPLETE:
		if (!tcp_v4_check(skb_gro_len(skb), iph->saddr, iph->daddr,
				  skb->csum)) {
			skb->ip_summed = CHECKSUM_UNNECESSARY;
			break;
		}

		/* fall through */
	case CHECKSUM_NONE:
		NAPI_GRO_CB(skb)->flush = 1;
		return NULL;
	}

	return tcp_gro_receive(head, skb);
}

int tcp4_gro_complete(struct sk_buff *skb)
{
	const struct iphdr *iph = ip_hdr(skb);
	struct tcphdr *th = tcp_hdr(skb);

	th->check = ~tcp_v4_check(skb->len - skb_transport_offset(skb),
				  iph->saddr, iph->daddr, 0);
	skb_shinfo(skb)->gso_type = SKB_GSO_TCPV4;

	return tcp_gro_complete(skb);
}
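
/*
 * GRO note: tcp4_gro_receive() only lets segments with a verified (or
 * explicitly unnecessary) checksum into the generic aggregation path;
 * everything else is flagged ->flush so it bypasses merging.  Once a
 * merged super-packet leaves GRO, tcp4_gro_complete() re-seeds th->check
 * with the complemented pseudo-header sum for the new length and marks
 * the skb as TCPv4 GSO so it can be re-segmented later if needed.
 */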

struct proto tcp_prot = {
	.name			= "TCP",
	.owner			= THIS_MODULE,
	.close			= tcp_close,
	.connect		= tcp_v4_connect,
	.disconnect		= tcp_disconnect,
	.accept			= inet_csk_accept,
	.ioctl			= tcp_ioctl,
	.init			= tcp_v4_init_sock,
	.destroy		= tcp_v4_destroy_sock,
	.shutdown		= tcp_shutdown,
	.setsockopt		= tcp_setsockopt,
	.getsockopt		= tcp_getsockopt,
	.recvmsg		= tcp_recvmsg,
	.sendmsg		= tcp_sendmsg,
	.sendpage		= tcp_sendpage,
	.backlog_rcv		= tcp_v4_do_rcv,
	.hash			= inet_hash,
	.unhash			= inet_unhash,
	.get_port		= inet_csk_get_port,
	.enter_memory_pressure	= tcp_enter_memory_pressure,
	.sockets_allocated	= &tcp_sockets_allocated,
	.orphan_count		= &tcp_orphan_count,
	.memory_allocated	= &tcp_memory_allocated,
	.memory_pressure	= &tcp_memory_pressure,
	.sysctl_wmem		= sysctl_tcp_wmem,
	.sysctl_rmem		= sysctl_tcp_rmem,
	.max_header		= MAX_TCP_HEADER,
	.obj_size		= sizeof(struct tcp_sock),
	.slab_flags		= SLAB_DESTROY_BY_RCU,
	.twsk_prot		= &tcp_timewait_sock_ops,
	.rsk_prot		= &tcp_request_sock_ops,
	.h.hashinfo		= &tcp_hashinfo,
	.no_autobind		= true,
#ifdef CONFIG_COMPAT
	.compat_setsockopt	= compat_tcp_setsockopt,
	.compat_getsockopt	= compat_tcp_getsockopt,
#endif
#ifdef CONFIG_CGROUP_MEM_RES_CTLR_KMEM
	.init_cgroup		= tcp_init_cgroup,
	.destroy_cgroup		= tcp_destroy_cgroup,
	.proto_cgroup		= tcp_proto_cgroup,
#endif
};
EXPORT_SYMBOL(tcp_prot);
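
/*
 * A few non-obvious fields above: .no_autobind means the inet layer
 * never auto-assigns a local port on sendmsg; a TCP socket acquires its
 * port at connect()/listen() time instead.  SLAB_DESTROY_BY_RCU lets the
 * lookup code walk sockets under RCU, relying on the nulls-list protocol
 * rather than a per-object RCU grace period when a struct tcp_sock is
 * freed and immediately reused.
 */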

static int __net_init tcp_sk_init(struct net *net)
{
	return inet_ctl_sock_create(&net->ipv4.tcp_sock,
				    PF_INET, SOCK_RAW, IPPROTO_TCP, net);
}

static void __net_exit tcp_sk_exit(struct net *net)
{
	inet_ctl_sock_destroy(net->ipv4.tcp_sock);
}

static void __net_exit tcp_sk_exit_batch(struct list_head *net_exit_list)
{
	inet_twsk_purge(&tcp_hashinfo, &tcp_death_row, AF_INET);
}

static struct pernet_operations __net_initdata tcp_sk_ops = {
	.init	    = tcp_sk_init,
	.exit	    = tcp_sk_exit,
	.exit_batch = tcp_sk_exit_batch,
};

void __init tcp_v4_init(void)
{
	inet_hashinfo_init(&tcp_hashinfo);
	if (register_pernet_subsys(&tcp_sk_ops))
		panic("Failed to create the TCP control socket.\n");
}