/*
 * INET		An implementation of the TCP/IP protocol suite for the LINUX
 *		operating system.  INET is implemented using the BSD Socket
 *		interface as the means of communication with the user level.
 *
 *		Implementation of the Transmission Control Protocol (TCP).
 *
 *		Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
 *		Mark Evans, <evansmp@uhura.aston.ac.uk>
 *		Corey Minyard <wf-rch!minyard@relay.EU.net>
 *		Florian La Roche, <flla@stud.uni-sb.de>
 *		Charles Hedrick, <hedrick@klinzhai.rutgers.edu>
 *		Linus Torvalds, <torvalds@cs.helsinki.fi>
 *		Alan Cox, <gw4pts@gw4pts.ampr.org>
 *		Matthew Dillon, <dillon@apollo.west.oic.com>
 *		Arnt Gulbrandsen, <agulbra@nvg.unit.no>
 *		Jorge Cwik, <jorge@laser.satlink.net>
 */
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/sysctl.h>
#include <linux/workqueue.h>

#include <net/tcp.h>
#include <net/inet_common.h>
int sysctl_tcp_syncookies __read_mostly = 1;
EXPORT_SYMBOL(sysctl_tcp_syncookies);

int sysctl_tcp_abort_on_overflow __read_mostly;
struct inet_timewait_death_row tcp_death_row = {
	.sysctl_max_tw_buckets = NR_FILE * 2,
	.period		= TCP_TIMEWAIT_LEN / INET_TWDR_TWKILL_SLOTS,
	.death_lock	= __SPIN_LOCK_UNLOCKED(tcp_death_row.death_lock),
	.hashinfo	= &tcp_hashinfo,
	.tw_timer	= TIMER_INITIALIZER(inet_twdr_hangman, 0,
					    (unsigned long)&tcp_death_row),
	.twkill_work	= __WORK_INITIALIZER(tcp_death_row.twkill_work,
					     inet_twdr_twkill_work),
	/* Short-time timewait calendar */
	.twcal_hand	= -1,
	.twcal_timer	= TIMER_INITIALIZER(inet_twdr_twcal_tick, 0,
					    (unsigned long)&tcp_death_row),
};
EXPORT_SYMBOL_GPL(tcp_death_row);
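
/* Illustrative note (not in the original file): the slow timer sweeps
 * TIME-WAIT buckets in INET_TWDR_TWKILL_SLOTS rounds, so each slot covers
 * .period = TCP_TIMEWAIT_LEN / INET_TWDR_TWKILL_SLOTS.  Assuming the usual
 * values TCP_TIMEWAIT_LEN = 60*HZ and INET_TWDR_TWKILL_SLOTS = 8, a slot
 * is visited roughly every 7.5 seconds:
 */
#if 0	/* example only, not compiled */
	period = (60 * HZ) / 8;		/* 7.5 s per kill slot */
#endif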
/* VJ's idea. Save last timestamp seen from this destination and hold
 * it at least for the normal timewait interval, to use for duplicate
 * segment detection in subsequent connections, before they enter
 * synchronized state.
 */
static int tcp_remember_stamp(struct sock *sk)
{
	const struct inet_connection_sock *icsk = inet_csk(sk);
	struct tcp_sock *tp = tcp_sk(sk);
	struct inet_peer *peer;
	bool release_it;

	peer = icsk->icsk_af_ops->get_peer(sk, &release_it);
	if (peer) {
		if ((s32)(peer->tcp_ts - tp->rx_opt.ts_recent) <= 0 ||
		    ((u32)get_seconds() - peer->tcp_ts_stamp > TCP_PAWS_MSL &&
		     peer->tcp_ts_stamp <= (u32)tp->rx_opt.ts_recent_stamp)) {
			peer->tcp_ts_stamp = (u32)tp->rx_opt.ts_recent_stamp;
			peer->tcp_ts = tp->rx_opt.ts_recent;
		}
		if (release_it)
			inet_putpeer(peer);
		return 1;
	}

	return 0;
}
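
/* Illustrative sketch (not part of the original code): the test
 * (s32)(peer->tcp_ts - tp->rx_opt.ts_recent) <= 0 above is a wrap-safe
 * "not newer" comparison on 32-bit timestamps.  Casting the unsigned
 * difference to s32 keeps the answer right even across 2^32 wrap:
 */
#if 0	/* example only, not compiled */
static bool ts_not_newer(u32 stored, u32 fresh)
{
	/* e.g. stored = 0xfffffff0, fresh = 0x10: the difference is
	 * 0xffffffe0, i.e. -32 as s32, so the stored stamp is treated
	 * as older despite the wrap. */
	return (s32)(stored - fresh) <= 0;
}
#endif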
static int tcp_tw_remember_stamp(struct inet_timewait_sock *tw)
{
	struct sock *sk = (struct sock *) tw;
	struct inet_peer *peer;

	peer = twsk_getpeer(sk);
	if (peer) {
		const struct tcp_timewait_sock *tcptw = tcp_twsk(sk);

		if ((s32)(peer->tcp_ts - tcptw->tw_ts_recent) <= 0 ||
		    ((u32)get_seconds() - peer->tcp_ts_stamp > TCP_PAWS_MSL &&
		     peer->tcp_ts_stamp <= (u32)tcptw->tw_ts_recent_stamp)) {
			peer->tcp_ts_stamp = (u32)tcptw->tw_ts_recent_stamp;
			peer->tcp_ts	   = tcptw->tw_ts_recent;
		}
		inet_putpeer(peer);
		return 1;
	}

	return 0;
}
static __inline__ int tcp_in_window(u32 seq, u32 end_seq, u32 s_win, u32 e_win)
{
	if (seq == s_win)
		return 1;
	if (after(end_seq, s_win) && before(seq, e_win))
		return 1;
	return seq == e_win && seq == end_seq;
}
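
/* Illustrative sketch (not part of the original code): tcp_in_window()
 * accepts a segment if any part of [seq, end_seq] overlaps the receive
 * window [s_win, e_win), plus the degenerate cases of a zero-length
 * segment sitting exactly at a window edge.  after() and before() are
 * the usual wrap-safe signed comparisons, so the check also works when
 * the window straddles the 2^32 sequence-number wrap:
 */
#if 0	/* example only, not compiled */
	/* window [1000, 2000): */
	tcp_in_window(1500, 1600, 1000, 2000);	/* 1: fully inside      */
	tcp_in_window( 900, 1100, 1000, 2000);	/* 1: tail overlaps     */
	tcp_in_window(2000, 2000, 1000, 2000);	/* 1: bare ACK at e_win */
	tcp_in_window(2500, 2600, 1000, 2000);	/* 0: beyond the window */
#endif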
/*
 * * Main purpose of TIME-WAIT state is to close the connection gracefully,
 *   when one of the ends sits in LAST-ACK or CLOSING retransmitting FIN
 *   (and, probably, a tail of data) and one or more of our ACKs are lost.
 * * What is the TIME-WAIT timeout? It is associated with maximal packet
 *   lifetime in the internet, which leads to the wrong conclusion that
 *   it is set to catch "old duplicate segments" wandering out of their path.
 *   It is not quite correct. This timeout is calculated so that it exceeds
 *   the maximal retransmission timeout enough to allow losing one (or more)
 *   segments sent by the peer and our ACKs. This time may be calculated from RTO.
 * * When a TIME-WAIT socket receives RST, it means that the other end
 *   finally closed and we are allowed to kill TIME-WAIT too.
 * * The second purpose of TIME-WAIT is catching old duplicate segments.
 *   Well, certainly it is pure paranoia, but if we load TIME-WAIT
 *   with this semantics, we MUST NOT kill TIME-WAIT state with RSTs.
 * * If we invented some more clever way to catch duplicates
 *   (f.e. based on PAWS), we could truncate TIME-WAIT to several RTOs.
 *
 * The algorithm below is based on FORMAL INTERPRETATION of RFCs.
 * When you compare it to RFCs, please, read section SEGMENT ARRIVES
 * from the very beginning.
 *
 * NOTE. With recycling (and later with fin-wait-2) TW bucket
 * is _not_ stateless. It means that, strictly speaking, we must
 * spinlock it. I do not want! Well, the probability of misbehaviour
 * is ridiculously low and, it seems, we could use some mb() tricks
 * to avoid misreading sequence numbers, states etc.  --ANK
 */
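
/* Worked example (not in the original file): if the peer's FIN is lost,
 * it retransmits after roughly RTO, then 2*RTO after that, and so on
 * (exponential backoff).  A timeout of 3.5*RTO, as tcp_time_wait() below
 * uses for the recycling case, therefore survives the loss of the first
 * FIN and of our ACK to its first retransmission, which is the "exceeds
 * maximal retransmission timeout enough" condition described above.
 */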
enum tcp_tw_status
tcp_timewait_state_process(struct inet_timewait_sock *tw, struct sk_buff *skb,
			   const struct tcphdr *th)
{
	struct tcp_options_received tmp_opt;
	const u8 *hash_location;
	struct tcp_timewait_sock *tcptw = tcp_twsk((struct sock *)tw);
	int paws_reject = 0;

	tmp_opt.saw_tstamp = 0;
	if (th->doff > (sizeof(*th) >> 2) && tcptw->tw_ts_recent_stamp) {
		tcp_parse_options(skb, &tmp_opt, &hash_location, 0);

		if (tmp_opt.saw_tstamp) {
			tmp_opt.ts_recent	= tcptw->tw_ts_recent;
			tmp_opt.ts_recent_stamp	= tcptw->tw_ts_recent_stamp;
			paws_reject = tcp_paws_reject(&tmp_opt, th->rst);
		}
	}
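
	/* Illustrative sketch (not part of the original code): with the
	 * stored values loaded into tmp_opt, tcp_paws_reject() boils down
	 * to the PAWS test of RFC 1323 -- reject when the segment's
	 * timestamp is older than the last one seen, unless the stored
	 * stamp itself has gone stale (older than TCP_PAWS_24DAYS):
	 */
#if 0	/* example only, not compiled */
	if ((s32)(tmp_opt.rcv_tsval - tmp_opt.ts_recent) < 0 &&
	    get_seconds() < tmp_opt.ts_recent_stamp + TCP_PAWS_24DAYS)
		/* PAWS says: treat this segment as an old duplicate */;
#endif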
	if (tw->tw_substate == TCP_FIN_WAIT2) {
		/* Just repeat all the checks of tcp_rcv_state_process() */

		/* Out of window, send ACK */
		if (paws_reject ||
		    !tcp_in_window(TCP_SKB_CB(skb)->seq, TCP_SKB_CB(skb)->end_seq,
				   tcptw->tw_rcv_nxt,
				   tcptw->tw_rcv_nxt + tcptw->tw_rcv_wnd))
			return TCP_TW_ACK;

		if (th->rst)
			goto kill;

		if (th->syn && !before(TCP_SKB_CB(skb)->seq, tcptw->tw_rcv_nxt))
			goto kill_with_rst;

		/* Dup ACK? */
		if (!th->ack ||
		    !after(TCP_SKB_CB(skb)->end_seq, tcptw->tw_rcv_nxt) ||
		    TCP_SKB_CB(skb)->end_seq == TCP_SKB_CB(skb)->seq) {
			inet_twsk_put(tw);
			return TCP_TW_SUCCESS;
		}

		/* New data or FIN. If new data arrive after half-duplex close,
		 * reset.
		 */
		if (!th->fin ||
		    TCP_SKB_CB(skb)->end_seq != tcptw->tw_rcv_nxt + 1) {
kill_with_rst:
			inet_twsk_deschedule(tw, &tcp_death_row);
			inet_twsk_put(tw);
			return TCP_TW_RST;
		}

		/* FIN arrived, enter true time-wait state. */
		tw->tw_substate	  = TCP_TIME_WAIT;
		tcptw->tw_rcv_nxt = TCP_SKB_CB(skb)->end_seq;
		if (tmp_opt.saw_tstamp) {
			tcptw->tw_ts_recent_stamp = get_seconds();
			tcptw->tw_ts_recent	  = tmp_opt.rcv_tsval;
		}

		if (tcp_death_row.sysctl_tw_recycle &&
		    tcptw->tw_ts_recent_stamp &&
		    tcp_tw_remember_stamp(tw))
			inet_twsk_schedule(tw, &tcp_death_row, tw->tw_timeout,
					   TCP_TIMEWAIT_LEN);
		else
			inet_twsk_schedule(tw, &tcp_death_row, TCP_TIMEWAIT_LEN,
					   TCP_TIMEWAIT_LEN);
		return TCP_TW_ACK;
	}
	/*
	 *	Now real TIME-WAIT state.
	 *
	 *	RFC 1122:
	 *	"When a connection is [...] on TIME-WAIT state [...]
	 *	[a TCP] MAY accept a new SYN from the remote TCP to
	 *	reopen the connection directly, if it:
	 *
	 *	(1)  assigns its initial sequence number for the new
	 *	connection to be larger than the largest sequence
	 *	number it used on the previous connection incarnation,
	 *	and
	 *
	 *	(2)  returns to TIME-WAIT state if the SYN turns out
	 *	to be an old duplicate".
	 */

	if (!paws_reject &&
	    (TCP_SKB_CB(skb)->seq == tcptw->tw_rcv_nxt &&
	     (TCP_SKB_CB(skb)->seq == TCP_SKB_CB(skb)->end_seq || th->rst))) {
		/* In window segment, it may be only reset or bare ack. */

		if (th->rst) {
			/* This is TIME_WAIT assassination, in two flavors.
			 * Oh well... nobody has a sufficient solution to this
			 * protocol bug yet.
			 */
			if (sysctl_tcp_rfc1337 == 0) {
kill:
				inet_twsk_deschedule(tw, &tcp_death_row);
				inet_twsk_put(tw);
				return TCP_TW_SUCCESS;
			}
		}
		inet_twsk_schedule(tw, &tcp_death_row, TCP_TIMEWAIT_LEN,
				   TCP_TIMEWAIT_LEN);

		if (tmp_opt.saw_tstamp) {
			tcptw->tw_ts_recent	  = tmp_opt.rcv_tsval;
			tcptw->tw_ts_recent_stamp = get_seconds();
		}

		inet_twsk_put(tw);
		return TCP_TW_SUCCESS;
	}
	/* Out of window segment.

	   All the segments are ACKed immediately.

	   The only exception is a new SYN. We accept it, if it is
	   not an old duplicate and we are not in danger of being killed
	   by delayed old duplicates. The RFC check -- that it carries a
	   newer sequence number -- works at rates < 40 Mbit/sec.
	   However, if PAWS works, it is reliable AND, even more,
	   we may even relax the silly seq space cutoff.

	   RED-PEN: we violate the main RFC requirement: if this SYN turns
	   out to be an old duplicate (i.e. we receive RST in reply to
	   SYN-ACK), we must return the socket to time-wait state. It is
	   not good, but not fatal yet.
	 */
	if (th->syn && !th->rst && !th->ack && !paws_reject &&
	    (after(TCP_SKB_CB(skb)->seq, tcptw->tw_rcv_nxt) ||
	     (tmp_opt.saw_tstamp &&
	      (s32)(tcptw->tw_ts_recent - tmp_opt.rcv_tsval) < 0))) {
		u32 isn = tcptw->tw_snd_nxt + 65535 + 2;
		if (isn == 0)
			isn++;
		TCP_SKB_CB(skb)->when = isn;
		return TCP_TW_SYN;
	}
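
	/* Illustrative note (not in the original code): the new ISN above
	 * satisfies requirement (1) of the RFC 1122 text quoted earlier.
	 * tw_snd_nxt is one past the highest sequence number we sent on
	 * the old incarnation, and no old segment can reach more than one
	 * maximal (65535-byte, pre-window-scaling) window beyond it, so
	 * e.g. tw_snd_nxt = 100000 yields isn = 165537, safely above
	 * anything the old connection could have put on the wire.
	 */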
	if (paws_reject)
		NET_INC_STATS_BH(twsk_net(tw), LINUX_MIB_PAWSESTABREJECTED);

	if (!th->rst) {
		/* In this case we must reset the TIMEWAIT timer.
		 *
		 * If it is ACKless SYN it may be both old duplicate
		 * and new good SYN with random sequence number <rcv_nxt.
		 * Do not reschedule in the last case.
		 */
		if (paws_reject || th->ack)
			inet_twsk_schedule(tw, &tcp_death_row, TCP_TIMEWAIT_LEN,
					   TCP_TIMEWAIT_LEN);

		/* Send ACK. Note, we do not put the bucket,
		 * it will be released by caller.
		 */
		return TCP_TW_ACK;
	}
	inet_twsk_put(tw);
	return TCP_TW_SUCCESS;
}
EXPORT_SYMBOL(tcp_timewait_state_process);
/*
 * Move a socket to time-wait or dead fin-wait-2 state.
 */
void tcp_time_wait(struct sock *sk, int state, int timeo)
{
	struct inet_timewait_sock *tw = NULL;
	const struct inet_connection_sock *icsk = inet_csk(sk);
	const struct tcp_sock *tp = tcp_sk(sk);
	int recycle_ok = 0;

	if (tcp_death_row.sysctl_tw_recycle && tp->rx_opt.ts_recent_stamp)
		recycle_ok = tcp_remember_stamp(sk);

	if (tcp_death_row.tw_count < tcp_death_row.sysctl_max_tw_buckets)
		tw = inet_twsk_alloc(sk, state);

	if (tw != NULL) {
		struct tcp_timewait_sock *tcptw = tcp_twsk((struct sock *)tw);
		const int rto = (icsk->icsk_rto << 2) - (icsk->icsk_rto >> 1);
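
		/* Illustrative note (not in the original code): the shift
		 * expression above is integer arithmetic for 3.5 * RTO --
		 * (rto << 2) is 4*RTO and (rto >> 1) is RTO/2 -- so an
		 * icsk_rto of 200 jiffies gives rto = 800 - 100 = 700.
		 */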
		tw->tw_transparent	= inet_sk(sk)->transparent;
		tw->tw_rcv_wscale	= tp->rx_opt.rcv_wscale;
		tcptw->tw_rcv_nxt	= tp->rcv_nxt;
		tcptw->tw_snd_nxt	= tp->snd_nxt;
		tcptw->tw_rcv_wnd	= tcp_receive_window(tp);
		tcptw->tw_ts_recent	= tp->rx_opt.ts_recent;
		tcptw->tw_ts_recent_stamp = tp->rx_opt.ts_recent_stamp;

#if IS_ENABLED(CONFIG_IPV6)
		if (tw->tw_family == PF_INET6) {
			struct ipv6_pinfo *np = inet6_sk(sk);
			struct inet6_timewait_sock *tw6;

			tw->tw_ipv6_offset = inet6_tw_offset(sk->sk_prot);
			tw6 = inet6_twsk((struct sock *)tw);
			tw6->tw_v6_daddr = np->daddr;
			tw6->tw_v6_rcv_saddr = np->rcv_saddr;
			tw->tw_tclass = np->tclass;
			tw->tw_ipv6only = np->ipv6only;
		}
#endif
#ifdef CONFIG_TCP_MD5SIG
		/*
		 * The timewait bucket does not have the key DB from the
		 * sock structure. We just make a quick copy of the
		 * md5 key being used (if indeed we are using one)
		 * so the timewait ack generating code has the key.
		 */
		do {
			struct tcp_md5sig_key *key;

			memset(tcptw->tw_md5_key, 0, sizeof(tcptw->tw_md5_key));
			tcptw->tw_md5_keylen = 0;
			key = tp->af_specific->md5_lookup(sk, sk);
			if (key != NULL) {
				memcpy(&tcptw->tw_md5_key, key->key, key->keylen);
				tcptw->tw_md5_keylen = key->keylen;
				if (tcp_alloc_md5sig_pool(sk) == NULL)
					BUG();
			}
		} while (0);
#endif
		/* Linkage updates. */
		__inet_twsk_hashdance(tw, sk, &tcp_hashinfo);

		/* Get the TIME_WAIT timeout firing. */
		if (timeo < rto)
			timeo = rto;

		if (recycle_ok) {
			tw->tw_timeout = rto;
		} else {
			tw->tw_timeout = TCP_TIMEWAIT_LEN;
			if (state == TCP_TIME_WAIT)
				timeo = TCP_TIMEWAIT_LEN;
		}

		inet_twsk_schedule(tw, &tcp_death_row, timeo,
				   TCP_TIMEWAIT_LEN);
		inet_twsk_put(tw);
	} else {
		/* Sorry, if we're out of memory, just CLOSE this
		 * socket up.  We've got bigger problems than
		 * non-graceful socket closings.
		 */
		NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPTIMEWAITOVERFLOW);
	}

	tcp_update_metrics(sk);
	tcp_done(sk);
}
void tcp_twsk_destructor(struct sock *sk)
{
#ifdef CONFIG_TCP_MD5SIG
	struct tcp_timewait_sock *twsk = tcp_twsk(sk);

	if (twsk->tw_md5_keylen)
		tcp_free_md5sig_pool();
#endif
}
EXPORT_SYMBOL_GPL(tcp_twsk_destructor);
static inline void TCP_ECN_openreq_child(struct tcp_sock *tp,
					 struct request_sock *req)
{
	tp->ecn_flags = inet_rsk(req)->ecn_ok ? TCP_ECN_OK : 0;
}
/* This is not only more efficient than what we used to do, it eliminates
 * a lot of code duplication between IPv4/IPv6 SYN recv processing. -DaveM
 *
 * Actually, we could save lots of memory writes here: the tp of the
 * listening socket contains all the necessary default parameters.
 */
struct sock *tcp_create_openreq_child(struct sock *sk, struct request_sock *req, struct sk_buff *skb)
{
	struct sock *newsk = inet_csk_clone_lock(sk, req, GFP_ATOMIC);

	if (newsk != NULL) {
		const struct inet_request_sock *ireq = inet_rsk(req);
		struct tcp_request_sock *treq = tcp_rsk(req);
		struct inet_connection_sock *newicsk = inet_csk(newsk);
		struct tcp_sock *newtp = tcp_sk(newsk);
		struct tcp_sock *oldtp = tcp_sk(sk);
		struct tcp_cookie_values *oldcvp = oldtp->cookie_values;

		/* TCP Cookie Transactions require space for the cookie pair,
		 * as it differs for each connection.  There is no need to
		 * copy any s_data_payload stored at the original socket.
		 * Failure will prevent resuming the connection.
		 *
		 * Presumed copied, in order of appearance:
		 *	cookie_in_always, cookie_out_never
		 */
		if (oldcvp != NULL) {
			struct tcp_cookie_values *newcvp =
				kzalloc(sizeof(*newtp->cookie_values),
					GFP_ATOMIC);

			if (newcvp != NULL) {
				kref_init(&newcvp->kref);
				newcvp->cookie_desired =
						oldcvp->cookie_desired;
				newtp->cookie_values = newcvp;
			} else {
				/* Not Yet Implemented */
				newtp->cookie_values = NULL;
			}
		}
		/* Now setup tcp_sock */
		newtp->pred_flags = 0;

		newtp->rcv_wup = newtp->copied_seq =
		newtp->rcv_nxt = treq->rcv_isn + 1;

		newtp->snd_sml = newtp->snd_una =
		newtp->snd_nxt = newtp->snd_up =
			treq->snt_isn + 1 + tcp_s_data_size(oldtp);

		tcp_prequeue_init(newtp);

		tcp_init_wl(newtp, treq->rcv_isn);

		newtp->srtt = 0;
		newtp->mdev = TCP_TIMEOUT_INIT;
		newicsk->icsk_rto = TCP_TIMEOUT_INIT;

		newtp->packets_out = 0;
		newtp->retrans_out = 0;
		newtp->sacked_out = 0;
		newtp->fackets_out = 0;
		newtp->snd_ssthresh = TCP_INFINITE_SSTHRESH;

		/* So many TCP implementations out there (incorrectly) count the
		 * initial SYN frame in their delayed-ACK and congestion control
		 * algorithms that we must have the following bandaid to talk
		 * efficiently to them.  -DaveM
		 */
		newtp->snd_cwnd = TCP_INIT_CWND;
		newtp->snd_cwnd_cnt = 0;
		newtp->bytes_acked = 0;

		newtp->frto_counter = 0;
		newtp->frto_highmark = 0;

		if (newicsk->icsk_ca_ops != &tcp_init_congestion_ops &&
		    !try_module_get(newicsk->icsk_ca_ops->owner))
			newicsk->icsk_ca_ops = &tcp_init_congestion_ops;

		tcp_set_ca_state(newsk, TCP_CA_Open);
		tcp_init_xmit_timers(newsk);
		skb_queue_head_init(&newtp->out_of_order_queue);
		newtp->write_seq = newtp->pushed_seq =
			treq->snt_isn + 1 + tcp_s_data_size(oldtp);

		newtp->rx_opt.saw_tstamp = 0;

		newtp->rx_opt.dsack = 0;
		newtp->rx_opt.num_sacks = 0;

		newtp->urg_data = 0;
		if (sock_flag(newsk, SOCK_KEEPOPEN))
			inet_csk_reset_keepalive_timer(newsk,
						       keepalive_time_when(newtp));

		newtp->rx_opt.tstamp_ok = ireq->tstamp_ok;
		if ((newtp->rx_opt.sack_ok = ireq->sack_ok) != 0) {
			if (sysctl_tcp_fack)
				tcp_enable_fack(newtp);
		}
		newtp->window_clamp = req->window_clamp;
		newtp->rcv_ssthresh = req->rcv_wnd;
		newtp->rcv_wnd = req->rcv_wnd;
		newtp->rx_opt.wscale_ok = ireq->wscale_ok;
		if (newtp->rx_opt.wscale_ok) {
			newtp->rx_opt.snd_wscale = ireq->snd_wscale;
			newtp->rx_opt.rcv_wscale = ireq->rcv_wscale;
		} else {
			newtp->rx_opt.snd_wscale = newtp->rx_opt.rcv_wscale = 0;
			newtp->window_clamp = min(newtp->window_clamp, 65535U);
		}
		newtp->snd_wnd = (ntohs(tcp_hdr(skb)->window) <<
				  newtp->rx_opt.snd_wscale);
		newtp->max_window = newtp->snd_wnd;
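
		/* Illustrative note (not in the original code): the raw
		 * 16-bit window from the header is shifted by the scale
		 * the peer advertised (RFC 1323).  E.g. a header window
		 * of 5840 with snd_wscale = 7 gives a send window of
		 * 5840 << 7 = 747520 bytes; with scaling disabled above,
		 * snd_wscale is 0 and the window stays below 64 KB.
		 */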
		if (newtp->rx_opt.tstamp_ok) {
			newtp->rx_opt.ts_recent = req->ts_recent;
			newtp->rx_opt.ts_recent_stamp = get_seconds();
			newtp->tcp_header_len = sizeof(struct tcphdr) + TCPOLEN_TSTAMP_ALIGNED;
		} else {
			newtp->rx_opt.ts_recent_stamp = 0;
			newtp->tcp_header_len = sizeof(struct tcphdr);
		}
#ifdef CONFIG_TCP_MD5SIG
		newtp->md5sig_info = NULL;	/*XXX*/
		if (newtp->af_specific->md5_lookup(sk, newsk))
			newtp->tcp_header_len += TCPOLEN_MD5SIG_ALIGNED;
#endif
		if (skb->len >= TCP_MSS_DEFAULT + newtp->tcp_header_len)
			newicsk->icsk_ack.last_seg_size = skb->len - newtp->tcp_header_len;
		newtp->rx_opt.mss_clamp = req->mss;
		TCP_ECN_openreq_child(newtp, req);

		TCP_INC_STATS_BH(sock_net(sk), TCP_MIB_PASSIVEOPENS);
	}
	return newsk;
}
EXPORT_SYMBOL(tcp_create_openreq_child);
/*
 *	Process an incoming packet for SYN_RECV sockets represented
 *	as a request_sock.
 */

struct sock *tcp_check_req(struct sock *sk, struct sk_buff *skb,
			   struct request_sock *req,
			   struct request_sock **prev)
{
	struct tcp_options_received tmp_opt;
	const u8 *hash_location;
	struct sock *child;
	const struct tcphdr *th = tcp_hdr(skb);
	__be32 flg = tcp_flag_word(th) & (TCP_FLAG_RST|TCP_FLAG_SYN|TCP_FLAG_ACK);
	int paws_reject = 0;

	tmp_opt.saw_tstamp = 0;
	if (th->doff > (sizeof(struct tcphdr)>>2)) {
		tcp_parse_options(skb, &tmp_opt, &hash_location, 0);

		if (tmp_opt.saw_tstamp) {
			tmp_opt.ts_recent = req->ts_recent;
			/* We do not store true stamp, but it is not required,
			 * it can be estimated (approximately)
			 * from another data.
			 */
			tmp_opt.ts_recent_stamp = get_seconds() - ((TCP_TIMEOUT_INIT/HZ)<<req->retrans);
			paws_reject = tcp_paws_reject(&tmp_opt, th->rst);
		}
	}
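
	/* Worked example (not in the original code): the estimate above
	 * assumes the request has waited roughly one initial timeout per
	 * SYN-ACK retransmission, doubling each time.  Assuming
	 * TCP_TIMEOUT_INIT is 1*HZ (one second), a request with
	 * retrans = 2 gets ts_recent_stamp = now - (1 << 2) = now - 4
	 * seconds, which is close enough for the PAWS staleness check.
	 */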
	/* Check for pure retransmitted SYN. */
	if (TCP_SKB_CB(skb)->seq == tcp_rsk(req)->rcv_isn &&
	    flg == TCP_FLAG_SYN &&
	    !paws_reject) {
		/*
		 * RFC793 draws (Incorrectly! It was fixed in RFC1122)
		 * this case on figure 6 and figure 8, but formal
		 * protocol description says NOTHING.
		 * To be more exact, it says that we should send ACK,
		 * because this segment (at least, if it has no data)
		 * is out of window.
		 *
		 *  CONCLUSION: RFC793 (even with RFC1122) DOES NOT
		 *  describe SYN-RECV state. All the description
		 *  is wrong, we cannot believe it and should
		 *  rely only on common sense and implementation
		 *  experience.
		 *
		 * Enforce "SYN-ACK" according to figure 8, figure 6
		 * of RFC793, fixed by RFC1122.
		 */
		req->rsk_ops->rtx_syn_ack(sk, req, NULL);
		return NULL;
	}
	/* Further reproduces section "SEGMENT ARRIVES"
	   for state SYN-RECEIVED of RFC793.
	   It is broken, however -- it fails only
	   when SYNs are crossed.

	   You would think that SYN crossing is impossible here, since
	   we should have a SYN_SENT socket (from connect()) on our end,
	   but this is not true if the crossed SYNs were sent to both
	   ends by a malicious third party.  We must defend against this,
	   and to do that we first verify the ACK (as per RFC793, page
	   36) and reset if it is invalid.  Is this a true full defense?
	   To convince ourselves, let us consider a way in which the ACK
	   test can still pass in this 'malicious crossed SYNs' case.
	   Malicious sender sends identical SYNs (and thus identical sequence
	   numbers) to both A and B:

		A: gets SYN, seq=7
		B: gets SYN, seq=7

	   By our good fortune, both A and B select the same initial
	   send sequence number of seven :-)

		A: sends SYN|ACK, seq=7, ack_seq=8
		B: sends SYN|ACK, seq=7, ack_seq=8

	   So we are now A eating this SYN|ACK, ACK test passes.  So
	   does sequence test, SYN is truncated, and thus we consider
	   it a bare ACK.

	   If icsk->icsk_accept_queue.rskq_defer_accept, we silently drop this
	   bare ACK.  Otherwise, we create an established connection.  Both
	   ends (listening sockets) accept the new incoming connection and try
	   to talk to each other. 8-)

	   Note: This case is both harmless, and rare.  Possibility is about the
	   same as us discovering intelligent life on another planet tomorrow.

	   But generally, we should (RFC lies!) accept ACK
	   from SYNACK both here and in tcp_rcv_state_process().
	   tcp_rcv_state_process() does not, hence, we do not too.

	   Note that the case is absolutely generic:
	   we cannot optimize anything here without
	   violating protocol. All the checks must be made
	   before attempt to create socket.
	 */
	/* RFC793 page 36: "If the connection is in any non-synchronized state ...
	 * and the incoming segment acknowledges something not yet
	 * sent (the segment carries an unacceptable ACK) ...
	 * a reset is sent."
	 *
	 * Invalid ACK: reset will be sent by listening socket
	 */
	if ((flg & TCP_FLAG_ACK) &&
	    (TCP_SKB_CB(skb)->ack_seq !=
	     tcp_rsk(req)->snt_isn + 1 + tcp_s_data_size(tcp_sk(sk))))
		return sk;
	/* Also, it would not be a bad idea to check rcv_tsecr, which
	 * is essentially an ACK extension; too early or too late values
	 * should cause reset in unsynchronized states.
	 */

	/* RFC793: "first check sequence number". */

	if (paws_reject || !tcp_in_window(TCP_SKB_CB(skb)->seq, TCP_SKB_CB(skb)->end_seq,
					  tcp_rsk(req)->rcv_isn + 1, tcp_rsk(req)->rcv_isn + 1 + req->rcv_wnd)) {
		/* Out of window: send ACK and drop. */
		if (!(flg & TCP_FLAG_RST))
			req->rsk_ops->send_ack(sk, skb, req);
		if (paws_reject)
			NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_PAWSESTABREJECTED);
		return NULL;
	}
	/* In sequence, PAWS is OK. */

	if (tmp_opt.saw_tstamp && !after(TCP_SKB_CB(skb)->seq, tcp_rsk(req)->rcv_isn + 1))
		req->ts_recent = tmp_opt.rcv_tsval;

	if (TCP_SKB_CB(skb)->seq == tcp_rsk(req)->rcv_isn) {
		/* Truncate SYN, it is out of window starting
		   at tcp_rsk(req)->rcv_isn + 1. */
		flg &= ~TCP_FLAG_SYN;
	}
	/* RFC793: "second check the RST bit" and
	 *	   "fourth, check the SYN bit"
	 */
	if (flg & (TCP_FLAG_RST|TCP_FLAG_SYN)) {
		TCP_INC_STATS_BH(sock_net(sk), TCP_MIB_ATTEMPTFAILS);
		goto embryonic_reset;
	}

	/* ACK sequence verified above, just make sure ACK is
	 * set.  If ACK not set, just silently drop the packet.
	 */
	if (!(flg & TCP_FLAG_ACK))
		return NULL;

	/* While TCP_DEFER_ACCEPT is active, drop bare ACK. */
	if (req->retrans < inet_csk(sk)->icsk_accept_queue.rskq_defer_accept &&
	    TCP_SKB_CB(skb)->end_seq == tcp_rsk(req)->rcv_isn + 1) {
		inet_rsk(req)->acked = 1;
		NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPDEFERACCEPTDROP);
		return NULL;
	}
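
	/* Illustrative sketch (not part of the original code): the bare-ACK
	 * drop above is what makes the TCP_DEFER_ACCEPT socket option work.
	 * A listener opts in from user space roughly like this (the 5 is a
	 * hypothetical timeout in seconds):
	 */
#if 0	/* example only, not compiled */
	int secs = 5;
	setsockopt(listen_fd, IPPROTO_TCP, TCP_DEFER_ACCEPT,
		   &secs, sizeof(secs));
	/* accept() now completes once data arrives, not on a bare ACK */
#endif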
	if (tmp_opt.saw_tstamp && tmp_opt.rcv_tsecr)
		tcp_rsk(req)->snt_synack = tmp_opt.rcv_tsecr;
	else if (req->retrans) /* don't take RTT sample if retrans && ~TS */
		tcp_rsk(req)->snt_synack = 0;

	/* OK, ACK is valid, create big socket and
	 * feed this segment to it. It will repeat all
	 * the tests. THIS SEGMENT MUST MOVE SOCKET TO
	 * ESTABLISHED STATE. If it will be dropped after
	 * socket is created, wait for troubles.
	 */
	child = inet_csk(sk)->icsk_af_ops->syn_recv_sock(sk, skb, req, NULL);
	if (child == NULL)
		goto listen_overflow;

	inet_csk_reqsk_queue_unlink(sk, req, prev);
	inet_csk_reqsk_queue_removed(sk, req);

	inet_csk_reqsk_queue_add(sk, req, child);
	return child;

listen_overflow:
	if (!sysctl_tcp_abort_on_overflow) {
		inet_rsk(req)->acked = 1;
		return NULL;
	}

embryonic_reset:
	NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_EMBRYONICRSTS);
	if (!(flg & TCP_FLAG_RST))
		req->rsk_ops->send_reset(sk, skb);

	inet_csk_reqsk_queue_drop(sk, req, prev);
	return NULL;
}
EXPORT_SYMBOL(tcp_check_req);
/*
 * Queue segment on the new socket if the new socket is active,
 * otherwise we just shortcircuit this and continue with
 * the new socket.
 */

int tcp_child_process(struct sock *parent, struct sock *child,
		      struct sk_buff *skb)
{
	int ret = 0;
	int state = child->sk_state;

	if (!sock_owned_by_user(child)) {
		ret = tcp_rcv_state_process(child, skb, tcp_hdr(skb),
					    skb->len);
		/* Wakeup parent, send SIGIO */
		if (state == TCP_SYN_RECV && child->sk_state != state)
			parent->sk_data_ready(parent, 0);
	} else {
		/* Alas, it is possible again, because we do lookup
		 * in main socket hash table and lock on listening
		 * socket does not protect us more.
		 */
		__sk_add_backlog(child, skb);
	}

	bh_unlock_sock(child);
	sock_put(child);
	return ret;
}
EXPORT_SYMBOL(tcp_child_process);