/* [linux-2.6/x86.git] net/ipv4/tcp_minisocks.c */

/*
 * INET		An implementation of the TCP/IP protocol suite for the LINUX
 *		operating system.  INET is implemented using the BSD Socket
 *		interface as the means of communication with the user level.
 *
 *		Implementation of the Transmission Control Protocol(TCP).
 *
 * Version:	$Id: tcp_minisocks.c,v 1.15 2002/02/01 22:01:04 davem Exp $
 *
 * Authors:	Ross Biro
 *		Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
 *		Mark Evans, <evansmp@uhura.aston.ac.uk>
 *		Corey Minyard <wf-rch!minyard@relay.EU.net>
 *		Florian La Roche, <flla@stud.uni-sb.de>
 *		Charles Hedrick, <hedrick@klinzhai.rutgers.edu>
 *		Linus Torvalds, <torvalds@cs.helsinki.fi>
 *		Alan Cox, <gw4pts@gw4pts.ampr.org>
 *		Matthew Dillon, <dillon@apollo.west.oic.com>
 *		Arnt Gulbrandsen, <agulbra@nvg.unit.no>
 *		Jorge Cwik, <jorge@laser.satlink.net>
 */

#include <linux/mm.h>
#include <linux/module.h>
#include <linux/sysctl.h>
#include <linux/workqueue.h>
#include <net/tcp.h>
#include <net/inet_common.h>
#include <net/xfrm.h>

#ifdef CONFIG_SYSCTL
#define SYNC_INIT 0 /* let the user enable it */
#else
#define SYNC_INIT 1
#endif

int sysctl_tcp_syncookies __read_mostly = SYNC_INIT;
int sysctl_tcp_abort_on_overflow __read_mostly;

struct inet_timewait_death_row tcp_death_row = {
	.sysctl_max_tw_buckets = NR_FILE * 2,
	.period		= TCP_TIMEWAIT_LEN / INET_TWDR_TWKILL_SLOTS,
	.death_lock	= __SPIN_LOCK_UNLOCKED(tcp_death_row.death_lock),
	.hashinfo	= &tcp_hashinfo,
	.tw_timer	= TIMER_INITIALIZER(inet_twdr_hangman, 0,
					    (unsigned long)&tcp_death_row),
	.twkill_work	= __WORK_INITIALIZER(tcp_death_row.twkill_work,
					     inet_twdr_twkill_work,
					     &tcp_death_row),
/* Short-time timewait calendar */

	.twcal_hand	= -1,
	.twcal_timer	= TIMER_INITIALIZER(inet_twdr_twcal_tick, 0,
					    (unsigned long)&tcp_death_row),
};

EXPORT_SYMBOL_GPL(tcp_death_row);

static __inline__ int tcp_in_window(u32 seq, u32 end_seq, u32 s_win, u32 e_win)
{
	if (seq == s_win)
		return 1;
	if (after(end_seq, s_win) && before(seq, e_win))
		return 1;
	return (seq == e_win && seq == end_seq);
}
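
/*
 * Editor's example (not in the original source): a quick way to read
 * the check above. With rcv_nxt == 1000 and rcv_wnd == 500, i.e. a
 * receive window of [1000, 1500):
 *
 *	tcp_in_window(1000, 1000, 1000, 1500) == 1	bare ACK at rcv_nxt
 *	tcp_in_window( 900, 1100, 1000, 1500) == 1	old data overlapping window
 *	tcp_in_window( 800,  900, 1000, 1500) == 0	entirely old segment
 *	tcp_in_window(1500, 1500, 1000, 1500) == 1	zero-length probe at window end
 *
 * This is the RFC 793 acceptability test with the zero-length and
 * window-edge cases folded in.
 */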

/*
 * * The main purpose of TIME-WAIT state is to close the connection
 *   gracefully, when one of the ends sits in LAST-ACK or CLOSING
 *   retransmitting FIN (and, probably, a tail of data) and one or more
 *   of our ACKs are lost.
 * * What is the TIME-WAIT timeout? It is associated with maximal packet
 *   lifetime in the internet, which leads to the wrong conclusion that
 *   it is set to catch "old duplicate segments" wandering out of their path.
 *   That is not quite correct. This timeout is calculated so that it exceeds
 *   the maximal retransmission timeout by enough to allow for the loss of
 *   one (or more) segments sent by the peer and of our ACKs. This time may
 *   be calculated from the RTO.
 * * When a TIME-WAIT socket receives an RST, it means that the other end
 *   finally closed and we are allowed to kill TIME-WAIT too.
 * * The second purpose of TIME-WAIT is catching old duplicate segments.
 *   Well, certainly it is pure paranoia, but if we load TIME-WAIT
 *   with this semantics, we MUST NOT kill TIME-WAIT state with RSTs.
 * * If we invented some more clever way to catch duplicates
 *   (e.g. based on PAWS), we could truncate TIME-WAIT to several RTOs.
 *
 * The algorithm below is based on a FORMAL INTERPRETATION of the RFCs.
 * When you compare it to the RFCs, please read the section SEGMENT ARRIVES
 * from the very beginning.
 *
 * NOTE. With recycling (and later with fin-wait-2) the TW bucket
 * is _not_ stateless. That means that, strictly speaking, we must
 * spinlock it. I do not want to! Well, the probability of misbehaviour
 * is ridiculously low and, it seems, we could use some mb() tricks
 * to avoid misreading sequence numbers, states etc.  --ANK
 */
enum tcp_tw_status
tcp_timewait_state_process(struct inet_timewait_sock *tw, struct sk_buff *skb,
			   const struct tcphdr *th)
{
	struct tcp_timewait_sock *tcptw = tcp_twsk((struct sock *)tw);
	struct tcp_options_received tmp_opt;
	int paws_reject = 0;

	tmp_opt.saw_tstamp = 0;
	if (th->doff > (sizeof(*th) >> 2) && tcptw->tw_ts_recent_stamp) {
		tcp_parse_options(skb, &tmp_opt, 0);

		if (tmp_opt.saw_tstamp) {
			tmp_opt.ts_recent	= tcptw->tw_ts_recent;
			tmp_opt.ts_recent_stamp	= tcptw->tw_ts_recent_stamp;
			paws_reject = tcp_paws_check(&tmp_opt, th->rst);
		}
	}
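
	/*
	 * Editor's note, a hedged sketch rather than verbatim kernel code:
	 * in kernels of this vintage tcp_paws_check() (include/net/tcp.h)
	 * reports a reject roughly when the arriving tsval is older than
	 * ts_recent AND ts_recent was refreshed within the last
	 * TCP_PAWS_24DAYS seconds; a stale ts_recent is not trusted,
	 * since ~24 days is about how long a 1 kHz timestamp clock takes
	 * to cover half of its 32-bit space (2^31 ms). RST segments are
	 * given extra leniency.
	 */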

	if (tw->tw_substate == TCP_FIN_WAIT2) {
		/* Just repeat all the checks of tcp_rcv_state_process() */

		/* Out of window, send ACK */
		if (paws_reject ||
		    !tcp_in_window(TCP_SKB_CB(skb)->seq, TCP_SKB_CB(skb)->end_seq,
				   tcptw->tw_rcv_nxt,
				   tcptw->tw_rcv_nxt + tcptw->tw_rcv_wnd))
			return TCP_TW_ACK;

		if (th->rst)
			goto kill;

		if (th->syn && !before(TCP_SKB_CB(skb)->seq, tcptw->tw_rcv_nxt))
			goto kill_with_rst;

		/* Dup ACK? */
		if (!after(TCP_SKB_CB(skb)->end_seq, tcptw->tw_rcv_nxt) ||
		    TCP_SKB_CB(skb)->end_seq == TCP_SKB_CB(skb)->seq) {
			inet_twsk_put(tw);
			return TCP_TW_SUCCESS;
		}

		/* New data or FIN. If new data arrive after a half-duplex
		 * close, reset.
		 */
		if (!th->fin ||
		    TCP_SKB_CB(skb)->end_seq != tcptw->tw_rcv_nxt + 1) {
kill_with_rst:
			inet_twsk_deschedule(tw, &tcp_death_row);
			inet_twsk_put(tw);
			return TCP_TW_RST;
		}

		/* FIN arrived, enter true time-wait state. */
		tw->tw_substate	  = TCP_TIME_WAIT;
		tcptw->tw_rcv_nxt = TCP_SKB_CB(skb)->end_seq;
		if (tmp_opt.saw_tstamp) {
			tcptw->tw_ts_recent_stamp = xtime.tv_sec;
			tcptw->tw_ts_recent	  = tmp_opt.rcv_tsval;
		}

		/* I am ashamed, but I failed to make it more elegant.
		 * Yes, it is a direct reference to IP, which is impossible
		 * to generalize to IPv6. Taking into account that IPv6
		 * does not understand recycling in any case, it is not
		 * a big problem in practice. --ANK */
		if (tw->tw_family == AF_INET &&
		    tcp_death_row.sysctl_tw_recycle && tcptw->tw_ts_recent_stamp &&
		    tcp_v4_tw_remember_stamp(tw))
			inet_twsk_schedule(tw, &tcp_death_row, tw->tw_timeout,
					   TCP_TIMEWAIT_LEN);
		else
			inet_twsk_schedule(tw, &tcp_death_row, TCP_TIMEWAIT_LEN,
					   TCP_TIMEWAIT_LEN);
		return TCP_TW_ACK;
	}

	/*
	 * Now real TIME-WAIT state.
	 *
	 * RFC 1122:
	 * "When a connection is [...] on TIME-WAIT state [...]
	 * [a TCP] MAY accept a new SYN from the remote TCP to
	 * reopen the connection directly, if it:
	 *
	 * (1)  assigns its initial sequence number for the new
	 * connection to be larger than the largest sequence
	 * number it used on the previous connection incarnation,
	 * and
	 *
	 * (2)  returns to TIME-WAIT state if the SYN turns out
	 * to be an old duplicate".
	 */

	if (!paws_reject &&
	    (TCP_SKB_CB(skb)->seq == tcptw->tw_rcv_nxt &&
	     (TCP_SKB_CB(skb)->seq == TCP_SKB_CB(skb)->end_seq || th->rst))) {
		/* In window segment, it may be only reset or bare ack. */

		if (th->rst) {
			/* This is TIME_WAIT assassination, in two flavors.
			 * Oh well... nobody has a sufficient solution to this
			 * protocol bug yet.
			 */
			if (sysctl_tcp_rfc1337 == 0) {
kill:
				inet_twsk_deschedule(tw, &tcp_death_row);
				inet_twsk_put(tw);
				return TCP_TW_SUCCESS;
			}
		}
		inet_twsk_schedule(tw, &tcp_death_row, TCP_TIMEWAIT_LEN,
				   TCP_TIMEWAIT_LEN);

		if (tmp_opt.saw_tstamp) {
			tcptw->tw_ts_recent	  = tmp_opt.rcv_tsval;
			tcptw->tw_ts_recent_stamp = xtime.tv_sec;
		}

		inet_twsk_put(tw);
		return TCP_TW_SUCCESS;
	}

	/* Out of window segment.

	   All such segments are ACKed immediately.

	   The only exception is a new SYN. We accept it, if it is
	   not an old duplicate and we are not in danger of being killed
	   by delayed old duplicates. The RFC check (that it carries a
	   newer sequence number) works at rates < 40 Mbit/sec.
	   However, if PAWS works, it is reliable AND, even more,
	   we may even relax the silly seq space cutoff.

	   RED-PEN: we violate the main RFC requirement: if this SYN later
	   turns out to be an old duplicate (i.e. we receive an RST in reply
	   to our SYN-ACK), we must return the socket to time-wait state.
	   It is not good, but not fatal yet.
	 */

	if (th->syn && !th->rst && !th->ack && !paws_reject &&
	    (after(TCP_SKB_CB(skb)->seq, tcptw->tw_rcv_nxt) ||
	     (tmp_opt.saw_tstamp &&
	      (s32)(tcptw->tw_ts_recent - tmp_opt.rcv_tsval) < 0))) {
		u32 isn = tcptw->tw_snd_nxt + 65535 + 2;
		if (isn == 0)
			isn++;
		TCP_SKB_CB(skb)->when = isn;
		return TCP_TW_SYN;
	}
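
	/*
	 * Editor's note: the ISN chosen above lands past the old
	 * connection's send space by one maximal unscaled window plus 2
	 * (65535 + SYN + FIN), satisfying requirement (1) of the RFC 1122
	 * rule quoted earlier. Zero is skipped because, as far as I can
	 * tell, callers treat TCP_SKB_CB(skb)->when == 0 as "no inherited
	 * ISN, generate a fresh one".
	 */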

	if (paws_reject)
		NET_INC_STATS_BH(LINUX_MIB_PAWSESTABREJECTED);

	if (!th->rst) {
		/* In this case we must reset the TIMEWAIT timer.
		 *
		 * If it is an ACKless SYN it may be both an old duplicate
		 * and a new good SYN with a random sequence number < rcv_nxt.
		 * Do not reschedule in the latter case.
		 */
		if (paws_reject || th->ack)
			inet_twsk_schedule(tw, &tcp_death_row, TCP_TIMEWAIT_LEN,
					   TCP_TIMEWAIT_LEN);

		/* Send ACK. Note, we do not put the bucket,
		 * it will be released by the caller.
		 */
		return TCP_TW_ACK;
	}
	inet_twsk_put(tw);
	return TCP_TW_SUCCESS;
}
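
/*
 * Editor's sketch, abridged and hedged (not verbatim kernel code): how
 * the return value of tcp_timewait_state_process() is consumed by the
 * do_time_wait path of tcp_v4_rcv():
 *
 *	switch (tcp_timewait_state_process(inet_twsk(sk), skb, th)) {
 *	case TCP_TW_SYN:
 *		// acceptable new SYN: look up a listener and, if one is
 *		// found, kill the tw bucket and process the SYN against it
 *	case TCP_TW_ACK:
 *		// re-ACK the peer from the timewait bucket
 *	case TCP_TW_RST:
 *		// send a reset
 *	case TCP_TW_SUCCESS:
 *		// nothing more to do, the segment is dropped
 *	}
 */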

/*
 * Move a socket to time-wait or dead fin-wait-2 state.
 */
void tcp_time_wait(struct sock *sk, int state, int timeo)
{
	struct inet_timewait_sock *tw = NULL;
	const struct inet_connection_sock *icsk = inet_csk(sk);
	const struct tcp_sock *tp = tcp_sk(sk);
	int recycle_ok = 0;

	if (tcp_death_row.sysctl_tw_recycle && tp->rx_opt.ts_recent_stamp)
		recycle_ok = icsk->icsk_af_ops->remember_stamp(sk);

	if (tcp_death_row.tw_count < tcp_death_row.sysctl_max_tw_buckets)
		tw = inet_twsk_alloc(sk, state);

	if (tw != NULL) {
		struct tcp_timewait_sock *tcptw = tcp_twsk((struct sock *)tw);
		const int rto = (icsk->icsk_rto << 2) - (icsk->icsk_rto >> 1);
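
		/*
		 * Editor's note: (rto << 2) - (rto >> 1) is 4*RTO - RTO/2,
		 * i.e. 3.5 * RTO. With icsk_rto at its initial value of
		 * 3 s (TCP_TIMEOUT_INIT in kernels of this era) that gives
		 * a 10.5 s floor, long enough to ride out a retransmitted
		 * FIN plus lost ACKs, per the big comment above
		 * tcp_timewait_state_process().
		 */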

		tw->tw_rcv_wscale	= tp->rx_opt.rcv_wscale;
		tcptw->tw_rcv_nxt	= tp->rcv_nxt;
		tcptw->tw_snd_nxt	= tp->snd_nxt;
		tcptw->tw_rcv_wnd	= tcp_receive_window(tp);
		tcptw->tw_ts_recent	= tp->rx_opt.ts_recent;
		tcptw->tw_ts_recent_stamp = tp->rx_opt.ts_recent_stamp;

#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
		if (tw->tw_family == PF_INET6) {
			struct ipv6_pinfo *np = inet6_sk(sk);
			struct inet6_timewait_sock *tw6;

			tw->tw_ipv6_offset = inet6_tw_offset(sk->sk_prot);
			tw6 = inet6_twsk((struct sock *)tw);
			ipv6_addr_copy(&tw6->tw_v6_daddr, &np->daddr);
			ipv6_addr_copy(&tw6->tw_v6_rcv_saddr, &np->rcv_saddr);
			tw->tw_ipv6only = np->ipv6only;
		}
#endif

#ifdef CONFIG_TCP_MD5SIG
		/*
		 * The timewait bucket does not have the key DB from the
		 * sock structure. We just make a quick copy of the
		 * md5 key being used (if indeed we are using one)
		 * so the timewait ack generating code has the key.
		 */
		do {
			struct tcp_md5sig_key *key;
			memset(tcptw->tw_md5_key, 0, sizeof(tcptw->tw_md5_key));
			tcptw->tw_md5_keylen = 0;
			key = tp->af_specific->md5_lookup(sk, sk);
			if (key != NULL) {
				memcpy(&tcptw->tw_md5_key, key->key, key->keylen);
				tcptw->tw_md5_keylen = key->keylen;
				if (tcp_alloc_md5sig_pool() == NULL)
					BUG();
			}
		} while (0);
#endif

		/* Linkage updates. */
		__inet_twsk_hashdance(tw, sk, &tcp_hashinfo);

		/* Get the TIME_WAIT timeout firing. */
		if (timeo < rto)
			timeo = rto;

		if (recycle_ok) {
			tw->tw_timeout = rto;
		} else {
			tw->tw_timeout = TCP_TIMEWAIT_LEN;
			if (state == TCP_TIME_WAIT)
				timeo = TCP_TIMEWAIT_LEN;
		}

		inet_twsk_schedule(tw, &tcp_death_row, timeo,
				   TCP_TIMEWAIT_LEN);
		inet_twsk_put(tw);
	} else {
		/* Sorry, if we're out of memory, just CLOSE this
		 * socket up.  We've got bigger problems than
		 * non-graceful socket closings.
		 */
		if (net_ratelimit())
			printk(KERN_INFO "TCP: time wait bucket table overflow\n");
	}

	tcp_update_metrics(sk);
	tcp_done(sk);
}

void tcp_twsk_destructor(struct sock *sk)
{
#ifdef CONFIG_TCP_MD5SIG
	struct tcp_timewait_sock *twsk = tcp_twsk(sk);
	if (twsk->tw_md5_keylen)
		tcp_put_md5sig_pool();
#endif
}

EXPORT_SYMBOL_GPL(tcp_twsk_destructor);

/* This is not only more efficient than what we used to do, it eliminates
 * a lot of code duplication between IPv4/IPv6 SYN recv processing. -DaveM
 *
 * Actually, we could save lots of memory writes here. tp of the listening
 * socket contains all the necessary default parameters.
 */
struct sock *tcp_create_openreq_child(struct sock *sk, struct request_sock *req, struct sk_buff *skb)
{
	struct sock *newsk = inet_csk_clone(sk, req, GFP_ATOMIC);

	if (newsk != NULL) {
		const struct inet_request_sock *ireq = inet_rsk(req);
		struct tcp_request_sock *treq = tcp_rsk(req);
		struct inet_connection_sock *newicsk = inet_csk(newsk);
		struct tcp_sock *newtp;

		/* Now setup tcp_sock */
		newtp = tcp_sk(newsk);
		newtp->pred_flags = 0;
		newtp->rcv_nxt = treq->rcv_isn + 1;
		newtp->snd_nxt = newtp->snd_una = newtp->snd_sml = treq->snt_isn + 1;

		tcp_prequeue_init(newtp);

		tcp_init_wl(newtp, treq->snt_isn, treq->rcv_isn);

		newtp->srtt = 0;
		newtp->mdev = TCP_TIMEOUT_INIT;
		newicsk->icsk_rto = TCP_TIMEOUT_INIT;

		newtp->packets_out = 0;
		newtp->left_out = 0;
		newtp->retrans_out = 0;
		newtp->sacked_out = 0;
		newtp->fackets_out = 0;
		newtp->snd_ssthresh = 0x7fffffff;

		/* So many TCP implementations out there (incorrectly) count the
		 * initial SYN frame in their delayed-ACK and congestion control
		 * algorithms that we must have the following bandaid to talk
		 * efficiently to them.  -DaveM
		 */
		newtp->snd_cwnd = 2;
		newtp->snd_cwnd_cnt = 0;
		newtp->bytes_acked = 0;

		newtp->frto_counter = 0;
		newtp->frto_highmark = 0;

		newicsk->icsk_ca_ops = &tcp_init_congestion_ops;

		tcp_set_ca_state(newsk, TCP_CA_Open);
		tcp_init_xmit_timers(newsk);
		skb_queue_head_init(&newtp->out_of_order_queue);
		newtp->rcv_wup = treq->rcv_isn + 1;
		newtp->write_seq = treq->snt_isn + 1;
		newtp->pushed_seq = newtp->write_seq;
		newtp->copied_seq = treq->rcv_isn + 1;

		newtp->rx_opt.saw_tstamp = 0;

		newtp->rx_opt.dsack = 0;
		newtp->rx_opt.eff_sacks = 0;

		newtp->rx_opt.num_sacks = 0;
		newtp->urg_data = 0;

		if (sock_flag(newsk, SOCK_KEEPOPEN))
			inet_csk_reset_keepalive_timer(newsk,
						       keepalive_time_when(newtp));

		newtp->rx_opt.tstamp_ok = ireq->tstamp_ok;
		if ((newtp->rx_opt.sack_ok = ireq->sack_ok) != 0) {
			if (sysctl_tcp_fack)
				newtp->rx_opt.sack_ok |= 2;
		}
		newtp->window_clamp = req->window_clamp;
		newtp->rcv_ssthresh = req->rcv_wnd;
		newtp->rcv_wnd = req->rcv_wnd;
		newtp->rx_opt.wscale_ok = ireq->wscale_ok;
		if (newtp->rx_opt.wscale_ok) {
			newtp->rx_opt.snd_wscale = ireq->snd_wscale;
			newtp->rx_opt.rcv_wscale = ireq->rcv_wscale;
		} else {
			newtp->rx_opt.snd_wscale = newtp->rx_opt.rcv_wscale = 0;
			newtp->window_clamp = min(newtp->window_clamp, 65535U);
		}
		newtp->snd_wnd = ntohs(skb->h.th->window) << newtp->rx_opt.snd_wscale;
		newtp->max_window = newtp->snd_wnd;
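
		/*
		 * Editor's example: the skb here is normally the ACK that
		 * completes the handshake, so its window field is already
		 * subject to scaling (RFC 1323 exempts only SYN segments).
		 * If it carries window 16384 and the peer offered
		 * snd_wscale == 2, snd_wnd above becomes 16384 << 2 == 65536.
		 */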

		if (newtp->rx_opt.tstamp_ok) {
			newtp->rx_opt.ts_recent = req->ts_recent;
			newtp->rx_opt.ts_recent_stamp = xtime.tv_sec;
			newtp->tcp_header_len = sizeof(struct tcphdr) + TCPOLEN_TSTAMP_ALIGNED;
		} else {
			newtp->rx_opt.ts_recent_stamp = 0;
			newtp->tcp_header_len = sizeof(struct tcphdr);
		}
#ifdef CONFIG_TCP_MD5SIG
		newtp->md5sig_info = NULL;	/*XXX*/
		if (newtp->af_specific->md5_lookup(sk, newsk))
			newtp->tcp_header_len += TCPOLEN_MD5SIG_ALIGNED;
#endif
		if (skb->len >= TCP_MIN_RCVMSS + newtp->tcp_header_len)
			newicsk->icsk_ack.last_seg_size = skb->len - newtp->tcp_header_len;
		newtp->rx_opt.mss_clamp = req->mss;
		TCP_ECN_openreq_child(newtp, req);

		TCP_INC_STATS_BH(TCP_MIB_PASSIVEOPENS);
	}
	return newsk;
}

/*
 *	Process an incoming packet for SYN_RECV sockets represented
 *	as a request_sock.
 */

struct sock *tcp_check_req(struct sock *sk, struct sk_buff *skb,
			   struct request_sock *req,
			   struct request_sock **prev)
{
	struct tcphdr *th = skb->h.th;
	__be32 flg = tcp_flag_word(th) & (TCP_FLAG_RST|TCP_FLAG_SYN|TCP_FLAG_ACK);
	int paws_reject = 0;
	struct tcp_options_received tmp_opt;
	struct sock *child;

	tmp_opt.saw_tstamp = 0;
	if (th->doff > (sizeof(struct tcphdr) >> 2)) {
		tcp_parse_options(skb, &tmp_opt, 0);

		if (tmp_opt.saw_tstamp) {
			tmp_opt.ts_recent = req->ts_recent;
			/* We do not store the true stamp, but it is not
			 * required, it can be estimated (approximately)
			 * from other data.
			 */
			tmp_opt.ts_recent_stamp = xtime.tv_sec - ((TCP_TIMEOUT_INIT/HZ)<<req->retrans);
			paws_reject = tcp_paws_check(&tmp_opt, th->rst);
		}
	}
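
	/*
	 * Editor's example: with TCP_TIMEOUT_INIT of 3 s (its value in
	 * kernels of this era) and req->retrans == 2, the estimate above
	 * backdates ts_recent_stamp by 3 << 2 == 12 seconds, the current
	 * backed-off retransmission timeout and a rough upper bound on
	 * how long ago the timestamp could have been received.
	 */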

	/* Check for pure retransmitted SYN. */
	if (TCP_SKB_CB(skb)->seq == tcp_rsk(req)->rcv_isn &&
	    flg == TCP_FLAG_SYN &&
	    !paws_reject) {
		/*
		 * RFC793 draws (Incorrectly! It was fixed in RFC1122)
		 * this case on figure 6 and figure 8, but the formal
		 * protocol description says NOTHING.
		 * To be more exact, it says that we should send an ACK,
		 * because this segment (at least, if it has no data)
		 * is out of window.
		 *
		 * CONCLUSION: RFC793 (even with RFC1122) DOES NOT
		 * describe the SYN-RECV state. All the description
		 * is wrong, we cannot believe it and should
		 * rely only on common sense and implementation
		 * experience.
		 *
		 * Enforce "SYN-ACK" according to figure 8, figure 6
		 * of RFC793, fixed by RFC1122.
		 */
		req->rsk_ops->rtx_syn_ack(sk, req, NULL);
		return NULL;
	}

	/* Further reproduces section "SEGMENT ARRIVES"
	   for state SYN-RECEIVED of RFC793.
	   It is broken, however: the only case where it does not
	   work is when SYNs are crossed.

	   You would think that SYN crossing is impossible here, since
	   we should have a SYN_SENT socket (from connect()) on our end,
	   but this is not true if the crossed SYNs were sent to both
	   ends by a malicious third party. We must defend against this,
	   and to do that we first verify the ACK (as per RFC793, page
	   36) and reset if it is invalid. Is this a true full defense?
	   To convince ourselves, let us consider a way in which the ACK
	   test can still pass in this 'malicious crossed SYNs' case.
	   The malicious sender sends identical SYNs (and thus identical
	   sequence numbers) to both A and B:

		A: gets SYN, seq=7
		B: gets SYN, seq=7

	   By our good fortune, both A and B select the same initial
	   send sequence number of seven :-)

		A: sends SYN|ACK, seq=7, ack_seq=8
		B: sends SYN|ACK, seq=7, ack_seq=8

	   So we are now A eating this SYN|ACK, and the ACK test passes.
	   So does the sequence test, the SYN is truncated, and thus we
	   consider it a bare ACK.

	   If icsk->icsk_accept_queue.rskq_defer_accept is set, we silently
	   drop this bare ACK. Otherwise, we create an established connection.
	   Both ends (listening sockets) accept the new incoming connection
	   and try to talk to each other. 8-)

	   Note: This case is both harmless and rare. The probability is about
	   the same as us discovering intelligent life on another planet
	   tomorrow.

	   But generally, we should (the RFC lies!) accept an ACK
	   from a SYNACK both here and in tcp_rcv_state_process().
	   tcp_rcv_state_process() does not, hence we do not either.

	   Note that the case is absolutely generic:
	   we cannot optimize anything here without
	   violating the protocol. All the checks must be made
	   before an attempt to create a socket.
	 */

	/* RFC793 page 36: "If the connection is in any non-synchronized state ...
	 * and the incoming segment acknowledges something not yet
	 * sent (the segment carries an unacceptable ACK) ...
	 * a reset is sent."
	 *
	 * Invalid ACK: reset will be sent by the listening socket
	 */
	if ((flg & TCP_FLAG_ACK) &&
	    (TCP_SKB_CB(skb)->ack_seq != tcp_rsk(req)->snt_isn + 1))
		return sk;

	/* Also, it would not be a bad idea to check rcv_tsecr, which
	 * is essentially an ACK extension; too-early or too-late values
	 * should cause a reset in unsynchronized states.
	 */

	/* RFC793: "first check sequence number". */

	if (paws_reject || !tcp_in_window(TCP_SKB_CB(skb)->seq, TCP_SKB_CB(skb)->end_seq,
					  tcp_rsk(req)->rcv_isn + 1, tcp_rsk(req)->rcv_isn + 1 + req->rcv_wnd)) {
		/* Out of window: send ACK and drop. */
		if (!(flg & TCP_FLAG_RST))
			req->rsk_ops->send_ack(skb, req);
		if (paws_reject)
			NET_INC_STATS_BH(LINUX_MIB_PAWSESTABREJECTED);
		return NULL;
	}

	/* In sequence, PAWS is OK. */

	if (tmp_opt.saw_tstamp && !after(TCP_SKB_CB(skb)->seq, tcp_rsk(req)->rcv_isn + 1))
		req->ts_recent = tmp_opt.rcv_tsval;

	if (TCP_SKB_CB(skb)->seq == tcp_rsk(req)->rcv_isn) {
		/* Truncate SYN, it is out of window starting
		   at tcp_rsk(req)->rcv_isn + 1. */
		flg &= ~TCP_FLAG_SYN;
	}
627 /* RFC793: "second check the RST bit" and
628 * "fourth, check the SYN bit"
630 if (flg & (TCP_FLAG_RST|TCP_FLAG_SYN)) {
631 TCP_INC_STATS_BH(TCP_MIB_ATTEMPTFAILS);
632 goto embryonic_reset;
635 /* ACK sequence verified above, just make sure ACK is
636 * set. If ACK not set, just silently drop the packet.
638 if (!(flg & TCP_FLAG_ACK))
639 return NULL;

	/* If TCP_DEFER_ACCEPT is set, drop bare ACK. */
	if (inet_csk(sk)->icsk_accept_queue.rskq_defer_accept &&
	    TCP_SKB_CB(skb)->end_seq == tcp_rsk(req)->rcv_isn + 1) {
		inet_rsk(req)->acked = 1;
		return NULL;
	}

	/* OK, the ACK is valid, create the big socket and
	 * feed this segment to it. It will repeat all
	 * the tests. THIS SEGMENT MUST MOVE THE SOCKET TO
	 * ESTABLISHED STATE. If it is dropped after the
	 * socket is created, expect trouble.
	 */
	child = inet_csk(sk)->icsk_af_ops->syn_recv_sock(sk, skb,
							 req, NULL);
	if (child == NULL)
		goto listen_overflow;
#ifdef CONFIG_TCP_MD5SIG
	else {
		/* Copy over the MD5 key from the original socket */
		struct tcp_md5sig_key *key;
		struct tcp_sock *tp = tcp_sk(sk);
		key = tp->af_specific->md5_lookup(sk, child);
		if (key != NULL) {
			/*
			 * We're using one, so create a matching key on the
			 * newsk structure. If we fail to get memory then we
			 * end up not copying the key across. Shucks.
			 */
			char *newkey = kmalloc(key->keylen, GFP_ATOMIC);
			if (newkey) {
				if (!tcp_alloc_md5sig_pool())
					BUG();
				memcpy(newkey, key->key, key->keylen);
				tp->af_specific->md5_add(child, child,
							 newkey,
							 key->keylen);
			}
		}
	}
#endif

	inet_csk_reqsk_queue_unlink(sk, req, prev);
	inet_csk_reqsk_queue_removed(sk, req);

	inet_csk_reqsk_queue_add(sk, req, child);
	return child;

listen_overflow:
	if (!sysctl_tcp_abort_on_overflow) {
		inet_rsk(req)->acked = 1;
		return NULL;
	}

embryonic_reset:
	NET_INC_STATS_BH(LINUX_MIB_EMBRYONICRSTS);
	if (!(flg & TCP_FLAG_RST))
		req->rsk_ops->send_reset(sk, skb);

	inet_csk_reqsk_queue_drop(sk, req, prev);
	return NULL;
}

/*
 * Queue the segment on the new socket if the new socket is active,
 * otherwise we just shortcircuit this and continue with
 * the new socket.
 */

int tcp_child_process(struct sock *parent, struct sock *child,
		      struct sk_buff *skb)
{
	int ret = 0;
	int state = child->sk_state;

	if (!sock_owned_by_user(child)) {
		ret = tcp_rcv_state_process(child, skb, skb->h.th, skb->len);

		/* Wakeup parent, send SIGIO */
		if (state == TCP_SYN_RECV && child->sk_state != state)
			parent->sk_data_ready(parent, 0);
	} else {
		/* Alas, it is possible again, because we do the lookup
		 * in the main socket hash table and the lock on the
		 * listening socket does not protect us any more.
		 */
		sk_add_backlog(child, skb);
	}

	bh_unlock_sock(child);
	sock_put(child);
	return ret;
}
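
/*
 * Editor's sketch, hedged (from memory of the 2.6 call path, not
 * verbatim): tcp_v4_do_rcv() reaches tcp_child_process() via
 * tcp_v4_hnd_req(), which returns either the listener itself or the
 * child created by tcp_check_req():
 *
 *	struct sock *nsk = tcp_v4_hnd_req(sk, skb);
 *	if (!nsk)
 *		goto discard;
 *	if (nsk != sk) {
 *		if (tcp_child_process(sk, nsk, skb))
 *			goto reset;
 *		return 0;
 *	}
 */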

EXPORT_SYMBOL(tcp_check_req);
EXPORT_SYMBOL(tcp_child_process);
EXPORT_SYMBOL(tcp_create_openreq_child);
EXPORT_SYMBOL(tcp_timewait_state_process);