net/ipv4/tcp_minisocks.c (linux-2.6.9-moxart.git, initial commit with v2.6.9)
/*
 * INET		An implementation of the TCP/IP protocol suite for the LINUX
 *		operating system.  INET is implemented using the BSD Socket
 *		interface as the means of communication with the user level.
 *
 *		Implementation of the Transmission Control Protocol(TCP).
 *
 * Version:	$Id: tcp_minisocks.c,v 1.15 2002/02/01 22:01:04 davem Exp $
 *
 * Authors:	Ross Biro, <bir7@leland.Stanford.Edu>
 *		Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
 *		Mark Evans, <evansmp@uhura.aston.ac.uk>
 *		Corey Minyard <wf-rch!minyard@relay.EU.net>
 *		Florian La Roche, <flla@stud.uni-sb.de>
 *		Charles Hedrick, <hedrick@klinzhai.rutgers.edu>
 *		Linus Torvalds, <torvalds@cs.helsinki.fi>
 *		Alan Cox, <gw4pts@gw4pts.ampr.org>
 *		Matthew Dillon, <dillon@apollo.west.oic.com>
 *		Arnt Gulbrandsen, <agulbra@nvg.unit.no>
 *		Jorge Cwik, <jorge@laser.satlink.net>
 */

#include <linux/config.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/sysctl.h>
#include <linux/workqueue.h>
#include <net/tcp.h>
#include <net/inet_common.h>
#include <net/xfrm.h>

#ifdef CONFIG_SYSCTL
#define SYNC_INIT 0 /* let the user enable it */
#else
#define SYNC_INIT 1
#endif

int sysctl_tcp_tw_recycle;
int sysctl_tcp_max_tw_buckets = NR_FILE*2;

int sysctl_tcp_syncookies = SYNC_INIT;
int sysctl_tcp_abort_on_overflow;

static __inline__ int tcp_in_window(u32 seq, u32 end_seq, u32 s_win, u32 e_win)
{
	if (seq == s_win)
		return 1;
	if (after(end_seq, s_win) && before(seq, e_win))
		return 1;
	return (seq == e_win && seq == end_seq);
}
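/* Added worked example (not from the original source): with a receive
 * window [s_win, e_win) = [100, 200), a data segment with seq = 90 and
 * end_seq = 110 overlaps the window (after(110, 100) && before(90, 200))
 * and is accepted, while a zero-length probe with seq == end_seq == 200
 * is accepted only by the final test (seq == e_win && seq == end_seq).
 */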
/* New-style handling of TIME_WAIT sockets. */

int tcp_tw_count;

/* Must be called with locally disabled BHs. */
static void tcp_timewait_kill(struct tcp_tw_bucket *tw)
{
	struct tcp_ehash_bucket *ehead;
	struct tcp_bind_hashbucket *bhead;
	struct tcp_bind_bucket *tb;

	/* Unlink from established hashes. */
	ehead = &tcp_ehash[tw->tw_hashent];
	write_lock(&ehead->lock);
	if (hlist_unhashed(&tw->tw_node)) {
		write_unlock(&ehead->lock);
		return;
	}
	__hlist_del(&tw->tw_node);
	sk_node_init(&tw->tw_node);
	write_unlock(&ehead->lock);

	/* Disassociate with bind bucket. */
	bhead = &tcp_bhash[tcp_bhashfn(tw->tw_num)];
	spin_lock(&bhead->lock);
	tb = tw->tw_tb;
	__hlist_del(&tw->tw_bind_node);
	tw->tw_tb = NULL;
	tcp_bucket_destroy(tb);
	spin_unlock(&bhead->lock);

#ifdef INET_REFCNT_DEBUG
	if (atomic_read(&tw->tw_refcnt) != 1) {
		printk(KERN_DEBUG "tw_bucket %p refcnt=%d\n", tw,
		       atomic_read(&tw->tw_refcnt));
	}
#endif
	tcp_tw_put(tw);
}

/*
 * * The main purpose of the TIME-WAIT state is to close the connection
 *   gracefully when one of the ends sits in LAST-ACK or CLOSING,
 *   retransmitting its FIN (and, probably, a tail of data), and one or
 *   more of our ACKs are lost.
 * * What is the TIME-WAIT timeout? It is associated with the maximal
 *   packet lifetime in the internet, which leads to the wrong conclusion
 *   that it is set to catch "old duplicate segments" wandering out of
 *   their path. That is not quite correct. This timeout is calculated
 *   so that it exceeds the maximal retransmission timeout by enough to
 *   allow the loss of one (or more) segments sent by the peer and of our
 *   ACKs. This time may be calculated from the RTO.
 * * When a TIME-WAIT socket receives an RST, it means that the other end
 *   finally closed and we are allowed to kill TIME-WAIT too.
 * * The second purpose of TIME-WAIT is catching old duplicate segments.
 *   Well, certainly it is pure paranoia, but if we load TIME-WAIT
 *   with this semantics, we MUST NOT kill TIME-WAIT state with RSTs.
 * * If we invented some more clever way to catch duplicates
 *   (f.e. based on PAWS), we could truncate TIME-WAIT to several RTOs.
 *
 * The algorithm below is based on a FORMAL INTERPRETATION of the RFCs.
 * When you compare it to the RFCs, please, read the section SEGMENT
 * ARRIVES from the very beginning.
 *
 * NOTE. With recycling (and later with fin-wait-2) the TW bucket
 * is _not_ stateless. That means that, strictly speaking, we must
 * spinlock it. I do not want to! Well, the probability of misbehaviour
 * is ridiculously low and, it seems, we could use some mb() tricks
 * to avoid misreading sequence numbers, states etc.  --ANK
 */
enum tcp_tw_status
tcp_timewait_state_process(struct tcp_tw_bucket *tw, struct sk_buff *skb,
			   struct tcphdr *th, unsigned len)
{
	struct tcp_opt tp;
	int paws_reject = 0;

	tp.saw_tstamp = 0;
	if (th->doff > (sizeof(struct tcphdr) >> 2) && tw->tw_ts_recent_stamp) {
		tcp_parse_options(skb, &tp, 0);

		if (tp.saw_tstamp) {
			tp.ts_recent = tw->tw_ts_recent;
			tp.ts_recent_stamp = tw->tw_ts_recent_stamp;
			paws_reject = tcp_paws_check(&tp, th->rst);
		}
	}
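	/* Added commentary: tcp_paws_check() above compares the segment's
	 * rcv_tsval with the ts_recent remembered for this peer; roughly,
	 * the segment is rejected only when its timestamp is older than
	 * ts_recent and ts_recent itself was refreshed recently enough
	 * (on the order of 24 days) to still be trusted.  See
	 * tcp_paws_check() for the exact rules.
	 */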
	if (tw->tw_substate == TCP_FIN_WAIT2) {
		/* Just repeat all the checks of tcp_rcv_state_process() */

		/* Out of window, send ACK */
		if (paws_reject ||
		    !tcp_in_window(TCP_SKB_CB(skb)->seq, TCP_SKB_CB(skb)->end_seq,
				   tw->tw_rcv_nxt,
				   tw->tw_rcv_nxt + tw->tw_rcv_wnd))
			return TCP_TW_ACK;

		if (th->rst)
			goto kill;

		if (th->syn && !before(TCP_SKB_CB(skb)->seq, tw->tw_rcv_nxt))
			goto kill_with_rst;

		/* Dup ACK? */
		if (!after(TCP_SKB_CB(skb)->end_seq, tw->tw_rcv_nxt) ||
		    TCP_SKB_CB(skb)->end_seq == TCP_SKB_CB(skb)->seq) {
			tcp_tw_put(tw);
			return TCP_TW_SUCCESS;
		}

		/* New data or FIN. If new data arrive after a half-duplex
		 * close, reset.
		 */
		if (!th->fin ||
		    TCP_SKB_CB(skb)->end_seq != tw->tw_rcv_nxt + 1) {
kill_with_rst:
			tcp_tw_deschedule(tw);
			tcp_tw_put(tw);
			return TCP_TW_RST;
		}

		/* FIN arrived, enter true time-wait state. */
		tw->tw_substate = TCP_TIME_WAIT;
		tw->tw_rcv_nxt = TCP_SKB_CB(skb)->end_seq;
		if (tp.saw_tstamp) {
			tw->tw_ts_recent_stamp = xtime.tv_sec;
			tw->tw_ts_recent = tp.rcv_tsval;
		}

		/* I am ashamed, but I failed to make it more elegant.
		 * Yes, it is a direct reference to IP, which is impossible
		 * to generalize to IPv6. Taking into account that IPv6
		 * does not understand recycling in any case, it is not
		 * a big problem in practice.  --ANK */
		if (tw->tw_family == AF_INET &&
		    sysctl_tcp_tw_recycle && tw->tw_ts_recent_stamp &&
		    tcp_v4_tw_remember_stamp(tw))
			tcp_tw_schedule(tw, tw->tw_timeout);
		else
			tcp_tw_schedule(tw, TCP_TIMEWAIT_LEN);
		return TCP_TW_ACK;
	}

	/*
	 * Now real TIME-WAIT state.
	 *
	 * RFC 1122:
	 * "When a connection is [...] on TIME-WAIT state [...]
	 * [a TCP] MAY accept a new SYN from the remote TCP to
	 * reopen the connection directly, if it:
	 *
	 * (1)  assigns its initial sequence number for the new
	 *      connection to be larger than the largest sequence
	 *      number it used on the previous connection incarnation,
	 *      and
	 *
	 * (2)  returns to TIME-WAIT state if the SYN turns out
	 *      to be an old duplicate".
	 */

	if (!paws_reject &&
	    (TCP_SKB_CB(skb)->seq == tw->tw_rcv_nxt &&
	     (TCP_SKB_CB(skb)->seq == TCP_SKB_CB(skb)->end_seq || th->rst))) {
		/* In window segment, it may be only reset or bare ack. */

		if (th->rst) {
			/* This is TIME_WAIT assassination, in two flavors.
			 * Oh well... nobody has a sufficient solution to this
			 * protocol bug yet.
			 */
			if (sysctl_tcp_rfc1337 == 0) {
kill:
				tcp_tw_deschedule(tw);
				tcp_tw_put(tw);
				return TCP_TW_SUCCESS;
			}
		}
		tcp_tw_schedule(tw, TCP_TIMEWAIT_LEN);

		if (tp.saw_tstamp) {
			tw->tw_ts_recent = tp.rcv_tsval;
			tw->tw_ts_recent_stamp = xtime.tv_sec;
		}

		tcp_tw_put(tw);
		return TCP_TW_SUCCESS;
	}

	/* Out of window segment.

	   All such segments are ACKed immediately.

	   The only exception is a new SYN. We accept it if it is
	   not an old duplicate and we are not in danger of being killed
	   by delayed old duplicates. The RFC check (that it carries a
	   newer sequence number) works only at rates below ~40 Mbit/sec.
	   However, if PAWS works, it is reliable, and moreover
	   we may even relax the silly sequence-space cutoff.

	   RED-PEN: we violate the main RFC requirement: if this SYN turns
	   out to be an old duplicate (i.e. we receive an RST in reply to
	   our SYN-ACK), we must return the socket to time-wait state.
	   It is not good, but not fatal yet.
	 */

	if (th->syn && !th->rst && !th->ack && !paws_reject &&
	    (after(TCP_SKB_CB(skb)->seq, tw->tw_rcv_nxt) ||
	     (tp.saw_tstamp && (s32)(tw->tw_ts_recent - tp.rcv_tsval) < 0))) {
		u32 isn = tw->tw_snd_nxt + 65535 + 2;
		if (isn == 0)
			isn++;
		TCP_SKB_CB(skb)->when = isn;
		return TCP_TW_SYN;
	}
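	/* Added commentary: the ISN chosen above, tw_snd_nxt + 65535 + 2,
	 * satisfies RFC 1122 rule (1): it lies above any sequence number the
	 * old incarnation can have used, even with a full 64KB of data in
	 * flight.  It is stashed in TCP_SKB_CB(skb)->when for the listener
	 * that handles TCP_TW_SYN; presumably 0 is skipped because a zero
	 * 'when' would read as "no ISN override".
	 */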
	if (paws_reject)
		NET_INC_STATS_BH(LINUX_MIB_PAWSESTABREJECTED);

	if (!th->rst) {
		/* In this case we must reset the TIMEWAIT timer.
		 *
		 * If it is an ACKless SYN it may be both an old duplicate
		 * and a new good SYN with a random sequence number < rcv_nxt.
		 * Do not reschedule in the latter case.
		 */
		if (paws_reject || th->ack)
			tcp_tw_schedule(tw, TCP_TIMEWAIT_LEN);

		/* Send ACK. Note, we do not put the bucket,
		 * it will be released by the caller.
		 */
		return TCP_TW_ACK;
	}
	tcp_tw_put(tw);
	return TCP_TW_SUCCESS;
}

/* Enter the time wait state. This is called with locally disabled BH.
 * Essentially we whip up a timewait bucket, copy the
 * relevant info into it from the SK, and mess with hash chains
 * and list linkage.
 */
static void __tcp_tw_hashdance(struct sock *sk, struct tcp_tw_bucket *tw)
{
	struct tcp_ehash_bucket *ehead = &tcp_ehash[sk->sk_hashent];
	struct tcp_bind_hashbucket *bhead;

	/* Step 1: Put TW into bind hash. Original socket stays there too.
	   Note that any socket with inet_sk(sk)->num != 0 MUST be bound in
	   the binding cache, even if it is closed.
	 */
	bhead = &tcp_bhash[tcp_bhashfn(inet_sk(sk)->num)];
	spin_lock(&bhead->lock);
	tw->tw_tb = tcp_sk(sk)->bind_hash;
	BUG_TRAP(tcp_sk(sk)->bind_hash);
	tw_add_bind_node(tw, &tw->tw_tb->owners);
	spin_unlock(&bhead->lock);

	write_lock(&ehead->lock);

	/* Step 2: Remove SK from the established hash. */
	if (__sk_del_node_init(sk))
		sock_prot_dec_use(sk->sk_prot);

	/* Step 3: Hash TW into the TIMEWAIT half of the established hash table. */
	tw_add_node(tw, &(ehead + tcp_ehash_size)->chain);
	atomic_inc(&tw->tw_refcnt);

	write_unlock(&ehead->lock);
}

/*
 * Move a socket to time-wait or dead fin-wait-2 state.
 */
void tcp_time_wait(struct sock *sk, int state, int timeo)
{
	struct tcp_tw_bucket *tw = NULL;
	struct tcp_opt *tp = tcp_sk(sk);
	int recycle_ok = 0;

	if (sysctl_tcp_tw_recycle && tp->ts_recent_stamp)
		recycle_ok = tp->af_specific->remember_stamp(sk);

	if (tcp_tw_count < sysctl_tcp_max_tw_buckets)
		tw = kmem_cache_alloc(tcp_timewait_cachep, SLAB_ATOMIC);

	if (tw != NULL) {
		struct inet_opt *inet = inet_sk(sk);
		int rto = (tp->rto << 2) - (tp->rto >> 1);
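		/* Added note: (rto << 2) - (rto >> 1) is 4*RTO - RTO/2,
		 * i.e. the 3.5*RTO interval discussed above
		 * tcp_tw_schedule(); e.g. tp->rto of 200 ms gives a
		 * 700 ms minimum timeout.
		 */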
		/* Give us an identity. */
		tw->tw_daddr = inet->daddr;
		tw->tw_rcv_saddr = inet->rcv_saddr;
		tw->tw_bound_dev_if = sk->sk_bound_dev_if;
		tw->tw_num = inet->num;
		tw->tw_state = TCP_TIME_WAIT;
		tw->tw_substate = state;
		tw->tw_sport = inet->sport;
		tw->tw_dport = inet->dport;
		tw->tw_family = sk->sk_family;
		tw->tw_reuse = sk->sk_reuse;
		tw->tw_rcv_wscale = tp->rcv_wscale;
		atomic_set(&tw->tw_refcnt, 1);

		tw->tw_hashent = sk->sk_hashent;
		tw->tw_rcv_nxt = tp->rcv_nxt;
		tw->tw_snd_nxt = tp->snd_nxt;
		tw->tw_rcv_wnd = tcp_receive_window(tp);
		tw->tw_ts_recent = tp->ts_recent;
		tw->tw_ts_recent_stamp = tp->ts_recent_stamp;
		tw_dead_node_init(tw);

#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
		if (tw->tw_family == PF_INET6) {
			struct ipv6_pinfo *np = inet6_sk(sk);

			ipv6_addr_copy(&tw->tw_v6_daddr, &np->daddr);
			ipv6_addr_copy(&tw->tw_v6_rcv_saddr, &np->rcv_saddr);
			tw->tw_v6_ipv6only = np->ipv6only;
		} else {
			memset(&tw->tw_v6_daddr, 0, sizeof(tw->tw_v6_daddr));
			memset(&tw->tw_v6_rcv_saddr, 0, sizeof(tw->tw_v6_rcv_saddr));
			tw->tw_v6_ipv6only = 0;
		}
#endif
		/* Linkage updates. */
		__tcp_tw_hashdance(sk, tw);

		/* Get the TIME_WAIT timeout firing. */
		if (timeo < rto)
			timeo = rto;

		if (recycle_ok) {
			tw->tw_timeout = rto;
		} else {
			tw->tw_timeout = TCP_TIMEWAIT_LEN;
			if (state == TCP_TIME_WAIT)
				timeo = TCP_TIMEWAIT_LEN;
		}

		tcp_tw_schedule(tw, timeo);
		tcp_tw_put(tw);
	} else {
		/* Sorry, if we're out of memory, just CLOSE this
		 * socket up.  We've got bigger problems than
		 * non-graceful socket closings.
		 */
		if (net_ratelimit())
			printk(KERN_INFO "TCP: time wait bucket table overflow\n");
	}

	tcp_update_metrics(sk);
	tcp_done(sk);
}

/* Kill off TIME_WAIT sockets once their lifetime has expired. */
static int tcp_tw_death_row_slot;

static void tcp_twkill(unsigned long);

/* TIME_WAIT reaping mechanism. */
#define TCP_TWKILL_SLOTS	8 /* Please keep this a power of 2. */
#define TCP_TWKILL_PERIOD	(TCP_TIMEWAIT_LEN/TCP_TWKILL_SLOTS)

#define TCP_TWKILL_QUOTA	100
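/* Added worked example: with the usual TCP_TIMEWAIT_LEN of 60*HZ, the
 * eight slots give TCP_TWKILL_PERIOD = 7.5 seconds, so the death-row
 * timer sweeps one slot every 7.5 s and a full TIME_WAIT lifetime spans
 * all eight slots.  TCP_TWKILL_QUOTA bounds how many buckets one timer
 * run reaps inline; anything beyond 100 is deferred to the workqueue
 * (see tcp_twkill() and twkill_work() below).
 */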
static struct hlist_head tcp_tw_death_row[TCP_TWKILL_SLOTS];
static spinlock_t tw_death_lock = SPIN_LOCK_UNLOCKED;
static struct timer_list tcp_tw_timer = TIMER_INITIALIZER(tcp_twkill, 0, 0);
static void twkill_work(void *);
static DECLARE_WORK(tcp_twkill_work, twkill_work, NULL);
static u32 twkill_thread_slots;

/* Returns non-zero if quota exceeded. */
static int tcp_do_twkill_work(int slot, unsigned int quota)
{
	struct tcp_tw_bucket *tw;
	struct hlist_node *node;
	unsigned int killed;
	int ret;

	/* NOTE: compare this to the previous version where the lock
	 * was released after detaching the chain. It was racy,
	 * because tw buckets are scheduled in a non-serialized context
	 * in 2.3 (with netfilter), and with softnet it is common, because
	 * soft irqs are not sequenced.
	 */
	killed = 0;
	ret = 0;
rescan:
	tw_for_each_inmate(tw, node, &tcp_tw_death_row[slot]) {
		__tw_del_dead_node(tw);
		spin_unlock(&tw_death_lock);
		tcp_timewait_kill(tw);
		tcp_tw_put(tw);
		killed++;
		spin_lock(&tw_death_lock);
		if (killed > quota) {
			ret = 1;
			break;
		}

		/* While we dropped tw_death_lock, another cpu may have
		 * killed off the next TW bucket in the list, therefore
		 * do a fresh re-read of the hlist head node with the
		 * lock reacquired.  We still use the hlist traversal
		 * macro in order to get the prefetches.
		 */
		goto rescan;
	}

	tcp_tw_count -= killed;
	NET_ADD_STATS_BH(LINUX_MIB_TIMEWAITED, killed);

	return ret;
}

static void tcp_twkill(unsigned long dummy)
{
	int need_timer, ret;

	spin_lock(&tw_death_lock);

	if (tcp_tw_count == 0)
		goto out;

	need_timer = 0;
	ret = tcp_do_twkill_work(tcp_tw_death_row_slot, TCP_TWKILL_QUOTA);
	if (ret) {
		twkill_thread_slots |= (1 << tcp_tw_death_row_slot);
		mb();
		schedule_work(&tcp_twkill_work);
		need_timer = 1;
	} else {
		/* We purged the entire slot, anything left? */
		if (tcp_tw_count)
			need_timer = 1;
	}
	tcp_tw_death_row_slot =
		((tcp_tw_death_row_slot + 1) & (TCP_TWKILL_SLOTS - 1));
	if (need_timer)
		mod_timer(&tcp_tw_timer, jiffies + TCP_TWKILL_PERIOD);
out:
	spin_unlock(&tw_death_lock);
}

extern void twkill_slots_invalid(void);

static void twkill_work(void *dummy)
{
	int i;

	if ((TCP_TWKILL_SLOTS - 1) > (sizeof(twkill_thread_slots) * 8))
		twkill_slots_invalid();

	while (twkill_thread_slots) {
		spin_lock_bh(&tw_death_lock);
		for (i = 0; i < TCP_TWKILL_SLOTS; i++) {
			if (!(twkill_thread_slots & (1 << i)))
				continue;

			while (tcp_do_twkill_work(i, TCP_TWKILL_QUOTA) != 0) {
				if (need_resched()) {
					spin_unlock_bh(&tw_death_lock);
					schedule();
					spin_lock_bh(&tw_death_lock);
				}
			}

			twkill_thread_slots &= ~(1 << i);
		}
		spin_unlock_bh(&tw_death_lock);
	}
}

/* These are always called from BH context.  See callers in
 * tcp_input.c to verify this.
 */

/* This is for handling early-kills of TIME_WAIT sockets. */
void tcp_tw_deschedule(struct tcp_tw_bucket *tw)
{
	spin_lock(&tw_death_lock);
	if (tw_del_dead_node(tw)) {
		tcp_tw_put(tw);
		if (--tcp_tw_count == 0)
			del_timer(&tcp_tw_timer);
	}
	spin_unlock(&tw_death_lock);
	tcp_timewait_kill(tw);
}

/* Short-time timewait calendar */

static int tcp_twcal_hand = -1;
static int tcp_twcal_jiffie;
static void tcp_twcal_tick(unsigned long);
static struct timer_list tcp_twcal_timer =
		TIMER_INITIALIZER(tcp_twcal_tick, 0, 0);
static struct hlist_head tcp_twcal_row[TCP_TW_RECYCLE_SLOTS];

void tcp_tw_schedule(struct tcp_tw_bucket *tw, int timeo)
{
	struct hlist_head *list;
	int slot;

	/* timeout := RTO * 3.5
	 *
	 * 3.5 = 1+2+0.5 to wait for two retransmits.
	 *
	 * RATIONALE: if FIN arrived and we entered TIME-WAIT state,
	 * our ACK acking that FIN can be lost. If N subsequent
	 * retransmitted FINs (or previous segments) are lost, the
	 * probability of that event is p^(N+1), where p is the probability
	 * of losing a single packet, and the time to detect the loss is
	 * about RTO*(2^N - 1) with exponential backoff. The normal timewait
	 * length is calculated so that we wait at least for one
	 * retransmitted FIN (the maximal RTO is 120 sec).
	 * [ BTW Linux, following BSD, violates this requirement by waiting
	 * only 60 sec; we should wait at least 240 secs.
	 * Well, 240 consumes too many resources 8) ]
	 *
	 * This interval is not reduced to catch old duplicates and
	 * responses to our wandering segments living for two MSLs.
	 * However, if we use PAWS to detect
	 * old duplicates, we can reduce the interval to bounds required
	 * by RTO, rather than MSL. So, if the peer understands PAWS, we
	 * kill the tw bucket after 3.5*RTO (it is important that this number
	 * is greater than the TS tick!) and detect old duplicates with the
	 * help of PAWS.
	 */
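	/* Added worked example: the expression below is a ceiling division,
	 * slot = ceil(timeo / 2^TCP_TW_RECYCLE_TICK).  Assuming one recycle
	 * tick of 1 << TCP_TW_RECYCLE_TICK = 128 jiffies (the real value
	 * depends on HZ), a timeo of 300 jiffies maps to slot
	 * (300 + 127) >> 7 = 3.
	 */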
	slot = (timeo + (1 << TCP_TW_RECYCLE_TICK) - 1) >> TCP_TW_RECYCLE_TICK;

	spin_lock(&tw_death_lock);

	/* Unlink it, if it was scheduled */
	if (tw_del_dead_node(tw))
		tcp_tw_count--;
	else
		atomic_inc(&tw->tw_refcnt);

	if (slot >= TCP_TW_RECYCLE_SLOTS) {
		/* Schedule to slow timer */
		if (timeo >= TCP_TIMEWAIT_LEN) {
			slot = TCP_TWKILL_SLOTS - 1;
		} else {
			slot = (timeo + TCP_TWKILL_PERIOD - 1) / TCP_TWKILL_PERIOD;
			if (slot >= TCP_TWKILL_SLOTS)
				slot = TCP_TWKILL_SLOTS - 1;
		}
		tw->tw_ttd = jiffies + timeo;
		slot = (tcp_tw_death_row_slot + slot) & (TCP_TWKILL_SLOTS - 1);
		list = &tcp_tw_death_row[slot];
	} else {
		tw->tw_ttd = jiffies + (slot << TCP_TW_RECYCLE_TICK);

		if (tcp_twcal_hand < 0) {
			tcp_twcal_hand = 0;
			tcp_twcal_jiffie = jiffies;
			tcp_twcal_timer.expires = tcp_twcal_jiffie + (slot << TCP_TW_RECYCLE_TICK);
			add_timer(&tcp_twcal_timer);
		} else {
			if (time_after(tcp_twcal_timer.expires, jiffies + (slot << TCP_TW_RECYCLE_TICK)))
				mod_timer(&tcp_twcal_timer, jiffies + (slot << TCP_TW_RECYCLE_TICK));
			slot = (tcp_twcal_hand + slot) & (TCP_TW_RECYCLE_SLOTS - 1);
		}
		list = &tcp_twcal_row[slot];
	}

	hlist_add_head(&tw->tw_death_node, list);

	if (tcp_tw_count++ == 0)
		mod_timer(&tcp_tw_timer, jiffies + TCP_TWKILL_PERIOD);
	spin_unlock(&tw_death_lock);
}

void tcp_twcal_tick(unsigned long dummy)
{
	int n, slot;
	unsigned long j;
	unsigned long now = jiffies;
	int killed = 0;
	int adv = 0;

	spin_lock(&tw_death_lock);
	if (tcp_twcal_hand < 0)
		goto out;

	slot = tcp_twcal_hand;
	j = tcp_twcal_jiffie;

	for (n = 0; n < TCP_TW_RECYCLE_SLOTS; n++) {
		if (time_before_eq(j, now)) {
			struct hlist_node *node, *safe;
			struct tcp_tw_bucket *tw;

			tw_for_each_inmate_safe(tw, node, safe,
						&tcp_twcal_row[slot]) {
				__tw_del_dead_node(tw);
				tcp_timewait_kill(tw);
				tcp_tw_put(tw);
				killed++;
			}
		} else {
			if (!adv) {
				adv = 1;
				tcp_twcal_jiffie = j;
				tcp_twcal_hand = slot;
			}

			if (!hlist_empty(&tcp_twcal_row[slot])) {
				mod_timer(&tcp_twcal_timer, j);
				goto out;
			}
		}
		j += (1 << TCP_TW_RECYCLE_TICK);
		slot = (slot + 1) & (TCP_TW_RECYCLE_SLOTS - 1);
	}
	tcp_twcal_hand = -1;

out:
	if ((tcp_tw_count -= killed) == 0)
		del_timer(&tcp_tw_timer);
	NET_ADD_STATS_BH(LINUX_MIB_TIMEWAITKILLED, killed);
	spin_unlock(&tw_death_lock);
}
/* This is not only more efficient than what we used to do, it eliminates
 * a lot of code duplication between IPv4/IPv6 SYN recv processing. -DaveM
 *
 * Actually, we could avoid lots of memory writes here. The tp of the
 * listening socket contains all the necessary default parameters.
 */
struct sock *tcp_create_openreq_child(struct sock *sk, struct open_request *req, struct sk_buff *skb)
{
	/* allocate the newsk from the same slab of the master sock,
	 * if not, at sk_free time we'll try to free it from the wrong
	 * slabcache (i.e. is it TCPv4 or v6?) -acme */
	struct sock *newsk = sk_alloc(PF_INET, GFP_ATOMIC, 0, sk->sk_prot->slab);

	if (newsk != NULL) {
		struct tcp_opt *newtp;
		struct sk_filter *filter;

		memcpy(newsk, sk, sizeof(struct tcp_sock));
		newsk->sk_state = TCP_SYN_RECV;

		/* SANITY */
		sk_node_init(&newsk->sk_node);
		tcp_sk(newsk)->bind_hash = NULL;

		/* Clone the TCP header template */
		inet_sk(newsk)->dport = req->rmt_port;

		sock_lock_init(newsk);
		bh_lock_sock(newsk);

		newsk->sk_dst_lock = RW_LOCK_UNLOCKED;
		atomic_set(&newsk->sk_rmem_alloc, 0);
		skb_queue_head_init(&newsk->sk_receive_queue);
		atomic_set(&newsk->sk_wmem_alloc, 0);
		skb_queue_head_init(&newsk->sk_write_queue);
		atomic_set(&newsk->sk_omem_alloc, 0);
		newsk->sk_wmem_queued = 0;
		newsk->sk_forward_alloc = 0;

		sock_reset_flag(newsk, SOCK_DONE);
		newsk->sk_userlocks = sk->sk_userlocks & ~SOCK_BINDPORT_LOCK;
		newsk->sk_backlog.head = newsk->sk_backlog.tail = NULL;
		newsk->sk_send_head = NULL;
		newsk->sk_callback_lock = RW_LOCK_UNLOCKED;
		skb_queue_head_init(&newsk->sk_error_queue);
		newsk->sk_write_space = sk_stream_write_space;

		if ((filter = newsk->sk_filter) != NULL)
			sk_filter_charge(newsk, filter);

		if (unlikely(xfrm_sk_clone_policy(newsk))) {
			/* It is still a raw copy of the parent, so invalidate
			 * the destructor and do a plain sk_free() */
			newsk->sk_destruct = NULL;
			sk_free(newsk);
			return NULL;
		}

		/* Now setup tcp_opt */
		newtp = tcp_sk(newsk);
		newtp->pred_flags = 0;
		newtp->rcv_nxt = req->rcv_isn + 1;
		newtp->snd_nxt = req->snt_isn + 1;
		newtp->snd_una = req->snt_isn + 1;
		newtp->snd_sml = req->snt_isn + 1;

		tcp_prequeue_init(newtp);

		tcp_init_wl(newtp, req->snt_isn, req->rcv_isn);

		newtp->retransmits = 0;
		newtp->backoff = 0;
		newtp->srtt = 0;
		newtp->mdev = TCP_TIMEOUT_INIT;
		newtp->rto = TCP_TIMEOUT_INIT;

		tcp_set_pcount(&newtp->packets_out, 0);
		tcp_set_pcount(&newtp->left_out, 0);
		tcp_set_pcount(&newtp->retrans_out, 0);
		tcp_set_pcount(&newtp->sacked_out, 0);
		tcp_set_pcount(&newtp->fackets_out, 0);
		newtp->snd_ssthresh = 0x7fffffff;

		/* So many TCP implementations out there (incorrectly) count the
		 * initial SYN frame in their delayed-ACK and congestion control
		 * algorithms that we must have the following bandaid to talk
		 * efficiently to them.  -DaveM
		 */
		newtp->snd_cwnd = 2;
		newtp->snd_cwnd_cnt = 0;

		newtp->frto_counter = 0;
		newtp->frto_highmark = 0;

		tcp_set_ca_state(newtp, TCP_CA_Open);
		tcp_init_xmit_timers(newsk);
		skb_queue_head_init(&newtp->out_of_order_queue);
		newtp->rcv_wup = req->rcv_isn + 1;
		newtp->write_seq = req->snt_isn + 1;
		newtp->pushed_seq = newtp->write_seq;
		newtp->copied_seq = req->rcv_isn + 1;

		newtp->saw_tstamp = 0;

		newtp->dsack = 0;
		newtp->eff_sacks = 0;

		newtp->probes_out = 0;
		newtp->num_sacks = 0;
		newtp->urg_data = 0;
		newtp->listen_opt = NULL;
		newtp->accept_queue = newtp->accept_queue_tail = NULL;
		/* Deinitialize syn_wait_lock to trap illegal accesses. */
		memset(&newtp->syn_wait_lock, 0, sizeof(newtp->syn_wait_lock));

		/* Back to base struct sock members. */
		newsk->sk_err = 0;
		newsk->sk_priority = 0;
		atomic_set(&newsk->sk_refcnt, 2);
#ifdef INET_REFCNT_DEBUG
		atomic_inc(&inet_sock_nr);
#endif
		atomic_inc(&tcp_sockets_allocated);

		if (sock_flag(newsk, SOCK_KEEPOPEN))
			tcp_reset_keepalive_timer(newsk,
						  keepalive_time_when(newtp));
		newsk->sk_socket = NULL;
		newsk->sk_sleep = NULL;
		newsk->sk_owner = NULL;

		newtp->tstamp_ok = req->tstamp_ok;
		if ((newtp->sack_ok = req->sack_ok) != 0) {
			if (sysctl_tcp_fack)
				newtp->sack_ok |= 2;
		}
		newtp->window_clamp = req->window_clamp;
		newtp->rcv_ssthresh = req->rcv_wnd;
		newtp->rcv_wnd = req->rcv_wnd;
		newtp->wscale_ok = req->wscale_ok;
		if (newtp->wscale_ok) {
			newtp->snd_wscale = req->snd_wscale;
			newtp->rcv_wscale = req->rcv_wscale;
		} else {
			newtp->snd_wscale = newtp->rcv_wscale = 0;
			newtp->window_clamp = min(newtp->window_clamp, 65535U);
		}
		newtp->snd_wnd = ntohs(skb->h.th->window) << newtp->snd_wscale;
		newtp->max_window = newtp->snd_wnd;
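		/* Added worked example: the peer's 16-bit window field is
		 * scaled by snd_wscale, e.g. an advertised value of 4380
		 * with snd_wscale == 2 yields snd_wnd = 4380 << 2 = 17520
		 * bytes; without window scaling (wscale_ok == 0) the shift
		 * is zero and the clamp above caps everything at 65535.
		 */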
		if (newtp->tstamp_ok) {
			newtp->ts_recent = req->ts_recent;
			newtp->ts_recent_stamp = xtime.tv_sec;
			newtp->tcp_header_len = sizeof(struct tcphdr) + TCPOLEN_TSTAMP_ALIGNED;
		} else {
			newtp->ts_recent_stamp = 0;
			newtp->tcp_header_len = sizeof(struct tcphdr);
		}
		if (skb->len >= TCP_MIN_RCVMSS + newtp->tcp_header_len)
			newtp->ack.last_seg_size = skb->len - newtp->tcp_header_len;
		newtp->mss_clamp = req->mss;
		TCP_ECN_openreq_child(newtp, req);
		if (newtp->ecn_flags & TCP_ECN_OK)
			newsk->sk_no_largesend = 1;

		tcp_ca_init(newtp);

		TCP_INC_STATS_BH(TCP_MIB_PASSIVEOPENS);
	}
	return newsk;
}
/*
 * Process an incoming packet for SYN_RECV sockets represented
 * as an open_request.
 */

struct sock *tcp_check_req(struct sock *sk, struct sk_buff *skb,
			   struct open_request *req,
			   struct open_request **prev)
{
	struct tcphdr *th = skb->h.th;
	struct tcp_opt *tp = tcp_sk(sk);
	u32 flg = tcp_flag_word(th) & (TCP_FLAG_RST|TCP_FLAG_SYN|TCP_FLAG_ACK);
	int paws_reject = 0;
	struct tcp_opt ttp;
	struct sock *child;

	ttp.saw_tstamp = 0;
	if (th->doff > (sizeof(struct tcphdr) >> 2)) {
		tcp_parse_options(skb, &ttp, 0);

		if (ttp.saw_tstamp) {
			ttp.ts_recent = req->ts_recent;
			/* We do not store the true stamp, but it is not
			 * required, it can be estimated (approximately)
			 * from other data.
			 */
			ttp.ts_recent_stamp = xtime.tv_sec - ((TCP_TIMEOUT_INIT/HZ) << req->retrans);
			paws_reject = tcp_paws_check(&ttp, th->rst);
		}
	}

	/* Check for pure retransmitted SYN. */
	if (TCP_SKB_CB(skb)->seq == req->rcv_isn &&
	    flg == TCP_FLAG_SYN &&
	    !paws_reject) {
		/*
		 * RFC793 draws (Incorrectly! It was fixed in RFC1122)
		 * this case on figure 6 and figure 8, but the formal
		 * protocol description says NOTHING.
		 * To be more exact, it says that we should send an ACK,
		 * because this segment (at least, if it has no data)
		 * is out of window.
		 *
		 * CONCLUSION: RFC793 (even with RFC1122) DOES NOT
		 * describe the SYN-RECV state. All the description
		 * is wrong, we cannot believe it and should
		 * rely only on common sense and implementation
		 * experience.
		 *
		 * Enforce "SYN-ACK" according to figure 8, figure 6
		 * of RFC793, fixed by RFC1122.
		 */
		req->class->rtx_syn_ack(sk, req, NULL);
		return NULL;
	}

	/* Further reproduces the section "SEGMENT ARRIVES"
	   for state SYN-RECEIVED of RFC793.
	   It is broken, however; it fails to work only
	   when SYNs are crossed.

	   You would think that SYN crossing is impossible here, since
	   we should have a SYN_SENT socket (from connect()) on our end,
	   but this is not true if the crossed SYNs were sent to both
	   ends by a malicious third party. We must defend against this,
	   and to do that we first verify the ACK (as per RFC793, page
	   36) and reset if it is invalid. Is this a true full defense?
	   To convince ourselves, let us consider a way in which the ACK
	   test can still pass in this 'malicious crossed SYNs' case.
	   The malicious sender sends identical SYNs (and thus identical
	   sequence numbers) to both A and B:

	   A: gets SYN, seq=7
	   B: gets SYN, seq=7

	   By our good fortune, both A and B select the same initial
	   send sequence number of seven :-)

	   A: sends SYN|ACK, seq=7, ack_seq=8
	   B: sends SYN|ACK, seq=7, ack_seq=8

	   So we are now A eating this SYN|ACK, and the ACK test passes. So
	   does the sequence test, the SYN is truncated, and thus we consider
	   it a bare ACK.

	   If tp->defer_accept, we silently drop this bare ACK. Otherwise,
	   we create an established connection. Both ends (listening sockets)
	   accept the new incoming connection and try to talk to each other. 8-)

	   Note: This case is both harmless and rare. The possibility is about
	   the same as us discovering intelligent life on another planet
	   tomorrow.

	   But generally, we should (RFC lies!) accept the ACK
	   of our SYNACK both here and in tcp_rcv_state_process().
	   tcp_rcv_state_process() does not, hence we do not either.

	   Note that the case is absolutely generic:
	   we cannot optimize anything here without
	   violating the protocol. All the checks must be made
	   before the attempt to create a socket.
	 */

	/* RFC793 page 36: "If the connection is in any non-synchronized state ...
	 * and the incoming segment acknowledges something not yet
	 * sent (the segment carries an unacceptable ACK) ...
	 * a reset is sent."
	 *
	 * Invalid ACK: a reset will be sent by the listening socket.
	 */
	if ((flg & TCP_FLAG_ACK) &&
	    (TCP_SKB_CB(skb)->ack_seq != req->snt_isn + 1))
		return sk;

	/* Also, it would not be a bad idea to check rcv_tsecr, which
	 * is essentially an ACK extension; too-early or too-late values
	 * should cause a reset in unsynchronized states.
	 */

	/* RFC793: "first check sequence number". */

	if (paws_reject || !tcp_in_window(TCP_SKB_CB(skb)->seq, TCP_SKB_CB(skb)->end_seq,
					  req->rcv_isn + 1, req->rcv_isn + 1 + req->rcv_wnd)) {
		/* Out of window: send ACK and drop. */
		if (!(flg & TCP_FLAG_RST))
			req->class->send_ack(skb, req);
		if (paws_reject)
			NET_INC_STATS_BH(LINUX_MIB_PAWSESTABREJECTED);
		return NULL;
	}

	/* In sequence, PAWS is OK. */

	if (ttp.saw_tstamp && !after(TCP_SKB_CB(skb)->seq, req->rcv_isn + 1))
		req->ts_recent = ttp.rcv_tsval;
	if (TCP_SKB_CB(skb)->seq == req->rcv_isn) {
		/* Truncate SYN, it is out of window starting
		   at req->rcv_isn + 1. */
		flg &= ~TCP_FLAG_SYN;
	}

	/* RFC793: "second check the RST bit" and
	 *	   "fourth, check the SYN bit"
	 */
	if (flg & (TCP_FLAG_RST|TCP_FLAG_SYN))
		goto embryonic_reset;

	/* ACK sequence verified above, just make sure ACK is
	 * set.  If ACK is not set, just silently drop the packet.
	 */
	if (!(flg & TCP_FLAG_ACK))
		return NULL;

	/* If TCP_DEFER_ACCEPT is set, drop bare ACK. */
	if (tp->defer_accept && TCP_SKB_CB(skb)->end_seq == req->rcv_isn + 1) {
		req->acked = 1;
		return NULL;
	}

	/* OK, ACK is valid, create the big socket and
	 * feed this segment to it. It will repeat all
	 * the tests. THIS SEGMENT MUST MOVE THE SOCKET TO
	 * ESTABLISHED STATE. If it is dropped after the
	 * socket is created, wait for troubles.
	 */
	child = tp->af_specific->syn_recv_sock(sk, skb, req, NULL);
	if (child == NULL)
		goto listen_overflow;

	sk_set_owner(child, sk->sk_owner);
	tcp_synq_unlink(tp, req, prev);
	tcp_synq_removed(sk, req);

	tcp_acceptq_queue(sk, req, child);
	return child;

listen_overflow:
	if (!sysctl_tcp_abort_on_overflow) {
		req->acked = 1;
		return NULL;
	}

embryonic_reset:
	NET_INC_STATS_BH(LINUX_MIB_EMBRYONICRSTS);
	if (!(flg & TCP_FLAG_RST))
		req->class->send_reset(skb);

	tcp_synq_drop(sk, req, prev);
	return NULL;
}

/*
 * Queue the segment on the new socket if the new socket is active,
 * otherwise we just shortcircuit this and continue with
 * the new socket.
 */

int tcp_child_process(struct sock *parent, struct sock *child,
		      struct sk_buff *skb)
{
	int ret = 0;
	int state = child->sk_state;

	if (!sock_owned_by_user(child)) {
		ret = tcp_rcv_state_process(child, skb, skb->h.th, skb->len);

		/* Wakeup parent, send SIGIO */
		if (state == TCP_SYN_RECV && child->sk_state != state)
			parent->sk_data_ready(parent, 0);
	} else {
		/* Alas, it is possible again, because we do a lookup
		 * in the main socket hash table and the lock on the
		 * listening socket does not protect us any more.
		 */
		sk_add_backlog(child, skb);
	}

	bh_unlock_sock(child);
	sock_put(child);
	return ret;
}

EXPORT_SYMBOL(tcp_check_req);
EXPORT_SYMBOL(tcp_child_process);
EXPORT_SYMBOL(tcp_create_openreq_child);
EXPORT_SYMBOL(tcp_timewait_state_process);
EXPORT_SYMBOL(tcp_tw_deschedule);

#ifdef CONFIG_SYSCTL
EXPORT_SYMBOL(sysctl_tcp_tw_recycle);
#endif