net/ipv4/tcp_minisocks.c
1 /*
2 * INET An implementation of the TCP/IP protocol suite for the LINUX
3 * operating system. INET is implemented using the BSD Socket
4 * interface as the means of communication with the user level.
6 * Implementation of the Transmission Control Protocol(TCP).
8 * Version: $Id: tcp_minisocks.c,v 1.15 2002/02/01 22:01:04 davem Exp $
10 * Authors: Ross Biro
11 * Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
12 * Mark Evans, <evansmp@uhura.aston.ac.uk>
13 * Corey Minyard <wf-rch!minyard@relay.EU.net>
14 * Florian La Roche, <flla@stud.uni-sb.de>
15 * Charles Hedrick, <hedrick@klinzhai.rutgers.edu>
16 * Linus Torvalds, <torvalds@cs.helsinki.fi>
17 * Alan Cox, <gw4pts@gw4pts.ampr.org>
18 * Matthew Dillon, <dillon@apollo.west.oic.com>
19 * Arnt Gulbrandsen, <agulbra@nvg.unit.no>
20 * Jorge Cwik, <jorge@laser.satlink.net>
23 #include <linux/config.h>
24 #include <linux/mm.h>
25 #include <linux/module.h>
26 #include <linux/sysctl.h>
27 #include <linux/workqueue.h>
28 #include <net/tcp.h>
29 #include <net/inet_common.h>
30 #include <net/xfrm.h>
32 #ifdef CONFIG_SYSCTL
33 #define SYNC_INIT 0 /* let the user enable it */
34 #else
35 #define SYNC_INIT 1
36 #endif
38 int sysctl_tcp_tw_recycle;
39 int sysctl_tcp_max_tw_buckets = NR_FILE*2;
41 int sysctl_tcp_syncookies = SYNC_INIT;
42 int sysctl_tcp_abort_on_overflow;
44 static void tcp_tw_schedule(struct tcp_tw_bucket *tw, int timeo);
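/* Editor's note: descriptive comment added for clarity.
 * Segment acceptability test (RFC 793, "SEGMENT ARRIVES"): accept the
 * segment [seq, end_seq) if it starts at the left edge of the receive
 * window [s_win, e_win), overlaps the window, or is an empty segment
 * sitting exactly at the right edge.
 */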
46 static __inline__ int tcp_in_window(u32 seq, u32 end_seq, u32 s_win, u32 e_win)
48 if (seq == s_win)
49 return 1;
50 if (after(end_seq, s_win) && before(seq, e_win))
51 return 1;
52 return (seq == e_win && seq == end_seq);
55 /* New-style handling of TIME_WAIT sockets. */
57 int tcp_tw_count;
60 /* Must be called with locally disabled BHs. */
61 static void tcp_timewait_kill(struct tcp_tw_bucket *tw)
63 struct inet_ehash_bucket *ehead;
64 struct inet_bind_hashbucket *bhead;
65 struct inet_bind_bucket *tb;
67 /* Unlink from established hashes. */
68 ehead = &tcp_ehash[tw->tw_hashent];
69 write_lock(&ehead->lock);
70 if (hlist_unhashed(&tw->tw_node)) {
71 write_unlock(&ehead->lock);
72 return;
74 __hlist_del(&tw->tw_node);
75 sk_node_init(&tw->tw_node);
76 write_unlock(&ehead->lock);
78 /* Disassociate with bind bucket. */
79 bhead = &tcp_bhash[inet_bhashfn(tw->tw_num, tcp_bhash_size)];
80 spin_lock(&bhead->lock);
81 tb = tw->tw_tb;
82 __hlist_del(&tw->tw_bind_node);
83 tw->tw_tb = NULL;
84 inet_bind_bucket_destroy(tcp_bucket_cachep, tb);
85 spin_unlock(&bhead->lock);
87 #ifdef SOCK_REFCNT_DEBUG
88 if (atomic_read(&tw->tw_refcnt) != 1) {
89 printk(KERN_DEBUG "tw_bucket %p refcnt=%d\n", tw,
90 atomic_read(&tw->tw_refcnt));
92 #endif
93 tcp_tw_put(tw);
96 /*
97 * * Main purpose of TIME-WAIT state is to close connection gracefully,
98 * when one of the ends sits in LAST-ACK or CLOSING retransmitting a FIN
99 * (and, probably, a tail of data) and one or more of our ACKs are lost.
100 * * What is the TIME-WAIT timeout? It is associated with the maximal packet
101 * lifetime in the internet, which leads to the wrong conclusion that
102 * it is set to catch "old duplicate segments" wandering out of their path.
103 * That is not quite correct. This timeout is calculated so that it exceeds
104 * the maximal retransmission timeout by enough to allow one (or more)
105 * segments sent by the peer, and our ACKs, to be lost. This time may be calculated from the RTO.
106 * * When a TIME-WAIT socket receives an RST, it means that the other end
107 * has finally closed and we are allowed to kill TIME-WAIT too.
108 * * Second purpose of TIME-WAIT is catching old duplicate segments.
109 * Well, certainly it is pure paranoia, but if we load TIME-WAIT
110 * with this semantics, we MUST NOT kill TIME-WAIT state with RSTs.
111 * * If we invented some more clever way to catch duplicates
112 * (f.e. based on PAWS), we could truncate TIME-WAIT to several RTOs.
114 * The algorithm below is based on FORMAL INTERPRETATION of RFCs.
115 * When you compare it to RFCs, please, read section SEGMENT ARRIVES
116 * from the very beginning.
118 * NOTE. With recycling (and later with fin-wait-2) TW bucket
119 * is _not_ stateless. It means that, strictly speaking, we must
120 * spinlock it. I do not want to! Well, the probability of misbehaviour
121 * is ridiculously low and, it seems, we could use some mb() tricks
122 * to avoid misreading sequence numbers, states etc. --ANK
124 enum tcp_tw_status
125 tcp_timewait_state_process(struct tcp_tw_bucket *tw, struct sk_buff *skb,
126 struct tcphdr *th, unsigned len)
128 struct tcp_options_received tmp_opt;
129 int paws_reject = 0;
131 tmp_opt.saw_tstamp = 0;
132 if (th->doff > (sizeof(struct tcphdr) >> 2) && tw->tw_ts_recent_stamp) {
133 tcp_parse_options(skb, &tmp_opt, 0);
135 if (tmp_opt.saw_tstamp) {
136 tmp_opt.ts_recent = tw->tw_ts_recent;
137 tmp_opt.ts_recent_stamp = tw->tw_ts_recent_stamp;
138 paws_reject = tcp_paws_check(&tmp_opt, th->rst);
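/* Editor's note: the paws_reject set above is non-zero when the segment's
 * timestamp fails the PAWS test (RFC 1323) against the last timestamp we
 * recorded for this peer while it owned the connection.
 */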
142 if (tw->tw_substate == TCP_FIN_WAIT2) {
143 /* Just repeat all the checks of tcp_rcv_state_process() */
145 /* Out of window, send ACK */
146 if (paws_reject ||
147 !tcp_in_window(TCP_SKB_CB(skb)->seq, TCP_SKB_CB(skb)->end_seq,
148 tw->tw_rcv_nxt,
149 tw->tw_rcv_nxt + tw->tw_rcv_wnd))
150 return TCP_TW_ACK;
152 if (th->rst)
153 goto kill;
155 if (th->syn && !before(TCP_SKB_CB(skb)->seq, tw->tw_rcv_nxt))
156 goto kill_with_rst;
158 /* Dup ACK? */
159 if (!after(TCP_SKB_CB(skb)->end_seq, tw->tw_rcv_nxt) ||
160 TCP_SKB_CB(skb)->end_seq == TCP_SKB_CB(skb)->seq) {
161 tcp_tw_put(tw);
162 return TCP_TW_SUCCESS;
165 /* New data or FIN. If new data arrive after half-duplex close,
166 * reset.
168 if (!th->fin ||
169 TCP_SKB_CB(skb)->end_seq != tw->tw_rcv_nxt + 1) {
170 kill_with_rst:
171 tcp_tw_deschedule(tw);
172 tcp_tw_put(tw);
173 return TCP_TW_RST;
176 /* FIN arrived, enter true time-wait state. */
177 tw->tw_substate = TCP_TIME_WAIT;
178 tw->tw_rcv_nxt = TCP_SKB_CB(skb)->end_seq;
179 if (tmp_opt.saw_tstamp) {
180 tw->tw_ts_recent_stamp = xtime.tv_sec;
181 tw->tw_ts_recent = tmp_opt.rcv_tsval;
184 /* I am ashamed, but I failed to make it more elegant.
185 * Yes, it is a direct reference to IP, which is impossible
186 * to generalize to IPv6. Taking into account that IPv6
187 * does not understand recycling in any case, it is not
188 * a big problem in practice. --ANK */
189 if (tw->tw_family == AF_INET &&
190 sysctl_tcp_tw_recycle && tw->tw_ts_recent_stamp &&
191 tcp_v4_tw_remember_stamp(tw))
192 tcp_tw_schedule(tw, tw->tw_timeout);
193 else
194 tcp_tw_schedule(tw, TCP_TIMEWAIT_LEN);
195 return TCP_TW_ACK;
199 * Now real TIME-WAIT state.
201 * RFC 1122:
202 * "When a connection is [...] on TIME-WAIT state [...]
203 * [a TCP] MAY accept a new SYN from the remote TCP to
204 * reopen the connection directly, if it:
206 * (1) assigns its initial sequence number for the new
207 * connection to be larger than the largest sequence
208 * number it used on the previous connection incarnation,
209 * and
211 * (2) returns to TIME-WAIT state if the SYN turns out
212 * to be an old duplicate".
215 if (!paws_reject &&
216 (TCP_SKB_CB(skb)->seq == tw->tw_rcv_nxt &&
217 (TCP_SKB_CB(skb)->seq == TCP_SKB_CB(skb)->end_seq || th->rst))) {
218 /* In window segment, it may be only reset or bare ack. */
220 if (th->rst) {
221 /* This is TIME_WAIT assassination, in two flavors.
222 * Oh well... nobody has a sufficient solution to this
223 * protocol bug yet.
225 if (sysctl_tcp_rfc1337 == 0) {
226 kill:
227 tcp_tw_deschedule(tw);
228 tcp_tw_put(tw);
229 return TCP_TW_SUCCESS;
232 tcp_tw_schedule(tw, TCP_TIMEWAIT_LEN);
234 if (tmp_opt.saw_tstamp) {
235 tw->tw_ts_recent = tmp_opt.rcv_tsval;
236 tw->tw_ts_recent_stamp = xtime.tv_sec;
239 tcp_tw_put(tw);
240 return TCP_TW_SUCCESS;
243 /* Out of window segment.
245 All the segments are ACKed immediately.
247 The only exception is a new SYN. We accept it, if it is
248 not an old duplicate and we are not in danger of being killed
249 by delayed old duplicates. The RFC check (that it has a
250 newer sequence number) works at rates <40Mbit/sec.
251 However, if PAWS works, it is reliable, and even more,
252 we may relax the silly seq space cutoff.
254 RED-PEN: we violate the main RFC requirement: if this SYN turns out to be an
255 old duplicate (i.e. we receive an RST in reply to our SYN-ACK),
256 we must return the socket to time-wait state. It is not good,
257 but not fatal yet.
260 if (th->syn && !th->rst && !th->ack && !paws_reject &&
261 (after(TCP_SKB_CB(skb)->seq, tw->tw_rcv_nxt) ||
262 (tmp_opt.saw_tstamp && (s32)(tw->tw_ts_recent - tmp_opt.rcv_tsval) < 0))) {
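/* Editor's note: the ISN chosen below sits comfortably above anything the
 * old incarnation can have used (old snd_nxt plus a maximal unscaled
 * window); zero is avoided because the connection-request path treats
 * TCP_SKB_CB(skb)->when == 0 as "no ISN supplied".
 */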
263 u32 isn = tw->tw_snd_nxt + 65535 + 2;
264 if (isn == 0)
265 isn++;
266 TCP_SKB_CB(skb)->when = isn;
267 return TCP_TW_SYN;
270 if (paws_reject)
271 NET_INC_STATS_BH(LINUX_MIB_PAWSESTABREJECTED);
273 if(!th->rst) {
274 /* In this case we must reset the TIMEWAIT timer.
276 * If it is an ACKless SYN it may be both an old duplicate
277 * and a new good SYN with a random sequence number < rcv_nxt.
278 * Do not reschedule in the latter case.
280 if (paws_reject || th->ack)
281 tcp_tw_schedule(tw, TCP_TIMEWAIT_LEN);
283 /* Send ACK. Note, we do not put the bucket,
284 * it will be released by caller.
286 return TCP_TW_ACK;
288 tcp_tw_put(tw);
289 return TCP_TW_SUCCESS;
292 /* Enter the time wait state. This is called with locally disabled BH.
293 * Essentially we whip up a timewait bucket, copy the
294 * relevant info into it from the SK, and mess with hash chains
295 * and list linkage.
297 static void __tcp_tw_hashdance(struct sock *sk, struct tcp_tw_bucket *tw)
299 const struct inet_sock *inet = inet_sk(sk);
300 struct inet_ehash_bucket *ehead = &tcp_ehash[sk->sk_hashent];
301 struct inet_bind_hashbucket *bhead;
302 /* Step 1: Put TW into bind hash. Original socket stays there too.
303 Note that any socket with inet->num != 0 MUST be bound in the
304 binding cache, even if it is closed.
306 bhead = &tcp_bhash[inet_bhashfn(inet->num, tcp_bhash_size)];
307 spin_lock(&bhead->lock);
308 tw->tw_tb = inet->bind_hash;
309 BUG_TRAP(inet->bind_hash);
310 tw_add_bind_node(tw, &tw->tw_tb->owners);
311 spin_unlock(&bhead->lock);
313 write_lock(&ehead->lock);
315 /* Step 2: Remove SK from established hash. */
316 if (__sk_del_node_init(sk))
317 sock_prot_dec_use(sk->sk_prot);
319 /* Step 3: Hash TW into TIMEWAIT half of established hash table. */
320 tw_add_node(tw, &(ehead + tcp_ehash_size)->chain);
321 atomic_inc(&tw->tw_refcnt);
323 write_unlock(&ehead->lock);
327 * Move a socket to time-wait or dead fin-wait-2 state.
329 void tcp_time_wait(struct sock *sk, int state, int timeo)
331 struct tcp_tw_bucket *tw = NULL;
332 struct tcp_sock *tp = tcp_sk(sk);
333 int recycle_ok = 0;
335 if (sysctl_tcp_tw_recycle && tp->rx_opt.ts_recent_stamp)
336 recycle_ok = tp->af_specific->remember_stamp(sk);
338 if (tcp_tw_count < sysctl_tcp_max_tw_buckets)
339 tw = kmem_cache_alloc(tcp_timewait_cachep, SLAB_ATOMIC);
341 if(tw != NULL) {
342 struct inet_sock *inet = inet_sk(sk);
343 int rto = (tp->rto<<2) - (tp->rto>>1);
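/* Editor's note: the rto above is (4 - 0.5) * tp->rto == 3.5*RTO; see the
 * rationale before tcp_tw_schedule() for why 3.5*RTO bounds a recycled
 * TIME-WAIT period.
 */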
345 /* Give us an identity. */
346 tw->tw_daddr = inet->daddr;
347 tw->tw_rcv_saddr = inet->rcv_saddr;
348 tw->tw_bound_dev_if = sk->sk_bound_dev_if;
349 tw->tw_num = inet->num;
350 tw->tw_state = TCP_TIME_WAIT;
351 tw->tw_substate = state;
352 tw->tw_sport = inet->sport;
353 tw->tw_dport = inet->dport;
354 tw->tw_family = sk->sk_family;
355 tw->tw_reuse = sk->sk_reuse;
356 tw->tw_rcv_wscale = tp->rx_opt.rcv_wscale;
357 atomic_set(&tw->tw_refcnt, 1);
359 tw->tw_hashent = sk->sk_hashent;
360 tw->tw_rcv_nxt = tp->rcv_nxt;
361 tw->tw_snd_nxt = tp->snd_nxt;
362 tw->tw_rcv_wnd = tcp_receive_window(tp);
363 tw->tw_ts_recent = tp->rx_opt.ts_recent;
364 tw->tw_ts_recent_stamp = tp->rx_opt.ts_recent_stamp;
365 tw_dead_node_init(tw);
367 #if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
368 if (tw->tw_family == PF_INET6) {
369 struct ipv6_pinfo *np = inet6_sk(sk);
371 ipv6_addr_copy(&tw->tw_v6_daddr, &np->daddr);
372 ipv6_addr_copy(&tw->tw_v6_rcv_saddr, &np->rcv_saddr);
373 tw->tw_v6_ipv6only = np->ipv6only;
374 } else {
375 memset(&tw->tw_v6_daddr, 0, sizeof(tw->tw_v6_daddr));
376 memset(&tw->tw_v6_rcv_saddr, 0, sizeof(tw->tw_v6_rcv_saddr));
377 tw->tw_v6_ipv6only = 0;
379 #endif
380 /* Linkage updates. */
381 __tcp_tw_hashdance(sk, tw);
383 /* Get the TIME_WAIT timeout firing. */
384 if (timeo < rto)
385 timeo = rto;
387 if (recycle_ok) {
388 tw->tw_timeout = rto;
389 } else {
390 tw->tw_timeout = TCP_TIMEWAIT_LEN;
391 if (state == TCP_TIME_WAIT)
392 timeo = TCP_TIMEWAIT_LEN;
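/* Editor's note: with per-peer timestamp recycling the bucket only needs
 * to live for about 3.5*RTO; otherwise a real TIME-WAIT socket waits the
 * full TCP_TIMEWAIT_LEN (60 seconds), and a dead FIN-WAIT-2 socket keeps
 * the caller-supplied timeout (clamped to at least 3.5*RTO above).
 */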
395 tcp_tw_schedule(tw, timeo);
396 tcp_tw_put(tw);
397 } else {
398 /* Sorry, if we're out of memory, just CLOSE this
399 * socket up. We've got bigger problems than
400 * non-graceful socket closings.
402 if (net_ratelimit())
403 printk(KERN_INFO "TCP: time wait bucket table overflow\n");
406 tcp_update_metrics(sk);
407 tcp_done(sk);
410 /* Kill off TIME_WAIT sockets once their lifetime has expired. */
411 static int tcp_tw_death_row_slot;
413 static void tcp_twkill(unsigned long);
415 /* TIME_WAIT reaping mechanism. */
416 #define TCP_TWKILL_SLOTS 8 /* Please keep this a power of 2. */
417 #define TCP_TWKILL_PERIOD (TCP_TIMEWAIT_LEN/TCP_TWKILL_SLOTS)
419 #define TCP_TWKILL_QUOTA 100
421 static struct hlist_head tcp_tw_death_row[TCP_TWKILL_SLOTS];
422 static DEFINE_SPINLOCK(tw_death_lock);
423 static struct timer_list tcp_tw_timer = TIMER_INITIALIZER(tcp_twkill, 0, 0);
424 static void twkill_work(void *);
425 static DECLARE_WORK(tcp_twkill_work, twkill_work, NULL);
426 static u32 twkill_thread_slots;
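/* Editor's note: long (non-recycled) TIME-WAIT timeouts are handled by
 * this coarse wheel: each bucket hangs off one of TCP_TWKILL_SLOTS hlists,
 * and tcp_twkill() reaps one slot every TCP_TWKILL_PERIOD jiffies, killing
 * at most about TCP_TWKILL_QUOTA buckets per timer run and deferring the
 * remainder of the slot to the twkill_work workqueue.
 */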
428 /* Returns non-zero if quota exceeded. */
429 static int tcp_do_twkill_work(int slot, unsigned int quota)
431 struct tcp_tw_bucket *tw;
432 struct hlist_node *node;
433 unsigned int killed;
434 int ret;
436 /* NOTE: compare this to the previous version, where the lock
437 * was released after detaching the chain. It was racy,
438 * because tw buckets are scheduled in a non-serialized context
439 * in 2.3 (with netfilter), and with softnet it is common, because
440 * soft irqs are not sequenced.
442 killed = 0;
443 ret = 0;
444 rescan:
445 tw_for_each_inmate(tw, node, &tcp_tw_death_row[slot]) {
446 __tw_del_dead_node(tw);
447 spin_unlock(&tw_death_lock);
448 tcp_timewait_kill(tw);
449 tcp_tw_put(tw);
450 killed++;
451 spin_lock(&tw_death_lock);
452 if (killed > quota) {
453 ret = 1;
454 break;
457 /* While we dropped tw_death_lock, another cpu may have
458 * killed off the next TW bucket in the list, therefore
459 * do a fresh re-read of the hlist head node with the
460 * lock reacquired. We still use the hlist traversal
461 * macro in order to get the prefetches.
463 goto rescan;
466 tcp_tw_count -= killed;
467 NET_ADD_STATS_BH(LINUX_MIB_TIMEWAITED, killed);
469 return ret;
472 static void tcp_twkill(unsigned long dummy)
474 int need_timer, ret;
476 spin_lock(&tw_death_lock);
478 if (tcp_tw_count == 0)
479 goto out;
481 need_timer = 0;
482 ret = tcp_do_twkill_work(tcp_tw_death_row_slot, TCP_TWKILL_QUOTA);
483 if (ret) {
484 twkill_thread_slots |= (1 << tcp_tw_death_row_slot);
485 mb();
486 schedule_work(&tcp_twkill_work);
487 need_timer = 1;
488 } else {
489 /* We purged the entire slot, anything left? */
490 if (tcp_tw_count)
491 need_timer = 1;
493 tcp_tw_death_row_slot =
494 ((tcp_tw_death_row_slot + 1) & (TCP_TWKILL_SLOTS - 1));
495 if (need_timer)
496 mod_timer(&tcp_tw_timer, jiffies + TCP_TWKILL_PERIOD);
497 out:
498 spin_unlock(&tw_death_lock);
501 extern void twkill_slots_invalid(void);
503 static void twkill_work(void *dummy)
505 int i;
507 if ((TCP_TWKILL_SLOTS - 1) > (sizeof(twkill_thread_slots) * 8))
508 twkill_slots_invalid();
510 while (twkill_thread_slots) {
511 spin_lock_bh(&tw_death_lock);
512 for (i = 0; i < TCP_TWKILL_SLOTS; i++) {
513 if (!(twkill_thread_slots & (1 << i)))
514 continue;
516 while (tcp_do_twkill_work(i, TCP_TWKILL_QUOTA) != 0) {
517 if (need_resched()) {
518 spin_unlock_bh(&tw_death_lock);
519 schedule();
520 spin_lock_bh(&tw_death_lock);
524 twkill_thread_slots &= ~(1 << i);
526 spin_unlock_bh(&tw_death_lock);
530 /* These are always called from BH context. See callers in
531 * tcp_input.c to verify this.
534 /* This is for handling early-kills of TIME_WAIT sockets. */
535 void tcp_tw_deschedule(struct tcp_tw_bucket *tw)
537 spin_lock(&tw_death_lock);
538 if (tw_del_dead_node(tw)) {
539 tcp_tw_put(tw);
540 if (--tcp_tw_count == 0)
541 del_timer(&tcp_tw_timer);
543 spin_unlock(&tw_death_lock);
544 tcp_timewait_kill(tw);
547 /* Short-time timewait calendar */
549 static int tcp_twcal_hand = -1;
550 static int tcp_twcal_jiffie;
551 static void tcp_twcal_tick(unsigned long);
552 static struct timer_list tcp_twcal_timer =
553 TIMER_INITIALIZER(tcp_twcal_tick, 0, 0);
554 static struct hlist_head tcp_twcal_row[TCP_TW_RECYCLE_SLOTS];
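/* Editor's note: short (recycled) timeouts use this finer-grained calendar
 * instead: tcp_tw_schedule() hangs the bucket on one of
 * TCP_TW_RECYCLE_SLOTS lists, each covering 2^TCP_TW_RECYCLE_TICK jiffies,
 * and tcp_twcal_tick() sweeps the wheel, killing expired buckets and
 * advancing the hand.
 */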
556 static void tcp_tw_schedule(struct tcp_tw_bucket *tw, int timeo)
558 struct hlist_head *list;
559 int slot;
561 /* timeout := RTO * 3.5
563 * 3.5 = 1+2+0.5 to wait for two retransmits.
565 * RATIONALE: if FIN arrived and we entered TIME-WAIT state,
566 * our ACK acking that FIN can be lost. If N subsequent retransmitted
567 * FINs (or previous segments) are lost (the probability of such an event
568 * is p^(N+1), where p is the probability of losing a single packet, and the
569 * time to detect the loss is about RTO*(2^N - 1) with exponential
570 * backoff). The normal timewait length is calculated so that we wait
571 * at least for one retransmitted FIN (the maximal RTO is 120 sec).
572 * [ BTW Linux, following BSD, violates this requirement, waiting
573 * only for 60 sec; we should wait at least 240 secs.
574 * Well, 240 consumes too much of resources 8)
576 * This interval is not reduced to catch old duplicates and
577 * responses to our wandering segments living for two MSLs.
578 * However, if we use PAWS to detect
579 * old duplicates, we can reduce the interval to bounds required
580 * by RTO, rather than MSL. So, if peer understands PAWS, we
581 * kill tw bucket after 3.5*RTO (it is important that this number
582 * is greater than TS tick!) and detect old duplicates with help
583 * of PAWS.
585 slot = (timeo + (1<<TCP_TW_RECYCLE_TICK) - 1) >> TCP_TW_RECYCLE_TICK;
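/* Editor's note: the slot computed above rounds timeo up to whole recycle
 * ticks (2^TCP_TW_RECYCLE_TICK jiffies each).  Small values are served by
 * the per-tick calendar below; anything that needs TCP_TW_RECYCLE_SLOTS
 * ticks or more falls back to the coarse death-row wheel.
 */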
587 spin_lock(&tw_death_lock);
589 /* Unlink it, if it was scheduled */
590 if (tw_del_dead_node(tw))
591 tcp_tw_count--;
592 else
593 atomic_inc(&tw->tw_refcnt);
595 if (slot >= TCP_TW_RECYCLE_SLOTS) {
596 /* Schedule to slow timer */
597 if (timeo >= TCP_TIMEWAIT_LEN) {
598 slot = TCP_TWKILL_SLOTS-1;
599 } else {
600 slot = (timeo + TCP_TWKILL_PERIOD-1) / TCP_TWKILL_PERIOD;
601 if (slot >= TCP_TWKILL_SLOTS)
602 slot = TCP_TWKILL_SLOTS-1;
604 tw->tw_ttd = jiffies + timeo;
605 slot = (tcp_tw_death_row_slot + slot) & (TCP_TWKILL_SLOTS - 1);
606 list = &tcp_tw_death_row[slot];
607 } else {
608 tw->tw_ttd = jiffies + (slot << TCP_TW_RECYCLE_TICK);
610 if (tcp_twcal_hand < 0) {
611 tcp_twcal_hand = 0;
612 tcp_twcal_jiffie = jiffies;
613 tcp_twcal_timer.expires = tcp_twcal_jiffie + (slot<<TCP_TW_RECYCLE_TICK);
614 add_timer(&tcp_twcal_timer);
615 } else {
616 if (time_after(tcp_twcal_timer.expires, jiffies + (slot<<TCP_TW_RECYCLE_TICK)))
617 mod_timer(&tcp_twcal_timer, jiffies + (slot<<TCP_TW_RECYCLE_TICK));
618 slot = (tcp_twcal_hand + slot)&(TCP_TW_RECYCLE_SLOTS-1);
620 list = &tcp_twcal_row[slot];
623 hlist_add_head(&tw->tw_death_node, list);
625 if (tcp_tw_count++ == 0)
626 mod_timer(&tcp_tw_timer, jiffies+TCP_TWKILL_PERIOD);
627 spin_unlock(&tw_death_lock);
630 void tcp_twcal_tick(unsigned long dummy)
632 int n, slot;
633 unsigned long j;
634 unsigned long now = jiffies;
635 int killed = 0;
636 int adv = 0;
638 spin_lock(&tw_death_lock);
639 if (tcp_twcal_hand < 0)
640 goto out;
642 slot = tcp_twcal_hand;
643 j = tcp_twcal_jiffie;
645 for (n=0; n<TCP_TW_RECYCLE_SLOTS; n++) {
646 if (time_before_eq(j, now)) {
647 struct hlist_node *node, *safe;
648 struct tcp_tw_bucket *tw;
650 tw_for_each_inmate_safe(tw, node, safe,
651 &tcp_twcal_row[slot]) {
652 __tw_del_dead_node(tw);
653 tcp_timewait_kill(tw);
654 tcp_tw_put(tw);
655 killed++;
657 } else {
658 if (!adv) {
659 adv = 1;
660 tcp_twcal_jiffie = j;
661 tcp_twcal_hand = slot;
664 if (!hlist_empty(&tcp_twcal_row[slot])) {
665 mod_timer(&tcp_twcal_timer, j);
666 goto out;
669 j += (1<<TCP_TW_RECYCLE_TICK);
670 slot = (slot+1)&(TCP_TW_RECYCLE_SLOTS-1);
672 tcp_twcal_hand = -1;
674 out:
675 if ((tcp_tw_count -= killed) == 0)
676 del_timer(&tcp_tw_timer);
677 NET_ADD_STATS_BH(LINUX_MIB_TIMEWAITKILLED, killed);
678 spin_unlock(&tw_death_lock);
681 /* This is not only more efficient than what we used to do, it eliminates
682 * a lot of code duplication between IPv4/IPv6 SYN recv processing. -DaveM
684 * Actually, we could save lots of memory writes here. tp of the listening
685 * socket contains all the necessary default parameters.
687 struct sock *tcp_create_openreq_child(struct sock *sk, struct request_sock *req, struct sk_buff *skb)
689 /* allocate the newsk from the same slab as the master sock;
690 * if not, at sk_free time we'll try to free it from the wrong
691 * slabcache (i.e. is it TCPv4 or v6?). This is handled through sk->sk_prot -acme */
692 struct sock *newsk = sk_alloc(PF_INET, GFP_ATOMIC, sk->sk_prot, 0);
694 if(newsk != NULL) {
695 struct inet_request_sock *ireq = inet_rsk(req);
696 struct tcp_request_sock *treq = tcp_rsk(req);
697 struct inet_sock *newinet = inet_sk(newsk);
698 struct tcp_sock *newtp;
699 struct sk_filter *filter;
701 memcpy(newsk, sk, sizeof(struct tcp_sock));
702 newsk->sk_state = TCP_SYN_RECV;
704 /* SANITY */
705 sk_node_init(&newsk->sk_node);
706 newinet->bind_hash = NULL;
708 /* Clone the TCP header template */
709 newinet->dport = ireq->rmt_port;
711 sock_lock_init(newsk);
712 bh_lock_sock(newsk);
714 rwlock_init(&newsk->sk_dst_lock);
715 newsk->sk_dst_cache = NULL;
716 atomic_set(&newsk->sk_rmem_alloc, 0);
717 skb_queue_head_init(&newsk->sk_receive_queue);
718 atomic_set(&newsk->sk_wmem_alloc, 0);
719 skb_queue_head_init(&newsk->sk_write_queue);
720 atomic_set(&newsk->sk_omem_alloc, 0);
721 newsk->sk_wmem_queued = 0;
722 newsk->sk_forward_alloc = 0;
724 sock_reset_flag(newsk, SOCK_DONE);
725 newsk->sk_userlocks = sk->sk_userlocks & ~SOCK_BINDPORT_LOCK;
726 newsk->sk_backlog.head = newsk->sk_backlog.tail = NULL;
727 newsk->sk_send_head = NULL;
728 rwlock_init(&newsk->sk_callback_lock);
729 skb_queue_head_init(&newsk->sk_error_queue);
730 newsk->sk_write_space = sk_stream_write_space;
732 if ((filter = newsk->sk_filter) != NULL)
733 sk_filter_charge(newsk, filter);
735 if (unlikely(xfrm_sk_clone_policy(newsk))) {
736 /* It is still a raw copy of the parent, so invalidate
737 * the destructor and do a plain sk_free() */
738 newsk->sk_destruct = NULL;
739 sk_free(newsk);
740 return NULL;
743 /* Now setup tcp_sock */
744 newtp = tcp_sk(newsk);
745 newtp->pred_flags = 0;
746 newtp->rcv_nxt = treq->rcv_isn + 1;
747 newtp->snd_nxt = treq->snt_isn + 1;
748 newtp->snd_una = treq->snt_isn + 1;
749 newtp->snd_sml = treq->snt_isn + 1;
751 tcp_prequeue_init(newtp);
753 tcp_init_wl(newtp, treq->snt_isn, treq->rcv_isn);
755 newtp->retransmits = 0;
756 newtp->backoff = 0;
757 newtp->srtt = 0;
758 newtp->mdev = TCP_TIMEOUT_INIT;
759 newtp->rto = TCP_TIMEOUT_INIT;
761 newtp->packets_out = 0;
762 newtp->left_out = 0;
763 newtp->retrans_out = 0;
764 newtp->sacked_out = 0;
765 newtp->fackets_out = 0;
766 newtp->snd_ssthresh = 0x7fffffff;
768 /* So many TCP implementations out there (incorrectly) count the
769 * initial SYN frame in their delayed-ACK and congestion control
770 * algorithms that we must have the following bandaid to talk
771 * efficiently to them. -DaveM
773 newtp->snd_cwnd = 2;
774 newtp->snd_cwnd_cnt = 0;
776 newtp->frto_counter = 0;
777 newtp->frto_highmark = 0;
779 newtp->ca_ops = &tcp_reno;
781 tcp_set_ca_state(newtp, TCP_CA_Open);
782 tcp_init_xmit_timers(newsk);
783 skb_queue_head_init(&newtp->out_of_order_queue);
784 newtp->rcv_wup = treq->rcv_isn + 1;
785 newtp->write_seq = treq->snt_isn + 1;
786 newtp->pushed_seq = newtp->write_seq;
787 newtp->copied_seq = treq->rcv_isn + 1;
789 newtp->rx_opt.saw_tstamp = 0;
791 newtp->rx_opt.dsack = 0;
792 newtp->rx_opt.eff_sacks = 0;
794 newtp->probes_out = 0;
795 newtp->rx_opt.num_sacks = 0;
796 newtp->urg_data = 0;
797 /* Deinitialize accept_queue to trap illegal accesses. */
798 memset(&newtp->accept_queue, 0, sizeof(newtp->accept_queue));
800 /* Back to base struct sock members. */
801 newsk->sk_err = 0;
802 newsk->sk_priority = 0;
803 atomic_set(&newsk->sk_refcnt, 2);
806 * Increment the counter in the same struct proto as the master
807 * sock (sk_refcnt_debug_inc uses newsk->sk_prot->socks, that
808 * is the same as sk->sk_prot->socks, as this field was copied
809 * with memcpy), same rationale as the first comment in this
810 * function.
812 * This _changes_ the previous behaviour, where
813 * tcp_create_openreq_child was always incrementing the
814 * equivalent of tcp_prot->socks (inet_sock_nr), so this has
815 * to be taken into account in all callers. -acme
817 sk_refcnt_debug_inc(newsk);
819 atomic_inc(&tcp_sockets_allocated);
821 if (sock_flag(newsk, SOCK_KEEPOPEN))
822 tcp_reset_keepalive_timer(newsk,
823 keepalive_time_when(newtp));
824 newsk->sk_socket = NULL;
825 newsk->sk_sleep = NULL;
827 newtp->rx_opt.tstamp_ok = ireq->tstamp_ok;
828 if((newtp->rx_opt.sack_ok = ireq->sack_ok) != 0) {
829 if (sysctl_tcp_fack)
830 newtp->rx_opt.sack_ok |= 2;
832 newtp->window_clamp = req->window_clamp;
833 newtp->rcv_ssthresh = req->rcv_wnd;
834 newtp->rcv_wnd = req->rcv_wnd;
835 newtp->rx_opt.wscale_ok = ireq->wscale_ok;
836 if (newtp->rx_opt.wscale_ok) {
837 newtp->rx_opt.snd_wscale = ireq->snd_wscale;
838 newtp->rx_opt.rcv_wscale = ireq->rcv_wscale;
839 } else {
840 newtp->rx_opt.snd_wscale = newtp->rx_opt.rcv_wscale = 0;
841 newtp->window_clamp = min(newtp->window_clamp, 65535U);
843 newtp->snd_wnd = ntohs(skb->h.th->window) << newtp->rx_opt.snd_wscale;
844 newtp->max_window = newtp->snd_wnd;
846 if (newtp->rx_opt.tstamp_ok) {
847 newtp->rx_opt.ts_recent = req->ts_recent;
848 newtp->rx_opt.ts_recent_stamp = xtime.tv_sec;
849 newtp->tcp_header_len = sizeof(struct tcphdr) + TCPOLEN_TSTAMP_ALIGNED;
850 } else {
851 newtp->rx_opt.ts_recent_stamp = 0;
852 newtp->tcp_header_len = sizeof(struct tcphdr);
854 if (skb->len >= TCP_MIN_RCVMSS+newtp->tcp_header_len)
855 newtp->ack.last_seg_size = skb->len-newtp->tcp_header_len;
856 newtp->rx_opt.mss_clamp = req->mss;
857 TCP_ECN_openreq_child(newtp, req);
858 if (newtp->ecn_flags&TCP_ECN_OK)
859 sock_set_flag(newsk, SOCK_NO_LARGESEND);
861 TCP_INC_STATS_BH(TCP_MIB_PASSIVEOPENS);
863 return newsk;
867 * Process an incoming packet for SYN_RECV sockets represented
868 * as a request_sock.
871 struct sock *tcp_check_req(struct sock *sk,struct sk_buff *skb,
872 struct request_sock *req,
873 struct request_sock **prev)
875 struct tcphdr *th = skb->h.th;
876 struct tcp_sock *tp = tcp_sk(sk);
877 u32 flg = tcp_flag_word(th) & (TCP_FLAG_RST|TCP_FLAG_SYN|TCP_FLAG_ACK);
878 int paws_reject = 0;
879 struct tcp_options_received tmp_opt;
880 struct sock *child;
882 tmp_opt.saw_tstamp = 0;
883 if (th->doff > (sizeof(struct tcphdr)>>2)) {
884 tcp_parse_options(skb, &tmp_opt, 0);
886 if (tmp_opt.saw_tstamp) {
887 tmp_opt.ts_recent = req->ts_recent;
888 /* We do not store the true stamp, but it is not required;
889 * it can be estimated (approximately)
890 * from other data.
892 tmp_opt.ts_recent_stamp = xtime.tv_sec - ((TCP_TIMEOUT_INIT/HZ)<<req->retrans);
893 paws_reject = tcp_paws_check(&tmp_opt, th->rst);
897 /* Check for pure retransmitted SYN. */
898 if (TCP_SKB_CB(skb)->seq == tcp_rsk(req)->rcv_isn &&
899 flg == TCP_FLAG_SYN &&
900 !paws_reject) {
902 * RFC793 draws (Incorrectly! It was fixed in RFC1122)
903 * this case on figure 6 and figure 8, but formal
904 * protocol description says NOTHING.
905 * To be more exact, it says that we should send ACK,
906 * because this segment (at least, if it has no data)
907 * is out of window.
909 * CONCLUSION: RFC793 (even with RFC1122) DOES NOT
910 * describe SYN-RECV state. All the description
911 * is wrong; we cannot believe it and should
912 * rely only on common sense and implementation
913 * experience.
915 * Enforce "SYN-ACK" according to figure 8, figure 6
916 * of RFC793, fixed by RFC1122.
918 req->rsk_ops->rtx_syn_ack(sk, req, NULL);
919 return NULL;
922 /* Further reproduces section "SEGMENT ARRIVES"
923 for state SYN-RECEIVED of RFC793.
924 It is broken, however: it fails only
925 when SYNs are crossed.
927 You would think that SYN crossing is impossible here, since
928 we should have a SYN_SENT socket (from connect()) on our end,
929 but this is not true if the crossed SYNs were sent to both
930 ends by a malicious third party. We must defend against this,
931 and to do that we first verify the ACK (as per RFC793, page
932 36) and reset if it is invalid. Is this a true full defense?
933 To convince ourselves, let us consider a way in which the ACK
934 test can still pass in this 'malicious crossed SYNs' case.
935 Malicious sender sends identical SYNs (and thus identical sequence
936 numbers) to both A and B:
938 A: gets SYN, seq=7
939 B: gets SYN, seq=7
941 By our good fortune, both A and B select the same initial
942 send sequence number of seven :-)
944 A: sends SYN|ACK, seq=7, ack_seq=8
945 B: sends SYN|ACK, seq=7, ack_seq=8
947 So we are now A eating this SYN|ACK, ACK test passes. So
948 does sequence test, SYN is truncated, and thus we consider
949 it a bare ACK.
951 If tp->defer_accept, we silently drop this bare ACK. Otherwise,
952 we create an established connection. Both ends (listening sockets)
953 accept the new incoming connection and try to talk to each other. 8-)
955 Note: this case is both harmless and rare. The possibility is about the
956 same as us discovering intelligent life on another planet tomorrow.
958 But generally, we should (the RFC lies!) accept an ACK
959 from a SYNACK both here and in tcp_rcv_state_process();
960 tcp_rcv_state_process() does not, hence we do not either.
962 Note that the case is absolutely generic:
963 we cannot optimize anything here without
964 violating protocol. All the checks must be made
965 before attempt to create socket.
968 /* RFC793 page 36: "If the connection is in any non-synchronized state ...
969 * and the incoming segment acknowledges something not yet
970 * sent (the segment carries an unacceptable ACK) ...
971 * a reset is sent."
973 * Invalid ACK: reset will be sent by listening socket
975 if ((flg & TCP_FLAG_ACK) &&
976 (TCP_SKB_CB(skb)->ack_seq != tcp_rsk(req)->snt_isn + 1))
977 return sk;
979 /* Also, it would not be such a bad idea to check rcv_tsecr, which
980 * is essentially an ACK extension; too-early or too-late values
981 * should cause a reset in unsynchronized states.
984 /* RFC793: "first check sequence number". */
986 if (paws_reject || !tcp_in_window(TCP_SKB_CB(skb)->seq, TCP_SKB_CB(skb)->end_seq,
987 tcp_rsk(req)->rcv_isn + 1, tcp_rsk(req)->rcv_isn + 1 + req->rcv_wnd)) {
988 /* Out of window: send ACK and drop. */
989 if (!(flg & TCP_FLAG_RST))
990 req->rsk_ops->send_ack(skb, req);
991 if (paws_reject)
992 NET_INC_STATS_BH(LINUX_MIB_PAWSESTABREJECTED);
993 return NULL;
996 /* In sequence, PAWS is OK. */
998 if (tmp_opt.saw_tstamp && !after(TCP_SKB_CB(skb)->seq, tcp_rsk(req)->rcv_isn + 1))
999 req->ts_recent = tmp_opt.rcv_tsval;
1001 if (TCP_SKB_CB(skb)->seq == tcp_rsk(req)->rcv_isn) {
1002 /* Truncate SYN, it is out of window starting
1003 at tcp_rsk(req)->rcv_isn + 1. */
1004 flg &= ~TCP_FLAG_SYN;
1007 /* RFC793: "second check the RST bit" and
1008 * "fourth, check the SYN bit"
1010 if (flg & (TCP_FLAG_RST|TCP_FLAG_SYN))
1011 goto embryonic_reset;
1013 /* ACK sequence verified above, just make sure ACK is
1014 * set. If ACK not set, just silently drop the packet.
1016 if (!(flg & TCP_FLAG_ACK))
1017 return NULL;
1019 /* If TCP_DEFER_ACCEPT is set, drop bare ACK. */
1020 if (tp->defer_accept && TCP_SKB_CB(skb)->end_seq == tcp_rsk(req)->rcv_isn + 1) {
1021 inet_rsk(req)->acked = 1;
1022 return NULL;
1025 /* OK, ACK is valid, create big socket and
1026 * feed this segment to it. It will repeat all
1027 * the tests. THIS SEGMENT MUST MOVE SOCKET TO
1028 * ESTABLISHED STATE. If it is dropped after the
1029 * socket is created, expect trouble.
1031 child = tp->af_specific->syn_recv_sock(sk, skb, req, NULL);
1032 if (child == NULL)
1033 goto listen_overflow;
1035 tcp_synq_unlink(tp, req, prev);
1036 tcp_synq_removed(sk, req);
1038 tcp_acceptq_queue(sk, req, child);
1039 return child;
1041 listen_overflow:
1042 if (!sysctl_tcp_abort_on_overflow) {
1043 inet_rsk(req)->acked = 1;
1044 return NULL;
1047 embryonic_reset:
1048 NET_INC_STATS_BH(LINUX_MIB_EMBRYONICRSTS);
1049 if (!(flg & TCP_FLAG_RST))
1050 req->rsk_ops->send_reset(skb);
1052 tcp_synq_drop(sk, req, prev);
1053 return NULL;
1057 * Queue segment on the new socket if the new socket is active,
1058 * otherwise we just shortcircuit this and continue with
1059 * the new socket.
1062 int tcp_child_process(struct sock *parent, struct sock *child,
1063 struct sk_buff *skb)
1065 int ret = 0;
1066 int state = child->sk_state;
1068 if (!sock_owned_by_user(child)) {
1069 ret = tcp_rcv_state_process(child, skb, skb->h.th, skb->len);
1071 /* Wakeup parent, send SIGIO */
1072 if (state == TCP_SYN_RECV && child->sk_state != state)
1073 parent->sk_data_ready(parent, 0);
1074 } else {
1075 /* Alas, it is possible again, because we do the lookup
1076 * in the main socket hash table and the lock on the listening
1077 * socket no longer protects us.
1079 sk_add_backlog(child, skb);
1082 bh_unlock_sock(child);
1083 sock_put(child);
1084 return ret;
1087 EXPORT_SYMBOL(tcp_check_req);
1088 EXPORT_SYMBOL(tcp_child_process);
1089 EXPORT_SYMBOL(tcp_create_openreq_child);
1090 EXPORT_SYMBOL(tcp_timewait_state_process);
1091 EXPORT_SYMBOL(tcp_tw_deschedule);