[davej-history.git] / net/ipv4/tcp_timer.c  (import of Linux 2.3.13)

/*
 * INET		An implementation of the TCP/IP protocol suite for the LINUX
 *		operating system. INET is implemented using the BSD Socket
 *		interface as the means of communication with the user level.
 *
 *		Implementation of the Transmission Control Protocol(TCP).
 *
 * Version:	$Id: tcp_timer.c,v 1.65 1999/07/02 11:26:35 davem Exp $
 *
 * Authors:	Ross Biro, <bir7@leland.Stanford.Edu>
 *		Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
 *		Mark Evans, <evansmp@uhura.aston.ac.uk>
 *		Corey Minyard <wf-rch!minyard@relay.EU.net>
 *		Florian La Roche, <flla@stud.uni-sb.de>
 *		Charles Hedrick, <hedrick@klinzhai.rutgers.edu>
 *		Linus Torvalds, <torvalds@cs.helsinki.fi>
 *		Alan Cox, <gw4pts@gw4pts.ampr.org>
 *		Matthew Dillon, <dillon@apollo.west.oic.com>
 *		Arnt Gulbrandsen, <agulbra@nvg.unit.no>
 *		Jorge Cwik, <jorge@laser.satlink.net>
 */

#include <net/tcp.h>

int sysctl_tcp_syn_retries = TCP_SYN_RETRIES;
int sysctl_tcp_keepalive_time = TCP_KEEPALIVE_TIME;
int sysctl_tcp_keepalive_probes = TCP_KEEPALIVE_PROBES;
int sysctl_tcp_retries1 = TCP_RETR1;
int sysctl_tcp_retries2 = TCP_RETR2;

static void tcp_sltimer_handler(unsigned long);
static void tcp_syn_recv_timer(unsigned long);
static void tcp_keepalive(unsigned long data);
static void tcp_twkill(unsigned long);
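
/* All of the periodic "slow" work (SYN-ACK retransmission for embryonic
 * connections, keepalive probing and TIME_WAIT reaping) is multiplexed
 * onto the single tcp_slow_timer below; tcp_slt_array gives each class
 * its own pending-event count, period and handler.
 */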
struct timer_list	tcp_slow_timer = {
	NULL, NULL,
	0, 0,
	tcp_sltimer_handler,
};

struct tcp_sl_timer tcp_slt_array[TCP_SLT_MAX] = {
	{ATOMIC_INIT(0), TCP_SYNACK_PERIOD, 0, tcp_syn_recv_timer},	/* SYNACK */
	{ATOMIC_INIT(0), TCP_KEEPALIVE_PERIOD, 0, tcp_keepalive},	/* KEEPALIVE */
	{ATOMIC_INIT(0), TCP_TWKILL_PERIOD, 0, tcp_twkill}		/* TWKILL */
};

const char timer_bug_msg[] = KERN_DEBUG "tcpbug: unknown timer value\n";

/*
 * Using different timers for retransmit, delayed acks and probes.
 * We may wish to use just one timer maintaining a list of expire jiffies
 * to optimize.
 */

void tcp_init_xmit_timers(struct sock *sk)
{
	init_timer(&sk->tp_pinfo.af_tcp.retransmit_timer);
	sk->tp_pinfo.af_tcp.retransmit_timer.function=&tcp_retransmit_timer;
	sk->tp_pinfo.af_tcp.retransmit_timer.data = (unsigned long) sk;

	init_timer(&sk->tp_pinfo.af_tcp.delack_timer);
	sk->tp_pinfo.af_tcp.delack_timer.function=&tcp_delack_timer;
	sk->tp_pinfo.af_tcp.delack_timer.data = (unsigned long) sk;

	init_timer(&sk->tp_pinfo.af_tcp.probe_timer);
	sk->tp_pinfo.af_tcp.probe_timer.function=&tcp_probe_timer;
	sk->tp_pinfo.af_tcp.probe_timer.data = (unsigned long) sk;
}

/*
 *	Reset the retransmission timer
 */

void tcp_reset_xmit_timer(struct sock *sk, int what, unsigned long when)
{
	struct tcp_opt *tp = &sk->tp_pinfo.af_tcp;

	switch (what) {
	case TIME_RETRANS:
		/* When setting the transmit timer the probe timer
		 * should not be set.
		 * The delayed ack timer can be set if we are changing the
		 * retransmit timer when removing acked frames.
		 */
		if(tp->probe_timer.prev)
			del_timer(&tp->probe_timer);
		mod_timer(&tp->retransmit_timer, jiffies+when);
		break;

	case TIME_DACK:
		mod_timer(&tp->delack_timer, jiffies+when);
		break;

	case TIME_PROBE0:
		mod_timer(&tp->probe_timer, jiffies+when);
		break;

	case TIME_WRITE:
		printk(KERN_DEBUG "bug: tcp_reset_xmit_timer TIME_WRITE\n");
		break;

	default:
		printk(KERN_DEBUG "bug: unknown timer value\n");
	};
}
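
/* Stop all three transmit timers; a timer is only deleted if it is
 * actually pending (timer.prev != NULL).  Used when the socket is being
 * torn down or has hit a fatal error.
 */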
void tcp_clear_xmit_timers(struct sock *sk)
{
	struct tcp_opt *tp = &sk->tp_pinfo.af_tcp;

	if(tp->retransmit_timer.prev)
		del_timer(&tp->retransmit_timer);
	if(tp->delack_timer.prev)
		del_timer(&tp->delack_timer);
	if(tp->probe_timer.prev)
		del_timer(&tp->probe_timer);
}
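
/* Deliver a pending error to the user (the soft error if one was recorded,
 * otherwise ETIMEDOUT), stop the transmit timers and take the connection
 * down.  Unless 'force' is set, a socket that was already closing is moved
 * to TIME_WAIT instead of straight to CLOSE.
 */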
static int tcp_write_err(struct sock *sk, int force)
{
	sk->err = sk->err_soft ? sk->err_soft : ETIMEDOUT;
	sk->error_report(sk);

	tcp_clear_xmit_timers(sk);

	/* Time wait the socket. */
	if (!force && ((1<<sk->state) & (TCPF_FIN_WAIT1|TCPF_FIN_WAIT2|TCPF_CLOSING))) {
		tcp_time_wait(sk);
	} else {
		/* Clean up time. */
		tcp_set_state(sk, TCP_CLOSE);
		return 0;
	}
	return 1;
}

/* A write timeout has occurred. Process the after effects. */
static int tcp_write_timeout(struct sock *sk)
{
	struct tcp_opt *tp = &(sk->tp_pinfo.af_tcp);

	/* Look for a 'soft' timeout. */
	if ((sk->state == TCP_ESTABLISHED &&
	     tp->retransmits && (tp->retransmits % TCP_QUICK_TRIES) == 0) ||
	    (sk->state != TCP_ESTABLISHED && tp->retransmits > sysctl_tcp_retries1)) {
		dst_negative_advice(&sk->dst_cache);
	}

	/* Have we tried to SYN too many times (repent repent 8)) */
	if(tp->retransmits > sysctl_tcp_syn_retries && sk->state==TCP_SYN_SENT) {
		tcp_write_err(sk, 1);
		/* Don't FIN, we got nothing back */
		return 0;
	}

	/* Has it gone just too far? */
	if (tp->retransmits > sysctl_tcp_retries2)
		return tcp_write_err(sk, 0);

	return 1;
}
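
/* Delayed-ACK timer.  If ACKs are still owed and the socket is not in use
 * by process context, send one now; otherwise re-arm the delayed-ACK
 * machinery and try again in HZ/10.
 */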
void tcp_delack_timer(unsigned long data)
{
	struct sock *sk = (struct sock*)data;

	bh_lock_sock(sk);
	if(!sk->zapped &&
	   sk->tp_pinfo.af_tcp.delayed_acks &&
	   sk->state != TCP_CLOSE) {
		if (!sk->lock.users)
			tcp_send_ack(sk);
		else
			tcp_send_delayed_ack(&(sk->tp_pinfo.af_tcp), HZ/10);
	}
	bh_unlock_sock(sk);
}
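
/* Zero-window probe timer.  Once more than sysctl_tcp_retries2 probes have
 * gone unanswered the connection is timed out, otherwise another window
 * probe is sent.
 */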
void tcp_probe_timer(unsigned long data)
{
	struct sock *sk = (struct sock*)data;
	struct tcp_opt *tp = &sk->tp_pinfo.af_tcp;

	if(sk->zapped)
		return;

	bh_lock_sock(sk);
	if (sk->lock.users) {
		/* Try again later. */
		tcp_reset_xmit_timer(sk, TIME_PROBE0, HZ/5);
		bh_unlock_sock(sk);
		return;
	}

	/* *WARNING* RFC 1122 forbids this
	 * It doesn't AFAIK, because we kill the retransmit timer -AK
	 * FIXME: We ought not to do it, Solaris 2.5 actually lists fixing
	 * this behaviour as a bug fix. [AC]
	 */
	if (tp->probes_out > sysctl_tcp_retries2) {
		if(sk->err_soft)
			sk->err = sk->err_soft;
		else
			sk->err = ETIMEDOUT;
		sk->error_report(sk);

		if ((1<<sk->state) & (TCPF_FIN_WAIT1|TCPF_FIN_WAIT2|TCPF_CLOSING)) {
			/* Time wait the socket. */
			tcp_time_wait(sk);
		} else {
			/* Clean up time. */
			tcp_set_state(sk, TCP_CLOSE);
		}
	} else {
		/* Only send another probe if we didn't close things up. */
		tcp_send_probe0(sk);
	}
	bh_unlock_sock(sk);
}
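
/* Run the keepalive state machine for one socket: once the connection has
 * been idle for sysctl_tcp_keepalive_time a probe is sent, and after
 * sysctl_tcp_keepalive_probes unanswered probes the connection is killed.
 * Returns the number of probes sent (0 or 1).
 */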
static __inline__ int tcp_keepopen_proc(struct sock *sk)
{
	int res = 0;

	if ((1<<sk->state) & (TCPF_ESTABLISHED|TCPF_CLOSE_WAIT|TCPF_FIN_WAIT2)) {
		struct tcp_opt *tp = &sk->tp_pinfo.af_tcp;
		__u32 elapsed = tcp_time_stamp - tp->rcv_tstamp;

		if (elapsed >= sysctl_tcp_keepalive_time) {
			if (tp->probes_out > sysctl_tcp_keepalive_probes) {
				if(sk->err_soft)
					sk->err = sk->err_soft;
				else
					sk->err = ETIMEDOUT;

				tcp_set_state(sk, TCP_CLOSE);
				sk->shutdown = SHUTDOWN_MASK;
				if (!sk->dead)
					sk->state_change(sk);
			} else {
				tp->probes_out++;
				tp->pending = TIME_KEEPOPEN;
				tcp_write_wakeup(sk);
				res = 1;
			}
		}
	}
	return res;
}

/* Kill off TIME_WAIT sockets once their lifetime has expired. */
int tcp_tw_death_row_slot = 0;
static struct tcp_tw_bucket *tcp_tw_death_row[TCP_TWKILL_SLOTS] =
	{ NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL };

extern void tcp_timewait_kill(struct tcp_tw_bucket *tw);
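
/* TIME_WAIT sockets are parked on a small wheel of death-row lists.
 * tcp_twkill() reaps one slot per period and advances the wheel, and
 * tcp_tw_schedule() inserts a bucket into the slot that will be reaped
 * last, so each bucket survives roughly one full rotation of the wheel.
 */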
static void tcp_twkill(unsigned long data)
{
	struct tcp_tw_bucket *tw;
	int killed = 0;

	/* The death-row tw chains are only ever touched
	 * in BH context so no locking is needed.
	 */
	tw = tcp_tw_death_row[tcp_tw_death_row_slot];
	tcp_tw_death_row[tcp_tw_death_row_slot] = NULL;
	tcp_tw_death_row_slot =
		((tcp_tw_death_row_slot + 1) & (TCP_TWKILL_SLOTS - 1));

	while(tw != NULL) {
		struct tcp_tw_bucket *next = tw->next_death;

		tcp_timewait_kill(tw);
		killed++;
		tw = next;
	}
	if(killed != 0) {
		struct tcp_sl_timer *slt = (struct tcp_sl_timer *)data;

		atomic_sub(killed, &slt->count);
	}
}

/* These are always called from BH context.  See callers in
 * tcp_input.c to verify this.
 */
void tcp_tw_schedule(struct tcp_tw_bucket *tw)
{
	int slot = (tcp_tw_death_row_slot - 1) & (TCP_TWKILL_SLOTS - 1);
	struct tcp_tw_bucket **tpp = &tcp_tw_death_row[slot];

	SOCKHASH_LOCK_WRITE_BH();
	if((tw->next_death = *tpp) != NULL)
		(*tpp)->pprev_death = &tw->next_death;
	*tpp = tw;
	tw->pprev_death = tpp;

	tw->death_slot = slot;
	SOCKHASH_UNLOCK_WRITE_BH();

	tcp_inc_slow_timer(TCP_SLT_TWKILL);
}

/* Happens rarely if at all, no care about scalability here. */
void tcp_tw_reschedule(struct tcp_tw_bucket *tw)
{
	struct tcp_tw_bucket **tpp;
	int slot;

	SOCKHASH_LOCK_WRITE_BH();
	if(tw->next_death)
		tw->next_death->pprev_death = tw->pprev_death;
	*tw->pprev_death = tw->next_death;
	tw->pprev_death = NULL;

	slot = (tcp_tw_death_row_slot - 1) & (TCP_TWKILL_SLOTS - 1);
	tpp = &tcp_tw_death_row[slot];
	if((tw->next_death = *tpp) != NULL)
		(*tpp)->pprev_death = &tw->next_death;
	*tpp = tw;
	tw->pprev_death = tpp;

	tw->death_slot = slot;
	SOCKHASH_UNLOCK_WRITE_BH();

	/* Timer was incremented when we first entered the table. */
}

/* This is for handling early-kills of TIME_WAIT sockets. */
void tcp_tw_deschedule(struct tcp_tw_bucket *tw)
{
	SOCKHASH_LOCK_WRITE_BH();
	if(tw->next_death)
		tw->next_death->pprev_death = tw->pprev_death;
	*tw->pprev_death = tw->next_death;
	tw->pprev_death = NULL;
	SOCKHASH_UNLOCK_WRITE_BH();

	tcp_dec_slow_timer(TCP_SLT_TWKILL);
}

/*
 *	Check all sockets for keepalive timer
 *	Called every 75 seconds
 *	This timer is started by af_inet init routine and is constantly
 *	running.
 *
 *	It might be better to maintain a count of sockets that need it using
 *	setsockopt/tcp_destroy_sk and only set the timer when needed.
 */

/*
 *	don't send over 5 keepopens at a time to avoid burstiness
 *	on big servers [AC]
 */
#define MAX_KA_PROBES	5

int sysctl_tcp_max_ka_probes = MAX_KA_PROBES;

/* Keepopen's are only valid for "established" TCP's, nicely our listener
 * hash gets rid of most of the useless testing, so we run through a couple
 * of the established hash chains each clock tick.  -DaveM
 *
 * And now, even more magic... TIME_WAIT TCP's cannot have keepalive probes
 * going off for them, so we only need check the first half of the established
 * hash table, even less testing under heavy load.
 *
 * I _really_ would rather do this by adding a new timer_struct to struct sock,
 * and this way only those who set the keepalive option will get the overhead.
 * The idea is you set it for 2 hours when the sock is first connected, when it
 * does fire off (if at all, most sockets die earlier) you check for the keepalive
 * option and also if the sock has been idle long enough to start probing.
 */
static void tcp_keepalive(unsigned long data)
{
	static int chain_start = 0;
	int count = 0;
	int i;

	SOCKHASH_LOCK_READ_BH();
	for(i = chain_start; i < (chain_start + ((tcp_ehash_size >> 1) >> 2)); i++) {
		struct sock *sk;

		sk = tcp_ehash[i];
		while(sk) {
			struct sock *next = sk->next;

			bh_lock_sock(sk);
			if (sk->keepopen && !sk->lock.users) {
				SOCKHASH_UNLOCK_READ_BH();
				count += tcp_keepopen_proc(sk);
				SOCKHASH_LOCK_READ_BH();
			}
			bh_unlock_sock(sk);
			if(count == sysctl_tcp_max_ka_probes)
				goto out;
			sk = next;
		}
	}
out:
	SOCKHASH_UNLOCK_READ_BH();
	chain_start = ((chain_start + ((tcp_ehash_size >> 1)>>2)) &
		       ((tcp_ehash_size >> 1) - 1));
}

/*
 *	The TCP retransmit timer. This lacks a few small details.
 *
 *	1.	An initial rtt timeout on the probe0 should cause what we can
 *		of the first write queue buffer to be split and sent.
 *	2.	On a 'major timeout' as defined by RFC1122 we shouldn't report
 *		ETIMEDOUT if we know an additional 'soft' error caused this.
 *		tcp_err should save a 'soft error' for us.
 *	[Unless someone has broken it then it does, except for one 2.0
 *	broken case of a send when the route/device is directly unreachable,
 *	and we error but should retry! - FIXME] [AC]
 */

void tcp_retransmit_timer(unsigned long data)
{
	struct sock *sk = (struct sock*)data;
	struct tcp_opt *tp = &sk->tp_pinfo.af_tcp;

	/* We are reset. We will send no more retransmits. */
	if(sk->zapped) {
		tcp_clear_xmit_timer(sk, TIME_RETRANS);
		return;
	}

	bh_lock_sock(sk);
	if (sk->lock.users) {
		/* Try again later */
		tcp_reset_xmit_timer(sk, TIME_RETRANS, HZ/20);
		bh_unlock_sock(sk);
		return;
	}

	/* Clear delay ack timer. */
	tcp_clear_xmit_timer(sk, TIME_DACK);

	/* RFC 2018, clear all 'sacked' flags in retransmission queue,
	 * the sender may have dropped out of order frames and we must
	 * send them out should this timer fire on us.
	 */
	if(tp->sack_ok) {
		struct sk_buff *skb = skb_peek(&sk->write_queue);

		while((skb != NULL) &&
		      (skb != tp->send_head) &&
		      (skb != (struct sk_buff *)&sk->write_queue)) {
			TCP_SKB_CB(skb)->sacked &=
				~(TCPCB_SACKED_ACKED | TCPCB_SACKED_RETRANS);
			skb = skb->next;
		}
	}

	/* Retransmission. */
	tp->retrans_head = NULL;
	tp->rexmt_done = 0;
	tp->fackets_out = 0;
	tp->retrans_out = 0;
	if (tp->retransmits == 0) {
		/* Remember window where we lost:
		 * "one half of the current window but at least 2 segments"
		 *
		 * Here "current window" means the effective one, which
		 * means it must be an accurate representation of our current
		 * sending rate _and_ the snd_wnd.
		 */
		tp->snd_ssthresh = tcp_recalc_ssthresh(tp);
		tp->snd_cwnd_cnt = 0;
		tp->snd_cwnd = 1;
	}

	tp->retransmits++;

	tp->dup_acks = 0;
	tp->high_seq = tp->snd_nxt;
	tcp_retransmit_skb(sk, skb_peek(&sk->write_queue));

	/* Increase the timeout each time we retransmit.  Note that
	 * we do not increase the rtt estimate.  rto is initialized
	 * from rtt, but increases here.  Jacobson (SIGCOMM 88) suggests
	 * that doubling rto each time is the least we can get away with.
	 * In KA9Q, Karn uses this for the first few times, and then
	 * goes to quadratic.  netBSD doubles, but only goes up to *64,
	 * and clamps at 1 to 64 sec afterwards.  Note that 120 sec is
	 * defined in the protocol as the maximum possible RTT.  I guess
	 * we'll have to use something other than TCP to talk to the
	 * University of Mars.
	 *
	 * PAWS allows us longer timeouts and large windows, so once
	 * implemented ftp to mars will work nicely. We will have to fix
	 * the 120 second clamps though!
	 */
	tp->backoff++;
	tp->rto = min(tp->rto << 1, 120*HZ);
	tcp_reset_xmit_timer(sk, TIME_RETRANS, tp->rto);

	tcp_write_timeout(sk);

	bh_unlock_sock(sk);
}

/*
 *	Slow timer for SYN-RECV sockets
 */
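
/* Walk a listener's SYN queue: requests that still have no child socket
 * either get their SYN-ACK retransmitted (with an exponentially backed-off
 * expiry, clamped to 120 seconds) and are re-queued, or are dropped once
 * they exceed sysctl_tcp_retries1 retransmissions.
 */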
static void tcp_do_syn_queue(struct sock *sk, struct tcp_opt *tp, unsigned long now)
{
	struct open_request *prev, *req;

	prev = (struct open_request *) &tp->syn_wait_queue;
	for(req = tp->syn_wait_queue; req; ) {
		struct open_request *next = req->dl_next;

		if (! req->sk) {
			tcp_synq_unlink(tp, req, prev);
			if(req->retrans >= sysctl_tcp_retries1) {
				(*req->class->destructor)(req);
				tcp_dec_slow_timer(TCP_SLT_SYNACK);
				tp->syn_backlog--;
				tcp_openreq_free(req);
				if (! tp->syn_wait_queue)
					break;
			} else {
				unsigned long timeo;
				struct open_request *rp;

				(*req->class->rtx_syn_ack)(sk, req);
				req->retrans++;
				timeo = min((TCP_TIMEOUT_INIT << req->retrans),
					    (120 * HZ));
				req->expires = now + timeo;
				rp = prev->dl_next;
				tcp_synq_queue(tp, req);
				if(rp != prev->dl_next)
					prev = prev->dl_next;
			}
		} else
			prev = req;
		req = next;
	}
}

/* This now scales very nicely. -DaveM */
static void tcp_syn_recv_timer(unsigned long data)
{
	struct sock *sk;
	unsigned long now = jiffies;
	int i;

	SOCKHASH_LOCK_READ_BH();
	for(i = 0; i < TCP_LHTABLE_SIZE; i++) {
		sk = tcp_listening_hash[i];
		while(sk) {
			struct tcp_opt *tp = &sk->tp_pinfo.af_tcp;

			/* TCP_LISTEN is implied. */
			bh_lock_sock(sk);
			if (!sk->lock.users && tp->syn_wait_queue)
				tcp_do_syn_queue(sk, tp, now);
			bh_unlock_sock(sk);
			sk = sk->next;
		}
	}
	SOCKHASH_UNLOCK_READ_BH();
}
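
/* Dispatcher for the slow timers: run every class whose period has expired
 * and re-arm tcp_slow_timer for the earliest remaining expiry.  Classes
 * with no pending events are skipped.
 */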
void tcp_sltimer_handler(unsigned long data)
{
	struct tcp_sl_timer *slt = tcp_slt_array;
	unsigned long next = ~0UL;
	unsigned long now = jiffies;
	int i;

	for (i=0; i < TCP_SLT_MAX; i++, slt++) {
		if (atomic_read(&slt->count)) {
			long trigger;

			trigger = slt->period - ((long)(now - slt->last));

			if (trigger <= 0) {
				(*slt->handler)((unsigned long) slt);
				slt->last = now;
				trigger = slt->period;
			}

			/* Only reschedule if some events remain. */
			if (atomic_read(&slt->count))
				next = min(next, trigger);
		}
	}
	if (next != ~0UL)
		mod_timer(&tcp_slow_timer, (now + next));
}
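
/* Called when the first event of a slow-timer class is queued: make sure
 * the global slow timer is armed and fires no later than one period from
 * now.
 */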
void __tcp_inc_slow_timer(struct tcp_sl_timer *slt)
{
	unsigned long now = jiffies;
	unsigned long when;

	slt->last = now;

	when = now + slt->period;

	if (tcp_slow_timer.prev) {
		if ((long)(tcp_slow_timer.expires - when) >= 0)
			mod_timer(&tcp_slow_timer, when);
	} else {
		tcp_slow_timer.expires = when;
		add_timer(&tcp_slow_timer);
	}
}