2 * INET An implementation of the TCP/IP protocol suite for the LINUX
3 * operating system. INET is implemented using the BSD Socket
4 * interface as the means of communication with the user level.
6 * Implementation of the Transmission Control Protocol(TCP).
8 * Version: $Id: tcp_timer.c,v 1.65 1999/07/02 11:26:35 davem Exp $
10 * Authors: Ross Biro, <bir7@leland.Stanford.Edu>
11 * Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
12 * Mark Evans, <evansmp@uhura.aston.ac.uk>
13 * Corey Minyard <wf-rch!minyard@relay.EU.net>
14 * Florian La Roche, <flla@stud.uni-sb.de>
15 * Charles Hedrick, <hedrick@klinzhai.rutgers.edu>
16 * Linus Torvalds, <torvalds@cs.helsinki.fi>
17 * Alan Cox, <gw4pts@gw4pts.ampr.org>
18 * Matthew Dillon, <dillon@apollo.west.oic.com>
19 * Arnt Gulbrandsen, <agulbra@nvg.unit.no>
20 * Jorge Cwik, <jorge@laser.satlink.net>
25 int sysctl_tcp_syn_retries
= TCP_SYN_RETRIES
;
26 int sysctl_tcp_keepalive_time
= TCP_KEEPALIVE_TIME
;
27 int sysctl_tcp_keepalive_probes
= TCP_KEEPALIVE_PROBES
;
28 int sysctl_tcp_retries1
= TCP_RETR1
;
29 int sysctl_tcp_retries2
= TCP_RETR2
;
/* Forward declarations for the slow-timer handlers defined below. */
static void tcp_sltimer_handler(unsigned long);
static void tcp_syn_recv_timer(unsigned long);
static void tcp_keepalive(unsigned long data);
static void tcp_twkill(unsigned long);
36 struct timer_list tcp_slow_timer
= {
43 struct tcp_sl_timer tcp_slt_array
[TCP_SLT_MAX
] = {
44 {ATOMIC_INIT(0), TCP_SYNACK_PERIOD
, 0, tcp_syn_recv_timer
},/* SYNACK */
45 {ATOMIC_INIT(0), TCP_KEEPALIVE_PERIOD
, 0, tcp_keepalive
}, /* KEEPALIVE */
46 {ATOMIC_INIT(0), TCP_TWKILL_PERIOD
, 0, tcp_twkill
} /* TWKILL */
49 const char timer_bug_msg
[] = KERN_DEBUG
"tcpbug: unknown timer value\n";
52 * Using different timers for retransmit, delayed acks and probes
53 * We may wish use just one timer maintaining a list of expire jiffies
57 void tcp_init_xmit_timers(struct sock
*sk
)
59 init_timer(&sk
->tp_pinfo
.af_tcp
.retransmit_timer
);
60 sk
->tp_pinfo
.af_tcp
.retransmit_timer
.function
=&tcp_retransmit_timer
;
61 sk
->tp_pinfo
.af_tcp
.retransmit_timer
.data
= (unsigned long) sk
;
63 init_timer(&sk
->tp_pinfo
.af_tcp
.delack_timer
);
64 sk
->tp_pinfo
.af_tcp
.delack_timer
.function
=&tcp_delack_timer
;
65 sk
->tp_pinfo
.af_tcp
.delack_timer
.data
= (unsigned long) sk
;
67 init_timer(&sk
->tp_pinfo
.af_tcp
.probe_timer
);
68 sk
->tp_pinfo
.af_tcp
.probe_timer
.function
=&tcp_probe_timer
;
69 sk
->tp_pinfo
.af_tcp
.probe_timer
.data
= (unsigned long) sk
;
73 * Reset the retransmission timer
76 void tcp_reset_xmit_timer(struct sock
*sk
, int what
, unsigned long when
)
78 struct tcp_opt
*tp
= &sk
->tp_pinfo
.af_tcp
;
82 /* When seting the transmit timer the probe timer
84 * The delayed ack timer can be set if we are changing the
85 * retransmit timer when removing acked frames.
87 if(tp
->probe_timer
.prev
)
88 del_timer(&tp
->probe_timer
);
89 mod_timer(&tp
->retransmit_timer
, jiffies
+when
);
93 mod_timer(&tp
->delack_timer
, jiffies
+when
);
97 mod_timer(&tp
->probe_timer
, jiffies
+when
);
101 printk(KERN_DEBUG
"bug: tcp_reset_xmit_timer TIME_WRITE\n");
105 printk(KERN_DEBUG
"bug: unknown timer value\n");
109 void tcp_clear_xmit_timers(struct sock
*sk
)
111 struct tcp_opt
*tp
= &sk
->tp_pinfo
.af_tcp
;
113 if(tp
->retransmit_timer
.prev
)
114 del_timer(&tp
->retransmit_timer
);
115 if(tp
->delack_timer
.prev
)
116 del_timer(&tp
->delack_timer
);
117 if(tp
->probe_timer
.prev
)
118 del_timer(&tp
->probe_timer
);
121 static int tcp_write_err(struct sock
*sk
, int force
)
123 sk
->err
= sk
->err_soft
? sk
->err_soft
: ETIMEDOUT
;
124 sk
->error_report(sk
);
126 tcp_clear_xmit_timers(sk
);
128 /* Time wait the socket. */
129 if (!force
&& ((1<<sk
->state
) & (TCPF_FIN_WAIT1
|TCPF_FIN_WAIT2
|TCPF_CLOSING
))) {
133 tcp_set_state(sk
, TCP_CLOSE
);
139 /* A write timeout has occurred. Process the after effects. */
140 static int tcp_write_timeout(struct sock
*sk
)
142 struct tcp_opt
*tp
= &(sk
->tp_pinfo
.af_tcp
);
144 /* Look for a 'soft' timeout. */
145 if ((sk
->state
== TCP_ESTABLISHED
&&
146 tp
->retransmits
&& (tp
->retransmits
% TCP_QUICK_TRIES
) == 0) ||
147 (sk
->state
!= TCP_ESTABLISHED
&& tp
->retransmits
> sysctl_tcp_retries1
)) {
148 dst_negative_advice(&sk
->dst_cache
);
151 /* Have we tried to SYN too many times (repent repent 8)) */
152 if(tp
->retransmits
> sysctl_tcp_syn_retries
&& sk
->state
==TCP_SYN_SENT
) {
153 tcp_write_err(sk
, 1);
154 /* Don't FIN, we got nothing back */
158 /* Has it gone just too far? */
159 if (tp
->retransmits
> sysctl_tcp_retries2
)
160 return tcp_write_err(sk
, 0);
165 void tcp_delack_timer(unsigned long data
)
167 struct sock
*sk
= (struct sock
*)data
;
171 sk
->tp_pinfo
.af_tcp
.delayed_acks
&&
172 sk
->state
!= TCP_CLOSE
) {
176 tcp_send_delayed_ack(&(sk
->tp_pinfo
.af_tcp
), HZ
/10);
181 void tcp_probe_timer(unsigned long data
)
183 struct sock
*sk
= (struct sock
*)data
;
184 struct tcp_opt
*tp
= &sk
->tp_pinfo
.af_tcp
;
190 if (sk
->lock
.users
) {
191 /* Try again later. */
192 tcp_reset_xmit_timer(sk
, TIME_PROBE0
, HZ
/5);
197 /* *WARNING* RFC 1122 forbids this
198 * It doesn't AFAIK, because we kill the retransmit timer -AK
199 * FIXME: We ought not to do it, Solaris 2.5 actually has fixing
200 * this behaviour in Solaris down as a bug fix. [AC]
202 if (tp
->probes_out
> sysctl_tcp_retries2
) {
204 sk
->err
= sk
->err_soft
;
207 sk
->error_report(sk
);
209 if ((1<<sk
->state
) & (TCPF_FIN_WAIT1
|TCPF_FIN_WAIT2
|TCPF_CLOSING
)) {
210 /* Time wait the socket. */
214 tcp_set_state(sk
, TCP_CLOSE
);
217 /* Only send another probe if we didn't close things up. */
223 static __inline__
int tcp_keepopen_proc(struct sock
*sk
)
227 if ((1<<sk
->state
) & (TCPF_ESTABLISHED
|TCPF_CLOSE_WAIT
|TCPF_FIN_WAIT2
)) {
228 struct tcp_opt
*tp
= &sk
->tp_pinfo
.af_tcp
;
229 __u32 elapsed
= tcp_time_stamp
- tp
->rcv_tstamp
;
231 if (elapsed
>= sysctl_tcp_keepalive_time
) {
232 if (tp
->probes_out
> sysctl_tcp_keepalive_probes
) {
234 sk
->err
= sk
->err_soft
;
238 tcp_set_state(sk
, TCP_CLOSE
);
239 sk
->shutdown
= SHUTDOWN_MASK
;
241 sk
->state_change(sk
);
244 tp
->pending
= TIME_KEEPOPEN
;
245 tcp_write_wakeup(sk
);
253 /* Kill off TIME_WAIT sockets once their lifetime has expired. */
254 int tcp_tw_death_row_slot
= 0;
255 static struct tcp_tw_bucket
*tcp_tw_death_row
[TCP_TWKILL_SLOTS
] =
256 { NULL
, NULL
, NULL
, NULL
, NULL
, NULL
, NULL
, NULL
};
258 extern void tcp_timewait_kill(struct tcp_tw_bucket
*tw
);
260 static void tcp_twkill(unsigned long data
)
262 struct tcp_tw_bucket
*tw
;
265 /* The death-row tw chains are only ever touched
266 * in BH context so no locking is needed.
268 tw
= tcp_tw_death_row
[tcp_tw_death_row_slot
];
269 tcp_tw_death_row
[tcp_tw_death_row_slot
] = NULL
;
270 tcp_tw_death_row_slot
=
271 ((tcp_tw_death_row_slot
+ 1) & (TCP_TWKILL_SLOTS
- 1));
274 struct tcp_tw_bucket
*next
= tw
->next_death
;
276 tcp_timewait_kill(tw
);
281 struct tcp_sl_timer
*slt
= (struct tcp_sl_timer
*)data
;
282 atomic_sub(killed
, &slt
->count
);
286 /* These are always called from BH context. See callers in
287 * tcp_input.c to verify this.
289 void tcp_tw_schedule(struct tcp_tw_bucket
*tw
)
291 int slot
= (tcp_tw_death_row_slot
- 1) & (TCP_TWKILL_SLOTS
- 1);
292 struct tcp_tw_bucket
**tpp
= &tcp_tw_death_row
[slot
];
294 SOCKHASH_LOCK_WRITE_BH();
295 if((tw
->next_death
= *tpp
) != NULL
)
296 (*tpp
)->pprev_death
= &tw
->next_death
;
298 tw
->pprev_death
= tpp
;
300 tw
->death_slot
= slot
;
301 SOCKHASH_UNLOCK_WRITE_BH();
303 tcp_inc_slow_timer(TCP_SLT_TWKILL
);
306 /* Happens rarely if at all, no care about scalability here. */
307 void tcp_tw_reschedule(struct tcp_tw_bucket
*tw
)
309 struct tcp_tw_bucket
**tpp
;
312 SOCKHASH_LOCK_WRITE_BH();
314 tw
->next_death
->pprev_death
= tw
->pprev_death
;
315 *tw
->pprev_death
= tw
->next_death
;
316 tw
->pprev_death
= NULL
;
318 slot
= (tcp_tw_death_row_slot
- 1) & (TCP_TWKILL_SLOTS
- 1);
319 tpp
= &tcp_tw_death_row
[slot
];
320 if((tw
->next_death
= *tpp
) != NULL
)
321 (*tpp
)->pprev_death
= &tw
->next_death
;
323 tw
->pprev_death
= tpp
;
325 tw
->death_slot
= slot
;
326 SOCKHASH_UNLOCK_WRITE_BH();
328 /* Timer was incremented when we first entered the table. */
331 /* This is for handling early-kills of TIME_WAIT sockets. */
332 void tcp_tw_deschedule(struct tcp_tw_bucket
*tw
)
334 SOCKHASH_LOCK_WRITE_BH();
336 tw
->next_death
->pprev_death
= tw
->pprev_death
;
337 *tw
->pprev_death
= tw
->next_death
;
338 tw
->pprev_death
= NULL
;
339 SOCKHASH_UNLOCK_WRITE_BH();
341 tcp_dec_slow_timer(TCP_SLT_TWKILL
);
/*
 * Check all sockets for keepalive timer
 * Called every 75 seconds
 * This timer is started by af_inet init routine and is constantly
 * running.
 *
 * It might be better to maintain a count of sockets that need it using
 * setsockopt/tcp_destroy_sk and only set the timer when needed.
 */

/*
 * don't send over 5 keepopens at a time to avoid burstiness
 * on big servers [AC]
 */
#define MAX_KA_PROBES	5

int sysctl_tcp_max_ka_probes = MAX_KA_PROBES;
362 /* Keepopen's are only valid for "established" TCP's, nicely our listener
363 * hash gets rid of most of the useless testing, so we run through a couple
364 * of the established hash chains each clock tick. -DaveM
366 * And now, even more magic... TIME_WAIT TCP's cannot have keepalive probes
367 * going off for them, so we only need check the first half of the established
368 * hash table, even less testing under heavy load.
370 * I _really_ would rather do this by adding a new timer_struct to struct sock,
371 * and this way only those who set the keepalive option will get the overhead.
372 * The idea is you set it for 2 hours when the sock is first connected, when it
373 * does fire off (if at all, most sockets die earlier) you check for the keepalive
374 * option and also if the sock has been idle long enough to start probing.
376 static void tcp_keepalive(unsigned long data
)
378 static int chain_start
= 0;
382 SOCKHASH_LOCK_READ_BH();
383 for(i
= chain_start
; i
< (chain_start
+ ((tcp_ehash_size
>> 1) >> 2)); i
++) {
388 struct sock
*next
= sk
->next
;
391 if (sk
->keepopen
&& !sk
->lock
.users
) {
392 SOCKHASH_UNLOCK_READ_BH();
393 count
+= tcp_keepopen_proc(sk
);
394 SOCKHASH_LOCK_READ_BH();
397 if(count
== sysctl_tcp_max_ka_probes
)
403 SOCKHASH_UNLOCK_READ_BH();
404 chain_start
= ((chain_start
+ ((tcp_ehash_size
>> 1)>>2)) &
405 ((tcp_ehash_size
>> 1) - 1));
409 * The TCP retransmit timer. This lacks a few small details.
411 * 1. An initial rtt timeout on the probe0 should cause what we can
412 * of the first write queue buffer to be split and sent.
413 * 2. On a 'major timeout' as defined by RFC1122 we shouldn't report
414 * ETIMEDOUT if we know an additional 'soft' error caused this.
415 * tcp_err should save a 'soft error' for us.
416 * [Unless someone has broken it then it does, except for one 2.0
417 * broken case of a send when the route/device is directly unreachable,
418 * and we error but should retry! - FIXME] [AC]
421 void tcp_retransmit_timer(unsigned long data
)
423 struct sock
*sk
= (struct sock
*)data
;
424 struct tcp_opt
*tp
= &sk
->tp_pinfo
.af_tcp
;
426 /* We are reset. We will send no more retransmits. */
428 tcp_clear_xmit_timer(sk
, TIME_RETRANS
);
433 if (sk
->lock
.users
) {
434 /* Try again later */
435 tcp_reset_xmit_timer(sk
, TIME_RETRANS
, HZ
/20);
440 /* Clear delay ack timer. */
441 tcp_clear_xmit_timer(sk
, TIME_DACK
);
443 /* RFC 2018, clear all 'sacked' flags in retransmission queue,
444 * the sender may have dropped out of order frames and we must
445 * send them out should this timer fire on us.
448 struct sk_buff
*skb
= skb_peek(&sk
->write_queue
);
450 while((skb
!= NULL
) &&
451 (skb
!= tp
->send_head
) &&
452 (skb
!= (struct sk_buff
*)&sk
->write_queue
)) {
453 TCP_SKB_CB(skb
)->sacked
&=
454 ~(TCPCB_SACKED_ACKED
| TCPCB_SACKED_RETRANS
);
459 /* Retransmission. */
460 tp
->retrans_head
= NULL
;
464 if (tp
->retransmits
== 0) {
465 /* Remember window where we lost:
466 * "one half of the current window but at least 2 segments"
468 * Here "current window" means the effective one, which
469 * means it must be an accurate representation of our current
470 * sending rate _and_ the snd_wnd.
472 tp
->snd_ssthresh
= tcp_recalc_ssthresh(tp
);
473 tp
->snd_cwnd_cnt
= 0;
480 tp
->high_seq
= tp
->snd_nxt
;
481 tcp_retransmit_skb(sk
, skb_peek(&sk
->write_queue
));
483 /* Increase the timeout each time we retransmit. Note that
484 * we do not increase the rtt estimate. rto is initialized
485 * from rtt, but increases here. Jacobson (SIGCOMM 88) suggests
486 * that doubling rto each time is the least we can get away with.
487 * In KA9Q, Karn uses this for the first few times, and then
488 * goes to quadratic. netBSD doubles, but only goes up to *64,
489 * and clamps at 1 to 64 sec afterwards. Note that 120 sec is
490 * defined in the protocol as the maximum possible RTT. I guess
491 * we'll have to use something other than TCP to talk to the
492 * University of Mars.
494 * PAWS allows us longer timeouts and large windows, so once
495 * implemented ftp to mars will work nicely. We will have to fix
496 * the 120 second clamps though!
499 tp
->rto
= min(tp
->rto
<< 1, 120*HZ
);
500 tcp_reset_xmit_timer(sk
, TIME_RETRANS
, tp
->rto
);
502 tcp_write_timeout(sk
);
508 * Slow timer for SYN-RECV sockets
511 static void tcp_do_syn_queue(struct sock
*sk
, struct tcp_opt
*tp
, unsigned long now
)
513 struct open_request
*prev
, *req
;
515 prev
= (struct open_request
*) &tp
->syn_wait_queue
;
516 for(req
= tp
->syn_wait_queue
; req
; ) {
517 struct open_request
*next
= req
->dl_next
;
520 tcp_synq_unlink(tp
, req
, prev
);
521 if(req
->retrans
>= sysctl_tcp_retries1
) {
522 (*req
->class->destructor
)(req
);
523 tcp_dec_slow_timer(TCP_SLT_SYNACK
);
525 tcp_openreq_free(req
);
526 if (! tp
->syn_wait_queue
)
530 struct open_request
*rp
;
532 (*req
->class->rtx_syn_ack
)(sk
, req
);
534 timeo
= min((TCP_TIMEOUT_INIT
<< req
->retrans
),
536 req
->expires
= now
+ timeo
;
538 tcp_synq_queue(tp
, req
);
539 if(rp
!= prev
->dl_next
)
540 prev
= prev
->dl_next
;
548 /* This now scales very nicely. -DaveM */
549 static void tcp_syn_recv_timer(unsigned long data
)
552 unsigned long now
= jiffies
;
555 SOCKHASH_LOCK_READ_BH();
556 for(i
= 0; i
< TCP_LHTABLE_SIZE
; i
++) {
557 sk
= tcp_listening_hash
[i
];
559 struct tcp_opt
*tp
= &sk
->tp_pinfo
.af_tcp
;
561 /* TCP_LISTEN is implied. */
563 if (!sk
->lock
.users
&& tp
->syn_wait_queue
)
564 tcp_do_syn_queue(sk
, tp
, now
);
569 SOCKHASH_UNLOCK_READ_BH();
572 void tcp_sltimer_handler(unsigned long data
)
574 struct tcp_sl_timer
*slt
= tcp_slt_array
;
575 unsigned long next
= ~0UL;
576 unsigned long now
= jiffies
;
579 for (i
=0; i
< TCP_SLT_MAX
; i
++, slt
++) {
580 if (atomic_read(&slt
->count
)) {
583 trigger
= slt
->period
- ((long)(now
- slt
->last
));
586 (*slt
->handler
)((unsigned long) slt
);
588 trigger
= slt
->period
;
591 /* Only reschedule if some events remain. */
592 if (atomic_read(&slt
->count
))
593 next
= min(next
, trigger
);
597 mod_timer(&tcp_slow_timer
, (now
+ next
));
600 void __tcp_inc_slow_timer(struct tcp_sl_timer
*slt
)
602 unsigned long now
= jiffies
;
607 when
= now
+ slt
->period
;
609 if (tcp_slow_timer
.prev
) {
610 if ((long)(tcp_slow_timer
.expires
- when
) >= 0)
611 mod_timer(&tcp_slow_timer
, when
);
613 tcp_slow_timer
.expires
= when
;
614 add_timer(&tcp_slow_timer
);