net/ipv4/tcp_timer.c
/*
 * INET		An implementation of the TCP/IP protocol suite for the LINUX
 *		operating system.  INET is implemented using the BSD Socket
 *		interface as the means of communication with the user level.
 *
 *		Implementation of the Transmission Control Protocol(TCP).
 *
 * Authors:	Ross Biro
 *		Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
 *		Mark Evans, <evansmp@uhura.aston.ac.uk>
 *		Corey Minyard <wf-rch!minyard@relay.EU.net>
 *		Florian La Roche, <flla@stud.uni-sb.de>
 *		Charles Hedrick, <hedrick@klinzhai.rutgers.edu>
 *		Linus Torvalds, <torvalds@cs.helsinki.fi>
 *		Alan Cox, <gw4pts@gw4pts.ampr.org>
 *		Matthew Dillon, <dillon@apollo.west.oic.com>
 *		Arnt Gulbrandsen, <agulbra@nvg.unit.no>
 *		Jorge Cwik, <jorge@laser.satlink.net>
 */

#include <linux/module.h>
#include <linux/gfp.h>
#include <net/tcp.h>

int sysctl_tcp_syn_retries __read_mostly = TCP_SYN_RETRIES;
int sysctl_tcp_synack_retries __read_mostly = TCP_SYNACK_RETRIES;
int sysctl_tcp_keepalive_time __read_mostly = TCP_KEEPALIVE_TIME;
int sysctl_tcp_keepalive_probes __read_mostly = TCP_KEEPALIVE_PROBES;
int sysctl_tcp_keepalive_intvl __read_mostly = TCP_KEEPALIVE_INTVL;
int sysctl_tcp_retries1 __read_mostly = TCP_RETR1;
int sysctl_tcp_retries2 __read_mostly = TCP_RETR2;
int sysctl_tcp_orphan_retries __read_mostly;
int sysctl_tcp_thin_linear_timeouts __read_mostly;
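
/* (For reference, the compile-time defaults behind these sysctls in
 * kernels of this vintage are assumed to be: TCP_SYN_RETRIES 6,
 * TCP_SYNACK_RETRIES 5, TCP_RETR1 3, TCP_RETR2 15, and a keepalive
 * trio of 7200 s idle time, 9 probes, and a 75 s probe interval; see
 * include/net/tcp.h.) */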

static void tcp_write_err(struct sock *sk)
{
	sk->sk_err = sk->sk_err_soft ? : ETIMEDOUT;
	sk->sk_error_report(sk);

	tcp_done(sk);
	NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPABORTONTIMEOUT);
}

/* Do not allow orphaned sockets to eat all our resources.
 * This is a direct violation of the TCP specs, but it is required
 * to prevent DoS attacks. It is called when a retransmission timeout
 * or zero probe timeout occurs on an orphaned socket.
 *
 * The criteria are still not confirmed experimentally and may change.
 * We kill the socket if:
 * 1. The number of orphaned sockets exceeds an administratively
 *    configured limit.
 * 2. We are under strong memory pressure.
 */
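/* (A note on mechanics: both criteria are evaluated by tcp_check_oom()
 * below, and the "shift" computed in this function tightens the check:
 * a larger shift makes the orphan-count test trip sooner, so sockets
 * that look stale or dubious are reaped earlier. This reading of
 * tcp_check_oom() is assumed from contemporary kernels.)
 */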
static int tcp_out_of_resources(struct sock *sk, int do_reset)
{
	struct tcp_sock *tp = tcp_sk(sk);
	int shift = 0;

	/* If the peer does not open its window for a long time, or did not
	 * transmit anything for a long time, penalize it. */
	if ((s32)(tcp_time_stamp - tp->lsndtime) > 2*TCP_RTO_MAX || !do_reset)
		shift++;

	/* If some dubious ICMP arrived, penalize even more. */
	if (sk->sk_err_soft)
		shift++;

	if (tcp_check_oom(sk, shift)) {
		/* Catch exceptional cases, when connection requires reset.
		 *      1. Last segment was sent recently. */
		if ((s32)(tcp_time_stamp - tp->lsndtime) <= TCP_TIMEWAIT_LEN ||
		    /*  2. Window is closed. */
		    (!tp->snd_wnd && !tp->packets_out))
			do_reset = 1;
		if (do_reset)
			tcp_send_active_reset(sk, GFP_ATOMIC);
		tcp_done(sk);
		NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPABORTONMEMORY);
		return 1;
	}
	return 0;
}

/* Calculate the maximal number of retries on an orphaned socket. */
static int tcp_orphan_retries(struct sock *sk, int alive)
{
	int retries = sysctl_tcp_orphan_retries; /* May be zero. */

	/* We know from an ICMP that something is wrong. */
	if (sk->sk_err_soft && !alive)
		retries = 0;

	/* However, if the socket sent something recently, select some safe
	 * number of retries. 8 corresponds to >100 seconds with a minimal
	 * RTO of 200 msec. */
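	/* (Sketch of that arithmetic: with the RTO doubling from 200 ms on
	 * each attempt, 8 retries wait roughly 0.2 * (2^9 - 1) seconds,
	 * i.e. about 102 seconds in total.) */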
	if (retries == 0 && alive)
		retries = 8;
	return retries;
}

static void tcp_mtu_probing(struct inet_connection_sock *icsk, struct sock *sk)
{
	/* Black hole detection */
	if (sysctl_tcp_mtu_probing) {
		if (!icsk->icsk_mtup.enabled) {
			icsk->icsk_mtup.enabled = 1;
			tcp_sync_mss(sk, icsk->icsk_pmtu_cookie);
		} else {
			struct tcp_sock *tp = tcp_sk(sk);
			int mss;
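
			/* Shrink the probing floor: halve the MSS implied by
			 * search_low, cap it at tcp_base_mss, and never let it
			 * drop below the 68-byte minimum IPv4 MTU minus the
			 * TCP header length. */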
			mss = tcp_mtu_to_mss(sk, icsk->icsk_mtup.search_low) >> 1;
			mss = min(sysctl_tcp_base_mss, mss);
			mss = max(mss, 68 - tp->tcp_header_len);
			icsk->icsk_mtup.search_low = tcp_mss_to_mtu(sk, mss);
			tcp_sync_mss(sk, icsk->icsk_pmtu_cookie);
		}
	}
}

/* This function calculates a "timeout" which is equivalent to the timeout of a
 * TCP connection after "boundary" unsuccessful, exponentially backed-off
 * retransmissions with an initial RTO of TCP_RTO_MIN, or TCP_TIMEOUT_INIT if
 * the syn_set flag is set.
 */
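/* (Worked example, assuming TCP_RTO_MIN = 200 ms and TCP_RTO_MAX = 120 s:
 * linear_backoff_thresh = ilog2(120000 / 200) = 9, so the default
 * sysctl_tcp_retries2 boundary of 15 yields a timeout of
 * ((2 << 9) - 1) * 200 ms + (15 - 9) * 120 s, i.e. about 924.6 seconds.)
 */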
static bool retransmits_timed_out(struct sock *sk,
				  unsigned int boundary,
				  unsigned int timeout,
				  bool syn_set)
{
	unsigned int linear_backoff_thresh, start_ts;
	unsigned int rto_base = syn_set ? TCP_TIMEOUT_INIT : TCP_RTO_MIN;

	if (!inet_csk(sk)->icsk_retransmits)
		return false;

	if (unlikely(!tcp_sk(sk)->retrans_stamp))
		start_ts = TCP_SKB_CB(tcp_write_queue_head(sk))->when;
	else
		start_ts = tcp_sk(sk)->retrans_stamp;

	if (likely(timeout == 0)) {
		linear_backoff_thresh = ilog2(TCP_RTO_MAX/rto_base);

		if (boundary <= linear_backoff_thresh)
			timeout = ((2 << boundary) - 1) * rto_base;
		else
			timeout = ((2 << linear_backoff_thresh) - 1) * rto_base +
				(boundary - linear_backoff_thresh) * TCP_RTO_MAX;
	}
	return (tcp_time_stamp - start_ts) >= timeout;
}

/* A write timeout has occurred. Process the after-effects. */
static int tcp_write_timeout(struct sock *sk)
{
	struct inet_connection_sock *icsk = inet_csk(sk);
	struct tcp_sock *tp = tcp_sk(sk);
	int retry_until;
	bool do_reset, syn_set = false;

	if ((1 << sk->sk_state) & (TCPF_SYN_SENT | TCPF_SYN_RECV)) {
		if (icsk->icsk_retransmits) {
			dst_negative_advice(sk);
			if (tp->syn_fastopen || tp->syn_data)
				tcp_fastopen_cache_set(sk, 0, NULL, true);
		}
		retry_until = icsk->icsk_syn_retries ? : sysctl_tcp_syn_retries;
		syn_set = true;
	} else {
		if (retransmits_timed_out(sk, sysctl_tcp_retries1, 0, 0)) {
			/* Black hole detection */
			tcp_mtu_probing(icsk, sk);

			dst_negative_advice(sk);
		}

		retry_until = sysctl_tcp_retries2;
		if (sock_flag(sk, SOCK_DEAD)) {
			const int alive = (icsk->icsk_rto < TCP_RTO_MAX);

			retry_until = tcp_orphan_retries(sk, alive);
			do_reset = alive ||
				   !retransmits_timed_out(sk, retry_until, 0, 0);

			if (tcp_out_of_resources(sk, do_reset))
				return 1;
		}
	}

	if (retransmits_timed_out(sk, retry_until,
				  syn_set ? 0 : icsk->icsk_user_timeout, syn_set)) {
		/* Has it gone just too far? */
		tcp_write_err(sk);
		return 1;
	}
	return 0;
}

void tcp_delack_timer_handler(struct sock *sk)
{
	struct tcp_sock *tp = tcp_sk(sk);
	struct inet_connection_sock *icsk = inet_csk(sk);

	sk_mem_reclaim_partial(sk);

	if (sk->sk_state == TCP_CLOSE || !(icsk->icsk_ack.pending & ICSK_ACK_TIMER))
		goto out;

	if (time_after(icsk->icsk_ack.timeout, jiffies)) {
		sk_reset_timer(sk, &icsk->icsk_delack_timer, icsk->icsk_ack.timeout);
		goto out;
	}
	icsk->icsk_ack.pending &= ~ICSK_ACK_TIMER;

	if (!skb_queue_empty(&tp->ucopy.prequeue)) {
		struct sk_buff *skb;

		NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPSCHEDULERFAILED);

		while ((skb = __skb_dequeue(&tp->ucopy.prequeue)) != NULL)
			sk_backlog_rcv(sk, skb);

		tp->ucopy.memory = 0;
	}

	if (inet_csk_ack_scheduled(sk)) {
		if (!icsk->icsk_ack.pingpong) {
			/* Delayed ACK missed: inflate ATO. */
			icsk->icsk_ack.ato = min(icsk->icsk_ack.ato << 1, icsk->icsk_rto);
		} else {
			/* Delayed ACK missed: leave pingpong mode and
			 * deflate ATO.
			 */
			icsk->icsk_ack.pingpong = 0;
			icsk->icsk_ack.ato = TCP_ATO_MIN;
		}
		tcp_send_ack(sk);
		NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_DELAYEDACKS);
	}

out:
	if (sk_under_memory_pressure(sk))
		sk_mem_reclaim(sk);
}

static void tcp_delack_timer(unsigned long data)
{
	struct sock *sk = (struct sock *)data;

	bh_lock_sock(sk);
	if (!sock_owned_by_user(sk)) {
		tcp_delack_timer_handler(sk);
	} else {
		inet_csk(sk)->icsk_ack.blocked = 1;
		NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_DELAYEDACKLOCKED);
		/* delegate our work to tcp_release_cb() */
		if (!test_and_set_bit(TCP_DELACK_TIMER_DEFERRED, &tcp_sk(sk)->tsq_flags))
			sock_hold(sk);
	}
	bh_unlock_sock(sk);
	sock_put(sk);
}

static void tcp_probe_timer(struct sock *sk)
{
	struct inet_connection_sock *icsk = inet_csk(sk);
	struct tcp_sock *tp = tcp_sk(sk);
	int max_probes;

	if (tp->packets_out || !tcp_send_head(sk)) {
		icsk->icsk_probes_out = 0;
		return;
	}

	/* *WARNING* RFC 1122 forbids this
	 *
	 * It doesn't AFAIK, because we kill the retransmit timer -AK
	 *
	 * FIXME: We ought not to do it, Solaris 2.5 actually has fixing
	 * this behaviour in Solaris down as a bug fix. [AC]
	 *
	 * Let me explain. icsk_probes_out is zeroed by incoming ACKs
	 * even if they advertise zero window. Hence, the connection is killed
	 * only if we received no ACKs for the normal connection timeout. It is
	 * not killed merely because the window stays zero for some time; the
	 * window may stay zero until armageddon and even later. We are in full
	 * accordance with the RFCs, only the probe timer combines both
	 * retransmission timeout and probe timeout in one bottle.	--ANK
	 */
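	/* (So max_probes below bounds the number of unanswered zero-window
	 * probes the same way sysctl_tcp_retries2 bounds retransmissions;
	 * icsk_probes_out is reset by any incoming ACK.) */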
	max_probes = sysctl_tcp_retries2;

	if (sock_flag(sk, SOCK_DEAD)) {
		const int alive = ((icsk->icsk_rto << icsk->icsk_backoff) < TCP_RTO_MAX);

		max_probes = tcp_orphan_retries(sk, alive);

		if (tcp_out_of_resources(sk, alive || icsk->icsk_probes_out <= max_probes))
			return;
	}

	if (icsk->icsk_probes_out > max_probes) {
		tcp_write_err(sk);
	} else {
		/* Only send another probe if we didn't close things up. */
		tcp_send_probe0(sk);
	}
}

/*
 *	Timer for Fast Open socket to retransmit SYNACK. Note that the
 *	sk here is the child socket, not the parent (listener) socket.
 */
static void tcp_fastopen_synack_timer(struct sock *sk)
{
	struct inet_connection_sock *icsk = inet_csk(sk);
	int max_retries = icsk->icsk_syn_retries ? :
	    sysctl_tcp_synack_retries + 1; /* add one more retry for fastopen */
	struct request_sock *req;

	req = tcp_sk(sk)->fastopen_rsk;
	req->rsk_ops->syn_ack_timeout(sk, req);

	if (req->num_timeout >= max_retries) {
		tcp_write_err(sk);
		return;
	}
	/* XXX (TFO) - Unlike regular SYN-ACK retransmit, we ignore error
	 * returned from rtx_syn_ack() to make it more persistent like
	 * regular retransmit because if the child socket has been accepted
	 * it's not good to give up too easily.
	 */
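	/* (Assuming TCP_TIMEOUT_INIT keeps its customary 1 second value, the
	 * rearm below retries the SYN-ACK after 2 s, then 4 s, 8 s, ...,
	 * clamped at TCP_RTO_MAX.) */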
	inet_rtx_syn_ack(sk, req);
	req->num_timeout++;
	inet_csk_reset_xmit_timer(sk, ICSK_TIME_RETRANS,
				  TCP_TIMEOUT_INIT << req->num_timeout, TCP_RTO_MAX);
}

/*
 *	The TCP retransmit timer.
 */

void tcp_retransmit_timer(struct sock *sk)
{
	struct tcp_sock *tp = tcp_sk(sk);
	struct inet_connection_sock *icsk = inet_csk(sk);

	if (tp->fastopen_rsk) {
		WARN_ON_ONCE(sk->sk_state != TCP_SYN_RECV &&
			     sk->sk_state != TCP_FIN_WAIT1);
		tcp_fastopen_synack_timer(sk);
		/* Before we receive an ACK to our SYN-ACK, don't retransmit
		 * anything else (e.g., data or FIN segments).
		 */
		return;
	}
	if (!tp->packets_out)
		goto out;

	WARN_ON(tcp_write_queue_empty(sk));

	tp->tlp_high_seq = 0;

	if (!tp->snd_wnd && !sock_flag(sk, SOCK_DEAD) &&
	    !((1 << sk->sk_state) & (TCPF_SYN_SENT | TCPF_SYN_RECV))) {
		/* The receiver has dastardly shrunk the window. Our
		 * retransmits become zero-window probes, but we should not
		 * time out this connection. If the socket is an orphan,
		 * time it out; we cannot allow such beasts to hang forever.
		 */
		struct inet_sock *inet = inet_sk(sk);
		if (sk->sk_family == AF_INET) {
			LIMIT_NETDEBUG(KERN_DEBUG pr_fmt("Peer %pI4:%u/%u unexpectedly shrunk window %u:%u (repaired)\n"),
				       &inet->inet_daddr,
				       ntohs(inet->inet_dport), inet->inet_num,
				       tp->snd_una, tp->snd_nxt);
		}
#if IS_ENABLED(CONFIG_IPV6)
		else if (sk->sk_family == AF_INET6) {
			LIMIT_NETDEBUG(KERN_DEBUG pr_fmt("Peer %pI6:%u/%u unexpectedly shrunk window %u:%u (repaired)\n"),
				       &sk->sk_v6_daddr,
				       ntohs(inet->inet_dport), inet->inet_num,
				       tp->snd_una, tp->snd_nxt);
		}
#endif
		if (tcp_time_stamp - tp->rcv_tstamp > TCP_RTO_MAX) {
			tcp_write_err(sk);
			goto out;
		}
		tcp_enter_loss(sk, 0);
		tcp_retransmit_skb(sk, tcp_write_queue_head(sk));
		__sk_dst_reset(sk);
		goto out_reset_timer;
	}

	if (tcp_write_timeout(sk))
		goto out;

	if (icsk->icsk_retransmits == 0) {
		int mib_idx;

		if (icsk->icsk_ca_state == TCP_CA_Recovery) {
			if (tcp_is_sack(tp))
				mib_idx = LINUX_MIB_TCPSACKRECOVERYFAIL;
			else
				mib_idx = LINUX_MIB_TCPRENORECOVERYFAIL;
		} else if (icsk->icsk_ca_state == TCP_CA_Loss) {
			mib_idx = LINUX_MIB_TCPLOSSFAILURES;
		} else if ((icsk->icsk_ca_state == TCP_CA_Disorder) ||
			   tp->sacked_out) {
			if (tcp_is_sack(tp))
				mib_idx = LINUX_MIB_TCPSACKFAILURES;
			else
				mib_idx = LINUX_MIB_TCPRENOFAILURES;
		} else {
			mib_idx = LINUX_MIB_TCPTIMEOUTS;
		}
		NET_INC_STATS_BH(sock_net(sk), mib_idx);
	}

	tcp_enter_loss(sk, 0);

	if (tcp_retransmit_skb(sk, tcp_write_queue_head(sk)) > 0) {
		/* Retransmission failed because of local congestion;
		 * do not back off.
		 */
		if (!icsk->icsk_retransmits)
			icsk->icsk_retransmits = 1;
		inet_csk_reset_xmit_timer(sk, ICSK_TIME_RETRANS,
					  min(icsk->icsk_rto, TCP_RESOURCE_PROBE_INTERVAL),
					  TCP_RTO_MAX);
		goto out;
	}

	/* Increase the timeout each time we retransmit.  Note that
	 * we do not increase the rtt estimate.  rto is initialized
	 * from rtt, but increases here.  Jacobson (SIGCOMM 88) suggests
	 * that doubling rto each time is the least we can get away with.
	 * In KA9Q, Karn uses this for the first few times, and then
	 * goes to quadratic.  netBSD doubles, but only goes up to *64,
	 * and clamps at 1 to 64 sec afterwards.  Note that 120 sec is
	 * defined in the protocol as the maximum possible RTT.  I guess
	 * we'll have to use something other than TCP to talk to the
	 * University of Mars.
	 *
	 * PAWS allows us longer timeouts and large windows, so once
	 * implemented ftp to mars will work nicely. We will have to fix
	 * the 120 second clamps though!
	 */
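	/* (Concretely: icsk_rto doubles on each pass through out_reset_timer
	 * below and clamps at TCP_RTO_MAX, nominally 120 seconds.) */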
	icsk->icsk_backoff++;
	icsk->icsk_retransmits++;

out_reset_timer:
	/* If the stream is thin, use linear timeouts. Since 'icsk_backoff' is
	 * used to reset the timer, set it to 0. Recalculate 'icsk_rto' as it
	 * might have been increased if the stream oscillates between thin and
	 * thick; the old value might already be too high compared to the value
	 * set by 'tcp_set_rto' in tcp_input.c, which resets the rto without
	 * backoff. Limit linear timeouts to TCP_THIN_LINEAR_RETRIES before
	 * initiating exponential backoff behaviour, to avoid continuing to
	 * hammer linear-timeout retransmissions into a black hole.
	 */
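	/* (For context: tcp_stream_is_thin() treats a stream as thin when
	 * fewer than 4 segments are in flight after initial slow start, and
	 * TCP_THIN_LINEAR_RETRIES is 6; both values assumed from
	 * include/net/tcp.h of contemporary kernels.) */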
	if (sk->sk_state == TCP_ESTABLISHED &&
	    (tp->thin_lto || sysctl_tcp_thin_linear_timeouts) &&
	    tcp_stream_is_thin(tp) &&
	    icsk->icsk_retransmits <= TCP_THIN_LINEAR_RETRIES) {
		icsk->icsk_backoff = 0;
		icsk->icsk_rto = min(__tcp_set_rto(tp), TCP_RTO_MAX);
	} else {
		/* Use normal (exponential) backoff */
		icsk->icsk_rto = min(icsk->icsk_rto << 1, TCP_RTO_MAX);
	}
	inet_csk_reset_xmit_timer(sk, ICSK_TIME_RETRANS, icsk->icsk_rto, TCP_RTO_MAX);
	if (retransmits_timed_out(sk, sysctl_tcp_retries1 + 1, 0, 0))
		__sk_dst_reset(sk);

out:;
}

void tcp_write_timer_handler(struct sock *sk)
{
	struct inet_connection_sock *icsk = inet_csk(sk);
	int event;

	if (sk->sk_state == TCP_CLOSE || !icsk->icsk_pending)
		goto out;

	if (time_after(icsk->icsk_timeout, jiffies)) {
		sk_reset_timer(sk, &icsk->icsk_retransmit_timer, icsk->icsk_timeout);
		goto out;
	}

	event = icsk->icsk_pending;

	switch (event) {
	case ICSK_TIME_EARLY_RETRANS:
		tcp_resume_early_retransmit(sk);
		break;
	case ICSK_TIME_LOSS_PROBE:
		tcp_send_loss_probe(sk);
		break;
	case ICSK_TIME_RETRANS:
		icsk->icsk_pending = 0;
		tcp_retransmit_timer(sk);
		break;
	case ICSK_TIME_PROBE0:
		icsk->icsk_pending = 0;
		tcp_probe_timer(sk);
		break;
	}

out:
	sk_mem_reclaim(sk);
}

static void tcp_write_timer(unsigned long data)
{
	struct sock *sk = (struct sock *)data;

	bh_lock_sock(sk);
	if (!sock_owned_by_user(sk)) {
		tcp_write_timer_handler(sk);
	} else {
		/* delegate our work to tcp_release_cb() */
		if (!test_and_set_bit(TCP_WRITE_TIMER_DEFERRED, &tcp_sk(sk)->tsq_flags))
			sock_hold(sk);
	}
	bh_unlock_sock(sk);
	sock_put(sk);
}

/*
 *	Timer for listening sockets
 */
static void tcp_synack_timer(struct sock *sk)
{
	inet_csk_reqsk_queue_prune(sk, TCP_SYNQ_INTERVAL,
				   TCP_TIMEOUT_INIT, TCP_RTO_MAX);
}

void tcp_syn_ack_timeout(struct sock *sk, struct request_sock *req)
{
	NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPTIMEOUTS);
}
EXPORT_SYMBOL(tcp_syn_ack_timeout);

void tcp_set_keepalive(struct sock *sk, int val)
{
	if ((1 << sk->sk_state) & (TCPF_CLOSE | TCPF_LISTEN))
		return;

	if (val && !sock_flag(sk, SOCK_KEEPOPEN))
		inet_csk_reset_keepalive_timer(sk, keepalive_time_when(tcp_sk(sk)));
	else if (!val)
		inet_csk_delete_keepalive_timer(sk);
}

static void tcp_keepalive_timer(unsigned long data)
{
	struct sock *sk = (struct sock *)data;
	struct inet_connection_sock *icsk = inet_csk(sk);
	struct tcp_sock *tp = tcp_sk(sk);
	u32 elapsed;

	/* Only process if the socket is not in use. */
	bh_lock_sock(sk);
	if (sock_owned_by_user(sk)) {
		/* Try again later. */
		inet_csk_reset_keepalive_timer(sk, HZ/20);
		goto out;
	}

	if (sk->sk_state == TCP_LISTEN) {
		tcp_synack_timer(sk);
		goto out;
	}

	if (sk->sk_state == TCP_FIN_WAIT2 && sock_flag(sk, SOCK_DEAD)) {
		if (tp->linger2 >= 0) {
			const int tmo = tcp_fin_time(sk) - TCP_TIMEWAIT_LEN;

			if (tmo > 0) {
				tcp_time_wait(sk, TCP_FIN_WAIT2, tmo);
				goto out;
			}
		}
		tcp_send_active_reset(sk, GFP_ATOMIC);
		goto death;
	}

	if (!sock_flag(sk, SOCK_KEEPOPEN) || sk->sk_state == TCP_CLOSE)
		goto out;

	elapsed = keepalive_time_when(tp);

	/* It is alive without keepalive 8) */
	if (tp->packets_out || tcp_send_head(sk))
		goto resched;

	elapsed = keepalive_time_elapsed(tp);

	if (elapsed >= keepalive_time_when(tp)) {
		/* If the TCP_USER_TIMEOUT option is enabled, use that
		 * to determine when to time out instead.
		 */
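		/* (With the keepalive defaults noted above, 7200 s of idle
		 * time plus 9 probes at 75 s intervals, a dead peer is reset
		 * roughly 7875 seconds after the last exchange.) */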
		if ((icsk->icsk_user_timeout != 0 &&
		    elapsed >= icsk->icsk_user_timeout &&
		    icsk->icsk_probes_out > 0) ||
		    (icsk->icsk_user_timeout == 0 &&
		    icsk->icsk_probes_out >= keepalive_probes(tp))) {
			tcp_send_active_reset(sk, GFP_ATOMIC);
			tcp_write_err(sk);
			goto out;
		}
		if (tcp_write_wakeup(sk) <= 0) {
			icsk->icsk_probes_out++;
			elapsed = keepalive_intvl_when(tp);
		} else {
			/* If keepalive was lost due to local congestion,
			 * try harder.
			 */
			elapsed = TCP_RESOURCE_PROBE_INTERVAL;
		}
	} else {
		/* It is tp->rcv_tstamp + keepalive_time_when(tp) */
		elapsed = keepalive_time_when(tp) - elapsed;
	}

	sk_mem_reclaim(sk);

resched:
	inet_csk_reset_keepalive_timer(sk, elapsed);
	goto out;

death:
	tcp_done(sk);

out:
	bh_unlock_sock(sk);
	sock_put(sk);
}

void tcp_init_xmit_timers(struct sock *sk)
{
	inet_csk_init_xmit_timers(sk, &tcp_write_timer, &tcp_delack_timer,
				  &tcp_keepalive_timer);
}
EXPORT_SYMBOL(tcp_init_xmit_timers);