/*
 * INET		An implementation of the TCP/IP protocol suite for the LINUX
 *		operating system. INET is implemented using the BSD Socket
 *		interface as the means of communication with the user level.
 *
 *		Implementation of the Transmission Control Protocol(TCP).
 *
 * Version:	$Id: tcp_input.c,v 1.198 2000/08/15 20:15:23 davem Exp $
 *
 * Authors:	Ross Biro, <bir7@leland.Stanford.Edu>
 *		Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
 *		Mark Evans, <evansmp@uhura.aston.ac.uk>
 *		Corey Minyard <wf-rch!minyard@relay.EU.net>
 *		Florian La Roche, <flla@stud.uni-sb.de>
 *		Charles Hedrick, <hedrick@klinzhai.rutgers.edu>
 *		Linus Torvalds, <torvalds@cs.helsinki.fi>
 *		Alan Cox, <gw4pts@gw4pts.ampr.org>
 *		Matthew Dillon, <dillon@apollo.west.oic.com>
 *		Arnt Gulbrandsen, <agulbra@nvg.unit.no>
 *		Jorge Cwik, <jorge@laser.satlink.net>
 *
 *		Pedro Roque	:	Fast Retransmit/Recovery.
 *					Retransmit queue handled by TCP.
 *					Better retransmit timer handling.
 *					New congestion avoidance.
 *		Eric		:	Fast Retransmit.
 *		Randy Scott	:	MSS option defines.
 *		Eric Schenk	:	Fixes to slow start algorithm.
 *		Eric Schenk	:	Yet another double ACK bug.
 *		Eric Schenk	:	Delayed ACK bug fixes.
 *		Eric Schenk	:	Floyd style fast retrans war avoidance.
 *		David S. Miller	:	Don't allow zero congestion window.
 *		Eric Schenk	:	Fix retransmitter so that it sends
 *					next packet on ack of previous packet.
 *		Andi Kleen	:	Moved open_request checking here
 *					and process RSTs for open_requests.
 *		Andi Kleen	:	Better prune_queue, and other fixes.
 *		Andrey Savochkin:	Fix RTT measurements in the presence of
 *		Andrey Savochkin:	Check sequence numbers correctly when
 *					removing SACKs due to in sequence incoming
 *		Andi Kleen:		Make sure we never ack data there is not
 *					enough room for. Also make this condition
 *					a fatal error if it might still happen.
 *		Andi Kleen:		Add tcp_measure_rcv_mss to make
 *					connections with MSS<min(MTU,ann. MSS)
 *					work without delayed acks.
 *		Andi Kleen:		Process packets with PSH set in the
 *		J Hadi Salim:		ECN support
 */
#include <linux/sysctl.h>
#include <net/inet_common.h>
#include <linux/ipsec.h>
/* These are on by default so the code paths get tested.
 * For the final 2.2 this may be undone at our discretion. -DaveM
 */
int sysctl_tcp_timestamps = 1;
int sysctl_tcp_window_scaling = 1;
int sysctl_tcp_sack = 1;
int sysctl_tcp_fack = 1;
int sysctl_tcp_reordering = TCP_FASTRETRANS_THRESH;
int sysctl_tcp_ecn = 1;
int sysctl_tcp_dsack = 1;
int sysctl_tcp_app_win = 31;
int sysctl_tcp_adv_win_scale = 2;

int sysctl_tcp_stdurg = 0;
int sysctl_tcp_rfc1337 = 0;
int sysctl_tcp_max_orphans = NR_FILE;
#define FLAG_DATA		0x01	/* Incoming frame contained data.		*/
#define FLAG_WIN_UPDATE		0x02	/* Incoming ACK was a window update.		*/
#define FLAG_DATA_ACKED		0x04	/* This ACK acknowledged new data.		*/
#define FLAG_RETRANS_DATA_ACKED	0x08	/* "" "" some of which was retransmitted.	*/
#define FLAG_SYN_ACKED		0x10	/* This ACK acknowledged SYN.			*/
#define FLAG_DATA_SACKED	0x20	/* New SACK.					*/
#define FLAG_ECE		0x40	/* ECE in this ACK				*/
#define FLAG_DATA_LOST		0x80	/* SACK detected data lossage.			*/
#define FLAG_SLOWPATH		0x100	/* Do not skip RFC checks for window update.	*/

#define FLAG_ACKED		(FLAG_DATA_ACKED|FLAG_SYN_ACKED)
#define FLAG_NOT_DUP		(FLAG_DATA|FLAG_WIN_UPDATE|FLAG_ACKED)
#define FLAG_CA_ALERT		(FLAG_DATA_SACKED|FLAG_ECE)
#define FLAG_FORWARD_PROGRESS	(FLAG_ACKED|FLAG_DATA_SACKED)

#define IsReno(tp) ((tp)->sack_ok == 0)
#define IsFack(tp) ((tp)->sack_ok & 2)

#define TCP_REMNANT (TCP_FLAG_FIN|TCP_FLAG_URG|TCP_FLAG_SYN|TCP_FLAG_PSH)
/* Adapt the MSS value used to make delayed ack decision to the
 * real world.
 */
static __inline__ void tcp_measure_rcv_mss(struct tcp_opt *tp, struct sk_buff *skb)
{
	unsigned int len, lss;

	lss = tp->ack.last_seg_size;
	tp->ack.last_seg_size = 0;

	/* skb->len may jitter because of SACKs, even if peer
	 * sends good full-sized frames.
	 */
	if (len >= tp->ack.rcv_mss) {
		tp->ack.rcv_mss = len;
		/* Dubious? Rather, it is final cut. 8) */
		if (tcp_flag_word(skb->h.th)&TCP_REMNANT)
			tp->ack.pending |= TCP_ACK_PUSHED;
	} else {
		/* Otherwise, we make more careful check taking into account,
		 * that SACKs block is variable.
		 *
		 * "len" is invariant segment length, including TCP header.
		 */
		len = skb->tail - skb->h.raw;
		if (len >= TCP_MIN_RCVMSS + sizeof(struct tcphdr) ||
		    /* If PSH is not set, packet should be
		     * full sized, provided peer TCP is not badly broken.
		     * This observation (if it is correct 8)) allows
		     * to handle super-low mtu links fairly.
		     */
		    (len >= TCP_MIN_MSS + sizeof(struct tcphdr) &&
		     !(tcp_flag_word(skb->h.th)&TCP_REMNANT))) {
			/* Subtract also invariant (if peer is RFC compliant),
			 * tcp header plus fixed timestamp option length.
			 * Resulting "len" is MSS free of SACK jitter.
			 */
			len -= tp->tcp_header_len;
			tp->ack.last_seg_size = len;
			tp->ack.rcv_mss = len;
		}
		tp->ack.pending |= TCP_ACK_PUSHED;
	}
}
static void tcp_incr_quickack(struct tcp_opt *tp)
{
	unsigned quickacks = tp->rcv_wnd/(2*tp->ack.rcv_mss);

	if (quickacks > tp->ack.quick)
		tp->ack.quick = min(quickacks, TCP_MAX_QUICKACKS);
}

void tcp_enter_quickack_mode(struct tcp_opt *tp)
{
	tcp_incr_quickack(tp);
	tp->ack.pingpong = 0;
	tp->ack.ato = TCP_ATO_MIN;
}

/* Send ACKs quickly, if "quick" count is not exhausted
 * and the session is not interactive.
 */
static __inline__ int tcp_in_quickack_mode(struct tcp_opt *tp)
{
	return (tp->ack.quick && !tp->ack.pingpong);
}
/* Buffer size and advertised window tuning.
 *
 * 1. Tuning sk->sndbuf, when connection enters established state.
 */
static void tcp_fixup_sndbuf(struct sock *sk)
{
	struct tcp_opt *tp = &(sk->tp_pinfo.af_tcp);
	int sndmem = tp->mss_clamp + MAX_TCP_HEADER + 16 + sizeof(struct sk_buff);

	if (sk->sndbuf < 3*sndmem)
		sk->sndbuf = min(3*sndmem, sysctl_tcp_wmem[2]);
}
/* 2. Tuning advertised window (window_clamp, rcv_ssthresh)
 *
 * All tcp_full_space() is split to two parts: "network" buffer, allocated
 * forward and advertised in receiver window (tp->rcv_wnd) and
 * "application buffer", required to isolate scheduling/application
 * latencies from network.
 * window_clamp is maximal advertised window. It can be less than
 * tcp_full_space(), in this case tcp_full_space() - window_clamp
 * is reserved for "application" buffer. The less window_clamp is
 * the smoother our behaviour from viewpoint of network, but the lower
 * throughput and the higher sensitivity of the connection to losses. 8)
 *
 * rcv_ssthresh is more strict window_clamp used at "slow start"
 * phase to predict further behaviour of this connection.
 * It is used for two goals:
 * - to enforce header prediction at sender, even when application
 *   requires some significant "application buffer". It is check #1.
 * - to prevent pruning of receive queue because of misprediction
 *   of receiver window. Check #2.
 *
 * The scheme does not work when sender sends good segments opening
 * window and then starts to feed us spaghetti. But it should work
 * in common situations. Otherwise, we have to rely on queue collapsing.
 */
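/* Illustrative sketch (not from the original source): how the split between
 * the "network" part and the "application" reserve described above could be
 * computed for a given tcp_full_space() and window_clamp.  The helper name
 * is hypothetical and exists only for explanation.
 */
#if 0	/* example only */
static unsigned int app_buffer_reserved(unsigned int full_space,
					unsigned int window_clamp)
{
	/* Everything that is not advertised to the peer stays reserved
	 * for scheduling/application latency. */
	return full_space > window_clamp ? full_space - window_clamp : 0;
}
#endif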
/* Slow part of check#2. */
static int
__tcp_grow_window(struct sock *sk, struct tcp_opt *tp, struct sk_buff *skb)
{
	int truesize = tcp_win_from_space(skb->truesize)/2;
	int window = tcp_full_space(sk)/2;

	while (tp->rcv_ssthresh <= window) {
		if (truesize <= skb->len)
			return 2*tp->ack.rcv_mss;
	}
}
static __inline__ void
tcp_grow_window(struct sock *sk, struct tcp_opt *tp, struct sk_buff *skb)
{
	if (tp->rcv_ssthresh < tp->window_clamp &&
	    (int)tp->rcv_ssthresh < tcp_space(sk) &&
	    !tcp_memory_pressure) {
		/* Check #2. Increase window, if skb with such overhead
		 * will fit to rcvbuf in future.
		 */
		if (tcp_win_from_space(skb->truesize) <= skb->len)
			incr = __tcp_grow_window(sk, tp, skb);

		tp->rcv_ssthresh = min(tp->rcv_ssthresh + incr, tp->window_clamp);
	}
}
/* 3. Tuning rcvbuf, when connection enters established state. */
static void tcp_fixup_rcvbuf(struct sock *sk)
{
	struct tcp_opt *tp = &(sk->tp_pinfo.af_tcp);
	int rcvmem = tp->advmss + MAX_TCP_HEADER + 16 + sizeof(struct sk_buff);

	/* Try to select rcvbuf so that 4 mss-sized segments
	 * will fit to window and corresponding skbs will fit to our rcvbuf.
	 * (was 3; 4 is minimum to allow fast retransmit to work.)
	 */
	while (tcp_win_from_space(rcvmem) < tp->advmss)
		rcvmem += 128;
	if (sk->rcvbuf < 4*rcvmem)
		sk->rcvbuf = min(4*rcvmem, sysctl_tcp_rmem[2]);
}
/* 4. Try to fixup all. It is made immediately after connection enters
 *    established state.
 */
static void tcp_init_buffer_space(struct sock *sk)
{
	struct tcp_opt *tp = &(sk->tp_pinfo.af_tcp);
	int maxwin;

	if (!(sk->userlocks&SOCK_RCVBUF_LOCK))
		tcp_fixup_rcvbuf(sk);
	if (!(sk->userlocks&SOCK_SNDBUF_LOCK))
		tcp_fixup_sndbuf(sk);

	maxwin = tcp_full_space(sk);

	if (tp->window_clamp >= maxwin) {
		tp->window_clamp = maxwin;

		if (sysctl_tcp_app_win && maxwin > 4*tp->advmss)
			tp->window_clamp = max(maxwin - (maxwin>>sysctl_tcp_app_win), 4*tp->advmss);
	}

	/* Force reservation of one segment. */
	if (sysctl_tcp_app_win &&
	    tp->window_clamp > 2*tp->advmss &&
	    tp->window_clamp + tp->advmss > maxwin)
		tp->window_clamp = max(2*tp->advmss, maxwin - tp->advmss);

	tp->rcv_ssthresh = min(tp->rcv_ssthresh, tp->window_clamp);
	tp->snd_cwnd_stamp = tcp_time_stamp;
}
/* 5. Recalculate window clamp after socket hit its memory bounds. */
static void tcp_clamp_window(struct sock *sk, struct tcp_opt *tp)
{
	int app_win = tp->rcv_nxt - tp->copied_seq;

	skb_queue_walk(&tp->out_of_order_queue, skb) {
	}

	/* If overcommit is due to out of order segments,
	 * do not clamp window. Try to expand rcvbuf instead.
	 */
	if (sk->rcvbuf < sysctl_tcp_rmem[2] &&
	    !(sk->userlocks&SOCK_RCVBUF_LOCK) &&
	    !tcp_memory_pressure &&
	    atomic_read(&tcp_memory_allocated) < sysctl_tcp_mem[0])
		sk->rcvbuf = min(atomic_read(&sk->rmem_alloc), sysctl_tcp_rmem[2]);

	if (atomic_read(&sk->rmem_alloc) > sk->rcvbuf) {
		if (atomic_read(&sk->rmem_alloc) >= 2*sk->rcvbuf)

		if (app_win > tp->ack.rcv_mss)
			app_win -= tp->ack.rcv_mss;
		app_win = max(app_win, 2*tp->advmss);

		tp->window_clamp = min(tp->window_clamp, app_win);
		tp->rcv_ssthresh = min(tp->window_clamp, 2*tp->advmss);
	}
}
/* There is something which you must keep in mind when you analyze the
 * behavior of the tp->ato delayed ack timeout interval. When a
 * connection starts up, we want to ack as quickly as possible. The
 * problem is that "good" TCP's do slow start at the beginning of data
 * transmission. This means that until we send the first few ACK's the
 * sender will sit on his end and only queue most of his data, because
 * he can only send snd_cwnd unacked packets at any given time. For
 * each ACK we send, he increments snd_cwnd and transmits more of his
 * queue.
 */
static void tcp_event_data_recv(struct sock *sk, struct tcp_opt *tp, struct sk_buff *skb)
{
	u32 now;

	tcp_schedule_ack(tp);

	tcp_measure_rcv_mss(tp, skb);

	now = tcp_time_stamp;

	if (!tp->ack.ato) {
		/* The _first_ data packet received, initialize
		 * delayed ACK engine.
		 */
		tcp_enter_quickack_mode(tp);
	} else {
		int m = now - tp->ack.lrcvtime;

		if (m <= TCP_ATO_MIN/2) {
			/* The fastest case is the first. */
			tp->ack.ato = (tp->ack.ato>>1) + TCP_ATO_MIN/2;
		} else if (m < tp->ack.ato) {
			tp->ack.ato = (tp->ack.ato>>1) + m;
			if (tp->ack.ato > tp->rto)
				tp->ack.ato = tp->rto;
		} else if (m > tp->rto) {
			/* Too long gap. Apparently sender failed to
			 * restart window, so that we send ACKs quickly.
			 */
			tcp_incr_quickack(tp);
		}
	}

	tp->ack.lrcvtime = now;

	TCP_ECN_check_ce(tp, skb);

	tcp_grow_window(sk, tp, skb);
}
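/* Illustrative sketch (not part of the original file): the delayed-ACK
 * timeout maintained above is an EWMA of the inter-arrival gap m, roughly
 * ato = ato/2 + m/2, pulled down fast for bursts and never allowed above
 * the RTO.  A hypothetical standalone rendering of that update:
 */
#if 0	/* example only */
static unsigned int ato_update(unsigned int ato, unsigned int m,
			       unsigned int rto, unsigned int ato_min)
{
	if (m <= ato_min / 2) {
		ato = (ato >> 1) + ato_min / 2;	/* back-to-back data: shrink ato */
	} else if (m < ato) {
		ato = (ato >> 1) + m;		/* smooth toward the new sample */
		if (ato > rto)
			ato = rto;		/* never delay an ACK past the RTO */
	}
	return ato;
}
#endif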
/* Called to compute a smoothed rtt estimate. The data fed to this
 * routine either comes from timestamps, or from segments that were
 * known _not_ to have been retransmitted [see Karn/Partridge
 * Proceedings SIGCOMM 87]. The algorithm is from the SIGCOMM 88
 * piece by Van Jacobson.
 * NOTE: the next three routines used to be one big routine.
 * To save cycles in the RFC 1323 implementation it was better to break
 * it up into three procedures. -- erics
 */
static __inline__ void tcp_rtt_estimator(struct tcp_opt *tp, __u32 mrtt)
{
	long m = mrtt; /* RTT */

	/* The following amusing code comes from Jacobson's
	 * article in SIGCOMM '88. Note that rtt and mdev
	 * are scaled versions of rtt and mean deviation.
	 * This is designed to be as fast as possible
	 * m stands for "measurement".
	 *
	 * On a 1990 paper the rto value is changed to:
	 * RTO = rtt + 4 * mdev
	 *
	 * Funny. This algorithm seems to be very broken.
	 * These formulae increase RTO, when it should be decreased, increase
	 * too slowly, when it should be increased fast, decrease too fast
	 * etc. I guess in BSD RTO takes ONE value, so that it absolutely
	 * does not matter how to _calculate_ it. Seems, it was trap
	 * that VJ failed to avoid. 8)
	 */
	if (tp->srtt != 0) {
		m -= (tp->srtt >> 3);	/* m is now error in rtt est */
		tp->srtt += m;		/* rtt = 7/8 rtt + 1/8 new */
		if (m < 0)
			m = -m;		/* m is now abs(error) */
		m -= (tp->mdev >> 2);	/* similar update on mdev */
		tp->mdev += m;		/* mdev = 3/4 mdev + 1/4 new */
	} else {
		/* no previous measure. */
		tp->srtt = m<<3;	/* take the measured time to be rtt */
		tp->mdev = m<<2;	/* make sure rto = 3*rtt */
	}
}
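/* Worked example (not from the original source), using the 8*srtt/4*mdev
 * fixed point above: with srtt = 800 (a smoothed RTT of 100 ticks) and a
 * new measurement m = 120 ticks, the error is m - (srtt>>3) = 20, so srtt
 * becomes 820 (102.5 ticks); if mdev = 40 (10 ticks scaled by 4), the
 * deviation update adds |20| - (mdev>>2) = 10, giving mdev = 50.
 */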
/* Calculate rto without backoff. This is the second half of Van Jacobson's
 * routine referred to above.
 */
static __inline__ void tcp_set_rto(struct tcp_opt *tp)
{
	tp->rto = (tp->srtt >> 3) + tp->mdev;
	/* I am not educated enough to understand this magic.
	 * However, it smells bad. snd_cwnd>31 is common case.
	 */
	/* OK, I found comment in 2.0 source tree, it deserves
	 * Note: Jacobson's algorithm is fine on BSD which has a 1/2 second
	 * granularity clock, but with our 1/100 second granularity clock we
	 * become too sensitive to minor changes in the round trip time.
	 * We add in two compensating factors. First we multiply by 5/4.
	 * For large congestion windows this allows us to tolerate burst
	 * traffic delaying up to 1/4 of our packets. We also add in
	 * a rtt / cong_window term. For small congestion windows this allows
	 * a single packet delay, but has negligible effect
	 * on the compensation for large windows.
	 */
	tp->rto += (tp->rto >> 2) + (tp->rto >> (tp->snd_cwnd - 1));
}

/* Keep the rto between HZ/5 and 120*HZ. 120*HZ is the upper bound
 * on packet lifetime in the internet. We need the HZ/5 lower
 * bound to behave correctly against BSD stacks with a fixed
 * timeout.
 * FIXME: It's not entirely clear this lower bound is the best
 * way to avoid the problem. Is it possible to drop the lower
 * bound and still avoid trouble with BSD stacks? Perhaps
 * some modification to the RTO calculation that takes delayed
 * ack bias into account? This needs serious thought. -- erics
 */
static __inline__ void tcp_bound_rto(struct tcp_opt *tp)
{
	if (tp->rto < TCP_RTO_MIN)
		tp->rto = TCP_RTO_MIN;
	else if (tp->rto > TCP_RTO_MAX)
		tp->rto = TCP_RTO_MAX;
}
/* Save metrics learned by this TCP session.
 * This function is called only when TCP finishes successfully,
 * i.e. when it enters TIME-WAIT or goes from LAST-ACK to CLOSE.
 */
void tcp_update_metrics(struct sock *sk)
{
	struct tcp_opt *tp = &(sk->tp_pinfo.af_tcp);
	struct dst_entry *dst = __sk_dst_get(sk);

	if (dst && (dst->flags&DST_HOST)) {
		int m;

		if (tp->backoff || !tp->srtt) {
			/* This session failed to estimate rtt. Why?
			 * Probably, no packets returned in time.
			 */
			if (!(dst->mxlock&(1<<RTAX_RTT)))
		}

		m = dst->rtt - tp->srtt;

		/* If newly calculated rtt larger than stored one,
		 * store new one. Otherwise, use EWMA. Remember,
		 * rtt overestimation is always better than underestimation.
		 */
		if (!(dst->mxlock&(1<<RTAX_RTT))) {
		}

		if (!(dst->mxlock&(1<<RTAX_RTTVAR))) {
			/* Scale deviation to rttvar fixed point */
			if (m >= dst->rttvar)
				dst->rttvar -= (dst->rttvar - m)>>2;
		}

		if (tp->snd_ssthresh >= 0xFFFF) {
			/* Slow start still did not finish. */
			if (dst->ssthresh &&
			    !(dst->mxlock&(1<<RTAX_SSTHRESH)) &&
			    (tp->snd_cwnd>>1) > dst->ssthresh)
				dst->ssthresh = (tp->snd_cwnd>>1);
			if (!(dst->mxlock&(1<<RTAX_CWND)) &&
			    tp->snd_cwnd > dst->cwnd)
				dst->cwnd = tp->snd_cwnd;
		} else if (tp->snd_cwnd > tp->snd_ssthresh &&
			   tp->ca_state == TCP_CA_Open) {
			/* Cong. avoidance phase, cwnd is reliable. */
			if (!(dst->mxlock&(1<<RTAX_SSTHRESH)))
				dst->ssthresh = max(tp->snd_cwnd>>1, tp->snd_ssthresh);
			if (!(dst->mxlock&(1<<RTAX_CWND)))
				dst->cwnd = (dst->cwnd + tp->snd_cwnd)>>1;
		} else {
			/* Else slow start did not finish, cwnd is non-sense,
			 * ssthresh may be also invalid.
			 */
			if (!(dst->mxlock&(1<<RTAX_CWND)))
				dst->cwnd = (dst->cwnd + tp->snd_ssthresh)>>1;
			if (dst->ssthresh &&
			    !(dst->mxlock&(1<<RTAX_SSTHRESH)) &&
			    tp->snd_ssthresh > dst->ssthresh)
				dst->ssthresh = tp->snd_ssthresh;
		}

		if (!(dst->mxlock&(1<<RTAX_REORDERING))) {
			if (dst->reordering < tp->reordering &&
			    tp->reordering != sysctl_tcp_reordering)
				dst->reordering = tp->reordering;
		}
	}
}
/* Increase initial CWND conservatively: if estimated
 * RTT is low enough (<20msec) or if we have some preset ssthresh.
 *
 * Numbers are taken from RFC 2414.
 */
__u32 tcp_init_cwnd(struct tcp_opt *tp)
{
	__u32 cwnd;

	if (tp->mss_cache > 1460)
		cwnd = 2;
	else
		cwnd = (tp->mss_cache > 1095) ? 3 : 4;

	if (!tp->srtt || (tp->snd_ssthresh >= 0xFFFF && tp->srtt > ((HZ/50)<<3)))
		cwnd = 2;
	else if (cwnd > tp->snd_ssthresh)
		cwnd = tp->snd_ssthresh;

	return min(cwnd, tp->snd_cwnd_clamp);
}
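/* Worked example (not part of the original file): with the RFC 2414 style
 * table above, an mss_cache of 1460 gives an initial cwnd of 3 segments,
 * anything larger than 1460 gives 2, and a small MSS (1095 or less) gives 4,
 * before the clamps against snd_ssthresh and snd_cwnd_clamp are applied.
 */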
/* Initialize metrics on socket. */
static void tcp_init_metrics(struct sock *sk)
{
	struct tcp_opt *tp = &(sk->tp_pinfo.af_tcp);
	struct dst_entry *dst = __sk_dst_get(sk);

	if (dst->mxlock&(1<<RTAX_CWND))
		tp->snd_cwnd_clamp = dst->cwnd;
	tp->snd_ssthresh = dst->ssthresh;
	if (tp->snd_ssthresh > tp->snd_cwnd_clamp)
		tp->snd_ssthresh = tp->snd_cwnd_clamp;
	if (dst->reordering && tp->reordering != dst->reordering) {
		tp->reordering = dst->reordering;
	}

	if (!tp->srtt && dst->rtt < (TCP_TIMEOUT_INIT<<3))

	/* Initial rtt is determined from SYN,SYN-ACK.
	 * The segment is small and rtt may appear much
	 * less than real one. Use per-dst memory
	 * to make it more realistic.
	 *
	 * A bit of theory. RTT is time passed after "normal" sized packet
	 * is sent until it is ACKed. In normal circumstances sending small
	 * packets force peer to delay ACKs and calculation is correct too.
	 * The algorithm is adaptive and, provided we follow specs, it
	 * NEVER underestimate RTT. BUT! If peer tries to make some clever
	 * tricks sort of "quick acks" for time long enough to decrease RTT
	 * to low value, and then abruptly stops to do it and starts to delay
	 * ACKs, wait for troubles.
	 */
	if (dst->rtt > tp->srtt)
		tp->srtt = dst->rtt;
	if (dst->rttvar > tp->mdev)
		tp->mdev = dst->rttvar;
	if (tp->rto < TCP_TIMEOUT_INIT && !tp->saw_tstamp)
		goto reset;
	tp->snd_cwnd = tcp_init_cwnd(tp);
	tp->snd_cwnd_stamp = tcp_time_stamp;
	return;

reset:
	/* Play conservative. If timestamps are not
	 * supported, TCP will fail to recalculate correct
	 * rtt, if initial rto is too small. FORGET ALL AND RESET!
	 */
	if (!tp->saw_tstamp && tp->srtt) {
		tp->mdev = TCP_TIMEOUT_INIT;
		tp->rto = TCP_TIMEOUT_INIT;
	}
}
static void tcp_update_reordering(struct tcp_opt *tp, int metric, int ts)
{
	if (metric > tp->reordering) {
		tp->reordering = min(TCP_MAX_REORDERING, metric);

		/* This exciting event is worth to be remembered. 8) */
		if (ts)
			NET_INC_STATS_BH(TCPTSReorder);
		else if (IsReno(tp))
			NET_INC_STATS_BH(TCPRenoReorder);
		else if (IsFack(tp))
			NET_INC_STATS_BH(TCPFACKReorder);
		else
			NET_INC_STATS_BH(TCPSACKReorder);
#if FASTRETRANS_DEBUG > 1
		printk(KERN_DEBUG "Disorder%d %d %u f%u s%u rr%d\n",
		       tp->sack_ok, tp->ca_state,
		       tp->reordering, tp->fackets_out, tp->sacked_out,
		       tp->undo_marker ? tp->undo_retrans : 0);
#endif
		/* Disable FACK yet. */
	}
}
/* This procedure tags the retransmission queue when SACKs arrive.
 *
 * We have three tag bits: SACKED(S), RETRANS(R) and LOST(L).
 * Packets in queue with these bits set are counted in variables
 * sacked_out, retrans_out and lost_out, correspondingly.
 *
 * Valid combinations are:
 *  Tag	 InFlight	Description
 *  0	 1		- orig segment is in flight.
 *  S	 0		- nothing flies, orig reached receiver.
 *  L	 0		- nothing flies, orig lost by net.
 *  R	 2		- both orig and retransmit are in flight.
 *  L|R	 1		- orig is lost, retransmit is in flight.
 *  S|R	 1		- orig reached receiver, retrans is still in flight.
 * (L|S|R is logically valid, it could occur when L|R is sacked,
 *  but it is equivalent to plain S and code short-circuits it to S.
 *  L|S is logically invalid, it would mean -1 packet in flight 8))
 *
 * These 6 states form finite state machine, controlled by the following events:
 * 1. New ACK (+SACK) arrives. (tcp_sacktag_write_queue())
 * 2. Retransmission. (tcp_retransmit_skb(), tcp_xmit_retransmit_queue())
 * 3. Loss detection event of one of three flavors:
 *	A. Scoreboard estimator decided the packet is lost.
 *	   A'. Reno "three dupacks" marks head of queue lost.
 *	   A''. Its FACK modification, head until snd.fack is lost.
 *	B. SACK arrives sacking data transmitted after never retransmitted
 *	C. SACK arrives sacking SND.NXT at the moment, when the
 *	   segment was retransmitted.
 * 4. D-SACK added new rule: D-SACK changes any tag to S.
 *
 * It is pleasant to note, that state diagram turns out to be commutative,
 * so that we are allowed not to be bothered by order of our actions,
 * when multiple events arrive simultaneously. (see the function below).
 *
 * Reordering detection.
 * --------------------
 * Reordering metric is maximal distance, which a packet can be displaced
 * in packet stream. With SACKs we can estimate it:
 *
 * 1. SACK fills old hole and the corresponding segment was not
 *    ever retransmitted -> reordering. Alas, we cannot use it
 *    when segment was retransmitted.
 * 2. The last flaw is solved with D-SACK. D-SACK arrives
 *    for retransmitted and already SACKed segment -> reordering..
 * Both of these heuristics are not used in Loss state, when we cannot
 * account for retransmits accurately.
 */
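/* Illustrative sketch (not from the original source): how the S/R/L tag
 * combinations in the table above map to a per-segment contribution to
 * packets in flight.  The helper name is hypothetical, for explanation only.
 */
#if 0	/* example only */
static int skb_in_flight(__u8 sacked)
{
	int fly = 1;				/* untagged segment: one copy in flight */
	if (sacked & TCPCB_SACKED_ACKED)
		fly -= 1;			/* original reached the receiver */
	if (sacked & TCPCB_LOST)
		fly -= 1;			/* original lost by the network */
	if (sacked & TCPCB_SACKED_RETRANS)
		fly += 1;			/* a retransmit is also in flight */
	return fly;				/* reproduces the 0/S/L/R/L|R/S|R rows */
}
#endif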
static int
tcp_sacktag_write_queue(struct sock *sk, struct sk_buff *ack_skb, u32 prior_snd_una)
{
	struct tcp_opt *tp = &(sk->tp_pinfo.af_tcp);
	unsigned char *ptr = ack_skb->h.raw + TCP_SKB_CB(ack_skb)->sacked;
	struct tcp_sack_block *sp = (struct tcp_sack_block *)(ptr+2);
	int num_sacks = (ptr[1] - TCPOLEN_SACK_BASE)>>3;
	int reord = tp->packets_out;
	u32 lost_retrans = 0;

	prior_fackets = tp->fackets_out;

	for (i = 0; i < num_sacks; i++, sp++) {
		__u32 start_seq = ntohl(sp->start_seq);
		__u32 end_seq = ntohl(sp->end_seq);

		/* Check for D-SACK. */
		u32 ack = TCP_SKB_CB(ack_skb)->ack_seq;

		if (before(start_seq, ack)) {
			NET_INC_STATS_BH(TCPDSACKRecv);
		} else if (num_sacks > 1 &&
			   !after(end_seq, ntohl(sp[1].end_seq)) &&
			   !before(start_seq, ntohl(sp[1].start_seq))) {
			NET_INC_STATS_BH(TCPDSACKOfoRecv);
		}

		/* D-SACK for already forgotten data...
		 * Do dumb counting. */
		if (!after(end_seq, prior_snd_una) &&
		    after(end_seq, tp->undo_marker))

		/* Eliminate too old ACKs, but take into
		 * account more or less fresh ones, they can
		 * contain valid SACK info.
		 */
		if (before(ack, prior_snd_una - tp->max_window))

		/* Event "B" in the comment above. */
		if (after(end_seq, tp->high_seq))
			flag |= FLAG_DATA_LOST;

		for_retrans_queue(skb, sk, tp) {
			u8 sacked = TCP_SKB_CB(skb)->sacked;

			/* The retransmission queue is always in order, so
			 * we can short-circuit the walk early.
			 */
			if (!before(TCP_SKB_CB(skb)->seq, end_seq))

			in_sack = !after(start_seq, TCP_SKB_CB(skb)->seq) &&
				  !before(end_seq, TCP_SKB_CB(skb)->end_seq);

			/* Account D-SACK for retransmitted packet. */
			if ((dup_sack && in_sack) &&
			    (sacked & TCPCB_RETRANS) &&
			    after(TCP_SKB_CB(skb)->end_seq, tp->undo_marker))

			/* The frame is ACKed. */
			if (!after(TCP_SKB_CB(skb)->end_seq, tp->snd_una)) {
				if (sacked&TCPCB_RETRANS) {
					if ((dup_sack && in_sack) &&
					    (sacked&TCPCB_SACKED_ACKED))
						reord = min(fack_count, reord);
				}
				/* If it was in a hole, we detected reordering. */
				if (fack_count < prior_fackets &&
				    !(sacked&TCPCB_SACKED_ACKED))
					reord = min(fack_count, reord);

				/* Nothing to do; acked frame is about to be dropped. */

				if ((sacked&TCPCB_SACKED_RETRANS) &&
				    after(end_seq, TCP_SKB_CB(skb)->ack_seq) &&
				    (!lost_retrans || after(end_seq, lost_retrans)))
					lost_retrans = end_seq;
			}

			if (!(sacked&TCPCB_SACKED_ACKED)) {
				if (sacked & TCPCB_SACKED_RETRANS) {
					/* If the segment is not tagged as lost,
					 * we do not clear RETRANS, believing
					 * that retransmission is still in flight.
					 */
					if (sacked & TCPCB_LOST) {
						TCP_SKB_CB(skb)->sacked &= ~(TCPCB_LOST|TCPCB_SACKED_RETRANS);
					}
				}
				/* New sack for not retransmitted frame,
				 * which was in hole. It is reordering.
				 */
				if (!(sacked & TCPCB_RETRANS) &&
				    fack_count < prior_fackets)
					reord = min(fack_count, reord);

				if (sacked & TCPCB_LOST) {
					TCP_SKB_CB(skb)->sacked &= ~TCPCB_LOST;
				}

				TCP_SKB_CB(skb)->sacked |= TCPCB_SACKED_ACKED;
				flag |= FLAG_DATA_SACKED;

				if (fack_count > tp->fackets_out)
					tp->fackets_out = fack_count;
			} else {
				if (dup_sack && (sacked&TCPCB_RETRANS))
					reord = min(fack_count, reord);
			}

			/* D-SACK. We can detect redundant retransmission
			 * in S|R and plain R frames and clear it.
			 * undo_retrans is decreased above, L|R frames
			 * are accounted above as well.
			 */
			if (dup_sack &&
			    (TCP_SKB_CB(skb)->sacked&TCPCB_SACKED_RETRANS)) {
				TCP_SKB_CB(skb)->sacked &= ~TCPCB_SACKED_RETRANS;
			}
		}
	}

	/* Check for lost retransmit. This superb idea is
	 * borrowed from "ratehalving". Event "C".
	 * Later note: FACK people cheated me again 8),
	 * we have to account for reordering! Ugly,
	 */
	if (lost_retrans && tp->ca_state == TCP_CA_Recovery) {
		for_retrans_queue(skb, sk, tp) {
			if (after(TCP_SKB_CB(skb)->seq, lost_retrans))

			if (!after(TCP_SKB_CB(skb)->end_seq, tp->snd_una))

			if ((TCP_SKB_CB(skb)->sacked&TCPCB_SACKED_RETRANS) &&
			    after(lost_retrans, TCP_SKB_CB(skb)->ack_seq) &&
			    (IsFack(tp) ||
			     !before(lost_retrans, TCP_SKB_CB(skb)->ack_seq + tp->reordering*tp->mss_cache))) {
				TCP_SKB_CB(skb)->sacked &= ~TCPCB_SACKED_RETRANS;

				if (!(TCP_SKB_CB(skb)->sacked&(TCPCB_LOST|TCPCB_SACKED_ACKED))) {
					TCP_SKB_CB(skb)->sacked |= TCPCB_LOST;
					flag |= FLAG_DATA_SACKED;
					NET_INC_STATS_BH(TCPLostRetransmit);
				}
			}
		}
	}

	tp->left_out = tp->sacked_out + tp->lost_out;

	if (reord < tp->fackets_out && tp->ca_state != TCP_CA_Loss)
		tcp_update_reordering(tp, (tp->fackets_out + 1) - reord, 0);

#if FASTRETRANS_DEBUG > 0
	BUG_TRAP((int)tp->sacked_out >= 0);
	BUG_TRAP((int)tp->lost_out >= 0);
	BUG_TRAP((int)tp->retrans_out >= 0);
	BUG_TRAP((int)tcp_packets_in_flight(tp) >= 0);
#endif
	return flag;
}
void tcp_clear_retrans(struct tcp_opt *tp)
{
	tp->undo_retrans = 0;
}
/* Enter Loss state. If "how" is not zero, forget all SACK information
 * and reset tags completely, otherwise preserve SACKs. If receiver
 * dropped its ofo queue, we will know this due to reneging detection.
 */
void tcp_enter_loss(struct sock *sk, int how)
{
	struct tcp_opt *tp = &sk->tp_pinfo.af_tcp;

	/* Reduce ssthresh if it has not yet been made inside this window. */
	if (tp->ca_state <= TCP_CA_Disorder ||
	    tp->snd_una == tp->high_seq ||
	    (tp->ca_state == TCP_CA_Loss && !tp->retransmits)) {
		tp->prior_ssthresh = tcp_current_ssthresh(tp);
		tp->snd_ssthresh = tcp_recalc_ssthresh(tp);
	}
	tp->snd_cwnd_cnt = 0;
	tp->snd_cwnd_stamp = tcp_time_stamp;

	tcp_clear_retrans(tp);

	/* Push undo marker, if it was plain RTO and nothing
	 * was retransmitted. */
	tp->undo_marker = tp->snd_una;

	for_retrans_queue(skb, sk, tp) {
		if (TCP_SKB_CB(skb)->sacked&TCPCB_RETRANS)
		TCP_SKB_CB(skb)->sacked &= (~TCPCB_TAGBITS)|TCPCB_SACKED_ACKED;
		if (!(TCP_SKB_CB(skb)->sacked&TCPCB_SACKED_ACKED) || how) {
			TCP_SKB_CB(skb)->sacked &= ~TCPCB_SACKED_ACKED;
			TCP_SKB_CB(skb)->sacked |= TCPCB_LOST;
		}
	}
	tp->fackets_out = cnt;

	tp->left_out = tp->sacked_out + tp->lost_out;

	tp->reordering = min(tp->reordering, sysctl_tcp_reordering);
	tp->ca_state = TCP_CA_Loss;
	tp->high_seq = tp->snd_nxt;
	TCP_ECN_queue_cwr(tp);
}
static int tcp_check_sack_reneging(struct sock *sk, struct tcp_opt *tp)
{
	struct sk_buff *skb;

	/* If ACK arrived pointing to a remembered SACK,
	 * it means that our remembered SACKs do not reflect
	 * real state of receiver i.e.
	 * receiver _host_ is heavily congested (or buggy).
	 * Do processing similar to RTO timeout.
	 */
	if ((skb = skb_peek(&sk->write_queue)) != NULL &&
	    (TCP_SKB_CB(skb)->sacked & TCPCB_SACKED_ACKED)) {
		NET_INC_STATS_BH(TCPSACKReneging);

		tcp_enter_loss(sk, 1);
		tcp_retransmit_skb(sk, skb_peek(&sk->write_queue));
		tcp_reset_xmit_timer(sk, TCP_TIME_RETRANS, tp->rto);
	}
}

static inline int tcp_fackets_out(struct tcp_opt *tp)
{
	return IsReno(tp) ? tp->sacked_out + 1 : tp->fackets_out;
}
/* Linux NewReno/SACK/FACK/ECN state machine.
 * --------------------------------------
 *
 * "Open"	Normal state, no dubious events, fast path.
 * "Disorder"	In all the respects it is "Open",
 *		but requires a bit more attention. It is entered when
 *		we see some SACKs or dupacks. It is split off "Open"
 *		mainly to move some processing from fast path to slow one.
 * "CWR"	CWND was reduced due to some Congestion Notification event.
 *		It can be ECN, ICMP source quench, local device congestion.
 * "Recovery"	CWND was reduced, we are fast-retransmitting.
 * "Loss"	CWND was reduced due to RTO timeout or SACK reneging.
 *
 * tcp_fastretrans_alert() is entered:
 * - each incoming ACK, if state is not "Open"
 * - when arrived ACK is unusual, namely:
 *
 * Counting packets in flight is pretty simple.
 *
 *	in_flight = packets_out - left_out + retrans_out
 *
 *	packets_out is SND.NXT-SND.UNA counted in packets.
 *
 *	retrans_out is number of retransmitted segments.
 *
 *	left_out is number of segments left network, but not ACKed yet.
 *
 *		left_out = sacked_out + lost_out
 *
 *     sacked_out: Packets, which arrived to receiver out of order
 *		   and hence not ACKed. With SACKs this number is simply
 *		   amount of SACKed data. Even without SACKs
 *		   it is easy to give pretty reliable estimate of this number,
 *		   counting duplicate ACKs.
 *
 *       lost_out: Packets lost by network. TCP has no explicit
 *		   "loss notification" feedback from network (for now).
 *		   It means that this number can be only _guessed_.
 *		   Actually, it is the heuristics to predict lossage that
 *		   distinguishes different algorithms.
 *
 *	F.e. after RTO, when all the queue is considered as lost,
 *	lost_out = packets_out and in_flight = retrans_out.
 *
 *	Essentially, we have now two algorithms counting
 *	lost packets.
 *
 *	FACK: It is the simplest heuristics. As soon as we decided
 *	that something is lost, we decide that _all_ not SACKed
 *	packets until the most forward SACK are lost. I.e.
 *	lost_out = fackets_out - sacked_out and left_out = fackets_out.
 *	It is absolutely correct estimate, if network does not reorder
 *	packets. And it loses any connection to reality when reordering
 *	takes place. We use FACK by default until reordering
 *	is suspected on the path to this destination.
 *
 *	NewReno: when Recovery is entered, we assume that one segment
 *	is lost (classic Reno). While we are in Recovery and
 *	a partial ACK arrives, we assume that one more packet
 *	is lost (NewReno). These heuristics are the same in NewReno
 *	and SACK.
 *
 * Imagine, that's all! Forget about all this shamanism about CWND inflation
 * deflation etc. CWND is real congestion window, never inflated, changes
 * only according to classic VJ rules.
 *
 * Really tricky (and requiring careful tuning) part of algorithm
 * is hidden in functions tcp_time_to_recover() and tcp_xmit_retransmit_queue().
 * The first determines the moment _when_ we should reduce CWND and,
 * hence, slow down forward transmission. In fact, it determines the moment
 * when we decide that hole is caused by loss, rather than by a reorder.
 *
 * tcp_xmit_retransmit_queue() decides, _what_ we should retransmit to fill
 * holes, caused by lost packets.
 *
 * And the most logically complicated part of algorithm is undo
 * heuristics. We detect false retransmits due to both too early
 * fast retransmit (reordering) and underestimated RTO, analyzing
 * timestamps and D-SACKs. When we detect that some segments were
 * retransmitted by mistake and CWND reduction was wrong, we undo
 * window reduction and abort recovery phase. This logic is hidden
 * inside several functions named tcp_try_undo_<something>.
 */
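/* Illustrative sketch (not part of the original source): the accounting
 * described above, in_flight = packets_out - left_out + retrans_out with
 * left_out = sacked_out + lost_out, written out as a hypothetical helper.
 */
#if 0	/* example only */
static unsigned int in_flight_estimate(unsigned int packets_out,
				       unsigned int sacked_out,
				       unsigned int lost_out,
				       unsigned int retrans_out)
{
	/* Segments that left the network (lost) or sit in the receiver's
	 * out-of-order queue (sacked) no longer occupy the pipe. */
	unsigned int left_out = sacked_out + lost_out;

	return packets_out - left_out + retrans_out;
}
#endif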
/* This function decides, when we should leave Disordered state
 * and enter Recovery phase, reducing congestion window.
 *
 * Main question: may we further continue forward transmission
 * with the same cwnd?
 */
static int
tcp_time_to_recover(struct sock *sk, struct tcp_opt *tp)
{
	/* Trick#1: The loss is proven. */

	/* Not-A-Trick#2 : Classic rule... */
	if (tcp_fackets_out(tp) > tp->reordering)

	/* Trick#3: It is still not OK... But will it be useful to delay
	 */
	if (tp->packets_out <= tp->reordering &&
	    tp->sacked_out >= max(tp->packets_out/2, sysctl_tcp_reordering) &&
	    !tcp_may_send_now(sk, tp)) {
		/* We have nothing to send. This connection is limited
		 * either by receiver window or by application.
		 */
	}
}
/* If we receive more dupacks than we expected counting segments
 * in assumption of absent reordering, interpret this as reordering.
 * The only another reason could be bug in receiver TCP.
 */
static void tcp_check_reno_reordering(struct tcp_opt *tp, int addend)
{
	if (tp->sacked_out + 1 > tp->packets_out) {
		tp->sacked_out = tp->packets_out ? tp->packets_out - 1 : 0;
		tcp_update_reordering(tp, tp->packets_out + addend, 0);
	}
}

/* Emulate SACKs for SACKless connection: account for a new dupack. */
static void tcp_add_reno_sack(struct tcp_opt *tp)
{
	tcp_check_reno_reordering(tp, 0);
	tp->left_out = tp->sacked_out + tp->lost_out;
}

/* Account for ACK, ACKing some data in Reno Recovery phase. */
static void tcp_remove_reno_sacks(struct sock *sk, struct tcp_opt *tp, int acked)
{
	/* One ACK eated lost packet. Must eat! */
	BUG_TRAP(tp->lost_out == 0);

	/* The rest eat duplicate ACKs. */
	if (acked-1 >= tp->sacked_out)
		tp->sacked_out = 0;
	else
		tp->sacked_out -= acked-1;

	tcp_check_reno_reordering(tp, acked);
	tp->left_out = tp->sacked_out + tp->lost_out;
}

static inline void tcp_reset_reno_sack(struct tcp_opt *tp)
{
	tp->left_out = tp->lost_out;
}
/* Mark head of queue up as lost. */
static void
tcp_mark_head_lost(struct sock *sk, struct tcp_opt *tp, int packets, u32 high_seq)
{
	struct sk_buff *skb;
	int cnt = packets;

	BUG_TRAP(cnt <= tp->packets_out);

	for_retrans_queue(skb, sk, tp) {
		if (--cnt < 0 || after(TCP_SKB_CB(skb)->end_seq, high_seq))
			break;
		if (!(TCP_SKB_CB(skb)->sacked&TCPCB_TAGBITS)) {
			TCP_SKB_CB(skb)->sacked |= TCPCB_LOST;
		}
	}
	tp->left_out = tp->sacked_out + tp->lost_out;
}
/* Account newly detected lost packet(s) */
static void tcp_update_scoreboard(struct sock *sk, struct tcp_opt *tp)
{
	if (IsFack(tp)) {
		int lost = tp->fackets_out - tp->reordering;
		tcp_mark_head_lost(sk, tp, lost, tp->high_seq);
	} else {
		tcp_mark_head_lost(sk, tp, 1, tp->high_seq);
	}
}
/* CWND moderation, preventing bursts due to too big ACKs
 * in dubious situations.
 */
static __inline__ void tcp_moderate_cwnd(struct tcp_opt *tp)
{
	tp->snd_cwnd = min(tp->snd_cwnd,
			   tcp_packets_in_flight(tp) + tcp_max_burst(tp));
	tp->snd_cwnd_stamp = tcp_time_stamp;
}

/* Decrease cwnd each second ack. */
static void tcp_cwnd_down(struct tcp_opt *tp)
{
	int decr = tp->snd_cwnd_cnt + 1;

	tp->snd_cwnd_cnt = decr&1;
	decr >>= 1;

	if (decr && tp->snd_cwnd > tp->snd_ssthresh/2)
		tp->snd_cwnd -= decr;

	tp->snd_cwnd = min(tp->snd_cwnd, tcp_packets_in_flight(tp)+1);
	tp->snd_cwnd_stamp = tcp_time_stamp;
}
/* Nothing was retransmitted or returned timestamp is less
 * than timestamp of the first retransmission.
 */
static __inline__ int tcp_packet_delayed(struct tcp_opt *tp)
{
	return !tp->retrans_stamp ||
		(tp->saw_tstamp &&
		 (__s32)(tp->rcv_tsecr - tp->retrans_stamp) < 0);
}
/* Undo procedures. */

#if FASTRETRANS_DEBUG > 1
static void DBGUNDO(struct sock *sk, struct tcp_opt *tp, const char *msg)
{
	printk(KERN_DEBUG "Undo %s %u.%u.%u.%u/%u c%u l%u ss%u/%u p%u\n",
	       msg,
	       NIPQUAD(sk->daddr), ntohs(sk->dport),
	       tp->snd_cwnd, tp->left_out,
	       tp->snd_ssthresh, tp->prior_ssthresh, tp->packets_out);
}
#else
#define DBGUNDO(x...) do { } while (0)
#endif

static void tcp_undo_cwr(struct tcp_opt *tp, int undo)
{
	if (tp->prior_ssthresh) {
		tp->snd_cwnd = max(tp->snd_cwnd, tp->snd_ssthresh<<1);

		if (undo && tp->prior_ssthresh > tp->snd_ssthresh)
			tp->snd_ssthresh = tp->prior_ssthresh;
	} else {
		tp->snd_cwnd = max(tp->snd_cwnd, tp->snd_ssthresh);
	}
	tcp_moderate_cwnd(tp);
	tp->snd_cwnd_stamp = tcp_time_stamp;
}

static inline int tcp_may_undo(struct tcp_opt *tp)
{
	return tp->undo_marker &&
		(!tp->undo_retrans || tcp_packet_delayed(tp));
}
/* People celebrate: "We love our President!" */
static int tcp_try_undo_recovery(struct sock *sk, struct tcp_opt *tp)
{
	if (tcp_may_undo(tp)) {
		/* Happy end! We did not retransmit anything
		 * or our original transmission succeeded.
		 */
		DBGUNDO(sk, tp, tp->ca_state == TCP_CA_Loss ? "loss" : "retrans");
		tcp_undo_cwr(tp, 1);
		if (tp->ca_state == TCP_CA_Loss)
			NET_INC_STATS_BH(TCPLossUndo);
		else
			NET_INC_STATS_BH(TCPFullUndo);
		tp->undo_marker = 0;
	}
	if (tp->snd_una == tp->high_seq && IsReno(tp)) {
		/* Hold old state until something *above* high_seq
		 * is ACKed. For Reno it is MUST to prevent false
		 * fast retransmits (RFC2582). SACK TCP is safe. */
		tcp_moderate_cwnd(tp);
	}
	tp->ca_state = TCP_CA_Open;
}
/* Try to undo cwnd reduction, because D-SACKs acked all retransmitted data */
static void tcp_try_undo_dsack(struct sock *sk, struct tcp_opt *tp)
{
	if (tp->undo_marker && !tp->undo_retrans) {
		DBGUNDO(sk, tp, "D-SACK");
		tcp_undo_cwr(tp, 1);
		tp->undo_marker = 0;
		NET_INC_STATS_BH(TCPDSACKUndo);
	}
}
/* Undo during fast recovery after partial ACK. */
static int tcp_try_undo_partial(struct sock *sk, struct tcp_opt *tp, int acked)
{
	/* Partial ACK arrived. Force Hoe's retransmit. */
	int failed = IsReno(tp) || tp->fackets_out > tp->reordering;

	if (tcp_may_undo(tp)) {
		/* Plain luck! Hole is filled with delayed
		 * packet, rather than with a retransmit.
		 */
		if (tp->retrans_out == 0)
			tp->retrans_stamp = 0;

		tcp_update_reordering(tp, tcp_fackets_out(tp) + acked, 1);

		DBGUNDO(sk, tp, "Hoe");
		tcp_undo_cwr(tp, 0);
		NET_INC_STATS_BH(TCPPartialUndo);

		/* So... Do not make Hoe's retransmit yet.
		 * If the first packet was delayed, the rest
		 * ones are most probably delayed as well.
		 */
	}
	return failed;
}
/* Undo during loss recovery after partial ACK. */
static int tcp_try_undo_loss(struct sock *sk, struct tcp_opt *tp)
{
	if (tcp_may_undo(tp)) {
		struct sk_buff *skb;
		for_retrans_queue(skb, sk, tp) {
			TCP_SKB_CB(skb)->sacked &= ~TCPCB_LOST;
		}
		DBGUNDO(sk, tp, "partial loss");
		tp->left_out = tp->sacked_out;
		tcp_undo_cwr(tp, 1);
		NET_INC_STATS_BH(TCPLossUndo);
		tp->retransmits = 0;
		tp->undo_marker = 0;
		tp->ca_state = TCP_CA_Open;
	}
}
static __inline__ void tcp_complete_cwr(struct tcp_opt *tp)
{
	tp->snd_cwnd = min(tp->snd_cwnd, tp->snd_ssthresh);
	tp->snd_cwnd_stamp = tcp_time_stamp;
}

static void tcp_try_to_open(struct sock *sk, struct tcp_opt *tp, int flag)
{
	tp->left_out = tp->sacked_out;

	if (tp->retrans_out == 0)
		tp->retrans_stamp = 0;

	if (tp->ca_state != TCP_CA_CWR) {
		int state = TCP_CA_Open;

			state = TCP_CA_Disorder;

		if (tp->ca_state != state) {
			tp->ca_state = state;
			tp->high_seq = tp->snd_nxt;
		}
		tcp_moderate_cwnd(tp);
	}
}
/* Process an event, which can update packets-in-flight not trivially.
 * Main goal of this function is to calculate new estimate for left_out,
 * taking into account both packets sitting in receiver's buffer and
 * packets lost by network.
 *
 * Besides that it does CWND reduction, when packet loss is detected
 * and changes state of machine.
 *
 * It does _not_ decide what to send, it is made in function
 * tcp_xmit_retransmit_queue().
 */
static void
tcp_fastretrans_alert(struct sock *sk, u32 prior_snd_una,
		      int prior_packets, int flag)
{
	struct tcp_opt *tp = &(sk->tp_pinfo.af_tcp);
	int is_dupack = (tp->snd_una == prior_snd_una && !(flag&FLAG_NOT_DUP));

	/* Some technical things:
	 * 1. Reno does not count dupacks (sacked_out) automatically. */
	if (!tp->packets_out)
		tp->sacked_out = 0;
	/* 2. SACK counts snd_fack in packets inaccurately. */
	if (tp->sacked_out == 0)
		tp->fackets_out = 0;

	/* Now state machine starts.
	 * A. ECE, hence prohibit cwnd undoing, the reduction is required. */
	if (flag&FLAG_ECE)
		tp->prior_ssthresh = 0;

	/* B. In all the states check for reneging SACKs. */
	if (tp->sacked_out && tcp_check_sack_reneging(sk, tp))
		return;

	/* C. Process data loss notification, provided it is valid. */
	if ((flag&FLAG_DATA_LOST) &&
	    before(tp->snd_una, tp->high_seq) &&
	    tp->ca_state != TCP_CA_Open &&
	    tp->fackets_out > tp->reordering) {
		tcp_mark_head_lost(sk, tp, tp->fackets_out - tp->reordering, tp->high_seq);
		NET_INC_STATS_BH(TCPLoss);
	}

	/* D. Synchronize left_out to current state. */
	tp->left_out = tp->sacked_out + tp->lost_out;

	/* E. Check state exit conditions. State can be terminated
	 * when high_seq is ACKed. */
	if (tp->ca_state == TCP_CA_Open) {
		BUG_TRAP(tp->retrans_out == 0);
		tp->retrans_stamp = 0;
	} else if (!before(tp->snd_una, tp->high_seq)) {
		switch (tp->ca_state) {
		case TCP_CA_Loss:
			tp->retransmits = 0;
			if (tcp_try_undo_recovery(sk, tp))

		case TCP_CA_CWR:
			/* CWR is to be held something *above* high_seq
			 * is ACKed for CWR bit to reach receiver. */
			if (tp->snd_una != tp->high_seq) {
				tcp_complete_cwr(tp);
				tp->ca_state = TCP_CA_Open;
			}
			break;

		case TCP_CA_Disorder:
			tcp_try_undo_dsack(sk, tp);
			if (IsReno(tp) || !tp->undo_marker) {
				tp->undo_marker = 0;
				tp->ca_state = TCP_CA_Open;
			}
			break;

		case TCP_CA_Recovery:
			tcp_reset_reno_sack(tp);
			if (tcp_try_undo_recovery(sk, tp))
			tcp_complete_cwr(tp);
			break;
		}
	}

	/* F. Process state. */
	switch (tp->ca_state) {
	case TCP_CA_Recovery:
		if (prior_snd_una == tp->snd_una) {
			if (IsReno(tp) && is_dupack)
				tcp_add_reno_sack(tp);
		} else {
			int acked = prior_packets - tp->packets_out;
			tcp_remove_reno_sacks(sk, tp, acked);
			is_dupack = tcp_try_undo_partial(sk, tp, acked);
		}
		break;
	case TCP_CA_Loss:
		if (flag & FLAG_ACKED)
			tcp_reset_xmit_timer(sk, TCP_TIME_RETRANS, tp->rto);
		if (!tcp_try_undo_loss(sk, tp)) {
			tcp_moderate_cwnd(tp);
			tcp_xmit_retransmit_queue(sk);
		}
		if (tp->ca_state != TCP_CA_Open)
			return;
		/* Loss is undone; fall through to processing in Open state. */
	default:
		if (tp->snd_una != prior_snd_una)
			tcp_reset_reno_sack(tp);
		if (is_dupack)
			tcp_add_reno_sack(tp);

		if (tp->ca_state == TCP_CA_Disorder)
			tcp_try_undo_dsack(sk, tp);

		if (!tcp_time_to_recover(sk, tp)) {
			tcp_try_to_open(sk, tp, flag);
			return;
		}

		/* Otherwise enter Recovery state */

		if (IsReno(tp))
			NET_INC_STATS_BH(TCPRenoRecovery);
		else
			NET_INC_STATS_BH(TCPSackRecovery);

		tp->high_seq = tp->snd_nxt;
		tp->prior_ssthresh = 0;
		tp->undo_marker = tp->snd_una;
		tp->undo_retrans = tp->retrans_out;

		if (tp->ca_state < TCP_CA_CWR) {
			if (!(flag&FLAG_ECE))
				tp->prior_ssthresh = tcp_current_ssthresh(tp);
			tp->snd_ssthresh = tcp_recalc_ssthresh(tp);
			TCP_ECN_queue_cwr(tp);
		}

		tp->snd_cwnd_cnt = 0;
		tp->ca_state = TCP_CA_Recovery;
	}

	tcp_update_scoreboard(sk, tp);
	tcp_xmit_retransmit_queue(sk);
}
/* Read draft-ietf-tcplw-high-performance before mucking
 * with this code. (Supersedes RFC1323)
 */
static void tcp_ack_saw_tstamp(struct tcp_opt *tp)
{
	/* RTTM Rule: A TSecr value received in a segment is used to
	 * update the averaged RTT measurement only if the segment
	 * acknowledges some new data, i.e., only if it advances the
	 * left edge of the send window.
	 *
	 * See draft-ietf-tcplw-high-performance-00, section 3.3.
	 * 1998/04/10 Andrey V. Savochkin <saw@msu.ru>
	 */
	seq_rtt = tcp_time_stamp - tp->rcv_tsecr;
	tcp_rtt_estimator(tp, seq_rtt);
	tp->rto <<= tp->backoff;
}

static void tcp_ack_no_tstamp(struct tcp_opt *tp, u32 seq_rtt, int flag)
{
	/* We don't have a timestamp. Can only use
	 * packets that are not retransmitted to determine
	 * rtt estimates. Also, we must not reset the
	 * backoff for rto until we get a non-retransmitted
	 * packet. This allows us to deal with a situation
	 * where the network delay has increased suddenly.
	 * I.e. Karn's algorithm. (SIGCOMM '87, p5.)
	 */
	if (!tp->retransmits && !(flag & FLAG_RETRANS_DATA_ACKED)) {
		tcp_rtt_estimator(tp, seq_rtt);
	}
}

static __inline__ void
tcp_ack_update_rtt(struct tcp_opt *tp, int flag, u32 seq_rtt)
{
	if (tp->saw_tstamp)
		tcp_ack_saw_tstamp(tp);
	else
		tcp_ack_no_tstamp(tp, seq_rtt, flag);
}
/* This is Jacobson's slow start and congestion avoidance.
 * SIGCOMM '88, p. 328.
 */
static __inline__ void tcp_cong_avoid(struct tcp_opt *tp)
{
	if (tp->snd_cwnd <= tp->snd_ssthresh) {
		/* In "safe" area, increase. */
		if (tp->snd_cwnd < tp->snd_cwnd_clamp)
			tp->snd_cwnd++;
	} else {
		/* In dangerous area, increase slowly.
		 * In theory this is tp->snd_cwnd += 1 / tp->snd_cwnd
		 */
		if (tp->snd_cwnd_cnt >= tp->snd_cwnd) {
			if (tp->snd_cwnd < tp->snd_cwnd_clamp)
				tp->snd_cwnd++;
		}
	}
}
static __inline__ void tcp_ack_packets_out(struct sock *sk, struct tcp_opt *tp)
{
	if (tp->packets_out == 0) {
		tcp_clear_xmit_timer(sk, TCP_TIME_RETRANS);
	} else {
		struct sk_buff *skb = skb_peek(&sk->write_queue);
		__u32 when = tp->rto - (tcp_time_stamp - TCP_SKB_CB(skb)->when);

		if ((__s32)when <= 0)
		tcp_reset_xmit_timer(sk, TCP_TIME_RETRANS, when);
	}
}
/* Remove acknowledged frames from the retransmission queue. */
static int tcp_clean_rtx_queue(struct sock *sk)
{
	struct tcp_opt *tp = &(sk->tp_pinfo.af_tcp);
	struct sk_buff *skb;
	__u32 now = tcp_time_stamp;
	int acked = 0;
	__u32 seq_rtt = 0; /* F..g gcc... */

	while ((skb = skb_peek(&sk->write_queue)) && (skb != tp->send_head)) {
		struct tcp_skb_cb *scb = TCP_SKB_CB(skb);
		__u8 sacked = scb->sacked;

		/* If our packet is before the ack sequence we can
		 * discard it as it's confirmed to have arrived at
		 * the other end.
		 */
		if (after(scb->end_seq, tp->snd_una))
			break;

		/* Initial outgoing SYN's get put onto the write_queue
		 * just like anything else we transmit. It is not
		 * true data, and if we misinform our callers that
		 * this ACK acks real data, we will erroneously exit
		 * connection startup slow start one packet too
		 * quickly. This is severely frowned upon behavior.
		 */
		if (!(scb->flags & TCPCB_FLAG_SYN)) {
			acked |= FLAG_DATA_ACKED;
		} else {
			acked |= FLAG_SYN_ACKED;
		}

		if (sacked & TCPCB_RETRANS) {
			if (sacked & TCPCB_SACKED_RETRANS)
			acked |= FLAG_RETRANS_DATA_ACKED;
			if (sacked & TCPCB_SACKED_ACKED)
			if (sacked & TCPCB_LOST)
		}
		seq_rtt = now - scb->when;
		__skb_unlink(skb, skb->list);
		tcp_free_skb(sk, skb);
	}

	if (acked&FLAG_ACKED) {
		tcp_ack_update_rtt(tp, acked, seq_rtt);
		tcp_ack_packets_out(sk, tp);
	}

#if FASTRETRANS_DEBUG > 0
	BUG_TRAP((int)tp->sacked_out >= 0);
	BUG_TRAP((int)tp->lost_out >= 0);
	BUG_TRAP((int)tp->retrans_out >= 0);
	if (tp->packets_out == 0 && tp->sack_ok) {
		printk(KERN_DEBUG "Leak l=%u %d\n", tp->lost_out, tp->ca_state);
		if (tp->sacked_out) {
			printk(KERN_DEBUG "Leak s=%u %d\n", tp->sacked_out, tp->ca_state);
		}
		if (tp->retrans_out) {
			printk(KERN_DEBUG "Leak r=%u %d\n", tp->retrans_out, tp->ca_state);
			tp->retrans_out = 0;
		}
	}
#endif
	return acked;
}
static void tcp_ack_probe(struct sock *sk)
{
	struct tcp_opt *tp = &(sk->tp_pinfo.af_tcp);

	/* Was it a usable window open? */
	if (!after(TCP_SKB_CB(tp->send_head)->end_seq, tp->snd_una + tp->snd_wnd)) {
		tcp_clear_xmit_timer(sk, TCP_TIME_PROBE0);
		/* Socket must be waked up by subsequent tcp_data_snd_check().
		 * This function is not for random use!
		 */
	} else {
		tcp_reset_xmit_timer(sk, TCP_TIME_PROBE0,
				     min(tp->rto << tp->backoff, TCP_RTO_MAX));
	}
}
static __inline__ int tcp_ack_is_dubious(struct tcp_opt *tp, int flag)
{
	return (!(flag & FLAG_NOT_DUP) || (flag & FLAG_CA_ALERT) ||
		tp->ca_state != TCP_CA_Open);
}

static __inline__ int tcp_may_raise_cwnd(struct tcp_opt *tp, int flag)
{
	return (!(flag & FLAG_ECE) || tp->snd_cwnd < tp->snd_ssthresh) &&
		!((1<<tp->ca_state)&(TCPF_CA_Recovery|TCPF_CA_CWR));
}
/* Check that window update is acceptable.
 * The function assumes that snd_una<=ack<=snd_next.
 */
static __inline__ int
tcp_may_update_window(struct tcp_opt *tp, u32 ack, u32 ack_seq, u32 nwin)
{
	return (after(ack, tp->snd_una) ||
		after(ack_seq, tp->snd_wl1) ||
		(ack_seq == tp->snd_wl1 && nwin > tp->snd_wnd));
}
/* Update our send window.
 *
 * Window update algorithm, described in RFC793/RFC1122 (used in linux-2.2
 * and in FreeBSD. NetBSD's one is even worse.) is wrong.
 */
static int tcp_ack_update_window(struct sock *sk, struct tcp_opt *tp,
				 struct sk_buff *skb, u32 ack, u32 ack_seq)
{
	int flag = 0;
	u32 nwin = ntohs(skb->h.th->window) << tp->snd_wscale;

	if (tcp_may_update_window(tp, ack, ack_seq, nwin)) {
		flag |= FLAG_WIN_UPDATE;
		tcp_update_wl(tp, ack, ack_seq);

		if (tp->snd_wnd != nwin) {
			/* Note, it is the only place, where
			 * fast path is recovered for sending TCP.
			 */
			if (skb_queue_len(&tp->out_of_order_queue) == 0 &&
#ifdef TCP_FORMAL_WINDOW
			    tcp_receive_window(tp) &&
#endif
				tcp_fast_path_on(tp);

			if (nwin > tp->max_window) {
				tp->max_window = nwin;
				tcp_sync_mss(sk, tp->pmtu_cookie);
			}
		}
	}

	if (before(tp->snd_una + tp->snd_wnd, tp->snd_nxt)) {
		if (net_ratelimit())
			printk(KERN_DEBUG "TCP: peer shrinks window. Bad, what else can I say?\n");
	}

	return flag;
}
/* This routine deals with incoming acks, but not outgoing ones. */
static int tcp_ack(struct sock *sk, struct sk_buff *skb, int flag)
{
	struct tcp_opt *tp = &(sk->tp_pinfo.af_tcp);
	u32 prior_snd_una = tp->snd_una;
	u32 ack_seq = TCP_SKB_CB(skb)->seq;
	u32 ack = TCP_SKB_CB(skb)->ack_seq;
	u32 prior_in_flight;
	int prior_packets;

	/* If the ack is newer than sent or older than previous acks
	 * then we can probably ignore it.
	 */
	if (after(ack, tp->snd_nxt))
		goto uninteresting_ack;

	if (before(ack, prior_snd_una))
		goto old_ack;

	if (!(flag&FLAG_SLOWPATH) && after(ack, prior_snd_una)) {
		/* Window is constant, pure forward advance.
		 * No more checks are required.
		 * Note, we use the fact that SND.UNA>=SND.WL2.
		 */
		tcp_update_wl(tp, ack, ack_seq);
		tp->snd_una = ack;
		flag |= FLAG_WIN_UPDATE;

		NET_INC_STATS_BH(TCPHPAcks);
	} else {
		if (ack_seq != TCP_SKB_CB(skb)->end_seq)
			flag |= FLAG_DATA;
		else
			NET_INC_STATS_BH(TCPPureAcks);

		flag |= tcp_ack_update_window(sk, tp, skb, ack, ack_seq);

		if (TCP_SKB_CB(skb)->sacked)
			flag |= tcp_sacktag_write_queue(sk, skb, prior_snd_una);

		if (TCP_ECN_rcv_ecn_echo(tp, skb->h.th))
			flag |= FLAG_ECE;
	}

	/* We passed data and got it acked, remove any soft error
	 * log. Something worked...
	 */
	tp->rcv_tstamp = tcp_time_stamp;
	if ((prior_packets = tp->packets_out) == 0)
		goto no_queue;

	prior_in_flight = tcp_packets_in_flight(tp);

	/* See if we can take anything off of the retransmit queue. */
	flag |= tcp_clean_rtx_queue(sk);

	if (tcp_ack_is_dubious(tp, flag)) {
		/* Advance CWND, if state allows this. */
		if ((flag&FLAG_DATA_ACKED) && prior_in_flight >= tp->snd_cwnd &&
		    tcp_may_raise_cwnd(tp, flag))
			tcp_cong_avoid(tp);
		tcp_fastretrans_alert(sk, prior_snd_una, prior_packets, flag);
	} else {
		if ((flag&FLAG_DATA_ACKED) && prior_in_flight >= tp->snd_cwnd)
			tcp_cong_avoid(tp);
	}

	if ((flag & FLAG_FORWARD_PROGRESS) || !(flag&FLAG_NOT_DUP))
		dst_confirm(sk->dst_cache);
	return 1;

no_queue:
	/* If this ack opens up a zero window, clear backoff. It was
	 * being used to time the probes, and is probably far higher than
	 * it needs to be for normal retransmission.
	 */
	return 1;

old_ack:
	if (TCP_SKB_CB(skb)->sacked)
		tcp_sacktag_write_queue(sk, skb, prior_snd_una);

uninteresting_ack:
	SOCK_DEBUG(sk, "Ack %u out of %u:%u\n", ack, tp->snd_una, tp->snd_nxt);
	return 0;
}
/* Look for tcp options. Normally only called on SYN and SYNACK packets.
 * But, this can also be called on packets in the established flow when
 * the fast version below fails.
 */
void tcp_parse_options(struct sk_buff *skb, struct tcp_opt *tp)
{
	struct tcphdr *th = skb->h.th;
	int length = (th->doff*4) - sizeof(struct tcphdr);

	ptr = (unsigned char *)(th + 1);

		case TCPOPT_NOP:	/* Ref: RFC 793 section 3.1 */

			if (opsize < 2) /* "silly options" */
			if (opsize > length)
				break;	/* don't parse partial options */

				if (opsize == TCPOLEN_MSS && th->syn) {
					u16 in_mss = ntohs(*(__u16 *)ptr);
					if (tp->user_mss && tp->user_mss < in_mss)
						in_mss = tp->user_mss;
					tp->mss_clamp = in_mss;
				}

				if (opsize == TCPOLEN_WINDOW && th->syn)
					if (sysctl_tcp_window_scaling) {
						tp->snd_wscale = *(__u8 *)ptr;
						if (tp->snd_wscale > 14) {
							printk("tcp_parse_options: Illegal window "
							       "scaling value %d >14 received.",
							tp->snd_wscale = 14;
						}
					}

			case TCPOPT_TIMESTAMP:
				if (opsize == TCPOLEN_TIMESTAMP) {
					if (sysctl_tcp_timestamps) {
						tp->rcv_tsval = ntohl(*(__u32 *)ptr);
						tp->rcv_tsecr = ntohl(*(__u32 *)(ptr+4));
					}
				}

			case TCPOPT_SACK_PERM:
				if (opsize == TCPOLEN_SACK_PERM && th->syn) {
					if (sysctl_tcp_sack) {
					}
				}

				if ((opsize >= (TCPOLEN_SACK_BASE + TCPOLEN_SACK_PERBLOCK)) &&
				    !((opsize - TCPOLEN_SACK_BASE) % TCPOLEN_SACK_PERBLOCK) &&
					TCP_SKB_CB(skb)->sacked = (ptr - 2) - (unsigned char *)th;
}
/* Fast parse options. This hopes to only see timestamps.
 * If it is wrong it falls back on tcp_parse_options().
 */
static __inline__ int tcp_fast_parse_options(struct sk_buff *skb, struct tcphdr *th, struct tcp_opt *tp)
{
	if (th->doff == sizeof(struct tcphdr)>>2) {
	} else if (th->doff == (sizeof(struct tcphdr)>>2)+(TCPOLEN_TSTAMP_ALIGNED>>2)) {
		__u32 *ptr = (__u32 *)(th + 1);
		if (*ptr == __constant_ntohl((TCPOPT_NOP << 24) | (TCPOPT_NOP << 16)
					     | (TCPOPT_TIMESTAMP << 8) | TCPOLEN_TIMESTAMP)) {
			++ptr;
			tp->rcv_tsval = ntohl(*ptr);
			++ptr;
			tp->rcv_tsecr = ntohl(*ptr);
		}
	}
	tcp_parse_options(skb, tp);
}
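/* Illustrative note (not from the original file): the fast path above keys
 * on the RFC 1323 appendix A layout, where the only option present is a
 * timestamp preceded by two NOPs, so the first option word is the constant
 * 0x0101080a (NOP, NOP, TIMESTAMP, length 10), followed by TSval and TSecr.
 */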
extern __inline__ void tcp_store_ts_recent(struct tcp_opt *tp)
{
	tp->ts_recent = tp->rcv_tsval;
	tp->ts_recent_stamp = xtime.tv_sec;
}
extern __inline__ void tcp_replace_ts_recent(struct tcp_opt *tp, u32 seq)
{
	if (tp->saw_tstamp && !after(seq, tp->rcv_wup)) {
		/* PAWS bug workaround wrt. ACK frames, the PAWS discard
		 * extra check below makes sure this can only happen
		 * for pure ACK frames.  -DaveM
		 *
		 * Not only that, it also occurs for expired timestamps.
		 */
		if ((s32)(tp->rcv_tsval - tp->ts_recent) >= 0 ||
		    xtime.tv_sec >= tp->ts_recent_stamp + TCP_PAWS_24DAYS)
			tcp_store_ts_recent(tp);
	}
}
/* Sorry, PAWS as specified is broken wrt. pure ACKs -DaveM
 *
 * It is not fatal. If this ACK does _not_ change critical state (seqs, window)
 * it can pass through the stack. So, the following predicate verifies that
 * this segment is not used for anything but congestion avoidance or
 * fast retransmit. Moreover, we are even able to eliminate most of such
 * second order effects, if we apply some small "replay" window (~RTO)
 * to timestamp space.
 *
 * All these measures still do not guarantee that we reject wrapped ACKs
 * on networks with high bandwidth, when sequence space is recycled quickly,
 * but it guarantees that such events will be very rare and do not affect
 * the connection seriously. This doesn't look nice, but alas, PAWS is really
 *
 * [ Later note. Even worse! It is buggy for segments _with_ data. The RFC
 * states that events when a retransmit arrives after the original data are rare.
 * That is a blatant lie. VJ forgot about fast retransmit! 8)8) It is
 * the biggest problem on large power networks even with minor reordering.
 * OK, let's give it a small replay window. If the peer clock is even 1Hz,
 * it is safe up to a bandwidth of 18Gigabit/sec. 8) ]
 */
static int tcp_disordered_ack(struct tcp_opt *tp, struct sk_buff *skb)
{
	struct tcphdr *th = skb->h.th;
	u32 seq = TCP_SKB_CB(skb)->seq;
	u32 ack = TCP_SKB_CB(skb)->ack_seq;

	return (/* 1. Pure ACK with correct sequence number. */
		(th->ack && seq == TCP_SKB_CB(skb)->end_seq && seq == tp->rcv_nxt) &&

		/* 2. ... and duplicate ACK. */
		ack == tp->snd_una &&

		/* 3. ... and does not update window. */
		!tcp_may_update_window(tp, ack, seq, ntohs(th->window)<<tp->snd_wscale) &&

		/* 4. ... and sits in replay window. */
		(s32)(tp->ts_recent - tp->rcv_tsval) <= (tp->rto*1024)/HZ);
}
extern __inline__ int tcp_paws_discard(struct tcp_opt *tp, struct sk_buff *skb)
{
	return ((s32)(tp->ts_recent - tp->rcv_tsval) > TCP_PAWS_WINDOW &&
		xtime.tv_sec < tp->ts_recent_stamp + TCP_PAWS_24DAYS &&
		!tcp_disordered_ack(tp, skb));
}
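/* Both checks above compute timestamp distances in signed 32-bit space so
 * that comparisons keep working across counter wraparound.  The standalone
 * fragment below (hypothetical, userspace-only, not part of this file) shows
 * why (s32)(a - b) is used instead of a plain "a < b": once the 32-bit value
 * wraps, the subtraction still yields a small signed delta.
 */
#if 0
#include <stdio.h>
#include <stdint.h>

static int ts_older(uint32_t ts_recent, uint32_t rcv_tsval)
{
	/* Mirrors the (s32)(tp->ts_recent - tp->rcv_tsval) > 0 idea. */
	return (int32_t)(ts_recent - rcv_tsval) > 0;
}

int main(void)
{
	uint32_t recent = 0xfffffff0u;	/* just before wrap            */
	uint32_t val    = 0x00000010u;	/* just after wrap, i.e. newer */

	printf("%d\n", ts_older(recent, val));	/* 0: val is newer, no PAWS drop */
	printf("%d\n", ts_older(val, recent));	/* 1: recent is newer than val   */
	return 0;
}
#endif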
static int __tcp_sequence(struct tcp_opt *tp, u32 seq, u32 end_seq)
{
	u32 end_window = tp->rcv_wup + tp->rcv_wnd;
#ifdef TCP_FORMAL_WINDOW
	u32 rcv_wnd = tcp_receive_window(tp);
#else
	u32 rcv_wnd = tp->rcv_wnd;
#endif

	if (rcv_wnd &&
	    after(end_seq, tp->rcv_nxt) &&
	    before(seq, end_window))
		return 1;
	if (seq != end_window)
		return 0;
	return (seq == end_seq);
}
/* This function checks to see if the tcp header is actually acceptable.
 *
 * Actually, our check is seriously broken: we must accept RST,ACK,URG
 * even on zero window, effectively trimming data. It is RFC, guys.
 * But our check is so beautiful, that I do not want to repair it
 * now. However, taking into account those stupid plans to start to
 * send some texts with RST, we have to handle at least this case. --ANK
 */
extern __inline__ int tcp_sequence(struct tcp_opt *tp, u32 seq, u32 end_seq, int rst)
{
#ifdef TCP_FORMAL_WINDOW
	u32 rcv_wnd = tcp_receive_window(tp);
#else
	u32 rcv_wnd = tp->rcv_wnd;
#endif
	if (seq == tp->rcv_nxt)
		return (rcv_wnd || (end_seq == seq) || rst);

	return __tcp_sequence(tp, seq, end_seq);
}
/* When we get a reset we do this. */
static void tcp_reset(struct sock *sk)
{
	/* We want the right error as BSD sees it (and indeed as we do). */
	switch (sk->state) {
		case TCP_SYN_SENT:
			sk->err = ECONNREFUSED;
			break;
		case TCP_CLOSE_WAIT:
			sk->err = EPIPE;
			break;
		default:
			sk->err = ECONNRESET;
	};

	if (!sk->dead)
		sk->error_report(sk);
}
/*
 *	Process the FIN bit. This now behaves as it is supposed to work
 *	and the FIN takes effect when it is validly part of sequence
 *	space. Not before when we get holes.
 *
 *	If we are ESTABLISHED, a received fin moves us to CLOSE-WAIT
 *	(and thence onto LAST-ACK and finally, CLOSE, we never enter
 *	TIME-WAIT)
 *
 *	If we are in FINWAIT-1, a received FIN indicates simultaneous
 *	close and we go into CLOSING (and later onto TIME-WAIT)
 *
 *	If we are in FINWAIT-2, a received FIN moves us to TIME-WAIT.
 */
static void tcp_fin(struct sk_buff *skb, struct sock *sk, struct tcphdr *th)
{
	struct tcp_opt *tp = &(sk->tp_pinfo.af_tcp);

	tp->fin_seq = TCP_SKB_CB(skb)->end_seq;
	tcp_schedule_ack(tp);

	sk->shutdown |= RCV_SHUTDOWN;

	switch (sk->state) {
		case TCP_SYN_RECV:
		case TCP_ESTABLISHED:
			/* Move to CLOSE_WAIT */
			tcp_set_state(sk, TCP_CLOSE_WAIT);
			tp->ack.pingpong = 1;
			break;

		case TCP_CLOSE_WAIT:
		case TCP_CLOSING:
			/* Received a retransmission of the FIN, do
			 * nothing.
			 */
			break;
		case TCP_LAST_ACK:
			/* RFC793: Remain in the LAST-ACK state. */
			break;

		case TCP_FIN_WAIT1:
			/* This case occurs when a simultaneous close
			 * happens, we must ack the received FIN and
			 * enter the CLOSING state.
			 */
			tcp_set_state(sk, TCP_CLOSING);
			break;
		case TCP_FIN_WAIT2:
			/* Received a FIN -- send ACK and enter TIME_WAIT. */
			tcp_time_wait(sk, TCP_TIME_WAIT, 0);
			break;
		default:
			/* Only TCP_LISTEN and TCP_CLOSE are left, in these
			 * cases we should never reach this piece of code.
			 */
			printk("tcp_fin: Impossible, sk->state=%d\n", sk->state);
			break;
	};

	/* It _is_ possible, that we have something out-of-order _after_ FIN.
	 * Probably, we should reset in this case. For now drop them.
	 */
	__skb_queue_purge(&tp->out_of_order_queue);
	tcp_mem_reclaim(sk);

	if (!sk->dead) {
		sk->state_change(sk);

		/* Do not send POLL_HUP for half duplex close. */
		if (sk->shutdown == SHUTDOWN_MASK || sk->state == TCP_CLOSE)
			sk_wake_async(sk, 1, POLL_HUP);
		else
			sk_wake_async(sk, 1, POLL_IN);
	}
}
static __inline__ int
tcp_sack_extend(struct tcp_sack_block *sp, u32 seq, u32 end_seq)
{
	if (!after(seq, sp->end_seq) && !after(sp->start_seq, end_seq)) {
		if (before(seq, sp->start_seq))
			sp->start_seq = seq;
		if (after(end_seq, sp->end_seq))
			sp->end_seq = end_seq;
		return 1;
	}
	return 0;
}
static __inline__ void tcp_dsack_set(struct tcp_opt *tp, u32 seq, u32 end_seq)
{
	if (tp->sack_ok && sysctl_tcp_dsack) {
		if (before(seq, tp->rcv_nxt))
			NET_INC_STATS_BH(TCPDSACKOldSent);
		else
			NET_INC_STATS_BH(TCPDSACKOfoSent);

		tp->dsack = 1;
		tp->duplicate_sack[0].start_seq = seq;
		tp->duplicate_sack[0].end_seq = end_seq;
		tp->eff_sacks = min(tp->num_sacks+1, 4-tp->tstamp_ok);
	}
}
static __inline__ void tcp_dsack_extend(struct tcp_opt *tp, u32 seq, u32 end_seq)
{
	if (!tp->dsack)
		tcp_dsack_set(tp, seq, end_seq);
	else
		tcp_sack_extend(tp->duplicate_sack, seq, end_seq);
}
static void tcp_send_dupack(struct sock *sk, struct sk_buff *skb)
{
	struct tcp_opt *tp = &(sk->tp_pinfo.af_tcp);

	if (TCP_SKB_CB(skb)->end_seq != TCP_SKB_CB(skb)->seq &&
	    before(TCP_SKB_CB(skb)->seq, tp->rcv_nxt)) {
		NET_INC_STATS_BH(DelayedACKLost);
		tcp_enter_quickack_mode(tp);

		if (tp->sack_ok && sysctl_tcp_dsack) {
			u32 end_seq = TCP_SKB_CB(skb)->end_seq;

			if (after(TCP_SKB_CB(skb)->end_seq, tp->rcv_nxt))
				end_seq = tp->rcv_nxt;
			tcp_dsack_set(tp, TCP_SKB_CB(skb)->seq, end_seq);
		}
	}

	tcp_send_ack(sk);
}
/* These routines update the SACK block as out-of-order packets arrive or
 * in-order packets close up the sequence space.
 */
static void tcp_sack_maybe_coalesce(struct tcp_opt *tp)
{
	int this_sack, i;
	struct tcp_sack_block *sp = &tp->selective_acks[0];
	struct tcp_sack_block *swalk = sp+1;

	/* See if the recent change to the first SACK eats into
	 * or hits the sequence space of other SACK blocks, if so coalesce.
	 */
	for (this_sack = 1; this_sack < tp->num_sacks; ) {
		if (tcp_sack_extend(sp, swalk->start_seq, swalk->end_seq)) {
			/* Zap SWALK, by moving every further SACK up by one slot.
			 * Decrease num_sacks.
			 */
			tp->num_sacks--;
			tp->eff_sacks = min(tp->num_sacks+tp->dsack, 4-tp->tstamp_ok);
			for (i = this_sack; i < tp->num_sacks; i++)
				sp[i] = sp[i+1];
			continue;
		}
		this_sack++, swalk++;
	}
}
static __inline__ void tcp_sack_swap(struct tcp_sack_block *sack1, struct tcp_sack_block *sack2)
{
	__u32 tmp;

	tmp = sack1->start_seq;
	sack1->start_seq = sack2->start_seq;
	sack2->start_seq = tmp;

	tmp = sack1->end_seq;
	sack1->end_seq = sack2->end_seq;
	sack2->end_seq = tmp;
}
static void tcp_sack_new_ofo_skb(struct sock *sk, u32 seq, u32 end_seq)
{
	struct tcp_opt *tp = &(sk->tp_pinfo.af_tcp);
	struct tcp_sack_block *sp = &tp->selective_acks[0];
	int cur_sacks = tp->num_sacks;
	int this_sack;

	for (this_sack = 0; this_sack < cur_sacks; this_sack++, sp++) {
		if (tcp_sack_extend(sp, seq, end_seq)) {
			/* Rotate this_sack to the first one. */
			for (; this_sack > 0; this_sack--, sp--)
				tcp_sack_swap(sp, sp-1);
			tcp_sack_maybe_coalesce(tp);
			return;
		}
	}

	/* Could not find an adjacent existing SACK, build a new one,
	 * put it at the front, and shift everyone else down.  We
	 * always know there is at least one SACK present already here.
	 *
	 * If the sack array is full, forget about the last one.
	 */
	if (this_sack >= 4) {
		this_sack--;
		sp--;
	}
	for (; this_sack > 0; this_sack--, sp--)
		*sp = *(sp-1);

	/* Build the new head SACK, and we're done. */
	sp->start_seq = seq;
	sp->end_seq = end_seq;
	tp->eff_sacks = min(tp->num_sacks+tp->dsack, 4-tp->tstamp_ok);
}
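/* Worked example (illustrative): suppose rcv_nxt is 1000 and segments
 * [2000,2500) and then [1500,2000) arrive out of order.  The first builds
 * selective_acks[0] = {2000,2500}; the second is adjacent, so
 * tcp_sack_extend() simply grows that block to {1500,2500}.  A block that is
 * touched again is rotated to the head, which gives the "most recently
 * changed block first" reporting order RFC 2018 asks of the receiver.  At
 * most four blocks are kept, and fewer fit on the wire when timestamps and a
 * D-SACK block also need option space (hence the 4-tp->tstamp_ok clamp).
 */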
/* RCV.NXT advances, some SACKs should be eaten. */
static void tcp_sack_remove(struct tcp_opt *tp)
{
	struct tcp_sack_block *sp = &tp->selective_acks[0];
	int num_sacks = tp->num_sacks;
	int this_sack, i;

	/* Empty ofo queue, hence, all the SACKs are eaten. Clear. */
	if (skb_queue_len(&tp->out_of_order_queue) == 0) {
		tp->num_sacks = 0;
		tp->eff_sacks = tp->dsack;
		return;
	}

	for (this_sack = 0; this_sack < num_sacks; ) {
		/* Check if the start of the sack is covered by RCV.NXT. */
		if (!before(tp->rcv_nxt, sp->start_seq)) {
			/* RCV.NXT must cover all the block! */
			BUG_TRAP(!before(tp->rcv_nxt, sp->end_seq));

			/* Zap this SACK, by moving forward any other SACKS. */
			for (i = this_sack+1; i < num_sacks; i++)
				tp->selective_acks[i-1] = tp->selective_acks[i];
			num_sacks--;
			continue;
		}
		this_sack++;
		sp++;
	}
	if (num_sacks != tp->num_sacks) {
		tp->num_sacks = num_sacks;
		tp->eff_sacks = min(tp->num_sacks+tp->dsack, 4-tp->tstamp_ok);
	}
}
/* This one checks to see if we can put data from the
 * out_of_order queue into the receive_queue.
 */
static void tcp_ofo_queue(struct sock *sk)
{
	struct tcp_opt *tp = &(sk->tp_pinfo.af_tcp);
	__u32 dsack_high = tp->rcv_nxt;
	struct sk_buff *skb;

	while ((skb = skb_peek(&tp->out_of_order_queue)) != NULL) {
		if (after(TCP_SKB_CB(skb)->seq, tp->rcv_nxt))
			break;

		if (before(TCP_SKB_CB(skb)->seq, dsack_high)) {
			__u32 dsack = dsack_high;

			if (before(TCP_SKB_CB(skb)->end_seq, dsack_high))
				dsack_high = TCP_SKB_CB(skb)->end_seq;
			tcp_dsack_extend(tp, TCP_SKB_CB(skb)->seq, dsack);
		}

		if (!after(TCP_SKB_CB(skb)->end_seq, tp->rcv_nxt)) {
			SOCK_DEBUG(sk, "ofo packet was already received \n");
			__skb_unlink(skb, skb->list);
			__kfree_skb(skb);
			continue;
		}
		SOCK_DEBUG(sk, "ofo requeuing : rcv_next %X seq %X - %X\n",
			   tp->rcv_nxt, TCP_SKB_CB(skb)->seq,
			   TCP_SKB_CB(skb)->end_seq);

		__skb_unlink(skb, skb->list);
		__skb_queue_tail(&sk->receive_queue, skb);
		tp->rcv_nxt = TCP_SKB_CB(skb)->end_seq;
		if (skb->h.th->fin)
			tcp_fin(skb, sk, skb->h.th);
	}
}
static void tcp_data_queue(struct sock *sk, struct sk_buff *skb)
{
	struct tcp_opt *tp = &(sk->tp_pinfo.af_tcp);
	int eaten = 0;

	if (tp->dsack) {
		tp->dsack = 0;
		tp->eff_sacks = min(tp->num_sacks, 4-tp->tstamp_ok);
	}

	/*  Queue data for delivery to the user.
	 *  Packets in sequence go to the receive queue.
	 *  Out of sequence packets to the out_of_order_queue.
	 */
	if (TCP_SKB_CB(skb)->seq == tp->rcv_nxt) {
		/* Ok. In sequence. */
		if (tp->ucopy.task == current &&
		    tp->copied_seq == tp->rcv_nxt &&
		    tp->ucopy.len && sk->lock.users) {
			int chunk = min(skb->len, tp->ucopy.len);

			__set_current_state(TASK_RUNNING);

			if (memcpy_toiovec(tp->ucopy.iov, skb->data, chunk)) {
				sk->error_report(sk);
			}
			tp->ucopy.len -= chunk;
			tp->copied_seq += chunk;
			eaten = (chunk == skb->len && !skb->h.th->fin);
		}

		if (!eaten) {
			tcp_set_owner_r(skb, sk);
			__skb_queue_tail(&sk->receive_queue, skb);
		}
		tp->rcv_nxt = TCP_SKB_CB(skb)->end_seq;
		if (skb->len)
			tcp_event_data_recv(sk, tp, skb);
		if (skb->h.th->fin)
			tcp_fin(skb, sk, skb->h.th);

		if (skb_queue_len(&tp->out_of_order_queue)) {
			tcp_ofo_queue(sk);

			/* RFC2581. 4.2. SHOULD send immediate ACK, when
			 * gap in queue is filled.
			 */
			if (skb_queue_len(&tp->out_of_order_queue) == 0)
				tp->ack.pingpong = 0;
		}

		if (tp->num_sacks)
			tcp_sack_remove(tp);

		/* Turn on fast path. */
		if (skb_queue_len(&tp->out_of_order_queue) == 0 &&
#ifdef TCP_FORMAL_WINDOW
		    tcp_receive_window(tp) &&
#endif
		    !tp->urg_data)
			tcp_fast_path_on(tp);

		if (eaten) {
			__kfree_skb(skb);
		} else if (!sk->dead)
			sk->data_ready(sk, 0);
		return;
	}
	/* An old packet, either a retransmit or some packet got lost. */
	if (!after(TCP_SKB_CB(skb)->end_seq, tp->rcv_nxt)) {
		/* A retransmit, 2nd most common case.  Force an immediate ack.
		 *
		 * It is impossible, seq is checked by the top level.
		 */
		printk("BUG: retransmit in tcp_data_queue: seq %X\n", TCP_SKB_CB(skb)->seq);
		tcp_enter_quickack_mode(tp);
		tcp_schedule_ack(tp);
	}

	tcp_enter_quickack_mode(tp);

	if (before(TCP_SKB_CB(skb)->seq, tp->rcv_nxt)) {
		/* Partial packet, seq < rcv_next < end_seq */
		SOCK_DEBUG(sk, "partial packet: rcv_next %X seq %X - %X\n",
			   tp->rcv_nxt, TCP_SKB_CB(skb)->seq,
			   TCP_SKB_CB(skb)->end_seq);

		tcp_dsack_set(tp, TCP_SKB_CB(skb)->seq, tp->rcv_nxt);
	}
	TCP_ECN_check_ce(tp, skb);

	/* Disable header prediction. */
	tp->pred_flags = 0;
	tcp_schedule_ack(tp);

	SOCK_DEBUG(sk, "out of order segment: rcv_next %X seq %X - %X\n",
		   tp->rcv_nxt, TCP_SKB_CB(skb)->seq, TCP_SKB_CB(skb)->end_seq);

	tcp_set_owner_r(skb, sk);

	if (skb_peek(&tp->out_of_order_queue) == NULL) {
		/* Initial out of order segment, build 1 SACK. */
		tp->selective_acks[0].start_seq = TCP_SKB_CB(skb)->seq;
		tp->selective_acks[0].end_seq = TCP_SKB_CB(skb)->end_seq;
		__skb_queue_head(&tp->out_of_order_queue, skb);
	} else {
		struct sk_buff *skb1 = tp->out_of_order_queue.prev;
		u32 seq = TCP_SKB_CB(skb)->seq;
		u32 end_seq = TCP_SKB_CB(skb)->end_seq;

		if (seq == TCP_SKB_CB(skb1)->end_seq) {
			__skb_append(skb1, skb);

			if (tp->num_sacks == 0 ||
			    tp->selective_acks[0].end_seq != seq)
				goto add_sack;

			/* Common case: data arrive in order after hole. */
			tp->selective_acks[0].end_seq = end_seq;
			return;
		}

		/* Find place to insert this segment. */
		do {
			if (!after(TCP_SKB_CB(skb1)->seq, seq))
				break;
		} while ((skb1 = skb1->prev) != (struct sk_buff *)&tp->out_of_order_queue);

		/* Do skb overlap to previous one? */
		if (skb1 != (struct sk_buff *)&tp->out_of_order_queue &&
		    before(seq, TCP_SKB_CB(skb1)->end_seq)) {
			if (!after(end_seq, TCP_SKB_CB(skb1)->end_seq)) {
				/* All the bits are present. Drop. */
				__kfree_skb(skb);
				tcp_dsack_set(tp, seq, end_seq);
				goto add_sack;
			}
			if (after(seq, TCP_SKB_CB(skb1)->seq)) {
				/* Partial overlap. */
				tcp_dsack_set(tp, seq, TCP_SKB_CB(skb1)->end_seq);
			}
		}
		__skb_insert(skb, skb1, skb1->next, &tp->out_of_order_queue);

		/* And clean segments covered by new one as whole. */
		while ((skb1 = skb->next) != (struct sk_buff *)&tp->out_of_order_queue &&
		       after(end_seq, TCP_SKB_CB(skb1)->seq)) {
			if (before(end_seq, TCP_SKB_CB(skb1)->end_seq)) {
				tcp_dsack_extend(tp, TCP_SKB_CB(skb1)->seq, end_seq);
				break;
			}
			__skb_unlink(skb1, skb1->list);
			tcp_dsack_extend(tp, TCP_SKB_CB(skb1)->seq, TCP_SKB_CB(skb1)->end_seq);
			__kfree_skb(skb1);
		}

add_sack:
		tcp_sack_new_ofo_skb(sk, seq, end_seq);
	}
}
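/* Worked example (illustrative): with rcv_nxt == 1500, a duplicate segment
 * [1000,2000) is partially old.  The partial-packet branch above reports the
 * already-received part as a D-SACK of [1000,1500) via tcp_dsack_set(), and
 * that duplicate_sack block is carried in the next ACK ahead of the regular
 * SACK blocks, which is what lets the sender distinguish a spurious
 * retransmission from real loss (RFC 2883).
 */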
static void tcp_collapse_queue(struct sock *sk, struct sk_buff_head *q)
{
	struct sk_buff *skb = skb_peek(q);
	struct sk_buff *skb_next;

	while (skb != (struct sk_buff *)q &&
	       (skb_next = skb->next) != (struct sk_buff *)q) {
		struct tcp_skb_cb *scb = TCP_SKB_CB(skb);
		struct tcp_skb_cb *scb_next = TCP_SKB_CB(skb_next);

		if (scb->end_seq == scb_next->seq &&
		    skb_tailroom(skb) >= skb_next->len &&
#define TCP_DONT_COLLAPSE (TCP_FLAG_FIN|TCP_FLAG_URG|TCP_FLAG_SYN)
		    !(tcp_flag_word(skb->h.th)&TCP_DONT_COLLAPSE) &&
		    !(tcp_flag_word(skb_next->h.th)&TCP_DONT_COLLAPSE)) {
			/* OK to collapse two skbs to one */
			memcpy(skb_put(skb, skb_next->len), skb_next->data, skb_next->len);
			__skb_unlink(skb_next, skb_next->list);
			scb->end_seq = scb_next->end_seq;
			__kfree_skb(skb_next);
			NET_INC_STATS_BH(TCPRcvCollapsed);
		} else {
			/* Lots of spare tailroom, reallocate this skb to trim it. */
			if (tcp_win_from_space(skb->truesize) > skb->len &&
			    skb_tailroom(skb) > sizeof(struct sk_buff) + 16) {
				struct sk_buff *nskb;

				nskb = skb_copy_expand(skb, skb_headroom(skb), 0, GFP_ATOMIC);
				if (nskb != NULL) {
					tcp_set_owner_r(nskb, sk);
					memcpy(nskb->data-skb_headroom(skb),
					       skb->data-skb_headroom(skb),
					       skb_headroom(skb));
					__skb_append(skb, nskb);
					__skb_unlink(skb, skb->list);
					__kfree_skb(skb);
				}
			}
			skb = skb_next;
		}
	}
}
/* Clean the out_of_order queue if we can, trying to get
 * the socket within its memory limits again.
 *
 * Return less than zero if we should start dropping frames
 * until the socket owning process reads some of the data
 * to stabilize the situation.
 */
static int tcp_prune_queue(struct sock *sk)
{
	struct tcp_opt *tp = &sk->tp_pinfo.af_tcp;

	SOCK_DEBUG(sk, "prune_queue: c=%x\n", tp->copied_seq);

	NET_INC_STATS_BH(PruneCalled);

	if (atomic_read(&sk->rmem_alloc) >= sk->rcvbuf)
		tcp_clamp_window(sk, tp);
	else if (tcp_memory_pressure)
		tp->rcv_ssthresh = min(tp->rcv_ssthresh, 4*tp->advmss);

	tcp_collapse_queue(sk, &sk->receive_queue);
	tcp_collapse_queue(sk, &tp->out_of_order_queue);
	tcp_mem_reclaim(sk);

	if (atomic_read(&sk->rmem_alloc) <= sk->rcvbuf)
		return 0;

	/* Collapsing did not help, destructive actions follow.
	 * This must not ever occur. */

	/* First, purge the out_of_order queue. */
	if (skb_queue_len(&tp->out_of_order_queue)) {
		net_statistics[smp_processor_id()*2].OfoPruned +=
			skb_queue_len(&tp->out_of_order_queue);
		__skb_queue_purge(&tp->out_of_order_queue);

		/* Reset SACK state.  A conforming SACK implementation will
		 * do the same at a timeout based retransmit.  When a connection
		 * is in a sad state like this, we care only about integrity
		 * of the connection not performance.
		 */
		tcp_mem_reclaim(sk);
	}

	if (atomic_read(&sk->rmem_alloc) <= sk->rcvbuf)
		return 0;

	/* If we are really being abused, tell the caller to silently
	 * drop receive data on the floor.  It will get retransmitted
	 * and hopefully then we'll have sufficient space.
	 */
	NET_INC_STATS_BH(RcvPruned);

	/* Massive buffer overcommit. */
	return -1;
}
static inline int tcp_rmem_schedule(struct sock *sk, struct sk_buff *skb)
{
	return (int)skb->truesize <= sk->forward_alloc ||
		tcp_mem_schedule(sk, skb->truesize, 1);
}
/*
 *	This routine handles the data.  If there is room in the buffer,
 *	it will have already been moved into it.  If there is no
 *	room, then we will just have to discard the packet.
 */
static void tcp_data(struct sk_buff *skb, struct sock *sk, unsigned int len)
{
	struct tcphdr *th = skb->h.th;
	struct tcp_opt *tp = &(sk->tp_pinfo.af_tcp);

	skb_pull(skb, th->doff*4);
	skb_trim(skb, len - (th->doff*4));

	if (skb->len == 0 && !th->fin)
		goto drop;

	TCP_ECN_accept_cwr(tp, skb);

	/*
	 *	If our receive queue has grown past its limits shrink it.
	 *	Make sure to do this before moving rcv_nxt, otherwise
	 *	data might be acked for which we don't have enough room.
	 */
	if (atomic_read(&sk->rmem_alloc) > sk->rcvbuf ||
	    !tcp_rmem_schedule(sk, skb)) {
		if (tcp_prune_queue(sk) < 0 || !tcp_rmem_schedule(sk, skb))
			goto drop;
	}

	tcp_data_queue(sk, skb);

	if (before(tp->rcv_nxt, tp->copied_seq)) {
		printk(KERN_DEBUG "*** tcp.c:tcp_data bug acked < copied\n");
		tp->rcv_nxt = tp->copied_seq;
	}
	return;

drop:
	__kfree_skb(skb);
}
/* RFC2861, slow part. Adjust cwnd, after it was not full during one rto.
 * As additional protections, we do not touch cwnd in retransmission phases,
 * and if application hit its sndbuf limit recently.
 */
void tcp_cwnd_application_limited(struct sock *sk)
{
	struct tcp_opt *tp = &(sk->tp_pinfo.af_tcp);

	if (tp->ca_state == TCP_CA_Open &&
	    sk->socket && !test_bit(SOCK_NOSPACE, &sk->socket->flags)) {
		/* Limited by application or receiver window. */
		u32 win_used = max(tp->snd_cwnd_used, 2);

		if (win_used < tp->snd_cwnd) {
			tp->snd_ssthresh = tcp_current_ssthresh(tp);
			tp->snd_cwnd = (tp->snd_cwnd + win_used) >> 1;
		}
		tp->snd_cwnd_used = 0;
	}
	tp->snd_cwnd_stamp = tcp_time_stamp;
}
/* When incoming ACK allowed to free some skb from write_queue,
 * we remember this event in flag tp->queue_shrunk and wake up socket
 * on the exit from tcp input handler.
 */
static void tcp_new_space(struct sock *sk)
{
	struct tcp_opt *tp = &(sk->tp_pinfo.af_tcp);

	if (tp->packets_out < tp->snd_cwnd &&
	    !(sk->userlocks & SOCK_SNDBUF_LOCK) &&
	    !tcp_memory_pressure &&
	    atomic_read(&tcp_memory_allocated) < sysctl_tcp_mem[0]) {
		int sndmem, demanded;

		sndmem = tp->mss_clamp + MAX_TCP_HEADER + 16 + sizeof(struct sk_buff);
		demanded = max(tp->snd_cwnd, tp->reordering + 1);
		sndmem *= 2*demanded;
		if (sndmem > sk->sndbuf)
			sk->sndbuf = min(sndmem, sysctl_tcp_wmem[2]);
		tp->snd_cwnd_stamp = tcp_time_stamp;
	}

	if (tcp_wspace(sk) >= tcp_min_write_space(sk)) {
		struct socket *sock = sk->socket;

		clear_bit(SOCK_NOSPACE, &sock->flags);

		if (sk->sleep && waitqueue_active(sk->sleep))
			wake_up_interruptible(sk->sleep);

		if (sock->fasync_list && !(sk->shutdown & SEND_SHUTDOWN))
			sock_wake_async(sock, 2, POLL_OUT);

		/* Satisfy those who hook write_space() callback. */
		if (sk->write_space != tcp_write_space)
			sk->write_space(sk);
	}
}
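/* Worked example (illustrative): with an mss_clamp of 1460, the per-packet
 * memory charge above is roughly 1460 + MAX_TCP_HEADER + 16 +
 * sizeof(struct sk_buff) bytes.  If snd_cwnd is 20 and reordering is 3,
 * demanded is max(20, 3+1) = 20, so sndbuf is raised toward about
 * 2 * 20 packets worth of that charge, capped by sysctl_tcp_wmem[2].  The
 * factor of two (roughly) leaves room for a full window in flight while the
 * next window's worth of data is still being queued by the application.
 */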
static inline void tcp_check_space(struct sock *sk)
{
	struct tcp_opt *tp = &(sk->tp_pinfo.af_tcp);

	if (tp->queue_shrunk) {
		tp->queue_shrunk = 0;
		if (sk->socket && test_bit(SOCK_NOSPACE, &sk->socket->flags))
			tcp_new_space(sk);
	}
}
static void __tcp_data_snd_check(struct sock *sk, struct sk_buff *skb)
{
	struct tcp_opt *tp = &(sk->tp_pinfo.af_tcp);

	if (after(TCP_SKB_CB(skb)->end_seq, tp->snd_una + tp->snd_wnd) ||
	    tcp_packets_in_flight(tp) >= tp->snd_cwnd ||
	    tcp_write_xmit(sk))
		tcp_check_probe_timer(sk, tp);
}
static __inline__ void tcp_data_snd_check(struct sock *sk)
{
	struct sk_buff *skb = sk->tp_pinfo.af_tcp.send_head;

	if (skb != NULL)
		__tcp_data_snd_check(sk, skb);
	tcp_check_space(sk);
}
/*
 * Check if sending an ack is needed.
 */
static __inline__ void __tcp_ack_snd_check(struct sock *sk, int ofo_possible)
{
	struct tcp_opt *tp = &(sk->tp_pinfo.af_tcp);

	    /* More than one full frame received... */
	if (((tp->rcv_nxt - tp->rcv_wup) > tp->ack.rcv_mss
	     /* ... and right edge of window advances far enough.
	      * (tcp_recvmsg() will send ACK otherwise). Or...
	      */
	     && __tcp_select_window(sk) >= tp->rcv_wnd) ||
	    /* We ACK each frame or... */
	    tcp_in_quickack_mode(tp) ||
	    /* We have out of order data. */
	    (ofo_possible &&
	     skb_peek(&tp->out_of_order_queue) != NULL)) {
		/* Then ack it now */
		tcp_send_ack(sk);
	} else {
		/* Else, send delayed ack. */
		tcp_send_delayed_ack(sk);
	}
}
static __inline__ void tcp_ack_snd_check(struct sock *sk)
{
	struct tcp_opt *tp = &(sk->tp_pinfo.af_tcp);

	if (!tcp_ack_scheduled(tp)) {
		/* We sent a data segment already. */
		return;
	}
	__tcp_ack_snd_check(sk, 1);
}
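/* Illustrative note: the predicate above means an ACK goes out immediately
 * when (a) more than one full rcv_mss of new data has arrived since the last
 * advertised window edge *and* advertising now would (roughly) move that
 * right edge forward, or (b) the connection is in quickack mode (e.g. right
 * after out-of-order or duplicate data), or (c) the out-of-order queue is
 * non-empty.  Everything else is left to the delayed-ACK timer, which is what
 * produces the familiar ACK-every-second-segment behaviour in bulk transfer.
 */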
/*
 *	This routine is only called when we have urgent data
 *	signalled. It's the 'slow' part of tcp_urg. It could be
 *	moved inline now as tcp_urg is only called from one
 *	place. We handle URGent data wrong. We have to - as
 *	BSD still doesn't use the correction from RFC961.
 *	For 1003.1g we should support a new option TCP_STDURG to permit
 *	either form (or just set the sysctl tcp_stdurg).
 */
static void tcp_check_urg(struct sock * sk, struct tcphdr * th)
{
	struct tcp_opt *tp = &(sk->tp_pinfo.af_tcp);
	u32 ptr = ntohs(th->urg_ptr);

	if (ptr && !sysctl_tcp_stdurg)
		ptr--;
	ptr += ntohl(th->seq);

	/* Ignore urgent data that we've already seen and read. */
	if (after(tp->copied_seq, ptr))
		return;

	/* Do we already have a newer (or duplicate) urgent pointer? */
	if (tp->urg_data && !after(ptr, tp->urg_seq))
		return;

	/* Tell the world about our new urgent pointer. */
	if (sk->proc != 0) {
		if (sk->proc > 0)
			kill_proc(sk->proc, SIGURG, 1);
		else
			kill_pg(-sk->proc, SIGURG, 1);
		sk_wake_async(sk, 3, POLL_PRI);
	}

	/* We may be adding urgent data when the last byte read was
	 * urgent. To do this requires some care. We cannot just ignore
	 * tp->copied_seq since we would read the last urgent byte again
	 * as data, nor can we alter copied_seq until this data arrives
	 * or we break the semantics of SIOCATMARK (and thus sockatmark())
	 */
	if (tp->urg_seq == tp->copied_seq)
		tp->copied_seq++;	/* Move the copied sequence on correctly */
	tp->urg_data = TCP_URG_NOTYET;
	tp->urg_seq = ptr;

	/* Disable header prediction. */
	tp->pred_flags = 0;
}
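/* Worked example (illustrative): for a segment with seq 1000 and urg_ptr 5,
 * ptr above becomes 1000 + 5 = 1005 with sysctl_tcp_stdurg set, or 1004 with
 * the default BSD-compatible interpretation (urg_ptr treated as pointing one
 * past the urgent byte, hence the decrement).  The copied_seq/urg_seq
 * bookkeeping then ensures the single urgent byte is delivered once (MSG_OOB,
 * SIOCATMARK) and is not re-read as ordinary data.
 */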
/* This is the 'fast' part of urgent handling. */
static inline void tcp_urg(struct sock *sk, struct tcphdr *th, unsigned long len)
{
	struct tcp_opt *tp = &(sk->tp_pinfo.af_tcp);

	/* Check if we get a new urgent pointer - normally not. */
	if (th->urg)
		tcp_check_urg(sk, th);

	/* Do we wait for any urgent data? - normally not... */
	if (tp->urg_data == TCP_URG_NOTYET) {
		u32 ptr = tp->urg_seq - ntohl(th->seq) + (th->doff*4);

		/* Is the urgent pointer pointing into this packet? */
		if (ptr < len) {
			tp->urg_data = TCP_URG_VALID | *(ptr + (unsigned char *)th);
			if (!sk->dead)
				sk->data_ready(sk, 0);
		}
	}
}
static int tcp_copy_to_iovec(struct sock *sk, struct sk_buff *skb, int hlen)
{
	struct tcp_opt *tp = &(sk->tp_pinfo.af_tcp);
	int chunk = skb->len - hlen;
	int err;

	if (skb->ip_summed == CHECKSUM_UNNECESSARY)
		err = memcpy_toiovec(tp->ucopy.iov, skb->h.raw + hlen, chunk);
	else
		err = copy_and_csum_toiovec(tp->ucopy.iov, skb, hlen);

	if (!err) {
		tp->ucopy.len -= chunk;
		tp->copied_seq += chunk;
	}

	if (err == -EFAULT) {
		sk->error_report(sk);
	}
	return err;
}
static int __tcp_checksum_complete_user(struct sock *sk, struct sk_buff *skb)
{
	int result;

	if (sk->lock.users) {
		local_bh_enable();
		result = __tcp_checksum_complete(skb);
		local_bh_disable();
	} else {
		result = __tcp_checksum_complete(skb);
	}
	return result;
}

static __inline__ int
tcp_checksum_complete_user(struct sock *sk, struct sk_buff *skb)
{
	return skb->ip_summed != CHECKSUM_UNNECESSARY &&
		__tcp_checksum_complete_user(sk, skb);
}
/*
 *	TCP receive function for the ESTABLISHED state.
 *
 *	It is split into a fast path and a slow path. The fast path is
 *	disabled when:
 *	- A zero window was announced from us - zero window probing
 *	  is only handled properly in the slow path.
 *	  [ NOTE: actually, it was made incorrectly and nobody ever noticed
 *	    this! Reason is clear: 1. Correct senders do not send
 *	    to zero window. 2. Even if a sender sends to zero window,
 *	    nothing terrible occurs.
 *
 *	    For now I cleaned this and fast path is really always disabled,
 *	    when window is zero, but I would be more happy to remove these
 *	    checks. Code will be only cleaner and _faster_. --ANK
 *
 *	    Later note. I've just found that slow path also accepts
 *	    out of window segments, look at tcp_sequence(). So...
 *	    it is the last argument: I repair all and comment out
 *	    repaired code by TCP_FORMAL_WINDOW.
 *	    [ I remember one rhyme from a children's book. (I apologize,
 *	      the translation is not rhymed 8)): people in one (jewish) village
 *	      decided to build a sauna, but divided into two parties.
 *	      The first one insisted that battens should not be dubbed,
 *	      another objected that feet would suffer from splinters,
 *	      the first countered that dubbed wet battens are too slippery
 *	      and people will fall, and that is much more serious!
 *	      Certainly, all of them went to the rabbi.
 *	      After some thinking, he judged: "Do not be lazy!
 *	      Certainly, dub the battens! But put them by the dubbed surface down." ]
 *
 *	- Out of order segments arrived.
 *	- Urgent data is expected.
 *	- There is no buffer space left
 *	- Unexpected TCP flags/window values/header lengths are received
 *	  (detected by checking the TCP header against pred_flags)
 *	- Data is sent in both directions. Fast path only supports pure senders
 *	  or pure receivers (this means either the sequence number or the ack
 *	  value must stay constant)
 *	- Unexpected TCP option.
 *
 *	When these conditions are not satisfied it drops into a standard
 *	receive procedure patterned after RFC793 to handle all cases.
 *	The first three cases are guaranteed by proper pred_flags setting,
 *	the rest is checked inline. Fast processing is turned on in
 *	tcp_data_queue when everything is OK.
 */
int tcp_rcv_established(struct sock *sk, struct sk_buff *skb,
			struct tcphdr *th, unsigned len)
{
	struct tcp_opt *tp = &(sk->tp_pinfo.af_tcp);

	/*
	 *	Header prediction.
	 *	The code loosely follows the one in the famous
	 *	"30 instruction TCP receive" Van Jacobson mail.
	 *
	 *	Van's trick is to deposit buffers into socket queue
	 *	on a device interrupt, to call tcp_recv function
	 *	on the receive process context and checksum and copy
	 *	the buffer to user space. smart...
	 *
	 *	Our current scheme is not silly either but we take the
	 *	extra cost of the net_bh soft interrupt processing...
	 *	We do checksum and copy also but from device to kernel.
	 */

	/*	pred_flags is 0xS?10 << 16 + snd_wnd
	 *	if header prediction is to be made
	 *	'S' will always be tp->tcp_header_len >> 2
	 *	'?' will be 0 for the fast path, otherwise pred_flags is 0 to
	 *	turn it off (when there are holes in the receive
	 *	space for instance)
	 *	PSH flag is ignored.
	 */
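	/* Worked example (illustrative): with timestamps on, tcp_header_len is
	 * 20 + 12 = 32 bytes, so 'S' (th->doff) is 8 and the predicted upper
	 * half-word is 0x8010 -- doff 8, only the ACK flag set -- followed by
	 * the unscaled window.  A segment whose flag word masked with
	 * TCP_HP_BITS equals pred_flags and whose seq equals rcv_nxt takes the
	 * fast path below; anything unusual in the header fails the compare,
	 * and events like out-of-order data clear pred_flags entirely.
	 */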
	if ((tcp_flag_word(th) & TCP_HP_BITS) == tp->pred_flags &&
	    TCP_SKB_CB(skb)->seq == tp->rcv_nxt) {
		int tcp_header_len = tp->tcp_header_len;

		/* Timestamp header prediction: tcp_header_len
		 * is automatically equal to th->doff*4 due to pred_flags
		 * match.
		 */

		/* Check timestamp */
		if (tcp_header_len == sizeof(struct tcphdr) + TCPOLEN_TSTAMP_ALIGNED) {
			__u32 *ptr = (__u32 *)(th + 1);

			/* No? Slow path! */
			if (*ptr != __constant_ntohl((TCPOPT_NOP << 24) | (TCPOPT_NOP << 16)
						     | (TCPOPT_TIMESTAMP << 8) | TCPOLEN_TIMESTAMP))
				goto slow_path;

			++ptr;
			tp->rcv_tsval = ntohl(*ptr);
			++ptr;
			tp->rcv_tsecr = ntohl(*ptr);

			/* If PAWS failed, check it more carefully in slow path */
			if ((s32)(tp->rcv_tsval - tp->ts_recent) < 0)
				goto slow_path;

			/* Predicted packet is in window by definition.
			 * seq == rcv_nxt and rcv_wup <= rcv_nxt.
			 * Hence, check seq<=rcv_wup reduces to:
			 */
			if (tp->rcv_nxt == tp->rcv_wup)
				tcp_store_ts_recent(tp);
		}

		if (len <= tcp_header_len) {
			/* Bulk data transfer: sender */
			if (len == tcp_header_len) {
				/* We know that such packets are checksummed
				 * on entry.
				 */
				tcp_ack(sk, skb, 0);
				__kfree_skb(skb);
				tcp_data_snd_check(sk);
				return 0;
			} else { /* Header too small */
				TCP_INC_STATS_BH(TcpInErrs);
				goto discard;
			}
		} else {
			int eaten = 0;
->ucopy
.task
== current
&&
3190 tp
->copied_seq
== tp
->rcv_nxt
&&
3191 len
- tcp_header_len
<= tp
->ucopy
.len
&&
3195 NET_INC_STATS_BH(TCPHPHitsToUser
);
3197 __set_current_state(TASK_RUNNING
);
3199 if (tcp_copy_to_iovec(sk
, skb
, tcp_header_len
))
3202 __skb_pull(skb
,tcp_header_len
);
3204 tp
->rcv_nxt
= TCP_SKB_CB(skb
)->end_seq
;
3206 if (tcp_checksum_complete_user(sk
, skb
))
3209 if ((int)skb
->truesize
> sk
->forward_alloc
)
3212 NET_INC_STATS_BH(TCPHPHits
);
3214 /* Bulk data transfer: receiver */
3215 __skb_pull(skb
,tcp_header_len
);
3216 __skb_queue_tail(&sk
->receive_queue
, skb
);
3217 tcp_set_owner_r(skb
, sk
);
3218 tp
->rcv_nxt
= TCP_SKB_CB(skb
)->end_seq
;
3221 tcp_event_data_recv(sk
, tp
, skb
);
3223 if (TCP_SKB_CB(skb
)->ack_seq
!= tp
->snd_una
) {
3224 /* Well, only one small jumplet in fast path... */
3225 tcp_ack(sk
, skb
, FLAG_DATA
);
3226 tcp_data_snd_check(sk
);
3227 if (!tcp_ack_scheduled(tp
))
3232 if (tcp_in_quickack_mode(tp
)) {
3235 tcp_send_delayed_ack(sk
);
3238 __tcp_ack_snd_check(sk
, 0);
3245 sk
->data_ready(sk
, 0);
3251 if (len
< (th
->doff
<<2) || tcp_checksum_complete_user(sk
, skb
))
3255 * RFC1323: H1. Apply PAWS check first.
3257 if (tcp_fast_parse_options(skb
, th
, tp
) && tp
->saw_tstamp
&&
3258 tcp_paws_discard(tp
, skb
)) {
3260 NET_INC_STATS_BH(PAWSEstabRejected
);
3261 tcp_send_dupack(sk
, skb
);
3264 /* Resets are accepted even if PAWS failed.
3266 ts_recent update must be made after we are sure
3267 that the packet is in window.
3272 * Standard slow path.
3275 if (!tcp_sequence(tp
, TCP_SKB_CB(skb
)->seq
, TCP_SKB_CB(skb
)->end_seq
, th
->rst
)) {
3276 /* RFC793, page 37: "In all states except SYN-SENT, all reset
3277 * (RST) segments are validated by checking their SEQ-fields."
3278 * And page 69: "If an incoming segment is not acceptable,
3279 * an acknowledgment should be sent in reply (unless the RST bit
3280 * is set, if so drop the segment and return)".
3283 tcp_send_dupack(sk
, skb
);
3292 tcp_replace_ts_recent(tp
, TCP_SKB_CB(skb
)->seq
);
3294 if(th
->syn
&& TCP_SKB_CB(skb
)->seq
!= tp
->syn_seq
) {
3295 TCP_INC_STATS_BH(TcpInErrs
);
3296 NET_INC_STATS_BH(TCPAbortOnSyn
);
3303 tcp_ack(sk
, skb
, FLAG_SLOWPATH
);
3305 /* Process urgent data. */
3306 tcp_urg(sk
, th
, len
);
3308 /* step 7: process the segment text */
3309 tcp_data(skb
, sk
, len
);
3311 tcp_data_snd_check(sk
);
3312 tcp_ack_snd_check(sk
);
3316 TCP_INC_STATS_BH(TcpInErrs
);
static int tcp_rcv_synsent_state_process(struct sock *sk, struct sk_buff *skb,
					 struct tcphdr *th, unsigned len)
{
	struct tcp_opt *tp = &(sk->tp_pinfo.af_tcp);

	tcp_parse_options(skb, tp);

	if (th->ack) {
		/* rfc793:
		 * "If the state is SYN-SENT then
		 *    first check the ACK bit
		 *      If the ACK bit is set
		 *	  If SEG.ACK =< ISS, or SEG.ACK > SND.NXT, send
		 *        a reset (unless the RST bit is set, if so drop
		 *        the segment and return)"
		 *
		 *  We do not send data with SYN, so that RFC-correct
		 *  test reduces to:
		 */
		if (TCP_SKB_CB(skb)->ack_seq != tp->snd_nxt)
			return 1;

		if (tp->saw_tstamp) {
			if (tp->rcv_tsecr == 0) {
				/* Workaround for bug in linux-2.1 and early
				 * 2.2 kernels. Let's pretend that we did not
				 * see such a timestamp to avoid a bogus rtt value,
				 * calculated by tcp_ack().
				 */
				tp->saw_tstamp = 0;

				/* But do not forget to store peer's timestamp! */
				tcp_store_ts_recent(tp);
			} else if (!between(tp->rcv_tsecr, tp->retrans_stamp, tcp_time_stamp)) {
				NET_INC_STATS_BH(PAWSActiveRejected);
				return 1;
			}
		}
		/* Now ACK is acceptable.
		 *
		 * "If the RST bit is set
		 *    If the ACK was acceptable then signal the user "error:
		 *    connection reset", drop the segment, enter CLOSED state,
		 *    delete TCB, and return."
		 */
		if (th->rst) {
			tcp_reset(sk);
			goto discard;
		}

		/* rfc793:
		 *   "fifth, if neither of the SYN or RST bits is set then
		 *    drop the segment and return."
		 */
		if (!th->syn)
			goto discard;

		/* rfc793:
		 *   "If the SYN bit is on ...
		 *    are acceptable then ...
		 *    (our SYN has been ACKed), change the connection
		 *    state to ESTABLISHED..."
		 */
		TCP_ECN_rcv_synack(tp, th);

		tp->snd_wl1 = TCP_SKB_CB(skb)->seq;
		tcp_ack(sk, skb, FLAG_SLOWPATH);

		/* Ok.. it's good. Set up sequence numbers and
		 * move to established.
		 */
		tp->rcv_nxt = TCP_SKB_CB(skb)->seq+1;
		tp->rcv_wup = TCP_SKB_CB(skb)->seq+1;

		/* RFC1323: The window in SYN & SYN/ACK segments is
		 * never scaled.
		 */
		tp->snd_wnd = ntohs(th->window);
		tcp_init_wl(tp, TCP_SKB_CB(skb)->ack_seq, TCP_SKB_CB(skb)->seq);
		tp->syn_seq = TCP_SKB_CB(skb)->seq;
		tp->fin_seq = TCP_SKB_CB(skb)->seq;

		if (tp->wscale_ok == 0) {
			tp->snd_wscale = tp->rcv_wscale = 0;
			tp->window_clamp = min(tp->window_clamp, 65535);
		}

		if (tp->tstamp_ok) {
			tp->tcp_header_len =
				sizeof(struct tcphdr) + TCPOLEN_TSTAMP_ALIGNED;
			tp->advmss -= TCPOLEN_TSTAMP_ALIGNED;
		} else {
			tp->tcp_header_len = sizeof(struct tcphdr);
		}
		tcp_store_ts_recent(tp);
		if (tp->sack_ok && sysctl_tcp_fack)
			tp->sack_ok |= 2;

		tcp_sync_mss(sk, tp->pmtu_cookie);
		tcp_initialize_rcv_mss(sk);
		tcp_init_metrics(sk);
		tcp_init_buffer_space(sk);

		if (sk->keepopen)
			tcp_reset_keepalive_timer(sk, keepalive_time_when(tp));

		if (tp->snd_wscale == 0)
			__tcp_fast_path_on(tp, tp->snd_wnd);
		/* Remember, tcp_poll() does not lock socket!
		 * Change state from SYN-SENT only after copied_seq
		 * is initialized.
		 */
		tp->copied_seq = tp->rcv_nxt;
		tcp_set_state(sk, TCP_ESTABLISHED);

		if (!sk->dead) {
			sk->state_change(sk);
			sk_wake_async(sk, 0, POLL_OUT);
		}

		if (tp->write_pending || tp->defer_accept) {
			/* Save one ACK. Data will be ready after
			 * several ticks, if write_pending is set.
			 *
			 * It may be deleted, but with this feature tcpdumps
			 * look so _wonderfully_ clever, that I was not able
			 * to stand against the temptation 8)     --ANK
			 */
			tcp_schedule_ack(tp);
			tp->ack.lrcvtime = tcp_time_stamp;
			tcp_enter_quickack_mode(tp);
			tcp_reset_xmit_timer(sk, TCP_TIME_DACK, TCP_DELACK_MAX);

discard:
			__kfree_skb(skb);
			return 0;
		} else {
			tcp_send_ack(sk);
		}
		return -1;
	}
	/* No ACK in the segment */

	if (th->rst) {
		/* rfc793:
		 * "If the RST bit is set
		 *
		 *      Otherwise (no ACK) drop the segment and return."
		 */
		goto discard;
	}

	/* PAWS check. */
	if (tp->ts_recent_stamp && tp->saw_tstamp && tcp_paws_check(tp, 0))
		goto discard;

	if (th->syn) {
		/* We see SYN without ACK. It is attempt of
		 * simultaneous connect with crossed SYNs.
		 * Particularly, it can be connect to self.
		 */
		tcp_set_state(sk, TCP_SYN_RECV);

		if (tp->saw_tstamp)
			tcp_store_ts_recent(tp);

		tp->rcv_nxt = TCP_SKB_CB(skb)->seq + 1;
		tp->rcv_wup = TCP_SKB_CB(skb)->seq + 1;

		/* RFC1323: The window in SYN & SYN/ACK segments is
		 * never scaled.
		 */
		tp->snd_wnd = ntohs(th->window);
		tp->snd_wl1 = TCP_SKB_CB(skb)->seq;
		tp->max_window = tp->snd_wnd;

		tcp_sync_mss(sk, tp->pmtu_cookie);
		tcp_initialize_rcv_mss(sk);

		TCP_ECN_rcv_syn(tp, th);

		tcp_send_synack(sk);

		/* Note, we could accept data and URG from this segment.
		 * There are no obstacles to make this.
		 *
		 * However, if we ignore data in ACKless segments sometimes,
		 * we have no reasons to accept it sometimes.
		 * Also, seems the code doing it in step6 of tcp_rcv_state_process
		 * is not flawless. So, discard packet for sanity.
		 * Uncomment this return to process the data.
		 */
		goto discard;
	}

	/* "fifth, if neither of the SYN or RST bits is set then
	 * drop the segment and return."
	 */
	goto discard;
}
/*
 *	This function implements the receiving procedure of RFC 793 for
 *	all states except ESTABLISHED and TIME_WAIT.
 *	It's called from both tcp_v4_rcv and tcp_v6_rcv and should be
 *	address independent.
 */
int tcp_rcv_state_process(struct sock *sk, struct sk_buff *skb,
			  struct tcphdr *th, unsigned len)
{
	struct tcp_opt *tp = &(sk->tp_pinfo.af_tcp);
	int queued = 0;

	switch (sk->state) {
	case TCP_LISTEN:
		if (th->syn) {
			if (tp->af_specific->conn_request(sk, skb) < 0)
				return 1;

			/* Now we have several options: In theory there is
			 * nothing else in the frame. KA9Q has an option to
			 * send data with the syn, BSD accepts data with the
			 * syn up to the [to be] advertised window and
			 * Solaris 2.1 gives you a protocol error. For now
			 * we just ignore it, that fits the spec precisely
			 * and avoids incompatibilities. It would be nice in
			 * future to drop through and process the data.
			 *
			 * Now that TTCP is starting to be used we ought to
			 * queue this data.
			 * But, this leaves one open to an easy denial of
			 * service attack, and SYN cookies can't defend
			 * against this problem. So, we drop the data
			 * in the interest of security over speed.
			 */
			goto discard;
		}
		goto discard;

	case TCP_SYN_SENT:
		queued = tcp_rcv_synsent_state_process(sk, skb, th, len);
		if (queued >= 0)
			return queued;
		/* Do step6 onward by hand. */
		break;
	}

	if (tcp_fast_parse_options(skb, th, tp) && tp->saw_tstamp &&
	    tcp_paws_discard(tp, skb)) {
		if (!th->rst) {
			NET_INC_STATS_BH(PAWSEstabRejected);
			tcp_send_dupack(sk, skb);
			goto discard;
		}
		/* Reset is accepted even if it did not pass PAWS. */
	}

	/* step 1: check sequence number */
	if (!tcp_sequence(tp, TCP_SKB_CB(skb)->seq, TCP_SKB_CB(skb)->end_seq, th->rst)) {
		if (!th->rst)
			tcp_send_dupack(sk, skb);
		goto discard;
	}

	/* step 2: check RST bit */
	if (th->rst) {
		tcp_reset(sk);
		goto discard;
	}

	tcp_replace_ts_recent(tp, TCP_SKB_CB(skb)->seq);

	/* step 3: check security and precedence [ignored] */

	/*	step 4:
	 *
	 *	Check for a SYN, and ensure it matches the SYN we were
	 *	first sent. We have to handle the rather unusual (but valid)
	 *	sequence that KA9Q derived products may generate of
	 *
	 *		SYN|ACK Data + More Data
	 *	.. we must ACK not RST...
	 *
	 *	We keep syn_seq as the sequence space occupied by the
	 *	original syn.
	 */
	if (th->syn && TCP_SKB_CB(skb)->seq != tp->syn_seq) {
		NET_INC_STATS_BH(TCPAbortOnSyn);
		tcp_reset(sk);
		return 1;
	}

	/* step 5: check the ACK field */
	if (th->ack) {
		int acceptable = tcp_ack(sk, skb, FLAG_SLOWPATH);

		switch (sk->state) {
		case TCP_SYN_RECV:
			if (acceptable) {
				tp->copied_seq = tp->rcv_nxt;
				tcp_set_state(sk, TCP_ESTABLISHED);

				/* Note, that this wakeup is only for marginal
				 * crossed SYN case. Passively open sockets
				 * are not waked up, because sk->sleep == NULL
				 * and sk->socket == NULL.
				 */
				if (!sk->dead) {
					sk->state_change(sk);
					sk_wake_async(sk, 0, POLL_OUT);
				}

				tp->snd_una = TCP_SKB_CB(skb)->ack_seq;
				tp->snd_wnd = ntohs(th->window) << tp->snd_wscale;
				tcp_init_wl(tp, TCP_SKB_CB(skb)->ack_seq, TCP_SKB_CB(skb)->seq);

				/* tcp_ack considers this ACK as duplicate
				 * and does not calculate rtt.
				 * Fix it at least with timestamps.
				 */
				if (tp->saw_tstamp && !tp->srtt)
					tcp_ack_saw_tstamp(tp);

				if (tp->tstamp_ok)
					tp->advmss -= TCPOLEN_TSTAMP_ALIGNED;

				tcp_init_metrics(sk);
				tcp_initialize_rcv_mss(sk);
				tcp_init_buffer_space(sk);
				tcp_fast_path_on(tp);
			} else {
				return 1;
			}
			break;
->snd_una
== tp
->write_seq
) {
3686 tcp_set_state(sk
, TCP_FIN_WAIT2
);
3687 sk
->shutdown
|= SEND_SHUTDOWN
;
3688 dst_confirm(sk
->dst_cache
);
3691 /* Wake up lingering close() */
3692 sk
->state_change(sk
);
3696 if (tp
->linger2
< 0 ||
3697 (TCP_SKB_CB(skb
)->end_seq
!= TCP_SKB_CB(skb
)->seq
&&
3698 after(TCP_SKB_CB(skb
)->end_seq
- th
->fin
, tp
->rcv_nxt
))) {
3700 NET_INC_STATS_BH(TCPAbortOnData
);
3704 tmo
= tcp_fin_time(tp
);
3705 if (tmo
> TCP_TIMEWAIT_LEN
) {
3706 tcp_reset_keepalive_timer(sk
, tmo
- TCP_TIMEWAIT_LEN
);
3707 } else if (th
->fin
|| sk
->lock
.users
) {
3708 /* Bad case. We could lose such FIN otherwise.
3709 * It is not a big problem, but it looks confusing
3710 * and not so rare event. We still can lose it now,
3711 * if it spins in bh_lock_sock(), but it is really
3714 tcp_reset_keepalive_timer(sk
, tmo
);
3716 tcp_time_wait(sk
, TCP_FIN_WAIT2
, tmo
);
3724 if (tp
->snd_una
== tp
->write_seq
) {
3725 tcp_time_wait(sk
, TCP_TIME_WAIT
, 0);
3731 if (tp
->snd_una
== tp
->write_seq
) {
3732 tcp_update_metrics(sk
);
	/* step 6: check the URG bit */
	tcp_urg(sk, th, len);

	/* step 7: process the segment text */
	switch (sk->state) {
	case TCP_CLOSE_WAIT:
	case TCP_CLOSING:
	case TCP_LAST_ACK:
		if (!before(TCP_SKB_CB(skb)->seq, tp->fin_seq))
			break;
	case TCP_FIN_WAIT1:
	case TCP_FIN_WAIT2:
		/* RFC 793 says to queue data in these states,
		 * RFC 1122 says we MUST send a reset.
		 * BSD 4.4 also does reset.
		 */
		if (sk->shutdown & RCV_SHUTDOWN) {
			if (TCP_SKB_CB(skb)->end_seq != TCP_SKB_CB(skb)->seq &&
			    after(TCP_SKB_CB(skb)->end_seq - th->fin, tp->rcv_nxt)) {
				NET_INC_STATS_BH(TCPAbortOnData);
				tcp_reset(sk);
				return 1;
			}
		}
		/* Fall through */
	case TCP_ESTABLISHED:
		tcp_data(skb, sk, len);
		queued = 1;
		break;
	}

	/* tcp_data could move socket to TIME-WAIT */
	if (sk->state != TCP_CLOSE) {
		tcp_data_snd_check(sk);
		tcp_ack_snd_check(sk
);