/*
 * TCP Westwood+
 *
 *	Angelo Dell'Aera: TCP Westwood+ support
 */
#include <linux/config.h>
#include <linux/module.h>
#include <linux/skbuff.h>
#include <linux/inet_diag.h>
#include <net/tcp.h>
/* TCP Westwood structure */
struct westwood {
	u32	bw_ns_est;	/* first bandwidth estimation..not too smoothed 8) */
	u32	bw_est;		/* bandwidth estimate */
	u32	rtt_win_sx;	/* here starts a new evaluation... */
	u32	bk;		/* bytes acked during the current RTT window */
	u32	snd_una;	/* used for evaluating the number of acked bytes */
	u32	cumul_ack;	/* bytes acked by the last ACK */
	u32	accounted;	/* bytes already credited on dupacks */
	u32	rtt;		/* last RTT sample */
	u32	rtt_min;	/* minimum observed RTT */
	u8	first_ack;	/* flag which infers that this is the first ack */
};
/* TCP Westwood functions and constants */
#define TCP_WESTWOOD_RTT_MIN	(HZ/20)		/* 50ms */
#define TCP_WESTWOOD_INIT_RTT	(20*HZ)		/* maybe too conservative?! */
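
/*
 * For illustration: with HZ=100 these work out to 5 jiffies (50 ms) and
 * 2000 jiffies (20 s) respectively; with HZ=1000 they are 50 and 20000
 * jiffies.
 */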
/*
 * @tcp_westwood_init
 * This function initializes fields used in TCP Westwood+.  It is called
 * after the initial SYN, so the sequence numbers are correct, but for new
 * passive connections we have no information about RTTmin at this point,
 * so we simply set it to TCP_WESTWOOD_INIT_RTT.  This value was chosen to
 * be deliberately conservative, so that it is sure to be updated in a
 * consistent way as soon as possible.  That will reasonably happen within
 * the first RTT period of the connection lifetime.
 */
static void tcp_westwood_init(struct sock *sk)
{
	struct westwood *w = inet_csk_ca(sk);

	w->bk = 0;
	w->bw_ns_est = 0;
	w->bw_est = 0;
	w->accounted = 0;
	w->cumul_ack = 0;
	w->rtt_min = w->rtt = TCP_WESTWOOD_INIT_RTT;
	w->rtt_win_sx = tcp_time_stamp;
	w->snd_una = tcp_sk(sk)->snd_una;
	w->first_ack = 1;
}
/*
 * @westwood_do_filter
 * Low-pass filter. Implemented using constant coefficients.
 */
static inline u32 westwood_do_filter(u32 a, u32 b)
{
	return (((7 * a) + b) >> 3);
}
static inline void westwood_filter(struct westwood *w, u32 delta)
{
	w->bw_ns_est = westwood_do_filter(w->bw_ns_est, w->bk / delta);
	w->bw_est = westwood_do_filter(w->bw_est, w->bw_ns_est);
}
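
/*
 * For illustration (figures chosen arbitrarily): the filter computes
 * (7*a + b) / 8, an exponentially weighted moving average with gain 1/8.
 * E.g. with a previous estimate a = 800 and a new sample b = 1600,
 * (7*800 + 1600) >> 3 = 900, so each sample moves the estimate by about
 * 1/8 of the difference between sample and estimate.
 */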
/*
 * @westwood_pkts_acked
 * Called after processing a group of packets,
 * but all Westwood needs is the last sample of srtt.
 */
static void tcp_westwood_pkts_acked(struct sock *sk, u32 cnt)
{
	struct westwood *w = inet_csk_ca(sk);

	if (cnt > 0)
		w->rtt = tcp_sk(sk)->srtt >> 3;
}
/*
 * @westwood_update_window
 * It updates the RTT evaluation window if it is the right moment to do
 * it. If so, it calls the filter for evaluating bandwidth.
 */
static void westwood_update_window(struct sock *sk)
{
	struct westwood *w = inet_csk_ca(sk);
	s32 delta = tcp_time_stamp - w->rtt_win_sx;

	/* Initialise w->snd_una with the first acked sequence number in order
	 * to fix mismatch between tp->snd_una and w->snd_una for the first
	 * bandwidth sample.
	 */
	if (w->first_ack) {
		w->snd_una = tcp_sk(sk)->snd_una;
		w->first_ack = 0;
	}

	/*
	 * See if an RTT-window has passed.
	 * Be careful since if RTT is less than
	 * 50ms we don't filter but we continue 'building the sample'.
	 * This minimum limit was chosen because estimating over very
	 * small time intervals is better avoided.
	 * Obviously on a LAN we reasonably will always have
	 * right_bound = left_bound + WESTWOOD_RTT_MIN
	 */
	if (w->rtt && delta > max_t(u32, w->rtt, TCP_WESTWOOD_RTT_MIN)) {
		westwood_filter(w, delta);

		/* start building a new bandwidth sample */
		w->bk = 0;
		w->rtt_win_sx = tcp_time_stamp;
	}
}
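
/*
 * Worked example (illustrative figures): if bk = 65536 bytes were acked
 * over a window of delta = 100 jiffies (100 ms at HZ=1000), the raw
 * sample fed to the filter is bk / delta = 655 bytes per jiffy, i.e.
 * about 655 KB/s or roughly 5.2 Mbit/s.
 */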
/*
 * @westwood_fast_bw
 * It is called when we are in the fast path. In particular, it is called
 * when header prediction is successful. In that case the update is
 * straightforward and doesn't need any particular care.
 */
static inline void westwood_fast_bw(struct sock *sk)
{
	const struct tcp_sock *tp = tcp_sk(sk);
	struct westwood *w = inet_csk_ca(sk);

	westwood_update_window(sk);

	w->bk += tp->snd_una - w->snd_una;
	w->snd_una = tp->snd_una;
	w->rtt_min = min(w->rtt, w->rtt_min);
}
/*
 * @westwood_acked_count
 * This function evaluates cumul_ack for evaluating bk in case of
 * delayed or partial acks.
 */
static inline u32 westwood_acked_count(struct sock *sk)
{
	const struct tcp_sock *tp = tcp_sk(sk);
	struct westwood *w = inet_csk_ca(sk);

	w->cumul_ack = tp->snd_una - w->snd_una;

	/* If cumul_ack is 0 this is a dupack since it's not moving
	 * tp->snd_una.
	 */
	if (!w->cumul_ack) {
		w->accounted += tp->mss_cache;
		w->cumul_ack = tp->mss_cache;
	}

	if (w->cumul_ack > tp->mss_cache) {
		/* Partial or delayed ack */
		if (w->accounted >= w->cumul_ack) {
			w->accounted -= w->cumul_ack;
			w->cumul_ack = tp->mss_cache;
		} else {
			w->cumul_ack -= w->accounted;
			w->accounted = 0;
		}
	}

	w->snd_una = tp->snd_una;

	return w->cumul_ack;
}
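
/*
 * Worked example (illustrative figures): with mss_cache = 1460, a delayed
 * ACK advancing snd_una by 2920 bytes gives cumul_ack = 2920; if nothing
 * was previously accounted, all 2920 bytes are credited to bk by the
 * caller.  A dupack (cumul_ack == 0) instead credits one MSS and records
 * it in w->accounted so those bytes are not counted twice later.
 */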
/*
 * TCP Westwood
 * Here the limit is evaluated as the bandwidth estimate times RTTmin
 * (to obtain it in packets we divide by mss_cache). The result is
 * clamped to be >= 2, so this never returns 0.
 */
static u32 tcp_westwood_bw_rttmin(const struct sock *sk)
{
	const struct tcp_sock *tp = tcp_sk(sk);
	const struct westwood *w = inet_csk_ca(sk);

	return max_t(u32, (w->bw_est * w->rtt_min) / tp->mss_cache, 2);
}
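
/*
 * Worked example (illustrative figures): with bw_est = 655 bytes/jiffy,
 * rtt_min = 100 jiffies and mss_cache = 1460, this returns
 * (655 * 100) / 1460 = 44 packets; the max_t() clamp only matters when
 * the product would otherwise round down to 0 or 1.
 */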
static void tcp_westwood_event(struct sock *sk, enum tcp_ca_event event)
{
	struct tcp_sock *tp = tcp_sk(sk);
	struct westwood *w = inet_csk_ca(sk);

	switch (event) {
	case CA_EVENT_FAST_ACK:
		westwood_fast_bw(sk);
		break;

	case CA_EVENT_COMPLETE_CWR:
		tp->snd_cwnd = tp->snd_ssthresh = tcp_westwood_bw_rttmin(sk);
		break;

	case CA_EVENT_FRTO:
		tp->snd_ssthresh = tcp_westwood_bw_rttmin(sk);
		break;

	case CA_EVENT_SLOW_ACK:
		westwood_update_window(sk);
		w->bk += westwood_acked_count(sk);
		w->rtt_min = min(w->rtt, w->rtt_min);
		break;

	default:
		/* don't care */
		break;
	}
}
/* Extract info for Tcp socket info provided via netlink. */
static void tcp_westwood_info(struct sock *sk, u32 ext,
			      struct sk_buff *skb)
{
	const struct westwood *ca = inet_csk_ca(sk);

	if (ext & (1 << (INET_DIAG_VEGASINFO - 1))) {
		struct rtattr *rta;
		struct tcpvegas_info *info;

		rta = __RTA_PUT(skb, INET_DIAG_VEGASINFO, sizeof(*info));
		info = RTA_DATA(rta);
		info->tcpv_enabled = 1;
		info->tcpv_rttcnt = 0;
		info->tcpv_rtt = jiffies_to_usecs(ca->rtt);
		info->tcpv_minrtt = jiffies_to_usecs(ca->rtt_min);
	rtattr_failure:	;	/* __RTA_PUT() jumps here if the skb runs out of room */
	}
}
static struct tcp_congestion_ops tcp_westwood = {
	.init		= tcp_westwood_init,
	.ssthresh	= tcp_reno_ssthresh,
	.cong_avoid	= tcp_reno_cong_avoid,
	.min_cwnd	= tcp_westwood_bw_rttmin,
	.cwnd_event	= tcp_westwood_event,
	.get_info	= tcp_westwood_info,
	.pkts_acked	= tcp_westwood_pkts_acked,

	.owner		= THIS_MODULE,
	.name		= "westwood"
};
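
/*
 * Usage note: once this module is loaded, the algorithm can be selected
 * system-wide with, for example,
 *
 *	sysctl -w net.ipv4.tcp_congestion_control=westwood
 *
 * or per socket via setsockopt(fd, IPPROTO_TCP, TCP_CONGESTION,
 * "westwood", 8) on kernels that support the TCP_CONGESTION option.
 */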
static int __init tcp_westwood_register(void)
{
	BUG_ON(sizeof(struct westwood) > ICSK_CA_PRIV_SIZE);
	return tcp_register_congestion_control(&tcp_westwood);
}
static void __exit tcp_westwood_unregister(void)
{
	tcp_unregister_congestion_control(&tcp_westwood);
}
module_init(tcp_westwood_register);
module_exit(tcp_westwood_unregister);
MODULE_AUTHOR("Stephen Hemminger, Angelo Dell'Aera");
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("TCP Westwood+");