net/ipv4/tcp_illinois.c — TCP Illinois congestion control module
/*
 * TCP Illinois congestion control.
 * Home page:
 *	http://www.ews.uiuc.edu/~shaoliu/tcpillinois/index.html
 *
 * The algorithm is described in:
 * "TCP-Illinois: A Loss and Delay-Based Congestion Control Algorithm
 *  for High-Speed Networks"
 * http://www.ifp.illinois.edu/~srikant/Papers/liubassri06perf.pdf
 *
 * Implemented from description in paper and ns-2 simulation.
 * Copyright (C) 2007 Stephen Hemminger <shemminger@linux-foundation.org>
 */
15 #include <linux/module.h>
16 #include <linux/skbuff.h>
17 #include <linux/inet_diag.h>
18 #include <asm/div64.h>
19 #include <net/tcp.h>
/* Fixed-point scaling for the additive-increase factor alpha:
 * ALPHA_SCALE represents 1.0, giving a usable range of [~0.3, 10.0].
 */
#define ALPHA_SHIFT	7
#define ALPHA_SCALE	(1u<<ALPHA_SHIFT)
#define ALPHA_MIN	((3*ALPHA_SCALE)/10)	/* ~0.3 */
#define ALPHA_MAX	(10*ALPHA_SCALE)	/* 10.0 */
#define ALPHA_BASE	ALPHA_SCALE		/* 1.0 */
#define RTT_MAX		(U32_MAX / ALPHA_MAX)	/* 3.3 secs; keeps rtt*alpha in u32 */

/* Fixed-point scaling for the multiplicative-decrease factor beta:
 * BETA_SCALE represents 1.0, giving a usable range of [0.125, 0.5].
 */
#define BETA_SHIFT	6
#define BETA_SCALE	(1u<<BETA_SHIFT)
#define BETA_MIN	(BETA_SCALE/8)		/* 0.125 */
#define BETA_MAX	(BETA_SCALE/2)		/* 0.5 */
#define BETA_BASE	BETA_MAX

/* Below this cwnd the algorithm behaves like Reno (base alpha/beta). */
static int win_thresh __read_mostly = 15;
module_param(win_thresh, int, 0);
MODULE_PARM_DESC(win_thresh, "Window threshold for starting adaptive sizing");

/* Consecutive low-delay RTTs required before alpha may return to its
 * maximum after congestion has been observed (see alpha() below).
 */
static int theta __read_mostly = 5;
module_param(theta, int, 0);
MODULE_PARM_DESC(theta, "# of fast RTT's before full growth");
/* TCP Illinois per-connection state, stored in the inet_csk CA area. */
struct illinois {
	u64	sum_rtt;	/* sum of rtt's measured within last rtt */
	u16	cnt_rtt;	/* # of rtts measured within last rtt */
	u32	base_rtt;	/* min of all rtt in usec */
	u32	max_rtt;	/* max of all rtt in usec */
	u32	end_seq;	/* right edge of current RTT */
	u32	alpha;		/* Additive increase */
	u32	beta;		/* Multiplicative decrease */
	u32	loss_cwnd;	/* cwnd on loss */
	u16	acked;		/* # packets acked by current ACK */
	u8	rtt_above;	/* average rtt has gone above threshold */
	u8	rtt_low;	/* # of rtts measurements below threshold */
};
57 static void rtt_reset(struct sock *sk)
59 struct tcp_sock *tp = tcp_sk(sk);
60 struct illinois *ca = inet_csk_ca(sk);
62 ca->end_seq = tp->snd_nxt;
63 ca->cnt_rtt = 0;
64 ca->sum_rtt = 0;
66 /* TODO: age max_rtt? */
69 static void tcp_illinois_init(struct sock *sk)
71 struct illinois *ca = inet_csk_ca(sk);
73 ca->alpha = ALPHA_MAX;
74 ca->beta = BETA_BASE;
75 ca->base_rtt = 0x7fffffff;
76 ca->max_rtt = 0;
78 ca->acked = 0;
79 ca->rtt_low = 0;
80 ca->rtt_above = 0;
82 rtt_reset(sk);
85 /* Measure RTT for each ack. */
86 static void tcp_illinois_acked(struct sock *sk, const struct ack_sample *sample)
88 struct illinois *ca = inet_csk_ca(sk);
89 s32 rtt_us = sample->rtt_us;
91 ca->acked = sample->pkts_acked;
93 /* dup ack, no rtt sample */
94 if (rtt_us < 0)
95 return;
97 /* ignore bogus values, this prevents wraparound in alpha math */
98 if (rtt_us > RTT_MAX)
99 rtt_us = RTT_MAX;
101 /* keep track of minimum RTT seen so far */
102 if (ca->base_rtt > rtt_us)
103 ca->base_rtt = rtt_us;
105 /* and max */
106 if (ca->max_rtt < rtt_us)
107 ca->max_rtt = rtt_us;
109 ++ca->cnt_rtt;
110 ca->sum_rtt += rtt_us;
113 /* Maximum queuing delay */
114 static inline u32 max_delay(const struct illinois *ca)
116 return ca->max_rtt - ca->base_rtt;
119 /* Average queuing delay */
120 static inline u32 avg_delay(const struct illinois *ca)
122 u64 t = ca->sum_rtt;
124 do_div(t, ca->cnt_rtt);
125 return t - ca->base_rtt;
129 * Compute value of alpha used for additive increase.
130 * If small window then use 1.0, equivalent to Reno.
132 * For larger windows, adjust based on average delay.
133 * A. If average delay is at minimum (we are uncongested),
134 * then use large alpha (10.0) to increase faster.
135 * B. If average delay is at maximum (getting congested)
136 * then use small alpha (0.3)
138 * The result is a convex window growth curve.
140 static u32 alpha(struct illinois *ca, u32 da, u32 dm)
142 u32 d1 = dm / 100; /* Low threshold */
144 if (da <= d1) {
145 /* If never got out of low delay zone, then use max */
146 if (!ca->rtt_above)
147 return ALPHA_MAX;
149 /* Wait for 5 good RTT's before allowing alpha to go alpha max.
150 * This prevents one good RTT from causing sudden window increase.
152 if (++ca->rtt_low < theta)
153 return ca->alpha;
155 ca->rtt_low = 0;
156 ca->rtt_above = 0;
157 return ALPHA_MAX;
160 ca->rtt_above = 1;
163 * Based on:
165 * (dm - d1) amin amax
166 * k1 = -------------------
167 * amax - amin
169 * (dm - d1) amin
170 * k2 = ---------------- - d1
171 * amax - amin
173 * k1
174 * alpha = ----------
175 * k2 + da
178 dm -= d1;
179 da -= d1;
180 return (dm * ALPHA_MAX) /
181 (dm + (da * (ALPHA_MAX - ALPHA_MIN)) / ALPHA_MIN);
185 * Beta used for multiplicative decrease.
186 * For small window sizes returns same value as Reno (0.5)
188 * If delay is small (10% of max) then beta = 1/8
189 * If delay is up to 80% of max then beta = 1/2
190 * In between is a linear function
192 static u32 beta(u32 da, u32 dm)
194 u32 d2, d3;
196 d2 = dm / 10;
197 if (da <= d2)
198 return BETA_MIN;
200 d3 = (8 * dm) / 10;
201 if (da >= d3 || d3 <= d2)
202 return BETA_MAX;
205 * Based on:
207 * bmin d3 - bmax d2
208 * k3 = -------------------
209 * d3 - d2
211 * bmax - bmin
212 * k4 = -------------
213 * d3 - d2
215 * b = k3 + k4 da
217 return (BETA_MIN * d3 - BETA_MAX * d2 + (BETA_MAX - BETA_MIN) * da)
218 / (d3 - d2);
221 /* Update alpha and beta values once per RTT */
222 static void update_params(struct sock *sk)
224 struct tcp_sock *tp = tcp_sk(sk);
225 struct illinois *ca = inet_csk_ca(sk);
227 if (tp->snd_cwnd < win_thresh) {
228 ca->alpha = ALPHA_BASE;
229 ca->beta = BETA_BASE;
230 } else if (ca->cnt_rtt > 0) {
231 u32 dm = max_delay(ca);
232 u32 da = avg_delay(ca);
234 ca->alpha = alpha(ca, da, dm);
235 ca->beta = beta(da, dm);
238 rtt_reset(sk);
242 * In case of loss, reset to default values
244 static void tcp_illinois_state(struct sock *sk, u8 new_state)
246 struct illinois *ca = inet_csk_ca(sk);
248 if (new_state == TCP_CA_Loss) {
249 ca->alpha = ALPHA_BASE;
250 ca->beta = BETA_BASE;
251 ca->rtt_low = 0;
252 ca->rtt_above = 0;
253 rtt_reset(sk);
258 * Increase window in response to successful acknowledgment.
260 static void tcp_illinois_cong_avoid(struct sock *sk, u32 ack, u32 acked)
262 struct tcp_sock *tp = tcp_sk(sk);
263 struct illinois *ca = inet_csk_ca(sk);
265 if (after(ack, ca->end_seq))
266 update_params(sk);
268 /* RFC2861 only increase cwnd if fully utilized */
269 if (!tcp_is_cwnd_limited(sk))
270 return;
272 /* In slow start */
273 if (tcp_in_slow_start(tp))
274 tcp_slow_start(tp, acked);
276 else {
277 u32 delta;
279 /* snd_cwnd_cnt is # of packets since last cwnd increment */
280 tp->snd_cwnd_cnt += ca->acked;
281 ca->acked = 1;
283 /* This is close approximation of:
284 * tp->snd_cwnd += alpha/tp->snd_cwnd
286 delta = (tp->snd_cwnd_cnt * ca->alpha) >> ALPHA_SHIFT;
287 if (delta >= tp->snd_cwnd) {
288 tp->snd_cwnd = min(tp->snd_cwnd + delta / tp->snd_cwnd,
289 (u32)tp->snd_cwnd_clamp);
290 tp->snd_cwnd_cnt = 0;
295 static u32 tcp_illinois_ssthresh(struct sock *sk)
297 struct tcp_sock *tp = tcp_sk(sk);
298 struct illinois *ca = inet_csk_ca(sk);
300 ca->loss_cwnd = tp->snd_cwnd;
301 /* Multiplicative decrease */
302 return max(tp->snd_cwnd - ((tp->snd_cwnd * ca->beta) >> BETA_SHIFT), 2U);
305 static u32 tcp_illinois_cwnd_undo(struct sock *sk)
307 const struct illinois *ca = inet_csk_ca(sk);
309 return max(tcp_sk(sk)->snd_cwnd, ca->loss_cwnd);
312 /* Extract info for Tcp socket info provided via netlink. */
313 static size_t tcp_illinois_info(struct sock *sk, u32 ext, int *attr,
314 union tcp_cc_info *info)
316 const struct illinois *ca = inet_csk_ca(sk);
318 if (ext & (1 << (INET_DIAG_VEGASINFO - 1))) {
319 info->vegas.tcpv_enabled = 1;
320 info->vegas.tcpv_rttcnt = ca->cnt_rtt;
321 info->vegas.tcpv_minrtt = ca->base_rtt;
322 info->vegas.tcpv_rtt = 0;
324 if (info->vegas.tcpv_rttcnt > 0) {
325 u64 t = ca->sum_rtt;
327 do_div(t, info->vegas.tcpv_rttcnt);
328 info->vegas.tcpv_rtt = t;
330 *attr = INET_DIAG_VEGASINFO;
331 return sizeof(struct tcpvegas_info);
333 return 0;
/* Congestion-control operations table registered with the TCP stack. */
static struct tcp_congestion_ops tcp_illinois __read_mostly = {
	.init		= tcp_illinois_init,
	.ssthresh	= tcp_illinois_ssthresh,
	.undo_cwnd	= tcp_illinois_cwnd_undo,
	.cong_avoid	= tcp_illinois_cong_avoid,
	.set_state	= tcp_illinois_state,
	.get_info	= tcp_illinois_info,
	.pkts_acked	= tcp_illinois_acked,

	.owner		= THIS_MODULE,
	.name		= "illinois",
};
349 static int __init tcp_illinois_register(void)
351 BUILD_BUG_ON(sizeof(struct illinois) > ICSK_CA_PRIV_SIZE);
352 return tcp_register_congestion_control(&tcp_illinois);
355 static void __exit tcp_illinois_unregister(void)
357 tcp_unregister_congestion_control(&tcp_illinois);
/* Standard module entry points and metadata. */
module_init(tcp_illinois_register);
module_exit(tcp_illinois_unregister);

MODULE_AUTHOR("Stephen Hemminger, Shao Liu");
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("TCP Illinois");
MODULE_VERSION("1.0");