/*
 * Pluggable TCP congestion control support and newReno
 * congestion control.
 * Based on ideas from I/O scheduler support and Web100.
 *
 * Copyright (C) 2005 Stephen Hemminger <shemminger@osdl.org>
 */

#include <linux/module.h>
#include <linux/mm.h>
#include <linux/types.h>
#include <linux/list.h>
#include <net/tcp.h>

static DEFINE_SPINLOCK(tcp_cong_list_lock);
static LIST_HEAD(tcp_cong_list);
/* Simple linear search, don't expect many entries! */
static struct tcp_congestion_ops *tcp_ca_find(const char *name)
{
	struct tcp_congestion_ops *e;

	list_for_each_entry_rcu(e, &tcp_cong_list, list) {
		if (strcmp(e->name, name) == 0)
			return e;
	}

	return NULL;
}
/*
 * Attach new congestion control algorithm to the list
 * of available options.
 */
int tcp_register_congestion_control(struct tcp_congestion_ops *ca)
{
	int ret = 0;

	/* all algorithms must implement ssthresh and cong_avoid ops */
	if (!ca->ssthresh || !ca->cong_avoid) {
		printk(KERN_ERR "TCP %s does not implement required ops\n",
		       ca->name);
		return -EINVAL;
	}

	spin_lock(&tcp_cong_list_lock);
	if (tcp_ca_find(ca->name)) {
		printk(KERN_NOTICE "TCP %s already registered\n", ca->name);
		ret = -EEXIST;
	} else {
		list_add_tail_rcu(&ca->list, &tcp_cong_list);
		printk(KERN_INFO "TCP %s registered\n", ca->name);
	}
	spin_unlock(&tcp_cong_list_lock);

	return ret;
}
EXPORT_SYMBOL_GPL(tcp_register_congestion_control);
/*
 * Remove congestion control algorithm, called from
 * the module's remove function.  Module ref counts are used
 * to ensure that this can't be done till all sockets using
 * that method are closed.
 */
void tcp_unregister_congestion_control(struct tcp_congestion_ops *ca)
{
	spin_lock(&tcp_cong_list_lock);
	list_del_rcu(&ca->list);
	spin_unlock(&tcp_cong_list_lock);
}
EXPORT_SYMBOL_GPL(tcp_unregister_congestion_control);
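
/*
 * Illustrative sketch (not part of the original file): how a minimal
 * out-of-tree congestion control module would use the two calls above.
 * The "hypo" algorithm and its module are hypothetical; it simply
 * reuses the reno helpers.  Only ssthresh and cong_avoid are required,
 * all other ops may be left NULL.
 */
#if 0
static struct tcp_congestion_ops tcp_hypo = {
	.name		= "hypo",
	.owner		= THIS_MODULE,
	.ssthresh	= tcp_reno_ssthresh,
	.cong_avoid	= tcp_reno_cong_avoid,
};

static int __init tcp_hypo_init(void)
{
	return tcp_register_congestion_control(&tcp_hypo);
}

static void __exit tcp_hypo_exit(void)
{
	/* blocks new users; module refcounting keeps existing sockets safe */
	tcp_unregister_congestion_control(&tcp_hypo);
}

module_init(tcp_hypo_init);
module_exit(tcp_hypo_exit);
#endif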
/* Assign choice of congestion control. */
void tcp_init_congestion_control(struct sock *sk)
{
	struct inet_connection_sock *icsk = inet_csk(sk);
	struct tcp_congestion_ops *ca;

	/* a choice was already made (e.g. via setsockopt), keep it */
	if (icsk->icsk_ca_ops != &tcp_init_congestion_ops)
		return;

	rcu_read_lock();
	list_for_each_entry_rcu(ca, &tcp_cong_list, list) {
		if (try_module_get(ca->owner)) {
			icsk->icsk_ca_ops = ca;
			break;
		}

		/* fallback to next available */
	}
	rcu_read_unlock();

	if (icsk->icsk_ca_ops->init)
		icsk->icsk_ca_ops->init(sk);
}
/* Manage refcounts on socket close. */
void tcp_cleanup_congestion_control(struct sock *sk)
{
	struct inet_connection_sock *icsk = inet_csk(sk);

	if (icsk->icsk_ca_ops->release)
		icsk->icsk_ca_ops->release(sk);
	module_put(icsk->icsk_ca_ops->owner);
}
/* Used by sysctl to change default congestion control */
int tcp_set_default_congestion_control(const char *name)
{
	struct tcp_congestion_ops *ca;
	int ret = -ENOENT;

	spin_lock(&tcp_cong_list_lock);
	ca = tcp_ca_find(name);
#ifdef CONFIG_KMOD
	if (!ca) {
		spin_unlock(&tcp_cong_list_lock);

		request_module("tcp_%s", name);
		spin_lock(&tcp_cong_list_lock);
		ca = tcp_ca_find(name);
	}
#endif

	if (ca) {
		ca->non_restricted = 1;	/* default is always allowed */
		list_move(&ca->list, &tcp_cong_list);
		ret = 0;
	}
	spin_unlock(&tcp_cong_list_lock);

	return ret;
}
/* Set default value from kernel configuration at bootup */
static int __init tcp_congestion_default(void)
{
	return tcp_set_default_congestion_control(CONFIG_DEFAULT_TCP_CONG);
}
late_initcall(tcp_congestion_default);
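
/*
 * Userspace sketch (illustrative, assumes procfs is mounted): the same
 * default can be changed at run time by writing an algorithm name to
 * the sysctl that this function backs.
 */
#if 0
#include <stdio.h>

int main(void)
{
	FILE *f = fopen("/proc/sys/net/ipv4/tcp_congestion_control", "w");

	if (!f)
		return 1;
	fputs("reno\n", f);
	return fclose(f) ? 1 : 0;
}
#endif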
/* Build string with list of available congestion control values */
void tcp_get_available_congestion_control(char *buf, size_t maxlen)
{
	struct tcp_congestion_ops *ca;
	size_t offs = 0;

	rcu_read_lock();
	list_for_each_entry_rcu(ca, &tcp_cong_list, list) {
		offs += snprintf(buf + offs, maxlen - offs,
				 "%s%s",
				 offs == 0 ? "" : " ", ca->name);
	}
	rcu_read_unlock();
}
/* Get current default congestion control */
void tcp_get_default_congestion_control(char *name)
{
	struct tcp_congestion_ops *ca;

	/* We will always have reno... */
	BUG_ON(list_empty(&tcp_cong_list));

	rcu_read_lock();
	ca = list_entry(tcp_cong_list.next, struct tcp_congestion_ops, list);
	strncpy(name, ca->name, TCP_CA_NAME_MAX);
	rcu_read_unlock();
}
/* Build list of non-restricted congestion control values */
void tcp_get_allowed_congestion_control(char *buf, size_t maxlen)
{
	struct tcp_congestion_ops *ca;
	size_t offs = 0;

	rcu_read_lock();
	list_for_each_entry_rcu(ca, &tcp_cong_list, list) {
		if (!ca->non_restricted)
			continue;
		offs += snprintf(buf + offs, maxlen - offs,
				 "%s%s",
				 offs == 0 ? "" : " ", ca->name);
	}
	rcu_read_unlock();
}
/* Change list of non-restricted congestion control */
int tcp_set_allowed_congestion_control(char *val)
{
	struct tcp_congestion_ops *ca;
	char *saved_clone, *clone, *name;
	int ret = 0;

	saved_clone = clone = kstrdup(val, GFP_USER);
	if (!clone)
		return -ENOMEM;

	spin_lock(&tcp_cong_list_lock);
	/* pass 1: check for bad entries */
	while ((name = strsep(&clone, " ")) && *name) {
		ca = tcp_ca_find(name);
		if (!ca) {
			ret = -ENOENT;
			goto out;
		}
	}

	/* pass 2: clear old values */
	list_for_each_entry_rcu(ca, &tcp_cong_list, list)
		ca->non_restricted = 0;

	/* pass 3: mark as allowed */
	while ((name = strsep(&val, " ")) && *name) {
		ca = tcp_ca_find(name);
		WARN_ON(!ca);
		if (ca)
			ca->non_restricted = 1;
	}
out:
	spin_unlock(&tcp_cong_list_lock);
	kfree(saved_clone);	/* strsep() advanced clone, free the original */

	return ret;
}
/* Change congestion control for socket */
int tcp_set_congestion_control(struct sock *sk, const char *name)
{
	struct inet_connection_sock *icsk = inet_csk(sk);
	struct tcp_congestion_ops *ca;
	int err = 0;

	rcu_read_lock();
	ca = tcp_ca_find(name);

	/* no change asking for existing value */
	if (ca == icsk->icsk_ca_ops)
		goto out;

#ifdef CONFIG_KMOD
	/* not found attempt to autoload module */
	if (!ca && capable(CAP_SYS_MODULE)) {
		rcu_read_unlock();
		request_module("tcp_%s", name);
		rcu_read_lock();
		ca = tcp_ca_find(name);
	}
#endif
	if (!ca)
		err = -ENOENT;

	else if (!(ca->non_restricted || capable(CAP_NET_ADMIN)))
		err = -EPERM;

	else if (!try_module_get(ca->owner))
		err = -EBUSY;

	else {
		tcp_cleanup_congestion_control(sk);
		icsk->icsk_ca_ops = ca;
		if (icsk->icsk_ca_ops->init)
			icsk->icsk_ca_ops->init(sk);
	}
out:
	rcu_read_unlock();
	return err;
}
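
/*
 * Userspace sketch (illustrative): the setsockopt() path that lands
 * here.  TCP_CONGESTION takes the algorithm name as a string; without
 * CAP_NET_ADMIN only non-restricted algorithms may be selected.
 */
#if 0
#include <string.h>
#include <netinet/in.h>
#include <netinet/tcp.h>
#include <sys/socket.h>

static int use_reno(int fd)
{
	return setsockopt(fd, IPPROTO_TCP, TCP_CONGESTION,
			  "reno", strlen("reno"));
}
#endif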
/*
 * Linear increase during slow start
 */
void tcp_slow_start(struct tcp_sock *tp)
{
	if (sysctl_tcp_abc) {
		/* RFC3465: Slow Start
		 * TCP sender SHOULD increase cwnd by the number of
		 * previously unacknowledged bytes ACKed by each incoming
		 * acknowledgment, provided the increase is not more than L
		 */
		if (tp->bytes_acked < tp->mss_cache)
			return;

		/* We MAY increase by 2 if discovered delayed ack */
		if (sysctl_tcp_abc > 1 && tp->bytes_acked >= 2*tp->mss_cache) {
			if (tp->snd_cwnd < tp->snd_cwnd_clamp)
				tp->snd_cwnd++;
		}
	}
	tp->bytes_acked = 0;

	if (tp->snd_cwnd < tp->snd_cwnd_clamp)
		tp->snd_cwnd++;
}
EXPORT_SYMBOL_GPL(tcp_slow_start);
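
/*
 * Worked example (not from the original source): with snd_cwnd = 4 and
 * ABC off, each of the ~4 ACKs arriving in one RTT bumps snd_cwnd by
 * one segment, so the window doubles every RTT.  With sysctl_tcp_abc
 * set, growth is metered in bytes_acked instead, so delayed or stretch
 * ACKs no longer slow the opening of the window.
 */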
/*
 * TCP Reno congestion control
 * This is a special case used for fallback as well.
 */

/* This is Jacobson's slow start and congestion avoidance.
 * SIGCOMM '88, p. 328.
 */
void tcp_reno_cong_avoid(struct sock *sk, u32 ack, u32 rtt, u32 in_flight,
			 int flag)
{
	struct tcp_sock *tp = tcp_sk(sk);

	if (!tcp_is_cwnd_limited(sk, in_flight))
		return;

	/* In "safe" area, increase. */
	if (tp->snd_cwnd <= tp->snd_ssthresh)
		tcp_slow_start(tp);

	/* In dangerous area, increase slowly. */
	else if (sysctl_tcp_abc) {
		/* RFC3465: Appropriate Byte Count
		 * increase once for each full cwnd acked
		 */
		if (tp->bytes_acked >= tp->snd_cwnd*tp->mss_cache) {
			tp->bytes_acked -= tp->snd_cwnd*tp->mss_cache;
			if (tp->snd_cwnd < tp->snd_cwnd_clamp)
				tp->snd_cwnd++;
		}
	} else {
		/* In theory this is tp->snd_cwnd += 1 / tp->snd_cwnd */
		if (tp->snd_cwnd_cnt >= tp->snd_cwnd) {
			if (tp->snd_cwnd < tp->snd_cwnd_clamp)
				tp->snd_cwnd++;
			tp->snd_cwnd_cnt = 0;
		} else
			tp->snd_cwnd_cnt++;
	}
}
EXPORT_SYMBOL_GPL(tcp_reno_cong_avoid);
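
/*
 * Illustrative userspace sketch (not kernel code) of the snd_cwnd_cnt
 * trick above: the window grows by one segment only after a full
 * window's worth of ACKs, i.e. roughly +1 MSS per RTT.
 */
#if 0
#include <stdio.h>

int main(void)
{
	unsigned int cwnd = 10, cwnd_cnt = 0, acks;

	for (acks = 0; acks < 30; acks++) {
		if (cwnd_cnt >= cwnd) {
			cwnd++;		/* one full window acked */
			cwnd_cnt = 0;
		} else
			cwnd_cnt++;
	}
	printf("cwnd after 30 acks: %u\n", cwnd);	/* prints 12 */
	return 0;
}
#endif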
/* Slow start threshold is half the congestion window (min 2) */
u32 tcp_reno_ssthresh(struct sock *sk)
{
	const struct tcp_sock *tp = tcp_sk(sk);
	return max(tp->snd_cwnd >> 1U, 2U);
}
EXPORT_SYMBOL_GPL(tcp_reno_ssthresh);
/* Lower bound on congestion window with halving. */
u32 tcp_reno_min_cwnd(const struct sock *sk)
{
	const struct tcp_sock *tp = tcp_sk(sk);
	return tp->snd_ssthresh/2;
}
EXPORT_SYMBOL_GPL(tcp_reno_min_cwnd);
struct tcp_congestion_ops tcp_reno = {
	.name		= "reno",
	.non_restricted = 1,
	.owner		= THIS_MODULE,
	.ssthresh	= tcp_reno_ssthresh,
	.cong_avoid	= tcp_reno_cong_avoid,
	.min_cwnd	= tcp_reno_min_cwnd,
};
/* Initial congestion control used (until SYN)
 * really reno under another name so we can tell the difference
 * during tcp_set_default_congestion_control
 */
struct tcp_congestion_ops tcp_init_congestion_ops = {
	.name		= "",
	.owner		= THIS_MODULE,
	.ssthresh	= tcp_reno_ssthresh,
	.cong_avoid	= tcp_reno_cong_avoid,
	.min_cwnd	= tcp_reno_min_cwnd,
};
EXPORT_SYMBOL_GPL(tcp_init_congestion_ops);