/*
 * Pluggable TCP congestion control support and NewReno
 * congestion control.
 * Based on ideas from I/O scheduler support and Web100.
 *
 * Copyright (C) 2005 Stephen Hemminger <shemminger@osdl.org>
 */

#include <linux/config.h>
#include <linux/module.h>
#include <linux/types.h>
#include <linux/list.h>
#include <net/tcp.h>

static DEFINE_SPINLOCK(tcp_cong_list_lock);
static LIST_HEAD(tcp_cong_list);

/* Simple linear search, don't expect many entries! */
static struct tcp_congestion_ops *tcp_ca_find(const char *name)
{
	struct tcp_congestion_ops *e;

	list_for_each_entry_rcu(e, &tcp_cong_list, list) {
		if (strcmp(e->name, name) == 0)
			return e;
	}

	return NULL;
}

/*
 * Attach new congestion control algorithm to the list
 * of available options.
 */
int tcp_register_congestion_control(struct tcp_congestion_ops *ca)
{
	int ret = 0;

	/* All algorithms must implement ssthresh, cong_avoid and min_cwnd ops. */
	if (!ca->ssthresh || !ca->cong_avoid || !ca->min_cwnd) {
		printk(KERN_ERR "TCP %s does not implement required ops\n",
		       ca->name);
		return -EINVAL;
	}

	spin_lock(&tcp_cong_list_lock);
	if (tcp_ca_find(ca->name)) {
		printk(KERN_NOTICE "TCP %s already registered\n", ca->name);
		ret = -EEXIST;
	} else {
		list_add_rcu(&ca->list, &tcp_cong_list);
		printk(KERN_INFO "TCP %s registered\n", ca->name);
	}
	spin_unlock(&tcp_cong_list_lock);

	return ret;
}
EXPORT_SYMBOL_GPL(tcp_register_congestion_control);

/*
 * Remove congestion control algorithm, called from
 * the module's remove function.  Module ref counts are used
 * to ensure that this can't be done till all sockets using
 * that method are closed.
 */
void tcp_unregister_congestion_control(struct tcp_congestion_ops *ca)
{
	spin_lock(&tcp_cong_list_lock);
	list_del_rcu(&ca->list);
	spin_unlock(&tcp_cong_list_lock);
}
EXPORT_SYMBOL_GPL(tcp_unregister_congestion_control);
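
/*
 * Example of how a module uses the two calls above: fill in a
 * tcp_congestion_ops and register it on load.  A minimal sketch only;
 * "tcp_example" and its reuse of the Reno ops are illustrative, not a
 * real in-tree module.
 */
#if 0
static struct tcp_congestion_ops tcp_example = {
	.name		= "example",
	.owner		= THIS_MODULE,
	.ssthresh	= tcp_reno_ssthresh,	/* reuse the exported Reno ops */
	.cong_avoid	= tcp_reno_cong_avoid,
	.min_cwnd	= tcp_reno_min_cwnd,
};

static int __init tcp_example_register(void)
{
	return tcp_register_congestion_control(&tcp_example);
}

static void __exit tcp_example_unregister(void)
{
	tcp_unregister_congestion_control(&tcp_example);
}

module_init(tcp_example_register);
module_exit(tcp_example_unregister);
MODULE_LICENSE("GPL");
#endif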

/* Assign choice of congestion control. */
void tcp_init_congestion_control(struct sock *sk)
{
	struct inet_connection_sock *icsk = inet_csk(sk);
	struct tcp_congestion_ops *ca;

	/* Socket already switched away from the initial placeholder? */
	if (icsk->icsk_ca_ops != &tcp_init_congestion_ops)
		return;

	rcu_read_lock();
	list_for_each_entry_rcu(ca, &tcp_cong_list, list) {
		/* Pick the first entry whose module we can pin. */
		if (try_module_get(ca->owner)) {
			icsk->icsk_ca_ops = ca;
			break;
		}
	}
	rcu_read_unlock();

	if (icsk->icsk_ca_ops->init)
		icsk->icsk_ca_ops->init(sk);
}

/* Manage refcounts on socket close. */
void tcp_cleanup_congestion_control(struct sock *sk)
{
	struct inet_connection_sock *icsk = inet_csk(sk);

	if (icsk->icsk_ca_ops->release)
		icsk->icsk_ca_ops->release(sk);
	module_put(icsk->icsk_ca_ops->owner);
}

/* Used by sysctl to change default congestion control */
int tcp_set_default_congestion_control(const char *name)
{
	struct tcp_congestion_ops *ca;
	int ret = -ENOENT;

	spin_lock(&tcp_cong_list_lock);
	ca = tcp_ca_find(name);
#ifdef CONFIG_KMOD
	if (!ca) {
		/* Not loaded yet; try to autoload "tcp_<name>" and rescan. */
		spin_unlock(&tcp_cong_list_lock);

		request_module("tcp_%s", name);
		spin_lock(&tcp_cong_list_lock);
		ca = tcp_ca_find(name);
	}
#endif

	if (ca) {
		list_move(&ca->list, &tcp_cong_list);
		ret = 0;
	}
	spin_unlock(&tcp_cong_list_lock);

	return ret;
}
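
/*
 * Note the convention used above: the head of tcp_cong_list is the
 * default.  After tcp_set_default_congestion_control("reno"), reno sits
 * at the head and tcp_get_default_congestion_control() below reports
 * "reno".
 */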

/* Get current default congestion control */
void tcp_get_default_congestion_control(char *name)
{
	struct tcp_congestion_ops *ca;

	/* We will always have reno... */
	BUG_ON(list_empty(&tcp_cong_list));

	rcu_read_lock();
	ca = list_entry(tcp_cong_list.next, struct tcp_congestion_ops, list);
	strncpy(name, ca->name, TCP_CA_NAME_MAX);
	rcu_read_unlock();
}

/* Change congestion control for socket */
int tcp_set_congestion_control(struct sock *sk, const char *name)
{
	struct inet_connection_sock *icsk = inet_csk(sk);
	struct tcp_congestion_ops *ca;
	int err = 0;

	rcu_read_lock();
	ca = tcp_ca_find(name);
	/* No change asked for; nothing to do. */
	if (ca == icsk->icsk_ca_ops)
		goto out;

	if (!ca)
		err = -ENOENT;
	else if (!try_module_get(ca->owner))
		err = -EBUSY;
	else {
		tcp_cleanup_congestion_control(sk);
		icsk->icsk_ca_ops = ca;
		if (icsk->icsk_ca_ops->init)
			icsk->icsk_ca_ops->init(sk);
	}
 out:
	rcu_read_unlock();
	return err;
}
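
/*
 * This is reached via the TCP_CONGESTION socket option.  A minimal
 * userspace sketch (assuming TCP_CONGESTION from <linux/tcp.h> is
 * plumbed through setsockopt on this kernel):
 *
 *	const char alg[] = "reno";
 *
 *	if (setsockopt(fd, IPPROTO_TCP, TCP_CONGESTION, alg, sizeof(alg)) < 0)
 *		perror("TCP_CONGESTION");
 */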

/*
 * TCP Reno congestion control
 * This is a special case used as a fallback as well.
 */

/* This is Jacobson's slow start and congestion avoidance.
 * SIGCOMM '88, p. 328.
 */
void tcp_reno_cong_avoid(struct sock *sk, u32 ack, u32 rtt, u32 in_flight,
			 int flag)
{
	struct tcp_sock *tp = tcp_sk(sk);

	/* Window is not full, so no need to grow it. */
	if (in_flight < tp->snd_cwnd)
		return;

	if (tp->snd_cwnd <= tp->snd_ssthresh) {
		/* In "safe" area, increase. */
		if (tp->snd_cwnd < tp->snd_cwnd_clamp)
			tp->snd_cwnd++;
	} else {
		/* In dangerous area, increase slowly.
		 * In theory this is tp->snd_cwnd += 1 / tp->snd_cwnd
		 */
		if (tp->snd_cwnd_cnt >= tp->snd_cwnd) {
			if (tp->snd_cwnd < tp->snd_cwnd_clamp)
				tp->snd_cwnd++;
			tp->snd_cwnd_cnt = 0;
		} else
			tp->snd_cwnd_cnt++;
	}
}
EXPORT_SYMBOL_GPL(tcp_reno_cong_avoid);
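
/*
 * Worked example of the slow increase above: with snd_cwnd = 10,
 * snd_cwnd_cnt must reach 10 before snd_cwnd grows to 11, i.e. about
 * one full window of ACKs passes per increment, which is the classic
 * additive increase of one segment per round-trip time.
 */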

/* Slow start threshold is half the congestion window (min 2) */
u32 tcp_reno_ssthresh(struct sock *sk)
{
	const struct tcp_sock *tp = tcp_sk(sk);
	return max(tp->snd_cwnd >> 1U, 2U);
}
EXPORT_SYMBOL_GPL(tcp_reno_ssthresh);
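
/*
 * For example, snd_cwnd = 20 yields ssthresh 10, while snd_cwnd = 3
 * yields max(1, 2) = 2, keeping the threshold at the two-segment floor.
 */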

/* Lower bound on congestion window. */
u32 tcp_reno_min_cwnd(struct sock *sk)
{
	const struct tcp_sock *tp = tcp_sk(sk);
	return tp->snd_ssthresh / 2;
}
EXPORT_SYMBOL_GPL(tcp_reno_min_cwnd);

struct tcp_congestion_ops tcp_reno = {
	.name		= "reno",
	.owner		= THIS_MODULE,
	.ssthresh	= tcp_reno_ssthresh,
	.cong_avoid	= tcp_reno_cong_avoid,
	.min_cwnd	= tcp_reno_min_cwnd,
};

/* Initial congestion control used (until SYN).
 * Really Reno under another name so we can tell the difference
 * during tcp_set_default_congestion_control().
 */
struct tcp_congestion_ops tcp_init_congestion_ops = {
	.name		= "",
	.owner		= THIS_MODULE,
	.ssthresh	= tcp_reno_ssthresh,
	.cong_avoid	= tcp_reno_cong_avoid,
	.min_cwnd	= tcp_reno_min_cwnd,
};
EXPORT_SYMBOL_GPL(tcp_init_congestion_ops);