net/ipv4/tcp_scalable.c
/* Tom Kelly's Scalable TCP
 *
 * See http://www.deneholme.net/tom/scalable/
 *
 * John Heffner <jheffner@sc.edu>
 */

#include <linux/module.h>
#include <net/tcp.h>
/* These factors are derived from the values recommended in the paper:
 * 0.01 and 7/8. We use 50 instead of 100 to account for
 * delayed ACKs.
 */
#define TCP_SCALABLE_AI_CNT     50U
#define TCP_SCALABLE_MD_SCALE   3
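/*
 * Editorial note (not in the original source): the paper's additive
 * increase of a = 0.01 means cwnd grows by one segment per roughly
 * 1/a = 100 ACKed segments; since a delayed ACK covers about two
 * segments, the per-ACK count is halved to 50. The 7/8 multiplicative
 * decrease is implemented as cwnd - (cwnd >> 3): e.g. a cwnd of 80
 * segments becomes 80 - 10 = 70.
 */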
struct scalable {
        u32 loss_cwnd;        /* cwnd at the last loss, used by undo_cwnd */
};
static void tcp_scalable_cong_avoid(struct sock *sk, u32 ack, u32 acked)
{
        struct tcp_sock *tp = tcp_sk(sk);

        if (!tcp_is_cwnd_limited(sk))
                return;

        /* Slow start below ssthresh; otherwise additive increase of one
         * segment per min(cwnd, TCP_SCALABLE_AI_CNT) ACKs.
         */
        if (tcp_in_slow_start(tp))
                tcp_slow_start(tp, acked);
        else
                tcp_cong_avoid_ai(tp, min(tp->snd_cwnd, TCP_SCALABLE_AI_CNT),
                                  1);
}
static u32 tcp_scalable_ssthresh(struct sock *sk)
{
        const struct tcp_sock *tp = tcp_sk(sk);
        struct scalable *ca = inet_csk_ca(sk);

        ca->loss_cwnd = tp->snd_cwnd;        /* remember cwnd for a later undo */

        /* Multiplicative decrease to 7/8 of cwnd, never below two segments. */
        return max(tp->snd_cwnd - (tp->snd_cwnd >> TCP_SCALABLE_MD_SCALE), 2U);
}
static u32 tcp_scalable_cwnd_undo(struct sock *sk)
{
        const struct scalable *ca = inet_csk_ca(sk);

        return max(tcp_sk(sk)->snd_cwnd, ca->loss_cwnd);
}
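/*
 * Editorial note (not in the original source): only ssthresh, cong_avoid
 * and undo_cwnd are implemented below; the remaining tcp_congestion_ops
 * callbacks are optional and the TCP stack simply skips them when they
 * are left unset.
 */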
static struct tcp_congestion_ops tcp_scalable __read_mostly = {
        .ssthresh       = tcp_scalable_ssthresh,
        .undo_cwnd      = tcp_scalable_cwnd_undo,
        .cong_avoid     = tcp_scalable_cong_avoid,
        .owner          = THIS_MODULE,
        .name           = "scalable",
};
static int __init tcp_scalable_register(void)
{
        return tcp_register_congestion_control(&tcp_scalable);
}
static void __exit tcp_scalable_unregister(void)
{
        tcp_unregister_congestion_control(&tcp_scalable);
}
module_init(tcp_scalable_register);
module_exit(tcp_scalable_unregister);

MODULE_AUTHOR("John Heffner");
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Scalable TCP");
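Usage sketch (editorial addition, not part of the kernel source): once this
module is built and loaded, a userspace program on Linux can opt an individual
socket into the "scalable" algorithm via the TCP_CONGESTION socket option,
assuming the algorithm is permitted by net.ipv4.tcp_allowed_congestion_control.

/* Illustrative userspace example: select Scalable TCP for one socket. */
#include <stdio.h>
#include <string.h>
#include <netinet/in.h>
#include <netinet/tcp.h>
#include <sys/socket.h>
#include <unistd.h>

int main(void)
{
        int fd = socket(AF_INET, SOCK_STREAM, 0);
        if (fd < 0) {
                perror("socket");
                return 1;
        }

        /* Ask the kernel to use Scalable TCP on this socket; fails with
         * ENOENT if the algorithm is unavailable, or EPERM if it is not
         * in the allowed list for unprivileged users.
         */
        if (setsockopt(fd, IPPROTO_TCP, TCP_CONGESTION,
                       "scalable", strlen("scalable")) < 0)
                perror("setsockopt(TCP_CONGESTION)");

        /* Read back the congestion control actually in effect. */
        char name[16] = { 0 };
        socklen_t len = sizeof(name);
        if (getsockopt(fd, IPPROTO_TCP, TCP_CONGESTION, name, &len) == 0)
                printf("congestion control: %s\n", name);

        close(fd);
        return 0;
}

Alternatively, the system-wide default can be switched with
sysctl net.ipv4.tcp_congestion_control=scalable.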