net/ipv4/tcp_memcontrol.c
#include <net/tcp.h>
#include <net/tcp_memcontrol.h>
#include <net/sock.h>
#include <net/ip.h>
#include <linux/nsproxy.h>
#include <linux/memcontrol.h>
#include <linux/module.h>
static void memcg_tcp_enter_memory_pressure(struct sock *sk)
{
	/* memory_pressure is a plain flag here, so enter pressure
	 * unconditionally; gating the store on the flag already being
	 * set would make this function a no-op. */
	sk->sk_cgrp->memory_pressure = 1;
}
EXPORT_SYMBOL(memcg_tcp_enter_memory_pressure);
int tcp_init_cgroup(struct mem_cgroup *memcg, struct cgroup_subsys *ss)
{
	/*
	 * The root cgroup does not use res_counters, but rather
	 * relies on the data already collected by the network
	 * subsystem.
	 */
	struct res_counter *res_parent = NULL;
	struct cg_proto *cg_proto, *parent_cg;
	struct mem_cgroup *parent = parent_mem_cgroup(memcg);

	cg_proto = tcp_prot.proto_cgroup(memcg);
	if (!cg_proto)
		return 0;

	cg_proto->sysctl_mem[0] = sysctl_tcp_mem[0];
	cg_proto->sysctl_mem[1] = sysctl_tcp_mem[1];
	cg_proto->sysctl_mem[2] = sysctl_tcp_mem[2];
	cg_proto->memory_pressure = 0;
	cg_proto->memcg = memcg;

	parent_cg = tcp_prot.proto_cgroup(parent);
	if (parent_cg)
		res_parent = &parent_cg->memory_allocated;

	res_counter_init(&cg_proto->memory_allocated, res_parent);
	percpu_counter_init(&cg_proto->sockets_allocated, 0);

	return 0;
}
EXPORT_SYMBOL(tcp_init_cgroup);
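res_parent is what links a child's memory_allocated counter to its parent's, so a charge against one memcg propagates up the hierarchy, while the root (res_parent == NULL) keeps relying on the global TCP counters. A minimal userspace sketch of that parent-chain idea; struct counter and counter_charge() are hypothetical stand-ins for illustration, not the res_counter API:

	#include <stddef.h>
	#include <stdio.h>

	struct counter {
		long usage, limit;
		struct counter *parent;	/* NULL at the root, as in tcp_init_cgroup() */
	};

	static int counter_charge(struct counter *c, long amount)
	{
		struct counter *cur;

		/* First verify no level would exceed its limit... */
		for (cur = c; cur; cur = cur->parent)
			if (cur->usage + amount > cur->limit)
				return -1;
		/* ...then charge every level up to the root. */
		for (cur = c; cur; cur = cur->parent)
			cur->usage += amount;
		return 0;
	}

	int main(void)
	{
		struct counter root = { .usage = 0, .limit = 1024, .parent = NULL };
		struct counter child = { .usage = 0, .limit = 256, .parent = &root };

		printf("charge 200 -> %d\n", counter_charge(&child, 200)); /* fits */
		printf("charge 100 -> %d\n", counter_charge(&child, 100)); /* exceeds child */
		return 0;
	}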
void tcp_destroy_cgroup(struct mem_cgroup *memcg)
{
	struct cg_proto *cg_proto;

	cg_proto = tcp_prot.proto_cgroup(memcg);
	if (!cg_proto)
		return;

	percpu_counter_destroy(&cg_proto->sockets_allocated);
}
EXPORT_SYMBOL(tcp_destroy_cgroup);
static int tcp_update_limit(struct mem_cgroup *memcg, u64 val)
{
	struct cg_proto *cg_proto;
	int i;
	int ret;

	cg_proto = tcp_prot.proto_cgroup(memcg);
	if (!cg_proto)
		return -EINVAL;

	if (val > RES_COUNTER_MAX)
		val = RES_COUNTER_MAX;

	ret = res_counter_set_limit(&cg_proto->memory_allocated, val);
	if (ret)
		return ret;

	for (i = 0; i < 3; i++)
		cg_proto->sysctl_mem[i] = min_t(long, val >> PAGE_SHIFT,
						sysctl_tcp_mem[i]);

	if (val == RES_COUNTER_MAX) {
		clear_bit(MEMCG_SOCK_ACTIVE, &cg_proto->flags);
	} else {
		/*
		 * The active bit needs to be written after the static_key
		 * update. This is what guarantees that the socket activation
		 * function is the last one to run. See sock_update_memcg() for
		 * details, and note that we don't mark any socket as belonging
		 * to this memcg until that flag is up.
		 *
		 * We need to do this, because static_keys will span multiple
		 * sites, but we can't control their order. If we mark a socket
		 * as accounted, but the accounting functions are not patched in
		 * yet, we'll lose accounting.
		 *
		 * We never race with the readers in sock_update_memcg(),
		 * because when this value changes, the code to process it is
		 * not patched in yet.
		 *
		 * The activated bit is used to guarantee that no two writers
		 * will do the update in the same memcg. Without that, we can't
		 * properly shut down the static key.
		 */
		if (!test_and_set_bit(MEMCG_SOCK_ACTIVATED, &cg_proto->flags))
			static_key_slow_inc(&memcg_socket_limit_enabled);
		set_bit(MEMCG_SOCK_ACTIVE, &cg_proto->flags);
	}

	return 0;
}
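The two flag bits implement a once-only enable with a publication order: whichever writer wins test_and_set_bit(MEMCG_SOCK_ACTIVATED) increments the static key exactly once, and MEMCG_SOCK_ACTIVE is set only afterwards, so a reader that observes ACTIVE also observes the patched call sites. A userspace analogue of that pattern using C11 atomics; the names, and the plain counter standing in for the static key, are illustrative assumptions:

	#include <stdatomic.h>
	#include <stdio.h>

	#define SOCK_ACTIVATED	(1UL << 0)
	#define SOCK_ACTIVE	(1UL << 1)

	static atomic_ulong flags;
	static atomic_int key_count;	/* stands in for the static_key */

	static void enable_accounting(void)
	{
		/* Only the first caller flips the "static key"... */
		if (!(atomic_fetch_or(&flags, SOCK_ACTIVATED) & SOCK_ACTIVATED))
			atomic_fetch_add(&key_count, 1);
		/* ...and ACTIVE is published last, so anyone who sees it
		 * also sees the key already enabled. */
		atomic_fetch_or(&flags, SOCK_ACTIVE);
	}

	int main(void)
	{
		enable_accounting();
		enable_accounting();	/* must not bump the key a second time */
		printf("key enabled %d time(s)\n", atomic_load(&key_count));
		return 0;
	}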
static int tcp_cgroup_write(struct cgroup_subsys_state *css, struct cftype *cft,
			    const char *buffer)
{
	struct mem_cgroup *memcg = mem_cgroup_from_css(css);
	unsigned long long val;
	int ret = 0;

	switch (cft->private) {
	case RES_LIMIT:
		/* see memcontrol.c */
		ret = res_counter_memparse_write_strategy(buffer, &val);
		if (ret)
			break;
		ret = tcp_update_limit(memcg, val);
		break;
	default:
		ret = -EINVAL;
		break;
	}
	return ret;
}
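The write handler accepts memparse-style strings ("64M", "1G", or "-1" for unlimited) and hands the resulting byte count to tcp_update_limit(). A rough standalone analogue of that suffix handling, assuming only the K/M/G suffixes matter; parse_bytes() is a hypothetical helper, not a kernel function:

	#include <stdio.h>
	#include <stdlib.h>
	#include <string.h>

	/* Hypothetical memparse-style parser: K/M/G suffixes, "-1" = unlimited. */
	static unsigned long long parse_bytes(const char *s)
	{
		char *end;
		unsigned long long val;

		if (!strcmp(s, "-1"))
			return ~0ULL;	/* stand-in for RES_COUNTER_MAX */

		val = strtoull(s, &end, 0);
		switch (*end) {
		case 'G': case 'g':
			val <<= 10;	/* fall through */
		case 'M': case 'm':
			val <<= 10;	/* fall through */
		case 'K': case 'k':
			val <<= 10;
		}
		return val;
	}

	int main(void)
	{
		printf("%llu\n", parse_bytes("64M"));	/* 67108864 */
		printf("%llu\n", parse_bytes("-1"));	/* unlimited sentinel */
		return 0;
	}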
static u64 tcp_read_stat(struct mem_cgroup *memcg, int type, u64 default_val)
{
	struct cg_proto *cg_proto;

	cg_proto = tcp_prot.proto_cgroup(memcg);
	if (!cg_proto)
		return default_val;

	return res_counter_read_u64(&cg_proto->memory_allocated, type);
}
static u64 tcp_read_usage(struct mem_cgroup *memcg)
{
	struct cg_proto *cg_proto;

	cg_proto = tcp_prot.proto_cgroup(memcg);
	if (!cg_proto)
		return atomic_long_read(&tcp_memory_allocated) << PAGE_SHIFT;

	return res_counter_read_u64(&cg_proto->memory_allocated, RES_USAGE);
}
static u64 tcp_cgroup_read(struct cgroup_subsys_state *css, struct cftype *cft)
{
	struct mem_cgroup *memcg = mem_cgroup_from_css(css);
	u64 val;

	switch (cft->private) {
	case RES_LIMIT:
		val = tcp_read_stat(memcg, RES_LIMIT, RES_COUNTER_MAX);
		break;
	case RES_USAGE:
		val = tcp_read_usage(memcg);
		break;
	case RES_FAILCNT:
	case RES_MAX_USAGE:
		val = tcp_read_stat(memcg, cft->private, 0);
		break;
	default:
		BUG();
	}
	return val;
}
static int tcp_cgroup_reset(struct cgroup_subsys_state *css, unsigned int event)
{
	struct mem_cgroup *memcg;
	struct cg_proto *cg_proto;

	memcg = mem_cgroup_from_css(css);
	cg_proto = tcp_prot.proto_cgroup(memcg);
	if (!cg_proto)
		return 0;

	switch (event) {
	case RES_MAX_USAGE:
		res_counter_reset_max(&cg_proto->memory_allocated);
		break;
	case RES_FAILCNT:
		res_counter_reset_failcnt(&cg_proto->memory_allocated);
		break;
	}

	return 0;
}
static struct cftype tcp_files[] = {
	{
		.name = "kmem.tcp.limit_in_bytes",
		.write_string = tcp_cgroup_write,
		.read_u64 = tcp_cgroup_read,
		.private = RES_LIMIT,
	},
	{
		.name = "kmem.tcp.usage_in_bytes",
		.read_u64 = tcp_cgroup_read,
		.private = RES_USAGE,
	},
	{
		.name = "kmem.tcp.failcnt",
		.private = RES_FAILCNT,
		.trigger = tcp_cgroup_reset,
		.read_u64 = tcp_cgroup_read,
	},
	{
		.name = "kmem.tcp.max_usage_in_bytes",
		.private = RES_MAX_USAGE,
		.trigger = tcp_cgroup_reset,
		.read_u64 = tcp_cgroup_read,
	},
	{ }	/* terminate */
};
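Once registered, these entries surface through the memory controller with a "memory." prefix, e.g. memory.kmem.tcp.usage_in_bytes. A hypothetical userspace probe of that file; the cgroupfs mount point and the group name below are assumptions for illustration, not anything this file defines:

	#include <stdio.h>

	int main(void)
	{
		/* Mount point and group name are assumed, not guaranteed. */
		const char *base = "/sys/fs/cgroup/memory/mygroup";
		char path[256], buf[64];
		FILE *f;

		snprintf(path, sizeof(path),
			 "%s/memory.kmem.tcp.usage_in_bytes", base);
		f = fopen(path, "r");
		if (f) {
			if (fgets(buf, sizeof(buf), f))
				printf("tcp usage: %s", buf);
			fclose(f);
		}
		return 0;
	}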
static int __init tcp_memcontrol_init(void)
{
	WARN_ON(cgroup_add_cftypes(&mem_cgroup_subsys, tcp_files));
	return 0;
}
__initcall(tcp_memcontrol_init);