block: make unplug timer trace event correspond to the schedule() unplug
[linux-2.6/linux-acpi-2.6/ibm-acpi-2.6.git] / kernel / cgroup_freezer.c
blobe7bebb7c6c38ff7ddaefb807d7e50cd1bcc1adbc
1 /*
2 * cgroup_freezer.c - control group freezer subsystem
4 * Copyright IBM Corporation, 2007
6 * Author : Cedric Le Goater <clg@fr.ibm.com>
8 * This program is free software; you can redistribute it and/or modify it
9 * under the terms of version 2.1 of the GNU Lesser General Public License
10 * as published by the Free Software Foundation.
12 * This program is distributed in the hope that it would be useful, but
13 * WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
17 #include <linux/module.h>
18 #include <linux/slab.h>
19 #include <linux/cgroup.h>
20 #include <linux/fs.h>
21 #include <linux/uaccess.h>
22 #include <linux/freezer.h>
23 #include <linux/seq_file.h>
/*
 * Per-cgroup freezer state. Transitions are driven by userspace writes
 * to the freezer.state control file (see the state diagram below).
 */
enum freezer_state {
	CGROUP_THAWED = 0,	/* all tasks runnable */
	CGROUP_FREEZING,	/* freeze requested; not every task frozen yet */
	CGROUP_FROZEN,		/* every task in the cgroup is frozen */
};
/* Freezer subsystem state attached to each cgroup. */
struct freezer {
	struct cgroup_subsys_state css;	/* embedded css; container_of() recovers the freezer */
	enum freezer_state state;	/* current state; see enum freezer_state */
	spinlock_t lock; /* protects _writes_ to state */
};
37 static inline struct freezer *cgroup_freezer(
38 struct cgroup *cgroup)
40 return container_of(
41 cgroup_subsys_state(cgroup, freezer_subsys_id),
42 struct freezer, css);
45 static inline struct freezer *task_freezer(struct task_struct *task)
47 return container_of(task_subsys_state(task, freezer_subsys_id),
48 struct freezer, css);
51 static inline int __cgroup_freezing_or_frozen(struct task_struct *task)
53 enum freezer_state state = task_freezer(task)->state;
54 return (state == CGROUP_FREEZING) || (state == CGROUP_FROZEN);
/*
 * Locked variant of __cgroup_freezing_or_frozen(): takes task_lock()
 * to pin the task's cgroup while its freezer state is sampled.
 */
int cgroup_freezing_or_frozen(struct task_struct *task)
{
	int ret;

	task_lock(task);
	ret = __cgroup_freezing_or_frozen(task);
	task_unlock(task);

	return ret;
}
/*
 * cgroups_write_string() limits the size of freezer state strings to
 * CGROUP_LOCAL_BUFFER_SIZE
 */
static const char *freezer_state_strs[] = {
	"THAWED",	/* CGROUP_THAWED */
	"FREEZING",	/* CGROUP_FREEZING */
	"FROZEN",	/* CGROUP_FROZEN */
};
77 * State diagram
78 * Transitions are caused by userspace writes to the freezer.state file.
79 * The values in parenthesis are state labels. The rest are edge labels.
81 * (THAWED) --FROZEN--> (FREEZING) --FROZEN--> (FROZEN)
82 * ^ ^ | |
83 * | \_______THAWED_______/ |
84 * \__________________________THAWED____________/
87 struct cgroup_subsys freezer_subsys;
89 /* Locks taken and their ordering
90 * ------------------------------
91 * cgroup_mutex (AKA cgroup_lock)
92 * freezer->lock
93 * css_set_lock
94 * task->alloc_lock (AKA task_lock)
95 * task->sighand->siglock
97 * cgroup code forces css_set_lock to be taken before task->alloc_lock
99 * freezer_create(), freezer_destroy():
100 * cgroup_mutex [ by cgroup core ]
102 * freezer_can_attach():
103 * cgroup_mutex (held by caller of can_attach)
105 * cgroup_freezing_or_frozen():
106 * task->alloc_lock (to get task's cgroup)
108 * freezer_fork() (preserving fork() performance means can't take cgroup_mutex):
109 * freezer->lock
110 * sighand->siglock (if the cgroup is freezing)
112 * freezer_read():
113 * cgroup_mutex
114 * freezer->lock
115 * write_lock css_set_lock (cgroup iterator start)
116 * task->alloc_lock
117 * read_lock css_set_lock (cgroup iterator start)
119 * freezer_write() (freeze):
120 * cgroup_mutex
121 * freezer->lock
122 * write_lock css_set_lock (cgroup iterator start)
123 * task->alloc_lock
124 * read_lock css_set_lock (cgroup iterator start)
125 * sighand->siglock (fake signal delivery inside freeze_task())
127 * freezer_write() (unfreeze):
128 * cgroup_mutex
129 * freezer->lock
130 * write_lock css_set_lock (cgroup iterator start)
131 * task->alloc_lock
132 * read_lock css_set_lock (cgroup iterator start)
133 * task->alloc_lock (inside thaw_process(), prevents race with refrigerator())
134 * sighand->siglock
136 static struct cgroup_subsys_state *freezer_create(struct cgroup_subsys *ss,
137 struct cgroup *cgroup)
139 struct freezer *freezer;
141 freezer = kzalloc(sizeof(struct freezer), GFP_KERNEL);
142 if (!freezer)
143 return ERR_PTR(-ENOMEM);
145 spin_lock_init(&freezer->lock);
146 freezer->state = CGROUP_THAWED;
147 return &freezer->css;
/* Release the freezer state allocated by freezer_create(). */
static void freezer_destroy(struct cgroup_subsys *ss,
			    struct cgroup *cgroup)
{
	struct freezer *freezer = cgroup_freezer(cgroup);

	kfree(freezer);
}
157 * The call to cgroup_lock() in the freezer.state write method prevents
158 * a write to that file racing against an attach, and hence the
159 * can_attach() result will remain valid until the attach completes.
161 static int freezer_can_attach(struct cgroup_subsys *ss,
162 struct cgroup *new_cgroup,
163 struct task_struct *task, bool threadgroup)
165 struct freezer *freezer;
168 * Anything frozen can't move or be moved to/from.
171 freezer = cgroup_freezer(new_cgroup);
172 if (freezer->state != CGROUP_THAWED)
173 return -EBUSY;
175 rcu_read_lock();
176 if (__cgroup_freezing_or_frozen(task)) {
177 rcu_read_unlock();
178 return -EBUSY;
180 rcu_read_unlock();
182 if (threadgroup) {
183 struct task_struct *c;
185 rcu_read_lock();
186 list_for_each_entry_rcu(c, &task->thread_group, thread_group) {
187 if (__cgroup_freezing_or_frozen(c)) {
188 rcu_read_unlock();
189 return -EBUSY;
192 rcu_read_unlock();
195 return 0;
/*
 * Fork hook: if the child is born into a FREEZING cgroup, start freezing
 * it immediately so it cannot escape an in-progress freeze.  Called from
 * the cgroup fork path; the root cgroup is exempt (it is never frozen).
 */
static void freezer_fork(struct cgroup_subsys *ss, struct task_struct *task)
{
	struct freezer *freezer;

	/*
	 * No lock is needed, since the task isn't on tasklist yet,
	 * so it can't be moved to another cgroup, which means the
	 * freezer won't be removed and will be valid during this
	 * function call. Nevertheless, apply RCU read-side critical
	 * section to suppress RCU lockdep false positives.
	 */
	rcu_read_lock();
	freezer = task_freezer(task);
	rcu_read_unlock();

	/*
	 * The root cgroup is non-freezable, so we can skip the
	 * following check.
	 */
	if (!freezer->css.cgroup->parent)
		return;

	spin_lock_irq(&freezer->lock);
	/* A fully FROZEN cgroup should not be gaining new tasks. */
	BUG_ON(freezer->state == CGROUP_FROZEN);

	/* Locking avoids race with FREEZING -> THAWED transitions. */
	if (freezer->state == CGROUP_FREEZING)
		freeze_task(task, true);
	spin_unlock_irq(&freezer->lock);
}
/*
 * Recompute the cgroup's state from its member tasks: promote FREEZING
 * to FROZEN once every task is frozen.  THAWED and FROZEN states must
 * already be consistent with the tasks (BUG otherwise).
 *
 * caller must hold freezer->lock
 */
static void update_if_frozen(struct cgroup *cgroup,
			     struct freezer *freezer)
{
	struct cgroup_iter it;
	struct task_struct *task;
	unsigned int nfrozen = 0, ntotal = 0;
	enum freezer_state old_state = freezer->state;

	/* Count frozen vs. total tasks in the cgroup. */
	cgroup_iter_start(cgroup, &it);
	while ((task = cgroup_iter_next(cgroup, &it))) {
		ntotal++;
		if (frozen(task))
			nfrozen++;
	}

	if (old_state == CGROUP_THAWED) {
		/* A THAWED cgroup must contain no frozen tasks. */
		BUG_ON(nfrozen > 0);
	} else if (old_state == CGROUP_FREEZING) {
		/* All tasks frozen -> the freeze has completed. */
		if (nfrozen == ntotal)
			freezer->state = CGROUP_FROZEN;
	} else { /* old_state == CGROUP_FROZEN */
		/* A FROZEN cgroup must have every task frozen. */
		BUG_ON(nfrozen != ntotal);
	}

	cgroup_iter_end(cgroup, &it);
}
259 static int freezer_read(struct cgroup *cgroup, struct cftype *cft,
260 struct seq_file *m)
262 struct freezer *freezer;
263 enum freezer_state state;
265 if (!cgroup_lock_live_group(cgroup))
266 return -ENODEV;
268 freezer = cgroup_freezer(cgroup);
269 spin_lock_irq(&freezer->lock);
270 state = freezer->state;
271 if (state == CGROUP_FREEZING) {
272 /* We change from FREEZING to FROZEN lazily if the cgroup was
273 * only partially frozen when we exitted write. */
274 update_if_frozen(cgroup, freezer);
275 state = freezer->state;
277 spin_unlock_irq(&freezer->lock);
278 cgroup_unlock();
280 seq_puts(m, freezer_state_strs[state]);
281 seq_putc(m, '\n');
282 return 0;
/*
 * Move the cgroup to FREEZING and request a freeze of every member task.
 * Returns 0 if every task is frozen or on its way to frozen, -EBUSY if
 * at least one task could not be made to freeze yet (the caller may see
 * the cgroup stay in FREEZING; freezer_read() promotes it later).
 */
static int try_to_freeze_cgroup(struct cgroup *cgroup, struct freezer *freezer)
{
	struct cgroup_iter it;
	struct task_struct *task;
	unsigned int num_cant_freeze_now = 0;

	freezer->state = CGROUP_FREEZING;
	cgroup_iter_start(cgroup, &it);
	while ((task = cgroup_iter_next(cgroup, &it))) {
		/* freeze_task() returning 0: nothing to do for this task */
		if (!freeze_task(task, true))
			continue;
		/* already frozen: done */
		if (frozen(task))
			continue;
		/*
		 * Neither freezing nor exempt (freezer_should_skip) —
		 * this task can't be frozen right now.
		 */
		if (!freezing(task) && !freezer_should_skip(task))
			num_cant_freeze_now++;
	}
	cgroup_iter_end(cgroup, &it);

	return num_cant_freeze_now ? -EBUSY : 0;
}
306 static void unfreeze_cgroup(struct cgroup *cgroup, struct freezer *freezer)
308 struct cgroup_iter it;
309 struct task_struct *task;
311 cgroup_iter_start(cgroup, &it);
312 while ((task = cgroup_iter_next(cgroup, &it))) {
313 thaw_process(task);
315 cgroup_iter_end(cgroup, &it);
317 freezer->state = CGROUP_THAWED;
320 static int freezer_change_state(struct cgroup *cgroup,
321 enum freezer_state goal_state)
323 struct freezer *freezer;
324 int retval = 0;
326 freezer = cgroup_freezer(cgroup);
328 spin_lock_irq(&freezer->lock);
330 update_if_frozen(cgroup, freezer);
331 if (goal_state == freezer->state)
332 goto out;
334 switch (goal_state) {
335 case CGROUP_THAWED:
336 unfreeze_cgroup(cgroup, freezer);
337 break;
338 case CGROUP_FROZEN:
339 retval = try_to_freeze_cgroup(cgroup, freezer);
340 break;
341 default:
342 BUG();
344 out:
345 spin_unlock_irq(&freezer->lock);
347 return retval;
350 static int freezer_write(struct cgroup *cgroup,
351 struct cftype *cft,
352 const char *buffer)
354 int retval;
355 enum freezer_state goal_state;
357 if (strcmp(buffer, freezer_state_strs[CGROUP_THAWED]) == 0)
358 goal_state = CGROUP_THAWED;
359 else if (strcmp(buffer, freezer_state_strs[CGROUP_FROZEN]) == 0)
360 goal_state = CGROUP_FROZEN;
361 else
362 return -EINVAL;
364 if (!cgroup_lock_live_group(cgroup))
365 return -ENODEV;
366 retval = freezer_change_state(cgroup, goal_state);
367 cgroup_unlock();
368 return retval;
/* Control files exported by the freezer subsystem ("freezer.state"). */
static struct cftype files[] = {
	{
		.name = "state",
		.read_seq_string = freezer_read,
		.write_string = freezer_write,
	},
};
379 static int freezer_populate(struct cgroup_subsys *ss, struct cgroup *cgroup)
381 if (!cgroup->parent)
382 return 0;
383 return cgroup_add_files(cgroup, ss, files, ARRAY_SIZE(files));
/* Freezer subsystem registration with the cgroup core. */
struct cgroup_subsys freezer_subsys = {
	.name		= "freezer",
	.create		= freezer_create,
	.destroy	= freezer_destroy,
	.populate	= freezer_populate,
	.subsys_id	= freezer_subsys_id,
	.can_attach	= freezer_can_attach,
	.attach		= NULL,		/* no attach-time work needed */
	.fork		= freezer_fork,
	.exit		= NULL,		/* no exit-time work needed */
};