/*
 * cgroup_freezer.c -  control group freezer subsystem
 *
 * Copyright IBM Corporation, 2007
 *
 * Author : Cedric Le Goater <clg@fr.ibm.com>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of version 2.1 of the GNU Lesser General Public License
 * as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it would be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
 */
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/cgroup.h>
#include <linux/fs.h>
#include <linux/uaccess.h>
#include <linux/freezer.h>
#include <linux/seq_file.h>
32 struct cgroup_subsys_state css
;
33 enum freezer_state state
;
34 spinlock_t lock
; /* protects _writes_ to state */
37 static inline struct freezer
*cgroup_freezer(
38 struct cgroup
*cgroup
)
41 cgroup_subsys_state(cgroup
, freezer_subsys_id
),
45 static inline struct freezer
*task_freezer(struct task_struct
*task
)
47 return container_of(task_subsys_state(task
, freezer_subsys_id
),
51 int cgroup_freezing_or_frozen(struct task_struct
*task
)
53 struct freezer
*freezer
;
54 enum freezer_state state
;
57 freezer
= task_freezer(task
);
58 if (!freezer
->css
.cgroup
->parent
)
59 state
= CGROUP_THAWED
; /* root cgroup can't be frozen */
61 state
= freezer
->state
;
64 return (state
== CGROUP_FREEZING
) || (state
== CGROUP_FROZEN
);
/*
 * cgroups_write_string() limits the size of freezer state strings to
 * CGROUP_LOCAL_BUFFER_SIZE
 */
/* Indexed by enum freezer_state; order must match the enum. */
static const char *freezer_state_strs[] = {
	"THAWED",
	"FREEZING",
	"FROZEN",
};
/*
 * State diagram
 * Transitions are caused by userspace writes to the freezer.state file.
 * The values in parenthesis are state labels. The rest are edge labels.
 *
 * (THAWED) --FROZEN--> (FREEZING) --FROZEN--> (FROZEN)
 *    ^ ^                    |                     |
 *    | \_______THAWED_______/                     |
 *    \__________________________THAWED____________/
 */
/* Forward declaration; the subsystem is fully defined at the end of this file. */
struct cgroup_subsys freezer_subsys
;
/* Locks taken and their ordering
 * ------------------------------
 * cgroup_mutex (AKA cgroup_lock)
 * freezer->lock
 * css_set_lock
 * task->alloc_lock (AKA task_lock)
 * task->sighand->siglock
 *
 * cgroup code forces css_set_lock to be taken before task->alloc_lock
 *
 * freezer_create(), freezer_destroy():
 * cgroup_mutex [ by cgroup core ]
 *
 * freezer_can_attach():
 * cgroup_mutex (held by caller of can_attach)
 *
 * cgroup_freezing_or_frozen():
 * task->alloc_lock (to get task's cgroup)
 *
 * freezer_fork() (preserving fork() performance means can't take cgroup_mutex):
 * freezer->lock
 *  sighand->siglock (if the cgroup is freezing)
 *
 * freezer_read():
 * cgroup_mutex
 *  freezer->lock
 *   write_lock css_set_lock (cgroup iterator start)
 *    task->alloc_lock
 *   read_lock css_set_lock (cgroup iterator start)
 *
 * freezer_write() (freeze):
 * cgroup_mutex
 *  freezer->lock
 *   write_lock css_set_lock (cgroup iterator start)
 *    task->alloc_lock
 *   read_lock css_set_lock (cgroup iterator start)
 *    sighand->siglock (fake signal delivery inside freeze_task())
 *
 * freezer_write() (unfreeze):
 * cgroup_mutex
 *  freezer->lock
 *   write_lock css_set_lock (cgroup iterator start)
 *    task->alloc_lock
 *   read_lock css_set_lock (cgroup iterator start)
 *    task->alloc_lock (inside thaw_process(), prevents race with refrigerator())
 *     sighand->siglock
 */
137 static struct cgroup_subsys_state
*freezer_create(struct cgroup_subsys
*ss
,
138 struct cgroup
*cgroup
)
140 struct freezer
*freezer
;
142 freezer
= kzalloc(sizeof(struct freezer
), GFP_KERNEL
);
144 return ERR_PTR(-ENOMEM
);
146 spin_lock_init(&freezer
->lock
);
147 freezer
->state
= CGROUP_THAWED
;
148 return &freezer
->css
;
/* Release the freezer state allocated in freezer_create(); cgroup_mutex held. */
static void freezer_destroy(struct cgroup_subsys *ss,
			    struct cgroup *cgroup)
{
	kfree(cgroup_freezer(cgroup));
}
157 /* Task is frozen or will freeze immediately when next it gets woken */
158 static bool is_task_frozen_enough(struct task_struct
*task
)
160 return frozen(task
) ||
161 (task_is_stopped_or_traced(task
) && freezing(task
));
165 * The call to cgroup_lock() in the freezer.state write method prevents
166 * a write to that file racing against an attach, and hence the
167 * can_attach() result will remain valid until the attach completes.
169 static int freezer_can_attach(struct cgroup_subsys
*ss
,
170 struct cgroup
*new_cgroup
,
171 struct task_struct
*task
, bool threadgroup
)
173 struct freezer
*freezer
;
176 * Anything frozen can't move or be moved to/from.
178 * Since orig_freezer->state == FROZEN means that @task has been
179 * frozen, so it's sufficient to check the latter condition.
182 if (is_task_frozen_enough(task
))
185 freezer
= cgroup_freezer(new_cgroup
);
186 if (freezer
->state
== CGROUP_FROZEN
)
190 struct task_struct
*c
;
193 list_for_each_entry_rcu(c
, &task
->thread_group
, thread_group
) {
194 if (is_task_frozen_enough(c
)) {
205 static void freezer_fork(struct cgroup_subsys
*ss
, struct task_struct
*task
)
207 struct freezer
*freezer
;
210 * No lock is needed, since the task isn't on tasklist yet,
211 * so it can't be moved to another cgroup, which means the
212 * freezer won't be removed and will be valid during this
213 * function call. Nevertheless, apply RCU read-side critical
214 * section to suppress RCU lockdep false positives.
217 freezer
= task_freezer(task
);
221 * The root cgroup is non-freezable, so we can skip the
224 if (!freezer
->css
.cgroup
->parent
)
227 spin_lock_irq(&freezer
->lock
);
228 BUG_ON(freezer
->state
== CGROUP_FROZEN
);
230 /* Locking avoids race with FREEZING -> THAWED transitions. */
231 if (freezer
->state
== CGROUP_FREEZING
)
232 freeze_task(task
, true);
233 spin_unlock_irq(&freezer
->lock
);
237 * caller must hold freezer->lock
239 static void update_freezer_state(struct cgroup
*cgroup
,
240 struct freezer
*freezer
)
242 struct cgroup_iter it
;
243 struct task_struct
*task
;
244 unsigned int nfrozen
= 0, ntotal
= 0;
246 cgroup_iter_start(cgroup
, &it
);
247 while ((task
= cgroup_iter_next(cgroup
, &it
))) {
249 if (is_task_frozen_enough(task
))
254 * Transition to FROZEN when no new tasks can be added ensures
255 * that we never exist in the FROZEN state while there are unfrozen
258 if (nfrozen
== ntotal
)
259 freezer
->state
= CGROUP_FROZEN
;
260 else if (nfrozen
> 0)
261 freezer
->state
= CGROUP_FREEZING
;
263 freezer
->state
= CGROUP_THAWED
;
264 cgroup_iter_end(cgroup
, &it
);
267 static int freezer_read(struct cgroup
*cgroup
, struct cftype
*cft
,
270 struct freezer
*freezer
;
271 enum freezer_state state
;
273 if (!cgroup_lock_live_group(cgroup
))
276 freezer
= cgroup_freezer(cgroup
);
277 spin_lock_irq(&freezer
->lock
);
278 state
= freezer
->state
;
279 if (state
== CGROUP_FREEZING
) {
280 /* We change from FREEZING to FROZEN lazily if the cgroup was
281 * only partially frozen when we exitted write. */
282 update_freezer_state(cgroup
, freezer
);
283 state
= freezer
->state
;
285 spin_unlock_irq(&freezer
->lock
);
288 seq_puts(m
, freezer_state_strs
[state
]);
293 static int try_to_freeze_cgroup(struct cgroup
*cgroup
, struct freezer
*freezer
)
295 struct cgroup_iter it
;
296 struct task_struct
*task
;
297 unsigned int num_cant_freeze_now
= 0;
299 freezer
->state
= CGROUP_FREEZING
;
300 cgroup_iter_start(cgroup
, &it
);
301 while ((task
= cgroup_iter_next(cgroup
, &it
))) {
302 if (!freeze_task(task
, true))
304 if (is_task_frozen_enough(task
))
306 if (!freezing(task
) && !freezer_should_skip(task
))
307 num_cant_freeze_now
++;
309 cgroup_iter_end(cgroup
, &it
);
311 return num_cant_freeze_now
? -EBUSY
: 0;
314 static void unfreeze_cgroup(struct cgroup
*cgroup
, struct freezer
*freezer
)
316 struct cgroup_iter it
;
317 struct task_struct
*task
;
319 cgroup_iter_start(cgroup
, &it
);
320 while ((task
= cgroup_iter_next(cgroup
, &it
))) {
323 cgroup_iter_end(cgroup
, &it
);
325 freezer
->state
= CGROUP_THAWED
;
328 static int freezer_change_state(struct cgroup
*cgroup
,
329 enum freezer_state goal_state
)
331 struct freezer
*freezer
;
334 freezer
= cgroup_freezer(cgroup
);
336 spin_lock_irq(&freezer
->lock
);
338 update_freezer_state(cgroup
, freezer
);
339 if (goal_state
== freezer
->state
)
342 switch (goal_state
) {
344 unfreeze_cgroup(cgroup
, freezer
);
347 retval
= try_to_freeze_cgroup(cgroup
, freezer
);
353 spin_unlock_irq(&freezer
->lock
);
358 static int freezer_write(struct cgroup
*cgroup
,
363 enum freezer_state goal_state
;
365 if (strcmp(buffer
, freezer_state_strs
[CGROUP_THAWED
]) == 0)
366 goal_state
= CGROUP_THAWED
;
367 else if (strcmp(buffer
, freezer_state_strs
[CGROUP_FROZEN
]) == 0)
368 goal_state
= CGROUP_FROZEN
;
372 if (!cgroup_lock_live_group(cgroup
))
374 retval
= freezer_change_state(cgroup
, goal_state
);
379 static struct cftype files
[] = {
382 .read_seq_string
= freezer_read
,
383 .write_string
= freezer_write
,
387 static int freezer_populate(struct cgroup_subsys
*ss
, struct cgroup
*cgroup
)
391 return cgroup_add_files(cgroup
, ss
, files
, ARRAY_SIZE(files
));
394 struct cgroup_subsys freezer_subsys
= {
396 .create
= freezer_create
,
397 .destroy
= freezer_destroy
,
398 .populate
= freezer_populate
,
399 .subsys_id
= freezer_subsys_id
,
400 .can_attach
= freezer_can_attach
,
402 .fork
= freezer_fork
,