/*
 * cgroup_freezer.c - control group freezer subsystem
 *
 * Copyright IBM Corporation, 2007
 *
 * Author : Cedric Le Goater <clg@fr.ibm.com>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of version 2.1 of the GNU Lesser General Public License
 * as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it would be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
 */
#include <linux/module.h>
#include <linux/cgroup.h>
#include <linux/uaccess.h>
#include <linux/freezer.h>
#include <linux/seq_file.h>
31 struct cgroup_subsys_state css
;
32 enum freezer_state state
;
33 spinlock_t lock
; /* protects _writes_ to state */
36 static inline struct freezer
*cgroup_freezer(
37 struct cgroup
*cgroup
)
40 cgroup_subsys_state(cgroup
, freezer_subsys_id
),
44 static inline struct freezer
*task_freezer(struct task_struct
*task
)
46 return container_of(task_subsys_state(task
, freezer_subsys_id
),
50 int cgroup_frozen(struct task_struct
*task
)
52 struct freezer
*freezer
;
53 enum freezer_state state
;
56 freezer
= task_freezer(task
);
57 state
= freezer
->state
;
60 return state
== CGROUP_FROZEN
;
/*
 * State names exposed through the freezer.state control file; indexed by
 * enum freezer_state, compared against user input in freezer_write().
 *
 * cgroups_write_string() limits the size of freezer state strings to
 * CGROUP_LOCAL_BUFFER_SIZE
 */
static const char *freezer_state_strs[] = {
	"THAWED",
	"FREEZING",
	"FROZEN",
};
/*
 * State diagram
 * Transitions are caused by userspace writes to the freezer.state file.
 * The values in parenthesis are state labels. The rest are edge labels.
 *
 * (THAWED) --FROZEN--> (FREEZING) --FROZEN--> (FROZEN)
 *    ^ ^                    |                     |
 *    | \_______THAWED_______/                     |
 *    \__________________________THAWED____________/
 */
84 struct cgroup_subsys freezer_subsys
;
/* Locks taken and their ordering
 * ------------------------------
 * cgroup_mutex (AKA cgroup_lock)
 * freezer->lock
 * css_set_lock
 * task->alloc_lock (AKA task_lock)
 * task->sighand->siglock
 *
 * cgroup code forces css_set_lock to be taken before task->alloc_lock
 *
 * freezer_create(), freezer_destroy():
 * cgroup_mutex [ by cgroup core ]
 *
 * cgroup_frozen():
 * task->alloc_lock (to get task's cgroup)
 *
 * freezer_fork() (preserving fork() performance means can't take cgroup_mutex):
 * task->alloc_lock (to get task's cgroup)
 *  freezer->lock
 *   sighand->siglock (if the cgroup is freezing)
 *
 * freezer_read():
 * cgroup_mutex
 *  freezer->lock
 *   read_lock css_set_lock (cgroup iterator start)
 *
 * freezer_write() (freeze):
 * cgroup_mutex
 *  freezer->lock
 *   read_lock css_set_lock (cgroup iterator start)
 *    sighand->siglock
 *
 * freezer_write() (unfreeze):
 * cgroup_mutex
 *  freezer->lock
 *   read_lock css_set_lock (cgroup iterator start)
 *    task->alloc_lock (to prevent races with freeze_task())
 *     sighand->siglock
 */
128 static struct cgroup_subsys_state
*freezer_create(struct cgroup_subsys
*ss
,
129 struct cgroup
*cgroup
)
131 struct freezer
*freezer
;
133 freezer
= kzalloc(sizeof(struct freezer
), GFP_KERNEL
);
135 return ERR_PTR(-ENOMEM
);
137 spin_lock_init(&freezer
->lock
);
138 freezer
->state
= CGROUP_THAWED
;
139 return &freezer
->css
;
/*
 * freezer_destroy - free the state allocated by freezer_create().
 *
 * Called by the cgroup core (under cgroup_mutex) when the cgroup is
 * being removed; frees the struct freezer that embeds the css.
 */
static void freezer_destroy(struct cgroup_subsys *ss,
			    struct cgroup *cgroup)
{
	kfree(cgroup_freezer(cgroup));
}
148 /* Task is frozen or will freeze immediately when next it gets woken */
149 static bool is_task_frozen_enough(struct task_struct
*task
)
151 return frozen(task
) ||
152 (task_is_stopped_or_traced(task
) && freezing(task
));
156 * The call to cgroup_lock() in the freezer.state write method prevents
157 * a write to that file racing against an attach, and hence the
158 * can_attach() result will remain valid until the attach completes.
160 static int freezer_can_attach(struct cgroup_subsys
*ss
,
161 struct cgroup
*new_cgroup
,
162 struct task_struct
*task
)
164 struct freezer
*freezer
;
167 * Anything frozen can't move or be moved to/from.
169 * Since orig_freezer->state == FROZEN means that @task has been
170 * frozen, so it's sufficient to check the latter condition.
173 if (is_task_frozen_enough(task
))
176 freezer
= cgroup_freezer(new_cgroup
);
177 if (freezer
->state
== CGROUP_FROZEN
)
183 static void freezer_fork(struct cgroup_subsys
*ss
, struct task_struct
*task
)
185 struct freezer
*freezer
;
188 freezer
= task_freezer(task
);
191 spin_lock_irq(&freezer
->lock
);
192 BUG_ON(freezer
->state
== CGROUP_FROZEN
);
194 /* Locking avoids race with FREEZING -> THAWED transitions. */
195 if (freezer
->state
== CGROUP_FREEZING
)
196 freeze_task(task
, true);
197 spin_unlock_irq(&freezer
->lock
);
201 * caller must hold freezer->lock
203 static void update_freezer_state(struct cgroup
*cgroup
,
204 struct freezer
*freezer
)
206 struct cgroup_iter it
;
207 struct task_struct
*task
;
208 unsigned int nfrozen
= 0, ntotal
= 0;
210 cgroup_iter_start(cgroup
, &it
);
211 while ((task
= cgroup_iter_next(cgroup
, &it
))) {
213 if (is_task_frozen_enough(task
))
218 * Transition to FROZEN when no new tasks can be added ensures
219 * that we never exist in the FROZEN state while there are unfrozen
222 if (nfrozen
== ntotal
)
223 freezer
->state
= CGROUP_FROZEN
;
224 else if (nfrozen
> 0)
225 freezer
->state
= CGROUP_FREEZING
;
227 freezer
->state
= CGROUP_THAWED
;
228 cgroup_iter_end(cgroup
, &it
);
231 static int freezer_read(struct cgroup
*cgroup
, struct cftype
*cft
,
234 struct freezer
*freezer
;
235 enum freezer_state state
;
237 if (!cgroup_lock_live_group(cgroup
))
240 freezer
= cgroup_freezer(cgroup
);
241 spin_lock_irq(&freezer
->lock
);
242 state
= freezer
->state
;
243 if (state
== CGROUP_FREEZING
) {
244 /* We change from FREEZING to FROZEN lazily if the cgroup was
245 * only partially frozen when we exitted write. */
246 update_freezer_state(cgroup
, freezer
);
247 state
= freezer
->state
;
249 spin_unlock_irq(&freezer
->lock
);
252 seq_puts(m
, freezer_state_strs
[state
]);
257 static int try_to_freeze_cgroup(struct cgroup
*cgroup
, struct freezer
*freezer
)
259 struct cgroup_iter it
;
260 struct task_struct
*task
;
261 unsigned int num_cant_freeze_now
= 0;
263 freezer
->state
= CGROUP_FREEZING
;
264 cgroup_iter_start(cgroup
, &it
);
265 while ((task
= cgroup_iter_next(cgroup
, &it
))) {
266 if (!freeze_task(task
, true))
268 if (is_task_frozen_enough(task
))
270 if (!freezing(task
) && !freezer_should_skip(task
))
271 num_cant_freeze_now
++;
273 cgroup_iter_end(cgroup
, &it
);
275 return num_cant_freeze_now
? -EBUSY
: 0;
278 static void unfreeze_cgroup(struct cgroup
*cgroup
, struct freezer
*freezer
)
280 struct cgroup_iter it
;
281 struct task_struct
*task
;
283 cgroup_iter_start(cgroup
, &it
);
284 while ((task
= cgroup_iter_next(cgroup
, &it
))) {
287 cgroup_iter_end(cgroup
, &it
);
289 freezer
->state
= CGROUP_THAWED
;
292 static int freezer_change_state(struct cgroup
*cgroup
,
293 enum freezer_state goal_state
)
295 struct freezer
*freezer
;
298 freezer
= cgroup_freezer(cgroup
);
300 spin_lock_irq(&freezer
->lock
);
302 update_freezer_state(cgroup
, freezer
);
303 if (goal_state
== freezer
->state
)
306 switch (goal_state
) {
308 unfreeze_cgroup(cgroup
, freezer
);
311 retval
= try_to_freeze_cgroup(cgroup
, freezer
);
317 spin_unlock_irq(&freezer
->lock
);
322 static int freezer_write(struct cgroup
*cgroup
,
327 enum freezer_state goal_state
;
329 if (strcmp(buffer
, freezer_state_strs
[CGROUP_THAWED
]) == 0)
330 goal_state
= CGROUP_THAWED
;
331 else if (strcmp(buffer
, freezer_state_strs
[CGROUP_FROZEN
]) == 0)
332 goal_state
= CGROUP_FROZEN
;
336 if (!cgroup_lock_live_group(cgroup
))
338 retval
= freezer_change_state(cgroup
, goal_state
);
343 static struct cftype files
[] = {
346 .read_seq_string
= freezer_read
,
347 .write_string
= freezer_write
,
351 static int freezer_populate(struct cgroup_subsys
*ss
, struct cgroup
*cgroup
)
353 return cgroup_add_files(cgroup
, ss
, files
, ARRAY_SIZE(files
));
356 struct cgroup_subsys freezer_subsys
= {
358 .create
= freezer_create
,
359 .destroy
= freezer_destroy
,
360 .populate
= freezer_populate
,
361 .subsys_id
= freezer_subsys_id
,
362 .can_attach
= freezer_can_attach
,
364 .fork
= freezer_fork
,