/*
 *  kernel/cpuset.c
 *
 *  Processor and Memory placement constraints for sets of tasks.
 *
 *  Copyright (C) 2003 BULL SA.
 *  Copyright (C) 2004-2007 Silicon Graphics, Inc.
 *  Copyright (C) 2006 Google, Inc
 *
 *  Portions derived from Patrick Mochel's sysfs code.
 *  sysfs is Copyright (c) 2001-3 Patrick Mochel
 *
 *  2003-10-10 Written by Simon Derr.
 *  2003-10-22 Updates by Stephen Hemminger.
 *  2004 May-July Rework by Paul Jackson.
 *  2006 Rework by Paul Menage to use generic cgroups
 *
 *  This file is subject to the terms and conditions of the GNU General Public
 *  License.  See the file COPYING in the main directory of the Linux
 *  distribution for more details.
 */
#include <linux/cpu.h>
#include <linux/cpumask.h>
#include <linux/cpuset.h>
#include <linux/err.h>
#include <linux/errno.h>
#include <linux/file.h>
#include <linux/fs.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/kernel.h>
#include <linux/kmod.h>
#include <linux/list.h>
#include <linux/mempolicy.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/mount.h>
#include <linux/namei.h>
#include <linux/pagemap.h>
#include <linux/proc_fs.h>
#include <linux/rcupdate.h>
#include <linux/sched.h>
#include <linux/seq_file.h>
#include <linux/security.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/stat.h>
#include <linux/string.h>
#include <linux/time.h>
#include <linux/backing-dev.h>
#include <linux/sort.h>

#include <asm/uaccess.h>
#include <asm/atomic.h>
#include <linux/mutex.h>
#include <linux/kfifo.h>
/*
 * Tracks how many cpusets are currently defined in system.
 * When there is only one cpuset (the root cpuset) we can
 * short circuit some hooks.
 */
int number_of_cpusets __read_mostly;

/* Retrieve the cpuset from a cgroup */
struct cgroup_subsys cpuset_subsys;
/* See "Frequency meter" comments, below. */

struct fmeter {
        int cnt;                /* unprocessed events count */
        int val;                /* most recent output value */
        time_t time;            /* clock (secs) when val computed */
        spinlock_t lock;        /* guards read or write of above */
};
struct cpuset {
        struct cgroup_subsys_state css;

        unsigned long flags;            /* "unsigned long" so bitops work */
        cpumask_t cpus_allowed;         /* CPUs allowed to tasks in cpuset */
        nodemask_t mems_allowed;        /* Memory Nodes allowed to tasks */

        struct cpuset *parent;          /* my parent */

        /*
         * Copy of global cpuset_mems_generation as of the most
         * recent time this cpuset changed its mems_allowed.
         */
        int mems_generation;

        struct fmeter fmeter;           /* memory_pressure filter */

        /* partition number for rebuild_sched_domains() */
        int pn;
};
/* Retrieve the cpuset for a cgroup */
static inline struct cpuset *cgroup_cs(struct cgroup *cont)
{
        return container_of(cgroup_subsys_state(cont, cpuset_subsys_id),
                            struct cpuset, css);
}

/* Retrieve the cpuset for a task */
static inline struct cpuset *task_cs(struct task_struct *task)
{
        return container_of(task_subsys_state(task, cpuset_subsys_id),
                            struct cpuset, css);
}
/* bits in struct cpuset flags field */
typedef enum {
        CS_CPU_EXCLUSIVE,
        CS_MEM_EXCLUSIVE,
        CS_MEMORY_MIGRATE,
        CS_SCHED_LOAD_BALANCE,
        CS_SPREAD_PAGE,
        CS_SPREAD_SLAB,
} cpuset_flagbits_t;
/* convenient tests for these bits */
static inline int is_cpu_exclusive(const struct cpuset *cs)
{
        return test_bit(CS_CPU_EXCLUSIVE, &cs->flags);
}

static inline int is_mem_exclusive(const struct cpuset *cs)
{
        return test_bit(CS_MEM_EXCLUSIVE, &cs->flags);
}

static inline int is_sched_load_balance(const struct cpuset *cs)
{
        return test_bit(CS_SCHED_LOAD_BALANCE, &cs->flags);
}

static inline int is_memory_migrate(const struct cpuset *cs)
{
        return test_bit(CS_MEMORY_MIGRATE, &cs->flags);
}

static inline int is_spread_page(const struct cpuset *cs)
{
        return test_bit(CS_SPREAD_PAGE, &cs->flags);
}

static inline int is_spread_slab(const struct cpuset *cs)
{
        return test_bit(CS_SPREAD_SLAB, &cs->flags);
}
/*
 * Increment this integer every time any cpuset changes its
 * mems_allowed value.  Users of cpusets can track this generation
 * number, and avoid having to lock and reload mems_allowed unless
 * the cpuset they're using changes generation.
 *
 * A single, global generation is needed because attach_task() could
 * reattach a task to a different cpuset, which must not have its
 * generation numbers aliased with those of that task's previous cpuset.
 *
 * Generations are needed for mems_allowed because one task cannot
 * modify another's memory placement.  So we must enable every task,
 * on every visit to __alloc_pages(), to efficiently check whether
 * its current->cpuset->mems_allowed has changed, requiring an update
 * of its current->mems_allowed.
 *
 * Since cpuset_mems_generation is guarded by manage_mutex,
 * there is no need to mark it atomic.
 */
static int cpuset_mems_generation;

static struct cpuset top_cpuset = {
        .flags = ((1 << CS_CPU_EXCLUSIVE) | (1 << CS_MEM_EXCLUSIVE)),
        .cpus_allowed = CPU_MASK_ALL,
        .mems_allowed = NODE_MASK_ALL,
};
/*
 * We have two global cpuset mutexes below.  They can nest.
 * It is ok to first take manage_mutex, then nest callback_mutex.  We also
 * require taking task_lock() when dereferencing a task's cpuset pointer.
 * See "The task_lock() exception", at the end of this comment.
 *
 * A task must hold both mutexes to modify cpusets.  If a task
 * holds manage_mutex, then it blocks others wanting that mutex,
 * ensuring that it is the only task able to also acquire callback_mutex
 * and be able to modify cpusets.  It can perform various checks on
 * the cpuset structure first, knowing nothing will change.  It can
 * also allocate memory while just holding manage_mutex.  While it is
 * performing these checks, various callback routines can briefly
 * acquire callback_mutex to query cpusets.  Once it is ready to make
 * the changes, it takes callback_mutex, blocking everyone else.
 *
 * Calls to the kernel memory allocator cannot be made while holding
 * callback_mutex, as that would risk double tripping on callback_mutex
 * from one of the callbacks into the cpuset code from within
 * __alloc_pages().
 *
 * If a task is only holding callback_mutex, then it has read-only
 * access to cpusets.
 *
 * The task_struct fields mems_allowed and mems_generation may only
 * be accessed in the context of that task, so require no locks.
 *
 * Any task can increment and decrement the count field without lock.
 * So in general, code holding manage_mutex or callback_mutex can't rely
 * on the count field not changing.  However, if the count goes to
 * zero, then only attach_task(), which holds both mutexes, can
 * increment it again.  Because a count of zero means that no tasks
 * are currently attached, therefore there is no way a task attached
 * to that cpuset can fork (the other way to increment the count).
 * So code holding manage_mutex or callback_mutex can safely assume that
 * if the count is zero, it will stay zero.  Similarly, if a task
 * holds manage_mutex or callback_mutex on a cpuset with zero count, it
 * knows that the cpuset won't be removed, as cpuset_rmdir() needs
 * both of those mutexes.
 *
 * The cpuset_common_file_write handler for operations that modify
 * the cpuset hierarchy holds manage_mutex across the entire operation,
 * single threading all such cpuset modifications across the system.
 *
 * The cpuset_common_file_read() handlers only hold callback_mutex across
 * small pieces of code, such as when reading out possibly multi-word
 * cpumasks and nodemasks.
 *
 * The fork and exit callbacks cpuset_fork() and cpuset_exit(), don't
 * (usually) take either mutex.  These are the two most performance
 * critical pieces of code here.  The exception occurs on cpuset_exit(),
 * when a task in a notify_on_release cpuset exits.  Then manage_mutex
 * is taken, and if the cpuset count is zero, a usermode call made
 * to /sbin/cpuset_release_agent with the name of the cpuset (path
 * relative to the root of cpuset file system) as the argument.
 *
 * A cpuset can only be deleted if both its 'count' of using tasks
 * is zero, and its list of 'children' cpusets is empty.  Since all
 * tasks in the system use _some_ cpuset, and since there is always at
 * least one task in the system (init), therefore, top_cpuset
 * always has either children cpusets and/or using tasks.  So we don't
 * need a special hack to ensure that top_cpuset cannot be deleted.
 *
 * The above "Tale of Two Semaphores" would be complete, but for:
 *
 *      The task_lock() exception
 *
 * The need for this exception arises from the action of attach_task(),
 * which overwrites one task's cpuset pointer with another.  It does
 * so using both mutexes, however there are several performance
 * critical places that need to reference task->cpuset without the
 * expense of grabbing a system global mutex.  Therefore except as
 * noted below, when dereferencing or, as in attach_task(), modifying
 * a task's cpuset pointer we use task_lock(), which acts on a spinlock
 * (task->alloc_lock) already in the task_struct routinely used for
 * such matters.
 *
 * P.S.  One more locking exception.  RCU is used to guard the
 * update of a task's cpuset pointer by attach_task() and the
 * access of task->cpuset->mems_generation via that pointer in
 * the routine cpuset_update_task_memory_state().
 */

static DEFINE_MUTEX(callback_mutex);
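/*
 * Illustrative sketch of the nesting rule above (not a real call site;
 * "manage_mutex" names the modify-side lock, which in this cgroup-based
 * code is taken via cgroup_lock()):
 *
 *      mutex_lock(&manage_mutex);         // modify side, held a long time
 *      ... validate, allocate, scan ...   // queries may briefly take
 *                                         // callback_mutex meanwhile
 *      mutex_lock(&callback_mutex);       // publish the change
 *      ... store new cpus_allowed etc ...
 *      mutex_unlock(&callback_mutex);
 *      mutex_unlock(&manage_mutex);
 *
 * Query-side code takes only callback_mutex, never the reverse order.
 */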
/*
 * This is ugly, but preserves the userspace API for existing cpuset
 * users. If someone tries to mount the "cpuset" filesystem, we
 * silently switch it to mount "cgroup" instead
 */
static int cpuset_get_sb(struct file_system_type *fs_type,
                         int flags, const char *unused_dev_name,
                         void *data, struct vfsmount *mnt)
{
        struct file_system_type *cgroup_fs = get_fs_type("cgroup");
        int ret = -ENODEV;

        if (cgroup_fs) {
                char mountopts[] =
                        "cpuset,noprefix,"
                        "release_agent=/sbin/cpuset_release_agent";
                ret = cgroup_fs->get_sb(cgroup_fs, flags,
                                        unused_dev_name, mountopts, mnt);
                put_filesystem(cgroup_fs);
        }
        return ret;
}

static struct file_system_type cpuset_fs_type = {
        .name = "cpuset",
        .get_sb = cpuset_get_sb,
};
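/*
 * For example (illustrative), a legacy mount request such as:
 *
 *      mount -t cpuset cpuset /dev/cpuset
 *
 * is handled, given the mountopts string above, as if the caller had
 * instead asked for:
 *
 *      mount -t cgroup -o cpuset,noprefix,\
 *            release_agent=/sbin/cpuset_release_agent cpuset /dev/cpuset
 */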
/*
 * Return in *pmask the portion of a cpuset's cpus_allowed that
 * are online.  If none are online, walk up the cpuset hierarchy
 * until we find one that does have some online cpus.  If we get
 * all the way to the top and still haven't found any online cpus,
 * return cpu_online_map.  Or if passed a NULL cs from an exit'ing
 * task, return cpu_online_map.
 *
 * One way or another, we guarantee to return some non-empty subset
 * of cpu_online_map.
 *
 * Call with callback_mutex held.
 */

static void guarantee_online_cpus(const struct cpuset *cs, cpumask_t *pmask)
{
        while (cs && !cpus_intersects(cs->cpus_allowed, cpu_online_map))
                cs = cs->parent;
        if (cs)
                cpus_and(*pmask, cs->cpus_allowed, cpu_online_map);
        else
                *pmask = cpu_online_map;
        BUG_ON(!cpus_intersects(*pmask, cpu_online_map));
}
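/*
 * Example (illustrative): if a cpuset allows only cpus 4-7 and all four
 * go offline, the walk above falls back to the nearest ancestor whose
 * cpus still intersect cpu_online_map; if cs ends up NULL (an exiting
 * task passed a NULL cs), cpu_online_map itself is returned.
 */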
/*
 * Return in *pmask the portion of a cpuset's mems_allowed that
 * are online, with memory.  If none are online with memory, walk
 * up the cpuset hierarchy until we find one that does have some
 * online mems.  If we get all the way to the top and still haven't
 * found any online mems, return node_states[N_HIGH_MEMORY].
 *
 * One way or another, we guarantee to return some non-empty subset
 * of node_states[N_HIGH_MEMORY].
 *
 * Call with callback_mutex held.
 */

static void guarantee_online_mems(const struct cpuset *cs, nodemask_t *pmask)
{
        while (cs && !nodes_intersects(cs->mems_allowed,
                                        node_states[N_HIGH_MEMORY]))
                cs = cs->parent;
        if (cs)
                nodes_and(*pmask, cs->mems_allowed,
                                node_states[N_HIGH_MEMORY]);
        else
                *pmask = node_states[N_HIGH_MEMORY];
        BUG_ON(!nodes_intersects(*pmask, node_states[N_HIGH_MEMORY]));
}
/**
 * cpuset_update_task_memory_state - update task memory placement
 *
 * If the current task's cpuset's mems_allowed changed behind our
 * backs, update current->mems_allowed, mems_generation and task NUMA
 * mempolicy to the new value.
 *
 * Task mempolicy is updated by rebinding it relative to the
 * current->cpuset if a task has its memory placement changed.
 * Do not call this routine if in_interrupt().
 *
 * Call without callback_mutex or task_lock() held.  May be
 * called with or without manage_mutex held.  Thanks in part to
 * 'the_top_cpuset_hack', the task's cpuset pointer will never
 * be NULL.  This routine also might acquire callback_mutex and
 * current->mm->mmap_sem during call.
 *
 * Reading current->cpuset->mems_generation doesn't need task_lock
 * to guard the current->cpuset derefence, because it is guarded
 * from concurrent freeing of current->cpuset by attach_task(),
 * which uses RCU.
 *
 * The rcu_dereference() is technically probably not needed,
 * as I don't actually mind if I see a new cpuset pointer but
 * an old value of mems_generation.  However this really only
 * matters on alpha systems using cpusets heavily.  If I dropped
 * that rcu_dereference(), it would save them a memory barrier.
 * For all other arch's, rcu_dereference is a no-op anyway, and for
 * alpha systems not using cpusets, another planned optimization,
 * avoiding the rcu critical section for tasks in the root cpuset
 * which is statically allocated, so can't vanish, will make this
 * irrelevant.  Better to use RCU as intended, than to engage in
 * some cute trick to save a memory barrier that is impossible to
 * test, for alpha systems using cpusets heavily, which might not
 * even exist yet.
 *
 * This routine is needed to update the per-task mems_allowed data,
 * within the task's context, when it is trying to allocate memory
 * (in various mm/mempolicy.c routines) and notices that some other
 * task has been modifying its cpuset.
 */
void cpuset_update_task_memory_state(void)
{
        int my_cpusets_mem_gen;
        struct task_struct *tsk = current;
        struct cpuset *cs;

        if (task_cs(tsk) == &top_cpuset) {
                /* Don't need rcu for top_cpuset.  It's never freed. */
                my_cpusets_mem_gen = top_cpuset.mems_generation;
        } else {
                rcu_read_lock();
                my_cpusets_mem_gen = task_cs(current)->mems_generation;
                rcu_read_unlock();
        }

        if (my_cpusets_mem_gen != tsk->cpuset_mems_generation) {
                mutex_lock(&callback_mutex);
                task_lock(tsk);
                cs = task_cs(tsk); /* Maybe changed when task not locked */
                guarantee_online_mems(cs, &tsk->mems_allowed);
                tsk->cpuset_mems_generation = cs->mems_generation;
                if (is_spread_page(cs))
                        tsk->flags |= PF_SPREAD_PAGE;
                else
                        tsk->flags &= ~PF_SPREAD_PAGE;
                if (is_spread_slab(cs))
                        tsk->flags |= PF_SPREAD_SLAB;
                else
                        tsk->flags &= ~PF_SPREAD_SLAB;
                task_unlock(tsk);
                mutex_unlock(&callback_mutex);
                mpol_rebind_task(tsk, &tsk->mems_allowed);
        }
}
/*
 * is_cpuset_subset(p, q) - Is cpuset p a subset of cpuset q?
 *
 * One cpuset is a subset of another if all its allowed CPUs and
 * Memory Nodes are a subset of the other, and its exclusive flags
 * are only set if the other's are set.  Call holding manage_mutex.
 */

static int is_cpuset_subset(const struct cpuset *p, const struct cpuset *q)
{
        return  cpus_subset(p->cpus_allowed, q->cpus_allowed) &&
                nodes_subset(p->mems_allowed, q->mems_allowed) &&
                is_cpu_exclusive(p) <= is_cpu_exclusive(q) &&
                is_mem_exclusive(p) <= is_mem_exclusive(q);
}
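/*
 * Worked example (illustrative): p with cpus 1-2, mems 0, and no
 * exclusive flags is a subset of q with cpus 0-3, mems 0-1, whatever
 * q's exclusive flags.  The reverse fails, since q's cpus 0-3 are not
 * contained in p's 1-2.  The '<=' comparisons encode "p may be
 * exclusive only if q also is".
 */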
/*
 * validate_change() - Used to validate that any proposed cpuset change
 *                     follows the structural rules for cpusets.
 *
 * If we replaced the flag and mask values of the current cpuset
 * (cur) with those values in the trial cpuset (trial), would
 * our various subset and exclusive rules still be valid?  Presumes
 * manage_mutex held.
 *
 * 'cur' is the address of an actual, in-use cpuset.  Operations
 * such as list traversal that depend on the actual address of the
 * cpuset in the list must use cur below, not trial.
 *
 * 'trial' is the address of bulk structure copy of cur, with
 * perhaps one or more of the fields cpus_allowed, mems_allowed,
 * or flags changed to new, trial values.
 *
 * Return 0 if valid, -errno if not.
 */

static int validate_change(const struct cpuset *cur, const struct cpuset *trial)
{
        struct cgroup *cont;
        struct cpuset *c, *par;

        /* Each of our child cpusets must be a subset of us */
        list_for_each_entry(cont, &cur->css.cgroup->children, sibling) {
                if (!is_cpuset_subset(cgroup_cs(cont), trial))
                        return -EBUSY;
        }

        /* Remaining checks don't apply to root cpuset */
        if (cur == &top_cpuset)
                return 0;

        par = cur->parent;

        /* We must be a subset of our parent cpuset */
        if (!is_cpuset_subset(trial, par))
                return -EACCES;

        /* If either I or some sibling (!= me) is exclusive, we can't overlap */
        list_for_each_entry(cont, &par->css.cgroup->children, sibling) {
                c = cgroup_cs(cont);
                if ((is_cpu_exclusive(trial) || is_cpu_exclusive(c)) &&
                    c != cur &&
                    cpus_intersects(trial->cpus_allowed, c->cpus_allowed))
                        return -EINVAL;
                if ((is_mem_exclusive(trial) || is_mem_exclusive(c)) &&
                    c != cur &&
                    nodes_intersects(trial->mems_allowed, c->mems_allowed))
                        return -EINVAL;
        }

        /* Cpusets with tasks can't have empty cpus_allowed or mems_allowed */
        if (cgroup_task_count(cur->css.cgroup)) {
                if (cpus_empty(trial->cpus_allowed) ||
                    nodes_empty(trial->mems_allowed)) {
                        return -ENOSPC;
                }
        }

        return 0;
}
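/*
 * Example (illustrative): with siblings A (cpus 0-1) and B (cpus 1-2),
 * a trial that turns on A's cpu_exclusive flag fails with -EINVAL,
 * since A's cpus intersect sibling B's.  A trial that shrinks a parent
 * below one of its children's masks fails the subset check with -EBUSY.
 */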
/*
 * Helper routine for rebuild_sched_domains().
 * Do cpusets a, b have overlapping cpus_allowed masks?
 */

static int cpusets_overlap(struct cpuset *a, struct cpuset *b)
{
        return cpus_intersects(a->cpus_allowed, b->cpus_allowed);
}
/*
 * rebuild_sched_domains()
 *
 * If the flag 'sched_load_balance' of any cpuset with non-empty
 * 'cpus' changes, or if the 'cpus' allowed changes in any cpuset
 * which has that flag enabled, or if any cpuset with a non-empty
 * 'cpus' is removed, then call this routine to rebuild the
 * scheduler's dynamic sched domains.
 *
 * This routine builds a partial partition of the system's CPUs
 * (the set of non-overlapping cpumask_t's in the array 'part'
 * below), and passes that partial partition to the kernel/sched.c
 * partition_sched_domains() routine, which will rebuild the
 * scheduler's load balancing domains (sched domains) as specified
 * by that partial partition.  A 'partial partition' is a set of
 * non-overlapping subsets whose union is a subset of that set.
 *
 * See "What is sched_load_balance" in Documentation/cpusets.txt
 * for a background explanation of this.
 *
 * Does not return errors, on the theory that the callers of this
 * routine would rather not worry about failures to rebuild sched
 * domains when operating in the severe memory shortage situations
 * that could cause allocation failures below.
 *
 * Call with cgroup_mutex held.  May take callback_mutex during
 * call due to the kfifo_alloc() and kmalloc() calls.  May nest
 * a call to the lock_cpu_hotplug()/unlock_cpu_hotplug() pair.
 * Must not be called holding callback_mutex, because we must not
 * call lock_cpu_hotplug() while holding callback_mutex.  Elsewhere
 * the kernel nests callback_mutex inside lock_cpu_hotplug() calls.
 * So the reverse nesting would risk an ABBA deadlock.
 *
 * The three key local variables below are:
 *    q  - a kfifo queue of cpuset pointers, used to implement a
 *         top-down scan of all cpusets.  This scan loads a pointer
 *         to each cpuset marked is_sched_load_balance into the
 *         array 'csa'.  For our purposes, rebuilding the scheduler's
 *         sched domains, we can ignore !is_sched_load_balance cpusets.
 *  csa  - (for CpuSet Array) Array of pointers to all the cpusets
 *         that need to be load balanced, for convenient iterative
 *         access by the subsequent code that finds the best partition,
 *         i.e. the set of domains (subsets) of CPUs such that the
 *         cpus_allowed of every cpuset marked is_sched_load_balance
 *         is a subset of one of these domains, while there are as
 *         many such domains as possible, each as small as possible.
 * doms  - Conversion of 'csa' to an array of cpumasks, for passing to
 *         the kernel/sched.c routine partition_sched_domains() in a
 *         convenient format, that can be easily compared to the prior
 *         value to determine what partition elements (sched domains)
 *         were changed (added or removed.)
 *
 * Finding the best partition (set of domains):
 *      The triple nested loops below over i, j, k scan over the
 *      load balanced cpusets (using the array of cpuset pointers in
 *      csa[]) looking for pairs of cpusets that have overlapping
 *      cpus_allowed, but which don't have the same 'pn' partition
 *      number, and puts them in the same partition number.  It keeps
 *      looping on the 'restart' label until it can no longer find
 *      any such pairs.
 *
 *      The union of the cpus_allowed masks from the set of
 *      all cpusets having the same 'pn' value then form the one
 *      element of the partition (one sched domain) to be passed to
 *      partition_sched_domains().
 */
static void rebuild_sched_domains(void)
{
        struct kfifo *q;        /* queue of cpusets to be scanned */
        struct cpuset *cp;      /* scans q */
        struct cpuset **csa;    /* array of all cpuset ptrs */
        int csn;                /* how many cpuset ptrs in csa so far */
        int i, j, k;            /* indices for partition finding loops */
        cpumask_t *doms;        /* resulting partition; i.e. sched domains */
        int ndoms;              /* number of sched domains in result */
        int nslot;              /* next empty doms[] cpumask_t slot */

        q = NULL;
        csa = NULL;
        doms = NULL;

        /* Special case for the 99% of systems with one, full, sched domain */
        if (is_sched_load_balance(&top_cpuset)) {
                ndoms = 1;
                doms = kmalloc(sizeof(cpumask_t), GFP_KERNEL);
                if (!doms)
                        goto rebuild;
                *doms = top_cpuset.cpus_allowed;
                goto rebuild;
        }
        q = kfifo_alloc(number_of_cpusets * sizeof(cp), GFP_KERNEL, NULL);
        if (IS_ERR(q))
                goto done;
        csa = kmalloc(number_of_cpusets * sizeof(cp), GFP_KERNEL);
        if (!csa)
                goto done;
        csn = 0;

        cp = &top_cpuset;
        __kfifo_put(q, (void *)&cp, sizeof(cp));
        while (__kfifo_get(q, (void *)&cp, sizeof(cp))) {
                struct cgroup *cont;
                struct cpuset *child;   /* scans child cpusets of cp */
                if (is_sched_load_balance(cp))
                        csa[csn++] = cp;
                list_for_each_entry(cont, &cp->css.cgroup->children, sibling) {
                        child = cgroup_cs(cont);
                        __kfifo_put(q, (void *)&child, sizeof(cp));
                }
        }
        for (i = 0; i < csn; i++)
                csa[i]->pn = i;
        ndoms = csn;

restart:
        /* Find the best partition (set of sched domains) */
        for (i = 0; i < csn; i++) {
                struct cpuset *a = csa[i];
                int apn = a->pn;

                for (j = 0; j < csn; j++) {
                        struct cpuset *b = csa[j];
                        int bpn = b->pn;

                        if (apn != bpn && cpusets_overlap(a, b)) {
                                for (k = 0; k < csn; k++) {
                                        struct cpuset *c = csa[k];

                                        if (c->pn == bpn)
                                                c->pn = apn;
                                }
                                ndoms--;        /* one less element */
                                goto restart;
                        }
                }
        }
        /* Convert <csn, csa> to <ndoms, doms> */
        doms = kmalloc(ndoms * sizeof(cpumask_t), GFP_KERNEL);
        if (!doms)
                goto rebuild;

        for (nslot = 0, i = 0; i < csn; i++) {
                struct cpuset *a = csa[i];
                int apn = a->pn;

                if (apn >= 0) {
                        cpumask_t *dp = doms + nslot;

                        if (nslot == ndoms) {
                                static int warnings = 10;
                                if (warnings) {
                                        printk(KERN_WARNING
                                         "rebuild_sched_domains confused:"
                                          " nslot %d, ndoms %d, csn %d, i %d,"
                                          " apn %d\n",
                                          nslot, ndoms, csn, i, apn);
                                        warnings--;
                                }
                                continue;
                        }

                        cpus_clear(*dp);
                        for (j = i; j < csn; j++) {
                                struct cpuset *b = csa[j];

                                if (apn == b->pn) {
                                        cpus_or(*dp, *dp, b->cpus_allowed);
                                        b->pn = -1;
                                }
                        }
                        nslot++;
                }
        }
        BUG_ON(nslot != ndoms);
rebuild:
        /* Have scheduler rebuild sched domains */
        lock_cpu_hotplug();
        partition_sched_domains(ndoms, doms);
        unlock_cpu_hotplug();

done:
        if (q && !IS_ERR(q))
                kfifo_free(q);
        kfree(csa);
        /* Don't kfree(doms) -- partition_sched_domains() does that. */
}
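/*
 * Worked example of the partition pass above (illustrative): three load
 * balanced cpusets with cpus_allowed 0-1, 1-2 and 4-5 start with
 * pn = 0, 1, 2 and ndoms = 3.  The first two overlap on cpu 1, so every
 * pn == 1 becomes pn == 0 and ndoms drops to 2.  No other pair overlaps,
 * so the result passed to partition_sched_domains() is the two sched
 * domains { 0-2 } and { 4-5 }.
 */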
/*
 * Call with manage_mutex held.  May take callback_mutex during call.
 */

static int update_cpumask(struct cpuset *cs, char *buf)
{
        struct cpuset trialcs;
        int retval;
        int cpus_changed, is_load_balanced;
        /* top_cpuset.cpus_allowed tracks cpu_online_map; it's read-only */
        if (cs == &top_cpuset)
                return -EACCES;

        trialcs = *cs;

        /*
         * An empty cpus_allowed is ok iff there are no tasks in the cpuset.
         * Since cpulist_parse() fails on an empty mask, we special case
         * that parsing.  The validate_change() call ensures that cpusets
         * with tasks have cpus.
         */
        buf = strstrip(buf);
        if (!*buf) {
                cpus_clear(trialcs.cpus_allowed);
        } else {
                retval = cpulist_parse(buf, trialcs.cpus_allowed);
                if (retval < 0)
                        return retval;
        }
        cpus_and(trialcs.cpus_allowed, trialcs.cpus_allowed, cpu_online_map);
        retval = validate_change(cs, &trialcs);
        if (retval < 0)
                return retval;

        cpus_changed = !cpus_equal(cs->cpus_allowed, trialcs.cpus_allowed);
        is_load_balanced = is_sched_load_balance(&trialcs);

        mutex_lock(&callback_mutex);
        cs->cpus_allowed = trialcs.cpus_allowed;
        mutex_unlock(&callback_mutex);

        if (cpus_changed && is_load_balanced)
                rebuild_sched_domains();

        return 0;
}
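/*
 * Example (illustrative): writing "1-3,5" to a cpuset's 'cpus' file
 * parses to the mask {1,2,3,5}, which is then trimmed to the online
 * cpus and checked by validate_change() before being published under
 * callback_mutex.  Writing an empty string is accepted only while the
 * cpuset has no tasks.
 */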
/*
 * cpuset_migrate_mm
 *
 *    Migrate memory region from one set of nodes to another.
 *
 *    Temporarily set tasks mems_allowed to target nodes of migration,
 *    so that the migration code can allocate pages on these nodes.
 *
 *    Call holding manage_mutex, so our current->cpuset won't change
 *    during this call, as manage_mutex holds off any attach_task()
 *    calls.  Therefore we don't need to take task_lock around the
 *    call to guarantee_online_mems(), as we know no one is changing
 *    our task's cpuset.
 *
 *    Hold callback_mutex around the two modifications of our tasks
 *    mems_allowed to synchronize with cpuset_mems_allowed().
 *
 *    While the mm_struct we are migrating is typically from some
 *    other task, the task_struct mems_allowed that we are hacking
 *    is for our current task, which must allocate new pages for that
 *    migrating memory region.
 *
 *    We call cpuset_update_task_memory_state() before hacking
 *    our task's mems_allowed, so that we are assured of being in
 *    sync with our task's cpuset, and in particular, callbacks to
 *    cpuset_update_task_memory_state() from nested page allocations
 *    won't see any mismatch of our cpuset and task mems_generation
 *    values, so won't overwrite our hacked task's mems_allowed
 *    nodemask.
 */
static void cpuset_migrate_mm(struct mm_struct *mm, const nodemask_t *from,
                                                        const nodemask_t *to)
{
        struct task_struct *tsk = current;

        cpuset_update_task_memory_state();

        mutex_lock(&callback_mutex);
        tsk->mems_allowed = *to;
        mutex_unlock(&callback_mutex);

        do_migrate_pages(mm, from, to, MPOL_MF_MOVE_ALL);

        mutex_lock(&callback_mutex);
        guarantee_online_mems(task_cs(tsk), &tsk->mems_allowed);
        mutex_unlock(&callback_mutex);
}
/*
 * Handle user request to change the 'mems' memory placement
 * of a cpuset.  Needs to validate the request, update the
 * cpuset's mems_allowed and mems_generation, and for each
 * task in the cpuset, rebind any vma mempolicies and if
 * the cpuset is marked 'memory_migrate', migrate the task's
 * pages to the new memory.
 *
 * Call with manage_mutex held.  May take callback_mutex during call.
 * Will take tasklist_lock, scan tasklist for tasks in cpuset cs,
 * lock each such task's mm->mmap_sem, scan its vma's and rebind
 * their mempolicies to the cpuset's new mems_allowed.
 */

static void *cpuset_being_rebound;

static int update_nodemask(struct cpuset *cs, char *buf)
{
        struct cpuset trialcs;
        nodemask_t oldmem;
        struct task_struct *p;
        struct mm_struct **mmarray;
        int i, n, ntasks;
        int migrate;
        int fudge;
        int retval;
        struct cgroup_iter it;
        /*
         * top_cpuset.mems_allowed tracks node_states[N_HIGH_MEMORY];
         * it's read-only
         */
        if (cs == &top_cpuset)
                return -EACCES;

        trialcs = *cs;

        /*
         * An empty mems_allowed is ok iff there are no tasks in the cpuset.
         * Since nodelist_parse() fails on an empty mask, we special case
         * that parsing.  The validate_change() call ensures that cpusets
         * with tasks have memory.
         */
        buf = strstrip(buf);
        if (!*buf) {
                nodes_clear(trialcs.mems_allowed);
        } else {
                retval = nodelist_parse(buf, trialcs.mems_allowed);
                if (retval < 0)
                        goto done;
        }
        nodes_and(trialcs.mems_allowed, trialcs.mems_allowed,
                                                node_states[N_HIGH_MEMORY]);
        oldmem = cs->mems_allowed;
        if (nodes_equal(oldmem, trialcs.mems_allowed)) {
                retval = 0;             /* Too easy - nothing to do */
                goto done;
        }
        retval = validate_change(cs, &trialcs);
        if (retval < 0)
                goto done;

        mutex_lock(&callback_mutex);
        cs->mems_allowed = trialcs.mems_allowed;
        cs->mems_generation = cpuset_mems_generation++;
        mutex_unlock(&callback_mutex);
        cpuset_being_rebound = cs;              /* causes mpol_copy() rebind */

        fudge = 10;                             /* spare mmarray[] slots */
        fudge += cpus_weight(cs->cpus_allowed); /* imagine one fork-bomb/cpu */
        retval = -ENOMEM;

        /*
         * Allocate mmarray[] to hold mm reference for each task
         * in cpuset cs.  Can't kmalloc GFP_KERNEL while holding
         * tasklist_lock.  We could use GFP_ATOMIC, but with a
         * few more lines of code, we can retry until we get a big
         * enough mmarray[] w/o using GFP_ATOMIC.
         */
        while (1) {
                ntasks = cgroup_task_count(cs->css.cgroup);  /* guess */
                ntasks += fudge;
                mmarray = kmalloc(ntasks * sizeof(*mmarray), GFP_KERNEL);
                if (!mmarray)
                        goto done;
                read_lock(&tasklist_lock);              /* block fork */
                if (cgroup_task_count(cs->css.cgroup) <= ntasks)
                        break;                          /* got enough */
                read_unlock(&tasklist_lock);            /* try again */
                kfree(mmarray);
        }
        n = 0;

        /* Load up mmarray[] with mm reference for each task in cpuset. */
        cgroup_iter_start(cs->css.cgroup, &it);
        while ((p = cgroup_iter_next(cs->css.cgroup, &it))) {
                struct mm_struct *mm;

                if (n >= ntasks) {
                        printk(KERN_WARNING
                                "Cpuset mempolicy rebind incomplete.\n");
                        break;
                }
                mm = get_task_mm(p);
                if (!mm)
                        continue;
                mmarray[n++] = mm;
        }
        cgroup_iter_end(cs->css.cgroup, &it);
        read_unlock(&tasklist_lock);
        /*
         * Now that we've dropped the tasklist spinlock, we can
         * rebind the vma mempolicies of each mm in mmarray[] to their
         * new cpuset, and release that mm.  The mpol_rebind_mm()
         * call takes mmap_sem, which we couldn't take while holding
         * tasklist_lock.  Forks can happen again now - the mpol_copy()
         * cpuset_being_rebound check will catch such forks, and rebind
         * their vma mempolicies too.  Because we still hold the global
         * cpuset manage_mutex, we know that no other rebind effort will
         * be contending for the global variable cpuset_being_rebound.
         * It's ok if we rebind the same mm twice; mpol_rebind_mm()
         * is idempotent.  Also migrate pages in each mm to new nodes.
         */
        migrate = is_memory_migrate(cs);
        for (i = 0; i < n; i++) {
                struct mm_struct *mm = mmarray[i];

                mpol_rebind_mm(mm, &cs->mems_allowed);
                if (migrate)
                        cpuset_migrate_mm(mm, &oldmem, &cs->mems_allowed);
                mmput(mm);
        }

        /* We're done rebinding vma's to this cpuset's new mems_allowed. */
        kfree(mmarray);
        cpuset_being_rebound = NULL;
        retval = 0;
done:
        return retval;
}
int current_cpuset_is_being_rebound(void)
{
        return task_cs(current) == cpuset_being_rebound;
}
/*
 * Call with manage_mutex held.
 */

static int update_memory_pressure_enabled(struct cpuset *cs, char *buf)
{
        if (simple_strtoul(buf, NULL, 10) != 0)
                cpuset_memory_pressure_enabled = 1;
        else
                cpuset_memory_pressure_enabled = 0;
        return 0;
}
/*
 * update_flag - read a 0 or a 1 in a file and update associated flag
 * bit: the bit to update (CS_CPU_EXCLUSIVE, CS_MEM_EXCLUSIVE,
 *                              CS_SCHED_LOAD_BALANCE,
 *                              CS_NOTIFY_ON_RELEASE, CS_MEMORY_MIGRATE,
 *                              CS_SPREAD_PAGE, CS_SPREAD_SLAB)
 * cs:  the cpuset to update
 * buf: the buffer where we read the 0 or 1
 *
 * Call with manage_mutex held.
 */
static int update_flag(cpuset_flagbits_t bit, struct cpuset *cs, char *buf)
{
        int turning_on;
        struct cpuset trialcs;
        int err;
        int cpus_nonempty, balance_flag_changed;

        turning_on = (simple_strtoul(buf, NULL, 10) != 0);

        trialcs = *cs;
        if (turning_on)
                set_bit(bit, &trialcs.flags);
        else
                clear_bit(bit, &trialcs.flags);

        err = validate_change(cs, &trialcs);
        if (err < 0)
                return err;
        cpus_nonempty = !cpus_empty(trialcs.cpus_allowed);
        balance_flag_changed = (is_sched_load_balance(cs) !=
                                        is_sched_load_balance(&trialcs));

        mutex_lock(&callback_mutex);
        cs->flags = trialcs.flags;
        mutex_unlock(&callback_mutex);

        if (cpus_nonempty && balance_flag_changed)
                rebuild_sched_domains();

        return 0;
}
/*
 * Frequency meter - How fast is some event occurring?
 *
 * These routines manage a digitally filtered, constant time based,
 * event frequency meter.  There are four routines:
 *   fmeter_init() - initialize a frequency meter.
 *   fmeter_markevent() - called each time the event happens.
 *   fmeter_getrate() - returns the recent rate of such events.
 *   fmeter_update() - internal routine used to update fmeter.
 *
 * A common data structure is passed to each of these routines,
 * which is used to keep track of the state required to manage the
 * frequency meter and its digital filter.
 *
 * The filter works on the number of events marked per unit time.
 * The filter is single-pole low-pass recursive (IIR).  The time unit
 * is 1 second.  Arithmetic is done using 32-bit integers scaled to
 * simulate 3 decimal digits of precision (multiplied by 1000).
 *
 * With an FM_COEF of 933, and a time base of 1 second, the filter
 * has a half-life of 10 seconds, meaning that if the events quit
 * happening, then the rate returned from the fmeter_getrate()
 * will be cut in half each 10 seconds, until it converges to zero.
 *
 * It is not worth doing a real infinitely recursive filter.  If more
 * than FM_MAXTICKS ticks have elapsed since the last filter event,
 * just compute FM_MAXTICKS ticks worth, by which point the level
 * will be stale.
 *
 * Limit the count of unprocessed events to FM_MAXCNT, so as to avoid
 * arithmetic overflow in the fmeter_update() routine.
 *
 * Given the simple 32 bit integer arithmetic used, this meter works
 * best for reporting rates between one per millisecond (msec) and
 * one per 32 (approx) seconds.  At constant rates faster than one
 * per msec it maxes out at values just under 1,000,000.  At constant
 * rates between one per msec, and one per second it will stabilize
 * to a value N*1000, where N is the rate of events per second.
 * At constant rates between one per second and one per 32 seconds,
 * it will be choppy, moving up on the seconds that have an event,
 * and then decaying until the next event.  At rates slower than
 * about one in 32 seconds, it decays all the way back to zero between
 * each event.
 */

#define FM_COEF 933             /* coefficient for half-life of 10 secs */
#define FM_MAXTICKS ((time_t)99) /* useless computing more ticks than this */
#define FM_MAXCNT 1000000       /* limit cnt to avoid overflow */
#define FM_SCALE 1000           /* faux fixed point scale */
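/*
 * Worked example of the decay arithmetic (illustrative only): with no
 * new events, each elapsed second applies
 *
 *      val = (FM_COEF * val) / FM_SCALE        (i.e. val *= 0.933)
 *
 * so after 10 idle seconds val is scaled by 0.933^10 ~= 0.50, which is
 * the ten second half-life claimed above.  A steady rate of one event
 * per second settles where decay balances input:
 *
 *      val = (933 * val) / 1000 + (1000 - 933),  giving val ~= 1000,
 *
 * matching the "stabilizes to N*1000 for N events/sec" behavior.
 */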
/* Initialize a frequency meter */
static void fmeter_init(struct fmeter *fmp)
{
        fmp->cnt = 0;
        fmp->val = 0;
        fmp->time = 0;
        spin_lock_init(&fmp->lock);
}
/* Internal meter update - process cnt events and update value */
static void fmeter_update(struct fmeter *fmp)
{
        time_t now = get_seconds();
        time_t ticks = now - fmp->time;

        if (ticks == 0)
                return;

        ticks = min(FM_MAXTICKS, ticks);
        while (ticks-- > 0)
                fmp->val = (FM_COEF * fmp->val) / FM_SCALE;
        fmp->time = now;

        fmp->val += ((FM_SCALE - FM_COEF) * fmp->cnt) / FM_SCALE;
        fmp->cnt = 0;
}
/* Process any previous ticks, then bump cnt by one (times scale). */
static void fmeter_markevent(struct fmeter *fmp)
{
        spin_lock(&fmp->lock);
        fmeter_update(fmp);
        fmp->cnt = min(FM_MAXCNT, fmp->cnt + FM_SCALE);
        spin_unlock(&fmp->lock);
}
/* Process any previous ticks, then return current value. */
static int fmeter_getrate(struct fmeter *fmp)
{
        int val;

        spin_lock(&fmp->lock);
        fmeter_update(fmp);
        val = fmp->val;
        spin_unlock(&fmp->lock);
        return val;
}
static int cpuset_can_attach(struct cgroup_subsys *ss,
                             struct cgroup *cont, struct task_struct *tsk)
{
        struct cpuset *cs = cgroup_cs(cont);

        if (cpus_empty(cs->cpus_allowed) || nodes_empty(cs->mems_allowed))
                return -ENOSPC;

        return security_task_setscheduler(tsk, 0, NULL);
}
static void cpuset_attach(struct cgroup_subsys *ss,
                          struct cgroup *cont, struct cgroup *oldcont,
                          struct task_struct *tsk)
{
        cpumask_t cpus;
        nodemask_t from, to;
        struct mm_struct *mm;
        struct cpuset *cs = cgroup_cs(cont);
        struct cpuset *oldcs = cgroup_cs(oldcont);

        mutex_lock(&callback_mutex);
        guarantee_online_cpus(cs, &cpus);
        set_cpus_allowed(tsk, cpus);
        mutex_unlock(&callback_mutex);

        from = oldcs->mems_allowed;
        to = cs->mems_allowed;
        mm = get_task_mm(tsk);
        if (mm) {
                mpol_rebind_mm(mm, &to);
                if (is_memory_migrate(cs))
                        cpuset_migrate_mm(mm, &from, &to);
                mmput(mm);
        }
}
/* The various types of files and directories in a cpuset file system */

typedef enum {
        FILE_MEMORY_MIGRATE,
        FILE_CPULIST,
        FILE_MEMLIST,
        FILE_CPU_EXCLUSIVE,
        FILE_MEM_EXCLUSIVE,
        FILE_SCHED_LOAD_BALANCE,
        FILE_MEMORY_PRESSURE_ENABLED,
        FILE_MEMORY_PRESSURE,
        FILE_SPREAD_PAGE,
        FILE_SPREAD_SLAB,
} cpuset_filetype_t;
static ssize_t cpuset_common_file_write(struct cgroup *cont,
                                        struct cftype *cft,
                                        struct file *file,
                                        const char __user *userbuf,
                                        size_t nbytes, loff_t *unused_ppos)
{
        struct cpuset *cs = cgroup_cs(cont);
        cpuset_filetype_t type = cft->private;
        char *buffer;
        int retval = 0;
        /* Crude upper limit on largest legitimate cpulist user might write. */
        if (nbytes > 100U + 6 * max(NR_CPUS, MAX_NUMNODES))
                return -E2BIG;

        /* +1 for nul-terminator */
        if ((buffer = kmalloc(nbytes + 1, GFP_KERNEL)) == 0)
                return -ENOMEM;

        if (copy_from_user(buffer, userbuf, nbytes)) {
                retval = -EFAULT;
                goto out1;
        }
        buffer[nbytes] = 0;     /* nul-terminate */
        cgroup_lock();

        if (cgroup_is_removed(cont)) {
                retval = -ENODEV;
                goto out2;
        }

        switch (type) {
        case FILE_CPULIST:
                retval = update_cpumask(cs, buffer);
                break;
        case FILE_MEMLIST:
                retval = update_nodemask(cs, buffer);
                break;
        case FILE_CPU_EXCLUSIVE:
                retval = update_flag(CS_CPU_EXCLUSIVE, cs, buffer);
                break;
        case FILE_MEM_EXCLUSIVE:
                retval = update_flag(CS_MEM_EXCLUSIVE, cs, buffer);
                break;
        case FILE_SCHED_LOAD_BALANCE:
                retval = update_flag(CS_SCHED_LOAD_BALANCE, cs, buffer);
                break;
        case FILE_MEMORY_MIGRATE:
                retval = update_flag(CS_MEMORY_MIGRATE, cs, buffer);
                break;
        case FILE_MEMORY_PRESSURE_ENABLED:
                retval = update_memory_pressure_enabled(cs, buffer);
                break;
        case FILE_MEMORY_PRESSURE:
                retval = -EACCES;
                break;
        case FILE_SPREAD_PAGE:
                retval = update_flag(CS_SPREAD_PAGE, cs, buffer);
                cs->mems_generation = cpuset_mems_generation++;
                break;
        case FILE_SPREAD_SLAB:
                retval = update_flag(CS_SPREAD_SLAB, cs, buffer);
                cs->mems_generation = cpuset_mems_generation++;
                break;
        default:
                retval = -EINVAL;
                goto out2;
        }

        if (retval == 0)
                retval = nbytes;
out2:
        cgroup_unlock();
out1:
        kfree(buffer);
        return retval;
}
/*
 * These ascii lists should be read in a single call, by using a user
 * buffer large enough to hold the entire map.  If read in smaller
 * chunks, there is no guarantee of atomicity.  Since the display format
 * used, list of ranges of sequential numbers, is variable length,
 * and since these maps can change value dynamically, one could read
 * gibberish by doing partial reads while a list was changing.
 * A single large read to a buffer that crosses a page boundary is
 * ok, because the result being copied to user land is not recomputed
 * across a page fault.
 */
static int cpuset_sprintf_cpulist(char *page, struct cpuset *cs)
{
        cpumask_t mask;

        mutex_lock(&callback_mutex);
        mask = cs->cpus_allowed;
        mutex_unlock(&callback_mutex);

        return cpulist_scnprintf(page, PAGE_SIZE, mask);
}

static int cpuset_sprintf_memlist(char *page, struct cpuset *cs)
{
        nodemask_t mask;

        mutex_lock(&callback_mutex);
        mask = cs->mems_allowed;
        mutex_unlock(&callback_mutex);

        return nodelist_scnprintf(page, PAGE_SIZE, mask);
}
static ssize_t cpuset_common_file_read(struct cgroup *cont,
                                       struct cftype *cft,
                                       struct file *file,
                                       char __user *buf,
                                       size_t nbytes, loff_t *ppos)
{
        struct cpuset *cs = cgroup_cs(cont);
        cpuset_filetype_t type = cft->private;
        char *page;
        ssize_t retval = 0;
        char *s;

        if (!(page = (char *)__get_free_page(GFP_TEMPORARY)))
                return -ENOMEM;

        s = page;

        switch (type) {
        case FILE_CPULIST:
                s += cpuset_sprintf_cpulist(s, cs);
                break;
        case FILE_MEMLIST:
                s += cpuset_sprintf_memlist(s, cs);
                break;
        case FILE_CPU_EXCLUSIVE:
                *s++ = is_cpu_exclusive(cs) ? '1' : '0';
                break;
        case FILE_MEM_EXCLUSIVE:
                *s++ = is_mem_exclusive(cs) ? '1' : '0';
                break;
        case FILE_SCHED_LOAD_BALANCE:
                *s++ = is_sched_load_balance(cs) ? '1' : '0';
                break;
        case FILE_MEMORY_MIGRATE:
                *s++ = is_memory_migrate(cs) ? '1' : '0';
                break;
        case FILE_MEMORY_PRESSURE_ENABLED:
                *s++ = cpuset_memory_pressure_enabled ? '1' : '0';
                break;
        case FILE_MEMORY_PRESSURE:
                s += sprintf(s, "%d", fmeter_getrate(&cs->fmeter));
                break;
        case FILE_SPREAD_PAGE:
                *s++ = is_spread_page(cs) ? '1' : '0';
                break;
        case FILE_SPREAD_SLAB:
                *s++ = is_spread_slab(cs) ? '1' : '0';
                break;
        default:
                retval = -EINVAL;
                goto out;
        }
        *s++ = '\n';

        retval = simple_read_from_buffer(buf, nbytes, ppos, page, s - page);
out:
        free_page((unsigned long)page);
        return retval;
}
/*
 * for the common functions, 'private' gives the type of file
 */

static struct cftype cft_cpus = {
        .name = "cpus",
        .read = cpuset_common_file_read,
        .write = cpuset_common_file_write,
        .private = FILE_CPULIST,
};

static struct cftype cft_mems = {
        .name = "mems",
        .read = cpuset_common_file_read,
        .write = cpuset_common_file_write,
        .private = FILE_MEMLIST,
};

static struct cftype cft_cpu_exclusive = {
        .name = "cpu_exclusive",
        .read = cpuset_common_file_read,
        .write = cpuset_common_file_write,
        .private = FILE_CPU_EXCLUSIVE,
};

static struct cftype cft_mem_exclusive = {
        .name = "mem_exclusive",
        .read = cpuset_common_file_read,
        .write = cpuset_common_file_write,
        .private = FILE_MEM_EXCLUSIVE,
};

static struct cftype cft_sched_load_balance = {
        .name = "sched_load_balance",
        .read = cpuset_common_file_read,
        .write = cpuset_common_file_write,
        .private = FILE_SCHED_LOAD_BALANCE,
};

static struct cftype cft_memory_migrate = {
        .name = "memory_migrate",
        .read = cpuset_common_file_read,
        .write = cpuset_common_file_write,
        .private = FILE_MEMORY_MIGRATE,
};

static struct cftype cft_memory_pressure_enabled = {
        .name = "memory_pressure_enabled",
        .read = cpuset_common_file_read,
        .write = cpuset_common_file_write,
        .private = FILE_MEMORY_PRESSURE_ENABLED,
};

static struct cftype cft_memory_pressure = {
        .name = "memory_pressure",
        .read = cpuset_common_file_read,
        .write = cpuset_common_file_write,
        .private = FILE_MEMORY_PRESSURE,
};

static struct cftype cft_spread_page = {
        .name = "memory_spread_page",
        .read = cpuset_common_file_read,
        .write = cpuset_common_file_write,
        .private = FILE_SPREAD_PAGE,
};

static struct cftype cft_spread_slab = {
        .name = "memory_spread_slab",
        .read = cpuset_common_file_read,
        .write = cpuset_common_file_write,
        .private = FILE_SPREAD_SLAB,
};
static int cpuset_populate(struct cgroup_subsys *ss, struct cgroup *cont)
{
        int err;

        if ((err = cgroup_add_file(cont, ss, &cft_cpus)) < 0)
                return err;
        if ((err = cgroup_add_file(cont, ss, &cft_mems)) < 0)
                return err;
        if ((err = cgroup_add_file(cont, ss, &cft_cpu_exclusive)) < 0)
                return err;
        if ((err = cgroup_add_file(cont, ss, &cft_mem_exclusive)) < 0)
                return err;
        if ((err = cgroup_add_file(cont, ss, &cft_memory_migrate)) < 0)
                return err;
        if ((err = cgroup_add_file(cont, ss, &cft_sched_load_balance)) < 0)
                return err;
        if ((err = cgroup_add_file(cont, ss, &cft_memory_pressure)) < 0)
                return err;
        if ((err = cgroup_add_file(cont, ss, &cft_spread_page)) < 0)
                return err;
        if ((err = cgroup_add_file(cont, ss, &cft_spread_slab)) < 0)
                return err;
        /* memory_pressure_enabled is in root cpuset only */
        if (err == 0 && !cont->parent)
                err = cgroup_add_file(cont, ss,
                                         &cft_memory_pressure_enabled);
        return 0;
}
/*
 * post_clone() is called at the end of cgroup_clone().
 * 'cgroup' was just created automatically as a result of
 * a cgroup_clone(), and the current task is about to
 * be moved into 'cgroup'.
 *
 * Currently we refuse to set up the cgroup - thereby
 * refusing the task to be entered, and as a result refusing
 * the sys_unshare() or clone() which initiated it - if any
 * sibling cpusets have exclusive cpus or mem.
 *
 * If this becomes a problem for some users who wish to
 * allow that scenario, then cpuset_post_clone() could be
 * changed to grant parent->cpus_allowed-sibling_cpus_exclusive
 * (and likewise for mems) to the new cgroup.
 */
static void cpuset_post_clone(struct cgroup_subsys *ss,
                              struct cgroup *cgroup)
{
        struct cgroup *parent, *child;
        struct cpuset *cs, *parent_cs;

        parent = cgroup->parent;
        list_for_each_entry(child, &parent->children, sibling) {
                cs = cgroup_cs(child);
                if (is_mem_exclusive(cs) || is_cpu_exclusive(cs))
                        return;
        }
        cs = cgroup_cs(cgroup);
        parent_cs = cgroup_cs(parent);

        cs->mems_allowed = parent_cs->mems_allowed;
        cs->cpus_allowed = parent_cs->cpus_allowed;
        return;
}
/*
 *      cpuset_create - create a cpuset
 *      parent: cpuset that will be parent of the new cpuset.
 *      name:   name of the new cpuset. Will be strcpy'ed.
 *      mode:   mode to set on new inode
 *
 *      Must be called with the mutex on the parent inode held
 */

static struct cgroup_subsys_state *cpuset_create(
        struct cgroup_subsys *ss,
        struct cgroup *cont)
{
        struct cpuset *cs;
        struct cpuset *parent;

        if (!cont->parent) {
                /* This is early initialization for the top cgroup */
                top_cpuset.mems_generation = cpuset_mems_generation++;
                return &top_cpuset.css;
        }
        parent = cgroup_cs(cont->parent);
        cs = kmalloc(sizeof(*cs), GFP_KERNEL);
        if (!cs)
                return ERR_PTR(-ENOMEM);

        cpuset_update_task_memory_state();
        cs->flags = 0;
        if (is_spread_page(parent))
                set_bit(CS_SPREAD_PAGE, &cs->flags);
        if (is_spread_slab(parent))
                set_bit(CS_SPREAD_SLAB, &cs->flags);
        set_bit(CS_SCHED_LOAD_BALANCE, &cs->flags);
        cs->cpus_allowed = CPU_MASK_NONE;
        cs->mems_allowed = NODE_MASK_NONE;
        cs->mems_generation = cpuset_mems_generation++;
        fmeter_init(&cs->fmeter);

        cs->parent = parent;
        number_of_cpusets++;
        return &cs->css;
}
/*
 * Locking note on the strange update_flag() call below:
 *
 * If the cpuset being removed has its flag 'sched_load_balance'
 * enabled, then simulate turning sched_load_balance off, which
 * will call rebuild_sched_domains().  The lock_cpu_hotplug()
 * call in rebuild_sched_domains() must not be made while holding
 * callback_mutex.  Elsewhere the kernel nests callback_mutex inside
 * lock_cpu_hotplug() calls.  So the reverse nesting would risk an
 * ABBA deadlock.
 */

static void cpuset_destroy(struct cgroup_subsys *ss, struct cgroup *cont)
{
        struct cpuset *cs = cgroup_cs(cont);

        cpuset_update_task_memory_state();

        if (is_sched_load_balance(cs))
                update_flag(CS_SCHED_LOAD_BALANCE, cs, "0");

        number_of_cpusets--;
        kfree(cs);
}
struct cgroup_subsys cpuset_subsys = {
        .name = "cpuset",
        .create = cpuset_create,
        .destroy = cpuset_destroy,
        .can_attach = cpuset_can_attach,
        .attach = cpuset_attach,
        .populate = cpuset_populate,
        .post_clone = cpuset_post_clone,
        .subsys_id = cpuset_subsys_id,
        .early_init = 1,
};
/**
 * cpuset_init_early - just enough so that the calls to
 * cpuset_update_task_memory_state() in early init code
 * are harmless.
 **/

int __init cpuset_init_early(void)
{
        top_cpuset.mems_generation = cpuset_mems_generation++;
        return 0;
}
/**
 * cpuset_init - initialize cpusets at system boot
 *
 * Description: Initialize top_cpuset and the cpuset internal file system,
 **/

int __init cpuset_init(void)
{
        int err = 0;

        top_cpuset.cpus_allowed = CPU_MASK_ALL;
        top_cpuset.mems_allowed = NODE_MASK_ALL;

        fmeter_init(&top_cpuset.fmeter);
        top_cpuset.mems_generation = cpuset_mems_generation++;
        set_bit(CS_SCHED_LOAD_BALANCE, &top_cpuset.flags);

        err = register_filesystem(&cpuset_fs_type);
        if (err < 0)
                return err;

        number_of_cpusets = 1;
        return 0;
}
/*
 * If common_cpu_mem_hotplug_unplug(), below, unplugs any CPUs
 * or memory nodes, we need to walk over the cpuset hierarchy,
 * removing that CPU or node from all cpusets.  If this removes the
 * last CPU or node from a cpuset, then the guarantee_online_cpus()
 * or guarantee_online_mems() code will use that emptied cpuset's
 * parent online CPUs or nodes.  Cpusets that were already empty of
 * CPUs or nodes are left empty.
 *
 * This routine is intentionally inefficient in a couple of regards.
 * It will check all cpusets in a subtree even if the top cpuset of
 * the subtree has no offline CPUs or nodes.  It checks both CPUs and
 * nodes, even though the caller could have been coded to know that
 * only one of CPUs or nodes needed to be checked on a given call.
 * This was done to minimize text size rather than cpu cycles.
 *
 * Call with both manage_mutex and callback_mutex held.
 *
 * Recursive, on depth of cpuset subtree.
 */
static void guarantee_online_cpus_mems_in_subtree(const struct cpuset *cur)
{
        struct cgroup *cont;
        struct cpuset *c;

        /* Each of our child cpusets mems must be online */
        list_for_each_entry(cont, &cur->css.cgroup->children, sibling) {
                c = cgroup_cs(cont);
                guarantee_online_cpus_mems_in_subtree(c);
                if (!cpus_empty(c->cpus_allowed))
                        guarantee_online_cpus(c, &c->cpus_allowed);
                if (!nodes_empty(c->mems_allowed))
                        guarantee_online_mems(c, &c->mems_allowed);
        }
}
/*
 * The cpus_allowed and mems_allowed nodemasks in the top_cpuset track
 * cpu_online_map and node_states[N_HIGH_MEMORY].  Force the top cpuset to
 * track what's online after any CPU or memory node hotplug or unplug
 * event.
 *
 * To ensure that we don't remove a CPU or node from the top cpuset
 * that is currently in use by a child cpuset (which would violate
 * the rule that cpusets must be subsets of their parent), we first
 * call the recursive routine guarantee_online_cpus_mems_in_subtree().
 *
 * Since there are two callers of this routine, one for CPU hotplug
 * events and one for memory node hotplug events, we could have coded
 * two separate routines here.  We code it as a single common routine
 * in order to minimize text size.
 */

static void common_cpu_mem_hotplug_unplug(void)
{
        cgroup_lock();
        mutex_lock(&callback_mutex);

        guarantee_online_cpus_mems_in_subtree(&top_cpuset);
        top_cpuset.cpus_allowed = cpu_online_map;
        top_cpuset.mems_allowed = node_states[N_HIGH_MEMORY];

        mutex_unlock(&callback_mutex);
        cgroup_unlock();
}
/*
 * The top_cpuset tracks what CPUs and Memory Nodes are online,
 * period.  This is necessary in order to make cpusets transparent
 * (of no effect) on systems that are actively using CPU hotplug
 * but making no active use of cpusets.
 *
 * This routine ensures that top_cpuset.cpus_allowed tracks
 * cpu_online_map on each CPU hotplug (cpuhp) event.
 */

static int cpuset_handle_cpuhp(struct notifier_block *unused_nb,
                                unsigned long phase, void *unused_cpu)
{
        if (phase == CPU_DYING || phase == CPU_DYING_FROZEN)
                return NOTIFY_DONE;

        common_cpu_mem_hotplug_unplug();
        return 0;
}
#ifdef CONFIG_MEMORY_HOTPLUG
/*
 * Keep top_cpuset.mems_allowed tracking node_states[N_HIGH_MEMORY].
 * Call this routine anytime after you change
 * node_states[N_HIGH_MEMORY].
 * See also the previous routine cpuset_handle_cpuhp().
 */

void cpuset_track_online_nodes(void)
{
        common_cpu_mem_hotplug_unplug();
}
#endif
/**
 * cpuset_init_smp - initialize cpus_allowed
 *
 * Description: Finish top cpuset after cpu, node maps are initialized
 **/

void __init cpuset_init_smp(void)
{
        top_cpuset.cpus_allowed = cpu_online_map;
        top_cpuset.mems_allowed = node_states[N_HIGH_MEMORY];

        hotcpu_notifier(cpuset_handle_cpuhp, 0);
}
/**
 * cpuset_cpus_allowed - return cpus_allowed mask from a task's cpuset.
 * @tsk: pointer to task_struct from which to obtain cpuset->cpus_allowed.
 *
 * Description: Returns the cpumask_t cpus_allowed of the cpuset
 * attached to the specified @tsk.  Guaranteed to return some non-empty
 * subset of cpu_online_map, even if this means going outside the
 * task's cpuset.
 **/

cpumask_t cpuset_cpus_allowed(struct task_struct *tsk)
{
        cpumask_t mask;

        mutex_lock(&callback_mutex);
        task_lock(tsk);
        guarantee_online_cpus(task_cs(tsk), &mask);
        task_unlock(tsk);
        mutex_unlock(&callback_mutex);

        return mask;
}
void cpuset_init_current_mems_allowed(void)
{
        current->mems_allowed = NODE_MASK_ALL;
}
/**
 * cpuset_mems_allowed - return mems_allowed mask from a task's cpuset.
 * @tsk: pointer to task_struct from which to obtain cpuset->mems_allowed.
 *
 * Description: Returns the nodemask_t mems_allowed of the cpuset
 * attached to the specified @tsk.  Guaranteed to return some non-empty
 * subset of node_states[N_HIGH_MEMORY], even if this means going outside the
 * task's cpuset.
 **/

nodemask_t cpuset_mems_allowed(struct task_struct *tsk)
{
        nodemask_t mask;

        mutex_lock(&callback_mutex);
        task_lock(tsk);
        guarantee_online_mems(task_cs(tsk), &mask);
        task_unlock(tsk);
        mutex_unlock(&callback_mutex);

        return mask;
}
/**
 * cpuset_zonelist_valid_mems_allowed - check zonelist vs. current mems_allowed
 * @zl: the zonelist to be checked
 *
 * Are any of the nodes on zonelist zl allowed in current->mems_allowed?
 */
int cpuset_zonelist_valid_mems_allowed(struct zonelist *zl)
{
        int i;

        for (i = 0; zl->zones[i]; i++) {
                int nid = zone_to_nid(zl->zones[i]);

                if (node_isset(nid, current->mems_allowed))
                        return 1;
        }
        return 0;
}
/*
 * nearest_exclusive_ancestor() - Returns the nearest mem_exclusive
 * ancestor to the specified cpuset.  Call holding callback_mutex.
 * If no ancestor is mem_exclusive (an unusual configuration), then
 * returns the root cpuset.
 */
static const struct cpuset *nearest_exclusive_ancestor(const struct cpuset *cs)
{
        while (!is_mem_exclusive(cs) && cs->parent)
                cs = cs->parent;
        return cs;
}
/**
 * cpuset_zone_allowed_softwall - Can we allocate on zone z's memory node?
 * @z: is this zone on an allowed node?
 * @gfp_mask: memory allocation flags
 *
 * If we're in interrupt, yes, we can always allocate.  If
 * __GFP_THISNODE is set, yes, we can always allocate.  If zone
 * z's node is in our task's mems_allowed, yes.  If it's not a
 * __GFP_HARDWALL request and this zone's node is in the nearest
 * mem_exclusive cpuset ancestor to this task's cpuset, yes.
 * If the task has been OOM killed and has access to memory reserves
 * as specified by the TIF_MEMDIE flag, yes.
 * Otherwise, no.
 *
 * If __GFP_HARDWALL is set, cpuset_zone_allowed_softwall()
 * reduces to cpuset_zone_allowed_hardwall().  Otherwise,
 * cpuset_zone_allowed_softwall() might sleep, and might allow a zone
 * from an enclosing cpuset.
 *
 * cpuset_zone_allowed_hardwall() only handles the simpler case of
 * hardwall cpusets, and never sleeps.
 *
 * The __GFP_THISNODE placement logic is really handled elsewhere,
 * by forcibly using a zonelist starting at a specified node, and by
 * (in get_page_from_freelist()) refusing to consider the zones for
 * any node on the zonelist except the first.  By the time any such
 * calls get to this routine, we should just shut up and say 'yes'.
 *
 * GFP_USER allocations are marked with the __GFP_HARDWALL bit,
 * and do not allow allocations outside the current task's cpuset
 * unless the task has been OOM killed as is marked TIF_MEMDIE.
 * GFP_KERNEL allocations are not so marked, so can escape to the
 * nearest enclosing mem_exclusive ancestor cpuset.
 *
 * Scanning up parent cpusets requires callback_mutex.  The
 * __alloc_pages() routine only calls here with __GFP_HARDWALL bit
 * _not_ set if it's a GFP_KERNEL allocation, and all nodes in the
 * current task's mems_allowed came up empty on the first pass over
 * the zonelist.  So only GFP_KERNEL allocations, if all nodes in the
 * cpuset are short of memory, might require taking the callback_mutex
 * mutex.
 *
 * The first call here from mm/page_alloc:get_page_from_freelist()
 * has __GFP_HARDWALL set in gfp_mask, enforcing hardwall cpusets,
 * so no allocation on a node outside the cpuset is allowed (unless
 * in interrupt, of course).
 *
 * The second pass through get_page_from_freelist() doesn't even call
 * here for GFP_ATOMIC calls.  For those calls, the __alloc_pages()
 * variable 'wait' is not set, and the bit ALLOC_CPUSET is not set
 * in alloc_flags.  That logic and the checks below have the combined
 * effect of:
 *      in_interrupt - any node ok (current task context irrelevant)
 *      GFP_ATOMIC   - any node ok
 *      TIF_MEMDIE   - any node ok
 *      GFP_KERNEL   - any node in enclosing mem_exclusive cpuset ok
 *      GFP_USER     - only nodes in current task's mems allowed ok.
 *
 * Rule:
 *    Don't call cpuset_zone_allowed_softwall if you can't sleep, unless you
 *    pass in the __GFP_HARDWALL flag set in gfp_flag, which disables
 *    the code that might scan up ancestor cpusets and sleep.
 */
int __cpuset_zone_allowed_softwall(struct zone *z, gfp_t gfp_mask)
{
        int node;                       /* node that zone z is on */
        const struct cpuset *cs;        /* current cpuset ancestors */
        int allowed;                    /* is allocation in zone z allowed? */

        if (in_interrupt() || (gfp_mask & __GFP_THISNODE))
                return 1;
        node = zone_to_nid(z);
        might_sleep_if(!(gfp_mask & __GFP_HARDWALL));
        if (node_isset(node, current->mems_allowed))
                return 1;
        /*
         * Allow tasks that have access to memory reserves because they have
         * been OOM killed to get memory anywhere.
         */
        if (unlikely(test_thread_flag(TIF_MEMDIE)))
                return 1;
        if (gfp_mask & __GFP_HARDWALL)  /* If hardwall request, stop here */
                return 0;

        if (current->flags & PF_EXITING) /* Let dying task have memory */
                return 1;

        /* Not hardwall and node outside mems_allowed: scan up cpusets */
        mutex_lock(&callback_mutex);

        task_lock(current);
        cs = nearest_exclusive_ancestor(task_cs(current));
        task_unlock(current);

        allowed = node_isset(node, cs->mems_allowed);
        mutex_unlock(&callback_mutex);
        return allowed;
}
/**
 * cpuset_zone_allowed_hardwall - Can we allocate on zone z's memory node?
 * @z: is this zone on an allowed node?
 * @gfp_mask: memory allocation flags
 *
 * If we're in interrupt, yes, we can always allocate.
 * If __GFP_THISNODE is set, yes, we can always allocate.  If zone
 * z's node is in our task's mems_allowed, yes.  If the task has been
 * OOM killed and has access to memory reserves as specified by the
 * TIF_MEMDIE flag, yes.  Otherwise, no.
 *
 * The __GFP_THISNODE placement logic is really handled elsewhere,
 * by forcibly using a zonelist starting at a specified node, and by
 * (in get_page_from_freelist()) refusing to consider the zones for
 * any node on the zonelist except the first.  By the time any such
 * calls get to this routine, we should just shut up and say 'yes'.
 *
 * Unlike the cpuset_zone_allowed_softwall() variant, above,
 * this variant requires that the zone be in the current task's
 * mems_allowed or that we're in interrupt.  It does not scan up the
 * cpuset hierarchy for the nearest enclosing mem_exclusive cpuset.
 * It never sleeps.
 */
int __cpuset_zone_allowed_hardwall(struct zone *z, gfp_t gfp_mask)
{
        int node;                       /* node that zone z is on */

        if (in_interrupt() || (gfp_mask & __GFP_THISNODE))
                return 1;
        node = zone_to_nid(z);
        if (node_isset(node, current->mems_allowed))
                return 1;
        /*
         * Allow tasks that have access to memory reserves because they have
         * been OOM killed to get memory anywhere.
         */
        if (unlikely(test_thread_flag(TIF_MEMDIE)))
                return 1;
        return 0;
}
/**
 * cpuset_lock - lock out any changes to cpuset structures
 *
 * The out of memory (oom) code needs to mutex_lock cpusets
 * from being changed while it scans the tasklist looking for a
 * task in an overlapping cpuset.  Expose callback_mutex via this
 * cpuset_lock() routine, so the oom code can lock it, before
 * locking the task list.  The tasklist_lock is a spinlock, so
 * must be taken inside callback_mutex.
 */

void cpuset_lock(void)
{
        mutex_lock(&callback_mutex);
}

/**
 * cpuset_unlock - release lock on cpuset changes
 *
 * Undo the lock taken in a previous cpuset_lock() call.
 */

void cpuset_unlock(void)
{
        mutex_unlock(&callback_mutex);
}
/**
 * cpuset_mem_spread_node() - On which node to begin search for a page
 *
 * If a task is marked PF_SPREAD_PAGE or PF_SPREAD_SLAB (as for
 * tasks in a cpuset with is_spread_page or is_spread_slab set),
 * and if the memory allocation used cpuset_mem_spread_node()
 * to determine on which node to start looking, as it will for
 * certain page cache or slab cache pages such as used for file
 * system buffers and inode caches, then instead of starting on the
 * local node to look for a free page, rather spread the starting
 * node around the task's mems_allowed nodes.
 *
 * We don't have to worry about the returned node being offline
 * because "it can't happen", and even if it did, it would be ok.
 *
 * The routines calling guarantee_online_mems() are careful to
 * only set nodes in task->mems_allowed that are online.  So it
 * should not be possible for the following code to return an
 * offline node.  But if it did, that would be ok, as this routine
 * is not returning the node where the allocation must be, only
 * the node where the search should start.  The zonelist passed to
 * __alloc_pages() will include all nodes.  If the slab allocator
 * is passed an offline node, it will fall back to the local node.
 * See kmem_cache_alloc_node().
 */

int cpuset_mem_spread_node(void)
{
        int node;

        node = next_node(current->cpuset_mem_spread_rotor,
                                current->mems_allowed);
        if (node == MAX_NUMNODES)
                node = first_node(current->mems_allowed);
        current->cpuset_mem_spread_rotor = node;
        return node;
}
EXPORT_SYMBOL_GPL(cpuset_mem_spread_node);
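/*
 * Example (illustrative): a task with mems_allowed = { 2, 5, 9 } and
 * the rotor currently at 5 gets node 9 from this call; the next call
 * runs off the end (next_node() returns MAX_NUMNODES) and wraps via
 * first_node() back to 2.  Over many allocations the starting node
 * thus round-robins over the allowed nodes.
 */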
/**
 * cpuset_mems_allowed_intersects - Does @tsk1's mems_allowed intersect @tsk2's?
 * @tsk1: pointer to task_struct of some task.
 * @tsk2: pointer to task_struct of some other task.
 *
 * Description: Return true if @tsk1's mems_allowed intersects the
 * mems_allowed of @tsk2.  Used by the OOM killer to determine if
 * one of the task's memory usage might impact the memory available
 * to the other.
 **/

int cpuset_mems_allowed_intersects(const struct task_struct *tsk1,
                                   const struct task_struct *tsk2)
{
        return nodes_intersects(tsk1->mems_allowed, tsk2->mems_allowed);
}
/*
 * Collection of memory_pressure is suppressed unless
 * this flag is enabled by writing "1" to the special
 * cpuset file 'memory_pressure_enabled' in the root cpuset.
 */

int cpuset_memory_pressure_enabled __read_mostly;
/**
 * cpuset_memory_pressure_bump - keep stats of per-cpuset reclaims.
 *
 * Keep a running average of the rate of synchronous (direct)
 * page reclaim efforts initiated by tasks in each cpuset.
 *
 * This represents the rate at which some task in the cpuset
 * ran low on memory on all nodes it was allowed to use, and
 * had to enter the kernel's page reclaim code in an effort to
 * create more free memory by tossing clean pages or swapping
 * or writing dirty pages.
 *
 * Display to user space in the per-cpuset read-only file
 * "memory_pressure".  Value displayed is an integer
 * representing the recent rate of entry into the synchronous
 * (direct) page reclaim by any task attached to the cpuset.
 **/

void __cpuset_memory_pressure_bump(void)
{
        task_lock(current);
        fmeter_markevent(&task_cs(current)->fmeter);
        task_unlock(current);
}
#ifdef CONFIG_PROC_PID_CPUSET
/*
 * proc_cpuset_show()
 *  - Print task's cpuset path into seq_file.
 *  - Used for /proc/<pid>/cpuset.
 *  - No need to task_lock(tsk) on this tsk->cpuset reference, as it
 *    doesn't really matter if tsk->cpuset changes after we read it,
 *    and we take manage_mutex, keeping attach_task() from changing it
 *    anyway.  No need to check that tsk->cpuset != NULL, thanks to
 *    the_top_cpuset_hack in cpuset_exit(), which sets an exiting task's
 *    cpuset to top_cpuset.
 */
static int proc_cpuset_show(struct seq_file *m, void *unused_v)
{
        struct pid *pid;
        struct task_struct *tsk;
        char *buf;
        struct cgroup_subsys_state *css;
        int retval;

        retval = -ENOMEM;
        buf = kmalloc(PAGE_SIZE, GFP_KERNEL);
        if (!buf)
                goto out;

        retval = -ESRCH;
        pid = m->private;
        tsk = get_pid_task(pid, PIDTYPE_PID);
        if (!tsk)
                goto out_free;

        retval = -EINVAL;
        cgroup_lock();
        css = task_subsys_state(tsk, cpuset_subsys_id);
        retval = cgroup_path(css->cgroup, buf, PAGE_SIZE);
        if (retval < 0)
                goto out_unlock;
        seq_puts(m, buf);
        seq_putc(m, '\n');
out_unlock:
        cgroup_unlock();
        put_task_struct(tsk);
out_free:
        kfree(buf);
out:
        return retval;
}
static int cpuset_open(struct inode *inode, struct file *file)
{
        struct pid *pid = PROC_I(inode)->pid;
        return single_open(file, proc_cpuset_show, pid);
}

const struct file_operations proc_cpuset_operations = {
        .open           = cpuset_open,
        .read           = seq_read,
        .llseek         = seq_lseek,
        .release        = single_release,
};
#endif  /* CONFIG_PROC_PID_CPUSET */
/* Display task cpus_allowed, mems_allowed in /proc/<pid>/status file. */
char *cpuset_task_status_allowed(struct task_struct *task, char *buffer)
{
        buffer += sprintf(buffer, "Cpus_allowed:\t");
        buffer += cpumask_scnprintf(buffer, PAGE_SIZE, task->cpus_allowed);
        buffer += sprintf(buffer, "\n");
        buffer += sprintf(buffer, "Mems_allowed:\t");
        buffer += nodemask_scnprintf(buffer, PAGE_SIZE, task->mems_allowed);
        buffer += sprintf(buffer, "\n");
        return buffer;
}