#ifndef _LINUX_CPUSET_H
#define _LINUX_CPUSET_H
/*
 *  cpuset interface
 *
 *  Copyright (C) 2003 BULL SA
 *  Copyright (C) 2004-2006 Silicon Graphics, Inc.
 *
 */
#include <linux/sched.h>
#include <linux/cpumask.h>
#include <linux/nodemask.h>
#include <linux/mm.h>
#ifdef CONFIG_CPUSETS

extern int number_of_cpusets;	/* How many cpusets are defined in system? */

extern int cpuset_init(void);
extern void cpuset_init_smp(void);
extern void cpuset_update_active_cpus(bool cpu_online);
extern void cpuset_cpus_allowed(struct task_struct *p, struct cpumask *mask);
extern void cpuset_cpus_allowed_fallback(struct task_struct *p);
extern nodemask_t cpuset_mems_allowed(struct task_struct *p);
#define cpuset_current_mems_allowed (current->mems_allowed)
void cpuset_init_current_mems_allowed(void);
int cpuset_nodemask_valid_mems_allowed(nodemask_t *nodemask);

extern int __cpuset_node_allowed_softwall(int node, gfp_t gfp_mask);
extern int __cpuset_node_allowed_hardwall(int node, gfp_t gfp_mask);
static inline int cpuset_node_allowed_softwall(int node, gfp_t gfp_mask)
{
	/* Fast path: with no cpusets beyond the root, everything is allowed. */
	return number_of_cpusets <= 1 ||
		__cpuset_node_allowed_softwall(node, gfp_mask);
}

static inline int cpuset_node_allowed_hardwall(int node, gfp_t gfp_mask)
{
	return number_of_cpusets <= 1 ||
		__cpuset_node_allowed_hardwall(node, gfp_mask);
}

static inline int cpuset_zone_allowed_softwall(struct zone *z, gfp_t gfp_mask)
{
	return cpuset_node_allowed_softwall(zone_to_nid(z), gfp_mask);
}

static inline int cpuset_zone_allowed_hardwall(struct zone *z, gfp_t gfp_mask)
{
	return cpuset_node_allowed_hardwall(zone_to_nid(z), gfp_mask);
}
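
/*
 * Illustrative sketch (not part of this header): an allocator's zonelist
 * scan might use the softwall check above to skip zones the current
 * cpuset forbids.  The loop shape and the allocation step below are
 * assumptions for illustration only.
 *
 *	struct zoneref *z;
 *	struct zone *zone;
 *
 *	for_each_zone_zonelist(zone, z, zonelist, highest_zoneidx) {
 *		if (!cpuset_zone_allowed_softwall(zone, gfp_mask))
 *			continue;
 *		... try to allocate a page from this zone ...
 *	}
 */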
extern int cpuset_mems_allowed_intersects(const struct task_struct *tsk1,
					  const struct task_struct *tsk2);

#define cpuset_memory_pressure_bump() 				\
	do {							\
		if (cpuset_memory_pressure_enabled)		\
			__cpuset_memory_pressure_bump();	\
	} while (0)
extern int cpuset_memory_pressure_enabled;
extern void __cpuset_memory_pressure_bump(void);
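
/*
 * Usage sketch (assumption, not from this header): reclaim-path code
 * would record cpuset memory pressure via the macro above; testing
 * cpuset_memory_pressure_enabled first keeps the disabled case down to a
 * single global load, with no function call.
 *
 *	/@ hypothetical call site in a page-reclaim path @/
 *	cpuset_memory_pressure_bump();
 */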
extern void cpuset_task_status_allowed(struct seq_file *m,
				       struct task_struct *task);
extern int proc_cpuset_show(struct seq_file *, void *);

extern int cpuset_mem_spread_node(void);
extern int cpuset_slab_spread_node(void);
static inline int cpuset_do_page_mem_spread(void)
{
	return current->flags & PF_SPREAD_PAGE;
}

static inline int cpuset_do_slab_mem_spread(void)
{
	return current->flags & PF_SPREAD_SLAB;
}

extern int current_cpuset_is_being_rebound(void);

extern void rebuild_sched_domains(void);

extern void cpuset_print_task_mems_allowed(struct task_struct *p);
/*
 * get_mems_allowed is required when making decisions involving
 * mems_allowed, such as during page allocation.  mems_allowed can be
 * updated concurrently, and depending on the new value an operation can
 * fail spuriously, potentially causing process failure.  A retry loop
 * with get_mems_allowed and put_mems_allowed prevents these artificial
 * failures.
 */
static inline unsigned int get_mems_allowed(void)
{
	return read_seqcount_begin(&current->mems_allowed_seq);
}
/*
 * If this returns false, the operation that took place after
 * get_mems_allowed may have failed.  It is up to the caller to retry the
 * operation if appropriate.
 */
static inline bool put_mems_allowed(unsigned int seq)
{
	return !read_seqcount_retry(&current->mems_allowed_seq, seq);
}
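
/*
 * Illustrative read-side retry loop (a sketch, not part of this header).
 * try_alloc_respecting_mems() is a hypothetical placeholder for any
 * operation that consults cpuset_current_mems_allowed:
 *
 *	unsigned int cpuset_mems_cookie;
 *	struct page *page;
 *
 *	do {
 *		cpuset_mems_cookie = get_mems_allowed();
 *		page = try_alloc_respecting_mems(gfp_mask, order);
 *	} while (!page && !put_mems_allowed(cpuset_mems_cookie));
 *
 * If the allocation failed while mems_allowed was changing underneath
 * us, put_mems_allowed() returns false and the loop retries with the
 * updated mask instead of reporting a spurious failure.
 */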
static inline void set_mems_allowed(nodemask_t nodemask)
{
	unsigned long flags;

	/*
	 * Write side of the mems_allowed seqcount.  Interrupts are
	 * disabled so a read-side retry loop running in interrupt
	 * context on this CPU cannot spin forever against an open
	 * write section.
	 */
	task_lock(current);
	local_irq_save(flags);
	write_seqcount_begin(&current->mems_allowed_seq);
	current->mems_allowed = nodemask;
	write_seqcount_end(&current->mems_allowed_seq);
	local_irq_restore(flags);
	task_unlock(current);
}
#else /* !CONFIG_CPUSETS */
static inline int cpuset_init(void) { return 0; }
static inline void cpuset_init_smp(void) {}

static inline void cpuset_update_active_cpus(bool cpu_online)
{
	partition_sched_domains(1, NULL, NULL);
}

static inline void cpuset_cpus_allowed(struct task_struct *p,
				       struct cpumask *mask)
{
	cpumask_copy(mask, cpu_possible_mask);
}

static inline void cpuset_cpus_allowed_fallback(struct task_struct *p)
{
}

static inline nodemask_t cpuset_mems_allowed(struct task_struct *p)
{
	return node_possible_map;
}

#define cpuset_current_mems_allowed (node_states[N_MEMORY])
static inline void cpuset_init_current_mems_allowed(void) {}

static inline int cpuset_nodemask_valid_mems_allowed(nodemask_t *nodemask)
{
	return 1;
}

static inline int cpuset_node_allowed_softwall(int node, gfp_t gfp_mask)
{
	return 1;
}

static inline int cpuset_node_allowed_hardwall(int node, gfp_t gfp_mask)
{
	return 1;
}

static inline int cpuset_zone_allowed_softwall(struct zone *z, gfp_t gfp_mask)
{
	return 1;
}

static inline int cpuset_zone_allowed_hardwall(struct zone *z, gfp_t gfp_mask)
{
	return 1;
}

static inline int cpuset_mems_allowed_intersects(const struct task_struct *tsk1,
						 const struct task_struct *tsk2)
{
	return 1;
}

static inline void cpuset_memory_pressure_bump(void) {}

static inline void cpuset_task_status_allowed(struct seq_file *m,
					      struct task_struct *task)
{
}

static inline int cpuset_mem_spread_node(void)
{
	return 0;
}

static inline int cpuset_slab_spread_node(void)
{
	return 0;
}

static inline int cpuset_do_page_mem_spread(void)
{
	return 0;
}

static inline int cpuset_do_slab_mem_spread(void)
{
	return 0;
}

static inline int current_cpuset_is_being_rebound(void)
{
	return 0;
}

static inline void rebuild_sched_domains(void)
{
	partition_sched_domains(1, NULL, NULL);
}

static inline void cpuset_print_task_mems_allowed(struct task_struct *p)
{
}

static inline void set_mems_allowed(nodemask_t nodemask)
{
}

static inline unsigned int get_mems_allowed(void)
{
	return 0;
}

static inline bool put_mems_allowed(unsigned int seq)
{
	return true;
}

#endif /* !CONFIG_CPUSETS */

#endif /* _LINUX_CPUSET_H */