include/asm-generic/percpu.h
#ifndef _ASM_GENERIC_PERCPU_H_
#define _ASM_GENERIC_PERCPU_H_
#include <linux/compiler.h>
#include <linux/threads.h>

/*
 * Determine the real variable name from the name visible in the
 * kernel sources.
 */
#define per_cpu_var(var) per_cpu__##var
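
/*
 * Illustrative expansion ("irq_stat" is just an example name):
 *
 *	per_cpu_var(irq_stat)	-> per_cpu__irq_stat
 *
 * The per_cpu__ prefix keeps per-cpu symbols in their own namespace
 * in the object file.
 */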

#ifdef CONFIG_SMP

/*
 * per_cpu_offset() is the offset that has to be added to a
 * percpu variable to get to the instance for a certain processor.
 *
 * Most arches use the __per_cpu_offset array for those offsets but
 * some arches have their own ways of determining the offset (x86_64, s390).
 */
#ifndef __per_cpu_offset
extern unsigned long __per_cpu_offset[NR_CPUS];

#define per_cpu_offset(x) (__per_cpu_offset[x])
#endif

/*
 * Determine the offset for the currently active processor.
 * An arch may define __my_cpu_offset to provide a more effective
 * means of obtaining the offset to the per cpu variables of the
 * current processor.
 */
#ifndef __my_cpu_offset
#define __my_cpu_offset per_cpu_offset(raw_smp_processor_id())
#endif
#ifdef CONFIG_DEBUG_PREEMPT
#define my_cpu_offset per_cpu_offset(smp_processor_id())
#else
#define my_cpu_offset __my_cpu_offset
#endif
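
/*
 * Note: with CONFIG_DEBUG_PREEMPT, my_cpu_offset goes through
 * smp_processor_id() and so inherits its "used in preemptible code"
 * debug check, while __my_cpu_offset always uses the unchecked
 * raw_smp_processor_id().
 */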

/*
 * Add an offset to a pointer but keep the pointer as is.
 *
 * Only S390 provides its own means of moving the pointer.
 */
#ifndef SHIFT_PERCPU_PTR
#define SHIFT_PERCPU_PTR(__p, __offset)	RELOC_HIDE((__p), (__offset))
#endif
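
/*
 * Illustrative expansion of the generic definition (the offset value
 * is made up for the example):
 *
 *	SHIFT_PERCPU_PTR(&per_cpu__irq_stat, 0x4000)
 *	  -> RELOC_HIDE(&per_cpu__irq_stat, 0x4000)
 *
 * RELOC_HIDE() (from linux/compiler.h) performs the addition in a way
 * the compiler cannot see through, so gcc does not assume the result
 * still points into per_cpu__irq_stat.
 */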

/*
 * A percpu variable may point to a discarded region. The following are
 * established ways to produce a usable pointer from the percpu variable
 * offset.
 */
#define per_cpu(var, cpu) \
	(*SHIFT_PERCPU_PTR(&per_cpu_var(var), per_cpu_offset(cpu)))
#define __get_cpu_var(var) \
	(*SHIFT_PERCPU_PTR(&per_cpu_var(var), my_cpu_offset))
#define __raw_get_cpu_var(var) \
	(*SHIFT_PERCPU_PTR(&per_cpu_var(var), __my_cpu_offset))
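
/*
 * Illustrative use, assuming a counter defined elsewhere with
 * DEFINE_PER_CPU(unsigned long, nr_events):
 *
 *	per_cpu(nr_events, cpu)++;	 instance of a given CPU
 *	__get_cpu_var(nr_events)++;	 current CPU, debug-checked id
 *	__raw_get_cpu_var(nr_events)++;	 current CPU, unchecked id
 *
 * The current-CPU forms are only stable while preemption is disabled.
 */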

#ifdef CONFIG_HAVE_SETUP_PER_CPU_AREA
extern void setup_per_cpu_areas(void);
#endif

#else /* ! SMP */

#define per_cpu(var, cpu)		(*((void)(cpu), &per_cpu_var(var)))
#define __get_cpu_var(var)		per_cpu_var(var)
#define __raw_get_cpu_var(var)		per_cpu_var(var)
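
/*
 * Note that the comma expression in per_cpu() still evaluates the cpu
 * argument for its side effects, so e.g. per_cpu(nr_events, get_cpu())
 * behaves the same on UP as on SMP apart from the offset arithmetic.
 */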

#endif /* SMP */

#ifndef PER_CPU_ATTRIBUTES
#define PER_CPU_ATTRIBUTES
#endif

#define DECLARE_PER_CPU(type, name) extern PER_CPU_ATTRIBUTES \
	__typeof__(type) per_cpu_var(name)
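
/*
 * Typical pairing (illustrative; DEFINE_PER_CPU() is provided by
 * linux/percpu.h):
 *
 *	DEFINE_PER_CPU(int, cpu_state);		in one .c file
 *	DECLARE_PER_CPU(int, cpu_state);	in the shared header
 */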

/*
 * Optional methods for optimized non-lvalue per-cpu variable access.
 *
 * @var can be a percpu variable or a field of it and its size should
 * equal char, int or long. percpu_read() evaluates to a value and
 * all others to void.
 *
 * These operations are guaranteed to be atomic w.r.t. preemption.
 * The generic versions use plain get/put_cpu_var(). Archs are
 * encouraged to implement single-instruction alternatives which don't
 * require preemption protection.
 */
#ifndef percpu_read
# define percpu_read(var)					\
  ({								\
	typeof(per_cpu_var(var)) __tmp_var__;			\
	__tmp_var__ = get_cpu_var(var);				\
	put_cpu_var(var);					\
	__tmp_var__;						\
  })
#endif
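
/*
 * Illustrative use ("nr_events" as above):
 *
 *	unsigned long snap = percpu_read(nr_events);
 *
 * The generic fallback copies the value inside a get_cpu_var()/
 * put_cpu_var() pair, so preemption is disabled only for the duration
 * of the read.
 */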

#define __percpu_generic_to_op(var, val, op)			\
do {								\
	get_cpu_var(var) op val;				\
	put_cpu_var(var);					\
} while (0)

#ifndef percpu_write
# define percpu_write(var, val)		__percpu_generic_to_op(var, (val), =)
#endif

#ifndef percpu_add
# define percpu_add(var, val)		__percpu_generic_to_op(var, (val), +=)
#endif

#ifndef percpu_sub
# define percpu_sub(var, val)		__percpu_generic_to_op(var, (val), -=)
#endif

#ifndef percpu_and
# define percpu_and(var, val)		__percpu_generic_to_op(var, (val), &=)
#endif

#ifndef percpu_or
# define percpu_or(var, val)		__percpu_generic_to_op(var, (val), |=)
#endif

#ifndef percpu_xor
# define percpu_xor(var, val)		__percpu_generic_to_op(var, (val), ^=)
#endif
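
/*
 * Illustrative expansion ("nr_events" as above):
 *
 *	percpu_add(nr_events, 1);
 *
 * becomes, with the generic fallback,
 *
 *	do { get_cpu_var(nr_events) += (1); put_cpu_var(nr_events); } while (0);
 *
 * Arch overrides may perform the same update in a single instruction
 * instead.
 */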

#endif /* _ASM_GENERIC_PERCPU_H_ */