#ifndef _ASM_POWERPC_PERCPU_H_
#define _ASM_POWERPC_PERCPU_H_
#ifdef __powerpc64__
#include <linux/compiler.h>

/*
 * Same as asm-generic/percpu.h, except that we store the per cpu offset
 * in the paca. Based on the x86-64 implementation.
 */

#ifdef CONFIG_SMP

#include <asm/paca.h>

#define __per_cpu_offset(cpu) (paca[cpu].data_offset)
#define __my_cpu_offset() get_paca()->data_offset
#define per_cpu_offset(x) (__per_cpu_offset(x))
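
/*
 * Background note (added; not in the original header): on ppc64 the
 * kernel keeps a pointer to the current CPU's paca in r13, which is
 * what get_paca() reads, so __my_cpu_offset() reaches this CPU's
 * data_offset without ever calling smp_processor_id().
 */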

/* Separate out the type, so (int[3], foo) works. */
#define DEFINE_PER_CPU(type, name) \
    __attribute__((__section__(".data.percpu"))) __typeof__(type) per_cpu__##name

#define DEFINE_PER_CPU_SHARED_ALIGNED(type, name) \
    __attribute__((__section__(".data.percpu.shared_aligned"))) \
    __typeof__(type) per_cpu__##name \
    ____cacheline_aligned_in_smp
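
/*
 * Illustrative expansion (added; hypothetical variable name): because
 * the type and the name are separate macro arguments,
 *
 *	DEFINE_PER_CPU(int[3], counters);
 *
 * expands to
 *
 *	__attribute__((__section__(".data.percpu")))
 *		__typeof__(int[3]) per_cpu__counters;
 *
 * which correctly declares an array. A single "type name" parameter
 * could not express array types this way.
 */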

/* var is in discarded region: offset to particular copy we want */
#define per_cpu(var, cpu) (*RELOC_HIDE(&per_cpu__##var, __per_cpu_offset(cpu)))
#define __get_cpu_var(var) (*RELOC_HIDE(&per_cpu__##var, __my_cpu_offset()))
#define __raw_get_cpu_var(var) (*RELOC_HIDE(&per_cpu__##var, __my_cpu_offset()))
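
/*
 * Usage sketch (added; hypothetical names): RELOC_HIDE(), from
 * <linux/compiler.h>, adds the offset to the address while hiding the
 * arithmetic from the compiler, so GCC cannot assume the result still
 * points into the discarded .data.percpu template. For example:
 *
 *	DEFINE_PER_CPU(unsigned long, irq_count);
 *
 *	void note_irq(void)
 *	{
 *		__get_cpu_var(irq_count)++;	// this CPU's copy
 *	}
 */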

/* A macro to avoid #include hell... */
#define percpu_modcopy(pcpudst, src, size)			\
do {								\
	unsigned int __i;					\
	for_each_possible_cpu(__i)				\
		memcpy((pcpudst)+__per_cpu_offset(__i),		\
		       (src), (size));				\
} while (0)
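
/*
 * Note (added): the module loader uses percpu_modcopy() to replicate a
 * module's per-cpu initial data into every possible CPU's area. Keeping
 * it a macro here avoids dragging the cpumask headers (which supply
 * for_each_possible_cpu()) into everything that includes this file,
 * which is the "#include hell" referred to above.
 */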

extern void setup_per_cpu_areas(void);

#else /* ! SMP */

#define DEFINE_PER_CPU(type, name) \
    __typeof__(type) per_cpu__##name
#define DEFINE_PER_CPU_SHARED_ALIGNED(type, name) \
    DEFINE_PER_CPU(type, name)

#define per_cpu(var, cpu)			(*((void)(cpu), &per_cpu__##var))
#define __get_cpu_var(var)			per_cpu__##var
#define __raw_get_cpu_var(var)			per_cpu__##var
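
/*
 * Note (added): on UP there is only one copy of each variable, so the
 * ((void)(cpu), ...) comma expression evaluates the cpu argument for
 * its side effects and to avoid "unused" warnings, then ignores it.
 */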

#endif	/* SMP */

#define DECLARE_PER_CPU(type, name) extern __typeof__(type) per_cpu__##name

#define EXPORT_PER_CPU_SYMBOL(var) EXPORT_SYMBOL(per_cpu__##var)
#define EXPORT_PER_CPU_SYMBOL_GPL(var) EXPORT_SYMBOL_GPL(per_cpu__##var)
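
/*
 * Illustrative pairing (added; hypothetical names):
 *
 *	// in some .c file
 *	DEFINE_PER_CPU(int, my_counter);
 *	EXPORT_PER_CPU_SYMBOL(my_counter);
 *
 *	// in the matching header
 *	DECLARE_PER_CPU(int, my_counter);
 */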

#else
#include <asm-generic/percpu.h>
#endif

#endif /* _ASM_POWERPC_PERCPU_H_ */