include/asm-ia64/percpu.h
#ifndef _ASM_IA64_PERCPU_H
#define _ASM_IA64_PERCPU_H

/*
 * Copyright (C) 2002-2003 Hewlett-Packard Co
 *	David Mosberger-Tang <davidm@hpl.hp.com>
 */

#define PERCPU_ENOUGH_ROOM PERCPU_PAGE_SIZE

#ifdef __ASSEMBLY__
# define THIS_CPU(var)	(per_cpu__##var)	/* use this to mark accesses to per-CPU variables... */
#else /* !__ASSEMBLY__ */

#include <linux/threads.h>

#ifdef HAVE_MODEL_SMALL_ATTRIBUTE
# define __SMALL_ADDR_AREA	__attribute__((__model__ (__small__)))
#else
# define __SMALL_ADDR_AREA
#endif

#define DECLARE_PER_CPU(type, name)				\
	extern __SMALL_ADDR_AREA __typeof__(type) per_cpu__##name

/* Separate out the type, so (int[3], foo) works. */
#define DEFINE_PER_CPU(type, name)				\
	__attribute__((__section__(".data.percpu")))		\
	__SMALL_ADDR_AREA __typeof__(type) per_cpu__##name

#ifdef CONFIG_SMP
#define DEFINE_PER_CPU_SHARED_ALIGNED(type, name)			\
	__attribute__((__section__(".data.percpu.shared_aligned")))	\
	__SMALL_ADDR_AREA __typeof__(type) per_cpu__##name		\
	____cacheline_aligned_in_smp
#else
#define DEFINE_PER_CPU_SHARED_ALIGNED(type, name)	\
	DEFINE_PER_CPU(type, name)
#endif
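
/*
 * Usage sketch (illustrative only; "my_counter" is a made-up example
 * variable, not something this header defines):
 *
 *	DEFINE_PER_CPU(int, my_counter);	(in exactly one .c file)
 *	DECLARE_PER_CPU(int, my_counter);	(in a header seen by its users)
 *
 * DEFINE_PER_CPU() places the object in the .data.percpu section (or
 * .data.percpu.shared_aligned), which is replicated once per CPU during
 * boot; DECLARE_PER_CPU() merely declares it for other translation units.
 */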

/*
 * Pretty much a literal copy of asm-generic/percpu.h, except that percpu_modcopy() is an
 * external routine, to avoid include-hell.
 */

#ifdef CONFIG_SMP

extern unsigned long __per_cpu_offset[NR_CPUS];
#define per_cpu_offset(x) (__per_cpu_offset[x])

/* Equal to __per_cpu_offset[smp_processor_id()], but faster to access: */
DECLARE_PER_CPU(unsigned long, local_per_cpu_offset);

#define per_cpu(var, cpu)	(*RELOC_HIDE(&per_cpu__##var, __per_cpu_offset[cpu]))
#define __get_cpu_var(var)	(*RELOC_HIDE(&per_cpu__##var, __ia64_per_cpu_var(local_per_cpu_offset)))
#define __raw_get_cpu_var(var)	(*RELOC_HIDE(&per_cpu__##var, __ia64_per_cpu_var(local_per_cpu_offset)))

extern void percpu_modcopy(void *pcpudst, const void *src, unsigned long size);
extern void setup_per_cpu_areas (void);
extern void *per_cpu_init(void);

#else /* ! SMP */

#define per_cpu(var, cpu)		(*((void)(cpu), &per_cpu__##var))
#define __get_cpu_var(var)		per_cpu__##var
#define __raw_get_cpu_var(var)		per_cpu__##var
#define per_cpu_init()			(__phys_per_cpu_start)

#endif	/* SMP */
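
/*
 * Access sketch (illustrative only; "my_counter" stands for a variable set
 * up with DEFINE_PER_CPU(int, my_counter) as above):
 *
 *	int remote = per_cpu(my_counter, cpu);	(some other CPU's instance)
 *
 *	get_cpu();				(disable preemption)
 *	__get_cpu_var(my_counter)++;		(this CPU's instance)
 *	put_cpu();
 *
 * On ia64, __raw_get_cpu_var() expands to exactly the same thing as
 * __get_cpu_var(): both go through local_per_cpu_offset on SMP and use the
 * variable directly on UP.
 */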

#define EXPORT_PER_CPU_SYMBOL(var)		EXPORT_SYMBOL(per_cpu__##var)
#define EXPORT_PER_CPU_SYMBOL_GPL(var)		EXPORT_SYMBOL_GPL(per_cpu__##var)
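
/*
 * For example, a hypothetical "my_counter" defined with DEFINE_PER_CPU()
 * would be made visible to modules with:
 *
 *	EXPORT_PER_CPU_SYMBOL(my_counter);
 */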

/*
 * Be extremely careful when taking the address of this variable!  Due to virtual
 * remapping, it is different from the canonical address returned by __get_cpu_var(var)!
 * On the positive side, using __ia64_per_cpu_var() instead of __get_cpu_var() is slightly
 * more efficient.
 */
#define __ia64_per_cpu_var(var)	(per_cpu__##var)
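
/*
 * Sketch of the caveat above (again using a hypothetical "my_counter"):
 *
 *	int *canon = &__get_cpu_var(my_counter);	(canonical address)
 *	int *alias = &__ia64_per_cpu_var(my_counter);	(virtually remapped)
 *
 * Both name the current CPU's copy, but the two pointers compare unequal,
 * so only the __get_cpu_var() address is safe to store or hand to other
 * code.
 */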

#endif /* !__ASSEMBLY__ */

#endif /* _ASM_IA64_PERCPU_H */