#ifndef __ASM_ARM_SYSTEM_H
#define __ASM_ARM_SYSTEM_H

#ifdef __KERNEL__

#include <asm/memory.h>

#define CPU_ARCH_UNKNOWN	0
#define CPU_ARCH_ARMv3		1
#define CPU_ARCH_ARMv4		2
#define CPU_ARCH_ARMv4T		3
#define CPU_ARCH_ARMv5		4
#define CPU_ARCH_ARMv5T		5
#define CPU_ARCH_ARMv5TE	6
#define CPU_ARCH_ARMv5TEJ	7
#define CPU_ARCH_ARMv6		8

/*
 * CR1 bits (CP#15 CR1)
 */
#define CR_M	(1 << 0)	/* MMU enable				*/
#define CR_A	(1 << 1)	/* Alignment abort enable		*/
#define CR_C	(1 << 2)	/* Dcache enable			*/
#define CR_W	(1 << 3)	/* Write buffer enable			*/
#define CR_P	(1 << 4)	/* 32-bit exception handler		*/
#define CR_D	(1 << 5)	/* 32-bit data address range		*/
#define CR_L	(1 << 6)	/* Implementation defined		*/
#define CR_B	(1 << 7)	/* Big endian				*/
#define CR_S	(1 << 8)	/* System MMU protection		*/
#define CR_R	(1 << 9)	/* ROM MMU protection			*/
#define CR_F	(1 << 10)	/* Implementation defined		*/
#define CR_Z	(1 << 11)	/* Implementation defined		*/
#define CR_I	(1 << 12)	/* Icache enable			*/
#define CR_V	(1 << 13)	/* Vectors relocated to 0xffff0000	*/
#define CR_RR	(1 << 14)	/* Round Robin cache replacement	*/
#define CR_L4	(1 << 15)	/* LDR pc can set T bit			*/
#define CR_DT	(1 << 16)
#define CR_IT	(1 << 18)
#define CR_ST	(1 << 19)
#define CR_FI	(1 << 21)	/* Fast interrupt (lower latency mode)	*/
#define CR_U	(1 << 22)	/* Unaligned access operation		*/
#define CR_XP	(1 << 23)	/* Extended page tables			*/
#define CR_VE	(1 << 24)	/* Vectored interrupts			*/

#define CPUID_ID	0
#define CPUID_CACHETYPE	1
#define CPUID_TCM	2
#define CPUID_TLBTYPE	3

#ifdef CONFIG_CPU_CP15
#define read_cpuid(reg)						\
	({							\
		unsigned int __val;				\
		asm("mrc	p15, 0, %0, c0, c0, " __stringify(reg)	\
		    : "=r" (__val)				\
		    :						\
		    : "cc");					\
		__val;						\
	})
#else
#define read_cpuid(reg) (processor_id)
#endif
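
/*
 * Example (illustrative only, not part of this header): reading the main
 * ID register.  The register index must be a compile-time constant, since
 * it is stringified directly into the mrc instruction.  The implementer
 * field lives in bits [31:24] of the main ID register.
 *
 *	unsigned int id    = read_cpuid(CPUID_ID);
 *	unsigned int cache = read_cpuid(CPUID_CACHETYPE);
 *	unsigned int impl  = id >> 24;	// e.g. 0x41 for ARM Ltd
 */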

/*
 * This is used to ensure the compiler did actually allocate the register we
 * asked it for some inline assembly sequences.  Apparently we can't trust
 * the compiler from one version to another so a bit of paranoia won't hurt.
 * This string is meant to be concatenated with the inline asm string and
 * will cause compilation to stop on mismatch.
 * (for details, see gcc PR 15089)
 */
#define __asmeq(x, y)  ".ifnc " x "," y " ; .err ; .endif\n\t"
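
/*
 * Example (sketch; "do_helper" is a hypothetical function): __asmeq is
 * concatenated ahead of an asm body that hard-codes register names, so
 * assembly fails if the compiler allocated a different register than the
 * explicit register variable asked for:
 *
 *	register unsigned long r0 asm("r0") = arg;
 *	asm volatile(__asmeq("%0", "r0")
 *		     "bl	do_helper"
 *		     : : "r" (r0) : "lr", "cc");
 */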

#ifndef __ASSEMBLY__

#include <linux/linkage.h>
#include <linux/irqflags.h>

struct thread_info;
struct task_struct;

/* information about the system we're running on */
extern unsigned int system_rev;
extern unsigned int system_serial_low;
extern unsigned int system_serial_high;
extern unsigned int mem_fclk_21285;

struct pt_regs;

void die(const char *msg, struct pt_regs *regs, int err)
		__attribute__((noreturn));

struct siginfo;
void notify_die(const char *str, struct pt_regs *regs, struct siginfo *info,
		unsigned long err, unsigned long trap);

void hook_fault_code(int nr, int (*fn)(unsigned long, unsigned int,
				       struct pt_regs *),
		     int sig, const char *name);

#define xchg(ptr,x) \
	((__typeof__(*(ptr)))__xchg((unsigned long)(x),(ptr),sizeof(*(ptr))))

#define tas(ptr) (xchg((ptr),1))
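
/*
 * Example (illustrative; MY_FLAG and handle_flag() are hypothetical):
 * atomically fetch a word and replace it.  Only 1- and 4-byte objects are
 * handled by __xchg below; any other size ends up in __bad_xchg.
 *
 *	static unsigned long pending_flags;
 *
 *	unsigned long old = xchg(&pending_flags, 0);	// take and clear
 *	if (old & MY_FLAG)
 *		handle_flag();
 */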

extern asmlinkage void __backtrace(void);
extern asmlinkage void c_backtrace(unsigned long fp, int pmode);

struct mm_struct;
extern void show_pte(struct mm_struct *mm, unsigned long addr);
extern void __show_regs(struct pt_regs *);

extern int cpu_architecture(void);
extern void cpu_init(void);

void arm_machine_restart(char mode);
extern void (*arm_pm_restart)(char str);

/*
 * Intel's XScale3 core supports some v6 features (supersections, L2)
 * but advertises itself as v5 as it does not support the v6 ISA.  For
 * this reason, we need a way to explicitly test for this type of CPU.
 */
#ifndef CONFIG_CPU_XSC3
#define cpu_is_xsc3()	0
#else
static inline int cpu_is_xsc3(void)
{
	extern unsigned int processor_id;

	if ((processor_id & 0xffffe000) == 0x69056000)
		return 1;

	return 0;
}
#endif

#if !defined(CONFIG_CPU_XSCALE) && !defined(CONFIG_CPU_XSC3)
#define	cpu_is_xscale()	0
#else
#define	cpu_is_xscale()	1
#endif

#define UDBG_UNDEFINED	(1 << 0)
#define UDBG_SYSCALL	(1 << 1)
#define UDBG_BADABORT	(1 << 2)
#define UDBG_SEGV	(1 << 3)
#define UDBG_BUS	(1 << 4)

extern unsigned int user_debug;

#if __LINUX_ARM_ARCH__ >= 4
#define vectors_high()	(cr_alignment & CR_V)
#else
#define vectors_high()	(0)
#endif

#if defined(CONFIG_CPU_XSC3) || __LINUX_ARM_ARCH__ >= 6
#define isb() __asm__ __volatile__ ("mcr p15, 0, %0, c7, c5, 4" \
				    : : "r" (0) : "memory")
#define dsb() __asm__ __volatile__ ("mcr p15, 0, %0, c7, c10, 4" \
				    : : "r" (0) : "memory")
#define dmb() __asm__ __volatile__ ("mcr p15, 0, %0, c7, c10, 5" \
				    : : "r" (0) : "memory")
#else
#define isb() __asm__ __volatile__ ("" : : : "memory")
#define dsb() __asm__ __volatile__ ("mcr p15, 0, %0, c7, c10, 4" \
				    : : "r" (0) : "memory")
#define dmb() __asm__ __volatile__ ("" : : : "memory")
#endif

#ifndef CONFIG_SMP
#define mb()	do { if (arch_is_coherent()) dmb(); else barrier(); } while (0)
#define rmb()	do { if (arch_is_coherent()) dmb(); else barrier(); } while (0)
#define wmb()	do { if (arch_is_coherent()) dmb(); else barrier(); } while (0)
#define smp_mb()	barrier()
#define smp_rmb()	barrier()
#define smp_wmb()	barrier()
#else
#define mb()		dmb()
#define rmb()		dmb()
#define wmb()		dmb()
#define smp_mb()	dmb()
#define smp_rmb()	dmb()
#define smp_wmb()	dmb()
#endif
#define read_barrier_depends()		do { } while (0)
#define smp_read_barrier_depends()	do { } while (0)

#define set_mb(var, value)	do { var = value; smp_mb(); } while (0)
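
/*
 * Example (sketch; payload and use() are hypothetical): the classic
 * producer/consumer pairing.  smp_wmb() orders the data store before the
 * flag store; the reader pairs it with smp_rmb() so it never observes the
 * flag without the data.
 *
 *	producer:
 *		data = payload;
 *		smp_wmb();
 *		ready = 1;
 *
 *	consumer:
 *		if (ready) {
 *			smp_rmb();
 *			use(data);
 *		}
 */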
#define nop() __asm__ __volatile__("mov\tr0,r0\t@ nop\n\t");

extern unsigned long cr_no_alignment;	/* defined in entry-armv.S */
extern unsigned long cr_alignment;	/* defined in entry-armv.S */

static inline unsigned int get_cr(void)
{
	unsigned int val;
	asm("mrc p15, 0, %0, c1, c0, 0	@ get CR" : "=r" (val) : : "cc");
	return val;
}

static inline void set_cr(unsigned int val)
{
	asm volatile("mcr p15, 0, %0, c1, c0, 0	@ set CR"
	  : : "r" (val) : "cc");
	isb();
}
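
/*
 * Example (sketch): read-modify-write of the control register, e.g. to
 * enable alignment fault checking.  Note that real callers normally go
 * through adjust_cr() below so that cr_alignment/cr_no_alignment stay
 * consistent with the hardware state.
 *
 *	unsigned int cr = get_cr();
 *	set_cr(cr | CR_A);
 */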

#ifndef CONFIG_SMP
extern void adjust_cr(unsigned long mask, unsigned long set);
#endif

#define CPACC_FULL(n)		(3 << (n * 2))
#define CPACC_SVC(n)		(1 << (n * 2))
#define CPACC_DISABLE(n)	(0 << (n * 2))

static inline unsigned int get_copro_access(void)
{
	unsigned int val;
	asm("mrc p15, 0, %0, c1, c0, 2 @ get copro access"
	  : "=r" (val) : : "cc");
	return val;
}

static inline void set_copro_access(unsigned int val)
{
	asm volatile("mcr p15, 0, %0, c1, c0, 2 @ set copro access"
	  : : "r" (val) : "cc");
	isb();
}
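
/*
 * Example (sketch): granting full (user and kernel) access to
 * coprocessors 10 and 11, as VFP initialisation needs to do before
 * touching the FP registers:
 *
 *	unsigned int access = get_copro_access();
 *	set_copro_access(access | CPACC_FULL(10) | CPACC_FULL(11));
 */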

/*
 * switch_mm() may do a full cache flush over the context switch,
 * so enable interrupts over the context switch to avoid high
 * latency.
 */
#define __ARCH_WANT_INTERRUPTS_ON_CTXSW

/*
 * switch_to(prev, next) should switch from task `prev' to `next'
 * `prev' will never be the same as `next'.  schedule() itself
 * contains the memory barrier to tell GCC not to cache `current'.
 */
extern struct task_struct *__switch_to(struct task_struct *, struct thread_info *, struct thread_info *);

#define switch_to(prev,next,last)					\
do {									\
	last = __switch_to(prev,task_thread_info(prev), task_thread_info(next));	\
} while (0)

/*
 * On SMP systems, when the scheduler does migration-cost autodetection,
 * it needs a way to flush as much of the CPU's caches as possible.
 *
 * TODO: fill this in!
 */
static inline void sched_cacheflush(void)
{
}

#if defined(CONFIG_CPU_SA1100) || defined(CONFIG_CPU_SA110)
/*
 * On the StrongARM, "swp" is terminally broken since it bypasses the
 * cache totally.  This means that the cache becomes inconsistent, and,
 * since we use normal loads/stores as well, this is really bad.
 * Typically, this causes oopsen in filp_close, but could have other,
 * more disastrous effects.  There are two work-arounds:
 *  1. Disable interrupts and emulate the atomic swap
 *  2. Clean the cache, perform atomic swap, flush the cache
 *
 * We choose (1) since it's the "easiest" to achieve here and is not
 * dependent on the processor type.
 *
 * NOTE that this solution won't work on an SMP system, so explicitly
 * forbid it here.
 */
#define swp_is_buggy
#endif

static inline unsigned long __xchg(unsigned long x, volatile void *ptr, int size)
{
	extern void __bad_xchg(volatile void *, int);
	unsigned long ret;
#ifdef swp_is_buggy
	unsigned long flags;
#endif
#if __LINUX_ARM_ARCH__ >= 6
	unsigned int tmp;
#endif

	switch (size) {
#if __LINUX_ARM_ARCH__ >= 6
	case 1:
		asm volatile("@	__xchg1\n"
		"1:	ldrexb	%0, [%3]\n"
		"	strexb	%1, %2, [%3]\n"
		"	teq	%1, #0\n"
		"	bne	1b"
			: "=&r" (ret), "=&r" (tmp)
			: "r" (x), "r" (ptr)
			: "memory", "cc");
		break;
	case 4:
		asm volatile("@	__xchg4\n"
		"1:	ldrex	%0, [%3]\n"
		"	strex	%1, %2, [%3]\n"
		"	teq	%1, #0\n"
		"	bne	1b"
			: "=&r" (ret), "=&r" (tmp)
			: "r" (x), "r" (ptr)
			: "memory", "cc");
		break;
#elif defined(swp_is_buggy)
#ifdef CONFIG_SMP
#error SMP is not supported on this platform
#endif
	case 1:
		raw_local_irq_save(flags);
		ret = *(volatile unsigned char *)ptr;
		*(volatile unsigned char *)ptr = x;
		raw_local_irq_restore(flags);
		break;

	case 4:
		raw_local_irq_save(flags);
		ret = *(volatile unsigned long *)ptr;
		*(volatile unsigned long *)ptr = x;
		raw_local_irq_restore(flags);
		break;
#else
	case 1:
		asm volatile("@	__xchg1\n"
		"	swpb	%0, %1, [%2]"
			: "=&r" (ret)
			: "r" (x), "r" (ptr)
			: "memory", "cc");
		break;
	case 4:
		asm volatile("@	__xchg4\n"
		"	swp	%0, %1, [%2]"
			: "=&r" (ret)
			: "r" (x), "r" (ptr)
			: "memory", "cc");
		break;
#endif
	default:
		__bad_xchg(ptr, size), ret = 0;
		break;
	}

	return ret;
}

extern void disable_hlt(void);
extern void enable_hlt(void);

#endif /* __ASSEMBLY__ */

#define arch_align_stack(x) (x)

#endif /* __KERNEL__ */

#endif