/* include/asm-arm/system.h */
#ifndef __ASM_ARM_SYSTEM_H
#define __ASM_ARM_SYSTEM_H

#ifdef __KERNEL__

#include <linux/config.h>
#include <linux/stringify.h>	/* __stringify(), used by read_cpuid() below */

#define CPU_ARCH_UNKNOWN	0
#define CPU_ARCH_ARMv3		1
#define CPU_ARCH_ARMv4		2
#define CPU_ARCH_ARMv4T		3
#define CPU_ARCH_ARMv5		4
#define CPU_ARCH_ARMv5T		5
#define CPU_ARCH_ARMv5TE	6
#define CPU_ARCH_ARMv5TEJ	7
#define CPU_ARCH_ARMv6		8
/*
 * CR1 bits (CP#15 CR1)
 */
#define CR_M	(1 << 0)	/* MMU enable				*/
#define CR_A	(1 << 1)	/* Alignment abort enable		*/
#define CR_C	(1 << 2)	/* Dcache enable			*/
#define CR_W	(1 << 3)	/* Write buffer enable			*/
#define CR_P	(1 << 4)	/* 32-bit exception handler		*/
#define CR_D	(1 << 5)	/* 32-bit data address range		*/
#define CR_L	(1 << 6)	/* Implementation defined		*/
#define CR_B	(1 << 7)	/* Big endian				*/
#define CR_S	(1 << 8)	/* System MMU protection		*/
#define CR_R	(1 << 9)	/* ROM MMU protection			*/
#define CR_F	(1 << 10)	/* Implementation defined		*/
#define CR_Z	(1 << 11)	/* Implementation defined		*/
#define CR_I	(1 << 12)	/* Icache enable			*/
#define CR_V	(1 << 13)	/* Vectors relocated to 0xffff0000	*/
#define CR_RR	(1 << 14)	/* Round Robin cache replacement	*/
#define CR_L4	(1 << 15)	/* LDR pc can set T bit			*/
#define CR_DT	(1 << 16)
#define CR_IT	(1 << 18)
#define CR_ST	(1 << 19)
#define CR_FI	(1 << 21)	/* Fast interrupt (lower latency mode)	*/
#define CR_U	(1 << 22)	/* Unaligned access operation		*/
#define CR_XP	(1 << 23)	/* Extended page tables			*/
#define CR_VE	(1 << 24)	/* Vectored interrupts			*/

#define CPUID_ID	0
#define CPUID_CACHETYPE	1
#define CPUID_TCM	2
#define CPUID_TLBTYPE	3
#define read_cpuid(reg)						\
	({							\
		unsigned int __val;				\
		asm("mrc	p15, 0, %0, c0, c0, " __stringify(reg)	\
		    : "=r" (__val)				\
		    :						\
		    : "cc");					\
		__val;						\
	})
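
/*
 * Illustrative usage sketch (not part of the original header): reading the
 * Main ID register via the CPUID_* indices defined above, e.g.
 *
 *	unsigned int id = read_cpuid(CPUID_ID);
 *
 * The index is pasted into the mrc opcode2 field at compile time via
 * __stringify(), so the argument must be a plain constant.
 */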
/*
 * This is used to ensure the compiler did actually allocate the register we
 * asked it for some inline assembly sequences.  Apparently we can't trust
 * the compiler from one version to another so a bit of paranoia won't hurt.
 * This string is meant to be concatenated with the inline asm string and
 * will cause compilation to stop on mismatch.
 * (for details, see gcc PR 15089)
 */
#define __asmeq(x, y)  ".ifnc " x "," y " ; .err ; .endif\n\t"
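
/*
 * Illustrative usage sketch (not part of the original header; `val' and the
 * swi number are placeholders): __asmeq() is pasted in front of an asm
 * template that relies on an operand living in a specific register,
 * typically together with an explicit register variable:
 *
 *	register unsigned long r0 asm("r0") = val;
 *	asm volatile(__asmeq("%0", "r0")
 *		     "swi	0x0"
 *		     : : "r" (r0) : "memory");
 *
 * If the compiler ever allocates a different register for %0, the emitted
 * ".ifnc ... ; .err" sequence makes the assembler reject the build.
 */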
#ifndef __ASSEMBLY__

#include <linux/linkage.h>

struct thread_info;
struct task_struct;

/* information about the system we're running on */
extern unsigned int system_rev;
extern unsigned int system_serial_low;
extern unsigned int system_serial_high;
extern unsigned int mem_fclk_21285;

struct pt_regs;

void die(const char *msg, struct pt_regs *regs, int err)
		__attribute__((noreturn));

void die_if_kernel(const char *str, struct pt_regs *regs, int err);

void hook_fault_code(int nr, int (*fn)(unsigned long, unsigned int,
				       struct pt_regs *),
		     int sig, const char *name);

#include <asm/proc-fns.h>

#define xchg(ptr,x) \
	((__typeof__(*(ptr)))__xchg((unsigned long)(x),(ptr),sizeof(*(ptr))))

#define tas(ptr) (xchg((ptr),1))
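
/*
 * Illustrative usage sketch (not part of the original header): xchg()
 * atomically stores a new value and returns the old one, which is enough
 * to build a simple test-and-set style lock flag:
 *
 *	static unsigned long locked;
 *
 *	while (xchg(&locked, 1) != 0)
 *		;	(spin: keep trying until the old value was 0)
 *	... critical section ...
 *	locked = 0;
 *
 * tas(ptr) is shorthand for exactly this xchg(ptr, 1) test-and-set pattern.
 */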
extern asmlinkage void __backtrace(void);
extern asmlinkage void c_backtrace(unsigned long fp, int pmode);
extern void show_pte(struct mm_struct *mm, unsigned long addr);
extern void __show_regs(struct pt_regs *);

extern int cpu_architecture(void);
#define set_cr(x)					\
	__asm__ __volatile__(				\
	"mcr	p15, 0, %0, c1, c0, 0	@ set CR"	\
	: : "r" (x) : "cc")

#define get_cr()					\
	({						\
	unsigned int __val;				\
	__asm__ __volatile__(				\
	"mrc	p15, 0, %0, c1, c0, 0	@ get CR"	\
	: "=r" (__val) : : "cc");			\
	__val;						\
	})
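
/*
 * Illustrative usage sketch (not part of the original header): the CR_*
 * bits defined earlier are normally read-modify-written through these
 * helpers, e.g. to enable alignment fault checking:
 *
 *	unsigned int cr = get_cr();
 *	set_cr(cr | CR_A);
 */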
extern unsigned long cr_no_alignment;	/* defined in entry-armv.S */
extern unsigned long cr_alignment;	/* defined in entry-armv.S */

#define UDBG_UNDEFINED	(1 << 0)
#define UDBG_SYSCALL	(1 << 1)
#define UDBG_BADABORT	(1 << 2)
#define UDBG_SEGV	(1 << 3)
#define UDBG_BUS	(1 << 4)

extern unsigned int user_debug;

#if __LINUX_ARM_ARCH__ >= 4
#define vectors_high()	(cr_alignment & CR_V)
#else
#define vectors_high()	(0)
#endif

#define mb() __asm__ __volatile__ ("" : : : "memory")
#define rmb() mb()
#define wmb() mb()
#define read_barrier_depends() do { } while(0)
#define set_mb(var, value)  do { var = value; mb(); } while (0)
#define set_wmb(var, value) do { var = value; wmb(); } while (0)
#define nop() __asm__ __volatile__("mov\tr0,r0\t@ nop\n\t");
#ifdef CONFIG_SMP
/*
 * Define our own context switch locking.  This allows us to enable
 * interrupts over the context switch, otherwise we end up with high
 * interrupt latency.  The real problem area is switch_mm() which may
 * do a full cache flush.
 */
#define prepare_arch_switch(rq,next)					\
do {									\
	spin_lock(&(next)->switch_lock);				\
	spin_unlock_irq(&(rq)->lock);					\
} while (0)

#define finish_arch_switch(rq,prev)					\
	spin_unlock(&(prev)->switch_lock)

#define task_running(rq,p)						\
	((rq)->curr == (p) || spin_is_locked(&(p)->switch_lock))
#else
/*
 * Our UP case is simpler, but we assume knowledge of how
 * spin_unlock_irq() and friends are implemented.  This avoids
 * us needlessly decrementing and incrementing the preempt count.
 */
#define prepare_arch_switch(rq,next)	local_irq_enable()
#define finish_arch_switch(rq,prev)	spin_unlock(&(rq)->lock)
#define task_running(rq,p)		((rq)->curr == (p))
#endif
/*
 * switch_to(prev, next) should switch from task `prev' to `next'.
 * `prev' will never be the same as `next'.  schedule() itself
 * contains the memory barrier to tell GCC not to cache `current'.
 */
extern struct task_struct *__switch_to(struct task_struct *, struct thread_info *, struct thread_info *);

#define switch_to(prev,next,last)					\
do {									\
	last = __switch_to(prev,prev->thread_info,next->thread_info);	\
} while (0)
/*
 * CPU interrupt mask handling.
 */
#if __LINUX_ARM_ARCH__ >= 6

#define local_irq_save(x)					\
	({							\
	__asm__ __volatile__(					\
	"mrs	%0, cpsr		@ local_irq_save\n"	\
	"cpsid	i"						\
	: "=r" (x) : : "memory", "cc");				\
	})

#define local_irq_enable()  __asm__("cpsie i	@ __sti" : : : "memory", "cc")
#define local_irq_disable() __asm__("cpsid i	@ __cli" : : : "memory", "cc")
#define local_fiq_enable()  __asm__("cpsie f	@ __stf" : : : "memory", "cc")
#define local_fiq_disable() __asm__("cpsid f	@ __clf" : : : "memory", "cc")
#else

/*
 * Save the current interrupt enable state & disable IRQs
 */
#define local_irq_save(x)					\
	({							\
		unsigned long temp;				\
		(void) (&temp == &x);				\
	__asm__ __volatile__(					\
	"mrs	%0, cpsr		@ local_irq_save\n"	\
"	orr	%1, %0, #128\n"					\
"	msr	cpsr_c, %1"					\
	: "=r" (x), "=r" (temp)					\
	:							\
	: "memory", "cc");					\
	})

/*
 * Enable IRQs
 */
#define local_irq_enable()					\
	({							\
		unsigned long temp;				\
	__asm__ __volatile__(					\
	"mrs	%0, cpsr		@ local_irq_enable\n"	\
"	bic	%0, %0, #128\n"					\
"	msr	cpsr_c, %0"					\
	: "=r" (temp)						\
	:							\
	: "memory", "cc");					\
	})

/*
 * Disable IRQs
 */
#define local_irq_disable()					\
	({							\
		unsigned long temp;				\
	__asm__ __volatile__(					\
	"mrs	%0, cpsr		@ local_irq_disable\n"	\
"	orr	%0, %0, #128\n"					\
"	msr	cpsr_c, %0"					\
	: "=r" (temp)						\
	:							\
	: "memory", "cc");					\
	})

/*
 * Enable FIQs
 */
#define local_fiq_enable()					\
	({							\
		unsigned long temp;				\
	__asm__ __volatile__(					\
	"mrs	%0, cpsr		@ stf\n"		\
"	bic	%0, %0, #64\n"					\
"	msr	cpsr_c, %0"					\
	: "=r" (temp)						\
	:							\
	: "memory", "cc");					\
	})

/*
 * Disable FIQs
 */
#define local_fiq_disable()					\
	({							\
		unsigned long temp;				\
	__asm__ __volatile__(					\
	"mrs	%0, cpsr		@ clf\n"		\
"	orr	%0, %0, #64\n"					\
"	msr	cpsr_c, %0"					\
	: "=r" (temp)						\
	:							\
	: "memory", "cc");					\
	})

#endif
/*
 * Save the current interrupt enable state.
 */
#define local_save_flags(x)					\
	({							\
	__asm__ __volatile__(					\
	"mrs	%0, cpsr		@ local_save_flags"	\
	: "=r" (x) : : "memory", "cc");				\
	})

/*
 * restore saved IRQ & FIQ state
 */
#define local_irq_restore(x)					\
	__asm__ __volatile__(					\
	"msr	cpsr_c, %0		@ local_irq_restore\n"	\
	:							\
	: "r" (x)						\
	: "memory", "cc")
#define irqs_disabled()			\
({					\
	unsigned long flags;		\
	local_save_flags(flags);	\
	flags & PSR_I_BIT;		\
})
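
/*
 * Illustrative usage sketch (not part of the original header): the usual
 * pattern for a short critical section that must not be interrupted:
 *
 *	unsigned long flags;
 *
 *	local_irq_save(flags);
 *	... touch data shared with an interrupt handler ...
 *	local_irq_restore(flags);
 *
 * local_irq_restore() rewrites the saved CPSR control bits, so the pair
 * nests correctly inside code that may already have interrupts disabled.
 */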
#ifdef CONFIG_SMP
#error SMP not supported

#define smp_mb()		mb()
#define smp_rmb()		rmb()
#define smp_wmb()		wmb()
#define smp_read_barrier_depends()	read_barrier_depends()

#else

#define smp_mb()		barrier()
#define smp_rmb()		barrier()
#define smp_wmb()		barrier()
#define smp_read_barrier_depends()	do { } while(0)
#if defined(CONFIG_CPU_SA1100) || defined(CONFIG_CPU_SA110)
/*
 * On the StrongARM, "swp" is terminally broken since it bypasses the
 * cache totally.  This means that the cache becomes inconsistent, and,
 * since we use normal loads/stores as well, this is really bad.
 * Typically, this causes oopsen in filp_close, but could have other,
 * more disastrous effects.  There are two work-arounds:
 *  1. Disable interrupts and emulate the atomic swap
 *  2. Clean the cache, perform atomic swap, flush the cache
 *
 * We choose (1) since it's the "easiest" to achieve here and is not
 * dependent on the processor type.
 */
#define swp_is_buggy
#endif
static inline unsigned long __xchg(unsigned long x, volatile void *ptr, int size)
{
	extern void __bad_xchg(volatile void *, int);
	unsigned long ret;
#ifdef swp_is_buggy
	unsigned long flags;
#endif

	switch (size) {
#ifdef swp_is_buggy
	case 1:
		local_irq_save(flags);
		ret = *(volatile unsigned char *)ptr;
		*(volatile unsigned char *)ptr = x;
		local_irq_restore(flags);
		break;

	case 4:
		local_irq_save(flags);
		ret = *(volatile unsigned long *)ptr;
		*(volatile unsigned long *)ptr = x;
		local_irq_restore(flags);
		break;
#else
	case 1:	__asm__ __volatile__ ("swpb %0, %1, [%2]"
					: "=&r" (ret)
					: "r" (x), "r" (ptr)
					: "memory", "cc");
		break;
	case 4:	__asm__ __volatile__ ("swp %0, %1, [%2]"
					: "=&r" (ret)
					: "r" (x), "r" (ptr)
					: "memory", "cc");
		break;
#endif
	default:
		/* unsupported size: force a link error via __bad_xchg() */
		__bad_xchg(ptr, size), ret = 0;
	}

	return ret;
}
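
/*
 * Note (not part of the original header): __bad_xchg() is declared but
 * intentionally never defined, so a call that survives to link time flags
 * a misuse.  For instance, a 2-byte exchange is not handled above:
 *
 *	unsigned short s;
 *	xchg(&s, 1);	(link fails: undefined reference to __bad_xchg)
 *
 * For the supported 1- and 4-byte sizes the switch is folded at compile
 * time and the __bad_xchg() call is optimised away.
 */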
#endif /* CONFIG_SMP */

#endif /* __ASSEMBLY__ */

#define arch_align_stack(x) (x)

#endif /* __KERNEL__ */

#endif