/*
 * linux/include/asm-arm/proc-armv/system.h
 *
 * Copyright (C) 1996 Russell King
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#ifndef __ASM_PROC_SYSTEM_H
#define __ASM_PROC_SYSTEM_H

#include <linux/config.h>
#define set_cr(x)                                       \
        __asm__ __volatile__(                           \
        "mcr    p15, 0, %0, c1, c0      @ set CR"       \
        : : "r" (x))

extern unsigned long cr_no_alignment;   /* defined in entry-armv.S */
extern unsigned long cr_alignment;      /* defined in entry-armv.S */
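
/*
 * Illustrative sketch, not part of the original header: cr_alignment
 * and cr_no_alignment are kept as ready-made copies of the CP15
 * control register value with the alignment-fault (A) bit set and
 * cleared respectively, so alignment trapping can be flipped with a
 * single set_cr() and no read-modify-write of CP15.  A hypothetical
 * helper (example only, hence the #if 0):
 */
#if 0
static inline void set_alignment_trap(int on)
{
        set_cr(on ? cr_alignment : cr_no_alignment);
}
#endif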
/*
 * A couple of speedups for the ARM
 */
/*
 * Save the current interrupt enable state & disable IRQs
 * (#128 is the I bit, bit 7 of the CPSR, which masks IRQs when set)
 */
#define __save_flags_cli(x)                             \
        ({                                              \
                unsigned long temp;                     \
        __asm__ __volatile__(                           \
        "mrs    %0, cpsr        @ save_flags_cli\n"     \
"       orr     %1, %0, #128\n"                         \
"       msr     cpsr_c, %1"                             \
        : "=r" (x), "=r" (temp)                         \
        :                                               \
        : "memory");                                    \
        })
/*
 * Enable IRQs
 */
#define __sti()                                         \
        ({                                              \
                unsigned long temp;                     \
        __asm__ __volatile__(                           \
        "mrs    %0, cpsr        @ sti\n"                \
"       bic     %0, %0, #128\n"                         \
"       msr     cpsr_c, %0"                             \
        : "=r" (temp)                                   \
        :                                               \
        : "memory");                                    \
        })
/*
 * Disable IRQs
 */
#define __cli()                                         \
        ({                                              \
                unsigned long temp;                     \
        __asm__ __volatile__(                           \
        "mrs    %0, cpsr        @ cli\n"                \
"       orr     %0, %0, #128\n"                         \
"       msr     cpsr_c, %0"                             \
        : "=r" (temp)                                   \
        :                                               \
        : "memory");                                    \
        })
/*
 * Enable FIQs
 * (#64 is the F bit, bit 6 of the CPSR, which masks FIQs when set)
 */
#define __stf()                                         \
        ({                                              \
                unsigned long temp;                     \
        __asm__ __volatile__(                           \
        "mrs    %0, cpsr        @ stf\n"                \
"       bic     %0, %0, #64\n"                          \
"       msr     cpsr_c, %0"                             \
        : "=r" (temp)                                   \
        :                                               \
        : "memory");                                    \
        })
/*
 * Disable FIQs
 */
#define __clf()                                         \
        ({                                              \
                unsigned long temp;                     \
        __asm__ __volatile__(                           \
        "mrs    %0, cpsr        @ clf\n"                \
"       orr     %0, %0, #64\n"                          \
"       msr     cpsr_c, %0"                             \
        : "=r" (temp)                                   \
        :                                               \
        : "memory");                                    \
        })
/*
 * save current IRQ & FIQ state
 */
#define __save_flags(x)                                 \
        __asm__ __volatile__(                           \
        "mrs    %0, cpsr        @ save_flags\n"         \
        : "=r" (x)                                      \
        :                                               \
        : "memory")
/*
 * restore saved IRQ & FIQ state
 */
#define __restore_flags(x)                              \
        __asm__ __volatile__(                           \
        "msr    cpsr_c, %0      @ restore_flags\n"      \
        :                                               \
        : "r" (x)                                       \
        : "memory")
#if defined(CONFIG_CPU_SA1100) || defined(CONFIG_CPU_SA110)
/*
 * On the StrongARM, "swp" is terminally broken since it bypasses the
 * cache totally.  This means that the cache becomes inconsistent, and,
 * since we use normal loads/stores as well, this is really bad.
 * Typically, this causes oopsen in filp_close, but could have other,
 * more disastrous effects.  There are two work-arounds:
 *      1. Disable interrupts and emulate the atomic swap
 *      2. Clean the cache, perform atomic swap, flush the cache
 *
 * We choose (1) since it's the "easiest" to achieve here and is not
 * dependent on the processor type.
 */
#define swp_is_buggy
#endif
extern __inline__ unsigned long __xchg(unsigned long x, volatile void *ptr, int size)
{
        extern void __bad_xchg(volatile void *, int);
        unsigned long ret;
#ifdef swp_is_buggy
        unsigned long flags;
#endif

        switch (size) {
#ifdef swp_is_buggy
        case 1:
                __save_flags_cli(flags);
                ret = *(volatile unsigned char *)ptr;
                *(volatile unsigned char *)ptr = x;
                __restore_flags(flags);
                break;

        case 4:
                __save_flags_cli(flags);
                ret = *(volatile unsigned long *)ptr;
                *(volatile unsigned long *)ptr = x;
                __restore_flags(flags);
                break;
#else
        case 1: __asm__ __volatile__ ("swpb %0, %1, [%2]"
                                : "=r" (ret)
                                : "r" (x), "r" (ptr)
                                : "memory");
                break;
        case 4: __asm__ __volatile__ ("swp %0, %1, [%2]"
                                : "=r" (ret)
                                : "r" (x), "r" (ptr)
                                : "memory");
                break;
#endif
        default: __bad_xchg(ptr, size);
        }

        return ret;
}
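
/*
 * Illustrative sketch, not part of the original header: __xchg() is
 * typically reached through the generic xchg() macro, and an atomic
 * swap like this is the classic building block for a test-and-set
 * lock.  A hypothetical spin on a byte-sized flag (example only,
 * hence the #if 0; example_lock is not a real kernel symbol):
 */
#if 0
static volatile unsigned char example_lock;     /* 0 = free, 1 = held */

static inline void example_acquire(void)
{
        /* atomically store 1 and fetch the old value; seeing 0 back
         * means this caller took the lock */
        while (__xchg(1, &example_lock, sizeof(example_lock)) != 0)
                /* spin */;
}

static inline void example_release(void)
{
        example_lock = 0;
}
#endif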
#endif