/*
 * include/asm-xtensa/system.h
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 2001 - 2005 Tensilica Inc.
 */

#ifndef _XTENSA_SYSTEM_H
#define _XTENSA_SYSTEM_H

#include <linux/stringify.h>

#include <asm/processor.h>
/* interrupt control */

#define local_save_flags(x)						\
	__asm__ __volatile__ ("rsr %0,"__stringify(PS) : "=a" (x));
#define local_irq_restore(x)	do {					\
	__asm__ __volatile__ ("wsr %0, "__stringify(PS)" ; rsync"	\
			      :: "a" (x) : "memory"); } while(0);
#define local_irq_save(x)	do {					\
	__asm__ __volatile__ ("rsil %0, "__stringify(LOCKLEVEL)	\
			      : "=a" (x) :: "memory");} while(0);
static inline void local_irq_disable(void)
{
	unsigned long flags;
	__asm__ __volatile__ ("rsil %0, "__stringify(LOCKLEVEL)
			      : "=a" (flags) :: "memory");
}

static inline void local_irq_enable(void)
{
	unsigned long flags;
	__asm__ __volatile__ ("rsil %0, 0" : "=a" (flags) :: "memory");
}

static inline int irqs_disabled(void)
{
	unsigned long flags;
	local_save_flags(flags);
	return flags & 0xf;
}
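
/*
 * Illustrative sketch only: this helper is hypothetical and not part of the
 * interface above, it merely shows the usual save/disable/restore pattern
 * built on the flag-based macros.
 */
static inline void __example_irq_protected_increment(volatile int *counter)
{
	unsigned long flags;

	local_irq_save(flags);		/* raise PS.INTLEVEL to LOCKLEVEL */
	(*counter)++;			/* short critical section */
	local_irq_restore(flags);	/* write the saved PS back; rsync */
}
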
#define RSR_CPENABLE(x)	do {						  \
	__asm__ __volatile__("rsr %0," __stringify(CPENABLE) : "=a" (x)); \
	} while(0);
#define WSR_CPENABLE(x)	do {						  \
	__asm__ __volatile__("wsr %0," __stringify(CPENABLE)";rsync"	  \
			     :: "a" (x));} while(0);

#define clear_cpenable() __clear_cpenable()

static inline void __clear_cpenable(void)
{
#if XCHAL_HAVE_CP
	unsigned long i = 0;
	WSR_CPENABLE(i);
#endif
}
static inline void enable_coprocessor(int i)
{
#if XCHAL_HAVE_CP
	int cp;
	RSR_CPENABLE(cp);
	cp |= 1 << i;
	WSR_CPENABLE(cp);
#endif
}

static inline void disable_coprocessor(int i)
{
#if XCHAL_HAVE_CP
	int cp;
	RSR_CPENABLE(cp);
	cp &= ~(1 << i);
	WSR_CPENABLE(cp);
#endif
}
#define smp_read_barrier_depends() do { } while(0)
#define read_barrier_depends() do { } while(0)

#define mb()  barrier()
#define rmb() mb()
#define wmb() mb()

#ifdef CONFIG_SMP
#error smp_* not defined
#else
#define smp_mb()	barrier()
#define smp_rmb()	barrier()
#define smp_wmb()	barrier()
#endif

#define set_mb(var, value)	do { var = value; mb(); } while (0)
#define set_wmb(var, value)	do { var = value; wmb(); } while (0)
#if !defined (__ASSEMBLY__)

/*
 * switch_to(n) should switch tasks to task nr n, first
 * checking that n isn't the current task, in which case it does nothing.
 */
extern void *_switch_to(void *last, void *next);

#endif	/* __ASSEMBLY__ */

#define switch_to(prev,next,last)		\
do {						\
	clear_cpenable();			\
	(last) = _switch_to(prev, next);	\
} while(0)
/*
 * cmpxchg
 */

static inline unsigned long
__cmpxchg_u32(volatile int *p, int old, int new)
{
	__asm__ __volatile__("rsil a15, "__stringify(LOCKLEVEL)"\n\t"
			     "l32i %0, %1, 0		\n\t"
			     "bne  %0, %2, 1f		\n\t"
			     "s32i %3, %1, 0		\n\t"
			     "1:			\n\t"
			     "wsr  a15, "__stringify(PS)" \n\t"
			     "rsync			\n\t"
			     : "=&a" (old)
			     : "a" (p), "a" (old), "r" (new)
			     : "a15", "memory");
	return old;
}
/* This function doesn't exist, so you'll get a linker error
 * if something tries to do an invalid cmpxchg(). */

extern void __cmpxchg_called_with_bad_pointer(void);

static __inline__ unsigned long
__cmpxchg(volatile void *ptr, unsigned long old, unsigned long new, int size)
{
	switch (size) {
	case 4:  return __cmpxchg_u32(ptr, old, new);
	default: __cmpxchg_called_with_bad_pointer();
		 return old;
	}
}

#define cmpxchg(ptr,o,n)						      \
	({ __typeof__(*(ptr)) _o_ = (o);				      \
	   __typeof__(*(ptr)) _n_ = (n);				      \
	   (__typeof__(*(ptr))) __cmpxchg((ptr), (unsigned long)_o_,	      \
					  (unsigned long)_n_, sizeof (*(ptr))); \
	})
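
/*
 * Illustrative sketch only: a hypothetical helper showing a typical use of
 * cmpxchg(). cmpxchg() returns the value it found at *ptr, so the update
 * took effect exactly when that value equals the expected old one.
 */
static inline int __example_try_claim(volatile int *owner, int me)
{
	/* If *owner is still 0 (unclaimed), atomically store "me" into it. */
	return cmpxchg(owner, 0, me) == 0;
}
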
/*
 * xchg_u32
 *
 * Note that a15 is used here because the register allocation
 * done by the compiler is not guaranteed and a window overflow
 * may not occur between the rsil and wsr instructions. By using
 * a15 in the rsil, the machine is guaranteed to be in a state
 * where no register reference will cause an overflow.
 */

static inline unsigned long xchg_u32(volatile int * m, unsigned long val)
{
	unsigned long tmp;
	__asm__ __volatile__("rsil a15, "__stringify(LOCKLEVEL)"\n\t"
			     "l32i %0, %1, 0		\n\t"
			     "s32i %2, %1, 0		\n\t"
			     "wsr  a15, "__stringify(PS)" \n\t"
			     "rsync			\n\t"
			     : "=&a" (tmp)
			     : "a" (m), "a" (val)
			     : "a15", "memory");
	return tmp;
}
#define tas(ptr) (xchg((ptr),1))

#define xchg(ptr,x) ((__typeof__(*(ptr)))__xchg((unsigned long)(x),(ptr),sizeof(*(ptr))))

/*
 * This only works if the compiler isn't horribly bad at optimizing.
 * gcc-2.5.8 reportedly can't handle this, but I define that one to
 * be dead anyway.
 */

extern void __xchg_called_with_bad_pointer(void);

static __inline__ unsigned long
__xchg(unsigned long x, volatile void * ptr, int size)
{
	switch (size) {
	case 4:
		return xchg_u32(ptr, x);
	}
	__xchg_called_with_bad_pointer();
	return x;
}
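
/*
 * Illustrative sketch only: a hypothetical helper showing xchg() in use.
 * xchg() stores the new value and hands back whatever was there before,
 * here used to fetch and clear a word of pending flags in one step.
 */
static inline int __example_take_pending(volatile int *pending)
{
	/* Read the current flags and reset the word to 0 atomically. */
	return xchg(pending, 0);
}
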
extern void set_except_vector(int n, void *addr);

static inline void spill_registers(void)
{
	unsigned int a0, ps;

	__asm__ __volatile__ (
		"movi	a14," __stringify (PS_EXCM_MASK) " | 1\n\t"
		"mov	a12, a0\n\t"
		"rsr	a13," __stringify(SAR) "\n\t"
		"xsr	a14," __stringify(PS) "\n\t"
		"movi	a0, _spill_registers\n\t"
		"rsync\n\t"
		"callx0 a0\n\t"
		"mov	a0, a12\n\t"
		"wsr	a13," __stringify(SAR) "\n\t"
		"wsr	a14," __stringify(PS) "\n\t"
		:: "a" (&a0), "a" (&ps)
		: "a2", "a3", "a12", "a13", "a14", "a15", "memory");
}
#define arch_align_stack(x) (x)

#endif	/* _XTENSA_SYSTEM_H */