include/asm-sh/system.h
#ifndef __ASM_SH_SYSTEM_H
#define __ASM_SH_SYSTEM_H

/*
 * Copyright (C) 1999, 2000  Niibe Yutaka  &  Kaz Kojima
 * Copyright (C) 2002 Paul Mundt
 */

#include <asm/types.h>
/*
 * switch_to() should switch tasks to task nr n, first checking that
 * n isn't the current task, in which case it does nothing.
 */
#define switch_to(prev, next, last) do {				\
	struct task_struct *__last;					\
	register unsigned long *__ts1 __asm__ ("r1") = &prev->thread.sp; \
	register unsigned long *__ts2 __asm__ ("r2") = &prev->thread.pc; \
	register unsigned long *__ts4 __asm__ ("r4") = (unsigned long *)prev; \
	register unsigned long *__ts5 __asm__ ("r5") = (unsigned long *)next; \
	register unsigned long *__ts6 __asm__ ("r6") = &next->thread.sp; \
	register unsigned long __ts7 __asm__ ("r7") = next->thread.pc;	\
	__asm__ __volatile__ (".balign 4\n\t"				\
			      "stc.l	gbr, @-r15\n\t"			\
			      "sts.l	pr, @-r15\n\t"			\
			      "mov.l	r8, @-r15\n\t"			\
			      "mov.l	r9, @-r15\n\t"			\
			      "mov.l	r10, @-r15\n\t"			\
			      "mov.l	r11, @-r15\n\t"			\
			      "mov.l	r12, @-r15\n\t"			\
			      "mov.l	r13, @-r15\n\t"			\
			      "mov.l	r14, @-r15\n\t"			\
			      "mov.l	r15, @r1	! save SP\n\t"	\
			      "mov.l	@r6, r15	! change to new stack\n\t" \
			      "mova	1f, %0\n\t"			\
			      "mov.l	%0, @r2		! save PC\n\t"	\
			      "mov.l	2f, %0\n\t"			\
			      "jmp	@%0		! call __switch_to\n\t" \
			      " lds	r7, pr		! with return to new PC\n\t" \
			      ".balign	4\n"				\
			      "2:\n\t"					\
			      ".long	__switch_to\n"			\
			      "1:\n\t"					\
			      "mov.l	@r15+, r14\n\t"			\
			      "mov.l	@r15+, r13\n\t"			\
			      "mov.l	@r15+, r12\n\t"			\
			      "mov.l	@r15+, r11\n\t"			\
			      "mov.l	@r15+, r10\n\t"			\
			      "mov.l	@r15+, r9\n\t"			\
			      "mov.l	@r15+, r8\n\t"			\
			      "lds.l	@r15+, pr\n\t"			\
			      "ldc.l	@r15+, gbr\n\t"			\
			      : "=z" (__last)				\
			      : "r" (__ts1), "r" (__ts2), "r" (__ts4),	\
				"r" (__ts5), "r" (__ts6), "r" (__ts7)	\
			      : "r3", "t");				\
	last = __last;							\
} while (0)
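/*
 * A minimal usage sketch (illustrative, not part of this header): the
 * real caller is context_switch() in the scheduler core.  The "last"
 * argument exists because by the time "prev" is scheduled back in, a
 * third task may have been the one that switched to it:
 *
 *	struct task_struct *last;
 *
 *	switch_to(prev, next, last);	// resumes here when prev runs again
 *	finish_task_switch(last);	// last == task we came back from
 */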
/*
 * On SMP systems, when the scheduler does migration-cost autodetection,
 * it needs a way to flush as much of the CPU's caches as possible.
 *
 * TODO: fill this in!
 */
static inline void sched_cacheflush(void)
{
}
#ifdef CONFIG_CPU_SH4A
#define __icbi()					\
{							\
	unsigned long __addr;				\
	__addr = 0xa8000000;				\
	__asm__ __volatile__(				\
		"icbi	%0\n\t"				\
		: /* no output */			\
		: "m" (__m(__addr)));			\
}
#endif
static inline unsigned long tas(volatile int *m)
{
	unsigned long retval;

	__asm__ __volatile__ ("tas.b	@%1\n\t"
			      "movt	%0"
			      : "=r" (retval): "r" (m): "t", "memory");
	return retval;
}
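/*
 * Illustrative sketch only: tas.b sets the T bit when the byte was zero
 * and then unconditionally writes 0x80 back, so tas() returns nonzero
 * exactly when the caller grabbed a free byte lock.
 */
static inline void example_tas_lock(volatile int *lock)
{
	while (!tas(lock))
		; /* spin: somebody else holds the lock */
}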
/*
 * A brief note on ctrl_barrier(), the control register write barrier.
 *
 * Legacy SH cores typically require a sequence of 8 nops after
 * modification of a control register in order for the changes to take
 * effect. On newer cores (like the sh4a and sh5) this is accomplished
 * with icbi.
 *
 * Also note that on sh4a in the icbi case we can forego a synco for the
 * write barrier, as it's not necessary for control registers.
 *
 * Historically we have only done this type of barrier for the MMUCR, but
 * it's also necessary for the CCR, so we make it generic here instead.
 */
#ifdef CONFIG_CPU_SH4A
#define mb()		__asm__ __volatile__ ("synco": : :"memory")
#define rmb()		mb()
#define wmb()		__asm__ __volatile__ ("synco": : :"memory")
#define ctrl_barrier()	__icbi()
#define read_barrier_depends()	do { } while(0)
#else
#define mb()		__asm__ __volatile__ ("": : :"memory")
#define rmb()		mb()
#define wmb()		__asm__ __volatile__ ("": : :"memory")
#define ctrl_barrier()	__asm__ __volatile__ ("nop;nop;nop;nop;nop;nop;nop;nop")
#define read_barrier_depends()	do { } while(0)
#endif
#ifdef CONFIG_SMP
#define smp_mb()	mb()
#define smp_rmb()	rmb()
#define smp_wmb()	wmb()
#define smp_read_barrier_depends()	read_barrier_depends()
#else
#define smp_mb()	barrier()
#define smp_rmb()	barrier()
#define smp_wmb()	barrier()
#define smp_read_barrier_depends()	do { } while(0)
#endif
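/*
 * A minimal sketch, assuming a data/flag pair shared with an interrupt
 * handler or another CPU ("example_data" and "example_flag" are
 * illustrative names): the writer orders payload before flag with
 * wmb(), and the reader would pair this with rmb() before the load.
 */
static inline void example_publish(volatile int *example_data,
				   volatile int *example_flag)
{
	*example_data = 42;	/* payload first */
	wmb();			/* order the payload store before the flag */
	*example_flag = 1;	/* then signal the reader */
}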
#define set_mb(var, value) do { xchg(&var, value); } while (0)

/* Interrupt Control */
#ifdef CONFIG_CPU_HAS_SR_RB
static inline void local_irq_enable(void)
{
	unsigned long __dummy0, __dummy1;

	__asm__ __volatile__("stc	sr, %0\n\t"
			     "and	%1, %0\n\t"
			     "stc	r6_bank, %1\n\t"
			     "or	%1, %0\n\t"
			     "ldc	%0, sr"
			     : "=&r" (__dummy0), "=r" (__dummy1)
			     : "1" (~0x000000f0)
			     : "memory");
}
#else
static inline void local_irq_enable(void)
{
	unsigned long __dummy0, __dummy1;

	__asm__ __volatile__ (
		"stc	sr, %0\n\t"
		"and	%1, %0\n\t"
		"ldc	%0, sr\n\t"
		: "=&r" (__dummy0), "=r" (__dummy1)
		: "1" (~0x000000f0)
		: "memory");
}
#endif
static inline void local_irq_disable(void)
{
	unsigned long __dummy;

	__asm__ __volatile__("stc	sr, %0\n\t"
			     "or	#0xf0, %0\n\t"
			     "ldc	%0, sr"
			     : "=&z" (__dummy)
			     : /* no inputs */
			     : "memory");
}
static inline void set_bl_bit(void)
{
	unsigned long __dummy0, __dummy1;

	__asm__ __volatile__ ("stc	sr, %0\n\t"
			      "or	%2, %0\n\t"
			      "and	%3, %0\n\t"
			      "ldc	%0, sr"
			      : "=&r" (__dummy0), "=r" (__dummy1)
			      : "r" (0x10000000), "r" (0xffffff0f)
			      : "memory");
}
static inline void clear_bl_bit(void)
{
	unsigned long __dummy0, __dummy1;

	__asm__ __volatile__ ("stc	sr, %0\n\t"
			      "and	%2, %0\n\t"
			      "ldc	%0, sr"
			      : "=&r" (__dummy0), "=r" (__dummy1)
			      : "1" (~0x10000000)
			      : "memory");
}
#define local_save_flags(x) \
	__asm__("stc sr, %0; and #0xf0, %0" : "=&z" (x) :/**/: "memory" )

#define irqs_disabled()			\
({					\
	unsigned long flags;		\
	local_save_flags(flags);	\
	(flags != 0);			\
})
static inline unsigned long local_irq_save(void)
{
	unsigned long flags, __dummy;

	__asm__ __volatile__("stc	sr, %1\n\t"
			     "mov	%1, %0\n\t"
			     "or	#0xf0, %0\n\t"
			     "ldc	%0, sr\n\t"
			     "mov	%1, %0\n\t"
			     "and	#0xf0, %0"
			     : "=&z" (flags), "=&r" (__dummy)
			     :/**/
			     : "memory" );
	return flags;
}
#define local_irq_restore(x) do {			\
	if ((x & 0x000000f0) != 0x000000f0)		\
		local_irq_enable();			\
} while (0)
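/*
 * Usage sketch (illustrative only): a save/disable/restore critical
 * section.  Since local_irq_restore() only re-enables interrupts when
 * they were enabled on entry, such sections nest safely.
 */
static inline void example_irq_protect(volatile int *counter)
{
	unsigned long flags = local_irq_save();

	(*counter)++;	/* no interrupt can observe this half-done */
	local_irq_restore(flags);
}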
/*
 * Jump to P2 area.
 * When handling TLB or caches, we need to do it from P2 area.
 */
#define jump_to_P2()			\
do {					\
	unsigned long __dummy;		\
	__asm__ __volatile__(		\
		"mov.l	1f, %0\n\t"	\
		"or	%1, %0\n\t"	\
		"jmp	@%0\n\t"	\
		" nop\n\t"		\
		".balign 4\n"		\
		"1:	.long 2f\n"	\
		"2:"			\
		: "=&r" (__dummy)	\
		: "r" (0x20000000));	\
} while (0)
/*
 * Back to P1 area.
 */
#define back_to_P1()				\
do {						\
	unsigned long __dummy;			\
	ctrl_barrier();				\
	__asm__ __volatile__(			\
		"mov.l	1f, %0\n\t"		\
		"jmp	@%0\n\t"		\
		" nop\n\t"			\
		".balign 4\n"			\
		"1:	.long 2f\n"		\
		"2:"				\
		: "=&r" (__dummy));		\
} while (0)
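/*
 * Typical pattern (sketch): cache and TLB control registers must be
 * touched from the uncached P2 segment, so callers bracket the access:
 *
 *	jump_to_P2();
 *	... write CCR/MMUCR here ...
 *	back_to_P1();
 *
 * back_to_P1() issues ctrl_barrier() first so the control register
 * write has taken effect before execution returns to cached P1.
 */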
/* For spinlocks etc */
#define local_irq_save(x)	x = local_irq_save()
static inline unsigned long xchg_u32(volatile u32 *m, unsigned long val)
{
	unsigned long flags, retval;

	local_irq_save(flags);
	retval = *m;
	*m = val;
	local_irq_restore(flags);
	return retval;
}
static inline unsigned long xchg_u8(volatile u8 *m, unsigned long val)
{
	unsigned long flags, retval;

	local_irq_save(flags);
	retval = *m;
	*m = val & 0xff;
	local_irq_restore(flags);
	return retval;
}
extern void __xchg_called_with_bad_pointer(void);

#define __xchg(ptr, x, size)				\
({							\
	unsigned long __xchg__res;			\
	volatile void *__xchg_ptr = (ptr);		\
	switch (size) {					\
	case 4:						\
		__xchg__res = xchg_u32(__xchg_ptr, x);	\
		break;					\
	case 1:						\
		__xchg__res = xchg_u8(__xchg_ptr, x);	\
		break;					\
	default:					\
		__xchg_called_with_bad_pointer();	\
		__xchg__res = x;			\
		break;					\
	}						\
							\
	__xchg__res;					\
})

#define xchg(ptr,x)	\
	((__typeof__(*(ptr)))__xchg((ptr),(unsigned long)(x), sizeof(*(ptr))))
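/*
 * Sketch: xchg() is enough for a trivial test-and-set lock -- atomically
 * store 1 and inspect the old value; seeing 0 means we took the lock.
 * Illustrative only.
 */
static inline void example_xchg_lock(volatile u32 *lock)
{
	while (xchg(lock, 1) != 0)
		; /* spin until the holder stores 0 to release */
}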
static inline unsigned long __cmpxchg_u32(volatile int *m, unsigned long old,
					  unsigned long new)
{
	__u32 retval;
	unsigned long flags;

	local_irq_save(flags);
	retval = *m;
	if (retval == old)
		*m = new;
	local_irq_restore(flags);	/* implies memory barrier */
	return retval;
}
/*
 * This function doesn't exist, so you'll get a linker error
 * if something tries to do an invalid cmpxchg().
 */
extern void __cmpxchg_called_with_bad_pointer(void);

#define __HAVE_ARCH_CMPXCHG 1

static inline unsigned long __cmpxchg(volatile void *ptr, unsigned long old,
				      unsigned long new, int size)
{
	switch (size) {
	case 4:
		return __cmpxchg_u32(ptr, old, new);
	}
	__cmpxchg_called_with_bad_pointer();
	return old;
}
#define cmpxchg(ptr,o,n)						\
({									\
	__typeof__(*(ptr)) _o_ = (o);					\
	__typeof__(*(ptr)) _n_ = (n);					\
	(__typeof__(*(ptr))) __cmpxchg((ptr), (unsigned long)_o_,	\
				       (unsigned long)_n_, sizeof(*(ptr))); \
})
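/*
 * Sketch: the canonical cmpxchg() retry loop -- re-read, compute, and
 * retry the swap until no other path modified the word in between.
 */
static inline void example_atomic_add(volatile int *v, int delta)
{
	int old;

	do {
		old = *v;
	} while (cmpxchg(v, old, old + delta) != old);
}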
/*
 * XXX
 * disable hlt during certain critical i/o operations
 */
#define HAVE_DISABLE_HLT
void disable_hlt(void);
void enable_hlt(void);

#define arch_align_stack(x) (x)

#endif /* __ASM_SH_SYSTEM_H */