#ifndef __ASM_SH_SYSTEM_H
#define __ASM_SH_SYSTEM_H
/*
 * include/asm-sh/system.h
 *
 * Copyright (C) 1999, 2000 Niibe Yutaka & Kaz Kojima
 * Copyright (C) 2002 Paul Mundt
 */
#include <linux/irqflags.h>
#include <asm/types.h>
/*
 * switch_to() switches the register context and kernel stack from
 * task prev to task next; last receives the task we switched away
 * from.
 */
#define switch_to(prev, next, last) do {				\
	struct task_struct *__last;					\
	register unsigned long *__ts1 __asm__ ("r1") = &prev->thread.sp; \
	register unsigned long *__ts2 __asm__ ("r2") = &prev->thread.pc; \
	register unsigned long *__ts4 __asm__ ("r4") = (unsigned long *)prev; \
	register unsigned long *__ts5 __asm__ ("r5") = (unsigned long *)next; \
	register unsigned long *__ts6 __asm__ ("r6") = &next->thread.sp; \
	register unsigned long __ts7 __asm__ ("r7") = next->thread.pc;	\
	__asm__ __volatile__ (".balign 4\n\t"				\
			      "stc.l	gbr, @-r15\n\t"			\
			      "sts.l	pr, @-r15\n\t"			\
			      "mov.l	r8, @-r15\n\t"			\
			      "mov.l	r9, @-r15\n\t"			\
			      "mov.l	r10, @-r15\n\t"			\
			      "mov.l	r11, @-r15\n\t"			\
			      "mov.l	r12, @-r15\n\t"			\
			      "mov.l	r13, @-r15\n\t"			\
			      "mov.l	r14, @-r15\n\t"			\
			      "mov.l	r15, @r1	! save SP\n\t"	\
			      "mov.l	@r6, r15	! change to new stack\n\t" \
			      "mova	1f, %0\n\t"			\
			      "mov.l	%0, @r2		! save PC\n\t"	\
			      "mov.l	2f, %0\n\t"			\
			      "jmp	@%0		! call __switch_to\n\t" \
			      " lds	r7, pr		!  with return to new PC\n\t" \
			      ".balign	4\n"				\
			      "2:\n\t"					\
			      ".long	__switch_to\n"			\
			      "1:\n\t"					\
			      "mov.l	@r15+, r14\n\t"			\
			      "mov.l	@r15+, r13\n\t"			\
			      "mov.l	@r15+, r12\n\t"			\
			      "mov.l	@r15+, r11\n\t"			\
			      "mov.l	@r15+, r10\n\t"			\
			      "mov.l	@r15+, r9\n\t"			\
			      "mov.l	@r15+, r8\n\t"			\
			      "lds.l	@r15+, pr\n\t"			\
			      "ldc.l	@r15+, gbr\n\t"			\
			      : "=z" (__last)				\
			      : "r" (__ts1), "r" (__ts2), "r" (__ts4),	\
				"r" (__ts5), "r" (__ts6), "r" (__ts7)	\
			      : "r3", "t");				\
	last = __last;							\
} while (0)
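/*
 * A note on the sequence above: it pushes the callee-saved registers on
 * the old task's stack, records its SP and resume PC (label 1:) through
 * r1/r2, switches to the new task's stack, and jumps to __switch_to
 * with r4/r5 holding prev/next and PR set to the new task's saved PC.
 * When the old task is eventually rescheduled it resumes at 1: and pops
 * its registers back; __switch_to's return value lands in r0 ("=z") and
 * becomes "last".
 */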
/*
 * On SMP systems, when the scheduler does migration-cost autodetection,
 * it needs a way to flush as much of the CPU's caches as possible.
 *
 * TODO: fill this in!
 */
static inline void sched_cacheflush(void)
{
}
#ifdef CONFIG_CPU_SH4A
#define __icbi()					\
{							\
	unsigned long __addr;				\
	__addr = 0xa8000000;				\
	__asm__ __volatile__(				\
		"icbi	%0\n\t"				\
		: /* no output */			\
		: "m" (__m(__addr)));			\
}
#endif
/*
 * tas.b atomically tests the byte at @m and sets its most significant
 * bit; the T bit (copied out with movt) is 1 only if the byte was zero
 * beforehand, i.e. if this test-and-set won.
 */
static inline unsigned long tas(volatile int *m)
{
	unsigned long retval;

	__asm__ __volatile__ ("tas.b	@%1\n\t"
			      "movt	%0"
			      : "=r" (retval): "r" (m): "t", "memory");
	return retval;
}
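#if 0
/*
 * Illustrative sketch only, not part of this header: a busy-wait lock
 * built on tas(). Assumes cpu_relax() from <asm/processor.h>; real code
 * should use the generic spinlock API instead of open-coding this.
 */
static inline void example_tas_lock(volatile int *lock)
{
	while (!tas(lock))	/* 1 => byte was zero, lock acquired */
		cpu_relax();
}

static inline void example_tas_unlock(volatile int *lock)
{
	mb();
	*lock = 0;		/* clear the byte so the next tas() wins */
}
#endif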
/*
 * A brief note on ctrl_barrier(), the control register write barrier.
 *
 * Legacy SH cores typically require a sequence of 8 nops after
 * modification of a control register in order for the changes to take
 * effect. On newer cores (like the sh4a and sh5) this is accomplished
 * with icbi.
 *
 * Also note that on sh4a in the icbi case we can forego a synco for the
 * write barrier, as it's not necessary for control registers.
 *
 * Historically we have only done this type of barrier for the MMUCR, but
 * it's also necessary for the CCR, so we make it generic here instead.
 */
#ifdef CONFIG_CPU_SH4A
#define mb()		__asm__ __volatile__ ("synco": : :"memory")
#define rmb()		mb()
#define wmb()		__asm__ __volatile__ ("synco": : :"memory")
#define ctrl_barrier()	__icbi()
#define read_barrier_depends()	do { } while(0)
#else
#define mb()		__asm__ __volatile__ ("": : :"memory")
#define rmb()		mb()
#define wmb()		__asm__ __volatile__ ("": : :"memory")
#define ctrl_barrier()	__asm__ __volatile__ ("nop;nop;nop;nop;nop;nop;nop;nop")
#define read_barrier_depends()	do { } while(0)
#endif
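#if 0
/*
 * Illustrative sketch only, not part of this header: ctrl_barrier() is
 * meant to follow a control register update so that subsequent code is
 * guaranteed to see its effect. Assumes ctrl_inl()/ctrl_outl() from
 * <asm/io.h> and the MMUCR address macro from <asm/mmu_context.h>; the
 * TI bit value is a stand-in for this sketch.
 */
static inline void example_invalidate_tlb(void)
{
	ctrl_outl(ctrl_inl(MMUCR) | 0x04, MMUCR);	/* set the TI bit */
	ctrl_barrier();		/* 8 nops (or icbi) before touching the MMU */
}
#endif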
#ifdef CONFIG_SMP
#define smp_mb()	mb()
#define smp_rmb()	rmb()
#define smp_wmb()	wmb()
#define smp_read_barrier_depends()	read_barrier_depends()
#else
#define smp_mb()	barrier()
#define smp_rmb()	barrier()
#define smp_wmb()	barrier()
#define smp_read_barrier_depends()	do { } while(0)
#endif
#define set_mb(var, value) do { xchg(&var, value); } while (0)
/*
 * Jump to P2 area.
 * When handling TLB or caches, we need to do it from P2 area.
 */
#define jump_to_P2()			\
do {					\
	unsigned long __dummy;		\
	__asm__ __volatile__(		\
		"mov.l	1f, %0\n\t"	\
		"or	%1, %0\n\t"	\
		"jmp	@%0\n\t"	\
		" nop\n\t"		\
		".balign 4\n"		\
		"1:	.long 2f\n"	\
		"2:"			\
		: "=&r" (__dummy)	\
		: "r" (0x20000000));	\
} while (0)
/*
 * Back to P1 area.
 */
#define back_to_P1()				\
do {						\
	unsigned long __dummy;			\
	ctrl_barrier();				\
	__asm__ __volatile__(			\
		"mov.l	1f, %0\n\t"		\
		"jmp	@%0\n\t"		\
		" nop\n\t"			\
		".balign 4\n"			\
		"1:	.long 2f\n"		\
		"2:"				\
		: "=&r" (__dummy));		\
} while (0)
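#if 0
/*
 * Illustrative sketch only: the usual pairing of the two macros above.
 * Code that rewrites the cache controller register must execute
 * uncached, so it jumps to P2 first and drops back to P1 afterwards.
 * 0xff00001c is the sh4 CCR address; treat both it and the value
 * written as stand-ins for this sketch.
 */
static inline void example_write_ccr(unsigned long value)
{
	jump_to_P2();					/* run uncached */
	*(volatile unsigned long *)0xff00001c = value;	/* poke CCR */
	back_to_P1();			/* ctrl_barrier(), then back to P1 */
}
#endif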
static inline unsigned long xchg_u32(volatile u32 *m, unsigned long val)
{
	unsigned long flags, retval;

	local_irq_save(flags);
	retval = *m;
	*m = val;
	local_irq_restore(flags);
	return retval;
}
static inline unsigned long xchg_u8(volatile u8 *m, unsigned long val)
{
	unsigned long flags, retval;

	local_irq_save(flags);
	retval = *m;
	*m = val & 0xff;
	local_irq_restore(flags);
	return retval;
}
/* This function doesn't exist, so you'll get a linker error
 * if something tries to do an invalid xchg(). */
extern void __xchg_called_with_bad_pointer(void);
#define __xchg(ptr, x, size)				\
({							\
	unsigned long __xchg__res;			\
	volatile void *__xchg_ptr = (ptr);		\
	switch (size) {					\
	case 4:						\
		__xchg__res = xchg_u32(__xchg_ptr, x);	\
		break;					\
	case 1:						\
		__xchg__res = xchg_u8(__xchg_ptr, x);	\
		break;					\
	default:					\
		__xchg_called_with_bad_pointer();	\
		__xchg__res = x;			\
		break;					\
	}						\
							\
	__xchg__res;					\
})
#define xchg(ptr,x)	\
	((__typeof__(*(ptr)))__xchg((ptr),(unsigned long)(x), sizeof(*(ptr))))
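#if 0
/*
 * Illustrative sketch only: xchg() atomically (with respect to
 * interrupts, on UP) stores a new value and returns the old one. Only
 * 1- and 4-byte objects are supported; anything else trips the linker
 * error above.
 */
static inline int example_test_and_set_flag(unsigned int *flag)
{
	return xchg(flag, 1) != 0;	/* true if the flag was already set */
}
#endif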
static inline unsigned long __cmpxchg_u32(volatile int *m, unsigned long old,
	unsigned long new)
{
	__u32 retval;
	unsigned long flags;

	local_irq_save(flags);
	retval = *m;
	if (retval == old)
		*m = new;
	local_irq_restore(flags);	/* implies memory barrier */
	return retval;
}
/* This function doesn't exist, so you'll get a linker error
 * if something tries to do an invalid cmpxchg(). */
extern void __cmpxchg_called_with_bad_pointer(void);
#define __HAVE_ARCH_CMPXCHG 1
static inline unsigned long __cmpxchg(volatile void *ptr, unsigned long old,
		unsigned long new, int size)
{
	switch (size) {
	case 4:
		return __cmpxchg_u32(ptr, old, new);
	}
	__cmpxchg_called_with_bad_pointer();
	return old;
}
#define cmpxchg(ptr,o,n)						 \
({									 \
	__typeof__(*(ptr)) _o_ = (o);					 \
	__typeof__(*(ptr)) _n_ = (n);					 \
	(__typeof__(*(ptr))) __cmpxchg((ptr), (unsigned long)_o_,	 \
				       (unsigned long)_n_, sizeof(*(ptr))); \
})
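#if 0
/*
 * Illustrative sketch only: the canonical compare-and-swap retry loop.
 * cmpxchg() returns the value it found at the location; the update took
 * effect iff that equals the old value the computation was based on.
 */
static inline int example_atomic_add(volatile int *counter, int delta)
{
	int old, new;

	do {
		old = *counter;
		new = old + delta;
	} while (cmpxchg(counter, old, new) != old);

	return new;
}
#endif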
extern void *set_exception_table_vec(unsigned int vec, void *handler);

static inline void *set_exception_table_evt(unsigned int evt, void *handler)
{
	/* exception event (EXPEVT) codes are spaced 0x20 apart */
	return set_exception_table_vec(evt >> 5, handler);
}
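#if 0
/*
 * Illustrative sketch only: handlers are registered by EXPEVT code;
 * both the 0x820 code and my_trap_handler are stand-ins, not values
 * used anywhere in this tree.
 */
extern void my_trap_handler(void);

static inline void example_install_handler(void)
{
	void *old = set_exception_table_evt(0x820, my_trap_handler);

	(void)old;	/* could be restored later with another call */
}
#endif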
/* XXX
 * disable hlt during certain critical i/o operations
 */
#define HAVE_DISABLE_HLT
void disable_hlt(void);
void enable_hlt(void);
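#if 0
/*
 * Illustrative sketch only: a driver brackets timing-sensitive I/O with
 * disable_hlt()/enable_hlt() so the idle loop cannot execute "sleep"
 * while the device needs prompt service. poll_device() is a stand-in.
 */
static inline void example_critical_io(void)
{
	disable_hlt();
	/* poll_device(); */
	enable_hlt();
}
#endif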
#define arch_align_stack(x) (x)

#endif /* __ASM_SH_SYSTEM_H */