[SPARC64]: Fix sparse warning wrt. fault_in_user_windows.
[linux-2.6.git] / include/asm-sparc64/system.h
blob ed91a5d8d4f05dce3c5ebe2f54d5e3f0ce510907
#ifndef __SPARC64_SYSTEM_H
#define __SPARC64_SYSTEM_H

#include <asm/ptrace.h>
#include <asm/processor.h>
#include <asm/visasm.h>

#ifndef __ASSEMBLY__

#include <linux/irqflags.h>
#include <asm-generic/cmpxchg-local.h>
/*
 * Sparc (general) CPU types
 */
enum sparc_cpu {
	sun4		= 0x00,
	sun4c		= 0x01,
	sun4m		= 0x02,
	sun4d		= 0x03,
	sun4e		= 0x04,
	sun4u		= 0x05, /* V8 ploos ploos */
	sun_unknown	= 0x06,
	ap1000		= 0x07, /* almost a sun4m */
};
#define sparc_cpu_model sun4u

/* This cannot ever be a sun4c nor sun4 :) That's just history. */
#define ARCH_SUN4C_SUN4 0
#define ARCH_SUN4 0
/* These are here in an effort to more fully work around Spitfire Errata
 * #51.  Essentially, if a memory barrier occurs soon after a mispredicted
 * branch, the chip can stop executing instructions until a trap occurs.
 * Therefore, if interrupts are disabled, the chip can hang forever.
 *
 * It used to be believed that the memory barrier had to be right in the
 * delay slot, but a case has been traced recently wherein the memory barrier
 * was one instruction after the branch delay slot and the chip still hung.
 * The offending sequence was the following in sym_wakeup_done() of the
 * sym53c8xx_2 driver:
 *
 *	call	sym_ccb_from_dsa, 0
 *	 movge	%icc, 0, %l0
 *	brz,pn	%o0, .LL1303
 *	 mov	%o0, %l2
 *	membar	#LoadLoad
 *
 * The branch has to be mispredicted for the bug to occur.  Therefore, we put
 * the memory barrier explicitly into a "branch always, predicted taken"
 * delay slot to avoid the problem case.
 */
#define membar_safe(type) \
do {	__asm__ __volatile__("ba,pt %%xcc, 1f\n\t" \
			     " membar " type "\n" \
			     "1:\n" \
			     : : : "memory"); \
} while (0)
#define mb() \
	membar_safe("#LoadLoad | #LoadStore | #StoreStore | #StoreLoad")
#define rmb() \
	membar_safe("#LoadLoad")
#define wmb() \
	membar_safe("#StoreStore")
#define membar_storeload() \
	membar_safe("#StoreLoad")
#define membar_storeload_storestore() \
	membar_safe("#StoreLoad | #StoreStore")
#define membar_storeload_loadload() \
	membar_safe("#StoreLoad | #LoadLoad")
#define membar_storestore_loadstore() \
	membar_safe("#StoreStore | #LoadStore")
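
/* Illustrative sketch, not part of the original header: a hypothetical
 * producer/consumer pair showing how the wmb()/rmb() macros defined above
 * are meant to be paired.  The function and parameter names are examples
 * only and do not exist elsewhere in the kernel.
 */
static inline void example_publish(int *data, int *ready, int value)
{
	*data = value;
	wmb();		/* order the data store before the flag store */
	*ready = 1;
}

static inline int example_consume(int *data, int *ready)
{
	if (!*ready)
		return -1;	/* nothing published yet */
	rmb();		/* order the flag load before the data load */
	return *data;
}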
#endif

#define nop()		__asm__ __volatile__ ("nop")

#define read_barrier_depends()	do { } while(0)
#define set_mb(__var, __value) \
	do { __var = __value; membar_storeload_storestore(); } while(0)
#ifdef CONFIG_SMP
#define smp_mb()	mb()
#define smp_rmb()	rmb()
#define smp_wmb()	wmb()
#define smp_read_barrier_depends()	read_barrier_depends()
#else
#define smp_mb()	__asm__ __volatile__("":::"memory")
#define smp_rmb()	__asm__ __volatile__("":::"memory")
#define smp_wmb()	__asm__ __volatile__("":::"memory")
#define smp_read_barrier_depends()	do { } while(0)
#endif
#define flushi(addr)	__asm__ __volatile__ ("flush %0" : : "r" (addr) : "memory")

#define flushw_all()	__asm__ __volatile__("flushw")

/* Performance counter register access. */
#define read_pcr(__p)	__asm__ __volatile__("rd %%pcr, %0" : "=r" (__p))
#define write_pcr(__p)	__asm__ __volatile__("wr %0, 0x0, %%pcr" : : "r" (__p))
#define read_pic(__p)	__asm__ __volatile__("rd %%pic, %0" : "=r" (__p))
/* Blackbird errata workaround.  See commentary in
 * arch/sparc64/kernel/smp.c:smp_percpu_timer_interrupt()
 * for more information.
 */
#define reset_pic() \
	__asm__ __volatile__("ba,pt %xcc, 99f\n\t" \
			     ".align 64\n" \
			     "99:wr %g0, 0x0, %pic\n\t" \
			     "rd %pic, %g0")
#ifndef __ASSEMBLY__

extern void sun_do_break(void);
extern int stop_a_enabled;

extern void fault_in_user_windows(void);
extern void synchronize_user_stack(void);

extern void __flushw_user(void);
#define flushw_user() __flushw_user()

#define flush_user_windows flushw_user
#define flush_register_windows flushw_all

/* Don't hold the runqueue lock over context switch */
#define __ARCH_WANT_UNLOCKED_CTXSW
#define prepare_arch_switch(next) \
do { \
	flushw_all(); \
} while (0)
/* See what happens when you design the chip correctly?
 *
 * We tell gcc we clobber all non-fixed-usage registers except
 * for l0/l1.  It will use one for 'next' and the other to hold
 * the output value of 'last'.  'next' is not referenced again
 * past the invocation of switch_to in the scheduler, so we need
 * not preserve its value.  Hairy, but it lets us remove 2 loads
 * and 2 stores in this critical code path.  -DaveM
 */
#define switch_to(prev, next, last) \
do {	if (test_thread_flag(TIF_PERFCTR)) { \
		unsigned long __tmp; \
		read_pcr(__tmp); \
		current_thread_info()->pcr_reg = __tmp; \
		read_pic(__tmp); \
		current_thread_info()->kernel_cntd0 += (unsigned int)(__tmp); \
		current_thread_info()->kernel_cntd1 += ((__tmp) >> 32); \
	} \
	flush_tlb_pending(); \
	save_and_clear_fpu(); \
	/* If you are tempted to conditionalize the following */ \
	/* so that ASI is only written if it changes, think again. */ \
	__asm__ __volatile__("wr %%g0, %0, %%asi" \
	: : "r" (__thread_flag_byte_ptr(task_thread_info(next))[TI_FLAG_BYTE_CURRENT_DS])); \
	trap_block[current_thread_info()->cpu].thread = \
		task_thread_info(next); \
	__asm__ __volatile__( \
	"mov	%%g4, %%g7\n\t" \
	"stx	%%i6, [%%sp + 2047 + 0x70]\n\t" \
	"stx	%%i7, [%%sp + 2047 + 0x78]\n\t" \
	"rdpr	%%wstate, %%o5\n\t" \
	"stx	%%o6, [%%g6 + %6]\n\t" \
	"stb	%%o5, [%%g6 + %5]\n\t" \
	"rdpr	%%cwp, %%o5\n\t" \
	"stb	%%o5, [%%g6 + %8]\n\t" \
	"mov	%4, %%g6\n\t" \
	"ldub	[%4 + %8], %%g1\n\t" \
	"wrpr	%%g1, %%cwp\n\t" \
	"ldx	[%%g6 + %6], %%o6\n\t" \
	"ldub	[%%g6 + %5], %%o5\n\t" \
	"ldub	[%%g6 + %7], %%o7\n\t" \
	"wrpr	%%o5, 0x0, %%wstate\n\t" \
	"ldx	[%%sp + 2047 + 0x70], %%i6\n\t" \
	"ldx	[%%sp + 2047 + 0x78], %%i7\n\t" \
	"ldx	[%%g6 + %9], %%g4\n\t" \
	"brz,pt %%o7, 1f\n\t" \
	" mov	%%g7, %0\n\t" \
	"sethi	%%hi(ret_from_syscall), %%g1\n\t" \
	"jmpl	%%g1 + %%lo(ret_from_syscall), %%g0\n\t" \
	" nop\n\t" \
	"1:\n\t" \
	: "=&r" (last), "=r" (current), "=r" (current_thread_info_reg), \
	  "=r" (__local_per_cpu_offset) \
	: "0" (task_thread_info(next)), \
	  "i" (TI_WSTATE), "i" (TI_KSP), "i" (TI_NEW_CHILD), \
	  "i" (TI_CWP), "i" (TI_TASK) \
	: "cc", \
	  "g1", "g2", "g3", "g7", \
	  "l1", "l2", "l3", "l4", "l5", "l6", "l7", \
	  "i0", "i1", "i2", "i3", "i4", "i5", \
	  "o0", "o1", "o2", "o3", "o4", "o5", "o7"); \
	/* If you fuck with this, update ret_from_syscall code too. */ \
	if (test_thread_flag(TIF_PERFCTR)) { \
		write_pcr(current_thread_info()->pcr_reg); \
		reset_pic(); \
	} \
} while(0)
static inline unsigned long xchg32(__volatile__ unsigned int *m, unsigned int val)
{
	unsigned long tmp1, tmp2;

	__asm__ __volatile__(
"	membar	#StoreLoad | #LoadLoad\n"
"	mov	%0, %1\n"
"1:	lduw	[%4], %2\n"
"	cas	[%4], %2, %0\n"
"	cmp	%2, %0\n"
"	bne,a,pn %%icc, 1b\n"
"	 mov	%1, %0\n"
"	membar	#StoreLoad | #StoreStore\n"
	: "=&r" (val), "=&r" (tmp1), "=&r" (tmp2)
	: "0" (val), "r" (m)
	: "cc", "memory");

	return val;
}
static inline unsigned long xchg64(__volatile__ unsigned long *m, unsigned long val)
{
	unsigned long tmp1, tmp2;

	__asm__ __volatile__(
"	membar	#StoreLoad | #LoadLoad\n"
"	mov	%0, %1\n"
"1:	ldx	[%4], %2\n"
"	casx	[%4], %2, %0\n"
"	cmp	%2, %0\n"
"	bne,a,pn %%xcc, 1b\n"
"	 mov	%1, %0\n"
"	membar	#StoreLoad | #StoreStore\n"
	: "=&r" (val), "=&r" (tmp1), "=&r" (tmp2)
	: "0" (val), "r" (m)
	: "cc", "memory");

	return val;
}
#define xchg(ptr,x) ((__typeof__(*(ptr)))__xchg((unsigned long)(x),(ptr),sizeof(*(ptr))))

extern void __xchg_called_with_bad_pointer(void);

static inline unsigned long __xchg(unsigned long x, __volatile__ void * ptr,
				   int size)
{
	switch (size) {
	case 4:
		return xchg32(ptr, x);
	case 8:
		return xchg64(ptr, x);
	}
	__xchg_called_with_bad_pointer();
	return x;
}
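
/* Illustrative sketch, not part of the original header: a minimal
 * test-and-set style lock built on the xchg() macro above, assuming a
 * lock word that is 0 when free and 1 when held.  The function names
 * are examples only and do not exist elsewhere in the kernel.
 */
static inline void example_xchg_lock(unsigned long *lock)
{
	/* spin until the value previously held by *lock was 0 (unlocked) */
	while (xchg(lock, 1UL) != 0UL)
		/* busy-wait */;
}

static inline void example_xchg_unlock(unsigned long *lock)
{
	xchg(lock, 0UL);	/* the membars in xchg64() order the critical section */
}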
extern void die_if_kernel(char *str, struct pt_regs *regs) __attribute__ ((noreturn));

/*
 * Atomic compare and exchange.  Compare OLD with MEM, if identical,
 * store NEW in MEM.  Return the initial value in MEM.  Success is
 * indicated by comparing RETURN with OLD.
 */

#define __HAVE_ARCH_CMPXCHG 1
static inline unsigned long
__cmpxchg_u32(volatile int *m, int old, int new)
{
	__asm__ __volatile__("membar #StoreLoad | #LoadLoad\n"
			     "cas [%2], %3, %0\n\t"
			     "membar #StoreLoad | #StoreStore"
			     : "=&r" (new)
			     : "0" (new), "r" (m), "r" (old)
			     : "memory");

	return new;
}
static inline unsigned long
__cmpxchg_u64(volatile long *m, unsigned long old, unsigned long new)
{
	__asm__ __volatile__("membar #StoreLoad | #LoadLoad\n"
			     "casx [%2], %3, %0\n\t"
			     "membar #StoreLoad | #StoreStore"
			     : "=&r" (new)
			     : "0" (new), "r" (m), "r" (old)
			     : "memory");

	return new;
}
/* This function doesn't exist, so you'll get a linker error
   if something tries to do an invalid cmpxchg(). */
extern void __cmpxchg_called_with_bad_pointer(void);

static inline unsigned long
__cmpxchg(volatile void *ptr, unsigned long old, unsigned long new, int size)
{
	switch (size) {
	case 4:
		return __cmpxchg_u32(ptr, old, new);
	case 8:
		return __cmpxchg_u64(ptr, old, new);
	}
	__cmpxchg_called_with_bad_pointer();
	return old;
}
#define cmpxchg(ptr,o,n) \
  ({ \
	__typeof__(*(ptr)) _o_ = (o); \
	__typeof__(*(ptr)) _n_ = (n); \
	(__typeof__(*(ptr))) __cmpxchg((ptr), (unsigned long)_o_, \
				       (unsigned long)_n_, sizeof(*(ptr))); \
  })
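
/* Illustrative sketch, not part of the original header: a lock-free
 * increment built on cmpxchg(), retrying until the value read as OLD is
 * still the value in memory.  Success is detected by comparing the return
 * value with OLD, as described in the comment above.  The names are
 * examples only.
 */
static inline int example_cmpxchg_inc(volatile int *counter)
{
	int old, new;

	do {
		old = *counter;
		new = old + 1;
	} while (cmpxchg(counter, old, new) != old);

	return new;
}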
/*
 * cmpxchg_local and cmpxchg64_local are atomic wrt current CPU. Always make
 * them available.
 */

static inline unsigned long __cmpxchg_local(volatile void *ptr,
					    unsigned long old,
					    unsigned long new, int size)
{
	switch (size) {
	case 4:
	case 8:	return __cmpxchg(ptr, old, new, size);
	default:
		return __cmpxchg_local_generic(ptr, old, new, size);
	}

	return old;
}
#define cmpxchg_local(ptr, o, n) \
	((__typeof__(*(ptr)))__cmpxchg_local((ptr), (unsigned long)(o), \
			(unsigned long)(n), sizeof(*(ptr))))
#define cmpxchg64_local(ptr, o, n) \
  ({ \
	BUILD_BUG_ON(sizeof(*(ptr)) != 8); \
	cmpxchg_local((ptr), (o), (n)); \
  })
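
/* Illustrative sketch, not part of the original header: cmpxchg_local()
 * used on a 64-bit counter that is only ever updated from the local CPU,
 * where the cheaper CPU-local guarantee described above is sufficient.
 * The names are examples only.
 */
static inline unsigned long example_local_inc(unsigned long *counter)
{
	unsigned long old, new;

	do {
		old = *counter;
		new = old + 1;
	} while (cmpxchg_local(counter, old, new) != old);

	return new;
}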
#endif /* !(__ASSEMBLY__) */

#define arch_align_stack(x) (x)

#endif /* !(__SPARC64_SYSTEM_H) */