#ifndef __SPARC64_SYSTEM_H
#define __SPARC64_SYSTEM_H

#include <asm/ptrace.h>
#include <asm/processor.h>
#include <asm/visasm.h>

#ifndef __ASSEMBLY__

#include <linux/irqflags.h>
#include <asm-generic/cmpxchg-local.h>

/*
 * Sparc (general) CPU types
 */
enum sparc_cpu {
  sun4        = 0x00,
  sun4c       = 0x01,
  sun4m       = 0x02,
  sun4d       = 0x03,
  sun4e       = 0x04,
  sun4u       = 0x05, /* V8 ploos ploos */
  sun_unknown = 0x06,
  ap1000      = 0x07, /* almost a sun4m */
};

#define sparc_cpu_model sun4u

/* This cannot ever be a sun4c :) That's just history. */
#define ARCH_SUN4C 0

extern const char *sparc_cpu_type;
extern const char *sparc_fpu_type;
extern const char *sparc_pmu_type;

extern char reboot_command[];

/* These are here in an effort to more fully work around Spitfire Errata
 * #51.  Essentially, if a memory barrier occurs soon after a mispredicted
 * branch, the chip can stop executing instructions until a trap occurs.
 * Therefore, if interrupts are disabled, the chip can hang forever.
 *
 * It used to be believed that the memory barrier had to be right in the
 * delay slot, but a case has been traced recently wherein the memory barrier
 * was one instruction after the branch delay slot and the chip still hung.
 * The offending sequence was the following in sym_wakeup_done() of the
 * sym53c8xx_2 driver:
 *
 *	call	sym_ccb_from_dsa, 0
 *	 movge	%icc, 0, %l0
 *	brz,pn	%o0, .LL1303
 *	 mov	%o0, %l2
 *	membar	#LoadLoad
 *
 * The branch has to be mispredicted for the bug to occur.  Therefore, we put
 * the memory barrier explicitly into a "branch always, predicted taken"
 * delay slot to avoid the problem case.
 */
#define membar_safe(type) \
do {	__asm__ __volatile__("ba,pt	%%xcc, 1f\n\t" \
			     " membar	" type "\n" \
			     "1:\n" \
			     : : : "memory"); \
} while (0)

/* The kernel always executes in TSO memory model these days,
 * and furthermore most sparc64 chips implement more stringent
 * memory ordering than required by the specifications.
 */
#define mb()	membar_safe("#StoreLoad")
#define rmb()	__asm__ __volatile__("":::"memory")
#define wmb()	__asm__ __volatile__("":::"memory")

#endif

#define nop() 		__asm__ __volatile__ ("nop")

#define read_barrier_depends()		do { } while(0)
#define set_mb(__var, __value) \
	do { __var = __value; membar_safe("#StoreLoad"); } while(0)

#ifdef CONFIG_SMP
#define smp_mb()	mb()
#define smp_rmb()	rmb()
#define smp_wmb()	wmb()
#else
#define smp_mb()	__asm__ __volatile__("":::"memory")
#define smp_rmb()	__asm__ __volatile__("":::"memory")
#define smp_wmb()	__asm__ __volatile__("":::"memory")
#endif

#define smp_read_barrier_depends()	do { } while(0)
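
/* For illustration (not in the original header): the producer/consumer
 * pairing these barriers exist for.  'data' and 'flag' are hypothetical
 * shared variables:
 *
 *	CPU 0 (producer)	CPU 1 (consumer)
 *	data = 42;		while (!flag)
 *	smp_wmb();			cpu_relax();
 *	flag = 1;		smp_rmb();
 *				BUG_ON(data != 42);
 *
 * smp_wmb() orders the data store before the flag store; smp_rmb()
 * orders the flag load before the data load.
 */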

#define flushi(addr)	__asm__ __volatile__ ("flush %0" : : "r" (addr) : "memory")

#define flushw_all()	__asm__ __volatile__("flushw")

/* Performance counter register access. */
#define read_pcr(__p)  __asm__ __volatile__("rd	%%pcr, %0" : "=r" (__p))
#define write_pcr(__p) __asm__ __volatile__("wr	%0, 0x0, %%pcr" : : "r" (__p))
#define read_pic(__p)  __asm__ __volatile__("rd	%%pic, %0" : "=r" (__p))

/* Blackbird errata workaround.  See commentary in
 * arch/sparc64/kernel/smp.c:smp_percpu_timer_interrupt()
 * for more information.
 */
#define write_pic(__p)  					\
	__asm__ __volatile__("ba,pt	%%xcc, 99f\n\t"		\
			     " nop\n\t"				\
			     "99:wr	%0, 0x0, %%pic\n\t"	\
			     "rd	%%pic, %%g0" : : "r" (__p))
#define reset_pic()	write_pic(0)
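
/* For illustration (not in the original header): %pic packs two 32-bit
 * event counters into one 64-bit register, so a sample splits like the
 * kernel_cntd0/kernel_cntd1 accumulation done in switch_to() below:
 *
 *	unsigned long __pic;
 *	read_pic(__pic);
 *	cntd0 = (unsigned int) __pic;	// low 32 bits: counter 0
 *	cntd1 = __pic >> 32;		// high 32 bits: counter 1
 */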

#ifndef __ASSEMBLY__

extern void sun_do_break(void);
extern int stop_a_enabled;
extern int scons_pwroff;

extern void fault_in_user_windows(void);
extern void synchronize_user_stack(void);

extern void __flushw_user(void);
#define flushw_user() __flushw_user()

#define flush_user_windows flushw_user
#define flush_register_windows flushw_all

/* Don't hold the runqueue lock over context switch */
#define __ARCH_WANT_UNLOCKED_CTXSW
#define prepare_arch_switch(next)		\
do {						\
	flushw_all();				\
} while (0)

	/* See what happens when you design the chip correctly?
	 *
	 * We tell gcc we clobber all non-fixed-usage registers except
	 * for l0/l1.  It will use one for 'next' and the other to hold
	 * the output value of 'last'.  'next' is not referenced again
	 * past the invocation of switch_to in the scheduler, so we need
	 * not preserve its value.  Hairy, but it lets us remove 2 loads
	 * and 2 stores in this critical code path.  -DaveM
	 */
#define switch_to(prev, next, last)					\
do {	if (test_thread_flag(TIF_PERFCTR)) {				\
		unsigned long __tmp;					\
		read_pcr(__tmp);					\
		current_thread_info()->pcr_reg = __tmp;			\
		read_pic(__tmp);					\
		current_thread_info()->kernel_cntd0 += (unsigned int)(__tmp);\
		current_thread_info()->kernel_cntd1 += ((__tmp) >> 32);	\
	}								\
	flush_tlb_pending();						\
	save_and_clear_fpu();						\
	/* If you are tempted to conditionalize the following */	\
	/* so that ASI is only written if it changes, think again. */	\
	__asm__ __volatile__("wr %%g0, %0, %%asi"			\
	: : "r" (__thread_flag_byte_ptr(task_thread_info(next))[TI_FLAG_BYTE_CURRENT_DS]));\
	trap_block[current_thread_info()->cpu].thread =			\
		task_thread_info(next);					\
	__asm__ __volatile__(						\
	"mov	%%g4, %%g7\n\t"						\
	"stx	%%i6, [%%sp + 2047 + 0x70]\n\t"				\
	"stx	%%i7, [%%sp + 2047 + 0x78]\n\t"				\
	"rdpr	%%wstate, %%o5\n\t"					\
	"stx	%%o6, [%%g6 + %6]\n\t"					\
	"stb	%%o5, [%%g6 + %5]\n\t"					\
	"rdpr	%%cwp, %%o5\n\t"					\
	"stb	%%o5, [%%g6 + %8]\n\t"					\
	"wrpr	%%g0, 15, %%pil\n\t"					\
	"mov	%4, %%g6\n\t"						\
	"ldub	[%4 + %8], %%g1\n\t"					\
	"wrpr	%%g1, %%cwp\n\t"					\
	"ldx	[%%g6 + %6], %%o6\n\t"					\
	"ldub	[%%g6 + %5], %%o5\n\t"					\
	"ldub	[%%g6 + %7], %%o7\n\t"					\
	"wrpr	%%o5, 0x0, %%wstate\n\t"				\
	"ldx	[%%sp + 2047 + 0x70], %%i6\n\t"				\
	"ldx	[%%sp + 2047 + 0x78], %%i7\n\t"				\
	"ldx	[%%g6 + %9], %%g4\n\t"					\
	"wrpr	%%g0, 14, %%pil\n\t"					\
	"brz,pt %%o7, switch_to_pc\n\t"					\
	" mov	%%g7, %0\n\t"						\
	"sethi	%%hi(ret_from_syscall), %%g1\n\t"			\
	"jmpl	%%g1 + %%lo(ret_from_syscall), %%g0\n\t"		\
	" nop\n\t"							\
	".globl switch_to_pc\n\t"					\
	"switch_to_pc:\n\t"						\
	: "=&r" (last), "=r" (current), "=r" (current_thread_info_reg),	\
	  "=r" (__local_per_cpu_offset)					\
	: "0" (task_thread_info(next)),					\
	  "i" (TI_WSTATE), "i" (TI_KSP), "i" (TI_NEW_CHILD),		\
	  "i" (TI_CWP), "i" (TI_TASK)					\
	: "cc",								\
		"g1", "g2", "g3",                   "g7",		\
		"l1", "l2", "l3", "l4", "l5", "l6", "l7",		\
	  "i0", "i1", "i2", "i3", "i4", "i5",				\
	  "o0", "o1", "o2", "o3", "o4", "o5",       "o7");		\
	/* If you fuck with this, update ret_from_syscall code too. */	\
	if (test_thread_flag(TIF_PERFCTR)) {				\
		write_pcr(current_thread_info()->pcr_reg);		\
		reset_pic();						\
	}								\
} while(0)

static inline unsigned long xchg32(__volatile__ unsigned int *m, unsigned int val)
{
	unsigned long tmp1, tmp2;

	__asm__ __volatile__(
"	mov		%0, %1\n"
"1:	lduw		[%4], %2\n"
"	cas		[%4], %2, %0\n"
"	cmp		%2, %0\n"
"	bne,a,pn	%%icc, 1b\n"
"	 mov		%1, %0\n"
	: "=&r" (val), "=&r" (tmp1), "=&r" (tmp2)
	: "0" (val), "r" (m)
	: "cc", "memory");
	return val;
}

static inline unsigned long xchg64(__volatile__ unsigned long *m, unsigned long val)
{
	unsigned long tmp1, tmp2;

	__asm__ __volatile__(
"	mov		%0, %1\n"
"1:	ldx		[%4], %2\n"
"	casx		[%4], %2, %0\n"
"	cmp		%2, %0\n"
"	bne,a,pn	%%xcc, 1b\n"
"	 mov		%1, %0\n"
	: "=&r" (val), "=&r" (tmp1), "=&r" (tmp2)
	: "0" (val), "r" (m)
	: "cc", "memory");
	return val;
}

#define xchg(ptr,x) ((__typeof__(*(ptr)))__xchg((unsigned long)(x),(ptr),sizeof(*(ptr))))

extern void __xchg_called_with_bad_pointer(void);

static inline unsigned long __xchg(unsigned long x, __volatile__ void * ptr,
				       int size)
{
	switch (size) {
	case 4:
		return xchg32(ptr, x);
	case 8:
		return xchg64(ptr, x);
	}
	__xchg_called_with_bad_pointer();
	return x;
}
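
/* For illustration (not in the original header): xchg() atomically stores
 * a new value and returns the previous one.  A hypothetical test-and-set
 * spin loop built on it:
 *
 *	static volatile unsigned int busy;	// hypothetical flag
 *
 *	while (xchg(&busy, 1) != 0)
 *		cpu_relax();			// spin until we observed 0
 *	... critical section ...
 *	busy = 0;				// release (real code adds a barrier)
 */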

extern void die_if_kernel(char *str, struct pt_regs *regs) __attribute__ ((noreturn));

/*
 * Atomic compare and exchange.  Compare OLD with MEM, if identical,
 * store NEW in MEM.  Return the initial value in MEM.  Success is
 * indicated by comparing RETURN with OLD.
 */

#define __HAVE_ARCH_CMPXCHG 1

static inline unsigned long
__cmpxchg_u32(volatile int *m, int old, int new)
{
	__asm__ __volatile__("cas [%2], %3, %0"
			     : "=&r" (new)
			     : "0" (new), "r" (m), "r" (old)
			     : "memory");

	return new;
}

static inline unsigned long
__cmpxchg_u64(volatile long *m, unsigned long old, unsigned long new)
{
	__asm__ __volatile__("casx [%2], %3, %0"
			     : "=&r" (new)
			     : "0" (new), "r" (m), "r" (old)
			     : "memory");

	return new;
}

/* This function doesn't exist, so you'll get a linker error
   if something tries to do an invalid cmpxchg().  */
extern void __cmpxchg_called_with_bad_pointer(void);

static inline unsigned long
__cmpxchg(volatile void *ptr, unsigned long old, unsigned long new, int size)
{
	switch (size) {
		case 4:
			return __cmpxchg_u32(ptr, old, new);
		case 8:
			return __cmpxchg_u64(ptr, old, new);
	}
	__cmpxchg_called_with_bad_pointer();
	return old;
}

#define cmpxchg(ptr,o,n)						 \
  ({									 \
     __typeof__(*(ptr)) _o_ = (o);					 \
     __typeof__(*(ptr)) _n_ = (n);					 \
     (__typeof__(*(ptr))) __cmpxchg((ptr), (unsigned long)_o_,		 \
				    (unsigned long)_n_, sizeof(*(ptr))); \
  })
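
/* For illustration (not in the original header): the standard cmpxchg()
 * retry loop, shown here as a hypothetical atomic increment of an int:
 *
 *	int old, new;
 *	do {
 *		old = *p;		// snapshot the current value
 *		new = old + 1;		// compute the update
 *	} while (cmpxchg(p, old, new) != old);
 *
 * cmpxchg() returns the value it found in memory; getting 'old' back
 * means 'new' was stored and the update won the race.
 */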

/*
 * cmpxchg_local and cmpxchg64_local are atomic wrt current CPU. Always make
 * them available.
 */

static inline unsigned long __cmpxchg_local(volatile void *ptr,
				      unsigned long old,
				      unsigned long new, int size)
{
	switch (size) {
	case 4:
	case 8:	return __cmpxchg(ptr, old, new, size);
	default:
		return __cmpxchg_local_generic(ptr, old, new, size);
	}

	return old;
}

#define cmpxchg_local(ptr, o, n)					\
	((__typeof__(*(ptr)))__cmpxchg_local((ptr), (unsigned long)(o),	\
			(unsigned long)(n), sizeof(*(ptr))))
#define cmpxchg64_local(ptr, o, n)					\
  ({									\
	BUILD_BUG_ON(sizeof(*(ptr)) != 8);				\
	cmpxchg_local((ptr), (o), (n));					\
  })
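
/* For illustration (not in the original header): cmpxchg_local() takes the
 * same arguments as cmpxchg() but is only guaranteed atomic against code
 * running on the local CPU, e.g. for per-cpu statistics ('pcp_count' is
 * hypothetical):
 *
 *	old = cmpxchg_local(&pcp_count, old, old + 1);
 */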

#endif /* !(__ASSEMBLY__) */

#define arch_align_stack(x) (x)

#endif /* !(__SPARC64_SYSTEM_H) */