/* $Id: system.h,v 1.69 2002/02/09 19:49:31 davem Exp $ */
#ifndef __SPARC64_SYSTEM_H
#define __SPARC64_SYSTEM_H

#include <linux/config.h>
#include <asm/ptrace.h>
#include <asm/processor.h>
#include <asm/visasm.h>

#ifndef __ASSEMBLY__
/*
 * Sparc (general) CPU types
 */
enum sparc_cpu {
	sun4 = 0x00, sun4c = 0x01, sun4m = 0x02,
	sun4d = 0x03, sun4e = 0x04,
	sun4u = 0x05,		/* V8 ploos ploos */
	sun_unknown = 0x06,
	ap1000 = 0x07,		/* almost a sun4m */
};

#define sparc_cpu_model sun4u

/* This cannot ever be a sun4c nor sun4 :) That's just history. */
#define ARCH_SUN4C_SUN4 0
#endif

#define setipl(__new_ipl) \
	__asm__ __volatile__("wrpr	%0, %%pil" : : "r" (__new_ipl) : "memory")

#define local_irq_disable() \
	__asm__ __volatile__("wrpr	15, %%pil" : : : "memory")

#define local_irq_enable() \
	__asm__ __volatile__("wrpr	0, %%pil" : : : "memory")

#define getipl() \
({ unsigned long retval; __asm__ __volatile__("rdpr	%%pil, %0" : "=r" (retval)); retval; })

#define swap_pil(__new_pil) \
({	unsigned long retval; \
	__asm__ __volatile__("rdpr	%%pil, %0\n\t" \
			     "wrpr	%1, %%pil" \
			     : "=&r" (retval) : "r" (__new_pil) : "memory"); \
	retval; })

#define read_pil_and_cli() \
({	unsigned long retval; \
	__asm__ __volatile__("rdpr	%%pil, %0\n\t" \
			     "wrpr	15, %%pil" \
			     : "=r" (retval) : : "memory"); \
	retval; })

#define local_save_flags(flags)		((flags) = getipl())
#define local_irq_save(flags)		((flags) = read_pil_and_cli())
#define local_irq_restore(flags)	setipl((flags))

/* On sparc64 IRQ flags are the PIL register.  A value of zero
 * means all interrupt levels are enabled, any other value means
 * only IRQ levels greater than that value will be received.
 * Consequently this means that the lowest IRQ level is one.
 */
#define irqs_disabled()		\
({	unsigned long flags;	\
	local_save_flags(flags);\
	(flags > 0);		\
})

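/*
 * Illustrative usage sketch (not part of the original header): callers
 * normally bracket a short critical section with local_irq_save() and
 * local_irq_restore(); irqs_disabled() is non-zero while the PIL is raised.
 *
 *	unsigned long flags;
 *
 *	local_irq_save(flags);		-- raise PIL to 15, remember old value
 *	... critical section ...	-- no interrupts delivered here
 *	local_irq_restore(flags);	-- write the saved PIL back
 */
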
#define nop() 		__asm__ __volatile__ ("nop")

#define membar(type)	__asm__ __volatile__ ("membar " type : : : "memory")
#define mb()		\
	membar("#LoadLoad | #LoadStore | #StoreStore | #StoreLoad")
#define rmb()		membar("#LoadLoad")
#define wmb()		membar("#StoreStore")
#define read_barrier_depends()		do { } while(0)
#define set_mb(__var, __value) \
	do { __var = __value; membar("#StoreLoad | #StoreStore"); } while(0)
#define set_wmb(__var, __value) \
	do { __var = __value; membar("#StoreStore"); } while(0)

#ifdef CONFIG_SMP
#define smp_mb()	mb()
#define smp_rmb()	rmb()
#define smp_wmb()	wmb()
#define smp_read_barrier_depends()	read_barrier_depends()
#else
#define smp_mb()	__asm__ __volatile__("":::"memory")
#define smp_rmb()	__asm__ __volatile__("":::"memory")
#define smp_wmb()	__asm__ __volatile__("":::"memory")
#define smp_read_barrier_depends()	do { } while(0)
#endif

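/*
 * Illustrative pairing sketch (not part of the original header): a producer
 * publishes data before setting a flag, and the consumer orders the flag
 * read before the data read.  Variable names are hypothetical.
 *
 *	writer:				reader:
 *		shared_data = val;		if (shared_flag) {
 *		smp_wmb();				smp_rmb();
 *		shared_flag = 1;			val = shared_data;
 *						}
 *
 * On UP builds these degenerate to compiler barriers, since a single CPU
 * observes its own stores in program order.
 */
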
#define flushi(addr)	__asm__ __volatile__ ("flush %0" : : "r" (addr) : "memory")

#define flushw_all()	__asm__ __volatile__("flushw")

/* Performance counter register access. */
#define read_pcr(__p)  __asm__ __volatile__("rd	%%pcr, %0" : "=r" (__p))
#define write_pcr(__p) __asm__ __volatile__("wr	%0, 0x0, %%pcr" : : "r" (__p))
#define read_pic(__p)  __asm__ __volatile__("rd	%%pic, %0" : "=r" (__p))

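/*
 * Illustrative sketch (not part of the original header): %pic packs two
 * 32-bit event counters, selected and enabled by whatever was previously
 * programmed into %pcr, so a caller might snapshot it around a region of
 * interest (variable names are hypothetical):
 *
 *	unsigned long before, after;
 *
 *	read_pic(before);
 *	... code being measured ...
 *	read_pic(after);
 *	delta_cnt0 = (unsigned int)(after - before);	-- low counter delta
 *	delta_cnt1 = (after >> 32) - (before >> 32);	-- high counter delta
 */
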
/* Blackbird errata workaround.  See commentary in
 * arch/sparc64/kernel/smp.c:smp_percpu_timer_interrupt()
 * for more information.
 */
#define reset_pic()						\
	__asm__ __volatile__("ba,pt	%xcc, 99f\n\t"		\
			     ".align	64\n"			\
			  "99:wr	%g0, 0x0, %pic\n\t"	\
			     "rd	%pic, %g0")

#ifndef __ASSEMBLY__

extern void sun_do_break(void);
extern int serial_console;
extern int stop_a_enabled;

static __inline__ int con_is_present(void)
{
	return serial_console ? 0 : 1;
}

extern void synchronize_user_stack(void);

extern void __flushw_user(void);
#define flushw_user() __flushw_user()

#define flush_user_windows flushw_user
#define flush_register_windows flushw_all

/* Don't hold the runqueue lock over context switch */
#define __ARCH_WANT_UNLOCKED_CTXSW
#define prepare_arch_switch(next)	\
do {					\
	flushw_all();			\
} while (0)

/* See what happens when you design the chip correctly?
 *
 * We tell gcc we clobber all non-fixed-usage registers except
 * for l0/l1.  It will use one for 'next' and the other to hold
 * the output value of 'last'.  'next' is not referenced again
 * past the invocation of switch_to in the scheduler, so we need
 * not preserve its value.  Hairy, but it lets us remove 2 loads
 * and 2 stores in this critical code path.  -DaveM
 */
#ifdef CONFIG_DEBUG_SPINLOCK
#define EXTRA_CLOBBER ,"%l1"
#else
#define EXTRA_CLOBBER
#endif

#define switch_to(prev, next, last)					\
do {	if (test_thread_flag(TIF_PERFCTR)) {				\
		unsigned long __tmp;					\
		read_pcr(__tmp);					\
		current_thread_info()->pcr_reg = __tmp;			\
		read_pic(__tmp);					\
		current_thread_info()->kernel_cntd0 += (unsigned int)(__tmp);\
		current_thread_info()->kernel_cntd1 += ((__tmp) >> 32);	\
	}								\
	flush_tlb_pending();						\
	save_and_clear_fpu();						\
	/* If you are tempted to conditionalize the following */	\
	/* so that ASI is only written if it changes, think again. */	\
	__asm__ __volatile__("wr %%g0, %0, %%asi"			\
	: : "r" (__thread_flag_byte_ptr(next->thread_info)[TI_FLAG_BYTE_CURRENT_DS]));\
	__asm__ __volatile__(						\
	"mov	%%g4, %%g7\n\t"						\
	"wrpr	%%g0, 0x95, %%pstate\n\t"				\
	"stx	%%i6, [%%sp + 2047 + 0x70]\n\t"				\
	"stx	%%i7, [%%sp + 2047 + 0x78]\n\t"				\
	"rdpr	%%wstate, %%o5\n\t"					\
	"stx	%%o6, [%%g6 + %3]\n\t"					\
	"stb	%%o5, [%%g6 + %2]\n\t"					\
	"rdpr	%%cwp, %%o5\n\t"					\
	"stb	%%o5, [%%g6 + %5]\n\t"					\
	"mov	%1, %%g6\n\t"						\
189 "ldub [%1 + %5], %%g1\n\t" \
190 "wrpr %%g1, %%cwp\n\t" \
191 "ldx [%%g6 + %3], %%o6\n\t" \
192 "ldub [%%g6 + %2], %%o5\n\t" \
193 "ldub [%%g6 + %4], %%o7\n\t" \
194 "mov %%g6, %%l2\n\t" \
195 "wrpr %%o5, 0x0, %%wstate\n\t" \
196 "ldx [%%sp + 2047 + 0x70], %%i6\n\t" \
197 "ldx [%%sp + 2047 + 0x78], %%i7\n\t" \
198 "wrpr %%g0, 0x94, %%pstate\n\t" \
199 "mov %%l2, %%g6\n\t" \
200 "ldx [%%g6 + %6], %%g4\n\t" \
201 "wrpr %%g0, 0x96, %%pstate\n\t" \
202 "brz,pt %%o7, 1f\n\t" \
203 " mov %%g7, %0\n\t" \
204 "b,a ret_from_syscall\n\t" \
207 : "0" (next->thread_info), \
208 "i" (TI_WSTATE), "i" (TI_KSP), "i" (TI_NEW_CHILD), \
209 "i" (TI_CWP), "i" (TI_TASK) \
211 "g1", "g2", "g3", "g7", \
212 "l2", "l3", "l4", "l5", "l6", "l7", \
213 "i0", "i1", "i2", "i3", "i4", "i5", \
214 "o0", "o1", "o2", "o3", "o4", "o5", "o7" EXTRA_CLOBBER);\
	/* If you fuck with this, update ret_from_syscall code too. */	\
	if (test_thread_flag(TIF_PERFCTR)) {				\
		write_pcr(current_thread_info()->pcr_reg);		\
		reset_pic();						\
	}								\
} while(0)

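/*
 * Illustrative note (not part of the original header): the scheduler in
 * kernel/sched.c invokes this roughly as switch_to(prev, next, prev), so
 * once the new thread runs, 'last' names the task that was switched away
 * from, i.e. the value the asm above hands back through %g7.
 */
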
static __inline__ unsigned long xchg32(__volatile__ unsigned int *m, unsigned int val)
{
	unsigned long tmp1, tmp2;

	__asm__ __volatile__(
"	membar		#StoreLoad | #LoadLoad\n"
"	mov		%0, %1\n"
"1:	lduw		[%4], %2\n"
"	cas		[%4], %2, %0\n"
"	cmp		%2, %0\n"
"	bne,a,pn	%%icc, 1b\n"
"	 mov		%1, %0\n"
"	membar		#StoreLoad | #StoreStore\n"
	: "=&r" (val), "=&r" (tmp1), "=&r" (tmp2)
	: "0" (val), "r" (m)
	: "cc", "memory");
	return val;
}

static __inline__ unsigned long xchg64(__volatile__ unsigned long *m, unsigned long val)
{
	unsigned long tmp1, tmp2;

	__asm__ __volatile__(
"	membar		#StoreLoad | #LoadLoad\n"
"	mov		%0, %1\n"
"1:	ldx		[%4], %2\n"
"	casx		[%4], %2, %0\n"
"	cmp		%2, %0\n"
"	bne,a,pn	%%xcc, 1b\n"
"	 mov		%1, %0\n"
"	membar		#StoreLoad | #StoreStore\n"
	: "=&r" (val), "=&r" (tmp1), "=&r" (tmp2)
	: "0" (val), "r" (m)
	: "cc", "memory");
	return val;
}

#define xchg(ptr,x) ((__typeof__(*(ptr)))__xchg((unsigned long)(x),(ptr),sizeof(*(ptr))))
#define tas(ptr) (xchg((ptr),1))

extern void __xchg_called_with_bad_pointer(void);

static __inline__ unsigned long __xchg(unsigned long x, __volatile__ void * ptr,
				       int size)
{
	switch (size) {
	case 4:
		return xchg32(ptr, x);
	case 8:
		return xchg64(ptr, x);
	};
	__xchg_called_with_bad_pointer();
	return x;
}

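/*
 * Illustrative only, not part of the original header: tas() above is the
 * classic test-and-set primitive, so a minimal busy-wait lock can be built
 * on it.  The type and function names below are hypothetical.
 */
struct example_tas_lock { volatile unsigned int v; };

static __inline__ void example_tas_lock_acquire(struct example_tas_lock *l)
{
	/* Spin until we are the caller that flips 0 -> 1. */
	while (tas(&l->v))
		; /* xchg32()'s membars give acquire semantics on success */
}

static __inline__ void example_tas_lock_release(struct example_tas_lock *l)
{
	/* Drain our critical-section stores before the unlocking store. */
	wmb();
	l->v = 0;
}
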
extern void die_if_kernel(char *str, struct pt_regs *regs) __attribute__ ((noreturn));

/*
 * Atomic compare and exchange.  Compare OLD with MEM, if identical,
 * store NEW in MEM.  Return the initial value in MEM.  Success is
 * indicated by comparing RETURN with OLD.
 */

#define __HAVE_ARCH_CMPXCHG 1

static __inline__ unsigned long
__cmpxchg_u32(volatile int *m, int old, int new)
{
	__asm__ __volatile__("membar #StoreLoad | #LoadLoad\n"
			     "cas [%2], %3, %0\n\t"
			     "membar #StoreLoad | #StoreStore"
			     : "=&r" (new)
			     : "0" (new), "r" (m), "r" (old)
			     : "memory");

	return new;
}

static __inline__ unsigned long
__cmpxchg_u64(volatile long *m, unsigned long old, unsigned long new)
{
	__asm__ __volatile__("membar #StoreLoad | #LoadLoad\n"
			     "casx [%2], %3, %0\n\t"
			     "membar #StoreLoad | #StoreStore"
			     : "=&r" (new)
			     : "0" (new), "r" (m), "r" (old)
			     : "memory");

	return new;
}

/* This function doesn't exist, so you'll get a linker error
   if something tries to do an invalid cmpxchg().  */
extern void __cmpxchg_called_with_bad_pointer(void);

static __inline__ unsigned long
__cmpxchg(volatile void *ptr, unsigned long old, unsigned long new, int size)
{
	switch (size) {
		case 4:
			return __cmpxchg_u32(ptr, old, new);
		case 8:
			return __cmpxchg_u64(ptr, old, new);
	}
	__cmpxchg_called_with_bad_pointer();
	return old;
}

#define cmpxchg(ptr,o,n)						 \
  ({									 \
     __typeof__(*(ptr)) _o_ = (o);					 \
     __typeof__(*(ptr)) _n_ = (n);					 \
     (__typeof__(*(ptr))) __cmpxchg((ptr), (unsigned long)_o_,		 \
				    (unsigned long)_n_, sizeof(*(ptr))); \
  })

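/*
 * Illustrative only, not part of the original header: the usual cmpxchg()
 * retry loop, here bumping a counter atomically.  The function name is
 * hypothetical.
 */
static __inline__ int example_atomic_inc(volatile int *ctr)
{
	int old, new;

	/* Re-read and retry until no other CPU updated *ctr between our
	 * read and the compare-and-swap.
	 */
	do {
		old = *ctr;
		new = old + 1;
	} while (cmpxchg(ctr, old, new) != old);

	return new;
}
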
#endif /* !(__ASSEMBLY__) */

#define arch_align_stack(x) (x)

#endif /* !(__SPARC64_SYSTEM_H) */