/* $Id: system.h,v 1.82 2000/05/09 17:40:15 davem Exp $ */
#include <linux/config.h>

#ifndef __SPARC_SYSTEM_H
#define __SPARC_SYSTEM_H

#include <linux/kernel.h>

#include <asm/segment.h>

#ifdef __KERNEL__
#include <asm/page.h>
#include <asm/oplib.h>
#include <asm/psr.h>
#include <asm/ptrace.h>
#include <asm/btfixup.h>

#endif /* __KERNEL__ */

#ifndef __ASSEMBLY__

/*
 * Sparc (general) CPU types
 */
enum sparc_cpu {
	sun4        = 0x00,
	sun4c       = 0x01,
	sun4m       = 0x02,
	sun4d       = 0x03,
	sun4e       = 0x04,
	sun4u       = 0x05, /* V8 ploos ploos */
	sun_unknown = 0x06,
	ap1000      = 0x07, /* almost a sun4m */
};

/* Really, userland should not be looking at any of this... */
#ifdef __KERNEL__

extern enum sparc_cpu sparc_cpu_model;

#ifndef CONFIG_SUN4
#define ARCH_SUN4C_SUN4 (sparc_cpu_model==sun4c)
#define ARCH_SUN4 0
#else
#define ARCH_SUN4C_SUN4 1
#define ARCH_SUN4 1
#endif

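/* Illustrative usage, not part of this header: platform code keys its
 * MMU handling off the probed model, along the lines of (both helper
 * names here are hypothetical):
 *
 *	if (ARCH_SUN4C_SUN4)
 *		setup_sun4c_mmu();
 *	else
 *		setup_srmmu();
 */
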
#define SUN4M_NCPUS            4              /* Architectural limit of sun4m. */

extern unsigned long empty_bad_page;
extern unsigned long empty_bad_page_table;
extern unsigned long empty_zero_page;

extern struct linux_romvec *romvec;
#define halt() romvec->pv_halt()

/* When a context switch happens we must flush all user windows so that
 * the windows of the current process are flushed onto its stack. This
 * way the windows are all clean for the next process and the stack
 * frames are up to date.
 */
extern void flush_user_windows(void);
extern void kill_user_windows(void);
extern void synchronize_user_stack(void);
extern void fpsave(unsigned long *fpregs, unsigned long *fsr,
		   void *fpqueue, unsigned long *fpqdepth);

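/* Illustrative usage, not part of this header: any path that inspects a
 * process's user stack through memory (ptrace, signal delivery, etc.)
 * must first push the live register windows out to that stack:
 *
 *	flush_user_windows();
 *	... the frames below the user %sp are now valid in memory ...
 */
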
#ifdef CONFIG_SMP
#define SWITCH_ENTER \
	if(prev->flags & PF_USEDFPU) { \
		put_psr(get_psr() | PSR_EF); \
		fpsave(&prev->thread.float_regs[0], &prev->thread.fsr, \
		       &prev->thread.fpqueue[0], &prev->thread.fpqdepth); \
		prev->flags &= ~PF_USEDFPU; \
		prev->thread.kregs->psr &= ~PSR_EF; \
	}

#define SWITCH_DO_LAZY_FPU
#else
#define SWITCH_ENTER
#define SWITCH_DO_LAZY_FPU	if(last_task_used_math != next) next->thread.kregs->psr&=~PSR_EF;
#endif

/*
 * Flush windows so that the VM switch which follows
 * would not pull the stack from under us.
 *
 * SWITCH_ENTER and SWITCH_DO_LAZY_FPU do not work yet (e.g. SMP does not work)
 */
#define prepare_to_switch() do { \
	__asm__ __volatile__( \
	".globl\tflush_patch_switch\nflush_patch_switch:\n\t" \
	"save %sp, -0x40, %sp; save %sp, -0x40, %sp; save %sp, -0x40, %sp\n\t" \
	"save %sp, -0x40, %sp; save %sp, -0x40, %sp; save %sp, -0x40, %sp\n\t" \
	"save %sp, -0x40, %sp\n\t" \
	"restore; restore; restore; restore; restore; restore; restore"); \
} while(0)

/* Much care has gone into this code, do not touch it.
 *
 * We need to loadup regs l0/l1 for the newly forked child
 * case because the trap return path relies on those registers
 * holding certain values, gcc is told that they are clobbered.
 * Gcc needs registers for 3 values in and 1 value out, so we
 * clobber every non-fixed-usage register besides l2/l3/o4/o5.  -DaveM
 *
 * Hey Dave, that do not touch sign is too much of an incentive
 * - Anton & Pete
 */
#define switch_to(prev, next, last) do { \
	__label__ here; \
	register unsigned long task_pc asm("o7"); \
	extern struct task_struct *current_set[NR_CPUS]; \
	SWITCH_ENTER \
	SWITCH_DO_LAZY_FPU \
	next->active_mm->cpu_vm_mask |= (1 << smp_processor_id()); \
	task_pc = ((unsigned long) &&here) - 0x8; \
	__asm__ __volatile__( \
	"mov	%%g6, %%g3\n\t" \
	"rd	%%psr, %%g4\n\t" \
	"std	%%sp, [%%g6 + %4]\n\t" \
	"rd	%%wim, %%g5\n\t" \
	"wr	%%g4, 0x20, %%psr\n\t" \
	"nop\n\t" \
	"std	%%g4, [%%g6 + %3]\n\t" \
	"ldd	[%2 + %3], %%g4\n\t" \
	"mov	%2, %%g6\n\t" \
	".globl	patchme_store_new_current\n" \
"patchme_store_new_current:\n\t" \
	"st	%2, [%1]\n\t" \
	"wr	%%g4, 0x20, %%psr\n\t" \
	"nop\n\t" \
	"nop\n\t" \
	"ldd	[%%g6 + %4], %%sp\n\t" \
	"wr	%%g5, 0x0, %%wim\n\t" \
	"ldd	[%%sp + 0x00], %%l0\n\t" \
	"ldd	[%%sp + 0x38], %%i6\n\t" \
	"wr	%%g4, 0x0, %%psr\n\t" \
	"nop\n\t" \
	"nop\n\t" \
	"jmpl	%%o7 + 0x8, %%g0\n\t" \
	" mov	%%g3, %0\n\t" \
	: "=&r" (last) \
	: "r" (&(current_set[hard_smp_processor_id()])), "r" (next), \
	  "i" ((const unsigned long)(&((struct task_struct *)0)->thread.kpsr)), \
	  "i" ((const unsigned long)(&((struct task_struct *)0)->thread.ksp)) \
	: "g1", "g2", "g3", "g4", "g5", "g7", "l0", "l1", \
	"l4", "l5", "l6", "l7", "i0", "i1", "i2", "i3", "i4", "i5", "o0", "o1", "o2", \
	"o3", "o7"); \
	here: } while(0)

/*
 * Changing the IRQ level on the Sparc.
 */
extern __inline__ void setipl(unsigned long __orig_psr)
{
	__asm__ __volatile__("
		wr	%0, 0x0, %%psr
		nop; nop; nop
"		: /* no outputs */
		: "r" (__orig_psr)
		: "memory", "cc");
}

extern __inline__ void __cli(void)
{
	unsigned long tmp;

	__asm__ __volatile__("
		rd	%%psr, %0
		nop; nop; nop;	/* Sun4m + Cypress + SMP bug */
		or	%0, %1, %0
		wr	%0, 0x0, %%psr
		nop; nop; nop
"		: "=r" (tmp)
		: "i" (PSR_PIL)
		: "memory");
}

extern __inline__ void __sti(void)
{
	unsigned long tmp;

	__asm__ __volatile__("
		rd	%%psr, %0
		nop; nop; nop;	/* Sun4m + Cypress + SMP bug */
		andn	%0, %1, %0
		wr	%0, 0x0, %%psr
		nop; nop; nop
"		: "=r" (tmp)
		: "i" (PSR_PIL)
		: "memory");
}

extern __inline__ unsigned long getipl(void)
{
	unsigned long retval;

	__asm__ __volatile__("rd	%%psr, %0" : "=r" (retval));
	return retval;
}

extern __inline__ unsigned long swap_pil(unsigned long __new_psr)
{
	unsigned long retval;

	__asm__ __volatile__("
		rd	%%psr, %0
		nop; nop; nop;	/* Sun4m + Cypress + SMP bug */
		and	%0, %2, %%g1
		and	%1, %2, %%g2
		xorcc	%%g1, %%g2, %%g0
		be	1f
		 nop
		wr	%0, %2, %%psr
		nop; nop; nop
1:
"		: "=r" (retval)
		: "r" (__new_psr), "i" (PSR_PIL)
		: "g1", "g2", "memory", "cc");

	return retval;
}

extern __inline__ unsigned long read_psr_and_cli(void)
{
	unsigned long retval;

	__asm__ __volatile__("
		rd	%%psr, %0
		nop; nop; nop;	/* Sun4m + Cypress + SMP bug */
		or	%0, %1, %%g1
		wr	%%g1, 0x0, %%psr
		nop; nop; nop
"		: "=r" (retval)
		: "i" (PSR_PIL)
		: "g1", "memory");

	return retval;
}

#define __save_flags(flags)	((flags) = getipl())
#define __save_and_cli(flags)	((flags) = read_psr_and_cli())
#define __restore_flags(flags)	setipl((flags))
#define local_irq_disable()		__cli()
#define local_irq_enable()		__sti()
#define local_irq_save(flags)		__save_and_cli(flags)
#define local_irq_restore(flags)	__restore_flags(flags)

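/* Illustrative usage, not part of this header: the canonical pattern
 * for a short critical section against local interrupts is
 *
 *	unsigned long flags;
 *
 *	local_irq_save(flags);
 *	... touch state shared with interrupt handlers ...
 *	local_irq_restore(flags);
 *
 * Note that on sparc32 'flags' holds a raw %psr image, not just the
 * PIL field, so callers should treat it as opaque.
 */
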
#ifdef CONFIG_SMP

extern unsigned char global_irq_holder;

#define save_and_cli(flags)	do { save_flags(flags); cli(); } while(0)

extern void __global_cli(void);
extern void __global_sti(void);
extern unsigned long __global_save_flags(void);
extern void __global_restore_flags(unsigned long flags);
#define cli()			__global_cli()
#define sti()			__global_sti()
#define save_flags(flags)	((flags)=__global_save_flags())
#define restore_flags(flags)	__global_restore_flags(flags)

#else

#define cli() __cli()
#define sti() __sti()
#define save_flags(x) __save_flags(x)
#define restore_flags(x) __restore_flags(x)
#define save_and_cli(x) __save_and_cli(x)

#endif

/* XXX Change this if we ever use a PSO mode kernel. */
#define mb()	__asm__ __volatile__ ("" : : : "memory")
#define rmb()	mb()
#define wmb()	mb()
#define set_mb(__var, __value)  do { __var = __value; mb(); } while(0)
#define set_wmb(__var, __value) set_mb(__var, __value)

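/* Illustrative usage, not part of this header: set_mb() is for stores
 * that must be visible before the caller proceeds, e.g. the classic
 * sleep/wakeup handshake:
 *
 *	set_mb(current->state, TASK_INTERRUPTIBLE);
 *	if (!condition)
 *		schedule();
 *
 * Since the kernel runs the sparc32 MMUs in TSO mode, this costs only
 * a compiler barrier.
 */
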
#define nop() __asm__ __volatile__ ("nop");

/* This has special calling conventions */
#ifndef CONFIG_SMP
BTFIXUPDEF_CALL(void, ___xchg32, void)
#endif

extern __inline__ unsigned long xchg_u32(__volatile__ unsigned long *m, unsigned long val)
{
#ifdef CONFIG_SMP
	__asm__ __volatile__("swap [%2], %0"
			     : "=&r" (val)
			     : "0" (val), "r" (m));
	return val;
#else
	register unsigned long *ptr asm("g1");
	register unsigned long ret asm("g2");

	ptr = (unsigned long *) m;
	ret = val;

	/* Note: this is magic and the nop there is
	   really needed. */
	__asm__ __volatile__("
	mov	%%o7, %%g4
	call	___f____xchg32
	 nop
"	: "=&r" (ret)
	: "0" (ret), "r" (ptr)
	: "g3", "g4", "g7", "memory", "cc");

	return ret;
#endif
}

#define xchg(ptr,x) ((__typeof__(*(ptr)))__xchg((unsigned long)(x),(ptr),sizeof(*(ptr))))
#define tas(ptr) (xchg((ptr),1))

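/* Illustrative usage, not part of this header: tas() stores 1 and
 * returns the previous value, so a crude test-and-set spin loop is
 *
 *	while (tas(&lock_word))
 *		while (lock_word)
 *			barrier();
 *	... critical section ...
 *	lock_word = 0;
 *
 * Real code should use the spinlock API; this only shows the primitive.
 */
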
extern void __xchg_called_with_bad_pointer(void);

static __inline__ unsigned long __xchg(unsigned long x, __volatile__ void * ptr, int size)
{
	switch (size) {
	case 4:
		return xchg_u32(ptr, x);
	};
	__xchg_called_with_bad_pointer();
	return x;
}

extern void die_if_kernel(char *str, struct pt_regs *regs) __attribute__ ((noreturn));

#endif /* __KERNEL__ */

#endif /* __ASSEMBLY__ */

#endif /* !(__SPARC_SYSTEM_H) */