Import 2.3.9pre5
[davej-history.git] / include / asm-mips / system.h
blob89b25a4bf6e3ac4fa85de7927784c325d87b399c
1 /* $Id: system.h,v 1.8 1998/07/20 17:52:21 ralf Exp $
3 * This file is subject to the terms and conditions of the GNU General Public
4 * License. See the file "COPYING" in the main directory of this archive
5 * for more details.
7 * Copyright (C) 1994, 1995, 1996, 1997, 1998 by Ralf Baechle
8 * Modified further for R[236]000 by Paul M. Antoine, 1996
9 */
10 #ifndef __ASM_MIPS_SYSTEM_H
11 #define __ASM_MIPS_SYSTEM_H
13 #include <asm/sgidefs.h>
14 #include <linux/kernel.h>
/*
 * __sti - enable interrupts.
 *
 * Read the CP0 status register ($12), set bit 0 (IE) and clear
 * bits 1..4: "ori 0x1f" sets bits 0-4, the following "xori 0x1e"
 * clears bits 1-4 again, leaving only bit 0 newly set.  $1 is the
 * assembler temporary, hence the .set noat bracketing.
 */
extern __inline__ void
__sti(void)
{
	__asm__ __volatile__(
		".set\tnoreorder\n\t"
		".set\tnoat\n\t"
		"mfc0\t$1,$12\n\t"
		"ori\t$1,0x1f\n\t"
		"xori\t$1,0x1e\n\t"
		"mtc0\t$1,$12\n\t"
		".set\tat\n\t"
		".set\treorder"
		: /* no outputs */
		: /* no inputs */
		: "$1", "memory");
}
/*
 * For cli() we have to insert nops to make sure that the new value
 * has actually arrived in the status register before the end of this
 * macro.
 * R4000/R4400 need three nops, the R4600 two nops and the R10000 needs
 * no nops at all.
 */
/*
 * __cli - disable interrupts.
 *
 * Clears bit 0 (IE) of the CP0 status register: "ori 1" forces the
 * bit set, "xori 1" then clears it, leaving all other bits intact.
 * The three nops let the mtc0 write settle before the caller relies
 * on interrupts being off (worst case among supported CPUs).
 */
extern __inline__ void
__cli(void)
{
	__asm__ __volatile__(
		".set\tnoreorder\n\t"
		".set\tnoat\n\t"
		"mfc0\t$1,$12\n\t"
		"ori\t$1,1\n\t"
		"xori\t$1,1\n\t"
		"mtc0\t$1,$12\n\t"
		"nop\n\t"
		"nop\n\t"
		"nop\n\t"
		".set\tat\n\t"
		".set\treorder"
		: /* no outputs */
		: /* no inputs */
		: "$1", "memory");
}
/*
 * __save_flags(x) - copy the raw CP0 status register into x.
 * The "memory" clobber keeps the compiler from reordering memory
 * accesses across the snapshot.
 */
#define __save_flags(x)			\
__asm__ __volatile__(			\
	".set\tnoreorder\n\t"		\
	"mfc0\t%0,$12\n\t"		\
	".set\treorder"			\
	: "=r" (x)			\
	: /* no inputs */		\
	: "memory")
/*
 * __save_and_cli(x) - store the current CP0 status register in x,
 * then disable interrupts (clear the IE bit, bit 0).  The disabled
 * value is built in the assembler temporary $1 so that x keeps the
 * original word; the three nops let the mtc0 write settle before the
 * macro ends.
 */
#define __save_and_cli(x)		\
__asm__ __volatile__(			\
	".set\tnoreorder\n\t"		\
	".set\tnoat\n\t"		\
	"mfc0\t%0,$12\n\t"		\
	"ori\t$1,%0,1\n\t"		\
	"xori\t$1,1\n\t"		\
	"mtc0\t$1,$12\n\t"		\
	"nop\n\t"			\
	"nop\n\t"			\
	"nop\n\t"			\
	".set\tat\n\t"			\
	".set\treorder"			\
	: "=r" (x)			\
	: /* no inputs */		\
	: "$1", "memory")
/*
 * __restore_flags - write a previously saved status word back to CP0.
 *
 * The interrupt mask bits (8..15, mask 0xff00) are taken from the
 * *current* hardware status register rather than from `flags', so a
 * mask change made since the matching save_flags() is preserved; all
 * other bits (notably IE) come from `flags'.  The three nops give the
 * mtc0 time to take effect before the function returns.
 *
 * (Specifier order fixed to `extern __inline__ void' to match the
 * other helpers in this file; semantics are unchanged.)
 */
extern __inline__ void
__restore_flags(int flags)
{
	__asm__ __volatile__(
		".set\tnoreorder\n\t"
		"mfc0\t$8,$12\n\t"
		"li\t$9,0xff00\n\t"
		"and\t$8,$9\n\t"
		"nor\t$9,$0,$9\n\t"
		"and\t%0,$9\n\t"
		"or\t%0,$8\n\t"
		"mtc0\t%0,$12\n\t"
		"nop\n\t"
		"nop\n\t"
		"nop\n\t"
		".set\treorder"
		: /* no output */
		: "r" (flags)
		: "$8", "$9", "memory");
}
/*
 * Non-SMP versions ...
 */
#define sti() __sti()
#define cli() __cli()
#define save_flags(x) __save_flags(x)
#define save_and_cli(x) __save_and_cli(x)
#define restore_flags(x) __restore_flags(x)

/*
 * For spinlocks etc.
 *
 * No trailing semicolons in the expansions: with them,
 * "if (c) local_irq_disable(); else ..." would expand to a stray
 * empty statement and fail to compile.
 */
#define local_irq_save(x)	__save_and_cli(x)
#define local_irq_restore(x)	__restore_flags(x)
#define local_irq_disable()	__cli()
#define local_irq_enable()	__sti()
/*
 * Memory barriers.  These are probably defined overly paranoid:
 * eight nops inside a noreorder region both stall the R4400 pipeline
 * and, together with the "memory" clobber, keep the compiler from
 * moving memory accesses across the barrier.
 */
#define mb()						\
__asm__ __volatile__(					\
	"# prevent instructions being moved around\n\t"	\
	".set\tnoreorder\n\t"				\
	"# 8 nops to fool the R4400 pipeline\n\t"	\
	"nop;nop;nop;nop;nop;nop;nop;nop\n\t"		\
	".set\treorder"					\
	: /* no output */				\
	: /* no input */				\
	: "memory")
#define rmb() mb()	/* read barrier: same full barrier */
#define wmb() mb()	/* write barrier: same full barrier */
138 #if !defined (_LANGUAGE_ASSEMBLY)
140 * switch_to(n) should switch tasks to task nr n, first
141 * checking that n isn't the current task, in which case it does nothing.
143 extern asmlinkage void *(*resume)(void *last, void *next);
144 #endif /* !defined (_LANGUAGE_ASSEMBLY) */
146 #define switch_to(prev,next,last) \
147 do { \
148 (last) = resume(prev, next); \
149 } while(0)
/*
 * For 32 and 64 bit operands we can take advantage of ll and sc.
 * FIXME: This doesn't work for R3000 machines.
 *
 * Returns the *previous* contents of *m after atomically storing val.
 * On LL/SC-capable ISAs the old value lands in %0 via the "ll" in the
 * branch delay slot; on older CPUs we fall back to a cli-protected
 * read-modify-write — which previously returned `val' (the new value)
 * instead of the old contents; fixed to return `retval'.
 */
extern __inline__ unsigned long xchg_u32(volatile int * m, unsigned long val)
{
#if (_MIPS_ISA == _MIPS_ISA_MIPS2) || (_MIPS_ISA == _MIPS_ISA_MIPS3) || \
    (_MIPS_ISA == _MIPS_ISA_MIPS4) || (_MIPS_ISA == _MIPS_ISA_MIPS5)
	unsigned long dummy;

	__asm__ __volatile__(
		".set\tnoreorder\n\t"
		".set\tnoat\n\t"
		"ll\t%0,(%1)\n"
		"1:\tmove\t$1,%2\n\t"
		"sc\t$1,(%1)\n\t"
		"beqzl\t$1,1b\n\t"
		"ll\t%0,(%1)\n\t"	/* delay slot: reload old value for retry */
		".set\tat\n\t"
		".set\treorder"
		: "=r" (val), "=r" (m), "=r" (dummy)
		: "1" (m), "2" (val)
		: "memory");

	return val;	/* val was overwritten with the old *m by "ll" */
#else
	unsigned long flags, retval;

	save_flags(flags);
	cli();
	retval = *m;
	*m = val;
	restore_flags(flags);

	return retval;	/* the old contents, not the value just stored */
#endif /* Processor-dependent optimization */
}
/*
 * Only used for 64 bit kernel.
 *
 * 64-bit counterpart of xchg_u32: atomically store val into *m and
 * return the previous contents, using lld/scd.
 */
extern __inline__ unsigned long xchg_u64(volatile long * m, unsigned long val)
{
	unsigned long dummy;

	__asm__ __volatile__(
		".set\tnoreorder\n\t"
		".set\tnoat\n\t"
		"lld\t%0,(%1)\n"
		"1:\tmove\t$1,%2\n\t"
		"scd\t$1,(%1)\n\t"
		"beqzl\t$1,1b\n\t"
		"lld\t%0,(%1)\n\t"	/* was "ll": the 64-bit retry reload
					   must use lld, or the upper 32 bits
					   of the returned value are wrong */
		".set\tat\n\t"
		".set\treorder"
		: "=r" (val), "=r" (m), "=r" (dummy)
		: "1" (m), "2" (val)
		: "memory");

	return val;
}
/*
 * xchg(ptr, x) - atomically exchange *ptr with x, evaluating to the
 * previous contents.  The __typeof__ cast restores the pointed-to
 * type after the size-dispatching __xchg() helper.
 */
#define xchg(ptr,x) \
	((__typeof__(*(ptr)))__xchg((unsigned long)(x),(ptr),sizeof(*(ptr))))

/* Test-and-set: atomically store 1, yield the previous contents. */
#define tas(ptr) (xchg((ptr),1))
/*
 * This function doesn't exist, so you'll get a linker error
 * if something tries to do an invalid xchg().
 *
 * This only works if the compiler isn't horribly bad at optimizing.
 * gcc-2.5.8 reportedly can't handle this, but I define that one to
 * be dead anyway.
 */
extern void __xchg_called_with_bad_pointer(void);

/*
 * __xchg - dispatch an exchange on operand size.  Supported sizes
 * return from inside the switch; any other size falls through to the
 * undefined function above, turning a bad xchg() into a link error.
 */
static __inline__ unsigned long __xchg(unsigned long x, volatile void * ptr, int size)
{
	switch (size) {
	case 4:
		return xchg_u32(ptr, x);
#if defined(__mips64)
	case 8:
		return xchg_u64(ptr, x);
#endif
	}
	__xchg_called_with_bad_pointer();
	return x;
}
238 extern void set_except_vector(int n, void *addr);
240 #endif /* __ASM_MIPS_SYSTEM_H */