Replace extern inline with static inline.
[linux-2.6/linux-mips.git] / include/asm-mips/system.h
blob 584f5e87453aecc01fdd75b9f61b6297b73ba3a3
/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 1994 - 1999 by Ralf Baechle
 * Copyright (C) 1996 by Paul M. Antoine
 * Copyright (C) 1994 - 1999 by Ralf Baechle
 *
 * Changed set_except_vector declaration to allow return of previous
 * vector address value - necessary for "borrowing" vectors.
 *
 * Kevin D. Kissell, kevink@mips.org and Carsten Langgaard, carstenl@mips.com
 * Copyright (C) 2000 MIPS Technologies, Inc.
 */
#ifndef _ASM_SYSTEM_H
#define _ASM_SYSTEM_H

#include <linux/config.h>
#include <asm/sgidefs.h>

#include <linux/kernel.h>

#include <asm/addrspace.h>
#include <asm/ptrace.h>
__asm__ (
        ".macro\tlocal_irq_enable\n\t"
        ".set\tpush\n\t"
        ".set\treorder\n\t"
        ".set\tnoat\n\t"
        "mfc0\t$1,$12\n\t"
        "ori\t$1,0x1f\n\t"
        "xori\t$1,0x1e\n\t"
        "mtc0\t$1,$12\n\t"
        ".set\tpop\n\t"
        ".endm");
static inline void local_irq_enable(void)
{
        __asm__ __volatile__(
                "local_irq_enable"
                : /* no outputs */
                : /* no inputs */
                : "memory");
}
/*
 * For cli() we have to insert nops to make sure that the new value
 * has actually arrived in the status register before the end of this
 * macro.
 * R4000/R4400 need three nops, the R4600 two nops and the R10000 needs
 * no nops at all.
 */
__asm__ (
        ".macro\tlocal_irq_disable\n\t"
        ".set\tpush\n\t"
        ".set\tnoat\n\t"
        "mfc0\t$1,$12\n\t"
        "ori\t$1,1\n\t"
        "xori\t$1,1\n\t"
        ".set\tnoreorder\n\t"
        "mtc0\t$1,$12\n\t"
        "sll\t$0, $0, 1\t\t\t# nop\n\t"
        "sll\t$0, $0, 1\t\t\t# nop\n\t"
        "sll\t$0, $0, 1\t\t\t# nop\n\t"
        ".set\tpop\n\t"
        ".endm");
static inline void local_irq_disable(void)
{
        __asm__ __volatile__(
                "local_irq_disable"
                : /* no outputs */
                : /* no inputs */
                : "memory");
}
__asm__ (
        ".macro\tlocal_save_flags flags\n\t"
        ".set\tpush\n\t"
        ".set\treorder\n\t"
        "mfc0\t\\flags, $12\n\t"
        ".set\tpop\n\t"
        ".endm");
#define local_save_flags(x) \
__asm__ __volatile__( \
        "local_save_flags %0" \
        : "=r" (x))
__asm__ (
        ".macro\tlocal_irq_save result\n\t"
        ".set\tpush\n\t"
        ".set\treorder\n\t"
        ".set\tnoat\n\t"
        "mfc0\t\\result, $12\n\t"
        "ori\t$1, \\result, 1\n\t"
        "xori\t$1, 1\n\t"
        ".set\tnoreorder\n\t"
        "mtc0\t$1, $12\n\t"
        "sll\t$0, $0, 1\t\t\t# nop\n\t"
        "sll\t$0, $0, 1\t\t\t# nop\n\t"
        "sll\t$0, $0, 1\t\t\t# nop\n\t"
        ".set\tpop\n\t"
        ".endm");
#define local_irq_save(x) \
__asm__ __volatile__( \
        "local_irq_save\t%0" \
        : "=r" (x) \
        : /* no inputs */ \
        : "memory")
__asm__(".macro\tlocal_irq_restore flags\n\t"
        ".set\tnoreorder\n\t"
        ".set\tnoat\n\t"
        "mfc0\t$1, $12\n\t"
        "andi\t\\flags, 1\n\t"
        "ori\t$1, 1\n\t"
        "xori\t$1, 1\n\t"
        "or\t\\flags, $1\n\t"
        "mtc0\t\\flags, $12\n\t"
        "sll\t$0, $0, 1\t\t\t# nop\n\t"
        "sll\t$0, $0, 1\t\t\t# nop\n\t"
        "sll\t$0, $0, 1\t\t\t# nop\n\t"
        ".set\tat\n\t"
        ".set\treorder\n\t"
        ".endm");
#define local_irq_restore(flags) \
do { \
        unsigned long __tmp1; \
        __asm__ __volatile__( \
                "local_irq_restore\t%0" \
                : "=r" (__tmp1) \
                : "0" (flags) \
                : "memory"); \
} while(0)
#define irqs_disabled() \
({ \
        unsigned long flags; \
        local_save_flags(flags); \
        !(flags & 1); \
})
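/*
 * Illustrative sketch, not part of the original header: the usual
 * caller-side pairing of local_irq_save()/local_irq_restore() around a
 * short critical section, plus a query via irqs_disabled().  The example
 * function names and the "counter" argument are hypothetical.
 */
#if 0
static inline void example_irq_safe_increment(volatile unsigned long *counter)
{
        unsigned long flags;

        local_irq_save(flags);          /* mask interrupts, remember old state */
        (*counter)++;                   /* cannot be interrupted by a local IRQ */
        local_irq_restore(flags);       /* put c0_status back the way it was */
}

static inline int example_in_irq_off_section(void)
{
        return irqs_disabled();         /* true while the IE bit is clear */
}
#endif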
/*
 * read_barrier_depends - Flush all pending reads that subsequent reads
 * depend on.
 *
 * No data-dependent reads from memory-like regions are ever reordered
 * over this barrier.  All reads preceding this primitive are guaranteed
 * to access memory (but not necessarily other CPUs' caches) before any
 * reads following this primitive that depend on the data returned by
 * any of the preceding reads.  This primitive is much lighter weight than
 * rmb() on most CPUs, and is never heavier weight than is
 * rmb().
 *
 * These ordering constraints are respected by both the local CPU
 * and the compiler.
 *
 * Ordering is not guaranteed by anything other than these primitives,
 * not even by data dependencies.  See the documentation for
 * memory_barrier() for examples and URLs to more information.
 *
 * For example, the following code would force ordering (the initial
 * value of "a" is zero, "b" is one, and "p" is "&a"):
 *
 * <programlisting>
 *      CPU 0                           CPU 1
 *
 *      b = 2;
 *      memory_barrier();
 *      p = &b;                         q = p;
 *                                      read_barrier_depends();
 *                                      d = *q;
 * </programlisting>
 *
 * because the read of "*q" depends on the read of "p" and these
 * two reads are separated by a read_barrier_depends().  However,
 * the following code, with the same initial values for "a" and "b":
 *
 * <programlisting>
 *      CPU 0                           CPU 1
 *
 *      a = 2;
 *      memory_barrier();
 *      b = 3;                          y = b;
 *                                      read_barrier_depends();
 *                                      x = a;
 * </programlisting>
 *
 * does not enforce ordering, since there is no data dependency between
 * the read of "a" and the read of "b".  Therefore, on some CPUs, such
 * as Alpha, "y" could be set to 3 and "x" to 0.  Use rmb()
 * in cases like this where there are no data dependencies.
 */
#define read_barrier_depends() do { } while(0)
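/*
 * Illustrative sketch, not part of the original header: the first
 * <programlisting> above written out as C.  The writer orders the store to
 * the data word before publishing the pointer (wmb() is defined further
 * down in this file); the reader's load of *q depends on its load of the
 * pointer, so read_barrier_depends() is enough.  All identifiers here are
 * hypothetical.
 */
#if 0
static int example_a, example_b = 1;
static int *example_p = &example_a;

static void example_writer(void)        /* CPU 0 */
{
        example_b = 2;
        wmb();                          /* store to example_b before the pointer */
        example_p = &example_b;
}

static int example_reader(void)         /* CPU 1 */
{
        int *q = example_p;

        read_barrier_depends();         /* order the dependent load below */
        return *q;                      /* sees 2 once q == &example_b */
}
#endif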
#ifdef CONFIG_CPU_HAS_SYNC
#define __sync() \
        __asm__ __volatile__( \
                ".set push\n\t" \
                ".set noreorder\n\t" \
                ".set mips2\n\t" \
                "sync\n\t" \
                ".set pop" \
                : /* no output */ \
                : /* no input */ \
                : "memory")
#else
#define __sync() do { } while(0)
#endif
#define __fast_iob() \
        __asm__ __volatile__( \
                ".set push\n\t" \
                ".set noreorder\n\t" \
                "lw $0,%0\n\t" \
                "nop\n\t" \
                ".set pop" \
                : /* no output */ \
                : "m" (*(int *)KSEG1) \
                : "memory")
#define fast_wmb()      __sync()
#define fast_rmb()      __sync()
#define fast_mb()       __sync()
#define fast_iob() \
        do { \
                __sync(); \
                __fast_iob(); \
        } while (0)
#ifdef CONFIG_CPU_HAS_WB

#include <asm/wbflush.h>

#define wmb()           fast_wmb()
#define rmb()           fast_rmb()
#define mb()            wbflush()
#define iob()           wbflush()

#else /* !CONFIG_CPU_HAS_WB */

#define wmb()           fast_wmb()
#define rmb()           fast_rmb()
#define mb()            fast_mb()
#define iob()           fast_iob()

#endif /* !CONFIG_CPU_HAS_WB */
#ifdef CONFIG_SMP
#define smp_mb()        mb()
#define smp_rmb()       rmb()
#define smp_wmb()       wmb()
#define smp_read_barrier_depends()      read_barrier_depends()
#else
#define smp_mb()        barrier()
#define smp_rmb()       barrier()
#define smp_wmb()       barrier()
#define smp_read_barrier_depends()      do { } while(0)
#endif
#define set_mb(var, value) \
do { var = value; mb(); } while (0)

#define set_wmb(var, value) \
do { var = value; wmb(); } while (0)
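/*
 * Illustrative sketch, not part of the original header: publishing a data
 * word to another CPU with the smp_*() barriers above.  Unlike the
 * pointer-publication example earlier there is no data dependency here,
 * so the reader needs smp_rmb() rather than read_barrier_depends().  The
 * identifiers are hypothetical.
 */
#if 0
static int example_data, example_ready;

static void example_producer(void)
{
        example_data = 42;
        smp_wmb();              /* order the store to example_data first */
        example_ready = 1;
}

static int example_consumer(void)
{
        if (!example_ready)
                return -1;      /* nothing published yet */
        smp_rmb();              /* order the flag read before the data read */
        return example_data;
}
#endif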
/*
 * switch_to(n) should switch tasks to task nr n, first
 * checking that n isn't the current task, in which case it does nothing.
 */
extern asmlinkage void *resume(void *last, void *next, void *next_ti);

struct task_struct;
#define switch_to(prev,next,last) \
do { \
        (last) = resume(prev, next, next->thread_info); \
} while(0)
/*
 * For 32 and 64 bit operands we can take advantage of ll and sc.
 * FIXME: This doesn't work for R3000 machines.
 */
static __inline__ unsigned long xchg_u32(volatile int * m, unsigned long val)
{
#ifdef CONFIG_CPU_HAS_LLSC
        unsigned long dummy;

        __asm__ __volatile__(
                ".set\tpush\t\t\t\t# xchg_u32\n\t"
                ".set\tnoreorder\n\t"
                ".set\tnomacro\n\t"
                "ll\t%0, %3\n"
                "1:\tmove\t%2, %z4\n\t"
                "sc\t%2, %1\n\t"
                "beqzl\t%2, 1b\n\t"
                " ll\t%0, %3\n\t"
                "sync\n\t"
                ".set\tpop"
                : "=&r" (val), "=m" (*m), "=&r" (dummy)
                : "R" (*m), "Jr" (val)
                : "memory");

        return val;
#else
        unsigned long flags, retval;

        local_irq_save(flags);
        retval = *m;
        *m = val;
        local_irq_restore(flags);       /* implies memory barrier */
        return retval;
#endif /* Processor-dependent optimization */
}
#define xchg(ptr,x) ((__typeof__(*(ptr)))__xchg((unsigned long)(x),(ptr),sizeof(*(ptr))))
#define tas(ptr) (xchg((ptr),1))
static __inline__ unsigned long
__xchg(unsigned long x, volatile void * ptr, int size)
{
        switch (size) {
        case 4:
                return xchg_u32(ptr, x);
        }

        return x;
}
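/*
 * Illustrative sketch, not part of the original header: xchg()/tas() used
 * as a crude test-and-set spin lock on a 32-bit word.  The identifiers are
 * hypothetical; real kernel code would use the spinlock API instead.
 */
#if 0
static volatile int example_busy;

static inline void example_acquire(void)
{
        while (tas(&example_busy))      /* atomically set to 1, old value back */
                ;                       /* spin while another CPU held it */
}

static inline void example_release(void)
{
        wmb();                          /* flush the critical section's stores */
        example_busy = 0;
}
#endif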
extern void *set_except_vector(int n, void *addr);
extern void per_cpu_trap_init(void);

extern void __die(const char *, struct pt_regs *, const char *file,
        const char *func, unsigned long line) __attribute__((noreturn));
extern void __die_if_kernel(const char *, struct pt_regs *, const char *file,
        const char *func, unsigned long line);
#define die(msg, regs) \
        __die(msg, regs, __FILE__ ":", __FUNCTION__, __LINE__)
#define die_if_kernel(msg, regs) \
        __die_if_kernel(msg, regs, __FILE__ ":", __FUNCTION__, __LINE__)
extern int serial_console;
extern int stop_a_enabled;
static __inline__ int con_is_present(void)
{
        return serial_console ? 0 : 1;
}

#endif /* _ASM_SYSTEM_H */