/* $Id: system.h,v 1.8 1998/07/20 17:52:21 ralf Exp $
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 1994, 1995, 1996, 1997, 1998 by Ralf Baechle
 * Modified further for R[236]000 by Paul M. Antoine, 1996
 */
#ifndef __ASM_MIPS_SYSTEM_H
#define __ASM_MIPS_SYSTEM_H

#include <asm/sgidefs.h>
#include <linux/kernel.h>

extern __inline__ void
__sti(void)
{
	__asm__ __volatile__(
		".set\tnoreorder\n\t"
		".set\tnoat\n\t"
		"mfc0\t$1,$12\n\t"
		"ori\t$1,0x1f\n\t"
		"xori\t$1,0x1e\n\t"
		"mtc0\t$1,$12\n\t"
		".set\tat\n\t"
		".set\treorder"
		: /* no outputs */
		: /* no inputs */
		: "$1", "memory");
}

/*
 * For cli() we have to insert nops to make sure that the new value
 * has actually arrived in the status register before the end of this
 * macro.
 * R4000/R4400 need three nops, the R4600 two nops and the R10000 needs
 * no nops at all.
 */
extern __inline__ void
__cli(void)
{
	__asm__ __volatile__(
		".set\tnoreorder\n\t"
		".set\tnoat\n\t"
		"mfc0\t$1,$12\n\t"
		"ori\t$1,1\n\t"
		"xori\t$1,1\n\t"
		"mtc0\t$1,$12\n\t"
		"nop\n\t"
		"nop\n\t"
		"nop\n\t"
		".set\tat\n\t"
		".set\treorder"
		: /* no outputs */
		: /* no inputs */
		: "$1", "memory");
}

#define __save_flags(x)				\
__asm__ __volatile__(				\
	".set\tnoreorder\n\t"			\
	"mfc0\t%0,$12\n\t"			\
	".set\treorder"				\
	: "=r" (x)				\
	: /* no inputs */			\
	: "memory")

#define __save_and_cli(x)			\
__asm__ __volatile__(				\
	".set\tnoreorder\n\t"			\
	".set\tnoat\n\t"			\
	"mfc0\t%0,$12\n\t"			\
	"ori\t$1,%0,1\n\t"			\
	"xori\t$1,1\n\t"			\
	"mtc0\t$1,$12\n\t"			\
	"nop\n\t"				\
	"nop\n\t"				\
	"nop\n\t"				\
	".set\tat\n\t"				\
	".set\treorder"				\
	: "=r" (x)				\
	: /* no inputs */			\
	: "$1", "memory")

extern __inline__ void
__restore_flags(int flags)
{
	__asm__ __volatile__(
		".set\tnoreorder\n\t"
		".set\tnoat\n\t"
		"mfc0\t$8,$12\n\t"
		"li\t$9,0xff00\n\t"
		"and\t$8,$9\n\t"
		"nor\t$9,$0,$9\n\t"
		"and\t%0,$9\n\t"
		"or\t%0,$8\n\t"
		"mtc0\t%0,$12\n\t"
		"nop\n\t"
		"nop\n\t"
		"nop\n\t"
		".set\tat\n\t"
		".set\treorder"
		: /* no output */
		: "r" (flags)
		: "$8", "$9", "memory");
}

/*
 * Non-SMP versions ...
 */
#define sti()			__sti()
#define cli()			__cli()
#define save_flags(x)		__save_flags(x)
#define save_and_cli(x)		__save_and_cli(x)
#define restore_flags(x)	__restore_flags(x)

/* For spinlocks etc */
#define local_irq_save(x)	__save_and_cli(x)
#define local_irq_restore(x)	__restore_flags(x)
#define local_irq_disable()	__cli()
#define local_irq_enable()	__sti()

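/*
 * Usage sketch (illustrative only, not part of the original
 * interface; the function name is hypothetical): a short critical
 * section protected against local interrupts by the macros above.
 */
extern __inline__ void
__example_protected_increment(volatile int *counter)
{
	unsigned long flags;

	local_irq_save(flags);		/* disable IRQs, remember old state */
	(*counter)++;			/* protected read-modify-write */
	local_irq_restore(flags);	/* bring back the saved state */
}
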
/*
 * These are probably defined overly paranoid ...
 */
#define mb()						\
__asm__ __volatile__(					\
	"# prevent instructions being moved around\n\t"	\
	".set\tnoreorder\n\t"				\
	"# 8 nops to fool the R4400 pipeline\n\t"	\
	"nop;nop;nop;nop;nop;nop;nop;nop\n\t"		\
	".set\treorder"					\
	: /* no output */				\
	: /* no input */				\
	: "memory")
#define rmb() mb()
#define wmb() mb()

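/*
 * Sketch of a spot where such a barrier matters (hypothetical
 * variables, not from this file): a writer must make its payload
 * visible before raising the flag that a reader polls, so it puts
 * mb() between the two stores:
 *
 *	shared_data = value;
 *	mb();
 *	shared_ready = 1;
 */
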
#if !defined (_LANGUAGE_ASSEMBLY)
/*
 * switch_to(n) should switch tasks to task nr n, first
 * checking that n isn't the current task, in which case it does nothing.
 */
extern asmlinkage void *(*resume)(void *last, void *next);
#endif /* !defined (_LANGUAGE_ASSEMBLY) */

#define switch_to(prev,next,last) \
do { \
	(last) = resume(prev, next); \
} while(0)

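/*
 * Illustrative caller (hypothetical, not part of this header): the
 * scheduler hands over the outgoing and incoming tasks, and `last'
 * receives resume()'s return value, the task that was actually
 * running before the switch:
 *
 *	struct task_struct *last;
 *	switch_to(prev, next, last);
 */
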
/*
 * For 32 and 64 bit operands we can take advantage of ll and sc.
 * FIXME: This doesn't work for R3000 machines.
 */
extern __inline__ unsigned long
xchg_u32(volatile int * m, unsigned long val)
{
#if (_MIPS_ISA == _MIPS_ISA_MIPS2) || (_MIPS_ISA == _MIPS_ISA_MIPS3) || \
    (_MIPS_ISA == _MIPS_ISA_MIPS4) || (_MIPS_ISA == _MIPS_ISA_MIPS5)
	unsigned long dummy;

	__asm__ __volatile__(
		".set\tnoreorder\n\t"
		".set\tnoat\n\t"
		"ll\t%0,(%1)\n"
		"1:\tmove\t$1,%2\n\t"
		"sc\t$1,(%1)\n\t"
		"beqzl\t$1,1b\n\t"
		" ll\t%0,(%1)\n\t"
		".set\tat\n\t"
		".set\treorder"
		: "=r" (val), "=r" (m), "=r" (dummy)
		: "1" (m), "2" (val)
		: "memory");

	return val;
#else
	unsigned long flags, retval;

	save_flags(flags);
	cli();
	retval = *m;
	*m = val;
	restore_flags(flags);

	return retval;
#endif /* Processor-dependent optimization */
}

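/*
 * The ll/sc loop above, restated as a sketch in C-like pseudocode
 * (the helper names are made up): the store-conditional fails if any
 * other write hit *m after the load-linked, so the update retries
 * until the read-modify-write pair was effectively atomic.
 *
 *	do {
 *		old = load_linked(m);
 *	} while (!store_conditional(m, val));
 *	return old;
 */
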
/*
 * Only used for 64 bit kernel.
 */
extern __inline__ unsigned long
xchg_u64(volatile long * m, unsigned long val)
{
	unsigned long dummy;

	__asm__ __volatile__(
		".set\tnoreorder\n\t"
		".set\tnoat\n\t"
		"lld\t%0,(%1)\n"
		"1:\tmove\t$1,%2\n\t"
		"scd\t$1,(%1)\n\t"
		"beqzl\t$1,1b\n\t"
		" lld\t%0,(%1)\n\t"
		".set\tat\n\t"
		".set\treorder"
		: "=r" (val), "=r" (m), "=r" (dummy)
		: "1" (m), "2" (val)
		: "memory");

	return val;
}

#define xchg(ptr,x) ((__typeof__(*(ptr)))__xchg((unsigned long)(x),(ptr),sizeof(*(ptr))))
#define tas(ptr) (xchg((ptr),1))

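/*
 * Usage sketch (hypothetical function, not part of the original
 * interface): tas() is the classic test-and-set, so spinning until
 * it returns zero yields a crude busy-wait lock on top of the ll/sc
 * exchange above.
 */
extern __inline__ void
__example_spin_until_acquired(volatile int *lock)
{
	while (tas(lock) != 0)	/* old value 0 means the lock was free */
		;		/* somebody else held it; try again */
}
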
/*
 * This function doesn't exist, so you'll get a linker error
 * if something tries to do an invalid xchg().
 *
 * This only works if the compiler isn't horribly bad at optimizing.
 * gcc-2.5.8 reportedly can't handle this, but I define that one to
 * be dead anyway.
 */
extern void __xchg_called_with_bad_pointer(void);

static __inline__ unsigned long
__xchg(unsigned long x, volatile void * ptr, int size)
{
	switch (size) {
	case 4:
		return xchg_u32(ptr, x);
#if defined(__mips64)
	case 8:
		return xchg_u64(ptr, x);
#endif
	}
	__xchg_called_with_bad_pointer();
	return x;
}

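/*
 * Illustration of the linker trick (hypothetical snippet): a two
 * byte operand matches no case above, the call to the deliberately
 * undefined __xchg_called_with_bad_pointer() survives optimization,
 * and the final link fails instead of exchanging garbage silently.
 *
 *	short s;
 *	xchg(&s, 1);
 */
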
extern void set_except_vector(int n, void *addr);

#endif /* __ASM_MIPS_SYSTEM_H */