/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 1994, 95, 96, 97, 98, 99, 2003, 06 by Ralf Baechle
 * Copyright (C) 1996 by Paul M. Antoine
 * Copyright (C) 1999 Silicon Graphics
 * Kevin D. Kissell, kevink@mips.org and Carsten Langgaard, carstenl@mips.com
 * Copyright (C) 2000 MIPS Technologies, Inc.
 */
#ifndef _ASM_SYSTEM_H
#define _ASM_SYSTEM_H

#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/irqflags.h>

#include <asm/addrspace.h>
#include <asm/barrier.h>
#include <asm/cpu-features.h>
#include <asm/dsp.h>
#include <asm/war.h>

/*
 * switch_to(n) should switch tasks to task nr n, first
 * checking that n isn't the current task, in which case it does nothing.
 */
extern asmlinkage void *resume(void *last, void *next, void *next_ti);

struct task_struct;

#ifdef CONFIG_MIPS_MT_FPAFF

/*
 * Handle the scheduler resume end of FPU affinity management.  We do this
 * inline to try to keep the overhead down. If we have been forced to run on
 * a "CPU" with an FPU because of a previous high level of FP computation,
 * but did not actually use the FPU during the most recent time-slice (CU1
 * isn't set), we undo the restriction on cpus_allowed.
 *
 * We're not calling set_cpus_allowed() here, because we have no need to
 * force prompt migration - we're already switching the current CPU to a
 * different thread.
 */

#define switch_to(prev,next,last) \
do { \
	if (cpu_has_fpu && \
	    (prev->thread.mflags & MF_FPUBOUND) && \
	    (!(KSTK_STATUS(prev) & ST0_CU1))) { \
		prev->thread.mflags &= ~MF_FPUBOUND; \
		prev->cpus_allowed = prev->thread.user_cpus_allowed; \
	} \
	if (cpu_has_dsp) \
		__save_dsp(prev); \
	next->thread.emulated_fp = 0; \
	(last) = resume(prev, next, task_thread_info(next)); \
} while(0)

#else
#define switch_to(prev,next,last) \
do { \
	if (cpu_has_dsp) \
		__save_dsp(prev); \
	(last) = resume(prev, next, task_thread_info(next)); \
} while(0)
#endif

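/*
 * Illustrative sketch only (not from this file): switch_to() is invoked
 * from the core scheduler's context-switch path, conceptually like:
 *
 *	struct task_struct *prev, *next, *last;
 *
 *	// ... scheduler picks `next' to run ...
 *	switch_to(prev, next, last);
 *	// when `prev' is eventually rescheduled, execution resumes here
 *	// and `last' names the task we switched away from to get back.
 */
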
#define finish_arch_switch(prev) \
do { \
	if (cpu_has_dsp) \
		__restore_dsp(current); \
	if (cpu_has_userlocal) \
		write_c0_userlocal(current_thread_info()->tp_value); \
} while(0)

/*
 * On SMP systems, when the scheduler does migration-cost autodetection,
 * it needs a way to flush as much of the CPU's caches as possible.
 *
 * TODO: fill this in!
 */
static inline void sched_cacheflush(void)
{
}

static inline unsigned long __xchg_u32(volatile int * m, unsigned int val)
{
	__u32 retval;

	if (cpu_has_llsc && R10000_LLSC_WAR) {
		unsigned long dummy;

		__asm__ __volatile__(
		"	.set	mips3				\n"
		"1:	ll	%0, %3		# xchg_u32	\n"
		"	.set	mips0				\n"
		"	move	%2, %z4				\n"
		"	.set	mips3				\n"
		"	sc	%2, %1				\n"
		"	beqzl	%2, 1b				\n"
		"	.set	mips0				\n"
		: "=&r" (retval), "=m" (*m), "=&r" (dummy)
		: "R" (*m), "Jr" (val)
		: "memory");
	} else if (cpu_has_llsc) {
		unsigned long dummy;

		__asm__ __volatile__(
		"	.set	mips3				\n"
		"1:	ll	%0, %3		# xchg_u32	\n"
		"	.set	mips0				\n"
		"	move	%2, %z4				\n"
		"	.set	mips3				\n"
		"	sc	%2, %1				\n"
		"	beqz	%2, 2f				\n"
		"	.subsection 2				\n"
		"2:	b	1b				\n"
		"	.previous				\n"
		"	.set	mips0				\n"
		: "=&r" (retval), "=m" (*m), "=&r" (dummy)
		: "R" (*m), "Jr" (val)
		: "memory");
	} else {
		unsigned long flags;

		raw_local_irq_save(flags);
		retval = *m;
		*m = val;
		raw_local_irq_restore(flags);	/* implies memory barrier  */
	}

	smp_llsc_mb();

	return retval;
}

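/*
 * Background note (not in the original sources): both asm variants above
 * implement the same load-linked/store-conditional retry loop, roughly
 *
 *	do {
 *		retval = *m;	// ll: load-linked
 *	} while (!store_conditional(m, val));	// sc fails if *m was
 *						// touched since the ll
 *
 * where store_conditional() is a stand-in for the sc instruction.
 * R10000_LLSC_WAR selects the branch-likely (beqzl) retry to work around
 * an R10000 erratum; the generic variant moves the retry branch into a
 * .subsection so the expected fall-through path stays straight-line.
 */
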
#ifdef CONFIG_64BIT
static inline __u64 __xchg_u64(volatile __u64 * m, __u64 val)
{
	__u64 retval;

	if (cpu_has_llsc && R10000_LLSC_WAR) {
		unsigned long dummy;

		__asm__ __volatile__(
		"	.set	mips3				\n"
		"1:	lld	%0, %3		# xchg_u64	\n"
		"	move	%2, %z4				\n"
		"	scd	%2, %1				\n"
		"	beqzl	%2, 1b				\n"
		"	.set	mips0				\n"
		: "=&r" (retval), "=m" (*m), "=&r" (dummy)
		: "R" (*m), "Jr" (val)
		: "memory");
	} else if (cpu_has_llsc) {
		unsigned long dummy;

		__asm__ __volatile__(
		"	.set	mips3				\n"
		"1:	lld	%0, %3		# xchg_u64	\n"
		"	move	%2, %z4				\n"
		"	scd	%2, %1				\n"
		"	beqz	%2, 2f				\n"
		"	.subsection 2				\n"
		"2:	b	1b				\n"
		"	.previous				\n"
		"	.set	mips0				\n"
		: "=&r" (retval), "=m" (*m), "=&r" (dummy)
		: "R" (*m), "Jr" (val)
		: "memory");
	} else {
		unsigned long flags;

		raw_local_irq_save(flags);
		retval = *m;
		*m = val;
		raw_local_irq_restore(flags);	/* implies memory barrier  */
	}

	smp_llsc_mb();

	return retval;
}

#else
extern __u64 __xchg_u64_unsupported_on_32bit_kernels(volatile __u64 * m, __u64 val);
#define __xchg_u64 __xchg_u64_unsupported_on_32bit_kernels
#endif

static inline unsigned long __xchg(unsigned long x, volatile void * ptr, int size)
{
	switch (size) {
	case 4:
		return __xchg_u32(ptr, x);
	case 8:
		return __xchg_u64(ptr, x);
	}

	return x;
}

#define xchg(ptr, x) \
({ \
	BUILD_BUG_ON(sizeof(*(ptr)) & ~0xc); \
	\
	((__typeof__(*(ptr))) \
		__xchg((unsigned long)(x), (ptr), sizeof(*(ptr)))); \
})

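/*
 * Usage sketch (illustrative; `lock_word' is a made-up variable):
 *
 *	static int lock_word;
 *
 *	if (xchg(&lock_word, 1) == 0) {
 *		// we atomically changed 0 -> 1, so we own the "lock"
 *	}
 *
 * The BUILD_BUG_ON rejects operand sizes other than 4 or 8 bytes at
 * compile time: sizeof(*(ptr)) & ~0xc is non-zero for 1- and 2-byte
 * types, and BUILD_BUG_ON of a non-zero constant fails the build.
 */
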
#define __HAVE_ARCH_CMPXCHG 1

static inline unsigned long __cmpxchg_u32(volatile int * m, unsigned long old,
	unsigned long new)
{
	__u32 retval;

	if (cpu_has_llsc && R10000_LLSC_WAR) {
		__asm__ __volatile__(
		"	.set	push				\n"
		"	.set	noat				\n"
		"	.set	mips3				\n"
		"1:	ll	%0, %2		# __cmpxchg_u32	\n"
		"	bne	%0, %z3, 2f			\n"
		"	.set	mips0				\n"
		"	move	$1, %z4				\n"
		"	.set	mips3				\n"
		"	sc	$1, %1				\n"
		"	beqzl	$1, 1b				\n"
		"2:						\n"
		"	.set	pop				\n"
		: "=&r" (retval), "=R" (*m)
		: "R" (*m), "Jr" (old), "Jr" (new)
		: "memory");
	} else if (cpu_has_llsc) {
		__asm__ __volatile__(
		"	.set	push				\n"
		"	.set	noat				\n"
		"	.set	mips3				\n"
		"1:	ll	%0, %2		# __cmpxchg_u32	\n"
		"	bne	%0, %z3, 2f			\n"
		"	.set	mips0				\n"
		"	move	$1, %z4				\n"
		"	.set	mips3				\n"
		"	sc	$1, %1				\n"
		"	beqz	$1, 3f				\n"
		"2:						\n"
		"	.subsection 2				\n"
		"3:	b	1b				\n"
		"	.previous				\n"
		"	.set	pop				\n"
		: "=&r" (retval), "=R" (*m)
		: "R" (*m), "Jr" (old), "Jr" (new)
		: "memory");
	} else {
		unsigned long flags;

		raw_local_irq_save(flags);
		retval = *m;
		if (retval == old)
			*m = new;
		raw_local_irq_restore(flags);	/* implies memory barrier  */
	}

	smp_llsc_mb();

	return retval;
}

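/*
 * Semantics recap (mirrors the interrupt-disabling fallback above):
 * cmpxchg returns the value that was in *m, and stores `new' only if
 * that value equalled `old':
 *
 *	retval = *m;
 *	if (retval == old)
 *		*m = new;
 *	return retval;
 *
 * so the caller knows the exchange happened iff the returned value
 * equals the `old' it passed in.
 */
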
static inline unsigned long __cmpxchg_u32_local(volatile int * m,
	unsigned long old, unsigned long new)
{
	__u32 retval;

	if (cpu_has_llsc && R10000_LLSC_WAR) {
		__asm__ __volatile__(
		"	.set	push				\n"
		"	.set	noat				\n"
		"	.set	mips3				\n"
		"1:	ll	%0, %2		# __cmpxchg_u32	\n"
		"	bne	%0, %z3, 2f			\n"
		"	.set	mips0				\n"
		"	move	$1, %z4				\n"
		"	.set	mips3				\n"
		"	sc	$1, %1				\n"
		"	beqzl	$1, 1b				\n"
		"2:						\n"
		"	.set	pop				\n"
		: "=&r" (retval), "=R" (*m)
		: "R" (*m), "Jr" (old), "Jr" (new)
		: "memory");
	} else if (cpu_has_llsc) {
		__asm__ __volatile__(
		"	.set	push				\n"
		"	.set	noat				\n"
		"	.set	mips3				\n"
		"1:	ll	%0, %2		# __cmpxchg_u32	\n"
		"	bne	%0, %z3, 2f			\n"
		"	.set	mips0				\n"
		"	move	$1, %z4				\n"
		"	.set	mips3				\n"
		"	sc	$1, %1				\n"
		"	beqz	$1, 1b				\n"
		"2:						\n"
		"	.set	pop				\n"
		: "=&r" (retval), "=R" (*m)
		: "R" (*m), "Jr" (old), "Jr" (new)
		: "memory");
	} else {
		unsigned long flags;

		local_irq_save(flags);
		retval = *m;
		if (retval == old)
			*m = new;
		local_irq_restore(flags);	/* implies memory barrier  */
	}

	return retval;
}

#ifdef CONFIG_64BIT
static inline unsigned long __cmpxchg_u64(volatile int * m, unsigned long old,
	unsigned long new)
{
	__u64 retval;

	if (cpu_has_llsc && R10000_LLSC_WAR) {
		__asm__ __volatile__(
		"	.set	push				\n"
		"	.set	noat				\n"
		"	.set	mips3				\n"
		"1:	lld	%0, %2		# __cmpxchg_u64	\n"
		"	bne	%0, %z3, 2f			\n"
		"	move	$1, %z4				\n"
		"	scd	$1, %1				\n"
		"	beqzl	$1, 1b				\n"
		"2:						\n"
		"	.set	pop				\n"
		: "=&r" (retval), "=R" (*m)
		: "R" (*m), "Jr" (old), "Jr" (new)
		: "memory");
	} else if (cpu_has_llsc) {
		__asm__ __volatile__(
		"	.set	push				\n"
		"	.set	noat				\n"
		"	.set	mips3				\n"
		"1:	lld	%0, %2		# __cmpxchg_u64	\n"
		"	bne	%0, %z3, 2f			\n"
		"	move	$1, %z4				\n"
		"	scd	$1, %1				\n"
		"	beqz	$1, 3f				\n"
		"2:						\n"
		"	.subsection 2				\n"
		"3:	b	1b				\n"
		"	.previous				\n"
		"	.set	pop				\n"
		: "=&r" (retval), "=R" (*m)
		: "R" (*m), "Jr" (old), "Jr" (new)
		: "memory");
	} else {
		unsigned long flags;

		raw_local_irq_save(flags);
		retval = *m;
		if (retval == old)
			*m = new;
		raw_local_irq_restore(flags);	/* implies memory barrier  */
	}

	smp_llsc_mb();

	return retval;
}

static inline unsigned long __cmpxchg_u64_local(volatile int * m,
	unsigned long old, unsigned long new)
{
	__u64 retval;

	if (cpu_has_llsc && R10000_LLSC_WAR) {
		__asm__ __volatile__(
		"	.set	push				\n"
		"	.set	noat				\n"
		"	.set	mips3				\n"
		"1:	lld	%0, %2		# __cmpxchg_u64	\n"
		"	bne	%0, %z3, 2f			\n"
		"	move	$1, %z4				\n"
		"	scd	$1, %1				\n"
		"	beqzl	$1, 1b				\n"
		"2:						\n"
		"	.set	pop				\n"
		: "=&r" (retval), "=R" (*m)
		: "R" (*m), "Jr" (old), "Jr" (new)
		: "memory");
	} else if (cpu_has_llsc) {
		__asm__ __volatile__(
		"	.set	push				\n"
		"	.set	noat				\n"
		"	.set	mips3				\n"
		"1:	lld	%0, %2		# __cmpxchg_u64	\n"
		"	bne	%0, %z3, 2f			\n"
		"	move	$1, %z4				\n"
		"	scd	$1, %1				\n"
		"	beqz	$1, 1b				\n"
		"2:						\n"
		"	.set	pop				\n"
		: "=&r" (retval), "=R" (*m)
		: "R" (*m), "Jr" (old), "Jr" (new)
		: "memory");
	} else {
		unsigned long flags;

		local_irq_save(flags);
		retval = *m;
		if (retval == old)
			*m = new;
		local_irq_restore(flags);	/* implies memory barrier  */
	}

	return retval;
}

#else
extern unsigned long __cmpxchg_u64_unsupported_on_32bit_kernels(
	volatile int * m, unsigned long old, unsigned long new);
#define __cmpxchg_u64 __cmpxchg_u64_unsupported_on_32bit_kernels
extern unsigned long __cmpxchg_u64_local_unsupported_on_32bit_kernels(
	volatile int * m, unsigned long old, unsigned long new);
#define __cmpxchg_u64_local __cmpxchg_u64_local_unsupported_on_32bit_kernels
#endif

/* This function doesn't exist, so you'll get a linker error
   if something tries to do an invalid cmpxchg().  */
extern void __cmpxchg_called_with_bad_pointer(void);

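/*
 * Illustrative example (hypothetical variable): an unsupported operand
 * size compiles but cannot link, because the bad-size path below calls
 * the intentionally undefined function declared above:
 *
 *	static short bad;
 *	cmpxchg(&bad, 0, 1);	// sizeof == 2 falls through the switch
 *				// in __cmpxchg() and leaves an
 *				// unresolved reference
 */
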
static inline unsigned long __cmpxchg(volatile void * ptr, unsigned long old,
	unsigned long new, int size)
{
	switch (size) {
	case 4:
		return __cmpxchg_u32(ptr, old, new);
	case 8:
		return __cmpxchg_u64(ptr, old, new);
	}
	__cmpxchg_called_with_bad_pointer();
	return old;
}

static inline unsigned long __cmpxchg_local(volatile void * ptr,
	unsigned long old, unsigned long new, int size)
{
	switch (size) {
	case 4:
		return __cmpxchg_u32_local(ptr, old, new);
	case 8:
		return __cmpxchg_u64_local(ptr, old, new);
	}
	__cmpxchg_called_with_bad_pointer();
	return old;
}

#define cmpxchg(ptr,old,new) \
	((__typeof__(*(ptr)))__cmpxchg((ptr), \
		(unsigned long)(old), (unsigned long)(new),sizeof(*(ptr))))

#define cmpxchg_local(ptr,old,new) \
	((__typeof__(*(ptr)))__cmpxchg_local((ptr), \
		(unsigned long)(old), (unsigned long)(new),sizeof(*(ptr))))

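/*
 * Usage sketch (illustrative; `counter' is a made-up variable): the
 * classic lock-free read-modify-write loop built on cmpxchg():
 *
 *	static int counter;
 *	int old, new;
 *
 *	do {
 *		old = counter;
 *		new = old + 1;
 *	} while (cmpxchg(&counter, old, new) != old);
 *
 * cmpxchg_local() has the same interface but is atomic only against the
 * local CPU (it disables interrupts on the non-LL/SC path and skips the
 * SMP barrier), making it cheaper for per-CPU data.
 */
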
extern void set_handler(unsigned long offset, void *addr, unsigned long len);
extern void set_uncached_handler(unsigned long offset, void *addr, unsigned long len);

typedef void (*vi_handler_t)(void);
extern void *set_vi_handler(int n, vi_handler_t addr);

extern void *set_except_vector(int n, void *addr);
extern unsigned long ebase;
extern void per_cpu_trap_init(void);

extern int stop_a_enabled;

/*
 * See include/asm-ia64/system.h; prevents deadlock on SMP
 * systems.
 */
#define __ARCH_WANT_UNLOCKED_CTXSW

#define arch_align_stack(x) (x)

#endif /* _ASM_SYSTEM_H */