/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 1994, 95, 96, 97, 98, 99, 2003, 06 by Ralf Baechle
 * Copyright (C) 1996 by Paul M. Antoine
 * Copyright (C) 1999 Silicon Graphics
 * Kevin D. Kissell, kevink@mips.org and Carsten Langgaard, carstenl@mips.com
 * Copyright (C) 2000 MIPS Technologies, Inc.
 */
#ifndef _ASM_SYSTEM_H
#define _ASM_SYSTEM_H
#include <linux/types.h>
#include <linux/irqflags.h>

#include <asm/addrspace.h>
#include <asm/barrier.h>
#include <asm/cmpxchg.h>
#include <asm/cpu-features.h>
#include <asm/dsp.h>
#include <asm/war.h>
/*
 * switch_to(n) should switch tasks to task nr n, first
 * checking that n isn't the current task, in which case it does nothing.
 */
extern asmlinkage void *resume(void *last, void *next, void *next_ti);

struct task_struct;
#ifdef CONFIG_MIPS_MT_FPAFF
/*
 * Handle the scheduler resume end of FPU affinity management.  We do this
 * inline to try to keep the overhead down. If we have been forced to run on
 * a "CPU" with an FPU because of a previous high level of FP computation,
 * but did not actually use the FPU during the most recent time-slice (CU1
 * isn't set), we undo the restriction on cpus_allowed.
 *
 * We're not calling set_cpus_allowed() here, because we have no need to
 * force prompt migration - we're already switching the current CPU to a
 * different thread.
 */
#define __mips_mt_fpaff_switch_to(prev)					\
do {									\
	struct thread_info *__prev_ti = task_thread_info(prev);	\
									\
	if (cpu_has_fpu &&						\
	    test_ti_thread_flag(__prev_ti, TIF_FPUBOUND) &&		\
	    (!(KSTK_STATUS(prev) & ST0_CU1))) {				\
		clear_ti_thread_flag(__prev_ti, TIF_FPUBOUND);		\
		prev->cpus_allowed = prev->thread.user_cpus_allowed;	\
	}								\
	next->thread.emulated_fp = 0;					\
} while(0)

#else
#define __mips_mt_fpaff_switch_to(prev) do { (void) (prev); } while (0)
#endif
#define switch_to(prev, next, last)					\
do {									\
	__mips_mt_fpaff_switch_to(prev);				\
	if (cpu_has_dsp)						\
		__save_dsp(prev);					\
	(last) = resume(prev, next, task_thread_info(next));		\
} while (0)
#define finish_arch_switch(prev)					\
do {									\
	if (cpu_has_dsp)						\
		__restore_dsp(current);					\
	if (cpu_has_userlocal)						\
		write_c0_userlocal(current_thread_info()->tp_value);	\
} while (0)
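
/*
 * Illustrative sketch (not part of this header): the scheduler core is the
 * intended caller of the two hooks above, roughly as below.  The variable
 * names and surrounding flow are assumptions for illustration only.
 *
 *	struct task_struct *prev, *next;
 *
 *	... pick next from the runqueue ...
 *	switch_to(prev, next, prev);	// stack switch via resume()
 *	...
 *	finish_arch_switch(prev);	// restore DSP/userlocal state
 */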
static inline unsigned long __xchg_u32(volatile int * m, unsigned int val)
{
	__u32 retval;

	if (cpu_has_llsc && R10000_LLSC_WAR) {
		unsigned long dummy;

		/* R10000 needs the branch-likely (beqzl) form of the retry loop */
		__asm__ __volatile__(
		"	.set	mips3					\n"
		"1:	ll	%0, %3			# xchg_u32	\n"
		"	.set	mips0					\n"
		"	move	%2, %z4					\n"
		"	.set	mips3					\n"
		"	sc	%2, %1					\n"
		"	beqzl	%2, 1b					\n"
		"	.set	mips0					\n"
		: "=&r" (retval), "=m" (*m), "=&r" (dummy)
		: "R" (*m), "Jr" (val)
		: "memory");
	} else if (cpu_has_llsc) {
		unsigned long dummy;

		/* Plain ll/sc loop; the retry branch is moved out of line */
		__asm__ __volatile__(
		"	.set	mips3					\n"
		"1:	ll	%0, %3			# xchg_u32	\n"
		"	.set	mips0					\n"
		"	move	%2, %z4					\n"
		"	.set	mips3					\n"
		"	sc	%2, %1					\n"
		"	beqz	%2, 2f					\n"
		"	.subsection 2					\n"
		"2:	b	1b					\n"
		"	.previous					\n"
		"	.set	mips0					\n"
		: "=&r" (retval), "=m" (*m), "=&r" (dummy)
		: "R" (*m), "Jr" (val)
		: "memory");
	} else {
		unsigned long flags;

		/* No ll/sc: make the exchange atomic by disabling interrupts */
		raw_local_irq_save(flags);
		retval = *m;
		*m = val;
		raw_local_irq_restore(flags);	/* implies memory barrier  */
	}

	smp_llsc_mb();

	return retval;
}
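
/*
 * For reference, each ll/sc loop above implements the following exchange,
 * sketched in plain C.  The sketch itself is NOT atomic -- only the ll/sc
 * pair (or the interrupt-disabled fallback) makes the real version safe;
 * the store_conditional() helper is hypothetical:
 *
 *	do {
 *		retval = *m;			// ll/lld: load-linked
 *	} while (!store_conditional(m, val));	// sc/scd: fails if *m was
 *						// written since the ll
 *	return retval;
 */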
#ifdef CONFIG_64BIT
static inline __u64 __xchg_u64(volatile __u64 * m, __u64 val)
{
	__u64 retval;

	if (cpu_has_llsc && R10000_LLSC_WAR) {
		unsigned long dummy;

		/* R10000 needs the branch-likely (beqzl) form of the retry loop */
		__asm__ __volatile__(
		"	.set	mips3					\n"
		"1:	lld	%0, %3			# xchg_u64	\n"
		"	move	%2, %z4					\n"
		"	scd	%2, %1					\n"
		"	beqzl	%2, 1b					\n"
		"	.set	mips0					\n"
		: "=&r" (retval), "=m" (*m), "=&r" (dummy)
		: "R" (*m), "Jr" (val)
		: "memory");
	} else if (cpu_has_llsc) {
		unsigned long dummy;

		/* Plain lld/scd loop; the retry branch is moved out of line */
		__asm__ __volatile__(
		"	.set	mips3					\n"
		"1:	lld	%0, %3			# xchg_u64	\n"
		"	move	%2, %z4					\n"
		"	scd	%2, %1					\n"
		"	beqz	%2, 2f					\n"
		"	.subsection 2					\n"
		"2:	b	1b					\n"
		"	.previous					\n"
		"	.set	mips0					\n"
		: "=&r" (retval), "=m" (*m), "=&r" (dummy)
		: "R" (*m), "Jr" (val)
		: "memory");
	} else {
		unsigned long flags;

		/* No ll/sc: make the exchange atomic by disabling interrupts */
		raw_local_irq_save(flags);
		retval = *m;
		*m = val;
		raw_local_irq_restore(flags);	/* implies memory barrier  */
	}

	smp_llsc_mb();

	return retval;
}
#else
extern __u64 __xchg_u64_unsupported_on_32bit_kernels(volatile __u64 * m, __u64 val);
#define __xchg_u64 __xchg_u64_unsupported_on_32bit_kernels
#endif
/* This function doesn't exist, so you'll get a linker error
   if something tries to do an invalid xchg().  */
extern void __xchg_called_with_bad_pointer(void);
static inline unsigned long __xchg(unsigned long x, volatile void * ptr, int size)
{
	switch (size) {
	case 4:
		return __xchg_u32(ptr, x);
	case 8:
		return __xchg_u64(ptr, x);
	}
	__xchg_called_with_bad_pointer();
	return x;
}
#define xchg(ptr, x)							\
	((__typeof__(*(ptr)))__xchg((unsigned long)(x), (ptr), sizeof(*(ptr))))
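
/*
 * Example (illustrative only, not part of this header): atomically take a
 * snapshot of a word while clearing it.  Because __xchg() only dispatches
 * on sizes 4 and 8, an xchg() on e.g. a 16-bit object would compile but
 * fail to link against __xchg_called_with_bad_pointer().
 *
 *	static unsigned int pending_mask;
 *
 *	unsigned int old = xchg(&pending_mask, 0);
 */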
extern void set_handler(unsigned long offset, void *addr, unsigned long len);
extern void set_uncached_handler(unsigned long offset, void *addr, unsigned long len);
typedef void (*vi_handler_t)(void);
extern void *set_vi_handler(int n, vi_handler_t addr);

extern void *set_except_vector(int n, void *addr);
extern unsigned long ebase;
extern void per_cpu_trap_init(void);
/*
 * See include/asm-ia64/system.h; prevents deadlock on SMP
 * systems.
 */
#define __ARCH_WANT_UNLOCKED_CTXSW
extern unsigned long arch_align_stack(unsigned long sp);
#endif /* _ASM_SYSTEM_H */