#ifndef _ASM_M32R_SYSTEM_H
#define _ASM_M32R_SYSTEM_H

/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 2001  Hiroyuki Kondo, Hirokazu Takata, and Hitoshi Yamamoto
 * Copyright (C) 2004, 2006  Hirokazu Takata <takata at linux-m32r.org>
 */

#include <linux/compiler.h>
#include <asm/assembler.h>

#ifdef __KERNEL__

/*
 * switch_to(prev, next) should switch from task `prev' to `next'.
 * `prev' will never be the same as `next'.
 *
 * `next' and `prev' should be struct task_struct, but it isn't always defined.
 */

#if defined(CONFIG_FRAME_POINTER) || !defined(CONFIG_SCHED_OMIT_FRAME_POINTER)
#define M32R_PUSH_FP "	push fp\n"
#define M32R_POP_FP  "	pop  fp\n"
#else
#define M32R_PUSH_FP ""
#define M32R_POP_FP  ""
#endif

#define switch_to(prev, next, last)  do { \
	__asm__ __volatile__ ( \
		"	seth	lr, #high(1f)				\n" \
		"	or3	lr, lr, #low(1f)			\n" \
		"	st	lr, @%4  ; store old LR			\n" \
		"	ld	lr, @%5  ; load new LR			\n" \
			M32R_PUSH_FP \
		"	st	sp, @%2  ; store old SP			\n" \
		"	ld	sp, @%3  ; load new SP			\n" \
		"	push	%1  ; store `prev' on new stack		\n" \
		"	jmp	lr  ; resume `next'			\n" \
		"	.fillinsn					\n" \
		"1:							\n" \
		"	pop	%0  ; restore `__last' from new stack	\n" \
			M32R_POP_FP \
		: "=r" (last) \
		: "0" (prev), \
		  "r" (&(prev->thread.sp)), "r" (&(next->thread.sp)), \
		  "r" (&(prev->thread.lr)), "r" (&(next->thread.lr)) \
		: "memory", "lr" \
	); \
} while(0)
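
/*
 * Illustrative sketch (not part of this header): the scheduler's
 * context-switch path expands switch_to() roughly as below; pick_next()
 * is a hypothetical stand-in for the scheduler's task selection.
 *
 *	struct task_struct *prev = current, *next = pick_next(), *last;
 *
 *	switch_to(prev, next, last);
 *
 * Execution resumes after the macro only once `prev' is scheduled back in;
 * `last' then names the task that ran immediately before `prev' resumed.
 */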

/* Interrupt Control */
#if !defined(CONFIG_CHIP_M32102) && !defined(CONFIG_CHIP_M32104)
#define local_irq_enable() \
	__asm__ __volatile__ ("setpsw #0x40 -> nop": : :"memory")
#define local_irq_disable() \
	__asm__ __volatile__ ("clrpsw #0x40 -> nop": : :"memory")
#else	/* CONFIG_CHIP_M32102 || CONFIG_CHIP_M32104 */
static inline void local_irq_enable(void)
{
	unsigned long tmpreg;
	__asm__ __volatile__(
		"mvfc	%0, psw;		\n\t"
		"or3	%0, %0, #0x0040;	\n\t"
		"mvtc	%0, psw;		\n\t"
		: "=&r" (tmpreg) : : "cbit", "memory");
}

static inline void local_irq_disable(void)
{
	unsigned long tmpreg0, tmpreg1;
	__asm__ __volatile__(
		"ld24	%0, #0	; Use 32-bit insn.			\n\t"
		"mvfc	%1, psw	; No interrupt can be accepted here.	\n\t"
		"mvtc	%0, psw						\n\t"
		"and3	%0, %1, #0xffbf					\n\t"
		"mvtc	%0, psw						\n\t"
		: "=&r" (tmpreg0), "=&r" (tmpreg1) : : "cbit", "memory");
}
#endif	/* CONFIG_CHIP_M32102 || CONFIG_CHIP_M32104 */

#define local_save_flags(x) \
	__asm__ __volatile__("mvfc %0,psw" : "=r"(x) : /* no input */)

#define local_irq_restore(x) \
	__asm__ __volatile__("mvtc %0,psw" : /* no outputs */ \
		: "r" (x) : "cbit", "memory")

#if !(defined(CONFIG_CHIP_M32102) || defined(CONFIG_CHIP_M32104))
#define local_irq_save(x)				\
	__asm__ __volatile__(				\
		"mvfc	%0, psw;	\n\t"		\
		"clrpsw	#0x40 -> nop;	\n\t"		\
		: "=r" (x) : /* no input */ : "memory")
#else	/* CONFIG_CHIP_M32102 || CONFIG_CHIP_M32104 */
#define local_irq_save(x)				\
	({						\
		unsigned long tmpreg;			\
		__asm__ __volatile__(			\
			"ld24	%1, #0	\n\t"		\
			"mvfc	%0, psw	\n\t"		\
			"mvtc	%1, psw	\n\t"		\
			"and3	%1, %0, #0xffbf	\n\t"	\
			"mvtc	%1, psw	\n\t"		\
			: "=r" (x), "=&r" (tmpreg)	\
			: : "cbit", "memory");		\
	})
#endif	/* CONFIG_CHIP_M32102 || CONFIG_CHIP_M32104 */

#define irqs_disabled()				\
	({					\
		unsigned long flags;		\
		local_save_flags(flags);	\
		!(flags & 0x40);		\
	})
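
/*
 * Usage sketch for the helpers above (not part of this header): a typical
 * critical section saves the PSW, disables interrupts, and writes the saved
 * PSW back afterwards.
 *
 *	unsigned long flags;
 *
 *	local_irq_save(flags);		PSW IE bit (0x40) is cleared here
 *	...touch data shared with interrupt handlers...
 *	local_irq_restore(flags);	previous PSW value is restored
 */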

#define nop()	__asm__ __volatile__ ("nop" : : )

#define xchg(ptr, x)							\
	((__typeof__(*(ptr)))__xchg((unsigned long)(x), (ptr), sizeof(*(ptr))))
#define xchg_local(ptr, x)						\
	((__typeof__(*(ptr)))__xchg_local((unsigned long)(x), (ptr),	\
			sizeof(*(ptr))))

extern void __xchg_called_with_bad_pointer(void);
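
/*
 * Usage sketch (not part of this header): xchg() atomically swaps a new
 * value into *ptr and returns the previous one, e.g. a trivial test-and-set
 * spin on a hypothetical lock word:
 *
 *	static volatile unsigned int lock_word;
 *
 *	while (xchg(&lock_word, 1) != 0)
 *		;			spin until the previous value was 0
 *	...critical section...
 *	xchg(&lock_word, 0);		release
 */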

#ifdef CONFIG_CHIP_M32700_TS1
#define DCACHE_CLEAR(reg0, reg1, addr)				\
	"seth	"reg1", #high(dcache_dummy);		\n\t"	\
	"or3	"reg1", "reg1", #low(dcache_dummy);	\n\t"	\
	"lock	"reg0", @"reg1";			\n\t"	\
	"add3	"reg0", "addr", #0x1000;		\n\t"	\
	"ld	"reg0", @"reg0";			\n\t"	\
	"add3	"reg0", "addr", #0x2000;		\n\t"	\
	"ld	"reg0", @"reg0";			\n\t"	\
	"unlock	"reg0", @"reg1";			\n\t"
#else	/* CONFIG_CHIP_M32700_TS1 */
#define DCACHE_CLEAR(reg0, reg1, addr)
#endif	/* CONFIG_CHIP_M32700_TS1 */

static __always_inline unsigned long
__xchg(unsigned long x, volatile void *ptr, int size)
{
	unsigned long flags;
	unsigned long tmp = 0;

	local_irq_save(flags);

	switch (size) {
#ifndef CONFIG_SMP
	case 1:
		__asm__ __volatile__ (
			"ldb	%0, @%2 \n\t"
			"stb	%1, @%2 \n\t"
			: "=&r" (tmp) : "r" (x), "r" (ptr) : "memory");
		break;
	case 2:
		__asm__ __volatile__ (
			"ldh	%0, @%2 \n\t"
			"sth	%1, @%2 \n\t"
			: "=&r" (tmp) : "r" (x), "r" (ptr) : "memory");
		break;
	case 4:
		__asm__ __volatile__ (
			"ld	%0, @%2 \n\t"
			"st	%1, @%2 \n\t"
			: "=&r" (tmp) : "r" (x), "r" (ptr) : "memory");
		break;
#else  /* CONFIG_SMP */
	case 4:
		__asm__ __volatile__ (
			DCACHE_CLEAR("%0", "r4", "%2")
			"lock	%0, @%2;	\n\t"
			"unlock	%1, @%2;	\n\t"
			: "=&r" (tmp) : "r" (x), "r" (ptr)
			: "memory"
#ifdef CONFIG_CHIP_M32700_TS1
			, "r4"
#endif	/* CONFIG_CHIP_M32700_TS1 */
		);
		break;
#endif  /* CONFIG_SMP */
	default:
		__xchg_called_with_bad_pointer();
	}

	local_irq_restore(flags);

	return (tmp);
}

static __always_inline unsigned long
__xchg_local(unsigned long x, volatile void *ptr, int size)
{
	unsigned long flags;
	unsigned long tmp = 0;

	local_irq_save(flags);

	switch (size) {
	case 1:
		__asm__ __volatile__ (
			"ldb	%0, @%2 \n\t"
			"stb	%1, @%2 \n\t"
			: "=&r" (tmp) : "r" (x), "r" (ptr) : "memory");
		break;
	case 2:
		__asm__ __volatile__ (
			"ldh	%0, @%2 \n\t"
			"sth	%1, @%2 \n\t"
			: "=&r" (tmp) : "r" (x), "r" (ptr) : "memory");
		break;
	case 4:
		__asm__ __volatile__ (
			"ld	%0, @%2 \n\t"
			"st	%1, @%2 \n\t"
			: "=&r" (tmp) : "r" (x), "r" (ptr) : "memory");
		break;
	default:
		__xchg_called_with_bad_pointer();
	}

	local_irq_restore(flags);

	return (tmp);
}

#define __HAVE_ARCH_CMPXCHG	1

static inline unsigned long
__cmpxchg_u32(volatile unsigned int *p, unsigned int old, unsigned int new)
{
	unsigned long flags;
	unsigned int retval;

	local_irq_save(flags);
	__asm__ __volatile__ (
			DCACHE_CLEAR("%0", "r4", "%1")
			M32R_LOCK" %0, @%1;	\n"
		"	bne	%0, %2, 1f;	\n"
			M32R_UNLOCK" %3, @%1;	\n"
		"	bra	2f;		\n"
		"	.fillinsn		\n"
		"1:"
			M32R_UNLOCK" %0, @%1;	\n"
		"	.fillinsn		\n"
		"2:"
		: "=&r" (retval)
		: "r" (p), "r" (old), "r" (new)
		: "cbit", "memory"
#ifdef CONFIG_CHIP_M32700_TS1
		, "r4"
#endif	/* CONFIG_CHIP_M32700_TS1 */
	);
	local_irq_restore(flags);

	return retval;
}

static inline unsigned long
__cmpxchg_local_u32(volatile unsigned int *p, unsigned int old,
			unsigned int new)
{
	unsigned long flags;
	unsigned int retval;

	local_irq_save(flags);
	__asm__ __volatile__ (
			DCACHE_CLEAR("%0", "r4", "%1")
			"ld %0, @%1;		\n"
		"	bne	%0, %2, 1f;	\n"
			"st %3, @%1;		\n"
		"	bra	2f;		\n"
		"	.fillinsn		\n"
		"1:"
			"st %0, @%1;		\n"
		"	.fillinsn		\n"
		"2:"
		: "=&r" (retval)
		: "r" (p), "r" (old), "r" (new)
		: "cbit", "memory"
#ifdef CONFIG_CHIP_M32700_TS1
		, "r4"
#endif	/* CONFIG_CHIP_M32700_TS1 */
	);
	local_irq_restore(flags);

	return retval;
}

/* This function doesn't exist, so you'll get a linker error
   if something tries to do an invalid cmpxchg().  */
extern void __cmpxchg_called_with_bad_pointer(void);

static inline unsigned long
__cmpxchg(volatile void *ptr, unsigned long old, unsigned long new, int size)
{
	switch (size) {
	case 4:
		return __cmpxchg_u32(ptr, old, new);
	}
	__cmpxchg_called_with_bad_pointer();
	return old;
}

#define cmpxchg(ptr, o, n)						\
	((__typeof__(*(ptr))) __cmpxchg((ptr), (unsigned long)(o),	\
			(unsigned long)(n), sizeof(*(ptr))))
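
/*
 * Usage sketch (not part of this header): a lock-free increment built on
 * cmpxchg(); `counter' is a hypothetical shared variable.
 *
 *	static unsigned int counter;
 *	unsigned int old, new;
 *
 *	do {
 *		old = counter;
 *		new = old + 1;
 *	} while (cmpxchg(&counter, old, new) != old);
 */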

#include <asm-generic/cmpxchg-local.h>

static inline unsigned long __cmpxchg_local(volatile void *ptr,
				      unsigned long old,
				      unsigned long new, int size)
{
	switch (size) {
	case 4:
		return __cmpxchg_local_u32(ptr, old, new);
	default:
		return __cmpxchg_local_generic(ptr, old, new, size);
	}

	return old;
}

/*
 * cmpxchg_local and cmpxchg64_local are atomic wrt current CPU. Always make
 * them available.
 */
#define cmpxchg_local(ptr, o, n)					\
	((__typeof__(*(ptr)))__cmpxchg_local((ptr), (unsigned long)(o),	\
			(unsigned long)(n), sizeof(*(ptr))))
#define cmpxchg64_local(ptr, o, n) __cmpxchg64_local_generic((ptr), (o), (n))
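
/*
 * Usage sketch (not part of this header): cmpxchg_local() fits data that is
 * only ever updated by the local CPU (a hypothetical per-cpu counter here),
 * where atomicity against other CPUs is not required.
 *
 *	unsigned int old;
 *
 *	do {
 *		old = my_counter;
 *	} while (cmpxchg_local(&my_counter, old, old + 1) != old);
 */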

#endif	/* __KERNEL__ */

/*
 * mb() prevents loads and stores being reordered across this point.
 * rmb() prevents loads being reordered across this point.
 * wmb() prevents stores being reordered across this point.
 */
#define mb()	barrier()
#define rmb()	mb()
#define wmb()	mb()

/**
 * read_barrier_depends - Flush all pending reads that subsequent reads
 * depend on.
 *
 * No data-dependent reads from memory-like regions are ever reordered
 * over this barrier.  All reads preceding this primitive are guaranteed
 * to access memory (but not necessarily other CPUs' caches) before any
 * reads following this primitive that depend on the data returned by
 * any of the preceding reads.  This primitive is much lighter weight than
 * rmb() on most CPUs, and is never heavier weight than is rmb().
 *
 * These ordering constraints are respected by both the local CPU
 * and the compiler.
 *
 * Ordering is not guaranteed by anything other than these primitives,
 * not even by data dependencies.  See the documentation for
 * memory_barrier() for examples and URLs to more information.
 *
 * For example, the following code would force ordering (the initial
 * value of "a" is zero, "b" is one, and "p" is "&a"):
 *
 *	CPU 0				CPU 1
 *	b = 2;
 *	memory_barrier();
 *	p = &b;				q = p;
 *					read_barrier_depends();
 *					d = *q;
 *
 * because the read of "*q" depends on the read of "p" and these
 * two reads are separated by a read_barrier_depends().  However,
 * the following code, with the same initial values for "a" and "b":
 *
 *	CPU 0				CPU 1
 *	a = 2;
 *	memory_barrier();
 *	b = 3;				y = b;
 *					read_barrier_depends();
 *					x = a;
 *
 * does not enforce ordering, since there is no data dependency between
 * the read of "a" and the read of "b".  Therefore, on some CPUs, such
 * as Alpha, "y" could be set to 3 and "x" to 0.  Use rmb()
 * in cases like this where there are no data dependencies.
 **/

#define read_barrier_depends()	do { } while (0)

#ifdef CONFIG_SMP
#define smp_mb()	mb()
#define smp_rmb()	rmb()
#define smp_wmb()	wmb()
#define smp_read_barrier_depends()	read_barrier_depends()
#define set_mb(var, value) do { (void) xchg(&var, value); } while (0)
#else
#define smp_mb()	barrier()
#define smp_rmb()	barrier()
#define smp_wmb()	barrier()
#define smp_read_barrier_depends()	do { } while (0)
#define set_mb(var, value) do { var = value; barrier(); } while (0)
#endif
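
/*
 * Pairing sketch (not part of this header): a producer publishes data with
 * smp_wmb() and a consumer observes it with smp_rmb(); `data' and `ready'
 * are hypothetical shared variables.
 *
 *	producer			consumer
 *	data = 42;			while (!ready)
 *	smp_wmb();				;
 *	ready = 1;			smp_rmb();
 *					use data;
 */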

#define arch_align_stack(x) (x)

#endif /* _ASM_M32R_SYSTEM_H */