/* MN10300 System definitions
 *
 * Copyright (C) 2007 Red Hat, Inc. All Rights Reserved.
 * Written by David Howells (dhowells@redhat.com)
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public Licence
 * as published by the Free Software Foundation; either version
 * 2 of the Licence, or (at your option) any later version.
 */
#ifndef _ASM_SYSTEM_H
#define _ASM_SYSTEM_H

#include <asm/cpu-regs.h>

#ifdef __KERNEL__
#ifndef __ASSEMBLY__

#include <linux/kernel.h>

struct task_struct;
struct thread_struct;

extern asmlinkage
struct task_struct *__switch_to(struct thread_struct *prev,
				struct thread_struct *next,
				struct task_struct *prev_task);

/* context switching is now performed out-of-line in switch_to.S */
#define switch_to(prev, next, last)					\
do {									\
	current->thread.wchan = (u_long) __builtin_return_address(0);	\
	(last) = __switch_to(&(prev)->thread, &(next)->thread, (prev)); \
	mb();								\
	current->thread.wchan = 0;					\
} while (0)

#define arch_align_stack(x) (x)

#define nop() asm volatile ("nop")

#endif /* !__ASSEMBLY__ */

/*
 * Force strict CPU ordering.
 * And yes, this is required on UP too when we're talking
 * to devices.
 *
 * For now, "wmb()" doesn't actually do anything, as all
 * Intel CPUs follow what Intel calls a *Processor Order*,
 * in which all writes are seen in the program order even
 * outside the CPU.
 *
 * I expect future Intel CPUs to have a weaker ordering,
 * but I'd also expect them to finally get their act together
 * and add some real memory barriers if so.
 *
 * Some non-Intel clones support out-of-order stores; wmb() ceases to be a
 * nop for these.
 */

#define mb()	asm volatile ("" : : : "memory")
#define rmb()	mb()
#define wmb()	asm volatile ("" : : : "memory")

#ifdef CONFIG_SMP
#define smp_mb()	mb()
#define smp_rmb()	rmb()
#define smp_wmb()	wmb()
#else
#define smp_mb()	barrier()
#define smp_rmb()	barrier()
#define smp_wmb()	barrier()
#endif

#define set_mb(var, value)  do { var = value;  mb(); } while (0)
#define set_wmb(var, value) do { var = value; wmb(); } while (0)

#define read_barrier_depends()		do {} while (0)
#define smp_read_barrier_depends()	do {} while (0)
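
/* Illustrative usage only (not part of the original header), assuming the
 * usual producer/consumer pairing: smp_wmb() orders the data store before
 * the flag store, and smp_rmb() orders the flag load before the data load,
 * so the reader never observes the flag set with stale data.  The names
 * shared_data and data_ready are hypothetical:
 *
 *	CPU 0 (writer)			CPU 1 (reader)
 *	shared_data = 42;		while (!data_ready)
 *	smp_wmb();				cpu_relax();
 *	data_ready = 1;			smp_rmb();
 *					use(shared_data);
 */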

/*****************************************************************************/
/*
 * interrupt control
 * - "disabled": run in IM1/2
 *   - level 0 - GDB stub
 *   - level 1 - virtual serial DMA (if present)
 *   - level 5 - normal interrupt priority
 *   - level 6 - timer interrupt
 * - "enabled": run in IM7
 */
#ifdef CONFIG_MN10300_TTYSM
#define MN10300_CLI_LEVEL	EPSW_IM_2
#else
#define MN10300_CLI_LEVEL	EPSW_IM_1
#endif

#define local_save_flags(x)			\
do {						\
	typecheck(unsigned long, x);		\
	asm volatile(				\
		"	mov epsw,%0	\n"	\
		: "=d"(x)			\
		);				\
} while (0)

#define local_irq_disable()						\
do {									\
	asm volatile(							\
		"	and %0,epsw	\n"				\
		"	or %1,epsw	\n"				\
		"	nop		\n"				\
		"	nop		\n"				\
		"	nop		\n"				\
		:							\
		: "i"(~EPSW_IM), "i"(EPSW_IE | MN10300_CLI_LEVEL)	\
		);							\
} while (0)

#define local_irq_save(x)			\
do {						\
	local_save_flags(x);			\
	local_irq_disable();			\
} while (0)

/*
 * we make sure local_irq_enable() doesn't cause priority inversion
 */
#ifndef __ASSEMBLY__

extern unsigned long __mn10300_irq_enabled_epsw;

#endif

#define local_irq_enable()						\
do {									\
	unsigned long tmp;						\
									\
	asm volatile(							\
		"	mov epsw,%0	\n"				\
		"	and %1,%0	\n"				\
		"	or %2,%0	\n"				\
		"	mov %0,epsw	\n"				\
		: "=&d"(tmp)						\
		: "i"(~EPSW_IM), "r"(__mn10300_irq_enabled_epsw)	\
		);							\
} while (0)

#define local_irq_restore(x)			\
do {						\
	typecheck(unsigned long, x);		\
	asm volatile(				\
		"	mov %0,epsw	\n"	\
		"	nop		\n"	\
		"	nop		\n"	\
		"	nop		\n"	\
		:				\
		: "d"(x)			\
		: "memory");			\
} while (0)
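
/* Illustrative usage only (not part of the original header): the standard
 * pattern for protecting a short critical section on the local CPU; the
 * variable event_count here is hypothetical:
 *
 *	unsigned long flags;
 *
 *	local_irq_save(flags);
 *	event_count++;			// runs with interrupts masked
 *	local_irq_restore(flags);
 */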

#define irqs_disabled()				\
({						\
	unsigned long flags;			\
	local_save_flags(flags);		\
	(flags & EPSW_IM) <= MN10300_CLI_LEVEL;	\
})

/* hook to save power by halting the CPU
 * - called from the idle loop
 * - must re-enable interrupts (which takes three instruction cycles to complete)
 */
#define safe_halt()							\
do {									\
	asm volatile("	or	%0,epsw	\n"				\
		     "	nop		\n"				\
		     "	nop		\n"				\
		     "	bset	%2,(%1)	\n"				\
		     :							\
		     : "i"(EPSW_IE|EPSW_IM), "n"(&CPUM), "i"(CPUM_SLEEP)\
		     : "cc");						\
} while (0)
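
/* Illustrative usage only (not part of the original header): the arch
 * idle loop would be expected to call this along the lines of
 *
 *	while (!need_resched())
 *		safe_halt();
 *
 * though the real idle loop lives elsewhere and is assumed here.
 */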

#define STI	or EPSW_IE|EPSW_IM,epsw
#define CLI	and ~EPSW_IM,epsw; or EPSW_IE|MN10300_CLI_LEVEL,epsw; nop; nop; nop

/*****************************************************************************/
/*
 * MN10300 doesn't actually have an exchange instruction
 */
#ifndef __ASSEMBLY__

struct __xchg_dummy { unsigned long a[100]; };
#define __xg(x) ((struct __xchg_dummy *)(x))

static inline
unsigned long __xchg(volatile unsigned long *m, unsigned long val)
{
	unsigned long retval;
	unsigned long flags;

	/* emulate an atomic exchange by masking interrupts around a
	 * plain load/store pair */
	local_irq_save(flags);
	retval = *m;
	*m = val;
	local_irq_restore(flags);
	return retval;
}

#define xchg(ptr, v)						\
	((__typeof__(*(ptr))) __xchg((unsigned long *)(ptr),	\
				     (unsigned long)(v)))
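
/* Illustrative usage only (not part of the original header): a trivial
 * test-and-set lock built on xchg(); the variable __lock is hypothetical:
 *
 *	static unsigned long __lock;
 *
 *	while (xchg(&__lock, 1) != 0)
 *		;			// spin until we store the first 1
 *	...critical section...
 *	xchg(&__lock, 0);		// release
 */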

static inline unsigned long __cmpxchg(volatile unsigned long *m,
				      unsigned long old, unsigned long new)
{
	unsigned long retval;
	unsigned long flags;

	/* emulate compare-and-exchange by masking interrupts around the
	 * compare and the conditional store */
	local_irq_save(flags);
	retval = *m;
	if (retval == old)
		*m = new;
	local_irq_restore(flags);
	return retval;
}

#define cmpxchg(ptr, o, n)					\
	((__typeof__(*(ptr))) __cmpxchg((unsigned long *)(ptr),	\
					(unsigned long)(o),	\
					(unsigned long)(n)))
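
/* Illustrative usage only (not part of the original header): the classic
 * lock-free read-modify-write loop; the variable counter is hypothetical:
 *
 *	unsigned long old;
 *
 *	do {
 *		old = counter;
 *	} while (cmpxchg(&counter, old, old + 1) != old);
 */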

#endif /* !__ASSEMBLY__ */

#endif /* __KERNEL__ */
#endif /* _ASM_SYSTEM_H */