1 #ifndef __ASM_SH64_SYSTEM_H
2 #define __ASM_SH64_SYSTEM_H
5 * This file is subject to the terms and conditions of the GNU General Public
6 * License. See the file "COPYING" in the main directory of this archive
9 * include/asm-sh64/system.h
11 * Copyright (C) 2000, 2001 Paolo Alberelli
12 * Copyright (C) 2003 Paul Mundt
13 * Copyright (C) 2004 Richard Curnow
17 #include <asm/registers.h>
18 #include <asm/processor.h>
21 * switch_to() should switch tasks to task nr n, first
/*
 * Low-level context switch, defined outside this header (presumably the
 * arch assembly entry code -- TODO confirm).  Switches from
 * prev/prev_thread to next/next_thread and returns the task_struct of
 * the task that was running before the switch, which switch_to() below
 * stores into 'last'.
 * NOTE(review): declaration is split across lines by the extraction;
 * tokens left untouched.
 */
28 extern struct task_struct
*sh64_switch_to(struct task_struct
*prev
,
29 struct thread_struct
*prev_thread
,
30 struct task_struct
*next
,
31 struct thread_struct
*next_thread
);
/*
 * switch_to(prev, next, last): architecture context-switch hook used by
 * the scheduler.  The visible body lazily disables the FPU for the
 * incoming task: if 'next' was not the last FPU user, SR_FD is set in
 * its saved SR so the first FPU instruction will trap and restore state.
 * NOTE(review): the macro body appears truncated here -- the usual
 * do { ... } while (0) wrapper, the closing brace of the 'if', and any
 * surrounding statements are not visible in this extraction; code
 * tokens left exactly as found.
 */
33 #define switch_to(prev,next,last) \
35 if (last_task_used_math != next) {\
36 struct pt_regs *regs = next->thread.uregs;\
37 if (regs) regs->sr |= SR_FD;\
39 last = sh64_switch_to(prev, &prev->thread, next, &next->thread);\
/* Single no-op instruction, usable as a tiny delay / barrier hint. */
42 #define nop() __asm__ __volatile__ ("nop")
/*
 * Atomically exchange *ptr with x; evaluates to the old value cast back
 * to the pointed-to type.  Dispatches on operand size via __xchg().
 */
44 #define xchg(ptr,x) ((__typeof__(*(ptr)))__xchg((unsigned long)(x),(ptr),sizeof(*(ptr))))
/* Classic test-and-set: atomically write 1, return the previous value. */
46 #define tas(ptr) (xchg((ptr), 1))
/*
 * Deliberately undefined symbol: referencing it turns an xchg() on an
 * unsupported operand size into a link-time error.
 */
48 extern void __xchg_called_with_bad_pointer(void);
/*
 * Memory barriers.  "synco" is the SH-5 synchronize instruction; the
 * "memory" clobber additionally stops compiler reordering.
 * NOTE(review): an rmb() definition and the #ifdef CONFIG_SMP / #else
 * directives that must separate the two groups of smp_* definitions
 * below are not visible in this extraction (only the closing #endif
 * is); the duplicate smp_* definitions are therefore presumably the
 * SMP and UP branches of a conditional -- TODO confirm against the
 * original file.
 */
50 #define mb() __asm__ __volatile__ ("synco": : :"memory")
52 #define wmb() __asm__ __volatile__ ("synco": : :"memory")
/* No read-dependency barrier needed on this architecture. */
53 #define read_barrier_depends() do { } while (0)
/* SMP variants: real barriers (presumably under #ifdef CONFIG_SMP). */
57 #define smp_rmb() rmb()
58 #define smp_wmb() wmb()
59 #define smp_read_barrier_depends() read_barrier_depends()
/* UP variants: compiler barrier only (presumably the #else branch). */
61 #define smp_mb() barrier()
62 #define smp_rmb() barrier()
63 #define smp_wmb() barrier()
64 #define smp_read_barrier_depends() do { } while (0)
65 #endif /* CONFIG_SMP */
/* Store 'value' to 'var' with barrier semantics, via an atomic xchg. */
67 #define set_rmb(var, value) do { (void)xchg(&var, value); } while (0)
68 #define set_mb(var, value) set_rmb(var, value)
70 /* Interrupt Control */
/*
 * Bit mask applied to the status register (SR) to disable interrupts,
 * in 32-bit (_L) and 64-bit (_LL) widths.  Two conflicting definitions
 * appear here: 0xf0 covers the SR.IMASK interrupt-level field, while
 * 0x10000000 is a single bit (the SR.BL "block" bit, per the comments
 * in local_irq_enable/disable below -- TODO confirm).  The #if/#else
 * directives that must select between them are not visible in this
 * extraction.
 */
72 #define SR_MASK_L 0x000000f0L
73 #define SR_MASK_LL 0x00000000000000f0LL
75 #define SR_MASK_L 0x10000000L
76 #define SR_MASK_LL 0x0000000010000000LL
/*
 * Enable local interrupts.  The visible fragment reads SR with getcon,
 * and __dummy1 is initialised to ~SR_MASK_LL -- presumably it is ANDed
 * into SR to clear the mask bits before the putcon writes SR back
 * (TODO confirm: the AND instruction, braces, asm operand lists, and
 * closing of the function are on lines missing from this extraction).
 */
79 static __inline__
void local_irq_enable(void)
81 /* cli/sti based on SR.BL */
82 unsigned long long __dummy0
, __dummy1
=~SR_MASK_LL
;
84 __asm__
__volatile__("getcon " __SR
", %0\n\t"
86 "putcon %0, " __SR
"\n\t"
/*
 * Disable local interrupts.  Mirror image of local_irq_enable():
 * __dummy1 is initialised to SR_MASK_LL, presumably ORed into SR to set
 * the mask bits before putcon writes SR back (TODO confirm: the OR
 * instruction, braces, asm operand lists, and function closing are on
 * lines missing from this extraction).
 */
91 static __inline__
void local_irq_disable(void)
93 /* cli/sti based on SR.BL */
94 unsigned long long __dummy0
, __dummy1
=SR_MASK_LL
;
95 __asm__
__volatile__("getcon " __SR
", %0\n\t"
97 "putcon %0, " __SR
"\n\t"
/*
 * local_save_flags(x): read the current SR into x (GCC statement
 * expression).  __dummy holds SR_MASK_LL, presumably used to isolate
 * the interrupt-mask bits -- TODO confirm; the masking instruction,
 * asm operand lists, and closing of the statement expression are on
 * lines missing from this extraction.
 */
102 #define local_save_flags(x) \
103 (__extension__ ({ unsigned long long __dummy=SR_MASK_LL; \
104 __asm__ __volatile__( \
105 "getcon " __SR ", %0\n\t" \
/*
 * local_irq_save(x): save the current SR into x and disable interrupts
 * in one asm sequence.  Visible steps: getcon SR -> %1; copy to %0
 * ("or %1, r63, %0" -- r63 is the SH-5 always-zero register, so this is
 * a register move); OR in the mask (%2 = SR_MASK_LL); putcon back to SR.
 * NOTE(review): the input-operand line, x = __d1 assignment, and the
 * closing of the statement expression are on lines missing from this
 * extraction.
 */
110 #define local_irq_save(x) \
111 (__extension__ ({ unsigned long long __d2=SR_MASK_LL, __d1; \
112 __asm__ __volatile__( \
113 "getcon " __SR ", %1\n\t" \
114 "or %1, r63, %0\n\t" \
115 "or %1, %2, %1\n\t" \
116 "putcon %1, " __SR "\n\t" \
118 : "=&r" (x), "=&r" (__d1) \
/*
 * local_irq_restore(x): restore interrupt state saved by
 * local_irq_save().  If the saved flags had the mask bits clear,
 * re-enable interrupts; presumably an else branch disabling them and
 * the "} while (0)" closer follow on lines missing from this
 * extraction -- TODO confirm.
 */
121 #define local_irq_restore(x) do { \
122 if ( ((x) & SR_MASK_L) == 0 ) /* dropping to 0 ? */ \
123 local_irq_enable(); /* yes...re-enable */ \
/*
 * irqs_disabled(): nonzero when local interrupts are currently masked.
 * Reads SR via local_save_flags(); the test of 'flags' against the mask
 * and the closing of the statement expression are on lines missing from
 * this extraction.
 */
126 #define irqs_disabled() \
128 unsigned long flags; \
129 local_save_flags(flags); \
/*
 * Exchange a 32-bit value at *m with val, returning the old value.
 * "Atomicity" is achieved by masking local interrupts around the swap
 * (UP-only technique -- not SMP-safe by itself).
 * NOTE(review): the actual load of retval and store of val, the braces,
 * and the return statement are on lines missing from this extraction.
 */
133 static inline unsigned long xchg_u32(volatile int * m
, unsigned long val
)
135 unsigned long flags
, retval
;
137 local_irq_save(flags
);
140 local_irq_restore(flags
);
/*
 * Exchange an 8-bit value at *m with val, returning the old value.
 * Same interrupt-masking technique as xchg_u32() above.
 * NOTE(review): the swap statements, braces, and return are on lines
 * missing from this extraction.
 */
144 static inline unsigned long xchg_u8(volatile unsigned char * m
, unsigned long val
)
146 unsigned long flags
, retval
;
148 local_irq_save(flags
);
151 local_irq_restore(flags
);
/*
 * Size dispatcher backing the xchg() macro: route to xchg_u32() or
 * xchg_u8() based on 'size', and for any other size call the
 * deliberately-undefined __xchg_called_with_bad_pointer() to force a
 * link error.  NOTE(review): the switch(size)/case lines, braces, and
 * final return are on lines missing from this extraction.
 */
155 static __inline__
unsigned long __xchg(unsigned long x
, volatile void * ptr
, int size
)
159 return xchg_u32(ptr
, x
);
162 return xchg_u8(ptr
, x
);
165 __xchg_called_with_bad_pointer();
170 * disable hlt during certain critical i/o operations
/* Advertise that this arch provides disable_hlt()/enable_hlt(). */
172 #define HAVE_DISABLE_HLT
173 void disable_hlt(void);
174 void enable_hlt(void);
/*
 * NOTE(review): second set of smp_* barrier definitions, duplicating
 * the compiler-barrier variants defined earlier in this file; the
 * preprocessor conditionals that must guard one of the two sets are not
 * visible in this extraction -- TODO confirm against the original.
 */
177 #define smp_mb() barrier()
178 #define smp_rmb() barrier()
179 #define smp_wmb() barrier()
/*
 * Debug-only helpers.  PLS() logs the current file/line via print_seg()
 * when CONFIG_SH_ALPHANUMERIC is set (the #else branch's alternative
 * PLS() definition falls on a line missing from this extraction);
 * PL() printk's the current file/function/line unconditionally.
 */
181 #ifdef CONFIG_SH_ALPHANUMERIC
182 /* This is only used for debugging. */
183 extern void print_seg(char *file
,int line
);
184 #define PLS() print_seg(__FILE__,__LINE__)
185 #else /* CONFIG_SH_ALPHANUMERIC */
187 #endif /* CONFIG_SH_ALPHANUMERIC */
189 #define PL() printk("@ <%s,%s:%d>\n",__FILE__,__FUNCTION__,__LINE__)
/* No stack-top randomization on this arch: identity mapping. */
191 #define arch_align_stack(x) (x)
193 #endif /* __ASM_SH64_SYSTEM_H */