/*
 * Copyright (C) 1999 Cort Dougan <cort@cs.nmt.edu>
 */
#ifndef __PPC_SYSTEM_H
#define __PPC_SYSTEM_H

#include <linux/kernel.h>

#include <asm/atomic.h>
#include <asm/hw_irq.h>

/*
 * Memory barrier.
 * The sync instruction guarantees that all memory accesses initiated
 * by this processor have been performed (with respect to all other
 * mechanisms that access memory).  The eieio instruction is a barrier
 * providing an ordering (separately) for (a) cacheable stores and (b)
 * loads and stores to non-cacheable memory (e.g. I/O devices).
 *
 * mb() prevents loads and stores being reordered across this point.
 * rmb() prevents loads being reordered across this point.
 * wmb() prevents stores being reordered across this point.
 * read_barrier_depends() prevents data-dependent loads being reordered
 *	across this point (nop on PPC).
 *
 * We can use the eieio instruction for wmb, but since it doesn't
 * give any ordering guarantees about loads, we have to use the
 * stronger but slower sync instruction for mb and rmb.
 */
#define mb()  __asm__ __volatile__ ("sync" : : : "memory")
#define rmb()  __asm__ __volatile__ ("sync" : : : "memory")
#define wmb()  __asm__ __volatile__ ("eieio" : : : "memory")
#define read_barrier_depends()  do { } while(0)

#define set_mb(var, value)	do { var = value; mb(); } while (0)
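
/*
 * A minimal usage sketch (not part of the original header): wmb() is
 * the barrier to use when a driver must complete its descriptor stores
 * before the store that hands the descriptor to the device.  The
 * structure and function names below are hypothetical.
 */
#if 0
struct example_desc {
	unsigned long addr;
	unsigned long len;
	unsigned long ready;
};

static void example_post(volatile struct example_desc *d,
			 unsigned long addr, unsigned long len)
{
	d->addr = addr;
	d->len = len;
	wmb();		/* descriptor fields ordered before the ready flag */
	d->ready = 1;
}
#endif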

#ifdef CONFIG_SMP
#define smp_mb()	mb()
#define smp_rmb()	rmb()
#define smp_wmb()	__asm__ __volatile__ ("eieio" : : : "memory")
#define smp_read_barrier_depends()	read_barrier_depends()
#else
#define smp_mb()	barrier()
#define smp_rmb()	barrier()
#define smp_wmb()	barrier()
#define smp_read_barrier_depends()	do { } while(0)
#endif /* CONFIG_SMP */
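
/*
 * A minimal pairing sketch (not part of the original header): smp_wmb()
 * in the writer pairs with smp_rmb() in the reader; on !CONFIG_SMP both
 * collapse to a compiler barrier().  All names here are made up.
 */
#if 0
static int example_data;
static int example_flag;

static void example_publish(int v)
{
	example_data = v;
	smp_wmb();	/* data store visible before the flag store */
	example_flag = 1;
}

static int example_consume(void)
{
	if (!example_flag)
		return -1;
	smp_rmb();	/* flag load ordered before the data load */
	return example_data;
}
#endif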

#ifdef __KERNEL__
struct task_struct;
struct pt_regs;

extern void print_backtrace(unsigned long *);
extern void show_regs(struct pt_regs * regs);
extern void flush_instruction_cache(void);
extern void hard_reset_now(void);
extern void poweroff_now(void);
#ifdef CONFIG_6xx
extern long _get_L2CR(void);
extern long _get_L3CR(void);
extern void _set_L2CR(unsigned long);
extern void _set_L3CR(unsigned long);
#else
#define _get_L2CR()	0L
#define _get_L3CR()	0L
#define _set_L2CR(val)	do { } while(0)
#define _set_L3CR(val)	do { } while(0)
#endif
extern void via_cuda_init(void);
extern void pmac_nvram_init(void);
extern void chrp_nvram_init(void);
extern void read_rtc_time(void);
extern void pmac_find_display(void);
extern void giveup_fpu(struct task_struct *);
extern void disable_kernel_fp(void);
extern void enable_kernel_fp(void);
extern void flush_fp_to_thread(struct task_struct *);
extern void enable_kernel_altivec(void);
extern void giveup_altivec(struct task_struct *);
extern void load_up_altivec(struct task_struct *);
extern int emulate_altivec(struct pt_regs *);
extern void giveup_spe(struct task_struct *);
extern void load_up_spe(struct task_struct *);
extern int fix_alignment(struct pt_regs *);
extern void cvt_fd(float *from, double *to, struct thread_struct *thread);
extern void cvt_df(double *from, float *to, struct thread_struct *thread);

#ifndef CONFIG_SMP
extern void discard_lazy_cpu_state(void);
#else
static inline void discard_lazy_cpu_state(void)
{
}
#endif

#ifdef CONFIG_ALTIVEC
extern void flush_altivec_to_thread(struct task_struct *);
#else
static inline void flush_altivec_to_thread(struct task_struct *t)
{
}
#endif

#ifdef CONFIG_SPE
extern void flush_spe_to_thread(struct task_struct *);
#else
static inline void flush_spe_to_thread(struct task_struct *t)
{
}
#endif

extern int call_rtas(const char *, int, int, unsigned long *, ...);
extern void cacheable_memzero(void *p, unsigned int nb);
extern void *cacheable_memcpy(void *, const void *, unsigned int);
extern int do_page_fault(struct pt_regs *, unsigned long, unsigned long);
extern void bad_page_fault(struct pt_regs *, unsigned long, int);
extern int die(const char *, struct pt_regs *, long);
extern void _exception(int, struct pt_regs *, int, unsigned long);
void _nmask_and_or_msr(unsigned long nmask, unsigned long or_val);

#ifdef CONFIG_BOOKE_WDT
extern u32 booke_wdt_enabled;
extern u32 booke_wdt_period;
#endif /* CONFIG_BOOKE_WDT */

struct device_node;
extern void note_scsi_host(struct device_node *, void *);

extern struct task_struct *__switch_to(struct task_struct *,
	struct task_struct *);
#define switch_to(prev, next, last)	((last) = __switch_to((prev), (next)))
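
/*
 * Note on the three-operand form: after the stack switch, the "prev"
 * local variable belongs to the resumed task's stack and may be stale,
 * so __switch_to() returns the task this CPU actually switched away
 * from, and the scheduler reads it back through "last".
 */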

/*
 * On SMP systems, when the scheduler does migration-cost autodetection,
 * it needs a way to flush as much of the CPU's caches as possible.
 *
 * TODO: fill this in!
 */
static inline void sched_cacheflush(void)
{
}

struct thread_struct;
extern struct task_struct *_switch(struct thread_struct *prev,
				   struct thread_struct *next);

extern unsigned int rtas_data;

static __inline__ unsigned long
xchg_u32(volatile void *p, unsigned long val)
{
	unsigned long prev;

	__asm__ __volatile__ ("\n\
1:	lwarx	%0,0,%2 \n"
	PPC405_ERR77(0,%2)
"	stwcx.	%3,0,%2 \n\
	bne-	1b"
	: "=&r" (prev), "=m" (*(volatile unsigned long *)p)
	: "r" (p), "r" (val), "m" (*(volatile unsigned long *)p)
	: "cc", "memory");

	return prev;
}

/*
 * This function doesn't exist, so you'll get a linker error
 * if something tries to do an invalid xchg().
 */
extern void __xchg_called_with_bad_pointer(void);

#define xchg(ptr,x) ((__typeof__(*(ptr)))__xchg((unsigned long)(x),(ptr),sizeof(*(ptr))))
#define tas(ptr) (xchg((ptr),1))

static inline unsigned long __xchg(unsigned long x, volatile void *ptr, int size)
{
	switch (size) {
	case 4:
		return (unsigned long) xchg_u32(ptr, x);
#if 0	/* xchg_u64 doesn't exist on 32-bit PPC */
	case 8:
		return (unsigned long) xchg_u64(ptr, x);
#endif
	}
	__xchg_called_with_bad_pointer();
	return x;
}

extern inline void * xchg_ptr(void * m, void * val)
{
	return (void *) xchg_u32(m, (unsigned long) val);
}
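
/*
 * An illustrative sketch (not part of the original header): tas() turns
 * xchg() into a trivial test-and-set lock.  xchg() returns the old
 * value, so a 0 return means the lock was free and we now hold it.
 * example_lock_t and the functions below are made-up names.
 */
#if 0
typedef volatile unsigned long example_lock_t;

static inline void example_lock(example_lock_t *l)
{
	while (tas(l))		/* old value 0 means we got the lock */
		;
	mb();			/* keep critical-section accesses after the acquire */
}

static inline void example_unlock(example_lock_t *l)
{
	mb();			/* order critical-section accesses before release */
	*l = 0;
}
#endif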

#define __HAVE_ARCH_CMPXCHG	1

static __inline__ unsigned long
__cmpxchg_u32(volatile unsigned int *p, unsigned int old, unsigned int new)
{
	unsigned int prev;

	__asm__ __volatile__ ("\n\
1:	lwarx	%0,0,%2 \n\
	cmpw	0,%0,%3 \n\
	bne	2f \n"
	PPC405_ERR77(0,%2)
"	stwcx.	%4,0,%2 \n\
	bne-	1b\n"
#ifdef CONFIG_SMP
"	sync\n"
#endif /* CONFIG_SMP */
"2:"
	: "=&r" (prev), "=m" (*p)
	: "r" (p), "r" (old), "r" (new), "m" (*p)
	: "cc", "memory");

	return prev;
}

/* This function doesn't exist, so you'll get a linker error
   if something tries to do an invalid cmpxchg().  */
extern void __cmpxchg_called_with_bad_pointer(void);

static __inline__ unsigned long
__cmpxchg(volatile void *ptr, unsigned long old, unsigned long new, int size)
{
	switch (size) {
	case 4:
		return __cmpxchg_u32(ptr, old, new);
#if 0	/* we don't have __cmpxchg_u64 on 32-bit PPC */
	case 8:
		return __cmpxchg_u64(ptr, old, new);
#endif
	}
	__cmpxchg_called_with_bad_pointer();
	return old;
}

#define cmpxchg(ptr,o,n)						 \
  ({									 \
     __typeof__(*(ptr)) _o_ = (o);					 \
     __typeof__(*(ptr)) _n_ = (n);					 \
     (__typeof__(*(ptr))) __cmpxchg((ptr), (unsigned long)_o_,		 \
				    (unsigned long)_n_, sizeof(*(ptr))); \
  })
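
/*
 * An illustrative sketch (not part of the original header): the
 * standard compare-and-swap retry loop, here adding to a counter
 * without a lock.  cmpxchg() returns the value it found at the
 * location, so we retry until no other CPU changed the counter between
 * our load and our store.  The function name below is made up.
 */
#if 0
static inline unsigned int example_counter_add(volatile unsigned int *ctr,
					       unsigned int n)
{
	unsigned int old;

	do {
		old = *ctr;
	} while (cmpxchg(ctr, old, old + n) != old);

	return old + n;
}
#endif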

#define arch_align_stack(x) (x)

#endif /* __KERNEL__ */
#endif /* __PPC_SYSTEM_H */