davej-history.git: include/asm-ppc/system.h
/*
 * $Id: system.h,v 1.49 1999/09/11 18:37:54 cort Exp $
 *
 * Copyright (C) 1999 Cort Dougan <cort@cs.nmt.edu>
 */
#ifdef __KERNEL__
#ifndef __PPC_SYSTEM_H
#define __PPC_SYSTEM_H

#include <linux/config.h>
#include <linux/kdev_t.h>

#include <asm/processor.h>
#include <asm/atomic.h>
#include <asm/hw_irq.h>
/*
 * Memory barrier.
 * The sync instruction guarantees that all memory accesses initiated
 * by this processor have been performed (with respect to all other
 * mechanisms that access memory).  The eieio instruction is a barrier
 * providing an ordering (separately) for (a) cacheable stores and (b)
 * loads and stores to non-cacheable memory (e.g. I/O devices).
 *
 * mb() prevents loads and stores being reordered across this point.
 * rmb() prevents loads being reordered across this point.
 * wmb() prevents stores being reordered across this point.
 *
 * We can use the eieio instruction for wmb, but since it doesn't
 * give any ordering guarantees about loads, we have to use the
 * stronger but slower sync instruction for mb and rmb.
 */
#define mb()	__asm__ __volatile__ ("sync" : : : "memory")
#define rmb()	__asm__ __volatile__ ("sync" : : : "memory")
#define wmb()	__asm__ __volatile__ ("eieio" : : : "memory")

#define set_mb(var, value)	do { var = value; mb(); } while (0)
#define set_wmb(var, value)	do { var = value; wmb(); } while (0)

#ifdef CONFIG_SMP
#define smp_mb()	mb()
#define smp_rmb()	rmb()
#define smp_wmb()	wmb()
#else
#define smp_mb()	__asm__ __volatile__("": : :"memory")
#define smp_rmb()	__asm__ __volatile__("": : :"memory")
#define smp_wmb()	__asm__ __volatile__("": : :"memory")
#endif /* CONFIG_SMP */
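/*
 * A minimal, hypothetical producer/consumer sketch (not part of this
 * header) showing why wmb()/rmb() exist: eieio orders the payload
 * store before the flag store, and sync orders the flag load before
 * the payload load on the reading side.  All names below are made up
 * for illustration.
 */
static int example_payload;
static volatile int example_ready;

static inline void example_publish(int value)
{
	example_payload = value;	/* store the payload first... */
	wmb();				/* eieio: keep the two stores in order */
	example_ready = 1;		/* ...then raise the flag */
}

static inline int example_consume(void)
{
	while (!example_ready)		/* wait for the flag */
		;
	rmb();				/* sync: read the flag before the payload */
	return example_payload;
}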
extern void xmon_irq(int, void *, struct pt_regs *);
extern void xmon(struct pt_regs *excp);
/* Data cache block flush - write out the cache line containing the
   specified address and then invalidate it in the cache. */
extern __inline__ void dcbf(void *line)
{
	asm("dcbf %0,%1; sync" : : "r" (line), "r" (0));
}
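/*
 * Hypothetical helper (illustration only, not part of this header):
 * push a buffer's cache lines out to memory before a device reads it
 * by DMA, so the device sees the CPU's stores.  The 32-byte stride
 * assumes the L1 line size of 6xx-class parts; a real caller would
 * use the per-CPU line size instead.
 */
static inline void example_flush_buffer(void *buf, unsigned long len)
{
	char *p = (char *) buf;
	char *end = p + len;

	for (; p < end; p += 32)
		dcbf(p);	/* write back and invalidate one line */
}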
extern void print_backtrace(unsigned long *);
extern void show_regs(struct pt_regs * regs);
extern void flush_instruction_cache(void);
extern void hard_reset_now(void);
extern void poweroff_now(void);
extern int _get_PVR(void);
extern long _get_L2CR(void);
extern void _set_L2CR(unsigned long);
extern void via_cuda_init(void);
extern void pmac_nvram_init(void);
extern void read_rtc_time(void);
extern void pmac_find_display(void);
extern void giveup_fpu(struct task_struct *);
extern void enable_kernel_fp(void);
extern void giveup_altivec(struct task_struct *);
extern void load_up_altivec(struct task_struct *);
extern void cvt_fd(float *from, double *to, unsigned long *fpscr);
extern void cvt_df(double *from, float *to, unsigned long *fpscr);
extern int call_rtas(const char *, int, int, unsigned long *, ...);
extern int abs(int);
extern void cacheable_memzero(void *p, unsigned int nb);
struct device_node;
extern void note_scsi_host(struct device_node *, void *);

struct task_struct;
#define prepare_to_switch()	do { } while(0)
#define switch_to(prev,next,last)	_switch_to((prev),(next),&(last))
extern void _switch_to(struct task_struct *, struct task_struct *,
		       struct task_struct **);

struct thread_struct;
extern struct task_struct *_switch(struct thread_struct *prev,
				   struct thread_struct *next);

extern unsigned int rtas_data;

struct pt_regs;
extern void dump_regs(struct pt_regs *);
#ifndef CONFIG_SMP

#define cli()	__cli()
#define sti()	__sti()
#define save_flags(flags)	__save_flags(flags)
#define restore_flags(flags)	__restore_flags(flags)
#define save_and_cli(flags)	__save_and_cli(flags)

#else /* CONFIG_SMP */

extern void __global_cli(void);
extern void __global_sti(void);
extern unsigned long __global_save_flags(void);
extern void __global_restore_flags(unsigned long);
#define cli()	__global_cli()
#define sti()	__global_sti()
#define save_flags(x)	((x)=__global_save_flags())
#define restore_flags(x)	__global_restore_flags(x)

#endif /* !CONFIG_SMP */

#define local_irq_disable()	__cli()
#define local_irq_enable()	__sti()
#define local_irq_save(flags)	__save_and_cli(flags)
#define local_irq_restore(flags)	__restore_flags(flags)
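/*
 * Hypothetical critical-section sketch (illustration only, not part
 * of this header): local_irq_save()/local_irq_restore() bracket code
 * that must not race with an interrupt handler on this CPU, and
 * 'flags' preserves the caller's previous interrupt state so that
 * nested callers restore correctly.
 */
static unsigned long example_pending;

static inline void example_mark_pending(unsigned long bit)
{
	unsigned long flags;

	local_irq_save(flags);		/* interrupts off, old state saved */
	example_pending |= bit;		/* state also touched from an IRQ */
	local_irq_restore(flags);	/* back to the previous state */
}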
#define xchg(ptr,x) ((__typeof__(*(ptr)))__xchg((unsigned long)(x),(ptr),sizeof(*(ptr))))

static __inline__ unsigned long
xchg_u32(volatile void *p, unsigned long val)
{
	unsigned long prev;

	__asm__ __volatile__ (
"1:	lwarx	%0,0,%2\n"	/* load and reserve the old value */
"	stwcx.	%3,0,%2\n"	/* store the new value if still reserved */
"	bne-	1b"		/* reservation lost: retry */
	: "=&r" (prev), "=m" (*(volatile unsigned long *)p)
	: "r" (p), "r" (val), "m" (*(volatile unsigned long *)p)
	: "cc", "memory");

	return prev;
}

/*
 * This function doesn't exist, so you'll get a linker error
 * if something tries to do an invalid xchg().
 */
extern void __xchg_called_with_bad_pointer(void);

#define xchg(ptr,x) ((__typeof__(*(ptr)))__xchg((unsigned long)(x),(ptr),sizeof(*(ptr))))
#define tas(ptr) (xchg((ptr),1))
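/*
 * Hypothetical test-and-set lock built on tas() (illustration only,
 * not part of this header): tas() atomically stores 1 and returns
 * the previous value, so a zero return means this CPU took the lock.
 */
static volatile unsigned long example_lock;

static inline void example_lock_acquire(void)
{
	while (tas(&example_lock))	/* spin until the old value was 0 */
		;
}

static inline void example_lock_release(void)
{
	wmb();				/* order critical-section stores first */
	example_lock = 0;
}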
static inline unsigned long __xchg(unsigned long x, void * ptr, int size)
{
	switch (size) {
	case 4:
		return (unsigned long) xchg_u32(ptr, x);
#if 0	/* xchg_u64 doesn't exist on 32-bit PPC */
	case 8:
		return (unsigned long) xchg_u64(ptr, x);
#endif /* 0 */
	}
	__xchg_called_with_bad_pointer();
	return x;
}
extern inline void * xchg_ptr(void * m, void * val)
{
	return (void *) xchg_u32(m, (unsigned long) val);
}

#define __HAVE_ARCH_CMPXCHG	1
static __inline__ unsigned long
__cmpxchg_u32(volatile int *p, int old, int new)
{
	int prev;

	__asm__ __volatile__ (
"1:	lwarx	%0,0,%2\n"	/* load and reserve the current value */
"	cmpw	0,%0,%3\n"	/* does it match 'old'? */
"	bne	2f\n"		/* no: give up, return what we saw */
"	stwcx.	%4,0,%2\n"	/* yes: try to store 'new' */
"	bne-	1b\n"		/* reservation lost: retry */
#ifdef CONFIG_SMP
"	sync\n"			/* full barrier after a successful swap */
#endif /* CONFIG_SMP */
"2:"
	: "=&r" (prev), "=m" (*p)
	: "r" (p), "r" (old), "r" (new), "m" (*p)
	: "cc", "memory");

	return prev;
}
/* This function doesn't exist, so you'll get a linker error
   if something tries to do an invalid cmpxchg(). */
extern void __cmpxchg_called_with_bad_pointer(void);

static __inline__ unsigned long
__cmpxchg(volatile void *ptr, unsigned long old, unsigned long new, int size)
{
	switch (size) {
	case 4:
		return __cmpxchg_u32(ptr, old, new);
#if 0	/* we don't have __cmpxchg_u64 on 32-bit PPC */
	case 8:
		return __cmpxchg_u64(ptr, old, new);
#endif /* 0 */
	}
	__cmpxchg_called_with_bad_pointer();
	return old;
}
#define cmpxchg(ptr,o,n)						 \
  ({									 \
     __typeof__(*(ptr)) _o_ = (o);					 \
     __typeof__(*(ptr)) _n_ = (n);					 \
     (__typeof__(*(ptr))) __cmpxchg((ptr), (unsigned long)_o_,		 \
				    (unsigned long)_n_, sizeof(*(ptr))); \
  })
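/*
 * Hypothetical lock-free update loop (illustration only, not part of
 * this header): retry cmpxchg() until no other CPU changed the
 * counter between the snapshot and the swap.
 */
static volatile int example_counter;

static inline void example_counter_add(int n)
{
	int old;

	do {
		old = example_counter;	/* snapshot the current value */
	} while (cmpxchg(&example_counter, old, old + n) != old);
}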
#endif /* __PPC_SYSTEM_H */
#endif /* __KERNEL__ */