/*
 * Copyright (C) 1999 Cort Dougan <cort@cs.nmt.edu>
 */
#ifndef _ASM_POWERPC_SYSTEM_H
#define _ASM_POWERPC_SYSTEM_H

#include <linux/kernel.h>
#include <linux/irqflags.h>

#include <asm/hw_irq.h>
/*
 * Memory barrier.
 * The sync instruction guarantees that all memory accesses initiated
 * by this processor have been performed (with respect to all other
 * mechanisms that access memory).  The eieio instruction is a barrier
 * providing an ordering (separately) for (a) cacheable stores and (b)
 * loads and stores to non-cacheable memory (e.g. I/O devices).
 *
 * mb() prevents loads and stores being reordered across this point.
 * rmb() prevents loads being reordered across this point.
 * wmb() prevents stores being reordered across this point.
 * read_barrier_depends() prevents data-dependent loads being reordered
 *	across this point (nop on PPC).
 *
 * We have to use the sync instruction for mb(), since lwsync doesn't
 * order loads with respect to previous stores.  Lwsync is fine for
 * rmb(), though.  Note that rmb() actually uses a sync on 32-bit
 * architectures.
 *
 * For wmb(), we use sync since wmb is used in drivers to order
 * stores to system memory with respect to writes to the device.
 * However, smp_wmb() can be a lighter-weight lwsync or eieio barrier
 * on SMP since it is only used to order updates to system memory.
 */
#define mb()   __asm__ __volatile__ ("sync" : : : "memory")
#define rmb()  __asm__ __volatile__ ("sync" : : : "memory")
#define wmb()  __asm__ __volatile__ ("sync" : : : "memory")
#define read_barrier_depends()  do { } while(0)

#define set_mb(var, value)	do { var = value; mb(); } while (0)
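
/*
 * Illustrative sketch (not part of this header): the classic paired use
 * of the write and read barriers above.  The 'flag' and 'buf' names are
 * hypothetical.
 *
 *	// producer
 *	buf[0] = data;
 *	wmb();			// order payload store before flag store
 *	flag = 1;
 *
 *	// consumer
 *	while (!flag)
 *		;
 *	rmb();			// order flag load before payload load
 *	use(buf[0]);
 */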

#ifdef __KERNEL__
#define AT_VECTOR_SIZE_ARCH 6 /* entries in ARCH_DLINFO */

#ifdef CONFIG_SMP

#ifdef __SUBARCH_HAS_LWSYNC
#    define SMPWMB	lwsync
#else
#    define SMPWMB	eieio
#endif

#define smp_mb()	mb()
#define smp_rmb()	rmb()
#define smp_wmb()	__asm__ __volatile__ (__stringify(SMPWMB) : : :"memory")
#define smp_read_barrier_depends()	read_barrier_depends()
#else
#define smp_mb()	barrier()
#define smp_rmb()	barrier()
#define smp_wmb()	barrier()
#define smp_read_barrier_depends()	do { } while(0)
#endif /* CONFIG_SMP */

/*
 * This is a barrier which prevents following instructions from being
 * started until the value of the argument x is known.  For example, if
 * x is a variable loaded from memory, this prevents following
 * instructions from being executed until the load has been performed.
 */
#define data_barrier(x)	\
	asm volatile("twi 0,%0,0; isync" : : "r" (x) : "memory");
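
/*
 * Illustrative sketch (not part of this header): data_barrier() is
 * useful after a load whose value gates subsequent accesses, e.g. an
 * index read from shared or device memory.  Names are hypothetical.
 *
 *	idx = ring->head;		// load from memory
 *	data_barrier(idx);		// nothing later starts until idx is known
 *	entry = ring->slots[idx];	// safe dependent access
 */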

struct task_struct;
struct pt_regs;

#if defined(CONFIG_DEBUGGER) || defined(CONFIG_KEXEC)

extern int (*__debugger)(struct pt_regs *regs);
extern int (*__debugger_ipi)(struct pt_regs *regs);
extern int (*__debugger_bpt)(struct pt_regs *regs);
extern int (*__debugger_sstep)(struct pt_regs *regs);
extern int (*__debugger_iabr_match)(struct pt_regs *regs);
extern int (*__debugger_dabr_match)(struct pt_regs *regs);
extern int (*__debugger_fault_handler)(struct pt_regs *regs);

#define DEBUGGER_BOILERPLATE(__NAME) \
static inline int __NAME(struct pt_regs *regs) \
{ \
	if (unlikely(__ ## __NAME)) \
		return __ ## __NAME(regs); \
	return 0; \
}
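
/*
 * For reference, DEBUGGER_BOILERPLATE(debugger_bpt) expands to a wrapper
 * equivalent to:
 *
 *	static inline int debugger_bpt(struct pt_regs *regs)
 *	{
 *		if (unlikely(__debugger_bpt))
 *			return __debugger_bpt(regs);
 *		return 0;
 *	}
 *
 * so callers need not check whether a debugger hook is registered.
 */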

DEBUGGER_BOILERPLATE(debugger)
DEBUGGER_BOILERPLATE(debugger_ipi)
DEBUGGER_BOILERPLATE(debugger_bpt)
DEBUGGER_BOILERPLATE(debugger_sstep)
DEBUGGER_BOILERPLATE(debugger_iabr_match)
DEBUGGER_BOILERPLATE(debugger_dabr_match)
DEBUGGER_BOILERPLATE(debugger_fault_handler)

#else
static inline int debugger(struct pt_regs *regs) { return 0; }
static inline int debugger_ipi(struct pt_regs *regs) { return 0; }
static inline int debugger_bpt(struct pt_regs *regs) { return 0; }
static inline int debugger_sstep(struct pt_regs *regs) { return 0; }
static inline int debugger_iabr_match(struct pt_regs *regs) { return 0; }
static inline int debugger_dabr_match(struct pt_regs *regs) { return 0; }
static inline int debugger_fault_handler(struct pt_regs *regs) { return 0; }
#endif

extern int set_dabr(unsigned long dabr);
extern void do_dabr(struct pt_regs *regs, unsigned long address,
		    unsigned long error_code);
extern void print_backtrace(unsigned long *);
extern void show_regs(struct pt_regs * regs);
extern void flush_instruction_cache(void);
extern void hard_reset_now(void);
extern void poweroff_now(void);

#ifdef CONFIG_6xx
extern long _get_L2CR(void);
extern long _get_L3CR(void);
extern void _set_L2CR(unsigned long);
extern void _set_L3CR(unsigned long);
#else
#define _get_L2CR()	0L
#define _get_L3CR()	0L
#define _set_L2CR(val)	do { } while(0)
#define _set_L3CR(val)	do { } while(0)
#endif

extern void via_cuda_init(void);
extern void read_rtc_time(void);
extern void pmac_find_display(void);
extern void giveup_fpu(struct task_struct *);
extern void disable_kernel_fp(void);
extern void enable_kernel_fp(void);
extern void flush_fp_to_thread(struct task_struct *);
extern void enable_kernel_altivec(void);
extern void giveup_altivec(struct task_struct *);
extern void load_up_altivec(struct task_struct *);
extern int emulate_altivec(struct pt_regs *);
extern void __giveup_vsx(struct task_struct *);
extern void giveup_vsx(struct task_struct *);
extern void enable_kernel_spe(void);
extern void giveup_spe(struct task_struct *);
extern void load_up_spe(struct task_struct *);
extern int fix_alignment(struct pt_regs *);
extern void cvt_fd(float *from, double *to, struct thread_struct *thread);
extern void cvt_df(double *from, float *to, struct thread_struct *thread);

#ifndef CONFIG_SMP
extern void discard_lazy_cpu_state(void);
#else
static inline void discard_lazy_cpu_state(void)
{
}
#endif

#ifdef CONFIG_ALTIVEC
extern void flush_altivec_to_thread(struct task_struct *);
#else
static inline void flush_altivec_to_thread(struct task_struct *t)
{
}
#endif

#ifdef CONFIG_VSX
extern void flush_vsx_to_thread(struct task_struct *);
#else
static inline void flush_vsx_to_thread(struct task_struct *t)
{
}
#endif

#ifdef CONFIG_SPE
extern void flush_spe_to_thread(struct task_struct *);
#else
static inline void flush_spe_to_thread(struct task_struct *t)
{
}
#endif

extern int call_rtas(const char *, int, int, unsigned long *, ...);
extern void cacheable_memzero(void *p, unsigned int nb);
extern void *cacheable_memcpy(void *, const void *, unsigned int);
extern int do_page_fault(struct pt_regs *, unsigned long, unsigned long);
extern void bad_page_fault(struct pt_regs *, unsigned long, int);
extern int die(const char *, struct pt_regs *, long);
extern void _exception(int, struct pt_regs *, int, unsigned long);
extern void _nmask_and_or_msr(unsigned long nmask, unsigned long or_val);

#ifdef CONFIG_BOOKE_WDT
extern u32 booke_wdt_enabled;
extern u32 booke_wdt_period;
#endif /* CONFIG_BOOKE_WDT */

extern void note_scsi_host(struct device_node *, void *);

extern struct task_struct *__switch_to(struct task_struct *,
	struct task_struct *);
#define switch_to(prev, next, last)	((last) = __switch_to((prev), (next)))
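
/*
 * Illustrative sketch (not part of this header): the scheduler invokes
 * switch_to() roughly as below.  'last' receives the task that was
 * running before control returned here, which may differ from 'prev'
 * by the time this task is scheduled back in.
 *
 *	struct task_struct *last;
 *	switch_to(prev, next, last);	// returns in next's context
 */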

struct thread_struct;
extern struct task_struct *_switch(struct thread_struct *prev,
				   struct thread_struct *next);

extern unsigned int rtas_data;
extern int mem_init_done;	/* set on boot once kmalloc can be called */
extern int init_bootmem_done;	/* set on !NUMA once bootmem is available */
extern unsigned long memory_limit;
extern unsigned long klimit;

extern void *alloc_maybe_bootmem(size_t size, gfp_t mask);
extern void *zalloc_maybe_bootmem(size_t size, gfp_t mask);

extern int powersave_nap;	/* set if nap mode can be used in idle loop */

/*
 * Atomic exchange
 *
 * Changes the memory location '*ptr' to be val and returns
 * the previous value stored there.
 */
static __always_inline unsigned long
__xchg_u32(volatile void *p, unsigned long val)
{
	unsigned long prev;

	__asm__ __volatile__(
	LWSYNC_ON_SMP
"1:	lwarx	%0,0,%2 \n"
	PPC405_ERR77(0,%2)
"	stwcx.	%3,0,%2 \n\
	bne-	1b"
	ISYNC_ON_SMP
	: "=&r" (prev), "+m" (*(volatile unsigned int *)p)
	: "r" (p), "r" (val)
	: "cc", "memory");

	return prev;
}

/*
 * Changes the memory location '*ptr' to be val and returns
 * the previous value stored there.
 */
static __always_inline unsigned long
__xchg_u32_local(volatile void *p, unsigned long val)
{
	unsigned long prev;

	__asm__ __volatile__(
"1:	lwarx	%0,0,%2 \n"
	PPC405_ERR77(0,%2)
"	stwcx.	%3,0,%2 \n\
	bne-	1b"
	: "=&r" (prev), "+m" (*(volatile unsigned int *)p)
	: "r" (p), "r" (val)
	: "cc", "memory");

	return prev;
}

#ifdef CONFIG_PPC64
static __always_inline unsigned long
__xchg_u64(volatile void *p, unsigned long val)
{
	unsigned long prev;

	__asm__ __volatile__(
	LWSYNC_ON_SMP
"1:	ldarx	%0,0,%2 \n"
	PPC405_ERR77(0,%2)
"	stdcx.	%3,0,%2 \n\
	bne-	1b"
	ISYNC_ON_SMP
	: "=&r" (prev), "+m" (*(volatile unsigned long *)p)
	: "r" (p), "r" (val)
	: "cc", "memory");

	return prev;
}

static __always_inline unsigned long
__xchg_u64_local(volatile void *p, unsigned long val)
{
	unsigned long prev;

	__asm__ __volatile__(
"1:	ldarx	%0,0,%2 \n"
	PPC405_ERR77(0,%2)
"	stdcx.	%3,0,%2 \n\
	bne-	1b"
	: "=&r" (prev), "+m" (*(volatile unsigned long *)p)
	: "r" (p), "r" (val)
	: "cc", "memory");

	return prev;
}
#endif

/*
 * This function doesn't exist, so you'll get a linker error
 * if something tries to do an invalid xchg().
 */
extern void __xchg_called_with_bad_pointer(void);

static __always_inline unsigned long
__xchg(volatile void *ptr, unsigned long x, unsigned int size)
{
	switch (size) {
	case 4:
		return __xchg_u32(ptr, x);
#ifdef CONFIG_PPC64
	case 8:
		return __xchg_u64(ptr, x);
#endif
	}
	__xchg_called_with_bad_pointer();
	return x;
}

static __always_inline unsigned long
__xchg_local(volatile void *ptr, unsigned long x, unsigned int size)
{
	switch (size) {
	case 4:
		return __xchg_u32_local(ptr, x);
#ifdef CONFIG_PPC64
	case 8:
		return __xchg_u64_local(ptr, x);
#endif
	}
	__xchg_called_with_bad_pointer();
	return x;
}

#define xchg(ptr,x)							     \
  ({									     \
     __typeof__(*(ptr)) _x_ = (x);					     \
     (__typeof__(*(ptr))) __xchg((ptr), (unsigned long)_x_, sizeof(*(ptr))); \
  })

#define xchg_local(ptr,x)						     \
  ({									     \
     __typeof__(*(ptr)) _x_ = (x);					     \
     (__typeof__(*(ptr))) __xchg_local((ptr),				     \
		(unsigned long)_x_, sizeof(*(ptr)));			     \
  })
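
/*
 * Illustrative sketch (not part of this header): xchg() atomically
 * stores a new value and returns the old one, e.g. for a hand-rolled
 * test-and-set.  The 'lock' variable is hypothetical.
 *
 *	static unsigned int lock;
 *
 *	while (xchg(&lock, 1) != 0)	// old value 0 means we acquired it
 *		cpu_relax();
 *	// ... critical section ...
 *	lock = 0;			// a real unlock needs a prior barrier
 */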

/*
 * Compare and exchange - if *p == old, set it to new,
 * and return the old value of *p.
 */
#define __HAVE_ARCH_CMPXCHG	1

static __always_inline unsigned long
__cmpxchg_u32(volatile unsigned int *p, unsigned long old, unsigned long new)
{
	unsigned int prev;

	__asm__ __volatile__ (
	LWSYNC_ON_SMP
"1:	lwarx	%0,0,%2		# __cmpxchg_u32\n\
	cmpw	0,%0,%3\n\
	bne-	2f\n"
	PPC405_ERR77(0,%2)
"	stwcx.	%4,0,%2\n\
	bne-	1b"
	ISYNC_ON_SMP
	"\n\
2:"
	: "=&r" (prev), "+m" (*p)
	: "r" (p), "r" (old), "r" (new)
	: "cc", "memory");

	return prev;
}

static __always_inline unsigned long
__cmpxchg_u32_local(volatile unsigned int *p, unsigned long old,
			unsigned long new)
{
	unsigned int prev;

	__asm__ __volatile__ (
"1:	lwarx	%0,0,%2		# __cmpxchg_u32\n\
	cmpw	0,%0,%3\n\
	bne-	2f\n"
	PPC405_ERR77(0,%2)
"	stwcx.	%4,0,%2\n\
	bne-	1b"
	"\n\
2:"
	: "=&r" (prev), "+m" (*p)
	: "r" (p), "r" (old), "r" (new)
	: "cc", "memory");

	return prev;
}

#ifdef CONFIG_PPC64
static __always_inline unsigned long
__cmpxchg_u64(volatile unsigned long *p, unsigned long old, unsigned long new)
{
	unsigned long prev;

	__asm__ __volatile__ (
	LWSYNC_ON_SMP
"1:	ldarx	%0,0,%2		# __cmpxchg_u64\n\
	cmpd	0,%0,%3\n\
	bne-	2f\n\
	stdcx.	%4,0,%2\n\
	bne-	1b"
	ISYNC_ON_SMP
	"\n\
2:"
	: "=&r" (prev), "+m" (*p)
	: "r" (p), "r" (old), "r" (new)
	: "cc", "memory");

	return prev;
}

static __always_inline unsigned long
__cmpxchg_u64_local(volatile unsigned long *p, unsigned long old,
			unsigned long new)
{
	unsigned long prev;

	__asm__ __volatile__ (
"1:	ldarx	%0,0,%2		# __cmpxchg_u64\n\
	cmpd	0,%0,%3\n\
	bne-	2f\n\
	stdcx.	%4,0,%2\n\
	bne-	1b"
	"\n\
2:"
	: "=&r" (prev), "+m" (*p)
	: "r" (p), "r" (old), "r" (new)
	: "cc", "memory");

	return prev;
}
#endif

/* This function doesn't exist, so you'll get a linker error
   if something tries to do an invalid cmpxchg().  */
extern void __cmpxchg_called_with_bad_pointer(void);

static __always_inline unsigned long
__cmpxchg(volatile void *ptr, unsigned long old, unsigned long new,
	  unsigned int size)
{
	switch (size) {
	case 4:
		return __cmpxchg_u32(ptr, old, new);
#ifdef CONFIG_PPC64
	case 8:
		return __cmpxchg_u64(ptr, old, new);
#endif
	}
	__cmpxchg_called_with_bad_pointer();
	return old;
}

static __always_inline unsigned long
__cmpxchg_local(volatile void *ptr, unsigned long old, unsigned long new,
	  unsigned int size)
{
	switch (size) {
	case 4:
		return __cmpxchg_u32_local(ptr, old, new);
#ifdef CONFIG_PPC64
	case 8:
		return __cmpxchg_u64_local(ptr, old, new);
#endif
	}
	__cmpxchg_called_with_bad_pointer();
	return old;
}

#define cmpxchg(ptr, o, n)						 \
  ({									 \
     __typeof__(*(ptr)) _o_ = (o);					 \
     __typeof__(*(ptr)) _n_ = (n);					 \
     (__typeof__(*(ptr))) __cmpxchg((ptr), (unsigned long)_o_,		 \
				    (unsigned long)_n_, sizeof(*(ptr))); \
  })

#define cmpxchg_local(ptr, o, n)					 \
  ({									 \
     __typeof__(*(ptr)) _o_ = (o);					 \
     __typeof__(*(ptr)) _n_ = (n);					 \
     (__typeof__(*(ptr))) __cmpxchg_local((ptr), (unsigned long)_o_,	 \
				    (unsigned long)_n_, sizeof(*(ptr))); \
  })
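
/*
 * Illustrative sketch (not part of this header): the usual cmpxchg()
 * retry loop, here implementing a lock-free increment of a hypothetical
 * counter.  cmpxchg() returns the value it found, so the loop repeats
 * until no other CPU raced with the update.
 *
 *	static unsigned int counter;
 *
 *	unsigned int old, new;
 *	do {
 *		old = counter;
 *		new = old + 1;
 *	} while (cmpxchg(&counter, old, new) != old);
 */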

#ifdef CONFIG_PPC64
/*
 * We handle most unaligned accesses in hardware.  On the other hand
 * unaligned DMA can be very expensive on some ppc64 IO chips (it does
 * powers of 2 writes until it reaches sufficient alignment).
 *
 * Based on this we disable the IP header alignment in network drivers.
 * We also modify NET_SKB_PAD to be a cacheline in size, thus maintaining
 * cacheline alignment of buffers.
 */
#define NET_IP_ALIGN	0
#define NET_SKB_PAD	L1_CACHE_BYTES

#define cmpxchg64(ptr, o, n)						\
  ({									\
	BUILD_BUG_ON(sizeof(*(ptr)) != 8);				\
	cmpxchg((ptr), (o), (n));					\
  })
#define cmpxchg64_local(ptr, o, n)					\
  ({									\
	BUILD_BUG_ON(sizeof(*(ptr)) != 8);				\
	cmpxchg_local((ptr), (o), (n));					\
  })
#else
#include <asm-generic/cmpxchg-local.h>
#define cmpxchg64_local(ptr, o, n) __cmpxchg64_local_generic((ptr), (o), (n))
#endif

#define arch_align_stack(x) (x)

/* Used in very early kernel initialization. */
extern unsigned long reloc_offset(void);
extern unsigned long add_reloc_offset(unsigned long);
extern void reloc_got2(unsigned long);

#define PTRRELOC(x)	((typeof(x)) add_reloc_offset((unsigned long)(x)))
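
/*
 * Illustrative sketch (not part of this header): before the kernel has
 * relocated itself, globals must be accessed through PTRRELOC() so the
 * link-time address is adjusted by the current offset.  'boot_flag' is
 * hypothetical.
 *
 *	extern int boot_flag;
 *	*PTRRELOC(&boot_flag) = 1;	// safe before relocation
 */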

#ifdef CONFIG_VIRT_CPU_ACCOUNTING
extern void account_system_vtime(struct task_struct *);
#endif

extern struct dentry *powerpc_debugfs_root;

#endif /* __KERNEL__ */
#endif /* _ASM_POWERPC_SYSTEM_H */