/*
 * Source: include/asm-ppc/system.h, viewed via the linux-mips.git gitweb
 * tree (commit "Optimize andes_clear_page() and andes_copy_page() with
 * prefetch"), blob 162a09a1d181dd9d247f4d9acfa93f884bac3166.
 */
1 /*
2 * $Id: system.h,v 1.49 1999/09/11 18:37:54 cort Exp $
4 * Copyright (C) 1999 Cort Dougan <cort@cs.nmt.edu>
5 */
6 #ifndef __PPC_SYSTEM_H
7 #define __PPC_SYSTEM_H
9 #include <linux/config.h>
10 #include <linux/kdev_t.h>
12 #include <asm/processor.h>
13 #include <asm/atomic.h>
14 #include <asm/hw_irq.h>
/*
 * Memory barrier.
 * The sync instruction guarantees that all memory accesses initiated
 * by this processor have been performed (with respect to all other
 * mechanisms that access memory).  The eieio instruction is a barrier
 * providing an ordering (separately) for (a) cacheable stores and (b)
 * loads and stores to non-cacheable memory (e.g. I/O devices).
 *
 * mb() prevents loads and stores being reordered across this point.
 * rmb() prevents loads being reordered across this point.
 * wmb() prevents stores being reordered across this point.
 *
 * We can use the eieio instruction for wmb, but since it doesn't
 * give any ordering guarantees about loads, we have to use the
 * stronger but slower sync instruction for mb and rmb.
 */
#define mb()   __asm__ __volatile__ ("sync" : : : "memory")
#define rmb()  __asm__ __volatile__ ("sync" : : : "memory")
#define wmb()  __asm__ __volatile__ ("eieio" : : : "memory")

/* Assign to var, then issue the corresponding barrier so the store is
 * ordered before any later accesses. */
#define set_mb(var, value)	do { var = value; mb(); } while (0)
#define set_rmb(var, value)	do { var = value; rmb(); } while (0)
#define set_wmb(var, value)	do { var = value; wmb(); } while (0)
/* Hooks into the xmon built-in kernel debugger. */
extern void xmon_irq(int, void *, struct pt_regs *);
extern void xmon(struct pt_regs *excp);
44 /* Data cache block flush - write out the cache line containing the
45 specified address and then invalidate it in the cache. */
46 extern __inline__ void dcbf(void *line)
48 asm("dcbf %0,%1; sync" : : "r" (line), "r" (0));
/* Assorted arch-level services defined in other translation units
 * (assembly stubs, platform setup code, RTAS glue). */
extern void print_backtrace(unsigned long *);
extern void show_regs(struct pt_regs * regs);
extern void flush_instruction_cache(void);
extern void hard_reset_now(void);
extern void poweroff_now(void);
extern int _get_PVR(void);
extern long _get_L2CR(void);
extern void _set_L2CR(unsigned long);
extern void via_cuda_init(void);
extern void pmac_nvram_init(void);
extern void read_rtc_time(void);
extern void pmac_find_display(void);
extern void giveup_fpu(struct task_struct *);
extern void enable_kernel_fp(void);
extern void giveup_altivec(struct task_struct *);
extern void load_up_altivec(struct task_struct *);
extern void cvt_fd(float *from, double *to, unsigned long *fpscr);
extern void cvt_df(double *from, float *to, unsigned long *fpscr);
extern int call_rtas(const char *, int, int, unsigned long *, ...);
extern int abs(int);
struct device_node;
extern void note_scsi_host(struct device_node *, void *);

struct task_struct;
/* No per-arch work is needed before a context switch on PPC. */
#define prepare_to_switch()	do { } while(0)
#define switch_to(prev,next,last)	_switch_to((prev),(next),&(last))
extern void _switch_to(struct task_struct *, struct task_struct *,
		       struct task_struct **);

struct thread_struct;
extern struct task_struct *_switch(struct thread_struct *prev,
				   struct thread_struct *next);

extern unsigned int rtas_data;

struct pt_regs;
extern void dump_regs(struct pt_regs *);
#ifndef CONFIG_SMP

/* Uniprocessor: interrupt control only needs to act on this CPU. */
#define cli()			__cli()
#define sti()			__sti()
#define save_flags(flags)	__save_flags(flags)
#define restore_flags(flags)	__restore_flags(flags)
#define save_and_cli(flags)	__save_and_cli(flags)

#else /* CONFIG_SMP */

/* SMP: the global versions serialize against the other CPUs as well. */
extern void __global_cli(void);
extern void __global_sti(void);
extern unsigned long __global_save_flags(void);
extern void __global_restore_flags(unsigned long);
#define cli()			__global_cli()
#define sti()			__global_sti()
#define save_flags(x)		((x)=__global_save_flags())
#define restore_flags(x)	__global_restore_flags(x)

#endif /* !CONFIG_SMP */

/* The local_irq_* forms always act on the current CPU only, regardless
 * of CONFIG_SMP. */
#define local_irq_disable()	__cli()
#define local_irq_enable()	__sti()
#define local_irq_save(flags)	__save_and_cli(flags)
#define local_irq_restore(flags) __restore_flags(flags)
/* Type-preserving atomic exchange: stores x into *ptr and returns the
 * previous value, dispatching on sizeof(*(ptr)) via __xchg() below.
 * (The scraped source carried an identical second definition of this
 * macro; the benign duplicate has been dropped.) */
#define xchg(ptr,x) ((__typeof__(*(ptr)))__xchg((unsigned long)(x),(ptr),sizeof(*(ptr))))
#define tas(ptr) (xchg((ptr),1))

/* Size-specific exchange primitives, implemented in assembly. */
extern unsigned long xchg_u64(void *ptr, unsigned long val);
extern unsigned long xchg_u32(void *ptr, unsigned long val);

/*
 * This function doesn't exist, so you'll get a linker error
 * if something tries to do an invalid xchg().
 *
 * This only works if the compiler isn't horribly bad at optimizing.
 * gcc-2.5.8 reportedly can't handle this, but as that doesn't work
 * too well on the alpha anyway..
 */
extern void __xchg_called_with_bad_pointer(void);
/*
 * Dispatch helper for xchg(): atomically exchanges *ptr with x and
 * returns the old value.  Any size other than 4 or 8 falls through to
 * the deliberately-undefined __xchg_called_with_bad_pointer(), turning
 * an invalid xchg() into a link-time error.
 */
static inline unsigned long __xchg(unsigned long x, void * ptr, int size)
{
	switch (size) {
	case 4:
		return (unsigned long )xchg_u32(ptr, x);
	case 8:
		return (unsigned long )xchg_u64(ptr, x);
	}
	__xchg_called_with_bad_pointer();
	return x;	/* not reached if the size was valid */
}
148 extern inline void * xchg_ptr(void * m, void * val)
150 return (void *) xchg_u32(m, (unsigned long) val);
153 #endif