/*
 * $Id: system.h,v 1.48 1999/09/05 11:56:40 paulus Exp $
 *
 * Copyright (C) 1999 Cort Dougan <cort@cs.nmt.edu>
 */
#ifndef __PPC_SYSTEM_H
#define __PPC_SYSTEM_H

#include <linux/kdev_t.h>

#include <asm/processor.h>
#include <asm/atomic.h>
#include <asm/irq_control.h>

/*
 * Memory barrier.
 * The sync instruction guarantees that all memory accesses initiated
 * by this processor have been performed (with respect to all other
 * mechanisms that access memory).  The eieio instruction is a barrier
 * providing an ordering (separately) for (a) cacheable stores and (b)
 * loads and stores to non-cacheable memory (e.g. I/O devices).
 *
 * mb() prevents loads and stores being reordered across this point.
 * rmb() prevents loads being reordered across this point.
 * wmb() prevents stores being reordered across this point.
 *
 * We can use the eieio instruction for wmb, but since it doesn't
 * give any ordering guarantees about loads, we have to use the
 * stronger but slower sync instruction for mb and rmb.
 */
#define mb()	__asm__ __volatile__ ("sync" : : : "memory")
#define rmb()	__asm__ __volatile__ ("sync" : : : "memory")
#define wmb()	__asm__ __volatile__ ("eieio" : : : "memory")

#define set_mb(var, value)	do { var = value; mb(); } while (0)
#define set_rmb(var, value)	do { var = value; rmb(); } while (0)
#define set_wmb(var, value)	do { var = value; wmb(); } while (0)
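
/*
 * Illustrative sketch (not part of the original header): the usual
 * producer/consumer pairing of wmb() and rmb().  The producer makes
 * its data visible before setting a ready flag; the consumer checks
 * the flag before reading the data.  shared_data, shared_ready,
 * produce() and consume() are hypothetical names used only here.
 *
 *	static int shared_data;
 *	static volatile int shared_ready;
 *
 *	void produce(int val)
 *	{
 *		shared_data = val;
 *		wmb();			// order the data store before the flag store
 *		shared_ready = 1;
 *	}
 *
 *	int consume(void)
 *	{
 *		while (!shared_ready)
 *			;		// spin until the producer sets the flag
 *		rmb();			// order the flag load before the data load
 *		return shared_data;
 *	}
 */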

extern void xmon_irq(int, void *, struct pt_regs *);
extern void xmon(struct pt_regs *excp);

/* Data cache block flush - write out the cache line containing the
   specified address and then invalidate it in the cache. */
extern __inline__ void dcbf(void *line)
{
	asm("dcbf %0,%1; sync" : : "r" (line), "r" (0));
}
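
/*
 * Illustrative sketch (not part of the original header): pushing a
 * buffer out to memory line by line before handing it to a bus-master
 * device.  flush_buffer() is a hypothetical helper, and the 32-byte,
 * 32-byte-aligned cache line assumption is the example's, not the
 * header's.
 *
 *	static void flush_buffer(void *buf, unsigned long len)
 *	{
 *		char *p = (char *) buf;		// assumed 32-byte aligned
 *		char *end = p + len;
 *
 *		while (p < end) {
 *			dcbf(p);		// write back and invalidate this line
 *			p += 32;
 *		}
 *	}
 */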

extern void print_backtrace(unsigned long *);
extern void show_regs(struct pt_regs * regs);
extern void flush_instruction_cache(void);
extern void hard_reset_now(void);
extern void poweroff_now(void);
extern int _get_PVR(void);
extern long _get_L2CR(void);
extern void _set_L2CR(unsigned long);
extern void via_cuda_init(void);
extern void pmac_nvram_init(void);
extern void read_rtc_time(void);
extern void pmac_find_display(void);
extern void giveup_fpu(struct task_struct *);
extern void enable_kernel_fp(void);
extern void cvt_fd(float *from, double *to, unsigned long *fpscr);
extern void cvt_df(double *from, float *to, unsigned long *fpscr);
extern int call_rtas(const char *, int, int, unsigned long *, ...);

struct device_node;
extern void note_scsi_host(struct device_node *, void *);

struct task_struct;
#define prepare_to_switch()	do { } while(0)
#define switch_to(prev,next,last)	_switch_to((prev),(next),&(last))
extern void _switch_to(struct task_struct *, struct task_struct *,
		       struct task_struct **);

struct thread_struct;
extern struct task_struct *_switch(struct thread_struct *prev,
				   struct thread_struct *next);

extern unsigned int rtas_data;

struct pt_regs;
extern void dump_regs(struct pt_regs *);

#ifndef __SMP__

#define cli()	__cli()
#define sti()	__sti()
#define save_flags(flags)	__save_flags(flags)
#define restore_flags(flags)	__restore_flags(flags)
#define save_and_cli(flags)	__save_and_cli(flags)

#else /* __SMP__ */

extern void __global_cli(void);
extern void __global_sti(void);
extern unsigned long __global_save_flags(void);
extern void __global_restore_flags(unsigned long);
#define cli() __global_cli()
#define sti() __global_sti()
#define save_flags(x) ((x)=__global_save_flags())
#define restore_flags(x) __global_restore_flags(x)

#endif /* !__SMP__ */
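
/*
 * Illustrative sketch (not part of the original header): the usual
 * save_flags()/cli()/restore_flags() bracket around a short critical
 * section shared with an interrupt handler.  counter and bump_counter()
 * are hypothetical names used only here.
 *
 *	static unsigned long counter;
 *
 *	void bump_counter(void)
 *	{
 *		unsigned long flags;
 *
 *		save_flags(flags);
 *		cli();			// disable interrupts (globally on SMP)
 *		counter++;		// touch data an irq handler also touches
 *		restore_flags(flags);	// put the previous interrupt state back
 *	}
 */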

#define xchg(ptr,x) ((__typeof__(*(ptr)))__xchg((unsigned long)(x),(ptr),sizeof(*(ptr))))

extern unsigned long xchg_u64(void *ptr, unsigned long val);
extern unsigned long xchg_u32(void *ptr, unsigned long val);

/*
 * This function doesn't exist, so you'll get a linker error
 * if something tries to do an invalid xchg().
 *
 * This only works if the compiler isn't horribly bad at optimizing.
 * gcc-2.5.8 reportedly can't handle this, but as that doesn't work
 * too well on the alpha anyway..
 */
extern void __xchg_called_with_bad_pointer(void);

#define xchg(ptr,x) ((__typeof__(*(ptr)))__xchg((unsigned long)(x),(ptr),sizeof(*(ptr))))
#define tas(ptr) (xchg((ptr),1))

static inline unsigned long __xchg(unsigned long x, void * ptr, int size)
{
	switch (size) {
	case 4:
		return (unsigned long )xchg_u32(ptr, x);
	case 8:
		return (unsigned long )xchg_u64(ptr, x);
	}
	__xchg_called_with_bad_pointer();
	return x;
}
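
/*
 * Illustrative sketch (not part of the original header): xchg()/tas()
 * as an atomic test-and-set on a word-sized flag.  busy, try_lock()
 * and unlock() are hypothetical names; real code would normally use
 * the kernel's spinlock primitives instead.  Note that only 4- and
 * 8-byte operands are handled above, so an xchg() on e.g. a char
 * compiles but fails to link via __xchg_called_with_bad_pointer().
 *
 *	static unsigned long busy;
 *
 *	int try_lock(void)
 *	{
 *		return tas(&busy) == 0;	// old value 0 means we got it
 *	}
 *
 *	void unlock(void)
 *	{
 *		mb();			// order critical-section accesses first
 *		busy = 0;
 *	}
 */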

extern inline void * xchg_ptr(void * m, void * val)
{
	return (void *) xchg_u32(m, (unsigned long) val);
}

#endif /* __PPC_SYSTEM_H */