tools/perf/perf-sys.h

#ifndef _PERF_SYS_H
#define _PERF_SYS_H

#include <unistd.h>
#include <sys/types.h>
#include <sys/syscall.h>
#include <linux/types.h>
#include <linux/perf_event.h>
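
/*
 * Each architecture block below provides the memory barriers mb(), wmb()
 * and rmb(), optionally cpu_relax(), and CPUINFO_PROC, the /proc/cpuinfo
 * field(s) perf matches to describe the CPU in the perf.data header.  The
 * x86 blocks also carry fallback syscall numbers for older C libraries.
 */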
#if defined(__i386__)
#define mb() asm volatile("lock; addl $0,0(%%esp)" ::: "memory")
#define wmb() asm volatile("lock; addl $0,0(%%esp)" ::: "memory")
#define rmb() asm volatile("lock; addl $0,0(%%esp)" ::: "memory")
#define cpu_relax() asm volatile("rep; nop" ::: "memory");
#define CPUINFO_PROC {"model name"}
#ifndef __NR_perf_event_open
# define __NR_perf_event_open 336
#endif
#ifndef __NR_futex
# define __NR_futex 240
#endif
#ifndef __NR_gettid
# define __NR_gettid 224
#endif
#endif

#if defined(__x86_64__)
#define mb() asm volatile("mfence" ::: "memory")
#define wmb() asm volatile("sfence" ::: "memory")
#define rmb() asm volatile("lfence" ::: "memory")
#define cpu_relax() asm volatile("rep; nop" ::: "memory");
#define CPUINFO_PROC {"model name"}
#ifndef __NR_perf_event_open
# define __NR_perf_event_open 298
#endif
#ifndef __NR_futex
# define __NR_futex 202
#endif
#ifndef __NR_gettid
# define __NR_gettid 186
#endif
#endif

#ifdef __powerpc__
#include "../../arch/powerpc/include/uapi/asm/unistd.h"
#define mb() asm volatile ("sync" ::: "memory")
#define wmb() asm volatile ("sync" ::: "memory")
#define rmb() asm volatile ("sync" ::: "memory")
#define CPUINFO_PROC {"cpu"}
#endif

#ifdef __s390__
#define mb() asm volatile("bcr 15,0" ::: "memory")
#define wmb() asm volatile("bcr 15,0" ::: "memory")
#define rmb() asm volatile("bcr 15,0" ::: "memory")
#define CPUINFO_PROC {"vendor_id"}
#endif

#ifdef __sh__
#if defined(__SH4A__) || defined(__SH5__)
# define mb() asm volatile("synco" ::: "memory")
# define wmb() asm volatile("synco" ::: "memory")
# define rmb() asm volatile("synco" ::: "memory")
#else
# define mb() asm volatile("" ::: "memory")
# define wmb() asm volatile("" ::: "memory")
# define rmb() asm volatile("" ::: "memory")
#endif
#define CPUINFO_PROC {"cpu type"}
#endif

#ifdef __hppa__
#define mb() asm volatile("" ::: "memory")
#define wmb() asm volatile("" ::: "memory")
#define rmb() asm volatile("" ::: "memory")
#define CPUINFO_PROC {"cpu"}
#endif

#ifdef __sparc__
#ifdef __LP64__
#define mb() asm volatile("ba,pt %%xcc, 1f\n" \
			  "membar #StoreLoad\n" \
			  "1:\n":::"memory")
#else
#define mb() asm volatile("":::"memory")
#endif
#define wmb() asm volatile("":::"memory")
#define rmb() asm volatile("":::"memory")
#define CPUINFO_PROC {"cpu"}
#endif

#ifdef __alpha__
#define mb() asm volatile("mb" ::: "memory")
#define wmb() asm volatile("wmb" ::: "memory")
#define rmb() asm volatile("mb" ::: "memory")
#define CPUINFO_PROC {"cpu model"}
#endif

#ifdef __ia64__
#define mb() asm volatile ("mf" ::: "memory")
#define wmb() asm volatile ("mf" ::: "memory")
#define rmb() asm volatile ("mf" ::: "memory")
#define cpu_relax() asm volatile ("hint @pause" ::: "memory")
#define CPUINFO_PROC {"model name"}
#endif

#ifdef __arm__
/*
 * Use the __kuser_memory_barrier helper in the CPU helper page. See
 * arch/arm/kernel/entry-armv.S in the kernel source for details.
 */
#define mb() ((void(*)(void))0xffff0fa0)()
#define wmb() ((void(*)(void))0xffff0fa0)()
#define rmb() ((void(*)(void))0xffff0fa0)()
#define CPUINFO_PROC {"model name", "Processor"}
#endif

#ifdef __aarch64__
#define mb() asm volatile("dmb ish" ::: "memory")
#define wmb() asm volatile("dmb ishst" ::: "memory")
#define rmb() asm volatile("dmb ishld" ::: "memory")
#define cpu_relax() asm volatile("yield" ::: "memory")
#endif

#ifdef __mips__
#define mb() asm volatile( \
		".set mips2\n\t" \
		"sync\n\t" \
		".set mips0" \
		: /* no output */ \
		: /* no input */ \
		: "memory")
#define wmb() mb()
#define rmb() mb()
#define CPUINFO_PROC {"cpu model"}
#endif

#ifdef __arc__
#define mb() asm volatile("" ::: "memory")
#define wmb() asm volatile("" ::: "memory")
#define rmb() asm volatile("" ::: "memory")
#define CPUINFO_PROC {"Processor"}
#endif

#ifdef __metag__
#define mb() asm volatile("" ::: "memory")
#define wmb() asm volatile("" ::: "memory")
#define rmb() asm volatile("" ::: "memory")
#define CPUINFO_PROC {"CPU"}
#endif

#ifdef __xtensa__
#define mb() asm volatile("memw" ::: "memory")
#define wmb() asm volatile("memw" ::: "memory")
#define rmb() asm volatile("" ::: "memory")
#define CPUINFO_PROC {"core ID"}
#endif

#ifdef __tile__
#define mb() asm volatile ("mf" ::: "memory")
#define wmb() asm volatile ("mf" ::: "memory")
#define rmb() asm volatile ("mf" ::: "memory")
#define cpu_relax() asm volatile ("mfspr zero, PASS" ::: "memory")
#define CPUINFO_PROC {"model name"}
#endif
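
/*
 * barrier() is a pure compiler barrier; architectures that did not define
 * cpu_relax() above simply fall back to it.
 */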
#define barrier() asm volatile ("" ::: "memory")

#ifndef cpu_relax
#define cpu_relax() barrier()
#endif
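
/*
 * Thin wrapper around the perf_event_open(2) syscall.  When perf is built
 * with HAVE_ATTR_TEST, the call is also reported to test_attr__open() so
 * the attr test suite can inspect what was passed to the kernel.
 */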
static inline int
sys_perf_event_open(struct perf_event_attr *attr,
		    pid_t pid, int cpu, int group_fd,
		    unsigned long flags)
{
	int fd;

	fd = syscall(__NR_perf_event_open, attr, pid, cpu,
		     group_fd, flags);

#ifdef HAVE_ATTR_TEST
	if (unlikely(test_attr__enabled))
		test_attr__open(attr, pid, cpu, fd, group_fd, flags);
#endif
	return fd;
}

#endif /* _PERF_SYS_H */
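
For reference, a minimal sketch (not part of the header) of how the
sys_perf_event_open() wrapper might be used: it opens a hardware cycle
counter for the calling thread, runs a busy loop, and reads the count.
The choice of event, the busy loop and the error handling are illustrative
assumptions, not code taken from the perf tool.

/* count-cycles.c: illustrative use of sys_perf_event_open() */
#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/perf_event.h>
#include "perf-sys.h"

int main(void)
{
	struct perf_event_attr attr;
	long long count = 0;
	volatile int i;
	int fd;

	memset(&attr, 0, sizeof(attr));
	attr.size = sizeof(attr);
	attr.type = PERF_TYPE_HARDWARE;
	attr.config = PERF_COUNT_HW_CPU_CYCLES;
	attr.disabled = 1;
	attr.exclude_kernel = 1;

	/* pid == 0, cpu == -1: measure the calling thread on any CPU. */
	fd = sys_perf_event_open(&attr, 0, -1, -1, 0);
	if (fd < 0) {
		perror("sys_perf_event_open");
		return 1;
	}

	ioctl(fd, PERF_EVENT_IOC_ENABLE, 0);
	for (i = 0; i < 1000000; i++)
		;	/* work being measured */
	ioctl(fd, PERF_EVENT_IOC_DISABLE, 0);

	if (read(fd, &count, sizeof(count)) == sizeof(count))
		printf("cycles: %lld\n", count);

	close(fd);
	return 0;
}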