Merge branch 'x86/cleanups' into x86/trampoline
[linux-2.6.git] / arch/x86/kernel/cpu/perf_event_intel_lbr.c
blob d202c1bece1a6407396626327c03d47be9adbecc
#ifdef CONFIG_CPU_SUP_INTEL

enum {
	LBR_FORMAT_32		= 0x00,
	LBR_FORMAT_LIP		= 0x01,
	LBR_FORMAT_EIP		= 0x02,
	LBR_FORMAT_EIP_FLAGS	= 0x03,
};
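/*
 * Illustration only: the format above is advertised in the low six bits
 * of IA32_PERF_CAPABILITIES.  A sketch of how x86_pmu.intel_cap.lbr_format
 * would get populated during PMU init (the real decode lives in the
 * shared Intel PMU setup code, not in this file):
 *
 *	u64 capabilities;
 *
 *	rdmsrl(MSR_IA32_PERF_CAPABILITIES, capabilities);
 *	x86_pmu.intel_cap.lbr_format = capabilities & 0x3f;
 */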
/*
 * We only support LBR implementations that have FREEZE_LBRS_ON_PMI;
 * otherwise it becomes nearly impossible to get a reliable stack.
 */
static void __intel_pmu_lbr_enable(void)
{
	u64 debugctl;

	rdmsrl(MSR_IA32_DEBUGCTLMSR, debugctl);
	debugctl |= (DEBUGCTLMSR_LBR | DEBUGCTLMSR_FREEZE_LBRS_ON_PMI);
	wrmsrl(MSR_IA32_DEBUGCTLMSR, debugctl);
}
static void __intel_pmu_lbr_disable(void)
{
	u64 debugctl;

	rdmsrl(MSR_IA32_DEBUGCTLMSR, debugctl);
	debugctl &= ~(DEBUGCTLMSR_LBR | DEBUGCTLMSR_FREEZE_LBRS_ON_PMI);
	wrmsrl(MSR_IA32_DEBUGCTLMSR, debugctl);
}
static void intel_pmu_lbr_reset_32(void)
{
	int i;

	for (i = 0; i < x86_pmu.lbr_nr; i++)
		wrmsrl(x86_pmu.lbr_from + i, 0);
}
static void intel_pmu_lbr_reset_64(void)
{
	int i;

	for (i = 0; i < x86_pmu.lbr_nr; i++) {
		wrmsrl(x86_pmu.lbr_from + i, 0);
		wrmsrl(x86_pmu.lbr_to   + i, 0);
	}
}
static void intel_pmu_lbr_reset(void)
{
	if (!x86_pmu.lbr_nr)
		return;

	if (x86_pmu.intel_cap.lbr_format == LBR_FORMAT_32)
		intel_pmu_lbr_reset_32();
	else
		intel_pmu_lbr_reset_64();
}
static void intel_pmu_lbr_enable(struct perf_event *event)
{
	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);

	if (!x86_pmu.lbr_nr)
		return;

	WARN_ON_ONCE(cpuc->enabled);

	/*
	 * Reset the LBR stack if we changed task context to
	 * avoid data leaks.
	 */
	if (event->ctx->task && cpuc->lbr_context != event->ctx) {
		intel_pmu_lbr_reset();
		cpuc->lbr_context = event->ctx;
	}

	cpuc->lbr_users++;
}
static void intel_pmu_lbr_disable(struct perf_event *event)
{
	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);

	if (!x86_pmu.lbr_nr)
		return;

	cpuc->lbr_users--;
	WARN_ON_ONCE(cpuc->lbr_users < 0);

	if (cpuc->enabled && !cpuc->lbr_users)
		__intel_pmu_lbr_disable();
}
static void intel_pmu_lbr_enable_all(void)
{
	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);

	if (cpuc->lbr_users)
		__intel_pmu_lbr_enable();
}
static void intel_pmu_lbr_disable_all(void)
{
	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);

	if (cpuc->lbr_users)
		__intel_pmu_lbr_disable();
}
static inline u64 intel_pmu_lbr_tos(void)
{
	u64 tos;

	rdmsrl(x86_pmu.lbr_tos, tos);

	return tos;
}
static void intel_pmu_lbr_read_32(struct cpu_hw_events *cpuc)
{
	unsigned long mask = x86_pmu.lbr_nr - 1;
	u64 tos = intel_pmu_lbr_tos();
	int i;

	for (i = 0; i < x86_pmu.lbr_nr; i++) {
		unsigned long lbr_idx = (tos - i) & mask;
		union {
			struct {
				u32 from;
				u32 to;
			};
			u64	lbr;
		} msr_lastbranch;

		rdmsrl(x86_pmu.lbr_from + lbr_idx, msr_lastbranch.lbr);

		cpuc->lbr_entries[i].from  = msr_lastbranch.from;
		cpuc->lbr_entries[i].to    = msr_lastbranch.to;
		cpuc->lbr_entries[i].flags = 0;
	}
	cpuc->lbr_stack.nr = i;
}
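/*
 * Worked example of the (tos - i) & mask walk above, assuming a 4-deep
 * LBR stack (mask = 3) and TOS = 1: the loop reads MSR indices
 * 1, 0, 3, 2, i.e. the most recent branch first, wrapping around the
 * ring of LASTBRANCH MSRs.  This relies on lbr_nr being a power of two.
 */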
#define LBR_FROM_FLAG_MISPRED	(1ULL << 63)

/*
 * Due to lack of segmentation in Linux the effective address (offset)
 * is the same as the linear address, allowing us to merge the LIP and EIP
 * LBR formats.
 */
static void intel_pmu_lbr_read_64(struct cpu_hw_events *cpuc)
{
	unsigned long mask = x86_pmu.lbr_nr - 1;
	int lbr_format = x86_pmu.intel_cap.lbr_format;
	u64 tos = intel_pmu_lbr_tos();
	int i;

	for (i = 0; i < x86_pmu.lbr_nr; i++) {
		unsigned long lbr_idx = (tos - i) & mask;
		u64 from, to, flags = 0;

		rdmsrl(x86_pmu.lbr_from + lbr_idx, from);
		rdmsrl(x86_pmu.lbr_to   + lbr_idx, to);

		if (lbr_format == LBR_FORMAT_EIP_FLAGS) {
			flags = !!(from & LBR_FROM_FLAG_MISPRED);
			from = (u64)((((s64)from) << 1) >> 1);
		}

		cpuc->lbr_entries[i].from  = from;
		cpuc->lbr_entries[i].to    = to;
		cpuc->lbr_entries[i].flags = flags;
	}
	cpuc->lbr_stack.nr = i;
}
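/*
 * Worked example of the EIP_FLAGS decode above: bit 63 of the FROM MSR
 * holds the misprediction flag instead of an address bit, so a canonical
 * kernel address arrives with its top bit overridden.  For a raw value of
 * 0x7fffffff81000000 with the flag clear, the signed shift left and back
 * sign-extends bit 62, recovering 0xffffffff81000000 as the branch source.
 * A plain mask of bit 63 would lose the canonical upper bit instead.
 */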
static void intel_pmu_lbr_read(void)
{
	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);

	if (!cpuc->lbr_users)
		return;

	if (x86_pmu.intel_cap.lbr_format == LBR_FORMAT_32)
		intel_pmu_lbr_read_32(cpuc);
	else
		intel_pmu_lbr_read_64(cpuc);
}
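/*
 * A minimal sketch of a consumer of the captured stack; the helper name
 * and pr_debug() reporting are illustrative, not part of this file's
 * interface.  The entries are only stable while the LBRs are frozen or
 * the PMU is disabled.
 */
static __used void intel_pmu_lbr_dump(struct cpu_hw_events *cpuc)
{
	int i;

	for (i = 0; i < cpuc->lbr_stack.nr; i++)
		pr_debug("lbr%d: %016llx -> %016llx %s\n", i,
			 cpuc->lbr_entries[i].from,
			 cpuc->lbr_entries[i].to,
			 cpuc->lbr_entries[i].flags ? "(mispredicted)" : "");
}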
static void intel_pmu_lbr_init_core(void)
{
	x86_pmu.lbr_nr	   = 4;
	x86_pmu.lbr_tos	   = 0x01c9;	/* MSR_LASTBRANCH_TOS */
	x86_pmu.lbr_from   = 0x40;	/* MSR_LASTBRANCH_0_FROM_IP */
	x86_pmu.lbr_to	   = 0x60;	/* MSR_LASTBRANCH_0_TO_IP */
}

static void intel_pmu_lbr_init_nhm(void)
{
	x86_pmu.lbr_nr	   = 16;
	x86_pmu.lbr_tos	   = 0x01c9;	/* MSR_LASTBRANCH_TOS */
	x86_pmu.lbr_from   = 0x680;	/* MSR_LASTBRANCH_0_FROM_IP */
	x86_pmu.lbr_to	   = 0x6c0;	/* MSR_LASTBRANCH_0_TO_IP */
}

static void intel_pmu_lbr_init_atom(void)
{
	x86_pmu.lbr_nr	   = 8;
	x86_pmu.lbr_tos	   = 0x01c9;	/* MSR_LASTBRANCH_TOS */
	x86_pmu.lbr_from   = 0x40;	/* MSR_LASTBRANCH_0_FROM_IP */
	x86_pmu.lbr_to	   = 0x60;	/* MSR_LASTBRANCH_0_TO_IP */
}
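/*
 * A sketch of how these are expected to be wired up from the
 * model-specific PMU setup (illustration only; the actual dispatch lives
 * in perf_event_intel.c and the model numbers here are assumptions):
 *
 *	switch (boot_cpu_data.x86_model) {
 *	case 15: case 23:		// Core2
 *		intel_pmu_lbr_init_core();
 *		break;
 *	case 28:			// Atom
 *		intel_pmu_lbr_init_atom();
 *		break;
 *	case 26: case 30:		// Nehalem
 *		intel_pmu_lbr_init_nhm();
 *		break;
 *	}
 */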
#endif /* CONFIG_CPU_SUP_INTEL */