/**
 * @file op_model_ppro.c
 * Family 6 perfmon and architectural perfmon MSR operations
 *
 * @remark Copyright 2002 OProfile authors
 * @remark Copyright 2008 Intel Corporation
 * @remark Read the file COPYING
 *
 * @author John Levon
 * @author Philippe Elie
 * @author Graydon Hoare
 * @author Andi Kleen
 * @author Robert Richter <robert.richter@amd.com>
 */

#include <linux/oprofile.h>
#include <linux/slab.h>
#include <asm/ptrace.h>
#include <asm/msr.h>
#include <asm/apic.h>
#include <asm/nmi.h>

#include "op_x86_model.h"
#include "op_counter.h"

static int num_counters = 2;
static int counter_width = 32;
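
/*
 * Reserved bits in the P6 event-select MSRs: bits 63:32 plus bit 21.
 * ppro_setup_ctrs() masks the control value with this so that reserved
 * bits read back from the hardware are preserved when the MSR is
 * reprogrammed.
 */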
#define MSR_PPRO_EVENTSEL_RESERVED	((0xFFFFFFFFULL << 32) | (1ULL << 21))

static u64 *reset_value;

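/*
 * Claim the counter and event-select MSRs through the NMI-watchdog
 * reservation interface.  Counters that cannot be reserved keep a zero
 * ->addr and are skipped by all later callbacks.
 */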
static void ppro_fill_in_addresses(struct op_msrs * const msrs)
{
	int i;

	for (i = 0; i < num_counters; i++) {
		if (reserve_perfctr_nmi(MSR_P6_PERFCTR0 + i))
			msrs->counters[i].addr = MSR_P6_PERFCTR0 + i;
	}

	for (i = 0; i < num_counters; i++) {
		if (reserve_evntsel_nmi(MSR_P6_EVNTSEL0 + i))
			msrs->controls[i].addr = MSR_P6_EVNTSEL0 + i;
	}
}


static void ppro_setup_ctrs(struct op_x86_model_spec const *model,
			    struct op_msrs const * const msrs)
{
	u64 val;
	int i;

	if (!reset_value) {
		reset_value = kzalloc(sizeof(reset_value[0]) * num_counters,
					GFP_ATOMIC);
		if (!reset_value)
			return;
	}

	if (cpu_has_arch_perfmon) {
		union cpuid10_eax eax;
		eax.full = cpuid_eax(0xa);

		/*
		 * For Core2 (family 6, model 15), don't reset the
		 * counter width:
		 */
		if (!(eax.split.version_id == 0 &&
			current_cpu_data.x86 == 6 &&
				current_cpu_data.x86_model == 15)) {

			if (counter_width < eax.split.bit_width)
				counter_width = eax.split.bit_width;
		}
	}

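	/*
	 * counter_width now matches the hardware; ppro_check_ctrs() below
	 * treats bit (counter_width - 1) as the counter's sign bit when
	 * testing for overflow.
	 */
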
	/* clear all counters */
	for (i = 0; i < num_counters; ++i) {
		if (unlikely(!msrs->controls[i].addr))
			continue;
		rdmsrl(msrs->controls[i].addr, val);
		val &= model->reserved;
		wrmsrl(msrs->controls[i].addr, val);
	}

	/* avoid a false detection of ctr overflows in NMI handler */
	for (i = 0; i < num_counters; ++i) {
		if (unlikely(!msrs->counters[i].addr))
			continue;
		wrmsrl(msrs->counters[i].addr, -1LL);
	}

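	/*
	 * Each active counter is seeded with -count below: counting up
	 * from the negated period, it wraps past zero and raises the PMU
	 * interrupt after exactly "count" events.
	 */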
	/* enable active counters */
	for (i = 0; i < num_counters; ++i) {
		if (counter_config[i].enabled && msrs->counters[i].addr) {
			reset_value[i] = counter_config[i].count;
			wrmsrl(msrs->counters[i].addr, -reset_value[i]);
			rdmsrl(msrs->controls[i].addr, val);
			val &= model->reserved;
			val |= op_x86_get_ctrl(model, &counter_config[i]);
			wrmsrl(msrs->controls[i].addr, val);
		} else {
			reset_value[i] = 0;
		}
	}
}


static int ppro_check_ctrs(struct pt_regs * const regs,
			   struct op_msrs const * const msrs)
{
	u64 val;
	int i;

	/*
	 * This can happen if perf counters are in use when
	 * we steal the die notifier NMI.
	 */
	if (unlikely(!reset_value))
		goto out;

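	/*
	 * A counter still counting up toward zero has its sign bit
	 * (bit counter_width - 1) set; a clear sign bit means it crossed
	 * zero, i.e. overflowed, so a sample is logged and the counter
	 * re-armed.
	 */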
	for (i = 0; i < num_counters; ++i) {
		if (!reset_value[i])
			continue;
		rdmsrl(msrs->counters[i].addr, val);
		if (val & (1ULL << (counter_width - 1)))
			continue;
		oprofile_add_sample(regs, i);
		wrmsrl(msrs->counters[i].addr, -reset_value[i]);
	}

out:
	/*
	 * Only P6-based Pentium M needs to re-unmask the APIC vector, but
	 * doing so doesn't hurt the other P6 variants.
	 */
	apic_write(APIC_LVTPC, apic_read(APIC_LVTPC) & ~APIC_LVT_MASKED);

	/*
	 * We can't tell whether we really handled an interrupt: we might
	 * have caught a *second* counter just after it overflowed; the
	 * interrupt for this counter then arrives later, we find no
	 * counter that has overflowed, and returning 0 would leave the
	 * NMI code dazed and confused.  Instead, always claim an overflow
	 * was found.  This sucks.
	 */
	return 1;
}


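/*
 * start/stop only toggle ARCH_PERFMON_EVENTSEL0_ENABLE; the event
 * selection programmed by ppro_setup_ctrs() stays intact, so profiling
 * can be paused and resumed cheaply.
 */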
static void ppro_start(struct op_msrs const * const msrs)
{
	u64 val;
	int i;

	if (!reset_value)
		return;
	for (i = 0; i < num_counters; ++i) {
		if (reset_value[i]) {
			rdmsrl(msrs->controls[i].addr, val);
			val |= ARCH_PERFMON_EVENTSEL0_ENABLE;
			wrmsrl(msrs->controls[i].addr, val);
		}
	}
}


static void ppro_stop(struct op_msrs const * const msrs)
{
	u64 val;
	int i;

	if (!reset_value)
		return;
	for (i = 0; i < num_counters; ++i) {
		if (!reset_value[i])
			continue;
		rdmsrl(msrs->controls[i].addr, val);
		val &= ~ARCH_PERFMON_EVENTSEL0_ENABLE;
		wrmsrl(msrs->controls[i].addr, val);
	}
}

static void ppro_shutdown(struct op_msrs const * const msrs)
{
	int i;

	for (i = 0; i < num_counters; ++i) {
		if (msrs->counters[i].addr)
			release_perfctr_nmi(MSR_P6_PERFCTR0 + i);
	}
	for (i = 0; i < num_counters; ++i) {
		if (msrs->controls[i].addr)
			release_evntsel_nmi(MSR_P6_EVNTSEL0 + i);
	}
	if (reset_value) {
		kfree(reset_value);
		reset_value = NULL;
	}
}


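/*
 * Classic P6 descriptor: a fixed pair of counters.  op_arch_perfmon_spec
 * below reuses the same callbacks with the counter count probed at
 * runtime instead.
 */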
struct op_x86_model_spec op_ppro_spec = {
	.num_counters		= 2,
	.num_controls		= 2,
	.reserved		= MSR_PPRO_EVENTSEL_RESERVED,
	.fill_in_addresses	= &ppro_fill_in_addresses,
	.setup_ctrs		= &ppro_setup_ctrs,
	.check_ctrs		= &ppro_check_ctrs,
	.start			= &ppro_start,
	.stop			= &ppro_stop,
	.shutdown		= &ppro_shutdown
};

/*
 * Architectural performance monitoring.
 *
 * Newer Intel CPUs (Core1+) have support for architectural
 * events described in CPUID 0xA.  See the IA32 SDM Vol3b.18 for details.
 * The advantage of this is that it can be done without knowing about
 * the specific CPU.
 */

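/*
 * CPUID leaf 0xA reports the number of general-purpose counters in
 * EAX[15:8] (eax.split.num_events); arch_perfmon_setup_counters() sizes
 * num_counters and the op_arch_perfmon_spec fields from it.
 */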
static void arch_perfmon_setup_counters(void)
{
	union cpuid10_eax eax;

	eax.full = cpuid_eax(0xa);

	/* Workaround for BIOS bugs in 6/15. Taken from perfmon2 */
	if (eax.split.version_id == 0 && current_cpu_data.x86 == 6 &&
		current_cpu_data.x86_model == 15) {
		eax.split.version_id = 2;
		eax.split.num_events = 2;
		eax.split.bit_width = 40;
	}

	num_counters = eax.split.num_events;

	op_arch_perfmon_spec.num_counters = num_counters;
	op_arch_perfmon_spec.num_controls = num_counters;
}

static int arch_perfmon_init(struct oprofile_operations *ignore)
{
	arch_perfmon_setup_counters();
	return 0;
}

struct op_x86_model_spec op_arch_perfmon_spec = {
	.reserved		= MSR_PPRO_EVENTSEL_RESERVED,
	.init			= &arch_perfmon_init,
	/* num_counters/num_controls filled in at runtime */
	.fill_in_addresses	= &ppro_fill_in_addresses,
	/* user space does the cpuid check for available events */
	.setup_ctrs		= &ppro_setup_ctrs,
	.check_ctrs		= &ppro_check_ctrs,
	.start			= &ppro_start,
	.stop			= &ppro_stop,
	.shutdown		= &ppro_shutdown
};