/*
 * @file op_model_ppro.h
 * Family 6 perfmon and architectural perfmon MSR operations
 *
 * @remark Copyright 2002 OProfile authors
 * @remark Copyright 2008 Intel Corporation
 * @remark Read the file COPYING
 *
 * @author Philippe Elie
 * @author Graydon Hoare
 * @author Robert Richter <robert.richter@amd.com>
 */

#include <linux/oprofile.h>
#include <linux/slab.h>
#include <asm/ptrace.h>
#include <asm/msr.h>
#include <asm/apic.h>
#include <asm/nmi.h>

#include "op_x86_model.h"
#include "op_counter.h"

static int num_counters = 2;
static int counter_width = 32;

#define MSR_PPRO_EVENTSEL_RESERVED	((0xFFFFFFFFULL<<32)|(1ULL<<21))

static u64 *reset_value;

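/*
 * reset_value[i] caches the configured count for counter i.  Counters
 * are programmed with -reset_value[i] so they count up towards zero and
 * raise an NMI on overflow; the NMI handler re-arms them with the same
 * value.
 */
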
static void ppro_fill_in_addresses(struct op_msrs * const msrs)
{
	int i;

	for (i = 0; i < num_counters; i++) {
		if (reserve_perfctr_nmi(MSR_P6_PERFCTR0 + i))
			msrs->counters[i].addr = MSR_P6_PERFCTR0 + i;
	}

	for (i = 0; i < num_counters; i++) {
		if (reserve_evntsel_nmi(MSR_P6_EVNTSEL0 + i))
			msrs->controls[i].addr = MSR_P6_EVNTSEL0 + i;
	}
}

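/*
 * A counter or event-select MSR whose ->addr is still 0 here failed its
 * reserve_perfctr_nmi()/reserve_evntsel_nmi() reservation above; the
 * code below treats a zero address as "counter unavailable" and skips it.
 */
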
static void ppro_setup_ctrs(struct op_x86_model_spec const *model,
			    struct op_msrs const * const msrs)
{
	u64 val;
	int i;

	if (!reset_value) {
		reset_value = kzalloc(sizeof(reset_value[0]) * num_counters,
				      GFP_ATOMIC);
		if (!reset_value)
			return;
	}

	if (cpu_has_arch_perfmon) {
		union cpuid10_eax eax;
		eax.full = cpuid_eax(0xa);

		/*
		 * For Core2 (family 6, model 15), don't reset the
		 * counter width:
		 */
		if (!(eax.split.version_id == 0 &&
		      current_cpu_data.x86 == 6 &&
		      current_cpu_data.x86_model == 15)) {

			if (counter_width < eax.split.bit_width)
				counter_width = eax.split.bit_width;
		}
	}

	/* clear all counters */
	for (i = 0; i < num_counters; ++i) {
		if (unlikely(!msrs->controls[i].addr)) {
			if (counter_config[i].enabled && !smp_processor_id())
				/*
				 * counter is reserved, this is on all
				 * cpus, so report only for cpu #0
				 */
				op_x86_warn_reserved(i);
			continue;
		}
		rdmsrl(msrs->controls[i].addr, val);
		if (val & ARCH_PERFMON_EVENTSEL0_ENABLE)
			op_x86_warn_in_use(i);
		val &= model->reserved;
		wrmsrl(msrs->controls[i].addr, val);
	}

	/* avoid a false detection of ctr overflows in NMI handler */
	for (i = 0; i < num_counters; ++i) {
		if (unlikely(!msrs->counters[i].addr))
			continue;
		wrmsrl(msrs->counters[i].addr, -1LL);
	}

	/* enable active counters */
	for (i = 0; i < num_counters; ++i) {
		if (counter_config[i].enabled && msrs->counters[i].addr) {
			reset_value[i] = counter_config[i].count;
			wrmsrl(msrs->counters[i].addr, -reset_value[i]);
			rdmsrl(msrs->controls[i].addr, val);
			val &= model->reserved;
			val |= op_x86_get_ctrl(model, &counter_config[i]);
			wrmsrl(msrs->controls[i].addr, val);
		} else {
			reset_value[i] = 0;
		}
	}
}

static int ppro_check_ctrs(struct pt_regs * const regs,
			   struct op_msrs const * const msrs)
{
	u64 val;
	int i;

	/*
	 * This can happen if perf counters are in use when
	 * we steal the die notifier NMI.
	 */
	if (unlikely(!reset_value))
		goto out;

	for (i = 0; i < num_counters; ++i) {
		if (!reset_value[i])
			continue;
		rdmsrl(msrs->counters[i].addr, val);
		if (val & (1ULL << (counter_width - 1)))
			continue;
		oprofile_add_sample(regs, i);
		wrmsrl(msrs->counters[i].addr, -reset_value[i]);
	}

out:
	/* Only P6 based Pentium M need to re-unmask the apic vector but it
	 * doesn't hurt other P6 variant */
	apic_write(APIC_LVTPC, apic_read(APIC_LVTPC) & ~APIC_LVT_MASKED);

	/* We can't work out if we really handled an interrupt.  We might
	 * have caught a *second* counter just after it overflowed; its
	 * interrupt then arrives, we find no counter that has overflowed,
	 * and returning 0 would leave the NMI code dazed + confused.
	 * Instead we always assume we found an overflow.  This sucks.
	 */
	return 1;
}

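/*
 * ppro_start()/ppro_stop() below only toggle the ENABLE bit for counters
 * with a non-zero reset_value[], i.e. the ones that ppro_setup_ctrs()
 * actually programmed.
 */
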
static void ppro_start(struct op_msrs const * const msrs)
{
	u64 val;
	int i;

	if (!reset_value)
		return;
	for (i = 0; i < num_counters; ++i) {
		if (reset_value[i]) {
			rdmsrl(msrs->controls[i].addr, val);
			val |= ARCH_PERFMON_EVENTSEL0_ENABLE;
			wrmsrl(msrs->controls[i].addr, val);
		}
	}
}

static void ppro_stop(struct op_msrs const * const msrs)
{
	u64 val;
	int i;

	if (!reset_value)
		return;
	for (i = 0; i < num_counters; ++i) {
		if (!reset_value[i])
			continue;
		rdmsrl(msrs->controls[i].addr, val);
		val &= ~ARCH_PERFMON_EVENTSEL0_ENABLE;
		wrmsrl(msrs->controls[i].addr, val);
	}
}

static void ppro_shutdown(struct op_msrs const * const msrs)
{
	int i;

	for (i = 0; i < num_counters; ++i) {
		if (msrs->counters[i].addr)
			release_perfctr_nmi(MSR_P6_PERFCTR0 + i);
	}
	for (i = 0; i < num_counters; ++i) {
		if (msrs->controls[i].addr)
			release_evntsel_nmi(MSR_P6_EVNTSEL0 + i);
	}
	if (reset_value) {
		kfree(reset_value);
		reset_value = NULL;
	}
}

struct op_x86_model_spec op_ppro_spec = {
	.num_counters		= 2,
	.num_controls		= 2,
	.reserved		= MSR_PPRO_EVENTSEL_RESERVED,
	.fill_in_addresses	= &ppro_fill_in_addresses,
	.setup_ctrs		= &ppro_setup_ctrs,
	.check_ctrs		= &ppro_check_ctrs,
	.start			= &ppro_start,
	.stop			= &ppro_stop,
	.shutdown		= &ppro_shutdown
};

/*
 * Architectural performance monitoring.
 *
 * Newer Intel CPUs (Core1+) have support for architectural
 * events described in CPUID 0xA.  See the IA32 SDM Vol3b.18 for details.
 * The advantage of this is that it can be done without knowing about
 * the specific CPU.
 */

static void arch_perfmon_setup_counters(void)
{
	union cpuid10_eax eax;

	eax.full = cpuid_eax(0xa);

	/* Workaround for BIOS bugs in 6/15. Taken from perfmon2 */
	if (eax.split.version_id == 0 && current_cpu_data.x86 == 6 &&
	    current_cpu_data.x86_model == 15) {
		eax.split.version_id = 2;
		eax.split.num_events = 2;
		eax.split.bit_width = 40;
	}

	num_counters = eax.split.num_events;

	op_arch_perfmon_spec.num_counters = num_counters;
	op_arch_perfmon_spec.num_controls = num_counters;
}

static int arch_perfmon_init(struct oprofile_operations *ignore)
{
	arch_perfmon_setup_counters();
	return 0;
}

struct op_x86_model_spec op_arch_perfmon_spec = {
	.reserved		= MSR_PPRO_EVENTSEL_RESERVED,
	.init			= &arch_perfmon_init,
	/* num_counters/num_controls filled in at runtime */
	.fill_in_addresses	= &ppro_fill_in_addresses,
	/* user space does the cpuid check for available events */
	.setup_ctrs		= &ppro_setup_ctrs,
	.check_ctrs		= &ppro_check_ctrs,
	.start			= &ppro_start,
	.stop			= &ppro_stop,
	.shutdown		= &ppro_shutdown
};