/*
 * @file op_model_ppro.c
 * Family 6 perfmon and architectural perfmon MSR operations
 *
 * @remark Copyright 2002 OProfile authors
 * @remark Copyright 2008 Intel Corporation
 * @remark Read the file COPYING
 *
 * @author Philippe Elie
 * @author Graydon Hoare
 */

#include <linux/oprofile.h>
#include <linux/slab.h>
#include <asm/ptrace.h>
#include <asm/msr.h>
#include <asm/apic.h>
#include <asm/nmi.h>
#include <asm/intel_arch_perfmon.h>

#include "op_x86_model.h"
#include "op_counter.h"

static int num_counters = 2;
static int counter_width = 32;

#define CTR_IS_RESERVED(msrs, c)	(msrs->counters[(c)].addr ? 1 : 0)
#define CTR_OVERFLOWED(n)	(!((n) & (1ULL<<(counter_width-1))))

#define CTRL_IS_RESERVED(msrs, c)	(msrs->controls[(c)].addr ? 1 : 0)
#define CTRL_SET_ACTIVE(n)	(n |= (1<<22))
#define CTRL_SET_INACTIVE(n)	(n &= ~(1<<22))
#define CTRL_CLEAR(x)		(x &= (1<<21))
#define CTRL_SET_ENABLE(val)	(val |= 1<<20)
#define CTRL_SET_USR(val, u)	(val |= ((u & 1) << 16))
#define CTRL_SET_KERN(val, k)	(val |= ((k & 1) << 17))
#define CTRL_SET_UM(val, m)	(val |= (m << 8))
#define CTRL_SET_EVENT(val, e)	(val |= e)
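
/*
 * Added note on the overflow test above: each live counter is
 * programmed with -count (see ppro_setup_ctrs() below), so it counts
 * up towards zero.  While it is still counting, bit
 * (counter_width - 1) -- the sign bit at the hardware's width -- is
 * set; once the counter wraps past zero that bit clears and
 * CTR_OVERFLOWED() reports true.  Illustrative values: with
 * counter_width == 32 and count == 100000 the counter starts at
 * 0xfffe7960 (bit 31 set) and overflows when it reaches 0.
 */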

static u64 *reset_value;

static void ppro_fill_in_addresses(struct op_msrs * const msrs)
{
	int i;

	for (i = 0; i < num_counters; i++) {
		if (reserve_perfctr_nmi(MSR_P6_PERFCTR0 + i))
			msrs->counters[i].addr = MSR_P6_PERFCTR0 + i;
		else
			msrs->counters[i].addr = 0;
	}

	for (i = 0; i < num_counters; i++) {
		if (reserve_evntsel_nmi(MSR_P6_EVNTSEL0 + i))
			msrs->controls[i].addr = MSR_P6_EVNTSEL0 + i;
		else
			msrs->controls[i].addr = 0;
	}
}
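
/*
 * Added commentary: reserve_perfctr_nmi()/reserve_evntsel_nmi()
 * arbitrate ownership of the MSRs with other users such as the NMI
 * watchdog.  An addr of 0 is the sentinel the rest of this file keys
 * on: CTR_IS_RESERVED()/CTRL_IS_RESERVED() treat a zero address as
 * "this counter is not ours to touch".
 */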

static void ppro_setup_ctrs(struct op_x86_model_spec const *model,
			    struct op_msrs const * const msrs)
{
	unsigned int low, high;
	int i;

	if (!reset_value) {
		reset_value = kmalloc(sizeof(reset_value[0]) * num_counters,
					GFP_ATOMIC);
		if (!reset_value)
			return;
	}

	if (cpu_has_arch_perfmon) {
		union cpuid10_eax eax;
		eax.full = cpuid_eax(0xa);

		/*
		 * For Core2 (family 6, model 15), don't reset the
		 * counter width:
		 */
		if (!(eax.split.version_id == 0 &&
			current_cpu_data.x86 == 6 &&
				current_cpu_data.x86_model == 15)) {

			if (counter_width < eax.split.bit_width)
				counter_width = eax.split.bit_width;
		}
	}

	/* clear all counters */
	for (i = 0; i < num_counters; ++i) {
		if (unlikely(!CTRL_IS_RESERVED(msrs, i)))
			continue;
		rdmsr(msrs->controls[i].addr, low, high);
		CTRL_CLEAR(low);
		wrmsr(msrs->controls[i].addr, low, high);
	}

	/* avoid a false detection of ctr overflows in NMI handler */
	for (i = 0; i < num_counters; ++i) {
		if (unlikely(!CTR_IS_RESERVED(msrs, i)))
			continue;
		wrmsrl(msrs->counters[i].addr, -1LL);
	}
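	/*
	 * Added note: -1 keeps the counter's top bit set at any
	 * supported width, so CTR_OVERFLOWED() stays false for counters
	 * we are not actively using; a stale value with the top bit
	 * clear would look like an overflow to the NMI handler.
	 */
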
	/* enable active counters */
	for (i = 0; i < num_counters; ++i) {
		if ((counter_config[i].enabled) && (CTR_IS_RESERVED(msrs, i))) {
			reset_value[i] = counter_config[i].count;

			wrmsrl(msrs->counters[i].addr, -reset_value[i]);

			rdmsr(msrs->controls[i].addr, low, high);
			CTRL_CLEAR(low);
			CTRL_SET_ENABLE(low);
			CTRL_SET_USR(low, counter_config[i].user);
			CTRL_SET_KERN(low, counter_config[i].kernel);
			CTRL_SET_UM(low, counter_config[i].unit_mask);
			CTRL_SET_EVENT(low, counter_config[i].event);
			wrmsr(msrs->controls[i].addr, low, high);
		} else {
			reset_value[i] = 0;
		}
	}
}
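
/*
 * Worked example for the enable loop above (illustrative values, not
 * from the original source): counting unhalted core cycles (the
 * architectural event 0x3C, unit mask 0x00) in both user and kernel
 * mode builds up low as follows:
 *
 *	CTRL_SET_ENABLE:	|= 1 << 20	(PMI on overflow)
 *	CTRL_SET_USR(1):	|= 1 << 16
 *	CTRL_SET_KERN(1):	|= 1 << 17
 *	CTRL_SET_UM(0):		no change
 *	CTRL_SET_EVENT(0x3c):	|= 0x3c
 *
 * giving low == 0x0013003c.  ppro_start() later ORs in the enable bit
 * via CTRL_SET_ACTIVE (1 << 22), making it 0x0053003c, and the
 * counter starts counting.
 */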

static int ppro_check_ctrs(struct pt_regs * const regs,
			   struct op_msrs const * const msrs)
{
	u64 val;
	int i;

	for (i = 0; i < num_counters; ++i) {
		if (!reset_value[i])
			continue;
		rdmsrl(msrs->counters[i].addr, val);
		if (CTR_OVERFLOWED(val)) {
			oprofile_add_sample(regs, i);
			wrmsrl(msrs->counters[i].addr, -reset_value[i]);
		}
	}

	/* Only P6 based Pentium M need to re-unmask the apic vector but
	 * it doesn't hurt other P6 variants */
	apic_write(APIC_LVTPC, apic_read(APIC_LVTPC) & ~APIC_LVT_MASKED);

	/* We can't tell whether we really handled an interrupt. We
	 * might have caught a *second* counter just after it
	 * overflowed; when the interrupt for this counter arrives
	 * later, we find no overflowed counter, and returning 0 would
	 * leave the NMI code dazed + confused. Instead we always
	 * assume we found an overflow. This sucks.
	 */
	return 1;
}

static void ppro_start(struct op_msrs const * const msrs)
{
	unsigned int low, high;
	int i;

	if (!reset_value)
		return;
	for (i = 0; i < num_counters; ++i) {
		if (reset_value[i]) {
			rdmsr(msrs->controls[i].addr, low, high);
			CTRL_SET_ACTIVE(low);
			wrmsr(msrs->controls[i].addr, low, high);
		}
	}
}

static void ppro_stop(struct op_msrs const * const msrs)
{
	unsigned int low, high;
	int i;

	if (!reset_value)
		return;
	for (i = 0; i < num_counters; ++i) {
		if (!reset_value[i])
			continue;
		rdmsr(msrs->controls[i].addr, low, high);
		CTRL_SET_INACTIVE(low);
		wrmsr(msrs->controls[i].addr, low, high);
	}
}

static void ppro_shutdown(struct op_msrs const * const msrs)
{
	int i;

	for (i = 0; i < num_counters; ++i) {
		if (CTR_IS_RESERVED(msrs, i))
			release_perfctr_nmi(MSR_P6_PERFCTR0 + i);
	}
	for (i = 0; i < num_counters; ++i) {
		if (CTRL_IS_RESERVED(msrs, i))
			release_evntsel_nmi(MSR_P6_EVNTSEL0 + i);
	}
	if (reset_value) {
		kfree(reset_value);
		reset_value = NULL;
	}
}

struct op_x86_model_spec const op_ppro_spec = {
	.num_counters = 2,	/* can be overridden at runtime */
	.num_controls = 2,	/* ditto */
	.fill_in_addresses = &ppro_fill_in_addresses,
	.setup_ctrs = &ppro_setup_ctrs,
	.check_ctrs = &ppro_check_ctrs,
	.start = &ppro_start,
	.stop = &ppro_stop,
	.shutdown = &ppro_shutdown
};

/*
 * Architectural performance monitoring.
 *
 * Newer Intel CPUs (Core1+) have support for architectural
 * events described in CPUID 0xA. See the IA32 SDM Vol3b.18 for details.
 * The advantage of this is that it can be done without knowing about
 * the specific CPU.
 */
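
/*
 * Added note: union cpuid10_eax (from the arch perfmon header) splits
 * CPUID.0xA EAX into byte-wide fields:
 *
 *	bits  0-7	version_id	architectural perfmon version
 *	bits  8-15	num_counters	general-purpose counters per core
 *	bits 16-23	bit_width	width of each counter in bits
 *	bits 24-31	mask_length	length of the EBX event mask
 *
 * which is what arch_perfmon_setup_counters() below decodes.
 */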

static void arch_perfmon_setup_counters(void)
{
	union cpuid10_eax eax;

	eax.full = cpuid_eax(0xa);

	/* Workaround for BIOS bugs in 6/15. Taken from perfmon2 */
	if (eax.split.version_id == 0 && current_cpu_data.x86 == 6 &&
		current_cpu_data.x86_model == 15) {
		eax.split.version_id = 2;
		eax.split.num_counters = 2;
		eax.split.bit_width = 40;
	}

	num_counters = eax.split.num_counters;

	op_arch_perfmon_spec.num_counters = num_counters;
	op_arch_perfmon_spec.num_controls = num_counters;
}
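
/*
 * Added note (an inference from the workaround above, not from the
 * original source): some Core2 (family 6, model 15) BIOSes apparently
 * leave CPUID 0xA reporting version_id == 0, so the code substitutes
 * the values that silicon generation actually implements: arch perfmon
 * version 2, two general-purpose counters, 40-bit counter width.
 */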

static int arch_perfmon_init(struct oprofile_operations *ignore)
{
	arch_perfmon_setup_counters();
	return 0;
}

struct op_x86_model_spec op_arch_perfmon_spec = {
	.init = &arch_perfmon_init,
	/* num_counters/num_controls filled in at runtime */
	.fill_in_addresses = &ppro_fill_in_addresses,
	/* user space does the cpuid check for available events */
	.setup_ctrs = &ppro_setup_ctrs,
	.check_ctrs = &ppro_check_ctrs,
	.start = &ppro_start,
	.stop = &ppro_stop,
	.shutdown = &ppro_shutdown
};