/*
 * @file op_model_amd.c
 * athlon / K7 / K8 / Family 10h model-specific MSR operations
 *
 * @remark Copyright 2002-2009 OProfile authors
 * @remark Read the file COPYING
 *
 * @author Philippe Elie
 * @author Graydon Hoare
 * @author Robert Richter <robert.richter@amd.com>
 * @author Barry Kasindorf <barry.kasindorf@amd.com>
 * @author Jason Yeh <jason.yeh@amd.com>
 * @author Suravee Suthikulpanit <suravee.suthikulpanit@amd.com>
 */

#include <linux/oprofile.h>
#include <linux/device.h>
#include <linux/pci.h>
#include <linux/percpu.h>

#include <asm/ptrace.h>
#include <asm/msr.h>
#include <asm/nmi.h>

#include "op_x86_model.h"
#include "op_counter.h"

#define NUM_COUNTERS		4
#define NUM_CONTROLS		4
#ifdef CONFIG_OPROFILE_EVENT_MULTIPLEX
#define NUM_VIRT_COUNTERS	32
#define NUM_VIRT_CONTROLS	32
#else
#define NUM_VIRT_COUNTERS	NUM_COUNTERS
#define NUM_VIRT_CONTROLS	NUM_CONTROLS
#endif

#define OP_EVENT_MASK			0x0FFF
#define OP_CTR_OVERFLOW			(1ULL<<31)

#define MSR_AMD_EVENTSEL_RESERVED	((0xFFFFFCF0ULL<<32)|(1ULL<<21))

static unsigned long reset_value[NUM_VIRT_COUNTERS];

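/*
 * Counting scheme: op_amd_setup_ctrs() loads each active counter with
 * -(u64)reset_value, the hardware counts upward and raises an NMI on
 * overflow, i.e. after reset_value events, and op_amd_check_ctrs()
 * then rewrites the same value to re-arm it.  A zero entry marks an
 * unused virtual counter.
 */
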
/* IbsFetchCtl bits/masks */
#define IBS_FETCH_RAND_EN		(1ULL<<57)
#define IBS_FETCH_VAL			(1ULL<<49)
#define IBS_FETCH_ENABLE		(1ULL<<48)
#define IBS_FETCH_CNT_MASK		0xFFFF0000ULL

/* IbsOpCtl bits */
#define IBS_OP_CNT_CTL			(1ULL<<19)
#define IBS_OP_VAL			(1ULL<<18)
#define IBS_OP_ENABLE			(1ULL<<17)

#define IBS_FETCH_SIZE			6
#define IBS_OP_SIZE			12

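/*
 * IBS_FETCH_SIZE and IBS_OP_SIZE are the payload sizes handed to
 * oprofile_write_reserve() in op_amd_handle_ibs().  Each
 * oprofile_add_data64() call takes two slots, so a fetch sample
 * carries three 64-bit values (linear address, IbsFetchCtl, physical
 * address) and an op sample carries six (RIP, the IbsOpData MSRs and
 * the data-cache addresses).
 */
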
static int has_ibs;	/* AMD Family10h and later */

struct op_ibs_config {
	unsigned long op_enabled;
	unsigned long fetch_enabled;
	unsigned long max_cnt_fetch;
	unsigned long max_cnt_op;
	unsigned long rand_en;
	unsigned long dispatched_ops;
};

static struct op_ibs_config ibs_config;

#ifdef CONFIG_OPROFILE_EVENT_MULTIPLEX

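/*
 * Event multiplexing: up to NUM_VIRT_COUNTERS virtual counters are
 * time-multiplexed onto the NUM_COUNTERS hardware counters.
 * op_x86_virt_to_phys() and op_x86_phys_to_virt() translate between
 * the two index spaces, and op_mux_switch_ctrl() reprograms the
 * event-select MSRs whenever the active set of virtual counters is
 * rotated.
 */
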
static void op_mux_fill_in_addresses(struct op_msrs * const msrs)
{
	int i;

	for (i = 0; i < NUM_VIRT_COUNTERS; i++) {
		int hw_counter = op_x86_virt_to_phys(i);
		if (reserve_perfctr_nmi(MSR_K7_PERFCTR0 + i))
			msrs->multiplex[i].addr = MSR_K7_PERFCTR0 + hw_counter;
		else
			msrs->multiplex[i].addr = 0;
	}
}

static void op_mux_switch_ctrl(struct op_x86_model_spec const *model,
			       struct op_msrs const * const msrs)
{
	u64 val;
	int i;

	/* enable active counters */
	for (i = 0; i < NUM_COUNTERS; ++i) {
		int virt = op_x86_phys_to_virt(i);
		if (!counter_config[virt].enabled)
			continue;
		rdmsrl(msrs->controls[i].addr, val);
		val &= model->reserved;
		val |= op_x86_get_ctrl(model, &counter_config[virt]);
		wrmsrl(msrs->controls[i].addr, val);
	}
}

#else

static inline void op_mux_fill_in_addresses(struct op_msrs * const msrs) { }

#endif

/* functions for op_amd_spec */

static void op_amd_fill_in_addresses(struct op_msrs * const msrs)
{
	int i;

	for (i = 0; i < NUM_COUNTERS; i++) {
		if (reserve_perfctr_nmi(MSR_K7_PERFCTR0 + i))
			msrs->counters[i].addr = MSR_K7_PERFCTR0 + i;
		else
			msrs->counters[i].addr = 0;
	}

	for (i = 0; i < NUM_CONTROLS; i++) {
		if (reserve_evntsel_nmi(MSR_K7_EVNTSEL0 + i))
			msrs->controls[i].addr = MSR_K7_EVNTSEL0 + i;
		else
			msrs->controls[i].addr = 0;
	}

	op_mux_fill_in_addresses(msrs);
}

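/*
 * op_amd_setup_ctrs() first records the per-counter reset values,
 * then clears the reserved bits in every event-select MSR, writes -1
 * to every counter so a stale value cannot look like a fresh overflow
 * in the NMI handler, and finally loads each active counter with
 * -(u64)reset_value and its control bits.
 */
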
static void op_amd_setup_ctrs(struct op_x86_model_spec const *model,
			      struct op_msrs const * const msrs)
{
	u64 val;
	int i;

	/* setup reset_value */
	for (i = 0; i < NUM_VIRT_COUNTERS; ++i) {
		if (counter_config[i].enabled)
			reset_value[i] = counter_config[i].count;
		else
			reset_value[i] = 0;
	}

	/* clear all counters */
	for (i = 0; i < NUM_CONTROLS; ++i) {
		if (unlikely(!msrs->controls[i].addr))
			continue;
		rdmsrl(msrs->controls[i].addr, val);
		val &= model->reserved;
		wrmsrl(msrs->controls[i].addr, val);
	}

	/* avoid a false detection of ctr overflows in NMI handler */
	for (i = 0; i < NUM_COUNTERS; ++i) {
		if (unlikely(!msrs->counters[i].addr))
			continue;
		wrmsrl(msrs->counters[i].addr, -1LL);
	}

	/* enable active counters */
	for (i = 0; i < NUM_COUNTERS; ++i) {
		int virt = op_x86_phys_to_virt(i);
		if (!counter_config[virt].enabled)
			continue;
		if (!msrs->counters[i].addr)
			continue;

		/* setup counter registers */
		wrmsrl(msrs->counters[i].addr, -(u64)reset_value[virt]);

		/* setup control registers */
		rdmsrl(msrs->controls[i].addr, val);
		val &= model->reserved;
		val |= op_x86_get_ctrl(model, &counter_config[virt]);
		wrmsrl(msrs->controls[i].addr, val);
	}
}

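/*
 * Instruction-Based Sampling (Family 10h and later): roughly, the
 * hardware periodically tags one instruction fetch or one micro-op,
 * records its details in the MSR_AMD64_IBSFETCH* and MSR_AMD64_IBSOP*
 * registers, sets the corresponding valid bit and raises an
 * interrupt.  op_amd_handle_ibs() copies those MSRs into the sample
 * buffer and re-arms the enable bit.  Periods and enables come from
 * ibs_config, which user space fills in through the files created by
 * setup_ibs_files() below.
 */
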
static inline void
op_amd_handle_ibs(struct pt_regs * const regs,
		  struct op_msrs const * const msrs)
{
	u64 val, ctl;
	struct op_entry entry;

	if (!has_ibs)
		return;

	if (ibs_config.fetch_enabled) {
		rdmsrl(MSR_AMD64_IBSFETCHCTL, ctl);
		if (ctl & IBS_FETCH_VAL) {
			rdmsrl(MSR_AMD64_IBSFETCHLINAD, val);
			oprofile_write_reserve(&entry, regs, val,
					       IBS_FETCH_CODE, IBS_FETCH_SIZE);
			oprofile_add_data64(&entry, val);
			oprofile_add_data64(&entry, ctl);
			rdmsrl(MSR_AMD64_IBSFETCHPHYSAD, val);
			oprofile_add_data64(&entry, val);
			oprofile_write_commit(&entry);

			/* reenable the IRQ */
			ctl &= ~(IBS_FETCH_VAL | IBS_FETCH_CNT_MASK);
			ctl |= IBS_FETCH_ENABLE;
			wrmsrl(MSR_AMD64_IBSFETCHCTL, ctl);
		}
	}

	if (ibs_config.op_enabled) {
		rdmsrl(MSR_AMD64_IBSOPCTL, ctl);
		if (ctl & IBS_OP_VAL) {
			rdmsrl(MSR_AMD64_IBSOPRIP, val);
			oprofile_write_reserve(&entry, regs, val,
					       IBS_OP_CODE, IBS_OP_SIZE);
			oprofile_add_data64(&entry, val);
			rdmsrl(MSR_AMD64_IBSOPDATA, val);
			oprofile_add_data64(&entry, val);
			rdmsrl(MSR_AMD64_IBSOPDATA2, val);
			oprofile_add_data64(&entry, val);
			rdmsrl(MSR_AMD64_IBSOPDATA3, val);
			oprofile_add_data64(&entry, val);
			rdmsrl(MSR_AMD64_IBSDCLINAD, val);
			oprofile_add_data64(&entry, val);
			rdmsrl(MSR_AMD64_IBSDCPHYSAD, val);
			oprofile_add_data64(&entry, val);
			oprofile_write_commit(&entry);

			/* reenable the IRQ */
			ctl &= ~IBS_OP_VAL & 0xFFFFFFFF;
			ctl |= IBS_OP_ENABLE;
			wrmsrl(MSR_AMD64_IBSOPCTL, ctl);
		}
	}
}

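/*
 * The MaxCnt fields of IbsFetchCtl and IbsOpCtl hold the sampling
 * period divided by 16 (the low four bits are not stored), which is
 * why op_amd_start_ibs() shifts max_cnt_fetch and max_cnt_op right by
 * four and keeps only 16 bits before writing the control MSRs.
 */
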
static inline void op_amd_start_ibs(void)
{
	u64 val;

	if (has_ibs && ibs_config.fetch_enabled) {
		val = (ibs_config.max_cnt_fetch >> 4) & 0xFFFF;
		val |= ibs_config.rand_en ? IBS_FETCH_RAND_EN : 0;
		val |= IBS_FETCH_ENABLE;
		wrmsrl(MSR_AMD64_IBSFETCHCTL, val);
	}

	if (has_ibs && ibs_config.op_enabled) {
		val = (ibs_config.max_cnt_op >> 4) & 0xFFFF;
		val |= ibs_config.dispatched_ops ? IBS_OP_CNT_CTL : 0;
		val |= IBS_OP_ENABLE;
		wrmsrl(MSR_AMD64_IBSOPCTL, val);
	}
}

static void op_amd_stop_ibs(void)
{
	if (has_ibs && ibs_config.fetch_enabled)
		/* clear max count and enable */
		wrmsrl(MSR_AMD64_IBSFETCHCTL, 0);

	if (has_ibs && ibs_config.op_enabled)
		/* clear max count and enable */
		wrmsrl(MSR_AMD64_IBSOPCTL, 0);
}

static int op_amd_check_ctrs(struct pt_regs * const regs,
			     struct op_msrs const * const msrs)
{
	u64 val;
	int i;

	for (i = 0; i < NUM_COUNTERS; ++i) {
		int virt = op_x86_phys_to_virt(i);
		if (!reset_value[virt])
			continue;
		rdmsrl(msrs->counters[i].addr, val);
		/* bit is clear if overflowed: */
		if (val & OP_CTR_OVERFLOW)
			continue;
		oprofile_add_sample(regs, virt);
		wrmsrl(msrs->counters[i].addr, -(u64)reset_value[virt]);
	}

	op_amd_handle_ibs(regs, msrs);

	/* See op_model_ppro.c */
	return 1;
}

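/*
 * Starting and stopping only toggle ARCH_PERFMON_EVENTSEL0_ENABLE in
 * the event-select MSRs; the counter values and the remaining control
 * bits programmed by op_amd_setup_ctrs() are left untouched, so a
 * stopped profile can be restarted without reprogramming.
 */
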
static void op_amd_start(struct op_msrs const * const msrs)
{
	u64 val;
	int i;

	for (i = 0; i < NUM_COUNTERS; ++i) {
		if (!reset_value[op_x86_phys_to_virt(i)])
			continue;
		rdmsrl(msrs->controls[i].addr, val);
		val |= ARCH_PERFMON_EVENTSEL0_ENABLE;
		wrmsrl(msrs->controls[i].addr, val);
	}

	op_amd_start_ibs();
}

static void op_amd_stop(struct op_msrs const * const msrs)
{
	u64 val;
	int i;

	/*
	 * Subtle: stop on all counters to avoid race with setting our
	 * pm callback
	 */
	for (i = 0; i < NUM_COUNTERS; ++i) {
		if (!reset_value[op_x86_phys_to_virt(i)])
			continue;
		rdmsrl(msrs->controls[i].addr, val);
		val &= ~ARCH_PERFMON_EVENTSEL0_ENABLE;
		wrmsrl(msrs->controls[i].addr, val);
	}

	op_amd_stop_ibs();
}

static void op_amd_shutdown(struct op_msrs const * const msrs)
{
	int i;

	for (i = 0; i < NUM_COUNTERS; ++i) {
		if (msrs->counters[i].addr)
			release_perfctr_nmi(MSR_K7_PERFCTR0 + i);
	}
	for (i = 0; i < NUM_CONTROLS; ++i) {
		if (msrs->controls[i].addr)
			release_evntsel_nmi(MSR_K7_EVNTSEL0 + i);
	}
}

static u8 ibs_eilvt_off;

static inline void apic_init_ibs_nmi_per_cpu(void *arg)
{
	ibs_eilvt_off = setup_APIC_eilvt_ibs(0, APIC_EILVT_MSG_NMI, 0);
}

static inline void apic_clear_ibs_nmi_per_cpu(void *arg)
{
	setup_APIC_eilvt_ibs(0, APIC_EILVT_MSG_FIX, 1);
}

static int init_ibs_nmi(void)
{
#define IBSCTL_LVTOFFSETVAL		(1 << 8)
#define IBSCTL				0x1cc
	struct pci_dev *cpu_cfg;
	int nodes;
	u32 value = 0;

	/* per CPU setup */
	on_each_cpu(apic_init_ibs_nmi_per_cpu, NULL, 1);

	nodes = 0;
	cpu_cfg = NULL;
	do {
		cpu_cfg = pci_get_device(PCI_VENDOR_ID_AMD,
					 PCI_DEVICE_ID_AMD_10H_NB_MISC,
					 cpu_cfg);
		if (!cpu_cfg)
			break;
		++nodes;
		pci_write_config_dword(cpu_cfg, IBSCTL, ibs_eilvt_off
				       | IBSCTL_LVTOFFSETVAL);
		pci_read_config_dword(cpu_cfg, IBSCTL, &value);
		if (value != (ibs_eilvt_off | IBSCTL_LVTOFFSETVAL)) {
			pci_dev_put(cpu_cfg);
			printk(KERN_DEBUG "Failed to setup IBS LVT offset, "
				"IBSCTL = 0x%08x", value);
			return 1;
		}
	} while (1);

	if (!nodes) {
		printk(KERN_DEBUG "No CPU node configured for IBS");
		return 1;
	}

#ifdef CONFIG_NUMA
	/* Sanity check */
	/* Works only for 64bit with proper numa implementation. */
	if (nodes != num_possible_nodes()) {
		printk(KERN_DEBUG "Failed to setup CPU node(s) for IBS, "
			"found: %d, expected %d",
			nodes, num_possible_nodes());
		return 1;
	}
#endif
	return 0;
}

/* uninitialize the APIC for the IBS interrupts if needed */
static void clear_ibs_nmi(void)
{
	if (has_ibs)
		on_each_cpu(apic_clear_ibs_nmi_per_cpu, NULL, 1);
}

/* initialize the APIC for the IBS interrupts if available */
static void ibs_init(void)
{
	has_ibs = boot_cpu_has(X86_FEATURE_IBS);

	if (!has_ibs)
		return;

	if (init_ibs_nmi()) {
		has_ibs = 0;
		return;
	}

	printk(KERN_INFO "oprofile: AMD IBS detected\n");
}

static void ibs_exit(void)
{
	if (!has_ibs)
		return;

	clear_ibs_nmi();
}

static int (*create_arch_files)(struct super_block *sb, struct dentry *root);

static int setup_ibs_files(struct super_block *sb, struct dentry *root)
{
	struct dentry *dir;
	int ret = 0;

	/* architecture specific files */
	if (create_arch_files)
		ret = create_arch_files(sb, root);

	if (ret)
		return ret;

	if (!has_ibs)
		return ret;

	/* model specific files */

	/* setup some reasonable defaults */
	ibs_config.max_cnt_fetch = 250000;
	ibs_config.fetch_enabled = 0;
	ibs_config.max_cnt_op = 250000;
	ibs_config.op_enabled = 0;
	ibs_config.dispatched_ops = 1;

	dir = oprofilefs_mkdir(sb, root, "ibs_fetch");
	oprofilefs_create_ulong(sb, dir, "enable",
				&ibs_config.fetch_enabled);
	oprofilefs_create_ulong(sb, dir, "max_count",
				&ibs_config.max_cnt_fetch);
	oprofilefs_create_ulong(sb, dir, "rand_enable",
				&ibs_config.rand_en);

	dir = oprofilefs_mkdir(sb, root, "ibs_op");
	oprofilefs_create_ulong(sb, dir, "enable",
				&ibs_config.op_enabled);
	oprofilefs_create_ulong(sb, dir, "max_count",
				&ibs_config.max_cnt_op);
	oprofilefs_create_ulong(sb, dir, "dispatched_ops",
				&ibs_config.dispatched_ops);

	return 0;
}

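/*
 * With oprofilefs mounted (typically on /dev/oprofile), the files
 * created above show up as ibs_fetch/enable, ibs_fetch/max_count,
 * ibs_fetch/rand_enable, ibs_op/enable, ibs_op/max_count and
 * ibs_op/dispatched_ops; the oprofile daemon writes them before
 * profiling starts, and the values land directly in ibs_config.
 */
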
static int op_amd_init(struct oprofile_operations *ops)
{
	ibs_init();
	create_arch_files = ops->create_files;
	ops->create_files = setup_ibs_files;
	return 0;
}

static void op_amd_exit(void)
{
	ibs_exit();
}

struct op_x86_model_spec op_amd_spec = {
	.num_counters		= NUM_COUNTERS,
	.num_controls		= NUM_CONTROLS,
	.num_virt_counters	= NUM_VIRT_COUNTERS,
	.reserved		= MSR_AMD_EVENTSEL_RESERVED,
	.event_mask		= OP_EVENT_MASK,
	.init			= op_amd_init,
	.exit			= op_amd_exit,
	.fill_in_addresses	= &op_amd_fill_in_addresses,
	.setup_ctrs		= &op_amd_setup_ctrs,
	.check_ctrs		= &op_amd_check_ctrs,
	.start			= &op_amd_start,
	.stop			= &op_amd_stop,
	.shutdown		= &op_amd_shutdown,
#ifdef CONFIG_OPROFILE_EVENT_MULTIPLEX
	.switch_ctrl		= &op_mux_switch_ctrl,
#endif
};