/*
 * athlon / K7 / K8 / Family 10h model-specific MSR operations
 *
 * @remark Copyright 2002-2009 OProfile authors
 * @remark Read the file COPYING
 *
 * @author Philippe Elie
 * @author Graydon Hoare
 * @author Robert Richter <robert.richter@amd.com>
 * @author Barry Kasindorf <barry.kasindorf@amd.com>
 * @author Jason Yeh <jason.yeh@amd.com>
 * @author Suravee Suthikulpanit <suravee.suthikulpanit@amd.com>
 */
#include <linux/oprofile.h>
#include <linux/device.h>
#include <linux/pci.h>
#include <linux/percpu.h>

#include <asm/ptrace.h>
#include <asm/msr.h>
#include <asm/nmi.h>

#include "op_x86_model.h"
#include "op_counter.h"
#define NUM_COUNTERS		4
#define NUM_CONTROLS		4
#ifdef CONFIG_OPROFILE_EVENT_MULTIPLEX
#define NUM_VIRT_COUNTERS	32
#define NUM_VIRT_CONTROLS	32
#else
#define NUM_VIRT_COUNTERS	NUM_COUNTERS
#define NUM_VIRT_CONTROLS	NUM_CONTROLS
#endif
#define OP_EVENT_MASK			0x0FFF
#define OP_CTR_OVERFLOW			(1ULL<<31)

#define MSR_AMD_EVENTSEL_RESERVED	((0xFFFFFCF0ULL<<32)|(1ULL<<21))
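
/*
 * reset_value[i] holds the sample count for (virtual) counter i: the
 * counter is preloaded with -reset_value[i] and counts upward, so the
 * overflow NMI fires after that many events.
 */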
static unsigned long reset_value[NUM_VIRT_COUNTERS];
#ifdef CONFIG_OPROFILE_IBS

/* IbsFetchCtl bits/masks */
#define IBS_FETCH_RAND_EN	(1ULL<<57)
#define IBS_FETCH_VAL		(1ULL<<49)
#define IBS_FETCH_ENABLE	(1ULL<<48)
#define IBS_FETCH_CNT_MASK	0xFFFF0000ULL

/* IbsOpCtl bits */
#define IBS_OP_CNT_CTL		(1ULL<<19)
#define IBS_OP_VAL		(1ULL<<18)
#define IBS_OP_ENABLE		(1ULL<<17)

#define IBS_FETCH_SIZE		6
#define IBS_OP_SIZE		12

static int has_ibs;	/* AMD Family10h and later */
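
/*
 * IBS (Instruction-Based Sampling) state as configured from user space
 * through oprofilefs; see setup_ibs_files() below.
 */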
struct op_ibs_config {
	unsigned long op_enabled;
	unsigned long fetch_enabled;
	unsigned long max_cnt_fetch;
	unsigned long max_cnt_op;
	unsigned long rand_en;
	unsigned long dispatched_ops;
};

static struct op_ibs_config ibs_config;

#endif
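
/*
 * With event multiplexing the number of configurable events
 * (NUM_VIRT_COUNTERS) exceeds the number of physical counters.
 * op_x86_phys_to_virt() maps a physical counter to the virtual counter
 * currently scheduled on it; switch_ctrl reprograms the event-select
 * MSRs on every multiplexing round.
 */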
#ifdef CONFIG_OPROFILE_EVENT_MULTIPLEX

static void op_mux_switch_ctrl(struct op_x86_model_spec const *model,
			       struct op_msrs const * const msrs)
{
	u64 val;
	int i;

	/* enable active counters */
	for (i = 0; i < NUM_COUNTERS; ++i) {
		int virt = op_x86_phys_to_virt(i);
		if (!counter_config[virt].enabled)
			continue;
		rdmsrl(msrs->controls[i].addr, val);
		val &= model->reserved;
		val |= op_x86_get_ctrl(model, &counter_config[virt]);
		wrmsrl(msrs->controls[i].addr, val);
	}
}

#endif
/* functions for op_amd_spec */
static void op_amd_fill_in_addresses(struct op_msrs * const msrs)
{
	int i;

	for (i = 0; i < NUM_COUNTERS; i++) {
		if (reserve_perfctr_nmi(MSR_K7_PERFCTR0 + i))
			msrs->counters[i].addr = MSR_K7_PERFCTR0 + i;
		else
			msrs->counters[i].addr = 0;
	}

	for (i = 0; i < NUM_CONTROLS; i++) {
		if (reserve_evntsel_nmi(MSR_K7_EVNTSEL0 + i))
			msrs->controls[i].addr = MSR_K7_EVNTSEL0 + i;
		else
			msrs->controls[i].addr = 0;
	}
}
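
/*
 * Control registers are updated read-modify-write: model->reserved
 * masks off everything that must be preserved before the new event
 * selection is merged in. Counters are written with the negated reset
 * count so that they overflow into an NMI after reset_value events.
 */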
static void op_amd_setup_ctrs(struct op_x86_model_spec const *model,
			      struct op_msrs const * const msrs)
{
	u64 val;
	int i;

	/* setup reset_value */
	for (i = 0; i < NUM_VIRT_COUNTERS; ++i) {
		if (counter_config[i].enabled)
			reset_value[i] = counter_config[i].count;
		else
			reset_value[i] = 0;
	}

	/* clear all counters */
	for (i = 0; i < NUM_CONTROLS; ++i) {
		if (unlikely(!msrs->controls[i].addr))
			continue;
		rdmsrl(msrs->controls[i].addr, val);
		val &= model->reserved;
		wrmsrl(msrs->controls[i].addr, val);
	}

	/* avoid a false detection of ctr overflows in NMI handler */
	for (i = 0; i < NUM_COUNTERS; ++i) {
		if (unlikely(!msrs->counters[i].addr))
			continue;
		wrmsrl(msrs->counters[i].addr, -1LL);
	}

	/* enable active counters */
	for (i = 0; i < NUM_COUNTERS; ++i) {
		int virt = op_x86_phys_to_virt(i);
		if (!counter_config[virt].enabled)
			continue;
		if (!msrs->counters[i].addr)
			continue;

		/* setup counter registers */
		wrmsrl(msrs->counters[i].addr, -(u64)reset_value[virt]);

		/* setup control registers */
		rdmsrl(msrs->controls[i].addr, val);
		val &= model->reserved;
		val |= op_x86_get_ctrl(model, &counter_config[virt]);
		wrmsrl(msrs->controls[i].addr, val);
	}
}
#ifdef CONFIG_OPROFILE_IBS
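
/*
 * Emit one extended sample record per valid IBS event. The sizes are
 * given in 32-bit words and oprofile_add_data64() stores a 64-bit
 * value as two of them, so IBS_FETCH_SIZE (6) covers three 64-bit
 * values and IBS_OP_SIZE (12) the six op registers read below.
 */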
static inline void
op_amd_handle_ibs(struct pt_regs * const regs,
		  struct op_msrs const * const msrs)
{
	u64 val, ctl;
	struct op_entry entry;

	if (!has_ibs)
		return;

	if (ibs_config.fetch_enabled) {
		rdmsrl(MSR_AMD64_IBSFETCHCTL, ctl);
		if (ctl & IBS_FETCH_VAL) {
			rdmsrl(MSR_AMD64_IBSFETCHLINAD, val);
			oprofile_write_reserve(&entry, regs, val,
					       IBS_FETCH_CODE, IBS_FETCH_SIZE);
			oprofile_add_data64(&entry, val);
			oprofile_add_data64(&entry, ctl);
			rdmsrl(MSR_AMD64_IBSFETCHPHYSAD, val);
			oprofile_add_data64(&entry, val);
			oprofile_write_commit(&entry);

			/* reenable the IRQ */
			ctl &= ~(IBS_FETCH_VAL | IBS_FETCH_CNT_MASK);
			ctl |= IBS_FETCH_ENABLE;
			wrmsrl(MSR_AMD64_IBSFETCHCTL, ctl);
		}
	}

	if (ibs_config.op_enabled) {
		rdmsrl(MSR_AMD64_IBSOPCTL, ctl);
		if (ctl & IBS_OP_VAL) {
			rdmsrl(MSR_AMD64_IBSOPRIP, val);
			oprofile_write_reserve(&entry, regs, val,
					       IBS_OP_CODE, IBS_OP_SIZE);
			oprofile_add_data64(&entry, val);
			rdmsrl(MSR_AMD64_IBSOPDATA, val);
			oprofile_add_data64(&entry, val);
			rdmsrl(MSR_AMD64_IBSOPDATA2, val);
			oprofile_add_data64(&entry, val);
			rdmsrl(MSR_AMD64_IBSOPDATA3, val);
			oprofile_add_data64(&entry, val);
			rdmsrl(MSR_AMD64_IBSDCLINAD, val);
			oprofile_add_data64(&entry, val);
			rdmsrl(MSR_AMD64_IBSDCPHYSAD, val);
			oprofile_add_data64(&entry, val);
			oprofile_write_commit(&entry);

			/* reenable the IRQ */
			ctl &= ~IBS_OP_VAL & 0xFFFFFFFF;
			ctl |= IBS_OP_ENABLE;
			wrmsrl(MSR_AMD64_IBSOPCTL, ctl);
		}
	}
}
static inline void op_amd_start_ibs(void)
{
	u64 val;

	if (has_ibs && ibs_config.fetch_enabled) {
		val = (ibs_config.max_cnt_fetch >> 4) & 0xFFFF;
		val |= ibs_config.rand_en ? IBS_FETCH_RAND_EN : 0;
		val |= IBS_FETCH_ENABLE;
		wrmsrl(MSR_AMD64_IBSFETCHCTL, val);
	}

	if (has_ibs && ibs_config.op_enabled) {
		val = (ibs_config.max_cnt_op >> 4) & 0xFFFF;
		val |= ibs_config.dispatched_ops ? IBS_OP_CNT_CTL : 0;
		val |= IBS_OP_ENABLE;
		wrmsrl(MSR_AMD64_IBSOPCTL, val);
	}
}
static void op_amd_stop_ibs(void)
{
	if (has_ibs && ibs_config.fetch_enabled)
		/* clear max count and enable */
		wrmsrl(MSR_AMD64_IBSFETCHCTL, 0);

	if (has_ibs && ibs_config.op_enabled)
		/* clear max count and enable */
		wrmsrl(MSR_AMD64_IBSOPCTL, 0);
}
#else

static inline void op_amd_handle_ibs(struct pt_regs * const regs,
				     struct op_msrs const * const msrs) { }
static inline void op_amd_start_ibs(void) { }
static inline void op_amd_stop_ibs(void) { }

#endif
static int op_amd_check_ctrs(struct pt_regs * const regs,
			     struct op_msrs const * const msrs)
{
	u64 val;
	int i;

	for (i = 0; i < NUM_COUNTERS; ++i) {
		int virt = op_x86_phys_to_virt(i);
		if (!reset_value[virt])
			continue;
		rdmsrl(msrs->counters[i].addr, val);
		/* bit is clear if overflowed: */
		if (val & OP_CTR_OVERFLOW)
			continue;
		oprofile_add_sample(regs, virt);
		wrmsrl(msrs->counters[i].addr, -(u64)reset_value[virt]);
	}

	op_amd_handle_ibs(regs, msrs);

	/* See op_model_ppro.c */
	return 1;
}
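
/*
 * start/stop only toggle the enable bit in each active event-select
 * register; everything else was programmed by op_amd_setup_ctrs().
 */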
static void op_amd_start(struct op_msrs const * const msrs)
{
	u64 val;
	int i;

	for (i = 0; i < NUM_COUNTERS; ++i) {
		if (!reset_value[op_x86_phys_to_virt(i)])
			continue;
		rdmsrl(msrs->controls[i].addr, val);
		val |= ARCH_PERFMON_EVENTSEL0_ENABLE;
		wrmsrl(msrs->controls[i].addr, val);
	}

	op_amd_start_ibs();
}
static void op_amd_stop(struct op_msrs const * const msrs)
{
	u64 val;
	int i;

	/*
	 * Subtle: stop on all counters to avoid race with setting our
	 * pm callback
	 */
	for (i = 0; i < NUM_COUNTERS; ++i) {
		if (!reset_value[op_x86_phys_to_virt(i)])
			continue;
		rdmsrl(msrs->controls[i].addr, val);
		val &= ~ARCH_PERFMON_EVENTSEL0_ENABLE;
		wrmsrl(msrs->controls[i].addr, val);
	}

	op_amd_stop_ibs();
}
static void op_amd_shutdown(struct op_msrs const * const msrs)
{
	int i;

	for (i = 0; i < NUM_COUNTERS; ++i) {
		if (msrs->counters[i].addr)
			release_perfctr_nmi(MSR_K7_PERFCTR0 + i);
	}
	for (i = 0; i < NUM_CONTROLS; ++i) {
		if (msrs->controls[i].addr)
			release_evntsel_nmi(MSR_K7_EVNTSEL0 + i);
	}
}
#ifdef CONFIG_OPROFILE_IBS
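
/*
 * IBS interrupts are delivered through an extended APIC LVT entry:
 * the entry is set up on each CPU and the chosen LVT offset is then
 * written into the IBSCTL register of every node's northbridge so the
 * hardware knows where to signal IBS samples.
 */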
static u8 ibs_eilvt_off;

static inline void apic_init_ibs_nmi_per_cpu(void *arg)
{
	ibs_eilvt_off = setup_APIC_eilvt_ibs(0, APIC_EILVT_MSG_NMI, 0);
}

static inline void apic_clear_ibs_nmi_per_cpu(void *arg)
{
	setup_APIC_eilvt_ibs(0, APIC_EILVT_MSG_FIX, 1);
}
static int init_ibs_nmi(void)
{
#define IBSCTL_LVTOFFSETVAL		(1 << 8)
#define IBSCTL				0x1cc
	struct pci_dev *cpu_cfg;
	int nodes;
	u32 value = 0;

	/* per CPU setup */
	on_each_cpu(apic_init_ibs_nmi_per_cpu, NULL, 1);

	nodes = 0;
	cpu_cfg = NULL;
	do {
		cpu_cfg = pci_get_device(PCI_VENDOR_ID_AMD,
					 PCI_DEVICE_ID_AMD_10H_NB_MISC,
					 cpu_cfg);
		if (!cpu_cfg)
			break;
		++nodes;
		pci_write_config_dword(cpu_cfg, IBSCTL, ibs_eilvt_off
				       | IBSCTL_LVTOFFSETVAL);
		pci_read_config_dword(cpu_cfg, IBSCTL, &value);
		if (value != (ibs_eilvt_off | IBSCTL_LVTOFFSETVAL)) {
			pci_dev_put(cpu_cfg);
			printk(KERN_DEBUG "Failed to setup IBS LVT offset, "
				"IBSCTL = 0x%08x", value);
			return 1;
		}
	} while (1);

	if (!nodes) {
		printk(KERN_DEBUG "No CPU node configured for IBS");
		return 1;
	}

#ifdef CONFIG_NUMA
	/* Sanity check */
	/* Works only for 64bit with proper numa implementation. */
	if (nodes != num_possible_nodes()) {
		printk(KERN_DEBUG "Failed to setup CPU node(s) for IBS, "
			"found: %d, expected %d",
			nodes, num_possible_nodes());
		return 1;
	}
#endif
	return 0;
}
/* uninitialize the APIC for the IBS interrupts if needed */
static void clear_ibs_nmi(void)
{
	if (has_ibs)
		on_each_cpu(apic_clear_ibs_nmi_per_cpu, NULL, 1);
}

/* initialize the APIC for the IBS interrupts if available */
static void ibs_init(void)
{
	has_ibs = boot_cpu_has(X86_FEATURE_IBS);

	if (!has_ibs)
		return;

	if (init_ibs_nmi()) {
		has_ibs = 0;
		return;
	}

	printk(KERN_INFO "oprofile: AMD IBS detected\n");
}
static void ibs_exit(void)
{
	if (!has_ibs)
		return;

	clear_ibs_nmi();
}
static int (*create_arch_files)(struct super_block *sb, struct dentry *root);
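
/*
 * Wrap the architecture's create_files hook: create the generic files
 * first, then add the ibs_fetch/ and ibs_op/ control files when IBS
 * is available.
 */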
static int setup_ibs_files(struct super_block *sb, struct dentry *root)
{
	struct dentry *dir;
	int ret = 0;

	/* architecture specific files */
	if (create_arch_files)
		ret = create_arch_files(sb, root);

	if (ret)
		return ret;

	if (!has_ibs)
		return ret;

	/* model specific files */

	/* setup some reasonable defaults */
	ibs_config.max_cnt_fetch = 250000;
	ibs_config.fetch_enabled = 0;
	ibs_config.max_cnt_op = 250000;
	ibs_config.op_enabled = 0;
	ibs_config.dispatched_ops = 1;

	dir = oprofilefs_mkdir(sb, root, "ibs_fetch");
	oprofilefs_create_ulong(sb, dir, "enable",
				&ibs_config.fetch_enabled);
	oprofilefs_create_ulong(sb, dir, "max_count",
				&ibs_config.max_cnt_fetch);
	oprofilefs_create_ulong(sb, dir, "rand_enable",
				&ibs_config.rand_en);

	dir = oprofilefs_mkdir(sb, root, "ibs_op");
	oprofilefs_create_ulong(sb, dir, "enable",
				&ibs_config.op_enabled);
	oprofilefs_create_ulong(sb, dir, "max_count",
				&ibs_config.max_cnt_op);
	oprofilefs_create_ulong(sb, dir, "dispatched_ops",
				&ibs_config.dispatched_ops);

	return 0;
}
static int op_amd_init(struct oprofile_operations *ops)
{
	ibs_init();
	create_arch_files = ops->create_files;
	ops->create_files = setup_ibs_files;
	return 0;
}

static void op_amd_exit(void)
{
	ibs_exit();
}
#else

/* no IBS support */

static int op_amd_init(struct oprofile_operations *ops)
{
	return 0;
}

static void op_amd_exit(void) {}

#endif /* CONFIG_OPROFILE_IBS */
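
/*
 * AMD model-specific operations; .reserved marks the event-select bits
 * that must never be modified and .event_mask the valid event number
 * bits.
 */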
struct op_x86_model_spec op_amd_spec = {
	.num_counters		= NUM_COUNTERS,
	.num_controls		= NUM_CONTROLS,
	.num_virt_counters	= NUM_VIRT_COUNTERS,
	.reserved		= MSR_AMD_EVENTSEL_RESERVED,
	.event_mask		= OP_EVENT_MASK,
	.init			= op_amd_init,
	.exit			= op_amd_exit,
	.fill_in_addresses	= &op_amd_fill_in_addresses,
	.setup_ctrs		= &op_amd_setup_ctrs,
	.check_ctrs		= &op_amd_check_ctrs,
	.start			= &op_amd_start,
	.stop			= &op_amd_stop,
	.shutdown		= &op_amd_shutdown,
#ifdef CONFIG_OPROFILE_EVENT_MULTIPLEX
	.switch_ctrl		= &op_mux_switch_ctrl,
#endif
};