/*
 * Performance counter x86 architecture code
 *
 * Copyright (C) 2008 Thomas Gleixner <tglx@linutronix.de>
 * Copyright (C) 2008-2009 Red Hat, Inc., Ingo Molnar
 * Copyright (C) 2009 Jaswinder Singh Rajput
 * Copyright (C) 2009 Advanced Micro Devices, Inc., Robert Richter
 * Copyright (C) 2008-2009 Red Hat, Inc., Peter Zijlstra <pzijlstr@redhat.com>
 *
 * For licensing details see kernel-base/COPYING
 */

#include <linux/perf_counter.h>
#include <linux/capability.h>
#include <linux/notifier.h>
#include <linux/hardirq.h>
#include <linux/kprobes.h>
#include <linux/module.h>
#include <linux/kdebug.h>
#include <linux/sched.h>
#include <linux/uaccess.h>

#include <asm/apic.h>
#include <asm/stacktrace.h>
#include <asm/nmi.h>
static u64 perf_counter_mask __read_mostly;
struct cpu_hw_counters {
        struct perf_counter     *counters[X86_PMC_IDX_MAX];
        unsigned long           used_mask[BITS_TO_LONGS(X86_PMC_IDX_MAX)];
        unsigned long           active_mask[BITS_TO_LONGS(X86_PMC_IDX_MAX)];
        unsigned long           interrupts;
        int                     enabled;
};
/*
 * struct x86_pmu - generic x86 pmu
 */
struct x86_pmu {
        const char      *name;
        int             version;
        int             (*handle_irq)(struct pt_regs *);
        void            (*disable_all)(void);
        void            (*enable_all)(void);
        void            (*enable)(struct hw_perf_counter *, int);
        void            (*disable)(struct hw_perf_counter *, int);
        unsigned        eventsel;
        unsigned        perfctr;
        u64             (*event_map)(int);
        u64             (*raw_event)(u64);
        int             max_events;
        int             num_counters;
        int             num_counters_fixed;
        int             counter_bits;
        u64             counter_mask;
        u64             max_period;
        u64             intel_ctrl;
};
static struct x86_pmu x86_pmu __read_mostly;

static DEFINE_PER_CPU(struct cpu_hw_counters, cpu_hw_counters) = {
        .enabled = 1,
};
/*
 * Intel PerfMon v3. Used on Core2 and later.
 */
static const u64 intel_perfmon_event_map[] =
{
        [PERF_COUNT_CPU_CYCLES]                 = 0x003c,
        [PERF_COUNT_INSTRUCTIONS]               = 0x00c0,
        [PERF_COUNT_CACHE_REFERENCES]           = 0x4f2e,
        [PERF_COUNT_CACHE_MISSES]               = 0x412e,
        [PERF_COUNT_BRANCH_INSTRUCTIONS]        = 0x00c4,
        [PERF_COUNT_BRANCH_MISSES]              = 0x00c5,
        [PERF_COUNT_BUS_CYCLES]                 = 0x013c,
};

static u64 intel_pmu_event_map(int event)
{
        return intel_perfmon_event_map[event];
}
/*
 * Generalized hw caching related event table, filled
 * in on a per model basis. A value of 0 means
 * 'not supported', -1 means 'event makes no sense on
 * this CPU', any other value means the raw event
 * ID.
 */

#define C(x) PERF_COUNT_HW_CACHE_##x

static u64 __read_mostly hw_cache_event_ids
                                [PERF_COUNT_HW_CACHE_MAX]
                                [PERF_COUNT_HW_CACHE_OP_MAX]
                                [PERF_COUNT_HW_CACHE_RESULT_MAX];
static const u64 nehalem_hw_cache_event_ids
                                [PERF_COUNT_HW_CACHE_MAX]
                                [PERF_COUNT_HW_CACHE_OP_MAX]
                                [PERF_COUNT_HW_CACHE_RESULT_MAX] =
{
 [ C(L1D) ] = {
        [ C(OP_READ) ] = {
                [ C(RESULT_ACCESS) ] = 0x0f40, /* L1D_CACHE_LD.MESI            */
                [ C(RESULT_MISS)   ] = 0x0140, /* L1D_CACHE_LD.I_STATE         */
        },
        [ C(OP_WRITE) ] = {
                [ C(RESULT_ACCESS) ] = 0x0f41, /* L1D_CACHE_ST.MESI            */
                [ C(RESULT_MISS)   ] = 0x0141, /* L1D_CACHE_ST.I_STATE         */
        },
        [ C(OP_PREFETCH) ] = {
                [ C(RESULT_ACCESS) ] = 0x014e, /* L1D_PREFETCH.REQUESTS        */
                [ C(RESULT_MISS)   ] = 0x024e, /* L1D_PREFETCH.MISS            */
        },
 },
 [ C(L1I ) ] = {
        [ C(OP_READ) ] = {
                [ C(RESULT_ACCESS) ] = 0x0480, /* L1I.READS                    */
                [ C(RESULT_MISS)   ] = 0x0280, /* L1I.MISSES                   */
        },
        [ C(OP_WRITE) ] = {
                [ C(RESULT_ACCESS) ] = -1,
                [ C(RESULT_MISS)   ] = -1,
        },
        [ C(OP_PREFETCH) ] = {
                [ C(RESULT_ACCESS) ] = 0x0,
                [ C(RESULT_MISS)   ] = 0x0,
        },
 },
 [ C(L2  ) ] = {
        [ C(OP_READ) ] = {
                [ C(RESULT_ACCESS) ] = 0x0324, /* L2_RQSTS.LOADS               */
                [ C(RESULT_MISS)   ] = 0x0224, /* L2_RQSTS.LD_MISS             */
        },
        [ C(OP_WRITE) ] = {
                [ C(RESULT_ACCESS) ] = 0x0c24, /* L2_RQSTS.RFOS                */
                [ C(RESULT_MISS)   ] = 0x0824, /* L2_RQSTS.RFO_MISS            */
        },
        [ C(OP_PREFETCH) ] = {
                [ C(RESULT_ACCESS) ] = 0xc024, /* L2_RQSTS.PREFETCHES          */
                [ C(RESULT_MISS)   ] = 0x8024, /* L2_RQSTS.PREFETCH_MISS       */
        },
 },
 [ C(DTLB) ] = {
        [ C(OP_READ) ] = {
                [ C(RESULT_ACCESS) ] = 0x0f40, /* L1D_CACHE_LD.MESI   (alias)  */
                [ C(RESULT_MISS)   ] = 0x0108, /* DTLB_LOAD_MISSES.ANY         */
        },
        [ C(OP_WRITE) ] = {
                [ C(RESULT_ACCESS) ] = 0x0f41, /* L1D_CACHE_ST.MESI   (alias)  */
                [ C(RESULT_MISS)   ] = 0x010c, /* MEM_STORE_RETIRED.DTLB_MISS  */
        },
        [ C(OP_PREFETCH) ] = {
                [ C(RESULT_ACCESS) ] = 0x0,
                [ C(RESULT_MISS)   ] = 0x0,
        },
 },
 [ C(ITLB) ] = {
        [ C(OP_READ) ] = {
                [ C(RESULT_ACCESS) ] = 0x01c0, /* INST_RETIRED.ANY_P           */
                [ C(RESULT_MISS)   ] = 0x0185, /* ITLB_MISS_RETIRED            */
        },
        [ C(OP_WRITE) ] = {
                [ C(RESULT_ACCESS) ] = -1,
                [ C(RESULT_MISS)   ] = -1,
        },
        [ C(OP_PREFETCH) ] = {
                [ C(RESULT_ACCESS) ] = -1,
                [ C(RESULT_MISS)   ] = -1,
        },
 },
 [ C(BPU ) ] = {
        [ C(OP_READ) ] = {
                [ C(RESULT_ACCESS) ] = 0x00c4, /* BR_INST_RETIRED.ALL_BRANCHES */
                [ C(RESULT_MISS)   ] = 0x03e8, /* BPU_CLEARS.ANY               */
        },
        [ C(OP_WRITE) ] = {
                [ C(RESULT_ACCESS) ] = -1,
                [ C(RESULT_MISS)   ] = -1,
        },
        [ C(OP_PREFETCH) ] = {
                [ C(RESULT_ACCESS) ] = -1,
                [ C(RESULT_MISS)   ] = -1,
        },
 },
};
static const u64 core2_hw_cache_event_ids
                                [PERF_COUNT_HW_CACHE_MAX]
                                [PERF_COUNT_HW_CACHE_OP_MAX]
                                [PERF_COUNT_HW_CACHE_RESULT_MAX] =
{
        /* To be filled in */
};

static const u64 atom_hw_cache_event_ids
                                [PERF_COUNT_HW_CACHE_MAX]
                                [PERF_COUNT_HW_CACHE_OP_MAX]
                                [PERF_COUNT_HW_CACHE_RESULT_MAX] =
{
        /* To be filled in */
};
static u64 intel_pmu_raw_event(u64 event)
{
#define CORE_EVNTSEL_EVENT_MASK         0x000000FFULL
#define CORE_EVNTSEL_UNIT_MASK          0x0000FF00ULL
#define CORE_EVNTSEL_EDGE_MASK          0x00040000ULL
#define CORE_EVNTSEL_INV_MASK           0x00800000ULL
#define CORE_EVNTSEL_COUNTER_MASK       0xFF000000ULL

#define CORE_EVNTSEL_MASK               \
        (CORE_EVNTSEL_EVENT_MASK |      \
         CORE_EVNTSEL_UNIT_MASK  |      \
         CORE_EVNTSEL_EDGE_MASK  |      \
         CORE_EVNTSEL_INV_MASK   |      \
         CORE_EVNTSEL_COUNTER_MASK)

        return event & CORE_EVNTSEL_MASK;
}
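
/*
 * Illustrative note added by the editor, not original text: CORE_EVNTSEL_MASK
 * keeps only the event-select byte (bits 0-7), unit mask (bits 8-15), edge
 * detect (bit 18), invert (bit 23) and counter-mask byte (bits 24-31) of a
 * user-supplied raw config. Anything else, notably the interrupt and enable
 * bits, is stripped here and later supplied by the kernel itself when the
 * counter is programmed.
 */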
/*
 * AMD Performance Monitor K7 and later.
 */
static const u64 amd_perfmon_event_map[] =
{
        [PERF_COUNT_CPU_CYCLES]                 = 0x0076,
        [PERF_COUNT_INSTRUCTIONS]               = 0x00c0,
        [PERF_COUNT_CACHE_REFERENCES]           = 0x0080,
        [PERF_COUNT_CACHE_MISSES]               = 0x0081,
        [PERF_COUNT_BRANCH_INSTRUCTIONS]        = 0x00c4,
        [PERF_COUNT_BRANCH_MISSES]              = 0x00c5,
};

static u64 amd_pmu_event_map(int event)
{
        return amd_perfmon_event_map[event];
}
static u64 amd_pmu_raw_event(u64 event)
{
#define K7_EVNTSEL_EVENT_MASK   0x7000000FFULL
#define K7_EVNTSEL_UNIT_MASK    0x00000FF00ULL
#define K7_EVNTSEL_EDGE_MASK    0x000040000ULL
#define K7_EVNTSEL_INV_MASK     0x000800000ULL
#define K7_EVNTSEL_COUNTER_MASK 0x0FF000000ULL

#define K7_EVNTSEL_MASK                 \
        (K7_EVNTSEL_EVENT_MASK |        \
         K7_EVNTSEL_UNIT_MASK  |        \
         K7_EVNTSEL_EDGE_MASK  |        \
         K7_EVNTSEL_INV_MASK   |        \
         K7_EVNTSEL_COUNTER_MASK)

        return event & K7_EVNTSEL_MASK;
}
/*
 * Propagate counter elapsed time into the generic counter.
 * Can only be executed on the CPU where the counter is active.
 * Returns the delta events processed.
 */
static u64
x86_perf_counter_update(struct perf_counter *counter,
                        struct hw_perf_counter *hwc, int idx)
{
        int shift = 64 - x86_pmu.counter_bits;
        u64 prev_raw_count, new_raw_count;
        s64 delta;

        /*
         * Careful: an NMI might modify the previous counter value.
         *
         * Our tactic to handle this is to first atomically read and
         * exchange a new raw count - then add that new-prev delta
         * count to the generic counter atomically:
         */
again:
        prev_raw_count = atomic64_read(&hwc->prev_count);
        rdmsrl(hwc->counter_base + idx, new_raw_count);

        if (atomic64_cmpxchg(&hwc->prev_count, prev_raw_count,
                                        new_raw_count) != prev_raw_count)
                goto again;

        /*
         * Now we have the new raw value and have updated the prev
         * timestamp already. We can now calculate the elapsed delta
         * (counter-)time and add that to the generic counter.
         *
         * Careful, not all hw sign-extends above the physical width
         * of the count:
         */
        delta = (new_raw_count << shift) - (prev_raw_count << shift);
        delta >>= shift;

        atomic64_add(delta, &counter->count);
        atomic64_sub(delta, &hwc->period_left);

        return new_raw_count;
}
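
/*
 * Worked example added for illustration, not original text: with 48-bit
 * counters the shift is 16. Shifting both raw values into the top 48 bits
 * before subtracting makes the difference wrap correctly at the hardware
 * width, so a counter that rolled over from 0xffffffffffff to
 * 0x000000000005 still yields the correct small delta (6) after the
 * arithmetic shift back down, even though the raw MSR value is not
 * sign-extended by the hardware.
 */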
static atomic_t active_counters;
static DEFINE_MUTEX(pmc_reserve_mutex);
static bool reserve_pmc_hardware(void)
{
        int i;

        if (nmi_watchdog == NMI_LOCAL_APIC)
                disable_lapic_nmi_watchdog();

        for (i = 0; i < x86_pmu.num_counters; i++) {
                if (!reserve_perfctr_nmi(x86_pmu.perfctr + i))
                        goto perfctr_fail;
        }

        for (i = 0; i < x86_pmu.num_counters; i++) {
                if (!reserve_evntsel_nmi(x86_pmu.eventsel + i))
                        goto eventsel_fail;
        }

        return true;

eventsel_fail:
        for (i--; i >= 0; i--)
                release_evntsel_nmi(x86_pmu.eventsel + i);

        i = x86_pmu.num_counters;

perfctr_fail:
        for (i--; i >= 0; i--)
                release_perfctr_nmi(x86_pmu.perfctr + i);

        if (nmi_watchdog == NMI_LOCAL_APIC)
                enable_lapic_nmi_watchdog();

        return false;
}
static void release_pmc_hardware(void)
{
        int i;

        for (i = 0; i < x86_pmu.num_counters; i++) {
                release_perfctr_nmi(x86_pmu.perfctr + i);
                release_evntsel_nmi(x86_pmu.eventsel + i);
        }

        if (nmi_watchdog == NMI_LOCAL_APIC)
                enable_lapic_nmi_watchdog();
}
static void hw_perf_counter_destroy(struct perf_counter *counter)
{
        if (atomic_dec_and_mutex_lock(&active_counters, &pmc_reserve_mutex)) {
                release_pmc_hardware();
                mutex_unlock(&pmc_reserve_mutex);
        }
}
static inline int x86_pmu_initialized(void)
{
        return x86_pmu.handle_irq != NULL;
}
static inline int
set_ext_hw_attr(struct hw_perf_counter *hwc, struct perf_counter_attr *attr)
{
        unsigned int cache_type, cache_op, cache_result;
        u64 config, val;

        config = attr->config;

        cache_type = (config >> 0) & 0xff;
        if (cache_type >= PERF_COUNT_HW_CACHE_MAX)
                return -EINVAL;

        cache_op = (config >>  8) & 0xff;
        if (cache_op >= PERF_COUNT_HW_CACHE_OP_MAX)
                return -EINVAL;

        cache_result = (config >> 16) & 0xff;
        if (cache_result >= PERF_COUNT_HW_CACHE_RESULT_MAX)
                return -EINVAL;

        val = hw_cache_event_ids[cache_type][cache_op][cache_result];

        if (val == 0)
                return -ENOENT;

        if (val == -1)
                return -EINVAL;

        hwc->config |= val;

        return 0;
}
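
/*
 * Illustration added by the editor, not original text: a hw-cache config
 * packs three selector bytes, decoded above as type (bits 0-7), op
 * (bits 8-15) and result (bits 16-23). For example
 *
 *      (C(RESULT_MISS) << 16) | (C(OP_READ) << 8) | C(L1D)
 *
 * asks for L1 data-cache read misses, which hw_cache_event_ids[] maps to
 * the model-specific raw event (0x0140 on Nehalem, per the table above).
 */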
/*
 * Setup the hardware configuration for a given attr_type
 */
static int __hw_perf_counter_init(struct perf_counter *counter)
{
        struct perf_counter_attr *attr = &counter->attr;
        struct hw_perf_counter *hwc = &counter->hw;
        int err;

        if (!x86_pmu_initialized())
                return -ENODEV;

        err = 0;
        if (!atomic_inc_not_zero(&active_counters)) {
                mutex_lock(&pmc_reserve_mutex);
                if (atomic_read(&active_counters) == 0 && !reserve_pmc_hardware())
                        err = -EBUSY;
                else
                        atomic_inc(&active_counters);
                mutex_unlock(&pmc_reserve_mutex);
        }
        if (err)
                return err;

        /*
         * Generate PMC IRQs:
         * (keep 'enabled' bit clear for now)
         */
        hwc->config = ARCH_PERFMON_EVENTSEL_INT;

        /*
         * Count user and OS events unless requested not to.
         */
        if (!attr->exclude_user)
                hwc->config |= ARCH_PERFMON_EVENTSEL_USR;
        if (!attr->exclude_kernel)
                hwc->config |= ARCH_PERFMON_EVENTSEL_OS;

        if (!hwc->sample_period)
                hwc->sample_period = x86_pmu.max_period;

        atomic64_set(&hwc->period_left, hwc->sample_period);
        counter->destroy = hw_perf_counter_destroy;

        /*
         * Raw event type provide the config in the event structure
         */
        if (attr->type == PERF_TYPE_RAW) {
                hwc->config |= x86_pmu.raw_event(attr->config);
                return 0;
        }

        if (attr->type == PERF_TYPE_HW_CACHE)
                return set_ext_hw_attr(hwc, attr);

        if (attr->config >= x86_pmu.max_events)
                return -EINVAL;

        /*
         * The generic map:
         */
        hwc->config |= x86_pmu.event_map(attr->config);

        return 0;
}
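
/*
 * Summary added by the editor, not original text: after this function
 * hwc->config holds ARCH_PERFMON_EVENTSEL_INT, plus EVENTSEL_USR and/or
 * EVENTSEL_OS unless excluded, plus either the raw config (filtered through
 * ->raw_event()), the cache-table entry, or the generic ->event_map()
 * encoding. The ENABLE bit is only OR-ed in later, when the counter is
 * actually scheduled onto a PMC.
 */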
static void intel_pmu_disable_all(void)
{
        wrmsrl(MSR_CORE_PERF_GLOBAL_CTRL, 0);
}
static void amd_pmu_disable_all(void)
{
        struct cpu_hw_counters *cpuc = &__get_cpu_var(cpu_hw_counters);
        int idx;

        if (!cpuc->enabled)
                return;

        cpuc->enabled = 0;
        /*
         * ensure we write the disable before we start disabling the
         * counters proper, so that amd_pmu_enable_counter() does the
         * right thing.
         */
        barrier();

        for (idx = 0; idx < x86_pmu.num_counters; idx++) {
                u64 val;

                if (!test_bit(idx, cpuc->active_mask))
                        continue;
                rdmsrl(MSR_K7_EVNTSEL0 + idx, val);
                if (!(val & ARCH_PERFMON_EVENTSEL0_ENABLE))
                        continue;
                val &= ~ARCH_PERFMON_EVENTSEL0_ENABLE;
                wrmsrl(MSR_K7_EVNTSEL0 + idx, val);
        }
}
void hw_perf_disable(void)
{
        if (!x86_pmu_initialized())
                return;
        return x86_pmu.disable_all();
}
static void intel_pmu_enable_all(void)
{
        wrmsrl(MSR_CORE_PERF_GLOBAL_CTRL, x86_pmu.intel_ctrl);
}
static void amd_pmu_enable_all(void)
{
        struct cpu_hw_counters *cpuc = &__get_cpu_var(cpu_hw_counters);
        int idx;

        if (cpuc->enabled)
                return;

        cpuc->enabled = 1;
        barrier();

        for (idx = 0; idx < x86_pmu.num_counters; idx++) {
                u64 val;

                if (!test_bit(idx, cpuc->active_mask))
                        continue;
                rdmsrl(MSR_K7_EVNTSEL0 + idx, val);
                if (val & ARCH_PERFMON_EVENTSEL0_ENABLE)
                        continue;
                val |= ARCH_PERFMON_EVENTSEL0_ENABLE;
                wrmsrl(MSR_K7_EVNTSEL0 + idx, val);
        }
}
void hw_perf_enable(void)
{
        if (!x86_pmu_initialized())
                return;
        x86_pmu.enable_all();
}
static inline u64 intel_pmu_get_status(void)
{
        u64 status;

        rdmsrl(MSR_CORE_PERF_GLOBAL_STATUS, status);

        return status;
}

static inline void intel_pmu_ack_status(u64 ack)
{
        wrmsrl(MSR_CORE_PERF_GLOBAL_OVF_CTRL, ack);
}
static inline void x86_pmu_enable_counter(struct hw_perf_counter *hwc, int idx)
{
        int err;
        err = checking_wrmsrl(hwc->config_base + idx,
                              hwc->config | ARCH_PERFMON_EVENTSEL0_ENABLE);
}

static inline void x86_pmu_disable_counter(struct hw_perf_counter *hwc, int idx)
{
        int err;
        err = checking_wrmsrl(hwc->config_base + idx,
                              hwc->config);
}
static inline void
intel_pmu_disable_fixed(struct hw_perf_counter *hwc, int __idx)
{
        int idx = __idx - X86_PMC_IDX_FIXED;
        u64 ctrl_val, mask;
        int err;

        mask = 0xfULL << (idx * 4);

        rdmsrl(hwc->config_base, ctrl_val);
        ctrl_val &= ~mask;
        err = checking_wrmsrl(hwc->config_base, ctrl_val);
}
static inline void
intel_pmu_disable_counter(struct hw_perf_counter *hwc, int idx)
{
        if (unlikely(hwc->config_base == MSR_ARCH_PERFMON_FIXED_CTR_CTRL)) {
                intel_pmu_disable_fixed(hwc, idx);
                return;
        }

        x86_pmu_disable_counter(hwc, idx);
}

static inline void
amd_pmu_disable_counter(struct hw_perf_counter *hwc, int idx)
{
        x86_pmu_disable_counter(hwc, idx);
}
static DEFINE_PER_CPU(u64, prev_left[X86_PMC_IDX_MAX]);
/*
 * Set the next IRQ period, based on the hwc->period_left value.
 * To be called with the counter disabled in hw:
 */
static int
x86_perf_counter_set_period(struct perf_counter *counter,
                            struct hw_perf_counter *hwc, int idx)
{
        s64 left = atomic64_read(&hwc->period_left);
        s64 period = hwc->sample_period;
        int err, ret = 0;

        /*
         * If we are way outside a reasonable range then just skip forward:
         */
        if (unlikely(left <= -period)) {
                left = period;
                atomic64_set(&hwc->period_left, left);
                ret = 1;
        }

        if (unlikely(left <= 0)) {
                left += period;
                atomic64_set(&hwc->period_left, left);
                ret = 1;
        }
        /*
         * Quirk: certain CPUs dont like it if just 1 event is left:
         */
        if (unlikely(left < 2))
                left = 2;

        if (left > x86_pmu.max_period)
                left = x86_pmu.max_period;

        per_cpu(prev_left[idx], smp_processor_id()) = left;

        /*
         * The hw counter starts counting from this counter offset,
         * mark it to be able to extract future deltas:
         */
        atomic64_set(&hwc->prev_count, (u64)-left);

        err = checking_wrmsrl(hwc->counter_base + idx,
                             (u64)(-left) & x86_pmu.counter_mask);

        return ret;
}
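
/*
 * Note added by the editor, not original text: the PMC is preloaded with
 * -left (truncated to counter_mask bits), so it counts up and overflows
 * after exactly 'left' further events, which is what raises the next PMI.
 * prev_count is set to the same value so that the next
 * x86_perf_counter_update() computes the delta from this starting point.
 */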
static inline void
intel_pmu_enable_fixed(struct hw_perf_counter *hwc, int __idx)
{
        int idx = __idx - X86_PMC_IDX_FIXED;
        u64 ctrl_val, bits, mask;
        int err;

        /*
         * Enable IRQ generation (0x8),
         * and enable ring-3 counting (0x2) and ring-0 counting (0x1)
         * if requested:
         */
        bits = 0x8ULL;
        if (hwc->config & ARCH_PERFMON_EVENTSEL_USR)
                bits |= 0x2;
        if (hwc->config & ARCH_PERFMON_EVENTSEL_OS)
                bits |= 0x1;
        bits <<= (idx * 4);
        mask = 0xfULL << (idx * 4);

        rdmsrl(hwc->config_base, ctrl_val);
        ctrl_val &= ~mask;
        ctrl_val |= bits;
        err = checking_wrmsrl(hwc->config_base, ctrl_val);
}
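
/*
 * Note added by the editor, not original text: MSR_ARCH_PERFMON_FIXED_CTR_CTRL
 * packs one 4-bit control field per fixed counter, which is why both the
 * enable path above and the disable path earlier first clear
 * 0xf << (idx * 4) and then write back only this counter's PMI and
 * ring-level bits.
 */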
static void intel_pmu_enable_counter(struct hw_perf_counter *hwc, int idx)
{
        if (unlikely(hwc->config_base == MSR_ARCH_PERFMON_FIXED_CTR_CTRL)) {
                intel_pmu_enable_fixed(hwc, idx);
                return;
        }

        x86_pmu_enable_counter(hwc, idx);
}

static void amd_pmu_enable_counter(struct hw_perf_counter *hwc, int idx)
{
        struct cpu_hw_counters *cpuc = &__get_cpu_var(cpu_hw_counters);

        if (cpuc->enabled)
                x86_pmu_enable_counter(hwc, idx);
        else
                x86_pmu_disable_counter(hwc, idx);
}
static int
fixed_mode_idx(struct perf_counter *counter, struct hw_perf_counter *hwc)
{
        unsigned int event;

        if (!x86_pmu.num_counters_fixed)
                return -1;

        event = hwc->config & ARCH_PERFMON_EVENT_MASK;

        if (unlikely(event == x86_pmu.event_map(PERF_COUNT_INSTRUCTIONS)))
                return X86_PMC_IDX_FIXED_INSTRUCTIONS;
        if (unlikely(event == x86_pmu.event_map(PERF_COUNT_CPU_CYCLES)))
                return X86_PMC_IDX_FIXED_CPU_CYCLES;
        if (unlikely(event == x86_pmu.event_map(PERF_COUNT_BUS_CYCLES)))
                return X86_PMC_IDX_FIXED_BUS_CYCLES;

        return -1;
}
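
/*
 * Note added by the editor, not original text: only the three architectural
 * events checked above can be scheduled on Intel fixed-purpose counters;
 * for every other event this returns -1 and x86_pmu_enable() below falls
 * back to allocating a generic PMC.
 */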
/*
 * Find a PMC slot for the freshly enabled / scheduled in counter:
 */
static int x86_pmu_enable(struct perf_counter *counter)
{
        struct cpu_hw_counters *cpuc = &__get_cpu_var(cpu_hw_counters);
        struct hw_perf_counter *hwc = &counter->hw;
        int idx;

        idx = fixed_mode_idx(counter, hwc);
        if (idx >= 0) {
                /*
                 * Try to get the fixed counter, if that is already taken
                 * then try to get a generic counter:
                 */
                if (test_and_set_bit(idx, cpuc->used_mask))
                        goto try_generic;

                hwc->config_base = MSR_ARCH_PERFMON_FIXED_CTR_CTRL;
                /*
                 * We set it so that counter_base + idx in wrmsr/rdmsr maps to
                 * MSR_ARCH_PERFMON_FIXED_CTR0 ... CTR2:
                 */
                hwc->counter_base =
                        MSR_ARCH_PERFMON_FIXED_CTR0 - X86_PMC_IDX_FIXED;
                hwc->idx = idx;
        } else {
                idx = hwc->idx;
                /* Try to get the previous generic counter again */
                if (test_and_set_bit(idx, cpuc->used_mask)) {
try_generic:
                        idx = find_first_zero_bit(cpuc->used_mask,
                                                  x86_pmu.num_counters);
                        if (idx == x86_pmu.num_counters)
                                return -EAGAIN;

                        set_bit(idx, cpuc->used_mask);
                        hwc->idx = idx;
                }
                hwc->config_base  = x86_pmu.eventsel;
                hwc->counter_base = x86_pmu.perfctr;
        }

        perf_counters_lapic_init();

        x86_pmu.disable(hwc, idx);

        cpuc->counters[idx] = counter;
        set_bit(idx, cpuc->active_mask);

        x86_perf_counter_set_period(counter, hwc, idx);
        x86_pmu.enable(hwc, idx);

        return 0;
}
static void x86_pmu_unthrottle(struct perf_counter *counter)
{
        struct cpu_hw_counters *cpuc = &__get_cpu_var(cpu_hw_counters);
        struct hw_perf_counter *hwc = &counter->hw;

        if (WARN_ON_ONCE(hwc->idx >= X86_PMC_IDX_MAX ||
                                cpuc->counters[hwc->idx] != counter))
                return;

        x86_pmu.enable(hwc, hwc->idx);
}
void perf_counter_print_debug(void)
{
        u64 ctrl, status, overflow, pmc_ctrl, pmc_count, prev_left, fixed;
        struct cpu_hw_counters *cpuc;
        unsigned long flags;
        int cpu, idx;

        if (!x86_pmu.num_counters)
                return;

        local_irq_save(flags);

        cpu = smp_processor_id();
        cpuc = &per_cpu(cpu_hw_counters, cpu);

        if (x86_pmu.version >= 2) {
                rdmsrl(MSR_CORE_PERF_GLOBAL_CTRL, ctrl);
                rdmsrl(MSR_CORE_PERF_GLOBAL_STATUS, status);
                rdmsrl(MSR_CORE_PERF_GLOBAL_OVF_CTRL, overflow);
                rdmsrl(MSR_ARCH_PERFMON_FIXED_CTR_CTRL, fixed);

                pr_info("CPU#%d: ctrl:       %016llx\n", cpu, ctrl);
                pr_info("CPU#%d: status:     %016llx\n", cpu, status);
                pr_info("CPU#%d: overflow:   %016llx\n", cpu, overflow);
                pr_info("CPU#%d: fixed:      %016llx\n", cpu, fixed);
        }
        pr_info("CPU#%d: used:       %016llx\n", cpu, *(u64 *)cpuc->used_mask);

        for (idx = 0; idx < x86_pmu.num_counters; idx++) {
                rdmsrl(x86_pmu.eventsel + idx, pmc_ctrl);
                rdmsrl(x86_pmu.perfctr  + idx, pmc_count);

                prev_left = per_cpu(prev_left[idx], cpu);

                pr_info("CPU#%d: gen-PMC%d ctrl:  %016llx\n",
                        cpu, idx, pmc_ctrl);
                pr_info("CPU#%d: gen-PMC%d count: %016llx\n",
                        cpu, idx, pmc_count);
                pr_info("CPU#%d: gen-PMC%d left:  %016llx\n",
                        cpu, idx, prev_left);
        }
        for (idx = 0; idx < x86_pmu.num_counters_fixed; idx++) {
                rdmsrl(MSR_ARCH_PERFMON_FIXED_CTR0 + idx, pmc_count);

                pr_info("CPU#%d: fixed-PMC%d count: %016llx\n",
                        cpu, idx, pmc_count);
        }
        local_irq_restore(flags);
}
static void x86_pmu_disable(struct perf_counter *counter)
{
        struct cpu_hw_counters *cpuc = &__get_cpu_var(cpu_hw_counters);
        struct hw_perf_counter *hwc = &counter->hw;
        int idx = hwc->idx;

        /*
         * Must be done before we disable, otherwise the nmi handler
         * could reenable again:
         */
        clear_bit(idx, cpuc->active_mask);
        x86_pmu.disable(hwc, idx);

        /*
         * Make sure the cleared pointer becomes visible before we
         * (potentially) free the counter:
         */
        barrier();

        /*
         * Drain the remaining delta count out of a counter
         * that we are disabling:
         */
        x86_perf_counter_update(counter, hwc, idx);
        cpuc->counters[idx] = NULL;
        clear_bit(idx, cpuc->used_mask);
}
/*
 * Save and restart an expired counter. Called by NMI contexts,
 * so it has to be careful about preempting normal counter ops:
 */
static int intel_pmu_save_and_restart(struct perf_counter *counter)
{
        struct hw_perf_counter *hwc = &counter->hw;
        int idx = hwc->idx;
        int ret;

        x86_perf_counter_update(counter, hwc, idx);
        ret = x86_perf_counter_set_period(counter, hwc, idx);

        if (counter->state == PERF_COUNTER_STATE_ACTIVE)
                intel_pmu_enable_counter(hwc, idx);

        return ret;
}
static void intel_pmu_reset(void)
{
        unsigned long flags;
        int idx;

        if (!x86_pmu.num_counters)
                return;

        local_irq_save(flags);

        printk("clearing PMU state on CPU#%d\n", smp_processor_id());

        for (idx = 0; idx < x86_pmu.num_counters; idx++) {
                checking_wrmsrl(x86_pmu.eventsel + idx, 0ull);
                checking_wrmsrl(x86_pmu.perfctr  + idx, 0ull);
        }
        for (idx = 0; idx < x86_pmu.num_counters_fixed; idx++) {
                checking_wrmsrl(MSR_ARCH_PERFMON_FIXED_CTR0 + idx, 0ull);
        }

        local_irq_restore(flags);
}
/*
 * This handler is triggered by the local APIC, so the APIC IRQ handling
 * rules apply:
 */
static int intel_pmu_handle_irq(struct pt_regs *regs)
{
        struct cpu_hw_counters *cpuc;
        int bit, cpu, loops;
        u64 ack, status;

        cpu = smp_processor_id();
        cpuc = &per_cpu(cpu_hw_counters, cpu);

        status = intel_pmu_get_status();
        if (!status)
                return 0;

        loops = 0;
again:
        if (++loops > 100) {
                WARN_ONCE(1, "perfcounters: irq loop stuck!\n");
                perf_counter_print_debug();
                return 1;
        }

        inc_irq_stat(apic_perf_irqs);
        ack = status;
        for_each_bit(bit, (unsigned long *)&status, X86_PMC_IDX_MAX) {
                struct perf_counter *counter = cpuc->counters[bit];

                clear_bit(bit, (unsigned long *) &status);
                if (!test_bit(bit, cpuc->active_mask))
                        continue;

                if (!intel_pmu_save_and_restart(counter))
                        continue;

                if (perf_counter_overflow(counter, 1, regs, 0))
                        intel_pmu_disable_counter(&counter->hw, bit);
        }

        intel_pmu_ack_status(ack);

        /*
         * Repeat if there is more work to be done:
         */
        status = intel_pmu_get_status();
        if (status)
                goto again;

        return 1;
}
static int amd_pmu_handle_irq(struct pt_regs *regs)
{
        int cpu, idx, handled = 0;
        struct cpu_hw_counters *cpuc;
        struct perf_counter *counter;
        struct hw_perf_counter *hwc;
        u64 val;

        cpu = smp_processor_id();
        cpuc = &per_cpu(cpu_hw_counters, cpu);

        for (idx = 0; idx < x86_pmu.num_counters; idx++) {
                if (!test_bit(idx, cpuc->active_mask))
                        continue;

                counter = cpuc->counters[idx];
                hwc = &counter->hw;

                val = x86_perf_counter_update(counter, hwc, idx);
                if (val & (1ULL << (x86_pmu.counter_bits - 1)))
                        continue;

                /* counter overflow */
                handled = 1;
                inc_irq_stat(apic_perf_irqs);
                if (!x86_perf_counter_set_period(counter, hwc, idx))
                        continue;

                if (perf_counter_overflow(counter, 1, regs, 0))
                        amd_pmu_disable_counter(hwc, idx);
        }

        return handled;
}
void smp_perf_pending_interrupt(struct pt_regs *regs)
{
        irq_enter();
        ack_APIC_irq();
        inc_irq_stat(apic_pending_irqs);
        perf_counter_do_pending();
        irq_exit();
}
void set_perf_counter_pending(void)
{
        apic->send_IPI_self(LOCAL_PENDING_VECTOR);
}
void perf_counters_lapic_init(void)
{
        if (!x86_pmu_initialized())
                return;

        /*
         * Always use NMI for PMU
         */
        apic_write(APIC_LVTPC, APIC_DM_NMI);
}
static int __kprobes
perf_counter_nmi_handler(struct notifier_block *self,
                         unsigned long cmd, void *__args)
{
        struct die_args *args = __args;
        struct pt_regs *regs;

        if (!atomic_read(&active_counters))
                return NOTIFY_DONE;

        switch (cmd) {
        case DIE_NMI:
        case DIE_NMI_IPI:
                break;

        default:
                return NOTIFY_DONE;
        }

        regs = args->regs;

        apic_write(APIC_LVTPC, APIC_DM_NMI);
        /*
         * Can't rely on the handled return value to say it was our NMI, two
         * counters could trigger 'simultaneously' raising two back-to-back NMIs.
         *
         * If the first NMI handles both, the latter will be empty and daze
         * the CPU.
         */
        x86_pmu.handle_irq(regs);

        return NOTIFY_STOP;
}
static __read_mostly struct notifier_block perf_counter_nmi_notifier = {
        .notifier_call          = perf_counter_nmi_handler,
};
static struct x86_pmu intel_pmu = {
        .name                   = "Intel",
        .handle_irq             = intel_pmu_handle_irq,
        .disable_all            = intel_pmu_disable_all,
        .enable_all             = intel_pmu_enable_all,
        .enable                 = intel_pmu_enable_counter,
        .disable                = intel_pmu_disable_counter,
        .eventsel               = MSR_ARCH_PERFMON_EVENTSEL0,
        .perfctr                = MSR_ARCH_PERFMON_PERFCTR0,
        .event_map              = intel_pmu_event_map,
        .raw_event              = intel_pmu_raw_event,
        .max_events             = ARRAY_SIZE(intel_perfmon_event_map),
        /*
         * Intel PMCs cannot be accessed sanely above 32 bit width,
         * so we install an artificial 1<<31 period regardless of
         * the generic counter period:
         */
        .max_period             = (1ULL << 31) - 1,
};
static struct x86_pmu amd_pmu = {
        .name                   = "AMD",
        .handle_irq             = amd_pmu_handle_irq,
        .disable_all            = amd_pmu_disable_all,
        .enable_all             = amd_pmu_enable_all,
        .enable                 = amd_pmu_enable_counter,
        .disable                = amd_pmu_disable_counter,
        .eventsel               = MSR_K7_EVNTSEL0,
        .perfctr                = MSR_K7_PERFCTR0,
        .event_map              = amd_pmu_event_map,
        .raw_event              = amd_pmu_raw_event,
        .max_events             = ARRAY_SIZE(amd_perfmon_event_map),
        .num_counters           = 4,
        .counter_bits           = 48,
        .counter_mask           = (1ULL << 48) - 1,
        /* use highest bit to detect overflow */
        .max_period             = (1ULL << 47) - 1,
};
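
/*
 * Note added by the editor, not original text: K7+ counters are 48 bits
 * wide and there is no global overflow status register, so
 * amd_pmu_handle_irq() treats a counter whose bit 47 has cleared as
 * overflowed (it was preloaded with a negative value, so the top bit stays
 * set until the period expires). Capping max_period at 2^47 - 1 is what
 * keeps that invariant.
 */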
static int intel_pmu_init(void)
{
        union cpuid10_edx edx;
        union cpuid10_eax eax;
        unsigned int unused;
        unsigned int ebx;
        int version;

        if (!cpu_has(&boot_cpu_data, X86_FEATURE_ARCH_PERFMON))
                return -ENODEV;

        /*
         * Check whether the Architectural PerfMon supports
         * Branch Misses Retired Event or not.
         */
        cpuid(10, &eax.full, &ebx, &unused, &edx.full);
        if (eax.split.mask_length <= ARCH_PERFMON_BRANCH_MISSES_RETIRED)
                return -ENODEV;

        version = eax.split.version_id;
        if (version < 2)
                return -ENODEV;

        x86_pmu = intel_pmu;
        x86_pmu.version = version;
        x86_pmu.num_counters = eax.split.num_counters;

        /*
         * Quirk: v2 perfmon does not report fixed-purpose counters, so
         * assume at least 3 counters:
         */
        x86_pmu.num_counters_fixed = max((int)edx.split.num_counters_fixed, 3);

        x86_pmu.counter_bits = eax.split.bit_width;
        x86_pmu.counter_mask = (1ULL << eax.split.bit_width) - 1;

        rdmsrl(MSR_CORE_PERF_GLOBAL_CTRL, x86_pmu.intel_ctrl);

        /*
         * Install the hw-cache-events table:
         */
        switch (boot_cpu_data.x86_model) {
        case 17:
                memcpy(hw_cache_event_ids, core2_hw_cache_event_ids,
                       sizeof(u64)*PERF_COUNT_HW_CACHE_MAX*
                       PERF_COUNT_HW_CACHE_OP_MAX*PERF_COUNT_HW_CACHE_RESULT_MAX);

                pr_info("... installed Core2 event tables\n");
                break;
        default:
        case 26:
                memcpy(hw_cache_event_ids, nehalem_hw_cache_event_ids,
                       sizeof(u64)*PERF_COUNT_HW_CACHE_MAX*
                       PERF_COUNT_HW_CACHE_OP_MAX*PERF_COUNT_HW_CACHE_RESULT_MAX);

                pr_info("... installed Nehalem/Corei7 event tables\n");
                break;
        case 28:
                memcpy(hw_cache_event_ids, atom_hw_cache_event_ids,
                       sizeof(u64)*PERF_COUNT_HW_CACHE_MAX*
                       PERF_COUNT_HW_CACHE_OP_MAX*PERF_COUNT_HW_CACHE_RESULT_MAX);

                pr_info("... installed Atom event tables\n");
                break;
        }
        return 0;
}
static int amd_pmu_init(void)
{
        x86_pmu = amd_pmu;
        return 0;
}
void __init init_hw_perf_counters(void)
{
        int err;

        switch (boot_cpu_data.x86_vendor) {
        case X86_VENDOR_INTEL:
                err = intel_pmu_init();
                break;
        case X86_VENDOR_AMD:
                err = amd_pmu_init();
                break;
        default:
                return;
        }
        if (err != 0)
                return;

        pr_info("%s Performance Monitoring support detected.\n", x86_pmu.name);
        pr_info("... version:         %d\n", x86_pmu.version);
        pr_info("... bit width:       %d\n", x86_pmu.counter_bits);

        pr_info("... num counters:    %d\n", x86_pmu.num_counters);
        if (x86_pmu.num_counters > X86_PMC_MAX_GENERIC) {
                x86_pmu.num_counters = X86_PMC_MAX_GENERIC;
                WARN(1, KERN_ERR "hw perf counters %d > max(%d), clipping!",
                     x86_pmu.num_counters, X86_PMC_MAX_GENERIC);
        }
        perf_counter_mask = (1 << x86_pmu.num_counters) - 1;
        perf_max_counters = x86_pmu.num_counters;

        pr_info("... value mask:      %016Lx\n", x86_pmu.counter_mask);
        pr_info("... max period:      %016Lx\n", x86_pmu.max_period);

        if (x86_pmu.num_counters_fixed > X86_PMC_MAX_FIXED) {
                x86_pmu.num_counters_fixed = X86_PMC_MAX_FIXED;
                WARN(1, KERN_ERR "hw perf counters fixed %d > max(%d), clipping!",
                     x86_pmu.num_counters_fixed, X86_PMC_MAX_FIXED);
        }
        pr_info("... fixed counters:  %d\n", x86_pmu.num_counters_fixed);

        perf_counter_mask |=
                ((1LL << x86_pmu.num_counters_fixed)-1) << X86_PMC_IDX_FIXED;

        pr_info("... counter mask:    %016Lx\n", perf_counter_mask);

        perf_counters_lapic_init();
        register_die_notifier(&perf_counter_nmi_notifier);
}
static inline void x86_pmu_read(struct perf_counter *counter)
{
        x86_perf_counter_update(counter, &counter->hw, counter->hw.idx);
}
static const struct pmu pmu = {
        .enable         = x86_pmu_enable,
        .disable        = x86_pmu_disable,
        .read           = x86_pmu_read,
        .unthrottle     = x86_pmu_unthrottle,
};
const struct pmu *hw_perf_counter_init(struct perf_counter *counter)
{
        int err;

        err = __hw_perf_counter_init(counter);
        if (err)
                return ERR_PTR(err);

        return &pmu;
}
void callchain_store(struct perf_callchain_entry *entry, unsigned long ip)
{
        if (entry->nr < MAX_STACK_DEPTH)
                entry->ip[entry->nr++] = ip;
}
static DEFINE_PER_CPU(struct perf_callchain_entry, irq_entry);
static DEFINE_PER_CPU(struct perf_callchain_entry, nmi_entry);
static void
backtrace_warning_symbol(void *data, char *msg, unsigned long symbol)
{
        /* Ignore warnings */
}

static void backtrace_warning(void *data, char *msg)
{
        /* Ignore warnings */
}

static int backtrace_stack(void *data, char *name)
{
        /* Don't bother with IRQ stacks for now */
        return -1;
}

static void backtrace_address(void *data, unsigned long addr, int reliable)
{
        struct perf_callchain_entry *entry = data;

        if (reliable)
                callchain_store(entry, addr);
}

static const struct stacktrace_ops backtrace_ops = {
        .warning                = backtrace_warning,
        .warning_symbol         = backtrace_warning_symbol,
        .stack                  = backtrace_stack,
        .address                = backtrace_address,
};
static void
perf_callchain_kernel(struct pt_regs *regs, struct perf_callchain_entry *entry)
{
        unsigned long bp;
        char *stack;
        int nr = entry->nr;

        callchain_store(entry, instruction_pointer(regs));

        stack = ((char *)regs + sizeof(struct pt_regs));
#ifdef CONFIG_FRAME_POINTER
        bp = frame_pointer(regs);
#else
        bp = 0;
#endif

        dump_trace(NULL, regs, (void *)stack, bp, &backtrace_ops, entry);

        entry->kernel = entry->nr - nr;
}
struct stack_frame {
        const void __user       *next_fp;
        unsigned long           return_address;
};

static int copy_stack_frame(const void __user *fp, struct stack_frame *frame)
{
        int ret;

        if (!access_ok(VERIFY_READ, fp, sizeof(*frame)))
                return 0;

        ret = 1;
        pagefault_disable();
        if (__copy_from_user_inatomic(frame, fp, sizeof(*frame)))
                ret = 0;
        pagefault_enable();

        return ret;
}
static void
perf_callchain_user(struct pt_regs *regs, struct perf_callchain_entry *entry)
{
        struct stack_frame frame;
        const void __user *fp;
        int nr = entry->nr;

        regs = (struct pt_regs *)current->thread.sp0 - 1;
        fp   = (void __user *)regs->bp;

        callchain_store(entry, regs->ip);

        while (entry->nr < MAX_STACK_DEPTH) {
                frame.next_fp        = NULL;
                frame.return_address = 0;

                if (!copy_stack_frame(fp, &frame))
                        break;

                if ((unsigned long)fp < user_stack_pointer(regs))
                        break;

                callchain_store(entry, frame.return_address);
                fp = frame.next_fp;
        }

        entry->user = entry->nr - nr;
}
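
/*
 * Summary added by the editor, not original text: the loop above is a
 * user-space frame-pointer walk. Each iteration copies one struct
 * stack_frame from the user stack, records its return_address, then
 * follows next_fp to the caller's frame, stopping once MAX_STACK_DEPTH
 * entries are recorded, the copy fails, or the frame pointer falls below
 * the current user stack pointer.
 */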
static void
perf_do_callchain(struct pt_regs *regs, struct perf_callchain_entry *entry)
{
        int is_user;

        if (!regs)
                return;

        is_user = user_mode(regs);

        if (!current || current->pid == 0)
                return;

        if (is_user && current->state != TASK_RUNNING)
                return;

        if (!is_user)
                perf_callchain_kernel(regs, entry);

        if (current->mm)
                perf_callchain_user(regs, entry);
}
struct perf_callchain_entry *perf_callchain(struct pt_regs *regs)
{
        struct perf_callchain_entry *entry;

        if (in_nmi())
                entry = &__get_cpu_var(nmi_entry);
        else
                entry = &__get_cpu_var(irq_entry);

        entry->nr = 0;
        entry->kernel = 0;
        entry->user = 0;

        perf_do_callchain(regs, entry);

        return entry;
}