/*
 * Performance counter x86 architecture code
 *
 * Copyright(C) 2008 Thomas Gleixner <tglx@linutronix.de>
 * Copyright(C) 2008 Red Hat, Inc., Ingo Molnar
 *
 * For licensing details see kernel-base/COPYING
 */
#include <linux/perf_counter.h>
#include <linux/capability.h>
#include <linux/notifier.h>
#include <linux/hardirq.h>
#include <linux/kprobes.h>
#include <linux/module.h>
#include <linux/kdebug.h>
#include <linux/sched.h>

#include <asm/perf_counter.h>
#include <asm/apic.h>
static bool perf_counters_initialized __read_mostly;

/*
 * Number of (generic) HW counters:
 */
static int nr_counters_generic __read_mostly;
static u64 perf_counter_mask __read_mostly;
static u64 counter_value_mask __read_mostly;

static int nr_counters_fixed __read_mostly;
struct cpu_hw_counters {
        struct perf_counter     *counters[X86_PMC_IDX_MAX];
        unsigned long           used[BITS_TO_LONGS(X86_PMC_IDX_MAX)];
        unsigned long           interrupts;
        u64                     global_enable;
};
/*
 * Intel PerfMon v3. Used on Core2 and later.
 */
static DEFINE_PER_CPU(struct cpu_hw_counters, cpu_hw_counters);
static const int intel_perfmon_event_map[] =
{
        [PERF_COUNT_CPU_CYCLES]                 = 0x003c,
        [PERF_COUNT_INSTRUCTIONS]               = 0x00c0,
        [PERF_COUNT_CACHE_REFERENCES]           = 0x4f2e,
        [PERF_COUNT_CACHE_MISSES]               = 0x412e,
        [PERF_COUNT_BRANCH_INSTRUCTIONS]        = 0x00c4,
        [PERF_COUNT_BRANCH_MISSES]              = 0x00c5,
        [PERF_COUNT_BUS_CYCLES]                 = 0x013c,
};

static const int max_intel_perfmon_events = ARRAY_SIZE(intel_perfmon_event_map);
/*
 * Propagate counter elapsed time into the generic counter.
 * Can only be executed on the CPU where the counter is active.
 * Returns the delta events processed.
 */
static void
x86_perf_counter_update(struct perf_counter *counter,
                        struct hw_perf_counter *hwc, int idx)
{
        u64 prev_raw_count, new_raw_count, delta;

        /*
         * Careful: an NMI might modify the previous counter value.
         *
         * Our tactic to handle this is to first atomically read and
         * exchange a new raw count - then add that new-prev delta
         * count to the generic counter atomically:
         */
again:
        prev_raw_count = atomic64_read(&hwc->prev_count);
        rdmsrl(hwc->counter_base + idx, new_raw_count);

        if (atomic64_cmpxchg(&hwc->prev_count, prev_raw_count,
                             new_raw_count) != prev_raw_count)
                goto again;

        /*
         * Now we have the new raw value and have updated the prev
         * timestamp already. We can now calculate the elapsed delta
         * (counter-)time and add that to the generic counter.
         *
         * Careful, not all hw sign-extends above the physical width
         * of the count, so we do that by clipping the delta to 32 bits:
         */
        delta = (u64)(u32)((s32)new_raw_count - (s32)prev_raw_count);

        atomic64_add(delta, &counter->count);
        atomic64_sub(delta, &hwc->period_left);
}
/*
 * Setup the hardware configuration for a given hw_event_type
 */
static int __hw_perf_counter_init(struct perf_counter *counter)
{
        struct perf_counter_hw_event *hw_event = &counter->hw_event;
        struct hw_perf_counter *hwc = &counter->hw;

        if (unlikely(!perf_counters_initialized))
                return -EINVAL;

        /*
         * Generate PMC IRQs
         * (keep 'enabled' bit clear for now):
         */
        hwc->config = ARCH_PERFMON_EVENTSEL_INT;

        /*
         * Count user and OS events unless requested not to.
         */
        if (!hw_event->exclude_user)
                hwc->config |= ARCH_PERFMON_EVENTSEL_USR;
        if (!hw_event->exclude_kernel)
                hwc->config |= ARCH_PERFMON_EVENTSEL_OS;

        /*
         * If privileged enough, allow NMI events:
         */
        hwc->nmi = 0;
        if (capable(CAP_SYS_ADMIN) && hw_event->nmi)
                hwc->nmi = 1;

        hwc->irq_period = hw_event->irq_period;
        /*
         * Intel PMCs cannot be accessed sanely above 32 bit width,
         * so we install an artificial 1<<31 period regardless of
         * the generic counter period:
         */
        if ((s64)hwc->irq_period <= 0 || hwc->irq_period > 0x7FFFFFFF)
                hwc->irq_period = 0x7FFFFFFF;

        atomic64_set(&hwc->period_left, hwc->irq_period);

        /*
         * Raw event type provides the config in the event structure
         */
        if (hw_event->raw) {
                hwc->config |= hw_event->type;
        } else {
                if (hw_event->type >= max_intel_perfmon_events)
                        return -EINVAL;
                /*
                 * The generic map:
                 */
                hwc->config |= intel_perfmon_event_map[hw_event->type];
        }
        counter->wakeup_pending = 0;

        return 0;
}
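/*
 * Example of the resulting config word (assuming the architectural
 * EVENTSEL bit layout: USR = bit 16, OS = bit 17, INT = bit 20): a
 * non-raw PERF_COUNT_CPU_CYCLES counter that excludes nothing ends up
 * with config = 0x003c | (1 << 16) | (1 << 17) | (1 << 20) = 0x13003c.
 * The enable bit (ARCH_PERFMON_EVENTSEL0_ENABLE) is only OR-ed in
 * later, by __pmc_generic_enable().
 */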
u64 hw_perf_save_disable(void)
{
        u64 ctrl;

        if (unlikely(!perf_counters_initialized))
                return 0;

        rdmsrl(MSR_CORE_PERF_GLOBAL_CTRL, ctrl);
        wrmsrl(MSR_CORE_PERF_GLOBAL_CTRL, 0);

        return ctrl;
}
EXPORT_SYMBOL_GPL(hw_perf_save_disable);

void hw_perf_restore(u64 ctrl)
{
        if (unlikely(!perf_counters_initialized))
                return;

        wrmsrl(MSR_CORE_PERF_GLOBAL_CTRL, ctrl);
}
EXPORT_SYMBOL_GPL(hw_perf_restore);
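/*
 * The two exports above are meant to be used as a pair; a sketch of the
 * expected calling pattern (the variable name is illustrative):
 *
 *      u64 perf_flags = hw_perf_save_disable();
 *      ... reprogram / schedule counters with the PMU quiesced ...
 *      hw_perf_restore(perf_flags);
 */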
static inline void
__pmc_fixed_disable(struct perf_counter *counter,
                    struct hw_perf_counter *hwc, unsigned int __idx)
{
        int idx = __idx - X86_PMC_IDX_FIXED;
        u64 ctrl_val, mask;
        int err;

        mask = 0xfULL << (idx * 4);

        rdmsrl(hwc->config_base, ctrl_val);
        ctrl_val &= ~mask;
        err = checking_wrmsrl(hwc->config_base, ctrl_val);
}
static void
__pmc_generic_disable(struct perf_counter *counter,
                      struct hw_perf_counter *hwc, unsigned int idx)
{
        if (unlikely(hwc->config_base == MSR_ARCH_PERFMON_FIXED_CTR_CTRL))
                __pmc_fixed_disable(counter, hwc, idx);
        else
                wrmsr_safe(hwc->config_base + idx, hwc->config, 0);
}
static DEFINE_PER_CPU(u64, prev_left[X86_PMC_IDX_MAX]);
/*
 * Set the next IRQ period, based on the hwc->period_left value.
 * To be called with the counter disabled in hw:
 */
static void
__hw_perf_counter_set_period(struct perf_counter *counter,
                             struct hw_perf_counter *hwc, int idx)
{
        s64 left = atomic64_read(&hwc->period_left);
        s32 period = hwc->irq_period;
        int err;

        /*
         * If we are way outside a reasonable range then just skip forward:
         */
        if (unlikely(left <= -period)) {
                left = period;
                atomic64_set(&hwc->period_left, left);
        }

        if (unlikely(left <= 0)) {
                left += period;
                atomic64_set(&hwc->period_left, left);
        }

        per_cpu(prev_left[idx], smp_processor_id()) = left;

        /*
         * The hw counter starts counting from this counter offset,
         * mark it to be able to extract future deltas:
         */
        atomic64_set(&hwc->prev_count, (u64)-left);

        err = checking_wrmsrl(hwc->counter_base + idx,
                              (u64)(-left) & counter_value_mask);
}
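/*
 * Worked example of the programming above: with irq_period = 1000 and
 * period_left = 1000, the counter MSR is written with (u64)(-1000)
 * masked to the counter width; on a 48-bit counter that is
 * 0x0000fffffffffc18.  Counting up from there, the hardware overflows
 * (and raises the PMI) after exactly 1000 events.
 */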
static inline void
__pmc_fixed_enable(struct perf_counter *counter,
                   struct hw_perf_counter *hwc, unsigned int __idx)
{
        int idx = __idx - X86_PMC_IDX_FIXED;
        u64 ctrl_val, bits, mask;
        int err;

        /*
         * Enable IRQ generation (0x8),
         * and enable ring-3 counting (0x2) and ring-0 counting (0x1)
         * if requested:
         */
        bits = 0x8ULL;
        if (hwc->config & ARCH_PERFMON_EVENTSEL_USR)
                bits |= 0x2;
        if (hwc->config & ARCH_PERFMON_EVENTSEL_OS)
                bits |= 0x1;
        bits <<= (idx * 4);
        mask = 0xfULL << (idx * 4);

        rdmsrl(hwc->config_base, ctrl_val);
        ctrl_val &= ~mask;
        ctrl_val |= bits;
        err = checking_wrmsrl(hwc->config_base, ctrl_val);
}
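/*
 * Layout note: MSR_ARCH_PERFMON_FIXED_CTR_CTRL holds one 4-bit control
 * field per fixed counter, which is why both the enable and disable
 * paths shift a 0xf mask by (idx * 4).  For example, fixed counter 1
 * (CPU cycles) is controlled by bits 4-7; enabling it for user and
 * kernel counting with PMI generation sets that field to 0xb
 * (0x8 | 0x2 | 0x1).
 */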
static void
__pmc_generic_enable(struct perf_counter *counter,
                     struct hw_perf_counter *hwc, int idx)
{
        if (unlikely(hwc->config_base == MSR_ARCH_PERFMON_FIXED_CTR_CTRL))
                __pmc_fixed_enable(counter, hwc, idx);
        else
                wrmsr(hwc->config_base + idx,
                      hwc->config | ARCH_PERFMON_EVENTSEL0_ENABLE, 0);
}
static int
fixed_mode_idx(struct perf_counter *counter, struct hw_perf_counter *hwc)
{
        unsigned int event;

        if (unlikely(hwc->nmi))
                return -1;

        event = hwc->config & ARCH_PERFMON_EVENT_MASK;

        if (unlikely(event == intel_perfmon_event_map[PERF_COUNT_INSTRUCTIONS]))
                return X86_PMC_IDX_FIXED_INSTRUCTIONS;
        if (unlikely(event == intel_perfmon_event_map[PERF_COUNT_CPU_CYCLES]))
                return X86_PMC_IDX_FIXED_CPU_CYCLES;
        if (unlikely(event == intel_perfmon_event_map[PERF_COUNT_BUS_CYCLES]))
                return X86_PMC_IDX_FIXED_BUS_CYCLES;

        return -1;
}
/*
 * Find a PMC slot for the freshly enabled / scheduled in counter:
 */
static int pmc_generic_enable(struct perf_counter *counter)
{
        struct cpu_hw_counters *cpuc = &__get_cpu_var(cpu_hw_counters);
        struct hw_perf_counter *hwc = &counter->hw;
        int idx;

        idx = fixed_mode_idx(counter, hwc);
        if (idx >= 0) {
                /*
                 * Try to get the fixed counter, if that is already taken
                 * then try to get a generic counter:
                 */
                if (test_and_set_bit(idx, cpuc->used))
                        goto try_generic;

                hwc->config_base = MSR_ARCH_PERFMON_FIXED_CTR_CTRL;
                /*
                 * We set it so that counter_base + idx in wrmsr/rdmsr maps to
                 * MSR_ARCH_PERFMON_FIXED_CTR0 ... CTR2:
                 */
                hwc->counter_base =
                        MSR_ARCH_PERFMON_FIXED_CTR0 - X86_PMC_IDX_FIXED;
                hwc->idx = idx;
        } else {
                idx = hwc->idx;
                /* Try to get the previous generic counter again */
                if (test_and_set_bit(idx, cpuc->used)) {
try_generic:
                        idx = find_first_zero_bit(cpuc->used, nr_counters_generic);
                        if (idx == nr_counters_generic)
                                return -EAGAIN;

                        set_bit(idx, cpuc->used);
                        hwc->idx = idx;
                }
                hwc->config_base  = MSR_ARCH_PERFMON_EVENTSEL0;
                hwc->counter_base = MSR_ARCH_PERFMON_PERFCTR0;
        }

        perf_counters_lapic_init(hwc->nmi);

        __pmc_generic_disable(counter, hwc, idx);

        cpuc->counters[idx] = counter;
        /*
         * Make it visible before enabling the hw:
         */
        smp_wmb();

        __hw_perf_counter_set_period(counter, hwc, idx);
        __pmc_generic_enable(counter, hwc, idx);

        return 0;
}
void perf_counter_print_debug(void)
{
        u64 ctrl, status, overflow, pmc_ctrl, pmc_count, prev_left, fixed;
        struct cpu_hw_counters *cpuc;
        int cpu, idx;

        if (!nr_counters_generic)
                return;

        local_irq_disable();

        cpu = smp_processor_id();
        cpuc = &per_cpu(cpu_hw_counters, cpu);

        rdmsrl(MSR_CORE_PERF_GLOBAL_CTRL, ctrl);
        rdmsrl(MSR_CORE_PERF_GLOBAL_STATUS, status);
        rdmsrl(MSR_CORE_PERF_GLOBAL_OVF_CTRL, overflow);
        rdmsrl(MSR_ARCH_PERFMON_FIXED_CTR_CTRL, fixed);

        printk(KERN_INFO "\n");
        printk(KERN_INFO "CPU#%d: ctrl:       %016llx\n", cpu, ctrl);
        printk(KERN_INFO "CPU#%d: status:     %016llx\n", cpu, status);
        printk(KERN_INFO "CPU#%d: overflow:   %016llx\n", cpu, overflow);
        printk(KERN_INFO "CPU#%d: fixed:      %016llx\n", cpu, fixed);
        printk(KERN_INFO "CPU#%d: used:       %016llx\n", cpu, *(u64 *)cpuc->used);

        for (idx = 0; idx < nr_counters_generic; idx++) {
                rdmsrl(MSR_ARCH_PERFMON_EVENTSEL0 + idx, pmc_ctrl);
                rdmsrl(MSR_ARCH_PERFMON_PERFCTR0 + idx, pmc_count);

                prev_left = per_cpu(prev_left[idx], cpu);

                printk(KERN_INFO "CPU#%d: gen-PMC%d ctrl:  %016llx\n",
                        cpu, idx, pmc_ctrl);
                printk(KERN_INFO "CPU#%d: gen-PMC%d count: %016llx\n",
                        cpu, idx, pmc_count);
                printk(KERN_INFO "CPU#%d: gen-PMC%d left:  %016llx\n",
                        cpu, idx, prev_left);
        }
        for (idx = 0; idx < nr_counters_fixed; idx++) {
                rdmsrl(MSR_ARCH_PERFMON_FIXED_CTR0 + idx, pmc_count);

                printk(KERN_INFO "CPU#%d: fixed-PMC%d count: %016llx\n",
                        cpu, idx, pmc_count);
        }
        local_irq_enable();
}
static void pmc_generic_disable(struct perf_counter *counter)
{
        struct cpu_hw_counters *cpuc = &__get_cpu_var(cpu_hw_counters);
        struct hw_perf_counter *hwc = &counter->hw;
        unsigned int idx = hwc->idx;

        __pmc_generic_disable(counter, hwc, idx);

        clear_bit(idx, cpuc->used);
        cpuc->counters[idx] = NULL;
        /*
         * Make sure the cleared pointer becomes visible before we
         * (potentially) free the counter:
         */
        smp_wmb();

        /*
         * Drain the remaining delta count out of a counter
         * that we are disabling:
         */
        x86_perf_counter_update(counter, hwc, idx);
}
static void perf_store_irq_data(struct perf_counter *counter, u64 data)
{
        struct perf_data *irqdata = counter->irqdata;

        if (irqdata->len > PERF_DATA_BUFLEN - sizeof(u64)) {
                irqdata->overrun++;
        } else {
                u64 *p = (u64 *) &irqdata->data[irqdata->len];

                *p = data;
                irqdata->len += sizeof(u64);
        }
}
/*
 * Save and restart an expired counter. Called by NMI contexts,
 * so it has to be careful about preempting normal counter ops:
 */
static void perf_save_and_restart(struct perf_counter *counter)
{
        struct hw_perf_counter *hwc = &counter->hw;
        int idx = hwc->idx;

        x86_perf_counter_update(counter, hwc, idx);
        __hw_perf_counter_set_period(counter, hwc, idx);

        if (counter->state == PERF_COUNTER_STATE_ACTIVE)
                __pmc_generic_enable(counter, hwc, idx);
}
static void
perf_handle_group(struct perf_counter *sibling, u64 *status, u64 *overflown)
{
        struct perf_counter *counter, *group_leader = sibling->group_leader;

        /*
         * Store sibling timestamps (if any):
         */
        list_for_each_entry(counter, &group_leader->sibling_list, list_entry) {

                x86_perf_counter_update(counter, &counter->hw, counter->hw.idx);
                perf_store_irq_data(sibling, counter->hw_event.type);
                perf_store_irq_data(sibling, atomic64_read(&counter->count));
        }
}
/*
 * Maximum interrupt frequency of 100KHz per CPU
 */
#define PERFMON_MAX_INTERRUPTS (100000/HZ)
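/*
 * Arithmetic note: cpuc->interrupts is zeroed by perf_counter_unthrottle()
 * and incremented once per PMU interrupt below, so allowing 100000/HZ
 * interrupts per unthrottle interval caps the rate at roughly 100 KHz,
 * assuming perf_counter_unthrottle() is driven once per timer tick.
 * With HZ=1000 that is 100 interrupts per tick.
 */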
/*
 * This handler is triggered by the local APIC, so the APIC IRQ handling
 * rules apply:
 */
static void __smp_perf_counter_interrupt(struct pt_regs *regs, int nmi)
{
        int bit, cpu = smp_processor_id();
        u64 ack, status;
        struct cpu_hw_counters *cpuc = &per_cpu(cpu_hw_counters, cpu);

        rdmsrl(MSR_CORE_PERF_GLOBAL_CTRL, cpuc->global_enable);

        /* Disable counters globally */
        wrmsrl(MSR_CORE_PERF_GLOBAL_CTRL, 0);

        rdmsrl(MSR_CORE_PERF_GLOBAL_STATUS, status);
        if (!status)
                goto out;

again:
        inc_irq_stat(apic_perf_irqs);
        ack = status;
        for_each_bit(bit, (unsigned long *)&status, X86_PMC_IDX_MAX) {
                struct perf_counter *counter = cpuc->counters[bit];

                clear_bit(bit, (unsigned long *) &status);
                if (!counter)
                        continue;

                perf_save_and_restart(counter);

                switch (counter->hw_event.record_type) {
                case PERF_RECORD_SIMPLE:
                        continue;
                case PERF_RECORD_IRQ:
                        perf_store_irq_data(counter, instruction_pointer(regs));
                        break;
                case PERF_RECORD_GROUP:
                        perf_handle_group(counter, &status, &ack);
                        break;
                }
                /*
                 * From NMI context we cannot call into the scheduler to
                 * do a task wakeup - but we mark these generic as
                 * wakeup_pending and initiate a wakeup callback:
                 */
                if (nmi) {
                        counter->wakeup_pending = 1;
                        set_tsk_thread_flag(current, TIF_PERF_COUNTERS);
                } else {
                        wake_up(&counter->waitq);
                }
        }

        wrmsrl(MSR_CORE_PERF_GLOBAL_OVF_CTRL, ack);

        /*
         * Repeat if there is more work to be done:
         */
        rdmsrl(MSR_CORE_PERF_GLOBAL_STATUS, status);
        if (status)
                goto again;
out:
        /*
         * Restore - do not reenable when global enable is off or throttled:
         */
        if (++cpuc->interrupts < PERFMON_MAX_INTERRUPTS)
                wrmsrl(MSR_CORE_PERF_GLOBAL_CTRL, cpuc->global_enable);
}
void perf_counter_unthrottle(void)
{
        struct cpu_hw_counters *cpuc;
        u64 global_enable;

        if (!cpu_has(&boot_cpu_data, X86_FEATURE_ARCH_PERFMON))
                return;

        if (unlikely(!perf_counters_initialized))
                return;

        cpuc = &per_cpu(cpu_hw_counters, smp_processor_id());
        if (cpuc->interrupts >= PERFMON_MAX_INTERRUPTS) {
                if (printk_ratelimit())
                        printk(KERN_WARNING "PERFMON: max interrupts exceeded!\n");
                wrmsrl(MSR_CORE_PERF_GLOBAL_CTRL, cpuc->global_enable);
        }
        rdmsrl(MSR_CORE_PERF_GLOBAL_CTRL, global_enable);
        if (unlikely(cpuc->global_enable && !global_enable))
                wrmsrl(MSR_CORE_PERF_GLOBAL_CTRL, cpuc->global_enable);
        cpuc->interrupts = 0;
}
void smp_perf_counter_interrupt(struct pt_regs *regs)
{
        irq_enter();
        apic_write(APIC_LVTPC, LOCAL_PERF_VECTOR);
        __smp_perf_counter_interrupt(regs, 0);
        irq_exit();
}
/*
 * This handler is triggered by NMI contexts:
 */
void perf_counter_notify(struct pt_regs *regs)
{
        struct cpu_hw_counters *cpuc;
        unsigned long flags;
        int bit, cpu;

        local_irq_save(flags);
        cpu = smp_processor_id();
        cpuc = &per_cpu(cpu_hw_counters, cpu);

        for_each_bit(bit, cpuc->used, X86_PMC_IDX_MAX) {
                struct perf_counter *counter = cpuc->counters[bit];

                if (!counter)
                        continue;

                if (counter->wakeup_pending) {
                        counter->wakeup_pending = 0;
                        wake_up(&counter->waitq);
                }
        }

        local_irq_restore(flags);
}
void perf_counters_lapic_init(int nmi)
{
        u32 apic_val;

        if (!perf_counters_initialized)
                return;
        /*
         * Enable the performance counter vector in the APIC LVT:
         */
        apic_val = apic_read(APIC_LVTERR);

        apic_write(APIC_LVTERR, apic_val | APIC_LVT_MASKED);
        if (nmi)
                apic_write(APIC_LVTPC, APIC_DM_NMI);
        else
                apic_write(APIC_LVTPC, LOCAL_PERF_VECTOR);
        apic_write(APIC_LVTERR, apic_val);
}
static int
perf_counter_nmi_handler(struct notifier_block *self,
                         unsigned long cmd, void *__args)
{
        struct die_args *args = __args;
        struct pt_regs *regs;

        if (likely(cmd != DIE_NMI_IPI))
                return NOTIFY_DONE;

        regs = args->regs;

        apic_write(APIC_LVTPC, APIC_DM_NMI);
        __smp_perf_counter_interrupt(regs, 1);

        return NOTIFY_STOP;
}

static __read_mostly struct notifier_block perf_counter_nmi_notifier = {
        .notifier_call          = perf_counter_nmi_handler,
};
void __init init_hw_perf_counters(void)
{
        union cpuid10_eax eax;
        unsigned int ebx;
        unsigned int unused;
        union cpuid10_edx edx;

        if (!cpu_has(&boot_cpu_data, X86_FEATURE_ARCH_PERFMON))
                return;

        /*
         * Check whether the Architectural PerfMon supports
         * Branch Misses Retired Event or not.
         */
        cpuid(10, &eax.full, &ebx, &unused, &edx.full);
        if (eax.split.mask_length <= ARCH_PERFMON_BRANCH_MISSES_RETIRED)
                return;

        printk(KERN_INFO "Intel Performance Monitoring support detected.\n");

        printk(KERN_INFO "... version:         %d\n", eax.split.version_id);
        printk(KERN_INFO "... num counters:    %d\n", eax.split.num_counters);
        nr_counters_generic = eax.split.num_counters;
        if (nr_counters_generic > X86_PMC_MAX_GENERIC) {
                nr_counters_generic = X86_PMC_MAX_GENERIC;
                WARN(1, KERN_ERR "hw perf counters %d > max(%d), clipping!",
                        nr_counters_generic, X86_PMC_MAX_GENERIC);
        }
        perf_counter_mask = (1 << nr_counters_generic) - 1;
        perf_max_counters = nr_counters_generic;

        printk(KERN_INFO "... bit width:       %d\n", eax.split.bit_width);
        counter_value_mask = (1ULL << eax.split.bit_width) - 1;
        printk(KERN_INFO "... value mask:      %016Lx\n", counter_value_mask);

        printk(KERN_INFO "... mask length:     %d\n", eax.split.mask_length);

        nr_counters_fixed = edx.split.num_counters_fixed;
        if (nr_counters_fixed > X86_PMC_MAX_FIXED) {
                nr_counters_fixed = X86_PMC_MAX_FIXED;
                WARN(1, KERN_ERR "hw perf counters fixed %d > max(%d), clipping!",
                        nr_counters_fixed, X86_PMC_MAX_FIXED);
        }
        printk(KERN_INFO "... fixed counters:  %d\n", nr_counters_fixed);

        perf_counter_mask |= ((1LL << nr_counters_fixed)-1) << X86_PMC_IDX_FIXED;

        printk(KERN_INFO "... counter mask:    %016Lx\n", perf_counter_mask);
        perf_counters_initialized = true;

        perf_counters_lapic_init(0);
        register_die_notifier(&perf_counter_nmi_notifier);
}
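/*
 * Example of the resulting counter mask: on a CPU reporting 4 generic
 * and 3 fixed counters, perf_counter_mask ends up as
 * ((1 << 4) - 1) | (((1LL << 3) - 1) << X86_PMC_IDX_FIXED)
 * = 0x000000070000000f (with X86_PMC_IDX_FIXED == 32), i.e. generic
 * counters occupy the low bits and fixed counters start at bit 32.
 */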
static void pmc_generic_read(struct perf_counter *counter)
{
        x86_perf_counter_update(counter, &counter->hw, counter->hw.idx);
}

static const struct hw_perf_counter_ops x86_perf_counter_ops = {
        .enable         = pmc_generic_enable,
        .disable        = pmc_generic_disable,
        .read           = pmc_generic_read,
};

const struct hw_perf_counter_ops *
hw_perf_counter_init(struct perf_counter *counter)
{
        int err;

        err = __hw_perf_counter_init(counter);
        if (err)
                return NULL;

        return &x86_perf_counter_ops;
}
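/*
 * Note on the ops table above: hw_perf_counter_init() is the hook the
 * generic perf_counter core uses to bind a counter to this architecture;
 * on success the core drives the counter through the enable/disable/read
 * callbacks of x86_perf_counter_ops.
 */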