perf_counter/x86: Always use NMI for performance-monitoring interrupt
[linux-2.6/btrfs-unstable.git] arch/x86/kernel/cpu/perf_counter.c
blob 316b0c995f3878b4a8efc135472a89baad3948b0
/*
 * Performance counter x86 architecture code
 *
 *  Copyright (C) 2008 Thomas Gleixner <tglx@linutronix.de>
 *  Copyright (C) 2008-2009 Red Hat, Inc., Ingo Molnar
 *  Copyright (C) 2009 Jaswinder Singh Rajput
 *  Copyright (C) 2009 Advanced Micro Devices, Inc., Robert Richter
 *  Copyright (C) 2008-2009 Red Hat, Inc., Peter Zijlstra <pzijlstr@redhat.com>
 *
 *  For licencing details see kernel-base/COPYING
 */

#include <linux/perf_counter.h>
#include <linux/capability.h>
#include <linux/notifier.h>
#include <linux/hardirq.h>
#include <linux/kprobes.h>
#include <linux/module.h>
#include <linux/kdebug.h>
#include <linux/sched.h>
#include <linux/uaccess.h>

#include <asm/apic.h>
#include <asm/stacktrace.h>
#include <asm/nmi.h>

static u64 perf_counter_mask __read_mostly;

struct cpu_hw_counters {
	struct perf_counter	*counters[X86_PMC_IDX_MAX];
	unsigned long		used_mask[BITS_TO_LONGS(X86_PMC_IDX_MAX)];
	unsigned long		active_mask[BITS_TO_LONGS(X86_PMC_IDX_MAX)];
	unsigned long		interrupts;
	int			enabled;
};

/*
 * struct x86_pmu - generic x86 pmu
 */
struct x86_pmu {
	const char	*name;
	int		version;
	int		(*handle_irq)(struct pt_regs *, int);
	void		(*disable_all)(void);
	void		(*enable_all)(void);
	void		(*enable)(struct hw_perf_counter *, int);
	void		(*disable)(struct hw_perf_counter *, int);
	unsigned	eventsel;
	unsigned	perfctr;
	u64		(*event_map)(int);
	u64		(*raw_event)(u64);
	int		max_events;
	int		num_counters;
	int		num_counters_fixed;
	int		counter_bits;
	u64		counter_mask;
	u64		max_period;
	u64		intel_ctrl;
};

static struct x86_pmu x86_pmu __read_mostly;

static DEFINE_PER_CPU(struct cpu_hw_counters, cpu_hw_counters) = {
	.enabled = 1,
};

/*
 * Intel PerfMon v3. Used on Core2 and later.
 */
static const u64 intel_perfmon_event_map[] =
{
	[PERF_COUNT_CPU_CYCLES]			= 0x003c,
	[PERF_COUNT_INSTRUCTIONS]		= 0x00c0,
	[PERF_COUNT_CACHE_REFERENCES]		= 0x4f2e,
	[PERF_COUNT_CACHE_MISSES]		= 0x412e,
	[PERF_COUNT_BRANCH_INSTRUCTIONS]	= 0x00c4,
	[PERF_COUNT_BRANCH_MISSES]		= 0x00c5,
	[PERF_COUNT_BUS_CYCLES]			= 0x013c,
};

static u64 intel_pmu_event_map(int event)
{
	return intel_perfmon_event_map[event];
}

static u64 intel_pmu_raw_event(u64 event)
{
#define CORE_EVNTSEL_EVENT_MASK		0x000000FFULL
#define CORE_EVNTSEL_UNIT_MASK		0x0000FF00ULL
#define CORE_EVNTSEL_EDGE_MASK		0x00040000ULL
#define CORE_EVNTSEL_INV_MASK		0x00800000ULL
#define CORE_EVNTSEL_COUNTER_MASK	0xFF000000ULL

#define CORE_EVNTSEL_MASK		\
	(CORE_EVNTSEL_EVENT_MASK |	\
	 CORE_EVNTSEL_UNIT_MASK  |	\
	 CORE_EVNTSEL_EDGE_MASK  |	\
	 CORE_EVNTSEL_INV_MASK   |	\
	 CORE_EVNTSEL_COUNTER_MASK)

	return event & CORE_EVNTSEL_MASK;
}

/*
 * AMD Performance Monitor K7 and later.
 */
static const u64 amd_perfmon_event_map[] =
{
	[PERF_COUNT_CPU_CYCLES]			= 0x0076,
	[PERF_COUNT_INSTRUCTIONS]		= 0x00c0,
	[PERF_COUNT_CACHE_REFERENCES]		= 0x0080,
	[PERF_COUNT_CACHE_MISSES]		= 0x0081,
	[PERF_COUNT_BRANCH_INSTRUCTIONS]	= 0x00c4,
	[PERF_COUNT_BRANCH_MISSES]		= 0x00c5,
};

static u64 amd_pmu_event_map(int event)
{
	return amd_perfmon_event_map[event];
}

static u64 amd_pmu_raw_event(u64 event)
{
#define K7_EVNTSEL_EVENT_MASK	0x7000000FFULL
#define K7_EVNTSEL_UNIT_MASK	0x00000FF00ULL
#define K7_EVNTSEL_EDGE_MASK	0x000040000ULL
#define K7_EVNTSEL_INV_MASK	0x000800000ULL
#define K7_EVNTSEL_COUNTER_MASK	0x0FF000000ULL

#define K7_EVNTSEL_MASK			\
	(K7_EVNTSEL_EVENT_MASK |	\
	 K7_EVNTSEL_UNIT_MASK  |	\
	 K7_EVNTSEL_EDGE_MASK  |	\
	 K7_EVNTSEL_INV_MASK   |	\
	 K7_EVNTSEL_COUNTER_MASK)

	return event & K7_EVNTSEL_MASK;
}

/*
 * Propagate counter elapsed time into the generic counter.
 * Can only be executed on the CPU where the counter is active.
 * Returns the delta events processed.
 */
static u64
x86_perf_counter_update(struct perf_counter *counter,
			struct hw_perf_counter *hwc, int idx)
{
	int shift = 64 - x86_pmu.counter_bits;
	u64 prev_raw_count, new_raw_count;
	s64 delta;

	/*
	 * Careful: an NMI might modify the previous counter value.
	 *
	 * Our tactic to handle this is to first atomically read and
	 * exchange a new raw count - then add that new-prev delta
	 * count to the generic counter atomically:
	 */
again:
	prev_raw_count = atomic64_read(&hwc->prev_count);
	rdmsrl(hwc->counter_base + idx, new_raw_count);

	if (atomic64_cmpxchg(&hwc->prev_count, prev_raw_count,
					new_raw_count) != prev_raw_count)
		goto again;

	/*
	 * Now we have the new raw value and have updated the prev
	 * timestamp already. We can now calculate the elapsed delta
	 * (counter-)time and add that to the generic counter.
	 *
	 * Careful, not all hw sign-extends above the physical width
	 * of the count.
	 */
	delta = (new_raw_count << shift) - (prev_raw_count << shift);
	delta >>= shift;

	atomic64_add(delta, &counter->count);
	atomic64_sub(delta, &hwc->period_left);

	return new_raw_count;
}

static atomic_t active_counters;
static DEFINE_MUTEX(pmc_reserve_mutex);
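
/*
 * The perfctr/eventsel MSR pairs are shared with other in-kernel users,
 * most notably the local APIC NMI watchdog, so they are taken through the
 * reserve_perfctr_nmi()/reserve_evntsel_nmi() allocator and the watchdog
 * is kept off for as long as perf counters own the hardware.
 */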
static bool reserve_pmc_hardware(void)
{
	int i;

	if (nmi_watchdog == NMI_LOCAL_APIC)
		disable_lapic_nmi_watchdog();

	for (i = 0; i < x86_pmu.num_counters; i++) {
		if (!reserve_perfctr_nmi(x86_pmu.perfctr + i))
			goto perfctr_fail;
	}

	for (i = 0; i < x86_pmu.num_counters; i++) {
		if (!reserve_evntsel_nmi(x86_pmu.eventsel + i))
			goto eventsel_fail;
	}

	return true;

eventsel_fail:
	for (i--; i >= 0; i--)
		release_evntsel_nmi(x86_pmu.eventsel + i);

	i = x86_pmu.num_counters;

perfctr_fail:
	for (i--; i >= 0; i--)
		release_perfctr_nmi(x86_pmu.perfctr + i);

	if (nmi_watchdog == NMI_LOCAL_APIC)
		enable_lapic_nmi_watchdog();

	return false;
}

static void release_pmc_hardware(void)
{
	int i;

	for (i = 0; i < x86_pmu.num_counters; i++) {
		release_perfctr_nmi(x86_pmu.perfctr + i);
		release_evntsel_nmi(x86_pmu.eventsel + i);
	}

	if (nmi_watchdog == NMI_LOCAL_APIC)
		enable_lapic_nmi_watchdog();
}

static void hw_perf_counter_destroy(struct perf_counter *counter)
{
	if (atomic_dec_and_mutex_lock(&active_counters, &pmc_reserve_mutex)) {
		release_pmc_hardware();
		mutex_unlock(&pmc_reserve_mutex);
	}
}

static inline int x86_pmu_initialized(void)
{
	return x86_pmu.handle_irq != NULL;
}

/*
 * Setup the hardware configuration for a given hw_event_type
 */
static int __hw_perf_counter_init(struct perf_counter *counter)
{
	struct perf_counter_hw_event *hw_event = &counter->hw_event;
	struct hw_perf_counter *hwc = &counter->hw;
	int err;

	if (!x86_pmu_initialized())
		return -ENODEV;

	err = 0;
	if (!atomic_inc_not_zero(&active_counters)) {
		mutex_lock(&pmc_reserve_mutex);
		if (atomic_read(&active_counters) == 0 && !reserve_pmc_hardware())
			err = -EBUSY;
		else
			atomic_inc(&active_counters);
		mutex_unlock(&pmc_reserve_mutex);
	}
	if (err)
		return err;

	/*
	 * Generate PMC IRQs:
	 * (keep 'enabled' bit clear for now)
	 */
	hwc->config = ARCH_PERFMON_EVENTSEL_INT;

	/*
	 * Count user and OS events unless requested not to.
	 */
	if (!hw_event->exclude_user)
		hwc->config |= ARCH_PERFMON_EVENTSEL_USR;
	if (!hw_event->exclude_kernel)
		hwc->config |= ARCH_PERFMON_EVENTSEL_OS;

	/*
	 * Use NMI events all the time:
	 */
	hwc->nmi	= 1;
	hw_event->nmi	= 1;

	if (!hwc->irq_period)
		hwc->irq_period = x86_pmu.max_period;

	atomic64_set(&hwc->period_left,
			min(x86_pmu.max_period, hwc->irq_period));

	/*
	 * Raw event types provide the config in the event structure
	 */
	if (perf_event_raw(hw_event)) {
		hwc->config |= x86_pmu.raw_event(perf_event_config(hw_event));
	} else {
		if (perf_event_id(hw_event) >= x86_pmu.max_events)
			return -EINVAL;
		/*
		 * The generic map:
		 */
		hwc->config |= x86_pmu.event_map(perf_event_id(hw_event));
	}

	counter->destroy = hw_perf_counter_destroy;

	return 0;
}

static void intel_pmu_disable_all(void)
{
	wrmsrl(MSR_CORE_PERF_GLOBAL_CTRL, 0);
}

static void amd_pmu_disable_all(void)
{
	struct cpu_hw_counters *cpuc = &__get_cpu_var(cpu_hw_counters);
	int idx;

	if (!cpuc->enabled)
		return;

	cpuc->enabled = 0;
	/*
	 * ensure we write the disable before we start disabling the
	 * counters proper, so that amd_pmu_enable_counter() does the
	 * right thing.
	 */
	barrier();

	for (idx = 0; idx < x86_pmu.num_counters; idx++) {
		u64 val;

		if (!test_bit(idx, cpuc->active_mask))
			continue;
		rdmsrl(MSR_K7_EVNTSEL0 + idx, val);
		if (!(val & ARCH_PERFMON_EVENTSEL0_ENABLE))
			continue;
		val &= ~ARCH_PERFMON_EVENTSEL0_ENABLE;
		wrmsrl(MSR_K7_EVNTSEL0 + idx, val);
	}
}

void hw_perf_disable(void)
{
	if (!x86_pmu_initialized())
		return;
	x86_pmu.disable_all();
}

static void intel_pmu_enable_all(void)
{
	wrmsrl(MSR_CORE_PERF_GLOBAL_CTRL, x86_pmu.intel_ctrl);
}

static void amd_pmu_enable_all(void)
{
	struct cpu_hw_counters *cpuc = &__get_cpu_var(cpu_hw_counters);
	int idx;

	if (cpuc->enabled)
		return;

	cpuc->enabled = 1;
	barrier();

	for (idx = 0; idx < x86_pmu.num_counters; idx++) {
		u64 val;

		if (!test_bit(idx, cpuc->active_mask))
			continue;
		rdmsrl(MSR_K7_EVNTSEL0 + idx, val);
		if (val & ARCH_PERFMON_EVENTSEL0_ENABLE)
			continue;
		val |= ARCH_PERFMON_EVENTSEL0_ENABLE;
		wrmsrl(MSR_K7_EVNTSEL0 + idx, val);
	}
}

void hw_perf_enable(void)
{
	if (!x86_pmu_initialized())
		return;
	x86_pmu.enable_all();
}

static inline u64 intel_pmu_get_status(void)
{
	u64 status;

	rdmsrl(MSR_CORE_PERF_GLOBAL_STATUS, status);

	return status;
}

static inline void intel_pmu_ack_status(u64 ack)
{
	wrmsrl(MSR_CORE_PERF_GLOBAL_OVF_CTRL, ack);
}

static inline void x86_pmu_enable_counter(struct hw_perf_counter *hwc, int idx)
{
	int err;
	err = checking_wrmsrl(hwc->config_base + idx,
			      hwc->config | ARCH_PERFMON_EVENTSEL0_ENABLE);
}

static inline void x86_pmu_disable_counter(struct hw_perf_counter *hwc, int idx)
{
	int err;
	err = checking_wrmsrl(hwc->config_base + idx,
			      hwc->config);
}

static inline void
intel_pmu_disable_fixed(struct hw_perf_counter *hwc, int __idx)
{
	int idx = __idx - X86_PMC_IDX_FIXED;
	u64 ctrl_val, mask;
	int err;

	mask = 0xfULL << (idx * 4);

	rdmsrl(hwc->config_base, ctrl_val);
	ctrl_val &= ~mask;
	err = checking_wrmsrl(hwc->config_base, ctrl_val);
}

static inline void
intel_pmu_disable_counter(struct hw_perf_counter *hwc, int idx)
{
	if (unlikely(hwc->config_base == MSR_ARCH_PERFMON_FIXED_CTR_CTRL)) {
		intel_pmu_disable_fixed(hwc, idx);
		return;
	}

	x86_pmu_disable_counter(hwc, idx);
}

static inline void
amd_pmu_disable_counter(struct hw_perf_counter *hwc, int idx)
{
	x86_pmu_disable_counter(hwc, idx);
}

static DEFINE_PER_CPU(u64, prev_left[X86_PMC_IDX_MAX]);

/*
 * Set the next IRQ period, based on the hwc->period_left value.
 * To be called with the counter disabled in hw:
 */
static void
x86_perf_counter_set_period(struct perf_counter *counter,
			    struct hw_perf_counter *hwc, int idx)
{
	s64 left = atomic64_read(&hwc->period_left);
	s64 period = min(x86_pmu.max_period, hwc->irq_period);
	int err;

	/*
	 * If we are way outside a reasonable range then just skip forward:
	 */
	if (unlikely(left <= -period)) {
		left = period;
		atomic64_set(&hwc->period_left, left);
	}

	if (unlikely(left <= 0)) {
		left += period;
		atomic64_set(&hwc->period_left, left);
	}
	/*
	 * Quirk: certain CPUs don't like it if just 1 event is left:
	 */
	if (unlikely(left < 2))
		left = 2;

	per_cpu(prev_left[idx], smp_processor_id()) = left;

	/*
	 * The hw counter starts counting from this counter offset,
	 * mark it to be able to extract future deltas:
	 */
	atomic64_set(&hwc->prev_count, (u64)-left);

	err = checking_wrmsrl(hwc->counter_base + idx,
			     (u64)(-left) & x86_pmu.counter_mask);
}

static inline void
intel_pmu_enable_fixed(struct hw_perf_counter *hwc, int __idx)
{
	int idx = __idx - X86_PMC_IDX_FIXED;
	u64 ctrl_val, bits, mask;
	int err;

	/*
	 * Enable IRQ generation (0x8),
	 * and enable ring-3 counting (0x2) and ring-0 counting (0x1)
	 * if requested:
	 */
	bits = 0x8ULL;
	if (hwc->config & ARCH_PERFMON_EVENTSEL_USR)
		bits |= 0x2;
	if (hwc->config & ARCH_PERFMON_EVENTSEL_OS)
		bits |= 0x1;
	bits <<= (idx * 4);
	mask = 0xfULL << (idx * 4);

	rdmsrl(hwc->config_base, ctrl_val);
	ctrl_val &= ~mask;
	ctrl_val |= bits;
	err = checking_wrmsrl(hwc->config_base, ctrl_val);
}

static void intel_pmu_enable_counter(struct hw_perf_counter *hwc, int idx)
{
	if (unlikely(hwc->config_base == MSR_ARCH_PERFMON_FIXED_CTR_CTRL)) {
		intel_pmu_enable_fixed(hwc, idx);
		return;
	}

	x86_pmu_enable_counter(hwc, idx);
}

static void amd_pmu_enable_counter(struct hw_perf_counter *hwc, int idx)
{
	struct cpu_hw_counters *cpuc = &__get_cpu_var(cpu_hw_counters);

	if (cpuc->enabled)
		x86_pmu_enable_counter(hwc, idx);
	else
		x86_pmu_disable_counter(hwc, idx);
}

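/*
 * Map a generic hardware event onto one of the Intel fixed-purpose
 * counters, or return -1 if it has to use a generic PMC instead.
 */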
static int
fixed_mode_idx(struct perf_counter *counter, struct hw_perf_counter *hwc)
{
	unsigned int event;

	if (!x86_pmu.num_counters_fixed)
		return -1;

	event = hwc->config & ARCH_PERFMON_EVENT_MASK;

	if (unlikely(event == x86_pmu.event_map(PERF_COUNT_INSTRUCTIONS)))
		return X86_PMC_IDX_FIXED_INSTRUCTIONS;
	if (unlikely(event == x86_pmu.event_map(PERF_COUNT_CPU_CYCLES)))
		return X86_PMC_IDX_FIXED_CPU_CYCLES;
	if (unlikely(event == x86_pmu.event_map(PERF_COUNT_BUS_CYCLES)))
		return X86_PMC_IDX_FIXED_BUS_CYCLES;

	return -1;
}

/*
 * Find a PMC slot for the freshly enabled / scheduled in counter:
 */
static int x86_pmu_enable(struct perf_counter *counter)
{
	struct cpu_hw_counters *cpuc = &__get_cpu_var(cpu_hw_counters);
	struct hw_perf_counter *hwc = &counter->hw;
	int idx;

	idx = fixed_mode_idx(counter, hwc);
	if (idx >= 0) {
		/*
		 * Try to get the fixed counter, if that is already taken
		 * then try to get a generic counter:
		 */
		if (test_and_set_bit(idx, cpuc->used_mask))
			goto try_generic;

		hwc->config_base = MSR_ARCH_PERFMON_FIXED_CTR_CTRL;
		/*
		 * We set it so that counter_base + idx in wrmsr/rdmsr maps to
		 * MSR_ARCH_PERFMON_FIXED_CTR0 ... CTR2:
		 */
		hwc->counter_base =
			MSR_ARCH_PERFMON_FIXED_CTR0 - X86_PMC_IDX_FIXED;
		hwc->idx = idx;
	} else {
		idx = hwc->idx;
		/* Try to get the previous generic counter again */
		if (test_and_set_bit(idx, cpuc->used_mask)) {
try_generic:
			idx = find_first_zero_bit(cpuc->used_mask,
						  x86_pmu.num_counters);
			if (idx == x86_pmu.num_counters)
				return -EAGAIN;

			set_bit(idx, cpuc->used_mask);
			hwc->idx = idx;
		}
		hwc->config_base  = x86_pmu.eventsel;
		hwc->counter_base = x86_pmu.perfctr;
	}

	perf_counters_lapic_init();

	x86_pmu.disable(hwc, idx);

	cpuc->counters[idx] = counter;
	set_bit(idx, cpuc->active_mask);

	x86_perf_counter_set_period(counter, hwc, idx);
	x86_pmu.enable(hwc, idx);

	return 0;
}

static void x86_pmu_unthrottle(struct perf_counter *counter)
{
	struct cpu_hw_counters *cpuc = &__get_cpu_var(cpu_hw_counters);
	struct hw_perf_counter *hwc = &counter->hw;

	if (WARN_ON_ONCE(hwc->idx >= X86_PMC_IDX_MAX ||
				cpuc->counters[hwc->idx] != counter))
		return;

	x86_pmu.enable(hwc, hwc->idx);
}

void perf_counter_print_debug(void)
{
	u64 ctrl, status, overflow, pmc_ctrl, pmc_count, prev_left, fixed;
	struct cpu_hw_counters *cpuc;
	unsigned long flags;
	int cpu, idx;

	if (!x86_pmu.num_counters)
		return;

	local_irq_save(flags);

	cpu = smp_processor_id();
	cpuc = &per_cpu(cpu_hw_counters, cpu);

	if (x86_pmu.version >= 2) {
		rdmsrl(MSR_CORE_PERF_GLOBAL_CTRL, ctrl);
		rdmsrl(MSR_CORE_PERF_GLOBAL_STATUS, status);
		rdmsrl(MSR_CORE_PERF_GLOBAL_OVF_CTRL, overflow);
		rdmsrl(MSR_ARCH_PERFMON_FIXED_CTR_CTRL, fixed);

		pr_info("\n");
		pr_info("CPU#%d: ctrl:       %016llx\n", cpu, ctrl);
		pr_info("CPU#%d: status:     %016llx\n", cpu, status);
		pr_info("CPU#%d: overflow:   %016llx\n", cpu, overflow);
		pr_info("CPU#%d: fixed:      %016llx\n", cpu, fixed);
	}
	pr_info("CPU#%d: used:       %016llx\n", cpu, *(u64 *)cpuc->used_mask);

	for (idx = 0; idx < x86_pmu.num_counters; idx++) {
		rdmsrl(x86_pmu.eventsel + idx, pmc_ctrl);
		rdmsrl(x86_pmu.perfctr  + idx, pmc_count);

		prev_left = per_cpu(prev_left[idx], cpu);

		pr_info("CPU#%d:   gen-PMC%d ctrl:  %016llx\n",
			cpu, idx, pmc_ctrl);
		pr_info("CPU#%d:   gen-PMC%d count: %016llx\n",
			cpu, idx, pmc_count);
		pr_info("CPU#%d:   gen-PMC%d left:  %016llx\n",
			cpu, idx, prev_left);
	}
	for (idx = 0; idx < x86_pmu.num_counters_fixed; idx++) {
		rdmsrl(MSR_ARCH_PERFMON_FIXED_CTR0 + idx, pmc_count);

		pr_info("CPU#%d: fixed-PMC%d count: %016llx\n",
			cpu, idx, pmc_count);
	}
	local_irq_restore(flags);
}

static void x86_pmu_disable(struct perf_counter *counter)
{
	struct cpu_hw_counters *cpuc = &__get_cpu_var(cpu_hw_counters);
	struct hw_perf_counter *hwc = &counter->hw;
	int idx = hwc->idx;

	/*
	 * Must be done before we disable, otherwise the nmi handler
	 * could reenable again:
	 */
	clear_bit(idx, cpuc->active_mask);
	x86_pmu.disable(hwc, idx);

	/*
	 * Make sure the cleared pointer becomes visible before we
	 * (potentially) free the counter:
	 */
	barrier();

	/*
	 * Drain the remaining delta count out of a counter
	 * that we are disabling:
	 */
	x86_perf_counter_update(counter, hwc, idx);
	cpuc->counters[idx] = NULL;
	clear_bit(idx, cpuc->used_mask);
}

/*
 * Save and restart an expired counter. Called by NMI contexts,
 * so it has to be careful about preempting normal counter ops:
 */
static void intel_pmu_save_and_restart(struct perf_counter *counter)
{
	struct hw_perf_counter *hwc = &counter->hw;
	int idx = hwc->idx;

	x86_perf_counter_update(counter, hwc, idx);
	x86_perf_counter_set_period(counter, hwc, idx);

	if (counter->state == PERF_COUNTER_STATE_ACTIVE)
		intel_pmu_enable_counter(hwc, idx);
}

static void intel_pmu_reset(void)
{
	unsigned long flags;
	int idx;

	if (!x86_pmu.num_counters)
		return;

	local_irq_save(flags);

	printk("clearing PMU state on CPU#%d\n", smp_processor_id());

	for (idx = 0; idx < x86_pmu.num_counters; idx++) {
		checking_wrmsrl(x86_pmu.eventsel + idx, 0ull);
		checking_wrmsrl(x86_pmu.perfctr  + idx, 0ull);
	}
	for (idx = 0; idx < x86_pmu.num_counters_fixed; idx++) {
		checking_wrmsrl(MSR_ARCH_PERFMON_FIXED_CTR0 + idx, 0ull);
	}

	local_irq_restore(flags);
}

/*
 * This handler is triggered by the local APIC, so the APIC IRQ handling
 * rules apply:
 */
static int intel_pmu_handle_irq(struct pt_regs *regs, int nmi)
{
	struct cpu_hw_counters *cpuc;
	int bit, cpu, loops;
	u64 ack, status;

	cpu = smp_processor_id();
	cpuc = &per_cpu(cpu_hw_counters, cpu);

	perf_disable();
	status = intel_pmu_get_status();
	if (!status) {
		perf_enable();
		return 0;
	}

	loops = 0;
again:
	if (++loops > 100) {
		WARN_ONCE(1, "perfcounters: irq loop stuck!\n");
		perf_counter_print_debug();
		intel_pmu_reset();
		perf_enable();
		return 1;
	}

	inc_irq_stat(apic_perf_irqs);
	ack = status;
	for_each_bit(bit, (unsigned long *)&status, X86_PMC_IDX_MAX) {
		struct perf_counter *counter = cpuc->counters[bit];

		clear_bit(bit, (unsigned long *) &status);
		if (!test_bit(bit, cpuc->active_mask))
			continue;

		intel_pmu_save_and_restart(counter);
		if (perf_counter_overflow(counter, nmi, regs, 0))
			intel_pmu_disable_counter(&counter->hw, bit);
	}

	intel_pmu_ack_status(ack);

	/*
	 * Repeat if there is more work to be done:
	 */
	status = intel_pmu_get_status();
	if (status)
		goto again;

	perf_enable();

	return 1;
}

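/*
 * AMD has no global overflow-status register, so poll every active
 * counter: an updated raw count with the top (sign) bit clear means the
 * counter wrapped and its period needs to be re-armed.
 */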
static int amd_pmu_handle_irq(struct pt_regs *regs, int nmi)
{
	int cpu, idx, handled = 0;
	struct cpu_hw_counters *cpuc;
	struct perf_counter *counter;
	struct hw_perf_counter *hwc;
	u64 val;

	cpu = smp_processor_id();
	cpuc = &per_cpu(cpu_hw_counters, cpu);

	for (idx = 0; idx < x86_pmu.num_counters; idx++) {
		if (!test_bit(idx, cpuc->active_mask))
			continue;

		counter = cpuc->counters[idx];
		hwc = &counter->hw;

		val = x86_perf_counter_update(counter, hwc, idx);
		if (val & (1ULL << (x86_pmu.counter_bits - 1)))
			continue;

		/* counter overflow */
		x86_perf_counter_set_period(counter, hwc, idx);
		handled = 1;
		inc_irq_stat(apic_perf_irqs);
		if (perf_counter_overflow(counter, nmi, regs, 0))
			amd_pmu_disable_counter(hwc, idx);
	}

	return handled;
}

void smp_perf_counter_interrupt(struct pt_regs *regs)
{
	irq_enter();
	apic_write(APIC_LVTPC, LOCAL_PERF_VECTOR);
	ack_APIC_irq();
	x86_pmu.handle_irq(regs, 0);
	irq_exit();
}

void smp_perf_pending_interrupt(struct pt_regs *regs)
{
	irq_enter();
	ack_APIC_irq();
	inc_irq_stat(apic_pending_irqs);
	perf_counter_do_pending();
	irq_exit();
}

void set_perf_counter_pending(void)
{
	apic->send_IPI_self(LOCAL_PENDING_VECTOR);
}

void perf_counters_lapic_init(void)
{
	if (!x86_pmu_initialized())
		return;

	/*
	 * Always use NMI for PMU
	 */
	apic_write(APIC_LVTPC, APIC_DM_NMI);
}

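/*
 * The counter-overflow interrupt is delivered as an NMI so that samples
 * can be taken even in regions that run with interrupts disabled; the
 * die-notifier below claims those NMIs.
 */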
static int __kprobes
perf_counter_nmi_handler(struct notifier_block *self,
			 unsigned long cmd, void *__args)
{
	struct die_args *args = __args;
	struct pt_regs *regs;

	if (!atomic_read(&active_counters))
		return NOTIFY_DONE;

	switch (cmd) {
	case DIE_NMI:
	case DIE_NMI_IPI:
		break;

	default:
		return NOTIFY_DONE;
	}

	regs = args->regs;

	apic_write(APIC_LVTPC, APIC_DM_NMI);
	/*
	 * Can't rely on the handled return value to say it was our NMI, two
	 * counters could trigger 'simultaneously' raising two back-to-back NMIs.
	 *
	 * If the first NMI handles both, the latter will be empty and daze
	 * the CPU.
	 */
	x86_pmu.handle_irq(regs, 1);

	return NOTIFY_STOP;
}

static __read_mostly struct notifier_block perf_counter_nmi_notifier = {
	.notifier_call		= perf_counter_nmi_handler,
	.next			= NULL,
	.priority		= 1
};

static struct x86_pmu intel_pmu = {
	.name			= "Intel",
	.handle_irq		= intel_pmu_handle_irq,
	.disable_all		= intel_pmu_disable_all,
	.enable_all		= intel_pmu_enable_all,
	.enable			= intel_pmu_enable_counter,
	.disable		= intel_pmu_disable_counter,
	.eventsel		= MSR_ARCH_PERFMON_EVENTSEL0,
	.perfctr		= MSR_ARCH_PERFMON_PERFCTR0,
	.event_map		= intel_pmu_event_map,
	.raw_event		= intel_pmu_raw_event,
	.max_events		= ARRAY_SIZE(intel_perfmon_event_map),
	/*
	 * Intel PMCs cannot be accessed sanely above 32 bit width,
	 * so we install an artificial 1<<31 period regardless of
	 * the generic counter period:
	 */
	.max_period		= (1ULL << 31) - 1,
};

static struct x86_pmu amd_pmu = {
	.name			= "AMD",
	.handle_irq		= amd_pmu_handle_irq,
	.disable_all		= amd_pmu_disable_all,
	.enable_all		= amd_pmu_enable_all,
	.enable			= amd_pmu_enable_counter,
	.disable		= amd_pmu_disable_counter,
	.eventsel		= MSR_K7_EVNTSEL0,
	.perfctr		= MSR_K7_PERFCTR0,
	.event_map		= amd_pmu_event_map,
	.raw_event		= amd_pmu_raw_event,
	.max_events		= ARRAY_SIZE(amd_perfmon_event_map),
	.num_counters		= 4,
	.counter_bits		= 48,
	.counter_mask		= (1ULL << 48) - 1,
	/* use highest bit to detect overflow */
	.max_period		= (1ULL << 47) - 1,
};

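/*
 * Probe CPUID leaf 0xA (architectural perfmon) for the PMU version and
 * the number/width of the counters; versions below 2 lack the global
 * control/status MSRs used above and are rejected.
 */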
static int intel_pmu_init(void)
{
	union cpuid10_edx edx;
	union cpuid10_eax eax;
	unsigned int unused;
	unsigned int ebx;
	int version;

	if (!cpu_has(&boot_cpu_data, X86_FEATURE_ARCH_PERFMON))
		return -ENODEV;

	/*
	 * Check whether the Architectural PerfMon supports
	 * Branch Misses Retired Event or not.
	 */
	cpuid(10, &eax.full, &ebx, &unused, &edx.full);
	if (eax.split.mask_length <= ARCH_PERFMON_BRANCH_MISSES_RETIRED)
		return -ENODEV;

	version = eax.split.version_id;
	if (version < 2)
		return -ENODEV;

	x86_pmu = intel_pmu;
	x86_pmu.version = version;
	x86_pmu.num_counters = eax.split.num_counters;

	/*
	 * Quirk: v2 perfmon does not report fixed-purpose counters, so
	 * assume at least 3 counters:
	 */
	x86_pmu.num_counters_fixed = max((int)edx.split.num_counters_fixed, 3);

	x86_pmu.counter_bits = eax.split.bit_width;
	x86_pmu.counter_mask = (1ULL << eax.split.bit_width) - 1;

	rdmsrl(MSR_CORE_PERF_GLOBAL_CTRL, x86_pmu.intel_ctrl);

	return 0;
}

static int amd_pmu_init(void)
{
	x86_pmu = amd_pmu;
	return 0;
}

void __init init_hw_perf_counters(void)
{
	int err;

	switch (boot_cpu_data.x86_vendor) {
	case X86_VENDOR_INTEL:
		err = intel_pmu_init();
		break;
	case X86_VENDOR_AMD:
		err = amd_pmu_init();
		break;
	default:
		return;
	}
	if (err != 0)
		return;

	pr_info("%s Performance Monitoring support detected.\n", x86_pmu.name);
	pr_info("... version:         %d\n", x86_pmu.version);
	pr_info("... bit width:       %d\n", x86_pmu.counter_bits);

	pr_info("... num counters:    %d\n", x86_pmu.num_counters);
	if (x86_pmu.num_counters > X86_PMC_MAX_GENERIC) {
		x86_pmu.num_counters = X86_PMC_MAX_GENERIC;
		WARN(1, KERN_ERR "hw perf counters %d > max(%d), clipping!",
		     x86_pmu.num_counters, X86_PMC_MAX_GENERIC);
	}
	perf_counter_mask = (1 << x86_pmu.num_counters) - 1;
	perf_max_counters = x86_pmu.num_counters;

	pr_info("... value mask:      %016Lx\n", x86_pmu.counter_mask);
	pr_info("... max period:      %016Lx\n", x86_pmu.max_period);

	if (x86_pmu.num_counters_fixed > X86_PMC_MAX_FIXED) {
		x86_pmu.num_counters_fixed = X86_PMC_MAX_FIXED;
		WARN(1, KERN_ERR "hw perf counters fixed %d > max(%d), clipping!",
		     x86_pmu.num_counters_fixed, X86_PMC_MAX_FIXED);
	}
	pr_info("... fixed counters:  %d\n", x86_pmu.num_counters_fixed);

	perf_counter_mask |=
		((1LL << x86_pmu.num_counters_fixed)-1) << X86_PMC_IDX_FIXED;

	pr_info("... counter mask:    %016Lx\n", perf_counter_mask);

	perf_counters_lapic_init();
	register_die_notifier(&perf_counter_nmi_notifier);
}

static inline void x86_pmu_read(struct perf_counter *counter)
{
	x86_perf_counter_update(counter, &counter->hw, counter->hw.idx);
}

static const struct pmu pmu = {
	.enable		= x86_pmu_enable,
	.disable	= x86_pmu_disable,
	.read		= x86_pmu_read,
	.unthrottle	= x86_pmu_unthrottle,
};

const struct pmu *hw_perf_counter_init(struct perf_counter *counter)
{
	int err;

	err = __hw_perf_counter_init(counter);
	if (err)
		return ERR_PTR(err);

	return &pmu;
}

/*
 * callchain support
 */

static inline
void callchain_store(struct perf_callchain_entry *entry, unsigned long ip)
{
	if (entry->nr < MAX_STACK_DEPTH)
		entry->ip[entry->nr++] = ip;
}

static DEFINE_PER_CPU(struct perf_callchain_entry, irq_entry);
static DEFINE_PER_CPU(struct perf_callchain_entry, nmi_entry);

static void
backtrace_warning_symbol(void *data, char *msg, unsigned long symbol)
{
	/* Ignore warnings */
}

static void backtrace_warning(void *data, char *msg)
{
	/* Ignore warnings */
}

static int backtrace_stack(void *data, char *name)
{
	/* Don't bother with IRQ stacks for now */
	return -1;
}

static void backtrace_address(void *data, unsigned long addr, int reliable)
{
	struct perf_callchain_entry *entry = data;

	if (reliable)
		callchain_store(entry, addr);
}

static const struct stacktrace_ops backtrace_ops = {
	.warning		= backtrace_warning,
	.warning_symbol		= backtrace_warning_symbol,
	.stack			= backtrace_stack,
	.address		= backtrace_address,
};

static void
perf_callchain_kernel(struct pt_regs *regs, struct perf_callchain_entry *entry)
{
	unsigned long bp;
	char *stack;
	int nr = entry->nr;

	callchain_store(entry, instruction_pointer(regs));

	stack = ((char *)regs + sizeof(struct pt_regs));
#ifdef CONFIG_FRAME_POINTER
	bp = frame_pointer(regs);
#else
	bp = 0;
#endif

	dump_trace(NULL, regs, (void *)stack, bp, &backtrace_ops, entry);

	entry->kernel = entry->nr - nr;
}

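/*
 * User-space callchains are walked via the saved frame-pointer chain:
 * each frame is copied with pagefaults disabled, so a faulting access
 * simply ends the walk instead of sleeping in NMI context.
 */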
struct stack_frame {
	const void __user	*next_fp;
	unsigned long		return_address;
};

static int copy_stack_frame(const void __user *fp, struct stack_frame *frame)
{
	int ret;

	if (!access_ok(VERIFY_READ, fp, sizeof(*frame)))
		return 0;

	ret = 1;
	pagefault_disable();
	if (__copy_from_user_inatomic(frame, fp, sizeof(*frame)))
		ret = 0;
	pagefault_enable();

	return ret;
}

static void
perf_callchain_user(struct pt_regs *regs, struct perf_callchain_entry *entry)
{
	struct stack_frame frame;
	const void __user *fp;
	int nr = entry->nr;

	regs = (struct pt_regs *)current->thread.sp0 - 1;
	fp   = (void __user *)regs->bp;

	callchain_store(entry, regs->ip);

	while (entry->nr < MAX_STACK_DEPTH) {
		frame.next_fp	     = NULL;
		frame.return_address = 0;

		if (!copy_stack_frame(fp, &frame))
			break;

		if ((unsigned long)fp < user_stack_pointer(regs))
			break;

		callchain_store(entry, frame.return_address);
		fp = frame.next_fp;
	}

	entry->user = entry->nr - nr;
}

static void
perf_do_callchain(struct pt_regs *regs, struct perf_callchain_entry *entry)
{
	int is_user;

	if (!regs)
		return;

	is_user = user_mode(regs);

	if (!current || current->pid == 0)
		return;

	if (is_user && current->state != TASK_RUNNING)
		return;

	if (!is_user)
		perf_callchain_kernel(regs, entry);

	if (current->mm)
		perf_callchain_user(regs, entry);
}

struct perf_callchain_entry *perf_callchain(struct pt_regs *regs)
{
	struct perf_callchain_entry *entry;

	if (in_nmi())
		entry = &__get_cpu_var(nmi_entry);
	else
		entry = &__get_cpu_var(irq_entry);

	entry->nr = 0;
	entry->hv = 0;
	entry->kernel = 0;
	entry->user = 0;

	perf_do_callchain(regs, entry);

	return entry;
}