/*
 * Performance events x86 architecture code
 *
 *  Copyright (C) 2008 Thomas Gleixner <tglx@linutronix.de>
 *  Copyright (C) 2008-2009 Red Hat, Inc., Ingo Molnar
 *  Copyright (C) 2009 Jaswinder Singh Rajput
 *  Copyright (C) 2009 Advanced Micro Devices, Inc., Robert Richter
 *  Copyright (C) 2008-2009 Red Hat, Inc., Peter Zijlstra <pzijlstr@redhat.com>
 *  Copyright (C) 2009 Intel Corporation, <markus.t.metzger@intel.com>
 *  Copyright (C) 2009 Google, Inc., Stephane Eranian
 *
 *  For licencing details see kernel-base/COPYING
 */
#include <linux/perf_event.h>
#include <linux/capability.h>
#include <linux/notifier.h>
#include <linux/hardirq.h>
#include <linux/kprobes.h>
#include <linux/module.h>
#include <linux/kdebug.h>
#include <linux/sched.h>
#include <linux/uaccess.h>
#include <linux/slab.h>
#include <linux/highmem.h>
#include <linux/cpu.h>
#include <linux/bitops.h>

#include <asm/apic.h>
#include <asm/stacktrace.h>
#include <asm/nmi.h>
#include <asm/compat.h>
static u64 perf_event_mask __read_mostly;

/* The maximal number of PEBS events: */
#define MAX_PEBS_EVENTS         4

/* The size of a BTS record in bytes: */
#define BTS_RECORD_SIZE         24

/* The size of a per-cpu BTS buffer in bytes: */
#define BTS_BUFFER_SIZE         (BTS_RECORD_SIZE * 2048)

/* The BTS overflow threshold in bytes from the end of the buffer: */
#define BTS_OVFL_TH             (BTS_RECORD_SIZE * 128)

/*
 * Bits in the debugctlmsr controlling branch tracing.
 */
#define X86_DEBUGCTL_TR                 (1 << 6)
#define X86_DEBUGCTL_BTS                (1 << 7)
#define X86_DEBUGCTL_BTINT              (1 << 8)
#define X86_DEBUGCTL_BTS_OFF_OS         (1 << 9)
#define X86_DEBUGCTL_BTS_OFF_USR        (1 << 10)
/*
 * A debug store configuration.
 *
 * We only support architectures that use 64bit fields.
 */
struct debug_store {
        u64     bts_buffer_base;
        u64     bts_index;
        u64     bts_absolute_maximum;
        u64     bts_interrupt_threshold;
        u64     pebs_buffer_base;
        u64     pebs_index;
        u64     pebs_absolute_maximum;
        u64     pebs_interrupt_threshold;
        u64     pebs_event_reset[MAX_PEBS_EVENTS];
};
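
/*
 * A counter constraint: 'code' is the event code the constraint applies
 * to (compared under 'cmask'), 'idxmsk' is the bitmask of counters the
 * event is allowed to run on, and 'weight' is the number of bits set in
 * that mask (see the EVENT_CONSTRAINT() helpers below).
 */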
struct event_constraint {
        union {
                unsigned long   idxmsk[BITS_TO_LONGS(X86_PMC_IDX_MAX)];
                u64             idxmsk64;
        };
        u64     code;
        u64     cmask;
        int     weight;
};
struct amd_nb {
        int nb_id;  /* NorthBridge id */
        int refcnt; /* reference count */
        struct perf_event *owners[X86_PMC_IDX_MAX];
        struct event_constraint event_constraints[X86_PMC_IDX_MAX];
};
struct cpu_hw_events {
        struct perf_event       *events[X86_PMC_IDX_MAX]; /* in counter order */
        unsigned long           active_mask[BITS_TO_LONGS(X86_PMC_IDX_MAX)];
        unsigned long           interrupts;
        int                     enabled;
        struct debug_store      *ds;

        int                     n_events;
        int                     n_added;
        int                     assign[X86_PMC_IDX_MAX]; /* event to counter assignment */
        u64                     tags[X86_PMC_IDX_MAX];
        struct perf_event       *event_list[X86_PMC_IDX_MAX]; /* in enabled order */
        struct amd_nb           *amd_nb;
};
#define __EVENT_CONSTRAINT(c, n, m, w) {\
        { .idxmsk64 = (n) },            \
        .code = (c),                    \
        .cmask = (m),                   \
        .weight = (w),                  \
}

#define EVENT_CONSTRAINT(c, n, m)       \
        __EVENT_CONSTRAINT(c, n, m, HWEIGHT(n))

#define INTEL_EVENT_CONSTRAINT(c, n)    \
        EVENT_CONSTRAINT(c, n, INTEL_ARCH_EVTSEL_MASK)

#define FIXED_EVENT_CONSTRAINT(c, n)    \
        EVENT_CONSTRAINT(c, (1ULL << (32+n)), INTEL_ARCH_FIXED_MASK)

#define EVENT_CONSTRAINT_END            \
        EVENT_CONSTRAINT(0, 0, 0)

#define for_each_event_constraint(e, c) \
        for ((e) = (c); (e)->cmask; (e)++)
/*
 * struct x86_pmu - generic x86 pmu
 */
struct x86_pmu {
        const char      *name;
        int             version;
        int             (*handle_irq)(struct pt_regs *);
        void            (*disable_all)(void);
        void            (*enable_all)(void);
        void            (*enable)(struct perf_event *);
        void            (*disable)(struct perf_event *);
        unsigned        eventsel;
        unsigned        perfctr;
        u64             (*event_map)(int);
        u64             (*raw_event)(u64);
        int             max_events;
        int             num_events;
        int             num_events_fixed;
        int             event_bits;
        u64             event_mask;
        int             apic;
        u64             max_period;
        u64             intel_ctrl;
        void            (*enable_bts)(u64 config);
        void            (*disable_bts)(void);

        struct event_constraint *
                        (*get_event_constraints)(struct cpu_hw_events *cpuc,
                                                 struct perf_event *event);

        void            (*put_event_constraints)(struct cpu_hw_events *cpuc,
                                                  struct perf_event *event);
        struct event_constraint *event_constraints;

        int             (*cpu_prepare)(int cpu);
        void            (*cpu_starting)(int cpu);
        void            (*cpu_dying)(int cpu);
        void            (*cpu_dead)(int cpu);
};

static struct x86_pmu x86_pmu __read_mostly;
static DEFINE_PER_CPU(struct cpu_hw_events, cpu_hw_events) = {
        .enabled = 1,
};

static int x86_perf_event_set_period(struct perf_event *event);
/*
 * Generalized hw caching related hw_event table, filled
 * in on a per model basis. A value of 0 means
 * 'not supported', -1 means 'hw_event makes no sense on
 * this CPU', any other value means the raw hw_event
 * ID.
 */

#define C(x) PERF_COUNT_HW_CACHE_##x

static u64 __read_mostly hw_cache_event_ids
                                [PERF_COUNT_HW_CACHE_MAX]
                                [PERF_COUNT_HW_CACHE_OP_MAX]
                                [PERF_COUNT_HW_CACHE_RESULT_MAX];
/*
 * Propagate event elapsed time into the generic event.
 * Can only be executed on the CPU where the event is active.
 * Returns the delta events processed.
 */
static u64
x86_perf_event_update(struct perf_event *event)
{
        struct hw_perf_event *hwc = &event->hw;
        int shift = 64 - x86_pmu.event_bits;
        u64 prev_raw_count, new_raw_count;
        int idx = hwc->idx;
        s64 delta;

        if (idx == X86_PMC_IDX_FIXED_BTS)
                return 0;

        /*
         * Careful: an NMI might modify the previous event value.
         *
         * Our tactic to handle this is to first atomically read and
         * exchange a new raw count - then add that new-prev delta
         * count to the generic event atomically:
         */
again:
        prev_raw_count = atomic64_read(&hwc->prev_count);
        rdmsrl(hwc->event_base + idx, new_raw_count);

        if (atomic64_cmpxchg(&hwc->prev_count, prev_raw_count,
                             new_raw_count) != prev_raw_count)
                goto again;

        /*
         * Now we have the new raw value and have updated the prev
         * timestamp already. We can now calculate the elapsed delta
         * (event-)time and add that to the generic event.
         *
         * Careful, not all hw sign-extends above the physical width
         * of the count.
         */
        delta = (new_raw_count << shift) - (prev_raw_count << shift);
        delta >>= shift;

        atomic64_add(delta, &event->count);
        atomic64_sub(delta, &hwc->period_left);

        return new_raw_count;
}
static atomic_t active_events;
static DEFINE_MUTEX(pmc_reserve_mutex);
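
/*
 * Reserve the PMC hardware for perf: grab the per-counter perfctr and
 * eventsel MSRs through the NMI-watchdog reservation interface, and keep
 * the LAPIC NMI watchdog disabled while perf uses the counters. On
 * failure, everything reserved so far is released again.
 */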
static bool reserve_pmc_hardware(void)
{
#ifdef CONFIG_X86_LOCAL_APIC
        int i;

        if (nmi_watchdog == NMI_LOCAL_APIC)
                disable_lapic_nmi_watchdog();

        for (i = 0; i < x86_pmu.num_events; i++) {
                if (!reserve_perfctr_nmi(x86_pmu.perfctr + i))
                        goto perfctr_fail;
        }

        for (i = 0; i < x86_pmu.num_events; i++) {
                if (!reserve_evntsel_nmi(x86_pmu.eventsel + i))
                        goto eventsel_fail;
        }
#endif

        return true;

#ifdef CONFIG_X86_LOCAL_APIC
eventsel_fail:
        for (i--; i >= 0; i--)
                release_evntsel_nmi(x86_pmu.eventsel + i);

        i = x86_pmu.num_events;

perfctr_fail:
        for (i--; i >= 0; i--)
                release_perfctr_nmi(x86_pmu.perfctr + i);

        if (nmi_watchdog == NMI_LOCAL_APIC)
                enable_lapic_nmi_watchdog();

        return false;
#endif
}
static void release_pmc_hardware(void)
{
#ifdef CONFIG_X86_LOCAL_APIC
        int i;

        for (i = 0; i < x86_pmu.num_events; i++) {
                release_perfctr_nmi(x86_pmu.perfctr + i);
                release_evntsel_nmi(x86_pmu.eventsel + i);
        }

        if (nmi_watchdog == NMI_LOCAL_APIC)
                enable_lapic_nmi_watchdog();
#endif
}
static inline bool bts_available(void)
{
        return x86_pmu.enable_bts != NULL;
}

static void init_debug_store_on_cpu(int cpu)
{
        struct debug_store *ds = per_cpu(cpu_hw_events, cpu).ds;

        if (!ds)
                return;

        wrmsr_on_cpu(cpu, MSR_IA32_DS_AREA,
                     (u32)((u64)(unsigned long)ds),
                     (u32)((u64)(unsigned long)ds >> 32));
}

static void fini_debug_store_on_cpu(int cpu)
{
        if (!per_cpu(cpu_hw_events, cpu).ds)
                return;

        wrmsr_on_cpu(cpu, MSR_IA32_DS_AREA, 0, 0);
}
static void release_bts_hardware(void)
{
        int cpu;

        if (!bts_available())
                return;

        get_online_cpus();

        for_each_online_cpu(cpu)
                fini_debug_store_on_cpu(cpu);

        for_each_possible_cpu(cpu) {
                struct debug_store *ds = per_cpu(cpu_hw_events, cpu).ds;

                if (!ds)
                        continue;

                per_cpu(cpu_hw_events, cpu).ds = NULL;

                kfree((void *)(unsigned long)ds->bts_buffer_base);
                kfree(ds);
        }

        put_online_cpus();
}
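
/*
 * Allocate a BTS_BUFFER_SIZE branch-trace buffer and a debug_store for
 * each possible CPU: bts_index starts at the buffer base, the absolute
 * maximum is the end of the buffer, and the interrupt threshold sits
 * BTS_OVFL_TH bytes before that end.
 */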
static int reserve_bts_hardware(void)
{
        int cpu, err = 0;

        if (!bts_available())
                return 0;

        get_online_cpus();

        for_each_possible_cpu(cpu) {
                struct debug_store *ds;
                void *buffer;

                err = -ENOMEM;
                buffer = kzalloc(BTS_BUFFER_SIZE, GFP_KERNEL);
                if (unlikely(!buffer))
                        break;

                ds = kzalloc(sizeof(*ds), GFP_KERNEL);
                if (unlikely(!ds)) {
                        kfree(buffer);
                        break;
                }

                ds->bts_buffer_base = (u64)(unsigned long)buffer;
                ds->bts_index = ds->bts_buffer_base;
                ds->bts_absolute_maximum =
                        ds->bts_buffer_base + BTS_BUFFER_SIZE;
                ds->bts_interrupt_threshold =
                        ds->bts_absolute_maximum - BTS_OVFL_TH;

                per_cpu(cpu_hw_events, cpu).ds = ds;
                err = 0;
        }

        if (err)
                release_bts_hardware();
        else {
                for_each_online_cpu(cpu)
                        init_debug_store_on_cpu(cpu);
        }

        put_online_cpus();

        return err;
}
static void hw_perf_event_destroy(struct perf_event *event)
{
        if (atomic_dec_and_mutex_lock(&active_events, &pmc_reserve_mutex)) {
                release_pmc_hardware();
                release_bts_hardware();
                mutex_unlock(&pmc_reserve_mutex);
        }
}

static inline int x86_pmu_initialized(void)
{
        return x86_pmu.handle_irq != NULL;
}
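
/*
 * Decode a PERF_TYPE_HW_CACHE config: cache type in bits 0-7, cache op
 * in bits 8-15, op result in bits 16-23. The triple indexes
 * hw_cache_event_ids[]; 0 means the combination is not supported
 * (-ENOENT) and -1 means it makes no sense on this CPU (-EINVAL).
 */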
static inline int
set_ext_hw_attr(struct hw_perf_event *hwc, struct perf_event_attr *attr)
{
        unsigned int cache_type, cache_op, cache_result;
        u64 config, val;

        config = attr->config;

        cache_type = (config >> 0) & 0xff;
        if (cache_type >= PERF_COUNT_HW_CACHE_MAX)
                return -EINVAL;

        cache_op = (config >> 8) & 0xff;
        if (cache_op >= PERF_COUNT_HW_CACHE_OP_MAX)
                return -EINVAL;

        cache_result = (config >> 16) & 0xff;
        if (cache_result >= PERF_COUNT_HW_CACHE_RESULT_MAX)
                return -EINVAL;

        val = hw_cache_event_ids[cache_type][cache_op][cache_result];

        if (val == 0)
                return -ENOENT;

        if (val == -1)
                return -EINVAL;

        hwc->config |= val;

        return 0;
}
/*
 * Setup the hardware configuration for a given attr_type
 */
static int __hw_perf_event_init(struct perf_event *event)
{
        struct perf_event_attr *attr = &event->attr;
        struct hw_perf_event *hwc = &event->hw;
        u64 config;
        int err;

        if (!x86_pmu_initialized())
                return -ENODEV;

        err = 0;
        if (!atomic_inc_not_zero(&active_events)) {
                mutex_lock(&pmc_reserve_mutex);
                if (atomic_read(&active_events) == 0) {
                        if (!reserve_pmc_hardware())
                                err = -EBUSY;
                        else
                                err = reserve_bts_hardware();
                }
                if (!err)
                        atomic_inc(&active_events);
                mutex_unlock(&pmc_reserve_mutex);
        }
        if (err)
                return err;

        event->destroy = hw_perf_event_destroy;

        /*
         * Generate PMC IRQs:
         * (keep 'enabled' bit clear for now)
         */
        hwc->config = ARCH_PERFMON_EVENTSEL_INT;

        hwc->idx = -1;
        hwc->last_cpu = -1;
        hwc->last_tag = ~0ULL;

        /*
         * Count user and OS events unless requested not to.
         */
        if (!attr->exclude_user)
                hwc->config |= ARCH_PERFMON_EVENTSEL_USR;
        if (!attr->exclude_kernel)
                hwc->config |= ARCH_PERFMON_EVENTSEL_OS;

        if (!hwc->sample_period) {
                hwc->sample_period = x86_pmu.max_period;
                hwc->last_period = hwc->sample_period;
                atomic64_set(&hwc->period_left, hwc->sample_period);
        } else {
                /*
                 * If we have a PMU initialized but no APIC
                 * interrupts, we cannot sample hardware
                 * events (user-space has to fall back and
                 * sample via a hrtimer based software event):
                 */
                if (!x86_pmu.apic)
                        return -EOPNOTSUPP;
        }

        /*
         * Raw hw_event type provides the config in the hw_event structure
         */
        if (attr->type == PERF_TYPE_RAW) {
                hwc->config |= x86_pmu.raw_event(attr->config);
                if ((hwc->config & ARCH_PERFMON_EVENTSEL_ANY) &&
                    perf_paranoid_cpu() && !capable(CAP_SYS_ADMIN))
                        return -EACCES;
                return 0;
        }

        if (attr->type == PERF_TYPE_HW_CACHE)
                return set_ext_hw_attr(hwc, attr);

        if (attr->config >= x86_pmu.max_events)
                return -EINVAL;

        /*
         * The generic map:
         */
        config = x86_pmu.event_map(attr->config);

        if (config == 0)
                return -ENOENT;

        if (config == -1LL)
                return -EINVAL;

        /*
         * Branch tracing:
         */
        if ((attr->config == PERF_COUNT_HW_BRANCH_INSTRUCTIONS) &&
            (hwc->sample_period == 1)) {
                /* BTS is not supported by this architecture. */
                if (!bts_available())
                        return -EOPNOTSUPP;

                /* BTS is currently only allowed for user-mode. */
                if (hwc->config & ARCH_PERFMON_EVENTSEL_OS)
                        return -EOPNOTSUPP;
        }

        hwc->config |= config;

        return 0;
}
static void x86_pmu_disable_all(void)
{
        struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
        int idx;

        for (idx = 0; idx < x86_pmu.num_events; idx++) {
                u64 val;

                if (!test_bit(idx, cpuc->active_mask))
                        continue;
                rdmsrl(x86_pmu.eventsel + idx, val);
                if (!(val & ARCH_PERFMON_EVENTSEL_ENABLE))
                        continue;
                val &= ~ARCH_PERFMON_EVENTSEL_ENABLE;
                wrmsrl(x86_pmu.eventsel + idx, val);
        }
}

void hw_perf_disable(void)
{
        struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);

        if (!x86_pmu_initialized())
                return;

        if (!cpuc->enabled)
                return;

        cpuc->n_added = 0;
        cpuc->enabled = 0;
        barrier();

        x86_pmu.disable_all();
}
static void x86_pmu_enable_all(void)
{
        struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
        int idx;

        for (idx = 0; idx < x86_pmu.num_events; idx++) {
                struct perf_event *event = cpuc->events[idx];
                u64 val;

                if (!test_bit(idx, cpuc->active_mask))
                        continue;

                val = event->hw.config;
                val |= ARCH_PERFMON_EVENTSEL_ENABLE;
                wrmsrl(x86_pmu.eventsel + idx, val);
        }
}

static const struct pmu pmu;

static inline int is_x86_event(struct perf_event *event)
{
        return event->pmu == &pmu;
}
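
/*
 * Assign the collected events to counters while honouring their
 * constraints: first try the fast path that keeps every event on the
 * counter it used last time; if that fails, fall back to a greedy pass
 * that places the most constrained events (smallest weight) first.
 * With assign == NULL this only simulates scheduling (validate_group()).
 * Returns 0 on success or -ENOSPC if the events do not fit.
 */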
static int x86_schedule_events(struct cpu_hw_events *cpuc, int n, int *assign)
{
        struct event_constraint *c, *constraints[X86_PMC_IDX_MAX];
        unsigned long used_mask[BITS_TO_LONGS(X86_PMC_IDX_MAX)];
        int i, j, w, wmax, num = 0;
        struct hw_perf_event *hwc;

        bitmap_zero(used_mask, X86_PMC_IDX_MAX);

        for (i = 0; i < n; i++) {
                c = x86_pmu.get_event_constraints(cpuc, cpuc->event_list[i]);
                constraints[i] = c;
        }

        /*
         * fastpath, try to reuse previous register
         */
        for (i = 0; i < n; i++) {
                hwc = &cpuc->event_list[i]->hw;
                c = constraints[i];

                /* never assigned */
                if (hwc->idx == -1)
                        break;

                /* constraint still honored */
                if (!test_bit(hwc->idx, c->idxmsk))
                        break;

                /* not already used */
                if (test_bit(hwc->idx, used_mask))
                        break;

                __set_bit(hwc->idx, used_mask);
                if (assign)
                        assign[i] = hwc->idx;
        }
        if (i == n)
                goto done;

        /*
         * begin slow path
         */

        bitmap_zero(used_mask, X86_PMC_IDX_MAX);

        /*
         * weight = number of possible counters
         *
         * 1    = most constrained, only works on one counter
         * wmax = least constrained, works on any counter
         *
         * assign events to counters starting with most
         * constrained events.
         */
        wmax = x86_pmu.num_events;

        /*
         * when fixed event counters are present,
         * wmax is incremented by 1 to account
         * for one more choice
         */
        if (x86_pmu.num_events_fixed)
                wmax++;

        for (w = 1, num = n; num && w <= wmax; w++) {
                /* for each event */
                for (i = 0; num && i < n; i++) {
                        c = constraints[i];
                        hwc = &cpuc->event_list[i]->hw;

                        if (c->weight != w)
                                continue;

                        for_each_set_bit(j, c->idxmsk, X86_PMC_IDX_MAX) {
                                if (!test_bit(j, used_mask))
                                        break;
                        }

                        if (j == X86_PMC_IDX_MAX)
                                break;

                        __set_bit(j, used_mask);

                        if (assign)
                                assign[i] = j;
                        num--;
                }
        }
done:
        /*
         * scheduling failed or is just a simulation,
         * free resources if necessary
         */
        if (!assign || num) {
                for (i = 0; i < n; i++) {
                        if (x86_pmu.put_event_constraints)
                                x86_pmu.put_event_constraints(cpuc, cpuc->event_list[i]);
                }
        }
        return num ? -ENOSPC : 0;
}
/*
 * dogrp: true if we must collect sibling events (group)
 * returns total number of events and error code
 */
static int collect_events(struct cpu_hw_events *cpuc, struct perf_event *leader, bool dogrp)
{
        struct perf_event *event;
        int n, max_count;

        max_count = x86_pmu.num_events + x86_pmu.num_events_fixed;

        /* current number of events already accepted */
        n = cpuc->n_events;

        if (is_x86_event(leader)) {
                if (n >= max_count)
                        return -ENOSPC;
                cpuc->event_list[n] = leader;
                n++;
        }
        if (!dogrp)
                return n;

        list_for_each_entry(event, &leader->sibling_list, group_entry) {
                if (!is_x86_event(event) ||
                    event->state <= PERF_EVENT_STATE_OFF)
                        continue;

                if (n >= max_count)
                        return -ENOSPC;

                cpuc->event_list[n] = event;
                n++;
        }
        return n;
}
static inline void x86_assign_hw_event(struct perf_event *event,
                                struct cpu_hw_events *cpuc, int i)
{
        struct hw_perf_event *hwc = &event->hw;

        hwc->idx = cpuc->assign[i];
        hwc->last_cpu = smp_processor_id();
        hwc->last_tag = ++cpuc->tags[i];

        if (hwc->idx == X86_PMC_IDX_FIXED_BTS) {
                hwc->config_base = 0;
                hwc->event_base  = 0;
        } else if (hwc->idx >= X86_PMC_IDX_FIXED) {
                hwc->config_base = MSR_ARCH_PERFMON_FIXED_CTR_CTRL;
                /*
                 * We set it so that event_base + idx in wrmsr/rdmsr maps to
                 * MSR_ARCH_PERFMON_FIXED_CTR0 ... CTR2:
                 */
                hwc->event_base =
                        MSR_ARCH_PERFMON_FIXED_CTR0 - X86_PMC_IDX_FIXED;
        } else {
                hwc->config_base = x86_pmu.eventsel;
                hwc->event_base  = x86_pmu.perfctr;
        }
}
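
/*
 * An event can keep its previous counter programming only if it is still
 * assigned the same counter, we are on the same CPU as last time, and the
 * tag shows no other event has used that counter since.
 */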
static inline int match_prev_assignment(struct hw_perf_event *hwc,
                                        struct cpu_hw_events *cpuc,
                                        int i)
{
        return hwc->idx == cpuc->assign[i] &&
                hwc->last_cpu == smp_processor_id() &&
                hwc->last_tag == cpuc->tags[i];
}
static int x86_pmu_start(struct perf_event *event);
static void x86_pmu_stop(struct perf_event *event);

void hw_perf_enable(void)
{
        struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
        struct perf_event *event;
        struct hw_perf_event *hwc;
        int i;

        if (!x86_pmu_initialized())
                return;

        if (cpuc->enabled)
                return;

        if (cpuc->n_added) {
                int n_running = cpuc->n_events - cpuc->n_added;
                /*
                 * apply assignment obtained either from
                 * hw_perf_group_sched_in() or x86_pmu_enable()
                 *
                 * step1: save events moving to new counters
                 * step2: reprogram moved events into new counters
                 */
                for (i = 0; i < n_running; i++) {
                        event = cpuc->event_list[i];
                        hwc = &event->hw;

                        /*
                         * we can avoid reprogramming counter if:
                         * - assigned same counter as last time
                         * - running on same CPU as last time
                         * - no other event has used the counter since
                         */
                        if (hwc->idx == -1 ||
                            match_prev_assignment(hwc, cpuc, i))
                                continue;

                        x86_pmu_stop(event);
                }

                for (i = 0; i < cpuc->n_events; i++) {
                        event = cpuc->event_list[i];
                        hwc = &event->hw;

                        if (!match_prev_assignment(hwc, cpuc, i))
                                x86_assign_hw_event(event, cpuc, i);
                        else if (i < n_running)
                                continue;

                        x86_pmu_start(event);
                }
                cpuc->n_added = 0;
                perf_events_lapic_init();
        }

        cpuc->enabled = 1;
        barrier();

        x86_pmu.enable_all();
}
static inline void __x86_pmu_enable_event(struct hw_perf_event *hwc)
{
        (void)checking_wrmsrl(hwc->config_base + hwc->idx,
                              hwc->config | ARCH_PERFMON_EVENTSEL_ENABLE);
}

static inline void x86_pmu_disable_event(struct perf_event *event)
{
        struct hw_perf_event *hwc = &event->hw;
        (void)checking_wrmsrl(hwc->config_base + hwc->idx, hwc->config);
}
static DEFINE_PER_CPU(u64 [X86_PMC_IDX_MAX], pmc_prev_left);

/*
 * Set the next IRQ period, based on the hwc->period_left value.
 * To be called with the event disabled in hw:
 */
static int
x86_perf_event_set_period(struct perf_event *event)
{
        struct hw_perf_event *hwc = &event->hw;
        s64 left = atomic64_read(&hwc->period_left);
        s64 period = hwc->sample_period;
        int err, ret = 0, idx = hwc->idx;

        if (idx == X86_PMC_IDX_FIXED_BTS)
                return 0;

        /*
         * If we are way outside a reasonable range then just skip forward:
         */
        if (unlikely(left <= -period)) {
                left = period;
                atomic64_set(&hwc->period_left, left);
                hwc->last_period = period;
                ret = 1;
        }

        if (unlikely(left <= 0)) {
                left += period;
                atomic64_set(&hwc->period_left, left);
                hwc->last_period = period;
                ret = 1;
        }
        /*
         * Quirk: certain CPUs don't like it if just 1 hw_event is left:
         */
        if (unlikely(left < 2))
                left = 2;

        if (left > x86_pmu.max_period)
                left = x86_pmu.max_period;

        per_cpu(pmc_prev_left[idx], smp_processor_id()) = left;

        /*
         * The hw event starts counting from this event offset,
         * mark it to be able to extract future deltas:
         */
        atomic64_set(&hwc->prev_count, (u64)-left);

        err = checking_wrmsrl(hwc->event_base + idx,
                              (u64)(-left) & x86_pmu.event_mask);

        perf_event_update_userpage(event);

        return ret;
}
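
/*
 * Write the enable bit only while the PMU is globally enabled; if it is
 * currently disabled, hw_perf_enable() will (re)program and enable the
 * event when the PMU is turned back on.
 */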
static void x86_pmu_enable_event(struct perf_event *event)
{
        struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);

        if (cpuc->enabled)
                __x86_pmu_enable_event(&event->hw);
}
/*
 * activate a single event
 *
 * The event is added to the group of enabled events
 * but only if it can be scheduled with existing events.
 *
 * Called with PMU disabled. If successful and return value 1,
 * then guaranteed to call perf_enable() and hw_perf_enable()
 */
static int x86_pmu_enable(struct perf_event *event)
{
        struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
        struct hw_perf_event *hwc;
        int assign[X86_PMC_IDX_MAX];
        int n, n0, ret;

        hwc = &event->hw;

        n0 = cpuc->n_events;
        n = collect_events(cpuc, event, false);
        if (n < 0)
                return n;

        ret = x86_schedule_events(cpuc, n, assign);
        if (ret)
                return ret;
        /*
         * copy new assignment, now we know it is possible
         * will be used by hw_perf_enable()
         */
        memcpy(cpuc->assign, assign, n*sizeof(int));

        cpuc->n_events = n;
        cpuc->n_added += n - n0;

        return 0;
}
static int x86_pmu_start(struct perf_event *event)
{
        struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
        int idx = event->hw.idx;

        if (idx == -1)
                return -EAGAIN;

        x86_perf_event_set_period(event);
        cpuc->events[idx] = event;
        __set_bit(idx, cpuc->active_mask);
        x86_pmu.enable(event);
        perf_event_update_userpage(event);

        return 0;
}

static void x86_pmu_unthrottle(struct perf_event *event)
{
        int ret = x86_pmu_start(event);
        WARN_ON_ONCE(ret);
}
void perf_event_print_debug(void)
{
        u64 ctrl, status, overflow, pmc_ctrl, pmc_count, prev_left, fixed;
        struct cpu_hw_events *cpuc;
        unsigned long flags;
        int cpu, idx;

        if (!x86_pmu.num_events)
                return;

        local_irq_save(flags);

        cpu = smp_processor_id();
        cpuc = &per_cpu(cpu_hw_events, cpu);

        if (x86_pmu.version >= 2) {
                rdmsrl(MSR_CORE_PERF_GLOBAL_CTRL, ctrl);
                rdmsrl(MSR_CORE_PERF_GLOBAL_STATUS, status);
                rdmsrl(MSR_CORE_PERF_GLOBAL_OVF_CTRL, overflow);
                rdmsrl(MSR_ARCH_PERFMON_FIXED_CTR_CTRL, fixed);

                pr_info("\n");
                pr_info("CPU#%d: ctrl:       %016llx\n", cpu, ctrl);
                pr_info("CPU#%d: status:     %016llx\n", cpu, status);
                pr_info("CPU#%d: overflow:   %016llx\n", cpu, overflow);
                pr_info("CPU#%d: fixed:      %016llx\n", cpu, fixed);
        }
        pr_info("CPU#%d: active:     %016llx\n", cpu, *(u64 *)cpuc->active_mask);

        for (idx = 0; idx < x86_pmu.num_events; idx++) {
                rdmsrl(x86_pmu.eventsel + idx, pmc_ctrl);
                rdmsrl(x86_pmu.perfctr + idx, pmc_count);

                prev_left = per_cpu(pmc_prev_left[idx], cpu);

                pr_info("CPU#%d:   gen-PMC%d ctrl:  %016llx\n",
                        cpu, idx, pmc_ctrl);
                pr_info("CPU#%d:   gen-PMC%d count: %016llx\n",
                        cpu, idx, pmc_count);
                pr_info("CPU#%d:   gen-PMC%d left:  %016llx\n",
                        cpu, idx, prev_left);
        }
        for (idx = 0; idx < x86_pmu.num_events_fixed; idx++) {
                rdmsrl(MSR_ARCH_PERFMON_FIXED_CTR0 + idx, pmc_count);

                pr_info("CPU#%d: fixed-PMC%d count: %016llx\n",
                        cpu, idx, pmc_count);
        }
        local_irq_restore(flags);
}
static void x86_pmu_stop(struct perf_event *event)
{
        struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
        struct hw_perf_event *hwc = &event->hw;
        int idx = hwc->idx;

        if (!__test_and_clear_bit(idx, cpuc->active_mask))
                return;

        x86_pmu.disable(event);

        /*
         * Drain the remaining delta count out of an event
         * that we are disabling:
         */
        x86_perf_event_update(event);

        cpuc->events[idx] = NULL;
}

static void x86_pmu_disable(struct perf_event *event)
{
        struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
        int i;

        x86_pmu_stop(event);

        for (i = 0; i < cpuc->n_events; i++) {
                if (event == cpuc->event_list[i]) {

                        if (x86_pmu.put_event_constraints)
                                x86_pmu.put_event_constraints(cpuc, event);

                        while (++i < cpuc->n_events)
                                cpuc->event_list[i-1] = cpuc->event_list[i];

                        --cpuc->n_events;
                        break;
                }
        }
        perf_event_update_userpage(event);
}
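
/*
 * PMI handler: counters are programmed to start at -(sample period), so
 * a counter that has overflowed has its sign bit (bit event_bits - 1)
 * cleared after x86_perf_event_update(). Counters with that bit still
 * set have not overflowed and are skipped.
 */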
static int x86_pmu_handle_irq(struct pt_regs *regs)
{
        struct perf_sample_data data;
        struct cpu_hw_events *cpuc;
        struct perf_event *event;
        struct hw_perf_event *hwc;
        int idx, handled = 0;
        u64 val;

        perf_sample_data_init(&data, 0);

        cpuc = &__get_cpu_var(cpu_hw_events);

        for (idx = 0; idx < x86_pmu.num_events; idx++) {
                if (!test_bit(idx, cpuc->active_mask))
                        continue;

                event = cpuc->events[idx];
                hwc = &event->hw;

                val = x86_perf_event_update(event);
                if (val & (1ULL << (x86_pmu.event_bits - 1)))
                        continue;

                /*
                 * event overflow
                 */
                handled     = 1;
                data.period = event->hw.last_period;

                if (!x86_perf_event_set_period(event))
                        continue;

                if (perf_event_overflow(event, 1, &data, regs))
                        x86_pmu_stop(event);
        }

        if (handled)
                inc_irq_stat(apic_perf_irqs);

        return handled;
}
void smp_perf_pending_interrupt(struct pt_regs *regs)
{
        irq_enter();
        ack_APIC_irq();
        inc_irq_stat(apic_pending_irqs);
        perf_event_do_pending();
        irq_exit();
}

void set_perf_event_pending(void)
{
#ifdef CONFIG_X86_LOCAL_APIC
        if (!x86_pmu.apic || !x86_pmu_initialized())
                return;

        apic->send_IPI_self(LOCAL_PENDING_VECTOR);
#endif
}

void perf_events_lapic_init(void)
{
#ifdef CONFIG_X86_LOCAL_APIC
        if (!x86_pmu.apic || !x86_pmu_initialized())
                return;

        /*
         * Always use NMI for PMU
         */
        apic_write(APIC_LVTPC, APIC_DM_NMI);
#endif
}
static int __kprobes
perf_event_nmi_handler(struct notifier_block *self,
                        unsigned long cmd, void *__args)
{
        struct die_args *args = __args;
        struct pt_regs *regs;

        if (!atomic_read(&active_events))
                return NOTIFY_DONE;

        switch (cmd) {
        case DIE_NMI:
        case DIE_NMI_IPI:
                break;

        default:
                return NOTIFY_DONE;
        }

        regs = args->regs;

#ifdef CONFIG_X86_LOCAL_APIC
        apic_write(APIC_LVTPC, APIC_DM_NMI);
#endif
        /*
         * Can't rely on the handled return value to say it was our NMI, two
         * events could trigger 'simultaneously' raising two back-to-back NMIs.
         *
         * If the first NMI handles both, the latter will be empty and daze
         * the CPU.
         */
        x86_pmu.handle_irq(regs);

        return NOTIFY_STOP;
}

static __read_mostly struct notifier_block perf_event_nmi_notifier = {
        .notifier_call          = perf_event_nmi_handler,
        .next                   = NULL,
        .priority               = 1
};
static struct event_constraint unconstrained;
static struct event_constraint emptyconstraint;

static struct event_constraint *
x86_get_event_constraints(struct cpu_hw_events *cpuc, struct perf_event *event)
{
        struct event_constraint *c;

        if (x86_pmu.event_constraints) {
                for_each_event_constraint(c, x86_pmu.event_constraints) {
                        if ((event->hw.config & c->cmask) == c->code)
                                return c;
                }
        }

        return &unconstrained;
}
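
/*
 * Helpers for hw_perf_group_sched_in(): mark an event (x86 or software
 * sibling) active/inactive, update its timestamps, and keep the
 * cpuctx->active_oncpu and ->exclusive bookkeeping consistent.
 */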
static int x86_event_sched_in(struct perf_event *event,
                          struct perf_cpu_context *cpuctx)
{
        int ret = 0;

        event->state = PERF_EVENT_STATE_ACTIVE;
        event->oncpu = smp_processor_id();
        event->tstamp_running += event->ctx->time - event->tstamp_stopped;

        if (!is_x86_event(event))
                ret = event->pmu->enable(event);

        if (!ret && !is_software_event(event))
                cpuctx->active_oncpu++;

        if (!ret && event->attr.exclusive)
                cpuctx->exclusive = 1;

        return ret;
}

static void x86_event_sched_out(struct perf_event *event,
                            struct perf_cpu_context *cpuctx)
{
        event->state = PERF_EVENT_STATE_INACTIVE;
        event->oncpu = -1;

        if (!is_x86_event(event))
                event->pmu->disable(event);

        event->tstamp_running -= event->ctx->time - event->tstamp_stopped;

        if (!is_software_event(event))
                cpuctx->active_oncpu--;

        if (event->attr.exclusive || !cpuctx->active_oncpu)
                cpuctx->exclusive = 0;
}
/*
 * Called to enable a whole group of events.
 * Returns 1 if the group was enabled, or -EAGAIN if it could not be.
 * Assumes the caller has disabled interrupts and has
 * frozen the PMU with hw_perf_save_disable.
 *
 * called with PMU disabled. If successful and return value 1,
 * then guaranteed to call perf_enable() and hw_perf_enable()
 */
int hw_perf_group_sched_in(struct perf_event *leader,
               struct perf_cpu_context *cpuctx,
               struct perf_event_context *ctx)
{
        struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
        struct perf_event *sub;
        int assign[X86_PMC_IDX_MAX];
        int n0, n1, ret;

        /* n0 = total number of events */
        n0 = collect_events(cpuc, leader, true);
        if (n0 < 0)
                return n0;

        ret = x86_schedule_events(cpuc, n0, assign);
        if (ret)
                return ret;

        ret = x86_event_sched_in(leader, cpuctx);
        if (ret)
                return ret;

        n1 = 1;
        list_for_each_entry(sub, &leader->sibling_list, group_entry) {
                if (sub->state > PERF_EVENT_STATE_OFF) {
                        ret = x86_event_sched_in(sub, cpuctx);
                        if (ret)
                                goto undo;
                        ++n1;
                }
        }
        /*
         * copy new assignment, now we know it is possible
         * will be used by hw_perf_enable()
         */
        memcpy(cpuc->assign, assign, n0*sizeof(int));

        cpuc->n_events  = n0;
        cpuc->n_added  += n1;
        ctx->nr_active += n1;

        /*
         * 1 means successful and events are active
         * This is not quite true because we defer
         * actual activation until hw_perf_enable() but
         * this way we ensure caller won't try to enable
         * individual events
         */
        return 1;
undo:
        x86_event_sched_out(leader, cpuctx);
        n0 = 1;
        list_for_each_entry(sub, &leader->sibling_list, group_entry) {
                if (sub->state == PERF_EVENT_STATE_ACTIVE) {
                        x86_event_sched_out(sub, cpuctx);
                        if (++n0 == n1)
                                break;
                }
        }
        return ret;
}
#include "perf_event_amd.c"
#include "perf_event_p6.c"
#include "perf_event_intel.c"
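
/*
 * CPU hotplug notifier: forward the relevant hotplug phases to the
 * vendor-specific x86_pmu callbacks, when they are provided.
 */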
static int __cpuinit
x86_pmu_notifier(struct notifier_block *self, unsigned long action, void *hcpu)
{
        unsigned int cpu = (long)hcpu;
        int ret = NOTIFY_OK;

        switch (action & ~CPU_TASKS_FROZEN) {
        case CPU_UP_PREPARE:
                if (x86_pmu.cpu_prepare)
                        ret = x86_pmu.cpu_prepare(cpu);
                break;

        case CPU_STARTING:
                if (x86_pmu.cpu_starting)
                        x86_pmu.cpu_starting(cpu);
                break;

        case CPU_DYING:
                if (x86_pmu.cpu_dying)
                        x86_pmu.cpu_dying(cpu);
                break;

        case CPU_UP_CANCELED:
        case CPU_DEAD:
                if (x86_pmu.cpu_dead)
                        x86_pmu.cpu_dead(cpu);
                break;

        default:
                break;
        }

        return ret;
}
static void __init pmu_check_apic(void)
{
        if (cpu_has_apic)
                return;

        x86_pmu.apic = 0;
        pr_info("no APIC, boot with the \"lapic\" boot parameter to force-enable it.\n");
        pr_info("no hardware sampling interrupt available.\n");
}
void __init init_hw_perf_events(void)
{
        struct event_constraint *c;
        int err;

        pr_info("Performance Events: ");

        switch (boot_cpu_data.x86_vendor) {
        case X86_VENDOR_INTEL:
                err = intel_pmu_init();
                break;
        case X86_VENDOR_AMD:
                err = amd_pmu_init();
                break;
        default:
                return;
        }
        if (err != 0) {
                pr_cont("no PMU driver, software events only.\n");
                return;
        }

        pmu_check_apic();

        pr_cont("%s PMU driver.\n", x86_pmu.name);

        if (x86_pmu.num_events > X86_PMC_MAX_GENERIC) {
                WARN(1, KERN_ERR "hw perf events %d > max(%d), clipping!",
                     x86_pmu.num_events, X86_PMC_MAX_GENERIC);
                x86_pmu.num_events = X86_PMC_MAX_GENERIC;
        }
        perf_event_mask = (1 << x86_pmu.num_events) - 1;
        perf_max_events = x86_pmu.num_events;

        if (x86_pmu.num_events_fixed > X86_PMC_MAX_FIXED) {
                WARN(1, KERN_ERR "hw perf events fixed %d > max(%d), clipping!",
                     x86_pmu.num_events_fixed, X86_PMC_MAX_FIXED);
                x86_pmu.num_events_fixed = X86_PMC_MAX_FIXED;
        }

        perf_event_mask |=
                ((1LL << x86_pmu.num_events_fixed)-1) << X86_PMC_IDX_FIXED;
        x86_pmu.intel_ctrl = perf_event_mask;

        perf_events_lapic_init();
        register_die_notifier(&perf_event_nmi_notifier);

        unconstrained = (struct event_constraint)
                __EVENT_CONSTRAINT(0, (1ULL << x86_pmu.num_events) - 1,
                                   0, x86_pmu.num_events);

        if (x86_pmu.event_constraints) {
                for_each_event_constraint(c, x86_pmu.event_constraints) {
                        if (c->cmask != INTEL_ARCH_FIXED_MASK)
                                continue;

                        c->idxmsk64 |= (1ULL << x86_pmu.num_events) - 1;
                        c->weight += x86_pmu.num_events;
                }
        }

        pr_info("... version:                %d\n",     x86_pmu.version);
        pr_info("... bit width:              %d\n",     x86_pmu.event_bits);
        pr_info("... generic registers:      %d\n",     x86_pmu.num_events);
        pr_info("... value mask:             %016Lx\n", x86_pmu.event_mask);
        pr_info("... max period:             %016Lx\n", x86_pmu.max_period);
        pr_info("... fixed-purpose events:   %d\n",     x86_pmu.num_events_fixed);
        pr_info("... event mask:             %016Lx\n", perf_event_mask);

        perf_cpu_notifier(x86_pmu_notifier);
}
static inline void x86_pmu_read(struct perf_event *event)
{
        x86_perf_event_update(event);
}

static const struct pmu pmu = {
        .enable         = x86_pmu_enable,
        .disable        = x86_pmu_disable,
        .start          = x86_pmu_start,
        .stop           = x86_pmu_stop,
        .read           = x86_pmu_read,
        .unthrottle     = x86_pmu_unthrottle,
};
/*
 * validate a single event group
 *
 * validation includes:
 *      - check events are compatible with each other
 *      - events do not compete for the same counter
 *      - number of events <= number of counters
 *
 * validation ensures the group can be loaded onto the
 * PMU if it was the only group available.
 */
static int validate_group(struct perf_event *event)
{
        struct perf_event *leader = event->group_leader;
        struct cpu_hw_events *fake_cpuc;
        int ret, n;

        ret = -ENOMEM;
        fake_cpuc = kmalloc(sizeof(*fake_cpuc), GFP_KERNEL | __GFP_ZERO);
        if (!fake_cpuc)
                goto out;

        /*
         * the event is not yet connected with its
         * siblings therefore we must first collect
         * existing siblings, then add the new event
         * before we can simulate the scheduling
         */
        ret = -ENOSPC;
        n = collect_events(fake_cpuc, leader, true);
        if (n < 0)
                goto out_free;

        fake_cpuc->n_events = n;
        n = collect_events(fake_cpuc, event, false);
        if (n < 0)
                goto out_free;

        fake_cpuc->n_events = n;

        ret = x86_schedule_events(fake_cpuc, n, NULL);

out_free:
        kfree(fake_cpuc);
out:
        return ret;
}
const struct pmu *hw_perf_event_init(struct perf_event *event)
{
        const struct pmu *tmp;
        int err;

        err = __hw_perf_event_init(event);
        if (!err) {
                /*
                 * we temporarily connect event to its pmu
                 * such that validate_group() can classify
                 * it as an x86 event using is_x86_event()
                 */
                tmp = event->pmu;
                event->pmu = &pmu;

                if (event->group_leader != event)
                        err = validate_group(event);

                event->pmu = tmp;
        }
        if (err) {
                if (event->destroy)
                        event->destroy(event);
                return ERR_PTR(err);
        }

        return &pmu;
}
/*
 * callchain support
 */

static inline
void callchain_store(struct perf_callchain_entry *entry, u64 ip)
{
        if (entry->nr < PERF_MAX_STACK_DEPTH)
                entry->ip[entry->nr++] = ip;
}

static DEFINE_PER_CPU(struct perf_callchain_entry, pmc_irq_entry);
static DEFINE_PER_CPU(struct perf_callchain_entry, pmc_nmi_entry);
static void
backtrace_warning_symbol(void *data, char *msg, unsigned long symbol)
{
        /* Ignore warnings */
}

static void backtrace_warning(void *data, char *msg)
{
        /* Ignore warnings */
}

static int backtrace_stack(void *data, char *name)
{
        return 0;
}

static void backtrace_address(void *data, unsigned long addr, int reliable)
{
        struct perf_callchain_entry *entry = data;

        if (reliable)
                callchain_store(entry, addr);
}

static const struct stacktrace_ops backtrace_ops = {
        .warning                = backtrace_warning,
        .warning_symbol         = backtrace_warning_symbol,
        .stack                  = backtrace_stack,
        .address                = backtrace_address,
        .walk_stack             = print_context_stack_bp,
};

#include "../dumpstack.h"

static void
perf_callchain_kernel(struct pt_regs *regs, struct perf_callchain_entry *entry)
{
        callchain_store(entry, PERF_CONTEXT_KERNEL);
        callchain_store(entry, regs->ip);

        dump_trace(NULL, regs, NULL, regs->bp, &backtrace_ops, entry);
}
/*
 * best effort, GUP based copy_from_user() that assumes IRQ or NMI context
 */
static unsigned long
copy_from_user_nmi(void *to, const void __user *from, unsigned long n)
{
        unsigned long offset, addr = (unsigned long)from;
        int type = in_nmi() ? KM_NMI : KM_IRQ0;
        unsigned long size, len = 0;
        struct page *page;
        void *map;
        int ret;

        do {
                ret = __get_user_pages_fast(addr, 1, 0, &page);
                if (!ret)
                        break;

                offset = addr & (PAGE_SIZE - 1);
                size = min(PAGE_SIZE - offset, n - len);

                map = kmap_atomic(page, type);
                memcpy(to, map+offset, size);
                kunmap_atomic(map, type);
                put_page(page);

                len  += size;
                to   += size;
                addr += size;

        } while (len < n);

        return len;
}
#ifdef CONFIG_COMPAT
static inline int
perf_callchain_user32(struct pt_regs *regs, struct perf_callchain_entry *entry)
{
        /* 32-bit process in 64-bit kernel. */
        struct stack_frame_ia32 frame;
        const void __user *fp;

        if (!test_thread_flag(TIF_IA32))
                return 0;

        fp = compat_ptr(regs->bp);
        while (entry->nr < PERF_MAX_STACK_DEPTH) {
                unsigned long bytes;
                frame.next_frame     = 0;
                frame.return_address = 0;

                bytes = copy_from_user_nmi(&frame, fp, sizeof(frame));
                if (bytes != sizeof(frame))
                        break;

                if (fp < compat_ptr(regs->sp))
                        break;

                callchain_store(entry, frame.return_address);
                fp = compat_ptr(frame.next_frame);
        }
        return 1;
}
#else
static inline int
perf_callchain_user32(struct pt_regs *regs, struct perf_callchain_entry *entry)
{
        return 0;
}
#endif
static void
perf_callchain_user(struct pt_regs *regs, struct perf_callchain_entry *entry)
{
        struct stack_frame frame;
        const void __user *fp;

        if (!user_mode(regs))
                regs = task_pt_regs(current);

        fp = (void __user *)regs->bp;

        callchain_store(entry, PERF_CONTEXT_USER);
        callchain_store(entry, regs->ip);

        if (perf_callchain_user32(regs, entry))
                return;

        while (entry->nr < PERF_MAX_STACK_DEPTH) {
                unsigned long bytes;
                frame.next_frame     = NULL;
                frame.return_address = 0;

                bytes = copy_from_user_nmi(&frame, fp, sizeof(frame));
                if (bytes != sizeof(frame))
                        break;

                if ((unsigned long)fp < regs->sp)
                        break;

                callchain_store(entry, frame.return_address);
                fp = frame.next_frame;
        }
}
static void
perf_do_callchain(struct pt_regs *regs, struct perf_callchain_entry *entry)
{
        int is_user;

        if (!regs)
                return;

        is_user = user_mode(regs);

        if (is_user && current->state != TASK_RUNNING)
                return;

        if (!is_user)
                perf_callchain_kernel(regs, entry);

        if (current->mm)
                perf_callchain_user(regs, entry);
}

struct perf_callchain_entry *perf_callchain(struct pt_regs *regs)
{
        struct perf_callchain_entry *entry;

        if (in_nmi())
                entry = &__get_cpu_var(pmc_nmi_entry);
        else
                entry = &__get_cpu_var(pmc_irq_entry);

        entry->nr = 0;

        perf_do_callchain(regs, entry);

        return entry;
}
void perf_arch_fetch_caller_regs(struct pt_regs *regs, unsigned long ip, int skip)
{
        regs->ip = ip;

        /*
         * perf_arch_fetch_caller_regs adds another call, we need to increment
         * the skip level
         */
        regs->bp = rewind_frame_pointer(skip + 1);
        regs->cs = __KERNEL_CS;
        local_save_flags(regs->flags);
}