/*
 * ARM performance counter support.
 *
 * Copyright (C) 2009 picoChip Designs, Ltd., Jamie Iles
 * Copyright (C) 2010 ARM Ltd., Will Deacon <will.deacon@arm.com>
 *
 * This code is based on the sparc64 perf event code, which is in turn based
 * on the x86 code. Callchain code is based on the ARM OProfile backtrace
 * code.
 */
#define pr_fmt(fmt) "hw perfevents: " fmt

#include <linux/interrupt.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/perf_event.h>
#include <linux/platform_device.h>
#include <linux/spinlock.h>
#include <linux/uaccess.h>

#include <asm/cputype.h>
#include <asm/irq.h>
#include <asm/irq_regs.h>
#include <asm/pmu.h>
#include <asm/stacktrace.h>
/*
 * ARMv6 supports a maximum of 3 events, starting from index 0. If we add
 * another platform that supports more, we need to increase this to be the
 * largest of all platforms.
 *
 * ARMv7 supports up to 32 events:
 *  cycle counter CCNT + 31 event counters CNT0..30.
 *  Cortex-A8 has 1+4 counters, Cortex-A9 has 1+6 counters.
 */
#define ARMPMU_MAX_HWEVENTS		32
/* The events for a given CPU. */
struct cpu_hw_events {
	/*
	 * The events that are active on the CPU for the given index.
	 */
	struct perf_event	**events;

	/*
	 * A 1 bit for an index indicates that the counter is being used for
	 * an event. A 0 means that the counter can be used.
	 */
	unsigned long		*used_mask;

	/*
	 * Hardware lock to serialize accesses to PMU registers. Needed for the
	 * read/modify/write sequences.
	 */
	raw_spinlock_t		pmu_lock;
};

static DEFINE_PER_CPU(struct perf_event * [ARMPMU_MAX_HWEVENTS], hw_events);
static DEFINE_PER_CPU(unsigned long [BITS_TO_LONGS(ARMPMU_MAX_HWEVENTS)], used_mask);
static DEFINE_PER_CPU(struct cpu_hw_events, cpu_hw_events);
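/*
 * The per-CPU storage above (hw_events, used_mask) is wired into each CPU's
 * cpu_hw_events by cpu_pmu_init() below; armpmu_get_cpu_events() then hands
 * the current CPU's structure to the core callbacks.
 */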
struct arm_pmu {
	struct pmu	pmu;
	enum arm_perf_pmu_ids id;
	enum arm_pmu_type type;
	cpumask_t	active_irqs;
	const char	*name;
	irqreturn_t	(*handle_irq)(int irq_num, void *dev);
	void		(*enable)(struct hw_perf_event *evt, int idx);
	void		(*disable)(struct hw_perf_event *evt, int idx);
	int		(*get_event_idx)(struct cpu_hw_events *cpuc,
					 struct hw_perf_event *hwc);
	int		(*set_event_filter)(struct hw_perf_event *evt,
					    struct perf_event_attr *attr);
	u32		(*read_counter)(int idx);
	void		(*write_counter)(int idx, u32 val);
	void		(*start)(void);
	void		(*stop)(void);
	void		(*reset)(void *);
	int		(*map_event)(struct perf_event *event);
	int		num_events;
	atomic_t	active_events;
	struct mutex	reserve_mutex;
	u64		max_period;
	struct platform_device	*plat_device;
	struct cpu_hw_events	*(*get_hw_events)(void);
};

#define to_arm_pmu(p) (container_of(p, struct arm_pmu, pmu))

/* Set at runtime when we know what CPU type we are. */
static struct arm_pmu *armpmu;
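/*
 * The callbacks below are handed a generic struct pmu or perf_event;
 * to_arm_pmu() (container_of() on the embedded struct pmu) recovers the
 * ARM-specific descriptor from it.
 */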
int
armpmu_get_pmu_id(void)
{
	int id = -ENODEV;

	if (armpmu != NULL)
		id = armpmu->id;

	return id;
}
EXPORT_SYMBOL_GPL(armpmu_get_pmu_id);

int
armpmu_get_max_events(void)
{
	int max_events = 0;

	if (armpmu != NULL)
		max_events = armpmu->num_events;

	return max_events;
}
EXPORT_SYMBOL_GPL(armpmu_get_max_events);

int perf_num_counters(void)
{
	return armpmu_get_max_events();
}
EXPORT_SYMBOL_GPL(perf_num_counters);
#define HW_OP_UNSUPPORTED		0xFFFF

#define C(_x) \
	PERF_COUNT_HW_CACHE_##_x

#define CACHE_OP_UNSUPPORTED		0xFFFF
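/*
 * For PERF_TYPE_HW_CACHE events the generic perf ABI packs the cache type,
 * operation and result into attr.config as:
 *
 *	config = (type) | (op << 8) | (result << 16)
 *
 * which is what the shifts below unpack before indexing the per-PMU
 * cache_map table.
 */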
static int
armpmu_map_cache_event(const unsigned (*cache_map)
				      [PERF_COUNT_HW_CACHE_MAX]
				      [PERF_COUNT_HW_CACHE_OP_MAX]
				      [PERF_COUNT_HW_CACHE_RESULT_MAX],
		       u64 config)
{
	unsigned int cache_type, cache_op, cache_result, ret;

	cache_type = (config >>  0) & 0xff;
	if (cache_type >= PERF_COUNT_HW_CACHE_MAX)
		return -EINVAL;

	cache_op = (config >>  8) & 0xff;
	if (cache_op >= PERF_COUNT_HW_CACHE_OP_MAX)
		return -EINVAL;

	cache_result = (config >> 16) & 0xff;
	if (cache_result >= PERF_COUNT_HW_CACHE_RESULT_MAX)
		return -EINVAL;

	ret = (int)(*cache_map)[cache_type][cache_op][cache_result];

	if (ret == CACHE_OP_UNSUPPORTED)
		return -ENOENT;

	return ret;
}
static int
armpmu_map_event(const unsigned (*event_map)[PERF_COUNT_HW_MAX], u64 config)
{
	int mapping = (*event_map)[config];
	return mapping == HW_OP_UNSUPPORTED ? -ENOENT : mapping;
}

static int
armpmu_map_raw_event(u32 raw_event_mask, u64 config)
{
	return (int)(config & raw_event_mask);
}
static int map_cpu_event(struct perf_event *event,
			 const unsigned (*event_map)[PERF_COUNT_HW_MAX],
			 const unsigned (*cache_map)
					[PERF_COUNT_HW_CACHE_MAX]
					[PERF_COUNT_HW_CACHE_OP_MAX]
					[PERF_COUNT_HW_CACHE_RESULT_MAX],
			 u32 raw_event_mask)
{
	u64 config = event->attr.config;

	switch (event->attr.type) {
	case PERF_TYPE_HARDWARE:
		return armpmu_map_event(event_map, config);
	case PERF_TYPE_HW_CACHE:
		return armpmu_map_cache_event(cache_map, config);
	case PERF_TYPE_RAW:
		return armpmu_map_raw_event(raw_event_mask, config);
	}

	return -ENOENT;
}
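/*
 * Program the counter so that it overflows after the remaining sample
 * period: the hardware counter is seeded with the two's complement of the
 * period left (-left), so counting up from there hits the overflow point
 * exactly when the period has elapsed.
 */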
static int
armpmu_event_set_period(struct perf_event *event,
			struct hw_perf_event *hwc,
			int idx)
{
	struct arm_pmu *armpmu = to_arm_pmu(event->pmu);
	s64 left = local64_read(&hwc->period_left);
	s64 period = hwc->sample_period;
	int ret = 0;

	if (unlikely(left <= -period)) {
		left = period;
		local64_set(&hwc->period_left, left);
		hwc->last_period = period;
		ret = 1;
	}

	if (unlikely(left <= 0)) {
		left += period;
		local64_set(&hwc->period_left, left);
		hwc->last_period = period;
		ret = 1;
	}

	if (left > (s64)armpmu->max_period)
		left = armpmu->max_period;

	local64_set(&hwc->prev_count, (u64)-left);

	armpmu->write_counter(idx, (u64)(-left) & 0xffffffff);

	perf_event_update_userpage(event);

	return ret;
}
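/*
 * Fold the delta since the last read into the generic event count. The
 * prev_count update uses a cmpxchg/retry loop so a racing read (e.g. from
 * the overflow interrupt) cannot lose an update, and when "overflow" is set
 * the delta is computed modulo max_period (hence the "+ 1").
 */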
static u64
armpmu_event_update(struct perf_event *event,
		    struct hw_perf_event *hwc,
		    int idx, int overflow)
{
	struct arm_pmu *armpmu = to_arm_pmu(event->pmu);
	u64 delta, prev_raw_count, new_raw_count;

again:
	prev_raw_count = local64_read(&hwc->prev_count);
	new_raw_count = armpmu->read_counter(idx);

	if (local64_cmpxchg(&hwc->prev_count, prev_raw_count,
			    new_raw_count) != prev_raw_count)
		goto again;

	new_raw_count &= armpmu->max_period;
	prev_raw_count &= armpmu->max_period;

	if (overflow)
		delta = armpmu->max_period - prev_raw_count + new_raw_count + 1;
	else
		delta = new_raw_count - prev_raw_count;

	local64_add(delta, &event->count);
	local64_sub(delta, &hwc->period_left);

	return new_raw_count;
}
static void
armpmu_read(struct perf_event *event)
{
	struct hw_perf_event *hwc = &event->hw;

	/* Don't read disabled counters! */
	if (hwc->idx < 0)
		return;

	armpmu_event_update(event, hwc, hwc->idx, 0);
}
static void
armpmu_stop(struct perf_event *event, int flags)
{
	struct arm_pmu *armpmu = to_arm_pmu(event->pmu);
	struct hw_perf_event *hwc = &event->hw;

	/*
	 * ARM pmu always has to update the counter, so ignore
	 * PERF_EF_UPDATE, see comments in armpmu_start().
	 */
	if (!(hwc->state & PERF_HES_STOPPED)) {
		armpmu->disable(hwc, hwc->idx);
		barrier(); /* why? */
		armpmu_event_update(event, hwc, hwc->idx, 0);
		hwc->state |= PERF_HES_STOPPED | PERF_HES_UPTODATE;
	}
}
static void
armpmu_start(struct perf_event *event, int flags)
{
	struct arm_pmu *armpmu = to_arm_pmu(event->pmu);
	struct hw_perf_event *hwc = &event->hw;

	/*
	 * ARM pmu always has to reprogram the period, so ignore
	 * PERF_EF_RELOAD, see the comment below.
	 */
	if (flags & PERF_EF_RELOAD)
		WARN_ON_ONCE(!(hwc->state & PERF_HES_UPTODATE));

	hwc->state = 0;
	/*
	 * Set the period again. Some counters can't be stopped, so when we
	 * were stopped we simply disabled the IRQ source and the counter
	 * may have been left counting. If we don't do this step then we may
	 * get an interrupt too soon or *way* too late if the overflow has
	 * happened since disabling.
	 */
	armpmu_event_set_period(event, hwc, hwc->idx);
	armpmu->enable(hwc, hwc->idx);
}
static void
armpmu_del(struct perf_event *event, int flags)
{
	struct arm_pmu *armpmu = to_arm_pmu(event->pmu);
	struct cpu_hw_events *cpuc = armpmu->get_hw_events();
	struct hw_perf_event *hwc = &event->hw;
	int idx = hwc->idx;

	WARN_ON(idx < 0);

	armpmu_stop(event, PERF_EF_UPDATE);
	cpuc->events[idx] = NULL;
	clear_bit(idx, cpuc->used_mask);

	perf_event_update_userpage(event);
}
static int
armpmu_add(struct perf_event *event, int flags)
{
	struct arm_pmu *armpmu = to_arm_pmu(event->pmu);
	struct cpu_hw_events *cpuc = armpmu->get_hw_events();
	struct hw_perf_event *hwc = &event->hw;
	int idx;
	int err = 0;

	perf_pmu_disable(event->pmu);

	/* If we don't have a space for the counter then finish early. */
	idx = armpmu->get_event_idx(cpuc, hwc);
	if (idx < 0) {
		err = idx;
		goto out;
	}

	/*
	 * If there is an event in the counter we are going to use then make
	 * sure it is disabled.
	 */
	event->hw.idx = idx;
	armpmu->disable(hwc, idx);
	cpuc->events[idx] = event;

	hwc->state = PERF_HES_STOPPED | PERF_HES_UPTODATE;
	if (flags & PERF_EF_START)
		armpmu_start(event, PERF_EF_RELOAD);

	/* Propagate our changes to the userspace mapping. */
	perf_event_update_userpage(event);

out:
	perf_pmu_enable(event->pmu);
	return err;
}
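/*
 * Group validation: before a new event joins a group we dry-run the whole
 * group against a throwaway cpu_hw_events ("fake PMU") using the real
 * get_event_idx() callback. If every member can be given a counter then the
 * group fits on the hardware; nothing is actually programmed.
 */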
static int
validate_event(struct cpu_hw_events *cpuc,
	       struct perf_event *event)
{
	struct arm_pmu *armpmu = to_arm_pmu(event->pmu);
	struct hw_perf_event fake_event = event->hw;
	struct pmu *leader_pmu = event->group_leader->pmu;

	if (event->pmu != leader_pmu || event->state <= PERF_EVENT_STATE_OFF)
		return 1;

	return armpmu->get_event_idx(cpuc, &fake_event) >= 0;
}
static int
validate_group(struct perf_event *event)
{
	struct perf_event *sibling, *leader = event->group_leader;
	struct cpu_hw_events fake_pmu;
	DECLARE_BITMAP(fake_used_mask, ARMPMU_MAX_HWEVENTS);

	/*
	 * Initialise the fake PMU. We only need to populate the
	 * used_mask for the purposes of validation.
	 */
	memset(&fake_pmu, 0, sizeof(fake_pmu));
	memset(fake_used_mask, 0, sizeof(fake_used_mask));
	fake_pmu.used_mask = fake_used_mask;

	if (!validate_event(&fake_pmu, leader))
		return -EINVAL;

	list_for_each_entry(sibling, &leader->sibling_list, group_entry) {
		if (!validate_event(&fake_pmu, sibling))
			return -EINVAL;
	}

	if (!validate_event(&fake_pmu, event))
		return -EINVAL;

	return 0;
}
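/*
 * Some platforms interpose board-specific code around the PMU interrupt.
 * When the platform data supplies a handle_irq hook, this wrapper is
 * registered instead of the driver's handler and the hook is passed the
 * real handler to call.
 */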
static irqreturn_t armpmu_platform_irq(int irq, void *dev)
{
	struct arm_pmu *armpmu = (struct arm_pmu *) dev;
	struct platform_device *plat_device = armpmu->plat_device;
	struct arm_pmu_platdata *plat = dev_get_platdata(&plat_device->dev);

	return plat->handle_irq(irq, dev, armpmu->handle_irq);
}
static void
armpmu_release_hardware(struct arm_pmu *armpmu)
{
	int i, irq, irqs;
	struct platform_device *pmu_device = armpmu->plat_device;

	irqs = min(pmu_device->num_resources, num_possible_cpus());

	for (i = 0; i < irqs; ++i) {
		if (!cpumask_test_and_clear_cpu(i, &armpmu->active_irqs))
			continue;
		irq = platform_get_irq(pmu_device, i);
		if (irq >= 0)
			free_irq(irq, armpmu);
	}

	release_pmu(armpmu->type);
}
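/*
 * Claim the PMU for this driver and wire up its interrupts: one platform
 * IRQ resource is expected per possible CPU, each IRQ is affined to its
 * CPU, and a failed request_irq() unwinds everything set up so far.
 */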
static int
armpmu_reserve_hardware(struct arm_pmu *armpmu)
{
	struct arm_pmu_platdata *plat;
	irq_handler_t handle_irq;
	int i, err, irq, irqs;
	struct platform_device *pmu_device = armpmu->plat_device;

	err = reserve_pmu(armpmu->type);
	if (err) {
		pr_warning("unable to reserve pmu\n");
		return err;
	}

	plat = dev_get_platdata(&pmu_device->dev);
	if (plat && plat->handle_irq)
		handle_irq = armpmu_platform_irq;
	else
		handle_irq = armpmu->handle_irq;

	irqs = min(pmu_device->num_resources, num_possible_cpus());
	if (irqs < 1) {
		pr_err("no irqs for PMUs defined\n");
		return -ENODEV;
	}

	for (i = 0; i < irqs; ++i) {
		err = 0;
		irq = platform_get_irq(pmu_device, i);
		if (irq < 0)
			continue;

		/*
		 * If we have a single PMU interrupt that we can't shift,
		 * assume that we're running on a uniprocessor machine and
		 * continue. Otherwise, continue without this interrupt.
		 */
		if (irq_set_affinity(irq, cpumask_of(i)) && irqs > 1) {
			pr_warning("unable to set irq affinity (irq=%d, cpu=%u)\n",
				    irq, i);
			continue;
		}

		err = request_irq(irq, handle_irq,
				  IRQF_DISABLED | IRQF_NOBALANCING,
				  "arm-pmu", armpmu);
		if (err) {
			pr_err("unable to request IRQ%d for ARM PMU counters\n",
				irq);
			armpmu_release_hardware(armpmu);
			return err;
		}

		cpumask_set_cpu(i, &armpmu->active_irqs);
	}

	return 0;
}
static void
hw_perf_event_destroy(struct perf_event *event)
{
	struct arm_pmu *armpmu = to_arm_pmu(event->pmu);
	atomic_t *active_events = &armpmu->active_events;
	struct mutex *pmu_reserve_mutex = &armpmu->reserve_mutex;

	if (atomic_dec_and_mutex_lock(active_events, pmu_reserve_mutex)) {
		armpmu_release_hardware(armpmu);
		mutex_unlock(pmu_reserve_mutex);
	}
}
static int
event_requires_mode_exclusion(struct perf_event_attr *attr)
{
	return attr->exclude_idle || attr->exclude_user ||
	       attr->exclude_kernel || attr->exclude_hv;
}
static int
__hw_perf_event_init(struct perf_event *event)
{
	struct arm_pmu *armpmu = to_arm_pmu(event->pmu);
	struct hw_perf_event *hwc = &event->hw;
	int mapping, err;

	mapping = armpmu->map_event(event);

	if (mapping < 0) {
		pr_debug("event %x:%llx not supported\n", event->attr.type,
			 event->attr.config);
		return mapping;
	}

	/*
	 * We don't assign an index until we actually place the event onto
	 * hardware. Use -1 to signify that we haven't decided where to put it
	 * yet. For SMP systems, each core has its own PMU so we can't do any
	 * clever allocation or constraints checking at this point.
	 */
	hwc->idx		= -1;
	hwc->config_base	= 0;
	hwc->config		= 0;
	hwc->event_base		= 0;

	/*
	 * Check whether we need to exclude the counter from certain modes.
	 */
	if ((!armpmu->set_event_filter ||
	     armpmu->set_event_filter(hwc, &event->attr)) &&
	     event_requires_mode_exclusion(&event->attr)) {
		pr_debug("ARM performance counters do not support "
			 "mode exclusion\n");
		return -EPERM;
	}

	/*
	 * Store the event encoding into the config_base field.
	 */
	hwc->config_base	|= (unsigned long)mapping;

	if (!hwc->sample_period) {
		hwc->sample_period  = armpmu->max_period;
		hwc->last_period    = hwc->sample_period;
		local64_set(&hwc->period_left, hwc->sample_period);
	}

	err = 0;
	if (event->group_leader != event) {
		err = validate_group(event);
		if (err)
			return -EINVAL;
	}

	return err;
}
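/*
 * The PMU hardware (interrupts and the global reservation) is claimed
 * lazily when the first event is created and released when the last event
 * is destroyed; active_events plus reserve_mutex implement that refcount.
 */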
static int armpmu_event_init(struct perf_event *event)
{
	struct arm_pmu *armpmu = to_arm_pmu(event->pmu);
	int err = 0;
	atomic_t *active_events = &armpmu->active_events;

	if (armpmu->map_event(event) == -ENOENT)
		return -ENOENT;

	event->destroy = hw_perf_event_destroy;

	if (!atomic_inc_not_zero(active_events)) {
		mutex_lock(&armpmu->reserve_mutex);
		if (atomic_read(active_events) == 0)
			err = armpmu_reserve_hardware(armpmu);

		if (!err)
			atomic_inc(active_events);
		mutex_unlock(&armpmu->reserve_mutex);
	}

	if (err)
		return err;

	err = __hw_perf_event_init(event);
	if (err)
		hw_perf_event_destroy(event);

	return err;
}
static void armpmu_enable(struct pmu *pmu)
{
	struct arm_pmu *armpmu = to_arm_pmu(pmu);
	/* Enable all of the perf events on hardware. */
	int idx, enabled = 0;
	struct cpu_hw_events *cpuc = armpmu->get_hw_events();

	for (idx = 0; idx < armpmu->num_events; ++idx) {
		struct perf_event *event = cpuc->events[idx];

		if (!event)
			continue;

		armpmu->enable(&event->hw, idx);
		enabled = 1;
	}

	if (enabled)
		armpmu->start();
}

static void armpmu_disable(struct pmu *pmu)
{
	struct arm_pmu *armpmu = to_arm_pmu(pmu);
	armpmu->stop();
}
static void __init armpmu_init(struct arm_pmu *armpmu)
{
	atomic_set(&armpmu->active_events, 0);
	mutex_init(&armpmu->reserve_mutex);

	armpmu->pmu = (struct pmu) {
		.pmu_enable	= armpmu_enable,
		.pmu_disable	= armpmu_disable,
		.event_init	= armpmu_event_init,
		.add		= armpmu_add,
		.del		= armpmu_del,
		.start		= armpmu_start,
		.stop		= armpmu_stop,
		.read		= armpmu_read,
	};
}

static int __init armpmu_register(struct arm_pmu *armpmu, char *name, int type)
{
	armpmu_init(armpmu);
	return perf_pmu_register(&armpmu->pmu, name, type);
}
/* Include the PMU-specific implementations. */
#include "perf_event_xscale.c"
#include "perf_event_v6.c"
#include "perf_event_v7.c"
/*
 * Ensure the PMU has sane values out of reset.
 * This requires SMP to be available, so exists as a separate initcall.
 */
static int __init
armpmu_reset(void)
{
	if (armpmu && armpmu->reset)
		return on_each_cpu(armpmu->reset, NULL, 1);
	return 0;
}
arch_initcall(armpmu_reset);
/*
 * PMU platform driver and devicetree bindings.
 */
static struct of_device_id armpmu_of_device_ids[] = {
	{.compatible = "arm,cortex-a9-pmu"},
	{.compatible = "arm,cortex-a8-pmu"},
	{.compatible = "arm,arm1136-pmu"},
	{.compatible = "arm,arm1176-pmu"},
	{},
};

static struct platform_device_id armpmu_plat_device_ids[] = {
	{.name = "arm-pmu"},
	{},
};

static int __devinit armpmu_device_probe(struct platform_device *pdev)
{
	armpmu->plat_device = pdev;
	return 0;
}

static struct platform_driver armpmu_driver = {
	.driver		= {
		.name	= "arm-pmu",
		.of_match_table = armpmu_of_device_ids,
	},
	.probe		= armpmu_device_probe,
	.id_table	= armpmu_plat_device_ids,
};

static int __init register_pmu_driver(void)
{
	return platform_driver_register(&armpmu_driver);
}
device_initcall(register_pmu_driver);
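/*
 * Glue between the generic arm_pmu callbacks and the per-CPU storage
 * declared at the top of this file.
 */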
static struct cpu_hw_events *armpmu_get_cpu_events(void)
{
	return &__get_cpu_var(cpu_hw_events);
}

static void __init cpu_pmu_init(struct arm_pmu *armpmu)
{
	int cpu;
	for_each_possible_cpu(cpu) {
		struct cpu_hw_events *events = &per_cpu(cpu_hw_events, cpu);
		events->events = per_cpu(hw_events, cpu);
		events->used_mask = per_cpu(used_mask, cpu);
		raw_spin_lock_init(&events->pmu_lock);
	}
	armpmu->get_hw_events = armpmu_get_cpu_events;
	armpmu->type = ARM_PMU_DEVICE_CPU;
}
/*
 * CPU PMU identification and registration.
 */
static int __init
init_hw_perf_events(void)
{
	unsigned long cpuid = read_cpuid_id();
	unsigned long implementor = (cpuid & 0xFF000000) >> 24;
	unsigned long part_number = (cpuid & 0xFFF0);

	/* ARM Ltd CPUs. */
	if (0x41 == implementor) {
		switch (part_number) {
		case 0xB360:	/* ARM1136 */
		case 0xB560:	/* ARM1156 */
		case 0xB760:	/* ARM1176 */
			armpmu = armv6pmu_init();
			break;
		case 0xB020:	/* ARM11mpcore */
			armpmu = armv6mpcore_pmu_init();
			break;
		case 0xC080:	/* Cortex-A8 */
			armpmu = armv7_a8_pmu_init();
			break;
		case 0xC090:	/* Cortex-A9 */
			armpmu = armv7_a9_pmu_init();
			break;
		case 0xC050:	/* Cortex-A5 */
			armpmu = armv7_a5_pmu_init();
			break;
		case 0xC0F0:	/* Cortex-A15 */
			armpmu = armv7_a15_pmu_init();
			break;
		}
	/* Intel CPUs [xscale]. */
	} else if (0x69 == implementor) {
		part_number = (cpuid >> 13) & 0x7;
		switch (part_number) {
		case 1:
			armpmu = xscale1pmu_init();
			break;
		case 2:
			armpmu = xscale2pmu_init();
			break;
		}
	}

	if (armpmu) {
		pr_info("enabled with %s PMU driver, %d counters available\n",
			armpmu->name, armpmu->num_events);
		cpu_pmu_init(armpmu);
		armpmu_register(armpmu, "cpu", PERF_TYPE_RAW);
	} else {
		pr_info("no hardware support available\n");
	}

	return 0;
}
early_initcall(init_hw_perf_events);
/*
 * Callchain handling code.
 */

/*
 * The registers we're interested in are at the end of the variable
 * length saved register structure. The fp points at the end of this
 * structure so the address of this struct is:
 * (struct frame_tail *)(xxx->fp)-1
 *
 * This code has been adapted from the ARM OProfile support.
 */
struct frame_tail {
	struct frame_tail __user *fp;
	unsigned long sp;
	unsigned long lr;
} __attribute__((packed));
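/*
 * With APCS-style frame pointers the tail of each frame holds the saved fp,
 * sp and lr that struct frame_tail mirrors; following fp from one tail to
 * the previous one, recording lr each time, reconstructs the user-space
 * call chain.
 */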
/*
 * Get the return address for a single stackframe and return a pointer to the
 * next frame tail.
 */
static struct frame_tail __user *
user_backtrace(struct frame_tail __user *tail,
	       struct perf_callchain_entry *entry)
{
	struct frame_tail buftail;

	/* Also check accessibility of one struct frame_tail beyond */
	if (!access_ok(VERIFY_READ, tail, sizeof(buftail)))
		return NULL;
	if (__copy_from_user_inatomic(&buftail, tail, sizeof(buftail)))
		return NULL;

	perf_callchain_store(entry, buftail.lr);

	/*
	 * Frame pointers should strictly progress back up the stack
	 * (towards higher addresses).
	 */
	if (tail + 1 >= buftail.fp)
		return NULL;

	return buftail.fp - 1;
}
void
perf_callchain_user(struct perf_callchain_entry *entry, struct pt_regs *regs)
{
	struct frame_tail __user *tail;

	tail = (struct frame_tail __user *)regs->ARM_fp - 1;

	while ((entry->nr < PERF_MAX_STACK_DEPTH) &&
	       tail && !((unsigned long)tail & 0x3))
		tail = user_backtrace(tail, entry);
}
/*
 * Gets called by walk_stackframe() for every stackframe. This will be called
 * whilst unwinding the stackframe and is like a subroutine return so we use
 * the PC.
 */
static int
callchain_trace(struct stackframe *fr,
		void *data)
{
	struct perf_callchain_entry *entry = data;
	perf_callchain_store(entry, fr->pc);
	return 0;
}

void
perf_callchain_kernel(struct perf_callchain_entry *entry, struct pt_regs *regs)
{
	struct stackframe fr;

	fr.fp = regs->ARM_fp;
	fr.sp = regs->ARM_sp;
	fr.lr = regs->ARM_lr;
	fr.pc = regs->ARM_pc;
	walk_stackframe(&fr, callchain_trace, entry);
}