4 * ARM performance counter support.
6 * Copyright (C) 2009 picoChip Designs, Ltd., Jamie Iles
8 * ARMv7 support: Jean Pihet <jpihet@mvista.com>
9 * 2010 (c) MontaVista Software, LLC.
11 * This code is based on the sparc64 perf event code, which is in turn based
12 * on the x86 code. Callchain code is based on the ARM OProfile backtrace
15 #define pr_fmt(fmt) "hw perfevents: " fmt
17 #include <linux/interrupt.h>
18 #include <linux/kernel.h>
19 #include <linux/module.h>
20 #include <linux/perf_event.h>
21 #include <linux/platform_device.h>
22 #include <linux/spinlock.h>
23 #include <linux/uaccess.h>
25 #include <asm/cputype.h>
27 #include <asm/irq_regs.h>
29 #include <asm/stacktrace.h>
31 static struct platform_device
*pmu_device
;
34 * Hardware lock to serialize accesses to PMU registers. Needed for the
35 * read/modify/write sequences.
37 DEFINE_SPINLOCK(pmu_lock
);
40 * ARMv6 supports a maximum of 3 events, starting from index 1. If we add
41 * another platform that supports more, we need to increase this to be the
42 * largest of all platforms.
44 * ARMv7 supports up to 32 events:
45 * cycle counter CCNT + 31 events counters CNT0..30.
46 * Cortex-A8 has 1+4 counters, Cortex-A9 has 1+6 counters.
48 #define ARMPMU_MAX_HWEVENTS 33
50 /* The events for a given CPU. */
51 struct cpu_hw_events
{
53 * The events that are active on the CPU for the given index. Index 0
56 struct perf_event
*events
[ARMPMU_MAX_HWEVENTS
];
59 * A 1 bit for an index indicates that the counter is being used for
60 * an event. A 0 means that the counter can be used.
62 unsigned long used_mask
[BITS_TO_LONGS(ARMPMU_MAX_HWEVENTS
)];
65 * A 1 bit for an index indicates that the counter is actively being
68 unsigned long active_mask
[BITS_TO_LONGS(ARMPMU_MAX_HWEVENTS
)];
70 DEFINE_PER_CPU(struct cpu_hw_events
, cpu_hw_events
);
73 static const char *arm_pmu_names
[] = {
74 [ARM_PERF_PMU_ID_XSCALE1
] = "xscale1",
75 [ARM_PERF_PMU_ID_XSCALE2
] = "xscale2",
76 [ARM_PERF_PMU_ID_V6
] = "v6",
77 [ARM_PERF_PMU_ID_V6MP
] = "v6mpcore",
78 [ARM_PERF_PMU_ID_CA8
] = "ARMv7 Cortex-A8",
79 [ARM_PERF_PMU_ID_CA9
] = "ARMv7 Cortex-A9",
83 enum arm_perf_pmu_ids id
;
84 irqreturn_t (*handle_irq
)(int irq_num
, void *dev
);
85 void (*enable
)(struct hw_perf_event
*evt
, int idx
);
86 void (*disable
)(struct hw_perf_event
*evt
, int idx
);
87 int (*event_map
)(int evt
);
88 u64 (*raw_event
)(u64
);
89 int (*get_event_idx
)(struct cpu_hw_events
*cpuc
,
90 struct hw_perf_event
*hwc
);
91 u32 (*read_counter
)(int idx
);
92 void (*write_counter
)(int idx
, u32 val
);
99 /* Set at runtime when we know what CPU type we are. */
100 static const struct arm_pmu
*armpmu
;
102 enum arm_perf_pmu_ids
103 armpmu_get_pmu_id(void)
112 EXPORT_SYMBOL_GPL(armpmu_get_pmu_id
);
115 armpmu_get_max_events(void)
120 max_events
= armpmu
->num_events
;
124 EXPORT_SYMBOL_GPL(armpmu_get_max_events
);
126 int perf_num_counters(void)
128 return armpmu_get_max_events();
130 EXPORT_SYMBOL_GPL(perf_num_counters
);
132 #define HW_OP_UNSUPPORTED 0xFFFF
135 PERF_COUNT_HW_CACHE_##_x
137 #define CACHE_OP_UNSUPPORTED 0xFFFF
139 static unsigned armpmu_perf_cache_map
[PERF_COUNT_HW_CACHE_MAX
]
140 [PERF_COUNT_HW_CACHE_OP_MAX
]
141 [PERF_COUNT_HW_CACHE_RESULT_MAX
];
144 armpmu_map_cache_event(u64 config
)
146 unsigned int cache_type
, cache_op
, cache_result
, ret
;
148 cache_type
= (config
>> 0) & 0xff;
149 if (cache_type
>= PERF_COUNT_HW_CACHE_MAX
)
152 cache_op
= (config
>> 8) & 0xff;
153 if (cache_op
>= PERF_COUNT_HW_CACHE_OP_MAX
)
156 cache_result
= (config
>> 16) & 0xff;
157 if (cache_result
>= PERF_COUNT_HW_CACHE_RESULT_MAX
)
160 ret
= (int)armpmu_perf_cache_map
[cache_type
][cache_op
][cache_result
];
162 if (ret
== CACHE_OP_UNSUPPORTED
)
169 armpmu_event_set_period(struct perf_event
*event
,
170 struct hw_perf_event
*hwc
,
173 s64 left
= local64_read(&hwc
->period_left
);
174 s64 period
= hwc
->sample_period
;
177 if (unlikely(left
<= -period
)) {
179 local64_set(&hwc
->period_left
, left
);
180 hwc
->last_period
= period
;
184 if (unlikely(left
<= 0)) {
186 local64_set(&hwc
->period_left
, left
);
187 hwc
->last_period
= period
;
191 if (left
> (s64
)armpmu
->max_period
)
192 left
= armpmu
->max_period
;
194 local64_set(&hwc
->prev_count
, (u64
)-left
);
196 armpmu
->write_counter(idx
, (u64
)(-left
) & 0xffffffff);
198 perf_event_update_userpage(event
);
204 armpmu_event_update(struct perf_event
*event
,
205 struct hw_perf_event
*hwc
,
209 s64 prev_raw_count
, new_raw_count
;
213 prev_raw_count
= local64_read(&hwc
->prev_count
);
214 new_raw_count
= armpmu
->read_counter(idx
);
216 if (local64_cmpxchg(&hwc
->prev_count
, prev_raw_count
,
217 new_raw_count
) != prev_raw_count
)
220 delta
= (new_raw_count
<< shift
) - (prev_raw_count
<< shift
);
223 local64_add(delta
, &event
->count
);
224 local64_sub(delta
, &hwc
->period_left
);
226 return new_raw_count
;
230 armpmu_read(struct perf_event
*event
)
232 struct hw_perf_event
*hwc
= &event
->hw
;
234 /* Don't read disabled counters! */
238 armpmu_event_update(event
, hwc
, hwc
->idx
);
242 armpmu_stop(struct perf_event
*event
, int flags
)
244 struct hw_perf_event
*hwc
= &event
->hw
;
250 * ARM pmu always has to update the counter, so ignore
251 * PERF_EF_UPDATE, see comments in armpmu_start().
253 if (!(hwc
->state
& PERF_HES_STOPPED
)) {
254 armpmu
->disable(hwc
, hwc
->idx
);
255 barrier(); /* why? */
256 armpmu_event_update(event
, hwc
, hwc
->idx
);
257 hwc
->state
|= PERF_HES_STOPPED
| PERF_HES_UPTODATE
;
262 armpmu_start(struct perf_event
*event
, int flags
)
264 struct hw_perf_event
*hwc
= &event
->hw
;
270 * ARM pmu always has to reprogram the period, so ignore
271 * PERF_EF_RELOAD, see the comment below.
273 if (flags
& PERF_EF_RELOAD
)
274 WARN_ON_ONCE(!(hwc
->state
& PERF_HES_UPTODATE
));
278 * Set the period again. Some counters can't be stopped, so when we
279 * were stopped we simply disabled the IRQ source and the counter
280 * may have been left counting. If we don't do this step then we may
281 * get an interrupt too soon or *way* too late if the overflow has
282 * happened since disabling.
284 armpmu_event_set_period(event
, hwc
, hwc
->idx
);
285 armpmu
->enable(hwc
, hwc
->idx
);
289 armpmu_del(struct perf_event
*event
, int flags
)
291 struct cpu_hw_events
*cpuc
= &__get_cpu_var(cpu_hw_events
);
292 struct hw_perf_event
*hwc
= &event
->hw
;
297 clear_bit(idx
, cpuc
->active_mask
);
298 armpmu_stop(event
, PERF_EF_UPDATE
);
299 cpuc
->events
[idx
] = NULL
;
300 clear_bit(idx
, cpuc
->used_mask
);
302 perf_event_update_userpage(event
);
306 armpmu_add(struct perf_event
*event
, int flags
)
308 struct cpu_hw_events
*cpuc
= &__get_cpu_var(cpu_hw_events
);
309 struct hw_perf_event
*hwc
= &event
->hw
;
313 perf_pmu_disable(event
->pmu
);
315 /* If we don't have a space for the counter then finish early. */
316 idx
= armpmu
->get_event_idx(cpuc
, hwc
);
323 * If there is an event in the counter we are going to use then make
324 * sure it is disabled.
327 armpmu
->disable(hwc
, idx
);
328 cpuc
->events
[idx
] = event
;
329 set_bit(idx
, cpuc
->active_mask
);
331 hwc
->state
= PERF_HES_STOPPED
| PERF_HES_UPTODATE
;
332 if (flags
& PERF_EF_START
)
333 armpmu_start(event
, PERF_EF_RELOAD
);
335 /* Propagate our changes to the userspace mapping. */
336 perf_event_update_userpage(event
);
339 perf_pmu_enable(event
->pmu
);
343 static struct pmu pmu
;
346 validate_event(struct cpu_hw_events
*cpuc
,
347 struct perf_event
*event
)
349 struct hw_perf_event fake_event
= event
->hw
;
351 if (event
->pmu
!= &pmu
|| event
->state
<= PERF_EVENT_STATE_OFF
)
354 return armpmu
->get_event_idx(cpuc
, &fake_event
) >= 0;
358 validate_group(struct perf_event
*event
)
360 struct perf_event
*sibling
, *leader
= event
->group_leader
;
361 struct cpu_hw_events fake_pmu
;
363 memset(&fake_pmu
, 0, sizeof(fake_pmu
));
365 if (!validate_event(&fake_pmu
, leader
))
368 list_for_each_entry(sibling
, &leader
->sibling_list
, group_entry
) {
369 if (!validate_event(&fake_pmu
, sibling
))
373 if (!validate_event(&fake_pmu
, event
))
380 armpmu_reserve_hardware(void)
382 int i
, err
= -ENODEV
, irq
;
384 pmu_device
= reserve_pmu(ARM_PMU_DEVICE_CPU
);
385 if (IS_ERR(pmu_device
)) {
386 pr_warning("unable to reserve pmu\n");
387 return PTR_ERR(pmu_device
);
390 init_pmu(ARM_PMU_DEVICE_CPU
);
392 if (pmu_device
->num_resources
< 1) {
393 pr_err("no irqs for PMUs defined\n");
397 for (i
= 0; i
< pmu_device
->num_resources
; ++i
) {
398 irq
= platform_get_irq(pmu_device
, i
);
402 err
= request_irq(irq
, armpmu
->handle_irq
,
403 IRQF_DISABLED
| IRQF_NOBALANCING
,
406 pr_warning("unable to request IRQ%d for ARM perf "
413 for (i
= i
- 1; i
>= 0; --i
) {
414 irq
= platform_get_irq(pmu_device
, i
);
418 release_pmu(pmu_device
);
426 armpmu_release_hardware(void)
430 for (i
= pmu_device
->num_resources
- 1; i
>= 0; --i
) {
431 irq
= platform_get_irq(pmu_device
, i
);
437 release_pmu(pmu_device
);
441 static atomic_t active_events
= ATOMIC_INIT(0);
442 static DEFINE_MUTEX(pmu_reserve_mutex
);
445 hw_perf_event_destroy(struct perf_event
*event
)
447 if (atomic_dec_and_mutex_lock(&active_events
, &pmu_reserve_mutex
)) {
448 armpmu_release_hardware();
449 mutex_unlock(&pmu_reserve_mutex
);
454 __hw_perf_event_init(struct perf_event
*event
)
456 struct hw_perf_event
*hwc
= &event
->hw
;
459 /* Decode the generic type into an ARM event identifier. */
460 if (PERF_TYPE_HARDWARE
== event
->attr
.type
) {
461 mapping
= armpmu
->event_map(event
->attr
.config
);
462 } else if (PERF_TYPE_HW_CACHE
== event
->attr
.type
) {
463 mapping
= armpmu_map_cache_event(event
->attr
.config
);
464 } else if (PERF_TYPE_RAW
== event
->attr
.type
) {
465 mapping
= armpmu
->raw_event(event
->attr
.config
);
467 pr_debug("event type %x not supported\n", event
->attr
.type
);
472 pr_debug("event %x:%llx not supported\n", event
->attr
.type
,
478 * Check whether we need to exclude the counter from certain modes.
479 * The ARM performance counters are on all of the time so if someone
480 * has asked us for some excludes then we have to fail.
482 if (event
->attr
.exclude_kernel
|| event
->attr
.exclude_user
||
483 event
->attr
.exclude_hv
|| event
->attr
.exclude_idle
) {
484 pr_debug("ARM performance counters do not support "
490 * We don't assign an index until we actually place the event onto
491 * hardware. Use -1 to signify that we haven't decided where to put it
492 * yet. For SMP systems, each core has it's own PMU so we can't do any
493 * clever allocation or constraints checking at this point.
498 * Store the event encoding into the config_base field. config and
499 * event_base are unused as the only 2 things we need to know are
500 * the event mapping and the counter to use. The counter to use is
501 * also the indx and the config_base is the event type.
503 hwc
->config_base
= (unsigned long)mapping
;
507 if (!hwc
->sample_period
) {
508 hwc
->sample_period
= armpmu
->max_period
;
509 hwc
->last_period
= hwc
->sample_period
;
510 local64_set(&hwc
->period_left
, hwc
->sample_period
);
514 if (event
->group_leader
!= event
) {
515 err
= validate_group(event
);
523 static int armpmu_event_init(struct perf_event
*event
)
527 switch (event
->attr
.type
) {
529 case PERF_TYPE_HARDWARE
:
530 case PERF_TYPE_HW_CACHE
:
540 event
->destroy
= hw_perf_event_destroy
;
542 if (!atomic_inc_not_zero(&active_events
)) {
543 if (atomic_read(&active_events
) > armpmu
->num_events
) {
544 atomic_dec(&active_events
);
548 mutex_lock(&pmu_reserve_mutex
);
549 if (atomic_read(&active_events
) == 0) {
550 err
= armpmu_reserve_hardware();
554 atomic_inc(&active_events
);
555 mutex_unlock(&pmu_reserve_mutex
);
561 err
= __hw_perf_event_init(event
);
563 hw_perf_event_destroy(event
);
568 static void armpmu_enable(struct pmu
*pmu
)
570 /* Enable all of the perf events on hardware. */
572 struct cpu_hw_events
*cpuc
= &__get_cpu_var(cpu_hw_events
);
577 for (idx
= 0; idx
<= armpmu
->num_events
; ++idx
) {
578 struct perf_event
*event
= cpuc
->events
[idx
];
583 armpmu
->enable(&event
->hw
, idx
);
589 static void armpmu_disable(struct pmu
*pmu
)
595 static struct pmu pmu
= {
596 .pmu_enable
= armpmu_enable
,
597 .pmu_disable
= armpmu_disable
,
598 .event_init
= armpmu_event_init
,
601 .start
= armpmu_start
,
607 * ARMv6 Performance counter handling code.
609 * ARMv6 has 2 configurable performance counters and a single cycle counter.
610 * They all share a single reset bit but can be written to zero so we can use
613 * The counters can't be individually enabled or disabled so when we remove
614 * one event and replace it with another we could get spurious counts from the
615 * wrong event. However, we can take advantage of the fact that the
616 * performance counters can export events to the event bus, and the event bus
617 * itself can be monitored. This requires that we *don't* export the events to
618 * the event bus. The procedure for disabling a configurable counter is:
619 * - change the counter to count the ETMEXTOUT[0] signal (0x20). This
620 * effectively stops the counter from counting.
621 * - disable the counter's interrupt generation (each counter has it's
622 * own interrupt enable bit).
623 * Once stopped, the counter value can be written as 0 to reset.
625 * To enable a counter:
626 * - enable the counter's interrupt generation.
627 * - set the new event type.
629 * Note: the dedicated cycle counter only counts cycles and can't be
630 * enabled/disabled independently of the others. When we want to disable the
631 * cycle counter, we have to just disable the interrupt reporting and start
632 * ignoring that counter. When re-enabling, we have to reset the value and
633 * enable the interrupt.
636 enum armv6_perf_types
{
637 ARMV6_PERFCTR_ICACHE_MISS
= 0x0,
638 ARMV6_PERFCTR_IBUF_STALL
= 0x1,
639 ARMV6_PERFCTR_DDEP_STALL
= 0x2,
640 ARMV6_PERFCTR_ITLB_MISS
= 0x3,
641 ARMV6_PERFCTR_DTLB_MISS
= 0x4,
642 ARMV6_PERFCTR_BR_EXEC
= 0x5,
643 ARMV6_PERFCTR_BR_MISPREDICT
= 0x6,
644 ARMV6_PERFCTR_INSTR_EXEC
= 0x7,
645 ARMV6_PERFCTR_DCACHE_HIT
= 0x9,
646 ARMV6_PERFCTR_DCACHE_ACCESS
= 0xA,
647 ARMV6_PERFCTR_DCACHE_MISS
= 0xB,
648 ARMV6_PERFCTR_DCACHE_WBACK
= 0xC,
649 ARMV6_PERFCTR_SW_PC_CHANGE
= 0xD,
650 ARMV6_PERFCTR_MAIN_TLB_MISS
= 0xF,
651 ARMV6_PERFCTR_EXPL_D_ACCESS
= 0x10,
652 ARMV6_PERFCTR_LSU_FULL_STALL
= 0x11,
653 ARMV6_PERFCTR_WBUF_DRAINED
= 0x12,
654 ARMV6_PERFCTR_CPU_CYCLES
= 0xFF,
655 ARMV6_PERFCTR_NOP
= 0x20,
658 enum armv6_counters
{
659 ARMV6_CYCLE_COUNTER
= 1,
665 * The hardware events that we support. We do support cache operations but
666 * we have harvard caches and no way to combine instruction and data
667 * accesses/misses in hardware.
669 static const unsigned armv6_perf_map
[PERF_COUNT_HW_MAX
] = {
670 [PERF_COUNT_HW_CPU_CYCLES
] = ARMV6_PERFCTR_CPU_CYCLES
,
671 [PERF_COUNT_HW_INSTRUCTIONS
] = ARMV6_PERFCTR_INSTR_EXEC
,
672 [PERF_COUNT_HW_CACHE_REFERENCES
] = HW_OP_UNSUPPORTED
,
673 [PERF_COUNT_HW_CACHE_MISSES
] = HW_OP_UNSUPPORTED
,
674 [PERF_COUNT_HW_BRANCH_INSTRUCTIONS
] = ARMV6_PERFCTR_BR_EXEC
,
675 [PERF_COUNT_HW_BRANCH_MISSES
] = ARMV6_PERFCTR_BR_MISPREDICT
,
676 [PERF_COUNT_HW_BUS_CYCLES
] = HW_OP_UNSUPPORTED
,
679 static const unsigned armv6_perf_cache_map
[PERF_COUNT_HW_CACHE_MAX
]
680 [PERF_COUNT_HW_CACHE_OP_MAX
]
681 [PERF_COUNT_HW_CACHE_RESULT_MAX
] = {
684 * The performance counters don't differentiate between read
685 * and write accesses/misses so this isn't strictly correct,
686 * but it's the best we can do. Writes and reads get
690 [C(RESULT_ACCESS
)] = ARMV6_PERFCTR_DCACHE_ACCESS
,
691 [C(RESULT_MISS
)] = ARMV6_PERFCTR_DCACHE_MISS
,
694 [C(RESULT_ACCESS
)] = ARMV6_PERFCTR_DCACHE_ACCESS
,
695 [C(RESULT_MISS
)] = ARMV6_PERFCTR_DCACHE_MISS
,
698 [C(RESULT_ACCESS
)] = CACHE_OP_UNSUPPORTED
,
699 [C(RESULT_MISS
)] = CACHE_OP_UNSUPPORTED
,
704 [C(RESULT_ACCESS
)] = CACHE_OP_UNSUPPORTED
,
705 [C(RESULT_MISS
)] = ARMV6_PERFCTR_ICACHE_MISS
,
708 [C(RESULT_ACCESS
)] = CACHE_OP_UNSUPPORTED
,
709 [C(RESULT_MISS
)] = ARMV6_PERFCTR_ICACHE_MISS
,
712 [C(RESULT_ACCESS
)] = CACHE_OP_UNSUPPORTED
,
713 [C(RESULT_MISS
)] = CACHE_OP_UNSUPPORTED
,
718 [C(RESULT_ACCESS
)] = CACHE_OP_UNSUPPORTED
,
719 [C(RESULT_MISS
)] = CACHE_OP_UNSUPPORTED
,
722 [C(RESULT_ACCESS
)] = CACHE_OP_UNSUPPORTED
,
723 [C(RESULT_MISS
)] = CACHE_OP_UNSUPPORTED
,
726 [C(RESULT_ACCESS
)] = CACHE_OP_UNSUPPORTED
,
727 [C(RESULT_MISS
)] = CACHE_OP_UNSUPPORTED
,
732 * The ARM performance counters can count micro DTLB misses,
733 * micro ITLB misses and main TLB misses. There isn't an event
734 * for TLB misses, so use the micro misses here and if users
735 * want the main TLB misses they can use a raw counter.
738 [C(RESULT_ACCESS
)] = CACHE_OP_UNSUPPORTED
,
739 [C(RESULT_MISS
)] = ARMV6_PERFCTR_DTLB_MISS
,
742 [C(RESULT_ACCESS
)] = CACHE_OP_UNSUPPORTED
,
743 [C(RESULT_MISS
)] = ARMV6_PERFCTR_DTLB_MISS
,
746 [C(RESULT_ACCESS
)] = CACHE_OP_UNSUPPORTED
,
747 [C(RESULT_MISS
)] = CACHE_OP_UNSUPPORTED
,
752 [C(RESULT_ACCESS
)] = CACHE_OP_UNSUPPORTED
,
753 [C(RESULT_MISS
)] = ARMV6_PERFCTR_ITLB_MISS
,
756 [C(RESULT_ACCESS
)] = CACHE_OP_UNSUPPORTED
,
757 [C(RESULT_MISS
)] = ARMV6_PERFCTR_ITLB_MISS
,
760 [C(RESULT_ACCESS
)] = CACHE_OP_UNSUPPORTED
,
761 [C(RESULT_MISS
)] = CACHE_OP_UNSUPPORTED
,
766 [C(RESULT_ACCESS
)] = CACHE_OP_UNSUPPORTED
,
767 [C(RESULT_MISS
)] = CACHE_OP_UNSUPPORTED
,
770 [C(RESULT_ACCESS
)] = CACHE_OP_UNSUPPORTED
,
771 [C(RESULT_MISS
)] = CACHE_OP_UNSUPPORTED
,
774 [C(RESULT_ACCESS
)] = CACHE_OP_UNSUPPORTED
,
775 [C(RESULT_MISS
)] = CACHE_OP_UNSUPPORTED
,
780 enum armv6mpcore_perf_types
{
781 ARMV6MPCORE_PERFCTR_ICACHE_MISS
= 0x0,
782 ARMV6MPCORE_PERFCTR_IBUF_STALL
= 0x1,
783 ARMV6MPCORE_PERFCTR_DDEP_STALL
= 0x2,
784 ARMV6MPCORE_PERFCTR_ITLB_MISS
= 0x3,
785 ARMV6MPCORE_PERFCTR_DTLB_MISS
= 0x4,
786 ARMV6MPCORE_PERFCTR_BR_EXEC
= 0x5,
787 ARMV6MPCORE_PERFCTR_BR_NOTPREDICT
= 0x6,
788 ARMV6MPCORE_PERFCTR_BR_MISPREDICT
= 0x7,
789 ARMV6MPCORE_PERFCTR_INSTR_EXEC
= 0x8,
790 ARMV6MPCORE_PERFCTR_DCACHE_RDACCESS
= 0xA,
791 ARMV6MPCORE_PERFCTR_DCACHE_RDMISS
= 0xB,
792 ARMV6MPCORE_PERFCTR_DCACHE_WRACCESS
= 0xC,
793 ARMV6MPCORE_PERFCTR_DCACHE_WRMISS
= 0xD,
794 ARMV6MPCORE_PERFCTR_DCACHE_EVICTION
= 0xE,
795 ARMV6MPCORE_PERFCTR_SW_PC_CHANGE
= 0xF,
796 ARMV6MPCORE_PERFCTR_MAIN_TLB_MISS
= 0x10,
797 ARMV6MPCORE_PERFCTR_EXPL_MEM_ACCESS
= 0x11,
798 ARMV6MPCORE_PERFCTR_LSU_FULL_STALL
= 0x12,
799 ARMV6MPCORE_PERFCTR_WBUF_DRAINED
= 0x13,
800 ARMV6MPCORE_PERFCTR_CPU_CYCLES
= 0xFF,
804 * The hardware events that we support. We do support cache operations but
805 * we have harvard caches and no way to combine instruction and data
806 * accesses/misses in hardware.
808 static const unsigned armv6mpcore_perf_map
[PERF_COUNT_HW_MAX
] = {
809 [PERF_COUNT_HW_CPU_CYCLES
] = ARMV6MPCORE_PERFCTR_CPU_CYCLES
,
810 [PERF_COUNT_HW_INSTRUCTIONS
] = ARMV6MPCORE_PERFCTR_INSTR_EXEC
,
811 [PERF_COUNT_HW_CACHE_REFERENCES
] = HW_OP_UNSUPPORTED
,
812 [PERF_COUNT_HW_CACHE_MISSES
] = HW_OP_UNSUPPORTED
,
813 [PERF_COUNT_HW_BRANCH_INSTRUCTIONS
] = ARMV6MPCORE_PERFCTR_BR_EXEC
,
814 [PERF_COUNT_HW_BRANCH_MISSES
] = ARMV6MPCORE_PERFCTR_BR_MISPREDICT
,
815 [PERF_COUNT_HW_BUS_CYCLES
] = HW_OP_UNSUPPORTED
,
818 static const unsigned armv6mpcore_perf_cache_map
[PERF_COUNT_HW_CACHE_MAX
]
819 [PERF_COUNT_HW_CACHE_OP_MAX
]
820 [PERF_COUNT_HW_CACHE_RESULT_MAX
] = {
824 ARMV6MPCORE_PERFCTR_DCACHE_RDACCESS
,
826 ARMV6MPCORE_PERFCTR_DCACHE_RDMISS
,
830 ARMV6MPCORE_PERFCTR_DCACHE_WRACCESS
,
832 ARMV6MPCORE_PERFCTR_DCACHE_WRMISS
,
835 [C(RESULT_ACCESS
)] = CACHE_OP_UNSUPPORTED
,
836 [C(RESULT_MISS
)] = CACHE_OP_UNSUPPORTED
,
841 [C(RESULT_ACCESS
)] = CACHE_OP_UNSUPPORTED
,
842 [C(RESULT_MISS
)] = ARMV6MPCORE_PERFCTR_ICACHE_MISS
,
845 [C(RESULT_ACCESS
)] = CACHE_OP_UNSUPPORTED
,
846 [C(RESULT_MISS
)] = ARMV6MPCORE_PERFCTR_ICACHE_MISS
,
849 [C(RESULT_ACCESS
)] = CACHE_OP_UNSUPPORTED
,
850 [C(RESULT_MISS
)] = CACHE_OP_UNSUPPORTED
,
855 [C(RESULT_ACCESS
)] = CACHE_OP_UNSUPPORTED
,
856 [C(RESULT_MISS
)] = CACHE_OP_UNSUPPORTED
,
859 [C(RESULT_ACCESS
)] = CACHE_OP_UNSUPPORTED
,
860 [C(RESULT_MISS
)] = CACHE_OP_UNSUPPORTED
,
863 [C(RESULT_ACCESS
)] = CACHE_OP_UNSUPPORTED
,
864 [C(RESULT_MISS
)] = CACHE_OP_UNSUPPORTED
,
869 * The ARM performance counters can count micro DTLB misses,
870 * micro ITLB misses and main TLB misses. There isn't an event
871 * for TLB misses, so use the micro misses here and if users
872 * want the main TLB misses they can use a raw counter.
875 [C(RESULT_ACCESS
)] = CACHE_OP_UNSUPPORTED
,
876 [C(RESULT_MISS
)] = ARMV6MPCORE_PERFCTR_DTLB_MISS
,
879 [C(RESULT_ACCESS
)] = CACHE_OP_UNSUPPORTED
,
880 [C(RESULT_MISS
)] = ARMV6MPCORE_PERFCTR_DTLB_MISS
,
883 [C(RESULT_ACCESS
)] = CACHE_OP_UNSUPPORTED
,
884 [C(RESULT_MISS
)] = CACHE_OP_UNSUPPORTED
,
889 [C(RESULT_ACCESS
)] = CACHE_OP_UNSUPPORTED
,
890 [C(RESULT_MISS
)] = ARMV6MPCORE_PERFCTR_ITLB_MISS
,
893 [C(RESULT_ACCESS
)] = CACHE_OP_UNSUPPORTED
,
894 [C(RESULT_MISS
)] = ARMV6MPCORE_PERFCTR_ITLB_MISS
,
897 [C(RESULT_ACCESS
)] = CACHE_OP_UNSUPPORTED
,
898 [C(RESULT_MISS
)] = CACHE_OP_UNSUPPORTED
,
903 [C(RESULT_ACCESS
)] = CACHE_OP_UNSUPPORTED
,
904 [C(RESULT_MISS
)] = CACHE_OP_UNSUPPORTED
,
907 [C(RESULT_ACCESS
)] = CACHE_OP_UNSUPPORTED
,
908 [C(RESULT_MISS
)] = CACHE_OP_UNSUPPORTED
,
911 [C(RESULT_ACCESS
)] = CACHE_OP_UNSUPPORTED
,
912 [C(RESULT_MISS
)] = CACHE_OP_UNSUPPORTED
,
917 static inline unsigned long
918 armv6_pmcr_read(void)
921 asm volatile("mrc p15, 0, %0, c15, c12, 0" : "=r"(val
));
926 armv6_pmcr_write(unsigned long val
)
928 asm volatile("mcr p15, 0, %0, c15, c12, 0" : : "r"(val
));
931 #define ARMV6_PMCR_ENABLE (1 << 0)
932 #define ARMV6_PMCR_CTR01_RESET (1 << 1)
933 #define ARMV6_PMCR_CCOUNT_RESET (1 << 2)
934 #define ARMV6_PMCR_CCOUNT_DIV (1 << 3)
935 #define ARMV6_PMCR_COUNT0_IEN (1 << 4)
936 #define ARMV6_PMCR_COUNT1_IEN (1 << 5)
937 #define ARMV6_PMCR_CCOUNT_IEN (1 << 6)
938 #define ARMV6_PMCR_COUNT0_OVERFLOW (1 << 8)
939 #define ARMV6_PMCR_COUNT1_OVERFLOW (1 << 9)
940 #define ARMV6_PMCR_CCOUNT_OVERFLOW (1 << 10)
941 #define ARMV6_PMCR_EVT_COUNT0_SHIFT 20
942 #define ARMV6_PMCR_EVT_COUNT0_MASK (0xFF << ARMV6_PMCR_EVT_COUNT0_SHIFT)
943 #define ARMV6_PMCR_EVT_COUNT1_SHIFT 12
944 #define ARMV6_PMCR_EVT_COUNT1_MASK (0xFF << ARMV6_PMCR_EVT_COUNT1_SHIFT)
946 #define ARMV6_PMCR_OVERFLOWED_MASK \
947 (ARMV6_PMCR_COUNT0_OVERFLOW | ARMV6_PMCR_COUNT1_OVERFLOW | \
948 ARMV6_PMCR_CCOUNT_OVERFLOW)
951 armv6_pmcr_has_overflowed(unsigned long pmcr
)
953 return (pmcr
& ARMV6_PMCR_OVERFLOWED_MASK
);
957 armv6_pmcr_counter_has_overflowed(unsigned long pmcr
,
958 enum armv6_counters counter
)
962 if (ARMV6_CYCLE_COUNTER
== counter
)
963 ret
= pmcr
& ARMV6_PMCR_CCOUNT_OVERFLOW
;
964 else if (ARMV6_COUNTER0
== counter
)
965 ret
= pmcr
& ARMV6_PMCR_COUNT0_OVERFLOW
;
966 else if (ARMV6_COUNTER1
== counter
)
967 ret
= pmcr
& ARMV6_PMCR_COUNT1_OVERFLOW
;
969 WARN_ONCE(1, "invalid counter number (%d)\n", counter
);
975 armv6pmu_read_counter(int counter
)
977 unsigned long value
= 0;
979 if (ARMV6_CYCLE_COUNTER
== counter
)
980 asm volatile("mrc p15, 0, %0, c15, c12, 1" : "=r"(value
));
981 else if (ARMV6_COUNTER0
== counter
)
982 asm volatile("mrc p15, 0, %0, c15, c12, 2" : "=r"(value
));
983 else if (ARMV6_COUNTER1
== counter
)
984 asm volatile("mrc p15, 0, %0, c15, c12, 3" : "=r"(value
));
986 WARN_ONCE(1, "invalid counter number (%d)\n", counter
);
992 armv6pmu_write_counter(int counter
,
995 if (ARMV6_CYCLE_COUNTER
== counter
)
996 asm volatile("mcr p15, 0, %0, c15, c12, 1" : : "r"(value
));
997 else if (ARMV6_COUNTER0
== counter
)
998 asm volatile("mcr p15, 0, %0, c15, c12, 2" : : "r"(value
));
999 else if (ARMV6_COUNTER1
== counter
)
1000 asm volatile("mcr p15, 0, %0, c15, c12, 3" : : "r"(value
));
1002 WARN_ONCE(1, "invalid counter number (%d)\n", counter
);
1006 armv6pmu_enable_event(struct hw_perf_event
*hwc
,
1009 unsigned long val
, mask
, evt
, flags
;
1011 if (ARMV6_CYCLE_COUNTER
== idx
) {
1013 evt
= ARMV6_PMCR_CCOUNT_IEN
;
1014 } else if (ARMV6_COUNTER0
== idx
) {
1015 mask
= ARMV6_PMCR_EVT_COUNT0_MASK
;
1016 evt
= (hwc
->config_base
<< ARMV6_PMCR_EVT_COUNT0_SHIFT
) |
1017 ARMV6_PMCR_COUNT0_IEN
;
1018 } else if (ARMV6_COUNTER1
== idx
) {
1019 mask
= ARMV6_PMCR_EVT_COUNT1_MASK
;
1020 evt
= (hwc
->config_base
<< ARMV6_PMCR_EVT_COUNT1_SHIFT
) |
1021 ARMV6_PMCR_COUNT1_IEN
;
1023 WARN_ONCE(1, "invalid counter number (%d)\n", idx
);
1028 * Mask out the current event and set the counter to count the event
1029 * that we're interested in.
1031 spin_lock_irqsave(&pmu_lock
, flags
);
1032 val
= armv6_pmcr_read();
1035 armv6_pmcr_write(val
);
1036 spin_unlock_irqrestore(&pmu_lock
, flags
);
1040 armv6pmu_handle_irq(int irq_num
,
1043 unsigned long pmcr
= armv6_pmcr_read();
1044 struct perf_sample_data data
;
1045 struct cpu_hw_events
*cpuc
;
1046 struct pt_regs
*regs
;
1049 if (!armv6_pmcr_has_overflowed(pmcr
))
1052 regs
= get_irq_regs();
1055 * The interrupts are cleared by writing the overflow flags back to
1056 * the control register. All of the other bits don't have any effect
1057 * if they are rewritten, so write the whole value back.
1059 armv6_pmcr_write(pmcr
);
1061 perf_sample_data_init(&data
, 0);
1063 cpuc
= &__get_cpu_var(cpu_hw_events
);
1064 for (idx
= 0; idx
<= armpmu
->num_events
; ++idx
) {
1065 struct perf_event
*event
= cpuc
->events
[idx
];
1066 struct hw_perf_event
*hwc
;
1068 if (!test_bit(idx
, cpuc
->active_mask
))
1072 * We have a single interrupt for all counters. Check that
1073 * each counter has overflowed before we process it.
1075 if (!armv6_pmcr_counter_has_overflowed(pmcr
, idx
))
1079 armpmu_event_update(event
, hwc
, idx
);
1080 data
.period
= event
->hw
.last_period
;
1081 if (!armpmu_event_set_period(event
, hwc
, idx
))
1084 if (perf_event_overflow(event
, 0, &data
, regs
))
1085 armpmu
->disable(hwc
, idx
);
1089 * Handle the pending perf events.
1091 * Note: this call *must* be run with interrupts disabled. For
1092 * platforms that can have the PMU interrupts raised as an NMI, this
1101 armv6pmu_start(void)
1103 unsigned long flags
, val
;
1105 spin_lock_irqsave(&pmu_lock
, flags
);
1106 val
= armv6_pmcr_read();
1107 val
|= ARMV6_PMCR_ENABLE
;
1108 armv6_pmcr_write(val
);
1109 spin_unlock_irqrestore(&pmu_lock
, flags
);
1115 unsigned long flags
, val
;
1117 spin_lock_irqsave(&pmu_lock
, flags
);
1118 val
= armv6_pmcr_read();
1119 val
&= ~ARMV6_PMCR_ENABLE
;
1120 armv6_pmcr_write(val
);
1121 spin_unlock_irqrestore(&pmu_lock
, flags
);
1125 armv6pmu_event_map(int config
)
1127 int mapping
= armv6_perf_map
[config
];
1128 if (HW_OP_UNSUPPORTED
== mapping
)
1129 mapping
= -EOPNOTSUPP
;
1134 armv6mpcore_pmu_event_map(int config
)
1136 int mapping
= armv6mpcore_perf_map
[config
];
1137 if (HW_OP_UNSUPPORTED
== mapping
)
1138 mapping
= -EOPNOTSUPP
;
1143 armv6pmu_raw_event(u64 config
)
1145 return config
& 0xff;
1149 armv6pmu_get_event_idx(struct cpu_hw_events
*cpuc
,
1150 struct hw_perf_event
*event
)
1152 /* Always place a cycle counter into the cycle counter. */
1153 if (ARMV6_PERFCTR_CPU_CYCLES
== event
->config_base
) {
1154 if (test_and_set_bit(ARMV6_CYCLE_COUNTER
, cpuc
->used_mask
))
1157 return ARMV6_CYCLE_COUNTER
;
1160 * For anything other than a cycle counter, try and use
1161 * counter0 and counter1.
1163 if (!test_and_set_bit(ARMV6_COUNTER1
, cpuc
->used_mask
)) {
1164 return ARMV6_COUNTER1
;
1167 if (!test_and_set_bit(ARMV6_COUNTER0
, cpuc
->used_mask
)) {
1168 return ARMV6_COUNTER0
;
1171 /* The counters are all in use. */
1177 armv6pmu_disable_event(struct hw_perf_event
*hwc
,
1180 unsigned long val
, mask
, evt
, flags
;
1182 if (ARMV6_CYCLE_COUNTER
== idx
) {
1183 mask
= ARMV6_PMCR_CCOUNT_IEN
;
1185 } else if (ARMV6_COUNTER0
== idx
) {
1186 mask
= ARMV6_PMCR_COUNT0_IEN
| ARMV6_PMCR_EVT_COUNT0_MASK
;
1187 evt
= ARMV6_PERFCTR_NOP
<< ARMV6_PMCR_EVT_COUNT0_SHIFT
;
1188 } else if (ARMV6_COUNTER1
== idx
) {
1189 mask
= ARMV6_PMCR_COUNT1_IEN
| ARMV6_PMCR_EVT_COUNT1_MASK
;
1190 evt
= ARMV6_PERFCTR_NOP
<< ARMV6_PMCR_EVT_COUNT1_SHIFT
;
1192 WARN_ONCE(1, "invalid counter number (%d)\n", idx
);
1197 * Mask out the current event and set the counter to count the number
1198 * of ETM bus signal assertion cycles. The external reporting should
1199 * be disabled and so this should never increment.
1201 spin_lock_irqsave(&pmu_lock
, flags
);
1202 val
= armv6_pmcr_read();
1205 armv6_pmcr_write(val
);
1206 spin_unlock_irqrestore(&pmu_lock
, flags
);
1210 armv6mpcore_pmu_disable_event(struct hw_perf_event
*hwc
,
1213 unsigned long val
, mask
, flags
, evt
= 0;
1215 if (ARMV6_CYCLE_COUNTER
== idx
) {
1216 mask
= ARMV6_PMCR_CCOUNT_IEN
;
1217 } else if (ARMV6_COUNTER0
== idx
) {
1218 mask
= ARMV6_PMCR_COUNT0_IEN
;
1219 } else if (ARMV6_COUNTER1
== idx
) {
1220 mask
= ARMV6_PMCR_COUNT1_IEN
;
1222 WARN_ONCE(1, "invalid counter number (%d)\n", idx
);
1227 * Unlike UP ARMv6, we don't have a way of stopping the counters. We
1228 * simply disable the interrupt reporting.
1230 spin_lock_irqsave(&pmu_lock
, flags
);
1231 val
= armv6_pmcr_read();
1234 armv6_pmcr_write(val
);
1235 spin_unlock_irqrestore(&pmu_lock
, flags
);
1238 static const struct arm_pmu armv6pmu
= {
1239 .id
= ARM_PERF_PMU_ID_V6
,
1240 .handle_irq
= armv6pmu_handle_irq
,
1241 .enable
= armv6pmu_enable_event
,
1242 .disable
= armv6pmu_disable_event
,
1243 .event_map
= armv6pmu_event_map
,
1244 .raw_event
= armv6pmu_raw_event
,
1245 .read_counter
= armv6pmu_read_counter
,
1246 .write_counter
= armv6pmu_write_counter
,
1247 .get_event_idx
= armv6pmu_get_event_idx
,
1248 .start
= armv6pmu_start
,
1249 .stop
= armv6pmu_stop
,
1251 .max_period
= (1LLU << 32) - 1,
1255 * ARMv6mpcore is almost identical to single core ARMv6 with the exception
1256 * that some of the events have different enumerations and that there is no
1257 * *hack* to stop the programmable counters. To stop the counters we simply
1258 * disable the interrupt reporting and update the event. When unthrottling we
1259 * reset the period and enable the interrupt reporting.
1261 static const struct arm_pmu armv6mpcore_pmu
= {
1262 .id
= ARM_PERF_PMU_ID_V6MP
,
1263 .handle_irq
= armv6pmu_handle_irq
,
1264 .enable
= armv6pmu_enable_event
,
1265 .disable
= armv6mpcore_pmu_disable_event
,
1266 .event_map
= armv6mpcore_pmu_event_map
,
1267 .raw_event
= armv6pmu_raw_event
,
1268 .read_counter
= armv6pmu_read_counter
,
1269 .write_counter
= armv6pmu_write_counter
,
1270 .get_event_idx
= armv6pmu_get_event_idx
,
1271 .start
= armv6pmu_start
,
1272 .stop
= armv6pmu_stop
,
1274 .max_period
= (1LLU << 32) - 1,
1278 * ARMv7 Cortex-A8 and Cortex-A9 Performance Events handling code.
1280 * Copied from ARMv6 code, with the low level code inspired
1281 * by the ARMv7 Oprofile code.
1283 * Cortex-A8 has up to 4 configurable performance counters and
1284 * a single cycle counter.
1285 * Cortex-A9 has up to 31 configurable performance counters and
1286 * a single cycle counter.
1288 * All counters can be enabled/disabled and IRQ masked separately. The cycle
1289 * counter and all 4 performance counters together can be reset separately.
1292 /* Common ARMv7 event types */
1293 enum armv7_perf_types
{
1294 ARMV7_PERFCTR_PMNC_SW_INCR
= 0x00,
1295 ARMV7_PERFCTR_IFETCH_MISS
= 0x01,
1296 ARMV7_PERFCTR_ITLB_MISS
= 0x02,
1297 ARMV7_PERFCTR_DCACHE_REFILL
= 0x03,
1298 ARMV7_PERFCTR_DCACHE_ACCESS
= 0x04,
1299 ARMV7_PERFCTR_DTLB_REFILL
= 0x05,
1300 ARMV7_PERFCTR_DREAD
= 0x06,
1301 ARMV7_PERFCTR_DWRITE
= 0x07,
1303 ARMV7_PERFCTR_EXC_TAKEN
= 0x09,
1304 ARMV7_PERFCTR_EXC_EXECUTED
= 0x0A,
1305 ARMV7_PERFCTR_CID_WRITE
= 0x0B,
1306 /* ARMV7_PERFCTR_PC_WRITE is equivalent to HW_BRANCH_INSTRUCTIONS.
1308 * - all branch instructions,
1309 * - instructions that explicitly write the PC,
1310 * - exception generating instructions.
1312 ARMV7_PERFCTR_PC_WRITE
= 0x0C,
1313 ARMV7_PERFCTR_PC_IMM_BRANCH
= 0x0D,
1314 ARMV7_PERFCTR_UNALIGNED_ACCESS
= 0x0F,
1315 ARMV7_PERFCTR_PC_BRANCH_MIS_PRED
= 0x10,
1316 ARMV7_PERFCTR_CLOCK_CYCLES
= 0x11,
1318 ARMV7_PERFCTR_PC_BRANCH_MIS_USED
= 0x12,
1320 ARMV7_PERFCTR_CPU_CYCLES
= 0xFF
1323 /* ARMv7 Cortex-A8 specific event types */
1324 enum armv7_a8_perf_types
{
1325 ARMV7_PERFCTR_INSTR_EXECUTED
= 0x08,
1327 ARMV7_PERFCTR_PC_PROC_RETURN
= 0x0E,
1329 ARMV7_PERFCTR_WRITE_BUFFER_FULL
= 0x40,
1330 ARMV7_PERFCTR_L2_STORE_MERGED
= 0x41,
1331 ARMV7_PERFCTR_L2_STORE_BUFF
= 0x42,
1332 ARMV7_PERFCTR_L2_ACCESS
= 0x43,
1333 ARMV7_PERFCTR_L2_CACH_MISS
= 0x44,
1334 ARMV7_PERFCTR_AXI_READ_CYCLES
= 0x45,
1335 ARMV7_PERFCTR_AXI_WRITE_CYCLES
= 0x46,
1336 ARMV7_PERFCTR_MEMORY_REPLAY
= 0x47,
1337 ARMV7_PERFCTR_UNALIGNED_ACCESS_REPLAY
= 0x48,
1338 ARMV7_PERFCTR_L1_DATA_MISS
= 0x49,
1339 ARMV7_PERFCTR_L1_INST_MISS
= 0x4A,
1340 ARMV7_PERFCTR_L1_DATA_COLORING
= 0x4B,
1341 ARMV7_PERFCTR_L1_NEON_DATA
= 0x4C,
1342 ARMV7_PERFCTR_L1_NEON_CACH_DATA
= 0x4D,
1343 ARMV7_PERFCTR_L2_NEON
= 0x4E,
1344 ARMV7_PERFCTR_L2_NEON_HIT
= 0x4F,
1345 ARMV7_PERFCTR_L1_INST
= 0x50,
1346 ARMV7_PERFCTR_PC_RETURN_MIS_PRED
= 0x51,
1347 ARMV7_PERFCTR_PC_BRANCH_FAILED
= 0x52,
1348 ARMV7_PERFCTR_PC_BRANCH_TAKEN
= 0x53,
1349 ARMV7_PERFCTR_PC_BRANCH_EXECUTED
= 0x54,
1350 ARMV7_PERFCTR_OP_EXECUTED
= 0x55,
1351 ARMV7_PERFCTR_CYCLES_INST_STALL
= 0x56,
1352 ARMV7_PERFCTR_CYCLES_INST
= 0x57,
1353 ARMV7_PERFCTR_CYCLES_NEON_DATA_STALL
= 0x58,
1354 ARMV7_PERFCTR_CYCLES_NEON_INST_STALL
= 0x59,
1355 ARMV7_PERFCTR_NEON_CYCLES
= 0x5A,
1357 ARMV7_PERFCTR_PMU0_EVENTS
= 0x70,
1358 ARMV7_PERFCTR_PMU1_EVENTS
= 0x71,
1359 ARMV7_PERFCTR_PMU_EVENTS
= 0x72,
1362 /* ARMv7 Cortex-A9 specific event types */
1363 enum armv7_a9_perf_types
{
1364 ARMV7_PERFCTR_JAVA_HW_BYTECODE_EXEC
= 0x40,
1365 ARMV7_PERFCTR_JAVA_SW_BYTECODE_EXEC
= 0x41,
1366 ARMV7_PERFCTR_JAZELLE_BRANCH_EXEC
= 0x42,
1368 ARMV7_PERFCTR_COHERENT_LINE_MISS
= 0x50,
1369 ARMV7_PERFCTR_COHERENT_LINE_HIT
= 0x51,
1371 ARMV7_PERFCTR_ICACHE_DEP_STALL_CYCLES
= 0x60,
1372 ARMV7_PERFCTR_DCACHE_DEP_STALL_CYCLES
= 0x61,
1373 ARMV7_PERFCTR_TLB_MISS_DEP_STALL_CYCLES
= 0x62,
1374 ARMV7_PERFCTR_STREX_EXECUTED_PASSED
= 0x63,
1375 ARMV7_PERFCTR_STREX_EXECUTED_FAILED
= 0x64,
1376 ARMV7_PERFCTR_DATA_EVICTION
= 0x65,
1377 ARMV7_PERFCTR_ISSUE_STAGE_NO_INST
= 0x66,
1378 ARMV7_PERFCTR_ISSUE_STAGE_EMPTY
= 0x67,
1379 ARMV7_PERFCTR_INST_OUT_OF_RENAME_STAGE
= 0x68,
1381 ARMV7_PERFCTR_PREDICTABLE_FUNCT_RETURNS
= 0x6E,
1383 ARMV7_PERFCTR_MAIN_UNIT_EXECUTED_INST
= 0x70,
1384 ARMV7_PERFCTR_SECOND_UNIT_EXECUTED_INST
= 0x71,
1385 ARMV7_PERFCTR_LD_ST_UNIT_EXECUTED_INST
= 0x72,
1386 ARMV7_PERFCTR_FP_EXECUTED_INST
= 0x73,
1387 ARMV7_PERFCTR_NEON_EXECUTED_INST
= 0x74,
1389 ARMV7_PERFCTR_PLD_FULL_DEP_STALL_CYCLES
= 0x80,
1390 ARMV7_PERFCTR_DATA_WR_DEP_STALL_CYCLES
= 0x81,
1391 ARMV7_PERFCTR_ITLB_MISS_DEP_STALL_CYCLES
= 0x82,
1392 ARMV7_PERFCTR_DTLB_MISS_DEP_STALL_CYCLES
= 0x83,
1393 ARMV7_PERFCTR_MICRO_ITLB_MISS_DEP_STALL_CYCLES
= 0x84,
1394 ARMV7_PERFCTR_MICRO_DTLB_MISS_DEP_STALL_CYCLES
= 0x85,
1395 ARMV7_PERFCTR_DMB_DEP_STALL_CYCLES
= 0x86,
1397 ARMV7_PERFCTR_INTGR_CLK_ENABLED_CYCLES
= 0x8A,
1398 ARMV7_PERFCTR_DATA_ENGINE_CLK_EN_CYCLES
= 0x8B,
1400 ARMV7_PERFCTR_ISB_INST
= 0x90,
1401 ARMV7_PERFCTR_DSB_INST
= 0x91,
1402 ARMV7_PERFCTR_DMB_INST
= 0x92,
1403 ARMV7_PERFCTR_EXT_INTERRUPTS
= 0x93,
1405 ARMV7_PERFCTR_PLE_CACHE_LINE_RQST_COMPLETED
= 0xA0,
1406 ARMV7_PERFCTR_PLE_CACHE_LINE_RQST_SKIPPED
= 0xA1,
1407 ARMV7_PERFCTR_PLE_FIFO_FLUSH
= 0xA2,
1408 ARMV7_PERFCTR_PLE_RQST_COMPLETED
= 0xA3,
1409 ARMV7_PERFCTR_PLE_FIFO_OVERFLOW
= 0xA4,
1410 ARMV7_PERFCTR_PLE_RQST_PROG
= 0xA5
1414 * Cortex-A8 HW events mapping
1416 * The hardware events that we support. We do support cache operations but
1417 * we have harvard caches and no way to combine instruction and data
1418 * accesses/misses in hardware.
1420 static const unsigned armv7_a8_perf_map
[PERF_COUNT_HW_MAX
] = {
1421 [PERF_COUNT_HW_CPU_CYCLES
] = ARMV7_PERFCTR_CPU_CYCLES
,
1422 [PERF_COUNT_HW_INSTRUCTIONS
] = ARMV7_PERFCTR_INSTR_EXECUTED
,
1423 [PERF_COUNT_HW_CACHE_REFERENCES
] = HW_OP_UNSUPPORTED
,
1424 [PERF_COUNT_HW_CACHE_MISSES
] = HW_OP_UNSUPPORTED
,
1425 [PERF_COUNT_HW_BRANCH_INSTRUCTIONS
] = ARMV7_PERFCTR_PC_WRITE
,
1426 [PERF_COUNT_HW_BRANCH_MISSES
] = ARMV7_PERFCTR_PC_BRANCH_MIS_PRED
,
1427 [PERF_COUNT_HW_BUS_CYCLES
] = ARMV7_PERFCTR_CLOCK_CYCLES
,
1430 static const unsigned armv7_a8_perf_cache_map
[PERF_COUNT_HW_CACHE_MAX
]
1431 [PERF_COUNT_HW_CACHE_OP_MAX
]
1432 [PERF_COUNT_HW_CACHE_RESULT_MAX
] = {
1435 * The performance counters don't differentiate between read
1436 * and write accesses/misses so this isn't strictly correct,
1437 * but it's the best we can do. Writes and reads get
1441 [C(RESULT_ACCESS
)] = ARMV7_PERFCTR_DCACHE_ACCESS
,
1442 [C(RESULT_MISS
)] = ARMV7_PERFCTR_DCACHE_REFILL
,
1445 [C(RESULT_ACCESS
)] = ARMV7_PERFCTR_DCACHE_ACCESS
,
1446 [C(RESULT_MISS
)] = ARMV7_PERFCTR_DCACHE_REFILL
,
1448 [C(OP_PREFETCH
)] = {
1449 [C(RESULT_ACCESS
)] = CACHE_OP_UNSUPPORTED
,
1450 [C(RESULT_MISS
)] = CACHE_OP_UNSUPPORTED
,
1455 [C(RESULT_ACCESS
)] = ARMV7_PERFCTR_L1_INST
,
1456 [C(RESULT_MISS
)] = ARMV7_PERFCTR_L1_INST_MISS
,
1459 [C(RESULT_ACCESS
)] = ARMV7_PERFCTR_L1_INST
,
1460 [C(RESULT_MISS
)] = ARMV7_PERFCTR_L1_INST_MISS
,
1462 [C(OP_PREFETCH
)] = {
1463 [C(RESULT_ACCESS
)] = CACHE_OP_UNSUPPORTED
,
1464 [C(RESULT_MISS
)] = CACHE_OP_UNSUPPORTED
,
1469 [C(RESULT_ACCESS
)] = ARMV7_PERFCTR_L2_ACCESS
,
1470 [C(RESULT_MISS
)] = ARMV7_PERFCTR_L2_CACH_MISS
,
1473 [C(RESULT_ACCESS
)] = ARMV7_PERFCTR_L2_ACCESS
,
1474 [C(RESULT_MISS
)] = ARMV7_PERFCTR_L2_CACH_MISS
,
1476 [C(OP_PREFETCH
)] = {
1477 [C(RESULT_ACCESS
)] = CACHE_OP_UNSUPPORTED
,
1478 [C(RESULT_MISS
)] = CACHE_OP_UNSUPPORTED
,
1483 * Only ITLB misses and DTLB refills are supported.
1484 * If users want the DTLB refills misses a raw counter
1488 [C(RESULT_ACCESS
)] = CACHE_OP_UNSUPPORTED
,
1489 [C(RESULT_MISS
)] = ARMV7_PERFCTR_DTLB_REFILL
,
1492 [C(RESULT_ACCESS
)] = CACHE_OP_UNSUPPORTED
,
1493 [C(RESULT_MISS
)] = ARMV7_PERFCTR_DTLB_REFILL
,
1495 [C(OP_PREFETCH
)] = {
1496 [C(RESULT_ACCESS
)] = CACHE_OP_UNSUPPORTED
,
1497 [C(RESULT_MISS
)] = CACHE_OP_UNSUPPORTED
,
1502 [C(RESULT_ACCESS
)] = CACHE_OP_UNSUPPORTED
,
1503 [C(RESULT_MISS
)] = ARMV7_PERFCTR_ITLB_MISS
,
1506 [C(RESULT_ACCESS
)] = CACHE_OP_UNSUPPORTED
,
1507 [C(RESULT_MISS
)] = ARMV7_PERFCTR_ITLB_MISS
,
1509 [C(OP_PREFETCH
)] = {
1510 [C(RESULT_ACCESS
)] = CACHE_OP_UNSUPPORTED
,
1511 [C(RESULT_MISS
)] = CACHE_OP_UNSUPPORTED
,
1516 [C(RESULT_ACCESS
)] = ARMV7_PERFCTR_PC_WRITE
,
1518 = ARMV7_PERFCTR_PC_BRANCH_MIS_PRED
,
1521 [C(RESULT_ACCESS
)] = ARMV7_PERFCTR_PC_WRITE
,
1523 = ARMV7_PERFCTR_PC_BRANCH_MIS_PRED
,
1525 [C(OP_PREFETCH
)] = {
1526 [C(RESULT_ACCESS
)] = CACHE_OP_UNSUPPORTED
,
1527 [C(RESULT_MISS
)] = CACHE_OP_UNSUPPORTED
,
1533 * Cortex-A9 HW events mapping
1535 static const unsigned armv7_a9_perf_map
[PERF_COUNT_HW_MAX
] = {
1536 [PERF_COUNT_HW_CPU_CYCLES
] = ARMV7_PERFCTR_CPU_CYCLES
,
1537 [PERF_COUNT_HW_INSTRUCTIONS
] =
1538 ARMV7_PERFCTR_INST_OUT_OF_RENAME_STAGE
,
1539 [PERF_COUNT_HW_CACHE_REFERENCES
] = ARMV7_PERFCTR_COHERENT_LINE_HIT
,
1540 [PERF_COUNT_HW_CACHE_MISSES
] = ARMV7_PERFCTR_COHERENT_LINE_MISS
,
1541 [PERF_COUNT_HW_BRANCH_INSTRUCTIONS
] = ARMV7_PERFCTR_PC_WRITE
,
1542 [PERF_COUNT_HW_BRANCH_MISSES
] = ARMV7_PERFCTR_PC_BRANCH_MIS_PRED
,
1543 [PERF_COUNT_HW_BUS_CYCLES
] = ARMV7_PERFCTR_CLOCK_CYCLES
,
1546 static const unsigned armv7_a9_perf_cache_map
[PERF_COUNT_HW_CACHE_MAX
]
1547 [PERF_COUNT_HW_CACHE_OP_MAX
]
1548 [PERF_COUNT_HW_CACHE_RESULT_MAX
] = {
1551 * The performance counters don't differentiate between read
1552 * and write accesses/misses so this isn't strictly correct,
1553 * but it's the best we can do. Writes and reads get
1557 [C(RESULT_ACCESS
)] = ARMV7_PERFCTR_DCACHE_ACCESS
,
1558 [C(RESULT_MISS
)] = ARMV7_PERFCTR_DCACHE_REFILL
,
1561 [C(RESULT_ACCESS
)] = ARMV7_PERFCTR_DCACHE_ACCESS
,
1562 [C(RESULT_MISS
)] = ARMV7_PERFCTR_DCACHE_REFILL
,
1564 [C(OP_PREFETCH
)] = {
1565 [C(RESULT_ACCESS
)] = CACHE_OP_UNSUPPORTED
,
1566 [C(RESULT_MISS
)] = CACHE_OP_UNSUPPORTED
,
1571 [C(RESULT_ACCESS
)] = CACHE_OP_UNSUPPORTED
,
1572 [C(RESULT_MISS
)] = ARMV7_PERFCTR_IFETCH_MISS
,
1575 [C(RESULT_ACCESS
)] = CACHE_OP_UNSUPPORTED
,
1576 [C(RESULT_MISS
)] = ARMV7_PERFCTR_IFETCH_MISS
,
1578 [C(OP_PREFETCH
)] = {
1579 [C(RESULT_ACCESS
)] = CACHE_OP_UNSUPPORTED
,
1580 [C(RESULT_MISS
)] = CACHE_OP_UNSUPPORTED
,
1585 [C(RESULT_ACCESS
)] = CACHE_OP_UNSUPPORTED
,
1586 [C(RESULT_MISS
)] = CACHE_OP_UNSUPPORTED
,
1589 [C(RESULT_ACCESS
)] = CACHE_OP_UNSUPPORTED
,
1590 [C(RESULT_MISS
)] = CACHE_OP_UNSUPPORTED
,
1592 [C(OP_PREFETCH
)] = {
1593 [C(RESULT_ACCESS
)] = CACHE_OP_UNSUPPORTED
,
1594 [C(RESULT_MISS
)] = CACHE_OP_UNSUPPORTED
,
1599 * Only ITLB misses and DTLB refills are supported.
1600 * If users want the DTLB refills misses a raw counter
1604 [C(RESULT_ACCESS
)] = CACHE_OP_UNSUPPORTED
,
1605 [C(RESULT_MISS
)] = ARMV7_PERFCTR_DTLB_REFILL
,
1608 [C(RESULT_ACCESS
)] = CACHE_OP_UNSUPPORTED
,
1609 [C(RESULT_MISS
)] = ARMV7_PERFCTR_DTLB_REFILL
,
1611 [C(OP_PREFETCH
)] = {
1612 [C(RESULT_ACCESS
)] = CACHE_OP_UNSUPPORTED
,
1613 [C(RESULT_MISS
)] = CACHE_OP_UNSUPPORTED
,
1618 [C(RESULT_ACCESS
)] = CACHE_OP_UNSUPPORTED
,
1619 [C(RESULT_MISS
)] = ARMV7_PERFCTR_ITLB_MISS
,
1622 [C(RESULT_ACCESS
)] = CACHE_OP_UNSUPPORTED
,
1623 [C(RESULT_MISS
)] = ARMV7_PERFCTR_ITLB_MISS
,
1625 [C(OP_PREFETCH
)] = {
1626 [C(RESULT_ACCESS
)] = CACHE_OP_UNSUPPORTED
,
1627 [C(RESULT_MISS
)] = CACHE_OP_UNSUPPORTED
,
1632 [C(RESULT_ACCESS
)] = ARMV7_PERFCTR_PC_WRITE
,
1634 = ARMV7_PERFCTR_PC_BRANCH_MIS_PRED
,
1637 [C(RESULT_ACCESS
)] = ARMV7_PERFCTR_PC_WRITE
,
1639 = ARMV7_PERFCTR_PC_BRANCH_MIS_PRED
,
1641 [C(OP_PREFETCH
)] = {
1642 [C(RESULT_ACCESS
)] = CACHE_OP_UNSUPPORTED
,
1643 [C(RESULT_MISS
)] = CACHE_OP_UNSUPPORTED
,
1649 * Perf Events counters
1651 enum armv7_counters
{
1652 ARMV7_CYCLE_COUNTER
= 1, /* Cycle counter */
1653 ARMV7_COUNTER0
= 2, /* First event counter */
1657 * The cycle counter is ARMV7_CYCLE_COUNTER.
1658 * The first event counter is ARMV7_COUNTER0.
1659 * The last event counter is (ARMV7_COUNTER0 + armpmu->num_events - 1).
1661 #define ARMV7_COUNTER_LAST (ARMV7_COUNTER0 + armpmu->num_events - 1)
1664 * ARMv7 low level PMNC access
1668 * Per-CPU PMNC: config reg
1670 #define ARMV7_PMNC_E (1 << 0) /* Enable all counters */
1671 #define ARMV7_PMNC_P (1 << 1) /* Reset all counters */
1672 #define ARMV7_PMNC_C (1 << 2) /* Cycle counter reset */
1673 #define ARMV7_PMNC_D (1 << 3) /* CCNT counts every 64th cpu cycle */
1674 #define ARMV7_PMNC_X (1 << 4) /* Export to ETM */
1675 #define ARMV7_PMNC_DP (1 << 5) /* Disable CCNT if non-invasive debug*/
1676 #define ARMV7_PMNC_N_SHIFT 11 /* Number of counters supported */
1677 #define ARMV7_PMNC_N_MASK 0x1f
1678 #define ARMV7_PMNC_MASK 0x3f /* Mask for writable bits */
1681 * Available counters
1683 #define ARMV7_CNT0 0 /* First event counter */
1684 #define ARMV7_CCNT 31 /* Cycle counter */
1686 /* Perf Event to low level counters mapping */
1687 #define ARMV7_EVENT_CNT_TO_CNTx (ARMV7_COUNTER0 - ARMV7_CNT0)
1690 * CNTENS: counters enable reg
1692 #define ARMV7_CNTENS_P(idx) (1 << (idx - ARMV7_EVENT_CNT_TO_CNTx))
1693 #define ARMV7_CNTENS_C (1 << ARMV7_CCNT)
1696 * CNTENC: counters disable reg
1698 #define ARMV7_CNTENC_P(idx) (1 << (idx - ARMV7_EVENT_CNT_TO_CNTx))
1699 #define ARMV7_CNTENC_C (1 << ARMV7_CCNT)
1702 * INTENS: counters overflow interrupt enable reg
1704 #define ARMV7_INTENS_P(idx) (1 << (idx - ARMV7_EVENT_CNT_TO_CNTx))
1705 #define ARMV7_INTENS_C (1 << ARMV7_CCNT)
1708 * INTENC: counters overflow interrupt disable reg
1710 #define ARMV7_INTENC_P(idx) (1 << (idx - ARMV7_EVENT_CNT_TO_CNTx))
1711 #define ARMV7_INTENC_C (1 << ARMV7_CCNT)
1714 * EVTSEL: Event selection reg
1716 #define ARMV7_EVTSEL_MASK 0xff /* Mask for writable bits */
1719 * SELECT: Counter selection reg
1721 #define ARMV7_SELECT_MASK 0x1f /* Mask for writable bits */
1724 * FLAG: counters overflow flag status reg
1726 #define ARMV7_FLAG_P(idx) (1 << (idx - ARMV7_EVENT_CNT_TO_CNTx))
1727 #define ARMV7_FLAG_C (1 << ARMV7_CCNT)
1728 #define ARMV7_FLAG_MASK 0xffffffff /* Mask for writable bits */
1729 #define ARMV7_OVERFLOWED_MASK ARMV7_FLAG_MASK
1731 static inline unsigned long armv7_pmnc_read(void)
1734 asm volatile("mrc p15, 0, %0, c9, c12, 0" : "=r"(val
));
1738 static inline void armv7_pmnc_write(unsigned long val
)
1740 val
&= ARMV7_PMNC_MASK
;
1741 asm volatile("mcr p15, 0, %0, c9, c12, 0" : : "r"(val
));
1744 static inline int armv7_pmnc_has_overflowed(unsigned long pmnc
)
1746 return pmnc
& ARMV7_OVERFLOWED_MASK
;
1749 static inline int armv7_pmnc_counter_has_overflowed(unsigned long pmnc
,
1750 enum armv7_counters counter
)
1754 if (counter
== ARMV7_CYCLE_COUNTER
)
1755 ret
= pmnc
& ARMV7_FLAG_C
;
1756 else if ((counter
>= ARMV7_COUNTER0
) && (counter
<= ARMV7_COUNTER_LAST
))
1757 ret
= pmnc
& ARMV7_FLAG_P(counter
);
1759 pr_err("CPU%u checking wrong counter %d overflow status\n",
1760 smp_processor_id(), counter
);
1765 static inline int armv7_pmnc_select_counter(unsigned int idx
)
1769 if ((idx
< ARMV7_COUNTER0
) || (idx
> ARMV7_COUNTER_LAST
)) {
1770 pr_err("CPU%u selecting wrong PMNC counter"
1771 " %d\n", smp_processor_id(), idx
);
1775 val
= (idx
- ARMV7_EVENT_CNT_TO_CNTx
) & ARMV7_SELECT_MASK
;
1776 asm volatile("mcr p15, 0, %0, c9, c12, 5" : : "r" (val
));
1781 static inline u32
armv7pmu_read_counter(int idx
)
1783 unsigned long value
= 0;
1785 if (idx
== ARMV7_CYCLE_COUNTER
)
1786 asm volatile("mrc p15, 0, %0, c9, c13, 0" : "=r" (value
));
1787 else if ((idx
>= ARMV7_COUNTER0
) && (idx
<= ARMV7_COUNTER_LAST
)) {
1788 if (armv7_pmnc_select_counter(idx
) == idx
)
1789 asm volatile("mrc p15, 0, %0, c9, c13, 2"
1792 pr_err("CPU%u reading wrong counter %d\n",
1793 smp_processor_id(), idx
);
1798 static inline void armv7pmu_write_counter(int idx
, u32 value
)
1800 if (idx
== ARMV7_CYCLE_COUNTER
)
1801 asm volatile("mcr p15, 0, %0, c9, c13, 0" : : "r" (value
));
1802 else if ((idx
>= ARMV7_COUNTER0
) && (idx
<= ARMV7_COUNTER_LAST
)) {
1803 if (armv7_pmnc_select_counter(idx
) == idx
)
1804 asm volatile("mcr p15, 0, %0, c9, c13, 2"
1807 pr_err("CPU%u writing wrong counter %d\n",
1808 smp_processor_id(), idx
);
1811 static inline void armv7_pmnc_write_evtsel(unsigned int idx
, u32 val
)
1813 if (armv7_pmnc_select_counter(idx
) == idx
) {
1814 val
&= ARMV7_EVTSEL_MASK
;
1815 asm volatile("mcr p15, 0, %0, c9, c13, 1" : : "r" (val
));
1819 static inline u32
armv7_pmnc_enable_counter(unsigned int idx
)
1823 if ((idx
!= ARMV7_CYCLE_COUNTER
) &&
1824 ((idx
< ARMV7_COUNTER0
) || (idx
> ARMV7_COUNTER_LAST
))) {
1825 pr_err("CPU%u enabling wrong PMNC counter"
1826 " %d\n", smp_processor_id(), idx
);
1830 if (idx
== ARMV7_CYCLE_COUNTER
)
1831 val
= ARMV7_CNTENS_C
;
1833 val
= ARMV7_CNTENS_P(idx
);
1835 asm volatile("mcr p15, 0, %0, c9, c12, 1" : : "r" (val
));
1840 static inline u32
armv7_pmnc_disable_counter(unsigned int idx
)
1845 if ((idx
!= ARMV7_CYCLE_COUNTER
) &&
1846 ((idx
< ARMV7_COUNTER0
) || (idx
> ARMV7_COUNTER_LAST
))) {
1847 pr_err("CPU%u disabling wrong PMNC counter"
1848 " %d\n", smp_processor_id(), idx
);
1852 if (idx
== ARMV7_CYCLE_COUNTER
)
1853 val
= ARMV7_CNTENC_C
;
1855 val
= ARMV7_CNTENC_P(idx
);
1857 asm volatile("mcr p15, 0, %0, c9, c12, 2" : : "r" (val
));
1862 static inline u32
armv7_pmnc_enable_intens(unsigned int idx
)
1866 if ((idx
!= ARMV7_CYCLE_COUNTER
) &&
1867 ((idx
< ARMV7_COUNTER0
) || (idx
> ARMV7_COUNTER_LAST
))) {
1868 pr_err("CPU%u enabling wrong PMNC counter"
1869 " interrupt enable %d\n", smp_processor_id(), idx
);
1873 if (idx
== ARMV7_CYCLE_COUNTER
)
1874 val
= ARMV7_INTENS_C
;
1876 val
= ARMV7_INTENS_P(idx
);
1878 asm volatile("mcr p15, 0, %0, c9, c14, 1" : : "r" (val
));
1883 static inline u32
armv7_pmnc_disable_intens(unsigned int idx
)
1887 if ((idx
!= ARMV7_CYCLE_COUNTER
) &&
1888 ((idx
< ARMV7_COUNTER0
) || (idx
> ARMV7_COUNTER_LAST
))) {
1889 pr_err("CPU%u disabling wrong PMNC counter"
1890 " interrupt enable %d\n", smp_processor_id(), idx
);
1894 if (idx
== ARMV7_CYCLE_COUNTER
)
1895 val
= ARMV7_INTENC_C
;
1897 val
= ARMV7_INTENC_P(idx
);
1899 asm volatile("mcr p15, 0, %0, c9, c14, 2" : : "r" (val
));
1904 static inline u32
armv7_pmnc_getreset_flags(void)
1909 asm volatile("mrc p15, 0, %0, c9, c12, 3" : "=r" (val
));
1911 /* Write to clear flags */
1912 val
&= ARMV7_FLAG_MASK
;
1913 asm volatile("mcr p15, 0, %0, c9, c12, 3" : : "r" (val
));
1919 static void armv7_pmnc_dump_regs(void)
1924 printk(KERN_INFO
"PMNC registers dump:\n");
1926 asm volatile("mrc p15, 0, %0, c9, c12, 0" : "=r" (val
));
1927 printk(KERN_INFO
"PMNC =0x%08x\n", val
);
1929 asm volatile("mrc p15, 0, %0, c9, c12, 1" : "=r" (val
));
1930 printk(KERN_INFO
"CNTENS=0x%08x\n", val
);
1932 asm volatile("mrc p15, 0, %0, c9, c14, 1" : "=r" (val
));
1933 printk(KERN_INFO
"INTENS=0x%08x\n", val
);
1935 asm volatile("mrc p15, 0, %0, c9, c12, 3" : "=r" (val
));
1936 printk(KERN_INFO
"FLAGS =0x%08x\n", val
);
1938 asm volatile("mrc p15, 0, %0, c9, c12, 5" : "=r" (val
));
1939 printk(KERN_INFO
"SELECT=0x%08x\n", val
);
1941 asm volatile("mrc p15, 0, %0, c9, c13, 0" : "=r" (val
));
1942 printk(KERN_INFO
"CCNT =0x%08x\n", val
);
1944 for (cnt
= ARMV7_COUNTER0
; cnt
< ARMV7_COUNTER_LAST
; cnt
++) {
1945 armv7_pmnc_select_counter(cnt
);
1946 asm volatile("mrc p15, 0, %0, c9, c13, 2" : "=r" (val
));
1947 printk(KERN_INFO
"CNT[%d] count =0x%08x\n",
1948 cnt
-ARMV7_EVENT_CNT_TO_CNTx
, val
);
1949 asm volatile("mrc p15, 0, %0, c9, c13, 1" : "=r" (val
));
1950 printk(KERN_INFO
"CNT[%d] evtsel=0x%08x\n",
1951 cnt
-ARMV7_EVENT_CNT_TO_CNTx
, val
);
1956 void armv7pmu_enable_event(struct hw_perf_event
*hwc
, int idx
)
1958 unsigned long flags
;
1961 * Enable counter and interrupt, and set the counter to count
1962 * the event that we're interested in.
1964 spin_lock_irqsave(&pmu_lock
, flags
);
1969 armv7_pmnc_disable_counter(idx
);
1972 * Set event (if destined for PMNx counters)
1973 * We don't need to set the event if it's a cycle count
1975 if (idx
!= ARMV7_CYCLE_COUNTER
)
1976 armv7_pmnc_write_evtsel(idx
, hwc
->config_base
);
1979 * Enable interrupt for this counter
1981 armv7_pmnc_enable_intens(idx
);
1986 armv7_pmnc_enable_counter(idx
);
1988 spin_unlock_irqrestore(&pmu_lock
, flags
);
1991 static void armv7pmu_disable_event(struct hw_perf_event
*hwc
, int idx
)
1993 unsigned long flags
;
1996 * Disable counter and interrupt
1998 spin_lock_irqsave(&pmu_lock
, flags
);
2003 armv7_pmnc_disable_counter(idx
);
2006 * Disable interrupt for this counter
2008 armv7_pmnc_disable_intens(idx
);
2010 spin_unlock_irqrestore(&pmu_lock
, flags
);
2013 static irqreturn_t
armv7pmu_handle_irq(int irq_num
, void *dev
)
2016 struct perf_sample_data data
;
2017 struct cpu_hw_events
*cpuc
;
2018 struct pt_regs
*regs
;
2022 * Get and reset the IRQ flags
2024 pmnc
= armv7_pmnc_getreset_flags();
2027 * Did an overflow occur?
2029 if (!armv7_pmnc_has_overflowed(pmnc
))
2033 * Handle the counter(s) overflow(s)
2035 regs
= get_irq_regs();
2037 perf_sample_data_init(&data
, 0);
2039 cpuc
= &__get_cpu_var(cpu_hw_events
);
2040 for (idx
= 0; idx
<= armpmu
->num_events
; ++idx
) {
2041 struct perf_event
*event
= cpuc
->events
[idx
];
2042 struct hw_perf_event
*hwc
;
2044 if (!test_bit(idx
, cpuc
->active_mask
))
2048 * We have a single interrupt for all counters. Check that
2049 * each counter has overflowed before we process it.
2051 if (!armv7_pmnc_counter_has_overflowed(pmnc
, idx
))
2055 armpmu_event_update(event
, hwc
, idx
);
2056 data
.period
= event
->hw
.last_period
;
2057 if (!armpmu_event_set_period(event
, hwc
, idx
))
2060 if (perf_event_overflow(event
, 0, &data
, regs
))
2061 armpmu
->disable(hwc
, idx
);
2065 * Handle the pending perf events.
2067 * Note: this call *must* be run with interrupts disabled. For
2068 * platforms that can have the PMU interrupts raised as an NMI, this
2076 static void armv7pmu_start(void)
2078 unsigned long flags
;
2080 spin_lock_irqsave(&pmu_lock
, flags
);
2081 /* Enable all counters */
2082 armv7_pmnc_write(armv7_pmnc_read() | ARMV7_PMNC_E
);
2083 spin_unlock_irqrestore(&pmu_lock
, flags
);
2086 static void armv7pmu_stop(void)
2088 unsigned long flags
;
2090 spin_lock_irqsave(&pmu_lock
, flags
);
2091 /* Disable all counters */
2092 armv7_pmnc_write(armv7_pmnc_read() & ~ARMV7_PMNC_E
);
2093 spin_unlock_irqrestore(&pmu_lock
, flags
);
2096 static inline int armv7_a8_pmu_event_map(int config
)
2098 int mapping
= armv7_a8_perf_map
[config
];
2099 if (HW_OP_UNSUPPORTED
== mapping
)
2100 mapping
= -EOPNOTSUPP
;
2104 static inline int armv7_a9_pmu_event_map(int config
)
2106 int mapping
= armv7_a9_perf_map
[config
];
2107 if (HW_OP_UNSUPPORTED
== mapping
)
2108 mapping
= -EOPNOTSUPP
;
2112 static u64
armv7pmu_raw_event(u64 config
)
2114 return config
& 0xff;
2117 static int armv7pmu_get_event_idx(struct cpu_hw_events
*cpuc
,
2118 struct hw_perf_event
*event
)
2122 /* Always place a cycle counter into the cycle counter. */
2123 if (event
->config_base
== ARMV7_PERFCTR_CPU_CYCLES
) {
2124 if (test_and_set_bit(ARMV7_CYCLE_COUNTER
, cpuc
->used_mask
))
2127 return ARMV7_CYCLE_COUNTER
;
2130 * For anything other than a cycle counter, try and use
2131 * the events counters
2133 for (idx
= ARMV7_COUNTER0
; idx
<= armpmu
->num_events
; ++idx
) {
2134 if (!test_and_set_bit(idx
, cpuc
->used_mask
))
2138 /* The counters are all in use. */
2143 static struct arm_pmu armv7pmu
= {
2144 .handle_irq
= armv7pmu_handle_irq
,
2145 .enable
= armv7pmu_enable_event
,
2146 .disable
= armv7pmu_disable_event
,
2147 .raw_event
= armv7pmu_raw_event
,
2148 .read_counter
= armv7pmu_read_counter
,
2149 .write_counter
= armv7pmu_write_counter
,
2150 .get_event_idx
= armv7pmu_get_event_idx
,
2151 .start
= armv7pmu_start
,
2152 .stop
= armv7pmu_stop
,
2153 .max_period
= (1LLU << 32) - 1,
2156 static u32 __init
armv7_reset_read_pmnc(void)
2160 /* Initialize & Reset PMNC: C and P bits */
2161 armv7_pmnc_write(ARMV7_PMNC_P
| ARMV7_PMNC_C
);
2163 /* Read the nb of CNTx counters supported from PMNC */
2164 nb_cnt
= (armv7_pmnc_read() >> ARMV7_PMNC_N_SHIFT
) & ARMV7_PMNC_N_MASK
;
2166 /* Add the CPU cycles counter and return */
2171 * ARMv5 [xscale] Performance counter handling code.
2173 * Based on xscale OProfile code.
2175 * There are two variants of the xscale PMU that we support:
2176 * - xscale1pmu: 2 event counters and a cycle counter
2177 * - xscale2pmu: 4 event counters and a cycle counter
2178 * The two variants share event definitions, but have different
2182 enum xscale_perf_types
{
2183 XSCALE_PERFCTR_ICACHE_MISS
= 0x00,
2184 XSCALE_PERFCTR_ICACHE_NO_DELIVER
= 0x01,
2185 XSCALE_PERFCTR_DATA_STALL
= 0x02,
2186 XSCALE_PERFCTR_ITLB_MISS
= 0x03,
2187 XSCALE_PERFCTR_DTLB_MISS
= 0x04,
2188 XSCALE_PERFCTR_BRANCH
= 0x05,
2189 XSCALE_PERFCTR_BRANCH_MISS
= 0x06,
2190 XSCALE_PERFCTR_INSTRUCTION
= 0x07,
2191 XSCALE_PERFCTR_DCACHE_FULL_STALL
= 0x08,
2192 XSCALE_PERFCTR_DCACHE_FULL_STALL_CONTIG
= 0x09,
2193 XSCALE_PERFCTR_DCACHE_ACCESS
= 0x0A,
2194 XSCALE_PERFCTR_DCACHE_MISS
= 0x0B,
2195 XSCALE_PERFCTR_DCACHE_WRITE_BACK
= 0x0C,
2196 XSCALE_PERFCTR_PC_CHANGED
= 0x0D,
2197 XSCALE_PERFCTR_BCU_REQUEST
= 0x10,
2198 XSCALE_PERFCTR_BCU_FULL
= 0x11,
2199 XSCALE_PERFCTR_BCU_DRAIN
= 0x12,
2200 XSCALE_PERFCTR_BCU_ECC_NO_ELOG
= 0x14,
2201 XSCALE_PERFCTR_BCU_1_BIT_ERR
= 0x15,
2202 XSCALE_PERFCTR_RMW
= 0x16,
2203 /* XSCALE_PERFCTR_CCNT is not hardware defined */
2204 XSCALE_PERFCTR_CCNT
= 0xFE,
2205 XSCALE_PERFCTR_UNUSED
= 0xFF,
2208 enum xscale_counters
{
2209 XSCALE_CYCLE_COUNTER
= 1,
2216 static const unsigned xscale_perf_map
[PERF_COUNT_HW_MAX
] = {
2217 [PERF_COUNT_HW_CPU_CYCLES
] = XSCALE_PERFCTR_CCNT
,
2218 [PERF_COUNT_HW_INSTRUCTIONS
] = XSCALE_PERFCTR_INSTRUCTION
,
2219 [PERF_COUNT_HW_CACHE_REFERENCES
] = HW_OP_UNSUPPORTED
,
2220 [PERF_COUNT_HW_CACHE_MISSES
] = HW_OP_UNSUPPORTED
,
2221 [PERF_COUNT_HW_BRANCH_INSTRUCTIONS
] = XSCALE_PERFCTR_BRANCH
,
2222 [PERF_COUNT_HW_BRANCH_MISSES
] = XSCALE_PERFCTR_BRANCH_MISS
,
2223 [PERF_COUNT_HW_BUS_CYCLES
] = HW_OP_UNSUPPORTED
,
static const unsigned xscale_perf_cache_map[PERF_COUNT_HW_CACHE_MAX]
					   [PERF_COUNT_HW_CACHE_OP_MAX]
					   [PERF_COUNT_HW_CACHE_RESULT_MAX] = {
	[C(L1D)] = {
		[C(OP_READ)] = {
			[C(RESULT_ACCESS)]	= XSCALE_PERFCTR_DCACHE_ACCESS,
			[C(RESULT_MISS)]	= XSCALE_PERFCTR_DCACHE_MISS,
		},
		[C(OP_WRITE)] = {
			[C(RESULT_ACCESS)]	= XSCALE_PERFCTR_DCACHE_ACCESS,
			[C(RESULT_MISS)]	= XSCALE_PERFCTR_DCACHE_MISS,
		},
		[C(OP_PREFETCH)] = {
			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)]	= CACHE_OP_UNSUPPORTED,
		},
	},
	[C(L1I)] = {
		[C(OP_READ)] = {
			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)]	= XSCALE_PERFCTR_ICACHE_MISS,
		},
		[C(OP_WRITE)] = {
			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)]	= XSCALE_PERFCTR_ICACHE_MISS,
		},
		[C(OP_PREFETCH)] = {
			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)]	= CACHE_OP_UNSUPPORTED,
		},
	},
	[C(LL)] = {
		[C(OP_READ)] = {
			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)]	= CACHE_OP_UNSUPPORTED,
		},
		[C(OP_WRITE)] = {
			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)]	= CACHE_OP_UNSUPPORTED,
		},
		[C(OP_PREFETCH)] = {
			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)]	= CACHE_OP_UNSUPPORTED,
		},
	},
	[C(DTLB)] = {
		[C(OP_READ)] = {
			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)]	= XSCALE_PERFCTR_DTLB_MISS,
		},
		[C(OP_WRITE)] = {
			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)]	= XSCALE_PERFCTR_DTLB_MISS,
		},
		[C(OP_PREFETCH)] = {
			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)]	= CACHE_OP_UNSUPPORTED,
		},
	},
	[C(ITLB)] = {
		[C(OP_READ)] = {
			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)]	= XSCALE_PERFCTR_ITLB_MISS,
		},
		[C(OP_WRITE)] = {
			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)]	= XSCALE_PERFCTR_ITLB_MISS,
		},
		[C(OP_PREFETCH)] = {
			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)]	= CACHE_OP_UNSUPPORTED,
		},
	},
	[C(BPU)] = {
		[C(OP_READ)] = {
			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)]	= CACHE_OP_UNSUPPORTED,
		},
		[C(OP_WRITE)] = {
			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)]	= CACHE_OP_UNSUPPORTED,
		},
		[C(OP_PREFETCH)] = {
			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)]	= CACHE_OP_UNSUPPORTED,
		},
	},
};
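
/*
 * Example: a generic L1-dcache read miss resolves through this table to
 * XSCALE_PERFCTR_DCACHE_MISS, while all prefetch combinations and the
 * LL and BPU caches are reported as CACHE_OP_UNSUPPORTED on XScale.
 */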
#define XSCALE_PMU_ENABLE	0x001
#define XSCALE_PMN_RESET	0x002
#define XSCALE_CCNT_RESET	0x004
#define XSCALE_PMU_RESET	(XSCALE_CCNT_RESET | XSCALE_PMN_RESET)
#define XSCALE_PMU_CNT64	0x008

static int
xscalepmu_event_map(int config)
{
	int mapping = xscale_perf_map[config];
	if (HW_OP_UNSUPPORTED == mapping)
		mapping = -EOPNOTSUPP;
	return mapping;
}

static u64
xscalepmu_raw_event(u64 config)
{
	return config & 0xff;
}
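
/*
 * For example, a generic PERF_COUNT_HW_BRANCH_MISSES request maps to
 * XSCALE_PERFCTR_BRANCH_MISS (0x06) via xscale_perf_map above, while
 * PERF_COUNT_HW_BUS_CYCLES maps to HW_OP_UNSUPPORTED and is rejected
 * with -EOPNOTSUPP. Raw event codes are masked down to their low 8 bits,
 * so only the XScale event numbers listed in xscale_perf_types ever
 * reach the hardware.
 */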
#define XSCALE1_OVERFLOWED_MASK	0x700
#define XSCALE1_CCOUNT_OVERFLOW	0x400
#define XSCALE1_COUNT0_OVERFLOW	0x100
#define XSCALE1_COUNT1_OVERFLOW	0x200
#define XSCALE1_CCOUNT_INT_EN	0x040
#define XSCALE1_COUNT0_INT_EN	0x010
#define XSCALE1_COUNT1_INT_EN	0x020
#define XSCALE1_COUNT0_EVT_SHFT	12
#define XSCALE1_COUNT0_EVT_MASK	(0xff << XSCALE1_COUNT0_EVT_SHFT)
#define XSCALE1_COUNT1_EVT_SHFT	20
#define XSCALE1_COUNT1_EVT_MASK	(0xff << XSCALE1_COUNT1_EVT_SHFT)
static inline u32
xscale1pmu_read_pmnc(void)
{
	u32 val;
	asm volatile("mrc p14, 0, %0, c0, c0, 0" : "=r" (val));
	return val;
}

static inline void
xscale1pmu_write_pmnc(u32 val)
{
	/* upper 4bits and 7, 11 are write-as-0 */
	val &= 0xffff77f;
	asm volatile("mcr p14, 0, %0, c0, c0, 0" : : "r" (val));
}

static inline int
xscale1_pmnc_counter_has_overflowed(unsigned long pmnc,
				    enum xscale_counters counter)
{
	int ret = 0;

	switch (counter) {
	case XSCALE_CYCLE_COUNTER:
		ret = pmnc & XSCALE1_CCOUNT_OVERFLOW;
		break;
	case XSCALE_COUNTER0:
		ret = pmnc & XSCALE1_COUNT0_OVERFLOW;
		break;
	case XSCALE_COUNTER1:
		ret = pmnc & XSCALE1_COUNT1_OVERFLOW;
		break;
	default:
		WARN_ONCE(1, "invalid counter number (%d)\n", counter);
	}

	return ret;
}
static irqreturn_t
xscale1pmu_handle_irq(int irq_num, void *dev)
{
	unsigned long pmnc;
	struct perf_sample_data data;
	struct cpu_hw_events *cpuc;
	struct pt_regs *regs;
	int idx;

	/*
	 * NOTE: there's an A stepping erratum that states if an overflow
	 *       bit already exists and another occurs, the previous
	 *       Overflow bit gets cleared. There's no workaround.
	 *       Fixed in B stepping or later.
	 */
	pmnc = xscale1pmu_read_pmnc();

	/*
	 * Write the value back to clear the overflow flags. Overflow
	 * flags remain in pmnc for use below. We also disable the PMU
	 * while we process the interrupt.
	 */
	xscale1pmu_write_pmnc(pmnc & ~XSCALE_PMU_ENABLE);

	if (!(pmnc & XSCALE1_OVERFLOWED_MASK))
		return IRQ_NONE;

	regs = get_irq_regs();

	perf_sample_data_init(&data, 0);

	cpuc = &__get_cpu_var(cpu_hw_events);
	for (idx = 0; idx <= armpmu->num_events; ++idx) {
		struct perf_event *event = cpuc->events[idx];
		struct hw_perf_event *hwc;

		if (!test_bit(idx, cpuc->active_mask))
			continue;

		if (!xscale1_pmnc_counter_has_overflowed(pmnc, idx))
			continue;

		hwc = &event->hw;
		armpmu_event_update(event, hwc, idx);
		data.period = event->hw.last_period;
		if (!armpmu_event_set_period(event, hwc, idx))
			continue;

		if (perf_event_overflow(event, 0, &data, regs))
			armpmu->disable(hwc, idx);
	}

	/*
	 * Re-enable the PMU.
	 */
	pmnc = xscale1pmu_read_pmnc() | XSCALE_PMU_ENABLE;
	xscale1pmu_write_pmnc(pmnc);

	return IRQ_HANDLED;
}
static void
xscale1pmu_enable_event(struct hw_perf_event *hwc, int idx)
{
	unsigned long val, mask, evt, flags;

	switch (idx) {
	case XSCALE_CYCLE_COUNTER:
		mask = 0;
		evt = XSCALE1_CCOUNT_INT_EN;
		break;
	case XSCALE_COUNTER0:
		mask = XSCALE1_COUNT0_EVT_MASK;
		evt = (hwc->config_base << XSCALE1_COUNT0_EVT_SHFT) |
			XSCALE1_COUNT0_INT_EN;
		break;
	case XSCALE_COUNTER1:
		mask = XSCALE1_COUNT1_EVT_MASK;
		evt = (hwc->config_base << XSCALE1_COUNT1_EVT_SHFT) |
			XSCALE1_COUNT1_INT_EN;
		break;
	default:
		WARN_ONCE(1, "invalid counter number (%d)\n", idx);
		return;
	}

	spin_lock_irqsave(&pmu_lock, flags);
	val = xscale1pmu_read_pmnc();
	val &= ~mask;
	val |= evt;
	xscale1pmu_write_pmnc(val);
	spin_unlock_irqrestore(&pmu_lock, flags);
}
static void
xscale1pmu_disable_event(struct hw_perf_event *hwc, int idx)
{
	unsigned long val, mask, evt, flags;

	switch (idx) {
	case XSCALE_CYCLE_COUNTER:
		mask = XSCALE1_CCOUNT_INT_EN;
		evt = 0;
		break;
	case XSCALE_COUNTER0:
		mask = XSCALE1_COUNT0_INT_EN | XSCALE1_COUNT0_EVT_MASK;
		evt = XSCALE_PERFCTR_UNUSED << XSCALE1_COUNT0_EVT_SHFT;
		break;
	case XSCALE_COUNTER1:
		mask = XSCALE1_COUNT1_INT_EN | XSCALE1_COUNT1_EVT_MASK;
		evt = XSCALE_PERFCTR_UNUSED << XSCALE1_COUNT1_EVT_SHFT;
		break;
	default:
		WARN_ONCE(1, "invalid counter number (%d)\n", idx);
		return;
	}

	spin_lock_irqsave(&pmu_lock, flags);
	val = xscale1pmu_read_pmnc();
	val &= ~mask;
	val |= evt;
	xscale1pmu_write_pmnc(val);
	spin_unlock_irqrestore(&pmu_lock, flags);
}
static int
xscale1pmu_get_event_idx(struct cpu_hw_events *cpuc,
			 struct hw_perf_event *event)
{
	if (XSCALE_PERFCTR_CCNT == event->config_base) {
		if (test_and_set_bit(XSCALE_CYCLE_COUNTER, cpuc->used_mask))
			return -EAGAIN;

		return XSCALE_CYCLE_COUNTER;
	} else {
		if (!test_and_set_bit(XSCALE_COUNTER1, cpuc->used_mask)) {
			return XSCALE_COUNTER1;
		}

		if (!test_and_set_bit(XSCALE_COUNTER0, cpuc->used_mask)) {
			return XSCALE_COUNTER0;
		}

		return -EAGAIN;
	}
}
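
/*
 * Counter allocation on xscale1: the cycle counter is reserved for
 * XSCALE_PERFCTR_CCNT events, and all other events are placed in
 * XSCALE_COUNTER1 first, then XSCALE_COUNTER0. When no suitable counter
 * is free the request fails with -EAGAIN.
 */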
static void
xscale1pmu_start(void)
{
	unsigned long flags, val;

	spin_lock_irqsave(&pmu_lock, flags);
	val = xscale1pmu_read_pmnc();
	val |= XSCALE_PMU_ENABLE;
	xscale1pmu_write_pmnc(val);
	spin_unlock_irqrestore(&pmu_lock, flags);
}

static void
xscale1pmu_stop(void)
{
	unsigned long flags, val;

	spin_lock_irqsave(&pmu_lock, flags);
	val = xscale1pmu_read_pmnc();
	val &= ~XSCALE_PMU_ENABLE;
	xscale1pmu_write_pmnc(val);
	spin_unlock_irqrestore(&pmu_lock, flags);
}
static inline u32
xscale1pmu_read_counter(int counter)
{
	u32 val = 0;

	switch (counter) {
	case XSCALE_CYCLE_COUNTER:
		asm volatile("mrc p14, 0, %0, c1, c0, 0" : "=r" (val));
		break;
	case XSCALE_COUNTER0:
		asm volatile("mrc p14, 0, %0, c2, c0, 0" : "=r" (val));
		break;
	case XSCALE_COUNTER1:
		asm volatile("mrc p14, 0, %0, c3, c0, 0" : "=r" (val));
		break;
	}

	return val;
}

static inline void
xscale1pmu_write_counter(int counter, u32 val)
{
	switch (counter) {
	case XSCALE_CYCLE_COUNTER:
		asm volatile("mcr p14, 0, %0, c1, c0, 0" : : "r" (val));
		break;
	case XSCALE_COUNTER0:
		asm volatile("mcr p14, 0, %0, c2, c0, 0" : : "r" (val));
		break;
	case XSCALE_COUNTER1:
		asm volatile("mcr p14, 0, %0, c3, c0, 0" : : "r" (val));
		break;
	}
}
static const struct arm_pmu xscale1pmu = {
	.id		= ARM_PERF_PMU_ID_XSCALE1,
	.handle_irq	= xscale1pmu_handle_irq,
	.enable		= xscale1pmu_enable_event,
	.disable	= xscale1pmu_disable_event,
	.event_map	= xscalepmu_event_map,
	.raw_event	= xscalepmu_raw_event,
	.read_counter	= xscale1pmu_read_counter,
	.write_counter	= xscale1pmu_write_counter,
	.get_event_idx	= xscale1pmu_get_event_idx,
	.start		= xscale1pmu_start,
	.stop		= xscale1pmu_stop,
	.num_events	= 3,
	.max_period	= (1LLU << 32) - 1,
};
#define XSCALE2_OVERFLOWED_MASK	0x01f
#define XSCALE2_CCOUNT_OVERFLOW	0x001
#define XSCALE2_COUNT0_OVERFLOW	0x002
#define XSCALE2_COUNT1_OVERFLOW	0x004
#define XSCALE2_COUNT2_OVERFLOW	0x008
#define XSCALE2_COUNT3_OVERFLOW	0x010
#define XSCALE2_CCOUNT_INT_EN	0x001
#define XSCALE2_COUNT0_INT_EN	0x002
#define XSCALE2_COUNT1_INT_EN	0x004
#define XSCALE2_COUNT2_INT_EN	0x008
#define XSCALE2_COUNT3_INT_EN	0x010
#define XSCALE2_COUNT0_EVT_SHFT	0
#define XSCALE2_COUNT0_EVT_MASK	(0xff << XSCALE2_COUNT0_EVT_SHFT)
#define XSCALE2_COUNT1_EVT_SHFT	8
#define XSCALE2_COUNT1_EVT_MASK	(0xff << XSCALE2_COUNT1_EVT_SHFT)
#define XSCALE2_COUNT2_EVT_SHFT	16
#define XSCALE2_COUNT2_EVT_MASK	(0xff << XSCALE2_COUNT2_EVT_SHFT)
#define XSCALE2_COUNT3_EVT_SHFT	24
#define XSCALE2_COUNT3_EVT_MASK	(0xff << XSCALE2_COUNT3_EVT_SHFT)
static inline u32
xscale2pmu_read_pmnc(void)
{
	u32 val;
	asm volatile("mrc p14, 0, %0, c0, c1, 0" : "=r" (val));
	/* bits 1-2 and 4-23 are read-unpredictable */
	return val & 0xff000009;
}

static inline void
xscale2pmu_write_pmnc(u32 val)
{
	/* bits 4-23 are write-as-0, 24-31 are write ignored */
	val &= 0xf;
	asm volatile("mcr p14, 0, %0, c0, c1, 0" : : "r" (val));
}

static inline u32
xscale2pmu_read_overflow_flags(void)
{
	u32 val;
	asm volatile("mrc p14, 0, %0, c5, c1, 0" : "=r" (val));
	return val;
}

static inline void
xscale2pmu_write_overflow_flags(u32 val)
{
	asm volatile("mcr p14, 0, %0, c5, c1, 0" : : "r" (val));
}

static inline u32
xscale2pmu_read_event_select(void)
{
	u32 val;
	asm volatile("mrc p14, 0, %0, c8, c1, 0" : "=r" (val));
	return val;
}

static inline void
xscale2pmu_write_event_select(u32 val)
{
	asm volatile("mcr p14, 0, %0, c8, c1, 0" : : "r" (val));
}

static inline u32
xscale2pmu_read_int_enable(void)
{
	u32 val;
	asm volatile("mrc p14, 0, %0, c4, c1, 0" : "=r" (val));
	return val;
}

static inline void
xscale2pmu_write_int_enable(u32 val)
{
	asm volatile("mcr p14, 0, %0, c4, c1, 0" : : "r" (val));
}
static inline int
xscale2_pmnc_counter_has_overflowed(unsigned long of_flags,
				    enum xscale_counters counter)
{
	int ret = 0;

	switch (counter) {
	case XSCALE_CYCLE_COUNTER:
		ret = of_flags & XSCALE2_CCOUNT_OVERFLOW;
		break;
	case XSCALE_COUNTER0:
		ret = of_flags & XSCALE2_COUNT0_OVERFLOW;
		break;
	case XSCALE_COUNTER1:
		ret = of_flags & XSCALE2_COUNT1_OVERFLOW;
		break;
	case XSCALE_COUNTER2:
		ret = of_flags & XSCALE2_COUNT2_OVERFLOW;
		break;
	case XSCALE_COUNTER3:
		ret = of_flags & XSCALE2_COUNT3_OVERFLOW;
		break;
	default:
		WARN_ONCE(1, "invalid counter number (%d)\n", counter);
	}

	return ret;
}
static irqreturn_t
xscale2pmu_handle_irq(int irq_num, void *dev)
{
	unsigned long pmnc, of_flags;
	struct perf_sample_data data;
	struct cpu_hw_events *cpuc;
	struct pt_regs *regs;
	int idx;

	/* Disable the PMU. */
	pmnc = xscale2pmu_read_pmnc();
	xscale2pmu_write_pmnc(pmnc & ~XSCALE_PMU_ENABLE);

	/* Check the overflow flag register. */
	of_flags = xscale2pmu_read_overflow_flags();
	if (!(of_flags & XSCALE2_OVERFLOWED_MASK))
		return IRQ_NONE;

	/* Clear the overflow bits. */
	xscale2pmu_write_overflow_flags(of_flags);

	regs = get_irq_regs();

	perf_sample_data_init(&data, 0);

	cpuc = &__get_cpu_var(cpu_hw_events);
	for (idx = 0; idx <= armpmu->num_events; ++idx) {
		struct perf_event *event = cpuc->events[idx];
		struct hw_perf_event *hwc;

		if (!test_bit(idx, cpuc->active_mask))
			continue;

		if (!xscale2_pmnc_counter_has_overflowed(of_flags, idx))
			continue;

		hwc = &event->hw;
		armpmu_event_update(event, hwc, idx);
		data.period = event->hw.last_period;
		if (!armpmu_event_set_period(event, hwc, idx))
			continue;

		if (perf_event_overflow(event, 0, &data, regs))
			armpmu->disable(hwc, idx);
	}

	/*
	 * Re-enable the PMU.
	 */
	pmnc = xscale2pmu_read_pmnc() | XSCALE_PMU_ENABLE;
	xscale2pmu_write_pmnc(pmnc);

	return IRQ_HANDLED;
}
static void
xscale2pmu_enable_event(struct hw_perf_event *hwc, int idx)
{
	unsigned long flags, ien, evtsel;

	ien = xscale2pmu_read_int_enable();
	evtsel = xscale2pmu_read_event_select();

	switch (idx) {
	case XSCALE_CYCLE_COUNTER:
		ien |= XSCALE2_CCOUNT_INT_EN;
		break;
	case XSCALE_COUNTER0:
		ien |= XSCALE2_COUNT0_INT_EN;
		evtsel &= ~XSCALE2_COUNT0_EVT_MASK;
		evtsel |= hwc->config_base << XSCALE2_COUNT0_EVT_SHFT;
		break;
	case XSCALE_COUNTER1:
		ien |= XSCALE2_COUNT1_INT_EN;
		evtsel &= ~XSCALE2_COUNT1_EVT_MASK;
		evtsel |= hwc->config_base << XSCALE2_COUNT1_EVT_SHFT;
		break;
	case XSCALE_COUNTER2:
		ien |= XSCALE2_COUNT2_INT_EN;
		evtsel &= ~XSCALE2_COUNT2_EVT_MASK;
		evtsel |= hwc->config_base << XSCALE2_COUNT2_EVT_SHFT;
		break;
	case XSCALE_COUNTER3:
		ien |= XSCALE2_COUNT3_INT_EN;
		evtsel &= ~XSCALE2_COUNT3_EVT_MASK;
		evtsel |= hwc->config_base << XSCALE2_COUNT3_EVT_SHFT;
		break;
	default:
		WARN_ONCE(1, "invalid counter number (%d)\n", idx);
		return;
	}

	spin_lock_irqsave(&pmu_lock, flags);
	xscale2pmu_write_event_select(evtsel);
	xscale2pmu_write_int_enable(ien);
	spin_unlock_irqrestore(&pmu_lock, flags);
}
static void
xscale2pmu_disable_event(struct hw_perf_event *hwc, int idx)
{
	unsigned long flags, ien, evtsel;

	ien = xscale2pmu_read_int_enable();
	evtsel = xscale2pmu_read_event_select();

	switch (idx) {
	case XSCALE_CYCLE_COUNTER:
		ien &= ~XSCALE2_CCOUNT_INT_EN;
		break;
	case XSCALE_COUNTER0:
		ien &= ~XSCALE2_COUNT0_INT_EN;
		evtsel &= ~XSCALE2_COUNT0_EVT_MASK;
		evtsel |= XSCALE_PERFCTR_UNUSED << XSCALE2_COUNT0_EVT_SHFT;
		break;
	case XSCALE_COUNTER1:
		ien &= ~XSCALE2_COUNT1_INT_EN;
		evtsel &= ~XSCALE2_COUNT1_EVT_MASK;
		evtsel |= XSCALE_PERFCTR_UNUSED << XSCALE2_COUNT1_EVT_SHFT;
		break;
	case XSCALE_COUNTER2:
		ien &= ~XSCALE2_COUNT2_INT_EN;
		evtsel &= ~XSCALE2_COUNT2_EVT_MASK;
		evtsel |= XSCALE_PERFCTR_UNUSED << XSCALE2_COUNT2_EVT_SHFT;
		break;
	case XSCALE_COUNTER3:
		ien &= ~XSCALE2_COUNT3_INT_EN;
		evtsel &= ~XSCALE2_COUNT3_EVT_MASK;
		evtsel |= XSCALE_PERFCTR_UNUSED << XSCALE2_COUNT3_EVT_SHFT;
		break;
	default:
		WARN_ONCE(1, "invalid counter number (%d)\n", idx);
		return;
	}

	spin_lock_irqsave(&pmu_lock, flags);
	xscale2pmu_write_event_select(evtsel);
	xscale2pmu_write_int_enable(ien);
	spin_unlock_irqrestore(&pmu_lock, flags);
}
static int
xscale2pmu_get_event_idx(struct cpu_hw_events *cpuc,
			 struct hw_perf_event *event)
{
	int idx = xscale1pmu_get_event_idx(cpuc, event);
	if (idx >= 0)
		goto out;

	if (!test_and_set_bit(XSCALE_COUNTER3, cpuc->used_mask))
		idx = XSCALE_COUNTER3;
	else if (!test_and_set_bit(XSCALE_COUNTER2, cpuc->used_mask))
		idx = XSCALE_COUNTER2;
out:
	return idx;
}
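
/*
 * xscale2 reuses the xscale1 allocator for the cycle counter and
 * counters 0-1, and only falls back to XSCALE_COUNTER3 and then
 * XSCALE_COUNTER2 once those are taken.
 */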
static void
xscale2pmu_start(void)
{
	unsigned long flags, val;

	spin_lock_irqsave(&pmu_lock, flags);
	val = xscale2pmu_read_pmnc() & ~XSCALE_PMU_CNT64;
	val |= XSCALE_PMU_ENABLE;
	xscale2pmu_write_pmnc(val);
	spin_unlock_irqrestore(&pmu_lock, flags);
}

static void
xscale2pmu_stop(void)
{
	unsigned long flags, val;

	spin_lock_irqsave(&pmu_lock, flags);
	val = xscale2pmu_read_pmnc();
	val &= ~XSCALE_PMU_ENABLE;
	xscale2pmu_write_pmnc(val);
	spin_unlock_irqrestore(&pmu_lock, flags);
}
static inline u32
xscale2pmu_read_counter(int counter)
{
	u32 val = 0;

	switch (counter) {
	case XSCALE_CYCLE_COUNTER:
		asm volatile("mrc p14, 0, %0, c1, c1, 0" : "=r" (val));
		break;
	case XSCALE_COUNTER0:
		asm volatile("mrc p14, 0, %0, c0, c2, 0" : "=r" (val));
		break;
	case XSCALE_COUNTER1:
		asm volatile("mrc p14, 0, %0, c1, c2, 0" : "=r" (val));
		break;
	case XSCALE_COUNTER2:
		asm volatile("mrc p14, 0, %0, c2, c2, 0" : "=r" (val));
		break;
	case XSCALE_COUNTER3:
		asm volatile("mrc p14, 0, %0, c3, c2, 0" : "=r" (val));
		break;
	}

	return val;
}

static inline void
xscale2pmu_write_counter(int counter, u32 val)
{
	switch (counter) {
	case XSCALE_CYCLE_COUNTER:
		asm volatile("mcr p14, 0, %0, c1, c1, 0" : : "r" (val));
		break;
	case XSCALE_COUNTER0:
		asm volatile("mcr p14, 0, %0, c0, c2, 0" : : "r" (val));
		break;
	case XSCALE_COUNTER1:
		asm volatile("mcr p14, 0, %0, c1, c2, 0" : : "r" (val));
		break;
	case XSCALE_COUNTER2:
		asm volatile("mcr p14, 0, %0, c2, c2, 0" : : "r" (val));
		break;
	case XSCALE_COUNTER3:
		asm volatile("mcr p14, 0, %0, c3, c2, 0" : : "r" (val));
		break;
	}
}
static const struct arm_pmu xscale2pmu = {
	.id		= ARM_PERF_PMU_ID_XSCALE2,
	.handle_irq	= xscale2pmu_handle_irq,
	.enable		= xscale2pmu_enable_event,
	.disable	= xscale2pmu_disable_event,
	.event_map	= xscalepmu_event_map,
	.raw_event	= xscalepmu_raw_event,
	.read_counter	= xscale2pmu_read_counter,
	.write_counter	= xscale2pmu_write_counter,
	.get_event_idx	= xscale2pmu_get_event_idx,
	.start		= xscale2pmu_start,
	.stop		= xscale2pmu_stop,
	.num_events	= 5,
	.max_period	= (1LLU << 32) - 1,
};
static int __init
init_hw_perf_events(void)
{
	unsigned long cpuid = read_cpuid_id();
	unsigned long implementor = (cpuid & 0xFF000000) >> 24;
	unsigned long part_number = (cpuid & 0xFFF0);

	/* ARM Ltd CPUs. */
	if (0x41 == implementor) {
		switch (part_number) {
		case 0xB360:	/* ARM1136 */
		case 0xB560:	/* ARM1156 */
		case 0xB760:	/* ARM1176 */
			armpmu = &armv6pmu;
			memcpy(armpmu_perf_cache_map, armv6_perf_cache_map,
			       sizeof(armv6_perf_cache_map));
			break;
		case 0xB020:	/* ARM11mpcore */
			armpmu = &armv6mpcore_pmu;
			memcpy(armpmu_perf_cache_map,
			       armv6mpcore_perf_cache_map,
			       sizeof(armv6mpcore_perf_cache_map));
			break;
		case 0xC080:	/* Cortex-A8 */
			armv7pmu.id = ARM_PERF_PMU_ID_CA8;
			memcpy(armpmu_perf_cache_map, armv7_a8_perf_cache_map,
			       sizeof(armv7_a8_perf_cache_map));
			armv7pmu.event_map = armv7_a8_pmu_event_map;
			armpmu = &armv7pmu;

			/* Reset PMNC and read the nb of CNTx counters
			   supported */
			armv7pmu.num_events = armv7_reset_read_pmnc();
			break;
		case 0xC090:	/* Cortex-A9 */
			armv7pmu.id = ARM_PERF_PMU_ID_CA9;
			memcpy(armpmu_perf_cache_map, armv7_a9_perf_cache_map,
			       sizeof(armv7_a9_perf_cache_map));
			armv7pmu.event_map = armv7_a9_pmu_event_map;
			armpmu = &armv7pmu;

			/* Reset PMNC and read the nb of CNTx counters
			   supported */
			armv7pmu.num_events = armv7_reset_read_pmnc();
			break;
		}
	/* Intel CPUs [xscale]. */
	} else if (0x69 == implementor) {
		part_number = (cpuid >> 13) & 0x7;
		switch (part_number) {
		case 1:
			armpmu = &xscale1pmu;
			memcpy(armpmu_perf_cache_map, xscale_perf_cache_map,
			       sizeof(xscale_perf_cache_map));
			break;
		case 2:
			armpmu = &xscale2pmu;
			memcpy(armpmu_perf_cache_map, xscale_perf_cache_map,
			       sizeof(xscale_perf_cache_map));
			break;
		}
	}

	if (armpmu) {
		pr_info("enabled with %s PMU driver, %d counters available\n",
			arm_pmu_names[armpmu->id], armpmu->num_events);
	} else {
		pr_info("no hardware support available\n");
	}

	perf_pmu_register(&pmu);

	return 0;
}
arch_initcall(init_hw_perf_events);
/*
 * Callchain handling code.
 */

/*
 * The registers we're interested in are at the end of the variable
 * length saved register structure. The fp points at the end of this
 * structure so the address of this struct is:
 * (struct frame_tail *)(xxx->fp)-1
 *
 * This code has been adapted from the ARM OProfile support.
 */
struct frame_tail {
	struct frame_tail *fp;
	unsigned long sp;
	unsigned long lr;
} __attribute__((packed));
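
/*
 * Worked example, assuming the usual 12-byte APCS frame tail: with a
 * sampled user fp of 0xbef00fa8,
 *	tail = (struct frame_tail *)0xbef00fa8 - 1 = 0xbef00f9c,
 * i.e. the {fp, sp, lr} triple saved just below the address held in the
 * frame pointer. user_backtrace() records tail->lr and then applies the
 * same "- 1" step to the saved fp, walking towards higher addresses.
 */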
/*
 * Get the return address for a single stackframe and return a pointer to the
 * next frame tail.
 */
static struct frame_tail *
user_backtrace(struct frame_tail *tail,
	       struct perf_callchain_entry *entry)
{
	struct frame_tail buftail;

	/* Also check accessibility of one struct frame_tail beyond */
	if (!access_ok(VERIFY_READ, tail, sizeof(buftail)))
		return NULL;
	if (__copy_from_user_inatomic(&buftail, tail, sizeof(buftail)))
		return NULL;

	perf_callchain_store(entry, buftail.lr);

	/*
	 * Frame pointers should strictly progress back up the stack
	 * (towards higher addresses).
	 */
	if (tail >= buftail.fp)
		return NULL;

	return buftail.fp - 1;
}
void
perf_callchain_user(struct perf_callchain_entry *entry, struct pt_regs *regs)
{
	struct frame_tail *tail;

	tail = (struct frame_tail *)regs->ARM_fp - 1;

	while (tail && !((unsigned long)tail & 0x3))
		tail = user_backtrace(tail, entry);
}
/*
 * Gets called by walk_stackframe() for every stackframe. This will be called
 * whilst unwinding the stackframe and is like a subroutine return so we use
 * the PC.
 */
static int
callchain_trace(struct stackframe *fr,
		void *data)
{
	struct perf_callchain_entry *entry = data;
	perf_callchain_store(entry, fr->pc);
	return 0;
}
void
perf_callchain_kernel(struct perf_callchain_entry *entry, struct pt_regs *regs)
{
	struct stackframe fr;

	fr.fp = regs->ARM_fp;
	fr.sp = regs->ARM_sp;
	fr.lr = regs->ARM_lr;
	fr.pc = regs->ARM_pc;
	walk_stackframe(&fr, callchain_trace, entry);
}