/*
 * Blackfin performance counters
 *
 * Copyright 2011 Analog Devices Inc.
 *
 * Ripped from SuperH version:
 *
 *  Copyright (C) 2009 Paul Mundt
 *
 * Heavily based on the x86 and PowerPC implementations.
 *
 * x86:
 *  Copyright (C) 2008 Thomas Gleixner <tglx@linutronix.de>
 *  Copyright (C) 2008-2009 Red Hat, Inc., Ingo Molnar
 *  Copyright (C) 2009 Jaswinder Singh Rajput
 *  Copyright (C) 2009 Advanced Micro Devices, Inc., Robert Richter
 *  Copyright (C) 2008-2009 Red Hat, Inc., Peter Zijlstra <pzijlstr@redhat.com>
 *  Copyright (C) 2009 Intel Corporation, <markus.t.metzger@intel.com>
 *
 * ppc:
 *  Copyright 2008-2009 Paul Mackerras, IBM Corporation.
 *
 * Licensed under the GPL-2 or later.
 */
#include <linux/kernel.h>
#include <linux/export.h>
#include <linux/init.h>
#include <linux/perf_event.h>
#include <asm/bfin_pfmon.h>
/*
 * We have two counters, and each counter can support an event type.
 * The 'o' is PFCNTx=1 and 's' is PFCNTx=0
 *
 * 0x04 o pc invariant branches
 * 0x06 o mispredicted branches
 * 0x09 o predicted branches taken
 * 0x0C o CSYNC/SSYNC insn
 * 0x0D o Insns committed
 * 0x0E o Interrupts taken
 * 0x0F o Misaligned address exceptions
 * 0x80 o Code memory fetches stalled due to DMA
 * 0x83 o 64bit insn fetches delivered
 * 0x9A o data cache fills (bank a)
 * 0x9B o data cache fills (bank b)
 * 0x9C o data cache lines evicted (bank a)
 * 0x9D o data cache lines evicted (bank b)
 * 0x9E o data cache high priority fills
 * 0x9F o data cache low priority fills
 * 0x00 s loop 0 iterations
 * 0x01 s loop 1 iterations
 * 0x0A s CSYNC/SSYNC stalls
 * 0x10 s DAG read/after write hazards
 * 0x13 s RAW data hazards
 * 0x81 s code TAG stalls
 * 0x82 s code fill stalls
 * 0x90 s processor to memory stalls
 * 0x91 s data memory stalls not hidden by 0x90
 * 0x92 s data store buffer full stalls
 * 0x93 s data memory write buffer full stalls due to high->low priority
 * 0x95 s data memory fill buffer stalls
 * 0x96 s data TAG collision stalls
 * 0x97 s data collision stalls
 * 0x99 s data stalls sent to processor
 */
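
/*
 * Raw-event encoding sketch (inferred from bfin_pmu_event_init() below,
 * not a documented ABI): bits 7:0 of attr->config select the event number
 * above, and bit 8 selects the 's' class (PFCNTx=0); with bit 8 clear the
 * 'o' class (PFCNTx=1) is used. For example:
 *
 *   perf stat -e r00D ./app    # 0x0D 'o': Insns committed
 *   perf stat -e r10A ./app    # 0x0A 's': CSYNC/SSYNC stalls
 */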
static const int event_map[] = {
	/* use CYCLES cpu register */
	[PERF_COUNT_HW_CPU_CYCLES]          = -1,
	[PERF_COUNT_HW_INSTRUCTIONS]        = 0x0D,
	[PERF_COUNT_HW_CACHE_REFERENCES]    = -1,
	[PERF_COUNT_HW_CACHE_MISSES]        = 0x83,
	[PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = 0x09,
	[PERF_COUNT_HW_BRANCH_MISSES]       = 0x06,
	[PERF_COUNT_HW_BUS_CYCLES]          = -1,
};
#define C(x) PERF_COUNT_HW_CACHE_##x
static const int cache_events[PERF_COUNT_HW_CACHE_MAX]
                             [PERF_COUNT_HW_CACHE_OP_MAX]
                             [PERF_COUNT_HW_CACHE_RESULT_MAX] =
{
	[C(L1D)] = {	/* Data bank A */
		[C(OP_READ)] = {
			[C(RESULT_ACCESS)] = 0,
			[C(RESULT_MISS)  ] = 0x9A,
		},
		[C(OP_WRITE)] = {
			[C(RESULT_ACCESS)] = 0,
			[C(RESULT_MISS)  ] = 0,
		},
		[C(OP_PREFETCH)] = {
			[C(RESULT_ACCESS)] = 0,
			[C(RESULT_MISS)  ] = 0,
		},
	},

	[C(L1I)] = {
		[C(OP_READ)] = {
			[C(RESULT_ACCESS)] = 0,
			[C(RESULT_MISS)  ] = 0x83,
		},
		[C(OP_WRITE)] = {
			[C(RESULT_ACCESS)] = -1,
			[C(RESULT_MISS)  ] = -1,
		},
		[C(OP_PREFETCH)] = {
			[C(RESULT_ACCESS)] = 0,
			[C(RESULT_MISS)  ] = 0,
		},
	},

	[C(LL)] = {
		[C(OP_READ)] = {
			[C(RESULT_ACCESS)] = -1,
			[C(RESULT_MISS)  ] = -1,
		},
		[C(OP_WRITE)] = {
			[C(RESULT_ACCESS)] = -1,
			[C(RESULT_MISS)  ] = -1,
		},
		[C(OP_PREFETCH)] = {
			[C(RESULT_ACCESS)] = -1,
			[C(RESULT_MISS)  ] = -1,
		},
	},

	[C(DTLB)] = {
		[C(OP_READ)] = {
			[C(RESULT_ACCESS)] = -1,
			[C(RESULT_MISS)  ] = -1,
		},
		[C(OP_WRITE)] = {
			[C(RESULT_ACCESS)] = -1,
			[C(RESULT_MISS)  ] = -1,
		},
		[C(OP_PREFETCH)] = {
			[C(RESULT_ACCESS)] = -1,
			[C(RESULT_MISS)  ] = -1,
		},
	},

	[C(ITLB)] = {
		[C(OP_READ)] = {
			[C(RESULT_ACCESS)] = -1,
			[C(RESULT_MISS)  ] = -1,
		},
		[C(OP_WRITE)] = {
			[C(RESULT_ACCESS)] = -1,
			[C(RESULT_MISS)  ] = -1,
		},
		[C(OP_PREFETCH)] = {
			[C(RESULT_ACCESS)] = -1,
			[C(RESULT_MISS)  ] = -1,
		},
	},

	[C(BPU)] = {
		[C(OP_READ)] = {
			[C(RESULT_ACCESS)] = -1,
			[C(RESULT_MISS)  ] = -1,
		},
		[C(OP_WRITE)] = {
			[C(RESULT_ACCESS)] = -1,
			[C(RESULT_MISS)  ] = -1,
		},
		[C(OP_PREFETCH)] = {
			[C(RESULT_ACCESS)] = -1,
			[C(RESULT_MISS)  ] = -1,
		},
	},
};
const char *perf_pmu_name(void)
{
	return "bfin";
}
EXPORT_SYMBOL(perf_pmu_name);

int perf_num_counters(void)
{
	return ARRAY_SIZE(event_map);
}
EXPORT_SYMBOL(perf_num_counters);
static u64 bfin_pfmon_read(int idx)
{
	return bfin_read32(PFCNTR0 + (idx * 4));
}
static void bfin_pfmon_disable(struct hw_perf_event *hwc, int idx)
{
	bfin_write_PFCTL(bfin_read_PFCTL() & ~PFCEN(idx, PFCEN_MASK));
}
static void bfin_pfmon_enable(struct hw_perf_event *hwc, int idx)
{
	u32 val, mask;

	val = PFPWR;
	if (idx) {
		mask = ~(PFCNT1 | PFMON1 | PFCEN1 | PEMUSW1);
		/* The packed config is for event0, so shift it to event1 slots */
		val |= (hwc->config << (PFMON1_P - PFMON0_P));
		/* PFCNT0 sits at a different spacing, so move it separately */
		val |= (hwc->config & PFCNT0) << (PFCNT1_P - PFCNT0_P);
		bfin_write_PFCNTR1(0);
	} else {
		mask = ~(PFCNT0 | PFMON0 | PFCEN0 | PEMUSW0);
		val |= hwc->config;
		bfin_write_PFCNTR0(0);
	}

	bfin_write_PFCTL((bfin_read_PFCTL() & mask) | val);
}
static void bfin_pfmon_disable_all(void)
{
	bfin_write_PFCTL(bfin_read_PFCTL() & ~PFPWR);
}

static void bfin_pfmon_enable_all(void)
{
	bfin_write_PFCTL(bfin_read_PFCTL() | PFPWR);
}
struct cpu_hw_events {
	struct perf_event *events[MAX_HWEVENTS];
	unsigned long used_mask[BITS_TO_LONGS(MAX_HWEVENTS)];
};
DEFINE_PER_CPU(struct cpu_hw_events, cpu_hw_events);
static int hw_perf_cache_event(int config, int *evp)
{
	unsigned long type, op, result;
	int ev;

	/* unpack config */
	type = config & 0xff;
	op = (config >> 8) & 0xff;
	result = (config >> 16) & 0xff;

	if (type >= PERF_COUNT_HW_CACHE_MAX ||
	    op >= PERF_COUNT_HW_CACHE_OP_MAX ||
	    result >= PERF_COUNT_HW_CACHE_RESULT_MAX)
		return -EINVAL;

	ev = cache_events[type][op][result];
	if (ev == 0)
		return -EOPNOTSUPP;
	if (ev == -1)
		return -EINVAL;
	*evp = ev;
	return 0;
}
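
/*
 * Worked example of the packing hw_perf_cache_event() unpacks, using the
 * generic perf cache-event layout (id | op << 8 | result << 16): an L1D
 * read miss arrives as
 *
 *   config = C(L1D) | (C(OP_READ) << 8) | (C(RESULT_MISS) << 16)
 *
 * and resolves through cache_events[] to hardware event 0x9A, "data cache
 * fills (bank a)".
 */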
static void bfin_perf_event_update(struct perf_event *event,
				   struct hw_perf_event *hwc, int idx)
{
	u64 prev_raw_count, new_raw_count;
	s64 delta;
	int shift = 0;

	/*
	 * Depending on the counter configuration, they may or may not
	 * be chained, in which case the previous counter value can be
	 * updated underneath us if the lower-half overflows.
	 *
	 * Our tactic to handle this is to first atomically read and
	 * exchange a new raw count - then add that new-prev delta
	 * count to the generic counter atomically.
	 *
	 * As there is no interrupt associated with the overflow events,
	 * this is the simplest approach for maintaining consistency.
	 */
again:
	prev_raw_count = local64_read(&hwc->prev_count);
	new_raw_count = bfin_pfmon_read(idx);

	if (local64_cmpxchg(&hwc->prev_count, prev_raw_count,
			    new_raw_count) != prev_raw_count)
		goto again;

	/*
	 * Now we have the new raw value and have updated the prev
	 * timestamp already. We can now calculate the elapsed delta
	 * (counter-)time and add that to the generic counter.
	 *
	 * Careful, not all hw sign-extends above the physical width
	 * of the count.
	 */
	delta = (new_raw_count << shift) - (prev_raw_count << shift);
	delta >>= shift;

	local64_add(delta, &event->count);
}
static void bfin_pmu_stop(struct perf_event *event, int flags)
{
	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
	struct hw_perf_event *hwc = &event->hw;
	int idx = hwc->idx;

	if (!(event->hw.state & PERF_HES_STOPPED)) {
		bfin_pfmon_disable(hwc, idx);
		cpuc->events[idx] = NULL;
		event->hw.state |= PERF_HES_STOPPED;
	}

	if ((flags & PERF_EF_UPDATE) && !(event->hw.state & PERF_HES_UPTODATE)) {
		bfin_perf_event_update(event, &event->hw, idx);
		event->hw.state |= PERF_HES_UPTODATE;
	}
}
static void bfin_pmu_start(struct perf_event *event, int flags)
{
	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
	struct hw_perf_event *hwc = &event->hw;
	int idx = hwc->idx;

	if (WARN_ON_ONCE(idx == -1))
		return;

	if (flags & PERF_EF_RELOAD)
		WARN_ON_ONCE(!(event->hw.state & PERF_HES_UPTODATE));

	cpuc->events[idx] = event;
	event->hw.state = 0;
	bfin_pfmon_enable(hwc, idx);
}
static void bfin_pmu_del(struct perf_event *event, int flags)
{
	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);

	bfin_pmu_stop(event, PERF_EF_UPDATE);
	__clear_bit(event->hw.idx, cpuc->used_mask);

	perf_event_update_userpage(event);
}
static int bfin_pmu_add(struct perf_event *event, int flags)
{
	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
	struct hw_perf_event *hwc = &event->hw;
	int idx = hwc->idx;
	int ret = -EAGAIN;

	perf_pmu_disable(event->pmu);

	if (__test_and_set_bit(idx, cpuc->used_mask)) {
		idx = find_first_zero_bit(cpuc->used_mask, MAX_HWEVENTS);
		if (idx == MAX_HWEVENTS)
			goto out;

		__set_bit(idx, cpuc->used_mask);
		hwc->idx = idx;
	}

	bfin_pfmon_disable(hwc, idx);

	event->hw.state = PERF_HES_UPTODATE | PERF_HES_STOPPED;
	if (flags & PERF_EF_START)
		bfin_pmu_start(event, PERF_EF_RELOAD);

	perf_event_update_userpage(event);
	ret = 0;
out:
	perf_pmu_enable(event->pmu);
	return ret;
}
static void bfin_pmu_read(struct perf_event *event)
{
	bfin_perf_event_update(event, &event->hw, event->hw.idx);
}
static int bfin_pmu_event_init(struct perf_event *event)
{
	struct perf_event_attr *attr = &event->attr;
	struct hw_perf_event *hwc = &event->hw;
	int config = -1;
	int ret = 0;

	if (attr->exclude_hv || attr->exclude_idle)
		return -EPERM;

	/*
	 * All of the on-chip counters are "limited", in that they have
	 * no interrupts, and are therefore unable to do sampling without
	 * further work and timer assistance.
	 */
	if (hwc->sample_period)
		return -EINVAL;

	switch (attr->type) {
	case PERF_TYPE_RAW:
		config = PFMON(0, attr->config & PFMON_MASK) |
			 PFCNT(0, !(attr->config & 0x100));
		break;
	case PERF_TYPE_HW_CACHE:
		ret = hw_perf_cache_event(attr->config, &config);
		break;
	case PERF_TYPE_HARDWARE:
		if (attr->config >= ARRAY_SIZE(event_map))
			return -EINVAL;

		config = event_map[attr->config];
		break;
	}

	if (config == -1)
		return -EOPNOTSUPP;

	if (!attr->exclude_kernel)
		config |= PFCEN(0, PFCEN_ENABLE_SUPV);
	if (!attr->exclude_user)
		config |= PFCEN(0, PFCEN_ENABLE_USER);

	hwc->config |= config;

	return ret;
}
static void bfin_pmu_enable(struct pmu *pmu)
{
	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
	struct perf_event *event;
	struct hw_perf_event *hwc;
	int i;

	for (i = 0; i < MAX_HWEVENTS; ++i) {
		event = cpuc->events[i];
		if (!event)
			continue;
		hwc = &event->hw;
		bfin_pfmon_enable(hwc, hwc->idx);
	}

	bfin_pfmon_enable_all();
}
static void bfin_pmu_disable(struct pmu *pmu)
{
	bfin_pfmon_disable_all();
}
static struct pmu pmu = {
	.pmu_enable  = bfin_pmu_enable,
	.pmu_disable = bfin_pmu_disable,
	.event_init  = bfin_pmu_event_init,
	.add         = bfin_pmu_add,
	.del         = bfin_pmu_del,
	.start       = bfin_pmu_start,
	.stop        = bfin_pmu_stop,
	.read        = bfin_pmu_read,
};
static void bfin_pmu_setup(int cpu)
{
	struct cpu_hw_events *cpuhw = &per_cpu(cpu_hw_events, cpu);

	memset(cpuhw, 0, sizeof(struct cpu_hw_events));
}
static int
bfin_pmu_notifier(struct notifier_block *self, unsigned long action, void *hcpu)
{
	unsigned int cpu = (long)hcpu;

	switch (action & ~CPU_TASKS_FROZEN) {
	case CPU_UP_PREPARE:
		/* Reset the counter hardware and clear per-cpu state */
		bfin_write_PFCTL(0);
		bfin_pmu_setup(cpu);
		break;

	default:
		break;
	}

	return NOTIFY_OK;
}
static int __init bfin_pmu_init(void)
{
	int ret;

	ret = perf_pmu_register(&pmu, "cpu", PERF_TYPE_RAW);
	if (!ret)
		perf_cpu_notifier(bfin_pmu_notifier);

	return ret;
}
early_initcall(bfin_pmu_init);
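
/*
 * Usage sketch (assumes a perf-enabled userspace): counting works through
 * the generic aliases wired up in event_map[] above, e.g.
 *
 *   perf stat -e instructions,branches,branch-misses ./app
 *
 * Sampling (perf record) is rejected by bfin_pmu_event_init(), since the
 * counters raise no overflow interrupt.
 */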