arch/arm/kernel/perf_event_v7.c
/*
 * ARMv7 Cortex-A8 and Cortex-A9 Performance Events handling code.
 *
 * ARMv7 support: Jean Pihet <jpihet@mvista.com>
 * 2010 (c) MontaVista Software, LLC.
 *
 * Copied from ARMv6 code, with the low level code inspired
 *  by the ARMv7 Oprofile code.
 *
 * Cortex-A8 has up to 4 configurable performance counters and
 *  a single cycle counter.
 * Cortex-A9 has up to 31 configurable performance counters and
 *  a single cycle counter.
 *
 * All counters can be enabled/disabled and IRQ masked separately. The cycle
 *  counter and all 4 performance counters together can be reset separately.
 */
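/*
 * Editorial note (an assumption about the usual build arrangement of this
 * era, not stated in this file): like the other ARM PMU backends, this
 * file is not compiled standalone; it is #included from
 * arch/arm/kernel/perf_event.c, which provides the shared armpmu pointer,
 * pmu_lock, cpu_hw_events and the armpmu_* helpers used below.
 */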
#ifdef CONFIG_CPU_V7
/* Common ARMv7 event types */
enum armv7_perf_types {
	ARMV7_PERFCTR_PMNC_SW_INCR		= 0x00,
	ARMV7_PERFCTR_IFETCH_MISS		= 0x01,
	ARMV7_PERFCTR_ITLB_MISS			= 0x02,
	ARMV7_PERFCTR_DCACHE_REFILL		= 0x03,
	ARMV7_PERFCTR_DCACHE_ACCESS		= 0x04,
	ARMV7_PERFCTR_DTLB_REFILL		= 0x05,
	ARMV7_PERFCTR_DREAD			= 0x06,
	ARMV7_PERFCTR_DWRITE			= 0x07,

	ARMV7_PERFCTR_EXC_TAKEN			= 0x09,
	ARMV7_PERFCTR_EXC_EXECUTED		= 0x0A,
	ARMV7_PERFCTR_CID_WRITE			= 0x0B,
	/*
	 * ARMV7_PERFCTR_PC_WRITE is equivalent to HW_BRANCH_INSTRUCTIONS.
	 * It counts:
	 *  - all branch instructions,
	 *  - instructions that explicitly write the PC,
	 *  - exception generating instructions.
	 */
	ARMV7_PERFCTR_PC_WRITE			= 0x0C,
	ARMV7_PERFCTR_PC_IMM_BRANCH		= 0x0D,
	ARMV7_PERFCTR_UNALIGNED_ACCESS		= 0x0F,
	ARMV7_PERFCTR_PC_BRANCH_MIS_PRED	= 0x10,
	ARMV7_PERFCTR_CLOCK_CYCLES		= 0x11,

	ARMV7_PERFCTR_PC_BRANCH_MIS_USED	= 0x12,

	ARMV7_PERFCTR_CPU_CYCLES		= 0xFF
};
/* ARMv7 Cortex-A8 specific event types */
enum armv7_a8_perf_types {
	ARMV7_PERFCTR_INSTR_EXECUTED		= 0x08,

	ARMV7_PERFCTR_PC_PROC_RETURN		= 0x0E,

	ARMV7_PERFCTR_WRITE_BUFFER_FULL		= 0x40,
	ARMV7_PERFCTR_L2_STORE_MERGED		= 0x41,
	ARMV7_PERFCTR_L2_STORE_BUFF		= 0x42,
	ARMV7_PERFCTR_L2_ACCESS			= 0x43,
	ARMV7_PERFCTR_L2_CACH_MISS		= 0x44,
	ARMV7_PERFCTR_AXI_READ_CYCLES		= 0x45,
	ARMV7_PERFCTR_AXI_WRITE_CYCLES		= 0x46,
	ARMV7_PERFCTR_MEMORY_REPLAY		= 0x47,
	ARMV7_PERFCTR_UNALIGNED_ACCESS_REPLAY	= 0x48,
	ARMV7_PERFCTR_L1_DATA_MISS		= 0x49,
	ARMV7_PERFCTR_L1_INST_MISS		= 0x4A,
	ARMV7_PERFCTR_L1_DATA_COLORING		= 0x4B,
	ARMV7_PERFCTR_L1_NEON_DATA		= 0x4C,
	ARMV7_PERFCTR_L1_NEON_CACH_DATA		= 0x4D,
	ARMV7_PERFCTR_L2_NEON			= 0x4E,
	ARMV7_PERFCTR_L2_NEON_HIT		= 0x4F,
	ARMV7_PERFCTR_L1_INST			= 0x50,
	ARMV7_PERFCTR_PC_RETURN_MIS_PRED	= 0x51,
	ARMV7_PERFCTR_PC_BRANCH_FAILED		= 0x52,
	ARMV7_PERFCTR_PC_BRANCH_TAKEN		= 0x53,
	ARMV7_PERFCTR_PC_BRANCH_EXECUTED	= 0x54,
	ARMV7_PERFCTR_OP_EXECUTED		= 0x55,
	ARMV7_PERFCTR_CYCLES_INST_STALL		= 0x56,
	ARMV7_PERFCTR_CYCLES_INST		= 0x57,
	ARMV7_PERFCTR_CYCLES_NEON_DATA_STALL	= 0x58,
	ARMV7_PERFCTR_CYCLES_NEON_INST_STALL	= 0x59,
	ARMV7_PERFCTR_NEON_CYCLES		= 0x5A,

	ARMV7_PERFCTR_PMU0_EVENTS		= 0x70,
	ARMV7_PERFCTR_PMU1_EVENTS		= 0x71,
	ARMV7_PERFCTR_PMU_EVENTS		= 0x72,
};
/* ARMv7 Cortex-A9 specific event types */
enum armv7_a9_perf_types {
	ARMV7_PERFCTR_JAVA_HW_BYTECODE_EXEC	= 0x40,
	ARMV7_PERFCTR_JAVA_SW_BYTECODE_EXEC	= 0x41,
	ARMV7_PERFCTR_JAZELLE_BRANCH_EXEC	= 0x42,

	ARMV7_PERFCTR_COHERENT_LINE_MISS	= 0x50,
	ARMV7_PERFCTR_COHERENT_LINE_HIT		= 0x51,

	ARMV7_PERFCTR_ICACHE_DEP_STALL_CYCLES	= 0x60,
	ARMV7_PERFCTR_DCACHE_DEP_STALL_CYCLES	= 0x61,
	ARMV7_PERFCTR_TLB_MISS_DEP_STALL_CYCLES	= 0x62,
	ARMV7_PERFCTR_STREX_EXECUTED_PASSED	= 0x63,
	ARMV7_PERFCTR_STREX_EXECUTED_FAILED	= 0x64,
	ARMV7_PERFCTR_DATA_EVICTION		= 0x65,
	ARMV7_PERFCTR_ISSUE_STAGE_NO_INST	= 0x66,
	ARMV7_PERFCTR_ISSUE_STAGE_EMPTY		= 0x67,
	ARMV7_PERFCTR_INST_OUT_OF_RENAME_STAGE	= 0x68,

	ARMV7_PERFCTR_PREDICTABLE_FUNCT_RETURNS	= 0x6E,

	ARMV7_PERFCTR_MAIN_UNIT_EXECUTED_INST	= 0x70,
	ARMV7_PERFCTR_SECOND_UNIT_EXECUTED_INST	= 0x71,
	ARMV7_PERFCTR_LD_ST_UNIT_EXECUTED_INST	= 0x72,
	ARMV7_PERFCTR_FP_EXECUTED_INST		= 0x73,
	ARMV7_PERFCTR_NEON_EXECUTED_INST	= 0x74,

	ARMV7_PERFCTR_PLD_FULL_DEP_STALL_CYCLES	= 0x80,
	ARMV7_PERFCTR_DATA_WR_DEP_STALL_CYCLES	= 0x81,
	ARMV7_PERFCTR_ITLB_MISS_DEP_STALL_CYCLES	= 0x82,
	ARMV7_PERFCTR_DTLB_MISS_DEP_STALL_CYCLES	= 0x83,
	ARMV7_PERFCTR_MICRO_ITLB_MISS_DEP_STALL_CYCLES	= 0x84,
	ARMV7_PERFCTR_MICRO_DTLB_MISS_DEP_STALL_CYCLES	= 0x85,
	ARMV7_PERFCTR_DMB_DEP_STALL_CYCLES	= 0x86,

	ARMV7_PERFCTR_INTGR_CLK_ENABLED_CYCLES	= 0x8A,
	ARMV7_PERFCTR_DATA_ENGINE_CLK_EN_CYCLES	= 0x8B,

	ARMV7_PERFCTR_ISB_INST			= 0x90,
	ARMV7_PERFCTR_DSB_INST			= 0x91,
	ARMV7_PERFCTR_DMB_INST			= 0x92,
	ARMV7_PERFCTR_EXT_INTERRUPTS		= 0x93,

	ARMV7_PERFCTR_PLE_CACHE_LINE_RQST_COMPLETED	= 0xA0,
	ARMV7_PERFCTR_PLE_CACHE_LINE_RQST_SKIPPED	= 0xA1,
	ARMV7_PERFCTR_PLE_FIFO_FLUSH		= 0xA2,
	ARMV7_PERFCTR_PLE_RQST_COMPLETED	= 0xA3,
	ARMV7_PERFCTR_PLE_FIFO_OVERFLOW		= 0xA4,
	ARMV7_PERFCTR_PLE_RQST_PROG		= 0xA5
};
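/*
 * Editorial note: in ARMv7 the event numbers below 0x40 are architected
 * common events, while 0x40 and above are IMPLEMENTATION DEFINED, which
 * is why Cortex-A8 and Cortex-A9 each need their own enum above.
 */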
/*
 * Cortex-A8 HW events mapping
 *
 * The hardware events that we support. We do support cache operations but
 * we have Harvard caches and no way to combine instruction and data
 * accesses/misses in hardware.
 */
static const unsigned armv7_a8_perf_map[PERF_COUNT_HW_MAX] = {
	[PERF_COUNT_HW_CPU_CYCLES]	    = ARMV7_PERFCTR_CPU_CYCLES,
	[PERF_COUNT_HW_INSTRUCTIONS]	    = ARMV7_PERFCTR_INSTR_EXECUTED,
	[PERF_COUNT_HW_CACHE_REFERENCES]    = HW_OP_UNSUPPORTED,
	[PERF_COUNT_HW_CACHE_MISSES]	    = HW_OP_UNSUPPORTED,
	[PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = ARMV7_PERFCTR_PC_WRITE,
	[PERF_COUNT_HW_BRANCH_MISSES]	    = ARMV7_PERFCTR_PC_BRANCH_MIS_PRED,
	[PERF_COUNT_HW_BUS_CYCLES]	    = ARMV7_PERFCTR_CLOCK_CYCLES,
};
static const unsigned armv7_a8_perf_cache_map[PERF_COUNT_HW_CACHE_MAX]
					  [PERF_COUNT_HW_CACHE_OP_MAX]
					  [PERF_COUNT_HW_CACHE_RESULT_MAX] = {
	[C(L1D)] = {
		/*
		 * The performance counters don't differentiate between read
		 * and write accesses/misses so this isn't strictly correct,
		 * but it's the best we can do. Writes and reads get
		 * combined.
		 */
		[C(OP_READ)] = {
			[C(RESULT_ACCESS)]	= ARMV7_PERFCTR_DCACHE_ACCESS,
			[C(RESULT_MISS)]	= ARMV7_PERFCTR_DCACHE_REFILL,
		},
		[C(OP_WRITE)] = {
			[C(RESULT_ACCESS)]	= ARMV7_PERFCTR_DCACHE_ACCESS,
			[C(RESULT_MISS)]	= ARMV7_PERFCTR_DCACHE_REFILL,
		},
		[C(OP_PREFETCH)] = {
			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)]	= CACHE_OP_UNSUPPORTED,
		},
	},
	[C(L1I)] = {
		[C(OP_READ)] = {
			[C(RESULT_ACCESS)]	= ARMV7_PERFCTR_L1_INST,
			[C(RESULT_MISS)]	= ARMV7_PERFCTR_L1_INST_MISS,
		},
		[C(OP_WRITE)] = {
			[C(RESULT_ACCESS)]	= ARMV7_PERFCTR_L1_INST,
			[C(RESULT_MISS)]	= ARMV7_PERFCTR_L1_INST_MISS,
		},
		[C(OP_PREFETCH)] = {
			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)]	= CACHE_OP_UNSUPPORTED,
		},
	},
	[C(LL)] = {
		[C(OP_READ)] = {
			[C(RESULT_ACCESS)]	= ARMV7_PERFCTR_L2_ACCESS,
			[C(RESULT_MISS)]	= ARMV7_PERFCTR_L2_CACH_MISS,
		},
		[C(OP_WRITE)] = {
			[C(RESULT_ACCESS)]	= ARMV7_PERFCTR_L2_ACCESS,
			[C(RESULT_MISS)]	= ARMV7_PERFCTR_L2_CACH_MISS,
		},
		[C(OP_PREFETCH)] = {
			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)]	= CACHE_OP_UNSUPPORTED,
		},
	},
	[C(DTLB)] = {
		/*
		 * Only ITLB misses and DTLB refills are supported.  If users
		 * want the DTLB refill misses, a raw counter must be used.
		 */
		[C(OP_READ)] = {
			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)]	= ARMV7_PERFCTR_DTLB_REFILL,
		},
		[C(OP_WRITE)] = {
			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)]	= ARMV7_PERFCTR_DTLB_REFILL,
		},
		[C(OP_PREFETCH)] = {
			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)]	= CACHE_OP_UNSUPPORTED,
		},
	},
	[C(ITLB)] = {
		[C(OP_READ)] = {
			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)]	= ARMV7_PERFCTR_ITLB_MISS,
		},
		[C(OP_WRITE)] = {
			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)]	= ARMV7_PERFCTR_ITLB_MISS,
		},
		[C(OP_PREFETCH)] = {
			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)]	= CACHE_OP_UNSUPPORTED,
		},
	},
	[C(BPU)] = {
		[C(OP_READ)] = {
			[C(RESULT_ACCESS)]	= ARMV7_PERFCTR_PC_WRITE,
			[C(RESULT_MISS)]
					= ARMV7_PERFCTR_PC_BRANCH_MIS_PRED,
		},
		[C(OP_WRITE)] = {
			[C(RESULT_ACCESS)]	= ARMV7_PERFCTR_PC_WRITE,
			[C(RESULT_MISS)]
					= ARMV7_PERFCTR_PC_BRANCH_MIS_PRED,
		},
		[C(OP_PREFETCH)] = {
			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)]	= CACHE_OP_UNSUPPORTED,
		},
	},
};
/*
 * Cortex-A9 HW events mapping
 */
static const unsigned armv7_a9_perf_map[PERF_COUNT_HW_MAX] = {
	[PERF_COUNT_HW_CPU_CYCLES]	    = ARMV7_PERFCTR_CPU_CYCLES,
	[PERF_COUNT_HW_INSTRUCTIONS]	    =
					ARMV7_PERFCTR_INST_OUT_OF_RENAME_STAGE,
	[PERF_COUNT_HW_CACHE_REFERENCES]    = ARMV7_PERFCTR_COHERENT_LINE_HIT,
	[PERF_COUNT_HW_CACHE_MISSES]	    = ARMV7_PERFCTR_COHERENT_LINE_MISS,
	[PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = ARMV7_PERFCTR_PC_WRITE,
	[PERF_COUNT_HW_BRANCH_MISSES]	    = ARMV7_PERFCTR_PC_BRANCH_MIS_PRED,
	[PERF_COUNT_HW_BUS_CYCLES]	    = ARMV7_PERFCTR_CLOCK_CYCLES,
};
static const unsigned armv7_a9_perf_cache_map[PERF_COUNT_HW_CACHE_MAX]
					  [PERF_COUNT_HW_CACHE_OP_MAX]
					  [PERF_COUNT_HW_CACHE_RESULT_MAX] = {
	[C(L1D)] = {
		/*
		 * The performance counters don't differentiate between read
		 * and write accesses/misses so this isn't strictly correct,
		 * but it's the best we can do. Writes and reads get
		 * combined.
		 */
		[C(OP_READ)] = {
			[C(RESULT_ACCESS)]	= ARMV7_PERFCTR_DCACHE_ACCESS,
			[C(RESULT_MISS)]	= ARMV7_PERFCTR_DCACHE_REFILL,
		},
		[C(OP_WRITE)] = {
			[C(RESULT_ACCESS)]	= ARMV7_PERFCTR_DCACHE_ACCESS,
			[C(RESULT_MISS)]	= ARMV7_PERFCTR_DCACHE_REFILL,
		},
		[C(OP_PREFETCH)] = {
			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)]	= CACHE_OP_UNSUPPORTED,
		},
	},
	[C(L1I)] = {
		[C(OP_READ)] = {
			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)]	= ARMV7_PERFCTR_IFETCH_MISS,
		},
		[C(OP_WRITE)] = {
			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)]	= ARMV7_PERFCTR_IFETCH_MISS,
		},
		[C(OP_PREFETCH)] = {
			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)]	= CACHE_OP_UNSUPPORTED,
		},
	},
	[C(LL)] = {
		[C(OP_READ)] = {
			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)]	= CACHE_OP_UNSUPPORTED,
		},
		[C(OP_WRITE)] = {
			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)]	= CACHE_OP_UNSUPPORTED,
		},
		[C(OP_PREFETCH)] = {
			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)]	= CACHE_OP_UNSUPPORTED,
		},
	},
	[C(DTLB)] = {
		/*
		 * Only ITLB misses and DTLB refills are supported.  If users
		 * want the DTLB refill misses, a raw counter must be used.
		 */
		[C(OP_READ)] = {
			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)]	= ARMV7_PERFCTR_DTLB_REFILL,
		},
		[C(OP_WRITE)] = {
			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)]	= ARMV7_PERFCTR_DTLB_REFILL,
		},
		[C(OP_PREFETCH)] = {
			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)]	= CACHE_OP_UNSUPPORTED,
		},
	},
	[C(ITLB)] = {
		[C(OP_READ)] = {
			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)]	= ARMV7_PERFCTR_ITLB_MISS,
		},
		[C(OP_WRITE)] = {
			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)]	= ARMV7_PERFCTR_ITLB_MISS,
		},
		[C(OP_PREFETCH)] = {
			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)]	= CACHE_OP_UNSUPPORTED,
		},
	},
	[C(BPU)] = {
		[C(OP_READ)] = {
			[C(RESULT_ACCESS)]	= ARMV7_PERFCTR_PC_WRITE,
			[C(RESULT_MISS)]
					= ARMV7_PERFCTR_PC_BRANCH_MIS_PRED,
		},
		[C(OP_WRITE)] = {
			[C(RESULT_ACCESS)]	= ARMV7_PERFCTR_PC_WRITE,
			[C(RESULT_MISS)]
					= ARMV7_PERFCTR_PC_BRANCH_MIS_PRED,
		},
		[C(OP_PREFETCH)] = {
			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)]	= CACHE_OP_UNSUPPORTED,
		},
	},
};
/*
 * Perf Events counters
 */
enum armv7_counters {
	ARMV7_CYCLE_COUNTER	= 1,	/* Cycle counter */
	ARMV7_COUNTER0		= 2,	/* First event counter */
};

/*
 * The cycle counter is ARMV7_CYCLE_COUNTER.
 * The first event counter is ARMV7_COUNTER0.
 * The last event counter is (ARMV7_COUNTER0 + armpmu->num_events - 1).
 */
#define	ARMV7_COUNTER_LAST	(ARMV7_COUNTER0 + armpmu->num_events - 1)
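/*
 * Worked example (values assumed for illustration, following the index
 * allocation in armv7pmu_get_event_idx() below): on a PMU with 4 event
 * counters, num_events is 5 (4 event counters plus the cycle counter)
 * and the indices handed out by this driver are
 *
 *	idx 1		-> CCNT (the dedicated cycle counter)
 *	idx 2 .. 5	-> CNT0 .. CNT3 (the event counters)
 *
 * The hardware counter number is recovered from an idx by subtracting
 * ARMV7_EVENT_CNT_TO_CNTx, defined below.
 */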
/*
 * ARMv7 low level PMNC access
 */

/*
 * Per-CPU PMNC: config reg
 */
#define ARMV7_PMNC_E		(1 << 0) /* Enable all counters */
#define ARMV7_PMNC_P		(1 << 1) /* Reset all counters */
#define ARMV7_PMNC_C		(1 << 2) /* Cycle counter reset */
#define ARMV7_PMNC_D		(1 << 3) /* CCNT counts every 64th cpu cycle */
#define ARMV7_PMNC_X		(1 << 4) /* Export to ETM */
#define ARMV7_PMNC_DP		(1 << 5) /* Disable CCNT if non-invasive debug*/
#define	ARMV7_PMNC_N_SHIFT	11	 /* Number of counters supported */
#define	ARMV7_PMNC_N_MASK	0x1f
#define	ARMV7_PMNC_MASK		0x3f	 /* Mask for writable bits */

/*
 * Available counters
 */
#define ARMV7_CNT0		0	/* First event counter */
#define ARMV7_CCNT		31	/* Cycle counter */

/* Perf Event to low level counters mapping */
#define ARMV7_EVENT_CNT_TO_CNTx	(ARMV7_COUNTER0 - ARMV7_CNT0)

/*
 * CNTENS: counters enable reg
 */
#define ARMV7_CNTENS_P(idx)	(1 << (idx - ARMV7_EVENT_CNT_TO_CNTx))
#define ARMV7_CNTENS_C		(1 << ARMV7_CCNT)

/*
 * CNTENC: counters disable reg
 */
#define ARMV7_CNTENC_P(idx)	(1 << (idx - ARMV7_EVENT_CNT_TO_CNTx))
#define ARMV7_CNTENC_C		(1 << ARMV7_CCNT)

/*
 * INTENS: counters overflow interrupt enable reg
 */
#define ARMV7_INTENS_P(idx)	(1 << (idx - ARMV7_EVENT_CNT_TO_CNTx))
#define ARMV7_INTENS_C		(1 << ARMV7_CCNT)

/*
 * INTENC: counters overflow interrupt disable reg
 */
#define ARMV7_INTENC_P(idx)	(1 << (idx - ARMV7_EVENT_CNT_TO_CNTx))
#define ARMV7_INTENC_C		(1 << ARMV7_CCNT)

/*
 * EVTSEL: Event selection reg
 */
#define	ARMV7_EVTSEL_MASK	0xff	/* Mask for writable bits */

/*
 * SELECT: Counter selection reg
 */
#define	ARMV7_SELECT_MASK	0x1f	/* Mask for writable bits */

/*
 * FLAG: counters overflow flag status reg
 */
#define ARMV7_FLAG_P(idx)	(1 << (idx - ARMV7_EVENT_CNT_TO_CNTx))
#define	ARMV7_FLAG_C		(1 << ARMV7_CCNT)
#define	ARMV7_FLAG_MASK		0xffffffff	/* Mask for writable bits */
#define	ARMV7_OVERFLOWED_MASK	ARMV7_FLAG_MASK
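/*
 * Example of the idx -> register-bit mapping (illustrative): with
 * ARMV7_EVENT_CNT_TO_CNTx == 2, ARMV7_CNTENS_P(2) is (1 << 0), i.e.
 * perf index 2 controls hardware event counter CNT0, while the cycle
 * counter always lives in bit 31 (ARMV7_CNTENS_C).  The same bit
 * layout is shared by CNTENC, INTENS, INTENC and FLAG.
 */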
static inline unsigned long armv7_pmnc_read(void)
{
	u32 val;
	asm volatile("mrc p15, 0, %0, c9, c12, 0" : "=r"(val));
	return val;
}

static inline void armv7_pmnc_write(unsigned long val)
{
	val &= ARMV7_PMNC_MASK;
	isb();
	asm volatile("mcr p15, 0, %0, c9, c12, 0" : : "r"(val));
}

static inline int armv7_pmnc_has_overflowed(unsigned long pmnc)
{
	return pmnc & ARMV7_OVERFLOWED_MASK;
}
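/*
 * Usage sketch (an illustration, not called anywhere in this file): to
 * make CCNT tick once every 64 CPU cycles one could set the divider bit,
 *
 *	armv7_pmnc_write(armv7_pmnc_read() | ARMV7_PMNC_D);
 *
 * armv7_pmnc_write() masks with ARMV7_PMNC_MASK, so only the architected
 * writable bits [5:0] ever reach the register.
 */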
static inline int armv7_pmnc_counter_has_overflowed(unsigned long pmnc,
					enum armv7_counters counter)
{
	int ret = 0;

	if (counter == ARMV7_CYCLE_COUNTER)
		ret = pmnc & ARMV7_FLAG_C;
	else if ((counter >= ARMV7_COUNTER0) && (counter <= ARMV7_COUNTER_LAST))
		ret = pmnc & ARMV7_FLAG_P(counter);
	else
		pr_err("CPU%u checking wrong counter %d overflow status\n",
			smp_processor_id(), counter);

	return ret;
}
static inline int armv7_pmnc_select_counter(unsigned int idx)
{
	u32 val;

	if ((idx < ARMV7_COUNTER0) || (idx > ARMV7_COUNTER_LAST)) {
		pr_err("CPU%u selecting wrong PMNC counter %d\n",
			smp_processor_id(), idx);
		return -1;
	}

	val = (idx - ARMV7_EVENT_CNT_TO_CNTx) & ARMV7_SELECT_MASK;
	asm volatile("mcr p15, 0, %0, c9, c12, 5" : : "r" (val));
	isb();

	return idx;
}
static inline u32 armv7pmu_read_counter(int idx)
{
	unsigned long value = 0;

	if (idx == ARMV7_CYCLE_COUNTER)
		asm volatile("mrc p15, 0, %0, c9, c13, 0" : "=r" (value));
	else if ((idx >= ARMV7_COUNTER0) && (idx <= ARMV7_COUNTER_LAST)) {
		if (armv7_pmnc_select_counter(idx) == idx)
			asm volatile("mrc p15, 0, %0, c9, c13, 2"
				     : "=r" (value));
	} else
		pr_err("CPU%u reading wrong counter %d\n",
			smp_processor_id(), idx);

	return value;
}
static inline void armv7pmu_write_counter(int idx, u32 value)
{
	if (idx == ARMV7_CYCLE_COUNTER)
		asm volatile("mcr p15, 0, %0, c9, c13, 0" : : "r" (value));
	else if ((idx >= ARMV7_COUNTER0) && (idx <= ARMV7_COUNTER_LAST)) {
		if (armv7_pmnc_select_counter(idx) == idx)
			asm volatile("mcr p15, 0, %0, c9, c13, 2"
				     : : "r" (value));
	} else
		pr_err("CPU%u writing wrong counter %d\n",
			smp_processor_id(), idx);
}
static inline void armv7_pmnc_write_evtsel(unsigned int idx, u32 val)
{
	if (armv7_pmnc_select_counter(idx) == idx) {
		val &= ARMV7_EVTSEL_MASK;
		asm volatile("mcr p15, 0, %0, c9, c13, 1" : : "r" (val));
	}
}
static inline u32 armv7_pmnc_enable_counter(unsigned int idx)
{
	u32 val;

	if ((idx != ARMV7_CYCLE_COUNTER) &&
	    ((idx < ARMV7_COUNTER0) || (idx > ARMV7_COUNTER_LAST))) {
		pr_err("CPU%u enabling wrong PMNC counter %d\n",
			smp_processor_id(), idx);
		return -1;
	}

	if (idx == ARMV7_CYCLE_COUNTER)
		val = ARMV7_CNTENS_C;
	else
		val = ARMV7_CNTENS_P(idx);

	asm volatile("mcr p15, 0, %0, c9, c12, 1" : : "r" (val));

	return idx;
}
static inline u32 armv7_pmnc_disable_counter(unsigned int idx)
{
	u32 val;

	if ((idx != ARMV7_CYCLE_COUNTER) &&
	    ((idx < ARMV7_COUNTER0) || (idx > ARMV7_COUNTER_LAST))) {
		pr_err("CPU%u disabling wrong PMNC counter %d\n",
			smp_processor_id(), idx);
		return -1;
	}

	if (idx == ARMV7_CYCLE_COUNTER)
		val = ARMV7_CNTENC_C;
	else
		val = ARMV7_CNTENC_P(idx);

	asm volatile("mcr p15, 0, %0, c9, c12, 2" : : "r" (val));

	return idx;
}
static inline u32 armv7_pmnc_enable_intens(unsigned int idx)
{
	u32 val;

	if ((idx != ARMV7_CYCLE_COUNTER) &&
	    ((idx < ARMV7_COUNTER0) || (idx > ARMV7_COUNTER_LAST))) {
		pr_err("CPU%u enabling wrong PMNC counter interrupt enable %d\n",
			smp_processor_id(), idx);
		return -1;
	}

	if (idx == ARMV7_CYCLE_COUNTER)
		val = ARMV7_INTENS_C;
	else
		val = ARMV7_INTENS_P(idx);

	asm volatile("mcr p15, 0, %0, c9, c14, 1" : : "r" (val));

	return idx;
}
static inline u32 armv7_pmnc_disable_intens(unsigned int idx)
{
	u32 val;

	if ((idx != ARMV7_CYCLE_COUNTER) &&
	    ((idx < ARMV7_COUNTER0) || (idx > ARMV7_COUNTER_LAST))) {
		pr_err("CPU%u disabling wrong PMNC counter interrupt enable %d\n",
			smp_processor_id(), idx);
		return -1;
	}

	if (idx == ARMV7_CYCLE_COUNTER)
		val = ARMV7_INTENC_C;
	else
		val = ARMV7_INTENC_P(idx);

	asm volatile("mcr p15, 0, %0, c9, c14, 2" : : "r" (val));

	return idx;
}
static inline u32 armv7_pmnc_getreset_flags(void)
{
	u32 val;

	/* Read */
	asm volatile("mrc p15, 0, %0, c9, c12, 3" : "=r" (val));

	/* Write to clear flags */
	val &= ARMV7_FLAG_MASK;
	asm volatile("mcr p15, 0, %0, c9, c12, 3" : : "r" (val));

	return val;
}
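/*
 * Editorial note: the overflow flag register is write-one-to-clear, so
 * writing back the value just read clears exactly the flags captured in
 * the snapshot; any flag raised after the read survives for the next
 * interrupt rather than being lost.
 */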
#ifdef DEBUG
static void armv7_pmnc_dump_regs(void)
{
	u32 val;
	unsigned int cnt;

	printk(KERN_INFO "PMNC registers dump:\n");

	asm volatile("mrc p15, 0, %0, c9, c12, 0" : "=r" (val));
	printk(KERN_INFO "PMNC  =0x%08x\n", val);

	asm volatile("mrc p15, 0, %0, c9, c12, 1" : "=r" (val));
	printk(KERN_INFO "CNTENS=0x%08x\n", val);

	asm volatile("mrc p15, 0, %0, c9, c14, 1" : "=r" (val));
	printk(KERN_INFO "INTENS=0x%08x\n", val);

	asm volatile("mrc p15, 0, %0, c9, c12, 3" : "=r" (val));
	printk(KERN_INFO "FLAGS =0x%08x\n", val);

	asm volatile("mrc p15, 0, %0, c9, c12, 5" : "=r" (val));
	printk(KERN_INFO "SELECT=0x%08x\n", val);

	asm volatile("mrc p15, 0, %0, c9, c13, 0" : "=r" (val));
	printk(KERN_INFO "CCNT  =0x%08x\n", val);

	for (cnt = ARMV7_COUNTER0; cnt < ARMV7_COUNTER_LAST; cnt++) {
		armv7_pmnc_select_counter(cnt);
		asm volatile("mrc p15, 0, %0, c9, c13, 2" : "=r" (val));
		printk(KERN_INFO "CNT[%d] count =0x%08x\n",
			cnt - ARMV7_EVENT_CNT_TO_CNTx, val);
		asm volatile("mrc p15, 0, %0, c9, c13, 1" : "=r" (val));
		printk(KERN_INFO "CNT[%d] evtsel=0x%08x\n",
			cnt - ARMV7_EVENT_CNT_TO_CNTx, val);
	}
}
#endif
static void armv7pmu_enable_event(struct hw_perf_event *hwc, int idx)
{
	unsigned long flags;

	/*
	 * Enable counter and interrupt, and set the counter to count
	 * the event that we're interested in.
	 */
	raw_spin_lock_irqsave(&pmu_lock, flags);

	/*
	 * Disable counter
	 */
	armv7_pmnc_disable_counter(idx);

	/*
	 * Set event (if destined for PMNx counters)
	 * We don't need to set the event if it's a cycle count
	 */
	if (idx != ARMV7_CYCLE_COUNTER)
		armv7_pmnc_write_evtsel(idx, hwc->config_base);

	/*
	 * Enable interrupt for this counter
	 */
	armv7_pmnc_enable_intens(idx);

	/*
	 * Enable counter
	 */
	armv7_pmnc_enable_counter(idx);

	raw_spin_unlock_irqrestore(&pmu_lock, flags);
}
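/*
 * Editorial note on the ordering above: the counter is stopped before its
 * EVTSEL is reprogrammed so that no cycles of the old event get charged
 * to the new one, and it is only re-enabled after the overflow interrupt
 * has been unmasked, so an early overflow cannot be missed.
 */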
static void armv7pmu_disable_event(struct hw_perf_event *hwc, int idx)
{
	unsigned long flags;

	/*
	 * Disable counter and interrupt
	 */
	raw_spin_lock_irqsave(&pmu_lock, flags);

	/*
	 * Disable counter
	 */
	armv7_pmnc_disable_counter(idx);

	/*
	 * Disable interrupt for this counter
	 */
	armv7_pmnc_disable_intens(idx);

	raw_spin_unlock_irqrestore(&pmu_lock, flags);
}
static irqreturn_t armv7pmu_handle_irq(int irq_num, void *dev)
{
	unsigned long pmnc;
	struct perf_sample_data data;
	struct cpu_hw_events *cpuc;
	struct pt_regs *regs;
	int idx;

	/*
	 * Get and reset the IRQ flags
	 */
	pmnc = armv7_pmnc_getreset_flags();

	/*
	 * Did an overflow occur?
	 */
	if (!armv7_pmnc_has_overflowed(pmnc))
		return IRQ_NONE;

	/*
	 * Handle the counter(s) overflow(s)
	 */
	regs = get_irq_regs();

	perf_sample_data_init(&data, 0);

	cpuc = &__get_cpu_var(cpu_hw_events);
	for (idx = 0; idx <= armpmu->num_events; ++idx) {
		struct perf_event *event = cpuc->events[idx];
		struct hw_perf_event *hwc;

		if (!test_bit(idx, cpuc->active_mask))
			continue;

		/*
		 * We have a single interrupt for all counters. Check that
		 * each counter has overflowed before we process it.
		 */
		if (!armv7_pmnc_counter_has_overflowed(pmnc, idx))
			continue;

		hwc = &event->hw;
		armpmu_event_update(event, hwc, idx, 1);
		data.period = event->hw.last_period;
		if (!armpmu_event_set_period(event, hwc, idx))
			continue;

		if (perf_event_overflow(event, 0, &data, regs))
			armpmu->disable(hwc, idx);
	}

	/*
	 * Handle the pending perf events.
	 *
	 * Note: this call *must* be run with interrupts disabled. For
	 * platforms that can have the PMU interrupts raised as an NMI, this
	 * will not work.
	 */
	irq_work_run();

	return IRQ_HANDLED;
}
static void armv7pmu_start(void)
{
	unsigned long flags;

	raw_spin_lock_irqsave(&pmu_lock, flags);
	/* Enable all counters */
	armv7_pmnc_write(armv7_pmnc_read() | ARMV7_PMNC_E);
	raw_spin_unlock_irqrestore(&pmu_lock, flags);
}

static void armv7pmu_stop(void)
{
	unsigned long flags;

	raw_spin_lock_irqsave(&pmu_lock, flags);
	/* Disable all counters */
	armv7_pmnc_write(armv7_pmnc_read() & ~ARMV7_PMNC_E);
	raw_spin_unlock_irqrestore(&pmu_lock, flags);
}
static int armv7pmu_get_event_idx(struct cpu_hw_events *cpuc,
				  struct hw_perf_event *event)
{
	int idx;

	/* Always place a cycle counter into the cycle counter. */
	if (event->config_base == ARMV7_PERFCTR_CPU_CYCLES) {
		if (test_and_set_bit(ARMV7_CYCLE_COUNTER, cpuc->used_mask))
			return -EAGAIN;

		return ARMV7_CYCLE_COUNTER;
	} else {
		/*
		 * For anything other than a cycle counter, try and use
		 * the events counters
		 */
		for (idx = ARMV7_COUNTER0; idx <= armpmu->num_events; ++idx) {
			if (!test_and_set_bit(idx, cpuc->used_mask))
				return idx;
		}

		/* The counters are all in use. */
		return -EAGAIN;
	}
}
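/*
 * Design note (editorial): pinning CPU_CYCLES to the dedicated CCNT means
 * a cycles event never occupies one of the scarce general-purpose event
 * counters, so e.g. "perf stat -e cycles,cache-misses" can schedule both
 * events simultaneously even on a PMU with very few CNTx counters.
 */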
static void armv7pmu_reset(void *info)
{
	u32 idx, nb_cnt = armpmu->num_events;

	/* The counter and interrupt enable registers are unknown at reset. */
	for (idx = 1; idx < nb_cnt; ++idx)
		armv7pmu_disable_event(NULL, idx);

	/* Initialize & Reset PMNC: C and P bits */
	armv7_pmnc_write(ARMV7_PMNC_P | ARMV7_PMNC_C);
}
static struct arm_pmu armv7pmu = {
	.handle_irq		= armv7pmu_handle_irq,
	.enable			= armv7pmu_enable_event,
	.disable		= armv7pmu_disable_event,
	.read_counter		= armv7pmu_read_counter,
	.write_counter		= armv7pmu_write_counter,
	.get_event_idx		= armv7pmu_get_event_idx,
	.start			= armv7pmu_start,
	.stop			= armv7pmu_stop,
	.reset			= armv7pmu_reset,
	.raw_event_mask		= 0xFF,
	.max_period		= (1LLU << 32) - 1,
};
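/*
 * Editorial note: max_period is (2^32 - 1) because the ARMv7 PMU
 * counters, including CCNT, are 32 bits wide, and raw_event_mask is 0xFF
 * because EVTSEL only implements an 8-bit event number (see
 * ARMV7_EVTSEL_MASK above).
 */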
static u32 __init armv7_read_num_pmnc_events(void)
{
	u32 nb_cnt;

	/* Read the nb of CNTx counters supported from PMNC */
	nb_cnt = (armv7_pmnc_read() >> ARMV7_PMNC_N_SHIFT) & ARMV7_PMNC_N_MASK;

	/* Add the CPU cycles counter and return */
	return nb_cnt + 1;
}
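/*
 * Worked example (PMNC value assumed for illustration): if PMNC reads
 * 0x41002000 then (0x41002000 >> 11) & 0x1f == 4, i.e. the core
 * implements four CNTx counters, and this function returns 5 once the
 * cycle counter is added.
 */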
static const struct arm_pmu *__init armv7_a8_pmu_init(void)
{
	armv7pmu.id		= ARM_PERF_PMU_ID_CA8;
	armv7pmu.name		= "ARMv7 Cortex-A8";
	armv7pmu.cache_map	= &armv7_a8_perf_cache_map;
	armv7pmu.event_map	= &armv7_a8_perf_map;
	armv7pmu.num_events	= armv7_read_num_pmnc_events();
	return &armv7pmu;
}

static const struct arm_pmu *__init armv7_a9_pmu_init(void)
{
	armv7pmu.id		= ARM_PERF_PMU_ID_CA9;
	armv7pmu.name		= "ARMv7 Cortex-A9";
	armv7pmu.cache_map	= &armv7_a9_perf_cache_map;
	armv7pmu.event_map	= &armv7_a9_perf_map;
	armv7pmu.num_events	= armv7_read_num_pmnc_events();
	return &armv7pmu;
}
#else
static const struct arm_pmu *__init armv7_a8_pmu_init(void)
{
	return NULL;
}

static const struct arm_pmu *__init armv7_a9_pmu_init(void)
{
	return NULL;
}
#endif	/* CONFIG_CPU_V7 */