/*
 * Performance event support - powerpc architecture code
 *
 * Copyright 2008-2009 Paul Mackerras, IBM Corporation.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/perf_event.h>
#include <linux/percpu.h>
#include <linux/hardirq.h>
#include <linux/uaccess.h>
#include <asm/machdep.h>
#include <asm/firmware.h>
#include <asm/ptrace.h>
#include <asm/code-patching.h>
#define BHRB_MAX_ENTRIES	32
#define BHRB_TARGET		0x0000000000000002
#define BHRB_PREDICTION		0x0000000000000001
#define BHRB_EA			0xFFFFFFFFFFFFFFFCUL
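/*
 * A raw BHRB entry packs two flag bits into the low-order bits of the
 * branch effective address: bit 1 (BHRB_TARGET) marks a "target" entry
 * and bit 0 (BHRB_PREDICTION) carries the prediction outcome, so the
 * address itself is recovered by masking with BHRB_EA (see
 * power_pmu_bhrb_read() below).
 */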
struct cpu_hw_events {
	struct perf_event *event[MAX_HWEVENTS];
	u64 events[MAX_HWEVENTS];
	unsigned int flags[MAX_HWEVENTS];

	/*
	 * The order of the MMCR array is:
	 *  - 64-bit, MMCR0, MMCR1, MMCRA, MMCR2
	 *  - 32-bit, MMCR0, MMCR1, MMCR2
	 */
	unsigned long mmcr[4];
	struct perf_event *limited_counter[MAX_LIMITED_HWCOUNTERS];
	u8  limited_hwidx[MAX_LIMITED_HWCOUNTERS];
	u64 alternatives[MAX_HWEVENTS][MAX_EVENT_ALTERNATIVES];
	unsigned long amasks[MAX_HWEVENTS][MAX_EVENT_ALTERNATIVES];
	unsigned long avalues[MAX_HWEVENTS][MAX_EVENT_ALTERNATIVES];

	unsigned int txn_flags;

	u64				bhrb_filter;	/* BHRB HW branch filter */
	unsigned int			bhrb_users;
	struct	perf_branch_stack	bhrb_stack;
	struct	perf_branch_entry	bhrb_entries[BHRB_MAX_ENTRIES];
};
static DEFINE_PER_CPU(struct cpu_hw_events, cpu_hw_events);

static struct power_pmu *ppmu;
/*
 * Normally, to ignore kernel events we set the FCS (freeze counters
 * in supervisor mode) bit in MMCR0, but if the kernel runs with the
 * hypervisor bit set in the MSR, or if we are running on a processor
 * where the hypervisor bit is forced to 1 (as on Apple G5 processors),
 * then we need to use the FCHV bit to ignore kernel events.
 */
static unsigned int freeze_events_kernel = MMCR0_FCS;
/*
 * 32-bit doesn't have MMCRA but does have an MMCR2,
 * and a few other names are different.
 */
#ifdef CONFIG_PPC32

#define MMCR0_PMCjCE		MMCR0_PMCnCE
#define MMCR0_PMCC_U6		0

#define SPRN_MMCRA		SPRN_MMCR2
#define MMCRA_SAMPLE_ENABLE	0
static inline unsigned long perf_ip_adjust(struct pt_regs *regs)
{
	return 0;
}
static inline void perf_get_data_addr(struct pt_regs *regs, u64 *addrp) { }
static inline u32 perf_get_misc_flags(struct pt_regs *regs)
{
	return 0;
}
static inline void perf_read_regs(struct pt_regs *regs)
{
	regs->result = 0;
}
static inline int perf_intr_is_nmi(struct pt_regs *regs)
{
	return 0;
}

static inline int siar_valid(struct pt_regs *regs)
{
	return 1;
}

static bool is_ebb_event(struct perf_event *event) { return false; }
static int ebb_event_check(struct perf_event *event) { return 0; }
static void ebb_event_add(struct perf_event *event) { }
static void ebb_switch_out(unsigned long mmcr0) { }
static unsigned long ebb_switch_in(bool ebb, struct cpu_hw_events *cpuhw)
{
	return cpuhw->mmcr[0];
}

static inline void power_pmu_bhrb_enable(struct perf_event *event) {}
static inline void power_pmu_bhrb_disable(struct perf_event *event) {}
static void power_pmu_sched_task(struct perf_event_context *ctx, bool sched_in) {}
static inline void power_pmu_bhrb_read(struct cpu_hw_events *cpuhw) {}
static void pmao_restore_workaround(bool ebb) { }
#endif /* CONFIG_PPC32 */
static bool regs_use_siar(struct pt_regs *regs)
{
	/*
	 * When we take a performance monitor exception the regs are setup
	 * using perf_read_regs() which overloads some fields, in particular
	 * regs->result to tell us whether to use SIAR.
	 *
	 * However if the regs are from another exception, eg. a syscall, then
	 * they have not been setup using perf_read_regs() and so regs->result
	 * is something random.
	 */
	return ((TRAP(regs) == 0xf00) && regs->result);
}
/*
 * Things that are specific to 64-bit implementations.
 */
#ifdef CONFIG_PPC64

static inline unsigned long perf_ip_adjust(struct pt_regs *regs)
{
	unsigned long mmcra = regs->dsisr;

	if ((ppmu->flags & PPMU_HAS_SSLOT) && (mmcra & MMCRA_SAMPLE_ENABLE)) {
		unsigned long slot = (mmcra & MMCRA_SLOT) >> MMCRA_SLOT_SHIFT;

		if (slot > 1)
			return 4 * (slot - 1);
	}

	return 0;
}
/*
 * The user wants a data address recorded.
 * If we're not doing instruction sampling, give them the SDAR
 * (sampled data address).  If we are doing instruction sampling, then
 * only give them the SDAR if it corresponds to the instruction
 * pointed to by SIAR; this is indicated by the [POWER6_]MMCRA_SDSYNC, the
 * [POWER7P_]MMCRA_SDAR_VALID bit in MMCRA, or the SDAR_VALID bit in SIER.
 */
static inline void perf_get_data_addr(struct pt_regs *regs, u64 *addrp)
{
	unsigned long mmcra = regs->dsisr;
	bool sdar_valid;

	if (ppmu->flags & PPMU_HAS_SIER)
		sdar_valid = regs->dar & SIER_SDAR_VALID;
	else {
		unsigned long sdsync;

		if (ppmu->flags & PPMU_SIAR_VALID)
			sdsync = POWER7P_MMCRA_SDAR_VALID;
		else if (ppmu->flags & PPMU_ALT_SIPR)
			sdsync = POWER6_MMCRA_SDSYNC;
		else if (ppmu->flags & PPMU_NO_SIAR)
			sdsync = MMCRA_SAMPLE_ENABLE;
		else
			sdsync = MMCRA_SDSYNC;

		sdar_valid = mmcra & sdsync;
	}

	if (!(mmcra & MMCRA_SAMPLE_ENABLE) || sdar_valid)
		*addrp = mfspr(SPRN_SDAR);

	if (perf_paranoid_kernel() && !capable(CAP_SYS_ADMIN) &&
	    is_kernel_addr(mfspr(SPRN_SDAR)))
		*addrp = 0;
}
static bool regs_sihv(struct pt_regs *regs)
{
	unsigned long sihv = MMCRA_SIHV;

	if (ppmu->flags & PPMU_HAS_SIER)
		return !!(regs->dar & SIER_SIHV);

	if (ppmu->flags & PPMU_ALT_SIPR)
		sihv = POWER6_MMCRA_SIHV;

	return !!(regs->dsisr & sihv);
}
static bool regs_sipr(struct pt_regs *regs)
{
	unsigned long sipr = MMCRA_SIPR;

	if (ppmu->flags & PPMU_HAS_SIER)
		return !!(regs->dar & SIER_SIPR);

	if (ppmu->flags & PPMU_ALT_SIPR)
		sipr = POWER6_MMCRA_SIPR;

	return !!(regs->dsisr & sipr);
}
static inline u32 perf_flags_from_msr(struct pt_regs *regs)
{
	if (regs->msr & MSR_PR)
		return PERF_RECORD_MISC_USER;
	if ((regs->msr & MSR_HV) && freeze_events_kernel != MMCR0_FCHV)
		return PERF_RECORD_MISC_HYPERVISOR;
	return PERF_RECORD_MISC_KERNEL;
}
static inline u32 perf_get_misc_flags(struct pt_regs *regs)
{
	bool use_siar = regs_use_siar(regs);

	if (!use_siar)
		return perf_flags_from_msr(regs);

	/*
	 * If we don't have flags in MMCRA, rather than using
	 * the MSR, we intuit the flags from the address in
	 * SIAR which should give slightly more reliable
	 * results
	 */
	if (ppmu->flags & PPMU_NO_SIPR) {
		unsigned long siar = mfspr(SPRN_SIAR);
		if (is_kernel_addr(siar))
			return PERF_RECORD_MISC_KERNEL;
		return PERF_RECORD_MISC_USER;
	}

	/* PR has priority over HV, so order below is important */
	if (regs_sipr(regs))
		return PERF_RECORD_MISC_USER;

	if (regs_sihv(regs) && (freeze_events_kernel != MMCR0_FCHV))
		return PERF_RECORD_MISC_HYPERVISOR;

	return PERF_RECORD_MISC_KERNEL;
}
/*
 * Overload regs->dsisr to store MMCRA so we only need to read it once
 * on each interrupt.
 * Overload regs->dar to store SIER if we have it.
 * Overload regs->result to specify whether we should use the MSR (result
 * is zero) or the SIAR (result is non zero).
 */
static inline void perf_read_regs(struct pt_regs *regs)
{
	unsigned long mmcra = mfspr(SPRN_MMCRA);
	int marked = mmcra & MMCRA_SAMPLE_ENABLE;
	int use_siar;

	regs->dsisr = mmcra;

	if (ppmu->flags & PPMU_HAS_SIER)
		regs->dar = mfspr(SPRN_SIER);

	/*
	 * If this isn't a PMU exception (eg a software event) the SIAR is
	 * not valid. Use pt_regs.
	 *
	 * If it is a marked event use the SIAR.
	 *
	 * If the PMU doesn't update the SIAR for non marked events use
	 * pt_regs.
	 *
	 * If the PMU has HV/PR flags then check to see if they
	 * place the exception in userspace. If so, use pt_regs. In
	 * continuous sampling mode the SIAR and the PMU exception are
	 * not synchronised, so they may be many instructions apart.
	 * This can result in confusing backtraces. We still want
	 * hypervisor samples as well as samples in the kernel with
	 * interrupts off hence the userspace check.
	 */
	if (TRAP(regs) != 0xf00)
		use_siar = 0;
	else if ((ppmu->flags & PPMU_NO_SIAR))
		use_siar = 0;
	else if (marked)
		use_siar = 1;
	else if ((ppmu->flags & PPMU_NO_CONT_SAMPLING))
		use_siar = 0;
	else if (!(ppmu->flags & PPMU_NO_SIPR) && regs_sipr(regs))
		use_siar = 0;
	else
		use_siar = 1;

	regs->result = use_siar;
}
/*
 * If interrupts were soft-disabled when a PMU interrupt occurs, treat
 * it as an NMI.
 */
static inline int perf_intr_is_nmi(struct pt_regs *regs)
{
	return (regs->softe & IRQS_DISABLED);
}
/*
 * On processors like P7+ that have the SIAR-Valid bit, marked instructions
 * must be sampled only if the SIAR-valid bit is set.
 *
 * For unmarked instructions and for processors that don't have the SIAR-Valid
 * bit, assume that SIAR is valid.
 */
static inline int siar_valid(struct pt_regs *regs)
{
	unsigned long mmcra = regs->dsisr;
	int marked = mmcra & MMCRA_SAMPLE_ENABLE;

	if (marked) {
		if (ppmu->flags & PPMU_HAS_SIER)
			return regs->dar & SIER_SIAR_VALID;

		if (ppmu->flags & PPMU_SIAR_VALID)
			return mmcra & POWER7P_MMCRA_SIAR_VALID;
	}

	return 1;
}
/* Reset all possible BHRB entries */
static void power_pmu_bhrb_reset(void)
{
	asm volatile(PPC_CLRBHRB);
}
static void power_pmu_bhrb_enable(struct perf_event *event)
{
	struct cpu_hw_events *cpuhw = this_cpu_ptr(&cpu_hw_events);

	/* Clear BHRB if we changed task context to avoid data leaks */
	if (event->ctx->task && cpuhw->bhrb_context != event->ctx) {
		power_pmu_bhrb_reset();
		cpuhw->bhrb_context = event->ctx;
	}
	cpuhw->bhrb_users++;
	perf_sched_cb_inc(event->ctx->pmu);
}
static void power_pmu_bhrb_disable(struct perf_event *event)
{
	struct cpu_hw_events *cpuhw = this_cpu_ptr(&cpu_hw_events);

	WARN_ON_ONCE(!cpuhw->bhrb_users);
	cpuhw->bhrb_users--;
	perf_sched_cb_dec(event->ctx->pmu);

	if (!cpuhw->disabled && !cpuhw->bhrb_users) {
		/* BHRB cannot be turned off when other
		 * events are active on the PMU.
		 */

		/* avoid stale pointer */
		cpuhw->bhrb_context = NULL;
	}
}
/* Called from ctxsw to prevent one process's branch entries to
 * mingle with the other process's entries during context switch.
 */
static void power_pmu_sched_task(struct perf_event_context *ctx, bool sched_in)
{
	if (sched_in)
		power_pmu_bhrb_reset();
}
/* Calculate the to address for a branch */
static __u64 power_pmu_bhrb_to(u64 addr)
{
	unsigned int instr;
	int ret;
	__u64 target;

	if (is_kernel_addr(addr)) {
		if (probe_kernel_read(&instr, (void *)addr, sizeof(instr)))
			return 0;

		return branch_target(&instr);
	}

	/* Userspace: need copy instruction here then translate it */
	pagefault_disable();
	ret = __get_user_inatomic(instr, (unsigned int __user *)addr);
	if (ret) {
		pagefault_enable();
		return 0;
	}
	pagefault_enable();

	target = branch_target(&instr);
	if ((!target) || (instr & BRANCH_ABSOLUTE))
		return target;

	/* Translate relative branch target from kernel to user address */
	return target - (unsigned long)&instr + addr;
}
/* Processing BHRB entries */
static void power_pmu_bhrb_read(struct cpu_hw_events *cpuhw)
{
	u64 val;
	u64 addr;
	int r_index, u_index, pred;

	r_index = 0;
	u_index = 0;
	while (r_index < ppmu->bhrb_nr) {
		/* Assembly read function */
		val = read_bhrb(r_index++);
		if (!val)
			/* Terminal marker: End of valid BHRB entries */
			break;

		addr = val & BHRB_EA;
		pred = val & BHRB_PREDICTION;

		/*
		 * BHRB rolling buffer could very much contain the kernel
		 * addresses at this point. Check the privileges before
		 * exporting it to userspace (avoid exposure of regions
		 * where we could have speculative execution)
		 */
		if (perf_paranoid_kernel() && !capable(CAP_SYS_ADMIN) &&
		    is_kernel_addr(addr))
			continue;

		/* Branches are read most recent first (ie. mfbhrb 0 is
		 * the most recent branch).
		 * There are two types of valid entries:
		 * 1) a target entry which is the to address of a
		 *    computed goto like a blr,bctr,btar.  The next
		 *    entry read from the bhrb will be the branch
		 *    corresponding to this target (ie. the actual
		 *    blr/bctr/btar instruction).
		 * 2) a from address which is an actual branch.  If a
		 *    target entry precedes this, then this is the
		 *    matching branch for that target.  If this is not
		 *    following a target entry, then this is a branch
		 *    where the target is given as an immediate field
		 *    in the instruction (ie. an i or b form branch).
		 *    In this case we need to read the instruction from
		 *    memory to determine the target/to address.
		 */
		if (val & BHRB_TARGET) {
			/* Target branches use two entries
			 * (ie. computed gotos/XL form)
			 */
			cpuhw->bhrb_entries[u_index].to = addr;
			cpuhw->bhrb_entries[u_index].mispred = pred;
			cpuhw->bhrb_entries[u_index].predicted = ~pred;

			/* Get from address in next entry */
			val = read_bhrb(r_index++);
			addr = val & BHRB_EA;
			if (val & BHRB_TARGET) {
				/* Shouldn't have two targets in a
				 * row.. Reset index and try again */
				r_index--;
				addr = 0;
			}
			cpuhw->bhrb_entries[u_index].from = addr;
		} else {
			/* Branches to immediate field
			 * (ie I or B form) */
			cpuhw->bhrb_entries[u_index].from = addr;
			cpuhw->bhrb_entries[u_index].to =
				power_pmu_bhrb_to(addr);
			cpuhw->bhrb_entries[u_index].mispred = pred;
			cpuhw->bhrb_entries[u_index].predicted = ~pred;
		}
		u_index++;
	}
	cpuhw->bhrb_stack.nr = u_index;
}
static bool is_ebb_event(struct perf_event *event)
{
	/*
	 * This could be a per-PMU callback, but we'd rather avoid the cost. We
	 * check that the PMU supports EBB, meaning those that don't can still
	 * use bit 63 of the event code for something else if they wish.
	 */
	return (ppmu->flags & PPMU_ARCH_207S) &&
	       ((event->attr.config >> PERF_EVENT_CONFIG_EBB_SHIFT) & 1);
}
static int ebb_event_check(struct perf_event *event)
{
	struct perf_event *leader = event->group_leader;

	/* Event and group leader must agree on EBB */
	if (is_ebb_event(leader) != is_ebb_event(event))
		return -EINVAL;

	if (is_ebb_event(event)) {
		if (!(event->attach_state & PERF_ATTACH_TASK))
			return -EINVAL;

		if (!leader->attr.pinned || !leader->attr.exclusive)
			return -EINVAL;

		if (event->attr.freq ||
		    event->attr.inherit ||
		    event->attr.sample_type ||
		    event->attr.sample_period ||
		    event->attr.enable_on_exec)
			return -EINVAL;
	}

	return 0;
}
static void ebb_event_add(struct perf_event *event)
{
	if (!is_ebb_event(event) || current->thread.used_ebb)
		return;

	/*
	 * IFF this is the first time we've added an EBB event, set
	 * PMXE in the user MMCR0 so we can detect when it's cleared by
	 * userspace. We need this so that we can context switch while
	 * userspace is in the EBB handler (where PMXE is 0).
	 */
	current->thread.used_ebb = 1;
	current->thread.mmcr0 |= MMCR0_PMXE;
}
static void ebb_switch_out(unsigned long mmcr0)
{
	if (!(mmcr0 & MMCR0_EBE))
		return;

	current->thread.siar  = mfspr(SPRN_SIAR);
	current->thread.sier  = mfspr(SPRN_SIER);
	current->thread.sdar  = mfspr(SPRN_SDAR);
	current->thread.mmcr0 = mmcr0 & MMCR0_USER_MASK;
	current->thread.mmcr2 = mfspr(SPRN_MMCR2) & MMCR2_USER_MASK;
}
static unsigned long ebb_switch_in(bool ebb, struct cpu_hw_events *cpuhw)
{
	unsigned long mmcr0 = cpuhw->mmcr[0];

	if (!ebb)
		goto out;

	/* Enable EBB and read/write to all 6 PMCs and BHRB for userspace */
	mmcr0 |= MMCR0_EBE | MMCR0_BHRBA | MMCR0_PMCC_U6;

	/*
	 * Add any bits from the user MMCR0, FC or PMAO. This is compatible
	 * with pmao_restore_workaround() because we may add PMAO but we never
	 * clear it, so we don't break the workaround.
	 */
	mmcr0 |= current->thread.mmcr0;

	/*
	 * Be careful not to set PMXE if userspace had it cleared. This is also
	 * compatible with pmao_restore_workaround() because it has already
	 * cleared PMXE and we leave PMAO alone.
	 */
	if (!(current->thread.mmcr0 & MMCR0_PMXE))
		mmcr0 &= ~MMCR0_PMXE;

	mtspr(SPRN_SIAR, current->thread.siar);
	mtspr(SPRN_SIER, current->thread.sier);
	mtspr(SPRN_SDAR, current->thread.sdar);

	/*
	 * Merge the kernel & user values of MMCR2. The semantics we implement
	 * are that the user MMCR2 can set bits, ie. cause counters to freeze,
	 * but not clear bits. If a task wants to be able to clear bits, ie.
	 * unfreeze counters, it should not set exclude_xxx in its events and
	 * instead manage the MMCR2 entirely by itself.
	 */
	mtspr(SPRN_MMCR2, cpuhw->mmcr[3] | current->thread.mmcr2);
out:
	return mmcr0;
}
static void pmao_restore_workaround(bool ebb)
{
	unsigned pmcs[6];

	if (!cpu_has_feature(CPU_FTR_PMAO_BUG))
		return;

	/*
	 * On POWER8E there is a hardware defect which affects the PMU context
	 * switch logic, ie. power_pmu_disable/enable().
	 *
	 * When a counter overflows PMXE is cleared and FC/PMAO is set in MMCR0
	 * by the hardware. Sometime later the actual PMU exception is
	 * delivered.
	 *
	 * If we context switch, or simply disable/enable, the PMU prior to the
	 * exception arriving, the exception will be lost when we clear PMAO.
	 *
	 * When we reenable the PMU, we will write the saved MMCR0 with PMAO
	 * set, and this _should_ generate an exception. However because of the
	 * defect no exception is generated when we write PMAO, and we get
	 * stuck with no counters counting but no exception delivered.
	 *
	 * The workaround is to detect this case and tweak the hardware to
	 * create another pending PMU exception.
	 *
	 * We do that by setting up PMC6 (cycles) for an imminent overflow and
	 * enabling the PMU. That causes a new exception to be generated in the
	 * chip, but we don't take it yet because we have interrupts hard
	 * disabled. We then write back the PMU state as we want it to be seen
	 * by the exception handler. When we reenable interrupts the exception
	 * handler will be called and see the correct state.
	 *
	 * The logic is the same for EBB, except that the exception is gated by
	 * us having interrupts hard disabled as well as the fact that we are
	 * not in userspace. The exception is finally delivered when we return
	 * to userspace.
	 */

	/* Only if PMAO is set and PMAO_SYNC is clear */
	if ((current->thread.mmcr0 & (MMCR0_PMAO | MMCR0_PMAO_SYNC)) != MMCR0_PMAO)
		return;

	/* If we're doing EBB, only if BESCR[GE] is set */
	if (ebb && !(current->thread.bescr & BESCR_GE))
		return;

	/*
	 * We are already soft-disabled in power_pmu_enable(). We need to hard
	 * disable to actually prevent the PMU exception from firing.
	 */
	hard_irq_disable();

	/*
	 * This is a bit gross, but we know we're on POWER8E and have 6 PMCs.
	 * Using read/write_pmc() in a for loop adds 12 function calls and
	 * almost doubles our code size.
	 */
	pmcs[0] = mfspr(SPRN_PMC1);
	pmcs[1] = mfspr(SPRN_PMC2);
	pmcs[2] = mfspr(SPRN_PMC3);
	pmcs[3] = mfspr(SPRN_PMC4);
	pmcs[4] = mfspr(SPRN_PMC5);
	pmcs[5] = mfspr(SPRN_PMC6);

	/* Ensure all freeze bits are unset */
	mtspr(SPRN_MMCR2, 0);

	/* Set up PMC6 to overflow in one cycle */
	mtspr(SPRN_PMC6, 0x7FFFFFFE);

	/* Enable exceptions and unfreeze PMC6 */
	mtspr(SPRN_MMCR0, MMCR0_PMXE | MMCR0_PMCjCE | MMCR0_PMAO);

	/* Now we need to refreeze and restore the PMCs */
	mtspr(SPRN_MMCR0, MMCR0_FC | MMCR0_PMAO);

	mtspr(SPRN_PMC1, pmcs[0]);
	mtspr(SPRN_PMC2, pmcs[1]);
	mtspr(SPRN_PMC3, pmcs[2]);
	mtspr(SPRN_PMC4, pmcs[3]);
	mtspr(SPRN_PMC5, pmcs[4]);
	mtspr(SPRN_PMC6, pmcs[5]);
}
#endif /* CONFIG_PPC64 */
static void perf_event_interrupt(struct pt_regs *regs);
/*
 * Read one performance monitor counter (PMC).
 */
static unsigned long read_pmc(int idx)
{
	unsigned long val;

	switch (idx) {
	case 1:
		val = mfspr(SPRN_PMC1);
		break;
	case 2:
		val = mfspr(SPRN_PMC2);
		break;
	case 3:
		val = mfspr(SPRN_PMC3);
		break;
	case 4:
		val = mfspr(SPRN_PMC4);
		break;
	case 5:
		val = mfspr(SPRN_PMC5);
		break;
	case 6:
		val = mfspr(SPRN_PMC6);
		break;
#ifdef CONFIG_PPC64
	case 7:
		val = mfspr(SPRN_PMC7);
		break;
	case 8:
		val = mfspr(SPRN_PMC8);
		break;
#endif /* CONFIG_PPC64 */
	default:
		printk(KERN_ERR "oops trying to read PMC%d\n", idx);
		val = 0;
	}
	return val;
}
static void write_pmc(int idx, unsigned long val)
{
	switch (idx) {
	case 1:
		mtspr(SPRN_PMC1, val);
		break;
	case 2:
		mtspr(SPRN_PMC2, val);
		break;
	case 3:
		mtspr(SPRN_PMC3, val);
		break;
	case 4:
		mtspr(SPRN_PMC4, val);
		break;
	case 5:
		mtspr(SPRN_PMC5, val);
		break;
	case 6:
		mtspr(SPRN_PMC6, val);
		break;
#ifdef CONFIG_PPC64
	case 7:
		mtspr(SPRN_PMC7, val);
		break;
	case 8:
		mtspr(SPRN_PMC8, val);
		break;
#endif /* CONFIG_PPC64 */
	default:
		printk(KERN_ERR "oops trying to write PMC%d\n", idx);
	}
}
/* Called from sysrq_handle_showregs() */
void perf_event_print_debug(void)
{
	unsigned long sdar, sier, flags;
	u32 pmcs[MAX_HWEVENTS];
	int i;

	if (!ppmu) {
		pr_info("Performance monitor hardware not registered.\n");
		return;
	}

	if (!ppmu->n_counter)
		return;

	local_irq_save(flags);

	pr_info("CPU: %d PMU registers, ppmu = %s n_counters = %d",
		 smp_processor_id(), ppmu->name, ppmu->n_counter);

	for (i = 0; i < ppmu->n_counter; i++)
		pmcs[i] = read_pmc(i + 1);

	for (; i < MAX_HWEVENTS; i++)
		pmcs[i] = 0xdeadbeef;

	pr_info("PMC1:  %08x PMC2: %08x PMC3: %08x PMC4: %08x\n",
		 pmcs[0], pmcs[1], pmcs[2], pmcs[3]);

	if (ppmu->n_counter > 4)
		pr_info("PMC5:  %08x PMC6: %08x PMC7: %08x PMC8: %08x\n",
			 pmcs[4], pmcs[5], pmcs[6], pmcs[7]);

	pr_info("MMCR0: %016lx MMCR1: %016lx MMCRA: %016lx\n",
		mfspr(SPRN_MMCR0), mfspr(SPRN_MMCR1), mfspr(SPRN_MMCRA));

	sdar = sier = 0;
	sdar = mfspr(SPRN_SDAR);

	if (ppmu->flags & PPMU_HAS_SIER)
		sier = mfspr(SPRN_SIER);

	if (ppmu->flags & PPMU_ARCH_207S) {
		pr_info("MMCR2: %016lx EBBHR: %016lx\n",
			mfspr(SPRN_MMCR2), mfspr(SPRN_EBBHR));
		pr_info("EBBRR: %016lx BESCR: %016lx\n",
			mfspr(SPRN_EBBRR), mfspr(SPRN_BESCR));
	}

	pr_info("SIAR:  %016lx SDAR:  %016lx SIER:  %016lx\n",
		mfspr(SPRN_SIAR), sdar, sier);

	local_irq_restore(flags);
}
/*
 * Check if a set of events can all go on the PMU at once.
 * If they can't, this will look at alternative codes for the events
 * and see if any combination of alternative codes is feasible.
 * The feasible set is returned in event_id[].
 */
static int power_check_constraints(struct cpu_hw_events *cpuhw,
				   u64 event_id[], unsigned int cflags[],
				   int n_ev)
{
	unsigned long mask, value, nv;
	unsigned long smasks[MAX_HWEVENTS], svalues[MAX_HWEVENTS];
	int n_alt[MAX_HWEVENTS], choice[MAX_HWEVENTS];
	int i, j;
	unsigned long addf = ppmu->add_fields;
	unsigned long tadd = ppmu->test_adder;

	if (n_ev > ppmu->n_counter)
		return -1;

	/* First see if the events will go on as-is */
	for (i = 0; i < n_ev; ++i) {
		if ((cflags[i] & PPMU_LIMITED_PMC_REQD)
		    && !ppmu->limited_pmc_event(event_id[i])) {
			ppmu->get_alternatives(event_id[i], cflags[i],
					       cpuhw->alternatives[i]);
			event_id[i] = cpuhw->alternatives[i][0];
		}
		if (ppmu->get_constraint(event_id[i], &cpuhw->amasks[i][0],
					 &cpuhw->avalues[i][0]))
			return -1;
	}
	value = mask = 0;
	for (i = 0; i < n_ev; ++i) {
		nv = (value | cpuhw->avalues[i][0]) +
			(value & cpuhw->avalues[i][0] & addf);
		if ((((nv + tadd) ^ value) & mask) != 0 ||
		    (((nv + tadd) ^ cpuhw->avalues[i][0]) &
		     cpuhw->amasks[i][0]) != 0)
			break;
		value = nv;
		mask |= cpuhw->amasks[i][0];
	}
	if (i == n_ev)
		return 0;	/* all OK */

	/* doesn't work, gather alternatives... */
	if (!ppmu->get_alternatives)
		return -1;
	for (i = 0; i < n_ev; ++i) {
		choice[i] = 0;
		n_alt[i] = ppmu->get_alternatives(event_id[i], cflags[i],
						  cpuhw->alternatives[i]);
		for (j = 1; j < n_alt[i]; ++j)
			ppmu->get_constraint(cpuhw->alternatives[i][j],
					     &cpuhw->amasks[i][j],
					     &cpuhw->avalues[i][j]);
	}

	/* enumerate all possibilities and see if any will work */
	i = 0;
	j = -1;
	value = mask = nv = 0;
	while (1) {
		/* we're backtracking, restore context */
		if (j >= 0) {
			value = svalues[i];
			mask = smasks[i];
			j = choice[i];
		}
		/*
		 * See if any alternative k for event_id i,
		 * where k > j, will satisfy the constraints.
		 */
		while (++j < n_alt[i]) {
			nv = (value | cpuhw->avalues[i][j]) +
				(value & cpuhw->avalues[i][j] & addf);
			if ((((nv + tadd) ^ value) & mask) == 0 &&
			    (((nv + tadd) ^ cpuhw->avalues[i][j])
			     & cpuhw->amasks[i][j]) == 0)
				break;
		}
		if (j >= n_alt[i]) {
			/*
			 * No feasible alternative, backtrack
			 * to event_id i-1 and continue enumerating its
			 * alternatives from where we got up to.
			 */
			if (--i < 0)
				return -1;
		} else {
			/*
			 * Found a feasible alternative for event_id i,
			 * remember where we got up to with this event_id,
			 * go on to the next event_id, and start with
			 * the first alternative for it.
			 */
			choice[i] = j;
			svalues[i] = value;
			smasks[i] = mask;
			value = nv;
			mask |= cpuhw->amasks[i][j];
			if (++i >= n_ev)
				break;
			j = -1;
		}
	}

	/* OK, we have a feasible combination, tell the caller the solution */
	for (i = 0; i < n_ev; ++i)
		event_id[i] = cpuhw->alternatives[i][choice[i]];
	return 0;
}
/*
 * Check if newly-added events have consistent settings for
 * exclude_{user,kernel,hv} with each other and any previously
 * added events.
 */
static int check_excludes(struct perf_event **ctrs, unsigned int cflags[],
			  int n_prev, int n_new)
{
	int eu = 0, ek = 0, eh = 0;
	int i, n, first;
	struct perf_event *event;

	/*
	 * If the PMU we're on supports per event exclude settings then we
	 * don't need to do any of this logic. NB. This assumes no PMU has both
	 * per event exclude and limited PMCs.
	 */
	if (ppmu->flags & PPMU_ARCH_207S)
		return 0;

	n = n_prev + n_new;
	if (n <= 1)
		return 0;

	first = 1;
	for (i = 0; i < n; ++i) {
		if (cflags[i] & PPMU_LIMITED_PMC_OK) {
			cflags[i] &= ~PPMU_LIMITED_PMC_REQD;
			continue;
		}
		event = ctrs[i];
		if (first) {
			eu = event->attr.exclude_user;
			ek = event->attr.exclude_kernel;
			eh = event->attr.exclude_hv;
			first = 0;
		} else if (event->attr.exclude_user != eu ||
			   event->attr.exclude_kernel != ek ||
			   event->attr.exclude_hv != eh) {
			return -EAGAIN;
		}
	}

	if (eu || ek || eh)
		for (i = 0; i < n; ++i)
			if (cflags[i] & PPMU_LIMITED_PMC_OK)
				cflags[i] |= PPMU_LIMITED_PMC_REQD;

	return 0;
}
static u64 check_and_compute_delta(u64 prev, u64 val)
{
	u64 delta = (val - prev) & 0xfffffffful;

	/*
	 * POWER7 can roll back counter values, if the new value is smaller
	 * than the previous value it will cause the delta and the counter to
	 * have bogus values unless we rolled a counter over.  If a counter is
	 * rolled back, it will be smaller, but within 256, which is the maximum
	 * number of events to rollback at once.  If we detect a rollback
	 * return 0.  This can lead to a small lack of precision in the
	 * counters.
	 */
	if (prev > val && (prev - val) < 256)
		delta = 0;

	return delta;
}
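/*
 * For illustration (values invented for this note): with prev = 0x1000 and
 * val = 0x0ffb the counter has moved backwards by 5 (< 256), so the check
 * above treats it as a speculative rollback and returns 0; with
 * prev = 0xfffffff0 and val = 0x00000010 the counter has genuinely wrapped
 * and the masked subtraction yields a delta of 0x20.
 */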
static void power_pmu_read(struct perf_event *event)
{
	s64 val, delta, prev;

	if (event->hw.state & PERF_HES_STOPPED)
		return;

	if (!event->hw.idx)
		return;

	if (is_ebb_event(event)) {
		val = read_pmc(event->hw.idx);
		local64_set(&event->hw.prev_count, val);
		return;
	}

	/*
	 * Performance monitor interrupts come even when interrupts
	 * are soft-disabled, as long as interrupts are hard-enabled.
	 * Therefore we treat them like NMIs.
	 */
	do {
		prev = local64_read(&event->hw.prev_count);
		barrier();
		val = read_pmc(event->hw.idx);
		delta = check_and_compute_delta(prev, val);
		if (!delta)
			return;
	} while (local64_cmpxchg(&event->hw.prev_count, prev, val) != prev);

	local64_add(delta, &event->count);

	/*
	 * A number of places program the PMC with (0x80000000 - period_left).
	 * We never want period_left to be less than 1 because we will program
	 * the PMC with a value >= 0x80000000 and an edge detected PMC will
	 * roll around to 0 before taking an exception. We have seen this
	 * on POWER8.
	 *
	 * To fix this, clamp the minimum value of period_left to 1.
	 */
	do {
		prev = local64_read(&event->hw.period_left);
		val = prev;
		if (val < 1)
			val = 1;
	} while (local64_cmpxchg(&event->hw.period_left, prev, val) != prev);
}
/*
 * On some machines, PMC5 and PMC6 can't be written, don't respect
 * the freeze conditions, and don't generate interrupts.  This tells
 * us if `event' is using such a PMC.
 */
static int is_limited_pmc(int pmcnum)
{
	return (ppmu->flags & PPMU_LIMITED_PMC5_6)
		&& (pmcnum == 5 || pmcnum == 6);
}
static void freeze_limited_counters(struct cpu_hw_events *cpuhw,
				    unsigned long pmc5, unsigned long pmc6)
{
	struct perf_event *event;
	u64 val, prev, delta;
	int i;

	for (i = 0; i < cpuhw->n_limited; ++i) {
		event = cpuhw->limited_counter[i];
		if (!event->hw.idx)
			continue;
		val = (event->hw.idx == 5) ? pmc5 : pmc6;
		prev = local64_read(&event->hw.prev_count);
		event->hw.idx = 0;
		delta = check_and_compute_delta(prev, val);
		if (delta)
			local64_add(delta, &event->count);
	}
}
static void thaw_limited_counters(struct cpu_hw_events *cpuhw,
				  unsigned long pmc5, unsigned long pmc6)
{
	struct perf_event *event;
	u64 val, prev;
	int i;

	for (i = 0; i < cpuhw->n_limited; ++i) {
		event = cpuhw->limited_counter[i];
		event->hw.idx = cpuhw->limited_hwidx[i];
		val = (event->hw.idx == 5) ? pmc5 : pmc6;
		prev = local64_read(&event->hw.prev_count);
		if (check_and_compute_delta(prev, val))
			local64_set(&event->hw.prev_count, val);
		perf_event_update_userpage(event);
	}
}
/*
 * Since limited events don't respect the freeze conditions, we
 * have to read them immediately after freezing or unfreezing the
 * other events.  We try to keep the values from the limited
 * events as consistent as possible by keeping the delay (in
 * cycles and instructions) between freezing/unfreezing and reading
 * the limited events as small and consistent as possible.
 * Therefore, if any limited events are in use, we read them
 * both, and always in the same order, to minimize variability,
 * and do it inside the same asm that writes MMCR0.
 */
static void write_mmcr0(struct cpu_hw_events *cpuhw, unsigned long mmcr0)
{
	unsigned long pmc5, pmc6;

	if (!cpuhw->n_limited) {
		mtspr(SPRN_MMCR0, mmcr0);
		return;
	}

	/*
	 * Write MMCR0, then read PMC5 and PMC6 immediately.
	 * To ensure we don't get a performance monitor interrupt
	 * between writing MMCR0 and freezing/thawing the limited
	 * events, we first write MMCR0 with the event overflow
	 * interrupt enable bits turned off.
	 */
	asm volatile("mtspr %3,%2; mfspr %0,%4; mfspr %1,%5"
		     : "=&r" (pmc5), "=&r" (pmc6)
		     : "r" (mmcr0 & ~(MMCR0_PMC1CE | MMCR0_PMCjCE)),
		       "i" (SPRN_MMCR0),
		       "i" (SPRN_PMC5), "i" (SPRN_PMC6));
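	/*
	 * Operand map for the asm above: %0/%1 receive PMC5/PMC6 ("=&r"
	 * outputs), %2 is mmcr0 with the overflow-interrupt enables masked
	 * off, and %3..%5 are the SPR numbers (SPRN_MMCR0, SPRN_PMC5,
	 * SPRN_PMC6) passed as immediates, so the write and the two reads
	 * happen back to back.
	 */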
	if (mmcr0 & MMCR0_FC)
		freeze_limited_counters(cpuhw, pmc5, pmc6);
	else
		thaw_limited_counters(cpuhw, pmc5, pmc6);

	/*
	 * Write the full MMCR0 including the event overflow interrupt
	 * enable bits, if necessary.
	 */
	if (mmcr0 & (MMCR0_PMC1CE | MMCR0_PMCjCE))
		mtspr(SPRN_MMCR0, mmcr0);
}
/*
 * Disable all events to prevent PMU interrupts and to allow
 * events to be added or removed.
 */
static void power_pmu_disable(struct pmu *pmu)
{
	struct cpu_hw_events *cpuhw;
	unsigned long flags, mmcr0, val;

	local_irq_save(flags);
	cpuhw = this_cpu_ptr(&cpu_hw_events);

	if (!cpuhw->disabled) {
		/*
		 * Check if we ever enabled the PMU on this cpu.
		 */
		if (!cpuhw->pmcs_enabled) {
			ppc_enable_pmcs();
			cpuhw->pmcs_enabled = 1;
		}

		/*
		 * Set the 'freeze counters' bit, clear EBE/BHRBA/PMCC/PMAO/FC56
		 */
		val  = mmcr0 = mfspr(SPRN_MMCR0);
		val |= MMCR0_FC;
		val &= ~(MMCR0_EBE | MMCR0_BHRBA | MMCR0_PMCC | MMCR0_PMAO |
			 MMCR0_FC56);

		/*
		 * The barrier is to make sure the mtspr has been
		 * executed and the PMU has frozen the events etc.
		 * before we return.
		 */
		write_mmcr0(cpuhw, val);
		mb();
		isync();

		/*
		 * Disable instruction sampling if it was enabled
		 */
		if (cpuhw->mmcr[2] & MMCRA_SAMPLE_ENABLE) {
			mtspr(SPRN_MMCRA,
			      cpuhw->mmcr[2] & ~MMCRA_SAMPLE_ENABLE);
			mb();
			isync();
		}

		cpuhw->disabled = 1;
		cpuhw->n_added = 0;

		ebb_switch_out(mmcr0);

		/*
		 * These are readable by userspace, may contain kernel
		 * addresses and are not switched by context switch, so clear
		 * them now to avoid leaking anything to userspace in general
		 * including to another process.
		 */
		if (ppmu->flags & PPMU_ARCH_207S) {
			mtspr(SPRN_SDAR, 0);
			mtspr(SPRN_SIAR, 0);
		}
	}

	local_irq_restore(flags);
}
/*
 * Re-enable all events if disable == 0.
 * If we were previously disabled and events were added, then
 * put the new config on the PMU.
 */
static void power_pmu_enable(struct pmu *pmu)
{
	struct perf_event *event;
	struct cpu_hw_events *cpuhw;
	unsigned long flags;
	long i;
	unsigned long val, mmcr0;
	s64 left;
	unsigned int hwc_index[MAX_HWEVENTS];
	int n_lim;
	int idx;
	bool ebb;

	local_irq_save(flags);

	cpuhw = this_cpu_ptr(&cpu_hw_events);
	if (!cpuhw->disabled)
		goto out;

	if (cpuhw->n_events == 0) {
		ppc_set_pmu_inuse(0);
		goto out;
	}

	cpuhw->disabled = 0;

	/*
	 * EBB requires an exclusive group and all events must have the EBB
	 * flag set, or not set, so we can just check a single event. Also we
	 * know we have at least one event.
	 */
	ebb = is_ebb_event(cpuhw->event[0]);

	/*
	 * If we didn't change anything, or only removed events,
	 * no need to recalculate MMCR* settings and reset the PMCs.
	 * Just reenable the PMU with the current MMCR* settings
	 * (possibly updated for removal of events).
	 */
	if (!cpuhw->n_added) {
		mtspr(SPRN_MMCRA, cpuhw->mmcr[2] & ~MMCRA_SAMPLE_ENABLE);
		mtspr(SPRN_MMCR1, cpuhw->mmcr[1]);
		goto out_enable;
	}

	/*
	 * Clear all MMCR settings and recompute them for the new set of events.
	 */
	memset(cpuhw->mmcr, 0, sizeof(cpuhw->mmcr));

	if (ppmu->compute_mmcr(cpuhw->events, cpuhw->n_events, hwc_index,
			       cpuhw->mmcr, cpuhw->event)) {
		/* shouldn't ever get here */
		printk(KERN_ERR "oops compute_mmcr failed\n");
		goto out;
	}

	if (!(ppmu->flags & PPMU_ARCH_207S)) {
		/*
		 * Add in MMCR0 freeze bits corresponding to the attr.exclude_*
		 * bits for the first event. We have already checked that all
		 * events have the same value for these bits as the first event.
		 */
		event = cpuhw->event[0];
		if (event->attr.exclude_user)
			cpuhw->mmcr[0] |= MMCR0_FCP;
		if (event->attr.exclude_kernel)
			cpuhw->mmcr[0] |= freeze_events_kernel;
		if (event->attr.exclude_hv)
			cpuhw->mmcr[0] |= MMCR0_FCHV;
	}

	/*
	 * Write the new configuration to MMCR* with the freeze
	 * bit set and set the hardware events to their initial values.
	 * Then unfreeze the events.
	 */
	ppc_set_pmu_inuse(1);
	mtspr(SPRN_MMCRA, cpuhw->mmcr[2] & ~MMCRA_SAMPLE_ENABLE);
	mtspr(SPRN_MMCR1, cpuhw->mmcr[1]);
	mtspr(SPRN_MMCR0, (cpuhw->mmcr[0] & ~(MMCR0_PMC1CE | MMCR0_PMCjCE))
				| MMCR0_FC);
	if (ppmu->flags & PPMU_ARCH_207S)
		mtspr(SPRN_MMCR2, cpuhw->mmcr[3]);

	/*
	 * Read off any pre-existing events that need to move
	 * to another PMC.
	 */
	for (i = 0; i < cpuhw->n_events; ++i) {
		event = cpuhw->event[i];
		if (event->hw.idx && event->hw.idx != hwc_index[i] + 1) {
			power_pmu_read(event);
			write_pmc(event->hw.idx, 0);
			event->hw.idx = 0;
		}
	}

	/*
	 * Initialize the PMCs for all the new and moved events.
	 */
	cpuhw->n_limited = n_lim = 0;
	for (i = 0; i < cpuhw->n_events; ++i) {
		event = cpuhw->event[i];
		if (event->hw.idx)
			continue;
		idx = hwc_index[i] + 1;
		if (is_limited_pmc(idx)) {
			cpuhw->limited_counter[n_lim] = event;
			cpuhw->limited_hwidx[n_lim] = idx;
			++n_lim;
			continue;
		}

		if (ebb)
			val = local64_read(&event->hw.prev_count);
		else {
			val = 0;
			if (event->hw.sample_period) {
				left = local64_read(&event->hw.period_left);
				if (left < 0x80000000L)
					val = 0x80000000L - left;
			}
			local64_set(&event->hw.prev_count, val);
		}

		event->hw.idx = idx;
		if (event->hw.state & PERF_HES_STOPPED)
			val = 0;
		write_pmc(idx, val);

		perf_event_update_userpage(event);
	}
	cpuhw->n_limited = n_lim;
	cpuhw->mmcr[0] |= MMCR0_PMXE | MMCR0_FCECE;

 out_enable:
	pmao_restore_workaround(ebb);

	mmcr0 = ebb_switch_in(ebb, cpuhw);

	mb();
	if (cpuhw->bhrb_users)
		ppmu->config_bhrb(cpuhw->bhrb_filter);

	write_mmcr0(cpuhw, mmcr0);

	/*
	 * Enable instruction sampling if necessary
	 */
	if (cpuhw->mmcr[2] & MMCRA_SAMPLE_ENABLE) {
		mb();
		mtspr(SPRN_MMCRA, cpuhw->mmcr[2]);
	}

 out:
	local_irq_restore(flags);
}
static int collect_events(struct perf_event *group, int max_count,
			  struct perf_event *ctrs[], u64 *events,
			  unsigned int *flags)
{
	int n = 0;
	struct perf_event *event;

	if (group->pmu->task_ctx_nr == perf_hw_context) {
		if (n >= max_count)
			return -1;
		ctrs[n] = group;
		flags[n] = group->hw.event_base;
		events[n++] = group->hw.config;
	}
	for_each_sibling_event(event, group) {
		if (event->pmu->task_ctx_nr == perf_hw_context &&
		    event->state != PERF_EVENT_STATE_OFF) {
			if (n >= max_count)
				return -1;
			ctrs[n] = event;
			flags[n] = event->hw.event_base;
			events[n++] = event->hw.config;
		}
	}
	return n;
}
/*
 * Add an event to the PMU.
 * If all events are not already frozen, then we disable and
 * re-enable the PMU in order to get hw_perf_enable to do the
 * actual work of reconfiguring the PMU.
 */
static int power_pmu_add(struct perf_event *event, int ef_flags)
{
	struct cpu_hw_events *cpuhw;
	unsigned long flags;
	int n0;
	int ret = -EAGAIN;

	local_irq_save(flags);
	perf_pmu_disable(event->pmu);

	/*
	 * Add the event to the list (if there is room)
	 * and check whether the total set is still feasible.
	 */
	cpuhw = this_cpu_ptr(&cpu_hw_events);
	n0 = cpuhw->n_events;
	if (n0 >= ppmu->n_counter)
		goto out;
	cpuhw->event[n0] = event;
	cpuhw->events[n0] = event->hw.config;
	cpuhw->flags[n0] = event->hw.event_base;

	/*
	 * This event may have been disabled/stopped in record_and_restart()
	 * because we exceeded the ->event_limit. If re-starting the event,
	 * clear the ->hw.state (STOPPED and UPTODATE flags), so the user
	 * notification is re-enabled.
	 */
	if (!(ef_flags & PERF_EF_START))
		event->hw.state = PERF_HES_STOPPED | PERF_HES_UPTODATE;
	else
		event->hw.state = 0;

	/*
	 * If group events scheduling transaction was started,
	 * skip the schedulability test here, it will be performed
	 * at commit time(->commit_txn) as a whole
	 */
	if (cpuhw->txn_flags & PERF_PMU_TXN_ADD)
		goto nocheck;

	if (check_excludes(cpuhw->event, cpuhw->flags, n0, 1))
		goto out;
	if (power_check_constraints(cpuhw, cpuhw->events, cpuhw->flags, n0 + 1))
		goto out;
	event->hw.config = cpuhw->events[n0];

nocheck:
	ebb_event_add(event);

	++cpuhw->n_events;
	++cpuhw->n_added;

	ret = 0;
 out:
	if (has_branch_stack(event)) {
		power_pmu_bhrb_enable(event);
		cpuhw->bhrb_filter = ppmu->bhrb_filter_map(
					event->attr.branch_sample_type);
	}

	perf_pmu_enable(event->pmu);
	local_irq_restore(flags);
	return ret;
}
/*
 * Remove an event from the PMU.
 */
static void power_pmu_del(struct perf_event *event, int ef_flags)
{
	struct cpu_hw_events *cpuhw;
	long i;
	unsigned long flags;

	local_irq_save(flags);
	perf_pmu_disable(event->pmu);

	power_pmu_read(event);

	cpuhw = this_cpu_ptr(&cpu_hw_events);
	for (i = 0; i < cpuhw->n_events; ++i) {
		if (event == cpuhw->event[i]) {
			while (++i < cpuhw->n_events) {
				cpuhw->event[i-1] = cpuhw->event[i];
				cpuhw->events[i-1] = cpuhw->events[i];
				cpuhw->flags[i-1] = cpuhw->flags[i];
			}
			--cpuhw->n_events;
			ppmu->disable_pmc(event->hw.idx - 1, cpuhw->mmcr);
			if (event->hw.idx) {
				write_pmc(event->hw.idx, 0);
				event->hw.idx = 0;
			}
			perf_event_update_userpage(event);
			break;
		}
	}
	for (i = 0; i < cpuhw->n_limited; ++i)
		if (event == cpuhw->limited_counter[i])
			break;
	if (i < cpuhw->n_limited) {
		while (++i < cpuhw->n_limited) {
			cpuhw->limited_counter[i-1] = cpuhw->limited_counter[i];
			cpuhw->limited_hwidx[i-1] = cpuhw->limited_hwidx[i];
		}
		--cpuhw->n_limited;
	}
	if (cpuhw->n_events == 0) {
		/* disable exceptions if no events are running */
		cpuhw->mmcr[0] &= ~(MMCR0_PMXE | MMCR0_FCECE);
	}

	if (has_branch_stack(event))
		power_pmu_bhrb_disable(event);

	perf_pmu_enable(event->pmu);
	local_irq_restore(flags);
}
/*
 * POWER-PMU does not support disabling individual counters, hence
 * program their cycle counter to their max value and ignore the interrupts.
 */
static void power_pmu_start(struct perf_event *event, int ef_flags)
{
	unsigned long flags;
	s64 left;
	unsigned long val;

	if (!event->hw.idx || !event->hw.sample_period)
		return;

	if (!(event->hw.state & PERF_HES_STOPPED))
		return;

	if (ef_flags & PERF_EF_RELOAD)
		WARN_ON_ONCE(!(event->hw.state & PERF_HES_UPTODATE));

	local_irq_save(flags);
	perf_pmu_disable(event->pmu);

	event->hw.state = 0;
	left = local64_read(&event->hw.period_left);

	val = 0;
	if (left < 0x80000000L)
		val = 0x80000000L - left;

	write_pmc(event->hw.idx, val);

	perf_event_update_userpage(event);
	perf_pmu_enable(event->pmu);
	local_irq_restore(flags);
}
static void power_pmu_stop(struct perf_event *event, int ef_flags)
{
	unsigned long flags;

	if (!event->hw.idx || !event->hw.sample_period)
		return;

	if (event->hw.state & PERF_HES_STOPPED)
		return;

	local_irq_save(flags);
	perf_pmu_disable(event->pmu);

	power_pmu_read(event);
	event->hw.state |= PERF_HES_STOPPED | PERF_HES_UPTODATE;
	write_pmc(event->hw.idx, 0);

	perf_event_update_userpage(event);
	perf_pmu_enable(event->pmu);
	local_irq_restore(flags);
}
/*
 * Start group events scheduling transaction.
 * Set the flag to make pmu::enable() not perform the
 * schedulability test, it will be performed at commit time.
 *
 * We only support PERF_PMU_TXN_ADD transactions. Save the
 * transaction flags but otherwise ignore non-PERF_PMU_TXN_ADD
 * transactions.
 */
static void power_pmu_start_txn(struct pmu *pmu, unsigned int txn_flags)
{
	struct cpu_hw_events *cpuhw = this_cpu_ptr(&cpu_hw_events);

	WARN_ON_ONCE(cpuhw->txn_flags);		/* txn already in flight */

	cpuhw->txn_flags = txn_flags;
	if (txn_flags & ~PERF_PMU_TXN_ADD)
		return;

	perf_pmu_disable(pmu);
	cpuhw->n_txn_start = cpuhw->n_events;
}
/*
 * Stop group events scheduling transaction.
 * Clear the flag and pmu::enable() will perform the
 * schedulability test.
 */
static void power_pmu_cancel_txn(struct pmu *pmu)
{
	struct cpu_hw_events *cpuhw = this_cpu_ptr(&cpu_hw_events);
	unsigned int txn_flags;

	WARN_ON_ONCE(!cpuhw->txn_flags);	/* no txn in flight */

	txn_flags = cpuhw->txn_flags;
	cpuhw->txn_flags = 0;
	if (txn_flags & ~PERF_PMU_TXN_ADD)
		return;

	perf_pmu_enable(pmu);
}
/*
 * Commit group events scheduling transaction.
 * Perform the group schedulability test as a whole.
 * Return 0 if success.
 */
static int power_pmu_commit_txn(struct pmu *pmu)
{
	struct cpu_hw_events *cpuhw;
	long i, n;

	cpuhw = this_cpu_ptr(&cpu_hw_events);
	WARN_ON_ONCE(!cpuhw->txn_flags);	/* no txn in flight */

	if (cpuhw->txn_flags & ~PERF_PMU_TXN_ADD) {
		cpuhw->txn_flags = 0;
		return 0;
	}

	n = cpuhw->n_events;
	if (check_excludes(cpuhw->event, cpuhw->flags, 0, n))
		return -EAGAIN;
	i = power_check_constraints(cpuhw, cpuhw->events, cpuhw->flags, n);
	if (i < 0)
		return -EAGAIN;

	for (i = cpuhw->n_txn_start; i < n; ++i)
		cpuhw->event[i]->hw.config = cpuhw->events[i];

	cpuhw->txn_flags = 0;
	perf_pmu_enable(pmu);
	return 0;
}
/*
 * Return 1 if we might be able to put event on a limited PMC,
 * or 0 if not.
 * An event can only go on a limited PMC if it counts something
 * that a limited PMC can count, doesn't require interrupts, and
 * doesn't exclude any processor mode.
 */
static int can_go_on_limited_pmc(struct perf_event *event, u64 ev,
				 unsigned int flags)
{
	int n;
	u64 alt[MAX_EVENT_ALTERNATIVES];

	if (event->attr.exclude_user
	    || event->attr.exclude_kernel
	    || event->attr.exclude_hv
	    || event->attr.sample_period)
		return 0;

	if (ppmu->limited_pmc_event(ev))
		return 1;

	/*
	 * The requested event_id isn't on a limited PMC already;
	 * see if any alternative code goes on a limited PMC.
	 */
	if (!ppmu->get_alternatives)
		return 0;

	flags |= PPMU_LIMITED_PMC_OK | PPMU_LIMITED_PMC_REQD;
	n = ppmu->get_alternatives(ev, flags, alt);

	return n > 0;
}
/*
 * Find an alternative event_id that goes on a normal PMC, if possible,
 * and return the event_id code, or 0 if there is no such alternative.
 * (Note: event_id code 0 is "don't count" on all machines.)
 */
static u64 normal_pmc_alternative(u64 ev, unsigned long flags)
{
	u64 alt[MAX_EVENT_ALTERNATIVES];
	int n;

	flags &= ~(PPMU_LIMITED_PMC_OK | PPMU_LIMITED_PMC_REQD);
	n = ppmu->get_alternatives(ev, flags, alt);
	if (!n)
		return 0;
	return alt[0];
}
/* Number of perf_events counting hardware events */
static atomic_t num_events;
/* Used to avoid races in calling reserve/release_pmc_hardware */
static DEFINE_MUTEX(pmc_reserve_mutex);
/*
 * Release the PMU if this is the last perf_event.
 */
static void hw_perf_event_destroy(struct perf_event *event)
{
	if (!atomic_add_unless(&num_events, -1, 1)) {
		mutex_lock(&pmc_reserve_mutex);
		if (atomic_dec_return(&num_events) == 0)
			release_pmc_hardware();
		mutex_unlock(&pmc_reserve_mutex);
	}
}
/*
 * Translate a generic cache event_id config to a raw event_id code.
 */
static int hw_perf_cache_event(u64 config, u64 *eventp)
{
	unsigned long type, op, result;
	int ev;

	if (!ppmu->cache_events)
		return -EINVAL;

	/* unpack config */
	type = config & 0xff;
	op = (config >> 8) & 0xff;
	result = (config >> 16) & 0xff;

	if (type >= PERF_COUNT_HW_CACHE_MAX ||
	    op >= PERF_COUNT_HW_CACHE_OP_MAX ||
	    result >= PERF_COUNT_HW_CACHE_RESULT_MAX)
		return -EINVAL;

	ev = (*ppmu->cache_events)[type][op][result];
	if (ev == 0)
		return -EOPNOTSUPP;
	if (ev == -1)
		return -EINVAL;
	*eventp = ev;
	return 0;
}
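/*
 * For illustration: the generic cache event config packs three byte-wide
 * fields, so e.g.
 *   config = PERF_COUNT_HW_CACHE_L1D |
 *            (PERF_COUNT_HW_CACHE_OP_READ << 8) |
 *            (PERF_COUNT_HW_CACHE_RESULT_MISS << 16)
 * selects L1-data read misses, which hw_perf_cache_event() above maps
 * through ppmu->cache_events to a raw event code.
 */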
static bool is_event_blacklisted(u64 ev)
{
	int i;

	for (i = 0; i < ppmu->n_blacklist_ev; i++) {
		if (ppmu->blacklist_ev[i] == ev)
			return true;
	}

	return false;
}
*event
)
1823 unsigned long flags
;
1824 struct perf_event
*ctrs
[MAX_HWEVENTS
];
1825 u64 events
[MAX_HWEVENTS
];
1826 unsigned int cflags
[MAX_HWEVENTS
];
1829 struct cpu_hw_events
*cpuhw
;
1834 if (has_branch_stack(event
)) {
1835 /* PMU has BHRB enabled */
1836 if (!(ppmu
->flags
& PPMU_ARCH_207S
))
1840 switch (event
->attr
.type
) {
1841 case PERF_TYPE_HARDWARE
:
1842 ev
= event
->attr
.config
;
1843 if (ev
>= ppmu
->n_generic
|| ppmu
->generic_events
[ev
] == 0)
1846 if (ppmu
->blacklist_ev
&& is_event_blacklisted(ev
))
1848 ev
= ppmu
->generic_events
[ev
];
1850 case PERF_TYPE_HW_CACHE
:
1851 err
= hw_perf_cache_event(event
->attr
.config
, &ev
);
1855 if (ppmu
->blacklist_ev
&& is_event_blacklisted(ev
))
1859 ev
= event
->attr
.config
;
1861 if (ppmu
->blacklist_ev
&& is_event_blacklisted(ev
))
1868 event
->hw
.config_base
= ev
;
1872 * If we are not running on a hypervisor, force the
1873 * exclude_hv bit to 0 so that we don't care what
1874 * the user set it to.
1876 if (!firmware_has_feature(FW_FEATURE_LPAR
))
1877 event
->attr
.exclude_hv
= 0;
1880 * If this is a per-task event, then we can use
1881 * PM_RUN_* events interchangeably with their non RUN_*
1882 * equivalents, e.g. PM_RUN_CYC instead of PM_CYC.
1883 * XXX we should check if the task is an idle task.
1886 if (event
->attach_state
& PERF_ATTACH_TASK
)
1887 flags
|= PPMU_ONLY_COUNT_RUN
;
1890 * If this machine has limited events, check whether this
1891 * event_id could go on a limited event.
1893 if (ppmu
->flags
& PPMU_LIMITED_PMC5_6
) {
1894 if (can_go_on_limited_pmc(event
, ev
, flags
)) {
1895 flags
|= PPMU_LIMITED_PMC_OK
;
1896 } else if (ppmu
->limited_pmc_event(ev
)) {
1898 * The requested event_id is on a limited PMC,
1899 * but we can't use a limited PMC; see if any
1900 * alternative goes on a normal PMC.
1902 ev
= normal_pmc_alternative(ev
, flags
);
1908 /* Extra checks for EBB */
1909 err
= ebb_event_check(event
);
1914 * If this is in a group, check if it can go on with all the
1915 * other hardware events in the group. We assume the event
1916 * hasn't been linked into its leader's sibling list at this point.
1919 if (event
->group_leader
!= event
) {
1920 n
= collect_events(event
->group_leader
, ppmu
->n_counter
- 1,
1921 ctrs
, events
, cflags
);
1928 if (check_excludes(ctrs
, cflags
, n
, 1))
1931 cpuhw
= &get_cpu_var(cpu_hw_events
);
1932 err
= power_check_constraints(cpuhw
, events
, cflags
, n
+ 1);
1934 if (has_branch_stack(event
)) {
1935 cpuhw
->bhrb_filter
= ppmu
->bhrb_filter_map(
1936 event
->attr
.branch_sample_type
);
1938 if (cpuhw
->bhrb_filter
== -1) {
1939 put_cpu_var(cpu_hw_events
);
1944 put_cpu_var(cpu_hw_events
);
1948 event
->hw
.config
= events
[n
];
1949 event
->hw
.event_base
= cflags
[n
];
1950 event
->hw
.last_period
= event
->hw
.sample_period
;
1951 local64_set(&event
->hw
.period_left
, event
->hw
.last_period
);
1954 * For EBB events we just context switch the PMC value, we don't do any
1955 * of the sample_period logic. We use hw.prev_count for this.
1957 if (is_ebb_event(event
))
1958 local64_set(&event
->hw
.prev_count
, 0);
1961 * See if we need to reserve the PMU.
1962 * If no events are currently in use, then we have to take a
1963 * mutex to ensure that we don't race with another task doing
1964 * reserve_pmc_hardware or release_pmc_hardware.
1967 if (!atomic_inc_not_zero(&num_events
)) {
1968 mutex_lock(&pmc_reserve_mutex
);
1969 if (atomic_read(&num_events
) == 0 &&
1970 reserve_pmc_hardware(perf_event_interrupt
))
1973 atomic_inc(&num_events
);
1974 mutex_unlock(&pmc_reserve_mutex
);
1976 event
->destroy
= hw_perf_event_destroy
;
static int power_pmu_event_idx(struct perf_event *event)
{
	return event->hw.idx;
}
ssize_t power_events_sysfs_show(struct device *dev,
				struct device_attribute *attr, char *page)
{
	struct perf_pmu_events_attr *pmu_attr;

	pmu_attr = container_of(attr, struct perf_pmu_events_attr, attr);

	return sprintf(page, "event=0x%02llx\n", pmu_attr->id);
}
static struct pmu power_pmu = {
	.pmu_enable	= power_pmu_enable,
	.pmu_disable	= power_pmu_disable,
	.event_init	= power_pmu_event_init,
	.add		= power_pmu_add,
	.del		= power_pmu_del,
	.start		= power_pmu_start,
	.stop		= power_pmu_stop,
	.read		= power_pmu_read,
	.start_txn	= power_pmu_start_txn,
	.cancel_txn	= power_pmu_cancel_txn,
	.commit_txn	= power_pmu_commit_txn,
	.event_idx	= power_pmu_event_idx,
	.sched_task	= power_pmu_sched_task,
};
/*
 * A counter has overflowed; update its count and record
 * things if requested.  Note that interrupts are hard-disabled
 * here so there is no possibility of being interrupted.
 */
static void record_and_restart(struct perf_event *event, unsigned long val,
			       struct pt_regs *regs)
{
	u64 period = event->hw.sample_period;
	s64 prev, delta, left;
	int record = 0;

	if (event->hw.state & PERF_HES_STOPPED) {
		write_pmc(event->hw.idx, 0);
		return;
	}

	/* we don't have to worry about interrupts here */
	prev = local64_read(&event->hw.prev_count);
	delta = check_and_compute_delta(prev, val);
	local64_add(delta, &event->count);

	/*
	 * See if the total period for this event has expired,
	 * and update for the next period.
	 */
	val = 0;
	left = local64_read(&event->hw.period_left) - delta;
	if (period) {
		if (left <= 0) {
			left += period;
			if (left <= 0)
				left = period;
			record = siar_valid(regs);
			event->hw.last_period = event->hw.sample_period;
		}
		if (left < 0x80000000LL)
			val = 0x80000000LL - left;
	}

	write_pmc(event->hw.idx, val);
	local64_set(&event->hw.prev_count, val);
	local64_set(&event->hw.period_left, left);
	perf_event_update_userpage(event);

	/*
	 * Finally record data if requested.
	 */
	if (record) {
		struct perf_sample_data data;

		perf_sample_data_init(&data, ~0ULL, event->hw.last_period);

		if (event->attr.sample_type &
		    (PERF_SAMPLE_ADDR | PERF_SAMPLE_PHYS_ADDR))
			perf_get_data_addr(regs, &data.addr);

		if (event->attr.sample_type & PERF_SAMPLE_BRANCH_STACK) {
			struct cpu_hw_events *cpuhw;
			cpuhw = this_cpu_ptr(&cpu_hw_events);
			power_pmu_bhrb_read(cpuhw);
			data.br_stack = &cpuhw->bhrb_stack;
		}

		if (event->attr.sample_type & PERF_SAMPLE_DATA_SRC &&
						ppmu->get_mem_data_src)
			ppmu->get_mem_data_src(&data.data_src, ppmu->flags, regs);

		if (event->attr.sample_type & PERF_SAMPLE_WEIGHT &&
						ppmu->get_mem_weight)
			ppmu->get_mem_weight(&data.weight);

		if (perf_event_overflow(event, &data, regs))
			power_pmu_stop(event, 0);
	}
}
/*
 * Called from generic code to get the misc flags (i.e. processor mode)
 * for an event_id.
 */
unsigned long perf_misc_flags(struct pt_regs *regs)
{
	u32 flags = perf_get_misc_flags(regs);

	if (flags)
		return flags;
	return user_mode(regs) ? PERF_RECORD_MISC_USER :
		PERF_RECORD_MISC_KERNEL;
}
/*
 * Called from generic code to get the instruction pointer
 * for an event_id.
 */
unsigned long perf_instruction_pointer(struct pt_regs *regs)
{
	bool use_siar = regs_use_siar(regs);

	if (use_siar && siar_valid(regs))
		return mfspr(SPRN_SIAR) + perf_ip_adjust(regs);
	else if (use_siar)
		return 0;		// no valid instruction pointer
	else
		return regs->nip;
}
static bool pmc_overflow_power7(unsigned long val)
{
	/*
	 * Events on POWER7 can roll back if a speculative event doesn't
	 * eventually complete. Unfortunately in some rare cases they will
	 * raise a performance monitor exception. We need to catch this to
	 * ensure we reset the PMC. In all cases the PMC will be 256 or less
	 * cycles from overflow.
	 *
	 * We only do this if the first pass fails to find any overflowing
	 * PMCs because a user might set a period of less than 256 and we
	 * don't want to mistakenly reset them.
	 */
	if ((0x80000000 - val) <= 256)
		return true;

	return false;
}
)
2149 * Performance monitor interrupt stuff
2151 static void perf_event_interrupt(struct pt_regs
*regs
)
2154 struct cpu_hw_events
*cpuhw
= this_cpu_ptr(&cpu_hw_events
);
2155 struct perf_event
*event
;
2156 unsigned long val
[8];
2160 if (cpuhw
->n_limited
)
2161 freeze_limited_counters(cpuhw
, mfspr(SPRN_PMC5
),
2164 perf_read_regs(regs
);
2166 nmi
= perf_intr_is_nmi(regs
);
2172 /* Read all the PMCs since we'll need them a bunch of times */
2173 for (i
= 0; i
< ppmu
->n_counter
; ++i
)
2174 val
[i
] = read_pmc(i
+ 1);
2176 /* Try to find what caused the IRQ */
2178 for (i
= 0; i
< ppmu
->n_counter
; ++i
) {
2179 if (!pmc_overflow(val
[i
]))
2181 if (is_limited_pmc(i
+ 1))
2182 continue; /* these won't generate IRQs */
2184 * We've found one that's overflowed. For active
2185 * counters we need to log this. For inactive
2186 * counters, we need to reset it anyway
2190 for (j
= 0; j
< cpuhw
->n_events
; ++j
) {
2191 event
= cpuhw
->event
[j
];
2192 if (event
->hw
.idx
== (i
+ 1)) {
2194 record_and_restart(event
, val
[i
], regs
);
2199 /* reset non active counters that have overflowed */
2200 write_pmc(i
+ 1, 0);
2202 if (!found
&& pvr_version_is(PVR_POWER7
)) {
2203 /* check active counters for special buggy p7 overflow */
2204 for (i
= 0; i
< cpuhw
->n_events
; ++i
) {
2205 event
= cpuhw
->event
[i
];
2206 if (!event
->hw
.idx
|| is_limited_pmc(event
->hw
.idx
))
2208 if (pmc_overflow_power7(val
[event
->hw
.idx
- 1])) {
2209 /* event has overflowed in a buggy way*/
2211 record_and_restart(event
,
2212 val
[event
->hw
.idx
- 1],
2217 if (!found
&& !nmi
&& printk_ratelimit())
2218 printk(KERN_WARNING
"Can't find PMC that caused IRQ\n");
2221 * Reset MMCR0 to its normal value. This will set PMXE and
2222 * clear FC (freeze counters) and PMAO (perf mon alert occurred)
2223 * and thus allow interrupts to occur again.
2224 * XXX might want to use MSR.PM to keep the events frozen until
2225 * we get back out of this interrupt.
2227 write_mmcr0(cpuhw
, cpuhw
->mmcr
[0]);
2235 static int power_pmu_prepare_cpu(unsigned int cpu
)
2237 struct cpu_hw_events
*cpuhw
= &per_cpu(cpu_hw_events
, cpu
);
2240 memset(cpuhw
, 0, sizeof(*cpuhw
));
2241 cpuhw
->mmcr
[0] = MMCR0_FC
;
int register_power_pmu(struct power_pmu *pmu)
{
	if (ppmu)
		return -EBUSY;		/* something's already registered */

	ppmu = pmu;
	pr_info("%s performance monitor hardware support registered\n",
		pmu->name);

	power_pmu.attr_groups = ppmu->attr_groups;

#ifdef MSR_HV
	/*
	 * Use FCHV to ignore kernel events if MSR.HV is set.
	 */
	if (mfmsr() & MSR_HV)
		freeze_events_kernel = MMCR0_FCHV;
#endif /* CONFIG_PPC64 */

	perf_pmu_register(&power_pmu, "cpu", PERF_TYPE_RAW);
	cpuhp_setup_state(CPUHP_PERF_POWER, "perf/powerpc:prepare",
			  power_pmu_prepare_cpu, NULL);
	return 0;
}