/*
 * Performance counter support - powerpc architecture code
 *
 * Copyright 2008-2009 Paul Mackerras, IBM Corporation.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/perf_counter.h>
#include <linux/percpu.h>
#include <linux/hardirq.h>
#include <asm/reg.h>
#include <asm/pmc.h>
#include <asm/machdep.h>
#include <asm/firmware.h>
#include <asm/ptrace.h>

struct cpu_hw_counters {
	int n_counters;
	int n_percpu;
	int disabled;
	int n_added;
	int n_limited;
	u8  pmcs_enabled;
	struct perf_counter *counter[MAX_HWCOUNTERS];
	u64 events[MAX_HWCOUNTERS];
	unsigned int flags[MAX_HWCOUNTERS];
	unsigned long mmcr[3];
	struct perf_counter *limited_counter[MAX_LIMITED_HWCOUNTERS];
	u8  limited_hwidx[MAX_LIMITED_HWCOUNTERS];
};
DEFINE_PER_CPU(struct cpu_hw_counters, cpu_hw_counters);

struct power_pmu *ppmu;

/*
 * Normally, to ignore kernel events we set the FCS (freeze counters
 * in supervisor mode) bit in MMCR0, but if the kernel runs with the
 * hypervisor bit set in the MSR, or if we are running on a processor
 * where the hypervisor bit is forced to 1 (as on Apple G5 processors),
 * then we need to use the FCHV bit to ignore kernel events.
 */
static unsigned int freeze_counters_kernel = MMCR0_FCS;

static void perf_counter_interrupt(struct pt_regs *regs);

void perf_counter_print_debug(void)
{
}

/*
 * Read one performance monitor counter (PMC).
 */
static unsigned long read_pmc(int idx)
{
	unsigned long val;

	switch (idx) {
	case 1:
		val = mfspr(SPRN_PMC1);
		break;
	case 2:
		val = mfspr(SPRN_PMC2);
		break;
	case 3:
		val = mfspr(SPRN_PMC3);
		break;
	case 4:
		val = mfspr(SPRN_PMC4);
		break;
	case 5:
		val = mfspr(SPRN_PMC5);
		break;
	case 6:
		val = mfspr(SPRN_PMC6);
		break;
	case 7:
		val = mfspr(SPRN_PMC7);
		break;
	case 8:
		val = mfspr(SPRN_PMC8);
		break;
	default:
		printk(KERN_ERR "oops trying to read PMC%d\n", idx);
		val = 0;
	}
	return val;
}

/*
 * Write one PMC.
 */
static void write_pmc(int idx, unsigned long val)
{
	switch (idx) {
	case 1:
		mtspr(SPRN_PMC1, val);
		break;
	case 2:
		mtspr(SPRN_PMC2, val);
		break;
	case 3:
		mtspr(SPRN_PMC3, val);
		break;
	case 4:
		mtspr(SPRN_PMC4, val);
		break;
	case 5:
		mtspr(SPRN_PMC5, val);
		break;
	case 6:
		mtspr(SPRN_PMC6, val);
		break;
	case 7:
		mtspr(SPRN_PMC7, val);
		break;
	case 8:
		mtspr(SPRN_PMC8, val);
		break;
	default:
		printk(KERN_ERR "oops trying to write PMC%d\n", idx);
	}
}

/*
 * Check if a set of events can all go on the PMU at once.
 * If they can't, this will look at alternative codes for the events
 * and see if any combination of alternative codes is feasible.
 * The feasible set is returned in event[].
 */
static int power_check_constraints(u64 event[], unsigned int cflags[],
				   int n_ev)
{
	unsigned long mask, value, nv;
	u64 alternatives[MAX_HWCOUNTERS][MAX_EVENT_ALTERNATIVES];
	unsigned long amasks[MAX_HWCOUNTERS][MAX_EVENT_ALTERNATIVES];
	unsigned long avalues[MAX_HWCOUNTERS][MAX_EVENT_ALTERNATIVES];
	unsigned long smasks[MAX_HWCOUNTERS], svalues[MAX_HWCOUNTERS];
	int n_alt[MAX_HWCOUNTERS], choice[MAX_HWCOUNTERS];
	int i, j;
	unsigned long addf = ppmu->add_fields;
	unsigned long tadd = ppmu->test_adder;

	if (n_ev > ppmu->n_counter)
		return -1;

	/* First see if the events will go on as-is */
	for (i = 0; i < n_ev; ++i) {
		if ((cflags[i] & PPMU_LIMITED_PMC_REQD)
		    && !ppmu->limited_pmc_event(event[i])) {
			ppmu->get_alternatives(event[i], cflags[i],
					       alternatives[i]);
			event[i] = alternatives[i][0];
		}
		if (ppmu->get_constraint(event[i], &amasks[i][0],
					 &avalues[i][0]))
			return -1;
	}
	value = mask = 0;
	for (i = 0; i < n_ev; ++i) {
		nv = (value | avalues[i][0]) + (value & avalues[i][0] & addf);
		if ((((nv + tadd) ^ value) & mask) != 0 ||
		    (((nv + tadd) ^ avalues[i][0]) & amasks[i][0]) != 0)
			break;
		value = nv;
		mask |= amasks[i][0];
	}
	if (i == n_ev)
		return 0;	/* all OK */

	/* doesn't work, gather alternatives... */
	if (!ppmu->get_alternatives)
		return -1;
	for (i = 0; i < n_ev; ++i) {
		choice[i] = 0;
		n_alt[i] = ppmu->get_alternatives(event[i], cflags[i],
						  alternatives[i]);
		for (j = 1; j < n_alt[i]; ++j)
			ppmu->get_constraint(alternatives[i][j],
					     &amasks[i][j], &avalues[i][j]);
	}

	/* enumerate all possibilities and see if any will work */
	i = 0;
	j = -1;
	value = mask = nv = 0;
	while (1) {
		if (j >= 0) {
			/* we're backtracking, restore context */
			value = svalues[i];
			mask = smasks[i];
			j = choice[i];
		}
		/*
		 * See if any alternative k for event i,
		 * where k > j, will satisfy the constraints.
		 */
		while (++j < n_alt[i]) {
			nv = (value | avalues[i][j]) +
				(value & avalues[i][j] & addf);
			if ((((nv + tadd) ^ value) & mask) == 0 &&
			    (((nv + tadd) ^ avalues[i][j])
			     & amasks[i][j]) == 0)
				break;
		}
		if (j >= n_alt[i]) {
			/*
			 * No feasible alternative, backtrack
			 * to event i-1 and continue enumerating its
			 * alternatives from where we got up to.
			 */
			if (--i < 0)
				return -1;
		} else {
			/*
			 * Found a feasible alternative for event i,
			 * remember where we got up to with this event,
			 * go on to the next event, and start with
			 * the first alternative for it.
			 */
			choice[i] = j;
			svalues[i] = value;
			smasks[i] = mask;
			value = nv;
			mask |= amasks[i][j];
			if (++i >= n_ev)
				break;
			j = -1;
		}
	}

	/* OK, we have a feasible combination, tell the caller the solution */
	for (i = 0; i < n_ev; ++i)
		event[i] = alternatives[i][choice[i]];
	return 0;
}
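
/*
 * For example, with the typical encoding each shared PMU resource gets
 * a small bit-field in the constraint value counting how many selected
 * events need that resource.  The expression
 *	nv = (value | avalues[i][j]) + (value & avalues[i][j] & addf)
 * adds the new event's fields into the running sum without carries
 * leaking between fields, and adding test_adder makes any
 * over-committed field carry out of its allotted bits, which the two
 * XOR/mask tests then detect and reject.
 */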

/*
 * Check if newly-added counters have consistent settings for
 * exclude_{user,kernel,hv} with each other and any previously
 * added counters.
 */
static int check_excludes(struct perf_counter **ctrs, unsigned int cflags[],
			  int n_prev, int n_new)
{
	int eu = 0, ek = 0, eh = 0;
	int i, n, first;
	struct perf_counter *counter;

	n = n_prev + n_new;
	if (n <= 1)
		return 0;

	first = 1;
	for (i = 0; i < n; ++i) {
		if (cflags[i] & PPMU_LIMITED_PMC_OK) {
			cflags[i] &= ~PPMU_LIMITED_PMC_REQD;
			continue;
		}
		counter = ctrs[i];
		if (first) {
			eu = counter->attr.exclude_user;
			ek = counter->attr.exclude_kernel;
			eh = counter->attr.exclude_hv;
			first = 0;
		} else if (counter->attr.exclude_user != eu ||
			   counter->attr.exclude_kernel != ek ||
			   counter->attr.exclude_hv != eh) {
			return -EAGAIN;
		}
	}

	if (eu || ek || eh)
		for (i = 0; i < n; ++i)
			if (cflags[i] & PPMU_LIMITED_PMC_OK)
				cflags[i] |= PPMU_LIMITED_PMC_REQD;

	return 0;
}

static void power_pmu_read(struct perf_counter *counter)
{
	long val, delta, prev;

	if (!counter->hw.idx)
		return;
	/*
	 * Performance monitor interrupts come even when interrupts
	 * are soft-disabled, as long as interrupts are hard-enabled.
	 * Therefore we treat them like NMIs.
	 */
	do {
		prev = atomic64_read(&counter->hw.prev_count);
		barrier();
		val = read_pmc(counter->hw.idx);
	} while (atomic64_cmpxchg(&counter->hw.prev_count, prev, val) != prev);

	/* The counters are only 32 bits wide */
	delta = (val - prev) & 0xfffffffful;
	atomic64_add(delta, &counter->count);
	atomic64_sub(delta, &counter->hw.period_left);
}
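
/*
 * Example of the wraparound arithmetic above: if prev_count was
 * 0xfffffff0 and the 32-bit PMC has wrapped around to 0x00000010,
 * then (val - prev) & 0xfffffffful = 0x20, i.e. the 32 events that
 * actually occurred, even though val < prev.
 */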

/*
 * On some machines, PMC5 and PMC6 can't be written, don't respect
 * the freeze conditions, and don't generate interrupts.  This tells
 * us if `counter' is using such a PMC.
 */
static int is_limited_pmc(int pmcnum)
{
	return (ppmu->flags & PPMU_LIMITED_PMC5_6)
		&& (pmcnum == 5 || pmcnum == 6);
}

static void freeze_limited_counters(struct cpu_hw_counters *cpuhw,
				    unsigned long pmc5, unsigned long pmc6)
{
	struct perf_counter *counter;
	u64 val, prev, delta;
	int i;

	for (i = 0; i < cpuhw->n_limited; ++i) {
		counter = cpuhw->limited_counter[i];
		if (!counter->hw.idx)
			continue;
		val = (counter->hw.idx == 5) ? pmc5 : pmc6;
		prev = atomic64_read(&counter->hw.prev_count);
		counter->hw.idx = 0;
		delta = (val - prev) & 0xfffffffful;
		atomic64_add(delta, &counter->count);
	}
}

static void thaw_limited_counters(struct cpu_hw_counters *cpuhw,
				  unsigned long pmc5, unsigned long pmc6)
{
	struct perf_counter *counter;
	u64 val;
	int i;

	for (i = 0; i < cpuhw->n_limited; ++i) {
		counter = cpuhw->limited_counter[i];
		counter->hw.idx = cpuhw->limited_hwidx[i];
		val = (counter->hw.idx == 5) ? pmc5 : pmc6;
		atomic64_set(&counter->hw.prev_count, val);
		perf_counter_update_userpage(counter);
	}
}

/*
 * Since limited counters don't respect the freeze conditions, we
 * have to read them immediately after freezing or unfreezing the
 * other counters.  We try to keep the values from the limited
 * counters as consistent as possible by keeping the delay (in
 * cycles and instructions) between freezing/unfreezing and reading
 * the limited counters as small and consistent as possible.
 * Therefore, if any limited counters are in use, we read them
 * both, and always in the same order, to minimize variability,
 * and do it inside the same asm that writes MMCR0.
 */
static void write_mmcr0(struct cpu_hw_counters *cpuhw, unsigned long mmcr0)
{
	unsigned long pmc5, pmc6;

	if (!cpuhw->n_limited) {
		mtspr(SPRN_MMCR0, mmcr0);
		return;
	}

	/*
	 * Write MMCR0, then read PMC5 and PMC6 immediately.
	 * To ensure we don't get a performance monitor interrupt
	 * between writing MMCR0 and freezing/thawing the limited
	 * counters, we first write MMCR0 with the counter overflow
	 * interrupt enable bits turned off.
	 */
	asm volatile("mtspr %3,%2; mfspr %0,%4; mfspr %1,%5"
		     : "=&r" (pmc5), "=&r" (pmc6)
		     : "r" (mmcr0 & ~(MMCR0_PMC1CE | MMCR0_PMCjCE)),
		       "i" (SPRN_MMCR0),
		       "i" (SPRN_PMC5), "i" (SPRN_PMC6));

	if (mmcr0 & MMCR0_FC)
		freeze_limited_counters(cpuhw, pmc5, pmc6);
	else
		thaw_limited_counters(cpuhw, pmc5, pmc6);

	/*
	 * Write the full MMCR0 including the counter overflow interrupt
	 * enable bits, if necessary.
	 */
	if (mmcr0 & (MMCR0_PMC1CE | MMCR0_PMCjCE))
		mtspr(SPRN_MMCR0, mmcr0);
}
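
/*
 * The "=&r" (earlyclobber) output constraints in the asm above matter
 * because pmc5 is written by the second instruction, before the last
 * input is consumed by the third; without them the compiler would be
 * free to allocate an input operand to the same register as an output.
 */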

/*
 * Disable all counters to prevent PMU interrupts and to allow
 * counters to be added or removed.
 */
void hw_perf_disable(void)
{
	struct cpu_hw_counters *cpuhw;
	unsigned long flags;

	local_irq_save(flags);
	cpuhw = &__get_cpu_var(cpu_hw_counters);

	if (!cpuhw->disabled) {
		cpuhw->disabled = 1;
		cpuhw->n_added = 0;

		/*
		 * Check if we ever enabled the PMU on this cpu.
		 */
		if (!cpuhw->pmcs_enabled) {
			if (ppc_md.enable_pmcs)
				ppc_md.enable_pmcs();
			cpuhw->pmcs_enabled = 1;
		}

		/*
		 * Disable instruction sampling if it was enabled
		 */
		if (cpuhw->mmcr[2] & MMCRA_SAMPLE_ENABLE) {
			mtspr(SPRN_MMCRA,
			      cpuhw->mmcr[2] & ~MMCRA_SAMPLE_ENABLE);
			mb();
		}

		/*
		 * Set the 'freeze counters' bit.
		 * The barrier is to make sure the mtspr has been
		 * executed and the PMU has frozen the counters
		 * before we return.
		 */
		write_mmcr0(cpuhw, mfspr(SPRN_MMCR0) | MMCR0_FC);
		mb();
	}
	local_irq_restore(flags);
}
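
/*
 * Note that hw_perf_disable() is idempotent per-cpu: the
 * cpuhw->disabled flag makes a second call while the PMU is already
 * frozen a no-op, and the matching hw_perf_enable() reprograms the
 * hardware once with whatever set of counters it then finds.
 */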

/*
 * Re-enable all counters if disable == 0.
 * If we were previously disabled and counters were added, then
 * put the new config on the PMU.
 */
void hw_perf_enable(void)
{
	struct perf_counter *counter;
	struct cpu_hw_counters *cpuhw;
	unsigned long flags;
	long i;
	unsigned long val;
	s64 left;
	unsigned int hwc_index[MAX_HWCOUNTERS];
	int n_lim;
	int idx;

	local_irq_save(flags);
	cpuhw = &__get_cpu_var(cpu_hw_counters);
	if (!cpuhw->disabled) {
		local_irq_restore(flags);
		return;
	}
	cpuhw->disabled = 0;

	/*
	 * If we didn't change anything, or only removed counters,
	 * no need to recalculate MMCR* settings and reset the PMCs.
	 * Just reenable the PMU with the current MMCR* settings
	 * (possibly updated for removal of counters).
	 */
	if (!cpuhw->n_added) {
		mtspr(SPRN_MMCRA, cpuhw->mmcr[2] & ~MMCRA_SAMPLE_ENABLE);
		mtspr(SPRN_MMCR1, cpuhw->mmcr[1]);
		if (cpuhw->n_counters == 0)
			get_lppaca()->pmcregs_in_use = 0;
		goto out_enable;
	}

	/*
	 * Compute MMCR* values for the new set of counters
	 */
	if (ppmu->compute_mmcr(cpuhw->events, cpuhw->n_counters, hwc_index,
			       cpuhw->mmcr)) {
		/* shouldn't ever get here */
		printk(KERN_ERR "oops compute_mmcr failed\n");
		goto out;
	}

	/*
	 * Add in MMCR0 freeze bits corresponding to the
	 * attr.exclude_* bits for the first counter.
	 * We have already checked that all counters have the
	 * same values for these bits as the first counter.
	 */
	counter = cpuhw->counter[0];
	if (counter->attr.exclude_user)
		cpuhw->mmcr[0] |= MMCR0_FCP;
	if (counter->attr.exclude_kernel)
		cpuhw->mmcr[0] |= freeze_counters_kernel;
	if (counter->attr.exclude_hv)
		cpuhw->mmcr[0] |= MMCR0_FCHV;

	/*
	 * Write the new configuration to MMCR* with the freeze
	 * bit set and set the hardware counters to their initial values.
	 * Then unfreeze the counters.
	 */
	get_lppaca()->pmcregs_in_use = 1;
	mtspr(SPRN_MMCRA, cpuhw->mmcr[2] & ~MMCRA_SAMPLE_ENABLE);
	mtspr(SPRN_MMCR1, cpuhw->mmcr[1]);
	mtspr(SPRN_MMCR0, (cpuhw->mmcr[0] & ~(MMCR0_PMC1CE | MMCR0_PMCjCE))
				| MMCR0_FC);

	/*
	 * Read off any pre-existing counters that need to move
	 * to another PMC.
	 */
	for (i = 0; i < cpuhw->n_counters; ++i) {
		counter = cpuhw->counter[i];
		if (counter->hw.idx && counter->hw.idx != hwc_index[i] + 1) {
			power_pmu_read(counter);
			write_pmc(counter->hw.idx, 0);
			counter->hw.idx = 0;
		}
	}

	/*
	 * Initialize the PMCs for all the new and moved counters.
	 */
	cpuhw->n_limited = n_lim = 0;
	for (i = 0; i < cpuhw->n_counters; ++i) {
		counter = cpuhw->counter[i];
		if (counter->hw.idx)
			continue;
		idx = hwc_index[i] + 1;
		if (is_limited_pmc(idx)) {
			cpuhw->limited_counter[n_lim] = counter;
			cpuhw->limited_hwidx[n_lim] = idx;
			++n_lim;
			continue;
		}
		val = 0;
		if (counter->hw.sample_period) {
			left = atomic64_read(&counter->hw.period_left);
			if (left < 0x80000000L)
				val = 0x80000000L - left;
		}
		atomic64_set(&counter->hw.prev_count, val);
		counter->hw.idx = idx;
		write_pmc(idx, val);
		perf_counter_update_userpage(counter);
	}
	cpuhw->n_limited = n_lim;
	cpuhw->mmcr[0] |= MMCR0_PMXE | MMCR0_FCECE;

 out_enable:
	mb();
	write_mmcr0(cpuhw, cpuhw->mmcr[0]);

	/*
	 * Enable instruction sampling if necessary
	 */
	if (cpuhw->mmcr[2] & MMCRA_SAMPLE_ENABLE) {
		mb();
		mtspr(SPRN_MMCRA, cpuhw->mmcr[2]);
	}

 out:
	local_irq_restore(flags);
}
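
/*
 * Example of the initial-value arithmetic above: with a sample_period
 * of 100000, left is 100000 and the PMC is started at
 * 0x80000000 - 100000, so its value goes negative (as a 32-bit
 * quantity), raising a PMU exception, after exactly 100000 counts.
 */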

static int collect_events(struct perf_counter *group, int max_count,
			  struct perf_counter *ctrs[], u64 *events,
			  unsigned int *flags)
{
	int n = 0;
	struct perf_counter *counter;

	if (!is_software_counter(group)) {
		if (n >= max_count)
			return -1;
		ctrs[n] = group;
		flags[n] = group->hw.counter_base;
		events[n++] = group->hw.config;
	}
	list_for_each_entry(counter, &group->sibling_list, list_entry) {
		if (!is_software_counter(counter) &&
		    counter->state != PERF_COUNTER_STATE_OFF) {
			if (n >= max_count)
				return -1;
			ctrs[n] = counter;
			flags[n] = counter->hw.counter_base;
			events[n++] = counter->hw.config;
		}
	}
	return n;
}
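
/*
 * For example, a leader with two countable hardware siblings fills
 * ctrs[0..2], events[0..2] and flags[0..2] and returns 3; a group
 * holding more hardware counters than max_count is rejected with -1.
 */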

static void counter_sched_in(struct perf_counter *counter, int cpu)
{
	counter->state = PERF_COUNTER_STATE_ACTIVE;
	counter->oncpu = cpu;
	counter->tstamp_running += counter->ctx->time - counter->tstamp_stopped;
	if (is_software_counter(counter))
		counter->pmu->enable(counter);
}

/*
 * Called to enable a whole group of counters.
 * Returns 1 if the group was enabled, or -EAGAIN if it could not be.
 * Assumes the caller has disabled interrupts and has
 * frozen the PMU with hw_perf_save_disable.
 */
int hw_perf_group_sched_in(struct perf_counter *group_leader,
			   struct perf_cpu_context *cpuctx,
			   struct perf_counter_context *ctx, int cpu)
{
	struct cpu_hw_counters *cpuhw;
	long i, n, n0;
	struct perf_counter *sub;

	cpuhw = &__get_cpu_var(cpu_hw_counters);
	n0 = cpuhw->n_counters;
	n = collect_events(group_leader, ppmu->n_counter - n0,
			   &cpuhw->counter[n0], &cpuhw->events[n0],
			   &cpuhw->flags[n0]);
	if (n < 0)
		return -EAGAIN;
	if (check_excludes(cpuhw->counter, cpuhw->flags, n0, n))
		return -EAGAIN;
	i = power_check_constraints(cpuhw->events, cpuhw->flags, n + n0);
	if (i < 0)
		return -EAGAIN;
	cpuhw->n_counters = n0 + n;
	cpuhw->n_added += n;

	/*
	 * OK, this group can go on; update counter states etc.,
	 * and enable any software counters
	 */
	for (i = n0; i < n0 + n; ++i)
		cpuhw->counter[i]->hw.config = cpuhw->events[i];
	cpuctx->active_oncpu += n;
	n = 1;
	counter_sched_in(group_leader, cpu);
	list_for_each_entry(sub, &group_leader->sibling_list, list_entry) {
		if (sub->state != PERF_COUNTER_STATE_OFF) {
			counter_sched_in(sub, cpu);
			++n;
		}
	}
	ctx->nr_active += n;

	return 1;
}

/*
 * Add a counter to the PMU.
 * If all counters are not already frozen, then we disable and
 * re-enable the PMU in order to get hw_perf_enable to do the
 * actual work of reconfiguring the PMU.
 */
static int power_pmu_enable(struct perf_counter *counter)
{
	struct cpu_hw_counters *cpuhw;
	unsigned long flags;
	int n0;
	int ret = -EAGAIN;

	local_irq_save(flags);
	perf_disable();

	/*
	 * Add the counter to the list (if there is room)
	 * and check whether the total set is still feasible.
	 */
	cpuhw = &__get_cpu_var(cpu_hw_counters);
	n0 = cpuhw->n_counters;
	if (n0 >= ppmu->n_counter)
		goto out;
	cpuhw->counter[n0] = counter;
	cpuhw->events[n0] = counter->hw.config;
	cpuhw->flags[n0] = counter->hw.counter_base;
	if (check_excludes(cpuhw->counter, cpuhw->flags, n0, 1))
		goto out;
	if (power_check_constraints(cpuhw->events, cpuhw->flags, n0 + 1))
		goto out;

	counter->hw.config = cpuhw->events[n0];
	++cpuhw->n_counters;
	++cpuhw->n_added;

	ret = 0;
 out:
	perf_enable();
	local_irq_restore(flags);
	return ret;
}

/*
 * Remove a counter from the PMU.
 */
static void power_pmu_disable(struct perf_counter *counter)
{
	struct cpu_hw_counters *cpuhw;
	long i;
	unsigned long flags;

	local_irq_save(flags);
	perf_disable();

	power_pmu_read(counter);

	cpuhw = &__get_cpu_var(cpu_hw_counters);
	for (i = 0; i < cpuhw->n_counters; ++i) {
		if (counter == cpuhw->counter[i]) {
			while (++i < cpuhw->n_counters)
				cpuhw->counter[i-1] = cpuhw->counter[i];
			--cpuhw->n_counters;
			ppmu->disable_pmc(counter->hw.idx - 1, cpuhw->mmcr);
			if (counter->hw.idx) {
				write_pmc(counter->hw.idx, 0);
				counter->hw.idx = 0;
			}
			perf_counter_update_userpage(counter);
			break;
		}
	}
	for (i = 0; i < cpuhw->n_limited; ++i)
		if (counter == cpuhw->limited_counter[i])
			break;
	if (i < cpuhw->n_limited) {
		while (++i < cpuhw->n_limited) {
			cpuhw->limited_counter[i-1] = cpuhw->limited_counter[i];
			cpuhw->limited_hwidx[i-1] = cpuhw->limited_hwidx[i];
		}
		--cpuhw->n_limited;
	}
	if (cpuhw->n_counters == 0) {
		/* disable exceptions if no counters are running */
		cpuhw->mmcr[0] &= ~(MMCR0_PMXE | MMCR0_FCECE);
	}

	perf_enable();
	local_irq_restore(flags);
}

/*
 * Re-enable interrupts on a counter after they were throttled
 * because they were coming too fast.
 */
static void power_pmu_unthrottle(struct perf_counter *counter)
{
	s64 val, left;
	unsigned long flags;

	if (!counter->hw.idx || !counter->hw.sample_period)
		return;
	local_irq_save(flags);
	perf_disable();
	power_pmu_read(counter);
	left = counter->hw.sample_period;
	counter->hw.last_period = left;
	val = 0;
	if (left < 0x80000000L)
		val = 0x80000000L - left;
	write_pmc(counter->hw.idx, val);
	atomic64_set(&counter->hw.prev_count, val);
	atomic64_set(&counter->hw.period_left, left);
	perf_counter_update_userpage(counter);
	perf_enable();
	local_irq_restore(flags);
}

struct pmu power_pmu = {
	.enable		= power_pmu_enable,
	.disable	= power_pmu_disable,
	.read		= power_pmu_read,
	.unthrottle	= power_pmu_unthrottle,
};

/*
 * Return 1 if we might be able to put counter on a limited PMC,
 * or 0 if not.
 * A counter can only go on a limited PMC if it counts something
 * that a limited PMC can count, doesn't require interrupts, and
 * doesn't exclude any processor mode.
 */
static int can_go_on_limited_pmc(struct perf_counter *counter, u64 ev,
				 unsigned int flags)
{
	int n;
	u64 alt[MAX_EVENT_ALTERNATIVES];

	if (counter->attr.exclude_user
	    || counter->attr.exclude_kernel
	    || counter->attr.exclude_hv
	    || counter->attr.sample_period)
		return 0;

	if (ppmu->limited_pmc_event(ev))
		return 1;

	/*
	 * The requested event isn't on a limited PMC already;
	 * see if any alternative code goes on a limited PMC.
	 */
	if (!ppmu->get_alternatives)
		return 0;

	flags |= PPMU_LIMITED_PMC_OK | PPMU_LIMITED_PMC_REQD;
	n = ppmu->get_alternatives(ev, flags, alt);

	return n > 0;
}

/*
 * Find an alternative event that goes on a normal PMC, if possible,
 * and return the event code, or 0 if there is no such alternative.
 * (Note: event code 0 is "don't count" on all machines.)
 */
static u64 normal_pmc_alternative(u64 ev, unsigned long flags)
{
	u64 alt[MAX_EVENT_ALTERNATIVES];
	int n;

	flags &= ~(PPMU_LIMITED_PMC_OK | PPMU_LIMITED_PMC_REQD);
	n = ppmu->get_alternatives(ev, flags, alt);
	if (!n)
		return 0;
	return alt[0];
}

/* Number of perf_counters counting hardware events */
static atomic_t num_counters;
/* Used to avoid races in calling reserve/release_pmc_hardware */
static DEFINE_MUTEX(pmc_reserve_mutex);

/*
 * Release the PMU if this is the last perf_counter.
 */
static void hw_perf_counter_destroy(struct perf_counter *counter)
{
	if (!atomic_add_unless(&num_counters, -1, 1)) {
		mutex_lock(&pmc_reserve_mutex);
		if (atomic_dec_return(&num_counters) == 0)
			release_pmc_hardware();
		mutex_unlock(&pmc_reserve_mutex);
	}
}

/*
 * Translate a generic cache event config to a raw event code.
 */
static int hw_perf_cache_event(u64 config, u64 *eventp)
{
	unsigned long type, op, result;
	int ev;

	if (!ppmu->cache_events)
		return -EINVAL;

	/* unpack config */
	type = config & 0xff;
	op = (config >> 8) & 0xff;
	result = (config >> 16) & 0xff;

	if (type >= PERF_COUNT_HW_CACHE_MAX ||
	    op >= PERF_COUNT_HW_CACHE_OP_MAX ||
	    result >= PERF_COUNT_HW_CACHE_RESULT_MAX)
		return -EINVAL;

	ev = (*ppmu->cache_events)[type][op][result];
	if (ev == 0)
		return -EOPNOTSUPP;
	if (ev == -1)
		return -EINVAL;
	*eventp = ev;
	return 0;
}

const struct pmu *hw_perf_counter_init(struct perf_counter *counter)
{
	u64 ev;
	unsigned long flags;
	struct perf_counter *ctrs[MAX_HWCOUNTERS];
	u64 events[MAX_HWCOUNTERS];
	unsigned int cflags[MAX_HWCOUNTERS];
	int n;
	int err;

	if (!ppmu)
		return ERR_PTR(-ENXIO);
	switch (counter->attr.type) {
	case PERF_TYPE_HARDWARE:
		ev = counter->attr.config;
		if (ev >= ppmu->n_generic || ppmu->generic_events[ev] == 0)
			return ERR_PTR(-EOPNOTSUPP);
		ev = ppmu->generic_events[ev];
		break;
	case PERF_TYPE_HW_CACHE:
		err = hw_perf_cache_event(counter->attr.config, &ev);
		if (err)
			return ERR_PTR(err);
		break;
	case PERF_TYPE_RAW:
		ev = counter->attr.config;
		break;
	default:
		return ERR_PTR(-EINVAL);
	}
	counter->hw.config_base = ev;
	counter->hw.idx = 0;

	/*
	 * If we are not running on a hypervisor, force the
	 * exclude_hv bit to 0 so that we don't care what
	 * the user set it to.
	 */
	if (!firmware_has_feature(FW_FEATURE_LPAR))
		counter->attr.exclude_hv = 0;

	/*
	 * If this is a per-task counter, then we can use
	 * PM_RUN_* events interchangeably with their non RUN_*
	 * equivalents, e.g. PM_RUN_CYC instead of PM_CYC.
	 * XXX we should check if the task is an idle task.
	 */
	flags = 0;
	if (counter->ctx->task)
		flags |= PPMU_ONLY_COUNT_RUN;

	/*
	 * If this machine has limited counters, check whether this
	 * event could go on a limited counter.
	 */
	if (ppmu->flags & PPMU_LIMITED_PMC5_6) {
		if (can_go_on_limited_pmc(counter, ev, flags)) {
			flags |= PPMU_LIMITED_PMC_OK;
		} else if (ppmu->limited_pmc_event(ev)) {
			/*
			 * The requested event is on a limited PMC,
			 * but we can't use a limited PMC; see if any
			 * alternative goes on a normal PMC.
			 */
			ev = normal_pmc_alternative(ev, flags);
			if (!ev)
				return ERR_PTR(-EINVAL);
		}
	}

	/*
	 * If this is in a group, check if it can go on with all the
	 * other hardware counters in the group.  We assume the counter
	 * hasn't been linked into its leader's sibling list at this point.
	 */
	n = 0;
	if (counter->group_leader != counter) {
		n = collect_events(counter->group_leader, ppmu->n_counter - 1,
				   ctrs, events, cflags);
		if (n < 0)
			return ERR_PTR(-EINVAL);
	}
	events[n] = ev;
	ctrs[n] = counter;
	cflags[n] = flags;
	if (check_excludes(ctrs, cflags, n, 1))
		return ERR_PTR(-EINVAL);
	if (power_check_constraints(events, cflags, n + 1))
		return ERR_PTR(-EINVAL);

	counter->hw.config = events[n];
	counter->hw.counter_base = cflags[n];
	counter->hw.last_period = counter->hw.sample_period;
	atomic64_set(&counter->hw.period_left, counter->hw.last_period);

	/*
	 * See if we need to reserve the PMU.
	 * If no counters are currently in use, then we have to take a
	 * mutex to ensure that we don't race with another task doing
	 * reserve_pmc_hardware or release_pmc_hardware.
	 */
	err = 0;
	if (!atomic_inc_not_zero(&num_counters)) {
		mutex_lock(&pmc_reserve_mutex);
		if (atomic_read(&num_counters) == 0 &&
		    reserve_pmc_hardware(perf_counter_interrupt))
			err = -EBUSY;
		else
			atomic_inc(&num_counters);
		mutex_unlock(&pmc_reserve_mutex);
	}
	counter->destroy = hw_perf_counter_destroy;

	if (err)
		return ERR_PTR(err);
	return &power_pmu;
}
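
/*
 * The generic code calls hw_perf_counter_init() when a counter is
 * created, e.g. for an attr with type PERF_TYPE_HARDWARE and config
 * PERF_COUNT_HW_CPU_CYCLES; on success the returned &power_pmu is
 * stored in counter->pmu and used for all later enable/disable/read
 * operations on that counter.
 */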

/*
 * A counter has overflowed; update its count and record
 * things if requested.  Note that interrupts are hard-disabled
 * here so there is no possibility of being interrupted.
 */
static void record_and_restart(struct perf_counter *counter, long val,
			       struct pt_regs *regs, int nmi)
{
	u64 period = counter->hw.sample_period;
	unsigned long mmcra, sdsync;
	s64 prev, delta, left;
	int record = 0;

	/* we don't have to worry about interrupts here */
	prev = atomic64_read(&counter->hw.prev_count);
	delta = (val - prev) & 0xfffffffful;
	atomic64_add(delta, &counter->count);

	/*
	 * See if the total period for this counter has expired,
	 * and update for the next period.
	 */
	val = 0;
	left = atomic64_read(&counter->hw.period_left) - delta;
	if (period) {
		if (left <= 0) {
			left += period;
			if (left <= 0)
				left = period;
			record = 1;
		}
		if (left < 0x80000000L)
			val = 0x80000000L - left;
	}

	/*
	 * Finally record data if requested.
	 */
	if (record) {
		struct perf_sample_data data = {
			.regs	= regs,
			.addr	= 0,
			.period	= counter->hw.last_period,
		};

		if (counter->attr.sample_type & PERF_SAMPLE_ADDR) {
			/*
			 * The user wants a data address recorded.
			 * If we're not doing instruction sampling,
			 * give them the SDAR (sampled data address).
			 * If we are doing instruction sampling, then only
			 * give them the SDAR if it corresponds to the
			 * instruction pointed to by SIAR; this is indicated
			 * by the [POWER6_]MMCRA_SDSYNC bit in MMCRA.
			 */
			mmcra = regs->dsisr;
			sdsync = (ppmu->flags & PPMU_ALT_SIPR) ?
				POWER6_MMCRA_SDSYNC : MMCRA_SDSYNC;
			if (!(mmcra & MMCRA_SAMPLE_ENABLE) || (mmcra & sdsync))
				data.addr = mfspr(SPRN_SDAR);
		}
		if (perf_counter_overflow(counter, nmi, &data)) {
			/*
			 * Interrupts are coming too fast - throttle them
			 * by setting the counter to 0, so it will be
			 * at least 2^30 cycles until the next interrupt
			 * (assuming each counter counts at most 2 counts
			 * per cycle).
			 */
			val = 0;
			left = ~0ULL >> 1;
		}
	}

	write_pmc(counter->hw.idx, val);
	atomic64_set(&counter->hw.prev_count, val);
	atomic64_set(&counter->hw.period_left, left);
	perf_counter_update_userpage(counter);
}
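
/*
 * Worked example of the period update above: with period = 1000 and
 * an overshoot of 3 (delta exceeds the remaining period_left by 3),
 * left becomes -3, left += period gives 997, and the PMC restarts at
 * 0x80000000 - 997, so the next interrupt comes after 997 counts and
 * the long-run rate stays at one sample per 1000 events.
 */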

/*
 * Called from generic code to get the misc flags (i.e. processor mode)
 * for an event.
 */
unsigned long perf_misc_flags(struct pt_regs *regs)
{
	unsigned long mmcra;

	if (TRAP(regs) != 0xf00) {
		/* not a PMU interrupt */
		return user_mode(regs) ? PERF_EVENT_MISC_USER :
			PERF_EVENT_MISC_KERNEL;
	}

	mmcra = regs->dsisr;
	if (ppmu->flags & PPMU_ALT_SIPR) {
		if (mmcra & POWER6_MMCRA_SIHV)
			return PERF_EVENT_MISC_HYPERVISOR;
		return (mmcra & POWER6_MMCRA_SIPR) ? PERF_EVENT_MISC_USER :
			PERF_EVENT_MISC_KERNEL;
	}
	if (mmcra & MMCRA_SIHV)
		return PERF_EVENT_MISC_HYPERVISOR;
	return (mmcra & MMCRA_SIPR) ? PERF_EVENT_MISC_USER :
		PERF_EVENT_MISC_KERNEL;
}

/*
 * Called from generic code to get the instruction pointer
 * for an event.
 */
unsigned long perf_instruction_pointer(struct pt_regs *regs)
{
	unsigned long mmcra;
	unsigned long ip;
	unsigned long slot;

	if (TRAP(regs) != 0xf00)
		return regs->nip;	/* not a PMU interrupt */

	ip = mfspr(SPRN_SIAR);
	mmcra = regs->dsisr;
	if ((mmcra & MMCRA_SAMPLE_ENABLE) && !(ppmu->flags & PPMU_ALT_SIPR)) {
		slot = (mmcra & MMCRA_SLOT) >> MMCRA_SLOT_SHIFT;
		if (slot > 1)
			ip += 4 * (slot - 1);
	}
	return ip;
}
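
/*
 * The slot adjustment above reflects how instruction sampling reports
 * addresses: SIAR holds the address of the first instruction of the
 * sampled dispatch group, and the MMCRA slot field identifies which
 * instruction in the group was actually sampled, each instruction
 * being 4 bytes, hence ip += 4 * (slot - 1).
 */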

/*
 * Performance monitor interrupt stuff
 */
static void perf_counter_interrupt(struct pt_regs *regs)
{
	int i;
	struct cpu_hw_counters *cpuhw = &__get_cpu_var(cpu_hw_counters);
	struct perf_counter *counter;
	long val;
	int found = 0;
	int nmi;

	if (cpuhw->n_limited)
		freeze_limited_counters(cpuhw, mfspr(SPRN_PMC5),
					mfspr(SPRN_PMC6));

	/*
	 * Overload regs->dsisr to store MMCRA so we only need to read it once.
	 */
	regs->dsisr = mfspr(SPRN_MMCRA);

	/*
	 * If interrupts were soft-disabled when this PMU interrupt
	 * occurred, treat it as an NMI.
	 */
	nmi = !regs->softe;
	if (nmi)
		nmi_enter();
	else
		irq_enter();

	for (i = 0; i < cpuhw->n_counters; ++i) {
		counter = cpuhw->counter[i];
		if (!counter->hw.idx || is_limited_pmc(counter->hw.idx))
			continue;
		val = read_pmc(counter->hw.idx);
		if ((int)val < 0) {
			/* counter has overflowed */
			found = 1;
			record_and_restart(counter, val, regs, nmi);
		}
	}

	/*
	 * In case we didn't find and reset the counter that caused
	 * the interrupt, scan all counters and reset any that are
	 * negative, to avoid getting continual interrupts.
	 * Any that we processed in the previous loop will not be negative.
	 */
	if (!found) {
		for (i = 0; i < ppmu->n_counter; ++i) {
			if (is_limited_pmc(i + 1))
				continue;
			val = read_pmc(i + 1);
			if ((int)val < 0)
				write_pmc(i + 1, 0);
		}
	}

	/*
	 * Reset MMCR0 to its normal value.  This will set PMXE and
	 * clear FC (freeze counters) and PMAO (perf mon alert occurred)
	 * and thus allow interrupts to occur again.
	 * XXX might want to use MSR.PM to keep the counters frozen until
	 * we get back out of this interrupt.
	 */
	write_mmcr0(cpuhw, cpuhw->mmcr[0]);

	if (nmi)
		nmi_exit();
	else
		irq_exit();
}

void hw_perf_counter_setup(int cpu)
{
	struct cpu_hw_counters *cpuhw = &per_cpu(cpu_hw_counters, cpu);

	memset(cpuhw, 0, sizeof(*cpuhw));
	cpuhw->mmcr[0] = MMCR0_FC;
}

int register_power_pmu(struct power_pmu *pmu)
{
	if (ppmu)
		return -EBUSY;		/* something's already registered */

	ppmu = pmu;
	pr_info("%s performance monitor hardware support registered\n",
		pmu->name);

	/*
	 * Use FCHV to ignore kernel events if MSR.HV is set.
	 */
	if (mfmsr() & MSR_HV)
		freeze_counters_kernel = MMCR0_FCHV;

	return 0;
}
;