perf_counter/powerpc: Fix oops on cpus without perf_counter hardware support
arch/powerpc/kernel/perf_counter.c

/*
 * Performance counter support - powerpc architecture code
 *
 * Copyright 2008-2009 Paul Mackerras, IBM Corporation.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/perf_counter.h>
#include <linux/percpu.h>
#include <linux/hardirq.h>
#include <asm/reg.h>
#include <asm/pmc.h>
#include <asm/machdep.h>
#include <asm/firmware.h>
#include <asm/ptrace.h>

struct cpu_hw_counters {
	int n_counters;
	int n_percpu;
	int disabled;
	int n_added;
	int n_limited;
	u8  pmcs_enabled;
	struct perf_counter *counter[MAX_HWCOUNTERS];
	u64 events[MAX_HWCOUNTERS];
	unsigned int flags[MAX_HWCOUNTERS];
	unsigned long mmcr[3];
	struct perf_counter *limited_counter[MAX_LIMITED_HWCOUNTERS];
	u8  limited_hwidx[MAX_LIMITED_HWCOUNTERS];
};
DEFINE_PER_CPU(struct cpu_hw_counters, cpu_hw_counters);

struct power_pmu *ppmu;

/*
 * Normally, to ignore kernel events we set the FCS (freeze counters
 * in supervisor mode) bit in MMCR0, but if the kernel runs with the
 * hypervisor bit set in the MSR, or if we are running on a processor
 * where the hypervisor bit is forced to 1 (as on Apple G5 processors),
 * then we need to use the FCHV bit to ignore kernel events.
 */
static unsigned int freeze_counters_kernel = MMCR0_FCS;

/*
 * 32-bit doesn't have MMCRA but does have an MMCR2,
 * and a few other names are different.
 */
#ifdef CONFIG_PPC32

#define MMCR0_FCHV		0
#define MMCR0_PMCjCE		MMCR0_PMCnCE

#define SPRN_MMCRA		SPRN_MMCR2
#define MMCRA_SAMPLE_ENABLE	0

static inline unsigned long perf_ip_adjust(struct pt_regs *regs)
{
	return 0;
}
static inline void perf_set_pmu_inuse(int inuse) { }
static inline void perf_get_data_addr(struct pt_regs *regs, u64 *addrp) { }
static inline u32 perf_get_misc_flags(struct pt_regs *regs)
{
	return 0;
}
static inline void perf_read_regs(struct pt_regs *regs) { }
static inline int perf_intr_is_nmi(struct pt_regs *regs)
{
	return 0;
}

#endif /* CONFIG_PPC32 */

/*
 * Things that are specific to 64-bit implementations.
 */
#ifdef CONFIG_PPC64

static inline unsigned long perf_ip_adjust(struct pt_regs *regs)
{
	unsigned long mmcra = regs->dsisr;

	if ((mmcra & MMCRA_SAMPLE_ENABLE) && !(ppmu->flags & PPMU_ALT_SIPR)) {
		unsigned long slot = (mmcra & MMCRA_SLOT) >> MMCRA_SLOT_SHIFT;
		if (slot > 1)
			return 4 * (slot - 1);
	}
	return 0;
}
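
/*
 * Worked example (illustrative, not from the original source): with
 * continuous sampling enabled, SIAR points at the first instruction
 * of the sampled group, and the MMCRA slot field identifies which
 * 4-byte slot within the group actually completed.  A slot value of
 * 3 adjusts the IP by 4 * (3 - 1) = 8 bytes past SIAR.
 */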

static inline void perf_set_pmu_inuse(int inuse)
{
	get_lppaca()->pmcregs_in_use = inuse;
}

/*
 * The user wants a data address recorded.
 * If we're not doing instruction sampling, give them the SDAR
 * (sampled data address).  If we are doing instruction sampling, then
 * only give them the SDAR if it corresponds to the instruction
 * pointed to by SIAR; this is indicated by the [POWER6_]MMCRA_SDSYNC
 * bit in MMCRA.
 */
static inline void perf_get_data_addr(struct pt_regs *regs, u64 *addrp)
{
	unsigned long mmcra = regs->dsisr;
	unsigned long sdsync = (ppmu->flags & PPMU_ALT_SIPR) ?
		POWER6_MMCRA_SDSYNC : MMCRA_SDSYNC;

	if (!(mmcra & MMCRA_SAMPLE_ENABLE) || (mmcra & sdsync))
		*addrp = mfspr(SPRN_SDAR);
}

static inline u32 perf_get_misc_flags(struct pt_regs *regs)
{
	unsigned long mmcra = regs->dsisr;

	if (TRAP(regs) != 0xf00)
		return 0;	/* not a PMU interrupt */

	if (ppmu->flags & PPMU_ALT_SIPR) {
		if (mmcra & POWER6_MMCRA_SIHV)
			return PERF_EVENT_MISC_HYPERVISOR;
		return (mmcra & POWER6_MMCRA_SIPR) ?
			PERF_EVENT_MISC_USER : PERF_EVENT_MISC_KERNEL;
	}
	if (mmcra & MMCRA_SIHV)
		return PERF_EVENT_MISC_HYPERVISOR;
	return (mmcra & MMCRA_SIPR) ? PERF_EVENT_MISC_USER :
		PERF_EVENT_MISC_KERNEL;
}

/*
 * Overload regs->dsisr to store MMCRA so we only need to read it once
 * on each interrupt.
 */
static inline void perf_read_regs(struct pt_regs *regs)
{
	regs->dsisr = mfspr(SPRN_MMCRA);
}

/*
 * If interrupts were soft-disabled when a PMU interrupt occurs, treat
 * it as an NMI.
 */
static inline int perf_intr_is_nmi(struct pt_regs *regs)
{
	return !regs->softe;
}

#endif /* CONFIG_PPC64 */

static void perf_counter_interrupt(struct pt_regs *regs);

void perf_counter_print_debug(void)
{
}

/*
 * Read one performance monitor counter (PMC).
 */
static unsigned long read_pmc(int idx)
{
	unsigned long val;

	switch (idx) {
	case 1:
		val = mfspr(SPRN_PMC1);
		break;
	case 2:
		val = mfspr(SPRN_PMC2);
		break;
	case 3:
		val = mfspr(SPRN_PMC3);
		break;
	case 4:
		val = mfspr(SPRN_PMC4);
		break;
	case 5:
		val = mfspr(SPRN_PMC5);
		break;
	case 6:
		val = mfspr(SPRN_PMC6);
		break;
#ifdef CONFIG_PPC64
	case 7:
		val = mfspr(SPRN_PMC7);
		break;
	case 8:
		val = mfspr(SPRN_PMC8);
		break;
#endif /* CONFIG_PPC64 */
	default:
		printk(KERN_ERR "oops trying to read PMC%d\n", idx);
		val = 0;
	}
	return val;
}

/*
 * Write one PMC.
 */
static void write_pmc(int idx, unsigned long val)
{
	switch (idx) {
	case 1:
		mtspr(SPRN_PMC1, val);
		break;
	case 2:
		mtspr(SPRN_PMC2, val);
		break;
	case 3:
		mtspr(SPRN_PMC3, val);
		break;
	case 4:
		mtspr(SPRN_PMC4, val);
		break;
	case 5:
		mtspr(SPRN_PMC5, val);
		break;
	case 6:
		mtspr(SPRN_PMC6, val);
		break;
#ifdef CONFIG_PPC64
	case 7:
		mtspr(SPRN_PMC7, val);
		break;
	case 8:
		mtspr(SPRN_PMC8, val);
		break;
#endif /* CONFIG_PPC64 */
	default:
		printk(KERN_ERR "oops trying to write PMC%d\n", idx);
	}
}

/*
 * Check if a set of events can all go on the PMU at once.
 * If they can't, this will look at alternative codes for the events
 * and see if any combination of alternative codes is feasible.
 * The feasible set is returned in event[].
 */
static int power_check_constraints(u64 event[], unsigned int cflags[],
				   int n_ev)
{
	unsigned long mask, value, nv;
	u64 alternatives[MAX_HWCOUNTERS][MAX_EVENT_ALTERNATIVES];
	unsigned long amasks[MAX_HWCOUNTERS][MAX_EVENT_ALTERNATIVES];
	unsigned long avalues[MAX_HWCOUNTERS][MAX_EVENT_ALTERNATIVES];
	unsigned long smasks[MAX_HWCOUNTERS], svalues[MAX_HWCOUNTERS];
	int n_alt[MAX_HWCOUNTERS], choice[MAX_HWCOUNTERS];
	int i, j;
	unsigned long addf = ppmu->add_fields;
	unsigned long tadd = ppmu->test_adder;

	if (n_ev > ppmu->n_counter)
		return -1;

	/* First see if the events will go on as-is */
	for (i = 0; i < n_ev; ++i) {
		if ((cflags[i] & PPMU_LIMITED_PMC_REQD)
		    && !ppmu->limited_pmc_event(event[i])) {
			ppmu->get_alternatives(event[i], cflags[i],
					       alternatives[i]);
			event[i] = alternatives[i][0];
		}
		if (ppmu->get_constraint(event[i], &amasks[i][0],
					 &avalues[i][0]))
			return -1;
	}
	value = mask = 0;
	for (i = 0; i < n_ev; ++i) {
		nv = (value | avalues[i][0]) + (value & avalues[i][0] & addf);
		if ((((nv + tadd) ^ value) & mask) != 0 ||
		    (((nv + tadd) ^ avalues[i][0]) & amasks[i][0]) != 0)
			break;
		value = nv;
		mask |= amasks[i][0];
	}
	if (i == n_ev)
		return 0;	/* all OK */

	/* doesn't work, gather alternatives... */
	if (!ppmu->get_alternatives)
		return -1;
	for (i = 0; i < n_ev; ++i) {
		choice[i] = 0;
		n_alt[i] = ppmu->get_alternatives(event[i], cflags[i],
						  alternatives[i]);
		for (j = 1; j < n_alt[i]; ++j)
			ppmu->get_constraint(alternatives[i][j],
					     &amasks[i][j], &avalues[i][j]);
	}

	/* enumerate all possibilities and see if any will work */
	i = 0;
	j = -1;
	value = mask = nv = 0;
	while (i < n_ev) {
		if (j >= 0) {
			/* we're backtracking, restore context */
			value = svalues[i];
			mask = smasks[i];
			j = choice[i];
		}
		/*
		 * See if any alternative k for event i,
		 * where k > j, will satisfy the constraints.
		 */
		while (++j < n_alt[i]) {
			nv = (value | avalues[i][j]) +
				(value & avalues[i][j] & addf);
			if ((((nv + tadd) ^ value) & mask) == 0 &&
			    (((nv + tadd) ^ avalues[i][j])
			     & amasks[i][j]) == 0)
				break;
		}
		if (j >= n_alt[i]) {
			/*
			 * No feasible alternative, backtrack
			 * to event i-1 and continue enumerating its
			 * alternatives from where we got up to.
			 */
			if (--i < 0)
				return -1;
		} else {
			/*
			 * Found a feasible alternative for event i,
			 * remember where we got up to with this event,
			 * go on to the next event, and start with
			 * the first alternative for it.
			 */
			choice[i] = j;
			svalues[i] = value;
			smasks[i] = mask;
			value = nv;
			mask |= amasks[i][j];
			++i;
			j = -1;
		}
	}

	/* OK, we have a feasible combination, tell the caller the solution */
	for (i = 0; i < n_ev; ++i)
		event[i] = alternatives[i][choice[i]];
	return 0;
}
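
/*
 * Worked example (illustrative, using a hypothetical encoding): a PMU
 * might devote a small field of the constraint value to "number of
 * events wanting PMC1", with add_fields supplying the per-event
 * increment and test_adder biasing the field so that exceeding its
 * capacity carries into a bit covered by the constraint mask.  The
 * masked XOR tests above then fail for the offending event, and the
 * solver backtracks to try alternative event codes instead.
 */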

/*
 * Check if newly-added counters have consistent settings for
 * exclude_{user,kernel,hv} with each other and any previously
 * added counters.
 */
static int check_excludes(struct perf_counter **ctrs, unsigned int cflags[],
			  int n_prev, int n_new)
{
	int eu = 0, ek = 0, eh = 0;
	int i, n, first;
	struct perf_counter *counter;

	n = n_prev + n_new;
	if (n <= 1)
		return 0;

	first = 1;
	for (i = 0; i < n; ++i) {
		if (cflags[i] & PPMU_LIMITED_PMC_OK) {
			cflags[i] &= ~PPMU_LIMITED_PMC_REQD;
			continue;
		}
		counter = ctrs[i];
		if (first) {
			eu = counter->attr.exclude_user;
			ek = counter->attr.exclude_kernel;
			eh = counter->attr.exclude_hv;
			first = 0;
		} else if (counter->attr.exclude_user != eu ||
			   counter->attr.exclude_kernel != ek ||
			   counter->attr.exclude_hv != eh) {
			return -EAGAIN;
		}
	}

	if (eu || ek || eh)
		for (i = 0; i < n; ++i)
			if (cflags[i] & PPMU_LIMITED_PMC_OK)
				cflags[i] |= PPMU_LIMITED_PMC_REQD;

	return 0;
}
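
/*
 * For illustration: the exclude bits are implemented with the global
 * MMCR0 freeze bits (FCP, FCS/FCHV), which affect all PMCs at once.
 * So a counter with exclude_kernel set and another with it clear can
 * never count at the same time, and the mismatch is rejected here
 * with -EAGAIN.
 */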

static void power_pmu_read(struct perf_counter *counter)
{
	s64 val, delta, prev;

	if (!counter->hw.idx)
		return;
	/*
	 * Performance monitor interrupts come even when interrupts
	 * are soft-disabled, as long as interrupts are hard-enabled.
	 * Therefore we treat them like NMIs.
	 */
	do {
		prev = atomic64_read(&counter->hw.prev_count);
		barrier();
		val = read_pmc(counter->hw.idx);
	} while (atomic64_cmpxchg(&counter->hw.prev_count, prev, val) != prev);

	/* The counters are only 32 bits wide */
	delta = (val - prev) & 0xfffffffful;
	atomic64_add(delta, &counter->count);
	atomic64_sub(delta, &counter->hw.period_left);
}
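
/*
 * Worked example (illustrative): masking the difference to 32 bits
 * makes PMC wraparound harmless.  With prev = 0xfffffff0 and
 * val = 0x10, (val - prev) is negative as an s64, but
 * (val - prev) & 0xfffffffful yields 0x20, the number of counts
 * that actually elapsed.
 */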

/*
 * On some machines, PMC5 and PMC6 can't be written, don't respect
 * the freeze conditions, and don't generate interrupts.  This tells
 * us if `counter' is using such a PMC.
 */
static int is_limited_pmc(int pmcnum)
{
	return (ppmu->flags & PPMU_LIMITED_PMC5_6)
		&& (pmcnum == 5 || pmcnum == 6);
}

static void freeze_limited_counters(struct cpu_hw_counters *cpuhw,
				    unsigned long pmc5, unsigned long pmc6)
{
	struct perf_counter *counter;
	u64 val, prev, delta;
	int i;

	for (i = 0; i < cpuhw->n_limited; ++i) {
		counter = cpuhw->limited_counter[i];
		if (!counter->hw.idx)
			continue;
		val = (counter->hw.idx == 5) ? pmc5 : pmc6;
		prev = atomic64_read(&counter->hw.prev_count);
		counter->hw.idx = 0;
		delta = (val - prev) & 0xfffffffful;
		atomic64_add(delta, &counter->count);
	}
}

static void thaw_limited_counters(struct cpu_hw_counters *cpuhw,
				  unsigned long pmc5, unsigned long pmc6)
{
	struct perf_counter *counter;
	u64 val;
	int i;

	for (i = 0; i < cpuhw->n_limited; ++i) {
		counter = cpuhw->limited_counter[i];
		counter->hw.idx = cpuhw->limited_hwidx[i];
		val = (counter->hw.idx == 5) ? pmc5 : pmc6;
		atomic64_set(&counter->hw.prev_count, val);
		perf_counter_update_userpage(counter);
	}
}

/*
 * Since limited counters don't respect the freeze conditions, we
 * have to read them immediately after freezing or unfreezing the
 * other counters.  We try to keep the values from the limited
 * counters as consistent as possible by keeping the delay (in
 * cycles and instructions) between freezing/unfreezing and reading
 * the limited counters as small and consistent as possible.
 * Therefore, if any limited counters are in use, we read them
 * both, and always in the same order, to minimize variability,
 * and do it inside the same asm that writes MMCR0.
 */
static void write_mmcr0(struct cpu_hw_counters *cpuhw, unsigned long mmcr0)
{
	unsigned long pmc5, pmc6;

	if (!cpuhw->n_limited) {
		mtspr(SPRN_MMCR0, mmcr0);
		return;
	}

	/*
	 * Write MMCR0, then read PMC5 and PMC6 immediately.
	 * To ensure we don't get a performance monitor interrupt
	 * between writing MMCR0 and freezing/thawing the limited
	 * counters, we first write MMCR0 with the counter overflow
	 * interrupt enable bits turned off.
	 */
	asm volatile("mtspr %3,%2; mfspr %0,%4; mfspr %1,%5"
		     : "=&r" (pmc5), "=&r" (pmc6)
		     : "r" (mmcr0 & ~(MMCR0_PMC1CE | MMCR0_PMCjCE)),
		       "i" (SPRN_MMCR0),
		       "i" (SPRN_PMC5), "i" (SPRN_PMC6));

	if (mmcr0 & MMCR0_FC)
		freeze_limited_counters(cpuhw, pmc5, pmc6);
	else
		thaw_limited_counters(cpuhw, pmc5, pmc6);

	/*
	 * Write the full MMCR0 including the counter overflow interrupt
	 * enable bits, if necessary.
	 */
	if (mmcr0 & (MMCR0_PMC1CE | MMCR0_PMCjCE))
		mtspr(SPRN_MMCR0, mmcr0);
}
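
/*
 * For illustration: emitting the mtspr and both mfspr instructions
 * from a single asm statement keeps them back-to-back.  With separate
 * mtspr()/mfspr() calls the compiler could schedule other instructions
 * between them, widening the window between freezing/thawing and
 * sampling PMC5/PMC6.
 */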

/*
 * Disable all counters to prevent PMU interrupts and to allow
 * counters to be added or removed.
 */
void hw_perf_disable(void)
{
	struct cpu_hw_counters *cpuhw;
	unsigned long flags;

	if (!ppmu)
		return;
	local_irq_save(flags);
	cpuhw = &__get_cpu_var(cpu_hw_counters);

	if (!cpuhw->disabled) {
		cpuhw->disabled = 1;
		cpuhw->n_added = 0;

		/*
		 * Check if we ever enabled the PMU on this cpu.
		 */
		if (!cpuhw->pmcs_enabled) {
			if (ppc_md.enable_pmcs)
				ppc_md.enable_pmcs();
			cpuhw->pmcs_enabled = 1;
		}

		/*
		 * Disable instruction sampling if it was enabled
		 */
		if (cpuhw->mmcr[2] & MMCRA_SAMPLE_ENABLE) {
			mtspr(SPRN_MMCRA,
			      cpuhw->mmcr[2] & ~MMCRA_SAMPLE_ENABLE);
			mb();
		}

		/*
		 * Set the 'freeze counters' bit.
		 * The barrier is to make sure the mtspr has been
		 * executed and the PMU has frozen the counters
		 * before we return.
		 */
		write_mmcr0(cpuhw, mfspr(SPRN_MMCR0) | MMCR0_FC);
		mb();
	}
	local_irq_restore(flags);
}

/*
 * Re-enable all counters.
 * If we were previously disabled and counters were added, then
 * put the new config on the PMU.
 */
void hw_perf_enable(void)
{
	struct perf_counter *counter;
	struct cpu_hw_counters *cpuhw;
	unsigned long flags;
	long i;
	unsigned long val;
	s64 left;
	unsigned int hwc_index[MAX_HWCOUNTERS];
	int n_lim;
	int idx;

	if (!ppmu)
		return;
	local_irq_save(flags);
	cpuhw = &__get_cpu_var(cpu_hw_counters);
	if (!cpuhw->disabled) {
		local_irq_restore(flags);
		return;
	}
	cpuhw->disabled = 0;

	/*
	 * If we didn't change anything, or only removed counters,
	 * no need to recalculate MMCR* settings and reset the PMCs.
	 * Just reenable the PMU with the current MMCR* settings
	 * (possibly updated for removal of counters).
	 */
	if (!cpuhw->n_added) {
		mtspr(SPRN_MMCRA, cpuhw->mmcr[2] & ~MMCRA_SAMPLE_ENABLE);
		mtspr(SPRN_MMCR1, cpuhw->mmcr[1]);
		if (cpuhw->n_counters == 0)
			perf_set_pmu_inuse(0);
		goto out_enable;
	}

	/*
	 * Compute MMCR* values for the new set of counters
	 */
	if (ppmu->compute_mmcr(cpuhw->events, cpuhw->n_counters, hwc_index,
			       cpuhw->mmcr)) {
		/* shouldn't ever get here */
		printk(KERN_ERR "oops compute_mmcr failed\n");
		goto out;
	}

	/*
	 * Add in MMCR0 freeze bits corresponding to the
	 * attr.exclude_* bits for the first counter.
	 * We have already checked that all counters have the
	 * same values for these bits as the first counter.
	 */
	counter = cpuhw->counter[0];
	if (counter->attr.exclude_user)
		cpuhw->mmcr[0] |= MMCR0_FCP;
	if (counter->attr.exclude_kernel)
		cpuhw->mmcr[0] |= freeze_counters_kernel;
	if (counter->attr.exclude_hv)
		cpuhw->mmcr[0] |= MMCR0_FCHV;

	/*
	 * Write the new configuration to MMCR* with the freeze
	 * bit set and set the hardware counters to their initial values.
	 * Then unfreeze the counters.
	 */
	perf_set_pmu_inuse(1);
	mtspr(SPRN_MMCRA, cpuhw->mmcr[2] & ~MMCRA_SAMPLE_ENABLE);
	mtspr(SPRN_MMCR1, cpuhw->mmcr[1]);
	mtspr(SPRN_MMCR0, (cpuhw->mmcr[0] & ~(MMCR0_PMC1CE | MMCR0_PMCjCE))
	      | MMCR0_FC);

	/*
	 * Read off any pre-existing counters that need to move
	 * to another PMC.
	 */
	for (i = 0; i < cpuhw->n_counters; ++i) {
		counter = cpuhw->counter[i];
		if (counter->hw.idx && counter->hw.idx != hwc_index[i] + 1) {
			power_pmu_read(counter);
			write_pmc(counter->hw.idx, 0);
			counter->hw.idx = 0;
		}
	}

	/*
	 * Initialize the PMCs for all the new and moved counters.
	 */
	cpuhw->n_limited = n_lim = 0;
	for (i = 0; i < cpuhw->n_counters; ++i) {
		counter = cpuhw->counter[i];
		if (counter->hw.idx)
			continue;
		idx = hwc_index[i] + 1;
		if (is_limited_pmc(idx)) {
			cpuhw->limited_counter[n_lim] = counter;
			cpuhw->limited_hwidx[n_lim] = idx;
			++n_lim;
			continue;
		}
		val = 0;
		if (counter->hw.sample_period) {
			left = atomic64_read(&counter->hw.period_left);
			if (left < 0x80000000L)
				val = 0x80000000L - left;
		}
		atomic64_set(&counter->hw.prev_count, val);
		counter->hw.idx = idx;
		write_pmc(idx, val);
		perf_counter_update_userpage(counter);
	}
	cpuhw->n_limited = n_lim;
	cpuhw->mmcr[0] |= MMCR0_PMXE | MMCR0_FCECE;

 out_enable:
	mb();
	write_mmcr0(cpuhw, cpuhw->mmcr[0]);

	/*
	 * Enable instruction sampling if necessary
	 */
	if (cpuhw->mmcr[2] & MMCRA_SAMPLE_ENABLE) {
		mb();
		mtspr(SPRN_MMCRA, cpuhw->mmcr[2]);
	}

 out:
	local_irq_restore(flags);
}
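
/*
 * Worked example (illustrative): a PMC raises its overflow exception
 * when bit 31 goes from 0 to 1, so a counter that should fire after
 * `left' more events is seeded with 0x80000000 - left.  For a sample
 * period of 100000, val = 0x80000000 - 0x186a0 = 0x7ffe7960, and the
 * PMC crosses 0x80000000 after exactly 100000 counts.
 */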

static int collect_events(struct perf_counter *group, int max_count,
			  struct perf_counter *ctrs[], u64 *events,
			  unsigned int *flags)
{
	int n = 0;
	struct perf_counter *counter;

	if (!is_software_counter(group)) {
		if (n >= max_count)
			return -1;
		ctrs[n] = group;
		flags[n] = group->hw.counter_base;
		events[n++] = group->hw.config;
	}
	list_for_each_entry(counter, &group->sibling_list, list_entry) {
		if (!is_software_counter(counter) &&
		    counter->state != PERF_COUNTER_STATE_OFF) {
			if (n >= max_count)
				return -1;
			ctrs[n] = counter;
			flags[n] = counter->hw.counter_base;
			events[n++] = counter->hw.config;
		}
	}
	return n;
}

static void counter_sched_in(struct perf_counter *counter, int cpu)
{
	counter->state = PERF_COUNTER_STATE_ACTIVE;
	counter->oncpu = cpu;
	counter->tstamp_running += counter->ctx->time - counter->tstamp_stopped;
	if (is_software_counter(counter))
		counter->pmu->enable(counter);
}

/*
 * Called to enable a whole group of counters.
 * Returns 1 if the group was enabled, or -EAGAIN if it could not be.
 * Assumes the caller has disabled interrupts and has
 * frozen the PMU with hw_perf_disable.
 */
int hw_perf_group_sched_in(struct perf_counter *group_leader,
			   struct perf_cpu_context *cpuctx,
			   struct perf_counter_context *ctx, int cpu)
{
	struct cpu_hw_counters *cpuhw;
	long i, n, n0;
	struct perf_counter *sub;

	if (!ppmu)
		return 0;
	cpuhw = &__get_cpu_var(cpu_hw_counters);
	n0 = cpuhw->n_counters;
	n = collect_events(group_leader, ppmu->n_counter - n0,
			   &cpuhw->counter[n0], &cpuhw->events[n0],
			   &cpuhw->flags[n0]);
	if (n < 0)
		return -EAGAIN;
	if (check_excludes(cpuhw->counter, cpuhw->flags, n0, n))
		return -EAGAIN;
	i = power_check_constraints(cpuhw->events, cpuhw->flags, n + n0);
	if (i < 0)
		return -EAGAIN;
	cpuhw->n_counters = n0 + n;
	cpuhw->n_added += n;

	/*
	 * OK, this group can go on; update counter states etc.,
	 * and enable any software counters
	 */
	for (i = n0; i < n0 + n; ++i)
		cpuhw->counter[i]->hw.config = cpuhw->events[i];
	cpuctx->active_oncpu += n;
	n = 1;
	counter_sched_in(group_leader, cpu);
	list_for_each_entry(sub, &group_leader->sibling_list, list_entry) {
		if (sub->state != PERF_COUNTER_STATE_OFF) {
			counter_sched_in(sub, cpu);
			++n;
		}
	}
	ctx->nr_active += n;

	return 1;
}

/*
 * Add a counter to the PMU.
 * If all counters are not already frozen, then we disable and
 * re-enable the PMU in order to get hw_perf_enable to do the
 * actual work of reconfiguring the PMU.
 */
static int power_pmu_enable(struct perf_counter *counter)
{
	struct cpu_hw_counters *cpuhw;
	unsigned long flags;
	int n0;
	int ret = -EAGAIN;

	local_irq_save(flags);
	perf_disable();

	/*
	 * Add the counter to the list (if there is room)
	 * and check whether the total set is still feasible.
	 */
	cpuhw = &__get_cpu_var(cpu_hw_counters);
	n0 = cpuhw->n_counters;
	if (n0 >= ppmu->n_counter)
		goto out;
	cpuhw->counter[n0] = counter;
	cpuhw->events[n0] = counter->hw.config;
	cpuhw->flags[n0] = counter->hw.counter_base;
	if (check_excludes(cpuhw->counter, cpuhw->flags, n0, 1))
		goto out;
	if (power_check_constraints(cpuhw->events, cpuhw->flags, n0 + 1))
		goto out;

	counter->hw.config = cpuhw->events[n0];
	++cpuhw->n_counters;
	++cpuhw->n_added;

	ret = 0;
 out:
	perf_enable();
	local_irq_restore(flags);
	return ret;
}

/*
 * Remove a counter from the PMU.
 */
static void power_pmu_disable(struct perf_counter *counter)
{
	struct cpu_hw_counters *cpuhw;
	long i;
	unsigned long flags;

	local_irq_save(flags);
	perf_disable();

	power_pmu_read(counter);

	cpuhw = &__get_cpu_var(cpu_hw_counters);
	for (i = 0; i < cpuhw->n_counters; ++i) {
		if (counter == cpuhw->counter[i]) {
			while (++i < cpuhw->n_counters)
				cpuhw->counter[i-1] = cpuhw->counter[i];
			--cpuhw->n_counters;
			ppmu->disable_pmc(counter->hw.idx - 1, cpuhw->mmcr);
			if (counter->hw.idx) {
				write_pmc(counter->hw.idx, 0);
				counter->hw.idx = 0;
			}
			perf_counter_update_userpage(counter);
			break;
		}
	}
	for (i = 0; i < cpuhw->n_limited; ++i)
		if (counter == cpuhw->limited_counter[i])
			break;
	if (i < cpuhw->n_limited) {
		while (++i < cpuhw->n_limited) {
			cpuhw->limited_counter[i-1] = cpuhw->limited_counter[i];
			cpuhw->limited_hwidx[i-1] = cpuhw->limited_hwidx[i];
		}
		--cpuhw->n_limited;
	}
	if (cpuhw->n_counters == 0) {
		/* disable exceptions if no counters are running */
		cpuhw->mmcr[0] &= ~(MMCR0_PMXE | MMCR0_FCECE);
	}

	perf_enable();
	local_irq_restore(flags);
}

/*
 * Re-enable interrupts on a counter after they were throttled
 * because they were coming too fast.
 */
static void power_pmu_unthrottle(struct perf_counter *counter)
{
	s64 val, left;
	unsigned long flags;

	if (!counter->hw.idx || !counter->hw.sample_period)
		return;
	local_irq_save(flags);
	perf_disable();
	power_pmu_read(counter);
	left = counter->hw.sample_period;
	counter->hw.last_period = left;
	val = 0;
	if (left < 0x80000000L)
		val = 0x80000000L - left;
	write_pmc(counter->hw.idx, val);
	atomic64_set(&counter->hw.prev_count, val);
	atomic64_set(&counter->hw.period_left, left);
	perf_counter_update_userpage(counter);
	perf_enable();
	local_irq_restore(flags);
}

struct pmu power_pmu = {
	.enable		= power_pmu_enable,
	.disable	= power_pmu_disable,
	.read		= power_pmu_read,
	.unthrottle	= power_pmu_unthrottle,
};

/*
 * Return 1 if we might be able to put counter on a limited PMC,
 * or 0 if not.
 * A counter can only go on a limited PMC if it counts something
 * that a limited PMC can count, doesn't require interrupts, and
 * doesn't exclude any processor mode.
 */
static int can_go_on_limited_pmc(struct perf_counter *counter, u64 ev,
				 unsigned int flags)
{
	int n;
	u64 alt[MAX_EVENT_ALTERNATIVES];

	if (counter->attr.exclude_user
	    || counter->attr.exclude_kernel
	    || counter->attr.exclude_hv
	    || counter->attr.sample_period)
		return 0;

	if (ppmu->limited_pmc_event(ev))
		return 1;

	/*
	 * The requested event isn't on a limited PMC already;
	 * see if any alternative code goes on a limited PMC.
	 */
	if (!ppmu->get_alternatives)
		return 0;

	flags |= PPMU_LIMITED_PMC_OK | PPMU_LIMITED_PMC_REQD;
	n = ppmu->get_alternatives(ev, flags, alt);

	return n > 0;
}

/*
 * Find an alternative event that goes on a normal PMC, if possible,
 * and return the event code, or 0 if there is no such alternative.
 * (Note: event code 0 is "don't count" on all machines.)
 */
static u64 normal_pmc_alternative(u64 ev, unsigned long flags)
{
	u64 alt[MAX_EVENT_ALTERNATIVES];
	int n;

	flags &= ~(PPMU_LIMITED_PMC_OK | PPMU_LIMITED_PMC_REQD);
	n = ppmu->get_alternatives(ev, flags, alt);
	if (!n)
		return 0;
	return alt[0];
}

/* Number of perf_counters counting hardware events */
static atomic_t num_counters;
/* Used to avoid races in calling reserve/release_pmc_hardware */
static DEFINE_MUTEX(pmc_reserve_mutex);

/*
 * Release the PMU if this is the last perf_counter.
 */
static void hw_perf_counter_destroy(struct perf_counter *counter)
{
	if (!atomic_add_unless(&num_counters, -1, 1)) {
		mutex_lock(&pmc_reserve_mutex);
		if (atomic_dec_return(&num_counters) == 0)
			release_pmc_hardware();
		mutex_unlock(&pmc_reserve_mutex);
	}
}
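
/*
 * For illustration: atomic_add_unless(&num_counters, -1, 1) decrements
 * the count unless it is exactly 1, so only the destroyer of the last
 * counter falls through to the slow path and takes pmc_reserve_mutex;
 * every other release stays lock-free.
 */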

/*
 * Translate a generic cache event config to a raw event code.
 */
static int hw_perf_cache_event(u64 config, u64 *eventp)
{
	unsigned long type, op, result;
	int ev;

	if (!ppmu->cache_events)
		return -EINVAL;

	/* unpack config */
	type = config & 0xff;
	op = (config >> 8) & 0xff;
	result = (config >> 16) & 0xff;

	if (type >= PERF_COUNT_HW_CACHE_MAX ||
	    op >= PERF_COUNT_HW_CACHE_OP_MAX ||
	    result >= PERF_COUNT_HW_CACHE_RESULT_MAX)
		return -EINVAL;

	ev = (*ppmu->cache_events)[type][op][result];
	if (ev == 0)
		return -EOPNOTSUPP;
	if (ev == -1)
		return -EINVAL;
	*eventp = ev;
	return 0;
}
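
/*
 * Worked example (illustrative): a config of 0x010100 unpacks to
 * type = 0x00 (PERF_COUNT_HW_CACHE_L1D), op = 0x01
 * (PERF_COUNT_HW_CACHE_OP_WRITE) and result = 0x01
 * (PERF_COUNT_HW_CACHE_RESULT_MISS), i.e. L1 data cache write misses,
 * which is then looked up in the per-PMU cache_events table.
 */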

const struct pmu *hw_perf_counter_init(struct perf_counter *counter)
{
	u64 ev;
	unsigned long flags;
	struct perf_counter *ctrs[MAX_HWCOUNTERS];
	u64 events[MAX_HWCOUNTERS];
	unsigned int cflags[MAX_HWCOUNTERS];
	int n;
	int err;

	if (!ppmu)
		return ERR_PTR(-ENXIO);
	switch (counter->attr.type) {
	case PERF_TYPE_HARDWARE:
		ev = counter->attr.config;
		if (ev >= ppmu->n_generic || ppmu->generic_events[ev] == 0)
			return ERR_PTR(-EOPNOTSUPP);
		ev = ppmu->generic_events[ev];
		break;
	case PERF_TYPE_HW_CACHE:
		err = hw_perf_cache_event(counter->attr.config, &ev);
		if (err)
			return ERR_PTR(err);
		break;
	case PERF_TYPE_RAW:
		ev = counter->attr.config;
		break;
	default:
		return ERR_PTR(-EINVAL);
	}
	counter->hw.config_base = ev;
	counter->hw.idx = 0;

	/*
	 * If we are not running on a hypervisor, force the
	 * exclude_hv bit to 0 so that we don't care what
	 * the user set it to.
	 */
	if (!firmware_has_feature(FW_FEATURE_LPAR))
		counter->attr.exclude_hv = 0;

	/*
	 * If this is a per-task counter, then we can use
	 * PM_RUN_* events interchangeably with their non RUN_*
	 * equivalents, e.g. PM_RUN_CYC instead of PM_CYC.
	 * XXX we should check if the task is an idle task.
	 */
	flags = 0;
	if (counter->ctx->task)
		flags |= PPMU_ONLY_COUNT_RUN;

	/*
	 * If this machine has limited counters, check whether this
	 * event could go on a limited counter.
	 */
	if (ppmu->flags & PPMU_LIMITED_PMC5_6) {
		if (can_go_on_limited_pmc(counter, ev, flags)) {
			flags |= PPMU_LIMITED_PMC_OK;
		} else if (ppmu->limited_pmc_event(ev)) {
			/*
			 * The requested event is on a limited PMC,
			 * but we can't use a limited PMC; see if any
			 * alternative goes on a normal PMC.
			 */
			ev = normal_pmc_alternative(ev, flags);
			if (!ev)
				return ERR_PTR(-EINVAL);
		}
	}

	/*
	 * If this is in a group, check if it can go on with all the
	 * other hardware counters in the group.  We assume the counter
	 * hasn't been linked into its leader's sibling list at this point.
	 */
	n = 0;
	if (counter->group_leader != counter) {
		n = collect_events(counter->group_leader, ppmu->n_counter - 1,
				   ctrs, events, cflags);
		if (n < 0)
			return ERR_PTR(-EINVAL);
	}
	events[n] = ev;
	ctrs[n] = counter;
	cflags[n] = flags;
	if (check_excludes(ctrs, cflags, n, 1))
		return ERR_PTR(-EINVAL);
	if (power_check_constraints(events, cflags, n + 1))
		return ERR_PTR(-EINVAL);

	counter->hw.config = events[n];
	counter->hw.counter_base = cflags[n];
	counter->hw.last_period = counter->hw.sample_period;
	atomic64_set(&counter->hw.period_left, counter->hw.last_period);

	/*
	 * See if we need to reserve the PMU.
	 * If no counters are currently in use, then we have to take a
	 * mutex to ensure that we don't race with another task doing
	 * reserve_pmc_hardware or release_pmc_hardware.
	 */
	err = 0;
	if (!atomic_inc_not_zero(&num_counters)) {
		mutex_lock(&pmc_reserve_mutex);
		if (atomic_read(&num_counters) == 0 &&
		    reserve_pmc_hardware(perf_counter_interrupt))
			err = -EBUSY;
		else
			atomic_inc(&num_counters);
		mutex_unlock(&pmc_reserve_mutex);
	}
	counter->destroy = hw_perf_counter_destroy;

	if (err)
		return ERR_PTR(err);
	return &power_pmu;
}

/*
 * A counter has overflowed; update its count and record
 * things if requested.  Note that interrupts are hard-disabled
 * here so there is no possibility of being interrupted.
 */
static void record_and_restart(struct perf_counter *counter, unsigned long val,
			       struct pt_regs *regs, int nmi)
{
	u64 period = counter->hw.sample_period;
	s64 prev, delta, left;
	int record = 0;

	/* we don't have to worry about interrupts here */
	prev = atomic64_read(&counter->hw.prev_count);
	delta = (val - prev) & 0xfffffffful;
	atomic64_add(delta, &counter->count);

	/*
	 * See if the total period for this counter has expired,
	 * and update for the next period.
	 */
	val = 0;
	left = atomic64_read(&counter->hw.period_left) - delta;
	if (period) {
		if (left <= 0) {
			left += period;
			if (left <= 0)
				left = period;
			record = 1;
		}
		if (left < 0x80000000LL)
			val = 0x80000000LL - left;
	}

	/*
	 * Finally record data if requested.
	 */
	if (record) {
		struct perf_sample_data data = {
			.regs	= regs,
			.addr	= 0,
			.period	= counter->hw.last_period,
		};

		if (counter->attr.sample_type & PERF_SAMPLE_ADDR)
			perf_get_data_addr(regs, &data.addr);

		if (perf_counter_overflow(counter, nmi, &data)) {
			/*
			 * Interrupts are coming too fast - throttle them
			 * by setting the counter to 0, so it will be
			 * at least 2^30 cycles until the next interrupt
			 * (assuming each counter counts at most 2 counts
			 * per cycle).
			 */
			val = 0;
			left = ~0ULL >> 1;
		}
	}

	write_pmc(counter->hw.idx, val);
	atomic64_set(&counter->hw.prev_count, val);
	atomic64_set(&counter->hw.period_left, left);
	perf_counter_update_userpage(counter);
}
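
/*
 * For illustration: when throttling, the PMC is written with 0 rather
 * than 0x80000000 - left, so it must count all the way to 2^31 before
 * the next interrupt.  At no more than 2 counts per cycle, that is at
 * least 2^30 cycles, which is where the figure in the comment above
 * comes from.
 */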

/*
 * Called from generic code to get the misc flags (i.e. processor mode)
 * for an event.
 */
unsigned long perf_misc_flags(struct pt_regs *regs)
{
	u32 flags = perf_get_misc_flags(regs);

	if (flags)
		return flags;
	return user_mode(regs) ? PERF_EVENT_MISC_USER :
		PERF_EVENT_MISC_KERNEL;
}

/*
 * Called from generic code to get the instruction pointer
 * for an event.
 */
unsigned long perf_instruction_pointer(struct pt_regs *regs)
{
	unsigned long ip;

	if (TRAP(regs) != 0xf00)
		return regs->nip;	/* not a PMU interrupt */

	ip = mfspr(SPRN_SIAR) + perf_ip_adjust(regs);
	return ip;
}

/*
 * Performance monitor interrupt stuff
 */
static void perf_counter_interrupt(struct pt_regs *regs)
{
	int i;
	struct cpu_hw_counters *cpuhw = &__get_cpu_var(cpu_hw_counters);
	struct perf_counter *counter;
	unsigned long val;
	int found = 0;
	int nmi;

	if (cpuhw->n_limited)
		freeze_limited_counters(cpuhw, mfspr(SPRN_PMC5),
					mfspr(SPRN_PMC6));

	perf_read_regs(regs);

	nmi = perf_intr_is_nmi(regs);
	if (nmi)
		nmi_enter();
	else
		irq_enter();

	for (i = 0; i < cpuhw->n_counters; ++i) {
		counter = cpuhw->counter[i];
		if (!counter->hw.idx || is_limited_pmc(counter->hw.idx))
			continue;
		val = read_pmc(counter->hw.idx);
		if ((int)val < 0) {
			/* counter has overflowed */
			found = 1;
			record_and_restart(counter, val, regs, nmi);
		}
	}

	/*
	 * In case we didn't find and reset the counter that caused
	 * the interrupt, scan all counters and reset any that are
	 * negative, to avoid getting continual interrupts.
	 * Any that we processed in the previous loop will not be negative.
	 */
	if (!found) {
		for (i = 0; i < ppmu->n_counter; ++i) {
			if (is_limited_pmc(i + 1))
				continue;
			val = read_pmc(i + 1);
			if ((int)val < 0)
				write_pmc(i + 1, 0);
		}
	}

	/*
	 * Reset MMCR0 to its normal value.  This will set PMXE and
	 * clear FC (freeze counters) and PMAO (perf mon alert occurred)
	 * and thus allow interrupts to occur again.
	 * XXX might want to use MSR.PM to keep the counters frozen until
	 * we get back out of this interrupt.
	 */
	write_mmcr0(cpuhw, cpuhw->mmcr[0]);

	if (nmi)
		nmi_exit();
	else
		irq_exit();
}
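
/*
 * For illustration: the (int)val < 0 overflow test pairs with the
 * 0x80000000 - left seeding done elsewhere; the period expires exactly
 * when the PMC crosses into the sign bit.  A read of 0x80000005 is
 * negative as an int and indicates an overflow five counts ago.
 */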

void hw_perf_counter_setup(int cpu)
{
	struct cpu_hw_counters *cpuhw = &per_cpu(cpu_hw_counters, cpu);

	if (!ppmu)
		return;
	memset(cpuhw, 0, sizeof(*cpuhw));
	cpuhw->mmcr[0] = MMCR0_FC;
}

int register_power_pmu(struct power_pmu *pmu)
{
	if (ppmu)
		return -EBUSY;		/* something's already registered */

	ppmu = pmu;
	pr_info("%s performance monitor hardware support registered\n",
		pmu->name);

#ifdef MSR_HV
	/*
	 * Use FCHV to ignore kernel events if MSR.HV is set.
	 */
	if (mfmsr() & MSR_HV)
		freeze_counters_kernel = MMCR0_FCHV;
#endif /* MSR_HV */

	return 0;
}