/* Performance event support for sparc64.
 *
 * Copyright (C) 2009, 2010 David S. Miller <davem@davemloft.net>
 *
 * This code is based almost entirely upon the x86 perf event
 * code, which is:
 *
 *  Copyright (C) 2008 Thomas Gleixner <tglx@linutronix.de>
 *  Copyright (C) 2008-2009 Red Hat, Inc., Ingo Molnar
 *  Copyright (C) 2009 Jaswinder Singh Rajput
 *  Copyright (C) 2009 Advanced Micro Devices, Inc., Robert Richter
 *  Copyright (C) 2008-2009 Red Hat, Inc., Peter Zijlstra <pzijlstr@redhat.com>
 */
#include <linux/perf_event.h>
#include <linux/kprobes.h>
#include <linux/kernel.h>
#include <linux/kdebug.h>
#include <linux/mutex.h>

#include <asm/stacktrace.h>
#include <asm/cpudata.h>
#include <asm/uaccess.h>
#include <asm/atomic.h>
#include <asm/nmi.h>
#include <asm/pcr.h>
/* Sparc64 chips have two performance counters, 32-bits each, with
 * overflow interrupts generated on transition from 0xffffffff to 0.
 * The counters are accessed in one go using a single 64-bit register.
 *
 * Both counters are controlled using a single control register.  The
 * only way to stop all sampling is to clear all of the context (user,
 * supervisor, hypervisor) sampling enable bits.  But these bits apply
 * to both counters, thus the two counters can't be enabled/disabled
 * individually.
 *
 * The control register has two event fields, one for each of the two
 * counters.  It's thus nearly impossible to have one counter going
 * while keeping the other one stopped.  Therefore it is possible to
 * get overflow interrupts for counters not currently "in use" and
 * that condition must be checked in the overflow interrupt handler.
 *
 * So we use a hack, in that we program inactive counters with the
 * "sw_count0" and "sw_count1" events.  These count how many times
 * the instruction "sethi %hi(0xfc000), %g0" is executed.  It's an
 * unusual way to encode a NOP and therefore will not trigger in
 * normal code.
 */
#define MAX_HWEVENTS		2
#define MAX_PERIOD		((1UL << 32) - 1)

#define PIC_UPPER_INDEX		0
#define PIC_LOWER_INDEX		1
#define PIC_NO_INDEX		-1
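
/* Illustrative note (not part of the original file): the two 32-bit
 * counters live side by side in the single 64-bit %pic register, so a
 * single read recovers both of them:
 *
 *	u64 pic = <read %pic>;
 *	u32 lower = pic & 0xffffffff;	-- counter PIC_LOWER_INDEX
 *	u32 upper = pic >> 32;		-- counter PIC_UPPER_INDEX
 *
 * read_pmc() and write_pmc() below implement exactly this split.
 */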
struct cpu_hw_events {
	/* Number of events currently scheduled onto this cpu.
	 * This tells how many entries in the arrays below
	 * are valid.
	 */
	int			n_events;

	/* Number of new events added since the last hw_perf_disable().
	 * This works because the perf event layer always adds new
	 * events inside of a perf_{disable,enable}() sequence.
	 */
	int			n_added;

	/* Array of events currently scheduled on this cpu. */
	struct perf_event	*event[MAX_HWEVENTS];

	/* Array of encoded longs, specifying the %pcr register
	 * encoding and the mask of PIC counters this event can
	 * be scheduled on.  See perf_event_encode() et al.
	 */
	unsigned long		events[MAX_HWEVENTS];

	/* The current counter index assigned to an event.  When the
	 * event hasn't been programmed into the cpu yet, this will
	 * hold PIC_NO_INDEX.  The event->hw.idx value tells us where
	 * we ought to schedule the event.
	 */
	int			current_idx[MAX_HWEVENTS];

	/* Software copy of %pcr register on this cpu.  */
	u64			pcr;

	/* Enabled/disabled state of the PMU.  */
	int			enabled;

	unsigned int		group_flag;
};
static DEFINE_PER_CPU(struct cpu_hw_events, cpu_hw_events) = { .enabled = 1, };
/* An event map describes the characteristics of a performance
 * counter event.  In particular it gives the encoding as well as
 * a mask telling which counters the event can be measured on.
 */
struct perf_event_map {
	u16	encoding;
	u8	pic_mask;
#define PIC_NONE	0x00
#define PIC_UPPER	0x01
#define PIC_LOWER	0x02
};
/* Encode a perf_event_map entry into a long.  */
static unsigned long perf_event_encode(const struct perf_event_map *pmap)
{
	return ((unsigned long) pmap->encoding << 16) | pmap->pic_mask;
}

static u8 perf_event_get_msk(unsigned long val)
{
	return val & 0xff;
}

static u64 perf_event_get_enc(unsigned long val)
{
	return val >> 16;
}
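
/* Worked example (illustrative, not original text): the UltraSPARC-III
 * cache-references entry below is { 0x0009, PIC_LOWER }, so
 * perf_event_encode() produces
 *
 *	(0x0009UL << 16) | 0x02 == 0x00090002
 *
 * and perf_event_get_enc()/perf_event_get_msk() recover 0x0009 and
 * PIC_LOWER from that long again.
 */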
#define C(x) PERF_COUNT_HW_CACHE_##x

#define CACHE_OP_UNSUPPORTED	0xfffe
#define CACHE_OP_NONSENSE	0xffff

typedef struct perf_event_map cache_map_t
				[PERF_COUNT_HW_CACHE_MAX]
				[PERF_COUNT_HW_CACHE_OP_MAX]
				[PERF_COUNT_HW_CACHE_RESULT_MAX];

struct sparc_pmu {
	const struct perf_event_map	*(*event_map)(int);
	const cache_map_t		*cache_map;
	int				max_events;
	int				upper_shift;
	int				lower_shift;
	int				event_mask;
	int				hv_bit;
	int				irq_bit;
	int				upper_nop;
	int				lower_nop;
};
static const struct perf_event_map ultra3_perfmon_event_map[] = {
	[PERF_COUNT_HW_CPU_CYCLES] = { 0x0000, PIC_UPPER | PIC_LOWER },
	[PERF_COUNT_HW_INSTRUCTIONS] = { 0x0001, PIC_UPPER | PIC_LOWER },
	[PERF_COUNT_HW_CACHE_REFERENCES] = { 0x0009, PIC_LOWER },
	[PERF_COUNT_HW_CACHE_MISSES] = { 0x0009, PIC_UPPER },
};
static const struct perf_event_map *ultra3_event_map(int event_id)
{
	return &ultra3_perfmon_event_map[event_id];
}
static const cache_map_t ultra3_cache_map = {
[C(L1D)] = {
	[C(OP_READ)] = {
		[C(RESULT_ACCESS)] = { 0x09, PIC_LOWER, },
		[C(RESULT_MISS)] = { 0x09, PIC_UPPER, },
	},
	[C(OP_WRITE)] = {
		[C(RESULT_ACCESS)] = { 0x0a, PIC_LOWER },
		[C(RESULT_MISS)] = { 0x0a, PIC_UPPER },
	},
	[C(OP_PREFETCH)] = {
		[C(RESULT_ACCESS)] = { CACHE_OP_UNSUPPORTED },
		[C(RESULT_MISS)] = { CACHE_OP_UNSUPPORTED },
	},
},
[C(L1I)] = {
	[C(OP_READ)] = {
		[C(RESULT_ACCESS)] = { 0x09, PIC_LOWER, },
		[C(RESULT_MISS)] = { 0x09, PIC_UPPER, },
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = { CACHE_OP_NONSENSE },
		[ C(RESULT_MISS) ] = { CACHE_OP_NONSENSE },
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = { CACHE_OP_UNSUPPORTED },
		[ C(RESULT_MISS) ] = { CACHE_OP_UNSUPPORTED },
	},
},
[C(LL)] = {
	[C(OP_READ)] = {
		[C(RESULT_ACCESS)] = { 0x0c, PIC_LOWER, },
		[C(RESULT_MISS)] = { 0x0c, PIC_UPPER, },
	},
	[C(OP_WRITE)] = {
		[C(RESULT_ACCESS)] = { 0x0c, PIC_LOWER },
		[C(RESULT_MISS)] = { 0x0c, PIC_UPPER },
	},
	[C(OP_PREFETCH)] = {
		[C(RESULT_ACCESS)] = { CACHE_OP_UNSUPPORTED },
		[C(RESULT_MISS)] = { CACHE_OP_UNSUPPORTED },
	},
},
[C(DTLB)] = {
	[C(OP_READ)] = {
		[C(RESULT_ACCESS)] = { CACHE_OP_UNSUPPORTED },
		[C(RESULT_MISS)] = { 0x12, PIC_UPPER, },
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = { CACHE_OP_UNSUPPORTED },
		[ C(RESULT_MISS) ] = { CACHE_OP_UNSUPPORTED },
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = { CACHE_OP_UNSUPPORTED },
		[ C(RESULT_MISS) ] = { CACHE_OP_UNSUPPORTED },
	},
},
[C(ITLB)] = {
	[C(OP_READ)] = {
		[C(RESULT_ACCESS)] = { CACHE_OP_UNSUPPORTED },
		[C(RESULT_MISS)] = { 0x11, PIC_UPPER, },
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = { CACHE_OP_UNSUPPORTED },
		[ C(RESULT_MISS) ] = { CACHE_OP_UNSUPPORTED },
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = { CACHE_OP_UNSUPPORTED },
		[ C(RESULT_MISS) ] = { CACHE_OP_UNSUPPORTED },
	},
},
[C(BPU)] = {
	[C(OP_READ)] = {
		[C(RESULT_ACCESS)] = { CACHE_OP_UNSUPPORTED },
		[C(RESULT_MISS)] = { CACHE_OP_UNSUPPORTED },
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = { CACHE_OP_UNSUPPORTED },
		[ C(RESULT_MISS) ] = { CACHE_OP_UNSUPPORTED },
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = { CACHE_OP_UNSUPPORTED },
		[ C(RESULT_MISS) ] = { CACHE_OP_UNSUPPORTED },
	},
},
};
static const struct sparc_pmu ultra3_pmu = {
	.event_map	= ultra3_event_map,
	.cache_map	= &ultra3_cache_map,
	.max_events	= ARRAY_SIZE(ultra3_perfmon_event_map),
};
/* Niagara1 is very limited.  The upper PIC is hard-locked to count
 * only instructions, so it is free running which creates all kinds of
 * problems.  Some hardware designs make one wonder if the creator
 * even looked at how this stuff gets used by software.
 */
static const struct perf_event_map niagara1_perfmon_event_map[] = {
	[PERF_COUNT_HW_CPU_CYCLES] = { 0x00, PIC_UPPER },
	[PERF_COUNT_HW_INSTRUCTIONS] = { 0x00, PIC_UPPER },
	[PERF_COUNT_HW_CACHE_REFERENCES] = { 0, PIC_NONE },
	[PERF_COUNT_HW_CACHE_MISSES] = { 0x03, PIC_LOWER },
};
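
/* Illustrative note (not original text): both CPU_CYCLES and INSTRUCTIONS
 * above resolve to encoding 0x00 on PIC_UPPER only, so a group that asks
 * for both is rejected by sparc_check_constraints() -- two events whose
 * masks are each PIC_UPPER cannot be placed on different counters.
 */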
static const struct perf_event_map *niagara1_event_map(int event_id)
{
	return &niagara1_perfmon_event_map[event_id];
}
static const cache_map_t niagara1_cache_map = {
[C(L1D)] = {
	[C(OP_READ)] = {
		[C(RESULT_ACCESS)] = { CACHE_OP_UNSUPPORTED },
		[C(RESULT_MISS)] = { 0x03, PIC_LOWER, },
	},
	[C(OP_WRITE)] = {
		[C(RESULT_ACCESS)] = { CACHE_OP_UNSUPPORTED },
		[C(RESULT_MISS)] = { 0x03, PIC_LOWER, },
	},
	[C(OP_PREFETCH)] = {
		[C(RESULT_ACCESS)] = { CACHE_OP_UNSUPPORTED },
		[C(RESULT_MISS)] = { CACHE_OP_UNSUPPORTED },
	},
},
[C(L1I)] = {
	[C(OP_READ)] = {
		[C(RESULT_ACCESS)] = { 0x00, PIC_UPPER },
		[C(RESULT_MISS)] = { 0x02, PIC_LOWER, },
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = { CACHE_OP_NONSENSE },
		[ C(RESULT_MISS) ] = { CACHE_OP_NONSENSE },
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = { CACHE_OP_UNSUPPORTED },
		[ C(RESULT_MISS) ] = { CACHE_OP_UNSUPPORTED },
	},
},
[C(LL)] = {
	[C(OP_READ)] = {
		[C(RESULT_ACCESS)] = { CACHE_OP_UNSUPPORTED },
		[C(RESULT_MISS)] = { 0x07, PIC_LOWER, },
	},
	[C(OP_WRITE)] = {
		[C(RESULT_ACCESS)] = { CACHE_OP_UNSUPPORTED },
		[C(RESULT_MISS)] = { 0x07, PIC_LOWER, },
	},
	[C(OP_PREFETCH)] = {
		[C(RESULT_ACCESS)] = { CACHE_OP_UNSUPPORTED },
		[C(RESULT_MISS)] = { CACHE_OP_UNSUPPORTED },
	},
},
[C(DTLB)] = {
	[C(OP_READ)] = {
		[C(RESULT_ACCESS)] = { CACHE_OP_UNSUPPORTED },
		[C(RESULT_MISS)] = { 0x05, PIC_LOWER, },
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = { CACHE_OP_UNSUPPORTED },
		[ C(RESULT_MISS) ] = { CACHE_OP_UNSUPPORTED },
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = { CACHE_OP_UNSUPPORTED },
		[ C(RESULT_MISS) ] = { CACHE_OP_UNSUPPORTED },
	},
},
[C(ITLB)] = {
	[C(OP_READ)] = {
		[C(RESULT_ACCESS)] = { CACHE_OP_UNSUPPORTED },
		[C(RESULT_MISS)] = { 0x04, PIC_LOWER, },
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = { CACHE_OP_UNSUPPORTED },
		[ C(RESULT_MISS) ] = { CACHE_OP_UNSUPPORTED },
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = { CACHE_OP_UNSUPPORTED },
		[ C(RESULT_MISS) ] = { CACHE_OP_UNSUPPORTED },
	},
},
[C(BPU)] = {
	[C(OP_READ)] = {
		[C(RESULT_ACCESS)] = { CACHE_OP_UNSUPPORTED },
		[C(RESULT_MISS)] = { CACHE_OP_UNSUPPORTED },
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = { CACHE_OP_UNSUPPORTED },
		[ C(RESULT_MISS) ] = { CACHE_OP_UNSUPPORTED },
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = { CACHE_OP_UNSUPPORTED },
		[ C(RESULT_MISS) ] = { CACHE_OP_UNSUPPORTED },
	},
},
};
static const struct sparc_pmu niagara1_pmu = {
	.event_map	= niagara1_event_map,
	.cache_map	= &niagara1_cache_map,
	.max_events	= ARRAY_SIZE(niagara1_perfmon_event_map),
};
static const struct perf_event_map niagara2_perfmon_event_map[] = {
	[PERF_COUNT_HW_CPU_CYCLES] = { 0x02ff, PIC_UPPER | PIC_LOWER },
	[PERF_COUNT_HW_INSTRUCTIONS] = { 0x02ff, PIC_UPPER | PIC_LOWER },
	[PERF_COUNT_HW_CACHE_REFERENCES] = { 0x0208, PIC_UPPER | PIC_LOWER },
	[PERF_COUNT_HW_CACHE_MISSES] = { 0x0302, PIC_UPPER | PIC_LOWER },
	[PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = { 0x0201, PIC_UPPER | PIC_LOWER },
	[PERF_COUNT_HW_BRANCH_MISSES] = { 0x0202, PIC_UPPER | PIC_LOWER },
};
static const struct perf_event_map *niagara2_event_map(int event_id)
{
	return &niagara2_perfmon_event_map[event_id];
}
static const cache_map_t niagara2_cache_map = {
[C(L1D)] = {
	[C(OP_READ)] = {
		[C(RESULT_ACCESS)] = { 0x0208, PIC_UPPER | PIC_LOWER, },
		[C(RESULT_MISS)] = { 0x0302, PIC_UPPER | PIC_LOWER, },
	},
	[C(OP_WRITE)] = {
		[C(RESULT_ACCESS)] = { 0x0210, PIC_UPPER | PIC_LOWER, },
		[C(RESULT_MISS)] = { 0x0302, PIC_UPPER | PIC_LOWER, },
	},
	[C(OP_PREFETCH)] = {
		[C(RESULT_ACCESS)] = { CACHE_OP_UNSUPPORTED },
		[C(RESULT_MISS)] = { CACHE_OP_UNSUPPORTED },
	},
},
[C(L1I)] = {
	[C(OP_READ)] = {
		[C(RESULT_ACCESS)] = { 0x02ff, PIC_UPPER | PIC_LOWER, },
		[C(RESULT_MISS)] = { 0x0301, PIC_UPPER | PIC_LOWER, },
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = { CACHE_OP_NONSENSE },
		[ C(RESULT_MISS) ] = { CACHE_OP_NONSENSE },
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = { CACHE_OP_UNSUPPORTED },
		[ C(RESULT_MISS) ] = { CACHE_OP_UNSUPPORTED },
	},
},
[C(LL)] = {
	[C(OP_READ)] = {
		[C(RESULT_ACCESS)] = { 0x0208, PIC_UPPER | PIC_LOWER, },
		[C(RESULT_MISS)] = { 0x0330, PIC_UPPER | PIC_LOWER, },
	},
	[C(OP_WRITE)] = {
		[C(RESULT_ACCESS)] = { 0x0210, PIC_UPPER | PIC_LOWER, },
		[C(RESULT_MISS)] = { 0x0320, PIC_UPPER | PIC_LOWER, },
	},
	[C(OP_PREFETCH)] = {
		[C(RESULT_ACCESS)] = { CACHE_OP_UNSUPPORTED },
		[C(RESULT_MISS)] = { CACHE_OP_UNSUPPORTED },
	},
},
[C(DTLB)] = {
	[C(OP_READ)] = {
		[C(RESULT_ACCESS)] = { CACHE_OP_UNSUPPORTED },
		[C(RESULT_MISS)] = { 0x0b08, PIC_UPPER | PIC_LOWER, },
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = { CACHE_OP_UNSUPPORTED },
		[ C(RESULT_MISS) ] = { CACHE_OP_UNSUPPORTED },
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = { CACHE_OP_UNSUPPORTED },
		[ C(RESULT_MISS) ] = { CACHE_OP_UNSUPPORTED },
	},
},
[C(ITLB)] = {
	[C(OP_READ)] = {
		[C(RESULT_ACCESS)] = { CACHE_OP_UNSUPPORTED },
		[C(RESULT_MISS)] = { 0xb04, PIC_UPPER | PIC_LOWER, },
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = { CACHE_OP_UNSUPPORTED },
		[ C(RESULT_MISS) ] = { CACHE_OP_UNSUPPORTED },
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = { CACHE_OP_UNSUPPORTED },
		[ C(RESULT_MISS) ] = { CACHE_OP_UNSUPPORTED },
	},
},
[C(BPU)] = {
	[C(OP_READ)] = {
		[C(RESULT_ACCESS)] = { CACHE_OP_UNSUPPORTED },
		[C(RESULT_MISS)] = { CACHE_OP_UNSUPPORTED },
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = { CACHE_OP_UNSUPPORTED },
		[ C(RESULT_MISS) ] = { CACHE_OP_UNSUPPORTED },
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = { CACHE_OP_UNSUPPORTED },
		[ C(RESULT_MISS) ] = { CACHE_OP_UNSUPPORTED },
	},
},
};
static const struct sparc_pmu niagara2_pmu = {
	.event_map	= niagara2_event_map,
	.cache_map	= &niagara2_cache_map,
	.max_events	= ARRAY_SIZE(niagara2_perfmon_event_map),
};
static const struct sparc_pmu *sparc_pmu __read_mostly;
static u64 event_encoding(u64 event_id, int idx)
{
	if (idx == PIC_UPPER_INDEX)
		event_id <<= sparc_pmu->upper_shift;
	else
		event_id <<= sparc_pmu->lower_shift;
	return event_id;
}
static u64 mask_for_index(int idx)
{
	return event_encoding(sparc_pmu->event_mask, idx);
}
static u64 nop_for_index(int idx)
{
	return event_encoding(idx == PIC_UPPER_INDEX ?
			      sparc_pmu->upper_nop :
			      sparc_pmu->lower_nop, idx);
}
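
/* Sketch of how the helpers above combine (illustrative, not original
 * text): to idle counter idx without stopping the other one, its event
 * field is cleared and replaced with the chip's NOP event:
 *
 *	cpuc->pcr &= ~mask_for_index(idx);
 *	cpuc->pcr |= nop_for_index(idx);
 *	pcr_ops->write(cpuc->pcr);
 *
 * which is the pattern sparc_pmu_disable_event() below follows.
 */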
static inline void sparc_pmu_enable_event(struct cpu_hw_events *cpuc, struct hw_perf_event *hwc, int idx)
{
	u64 val, mask = mask_for_index(idx);

	val = cpuc->pcr;
	val &= ~mask;
	val |= hwc->config;
	cpuc->pcr = val;

	pcr_ops->write(cpuc->pcr);
}
static inline void sparc_pmu_disable_event(struct cpu_hw_events *cpuc, struct hw_perf_event *hwc, int idx)
{
	u64 mask = mask_for_index(idx);
	u64 nop = nop_for_index(idx);
	u64 val;

	val = cpuc->pcr;
	val &= ~mask;
	val |= nop;
	cpuc->pcr = val;

	pcr_ops->write(cpuc->pcr);
}
static u32 read_pmc(int idx)
{
	u64 val = 0;

	read_pic(val);
	if (idx == PIC_UPPER_INDEX)
		val >>= 32;

	return val & 0xffffffff;
}
static void write_pmc(int idx, u64 val)
{
	u64 shift, mask, pic;

	shift = 0;
	if (idx == PIC_UPPER_INDEX)
		shift = 32;

	mask = ((u64) 0xffffffff) << shift;
	val <<= shift;

	read_pic(pic);
	pic &= ~mask;
	pic |= val;
	write_pic(pic);
}
static u64 sparc_perf_event_update(struct perf_event *event,
				   struct hw_perf_event *hwc, int idx)
{
	int shift = 64 - 32;
	u64 prev_raw_count, new_raw_count;
	s64 delta;

again:
	prev_raw_count = atomic64_read(&hwc->prev_count);
	new_raw_count = read_pmc(idx);

	if (atomic64_cmpxchg(&hwc->prev_count, prev_raw_count,
			     new_raw_count) != prev_raw_count)
		goto again;

	delta = (new_raw_count << shift) - (prev_raw_count << shift);
	delta >>= shift;

	atomic64_add(delta, &event->count);
	atomic64_sub(delta, &hwc->period_left);

	return new_raw_count;
}
static int sparc_perf_event_set_period(struct perf_event *event,
				       struct hw_perf_event *hwc, int idx)
{
	s64 left = atomic64_read(&hwc->period_left);
	s64 period = hwc->sample_period;
	int ret = 0;

	if (unlikely(left <= -period)) {
		left = period;
		atomic64_set(&hwc->period_left, left);
		hwc->last_period = period;
		ret = 1;
	}

	if (unlikely(left <= 0)) {
		left += period;
		atomic64_set(&hwc->period_left, left);
		hwc->last_period = period;
		ret = 1;
	}
	if (left > MAX_PERIOD)
		left = MAX_PERIOD;

	atomic64_set(&hwc->prev_count, (u64)-left);

	write_pmc(idx, (u64)(-left) & 0xffffffff);

	perf_event_update_userpage(event);

	return ret;
}
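
/* Worked example (illustrative, not original text): with a requested
 * sample_period of 1000000 and a freshly reset period_left, left ends up
 * as 1000000, prev_count is set to (u64)-1000000, and the 32-bit counter
 * is programmed with (-1000000) & 0xffffffff == 0xfff0bdc0.  It therefore
 * wraps from 0xffffffff to 0 after exactly one million events, raising
 * the overflow interrupt handled in perf_event_nmi_handler().
 */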
/* If performance event entries have been added, move existing
 * events around (if necessary) and then assign new entries to
 * counters.
 */
static u64 maybe_change_configuration(struct cpu_hw_events *cpuc, u64 pcr)
{
	int i;

	if (!cpuc->n_added)
		goto out;

	/* Read in the counters which are moving.  */
	for (i = 0; i < cpuc->n_events; i++) {
		struct perf_event *cp = cpuc->event[i];

		if (cpuc->current_idx[i] != PIC_NO_INDEX &&
		    cpuc->current_idx[i] != cp->hw.idx) {
			sparc_perf_event_update(cp, &cp->hw,
						cpuc->current_idx[i]);
			cpuc->current_idx[i] = PIC_NO_INDEX;
		}
	}

	/* Assign to counters all unassigned events.  */
	for (i = 0; i < cpuc->n_events; i++) {
		struct perf_event *cp = cpuc->event[i];
		struct hw_perf_event *hwc = &cp->hw;
		int idx = hwc->idx;
		u64 enc;

		if (cpuc->current_idx[i] != PIC_NO_INDEX)
			continue;

		sparc_perf_event_set_period(cp, hwc, idx);
		cpuc->current_idx[i] = idx;

		enc = perf_event_get_enc(cpuc->events[i]);
		pcr |= event_encoding(enc, idx);
	}
out:
	return pcr;
}
void hw_perf_enable(void)
{
	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
	u64 pcr;

	if (cpuc->enabled)
		return;

	cpuc->enabled = 1;
	barrier();

	pcr = cpuc->pcr;
	if (!cpuc->n_events) {
		pcr = 0;
	} else {
		pcr = maybe_change_configuration(cpuc, pcr);

		/* We require that all of the events have the same
		 * configuration, so just fetch the settings from the
		 * first entry.
		 */
		cpuc->pcr = pcr | cpuc->event[0]->hw.config_base;
	}

	pcr_ops->write(cpuc->pcr);
}
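
/* Illustrative example (not original text): after the code above, the
 * software copy cpuc->pcr holds the per-counter event encodings produced
 * by maybe_change_configuration() OR'ed with the enable bits saved in
 * event[0]->hw.config_base (PCR_UTRACE/PCR_STRACE plus the chip's
 * hypervisor-trace and overflow-interrupt bits), and that single value is
 * what pcr_ops->write() pushes into the hardware %pcr.
 */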
void hw_perf_disable(void)
{
	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
	u64 val;

	if (!cpuc->enabled)
		return;

	cpuc->enabled = 0;
	cpuc->n_added = 0;

	val = cpuc->pcr;
	val &= ~(PCR_UTRACE | PCR_STRACE |
		 sparc_pmu->hv_bit | sparc_pmu->irq_bit);
	cpuc->pcr = val;

	pcr_ops->write(cpuc->pcr);
}
static void sparc_pmu_disable(struct perf_event *event)
{
	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
	struct hw_perf_event *hwc = &event->hw;
	unsigned long flags;
	int i;

	local_irq_save(flags);
	perf_disable();

	for (i = 0; i < cpuc->n_events; i++) {
		if (event == cpuc->event[i]) {
			int idx = cpuc->current_idx[i];

			/* Shift remaining entries down into
			 * the existing slot.
			 */
			while (++i < cpuc->n_events) {
				cpuc->event[i - 1] = cpuc->event[i];
				cpuc->events[i - 1] = cpuc->events[i];
				cpuc->current_idx[i - 1] =
					cpuc->current_idx[i];
			}

			/* Absorb the final count and turn off the
			 * event.
			 */
			sparc_pmu_disable_event(cpuc, hwc, idx);
			barrier();
			sparc_perf_event_update(event, hwc, idx);

			perf_event_update_userpage(event);

			cpuc->n_events--;
			break;
		}
	}

	perf_enable();
	local_irq_restore(flags);
}
static int active_event_index(struct cpu_hw_events *cpuc,
			      struct perf_event *event)
{
	int i;

	for (i = 0; i < cpuc->n_events; i++) {
		if (cpuc->event[i] == event)
			break;
	}
	BUG_ON(i == cpuc->n_events);
	return cpuc->current_idx[i];
}
static void sparc_pmu_read(struct perf_event *event)
{
	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
	int idx = active_event_index(cpuc, event);
	struct hw_perf_event *hwc = &event->hw;

	sparc_perf_event_update(event, hwc, idx);
}
static void sparc_pmu_unthrottle(struct perf_event *event)
{
	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
	int idx = active_event_index(cpuc, event);
	struct hw_perf_event *hwc = &event->hw;

	sparc_pmu_enable_event(cpuc, hwc, idx);
}
static atomic_t active_events = ATOMIC_INIT(0);
static DEFINE_MUTEX(pmc_grab_mutex);
static void perf_stop_nmi_watchdog(void *unused)
{
	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);

	stop_nmi_watchdog(NULL);
	cpuc->pcr = pcr_ops->read();
}
void perf_event_grab_pmc(void)
{
	if (atomic_inc_not_zero(&active_events))
		return;

	mutex_lock(&pmc_grab_mutex);
	if (atomic_read(&active_events) == 0) {
		if (atomic_read(&nmi_active) > 0) {
			on_each_cpu(perf_stop_nmi_watchdog, NULL, 1);
			BUG_ON(atomic_read(&nmi_active) != 0);
		}
		atomic_inc(&active_events);
	}
	mutex_unlock(&pmc_grab_mutex);
}
void perf_event_release_pmc(void)
{
	if (atomic_dec_and_mutex_lock(&active_events, &pmc_grab_mutex)) {
		if (atomic_read(&nmi_active) == 0)
			on_each_cpu(start_nmi_watchdog, NULL, 1);
		mutex_unlock(&pmc_grab_mutex);
	}
}
static const struct perf_event_map *sparc_map_cache_event(u64 config)
{
	unsigned int cache_type, cache_op, cache_result;
	const struct perf_event_map *pmap;

	if (!sparc_pmu->cache_map)
		return ERR_PTR(-ENOENT);

	cache_type = (config >>  0) & 0xff;
	if (cache_type >= PERF_COUNT_HW_CACHE_MAX)
		return ERR_PTR(-EINVAL);

	cache_op = (config >>  8) & 0xff;
	if (cache_op >= PERF_COUNT_HW_CACHE_OP_MAX)
		return ERR_PTR(-EINVAL);

	cache_result = (config >> 16) & 0xff;
	if (cache_result >= PERF_COUNT_HW_CACHE_RESULT_MAX)
		return ERR_PTR(-EINVAL);

	pmap = &((*sparc_pmu->cache_map)[cache_type][cache_op][cache_result]);

	if (pmap->encoding == CACHE_OP_UNSUPPORTED)
		return ERR_PTR(-ENOENT);

	if (pmap->encoding == CACHE_OP_NONSENSE)
		return ERR_PTR(-EINVAL);

	return pmap;
}
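
/* Example attr.config layout for PERF_TYPE_HW_CACHE (illustrative): an
 * L1-D read-miss request is encoded by the generic perf ABI as
 *
 *	PERF_COUNT_HW_CACHE_L1D |
 *	(PERF_COUNT_HW_CACHE_OP_READ << 8) |
 *	(PERF_COUNT_HW_CACHE_RESULT_MISS << 16)
 *
 * which the byte extractions above split back into cache_type, cache_op
 * and cache_result before indexing the per-chip cache_map.
 */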
static void hw_perf_event_destroy(struct perf_event *event)
{
	perf_event_release_pmc();
}
/* Make sure all events can be scheduled into the hardware at
 * the same time.  This is simplified by the fact that we only
 * need to support 2 simultaneous HW events.
 *
 * As a side effect, the evts[]->hw.idx values will be assigned
 * on success.  These are pending indexes.  When the events are
 * actually programmed into the chip, these values will propagate
 * to the per-cpu cpuc->current_idx[] slots, see the code in
 * maybe_change_configuration() for details.
 */
static int sparc_check_constraints(struct perf_event **evts,
				   unsigned long *events, int n_ev)
{
	u8 msk0 = 0, msk1 = 0;
	int idx0 = 0;

	/* This case is possible when we are invoked from
	 * hw_perf_group_sched_in().
	 */
	if (!n_ev)
		return 0;

	if (n_ev > perf_max_events)
		return -1;

	msk0 = perf_event_get_msk(events[0]);
	if (n_ev == 1) {
		if (msk0 & PIC_LOWER)
			idx0 = 1;
		goto success;
	}
	BUG_ON(n_ev != 2);
	msk1 = perf_event_get_msk(events[1]);

	/* If both events can go on any counter, OK.  */
	if (msk0 == (PIC_UPPER | PIC_LOWER) &&
	    msk1 == (PIC_UPPER | PIC_LOWER))
		goto success;

	/* If one event is limited to a specific counter,
	 * and the other can go on both, OK.
	 */
	if ((msk0 == PIC_UPPER || msk0 == PIC_LOWER) &&
	    msk1 == (PIC_UPPER | PIC_LOWER)) {
		if (msk0 & PIC_LOWER)
			idx0 = 1;
		goto success;
	}

	if ((msk1 == PIC_UPPER || msk1 == PIC_LOWER) &&
	    msk0 == (PIC_UPPER | PIC_LOWER)) {
		if (msk1 & PIC_UPPER)
			idx0 = 1;
		goto success;
	}

	/* If the events are fixed to different counters, OK.  */
	if ((msk0 == PIC_UPPER && msk1 == PIC_LOWER) ||
	    (msk0 == PIC_LOWER && msk1 == PIC_UPPER)) {
		if (msk0 & PIC_LOWER)
			idx0 = 1;
		goto success;
	}

	/* Otherwise, there is a conflict.  */
	return -1;

success:
	evts[0]->hw.idx = idx0;
	if (n_ev == 2)
		evts[1]->hw.idx = idx0 ^ 1;
	return 0;
}
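
/* Worked example (illustrative): on UltraSPARC-III, CACHE_REFERENCES is
 * restricted to PIC_LOWER while CPU_CYCLES may use either counter.  With
 * events[0] = CACHE_REFERENCES and events[1] = CPU_CYCLES we get
 * msk0 == PIC_LOWER and msk1 == (PIC_UPPER | PIC_LOWER), so idx0 becomes
 * 1: evts[0]->hw.idx = PIC_LOWER_INDEX and evts[1]->hw.idx gets the
 * remaining counter, PIC_UPPER_INDEX.
 */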
static int check_excludes(struct perf_event **evts, int n_prev, int n_new)
{
	int eu = 0, ek = 0, eh = 0;
	struct perf_event *event;
	int i, n, first;

	n = n_prev + n_new;
	if (n <= 1)
		return 0;

	first = 1;
	for (i = 0; i < n; i++) {
		event = evts[i];
		if (first) {
			eu = event->attr.exclude_user;
			ek = event->attr.exclude_kernel;
			eh = event->attr.exclude_hv;
			first = 0;
		} else if (event->attr.exclude_user != eu ||
			   event->attr.exclude_kernel != ek ||
			   event->attr.exclude_hv != eh) {
			return -EAGAIN;
		}
	}

	return 0;
}
static int collect_events(struct perf_event *group, int max_count,
			  struct perf_event *evts[], unsigned long *events,
			  int *current_idx)
{
	struct perf_event *event;
	int n = 0;

	if (!is_software_event(group)) {
		if (n >= max_count)
			return -1;
		evts[n] = group;
		events[n] = group->hw.event_base;
		current_idx[n++] = PIC_NO_INDEX;
	}
	list_for_each_entry(event, &group->sibling_list, group_entry) {
		if (!is_software_event(event) &&
		    event->state != PERF_EVENT_STATE_OFF) {
			if (n >= max_count)
				return -1;
			evts[n] = event;
			events[n] = event->hw.event_base;
			current_idx[n++] = PIC_NO_INDEX;
		}
	}
	return n;
}
static int sparc_pmu_enable(struct perf_event *event)
{
	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
	int n0, ret = -EAGAIN;
	unsigned long flags;

	local_irq_save(flags);
	perf_disable();

	n0 = cpuc->n_events;
	if (n0 >= perf_max_events)
		goto out;

	cpuc->event[n0] = event;
	cpuc->events[n0] = event->hw.event_base;
	cpuc->current_idx[n0] = PIC_NO_INDEX;

	/*
	 * If group events scheduling transaction was started,
	 * skip the schedulability test here, it will be performed
	 * at commit time (->commit_txn) as a whole.
	 */
	if (cpuc->group_flag & PERF_EVENT_TXN_STARTED)
		goto nocheck;

	if (check_excludes(cpuc->event, n0, 1))
		goto out;
	if (sparc_check_constraints(cpuc->event, cpuc->events, n0 + 1))
		goto out;

nocheck:
	cpuc->n_events++;
	cpuc->n_added++;

	ret = 0;
out:
	perf_enable();
	local_irq_restore(flags);
	return ret;
}
static int __hw_perf_event_init(struct perf_event *event)
{
	struct perf_event_attr *attr = &event->attr;
	struct perf_event *evts[MAX_HWEVENTS];
	struct hw_perf_event *hwc = &event->hw;
	unsigned long events[MAX_HWEVENTS];
	int current_idx_dmy[MAX_HWEVENTS];
	const struct perf_event_map *pmap;
	int n;

	if (atomic_read(&nmi_active) < 0)
		return -ENODEV;

	if (attr->type == PERF_TYPE_HARDWARE) {
		if (attr->config >= sparc_pmu->max_events)
			return -EINVAL;
		pmap = sparc_pmu->event_map(attr->config);
	} else if (attr->type == PERF_TYPE_HW_CACHE) {
		pmap = sparc_map_cache_event(attr->config);
		if (IS_ERR(pmap))
			return PTR_ERR(pmap);
	} else
		return -EOPNOTSUPP;

	/* We save the enable bits in the config_base.  */
	hwc->config_base = sparc_pmu->irq_bit;
	if (!attr->exclude_user)
		hwc->config_base |= PCR_UTRACE;
	if (!attr->exclude_kernel)
		hwc->config_base |= PCR_STRACE;
	if (!attr->exclude_hv)
		hwc->config_base |= sparc_pmu->hv_bit;

	hwc->event_base = perf_event_encode(pmap);

	n = 0;
	if (event->group_leader != event) {
		n = collect_events(event->group_leader,
				   perf_max_events - 1,
				   evts, events, current_idx_dmy);
		if (n < 0)
			return -EINVAL;
	}
	events[n] = hwc->event_base;
	evts[n] = event;

	if (check_excludes(evts, n, 1))
		return -EINVAL;

	if (sparc_check_constraints(evts, events, n + 1))
		return -EINVAL;

	hwc->idx = PIC_NO_INDEX;

	/* Try to do all error checking before this point, as unwinding
	 * state after grabbing the PMC is difficult.
	 */
	perf_event_grab_pmc();
	event->destroy = hw_perf_event_destroy;

	if (!hwc->sample_period) {
		hwc->sample_period = MAX_PERIOD;
		hwc->last_period = hwc->sample_period;
		atomic64_set(&hwc->period_left, hwc->sample_period);
	}

	return 0;
}
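
/* Usage sketch (illustrative, not part of the original file): a plain
 * counting event for cycles reaches this function with
 *
 *	struct perf_event_attr attr = {
 *		.type   = PERF_TYPE_HARDWARE,
 *		.config = PERF_COUNT_HW_CPU_CYCLES,
 *	};
 *
 * Since no sample_period is requested, the code above programs the
 * maximum 32-bit period and the counter simply accumulates between
 * overflows.
 */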
/*
 * Start group events scheduling transaction
 * Set the flag to make pmu::enable() not perform the
 * schedulability test, it will be performed at commit time
 */
static void sparc_pmu_start_txn(const struct pmu *pmu)
{
	struct cpu_hw_events *cpuhw = &__get_cpu_var(cpu_hw_events);

	cpuhw->group_flag |= PERF_EVENT_TXN_STARTED;
}
/*
 * Stop group events scheduling transaction
 * Clear the flag and pmu::enable() will perform the
 * schedulability test.
 */
static void sparc_pmu_cancel_txn(const struct pmu *pmu)
{
	struct cpu_hw_events *cpuhw = &__get_cpu_var(cpu_hw_events);

	cpuhw->group_flag &= ~PERF_EVENT_TXN_STARTED;
}
/*
 * Commit group events scheduling transaction
 * Perform the group schedulability test as a whole
 * Return 0 if success
 */
static int sparc_pmu_commit_txn(const struct pmu *pmu)
{
	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
	int n;

	if (!sparc_pmu)
		return -EINVAL;

	cpuc = &__get_cpu_var(cpu_hw_events);
	n = cpuc->n_events;
	if (check_excludes(cpuc->event, 0, n))
		return -EINVAL;
	if (sparc_check_constraints(cpuc->event, cpuc->events, n))
		return -EAGAIN;

	return 0;
}
static const struct pmu pmu = {
	.enable		= sparc_pmu_enable,
	.disable	= sparc_pmu_disable,
	.read		= sparc_pmu_read,
	.unthrottle	= sparc_pmu_unthrottle,
	.start_txn	= sparc_pmu_start_txn,
	.cancel_txn	= sparc_pmu_cancel_txn,
	.commit_txn	= sparc_pmu_commit_txn,
};
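
/* Illustrative sequence (not original text): when the core schedules an
 * event group it calls
 *
 *	pmu.start_txn();	-- sets PERF_EVENT_TXN_STARTED
 *	pmu.enable(event);	-- each enable skips the constraint checks
 *	pmu.commit_txn();	-- validates the whole group at once
 *
 * and falls back to pmu.cancel_txn(), which simply clears the flag, if
 * the commit fails.
 */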
const struct pmu *hw_perf_event_init(struct perf_event *event)
{
	int err = __hw_perf_event_init(event);

	if (err)
		return ERR_PTR(err);
	return &pmu;
}
void perf_event_print_debug(void)
{
	unsigned long flags;
	u64 pcr, pic;
	int cpu;

	if (!sparc_pmu)
		return;

	local_irq_save(flags);

	cpu = smp_processor_id();

	pcr = pcr_ops->read();
	read_pic(pic);

	pr_info("\n");
	pr_info("CPU#%d: PCR[%016llx] PIC[%016llx]\n",
		cpu, pcr, pic);

	local_irq_restore(flags);
}
static int __kprobes perf_event_nmi_handler(struct notifier_block *self,
					    unsigned long cmd, void *__args)
{
	struct die_args *args = __args;
	struct perf_sample_data data;
	struct cpu_hw_events *cpuc;
	struct pt_regs *regs;
	int i;

	if (!atomic_read(&active_events))
		return NOTIFY_DONE;

	switch (cmd) {
	case DIE_NMI:
		break;

	default:
		return NOTIFY_DONE;
	}

	regs = args->regs;

	perf_sample_data_init(&data, 0);

	cpuc = &__get_cpu_var(cpu_hw_events);

	/* If the PMU has the TOE IRQ enable bits, we need to do a
	 * dummy write to the %pcr to clear the overflow bits and thus
	 * the interrupt.
	 *
	 * Do this before we peek at the counters to determine
	 * overflow so we don't lose any events.
	 */
	if (sparc_pmu->irq_bit)
		pcr_ops->write(cpuc->pcr);

	for (i = 0; i < cpuc->n_events; i++) {
		struct perf_event *event = cpuc->event[i];
		int idx = cpuc->current_idx[i];
		struct hw_perf_event *hwc;
		u64 val;

		hwc = &event->hw;
		val = sparc_perf_event_update(event, hwc, idx);
		if (val & (1ULL << 31))
			continue;

		data.period = event->hw.last_period;
		if (!sparc_perf_event_set_period(event, hwc, idx))
			continue;

		if (perf_event_overflow(event, 1, &data, regs))
			sparc_pmu_disable_event(cpuc, hwc, idx);
	}

	return NOTIFY_STOP;
}
static __read_mostly struct notifier_block perf_event_nmi_notifier = {
	.notifier_call		= perf_event_nmi_handler,
};
static bool __init supported_pmu(void)
{
	if (!strcmp(sparc_pmu_type, "ultra3") ||
	    !strcmp(sparc_pmu_type, "ultra3+") ||
	    !strcmp(sparc_pmu_type, "ultra3i") ||
	    !strcmp(sparc_pmu_type, "ultra4+")) {
		sparc_pmu = &ultra3_pmu;
		return true;
	}
	if (!strcmp(sparc_pmu_type, "niagara")) {
		sparc_pmu = &niagara1_pmu;
		return true;
	}
	if (!strcmp(sparc_pmu_type, "niagara2")) {
		sparc_pmu = &niagara2_pmu;
		return true;
	}
	return false;
}
void __init init_hw_perf_events(void)
{
	pr_info("Performance events: ");

	if (!supported_pmu()) {
		pr_cont("No support for PMU type '%s'\n", sparc_pmu_type);
		return;
	}

	pr_cont("Supported PMU type is '%s'\n", sparc_pmu_type);

	/* All sparc64 PMUs currently have 2 events.  */
	perf_max_events = 2;

	register_die_notifier(&perf_event_nmi_notifier);
}
static inline void callchain_store(struct perf_callchain_entry *entry, u64 ip)
{
	if (entry->nr < PERF_MAX_STACK_DEPTH)
		entry->ip[entry->nr++] = ip;
}
static void perf_callchain_kernel(struct pt_regs *regs,
				  struct perf_callchain_entry *entry)
{
	unsigned long ksp, fp;

	callchain_store(entry, PERF_CONTEXT_KERNEL);
	callchain_store(entry, regs->tpc);

	ksp = regs->u_regs[UREG_I6];
	fp = ksp + STACK_BIAS;
	do {
		struct sparc_stackf *sf;
		struct pt_regs *regs;
		unsigned long pc;

		if (!kstack_valid(current_thread_info(), fp))
			break;

		sf = (struct sparc_stackf *) fp;
		regs = (struct pt_regs *) (sf + 1);

		if (kstack_is_trap_frame(current_thread_info(), regs)) {
			if (user_mode(regs))
				break;
			pc = regs->tpc;
			fp = regs->u_regs[UREG_I6] + STACK_BIAS;
		} else {
			pc = sf->callers_pc;
			fp = (unsigned long)sf->fp + STACK_BIAS;
		}
		callchain_store(entry, pc);
	} while (entry->nr < PERF_MAX_STACK_DEPTH);
}
static void perf_callchain_user_64(struct pt_regs *regs,
				   struct perf_callchain_entry *entry)
{
	unsigned long ufp;

	callchain_store(entry, PERF_CONTEXT_USER);
	callchain_store(entry, regs->tpc);

	ufp = regs->u_regs[UREG_I6] + STACK_BIAS;
	do {
		struct sparc_stackf *usf, sf;
		unsigned long pc;

		usf = (struct sparc_stackf *) ufp;
		if (__copy_from_user_inatomic(&sf, usf, sizeof(sf)))
			break;

		pc = sf.callers_pc;
		ufp = (unsigned long)sf.fp + STACK_BIAS;
		callchain_store(entry, pc);
	} while (entry->nr < PERF_MAX_STACK_DEPTH);
}
static void perf_callchain_user_32(struct pt_regs *regs,
				   struct perf_callchain_entry *entry)
{
	unsigned long ufp;

	callchain_store(entry, PERF_CONTEXT_USER);
	callchain_store(entry, regs->tpc);

	ufp = regs->u_regs[UREG_I6] & 0xffffffffUL;
	do {
		struct sparc_stackf32 *usf, sf;
		unsigned long pc;

		usf = (struct sparc_stackf32 *) ufp;
		if (__copy_from_user_inatomic(&sf, usf, sizeof(sf)))
			break;

		pc = sf.callers_pc;
		ufp = (unsigned long)sf.fp;
		callchain_store(entry, pc);
	} while (entry->nr < PERF_MAX_STACK_DEPTH);
}
/* Like powerpc we can't get PMU interrupts within the PMU handler,
 * so no need for separate NMI and IRQ chains as on x86.
 */
static DEFINE_PER_CPU(struct perf_callchain_entry, callchain);

struct perf_callchain_entry *perf_callchain(struct pt_regs *regs)
{
	struct perf_callchain_entry *entry = &__get_cpu_var(callchain);

	entry->nr = 0;
	if (!user_mode(regs)) {
		stack_trace_flush();
		perf_callchain_kernel(regs, entry);
		if (current->mm)
			regs = task_pt_regs(current);
		else
			regs = NULL;
	}
	if (regs) {
		if (test_thread_flag(TIF_32BIT))
			perf_callchain_user_32(regs, entry);
		else
			perf_callchain_user_64(regs, entry);
	}
	return entry;
}
);