/* Performance event support for sparc64.
 *
 * Copyright (C) 2009, 2010 David S. Miller <davem@davemloft.net>
 *
 * This code is based almost entirely upon the x86 perf event
 * code, which is:
 *
 *  Copyright (C) 2008 Thomas Gleixner <tglx@linutronix.de>
 *  Copyright (C) 2008-2009 Red Hat, Inc., Ingo Molnar
 *  Copyright (C) 2009 Jaswinder Singh Rajput
 *  Copyright (C) 2009 Advanced Micro Devices, Inc., Robert Richter
 *  Copyright (C) 2008-2009 Red Hat, Inc., Peter Zijlstra <pzijlstr@redhat.com>
 */
#include <linux/perf_event.h>
#include <linux/kprobes.h>
#include <linux/ftrace.h>
#include <linux/kernel.h>
#include <linux/kdebug.h>
#include <linux/mutex.h>
#include <asm/stacktrace.h>
#include <asm/cpudata.h>
#include <asm/uaccess.h>
#include <asm/atomic.h>
#include <asm/nmi.h>
#include <asm/pcr.h>

#include "kstack.h"
/* Sparc64 chips have two performance counters, 32-bits each, with
 * overflow interrupts generated on transition from 0xffffffff to 0.
 * The counters are accessed in one go using a 64-bit register.
 *
 * Both counters are controlled using a single control register.  The
 * only way to stop all sampling is to clear all of the context (user,
 * supervisor, hypervisor) sampling enable bits.  But these bits apply
 * to both counters, thus the two counters can't be enabled/disabled
 * individually.
 *
 * The control register has two event fields, one for each of the two
 * counters.  It's thus nearly impossible to have one counter going
 * while keeping the other one stopped.  Therefore it is possible to
 * get overflow interrupts for counters not currently "in use" and
 * that condition must be checked in the overflow interrupt handler.
 *
 * So we use a hack, in that we program inactive counters with the
 * "sw_count0" and "sw_count1" events.  These count how many times
 * the instruction "sethi %hi(0xfc000), %g0" is executed.  It's an
 * unusual way to encode a NOP and therefore will not trigger in
 * normal code.
 */
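/* (Why that instruction is a no-op: a sethi with %g0 as the
 * destination discards its result, since %g0 always reads as zero.
 * Compilers emit the canonical NOP encoding "sethi %hi(0), %g0", so
 * the 0xfc000 immediate should not appear in normal generated code.)
 */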
#define MAX_HWEVENTS			2
#define MAX_PERIOD			((1UL << 32) - 1)

#define PIC_UPPER_INDEX			0
#define PIC_LOWER_INDEX			1
#define PIC_NO_INDEX			-1
struct cpu_hw_events {
	/* Number of events currently scheduled onto this cpu.
	 * This tells how many entries in the arrays below
	 * are valid.
	 */
	int			n_events;

	/* Number of new events added since the last hw_perf_disable().
	 * This works because the perf event layer always adds new
	 * events inside of a perf_{disable,enable}() sequence.
	 */
	int			n_added;

	/* Array of events currently scheduled on this cpu.  */
	struct perf_event	*event[MAX_HWEVENTS];

	/* Array of encoded longs, specifying the %pcr register
	 * encoding and the mask of PIC counters this event can
	 * be scheduled on.  See perf_event_encode() et al.
	 */
	unsigned long		events[MAX_HWEVENTS];

	/* The current counter index assigned to an event.  When the
	 * event hasn't been programmed into the cpu yet, this will
	 * hold PIC_NO_INDEX.  The event->hw.idx value tells us where
	 * we ought to schedule the event.
	 */
	int			current_idx[MAX_HWEVENTS];

	/* Software copy of the %pcr register on this cpu.  */
	u64			pcr;

	/* Enabled/disabled state.  */
	int			enabled;

	unsigned int		group_flag;
};
DEFINE_PER_CPU(struct cpu_hw_events, cpu_hw_events) = { .enabled = 1, };
/* An event map describes the characteristics of a performance
 * counter event.  In particular it gives the encoding as well as
 * a mask telling which counters the event can be measured on.
 */
struct perf_event_map {
	u16	encoding;
	u8	pic_mask;
#define PIC_NONE	0x00
#define PIC_UPPER	0x01
#define PIC_LOWER	0x02
};
/* Encode a perf_event_map entry into a long.  */
static unsigned long perf_event_encode(const struct perf_event_map *pmap)
{
	return ((unsigned long) pmap->encoding << 16) | pmap->pic_mask;
}
static u8 perf_event_get_msk(unsigned long val)
{
	return val & 0xff;
}

static u64 perf_event_get_enc(unsigned long val)
{
	return val >> 16;
}
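/* Worked example: the ultra3 PERF_COUNT_HW_CACHE_MISSES entry below is
 * { 0x0009, PIC_UPPER }, which perf_event_encode() turns into
 * (0x0009 << 16) | 0x01 == 0x90001.  perf_event_get_enc() then
 * recovers the 0x0009 encoding and perf_event_get_msk() the
 * PIC_UPPER mask.
 */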
#define C(x) PERF_COUNT_HW_CACHE_##x

#define CACHE_OP_UNSUPPORTED	0xfffe
#define CACHE_OP_NONSENSE	0xffff
typedef struct perf_event_map cache_map_t
				[PERF_COUNT_HW_CACHE_MAX]
				[PERF_COUNT_HW_CACHE_OP_MAX]
				[PERF_COUNT_HW_CACHE_RESULT_MAX];
struct sparc_pmu {
	const struct perf_event_map	*(*event_map)(int);
	const cache_map_t		*cache_map;
	int				max_events;
	int				upper_shift;
	int				lower_shift;
	int				event_mask;
	int				hv_bit;
	int				irq_bit;
	int				upper_nop;
	int				lower_nop;
};
static const struct perf_event_map ultra3_perfmon_event_map[] = {
	[PERF_COUNT_HW_CPU_CYCLES] = { 0x0000, PIC_UPPER | PIC_LOWER },
	[PERF_COUNT_HW_INSTRUCTIONS] = { 0x0001, PIC_UPPER | PIC_LOWER },
	[PERF_COUNT_HW_CACHE_REFERENCES] = { 0x0009, PIC_LOWER },
	[PERF_COUNT_HW_CACHE_MISSES] = { 0x0009, PIC_UPPER },
};
static const struct perf_event_map *ultra3_event_map(int event_id)
{
	return &ultra3_perfmon_event_map[event_id];
}
static const cache_map_t ultra3_cache_map = {
[C(L1D)] = {
	[C(OP_READ)] = {
		[C(RESULT_ACCESS)] = { 0x09, PIC_LOWER, },
		[C(RESULT_MISS)] = { 0x09, PIC_UPPER, },
	},
	[C(OP_WRITE)] = {
		[C(RESULT_ACCESS)] = { 0x0a, PIC_LOWER },
		[C(RESULT_MISS)] = { 0x0a, PIC_UPPER },
	},
	[C(OP_PREFETCH)] = {
		[C(RESULT_ACCESS)] = { CACHE_OP_UNSUPPORTED },
		[C(RESULT_MISS)] = { CACHE_OP_UNSUPPORTED },
	},
},
[C(L1I)] = {
	[C(OP_READ)] = {
		[C(RESULT_ACCESS)] = { 0x09, PIC_LOWER, },
		[C(RESULT_MISS)] = { 0x09, PIC_UPPER, },
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = { CACHE_OP_NONSENSE },
		[ C(RESULT_MISS) ] = { CACHE_OP_NONSENSE },
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = { CACHE_OP_UNSUPPORTED },
		[ C(RESULT_MISS) ] = { CACHE_OP_UNSUPPORTED },
	},
},
[C(LL)] = {
	[C(OP_READ)] = {
		[C(RESULT_ACCESS)] = { 0x0c, PIC_LOWER, },
		[C(RESULT_MISS)] = { 0x0c, PIC_UPPER, },
	},
	[C(OP_WRITE)] = {
		[C(RESULT_ACCESS)] = { 0x0c, PIC_LOWER },
		[C(RESULT_MISS)] = { 0x0c, PIC_UPPER },
	},
	[C(OP_PREFETCH)] = {
		[C(RESULT_ACCESS)] = { CACHE_OP_UNSUPPORTED },
		[C(RESULT_MISS)] = { CACHE_OP_UNSUPPORTED },
	},
},
[C(DTLB)] = {
	[C(OP_READ)] = {
		[C(RESULT_ACCESS)] = { CACHE_OP_UNSUPPORTED },
		[C(RESULT_MISS)] = { 0x12, PIC_UPPER, },
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = { CACHE_OP_UNSUPPORTED },
		[ C(RESULT_MISS) ] = { CACHE_OP_UNSUPPORTED },
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = { CACHE_OP_UNSUPPORTED },
		[ C(RESULT_MISS) ] = { CACHE_OP_UNSUPPORTED },
	},
},
[C(ITLB)] = {
	[C(OP_READ)] = {
		[C(RESULT_ACCESS)] = { CACHE_OP_UNSUPPORTED },
		[C(RESULT_MISS)] = { 0x11, PIC_UPPER, },
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = { CACHE_OP_UNSUPPORTED },
		[ C(RESULT_MISS) ] = { CACHE_OP_UNSUPPORTED },
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = { CACHE_OP_UNSUPPORTED },
		[ C(RESULT_MISS) ] = { CACHE_OP_UNSUPPORTED },
	},
},
[C(BPU)] = {
	[C(OP_READ)] = {
		[C(RESULT_ACCESS)] = { CACHE_OP_UNSUPPORTED },
		[C(RESULT_MISS)] = { CACHE_OP_UNSUPPORTED },
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = { CACHE_OP_UNSUPPORTED },
		[ C(RESULT_MISS) ] = { CACHE_OP_UNSUPPORTED },
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = { CACHE_OP_UNSUPPORTED },
		[ C(RESULT_MISS) ] = { CACHE_OP_UNSUPPORTED },
	},
},
};
static const struct sparc_pmu ultra3_pmu = {
	.event_map	= ultra3_event_map,
	.cache_map	= &ultra3_cache_map,
	.max_events	= ARRAY_SIZE(ultra3_perfmon_event_map),
	.upper_shift	= 11,
	.lower_shift	= 4,
	.event_mask	= 0x3f,
	.upper_nop	= 0x1c,
	.lower_nop	= 0x14,
};
/* Niagara1 is very limited.  The upper PIC is hard-locked to count
 * only instructions, so it is free running which creates all kinds of
 * problems.  Some hardware designs make one wonder if the creator
 * even looked at how this stuff gets used by software.
 */
static const struct perf_event_map niagara1_perfmon_event_map[] = {
	[PERF_COUNT_HW_CPU_CYCLES] = { 0x00, PIC_UPPER },
	[PERF_COUNT_HW_INSTRUCTIONS] = { 0x00, PIC_UPPER },
	[PERF_COUNT_HW_CACHE_REFERENCES] = { 0, PIC_NONE },
	[PERF_COUNT_HW_CACHE_MISSES] = { 0x03, PIC_LOWER },
};
static const struct perf_event_map *niagara1_event_map(int event_id)
{
	return &niagara1_perfmon_event_map[event_id];
}
static const cache_map_t niagara1_cache_map = {
[C(L1D)] = {
	[C(OP_READ)] = {
		[C(RESULT_ACCESS)] = { CACHE_OP_UNSUPPORTED },
		[C(RESULT_MISS)] = { 0x03, PIC_LOWER, },
	},
	[C(OP_WRITE)] = {
		[C(RESULT_ACCESS)] = { CACHE_OP_UNSUPPORTED },
		[C(RESULT_MISS)] = { 0x03, PIC_LOWER, },
	},
	[C(OP_PREFETCH)] = {
		[C(RESULT_ACCESS)] = { CACHE_OP_UNSUPPORTED },
		[C(RESULT_MISS)] = { CACHE_OP_UNSUPPORTED },
	},
},
[C(L1I)] = {
	[C(OP_READ)] = {
		[C(RESULT_ACCESS)] = { 0x00, PIC_UPPER },
		[C(RESULT_MISS)] = { 0x02, PIC_LOWER, },
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = { CACHE_OP_NONSENSE },
		[ C(RESULT_MISS) ] = { CACHE_OP_NONSENSE },
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = { CACHE_OP_UNSUPPORTED },
		[ C(RESULT_MISS) ] = { CACHE_OP_UNSUPPORTED },
	},
},
[C(LL)] = {
	[C(OP_READ)] = {
		[C(RESULT_ACCESS)] = { CACHE_OP_UNSUPPORTED },
		[C(RESULT_MISS)] = { 0x07, PIC_LOWER, },
	},
	[C(OP_WRITE)] = {
		[C(RESULT_ACCESS)] = { CACHE_OP_UNSUPPORTED },
		[C(RESULT_MISS)] = { 0x07, PIC_LOWER, },
	},
	[C(OP_PREFETCH)] = {
		[C(RESULT_ACCESS)] = { CACHE_OP_UNSUPPORTED },
		[C(RESULT_MISS)] = { CACHE_OP_UNSUPPORTED },
	},
},
[C(DTLB)] = {
	[C(OP_READ)] = {
		[C(RESULT_ACCESS)] = { CACHE_OP_UNSUPPORTED },
		[C(RESULT_MISS)] = { 0x05, PIC_LOWER, },
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = { CACHE_OP_UNSUPPORTED },
		[ C(RESULT_MISS) ] = { CACHE_OP_UNSUPPORTED },
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = { CACHE_OP_UNSUPPORTED },
		[ C(RESULT_MISS) ] = { CACHE_OP_UNSUPPORTED },
	},
},
[C(ITLB)] = {
	[C(OP_READ)] = {
		[C(RESULT_ACCESS)] = { CACHE_OP_UNSUPPORTED },
		[C(RESULT_MISS)] = { 0x04, PIC_LOWER, },
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = { CACHE_OP_UNSUPPORTED },
		[ C(RESULT_MISS) ] = { CACHE_OP_UNSUPPORTED },
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = { CACHE_OP_UNSUPPORTED },
		[ C(RESULT_MISS) ] = { CACHE_OP_UNSUPPORTED },
	},
},
[C(BPU)] = {
	[C(OP_READ)] = {
		[C(RESULT_ACCESS)] = { CACHE_OP_UNSUPPORTED },
		[C(RESULT_MISS)] = { CACHE_OP_UNSUPPORTED },
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = { CACHE_OP_UNSUPPORTED },
		[ C(RESULT_MISS) ] = { CACHE_OP_UNSUPPORTED },
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = { CACHE_OP_UNSUPPORTED },
		[ C(RESULT_MISS) ] = { CACHE_OP_UNSUPPORTED },
	},
},
};
static const struct sparc_pmu niagara1_pmu = {
	.event_map	= niagara1_event_map,
	.cache_map	= &niagara1_cache_map,
	.max_events	= ARRAY_SIZE(niagara1_perfmon_event_map),
	.upper_shift	= 0,
	.lower_shift	= 4,
	.event_mask	= 0x7,
	.upper_nop	= 0x0,
	.lower_nop	= 0x0,
};
static const struct perf_event_map niagara2_perfmon_event_map[] = {
	[PERF_COUNT_HW_CPU_CYCLES] = { 0x02ff, PIC_UPPER | PIC_LOWER },
	[PERF_COUNT_HW_INSTRUCTIONS] = { 0x02ff, PIC_UPPER | PIC_LOWER },
	[PERF_COUNT_HW_CACHE_REFERENCES] = { 0x0208, PIC_UPPER | PIC_LOWER },
	[PERF_COUNT_HW_CACHE_MISSES] = { 0x0302, PIC_UPPER | PIC_LOWER },
	[PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = { 0x0201, PIC_UPPER | PIC_LOWER },
	[PERF_COUNT_HW_BRANCH_MISSES] = { 0x0202, PIC_UPPER | PIC_LOWER },
};
static const struct perf_event_map *niagara2_event_map(int event_id)
{
	return &niagara2_perfmon_event_map[event_id];
}
static const cache_map_t niagara2_cache_map = {
[C(L1D)] = {
	[C(OP_READ)] = {
		[C(RESULT_ACCESS)] = { 0x0208, PIC_UPPER | PIC_LOWER, },
		[C(RESULT_MISS)] = { 0x0302, PIC_UPPER | PIC_LOWER, },
	},
	[C(OP_WRITE)] = {
		[C(RESULT_ACCESS)] = { 0x0210, PIC_UPPER | PIC_LOWER, },
		[C(RESULT_MISS)] = { 0x0302, PIC_UPPER | PIC_LOWER, },
	},
	[C(OP_PREFETCH)] = {
		[C(RESULT_ACCESS)] = { CACHE_OP_UNSUPPORTED },
		[C(RESULT_MISS)] = { CACHE_OP_UNSUPPORTED },
	},
},
[C(L1I)] = {
	[C(OP_READ)] = {
		[C(RESULT_ACCESS)] = { 0x02ff, PIC_UPPER | PIC_LOWER, },
		[C(RESULT_MISS)] = { 0x0301, PIC_UPPER | PIC_LOWER, },
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = { CACHE_OP_NONSENSE },
		[ C(RESULT_MISS) ] = { CACHE_OP_NONSENSE },
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = { CACHE_OP_UNSUPPORTED },
		[ C(RESULT_MISS) ] = { CACHE_OP_UNSUPPORTED },
	},
},
[C(LL)] = {
	[C(OP_READ)] = {
		[C(RESULT_ACCESS)] = { 0x0208, PIC_UPPER | PIC_LOWER, },
		[C(RESULT_MISS)] = { 0x0330, PIC_UPPER | PIC_LOWER, },
	},
	[C(OP_WRITE)] = {
		[C(RESULT_ACCESS)] = { 0x0210, PIC_UPPER | PIC_LOWER, },
		[C(RESULT_MISS)] = { 0x0320, PIC_UPPER | PIC_LOWER, },
	},
	[C(OP_PREFETCH)] = {
		[C(RESULT_ACCESS)] = { CACHE_OP_UNSUPPORTED },
		[C(RESULT_MISS)] = { CACHE_OP_UNSUPPORTED },
	},
},
[C(DTLB)] = {
	[C(OP_READ)] = {
		[C(RESULT_ACCESS)] = { CACHE_OP_UNSUPPORTED },
		[C(RESULT_MISS)] = { 0x0b08, PIC_UPPER | PIC_LOWER, },
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = { CACHE_OP_UNSUPPORTED },
		[ C(RESULT_MISS) ] = { CACHE_OP_UNSUPPORTED },
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = { CACHE_OP_UNSUPPORTED },
		[ C(RESULT_MISS) ] = { CACHE_OP_UNSUPPORTED },
	},
},
[C(ITLB)] = {
	[C(OP_READ)] = {
		[C(RESULT_ACCESS)] = { CACHE_OP_UNSUPPORTED },
		[C(RESULT_MISS)] = { 0xb04, PIC_UPPER | PIC_LOWER, },
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = { CACHE_OP_UNSUPPORTED },
		[ C(RESULT_MISS) ] = { CACHE_OP_UNSUPPORTED },
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = { CACHE_OP_UNSUPPORTED },
		[ C(RESULT_MISS) ] = { CACHE_OP_UNSUPPORTED },
	},
},
[C(BPU)] = {
	[C(OP_READ)] = {
		[C(RESULT_ACCESS)] = { CACHE_OP_UNSUPPORTED },
		[C(RESULT_MISS)] = { CACHE_OP_UNSUPPORTED },
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = { CACHE_OP_UNSUPPORTED },
		[ C(RESULT_MISS) ] = { CACHE_OP_UNSUPPORTED },
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = { CACHE_OP_UNSUPPORTED },
		[ C(RESULT_MISS) ] = { CACHE_OP_UNSUPPORTED },
	},
},
};
static const struct sparc_pmu niagara2_pmu = {
	.event_map	= niagara2_event_map,
	.cache_map	= &niagara2_cache_map,
	.max_events	= ARRAY_SIZE(niagara2_perfmon_event_map),
	.upper_shift	= 19,
	.lower_shift	= 6,
	.event_mask	= 0xfff,
	.hv_bit		= 0x8,
	.irq_bit	= 0x30,
	.upper_nop	= 0x220,
	.lower_nop	= 0x220,
};
static const struct sparc_pmu *sparc_pmu __read_mostly;
static u64 event_encoding(u64 event_id, int idx)
{
	if (idx == PIC_UPPER_INDEX)
		event_id <<= sparc_pmu->upper_shift;
	else
		event_id <<= sparc_pmu->lower_shift;
	return event_id;
}
static u64 mask_for_index(int idx)
{
	return event_encoding(sparc_pmu->event_mask, idx);
}
static u64 nop_for_index(int idx)
{
	return event_encoding(idx == PIC_UPPER_INDEX ?
			      sparc_pmu->upper_nop :
			      sparc_pmu->lower_nop, idx);
}
static inline void sparc_pmu_enable_event(struct cpu_hw_events *cpuc, struct hw_perf_event *hwc, int idx)
{
	u64 val, mask = mask_for_index(idx);

	val = cpuc->pcr;
	val &= ~mask;
	val |= hwc->config;
	cpuc->pcr = val;

	pcr_ops->write(cpuc->pcr);
}
static inline void sparc_pmu_disable_event(struct cpu_hw_events *cpuc, struct hw_perf_event *hwc, int idx)
{
	u64 mask = mask_for_index(idx);
	u64 nop = nop_for_index(idx);
	u64 val;

	val = cpuc->pcr;
	val &= ~mask;
	val |= nop;
	cpuc->pcr = val;

	pcr_ops->write(cpuc->pcr);
}
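/* The two 32-bit counters live in the single 64-bit %pic register:
 * the lower counter in bits 31:0 and the upper counter in bits
 * 63:32.  read_pmc() and write_pmc() below extract or update one
 * half without disturbing the other.
 */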
static u32 read_pmc(int idx)
{
	u64 val;

	read_pic(val);
	if (idx == PIC_UPPER_INDEX)
		val >>= 32;

	return val & 0xffffffff;
}
static void write_pmc(int idx, u64 val)
{
	u64 shift, mask, pic;

	shift = 0;
	if (idx == PIC_UPPER_INDEX)
		shift = 32;

	mask = ((u64) 0xffffffff) << shift;
	val <<= shift;

	read_pic(pic);
	pic &= ~mask;
	pic |= val;
	write_pic(pic);
}
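/* Update the running count of an event from the hardware counter.
 * The cmpxchg loop makes the prev_count sample safe against an NMI
 * sneaking in between the two reads, and the shift up/down by 32
 * bits computes the counter delta modulo 2^32 so that a counter
 * wrap between updates is still accounted correctly.
 */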
static u64 sparc_perf_event_update(struct perf_event *event,
				   struct hw_perf_event *hwc, int idx)
{
	int shift = 64 - 32;
	u64 prev_raw_count, new_raw_count;
	s64 delta;

again:
	prev_raw_count = local64_read(&hwc->prev_count);
	new_raw_count = read_pmc(idx);

	if (local64_cmpxchg(&hwc->prev_count, prev_raw_count,
			    new_raw_count) != prev_raw_count)
		goto again;

	delta = (new_raw_count << shift) - (prev_raw_count << shift);
	delta >>= shift;

	local64_add(delta, &event->count);
	local64_sub(delta, &hwc->period_left);

	return new_raw_count;
}
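/* Program the counter for the next sample period: the hardware is
 * loaded with -left (truncated to 32 bits) so that it overflows
 * after "left" increments, and prev_count is primed with the same
 * value so the next sparc_perf_event_update() sees a consistent
 * starting point.
 */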
static int sparc_perf_event_set_period(struct perf_event *event,
				       struct hw_perf_event *hwc, int idx)
{
	s64 left = local64_read(&hwc->period_left);
	s64 period = hwc->sample_period;
	int ret = 0;

	if (unlikely(left <= -period)) {
		left = period;
		local64_set(&hwc->period_left, left);
		hwc->last_period = period;
		ret = 1;
	}

	if (unlikely(left <= 0)) {
		left += period;
		local64_set(&hwc->period_left, left);
		hwc->last_period = period;
		ret = 1;
	}
	if (left > MAX_PERIOD)
		left = MAX_PERIOD;

	local64_set(&hwc->prev_count, (u64)-left);

	write_pmc(idx, (u64)(-left) & 0xffffffff);

	perf_event_update_userpage(event);

	return ret;
}
/* If performance event entries have been added, move existing
 * events around (if necessary) and then assign new entries to
 * counters.
 */
static u64 maybe_change_configuration(struct cpu_hw_events *cpuc, u64 pcr)
{
	int i;

	if (!cpuc->n_added)
		goto out;

	/* Read in the counters which are moving.  */
	for (i = 0; i < cpuc->n_events; i++) {
		struct perf_event *cp = cpuc->event[i];

		if (cpuc->current_idx[i] != PIC_NO_INDEX &&
		    cpuc->current_idx[i] != cp->hw.idx) {
			sparc_perf_event_update(cp, &cp->hw,
						cpuc->current_idx[i]);
			cpuc->current_idx[i] = PIC_NO_INDEX;
		}
	}

	/* Assign to counters all unassigned events.  */
	for (i = 0; i < cpuc->n_events; i++) {
		struct perf_event *cp = cpuc->event[i];
		struct hw_perf_event *hwc = &cp->hw;
		int idx = hwc->idx;
		u64 enc;

		if (cpuc->current_idx[i] != PIC_NO_INDEX)
			continue;

		sparc_perf_event_set_period(cp, hwc, idx);
		cpuc->current_idx[i] = idx;

		enc = perf_event_get_enc(cpuc->events[i]);
		pcr &= ~mask_for_index(idx);
		pcr |= event_encoding(enc, idx);
	}
out:
	return pcr;
}
void hw_perf_enable(void)
{
	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
	u64 pcr;

	if (cpuc->enabled)
		return;

	cpuc->enabled = 1;
	barrier();

	pcr = cpuc->pcr;
	if (!cpuc->n_events) {
		pcr = 0;
	} else {
		pcr = maybe_change_configuration(cpuc, pcr);

		/* We require that all of the events have the same
		 * configuration, so just fetch the settings from the
		 * first entry.
		 */
		cpuc->pcr = pcr | cpuc->event[0]->hw.config_base;
	}

	pcr_ops->write(cpuc->pcr);
}
void hw_perf_disable(void)
{
	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
	u64 val;

	if (!cpuc->enabled)
		return;

	cpuc->enabled = 0;
	cpuc->n_added = 0;

	val = cpuc->pcr;
	val &= ~(PCR_UTRACE | PCR_STRACE |
		 sparc_pmu->hv_bit | sparc_pmu->irq_bit);
	cpuc->pcr = val;

	pcr_ops->write(cpuc->pcr);
}
static void sparc_pmu_disable(struct perf_event *event)
{
	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
	struct hw_perf_event *hwc = &event->hw;
	unsigned long flags;
	int i;

	local_irq_save(flags);
	perf_disable();

	for (i = 0; i < cpuc->n_events; i++) {
		if (event == cpuc->event[i]) {
			int idx = cpuc->current_idx[i];

			/* Shift remaining entries down into
			 * the existing slot.
			 */
			while (++i < cpuc->n_events) {
				cpuc->event[i - 1] = cpuc->event[i];
				cpuc->events[i - 1] = cpuc->events[i];
				cpuc->current_idx[i - 1] =
					cpuc->current_idx[i];
			}

			/* Absorb the final count and turn off the
			 * event.
			 */
			sparc_pmu_disable_event(cpuc, hwc, idx);
			barrier();
			sparc_perf_event_update(event, hwc, idx);

			perf_event_update_userpage(event);

			cpuc->n_events--;
			break;
		}
	}

	perf_enable();
	local_irq_restore(flags);
}
static int active_event_index(struct cpu_hw_events *cpuc,
			      struct perf_event *event)
{
	int i;

	for (i = 0; i < cpuc->n_events; i++) {
		if (cpuc->event[i] == event)
			break;
	}
	BUG_ON(i == cpuc->n_events);
	return cpuc->current_idx[i];
}
static void sparc_pmu_read(struct perf_event *event)
{
	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
	int idx = active_event_index(cpuc, event);
	struct hw_perf_event *hwc = &event->hw;

	sparc_perf_event_update(event, hwc, idx);
}
static void sparc_pmu_unthrottle(struct perf_event *event)
{
	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
	int idx = active_event_index(cpuc, event);
	struct hw_perf_event *hwc = &event->hw;

	sparc_pmu_enable_event(cpuc, hwc, idx);
}
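/* The performance counters are shared with the NMI watchdog, so
 * active_events and pmc_grab_mutex coordinate ownership: creating
 * the first event stops the watchdog on all cpus, and destroying
 * the last event starts it again.
 */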
static atomic_t active_events = ATOMIC_INIT(0);
static DEFINE_MUTEX(pmc_grab_mutex);
static void perf_stop_nmi_watchdog(void *unused)
{
	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);

	stop_nmi_watchdog(NULL);
	cpuc->pcr = pcr_ops->read();
}
void perf_event_grab_pmc(void)
{
	if (atomic_inc_not_zero(&active_events))
		return;

	mutex_lock(&pmc_grab_mutex);
	if (atomic_read(&active_events) == 0) {
		if (atomic_read(&nmi_active) > 0) {
			on_each_cpu(perf_stop_nmi_watchdog, NULL, 1);
			BUG_ON(atomic_read(&nmi_active) != 0);
		}
		atomic_inc(&active_events);
	}
	mutex_unlock(&pmc_grab_mutex);
}
void perf_event_release_pmc(void)
{
	if (atomic_dec_and_mutex_lock(&active_events, &pmc_grab_mutex)) {
		if (atomic_read(&nmi_active) == 0)
			on_each_cpu(start_nmi_watchdog, NULL, 1);
		mutex_unlock(&pmc_grab_mutex);
	}
}
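/* A PERF_TYPE_HW_CACHE config packs three fields, one per byte:
 * cache type in bits 7:0, cache op in bits 15:8 and cache result in
 * bits 23:16.  For example C(L1D) | (C(OP_READ) << 8) |
 * (C(RESULT_MISS) << 16) selects the L1D read-miss entry of the
 * cache map.
 */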
static const struct perf_event_map *sparc_map_cache_event(u64 config)
{
	unsigned int cache_type, cache_op, cache_result;
	const struct perf_event_map *pmap;

	if (!sparc_pmu->cache_map)
		return ERR_PTR(-ENOENT);

	cache_type = (config >>  0) & 0xff;
	if (cache_type >= PERF_COUNT_HW_CACHE_MAX)
		return ERR_PTR(-EINVAL);

	cache_op = (config >>  8) & 0xff;
	if (cache_op >= PERF_COUNT_HW_CACHE_OP_MAX)
		return ERR_PTR(-EINVAL);

	cache_result = (config >> 16) & 0xff;
	if (cache_result >= PERF_COUNT_HW_CACHE_RESULT_MAX)
		return ERR_PTR(-EINVAL);

	pmap = &((*sparc_pmu->cache_map)[cache_type][cache_op][cache_result]);

	if (pmap->encoding == CACHE_OP_UNSUPPORTED)
		return ERR_PTR(-ENOENT);

	if (pmap->encoding == CACHE_OP_NONSENSE)
		return ERR_PTR(-EINVAL);

	return pmap;
}
static void hw_perf_event_destroy(struct perf_event *event)
{
	perf_event_release_pmc();
}
/* Make sure all events can be scheduled into the hardware at
 * the same time.  This is simplified by the fact that we only
 * need to support 2 simultaneous HW events.
 *
 * As a side effect, the evts[]->hw.idx values will be assigned
 * on success.  These are pending indexes.  When the events are
 * actually programmed into the chip, these values will propagate
 * to the per-cpu cpuc->current_idx[] slots, see the code in
 * maybe_change_configuration() for details.
 */
static int sparc_check_constraints(struct perf_event **evts,
				   unsigned long *events, int n_ev)
{
	u8 msk0 = 0, msk1 = 0;
	int idx0 = 0;

	/* This case is possible when we are invoked from
	 * hw_perf_group_sched_in().
	 */
	if (!n_ev)
		return 0;

	if (n_ev > perf_max_events)
		return -1;

	msk0 = perf_event_get_msk(events[0]);
	if (n_ev == 1) {
		if (msk0 & PIC_LOWER)
			idx0 = 1;
		goto success;
	}
	BUG_ON(n_ev != 2);
	msk1 = perf_event_get_msk(events[1]);

	/* If both events can go on any counter, OK.  */
	if (msk0 == (PIC_UPPER | PIC_LOWER) &&
	    msk1 == (PIC_UPPER | PIC_LOWER))
		goto success;

	/* If one event is limited to a specific counter,
	 * and the other can go on both, OK.
	 */
	if ((msk0 == PIC_UPPER || msk0 == PIC_LOWER) &&
	    msk1 == (PIC_UPPER | PIC_LOWER)) {
		if (msk0 & PIC_LOWER)
			idx0 = 1;
		goto success;
	}

	if ((msk1 == PIC_UPPER || msk1 == PIC_LOWER) &&
	    msk0 == (PIC_UPPER | PIC_LOWER)) {
		if (msk1 & PIC_UPPER)
			idx0 = 1;
		goto success;
	}

	/* If the events are fixed to different counters, OK.  */
	if ((msk0 == PIC_UPPER && msk1 == PIC_LOWER) ||
	    (msk0 == PIC_LOWER && msk1 == PIC_UPPER)) {
		if (msk0 & PIC_LOWER)
			idx0 = 1;
		goto success;
	}

	/* Otherwise, there is a conflict.  */
	return -1;

success:
	evts[0]->hw.idx = idx0;
	if (n_ev == 2)
		evts[1]->hw.idx = idx0 ^ 1;
	return 0;
}
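/* Example: pairing a PIC_UPPER-only event with a
 * (PIC_UPPER | PIC_LOWER) event leaves idx0 at 0, so the restricted
 * event is assigned the upper counter and the flexible one the
 * lower counter.
 */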
static int check_excludes(struct perf_event **evts, int n_prev, int n_new)
{
	int eu = 0, ek = 0, eh = 0;
	struct perf_event *event;
	int i, n, first;

	n = n_prev + n_new;
	if (n <= 1)
		return 0;

	first = 1;
	for (i = 0; i < n; i++) {
		event = evts[i];
		if (first) {
			eu = event->attr.exclude_user;
			ek = event->attr.exclude_kernel;
			eh = event->attr.exclude_hv;
			first = 0;
		} else if (event->attr.exclude_user != eu ||
			   event->attr.exclude_kernel != ek ||
			   event->attr.exclude_hv != eh) {
			return -EAGAIN;
		}
	}

	return 0;
}
*group
, int max_count
,
961 struct perf_event
*evts
[], unsigned long *events
,
964 struct perf_event
*event
;
967 if (!is_software_event(group
)) {
971 events
[n
] = group
->hw
.event_base
;
972 current_idx
[n
++] = PIC_NO_INDEX
;
974 list_for_each_entry(event
, &group
->sibling_list
, group_entry
) {
975 if (!is_software_event(event
) &&
976 event
->state
!= PERF_EVENT_STATE_OFF
) {
980 events
[n
] = event
->hw
.event_base
;
981 current_idx
[n
++] = PIC_NO_INDEX
;
static int sparc_pmu_enable(struct perf_event *event)
{
	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
	int n0, ret = -EAGAIN;
	unsigned long flags;

	local_irq_save(flags);
	perf_disable();

	n0 = cpuc->n_events;
	if (n0 >= perf_max_events)
		goto out;

	cpuc->event[n0] = event;
	cpuc->events[n0] = event->hw.event_base;
	cpuc->current_idx[n0] = PIC_NO_INDEX;

	/*
	 * If group events scheduling transaction was started,
	 * skip the schedulability test here, it will be performed
	 * at commit time (->commit_txn) as a whole
	 */
	if (cpuc->group_flag & PERF_EVENT_TXN)
		goto nocheck;

	if (check_excludes(cpuc->event, n0, 1))
		goto out;
	if (sparc_check_constraints(cpuc->event, cpuc->events, n0 + 1))
		goto out;

nocheck:
	cpuc->n_events++;
	cpuc->n_added++;

	ret = 0;
out:
	perf_enable();
	local_irq_restore(flags);
	return ret;
}
static int __hw_perf_event_init(struct perf_event *event)
{
	struct perf_event_attr *attr = &event->attr;
	struct perf_event *evts[MAX_HWEVENTS];
	struct hw_perf_event *hwc = &event->hw;
	unsigned long events[MAX_HWEVENTS];
	int current_idx_dmy[MAX_HWEVENTS];
	const struct perf_event_map *pmap;
	int n;

	if (atomic_read(&nmi_active) < 0)
		return -ENODEV;

	if (attr->type == PERF_TYPE_HARDWARE) {
		if (attr->config >= sparc_pmu->max_events)
			return -EINVAL;
		pmap = sparc_pmu->event_map(attr->config);
	} else if (attr->type == PERF_TYPE_HW_CACHE) {
		pmap = sparc_map_cache_event(attr->config);
		if (IS_ERR(pmap))
			return PTR_ERR(pmap);
	} else
		return -EOPNOTSUPP;

	/* We save the enable bits in the config_base.  */
	hwc->config_base = sparc_pmu->irq_bit;
	if (!attr->exclude_user)
		hwc->config_base |= PCR_UTRACE;
	if (!attr->exclude_kernel)
		hwc->config_base |= PCR_STRACE;
	if (!attr->exclude_hv)
		hwc->config_base |= sparc_pmu->hv_bit;

	hwc->event_base = perf_event_encode(pmap);

	n = 0;
	if (event->group_leader != event) {
		n = collect_events(event->group_leader,
				   perf_max_events - 1,
				   evts, events, current_idx_dmy);
		if (n < 0)
			return -EINVAL;
	}
	events[n] = hwc->event_base;
	evts[n] = event;

	if (check_excludes(evts, n, 1))
		return -EINVAL;

	if (sparc_check_constraints(evts, events, n + 1))
		return -EINVAL;

	hwc->idx = PIC_NO_INDEX;

	/* Try to do all error checking before this point, as unwinding
	 * state after grabbing the PMC is difficult.
	 */
	perf_event_grab_pmc();
	event->destroy = hw_perf_event_destroy;

	if (!hwc->sample_period) {
		hwc->sample_period = MAX_PERIOD;
		hwc->last_period = hwc->sample_period;
		local64_set(&hwc->period_left, hwc->sample_period);
	}

	return 0;
}
/*
 * Start group events scheduling transaction
 * Set the flag to make pmu::enable() not perform the
 * schedulability test, it will be performed at commit time
 */
static void sparc_pmu_start_txn(const struct pmu *pmu)
{
	struct cpu_hw_events *cpuhw = &__get_cpu_var(cpu_hw_events);

	cpuhw->group_flag |= PERF_EVENT_TXN;
}
/*
 * Stop group events scheduling transaction
 * Clear the flag and pmu::enable() will perform the
 * schedulability test.
 */
static void sparc_pmu_cancel_txn(const struct pmu *pmu)
{
	struct cpu_hw_events *cpuhw = &__get_cpu_var(cpu_hw_events);

	cpuhw->group_flag &= ~PERF_EVENT_TXN;
}
/*
 * Commit group events scheduling transaction
 * Perform the group schedulability test as a whole
 * Return 0 if success
 */
static int sparc_pmu_commit_txn(const struct pmu *pmu)
{
	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
	int n;

	if (!sparc_pmu)
		return -EINVAL;

	cpuc = &__get_cpu_var(cpu_hw_events);
	n = cpuc->n_events;
	if (check_excludes(cpuc->event, 0, n))
		return -EINVAL;
	if (sparc_check_constraints(cpuc->event, cpuc->events, n))
		return -EAGAIN;

	cpuc->group_flag &= ~PERF_EVENT_TXN;
	return 0;
}
static const struct pmu pmu = {
	.enable		= sparc_pmu_enable,
	.disable	= sparc_pmu_disable,
	.read		= sparc_pmu_read,
	.unthrottle	= sparc_pmu_unthrottle,
	.start_txn	= sparc_pmu_start_txn,
	.cancel_txn	= sparc_pmu_cancel_txn,
	.commit_txn	= sparc_pmu_commit_txn,
};
const struct pmu *hw_perf_event_init(struct perf_event *event)
{
	int err = __hw_perf_event_init(event);

	if (err)
		return ERR_PTR(err);
	return &pmu;
}
void perf_event_print_debug(void)
{
	unsigned long flags;
	u64 pcr, pic;
	int cpu;

	if (!sparc_pmu)
		return;

	local_irq_save(flags);

	cpu = smp_processor_id();

	pcr = pcr_ops->read();
	read_pic(pic);

	pr_info("\n");
	pr_info("CPU#%d: PCR[%016llx] PIC[%016llx]\n",
		cpu, pcr, pic);

	local_irq_restore(flags);
}
static int __kprobes perf_event_nmi_handler(struct notifier_block *self,
					    unsigned long cmd, void *__args)
{
	struct die_args *args = __args;
	struct perf_sample_data data;
	struct cpu_hw_events *cpuc;
	struct pt_regs *regs;
	int i;

	if (!atomic_read(&active_events))
		return NOTIFY_DONE;

	switch (cmd) {
	case DIE_NMI:
		break;

	default:
		return NOTIFY_DONE;
	}

	regs = args->regs;

	perf_sample_data_init(&data, 0);

	cpuc = &__get_cpu_var(cpu_hw_events);

	/* If the PMU has the TOE IRQ enable bits, we need to do a
	 * dummy write to the %pcr to clear the overflow bits and thus
	 * the interrupt.
	 *
	 * Do this before we peek at the counters to determine
	 * overflow so we don't lose any events.
	 */
	if (sparc_pmu->irq_bit)
		pcr_ops->write(cpuc->pcr);
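	/* A counter that overflowed has wrapped past zero, so its
	 * current raw value has bit 31 clear; a counter with bit 31
	 * still set is far from wrapping and did not cause this
	 * interrupt.
	 */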
	for (i = 0; i < cpuc->n_events; i++) {
		struct perf_event *event = cpuc->event[i];
		int idx = cpuc->current_idx[i];
		struct hw_perf_event *hwc;
		u64 val;

		hwc = &event->hw;
		val = sparc_perf_event_update(event, hwc, idx);
		if (val & (1ULL << 31))
			continue;

		data.period = event->hw.last_period;
		if (!sparc_perf_event_set_period(event, hwc, idx))
			continue;

		if (perf_event_overflow(event, 1, &data, regs))
			sparc_pmu_disable_event(cpuc, hwc, idx);
	}

	return NOTIFY_STOP;
}
static __read_mostly struct notifier_block perf_event_nmi_notifier = {
	.notifier_call		= perf_event_nmi_handler,
};
static bool __init supported_pmu(void)
{
	if (!strcmp(sparc_pmu_type, "ultra3") ||
	    !strcmp(sparc_pmu_type, "ultra3+") ||
	    !strcmp(sparc_pmu_type, "ultra3i") ||
	    !strcmp(sparc_pmu_type, "ultra4+")) {
		sparc_pmu = &ultra3_pmu;
		return true;
	}
	if (!strcmp(sparc_pmu_type, "niagara")) {
		sparc_pmu = &niagara1_pmu;
		return true;
	}
	if (!strcmp(sparc_pmu_type, "niagara2")) {
		sparc_pmu = &niagara2_pmu;
		return true;
	}
	return false;
}
void __init init_hw_perf_events(void)
{
	pr_info("Performance events: ");

	if (!supported_pmu()) {
		pr_cont("No support for PMU type '%s'\n", sparc_pmu_type);
		return;
	}

	pr_cont("Supported PMU type is '%s'\n", sparc_pmu_type);

	/* All sparc64 PMUs currently have 2 events.  */
	perf_max_events = 2;

	register_die_notifier(&perf_event_nmi_notifier);
}
static void perf_callchain_kernel(struct pt_regs *regs,
				  struct perf_callchain_entry *entry)
{
	unsigned long ksp, fp;
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
	int graph = 0;
#endif

	perf_callchain_store(entry, PERF_CONTEXT_KERNEL);
	perf_callchain_store(entry, regs->tpc);

	ksp = regs->u_regs[UREG_I6];
	fp = ksp + STACK_BIAS;
	do {
		struct sparc_stackf *sf;
		struct pt_regs *regs;
		unsigned long pc;

		if (!kstack_valid(current_thread_info(), fp))
			break;

		sf = (struct sparc_stackf *) fp;
		regs = (struct pt_regs *) (sf + 1);

		if (kstack_is_trap_frame(current_thread_info(), regs)) {
			if (user_mode(regs))
				break;
			pc = regs->tpc;
			fp = regs->u_regs[UREG_I6] + STACK_BIAS;
		} else {
			pc = sf->callers_pc;
			fp = (unsigned long)sf->fp + STACK_BIAS;
		}
		perf_callchain_store(entry, pc);
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
		if ((pc + 8UL) == (unsigned long) &return_to_handler) {
			int index = current->curr_ret_stack;
			if (current->ret_stack && index >= graph) {
				pc = current->ret_stack[index - graph].ret;
				perf_callchain_store(entry, pc);
				graph++;
			}
		}
#endif
	} while (entry->nr < PERF_MAX_STACK_DEPTH);
}
static void perf_callchain_user_64(struct pt_regs *regs,
				   struct perf_callchain_entry *entry)
{
	unsigned long ufp;

	perf_callchain_store(entry, PERF_CONTEXT_USER);
	perf_callchain_store(entry, regs->tpc);

	ufp = regs->u_regs[UREG_I6] + STACK_BIAS;
	do {
		struct sparc_stackf *usf, sf;
		unsigned long pc;

		usf = (struct sparc_stackf *) ufp;
		if (__copy_from_user_inatomic(&sf, usf, sizeof(sf)))
			break;

		pc = sf.callers_pc;
		ufp = (unsigned long)sf.fp + STACK_BIAS;
		perf_callchain_store(entry, pc);
	} while (entry->nr < PERF_MAX_STACK_DEPTH);
}
static void perf_callchain_user_32(struct pt_regs *regs,
				   struct perf_callchain_entry *entry)
{
	unsigned long ufp;

	perf_callchain_store(entry, PERF_CONTEXT_USER);
	perf_callchain_store(entry, regs->tpc);

	ufp = regs->u_regs[UREG_I6] & 0xffffffffUL;
	do {
		struct sparc_stackf32 *usf, sf;
		unsigned long pc;

		usf = (struct sparc_stackf32 *) ufp;
		if (__copy_from_user_inatomic(&sf, usf, sizeof(sf)))
			break;

		pc = sf.callers_pc;
		ufp = (unsigned long)sf.fp;
		perf_callchain_store(entry, pc);
	} while (entry->nr < PERF_MAX_STACK_DEPTH);
}
/* Like powerpc we can't get PMU interrupts within the PMU handler,
 * so no need for separate NMI and IRQ chains as on x86.
 */
static DEFINE_PER_CPU(struct perf_callchain_entry, callchain);
struct perf_callchain_entry *perf_callchain(struct pt_regs *regs)
{
	struct perf_callchain_entry *entry = &__get_cpu_var(callchain);

	entry->nr = 0;

	if (!user_mode(regs)) {
		stack_trace_flush();
		perf_callchain_kernel(regs, entry);
		if (current->mm)
			regs = task_pt_regs(current);
		else
			regs = NULL;
	}
	if (regs) {
		flushw_user();
		if (test_thread_flag(TIF_32BIT))
			perf_callchain_user_32(regs, entry);
		else
			perf_callchain_user_64(regs, entry);
	}
	return entry;
}