/*
 * Linux performance counter support for MIPS.
 *
 * Copyright (C) 2010 MIPS Technologies, Inc.
 * Author: Deng-Cheng Zhu
 *
 * This code is based on the implementation for ARM, which is in turn
 * based on the sparc64 perf event code and the x86 code. Performance
 * counter access is based on the MIPS Oprofile code, and the callchain
 * support references the code of MIPS stacktrace.c.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/cpumask.h>
#include <linux/interrupt.h>
#include <linux/smp.h>
#include <linux/kernel.h>
#include <linux/perf_event.h>
#include <linux/uaccess.h>

#include <asm/irq.h>
#include <asm/irq_regs.h>
#include <asm/stacktrace.h>
#include <asm/time.h> /* For perf_irq */

/* These are for 32bit counters. For 64bit ones, define them accordingly. */
#define MAX_PERIOD	((1ULL << 32) - 1)
#define VALID_COUNT	0x7fffffff
#define TOTAL_BITS	32
#define HIGHEST_BIT	31

#define MIPS_MAX_HWEVENTS 4

struct cpu_hw_events {
	/* Array of events on this cpu. */
	struct perf_event	*events[MIPS_MAX_HWEVENTS];

	/*
	 * Set the bit (indexed by the counter number) when the counter
	 * is used for an event.
	 */
	unsigned long		used_mask[BITS_TO_LONGS(MIPS_MAX_HWEVENTS)];

	/*
	 * The borrowed MSB for the performance counter. A MIPS performance
	 * counter uses its bit 31 (for 32bit counters) or bit 63 (for 64bit
	 * counters) to decide whether a counter overflow should be
	 * signaled. So here we keep a separate MSB for each counter to make
	 * things easy.
	 */
	unsigned long		msbs[BITS_TO_LONGS(MIPS_MAX_HWEVENTS)];

	/*
	 * Software copy of the control register for each performance
	 * counter. MIPS CPU models vary in their counter hardware, so the
	 * backends use this field differently, and some may not use it.
	 */
	unsigned int		saved_ctrl[MIPS_MAX_HWEVENTS];
};
DEFINE_PER_CPU(struct cpu_hw_events, cpu_hw_events) = {
	.saved_ctrl = {0},
};
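
/*
 * Sketch of how a backend typically claims a counter with used_mask (an
 * assumed shape for illustration only; the real alloc_counter lives in
 * perf_event_mipsxx.c):
 *
 *	for (i = 0; i < mipspmu->num_counters; i++)
 *		if (!test_and_set_bit(i, cpuc->used_mask))
 *			return i;
 *	return -EAGAIN;
 *
 * test_and_set_bit() makes the claim atomic, so no extra lock is needed.
 */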

/* The description of MIPS performance events. */
struct mips_perf_event {
	unsigned int event_id;
	/*
	 * MIPS performance counters are indexed starting from 0.
	 * CNTR_EVEN indicates the indexes of the counters to be used are
	 * even numbers.
	 */
	unsigned int cntr_mask;
	#define CNTR_EVEN	0x55555555
	#define CNTR_ODD	0xaaaaaaaa
#ifdef CONFIG_MIPS_MT_SMP
	enum {
		T  = 0,
		V  = 1,
		P  = 2,
	} range;
#else
	#define T
	#define V
	#define P
#endif
};

static struct mips_perf_event raw_event;
static DEFINE_MUTEX(raw_event_mutex);

#define UNSUPPORTED_PERF_EVENT_ID 0xffffffff
#define C(x) PERF_COUNT_HW_CACHE_##x

struct mips_pmu {
	const char	*name;
	int		irq;
	irqreturn_t	(*handle_irq)(int irq, void *dev);
	int		(*handle_shared_irq)(void);
	void		(*start)(void);
	void		(*stop)(void);
	int		(*alloc_counter)(struct cpu_hw_events *cpuc,
					struct hw_perf_event *hwc);
	u64		(*read_counter)(unsigned int idx);
	void		(*write_counter)(unsigned int idx, u64 val);
	void		(*enable_event)(struct hw_perf_event *evt, int idx);
	void		(*disable_event)(int idx);
	const struct mips_perf_event *(*map_raw_event)(u64 config);
	const struct mips_perf_event (*general_event_map)[PERF_COUNT_HW_MAX];
	const struct mips_perf_event (*cache_event_map)
				[PERF_COUNT_HW_CACHE_MAX]
				[PERF_COUNT_HW_CACHE_OP_MAX]
				[PERF_COUNT_HW_CACHE_RESULT_MAX];
	unsigned int	num_counters;
};

static const struct mips_pmu *mipspmu;

static int
mipspmu_event_set_period(struct perf_event *event,
			struct hw_perf_event *hwc,
			int idx)
{
	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
	s64 left = local64_read(&hwc->period_left);
	s64 period = hwc->sample_period;
	int ret = 0;
	u64 uleft;
	unsigned long flags;

	if (unlikely(left <= -period)) {
		left = period;
		local64_set(&hwc->period_left, left);
		hwc->last_period = period;
		ret = 1;
	}

	if (unlikely(left <= 0)) {
		left += period;
		local64_set(&hwc->period_left, left);
		hwc->last_period = period;
		ret = 1;
	}

	if (left > (s64)MAX_PERIOD)
		left = MAX_PERIOD;

	local64_set(&hwc->prev_count, (u64)-left);

	local_irq_save(flags);
	uleft = (u64)(-left) & MAX_PERIOD;
	uleft > VALID_COUNT ?
		set_bit(idx, cpuc->msbs) : clear_bit(idx, cpuc->msbs);
	mipspmu->write_counter(idx, (u64)(-left) & VALID_COUNT);
	local_irq_restore(flags);

	perf_event_update_userpage(event);

	return ret;
}
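
/*
 * Worked example (illustrative, not from the original source): for a
 * sample period of 0x80, left == 0x80, so uleft == (u64)(-0x80) &
 * MAX_PERIOD == 0xffffff80. That exceeds VALID_COUNT, so the msbs bit
 * is set and only the low 31 bits (0x7fffff80) reach the hardware; the
 * counter then carries into bit 31 after exactly 0x80 events and raises
 * the overflow interrupt.
 */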

static int mipspmu_enable(struct perf_event *event)
{
	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
	struct hw_perf_event *hwc = &event->hw;
	int idx;
	int err = 0;

	/* Look for a free counter for this event. */
	idx = mipspmu->alloc_counter(cpuc, hwc);
	if (idx < 0) {
		err = idx;
		goto out;
	}

	/*
	 * If there is an event in the counter we are going to use then
	 * make sure it is disabled.
	 */
	event->hw.idx = idx;
	mipspmu->disable_event(idx);
	cpuc->events[idx] = event;

	/* Set the period for the event. */
	mipspmu_event_set_period(event, hwc, idx);

	/* Enable the event. */
	mipspmu->enable_event(hwc, idx);

	/* Propagate our changes to the userspace mapping. */
	perf_event_update_userpage(event);

out:
	return err;
}

static void mipspmu_event_update(struct perf_event *event,
			struct hw_perf_event *hwc,
			int idx)
{
	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
	unsigned long flags;
	int shift = 64 - TOTAL_BITS;
	s64 prev_raw_count, new_raw_count;
	s64 delta;

again:
	prev_raw_count = local64_read(&hwc->prev_count);
	local_irq_save(flags);
	/* Make the counter value be a "real" one. */
	new_raw_count = mipspmu->read_counter(idx);
	if (new_raw_count & (test_bit(idx, cpuc->msbs) << HIGHEST_BIT)) {
		new_raw_count &= VALID_COUNT;
		clear_bit(idx, cpuc->msbs);
	} else
		new_raw_count |= (test_bit(idx, cpuc->msbs) << HIGHEST_BIT);
	local_irq_restore(flags);

	if (local64_cmpxchg(&hwc->prev_count, prev_raw_count,
				new_raw_count) != prev_raw_count)
		goto again;

	delta = (new_raw_count << shift) - (prev_raw_count << shift);
	delta >>= shift;

	local64_add(delta, &event->count);
	local64_sub(delta, &hwc->period_left);
}

static void mipspmu_disable(struct perf_event *event)
{
	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
	struct hw_perf_event *hwc = &event->hw;
	int idx = hwc->idx;

	WARN_ON(idx < 0 || idx >= mipspmu->num_counters);

	/* We are working on a local event. */
	mipspmu->disable_event(idx);

	barrier();

	mipspmu_event_update(event, hwc, idx);
	cpuc->events[idx] = NULL;
	clear_bit(idx, cpuc->used_mask);

	perf_event_update_userpage(event);
}

static void mipspmu_unthrottle(struct perf_event *event)
{
	struct hw_perf_event *hwc = &event->hw;

	mipspmu->enable_event(hwc, hwc->idx);
}

static void mipspmu_read(struct perf_event *event)
{
	struct hw_perf_event *hwc = &event->hw;

	/* Don't read disabled counters! */
	if (hwc->idx < 0)
		return;

	mipspmu_event_update(event, hwc, hwc->idx);
}

static struct pmu pmu = {
	.enable		= mipspmu_enable,
	.disable	= mipspmu_disable,
	.unthrottle	= mipspmu_unthrottle,
	.read		= mipspmu_read,
};

static atomic_t active_events = ATOMIC_INIT(0);
static DEFINE_MUTEX(pmu_reserve_mutex);
static int (*save_perf_irq)(void);

static int mipspmu_get_irq(void)
{
	int err;

	if (mipspmu->irq >= 0) {
		/* Request my own irq handler. */
		err = request_irq(mipspmu->irq, mipspmu->handle_irq,
			IRQF_DISABLED | IRQF_NOBALANCING,
			"mips_perf_pmu", NULL);
		if (err) {
			pr_warning("Unable to request IRQ%d for MIPS "
				"performance counters!\n", mipspmu->irq);
		}
	} else if (cp0_perfcount_irq < 0) {
		/*
		 * We are sharing the irq number with the timer interrupt.
		 */
		save_perf_irq = perf_irq;
		perf_irq = mipspmu->handle_shared_irq;
		err = 0;
	} else {
		pr_warning("The platform hasn't properly defined its "
			"interrupt controller.\n");
		err = -ENOENT;
	}

	return err;
}

static void mipspmu_free_irq(void)
{
	if (mipspmu->irq >= 0)
		free_irq(mipspmu->irq, NULL);
	else if (cp0_perfcount_irq < 0)
		perf_irq = save_perf_irq;
}

static inline unsigned int
mipspmu_perf_event_encode(const struct mips_perf_event *pev)
{
/*
 * Top 8 bits for range, next 16 bits for cntr_mask, lowest 8 bits for
 * event_id.
 */
#ifdef CONFIG_MIPS_MT_SMP
	return ((unsigned int)pev->range << 24) |
		(pev->cntr_mask & 0xffff00) |
		(pev->event_id & 0xff);
#else
	return (pev->cntr_mask & 0xffff00) |
		(pev->event_id & 0xff);
#endif
}
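
/*
 * Encoding example (illustrative, CONFIG_MIPS_MT_SMP case): an event with
 * range P (== 2), cntr_mask CNTR_EVEN and event_id 0x0a encodes to
 * (2 << 24) | (0x55555555 & 0xffff00) | 0x0a == 0x0255550a.
 */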

static const struct mips_perf_event *
mipspmu_map_general_event(int idx)
{
	const struct mips_perf_event *pev;

	pev = ((*mipspmu->general_event_map)[idx].event_id ==
		UNSUPPORTED_PERF_EVENT_ID ? ERR_PTR(-EOPNOTSUPP) :
		&(*mipspmu->general_event_map)[idx]);

	return pev;
}

static const struct mips_perf_event *
mipspmu_map_cache_event(u64 config)
{
	unsigned int cache_type, cache_op, cache_result;
	const struct mips_perf_event *pev;

	cache_type = (config >> 0) & 0xff;
	if (cache_type >= PERF_COUNT_HW_CACHE_MAX)
		return ERR_PTR(-EINVAL);

	cache_op = (config >> 8) & 0xff;
	if (cache_op >= PERF_COUNT_HW_CACHE_OP_MAX)
		return ERR_PTR(-EINVAL);

	cache_result = (config >> 16) & 0xff;
	if (cache_result >= PERF_COUNT_HW_CACHE_RESULT_MAX)
		return ERR_PTR(-EINVAL);

	pev = &((*mipspmu->cache_event_map)
					[cache_type]
					[cache_op]
					[cache_result]);

	if (pev->event_id == UNSUPPORTED_PERF_EVENT_ID)
		return ERR_PTR(-EOPNOTSUPP);

	return pev;
}
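
/*
 * Decoding example (illustrative): config == 0x10201 splits into
 * cache_type == 0x01 (L1I), cache_op == 0x02 (PREFETCH) and
 * cache_result == 0x01 (MISS), matching the generic
 * PERF_COUNT_HW_CACHE_* encoding of attr.config.
 */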

static int validate_event(struct cpu_hw_events *cpuc,
			struct perf_event *event)
{
	struct hw_perf_event fake_hwc = event->hw;

	if (event->pmu && event->pmu != &pmu)
		return 0;

	return mipspmu->alloc_counter(cpuc, &fake_hwc) >= 0;
}

static int validate_group(struct perf_event *event)
{
	struct perf_event *sibling, *leader = event->group_leader;
	struct cpu_hw_events fake_cpuc;

	memset(&fake_cpuc, 0, sizeof(fake_cpuc));

	if (!validate_event(&fake_cpuc, leader))
		return -ENOSPC;

	list_for_each_entry(sibling, &leader->sibling_list, group_entry) {
		if (!validate_event(&fake_cpuc, sibling))
			return -ENOSPC;
	}

	if (!validate_event(&fake_cpuc, event))
		return -ENOSPC;

	return 0;
}
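
/*
 * Note (illustrative): validation runs against a zeroed fake_cpuc, so
 * alloc_counter() only marks bits in the fake used_mask. A group of five
 * events on a CPU with four counters therefore fails with -ENOSPC here,
 * before any hardware state is touched.
 */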

/*
 * mipsxx/rm9000/loongson2 have different performance counters; they have
 * their own specific low-level init routines.
 */
static void reset_counters(void *arg);
static int __hw_perf_event_init(struct perf_event *event);

static void hw_perf_event_destroy(struct perf_event *event)
{
	if (atomic_dec_and_mutex_lock(&active_events,
				&pmu_reserve_mutex)) {
		/*
		 * We must not call the destroy function with interrupts
		 * disabled.
		 */
		on_each_cpu(reset_counters,
			(void *)(long)mipspmu->num_counters, 1);
		mipspmu_free_irq();
		mutex_unlock(&pmu_reserve_mutex);
	}
}

const struct pmu *hw_perf_event_init(struct perf_event *event)
{
	int err = 0;

	if (!mipspmu || event->cpu >= nr_cpumask_bits ||
		(event->cpu >= 0 && !cpu_online(event->cpu)))
		return ERR_PTR(-ENODEV);

	if (!atomic_inc_not_zero(&active_events)) {
		if (atomic_read(&active_events) > MIPS_MAX_HWEVENTS) {
			atomic_dec(&active_events);
			return ERR_PTR(-ENOSPC);
		}

		mutex_lock(&pmu_reserve_mutex);
		if (atomic_read(&active_events) == 0)
			err = mipspmu_get_irq();

		if (!err)
			atomic_inc(&active_events);
		mutex_unlock(&pmu_reserve_mutex);
	}

	if (err)
		return ERR_PTR(err);

	err = __hw_perf_event_init(event);
	if (err)
		hw_perf_event_destroy(event);

	return err ? ERR_PTR(err) : &pmu;
}
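
/*
 * Usage note (illustrative): the first event initialized here takes
 * pmu_reserve_mutex and claims the overflow interrupt through
 * mipspmu_get_irq(); subsequent events only bump active_events. The
 * matching teardown is hw_perf_event_destroy(), which resets the counters
 * and releases the interrupt once the count reaches zero.
 */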

void hw_perf_enable(void)
{
	if (mipspmu)
		mipspmu->start();
}

void hw_perf_disable(void)
{
	if (mipspmu)
		mipspmu->stop();
}

/* This is needed by specific irq handlers in perf_event_*.c */
static void
handle_associated_event(struct cpu_hw_events *cpuc,
	int idx, struct perf_sample_data *data, struct pt_regs *regs)
{
	struct perf_event *event = cpuc->events[idx];
	struct hw_perf_event *hwc = &event->hw;

	mipspmu_event_update(event, hwc, idx);
	data->period = event->hw.last_period;
	if (!mipspmu_event_set_period(event, hwc, idx))
		return;

	if (perf_event_overflow(event, 0, data, regs))
		mipspmu->disable_event(idx);
}

#include "perf_event_mipsxx.c"

/* Callchain handling code. */
static inline void
callchain_store(struct perf_callchain_entry *entry,
		u64 ip)
{
	if (entry->nr < PERF_MAX_STACK_DEPTH)
		entry->ip[entry->nr++] = ip;
}

/*
 * Leave the userspace callchain empty for now. Once we find a way to
 * trace the user stack callchains, we will add that here.
 */
static void
perf_callchain_user(struct pt_regs *regs,
		    struct perf_callchain_entry *entry)
{
}

static void save_raw_perf_callchain(struct perf_callchain_entry *entry,
	unsigned long reg29)
{
	unsigned long *sp = (unsigned long *)reg29;
	unsigned long addr;

	while (!kstack_end(sp)) {
		addr = *sp++;
		if (__kernel_text_address(addr)) {
			callchain_store(entry, addr);
			if (entry->nr >= PERF_MAX_STACK_DEPTH)
				break;
		}
	}
}

static void
perf_callchain_kernel(struct pt_regs *regs,
		      struct perf_callchain_entry *entry)
{
	unsigned long sp = regs->regs[29];
#ifdef CONFIG_KALLSYMS
	unsigned long ra = regs->regs[31];
	unsigned long pc = regs->cp0_epc;

	callchain_store(entry, PERF_CONTEXT_KERNEL);
	if (raw_show_trace || !__kernel_text_address(pc)) {
		unsigned long stack_page =
			(unsigned long)task_stack_page(current);
		if (stack_page && sp >= stack_page &&
		    sp <= stack_page + THREAD_SIZE - 32)
			save_raw_perf_callchain(entry, sp);
		return;
	}
	do {
		callchain_store(entry, pc);
		if (entry->nr >= PERF_MAX_STACK_DEPTH)
			break;
		pc = unwind_stack(current, &sp, pc, &ra);
	} while (pc);
#else
	callchain_store(entry, PERF_CONTEXT_KERNEL);
	save_raw_perf_callchain(entry, sp);
#endif
}
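
/*
 * Note (illustrative): with CONFIG_KALLSYMS the unwinder above walks real
 * frames via unwind_stack(); without it, save_raw_perf_callchain() falls
 * back to scanning the raw kernel stack for plausible text addresses,
 * mirroring the heuristic in MIPS stacktrace.c.
 */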

static void
perf_do_callchain(struct pt_regs *regs,
		  struct perf_callchain_entry *entry)
{
	int is_user;

	if (!regs)
		return;

	is_user = user_mode(regs);

	if (!current || !current->pid)
		return;

	if (is_user && current->state != TASK_RUNNING)
		return;

	if (!is_user) {
		perf_callchain_kernel(regs, entry);
		if (current->mm)
			regs = task_pt_regs(current);
		else
			regs = NULL;
	}
	if (regs)
		perf_callchain_user(regs, entry);
}

static DEFINE_PER_CPU(struct perf_callchain_entry, pmc_irq_entry);

struct perf_callchain_entry *
perf_callchain(struct pt_regs *regs)
{
	struct perf_callchain_entry *entry = &__get_cpu_var(pmc_irq_entry);

	entry->nr = 0;
	perf_do_callchain(regs, entry);

	return entry;
}