2 * @file op_model_mpcore.c
3 * MPCORE Event Monitor Driver
4 * @remark Copyright 2004 ARM SMP Development Team
5 * @remark Copyright 2000-2004 Deepak Saxena <dsaxena@mvista.com>
6 * @remark Copyright 2000-2004 MontaVista Software Inc
7 * @remark Copyright 2004 Dave Jiang <dave.jiang@intel.com>
8 * @remark Copyright 2004 Intel Corporation
9 * @remark Copyright 2004 Zwane Mwaikambo <zwane@arm.linux.org.uk>
10 * @remark Copyright 2004 Oprofile Authors
12 * @remark Read the file COPYING
14 * @author Zwane Mwaikambo
17 * 0: PMN0 on CPU0, per-cpu configurable event counter
18 * 1: PMN1 on CPU0, per-cpu configurable event counter
29 * 12-19: configurable SCU event counters
33 #include <linux/types.h>
34 #include <linux/errno.h>
35 #include <linux/err.h>
36 #include <linux/sched.h>
37 #include <linux/oprofile.h>
38 #include <linux/interrupt.h>
39 #include <linux/smp.h>
43 #include <asm/mach/irq.h>
44 #include <mach/hardware.h>
45 #include <mach/board-eb.h>
46 #include <asm/system.h>
49 #include "op_counter.h"
50 #include "op_arm_model.h"
51 #include "op_model_arm11_core.h"
52 #include "op_model_mpcore.h"
/*
 * MPCore SCU event monitor support
 */
#define SCU_EVENTMONITORS_VA_BASE __io_address(REALVIEW_EB11MP_SCU_BASE + 0x10)

/*
 * Bitmask of used SCU counters (bit n set => SCU counter n is in use)
 */
static unsigned int scu_em_used;

/* PMU IRQ reservation handle, held between em_start() and em_stop() */
static const struct pmu_irqs *pmu_irqs;
66 * 2 helper fns take a counter number from 0-7 (not the userspace-visible counter number)
68 static inline void scu_reset_counter(struct eventmonitor __iomem
*emc
, unsigned int n
)
70 writel(-(u32
)counter_config
[SCU_COUNTER(n
)].count
, &emc
->MC
[n
]);
73 static inline void scu_set_event(struct eventmonitor __iomem
*emc
, unsigned int n
, u32 event
)
76 writeb(event
, &emc
->MCEB
[n
]);
80 * SCU counters' IRQ handler (one IRQ per counter => 2 IRQs per CPU)
82 static irqreturn_t
scu_em_interrupt(int irq
, void *arg
)
84 struct eventmonitor __iomem
*emc
= SCU_EVENTMONITORS_VA_BASE
;
87 cnt
= irq
- IRQ_EB11MP_PMU_SCU0
;
88 oprofile_add_sample(get_irq_regs(), SCU_COUNTER(cnt
));
89 scu_reset_counter(emc
, cnt
);
91 /* Clear overflow flag for this counter */
92 writel(1 << (cnt
+ 16), &emc
->PMCR
);
97 /* Configure just the SCU counters that the user has requested */
98 static void scu_setup(void)
100 struct eventmonitor __iomem
*emc
= SCU_EVENTMONITORS_VA_BASE
;
105 for (i
= 0; i
< NUM_SCU_COUNTERS
; i
++) {
106 if (counter_config
[SCU_COUNTER(i
)].enabled
&&
107 counter_config
[SCU_COUNTER(i
)].event
) {
108 scu_set_event(emc
, i
, 0); /* disable counter for now */
109 scu_em_used
|= 1 << i
;
114 static int scu_start(void)
116 struct eventmonitor __iomem
*emc
= SCU_EVENTMONITORS_VA_BASE
;
117 unsigned int temp
, i
;
122 * request the SCU counter interrupts that we need
124 for (i
= 0; i
< NUM_SCU_COUNTERS
; i
++) {
125 if (scu_em_used
& (1 << i
)) {
126 ret
= request_irq(IRQ_EB11MP_PMU_SCU0
+ i
, scu_em_interrupt
, IRQF_DISABLED
, "SCU PMU", NULL
);
128 printk(KERN_ERR
"oprofile: unable to request IRQ%u for SCU Event Monitor\n",
129 IRQ_EB11MP_PMU_SCU0
+ i
);
136 * clear overflow and enable interrupt for all used counters
138 temp
= readl(&emc
->PMCR
);
139 for (i
= 0; i
< NUM_SCU_COUNTERS
; i
++) {
140 if (scu_em_used
& (1 << i
)) {
141 scu_reset_counter(emc
, i
);
142 event
= counter_config
[SCU_COUNTER(i
)].event
;
143 scu_set_event(emc
, i
, event
);
145 /* clear overflow/interrupt */
146 temp
|= 1 << (i
+ 16);
147 /* enable interrupt*/
148 temp
|= 1 << (i
+ 8);
152 /* Enable all 8 counters */
154 writel(temp
, &emc
->PMCR
);
160 free_irq(IRQ_EB11MP_PMU_SCU0
+ i
, NULL
);
164 static void scu_stop(void)
166 struct eventmonitor __iomem
*emc
= SCU_EVENTMONITORS_VA_BASE
;
167 unsigned int temp
, i
;
169 /* Disable counter interrupts */
170 /* Don't disable all 8 counters (with the E bit) as they may be in use */
171 temp
= readl(&emc
->PMCR
);
172 for (i
= 0; i
< NUM_SCU_COUNTERS
; i
++) {
173 if (scu_em_used
& (1 << i
))
174 temp
&= ~(1 << (i
+ 8));
176 writel(temp
, &emc
->PMCR
);
178 /* Free counter interrupts and reset counters */
179 for (i
= 0; i
< NUM_SCU_COUNTERS
; i
++) {
180 if (scu_em_used
& (1 << i
)) {
181 scu_reset_counter(emc
, i
);
182 free_irq(IRQ_EB11MP_PMU_SCU0
+ i
, NULL
);
/*
 * Cross-call plumbing: run an int-returning callback on every CPU and
 * collect a single (sticky, last non-zero) error code.
 */
struct em_function_data {
	int (*fn)(void);	/* callback to run on each CPU */
	int ret;		/* 0 on success, last non-zero return otherwise */
};

static void em_func(void *data)
{
	struct em_function_data *d = data;
	int ret = d->fn();

	if (ret)
		d->ret = ret;
}

static int em_call_function(int (*fn)(void))
{
	struct em_function_data data;

	data.fn = fn;
	data.ret = 0;

	preempt_disable();
	smp_call_function(em_func, &data, 1);
	em_func(&data);		/* smp_call_function skips this CPU */
	preempt_enable();

	return data.ret;
}
216 * Glue to stick the individual ARM11 PMUs and the SCU
217 * into the oprofile framework.
219 static int em_setup_ctrs(void)
223 /* Configure CPU counters by cross-calling to the other CPUs */
224 ret
= em_call_function(arm11_setup_pmu
);
231 static int em_start(void)
235 pmu_irqs
= reserve_pmu();
236 if (IS_ERR(pmu_irqs
)) {
237 ret
= PTR_ERR(pmu_irqs
);
241 ret
= arm11_request_interrupts(pmu_irqs
->irqs
, pmu_irqs
->num_irqs
);
243 em_call_function(arm11_start_pmu
);
247 arm11_release_interrupts(pmu_irqs
->irqs
,
250 release_pmu(pmu_irqs
);
259 static void em_stop(void)
261 em_call_function(arm11_stop_pmu
);
262 arm11_release_interrupts(pmu_irqs
->irqs
, pmu_irqs
->num_irqs
);
264 release_pmu(pmu_irqs
);
268 * Why isn't there a function to route an IRQ to a specific CPU in
271 static void em_route_irq(int irq
, unsigned int cpu
)
273 struct irq_desc
*desc
= irq_desc
+ irq
;
274 const struct cpumask
*mask
= cpumask_of(cpu
);
276 spin_lock_irq(&desc
->lock
);
277 cpumask_copy(desc
->affinity
, mask
);
278 desc
->chip
->set_affinity(irq
, mask
);
279 spin_unlock_irq(&desc
->lock
);
282 static int em_setup(void)
285 * Send SCU PMU interrupts to the "owner" CPU.
287 em_route_irq(IRQ_EB11MP_PMU_SCU0
, 0);
288 em_route_irq(IRQ_EB11MP_PMU_SCU1
, 0);
289 em_route_irq(IRQ_EB11MP_PMU_SCU2
, 1);
290 em_route_irq(IRQ_EB11MP_PMU_SCU3
, 1);
291 em_route_irq(IRQ_EB11MP_PMU_SCU4
, 2);
292 em_route_irq(IRQ_EB11MP_PMU_SCU5
, 2);
293 em_route_irq(IRQ_EB11MP_PMU_SCU6
, 3);
294 em_route_irq(IRQ_EB11MP_PMU_SCU7
, 3);
299 struct op_arm_model_spec op_mpcore_spec
= {
301 .num_counters
= MPCORE_NUM_COUNTERS
,
302 .setup_ctrs
= em_setup_ctrs
,
305 .name
= "arm/mpcore",