initial commit with v2.6.9
[linux-2.6.9-moxart.git] / arch / arm / oprofile / op_model_xscale.c
blob447653df1c144bb2f68dbbb7448375890b3bd98b
/**
 * @file op_model_xscale.c
 * XScale Performance Monitor Driver
 *
 * @remark Copyright 2000-2004 Deepak Saxena <dsaxena@mvista.com>
 * @remark Copyright 2000-2004 MontaVista Software Inc
 * @remark Copyright 2004 Dave Jiang <dave.jiang@intel.com>
 * @remark Copyright 2004 Intel Corporation
 * @remark Copyright 2004 Zwane Mwaikambo <zwane@arm.linux.org.uk>
 * @remark Copyright 2004 OProfile Authors
 *
 * @remark Read the file COPYING
 *
 * @author Zwane Mwaikambo
 */
17 /* #define DEBUG */
18 #include <linux/types.h>
19 #include <linux/errno.h>
20 #include <linux/sched.h>
21 #include <linux/oprofile.h>
22 #include <linux/interrupt.h>
23 #include <asm/irq.h>
24 #include <asm/system.h>
26 #include "op_counter.h"
27 #include "op_arm_model.h"
/* Bits of the PMNC (Performance Monitor Control) register */
#define	PMU_ENABLE	0x001	/* Enable counters */
#define PMN_RESET	0x002	/* Reset event counters */
#define	CCNT_RESET	0x004	/* Reset clock counter */
#define	PMU_RESET	(CCNT_RESET | PMN_RESET)
#define PMU_CNT64	0x008	/* Make CCNT count every 64th cycle */
/*
 * Map the configured machine type to its PMU interrupt line.
 * TODO do runtime detection
 */
#ifdef CONFIG_ARCH_IOP310
#define XSCALE_PMU_IRQ  IRQ_XS80200_PMU
#endif
#ifdef CONFIG_ARCH_IOP321
#define XSCALE_PMU_IRQ  IRQ_IOP321_CORE_PMU
#endif
#ifdef CONFIG_ARCH_IOP331
#define XSCALE_PMU_IRQ  IRQ_IOP331_CORE_PMU
#endif
/*
 * Different types of events that can be counted by the XScale PMU
 * as used by Oprofile userspace. Here primarily for documentation
 * purposes.
 */
/* Hardware-defined PMU event numbers (see XScale core manual) */
#define EVT_ICACHE_MISS			0x00
#define	EVT_ICACHE_NO_DELIVER		0x01
#define	EVT_DATA_STALL			0x02
#define	EVT_ITLB_MISS			0x03
#define	EVT_DTLB_MISS			0x04
#define	EVT_BRANCH			0x05
#define	EVT_BRANCH_MISS			0x06
#define	EVT_INSTRUCTION			0x07
#define	EVT_DCACHE_FULL_STALL		0x08
#define	EVT_DCACHE_FULL_STALL_CONTIG	0x09
#define	EVT_DCACHE_ACCESS		0x0A
#define	EVT_DCACHE_MISS			0x0B
#define	EVT_DCACE_WRITE_BACK		0x0C	/* sic: historical typo kept */
#define	EVT_PC_CHANGED			0x0D
#define	EVT_BCU_REQUEST			0x10
#define	EVT_BCU_FULL			0x11
#define	EVT_BCU_DRAIN			0x12
#define	EVT_BCU_ECC_NO_ELOG		0x14
#define	EVT_BCU_1_BIT_ERR		0x15
#define	EVT_RMW				0x16
/* EVT_CCNT is not hardware defined */
#define EVT_CCNT			0xFE
#define EVT_UNUSED			0xFF
/*
 * Per-counter bookkeeping: pending overflow count (written from the PMU
 * interrupt path, hence volatile) and the value the counter is reloaded
 * with after each overflow.
 */
struct pmu_counter {
	volatile unsigned long ovf;
	unsigned long reset_counter;
};

/* Counter indices: CCNT is the clock counter, PMN0-PMN3 the event counters */
enum { CCNT, PMN0, PMN1, PMN2, PMN3, MAX_COUNTERS };

static struct pmu_counter results[MAX_COUNTERS];
/*
 * There are two versions of the PMU in current XScale processors
 * with differing register layouts and number of performance counters.
 * e.g. IOP321 is xsc1 whilst IOP331 is xsc2.
 * We detect which register layout to use in xscale_detect_pmu()
 */
91 enum { PMU_XSC1, PMU_XSC2 };
93 struct pmu_type {
94 int id;
95 char *name;
96 int num_counters;
97 unsigned int int_enable;
98 unsigned int cnt_ovf[MAX_COUNTERS];
99 unsigned int int_mask[MAX_COUNTERS];
102 static struct pmu_type pmu_parms[] = {
104 .id = PMU_XSC1,
105 .name = "arm/xscale1",
106 .num_counters = 3,
107 .int_mask = { [PMN0] = 0x10, [PMN1] = 0x20,
108 [CCNT] = 0x40 },
109 .cnt_ovf = { [CCNT] = 0x400, [PMN0] = 0x100,
110 [PMN1] = 0x200},
113 .id = PMU_XSC2,
114 .name = "arm/xscale2",
115 .num_counters = 5,
116 .int_mask = { [CCNT] = 0x01, [PMN0] = 0x02,
117 [PMN1] = 0x04, [PMN2] = 0x08,
118 [PMN3] = 0x10 },
119 .cnt_ovf = { [CCNT] = 0x01, [PMN0] = 0x02,
120 [PMN1] = 0x04, [PMN2] = 0x08,
121 [PMN3] = 0x10 },
125 static struct pmu_type *pmu;
127 static void write_pmnc(u32 val)
129 if (pmu->id == PMU_XSC1) {
130 /* upper 4bits and 7, 11 are write-as-0 */
131 val &= 0xffff77f;
132 __asm__ __volatile__ ("mcr p14, 0, %0, c0, c0, 0" : : "r" (val));
133 } else {
134 /* bits 4-23 are write-as-0, 24-31 are write ignored */
135 val &= 0xf;
136 __asm__ __volatile__ ("mcr p14, 0, %0, c0, c1, 0" : : "r" (val));
140 static u32 read_pmnc(void)
142 u32 val;
144 if (pmu->id == PMU_XSC1)
145 __asm__ __volatile__ ("mrc p14, 0, %0, c0, c0, 0" : "=r" (val));
146 else {
147 __asm__ __volatile__ ("mrc p14, 0, %0, c0, c1, 0" : "=r" (val));
148 /* bits 1-2 and 4-23 are read-unpredictable */
149 val &= 0xff000009;
152 return val;
155 static u32 __xsc1_read_counter(int counter)
157 u32 val = 0;
159 switch (counter) {
160 case CCNT:
161 __asm__ __volatile__ ("mrc p14, 0, %0, c1, c0, 0" : "=r" (val));
162 break;
163 case PMN0:
164 __asm__ __volatile__ ("mrc p14, 0, %0, c2, c0, 0" : "=r" (val));
165 break;
166 case PMN1:
167 __asm__ __volatile__ ("mrc p14, 0, %0, c3, c0, 0" : "=r" (val));
168 break;
170 return val;
173 static u32 __xsc2_read_counter(int counter)
175 u32 val = 0;
177 switch (counter) {
178 case CCNT:
179 __asm__ __volatile__ ("mrc p14, 0, %0, c1, c1, 0" : "=r" (val));
180 break;
181 case PMN0:
182 __asm__ __volatile__ ("mrc p14, 0, %0, c0, c2, 0" : "=r" (val));
183 break;
184 case PMN1:
185 __asm__ __volatile__ ("mrc p14, 0, %0, c1, c2, 0" : "=r" (val));
186 break;
187 case PMN2:
188 __asm__ __volatile__ ("mrc p14, 0, %0, c2, c2, 0" : "=r" (val));
189 break;
190 case PMN3:
191 __asm__ __volatile__ ("mrc p14, 0, %0, c3, c2, 0" : "=r" (val));
192 break;
194 return val;
197 static u32 read_counter(int counter)
199 u32 val;
201 if (pmu->id == PMU_XSC1)
202 val = __xsc1_read_counter(counter);
203 else
204 val = __xsc2_read_counter(counter);
206 return val;
209 static void __xsc1_write_counter(int counter, u32 val)
211 switch (counter) {
212 case CCNT:
213 __asm__ __volatile__ ("mcr p14, 0, %0, c1, c0, 0" : : "r" (val));
214 break;
215 case PMN0:
216 __asm__ __volatile__ ("mcr p14, 0, %0, c2, c0, 0" : : "r" (val));
217 break;
218 case PMN1:
219 __asm__ __volatile__ ("mcr p14, 0, %0, c3, c0, 0" : : "r" (val));
220 break;
224 static void __xsc2_write_counter(int counter, u32 val)
226 switch (counter) {
227 case CCNT:
228 __asm__ __volatile__ ("mcr p14, 0, %0, c1, c1, 0" : : "r" (val));
229 break;
230 case PMN0:
231 __asm__ __volatile__ ("mcr p14, 0, %0, c0, c2, 0" : : "r" (val));
232 break;
233 case PMN1:
234 __asm__ __volatile__ ("mcr p14, 0, %0, c1, c2, 0" : : "r" (val));
235 break;
236 case PMN2:
237 __asm__ __volatile__ ("mcr p14, 0, %0, c2, c2, 0" : : "r" (val));
238 break;
239 case PMN3:
240 __asm__ __volatile__ ("mcr p14, 0, %0, c3, c2, 0" : : "r" (val));
241 break;
245 static void write_counter(int counter, u32 val)
247 if (pmu->id == PMU_XSC1)
248 __xsc1_write_counter(counter, val);
249 else
250 __xsc2_write_counter(counter, val);
253 static int xscale_setup_ctrs(void)
255 u32 evtsel, pmnc;
256 int i;
258 for (i = CCNT; i < MAX_COUNTERS; i++) {
259 if (counter_config[i].enabled)
260 continue;
262 counter_config[i].event = EVT_UNUSED;
265 switch (pmu->id) {
266 case PMU_XSC1:
267 pmnc = (counter_config[PMN1].event << 20) | (counter_config[PMN0].event << 12);
268 pr_debug("xscale_setup_ctrs: pmnc: %#08x\n", pmnc);
269 write_pmnc(pmnc);
270 break;
272 case PMU_XSC2:
273 evtsel = counter_config[PMN0].event | (counter_config[PMN1].event << 8) |
274 (counter_config[PMN2].event << 16) | (counter_config[PMN3].event << 24);
276 pr_debug("xscale_setup_ctrs: evtsel %#08x\n", evtsel);
277 __asm__ __volatile__ ("mcr p14, 0, %0, c8, c1, 0" : : "r" (evtsel));
278 break;
281 for (i = CCNT; i < MAX_COUNTERS; i++) {
282 if (counter_config[i].event == EVT_UNUSED) {
283 counter_config[i].event = 0;
284 pmu->int_enable &= ~pmu->int_mask[i];
285 continue;
288 results[i].reset_counter = counter_config[i].count;
289 write_counter(i, -(u32)counter_config[i].count);
290 pmu->int_enable |= pmu->int_mask[i];
291 pr_debug("xscale_setup_ctrs: counter%d %#08x from %#08lx\n", i,
292 read_counter(i), counter_config[i].count);
295 return 0;
298 static void inline __xsc1_check_ctrs(void)
300 int i;
301 u32 pmnc = read_pmnc();
303 /* NOTE: there's an A stepping errata that states if an overflow */
304 /* bit already exists and another occurs, the previous */
305 /* Overflow bit gets cleared. There's no workaround. */
306 /* Fixed in B stepping or later */
308 pmnc &= ~(PMU_ENABLE | pmu->cnt_ovf[PMN0] | pmu->cnt_ovf[PMN1] |
309 pmu->cnt_ovf[CCNT]);
310 write_pmnc(pmnc);
312 for (i = CCNT; i <= PMN1; i++) {
313 if (!(pmu->int_mask[i] & pmu->int_enable))
314 continue;
316 if (pmnc & pmu->cnt_ovf[i])
317 results[i].ovf++;
321 static void inline __xsc2_check_ctrs(void)
323 int i;
324 u32 flag = 0, pmnc = read_pmnc();
326 pmnc &= ~PMU_ENABLE;
327 write_pmnc(pmnc);
329 /* read overflow flag register */
330 __asm__ __volatile__ ("mrc p14, 0, %0, c5, c1, 0" : "=r" (flag));
332 for (i = CCNT; i <= PMN3; i++) {
333 if (!(pmu->int_mask[i] & pmu->int_enable))
334 continue;
336 if (flag & pmu->cnt_ovf[i])
337 results[i].ovf++;
340 /* writeback clears overflow bits */
341 __asm__ __volatile__ ("mcr p14, 0, %0, c5, c1, 0" : : "r" (flag));
344 static irqreturn_t xscale_pmu_interrupt(int irq, void *arg, struct pt_regs *regs)
346 unsigned long pc = profile_pc(regs);
347 int i, is_kernel = !user_mode(regs);
348 u32 pmnc;
350 if (pmu->id == PMU_XSC1)
351 __xsc1_check_ctrs();
352 else
353 __xsc2_check_ctrs();
355 for (i = CCNT; i < MAX_COUNTERS; i++) {
356 if (!results[i].ovf)
357 continue;
359 write_counter(i, -(u32)results[i].reset_counter);
360 oprofile_add_sample(pc, is_kernel, i, smp_processor_id());
361 results[i].ovf--;
364 pmnc = read_pmnc() | PMU_ENABLE;
365 write_pmnc(pmnc);
367 return IRQ_HANDLED;
370 static void xscale_pmu_stop(void)
372 u32 pmnc = read_pmnc();
374 pmnc &= ~PMU_ENABLE;
375 write_pmnc(pmnc);
377 free_irq(XSCALE_PMU_IRQ, results);
380 static int xscale_pmu_start(void)
382 int ret;
383 u32 pmnc = read_pmnc();
385 ret = request_irq(XSCALE_PMU_IRQ, xscale_pmu_interrupt, SA_INTERRUPT,
386 "XScale PMU", (void *)results);
388 if (ret < 0) {
389 printk(KERN_ERR "oprofile: unable to request IRQ%d for XScale PMU\n",
390 XSCALE_PMU_IRQ);
391 return ret;
394 if (pmu->id == PMU_XSC1)
395 pmnc |= pmu->int_enable;
396 else {
397 __asm__ __volatile__ ("mcr p14, 0, %0, c4, c1, 0" : : "r" (pmu->int_enable));
398 pmnc &= ~PMU_CNT64;
401 pmnc |= PMU_ENABLE;
402 write_pmnc(pmnc);
403 pr_debug("xscale_pmu_start: pmnc: %#08x mask: %08x\n", pmnc, pmu->int_enable);
404 return 0;
407 static int xscale_detect_pmu(void)
409 int ret = 0;
410 u32 id;
412 id = (read_cpuid(CPUID_ID) >> 13) & 0x7;
414 switch (id) {
415 case 1:
416 pmu = &pmu_parms[PMU_XSC1];
417 break;
418 case 2:
419 pmu = &pmu_parms[PMU_XSC2];
420 break;
421 default:
422 ret = -ENODEV;
423 break;
426 if (!ret) {
427 op_xscale_spec.name = pmu->name;
428 op_xscale_spec.num_counters = pmu->num_counters;
429 pr_debug("xscale_detect_pmu: detected %s PMU\n", pmu->name);
432 return ret;
435 struct op_arm_model_spec op_xscale_spec = {
436 .init = xscale_detect_pmu,
437 .setup_ctrs = xscale_setup_ctrs,
438 .start = xscale_pmu_start,
439 .stop = xscale_pmu_stop,