/*
 * arch/powerpc/oprofile/op_model_fsl_booke.c
 *
 * Freescale Book-E oprofile support, based on ppc64 oprofile support
 * Copyright (C) 2004 Anton Blanchard <anton@au.ibm.com>, IBM
 *
 * Copyright (c) 2004 Freescale Semiconductor, Inc
 *
 * Maintainer: Kumar Gala <galak@kernel.crashing.org>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */
#include <linux/oprofile.h>
#include <linux/init.h>
#include <linux/smp.h>
#include <asm/ptrace.h>
#include <asm/system.h>
#include <asm/processor.h>
#include <asm/cputable.h>
#include <asm/reg_booke.h>
#include <asm/oprofile_impl.h>
30 static unsigned long reset_value
[OP_MAX_COUNTER
];
32 static int num_counters
;
33 static int oprofile_running
;
35 static inline u32
get_pmlca(int ctr
)
41 pmlca
= mfpmr(PMRN_PMLCA0
);
44 pmlca
= mfpmr(PMRN_PMLCA1
);
47 pmlca
= mfpmr(PMRN_PMLCA2
);
50 pmlca
= mfpmr(PMRN_PMLCA3
);
53 panic("Bad ctr number\n");
59 static inline void set_pmlca(int ctr
, u32 pmlca
)
63 mtpmr(PMRN_PMLCA0
, pmlca
);
66 mtpmr(PMRN_PMLCA1
, pmlca
);
69 mtpmr(PMRN_PMLCA2
, pmlca
);
72 mtpmr(PMRN_PMLCA3
, pmlca
);
75 panic("Bad ctr number\n");
79 static inline unsigned int ctr_read(unsigned int i
)
83 return mfpmr(PMRN_PMC0
);
85 return mfpmr(PMRN_PMC1
);
87 return mfpmr(PMRN_PMC2
);
89 return mfpmr(PMRN_PMC3
);
95 static inline void ctr_write(unsigned int i
, unsigned int val
)
99 mtpmr(PMRN_PMC0
, val
);
102 mtpmr(PMRN_PMC1
, val
);
105 mtpmr(PMRN_PMC2
, val
);
108 mtpmr(PMRN_PMC3
, val
);
116 static void init_pmc_stop(int ctr
)
118 u32 pmlca
= (PMLCA_FC
| PMLCA_FCS
| PMLCA_FCU
|
119 PMLCA_FCM1
| PMLCA_FCM0
);
124 mtpmr(PMRN_PMLCA0
, pmlca
);
125 mtpmr(PMRN_PMLCB0
, pmlcb
);
128 mtpmr(PMRN_PMLCA1
, pmlca
);
129 mtpmr(PMRN_PMLCB1
, pmlcb
);
132 mtpmr(PMRN_PMLCA2
, pmlca
);
133 mtpmr(PMRN_PMLCB2
, pmlcb
);
136 mtpmr(PMRN_PMLCA3
, pmlca
);
137 mtpmr(PMRN_PMLCB3
, pmlcb
);
140 panic("Bad ctr number!\n");
144 static void set_pmc_event(int ctr
, int event
)
148 pmlca
= get_pmlca(ctr
);
150 pmlca
= (pmlca
& ~PMLCA_EVENT_MASK
) |
151 ((event
<< PMLCA_EVENT_SHIFT
) &
154 set_pmlca(ctr
, pmlca
);
157 static void set_pmc_user_kernel(int ctr
, int user
, int kernel
)
161 pmlca
= get_pmlca(ctr
);
173 set_pmlca(ctr
, pmlca
);
176 static void set_pmc_marked(int ctr
, int mark0
, int mark1
)
178 u32 pmlca
= get_pmlca(ctr
);
181 pmlca
&= ~PMLCA_FCM0
;
186 pmlca
&= ~PMLCA_FCM1
;
190 set_pmlca(ctr
, pmlca
);
193 static void pmc_start_ctr(int ctr
, int enable
)
195 u32 pmlca
= get_pmlca(ctr
);
204 set_pmlca(ctr
, pmlca
);
207 static void pmc_start_ctrs(int enable
)
209 u32 pmgc0
= mfpmr(PMRN_PMGC0
);
212 pmgc0
|= PMGC0_FCECE
;
217 pmgc0
&= ~PMGC0_PMIE
;
219 mtpmr(PMRN_PMGC0
, pmgc0
);
222 static void pmc_stop_ctrs(void)
224 u32 pmgc0
= mfpmr(PMRN_PMGC0
);
228 pmgc0
&= ~(PMGC0_PMIE
| PMGC0_FCECE
);
230 mtpmr(PMRN_PMGC0
, pmgc0
);
233 static void dump_pmcs(void)
235 printk("pmgc0: %x\n", mfpmr(PMRN_PMGC0
));
236 printk("pmc\t\tpmlca\t\tpmlcb\n");
237 printk("%8x\t%8x\t%8x\n", mfpmr(PMRN_PMC0
),
238 mfpmr(PMRN_PMLCA0
), mfpmr(PMRN_PMLCB0
));
239 printk("%8x\t%8x\t%8x\n", mfpmr(PMRN_PMC1
),
240 mfpmr(PMRN_PMLCA1
), mfpmr(PMRN_PMLCB1
));
241 printk("%8x\t%8x\t%8x\n", mfpmr(PMRN_PMC2
),
242 mfpmr(PMRN_PMLCA2
), mfpmr(PMRN_PMLCB2
));
243 printk("%8x\t%8x\t%8x\n", mfpmr(PMRN_PMC3
),
244 mfpmr(PMRN_PMLCA3
), mfpmr(PMRN_PMLCB3
));
247 static int fsl_booke_cpu_setup(struct op_counter_config
*ctr
)
251 /* freeze all counters */
254 for (i
= 0;i
< num_counters
;i
++) {
257 set_pmc_event(i
, ctr
[i
].event
);
259 set_pmc_user_kernel(i
, ctr
[i
].user
, ctr
[i
].kernel
);
265 static int fsl_booke_reg_setup(struct op_counter_config
*ctr
,
266 struct op_system_config
*sys
,
271 num_counters
= num_ctrs
;
273 /* Our counters count up, and "count" refers to
274 * how much before the next interrupt, and we interrupt
275 * on overflow. So we calculate the starting value
276 * which will give us "count" until overflow.
277 * Then we set the events on the enabled counters */
278 for (i
= 0; i
< num_counters
; ++i
)
279 reset_value
[i
] = 0x80000000UL
- ctr
[i
].count
;
284 static int fsl_booke_start(struct op_counter_config
*ctr
)
288 mtmsr(mfmsr() | MSR_PMM
);
290 for (i
= 0; i
< num_counters
; ++i
) {
291 if (ctr
[i
].enabled
) {
292 ctr_write(i
, reset_value
[i
]);
293 /* Set each enabled counter to only
294 * count when the Mark bit is *not* set */
295 set_pmc_marked(i
, 1, 0);
300 /* Set the ctr to be stopped */
305 /* Clear the freeze bit, and enable the interrupt.
306 * The counters won't actually start until the rfi clears
310 oprofile_running
= 1;
312 pr_debug("start on cpu %d, pmgc0 %x\n", smp_processor_id(),
318 static void fsl_booke_stop(void)
320 /* freeze counters */
323 oprofile_running
= 0;
325 pr_debug("stop on cpu %d, pmgc0 %x\n", smp_processor_id(),
332 static void fsl_booke_handle_interrupt(struct pt_regs
*regs
,
333 struct op_counter_config
*ctr
)
340 /* set the PMM bit (see comment below) */
341 mtmsr(mfmsr() | MSR_PMM
);
344 is_kernel
= is_kernel_addr(pc
);
346 for (i
= 0; i
< num_counters
; ++i
) {
349 if (oprofile_running
&& ctr
[i
].enabled
) {
350 oprofile_add_ext_sample(pc
, regs
, i
, is_kernel
);
351 ctr_write(i
, reset_value
[i
]);
358 /* The freeze bit was set by the interrupt. */
359 /* Clear the freeze bit, and reenable the interrupt.
360 * The counters won't actually start until the rfi clears
365 struct op_powerpc_model op_model_fsl_booke
= {
366 .reg_setup
= fsl_booke_reg_setup
,
367 .cpu_setup
= fsl_booke_cpu_setup
,
368 .start
= fsl_booke_start
,
369 .stop
= fsl_booke_stop
,
370 .handle_interrupt
= fsl_booke_handle_interrupt
,