/*
 * PPC 64 oprofile support:
 * Copyright (C) 2004 Anton Blanchard <anton@au.ibm.com>, IBM
 * PPC 32 oprofile support: (based on PPC 64 support)
 * Copyright (C) Freescale Semiconductor, Inc 2004
 *
 * Based on alpha version.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

#include <linux/oprofile.h>
#include <linux/init.h>
#include <linux/smp.h>
#include <linux/errno.h>
#include <asm/ptrace.h>
#include <asm/system.h>
#include <asm/pmc.h>
#include <asm/cputable.h>
#include <asm/oprofile_impl.h>
#include <asm/firmware.h>

static struct op_powerpc_model *model;

static struct op_counter_config ctr[OP_MAX_COUNTER];
static struct op_system_config sys;

static int op_per_cpu_rc;

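/*
 * Performance monitor interrupt handler registered with the PMC layer;
 * it simply forwards the exception to the active CPU model together
 * with the current counter configuration.
 */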
static void op_handle_interrupt(struct pt_regs *regs)
{
	model->handle_interrupt(regs, ctr);
}

static void op_powerpc_cpu_setup(void *dummy)
{
	int ret;

	ret = model->cpu_setup(ctr);

	if (ret != 0)
		op_per_cpu_rc = ret;
}

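/*
 * Reserve the performance counter hardware, pre-compute the register
 * values once, then program every CPU.  Any per-CPU failure ends up in
 * op_per_cpu_rc and is reported back to the generic OProfile layer.
 */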
static int op_powerpc_setup(void)
{
	int err;

	op_per_cpu_rc = 0;

	/* Grab the hardware */
	err = reserve_pmc_hardware(op_handle_interrupt);
	if (err)
		return err;

	/* Pre-compute the values to stuff in the hardware registers. */
	op_per_cpu_rc = model->reg_setup(ctr, &sys, model->num_counters);

	if (op_per_cpu_rc)
		goto out;

	/* Configure the registers on all cpus.  If an error occurs on one
	 * of the cpus, op_per_cpu_rc will be set to the error */
	on_each_cpu(op_powerpc_cpu_setup, NULL, 1);

out:	if (op_per_cpu_rc) {
		/* On a setup error, release the performance counter hardware */
		release_pmc_hardware();
	}

	return op_per_cpu_rc;
}

static void op_powerpc_shutdown(void)
{
	release_pmc_hardware();
}

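/*
 * Starting the counters: models that provide a global_start hook are
 * started once for the whole machine; otherwise each CPU starts its own
 * counters and any error is collected in op_per_cpu_rc.
 */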
static void op_powerpc_cpu_start(void *dummy)
{
	/* If any of the cpus return an error, set the
	 * global flag to the error so it can be returned
	 * to the generic OProfile caller.
	 */
	int ret;

	ret = model->start(ctr);
	if (ret != 0)
		op_per_cpu_rc = ret;
}

static int op_powerpc_start(void)
{
	op_per_cpu_rc = 0;

	if (model->global_start)
		return model->global_start(ctr);
	if (model->start) {
		on_each_cpu(op_powerpc_cpu_start, NULL, 1);
		return op_per_cpu_rc;
	}
	return -EIO;	/* No start function is defined for this
			   power architecture */
}

static inline void op_powerpc_cpu_stop(void *dummy)
{
	model->stop();
}

static void op_powerpc_stop(void)
{
	if (model->stop)
		on_each_cpu(op_powerpc_cpu_stop, NULL, 1);
	if (model->global_stop)
		model->global_stop();
}

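/*
 * Build the oprofilefs tree: global control registers at the root, plus
 * one numbered directory per counter holding its enabled, event, count,
 * kernel, user and unit_mask files.
 */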
static int op_powerpc_create_files(struct super_block *sb, struct dentry *root)
{
	int i;

#ifdef CONFIG_PPC64
	/*
	 * There is one mmcr0, mmcr1 and mmcra for setting the events for
	 * all of the counters.
	 */
	oprofilefs_create_ulong(sb, root, "mmcr0", &sys.mmcr0);
	oprofilefs_create_ulong(sb, root, "mmcr1", &sys.mmcr1);
	oprofilefs_create_ulong(sb, root, "mmcra", &sys.mmcra);
#ifdef CONFIG_OPROFILE_CELL
	/* create a file the user tool can check to see what level of profiling
	 * support exists with this kernel. Initialize bit mask to indicate
	 * what support the kernel has:
	 * bit 0      -  Supports SPU event profiling in addition to PPU
	 *               event and cycles; and SPU cycle profiling
	 * bits 1-31  -  Currently unused.
	 *
	 * If the file does not exist, then the kernel only supports SPU
	 * cycle profiling, PPU event and cycle profiling.
	 */
	oprofilefs_create_ulong(sb, root, "cell_support", &sys.cell_support);
	sys.cell_support = 0x1; /* Note, the user OProfile tool must check
				 * that this bit is set before attempting to
				 * use SPU event profiling.  Older kernels
				 * will not have this file, hence the user
				 * tool is not allowed to do SPU event
				 * profiling on older kernels.  Older kernels
				 * will accept SPU events but collected data
				 * is flawed.
				 */
#endif
#endif

	for (i = 0; i < model->num_counters; ++i) {
		struct dentry *dir;
		char buf[4];

		snprintf(buf, sizeof buf, "%d", i);
		dir = oprofilefs_mkdir(sb, root, buf);

		oprofilefs_create_ulong(sb, dir, "enabled", &ctr[i].enabled);
		oprofilefs_create_ulong(sb, dir, "event", &ctr[i].event);
		oprofilefs_create_ulong(sb, dir, "count", &ctr[i].count);

		/*
		 * Classic PowerPC doesn't support per-counter
		 * control like this, but the options are
		 * expected, so they remain.  For Freescale
		 * Book-E style performance monitors, we do
		 * support them.
		 */
		oprofilefs_create_ulong(sb, dir, "kernel", &ctr[i].kernel);
		oprofilefs_create_ulong(sb, dir, "user", &ctr[i].user);

		oprofilefs_create_ulong(sb, dir, "unit_mask", &ctr[i].unit_mask);
	}

	oprofilefs_create_ulong(sb, root, "enable_kernel", &sys.enable_kernel);
	oprofilefs_create_ulong(sb, root, "enable_user", &sys.enable_user);

	/* Default to tracing both kernel and user */
	sys.enable_kernel = 1;
	sys.enable_user = 1;

	return 0;
}

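/*
 * Pick the CPU-specific op_powerpc_model based on cur_cpu_spec and wire
 * the common setup/start/stop/shutdown helpers into the generic
 * oprofile_operations.
 */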
int __init oprofile_arch_init(struct oprofile_operations *ops)
{
	if (!cur_cpu_spec->oprofile_cpu_type)
		return -ENODEV;

	if (firmware_has_feature(FW_FEATURE_ISERIES))
		return -ENODEV;

	switch (cur_cpu_spec->oprofile_type) {
#ifdef CONFIG_PPC64
#ifdef CONFIG_OPROFILE_CELL
	case PPC_OPROFILE_CELL:
		if (firmware_has_feature(FW_FEATURE_LPAR))
			return -ENODEV;
		model = &op_model_cell;
		ops->sync_start = model->sync_start;
		ops->sync_stop = model->sync_stop;
		break;
#endif
	case PPC_OPROFILE_RS64:
		model = &op_model_rs64;
		break;
	case PPC_OPROFILE_POWER4:
		model = &op_model_power4;
		break;
	case PPC_OPROFILE_PA6T:
		model = &op_model_pa6t;
		break;
#endif
#ifdef CONFIG_6xx
	case PPC_OPROFILE_G4:
		model = &op_model_7450;
		break;
#endif
#if defined(CONFIG_FSL_EMB_PERFMON)
	case PPC_OPROFILE_FSL_EMB:
		model = &op_model_fsl_emb;
		break;
#endif
	default:
		return -ENODEV;
	}

	model->num_counters = cur_cpu_spec->num_pmcs;

	ops->cpu_type = cur_cpu_spec->oprofile_cpu_type;
	ops->create_files = op_powerpc_create_files;
	ops->setup = op_powerpc_setup;
	ops->shutdown = op_powerpc_shutdown;
	ops->start = op_powerpc_start;
	ops->stop = op_powerpc_stop;
	ops->backtrace = op_powerpc_backtrace;

	printk(KERN_DEBUG "oprofile: using %s performance monitoring.\n",
	       ops->cpu_type);

	return 0;
}

void oprofile_arch_exit(void)
{
}