/*
 * PMU emulation helpers for TCG IBM POWER chips
 *
 * Copyright IBM Corp. 2021
 *
 * Authors:
 *  Daniel Henrique Barboza      <danielhb413@gmail.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2 or later.
 * See the COPYING file in the top-level directory.
 */
13 #include "qemu/osdep.h"
15 #include "helper_regs.h"
16 #include "exec/exec-all.h"
17 #include "exec/helper-proto.h"
18 #include "qemu/error-report.h"
19 #include "qemu/timer.h"
20 #include "hw/ppc/ppc.h"
21 #include "power8-pmu.h"
23 #if defined(TARGET_PPC64) && !defined(CONFIG_USER_ONLY)
25 static bool pmc_has_overflow_enabled(CPUPPCState
*env
, int sprn
)
27 if (sprn
== SPR_POWER_PMC1
) {
28 return env
->spr
[SPR_POWER_MMCR0
] & MMCR0_PMC1CE
;
31 return env
->spr
[SPR_POWER_MMCR0
] & MMCR0_PMCjCE
;
35 * Called after MMCR0 or MMCR1 changes to update pmc_ins_cnt and pmc_cyc_cnt.
36 * hflags must subsequently be updated.
38 static void pmu_update_summaries(CPUPPCState
*env
)
40 target_ulong mmcr0
= env
->spr
[SPR_POWER_MMCR0
];
41 target_ulong mmcr1
= env
->spr
[SPR_POWER_MMCR1
];
45 if (mmcr0
& MMCR0_FC
) {
49 if (!(mmcr0
& MMCR0_FC14
) && mmcr1
!= 0) {
52 sel
= extract64(mmcr1
, MMCR1_PMC1EVT_EXTR
, MMCR1_EVT_SIZE
);
64 sel
= extract64(mmcr1
, MMCR1_PMC2EVT_EXTR
, MMCR1_EVT_SIZE
);
65 ins_cnt
|= (sel
== 0x02) << 2;
66 cyc_cnt
|= (sel
== 0x1e) << 2;
68 sel
= extract64(mmcr1
, MMCR1_PMC3EVT_EXTR
, MMCR1_EVT_SIZE
);
69 ins_cnt
|= (sel
== 0x02) << 3;
70 cyc_cnt
|= (sel
== 0x1e) << 3;
72 sel
= extract64(mmcr1
, MMCR1_PMC4EVT_EXTR
, MMCR1_EVT_SIZE
);
73 ins_cnt
|= ((sel
== 0xfa) || (sel
== 0x2)) << 4;
74 cyc_cnt
|= (sel
== 0x1e) << 4;
77 ins_cnt
|= !(mmcr0
& MMCR0_FC56
) << 5;
78 cyc_cnt
|= !(mmcr0
& MMCR0_FC56
) << 6;
81 env
->pmc_ins_cnt
= ins_cnt
;
82 env
->pmc_cyc_cnt
= cyc_cnt
;
85 static void hreg_bhrb_filter_update(CPUPPCState
*env
)
89 if (!(env
->spr
[SPR_POWER_MMCR0
] & MMCR0_PMAE
)) {
90 /* disable recording to BHRB */
91 env
->bhrb_filter
= BHRB_TYPE_NORECORD
;
95 ifm
= (env
->spr
[SPR_POWER_MMCRA
] & MMCRA_IFM_MASK
) >> MMCRA_IFM_SHIFT
;
98 /* record all branches */
99 env
->bhrb_filter
= -1;
102 /* only record calls (LK = 1) */
103 env
->bhrb_filter
= BHRB_TYPE_CALL
;
106 /* only record indirect branches */
107 env
->bhrb_filter
= BHRB_TYPE_INDIRECT
;
110 /* only record conditional branches */
111 env
->bhrb_filter
= BHRB_TYPE_COND
;
116 void pmu_mmcr01a_updated(CPUPPCState
*env
)
118 PowerPCCPU
*cpu
= env_archcpu(env
);
120 pmu_update_summaries(env
);
121 hreg_update_pmu_hflags(env
);
123 if (env
->spr
[SPR_POWER_MMCR0
] & MMCR0_PMAO
) {
124 ppc_set_irq(cpu
, PPC_INTERRUPT_PERFM
, 1);
126 ppc_set_irq(cpu
, PPC_INTERRUPT_PERFM
, 0);
129 hreg_bhrb_filter_update(env
);
132 * Should this update overflow timers (if mmcr0 is updated) so they
133 * get set in cpu_post_load?
137 static bool pmu_increment_insns(CPUPPCState
*env
, uint32_t num_insns
)
139 target_ulong mmcr0
= env
->spr
[SPR_POWER_MMCR0
];
140 unsigned ins_cnt
= env
->pmc_ins_cnt
;
141 bool overflow_triggered
= false;
144 if (ins_cnt
& (1 << 1)) {
145 tmp
= env
->spr
[SPR_POWER_PMC1
];
147 if (tmp
>= PMC_COUNTER_NEGATIVE_VAL
&& (mmcr0
& MMCR0_PMC1CE
)) {
148 tmp
= PMC_COUNTER_NEGATIVE_VAL
;
149 overflow_triggered
= true;
151 env
->spr
[SPR_POWER_PMC1
] = tmp
;
154 if (ins_cnt
& (1 << 2)) {
155 tmp
= env
->spr
[SPR_POWER_PMC2
];
157 if (tmp
>= PMC_COUNTER_NEGATIVE_VAL
&& (mmcr0
& MMCR0_PMCjCE
)) {
158 tmp
= PMC_COUNTER_NEGATIVE_VAL
;
159 overflow_triggered
= true;
161 env
->spr
[SPR_POWER_PMC2
] = tmp
;
164 if (ins_cnt
& (1 << 3)) {
165 tmp
= env
->spr
[SPR_POWER_PMC3
];
167 if (tmp
>= PMC_COUNTER_NEGATIVE_VAL
&& (mmcr0
& MMCR0_PMCjCE
)) {
168 tmp
= PMC_COUNTER_NEGATIVE_VAL
;
169 overflow_triggered
= true;
171 env
->spr
[SPR_POWER_PMC3
] = tmp
;
174 if (ins_cnt
& (1 << 4)) {
175 target_ulong mmcr1
= env
->spr
[SPR_POWER_MMCR1
];
176 int sel
= extract64(mmcr1
, MMCR1_PMC4EVT_EXTR
, MMCR1_EVT_SIZE
);
177 if (sel
== 0x02 || (env
->spr
[SPR_CTRL
] & CTRL_RUN
)) {
178 tmp
= env
->spr
[SPR_POWER_PMC4
];
180 if (tmp
>= PMC_COUNTER_NEGATIVE_VAL
&& (mmcr0
& MMCR0_PMCjCE
)) {
181 tmp
= PMC_COUNTER_NEGATIVE_VAL
;
182 overflow_triggered
= true;
184 env
->spr
[SPR_POWER_PMC4
] = tmp
;
188 if (ins_cnt
& (1 << 5)) {
189 tmp
= env
->spr
[SPR_POWER_PMC5
];
191 if (tmp
>= PMC_COUNTER_NEGATIVE_VAL
&& (mmcr0
& MMCR0_PMCjCE
)) {
192 tmp
= PMC_COUNTER_NEGATIVE_VAL
;
193 overflow_triggered
= true;
195 env
->spr
[SPR_POWER_PMC5
] = tmp
;
198 return overflow_triggered
;
201 static void pmu_update_cycles(CPUPPCState
*env
)
203 uint64_t now
= qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL
);
204 uint64_t time_delta
= now
- env
->pmu_base_time
;
205 int sprn
, cyc_cnt
= env
->pmc_cyc_cnt
;
207 for (sprn
= SPR_POWER_PMC1
; sprn
<= SPR_POWER_PMC6
; sprn
++) {
208 if (cyc_cnt
& (1 << (sprn
- SPR_POWER_PMC1
+ 1))) {
210 * The pseries and powernv clock runs at 1Ghz, meaning
211 * that 1 nanosec equals 1 cycle.
213 env
->spr
[sprn
] += time_delta
;
217 /* Update base_time for future calculations */
218 env
->pmu_base_time
= now
;
222 * Helper function to retrieve the cycle overflow timer of the
225 static QEMUTimer
*get_cyc_overflow_timer(CPUPPCState
*env
, int sprn
)
227 return env
->pmu_cyc_overflow_timers
[sprn
- SPR_POWER_PMC1
];
230 static void pmc_update_overflow_timer(CPUPPCState
*env
, int sprn
)
232 QEMUTimer
*pmc_overflow_timer
= get_cyc_overflow_timer(env
, sprn
);
236 * PMC5 does not have an overflow timer and this pointer
239 if (!pmc_overflow_timer
) {
243 if (!(env
->pmc_cyc_cnt
& (1 << (sprn
- SPR_POWER_PMC1
+ 1))) ||
244 !pmc_has_overflow_enabled(env
, sprn
)) {
245 /* Overflow timer is not needed for this counter */
246 timer_del(pmc_overflow_timer
);
250 if (env
->spr
[sprn
] >= PMC_COUNTER_NEGATIVE_VAL
) {
253 timeout
= PMC_COUNTER_NEGATIVE_VAL
- env
->spr
[sprn
];
257 * Use timer_mod_anticipate() because an overflow timer might
258 * be already running for this PMC.
260 timer_mod_anticipate(pmc_overflow_timer
, env
->pmu_base_time
+ timeout
);
263 static void pmu_update_overflow_timers(CPUPPCState
*env
)
268 * Scroll through all PMCs and start counter overflow timers for
269 * PM_CYC events, if needed.
271 for (sprn
= SPR_POWER_PMC1
; sprn
<= SPR_POWER_PMC6
; sprn
++) {
272 pmc_update_overflow_timer(env
, sprn
);
276 static void pmu_delete_timers(CPUPPCState
*env
)
278 QEMUTimer
*pmc_overflow_timer
;
281 for (sprn
= SPR_POWER_PMC1
; sprn
<= SPR_POWER_PMC6
; sprn
++) {
282 pmc_overflow_timer
= get_cyc_overflow_timer(env
, sprn
);
284 if (pmc_overflow_timer
) {
285 timer_del(pmc_overflow_timer
);
290 void helper_store_mmcr0(CPUPPCState
*env
, target_ulong value
)
292 pmu_update_cycles(env
);
294 env
->spr
[SPR_POWER_MMCR0
] = value
;
296 pmu_mmcr01a_updated(env
);
298 /* Update cycle overflow timers with the current MMCR0 state */
299 pmu_update_overflow_timers(env
);
302 void helper_store_mmcr1(CPUPPCState
*env
, uint64_t value
)
304 pmu_update_cycles(env
);
306 env
->spr
[SPR_POWER_MMCR1
] = value
;
308 pmu_mmcr01a_updated(env
);
311 void helper_store_mmcrA(CPUPPCState
*env
, uint64_t value
)
313 env
->spr
[SPR_POWER_MMCRA
] = value
;
315 pmu_mmcr01a_updated(env
);
318 target_ulong
helper_read_pmc(CPUPPCState
*env
, uint32_t sprn
)
320 pmu_update_cycles(env
);
322 return env
->spr
[sprn
];
325 void helper_store_pmc(CPUPPCState
*env
, uint32_t sprn
, uint64_t value
)
327 pmu_update_cycles(env
);
329 env
->spr
[sprn
] = (uint32_t)value
;
331 pmc_update_overflow_timer(env
, sprn
);
334 static void perfm_alert(PowerPCCPU
*cpu
)
336 CPUPPCState
*env
= &cpu
->env
;
338 pmu_update_cycles(env
);
340 if (env
->spr
[SPR_POWER_MMCR0
] & MMCR0_FCECE
) {
341 env
->spr
[SPR_POWER_MMCR0
] |= MMCR0_FC
;
343 /* Changing MMCR0_FC requires summaries and hflags update */
344 pmu_mmcr01a_updated(env
);
347 * Delete all pending timers if we need to freeze
348 * the PMC. We'll restart them when the PMC starts
351 pmu_delete_timers(env
);
354 if (env
->spr
[SPR_POWER_MMCR0
] & MMCR0_PMAE
) {
355 /* These MMCR0 bits do not require summaries or hflags update. */
356 env
->spr
[SPR_POWER_MMCR0
] &= ~MMCR0_PMAE
;
357 env
->spr
[SPR_POWER_MMCR0
] |= MMCR0_PMAO
;
358 ppc_set_irq(cpu
, PPC_INTERRUPT_PERFM
, 1);
361 raise_ebb_perfm_exception(env
);
364 void helper_handle_pmc5_overflow(CPUPPCState
*env
)
366 env
->spr
[SPR_POWER_PMC5
] = PMC_COUNTER_NEGATIVE_VAL
;
367 perfm_alert(env_archcpu(env
));
370 /* This helper assumes that the PMC is running. */
371 void helper_insns_inc(CPUPPCState
*env
, uint32_t num_insns
)
373 bool overflow_triggered
;
375 overflow_triggered
= pmu_increment_insns(env
, num_insns
);
376 if (overflow_triggered
) {
377 perfm_alert(env_archcpu(env
));
381 static void cpu_ppc_pmu_timer_cb(void *opaque
)
383 PowerPCCPU
*cpu
= opaque
;
388 void cpu_ppc_pmu_init(CPUPPCState
*env
)
390 PowerPCCPU
*cpu
= env_archcpu(env
);
393 for (sprn
= SPR_POWER_PMC1
; sprn
<= SPR_POWER_PMC6
; sprn
++) {
394 if (sprn
== SPR_POWER_PMC5
) {
398 i
= sprn
- SPR_POWER_PMC1
;
400 env
->pmu_cyc_overflow_timers
[i
] = timer_new_ns(QEMU_CLOCK_VIRTUAL
,
401 &cpu_ppc_pmu_timer_cb
,
405 #endif /* defined(TARGET_PPC64) && !defined(CONFIG_USER_ONLY) */