/*
 * PMU emulation helpers for TCG IBM POWER chips
 *
 * Copyright IBM Corp. 2021
 *
 * Authors:
 *  Daniel Henrique Barboza      <danielhb413@gmail.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2 or later.
 * See the COPYING file in the top-level directory.
 */

#include "qemu/osdep.h"
#include "cpu.h"
#include "helper_regs.h"
#include "exec/exec-all.h"
#include "exec/helper-proto.h"
#include "qemu/error-report.h"
#include "qemu/main-loop.h"
#include "hw/ppc/ppc.h"
#include "power8-pmu.h"

#if defined(TARGET_PPC64) && !defined(CONFIG_USER_ONLY)

static bool pmc_has_overflow_enabled(CPUPPCState *env, int sprn)
{
    if (sprn == SPR_POWER_PMC1) {
        return env->spr[SPR_POWER_MMCR0] & MMCR0_PMC1CE;
    }

    return env->spr[SPR_POWER_MMCR0] & MMCR0_PMCjCE;
}

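/*
 * Recompute the cached bitmaps of which PMCs are counting instructions
 * (pmc_ins_cnt) and which are counting cycles (pmc_cyc_cnt) from the
 * current MMCR0/MMCR1 contents, and mirror "some PMC is counting
 * instructions" into the HFLAGS_INSN_CNT hflag.
 */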
void pmu_update_summaries(CPUPPCState *env)
{
    target_ulong mmcr0 = env->spr[SPR_POWER_MMCR0];
    target_ulong mmcr1 = env->spr[SPR_POWER_MMCR1];
    int ins_cnt = 0;
    int cyc_cnt = 0;

    if (mmcr0 & MMCR0_FC) {
        goto hflags_calc;
    }

    if (!(mmcr0 & MMCR0_FC14) && mmcr1 != 0) {
        target_ulong sel;

        sel = extract64(mmcr1, MMCR1_PMC1EVT_EXTR, MMCR1_EVT_SIZE);
        switch (sel) {
        case 0x02:
        case 0xfe:
            ins_cnt |= 1 << 1;
            break;
        case 0x1e:
        case 0xf0:
            cyc_cnt |= 1 << 1;
            break;
        }

        sel = extract64(mmcr1, MMCR1_PMC2EVT_EXTR, MMCR1_EVT_SIZE);
        ins_cnt |= (sel == 0x02) << 2;
        cyc_cnt |= (sel == 0x1e) << 2;

        sel = extract64(mmcr1, MMCR1_PMC3EVT_EXTR, MMCR1_EVT_SIZE);
        ins_cnt |= (sel == 0x02) << 3;
        cyc_cnt |= (sel == 0x1e) << 3;

        sel = extract64(mmcr1, MMCR1_PMC4EVT_EXTR, MMCR1_EVT_SIZE);
        ins_cnt |= ((sel == 0xfa) || (sel == 0x2)) << 4;
        cyc_cnt |= (sel == 0x1e) << 4;
    }

    ins_cnt |= !(mmcr0 & MMCR0_FC56) << 5;
    cyc_cnt |= !(mmcr0 & MMCR0_FC56) << 6;

 hflags_calc:
    env->pmc_ins_cnt = ins_cnt;
    env->pmc_cyc_cnt = cyc_cnt;
    env->hflags = deposit32(env->hflags, HFLAGS_INSN_CNT, 1, ins_cnt != 0);
}

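/*
 * Add num_insns to every PMC that is currently counting instructions.
 * A counter that reaches PMC_COUNTER_NEGATIVE_VAL while its overflow
 * condition is enabled is clamped to that value.  Returns true if any
 * counter overflowed.
 */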
static bool pmu_increment_insns(CPUPPCState *env, uint32_t num_insns)
{
    target_ulong mmcr0 = env->spr[SPR_POWER_MMCR0];
    unsigned ins_cnt = env->pmc_ins_cnt;
    bool overflow_triggered = false;
    target_ulong tmp;

    if (ins_cnt & (1 << 1)) {
        tmp = env->spr[SPR_POWER_PMC1];
        tmp += num_insns;
        if (tmp >= PMC_COUNTER_NEGATIVE_VAL && (mmcr0 & MMCR0_PMC1CE)) {
            tmp = PMC_COUNTER_NEGATIVE_VAL;
            overflow_triggered = true;
        }
        env->spr[SPR_POWER_PMC1] = tmp;
    }

    if (ins_cnt & (1 << 2)) {
        tmp = env->spr[SPR_POWER_PMC2];
        tmp += num_insns;
        if (tmp >= PMC_COUNTER_NEGATIVE_VAL && (mmcr0 & MMCR0_PMCjCE)) {
            tmp = PMC_COUNTER_NEGATIVE_VAL;
            overflow_triggered = true;
        }
        env->spr[SPR_POWER_PMC2] = tmp;
    }

    if (ins_cnt & (1 << 3)) {
        tmp = env->spr[SPR_POWER_PMC3];
        tmp += num_insns;
        if (tmp >= PMC_COUNTER_NEGATIVE_VAL && (mmcr0 & MMCR0_PMCjCE)) {
            tmp = PMC_COUNTER_NEGATIVE_VAL;
            overflow_triggered = true;
        }
        env->spr[SPR_POWER_PMC3] = tmp;
    }

    if (ins_cnt & (1 << 4)) {
        target_ulong mmcr1 = env->spr[SPR_POWER_MMCR1];
        int sel = extract64(mmcr1, MMCR1_PMC4EVT_EXTR, MMCR1_EVT_SIZE);
        if (sel == 0x02 || (env->spr[SPR_CTRL] & CTRL_RUN)) {
            tmp = env->spr[SPR_POWER_PMC4];
            tmp += num_insns;
            if (tmp >= PMC_COUNTER_NEGATIVE_VAL && (mmcr0 & MMCR0_PMCjCE)) {
                tmp = PMC_COUNTER_NEGATIVE_VAL;
                overflow_triggered = true;
            }
            env->spr[SPR_POWER_PMC4] = tmp;
        }
    }

    if (ins_cnt & (1 << 5)) {
        tmp = env->spr[SPR_POWER_PMC5];
        tmp += num_insns;
        if (tmp >= PMC_COUNTER_NEGATIVE_VAL && (mmcr0 & MMCR0_PMCjCE)) {
            tmp = PMC_COUNTER_NEGATIVE_VAL;
            overflow_triggered = true;
        }
        env->spr[SPR_POWER_PMC5] = tmp;
    }

    return overflow_triggered;
}

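/*
 * Fold the virtual time elapsed since pmu_base_time into every PMC
 * that is currently counting cycles, then reset pmu_base_time.
 */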
static void pmu_update_cycles(CPUPPCState *env)
{
    uint64_t now = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL);
    uint64_t time_delta = now - env->pmu_base_time;
    int sprn, cyc_cnt = env->pmc_cyc_cnt;

    for (sprn = SPR_POWER_PMC1; sprn <= SPR_POWER_PMC6; sprn++) {
        if (cyc_cnt & (1 << (sprn - SPR_POWER_PMC1 + 1))) {
            /*
             * The pseries and powernv clocks run at 1GHz, meaning
             * that 1 nanosecond equals 1 cycle.
             */
            env->spr[sprn] += time_delta;
        }
    }

    /* Update base_time for future calculations */
    env->pmu_base_time = now;
}

/*
 * Helper function to retrieve the cycle overflow timer of the
 * 'sprn' counter.
 */
static QEMUTimer *get_cyc_overflow_timer(CPUPPCState *env, int sprn)
{
    return env->pmu_cyc_overflow_timers[sprn - SPR_POWER_PMC1];
}

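/*
 * Arm (or disarm) the cycle overflow timer of the 'sprn' counter so that
 * it fires when the counter would reach PMC_COUNTER_NEGATIVE_VAL.
 */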
static void pmc_update_overflow_timer(CPUPPCState *env, int sprn)
{
    QEMUTimer *pmc_overflow_timer = get_cyc_overflow_timer(env, sprn);
    int64_t timeout;

    /*
     * PMC5 does not have an overflow timer and this pointer
     * will be NULL.
     */
    if (!pmc_overflow_timer) {
        return;
    }

    if (!(env->pmc_cyc_cnt & (1 << (sprn - SPR_POWER_PMC1 + 1))) ||
        !pmc_has_overflow_enabled(env, sprn)) {
        /* Overflow timer is not needed for this counter */
        timer_del(pmc_overflow_timer);
        return;
    }

    if (env->spr[sprn] >= PMC_COUNTER_NEGATIVE_VAL) {
        timeout = 0;
    } else {
        timeout = PMC_COUNTER_NEGATIVE_VAL - env->spr[sprn];
    }

    /*
     * Use timer_mod_anticipate() because an overflow timer might
     * be already running for this PMC.
     */
    timer_mod_anticipate(pmc_overflow_timer, env->pmu_base_time + timeout);
}

static void pmu_update_overflow_timers(CPUPPCState *env)
{
    int sprn;

    /*
     * Scroll through all PMCs and start counter overflow timers for
     * PM_CYC events, if needed.
     */
    for (sprn = SPR_POWER_PMC1; sprn <= SPR_POWER_PMC6; sprn++) {
        pmc_update_overflow_timer(env, sprn);
    }
}

static void pmu_delete_timers(CPUPPCState *env)
{
    QEMUTimer *pmc_overflow_timer;
    int sprn;

    for (sprn = SPR_POWER_PMC1; sprn <= SPR_POWER_PMC6; sprn++) {
        pmc_overflow_timer = get_cyc_overflow_timer(env, sprn);

        if (pmc_overflow_timer) {
            timer_del(pmc_overflow_timer);
        }
    }
}

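/*
 * Fold in the cycles counted under the old MMCR0 value before applying the
 * new one, then refresh the PMCC hflags, the counting summaries and the
 * cycle overflow timers.
 */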
void helper_store_mmcr0(CPUPPCState *env, target_ulong value)
{
    bool hflags_pmcc0 = (value & MMCR0_PMCC0) != 0;
    bool hflags_pmcc1 = (value & MMCR0_PMCC1) != 0;

    pmu_update_cycles(env);

    env->spr[SPR_POWER_MMCR0] = value;

    /* MMCR0 writes can change HFLAGS_PMCC[01] and HFLAGS_INSN_CNT */
    env->hflags = deposit32(env->hflags, HFLAGS_PMCC0, 1, hflags_pmcc0);
    env->hflags = deposit32(env->hflags, HFLAGS_PMCC1, 1, hflags_pmcc1);

    pmu_update_summaries(env);

    /* Update cycle overflow timers with the current MMCR0 state */
    pmu_update_overflow_timers(env);
}

void helper_store_mmcr1(CPUPPCState *env, uint64_t value)
{
    pmu_update_cycles(env);

    env->spr[SPR_POWER_MMCR1] = value;

    /* MMCR1 writes can change HFLAGS_INSN_CNT */
    pmu_update_summaries(env);
}

target_ulong helper_read_pmc(CPUPPCState *env, uint32_t sprn)
{
    pmu_update_cycles(env);

    return env->spr[sprn];
}

void helper_store_pmc(CPUPPCState *env, uint32_t sprn, uint64_t value)
{
    pmu_update_cycles(env);

    env->spr[sprn] = value;

    pmc_update_overflow_timer(env, sprn);
}

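/*
 * Performance monitor alert: freeze the counters if MMCR0_FCECE is set,
 * replace MMCR0_PMAE with MMCR0_PMAO, and raise the EBB performance
 * monitor exception.
 */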
static void fire_PMC_interrupt(PowerPCCPU *cpu)
{
    CPUPPCState *env = &cpu->env;

    pmu_update_cycles(env);

    if (env->spr[SPR_POWER_MMCR0] & MMCR0_FCECE) {
        env->spr[SPR_POWER_MMCR0] &= ~MMCR0_FCECE;
        env->spr[SPR_POWER_MMCR0] |= MMCR0_FC;

        /* Changing MMCR0_FC requires a new HFLAGS_INSN_CNT calc */
        pmu_update_summaries(env);

        /*
         * Delete all pending timers if we need to freeze
         * the PMC. We'll restart them when the PMC starts
         * running again.
         */
        pmu_delete_timers(env);
    }

    if (env->spr[SPR_POWER_MMCR0] & MMCR0_PMAE) {
        env->spr[SPR_POWER_MMCR0] &= ~MMCR0_PMAE;
        env->spr[SPR_POWER_MMCR0] |= MMCR0_PMAO;
    }

    raise_ebb_perfm_exception(env);
}

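/*
 * Set PMC5 to the counter negative (overflow) value and fire the
 * performance monitor alert.
 */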
void helper_handle_pmc5_overflow(CPUPPCState *env)
{
    env->spr[SPR_POWER_PMC5] = PMC_COUNTER_NEGATIVE_VAL;
    fire_PMC_interrupt(env_archcpu(env));
}

/* This helper assumes that the PMC is running. */
void helper_insns_inc(CPUPPCState *env, uint32_t num_insns)
{
    bool overflow_triggered;
    PowerPCCPU *cpu;

    overflow_triggered = pmu_increment_insns(env, num_insns);

    if (overflow_triggered) {
        cpu = env_archcpu(env);
        fire_PMC_interrupt(cpu);
    }
}

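/* QEMU_CLOCK_VIRTUAL callback for the cycle overflow timers. */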
static void cpu_ppc_pmu_timer_cb(void *opaque)
{
    PowerPCCPU *cpu = opaque;

    fire_PMC_interrupt(cpu);
}

void cpu_ppc_pmu_init(CPUPPCState *env)
{
    PowerPCCPU *cpu = env_archcpu(env);
    int i, sprn;

    for (sprn = SPR_POWER_PMC1; sprn <= SPR_POWER_PMC6; sprn++) {
        if (sprn == SPR_POWER_PMC5) {
            continue;
        }

        i = sprn - SPR_POWER_PMC1;

        env->pmu_cyc_overflow_timers[i] = timer_new_ns(QEMU_CLOCK_VIRTUAL,
                                                       &cpu_ppc_pmu_timer_cb,
                                                       cpu);
    }
}

#endif /* defined(TARGET_PPC64) && !defined(CONFIG_USER_ONLY) */