/*
 * ARM hflags
 *
 * This code is licensed under the GNU GPL v2 or later.
 *
 * SPDX-License-Identifier: GPL-2.0-or-later
 */
#include "qemu/osdep.h"
#include "cpu.h"
#include "internals.h"
#include "cpu-features.h"
#include "exec/helper-proto.h"

static inline bool fgt_svc(CPUARMState *env, int el)
{
    /*
     * Assuming fine-grained-traps are active, return true if we
     * should be trapping on SVC instructions. Only AArch64 can
     * trap on an SVC at EL1, but we don't need to special-case this
     * because if this is AArch32 EL1 then arm_fgt_active() is false.
     * We also know el is 0 or 1.
     */
    return el == 0 ?
        FIELD_EX64(env->cp15.fgt_exec[FGTREG_HFGITR], HFGITR_EL2, SVC_EL0) :
        FIELD_EX64(env->cp15.fgt_exec[FGTREG_HFGITR], HFGITR_EL2, SVC_EL1);
}

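/*
 * Illustrative note (added commentary): with fine-grained traps active
 * and HFGITR_EL2.SVC_EL0 == 1, an SVC executed at EL0 must trap to EL2.
 * Caching the result as FGT_SVC in the TB flags lets the translator emit
 * that check once per translation instead of re-reading HFGITR_EL2 on
 * every SVC.
 */
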
static CPUARMTBFlags rebuild_hflags_common(CPUARMState *env, int fp_el,
                                           ARMMMUIdx mmu_idx,
                                           CPUARMTBFlags flags)
{
    DP_TBFLAG_ANY(flags, FPEXC_EL, fp_el);
    DP_TBFLAG_ANY(flags, MMUIDX, arm_to_core_mmu_idx(mmu_idx));

    if (arm_singlestep_active(env)) {
        DP_TBFLAG_ANY(flags, SS_ACTIVE, 1);
    }

    return flags;
}

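/*
 * Added commentary: the flags accumulated here are cached in env->hflags
 * and key the translated code; assert_hflags_rebuild_correctly() at the
 * bottom of this file cross-checks the cached value against a fresh
 * rebuild when CONFIG_DEBUG_TCG is enabled.
 */
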
static CPUARMTBFlags rebuild_hflags_common_32(CPUARMState *env, int fp_el,
                                              ARMMMUIdx mmu_idx,
                                              CPUARMTBFlags flags)
{
    bool sctlr_b = arm_sctlr_b(env);

    if (sctlr_b) {
        DP_TBFLAG_A32(flags, SCTLR__B, 1);
    }
    if (arm_cpu_data_is_big_endian_a32(env, sctlr_b)) {
        DP_TBFLAG_ANY(flags, BE_DATA, 1);
    }
    DP_TBFLAG_A32(flags, NS, !access_secure_reg(env));

    return rebuild_hflags_common(env, fp_el, mmu_idx, flags);
}

static CPUARMTBFlags rebuild_hflags_m32(CPUARMState *env, int fp_el,
                                        ARMMMUIdx mmu_idx)
{
    CPUARMTBFlags flags = {};
    uint32_t ccr = env->v7m.ccr[env->v7m.secure];

    /* Without HaveMainExt, CCR.UNALIGN_TRP is RES1. */
    if (ccr & R_V7M_CCR_UNALIGN_TRP_MASK) {
        DP_TBFLAG_ANY(flags, ALIGN_MEM, 1);
    }

    if (arm_v7m_is_handler_mode(env)) {
        DP_TBFLAG_M32(flags, HANDLER, 1);
    }

    /*
     * v8M always applies stack limit checks unless CCR.STKOFHFNMIGN
     * is suppressing them because the requested execution priority
     * is negative.
     */
    if (arm_feature(env, ARM_FEATURE_V8) &&
        !((mmu_idx & ARM_MMU_IDX_M_NEGPRI) &&
          (ccr & R_V7M_CCR_STKOFHFNMIGN_MASK))) {
        DP_TBFLAG_M32(flags, STACKCHECK, 1);
    }

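    /*
     * Example (added commentary): in a v8M HardFault or NMI handler the
     * execution priority is negative, so mmu_idx carries
     * ARM_MMU_IDX_M_NEGPRI; if CCR.STKOFHFNMIGN is also set, STACKCHECK
     * stays clear and no stack-limit checks are generated there.
     */
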
    if (arm_feature(env, ARM_FEATURE_M_SECURITY) && env->v7m.secure) {
        DP_TBFLAG_M32(flags, SECURE, 1);
    }

    return rebuild_hflags_common_32(env, fp_el, mmu_idx, flags);
}

/* This corresponds to the ARM pseudocode function IsFullA64Enabled(). */
static bool sme_fa64(CPUARMState *env, int el)
{
    if (!cpu_isar_feature(aa64_sme_fa64, env_archcpu(env))) {
        return false;
    }

    if (el <= 1 && !el_is_in_host(env, el)) {
        if (!FIELD_EX64(env->vfp.smcr_el[1], SMCR, FA64)) {
            return false;
        }
    }
    if (el <= 2 && arm_is_el2_enabled(env)) {
        if (!FIELD_EX64(env->vfp.smcr_el[2], SMCR, FA64)) {
            return false;
        }
    }
    if (arm_feature(env, ARM_FEATURE_EL3)) {
        if (!FIELD_EX64(env->vfp.smcr_el[3], SMCR, FA64)) {
            return false;
        }
    }

    return true;
}

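/*
 * Added commentary: FA64 must be permitted by every level that controls
 * the current context. E.g. for EL0 under a normal (non-VHE-host) EL1,
 * SMCR_EL1.FA64, SMCR_EL2.FA64 (when EL2 is enabled) and SMCR_EL3.FA64
 * (when EL3 is implemented) must all be set before the full A64 ISA is
 * available in streaming SVE mode.
 */
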
static CPUARMTBFlags rebuild_hflags_a32(CPUARMState *env, int fp_el,
                                        ARMMMUIdx mmu_idx)
{
    CPUARMTBFlags flags = {};
    int el = arm_current_el(env);

    if (arm_sctlr(env, el) & SCTLR_A) {
        DP_TBFLAG_ANY(flags, ALIGN_MEM, 1);
    }

    if (arm_el_is_aa64(env, 1)) {
        DP_TBFLAG_A32(flags, VFPEN, 1);
    }

    if (el < 2 && env->cp15.hstr_el2 && arm_is_el2_enabled(env) &&
        (arm_hcr_el2_eff(env) & (HCR_E2H | HCR_TGE)) != (HCR_E2H | HCR_TGE)) {
        DP_TBFLAG_A32(flags, HSTR_ACTIVE, 1);
    }

    if (arm_fgt_active(env, el)) {
        DP_TBFLAG_ANY(flags, FGT_ACTIVE, 1);
        if (fgt_svc(env, el)) {
            DP_TBFLAG_ANY(flags, FGT_SVC, 1);
        }
    }

    if (env->uncached_cpsr & CPSR_IL) {
        DP_TBFLAG_ANY(flags, PSTATE__IL, 1);
    }

    /*
     * The SME exception we are testing for is raised via
     * AArch64.CheckFPAdvSIMDEnabled(), as called from
     * AArch32.CheckAdvSIMDOrFPEnabled().
     */
    if (el == 0
        && FIELD_EX64(env->svcr, SVCR, SM)
        && (!arm_is_el2_enabled(env)
            || (arm_el_is_aa64(env, 2) && !(env->cp15.hcr_el2 & HCR_TGE)))
        && arm_el_is_aa64(env, 1)
        && !sme_fa64(env, el)) {
        DP_TBFLAG_A32(flags, SME_TRAP_NONSTREAMING, 1);
    }

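    /*
     * Added commentary: this covers an AArch32 EL0 running in streaming
     * SVE mode under an AArch64 EL1; without FA64, non-streaming
     * FP/AdvSIMD insns must raise the SME exception via the AArch64
     * check functions named in the comment above.
     */
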
    return rebuild_hflags_common_32(env, fp_el, mmu_idx, flags);
}

static CPUARMTBFlags rebuild_hflags_a64(CPUARMState *env, int el, int fp_el,
                                        ARMMMUIdx mmu_idx)
{
    CPUARMTBFlags flags = {};
    ARMMMUIdx stage1 = stage_1_mmu_idx(mmu_idx);
    uint64_t tcr = regime_tcr(env, mmu_idx);
    uint64_t sctlr;
    int tbii, tbid;

    DP_TBFLAG_ANY(flags, AARCH64_STATE, 1);

    /* Get control bits for tagged addresses. */
    tbid = aa64_va_parameter_tbi(tcr, mmu_idx);
    tbii = tbid & ~aa64_va_parameter_tbid(tcr, mmu_idx);

    DP_TBFLAG_A64(flags, TBII, tbii);
    DP_TBFLAG_A64(flags, TBID, tbid);

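    /*
     * Added commentary: TCR_ELx.TBID restricts top-byte-ignore to data
     * accesses, so TBII caches the TBI bits that still apply to
     * instruction addresses (TBI and not TBID), while the TBID flag
     * caches the raw TBI bits used for data addresses.
     */
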
    if (cpu_isar_feature(aa64_sve, env_archcpu(env))) {
        int sve_el = sve_exception_el(env, el);

        /*
         * If either FP or SVE are disabled, translator does not need len.
         * If SVE EL > FP EL, FP exception has precedence, and translator
         * does not need SVE EL.  Save potential re-translations by forcing
         * the unneeded data to zero.
         */
        if (fp_el != 0) {
            if (sve_el > fp_el) {
                sve_el = 0;
            }
        } else if (sve_el == 0) {
            DP_TBFLAG_A64(flags, VL, sve_vqm1_for_el(env, el));
        }
        DP_TBFLAG_A64(flags, SVEEXC_EL, sve_el);
    }

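    /*
     * Illustrative example (added commentary): with FP trapped at a lower
     * EL than SVE (fp_el != 0, sve_el > fp_el), only the FP trap can be
     * delivered, so sve_el is zeroed above; a later change to the dead
     * SVE state then cannot alter the TB flags and force retranslation.
     */
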
    if (cpu_isar_feature(aa64_sme, env_archcpu(env))) {
        int sme_el = sme_exception_el(env, el);
        bool sm = FIELD_EX64(env->svcr, SVCR, SM);

        DP_TBFLAG_A64(flags, SMEEXC_EL, sme_el);
        if (sme_el == 0) {
            /* Similarly, do not compute SVL if SME is disabled. */
            int svl = sve_vqm1_for_el_sm(env, el, true);
            DP_TBFLAG_A64(flags, SVL, svl);
            if (sm) {
                /* If SVE is disabled, we will not have set VL above. */
                DP_TBFLAG_A64(flags, VL, svl);
            }
        }
        if (sm) {
            DP_TBFLAG_A64(flags, PSTATE_SM, 1);
            DP_TBFLAG_A64(flags, SME_TRAP_NONSTREAMING, !sme_fa64(env, el));
        }
        DP_TBFLAG_A64(flags, PSTATE_ZA, FIELD_EX64(env->svcr, SVCR, ZA));
    }

    sctlr = regime_sctlr(env, stage1);

    if (sctlr & SCTLR_A) {
        DP_TBFLAG_ANY(flags, ALIGN_MEM, 1);
    }

    if (arm_cpu_data_is_big_endian_a64(el, sctlr)) {
        DP_TBFLAG_ANY(flags, BE_DATA, 1);
    }

    if (cpu_isar_feature(aa64_pauth, env_archcpu(env))) {
        /*
         * In order to save space in flags, we record only whether
         * pauth is "inactive", meaning all insns are implemented as
         * a nop, or "active" when some action must be performed.
         * The decision of which action to take is left to a helper.
         */
        if (sctlr & (SCTLR_EnIA | SCTLR_EnIB | SCTLR_EnDA | SCTLR_EnDB)) {
            DP_TBFLAG_A64(flags, PAUTH_ACTIVE, 1);
        }
    }

    if (cpu_isar_feature(aa64_bti, env_archcpu(env))) {
        /* Note that SCTLR_EL[23].BT == SCTLR_BT1. */
        if (sctlr & (el == 0 ? SCTLR_BT0 : SCTLR_BT1)) {
            DP_TBFLAG_A64(flags, BT, 1);
        }
    }

    if (cpu_isar_feature(aa64_lse2, env_archcpu(env))) {
        if (sctlr & SCTLR_nAA) {
            DP_TBFLAG_A64(flags, NAA, 1);
        }
    }

    /* Compute the condition for using AccType_UNPRIV for LDTR et al. */
    if (!(env->pstate & PSTATE_UAO)) {
        switch (mmu_idx) {
        case ARMMMUIdx_E10_1:
        case ARMMMUIdx_E10_1_PAN:
            /* TODO: ARMv8.3-NV */
            DP_TBFLAG_A64(flags, UNPRIV, 1);
            break;
        case ARMMMUIdx_E20_2:
        case ARMMMUIdx_E20_2_PAN:
            /*
             * Note that EL20_2 is gated by HCR_EL2.E2H == 1, but EL20_0 is
             * gated by HCR_EL2.<E2H,TGE> == '11', and so is LDTR.
             */
            if (env->cp15.hcr_el2 & HCR_TGE) {
                DP_TBFLAG_A64(flags, UNPRIV, 1);
            }
            break;
        default:
            break;
        }
    }

    if (env->pstate & PSTATE_IL) {
        DP_TBFLAG_ANY(flags, PSTATE__IL, 1);
    }

    if (arm_fgt_active(env, el)) {
        DP_TBFLAG_ANY(flags, FGT_ACTIVE, 1);
        if (FIELD_EX64(env->cp15.fgt_exec[FGTREG_HFGITR], HFGITR_EL2, ERET)) {
            DP_TBFLAG_A64(flags, FGT_ERET, 1);
        }
        if (fgt_svc(env, el)) {
            DP_TBFLAG_ANY(flags, FGT_SVC, 1);
        }
    }

    if (cpu_isar_feature(aa64_mte, env_archcpu(env))) {
        /*
         * Set MTE_ACTIVE if any access may be Checked, and leave clear
         * if all accesses must be Unchecked:
         * 1) If no TBI, then there are no tags in the address to check,
         * 2) If Tag Check Override, then all accesses are Unchecked,
         * 3) If Tag Check Fail == 0, then Checked access have no effect,
         * 4) If no Allocation Tag Access, then all accesses are Unchecked.
         */
        if (allocation_tag_access_enabled(env, el, sctlr)) {
            DP_TBFLAG_A64(flags, ATA, 1);
            if (tbid
                && !(env->pstate & PSTATE_TCO)
                && (sctlr & (el == 0 ? SCTLR_TCF0 : SCTLR_TCF))) {
                DP_TBFLAG_A64(flags, MTE_ACTIVE, 1);
                if (!EX_TBFLAG_A64(flags, UNPRIV)) {
                    /*
                     * In non-unpriv contexts (eg EL0), unpriv load/stores
                     * act like normal ones; duplicate the MTE info to
                     * avoid translate-a64.c having to check UNPRIV to see
                     * whether it is OK to index into MTE_ACTIVE[].
                     */
                    DP_TBFLAG_A64(flags, MTE0_ACTIVE, 1);
                }
            }
        }
        /* And again for unprivileged accesses, if required. */
        if (EX_TBFLAG_A64(flags, UNPRIV)
            && tbid
            && !(env->pstate & PSTATE_TCO)
            && (sctlr & SCTLR_TCF0)
            && allocation_tag_access_enabled(env, 0, sctlr)) {
            DP_TBFLAG_A64(flags, MTE0_ACTIVE, 1);
        }
        /*
         * For unpriv tag-setting accesses we also need ATA0. Again, in
         * contexts where unpriv and normal insns are the same we
         * duplicate the ATA bit to save effort for translate-a64.c.
         */
        if (EX_TBFLAG_A64(flags, UNPRIV)) {
            if (allocation_tag_access_enabled(env, 0, sctlr)) {
                DP_TBFLAG_A64(flags, ATA0, 1);
            }
        } else {
            DP_TBFLAG_A64(flags, ATA0, EX_TBFLAG_A64(flags, ATA));
        }
        /* Cache TCMA as well as TBI. */
        DP_TBFLAG_A64(flags, TCMA, aa64_va_parameter_tcma(tcr, mmu_idx));
    }

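    /*
     * Added commentary: TCMA caches TCR_ELx.TCMA{0,1}; when set, accesses
     * whose address tag is 0b0000 (TCMA0) or 0b1111 (TCMA1) are treated
     * as Unchecked, which the MTE code consults alongside MTE_ACTIVE.
     */
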
    return rebuild_hflags_common(env, fp_el, mmu_idx, flags);
}

static CPUARMTBFlags rebuild_hflags_internal(CPUARMState *env)
{
    int el = arm_current_el(env);
    int fp_el = fp_exception_el(env, el);
    ARMMMUIdx mmu_idx = arm_mmu_idx_el(env, el);

    if (is_a64(env)) {
        return rebuild_hflags_a64(env, el, fp_el, mmu_idx);
    } else if (arm_feature(env, ARM_FEATURE_M)) {
        return rebuild_hflags_m32(env, fp_el, mmu_idx);
    } else {
        return rebuild_hflags_a32(env, fp_el, mmu_idx);
    }
}

void arm_rebuild_hflags(CPUARMState *env)
{
    env->hflags = rebuild_hflags_internal(env);
}

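/*
 * Added commentary: arm_rebuild_hflags() must be called whenever CPU state
 * feeding the flags above changes outside generated code (e.g. exception
 * entry/return or a system-register write). The HELPER(rebuild_hflags_*)
 * wrappers below do the same from TCG-generated code; the *_newel variants
 * handle the case where the exception level itself has changed.
 */
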
/*
 * If we have triggered an EL state change we can't rely on the
 * translator having passed it to us, we need to recompute.
 */
void HELPER(rebuild_hflags_m32_newel)(CPUARMState *env)
{
    int el = arm_current_el(env);
    int fp_el = fp_exception_el(env, el);
    ARMMMUIdx mmu_idx = arm_mmu_idx_el(env, el);

    env->hflags = rebuild_hflags_m32(env, fp_el, mmu_idx);
}

void HELPER(rebuild_hflags_m32)(CPUARMState *env, int el)
{
    int fp_el = fp_exception_el(env, el);
    ARMMMUIdx mmu_idx = arm_mmu_idx_el(env, el);

    env->hflags = rebuild_hflags_m32(env, fp_el, mmu_idx);
}

/*
 * If we have triggered an EL state change we can't rely on the
 * translator having passed it to us, we need to recompute.
 */
void HELPER(rebuild_hflags_a32_newel)(CPUARMState *env)
{
    int el = arm_current_el(env);
    int fp_el = fp_exception_el(env, el);
    ARMMMUIdx mmu_idx = arm_mmu_idx_el(env, el);

    env->hflags = rebuild_hflags_a32(env, fp_el, mmu_idx);
}

void HELPER(rebuild_hflags_a32)(CPUARMState *env, int el)
{
    int fp_el = fp_exception_el(env, el);
    ARMMMUIdx mmu_idx = arm_mmu_idx_el(env, el);

    env->hflags = rebuild_hflags_a32(env, fp_el, mmu_idx);
}

void HELPER(rebuild_hflags_a64)(CPUARMState *env, int el)
{
    int fp_el = fp_exception_el(env, el);
    ARMMMUIdx mmu_idx = arm_mmu_idx_el(env, el);

    env->hflags = rebuild_hflags_a64(env, el, fp_el, mmu_idx);
}

void assert_hflags_rebuild_correctly(CPUARMState *env)
{
#ifdef CONFIG_DEBUG_TCG
    CPUARMTBFlags c = env->hflags;
    CPUARMTBFlags r = rebuild_hflags_internal(env);

    if (unlikely(c.flags != r.flags || c.flags2 != r.flags2)) {
        fprintf(stderr, "TCG hflags mismatch "
                        "(current:(0x%08x,0x" TARGET_FMT_lx ")"
                        " rebuilt:(0x%08x,0x" TARGET_FMT_lx ")\n",
                c.flags, c.flags2, r.flags, r.flags2);
        abort();
    }
#endif
}