#ifndef TARGET_ARM_TRANSLATE_H
#define TARGET_ARM_TRANSLATE_H

#include "exec/translator.h"
#include "internals.h"

typedef struct DisasContext {
    DisasContextBase base;
    const ARMISARegisters *isar;

    /* The address of the current instruction being translated. */
    target_ulong pc_curr;
    target_ulong page_start;
    /* Nonzero if this instruction has been conditionally skipped. */
    int condjmp;
    /* The label that will be jumped to when the instruction is skipped. */
    TCGLabel *condlabel;
    /* Thumb-2 conditional execution bits. */
    int condexec_mask;
    int condexec_cond;
    MemOp be_data;
#if !defined(CONFIG_USER_ONLY)
    int user;
#endif
    ARMMMUIdx mmu_idx;      /* MMU index to use for normal loads/stores */
    uint8_t tbii;           /* TBI1|TBI0 for insns */
    uint8_t tbid;           /* TBI1|TBI0 for data */
    uint8_t tcma;           /* TCMA1|TCMA0 for MTE */
    bool ns;                /* Use non-secure CPREG bank on access */
    int fp_excp_el;         /* FP exception EL or 0 if enabled */
    int sve_excp_el;        /* SVE exception EL or 0 if enabled */
    int sve_len;            /* SVE vector length in bytes */
    /* Flag indicating that exceptions from secure mode are routed to EL3. */
    bool secure_routed_to_el3;
    bool vfp_enabled;       /* FP enabled via FPSCR.EN */
    bool v7m_handler_mode;
    bool v8m_secure;        /* true if v8M and we're in Secure mode */
    bool v8m_stackcheck;    /* true if we need to perform v8M stack limit checks */
    bool v8m_fpccr_s_wrong; /* true if v8M FPCCR.S != v8m_secure */
    bool v7m_new_fp_ctxt_needed; /* ASPEN set but no active FP context */
    bool v7m_lspact;        /* FPCCR.LSPACT set */
    /* Immediate value in AArch32 SVC insn; must be set if is_jmp == DISAS_SWI
     * so that the top level loop can generate correct syndrome information.
     */
    uint32_t svc_imm;
    int current_el;
    /* Debug target exception level for single-step exceptions */
    int debug_target_el;
    uint64_t features;      /* CPU features bits */
    /* Because unallocated encodings generate different exception syndrome
     * information from traps due to FP being disabled, we can't do a single
     * "is fp access disabled" check at a high level in the decode tree.
     * To help in catching bugs where the access check was forgotten in some
     * code path, we set this flag when the access check is done, and assert
     * that it is set at the point where we actually touch the FP regs.
     */
    bool fp_access_checked;
    bool sve_access_checked;
    /* ARMv8 single-step state (this is distinct from the QEMU gdbstub
     * single-step support).
     */
    bool ss_active;
    bool pstate_ss;
    /* True if the insn just emitted was a load-exclusive instruction
     * (necessary for syndrome information for single step exceptions),
     * ie A64 LDX*, LDAX*, A32/T32 LDREX*, LDAEX*.
     */
    bool is_ldex;
    /* True if AccType_UNPRIV should be used for LDTR et al */
    bool unpriv;
    /* True if v8.3-PAuth is active. */
    bool pauth_active;
    /* True if v8.5-MTE access to tags is enabled. */
    bool ata;
    /* True if v8.5-MTE tag checks affect the PE; index with is_unpriv. */
    bool mte_active[2];
    /* True with v8.5-BTI and SCTLR_ELx.BT* set. */
    bool bt;
    /* True if any CP15 access is trapped by HSTR_EL2 */
    bool hstr_active;
    /* True if memory operations require alignment */
    bool align_mem;
    /*
     * >= 0, a copy of PSTATE.BTYPE, which will be 0 without v8.5-BTI.
     *  < 0, set by the current instruction.
     */
    int8_t btype;
    /* A copy of cpu->dcz_blocksize. */
    uint8_t dcz_blocksize;
    /* True if this page is guarded. */
    bool guarded_page;
    /* Bottom two bits of XScale c15_cpar coprocessor access control reg */
    int c15_cpar;
    /* TCG op of the current insn_start. */
    TCGOp *insn_start;
#define TMP_A64_MAX 16
    TCGv_i64 tmp_a64[TMP_A64_MAX];
} DisasContext;

typedef struct DisasCompare {
    TCGCond cond;
    TCGv_i32 value;
    bool value_global;
} DisasCompare;

/* Share the TCG temporaries common between 32 and 64 bit modes. */
extern TCGv_i32 cpu_NF, cpu_ZF, cpu_CF, cpu_VF;
extern TCGv_i64 cpu_exclusive_addr;
extern TCGv_i64 cpu_exclusive_val;

/*
 * Constant expanders for the decoders.
 */

static inline int negate(DisasContext *s, int x)
{
    return -x;
}

static inline int plus_2(DisasContext *s, int x)
{
    return x + 2;
}

static inline int times_2(DisasContext *s, int x)
{
    return x * 2;
}

static inline int times_4(DisasContext *s, int x)
{
    return x * 4;
}

static inline int arm_dc_feature(DisasContext *dc, int feature)
{
    return (dc->features & (1ULL << feature)) != 0;
}
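
/*
 * Illustrative use (hypothetical decode fragment; ARM_FEATURE_M is one of
 * the arm_features bits defined in cpu.h):
 *
 *     if (arm_dc_feature(s, ARM_FEATURE_M)) {
 *         ... M-profile specific behaviour ...
 *     }
 */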

static inline int get_mem_index(DisasContext *s)
{
    return arm_to_core_mmu_idx(s->mmu_idx);
}

/* Function used to determine the target exception EL when otherwise not known
 * or default.
 */
static inline int default_exception_el(DisasContext *s)
{
    /* If we are coming from secure EL0 in a system with a 32-bit EL3, then
     * there is no secure EL1, so we route exceptions to EL3. Otherwise,
     * exceptions can only be routed to ELs above 1, so we target the higher of
     * 1 or the current EL.
     */
    return (s->mmu_idx == ARMMMUIdx_SE10_0 && s->secure_routed_to_el3)
            ? 3 : MAX(1, s->current_el);
}
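
/*
 * Typical use (sketch): routing an UNDEFINED exception when no more
 * specific target EL is known, in the style of unallocated_encoding():
 *
 *     gen_exception_insn(s, s->pc_curr, EXCP_UDEF, syn_uncategorized(),
 *                        default_exception_el(s));
 */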

static inline void disas_set_insn_syndrome(DisasContext *s, uint32_t syn)
{
    /* We don't need to save all of the syndrome so we mask and shift
     * out unneeded bits to help the sleb128 encoder do a better job.
     */
    syn &= ARM_INSN_START_WORD2_MASK;
    syn >>= ARM_INSN_START_WORD2_SHIFT;

    /* We check and clear insn_start to catch multiple updates. */
    assert(s->insn_start != NULL);
    tcg_set_insn_start_param(s->insn_start, 2, syn);
    s->insn_start = NULL;
}

/* is_jmp field values */
#define DISAS_JUMP      DISAS_TARGET_0 /* only pc was modified dynamically */
/* CPU state was modified dynamically; exit to main loop for interrupts. */
#define DISAS_UPDATE_EXIT  DISAS_TARGET_1
/* These instructions trap after executing, so the A32/T32 decoder must
 * defer them until after the conditional execution state has been updated.
 * WFI also needs special handling when single-stepping.
 */
#define DISAS_WFI       DISAS_TARGET_2
#define DISAS_SWI       DISAS_TARGET_3
/* WFE */
#define DISAS_WFE       DISAS_TARGET_4
#define DISAS_HVC       DISAS_TARGET_5
#define DISAS_SMC       DISAS_TARGET_6
#define DISAS_YIELD     DISAS_TARGET_7
/* M profile branch which might be an exception return (and so needs
 * custom end-of-TB code)
 */
#define DISAS_BX_EXCRET DISAS_TARGET_8
/*
 * For instructions which want an immediate exit to the main loop, as opposed
 * to attempting to use lookup_and_goto_ptr. Unlike DISAS_UPDATE_EXIT, this
 * doesn't write the PC on exiting the translation loop, so you need to ensure
 * something (gen_a64_set_pc_im or a runtime helper) has done so before we
 * reach the return from cpu_tb_exec.
 */
#define DISAS_EXIT      DISAS_TARGET_9
/* CPU state was modified dynamically; no need to exit, but do not chain. */
#define DISAS_UPDATE_NOCHAIN  DISAS_TARGET_10

#ifdef TARGET_AARCH64
void a64_translate_init(void);
void gen_a64_set_pc_im(uint64_t val);
extern const TranslatorOps aarch64_translator_ops;
#else
static inline void a64_translate_init(void)
{
}

static inline void gen_a64_set_pc_im(uint64_t val)
{
}
#endif

void arm_test_cc(DisasCompare *cmp, int cc);
void arm_free_cc(DisasCompare *cmp);
void arm_jump_cc(DisasCompare *cmp, TCGLabel *label);
void arm_gen_test_cc(int cc, TCGLabel *label);
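
/*
 * Sketch of a DisasCompare-based conditional move (illustrative only;
 * "dest", "t" and "f" are hypothetical TCGv_i32 values):
 *
 *     DisasCompare c;
 *     arm_test_cc(&c, cond);
 *     tcg_gen_movcond_i32(c.cond, dest, c.value, tcg_constant_i32(0), t, f);
 *     arm_free_cc(&c);
 */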

MemOp pow2_align(unsigned i);
void unallocated_encoding(DisasContext *s);
void gen_exception_insn(DisasContext *s, uint64_t pc, int excp,
                        uint32_t syn, uint32_t target_el);

/* Return state of Alternate Half-precision flag, caller frees result */
static inline TCGv_i32 get_ahp_flag(void)
{
    TCGv_i32 ret = tcg_temp_new_i32();

    tcg_gen_ld_i32(ret, cpu_env,
                   offsetof(CPUARMState, vfp.xregs[ARM_VFP_FPSCR]));
    tcg_gen_extract_i32(ret, ret, 26, 1);
    return ret;
}
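
/*
 * Example (sketch): the half-precision conversion helpers take the AHP
 * flag as their final operand ("dest" and "src" are hypothetical temps):
 *
 *     TCGv_ptr fpst = fpstatus_ptr(FPST_FPCR);
 *     TCGv_i32 ahp = get_ahp_flag();
 *     gen_helper_vfp_fcvt_f16_to_f32(dest, src, fpst, ahp);
 *     tcg_temp_free_i32(ahp);
 *     tcg_temp_free_ptr(fpst);
 */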

/* Set bits within PSTATE. */
static inline void set_pstate_bits(uint32_t bits)
{
    TCGv_i32 p = tcg_temp_new_i32();

    tcg_debug_assert(!(bits & CACHED_PSTATE_BITS));

    tcg_gen_ld_i32(p, cpu_env, offsetof(CPUARMState, pstate));
    tcg_gen_ori_i32(p, p, bits);
    tcg_gen_st_i32(p, cpu_env, offsetof(CPUARMState, pstate));
    tcg_temp_free_i32(p);
}

/* Clear bits within PSTATE. */
static inline void clear_pstate_bits(uint32_t bits)
{
    TCGv_i32 p = tcg_temp_new_i32();

    tcg_debug_assert(!(bits & CACHED_PSTATE_BITS));

    tcg_gen_ld_i32(p, cpu_env, offsetof(CPUARMState, pstate));
    tcg_gen_andi_i32(p, p, ~bits);
    tcg_gen_st_i32(p, cpu_env, offsetof(CPUARMState, pstate));
    tcg_temp_free_i32(p);
}

/* If the singlestep state is Active-not-pending, advance to Active-pending. */
static inline void gen_ss_advance(DisasContext *s)
{
    if (s->ss_active) {
        s->pstate_ss = 0;
        clear_pstate_bits(PSTATE_SS);
    }
}

static inline void gen_exception(int excp, uint32_t syndrome,
                                 uint32_t target_el)
{
    TCGv_i32 tcg_excp = tcg_const_i32(excp);
    TCGv_i32 tcg_syn = tcg_const_i32(syndrome);
    TCGv_i32 tcg_el = tcg_const_i32(target_el);

    gen_helper_exception_with_syndrome(cpu_env, tcg_excp,
                                       tcg_syn, tcg_el);

    tcg_temp_free_i32(tcg_el);
    tcg_temp_free_i32(tcg_syn);
    tcg_temp_free_i32(tcg_excp);
}

/* Generate an architectural singlestep exception */
static inline void gen_swstep_exception(DisasContext *s, int isv, int ex)
{
    bool same_el = (s->debug_target_el == s->current_el);

    /*
     * If singlestep is targeting a lower EL than the current one,
     * then s->ss_active must be false and we can never get here.
     */
    assert(s->debug_target_el >= s->current_el);

    gen_exception(EXCP_UDEF, syn_swstep(same_el, isv, ex), s->debug_target_el);
}

/*
 * Given a VFP floating point constant encoded into an 8 bit immediate in an
 * instruction, expand it to the actual constant value of the specified
 * size, as per the VFPExpandImm() pseudocode in the Arm ARM.
 */
uint64_t vfp_expand_imm(int size, uint8_t imm8);
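
/*
 * For example, per the VFPExpandImm() pseudocode:
 *     vfp_expand_imm(MO_32, 0x70) == 0x3f800000          (1.0f)
 *     vfp_expand_imm(MO_64, 0x70) == 0x3ff0000000000000  (1.0)
 */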

/* Vector operations shared between ARM and AArch64. */
void gen_gvec_ceq0(unsigned vece, uint32_t rd_ofs, uint32_t rm_ofs,
                   uint32_t opr_sz, uint32_t max_sz);
void gen_gvec_clt0(unsigned vece, uint32_t rd_ofs, uint32_t rm_ofs,
                   uint32_t opr_sz, uint32_t max_sz);
void gen_gvec_cgt0(unsigned vece, uint32_t rd_ofs, uint32_t rm_ofs,
                   uint32_t opr_sz, uint32_t max_sz);
void gen_gvec_cle0(unsigned vece, uint32_t rd_ofs, uint32_t rm_ofs,
                   uint32_t opr_sz, uint32_t max_sz);
void gen_gvec_cge0(unsigned vece, uint32_t rd_ofs, uint32_t rm_ofs,
                   uint32_t opr_sz, uint32_t max_sz);

void gen_gvec_mla(unsigned vece, uint32_t rd_ofs, uint32_t rn_ofs,
                  uint32_t rm_ofs, uint32_t opr_sz, uint32_t max_sz);
void gen_gvec_mls(unsigned vece, uint32_t rd_ofs, uint32_t rn_ofs,
                  uint32_t rm_ofs, uint32_t opr_sz, uint32_t max_sz);

void gen_gvec_cmtst(unsigned vece, uint32_t rd_ofs, uint32_t rn_ofs,
                    uint32_t rm_ofs, uint32_t opr_sz, uint32_t max_sz);
void gen_gvec_sshl(unsigned vece, uint32_t rd_ofs, uint32_t rn_ofs,
                   uint32_t rm_ofs, uint32_t opr_sz, uint32_t max_sz);
void gen_gvec_ushl(unsigned vece, uint32_t rd_ofs, uint32_t rn_ofs,
                   uint32_t rm_ofs, uint32_t opr_sz, uint32_t max_sz);

void gen_cmtst_i64(TCGv_i64 d, TCGv_i64 a, TCGv_i64 b);
void gen_ushl_i32(TCGv_i32 d, TCGv_i32 a, TCGv_i32 b);
void gen_sshl_i32(TCGv_i32 d, TCGv_i32 a, TCGv_i32 b);
void gen_ushl_i64(TCGv_i64 d, TCGv_i64 a, TCGv_i64 b);
void gen_sshl_i64(TCGv_i64 d, TCGv_i64 a, TCGv_i64 b);

void gen_gvec_uqadd_qc(unsigned vece, uint32_t rd_ofs, uint32_t rn_ofs,
                       uint32_t rm_ofs, uint32_t opr_sz, uint32_t max_sz);
void gen_gvec_sqadd_qc(unsigned vece, uint32_t rd_ofs, uint32_t rn_ofs,
                       uint32_t rm_ofs, uint32_t opr_sz, uint32_t max_sz);
void gen_gvec_uqsub_qc(unsigned vece, uint32_t rd_ofs, uint32_t rn_ofs,
                       uint32_t rm_ofs, uint32_t opr_sz, uint32_t max_sz);
void gen_gvec_sqsub_qc(unsigned vece, uint32_t rd_ofs, uint32_t rn_ofs,
                       uint32_t rm_ofs, uint32_t opr_sz, uint32_t max_sz);

void gen_gvec_ssra(unsigned vece, uint32_t rd_ofs, uint32_t rm_ofs,
                   int64_t shift, uint32_t opr_sz, uint32_t max_sz);
void gen_gvec_usra(unsigned vece, uint32_t rd_ofs, uint32_t rm_ofs,
                   int64_t shift, uint32_t opr_sz, uint32_t max_sz);

void gen_gvec_srshr(unsigned vece, uint32_t rd_ofs, uint32_t rm_ofs,
                    int64_t shift, uint32_t opr_sz, uint32_t max_sz);
void gen_gvec_urshr(unsigned vece, uint32_t rd_ofs, uint32_t rm_ofs,
                    int64_t shift, uint32_t opr_sz, uint32_t max_sz);
void gen_gvec_srsra(unsigned vece, uint32_t rd_ofs, uint32_t rm_ofs,
                    int64_t shift, uint32_t opr_sz, uint32_t max_sz);
void gen_gvec_ursra(unsigned vece, uint32_t rd_ofs, uint32_t rm_ofs,
                    int64_t shift, uint32_t opr_sz, uint32_t max_sz);

void gen_gvec_sri(unsigned vece, uint32_t rd_ofs, uint32_t rm_ofs,
                  int64_t shift, uint32_t opr_sz, uint32_t max_sz);
void gen_gvec_sli(unsigned vece, uint32_t rd_ofs, uint32_t rm_ofs,
                  int64_t shift, uint32_t opr_sz, uint32_t max_sz);

void gen_gvec_sqrdmlah_qc(unsigned vece, uint32_t rd_ofs, uint32_t rn_ofs,
                          uint32_t rm_ofs, uint32_t opr_sz, uint32_t max_sz);
void gen_gvec_sqrdmlsh_qc(unsigned vece, uint32_t rd_ofs, uint32_t rn_ofs,
                          uint32_t rm_ofs, uint32_t opr_sz, uint32_t max_sz);

void gen_gvec_sabd(unsigned vece, uint32_t rd_ofs, uint32_t rn_ofs,
                   uint32_t rm_ofs, uint32_t opr_sz, uint32_t max_sz);
void gen_gvec_uabd(unsigned vece, uint32_t rd_ofs, uint32_t rn_ofs,
                   uint32_t rm_ofs, uint32_t opr_sz, uint32_t max_sz);

void gen_gvec_saba(unsigned vece, uint32_t rd_ofs, uint32_t rn_ofs,
                   uint32_t rm_ofs, uint32_t opr_sz, uint32_t max_sz);
void gen_gvec_uaba(unsigned vece, uint32_t rd_ofs, uint32_t rn_ofs,
                   uint32_t rm_ofs, uint32_t opr_sz, uint32_t max_sz);

/*
 * Forward to the isar_feature_* tests given a DisasContext pointer.
 */
#define dc_isar_feature(name, ctx) \
    ({ DisasContext *ctx_ = (ctx); isar_feature_##name(ctx_->isar); })
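
/*
 * Typical use in a decode function (sketch; isar_feature_aa32_fp16_arith
 * is one of the predicates defined in cpu.h):
 *
 *     if (!dc_isar_feature(aa32_fp16_arith, s)) {
 *         unallocated_encoding(s);
 *         return true;
 *     }
 */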

/* Note that the gvec expanders operate on offsets + sizes. */
typedef void GVecGen2Fn(unsigned, uint32_t, uint32_t, uint32_t, uint32_t);
typedef void GVecGen2iFn(unsigned, uint32_t, uint32_t, int64_t,
                         uint32_t, uint32_t);
typedef void GVecGen3Fn(unsigned, uint32_t, uint32_t,
                        uint32_t, uint32_t, uint32_t);
typedef void GVecGen4Fn(unsigned, uint32_t, uint32_t, uint32_t,
                        uint32_t, uint32_t, uint32_t);
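
/*
 * Any of the gen_gvec_* expanders above with a matching signature can be
 * passed around as one of these, e.g. (sketch; "vec_reg_offset" stands in
 * for whichever register-offset helper the caller uses):
 *
 *     GVecGen3Fn *fn = u ? gen_gvec_ushl : gen_gvec_sshl;
 *     fn(vece, vec_reg_offset(s, rd), vec_reg_offset(s, rn),
 *        vec_reg_offset(s, rm), opr_sz, max_sz);
 */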

/* Function prototype for gen_ functions for calling Neon helpers */
typedef void NeonGenOneOpFn(TCGv_i32, TCGv_i32);
typedef void NeonGenOneOpEnvFn(TCGv_i32, TCGv_ptr, TCGv_i32);
typedef void NeonGenTwoOpFn(TCGv_i32, TCGv_i32, TCGv_i32);
typedef void NeonGenTwoOpEnvFn(TCGv_i32, TCGv_ptr, TCGv_i32, TCGv_i32);
typedef void NeonGenThreeOpEnvFn(TCGv_i32, TCGv_env, TCGv_i32,
                                 TCGv_i32, TCGv_i32);
typedef void NeonGenTwo64OpFn(TCGv_i64, TCGv_i64, TCGv_i64);
typedef void NeonGenTwo64OpEnvFn(TCGv_i64, TCGv_ptr, TCGv_i64, TCGv_i64);
typedef void NeonGenNarrowFn(TCGv_i32, TCGv_i64);
typedef void NeonGenNarrowEnvFn(TCGv_i32, TCGv_ptr, TCGv_i64);
typedef void NeonGenWidenFn(TCGv_i64, TCGv_i32);
typedef void NeonGenTwoOpWidenFn(TCGv_i64, TCGv_i32, TCGv_i32);
typedef void NeonGenOneSingleOpFn(TCGv_i32, TCGv_i32, TCGv_ptr);
typedef void NeonGenTwoSingleOpFn(TCGv_i32, TCGv_i32, TCGv_i32, TCGv_ptr);
typedef void NeonGenTwoDoubleOpFn(TCGv_i64, TCGv_i64, TCGv_i64, TCGv_ptr);
typedef void NeonGenOne64OpFn(TCGv_i64, TCGv_i64);
typedef void CryptoTwoOpFn(TCGv_ptr, TCGv_ptr);
typedef void CryptoThreeOpIntFn(TCGv_ptr, TCGv_ptr, TCGv_i32);
typedef void CryptoThreeOpFn(TCGv_ptr, TCGv_ptr, TCGv_ptr);
typedef void AtomicThreeOpFn(TCGv_i64, TCGv_i64, TCGv_i64, TCGArg, MemOp);

/**
 * arm_tbflags_from_tb:
 * @tb: the TranslationBlock
 *
 * Extract the flag values from @tb.
 */
static inline CPUARMTBFlags arm_tbflags_from_tb(const TranslationBlock *tb)
{
    return (CPUARMTBFlags){ tb->flags, tb->cs_base };
}
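
/*
 * For example (sketch), at the start of translation:
 *
 *     CPUARMTBFlags tb_flags = arm_tbflags_from_tb(dc->base.tb);
 */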

/*
 * Enum for argument to fpstatus_ptr().
 */
typedef enum ARMFPStatusFlavour {
    FPST_FPCR,
    FPST_FPCR_F16,
    FPST_STD,
    FPST_STD_F16,
} ARMFPStatusFlavour;

/**
 * fpstatus_ptr: return TCGv_ptr to the specified fp_status field
 *
 * We have multiple softfloat float_status fields in the Arm CPU state struct
 * (see the comment in cpu.h for details). Return a TCGv_ptr which has
 * been set up to point to the requested field in the CPU state struct.
 * The options are:
 *
 * FPST_FPCR
 *   for non-FP16 operations controlled by the FPCR
 * FPST_FPCR_F16
 *   for operations controlled by the FPCR where FPCR.FZ16 is to be used
 * FPST_STD
 *   for A32/T32 Neon operations using the "standard FPSCR value"
 * FPST_STD_F16
 *   as FPST_STD, but where FPCR.FZ16 is to be used
 */
static inline TCGv_ptr fpstatus_ptr(ARMFPStatusFlavour flavour)
{
    TCGv_ptr statusptr = tcg_temp_new_ptr();
    int offset;

    switch (flavour) {
    case FPST_FPCR:
        offset = offsetof(CPUARMState, vfp.fp_status);
        break;
    case FPST_FPCR_F16:
        offset = offsetof(CPUARMState, vfp.fp_status_f16);
        break;
    case FPST_STD:
        offset = offsetof(CPUARMState, vfp.standard_fp_status);
        break;
    case FPST_STD_F16:
        offset = offsetof(CPUARMState, vfp.standard_fp_status_f16);
        break;
    default:
        g_assert_not_reached();
    }
    tcg_gen_addi_ptr(statusptr, cpu_env, offset);
    return statusptr;
}
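
/*
 * Example (sketch): passing the FPCR-governed status to a softfloat helper
 * ("dest", "a" and "b" are hypothetical TCGv_i32 temps):
 *
 *     TCGv_ptr fpst = fpstatus_ptr(FPST_FPCR);
 *     gen_helper_vfp_adds(dest, a, b, fpst);
 *     tcg_temp_free_ptr(fpst);
 */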

/**
 * finalize_memop:
 * @s: DisasContext
 * @opc: size+sign+align of the memory operation
 *
 * Build the complete MemOp for a memory operation, including alignment
 * and endianness.
 *
 * If (op & MO_AMASK) then the operation already contains the required
 * alignment, e.g. for AccType_ATOMIC. Otherwise, this is an optionally
 * unaligned operation, e.g. for AccType_NORMAL.
 *
 * In the latter case, there are configuration bits that require alignment,
 * and this is applied here. Note that there is no way to indicate that
 * no alignment should ever be enforced; this must be handled manually.
 */
static inline MemOp finalize_memop(DisasContext *s, MemOp opc)
{
    if (s->align_mem && !(opc & MO_AMASK)) {
        opc |= MO_ALIGN;
    }
    return opc | s->be_data;
}
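
/*
 * Example (sketch): an unsigned 32-bit load picks up both the alignment
 * requirement and the endianness in one step ("dest" and "addr" are
 * hypothetical temps):
 *
 *     MemOp mop = finalize_memop(s, MO_UL);
 *     tcg_gen_qemu_ld_i32(dest, addr, get_mem_index(s), mop);
 */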

#endif /* TARGET_ARM_TRANSLATE_H */